diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml index 1d14aa6cc793..a833e637d038 100644 --- a/hbase-annotations/pom.xml +++ b/hbase-annotations/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase org.apache.hbase + hbase 3.0.0-alpha-3-SNAPSHOT .. diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java index c2510efb026a..d9bae8490637 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the client. This tests the hbase-client package and all of the client * tests in hbase-server. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java index 4341becbd68a..a168adec08af 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to coprocessors. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java index a91033fa2d38..84f346baaea2 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java index 22fbc1b724ff..c23bfa298b36 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as failing commonly on public build infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java index c2375ca4e5cb..8eee0e6ae4b9 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and * the like. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java index 6bc712e270cf..4e555b73fedb 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java @@ -15,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as 'integration/system' test, meaning that the test class has the following * characteristics: * - * - * Integration / System tests should have a class name starting with "IntegrationTest", and - * should be annotated with @Category(IntegrationTests.class). Integration tests can be run - * using the IntegrationTestsDriver class or from mvn verify. - * + * Integration / System tests should have a class name starting with "IntegrationTest", and should + * be annotated with @Category(IntegrationTests.class). Integration tests can be run using the + * IntegrationTestsDriver class or from mvn verify. * @see SmallTests * @see MediumTests * @see LargeTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java index aa183d5607d7..b47e5bab9a46 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.testclassification; /** * Tagging a test as 'large', means that the test class has the following characteristics: * - * * @see SmallTests * @see MediumTests * @see IntegrationTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java index 4b49da4e4dc0..0e68ab3c0340 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to mapred or mapreduce. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java index e837f49a268a..5dcf51b27e59 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the master. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java index 0f8055b5bab0..d1f836ec0049 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tagging a test as 'medium' means that the test class has the following characteristics: * - * - * Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster. - * + * Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster. 
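(For context on how these size categories are applied in practice: the pattern is the one visible later in this diff in TestHelloHBase. A minimal sketch of a 'medium' test that starts a cluster might look like the following; the class name and test body are hypothetical, and the HBaseTestingUtil#startMiniCluster/#shutdownMiniCluster calls are assumed from the usual HBase test bootstrap rather than taken from this patch.)

```java
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

// Hypothetical example: a test tagged 'medium' because it needs a mini-cluster.
@Category(MediumTests.class)
public class TestExampleMedium {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestExampleMedium.class);

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(); // assumed API; mirrors the standard HBase test setup
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testSomethingAgainstTheCluster() throws Exception {
    // exercise the cluster via TEST_UTIL.getAdmin(), TEST_UTIL.createTable(...), etc.
  }
}
```

(Integration tests follow the same annotation pattern, with class names starting with "IntegrationTest" and @Category(IntegrationTests.class), and are run via IntegrationTestsDriver or mvn verify as the Javadoc above notes.)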
* @see SmallTests * @see LargeTests * @see IntegrationTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java index 59962a74c280..27beaacf963e 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java index 2759bfc96df7..695042e801bf 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as not easily falling into any of the below categories. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java index 4edb9bf031d2..929bd6487edf 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to RPC. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java index 80b04eb7e598..050a70762928 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RSGroupTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java index 0f03b761fcb1..3439afa76eba 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the regionserver. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java index 8b8be4de8125..df606c960c25 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to replication. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java index e7d1d1d4c88c..a648b4c39e03 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the REST capability of HBase. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java index 5263d467cbee..a4e55ad3aba0 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to security. 
- * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java index 80e6c9d24209..64d2bce381b6 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,14 @@ /** * Tagging a test as 'small' means that the test class has the following characteristics: * - * * @see MediumTests * @see LargeTests * @see IntegrationTests */ -public interface SmallTests {} +public interface SmallTests { +} diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java index efc8d5ddc84c..d1f433b9719d 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** - * Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build + * Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build * infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java index 85507de5ad4d..f556979e5b6a 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as region tests which takes longer than 5 minutes to run on public build * infrastructure. 
- * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java index 86aa6bdc85e6..9fa0579ed47e 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml b/hbase-archetypes/hbase-archetype-builder/pom.xml index 851a3a7ed459..29dfb0692ada 100644 --- a/hbase-archetypes/hbase-archetype-builder/pom.xml +++ b/hbase-archetypes/hbase-archetype-builder/pom.xml @@ -1,6 +1,5 @@ - - + + hbase-client__copy-src-to-build-archetype-subdir - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-client.dir}/${build.archetype.subdir} @@ -76,29 +75,30 @@ hbase-client__copy-pom-to-temp-for-xslt-processing - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-client.dir}/${temp.exemplar.subdir} /${project.basedir}/../${hbase-client.dir} - true + true + pom.xml - + hbase-shaded-client__copy-src-to-build-archetype-subdir - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-shaded-client.dir}/${build.archetype.subdir} @@ -113,20 +113,21 @@ hbase-shaded-client__copy-pom-to-temp-for-xslt-processing - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-shaded-client.dir}/${temp.exemplar.subdir} /${project.basedir}/../${hbase-shaded-client.dir} - true + true + pom.xml - + @@ -137,10 +138,10 @@ using xml-maven-plugin for xslt transformation, below. --> hbase-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing - prepare-package copy-resources + prepare-package /${project.basedir}/../${hbase-client.dir}/${temp.archetype.subdir} @@ -149,16 +150,16 @@ pom.xml - + hbase-shaded-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing - prepare-package copy-resources + prepare-package /${project.basedir}/../${hbase-shaded-client.dir}/${temp.archetype.subdir} @@ -167,7 +168,7 @@ pom.xml - + @@ -182,10 +183,10 @@ modify-exemplar-pom-files-via-xslt - process-resources transform + process-resources @@ -212,10 +213,10 @@ prevent warnings when project is generated from archetype. --> modify-archetype-pom-files-via-xslt - package transform + package @@ -242,32 +243,32 @@ - maven-antrun-plugin + maven-antrun-plugin make-scripts-executable - process-resources run + process-resources - - + + run-createArchetypes-script - compile run + compile - - - + + + run-installArchetypes-script - install run + install - - - + + + diff --git a/hbase-archetypes/hbase-client-project/pom.xml b/hbase-archetypes/hbase-client-project/pom.xml index c6d0aa7c97e4..2b1afc0e7eba 100644 --- a/hbase-archetypes/hbase-client-project/pom.xml +++ b/hbase-archetypes/hbase-client-project/pom.xml @@ -1,8 +1,5 @@ - + 4.0.0 - hbase-archetypes org.apache.hbase + hbase-archetypes 3.0.0-alpha-3-SNAPSHOT .. 
diff --git a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java index a9e522fe16d4..c5bd96ddf4e8 100644 --- a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java +++ b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,19 +37,17 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Successful running of this application requires access to an active instance - * of HBase. For install instructions for a standalone instance of HBase, please - * refer to https://hbase.apache.org/book.html#quickstart + * Successful running of this application requires access to an active instance of HBase. For + * install instructions for a standalone instance of HBase, please refer to + * https://hbase.apache.org/book.html#quickstart */ public final class HelloHBase { protected static final String MY_NAMESPACE_NAME = "myTestNamespace"; static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable"); static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf"); - static final byte[] MY_FIRST_COLUMN_QUALIFIER - = Bytes.toBytes("myFirstColumn"); - static final byte[] MY_SECOND_COLUMN_QUALIFIER - = Bytes.toBytes("mySecondColumn"); + static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn"); + static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn"); static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01"); // Private constructor included here to avoid checkstyle warnings @@ -61,20 +58,20 @@ public static void main(final String[] args) throws IOException { final boolean deleteAllAtEOJ = true; /** - * ConnectionFactory#createConnection() automatically looks for - * hbase-site.xml (HBase configuration parameters) on the system's - * CLASSPATH, to enable creation of Connection to HBase via ZooKeeper. + * ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase + * configuration parameters) on the system's CLASSPATH, to enable creation of Connection to + * HBase via ZooKeeper. */ try (Connection connection = ConnectionFactory.createConnection(); Admin admin = connection.getAdmin()) { admin.getClusterMetrics(); // assure connection successfully established - System.out.println("\n*** Hello HBase! -- Connection has been " - + "established via ZooKeeper!!\n"); + System.out + .println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n"); createNamespaceAndTable(admin); System.out.println("Getting a Table object for [" + MY_TABLE_NAME - + "] with which to perform CRUD operations in HBase."); + + "] with which to perform CRUD operations in HBase."); try (Table table = connection.getTable(MY_TABLE_NAME)) { putRowToTable(table); @@ -92,9 +89,8 @@ public static void main(final String[] args) throws IOException { } /** - * Invokes Admin#createNamespace and Admin#createTable to create a namespace - * with a table that has one column-family. - * + * Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has + * one column-family. 
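(Condensed out of the HelloHBase exemplar above, the client bootstrap it demonstrates amounts to the following sketch. It only reuses calls that appear in the exemplar, including the exception-based namespace existence check; the namespace, table, and column-family names are illustrative.)

```java
import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class ClientBootstrapSketch {
  public static void main(String[] args) throws IOException {
    TableName tableName = TableName.valueOf("demoNs:demoTable"); // illustrative names
    // createConnection() picks up hbase-site.xml from the CLASSPATH, as noted above.
    try (Connection connection = ConnectionFactory.createConnection();
        Admin admin = connection.getAdmin()) {
      // Exception-based existence check, mirroring HelloHBase#namespaceExists.
      boolean nsExists;
      try {
        admin.getNamespaceDescriptor("demoNs");
        nsExists = true;
      } catch (NamespaceNotFoundException e) {
        nsExists = false;
      }
      if (!nsExists) {
        admin.createNamespace(NamespaceDescriptor.create("demoNs").build());
      }
      if (!admin.tableExists(tableName)) {
        admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build());
      }
    }
  }
}
```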
* @param admin Standard Admin object * @throws IOException If IO problem encountered */ @@ -103,48 +99,38 @@ static void createNamespaceAndTable(final Admin admin) throws IOException { if (!namespaceExists(admin, MY_NAMESPACE_NAME)) { System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "]."); - admin.createNamespace(NamespaceDescriptor - .create(MY_NAMESPACE_NAME).build()); + admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build()); } if (!admin.tableExists(MY_TABLE_NAME)) { System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString() - + "], with one Column Family [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); + + "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build(); admin.createTable(desc); } } /** - * Invokes Table#put to store a row (with two new columns created 'on the - * fly') into the table. - * + * Invokes Table#put to store a row (with two new columns created 'on the fly') into the table. * @param table Standard Table object (used for CRUD operations). * @throws IOException If IO problem encountered */ static void putRowToTable(final Table table) throws IOException { - table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME, - MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME, - MY_SECOND_COLUMN_QUALIFIER, - Bytes.toBytes("World!"))); - - System.out.println("Row [" + Bytes.toString(MY_ROW_ID) - + "] was put into Table [" - + table.getName().getNameAsString() + "] in HBase;\n" - + " the row's two columns (created 'on the fly') are: [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) - + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); + table.put(new Put(MY_ROW_ID) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!"))); + + System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table [" + + table.getName().getNameAsString() + "] in HBase;\n" + + " the row's two columns (created 'on the fly') are: [" + + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) + + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); } /** * Invokes Table#get and prints out the contents of the retrieved row. 
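(The put/get round trip these Javadocs describe reduces to the sketch below, again using only calls that appear in HelloHBase itself; row, family, and qualifier names are illustrative.)

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutGetSketch {
  static void putThenGet(Table table) throws IOException {
    byte[] row = Bytes.toBytes("rowId01");
    byte[] cf = Bytes.toBytes("cf");
    // Write one row with two columns created 'on the fly'.
    table.put(new Put(row)
      .addColumn(cf, Bytes.toBytes("myFirstColumn"), Bytes.toBytes("Hello"))
      .addColumn(cf, Bytes.toBytes("mySecondColumn"), Bytes.toBytes("World!")));
    // Read the row back and print one of its cell values.
    Result result = table.get(new Get(row));
    System.out.println(
      Bytes.toString(result.getValue(cf, Bytes.toBytes("myFirstColumn"))));
  }
}
```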
- * * @param table Standard Table object * @throws IOException If IO problem encountered */ @@ -152,38 +138,32 @@ static void getAndPrintRowContents(final Table table) throws IOException { Result row = table.get(new Get(MY_ROW_ID)); - System.out.println("Row [" + Bytes.toString(row.getRow()) - + "] was retrieved from Table [" - + table.getName().getNameAsString() - + "] in HBase, with the following content:"); + System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + + table.getName().getNameAsString() + "] in HBase, with the following content:"); - for (Entry> colFamilyEntry - : row.getNoVersionMap().entrySet()) { + for (Entry> colFamilyEntry : row.getNoVersionMap() + .entrySet()) { String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); - System.out.println(" Columns in Column Family [" + columnFamilyName - + "]:"); + System.out.println(" Columns in Column Family [" + columnFamilyName + "]:"); - for (Entry columnNameAndValueMap - : colFamilyEntry.getValue().entrySet()) { + for (Entry columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) { System.out.println(" Value of Column [" + columnFamilyName + ":" - + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " - + Bytes.toString(columnNameAndValueMap.getValue())); + + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + + Bytes.toString(columnNameAndValueMap.getValue())); } } } /** * Checks to see whether a namespace exists. - * * @param admin Standard Admin object * @param namespaceName Name of namespace * @return true If namespace exists * @throws IOException If IO problem encountered */ - static boolean namespaceExists(final Admin admin, final String namespaceName) - throws IOException { + static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException { try { admin.getNamespaceDescriptor(namespaceName); } catch (NamespaceNotFoundException e) { @@ -194,28 +174,24 @@ static boolean namespaceExists(final Admin admin, final String namespaceName) /** * Invokes Table#delete to delete test data (i.e. the row) - * * @param table Standard Table object * @throws IOException If IO problem is encountered */ static void deleteRow(final Table table) throws IOException { - System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) - + "] from Table [" - + table.getName().getNameAsString() + "]."); + System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table [" + + table.getName().getNameAsString() + "]."); table.delete(new Delete(MY_ROW_ID)); } /** - * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to - * disable/delete Table and delete Namespace. - * + * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete + * Table and delete Namespace. * @param admin Standard Admin object * @throws IOException If IO problem is encountered */ static void deleteNamespaceAndTable(final Admin admin) throws IOException { if (admin.tableExists(MY_TABLE_NAME)) { - System.out.println("Disabling/deleting Table [" - + MY_TABLE_NAME.getNameAsString() + "]."); + System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "]."); admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it. 
admin.deleteTable(MY_TABLE_NAME); } diff --git a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/package-info.java b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/package-info.java index 554014e33f36..606504d4f951 100644 --- a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/package-info.java +++ b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/package-info.java @@ -1,25 +1,16 @@ /* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** - * This package provides fully-functional exemplar Java code demonstrating - * simple usage of the hbase-client API, for incorporation into a Maven - * archetype with hbase-client dependency. + * This package provides fully-functional exemplar Java code demonstrating simple usage of the + * hbase-client API, for incorporation into a Maven archetype with hbase-client dependency. */ package org.apache.hbase.archetypes.exemplars.client; diff --git a/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java b/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java index a7c7a5e5ad2a..64214976a7b7 100644 --- a/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java +++ b/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,8 +46,7 @@ public class TestHelloHBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHelloHBase.class); - private static final HBaseTestingUtil TEST_UTIL - = new HBaseTestingUtil(); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @BeforeClass public static void beforeClass() throws Exception { @@ -67,13 +66,11 @@ public void testNamespaceExists() throws Exception { Admin admin = TEST_UTIL.getAdmin(); exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE); - assertEquals("#namespaceExists failed: found nonexistent namespace.", - false, exists); + assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists); admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build()); exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE); - assertEquals("#namespaceExists failed: did NOT find existing namespace.", - true, exists); + assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists); admin.deleteNamespace(EXISTING_NAMESPACE); } @@ -82,14 +79,11 @@ public void testCreateNamespaceAndTable() throws Exception { Admin admin = TEST_UTIL.getAdmin(); HelloHBase.createNamespaceAndTable(admin); - boolean namespaceExists - = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); - assertEquals("#createNamespaceAndTable failed to create namespace.", - true, namespaceExists); + boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); + assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists); boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME); - assertEquals("#createNamespaceAndTable failed to create table.", - true, tableExists); + assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists); admin.disableTable(HelloHBase.MY_TABLE_NAME); admin.deleteTable(HelloHBase.MY_TABLE_NAME); @@ -100,8 +94,7 @@ public void testCreateNamespaceAndTable() throws Exception { public void testPutRowToTable() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); HelloHBase.putRowToTable(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); @@ -115,13 +108,10 @@ public void testPutRowToTable() throws IOException { public void testDeleteRow() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); - table.put(new Put(HelloHBase.MY_ROW_ID). 
- addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, - HelloHBase.MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("xyz"))); + table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, + HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz"))); HelloHBase.deleteRow(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); assertEquals("#deleteRow failed to delete row.", true, row.isEmpty()); diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml b/hbase-archetypes/hbase-shaded-client-project/pom.xml index 0ede67b739c9..6e7a2a143d2d 100644 --- a/hbase-archetypes/hbase-shaded-client-project/pom.xml +++ b/hbase-archetypes/hbase-shaded-client-project/pom.xml @@ -1,8 +1,5 @@ - + 4.0.0 - hbase-archetypes org.apache.hbase + hbase-archetypes 3.0.0-alpha-3-SNAPSHOT .. @@ -44,16 +41,16 @@ org.apache.hbase hbase-testing-util test - - - javax.xml.bind - jaxb-api - - - javax.ws.rs - jsr311-api - - + + + javax.xml.bind + jaxb-api + + + javax.ws.rs + jsr311-api + + org.apache.hbase diff --git a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java index 053275a3ad33..45a410eb7421 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,19 +36,17 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Successful running of this application requires access to an active instance - * of HBase. For install instructions for a standalone instance of HBase, please - * refer to https://hbase.apache.org/book.html#quickstart + * Successful running of this application requires access to an active instance of HBase. For + * install instructions for a standalone instance of HBase, please refer to + * https://hbase.apache.org/book.html#quickstart */ public final class HelloHBase { protected static final String MY_NAMESPACE_NAME = "myTestNamespace"; static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable"); static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf"); - static final byte[] MY_FIRST_COLUMN_QUALIFIER - = Bytes.toBytes("myFirstColumn"); - static final byte[] MY_SECOND_COLUMN_QUALIFIER - = Bytes.toBytes("mySecondColumn"); + static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn"); + static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn"); static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01"); // Private constructor included here to avoid checkstyle warnings @@ -60,20 +57,20 @@ public static void main(final String[] args) throws IOException { final boolean deleteAllAtEOJ = true; /** - * ConnectionFactory#createConnection() automatically looks for - * hbase-site.xml (HBase configuration parameters) on the system's - * CLASSPATH, to enable creation of Connection to HBase via ZooKeeper. + * ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase + * configuration parameters) on the system's CLASSPATH, to enable creation of Connection to + * HBase via ZooKeeper. 
*/ try (Connection connection = ConnectionFactory.createConnection(); Admin admin = connection.getAdmin()) { admin.getClusterMetrics(); // assure connection successfully established - System.out.println("\n*** Hello HBase! -- Connection has been " - + "established via ZooKeeper!!\n"); + System.out + .println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n"); createNamespaceAndTable(admin); System.out.println("Getting a Table object for [" + MY_TABLE_NAME - + "] with which to perform CRUD operations in HBase."); + + "] with which to perform CRUD operations in HBase."); try (Table table = connection.getTable(MY_TABLE_NAME)) { putRowToTable(table); @@ -91,9 +88,8 @@ public static void main(final String[] args) throws IOException { } /** - * Invokes Admin#createNamespace and Admin#createTable to create a namespace - * with a table that has one column-family. - * + * Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has + * one column-family. * @param admin Standard Admin object * @throws IOException If IO problem encountered */ @@ -102,47 +98,38 @@ static void createNamespaceAndTable(final Admin admin) throws IOException { if (!namespaceExists(admin, MY_NAMESPACE_NAME)) { System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "]."); - admin.createNamespace(NamespaceDescriptor - .create(MY_NAMESPACE_NAME).build()); + admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build()); } if (!admin.tableExists(MY_TABLE_NAME)) { System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString() - + "], with one Column Family [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); + + "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); admin.createTable(TableDescriptorBuilder.newBuilder(MY_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build()); } } /** - * Invokes Table#put to store a row (with two new columns created 'on the - * fly') into the table. - * + * Invokes Table#put to store a row (with two new columns created 'on the fly') into the table. * @param table Standard Table object (used for CRUD operations). 
* @throws IOException If IO problem encountered */ static void putRowToTable(final Table table) throws IOException { - table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME, - MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME, - MY_SECOND_COLUMN_QUALIFIER, - Bytes.toBytes("World!"))); - - System.out.println("Row [" + Bytes.toString(MY_ROW_ID) - + "] was put into Table [" - + table.getName().getNameAsString() + "] in HBase;\n" - + " the row's two columns (created 'on the fly') are: [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) - + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); + table.put(new Put(MY_ROW_ID) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!"))); + + System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table [" + + table.getName().getNameAsString() + "] in HBase;\n" + + " the row's two columns (created 'on the fly') are: [" + + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) + + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); } /** * Invokes Table#get and prints out the contents of the retrieved row. - * * @param table Standard Table object * @throws IOException If IO problem encountered */ @@ -150,38 +137,32 @@ static void getAndPrintRowContents(final Table table) throws IOException { Result row = table.get(new Get(MY_ROW_ID)); - System.out.println("Row [" + Bytes.toString(row.getRow()) - + "] was retrieved from Table [" - + table.getName().getNameAsString() - + "] in HBase, with the following content:"); + System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + + table.getName().getNameAsString() + "] in HBase, with the following content:"); - for (Entry> colFamilyEntry - : row.getNoVersionMap().entrySet()) { + for (Entry> colFamilyEntry : row.getNoVersionMap() + .entrySet()) { String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); - System.out.println(" Columns in Column Family [" + columnFamilyName - + "]:"); + System.out.println(" Columns in Column Family [" + columnFamilyName + "]:"); - for (Entry columnNameAndValueMap - : colFamilyEntry.getValue().entrySet()) { + for (Entry columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) { System.out.println(" Value of Column [" + columnFamilyName + ":" - + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " - + Bytes.toString(columnNameAndValueMap.getValue())); + + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + + Bytes.toString(columnNameAndValueMap.getValue())); } } } /** * Checks to see whether a namespace exists. - * * @param admin Standard Admin object * @param namespaceName Name of namespace * @return true If namespace exists * @throws IOException If IO problem encountered */ - static boolean namespaceExists(final Admin admin, final String namespaceName) - throws IOException { + static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException { try { admin.getNamespaceDescriptor(namespaceName); } catch (NamespaceNotFoundException e) { @@ -192,28 +173,24 @@ static boolean namespaceExists(final Admin admin, final String namespaceName) /** * Invokes Table#delete to delete test data (i.e. 
the row) - * * @param table Standard Table object * @throws IOException If IO problem is encountered */ static void deleteRow(final Table table) throws IOException { - System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) - + "] from Table [" - + table.getName().getNameAsString() + "]."); + System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table [" + + table.getName().getNameAsString() + "]."); table.delete(new Delete(MY_ROW_ID)); } /** - * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to - * disable/delete Table and delete Namespace. - * + * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete + * Table and delete Namespace. * @param admin Standard Admin object * @throws IOException If IO problem is encountered */ static void deleteNamespaceAndTable(final Admin admin) throws IOException { if (admin.tableExists(MY_TABLE_NAME)) { - System.out.println("Disabling/deleting Table [" - + MY_TABLE_NAME.getNameAsString() + "]."); + System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "]."); admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it. admin.deleteTable(MY_TABLE_NAME); } diff --git a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/package-info.java b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/package-info.java index 754be16069c6..3181ddb6d30d 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/package-info.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/package-info.java @@ -1,25 +1,16 @@ /* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
*/ /** - * This package provides fully-functional exemplar Java code demonstrating - * simple usage of the hbase-client API, for incorporation into a Maven - * archetype with hbase-shaded-client dependency. + * This package provides fully-functional exemplar Java code demonstrating simple usage of the + * hbase-client API, for incorporation into a Maven archetype with hbase-shaded-client dependency. */ package org.apache.hbase.archetypes.exemplars.shaded_client; diff --git a/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java b/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java index 0282ff68a336..787636490aad 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,8 +46,7 @@ public class TestHelloHBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHelloHBase.class); - private static final HBaseTestingUtil TEST_UTIL - = new HBaseTestingUtil(); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @BeforeClass public static void beforeClass() throws Exception { @@ -67,13 +66,11 @@ public void testNamespaceExists() throws Exception { Admin admin = TEST_UTIL.getAdmin(); exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE); - assertEquals("#namespaceExists failed: found nonexistent namespace.", - false, exists); + assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists); admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build()); exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE); - assertEquals("#namespaceExists failed: did NOT find existing namespace.", - true, exists); + assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists); admin.deleteNamespace(EXISTING_NAMESPACE); } @@ -82,14 +79,11 @@ public void testCreateNamespaceAndTable() throws Exception { Admin admin = TEST_UTIL.getAdmin(); HelloHBase.createNamespaceAndTable(admin); - boolean namespaceExists - = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); - assertEquals("#createNamespaceAndTable failed to create namespace.", - true, namespaceExists); + boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); + assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists); boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME); - assertEquals("#createNamespaceAndTable failed to create table.", - true, tableExists); + assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists); admin.disableTable(HelloHBase.MY_TABLE_NAME); admin.deleteTable(HelloHBase.MY_TABLE_NAME); @@ -100,8 +94,7 @@ public void testCreateNamespaceAndTable() throws Exception { public void testPutRowToTable() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = 
TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); HelloHBase.putRowToTable(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); @@ -115,13 +108,10 @@ public void testPutRowToTable() throws IOException { public void testDeleteRow() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); - table.put(new Put(HelloHBase.MY_ROW_ID). - addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, - HelloHBase.MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("xyz"))); + table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, + HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz"))); HelloHBase.deleteRow(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); assertEquals("#deleteRow failed to delete row.", true, row.isEmpty()); diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml index 1a05b9617d4f..f6bb3a4e9986 100644 --- a/hbase-archetypes/pom.xml +++ b/hbase-archetypes/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -68,10 +67,10 @@ spotbugs-maven-plugin - false spotbugs + false ${project.basedir}/../dev-support/spotbugs-exclude.xml diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index ff15bd4fb531..44a77558137c 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-assembly - Apache HBase - Assembly - - Module that does project assembly and that is all that it does. - pom + Apache HBase - Assembly + Module that does project assembly and that is all that it does. 
true - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - aggregate-licenses - - process - - - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - maven-assembly-plugin - - - hbase-${project.version} - false - true - posix - - ${assembly.file} - src/main/assembly/client.xml - - - - - maven-dependency-plugin - - - - create-hbase-generated-classpath - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath.txt - jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce - - - - - - create-hbase-generated-classpath-jline - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath_jline.txt - jline - - - - - - create-hbase-generated-classpath-jruby - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath_jruby.txt - jruby-complete - - - - - - - unpack-dependency-notices - prepare-package - - unpack-dependencies - - - pom - true - **\/NOTICE,**\/NOTICE.txt - - - - - - org.codehaus.mojo - exec-maven-plugin - ${exec.maven.version} - - - concat-NOTICE-files - package - - exec - - - env - - bash - -c - cat maven-shared-archive-resources/META-INF/NOTICE \ - `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt` - - - ${project.build.directory}/NOTICE.aggregate - ${project.build.directory} - - - - - - - @@ -189,7 +47,7 @@ org.apache.hbase hbase-shaded-mapreduce - + org.apache.hbase hbase-it @@ -254,25 +112,25 @@ hbase-external-blockcache - org.apache.hbase - hbase-testing-util + org.apache.hbase + hbase-testing-util - org.apache.hbase - hbase-metrics-api + org.apache.hbase + hbase-metrics-api - org.apache.hbase - hbase-metrics + org.apache.hbase + hbase-metrics org.apache.hbase hbase-protocol-shaded - org.apache.hbase - hbase-resource-bundle - true + org.apache.hbase + hbase-resource-bundle + true org.apache.httpcomponents @@ -386,4 +244,143 @@ compile + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + aggregate-licenses + + process + + + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + maven-assembly-plugin + + + hbase-${project.version} + false + true + posix + + ${assembly.file} + src/main/assembly/client.xml + + + + + maven-dependency-plugin + + + + create-hbase-generated-classpath + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath.txt + jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce + + + + + + create-hbase-generated-classpath-jline + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath_jline.txt + jline + + + + + + create-hbase-generated-classpath-jruby + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath_jruby.txt + jruby-complete + + + + + + + unpack-dependency-notices + + unpack-dependencies + + prepare-package + + pom + true + **\/NOTICE,**\/NOTICE.txt 
+ + + + + + org.codehaus.mojo + exec-maven-plugin + ${exec.maven.version} + + + concat-NOTICE-files + + exec + + package + + env + + bash + -c + cat maven-shared-archive-resources/META-INF/NOTICE \ + `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt` + + ${project.build.directory}/NOTICE.aggregate + ${project.build.directory} + + + + + + + diff --git a/hbase-asyncfs/pom.xml b/hbase-asyncfs/pom.xml index 073eec750d33..0544cf9d6b84 100644 --- a/hbase-asyncfs/pom.xml +++ b/hbase-asyncfs/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,33 +30,6 @@ hbase-asyncfs Apache HBase - Asynchronous FileSystem HBase Asynchronous FileSystem Implementation for WAL - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -169,13 +141,42 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -224,8 +225,7 @@ lifecycle-mapping - - + diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java index 059ca00b02cc..b88b32bdb814 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,9 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface for asynchronous filesystem output stream. diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java index 5b713196d0b0..4ff903676f09 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java index 5885ea685b32..d3735edd8897 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -145,7 +145,7 @@ private static final class Callback { private long lastAckTimestamp = -1; public Callback(CompletableFuture future, long ackedLength, - final Collection replicas, long packetDataLen) { + final Collection replicas, long packetDataLen) { this.future = future; this.ackedLength = ackedLength; this.packetDataLen = packetDataLen; @@ -154,7 +154,7 @@ public Callback(CompletableFuture future, long ackedLength, this.unfinishedReplicas = Collections.emptySet(); } else { this.unfinishedReplicas = - Collections.newSetFromMap(new ConcurrentHashMap(replicas.size())); + Collections.newSetFromMap(new ConcurrentHashMap(replicas.size())); replicas.stream().map(Channel::id).forEachOrdered(unfinishedReplicas::add); } } @@ -196,7 +196,7 @@ private void completed(Channel channel) { if (c.unfinishedReplicas.remove(channel.id())) { long current = EnvironmentEdgeManager.currentTime(); streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen, - current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); + current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); c.lastAckTimestamp = current; if (c.unfinishedReplicas.isEmpty()) { // we need to remove first before complete the future. 
It is possible that after we @@ -284,13 +284,13 @@ public AckHandler(int timeoutMs) { protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception { Status reply = getStatus(ack); if (reply != Status.SUCCESS) { - failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + - block + " from datanode " + ctx.channel().remoteAddress())); + failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block + + " from datanode " + ctx.channel().remoteAddress())); return; } if (PipelineAck.isRestartOOBStatus(reply)) { - failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + - block + " from datanode " + ctx.channel().remoteAddress())); + failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + + block + " from datanode " + ctx.channel().remoteAddress())); return; } if (ack.getSeqno() == HEART_BEAT_SEQNO) { @@ -345,8 +345,8 @@ private void setupReceiver(int timeoutMs) { } } - FanOutOneBlockAsyncDFSOutput(Configuration conf,DistributedFileSystem dfs, - DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId, + FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs, DFSClient client, + ClientProtocol namenode, String clientName, String src, long fileId, LocatedBlock locatedBlock, Encryptor encryptor, Map datanodeInfoMap, DataChecksum summer, ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) { this.conf = conf; @@ -418,8 +418,8 @@ private void flushBuffer(CompletableFuture future, ByteBuf dataBuf, ByteBuf headerBuf = alloc.buffer(headerLen); header.putInBuffer(headerBuf.nioBuffer(0, headerLen)); headerBuf.writerIndex(headerLen); - Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, - datanodeInfoMap.keySet(), dataLen); + Callback c = + new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeInfoMap.keySet(), dataLen); waitingAckQueue.addLast(c); // recheck again after we pushed the callback to queue if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) { @@ -429,7 +429,7 @@ private void flushBuffer(CompletableFuture future, ByteBuf dataBuf, return; } // TODO: we should perhaps measure time taken per DN here; - // we could collect statistics per DN, and/or exclude bad nodes in createOutput. + // we could collect statistics per DN, and/or exclude bad nodes in createOutput. datanodeInfoMap.keySet().forEach(ch -> { ch.write(headerBuf.retainedDuplicate()); ch.write(checksumBuf.retainedDuplicate()); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java index 7c62d67c6cee..fac584ac6871 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -197,13 +197,13 @@ public void end(DFSClient client, long inodeId) { private static FileCreator createFileCreator3_3() throws NoSuchMethodException { Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class, - String.class, EnumSetWritable.class, boolean.class, short.class, long.class, - CryptoProtocolVersion[].class, String.class, String.class); + String.class, EnumSetWritable.class, boolean.class, short.class, long.class, + CryptoProtocolVersion[].class, String.class, String.class); return (instance, src, masked, clientName, flag, createParent, replication, blockSize, supportedVersions) -> { return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag, - createParent, replication, blockSize, supportedVersions, null, null); + createParent, replication, blockSize, supportedVersions, null, null); }; } @@ -249,9 +249,9 @@ public boolean progress() { LEASE_MANAGER = createLeaseManager(); FILE_CREATOR = createFileCreator(); } catch (Exception e) { - String msg = "Couldn't properly initialize access to HDFS internals. Please " + - "update your WAL Provider to not make use of the 'asyncfs' provider. See " + - "HBASE-16110 for more information."; + String msg = "Couldn't properly initialize access to HDFS internals. Please " + + "update your WAL Provider to not make use of the 'asyncfs' provider. See " + + "HBASE-16110 for more information."; LOG.error(msg, e); throw new Error(msg, e); } @@ -298,11 +298,11 @@ protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink(); if (resp.getStatus() != Status.SUCCESS) { if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) { - throw new InvalidBlockTokenException("Got access token error" + ", status message " + - resp.getMessage() + ", " + logInfo); + throw new InvalidBlockTokenException("Got access token error" + ", status message " + + resp.getMessage() + ", " + logInfo); } else { - throw new IOException("Got error" + ", status=" + resp.getStatus().name() + - ", status message " + resp.getMessage() + ", " + logInfo); + throw new IOException("Got error" + ", status=" + resp.getStatus().name() + + ", status message " + resp.getMessage() + ", " + logInfo); } } // success @@ -344,11 +344,11 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E private static void requestWriteBlock(Channel channel, StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException { - OpWriteBlockProto proto = - writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build(); + OpWriteBlockProto proto = writeBlockProtoBuilder + .setStorageType(PBHelperClient.convertStorageType(storageType)).build(); int protoLen = proto.getSerializedSize(); ByteBuf buffer = - channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen); + channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen); buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); buffer.writeByte(Op.WRITE_BLOCK.code); proto.writeDelimitedTo(new ByteBufOutputStream(buffer)); @@ -388,9 +388,9 @@ private static List> connectToDataNodes(Configuration conf, DFSC ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock()); blockCopy.setNumBytes(locatedBlock.getBlockSize()); ClientOperationHeaderProto header = 
ClientOperationHeaderProto.newBuilder() - .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelperClient.convert(blockCopy)) - .setToken(PBHelperClient.convert(locatedBlock.getBlockToken()))) - .setClientName(clientName).build(); + .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelperClient.convert(blockCopy)) + .setToken(PBHelperClient.convert(locatedBlock.getBlockToken()))) + .setClientName(clientName).build(); ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer); OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder() .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())) @@ -464,10 +464,10 @@ private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem d DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES); ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager(); Set toExcludeNodes = - new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet()); + new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet()); for (int retry = 0;; retry++) { LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src, - toExcludeNodes, retry); + toExcludeNodes, retry); HdfsFileStatus stat; try { stat = FILE_CREATOR.create(namenode, src, @@ -505,8 +505,8 @@ private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem d } Encryptor encryptor = createEncryptor(conf, stat, client); FanOutOneBlockAsyncDFSOutput output = - new FanOutOneBlockAsyncDFSOutput(conf, dfs, client, namenode, clientName, src, - stat.getFileId(), locatedBlock, encryptor, datanodes, summer, ALLOC, monitor); + new FanOutOneBlockAsyncDFSOutput(conf, dfs, client, namenode, clientName, src, + stat.getFileId(), locatedBlock, encryptor, datanodes, summer, ALLOC, monitor); succ = true; return output; } catch (RemoteException e) { diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java index 090b9b4a63f1..112a88a45e50 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -179,7 +179,7 @@ public AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient) private static TransparentCryptoHelper createTransparentCryptoHelperWithoutHDFS12396() throws NoSuchMethodException { Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class - .getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class); + .getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class); decryptEncryptedDataEncryptionKeyMethod.setAccessible(true); return new TransparentCryptoHelper() { @@ -188,7 +188,7 @@ public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client) throws IOException { try { KeyVersion decryptedKey = - (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo); + (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo); CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf, feInfo.getCipherSuite()); Encryptor encryptor = cryptoCodec.createEncryptor(); encryptor.init(decryptedKey.getMaterial(), feInfo.getIV()); @@ -218,7 +218,7 @@ public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client) throws IOException { try { KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod - .invoke(null, feInfo, client.getKeyProvider()); + .invoke(null, feInfo, client.getKeyProvider()); CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf, feInfo.getCipherSuite()); Encryptor encryptor = cryptoCodec.createEncryptor(); encryptor.init(decryptedKey.getMaterial(), feInfo.getIV()); @@ -240,8 +240,9 @@ private static TransparentCryptoHelper createTransparentCryptoHelper() try { return createTransparentCryptoHelperWithoutHDFS12396(); } catch (NoSuchMethodException e) { - LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," + - " should be hadoop version with HDFS-12396", e); + LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," + + " should be hadoop version with HDFS-12396", + e); } return createTransparentCryptoHelperWithHDFS12396(); } @@ -324,8 +325,8 @@ private static final class SaslNegotiateHandler extends ChannelDuplexHandler { private int step = 0; public SaslNegotiateHandler(Configuration conf, String username, char[] password, - Map saslProps, int timeoutMs, Promise promise, - DFSClient dfsClient) throws SaslException { + Map saslProps, int timeoutMs, Promise promise, DFSClient dfsClient) + throws SaslException { this.conf = conf; this.saslProps = saslProps; this.saslClient = Sasl.createSaslClient(new String[] { MECHANISM }, username, PROTOCOL, @@ -355,8 +356,8 @@ private List getCipherOptions() throws IOException { } /** - * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. - * After Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*. + * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. After + * Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*. * Use Reflection to check which ones to use. */ private static class BuilderPayloadSetter { @@ -366,13 +367,12 @@ private static class BuilderPayloadSetter { /** * Create a ByteString from byte array without copying (wrap), and then set it as the payload * for the builder. - * * @param builder builder for HDFS DataTransferEncryptorMessage. * @param payload byte array of payload. 
* @throws IOException */ - static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, byte[] payload) - throws IOException { + static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, + byte[] payload) throws IOException { Object byteStringObject; try { // byteStringObject = new LiteralByteString(payload); @@ -396,18 +396,19 @@ static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, try { // See if it can load the relocated ByteString, which comes from hadoop-thirdparty. byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString"); - LOG.debug("Found relocated ByteString class from hadoop-thirdparty." + - " Assuming this is Hadoop 3.3.0+."); + LOG.debug("Found relocated ByteString class from hadoop-thirdparty." + + " Assuming this is Hadoop 3.3.0+."); } catch (ClassNotFoundException e) { - LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + - " Assuming this is below Hadoop 3.3.0", e); + LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + + " Assuming this is below Hadoop 3.3.0", + e); } // LiteralByteString is a package private class in protobuf. Make it accessible. Class literalByteStringClass; try { - literalByteStringClass = Class.forName( - "org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString"); + literalByteStringClass = + Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString"); LOG.debug("Shaded LiteralByteString from hadoop-thirdparty is found."); } catch (ClassNotFoundException e) { try { @@ -805,8 +806,7 @@ static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo d } doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey), encryptionKeyToPassword(encryptionKey.encryptionKey), - createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, - client); + createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, client); } else if (!UserGroupInformation.isSecurityEnabled()) { if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr @@ -832,7 +832,7 @@ static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo d } doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken), buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise, - client); + client); } else { // It's a secured cluster using non-privileged ports, but no SASL. The only way this can // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java index 3be9a2e49c1b..65a43bacdfe8 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,33 +17,29 @@ */ package org.apache.hadoop.hbase.io.asyncfs; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageDecoder; import org.apache.hbase.thirdparty.io.netty.util.internal.ObjectUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.List; /** - * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. - * The Netty's ProtobufDecode supports unshaded protobuf messages (com.google.protobuf). - * - * Hadoop 3.3.0 and above relocates protobuf classes to a shaded jar (hadoop-thirdparty), and - * so we must use reflection to detect which one (relocated or not) to use. - * - * Do not use this to process HBase's shaded protobuf messages. This is meant to process the - * protobuf messages in HDFS for the asyncfs use case. - * */ + * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. The Netty's ProtobufDecode + * supports unshaded protobuf messages (com.google.protobuf). Hadoop 3.3.0 and above relocates + * protobuf classes to a shaded jar (hadoop-thirdparty), and so we must use reflection to detect + * which one (relocated or not) to use. Do not use this to process HBase's shaded protobuf messages. + * This is meant to process the protobuf messages in HDFS for the asyncfs use case. 
+ */ @InterfaceAudience.Private public class ProtobufDecoder extends MessageToMessageDecoder { - private static final Logger LOG = - LoggerFactory.getLogger(ProtobufDecoder.class); + private static final Logger LOG = LoggerFactory.getLogger(ProtobufDecoder.class); private static Class protobufMessageLiteClass = null; private static Class protobufMessageLiteBuilderClass = null; @@ -60,23 +56,22 @@ public class ProtobufDecoder extends MessageToMessageDecoder { private Object parser; private Object builder; - public ProtobufDecoder(Object prototype) { try { - Method getDefaultInstanceForTypeMethod = protobufMessageLiteClass.getMethod( - "getDefaultInstanceForType"); - Object prototype1 = getDefaultInstanceForTypeMethod - .invoke(ObjectUtil.checkNotNull(prototype, "prototype")); + Method getDefaultInstanceForTypeMethod = + protobufMessageLiteClass.getMethod("getDefaultInstanceForType"); + Object prototype1 = + getDefaultInstanceForTypeMethod.invoke(ObjectUtil.checkNotNull(prototype, "prototype")); // parser = prototype.getParserForType() parser = getParserForTypeMethod.invoke(prototype1); - parseFromMethod = parser.getClass().getMethod( - "parseFrom", byte[].class, int.class, int.class); + parseFromMethod = + parser.getClass().getMethod("parseFrom", byte[].class, int.class, int.class); // builder = prototype.newBuilderForType(); builder = newBuilderForTypeMethod.invoke(prototype1); - mergeFromMethod = builder.getClass().getMethod( - "mergeFrom", byte[].class, int.class, int.class); + mergeFromMethod = + builder.getClass().getMethod("mergeFrom", byte[].class, int.class, int.class); // All protobuf message builders inherits from MessageLite.Builder buildMethod = protobufMessageLiteBuilderClass.getDeclaredMethod("build"); @@ -88,8 +83,7 @@ public ProtobufDecoder(Object prototype) { } } - protected void decode( - ChannelHandlerContext ctx, ByteBuf msg, List out) throws Exception { + protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List out) throws Exception { int length = msg.readableBytes(); byte[] array; int offset; @@ -122,8 +116,8 @@ protected void decode( try { protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite"); - protobufMessageLiteBuilderClass = Class.forName( - "org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder"); + protobufMessageLiteBuilderClass = + Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder"); LOG.debug("Hadoop 3.3 and above shades protobuf."); } catch (ClassNotFoundException e) { LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java index 2f652440e38e..d5dbfb02abc2 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java index c7cc1fcfcb4b..4f5f05d94276 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; - import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -95,8 +94,8 @@ private void flush0(CompletableFuture future, ByteArrayOutputStream buffer } long pos = out.getPos(); /** - * This flush0 method could only be called by single thread, so here we could - * safely overwrite without any synchronization. + * This flush0 method could only be called by single thread, so here we could safely overwrite + * without any synchronization. */ this.syncedLength = pos; future.complete(pos); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java index 80748cad609a..7006fdf6409e 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java @@ -42,32 +42,31 @@ public class ExcludeDatanodeManager implements ConfigurationObserver { * Configure for the max count the excluded datanodes. 
*/ public static final String WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY = - "hbase.regionserver.async.wal.max.exclude.datanode.count"; + "hbase.regionserver.async.wal.max.exclude.datanode.count"; public static final int DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT = 3; /** * Configure for the TTL time of the datanodes excluded */ public static final String WAL_EXCLUDE_DATANODE_TTL_KEY = - "hbase.regionserver.async.wal.exclude.datanode.info.ttl.hour"; + "hbase.regionserver.async.wal.exclude.datanode.info.ttl.hour"; public static final int DEFAULT_WAL_EXCLUDE_DATANODE_TTL = 6; // 6 hours private volatile Cache excludeDNsCache; private final int maxExcludeDNCount; private final Configuration conf; // This is a map of providerId->StreamSlowMonitor - private final Map streamSlowMonitors = - new ConcurrentHashMap<>(1); + private final Map streamSlowMonitors = new ConcurrentHashMap<>(1); public ExcludeDatanodeManager(Configuration conf) { this.conf = conf; this.maxExcludeDNCount = conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT); this.excludeDNsCache = CacheBuilder.newBuilder() - .expireAfterWrite(this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, - DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) - .maximumSize(this.maxExcludeDNCount) - .build(); + .expireAfterWrite( + this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .maximumSize(this.maxExcludeDNCount).build(); } /** @@ -85,15 +84,15 @@ public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) { datanodeInfo, cause, excludeDNsCache.size()); return true; } - LOG.debug("Try add datanode {} to exclude cache by [{}] failed, " - + "current exclude DNs are {}", datanodeInfo, cause, getExcludeDNs().keySet()); + LOG.debug( + "Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}", + datanodeInfo, cause, getExcludeDNs().keySet()); return false; } public StreamSlowMonitor getStreamSlowMonitor(String name) { String key = name == null || name.isEmpty() ? 
"defaultMonitorName" : name; - return streamSlowMonitors - .computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this)); + return streamSlowMonitors.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this)); } public Map getExcludeDNs() { @@ -105,10 +104,12 @@ public void onConfigurationChange(Configuration conf) { for (StreamSlowMonitor monitor : streamSlowMonitors.values()) { monitor.onConfigurationChange(conf); } - this.excludeDNsCache = CacheBuilder.newBuilder().expireAfterWrite( - this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), - TimeUnit.HOURS).maximumSize(this.conf - .getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) - .build(); + this.excludeDNsCache = CacheBuilder.newBuilder() + .expireAfterWrite( + this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .maximumSize(this.conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, + DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) + .build(); } } diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java index 7ee04f8eebd2..01755353095e 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java @@ -38,47 +38,44 @@ import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; /** - * Class for monitor the wal file flush performance. - * Each active wal file has a StreamSlowMonitor. + * Class for monitor the wal file flush performance. Each active wal file has a StreamSlowMonitor. */ @InterfaceAudience.Private public class StreamSlowMonitor implements ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(StreamSlowMonitor.class); /** - * Configure for the min count for a datanode detected slow. - * If a datanode is detected slow times up to this count, then it will be added to the exclude - * datanode cache by {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} - * of this regionsever. + * Configure for the min count for a datanode detected slow. If a datanode is detected slow times + * up to this count, then it will be added to the exclude datanode cache by + * {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} of this regionsever. */ private static final String WAL_SLOW_DETECT_MIN_COUNT_KEY = - "hbase.regionserver.async.wal.min.slow.detect.count"; + "hbase.regionserver.async.wal.min.slow.detect.count"; private static final int DEFAULT_WAL_SLOW_DETECT_MIN_COUNT = 3; /** * Configure for the TTL of the data that a datanode detected slow. */ private static final String WAL_SLOW_DETECT_DATA_TTL_KEY = - "hbase.regionserver.async.wal.slow.detect.data.ttl.ms"; + "hbase.regionserver.async.wal.slow.detect.data.ttl.ms"; private static final long DEFAULT_WAL_SLOW_DETECT_DATA_TTL = 10 * 60 * 1000; // 10min in ms /** - * Configure for the speed check of packet min length. - * For packets whose data length smaller than this value, check slow by processing time. - * While for packets whose data length larger than this value, check slow by flushing speed. + * Configure for the speed check of packet min length. For packets whose data length smaller than + * this value, check slow by processing time. 
While for packets whose data length larger than this + * value, check slow by flushing speed. */ private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY = - "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min"; - private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = - 64 * 1024; //64KB + "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min"; + private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024; // 64KB /** - * Configure for the slow packet process time, a duration from send to ACK. - * The processing time check is for packets that data length smaller than + * Configure for the slow packet process time, a duration from send to ACK. The processing time + * check is for packets that data length smaller than * {@link StreamSlowMonitor#DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY} */ public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY = - "hbase.regionserver.async.wal.datanode.slow.packet.process.time.millis"; + "hbase.regionserver.async.wal.datanode.slow.packet.process.time.millis"; private static final long DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME = 6000; // 6s in ms /** @@ -88,7 +85,7 @@ public class StreamSlowMonitor implements ConfigurationObserver { * 64KB should be processed in less than 3.2s. */ private static final String DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY = - "hbase.regionserver.async.wal.datanode.slow.packet.speed.min.kbs"; + "hbase.regionserver.async.wal.datanode.slow.packet.speed.min.kbs"; private static final double DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED = 20; // 20KB/s private final String name; @@ -108,16 +105,17 @@ public StreamSlowMonitor(Configuration conf, String name, this.name = name; this.excludeDatanodeManager = excludeDatanodeManager; this.datanodeSlowDataQueue = CacheBuilder.newBuilder() - .maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, - DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) - .expireAfterWrite(conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, - DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) - .build(new CacheLoader>() { - @Override - public Deque load(DatanodeInfo key) throws Exception { - return new ConcurrentLinkedDeque<>(); - } - }); + .maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, + DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) + .expireAfterWrite( + conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .build(new CacheLoader>() { + @Override + public Deque load(DatanodeInfo key) throws Exception { + return new ConcurrentLinkedDeque<>(); + } + }); LOG.info("New stream slow monitor {}", this.name); } @@ -140,17 +138,18 @@ public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataL // 1. For small packet, we just have a simple time limit, without considering // the size of the packet. // 2. For large packet, we will calculate the speed, and check if the speed is too slow. 
- boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) || ( - packetDataLen > minLengthForSpeedCheck - && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs); + boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) + || (packetDataLen > minLengthForSpeedCheck + && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs); if (slow) { // Check if large diff ack timestamp between replicas, // should try to avoid misjudgments that caused by GC STW. - if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) || ( - lastAckTimestamp <= 0 && unfinished == 0)) { - LOG.info("Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " - + "lastAckTimestamp={}, monitor name: {}", datanodeInfo, packetDataLen, processTimeMs, - unfinished, lastAckTimestamp, this.name); + if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) + || (lastAckTimestamp <= 0 && unfinished == 0)) { + LOG.info( + "Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " + + "lastAckTimestamp={}, monitor name: {}", + datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name); if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) { excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack"); } @@ -167,7 +166,7 @@ private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long Deque slowDNQueue = datanodeSlowDataQueue.getUnchecked(datanodeInfo); long current = EnvironmentEdgeManager.currentTime(); while (!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl - || slowDNQueue.size() >= minSlowDetectCount)) { + || slowDNQueue.size() >= minSlowDetectCount)) { slowDNQueue.removeFirst(); } slowDNQueue.addLast(new PacketAckData(dataLength, processTime)); @@ -175,14 +174,14 @@ private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long } private void setConf(Configuration conf) { - this.minSlowDetectCount = conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, - DEFAULT_WAL_SLOW_DETECT_MIN_COUNT); + this.minSlowDetectCount = + conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, DEFAULT_WAL_SLOW_DETECT_MIN_COUNT); this.slowDataTtl = conf.getLong(WAL_SLOW_DETECT_DATA_TTL_KEY, DEFAULT_WAL_SLOW_DETECT_DATA_TTL); this.slowPacketAckMs = conf.getLong(DATANODE_SLOW_PACKET_PROCESS_TIME_KEY, - DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME); - this.minLengthForSpeedCheck = conf.getLong( - DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY, - DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH); + DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME); + this.minLengthForSpeedCheck = + conf.getLong(DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY, + DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH); this.minPacketFlushSpeedKBs = conf.getDouble(DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY, DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED); } diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java index 91c003cb6dd0..0f80f874a319 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns - * a boolean to support canceling the operation. + * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns a boolean to support + * canceling the operation. *
<p/>
* Used for doing updating of OPENING znode during log replay on region open. */ @@ -30,8 +29,8 @@ public interface CancelableProgressable { /** - * Report progress. Returns true if operations should continue, false if the - * operation should be canceled and rolled back. + * Report progress. Returns true if operations should continue, false if the operation should be + * canceled and rolled back. * @return whether to continue (true) or cancel (false) the operation */ boolean progress(); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java index 9c3da1658c70..5a2d72ddc958 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ private RecoverLeaseFSUtils() { } public static void recoverFileLease(FileSystem fs, Path p, Configuration conf) - throws IOException { + throws IOException { recoverFileLease(fs, p, conf, null); } @@ -51,7 +51,7 @@ public static void recoverFileLease(FileSystem fs, Path p, Configuration conf) * Recover the lease from HDFS, retrying multiple times. */ public static void recoverFileLease(FileSystem fs, Path p, Configuration conf, - CancelableProgressable reporter) throws IOException { + CancelableProgressable reporter) throws IOException { if (fs instanceof FilterFileSystem) { fs = ((FilterFileSystem) fs).getRawFileSystem(); } @@ -82,7 +82,7 @@ public static void recoverFileLease(FileSystem fs, Path p, Configuration conf, * second and we might be able to exit early. */ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p, - final Configuration conf, final CancelableProgressable reporter) throws IOException { + final Configuration conf, final CancelableProgressable reporter) throws IOException { LOG.info("Recover lease on dfs file " + p); long startWaiting = EnvironmentEdgeManager.currentTime(); // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS @@ -120,13 +120,13 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though. 
long localStartWaiting = EnvironmentEdgeManager.currentTime(); - while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase * - nbAttempt) { + while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase + * nbAttempt) { Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000)); if (findIsFileClosedMeth) { try { isFileClosedMeth = - dfs.getClass().getMethod("isFileClosed", new Class[] { Path.class }); + dfs.getClass().getMethod("isFileClosed", new Class[] { Path.class }); } catch (NoSuchMethodException nsme) { LOG.debug("isFileClosed not available"); } finally { @@ -150,12 +150,12 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina } private static boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, - final int nbAttempt, final Path p, final long startWaiting) { + final int nbAttempt, final Path p, final long startWaiting) { if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) { - LOG.warn("Cannot recoverLease after trying for " + - conf.getInt("hbase.lease.recovery.timeout", 900000) + - "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + - getLogMessageDetail(nbAttempt, p, startWaiting)); + LOG.warn("Cannot recoverLease after trying for " + + conf.getInt("hbase.lease.recovery.timeout", 900000) + + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + + getLogMessageDetail(nbAttempt, p, startWaiting)); return true; } return false; @@ -166,12 +166,12 @@ private static boolean checkIfTimedout(final Configuration conf, final long reco * @return True if dfs#recoverLease came by true. */ private static boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, - final Path p, final long startWaiting) throws FileNotFoundException { + final Path p, final long startWaiting) throws FileNotFoundException { boolean recovered = false; try { recovered = dfs.recoverLease(p); - LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") + - getLogMessageDetail(nbAttempt, p, startWaiting)); + LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") + + getLogMessageDetail(nbAttempt, p, startWaiting)); } catch (IOException e) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { // This exception comes out instead of FNFE, fix it @@ -188,9 +188,9 @@ private static boolean recoverLease(final DistributedFileSystem dfs, final int n * @return Detail to append to any log message around lease recovering. */ private static String getLogMessageDetail(final int nbAttempt, final Path p, - final long startWaiting) { - return "attempt=" + nbAttempt + " on file=" + p + " after " + - (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; + final long startWaiting) { + return "attempt=" + nbAttempt + " on file=" + p + " after " + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; } /** @@ -198,7 +198,7 @@ private static String getLogMessageDetail(final int nbAttempt, final Path p, * @return True if file is closed. 
*/ private static boolean isFileClosed(final DistributedFileSystem dfs, final Method m, - final Path p) { + final Path p) { try { return (Boolean) m.invoke(dfs, p); } catch (SecurityException e) { @@ -210,7 +210,7 @@ private static boolean isFileClosed(final DistributedFileSystem dfs, final Metho } private static void checkIfCancelled(final CancelableProgressable reporter) - throws InterruptedIOException { + throws InterruptedIOException { if (reporter == null) { return; } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java index 51a4aa0b89c0..d8d667a2c697 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ protected static void setupClusterTestDir() { // Using randomUUID ensures that multiple clusters can be launched by // a same test, if it stops & starts them Path testDir = - UTIL.getDataTestDir("cluster_" + HBaseCommonTestingUtil.getRandomUUID().toString()); + UTIL.getDataTestDir("cluster_" + HBaseCommonTestingUtil.getRandomUUID().toString()); CLUSTER_TEST_DIR = new File(testDir.toString()).getAbsoluteFile(); // Have it cleaned up on exit boolean b = deleteOnExit(); diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java index a3da52ef335f..cdf09dda52b9 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -35,28 +36,24 @@ public class TestExcludeDatanodeManager { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExcludeDatanodeManager.class); + HBaseClassTestRule.forClass(TestExcludeDatanodeManager.class); @Test public void testExcludeSlowDNBySpeed() { Configuration conf = HBaseConfiguration.create(); ExcludeDatanodeManager excludeDatanodeManager = new ExcludeDatanodeManager(conf); StreamSlowMonitor streamSlowDNsMonitor = - excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); + excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - DatanodeInfo datanodeInfo = - new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") - .setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) - .setIpcPort(444).setNetworkLocation("location1").build(); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 
5100, 0); + DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0") + .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222) + .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build(); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); } @@ -66,21 +63,17 @@ public void testExcludeSlowDNByProcessTime() { Configuration conf = HBaseConfiguration.create(); ExcludeDatanodeManager excludeDatanodeManager = new ExcludeDatanodeManager(conf); StreamSlowMonitor streamSlowDNsMonitor = - excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); + excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - DatanodeInfo datanodeInfo = - new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") - .setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) - .setIpcPort(444).setNetworkLocation("location1").build(); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); + DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0") + .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222) + .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build(); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java index d363282921c9..0cd246b27a7d 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,6 +57,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; @@ -68,7 +69,7 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutput.class); + HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutput.class); private static final Logger LOG = LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutput.class); private static DistributedFileSystem FS; @@ -102,7 +103,7 @@ public static void tearDown() throws IOException, InterruptedException { private static final Random RNG = new Random(); // This test depends on Random#setSeed static void writeAndVerify(FileSystem fs, Path f, AsyncFSOutput out) - throws IOException, InterruptedException, ExecutionException { + throws IOException, InterruptedException, ExecutionException { List> futures = new ArrayList<>(); byte[] b = new byte[10]; // test pipelined flush @@ -199,12 +200,12 @@ public void testCreateParentFailed() throws IOException { @Test public void testConnectToDatanodeFailed() - throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException, - InvocationTargetException, InterruptedException, NoSuchFieldException { + throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException, + InvocationTargetException, InterruptedException, NoSuchFieldException { Field xceiverServerDaemonField = DataNode.class.getDeclaredField("dataXceiverServer"); xceiverServerDaemonField.setAccessible(true); Class xceiverServerClass = - Class.forName("org.apache.hadoop.hdfs.server.datanode.DataXceiverServer"); + Class.forName("org.apache.hadoop.hdfs.server.datanode.DataXceiverServer"); Method numPeersMethod = xceiverServerClass.getDeclaredMethod("getNumPeers"); numPeersMethod.setAccessible(true); // make one datanode broken @@ -223,12 +224,12 @@ public void testConnectToDatanodeFailed() @Test public void testExcludeFailedConnectToDatanode() - throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException, - InvocationTargetException, InterruptedException, NoSuchFieldException { + throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException, + InvocationTargetException, InterruptedException, NoSuchFieldException { Field xceiverServerDaemonField = DataNode.class.getDeclaredField("dataXceiverServer"); xceiverServerDaemonField.setAccessible(true); Class xceiverServerClass = - Class.forName("org.apache.hadoop.hdfs.server.datanode.DataXceiverServer"); + Class.forName("org.apache.hadoop.hdfs.server.datanode.DataXceiverServer"); Method numPeersMethod = xceiverServerClass.getDeclaredMethod("getNumPeers"); numPeersMethod.setAccessible(true); // make one datanode broken @@ -236,13 +237,13 @@ public void testExcludeFailedConnectToDatanode() Path f = new Path("/test"); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); ExcludeDatanodeManager excludeDatanodeManager = - new ExcludeDatanodeManager(HBaseConfiguration.create()); + new ExcludeDatanodeManager(HBaseConfiguration.create()); StreamSlowMonitor streamSlowDNsMonitor = - excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); + 
excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, - f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, - CHANNEL_CLASS, streamSlowDNsMonitor)) { + try (FanOutOneBlockAsyncDFSOutput output = + FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, + FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor)) { // should exclude the dead dn when retry so here we only have 2 DNs in pipeline assertEquals(2, output.getPipeline().length); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java index 8ee838449e14..301fefd185b7 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java index eff8d8a86b7a..9480358557f0 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestLocalAsyncOutput { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLocalAsyncOutput.class); + HBaseClassTestRule.forClass(TestLocalAsyncOutput.class); private static EventLoopGroup GROUP = new NioEventLoopGroup(); @@ -61,7 +61,7 @@ public static void tearDownAfterClass() throws IOException { @Test public void test() throws IOException, InterruptedException, ExecutionException, - CommonFSUtils.StreamLacksCapabilityException { + CommonFSUtils.StreamLacksCapabilityException { Path f = new Path(TEST_UTIL.getDataTestDir(), "test"); FileSystem fs = FileSystem.getLocal(TEST_UTIL.getConfiguration()); AsyncFSOutput out = AsyncFSOutputHelper.createOutput(fs, f, false, true, diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java index 592598c8bb44..84ebe3786770 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestOverwriteFileUnderConstruction extends AsyncFSTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestOverwriteFileUnderConstruction.class); + HBaseClassTestRule.forClass(TestOverwriteFileUnderConstruction.class); private static FileSystem FS; diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java index e7fce27d60c9..39352cdeed45 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -74,11 +74,11 @@ public class TestSaslFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase { private static final Logger LOG = - LoggerFactory.getLogger(TestSaslFanOutOneBlockAsyncDFSOutput.class); + LoggerFactory.getLogger(TestSaslFanOutOneBlockAsyncDFSOutput.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSaslFanOutOneBlockAsyncDFSOutput.class); + HBaseClassTestRule.forClass(TestSaslFanOutOneBlockAsyncDFSOutput.class); private static DistributedFileSystem FS; @@ -131,7 +131,7 @@ public static Iterable data() { private static void setUpKeyProvider(Configuration conf) throws Exception { URI keyProviderUri = - new URI("jceks://file" + UTIL.getDataTestDir("test.jks").toUri().toString()); + new URI("jceks://file" + UTIL.getDataTestDir("test.jks").toUri().toString()); conf.set("dfs.encryption.key.provider.uri", keyProviderUri.toString()); KeyProvider keyProvider = KeyProviderFactory.get(keyProviderUri, conf); keyProvider.createKey(TEST_KEY_NAME, KeyProvider.options(conf)); diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java index 55ef0b72b527..f9ea781d11da 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java index a91c95ac4dbc..20ae002e6eb9 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java @@ -115,7 +115,7 @@ public static Configuration getSecuredConfiguration() { * @param spnegoPrincipal SPNEGO principal used by NN web UI. 
*/ public static void setSecuredConfiguration(Configuration conf, String servicePrincipal, - String spnegoPrincipal) { + String spnegoPrincipal) { setPrincipalForTesting(servicePrincipal); setSecuredConfiguration(conf); setSecuredHadoopConfiguration(conf, spnegoPrincipal); @@ -131,7 +131,7 @@ public static void setSecuredConfiguration(Configuration conf) { } private static void setSecuredHadoopConfiguration(Configuration conf, - String spnegoServerPrincipal) { + String spnegoServerPrincipal) { String serverPrincipal = System.getProperty(KRB_PRINCIPAL); String keytabFilePath = System.getProperty(KRB_KEYTAB_FILE); // HDFS @@ -160,7 +160,7 @@ private static void setSecuredHadoopConfiguration(Configuration conf, * @throws Exception if unable to set up SSL configuration */ public static void setSSLConfiguration(HBaseCommonTestingUtil utility, Class clazz) - throws Exception { + throws Exception { Configuration conf = utility.getConfiguration(); conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); @@ -173,19 +173,19 @@ public static void setSSLConfiguration(HBaseCommonTestingUtil utility, Class } public static UserGroupInformation loginAndReturnUGI(Configuration conf, String username) - throws IOException { + throws IOException { String hostname = InetAddress.getLocalHost().getHostName(); String keyTabFileConfKey = "hbase." + username + ".keytab.file"; String keyTabFileLocation = conf.get(keyTabFileConfKey); String principalConfKey = "hbase." + username + ".kerberos.principal"; String principal = org.apache.hadoop.security.SecurityUtil - .getServerPrincipal(conf.get(principalConfKey), hostname); + .getServerPrincipal(conf.get(principalConfKey), hostname); if (keyTabFileLocation == null || principal == null) { LOG.warn( "Principal or key tab file null for : " + principalConfKey + ", " + keyTabFileConfKey); } UserGroupInformation ugi = - UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTabFileLocation); + UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTabFileLocation); return ugi; } } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java index 3c58d9c5c780..4723301a80ff 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -41,7 +40,7 @@ public class TestRecoverLeaseFSUtils { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRecoverLeaseFSUtils.class); + HBaseClassTestRule.forClass(TestRecoverLeaseFSUtils.class); private static final HBaseCommonTestingUtil HTU = new HBaseCommonTestingUtil(); static { @@ -64,13 +63,13 @@ public void testRecoverLease() throws IOException { DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class); // Fail four times and pass on the fifth. 
Mockito.when(dfs.recoverLease(FILE)).thenReturn(false).thenReturn(false).thenReturn(false) - .thenReturn(false).thenReturn(true); + .thenReturn(false).thenReturn(true); RecoverLeaseFSUtils.recoverFileLease(dfs, FILE, HTU.getConfiguration(), reporter); Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two // invocations will happen pretty fast... the we fall into the longer wait loop). - assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 * - HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); + assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 + * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); } /** diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml index 2014710cfa50..93e00df17d4c 100644 --- a/hbase-backup/pom.xml +++ b/hbase-backup/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-backup Apache HBase - Backup Backup for HBase - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - @@ -173,12 +153,34 @@ test + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -213,8 +215,7 @@ lifecycle-mapping - - + diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java index ff1e13f79594..958768f0e460 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import java.io.Closeable; import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupSet; import org.apache.yetus.audience.InterfaceAudience; @@ -30,8 +28,8 @@ * The administrative API for HBase Backup. Construct an instance and call {@link #close()} * afterwards. *
<p>
- * BackupAdmin can be used to create backups, restore data from backups and for other - * backup-related operations. + * BackupAdmin can be used to create backups, restore data from backups and for other backup-related + * operations. * @since 2.0 */ @InterfaceAudience.Private @@ -71,9 +69,8 @@ public interface BackupAdmin extends Closeable { /** * Merge backup images command - * @param backupIds array of backup ids of images to be merged - * The resulting backup image will have the same backup id as the most - * recent image from a list of images to be merged + * @param backupIds array of backup ids of images to be merged The resulting backup image will + * have the same backup id as the most recent image from a list of images to be merged * @throws IOException exception */ void mergeBackups(String[] backupIds) throws IOException; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java index e3abb6039970..e3f73aeeb966 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,13 +18,11 @@ package org.apache.hadoop.hbase.backup; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient; import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient; import org.apache.hadoop.hbase.backup.impl.TableBackupClient; import org.apache.hadoop.hbase.client.Connection; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -33,7 +31,7 @@ private BackupClientFactory() { } public static TableBackupClient create(Connection conn, String backupId, BackupRequest request) - throws IOException { + throws IOException { Configuration conf = conn.getConfiguration(); try { String clsName = conf.get(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java index f5e213716612..82e9d2830cc5 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
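Editorial sketch for the BackupAdmin interface touched above, which is the entry point for creating, restoring and merging backups. This is a hedged, minimal usage example assuming only API that appears elsewhere in this patch (BackupAdminImpl, BackupRequest.Builder, BackupUtils.createRestoreRequest) plus an existing Configuration conf; the table names and target directory are illustrative placeholders, not values from the patch:

    // Hedged sketch: take a full backup of two tables, then restore one of them.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupAdmin admin = new BackupAdminImpl(conn)) {
      BackupRequest request = new BackupRequest.Builder()
          .withBackupType(BackupType.FULL)
          .withTableList(Lists.newArrayList(TableName.valueOf("ns:t1"), TableName.valueOf("ns:t2")))
          .withTargetRootDir("hdfs://nn:8020/backup")   // illustrative backup root dir
          .build();
      String backupId = admin.backupTables(request);    // id of the new backup session
      // Restore ns:t1 from that image into ns:t1_restored, overwriting if it already exists.
      admin.restore(BackupUtils.createRestoreRequest("hdfs://nn:8020/backup", backupId,
          false, new TableName[] { TableName.valueOf("ns:t1") },
          new TableName[] { TableName.valueOf("ns:t1_restored") }, true));
    }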
*/ - package org.apache.hadoop.hbase.backup; import java.io.IOException; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.backup.impl.BackupManager; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java index 7889f6cf7b3f..547a39c8d623 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,9 +58,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** - * * Command-line entry point for backup operation - * */ @InterfaceAudience.Private public class BackupDriver extends AbstractHBaseTool { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java index 0550f9bc1473..619cecaeaaac 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; @@ -54,7 +53,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor private Connection connection; private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table - //used by unit test to skip reading backup:system + // used by unit test to skip reading backup:system private boolean checkForFullyBackedUpTables = true; private List fullyBackedUpTables = null; @@ -79,8 +78,7 @@ private Set loadHFileRefs(List tableList) throws IOException connection = ConnectionFactory.createConnection(conf); } try (BackupSystemTable tbl = new BackupSystemTable(connection)) { - Map>[] res = - tbl.readBulkLoadedFiles(null, tableList); + Map>[] res = tbl.readBulkLoadedFiles(null, tableList); secondPrevReadFromBackupTbl = prevReadFromBackupTbl; prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime(); return getFilenameFromBulkLoad(res); @@ -91,6 +89,7 @@ private Set loadHFileRefs(List tableList) throws IOException void setCheckForFullyBackedUpTables(boolean b) { checkForFullyBackedUpTables = b; } + @Override public Iterable getDeletableFiles(Iterable files) { if (conf == null) { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index d8a6940362a5..84c1f3a2dbc0 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import java.io.IOException; @@ -35,6 +34,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder; @@ -137,8 +137,8 @@ public enum BackupPhase { private Map> tableSetTimestampMap; /** - * Previous Region server log timestamps for table set after distributed log roll key - - * table name, value - map of RegionServer hostname -> last log rolled timestamp + * Previous Region server log timestamps for table set after distributed log roll key - table + * name, value - map of RegionServer hostname -> last log rolled timestamp */ private Map> incrTimestampMap; @@ -198,8 +198,7 @@ public Map> getTableSetTimestampMap() { return tableSetTimestampMap; } - public void setTableSetTimestampMap(Map> tableSetTimestampMap) { + public void setTableSetTimestampMap(Map> tableSetTimestampMap) { this.tableSetTimestampMap = tableSetTimestampMap; } @@ -357,8 +356,7 @@ public void setIncrBackupFileList(List incrBackupFileList) { * Set the new region server log timestamps after distributed log roll * @param prevTableSetTimestampMap table timestamp map */ - public void setIncrTimestampMap(Map> prevTableSetTimestampMap) { + public void setIncrTimestampMap(Map> prevTableSetTimestampMap) { this.incrTimestampMap = prevTableSetTimestampMap; } @@ -456,7 +454,7 @@ private void setTableSetTimestampMap(Builder builder) { for (Entry> entry : this.getTableSetTimestampMap().entrySet()) { builder.putTableSetTimestamp(entry.getKey().getNameAsString(), BackupProtos.BackupInfo.RSTimestampMap.newBuilder().putAllRsTimestamp(entry.getValue()) - .build()); + .build()); } } } @@ -482,8 +480,8 @@ public static BackupInfo fromProto(BackupProtos.BackupInfo proto) { context.setState(BackupInfo.BackupState.valueOf(proto.getBackupState().name())); } - context.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(), - proto.getBackupId())); + context.setHLogTargetDir( + BackupUtils.getLogBackupDir(proto.getBackupRootDir(), proto.getBackupId())); if (proto.hasBackupPhase()) { context.setPhase(BackupPhase.valueOf(proto.getBackupPhase().name())); @@ -507,12 +505,12 @@ private static Map toMap(List> getTableSetTimestampMap( - Map map) { + private static Map> + getTableSetTimestampMap(Map map) { Map> tableSetTimestampMap = new HashMap<>(); for (Entry entry : map.entrySet()) { - tableSetTimestampMap - .put(TableName.valueOf(entry.getKey()), entry.getValue().getRsTimestampMap()); + tableSetTimestampMap.put(TableName.valueOf(entry.getKey()), + entry.getValue().getRsTimestampMap()); } return tableSetTimestampMap; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java index de91fa19c52c..1e2b17145025 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupMergeJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import java.io.IOException; - import org.apache.hadoop.conf.Configurable; import org.apache.yetus.audience.InterfaceAudience; @@ -32,7 +30,6 @@ public interface BackupMergeJob extends Configurable { /** * Run backup merge operation. - * * @param backupIds backup image ids * @throws IOException if the backup merge operation fails */ diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java index 191e5025dd70..540611144afe 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.backup; @@ -22,7 +21,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -55,8 +53,8 @@ public Optional getRegionObserver() { @Override public void postBulkLoadHFile(ObserverContext ctx, - List> stagingFamilyPaths, Map> finalPaths) - throws IOException { + List> stagingFamilyPaths, Map> finalPaths) + throws IOException { Configuration cfg = ctx.getEnvironment().getConfiguration(); if (finalPaths == null) { // there is no need to record state @@ -82,6 +80,7 @@ public void postBulkLoadHFile(ObserverContext ctx, LOG.error("Failed to get tables which have been fully backed up", ioe); } } + @Override public void preCommitStoreFile(final ObserverContext ctx, final byte[] family, final List> pairs) throws IOException { diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java index 003c0e793e2f..c9c7a5b61810 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup; import java.util.List; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java index 0e0b90c0fc4d..2add5568ae09 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import org.apache.hadoop.hbase.HConstants; @@ -45,7 +44,7 @@ public interface BackupRestoreConstants { int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 10000; /* - * Drivers option list + * Drivers option list */ String OPTION_OVERWRITE = "o"; String OPTION_OVERWRITE_DESC = "Overwrite data if any of the restore target tables exists"; @@ -62,8 +61,8 @@ public interface BackupRestoreConstants { String OPTION_DEBUG_DESC = "Enable debug loggings"; String OPTION_TABLE = "t"; - String OPTION_TABLE_DESC = "Table name. If specified, only backup images," - + " which contain this table will be listed."; + String OPTION_TABLE_DESC = + "Table name. If specified, only backup images," + " which contain this table will be listed."; String OPTION_LIST = "l"; String OPTION_TABLE_LIST_DESC = "Table name list, comma-separated."; @@ -84,37 +83,32 @@ public interface BackupRestoreConstants { String OPTION_KEEP = "k"; String OPTION_KEEP_DESC = "Specifies maximum age of backup (in days) to keep during bulk delete"; - String OPTION_TABLE_MAPPING = "m"; - String OPTION_TABLE_MAPPING_DESC = - "A comma separated list of target tables. " - + "If specified, each table in must have a mapping"; + String OPTION_TABLE_MAPPING_DESC = "A comma separated list of target tables. 
" + + "If specified, each table in must have a mapping"; String OPTION_YARN_QUEUE_NAME = "q"; String OPTION_YARN_QUEUE_NAME_DESC = "Yarn queue name to run backup create command on"; String OPTION_YARN_QUEUE_NAME_RESTORE_DESC = "Yarn queue name to run backup restore command on"; String JOB_NAME_CONF_KEY = "mapreduce.job.name"; - String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY - + "=true\n" - + "hbase.master.logcleaner.plugins=" - +"YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n" + String BACKUP_CONFIG_STRING = + BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n" + "hbase.master.logcleaner.plugins=" + + "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n" + "hbase.procedure.master.classes=YOUR_CLASSES," - +"org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n" + + "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n" + "hbase.procedure.regionserver.classes=YOUR_CLASSES," + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n" + "hbase.coprocessor.region.classes=YOUR_CLASSES," - + "org.apache.hadoop.hbase.backup.BackupObserver\n" - + "and restart the cluster\n" + + "org.apache.hadoop.hbase.backup.BackupObserver\n" + "and restart the cluster\n" + "For more information please see http://hbase.apache.org/book.html#backuprestore\n"; - String ENABLE_BACKUP = "Backup is not enabled. To enable backup, "+ - "in hbase-site.xml, set:\n " + String ENABLE_BACKUP = "Backup is not enabled. To enable backup, " + "in hbase-site.xml, set:\n " + BACKUP_CONFIG_STRING; String VERIFY_BACKUP = "To enable backup, in hbase-site.xml, set:\n " + BACKUP_CONFIG_STRING; /* - * Delimiter in table name list in restore command + * Delimiter in table name list in restore command */ String TABLENAME_DELIMITER_IN_COMMAND = ","; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java index b1bc532d6c1e..84956295320a 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ /** * Factory implementation for backup/restore related jobs - * */ @InterfaceAudience.Private public final class BackupRestoreFactory { @@ -57,9 +56,8 @@ public static RestoreJob getRestoreJob(Configuration conf) { * @return backup copy job instance */ public static BackupCopyJob getBackupCopyJob(Configuration conf) { - Class cls = - conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyJob.class, - BackupCopyJob.class); + Class cls = conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, + MapReduceBackupCopyJob.class, BackupCopyJob.class); BackupCopyJob service = ReflectionUtils.newInstance(cls, conf); service.setConf(conf); return service; @@ -71,9 +69,8 @@ public static BackupCopyJob getBackupCopyJob(Configuration conf) { * @return backup merge job instance */ public static BackupMergeJob getBackupMergeJob(Configuration conf) { - Class cls = - conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, MapReduceBackupMergeJob.class, - BackupMergeJob.class); + Class cls = conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS, + MapReduceBackupMergeJob.class, BackupMergeJob.class); BackupMergeJob service = ReflectionUtils.newInstance(cls, conf); service.setConf(conf); return service; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java index 50abcc82acc5..01097422e3a1 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupTableInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; @@ -29,14 +29,14 @@ */ @InterfaceAudience.Private -public class BackupTableInfo { +public class BackupTableInfo { /* - * Table name for backup + * Table name for backup */ private TableName table; /* - * Snapshot name for offline/online snapshot + * Snapshot name for offline/online snapshot */ private String snapshotName = null; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java index e0975548ae36..a9a23d472e55 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java @@ -1,14 +1,13 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; import java.io.IOException; import java.util.HashMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -56,8 +53,8 @@ private HBackupFileSystem() { * @param tableName table name * @return backupPath String for the particular table */ - public static String - getTableBackupDir(String backupRootDir, String backupId, TableName tableName) { + public static String getTableBackupDir(String backupRootDir, String backupId, + TableName tableName) { return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() + Path.SEPARATOR; @@ -126,21 +123,19 @@ public static Path getLogBackupPath(String backupRootDir, String backupId) { private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId) throws IOException { FileSystem fs = backupRootPath.getFileSystem(conf); - Path manifestPath = - new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR - + BackupManifest.MANIFEST_FILE_NAME); + Path manifestPath = new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR + + BackupManifest.MANIFEST_FILE_NAME); if (!fs.exists(manifestPath)) { - String errorMsg = - "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for " - + backupId + ". File " + manifestPath + " does not exists. Did " + backupId - + " correspond to previously taken backup ?"; + String errorMsg = "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + + " for " + backupId + ". File " + manifestPath + " does not exists. Did " + backupId + + " correspond to previously taken backup ?"; throw new IOException(errorMsg); } return manifestPath; } - public static BackupManifest - getManifest(Configuration conf, Path backupRootPath, String backupId) throws IOException { + public static BackupManifest getManifest(Configuration conf, Path backupRootPath, String backupId) + throws IOException { BackupManifest manifest = new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId)); return manifest; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index 433815851a73..4bef13eccb7c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,9 +59,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; /** - * * Command-line entry point for restore operation - * */ @InterfaceAudience.Private public class RestoreDriver extends AbstractHBaseTool { @@ -107,13 +105,13 @@ private int parseAndRun(String[] args) throws IOException { // whether to only check the dependencies, false by default boolean check = cmd.hasOption(OPTION_CHECK); if (check) { - LOG.debug("Found -check option in restore command, " - + "will check and verify the dependencies"); + LOG.debug( + "Found -check option in restore command, " + "will check and verify the dependencies"); } if (cmd.hasOption(OPTION_SET) && cmd.hasOption(OPTION_TABLE)) { - System.err.println("Options -s and -t are mutaully exclusive,"+ - " you can not specify both of them."); + System.err.println( + "Options -s and -t are mutaully exclusive," + " you can not specify both of them."); printToolUsage(); return -1; } @@ -155,8 +153,8 @@ private int parseAndRun(String[] args) throws IOException { return -2; } if (tables == null) { - System.out.println("ERROR: Backup set '" + setName - + "' is either empty or does not exist"); + System.out + .println("ERROR: Backup set '" + setName + "' is either empty or does not exist"); printToolUsage(); return -3; } @@ -167,15 +165,15 @@ private int parseAndRun(String[] args) throws IOException { TableName[] sTableArray = BackupUtils.parseTableNames(tables); TableName[] tTableArray = BackupUtils.parseTableNames(tableMapping); - if (sTableArray != null && tTableArray != null && - (sTableArray.length != tTableArray.length)) { + if (sTableArray != null && tTableArray != null + && (sTableArray.length != tTableArray.length)) { System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping); printToolUsage(); return -4; } - client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check, - sTableArray, tTableArray, overwrite)); + client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check, sTableArray, + tTableArray, overwrite)); } catch (Exception e) { LOG.error("Error while running restore backup", e); return -5; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java index 29b128887780..44a2ec93e704 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup; import java.io.IOException; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; @@ -40,6 +38,6 @@ public interface RestoreJob extends Configurable { * @param fullBackupRestore full backup restore * @throws IOException if running the job fails */ - void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, - boolean fullBackupRestore) throws IOException; + void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, boolean fullBackupRestore) + throws IOException; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java index a654cce50d5b..eb4786f57869 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index 0d20f37def6c..34e5eb86ddcc 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -158,7 +157,8 @@ public int deleteBackups(String[] backupIds) throws IOException { BackupSystemTable.deleteSnapshot(conn); // We still have record with unfinished delete operation LOG.error("Delete operation failed, please run backup repair utility to restore " - + "backup system integrity", e); + + "backup system integrity", + e); throw e; } else { LOG.warn("Delete operation succeeded, there were some errors: ", e); @@ -283,10 +283,10 @@ private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOE } private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable) - throws IOException { + throws IOException { List tables = info.getTableNames(); - LOG.debug("Remove " + tn + " from " + info.getBackupId() + " tables=" - + info.getTableListAsString()); + LOG.debug( + "Remove " + tn + " from " + info.getBackupId() + " tables=" + info.getTableListAsString()); if (tables.contains(tn)) { tables.remove(tn); @@ -349,9 +349,8 @@ private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configurat FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); - Path targetDirPath = - new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(), - backupInfo.getBackupId(), table)); + Path targetDirPath = new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(), + backupInfo.getBackupId(), table)); if (outputFs.delete(targetDirPath, true)) { LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " 
done."); } else { @@ -474,8 +473,8 @@ public void addToBackupSet(String name, TableName[] tables) throws IOException { } } table.addToBackupSet(name, tableNames); - LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name - + "' backup set"); + LOG.info( + "Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + "' backup set"); } } @@ -484,8 +483,8 @@ public void removeFromBackupSet(String name, TableName[] tables) throws IOExcept LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'"); try (final BackupSystemTable table = new BackupSystemTable(conn)) { table.removeFromBackupSet(name, toStringArray(tables)); - LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name - + "' completed."); + LOG.info( + "Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "' completed."); } } @@ -535,8 +534,8 @@ public String backupTables(BackupRequest request) throws IOException { if (incrTableSet.isEmpty()) { String msg = "Incremental backup table set contains no tables. " - + "You need to run full backup first " - + (tableList != null ? "on " + StringUtils.join(tableList, ",") : ""); + + "You need to run full backup first " + + (tableList != null ? "on " + StringUtils.join(tableList, ",") : ""); throw new IOException(msg); } @@ -545,7 +544,7 @@ public String backupTables(BackupRequest request) throws IOException { if (!tableList.isEmpty()) { String extraTables = StringUtils.join(tableList, ","); String msg = "Some tables (" + extraTables + ") haven't gone through full backup. " - + "Perform full backup on " + extraTables + " first, " + "then retry the command"; + + "Perform full backup on " + extraTables + " first, " + "then retry the command"; throw new IOException(msg); } } @@ -559,8 +558,8 @@ public String backupTables(BackupRequest request) throws IOException { FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration()); if (outputFs.exists(targetTableBackupDirPath)) { - throw new IOException("Target backup directory " + targetTableBackupDir - + " exists already."); + throw new IOException( + "Target backup directory " + targetTableBackupDir + " exists already."); } outputFs.mkdirs(targetTableBackupDirPath); } @@ -581,8 +580,8 @@ public String backupTables(BackupRequest request) throws IOException { tableList = excludeNonExistingTables(tableList, nonExistingTableList); } else { // Throw exception only in full mode - we try to backup non-existing table - throw new IOException("Non-existing tables found in the table list: " - + nonExistingTableList); + throw new IOException( + "Non-existing tables found in the table list: " + nonExistingTableList); } } } @@ -590,9 +589,9 @@ public String backupTables(BackupRequest request) throws IOException { // update table list BackupRequest.Builder builder = new BackupRequest.Builder(); request = builder.withBackupType(request.getBackupType()).withTableList(tableList) - .withTargetRootDir(request.getTargetRootDir()) - .withBackupSetName(request.getBackupSetName()).withTotalTasks(request.getTotalTasks()) - .withBandwidthPerTasks((int) request.getBandwidth()).build(); + .withTargetRootDir(request.getTargetRootDir()).withBackupSetName(request.getBackupSetName()) + .withTotalTasks(request.getTotalTasks()).withBandwidthPerTasks((int) request.getBandwidth()) + .build(); TableBackupClient client; try { @@ -619,7 +618,7 @@ private List excludeNonExistingTables(List tableList, public void mergeBackups(String[] backupIds) 
throws IOException { try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { checkIfValidForMerge(backupIds, sysTable); - //TODO run job on remote cluster + // TODO run job on remote cluster BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration()); job.run(backupIds); } @@ -627,7 +626,6 @@ public void mergeBackups(String[] backupIds) throws IOException { /** * Verifies that backup images are valid for merge. - * *

 * <ul>
 * <li>All backups MUST be in the same destination
 *
  • No FULL backups are allowed - only INCREMENTAL @@ -640,7 +638,7 @@ public void mergeBackups(String[] backupIds) throws IOException { * @throws IOException if the backup image is not valid for merge */ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) - throws IOException { + throws IOException { String backupRoot = null; final Set allTables = new HashSet<>(); @@ -656,7 +654,7 @@ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) backupRoot = bInfo.getBackupRootDir(); } else if (!bInfo.getBackupRootDir().equals(backupRoot)) { throw new IOException("Found different backup destinations in a list of a backup sessions " - + "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir()); + + "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir()); } if (bInfo.getType() == BackupType.FULL) { throw new IOException("FULL backup image can not be merged for: \n" + bInfo); @@ -677,7 +675,7 @@ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) } } - final long startRangeTime = minTime; + final long startRangeTime = minTime; final long endRangeTime = maxTime; final String backupDest = backupRoot; // Check we have no 'holes' in backup id list @@ -688,7 +686,7 @@ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) BackupInfo.Filter timeRangeFilter = info -> { long time = info.getStartTs(); - return time >= startRangeTime && time <= endRangeTime ; + return time >= startRangeTime && time <= endRangeTime; }; BackupInfo.Filter tableFilter = info -> { @@ -699,20 +697,20 @@ private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL; BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE; - List allInfos = table.getBackupHistory(-1, destinationFilter, - timeRangeFilter, tableFilter, typeFilter, stateFilter); + List allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter, + tableFilter, typeFilter, stateFilter); if (allInfos.size() != allBackups.size()) { - // Yes we have at least one hole in backup image sequence + // Yes we have at least one hole in backup image sequence List missingIds = new ArrayList<>(); - for(BackupInfo info: allInfos) { - if(allBackups.contains(info.getBackupId())) { + for (BackupInfo info : allInfos) { + if (allBackups.contains(info.getBackupId())) { continue; } missingIds.add(info.getBackupId()); } String errMsg = - "Sequence of backup ids has 'holes'. The following backup images must be added:" + - org.apache.hadoop.util.StringUtils.join(",", missingIds); + "Sequence of backup ids has 'holes'. The following backup images must be added:" + + org.apache.hadoop.util.StringUtils.join(",", missingIds); throw new IOException(errMsg); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index b0a29e257b07..733a76f44c21 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC; @@ -44,7 +43,6 @@ import java.io.IOException; import java.net.URI; import java.util.List; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -88,8 +86,7 @@ public final class BackupCommands { + " describe show the detailed information of a backup image\n" + " history show history of all successful backups\n" + " progress show the progress of the latest backup request\n" - + " set backup set management\n" - + " repair repair backup system table\n" + + " set backup set management\n" + " repair repair backup system table\n" + " merge merge backup images\n" + "Run \'hbase backup COMMAND -h\' to see help message for each command\n"; @@ -105,8 +102,8 @@ public final class BackupCommands { public static final String NO_INFO_FOUND = "No info was found for backup id: "; public static final String NO_ACTIVE_SESSION_FOUND = "No active backup sessions found."; - public static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup describe \n" - + " backup_id Backup image id\n"; + public static final String DESCRIBE_CMD_USAGE = + "Usage: hbase backup describe \n" + " backup_id Backup image id\n"; public static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [options]"; @@ -114,13 +111,13 @@ public final class BackupCommands { public static final String REPAIR_CMD_USAGE = "Usage: hbase backup repair\n"; - public static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n" - + " name Backup set name\n" - + " tables Comma separated list of tables.\n" + "COMMAND is one of:\n" - + " add add tables to a set, create a set if needed\n" - + " remove remove tables from a set\n" - + " list list all backup sets in the system\n" - + " describe describe set\n" + " delete delete backup set\n"; + public static final String SET_CMD_USAGE = + "Usage: hbase backup set COMMAND [name] [tables]\n" + " name Backup set name\n" + + " tables Comma separated list of tables.\n" + "COMMAND is one of:\n" + + " add add tables to a set, create a set if needed\n" + + " remove remove tables from a set\n" + + " list list all backup sets in the system\n" + + " describe describe set\n" + " delete delete backup set\n"; public static final String MERGE_CMD_USAGE = "Usage: hbase backup merge [backup_ids]\n" + " backup_ids Comma separated list of backup image ids.\n"; @@ -301,8 +298,8 @@ public void execute() throws IOException { // Check if we have both: backup set and list of tables if (cmdline.hasOption(OPTION_TABLE) && cmdline.hasOption(OPTION_SET)) { - System.out.println("ERROR: You can specify either backup set or list" - + " of tables, but not both"); + System.out.println( + "ERROR: You can specify either backup set or list" + " of tables, but not both"); printUsage(); throw new IOException(INCORRECT_USAGE); } @@ -315,20 +312,20 @@ public void execute() throws IOException { tables = getTablesForSet(setName, getConf()); if (tables == null) { - System.out.println("ERROR: Backup set '" + setName - + "' is either empty or does not exist"); + System.out + .println("ERROR: Backup set '" + setName + "' is either empty or does not exist"); printUsage(); throw new 
IOException(INCORRECT_USAGE); } } else { tables = cmdline.getOptionValue(OPTION_TABLE); } - int bandwidth = - cmdline.hasOption(OPTION_BANDWIDTH) ? Integer.parseInt(cmdline - .getOptionValue(OPTION_BANDWIDTH)) : -1; - int workers = - cmdline.hasOption(OPTION_WORKERS) ? Integer.parseInt(cmdline - .getOptionValue(OPTION_WORKERS)) : -1; + int bandwidth = cmdline.hasOption(OPTION_BANDWIDTH) + ? Integer.parseInt(cmdline.getOptionValue(OPTION_BANDWIDTH)) + : -1; + int workers = cmdline.hasOption(OPTION_WORKERS) + ? Integer.parseInt(cmdline.getOptionValue(OPTION_WORKERS)) + : -1; if (cmdline.hasOption(OPTION_YARN_QUEUE_NAME)) { String queueName = cmdline.getOptionValue(OPTION_YARN_QUEUE_NAME); @@ -338,13 +335,11 @@ public void execute() throws IOException { try (BackupAdminImpl admin = new BackupAdminImpl(conn)) { BackupRequest.Builder builder = new BackupRequest.Builder(); - BackupRequest request = - builder - .withBackupType(BackupType.valueOf(args[1].toUpperCase())) - .withTableList( - tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null) - .withTargetRootDir(targetBackupDir).withTotalTasks(workers) - .withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build(); + BackupRequest request = builder.withBackupType(BackupType.valueOf(args[1].toUpperCase())) + .withTableList( + tables != null ? Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null) + .withTargetRootDir(targetBackupDir).withTotalTasks(workers) + .withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build(); String backupId = admin.backupTables(request); System.out.println("Backup session " + backupId + " finished. Status: SUCCESS"); } catch (IOException e) { @@ -506,8 +501,8 @@ public static class ProgressCommand extends Command { public void execute() throws IOException { if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 1) { - System.out.println("No backup id was specified, " - + "will retrieve the most recent (ongoing) session"); + System.out.println( + "No backup id was specified, " + "will retrieve the most recent (ongoing) session"); } String[] args = cmdline == null ? 
null : cmdline.getArgs(); if (args != null && args.length > 2) { @@ -694,10 +689,9 @@ public void execute() throws IOException { // set overall backup status: failed backupInfo.setState(BackupState.FAILED); // compose the backup failed data - String backupFailedData = - "BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs() - + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" - + backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg(); + String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts=" + + backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" + + backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg(); System.out.println(backupFailedData); TableBackupClient.cleanupAndRestoreBackupSystem(conn, backupInfo, conf); // If backup session is updated to FAILED state - means we @@ -754,9 +748,11 @@ public static void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTab } boolean res = fs.rename(tmpPath, destPath); if (!res) { - throw new IOException("MERGE repair: failed to rename from "+ tmpPath+" to "+ destPath); + throw new IOException( + "MERGE repair: failed to rename from " + tmpPath + " to " + destPath); } - System.out.println("MERGE repair: renamed from "+ tmpPath+" to "+ destPath+" res="+ res); + System.out + .println("MERGE repair: renamed from " + tmpPath + " to " + destPath + " res=" + res); } else { checkRemoveBackupImages(fs, backupRoot, backupIds); } @@ -771,18 +767,18 @@ public static void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTab } private static void checkRemoveBackupImages(FileSystem fs, String backupRoot, - String[] backupIds) throws IOException { + String[] backupIds) throws IOException { String mergedBackupId = BackupUtils.findMostRecentBackupId(backupIds); - for (String backupId: backupIds) { + for (String backupId : backupIds) { if (backupId.equals(mergedBackupId)) { continue; } Path path = HBackupFileSystem.getBackupPath(backupRoot, backupId); if (fs.exists(path)) { if (!fs.delete(path, true)) { - System.out.println("MERGE repair removing: "+ path +" - FAILED"); + System.out.println("MERGE repair removing: " + path + " - FAILED"); } else { - System.out.println("MERGE repair removing: "+ path +" - OK"); + System.out.println("MERGE repair removing: " + path + " - OK"); } } } @@ -816,16 +812,16 @@ public void execute() throws IOException { String[] args = cmdline == null ? null : cmdline.getArgs(); if (args == null || (args.length != 2)) { - System.err.println("ERROR: wrong number of arguments: " - + (args == null ? null : args.length)); + System.err + .println("ERROR: wrong number of arguments: " + (args == null ? null : args.length)); printUsage(); throw new IOException(INCORRECT_USAGE); } String[] backupIds = args[1].split(","); if (backupIds.length < 2) { - String msg = "ERROR: can not merge a single backup image. "+ - "Number of images must be greater than 1."; + String msg = "ERROR: can not merge a single backup image. 
" + + "Number of images must be greater than 1."; System.err.println(msg); throw new IOException(msg); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java index 0147c292a276..fb1e21b93f2f 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import org.apache.hadoop.hbase.HBaseIOException; @@ -68,8 +67,7 @@ public BackupException(String msg, Throwable cause, BackupInfo desc) { } /** - * Exception when the description of the backup cannot be determined, due to some other root - * cause + * Exception when the description of the backup cannot be determined, due to some other root cause * @param message description of what caused the failure * @param e root cause */ diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index 08494f0a1e5e..bced27eb8030 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -120,12 +119,13 @@ public static void decorateMasterConfiguration(Configuration conf) { } plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS); - conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, (plugins == null ? "" : plugins + ",") + - BackupHFileCleaner.class.getName()); + conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, + (plugins == null ? "" : plugins + ",") + BackupHFileCleaner.class.getName()); if (LOG.isDebugEnabled()) { - LOG.debug("Added log cleaner: {}. Added master procedure manager: {}." - +"Added master procedure manager: {}", cleanerClass, masterProcedureClass, - BackupHFileCleaner.class.getName()); + LOG.debug( + "Added log cleaner: {}. Added master procedure manager: {}." 
+ + "Added master procedure manager: {}", + cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName()); } } @@ -230,7 +230,7 @@ public BackupInfo createBackupInfo(String backupId, BackupType type, List getAncestors(BackupInfo backupInfo) throws IOExcep if (BackupManifest.canCoverImage(ancestors, image)) { LOG.debug("Met the backup boundary of the current table set:"); for (BackupImage image1 : ancestors) { - LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir()); + LOG.debug(" BackupID={}, BackupDir={}", image1.getBackupId(), image1.getRootDir()); } } else { Path logBackupPath = HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId()); - LOG.debug("Current backup has an incremental backup ancestor, " - + "touching its image manifest in {}" - + " to construct the dependency.", logBackupPath.toString()); + LOG.debug( + "Current backup has an incremental backup ancestor, " + + "touching its image manifest in {}" + " to construct the dependency.", + logBackupPath.toString()); BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath); BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); ancestors.add(lastIncrImage); - LOG.debug( - "Last dependent incremental backup image: {BackupID={}" + - "BackupDir={}}", lastIncrImage.getBackupId(), lastIncrImage.getRootDir()); + LOG.debug("Last dependent incremental backup image: {BackupID={}" + "BackupDir={}}", + lastIncrImage.getBackupId(), lastIncrImage.getRootDir()); } } } @@ -403,7 +404,7 @@ public void startBackupSession() throws IOException { || (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000) { lastWarningOutputTime = EnvironmentEdgeManager.currentTime(); LOG.warn("Waiting to acquire backup exclusive lock for {}s", - +(lastWarningOutputTime - startTime) / 1000); + +(lastWarningOutputTime - startTime) / 1000); } } else { throw e; @@ -411,7 +412,7 @@ public void startBackupSession() throws IOException { } } throw new IOException( - "Failed to acquire backup system table exclusive lock after " + timeout / 1000 + "s"); + "Failed to acquire backup system table exclusive lock after " + timeout / 1000 + "s"); } /** @@ -452,7 +453,7 @@ public HashMap readRegionServerLastLogRollResult() throws IOExcept } public Pair>>>>, List> - readBulkloadRows(List tableList) throws IOException { + readBulkloadRows(List tableList) throws IOException { return systemTable.readBulkloadRows(tableList); } @@ -480,8 +481,8 @@ public ArrayList getBackupHistory(boolean completed) throws IOExcept * @param tables tables * @throws IOException exception */ - public void writeRegionServerLogTimestamp(Set tables, - Map newTimestamps) throws IOException { + public void writeRegionServerLogTimestamp(Set tables, Map newTimestamps) + throws IOException { systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, backupInfo.getBackupRootDir()); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java index 4d4965dd6576..2f41127ab0ee 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; @@ -26,7 +25,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -50,9 +48,8 @@ /** * Backup manifest contains all the meta data of a backup image. The manifest info will be bundled * as manifest file together with data. So that each backup image will contain all the info needed - * for restore. BackupManifest is a storage container for BackupImage. - * It is responsible for storing/reading backup image data and has some additional utility methods. - * + * for restore. BackupManifest is a storage container for BackupImage. It is responsible for + * storing/reading backup image data and has some additional utility methods. */ @InterfaceAudience.Private public class BackupManifest { @@ -126,8 +123,8 @@ public BackupImage() { super(); } - private BackupImage(String backupId, BackupType type, String rootDir, - List tableList, long startTs, long completeTs) { + private BackupImage(String backupId, BackupType type, String rootDir, List tableList, + long startTs, long completeTs) { this.backupId = backupId; this.type = type; this.rootDir = rootDir; @@ -149,9 +146,8 @@ static BackupImage fromProto(BackupProtos.BackupImage im) { List ancestorList = im.getAncestorsList(); - BackupType type = - im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL - : BackupType.INCREMENTAL; + BackupType type = im.getBackupType() == BackupProtos.BackupType.FULL ? 
BackupType.FULL + : BackupType.INCREMENTAL; BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); for (BackupProtos.BackupImage img : ancestorList) { @@ -187,8 +183,8 @@ BackupProtos.BackupImage toProto() { return builder.build(); } - private static Map> loadIncrementalTimestampMap( - BackupProtos.BackupImage proto) { + private static Map> + loadIncrementalTimestampMap(BackupProtos.BackupImage proto) { List list = proto.getTstMapList(); Map> incrTimeRanges = new HashMap<>(); @@ -378,10 +374,9 @@ private void setIncrTimeRanges(Map> incrTimeRanges) */ public BackupManifest(BackupInfo backup) { BackupImage.Builder builder = BackupImage.newBuilder(); - this.backupImage = - builder.withBackupId(backup.getBackupId()).withType(backup.getType()) - .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) - .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); + this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType()) + .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) + .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); } /** @@ -393,15 +388,13 @@ public BackupManifest(BackupInfo backup, TableName table) { List tables = new ArrayList(); tables.add(table); BackupImage.Builder builder = BackupImage.newBuilder(); - this.backupImage = - builder.withBackupId(backup.getBackupId()).withType(backup.getType()) - .withRootDir(backup.getBackupRootDir()).withTableList(tables) - .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); + this.backupImage = builder.withBackupId(backup.getBackupId()).withType(backup.getType()) + .withRootDir(backup.getBackupRootDir()).withTableList(tables) + .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); } /** * Construct manifest from a backup directory. - * * @param conf configuration * @param backupPath backup path * @throws IOException if constructing the manifest from the backup directory fails @@ -479,9 +472,9 @@ public List getTableList() { public void store(Configuration conf) throws BackupException { byte[] data = backupImage.toProto().toByteArray(); // write the file, overwrite if already exist - Path manifestFilePath = - new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), - backupImage.getBackupId()), MANIFEST_FILE_NAME); + Path manifestFilePath = new Path( + HBackupFileSystem.getBackupPath(backupImage.getRootDir(), backupImage.getBackupId()), + MANIFEST_FILE_NAME); try (FSDataOutputStream out = manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) { out.write(data); @@ -531,8 +524,8 @@ public ArrayList getRestoreDependentList(boolean reverse) { for (BackupImage image : backupImage.getAncestors()) { restoreImages.put(Long.valueOf(image.startTs), image); } - return new ArrayList<>(reverse ? (restoreImages.descendingMap().values()) - : (restoreImages.values())); + return new ArrayList<>( + reverse ? 
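Tying the reflowed builder and store() code above together: a manifest is built from the in-flight BackupInfo, persisted under the backup image directory, and can later be re-read from that directory. A short sketch of that round trip, where the BackupInfo instance is assumed to come from an ongoing session and package locations are assumed from the module layout.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;

final class ManifestSketch {
  // backupInfo and conf are assumed to be supplied by the surrounding backup session.
  static void persistAndReload(Configuration conf, BackupInfo backupInfo) throws Exception {
    new BackupManifest(backupInfo).store(conf);        // writes the manifest file into the image directory
    Path backupPath =
      HBackupFileSystem.getBackupPath(backupInfo.getBackupRootDir(), backupInfo.getBackupId());
    BackupImage image = new BackupManifest(conf, backupPath).getBackupImage();
    System.out.println("Re-read image " + image.getBackupId() + " rooted at " + image.getRootDir());
  }
}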
(restoreImages.descendingMap().values()) : (restoreImages.values())); } /** @@ -664,8 +657,8 @@ public BackupInfo toBackupInfo() { info.setStartTs(backupImage.getStartTs()); info.setBackupRootDir(backupImage.getRootDir()); if (backupImage.getType() == BackupType.INCREMENTAL) { - info.setHLogTargetDir(BackupUtils.getLogBackupDir(backupImage.getRootDir(), - backupImage.getBackupId())); + info.setHLogTargetDir( + BackupUtils.getLogBackupDir(backupImage.getRootDir(), backupImage.getBackupId())); } return info; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index 88093ba1c9e1..b923fbfc76c4 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -69,6 +68,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -232,7 +232,7 @@ private void waitForSystemTable(Admin admin, TableName tableName) throws IOExcep long TIMEOUT = 60000; long startTime = EnvironmentEdgeManager.currentTime(); LOG.debug("Backup table {} is not present and available, waiting for it to become so", - tableName); + tableName); while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) { try { Thread.sleep(100); @@ -240,7 +240,7 @@ private void waitForSystemTable(Admin admin, TableName tableName) throws IOExcep } if (EnvironmentEdgeManager.currentTime() - startTime > TIMEOUT) { throw new IOException( - "Failed to create backup system table " + tableName + " after " + TIMEOUT + "ms"); + "Failed to create backup system table " + tableName + " after " + TIMEOUT + "ms"); } } LOG.debug("Backup table {} exists and available", tableName); @@ -259,7 +259,7 @@ public void close() { public void updateBackupInfo(BackupInfo info) throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("update backup status in backup system table for: " + info.getBackupId() - + " set status=" + info.getState()); + + " set status=" + info.getState()); } try (Table table = connection.getTable(tableName)) { Put put = createPutForBackupInfo(info); @@ -315,11 +315,11 @@ public Map>[] readBulkLoadedFiles(String backupId, List> finalPaths) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("write bulk load descriptor to backup " + tabName + " with " + finalPaths.size() - + " entries"); + + " entries"); } try (Table table = connection.getTable(bulkLoadTableName)) { List puts = BackupSystemTable.createPutForCommittedBulkload(tabName, region, finalPaths); @@ -426,7 +426,7 @@ public void deleteBulkLoadedRows(List rows) throws IOException { * whether the hfile was recorded by preCommitStoreFile hook (true) */ public Pair>>>>, List> - readBulkloadRows(List tableList) throws IOException { + readBulkloadRows(List tableList) throws IOException { Map>>>> map = new HashMap<>(); List rows = new ArrayList<>(); @@ -453,16 +453,16 @@ public void deleteBulkLoadedRows(List rows) throws IOException { fam = Bytes.toString(CellUtil.cloneValue(cell)); } else if 
(CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, BackupSystemTable.PATH_COL.length) == 0) { - path = Bytes.toString(CellUtil.cloneValue(cell)); - } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0, - BackupSystemTable.STATE_COL.length) == 0) { - byte[] state = CellUtil.cloneValue(cell); - if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) { - raw = true; - } else { - raw = false; - } - } + path = Bytes.toString(CellUtil.cloneValue(cell)); + } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0, + BackupSystemTable.STATE_COL.length) == 0) { + byte[] state = CellUtil.cloneValue(cell); + if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) { + raw = true; + } else { + raw = false; + } + } } if (map.get(tTable) == null) { map.put(tTable, new HashMap<>()); @@ -852,8 +852,8 @@ public ArrayList getBackupInfos(BackupState state) throws IOExceptio * @param backupRoot root directory path to backup * @throws IOException exception */ - public void writeRegionServerLogTimestamp(Set tables, - Map newTimestamps, String backupRoot) throws IOException { + public void writeRegionServerLogTimestamp(Set tables, Map newTimestamps, + String backupRoot) throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("write RS log time stamps to backup system table for tables [" + StringUtils.join(tables, ",") + "]"); @@ -916,7 +916,7 @@ private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName BackupProtos.TableServerTimestamp.Builder tstBuilder = BackupProtos.TableServerTimestamp.newBuilder(); tstBuilder - .setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table)); + .setTableName(org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toProtoTableName(table)); for (Entry entry : map.entrySet()) { BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder(); @@ -933,7 +933,7 @@ private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName } private HashMap - fromTableServerTimestampProto(BackupProtos.TableServerTimestamp proto) { + fromTableServerTimestampProto(BackupProtos.TableServerTimestamp proto) { HashMap map = new HashMap<>(); List list = proto.getServerTimestampList(); @@ -981,7 +981,7 @@ public void addIncrementalBackupTableSet(Set tables, String backupRoo throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Add incremental backup table set to backup system table. ROOT=" + backupRoot - + " tables [" + StringUtils.join(tables, " ") + "]"); + + " tables [" + StringUtils.join(tables, " ") + "]"); } if (LOG.isDebugEnabled()) { tables.forEach(table -> LOG.debug(Objects.toString(table))); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/ExclusiveOperationException.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/ExclusiveOperationException.java index af7fd8bb1c89..d5c4ab31c655 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/ExclusiveOperationException.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/ExclusiveOperationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
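The readBulkloadRows() reindentation above keeps the same qualifier-dispatch idiom: compare each Cell's qualifier against a known column constant, then clone its value. A stripped-down sketch of that idiom over a scan Result; the column names here are invented for illustration and are not the system table's actual constants.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

final class QualifierDispatchSketch {
  private static final byte[] PATH_COL = Bytes.toBytes("path");    // illustrative column qualifiers
  private static final byte[] STATE_COL = Bytes.toBytes("state");

  static void dump(Result res) {
    for (Cell cell : res.rawCells()) {
      if (CellUtil.compareQualifiers(cell, PATH_COL, 0, PATH_COL.length) == 0) {
        System.out.println("path  = " + Bytes.toString(CellUtil.cloneValue(cell)));
      } else if (CellUtil.compareQualifiers(cell, STATE_COL, 0, STATE_COL.length) == 0) {
        System.out.println("state = " + Bytes.toString(CellUtil.cloneValue(cell)));
      }
    }
  }
}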
See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java index 6ad409e70b36..76534420e263 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_ATTEMPTS_PAUSE_MS_KEY; @@ -28,7 +27,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupCopyJob; import org.apache.hadoop.hbase.backup.BackupInfo; @@ -48,7 +46,6 @@ /** * Full table backup implementation - * */ @InterfaceAudience.Private public class FullTableBackupClient extends TableBackupClient { @@ -127,7 +124,6 @@ protected void snapshotCopy(BackupInfo backupInfo) throws Exception { /** * Backup request execution. - * * @throws IOException if the execution of the backup fails */ @Override @@ -163,9 +159,8 @@ public void execute() throws IOException { // SNAPSHOT_TABLES: backupInfo.setPhase(BackupPhase.SNAPSHOT); for (TableName tableName : tableList) { - String snapshotName = - "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" - + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); + String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); snapshotTable(admin, tableName, snapshotName); backupInfo.setSnapshotName(tableName, snapshotName); @@ -191,8 +186,7 @@ public void execute() throws IOException { backupInfo.setTableSetTimestampMap(newTableSetTimestampMap); Long newStartCode = - BackupUtils.getMinValue(BackupUtils - .getRSLogTimestampMins(newTableSetTimestampMap)); + BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); backupManager.writeBackupStartCode(newStartCode); // backup complete @@ -206,10 +200,8 @@ public void execute() throws IOException { protected void snapshotTable(Admin admin, TableName tableName, String snapshotName) throws IOException { - int maxAttempts = - conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS); - int pause = - conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS); + int maxAttempts = conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS); + int pause = conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS); int attempts = 0; while (attempts++ < maxAttempts) { @@ -218,7 +210,8 @@ protected void snapshotTable(Admin admin, TableName tableName, String snapshotNa return; } catch (IOException ee) { LOG.warn("Snapshot attempt " + attempts + " failed for table " + tableName - + ", 
sleeping for " + pause + "ms", ee); + + ", sleeping for " + pause + "ms", + ee); if (attempts < maxAttempts) { try { Thread.sleep(pause); @@ -229,6 +222,6 @@ protected void snapshotTable(Admin admin, TableName tableName, String snapshotNa } } } - throw new IOException("Failed to snapshot table "+ tableName); + throw new IOException("Failed to snapshot table " + tableName); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 847837f04424..f547e2ec0396 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; @@ -79,9 +78,8 @@ public Map getIncrBackupLogFileMap() throws IOException { // get all new log files from .logs and .oldlogs after last TS and before new timestamp if (savedStartCode == null || previousTimestampMins == null || previousTimestampMins.isEmpty()) { - throw new IOException( - "Cannot read any previous back up timestamps from backup system table. " - + "In order to create an incremental backup, at least one full backup is needed."); + throw new IOException("Cannot read any previous back up timestamps from backup system table. " + + "In order to create an incremental backup, at least one full backup is needed."); } LOG.info("Execute roll log procedure for incremental backup ..."); @@ -103,9 +101,9 @@ public Map getIncrBackupLogFileMap() throws IOException { private List excludeProcV2WALs(List logList) { List list = new ArrayList<>(); - for (int i=0; i < logList.size(); i++) { + for (int i = 0; i < logList.size(); i++) { Path p = new Path(logList.get(i)); - String name = p.getName(); + String name = p.getName(); if (name.startsWith(WALProcedureStore.LOG_PREFIX)) { continue; @@ -191,10 +189,10 @@ private List getLogFilesForNewBackup(Map olderTimestamps, // or RS is down (was decommisioned). 
In any case, we treat this // log file as eligible for inclusion into incremental backup log list Long ts = newestTimestamps.get(host); - if (ts == null) { + if (ts == null) { LOG.warn("ORPHAN log found: " + log + " host=" + host); LOG.debug("Known hosts (from newestTimestamps):"); - for (String s: newestTimestamps.keySet()) { + for (String s : newestTimestamps.keySet()) { LOG.debug(s); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java index 918e99a444fd..3c36ecf058c0 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; @@ -53,9 +52,7 @@ import org.slf4j.LoggerFactory; /** - * Incremental backup implementation. - * See the {@link #execute() execute} method. - * + * Incremental backup implementation. See the {@link #execute() execute} method. */ @InterfaceAudience.Private public class IncrementalTableBackupClient extends TableBackupClient { @@ -105,19 +102,19 @@ protected static int getIndex(TableName tbl, List sTableList) { } /* - * Reads bulk load records from backup table, iterates through the records and forms the paths - * for bulk loaded hfiles. Copies the bulk loaded hfiles to backup destination + * Reads bulk load records from backup table, iterates through the records and forms the paths for + * bulk loaded hfiles. 
Copies the bulk loaded hfiles to backup destination * @param sTableList list of tables to be backed up * @return map of table to List of files */ @SuppressWarnings("unchecked") protected Map>[] handleBulkLoad(List sTableList) - throws IOException { + throws IOException { Map>[] mapForSrc = new Map[sTableList.size()]; List activeFiles = new ArrayList<>(); List archiveFiles = new ArrayList<>(); Pair>>>>, List> pair = - backupManager.readBulkloadRows(sTableList); + backupManager.readBulkloadRows(sTableList); Map>>>> map = pair.getFirst(); FileSystem tgtFs; try { @@ -128,8 +125,8 @@ protected Map>[] handleBulkLoad(List sTableList) Path rootdir = CommonFSUtils.getRootDir(conf); Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId); - for (Map.Entry>>>> tblEntry : - map.entrySet()) { + for (Map.Entry>>>> tblEntry : map + .entrySet()) { TableName srcTable = tblEntry.getKey(); int srcIdx = getIndex(srcTable, sTableList); @@ -143,13 +140,13 @@ protected Map>[] handleBulkLoad(List sTableList) Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable); Path tgtTable = new Path(new Path(tgtRoot, srcTable.getNamespaceAsString()), srcTable.getQualifierAsString()); - for (Map.Entry>>> regionEntry : - tblEntry.getValue().entrySet()){ + for (Map.Entry>>> regionEntry : tblEntry + .getValue().entrySet()) { String regionName = regionEntry.getKey(); Path regionDir = new Path(tblDir, regionName); // map from family to List of hfiles - for (Map.Entry>> famEntry : - regionEntry.getValue().entrySet()) { + for (Map.Entry>> famEntry : regionEntry.getValue() + .entrySet()) { String fam = famEntry.getKey(); Path famDir = new Path(regionDir, fam); List files; @@ -170,7 +167,7 @@ protected Map>[] handleBulkLoad(List sTableList) int idx = file.lastIndexOf("/"); String filename = file; if (idx > 0) { - filename = file.substring(idx+1); + filename = file.substring(idx + 1); } Path p = new Path(famDir, filename); Path tgt = new Path(tgtFam, filename); @@ -183,7 +180,7 @@ protected Map>[] handleBulkLoad(List sTableList) LOG.trace("copying " + p + " to " + tgt); } activeFiles.add(p.toString()); - } else if (fs.exists(archive)){ + } else if (fs.exists(archive)) { LOG.debug("copying archive " + archive + " to " + tgt); archiveFiles.add(archive.toString()); } @@ -207,8 +204,8 @@ private void copyBulkLoadedFiles(List activeFiles, List archiveF String tgtDest = backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId(); int attempt = 1; while (activeFiles.size() > 0) { - LOG.info("Copy "+ activeFiles.size() + - " active bulk loaded files. Attempt ="+ (attempt++)); + LOG.info( + "Copy " + activeFiles.size() + " active bulk loaded files. 
Attempt =" + (attempt++)); String[] toCopy = new String[activeFiles.size()]; activeFiles.toArray(toCopy); // Active file can be archived during copy operation, @@ -270,8 +267,7 @@ public void execute() throws IOException { backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL); LOG.debug("For incremental backup, current table set is " + backupManager.getIncrementalBackupTableSet()); - newTimestamps = - ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap(); + newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap(); } catch (Exception e) { // fail the overall backup and return failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ", @@ -285,8 +281,8 @@ public void execute() throws IOException { BackupUtils.copyTableRegionInfo(conn, backupInfo, conf); // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT convertWALsToHFiles(); - incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()}, - backupInfo.getBackupRootDir()); + incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() }, + backupInfo.getBackupRootDir()); } catch (Exception e) { String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId; // fail the overall backup and return @@ -298,8 +294,7 @@ public void execute() throws IOException { // After this checkpoint, even if entering cancel process, will let the backup finished try { // Set the previousTimestampMap which is before this current log roll to the manifest. - Map> previousTimestampMap = - backupManager.readLogTimestampMap(); + Map> previousTimestampMap = backupManager.readLogTimestampMap(); backupInfo.setIncrTimestampMap(previousTimestampMap); // The table list in backupInfo is good for both full backup and incremental backup. @@ -345,11 +340,11 @@ protected void incrementalCopyHFiles(String[] files, String backupDest) throws I int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr); if (res != 0) { LOG.error("Copy incremental HFile files failed with return code: " + res + "."); - throw new IOException("Failed copy from " + StringUtils.join(files, ',') - + " to " + backupDest); + throw new IOException( + "Failed copy from " + StringUtils.join(files, ',') + " to " + backupDest); } - LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') - + " to " + backupDest + " finished."); + LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') + " to " + backupDest + + " finished."); } finally { deleteBulkLoadDirectory(); } @@ -398,7 +393,7 @@ protected void walToHFiles(List dirPaths, List tableList) throws // a Map task for each file. 
We use ';' as separator // because WAL file names contains ',' String dirs = StringUtils.join(dirPaths, ';'); - String jobname = "Incremental_Backup-" + backupId ; + String jobname = "Incremental_Backup-" + backupId; Path bulkOutputPath = getBulkOutputDir(); conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); @@ -410,7 +405,7 @@ protected void walToHFiles(List dirPaths, List tableList) throws try { player.setConf(conf); int result = player.run(playerArgs); - if(result != 0) { + if (result != 0) { throw new IOException("WAL Player failed"); } conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java index 5ec44ee93964..8a9410dfa448 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.impl; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; @@ -25,7 +24,6 @@ import java.util.HashMap; import java.util.List; import java.util.TreeSet; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -47,7 +45,6 @@ /** * Restore table implementation - * */ @InterfaceAudience.Private public class RestoreTablesClient { @@ -76,7 +73,6 @@ public RestoreTablesClient(Connection conn, RestoreRequest request) { /** * Validate target tables. - * * @param tTableArray target tables * @param isOverwrite overwrite existing table * @throws IOException exception @@ -102,12 +98,11 @@ private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) thr if (existTableList.size() > 0) { if (!isOverwrite) { - LOG.error("Existing table (" + existTableList - + ") found in the restore target, please add " + LOG.error("Existing table (" + existTableList + ") found in the restore target, please add " + "\"-o\" as overwrite option in the command if you mean" + " to restore to these existing tables"); - throw new IOException("Existing table found in target while no \"-o\" " - + "as overwrite option found"); + throw new IOException( + "Existing table found in target while no \"-o\" " + "as overwrite option found"); } else { if (disabledTableList.size() > 0) { LOG.error("Found offline table in the restore target, " @@ -122,7 +117,6 @@ private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) thr /** * Restore operation handle each backupImage in array. 
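walToHFiles() above drives WALPlayer in bulk-output mode: the WAL directories are joined with ';' (since WAL file names can contain ','), and BULK_OUTPUT_CONF_KEY redirects the output to HFiles instead of live writes. A minimal sketch of an equivalent standalone invocation via ToolRunner; the directories and table name are placeholders, and setting the separator explicitly here is an assumption about how the key is normally used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.WALPlayer;
import org.apache.hadoop.util.ToolRunner;

public final class WalToHFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Emit HFiles under this directory instead of writing to the live table.
    conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, "hdfs:///tmp/bulk_output");
    // WAL dirs are ';'-separated because WAL file names may themselves contain ','.
    conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";");
    String walDirs = "hdfs:///backup/WALs/dir1;hdfs:///backup/WALs/dir2";
    int rc = ToolRunner.run(conf, new WALPlayer(), new String[] { walDirs, "demo_table" });
    if (rc != 0) {
      throw new RuntimeException("WAL Player failed");
    }
  }
}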
- * * @param images array BackupImage * @param sTable table to be restored * @param tTable table to be restored to @@ -164,7 +158,7 @@ private void restoreImages(BackupImage[] images, TableName sTable, TableName tTa for (int i = 1; i < images.length; i++) { BackupImage im = images[i]; String fileBackupDir = - HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable); + HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable); List list = getFilesRecursively(fileBackupDir); dirList.addAll(list); @@ -228,9 +222,8 @@ private void restore(HashMap backupManifestMap, if (restoreImageSet != null && !restoreImageSet.isEmpty()) { LOG.info("Restore includes the following image(s):"); for (BackupImage image : restoreImageSet) { - LOG.info("Backup: " + image.getBackupId() + " " - + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), - table)); + LOG.info("Backup: " + image.getBackupId() + " " + HBackupFileSystem + .getTableBackupDir(image.getRootDir(), image.getBackupId(), table)); } } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java index 57f3a50a8eb7..ef82a046c20e 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,10 +44,9 @@ import org.slf4j.LoggerFactory; /** - * Base class for backup operation. Concrete implementation for - * full and incremental backup are delegated to corresponding sub-classes: - * {@link FullTableBackupClient} and {@link IncrementalTableBackupClient} - * + * Base class for backup operation. 
Concrete implementation for full and incremental backup are + * delegated to corresponding sub-classes: {@link FullTableBackupClient} and + * {@link IncrementalTableBackupClient} */ @InterfaceAudience.Private public abstract class TableBackupClient { @@ -88,9 +87,8 @@ public void init(final Connection conn, final String backupId, BackupRequest req this.conn = conn; this.conf = conn.getConfiguration(); this.fs = CommonFSUtils.getCurrentFileSystem(conf); - backupInfo = - backupManager.createBackupInfo(backupId, request.getBackupType(), tableList, - request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth()); + backupInfo = backupManager.createBackupInfo(backupId, request.getBackupType(), tableList, + request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth()); if (tableList == null || tableList.isEmpty()) { this.tableList = new ArrayList<>(backupInfo.getTables()); } @@ -159,9 +157,8 @@ protected static void deleteSnapshots(final Connection conn, BackupInfo backupIn */ protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException { FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf); - Path stagingDir = - new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() - .toString())); + Path stagingDir = new Path( + conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory().toString())); FileStatus[] files = CommonFSUtils.listStatus(fs, stagingDir); if (files == null) { return; @@ -177,30 +174,27 @@ protected static void cleanupExportSnapshotLog(Configuration conf) throws IOExce } /** - * Clean up the uncompleted data at target directory if the ongoing backup has already entered - * the copy phase. + * Clean up the uncompleted data at target directory if the ongoing backup has already entered the + * copy phase. */ protected static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) { try { // clean up the uncompleted data at target directory if the ongoing backup has already entered // the copy phase - LOG.debug("Trying to cleanup up target dir. Current backup phase: " - + backupInfo.getPhase()); + LOG.debug("Trying to cleanup up target dir. 
Current backup phase: " + backupInfo.getPhase()); if (backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY) || backupInfo.getPhase().equals(BackupPhase.INCREMENTAL_COPY) || backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)) { - FileSystem outputFs = - FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); + FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); // now treat one backup as a transaction, clean up data that has been partially copied at // table level for (TableName table : backupInfo.getTables()) { - Path targetDirPath = - new Path(HBackupFileSystem.getTableBackupDir(backupInfo.getBackupRootDir(), - backupInfo.getBackupId(), table)); + Path targetDirPath = new Path(HBackupFileSystem + .getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table)); if (outputFs.delete(targetDirPath, true)) { - LOG.debug("Cleaning up uncompleted backup data at " + targetDirPath.toString() - + " done."); + LOG.debug( + "Cleaning up uncompleted backup data at " + targetDirPath.toString() + " done."); } else { LOG.debug("No data has been copied to " + targetDirPath.toString() + "."); } @@ -238,10 +232,9 @@ protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager // set overall backup status: failed backupInfo.setState(BackupState.FAILED); // compose the backup failed data - String backupFailedData = - "BackupId=" + backupInfo.getBackupId() + ",startts=" + backupInfo.getStartTs() - + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" + backupInfo.getPhase() - + ",failedmessage=" + backupInfo.getFailedMsg(); + String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts=" + + backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase=" + + backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg(); LOG.error(backupFailedData); cleanupAndRestoreBackupSystem(conn, backupInfo, conf); // If backup session is updated to FAILED state - means we @@ -376,9 +369,8 @@ protected void completeBackup(final Connection conn, BackupInfo backupInfo, // compose the backup complete data String backupCompleteData = - obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs() - + ",completets=" + backupInfo.getCompleteTs() + ",bytescopied=" - + backupInfo.getTotalBytesCopied(); + obtainBackupMetaDataStr(backupInfo) + ",startts=" + backupInfo.getStartTs() + ",completets=" + + backupInfo.getCompleteTs() + ",bytescopied=" + backupInfo.getTotalBytesCopied(); if (LOG.isDebugEnabled()) { LOG.debug("Backup " + backupInfo.getBackupId() + " finished: " + backupCompleteData); } @@ -404,19 +396,18 @@ protected void completeBackup(final Connection conn, BackupInfo backupInfo, /** * Backup request execution. 
- * * @throws IOException if the execution of the backup fails */ public abstract void execute() throws IOException; protected Stage getTestStage() { - return Stage.valueOf("stage_"+ conf.getInt(BACKUP_TEST_MODE_STAGE, 0)); + return Stage.valueOf("stage_" + conf.getInt(BACKUP_TEST_MODE_STAGE, 0)); } protected void failStageIf(Stage stage) throws IOException { Stage current = getTestStage(); if (current == stage) { - throw new IOException("Failed stage " + stage+" in testing"); + throw new IOException("Failed stage " + stage + " in testing"); } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java index 3cc8bcf74091..10b833071998 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Arrays; import java.util.List; import java.util.Objects; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -130,8 +129,8 @@ public BackupInfo getBackupInfo() { * @param bytesCopied bytes copied * @throws NoNodeException exception */ - static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, - int newProgress, long bytesCopied) throws IOException { + static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, int newProgress, + long bytesCopied) throws IOException { // compose the new backup progress data, using fake number for now String backupProgressData = newProgress + "%"; @@ -142,12 +141,10 @@ static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, } /** - * Extends DistCp for progress updating to backup system table - * during backup. Using DistCpV2 (MAPREDUCE-2765). - * Simply extend it and override execute() method to get the - * Job reference for progress updating. - * Only the argument "src1, [src2, [...]] dst" is supported, - * no more DistCp options. + * Extends DistCp for progress updating to backup system table during backup. Using DistCpV2 + * (MAPREDUCE-2765). Simply extend it and override execute() method to get the Job reference for + * progress updating. Only the argument "src1, [src2, [...]] dst" is supported, no more DistCp + * options. 
*/ class BackupDistCp extends DistCp { @@ -162,8 +159,6 @@ public BackupDistCp(Configuration conf, DistCpOptions options, BackupInfo backup this.backupManager = backupManager; } - - @Override public Job execute() throws Exception { @@ -188,16 +183,14 @@ public Job execute() throws Exception { long totalSrcLgth = 0; for (Path aSrc : srcs) { - totalSrcLgth += - BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc); + totalSrcLgth += BackupUtils.getFilesLength(aSrc.getFileSystem(super.getConf()), aSrc); } // Async call job = super.execute(); // Update the copy progress to system table every 0.5s if progress value changed - int progressReportFreq = - MapReduceBackupCopyJob.this.getConf().getInt("hbase.backup.progressreport.frequency", - 500); + int progressReportFreq = MapReduceBackupCopyJob.this.getConf() + .getInt("hbase.backup.progressreport.frequency", 500); float lastProgress = progressDone; while (!job.isComplete()) { float newProgress = @@ -241,8 +234,8 @@ public Job execute() throws Exception { String jobID = job.getJobID().toString(); job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); - LOG.debug("DistCp job-id: " + jobID + " completed: " + job.isComplete() + " " - + job.isSuccessful()); + LOG.debug( + "DistCp job-id: " + jobID + " completed: " + job.isComplete() + " " + job.isSuccessful()); Counters ctrs = job.getCounters(); LOG.debug(Objects.toString(ctrs)); if (job.isComplete() && !job.isSuccessful()) { @@ -252,11 +245,11 @@ public Job execute() throws Exception { return job; } - private Field getInputOptionsField(Class classDistCp) throws IOException{ + private Field getInputOptionsField(Class classDistCp) throws IOException { Field f = null; try { f = classDistCp.getDeclaredField("inputOptions"); - } catch(Exception e) { + } catch (Exception e) { // Haddop 3 try { f = classDistCp.getDeclaredField("context"); @@ -268,7 +261,7 @@ private Field getInputOptionsField(Class classDistCp) throws IOException{ } @SuppressWarnings("unchecked") - private List getSourcePaths(Field fieldInputOptions) throws IOException{ + private List getSourcePaths(Field fieldInputOptions) throws IOException { Object options; try { options = fieldInputOptions.get(this); @@ -282,9 +275,8 @@ private List getSourcePaths(Field fieldInputOptions) throws IOException{ return (List) methodGetSourcePaths.invoke(options); } - } catch (IllegalArgumentException | IllegalAccessException | - ClassNotFoundException | NoSuchMethodException | - SecurityException | InvocationTargetException e) { + } catch (IllegalArgumentException | IllegalAccessException | ClassNotFoundException + | NoSuchMethodException | SecurityException | InvocationTargetException e) { throw new IOException(e); } @@ -352,8 +344,6 @@ private List getSourceFiles() throws NoSuchFieldException, SecurityExcepti return getSourcePaths(options); } - - private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException { FileSystem fs = pathToListFile.getFileSystem(conf); fs.delete(pathToListFile, false); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java index 375f34b48306..06c5da73a454 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software 
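BackupDistCp.execute() above launches the copy asynchronously and then polls the MapReduce Job, pushing a progress percentage to the backup system table roughly every "hbase.backup.progressreport.frequency" milliseconds. A generic sketch of that poll loop against a Hadoop Job handle; the Reporter callback is a stand-in for updateProgress(), and using mapProgress() as the progress source is an assumption for illustration.

import org.apache.hadoop.mapreduce.Job;

final class ProgressPollSketch {
  interface Reporter { void report(int percent); }             // stand-in for updateProgress()

  static void pollUntilDone(Job job, long freqMs, Reporter reporter) throws Exception {
    float last = -1f;
    while (!job.isComplete()) {
      float now = job.mapProgress();                           // 0.0f .. 1.0f for the map phase
      if (now != last) {                                       // only report when the value changed
        reporter.report(Math.round(now * 100));
        last = now;
      }
      Thread.sleep(freqMs);
    }
    if (!job.isSuccessful()) {
      throw new IllegalStateException("DistCp job " + job.getJobID() + " failed");
    }
    reporter.report(100);
  }
}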
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.backup.mapreduce; import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -52,9 +53,8 @@ import org.slf4j.LoggerFactory; /** - * MapReduce implementation of {@link BackupMergeJob} - * Must be initialized with configuration of a backup destination cluster - * + * MapReduce implementation of {@link BackupMergeJob} Must be initialized with configuration of a + * backup destination cluster */ @InterfaceAudience.Private public class MapReduceBackupMergeJob implements BackupMergeJob { @@ -119,9 +119,8 @@ public void run(String[] backupIds) throws IOException { Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds); String dirs = StringUtils.join(dirPaths, ","); - Path bulkOutputPath = - BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]), - getConf(), false); + Path bulkOutputPath = BackupUtils.getBulkOutputDir( + BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false); // Delete content if exists if (fs.exists(bulkOutputPath)) { if (!fs.delete(bulkOutputPath, true)) { @@ -149,14 +148,14 @@ public void run(String[] backupIds) throws IOException { // PHASE 2 (modification of a backup file system) // Move existing mergedBackupId data into tmp directory // we will need it later in case of a failure - Path tmpBackupDir = HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, - mergedBackupId); + Path tmpBackupDir = + HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, mergedBackupId); Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId); if (!fs.rename(backupDirPath, tmpBackupDir)) { - throw new IOException("Failed to rename "+ backupDirPath +" to "+tmpBackupDir); + throw new IOException("Failed to rename " + backupDirPath + " to " + tmpBackupDir); } else { - LOG.debug("Renamed "+ backupDirPath +" to "+ tmpBackupDir); + LOG.debug("Renamed " + backupDirPath + " to " + tmpBackupDir); } // Move new data into backup dest for (Pair tn : processedTableList) { @@ -170,7 +169,7 @@ public void run(String[] backupIds) throws IOException { // Delete tmp dir (Rename back during repair) if (!fs.delete(tmpBackupDir, true)) { // WARN and ignore - LOG.warn("Could not delete tmp dir: "+ tmpBackupDir); + LOG.warn("Could not delete tmp dir: " + tmpBackupDir); } // Delete old data deleteBackupImages(backupsToDelete, conn, fs, backupRoot); @@ -249,12 +248,12 @@ protected void copyFile(FileSystem fs, Path p, Path newPath) throws IOException } } -/** - * Converts path before copying - * @param p path - * @param backupDirPath backup root - * @return converted 
path - */ + /** + * Converts path before copying + * @param p path + * @param backupDirPath backup root + * @return converted path + */ protected Path convertToDest(Path p, Path backupDirPath) { String backupId = backupDirPath.getName(); Stack stack = new Stack(); @@ -339,24 +338,24 @@ protected List getBackupIdsToDelete(String[] backupIds, String mergedBac } protected void moveData(FileSystem fs, String backupRoot, Path bulkOutputPath, - TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException { + TableName tableName, String mergedBackupId) throws IllegalArgumentException, IOException { Path dest = new Path(HBackupFileSystem.getTableBackupDir(backupRoot, mergedBackupId, tableName)); FileStatus[] fsts = fs.listStatus(bulkOutputPath); for (FileStatus fst : fsts) { if (fst.isDirectory()) { - String family = fst.getPath().getName(); + String family = fst.getPath().getName(); Path newDst = new Path(dest, family); if (fs.exists(newDst)) { if (!fs.delete(newDst, true)) { - throw new IOException("failed to delete :"+ newDst); + throw new IOException("failed to delete :" + newDst); } } else { fs.mkdirs(dest); } boolean result = fs.rename(fst.getPath(), dest); - LOG.debug("MoveData from "+ fst.getPath() +" to "+ dest+" result="+ result); + LOG.debug("MoveData from " + fst.getPath() + " to " + dest + " result=" + result); } } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java index b8d520c530c7..f373573661c6 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup.mapreduce; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; @@ -69,17 +68,15 @@ protected MapReduceHFileSplitterJob(final Configuration c) { } /** - * A mapper that just writes out cells. This one can be used together with - * {@link CellSortReducer} + * A mapper that just writes out cells. 
This one can be used together with {@link CellSortReducer} */ - static class HFileCellMapper extends - Mapper { + static class HFileCellMapper extends Mapper { @Override public void map(NullWritable key, Cell value, Context context) throws IOException, InterruptedException { context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), - new MapReduceExtendedCell(value)); + new MapReduceExtendedCell(value)); } @Override @@ -100,9 +97,8 @@ public Job createSubmittableJob(String[] args) throws IOException { String tabName = args[1]; conf.setStrings(TABLES_KEY, tabName); conf.set(FileInputFormat.INPUT_DIR, inputDirs); - Job job = - Job.getInstance(conf, - conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); + Job job = Job.getInstance(conf, + conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); job.setJarByClass(MapReduceHFileSplitterJob.class); job.setInputFormatClass(HFileInputFormat.class); job.setMapOutputKeyClass(ImmutableBytesWritable.class); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java index 9daa282ffad4..447957d21797 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,13 +34,10 @@ import org.slf4j.LoggerFactory; /** - * MapReduce implementation of {@link RestoreJob} - * - * For backup restore, it runs {@link MapReduceHFileSplitterJob} job and creates - * HFiles which are aligned with a region boundaries of a table being - * restored. - * - * The resulting HFiles then are loaded using HBase bulk load tool {@link BulkLoadHFiles}. + * MapReduce implementation of {@link RestoreJob} For backup restore, it runs + * {@link MapReduceHFileSplitterJob} job and creates HFiles which are aligned with a region + * boundaries of a table being restored. The resulting HFiles then are loaded using HBase bulk load + * tool {@link BulkLoadHFiles}. 
*/ @InterfaceAudience.Private public class MapReduceRestoreJob implements RestoreJob { @@ -74,15 +71,12 @@ public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNam for (int i = 0; i < tableNames.length; i++) { LOG.info("Restore " + tableNames[i] + " into " + newTableNames[i]); - Path bulkOutputPath = - BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]), - getConf()); + Path bulkOutputPath = BackupUtils + .getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf()); Configuration conf = getConf(); conf.set(bulkOutputConfKey, bulkOutputPath.toString()); - String[] playerArgs = { - dirs, fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i] - .getNameAsString() - }; + String[] playerArgs = { dirs, fullBackupRestore ? newTableNames[i].getNameAsString() + : tableNames[i].getNameAsString() }; int result; try { @@ -97,8 +91,8 @@ public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNam } if (loader.bulkLoad(newTableNames[i], bulkOutputPath).isEmpty()) { - throw new IOException("Can not restore from backup directory " + dirs + - " (check Hadoop and HBase logs). Bulk loader returns null"); + throw new IOException("Can not restore from backup directory " + dirs + + " (check Hadoop and HBase logs). Bulk loader returns null"); } } else { throw new IOException("Can not restore from backup directory " + dirs @@ -107,8 +101,8 @@ public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNam LOG.debug("Restore Job finished:" + result); } catch (Exception e) { LOG.error(e.toString(), e); - throw new IOException("Can not restore from backup directory " + dirs - + " (check Hadoop and HBase logs) ", e); + throw new IOException( + "Can not restore from backup directory " + dirs + " (check Hadoop and HBase logs) ", e); } } } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index 79404b34e6de..326997dac3f4 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,6 +42,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils; import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; @@ -62,8 +62,7 @@ public BackupLogCleaner() { @Override public void init(Map params) { - MasterServices master = (MasterServices) MapUtils.getObject(params, - HMaster.MASTER); + MasterServices master = (MasterServices) MapUtils.getObject(params, HMaster.MASTER); if (master != null) { conn = master.getConnection(); if (getConf() == null) { @@ -79,9 +78,8 @@ public void init(Map params) { } } - private Map getServersToOldestBackupMapping(List backups) - throws IOException { + throws IOException { Map serverAddressToLastBackupMap = new HashMap<>(); Map tableNameBackupInfoMap = new HashMap<>(); @@ -91,7 +89,7 @@ private Map getServersToOldestBackupMapping(List back if (tableNameBackupInfoMap.get(table) <= backupInfo.getStartTs()) { tableNameBackupInfoMap.put(table, backupInfo.getStartTs()); for (Map.Entry entry : backupInfo.getTableSetTimestampMap().get(table) - .entrySet()) { + .entrySet()) { serverAddressToLastBackupMap.put(Address.fromString(entry.getKey()), entry.getValue()); } } @@ -117,7 +115,7 @@ public Iterable getDeletableFiles(Iterable files) { try { try (BackupManager backupManager = new BackupManager(conn, getConf())) { addressToLastBackupMap = - getServersToOldestBackupMapping(backupManager.getBackupHistory(true)); + getServersToOldestBackupMapping(backupManager.getBackupHistory(true)); } } catch (IOException ex) { LOG.error("Failed to analyse backup history with exception: {}. Retaining all logs", @@ -133,11 +131,11 @@ public Iterable getDeletableFiles(Iterable files) { try { Address walServerAddress = - Address.fromString(BackupUtils.parseHostNameFromLogFile(file.getPath())); + Address.fromString(BackupUtils.parseHostNameFromLogFile(file.getPath())); long walTimestamp = AbstractFSWALProvider.getTimestamp(file.getPath().getName()); if (!addressToLastBackupMap.containsKey(walServerAddress) - || addressToLastBackupMap.get(walServerAddress) >= walTimestamp) { + || addressToLastBackupMap.get(walServerAddress) >= walTimestamp) { filteredFiles.add(file); } } catch (Exception ex) { @@ -147,8 +145,8 @@ public Iterable getDeletableFiles(Iterable files) { } } - LOG - .info("Total files: {}, Filtered Files: {}", IterableUtils.size(files), filteredFiles.size()); + LOG.info("Total files: {}, Filtered Files: {}", IterableUtils.size(files), + filteredFiles.size()); return filteredFiles; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java index 32e3e23fdafb..b7c1508f2301 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. 
The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.master; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ThreadPoolExecutor; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; @@ -61,7 +59,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager { public static final String BACKUP_WAKE_MILLIS_KEY = "hbase.backup.logroll.wake.millis"; public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.logroll.timeout.millis"; public static final String BACKUP_POOL_THREAD_NUMBER_KEY = - "hbase.backup.logroll.pool.thread.number"; + "hbase.backup.logroll.pool.thread.number"; public static final int BACKUP_WAKE_MILLIS_DEFAULT = 500; public static final int BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000; @@ -89,13 +87,11 @@ public void initialize(MasterServices master, MetricsMaster metricsMaster) // setup the default procedure coordinator String name = master.getServerName().toString(); - // get the configuration for the coordinator Configuration conf = master.getConfiguration(); long wakeFrequency = conf.getInt(BACKUP_WAKE_MILLIS_KEY, BACKUP_WAKE_MILLIS_DEFAULT); - long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY,BACKUP_TIMEOUT_MILLIS_DEFAULT); - int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY, - BACKUP_POOL_THREAD_NUMBER_DEFAULT); + long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); + int opThreads = conf.getInt(BACKUP_POOL_THREAD_NUMBER_KEY, BACKUP_POOL_THREAD_NUMBER_DEFAULT); // setup the default procedure coordinator ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, opThreads); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java index 575be3945171..44a8c42fd6b2 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.regionserver; import java.util.HashMap; import java.util.List; import java.util.concurrent.Callable; - import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.client.Connection; @@ -111,8 +109,8 @@ public Void call() throws Exception { String server = host + ":" + port; Long sts = serverTimestampMap.get(host); if (sts != null && sts > highest) { - LOG.warn("Won't update server's last roll log result: current=" + sts + " new=" - + highest); + LOG.warn( + "Won't update server's last roll log result: current=" + sts + " new=" + highest); return null; } // write the log number to backup system table. diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java index ef126d7c52d9..60f22d32f6e6 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.regionserver; import java.io.Closeable; @@ -28,19 +27,18 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** - * Handle running each of the individual tasks for completing a backup procedure on a region - * server. 
+ * Handle running each of the individual tasks for completing a backup procedure on a region server. */ @InterfaceAudience.Private public class LogRollBackupSubprocedurePool implements Closeable, Abortable { @@ -58,15 +56,15 @@ public class LogRollBackupSubprocedurePool implements Closeable, Abortable { public LogRollBackupSubprocedurePool(String name, Configuration conf) { // configure the executor service - long keepAlive = - conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY, - LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT); + long keepAlive = conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY, + LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT); int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS); this.name = name; executor = - new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), - new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-backup-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), + new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-backup-pool-%d") + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER) + .build()); taskPool = new ExecutorCompletionService<>(executor); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java index f09e71005598..99b6aa6b5dc8 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup.regionserver; import java.io.IOException; import java.util.concurrent.ThreadPoolExecutor; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupManager; @@ -158,7 +156,7 @@ public void initialize(RegionServerServices rss) throws KeeperException { } ProcedureCoordinationManager coordManager = new ZKProcedureCoordinationManager(rss); this.memberRpcs = coordManager - .getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); + .getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); // read in the backup handler configuration properties Configuration conf = rss.getConfiguration(); diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java index 47bb12bb76e5..4228000d1966 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup.util; import java.util.List; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java index 90bb4fdcc203..d3aa7b3f0df9 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.backup.util; import java.io.FileNotFoundException; @@ -83,8 +82,8 @@ private BackupUtils() { * @param rsLogTimestampMap timestamp map * @return the min timestamp of each RS */ - public static Map getRSLogTimestampMins( - Map> rsLogTimestampMap) { + public static Map + getRSLogTimestampMins(Map> rsLogTimestampMap) { if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) { return null; } @@ -120,7 +119,7 @@ public static Map getRSLogTimestampMins( * @throws IOException exception */ public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); @@ -138,17 +137,17 @@ public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, C Path target = new Path(backupInfo.getTableBackupDir(table)); FileSystem targetFs = target.getFileSystem(conf); FSTableDescriptors descriptors = - new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf)); + new FSTableDescriptors(targetFs, CommonFSUtils.getRootDir(conf)); descriptors.createTableDescriptorForTableDirectory(target, orig, false); - LOG.debug("Attempting to copy table info for:" + table + " target: " + target + - " descriptor: " + orig); + LOG.debug("Attempting to copy table info for:" + table + " target: " + target + + " descriptor: " + orig); LOG.debug("Finished copying tableinfo."); List regions = MetaTableAccessor.getTableRegions(conn, table); // For each region, write the region info to disk LOG.debug("Starting to write region info for table " + table); for (RegionInfo regionInfo : regions) { Path regionDir = FSUtils - .getRegionDirFromTableDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo); + .getRegionDirFromTableDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo); regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName()); writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo); } @@ -324,9 +323,8 @@ public static void checkTargetDir(String backupRootPath, Configuration conf) thr String expMsg = e.getMessage(); String newMsg = null; if (expMsg.contains("No FileSystem for scheme")) { - newMsg = - "Unsupported filesystem scheme found in the backup target url. Error Message: " - + expMsg; + newMsg = "Unsupported filesystem scheme found in the backup target url. 
Error Message: " + + expMsg; LOG.error(newMsg); throw new IOException(newMsg); } else { @@ -449,9 +447,8 @@ private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf); for (TableName table : backupInfo.getTables()) { - Path targetDirPath = - new Path(getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), - table)); + Path targetDirPath = new Path( + getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table)); if (outputFs.delete(targetDirPath, true)) { LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); } else { @@ -482,7 +479,7 @@ private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) * @return backupPath String for the particular table */ public static String getTableBackupDir(String backupRootDir, String backupId, - TableName tableName) { + TableName tableName) { return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() + Path.SEPARATOR; @@ -516,7 +513,7 @@ public static ArrayList sortHistoryListDesc(ArrayList hi * @return null if dir is empty or doesn't exist, otherwise FileStatus array */ public static FileStatus[] listStatus(final FileSystem fs, final Path dir, - final PathFilter filter) throws IOException { + final PathFilter filter) throws IOException { FileStatus[] status = null; try { status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter); @@ -535,8 +532,8 @@ public static FileStatus[] listStatus(final FileSystem fs, final Path dir, } /** - * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the - * 'path' component of a Path's URI: e.g. If a Path is + * Return the 'path' component of a Path. In Hadoop, Path is a URI. This method returns the 'path' + * component of a Path's URI: e.g. If a Path is * hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir, this method returns * /hbase_trunk/TestTable/compaction.dir. This method is useful if you want to print * out a Path without qualifying Filesystem instance. @@ -693,11 +690,10 @@ public static boolean validate(HashMap backupManifest public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit) throws IOException { FileSystem fs = FileSystem.get(conf); - String tmp = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, - fs.getHomeDirectory() + "/hbase-staging"); - Path path = - new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-" - + EnvironmentEdgeManager.currentTime()); + String tmp = + conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging"); + Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-" + + EnvironmentEdgeManager.currentTime()); if (deleteOnExit) { fs.deleteOnExit(path); } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java index dafed11fe834..6b7dafb721fd 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup.util; import java.io.FileNotFoundException; @@ -207,7 +206,7 @@ public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[ public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName, TableName newTableName, boolean truncateIfExists, String lastIncrBackupId) - throws IOException { + throws IOException { createAndRestoreTable(conn, tableName, newTableName, tableBackupPath, truncateIfExists, lastIncrBackupId); } @@ -228,9 +227,8 @@ Path getTableSnapshotPath(Path backupRootPath, TableName tableName, String backu /** * Returns value represent path for: * ""/$USER/SBACKUP_ROOT/backup_id/namespace/table/.hbase-snapshot/ - * snapshot_1396650097621_namespace_table" - * this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo, - * .data.manifest (trunk) + * snapshot_1396650097621_namespace_table" this path contains .snapshotinfo, .tabledesc (0.96 and + * 0.98) this path contains .snapshotinfo, .data.manifest (trunk) * @param tableName table name * @return path to table info * @throws IOException exception @@ -241,7 +239,7 @@ Path getTableInfoPath(TableName tableName) throws IOException { // can't build the path directly as the timestamp values are different FileStatus[] snapshots = fs.listStatus(tableSnapShotPath, - new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); + new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); for (FileStatus snapshot : snapshots) { tableInfoPath = snapshot.getPath(); // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest"; @@ -264,9 +262,9 @@ TableDescriptor getTableDesc(TableName tableName) throws IOException { TableDescriptor tableDescriptor = manifest.getTableDescriptor(); if (!tableDescriptor.getTableName().equals(tableName)) { LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " - + tableInfoPath.toString()); - LOG.error("tableDescriptor.getNameAsString() = " - + tableDescriptor.getTableName().getNameAsString()); + + tableInfoPath.toString()); + LOG.error( + "tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString()); throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + tableInfoPath.toString()); } @@ -277,8 +275,7 @@ private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableNa String lastIncrBackupId) throws IOException { if (lastIncrBackupId != null) { String target = - BackupUtils.getTableBackupDir(backupRootPath.toString(), - lastIncrBackupId, tableName); + BackupUtils.getTableBackupDir(backupRootPath.toString(), lastIncrBackupId, tableName); return FSTableDescriptors.getTableDescriptorFromFs(fileSys, new Path(target)); } return null; @@ -315,8 +312,8 @@ private void createAndRestoreTable(Connection conn, TableName tableName, TableNa LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost"); } } else { - throw new IOException("Table snapshot directory: " + - tableSnapshotPath + " does not exist."); + throw new IOException( + "Table snapshot directory: " + tableSnapshotPath + " does not exist."); } } @@ -333,8 +330,8 @@ private void createAndRestoreTable(Connection conn, TableName tableName, TableNa truncateIfExists); return; } else 
{ - throw new IllegalStateException("Cannot restore hbase table because directory '" - + " tableArchivePath is null."); + throw new IllegalStateException( + "Cannot restore hbase table because directory '" + " tableArchivePath is null."); } } @@ -356,7 +353,8 @@ private void createAndRestoreTable(Connection conn, TableName tableName, TableNa RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf); Path[] paths = new Path[regionPathList.size()]; regionPathList.toArray(paths); - restoreService.run(paths, new TableName[]{tableName}, new TableName[] {newTableName}, true); + restoreService.run(paths, new TableName[] { tableName }, new TableName[] { newTableName }, + true); } catch (Exception e) { LOG.error(e.toString(), e); @@ -476,8 +474,8 @@ private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableNam boolean createNew = false; if (admin.tableExists(targetTableName)) { if (truncateIfExists) { - LOG.info("Truncating exising target table '" + targetTableName - + "', preserving region splits"); + LOG.info( + "Truncating exising target table '" + targetTableName + "', preserving region splits"); admin.disableTable(targetTableName); admin.truncateTable(targetTableName, true); } else { @@ -497,7 +495,7 @@ private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableNam // create table using table descriptor and region boundaries admin.createTable(htd, keys); } - } catch (NamespaceNotFoundException e){ + } catch (NamespaceNotFoundException e) { LOG.warn("There was no namespace and the same will be created"); String namespaceAsString = targetTableName.getNamespaceAsString(); LOG.info("Creating target namespace '" + namespaceAsString + "'"); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index 8a06425d2224..d50c460aea5d 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -111,8 +110,8 @@ static class IncrementalTableBackupClientForTest extends IncrementalTableBackupC public IncrementalTableBackupClientForTest() { } - public IncrementalTableBackupClientForTest(Connection conn, - String backupId, BackupRequest request) throws IOException { + public IncrementalTableBackupClientForTest(Connection conn, String backupId, + BackupRequest request) throws IOException { super(conn, backupId, request); } @@ -133,7 +132,7 @@ public void execute() throws IOException { BackupUtils.copyTableRegionInfo(conn, backupInfo, conf); // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT convertWALsToHFiles(); - incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()}, + incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() }, backupInfo.getBackupRootDir()); failStageIf(Stage.stage_2); @@ -215,9 +214,8 @@ public void execute() throws IOException { // SNAPSHOT_TABLES: backupInfo.setPhase(BackupPhase.SNAPSHOT); for (TableName tableName : tableList) { - String snapshotName = - "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" - + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); + String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); snapshotTable(admin, tableName, snapshotName); backupInfo.setSnapshotName(tableName, snapshotName); @@ -242,8 +240,7 @@ public void execute() throws IOException { backupManager.readLogTimestampMap(); Long newStartCode = - BackupUtils.getMinValue(BackupUtils - .getRSLogTimestampMins(newTableSetTimestampMap)); + BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap)); backupManager.writeBackupStartCode(newStartCode); failStageIf(Stage.stage_4); // backup complete @@ -251,7 +248,7 @@ public void execute() throws IOException { } catch (Exception e) { - if(autoRestoreOnFailure) { + if (autoRestoreOnFailure) { failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ", BackupType.FULL, conf); } @@ -261,13 +258,13 @@ public void execute() throws IOException { } public static void setUpHelper() throws Exception { - BACKUP_ROOT_DIR = Path.SEPARATOR +"backupUT"; + BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT"; BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT"; if (secure) { // set the always on security provider UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(), - HadoopSecurityEnabledUserProviderForTesting.class); + HadoopSecurityEnabledUserProviderForTesting.class); // setup configuration SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); } @@ -299,23 +296,21 @@ public static void setUpHelper() throws Exception { TEST_UTIL.startMiniMapReduceCluster(); BACKUP_ROOT_DIR = - new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), - BACKUP_ROOT_DIR).toString(); + new Path(new Path(TEST_UTIL.getConfiguration().get("fs.defaultFS")), BACKUP_ROOT_DIR) + .toString(); LOG.info("ROOTDIR " + BACKUP_ROOT_DIR); if (useSecondCluster) { - BACKUP_REMOTE_ROOT_DIR = - new Path(new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")) - + BACKUP_REMOTE_ROOT_DIR).toString(); + BACKUP_REMOTE_ROOT_DIR = new Path( + new Path(TEST_UTIL2.getConfiguration().get("fs.defaultFS")) + BACKUP_REMOTE_ROOT_DIR) + .toString(); LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR); } createTables(); 
populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1); } - /** * Setup Cluster with appropriate configurations before running tests. - * * @throws Exception if starting the mini cluster or setting up the tables fails */ @BeforeClass @@ -327,7 +322,6 @@ public static void setUp() throws Exception { setUpHelper(); } - private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) { Iterator> it = masterConf.iterator(); while (it.hasNext()) { @@ -341,7 +335,7 @@ private static void populateFromMasterConfig(Configuration masterConf, Configura */ @AfterClass public static void tearDown() throws Exception { - try{ + try { SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin()); } catch (Exception e) { } @@ -367,12 +361,11 @@ Table insertIntoTable(Connection conn, TableName table, byte[] family, int id, i return t; } - protected BackupRequest createBackupRequest(BackupType type, - List tables, String path) { + protected BackupRequest createBackupRequest(BackupType type, List tables, + String path) { BackupRequest.Builder builder = new BackupRequest.Builder(); - BackupRequest request = builder.withBackupType(type) - .withTableList(tables) - .withTargetRootDir(path).build(); + BackupRequest request = + builder.withBackupType(type).withTableList(tables).withTargetRootDir(path).build(); return request; } @@ -427,7 +420,7 @@ protected static void createTables() throws Exception { ha.createNamespace(NamespaceDescriptor.create("ns4").build()); TableDescriptor desc = TableDescriptorBuilder.newBuilder(table1) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build(); ha.createTable(desc); table1Desc = desc; Connection conn = ConnectionFactory.createConnection(conf1); @@ -436,7 +429,7 @@ protected static void createTables() throws Exception { table.close(); table2 = TableName.valueOf("ns2:test-" + tid + 1); desc = TableDescriptorBuilder.newBuilder(table2) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(famName)).build(); ha.createTable(desc); table = conn.getTable(table2); loadTable(table); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java index e4c74cfb85f3..e9037c519ba2 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,6 @@ public class TestBackupBoundaryTests extends TestBackupBase { /** * Verify that full backup is created on a single empty table correctly. - * * @throws Exception if doing the full backup fails */ @Test @@ -53,7 +52,6 @@ public void testFullBackupSingleEmpty() throws Exception { /** * Verify that full backup is created on multiple empty tables correctly. - * * @throws Exception if doing the full backup fails */ @Test @@ -66,7 +64,6 @@ public void testFullBackupMultipleEmpty() throws Exception { /** * Verify that full backup fails on a single table that does not exist. 
- * * @throws Exception if doing the full backup fails */ @Test(expected = IOException.class) @@ -78,7 +75,6 @@ public void testFullBackupSingleDNE() throws Exception { /** * Verify that full backup fails on multiple tables that do not exist. - * * @throws Exception if doing the full backup fails */ @Test(expected = IOException.class) @@ -90,7 +86,6 @@ public void testFullBackupMultipleDNE() throws Exception { /** * Verify that full backup fails on tableset containing real and fake tables. - * * @throws Exception if doing the full backup fails */ @Test(expected = IOException.class) diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java index acde21e3eb5a..abe2be1a7f11 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -86,7 +86,6 @@ public void testBackupDriverDescribeHelp() throws Exception { assertTrue(output.indexOf(USAGE_DESCRIBE) >= 0); } - @Test public void testBackupDriverCreateTopLevelBackupDest() throws Exception { String[] args = new String[] { "create", "full", "hdfs://localhost:1020", "-t", "t1" }; @@ -107,7 +106,6 @@ public void testBackupDriverCreateHelp() throws Exception { assertTrue(output.indexOf(USAGE_CREATE) >= 0); assertTrue(output.indexOf(BackupRestoreConstants.OPTION_TABLE_LIST_DESC) > 0); - baos = new ByteArrayOutputStream(); System.setOut(new PrintStream(baos)); args = new String[] { "create", "-h" }; diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java index bc8b346175a6..0200c51e8613 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,6 @@ public class TestBackupDelete extends TestBackupBase { /** * Verify that full backup is created on a single table with data correctly. Verify that history * works as expected. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -80,7 +79,6 @@ public void testBackupDelete() throws Exception { /** * Verify that full backup is created on a single table with data correctly. Verify that history * works as expected. 
- * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -116,7 +114,7 @@ public void testBackupPurgeOldBackupsCommand() throws Exception { // time - 2 days @Override public long currentTime() { - return System.currentTimeMillis() - 2 * 24 * 3600 * 1000 ; + return System.currentTimeMillis() - 2 * 24 * 3600 * 1000; } }); String backupId = fullTableBackup(tableList); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java index f649b921b272..5f8a89ece37f 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,6 @@ public class TestBackupDeleteRestore extends TestBackupBase { /** * Verify that load data- backup - delete some data - restore works as expected - deleted data get * restored. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -72,8 +71,8 @@ public void testBackupDeleteRestore() throws Exception { TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = null;// new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, tablemap, true)); + client.restore( + BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, true)); int numRowsAfterRestore = TEST_UTIL.countRows(table1); assertEquals(numRows, numRowsAfterRestore); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java index 2ab6f55f5b06..a9c3c3a03f74 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,7 @@ * tests should have their own classes and extend this one */ @Category(LargeTests.class) -public class TestBackupDeleteWithFailures extends TestBackupBase{ +public class TestBackupDeleteWithFailures extends TestBackupBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -66,16 +66,13 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{ private static final Logger LOG = LoggerFactory.getLogger(TestBackupDeleteWithFailures.class); public enum Failure { - NO_FAILURES, - PRE_SNAPSHOT_FAILURE, - PRE_DELETE_SNAPSHOT_FAILURE, - POST_DELETE_SNAPSHOT_FAILURE + NO_FAILURES, PRE_SNAPSHOT_FAILURE, PRE_DELETE_SNAPSHOT_FAILURE, POST_DELETE_SNAPSHOT_FAILURE } public static class MasterSnapshotObserver implements MasterCoprocessor, MasterObserver { List failures = new ArrayList<>(); - public void setFailures(Failure ... f) { + public void setFailures(Failure... 
f) { failures.clear(); for (int i = 0; i < f.length; i++) { failures.add(f[i]); @@ -115,15 +112,13 @@ public void postDeleteSnapshot(ObserverContext ctx /** * Setup Cluster with appropriate configurations before running tests. - * * @throws Exception if starting the mini cluster or setting up the tables fails */ @BeforeClass public static void setUp() throws Exception { TEST_UTIL = new HBaseTestingUtil(); conf1 = TEST_UTIL.getConfiguration(); - conf1.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - MasterSnapshotObserver.class.getName()); + conf1.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MasterSnapshotObserver.class.getName()); conf1.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); setUpHelper(); } @@ -140,9 +135,9 @@ public void testBackupDeleteWithFailures() throws Exception { testBackupDeleteWithFailuresAfter(1, Failure.PRE_SNAPSHOT_FAILURE); } - private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures) - throws Exception { - LOG.info("test repair backup delete on a single table with data and failures "+ failures[0]); + private void testBackupDeleteWithFailuresAfter(int expected, Failure... failures) + throws Exception { + LOG.info("test repair backup delete on a single table with data and failures " + failures[0]); List tableList = Lists.newArrayList(table1); String backupId = fullTableBackup(tableList); assertTrue(checkSucceeded(backupId)); @@ -161,8 +156,8 @@ private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures observer.setFailures(failures); try { getBackupAdmin().deleteBackups(backupIds); - } catch(IOException e) { - if(expected != 1) { + } catch (IOException e) { + if (expected != 1) { assertTrue(false); } } @@ -173,7 +168,7 @@ private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures String[] ids = table.getListOfBackupIdsFromDeleteOperation(); // Verify that we still have delete record in backup system table - if(expected == 1) { + if (expected == 1) { assertTrue(ids.length == 1); assertTrue(ids[0].equals(backupId)); } else { @@ -181,7 +176,7 @@ private void testBackupDeleteWithFailuresAfter(int expected, Failure ...failures } // Now run repair command to repair "failed" delete operation - String[] args = new String[] {"repair"}; + String[] args = new String[] { "repair" }; observer.setFailures(Failure.NO_FAILURES); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java index 6ab3d04feff4..257d1b800885 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,6 @@ public class TestBackupDescribe extends TestBackupBase { /** * Verify that describe works as expected if incorrect backup Id is supplied. 
- * * @throws Exception if creating the {@link BackupDriver} fails */ @Test diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java index 8393087477df..7e89a30def2b 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -121,8 +121,9 @@ public void testGetDeletableFiles() throws IOException { found = true; } } - assertTrue("Cleaner should allow to delete this file as there is no hfile reference " - + "for it.", found); + assertTrue( + "Cleaner should allow to delete this file as there is no hfile reference " + "for it.", + found); // 4. Add the file as bulk load List list = new ArrayList<>(1); @@ -146,7 +147,8 @@ public void testGetDeletableFiles() throws IOException { found = true; } } - assertFalse("Cleaner should not allow to delete this file as there is a hfile reference " - + "for it.", found); + assertFalse( + "Cleaner should not allow to delete this file as there is a hfile reference " + "for it.", + found); } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java index 91bd185b872c..aca82265a7d8 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicLongArray; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -51,7 +49,7 @@ public class TestBackupManager { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBackupManager.class); + HBaseClassTestRule.forClass(TestBackupManager.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); protected static Configuration conf = UTIL.getConfiguration(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java index 1a8638c3b7dc..72c867d94988 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,10 +45,7 @@ public class TestBackupMerge extends TestBackupBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestBackupMerge.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestBackupMerge.class); - - + private static final Logger LOG = LoggerFactory.getLogger(TestBackupMerge.class); @Test public void TestIncBackupMergeRestore() throws Exception { @@ -59,7 +56,6 @@ public void TestIncBackupMergeRestore() throws Exception { List tables = Lists.newArrayList(table1, table2); // Set custom Merge Job implementation - Connection conn = ConnectionFactory.createConnection(conf1); Admin admin = conn.getAdmin(); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java index 538488b4c4e4..9cd04a492f9f 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java index a0369890f3fa..2c0261f721e5 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,6 @@ public class TestBackupRepair extends TestBackupBase { private static final Logger LOG = LoggerFactory.getLogger(TestBackupRepair.class); - @Test public void testFullBackupWithFailuresAndRestore() throws Exception { @@ -52,7 +51,7 @@ public void testFullBackupWithFailuresAndRestore() throws Exception { conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS, FullTableBackupClientForTest.class.getName()); - int maxStage = Stage.values().length -1; + int maxStage = Stage.values().length - 1; // Fail stage in loop between 0 and 4 inclusive for (int stage = 0; stage < maxStage; stage++) { LOG.info("Running stage " + stage); @@ -65,23 +64,22 @@ public void runBackupAndFailAtStageWithRestore(int stage) throws Exception { conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage); try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); - String[] args = - new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", - table1.getNameAsString() + "," + table2.getNameAsString() }; + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", + table1.getNameAsString() + "," + table2.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertFalse(ret == 0); // Now run restore - args = new String[] {"repair"}; + args = new String[] { "repair" }; - ret = ToolRunner.run(conf1, new BackupDriver(), args); + ret = ToolRunner.run(conf1, new BackupDriver(), args); assertTrue(ret == 0); List backups = table.getBackupHistory(); int after = table.getBackupHistory().size(); - assertTrue(after == before +1); + assertTrue(after == before + 1); for (BackupInfo data : backups) { String backupId = data.getBackupId(); assertFalse(checkSucceeded(backupId)); @@ -91,5 +89,4 @@ public void runBackupAndFailAtStageWithRestore(int stage) throws Exception { } } - } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java index 4526070106d1..37fe1051bab7 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,6 @@ private boolean findBackup(List history, String backupId) { /** * Verify that full backup is created on a single table with data correctly. Verify that history * works as expected. 
- * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -120,9 +119,8 @@ public void testBackupHistory() throws Exception { } assertTrue(success); - history = - BackupUtils.getHistory(conf1, 10, new Path(BACKUP_ROOT_DIR), tableNameFilter, - tableSetFilter); + history = BackupUtils.getHistory(conf1, 10, new Path(BACKUP_ROOT_DIR), tableNameFilter, + tableSetFilter); assertTrue(history.size() > 0); success = true; for (BackupInfo info : history) { @@ -133,9 +131,8 @@ public void testBackupHistory() throws Exception { } assertTrue(success); - args = - new String[] { "history", "-n", "10", "-p", BACKUP_ROOT_DIR, - "-t", "table1", "-s", "backup" }; + args = new String[] { "history", "-n", "10", "-p", BACKUP_ROOT_DIR, "-t", "table1", "-s", + "backup" }; // Run backup ret = ToolRunner.run(conf1, new BackupDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java index 1aa267b67bbd..d9a3c1dd10b0 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSmallTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,13 +40,15 @@ public class TestBackupSmallTests extends TestBackupBase { UserGroupInformation.createUserForTesting("diana", new String[] {}); private static final String PERMISSION_TEST_PATH = Path.SEPARATOR + "permissionUT"; - @Test public void testBackupPathIsAccessible() throws Exception { + @Test + public void testBackupPathIsAccessible() throws Exception { Path path = new Path(PERMISSION_TEST_PATH); FileSystem fs = FileSystem.get(TEST_UTIL.getConnection().getConfiguration()); fs.mkdirs(path); } - @Test(expected = IOException.class) public void testBackupPathIsNotAccessible() throws Exception { + @Test(expected = IOException.class) + public void testBackupPathIsNotAccessible() throws Exception { Path path = new Path(PERMISSION_TEST_PATH); FileSystem rootFs = FileSystem.get(TEST_UTIL.getConnection().getConfiguration()); rootFs.mkdirs(path.getParent()); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java index 6d2091ea697c..2fb3da9b14f2 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,6 @@ public class TestBackupStatusProgress extends TestBackupBase { /** * Verify that full backup is created on a single table with data correctly. 
- * * @throws Exception if doing the backup or an operation on the tables fails */ @Test diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java index e5a6679a97ac..8c591bc8f64e 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -354,8 +355,8 @@ public void testBackupSetAddExists() throws IOException { String[] addTables = new String[] { "table4", "table5", "table6" }; table.addToBackupSet(setName, addTables); - Set expectedTables = new HashSet<>(Arrays.asList("table1", "table2", "table3", - "table4", "table5", "table6")); + Set expectedTables = + new HashSet<>(Arrays.asList("table1", "table2", "table3", "table4", "table5", "table6")); List tnames = table.describeBackupSet(setName); assertTrue(tnames != null); @@ -377,8 +378,8 @@ public void testBackupSetAddExistsIntersects() throws IOException { String[] addTables = new String[] { "table3", "table4", "table5", "table6" }; table.addToBackupSet(setName, addTables); - Set expectedTables = new HashSet<>(Arrays.asList("table1", "table2", "table3", - "table4", "table5", "table6")); + Set expectedTables = + new HashSet<>(Arrays.asList("table1", "table2", "table3", "table4", "table5", "table6")); List tnames = table.describeBackupSet(setName); assertTrue(tnames != null); @@ -477,8 +478,9 @@ private boolean compare(BackupInfo one, BackupInfo two) { private BackupInfo createBackupInfo() { BackupInfo ctxt = - new BackupInfo("backup_" + System.nanoTime(), BackupType.FULL, new TableName[] { - TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") }, + new BackupInfo( + "backup_" + System.nanoTime(), BackupType.FULL, new TableName[] { + TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") }, "/hbase/backup"); ctxt.setStartTs(EnvironmentEdgeManager.currentTime()); ctxt.setCompleteTs(EnvironmentEdgeManager.currentTime() + 1); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java index fddbec2e31e6..f543e8c30a3c 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.security.PrivilegedAction; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -50,11 +49,12 @@ public void TestGetBulkOutputDir() { // Create a user who is not the current user String fooUserName = "foo1234"; String fooGroupName = "group1"; - UserGroupInformation - ugi = UserGroupInformation.createUserForTesting(fooUserName, new String[]{fooGroupName}); + UserGroupInformation ugi = + UserGroupInformation.createUserForTesting(fooUserName, new String[] { fooGroupName }); // Get user's home directory Path fooHomeDirectory = ugi.doAs(new PrivilegedAction() { - @Override public Path run() { + @Override + public Path run() { try (FileSystem fs = FileSystem.get(conf)) { return fs.getHomeDirectory(); } catch (IOException ioe) { @@ -65,7 +65,8 @@ public void TestGetBulkOutputDir() { }); Path bulkOutputDir = ugi.doAs(new PrivilegedAction() { - @Override public Path run() { + @Override + public Path run() { try { return BackupUtils.getBulkOutputDir("test", conf, false); } catch (IOException ioe) { diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java index 28624338f3a3..f475df4de7a7 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,9 +44,8 @@ public void testFullBackupMultipleCommand() throws Exception { LOG.info("test full backup on a multiple tables with data: command-line"); try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); - String[] args = - new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", - table1.getNameAsString() + "," + table2.getNameAsString() }; + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", + table1.getNameAsString() + "," + table2.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java index 7a3aec46a9a5..3d7c829bdc12 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,6 @@ public class TestFullBackupSet extends TestBackupBase { /** * Verify that full backup is created on a single table with data correctly. 
- * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -74,9 +73,8 @@ public void testFullBackupSetExist() throws Exception { LOG.info("backup complete"); // Restore from set into other table - args = - new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", - table1_restore.getNameAsString(), "-o" }; + args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", + table1_restore.getNameAsString(), "-o" }; // Run backup ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java index 3543133734e5..fdbf8854c4e5 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,9 +70,8 @@ public void testFullRestoreSetToOtherTable() throws Exception { LOG.info("backup complete"); // Restore from set into other table - args = - new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", - table1_restore.getNameAsString(), "-o" }; + args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", + table1_restore.getNameAsString(), "-o" }; // Run backup ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java index bf3a9896e548..20c824873efc 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ public class TestFullBackupWithFailures extends TestBackupBase { public void testFullBackupWithFailures() throws Exception { conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS, FullTableBackupClientForTest.class.getName()); - int maxStage = Stage.values().length -1; + int maxStage = Stage.values().length - 1; // Fail stages between 0 and 4 inclusive for (int stage = 0; stage <= maxStage; stage++) { LOG.info("Running stage " + stage); @@ -61,16 +61,15 @@ public void runBackupAndFailAtStage(int stage) throws Exception { conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage); try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); - String[] args = - new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", - table1.getNameAsString() + "," + table2.getNameAsString() }; + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", + table1.getNameAsString() + "," + table2.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertFalse(ret == 0); List backups = table.getBackupHistory(); int after = table.getBackupHistory().size(); - assertTrue(after == before +1); + assertTrue(after == before + 1); for (BackupInfo data : backups) { String backupId = data.getBackupId(); assertFalse(checkSucceeded(backupId)); @@ -80,5 +79,4 @@ public void runBackupAndFailAtStage(int stage) throws Exception { } } - } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java index f5ad0d7b827e..8df5b343df3f 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,6 @@ public class TestFullRestore extends TestBackupBase { /** * Verify that a single table is restored to a new table. 
- * * @throws Exception if doing the backup, restoring it or an operation on the tables fails */ @Test @@ -64,8 +63,8 @@ public void testFullRestoreSingle() throws Exception { TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, tablemap, false)); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, + tablemap, false)); Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); @@ -81,9 +80,8 @@ public void testFullRestoreSingleCommand() throws Exception { LOG.info("backup complete"); assertTrue(checkSucceeded(backupId)); // restore [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", - table1_restore.getNameAsString() }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", + table1_restore.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new RestoreDriver(), args); @@ -103,20 +101,18 @@ public void testFullRestoreCheckCommand() throws Exception { LOG.info("backup complete"); assertTrue(checkSucceeded(backupId)); // restore [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", - table1_restore.getNameAsString(), "-c" }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, "-t", table1.getNameAsString(), "-m", + table1_restore.getNameAsString(), "-c" }; // Run backup int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret == 0); - //Verify that table has not been restored + // Verify that table has not been restored Admin hba = TEST_UTIL.getAdmin(); assertFalse(hba.tableExists(table1_restore)); } /** * Verify that multiple tables are restored to new tables. - * * @throws Exception if doing the backup, restoring it or an operation on the tables fails */ @Test @@ -141,7 +137,6 @@ public void testFullRestoreMultiple() throws Exception { /** * Verify that multiple tables are restored to new tables. - * * @throws Exception if doing the backup, restoring it or an operation on the tables fails */ @Test @@ -155,9 +150,8 @@ public void testFullRestoreMultipleCommand() throws Exception { TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; // restore [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(restore_tableset, ","), - "-m", StringUtils.join(tablemap, ",") }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, "-t", + StringUtils.join(restore_tableset, ","), "-m", StringUtils.join(tablemap, ",") }; // Run backup int ret = ToolRunner.run(conf1, new RestoreDriver(), args); @@ -172,7 +166,6 @@ public void testFullRestoreMultipleCommand() throws Exception { /** * Verify that a single table is restored using overwrite. 
- * * @throws Exception if doing the backup or restoring it fails */ @Test @@ -186,13 +179,12 @@ public void testFullRestoreSingleOverwrite() throws Exception { TableName[] tableset = new TableName[] { table1 }; BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, null, true)); + client.restore( + BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, null, true)); } /** * Verify that a single table is restored using overwrite. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -217,7 +209,6 @@ public void testFullRestoreSingleOverwriteCommand() throws Exception { /** * Verify that multiple tables are restored to new tables using overwrite. - * * @throws Exception if doing the backup or restoring it fails */ @Test @@ -236,7 +227,6 @@ public void testFullRestoreMultipleOverwrite() throws Exception { /** * Verify that multiple tables are restored to new tables using overwrite. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -249,8 +239,7 @@ public void testFullRestoreMultipleOverwriteCommand() throws Exception { TableName[] restore_tableset = new TableName[] { table2, table3 }; // restore [tableMapping] - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, "-t", + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, "-t", StringUtils.join(restore_tableset, ","), "-o" }; // Run backup int ret = ToolRunner.run(conf1, new RestoreDriver(), args); @@ -264,7 +253,6 @@ public void testFullRestoreMultipleOverwriteCommand() throws Exception { /** * Verify that restore fails on a single table that does not exist. - * * @throws Exception if doing the backup or restoring it fails */ @Test(expected = IOException.class) @@ -279,13 +267,12 @@ public void testFullRestoreSingleDNE() throws Exception { TableName[] tableset = new TableName[] { TableName.valueOf("faketable") }; TableName[] tablemap = new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, - tableset, tablemap, false)); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, + tablemap, false)); } /** * Verify that restore fails on a single table that does not exist. - * * @throws Exception if doing the backup or restoring it fails */ @Test @@ -299,9 +286,8 @@ public void testFullRestoreSingleDNECommand() throws Exception { TableName[] tableset = new TableName[] { TableName.valueOf("faketable") }; TableName[] tablemap = new TableName[] { table1_restore }; - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, StringUtils.join(tableset, ","), "-m", - StringUtils.join(tablemap, ",") }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, StringUtils.join(tableset, ","), "-m", + StringUtils.join(tablemap, ",") }; // Run restore int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret != 0); @@ -309,7 +295,6 @@ public void testFullRestoreSingleDNECommand() throws Exception { /** * Verify that restore fails on multiple tables that do not exist. - * * @throws Exception if doing the backup or restoring it fails */ @Test(expected = IOException.class) @@ -330,7 +315,6 @@ public void testFullRestoreMultipleDNE() throws Exception { /** * Verify that restore fails on multiple tables that do not exist. 
- * * @throws Exception if doing the backup or restoring it fails */ @Test @@ -344,9 +328,8 @@ public void testFullRestoreMultipleDNECommand() throws Exception { TableName[] restore_tableset = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - String[] args = - new String[] { BACKUP_ROOT_DIR, backupId, StringUtils.join(restore_tableset, ","), "-m", - StringUtils.join(tablemap, ",") }; + String[] args = new String[] { BACKUP_ROOT_DIR, backupId, + StringUtils.join(restore_tableset, ","), "-m", StringUtils.join(tablemap, ",") }; // Run restore int ret = ToolRunner.run(conf1, new RestoreDriver(), args); assertTrue(ret != 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java index ea552b7945a3..2b65a6df3e6e 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java @@ -85,9 +85,10 @@ public void TestIncBackupRestore() throws Exception { final byte[] mobName = Bytes.toBytes("mob"); TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true) - .setMobThreshold(5L).build()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true) + .setMobThreshold(5L).build()) + .build(); TEST_UTIL.getAdmin().modifyTable(newTable1Desc); try (Connection conn = ConnectionFactory.createConnection(conf1)) { @@ -104,7 +105,7 @@ public void TestIncBackupRestore() throws Exception { Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS); LOG.debug("writing " + ADD_ROWS + " rows to " + table1); Assert.assertEquals(HBaseTestingUtil.countRows(t1), - NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3); + NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3); LOG.debug("written " + ADD_ROWS + " rows to " + table1); // additionally, insert rows to MOB cf int NB_ROWS_MOB = 111; @@ -112,7 +113,7 @@ public void TestIncBackupRestore() throws Exception { LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to Mob enabled CF"); t1.close(); Assert.assertEquals(HBaseTestingUtil.countRows(t1), - NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB); + NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB); Table t2 = conn.getTable(table2); Put p2; for (int i = 0; i < 5; i++) { @@ -152,8 +153,8 @@ public void TestIncBackupRestore() throws Exception { // drop column family f3 final byte[] fam2Name = Bytes.toBytes("f2"); newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name) + .build(); TEST_UTIL.getAdmin().modifyTable(newTable1Desc); int NB_ROWS_FAM2 = 7; @@ -174,7 +175,7 @@ public void TestIncBackupRestore() throws Exception { LOG.debug("Restoring full " + backupIdFull); client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, - tablesRestoreFull, tablesMapFull, true)); + tablesRestoreFull, tablesMapFull, true)); // #6.1 - check tables for full restore Admin hAdmin = TEST_UTIL.getAdmin(); 
@@ -194,8 +195,8 @@ public void TestIncBackupRestore() throws Exception { // #7 - restore incremental backup for multiple tables, with overwrite TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 }; TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore }; - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, - false, tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false, + tablesRestoreIncMultiple, tablesMapIncMultiple, true)); hTable = conn.getTable(table1_restore); LOG.debug("After incremental restore: " + hTable.getDescriptor()); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java index 837de4dd6166..6cb501c5b9b4 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,11 +41,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * 1. Create table t1, t2 - * 2. Load data to t1, t2 - * 3 Full backup t1, t2 - * 4 Delete t2 - * 5 Load data to t1 + * 1. Create table t1, t2 2. Load data to t1, t2 3 Full backup t1, t2 4 Delete t2 5 Load data to t1 * 6 Incremental backup t1 */ @Category(LargeTests.class) @@ -120,8 +116,8 @@ public void testIncBackupDeleteTable() throws Exception { // #6 - restore incremental backup for table1 TableName[] tablesRestoreIncMultiple = new TableName[] { table1 }; TableName[] tablesMapIncMultiple = new TableName[] { table1_restore }; - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, - false, tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, + tablesRestoreIncMultiple, tablesMapIncMultiple, true)); hTable = conn.getTable(table1_restore); Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java index 1bde63ba5527..758d3e9bb6c5 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -82,8 +82,7 @@ public void setConf(Configuration conf) { } /** - * This is the exact copy of parent's run() with injections - * of different types of failures + * This is the exact copy of parent's run() with injections of different types of failures */ @Override public void run(String[] backupIds) throws IOException { @@ -128,9 +127,8 @@ public void run(String[] backupIds) throws IOException { // Find input directories for table Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds); String dirs = StringUtils.join(dirPaths, ","); - Path bulkOutputPath = - BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]), - getConf(), false); + Path bulkOutputPath = BackupUtils.getBulkOutputDir( + BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false); // Delete content if exists if (fs.exists(bulkOutputPath)) { if (!fs.delete(bulkOutputPath, true)) { @@ -163,13 +161,13 @@ public void run(String[] backupIds) throws IOException { // (modification of a backup file system) // Move existing mergedBackupId data into tmp directory // we will need it later in case of a failure - Path tmpBackupDir = HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, - mergedBackupId); + Path tmpBackupDir = + HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, mergedBackupId); Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId); if (!fs.rename(backupDirPath, tmpBackupDir)) { - throw new IOException("Failed to rename "+ backupDirPath +" to "+tmpBackupDir); + throw new IOException("Failed to rename " + backupDirPath + " to " + tmpBackupDir); } else { - LOG.debug("Renamed "+ backupDirPath +" to "+ tmpBackupDir); + LOG.debug("Renamed " + backupDirPath + " to " + tmpBackupDir); } // Move new data into backup dest for (Pair tn : processedTableList) { @@ -184,7 +182,7 @@ public void run(String[] backupIds) throws IOException { // Delete tmp dir (Rename back during repair) if (!fs.delete(tmpBackupDir, true)) { // WARN and ignore - LOG.warn("Could not delete tmp dir: "+ tmpBackupDir); + LOG.warn("Could not delete tmp dir: " + tmpBackupDir); } // Delete old data deleteBackupImages(backupsToDelete, conn, fs, backupRoot); @@ -274,7 +272,7 @@ public void TestIncBackupMergeRestore() throws Exception { request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); String backupIdIncMultiple2 = client.backupTables(request); assertTrue(checkSucceeded(backupIdIncMultiple2)); - // #4 Merge backup images with failures + // #4 Merge backup images with failures for (FailurePhase phase : FailurePhase.values()) { Configuration conf = conn.getConfiguration(); @@ -287,14 +285,14 @@ public void TestIncBackupMergeRestore() throws Exception { Assert.fail("Expected IOException"); } catch (IOException e) { BackupSystemTable table = new BackupSystemTable(conn); - if(phase.ordinal() < FailurePhase.PHASE4.ordinal()) { + if (phase.ordinal() < FailurePhase.PHASE4.ordinal()) { // No need to repair: // Both Merge and backup exclusive operations are finished assertFalse(table.isMergeInProgress()); try { table.finishBackupExclusiveOperation(); Assert.fail("IOException is expected"); - } catch(IOException ee) { + } catch (IOException ee) { // Expected } } else { @@ -303,14 +301,14 @@ public void TestIncBackupMergeRestore() throws Exception { try { table.startBackupExclusiveOperation(); Assert.fail("IOException is expected"); - } catch(IOException ee) { 
+ } catch (IOException ee) { // Expected - clean up before proceeding - //table.finishMergeOperation(); - //table.finishBackupExclusiveOperation(); + // table.finishMergeOperation(); + // table.finishBackupExclusiveOperation(); } } table.close(); - LOG.debug("Expected :"+ e.getMessage()); + LOG.debug("Expected :" + e.getMessage()); } } // Now merge w/o failures diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java index 60aa635045a7..cbfaf2ad88a0 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,12 +45,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * 1. Create table t1 - * 2. Load data to t1 - * 3 Full backup t1 - * 4 Load data to t1 - * 5 bulk load into t1 - * 6 Incremental backup t1 + * 1. Create table t1 2. Load data to t1 3 Full backup t1 4 Load data to t1 5 bulk load into t1 6 + * Incremental backup t1 */ @Category(LargeTests.class) public class TestIncrementalBackupWithBulkLoad extends TestBackupBase { @@ -92,11 +88,11 @@ public void TestIncBackupDeleteTable() throws Exception { int NB_ROWS2 = 20; LOG.debug("bulk loading into " + testName); - int actual = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, - qualName, false, null, new byte[][][] { - new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, - }, true, false, true, NB_ROWS_IN_BATCH*2, NB_ROWS2); + int actual = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName, + false, null, + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }, + true, false, true, NB_ROWS_IN_BATCH * 2, NB_ROWS2); // #3 - incremental backup for table1 tables = Lists.newArrayList(table1); @@ -105,10 +101,10 @@ public void TestIncBackupDeleteTable() throws Exception { assertTrue(checkSucceeded(backupIdIncMultiple)); // #4 bulk load again LOG.debug("bulk loading into " + testName); - int actual1 = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, - qualName, false, null, + int actual1 = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName, + false, null, new byte[][][] { new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("qqq") }, - new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, }, + new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, }, true, false, true, NB_ROWS_IN_BATCH * 2 + actual, NB_ROWS2); // #5 - incremental backup for table1 @@ -123,9 +119,9 @@ public void TestIncBackupDeleteTable() throws Exception { // #6 - restore incremental backup for table1 TableName[] tablesRestoreIncMultiple = new TableName[] { table1 }; - //TableName[] tablesMapIncMultiple = new TableName[] { table1_restore }; - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1, - false, tablesRestoreIncMultiple, tablesRestoreIncMultiple, true)); + // TableName[] tablesMapIncMultiple = new TableName[] { table1_restore }; + 
client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1, false, + tablesRestoreIncMultiple, tablesRestoreIncMultiple, true)); Table hTable = conn.getTable(table1); Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1); @@ -133,10 +129,10 @@ public void TestIncBackupDeleteTable() throws Exception { backupIdFull = client.backupTables(request); try (final BackupSystemTable table = new BackupSystemTable(conn)) { - Pair>>>>, List> pair - = table.readBulkloadRows(tables); + Pair>>>>, List> pair = + table.readBulkloadRows(tables); assertTrue("map still has " + pair.getSecond().size() + " entries", - pair.getSecond().isEmpty()); + pair.getSecond().isEmpty()); } assertTrue(checkSucceeded(backupIdFull)); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java index 00b13ba8dbf8..864601663e9e 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java @@ -84,7 +84,7 @@ public void testIncBackupRestore() throws Exception { List tables = Lists.newArrayList(table1, table2); final byte[] fam3Name = Bytes.toBytes("f3"); TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)).build(); TEST_UTIL.getAdmin().modifyTable(newTable1Desc); Connection conn = ConnectionFactory.createConnection(conf1); @@ -127,11 +127,10 @@ public void testIncBackupRestore() throws Exception { } - private void incrementalBackupWithFailures() throws Exception { conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS, IncrementalTableBackupClientForTest.class.getName()); - int maxStage = Stage.values().length -1; + int maxStage = Stage.values().length - 1; // Fail stages between 0 and 4 inclusive for (int stage = 0; stage <= maxStage; stage++) { LOG.info("Running stage " + stage); @@ -144,18 +143,17 @@ private void runBackupAndFailAtStage(int stage) throws Exception { conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage); try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); - String[] args = - new String[] { "create", "incremental", BACKUP_ROOT_DIR, "-t", - table1.getNameAsString() + "," + table2.getNameAsString() }; + String[] args = new String[] { "create", "incremental", BACKUP_ROOT_DIR, "-t", + table1.getNameAsString() + "," + table2.getNameAsString() }; // Run backup int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertFalse(ret == 0); List backups = table.getBackupHistory(); int after = table.getBackupHistory().size(); - assertTrue(after == before +1); + assertTrue(after == before + 1); for (BackupInfo data : backups) { - if(data.getType() == BackupType.FULL) { + if (data.getType() == BackupType.FULL) { assertTrue(data.getState() == BackupState.COMPLETE); } else { assertTrue(data.getState() == BackupState.FAILED); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java index 4150d3fd2fc5..ec25ea5df10c 100644 --- 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java @@ -59,7 +59,6 @@ public class TestRemoteBackup extends TestBackupBase { /** * Setup Cluster with appropriate configurations before running tests. - * * @throws Exception if starting the mini cluster or setting up the tables fails */ @BeforeClass @@ -73,7 +72,6 @@ public static void setUp() throws Exception { /** * Verify that a remote full backup is created on a single table with data correctly. - * * @throws Exception if an operation on the table fails */ @Test @@ -106,10 +104,10 @@ public void testFullBackupRemote() throws Exception { t.start(); // family 2 is MOB enabled TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2Name).setMobEnabled(true) - .setMobThreshold(0L).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2Name).setMobEnabled(true) + .setMobThreshold(0L).build()) + .build(); TEST_UTIL.getAdmin().modifyTable(newTable1Desc); SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java index 8dd4f7924703..7dcc905d36f2 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,6 @@ public class TestRemoteRestore extends TestBackupBase { /** * Setup Cluster with appropriate configurations before running tests. - * * @throws Exception if starting the mini cluster or setting up the tables fails */ @BeforeClass @@ -56,7 +55,6 @@ public static void setUp() throws Exception { /** * Verify that a remote restore on a single table is successful. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -67,9 +65,8 @@ public void testFullRestoreRemote() throws Exception { LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; - getBackupAdmin().restore( - BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset, - tablemap, false)); + getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, + false, tableset, tablemap, false)); Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java index 62a1f8f294cf..1b847732aedf 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -83,7 +83,7 @@ public void testRepairBackupDelete() throws Exception { table.startDeleteOperation(backupIds); // Now run repair command to repair "failed" delete operation - String[] args = new String[] {"repair"}; + String[] args = new String[] { "repair" }; // Run restore int ret = ToolRunner.run(conf1, new BackupDriver(), args); assertTrue(ret == 0); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java index a6808cd69dc3..d041957475eb 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,6 @@ public class TestRestoreBoundaryTests extends TestBackupBase { /** * Verify that a single empty table is restored to a new table. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -52,9 +51,8 @@ public void testFullRestoreSingleEmpty() throws Exception { LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; - getBackupAdmin().restore( - BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, - false)); + getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + tableset, tablemap, false)); Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); @@ -62,7 +60,6 @@ public void testFullRestoreSingleEmpty() throws Exception { /** * Verify that multiple tables are restored to new tables. - * * @throws Exception if doing the backup or an operation on the tables fails */ @Test @@ -73,9 +70,8 @@ public void testFullRestoreMultipleEmpty() throws Exception { String backupId = fullTableBackup(tables); TableName[] restore_tableset = new TableName[] { table2, table3 }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - getBackupAdmin().restore( - BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset, - tablemap, false)); + getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + restore_tableset, tablemap, false)); Admin hba = TEST_UTIL.getAdmin(); assertTrue(hba.tableExists(table2_restore)); assertTrue(hba.tableExists(table3_restore)); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java index bd295122a289..2e30118a9d72 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,10 +38,9 @@ public class TestSystemTableSnapshot extends TestBackupBase { /** * Verify backup system table snapshot. 
- * * @throws Exception if an operation on the table fails */ - // @Test + // @Test public void _testBackupRestoreSystemTable() throws Exception { LOG.info("test snapshot system table"); diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java index 5363b1a44b4f..d4196c2e50b9 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.util.HashMap; import java.util.List; import java.util.Map; @@ -40,6 +41,7 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -120,8 +122,8 @@ public void testBackupLogCleaner() throws Exception { // #3 - incremental backup for multiple tables List tableSetIncList = Lists.newArrayList(table1, table2, table3); - String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList, - BACKUP_ROOT_DIR); + String backupIdIncMultiple = + backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR); assertTrue(checkSucceeded(backupIdIncMultiple)); deletable = cleaner.getDeletableFiles(newWalFiles); diff --git a/hbase-balancer/pom.xml b/hbase-balancer/pom.xml index 9a0250bb9b89..e8d904338572 100644 --- a/hbase-balancer/pom.xml +++ b/hbase-balancer/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,35 +30,6 @@ hbase-balancer Apache HBase - Balancer HBase Balancer Support - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -152,13 +122,44 @@ test + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -184,8 +185,7 @@ lifecycle-mapping - - + diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java index df30ebbec0a5..012dd5fdf230 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.favored; import static org.apache.hadoop.hbase.ServerName.NON_STARTCODE; @@ -79,7 +77,7 @@ public class FavoredNodeAssignmentHelper { // region server entries might not match with that is in servers. private Map regionServerToRackMap; private List servers; - public static final byte [] FAVOREDNODES_QUALIFIER = Bytes.toBytes("fn"); + public static final byte[] FAVOREDNODES_QUALIFIER = Bytes.toBytes("fn"); public final static short FAVORED_NODES_NUM = 3; public final static short MAX_ATTEMPTS_FN_GENERATION = 10; @@ -124,8 +122,8 @@ public void initialize() { * @param connection connection to be used */ public static void updateMetaWithFavoredNodesInfo( - Map> regionToFavoredNodes, Connection connection) - throws IOException { + Map> regionToFavoredNodes, Connection connection) + throws IOException { List puts = new ArrayList<>(); for (Map.Entry> entry : regionToFavoredNodes.entrySet()) { Put put = makePut(entry.getKey(), entry.getValue()); @@ -143,7 +141,8 @@ public static void updateMetaWithFavoredNodesInfo( * Update meta table with favored nodes info */ public static void updateMetaWithFavoredNodesInfo( - Map> regionToFavoredNodes, Configuration conf) throws IOException { + Map> regionToFavoredNodes, Configuration conf) + throws IOException { // Write the region assignments to the meta table. // TODO: See above overrides take a Connection rather than a Configuration only the // Connection is a short circuit connection. That is not going to good in all cases, when @@ -155,7 +154,7 @@ public static void updateMetaWithFavoredNodesInfo( } private static Put makePut(RegionInfo regionInfo, List favoredNodeList) - throws IOException { + throws IOException { if (CollectionUtils.isEmpty(favoredNodeList)) { return null; } @@ -163,8 +162,8 @@ private static Put makePut(RegionInfo regionInfo, List favoredNodeLi Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time); byte[] favoredNodes = getFavoredNodes(favoredNodeList); put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY).setQualifier(FAVOREDNODES_QUALIFIER).setTimestamp(time) - .setType(Cell.Type.Put).setValue(favoredNodes).build()); + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(FAVOREDNODES_QUALIFIER) + .setTimestamp(time).setType(Cell.Type.Put).setValue(favoredNodes).build()); LOG.debug("Create the region {} with favored nodes {}", regionInfo.getRegionNameAsString(), favoredNodeList); return put; @@ -217,7 +216,7 @@ public void placePrimaryRSAsRoundRobin(Map> assignm rackList.addAll(rackToRegionServerMap.keySet()); int rackIndex = ThreadLocalRandom.current().nextInt(rackList.size()); int maxRackSize = 0; - for (Map.Entry> r : rackToRegionServerMap.entrySet()) { + for (Map.Entry> r : rackToRegionServerMap.entrySet()) { if (r.getValue().size() > maxRackSize) { maxRackSize = r.getValue().size(); } @@ -234,7 +233,7 @@ public void placePrimaryRSAsRoundRobin(Map> assignm // Get the server list for the current rack currentServerList = rackToRegionServerMap.get(rackName); - if (serverIndex >= currentServerList.size()) { //not enough machines in this rack + if (serverIndex >= currentServerList.size()) { // not enough machines in this rack if (numIterations % rackList.size() == 0) { if (++serverIndex >= maxRackSize) serverIndex = 0; } @@ -268,8 +267,8 @@ public void placePrimaryRSAsRoundRobin(Map> assignm } } - public Map placeSecondaryAndTertiaryRS( - Map primaryRSMap) { + public Map + placeSecondaryAndTertiaryRS(Map 
primaryRSMap) { Map secondaryAndTertiaryMap = new HashMap<>(); for (Map.Entry entry : primaryRSMap.entrySet()) { // Get the target region and its primary region server rack @@ -284,8 +283,9 @@ public Map placeSecondaryAndTertiaryRS( + regionInfo.getRegionNameAsString()); } } catch (Exception e) { - LOG.warn("Cannot place the favored nodes for region " + - regionInfo.getRegionNameAsString() + " because " + e, e); + LOG.warn("Cannot place the favored nodes for region " + regionInfo.getRegionNameAsString() + + " because " + e, + e); continue; } } @@ -306,8 +306,8 @@ public ServerName[] getSecondaryAndTertiary(RegionInfo regionInfo, ServerName pr return favoredNodes; } - private Map> mapRSToPrimaries( - Map primaryRSMap) { + private Map> + mapRSToPrimaries(Map primaryRSMap) { Map> primaryServerMap = new HashMap<>(); for (Entry e : primaryRSMap.entrySet()) { Set currentSet = primaryServerMap.get(e.getValue()); @@ -321,16 +321,14 @@ private Map> mapRSToPrimaries( } /** - * For regions that share the primary, avoid placing the secondary and tertiary - * on a same RS. Used for generating new assignments for the - * primary/secondary/tertiary RegionServers + * For regions that share the primary, avoid placing the secondary and tertiary on a same RS. Used + * for generating new assignments for the primary/secondary/tertiary RegionServers * @param primaryRSMap * @return the map of regions to the servers the region-files should be hosted on */ - public Map placeSecondaryAndTertiaryWithRestrictions( - Map primaryRSMap) { - Map> serverToPrimaries = - mapRSToPrimaries(primaryRSMap); + public Map + placeSecondaryAndTertiaryWithRestrictions(Map primaryRSMap) { + Map> serverToPrimaries = mapRSToPrimaries(primaryRSMap); Map secondaryAndTertiaryMap = new HashMap<>(); for (Entry entry : primaryRSMap.entrySet()) { @@ -346,8 +344,8 @@ public Map placeSecondaryAndTertiaryWithRestrictions( // from the same rack favoredNodes = singleRackCase(regionInfo, primaryRS, primaryRack); } else { - favoredNodes = multiRackCaseWithRestrictions(serverToPrimaries, - secondaryAndTertiaryMap, primaryRack, primaryRS, regionInfo); + favoredNodes = multiRackCaseWithRestrictions(serverToPrimaries, secondaryAndTertiaryMap, + primaryRack, primaryRS, regionInfo); } if (favoredNodes != null) { secondaryAndTertiaryMap.put(regionInfo, favoredNodes); @@ -355,8 +353,9 @@ public Map placeSecondaryAndTertiaryWithRestrictions( + regionInfo.getRegionNameAsString()); } } catch (Exception e) { - LOG.warn("Cannot place the favored nodes for region " - + regionInfo.getRegionNameAsString() + " because " + e, e); + LOG.warn("Cannot place the favored nodes for region " + regionInfo.getRegionNameAsString() + + " because " + e, + e); continue; } } @@ -365,8 +364,8 @@ public Map placeSecondaryAndTertiaryWithRestrictions( private ServerName[] multiRackCaseWithRestrictions( Map> serverToPrimaries, - Map secondaryAndTertiaryMap, - String primaryRack, ServerName primaryRS, RegionInfo regionInfo) throws IOException { + Map secondaryAndTertiaryMap, String primaryRack, + ServerName primaryRS, RegionInfo regionInfo) throws IOException { // Random to choose the secondary and tertiary region server // from another rack to place the secondary and tertiary // Random to choose one rack except for the current rack @@ -398,8 +397,7 @@ private ServerName[] multiRackCaseWithRestrictions( } } } - if (skipServerSet.size() + 2 <= serverSet.size()) - break; + if (skipServerSet.size() + 2 <= serverSet.size()) break; skipServerSet.clear(); rackSkipSet.add(secondaryRack); // we 
used all racks @@ -421,8 +419,7 @@ private ServerName[] multiRackCaseWithRestrictions( ServerName tertiaryRS = getOneRandomServer(secondaryRack, skipServerSet); if (secondaryRS == null || tertiaryRS == null) { - LOG.error("Cannot place the secondary and tertiary" - + " region server for region " + LOG.error("Cannot place the secondary and tertiary" + " region server for region " + regionInfo.getRegionNameAsString()); } // Create the secondary and tertiary pair @@ -452,8 +449,7 @@ private ServerName[] multiRackCaseWithRestrictions( return favoredNodes; } - private ServerName[] singleRackCase(RegionInfo regionInfo, - ServerName primaryRS, + private ServerName[] singleRackCase(RegionInfo regionInfo, ServerName primaryRS, String primaryRack) throws IOException { // Single rack case: have to pick the secondary and tertiary // from the same rack @@ -465,35 +461,33 @@ private ServerName[] singleRackCase(RegionInfo regionInfo, } else { // Randomly select two region servers from the server list and make sure // they are not overlap with the primary region server; - Set serverSkipSet = new HashSet<>(); - serverSkipSet.add(primaryRS); + Set serverSkipSet = new HashSet<>(); + serverSkipSet.add(primaryRS); - // Place the secondary RS - ServerName secondaryRS = getOneRandomServer(primaryRack, serverSkipSet); - // Skip the secondary for the tertiary placement - serverSkipSet.add(secondaryRS); - ServerName tertiaryRS = getOneRandomServer(primaryRack, serverSkipSet); + // Place the secondary RS + ServerName secondaryRS = getOneRandomServer(primaryRack, serverSkipSet); + // Skip the secondary for the tertiary placement + serverSkipSet.add(secondaryRS); + ServerName tertiaryRS = getOneRandomServer(primaryRack, serverSkipSet); - if (secondaryRS == null || tertiaryRS == null) { - LOG.error("Cannot place the secondary, tertiary favored node for region " + - regionInfo.getRegionNameAsString()); - } - // Create the secondary and tertiary pair - ServerName[] favoredNodes = new ServerName[2]; - favoredNodes[0] = secondaryRS; - favoredNodes[1] = tertiaryRS; - return favoredNodes; + if (secondaryRS == null || tertiaryRS == null) { + LOG.error("Cannot place the secondary, tertiary favored node for region " + + regionInfo.getRegionNameAsString()); + } + // Create the secondary and tertiary pair + ServerName[] favoredNodes = new ServerName[2]; + favoredNodes[0] = secondaryRS; + favoredNodes[1] = tertiaryRS; + return favoredNodes; } } /** - * Place secondary and tertiary nodes in a multi rack case. - * If there are only two racks, then we try the place the secondary - * and tertiary on different rack than primary. But if the other rack has - * only one region server, then we place primary and tertiary on one rack - * and secondary on another. The aim is two distribute the three favored nodes - * on >= 2 racks. - * TODO: see how we can use generateMissingFavoredNodeMultiRack API here + * Place secondary and tertiary nodes in a multi rack case. If there are only two racks, then we + * try the place the secondary and tertiary on different rack than primary. But if the other rack + * has only one region server, then we place primary and tertiary on one rack and secondary on + * another. The aim is two distribute the three favored nodes on >= 2 racks. TODO: see how we can + * use generateMissingFavoredNodeMultiRack API here * @param regionInfo Region for which we are trying to generate FN * @param primaryRS The primary favored node. * @param primaryRack The rack of the primary favored node. 
@@ -503,7 +497,7 @@ private ServerName[] singleRackCase(RegionInfo regionInfo, private ServerName[] multiRackCase(RegionInfo regionInfo, ServerName primaryRS, String primaryRack) throws IOException { - ListfavoredNodes = Lists.newArrayList(primaryRS); + List favoredNodes = Lists.newArrayList(primaryRS); // Create the secondary and tertiary pair ServerName secondaryRS = generateMissingFavoredNodeMultiRack(favoredNodes); favoredNodes.add(secondaryRS); @@ -523,7 +517,7 @@ private ServerName[] multiRackCase(RegionInfo regionInfo, ServerName primaryRS, tertiaryRS = generateMissingFavoredNode(Lists.newArrayList(primaryRS, secondaryRS)); } } - return new ServerName[]{ secondaryRS, tertiaryRS }; + return new ServerName[] { secondaryRS, tertiaryRS }; } public boolean canPlaceFavoredNodes() { @@ -540,15 +534,13 @@ private List getServersFromRack(String rack) { /** * Gets a random server from the specified rack and skips anything specified. - * @param rack rack from a server is needed * @param skipServerSet the server shouldn't belong to this set */ protected ServerName getOneRandomServer(String rack, Set skipServerSet) { // Is the rack valid? Do we recognize it? - if (rack == null || getServersFromRack(rack) == null || - getServersFromRack(rack).isEmpty()) { + if (rack == null || getServersFromRack(rack) == null || getServersFromRack(rack).isEmpty()) { return null; } @@ -615,12 +607,12 @@ public static String getFavoredNodesAsString(List nodes) { } /* - * Generates a missing favored node based on the input favored nodes. This helps to generate - * new FN when there is already 2 FN and we need a third one. For eg, while generating new FN - * for split daughters after inheriting 2 FN from the parent. If the cluster has only one rack - * it generates from the same rack. If the cluster has multiple racks, then it ensures the new - * FN respects the rack constraints similar to HDFS. For eg: if there are 3 FN, they will be - * spread across 2 racks. + * Generates a missing favored node based on the input favored nodes. This helps to generate new + * FN when there is already 2 FN and we need a third one. For eg, while generating new FN for + * split daughters after inheriting 2 FN from the parent. If the cluster has only one rack it + * generates from the same rack. If the cluster has multiple racks, then it ensures the new FN + * respects the rack constraints similar to HDFS. For eg: if there are 3 FN, they will be spread + * across 2 racks. */ public ServerName generateMissingFavoredNode(List favoredNodes) throws IOException { if (this.uniqueRackList.size() == 1) { @@ -662,12 +654,11 @@ private ServerName generateMissingFavoredNodeMultiRack(List favoredN } /* - * Generates a missing FN based on the input favoredNodes and also the nodes to be skipped. - * - * Get the current layout of favored nodes arrangement and nodes to be excluded and get a - * random node that goes with HDFS block placement. Eg: If the existing nodes are on one rack, - * generate one from another rack. We exclude as much as possible so the random selection - * has more chance to generate a node within a few iterations, ideally 1. + * Generates a missing FN based on the input favoredNodes and also the nodes to be skipped. Get + * the current layout of favored nodes arrangement and nodes to be excluded and get a random node + * that goes with HDFS block placement. Eg: If the existing nodes are on one rack, generate one + * from another rack. 
We exclude as much as possible so the random selection has more chance to + * generate a node within a few iterations, ideally 1. */ private ServerName generateMissingFavoredNodeMultiRack(List favoredNodes, List excludeNodes) throws IOException { @@ -692,8 +683,8 @@ private ServerName generateMissingFavoredNodeMultiRack(List favoredN Set skipRackSet = Sets.newHashSet(); /* - * If both the FN are from the same rack, then we don't want to generate another FN on the - * same rack. If that rack fails, the region would be unavailable. + * If both the FN are from the same rack, then we don't want to generate another FN on the same + * rack. If that rack fails, the region would be unavailable. */ if (racks.size() == 1 && favoredNodes.size() > 1) { skipRackSet.add(racks.iterator().next()); @@ -704,8 +695,8 @@ private ServerName generateMissingFavoredNodeMultiRack(List favoredN * reduce the number of iterations for FN selection. */ for (String rack : racks) { - if (getServersFromRack(rack) != null && - rackToFNMapping.get(rack).size() == getServersFromRack(rack).size()) { + if (getServersFromRack(rack) != null + && rackToFNMapping.get(rack).size() == getServersFromRack(rack).size()) { skipRackSet.add(rack); } } @@ -730,9 +721,10 @@ private ServerName generateMissingFavoredNodeMultiRack(List favoredN if (newServer == null) { if (LOG.isTraceEnabled()) { - LOG.trace(String.format("Unable to generate additional favored nodes for %s after " - + "considering racks %s and skip rack %s with a unique rack list of %s and rack " - + "to RS map of %s and RS to rack map of %s", + LOG.trace(String.format( + "Unable to generate additional favored nodes for %s after " + + "considering racks %s and skip rack %s with a unique rack list of %s and rack " + + "to RS map of %s and RS to rack map of %s", StringUtils.join(favoredNodes, ","), randomRacks, skipRackSet, uniqueRackList, rackToRegionServerMap, regionServerToRackMap)); } @@ -743,10 +735,8 @@ private ServerName generateMissingFavoredNodeMultiRack(List favoredN } /* - * Generate favored nodes for a region. - * - * Choose a random server as primary and then choose secondary and tertiary FN so its spread - * across two racks. + * Generate favored nodes for a region. Choose a random server as primary and then choose + * secondary and tertiary FN so its spread across two racks. */ public List generateFavoredNodes(RegionInfo hri) throws IOException { @@ -790,27 +780,25 @@ public Map> generateFavoredNodesRoundRobin( /* * Generate favored nodes for a set of regions when we know where they are currently hosted. 
*/ - private Map> generateFavoredNodes( - Map primaryRSMap) { + private Map> + generateFavoredNodes(Map primaryRSMap) { Map> generatedFavNodes = new HashMap<>(); Map secondaryAndTertiaryRSMap = - placeSecondaryAndTertiaryRS(primaryRSMap); + placeSecondaryAndTertiaryRS(primaryRSMap); for (Entry entry : primaryRSMap.entrySet()) { List favoredNodesForRegion = new ArrayList<>(FAVORED_NODES_NUM); RegionInfo region = entry.getKey(); ServerName primarySN = entry.getValue(); - favoredNodesForRegion.add(ServerName.valueOf(primarySN.getHostname(), primarySN.getPort(), - NON_STARTCODE)); + favoredNodesForRegion + .add(ServerName.valueOf(primarySN.getHostname(), primarySN.getPort(), NON_STARTCODE)); ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region); if (secondaryAndTertiaryNodes != null) { - favoredNodesForRegion.add(ServerName.valueOf( - secondaryAndTertiaryNodes[0].getHostname(), secondaryAndTertiaryNodes[0].getPort(), - NON_STARTCODE)); - favoredNodesForRegion.add(ServerName.valueOf( - secondaryAndTertiaryNodes[1].getHostname(), secondaryAndTertiaryNodes[1].getPort(), - NON_STARTCODE)); + favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), + secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE)); + favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), + secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE)); } generatedFavNodes.put(region, favoredNodesForRegion); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java index d67e7b4066a5..154c3f0823e3 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.favored; import static org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position.PRIMARY; @@ -48,18 +47,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that - * assigns favored nodes for each region. There is a Primary RegionServer that hosts - * the region, and then there is Secondary and Tertiary RegionServers. Currently, the - * favored nodes information is used in creating HDFS files - the Primary RegionServer - * passes the primary, secondary, tertiary node addresses as hints to the - * DistributedFileSystem API for creating files on the filesystem. These nodes are - * treated as hints by the HDFS to place the blocks of the file. This alleviates the - * problem to do with reading from remote nodes (since we can make the Secondary - * RegionServer as the new Primary RegionServer) after a region is recovered. This - * should help provide consistent read latencies for the regions even when their - * primary region servers die. - * + * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that assigns favored + * nodes for each region. 
There is a Primary RegionServer that hosts the region, and then there is + * Secondary and Tertiary RegionServers. Currently, the favored nodes information is used in + * creating HDFS files - the Primary RegionServer passes the primary, secondary, tertiary node + * addresses as hints to the DistributedFileSystem API for creating files on the filesystem. These + * nodes are treated as hints by the HDFS to place the blocks of the file. This alleviates the + * problem to do with reading from remote nodes (since we can make the Secondary RegionServer as the + * new Primary RegionServer) after a region is recovered. This should help provide consistent read + * latencies for the regions even when their primary region servers die. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements FavoredNodesPromoter { @@ -108,9 +104,9 @@ protected List balanceTable(TableName tableName, // the region is currently on none of the favored nodes // get it on one of them if possible ServerMetrics l1 = - provider.getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(1))); + provider.getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(1))); ServerMetrics l2 = - provider.getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(2))); + provider.getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(2))); if (l1 != null && l2 != null) { if (l1.getRegionMetrics().size() > l2.getRegionMetrics().size()) { destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2)); @@ -147,31 +143,31 @@ public Map> roundRobinAssignment(List r } // Segregate the regions into two types: // 1. The regions that have favored node assignment, and where at least - // one of the favored node is still alive. In this case, try to adhere - // to the current favored nodes assignment as much as possible - i.e., - // if the current primary is gone, then make the secondary or tertiary - // as the new host for the region (based on their current load). - // Note that we don't change the favored - // node assignments here (even though one or more favored node is currently - // down). It is up to the balanceCluster to do this hard work. The HDFS - // can handle the fact that some nodes in the favored nodes hint is down - // It'd allocate some other DNs. In combination with stale settings for HDFS, - // we should be just fine. + // one of the favored node is still alive. In this case, try to adhere + // to the current favored nodes assignment as much as possible - i.e., + // if the current primary is gone, then make the secondary or tertiary + // as the new host for the region (based on their current load). + // Note that we don't change the favored + // node assignments here (even though one or more favored node is currently + // down). It is up to the balanceCluster to do this hard work. The HDFS + // can handle the fact that some nodes in the favored nodes hint is down + // It'd allocate some other DNs. In combination with stale settings for HDFS, + // we should be just fine. // 2. The regions that currently don't have favored node assignment. We will - // need to come up with favored nodes assignments for them. The corner case - // in (1) above is that all the nodes are unavailable and in that case, we - // will note that this region doesn't have favored nodes. - Pair>, List> segregatedRegions = + // need to come up with favored nodes assignments for them. 
The corner case + // in (1) above is that all the nodes are unavailable and in that case, we + // will note that this region doesn't have favored nodes. + Pair>, List> segregatedRegions = segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers); - Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); + Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); List regionsWithNoFavoredNodes = segregatedRegions.getSecond(); assignmentMap = new HashMap<>(); roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regionsWithNoFavoredNodes); // merge the assignment maps assignmentMap.putAll(regionsWithFavoredNodesMap); } catch (Exception ex) { - LOG.warn("Encountered exception while doing favored-nodes assignment " + ex + - " Falling back to regular assignment"); + LOG.warn("Encountered exception while doing favored-nodes assignment " + ex + + " Falling back to regular assignment"); assignmentMap = super.roundRobinAssignment(regions, servers); } return assignmentMap; @@ -208,17 +204,17 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap); return primary; } catch (Exception ex) { - LOG.warn("Encountered exception while doing favored-nodes (random)assignment " + ex + - " Falling back to regular assignment"); + LOG.warn("Encountered exception while doing favored-nodes (random)assignment " + ex + + " Falling back to regular assignment"); return super.randomAssignment(regionInfo, servers); } } private Pair>, List> - segregateRegionsAndAssignRegionsWithFavoredNodes(List regions, - List availableServers) { + segregateRegionsAndAssignRegionsWithFavoredNodes(List regions, + List availableServers) { Map> assignmentMapForFavoredNodes = - new HashMap<>(regions.size() / 2); + new HashMap<>(regions.size() / 2); List regionsWithNoFavoredNodes = new ArrayList<>(regions.size() / 2); for (RegionInfo region : regions) { List favoredNodes = fnm.getFavoredNodes(region); @@ -240,11 +236,11 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve } } } - assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, - primaryHost, secondaryHost, tertiaryHost); + assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, primaryHost, + secondaryHost, tertiaryHost); } if (primaryHost == null && secondaryHost == null && tertiaryHost == null) { - //all favored nodes unavailable + // all favored nodes unavailable regionsWithNoFavoredNodes.add(region); } } @@ -263,9 +259,9 @@ private ServerName availableServersContains(List servers, ServerName return null; } - private void assignRegionToAvailableFavoredNode(Map> assignmentMapForFavoredNodes, RegionInfo region, ServerName primaryHost, - ServerName secondaryHost, ServerName tertiaryHost) { + private void assignRegionToAvailableFavoredNode( + Map> assignmentMapForFavoredNodes, RegionInfo region, + ServerName primaryHost, ServerName secondaryHost, ServerName tertiaryHost) { if (primaryHost != null) { addRegionToMap(assignmentMapForFavoredNodes, region, primaryHost); } else if (secondaryHost != null && tertiaryHost != null) { @@ -288,7 +284,7 @@ private void assignRegionToAvailableFavoredNode(Map> assignmentMapForFavoredNodes, - RegionInfo region, ServerName host) { + RegionInfo region, ServerName host) { List regionsOnServer = assignmentMapForFavoredNodes.get(host); if (regionsOnServer == null) { regionsOnServer = new ArrayList<>(); @@ -303,7 +299,8 @@ public List getFavoredNodes(RegionInfo regionInfo) { } 
private void roundRobinAssignmentImpl(FavoredNodeAssignmentHelper assignmentHelper, - Map> assignmentMap, List regions) throws IOException { + Map> assignmentMap, List regions) + throws IOException { Map primaryRSMap = new HashMap<>(); // figure the primary RSs assignmentHelper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); @@ -311,8 +308,8 @@ private void roundRobinAssignmentImpl(FavoredNodeAssignmentHelper assignmentHelp } private void assignSecondaryAndTertiaryNodesForRegion( - FavoredNodeAssignmentHelper assignmentHelper, - List regions, Map primaryRSMap) throws IOException { + FavoredNodeAssignmentHelper assignmentHelper, List regions, + Map primaryRSMap) throws IOException { // figure the secondary and tertiary RSs Map secondaryAndTertiaryRSMap = assignmentHelper.placeSecondaryAndTertiaryRS(primaryRSMap); @@ -324,14 +321,14 @@ private void assignSecondaryAndTertiaryNodesForRegion( // We don't care about the startcode; but only the hostname really List favoredNodesForRegion = new ArrayList<>(3); ServerName sn = primaryRSMap.get(region); - favoredNodesForRegion.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), - ServerName.NON_STARTCODE)); + favoredNodesForRegion + .add(ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE)); ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region); if (secondaryAndTertiaryNodes != null) { favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), - secondaryAndTertiaryNodes[0].getPort(), ServerName.NON_STARTCODE)); + secondaryAndTertiaryNodes[0].getPort(), ServerName.NON_STARTCODE)); favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), - secondaryAndTertiaryNodes[1].getPort(), ServerName.NON_STARTCODE)); + secondaryAndTertiaryNodes[1].getPort(), ServerName.NON_STARTCODE)); } regionFNMap.put(region, favoredNodesForRegion); } @@ -339,14 +336,12 @@ private void assignSecondaryAndTertiaryNodesForRegion( } /* - * Generate Favored Nodes for daughters during region split. - * - * If the parent does not have FN, regenerates them for the daughters. - * - * If the parent has FN, inherit two FN from parent for each daughter and generate the remaining. - * The primary FN for both the daughters should be the same as parent. Inherit the secondary - * FN from the parent but keep it different for each daughter. Choose the remaining FN - * randomly. This would give us better distribution over a period of time after enough splits. + * Generate Favored Nodes for daughters during region split. If the parent does not have FN, + * regenerates them for the daughters. If the parent has FN, inherit two FN from parent for each + * daughter and generate the remaining. The primary FN for both the daughters should be the same + * as parent. Inherit the secondary FN from the parent but keep it different for each daughter. + * Choose the remaining FN randomly. This would give us better distribution over a period of time + * after enough splits. */ @Override public void generateFavoredNodesForDaughter(List servers, RegionInfo parent, @@ -400,11 +395,11 @@ private Set getInheritedFNForDaughter(FavoredNodeAssignmentHelper he } /* - * Generate favored nodes for a region during merge. Choose the FN from one of the sources to - * keep it simple. + * Generate favored nodes for a region during merge. Choose the FN from one of the sources to keep + * it simple. 
*/ @Override - public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents) + public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents) throws IOException { Map> regionFNMap = Maps.newHashMap(); regionFNMap.put(merged, getFavoredNodes(mergeParents[0])); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java index d56e9e6b0404..97e9faba6a58 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesManager.java @@ -77,14 +77,14 @@ public FavoredNodesManager(ClusterInfoProvider provider) { public void initializeFromMeta() throws IOException { SnapshotOfRegionAssignmentFromMeta snapshot = - new SnapshotOfRegionAssignmentFromMeta(provider.getConnection()); + new SnapshotOfRegionAssignmentFromMeta(provider.getConnection()); snapshot.initialize(); // Add snapshot to structures made on creation. Current structures may have picked // up data between construction and the scan of meta needed before this method // is called. See HBASE-23737 "[Flakey Tests] TestFavoredNodeTableImport fails 30% of the time" synchronized (this) { this.globalFavoredNodesAssignmentPlan - .updateFavoredNodesMap(snapshot.getExistingAssignmentPlan()); + .updateFavoredNodesMap(snapshot.getExistingAssignmentPlan()); primaryRSToRegionMap.putAll(snapshot.getPrimaryToRegionInfoMap()); secondaryRSToRegionMap.putAll(snapshot.getSecondaryToRegionInfoMap()); teritiaryRSToRegionMap.putAll(snapshot.getTertiaryToRegionInfoMap()); @@ -97,8 +97,8 @@ public synchronized List getFavoredNodes(RegionInfo regionInfo) { } /** - * Favored nodes are not applicable for system tables. We will use this to check before - * we apply any favored nodes logic on a region. + * Favored nodes are not applicable for system tables. We will use this to check before we apply + * any favored nodes logic on a region. 
*/ public static boolean isFavoredNodeApplicable(RegionInfo regionInfo) { return !regionInfo.getTable().isSystemTable(); @@ -113,9 +113,9 @@ public static Set filterNonFNApplicableRegions(Collection getFavoredNodesWithDNPort(RegionInfo regionInfo) { if (getFavoredNodes(regionInfo) == null) { @@ -124,8 +124,8 @@ public synchronized List getFavoredNodesWithDNPort(RegionInfo region List fnWithDNPort = Lists.newArrayList(); for (ServerName sn : getFavoredNodes(regionInfo)) { - fnWithDNPort.add(ServerName.valueOf(sn.getHostname(), datanodeDataTransferPort, - NON_STARTCODE)); + fnWithDNPort + .add(ServerName.valueOf(sn.getHostname(), datanodeDataTransferPort, NON_STARTCODE)); } return fnWithDNPort; } @@ -151,9 +151,9 @@ public synchronized void updateFavoredNodes(Map> re } if (servers.size() != FAVORED_NODES_NUM) { - throw new IOException("At least " + FAVORED_NODES_NUM - + " favored nodes should be present for region : " + regionInfo.getEncodedName() - + " current FN servers:" + servers); + throw new IOException( + "At least " + FAVORED_NODES_NUM + " favored nodes should be present for region : " + + regionInfo.getEncodedName() + " current FN servers:" + servers); } List serversWithNoStartCodes = Lists.newArrayList(); @@ -161,8 +161,8 @@ public synchronized void updateFavoredNodes(Map> re if (sn.getStartcode() == NON_STARTCODE) { serversWithNoStartCodes.add(sn); } else { - serversWithNoStartCodes.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), - NON_STARTCODE)); + serversWithNoStartCodes + .add(ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE)); } } regionToFavoredNodes.put(regionInfo, serversWithNoStartCodes); @@ -170,7 +170,7 @@ public synchronized void updateFavoredNodes(Map> re // Lets do a bulk update to meta since that reduces the RPC's FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, - provider.getConnection()); + provider.getConnection()); deleteFavoredNodesForRegions(regionToFavoredNodes.keySet()); for (Map.Entry> entry : regionToFavoredNodes.entrySet()) { @@ -183,7 +183,7 @@ public synchronized void updateFavoredNodes(Map> re private synchronized void addToReplicaLoad(RegionInfo hri, List servers) { ServerName serverToUse = - ServerName.valueOf(servers.get(PRIMARY.ordinal()).getAddress().toString(), NON_STARTCODE); + ServerName.valueOf(servers.get(PRIMARY.ordinal()).getAddress().toString(), NON_STARTCODE); List regionList = primaryRSToRegionMap.get(serverToUse); if (regionList == null) { regionList = new ArrayList<>(); @@ -191,8 +191,7 @@ private synchronized void addToReplicaLoad(RegionInfo hri, List serv regionList.add(hri); primaryRSToRegionMap.put(serverToUse, regionList); - serverToUse = ServerName - .valueOf(servers.get(SECONDARY.ordinal()).getAddress(), NON_STARTCODE); + serverToUse = ServerName.valueOf(servers.get(SECONDARY.ordinal()).getAddress(), NON_STARTCODE); regionList = secondaryRSToRegionMap.get(serverToUse); if (regionList == null) { regionList = new ArrayList<>(); @@ -200,8 +199,7 @@ private synchronized void addToReplicaLoad(RegionInfo hri, List serv regionList.add(hri); secondaryRSToRegionMap.put(serverToUse, regionList); - serverToUse = ServerName.valueOf(servers.get(TERTIARY.ordinal()).getAddress(), - NON_STARTCODE); + serverToUse = ServerName.valueOf(servers.get(TERTIARY.ordinal()).getAddress(), NON_STARTCODE); regionList = teritiaryRSToRegionMap.get(serverToUse); if (regionList == null) { regionList = new ArrayList<>(); diff --git 
a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java index 4481021f1bac..c1b98c8982ce 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,11 +27,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This class contains the mapping information between each region name and - * its favored region server list. Used by FavoredNodeLoadBalancer set - * of classes and from unit tests (hence the class is public) - * - * All the access to this class is thread-safe. + * This class contains the mapping information between each region name and its favored region + * server list. Used by FavoredNodeLoadBalancer set of classes and from unit tests (hence the class + * is public) All the access to this class is thread-safe. */ @InterfaceAudience.Private public class FavoredNodesPlan { @@ -40,9 +37,7 @@ public class FavoredNodesPlan { private final Map> favoredNodesMap; public static enum Position { - PRIMARY, - SECONDARY, - TERTIARY + PRIMARY, SECONDARY, TERTIARY } public FavoredNodesPlan() { @@ -82,18 +77,18 @@ public List getFavoredNodes(RegionInfo region) { } /** - * Return the position of the server in the favoredNodes list. Assumes the - * favoredNodes list is of size 3. + * Return the position of the server in the favoredNodes list. Assumes the favoredNodes list is of + * size 3. * @return position */ - public static Position getFavoredServerPosition( - List favoredNodes, ServerName server) { - if (favoredNodes == null || server == null || - favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + public static Position getFavoredServerPosition(List favoredNodes, + ServerName server) { + if (favoredNodes == null || server == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { return null; } for (Position p : Position.values()) { - if (ServerName.isSameAddress(favoredNodes.get(p.ordinal()),server)) { + if (ServerName.isSameAddress(favoredNodes.get(p.ordinal()), server)) { return p; } } @@ -105,8 +100,8 @@ public static Position getFavoredServerPosition( */ public Map> getAssignmentMap() { // Make a deep copy so changes don't harm our copy of favoredNodesMap. - return this.favoredNodesMap.entrySet().stream(). - collect(Collectors.toMap(k -> k.getKey(), v -> new ArrayList(v.getValue()))); + return this.favoredNodesMap.entrySet().stream() + .collect(Collectors.toMap(k -> k.getKey(), v -> new ArrayList(v.getValue()))); } public int size() { @@ -125,7 +120,7 @@ public boolean equals(Object o) { return false; } // To compare the map from object o is identical to current assignment map. 
- Map> comparedMap = ((FavoredNodesPlan)o).favoredNodesMap; + Map> comparedMap = ((FavoredNodesPlan) o).favoredNodesMap; // compare the size if (comparedMap.size() != this.favoredNodesMap.size()) { @@ -133,8 +128,7 @@ public boolean equals(Object o) { } // compare each element in the assignment map - for (Map.Entry> entry : - comparedMap.entrySet()) { + for (Map.Entry> entry : comparedMap.entrySet()) { List serverList = this.favoredNodesMap.get(entry.getKey()); if (serverList == null && entry.getValue() != null) { return false; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java index ba7af6682abd..fb971c8fa582 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPromoter.java @@ -19,10 +19,9 @@ import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface FavoredNodesPromoter { @@ -30,10 +29,10 @@ public interface FavoredNodesPromoter { /* Try and assign regions even if favored nodes are dead */ String FAVORED_ALWAYS_ASSIGN_REGIONS = "hbase.favored.assignment.always.assign"; - void generateFavoredNodesForDaughter(List servers, - RegionInfo parent, RegionInfo hriA, RegionInfo hriB) throws IOException; + void generateFavoredNodesForDaughter(List servers, RegionInfo parent, RegionInfo hriA, + RegionInfo hriB) throws IOException; - void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents) + void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents) throws IOException; List getFavoredNodes(RegionInfo regionInfo); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java index 2a7600079d7c..e2095977e3da 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/StartcodeAgnosticServerName.java @@ -17,16 +17,17 @@ */ package org.apache.hadoop.hbase.favored; -import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Addressing; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; /** - * This class differs from ServerName in that start code is always ignored. This is because - * start code, ServerName.NON_STARTCODE is used to persist favored nodes and keeping this separate - * from {@link ServerName} is much cleaner. This should only be used by Favored node specific - * classes and should not be used outside favored nodes. + * This class differs from ServerName in that start code is always ignored. This is because start + * code, ServerName.NON_STARTCODE is used to persist favored nodes and keeping this separate from + * {@link ServerName} is much cleaner. 
This should only be used by Favored node specific classes and + * should not be used outside favored nodes. */ @InterfaceAudience.Private class StartcodeAgnosticServerName extends ServerName { @@ -45,9 +46,10 @@ public static StartcodeAgnosticServerName valueOf(final String hostnameAndPort, Addressing.parsePort(hostnameAndPort), startcode); } - public static StartcodeAgnosticServerName valueOf(final HostAndPort hostnameAndPort, long startcode) { - return new StartcodeAgnosticServerName(hostnameAndPort.getHost(), - hostnameAndPort.getPort(), startcode); + public static StartcodeAgnosticServerName valueOf(final HostAndPort hostnameAndPort, + long startcode) { + return new StartcodeAgnosticServerName(hostnameAndPort.getHost(), hostnameAndPort.getPort(), + startcode); } @Override diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java index 3a528f42c43e..c54ffa720c39 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -34,15 +32,14 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + /** - * Helper class that is used by RegionPlacementMaintainer to print - * information for favored nodes - * + * Helper class that is used by RegionPlacementMaintainer to print information for favored nodes */ @InterfaceAudience.Private public class AssignmentVerificationReport { - private static final Logger LOG = LoggerFactory.getLogger( - AssignmentVerificationReport.class.getName()); + private static final Logger LOG = + LoggerFactory.getLogger(AssignmentVerificationReport.class.getName()); private TableName tableName = null; private boolean enforceLocality = false; @@ -91,16 +88,14 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps this.tableName = tableName; // Get all the regions for this table - List regionInfoList = - snapshot.getTableToRegionMap().get(tableName); + List regionInfoList = snapshot.getTableToRegionMap().get(tableName); // Get the total region num for the current table this.totalRegions = regionInfoList.size(); // Get the existing assignment plan FavoredNodesPlan favoredNodesAssignment = snapshot.getExistingAssignmentPlan(); // Get the region to region server mapping - Map currentAssignment = - snapshot.getRegionToRegionServerMap(); + Map currentAssignment = snapshot.getRegionToRegionServerMap(); // Initialize the server to its hosing region counter map Map serverToHostingRegionCounterMap = new HashMap<>(); @@ -128,18 +123,15 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps // Get the favored nodes from the assignment plan and verify it. 
List favoredNodes = favoredNodesAssignment.getFavoredNodes(region); - if (favoredNodes == null || - favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { + if (favoredNodes == null + || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { regionsWithoutValidFavoredNodes.add(region); continue; } // Get the primary, secondary and tertiary region server - ServerName primaryRS = - favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); - ServerName secondaryRS = - favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); - ServerName tertiaryRS = - favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); + ServerName primaryRS = favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); + ServerName secondaryRS = favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); // Update the primary rs to its region set map Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); @@ -160,7 +152,7 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps // Get the position of the current region server in the favored nodes list FavoredNodesPlan.Position favoredNodePosition = - FavoredNodesPlan.getFavoredServerPosition(favoredNodes, currentRS); + FavoredNodesPlan.getFavoredServerPosition(favoredNodes, currentRS); // Handle the non favored assignment. if (favoredNodePosition == null) { @@ -178,7 +170,7 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps // Get the region degree locality map Map regionDegreeLocalityMap = - regionLocalityMap.get(region.getEncodedName()); + regionLocalityMap.get(region.getEncodedName()); if (regionDegreeLocalityMap == null) { continue; // ignore the region which doesn't have any store files. } @@ -187,43 +179,37 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { ServerName favoredNode = favoredNodes.get(p.ordinal()); // Get the locality for the current favored nodes - Float locality = - regionDegreeLocalityMap.get(favoredNode.getHostname()); + Float locality = regionDegreeLocalityMap.get(favoredNode.getHostname()); if (locality != null) { this.favoredNodesLocalitySummary[p.ordinal()] += locality; } } // Get the locality summary for the current region server - Float actualLocality = - regionDegreeLocalityMap.get(currentRS.getHostname()); + Float actualLocality = regionDegreeLocalityMap.get(currentRS.getHostname()); if (actualLocality != null) { this.actualLocalitySummary += actualLocality; } } } catch (Exception e) { - LOG.error("Cannot verify the region assignment for region " + - ((region == null) ? " null " : region.getRegionNameAsString()) + - "because of " + e); + LOG.error("Cannot verify the region assignment for region " + + ((region == null) ? 
" null " : region.getRegionNameAsString()) + "because of " + e); } } float dispersionScoreSummary = 0; float dispersionNumSummary = 0; // Calculate the secondary score for each primary region server - for (Map.Entry entry : - primaryRSToRegionCounterMap.entrySet()) { + for (Map.Entry entry : primaryRSToRegionCounterMap.entrySet()) { ServerName primaryRS = entry.getKey(); Integer regionsOnPrimary = entry.getValue(); // Process the dispersion number and score float dispersionScore = 0; int dispersionNum = 0; - if (primaryToSecTerRSMap.get(primaryRS) != null - && regionsOnPrimary.intValue() != 0) { + if (primaryToSecTerRSMap.get(primaryRS) != null && regionsOnPrimary.intValue() != 0) { dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); - dispersionScore = dispersionNum / - ((float) regionsOnPrimary.intValue() * 2); + dispersionScore = dispersionNum / ((float) regionsOnPrimary.intValue() * 2); } // Update the max dispersion score if (dispersionScore > this.maxDispersionScore) { @@ -267,15 +253,14 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps // Update the avg dispersion score if (primaryRSToRegionCounterMap.keySet().size() != 0) { - this.avgDispersionScore = dispersionScoreSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); - this.avgDispersionNum = dispersionNumSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionScore = + dispersionScoreSummary / (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = + dispersionNumSummary / (float) primaryRSToRegionCounterMap.keySet().size(); } // Fill up the most loaded and least loaded region server information - for (Map.Entry entry : - serverToHostingRegionCounterMap.entrySet()) { + for (Map.Entry entry : serverToHostingRegionCounterMap.entrySet()) { ServerName currentRS = entry.getKey(); int hostRegionCounter = entry.getValue().intValue(); @@ -300,8 +285,8 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps // and total region servers this.totalRegionServers = serverToHostingRegionCounterMap.keySet().size(); - this.avgRegionsOnRS = (totalRegionServers == 0) ? 0 : - (totalRegions / (float) totalRegionServers); + this.avgRegionsOnRS = + (totalRegionServers == 0) ? 
0 : (totalRegions / (float) totalRegionServers); // Set the isFilledUp as true isFilledUp = true; } @@ -312,13 +297,12 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps * @param snapshot * @param newPlan */ - public void fillUpDispersion(TableName tableName, - SnapshotOfRegionAssignmentFromMeta snapshot, FavoredNodesPlan newPlan) { + public void fillUpDispersion(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot, + FavoredNodesPlan newPlan) { // Set the table name this.tableName = tableName; // Get all the regions for this table - List regionInfoList = snapshot.getTableToRegionMap().get( - tableName); + List regionInfoList = snapshot.getTableToRegionMap().get(tableName); // Get the total region num for the current table this.totalRegions = regionInfoList.size(); FavoredNodesPlan plan = null; @@ -343,12 +327,9 @@ public void fillUpDispersion(TableName tableName, continue; } // Get the primary, secondary and tertiary region server - ServerName primaryRS = favoredNodes - .get(FavoredNodesPlan.Position.PRIMARY.ordinal()); - ServerName secondaryRS = favoredNodes - .get(FavoredNodesPlan.Position.SECONDARY.ordinal()); - ServerName tertiaryRS = favoredNodes - .get(FavoredNodesPlan.Position.TERTIARY.ordinal()); + ServerName primaryRS = favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); + ServerName secondaryRS = favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); + ServerName tertiaryRS = favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); // Update the primary rs to its region set map Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); @@ -368,26 +349,22 @@ public void fillUpDispersion(TableName tableName, primaryToSecTerRSMap.put(primaryRS, secAndTerSet); } catch (Exception e) { LOG.error("Cannot verify the region assignment for region " - + ((region == null) ? " null " : region.getRegionNameAsString()) - + "because of " + e); + + ((region == null) ? 
" null " : region.getRegionNameAsString()) + "because of " + e); } } float dispersionScoreSummary = 0; float dispersionNumSummary = 0; // Calculate the secondary score for each primary region server - for (Map.Entry entry : - primaryRSToRegionCounterMap.entrySet()) { + for (Map.Entry entry : primaryRSToRegionCounterMap.entrySet()) { ServerName primaryRS = entry.getKey(); Integer regionsOnPrimary = entry.getValue(); // Process the dispersion number and score float dispersionScore = 0; int dispersionNum = 0; - if (primaryToSecTerRSMap.get(primaryRS) != null - && regionsOnPrimary.intValue() != 0) { + if (primaryToSecTerRSMap.get(primaryRS) != null && regionsOnPrimary.intValue() != 0) { dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); - dispersionScore = dispersionNum / - ((float) regionsOnPrimary.intValue() * 2); + dispersionScore = dispersionNum / ((float) regionsOnPrimary.intValue() * 2); } // Update the max dispersion num @@ -423,18 +400,16 @@ public void fillUpDispersion(TableName tableName, // Update the avg dispersion score if (primaryRSToRegionCounterMap.keySet().size() != 0) { - this.avgDispersionScore = dispersionScoreSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); - this.avgDispersionNum = dispersionNumSummary / - (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionScore = + dispersionScoreSummary / (float) primaryRSToRegionCounterMap.keySet().size(); + this.avgDispersionNum = + dispersionNumSummary / (float) primaryRSToRegionCounterMap.keySet().size(); } } /** - * @return list which contains just 3 elements: average dispersion score, max - * dispersion score and min dispersion score as first, second and third element - * respectively. - * + * @return list which contains just 3 elements: average dispersion score, max dispersion score and + * min dispersion score as first, second and third element respectively. 
*/ public List getDispersionInformation() { List dispersion = new ArrayList<>(); @@ -446,41 +421,38 @@ public List getDispersionInformation() { public void print(boolean isDetailMode) { if (!isFilledUp) { - System.err.println("[Error] Region assignment verification report" + - "hasn't been filled up"); + System.err.println("[Error] Region assignment verification report" + "hasn't been filled up"); } - DecimalFormat df = new java.text.DecimalFormat( "#.##"); + DecimalFormat df = new java.text.DecimalFormat("#.##"); // Print some basic information - System.out.println("Region Assignment Verification for Table: " + tableName + - "\n\tTotal regions : " + totalRegions); + System.out.println("Region Assignment Verification for Table: " + tableName + + "\n\tTotal regions : " + totalRegions); // Print the number of regions on each kinds of the favored nodes - System.out.println("\tTotal regions on favored nodes " + - totalFavoredAssignments); + System.out.println("\tTotal regions on favored nodes " + totalFavoredAssignments); for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { - System.out.println("\t\tTotal regions on "+ p.toString() + - " region servers: " + favoredNodes[p.ordinal()]); + System.out.println( + "\t\tTotal regions on " + p.toString() + " region servers: " + favoredNodes[p.ordinal()]); } // Print the number of regions in each kinds of invalid assignment - System.out.println("\tTotal unassigned regions: " + - unAssignedRegionsList.size()); + System.out.println("\tTotal unassigned regions: " + unAssignedRegionsList.size()); if (isDetailMode) { for (RegionInfo region : unAssignedRegionsList) { System.out.println("\t\t" + region.getRegionNameAsString()); } } - System.out.println("\tTotal regions NOT on favored nodes: " + - nonFavoredAssignedRegionList.size()); + System.out + .println("\tTotal regions NOT on favored nodes: " + nonFavoredAssignedRegionList.size()); if (isDetailMode) { for (RegionInfo region : nonFavoredAssignedRegionList) { System.out.println("\t\t" + region.getRegionNameAsString()); } } - System.out.println("\tTotal regions without favored nodes: " + - regionsWithoutValidFavoredNodes.size()); + System.out.println( + "\tTotal regions without favored nodes: " + regionsWithoutValidFavoredNodes.size()); if (isDetailMode) { for (RegionInfo region : regionsWithoutValidFavoredNodes) { System.out.println("\t\t" + region.getRegionNameAsString()); @@ -490,77 +462,68 @@ public void print(boolean isDetailMode) { // Print the locality information if enabled if (this.enforceLocality && totalRegions != 0) { // Print the actual locality for this table - float actualLocality = 100 * - this.actualLocalitySummary / (float) totalRegions; - System.out.println("\n\tThe actual avg locality is " + - df.format(actualLocality) + " %"); + float actualLocality = 100 * this.actualLocalitySummary / (float) totalRegions; + System.out.println("\n\tThe actual avg locality is " + df.format(actualLocality) + " %"); // Print the expected locality if regions are placed on the each kinds of // favored nodes for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { - float avgLocality = 100 * - (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions); - System.out.println("\t\tThe expected avg locality if all regions" + - " on the " + p.toString() + " region servers: " - + df.format(avgLocality) + " %"); + float avgLocality = 100 * (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions); + System.out.println("\t\tThe expected avg locality if all regions" 
+ " on the " + + p.toString() + " region servers: " + df.format(avgLocality) + " %"); } } // Print the region balancing information - System.out.println("\n\tTotal hosting region servers: " + - totalRegionServers); + System.out.println("\n\tTotal hosting region servers: " + totalRegionServers); // Print the region balance information if (totalRegionServers != 0) { - System.out.println( - "\tAvg dispersion num: " +df.format(avgDispersionNum) + - " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + - " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + - " hosts;"); + System.out.println("\tAvg dispersion num: " + df.format(avgDispersionNum) + + " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + + " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + " hosts;"); - System.out.println("\t\tThe number of the region servers with the max" + - " dispersion num: " + this.maxDispersionNumServerSet.size()); + System.out.println("\t\tThe number of the region servers with the max" + " dispersion num: " + + this.maxDispersionNumServerSet.size()); if (isDetailMode) { printHServerAddressSet(maxDispersionNumServerSet); } - System.out.println("\t\tThe number of the region servers with the min" + - " dispersion num: " + this.minDispersionNumServerSet.size()); + System.out.println("\t\tThe number of the region servers with the min" + " dispersion num: " + + this.minDispersionNumServerSet.size()); if (isDetailMode) { printHServerAddressSet(maxDispersionNumServerSet); } - System.out.println( - "\tAvg dispersion score: " + df.format(avgDispersionScore) + - ";\tMax dispersion score: " + df.format(maxDispersionScore) + - ";\tMin dispersion score: " + df.format(minDispersionScore) + ";"); + System.out.println("\tAvg dispersion score: " + df.format(avgDispersionScore) + + ";\tMax dispersion score: " + df.format(maxDispersionScore) + + ";\tMin dispersion score: " + df.format(minDispersionScore) + ";"); - System.out.println("\t\tThe number of the region servers with the max" + - " dispersion score: " + this.maxDispersionScoreServerSet.size()); + System.out.println("\t\tThe number of the region servers with the max" + " dispersion score: " + + this.maxDispersionScoreServerSet.size()); if (isDetailMode) { printHServerAddressSet(maxDispersionScoreServerSet); } - System.out.println("\t\tThe number of the region servers with the min" + - " dispersion score: " + this.minDispersionScoreServerSet.size()); + System.out.println("\t\tThe number of the region servers with the min" + " dispersion score: " + + this.minDispersionScoreServerSet.size()); if (isDetailMode) { printHServerAddressSet(minDispersionScoreServerSet); } - System.out.println( - "\tAvg regions/region server: " + df.format(avgRegionsOnRS) + - ";\tMax regions/region server: " + maxRegionsOnRS + - ";\tMin regions/region server: " + minRegionsOnRS + ";"); + System.out.println("\tAvg regions/region server: " + df.format(avgRegionsOnRS) + + ";\tMax regions/region server: " + maxRegionsOnRS + ";\tMin regions/region server: " + + minRegionsOnRS + ";"); // Print the details about the most loaded region servers - System.out.println("\t\tThe number of the most loaded region servers: " - + mostLoadedRSSet.size()); + System.out + .println("\t\tThe number of the most loaded region servers: " + mostLoadedRSSet.size()); if (isDetailMode) { printHServerAddressSet(mostLoadedRSSet); } // Print the details about the least loaded region servers - System.out.println("\t\tThe number of the least loaded region servers: " - + 
leastLoadedRSSet.size()); + System.out + .println("\t\tThe number of the least loaded region servers: " + leastLoadedRSSet.size()); if (isDetailMode) { printHServerAddressSet(leastLoadedRSSet); } @@ -601,8 +564,8 @@ int getTotalFavoredAssignments() { } /** - * Return the number of regions based on the position (primary/secondary/ - * tertiary) assigned to their favored nodes + * Return the number of regions based on the position (primary/secondary/ tertiary) assigned to + * their favored nodes * @param position * @return the number of regions */ @@ -612,10 +575,10 @@ int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) { private void printHServerAddressSet(Set serverSet) { if (serverSet == null) { - return ; + return; } int i = 0; - for (ServerName addr : serverSet){ + for (ServerName addr : serverSet) { if ((i++) % 3 == 0) { System.out.print("\n\t\t\t"); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 004e3ce680a1..b3043bd2e7f4 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -82,7 +81,6 @@ public interface LoadBalancer extends Stoppable, ConfigurationObserver { */ void updateClusterMetrics(ClusterMetrics metrics); - /** * Set the cluster info provider. Usually it is just a wrapper of master. */ @@ -95,7 +93,7 @@ public interface LoadBalancer extends Stoppable, ConfigurationObserver { * already balanced */ List balanceCluster(Map>> loadOfAllTable) - throws IOException; + throws IOException; /** * Perform a Round Robin assignment of regions. @@ -145,15 +143,15 @@ Map> retainAssignment(Map r */ void postMasterStartupInitialize(); - /*Updates balancer status tag reported to JMX*/ + /* Updates balancer status tag reported to JMX */ void updateBalancerStatus(boolean status); /** * In some scenarios, Balancer needs to update internal status or information according to the * current tables load - * * @param loadOfAllTable region load of servers for all table */ - default void updateBalancerLoadInfo(Map>> - loadOfAllTable){} + default void + updateBalancerLoadInfo(Map>> loadOfAllTable) { + } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java index 54ccac0cb629..db0b7b0bff33 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RackManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,17 +20,16 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.ScriptBasedMapping; +import org.apache.yetus.audience.InterfaceAudience; + /** - * Wrapper over the rack resolution utility in Hadoop. The rack resolution - * utility in Hadoop does resolution from hosts to the racks they belong to. - * + * Wrapper over the rack resolution utility in Hadoop. The rack resolution utility in Hadoop does + * resolution from hosts to the racks they belong to. */ @InterfaceAudience.Private public class RackManager { @@ -43,14 +42,13 @@ public RackManager() { public RackManager(Configuration conf) { switchMapping = ReflectionUtils.instantiateWithCustomCtor( - conf.getClass("hbase.util.ip.to.rack.determiner", ScriptBasedMapping.class, - DNSToSwitchMapping.class).getName(), new Class[]{Configuration.class}, - new Object[]{conf}); + conf.getClass("hbase.util.ip.to.rack.determiner", ScriptBasedMapping.class, + DNSToSwitchMapping.class).getName(), + new Class[] { Configuration.class }, new Object[] { conf }); } /** - * Get the name of the rack containing a server, according to the DNS to - * switch mapping. + * Get the name of the rack containing a server, according to the DNS to switch mapping. * @param server the server for which to get the rack name * @return the rack name of the server */ diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java index f4d6e63771aa..8fc6cd36dbfc 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java @@ -19,21 +19,16 @@ import java.io.Serializable; import java.util.Comparator; - import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Stores the plan for the move of an individual region. - * - * Contains info for the region being moved, info for the server the region - * should be moved from, and info for the server the region should be moved - * to. - * - * The comparable implementation of this class compares only the region - * information and not the source/dest server info. + * Stores the plan for the move of an individual region. Contains info for the region being moved, + * info for the server the region should be moved from, and info for the server the region should be + * moved to. The comparable implementation of this class compares only the region information and + * not the source/dest server info. */ @InterfaceAudience.LimitedPrivate("Coprocessors") @InterfaceStability.Evolving @@ -52,12 +47,9 @@ public int compare(RegionPlan l, RegionPlan r) { } /** - * Instantiate a plan for a region move, moving the specified region from - * the specified source server to the specified destination server. - * - * Destination server can be instantiated as null and later set - * with {@link #setDestination(ServerName)}. 
- * + * Instantiate a plan for a region move, moving the specified region from the specified source + * server to the specified destination server. Destination server can be instantiated as null and + * later set with {@link #setDestination(ServerName)}. * @param hri region to be moved * @param source regionserver region should be moved from * @param dest regionserver region should be moved to @@ -134,7 +126,7 @@ private static int compareTo(RegionPlan left, RegionPlan right) { private static int compareServerName(ServerName left, ServerName right) { if (left == null) { - return right == null? 0: -1; + return right == null ? 0 : -1; } else if (right == null) { return +1; } @@ -189,8 +181,8 @@ public boolean equals(Object obj) { @Override public String toString() { - return "hri=" + this.hri.getEncodedName() + ", source=" + - (this.source == null? "": this.source.toString()) + - ", destination=" + (this.dest == null? "": this.dest.toString()); + return "hri=" + this.hri.getEncodedName() + ", source=" + + (this.source == null ? "" : this.source.toString()) + ", destination=" + + (this.dest == null ? "" : this.dest.toString()); } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index 9aaf111d9800..b266e758d34a 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,22 +48,21 @@ import org.slf4j.LoggerFactory; /** - * Used internally for reading meta and constructing datastructures that are - * then queried, for things like regions to regionservers, table to regions, etc. - * It also records the favored nodes mapping for regions. - * + * Used internally for reading meta and constructing datastructures that are then queried, for + * things like regions to regionservers, table to regions, etc. It also records the favored nodes + * mapping for regions. */ @InterfaceAudience.Private public class SnapshotOfRegionAssignmentFromMeta { - private static final Logger LOG = LoggerFactory.getLogger(SnapshotOfRegionAssignmentFromMeta.class - .getName()); + private static final Logger LOG = + LoggerFactory.getLogger(SnapshotOfRegionAssignmentFromMeta.class.getName()); private final Connection connection; /** the table name to region map */ private final Map> tableToRegionMap; /** the region to region server map */ - //private final Map regionToRegionServerMap; + // private final Map regionToRegionServerMap; private Map regionToRegionServerMap; /** the region name to region info map */ private final Map regionNameToRegionInfoMap; @@ -152,8 +150,8 @@ private void processMetaRecord(Result result) throws IOException { * less than FAVORED_NODES_NUM, lets use as much as we can but log a warning. 
*/ if (favoredServerList.length != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { - LOG.warn("Insufficient favored nodes for region " + hri + " fn: " + - Arrays.toString(favoredServerList)); + LOG.warn("Insufficient favored nodes for region " + hri + " fn: " + + Arrays.toString(favoredServerList)); } for (int i = 0; i < favoredServerList.length; i++) { if (i == PRIMARY.ordinal()) { @@ -167,6 +165,7 @@ private void processMetaRecord(Result result) throws IOException { } } } + /** * Initialize the region assignment snapshot by scanning the hbase:meta table */ @@ -174,7 +173,7 @@ public void initialize() throws IOException { LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot"); // Scan hbase:meta to pick up user regions try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME); - ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) { + ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) { for (;;) { Result result = scanner.next(); if (result == null) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java index c86a60ea4451..533f4b624e70 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,14 @@ public enum ServerState { CRASHED, /** - * Only server which carries meta can have this state. We will split wal for meta and then - * assign meta first before splitting other wals. + * Only server which carries meta can have this state. We will split wal for meta and then assign + * meta first before splitting other wals. */ SPLITTING_META, /** - * Indicate that the meta splitting is done. We need this state so that the UnassignProcedure - * for meta can safely quit. See the comments in UnassignProcedure.remoteCallFailed for more - * details. + * Indicate that the meta splitting is done. We need this state so that the UnassignProcedure for + * meta can safely quit. See the comments in UnassignProcedure.remoteCallFailed for more details. 
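A hedged sketch of driving the snapshot class whose initialize() javadoc appears above. The single-argument constructor and the getRegionToRegionServerMap() accessor are assumptions based on the fields shown in this hunk, not something the patch itself touches.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta;

public class AssignmentSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      SnapshotOfRegionAssignmentFromMeta snapshot =
        new SnapshotOfRegionAssignmentFromMeta(connection);
      // Scans hbase:meta once and builds the region -> server, table -> regions and
      // favored-nodes maps described in the class comment.
      snapshot.initialize();
      System.out.println(snapshot.getRegionToRegionServerMap().size() + " regions found");
    }
  }
}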
*/ SPLITTING_META_DONE, diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java index f15ca92321a0..3a16f21ff3fd 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerClusterState.java @@ -73,11 +73,11 @@ class BalancerClusterState { int[][] regionsPerHost; // hostIndex -> list of regions int[][] regionsPerRack; // rackIndex -> region list Int2IntCounterMap[] colocatedReplicaCountsPerServer; // serverIndex -> counts of colocated - // replicas by primary region index + // replicas by primary region index Int2IntCounterMap[] colocatedReplicaCountsPerHost; // hostIndex -> counts of colocated replicas by - // primary region index + // primary region index Int2IntCounterMap[] colocatedReplicaCountsPerRack; // rackIndex -> counts of colocated replicas by - // primary region index + // primary region index int[][] serversPerHost; // hostIndex -> list of server indexes int[][] serversPerRack; // rackIndex -> list of server indexes @@ -123,15 +123,15 @@ public String getRack(ServerName server) { } BalancerClusterState(Map> clusterState, - Map> loads, RegionHDFSBlockLocationFinder regionFinder, - RackManager rackManager) { + Map> loads, RegionHDFSBlockLocationFinder regionFinder, + RackManager rackManager) { this(null, clusterState, loads, regionFinder, rackManager); } @SuppressWarnings("unchecked") BalancerClusterState(Collection unassignedRegions, - Map> clusterState, Map> loads, - RegionHDFSBlockLocationFinder regionFinder, RackManager rackManager) { + Map> clusterState, Map> loads, + RegionHDFSBlockLocationFinder regionFinder, RackManager rackManager) { if (unassignedRegions == null) { unassignedRegions = Collections.emptyList(); } @@ -156,8 +156,8 @@ public String getRack(ServerName server) { // a matching hostname and port to have the same index. for (ServerName sn : clusterState.keySet()) { if (sn == null) { - LOG.warn("TODO: Enable TRACE on BaseLoadBalancer. Empty servername); " + - "skipping; unassigned regions?"); + LOG.warn("TODO: Enable TRACE on BaseLoadBalancer. Empty servername); " + + "skipping; unassigned regions?"); if (LOG.isTraceEnabled()) { LOG.trace("EMPTY SERVERNAME " + clusterState.toString()); } @@ -229,8 +229,8 @@ public String getRack(ServerName server) { // keep the servername if this is the first server name for this hostname // or this servername has the newest startcode. - if (servers[serverIndex] == null || - servers[serverIndex].getStartcode() < entry.getKey().getStartcode()) { + if (servers[serverIndex] == null + || servers[serverIndex].getStartcode() < entry.getKey().getStartcode()) { servers[serverIndex] = entry.getKey(); } @@ -238,12 +238,12 @@ public String getRack(ServerName server) { // there is another server with the same hostAndPort in ClusterState. 
// allocate the array for the total size regionsPerServer[serverIndex] = - new int[entry.getValue().size() + regionsPerServer[serverIndex].length]; + new int[entry.getValue().size() + regionsPerServer[serverIndex].length]; } else { regionsPerServer[serverIndex] = new int[entry.getValue().size()]; } colocatedReplicaCountsPerServer[serverIndex] = new Int2IntCounterMap( - regionsPerServer[serverIndex].length, Hashing.DEFAULT_LOAD_FACTOR, 0); + regionsPerServer[serverIndex].length, Hashing.DEFAULT_LOAD_FACTOR, 0); serverIndicesSortedByRegionCount[serverIndex] = serverIndex; serverIndicesSortedByLocality[serverIndex] = serverIndex; } @@ -289,7 +289,7 @@ public String getRack(ServerName server) { serversPerHost[i] = new int[serversPerHostList.get(i).size()]; for (int j = 0; j < serversPerHost[i].length; j++) { serversPerHost[i][j] = serversPerHostList.get(i).get(j); - LOG.debug("server {} is on host {}",serversPerHostList.get(i).get(j), i); + LOG.debug("server {} is on host {}", serversPerHostList.get(i).get(j), i); } if (serversPerHost[i].length > 1) { multiServersPerHost = true; @@ -300,13 +300,13 @@ public String getRack(ServerName server) { serversPerRack[i] = new int[serversPerRackList.get(i).size()]; for (int j = 0; j < serversPerRack[i].length; j++) { serversPerRack[i][j] = serversPerRackList.get(i).get(j); - LOG.info("server {} is on rack {}",serversPerRackList.get(i).get(j), i); + LOG.info("server {} is on rack {}", serversPerRackList.get(i).get(j), i); } } numTables = tables.size(); - LOG.debug("Number of tables={}, number of hosts={}, number of racks={}", numTables, - numHosts, numRacks); + LOG.debug("Number of tables={}, number of hosts={}, number of racks={}", numTables, numHosts, + numRacks); numRegionsPerServerPerTable = new int[numTables][numServers]; numRegionsPerTable = new int[numTables]; @@ -342,8 +342,8 @@ public String getRack(ServerName server) { } for (int i = 0; i < regionsPerServer.length; i++) { - colocatedReplicaCountsPerServer[i] = new Int2IntCounterMap( - regionsPerServer[i].length, Hashing.DEFAULT_LOAD_FACTOR, 0); + colocatedReplicaCountsPerServer[i] = + new Int2IntCounterMap(regionsPerServer[i].length, Hashing.DEFAULT_LOAD_FACTOR, 0); for (int j = 0; j < regionsPerServer[i].length; j++) { int primaryIndex = regionIndexToPrimaryIndex[regionsPerServer[i][j]]; colocatedReplicaCountsPerServer[i].getAndIncrement(primaryIndex); @@ -363,16 +363,15 @@ public String getRack(ServerName server) { } private void populateRegionPerLocationFromServer(int[][] regionsPerLocation, - Int2IntCounterMap[] colocatedReplicaCountsPerLocation, - int[][] serversPerLocation) { + Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int[][] serversPerLocation) { for (int i = 0; i < serversPerLocation.length; i++) { int numRegionsPerLocation = 0; for (int j = 0; j < serversPerLocation[i].length; j++) { numRegionsPerLocation += regionsPerServer[serversPerLocation[i][j]].length; } regionsPerLocation[i] = new int[numRegionsPerLocation]; - colocatedReplicaCountsPerLocation[i] = new Int2IntCounterMap(numRegionsPerLocation, - Hashing.DEFAULT_LOAD_FACTOR, 0); + colocatedReplicaCountsPerLocation[i] = + new Int2IntCounterMap(numRegionsPerLocation, Hashing.DEFAULT_LOAD_FACTOR, 0); } for (int i = 0; i < serversPerLocation.length; i++) { @@ -392,7 +391,7 @@ private void populateRegionPerLocationFromServer(int[][] regionsPerLocation, /** Helper for Cluster constructor to handle a region */ private void registerRegion(RegionInfo region, int regionIndex, int serverIndex, - Map> loads, 
RegionHDFSBlockLocationFinder regionFinder) { + Map> loads, RegionHDFSBlockLocationFinder regionFinder) { String tableName = region.getTable().getNameAsString(); if (!tablesToIndex.containsKey(tableName)) { tables.add(tableName); @@ -422,9 +421,9 @@ private void registerRegion(RegionInfo region, int regionIndex, int serverIndex, List loc = regionFinder.getTopBlockLocations(region); regionLocations[regionIndex] = new int[loc.size()]; for (int i = 0; i < loc.size(); i++) { - regionLocations[regionIndex][i] = loc.get(i) == null ? -1 : - (serversToIndex.get(loc.get(i).getAddress()) == null ? -1 : - serversToIndex.get(loc.get(i).getAddress())); + regionLocations[regionIndex][i] = loc.get(i) == null ? -1 + : (serversToIndex.get(loc.get(i).getAddress()) == null ? -1 + : serversToIndex.get(loc.get(i).getAddress())); } } } @@ -464,7 +463,7 @@ public int[] getOrComputeRegionsToMostLocalEntities(BalancerClusterState.Localit * Looks up locality from cache of localities. Will create cache if it does not already exist. */ public float getOrComputeLocality(int region, int entity, - BalancerClusterState.LocalityType type) { + BalancerClusterState.LocalityType type) { switch (type) { case SERVER: return getLocalityOfRegion(region, entity); @@ -480,7 +479,7 @@ public float getOrComputeLocality(int region, int entity, * already exist. */ public double getOrComputeWeightedLocality(int region, int server, - BalancerClusterState.LocalityType type) { + BalancerClusterState.LocalityType type) { return getRegionSizeMB(region) * getOrComputeLocality(region, server, type); } @@ -558,25 +557,25 @@ public void doAction(BalanceAction action) { assert action instanceof AssignRegionAction : action.getClass(); AssignRegionAction ar = (AssignRegionAction) action; regionsPerServer[ar.getServer()] = - addRegion(regionsPerServer[ar.getServer()], ar.getRegion()); + addRegion(regionsPerServer[ar.getServer()], ar.getRegion()); regionMoved(ar.getRegion(), -1, ar.getServer()); break; case MOVE_REGION: assert action instanceof MoveRegionAction : action.getClass(); MoveRegionAction mra = (MoveRegionAction) action; regionsPerServer[mra.getFromServer()] = - removeRegion(regionsPerServer[mra.getFromServer()], mra.getRegion()); + removeRegion(regionsPerServer[mra.getFromServer()], mra.getRegion()); regionsPerServer[mra.getToServer()] = - addRegion(regionsPerServer[mra.getToServer()], mra.getRegion()); + addRegion(regionsPerServer[mra.getToServer()], mra.getRegion()); regionMoved(mra.getRegion(), mra.getFromServer(), mra.getToServer()); break; case SWAP_REGIONS: assert action instanceof SwapRegionsAction : action.getClass(); SwapRegionsAction a = (SwapRegionsAction) action; regionsPerServer[a.getFromServer()] = - replaceRegion(regionsPerServer[a.getFromServer()], a.getFromRegion(), a.getToRegion()); + replaceRegion(regionsPerServer[a.getFromServer()], a.getFromRegion(), a.getToRegion()); regionsPerServer[a.getToServer()] = - replaceRegion(regionsPerServer[a.getToServer()], a.getToRegion(), a.getFromRegion()); + replaceRegion(regionsPerServer[a.getToServer()], a.getToRegion(), a.getFromRegion()); regionMoved(a.getFromRegion(), a.getFromServer(), a.getToServer()); regionMoved(a.getToRegion(), a.getToServer(), a.getFromServer()); break; @@ -640,11 +639,11 @@ boolean wouldLowerAvailability(RegionInfo regionInfo, ServerName serverName) { /** * Common method for better solution check. 
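The doAction MOVE_REGION case above amounts to removing the region index from the source server's array and appending it to the destination's. Below is a tiny standalone sketch of that bookkeeping on plain int arrays; it is illustrative only and intentionally avoids the package-private balancer types.

import java.util.Arrays;

public class MoveBookkeepingSketch {
  // Removes one region index; assumes the index is present exactly once, as in regionsPerServer.
  static int[] removeRegion(int[] regions, int regionIndex) {
    int[] out = new int[regions.length - 1];
    int i = 0;
    for (int r : regions) {
      if (r != regionIndex) {
        out[i++] = r;
      }
    }
    return out;
  }

  // Appends a region index to a server's list.
  static int[] addRegion(int[] regions, int regionIndex) {
    int[] out = Arrays.copyOf(regions, regions.length + 1);
    out[regions.length] = regionIndex;
    return out;
  }

  public static void main(String[] args) {
    int[] serverA = { 0, 3, 7 };
    int[] serverB = { 1 };
    // "Move" region 3 from A to B, mirroring the MOVE_REGION branch of doAction.
    serverA = removeRegion(serverA, 3);
    serverB = addRegion(serverB, 3);
    System.out.println(Arrays.toString(serverA) + " " + Arrays.toString(serverB)); // [0, 7] [1, 3]
  }
}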
* @param colocatedReplicaCountsPerLocation colocatedReplicaCountsPerHost or - * colocatedReplicaCountsPerRack + * colocatedReplicaCountsPerRack * @return 1 for better, -1 for no better, 0 for unknown */ private int checkLocationForPrimary(int location, - Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int primary) { + Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int primary) { if (colocatedReplicaCountsPerLocation[location].containsKey(primary)) { // check for whether there are other Locations that we can place this region for (int i = 0; i < colocatedReplicaCountsPerLocation.length; i++) { @@ -698,17 +697,17 @@ void regionMoved(int region, int oldServer, int newServer) { oldServer, newServer, primary, region); } } + /** * Common method for per host and per Location region index updates when a region is moved. * @param serverIndexToLocation serverIndexToHostIndex or serverIndexToLocationIndex * @param regionsPerLocation regionsPerHost or regionsPerLocation * @param colocatedReplicaCountsPerLocation colocatedReplicaCountsPerHost or - * colocatedReplicaCountsPerRack + * colocatedReplicaCountsPerRack */ - private void updateForLocation(int[] serverIndexToLocation, - int[][] regionsPerLocation, - Int2IntCounterMap[] colocatedReplicaCountsPerLocation, - int oldServer, int newServer, int primary, int region) { + private void updateForLocation(int[] serverIndexToLocation, int[][] regionsPerLocation, + Int2IntCounterMap[] colocatedReplicaCountsPerLocation, int oldServer, int newServer, + int primary, int region) { int oldLocation = oldServer >= 0 ? serverIndexToLocation[oldServer] : -1; int newLocation = serverIndexToLocation[newServer]; if (newLocation != oldLocation) { @@ -721,6 +720,7 @@ private void updateForLocation(int[] serverIndexToLocation, } } + int[] removeRegion(int[] regions, int regionIndex) { // TODO: this maybe costly. 
Consider using linked lists int[] newRegions = new int[regions.length - 1]; @@ -797,7 +797,7 @@ int getLowestLocalityRegionOnServer(int serverIndex) { for (int j = 0; j < regionsPerServer[serverIndex].length; j++) { int regionIndex = regionsPerServer[serverIndex][j]; HDFSBlocksDistribution distribution = - regionFinder.getBlockDistribution(regions[regionIndex]); + regionFinder.getBlockDistribution(regions[regionIndex]); float locality = distribution.getBlockLocalityIndex(servers[serverIndex].getHostname()); // skip empty region if (distribution.getUniqueBlocksTotalWeight() == 0) { @@ -812,11 +812,11 @@ int getLowestLocalityRegionOnServer(int serverIndex) { return -1; } if (LOG.isTraceEnabled()) { - LOG.trace("Lowest locality region is " + - regions[regionsPerServer[serverIndex][lowestLocalityRegionIndex]] - .getRegionNameAsString() + - " with locality " + lowestLocality + " and its region server contains " + - regionsPerServer[serverIndex].length + " regions"); + LOG.trace("Lowest locality region is " + + regions[regionsPerServer[serverIndex][lowestLocalityRegionIndex]] + .getRegionNameAsString() + + " with locality " + lowestLocality + " and its region server contains " + + regionsPerServer[serverIndex].length + " regions"); } return regionsPerServer[serverIndex][lowestLocalityRegionIndex]; } else { @@ -848,12 +848,12 @@ public String toString() { desc.append(sn.getAddress().toString()).append(", "); } desc.append("], serverIndicesSortedByRegionCount=") - .append(Arrays.toString(serverIndicesSortedByRegionCount)).append(", regionsPerServer=") - .append(Arrays.deepToString(regionsPerServer)); + .append(Arrays.toString(serverIndicesSortedByRegionCount)).append(", regionsPerServer=") + .append(Arrays.deepToString(regionsPerServer)); desc.append(", numRegions=").append(numRegions).append(", numServers=").append(numServers) - .append(", numTables=").append(numTables).append(", numMovedRegions=").append(numMovedRegions) - .append('}'); + .append(", numTables=").append(numTables).append(", numMovedRegions=") + .append(numMovedRegions).append('}'); return desc.toString(); } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java index 7cc33751a0f0..ffb36cb8ca1a 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BalancerRegionLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.RegionMetrics; @@ -24,8 +23,8 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Wrapper class for the few fields required by the {@link StochasticLoadBalancer} - * from the full {@link RegionMetrics}. + * Wrapper class for the few fields required by the {@link StochasticLoadBalancer} from the full + * {@link RegionMetrics}. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index ac5ef44bed0a..242a49198219 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -63,11 +63,11 @@ public abstract class BaseLoadBalancer implements LoadBalancer { private static final Logger LOG = LoggerFactory.getLogger(BaseLoadBalancer.class); public static final String BALANCER_DECISION_BUFFER_ENABLED = - "hbase.master.balancer.decision.buffer.enabled"; + "hbase.master.balancer.decision.buffer.enabled"; public static final boolean DEFAULT_BALANCER_DECISION_BUFFER_ENABLED = false; public static final String BALANCER_REJECTION_BUFFER_ENABLED = - "hbase.master.balancer.rejection.buffer.enabled"; + "hbase.master.balancer.rejection.buffer.enabled"; public static final boolean DEFAULT_BALANCER_REJECTION_BUFFER_ENABLED = false; protected static final int MIN_SERVER_BALANCE = 2; @@ -93,8 +93,8 @@ protected BaseLoadBalancer() { } /** - * This Constructor accepts an instance of MetricsBalancer, - * which will be used instead of creating a new one + * This Constructor accepts an instance of MetricsBalancer, which will be used instead of creating + * a new one */ protected BaseLoadBalancer(MetricsBalancer metricsBalancer) { this.metricsBalancer = (metricsBalancer != null) ? metricsBalancer : new MetricsBalancer(); @@ -112,7 +112,6 @@ public void updateClusterMetrics(ClusterMetrics st) { } } - @Override public void setClusterInfoProvider(ClusterInfoProvider provider) { this.provider = provider; @@ -125,10 +124,10 @@ public void postMasterStartupInitialize() { } } - protected final boolean idleRegionServerExist(BalancerClusterState c){ + protected final boolean idleRegionServerExist(BalancerClusterState c) { boolean isServerExistsWithMoreRegions = false; boolean isServerExistsWithZeroRegions = false; - for (int[] serverList: c.regionsPerServer){ + for (int[] serverList : c.regionsPerServer) { if (serverList.length > 1) { isServerExistsWithMoreRegions = true; } @@ -140,26 +139,24 @@ protected final boolean idleRegionServerExist(BalancerClusterState c){ } /** - * Generates a bulk assignment plan to be used on cluster startup using a - * simple round-robin assignment. + * Generates a bulk assignment plan to be used on cluster startup using a simple round-robin + * assignment. *

    - * Takes a list of all the regions and all the servers in the cluster and - * returns a map of each server to the regions that it should be assigned. + * Takes a list of all the regions and all the servers in the cluster and returns a map of each + * server to the regions that it should be assigned. *

    - * Currently implemented as a round-robin assignment. Same invariant as load - * balancing, all servers holding floor(avg) or ceiling(avg). - * - * TODO: Use block locations from HDFS to place regions with their blocks - * + * Currently implemented as a round-robin assignment. Same invariant as load balancing, all + * servers holding floor(avg) or ceiling(avg). TODO: Use block locations from HDFS to place + * regions with their blocks * @param regions all regions * @param servers all servers - * @return map of server to the regions it should take, or emptyMap if no - * assignment is possible (ie. no servers) + * @return map of server to the regions it should take, or emptyMap if no assignment is possible + * (ie. no servers) */ @Override @NonNull public Map> roundRobinAssignment(List regions, - List servers) throws HBaseIOException { + List servers) throws HBaseIOException { metricsBalancer.incrMiscInvocations(); int numServers = servers == null ? 0 : servers.size(); if (numServers == 0) { @@ -183,8 +180,8 @@ public Map> roundRobinAssignment(List r } private BalancerClusterState createCluster(List servers, - Collection regions) throws HBaseIOException { - boolean hasRegionReplica= false; + Collection regions) throws HBaseIOException { + boolean hasRegionReplica = false; try { if (provider != null) { hasRegionReplica = provider.hasRegionReplica(regions); @@ -210,8 +207,7 @@ private BalancerClusterState createCluster(List servers, clusterState.put(server, Collections.emptyList()); } } - return new BalancerClusterState(regions, clusterState, null, this.regionFinder, - rackManager); + return new BalancerClusterState(regions, clusterState, null, this.regionFinder, rackManager); } private List findIdleServers(List servers) { @@ -238,30 +234,27 @@ public ServerName randomAssignment(RegionInfo regionInfo, List serve if (idleServers.size() == 1) { return idleServers.get(0); } - final List finalServers = idleServers.isEmpty() ? - servers : idleServers; + final List finalServers = idleServers.isEmpty() ? servers : idleServers; List regions = Lists.newArrayList(regionInfo); BalancerClusterState cluster = createCluster(finalServers, regions); return randomAssignment(cluster, regionInfo, finalServers); } /** - * Generates a bulk assignment startup plan, attempting to reuse the existing - * assignment information from META, but adjusting for the specified list of - * available/online servers available for assignment. + * Generates a bulk assignment startup plan, attempting to reuse the existing assignment + * information from META, but adjusting for the specified list of available/online servers + * available for assignment. *

    - * Takes a map of all regions to their existing assignment from META. Also - * takes a list of online servers for regions to be assigned to. Attempts to - * retain all assignment, so in some instances initial assignment will not be - * completely balanced. + * Takes a map of all regions to their existing assignment from META. Also takes a list of online + * servers for regions to be assigned to. Attempts to retain all assignment, so in some instances + * initial assignment will not be completely balanced. *

    - * Any leftover regions without an existing server to be assigned to will be - * assigned randomly to available servers. - * + * Any leftover regions without an existing server to be assigned to will be assigned randomly to + * available servers. * @param regions regions and existing assignment from meta * @param servers available servers - * @return map of servers and regions to be assigned to them, or emptyMap if no - * assignment is possible (ie. no servers) + * @return map of servers and regions to be assigned to them, or emptyMap if no assignment is + * possible (ie. no servers) */ @Override @NonNull @@ -360,11 +353,10 @@ public Map> retainAssignment(Map 0) { - randomAssignMsg = - numRandomAssignments + " regions were assigned " - + "to random hosts, since the old hosts for these regions are no " - + "longer present in the cluster. These hosts were:\n " - + Joiner.on("\n ").join(oldHostsNoLongerPresent); + randomAssignMsg = numRandomAssignments + " regions were assigned " + + "to random hosts, since the old hosts for these regions are no " + + "longer present in the cluster. These hosts were:\n " + + Joiner.on("\n ").join(oldHostsNoLongerPresent); } LOG.info("Reassigned " + regions.size() + " regions. " + numRetainedAssigments @@ -432,8 +424,8 @@ public void stop(String why) { } /** - * Updates the balancer status tag reported to JMX - */ + * Updates the balancer status tag reported to JMX + */ @Override public void updateBalancerStatus(boolean status) { metricsBalancer.balancerStatus(status); @@ -456,8 +448,7 @@ private ServerName randomAssignment(BalancerClusterState cluster, RegionInfo reg if (!usedSNs.contains(sn)) { usedSNs.add(sn); } - } while (cluster.wouldLowerAvailability(regionInfo, sn) - && iterations++ < maxIterations); + } while (cluster.wouldLowerAvailability(regionInfo, sn) && iterations++ < maxIterations); if (iterations >= maxIterations) { // We have reached the max. Means the servers that we collected is still lowering the // availability @@ -480,7 +471,7 @@ private ServerName randomAssignment(BalancerClusterState cluster, RegionInfo reg * Round robin a list of regions to a list of servers */ private void roundRobinAssignment(BalancerClusterState cluster, List regions, - List servers, Map> assignments) { + List servers, Map> assignments) { Random rand = ThreadLocalRandom.current(); List unassignedRegions = new ArrayList<>(); int numServers = servers.size(); @@ -507,7 +498,6 @@ private void roundRobinAssignment(BalancerClusterState cluster, List regionIdx++; } - List lastFewRegions = new ArrayList<>(); // assign the remaining by going through the list and try to assign to servers one-by-one serverIdx = rand.nextInt(numServers); @@ -541,13 +531,13 @@ private void roundRobinAssignment(BalancerClusterState cluster, List // return a modifiable map, as we may add more entries into the returned map. private Map> - getRegionAssignmentsByServer(Collection regions) { - return provider != null ? new HashMap<>(provider.getSnapShotOfAssignment(regions)) : - new HashMap<>(); + getRegionAssignmentsByServer(Collection regions) { + return provider != null ? 
new HashMap<>(provider.getSnapShotOfAssignment(regions)) + : new HashMap<>(); } - protected final Map> toEnsumbleTableLoad( - Map>> LoadOfAllTable) { + protected final Map> + toEnsumbleTableLoad(Map>> LoadOfAllTable) { Map> returnMap = new TreeMap<>(); for (Map> serverNameListMap : LoadOfAllTable.values()) { serverNameListMap.forEach((serverName, regionInfoList) -> { @@ -572,14 +562,14 @@ protected final Map> toEnsumbleTableLoad( * @return List of plans */ protected abstract List balanceTable(TableName tableName, - Map> loadOfOneTable); + Map> loadOfOneTable); /** * Called before actually executing balanceCluster. The sub classes could override this method to * do some initialization work. */ protected void - preBalanceCluster(Map>> loadOfAllTable) { + preBalanceCluster(Map>> loadOfAllTable) { } /** @@ -595,7 +585,7 @@ protected abstract List balanceTable(TableName tableName, */ @Override public final List - balanceCluster(Map>> loadOfAllTable) { + balanceCluster(Map>> loadOfAllTable) { preBalanceCluster(loadOfAllTable); if (isByTable) { List result = new ArrayList<>(); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CPRequestCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CPRequestCostFunction.java index 9a6a43494c6f..608c8bb215dc 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CPRequestCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CPRequestCostFunction.java @@ -21,8 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Compute the cost of total number of coprocessor requests The more unbalanced the higher the - * computed cost will be. This uses a rolling average of regionload. + * Compute the cost of total number of coprocessor requests The more unbalanced the higher the + * computed cost will be. This uses a rolling average of regionload. */ @InterfaceAudience.Private class CPRequestCostFunction extends CostFromRegionLoadAsRateFunction { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CandidateGenerator.java index faaaff9733c8..0ef91918b15e 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CandidateGenerator.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.util.Map; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterInfoProvider.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterInfoProvider.java index cfd50fc11a21..c00a86ef4ecc 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterInfoProvider.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterInfoProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -81,7 +81,7 @@ public interface ClusterInfoProvider extends ConfigurationObserver { * Used to refresh region block locations on HDFS. 
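A sketch of how the two entry points reformatted above are driven: roundRobinAssignment for the startup bulk plan and balanceCluster for steady-state region plans. Obtaining the balancer through LoadBalancerFactory (also touched later in this diff) is assumed; the master-side wiring (ClusterInfoProvider, initialize, metrics) is deliberately elided, so this is not a complete setup.

import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;

public class BalancerDriverSketch {
  // regions, servers and loadOfAllTable would come from the master; they are placeholders here.
  static List<RegionPlan> rebalance(List<RegionInfo> regions, List<ServerName> servers,
      Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);
    // Startup path: spread all regions across the live servers round-robin.
    Map<ServerName, List<RegionInfo>> startupPlan =
      balancer.roundRobinAssignment(regions, servers);
    System.out.println("startup assignment covers " + startupPlan.size() + " servers");
    // Steady-state path: hand the per-table load map to balanceCluster and apply the plans.
    return balancer.balanceCluster(loadOfAllTable);
  }
}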
*/ HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf, - TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException; + TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException; /** * Check whether we have region replicas enabled for the tables of the given regions. @@ -97,7 +97,7 @@ HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf, * Returns a copy of the internal list of online servers matched by the given {@code filter}. */ List getOnlineServersListWithPredicator(List servers, - Predicate filter); + Predicate filter); /** * Get a snapshot of the current assignment status. diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java index 5d1e1ccac2db..2c93e7852136 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java index 977c6b14ec0a..26c5188faaf3 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/CostFunction.java @@ -101,8 +101,8 @@ public void updateWeight(double[] weights) { * @return The scaled value. */ protected static double scale(double min, double max, double value) { - if (max <= min || value <= min - || Math.abs(max - min) <= COST_EPSILON || Math.abs(value - min) <= COST_EPSILON) { + if (max <= min || value <= min || Math.abs(max - min) <= COST_EPSILON + || Math.abs(value - min) <= COST_EPSILON) { return 0; } if (max <= min || Math.abs(max - min) <= COST_EPSILON) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java index 29afd59084f7..645f74012e97 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/DoubleArrayCost.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -81,8 +81,7 @@ private static double computeCost(double[] stats) { } // No need to compute standard deviation with division by cluster size when scaling. 
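The scale helper above maps a raw cost into a 0..1 range between the given min and max; the hunk only shows the clamp-to-zero branches, so the sketch below assumes the non-degenerate case is a plain linear interpolation capped at 1. Treat it as an illustration of the scaling idea, not the exact implementation.

public class ScaleSketch {
  static final double COST_EPSILON = 1e-9; // stand-in for the class constant

  // Assumed shape: 0 at or below min, 1 at or above max, linear in between.
  static double scale(double min, double max, double value) {
    if (max <= min || value <= min || Math.abs(max - min) <= COST_EPSILON
        || Math.abs(value - min) <= COST_EPSILON) {
      return 0;
    }
    return Math.min(1.0, (value - min) / (max - min));
  }

  public static void main(String[] args) {
    System.out.println(scale(0, 10, 0));   // 0.0  (already at the minimum cost)
    System.out.println(scale(0, 10, 2.5)); // 0.25
    System.out.println(scale(0, 10, 10));  // 1.0  (worst case)
  }
}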
totalCost = Math.sqrt(totalCost); - return CostFunction.scale(getMinSkew(total, count), - getMaxSkew(total, count), totalCost); + return CostFunction.scale(getMinSkew(total, count), getMaxSkew(total, count), totalCost); } private static double getSum(double[] stats) { @@ -105,21 +104,20 @@ public static double getMinSkew(double total, double numServers) { // It's possible that there aren't enough regions to go around double min; if (numServers > total) { - min = ((numServers - total) * mean * mean + (1 - mean) * (1 - mean) * total) ; + min = ((numServers - total) * mean * mean + (1 - mean) * (1 - mean) * total); } else { // Some will have 1 more than everything else. int numHigh = (int) (total - (Math.floor(mean) * numServers)); int numLow = (int) (numServers - numHigh); - min = numHigh * (Math.ceil(mean) - mean) * (Math.ceil(mean) - mean) + - numLow * (mean - Math.floor(mean)) * (mean - Math.floor(mean)); + min = numHigh * (Math.ceil(mean) - mean) * (Math.ceil(mean) - mean) + + numLow * (mean - Math.floor(mean)) * (mean - Math.floor(mean)); } return Math.sqrt(min); } /** - * Return the max deviation of distribution - * Compute max as if all region servers had 0 and one had the sum of all costs. This must be - * a zero sum cost for this to make sense. + * Return the max deviation of distribution Compute max as if all region servers had 0 and one had + * the sum of all costs. This must be a zero sum cost for this to make sense. */ public static double getMaxSkew(double total, double numServers) { if (numServers == 0) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java index acccc321ae3c..b6cab8485fb0 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java @@ -55,23 +55,20 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that - * assigns favored nodes for each region. There is a Primary RegionServer that hosts - * the region, and then there is Secondary and Tertiary RegionServers. Currently, the - * favored nodes information is used in creating HDFS files - the Primary RegionServer - * passes the primary, secondary, tertiary node addresses as hints to the - * DistributedFileSystem API for creating files on the filesystem. These nodes are - * treated as hints by the HDFS to place the blocks of the file. This alleviates the - * problem to do with reading from remote nodes (since we can make the Secondary - * RegionServer as the new Primary RegionServer) after a region is recovered. This - * should help provide consistent read latencies for the regions even when their - * primary region servers die. This provides two + * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that assigns favored + * nodes for each region. There is a Primary RegionServer that hosts the region, and then there is + * Secondary and Tertiary RegionServers. Currently, the favored nodes information is used in + * creating HDFS files - the Primary RegionServer passes the primary, secondary, tertiary node + * addresses as hints to the DistributedFileSystem API for creating files on the filesystem. 
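A worked example of the getMinSkew formula shown above, which gives the smallest possible deviation when `total` regions are spread over `numServers`. The sketch assumes `mean = total / numServers`, as the surrounding code defines just outside the visible hunk.

public class MinSkewSketch {
  static double getMinSkew(double total, double numServers) {
    double mean = total / numServers; // defined just above the visible hunk
    double min;
    if (numServers > total) {
      // Not enough regions to go around: some servers stay empty.
      min = (numServers - total) * mean * mean + (1 - mean) * (1 - mean) * total;
    } else {
      // Some servers hold ceil(mean) regions, the rest floor(mean).
      int numHigh = (int) (total - (Math.floor(mean) * numServers));
      int numLow = (int) (numServers - numHigh);
      min = numHigh * (Math.ceil(mean) - mean) * (Math.ceil(mean) - mean)
          + numLow * (mean - Math.floor(mean)) * (mean - Math.floor(mean));
    }
    return Math.sqrt(min);
  }

  public static void main(String[] args) {
    // 10 regions on 4 servers: the best split is 3,3,2,2, so every server is 0.5 away from
    // the mean of 2.5 and the minimum skew is sqrt(4 * 0.25) = 1.0.
    System.out.println(getMinSkew(10, 4)); // 1.0
  }
}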
These + * nodes are treated as hints by the HDFS to place the blocks of the file. This alleviates the + * problem to do with reading from remote nodes (since we can make the Secondary RegionServer as the + * new Primary RegionServer) after a region is recovered. This should help provide consistent read + * latencies for the regions even when their primary region servers die. This provides two * {@link CandidateGenerator} - * */ @InterfaceAudience.Private -public class FavoredStochasticBalancer extends StochasticLoadBalancer implements - FavoredNodesPromoter { +public class FavoredStochasticBalancer extends StochasticLoadBalancer + implements FavoredNodesPromoter { private static final Logger LOG = LoggerFactory.getLogger(FavoredStochasticBalancer.class); @@ -99,18 +96,15 @@ protected CandidateGenerator getRandomGenerator() { } /** - * Round robin assignment: Segregate the regions into two types: - * - * 1. The regions that have favored node assignment where at least one of the favored node - * is still alive. In this case, try to adhere to the current favored nodes assignment as - * much as possible - i.e., if the current primary is gone, then make the secondary or - * tertiary as the new host for the region (based on their current load). Note that we don't - * change the favored node assignments here (even though one or more favored node is - * currently down). That will be done by the admin operations. - * - * 2. The regions that currently don't have favored node assignments. Generate favored nodes - * for them and then assign. Generate the primary fn in round robin fashion and generate - * secondary and tertiary as per favored nodes constraints. + * Round robin assignment: Segregate the regions into two types: 1. The regions that have favored + * node assignment where at least one of the favored node is still alive. In this case, try to + * adhere to the current favored nodes assignment as much as possible - i.e., if the current + * primary is gone, then make the secondary or tertiary as the new host for the region (based on + * their current load). Note that we don't change the favored node assignments here (even though + * one or more favored node is currently down). That will be done by the admin operations. 2. The + * regions that currently don't have favored node assignments. Generate favored nodes for them and + * then assign. Generate the primary fn in round robin fashion and generate secondary and tertiary + * as per favored nodes constraints. */ @Override @NonNull @@ -131,14 +125,14 @@ public Map> roundRobinAssignment(List r // Assign all system regions Map> systemAssignments = - super.roundRobinAssignment(Lists.newArrayList(systemRegions), servers); + super.roundRobinAssignment(Lists.newArrayList(systemRegions), servers); // Segregate favored and non-favored nodes regions and assign accordingly. 
- Pair>, List> segregatedRegions = - segregateRegionsAndAssignRegionsWithFavoredNodes(regionSet, servers); + Pair>, List> segregatedRegions = + segregateRegionsAndAssignRegionsWithFavoredNodes(regionSet, servers); Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); Map> regionsWithoutFN = - generateFNForRegionsWithoutFN(helper, segregatedRegions.getSecond()); + generateFNForRegionsWithoutFN(helper, segregatedRegions.getSecond()); // merge the assignment maps mergeAssignmentMaps(assignmentMap, systemAssignments); @@ -146,8 +140,8 @@ public Map> roundRobinAssignment(List r mergeAssignmentMaps(assignmentMap, regionsWithoutFN); } catch (Exception ex) { - throw new HBaseIOException("Encountered exception while doing favored-nodes assignment " - + ex + " Falling back to regular assignment", ex); + throw new HBaseIOException("Encountered exception while doing favored-nodes assignment " + ex + + " Falling back to regular assignment", ex); } return assignmentMap; } @@ -188,8 +182,8 @@ private Map> generateFNForRegionsWithoutFN( * without favored nodes. */ private Pair>, List> - segregateRegionsAndAssignRegionsWithFavoredNodes(Collection regions, - List onlineServers) throws HBaseIOException { + segregateRegionsAndAssignRegionsWithFavoredNodes(Collection regions, + List onlineServers) throws HBaseIOException { // Since we expect FN to be present most of the time, lets create map with same size Map> assignmentMapForFavoredNodes = @@ -218,7 +212,7 @@ private Map> generateFNForRegionsWithoutFN( } } assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, primaryHost, - secondaryHost, tertiaryHost); + secondaryHost, tertiaryHost); } else { regionsWithNoFavoredNodes.add(region); } @@ -227,7 +221,7 @@ private Map> generateFNForRegionsWithoutFN( } private void addRegionToMap(Map> assignmentMapForFavoredNodes, - RegionInfo region, ServerName host) { + RegionInfo region, ServerName host) { List regionsOnServer = assignmentMapForFavoredNodes.get(host); if (regionsOnServer == null) { regionsOnServer = Lists.newArrayList(); @@ -286,9 +280,9 @@ private void assignRegionToAvailableFavoredNode( } /** - * If we have favored nodes for a region, we will return one of the FN as destination. If - * favored nodes are not present for a region, we will generate and return one of the FN as - * destination. If we can't generate anything, lets fallback. + * If we have favored nodes for a region, we will return one of the FN as destination. If favored + * nodes are not present for a region, we will generate and return one of the FN as destination. + * If we can't generate anything, lets fallback. */ @Override public ServerName randomAssignment(RegionInfo regionInfo, List servers) @@ -374,16 +368,16 @@ public Map> retainAssignment(Map newFavoredNodes = Lists.newArrayList(); newFavoredNodes.add(primary); newFavoredNodes.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), - secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE)); + secondaryAndTertiaryNodes[0].getPort(), NON_STARTCODE)); newFavoredNodes.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), - secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE)); + secondaryAndTertiaryNodes[1].getPort(), NON_STARTCODE)); regionFNMap.put(hri, newFavoredNodes); addRegionToMap(assignmentMap, hri, sn); } else { throw new HBaseIOException("Cannot generate secondary/tertiary FN for " + hri - + " generated " - + (secondaryAndTertiaryNodes != null ? 
secondaryAndTertiaryNodes : " nothing")); + + " generated " + + (secondaryAndTertiaryNodes != null ? secondaryAndTertiaryNodes : " nothing")); } } else { List onlineFN = getOnlineFavoredNodes(servers, favoredNodes); @@ -396,9 +390,9 @@ public Map> retainAssignment(Map getInheritedFNForDaughter(FavoredNodeAssignmentHelper he } /** - * Generate favored nodes for a region during merge. Choose the FN from one of the sources to - * keep it simple. + * Generate favored nodes for a region during merge. Choose the FN from one of the sources to keep + * it simple. */ @Override - public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents) + public void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo[] mergeParents) throws IOException { updateFavoredNodesForRegion(merged, fnm.getFavoredNodes(mergeParents[0])); } @@ -536,7 +530,7 @@ protected BalanceAction generate(BalancerClusterState cluster) { if (thisRegion == -1) { if (cluster.regionsPerServer[thisServer].length > 0) { LOG.trace("Could not pick lowest local region even when region server held " - + cluster.regionsPerServer[thisServer].length + " regions"); + + cluster.regionsPerServer[thisServer].length + " regions"); } return BalanceAction.NULL_ACTION; } @@ -587,8 +581,8 @@ private int pickLowestLocalRegionOnServer(BalancerClusterState cluster, int serv } /* - * This is like LoadCandidateGenerator, but we choose appropriate FN for the region on the - * most loaded server. + * This is like LoadCandidateGenerator, but we choose appropriate FN for the region on the most + * loaded server. */ class FavoredNodeLoadPicker extends CandidateGenerator { @@ -615,7 +609,7 @@ BalanceAction generate(BalancerClusterState cluster) { private int pickLeastLoadedServer(final BalancerClusterState cluster, int thisServer) { Integer[] servers = cluster.serverIndicesSortedByRegionCount; int index; - for (index = 0; index < servers.length ; index++) { + for (index = 0; index < servers.length; index++) { if ((servers[index] != null) && servers[index] != thisServer) { break; } @@ -624,7 +618,7 @@ private int pickLeastLoadedServer(final BalancerClusterState cluster, int thisSe } private int pickLeastLoadedFNServer(final BalancerClusterState cluster, - List favoredNodes, int currentServerIndex) { + List favoredNodes, int currentServerIndex) { List fnIndex = new ArrayList<>(); for (ServerName sn : favoredNodes) { if (cluster.serversToIndex.containsKey(sn.getAddress())) { @@ -648,7 +642,7 @@ private int pickLeastLoadedFNServer(final BalancerClusterState cluster, private int pickMostLoadedServer(final BalancerClusterState cluster) { Integer[] servers = cluster.serverIndicesSortedByRegionCount; int index; - for (index = servers.length - 1; index > 0 ; index--) { + for (index = servers.length - 1; index > 0; index--) { if (servers[index] != null) { break; } @@ -663,7 +657,7 @@ private int pickMostLoadedServer(final BalancerClusterState cluster) { */ @Override protected List balanceTable(TableName tableName, - Map> loadOfOneTable) { + Map> loadOfOneTable) { List regionPlans = Lists.newArrayList(); Map> correctAssignments = new HashMap<>(); int misplacedRegions = 0; @@ -675,13 +669,13 @@ protected List balanceTable(TableName tableName, for (RegionInfo hri : entry.getValue()) { List favoredNodes = fnm.getFavoredNodes(hri); - if (FavoredNodesPlan.getFavoredServerPosition(favoredNodes, current) != null || - !FavoredNodesManager.isFavoredNodeApplicable(hri)) { + if (FavoredNodesPlan.getFavoredServerPosition(favoredNodes, current) 
!= null + || !FavoredNodesManager.isFavoredNodeApplicable(hri)) { regions.add(hri); } else { // No favored nodes, lets unassign. - LOG.warn("Region not on favored nodes, unassign. Region: " + hri + " current: " + - current + " favored nodes: " + favoredNodes); + LOG.warn("Region not on favored nodes, unassign. Region: " + hri + " current: " + current + + " favored nodes: " + favoredNodes); try { provider.unassign(hri); } catch (IOException e) { @@ -702,4 +696,3 @@ protected List balanceTable(TableName tableName, return regionPlans; } } - diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java index 4cda751e3b92..af4e6ab4e48b 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/HeterogeneousRegionCountCostFunction.java @@ -1,16 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.master.balancer; @@ -153,7 +156,7 @@ protected double cost() { * used to load the rule files. */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|HeterogeneousRegionCountCostFunction).java") + allowedOnPath = ".*(/src/test/.*|HeterogeneousRegionCountCostFunction).java") void loadRules() { final List lines = readFile(this.rulesPath); if (null == lines) { @@ -213,7 +216,7 @@ private List readFileFromHDFS(final String filename) throws IOException final Path path = new Path(filename); final FileSystem fs = FileSystem.get(this.conf); try (BufferedReader reader = - new BufferedReader(new InputStreamReader(fs.open(path), StandardCharsets.UTF_8))) { + new BufferedReader(new InputStreamReader(fs.open(path), StandardCharsets.UTF_8))) { return CharStreams.readLines(reader); } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java index a43fdc88f148..50d257012ff0 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,8 +47,8 @@ public static Class getDefaultLoadBalancerClass() { public static LoadBalancer getLoadBalancer(Configuration conf) { // Create the balancer Class balancerKlass = - conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(), - LoadBalancer.class); + conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(), + LoadBalancer.class); return ReflectionUtils.newInstance(balancerKlass); } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java index 8604f4a47f7f..4cd46ccc95f3 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadCandidateGenerator.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.util.concurrent.ThreadLocalRandom; @@ -42,12 +41,12 @@ private int pickLeastLoadedServer(final BalancerClusterState cluster, int thisSe continue; } if (selectedIndex != -1 - && cluster.getNumRegionsComparator().compare(servers[i], servers[selectedIndex]) != 0) { + && cluster.getNumRegionsComparator().compare(servers[i], servers[selectedIndex]) != 0) { // Exhausted servers of the same region count break; } // we don't know how many servers have the same region count, we will randomly select one - // using a simplified inline reservoir sampling by assignmening a random number to stream + // using a simplified inline reservoir sampling by assignmening a random number to stream // data and choose the greatest one. (http://gregable.com/2007/10/reservoir-sampling.html) double currentRandom = ThreadLocalRandom.current().nextDouble(); if (currentRandom > currentLargestRandom) { @@ -67,13 +66,13 @@ private int pickMostLoadedServer(final BalancerClusterState cluster, int thisSer if (servers[i] == null || servers[i] == thisServer) { continue; } - if (selectedIndex != -1 && cluster.getNumRegionsComparator().compare(servers[i], - servers[selectedIndex]) != 0) { + if (selectedIndex != -1 + && cluster.getNumRegionsComparator().compare(servers[i], servers[selectedIndex]) != 0) { // Exhausted servers of the same region count break; } // we don't know how many servers have the same region count, we will randomly select one - // using a simplified inline reservoir sampling by assignmening a random number to stream + // using a simplified inline reservoir sampling by assignmening a random number to stream // data and choose the greatest one. (http://gregable.com/2007/10/reservoir-sampling.html) double currentRandom = ThreadLocalRandom.current().nextDouble(); if (currentRandom > currentLargestRandom) { @@ -81,7 +80,7 @@ private int pickMostLoadedServer(final BalancerClusterState cluster, int thisSer currentLargestRandom = currentRandom; } } - return selectedIndex == -1? -1 : servers[selectedIndex]; + return selectedIndex == -1 ? 
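The LoadCandidateGenerator comments above describe breaking ties among equally loaded servers with a single-pass "keep the candidate that drew the largest random number" trick, a simplified reservoir sample. Below is a standalone sketch of that idea over a plain int array; the method and variable names are invented for the illustration and the real code works on the cluster's sorted server indexes instead.

import java.util.concurrent.ThreadLocalRandom;

public class ReservoirTieBreakSketch {
  // Picks one index uniformly at random among all entries equal to the minimum value,
  // in a single pass, by remembering whichever candidate drew the largest random number.
  static int pickRandomMinIndex(int[] load) {
    int selected = -1;
    double largestRandom = -1;
    int best = Integer.MAX_VALUE;
    for (int i = 0; i < load.length; i++) {
      if (load[i] < best) {         // strictly better: reset the "reservoir"
        best = load[i];
        selected = i;
        largestRandom = ThreadLocalRandom.current().nextDouble();
      } else if (load[i] == best) { // tie: keep whichever drew the larger random number
        double r = ThreadLocalRandom.current().nextDouble();
        if (r > largestRandom) {
          largestRandom = r;
          selected = i;
        }
      }
    }
    return selected;
  }

  public static void main(String[] args) {
    int[] regionsPerServer = { 5, 2, 9, 2, 2 };
    // Indexes 1, 3 and 4 all hold the minimum load; each is returned about a third of the time.
    System.out.println(pickRandomMinIndex(regionsPerServer));
  }
}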
-1 : servers[selectedIndex]; } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCandidateGenerator.java index c8e56f193bf0..6a115dd660bb 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCandidateGenerator.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.util.Optional; @@ -49,22 +48,22 @@ BalanceAction generate(BalancerClusterState cluster) { } private Optional tryMoveOrSwap(BalancerClusterState cluster, int fromServer, - int fromRegion, int toServer) { + int fromRegion, int toServer) { // Try move first. We know apriori fromRegion has the highest locality on toServer if (cluster.serverHasTooFewRegions(toServer)) { return Optional.of(getAction(fromServer, fromRegion, toServer, -1)); } // Compare locality gain/loss from swapping fromRegion with regions on toServer - double fromRegionLocalityDelta = getWeightedLocality(cluster, fromRegion, toServer) - - getWeightedLocality(cluster, fromRegion, fromServer); + double fromRegionLocalityDelta = getWeightedLocality(cluster, fromRegion, toServer) + - getWeightedLocality(cluster, fromRegion, fromServer); int toServertotalRegions = cluster.regionsPerServer[toServer].length; if (toServertotalRegions > 0) { int startIndex = ThreadLocalRandom.current().nextInt(toServertotalRegions); for (int i = 0; i < toServertotalRegions; i++) { int toRegionIndex = (startIndex + i) % toServertotalRegions; int toRegion = cluster.regionsPerServer[toServer][toRegionIndex]; - double toRegionLocalityDelta = getWeightedLocality(cluster, toRegion, fromServer) - - getWeightedLocality(cluster, toRegion, toServer); + double toRegionLocalityDelta = getWeightedLocality(cluster, toRegion, fromServer) + - getWeightedLocality(cluster, toRegion, toServer); // If locality would remain neutral or improve, attempt the swap if (fromRegionLocalityDelta + toRegionLocalityDelta >= 0) { return Optional.of(getAction(fromServer, fromRegion, toServer, toRegion)); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java index 678c9a3e9adf..edb42a4fdedf 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/LocalityBasedCostFunction.java @@ -34,7 +34,7 @@ abstract class LocalityBasedCostFunction extends CostFunction { private double locality; // current locality across cluster weighted by local data size LocalityBasedCostFunction(Configuration conf, LocalityType type, String localityCostKey, - float defaultLocalityCost) { + float defaultLocalityCost) { this.type = type; this.setMultiplier(conf.getFloat(localityCostKey, defaultLocalityCost)); this.locality = 0.0; @@ -66,11 +66,11 @@ void prepare(BalancerClusterState cluster) { @Override protected void regionMoved(int region, int oldServer, int newServer) { int oldEntity = - type == LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer]; + type == LocalityType.SERVER ? 
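The tryMoveOrSwap logic above accepts a swap when the combined weighted-locality change of the two regions is non-negative. A tiny arithmetic sketch of that acceptance test follows, using made-up weighted-locality numbers purely for illustration.

public class SwapAcceptanceSketch {
  // Returns true when swapping the two regions would keep or improve total weighted locality,
  // mirroring the "fromRegionLocalityDelta + toRegionLocalityDelta >= 0" test above.
  static boolean acceptSwap(double fromOnTo, double fromOnFrom, double toOnFrom, double toOnTo) {
    double fromRegionLocalityDelta = fromOnTo - fromOnFrom; // gain for the region moving out
    double toRegionLocalityDelta = toOnFrom - toOnTo;       // gain for the region moving in
    return fromRegionLocalityDelta + toRegionLocalityDelta >= 0;
  }

  public static void main(String[] args) {
    // Region A: weighted locality 0.9 on the target server vs 0.2 where it lives now (+0.7).
    // Region B: weighted locality 0.5 on A's server vs 0.8 where it lives now (-0.3).
    // Net +0.4, so the swap is attempted.
    System.out.println(acceptSwap(0.9, 0.2, 0.5, 0.8)); // true
  }
}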
oldServer : cluster.serverIndexToRackIndex[oldServer]; int newEntity = - type == LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer]; + type == LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer]; double localityDelta = - getWeightedLocality(region, newEntity) - getWeightedLocality(region, oldEntity); + getWeightedLocality(region, newEntity) - getWeightedLocality(region, oldEntity); double normalizedDelta = bestLocality == 0 ? 0.0 : localityDelta / bestLocality; locality += normalizedDelta; } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MemStoreSizeCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MemStoreSizeCostFunction.java index 80abac1f1115..34f0cbe6f011 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MemStoreSizeCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MemStoreSizeCostFunction.java @@ -28,7 +28,7 @@ class MemStoreSizeCostFunction extends CostFromRegionLoadAsRateFunction { private static final String MEMSTORE_SIZE_COST_KEY = - "hbase.master.balancer.stochastic.memstoreSizeCost"; + "hbase.master.balancer.stochastic.memstoreSizeCost"; private static final float DEFAULT_MEMSTORE_SIZE_COST = 5; MemStoreSizeCostFunction(Configuration conf) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java index 015e1d486c16..8a7561f7c08f 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java index 6c79f054ee4a..198fa3b9a822 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.yetus.audience.InterfaceAudience; /** - * This metrics balancer uses extended source for stochastic load balancer - * to report its related metrics to JMX. For details, refer to HBASE-13965 + * This metrics balancer uses extended source for stochastic load balancer to report its related + * metrics to JMX. 
For details, refer to HBASE-13965 */ @InterfaceAudience.Private public class MetricsStochasticBalancer extends MetricsBalancer { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveCostFunction.java index 21018368f4dd..4eb0f0bd5bc2 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveCostFunction.java @@ -28,9 +28,9 @@ class MoveCostFunction extends CostFunction { private static final String MOVE_COST_KEY = "hbase.master.balancer.stochastic.moveCost"; private static final String MOVE_COST_OFFPEAK_KEY = - "hbase.master.balancer.stochastic.moveCost.offpeak"; + "hbase.master.balancer.stochastic.moveCost.offpeak"; private static final String MAX_MOVES_PERCENT_KEY = - "hbase.master.balancer.stochastic.maxMovePercent"; + "hbase.master.balancer.stochastic.maxMovePercent"; static final float DEFAULT_MOVE_COST = 7; static final float DEFAULT_MOVE_COST_OFFPEAK = 3; private static final int DEFAULT_MAX_MOVES = 600; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveRegionAction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveRegionAction.java index f73fada18759..822c47b2f19b 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveRegionAction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/MoveRegionAction.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/PrimaryRegionCountSkewCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/PrimaryRegionCountSkewCostFunction.java index a78ff8992f6f..f6640185affd 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/PrimaryRegionCountSkewCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/PrimaryRegionCountSkewCostFunction.java @@ -28,7 +28,7 @@ class PrimaryRegionCountSkewCostFunction extends CostFunction { private static final String PRIMARY_REGION_COUNT_SKEW_COST_KEY = - "hbase.master.balancer.stochastic.primaryRegionCountCost"; + "hbase.master.balancer.stochastic.primaryRegionCountCost"; private static final float DEFAULT_PRIMARY_REGION_COUNT_SKEW_COST = 500; private final DoubleArrayCost cost = new DoubleArrayCost(); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RackLocalityCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RackLocalityCostFunction.java index 0e4735479a13..e3cbf6d6c7eb 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RackLocalityCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RackLocalityCostFunction.java @@ -25,7 +25,7 @@ class RackLocalityCostFunction extends LocalityBasedCostFunction { private static final String RACK_LOCALITY_COST_KEY = - "hbase.master.balancer.stochastic.rackLocalityCost"; + "hbase.master.balancer.stochastic.rackLocalityCost"; private static final float DEFAULT_RACK_LOCALITY_COST = 15; public RackLocalityCostFunction(Configuration 
conf) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ReadRequestCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ReadRequestCostFunction.java index 402d144777d7..a83357cdd40d 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ReadRequestCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ReadRequestCostFunction.java @@ -28,7 +28,7 @@ class ReadRequestCostFunction extends CostFromRegionLoadAsRateFunction { private static final String READ_REQUEST_COST_KEY = - "hbase.master.balancer.stochastic.readRequestCost"; + "hbase.master.balancer.stochastic.readRequestCost"; private static final float DEFAULT_READ_REQUEST_COST = 5; ReadRequestCostFunction(Configuration conf) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java index 442bbc9b7bcf..393ca50e42d8 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionCountSkewCostFunction.java @@ -26,7 +26,7 @@ @InterfaceAudience.Private class RegionCountSkewCostFunction extends CostFunction { static final String REGION_COUNT_SKEW_COST_KEY = - "hbase.master.balancer.stochastic.regionCountCost"; + "hbase.master.balancer.stochastic.regionCountCost"; static final float DEFAULT_REGION_COUNT_SKEW_COST = 500; private final DoubleArrayCost cost = new DoubleArrayCost(); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionHDFSBlockLocationFinder.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionHDFSBlockLocationFinder.java index 9634dd1eb309..418b972eeb85 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionHDFSBlockLocationFinder.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionHDFSBlockLocationFinder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
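The multiplier keys touched in the hunks above (for example hbase.master.balancer.stochastic.readRequestCost and hbase.master.balancer.stochastic.regionCountCost) are read from the Configuration, so individual cost functions can be re-weighted without code changes. A hedged sketch of overriding them programmatically; the chosen values are only an example:

    import org.apache.hadoop.conf.Configuration;

    final class CostMultiplierTuningSketch {
      static Configuration tuned() {
        Configuration conf = new Configuration();
        // Larger multipliers make the corresponding cost dominate the weighted total.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        conf.setFloat("hbase.master.balancer.stochastic.readRequestCost", 10f);
        return conf;
      }
    }
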
See the NOTICE file * distributed with this work for additional information @@ -42,6 +42,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; @@ -61,7 +62,7 @@ class RegionHDFSBlockLocationFinder extends Configured { private static final long CACHE_TIME = 240 * 60 * 1000; private static final float EPSILON = 0.0001f; private static final HDFSBlocksDistribution EMPTY_BLOCK_DISTRIBUTION = - new HDFSBlocksDistribution(); + new HDFSBlocksDistribution(); private volatile ClusterMetrics status; private volatile ClusterInfoProvider provider; private final ListeningExecutorService executor; @@ -69,24 +70,24 @@ class RegionHDFSBlockLocationFinder extends Configured { private long lastFullRefresh = EnvironmentEdgeManager.currentTime(); private CacheLoader loader = - new CacheLoader() { - - @Override - public ListenableFuture reload(final RegionInfo hri, - HDFSBlocksDistribution oldValue) throws Exception { - return executor.submit(new Callable() { - @Override - public HDFSBlocksDistribution call() throws Exception { - return internalGetTopBlockLocation(hri); - } - }); - } + new CacheLoader() { + + @Override + public ListenableFuture reload(final RegionInfo hri, + HDFSBlocksDistribution oldValue) throws Exception { + return executor.submit(new Callable() { + @Override + public HDFSBlocksDistribution call() throws Exception { + return internalGetTopBlockLocation(hri); + } + }); + } - @Override - public HDFSBlocksDistribution load(RegionInfo key) throws Exception { - return internalGetTopBlockLocation(key); - } - }; + @Override + public HDFSBlocksDistribution load(RegionInfo key) throws Exception { + return internalGetTopBlockLocation(key); + } + }; // The cache for where regions are located. 
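The loader reformatted above follows the standard Guava LoadingCache pattern: entries expire a fixed time after they are written and are recomputed through a CacheLoader on the next read (with reload() used for asynchronous refresh). A self-contained sketch of the same pattern using plain Guava and illustrative key/value types:

    import java.util.concurrent.TimeUnit;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    final class ExpiringCacheSketch {
      // Expired keys fall back to load() on the next access, mirroring how the
      // block-location cache above recomputes the distribution for a region.
      static LoadingCache<String, Integer> build(long cacheTimeMillis) {
        return CacheBuilder.newBuilder()
            .expireAfterWrite(cacheTimeMillis, TimeUnit.MILLISECONDS)
            .build(new CacheLoader<String, Integer>() {
              @Override
              public Integer load(String key) {
                return key.length(); // stand-in for the expensive computation
              }
            });
      }
    }
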
private LoadingCache cache = null; @@ -103,7 +104,7 @@ public HDFSBlocksDistribution load(RegionInfo key) throws Exception { */ private LoadingCache createCache() { return CacheBuilder.newBuilder().expireAfterWrite(CACHE_TIME, TimeUnit.MILLISECONDS) - .build(loader); + .build(loader); } void setClusterInfoProvider(ClusterInfoProvider provider) { @@ -129,8 +130,8 @@ void setClusterMetrics(ClusterMetrics status) { */ private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetrics newStatus) { if (oldStatus == null || newStatus == null) { - LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", - oldStatus, newStatus); + LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", oldStatus, + newStatus); return; } @@ -165,7 +166,7 @@ private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetr } private float getOldLocality(ServerName newServer, byte[] regionName, - Map oldServers) { + Map oldServers) { ServerMetrics serverMetrics = oldServers.get(newServer); if (serverMetrics == null) { return -1f; @@ -215,7 +216,7 @@ private HDFSBlocksDistribution internalGetTopBlockLocation(RegionInfo region) { TableDescriptor tableDescriptor = getDescriptor(region.getTable()); if (tableDescriptor != null) { HDFSBlocksDistribution blocksDistribution = - provider.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region); + provider.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region); return blocksDistribution; } } catch (IOException ioe) { @@ -244,7 +245,7 @@ private TableDescriptor getDescriptor(TableName tableName) throws IOException { * @return ServerName list */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*|.*/RegionHDFSBlockLocationFinder.java") + allowedOnPath = ".*/src/test/.*|.*/RegionHDFSBlockLocationFinder.java") List mapHostNameToServerName(List hosts) { if (hosts == null || status == null) { if (hosts == null) { @@ -311,7 +312,7 @@ private ListenableFuture asyncGetBlockDistribution(Regio void refreshAndWait(Collection hris) { ArrayList> regionLocationFutures = - new ArrayList<>(hris.size()); + new ArrayList<>(hris.size()); for (RegionInfo hregionInfo : hris) { regionLocationFutures.add(asyncGetBlockDistribution(hregionInfo)); } @@ -331,7 +332,7 @@ void refreshAndWait(Collection hris) { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") LoadingCache getCache() { return cache; } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java index 911b70bd6fec..68f442d63a51 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,14 +18,13 @@ package org.apache.hadoop.hbase.master.balancer; import java.util.Comparator; - import org.apache.hadoop.hbase.client.RegionInfo; /** - * The following comparator assumes that RegionId from HRegionInfo can represent - * the age of the region - larger RegionId means the region is younger. 
This - * comparator is used in balanceCluster() to account for the out-of-band regions - * which were assigned to the server after some other region server crashed. + * The following comparator assumes that RegionId from HRegionInfo can represent the age of the + * region - larger RegionId means the region is younger. This comparator is used in balanceCluster() + * to account for the out-of-band regions which were assigned to the server after some other region + * server crashed. */ class RegionInfoComparator implements Comparator { @Override diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaCandidateGenerator.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaCandidateGenerator.java index e0fd6966c42f..4e414860df92 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaCandidateGenerator.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaCandidateGenerator.java @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.util.concurrent.ThreadLocalRandom; - import org.agrona.collections.Int2IntCounterMap; import org.agrona.collections.IntArrayList; import org.apache.yetus.audience.InterfaceAudience; @@ -43,7 +41,7 @@ class RegionReplicaCandidateGenerator extends CandidateGenerator { * @return a regionIndex for the selected primary or -1 if there is no co-locating */ int selectCoHostedRegionPerGroup(Int2IntCounterMap colocatedReplicaCountsPerGroup, - int[] regionsPerGroup, int[] regionIndexToPrimaryIndex) { + int[] regionsPerGroup, int[] regionIndexToPrimaryIndex) { final IntArrayList colocated = new IntArrayList(colocatedReplicaCountsPerGroup.size(), -1); colocatedReplicaCountsPerGroup.forEach((primary, count) -> { if (count > 1) { // means consecutive primaries, indicating co-location @@ -75,9 +73,9 @@ BalanceAction generate(BalancerClusterState cluster) { return BalanceAction.NULL_ACTION; } - int regionIndex = selectCoHostedRegionPerGroup( - cluster.colocatedReplicaCountsPerServer[serverIndex], - cluster.regionsPerServer[serverIndex], cluster.regionIndexToPrimaryIndex); + int regionIndex = + selectCoHostedRegionPerGroup(cluster.colocatedReplicaCountsPerServer[serverIndex], + cluster.regionsPerServer[serverIndex], cluster.regionIndexToPrimaryIndex); // if there are no pairs of region replicas co-hosted, default to random generator if (regionIndex == -1) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java index cd4012a0e8ef..da9fac6055b0 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaGroupingCostFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,8 @@ package org.apache.hadoop.hbase.master.balancer; import java.util.concurrent.atomic.AtomicLong; - import org.agrona.collections.Hashing; import org.agrona.collections.Int2IntCounterMap; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -47,8 +45,8 @@ final void prepare(BalancerClusterState cluster) { protected final long getMaxCost(BalancerClusterState cluster) { // max cost is the case where every region replica is hosted together regardless of host - Int2IntCounterMap colocatedReplicaCounts = new Int2IntCounterMap(cluster.numRegions, - Hashing.DEFAULT_LOAD_FACTOR, 0); + Int2IntCounterMap colocatedReplicaCounts = + new Int2IntCounterMap(cluster.numRegions, Hashing.DEFAULT_LOAD_FACTOR, 0); for (int i = 0; i < cluster.regionIndexToPrimaryIndex.length; i++) { colocatedReplicaCounts.getAndIncrement(cluster.regionIndexToPrimaryIndex[i]); } @@ -91,7 +89,7 @@ protected final long costPerGroup(Int2IntCounterMap colocatedReplicaCounts) { final AtomicLong cost = new AtomicLong(0); // colocatedReplicaCounts is a sorted array of primary ids of regions. Replicas of regions // sharing the same primary will have consecutive numbers in the array. - colocatedReplicaCounts.forEach((primary,count) -> { + colocatedReplicaCounts.forEach((primary, count) -> { if (count > 1) { // means consecutive primaries, indicating co-location cost.getAndAdd((count - 1) * (count - 1)); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaHostCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaHostCostFunction.java index 658b5c862a18..13aa1dc0718e 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaHostCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaHostCostFunction.java @@ -30,7 +30,7 @@ class RegionReplicaHostCostFunction extends RegionReplicaGroupingCostFunction { private static final String REGION_REPLICA_HOST_COST_KEY = - "hbase.master.balancer.stochastic.regionReplicaHostCostKey"; + "hbase.master.balancer.stochastic.regionReplicaHostCostKey"; private static final float DEFAULT_REGION_REPLICA_HOST_COST_KEY = 100000; private Int2IntCounterMap[] colocatedReplicaCountsPerGroup; @@ -46,8 +46,9 @@ protected void loadCosts() { maxCost = cluster.numHosts > 1 ? getMaxCost(cluster) : 0; costsPerGroup = new long[cluster.numHosts]; // either server based or host based - colocatedReplicaCountsPerGroup = cluster.multiServersPerHost - ? cluster.colocatedReplicaCountsPerHost : cluster.colocatedReplicaCountsPerServer; + colocatedReplicaCountsPerGroup = + cluster.multiServersPerHost ? 
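costPerGroup, reformatted above, charges each server/host/rack group (count - 1)^2 for every primary whose count replicas are co-hosted there, so a single co-located pair costs 1 while three copies of the same region on one host cost 4. A minimal sketch of that accumulation, with an ordinary Map standing in for the Int2IntCounterMap:

    import java.util.Map;

    final class ReplicaColocationCostSketch {
      static long costPerGroup(Map<Integer, Integer> colocatedReplicaCountsByPrimary) {
        long cost = 0;
        for (int count : colocatedReplicaCountsByPrimary.values()) {
          if (count > 1) { // more than one replica of the same region in this group
            cost += (long) (count - 1) * (count - 1);
          }
        }
        return cost;
      }
    }
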
cluster.colocatedReplicaCountsPerHost + : cluster.colocatedReplicaCountsPerServer; for (int i = 0; i < colocatedReplicaCountsPerGroup.length; i++) { costsPerGroup[i] = costPerGroup(colocatedReplicaCountsPerGroup[i]); } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaRackCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaRackCostFunction.java index 2775bac975bb..e6427d160dcd 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaRackCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionReplicaRackCostFunction.java @@ -28,7 +28,7 @@ class RegionReplicaRackCostFunction extends RegionReplicaGroupingCostFunction { private static final String REGION_REPLICA_RACK_COST_KEY = - "hbase.master.balancer.stochastic.regionReplicaRackCostKey"; + "hbase.master.balancer.stochastic.regionReplicaRackCostKey"; private static final float DEFAULT_REGION_REPLICA_RACK_COST_KEY = 10000; public RegionReplicaRackCostFunction(Configuration conf) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java index 0698d128eb30..b06e838724ab 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,8 @@ package org.apache.hadoop.hbase.master.balancer; import java.io.Serializable; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ServerName; +import org.apache.yetus.audience.InterfaceAudience; /** * Data structure that holds servername and 'load'. diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java index 84418b36f611..0bb62aa2a794 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -62,13 +62,14 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { private List serverLoadList = new ArrayList<>(); // overallSlop to control simpleLoadBalancer's cluster level threshold private float overallSlop; + /** - * Stores additional per-server information about the regions added/removed - * during the run of the balancing algorithm. + * Stores additional per-server information about the regions added/removed during the run of the + * balancing algorithm. *

    - * For servers that shed regions, we need to track which regions we have already - * shed. nextRegionForUnload contains the index in the list of regions on - * the server that is the next to be shed. + * For servers that shed regions, we need to track which regions we have already shed. + * nextRegionForUnload contains the index in the list of regions on the server that is the + * next to be shed. */ private static final class BalanceInfo { @@ -108,13 +109,13 @@ void setNextRegionForUnload(int nextRegionForUnload) { * Pass RegionStates and allow balancer to set the current cluster load. */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|SimpleLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|SimpleLoadBalancer).java") void setClusterLoad(Map>> clusterLoad) { serverLoadList.clear(); Map server2LoadMap = new HashMap<>(); float sum = 0; for (Map.Entry>> clusterEntry : clusterLoad - .entrySet()) { + .entrySet()) { for (Map.Entry> entry : clusterEntry.getValue().entrySet()) { int regionNum = entry.getValue().size(); server2LoadMap.compute(entry.getKey(), (k, v) -> v == null ? regionNum : regionNum + v); @@ -129,7 +130,7 @@ void setClusterLoad(Map>> clusterLoa @Override protected void - preBalanceCluster(Map>> loadOfAllTable) { + preBalanceCluster(Map>> loadOfAllTable) { // We need clusterLoad of all regions on every server to achieve overall balanced setClusterLoad(loadOfAllTable); } @@ -145,14 +146,15 @@ public void onConfigurationChange(Configuration conf) { float originSlop = slop; float originOverallSlop = overallSlop; loadConf(conf); - LOG.info("Update configuration of SimpleLoadBalancer, previous slop is {}," - + " current slop is {}, previous overallSlop is {}, current overallSlop is {}", + LOG.info( + "Update configuration of SimpleLoadBalancer, previous slop is {}," + + " current slop is {}, previous overallSlop is {}, current overallSlop is {}", originSlop, slop, originOverallSlop, overallSlop); } private void setLoad(List slList, int i, int loadChange) { ServerAndLoad newsl = - new ServerAndLoad(slList.get(i).getServerName(), slList.get(i).getLoad() + loadChange); + new ServerAndLoad(slList.get(i).getServerName(), slList.get(i).getLoad() + loadChange); slList.set(i, newsl); } @@ -165,7 +167,7 @@ private boolean overallNeedsBalance() { int floor = (int) Math.floor(avgLoadOverall * (1 - overallSlop)); int ceiling = (int) Math.ceil(avgLoadOverall * (1 + overallSlop)); int max = 0, min = Integer.MAX_VALUE; - for(ServerAndLoad server : serverLoadList){ + for (ServerAndLoad server : serverLoadList) { max = Math.max(server.getLoad(), max); min = Math.min(server.getLoad(), min); } @@ -201,10 +203,10 @@ private boolean needsBalance(BalancerClusterState c) { NavigableMap> serversByLoad = cs.getServersByLoad(); if (LOG.isTraceEnabled()) { // If nothing to balance, then don't say anything unless trace-level logging. 
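overallNeedsBalance, part of the hunk above, treats the cluster as balanced when every server's load sits inside the slop window [floor(avg * (1 - overallSlop)), ceil(avg * (1 + overallSlop))]. A small sketch of that check under illustrative names:

    final class SlopWindowSketch {
      // Returns true when some server falls outside the tolerated window and an
      // overall (cluster-level) balancing pass is therefore worthwhile.
      static boolean needsOverallBalance(int[] regionsPerServer, float slop) {
        if (regionsPerServer.length == 0) {
          return false;
        }
        double avg = 0;
        for (int load : regionsPerServer) {
          avg += load;
        }
        avg /= regionsPerServer.length;
        int floor = (int) Math.floor(avg * (1 - slop));
        int ceiling = (int) Math.ceil(avg * (1 + slop));
        int max = Integer.MIN_VALUE;
        int min = Integer.MAX_VALUE;
        for (int load : regionsPerServer) {
          max = Math.max(max, load);
          min = Math.min(min, load);
        }
        return max > ceiling || min < floor;
      }
    }
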
- LOG.trace("Skipping load balancing because balanced cluster; " + "servers=" + - cs.getNumServers() + " regions=" + cs.getNumRegions() + " average=" + average + - " mostloaded=" + serversByLoad.lastKey().getLoad() + " leastloaded=" + - serversByLoad.firstKey().getLoad()); + LOG.trace("Skipping load balancing because balanced cluster; " + "servers=" + + cs.getNumServers() + " regions=" + cs.getNumRegions() + " average=" + average + + " mostloaded=" + serversByLoad.lastKey().getLoad() + " leastloaded=" + + serversByLoad.firstKey().getLoad()); } return false; } @@ -212,89 +214,58 @@ private boolean needsBalance(BalancerClusterState c) { } /** - * Generate a global load balancing plan according to the specified map of - * server information to the most loaded regions of each server. - * - * The load balancing invariant is that all servers are within 1 region of the - * average number of regions per server. If the average is an integer number, - * all servers will be balanced to the average. Otherwise, all servers will - * have either floor(average) or ceiling(average) regions. - * - * HBASE-3609 Modeled regionsToMove using Guava's MinMaxPriorityQueue so that - * we can fetch from both ends of the queue. - * At the beginning, we check whether there was empty region server - * just discovered by Master. If so, we alternately choose new / old - * regions from head / tail of regionsToMove, respectively. This alternation - * avoids clustering young regions on the newly discovered region server. - * Otherwise, we choose new regions from head of regionsToMove. - * - * Another improvement from HBASE-3609 is that we assign regions from - * regionsToMove to underloaded servers in round-robin fashion. - * Previously one underloaded server would be filled before we move onto - * the next underloaded server, leading to clustering of young regions. - * - * Finally, we randomly shuffle underloaded servers so that they receive - * offloaded regions relatively evenly across calls to balanceCluster(). - * - * The algorithm is currently implemented as such: - * + * Generate a global load balancing plan according to the specified map of server information to + * the most loaded regions of each server. The load balancing invariant is that all servers are + * within 1 region of the average number of regions per server. If the average is an integer + * number, all servers will be balanced to the average. Otherwise, all servers will have either + * floor(average) or ceiling(average) regions. HBASE-3609 Modeled regionsToMove using Guava's + * MinMaxPriorityQueue so that we can fetch from both ends of the queue. At the beginning, we + * check whether there was empty region server just discovered by Master. If so, we alternately + * choose new / old regions from head / tail of regionsToMove, respectively. This alternation + * avoids clustering young regions on the newly discovered region server. Otherwise, we choose new + * regions from head of regionsToMove. Another improvement from HBASE-3609 is that we assign + * regions from regionsToMove to underloaded servers in round-robin fashion. Previously one + * underloaded server would be filled before we move onto the next underloaded server, leading to + * clustering of young regions. Finally, we randomly shuffle underloaded servers so that they + * receive offloaded regions relatively evenly across calls to balanceCluster(). The algorithm is + * currently implemented as such: *
      *
    1. Determine the two valid numbers of regions each server should have, - * MIN=floor(average) and MAX=ceiling(average). - * - *
    2. Iterate down the most loaded servers, shedding regions from each so - * each server hosts exactly MAX regions. Stop once you reach a - * server that already has <= MAX regions. - *

      - * Order the regions to move from most recent to least. - * - *

    3. Iterate down the least loaded servers, assigning regions so each server - * has exactly MIN regions. Stop once you reach a server that - * already has >= MIN regions. - * - * Regions being assigned to underloaded servers are those that were shed - * in the previous step. It is possible that there were not enough - * regions shed to fill each underloaded server to MIN. If so we - * end up with a number of regions required to do so, neededRegions. - * - * It is also possible that we were able to fill each underloaded but ended - * up with regions that were unassigned from overloaded servers but that - * still do not have assignment. - * - * If neither of these conditions hold (no regions needed to fill the - * underloaded servers, no regions leftover from overloaded servers), - * we are done and return. Otherwise we handle these cases below. - * - *
    4. If neededRegions is non-zero (still have underloaded servers), - * we iterate the most loaded servers again, shedding a single server from - * each (this brings them from having MAX regions to having - * MIN regions). - * - *
    5. We now definitely have more regions that need assignment, either from - * the previous step or from the original shedding from overloaded servers. - * Iterate the least loaded servers filling each to MIN. - * - *
    6. If we still have more regions that need assignment, again iterate the - * least loaded servers, this time giving each one (filling them to - * MAX) until we run out. - * - *
    7. All servers will now either host MIN or MAX regions. - * - * In addition, any server hosting >= MAX regions is guaranteed - * to end up with MAX regions at the end of the balancing. This - * ensures the minimal number of regions possible are moved. + * MIN=floor(average) and MAX=ceiling(average). + *
    8. Iterate down the most loaded servers, shedding regions from each so each server hosts + * exactly MAX regions. Stop once you reach a server that already has <= MAX + * regions. + *

      + * Order the regions to move from most recent to least. + *

    9. Iterate down the least loaded servers, assigning regions so each server has exactly + * MIN regions. Stop once you reach a server that already has >= MIN regions. + * Regions being assigned to underloaded servers are those that were shed in the previous step. It + * is possible that there were not enough regions shed to fill each underloaded server to + * MIN. If so we end up with a number of regions required to do so, neededRegions. + * It is also possible that we were able to fill each underloaded but ended up with regions that + * were unassigned from overloaded servers but that still do not have assignment. If neither of + * these conditions hold (no regions needed to fill the underloaded servers, no regions leftover + * from overloaded servers), we are done and return. Otherwise we handle these cases below. + *
    10. If neededRegions is non-zero (still have underloaded servers), we iterate the most + * loaded servers again, shedding a single server from each (this brings them from having + * MAX regions to having MIN regions). + *
    11. We now definitely have more regions that need assignment, either from the previous step or + * from the original shedding from overloaded servers. Iterate the least loaded servers filling + * each to MIN. + *
    12. If we still have more regions that need assignment, again iterate the least loaded servers, + * this time giving each one (filling them to MAX) until we run out. + *
    13. All servers will now either host MIN or MAX regions. In addition, any server + * hosting >= MAX regions is guaranteed to end up with MAX regions at the end of + * the balancing. This ensures the minimal number of regions possible are moved. *
    - * - * TODO: We can at-most reassign the number of regions away from a particular - * server to be how many they report as most loaded. - * Should we just keep all assignment in memory? Any objections? - * Does this mean we need HeapSize on HMaster? Or just careful monitor? - * (current thinking is we will hold all assignments in memory) - * - * @param loadOfOneTable Map of regionservers and their load/region information to - * a list of their most loaded regions - * @return a list of regions to be moved, including source and destination, - * or null if cluster is already balanced + * TODO: We can at-most reassign the number of regions away from a particular server to be how + * many they report as most loaded. Should we just keep all assignment in memory? Any objections? + * Does this mean we need HeapSize on HMaster? Or just careful monitor? (current thinking is we + * will hold all assignments in memory) + * @param loadOfOneTable Map of regionservers and their load/region information to a list of their + * most loaded regions + * @return a list of regions to be moved, including source and destination, or null if cluster is + * already balanced */ @Override protected List balanceTable(TableName tableName, @@ -304,7 +275,7 @@ protected List balanceTable(TableName tableName, // construct a Cluster object with clusterMap and rest of the // argument as defaults BalancerClusterState c = - new BalancerClusterState(loadOfOneTable, null, this.regionFinder, this.rackManager); + new BalancerClusterState(loadOfOneTable, null, this.regionFinder, this.rackManager); if (!needsBalance(c) && !this.overallNeedsBalance()) { return null; } @@ -313,20 +284,20 @@ protected List balanceTable(TableName tableName, NavigableMap> serversByLoad = cs.getServersByLoad(); int numRegions = cs.getNumRegions(); float average = cs.getLoadAverage(); - int max = (int)Math.ceil(average); - int min = (int)average; + int max = (int) Math.ceil(average); + int min = (int) average; // Using to check balance result. 
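The reflowed javadoc above pins the balancing invariant to MIN = floor(average) and MAX = ceiling(average) regions per server, which is what the method body computes via (int) average and Math.ceil(average). A tiny worked sketch: 10 regions over 4 servers gives an average of 2.5, so every server should end up holding either 2 or 3 regions.

    final class MinMaxBoundsSketch {
      static int[] bounds(int numRegions, int numServers) {
        float average = (float) numRegions / numServers;
        int min = (int) average;            // floor(average) for non-negative averages
        int max = (int) Math.ceil(average); // ceiling(average)
        return new int[] { min, max };
      }

      public static void main(String[] args) {
        int[] b = bounds(10, 4);
        System.out.println("min=" + b[0] + ", max=" + b[1]); // prints min=2, max=3
      }
    }
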
StringBuilder strBalanceParam = new StringBuilder(); strBalanceParam.append("Balance parameter: numRegions=").append(numRegions) - .append(", numServers=").append(numServers).append(", max=").append(max) - .append(", min=").append(min); + .append(", numServers=").append(numServers).append(", max=").append(max).append(", min=") + .append(min); LOG.debug(strBalanceParam.toString()); // Balance the cluster // TODO: Look at data block locality or a more complex load to do this MinMaxPriorityQueue regionsToMove = - MinMaxPriorityQueue.orderedBy(rpComparator).create(); + MinMaxPriorityQueue.orderedBy(rpComparator).create(); List regionsToReturn = new ArrayList<>(); // Walk down most loaded, pruning each to the max @@ -334,8 +305,8 @@ protected List balanceTable(TableName tableName, // flag used to fetch regions from head and tail of list, alternately boolean fetchFromTail = false; Map serverBalanceInfo = new TreeMap<>(); - for (Map.Entry> server: - serversByLoad.descendingMap().entrySet()) { + for (Map.Entry> server : serversByLoad.descendingMap() + .entrySet()) { ServerAndLoad sal = server.getKey(); int load = sal.getLoad(); if (load <= max) { @@ -349,7 +320,7 @@ protected List balanceTable(TableName tableName, // after some other region server crashed Collections.sort(regions, riComparator); int numTaken = 0; - for (int i = 0; i <= numToOffload; ) { + for (int i = 0; i <= numToOffload;) { RegionInfo hri = regions.get(i); // fetch from head if (fetchFromTail) { hri = regions.get(regions.size() - 1 - i); @@ -372,8 +343,7 @@ protected List balanceTable(TableName tableName, Map underloadedServers = new HashMap<>(); int maxToTake = numRegions - min; - for (Map.Entry> server: - serversByLoad.entrySet()) { + for (Map.Entry> server : serversByLoad.entrySet()) { if (maxToTake == 0) { break; // no more to take } @@ -389,11 +359,11 @@ protected List balanceTable(TableName tableName, int serversUnderloaded = underloadedServers.size(); int incr = 1; List sns = - Arrays.asList(underloadedServers.keySet().toArray(new ServerName[serversUnderloaded])); + Arrays.asList(underloadedServers.keySet().toArray(new ServerName[serversUnderloaded])); Collections.shuffle(sns); while (regionsToMove.size() > 0) { int cnt = 0; - int i = incr > 0 ? 0 : underloadedServers.size()-1; + int i = incr > 0 ? 0 : underloadedServers.size() - 1; for (; i >= 0 && i < underloadedServers.size(); i += incr) { if (regionsToMove.isEmpty()) { break; @@ -406,10 +376,10 @@ protected List balanceTable(TableName tableName, addRegionPlan(regionsToMove, fetchFromTail, si, regionsToReturn); - underloadedServers.put(si, numToTake-1); + underloadedServers.put(si, numToTake - 1); cnt++; BalanceInfo bi = serverBalanceInfo.get(si); - bi.setNumRegionsAdded(bi.getNumRegionsAdded()+1); + bi.setNumRegionsAdded(bi.getNumRegionsAdded() + 1); } if (cnt == 0) { break; @@ -428,12 +398,10 @@ protected List balanceTable(TableName tableName, // If we need more to fill min, grab one from each most loaded until enough if (neededRegions != 0) { // Walk down most loaded, grabbing one from each until we get enough - for (Map.Entry> server : - serversByLoad.descendingMap().entrySet()) { - BalanceInfo balanceInfo = - serverBalanceInfo.get(server.getKey().getServerName()); - int idx = - balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload(); + for (Map.Entry> server : serversByLoad.descendingMap() + .entrySet()) { + BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName()); + int idx = balanceInfo == null ? 
0 : balanceInfo.getNextRegionForUnload(); if (idx >= server.getValue().size()) { break; } @@ -456,24 +424,23 @@ protected List balanceTable(TableName tableName, // Assign each underloaded up to the min, then if leftovers, assign to max // Walk down least loaded, assigning to each to fill up to min - for (Map.Entry> server : - serversByLoad.entrySet()) { + for (Map.Entry> server : serversByLoad.entrySet()) { int regionCount = server.getKey().getLoad(); if (regionCount >= min) { break; } BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName()); - if(balanceInfo != null) { + if (balanceInfo != null) { regionCount += balanceInfo.getNumRegionsAdded(); } - if(regionCount >= min) { + if (regionCount >= min) { continue; } int numToTake = min - regionCount; int numTaken = 0; - while(numTaken < numToTake && 0 < regionsToMove.size()) { - addRegionPlan(regionsToMove, fetchFromTail, - server.getKey().getServerName(), regionsToReturn); + while (numTaken < numToTake && 0 < regionsToMove.size()) { + addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), + regionsToReturn); numTaken++; balanceInfo.setNumRegionsAdded(balanceInfo.getNumRegionsAdded() + 1); } @@ -487,11 +454,11 @@ protected List balanceTable(TableName tableName, if (!regionsToMove.isEmpty() || neededRegions != 0) { // Emit data so can diagnose how balancer went astray. - LOG.warn("regionsToMove=" + totalNumMoved + - ", numServers=" + numServers + ", serversOverloaded=" + serversOverloaded + - ", serversUnderloaded=" + serversUnderloaded); + LOG.warn( + "regionsToMove=" + totalNumMoved + ", numServers=" + numServers + ", serversOverloaded=" + + serversOverloaded + ", serversUnderloaded=" + serversUnderloaded); StringBuilder sb = new StringBuilder(); - for (Map.Entry> e: loadOfOneTable.entrySet()) { + for (Map.Entry> e : loadOfOneTable.entrySet()) { if (sb.length() > 0) { sb.append(", "); } @@ -503,10 +470,9 @@ protected List balanceTable(TableName tableName, } // All done! - LOG.info("Done. Calculated a load balance in " + (endTime-startTime) + "ms. " + - "Moving " + totalNumMoved + " regions off of " + - serversOverloaded + " overloaded servers onto " + - serversUnderloaded + " less loaded servers"); + LOG.info("Done. Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + + totalNumMoved + " regions off of " + serversOverloaded + " overloaded servers onto " + + serversUnderloaded + " less loaded servers"); return regionsToReturn; } @@ -517,8 +483,8 @@ protected List balanceTable(TableName tableName, * that have less regions in whole cluster scope. */ private void balanceOverall(List regionsToReturn, - Map serverBalanceInfo, boolean fetchFromTail, - MinMaxPriorityQueue regionsToMove, int max, int min) { + Map serverBalanceInfo, boolean fetchFromTail, + MinMaxPriorityQueue regionsToMove, int max, int min) { // Step 1. 
// A map to record the plan we have already got as status quo, in order to resolve a cyclic // assignment pair, @@ -547,7 +513,7 @@ private void balanceOverall(List regionsToReturn, RegionInfo hriToPlan; if (balanceInfo.getHriList().isEmpty()) { LOG.debug("During balanceOverall, we found " + serverload.getServerName() - + " has no RegionInfo, no operation needed"); + + " has no RegionInfo, no operation needed"); continue; } else if (balanceInfo.getNextRegionForUnload() >= balanceInfo.getHriList().size()) { continue; @@ -557,16 +523,16 @@ private void balanceOverall(List regionsToReturn, RegionPlan maxPlan = new RegionPlan(hriToPlan, serverload.getServerName(), null); regionsToMove.add(maxPlan); setLoad(serverLoadList, i, -1); - } else if (balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() > max || - balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() < min) { - LOG.warn( - "Encounter incorrect region numbers after calculating move plan during balanceOverall, " + - "for this table, " + serverload.getServerName() + " originally has " + - balanceInfo.getHriList().size() + " regions and " + balanceInfo.getNumRegionsAdded() + - " regions have been added. Yet, max =" + max + ", min =" + min + - ". Thus stop balance for this table"); // should not happen - return; - } + } else if (balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() > max + || balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() < min) { + LOG.warn( + "Encounter incorrect region numbers after calculating move plan during balanceOverall, " + + "for this table, " + serverload.getServerName() + " originally has " + + balanceInfo.getHriList().size() + " regions and " + + balanceInfo.getNumRegionsAdded() + " regions have been added. Yet, max =" + max + + ", min =" + min + ". Thus stop balance for this table"); // should not happen + return; + } } // Step 3. sort the ServerLoadList, the ArrayList hold overall load for each server. @@ -588,12 +554,12 @@ public int compare(ServerAndLoad s1, ServerAndLoad s2) { // We need to remove the plan that has the source RS equals to destination RS, // since the source RS belongs to the least n loaded RS. int assignLength = regionsToMove.size(); - // A structure help to map ServerName to it's load and index in ServerLoadList - Map> SnLoadMap = new HashMap<>(); + // A structure help to map ServerName to it's load and index in ServerLoadList + Map> SnLoadMap = new HashMap<>(); for (int i = 0; i < serverLoadList.size(); i++) { SnLoadMap.put(serverLoadList.get(i).getServerName(), new Pair<>(serverLoadList.get(i), i)); } - Pair shredLoad; + Pair shredLoad; // A List to help mark the plan in regionsToMove that should be removed List planToRemoveList = new ArrayList<>(); // A structure to record how many times a server becomes the source of a plan, from @@ -601,7 +567,7 @@ public int compare(ServerAndLoad s1, ServerAndLoad s2) { Map sourceMap = new HashMap<>(); // We remove one of the plan which would cause source RS equals destination RS. // But we should keep in mind that the second plan from such RS should be kept. 
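The comments above explain that balanceOverall drops a queued plan whose source server is itself one of the least-loaded target servers (otherwise the region would be handed straight back to the server shedding it), while later plans from the same source are kept. A hedged sketch of that filtering, with illustrative types standing in for RegionPlan:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    final class SelfTargetPlanFilterSketch {
      record Plan(String source, String destination) {}

      static List<Plan> dropFirstSelfTargetPlans(List<Plan> plans, Set<String> leastLoadedTargets) {
        Map<String, Integer> plansSeenPerSource = new HashMap<>();
        List<Plan> kept = new ArrayList<>();
        for (Plan plan : plans) {
          int seen = plansSeenPerSource.merge(plan.source(), 1, Integer::sum);
          // Only the first plan from a source that is also a target is removed.
          if (leastLoadedTargets.contains(plan.source()) && seen == 1) {
            continue;
          }
          kept.add(plan);
        }
        return kept;
      }
    }
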
- for(RegionPlan plan: regionsToMove){ + for (RegionPlan plan : regionsToMove) { // the source RS's load and index in ServerLoadList shredLoad = SnLoadMap.get(plan.getSource()); if (!sourceMap.containsKey(plan.getSource())) { @@ -617,7 +583,7 @@ public int compare(ServerAndLoad s1, ServerAndLoad s2) { // Remove those marked plans from regionsToMove, // we cannot direct remove them during iterating through // regionsToMove, due to the fact that regionsToMove is a MinMaxPriorityQueue. - for(RegionPlan planToRemove : planToRemoveList){ + for (RegionPlan planToRemove : planToRemoveList) { regionsToMove.remove(planToRemove); } @@ -626,22 +592,22 @@ public int compare(ServerAndLoad s1, ServerAndLoad s2) { // the first n = regionsToMove.size() of them, with least load. // With this strategy adopted, we can gradually achieve the overall balance, // while keeping table level balanced. - for(int i = 0; i < assignLength; i++){ + for (int i = 0; i < assignLength; i++) { // skip the RS that is also the source, we have removed them from regionsToMove in previous // step if (sourceMap.containsKey(serverLoadList.get(i).getServerName())) { continue; } - addRegionPlan(regionsToMove, fetchFromTail, - serverLoadList.get(i).getServerName(), regionsToReturn); + addRegionPlan(regionsToMove, fetchFromTail, serverLoadList.get(i).getServerName(), + regionsToReturn); setLoad(serverLoadList, i, 1); // resolve a possible cyclic assignment pair if we just produced one: // e.g. plan1: A -> B, plan2: B -> C => resolve plan1 to A -> C and remove plan2 List pos = - returnMap.get(regionsToReturn.get(regionsToReturn.size() - 1).getSource()); + returnMap.get(regionsToReturn.get(regionsToReturn.size() - 1).getSource()); if (pos != null && pos.size() != 0) { - regionsToReturn.get(pos.get(pos.size() - 1)).setDestination( - regionsToReturn.get(regionsToReturn.size() - 1).getDestination()); + regionsToReturn.get(pos.get(pos.size() - 1)) + .setDestination(regionsToReturn.get(regionsToReturn.size() - 1).getDestination()); pos.remove(pos.size() - 1); regionsToReturn.remove(regionsToReturn.size() - 1); } @@ -653,7 +619,7 @@ public int compare(ServerAndLoad s1, ServerAndLoad s2) { * Add a region from the head or tail to the List of regions to return. */ private void addRegionPlan(final MinMaxPriorityQueue regionsToMove, - final boolean fetchFromTail, final ServerName sn, List regionsToReturn) { + final boolean fetchFromTail, final ServerName sn, List regionsToReturn) { RegionPlan rp = null; if (!fetchFromTail) { rp = regionsToMove.remove(); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index ff6d031cef33..82d765809fb7 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,9 +48,11 @@ import org.slf4j.LoggerFactory; /** - *

    This is a best effort load balancer. Given a Cost function F(C) => x It will - * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the - * new cluster state becomes the plan. It includes costs functions to compute the cost of:

    + *

    + * This is a best effort load balancer. Given a Cost function F(C) => x It will randomly try and + * mutate the cluster to Cprime. If F(Cprime) < F(C) then the new cluster state becomes the plan. + * It includes costs functions to compute the cost of: + *

    *
      *
    • Region Load
    • *
    • Table Load
    • @@ -58,44 +60,46 @@ *
    • Memstore Sizes
    • *
    • Storefile Sizes
    • *
    - * - * - *

    Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost - * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are - * scaled by their respective multipliers:

    - * + *

    + * Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost best + * solution, and 1 is the highest possible cost and the worst solution. The computed costs are + * scaled by their respective multipliers: + *

    *
      - *
    • hbase.master.balancer.stochastic.regionLoadCost
    • - *
    • hbase.master.balancer.stochastic.moveCost
    • - *
    • hbase.master.balancer.stochastic.tableLoadCost
    • - *
    • hbase.master.balancer.stochastic.localityCost
    • - *
    • hbase.master.balancer.stochastic.memstoreSizeCost
    • - *
    • hbase.master.balancer.stochastic.storefileSizeCost
    • + *
    • hbase.master.balancer.stochastic.regionLoadCost
    • + *
    • hbase.master.balancer.stochastic.moveCost
    • + *
    • hbase.master.balancer.stochastic.tableLoadCost
    • + *
    • hbase.master.balancer.stochastic.localityCost
    • + *
    • hbase.master.balancer.stochastic.memstoreSizeCost
    • + *
    • hbase.master.balancer.stochastic.storefileSizeCost
    • *
    - * - *

    You can also add custom Cost function by setting the the following configuration value:

    + *

    + * You can also add custom Cost function by setting the the following configuration value: + *

    *
      - *
    • hbase.master.balancer.stochastic.additionalCostFunctions
    • + *
    • hbase.master.balancer.stochastic.additionalCostFunctions
    • *
    - * - *

    All custom Cost Functions needs to extends {@link CostFunction}

    - * - *

    In addition to the above configurations, the balancer can be tuned by the following - * configuration values:

    + *

    + * All custom Cost Functions needs to extends {@link CostFunction} + *

    + *

    + * In addition to the above configurations, the balancer can be tuned by the following configuration + * values: + *

    *
      - *
    • hbase.master.balancer.stochastic.maxMoveRegions which - * controls what the max number of regions that can be moved in a single invocation of this - * balancer.
    • - *
    • hbase.master.balancer.stochastic.stepsPerRegion is the coefficient by which the number of - * regions is multiplied to try and get the number of times the balancer will - * mutate all servers.
    • - *
    • hbase.master.balancer.stochastic.maxSteps which controls the maximum number of times that - * the balancer will try and mutate all the servers. The balancer will use the minimum of this - * value and the above computation.
    • + *
    • hbase.master.balancer.stochastic.maxMoveRegions which controls what the max number of regions + * that can be moved in a single invocation of this balancer.
    • + *
    • hbase.master.balancer.stochastic.stepsPerRegion is the coefficient by which the number of + * regions is multiplied to try and get the number of times the balancer will mutate all + * servers.
    • + *
    • hbase.master.balancer.stochastic.maxSteps which controls the maximum number of times that the + * balancer will try and mutate all the servers. The balancer will use the minimum of this value and + * the above computation.
    • *
    - * - *

    This balancer is best used with hbase.master.loadbalance.bytable set to false - * so that the balancer gets the full picture of all loads on the cluster.

    + *

    + * This balancer is best used with hbase.master.loadbalance.bytable set to false so that the + * balancer gets the full picture of all loads on the cluster. + *

    */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class StochasticLoadBalancer extends BaseLoadBalancer { @@ -104,10 +108,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { protected static final String STEPS_PER_REGION_KEY = "hbase.master.balancer.stochastic.stepsPerRegion"; - protected static final String MAX_STEPS_KEY = - "hbase.master.balancer.stochastic.maxSteps"; - protected static final String RUN_MAX_STEPS_KEY = - "hbase.master.balancer.stochastic.runMaxSteps"; + protected static final String MAX_STEPS_KEY = "hbase.master.balancer.stochastic.maxSteps"; + protected static final String RUN_MAX_STEPS_KEY = "hbase.master.balancer.stochastic.runMaxSteps"; protected static final String MAX_RUNNING_TIME_KEY = "hbase.master.balancer.stochastic.maxRunningTime"; protected static final String KEEP_REGION_LOADS = @@ -116,7 +118,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { protected static final String MIN_COST_NEED_BALANCE_KEY = "hbase.master.balancer.stochastic.minCostNeedBalance"; protected static final String COST_FUNCTIONS_COST_FUNCTIONS_KEY = - "hbase.master.balancer.stochastic.additionalCostFunctions"; + "hbase.master.balancer.stochastic.additionalCostFunctions"; public static final String OVERALL_COST_FUNCTION_NAME = "Overall"; Map> loads = new HashMap<>(); @@ -161,13 +163,13 @@ public StochasticLoadBalancer() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") public StochasticLoadBalancer(MetricsStochasticBalancer metricsStochasticBalancer) { super(metricsStochasticBalancer); } private static CostFunction createCostFunction(Class clazz, - Configuration conf) { + Configuration conf) { try { Constructor ctor = clazz.getDeclaredConstructor(Configuration.class); return ReflectionUtils.instantiate(clazz.getName(), ctor, conf); @@ -198,7 +200,7 @@ private void loadCustomCostFunctions(Configuration conf) { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") List getCandidateGenerators() { return this.candidateGenerators; } @@ -255,12 +257,11 @@ protected void loadConf(Configuration conf) { curFunctionCosts = new double[costFunctions.size()]; tempFunctionCosts = new double[costFunctions.size()]; - LOG.info( - "Loaded config; maxSteps=" + maxSteps + ", runMaxSteps=" + runMaxSteps + - ", stepsPerRegion=" + stepsPerRegion + - ", maxRunningTime=" + maxRunningTime + ", isByTable=" + isByTable + - ", CostFunctions=" + Arrays.toString(getCostFunctionNames()) + - " , sum of multiplier of cost functions = " + sumMultiplier + " etc."); } + LOG.info("Loaded config; maxSteps=" + maxSteps + ", runMaxSteps=" + runMaxSteps + + ", stepsPerRegion=" + stepsPerRegion + ", maxRunningTime=" + maxRunningTime + + ", isByTable=" + isByTable + ", CostFunctions=" + Arrays.toString(getCostFunctionNames()) + + " , sum of multiplier of cost functions = " + sumMultiplier + " etc."); + } @Override public void updateClusterMetrics(ClusterMetrics st) { @@ -279,13 +280,14 @@ public void updateClusterMetrics(ClusterMetrics st) { } } - private void updateBalancerTableLoadInfo(TableName tableName, Map> loadOfOneTable) { + private void updateBalancerTableLoadInfo(TableName tableName, + Map> loadOfOneTable) { RegionHDFSBlockLocationFinder finder = null; if ((this.localityCost != null) || (this.rackLocalityCost != null)) { finder = this.regionFinder; } BalancerClusterState 
cluster = - new BalancerClusterState(loadOfOneTable, loads, finder, rackManager); + new BalancerClusterState(loadOfOneTable, loads, finder, rackManager); initCosts(cluster); curOverallCost = computeCost(cluster, Double.MAX_VALUE); @@ -294,14 +296,15 @@ private void updateBalancerTableLoadInfo(TableName tableName, Map>> loadOfAllTable) { + public void + updateBalancerLoadInfo(Map>> loadOfAllTable) { if (isByTable) { loadOfAllTable.forEach((tableName, loadOfOneTable) -> { updateBalancerTableLoadInfo(tableName, loadOfOneTable); }); } else { - updateBalancerTableLoadInfo(HConstants.ENSEMBLE_TABLE_NAME, toEnsumbleTableLoad(loadOfAllTable)); + updateBalancerTableLoadInfo(HConstants.ENSEMBLE_TABLE_NAME, + toEnsumbleTableLoad(loadOfAllTable)); } } @@ -309,7 +312,7 @@ public void updateBalancerLoadInfo( * Update the number of metrics that are reported to JMX */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") void updateMetricsSize(int size) { if (metricsBalancer instanceof MetricsStochasticBalancer) { ((MetricsStochasticBalancer) metricsBalancer).updateMetricsSize(size); @@ -327,33 +330,35 @@ private String getBalanceReason(double total, double sumMultiplier) { } else if (sumMultiplier <= 0) { return "sumMultiplier = " + sumMultiplier + " <= 0"; } else if ((total / sumMultiplier) < minCostNeedBalance) { - return "[(cost1*multiplier1)+(cost2*multiplier2)+...+(costn*multipliern)]/sumMultiplier = " + - (total / sumMultiplier) + " <= minCostNeedBalance(" + minCostNeedBalance + ")"; + return "[(cost1*multiplier1)+(cost2*multiplier2)+...+(costn*multipliern)]/sumMultiplier = " + + (total / sumMultiplier) + " <= minCostNeedBalance(" + minCostNeedBalance + ")"; } else { return ""; } } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") boolean needsBalance(TableName tableName, BalancerClusterState cluster) { ClusterLoadState cs = new ClusterLoadState(cluster.clusterState); if (cs.getNumServers() < MIN_SERVER_BALANCE) { - LOG.info("Not running balancer because only " + cs.getNumServers() - + " active regionserver(s)"); - sendRejectionReasonToRingBuffer(() -> "The number of RegionServers " + cs.getNumServers() + - " < MIN_SERVER_BALANCE(" + MIN_SERVER_BALANCE + ")", null); + LOG.info( + "Not running balancer because only " + cs.getNumServers() + " active regionserver(s)"); + sendRejectionReasonToRingBuffer(() -> "The number of RegionServers " + cs.getNumServers() + + " < MIN_SERVER_BALANCE(" + MIN_SERVER_BALANCE + ")", + null); return false; } if (areSomeRegionReplicasColocated(cluster)) { - LOG.info("Running balancer because at least one server hosts replicas of the same region." + - " function cost={}", functionCost()); + LOG.info("Running balancer because at least one server hosts replicas of the same region." + + " function cost={}", + functionCost()); return true; } - if (idleRegionServerExist(cluster)){ - LOG.info("Running balancer because cluster has idle server(s)."+ - " function cost={}", functionCost()); + if (idleRegionServerExist(cluster)) { + LOG.info("Running balancer because cluster has idle server(s)." 
+ " function cost={}", + functionCost()); return true; } @@ -369,23 +374,23 @@ boolean needsBalance(TableName tableName, BalancerClusterState cluster) { if (balanced) { final double calculatedTotal = total; - sendRejectionReasonToRingBuffer(() -> - getBalanceReason(calculatedTotal, sumMultiplier), costFunctions); + sendRejectionReasonToRingBuffer(() -> getBalanceReason(calculatedTotal, sumMultiplier), + costFunctions); LOG.info("{} - skipping load balancing because weighted average imbalance={} <= " + "threshold({}). If you want more aggressive balancing, either lower " + "hbase.master.balancer.stochastic.minCostNeedBalance from {} or increase the relative " + "multiplier(s) of the specific cost function(s). functionCost={}", - isByTable ? "Table specific ("+tableName+")" : "Cluster wide", total / sumMultiplier, + isByTable ? "Table specific (" + tableName + ")" : "Cluster wide", total / sumMultiplier, minCostNeedBalance, minCostNeedBalance, functionCost()); } else { LOG.info("{} - Calculating plan. may take up to {}ms to complete.", - isByTable ? "Table specific ("+tableName+")" : "Cluster wide", maxRunningTime); + isByTable ? "Table specific (" + tableName + ")" : "Cluster wide", maxRunningTime); } return !balanced; } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") BalanceAction nextAction(BalancerClusterState cluster) { return getRandomGenerator().generate(cluster); } @@ -417,7 +422,7 @@ protected CandidateGenerator getRandomGenerator() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") void setRackManager(RackManager rackManager) { this.rackManager = rackManager; } @@ -427,27 +432,26 @@ private long calculateMaxSteps(BalancerClusterState cluster) { } /** - * Given the cluster state this will try and approach an optimal balance. This - * should always approach the optimal state given enough steps. + * Given the cluster state this will try and approach an optimal balance. This should always + * approach the optimal state given enough steps. */ @Override - protected List balanceTable(TableName tableName, Map> loadOfOneTable) { + protected List balanceTable(TableName tableName, + Map> loadOfOneTable) { // On clusters with lots of HFileLinks or lots of reference files, // instantiating the storefile infos can be quite expensive. // Allow turning this feature off if the locality cost is not going to // be used in any computations. RegionHDFSBlockLocationFinder finder = null; - if ((this.localityCost != null) - || (this.rackLocalityCost != null)) { + if ((this.localityCost != null) || (this.rackLocalityCost != null)) { finder = this.regionFinder; } - //The clusterState that is given to this method contains the state - //of all the regions in the table(s) (that's true today) + // The clusterState that is given to this method contains the state + // of all the regions in the table(s) (that's true today) // Keep track of servers to iterate through them. BalancerClusterState cluster = - new BalancerClusterState(loadOfOneTable, loads, finder, rackManager); + new BalancerClusterState(loadOfOneTable, loads, finder, rackManager); long startTime = EnvironmentEdgeManager.currentTime(); @@ -455,13 +459,13 @@ protected List balanceTable(TableName tableName, Map 0. 
For example, set " - + "hbase.master.balancer.stochastic.regionCountCost to a positive value or default"); + + "hbase.master.balancer.stochastic.regionCountCost to a positive value or default"); return null; } @@ -483,15 +487,17 @@ protected List balanceTable(TableName tableName, Map maxSteps) { - LOG.warn("calculatedMaxSteps:{} for loadbalancer's stochastic walk is larger than " - + "maxSteps:{}. Hence load balancing may not work well. Setting parameter " - + "\"hbase.master.balancer.stochastic.runMaxSteps\" to true can overcome this issue." - + "(This config change does not require service restart)", calculatedMaxSteps, - maxSteps); + LOG.warn( + "calculatedMaxSteps:{} for loadbalancer's stochastic walk is larger than " + + "maxSteps:{}. Hence load balancing may not work well. Setting parameter " + + "\"hbase.master.balancer.stochastic.runMaxSteps\" to true can overcome this issue." + + "(This config change does not require service restart)", + calculatedMaxSteps, maxSteps); } } - LOG.info("Start StochasticLoadBalancer.balancer, initial weighted average imbalance={}, " - + "functionCost={} computedMaxSteps={}", + LOG.info( + "Start StochasticLoadBalancer.balancer, initial weighted average imbalance={}, " + + "functionCost={} computedMaxSteps={}", currentCost / sumMultiplier, functionCost(), computedMaxSteps); final String initFunctionTotalCosts = totalCostsPerFunc(); @@ -525,8 +531,7 @@ protected List balanceTable(TableName tableName, Map - maxRunningTime) { + if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) { break; } } @@ -537,23 +542,25 @@ protected List balanceTable(TableName tableName, Map currentCost) { updateStochasticCosts(tableName, curOverallCost, curFunctionCosts); List plans = createRegionPlans(cluster); - LOG.info("Finished computing new moving plan. Computation took {} ms" + - " to try {} different iterations. Found a solution that moves " + - "{} regions; Going from a computed imbalance of {}" + - " to a new imbalance of {}. funtionCost={}", - endTime - startTime, step, plans.size(), - initCost / sumMultiplier, currentCost / sumMultiplier, functionCost()); + LOG.info( + "Finished computing new moving plan. Computation took {} ms" + + " to try {} different iterations. Found a solution that moves " + + "{} regions; Going from a computed imbalance of {}" + + " to a new imbalance of {}. funtionCost={}", + endTime - startTime, step, plans.size(), initCost / sumMultiplier, + currentCost / sumMultiplier, functionCost()); sendRegionPlansToRingBuffer(plans, currentCost, initCost, initFunctionTotalCosts, step); return plans; } - LOG.info("Could not find a better moving plan. Tried {} different configurations in " + - "{} ms, and did not find anything with an imbalance score less than {}", step, - endTime - startTime, initCost / sumMultiplier); + LOG.info( + "Could not find a better moving plan. 
Tried {} different configurations in " + + "{} ms, and did not find anything with an imbalance score less than {}", + step, endTime - startTime, initCost / sumMultiplier); return null; } private void sendRejectionReasonToRingBuffer(Supplier reason, - List costFunctions) { + List costFunctions) { provider.recordBalancerRejection(() -> { BalancerRejection.Builder builder = new BalancerRejection.Builder().setReason(reason.get()); if (costFunctions != null) { @@ -569,18 +576,18 @@ private void sendRejectionReasonToRingBuffer(Supplier reason, } private void sendRegionPlansToRingBuffer(List plans, double currentCost, - double initCost, String initFunctionTotalCosts, long step) { + double initCost, String initFunctionTotalCosts, long step) { provider.recordBalancerDecision(() -> { List regionPlans = new ArrayList<>(); for (RegionPlan plan : plans) { regionPlans - .add("table: " + plan.getRegionInfo().getTable() + " , region: " + plan.getRegionName() + - " , source: " + plan.getSource() + " , destination: " + plan.getDestination()); + .add("table: " + plan.getRegionInfo().getTable() + " , region: " + plan.getRegionName() + + " , source: " + plan.getSource() + " , destination: " + plan.getDestination()); } return new BalancerDecision.Builder().setInitTotalCost(initCost) - .setInitialFunctionCosts(initFunctionTotalCosts).setComputedTotalCost(currentCost) - .setFinalFunctionCosts(totalCostsPerFunc()).setComputedSteps(step) - .setRegionPlans(regionPlans).build(); + .setInitialFunctionCosts(initFunctionTotalCosts).setComputedTotalCost(currentCost) + .setFinalFunctionCosts(totalCostsPerFunc()).setComputedSteps(step) + .setRegionPlans(regionPlans).build(); }); } @@ -596,8 +603,8 @@ private void updateStochasticCosts(TableName tableName, double overall, double[] if (metricsBalancer instanceof MetricsStochasticBalancer) { MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer; // overall cost - balancer.updateStochasticCost(tableName.getNameAsString(), - OVERALL_COST_FUNCTION_NAME, "Overall cost", overall); + balancer.updateStochasticCost(tableName.getNameAsString(), OVERALL_COST_FUNCTION_NAME, + "Overall cost", overall); // each cost function for (int i = 0; i < costFunctions.size(); i++) { @@ -663,14 +670,13 @@ private String totalCostsPerFunc() { /** * Create all of the RegionPlan's needed to move from the initial cluster state to the desired * state. - * * @param cluster The state of the cluster * @return List of RegionPlan's that represent the moves needed to get to desired final state. 
*/ private List createRegionPlans(BalancerClusterState cluster) { List plans = new ArrayList<>(); - for (int regionIndex = 0; - regionIndex < cluster.regionIndexToServerIndex.length; regionIndex++) { + for (int regionIndex = + 0; regionIndex < cluster.regionIndexToServerIndex.length; regionIndex++) { int initialServerIndex = cluster.initialRegionIndexToServerIndex[regionIndex]; int newServerIndex = cluster.regionIndexToServerIndex[regionIndex]; @@ -715,7 +721,7 @@ private void updateRegionLoad() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") void initCosts(BalancerClusterState cluster) { // Initialize the weights of generator every time weightsOfGenerators = new double[this.candidateGenerators.size()]; @@ -729,7 +735,7 @@ void initCosts(BalancerClusterState cluster) { * Update both the costs of costfunctions and the weights of candidate generators */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") void updateCostsAndWeightsWithAction(BalancerClusterState cluster, BalanceAction action) { // Reset all the weights to 0 for (int i = 0; i < weightsOfGenerators.length; i++) { @@ -747,7 +753,7 @@ void updateCostsAndWeightsWithAction(BalancerClusterState cluster, BalanceAction * Get the names of the cost functions */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") String[] getCostFunctionNames() { String[] ret = new String[costFunctions.size()]; for (int i = 0; i < costFunctions.size(); i++) { @@ -759,16 +765,15 @@ String[] getCostFunctionNames() { } /** - * This is the main cost function. It will compute a cost associated with a proposed cluster - * state. All different costs will be combined with their multipliers to produce a double cost. - * + * This is the main cost function. It will compute a cost associated with a proposed cluster + * state. All different costs will be combined with their multipliers to produce a double cost. * @param cluster The state of the cluster * @param previousCost the previous cost. This is used as an early out. - * @return a double of a cost associated with the proposed cluster state. This cost is an + * @return a double of a cost associated with the proposed cluster state. This cost is an * aggregate of all individual cost functions. 
*/ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") + allowedOnPath = ".*(/src/test/.*|StochasticLoadBalancer).java") double computeCost(BalancerClusterState cluster, double previousCost) { double total = 0; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StoreFileCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StoreFileCostFunction.java index 31ad2b3940ff..8ad5d608268f 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StoreFileCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/StoreFileCostFunction.java @@ -28,7 +28,7 @@ class StoreFileCostFunction extends CostFromRegionLoadFunction { private static final String STOREFILE_SIZE_COST_KEY = - "hbase.master.balancer.stochastic.storefileSizeCost"; + "hbase.master.balancer.stochastic.storefileSizeCost"; private static final float DEFAULT_STOREFILE_SIZE_COST = 5; StoreFileCostFunction(Configuration conf) { diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/TableSkewCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/TableSkewCostFunction.java index d1e7cd217343..4ed2257cc912 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/TableSkewCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/TableSkewCostFunction.java @@ -27,7 +27,7 @@ @InterfaceAudience.Private class TableSkewCostFunction extends CostFunction { private static final String TABLE_SKEW_COST_KEY = - "hbase.master.balancer.stochastic.tableSkewCost"; + "hbase.master.balancer.stochastic.tableSkewCost"; private static final float DEFAULT_TABLE_SKEW_COST = 35; DoubleArrayCost[] costsPerTable; diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/WriteRequestCostFunction.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/WriteRequestCostFunction.java index 26c962f5af59..284da992cfc6 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/WriteRequestCostFunction.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/WriteRequestCostFunction.java @@ -28,7 +28,7 @@ class WriteRequestCostFunction extends CostFromRegionLoadAsRateFunction { private static final String WRITE_REQUEST_COST_KEY = - "hbase.master.balancer.stochastic.writeRequestCost"; + "hbase.master.balancer.stochastic.writeRequestCost"; private static final float DEFAULT_WRITE_REQUEST_COST = 5; WriteRequestCostFunction(Configuration conf) { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java index b2baaa0f5a98..e3cf57f26a08 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -99,8 +99,7 @@ public static void setupBeforeClass() throws Exception { } }); for (int i = 0; i < 40; i++) { - ServerName server = ServerName.valueOf("foo" + i, 1234, - EnvironmentEdgeManager.currentTime()); + ServerName server = ServerName.valueOf("foo" + i, 1234, EnvironmentEdgeManager.currentTime()); String rack = getRack(i); if (!rack.equals(RackManager.UNKNOWN_RACK)) { rackToServers.computeIfAbsent(rack, k -> new ArrayList<>()).add(server); @@ -146,8 +145,8 @@ public void testPlacePrimaryRSAsRoundRobin() { @Test public void testRoundRobinAssignmentsWithUnevenSizedRacks() { - //In the case of uneven racks, the regions should be distributed - //proportionately to the rack sizes + // In the case of uneven racks, the regions should be distributed + // proportionately to the rack sizes primaryRSPlacement(6, null, 10, 10, 10); primaryRSPlacement(600, null, 10, 10, 5); primaryRSPlacement(600, null, 10, 5, 10); @@ -165,11 +164,11 @@ public void testRoundRobinAssignmentsWithUnevenSizedRacks() { public void testSecondaryAndTertiaryPlacementWithSingleRack() { // Test the case where there is a single rack and we need to choose // Primary/Secondary/Tertiary from a single rack. - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 10); // have lots of regions to test with - Triple, FavoredNodeAssignmentHelper, List> - primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); + Triple, FavoredNodeAssignmentHelper, List> primaryRSMapAndHelper = + secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map primaryRSMap = primaryRSMapAndHelper.getFirst(); List regions = primaryRSMapAndHelper.getThird(); @@ -191,10 +190,10 @@ public void testSecondaryAndTertiaryPlacementWithSingleRack() { public void testSecondaryAndTertiaryPlacementWithSingleServer() { // Test the case where we have a single node in the cluster. 
In this case // the primary can be assigned but the secondary/tertiary would be null - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 1); - Triple, FavoredNodeAssignmentHelper, List> - primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount); + Triple, FavoredNodeAssignmentHelper, List> primaryRSMapAndHelper = + secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map primaryRSMap = primaryRSMapAndHelper.getFirst(); List regions = primaryRSMapAndHelper.getThird(); @@ -209,12 +208,12 @@ public void testSecondaryAndTertiaryPlacementWithSingleServer() { public void testSecondaryAndTertiaryPlacementWithMultipleRacks() { // Test the case where we have multiple racks and the region servers // belong to multiple racks - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 10); rackToServerCount.put("rack2", 10); - Triple, FavoredNodeAssignmentHelper, List> - primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); + Triple, FavoredNodeAssignmentHelper, List> primaryRSMapAndHelper = + secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map primaryRSMap = primaryRSMapAndHelper.getFirst(); @@ -240,17 +239,17 @@ public void testSecondaryAndTertiaryPlacementWithMultipleRacks() { public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() { // Test the case where we have two racks but with less than two servers in each // We will not have enough machines to select secondary/tertiary - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 1); rackToServerCount.put("rack2", 1); - Triple, FavoredNodeAssignmentHelper, List> - primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); + Triple, FavoredNodeAssignmentHelper, List> primaryRSMapAndHelper = + secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map primaryRSMap = primaryRSMapAndHelper.getFirst(); List regions = primaryRSMapAndHelper.getThird(); assertTrue(primaryRSMap.size() == 6); Map secondaryAndTertiaryMap = - helper.placeSecondaryAndTertiaryRS(primaryRSMap); + helper.placeSecondaryAndTertiaryRS(primaryRSMap); for (RegionInfo region : regions) { // not enough secondary/tertiary room to place the regions assertTrue(secondaryAndTertiaryMap.get(region) == null); @@ -264,17 +263,17 @@ public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack( // racks than what the primary is on. 
But if the other rack doesn't have // enough nodes to have both secondary/tertiary RSs, the tertiary is placed // on the same rack as the primary server is on - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 2); rackToServerCount.put("rack2", 1); - Triple, FavoredNodeAssignmentHelper, List> - primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); + Triple, FavoredNodeAssignmentHelper, List> primaryRSMapAndHelper = + secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map primaryRSMap = primaryRSMapAndHelper.getFirst(); List regions = primaryRSMapAndHelper.getThird(); assertTrue(primaryRSMap.size() == 6); Map secondaryAndTertiaryMap = - helper.placeSecondaryAndTertiaryRS(primaryRSMap); + helper.placeSecondaryAndTertiaryRS(primaryRSMap); assertTrue(secondaryAndTertiaryMap.size() == regions.size()); for (RegionInfo region : regions) { ServerName s = primaryRSMap.get(region); @@ -288,20 +287,18 @@ public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack( } private Triple, FavoredNodeAssignmentHelper, List> - secondaryAndTertiaryRSPlacementHelper(int regionCount, Map rackToServerCount) { + secondaryAndTertiaryRSPlacementHelper(int regionCount, + Map rackToServerCount) { Map primaryRSMap = new HashMap(); List servers = getServersFromRack(rackToServerCount); FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager); - Map> assignmentMap = - new HashMap>(); + Map> assignmentMap = new HashMap>(); helper.initialize(); // create regions List regions = new ArrayList<>(regionCount); for (int i = 0; i < regionCount; i++) { regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build()); + .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build()); } // place the regions helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); @@ -310,13 +307,12 @@ public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack( private void primaryRSPlacement(int regionCount, Map primaryRSMap, int firstRackSize, int secondRackSize, int thirdRackSize) { - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", firstRackSize); rackToServerCount.put("rack2", secondRackSize); rackToServerCount.put("rack3", thirdRackSize); List servers = getServersFromRack(rackToServerCount); - FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, - rackManager); + FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager); helper.initialize(); assertTrue(helper.canPlaceFavoredNodes()); @@ -329,9 +325,7 @@ private void primaryRSPlacement(int regionCount, Map pri List regions = new ArrayList<>(regionCount); for (int i = 0; i < regionCount; i++) { regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf("foobar")) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build()); + .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build()); } // place those regions in primary RSs helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); @@ -356,7 +350,7 @@ private void primaryRSPlacement(int regionCount, Map pri } private void checkNumRegions(int firstRackSize, int secondRackSize, int thirdRackSize, - int 
regionsOnRack1, int regionsOnRack2, int regionsOnRack3) { + int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) { // The regions should be distributed proportionately to the racksizes // Verify the ordering was as expected by inserting the racks and regions // in sorted maps. The keys being the racksize and numregions; values are @@ -383,11 +377,10 @@ private void checkNumRegions(int firstRackSize, int secondRackSize, int thirdRac rackMap.get(thirdRackSize).intValue(), regionMap.get(regionsOnRack3).intValue()); } - private String printProportions(int firstRackSize, int secondRackSize, - int thirdRackSize, int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) { - return "The rack sizes " + firstRackSize + " " + secondRackSize - + " " + thirdRackSize + " " + regionsOnRack1 + " " + regionsOnRack2 + - " " + regionsOnRack3; + private String printProportions(int firstRackSize, int secondRackSize, int thirdRackSize, + int regionsOnRack1, int regionsOnRack2, int regionsOnRack3) { + return "The rack sizes " + firstRackSize + " " + secondRackSize + " " + thirdRackSize + " " + + regionsOnRack1 + " " + regionsOnRack2 + " " + regionsOnRack3; } @Test @@ -403,12 +396,9 @@ public void testConstrainedPlacement() throws Exception { List regions = new ArrayList<>(20); for (int i = 0; i < 20; i++) { regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build()); + .setStartKey(Bytes.toBytes(i)).setEndKey(Bytes.toBytes(i + 1)).build()); } - Map> assignmentMap = - new HashMap>(); + Map> assignmentMap = new HashMap>(); Map primaryRSMap = new HashMap(); helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); assertTrue(primaryRSMap.size() == regions.size()); @@ -420,7 +410,7 @@ public void testConstrainedPlacement() throws Exception { @Test public void testGetOneRandomRack() throws IOException { - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); Set rackList = Sets.newHashSet("rack1", "rack2", "rack3"); for (String rack : rackList) { rackToServerCount.put(rack, 2); @@ -432,20 +422,20 @@ public void testGetOneRandomRack() throws IOException { assertTrue(helper.canPlaceFavoredNodes()); // Check we don't get a bad rack on any number of attempts - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { assertTrue(rackList.contains(helper.getOneRandomRack(Sets.newHashSet()))); } // Check skipRack multiple times when an invalid rack is specified Set skipRacks = Sets.newHashSet("rack"); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { assertTrue(rackList.contains(helper.getOneRandomRack(skipRacks))); } // Check skipRack multiple times when an valid rack is specified skipRacks = Sets.newHashSet("rack1"); Set validRacks = Sets.newHashSet("rack2", "rack3"); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { assertTrue(validRacks.contains(helper.getOneRandomRack(skipRacks))); } } @@ -453,7 +443,7 @@ public void testGetOneRandomRack() throws IOException { @Test public void testGetRandomServerSingleRack() throws IOException { - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); final String rack = "rack1"; rackToServerCount.put(rack, 4); List servers = getServersFromRack(rackToServerCount); @@ -463,7 +453,7 
@@ public void testGetRandomServerSingleRack() throws IOException { assertTrue(helper.canPlaceFavoredNodes()); // Check we don't get a bad node on any number of attempts - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet()); assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn)); } @@ -471,7 +461,7 @@ public void testGetRandomServerSingleRack() throws IOException { // Check skipServers multiple times when an invalid server is specified Set skipServers = Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE)); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { ServerName sn = helper.getOneRandomServer(rack, skipServers); assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn)); } @@ -479,17 +469,16 @@ public void testGetRandomServerSingleRack() throws IOException { // Check skipRack multiple times when an valid servers are specified ServerName skipSN = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE); skipServers = Sets.newHashSet(skipSN); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { ServerName sn = helper.getOneRandomServer(rack, skipServers); - assertNotEquals("Skip server should not be selected ", - skipSN.getAddress(), sn.getAddress()); + assertNotEquals("Skip server should not be selected ", skipSN.getAddress(), sn.getAddress()); assertTrue("Server:" + sn + " does not belong to list: " + servers, servers.contains(sn)); } } @Test public void testGetRandomServerMultiRack() throws IOException { - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); Set rackList = Sets.newHashSet("rack1", "rack2", "rack3"); for (String rack : rackList) { rackToServerCount.put(rack, 4); @@ -501,22 +490,22 @@ public void testGetRandomServerMultiRack() throws IOException { assertTrue(helper.canPlaceFavoredNodes()); // Check we don't get a bad node on any number of attempts - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { for (String rack : rackList) { ServerName sn = helper.getOneRandomServer(rack, Sets.newHashSet()); assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack), - rackToServers.get(rack).contains(sn)); + rackToServers.get(rack).contains(sn)); } } // Check skipServers multiple times when an invalid server is specified Set skipServers = Sets.newHashSet(ServerName.valueOf("invalidnode:1234", ServerName.NON_STARTCODE)); - for (int attempts = 0 ; attempts < MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { for (String rack : rackList) { ServerName sn = helper.getOneRandomServer(rack, skipServers); assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack), - rackToServers.get(rack).contains(sn)); + rackToServers.get(rack).contains(sn)); } } @@ -525,19 +514,19 @@ public void testGetRandomServerMultiRack() throws IOException { ServerName skipSN2 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE); ServerName skipSN3 = ServerName.valueOf("foo20:1234", ServerName.NON_STARTCODE); skipServers = Sets.newHashSet(skipSN1, skipSN2, skipSN3); - for (int attempts = 0 ; attempts < 
MAX_ATTEMPTS; attempts++) { + for (int attempts = 0; attempts < MAX_ATTEMPTS; attempts++) { for (String rack : rackList) { ServerName sn = helper.getOneRandomServer(rack, skipServers); assertFalse("Skip server should not be selected ", skipServers.contains(sn)); assertTrue("Server:" + sn + " does not belong to rack servers: " + rackToServers.get(rack), - rackToServers.get(rack).contains(sn)); + rackToServers.get(rack).contains(sn)); } } } @Test public void testGetFavoredNodes() throws IOException { - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); Set rackList = Sets.newHashSet("rack1", "rack2", "rack3"); for (String rack : rackList) { rackToServerCount.put(rack, 4); @@ -549,9 +538,7 @@ public void testGetFavoredNodes() throws IOException { assertTrue(helper.canPlaceFavoredNodes()); RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(HConstants.EMPTY_START_ROW) - .setEndKey(HConstants.EMPTY_END_ROW) - .build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).build(); for (int maxattempts = 0; maxattempts < MAX_ATTEMPTS; maxattempts++) { List fn = helper.generateFavoredNodes(region); @@ -571,7 +558,6 @@ public void testGenMissingFavoredNodeOneRack() throws IOException { helper.initialize(); assertTrue(helper.canPlaceFavoredNodes()); - ServerName snRack1SN1 = ServerName.valueOf("foo1:1234", ServerName.NON_STARTCODE); ServerName snRack1SN2 = ServerName.valueOf("foo2:1234", ServerName.NON_STARTCODE); ServerName snRack1SN3 = ServerName.valueOf("foo3:1234", ServerName.NON_STARTCODE); @@ -598,7 +584,7 @@ public void testGenMissingFavoredNodeMultiRack() throws IOException { ServerName snRack2SN1 = ServerName.valueOf("foo10:1234", ServerName.NON_STARTCODE); ServerName snRack2SN2 = ServerName.valueOf("foo11:1234", ServerName.NON_STARTCODE); - Map rackToServerCount = new HashMap<>(); + Map rackToServerCount = new HashMap<>(); Set rackList = Sets.newHashSet("rack1", "rack2"); for (String rack : rackList) { rackToServerCount.put(rack, 4); @@ -638,13 +624,13 @@ private void checkDuplicateFN(List fnList, ServerName genFN) { assertNotNull("Generated FN can't be null", genFN); favoredNodes.add(genFN); assertEquals("Did not find expected number of favored nodes", - FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size()); + FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size()); } private void checkDuplicateFN(List fnList) { Set favoredNodes = Sets.newHashSet(fnList); assertEquals("Did not find expected number of favored nodes", - FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size()); + FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favoredNodes.size()); } private void checkFNRacks(List fnList, ServerName genFN) { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestStartcodeAgnosticServerName.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestStartcodeAgnosticServerName.java index 05e1e0163fd5..ac2a248db5a3 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestStartcodeAgnosticServerName.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestStartcodeAgnosticServerName.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestStartcodeAgnosticServerName { @ClassRule diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlan.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlan.java index 13154cf9567e..d353e3245bfb 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlan.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionPlan { @ClassRule diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java index 59335079bcda..0e7693e49264 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java @@ -49,95 +49,62 @@ import org.slf4j.LoggerFactory; /** - * Class used to be the base of unit tests on load balancers. It gives helper - * methods to create maps of {@link ServerName} to lists of {@link RegionInfo} - * and to check list of region plans. - * + * Class used to be the base of unit tests on load balancers. It gives helper methods to create maps + * of {@link ServerName} to lists of {@link RegionInfo} and to check list of region plans. 
*/ public class BalancerTestBase { private static final Logger LOG = LoggerFactory.getLogger(BalancerTestBase.class); static int regionId = 0; protected static Configuration conf; - protected int[] largeCluster = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 }; + protected int[] largeCluster = new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 56 }; // int[testnum][servernumber] -> numregions - protected int[][] clusterStateMocks = new int[][]{ + protected int[][] clusterStateMocks = new int[][] { // 1 node - new int[]{0}, - new int[]{1}, - new int[]{10}, + new int[] { 0 }, new int[] { 1 }, new int[] { 10 }, // 2 node - new int[]{0, 0}, - new int[]{2, 0}, - new int[]{2, 1}, - new int[]{2, 2}, - new int[]{2, 3}, - new int[]{2, 4}, - new int[]{1, 1}, - new int[]{0, 1}, - new int[]{10, 1}, - new int[]{514, 1432}, - new int[]{48, 53}, + new int[] { 0, 0 }, new int[] { 2, 0 }, new int[] { 2, 1 }, new int[] { 2, 2 }, + new int[] { 2, 3 }, new int[] { 2, 4 }, new int[] { 1, 1 }, new int[] { 0, 1 }, + new int[] { 10, 1 }, new int[] { 514, 1432 }, new int[] { 48, 53 }, // 3 node - new int[]{0, 1, 2}, - new int[]{1, 2, 3}, - new int[]{0, 2, 2}, - new int[]{0, 3, 0}, - new int[]{0, 4, 0}, - new int[]{20, 20, 0}, 
+ new int[] { 0, 1, 2 }, new int[] { 1, 2, 3 }, new int[] { 0, 2, 2 }, new int[] { 0, 3, 0 }, + new int[] { 0, 4, 0 }, new int[] { 20, 20, 0 }, // 4 node - new int[]{0, 1, 2, 3}, - new int[]{4, 0, 0, 0}, - new int[]{5, 0, 0, 0}, - new int[]{6, 6, 0, 0}, - new int[]{6, 2, 0, 0}, - new int[]{6, 1, 0, 0}, - new int[]{6, 0, 0, 0}, - new int[]{4, 4, 4, 7}, - new int[]{4, 4, 4, 8}, - new int[]{0, 0, 0, 7}, + new int[] { 0, 1, 2, 3 }, new int[] { 4, 0, 0, 0 }, new int[] { 5, 0, 0, 0 }, + new int[] { 6, 6, 0, 0 }, new int[] { 6, 2, 0, 0 }, new int[] { 6, 1, 0, 0 }, + new int[] { 6, 0, 0, 0 }, new int[] { 4, 4, 4, 7 }, new int[] { 4, 4, 4, 8 }, + new int[] { 0, 0, 0, 7 }, // 5 node - new int[]{1, 1, 1, 1, 4}, + new int[] { 1, 1, 1, 1, 4 }, // 6 nodes - new int[]{1500, 500, 500, 500, 10, 0}, - new int[]{1500, 500, 500, 500, 500, 0}, + new int[] { 1500, 500, 500, 500, 10, 0 }, new int[] { 1500, 500, 500, 500, 500, 0 }, // more nodes - new int[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 10}, - new int[]{6, 6, 5, 6, 6, 6, 6, 6, 6, 1}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 54}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 55}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 56}, - new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 16}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 8}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 9}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 10}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 123}, - new int[]{1, 1, 1, 1, 1, 1, 1, 1, 1, 155}, - new int[]{10, 7, 12, 8, 11, 10, 9, 14}, - new int[]{13, 14, 6, 10, 10, 10, 8, 10}, - new int[]{130, 14, 60, 10, 100, 10, 80, 10}, - new int[]{130, 140, 60, 100, 100, 100, 80, 100}, - new int[]{0, 5 , 5, 5, 5}, - largeCluster, + new int[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 }, new int[] { 6, 6, 5, 6, 6, 6, 6, 6, 6, 1 }, + new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 54 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 55 }, + new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 56 }, new int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 }, + new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 8 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 9 }, + new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 10 }, new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 123 }, + new int[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 155 }, new int[] { 10, 7, 12, 8, 11, 10, 9, 14 }, + new int[] { 13, 14, 6, 10, 10, 10, 8, 10 }, new int[] { 130, 14, 60, 10, 100, 10, 80, 10 }, + new int[] { 130, 140, 60, 100, 100, 100, 80, 100 }, new int[] { 0, 5, 5, 5, 5 }, largeCluster, }; - // This class is introduced because IP to rack resolution can be lengthy. public static class MockMapping implements DNSToSwitchMapping { public MockMapping(Configuration conf) { @@ -158,8 +125,7 @@ public void reloadCachedMappings(List arg0) { } /** - * Invariant is that all servers have between floor(avg) and ceiling(avg) - * number of regions. + * Invariant is that all servers have between floor(avg) and ceiling(avg) number of regions. */ public void assertClusterAsBalanced(List servers) { int numServers = servers.size(); @@ -186,15 +152,14 @@ public void assertClusterAsBalanced(List servers) { for (ServerAndLoad server : servers) { assertTrue("All servers should have a positive load. " + server, server.getLoad() >= 0); assertTrue("All servers should have load no more than " + max + ". " + server, - server.getLoad() <= max); + server.getLoad() <= max); assertTrue("All servers should have load no less than " + min + ". 
" + server, - server.getLoad() >= min); + server.getLoad() >= min); } } /** - * Invariant is that all servers have between acceptable range - * number of regions. + * Invariant is that all servers have between acceptable range number of regions. */ public boolean assertClusterOverallAsBalanced(List servers, int tablenum) { int numServers = servers.size(); @@ -220,10 +185,10 @@ public boolean assertClusterOverallAsBalanced(List servers, int t for (ServerAndLoad server : servers) { // The '5' in below is arbitrary. - if (server.getLoad() < 0 || server.getLoad() > max + (tablenum/2 + 5) || - server.getLoad() < (min - tablenum/2 - 5)) { - LOG.warn("server={}, load={}, max={}, tablenum={}, min={}", - server.getServerName(), server.getLoad(), max, tablenum, min); + if (server.getLoad() < 0 || server.getLoad() > max + (tablenum / 2 + 5) + || server.getLoad() < (min - tablenum / 2 - 5)) { + LOG.warn("server={}, load={}, max={}, tablenum={}, min={}", server.getServerName(), + server.getLoad(), max, tablenum, min); return false; } } @@ -233,7 +198,8 @@ public boolean assertClusterOverallAsBalanced(List servers, int t /** * Checks whether region replicas are not hosted on the same host. */ - public void assertRegionReplicaPlacement(Map> serverMap, RackManager rackManager) { + public void assertRegionReplicaPlacement(Map> serverMap, + RackManager rackManager) { TreeMap> regionsPerHost = new TreeMap<>(); TreeMap> regionsPerRack = new TreeMap<>(); @@ -313,12 +279,12 @@ protected String printMock(List balancedCluster) { } /** - * This assumes the RegionPlan HSI instances are the same ones in the map, so - * actually no need to even pass in the map, but I think it's clearer. + * This assumes the RegionPlan HSI instances are the same ones in the map, so actually no need to + * even pass in the map, but I think it's clearer. * @return a list of all added {@link ServerAndLoad} values. 
*/ protected List reconcile(List list, List plans, - Map> servers) { + Map> servers) { List result = new ArrayList<>(list.size()); Map map = new HashMap<>(list.size()); @@ -342,9 +308,8 @@ protected List reconcile(List list, List map, - final ServerName sn, - final int diff) { + protected void updateLoad(final Map map, final ServerName sn, + final int diff) { ServerAndLoad sal = map.get(sn); if (sal == null) sal = new ServerAndLoad(sn, 0); sal = new ServerAndLoad(sn, sal.getLoad() + diff); @@ -356,11 +321,11 @@ protected TreeMap> mockClusterServers(int[] mockClu } protected BalancerClusterState mockCluster(int[] mockCluster) { - return new BalancerClusterState( - mockClusterServers(mockCluster, -1), null, null, null); + return new BalancerClusterState(mockClusterServers(mockCluster, -1), null, null, null); } - protected TreeMap> mockClusterServers(int[] mockCluster, int numTables) { + protected TreeMap> mockClusterServers(int[] mockCluster, + int numTables) { int numServers = mockCluster.length; TreeMap> servers = new TreeMap<>(); for (int i = 0; i < numServers; i++) { @@ -384,12 +349,13 @@ protected TreeMap> mockUniformClusterServers(int[] return servers; } - protected HashMap>> mockClusterServersWithTables(Map> clusterServers) { + protected HashMap>> + mockClusterServersWithTables(Map> clusterServers) { HashMap>> result = new HashMap<>(); for (Map.Entry> entry : clusterServers.entrySet()) { ServerName sal = entry.getKey(); List regions = entry.getValue(); - for (RegionInfo hri : regions){ + for (RegionInfo hri : regions) { TreeMap> servers = result.get(hri.getTable()); if (servers == null) { servers = new TreeMap<>(); @@ -403,8 +369,8 @@ protected HashMap>> mockClusterS hrilist.add(hri); } } - for(Map.Entry>> entry : result.entrySet()){ - for(ServerName srn : clusterServers.keySet()){ + for (Map.Entry>> entry : result.entrySet()) { + for (ServerName srn : clusterServers.keySet()) { if (!entry.getValue().containsKey(srn)) entry.getValue().put(srn, new ArrayList<>()); } } @@ -426,11 +392,8 @@ protected List createRegions(int numRegions, TableName tableName) { for (int i = 0; i < numRegions; i++) { Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(end, 0, (numRegions << 1) + 1); - RegionInfo hri = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).build(); regions.add(hri); } return regions; @@ -449,14 +412,10 @@ protected List randomRegions(int numRegions, int numTables) { } Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(end, 0, (numRegions << 1) + 1); - TableName tableName = TableName.valueOf("table" + - (numTables > 0 ? ThreadLocalRandom.current().nextInt(numTables) : i)); - RegionInfo hri = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .setRegionId(regionId++) - .build(); + TableName tableName = TableName + .valueOf("table" + (numTables > 0 ? 
ThreadLocalRandom.current().nextInt(numTables) : i)); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).setRegionId(regionId++).build(); regions.add(hri); } return regions; @@ -472,11 +431,8 @@ protected List uniformRegions(int numRegions) { Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(end, 0, (numRegions << 1) + 1); TableName tableName = TableName.valueOf("table" + i); - RegionInfo hri = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).build(); regions.add(hri); } return regions; @@ -518,7 +474,7 @@ protected void returnServers(List servers) { } protected Map> createServerMap(int numNodes, int numRegions, - int numRegionsPerServer, int replication, int numTables) { + int numRegionsPerServer, int replication, int numTables) { // construct a cluster of numNodes, having a total of numRegions. Each RS will hold // numRegionsPerServer many regions except for the last one, which will host all the // remaining regions diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyClusterInfoProvider.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyClusterInfoProvider.java index a62e3378c9b7..4a88aa0cc5a3 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyClusterInfoProvider.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyClusterInfoProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -74,7 +74,7 @@ public int getNumberOfTables() throws IOException { @Override public HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf, - TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException { + TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException { return new HDFSBlocksDistribution(); } @@ -90,7 +90,7 @@ public List getOnlineServersList() { @Override public List getOnlineServersListWithPredicator(List servers, - Predicate filter) { + Predicate filter) { return Collections.emptyList(); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyCostFunction.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyCostFunction.java index 680d292d8a66..83b22da4bec2 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyCostFunction.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyCostFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyMetricsStochasticBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyMetricsStochasticBalancer.java index fcb8f64b0ec3..7baddac97f36 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyMetricsStochasticBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/DummyMetricsStochasticBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,13 @@ import java.util.Map; public class DummyMetricsStochasticBalancer extends MetricsStochasticBalancer { - //We use a map to record those metrics that were updated to MetricsStochasticBalancer when running + // We use a map to record those metrics that were updated to MetricsStochasticBalancer when + // running // unit tests. private Map costsMap; public DummyMetricsStochasticBalancer() { - //noop + // noop } @Override @@ -36,39 +37,39 @@ protected void initSource() { @Override public void balanceCluster(long time) { - //noop + // noop } @Override public void incrMiscInvocations() { - //noop + // noop } @Override public void balancerStatus(boolean status) { - //noop + // noop } @Override public void updateMetricsSize(int size) { - //noop + // noop } @Override public void updateStochasticCost(String tableName, String costFunctionName, - String costFunctionDesc, Double value) { + String costFunctionDesc, Double value) { String key = tableName + "#" + costFunctionName; costsMap.put(key, value); } - public Map getDummyCostsMap(){ + public Map getDummyCostsMap() { return this.costsMap; } /** * Clear all metrics in the cache map then prepare to run the next test - * */ - public void clearDummyMetrics(){ + */ + public void clearDummyMetrics() { this.costsMap.clear(); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java index 59433137f654..0dd49b982e30 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.io.IOException; @@ -38,6 +37,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @@ -45,11 +45,9 @@ /** * Tool to test performance of different {@link org.apache.hadoop.hbase.master.LoadBalancer} - * implementations. - * Example command: - * $ bin/hbase org.apache.hadoop.hbase.master.balancer.LoadBalancerPerformanceEvaluation - * -regions 1000 -servers 100 - * -load_balancer org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer + * implementations. 
Example command: $ bin/hbase + * org.apache.hadoop.hbase.master.balancer.LoadBalancerPerformanceEvaluation -regions 1000 -servers + * 100 -load_balancer org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class LoadBalancerPerformanceEvaluation extends AbstractHBaseTool { @@ -102,12 +100,8 @@ private void generateRegionsAndServers() { Bytes.putInt(start, 0, i); Bytes.putInt(end, 0, i + 1); - RegionInfo hri = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .setRegionId(i) - .build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).setRegionId(i).build(); regions.add(hri); regionServerMap.put(hri, null); } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java index 8a077b793ccb..cbba51331ec1 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,8 +39,8 @@ public class StochasticBalancerTestBase extends BalancerTestBase { protected static StochasticLoadBalancer loadBalancer; - protected static DummyMetricsStochasticBalancer dummyMetricsStochasticBalancer = new - DummyMetricsStochasticBalancer(); + protected static DummyMetricsStochasticBalancer dummyMetricsStochasticBalancer = + new DummyMetricsStochasticBalancer(); @BeforeClass public static void beforeAllTests() throws Exception { @@ -55,32 +55,32 @@ public static void beforeAllTests() throws Exception { } protected void testWithCluster(int numNodes, int numRegions, int numRegionsPerServer, - int replication, int numTables, boolean assertFullyBalanced, - boolean assertFullyBalancedForReplicas) { + int replication, int numTables, boolean assertFullyBalanced, + boolean assertFullyBalancedForReplicas) { Map> serverMap = - createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); - testWithCluster(serverMap, null, assertFullyBalanced, - assertFullyBalancedForReplicas); + createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); + testWithCluster(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas); } protected void testWithClusterWithIteration(int numNodes, int numRegions, int numRegionsPerServer, - int replication, int numTables, boolean assertFullyBalanced, - boolean assertFullyBalancedForReplicas) { + int replication, int numTables, boolean assertFullyBalanced, + boolean assertFullyBalancedForReplicas) { Map> serverMap = - createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); + createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); testWithClusterWithIteration(serverMap, null, assertFullyBalanced, assertFullyBalancedForReplicas); } protected void testWithCluster(Map> serverMap, - RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) { + RackManager rackManager, boolean assertFullyBalanced, + boolean assertFullyBalancedForReplicas) { List list = 
convertToList(serverMap); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); loadBalancer.setRackManager(rackManager); // Run the balancer. Map>> LoadOfAllTable = - (Map) mockClusterServersWithTables(serverMap); + (Map) mockClusterServersWithTables(serverMap); List plans = loadBalancer.balanceCluster(LoadOfAllTable); assertNotNull("Initial cluster balance should produce plans.", plans); @@ -96,8 +96,9 @@ protected void testWithCluster(Map> serverMap, assertClusterAsBalanced(balancedCluster); LoadOfAllTable = (Map) mockClusterServersWithTables(serverMap); List secondPlans = loadBalancer.balanceCluster(LoadOfAllTable); - assertNull("Given a requirement to be fully balanced, second attempt at plans should " + - "produce none.", secondPlans); + assertNull("Given a requirement to be fully balanced, second attempt at plans should " + + "produce none.", + secondPlans); } if (assertFullyBalancedForReplicas) { @@ -107,14 +108,15 @@ protected void testWithCluster(Map> serverMap, } protected void testWithClusterWithIteration(Map> serverMap, - RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) { + RackManager rackManager, boolean assertFullyBalanced, + boolean assertFullyBalancedForReplicas) { List list = convertToList(serverMap); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); loadBalancer.setRackManager(rackManager); // Run the balancer. Map>> LoadOfAllTable = - (Map) mockClusterServersWithTables(serverMap); + (Map) mockClusterServersWithTables(serverMap); List plans = loadBalancer.balanceCluster(LoadOfAllTable); assertNotNull("Initial cluster balance should produce plans.", plans); @@ -135,8 +137,9 @@ protected void testWithClusterWithIteration(Map> se LOG.info("Mock Final balance: " + printMock(balancedCluster)); if (assertFullyBalanced) { - assertNull("Given a requirement to be fully balanced, second attempt at plans should " + - "produce none.", plans); + assertNull("Given a requirement to be fully balanced, second attempt at plans should " + + "produce none.", + plans); } if (assertFullyBalancedForReplicas) { assertRegionReplicaPlacement(serverMap, rackManager); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java index 41dbb552db6a..9bab75b51257 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/StochasticBalancerTestBase2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java index 5dc3fa81e1b3..4362bd8268e2 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -62,7 +62,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestBaseLoadBalancer extends BalancerTestBase { @ClassRule @@ -96,7 +96,7 @@ public static void beforeAllTests() throws Exception { // Set up the rack topologies (5 machines per rack) rackManager = mock(RackManager.class); for (int i = 0; i < NUM_SERVERS; i++) { - servers[i] = ServerName.valueOf("foo"+i+":1234",-1); + servers[i] = ServerName.valueOf("foo" + i + ":1234", -1); if (i < 5) { when(rackManager.getRack(servers[i])).thenReturn("rack1"); } @@ -119,10 +119,9 @@ protected List balanceTable(TableName tableName, } /** - * Tests the bulk assignment used during cluster startup. - * - * Round-robin. Should yield a balanced cluster so same invariant as the load - * balancer holds, all servers holding either floor(avg) or ceiling(avg). + * Tests the bulk assignment used during cluster startup. Round-robin. Should yield a balanced + * cluster so same invariant as the load balancer holds, all servers holding either floor(avg) or + * ceiling(avg). */ @Test public void testBulkAssignment() throws Exception { @@ -132,7 +131,7 @@ public void testBulkAssignment() throws Exception { tmp.add(master); Map> plans = loadBalancer.roundRobinAssignment(hris, tmp); int totalRegion = 0; - for (List regions: plans.values()) { + for (List regions : plans.values()) { totalRegion += regions.size(); } assertEquals(hris.size(), totalRegion); @@ -157,8 +156,7 @@ public void testBulkAssignment() throws Exception { } /** - * Test the cluster startup bulk assignment which attempts to retain - * assignment info. + * Test the cluster startup bulk assignment which attempts to retain assignment info. 
*/ @Test public void testRetainAssignment() throws Exception { @@ -220,16 +218,13 @@ private void testRandomAssignment(int numberOfIdleServers) throws Exception { @Override public List getOnlineServersListWithPredicator(List servers, - Predicate filter) { + Predicate filter) { return idleServers; } }); RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key1")) - .setEndKey(Bytes.toBytes("key2")) - .setSplit(false) - .setRegionId(100) - .build(); + .setStartKey(Bytes.toBytes("key1")).setEndKey(Bytes.toBytes("key2")).setSplit(false) + .setRegionId(100).build(); assertNull(balancer.randomAssignment(hri1, Collections.emptyList())); assertNull(balancer.randomAssignment(hri1, null)); for (int i = 0; i != 3; ++i) { @@ -250,27 +245,21 @@ public void testRegionAvailability() throws Exception { List list2 = new ArrayList<>(); // create a region (region1) RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key1")) - .setEndKey(Bytes.toBytes("key2")) - .setSplit(false) - .setRegionId(100) - .build(); + .setStartKey(Bytes.toBytes("key1")).setEndKey(Bytes.toBytes("key2")).setSplit(false) + .setRegionId(100).build(); // create a replica of the region (replica_of_region1) RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1); // create a second region (region2) RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key2")) - .setEndKey(Bytes.toBytes("key3")) - .setSplit(false) - .setRegionId(101) - .build(); - list0.add(hri1); //only region1 - list1.add(hri2); //only replica_of_region1 - list2.add(hri3); //only region2 + .setStartKey(Bytes.toBytes("key2")).setEndKey(Bytes.toBytes("key3")).setSplit(false) + .setRegionId(101).build(); + list0.add(hri1); // only region1 + list1.add(hri2); // only replica_of_region1 + list2.add(hri3); // only region2 Map> clusterState = new LinkedHashMap<>(); - clusterState.put(servers[0], list0); //servers[0] hosts region1 - clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1 - clusterState.put(servers[2], list2); //servers[2] hosts region2 + clusterState.put(servers[0], list0); // servers[0] hosts region1 + clusterState.put(servers[1], list1); // servers[1] hosts replica_of_region1 + clusterState.put(servers[2], list2); // servers[2] hosts region2 // create a cluster with the above clusterState. 
The way in which the // cluster is created (constructor code) would make sure the indices of // the servers are in the order in which it is inserted in the clusterState @@ -299,10 +288,11 @@ public void testRegionAvailability() throws Exception { // start over again clusterState.clear(); - clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 - clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 and replica_of_region2 - clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2 - clusterState.put(servers[10], new ArrayList<>()); //servers[10], rack3 hosts no region + clusterState.put(servers[0], list0); // servers[0], rack1 hosts region1 + clusterState.put(servers[5], list1); // servers[5], rack2 hosts replica_of_region1 and + // replica_of_region2 + clusterState.put(servers[6], list2); // servers[6], rack2 hosts region2 + clusterState.put(servers[10], new ArrayList<>()); // servers[10], rack3 hosts no region // create a cluster with the above clusterState cluster = new BalancerClusterState(clusterState, null, null, rackManager); // check whether a move of region1 from servers[0],rack1 to servers[6],rack2 would @@ -324,27 +314,21 @@ public void testRegionAvailabilityWithRegionMoves() throws Exception { List list2 = new ArrayList<>(); // create a region (region1) RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key1")) - .setEndKey(Bytes.toBytes("key2")) - .setSplit(false) - .setRegionId(100) - .build(); + .setStartKey(Bytes.toBytes("key1")).setEndKey(Bytes.toBytes("key2")).setSplit(false) + .setRegionId(100).build(); // create a replica of the region (replica_of_region1) RegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1); // create a second region (region2) RegionInfo hri3 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(Bytes.toBytes("key2")) - .setEndKey(Bytes.toBytes("key3")) - .setSplit(false) - .setRegionId(101) - .build(); - list0.add(hri1); //only region1 - list1.add(hri2); //only replica_of_region1 - list2.add(hri3); //only region2 + .setStartKey(Bytes.toBytes("key2")).setEndKey(Bytes.toBytes("key3")).setSplit(false) + .setRegionId(101).build(); + list0.add(hri1); // only region1 + list1.add(hri2); // only replica_of_region1 + list2.add(hri3); // only region2 Map> clusterState = new LinkedHashMap<>(); - clusterState.put(servers[0], list0); //servers[0] hosts region1 - clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1 - clusterState.put(servers[2], list2); //servers[2] hosts region2 + clusterState.put(servers[0], list0); // servers[0] hosts region1 + clusterState.put(servers[1], list1); // servers[1] hosts replica_of_region1 + clusterState.put(servers[2], list2); // servers[2] hosts region2 // create a cluster with the above clusterState. 
The way in which the // cluster is created (constructor code) would make sure the indices of // the servers are in the order in which it is inserted in the clusterState @@ -364,10 +348,10 @@ public void testRegionAvailabilityWithRegionMoves() throws Exception { List list3 = new ArrayList<>(); RegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1); list3.add(hri4); - clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 - clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 - clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2 - clusterState.put(servers[12], list3); //servers[12], rack3 hosts replica_of_region2 + clusterState.put(servers[0], list0); // servers[0], rack1 hosts region1 + clusterState.put(servers[5], list1); // servers[5], rack2 hosts replica_of_region1 + clusterState.put(servers[6], list2); // servers[6], rack2 hosts region2 + clusterState.put(servers[12], list3); // servers[12], rack3 hosts replica_of_region2 // create a cluster with the above clusterState cluster = new BalancerClusterState(clusterState, null, null, rackManager); // check whether a move of replica_of_region2 from servers[12],rack3 to servers[0],rack1 would @@ -390,8 +374,8 @@ private List getListOfServerNames(final List sals) { * Must meet the following conditions: *
<ul> * <li>Every input region has an assignment, and to an online server
- * <li>If a region had an existing assignment to a server with the same
- * address a a currently online server, it will be assigned to it
+ * <li>If a region had an existing assignment to a server with the same address a a currently
+ * online server, it will be assigned to it
* </ul>
    */ private void assertRetainedAssignment(Map existing, @@ -485,21 +469,21 @@ public void testClusterRegionLocations() { // mock block locality for some regions RegionHDFSBlockLocationFinder locationFinder = mock(RegionHDFSBlockLocationFinder.class); - // block locality: region:0 => {server:0} - // region:1 => {server:0, server:1} - // region:42 => {server:4, server:9, server:5} - when(locationFinder.getTopBlockLocations(regions.get(0))).thenReturn( - Lists.newArrayList(servers.get(0))); - when(locationFinder.getTopBlockLocations(regions.get(1))).thenReturn( - Lists.newArrayList(servers.get(0), servers.get(1))); - when(locationFinder.getTopBlockLocations(regions.get(42))).thenReturn( - Lists.newArrayList(servers.get(4), servers.get(9), servers.get(5))); + // block locality: region:0 => {server:0} + // region:1 => {server:0, server:1} + // region:42 => {server:4, server:9, server:5} + when(locationFinder.getTopBlockLocations(regions.get(0))) + .thenReturn(Lists.newArrayList(servers.get(0))); + when(locationFinder.getTopBlockLocations(regions.get(1))) + .thenReturn(Lists.newArrayList(servers.get(0), servers.get(1))); + when(locationFinder.getTopBlockLocations(regions.get(42))) + .thenReturn(Lists.newArrayList(servers.get(4), servers.get(9), servers.get(5))); // this server does not exists in clusterStatus when(locationFinder.getTopBlockLocations(regions.get(43))) - .thenReturn(Lists.newArrayList(ServerName.valueOf("foo", 0, 0))); + .thenReturn(Lists.newArrayList(ServerName.valueOf("foo", 0, 0))); BalancerClusterState cluster = - new BalancerClusterState(clusterState, null, locationFinder, null); + new BalancerClusterState(clusterState, null, locationFinder, null); // this is ok, it is just a test int r0 = ArrayUtils.indexOf(cluster.regions, regions.get(0)); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java index 38834a8c9fa4..2e651da20866 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDoubleArrayCost.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ public class TestDoubleArrayCost { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDoubleArrayCost.class); + HBaseClassTestRule.forClass(TestDoubleArrayCost.class); @Test public void testComputeCost() { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionHDFSBlockLocationFinder.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionHDFSBlockLocationFinder.java index 11ef8695785f..c23668ccd0cb 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionHDFSBlockLocationFinder.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionHDFSBlockLocationFinder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public class TestRegionHDFSBlockLocationFinder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionHDFSBlockLocationFinder.class); + HBaseClassTestRule.forClass(TestRegionHDFSBlockLocationFinder.class); private static final Random RNG = new Random(); // This test depends on Random#setSeed private static TableDescriptor TD; @@ -89,7 +89,7 @@ public static void setUpBeforeClass() { byte[] startKey = i == 0 ? HConstants.EMPTY_START_ROW : Bytes.toBytes(i); byte[] endKey = i == numRegions ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes(i + 1); RegionInfo region = RegionInfoBuilder.newBuilder(TD.getTableName()).setStartKey(startKey) - .setEndKey(endKey).build(); + .setEndKey(endKey).build(); REGIONS.add(region); } } @@ -111,7 +111,7 @@ public List getAssignedRegions() { @Override public HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf, - TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException { + TableDescriptor tableDescriptor, RegionInfo regionInfo) throws IOException { return generate(regionInfo); } }); @@ -159,7 +159,7 @@ public void testRefreshAndWait() throws Exception { } private void assertHostAndWeightEquals(HDFSBlocksDistribution expected, - HDFSBlocksDistribution actual) { + HDFSBlocksDistribution actual) { Map expectedMap = expected.getHostAndWeights(); Map actualMap = actual.getHostAndWeights(); assertEquals(expectedMap.size(), actualMap.size()); @@ -220,8 +220,8 @@ public void testRefreshRegionsWithChangedLocality() throws InterruptedException cache.put(region, hbd); } - finder.setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), - 0.123f)); + finder + .setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), 0.123f)); // everything should be cached, because metrics were null before for (RegionInfo region : REGIONS) { @@ -229,8 +229,8 @@ public void testRefreshRegionsWithChangedLocality() throws InterruptedException assertSame(cache.get(region), hbd); } - finder.setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), - 0.345f)); + finder + .setClusterMetrics(getMetricsWithLocality(testServer, testRegion.getRegionName(), 0.345f)); // cache refresh happens in a background thread, so we need to wait for the value to // update before running assertions. @@ -254,7 +254,7 @@ public void testRefreshRegionsWithChangedLocality() throws InterruptedException } private ClusterMetrics getMetricsWithLocality(ServerName serverName, byte[] region, - float locality) { + float locality) { RegionMetrics regionMetrics = mock(RegionMetrics.class); when(regionMetrics.getDataLocality()).thenReturn(locality); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestServerAndLoad.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestServerAndLoad.java index 982321f4c598..fb7fce0a0269 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestServerAndLoad.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestServerAndLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestServerAndLoad { @ClassRule @@ -50,6 +50,6 @@ public void test() { ServerName other = ServerName.valueOf("other", 12345, 112244); assertNotEquals(sal.hashCode(), new ServerAndLoad(other, startcode).hashCode()); assertNotEquals(sal, new ServerAndLoad(other, startcode)); - } + } } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java index 1fb02629255e..4617fb5b9a07 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestSimpleLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ /** * Test the load balancer that is created by default. */ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestSimpleLoadBalancer extends BalancerTestBase { @ClassRule @@ -74,10 +74,8 @@ public static void beforeAllTests() throws Exception { public TestName name = new TestName(); /** - * Test the load balancing algorithm. - * - * Invariant is that all servers should be hosting either floor(average) or - * ceiling(average) at both table level and cluster level + * Test the load balancing algorithm. Invariant is that all servers should be hosting either + * floor(average) or ceiling(average) at both table level and cluster level */ @Test public void testBalanceClusterOverall() throws Exception { @@ -97,7 +95,7 @@ public void testBalanceClusterOverall() throws Exception { List list = convertToList(servers); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); List partialplans = loadBalancer.balanceTable(tableName, servers); - if(partialplans != null) clusterplans.addAll(partialplans); + if (partialplans != null) clusterplans.addAll(partialplans); List balancedClusterPerTable = reconcile(list, partialplans, servers); LOG.info("Mock Balance : " + printMock(balancedClusterPerTable)); assertClusterAsBalanced(balancedClusterPerTable); @@ -112,12 +110,10 @@ public void testBalanceClusterOverall() throws Exception { } /** - * Test the load balancing algorithm. - * - * Invariant is that all servers should be hosting either floor(average) or - * ceiling(average) at both table level and cluster level - * Deliberately generate a special case to show the overall strategy can achieve cluster - * level balance while the bytable strategy cannot + * Test the load balancing algorithm. 
Invariant is that all servers should be hosting either + * floor(average) or ceiling(average) at both table level and cluster level Deliberately generate + * a special case to show the overall strategy can achieve cluster level balance while the bytable + * strategy cannot */ @Test public void testImpactOfBalanceClusterOverall() throws Exception { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java index 2f862cda65f1..0d6c6e5d3103 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -50,6 +51,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; @Category({ MasterTests.class, MediumTests.class }) @@ -62,64 +64,60 @@ public class TestStochasticLoadBalancer extends StochasticBalancerTestBase { private static final String REGION_KEY = "testRegion"; // Mapping of locality test -> expected locality - private float[] expectedLocalities = {1.0f, 0.0f, 0.50f, 0.25f, 1.0f}; + private float[] expectedLocalities = { 1.0f, 0.0f, 0.50f, 0.25f, 1.0f }; /** - * Data set for testLocalityCost: - * [test][0][0] = mapping of server to number of regions it hosts - * [test][region + 1][0] = server that region is hosted on - * [test][region + 1][server + 1] = locality for region on server + * Data set for testLocalityCost: [test][0][0] = mapping of server to number of regions it hosts + * [test][region + 1][0] = server that region is hosted on [test][region + 1][server + 1] = + * locality for region on server */ - private int[][][] clusterRegionLocationMocks = new int[][][]{ - // Test 1: each region is entirely on server that hosts it - new int[][]{ - new int[]{2, 1, 1}, - new int[]{2, 0, 0, 100}, // region 0 is hosted and entirely local on server 2 - new int[]{0, 100, 0, 0}, // region 1 is hosted and entirely on server 0 - new int[]{0, 100, 0, 0}, // region 2 is hosted and entirely on server 0 - new int[]{1, 0, 100, 0}, // region 3 is hosted and entirely on server 1 - }, - - // Test 2: each region is 0% local on the server that hosts it - new int[][]{ - new int[]{1, 2, 1}, - new int[]{0, 0, 0, 100}, // region 0 is hosted and entirely local on server 2 - new int[]{1, 100, 0, 0}, // region 1 is hosted and entirely on server 0 - new int[]{1, 100, 0, 0}, // region 2 is hosted and entirely on server 0 - new int[]{2, 0, 100, 0}, // region 3 is hosted and entirely on server 1 - }, - - // Test 3: each region is 25% local on the server that hosts it (and 50% locality is possible) - new int[][]{ - new int[]{1, 2, 1}, - new int[]{0, 25, 0, 50}, // region 0 is hosted and entirely local on server 2 - new int[]{1, 50, 25, 0}, // region 1 is hosted and entirely on server 0 - new int[]{1, 50, 25, 0}, // region 2 is hosted and entirely on 
server 0 - new int[]{2, 0, 50, 25}, // region 3 is hosted and entirely on server 1 - }, - - // Test 4: each region is 25% local on the server that hosts it (and 100% locality is possible) - new int[][]{ - new int[]{1, 2, 1}, - new int[]{0, 25, 0, 100}, // region 0 is hosted and entirely local on server 2 - new int[]{1, 100, 25, 0}, // region 1 is hosted and entirely on server 0 - new int[]{1, 100, 25, 0}, // region 2 is hosted and entirely on server 0 - new int[]{2, 0, 100, 25}, // region 3 is hosted and entirely on server 1 - }, - - // Test 5: each region is 75% local on the server that hosts it (and 75% locality is possible - // everywhere) - new int[][]{ - new int[]{1, 2, 1}, - new int[]{0, 75, 75, 75}, // region 0 is hosted and entirely local on server 2 - new int[]{1, 75, 75, 75}, // region 1 is hosted and entirely on server 0 - new int[]{1, 75, 75, 75}, // region 2 is hosted and entirely on server 0 - new int[]{2, 75, 75, 75}, // region 3 is hosted and entirely on server 1 - }, - }; + private int[][][] clusterRegionLocationMocks = new int[][][] { + // Test 1: each region is entirely on server that hosts it + new int[][] { new int[] { 2, 1, 1 }, new int[] { 2, 0, 0, 100 }, // region 0 is hosted and + // entirely local on server 2 + new int[] { 0, 100, 0, 0 }, // region 1 is hosted and entirely on server 0 + new int[] { 0, 100, 0, 0 }, // region 2 is hosted and entirely on server 0 + new int[] { 1, 0, 100, 0 }, // region 3 is hosted and entirely on server 1 + }, + + // Test 2: each region is 0% local on the server that hosts it + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 0, 0, 100 }, // region 0 is hosted and + // entirely local on server 2 + new int[] { 1, 100, 0, 0 }, // region 1 is hosted and entirely on server 0 + new int[] { 1, 100, 0, 0 }, // region 2 is hosted and entirely on server 0 + new int[] { 2, 0, 100, 0 }, // region 3 is hosted and entirely on server 1 + }, + + // Test 3: each region is 25% local on the server that hosts it (and 50% locality is possible) + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 25, 0, 50 }, // region 0 is hosted and + // entirely local on server 2 + new int[] { 1, 50, 25, 0 }, // region 1 is hosted and entirely on server 0 + new int[] { 1, 50, 25, 0 }, // region 2 is hosted and entirely on server 0 + new int[] { 2, 0, 50, 25 }, // region 3 is hosted and entirely on server 1 + }, + + // Test 4: each region is 25% local on the server that hosts it (and 100% locality is + // possible) + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 25, 0, 100 }, // region 0 is hosted and + // entirely local on server + // 2 + new int[] { 1, 100, 25, 0 }, // region 1 is hosted and entirely on server 0 + new int[] { 1, 100, 25, 0 }, // region 2 is hosted and entirely on server 0 + new int[] { 2, 0, 100, 25 }, // region 3 is hosted and entirely on server 1 + }, + + // Test 5: each region is 75% local on the server that hosts it (and 75% locality is possible + // everywhere) + new int[][] { new int[] { 1, 2, 1 }, new int[] { 0, 75, 75, 75 }, // region 0 is hosted and + // entirely local on server + // 2 + new int[] { 1, 75, 75, 75 }, // region 1 is hosted and entirely on server 0 + new int[] { 1, 75, 75, 75 }, // region 2 is hosted and entirely on server 0 + new int[] { 2, 75, 75, 75 }, // region 3 is hosted and entirely on server 1 + }, }; private ServerMetrics mockServerMetricsWithCpRequests(List regionsOnServer, - long cpRequestCount) { + long cpRequestCount) { ServerMetrics serverMetrics = mock(ServerMetrics.class); Map regionLoadMap = new 
TreeMap<>(Bytes.BYTES_COMPARATOR); for (RegionInfo info : regionsOnServer) { @@ -225,20 +223,20 @@ public void testKeepRegionLoad() throws Exception { Queue loads = loadBalancer.loads.get(regionNameAsString); int i = 0; - while(loads.size() > 0) { + while (loads.size() > 0) { BalancerRegionLoad rl = loads.remove(); assertEquals(i + (numClusterStatusToAdd - 15), rl.getStorefileSizeMB()); - i ++; + i++; } } @Test - public void testUpdateBalancerLoadInfo(){ + public void testUpdateBalancerLoadInfo() { int[] cluster = new int[] { 10, 0 }; Map> servers = mockClusterServers(cluster); BalancerClusterState clusterState = mockCluster(cluster); Map>> LoadOfAllTable = - (Map) mockClusterServersWithTables(servers); + (Map) mockClusterServersWithTables(servers); try { boolean[] perTableBalancerConfigs = { true, false }; for (boolean isByTable : perTableBalancerConfigs) { @@ -247,20 +245,22 @@ public void testUpdateBalancerLoadInfo(){ dummyMetricsStochasticBalancer.clearDummyMetrics(); loadBalancer.updateBalancerLoadInfo(LoadOfAllTable); assertTrue("Metrics should be recorded!", - dummyMetricsStochasticBalancer.getDummyCostsMap() != null && !dummyMetricsStochasticBalancer.getDummyCostsMap().isEmpty()); + dummyMetricsStochasticBalancer.getDummyCostsMap() != null + && !dummyMetricsStochasticBalancer.getDummyCostsMap().isEmpty()); String metricRecordKey; if (isByTable) { metricRecordKey = "table1#" + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME; } else { - metricRecordKey = HConstants.ENSEMBLE_TABLE_NAME + "#" + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME; + metricRecordKey = HConstants.ENSEMBLE_TABLE_NAME + "#" + + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME; } double curOverallCost = loadBalancer.computeCost(clusterState, Double.MAX_VALUE); double curOverallCostInMetrics = - dummyMetricsStochasticBalancer.getDummyCostsMap().get(metricRecordKey); + dummyMetricsStochasticBalancer.getDummyCostsMap().get(metricRecordKey); assertEquals(curOverallCost, curOverallCostInMetrics, 0.001); } - }finally { + } finally { conf.unset(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE); loadBalancer.onConfigurationChange(conf); } @@ -277,7 +277,8 @@ public void testUpdateStochasticCosts() { conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, false); loadBalancer.onConfigurationChange(conf); dummyMetricsStochasticBalancer.clearDummyMetrics(); - List plans = loadBalancer.balanceCluster((Map)mockClusterServersWithTables(servers)); + List plans = + loadBalancer.balanceCluster((Map) mockClusterServersWithTables(servers)); assertTrue("Balance plan should not be empty!", plans != null && !plans.isEmpty()); assertTrue("There should be metrics record in MetricsStochasticBalancer", @@ -288,7 +289,7 @@ public void testUpdateStochasticCosts() { HConstants.ENSEMBLE_TABLE_NAME + "#" + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME); assertEquals(overallCostOfCluster, overallCostInMetrics, 0.001); } finally { - //reset config + // reset config conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", minCost); conf.unset(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE); loadBalancer.onConfigurationChange(conf); @@ -306,7 +307,8 @@ public void testUpdateStochasticCostsIfBalanceNotRan() { conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, false); loadBalancer.onConfigurationChange(conf); dummyMetricsStochasticBalancer.clearDummyMetrics(); - List plans = loadBalancer.balanceCluster( (Map) mockClusterServersWithTables(servers)); + List plans = + loadBalancer.balanceCluster((Map) 
mockClusterServersWithTables(servers)); assertTrue("Balance plan should be empty!", plans == null || plans.isEmpty()); assertTrue("There should be metrics record in MetricsStochasticBalancer!", @@ -317,7 +319,7 @@ public void testUpdateStochasticCostsIfBalanceNotRan() { HConstants.ENSEMBLE_TABLE_NAME + "#" + StochasticLoadBalancer.OVERALL_COST_FUNCTION_NAME); assertEquals(overallCostOfCluster, overallCostInMetrics, 0.001); } finally { - //reset config + // reset config conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", minCost); conf.unset(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE); loadBalancer.onConfigurationChange(conf); @@ -330,7 +332,7 @@ public void testNeedBalance() { conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f); try { // Test with/without per table balancer. - boolean[] perTableBalancerConfigs = {true, false}; + boolean[] perTableBalancerConfigs = { true, false }; for (boolean isByTable : perTableBalancerConfigs) { conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, isByTable); loadBalancer.onConfigurationChange(conf); @@ -354,8 +356,7 @@ public void testNeedBalance() { @Test public void testLocalityCost() throws Exception { Configuration conf = HBaseConfiguration.create(); - CostFunction - costFunction = new ServerLocalityCostFunction(conf); + CostFunction costFunction = new ServerLocalityCostFunction(conf); for (int test = 0; test < clusterRegionLocationMocks.length; test++) { int[][] clusterRegionLocations = clusterRegionLocationMocks[test]; @@ -371,21 +372,18 @@ public void testLocalityCost() throws Exception { public void testMoveCostMultiplier() throws Exception { Configuration conf = HBaseConfiguration.create(); ClusterInfoProvider provider = mock(ClusterInfoProvider.class); - CostFunction costFunction = - new MoveCostFunction(conf, provider); + CostFunction costFunction = new MoveCostFunction(conf, provider); when(provider.isOffPeakHour()).thenReturn(false); BalancerClusterState cluster = mockCluster(clusterStateMocks[0]); costFunction.prepare(cluster); costFunction.cost(); - assertEquals(MoveCostFunction.DEFAULT_MOVE_COST, - costFunction.getMultiplier(), 0.01); + assertEquals(MoveCostFunction.DEFAULT_MOVE_COST, costFunction.getMultiplier(), 0.01); // In offpeak hours, the multiplier of move cost should be lower when(provider.isOffPeakHour()).thenReturn(true); costFunction.prepare(cluster); costFunction.cost(); - assertEquals(MoveCostFunction.DEFAULT_MOVE_COST_OFFPEAK, - costFunction.getMultiplier(), 0.01); + assertEquals(MoveCostFunction.DEFAULT_MOVE_COST_OFFPEAK, costFunction.getMultiplier(), 0.01); } @Test @@ -410,7 +408,6 @@ public void testMoveCost() throws Exception { cost = costFunction.cost(); assertEquals(1.0f, cost, 0.001); - // cluster region number is bigger than maxMoves=2500 cluster.setNumRegions(10000); cluster.setNumMovedRegions(250); @@ -428,8 +425,7 @@ public void testMoveCost() throws Exception { @Test public void testSkewCost() { Configuration conf = HBaseConfiguration.create(); - CostFunction - costFunction = new RegionCountSkewCostFunction(conf); + CostFunction costFunction = new RegionCountSkewCostFunction(conf); for (int[] mockCluster : clusterStateMocks) { costFunction.prepare(mockCluster(mockCluster)); double cost = costFunction.cost(); @@ -437,17 +433,17 @@ public void testSkewCost() { assertTrue(cost <= 1.01); } - costFunction.prepare(mockCluster(new int[]{0, 0, 0, 0, 1})); - assertEquals(0,costFunction.cost(), 0.01); - costFunction.prepare(mockCluster(new int[]{0, 0, 0, 1, 1})); + 
costFunction.prepare(mockCluster(new int[] { 0, 0, 0, 0, 1 })); assertEquals(0, costFunction.cost(), 0.01); - costFunction.prepare(mockCluster(new int[]{0, 0, 1, 1, 1})); + costFunction.prepare(mockCluster(new int[] { 0, 0, 0, 1, 1 })); assertEquals(0, costFunction.cost(), 0.01); - costFunction.prepare(mockCluster(new int[]{0, 1, 1, 1, 1})); + costFunction.prepare(mockCluster(new int[] { 0, 0, 1, 1, 1 })); assertEquals(0, costFunction.cost(), 0.01); - costFunction.prepare(mockCluster(new int[]{1, 1, 1, 1, 1})); + costFunction.prepare(mockCluster(new int[] { 0, 1, 1, 1, 1 })); assertEquals(0, costFunction.cost(), 0.01); - costFunction.prepare(mockCluster(new int[]{10000, 0, 0, 0, 0})); + costFunction.prepare(mockCluster(new int[] { 1, 1, 1, 1, 1 })); + assertEquals(0, costFunction.cost(), 0.01); + costFunction.prepare(mockCluster(new int[] { 10000, 0, 0, 0, 0 })); assertEquals(1, costFunction.cost(), 0.01); } @@ -475,8 +471,7 @@ public void testCostAfterUndoAction() { @Test public void testTableSkewCost() { Configuration conf = HBaseConfiguration.create(); - CostFunction - costFunction = new TableSkewCostFunction(conf); + CostFunction costFunction = new TableSkewCostFunction(conf); for (int[] mockCluster : clusterStateMocks) { BalancerClusterState cluster = mockCluster(mockCluster); costFunction.prepare(cluster); @@ -498,20 +493,17 @@ public void testRegionLoadCost() { } Configuration conf = HBaseConfiguration.create(); - ReadRequestCostFunction readCostFunction = - new ReadRequestCostFunction(conf); + ReadRequestCostFunction readCostFunction = new ReadRequestCostFunction(conf); double rateResult = readCostFunction.getRegionLoadCost(regionLoads); // read requests are treated as a rate so the average rate here is simply 1 assertEquals(1, rateResult, 0.01); - CPRequestCostFunction cpCostFunction = - new CPRequestCostFunction(conf); + CPRequestCostFunction cpCostFunction = new CPRequestCostFunction(conf); rateResult = cpCostFunction.getRegionLoadCost(regionLoads); // coprocessor requests are treated as a rate so the average rate here is simply 1 assertEquals(1, rateResult, 0.01); - StoreFileCostFunction storeFileCostFunction = - new StoreFileCostFunction(conf); + StoreFileCostFunction storeFileCostFunction = new StoreFileCostFunction(conf); double result = storeFileCostFunction.getRegionLoadCost(regionLoads); // storefile size cost is simply an average of it's value over time assertEquals(2.5, result, 0.01); @@ -524,20 +516,18 @@ public void testRegionLoadCostWhenDecrease() { for (int i = 1; i < 5; i++) { int load = i == 3 ? 
1 : i; BalancerRegionLoad regionLoad = mock(BalancerRegionLoad.class); - when(regionLoad.getReadRequestsCount()).thenReturn((long)load); - when(regionLoad.getCpRequestsCount()).thenReturn((long)load); + when(regionLoad.getReadRequestsCount()).thenReturn((long) load); + when(regionLoad.getCpRequestsCount()).thenReturn((long) load); regionLoads.add(regionLoad); } Configuration conf = HBaseConfiguration.create(); - ReadRequestCostFunction readCostFunction = - new ReadRequestCostFunction(conf); + ReadRequestCostFunction readCostFunction = new ReadRequestCostFunction(conf); double rateResult = readCostFunction.getRegionLoadCost(regionLoads); // read requests are treated as a rate so the average rate here is simply 1 assertEquals(1.67, rateResult, 0.01); - CPRequestCostFunction cpCostFunction = - new CPRequestCostFunction(conf); + CPRequestCostFunction cpCostFunction = new CPRequestCostFunction(conf); rateResult = cpCostFunction.getRegionLoadCost(regionLoads); // coprocessor requests are treated as a rate so the average rate here is simply 1 assertEquals(1.67, rateResult, 0.01); @@ -547,7 +537,7 @@ public void testRegionLoadCostWhenDecrease() { public void testLosingRs() throws Exception { int numNodes = 3; int numRegions = 20; - int numRegionsPerServer = 3; //all servers except one + int numRegionsPerServer = 3; // all servers except one int replication = 1; int numTables = 2; @@ -555,7 +545,6 @@ public void testLosingRs() throws Exception { createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); List list = convertToList(serverMap); - List plans = loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap); assertNotNull(plans); @@ -581,9 +570,8 @@ public void testAdditionalCostFunction() { DummyCostFunction.class.getName()); loadBalancer.onConfigurationChange(conf); - assertTrue(Arrays. - asList(loadBalancer.getCostFunctionNames()). 
- contains(DummyCostFunction.class.getSimpleName())); + assertTrue(Arrays.asList(loadBalancer.getCostFunctionNames()) + .contains(DummyCostFunction.class.getSimpleName())); } finally { conf.unset(StochasticLoadBalancer.COST_FUNCTIONS_COST_FUNCTIONS_KEY); loadBalancer.onConfigurationChange(conf); @@ -592,20 +580,15 @@ public void testAdditionalCostFunction() { @Test public void testDefaultCostFunctionList() { - List expected = Arrays.asList( - RegionCountSkewCostFunction.class.getSimpleName(), + List expected = Arrays.asList(RegionCountSkewCostFunction.class.getSimpleName(), PrimaryRegionCountSkewCostFunction.class.getSimpleName(), - MoveCostFunction.class.getSimpleName(), - RackLocalityCostFunction.class.getSimpleName(), + MoveCostFunction.class.getSimpleName(), RackLocalityCostFunction.class.getSimpleName(), TableSkewCostFunction.class.getSimpleName(), RegionReplicaHostCostFunction.class.getSimpleName(), RegionReplicaRackCostFunction.class.getSimpleName(), - ReadRequestCostFunction.class.getSimpleName(), - CPRequestCostFunction.class.getSimpleName(), + ReadRequestCostFunction.class.getSimpleName(), CPRequestCostFunction.class.getSimpleName(), WriteRequestCostFunction.class.getSimpleName(), - MemStoreSizeCostFunction.class.getSimpleName(), - StoreFileCostFunction.class.getSimpleName() - ); + MemStoreSizeCostFunction.class.getSimpleName(), StoreFileCostFunction.class.getSimpleName()); List actual = Arrays.asList(loadBalancer.getCostFunctionNames()); assertTrue("ExpectedCostFunctions: " + expected + " ActualCostFunctions: " + actual, @@ -613,14 +596,14 @@ public void testDefaultCostFunctionList() { } private boolean needsBalanceIdleRegion(int[] cluster) { - return Arrays.stream(cluster).anyMatch(x -> x > 1) && - Arrays.stream(cluster).anyMatch(x -> x < 1); + return Arrays.stream(cluster).anyMatch(x -> x > 1) + && Arrays.stream(cluster).anyMatch(x -> x < 1); } // This mock allows us to test the LocalityCostFunction private class MockCluster extends BalancerClusterState { - private int[][] localities = null; // [region][server] = percent of blocks + private int[][] localities = null; // [region][server] = percent of blocks public MockCluster(int[][] regions) { @@ -635,7 +618,7 @@ public MockCluster(int[][] regions) { for (int j = 1; j < regions[i].length; j++) { int serverIndex = j - 1; localities[regionIndex][serverIndex] = - regions[i][j] > 100 ? regions[i][j] % 100 : regions[i][j]; + regions[i][j] > 100 ? regions[i][j] % 100 : regions[i][j]; } } } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java index 5269fe71d7f0..7d1b911b2a98 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerBalanceCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java index 5a0dc06e4707..d94a7cc9843b 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCost.java @@ -56,10 +56,10 @@ public class TestStochasticLoadBalancerHeterogeneousCost extends StochasticBalan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerHeterogeneousCost.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerHeterogeneousCost.class); private static final Logger LOG = - LoggerFactory.getLogger(TestStochasticLoadBalancerHeterogeneousCost.class); + LoggerFactory.getLogger(TestStochasticLoadBalancerHeterogeneousCost.class); private static final double ALLOWED_WINDOW = 1.20; private static final HBaseCommonTestingUtil HTU = new HBaseCommonTestingUtil(); private static String RULES_FILE; @@ -150,27 +150,27 @@ public void testOverloaded() throws IOException { createRulesFile(RULES_FILE); final Map> serverMap = - this.createServerMap(numNodes, numRegions, numRegionsPerServer, 1, 1); + this.createServerMap(numNodes, numRegions, numRegionsPerServer, 1, 1); final List plans = - loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap); + loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap); // As we disabled all the other cost functions, balancing only according to // the heterogeneous cost function should return nothing. assertNull(plans); } private void testHeterogeneousWithCluster(final int numNodes, final int numRegions, - final int numRegionsPerServer, final List rules) throws IOException { + final int numRegionsPerServer, final List rules) throws IOException { createRulesFile(RULES_FILE, rules); final Map> serverMap = - this.createServerMap(numNodes, numRegions, numRegionsPerServer, 1, 1); + this.createServerMap(numNodes, numRegions, numRegionsPerServer, 1, 1); this.testWithCluster(serverMap, null, true, false); } @Override protected void testWithCluster(final Map> serverMap, - final RackManager rackManager, final boolean assertFullyBalanced, - final boolean assertFullyBalancedForReplicas) { + final RackManager rackManager, final boolean assertFullyBalanced, + final boolean assertFullyBalancedForReplicas) { final List list = this.convertToList(serverMap); LOG.info("Mock Cluster : " + this.printMock(list) + " " + this.printStats(list)); @@ -178,7 +178,7 @@ protected void testWithCluster(final Map> serverMap // Run the balancer. final List plans = - loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap); + loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap); assertNotNull(plans); // Check to see that this actually got to a stable place. 
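As context for the assertions in these hunks: the balancer tests repeatedly check the invariant spelled out in the javadocs above — after balancing, every server should hold either floor(average) or ceiling(average) of the region count. A minimal, self-contained sketch of that check follows; the class and method names are illustrative only and are not the actual BalancerTestBase helpers.

import java.util.Collection;

final class BalanceInvariantSketch {
  // Throws if any server holds fewer than floor(avg) or more than ceil(avg) regions.
  static void assertBalanced(Collection<Integer> regionCountsPerServer) {
    int servers = regionCountsPerServer.size();
    if (servers == 0) {
      return; // nothing to check on an empty cluster
    }
    int total = regionCountsPerServer.stream().mapToInt(Integer::intValue).sum();
    int floor = total / servers;
    int ceil = (total + servers - 1) / servers; // ceiling(average) without floating point
    for (int count : regionCountsPerServer) {
      if (count < floor || count > ceil) {
        throw new AssertionError(
          "server holds " + count + " regions, expected between " + floor + " and " + ceil);
      }
    }
  }
}

For example, 20 regions over 3 servers gives floor = 6 and ceil = 7, so a 6/7/7 split passes while 5/7/8 fails.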
@@ -191,13 +191,13 @@ protected void testWithCluster(final Map> serverMap if (assertFullyBalanced) { final List secondPlans = - loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap); + loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap); assertNull(secondPlans); // create external cost function to retrieve limit // for each RS final HeterogeneousRegionCountCostFunction cf = - new HeterogeneousRegionCountCostFunction(conf); + new HeterogeneousRegionCountCostFunction(conf); assertNotNull(cf); BalancerClusterState cluster = new BalancerClusterState(serverMap, null, null, null); cf.prepare(cluster); @@ -214,9 +214,10 @@ protected void testWithCluster(final Map> serverMap // as the balancer is stochastic, we cannot check exactly the result of the balancing, // hence the allowedWindow parameter - assertTrue("Host " + sn.getHostname() + " should be below " + - cf.overallUsage * ALLOWED_WINDOW * 100 + "%; " + cf.overallUsage + ", " + usage + ", " + - numberRegions + ", " + limit, usage <= cf.overallUsage * ALLOWED_WINDOW); + assertTrue("Host " + sn.getHostname() + " should be below " + + cf.overallUsage * ALLOWED_WINDOW * 100 + "%; " + cf.overallUsage + ", " + usage + + ", " + numberRegions + ", " + limit, + usage <= cf.overallUsage * ALLOWED_WINDOW); } } @@ -228,7 +229,7 @@ protected void testWithCluster(final Map> serverMap @Override protected Map> createServerMap(int numNodes, int numRegions, - int numRegionsPerServer, int replication, int numTables) { + int numRegionsPerServer, int replication, int numTables) { // construct a cluster of numNodes, having a total of numRegions. Each RS will hold // numRegionsPerServer many regions except for the last one, which will host all the // remaining regions @@ -255,7 +256,7 @@ protected Map> createServerMap(int numNodes, int nu @Override protected TreeMap> mockClusterServers(int[] mockCluster, - int numTables) { + int numTables) { int numServers = mockCluster.length; TreeMap> servers = new TreeMap<>(); for (int i = 0; i < numServers; i++) { @@ -285,7 +286,7 @@ static class FairRandomCandidateGenerator extends RandomCandidateGenerator { @Override public BalanceAction pickRandomRegions(BalancerClusterState cluster, int thisServer, - int otherServer) { + int otherServer) { if (thisServer < 0 || otherServer < 0) { return BalanceAction.NULL_ACTION; } @@ -304,7 +305,7 @@ BalanceAction generate(BalancerClusterState cluster) { static class StochasticLoadTestBalancer extends StochasticLoadBalancer { private FairRandomCandidateGenerator fairRandomCandidateGenerator = - new FairRandomCandidateGenerator(); + new FairRandomCandidateGenerator(); @Override protected CandidateGenerator getRandomGenerator() { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRules.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRules.java index 11e3f650ea41..e8ebd2538ee7 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRules.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRules.java @@ -64,10 +64,8 @@ public static void beforeClass() throws IOException { @Before public void before() throws IOException { // New rules file name per test. - this.rulesFilename = HTU - .getDataTestDir( - this.name.getMethodName() + "." 
+ DEFAULT_RULES_FILE_NAME) - .toString(); + this.rulesFilename = + HTU.getDataTestDir(this.name.getMethodName() + "." + DEFAULT_RULES_FILE_NAME).toString(); // Set the created rules filename into the configuration. HTU.getConfiguration().set( HeterogeneousRegionCountCostFunction.HBASE_MASTER_BALANCER_HETEROGENEOUS_RULES_FILE, @@ -99,8 +97,7 @@ public void testBadFormatInRules() throws IOException { this.costFunction.loadRules(); assertEquals(0, this.costFunction.getNumberOfRulesLoaded()); - createRulesFile(this.rulesFilename, Arrays.asList("srv[1-2] 10", - "bad_rules format", "a")); + createRulesFile(this.rulesFilename, Arrays.asList("srv[1-2] 10", "bad_rules format", "a")); this.costFunction = new HeterogeneousRegionCountCostFunction(HTU.getConfiguration()); this.costFunction.loadRules(); assertEquals(1, this.costFunction.getNumberOfRulesLoaded()); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java index ba2da0a860a0..c56e19ff7240 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerLargeCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerMidCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerMidCluster.java index 3c0ec03b03c3..5be321ddb68a 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerMidCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerMidCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java index 58eed9e63796..0b3984310922 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplica.java @@ -51,8 +51,7 @@ public class TestStochasticLoadBalancerRegionReplica extends StochasticBalancerT @Test public void testReplicaCost() { Configuration conf = HBaseConfiguration.create(); - CostFunction costFunction = - new RegionReplicaHostCostFunction(conf); + CostFunction costFunction = new RegionReplicaHostCostFunction(conf); for (int[] mockCluster : clusterStateMocks) { BalancerClusterState cluster = mockCluster(mockCluster); costFunction.prepare(cluster); @@ -65,8 +64,7 @@ public void testReplicaCost() { @Test public void testReplicaCostForReplicas() { Configuration conf = HBaseConfiguration.create(); - CostFunction costFunction = - new RegionReplicaHostCostFunction(conf); + CostFunction costFunction = new RegionReplicaHostCostFunction(conf); int[] servers = new int[] { 3, 3, 3, 3, 3 }; TreeMap> clusterState = mockClusterServers(servers); @@ -163,7 +161,7 @@ public void testNeedsBalanceForColocatedReplicas() { // add another server so that the cluster has some host on another rack map.put(ServerName.valueOf("host2", 1000, 11111), randomRegions(1)); assertFalse(loadBalancer.needsBalance(HConstants.ENSEMBLE_TABLE_NAME, - new BalancerClusterState(map, null, null, new ForTestRackManagerOne()))); + new BalancerClusterState(map, null, null, new ForTestRackManagerOne()))); } @Test diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaHighReplication.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaHighReplication.java index a58b8e162968..1e020212c5db 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaHighReplication.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaHighReplication.java @@ -26,11 +26,11 @@ @Category({ MasterTests.class, MediumTests.class }) public class TestStochasticLoadBalancerRegionReplicaHighReplication - extends StochasticBalancerTestBase2 { + extends StochasticBalancerTestBase2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaHighReplication.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaHighReplication.class); @Test public void testRegionReplicasOnMidClusterHighReplication() { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaLargeCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaLargeCluster.java index 278e9f2e6138..80a991f977c4 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaLargeCluster.java +++ 
b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaLargeCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,11 +26,11 @@ @Category({ MasterTests.class, LargeTests.class }) public class TestStochasticLoadBalancerRegionReplicaLargeCluster - extends StochasticBalancerTestBase2 { + extends StochasticBalancerTestBase2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaLargeCluster.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaLargeCluster.class); @Test public void testRegionReplicasOnLargeCluster() { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaMidCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaMidCluster.java index 247baefdd611..5c596f39bafb 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaMidCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaMidCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ public class TestStochasticLoadBalancerRegionReplicaMidCluster extends Stochasti @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaMidCluster.class); + HBaseClassTestRule.forClass(TestStochasticLoadBalancerRegionReplicaMidCluster.class); @Test public void testRegionReplicasOnMidCluster() { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.java index 098b3d901935..b806f735ebca 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaReplicationGreaterThanNumNodes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java index eade1bf0bee3..48e5f437b7ff 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerRegionReplicaWithRacks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -88,7 +88,7 @@ public void testRegionReplicationOnLargeClusterWithRacks() { int numTables = 1; int numRacks = 4; // all replicas should be on a different rack Map> serverMap = - createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); + createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); RackManager rm = new ForTestRackManager(numRacks); testWithClusterWithIteration(serverMap, rm, true, true); diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java index 479e194a6d6c..1a641028393a 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerSmallCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-build-configuration/pom.xml b/hbase-build-configuration/pom.xml index ffae7a9a1d78..154ceca9ed8b 100644 --- a/hbase-build-configuration/pom.xml +++ b/hbase-build-configuration/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase org.apache.hbase + hbase 3.0.0-alpha-3-SNAPSHOT .. hbase-build-configuration - Apache HBase - Build Configuration - Configure the build-support artifacts for maven build pom + Apache HBase - Build Configuration + Configure the build-support artifacts for maven build + + + org.apache.hbase + hbase-annotations + test-jar + test + + + org.apache.yetus + audience-annotations + + @@ -50,18 +62,6 @@ - - - org.apache.hbase - hbase-annotations - test-jar - test - - - org.apache.yetus - audience-annotations - - errorProne diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml index fbf7d97b2146..1da90e79baf5 100644 --- a/hbase-checkstyle/pom.xml +++ b/hbase-checkstyle/pom.xml @@ -1,7 +1,5 @@ - + -4.0.0 -hbase-checkstyle -3.0.0-alpha-3-SNAPSHOT -Apache HBase - Checkstyle -Module to hold Checkstyle properties for HBase. - + 4.0.0 + - hbase org.apache.hbase + hbase 3.0.0-alpha-3-SNAPSHOT .. + hbase-checkstyle + 3.0.0-alpha-3-SNAPSHOT + Apache HBase - Checkstyle + Module to hold Checkstyle properties for HBase. 
- - + + - - org.apache.maven.plugins - maven-site-plugin - - true - - - - - maven-assembly-plugin - - true - - - - + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + maven-assembly-plugin + + true + + + + diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml index 885085c5ff8c..d766eec915fe 100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,28 +30,6 @@ hbase-client Apache HBase - Client Client of HBase - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - @@ -209,6 +186,28 @@ + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + @@ -228,7 +227,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -316,8 +317,7 @@ lifecycle-mapping - - + diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java index b137a7da2ceb..23258f0faf67 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,8 +22,8 @@ /** * Interface to support the aborting of a given server or client. *

    - * This is used primarily for ZooKeeper usage when we could get an unexpected - * and fatal exception, requiring an abort. + * This is used primarily for ZooKeeper usage when we could get an unexpected and fatal exception, + * requiring an abort. *

    * Implemented by the Master, RegionServer, and TableServers (client). */ @@ -38,8 +37,7 @@ public interface Abortable { void abort(String why, Throwable e); /** - * It just call another abort method and the Throwable - * parameter is null. + * It just call another abort method and the Throwable parameter is null. * @param why Why we're aborting. * @see Abortable#abort(String, Throwable) */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java index 91cedd60299d..eb56dee1b12d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import java.util.Collections; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -57,8 +55,7 @@ public int getExceptionCount() { private String getFailedRegions() { return exceptions.keySet().stream() .map(regionName -> RegionInfo.prettyPrint(RegionInfo.encodeRegionName(regionName))) - .collect(Collectors.toList()) - .toString(); + .collect(Collectors.toList()).toString(); } @InterfaceAudience.Private @@ -68,11 +65,8 @@ public static CacheEvictionStatsBuilder builder() { @Override public String toString() { - return "CacheEvictionStats{" + - "evictedBlocks=" + evictedBlocks + - ", maxCacheSize=" + maxCacheSize + - ", failedRegionsSize=" + getExceptionCount() + - ", failedRegions=" + getFailedRegions() + - '}'; + return "CacheEvictionStats{" + "evictedBlocks=" + evictedBlocks + ", maxCacheSize=" + + maxCacheSize + ", failedRegionsSize=" + getExceptionCount() + ", failedRegions=" + + getFailedRegions() + '}'; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java index 85d68dcc08bc..679823338310 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java index d9e1400da16b..4b31d98611bc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.util.HashMap; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -42,7 +40,7 @@ public CacheEvictionStatsBuilder withMaxCacheSize(long maxCacheSize) { return this; } - public void addException(byte[] regionName, Throwable ie){ + public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java index 13ab3ed47cee..e1d3e4c79396 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Returned to the clients when their request was discarded due to server being overloaded. - * Clients should retry upon receiving it. + * Returned to the clients when their request was discarded due to server being overloaded. Clients + * should retry upon receiving it. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java index 12fa242693c8..1a3bc081ed7a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; @SuppressWarnings("serial") diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java index 3cf6cc035238..6b31c43935d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -85,7 +85,7 @@ public class CatalogFamilyFormat { /** A regex for parsing server columns from meta. See above javadoc for meta layout */ private static final Pattern SERVER_COLUMN_PATTERN = - Pattern.compile("^server(_[0-9a-fA-F]{4})?$"); + Pattern.compile("^server(_[0-9a-fA-F]{4})?$"); /** * Returns an HRI parsed from this regionName. 
Not all the fields of the HRI is stored in the @@ -99,7 +99,7 @@ public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws long regionId = Long.parseLong(Bytes.toString(fields[2])); int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0; return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])).setStartKey(fields[1]) - .setRegionId(regionId).setReplicaId(replicaId).build(); + .setRegionId(regionId).setReplicaId(replicaId).build(); } /** @@ -138,7 +138,7 @@ public static RegionInfo getRegionInfo(Result data) { * @return HRegionLocation parsed from the given meta row Result for the given replicaId */ public static HRegionLocation getRegionLocation(final Result r, final RegionInfo regionInfo, - final int replicaId) { + final int replicaId) { ServerName serverName = getServerName(r, replicaId); long seqNum = getSeqNumDuringOpen(r, replicaId); RegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId); @@ -211,7 +211,7 @@ public static ServerName getServerName(Result r, int replicaId) { return null; } String hostAndPort = - Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); byte[] startcodeColumn = getStartCodeColumn(replicaId); cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, startcodeColumn); if (cell == null || cell.getValueLength() == 0) { @@ -232,9 +232,9 @@ public static ServerName getServerName(Result r, int replicaId) { * @return a byte[] for server column qualifier */ public static byte[] getServerColumn(int replicaId) { - return replicaId == 0 ? HConstants.SERVER_QUALIFIER : - Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.SERVER_QUALIFIER + : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -243,9 +243,9 @@ public static byte[] getServerColumn(int replicaId) { * @return a byte[] for server start code column qualifier */ public static byte[] getStartCodeColumn(int replicaId) { - return replicaId == 0 ? HConstants.STARTCODE_QUALIFIER : - Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.STARTCODE_QUALIFIER + : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -268,9 +268,9 @@ private static long getSeqNumDuringOpen(final Result r, final int replicaId) { * @return a byte[] for seqNum column qualifier */ public static byte[] getSeqNumColumn(int replicaId) { - return replicaId == 0 ? HConstants.SEQNUM_QUALIFIER : - Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.SEQNUM_QUALIFIER + : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** The delimiter for meta columns for replicaIds > 0 */ @@ -308,9 +308,9 @@ public static byte[] getMetaKeyForRegion(RegionInfo regionInfo) { * @return a byte[] for state qualifier */ public static byte[] getRegionStateColumn(int replicaId) { - return replicaId == 0 ? 
HConstants.STATE_QUALIFIER : - Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.STATE_QUALIFIER + : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -319,9 +319,9 @@ public static byte[] getRegionStateColumn(int replicaId) { * @return a byte[] for sn column qualifier */ public static byte[] getServerNameColumn(int replicaId) { - return replicaId == 0 ? HConstants.SERVERNAME_QUALIFIER : - Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.SERVERNAME_QUALIFIER + : Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -398,7 +398,7 @@ public static boolean hasMergeRegions(Cell[] cells) { */ public static boolean isMergeQualifierPrefix(Cell cell) { // Check to see if has family and that qualifier starts with the merge qualifier 'merge' - return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) && - PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX); + return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) + && PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java index ecc65733c12b..6315307e7c0b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -73,12 +73,12 @@ byte[][] getFamilies() { } public static CompletableFuture tableExists(AsyncTable metaTable, - TableName tableName) { + TableName tableName) { return getTableState(metaTable, tableName).thenApply(Optional::isPresent); } public static CompletableFuture> getTableState(AsyncTable metaTable, - TableName tableName) { + TableName tableName) { CompletableFuture> future = new CompletableFuture<>(); Get get = new Get(tableName.getName()).addColumn(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER); @@ -103,18 +103,19 @@ public static CompletableFuture> getTableState(AsyncTable> - getRegionLocation(AsyncTable metaTable, byte[] regionName) { + getRegionLocation(AsyncTable metaTable, byte[] regionName) { CompletableFuture> future = new CompletableFuture<>(); try { RegionInfo parsedRegionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName(regionName); addListener(metaTable.get(new Get(CatalogFamilyFormat.getMetaKeyForRegion(parsedRegionInfo)) - .addFamily(HConstants.CATALOG_FAMILY)), (r, err) -> { + .addFamily(HConstants.CATALOG_FAMILY)), + (r, err) -> { if (err != null) { future.completeExceptionally(err); return; } future.complete(getRegionLocations(r) - .map(locations -> locations.getRegionLocation(parsedRegionInfo.getReplicaId()))); + .map(locations -> locations.getRegionLocation(parsedRegionInfo.getReplicaId()))); }); } catch (IOException parseEx) { LOG.warn("Failed to parse the passed region name: " + Bytes.toStringBinary(regionName)); @@ -130,11 +131,11 @@ public static CompletableFuture> getTableState(AsyncTable> - getRegionLocationWithEncodedName(AsyncTable metaTable, byte[] encodedRegionName) { + getRegionLocationWithEncodedName(AsyncTable metaTable, byte[] encodedRegionName) { CompletableFuture> future = new CompletableFuture<>(); addListener( metaTable - .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)), + .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)), (results, err) -> { if (err != null) { future.completeExceptionally(err); @@ -142,17 +143,17 @@ public static CompletableFuture> getTableState(AsyncTable !result.isEmpty()) - .filter(result -> CatalogFamilyFormat.getRegionInfo(result) != null).forEach(result -> { - getRegionLocations(result).ifPresent(locations -> { - for (HRegionLocation location : locations.getRegionLocations()) { - if (location != null && - encodedRegionNameStr.equals(location.getRegion().getEncodedName())) { - future.complete(Optional.of(location)); - return; + .filter(result -> CatalogFamilyFormat.getRegionInfo(result) != null).forEach(result -> { + getRegionLocations(result).ifPresent(locations -> { + for (HRegionLocation location : locations.getRegionLocations()) { + if (location != null + && encodedRegionNameStr.equals(location.getRegion().getEncodedName())) { + future.complete(Optional.of(location)); + return; + } } - } + }); }); - }); future.complete(Optional.empty()); }); return future; @@ -170,7 +171,7 @@ private static Optional getTableState(Result r) throws IOException { * {@link CompletableFuture}. 
*/ public static CompletableFuture> getTableHRegionLocations( - AsyncTable metaTable, TableName tableName) { + AsyncTable metaTable, TableName tableName) { CompletableFuture> future = new CompletableFuture<>(); addListener(getTableRegionsAndLocations(metaTable, tableName, true), (locations, err) -> { if (err != null) { @@ -179,8 +180,8 @@ public static CompletableFuture> getTableHRegionLocations( future.complete(Collections.emptyList()); } else { List regionLocations = - locations.stream().map(loc -> new HRegionLocation(loc.getFirst(), loc.getSecond())) - .collect(Collectors.toList()); + locations.stream().map(loc -> new HRegionLocation(loc.getFirst(), loc.getSecond())) + .collect(Collectors.toList()); future.complete(regionLocations); } }); @@ -196,17 +197,17 @@ public static CompletableFuture> getTableHRegionLocations( * {@link CompletableFuture}. */ private static CompletableFuture>> getTableRegionsAndLocations( - final AsyncTable metaTable, final TableName tableName, - final boolean excludeOfflinedSplitParents) { + final AsyncTable metaTable, final TableName tableName, + final boolean excludeOfflinedSplitParents) { CompletableFuture>> future = new CompletableFuture<>(); if (TableName.META_TABLE_NAME.equals(tableName)) { future.completeExceptionally(new IOException( - "This method can't be used to locate meta regions;" + " use MetaTableLocator instead")); + "This method can't be used to locate meta regions;" + " use MetaTableLocator instead")); } // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress CollectRegionLocationsVisitor visitor = - new CollectRegionLocationsVisitor(excludeOfflinedSplitParents); + new CollectRegionLocationsVisitor(excludeOfflinedSplitParents); addListener(scanMeta(metaTable, tableName, QueryType.REGION, visitor), (v, error) -> { if (error != null) { @@ -226,7 +227,7 @@ private static CompletableFuture>> getTableReg * @param visitor Visitor invoked against each row */ private static CompletableFuture scanMeta(AsyncTable metaTable, - TableName tableName, QueryType type, final Visitor visitor) { + TableName tableName, QueryType type, final Visitor visitor) { return scanMeta(metaTable, getTableStartRowForMeta(tableName, type), getTableStopRowForMeta(tableName, type), type, Integer.MAX_VALUE, visitor); } @@ -241,7 +242,7 @@ private static CompletableFuture scanMeta(AsyncTable scanMeta(AsyncTable metaTable, - byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) { + byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) { int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE; Scan scan = getMetaScan(metaTable, rowUpperLimit); for (byte[] family : type.getFamilies()) { @@ -255,9 +256,9 @@ private static CompletableFuture scanMeta(AsyncTable getResults() { } static class CollectRegionLocationsVisitor - extends CollectingVisitor> { + extends CollectingVisitor> { private final boolean excludeOfflinedSplitParents; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java index a63ca6936ec1..1afcb30ece01 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,12 +18,10 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * This exception is thrown by the master when a region server clock skew is - * too high. + * This exception is thrown by the master when a region server clock skew is too high. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java index 1dd01faf808a..dafdf6e5d5ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java @@ -15,29 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; import java.util.UUID; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterIdProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * The identifier for this cluster. - * It is serialized to the filesystem and up into zookeeper. This is a container for the id. - * Also knows how to serialize and deserialize the cluster id. + * The identifier for this cluster. It is serialized to the filesystem and up into zookeeper. This + * is a container for the id. Also knows how to serialize and deserialize the cluster id. */ @InterfaceAudience.Private public class ClusterId { private final String id; /** - * New ClusterID. Generates a uniqueid. + * New ClusterID. Generates a uniqueid. */ public ClusterId() { this(UUID.randomUUID().toString()); @@ -50,7 +48,7 @@ public ClusterId(final String uuid) { /** * @return The clusterid serialized using pb w/ pb magic prefix */ - public byte [] toByteArray() { + public byte[] toByteArray() { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } @@ -60,7 +58,7 @@ public ClusterId(final String uuid) { * @throws DeserializationException * @see #toByteArray() */ - public static ClusterId parseFrom(final byte [] bytes) throws DeserializationException { + public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes)) { int pblen = ProtobufUtil.lengthOfPBMagic(); ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java index 29679e6fb6f4..98783be61269 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -39,28 +37,32 @@ *

  • The average cluster load.
  • *
  • The number of regions deployed on the cluster.
  • *
  • The number of requests since last report.
  • - *
  • Detailed region server loading and resource usage information, - * per server and per region.
  • + *
  • Detailed region server loading and resource usage information, per server and per + * region.
  • *
  • Regions in transition at master
  • *
  • The unique cluster ID
  • * - * {@link Option} provides a way to get desired ClusterStatus information. - * The following codes will get all the cluster information. + * {@link Option} provides a way to get desired ClusterStatus information. The following + * codes will get all the cluster information. + * *
    - * {@code
    - * // Original version still works
    - * Admin admin = connection.getAdmin();
    - * ClusterMetrics metrics = admin.getClusterStatus();
    - * // or below, a new version which has the same effects
    - * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
    + * {
    + *   @code
    + *   // Original version still works
    + *   Admin admin = connection.getAdmin();
    + *   ClusterMetrics metrics = admin.getClusterStatus();
    + *   // or below, a new version which has the same effects
    + *   ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
      * }
      * 
    - * If information about live servers is the only wanted. - * then codes in the following way: + * + * If information about live servers is the only wanted. then codes in the following way: + * *
    - * {@code
    - * Admin admin = connection.getAdmin();
    - * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
    + * {
    + *   @code
    + *   Admin admin = connection.getAdmin();
    + *   ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
      * }
      * 
    */ @@ -129,8 +131,7 @@ default long getLastMajorCompactionTimestamp(TableName table) { default long getLastMajorCompactionTimestamp(byte[] regionName) { return getLiveServerMetrics().values().stream() - .filter(s -> s.getRegionMetrics().containsKey(regionName)) - .findAny() + .filter(s -> s.getRegionMetrics().containsKey(regionName)).findAny() .map(s -> s.getRegionMetrics().get(regionName).getLastMajorCompactionTimestamp()) .orElse(0L); } @@ -150,13 +151,12 @@ default double getAverageLoad() { if (serverSize == 0) { return 0; } - return (double)getRegionCount() / (double)serverSize; + return (double) getRegionCount() / (double) serverSize; } /** - * Provide region states count for given table. - * e.g howmany regions of give table are opened/closed/rit etc - * + * Provide region states count for given table. e.g howmany regions of give table are + * opened/closed/rit etc * @return map of table to region states count */ Map getTableRegionStatesCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java index 011f93f9fe90..308e9dceefae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -26,13 +24,13 @@ import java.util.Map; import java.util.TreeMap; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.master.RegionState; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.Option; @@ -43,49 +41,45 @@ public final class ClusterMetricsBuilder { public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics metrics) { - ClusterStatusProtos.ClusterStatus.Builder builder - = ClusterStatusProtos.ClusterStatus.newBuilder() - .addAllBackupMasters(metrics.getBackupMasterNames().stream() - .map(ProtobufUtil::toServerName).collect(Collectors.toList())) - .addAllDeadServers(metrics.getDeadServerNames().stream() - .map(ProtobufUtil::toServerName).collect(Collectors.toList())) - .addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream() - .map(s -> ClusterStatusProtos.LiveServerInfo - .newBuilder() - .setServer(ProtobufUtil.toServerName(s.getKey())) - .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue())) - .build()) - .collect(Collectors.toList())) - .addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream() - .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) - .collect(Collectors.toList())) - .addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream() - .map(r -> ClusterStatusProtos.RegionInTransition - .newBuilder() - 
.setSpec(HBaseProtos.RegionSpecifier - .newBuilder() - .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) - .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName())) - .build()) - .setRegionState(r.convert()) - .build()) - .collect(Collectors.toList())) - .setMasterInfoPort(metrics.getMasterInfoPort()) - .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList())) - .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream() - .map(status -> - ClusterStatusProtos.TableRegionStatesCount.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) - .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())) - .build()) - .collect(Collectors.toList())); + ClusterStatusProtos.ClusterStatus.Builder builder = + ClusterStatusProtos.ClusterStatus.newBuilder() + .addAllBackupMasters(metrics.getBackupMasterNames().stream() + .map(ProtobufUtil::toServerName).collect(Collectors.toList())) + .addAllDeadServers(metrics.getDeadServerNames().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream() + .map(s -> ClusterStatusProtos.LiveServerInfo.newBuilder() + .setServer(ProtobufUtil.toServerName(s.getKey())) + .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue())).build()) + .collect(Collectors.toList())) + .addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream() + .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + .collect(Collectors.toList())) + .addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream() + .map(r -> ClusterStatusProtos.RegionInTransition.newBuilder() + .setSpec(HBaseProtos.RegionSpecifier.newBuilder() + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName())) + .build()) + .setRegionState(r.convert()).build()) + .collect(Collectors.toList())) + .setMasterInfoPort(metrics.getMasterInfoPort()) + .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .addAllTableRegionStatesCount( + metrics.getTableRegionStatesCount().entrySet().stream() + .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) + .setRegionStatesCount( + ProtobufUtil.toTableRegionStatesCount(status.getValue())) + .build()) + .collect(Collectors.toList())); if (metrics.getMasterName() != null) { builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName()))); } if (metrics.getMasterTasks() != null) { builder.addAllMasterTasks(metrics.getMasterTasks().stream() - .map(t -> ProtobufUtil.toServerTask(t)).collect(Collectors.toList())); + .map(t -> ProtobufUtil.toServerTask(t)).collect(Collectors.toList())); } if (metrics.getBalancerOn() != null) { builder.setBalancerOn(metrics.getBalancerOn()); @@ -95,40 +89,33 @@ public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics m } if (metrics.getHBaseVersion() != null) { builder.setHbaseVersion( - FSProtos.HBaseVersionFileContent.newBuilder() - .setVersion(metrics.getHBaseVersion())); + FSProtos.HBaseVersionFileContent.newBuilder().setVersion(metrics.getHBaseVersion())); } return builder.build(); } - public static ClusterMetrics toClusterMetrics( - ClusterStatusProtos.ClusterStatus proto) { + public static ClusterMetrics 
toClusterMetrics(ClusterStatusProtos.ClusterStatus proto) { ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder(); - builder.setLiveServerMetrics(proto.getLiveServersList().stream() - .collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()), - ServerMetricsBuilder::toServerMetrics))) - .setDeadServerNames(proto.getDeadServersList().stream() - .map(ProtobufUtil::toServerName) + builder + .setLiveServerMetrics(proto.getLiveServersList().stream() + .collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()), + ServerMetricsBuilder::toServerMetrics))) + .setDeadServerNames(proto.getDeadServersList().stream().map(ProtobufUtil::toServerName) .collect(Collectors.toList())) - .setBackerMasterNames(proto.getBackupMastersList().stream() - .map(ProtobufUtil::toServerName) + .setBackerMasterNames(proto.getBackupMastersList().stream().map(ProtobufUtil::toServerName) .collect(Collectors.toList())) .setRegionsInTransition(proto.getRegionsInTransitionList().stream() - .map(ClusterStatusProtos.RegionInTransition::getRegionState) - .map(RegionState::convert) + .map(ClusterStatusProtos.RegionInTransition::getRegionState).map(RegionState::convert) .collect(Collectors.toList())) .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream() - .map(HBaseProtos.Coprocessor::getName) - .collect(Collectors.toList())) + .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName) .collect(Collectors.toList())) - .setTableRegionStatesCount( - proto.getTableRegionStatesCountList().stream() - .collect(Collectors.toMap( - e -> ProtobufUtil.toTableName(e.getTableName()), - e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount())))) - .setMasterTasks(proto.getMasterTasksList().stream() - .map(t -> ProtobufUtil.getServerTask(t)).collect(Collectors.toList())); + .setTableRegionStatesCount(proto.getTableRegionStatesCountList().stream() + .collect(Collectors.toMap(e -> ProtobufUtil.toTableName(e.getTableName()), + e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount())))) + .setMasterTasks(proto.getMasterTasksList().stream().map(t -> ProtobufUtil.getServerTask(t)) + .collect(Collectors.toList())); if (proto.hasClusterId()) { builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString()); } @@ -158,21 +145,35 @@ public static ClusterMetrics toClusterMetrics( */ public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) { switch (option) { - case HBASE_VERSION: return ClusterMetrics.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterMetrics.Option.LIVE_SERVERS; - case DEAD_SERVERS: return ClusterMetrics.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterMetrics.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterMetrics.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterMetrics.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterMetrics.Option.MASTER; - case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON; - case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME; - case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT; - case TABLE_TO_REGIONS_COUNT: return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT; - case TASKS: return ClusterMetrics.Option.TASKS; + case HBASE_VERSION: + return ClusterMetrics.Option.HBASE_VERSION; + case LIVE_SERVERS: + return ClusterMetrics.Option.LIVE_SERVERS; + case 
DEAD_SERVERS: + return ClusterMetrics.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: + return ClusterMetrics.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: + return ClusterMetrics.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: + return ClusterMetrics.Option.MASTER_COPROCESSORS; + case MASTER: + return ClusterMetrics.Option.MASTER; + case BACKUP_MASTERS: + return ClusterMetrics.Option.BACKUP_MASTERS; + case BALANCER_ON: + return ClusterMetrics.Option.BALANCER_ON; + case SERVERS_NAME: + return ClusterMetrics.Option.SERVERS_NAME; + case MASTER_INFO_PORT: + return ClusterMetrics.Option.MASTER_INFO_PORT; + case TABLE_TO_REGIONS_COUNT: + return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT; + case TASKS: + return ClusterMetrics.Option.TASKS; // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); + default: + throw new IllegalArgumentException("Invalid option: " + option); } } @@ -183,21 +184,35 @@ public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) */ public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) { switch (option) { - case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS; - case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterStatusProtos.Option.MASTER; - case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON; - case SERVERS_NAME: return Option.SERVERS_NAME; - case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT; - case TABLE_TO_REGIONS_COUNT: return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT; - case TASKS: return ClusterStatusProtos.Option.TASKS; + case HBASE_VERSION: + return ClusterStatusProtos.Option.HBASE_VERSION; + case LIVE_SERVERS: + return ClusterStatusProtos.Option.LIVE_SERVERS; + case DEAD_SERVERS: + return ClusterStatusProtos.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: + return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: + return ClusterStatusProtos.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: + return ClusterStatusProtos.Option.MASTER_COPROCESSORS; + case MASTER: + return ClusterStatusProtos.Option.MASTER; + case BACKUP_MASTERS: + return ClusterStatusProtos.Option.BACKUP_MASTERS; + case BALANCER_ON: + return ClusterStatusProtos.Option.BALANCER_ON; + case SERVERS_NAME: + return Option.SERVERS_NAME; + case MASTER_INFO_PORT: + return ClusterStatusProtos.Option.MASTER_INFO_PORT; + case TABLE_TO_REGIONS_COUNT: + return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT; + case TASKS: + return ClusterStatusProtos.Option.TASKS; // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); + default: + throw new IllegalArgumentException("Invalid option: " + option); } } @@ -223,6 +238,7 @@ public static List toOptions(EnumSet deadServerNames = Collections.emptyList(); @@ -244,10 +260,12 @@ public static ClusterMetricsBuilder newBuilder() { private ClusterMetricsBuilder() { } + public ClusterMetricsBuilder setHBaseVersion(String value) { this.hbaseVersion = value; return this; } + public ClusterMetricsBuilder setDeadServerNames(List value) { 
this.deadServerNames = value; return this; @@ -262,62 +280,59 @@ public ClusterMetricsBuilder setMasterName(ServerName value) { this.masterName = value; return this; } + public ClusterMetricsBuilder setBackerMasterNames(List value) { this.backupMasterNames = value; return this; } + public ClusterMetricsBuilder setRegionsInTransition(List value) { this.regionsInTransition = value; return this; } + public ClusterMetricsBuilder setClusterId(String value) { this.clusterId = value; return this; } + public ClusterMetricsBuilder setMasterCoprocessorNames(List value) { this.masterCoprocessorNames = value; return this; } + public ClusterMetricsBuilder setBalancerOn(@Nullable Boolean value) { this.balancerOn = value; return this; } + public ClusterMetricsBuilder setMasterInfoPort(int value) { this.masterInfoPort = value; return this; } + public ClusterMetricsBuilder setServerNames(List serversName) { this.serversName = serversName; return this; } + public ClusterMetricsBuilder setMasterTasks(List masterTasks) { this.masterTasks = masterTasks; return this; } - public ClusterMetricsBuilder setTableRegionStatesCount( - Map tableRegionStatesCount) { + public ClusterMetricsBuilder + setTableRegionStatesCount(Map tableRegionStatesCount) { this.tableRegionStatesCount = tableRegionStatesCount; return this; } public ClusterMetrics build() { - return new ClusterMetricsImpl( - hbaseVersion, - deadServerNames, - liveServerMetrics, - masterName, - backupMasterNames, - regionsInTransition, - clusterId, - masterCoprocessorNames, - balancerOn, - masterInfoPort, - serversName, - tableRegionStatesCount, - masterTasks - ); + return new ClusterMetricsImpl(hbaseVersion, deadServerNames, liveServerMetrics, masterName, + backupMasterNames, regionsInTransition, clusterId, masterCoprocessorNames, balancerOn, + masterInfoPort, serversName, tableRegionStatesCount, masterTasks); } + private static class ClusterMetricsImpl implements ClusterMetrics { @Nullable private final String hbaseVersion; @@ -338,16 +353,10 @@ private static class ClusterMetricsImpl implements ClusterMetrics { private final List masterTasks; ClusterMetricsImpl(String hbaseVersion, List deadServerNames, - Map liveServerMetrics, - ServerName masterName, - List backupMasterNames, - List regionsInTransition, - String clusterId, - List masterCoprocessorNames, - Boolean balancerOn, - int masterInfoPort, - List serversName, - Map tableRegionStatesCount, + Map liveServerMetrics, ServerName masterName, + List backupMasterNames, List regionsInTransition, String clusterId, + List masterCoprocessorNames, Boolean balancerOn, int masterInfoPort, + List serversName, Map tableRegionStatesCount, List masterTasks) { this.hbaseVersion = hbaseVersion; this.deadServerNames = Preconditions.checkNotNull(deadServerNames); @@ -437,15 +446,15 @@ public String toString() { int backupMastersSize = getBackupMasterNames().size(); sb.append("\nNumber of backup masters: " + backupMastersSize); if (backupMastersSize > 0) { - for (ServerName serverName: getBackupMasterNames()) { + for (ServerName serverName : getBackupMasterNames()) { sb.append("\n " + serverName); } } int serversSize = getLiveServerMetrics().size(); int serversNameSize = getServersName().size(); - sb.append("\nNumber of live region servers: " - + (serversSize > 0 ? serversSize : serversNameSize)); + sb.append( + "\nNumber of live region servers: " + (serversSize > 0 ? 
serversSize : serversNameSize)); if (serversSize > 0) { for (ServerName serverName : getLiveServerMetrics().keySet()) { sb.append("\n " + serverName.getServerName()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java index 86aca2bc8177..b8b2519dc09f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java index fa202c17eb7d..96adb3f5924c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java @@ -7,32 +7,29 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; import java.util.Collections; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.protobuf.Service; /** * Base interface for the 4 coprocessors - MasterCoprocessor, RegionCoprocessor, - * RegionServerCoprocessor, and WALCoprocessor. - * Do NOT implement this interface directly. Unless an implementation implements one (or more) of - * the above mentioned 4 coprocessors, it'll fail to be loaded by any coprocessor host. - * - * Example: - * Building a coprocessor to observe Master operations. + * RegionServerCoprocessor, and WALCoprocessor. Do NOT implement this interface directly. Unless an + * implementation implements one (or more) of the above mentioned 4 coprocessors, it'll fail to be + * loaded by any coprocessor host. Example: Building a coprocessor to observe Master operations. + * *
      * class MyMasterCoprocessor implements MasterCoprocessor {
      *   @Override
    @@ -47,6 +44,7 @@
      * 
    * * Building a Service which can be loaded by both Master and RegionServer + * *
      * class MyCoprocessorService implements MasterCoprocessor, RegionServerCoprocessor {
      *   @Override
    @@ -74,30 +72,26 @@ public interface Coprocessor {
        * Lifecycle state of a given coprocessor instance.
        */
       enum State {
    -    UNINSTALLED,
    -    INSTALLED,
    -    STARTING,
    -    ACTIVE,
    -    STOPPING,
    -    STOPPED
    +    UNINSTALLED, INSTALLED, STARTING, ACTIVE, STOPPING, STOPPED
       }
     
       /**
        * Called by the {@link CoprocessorEnvironment} during it's own startup to initialize the
        * coprocessor.
        */
    -  default void start(CoprocessorEnvironment env) throws IOException {}
    +  default void start(CoprocessorEnvironment env) throws IOException {
    +  }
     
       /**
    -   * Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the
    -   * coprocessor.
    +   * Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the coprocessor.
        */
    -  default void stop(CoprocessorEnvironment env) throws IOException {}
    +  default void stop(CoprocessorEnvironment env) throws IOException {
    +  }
     
       /**
        * Coprocessor endpoints providing protobuf services should override this method.
    -   * @return Iterable of {@link Service}s or empty collection. Implementations should never
    -   * return null.
    +   * @return Iterable of {@link Service}s or empty collection. Implementations should never return
    +   *         null.
        */
       default Iterable getServices() {
         return Collections.EMPTY_SET;
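As a reading aid for the reflowed Coprocessor javadoc above: a minimal, compilable version of the MyMasterCoprocessor example it mentions could look roughly as follows. The package name and comments are illustrative assumptions and are not part of this patch.

package org.example.coprocessor; // illustrative package, not from this change

import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;

/** Minimal master-side coprocessor: overrides the default start/stop hooks shown above. */
public class MyMasterCoprocessor implements MasterCoprocessor {
  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    // acquire resources when the coprocessor host activates this instance
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
    // release resources when the coprocessor host shuts this instance down
  }
}

Such a class would typically be registered through the hbase.coprocessor.master.classes property so the master's coprocessor host loads it.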
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    index 4fab7333dcd9..edbc5f479d6e 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    @@ -7,16 +7,14 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
    -
     package org.apache.hadoop.hbase;
     
     import org.apache.hadoop.conf.Configuration;
    @@ -46,8 +44,8 @@ public interface CoprocessorEnvironment {
       int getLoadSequence();
     
       /**
    -   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try
    -   *   to set a configuration.
    +   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
    +   *         set a configuration.
        */
       Configuration getConfiguration();
     
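The reworded @return above notes that the environment's Configuration is read-only and throws UnsupportedOperationException on set. A small usage sketch, assuming a hypothetical helper class and config key for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoprocessorEnvironment;

final class ConfigSnapshot {
  /** Reads are fine on the shared read-only view; copy before mutating. */
  static Configuration writableCopy(CoprocessorEnvironment env) {
    Configuration readOnly = env.getConfiguration();      // setters throw UnsupportedOperationException
    Configuration copy = new Configuration(readOnly);     // Hadoop's copy constructor yields a mutable config
    copy.setInt("example.coprocessor.timeout.ms", 30000); // hypothetical key, for illustration only
    return copy;
  }
}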
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    index 509844e367d8..6ee6299daa56 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    index 76f374c412f0..718d40c2340b 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    @@ -7,24 +7,22 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
     package org.apache.hadoop.hbase;
     
     import java.io.IOException;
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Thrown during flush if the possibility snapshot content was not properly
    - * persisted into store files.  Response should include replay of wal content.
    + * Thrown during flush if the possibility snapshot content was not properly persisted into store
    + * files. Response should include replay of wal content.
      */
     @InterfaceAudience.Public
     public class DroppedSnapshotException extends IOException {
    @@ -43,7 +41,6 @@ public DroppedSnapshotException(String message) {
     
       /**
        * DroppedSnapshotException with cause
    -   *
        * @param message the message for this exception
        * @param cause the cause for this exception
        */
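The constructor javadoc reflowed above documents the (message, cause) form. Below is a minimal sketch of how a flush path might wrap a low-level failure with it; the helper method is an invented stand-in, not code from this patch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.DroppedSnapshotException;

    final class FlushExample {
      // Invented stand-in for the real memstore-snapshot flush; assume it can fail with IOException.
      private static void flushSnapshotToStoreFiles(String regionName) throws IOException {
        throw new IOException("simulated store file write failure");
      }

      static void flushRegion(String regionName) throws DroppedSnapshotException {
        try {
          flushSnapshotToStoreFiles(regionName);
        } catch (IOException e) {
          // Message plus cause, as documented above; callers treat this as "replay the WAL".
          throw new DroppedSnapshotException("snapshot dropped for region=" + regionName, e);
        }
      }
    }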
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
    index 8f356f1fe774..49d80d3a1e94 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -23,17 +22,13 @@
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Data structure to hold RegionInfo and the address for the hosting
    - * HRegionServer.  Immutable.  Comparable, but we compare the 'location' only:
    - * i.e. the hostname and port, and *not* the regioninfo.  This means two
    - * instances are the same if they refer to the same 'location' (the same
    - * hostname and port), though they may be carrying different regions.
    - *
    - * On a big cluster, each client will have thousands of instances of this object, often
    - *  100 000 of them if not million. It's important to keep the object size as small
    - *  as possible.
    - *
- * This interface has been marked InterfaceAudience.Public in 0.96 and 0.98.
+ * Data structure to hold RegionInfo and the address for the hosting HRegionServer. Immutable.
+ * Comparable, but we compare the 'location' only: i.e. the hostname and port, and *not* the
+ * regioninfo. This means two instances are the same if they refer to the same 'location' (the same
+ * hostname and port), though they may be carrying different regions. On a big cluster, each client
+ * will have thousands of instances of this object, often 100 000 of them if not million. It's
+ * important to keep the object size as small as possible.
    + * This interface has been marked InterfaceAudience.Public in 0.96 and 0.98. */ @InterfaceAudience.Public public class HRegionLocation implements Comparable { @@ -74,7 +69,7 @@ public boolean equals(Object o) { if (!(o instanceof HRegionLocation)) { return false; } - return this.compareTo((HRegionLocation)o) == 0; + return this.compareTo((HRegionLocation) o) == 0; } /** @@ -88,7 +83,7 @@ public int hashCode() { /** * @return regionInfo */ - public RegionInfo getRegion(){ + public RegionInfo getRegion() { return regionInfo; } @@ -105,8 +100,8 @@ public long getSeqNum() { } /** - * @return String made of hostname and port formatted as - * per {@link Addressing#createHostAndPortStr(String, int)} + * @return String made of hostname and port formatted as per + * {@link Addressing#createHostAndPortStr(String, int)} */ public String getHostnamePort() { return Addressing.createHostAndPortStr(this.getHostname(), this.getPort()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java index 63c26e2c393f..2a099157bc76 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +20,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown if a request is table schema modification is requested but - * made for an invalid family name. + * Thrown if a request is table schema modification is requested but made for an invalid family + * name. */ @InterfaceAudience.Public public class InvalidFamilyOperationException extends DoNotRetryIOException { private static final long serialVersionUID = (1L << 22) - 1L; + /** default constructor */ public InvalidFamilyOperationException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java index dd19fa1c2279..2ae80cade98a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,27 +23,25 @@ * Ways to keep cells marked for delete around. */ /* - * Don't change the TRUE/FALSE labels below, these have to be called - * this way for backwards compatibility. + * Don't change the TRUE/FALSE labels below, these have to be called this way for backwards + * compatibility. */ @InterfaceAudience.Public public enum KeepDeletedCells { /** Deleted Cells are not retained. */ FALSE, /** - * Deleted Cells are retained until they are removed by other means - * such TTL or VERSIONS. - * If no TTL is specified or no new versions of delete cells are - * written, they are retained forever. + * Deleted Cells are retained until they are removed by other means such TTL or VERSIONS. If no + * TTL is specified or no new versions of delete cells are written, they are retained forever. 
*/ TRUE, /** - * Deleted Cells are retained until the delete marker expires due to TTL. - * This is useful when TTL is combined with MIN_VERSIONS and one - * wants to keep a minimum number of versions around but at the same - * time remove deleted cells after the TTL. + * Deleted Cells are retained until the delete marker expires due to TTL. This is useful when TTL + * is combined with MIN_VERSIONS and one wants to keep a minimum number of versions around but at + * the same time remove deleted cells after the TTL. */ TTL; + public static KeepDeletedCells getValue(String val) { return valueOf(val.toUpperCase()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java index 35cdecba9bb6..86e394e33403 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,6 +25,7 @@ @InterfaceAudience.Public public class MasterNotRunningException extends HBaseIOException { private static final long serialVersionUID = (1L << 23) - 1L; + /** default constructor */ public MasterNotRunningException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java index 099ea4054591..b913ac0506cd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,16 +30,15 @@ public enum MemoryCompactionPolicy { NONE, /** * Basic policy applies optimizations which modify the index to a more compacted representation. - * This is beneficial in all access patterns. The smaller the cells are the greater the - * benefit of this policy. - * This is the default policy. + * This is beneficial in all access patterns. The smaller the cells are the greater the benefit of + * this policy. This is the default policy. */ BASIC, /** - * In addition to compacting the index representation as the basic policy, eager policy - * eliminates duplication while the data is still in memory (much like the - * on-disk compaction does after the data is flushed to disk). This policy is most useful for - * applications with high data churn or small working sets. + * In addition to compacting the index representation as the basic policy, eager policy eliminates + * duplication while the data is still in memory (much like the on-disk compaction does after the + * data is flushed to disk). This policy is most useful for applications with high data churn or + * small working sets. 
*/ EAGER, /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java index 3e06f4250af6..a49575849b04 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Exception thrown when the result needs to be chunked on the server side. - * It signals that retries should happen right away and not count against the number of - * retries because some of the multi was a success. + * Exception thrown when the result needs to be chunked on the server side. It signals that retries + * should happen right away and not count against the number of retries because some of the multi + * was a success. */ @InterfaceAudience.Public public class MultiActionResultTooLarge extends RetryImmediatelyException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java index 5263523417ed..83e29fd9edc1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java index 72ff1e61b849..0af01d23bddf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java index c51fccb5955d..8397d8857630 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; @@ -27,6 +25,7 @@ @InterfaceAudience.Public public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException { private static final long serialVersionUID = 6439786157874827523L; + /** * default constructor */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java index 918408778c0d..aa138478b4ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java index e887928da828..473947b8f769 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This exception is thrown by the master when a region server was shut down and - * restarted so fast that the master still hasn't processed the server shutdown - * of the first instance, or when master is initializing and client call admin - * operations, or when an operation is performed on a region server that is still starting. + * This exception is thrown by the master when a region server was shut down and restarted so fast + * that the master still hasn't processed the server shutdown of the first instance, or when master + * is initializing and client call admin operations, or when an operation is performed on a region + * server that is still starting. 
*/ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java index 62f84e9495be..5e60e44243a0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java index 8a8d2151aa2e..aff9ff8af472 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when something happens related to region handling. - * Subclasses have to be more specific. + * Thrown when something happens related to region handling. Subclasses have to be more specific. */ @InterfaceAudience.Public public class RegionException extends HBaseIOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java index 0d3a464e0f86..0a297166e12d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Arrays; import java.util.Collection; import java.util.Iterator; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Container for holding a list of {@link HRegionLocation}'s that correspond to the - * same range. The list is indexed by the replicaId. This is an immutable list, - * however mutation operations are provided which returns a new List via copy-on-write - * (assuming small number of locations) + * Container for holding a list of {@link HRegionLocation}'s that correspond to the same range. The + * list is indexed by the replicaId. 
This is an immutable list, however mutation operations are + * provided which returns a new List via copy-on-write (assuming small number of locations) */ @InterfaceAudience.Private public class RegionLocations implements Iterable { @@ -45,10 +42,9 @@ public class RegionLocations implements Iterable { private final HRegionLocation[] locations; // replicaId -> HRegionLocation. /** - * Constructs the region location list. The locations array should - * contain all the locations for known replicas for the region, and should be - * sorted in replicaId ascending order, although it can contain nulls indicating replicaIds - * that the locations of which are not known. + * Constructs the region location list. The locations array should contain all the locations for + * known replicas for the region, and should be sorted in replicaId ascending order, although it + * can contain nulls indicating replicaIds that the locations of which are not known. * @param locations an array of HRegionLocations for the same region range */ public RegionLocations(HRegionLocation... locations) { @@ -66,7 +62,7 @@ public RegionLocations(HRegionLocation... locations) { index++; } // account for the null elements in the array after maxReplicaIdIndex - maxReplicaId = maxReplicaId + (locations.length - (maxReplicaIdIndex + 1) ); + maxReplicaId = maxReplicaId + (locations.length - (maxReplicaIdIndex + 1)); if (maxReplicaId + 1 == locations.length) { this.locations = locations; @@ -79,7 +75,7 @@ public RegionLocations(HRegionLocation... locations) { } } for (HRegionLocation loc : this.locations) { - if (loc != null && loc.getServerName() != null){ + if (loc != null && loc.getServerName() != null) { numNonNullElements++; } } @@ -91,8 +87,7 @@ public RegionLocations(Collection locations) { } /** - * Returns the size of the list even if some of the elements - * might be null. + * Returns the size of the list even if some of the elements might be null. * @return the size of the list (corresponding to the max replicaId) */ public int size() { @@ -116,18 +111,18 @@ public boolean isEmpty() { } /** - * Returns a new RegionLocations with the locations removed (set to null) - * which have the destination server as given. + * Returns a new RegionLocations with the locations removed (set to null) which have the + * destination server as given. 
* @param serverName the serverName to remove locations of - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations removeByServer(ServerName serverName) { HRegionLocation[] newLocations = null; for (int i = 0; i < locations.length; i++) { // check whether something to remove if (locations[i] != null && serverName.equals(locations[i].getServerName())) { - if (newLocations == null) { //first time + if (newLocations == null) { // first time newLocations = new HRegionLocation[locations.length]; System.arraycopy(locations, 0, newLocations, 0, i); } @@ -142,8 +137,8 @@ public RegionLocations removeByServer(ServerName serverName) { /** * Removes the given location from the list * @param location the location to remove - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations remove(HRegionLocation location) { if (location == null) return this; @@ -153,9 +148,8 @@ public RegionLocations remove(HRegionLocation location) { // check whether something to remove. HRL.compareTo() compares ONLY the // serverName. We want to compare the HRI's as well. - if (locations[replicaId] == null - || RegionInfo.COMPARATOR.compare(location.getRegion(), locations[replicaId].getRegion()) != 0 - || !location.equals(locations[replicaId])) { + if (locations[replicaId] == null || RegionInfo.COMPARATOR.compare(location.getRegion(), + locations[replicaId].getRegion()) != 0 || !location.equals(locations[replicaId])) { return this; } @@ -169,8 +163,8 @@ public RegionLocations remove(HRegionLocation location) { /** * Removes location of the given replicaId from the list * @param replicaId the replicaId of the location to remove - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations remove(int replicaId) { if (getRegionLocation(replicaId) == null) { @@ -204,13 +198,11 @@ public RegionLocations removeElementsWithNullLocation() { } /** - * Merges this RegionLocations list with the given list assuming - * same range, and keeping the most up to date version of the - * HRegionLocation entries from either list according to seqNum. If seqNums - * are equal, the location from the argument (other) is taken. + * Merges this RegionLocations list with the given list assuming same range, and keeping the most + * up to date version of the HRegionLocation entries from either list according to seqNum. If + * seqNums are equal, the location from the argument (other) is taken. 
* @param other the locations to merge with - * @return an RegionLocations object with merged locations or the same object - * if nothing is merged + * @return an RegionLocations object with merged locations or the same object if nothing is merged */ public RegionLocations mergeLocations(RegionLocations other) { assert other != null; @@ -231,8 +223,7 @@ public RegionLocations mergeLocations(RegionLocations other) { regionInfo = otherLoc.getRegion(); } - HRegionLocation selectedLoc = selectRegionLocation(thisLoc, - otherLoc, true, false); + HRegionLocation selectedLoc = selectRegionLocation(thisLoc, otherLoc, true, false); if (selectedLoc != thisLoc) { if (newLocations == null) { @@ -247,10 +238,9 @@ public RegionLocations mergeLocations(RegionLocations other) { // ensure that all replicas share the same start code. Otherwise delete them if (newLocations != null && regionInfo != null) { - for (int i=0; i < newLocations.length; i++) { + for (int i = 0; i < newLocations.length; i++) { if (newLocations[i] != null) { - if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, - newLocations[i].getRegion())) { + if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, newLocations[i].getRegion())) { newLocations[i] = null; } } @@ -270,41 +260,39 @@ private HRegionLocation selectRegionLocation(HRegionLocation oldLocation, return location; } - if (force - || isGreaterThan(location.getSeqNum(), oldLocation.getSeqNum(), checkForEquals)) { + if (force || isGreaterThan(location.getSeqNum(), oldLocation.getSeqNum(), checkForEquals)) { return location; } return oldLocation; } /** - * Updates the location with new only if the new location has a higher - * seqNum than the old one or force is true. + * Updates the location with new only if the new location has a higher seqNum than the old one or + * force is true. * @param location the location to add or update - * @param checkForEquals whether to update the location if seqNums for the - * HRegionLocations for the old and new location are the same + * @param checkForEquals whether to update the location if seqNums for the HRegionLocations for + * the old and new location are the same * @param force whether to force update - * @return an RegionLocations object with updated locations or the same object - * if nothing is updated + * @return an RegionLocations object with updated locations or the same object if nothing is + * updated */ - public RegionLocations updateLocation(HRegionLocation location, - boolean checkForEquals, boolean force) { + public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals, + boolean force) { assert location != null; int replicaId = location.getRegion().getReplicaId(); HRegionLocation oldLoc = getRegionLocation(location.getRegion().getReplicaId()); - HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, - checkForEquals, force); + HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, checkForEquals, force); if (selectedLoc == oldLoc) { return this; } - HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId +1)]; + HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId + 1)]; System.arraycopy(locations, 0, newLocations, 0, locations.length); newLocations[replicaId] = location; // ensure that all replicas share the same start code. 
Otherwise delete them - for (int i=0; i < newLocations.length; i++) { + for (int i = 0; i < newLocations.length; i++) { if (newLocations[i] != null) { if (!RegionReplicaUtil.isReplicasForSameRegion(location.getRegion(), newLocations[i].getRegion())) { @@ -327,8 +315,8 @@ public HRegionLocation getRegionLocation(int replicaId) { } /** - * Returns the region location from the list for matching regionName, which can - * be regionName or encodedRegionName + * Returns the region location from the list for matching regionName, which can be regionName or + * encodedRegionName * @param regionName regionName or encodedRegionName * @return HRegionLocation found or null */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 8cd3ea156c4d..d873c4bc1cb4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Map; @@ -26,8 +23,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Encapsulates per-region load metrics. - */ + * Encapsulates per-region load metrics. + */ @InterfaceAudience.Public public interface RegionMetrics { @@ -72,8 +69,8 @@ public interface RegionMetrics { public long getCpRequestCount(); /** - * @return the number of write requests and read requests and coprocessor - * service requests made to region + * @return the number of write requests and read requests and coprocessor service requests made to + * region */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount() + getCpRequestCount(); @@ -93,8 +90,8 @@ default String getNameAsString() { /** * TODO: why we pass the same value to different counters? Currently, the value from - * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() - * see HRegionServer#createRegionLoad. + * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() see + * HRegionServer#createRegionLoad. 
* @return The current total size of root-level indexes for the region */ Size getStoreFileIndexSize(); @@ -135,7 +132,6 @@ default String getNameAsString() { */ Map getStoreSequenceId(); - /** * @return the uncompressed size of the storefiles */ @@ -157,8 +153,8 @@ default String getNameAsString() { int getStoreRefCount(); /** - * @return the max reference count for any store file among all compacted stores files - * of this region + * @return the max reference count for any store file among all compacted stores files of this + * region */ int getMaxCompactedStoreFileRefCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java index 8349c35d7d33..cbb5433ebd22 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Collections; @@ -39,8 +36,8 @@ @InterfaceAudience.Private public final class RegionMetricsBuilder { - public static List toRegionMetrics( - AdminProtos.GetRegionLoadResponse regionLoadResponse) { + public static List + toRegionMetrics(AdminProtos.GetRegionLoadResponse regionLoadResponse) { return regionLoadResponse.getRegionLoadsList().stream() .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()); } @@ -53,77 +50,72 @@ public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regio .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs()) .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId()) .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f) - .setDataLocalityForSsd(regionLoadPB.hasDataLocalityForSsd() ? - regionLoadPB.getDataLocalityForSsd() : 0.0f) - .setBlocksLocalWeight(regionLoadPB.hasBlocksLocalWeight() ? - regionLoadPB.getBlocksLocalWeight() : 0) - .setBlocksLocalWithSsdWeight(regionLoadPB.hasBlocksLocalWithSsdWeight() ? - regionLoadPB.getBlocksLocalWithSsdWeight() : 0) + .setDataLocalityForSsd( + regionLoadPB.hasDataLocalityForSsd() ? regionLoadPB.getDataLocalityForSsd() : 0.0f) + .setBlocksLocalWeight( + regionLoadPB.hasBlocksLocalWeight() ? regionLoadPB.getBlocksLocalWeight() : 0) + .setBlocksLocalWithSsdWeight( + regionLoadPB.hasBlocksLocalWithSsdWeight() ? 
regionLoadPB.getBlocksLocalWithSsdWeight() + : 0) .setBlocksTotalWeight(regionLoadPB.getBlocksTotalWeight()) - .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad( - regionLoadPB.getCompactionState())) + .setCompactionState( + ProtobufUtil.createCompactionStateForRegionLoad(regionLoadPB.getCompactionState())) .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) - .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(), - Size.Unit.KILOBYTE)) + .setStoreFileUncompressedDataIndexSize( + new Size(regionLoadPB.getTotalStaticIndexSizeKB(), Size.Unit.KILOBYTE)) .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs()) .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE)) .setReadRequestCount(regionLoadPB.getReadRequestsCount()) .setCpRequestCount(regionLoadPB.getCpRequestsCount()) .setWriteRequestCount(regionLoadPB.getWriteRequestsCount()) - .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setStoreCount(regionLoadPB.getStores()) - .setStoreFileCount(regionLoadPB.getStorefiles()) + .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(), Size.Unit.KILOBYTE)) + .setStoreFileRootLevelIndexSize( + new Size(regionLoadPB.getRootIndexSizeKB(), Size.Unit.KILOBYTE)) + .setStoreCount(regionLoadPB.getStores()).setStoreFileCount(regionLoadPB.getStorefiles()) .setStoreRefCount(regionLoadPB.getStoreRefCount()) .setMaxCompactedStoreFileRefCount(regionLoadPB.getMaxCompactedStoreFileRefCount()) .setStoreFileSize(new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE)) .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream() - .collect(Collectors.toMap( - (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(), + .collect(Collectors.toMap( + (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(), ClusterStatusProtos.StoreSequenceId::getSequenceId))) .setUncompressedStoreFileSize( - new Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE)) + new Size(regionLoadPB.getStoreUncompressedSizeMB(), Size.Unit.MEGABYTE)) .build(); } - private static List toStoreSequenceId( - Map ids) { + private static List + toStoreSequenceId(Map ids) { return ids.entrySet().stream() .map(e -> ClusterStatusProtos.StoreSequenceId.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey())) - .setSequenceId(e.getValue()) - .build()) + .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey())).setSequenceId(e.getValue()) + .build()) .collect(Collectors.toList()); } public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) { return ClusterStatusProtos.RegionLoad.newBuilder() - .setRegionSpecifier(HBaseProtos.RegionSpecifier - .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) - .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())) - .build()) - .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize() - .get(Size.Unit.KILOBYTE)) + .setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder() + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())).build()) + .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize().get(Size.Unit.KILOBYTE)) 
.setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) .setDataLocality(regionMetrics.getDataLocality()) .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) - .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize() - .get(Size.Unit.KILOBYTE)) + .setTotalStaticIndexSizeKB( + (int) regionMetrics.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE)) .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) .setReadRequestsCount(regionMetrics.getReadRequestCount()) .setCpRequestsCount(regionMetrics.getCpRequestCount()) .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) - .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize() - .get(Size.Unit.KILOBYTE)) - .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize() - .get(Size.Unit.KILOBYTE)) - .setStores(regionMetrics.getStoreCount()) - .setStorefiles(regionMetrics.getStoreFileCount()) + .setStorefileIndexSizeKB( + (long) regionMetrics.getStoreFileIndexSize().get(Size.Unit.KILOBYTE)) + .setRootIndexSizeKB( + (int) regionMetrics.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE)) + .setStores(regionMetrics.getStoreCount()).setStorefiles(regionMetrics.getStoreFileCount()) .setStoreRefCount(regionMetrics.getStoreRefCount()) .setMaxCompactedStoreFileRefCount(regionMetrics.getMaxCompactedStoreFileRefCount()) .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) @@ -164,6 +156,7 @@ public static RegionMetricsBuilder newBuilder(byte[] name) { private long blocksLocalWithSsdWeight; private long blocksTotalWeight; private CompactionState compactionState; + private RegionMetricsBuilder(byte[] name) { this.name = name; } @@ -172,135 +165,140 @@ public RegionMetricsBuilder setStoreCount(int value) { this.storeCount = value; return this; } + public RegionMetricsBuilder setStoreFileCount(int value) { this.storeFileCount = value; return this; } + public RegionMetricsBuilder setStoreRefCount(int value) { this.storeRefCount = value; return this; } + public RegionMetricsBuilder setMaxCompactedStoreFileRefCount(int value) { this.maxCompactedStoreFileRefCount = value; return this; } + public RegionMetricsBuilder setCompactingCellCount(long value) { this.compactingCellCount = value; return this; } + public RegionMetricsBuilder setCompactedCellCount(long value) { this.compactedCellCount = value; return this; } + public RegionMetricsBuilder setStoreFileSize(Size value) { this.storeFileSize = value; return this; } + public RegionMetricsBuilder setMemStoreSize(Size value) { this.memStoreSize = value; return this; } + public RegionMetricsBuilder setStoreFileIndexSize(Size value) { this.indexSize = value; return this; } + public RegionMetricsBuilder setStoreFileRootLevelIndexSize(Size value) { this.rootLevelIndexSize = value; return this; } + public RegionMetricsBuilder setStoreFileUncompressedDataIndexSize(Size value) { this.uncompressedDataIndexSize = value; return this; } + public RegionMetricsBuilder setBloomFilterSize(Size value) { this.bloomFilterSize = value; return this; } + public RegionMetricsBuilder setUncompressedStoreFileSize(Size value) { this.uncompressedStoreFileSize = value; return this; } + public RegionMetricsBuilder setWriteRequestCount(long value) { this.writeRequestCount = value; 
return this; } + public RegionMetricsBuilder setReadRequestCount(long value) { this.readRequestCount = value; return this; } + public RegionMetricsBuilder setCpRequestCount(long value) { this.cpRequestCount = value; return this; } + public RegionMetricsBuilder setFilteredReadRequestCount(long value) { this.filteredReadRequestCount = value; return this; } + public RegionMetricsBuilder setCompletedSequenceId(long value) { this.completedSequenceId = value; return this; } + public RegionMetricsBuilder setStoreSequenceIds(Map value) { this.storeSequenceIds = value; return this; } + public RegionMetricsBuilder setDataLocality(float value) { this.dataLocality = value; return this; } + public RegionMetricsBuilder setLastMajorCompactionTimestamp(long value) { this.lastMajorCompactionTimestamp = value; return this; } + public RegionMetricsBuilder setDataLocalityForSsd(float value) { this.dataLocalityForSsd = value; return this; } + public RegionMetricsBuilder setBlocksLocalWeight(long value) { this.blocksLocalWeight = value; return this; } + public RegionMetricsBuilder setBlocksLocalWithSsdWeight(long value) { this.blocksLocalWithSsdWeight = value; return this; } + public RegionMetricsBuilder setBlocksTotalWeight(long value) { this.blocksTotalWeight = value; return this; } + public RegionMetricsBuilder setCompactionState(CompactionState compactionState) { this.compactionState = compactionState; return this; } public RegionMetrics build() { - return new RegionMetricsImpl(name, - storeCount, - storeFileCount, - storeRefCount, - maxCompactedStoreFileRefCount, - compactingCellCount, - compactedCellCount, - storeFileSize, - memStoreSize, - indexSize, - rootLevelIndexSize, - uncompressedDataIndexSize, - bloomFilterSize, - uncompressedStoreFileSize, - writeRequestCount, - readRequestCount, - cpRequestCount, - filteredReadRequestCount, - completedSequenceId, - storeSequenceIds, - dataLocality, - lastMajorCompactionTimestamp, - dataLocalityForSsd, - blocksLocalWeight, - blocksLocalWithSsdWeight, - blocksTotalWeight, - compactionState); + return new RegionMetricsImpl(name, storeCount, storeFileCount, storeRefCount, + maxCompactedStoreFileRefCount, compactingCellCount, compactedCellCount, storeFileSize, + memStoreSize, indexSize, rootLevelIndexSize, uncompressedDataIndexSize, bloomFilterSize, + uncompressedStoreFileSize, writeRequestCount, readRequestCount, cpRequestCount, + filteredReadRequestCount, completedSequenceId, storeSequenceIds, dataLocality, + lastMajorCompactionTimestamp, dataLocalityForSsd, blocksLocalWeight, + blocksLocalWithSsdWeight, blocksTotalWeight, compactionState); } private static class RegionMetricsImpl implements RegionMetrics { @@ -331,32 +329,15 @@ private static class RegionMetricsImpl implements RegionMetrics { private final long blocksLocalWithSsdWeight; private final long blocksTotalWeight; private final CompactionState compactionState; - RegionMetricsImpl(byte[] name, - int storeCount, - int storeFileCount, - int storeRefCount, - int maxCompactedStoreFileRefCount, - final long compactingCellCount, - long compactedCellCount, - Size storeFileSize, - Size memStoreSize, - Size indexSize, - Size rootLevelIndexSize, - Size uncompressedDataIndexSize, - Size bloomFilterSize, - Size uncompressedStoreFileSize, - long writeRequestCount, - long readRequestCount, - long cpRequestCount, - long filteredReadRequestCount, - long completedSequenceId, - Map storeSequenceIds, - float dataLocality, - long lastMajorCompactionTimestamp, - float dataLocalityForSsd, - long blocksLocalWeight, - long 
blocksLocalWithSsdWeight, - long blocksTotalWeight, + + RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, int storeRefCount, + int maxCompactedStoreFileRefCount, final long compactingCellCount, long compactedCellCount, + Size storeFileSize, Size memStoreSize, Size indexSize, Size rootLevelIndexSize, + Size uncompressedDataIndexSize, Size bloomFilterSize, Size uncompressedStoreFileSize, + long writeRequestCount, long readRequestCount, long cpRequestCount, + long filteredReadRequestCount, long completedSequenceId, Map storeSequenceIds, + float dataLocality, long lastMajorCompactionTimestamp, float dataLocalityForSsd, + long blocksLocalWeight, long blocksLocalWithSsdWeight, long blocksTotalWeight, CompactionState compactionState) { this.name = Preconditions.checkNotNull(name); this.storeCount = storeCount; @@ -524,65 +505,44 @@ public CompactionState getCompactionState() { @Override public String toString() { - StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount", - this.getStoreCount()); - Strings.appendKeyValue(sb, "storeFileCount", - this.getStoreFileCount()); - Strings.appendKeyValue(sb, "storeRefCount", - this.getStoreRefCount()); + StringBuilder sb = + Strings.appendKeyValue(new StringBuilder(), "storeCount", this.getStoreCount()); + Strings.appendKeyValue(sb, "storeFileCount", this.getStoreFileCount()); + Strings.appendKeyValue(sb, "storeRefCount", this.getStoreRefCount()); Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", this.getMaxCompactedStoreFileRefCount()); - Strings.appendKeyValue(sb, "uncompressedStoreFileSize", - this.getUncompressedStoreFileSize()); + Strings.appendKeyValue(sb, "uncompressedStoreFileSize", this.getUncompressedStoreFileSize()); Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", - this.getLastMajorCompactionTimestamp()); - Strings.appendKeyValue(sb, "storeFileSize", - this.getStoreFileSize()); + this.getLastMajorCompactionTimestamp()); + Strings.appendKeyValue(sb, "storeFileSize", this.getStoreFileSize()); if (this.getUncompressedStoreFileSize().get() != 0) { Strings.appendKeyValue(sb, "compressionRatio", - String.format("%.4f", - (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) / - (float) this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))); + String.format("%.4f", (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) + / (float) this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))); } - Strings.appendKeyValue(sb, "memStoreSize", - this.getMemStoreSize()); - Strings.appendKeyValue(sb, "readRequestCount", - this.getReadRequestCount()); - Strings.appendKeyValue(sb, "cpRequestCount", - this.getCpRequestCount()); - Strings.appendKeyValue(sb, "writeRequestCount", - this.getWriteRequestCount()); - Strings.appendKeyValue(sb, "rootLevelIndexSize", - this.getStoreFileRootLevelIndexSize()); + Strings.appendKeyValue(sb, "memStoreSize", this.getMemStoreSize()); + Strings.appendKeyValue(sb, "readRequestCount", this.getReadRequestCount()); + Strings.appendKeyValue(sb, "cpRequestCount", this.getCpRequestCount()); + Strings.appendKeyValue(sb, "writeRequestCount", this.getWriteRequestCount()); + Strings.appendKeyValue(sb, "rootLevelIndexSize", this.getStoreFileRootLevelIndexSize()); Strings.appendKeyValue(sb, "uncompressedDataIndexSize", - this.getStoreFileUncompressedDataIndexSize()); - Strings.appendKeyValue(sb, "bloomFilterSize", - this.getBloomFilterSize()); - Strings.appendKeyValue(sb, "compactingCellCount", - this.getCompactingCellCount()); - Strings.appendKeyValue(sb, 
"compactedCellCount", - this.getCompactedCellCount()); + this.getStoreFileUncompressedDataIndexSize()); + Strings.appendKeyValue(sb, "bloomFilterSize", this.getBloomFilterSize()); + Strings.appendKeyValue(sb, "compactingCellCount", this.getCompactingCellCount()); + Strings.appendKeyValue(sb, "compactedCellCount", this.getCompactedCellCount()); float compactionProgressPct = Float.NaN; if (this.getCompactingCellCount() > 0) { - compactionProgressPct = ((float) this.getCompactedCellCount() / - (float) this.getCompactingCellCount()); + compactionProgressPct = + ((float) this.getCompactedCellCount() / (float) this.getCompactingCellCount()); } - Strings.appendKeyValue(sb, "compactionProgressPct", - compactionProgressPct); - Strings.appendKeyValue(sb, "completedSequenceId", - this.getCompletedSequenceId()); - Strings.appendKeyValue(sb, "dataLocality", - this.getDataLocality()); - Strings.appendKeyValue(sb, "dataLocalityForSsd", - this.getDataLocalityForSsd()); - Strings.appendKeyValue(sb, "blocksLocalWeight", - blocksLocalWeight); - Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", - blocksLocalWithSsdWeight); - Strings.appendKeyValue(sb, "blocksTotalWeight", - blocksTotalWeight); - Strings.appendKeyValue(sb, "compactionState", - compactionState); + Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); + Strings.appendKeyValue(sb, "completedSequenceId", this.getCompletedSequenceId()); + Strings.appendKeyValue(sb, "dataLocality", this.getDataLocality()); + Strings.appendKeyValue(sb, "dataLocalityForSsd", this.getDataLocalityForSsd()); + Strings.appendKeyValue(sb, "blocksLocalWeight", blocksLocalWeight); + Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", blocksLocalWithSsdWeight); + Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight); + Strings.appendKeyValue(sb, "compactionState", compactionState); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java index 3024962ebd67..4cdb4ea2ade6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,15 +18,14 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown by a region server if it will block and wait to serve a request. - * For example, the client wants to insert something to a region while the - * region is compacting. Keep variance in the passed 'msg' low because its msg is used as a key - * over in {@link org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException} - * grouping failure types. + * Thrown by a region server if it will block and wait to serve a request. For example, the client + * wants to insert something to a region while the region is compacting. Keep variance in the passed + * 'msg' low because its msg is used as a key over in + * {@link org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException} grouping failure + * types. 
*/ @InterfaceAudience.Public public class RegionTooBusyException extends IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java index 6f02df2028f9..4d1deebb4e87 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java index 9df4f893c714..46cc77c61b8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java index ddd4b2ec03f5..d779bdb6d2b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java index dd2e836487f8..6069b04f63c6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,18 +6,18 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -37,6 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -69,34 +69,33 @@ public static ServerMetrics toServerMetrics(ServerName serverName, public static ServerMetrics toServerMetrics(ServerName serverName, int versionNumber, String version, ClusterStatusProtos.ServerLoad serverLoadPB) { return ServerMetricsBuilder.newBuilder(serverName) - .setRequestCountPerSecond(serverLoadPB.getNumberOfRequests()) - .setRequestCount(serverLoadPB.getTotalNumberOfRequests()) - .setInfoServerPort(serverLoadPB.getInfoServerPort()) - .setReadRequestCount(serverLoadPB.getReadRequestsCount()) - .setWriteRequestCount(serverLoadPB.getWriteRequestsCount()) - .setMaxHeapSize(new Size(serverLoadPB.getMaxHeapMB(), Size.Unit.MEGABYTE)) - .setUsedHeapSize(new Size(serverLoadPB.getUsedHeapMB(), Size.Unit.MEGABYTE)) - .setCoprocessorNames(serverLoadPB.getCoprocessorsList().stream() - .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) - .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream() - .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList())) + .setRequestCountPerSecond(serverLoadPB.getNumberOfRequests()) + .setRequestCount(serverLoadPB.getTotalNumberOfRequests()) + .setInfoServerPort(serverLoadPB.getInfoServerPort()) + .setReadRequestCount(serverLoadPB.getReadRequestsCount()) + .setWriteRequestCount(serverLoadPB.getWriteRequestsCount()) + .setMaxHeapSize(new Size(serverLoadPB.getMaxHeapMB(), Size.Unit.MEGABYTE)) + .setUsedHeapSize(new Size(serverLoadPB.getUsedHeapMB(), Size.Unit.MEGABYTE)) + .setCoprocessorNames(serverLoadPB.getCoprocessorsList().stream() + .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) + .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream() + 
.map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList())) .setUserMetrics(serverLoadPB.getUserLoadsList().stream() .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) - .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream() - .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) - .setReplicationLoadSink(serverLoadPB.hasReplLoadSink() - ? ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink()) - : null) - .setTasks(serverLoadPB.getTasksList().stream() - .map(ProtobufUtil::getServerTask).collect(Collectors.toList())) - .setReportTimestamp(serverLoadPB.getReportEndTime()) - .setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber) - .setVersion(version).build(); + .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream() + .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) + .setReplicationLoadSink(serverLoadPB.hasReplLoadSink() + ? ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink()) + : null) + .setTasks(serverLoadPB.getTasksList().stream().map(ProtobufUtil::getServerTask) + .collect(Collectors.toList())) + .setReportTimestamp(serverLoadPB.getReportEndTime()) + .setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber) + .setVersion(version).build(); } public static List toCoprocessor(Collection names) { - return names.stream() - .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + return names.stream().map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) .collect(Collectors.toList()); } @@ -108,18 +107,14 @@ public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) .setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE)) .setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE)) .addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames())) - .addAllRegionLoads( - metrics.getRegionMetrics().values().stream().map(RegionMetricsBuilder::toRegionLoad) - .collect(Collectors.toList())) - .addAllUserLoads( - metrics.getUserMetrics().values().stream().map(UserMetricsBuilder::toUserMetrics) - .collect(Collectors.toList())) - .addAllReplLoadSource( - metrics.getReplicationLoadSourceList().stream() - .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) + .addAllRegionLoads(metrics.getRegionMetrics().values().stream() + .map(RegionMetricsBuilder::toRegionLoad).collect(Collectors.toList())) + .addAllUserLoads(metrics.getUserMetrics().values().stream() + .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) + .addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream() + .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) .addAllTasks( - metrics.getTasks().stream().map(ProtobufUtil::toServerTask) - .collect(Collectors.toList())) + metrics.getTasks().stream().map(ProtobufUtil::toServerTask).collect(Collectors.toList())) .setReportStartTime(metrics.getLastReportTimestamp()) .setReportEndTime(metrics.getReportTimestamp()); if (metrics.getReplicationLoadSink() != null) { @@ -186,7 +181,6 @@ public ServerMetricsBuilder setWriteRequestCount(long value) { return this; } - public ServerMetricsBuilder setUsedHeapSize(Size value) { this.usedHeapSize = value; return this; @@ -243,25 +237,10 @@ public ServerMetricsBuilder setTasks(List tasks) { } public ServerMetrics build() { - return new ServerMetricsImpl( - serverName, - versionNumber, - version, - 
requestCountPerSecond, - requestCount, - readRequestCount, - writeRequestCount, - usedHeapSize, - maxHeapSize, - infoServerPort, - sources, - sink, - regionStatus, - coprocessorNames, - reportTimestamp, - lastReportTimestamp, - userMetrics, - tasks); + return new ServerMetricsImpl(serverName, versionNumber, version, requestCountPerSecond, + requestCount, readRequestCount, writeRequestCount, usedHeapSize, maxHeapSize, + infoServerPort, sources, sink, regionStatus, coprocessorNames, reportTimestamp, + lastReportTimestamp, userMetrics, tasks); } private static class ServerMetricsImpl implements ServerMetrics { @@ -287,11 +266,10 @@ private static class ServerMetricsImpl implements ServerMetrics { ServerMetricsImpl(ServerName serverName, int versionNumber, String version, long requestCountPerSecond, long requestCount, long readRequestsCount, - long writeRequestsCount, Size usedHeapSize, Size maxHeapSize, - int infoServerPort, List sources, ReplicationLoadSink sink, - Map regionStatus, Set coprocessorNames, - long reportTimestamp, long lastReportTimestamp, Map userMetrics, - List tasks) { + long writeRequestsCount, Size usedHeapSize, Size maxHeapSize, int infoServerPort, + List sources, ReplicationLoadSink sink, + Map regionStatus, Set coprocessorNames, long reportTimestamp, + long lastReportTimestamp, Map userMetrics, List tasks) { this.serverName = Preconditions.checkNotNull(serverName); this.versionNumber = versionNumber; this.version = version; @@ -306,7 +284,7 @@ private static class ServerMetricsImpl implements ServerMetrics { this.sink = sink; this.regionStatus = Preconditions.checkNotNull(regionStatus); this.userMetrics = Preconditions.checkNotNull(userMetrics); - this.coprocessorNames =Preconditions.checkNotNull(coprocessorNames); + this.coprocessorNames = Preconditions.checkNotNull(coprocessorNames); this.reportTimestamp = reportTimestamp; this.lastReportTimestamp = lastReportTimestamp; this.tasks = tasks; @@ -367,11 +345,11 @@ public List getReplicationLoadSourceList() { } @Override - public Map> getReplicationLoadSourceMap(){ - Map> sourcesMap = new HashMap<>(); - for(ReplicationLoadSource loadSource : sources){ - sourcesMap.computeIfAbsent(loadSource.getPeerID(), - peerId -> new ArrayList<>()).add(loadSource); + public Map> getReplicationLoadSourceMap() { + Map> sourcesMap = new HashMap<>(); + for (ReplicationLoadSource loadSource : sources) { + sourcesMap.computeIfAbsent(loadSource.getPeerID(), peerId -> new ArrayList<>()) + .add(loadSource); } return sourcesMap; } @@ -434,8 +412,8 @@ public String toString() { storeFileCount += r.getStoreFileCount(); storeRefCount += r.getStoreRefCount(); int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount(); - maxCompactedStoreFileRefCount = Math.max(maxCompactedStoreFileRefCount, - currentMaxCompactedStoreFileRefCount); + maxCompactedStoreFileRefCount = + Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount); uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE); memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE); @@ -450,21 +428,20 @@ public String toString() { compactingCellCount += r.getCompactingCellCount(); } StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond", - Double.valueOf(getRequestCountPerSecond())); + Double.valueOf(getRequestCountPerSecond())); Strings.appendKeyValue(sb, "numberOfOnlineRegions", - Integer.valueOf(getRegionMetrics().size())); + 
Integer.valueOf(getRegionMetrics().size())); Strings.appendKeyValue(sb, "usedHeapMB", getUsedHeapSize()); Strings.appendKeyValue(sb, "maxHeapMB", getMaxHeapSize()); Strings.appendKeyValue(sb, "numberOfStores", storeCount); Strings.appendKeyValue(sb, "numberOfStorefiles", storeFileCount); Strings.appendKeyValue(sb, "storeRefCount", storeRefCount); - Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", - maxCompactedStoreFileRefCount); + Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", maxCompactedStoreFileRefCount); Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", uncompressedStoreFileSizeMB); Strings.appendKeyValue(sb, "storefileSizeMB", storeFileSizeMB); if (uncompressedStoreFileSizeMB != 0) { - Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", - (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB)); + Strings.appendKeyValue(sb, "compressionRatio", + String.format("%.4f", (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB)); } Strings.appendKeyValue(sb, "memstoreSizeMB", memStoreSizeMB); Strings.appendKeyValue(sb, "readRequestsCount", readRequestsCount); @@ -478,8 +455,7 @@ public String toString() { Strings.appendKeyValue(sb, "currentCompactedKVs", compactedCellCount); float compactionProgressPct = Float.NaN; if (compactingCellCount > 0) { - compactionProgressPct = - Float.valueOf((float) compactedCellCount / compactingCellCount); + compactionProgressPct = Float.valueOf((float) compactedCellCount / compactingCellCount); } Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); Strings.appendKeyValue(sb, "coprocessors", getCoprocessorNames()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java index e791093e43d7..d11ea86ab11b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,10 +25,7 @@ public interface ServerTask { /** Task state */ enum State { - RUNNING, - WAITING, - COMPLETE, - ABORTED; + RUNNING, WAITING, COMPLETE, ABORTED; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java index d4937373789e..20b7065948ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
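Outside the patch itself, a minimal sketch of how the ServerMetricsBuilder chains reformatted above are driven. ServerMetricsBuilder is an internal (InterfaceAudience.Private) class, and every value below is a placeholder rather than real cluster data; the server-name format reuses the host,port,startcode example cited later in this patch.

import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;

public class ServerMetricsBuilderSketch {
  public static void main(String[] args) {
    ServerName server = ServerName.valueOf("host187.example.com", 60020, 1289493121758L);
    // Fluent chain in the continuation-indent style enforced by the reformatting above;
    // all numbers are made up for illustration.
    ServerMetrics metrics = ServerMetricsBuilder.newBuilder(server)
      .setRequestCountPerSecond(42)
      .setRequestCount(100_000)
      .setMaxHeapSize(new Size(4096, Size.Unit.MEGABYTE))
      .setUsedHeapSize(new Size(1024, Size.Unit.MEGABYTE))
      .setVersionNumber(3)
      .setVersion("3.0.0-alpha-3-SNAPSHOT")
      .build();
    System.out.println(metrics.getServerName() + " heap=" + metrics.getUsedHeapSize());
  }
}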
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,7 +33,8 @@ public static ServerTaskBuilder newBuilder() { private long startTime; private long completionTime; - private ServerTaskBuilder() { } + private ServerTaskBuilder() { + } private static final class ServerTaskImpl implements ServerTask { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java index 0e7716a0a619..039954a14410 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.math.BigDecimal; @@ -24,8 +24,8 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * It is used to represent the size with different units. - * This class doesn't serve for the precise computation. + * It is used to represent the size with different units. This class doesn't serve for the precise + * computation. */ @InterfaceAudience.Public public final class Size implements Comparable { @@ -34,12 +34,9 @@ public final class Size implements Comparable { public enum Unit { // keep the room to add more units for HBase 10.x - PETABYTE(100, "PB"), - TERABYTE(99, "TB"), - GIGABYTE(98, "GB"), - MEGABYTE(97, "MB"), - KILOBYTE(96, "KB"), - BYTE(95, "B"); + PETABYTE(100, "PB"), TERABYTE(99, "TB"), GIGABYTE(98, "GB"), MEGABYTE(97, "MB"), + KILOBYTE(96, "KB"), BYTE(95, "B"); + private final int orderOfSize; private final String simpleName; @@ -91,7 +88,6 @@ public double get() { /** * get the value which is converted to specified unit. - * * @param unit size unit * @return the converted value */ @@ -146,7 +142,7 @@ public boolean equals(Object obj) { return true; } if (obj instanceof Size) { - return compareTo((Size)obj) == 0; + return compareTo((Size) obj) == 0; } return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java index 9d67a37695ca..ae6721813a8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. 
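For context on the Size class whose Unit enum is collapsed onto one line above, a small sketch of the unit conversion it provides; the numbers are arbitrary.

import org.apache.hadoop.hbase.Size;

public class SizeSketch {
  public static void main(String[] args) {
    Size heap = new Size(1536, Size.Unit.MEGABYTE);
    // get(unit) converts the stored value into the requested unit.
    System.out.println(heap.get(Size.Unit.GIGABYTE)); // 1.5
    System.out.println(heap.get(Size.Unit.KILOBYTE)); // 1572864.0
    // compareTo/equals compare the underlying quantity, so different unit
    // representations of the same amount compare as equal.
    System.out.println(heap.compareTo(new Size(1.5, Size.Unit.GIGABYTE)) == 0); // true
  }
}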
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java index a113f7c67bf0..0e6dd90d5407 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java index 7e5046538abc..54f44405c584 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java index 90c015674ca6..14720811ca16 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,6 +26,7 @@ @InterfaceAudience.Public public class TableNotEnabledException extends DoNotRetryIOException { private static final long serialVersionUID = 262144L; + /** default constructor */ public TableNotEnabledException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java index ae114fed0e62..416d8601fc3b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java index 850cd9600623..9f1eb30ba20f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,6 @@ public class UnknownRegionException extends DoNotRetryRegionException { /** * Constructs a new UnknownRegionException with the specified detail message. - * * @param message the detail message */ public UnknownRegionException(String message) { @@ -39,7 +37,6 @@ public UnknownRegionException(String message) { /** * Constructs a new UnknownRegionException with the specified detail message and cause. - * * @param message the detail message * @param cause the cause of the exception */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java index 14afb977b5de..36cdab1ac655 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown if a region server is passed an unknown scanner ID. - * This usually means that the client has taken too long between checkins and so the - * scanner lease on the server-side has expired OR the server-side is closing - * down and has cancelled all leases. + * Thrown if a region server is passed an unknown scanner ID. This usually means that the client has + * taken too long between checkins and so the scanner lease on the server-side has expired OR the + * server-side is closing down and has cancelled all leases. */ @InterfaceAudience.Public public class UnknownScannerException extends DoNotRetryIOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java index 6c2ba07cc3d6..2710aa9be273 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Map; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulates per-user load metrics. - */ + * Encapsulates per-user load metrics. 
+ */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface UserMetrics { @@ -60,8 +56,8 @@ interface ClientMetrics { long getWriteRequestCount(); /** - * @return the number of write requests and read requests and coprocessor - * service requests made by the user + * @return the number of write requests and read requests and coprocessor service requests made by + * the user */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java index 70d28883c269..8b906c6c70f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; @@ -35,18 +31,19 @@ public final class UserMetricsBuilder { public static UserMetrics toUserMetrics(ClusterStatusProtos.UserLoad userLoad) { UserMetricsBuilder builder = UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes()); - userLoad.getClientMetricsList().stream().map( - clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), + userLoad.getClientMetricsList().stream() + .map(clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(), - clientMetrics.getFilteredRequestsCount())).forEach(builder::addClientMetris); + clientMetrics.getFilteredRequestsCount())) + .forEach(builder::addClientMetris); return builder.build(); } public static ClusterStatusProtos.UserLoad toUserMetrics(UserMetrics userMetrics) { ClusterStatusProtos.UserLoad.Builder builder = ClusterStatusProtos.UserLoad.newBuilder().setUserName(userMetrics.getNameAsString()); - userMetrics.getClientMetrics().values().stream().map( - clientMetrics -> ClusterStatusProtos.ClientMetrics.newBuilder() + userMetrics.getClientMetrics().values().stream() + .map(clientMetrics -> ClusterStatusProtos.ClientMetrics.newBuilder() .setHostName(clientMetrics.getHostName()) .setWriteRequestsCount(clientMetrics.getWriteRequestsCount()) .setReadRequestsCount(clientMetrics.getReadRequestsCount()) @@ -59,9 +56,9 @@ public static UserMetricsBuilder newBuilder(byte[] name) { return new UserMetricsBuilder(name); } - private final byte[] name; private Map clientMetricsMap = new HashMap<>(); + private UserMetricsBuilder(byte[] name) { this.name = name; } @@ -89,19 +86,23 @@ public ClientMetricsImpl(String hostName, long readRequest, long writeRequest, this.filteredReadRequestsCount = filteredReadRequestsCount; } - @Override public String getHostName() { + @Override + public String getHostName() { return hostName; } - @Override public long getReadRequestsCount() { + @Override + public long getReadRequestsCount() { return readRequestCount; } - @Override public long getWriteRequestsCount() { + @Override + public long getWriteRequestsCount() { return writeRequestCount; 
} - @Override public long getFilteredReadRequestsCount() { + @Override + public long getFilteredReadRequestsCount() { return filteredReadRequestsCount; } } @@ -115,33 +116,38 @@ private static class UserMetricsImpl implements UserMetrics { this.clientMetricsMap = clientMetricsMap; } - @Override public byte[] getUserName() { + @Override + public byte[] getUserName() { return name; } - @Override public long getReadRequestCount() { - return clientMetricsMap.values().stream().map(c -> c.getReadRequestsCount()) - .reduce(0L, Long::sum); + @Override + public long getReadRequestCount() { + return clientMetricsMap.values().stream().map(c -> c.getReadRequestsCount()).reduce(0L, + Long::sum); } - @Override public long getWriteRequestCount() { - return clientMetricsMap.values().stream().map(c -> c.getWriteRequestsCount()) - .reduce(0L, Long::sum); + @Override + public long getWriteRequestCount() { + return clientMetricsMap.values().stream().map(c -> c.getWriteRequestsCount()).reduce(0L, + Long::sum); } - @Override public Map getClientMetrics() { + @Override + public Map getClientMetrics() { return this.clientMetricsMap; } - @Override public long getFilteredReadRequests() { + @Override + public long getFilteredReadRequests() { return clientMetricsMap.values().stream().map(c -> c.getFilteredReadRequestsCount()) .reduce(0L, Long::sum); } @Override public String toString() { - StringBuilder sb = Strings - .appendKeyValue(new StringBuilder(), "readRequestCount", this.getReadRequestCount()); + StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "readRequestCount", + this.getReadRequestCount()); Strings.appendKeyValue(sb, "writeRequestCount", this.getWriteRequestCount()); Strings.appendKeyValue(sb, "filteredReadRequestCount", this.getFilteredReadRequests()); return sb.toString(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java index 4dc44b4c3c69..fa02eadab48e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -42,7 +40,6 @@ public ZooKeeperConnectionException(String message) { /** * Constructor taking another exception. - * * @param message the message for this exception * @param exception the exception to grab data from */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java index 92b046436258..48cec12f43c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
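The UserMetrics/UserMetricsBuilder hunks above are formatting-only; as background, a hedged sketch of reading per-user load off the cluster status. It assumes an Admin handle is available, and whether per-user metrics are actually populated depends on server-side configuration.

import java.util.Map;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.UserMetrics;
import org.apache.hadoop.hbase.client.Admin;

public class UserMetricsSketch {
  // Prints read/write totals per user for every live region server.
  static void dumpUserLoad(Admin admin) throws java.io.IOException {
    ClusterMetrics cluster = admin.getClusterMetrics();
    for (Map.Entry<ServerName, ServerMetrics> e : cluster.getLiveServerMetrics().entrySet()) {
      for (UserMetrics user : e.getValue().getUserMetrics().values()) {
        // getRequestCount() defaults to read + write, as documented above.
        System.out.println(e.getKey() + " " + user.getNameAsString() + " reads="
          + user.getReadRequestCount() + " writes=" + user.getWriteRequestCount()
          + " total=" + user.getRequestCount());
      }
    }
  }
}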
See the NOTICE file * distributed with this work for additional information @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.yetus.audience.InterfaceAudience; /** * Helper class for custom client scanners. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java index 9e33a12af6b5..9fb2b1cfe118 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,10 +27,10 @@ abstract class AbstractResponse { public enum ResponseType { - SINGLE (0), - MULTI (1); + SINGLE(0), MULTI(1); - ResponseType(int value) {} + ResponseType(int value) { + } } public abstract ResponseType type(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index 60137d23fff2..032857dcbc22 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -90,11 +90,11 @@ abstract class AbstractRpcBasedConnectionRegistry implements ConnectionRegistry private final RegistryEndpointsRefresher registryEndpointRefresher; protected AbstractRpcBasedConnectionRegistry(Configuration conf, - String hedgedReqsFanoutConfigName, String initialRefreshDelaySecsConfigName, - String refreshIntervalSecsConfigName, String minRefreshIntervalSecsConfigName) - throws IOException { + String hedgedReqsFanoutConfigName, String initialRefreshDelaySecsConfigName, + String refreshIntervalSecsConfigName, String minRefreshIntervalSecsConfigName) + throws IOException { this.hedgedReadFanOut = - Math.max(1, conf.getInt(hedgedReqsFanoutConfigName, HEDGED_REQS_FANOUT_DEFAULT)); + Math.max(1, conf.getInt(hedgedReqsFanoutConfigName, HEDGED_REQS_FANOUT_DEFAULT)); rpcTimeoutMs = (int) Math.min(Integer.MAX_VALUE, conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); // XXX: we pass cluster id as null here since we do not have a cluster id yet, we have to fetch @@ -105,8 +105,8 @@ protected AbstractRpcBasedConnectionRegistry(Configuration conf, populateStubs(getBootstrapNodes(conf)); // could return null here is refresh interval is less than zero registryEndpointRefresher = - RegistryEndpointsRefresher.create(conf, initialRefreshDelaySecsConfigName, - refreshIntervalSecsConfigName, minRefreshIntervalSecsConfigName, this::refreshStubs); + RegistryEndpointsRefresher.create(conf, initialRefreshDelaySecsConfigName, + refreshIntervalSecsConfigName, minRefreshIntervalSecsConfigName, this::refreshStubs); } protected abstract Set getBootstrapNodes(Configuration conf) throws IOException; @@ -120,7 +120,7 @@ private void refreshStubs() throws IOException { 
private void populateStubs(Set addrs) throws IOException { Preconditions.checkNotNull(addrs); ImmutableMap.Builder builder = - ImmutableMap.builderWithExpectedSize(addrs.size()); + ImmutableMap.builderWithExpectedSize(addrs.size()); User user = User.getCurrent(); for (ServerName masterAddr : addrs) { builder.put(masterAddr, @@ -144,7 +144,7 @@ protected interface Callable { } private CompletableFuture call(ClientMetaService.Interface stub, - Callable callable) { + Callable callable) { HBaseRpcController controller = rpcControllerFactory.newController(); CompletableFuture future = new CompletableFuture<>(); callable.call(controller, stub, resp -> { @@ -174,8 +174,8 @@ private IOException badResponse(String debug) { * points have been tried and all of them are failed, we will fail the future. */ private void groupCall(CompletableFuture future, Set servers, - List stubs, int startIndexInclusive, Callable callable, - Predicate isValidResp, String debug, ConcurrentLinkedQueue errors) { + List stubs, int startIndexInclusive, Callable callable, + Predicate isValidResp, String debug, ConcurrentLinkedQueue errors) { int endIndexExclusive = Math.min(startIndexInclusive + hedgedReadFanOut, stubs.size()); AtomicInteger remaining = new AtomicInteger(endIndexExclusive - startIndexInclusive); for (int i = startIndexInclusive; i < endIndexExclusive; i++) { @@ -194,7 +194,7 @@ private void groupCall(CompletableFuture future, Set(errors)); + new RetriesExhaustedException("masters", stubs.size(), new ArrayList<>(errors)); future.completeExceptionally(new MasterRegistryFetchException(servers, ex)); } else { groupCall(future, servers, stubs, endIndexExclusive, callable, isValidResp, debug, @@ -210,7 +210,7 @@ private void groupCall(CompletableFuture future, Set CompletableFuture call(Callable callable, - Predicate isValidResp, String debug) { + Predicate isValidResp, String debug) { ImmutableMap addr2StubRef = addr2Stub; Set servers = addr2StubRef.keySet(); List stubs = new ArrayList<>(addr2StubRef.values()); @@ -222,7 +222,7 @@ protected final CompletableFuture call(Callable callab } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") Set getParsedServers() { return addr2Stub.keySet(); } @@ -233,7 +233,7 @@ Set getParsedServers() { private static RegionLocations transformMetaRegionLocations(GetMetaRegionLocationsResponse resp) { List regionLocations = new ArrayList<>(); resp.getMetaLocationsList() - .forEach(location -> regionLocations.add(ProtobufUtil.toRegionLocation(location))); + .forEach(location -> regionLocations.add(ProtobufUtil.toRegionLocation(location))); return new RegionLocations(regionLocations); } @@ -241,11 +241,11 @@ private static RegionLocations transformMetaRegionLocations(GetMetaRegionLocatio public CompletableFuture getMetaRegionLocations() { return tracedFuture( () -> this - . call( - (c, s, d) -> s.getMetaRegionLocations(c, - GetMetaRegionLocationsRequest.getDefaultInstance(), d), - r -> r.getMetaLocationsCount() != 0, "getMetaLocationsCount") - .thenApply(AbstractRpcBasedConnectionRegistry::transformMetaRegionLocations), + . 
call( + (c, s, d) -> s.getMetaRegionLocations(c, + GetMetaRegionLocationsRequest.getDefaultInstance(), d), + r -> r.getMetaLocationsCount() != 0, "getMetaLocationsCount") + .thenApply(AbstractRpcBasedConnectionRegistry::transformMetaRegionLocations), getClass().getSimpleName() + ".getMetaRegionLocations"); } @@ -253,10 +253,10 @@ public CompletableFuture getMetaRegionLocations() { public CompletableFuture getClusterId() { return tracedFuture( () -> this - . call( - (c, s, d) -> s.getClusterId(c, GetClusterIdRequest.getDefaultInstance(), d), - GetClusterIdResponse::hasClusterId, "getClusterId()") - .thenApply(GetClusterIdResponse::getClusterId), + . call( + (c, s, d) -> s.getClusterId(c, GetClusterIdRequest.getDefaultInstance(), d), + GetClusterIdResponse::hasClusterId, "getClusterId()") + .thenApply(GetClusterIdResponse::getClusterId), getClass().getSimpleName() + ".getClusterId"); } @@ -264,10 +264,10 @@ public CompletableFuture getClusterId() { public CompletableFuture getActiveMaster() { return tracedFuture( () -> this - . call( - (c, s, d) -> s.getActiveMaster(c, GetActiveMasterRequest.getDefaultInstance(), d), - GetActiveMasterResponse::hasServerName, "getActiveMaster()") - .thenApply(resp -> ProtobufUtil.toServerName(resp.getServerName())), + . call( + (c, s, d) -> s.getActiveMaster(c, GetActiveMasterRequest.getDefaultInstance(), d), + GetActiveMasterResponse::hasServerName, "getActiveMaster()") + .thenApply(resp -> ProtobufUtil.toServerName(resp.getServerName())), getClass().getSimpleName() + ".getClusterId"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java index fdf3485c2548..cc045c56cc14 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,9 +21,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by - * {@link Table#batch} to associate the action with it's region and maintain - * the index from the original request. + * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by + * {@link Table#batch} to associate the action with it's region and maintain the index from the + * original request. */ @InterfaceAudience.Private public class Action implements Comparable { @@ -76,7 +75,9 @@ public int getReplicaId() { return replicaId; } - public int getPriority() { return priority; } + public int getPriority() { + return priority; + } @Override public int compareTo(Action other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 5d4337e34d41..1f7292aa065b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
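The AbstractRpcBasedConnectionRegistry hunks above only re-indent its hedged group-call code. Purely as an illustration of that pattern, and not the class's actual implementation (all names here are invented), a generic sketch of fanning out a batch of calls and accepting the first valid response before falling through to the next batch:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Generic illustration only: fire `fanOut` candidate calls at once, take the first
// valid response, and try the next group when the whole batch fails.
public class HedgedGroupCallSketch<T> {
  void groupCall(CompletableFuture<T> result, List<Supplier<CompletableFuture<T>>> calls,
      int start, int fanOut, Predicate<T> isValid) {
    int end = Math.min(start + fanOut, calls.size());
    if (start >= end) {
      result.completeExceptionally(new RuntimeException("all endpoints failed"));
      return;
    }
    AtomicInteger remaining = new AtomicInteger(end - start);
    for (int i = start; i < end; i++) {
      calls.get(i).get().whenComplete((resp, err) -> {
        if (err == null && isValid.test(resp)) {
          result.complete(resp); // first good answer wins
        } else if (remaining.decrementAndGet() == 0) {
          groupCall(result, calls, end, fanOut, isValid); // whole group failed, try the next one
        }
      });
    }
  }
}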
See the NOTICE file * distributed with this work for additional information @@ -71,11 +71,11 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** - * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and - * call {@link #close()} when done. - *

<p>Admin can be used to create, drop, list, enable and disable and otherwise modify tables, - * as well as perform other administrative operations. - * + * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and call + * {@link #close()} when done. + * <p>

    + * Admin can be used to create, drop, list, enable and disable and otherwise modify tables, as well + * as perform other administrative operations. * @see ConnectionFactory * @see Connection * @see Table @@ -125,7 +125,6 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables. - * * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ @@ -133,7 +132,6 @@ public interface Admin extends Abortable, Closeable { /** * List all userspace tables and whether or not include system tables. - * * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ @@ -141,7 +139,6 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables that match the given pattern. - * * @param pattern The compiled regular expression to match against * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs @@ -153,7 +150,6 @@ default List listTableDescriptors(Pattern pattern) throws IOExc /** * List all the tables matching the given pattern. - * * @param pattern The compiled regular expression to match against * @param includeSysTables false to match only against userspace tables * @return a list of TableDescriptors @@ -165,7 +161,6 @@ List listTableDescriptors(Pattern pattern, boolean includeSysTa /** * List all of the names of userspace tables. - * * @return TableName[] table names * @throws IOException if a remote or network exception occurs */ @@ -188,28 +183,25 @@ default TableName[] listTableNames(Pattern pattern) throws IOException { * @return TableName[] table names * @throws IOException if a remote or network exception occurs */ - TableName[] listTableNames(Pattern pattern, boolean includeSysTables) - throws IOException; + TableName[] listTableNames(Pattern pattern, boolean includeSysTables) throws IOException; /** * Get a table descriptor. - * * @param tableName as a {@link TableName} * @return the tableDescriptor * @throws org.apache.hadoop.hbase.TableNotFoundException * @throws IOException if a remote or network exception occurs */ - TableDescriptor getDescriptor(TableName tableName) - throws TableNotFoundException, IOException; + TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException; /** * Creates a new table. Synchronous operation. - * * @param desc table descriptor for table * @throws IllegalArgumentException if the table name is reserved * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). + * threads, the table may have been created between test-for-existence and + * attempt-at-creation). * @throws IOException if a remote or network exception occurs */ default void createTable(TableDescriptor desc) throws IOException { @@ -217,12 +209,11 @@ default void createTable(TableDescriptor desc) throws IOException { } /** - * Creates a new table with the specified number of regions. The start key specified will become + * Creates a new table with the specified number of regions. The start key specified will become * the end key of the first region of the table, and the end key specified will become the start * key of the last region of the table (the first region has a null start key and the last region * has a null end key). 
BigInteger math will be used to divide the key range specified into enough * segments to make the required number of total regions. Synchronous operation. - * * @param desc table descriptor for table * @param startKey beginning of key range * @param endKey end of key range @@ -231,7 +222,8 @@ default void createTable(TableDescriptor desc) throws IOException { * @throws IllegalArgumentException if the table name is reserved * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). + * threads, the table may have been created between test-for-existence and + * attempt-at-creation). */ void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException; @@ -240,14 +232,14 @@ void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRe * Creates a new table with an initial set of empty regions defined by the specified split keys. * The total number of regions created will be the number of split keys plus one. Synchronous * operation. Note : Avoid passing empty split key. - * * @param desc table descriptor for table * @param splitKeys array of split keys for the initial regions of the table * @throws IllegalArgumentException if the table name is reserved, if the split keys are repeated - * and if the split key has empty byte array. + * and if the split key has empty byte array. * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). + * threads, the table may have been created between test-for-existence and + * attempt-at-creation). * @throws IOException if a remote or network exception occurs */ default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOException { @@ -270,14 +262,12 @@ default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOExce Future createTableAsync(TableDescriptor desc) throws IOException; /** - * Creates a new table but does not block and wait for it to come online. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * Throws IllegalArgumentException Bad table name, if the split keys - * are repeated and if the split key has empty byte array. - * + * Creates a new table but does not block and wait for it to come online. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. Throws + * IllegalArgumentException Bad table name, if the split keys are repeated and if the split key + * has empty byte array. 
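To make the split-key contract above concrete, a short sketch of creating a hypothetical pre-split table; the table name, column family, and split keys are made up for illustration.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // Creates a table pre-split into four regions (three split keys plus one).
  static void createPreSplit(Admin admin) throws java.io.IOException {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .build();
    // Split keys must be non-empty and unique, per the Javadoc above.
    byte[][] splitKeys = { Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d") };
    admin.createTable(desc, splitKeys);
  }
}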
* @param desc table descriptor for table * @param splitKeys keys to check if the table has been created with all split keys * @throws IOException if a remote or network exception occurs @@ -296,16 +286,14 @@ default void deleteTable(TableName tableName) throws IOException { } /** - * Deletes the table but does not block and wait for it to be completely removed. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Deletes the table but does not block and wait for it to be completely removed. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async delete. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async delete. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future deleteTableAsync(TableName tableName) throws IOException; @@ -330,17 +318,16 @@ default void truncateTable(TableName tableName, boolean preserveSplits) throws I * @return the result of the async truncate. You can use Future.get(long, TimeUnit) to wait on the * operation to complete. */ - Future truncateTableAsync(TableName tableName, boolean preserveSplits) - throws IOException; + Future truncateTableAsync(TableName tableName, boolean preserveSplits) throws IOException; /** * Enable a table. May timeout. Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)} * and {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in * disabled state for it to be enabled. * @param tableName name of the table - * @throws IOException There could be couple types of - * IOException TableNotFoundException means the table doesn't exist. - * TableNotDisabledException means the table isn't in disabled state. + * @throws IOException There could be couple types of IOException TableNotFoundException means the + * table doesn't exist. TableNotDisabledException means the table isn't in disabled + * state. * @see #isTableEnabled(org.apache.hadoop.hbase.TableName) * @see #disableTable(org.apache.hadoop.hbase.TableName) * @see #enableTableAsync(org.apache.hadoop.hbase.TableName) @@ -350,30 +337,26 @@ default void enableTable(TableName tableName) throws IOException { } /** - * Enable the table but does not block and wait for it to be completely enabled. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Enable the table but does not block and wait for it to be completely enabled. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. 
* @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async enable. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async enable. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future enableTableAsync(TableName tableName) throws IOException; /** - * Disable the table but does not block and wait for it to be completely disabled. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Disable the table but does not block and wait for it to be completely disabled. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async disable. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async disable. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future disableTableAsync(TableName tableName) throws IOException; @@ -425,12 +408,10 @@ default void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnF } /** - * Add a column family to an existing table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Add a column family to an existing table. Asynchronous operation. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added * @throws IOException if a remote or network exception occurs @@ -454,20 +435,17 @@ default void deleteColumnFamily(TableName tableName, byte[] columnFamily) throws } /** - * Delete a column family from a table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Delete a column family from a table. Asynchronous operation. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. 
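A sketch of the Future-based pattern the reflowed Javadoc above describes for the *Async table operations; the timeout value is arbitrary.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class AsyncAdminOpSketch {
  // Disables and then deletes a table; the Admin call itself does not block,
  // and the caller decides how long to wait via Future.get(timeout, unit).
  static void dropTable(Admin admin, TableName tn) throws Exception {
    Future<Void> disable = admin.disableTableAsync(tn);
    disable.get(30, TimeUnit.SECONDS); // may throw ExecutionException or TimeoutException
    Future<Void> delete = admin.deleteTableAsync(tn);
    delete.get(30, TimeUnit.SECONDS);
  }
}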
* @param tableName name of table * @param columnFamily name of column family to be deleted * @throws IOException if a remote or network exception occurs * @return the result of the async delete column family. You can use Future.get(long, TimeUnit) to * wait on the operation to complete. */ - Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) - throws IOException; + Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) throws IOException; /** * Modify an existing column family on a table. Synchronous operation. Use @@ -484,12 +462,10 @@ default void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor colu } /** - * Modify an existing column family on a table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Modify an existing column family on a table. Asynchronous operation. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table * @param columnFamily new column family descriptor to use * @throws IOException if a remote or network exception occurs @@ -507,7 +483,7 @@ Future modifyColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor * @throws IOException if a remote or network exception occurs */ default void modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, String dstSFT) - throws IOException { + throws IOException { get(modifyColumnFamilyStoreFileTrackerAsync(tableName, family, dstSFT), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -522,11 +498,10 @@ default void modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] fami * @throws IOException if a remote or network exception occurs */ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] family, - String dstSFT) throws IOException; + String dstSFT) throws IOException; /** * Get all the online regions on a region server. - * * @return List of {@link RegionInfo} * @throws IOException if a remote or network exception occurs */ @@ -534,16 +509,14 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Flush a table. Synchronous operation. - * * @param tableName table to flush * @throws IOException if a remote or network exception occurs */ void flush(TableName tableName) throws IOException; /** - * Flush the specified column family stores on all regions of the passed table. - * This runs as a synchronous operation. - * + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. * @param tableName table to flush * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs @@ -552,7 +525,6 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Flush an individual region. Synchronous operation. - * * @param regionName region to flush * @throws IOException if a remote or network exception occurs */ @@ -560,7 +532,6 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Flush a column family within a region. Synchronous operation. 
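As an illustration of the column-family modification calls documented above, a sketch that raises the retained versions on a hypothetical family named cf; the synchronous variant simply wraps the *Async call and waits.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ModifyColumnFamilySketch {
  // Keep three versions of each cell in the existing "cf" family.
  static void keepThreeVersions(Admin admin, TableName tn) throws java.io.IOException {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
      .setMaxVersions(3)
      .build();
    admin.modifyColumnFamily(tn, cf);
  }
}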
- * * @param regionName region to flush * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs @@ -575,10 +546,8 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] void flushRegionServer(ServerName serverName) throws IOException; /** - * Compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compact a table. Asynchronous operation in that this method requests that a Compaction run and + * then it returns. It does not wait on the completion of Compaction (it can take a while). * @param tableName table to compact * @throws IOException if a remote or network exception occurs */ @@ -586,9 +555,8 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Compact an individual region. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compaction run and then it returns. It does not wait on the completion of Compaction (it can + * take a while). * @param regionName region to compact * @throws IOException if a remote or network exception occurs */ @@ -596,46 +564,39 @@ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] /** * Compact a column family within a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). * @param tableName table to compact * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ - void compact(TableName tableName, byte[] columnFamily) - throws IOException; + void compact(TableName tableName, byte[] columnFamily) throws IOException; /** * Compact a column family within a region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). * @param regionName region to compact * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ - void compactRegion(byte[] regionName, byte[] columnFamily) - throws IOException; + void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException; /** - * Compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compact a table. Asynchronous operation in that this method requests that a Compaction run and + * then it returns. It does not wait on the completion of Compaction (it can take a while). 
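To underline the "requests a Compaction run and then it returns" semantics spelled out above, a small sketch of the flush and compaction calls; the family name is hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionSketch {
  static void flushAndCompact(Admin admin, TableName tn) throws java.io.IOException {
    admin.flush(tn);                             // synchronous flush of all regions of the table
    admin.compact(tn);                           // only *requests* a compaction and returns
    admin.majorCompact(tn, Bytes.toBytes("cf")); // requests a major compaction of one family
    // Neither compact() nor majorCompact() waits for the compaction to finish.
  }
}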
* @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ void compact(TableName tableName, CompactType compactType) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** - * Compact a column family within a table. Asynchronous operation in that this method - * requests that a Compaction run and then it returns. It does not wait on the - * completion of Compaction (it can take a while). - * + * Compact a column family within a table. Asynchronous operation in that this method requests + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). * @param tableName table to compact * @param columnFamily column family within a table * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} @@ -643,70 +604,62 @@ void compact(TableName tableName, CompactType compactType) * @throws InterruptedException */ void compact(TableName tableName, byte[] columnFamily, CompactType compactType) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** - * Major compact a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a table. Asynchronous operation in that this method requests that a Compaction + * run and then it returns. It does not wait on the completion of Compaction (it can take a + * while). * @param tableName table to major compact * @throws IOException if a remote or network exception occurs */ void majorCompact(TableName tableName) throws IOException; /** - * Major compact a table or an individual region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a table or an individual region. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). * @param regionName region to major compact * @throws IOException if a remote or network exception occurs */ void majorCompactRegion(byte[] regionName) throws IOException; /** - * Major compact a column family within a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a column family within a table. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). * @param tableName table to major compact * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ - void majorCompact(TableName tableName, byte[] columnFamily) - throws IOException; + void majorCompact(TableName tableName, byte[] columnFamily) throws IOException; /** - * Major compact a column family within region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a column family within region. 
Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). * @param regionName egion to major compact * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ - void majorCompactRegion(byte[] regionName, byte[] columnFamily) - throws IOException; + void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException; /** - * Major compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a table. Asynchronous operation in that this method requests that a Compaction + * run and then it returns. It does not wait on the completion of Compaction (it can take a + * while). * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ void majorCompact(TableName tableName, CompactType compactType) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** - * Major compact a column family within a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a column family within a table. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). * @param tableName table to compact * @param columnFamily column family within a table * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} @@ -714,15 +667,14 @@ void majorCompact(TableName tableName, CompactType compactType) * @throws InterruptedException */ void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing - * compactions. This state is ephemeral. The setting will be lost on restart. Compaction - * can also be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled - * in hbase-site.xml. - * - * @param switchState Set to true to enable, false to disable. + * compactions. This state is ephemeral. The setting will be lost on restart. Compaction can also + * be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled in + * hbase-site.xml. + * @param switchState Set to true to enable, false to disable. * @param serverNamesList list of region servers. * @return Previous compaction states for region servers * @throws IOException if a remote or network exception occurs @@ -769,8 +721,8 @@ Map compactionSwitch(boolean switchState, List serv * startcode. Here is an example: host187.example.com,60020,1289493121758 * @throws IOException if we can't find a region named encodedRegionName * @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link #move(byte[], ServerName)} - * instead. And if you want to move the region to a random server, please use - * {@link #move(byte[])}. + * instead. And if you want to move the region to a random server, please use + * {@link #move(byte[])}. 
* @see HBASE-22108 */ @Deprecated @@ -810,16 +762,15 @@ default void move(byte[] encodedRegionName, byte[] destServerName) throws IOExce void unassign(byte[] regionName) throws IOException; /** - * Unassign a region from current hosting regionserver. Region will then be assigned to a - * regionserver chosen at random. Region could be reassigned back to the same server. Use {@link - * #move(byte[], ServerName)} if you want to control the region movement. - * + * Unassign a region from current hosting regionserver. Region will then be assigned to a + * regionserver chosen at random. Region could be reassigned back to the same server. Use + * {@link #move(byte[], ServerName)} if you want to control the region movement. * @param regionName Region to unassign. Will clear any existing RegionPlan if one found. - * @param force If true, force unassign (Will remove region from regions-in-transition too if - * present. If results in double assignment use hbck -fix to resolve. To be used by experts). + * @param force If true, force unassign (Will remove region from + * regions-in-transition too if present. If results in double assignment use hbck -fix to + * resolve. To be used by experts). * @throws IOException if a remote or network exception occurs - * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} - * instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead. * @see HBASE-24875 */ @Deprecated @@ -833,7 +784,6 @@ default void unassign(byte[] regionName, boolean force) throws IOException { * still online as per Master's in memory state. If this API is incorrectly used on active region * then master will loose track of that region. This is a special method that should be used by * experts or hbck. - * * @param regionName Region to offline. * @throws IOException if a remote or network exception occurs */ @@ -850,21 +800,18 @@ default void unassign(byte[] regionName, boolean force) throws IOException { boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException; /** - * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the - * reassignments. Can NOT run for various reasons. Check logs. - * + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. Can NOT run for various reasons. Check logs. * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs */ default boolean balance() throws IOException { - return balance(BalanceRequest.defaultInstance()) - .isBalancerRan(); + return balance(BalanceRequest.defaultInstance()).isBalancerRan(); } /** - * Invoke the balancer with the given balance request. The BalanceRequest defines how the - * balancer will run. See {@link BalanceRequest} for more details. - * + * Invoke the balancer with the given balance request. The BalanceRequest defines how the balancer + * will run. See {@link BalanceRequest} for more details. * @param request defines how the balancer should run * @return {@link BalanceResponse} with details about the results of the invocation. * @throws IOException if a remote or network exception occurs @@ -872,39 +819,33 @@ default boolean balance() throws IOException { BalanceResponse balance(BalanceRequest request) throws IOException; /** - * Invoke the balancer. Will run the balancer and if regions to move, it will - * go ahead and do the reassignments. 
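A sketch of driving the balancer through the BalanceRequest form introduced above (error handling is omitted and the admin handle is assumed as in the earlier sketches):

  BalanceResponse response =
      admin.balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build());
  if (!response.isBalancerRan()) {
    // The balancer declined to run; the master log explains why.
  }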
If there is region in transition, force parameter of true - * would still run balancer. Can *not* run for other reasons. Check - * logs. + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. If there is region in transition, force parameter of true would still run + * balancer. Can *not* run for other reasons. Check logs. * @param force whether we should force balance even if there is region in transition * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.5.0. Will be removed in 4.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)} + * instead. */ @Deprecated default boolean balance(boolean force) throws IOException { - return balance( - BalanceRequest.newBuilder() - .setIgnoreRegionsInTransition(force) - .build() - ).isBalancerRan(); + return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(force).build()) + .isBalancerRan(); } /** * Query the current state of the balancer. - * * @return true if the balancer is enabled, false otherwise. * @throws IOException if a remote or network exception occurs */ boolean isBalancerEnabled() throws IOException; /** - * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. - * Calling this API will drop all the cached blocks specific to a table from BlockCache. - * This can significantly impact the query performance as the subsequent queries will - * have to retrieve the blocks from underlying filesystem. - * + * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. Calling + * this API will drop all the cached blocks specific to a table from BlockCache. This can + * significantly impact the query performance as the subsequent queries will have to retrieve the + * blocks from underlying filesystem. * @param tableName table to clear block cache * @return CacheEvictionStats related to the eviction * @throws IOException if a remote or network exception occurs @@ -912,11 +853,9 @@ default boolean balance(boolean force) throws IOException { CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException; /** - * Invoke region normalizer. Can NOT run for various reasons. Check logs. - * This is a non-blocking invocation to region normalizer. If return value is true, it means - * the request was submitted successfully. We need to check logs for the details of which regions - * were split/merged. - * + * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking + * invocation to region normalizer. If return value is true, it means the request was submitted + * successfully. We need to check logs for the details of which regions were split/merged. * @return {@code true} if region normalizer ran, {@code false} otherwise. * @throws IOException if a remote or network exception occurs */ @@ -925,11 +864,9 @@ default boolean normalize() throws IOException { } /** - * Invoke region normalizer. Can NOT run for various reasons. Check logs. - * This is a non-blocking invocation to region normalizer. If return value is true, it means - * the request was submitted successfully. We need to check logs for the details of which regions - * were split/merged. - * + * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking + * invocation to region normalizer. 
If return value is true, it means the request was submitted + * successfully. We need to check logs for the details of which regions were split/merged. * @param ntfp limit to tables matching the specified filter. * @return {@code true} if region normalizer ran, {@code false} otherwise. * @throws IOException if a remote or network exception occurs @@ -938,7 +875,6 @@ default boolean normalize() throws IOException { /** * Query the current state of the region normalizer. - * * @return true if region normalizer is enabled, false otherwise. * @throws IOException if a remote or network exception occurs */ @@ -946,7 +882,6 @@ default boolean normalize() throws IOException { /** * Turn region normalizer on or off. - * * @return Previous normalizer value * @throws IOException if a remote or network exception occurs */ @@ -954,7 +889,6 @@ default boolean normalize() throws IOException { /** * Enable/Disable the catalog janitor/ - * * @param onOrOff if true enables the catalog janitor * @return the previous state * @throws IOException if a remote or network exception occurs @@ -963,7 +897,6 @@ default boolean normalize() throws IOException { /** * Ask for a scan of the catalog table. - * * @return the number of entries cleaned. Returns -1 if previous run is in progress. * @throws IOException if a remote or network exception occurs */ @@ -971,14 +904,12 @@ default boolean normalize() throws IOException { /** * Query on the catalog janitor state (Enabled/Disabled?). - * * @throws IOException if a remote or network exception occurs */ boolean isCatalogJanitorEnabled() throws IOException; /** * Enable/Disable the cleaner chore. - * * @param onOrOff if true enables the cleaner chore * @return the previous state * @throws IOException if a remote or network exception occurs @@ -987,7 +918,6 @@ default boolean normalize() throws IOException { /** * Ask for cleaner chore to run. - * * @return true if cleaner chore ran, false otherwise * @throws IOException if a remote or network exception occurs */ @@ -995,12 +925,10 @@ default boolean normalize() throws IOException { /** * Query on the cleaner chore state (Enabled/Disabled?). - * * @throws IOException if a remote or network exception occurs */ boolean isCleanerChoreEnabled() throws IOException; - /** * Merge two regions. Asynchronous operation. * @param nameOfRegionA encoded or full name of region a @@ -1102,7 +1030,7 @@ default void modifyTableStoreFileTracker(TableName tableName, String dstSFT) thr * @throws IOException if a remote or network exception occurs */ Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT) - throws IOException; + throws IOException; /** * Shuts down the HBase cluster. @@ -1125,22 +1053,21 @@ Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT /** * Check whether Master is in maintenance mode. - * * @throws IOException if a remote or network exception occurs */ - boolean isMasterInMaintenanceMode() throws IOException; + boolean isMasterInMaintenanceMode() throws IOException; /** * Stop the designated regionserver. - * * @param hostnamePort Hostname and port delimited by a : as in - * example.org:1234 + * example.org:1234 * @throws IOException if a remote or network exception occurs */ void stopRegionServer(String hostnamePort) throws IOException; /** * Get whole cluster metrics, containing status about: + * *

        * hbase version
        * cluster id
    @@ -1150,6 +1077,7 @@ Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT
        * balancer
        * regions in transition
        * 
    + * * @return cluster metrics * @throws IOException if a remote or network exception occurs */ @@ -1189,9 +1117,8 @@ default Collection getRegionServers() throws IOException { } /** - * Retrieve all current live region servers including decommissioned - * if excludeDecommissionedRS is false, else non-decommissioned ones only - * + * Retrieve all current live region servers including decommissioned if excludeDecommissionedRS is + * false, else non-decommissioned ones only * @param excludeDecommissionedRS should we exclude decommissioned RS nodes * @return all current live region servers including/excluding decommissioned hosts * @throws IOException if a remote or network exception occurs @@ -1199,19 +1126,17 @@ default Collection getRegionServers() throws IOException { default Collection getRegionServers(boolean excludeDecommissionedRS) throws IOException { List allServers = - getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).getServersName(); + getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).getServersName(); if (!excludeDecommissionedRS) { return allServers; } List decommissionedRegionServers = listDecommissionedRegionServers(); - return allServers.stream() - .filter(s -> !decommissionedRegionServers.contains(s)) - .collect(ImmutableList.toImmutableList()); + return allServers.stream().filter(s -> !decommissionedRegionServers.contains(s)) + .collect(ImmutableList.toImmutableList()); } /** * Get {@link RegionMetrics} of all regions hosted on a regionserver. - * * @param serverName region server from which {@link RegionMetrics} is required. * @return a {@link RegionMetrics} list of all regions hosted on a region server * @throws IOException if a remote or network exception occurs @@ -1220,14 +1145,13 @@ default Collection getRegionServers(boolean excludeDecommissionedRS) /** * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table. - * * @param serverName region server from which {@link RegionMetrics} is required. * @param tableName get {@link RegionMetrics} of regions belonging to the table * @return region metrics map of all regions of a table hosted on a region server * @throws IOException if a remote or network exception occurs */ - List getRegionMetrics(ServerName serverName, - TableName tableName) throws IOException; + List getRegionMetrics(ServerName serverName, TableName tableName) + throws IOException; /** * @return Configuration used by the instance. @@ -1303,7 +1227,6 @@ NamespaceDescriptor getNamespaceDescriptor(String name) /** * List available namespaces - * * @return List of namespace names * @throws IOException if a remote or network exception occurs */ @@ -1311,7 +1234,6 @@ NamespaceDescriptor getNamespaceDescriptor(String name) /** * List available namespace descriptors - * * @return List of descriptors * @throws IOException if a remote or network exception occurs */ @@ -1335,7 +1257,6 @@ NamespaceDescriptor getNamespaceDescriptor(String name) /** * Get the regions of a given table. - * * @param tableName the name of the table * @return List of {@link RegionInfo}. * @throws IOException if a remote or network exception occurs @@ -1347,13 +1268,11 @@ NamespaceDescriptor getNamespaceDescriptor(String name) /** * Get tableDescriptors. - * * @param tableNames List of table names * @return returns a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ - List listTableDescriptors(List tableNames) - throws IOException; + List listTableDescriptors(List tableNames) throws IOException; /** * Abort a procedure. 
@@ -1374,16 +1293,15 @@ default boolean abortProcedure(long procId, boolean mayInterruptIfRunning) throw } /** - * Abort a procedure but does not block and wait for completion. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. - * + * Abort a procedure but does not block and wait for completion. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. Do not use. Usually it is ignored but if not, it can + * do more damage than good. See hbck2. * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? - * @return true if aborted, false if procedure already completed or does not exist + * @return true if aborted, false if procedure already completed or does + * not exist * @throws IOException if a remote or network exception occurs * @deprecated since 2.1.1 and will be removed in 4.0.0. * @see HBASE-21223 @@ -1408,11 +1326,9 @@ Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) /** * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file. - * * Note that the actual rolling of the log writer is asynchronous and may not be complete when - * this method returns. As a side effect of this call, the named region server may schedule - * store flushes at the request of the wal. - * + * this method returns. As a side effect of this call, the named region server may schedule store + * flushes at the request of the wal. * @param serverName The servername of the regionserver. * @throws IOException if a remote or network exception occurs * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException @@ -1425,14 +1341,12 @@ Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames() */ default List getMasterCoprocessorNames() throws IOException { - return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)) - .getMasterCoprocessorNames(); + return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)).getMasterCoprocessorNames(); } /** * Get the current compaction state of a table. It could be in a major compaction, a minor * compaction, both, or none. - * * @param tableName table to examine * @return the current compaction state * @throws IOException if a remote or network exception occurs @@ -1441,19 +1355,17 @@ default List getMasterCoprocessorNames() throws IOException { /** * Get the current compaction state of a table. It could be in a compaction, or none. - * * @param tableName table to examine * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @return the current compaction state * @throws IOException if a remote or network exception occurs */ - CompactionState getCompactionState(TableName tableName, - CompactType compactType) throws IOException; + CompactionState getCompactionState(TableName tableName, CompactType compactType) + throws IOException; /** * Get the current compaction state of region. 
It could be in a major compaction, a minor * compaction, both, or none. - * * @param regionName region to examine * @return the current compaction state * @throws IOException if a remote or network exception occurs @@ -1461,11 +1373,8 @@ CompactionState getCompactionState(TableName tableName, CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException; /** - * Get the timestamp of the last major compaction for the passed table - * - * The timestamp of the oldest HFile resulting from a major compaction of that table, - * or 0 if no such HFile could be found. - * + * Get the timestamp of the last major compaction for the passed table The timestamp of the oldest + * HFile resulting from a major compaction of that table, or 0 if no such HFile could be found. * @param tableName table to examine * @return the last major compaction timestamp or 0 * @throws IOException if a remote or network exception occurs @@ -1473,11 +1382,9 @@ CompactionState getCompactionState(TableName tableName, long getLastMajorCompactionTimestamp(TableName tableName) throws IOException; /** - * Get the timestamp of the last major compaction for the passed region. - * - * The timestamp of the oldest HFile resulting from a major compaction of that region, - * or 0 if no such HFile could be found. - * + * Get the timestamp of the last major compaction for the passed region. The timestamp of the + * oldest HFile resulting from a major compaction of that region, or 0 if no such HFile could be + * found. * @param regionName region to examine * @return the last major compaction timestamp or 0 * @throws IOException if a remote or network exception occurs @@ -1486,10 +1393,10 @@ CompactionState getCompactionState(TableName tableName, /** * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be - * taken. If the table is disabled, an offline snapshot is taken. Snapshots are taken - * sequentially even when requested concurrently, across all tables. Snapshots are considered - * unique based on the name of the snapshot. Attempts to take a snapshot with the same - * name (even a different type or with different parameters) will fail with a + * taken. If the table is disabled, an offline snapshot is taken. Snapshots are taken sequentially + * even when requested concurrently, across all tables. Snapshots are considered unique based on + * the name of the snapshot. Attempts to take a snapshot with the same name (even a + * different type or with different parameters) will fail with a * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate * naming. Snapshot names follow the same naming constraints as tables in HBase. See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. @@ -1506,10 +1413,10 @@ default void snapshot(String snapshotName, TableName tableName) /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See + * snapshot. Snapshots are taken sequentially even when requested concurrently, across all + * tables. 
Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other * snapshots stored on the cluster @@ -1526,48 +1433,45 @@ default void snapshot(String snapshotName, TableName tableName, SnapshotType typ /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * Snapshot can live with ttl seconds. - * - * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other - * snapshots stored on the cluster - * @param tableName name of the table to snapshot - * @param type type of snapshot to take + * snapshot. Snapshots are taken sequentially even when requested concurrently, across all + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. Snapshot can + * live with ttl seconds. + * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other + * snapshots stored on the cluster + * @param tableName name of the table to snapshot + * @param type type of snapshot to take * @param snapshotProps snapshot additional properties e.g. TTL - * @throws IOException we fail to reach the master + * @throws IOException we fail to reach the master * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ default void snapshot(String snapshotName, TableName tableName, SnapshotType type, - Map snapshotProps) throws IOException, - SnapshotCreationException, IllegalArgumentException { + Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, type, snapshotProps)); } /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * Snapshot can live with ttl seconds. 
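A sketch of the snapshot-with-properties variant described above; the "TTL" property key and the one-day value are assumptions used only for illustration:

  Map<String, Object> snapshotProps = new HashMap<>();
  snapshotProps.put("TTL", 86400L); // assumed TTL property, in seconds
  admin.snapshot("demo_snapshot", TableName.valueOf("demo_table"), SnapshotType.FLUSH, snapshotProps);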
- * - * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other - * snapshots stored on the cluster - * @param tableName name of the table to snapshot + * snapshot. Snapshots are taken sequentially even when requested concurrently, across all + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. Snapshot can + * live with ttl seconds. + * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other + * snapshots stored on the cluster + * @param tableName name of the table to snapshot * @param snapshotProps snapshot additional properties e.g. TTL - * @throws IOException we fail to reach the master + * @throws IOException we fail to reach the master * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ - default void snapshot(String snapshotName, TableName tableName, - Map snapshotProps) throws IOException, - SnapshotCreationException, IllegalArgumentException { + default void snapshot(String snapshotName, TableName tableName, Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, SnapshotType.FLUSH, snapshotProps)); } @@ -1593,7 +1497,6 @@ void snapshot(SnapshotDescription snapshot) * Take a snapshot without waiting for the server to complete that snapshot (asynchronous). * Snapshots are considered unique based on the name of the snapshot. Snapshots are taken * sequentially even when requested concurrently, across all tables. - * * @param snapshot snapshot to take * @throws IOException if the snapshot did not succeed or we lose contact with the master. * @throws SnapshotCreationException if snapshot creation failed @@ -1603,20 +1506,22 @@ Future snapshotAsync(SnapshotDescription snapshot) throws IOException, SnapshotCreationException; /** - * Check the current state of the passed snapshot. There are three possible states:
- * running - returns false
- * finished - returns true
- * finished with error - throws the exception that caused the snapshot to fail
- * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
- * run/started since the snapshot you are checking, you will receive an {@link
- * org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}.
- *
+ * Check the current state of the passed snapshot. There are three possible states:
+ * running - returns false
+ * finished - returns true
+ * finished with error - throws the exception that caused the snapshot to fail
    + * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been + * run/started since the snapshot you are checking, you will receive an + * {@link org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}. * @param snapshot description of the snapshot to check * @return true if the snapshot is completed, false if the snapshot is still - * running + * running * @throws IOException if we have a network issue * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is - * unknown + * unknown */ boolean isSnapshotFinished(SnapshotDescription snapshot) throws IOException, HBaseSnapshotException, UnknownSnapshotException; @@ -1667,7 +1572,7 @@ default void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot) * @throws IllegalArgumentException if the restore request is formatted incorrectly */ void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl) - throws IOException, RestoreSnapshotException; + throws IOException, RestoreSnapshotException; /** * Create a new table by cloning the snapshot content. @@ -1695,8 +1600,7 @@ default void cloneSnapshot(String snapshotName, TableName tableName) * @throws IllegalArgumentException if the specified table has not a valid name */ default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl, - String customSFT) - throws IOException, TableExistsException, RestoreSnapshotException { + String customSFT) throws IOException, TableExistsException, RestoreSnapshotException { get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -1746,8 +1650,7 @@ default Future cloneSnapshotAsync(String snapshotName, TableName tableName * @throws IllegalArgumentException if the specified table has not a valid name */ default Future cloneSnapshotAsync(String snapshotName, TableName tableName, - boolean restoreAcl) - throws IOException, TableExistsException, RestoreSnapshotException { + boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException { return cloneSnapshotAsync(snapshotName, tableName, restoreAcl, null); } @@ -1763,15 +1666,14 @@ default Future cloneSnapshotAsync(String snapshotName, TableName tableName * @throws IllegalArgumentException if the specified table has not a valid name */ Future cloneSnapshotAsync(String snapshotName, TableName tableName, boolean restoreAcl, - String customSFT) throws IOException, TableExistsException, RestoreSnapshotException; + String customSFT) throws IOException, TableExistsException, RestoreSnapshotException; /** * Execute a distributed procedure on a cluster. - * * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). + * root ZK node name of the procedure). * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. + * optional. * @param props Property/Value pairs of properties passing to the procedure * @throws IOException if a remote or network exception occurs */ @@ -1780,11 +1682,10 @@ void execProcedure(String signature, String instance, Map props) /** * Execute a distributed procedure on a cluster. - * * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). 
+ * root ZK node name of the procedure). * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. + * optional. * @param props Property/Value pairs of properties passing to the procedure * @return data returned after procedure execution. null if no return data. * @throws IOException if a remote or network exception occurs @@ -1793,14 +1694,17 @@ byte[] execProcedureWithReturn(String signature, String instance, Map - *
- * running - returns false
- * finished - returns true
- * finished with error - throws the exception that caused the procedure to fail
- *
+ * Check the current state of the specified procedure. There are three possible states:
+ * running - returns false
+ * finished - returns true
+ * finished with error - throws the exception that caused the procedure to fail
    * @param signature The signature that uniquely identifies a procedure * @param instance The instance name of the procedure * @param props Property/Value pairs of properties passing to the procedure - * @return true if the specified procedure is finished successfully, false if it is still running + * @return true if the specified procedure is finished successfully, + * false if it is still running * @throws IOException if the specified procedure finished with error */ boolean isProcedureFinished(String signature, String instance, Map props) @@ -1808,7 +1712,6 @@ boolean isProcedureFinished(String signature, String instance, Map listTableSnapshots(Pattern tableNamePattern, /** * Delete an existing snapshot. - * * @param snapshotName name of the snapshot * @throws IOException if a remote or network exception occurs */ @@ -1844,7 +1745,6 @@ List listTableSnapshots(Pattern tableNamePattern, /** * Delete existing snapshots whose names match the pattern passed. - * * @param pattern pattern for names of the snapshot to match * @throws IOException if a remote or network exception occurs */ @@ -1862,7 +1762,6 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) /** * Apply the new quota settings. - * * @param quota the quota settings * @throws IOException if a remote or network exception occurs */ @@ -1885,8 +1784,8 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service * invocations: *

    - *

    - *
    + *
    + * *
        * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
        * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
    @@ -1895,8 +1794,8 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
        *     .build();
        * MyCallResponse response = service.myCall(null, request);
        * 
    - *
    - *
    + * + *
    * @return A MasterCoprocessorRpcChannel instance * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any * more. Use the coprocessorService methods in {@link AsyncAdmin} instead. @@ -1904,7 +1803,6 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) @Deprecated CoprocessorRpcChannel coprocessorService(); - /** * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} * instance connected to the passed region server. @@ -1915,6 +1813,7 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) * invocations: *

    *

    + * *
        * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
        * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
    @@ -1923,8 +1822,8 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
        *     .build();
        * MyCallResponse response = service.myCall(null, request);
        * 
    - *
    - *
    + * + * * @param serverName the server name to which the endpoint call is made * @return A RegionServerCoprocessorRpcChannel instance * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any @@ -1933,25 +1832,22 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) @Deprecated CoprocessorRpcChannel coprocessorService(ServerName serverName); - /** - * Update the configuration and trigger an online config change - * on the regionserver. + * Update the configuration and trigger an online config change on the regionserver. * @param server : The server whose config needs to be updated. * @throws IOException if a remote or network exception occurs */ void updateConfiguration(ServerName server) throws IOException; /** - * Update the configuration and trigger an online config change - * on all the regionservers. + * Update the configuration and trigger an online config change on all the regionservers. * @throws IOException if a remote or network exception occurs */ void updateConfiguration() throws IOException; /** - * Update the configuration and trigger an online config change - * on all the regionservers in the RSGroup. + * Update the configuration and trigger an online config change on all the regionservers in the + * RSGroup. * @param groupName the group name * @throws IOException if a remote or network exception occurs */ @@ -2068,8 +1964,7 @@ Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerCo * @throws IOException if a remote or network exception occurs */ default void removeReplicationPeer(String peerId) throws IOException { - get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(), - TimeUnit.MILLISECONDS); + get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** @@ -2174,7 +2069,7 @@ default void appendReplicationPeerTableCFs(String id, Map listDecommissionedRegionServers() throws IOException; /** - * Remove decommission marker from a region server to allow regions assignments. - * Load regions onto the server if a list of regions is given. Region loading is - * asynchronous. + * Remove decommission marker from a region server to allow regions assignments. Load regions onto + * the server if a list of regions is given. Region loading is asynchronous. * @param server The server to recommission. * @param encodedRegionNames Regions to load onto the server. * @throws IOException if a remote or network exception occurs @@ -2310,7 +2204,7 @@ void recommissionRegionServer(ServerName server, List encodedRegionNames * @throws InterruptedException */ void clearCompactionQueues(ServerName serverName, Set queues) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** * List dead region servers. @@ -2354,8 +2248,8 @@ void cloneTableSchema(TableName tableName, TableName newTableName, boolean prese boolean isRpcThrottleEnabled() throws IOException; /** - * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota - * can be exceeded if region server has availble quota. + * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota can be + * exceeded if region server has availble quota. * @param enable Set to true to enable, false to disable. 
* @return Previous exceed throttle enabled value * @throws IOException if a remote or network exception occurs @@ -2372,8 +2266,8 @@ void cloneTableSchema(TableName tableName, TableName newTableName, boolean prese * Fetches the observed {@link SpaceQuotaSnapshotView}s observed by a RegionServer. * @throws IOException if a remote or network exception occurs */ - Map getRegionServerSpaceQuotaSnapshots( - ServerName serverName) throws IOException; + Map + getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException; /** * Returns the Master's view of a quota on the given {@code namespace} or null if the Master has @@ -2438,35 +2332,30 @@ default List hasUserPermissions(List permissions) throws IO /** * Turn on or off the auto snapshot cleanup based on TTL. - * * @param on Set to true to enable, false to disable. * @param synchronous If true, it waits until current snapshot cleanup is completed, - * if outstanding. + * if outstanding. * @return Previous auto snapshot cleanup value * @throws IOException if a remote or network exception occurs */ - boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) - throws IOException; + boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) throws IOException; /** * Query the current state of the auto snapshot cleanup based on TTL. - * - * @return true if the auto snapshot cleanup is enabled, - * false otherwise. + * @return true if the auto snapshot cleanup is enabled, false + * otherwise. * @throws IOException if a remote or network exception occurs */ boolean isSnapshotCleanupEnabled() throws IOException; /** - * Retrieves online slow/large RPC logs from the provided list of - * RegionServers - * + * Retrieves online slow/large RPC logs from the provided list of RegionServers * @param serverNames Server names to get slowlog responses from * @param logQueryFilter filter to be used if provided (determines slow / large RPC logs) * @return online slowlog response list * @throws IOException if a remote or network exception occurs - * @deprecated since 2.4.0 and will be removed in 4.0.0. - * Use {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. 
*/ @Deprecated default List getSlowLogResponses(final Set serverNames, @@ -2483,24 +2372,20 @@ default List getSlowLogResponses(final Set serverNa filterParams.put("tableName", logQueryFilter.getTableName()); filterParams.put("userName", logQueryFilter.getUserName()); filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString()); - List logEntries = - getLogEntries(serverNames, logType, ServerType.REGION_SERVER, logQueryFilter.getLimit(), - filterParams); + List logEntries = getLogEntries(serverNames, logType, ServerType.REGION_SERVER, + logQueryFilter.getLimit(), filterParams); return logEntries.stream().map(logEntry -> (OnlineLogRecord) logEntry) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } /** - * Clears online slow/large RPC logs from the provided list of - * RegionServers - * + * Clears online slow/large RPC logs from the provided list of RegionServers * @param serverNames Set of Server names to clean slowlog responses from - * @return List of booleans representing if online slowlog response buffer is cleaned - * from each RegionServer + * @return List of booleans representing if online slowlog response buffer is cleaned from each + * RegionServer * @throws IOException if a remote or network exception occurs */ - List clearSlowLogResponses(final Set serverNames) - throws IOException; + List clearSlowLogResponses(final Set serverNames) throws IOException; /** * Creates a new RegionServer group with the given name @@ -2559,7 +2444,7 @@ List clearSlowLogResponses(final Set serverNames) * @see #listTablesInRSGroup(String) */ Pair, List> getConfiguredNamespacesAndTablesInRSGroup(String groupName) - throws IOException; + throws IOException; /** * Remove RegionServer group associated with the given name @@ -2569,11 +2454,10 @@ Pair, List> getConfiguredNamespacesAndTablesInRSGroup(St void removeRSGroup(String groupName) throws IOException; /** - * Remove decommissioned servers from group - * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline - * the server for repairing. Or we need to move some servers to join other clusters. - * So we need to remove these servers from the group. - * 2. Dead/recovering/live servers will be disallowed. + * Remove decommissioned servers from group 1. Sometimes we may find the server aborted due to + * some hardware failure and we must offline the server for repairing. Or we need to move some + * servers to join other clusters. So we need to remove these servers from the group. 2. + * Dead/recovering/live servers will be disallowed. * @param servers set of servers to remove * @throws IOException if a remote or network exception occurs */ @@ -2606,9 +2490,8 @@ default BalanceResponse balanceRSGroup(String groupName) throws IOException { } /** - * Balance regions in the given RegionServer group, running based on - * the given {@link BalanceRequest}. - * + * Balance regions in the given RegionServer group, running based on the given + * {@link BalanceRequest}. * @return BalanceResponse details about the balancer run */ BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException; @@ -2630,12 +2513,11 @@ default BalanceResponse balanceRSGroup(String groupName) throws IOException { void updateRSGroupConfig(String groupName, Map configuration) throws IOException; /** - * Retrieve recent online records from HMaster / RegionServers. - * Examples include slow/large RPC logs, balancer decisions by master. 
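A sketch of the kind of query this enables; the "SLOW_LOG" log type string is an assumption here (only the "tableName" filter key is shown in this interface), and serverName is a placeholder:

  Map<String, Object> filterParams = new HashMap<>();
  filterParams.put("tableName", "demo_table");
  List<LogEntry> slowLogs = admin.getLogEntries(Collections.singleton(serverName),
      "SLOW_LOG", ServerType.REGION_SERVER, 100, filterParams);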
- * - * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. + * Retrieve recent online records from HMaster / RegionServers. Examples include slow/large RPC + * logs, balancer decisions by master. + * @param serverNames servers to retrieve records from, useful in case of records maintained by + * RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. * @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response @@ -2643,6 +2525,6 @@ default BalanceResponse balanceRSGroup(String groupName) throws IOException { * @return Log entries representing online records from servers * @throws IOException if a remote or network exception occurs */ - List getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams) throws IOException; + List getLogEntries(Set serverNames, String logType, ServerType serverType, + int limit, Map filterParams) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 4559e90e4c97..e04ef440e4c1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -96,7 +96,7 @@ public AdminOverAsyncAdmin(Connection conn, RawAsyncHBaseAdmin admin) { this.operationTimeout = conn.getConfiguration().getInt( HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); this.syncWaitTimeout = - conn.getConfiguration().getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min + conn.getConfiguration().getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min } @Override @@ -136,8 +136,7 @@ public List listTableDescriptors() throws IOException { } @Override - public List listTableDescriptors(boolean includeSysTables) - throws IOException { + public List listTableDescriptors(boolean includeSysTables) throws IOException { return get(admin.listTableDescriptors(includeSysTables)); } @@ -230,13 +229,13 @@ public Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFa @Override public Future modifyColumnFamilyAsync(TableName tableName, - ColumnFamilyDescriptor columnFamily) throws IOException { + ColumnFamilyDescriptor columnFamily) throws IOException { return admin.modifyColumnFamily(tableName, columnFamily); } @Override public Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] family, - String dstSFT) throws IOException { + String dstSFT) throws IOException { return admin.modifyColumnFamilyStoreFileTracker(tableName, family, dstSFT); } @@ -380,7 +379,6 @@ public boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOExc return get(admin.balancerSwitch(onOrOff, synchronous)); } - public BalanceResponse balance(BalanceRequest request) throws IOException { return get(admin.balance(request)); } @@ -483,7 +481,7 @@ public Future modifyTableAsync(TableDescriptor td) throws IOException { @Override public Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT) - throws IOException { + throws IOException { return admin.modifyTableStoreFileTracker(tableName, dstSFT); } @@ -656,14 +654,14 @@ public void restoreSnapshot(String snapshotName) throws IOException, RestoreSnap @Override public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl) - throws IOException, RestoreSnapshotException { + throws IOException, RestoreSnapshotException { get(admin.restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl)); } @Override public Future cloneSnapshotAsync(String snapshotName, TableName tableName, - boolean restoreAcl, String customSFT) - throws IOException, TableExistsException, RestoreSnapshotException { + boolean restoreAcl, String customSFT) + throws IOException, TableExistsException, RestoreSnapshotException { return admin.cloneSnapshot(snapshotName, tableName, restoreAcl, customSFT); } @@ -779,14 +777,14 @@ public Message callBlockingMethod(MethodDescriptor method, RpcController control @Override public CoprocessorRpcChannel coprocessorService() { return new SyncCoprocessorRpcChannelOverAsync( - new MasterCoprocessorRpcChannelImpl(admin. newMasterCaller())); + new MasterCoprocessorRpcChannelImpl(admin. newMasterCaller())); } @SuppressWarnings("deprecation") @Override public CoprocessorRpcChannel coprocessorService(ServerName serverName) { return new SyncCoprocessorRpcChannelOverAsync(new RegionServerCoprocessorRpcChannelImpl( - admin. newServerCaller().serverName(serverName))); + admin. 
newServerCaller().serverName(serverName))); } @Override @@ -947,8 +945,8 @@ public Map getSpaceQuotaTableSizes() throws IOException { } @Override - public Map getRegionServerSpaceQuotaSnapshots( - ServerName serverName) throws IOException { + public Map + getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException { return get(admin.getRegionServerSpaceQuotaSnapshots(serverName)); } @@ -975,8 +973,8 @@ public void revoke(UserPermission userPermission) throws IOException { } @Override - public List getUserPermissions( - GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { + public List + getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { return get(admin.getUserPermissions(getUserPermissionsRequest)); } @@ -998,8 +996,7 @@ public boolean isSnapshotCleanupEnabled() throws IOException { } @Override - public List clearSlowLogResponses(final Set serverNames) - throws IOException { + public List clearSlowLogResponses(final Set serverNames) throws IOException { return get(admin.clearSlowLogResponses(serverNames)); } @@ -1024,7 +1021,8 @@ public void removeRSGroup(String groupName) throws IOException { } @Override - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { return get(admin.balanceRSGroup(groupName, request)); } @@ -1040,7 +1038,7 @@ public List listTablesInRSGroup(String groupName) throws IOException @Override public Pair, List> - getConfiguredNamespacesAndTablesInRSGroup(String groupName) throws IOException { + getConfiguredNamespacesAndTablesInRSGroup(String groupName) throws IOException { return get(admin.getConfiguredNamespacesAndTablesInRSGroup(groupName)); } @@ -1077,8 +1075,7 @@ public void updateRSGroupConfig(String groupName, Map configurat @Override public List getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams) - throws IOException { + ServerType serverType, int limit, Map filterParams) throws IOException { return get(admin.getLogEntries(serverNames, logType, serverType, limit, filterParams)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java index 10933abf3cf2..7ead41af6984 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Optional; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java index 8d21994c23e0..3ef28308f1c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Arrays; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java index 41b3845fc784..3648ca220ecd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -38,12 +38,12 @@ /** * Performs Append operations on a single row. *

    - * This operation ensures atomicty to readers. Appends are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. + * This operation ensures atomicity to readers. Appends are done under a single row lock, so write + * operations to a row are synchronized, and readers are guaranteed to see this operation fully + * completed. *

    - * To append to a set of columns of a row, instantiate an Append object with the - * row to append to. At least one column to append must be specified using the + * To append to a set of columns of a row, instantiate an Append object with the row to append to. + * At least one column to append must be specified using the * {@link #addColumn(byte[], byte[], byte[])} method. */ @InterfaceAudience.Public @@ -55,12 +55,11 @@ public class Append extends Mutation { /** * Sets the TimeRange to be used on the Get for this append. *

    - * This is useful for when you have counters that only last for specific - * periods of time (ie. counters that are partitioned by time). By setting - * the range of valid times for this append, you can potentially gain - * some performance with a more optimal Get operation. - * Be careful adding the time range to this class as you will update the old cell if the - * time range doesn't include the latest cells. + * This is useful for when you have counters that only last for specific periods of time (ie. + * counters that are partitioned by time). By setting the range of valid times for this append, + * you can potentially gain some performance with a more optimal Get operation. Be careful adding + * the time range to this class as you will update the old cell if the time range doesn't include + * the latest cells. *

    * This range is used as [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive @@ -81,15 +80,13 @@ public TimeRange getTimeRange() { } @Override - protected long extraHeapSize(){ + protected long extraHeapSize() { return HEAP_OVERHEAD; } /** - * @param returnResults - * True (default) if the append operation should return the results. - * A client that is not interested in the result can save network - * bandwidth setting this to false. + * @param returnResults True (default) if the append operation should return the results. A client + * that is not interested in the result can save network bandwidth setting this to false. */ @Override public Append setReturnResults(boolean returnResults) { @@ -115,6 +112,7 @@ public boolean isReturnResults() { public Append(byte[] row) { this(row, 0, row.length); } + /** * Copy constructor * @param appendToCopy append to copy @@ -124,27 +122,27 @@ public Append(Append appendToCopy) { this.tr = appendToCopy.getTimeRange(); } - /** Create a Append operation for the specified row. + /** + * Create a Append operation for the specified row. *
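A minimal sketch of the Append usage the Javadoc above describes (single-row append, optional Get TimeRange, and the returnResults flag). The table name, column family, and the already-open Connection named "connection" are assumptions for illustration only:

  // Append a suffix to one cell of "row1"; readers never observe a partial append.
  try (Table table = connection.getTable(TableName.valueOf("demo"))) {
    Append append = new Append(Bytes.toBytes("row1"));
    append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes(",next-event"));
    // Restrict the server-side Get to recent cells; the range is [minStamp, maxStamp).
    append.setTimeRange(System.currentTimeMillis() - 60_000, Long.MAX_VALUE);
    // Skip returning the appended value to save network bandwidth.
    append.setReturnResults(false);
    table.append(append);
  }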

    * At least one column must be appended to. * @param rowArray Makes a copy out of this buffer. * @param rowOffset * @param rowLength */ - public Append(final byte [] rowArray, final int rowOffset, final int rowLength) { + public Append(final byte[] rowArray, final int rowOffset, final int rowLength) { checkRow(rowArray, rowOffset, rowLength); this.row = Bytes.copy(rowArray, rowOffset, rowLength); } /** - * Construct the Append with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. + * Construct the Append with user defined data. NOTED: 1) all cells in the familyMap must have the + * Type.Put 2) the row of each cell must be same with passed row. * @param row row. CAN'T be null * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Append(byte[] row, long ts, NavigableMap> familyMap) { + public Append(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index b0dc0c16d9e6..baf318a3bfe4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -170,8 +170,8 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ /** * Creates a new table with an initial set of empty regions defined by the specified split keys. - * The total number of regions created will be the number of split keys plus one. - * Note : Avoid passing empty split key. + * The total number of regions created will be the number of split keys plus one. Note : Avoid + * passing empty split key. * @param desc table descriptor for table * @param splitKeys array of split keys for the initial regions of the table */ @@ -241,8 +241,7 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added */ - CompletableFuture addColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily); + CompletableFuture addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily); /** * Delete a column family from a table. @@ -266,7 +265,7 @@ CompletableFuture modifyColumnFamily(TableName tableName, * @param dstSFT the destination store file tracker */ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, - String dstSFT); + String dstSFT); /** * Create a new namespace. @@ -322,8 +321,8 @@ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, CompletableFuture flush(TableName tableName); /** - * Flush the specified column family stores on all regions of the passed table. - * This runs as a synchronous operation. + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. * @param tableName table to flush * @param columnFamily column family within a table */ @@ -351,8 +350,8 @@ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, /** * Compact a table. When the returned CompletableFuture is done, it only means the compact request - * was sent to HBase and may need some time to finish the compact operation. 
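A short sketch of the createTable-with-split-keys and column-family flush calls documented in the AsyncAdmin hunk above, assuming an existing AsyncAdmin instance named "asyncAdmin"; the table name and split points are illustrative and no split key may be empty:

  // Pre-split the table into four regions, then flush only the "cf" stores once it exists.
  byte[][] splits = { Bytes.toBytes("g"), Bytes.toBytes("n"), Bytes.toBytes("t") };
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .build();
  asyncAdmin.createTable(desc, splits)
      .thenCompose(ignored -> asyncAdmin.flush(TableName.valueOf("demo"), Bytes.toBytes("cf")))
      .join();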
- * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to compact */ default CompletableFuture compact(TableName tableName) { @@ -362,8 +361,7 @@ default CompletableFuture compact(TableName tableName) { /** * Compact a column family within a table. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to compact * @param columnFamily column family within a table. If not present, compact the table's all * column families. @@ -374,9 +372,9 @@ default CompletableFuture compact(TableName tableName, byte[] columnFamily /** * Compact a table. When the returned CompletableFuture is done, it only means the compact request - * was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for - * normal compaction type. + * was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for normal compaction + * type. * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ @@ -385,8 +383,7 @@ default CompletableFuture compact(TableName tableName, byte[] columnFamily /** * Compact a column family within a table. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for * normal compaction type. * @param tableName table to compact * @param columnFamily column family within a table @@ -414,8 +411,8 @@ CompletableFuture compact(TableName tableName, byte[] columnFamily, /** * Major compact a table. When the returned CompletableFuture is done, it only means the compact - * request was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * request was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to major compact */ default CompletableFuture majorCompact(TableName tableName) { @@ -425,8 +422,7 @@ default CompletableFuture majorCompact(TableName tableName) { /** * Major compact a column family within a table. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for * normal compaction. type. * @param tableName table to major compact * @param columnFamily column family within a table. 
If not present, major compact the table's all @@ -438,9 +434,9 @@ default CompletableFuture majorCompact(TableName tableName, byte[] columnF /** * Major compact a table. When the returned CompletableFuture is done, it only means the compact - * request was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for - * normal compaction type. + * request was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for normal compaction + * type. * @param tableName table to major compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ @@ -449,8 +445,7 @@ default CompletableFuture majorCompact(TableName tableName, byte[] columnF /** * Major compact a column family within a table. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to major compact * @param columnFamily column family within a table. If not present, major compact the table's all * column families. @@ -615,8 +610,7 @@ default CompletableFuture mergeRegions(byte[] nameOfRegionA, byte[] nameOf * @param forcible If true, force unassign (Will remove region from regions-in-transition too if * present. If results in double assignment use hbck -fix to resolve. To be used by * experts). - * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} - * instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead. * @see HBASE-24875 */ @Deprecated @@ -678,8 +672,8 @@ default CompletableFuture addReplicationPeer(String peerId, * @param peerConfig configuration for the replication slave cluster * @param enabled peer state, true if ENABLED and false if DISABLED */ - CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled); + CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled); /** * Remove a peer and stop the replication @@ -727,15 +721,15 @@ CompletableFuture transitReplicationPeerSyncReplicationState(String peerId * @param peerId a short name that identifies the peer * @return the current cluster state wrapped by a {@link CompletableFuture}. */ - default CompletableFuture getReplicationPeerSyncReplicationState( - String peerId) { + default CompletableFuture + getReplicationPeerSyncReplicationState(String peerId) { CompletableFuture future = new CompletableFuture<>(); addListener(listReplicationPeers(Pattern.compile(peerId)), (peers, error) -> { if (error != null) { future.completeExceptionally(error); } else if (peers.isEmpty() || !peers.get(0).getPeerId().equals(peerId)) { - future - .completeExceptionally(new IOException("Replication peer " + peerId + " does not exist")); + future.completeExceptionally( + new IOException("Replication peer " + peerId + " does not exist")); } else { future.complete(peers.get(0).getSyncReplicationState()); } @@ -795,10 +789,10 @@ CompletableFuture removeReplicationPeerTableCFs(String peerId, /** * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be - * taken. 
If the table is disabled, an offline snapshot is taken. Snapshots are taken - * sequentially even when requested concurrently, across all tables. Snapshots are considered - * unique based on the name of the snapshot. Attempts to take a snapshot with the same - * name (even a different type or with different parameters) will fail with a + * taken. If the table is disabled, an offline snapshot is taken. Snapshots are taken sequentially + * even when requested concurrently, across all tables. Snapshots are considered unique based on + * the name of the snapshot. Attempts to take a snapshot with the same name (even a + * different type or with different parameters) will fail with a * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate * naming. Snapshot names follow the same naming constraints as tables in HBase. See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. @@ -812,11 +806,10 @@ default CompletableFuture snapshot(String snapshotName, TableName tableNam /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the * snapshot. Snapshots are taken sequentially even when requested concurrently, across all - * tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a - * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate - * naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} + * indicating the duplicate naming. Snapshot names follow the same naming constraints as tables in + * HBase. See {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other * snapshots stored on the cluster * @param tableName name of the table to snapshot @@ -828,15 +821,15 @@ default CompletableFuture snapshot(String snapshotName, TableName tableNam } /** - * Take a snapshot and wait for the server to complete that snapshot asynchronously. Snapshots - * are taken sequentially even when requested concurrently, across all tables. Snapshots are - * considered unique based on the name of the snapshot. - * Attempts to take a snapshot with the same name (even a different type or with different - * parameters) will fail with a {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} - * indicating the duplicate naming. Snapshot names follow the same naming constraints as tables in - * HBase. See {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * You should probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} unless you - * are sure about the type of snapshot that you want to take. + * Take a snapshot and wait for the server to complete that snapshot asynchronously. Snapshots are + * taken sequentially even when requested concurrently, across all tables. Snapshots are + * considered unique based on the name of the snapshot. 
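A hedged sketch of the snapshot semantics described above (FLUSH-type snapshot for an enabled table, offline snapshot for a disabled one, names unique cluster-wide). Snapshot and table names are illustrative:

  // Taking the same snapshot name twice fails with SnapshotCreationException.
  asyncAdmin.snapshot("demo-snap-2022", TableName.valueOf("demo")).join();
  // Materialize the snapshot as a new table (assumes the cloneSnapshot default overload).
  asyncAdmin.cloneSnapshot("demo-snap-2022", TableName.valueOf("demo_copy")).join();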
Attempts to take a snapshot with + * the same name (even a different type or with different parameters) will fail with a + * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate + * naming. Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should + * probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} unless you are sure + * about the type of snapshot that you want to take. * @param snapshot snapshot to take */ CompletableFuture snapshot(SnapshotDescription snapshot); @@ -1031,8 +1024,8 @@ CompletableFuture isProcedureFinished(String signature, String instance Map props); /** - * Abort a procedure - * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. + * Abort a procedure Do not use. Usually it is ignored but if not, it can do more damage than + * good. See hbck2. * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if aborted, false if procedure already completed or does not exist. the value is @@ -1056,10 +1049,10 @@ CompletableFuture isProcedureFinished(String signature, String instance CompletableFuture getLocks(); /** - * Mark region server(s) as decommissioned to prevent additional regions from getting - * assigned to them. Optionally unload the regions on the servers. If there are multiple servers - * to be decommissioned, decommissioning them at the same time can prevent wasteful region - * movements. Region unloading is asynchronous. + * Mark region server(s) as decommissioned to prevent additional regions from getting assigned to + * them. Optionally unload the regions on the servers. If there are multiple servers to be + * decommissioned, decommissioning them at the same time can prevent wasteful region movements. + * Region unloading is asynchronous. * @param servers The list of servers to decommission. 
* @param offload True to offload the regions from the decommissioned servers */ @@ -1102,7 +1095,7 @@ default CompletableFuture getMaster() { */ default CompletableFuture> getBackupMasters() { return getClusterMetrics(EnumSet.of(Option.BACKUP_MASTERS)) - .thenApply(ClusterMetrics::getBackupMasterNames); + .thenApply(ClusterMetrics::getBackupMasterNames); } /** @@ -1113,8 +1106,8 @@ default CompletableFuture> getRegionServers() { .thenApply(ClusterMetrics::getServersName); } - default CompletableFuture> getRegionServers( - boolean excludeDecommissionedRS) { + default CompletableFuture> + getRegionServers(boolean excludeDecommissionedRS) { CompletableFuture> future = new CompletableFuture<>(); addListener( getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).thenApply(ClusterMetrics::getServersName), @@ -1130,7 +1123,7 @@ default CompletableFuture> getRegionServers( future.completeExceptionally(decomErr); } else { future.complete(allServers.stream().filter(s -> !decomServers.contains(s)) - .collect(ImmutableList.toImmutableList())); + .collect(ImmutableList.toImmutableList())); } }); } @@ -1152,8 +1145,8 @@ default CompletableFuture> getMasterCoprocessorNames() { * @return master info port */ default CompletableFuture getMasterInfoPort() { - return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).thenApply( - ClusterMetrics::getMasterInfoPort); + return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)) + .thenApply(ClusterMetrics::getMasterInfoPort); } /** @@ -1185,8 +1178,8 @@ default CompletableFuture getMasterInfoPort() { CompletableFuture updateConfiguration(); /** - * Update the configuration and trigger an online config change on all the regionservers in - * the RSGroup. + * Update the configuration and trigger an online config change on all the regionservers in the + * RSGroup. * @param groupName the group name */ CompletableFuture updateConfiguration(String groupName); @@ -1223,7 +1216,7 @@ default CompletableFuture getMasterInfoPort() { * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture} */ CompletableFuture> getRegionMetrics(ServerName serverName, - TableName tableName); + TableName tableName); /** * Check whether master is in maintenance mode @@ -1315,8 +1308,7 @@ default CompletableFuture balancerSwitch(boolean on) { * {@link CompletableFuture}. */ default CompletableFuture balance() { - return balance(BalanceRequest.defaultInstance()) - .thenApply(BalanceResponse::isBalancerRan); + return balance(BalanceRequest.defaultInstance()).thenApply(BalanceResponse::isBalancerRan); } /** @@ -1326,21 +1318,17 @@ default CompletableFuture balance() { * @param forcible whether we should force balance even if there is region in transition. * @return True if balancer ran, false otherwise. The return value will be wrapped by a * {@link CompletableFuture}. - * @deprecated Since 2.5.0. Will be removed in 4.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)} + * instead. */ default CompletableFuture balance(boolean forcible) { - return balance( - BalanceRequest.newBuilder() - .setIgnoreRegionsInTransition(forcible) - .build() - ).thenApply(BalanceResponse::isBalancerRan); + return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(forcible).build()) + .thenApply(BalanceResponse::isBalancerRan); } /** - * Invoke the balancer with the given balance request. The BalanceRequest defines how the - * balancer will run. 
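The balance(BalanceRequest) API reflowed here can be exercised as follows; the builder and response accessors are the ones shown in this hunk, and the AsyncAdmin instance is assumed:

  // Run the balancer even if regions are in transition, then inspect the outcome.
  BalanceRequest request = BalanceRequest.newBuilder()
      .setIgnoreRegionsInTransition(true)
      .build();
  BalanceResponse response = asyncAdmin.balance(request).join();
  if (!response.isBalancerRan()) {
    // Possibly the balance switch is off; check it explicitly.
    boolean enabled = asyncAdmin.isBalancerEnabled().join();
  }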
See {@link BalanceRequest} for more details. - * + * Invoke the balancer with the given balance request. The BalanceRequest defines how the balancer + * will run. See {@link BalanceRequest} for more details. * @param request defines how the balancer should run * @return {@link BalanceResponse} with details about the results of the invocation. */ @@ -1348,8 +1336,8 @@ default CompletableFuture balance(boolean forcible) { /** * Query the current state of the balancer. - * @return true if the balance switch is on, false otherwise. The return value will be wrapped by a - * {@link CompletableFuture}. + * @return true if the balance switch is on, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture}. */ CompletableFuture isBalancerEnabled(); @@ -1393,8 +1381,8 @@ default CompletableFuture normalize() { /** * Query the current state of the cleaner chore. - * @return true if cleaner chore is on, false otherwise. The return value will be wrapped by - * a {@link CompletableFuture} + * @return true if cleaner chore is on, false otherwise. The return value will be wrapped by a + * {@link CompletableFuture} */ CompletableFuture isCleanerChoreEnabled(); @@ -1414,8 +1402,8 @@ default CompletableFuture normalize() { /** * Query on the catalog janitor state. - * @return true if the catalog janitor is on, false otherwise. The return value will be - * wrapped by a {@link CompletableFuture} + * @return true if the catalog janitor is on, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture} */ CompletableFuture isCatalogJanitorEnabled(); @@ -1437,6 +1425,7 @@ default CompletableFuture normalize() { * channel -> xxxService.newStub(channel) * *
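A sketch of the master coprocessorService(stubMaker, callable) pattern the Javadoc snippet above refers to. MyMasterService, EchoRequest, and EchoResponse are hypothetical generated protobuf classes; only the stubMaker/ServiceCaller plumbing mirrors the documented API:

  EchoRequest request = EchoRequest.newBuilder().setMessage("ping").build();
  CompletableFuture<EchoResponse> future = asyncAdmin.coprocessorService(
      MyMasterService::newStub,                            // stubMaker: channel -> newStub(channel)
      (stub, controller, done) -> stub.echo(controller, request, done));
  EchoResponse response = future.join();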

    + * * @param stubMaker a delegation to the actual {@code newStub} call. * @param callable a delegation to the actual protobuf rpc call. See the comment of * {@link ServiceCaller} for more details. @@ -1459,6 +1448,7 @@ CompletableFuture coprocessorService(Function stubMaker * channel -> xxxService.newStub(channel) * * + * * @param stubMaker a delegation to the actual {@code newStub} call. * @param callable a delegation to the actual protobuf rpc call. See the comment of * {@link ServiceCaller} for more details. @@ -1469,7 +1459,7 @@ CompletableFuture coprocessorService(Function stubMaker * @see ServiceCaller */ CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable, ServerName serverName); + ServiceCaller callable, ServerName serverName); /** * List all the dead region servers. @@ -1498,21 +1488,19 @@ default CompletableFuture> listDeadServers() { /** * Create a new table by cloning the existent table schema. - * * @param tableName name of the table to be cloned * @param newTableName name of the new table where the table will be created * @param preserveSplits True if the splits should be preserved */ - CompletableFuture cloneTableSchema(final TableName tableName, - final TableName newTableName, final boolean preserveSplits); + CompletableFuture cloneTableSchema(final TableName tableName, final TableName newTableName, + final boolean preserveSplits); /** * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing - * compactions. This state is ephemeral. The setting will be lost on restart. Compaction - * can also be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled - * in hbase-site.xml. - * - * @param switchState Set to true to enable, false to disable. + * compactions. This state is ephemeral. The setting will be lost on restart. Compaction can also + * be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled in + * hbase-site.xml. + * @param switchState Set to true to enable, false to disable. * @param serverNamesList list of region servers. * @return Previous compaction states for region servers */ @@ -1533,8 +1521,8 @@ CompletableFuture> compactionSwitch(boolean switchState CompletableFuture isRpcThrottleEnabled(); /** - * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota - * can be exceeded if region server has availble quota. + * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota can be + * exceeded if region server has availble quota. * @param enable Set to true to enable, false to disable. * @return Previous exceed throttle enabled value */ @@ -1562,8 +1550,8 @@ CompletableFuture> compactionSwitch(boolean switchState * Returns the Master's view of a quota on the given {@code tableName} or null if the Master has * no quota information on that table. */ - CompletableFuture getCurrentSpaceQuotaSnapshot( - TableName tableName); + CompletableFuture + getCurrentSpaceQuotaSnapshot(TableName tableName); /** * Grants user specific permissions @@ -1613,35 +1601,31 @@ default CompletableFuture> hasUserPermissions(List per * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code sync} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * * @param on Set to true to enable, false to disable. - * @param sync If true, it waits until current snapshot cleanup is completed, - * if outstanding. 
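The snapshotCleanupSwitch / isSnapshotCleanupEnabled pair described here, as a brief sketch with an assumed AsyncAdmin instance:

  // Disable TTL-based snapshot cleanup and wait for any in-flight cleanup to finish.
  boolean previouslyEnabled = asyncAdmin.snapshotCleanupSwitch(false, true).join();
  // Later, confirm the current state.
  boolean enabled = asyncAdmin.isSnapshotCleanupEnabled().join();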
+ * @param sync If true, it waits until current snapshot cleanup is completed, if + * outstanding. * @return Previous auto snapshot cleanup value wrapped by a {@link CompletableFuture}. */ CompletableFuture snapshotCleanupSwitch(boolean on, boolean sync); /** * Query the current state of the auto snapshot cleanup based on TTL. - * - * @return true if the auto snapshot cleanup is enabled, false otherwise. - * The return value will be wrapped by a {@link CompletableFuture}. + * @return true if the auto snapshot cleanup is enabled, false otherwise. The return value will be + * wrapped by a {@link CompletableFuture}. */ CompletableFuture isSnapshotCleanupEnabled(); /** - * Retrieves online slow RPC logs from the provided list of - * RegionServers - * + * Retrieves online slow RPC logs from the provided list of RegionServers * @param serverNames Server names to get slowlog responses from * @param logQueryFilter filter to be used if provided * @return Online slowlog response list. The return value wrapped by a {@link CompletableFuture} - * @deprecated since 2.4.0 and will be removed in 4.0.0. - * Use {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. */ @Deprecated - default CompletableFuture> getSlowLogResponses( - final Set serverNames, final LogQueryFilter logQueryFilter) { + default CompletableFuture> + getSlowLogResponses(final Set serverNames, final LogQueryFilter logQueryFilter) { String logType; if (LogQueryFilter.Type.LARGE_LOG.equals(logQueryFilter.getType())) { logType = "LARGE_LOG"; @@ -1654,21 +1638,17 @@ default CompletableFuture> getSlowLogResponses( filterParams.put("tableName", logQueryFilter.getTableName()); filterParams.put("userName", logQueryFilter.getUserName()); filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString()); - CompletableFuture> logEntries = - getLogEntries(serverNames, logType, ServerType.REGION_SERVER, logQueryFilter.getLimit(), - filterParams); - return logEntries.thenApply( - logEntryList -> logEntryList.stream().map(logEntry -> (OnlineLogRecord) logEntry) - .collect(Collectors.toList())); + CompletableFuture> logEntries = getLogEntries(serverNames, logType, + ServerType.REGION_SERVER, logQueryFilter.getLimit(), filterParams); + return logEntries.thenApply(logEntryList -> logEntryList.stream() + .map(logEntry -> (OnlineLogRecord) logEntry).collect(Collectors.toList())); } /** - * Clears online slow RPC logs from the provided list of - * RegionServers - * + * Clears online slow RPC logs from the provided list of RegionServers * @param serverNames Set of Server names to clean slowlog responses from - * @return List of booleans representing if online slowlog response buffer is cleaned - * from each RegionServer. The return value wrapped by a {@link CompletableFuture} + * @return List of booleans representing if online slowlog response buffer is cleaned from each + * RegionServer. 
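A sketch of fetching and clearing online slow-RPC logs through the getLogEntries / clearSlowLogResponses calls documented above; the hostnames and limit are illustrative:

  // Fetch up to 100 slow-RPC records from two region servers; filter params are optional.
  Set<ServerName> servers = new HashSet<>(Arrays.asList(
      ServerName.valueOf("rs1.example.com,16020,1650000000000"),
      ServerName.valueOf("rs2.example.com,16020,1650000000000")));
  List<LogEntry> slowLogs = asyncAdmin
      .getLogEntries(servers, "SLOW_LOG", ServerType.REGION_SERVER, 100, Collections.emptyMap())
      .join();
  // Clear the in-memory slow-log ring buffers afterwards.
  List<Boolean> cleared = asyncAdmin.clearSlowLogResponses(servers).join();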
The return value wrapped by a {@link CompletableFuture} */ CompletableFuture> clearSlowLogResponses(final Set serverNames); @@ -1729,7 +1709,7 @@ default CompletableFuture> getSlowLogResponses( * @see #listTablesInRSGroup(String) */ CompletableFuture, List>> - getConfiguredNamespacesAndTablesInRSGroup(String groupName); + getConfiguredNamespacesAndTablesInRSGroup(String groupName); /** * Remove RegionServer group associated with the given name @@ -1739,11 +1719,10 @@ default CompletableFuture> getSlowLogResponses( CompletableFuture removeRSGroup(String groupName); /** - * Remove decommissioned servers from group - * 1. Sometimes we may find the server aborted due to some hardware failure and we must offline - * the server for repairing. Or we need to move some servers to join other clusters. - * So we need to remove these servers from the group. - * 2. Dead/recovering/live servers will be disallowed. + * Remove decommissioned servers from group 1. Sometimes we may find the server aborted due to + * some hardware failure and we must offline the server for repairing. Or we need to move some + * servers to join other clusters. So we need to remove these servers from the group. 2. + * Dead/recovering/live servers will be disallowed. * @param servers set of servers to remove * @throws IOException if a remote or network exception occurs */ @@ -1801,12 +1780,11 @@ default CompletableFuture balanceRSGroup(String groupName) { CompletableFuture updateRSGroupConfig(String groupName, Map configuration); /** - * Retrieve recent online records from HMaster / RegionServers. - * Examples include slow/large RPC logs, balancer decisions by master. - * - * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. + * Retrieve recent online records from HMaster / RegionServers. Examples include slow/large RPC + * logs, balancer decisions by master. + * @param serverNames servers to retrieve records from, useful in case of records maintained by + * RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. * @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response @@ -1814,5 +1792,5 @@ default CompletableFuture balanceRSGroup(String groupName) { * @return Log entries representing online records from servers */ CompletableFuture> getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams); + ServerType serverType, int limit, Map filterParams); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java index 49bc350bb9a6..a00a7c64f906 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java index ffb3ae97ecff..0ad6629aa14a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java index 7a381db39c82..6bcb451e945d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public AsyncAdminRequestRetryingCaller(Timer retryTimer, AsyncConnectionImpl con long pauseNs, long pauseForCQTBENs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { super(retryTimer, conn, priority, pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + rpcTimeoutNs, startLogErrorsCnt); this.serverName = serverName; this.callable = callable; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java index 7af385da2d83..91732b1398e6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -132,7 +132,7 @@ public RegionRequest(HRegionLocation loc) { private static final class ServerRequest { public final ConcurrentMap actionsByRegion = - new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); + new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); public void addAction(HRegionLocation loc, Action action) { computeIfAbsent(actionsByRegion, loc.getRegion().getRegionName(), @@ -145,7 +145,7 @@ public void setRegionRequest(byte[] regionName, RegionRequest regionReq) { public int getPriority() { return actionsByRegion.values().stream().flatMap(rr -> rr.actions.stream()) - .mapToInt(Action::getPriority).max().orElse(HConstants.PRIORITY_UNSET); + .mapToInt(Action::getPriority).max().orElse(HConstants.PRIORITY_UNSET); } } @@ -218,10 +218,11 @@ private void logException(int tries, Supplier> regionsSupp Throwable error, ServerName serverName) { if (tries > startLogErrorsCnt) { String regions = - regionsSupplier.get().map(r -> "'" + r.loc.getRegion().getRegionNameAsString() + "'") - .collect(Collectors.joining(",", "[", "]")); - LOG.warn("Process batch for " + regions + " in " + tableName + " from " + serverName + - " failed, tries=" + tries, error); + regionsSupplier.get().map(r -> "'" + r.loc.getRegion().getRegionNameAsString() + "'") + .collect(Collectors.joining(",", "[", "]")); + LOG.warn("Process batch for " + regions + " in " + tableName + " from " + serverName + + " failed, tries=" + tries, + error); } } @@ -235,7 +236,7 @@ private void addError(Action action, Throwable error, ServerName serverName) { errors = action2Errors.computeIfAbsent(action, k -> new ArrayList<>()); } errors.add(new ThrowableWithExtraContext(error, EnvironmentEdgeManager.currentTime(), - getExtraContextForError(serverName))); + getExtraContextForError(serverName))); } private void addError(Iterable actions, Throwable error, ServerName serverName) { @@ -248,7 +249,7 @@ private void failOne(Action action, int tries, Throwable error, long currentTime return; } ThrowableWithExtraContext errorWithCtx = - new ThrowableWithExtraContext(error, currentTime, extras); + new ThrowableWithExtraContext(error, currentTime, extras); List errors = removeErrors(action); if (errors == null) { errors = Collections.singletonList(errorWithCtx); @@ -271,7 +272,7 @@ private void failAll(Stream actions, int tries) { return; } future.completeExceptionally(new RetriesExhaustedException(tries, - Optional.ofNullable(removeErrors(action)).orElse(Collections.emptyList()))); + Optional.ofNullable(removeErrors(action)).orElse(Collections.emptyList()))); }); } @@ -288,10 +289,10 @@ private ClientProtos.MultiRequest buildReq(Map actionsByR // action list. 
RequestConverter.buildNoDataRegionActions(entry.getKey(), entry.getValue().actions.stream() - .sorted((a1, a2) -> Integer.compare(a1.getOriginalIndex(), a2.getOriginalIndex())) - .collect(Collectors.toList()), - cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder, - nonceGroup, indexMap); + .sorted((a1, a2) -> Integer.compare(a1.getOriginalIndex(), a2.getOriginalIndex())) + .collect(Collectors.toList()), + cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder, nonceGroup, + indexMap); } return multiRequestBuilder.build(); } @@ -302,9 +303,9 @@ private void onComplete(Action action, RegionRequest regionReq, int tries, Serve MutableBoolean retryImmediately) { Object result = regionResult.result.getOrDefault(action.getOriginalIndex(), regionException); if (result == null) { - LOG.error("Server " + serverName + " sent us neither result nor exception for row '" + - Bytes.toStringBinary(action.getAction().getRow()) + "' of " + - regionReq.loc.getRegion().getRegionNameAsString()); + LOG.error("Server " + serverName + " sent us neither result nor exception for row '" + + Bytes.toStringBinary(action.getAction().getRow()) + "' of " + + regionReq.loc.getRegion().getRegionNameAsString()); addError(action, new RuntimeException("Invalid response"), serverName); failedActions.add(action); } else if (result instanceof Throwable) { @@ -406,8 +407,8 @@ private void sendToServer(ServerName serverName, ServerRequest serverReq, int tr onError(serverReq.actionsByRegion, tries, controller.getFailed(), serverName); } else { try { - onComplete(serverReq.actionsByRegion, tries, serverName, ResponseConverter.getResults(req, - indexMap, resp, controller.cellScanner())); + onComplete(serverReq.actionsByRegion, tries, serverName, + ResponseConverter.getResults(req, indexMap, resp, controller.cellScanner())); } catch (Exception e) { onError(serverReq.actionsByRegion, tries, e, serverName); return; @@ -436,7 +437,7 @@ private void sendOrDelay(Map actionsByServer, int tri serverReq.actionsByRegion.forEach((regionName, regionReq) -> { long backoff = backoffPolicy.getBackoffTime(serverName, regionName, serverStats); groupByBackoff.computeIfAbsent(backoff, k -> new ServerRequest()) - .setRegionRequest(regionName, regionReq); + .setRegionRequest(regionName, regionReq); }); groupByBackoff.forEach((backoff, sr) -> { if (backoff > 0) { @@ -463,7 +464,7 @@ private void onError(Map actionsByRegion, int tries, Thro return; } List copiedActions = actionsByRegion.values().stream().flatMap(r -> r.actions.stream()) - .collect(Collectors.toList()); + .collect(Collectors.toList()); addError(copiedActions, error, serverName); tryResubmit(copiedActions.stream(), tries, error instanceof RetryImmediatelyException, error instanceof CallQueueTooBigException); @@ -504,22 +505,23 @@ private void groupAndSend(Stream actions, int tries) { ConcurrentMap actionsByServer = new ConcurrentHashMap<>(); ConcurrentLinkedQueue locateFailed = new ConcurrentLinkedQueue<>(); addListener(CompletableFuture.allOf(actions - .map(action -> conn.getLocator().getRegionLocation(tableName, action.getAction().getRow(), - RegionLocateType.CURRENT, locateTimeoutNs).whenComplete((loc, error) -> { - if (error != null) { - error = unwrapCompletionException(translateException(error)); - if (error instanceof DoNotRetryIOException) { - failOne(action, tries, error, EnvironmentEdgeManager.currentTime(), ""); - return; + .map(action -> conn.getLocator().getRegionLocation(tableName, action.getAction().getRow(), + 
RegionLocateType.CURRENT, locateTimeoutNs).whenComplete((loc, error) -> { + if (error != null) { + error = unwrapCompletionException(translateException(error)); + if (error instanceof DoNotRetryIOException) { + failOne(action, tries, error, EnvironmentEdgeManager.currentTime(), ""); + return; + } + addError(action, error, null); + locateFailed.add(action); + } else { + computeIfAbsent(actionsByServer, loc.getServerName(), ServerRequest::new) + .addAction(loc, action); } - addError(action, error, null); - locateFailed.add(action); - } else { - computeIfAbsent(actionsByServer, loc.getServerName(), ServerRequest::new).addAction(loc, - action); - } - })) - .toArray(CompletableFuture[]::new)), (v, r) -> { + })) + .toArray(CompletableFuture[]::new)), + (v, r) -> { if (!actionsByServer.isEmpty()) { sendOrDelay(actionsByServer, tries); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java index 7b21eb5fa13a..e5f28d2e0602 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java index ea2528d5152c..ed21fb8e23ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java index cd0496377bc4..3b5f5ea6ccae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -103,6 +103,6 @@ public AsyncBufferedMutatorBuilder setMaxKeyValueSize(int maxKeyValueSize) { @Override public AsyncBufferedMutator build() { return new AsyncBufferedMutatorImpl(periodicalFlushTimer, tableBuilder.build(), writeBufferSize, - periodicFlushTimeoutNs, maxKeyValueSize); + periodicFlushTimeoutNs, maxKeyValueSize); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java index fcd1724d10e5..30f9cb13334c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -111,13 +111,13 @@ protected void internalFlush() { @Override public List> mutate(List mutations) { List> futures = - Stream.> generate(CompletableFuture::new).limit(mutations.size()) - .collect(Collectors.toList()); + Stream.> generate(CompletableFuture::new).limit(mutations.size()) + .collect(Collectors.toList()); long heapSize = 0; for (Mutation mutation : mutations) { heapSize += mutation.heapSize(); if (mutation instanceof Put) { - validatePut((Put)mutation, maxKeyValueSize); + validatePut((Put) mutation, maxKeyValueSize); } } synchronized (this) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java index d7984628319a..0e2c9c616df5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java @@ -27,6 +27,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.isRemote; import static org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.context.Scope; @@ -41,7 +42,9 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.util.Timer; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.Interface; @@ -123,10 +126,7 @@ public AsyncClientScanner(Scan scan, AdvancedScanResultConsumer consumer, TableN * `start()` method. The cost of doing so would be making access to the `span` safe for * concurrent threads. 
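A minimal sketch of the AsyncBufferedMutator API touched in the hunks above (per-mutation futures from mutate(List), explicit flush). It assumes an open AsyncConnection named "asyncConnection"; table and column names are illustrative:

  // Queue several Puts through one buffered mutator; each mutation gets its own future.
  try (AsyncBufferedMutator mutator =
      asyncConnection.getBufferedMutator(TableName.valueOf("demo"))) {
    List<Put> puts = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      puts.add(new Put(Bytes.toBytes("row-" + i))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v" + i)));
    }
    List<CompletableFuture<Void>> futures = mutator.mutate(puts);
    mutator.flush(); // or rely on the periodic flush / write-buffer-size trigger
    CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
  }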
*/ - span = new TableOperationSpanBuilder(conn) - .setTableName(tableName) - .setOperation(scan) - .build(); + span = new TableOperationSpanBuilder(conn).setTableName(tableName).setOperation(scan).build(); if (consumer instanceof AsyncTableResultScanner) { AsyncTableResultScanner scanner = (AsyncTableResultScanner) consumer; scanner.setSpan(span); @@ -167,8 +167,8 @@ private CompletableFuture callOpenScanner(HBaseRpcControlle } CompletableFuture future = new CompletableFuture<>(); try { - ScanRequest request = RequestConverter.buildScanRequest( - loc.getRegion().getRegionName(), scan, scan.getCaching(), false); + ScanRequest request = RequestConverter.buildScanRequest(loc.getRegion().getRegionName(), + scan, scan.getCaching(), false); stub.scan(controller, request, resp -> { try (Scope ignored1 = span.makeCurrent()) { if (controller.failed()) { @@ -178,8 +178,8 @@ private CompletableFuture callOpenScanner(HBaseRpcControlle span.end(); return; } - future.complete(new OpenScannerResponse( - loc, isRegionServerRemote, stub, controller, resp)); + future.complete( + new OpenScannerResponse(loc, isRegionServerRemote, stub, controller, resp)); } }); } catch (IOException e) { @@ -193,13 +193,13 @@ private CompletableFuture callOpenScanner(HBaseRpcControlle private void startScan(OpenScannerResponse resp) { addListener( conn.callerFactory.scanSingleRegion().id(resp.resp.getScannerId()).location(resp.loc) - .remote(resp.isRegionServerRemote) - .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub) - .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache) - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) - .pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) - .startLogErrorsCnt(startLogErrorsCnt).start(resp.controller, resp.resp), + .remote(resp.isRegionServerRemote) + .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub) + .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache) + .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) + .pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) + .startLogErrorsCnt(startLogErrorsCnt).start(resp.controller, resp.resp), (hasMore, error) -> { try (Scope ignored = span.makeCurrent()) { if (error != null) { @@ -228,18 +228,18 @@ private void startScan(OpenScannerResponse resp) { private CompletableFuture openScanner(int replicaId) { try (Scope ignored = span.makeCurrent()) { return conn.callerFactory. 
single().table(tableName) - .row(scan.getStartRow()).replicaId(replicaId).locateType(getLocateType(scan)) - .priority(scan.getPriority()) - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) - .pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) - .startLogErrorsCnt(startLogErrorsCnt).action(this::callOpenScanner).call(); + .row(scan.getStartRow()).replicaId(replicaId).locateType(getLocateType(scan)) + .priority(scan.getPriority()).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS) + .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS) + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt) + .action(this::callOpenScanner).call(); } } private long getPrimaryTimeoutNs() { return TableName.isMetaTableName(tableName) ? conn.connConf.getPrimaryMetaScanTimeoutNs() - : conn.connConf.getPrimaryScanTimeoutNs(); + : conn.connConf.getPrimaryScanTimeoutNs(); } private void openScanner() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java index 8839eda802a5..3d2c365e3b13 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java index a6d403f597b8..599f488488d7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,10 +65,10 @@ class AsyncConnectionConfiguration { private static final Logger LOG = LoggerFactory.getLogger(AsyncConnectionConfiguration.class); /** - * Configure the number of failures after which the client will start logging. A few failures - * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable - * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at - * this stage. + * Configure the number of failures after which the client will start logging. A few failures is + * fine: region moved, then is not opened, then is overloaded. We try to have an acceptable + * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at this + * stage. 
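The AsyncConnectionConfiguration hunk above reads a number of client tuning keys; a hedged example of setting a few of them before opening an async connection (values are examples only, and hbase.client.sync.wait.timeout.msec is the key read by AdminOverAsyncAdmin earlier in this diff):

  Configuration conf = HBaseConfiguration.create();
  conf.setInt("hbase.client.start.log.errors.counter", 3);      // start logging retry errors earlier than the default of 5
  conf.setLong("hbase.client.pause", 100);                      // base retry pause in ms
  conf.setInt("hbase.client.retries.number", 10);
  conf.setInt("hbase.client.sync.wait.timeout.msec", 5 * 60000); // default is 10 minutes
  CompletableFuture<AsyncConnection> connFuture = ConnectionFactory.createAsyncConnection(conf);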
*/ public static final String START_LOG_ERRORS_AFTER_COUNT_KEY = "hbase.client.start.log.errors.counter"; @@ -133,9 +133,9 @@ class AsyncConnectionConfiguration { long rpcTimeoutMs = conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT); this.rpcTimeoutNs = TimeUnit.MILLISECONDS.toNanos(rpcTimeoutMs); this.readRpcTimeoutNs = - TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_READ_TIMEOUT_KEY, rpcTimeoutMs)); + TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_READ_TIMEOUT_KEY, rpcTimeoutMs)); this.writeRpcTimeoutNs = - TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY, rpcTimeoutMs)); + TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY, rpcTimeoutMs)); long pauseMs = conf.getLong(HBASE_CLIENT_PAUSE, DEFAULT_HBASE_CLIENT_PAUSE); long pauseForCQTBEMs = conf.getLong(HBASE_CLIENT_PAUSE_FOR_CQTBE, pauseMs); if (pauseForCQTBEMs < pauseMs) { @@ -148,27 +148,26 @@ class AsyncConnectionConfiguration { this.pauseForCQTBENs = TimeUnit.MILLISECONDS.toNanos(pauseForCQTBEMs); this.maxRetries = conf.getInt(HBASE_CLIENT_RETRIES_NUMBER, DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); this.startLogErrorsCnt = - conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT); - this.scanTimeoutNs = TimeUnit.MILLISECONDS.toNanos( - conf.getInt(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, - DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD)); + conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT); + this.scanTimeoutNs = TimeUnit.MILLISECONDS.toNanos(conf + .getInt(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD)); this.scannerCaching = - conf.getInt(HBASE_CLIENT_SCANNER_CACHING, DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + conf.getInt(HBASE_CLIENT_SCANNER_CACHING, DEFAULT_HBASE_CLIENT_SCANNER_CACHING); this.metaScannerCaching = - conf.getInt(HBASE_META_SCANNER_CACHING, DEFAULT_HBASE_META_SCANNER_CACHING); + conf.getInt(HBASE_META_SCANNER_CACHING, DEFAULT_HBASE_META_SCANNER_CACHING); this.scannerMaxResultSize = conf.getLong(HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, WRITE_BUFFER_SIZE_DEFAULT); this.writeBufferPeriodicFlushTimeoutNs = - TimeUnit.MILLISECONDS.toNanos(conf.getLong(WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT)); + TimeUnit.MILLISECONDS.toNanos(conf.getLong(WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, + WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT)); this.primaryCallTimeoutNs = TimeUnit.MICROSECONDS.toNanos( conf.getLong(PRIMARY_CALL_TIMEOUT_MICROSECOND, PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT)); this.primaryScanTimeoutNs = TimeUnit.MICROSECONDS.toNanos( conf.getLong(PRIMARY_SCAN_TIMEOUT_MICROSECOND, PRIMARY_SCAN_TIMEOUT_MICROSECOND_DEFAULT)); this.primaryMetaScanTimeoutNs = - TimeUnit.MICROSECONDS.toNanos(conf.getLong(HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, - HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT)); + TimeUnit.MICROSECONDS.toNanos(conf.getLong(HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, + HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT)); this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 4de9a2c4ac58..f756e1f69216 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -27,6 +27,7 @@ import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLED_KEY; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.SERVER_NAME_KEY; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; + import io.opentelemetry.api.trace.Span; import java.io.IOException; import java.net.SocketAddress; @@ -59,8 +60,10 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; @@ -75,9 +78,9 @@ public class AsyncConnectionImpl implements AsyncConnection { private static final Logger LOG = LoggerFactory.getLogger(AsyncConnectionImpl.class); static final HashedWheelTimer RETRY_TIMER = new HashedWheelTimer( - new ThreadFactoryBuilder().setNameFormat("Async-Client-Retry-Timer-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), - 10, TimeUnit.MILLISECONDS); + new ThreadFactoryBuilder().setNameFormat("Async-Client-Retry-Timer-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + 10, TimeUnit.MILLISECONDS); private final Configuration conf; @@ -106,7 +109,7 @@ public class AsyncConnectionImpl implements AsyncConnection { private final AtomicReference masterStub = new AtomicReference<>(); private final AtomicReference> masterStubMakeFuture = - new AtomicReference<>(); + new AtomicReference<>(); private final Optional stats; private final ClientBackoffPolicy backoffPolicy; @@ -122,7 +125,7 @@ public class AsyncConnectionImpl implements AsyncConnection { private volatile ConnectionOverAsyncConnection conn; public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, String clusterId, - SocketAddress localAddress, User user) { + SocketAddress localAddress, User user) { this.conf = conf; this.user = user; @@ -137,10 +140,10 @@ public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, Stri this.metrics = Optional.empty(); } this.rpcClient = - RpcClientFactory.createClient(conf, clusterId, localAddress, metrics.orElse(null)); + RpcClientFactory.createClient(conf, clusterId, localAddress, metrics.orElse(null)); this.rpcControllerFactory = RpcControllerFactory.instantiate(conf); - this.rpcTimeout = - (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(connConf.getRpcTimeoutNs())); + this.rpcTimeout = (int) Math.min(Integer.MAX_VALUE, + TimeUnit.NANOSECONDS.toMillis(connConf.getRpcTimeoutNs())); this.locator = new AsyncRegionLocator(this, RETRY_TIMER); this.callerFactory = new AsyncRpcRetryingCallerFactory(this, RETRY_TIMER); if (conf.getBoolean(CLIENT_NONCES_ENABLED_KEY, true)) { @@ -302,7 +305,7 @@ CompletableFuture getMasterStub() { future.completeExceptionally(error); } else if (addr == null) { future.completeExceptionally(new MasterNotRunningException( - "ZooKeeper available but no active master location found")); + "ZooKeeper available but no active master location found")); } else { LOG.debug("The fetched master address is {}", addr); try { @@ -351,13 +354,13 @@ public 
AsyncTable build() { @Override public AsyncTableBuilder getTableBuilder(TableName tableName, - ExecutorService pool) { + ExecutorService pool) { return new AsyncTableBuilderBase(tableName, connConf) { @Override public AsyncTable build() { RawAsyncTableImpl rawTable = - new RawAsyncTableImpl(AsyncConnectionImpl.this, RETRY_TIMER, this); + new RawAsyncTableImpl(AsyncConnectionImpl.this, RETRY_TIMER, this); return new AsyncTableImpl(rawTable, pool); } }; @@ -379,7 +382,7 @@ public AsyncAdminBuilder getAdminBuilder(ExecutorService pool) { @Override public AsyncAdmin build() { RawAsyncHBaseAdmin rawAdmin = - new RawAsyncHBaseAdmin(AsyncConnectionImpl.this, RETRY_TIMER, this); + new RawAsyncHBaseAdmin(AsyncConnectionImpl.this, RETRY_TIMER, this); return new AsyncHBaseAdmin(rawAdmin, pool); } }; @@ -392,9 +395,9 @@ public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName @Override public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName, - ExecutorService pool) { + ExecutorService pool) { return new AsyncBufferedMutatorBuilderImpl(connConf, getTableBuilder(tableName, pool), - RETRY_TIMER); + RETRY_TIMER); } @Override @@ -419,8 +422,10 @@ private Hbck getHbckInternal(ServerName masterServer) { // we will not create a new connection when creating a new protobuf stub, and for hbck there // will be no performance consideration, so for simplification we will create a new stub every // time instead of caching the stub here. - return new HBaseHbck(MasterProtos.HbckService.newBlockingStub( - rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout)), rpcControllerFactory); + return new HBaseHbck( + MasterProtos.HbckService + .newBlockingStub(rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout)), + rpcControllerFactory); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index fc7ee5c94554..250e997895fb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -205,7 +205,7 @@ public CompletableFuture modifyColumnFamily(TableName tableName, @Override public CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, - byte[] family, String dstSFT) { + byte[] family, String dstSFT) { return wrap(rawAdmin.modifyColumnFamilyStoreFileTracker(tableName, family, dstSFT)); } @@ -275,14 +275,13 @@ public CompletableFuture flushRegionServer(ServerName sn) { } @Override - public CompletableFuture compact(TableName tableName, - CompactType compactType) { + public CompletableFuture compact(TableName tableName, CompactType compactType) { return wrap(rawAdmin.compact(tableName, compactType)); } @Override - public CompletableFuture compact(TableName tableName, - byte[] columnFamily, CompactType compactType) { + public CompletableFuture compact(TableName tableName, byte[] columnFamily, + CompactType compactType) { return wrap(rawAdmin.compact(tableName, columnFamily, compactType)); } @@ -408,8 +407,8 @@ public CompletableFuture> getQuota(QuotaFilter filter) { } @Override - public CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled) { + public CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) { return wrap(rawAdmin.addReplicationPeer(peerId, peerConfig, enabled)); } @@ -499,7 +498,7 
@@ public CompletableFuture restoreSnapshot(String snapshotName) { @Override public CompletableFuture restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, - boolean restoreAcl) { + boolean restoreAcl) { return wrap(rawAdmin.restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl)); } @@ -678,8 +677,8 @@ public CompletableFuture isMasterInMaintenanceMode() { } @Override - public CompletableFuture getCompactionState( - TableName tableName, CompactType compactType) { + public CompletableFuture getCompactionState(TableName tableName, + CompactType compactType) { return wrap(rawAdmin.getCompactionState(tableName, compactType)); } @@ -694,8 +693,8 @@ public CompletableFuture> getLastMajorCompactionTimestamp(TableNa } @Override - public CompletableFuture> getLastMajorCompactionTimestampForRegion( - byte[] regionName) { + public CompletableFuture> + getLastMajorCompactionTimestampForRegion(byte[] regionName) { return wrap(rawAdmin.getLastMajorCompactionTimestampForRegion(regionName)); } @@ -819,8 +818,8 @@ public CompletableFuture> getSpaceQuotaTableSizes() { } @Override - public CompletableFuture> getRegionServerSpaceQuotaSnapshots( - ServerName serverName) { + public CompletableFuture> + getRegionServerSpaceQuotaSnapshots(ServerName serverName) { return wrap(rawAdmin.getRegionServerSpaceQuotaSnapshots(serverName)); } @@ -858,8 +857,7 @@ public CompletableFuture> hasUserPermissions(String userName, } @Override - public CompletableFuture snapshotCleanupSwitch(final boolean on, - final boolean sync) { + public CompletableFuture snapshotCleanupSwitch(final boolean on, final boolean sync) { return wrap(rawAdmin.snapshotCleanupSwitch(on, sync)); } @@ -894,7 +892,8 @@ public CompletableFuture removeRSGroup(String groupName) { } @Override - public CompletableFuture balanceRSGroup(String groupName, BalanceRequest request) { + public CompletableFuture balanceRSGroup(String groupName, + BalanceRequest request) { return wrap(rawAdmin.balanceRSGroup(groupName, request)); } @@ -910,7 +909,7 @@ public CompletableFuture> listTablesInRSGroup(String groupName) @Override public CompletableFuture, List>> - getConfiguredNamespacesAndTablesInRSGroup(String groupName) { + getConfiguredNamespacesAndTablesInRSGroup(String groupName) { return wrap(rawAdmin.getConfiguredNamespacesAndTablesInRSGroup(groupName)); } @@ -940,15 +939,14 @@ public CompletableFuture renameRSGroup(String oldName, String newName) { } @Override - public CompletableFuture - updateRSGroupConfig(String groupName, Map configuration) { + public CompletableFuture updateRSGroupConfig(String groupName, + Map configuration) { return wrap(rawAdmin.updateRSGroupConfig(groupName, configuration)); } @Override public CompletableFuture> getLogEntries(Set serverNames, - String logType, ServerType serverType, int limit, - Map filterParams) { + String logType, ServerType serverType, int limit, Map filterParams) { return wrap(rawAdmin.getLogEntries(serverNames, logType, serverType, limit, filterParams)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java index de2778cf6d78..33f2604a60ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one 
* or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,14 +47,14 @@ public AsyncMasterRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl Callable callable, int priority, long pauseNs, long pauseForCQTBENs, int maxRetries, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { super(retryTimer, conn, priority, pauseNs, pauseForCQTBENs, maxRetries, operationTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + rpcTimeoutNs, startLogErrorsCnt); this.callable = callable; } private void clearMasterStubCacheOnError(MasterService.Interface stub, Throwable error) { // ServerNotRunningYetException may because it is the backup master. - if (ClientExceptionsUtil.isConnectionException(error) || - error instanceof ServerNotRunningYetException) { + if (ClientExceptionsUtil.isConnectionException(error) + || error instanceof ServerNotRunningYetException) { conn.clearMasterStubCache(stub); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java index 5ae9de6c476d..8a60547cdf35 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ class AsyncMetaRegionLocator { private final AtomicReference metaRegionLocations = new AtomicReference<>(); private final AtomicReference> metaRelocateFuture = - new AtomicReference<>(); + new AtomicReference<>(); AsyncMetaRegionLocator(ConnectionRegistry registry) { this.registry = registry; @@ -77,8 +77,8 @@ private void addLocationToCache(HRegionLocation loc) { } } HRegionLocation oldLoc = oldLocs.getRegionLocation(replicaId); - if (oldLoc != null && (oldLoc.getSeqNum() > loc.getSeqNum() || - oldLoc.getServerName().equals(loc.getServerName()))) { + if (oldLoc != null && (oldLoc.getSeqNum() > loc.getSeqNum() + || oldLoc.getServerName().equals(loc.getServerName()))) { return; } RegionLocations newLocs = replaceRegionLocation(oldLocs, loc); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 1c686aca8b76..abb6a3b8aca0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -74,7 +74,7 @@ class AsyncNonMetaRegionLocator { private static final Logger LOG = LoggerFactory.getLogger(AsyncNonMetaRegionLocator.class); static final String MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = - "hbase.client.meta.max.concurrent.locate.per.table"; + "hbase.client.meta.max.concurrent.locate.per.table"; private static final int DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = 8; @@ -124,12 +124,12 @@ public boolean equals(Object obj) { private static final class TableCache { private final ConcurrentNavigableMap cache = - new ConcurrentSkipListMap<>(BYTES_COMPARATOR); + new ConcurrentSkipListMap<>(BYTES_COMPARATOR); private final Set pendingRequests = new HashSet<>(); private final Map> allRequests = - new LinkedHashMap<>(); + new LinkedHashMap<>(); public boolean hasQuota(int max) { return pendingRequests.size() < max; @@ -149,7 +149,7 @@ public Optional getCandidate() { public void clearCompletedRequests(RegionLocations locations) { for (Iterator>> iter = - allRequests.entrySet().iterator(); iter.hasNext();) { + allRequests.entrySet().iterator(); iter.hasNext();) { Map.Entry> entry = iter.next(); if (tryComplete(entry.getKey(), entry.getValue(), locations)) { iter.remove(); @@ -178,8 +178,8 @@ private boolean tryComplete(LocateRequest req, CompletableFuture 0 || Bytes.equals(EMPTY_END_ROW, endKey)) && - Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0); + completed = c == 0 || ((c > 0 || Bytes.equals(EMPTY_END_ROW, endKey)) + && Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0); } else { completed = loc.getRegion().containsRow(req.row); } @@ -197,35 +197,35 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; - try { - RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( - conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); + this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory + .createSelector(replicaSelectorClass, META_TABLE_NAME, conn, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = conn.registry.getMetaRegionLocations() + .get(conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); break; case NONE: // If user does not configure LOCATOR_META_REPLICAS_MODE, let's check the legacy config. - boolean useMetaReplicas = conn.getConfiguration().getBoolean(USE_META_REPLICAS, - DEFAULT_USE_META_REPLICAS); + boolean useMetaReplicas = + conn.getConfiguration().getBoolean(USE_META_REPLICAS, DEFAULT_USE_META_REPLICAS); if (useMetaReplicas) { this.metaReplicaMode = CatalogReplicaMode.HEDGED_READ; } @@ -286,9 +286,10 @@ private RegionLocations addToCache(TableCache tableCache, RegionLocations locs) RegionLocations mergedLocs = oldLocs.mergeLocations(locs); if (isEqual(mergedLocs, oldLocs)) { // the merged one is the same with the old one, give up - LOG.trace("Will not add {} to cache because the old value {} " + - " is newer than us or has the same server name." 
+ - " Maybe it is updated before we replace it", locs, oldLocs); + LOG.trace("Will not add {} to cache because the old value {} " + + " is newer than us or has the same server name." + + " Maybe it is updated before we replace it", + locs, oldLocs); return oldLocs; } if (tableCache.cache.replace(startKey, oldLocs, mergedLocs)) { @@ -298,8 +299,10 @@ private RegionLocations addToCache(TableCache tableCache, RegionLocations locs) // the region is different, here we trust the one we fetched. This maybe wrong but finally // the upper layer can detect this and trigger removal of the wrong locations if (LOG.isDebugEnabled()) { - LOG.debug("The newnly fetch region {} is different from the old one {} for row '{}'," + - " try replaing the old one...", region, oldRegion, Bytes.toStringBinary(startKey)); + LOG.debug( + "The newnly fetch region {} is different from the old one {} for row '{}'," + + " try replaing the old one...", + region, oldRegion, Bytes.toStringBinary(startKey)); } if (tableCache.cache.replace(startKey, oldLocs, locs)) { return locs; @@ -311,8 +314,9 @@ private RegionLocations addToCache(TableCache tableCache, RegionLocations locs) private void complete(TableName tableName, LocateRequest req, RegionLocations locs, Throwable error) { if (error != null) { - LOG.warn("Failed to locate region in '" + tableName + "', row='" + - Bytes.toStringBinary(req.row) + "', locateType=" + req.locateType, error); + LOG.warn("Failed to locate region in '" + tableName + "', row='" + + Bytes.toStringBinary(req.row) + "', locateType=" + req.locateType, + error); } Optional toSend = Optional.empty(); TableCache tableCache = getTableCache(tableName); @@ -422,7 +426,7 @@ private RegionLocations locateRowBeforeInCache(TableCache tableCache, TableName byte[] row, int replicaId) { boolean isEmptyStopRow = isEmptyStopRow(row); Map.Entry entry = - isEmptyStopRow ? tableCache.cache.lastEntry() : tableCache.cache.lowerEntry(row); + isEmptyStopRow ? 
tableCache.cache.lastEntry() : tableCache.cache.lowerEntry(row); if (entry == null) { recordCacheMiss(); return null; @@ -433,8 +437,8 @@ private RegionLocations locateRowBeforeInCache(TableCache tableCache, TableName recordCacheMiss(); return null; } - if (isEmptyStopRow(loc.getRegion().getEndKey()) || - (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0)) { + if (isEmptyStopRow(loc.getRegion().getEndKey()) + || (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0)) { if (LOG.isTraceEnabled()) { LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName, Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId); @@ -449,8 +453,8 @@ private RegionLocations locateRowBeforeInCache(TableCache tableCache, TableName private void locateInMeta(TableName tableName, LocateRequest req) { if (LOG.isTraceEnabled()) { - LOG.trace("Try locate '" + tableName + "', row='" + Bytes.toStringBinary(req.row) + - "', locateType=" + req.locateType + " in meta"); + LOG.trace("Try locate '" + tableName + "', row='" + Bytes.toStringBinary(req.row) + + "', locateType=" + req.locateType + " in meta"); } byte[] metaStartKey; if (req.locateType.equals(RegionLocateType.BEFORE)) { @@ -464,10 +468,10 @@ private void locateInMeta(TableName tableName, LocateRequest req) { metaStartKey = createRegionName(tableName, req.row, NINES, false); } byte[] metaStopKey = - RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); + RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); Scan scan = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey, true) - .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(locatePrefetchLimit) - .setReadType(ReadType.PREAD); + .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(locatePrefetchLimit) + .setReadType(ReadType.PREAD); switch (this.metaReplicaMode) { case LOAD_BALANCE: @@ -503,7 +507,7 @@ public void onComplete() { complete(tableName, req, null, new TableNotFoundException(tableName)); } else if (!completeNormally) { complete(tableName, req, null, new IOException( - "Unable to find region for '" + Bytes.toStringBinary(req.row) + "' in " + tableName)); + "Unable to find region for '" + Bytes.toStringBinary(req.row) + "' in " + tableName)); } } @@ -551,8 +555,8 @@ public void onNext(Result[] results, ScanController controller) { private RegionLocations locateInCache(TableCache tableCache, TableName tableName, byte[] row, int replicaId, RegionLocateType locateType) { return locateType.equals(RegionLocateType.BEFORE) - ? locateRowBeforeInCache(tableCache, tableName, row, replicaId) - : locateRowInCache(tableCache, tableName, row, replicaId); + ? 
locateRowBeforeInCache(tableCache, tableName, row, replicaId) + : locateRowInCache(tableCache, tableName, row, replicaId); } // locateToPrevious is true means we will use the start key of a region to locate the region @@ -680,7 +684,7 @@ void clearCache(TableName tableName) { } } conn.getConnectionMetrics() - .ifPresent(metrics -> metrics.incrMetaCacheNumClearRegion(tableCache.cache.size())); + .ifPresent(metrics -> metrics.incrMetaCacheNumClearRegion(tableCache.cache.size())); } void clearCache() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java index 39c5b040443c..f1f36ed34c06 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java @@ -76,7 +76,7 @@ class AsyncRegionLocator { } private CompletableFuture withTimeout(CompletableFuture future, long timeoutNs, - Supplier timeoutMsg) { + Supplier timeoutMsg) { if (future.isDone() || timeoutNs <= 0) { return future; } @@ -99,11 +99,8 @@ private boolean isMeta(TableName tableName) { return TableName.isMetaTableName(tableName); } - private CompletableFuture tracedLocationFuture( - Supplier> action, - Function> getRegionNames, - Supplier spanSupplier - ) { + private CompletableFuture tracedLocationFuture(Supplier> action, + Function> getRegionNames, Supplier spanSupplier) { final Span span = spanSupplier.get(); try (Scope scope = span.makeCurrent()) { CompletableFuture future = action.get(); @@ -127,50 +124,44 @@ private static List getRegionNames(RegionLocations locs) { if (locs == null || locs.getRegionLocations() == null) { return Collections.emptyList(); } - return Arrays.stream(locs.getRegionLocations()) - .filter(Objects::nonNull) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .collect(Collectors.toList()); + return Arrays.stream(locs.getRegionLocations()).filter(Objects::nonNull) + .map(HRegionLocation::getRegion).map(RegionInfo::getRegionNameAsString) + .collect(Collectors.toList()); } private static List getRegionNames(HRegionLocation location) { - return Optional.ofNullable(location) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .map(Collections::singletonList) - .orElseGet(Collections::emptyList); + return Optional.ofNullable(location).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).map(Collections::singletonList) + .orElseGet(Collections::emptyList); } CompletableFuture getRegionLocations(TableName tableName, byte[] row, - RegionLocateType type, boolean reload, long timeoutNs) { + RegionLocateType type, boolean reload, long timeoutNs) { final Supplier supplier = new TableSpanBuilder(conn) - .setName("AsyncRegionLocator.getRegionLocations") - .setTableName(tableName); + .setName("AsyncRegionLocator.getRegionLocations").setTableName(tableName); return tracedLocationFuture(() -> { - CompletableFuture future = isMeta(tableName) ? - metaRegionLocator.getRegionLocations(RegionReplicaUtil.DEFAULT_REPLICA_ID, reload) : - nonMetaRegionLocator.getRegionLocations(tableName, row, - RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload); + CompletableFuture future = isMeta(tableName) + ? 
metaRegionLocator.getRegionLocations(RegionReplicaUtil.DEFAULT_REPLICA_ID, reload) + : nonMetaRegionLocator.getRegionLocations(tableName, row, + RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload); return withTimeout(future, timeoutNs, - () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + - "ms) waiting for region locations for " + tableName + ", row='" + - Bytes.toStringBinary(row) + "'"); + () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + + "ms) waiting for region locations for " + tableName + ", row='" + + Bytes.toStringBinary(row) + "'"); }, AsyncRegionLocator::getRegionNames, supplier); } CompletableFuture getRegionLocation(TableName tableName, byte[] row, - int replicaId, RegionLocateType type, boolean reload, long timeoutNs) { + int replicaId, RegionLocateType type, boolean reload, long timeoutNs) { final Supplier supplier = new TableSpanBuilder(conn) - .setName("AsyncRegionLocator.getRegionLocation") - .setTableName(tableName); + .setName("AsyncRegionLocator.getRegionLocation").setTableName(tableName); return tracedLocationFuture(() -> { // meta region can not be split right now so we always call the same method. // Change it later if the meta table can have more than one regions. CompletableFuture future = new CompletableFuture<>(); CompletableFuture locsFuture = - isMeta(tableName) ? metaRegionLocator.getRegionLocations(replicaId, reload) : - nonMetaRegionLocator.getRegionLocations(tableName, row, replicaId, type, reload); + isMeta(tableName) ? metaRegionLocator.getRegionLocations(replicaId, reload) + : nonMetaRegionLocator.getRegionLocations(tableName, row, replicaId, type, reload); addListener(locsFuture, (locs, error) -> { if (error != null) { future.completeExceptionally(error); @@ -178,38 +169,38 @@ CompletableFuture getRegionLocation(TableName tableName, byte[] } HRegionLocation loc = locs.getRegionLocation(replicaId); if (loc == null) { - future.completeExceptionally( - new RegionOfflineException("No location for " + tableName + ", row='" + - Bytes.toStringBinary(row) + "', locateType=" + type + ", replicaId=" + replicaId)); + future.completeExceptionally(new RegionOfflineException( + "No location for " + tableName + ", row='" + Bytes.toStringBinary(row) + + "', locateType=" + type + ", replicaId=" + replicaId)); } else if (loc.getServerName() == null) { future.completeExceptionally( - new RegionOfflineException("No server address listed for region '" + - loc.getRegion().getRegionNameAsString() + ", row='" + Bytes.toStringBinary(row) + - "', locateType=" + type + ", replicaId=" + replicaId)); + new RegionOfflineException("No server address listed for region '" + + loc.getRegion().getRegionNameAsString() + ", row='" + Bytes.toStringBinary(row) + + "', locateType=" + type + ", replicaId=" + replicaId)); } else { future.complete(loc); } }); return withTimeout(future, timeoutNs, - () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + - "ms) waiting for region location for " + tableName + ", row='" + - Bytes.toStringBinary(row) + "', replicaId=" + replicaId); + () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + + "ms) waiting for region location for " + tableName + ", row='" + + Bytes.toStringBinary(row) + "', replicaId=" + replicaId); }, AsyncRegionLocator::getRegionNames, supplier); } CompletableFuture getRegionLocation(TableName tableName, byte[] row, - int replicaId, RegionLocateType type, long timeoutNs) { + int replicaId, RegionLocateType type, long timeoutNs) { return getRegionLocation(tableName, row, replicaId, type, false, 
timeoutNs); } CompletableFuture getRegionLocation(TableName tableName, byte[] row, - RegionLocateType type, boolean reload, long timeoutNs) { + RegionLocateType type, boolean reload, long timeoutNs) { return getRegionLocation(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload, timeoutNs); } CompletableFuture getRegionLocation(TableName tableName, byte[] row, - RegionLocateType type, long timeoutNs) { + RegionLocateType type, long timeoutNs) { return getRegionLocation(tableName, row, type, false, timeoutNs); } @@ -222,9 +213,8 @@ void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) { } void clearCache(TableName tableName) { - Supplier supplier = new TableSpanBuilder(conn) - .setName("AsyncRegionLocator.clearCache") - .setTableName(tableName); + Supplier supplier = + new TableSpanBuilder(conn).setName("AsyncRegionLocator.clearCache").setTableName(tableName); TraceUtil.trace(() -> { LOG.debug("Clear meta cache for {}", tableName); if (tableName.equals(META_TABLE_NAME)) { @@ -236,9 +226,9 @@ void clearCache(TableName tableName) { } void clearCache(ServerName serverName) { - Supplier supplier = new ConnectionSpanBuilder(conn) - .setName("AsyncRegionLocator.clearCache") - .addAttribute(SERVER_NAME_KEY, serverName.getServerName()); + Supplier supplier = + new ConnectionSpanBuilder(conn).setName("AsyncRegionLocator.clearCache") + .addAttribute(SERVER_NAME_KEY, serverName.getServerName()); TraceUtil.trace(() -> { LOG.debug("Clear meta cache for {}", serverName); metaRegionLocator.clearCache(serverName); @@ -248,8 +238,8 @@ void clearCache(ServerName serverName) { } void clearCache() { - Supplier supplier = new ConnectionSpanBuilder(conn) - .setName("AsyncRegionLocator.clearCache"); + Supplier supplier = + new ConnectionSpanBuilder(conn).setName("AsyncRegionLocator.clearCache"); TraceUtil.trace(() -> { metaRegionLocator.clearCache(); nonMetaRegionLocator.clearCache(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java index 4c6cd5a01172..3f79d040939b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.findException; import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.isMetaClearingException; + import java.util.Arrays; import java.util.function.Consumer; import java.util.function.Function; @@ -50,8 +51,8 @@ static boolean canUpdateOnError(HRegionLocation loc, HRegionLocation oldLoc) { if (oldLoc == null || oldLoc.getServerName() == null) { return false; } - return oldLoc.getSeqNum() <= loc.getSeqNum() && - oldLoc.getServerName().equals(loc.getServerName()); + return oldLoc.getSeqNum() <= loc.getSeqNum() + && oldLoc.getServerName().equals(loc.getServerName()); } static void updateCachedLocationOnError(HRegionLocation loc, Throwable exception, @@ -79,7 +80,7 @@ static void updateCachedLocationOnError(HRegionLocation loc, Throwable exception if (cause instanceof RegionMovedException) { RegionMovedException rme = (RegionMovedException) cause; HRegionLocation newLoc = - new HRegionLocation(loc.getRegion(), rme.getServerName(), rme.getLocationSeqNum()); + new HRegionLocation(loc.getRegion(), rme.getServerName(), rme.getLocationSeqNum()); LOG.debug("Try updating {} with the new location 
{} constructed by {}", loc, newLoc, rme.toString()); addToCache.accept(newLoc); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java index 8648572a04a3..f5cfd2614d72 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS; @@ -178,14 +176,15 @@ protected final void onError(Throwable t, Supplier errMsg, return; } if (tries > startLogErrorsCnt) { - LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts + - ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + - " ms, time elapsed = " + elapsedMs() + " ms", error); + LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts + + ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + + " ms, time elapsed = " + elapsedMs() + " ms", + error); } updateCachedLocation.accept(error); RetriesExhaustedException.ThrowableWithExtraContext qt = - new RetriesExhaustedException.ThrowableWithExtraContext(error, - EnvironmentEdgeManager.currentTime(), ""); + new RetriesExhaustedException.ThrowableWithExtraContext(error, + EnvironmentEdgeManager.currentTime(), ""); exceptions.add(qt); if (tries >= maxAttempts) { completeExceptionally(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java index 48bde4434be7..2743fabe5890 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -93,8 +93,8 @@ public SingleRequestCallerBuilder row(byte[] row) { return this; } - public SingleRequestCallerBuilder action( - AsyncSingleRequestRpcRetryingCaller.Callable callable) { + public SingleRequestCallerBuilder + action(AsyncSingleRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -156,8 +156,8 @@ private void preCheck() { public AsyncSingleRequestRpcRetryingCaller build() { preCheck(); return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn, tableName, row, replicaId, - locateType, callable, priority, pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + locateType, callable, priority, pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, + rpcTimeoutNs, startLogErrorsCnt); } /** @@ -291,9 +291,9 @@ private void preCheck() { public AsyncScanSingleRegionRpcRetryingCaller build() { preCheck(); return new AsyncScanSingleRegionRpcRetryingCaller(retryTimer, conn, scan, scanMetrics, - scannerId, resultCache, consumer, stub, loc, isRegionServerRemote, priority, - scannerLeaseTimeoutPeriodNs, pauseNs, pauseForCQTBENs, maxAttempts, scanTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + scannerId, resultCache, consumer, stub, loc, isRegionServerRemote, priority, + scannerLeaseTimeoutPeriodNs, pauseNs, pauseForCQTBENs, maxAttempts, scanTimeoutNs, + rpcTimeoutNs, startLogErrorsCnt); } /** @@ -364,7 +364,7 @@ public BatchCallerBuilder startLogErrorsCnt(int startLogErrorsCnt) { public AsyncBatchRpcRetryingCaller build() { return new AsyncBatchRpcRetryingCaller<>(retryTimer, conn, tableName, actions, pauseNs, - pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); } public List> call() { @@ -385,8 +385,8 @@ public class MasterRequestCallerBuilder extends BuilderBase { private int priority = PRIORITY_UNSET; - public MasterRequestCallerBuilder action( - AsyncMasterRequestRpcRetryingCaller.Callable callable) { + public MasterRequestCallerBuilder + action(AsyncMasterRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -438,7 +438,8 @@ private void preCheck() { public AsyncMasterRequestRpcRetryingCaller build() { preCheck(); return new AsyncMasterRequestRpcRetryingCaller(retryTimer, conn, callable, priority, - pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, + startLogErrorsCnt); } /** @@ -466,8 +467,8 @@ public class AdminRequestCallerBuilder extends BuilderBase { private int priority; - public AdminRequestCallerBuilder action( - AsyncAdminRequestRetryingCaller.Callable callable) { + public AdminRequestCallerBuilder + action(AsyncAdminRequestRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -514,8 +515,8 @@ public AdminRequestCallerBuilder priority(int priority) { public AsyncAdminRequestRetryingCaller build() { return new AsyncAdminRequestRetryingCaller(retryTimer, conn, priority, pauseNs, - pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, - checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is null")); + pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, + checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is 
null")); } public CompletableFuture call() { @@ -537,8 +538,8 @@ public class ServerRequestCallerBuilder extends BuilderBase { private ServerName serverName; - public ServerRequestCallerBuilder action( - AsyncServerRequestRpcRetryingCaller.Callable callable) { + public ServerRequestCallerBuilder + action(AsyncServerRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -580,8 +581,8 @@ public ServerRequestCallerBuilder serverName(ServerName serverName) { public AsyncServerRequestRpcRetryingCaller build() { return new AsyncServerRequestRpcRetryingCaller(retryTimer, conn, pauseNs, pauseForCQTBENs, - maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, - checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is null")); + maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, + checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is null")); } public CompletableFuture call() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java index 48e038ecd2e7..e9aa962edb38 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,6 +27,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException; import static org.apache.hadoop.hbase.client.ConnectionUtils.updateResultsMetrics; import static org.apache.hadoop.hbase.client.ConnectionUtils.updateServerSideMetrics; + import io.opentelemetry.context.Context; import io.opentelemetry.context.Scope; import java.io.IOException; @@ -51,9 +52,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.io.netty.util.Timeout; import org.apache.hbase.thirdparty.io.netty.util.Timer; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; @@ -169,8 +172,8 @@ public ScanControllerImpl(Optional cursor) { private void preCheck() { Preconditions.checkState(Thread.currentThread() == callerThread, - "The current thread is %s, expected thread is %s, " + - "you should not call this method outside onNext or onHeartbeat", + "The current thread is %s, expected thread is %s, " + + "you should not call this method outside onNext or onHeartbeat", Thread.currentThread(), callerThread); Preconditions.checkState(state.equals(ScanControllerState.INITIALIZED), "Invalid Stopper state %s", state); @@ -200,7 +203,7 @@ ScanControllerState destroy() { @Override public Optional cursor() { - return cursor; + return cursor; } } @@ -351,9 +354,9 @@ private void closeScanner() { ScanRequest req = RequestConverter.buildScanRequest(this.scannerId, 0, true, false); stub.scan(controller, req, resp -> { if (controller.failed()) { - LOG.warn("Call to " + loc.getServerName() + " for closing scanner id = " + scannerId + - " for " + 
loc.getRegion().getEncodedName() + " of " + - loc.getRegion().getTable() + " failed, ignore, probably already closed", + LOG.warn("Call to " + loc.getServerName() + " for closing scanner id = " + scannerId + + " for " + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + + " failed, ignore, probably already closed", controller.getFailed()); } }); @@ -391,19 +394,19 @@ private void completeWhenError(boolean closeScanner) { private void onError(Throwable error) { error = translateException(error); if (tries > startLogErrorsCnt) { - LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for " + - loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + - " failed, , tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = " + - TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() + - " ms", + LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for " + + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + + " failed, , tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = " + + TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() + + " ms", error); } - boolean scannerClosed = - error instanceof UnknownScannerException || error instanceof NotServingRegionException || - error instanceof RegionServerStoppedException || error instanceof ScannerResetException; + boolean scannerClosed = error instanceof UnknownScannerException + || error instanceof NotServingRegionException + || error instanceof RegionServerStoppedException || error instanceof ScannerResetException; RetriesExhaustedException.ThrowableWithExtraContext qt = - new RetriesExhaustedException.ThrowableWithExtraContext(error, - EnvironmentEdgeManager.currentTime(), ""); + new RetriesExhaustedException.ThrowableWithExtraContext(error, + EnvironmentEdgeManager.currentTime(), ""); exceptions.add(qt); if (tries >= maxAttempts) { completeExceptionally(!scannerClosed); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java index 52a2abe39440..97c915df9bf5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; /** - * Retry caller for a request call to region server. - * Now only used for coprocessor call to region server. + * Retry caller for a request call to region server. Now only used for coprocessor call to region + * server. 
*/ @InterfaceAudience.Private public class AsyncServerRequestRpcRetryingCaller extends AsyncRpcRetryingCaller { @@ -49,7 +49,7 @@ public AsyncServerRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl long pauseNs, long pauseForCQTBENs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { super(retryTimer, conn, HConstants.NORMAL_QOS, pauseNs, pauseForCQTBENs, maxAttempts, - operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); this.serverName = serverName; this.callable = callable; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java index 2a552c71b3dd..e9cbc3a02204 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public AsyncSingleRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl Callable callable, int priority, long pauseNs, long pauseForCQTBENs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { super(retryTimer, conn, priority, pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + rpcTimeoutNs, startLogErrorsCnt); this.tableName = tableName; this.row = row; this.replicaId = replicaId; @@ -73,8 +73,8 @@ private void call(HRegionLocation loc) { stub = conn.getRegionServerStub(loc.getServerName()); } catch (IOException e) { onError(e, - () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + - "' in " + loc.getRegion().getEncodedName() + " of " + tableName + " failed", + () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + + "' in " + loc.getRegion().getEncodedName() + " of " + tableName + " failed", err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } @@ -82,8 +82,8 @@ private void call(HRegionLocation loc) { addListener(callable.call(controller, loc, stub), (result, error) -> { if (error != null) { onError(error, - () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " + - loc.getRegion().getEncodedName() + " of " + tableName + " failed", + () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " + + loc.getRegion().getEncodedName() + " of " + tableName + " failed", err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index df25351e1017..d350dcc2bbe5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -185,7 +185,7 @@ default CompletableFuture exists(Get get) { * {@link CompletableFuture}. */ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount) { + long amount) { return incrementColumnValue(row, family, qualifier, amount, Durability.SYNC_WAL); } @@ -205,12 +205,12 @@ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, * {@link CompletableFuture}. */ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount, Durability durability) { + long amount, Durability durability) { Preconditions.checkNotNull(row, "row is null"); Preconditions.checkNotNull(family, "family is null"); return increment( new Increment(row).addColumn(family, qualifier, amount).setDurability(durability)) - .thenApply(r -> Bytes.toLong(r.getValue(family, qualifier))); + .thenApply(r -> Bytes.toLong(r.getValue(family, qualifier))); } /** @@ -375,7 +375,7 @@ interface CheckAndMutateWithFilterBuilder { * @return A list of {@link CompletableFuture}s that represent the result for each CheckAndMutate. */ List> - checkAndMutate(List checkAndMutates); + checkAndMutate(List checkAndMutates); /** * A simple version of batch checkAndMutate. It will fail if there are any failures. @@ -383,7 +383,7 @@ interface CheckAndMutateWithFilterBuilder { * @return A {@link CompletableFuture} that wrapper the result list. */ default CompletableFuture> - checkAndMutateAll(List checkAndMutates) { + checkAndMutateAll(List checkAndMutates) { return allOf(checkAndMutate(checkAndMutates)); } @@ -479,7 +479,7 @@ default ResultScanner getScanner(byte[] family, byte[] qualifier) { */ default List> exists(List gets) { return get(toCheckExistenceOnly(gets)).stream() - .> map(f -> f.thenApply(r -> r.getExists())).collect(toList()); + .> map(f -> f.thenApply(r -> r.getExists())).collect(toList()); } /** @@ -587,7 +587,7 @@ default CompletableFuture> batchAll(List actions) { * @see ServiceCaller */ CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable, byte[] row); + ServiceCaller callable, byte[] row); /** * The callback when we want to execute a coprocessor call on a range of regions. @@ -726,5 +726,5 @@ default CoprocessorServiceBuilder toRow(byte[] endKey) { * for more details. */ CoprocessorServiceBuilder coprocessorService(Function stubMaker, - ServiceCaller callable, CoprocessorCallback callback); + ServiceCaller callable, CoprocessorCallback callback); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java index 4c883a8332d7..ebaa33a3837e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java index 399d9ddfaffe..554782b1175f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -54,7 +53,7 @@ abstract class AsyncTableBuilderBase AsyncTableBuilderBase(TableName tableName, AsyncConnectionConfiguration connConf) { this.tableName = tableName; this.operationTimeoutNs = tableName.isSystemTable() ? connConf.getMetaOperationTimeoutNs() - : connConf.getOperationTimeoutNs(); + : connConf.getOperationTimeoutNs(); this.scanTimeoutNs = connConf.getScanTimeoutNs(); this.rpcTimeoutNs = connConf.getRpcTimeoutNs(); this.readRpcTimeoutNs = connConf.getReadRpcTimeoutNs(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java index 0bf3179673db..2b1e171a3f52 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static java.util.stream.Collectors.toList; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.context.Context; import io.opentelemetry.context.Scope; @@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; /** @@ -179,8 +181,7 @@ public CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value) public CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) { return new CheckAndMutateWithFilterBuilder() { - private final CheckAndMutateWithFilterBuilder builder = - rawTable.checkAndMutate(row, filter); + private final CheckAndMutateWithFilterBuilder builder = rawTable.checkAndMutate(row, filter); @Override public CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange) { @@ -211,10 +212,9 @@ public CompletableFuture checkAndMutate(CheckAndMutate che } @Override - public List> checkAndMutate( - List checkAndMutates) { - return rawTable.checkAndMutate(checkAndMutates).stream() - .map(this::wrap).collect(toList()); + public List> + checkAndMutate(List checkAndMutates) { + return rawTable.checkAndMutate(checkAndMutates).stream().map(this::wrap).collect(toList()); } @Override @@ -238,7 +238,7 @@ private void scan0(Scan scan, ScanResultConsumer consumer) { span = scanner.getSpan(); try (Scope ignored = span.makeCurrent()) { 
consumer.onScanMetricsCreated(scanner.getScanMetrics()); - for (Result result; (result = scanner.next()) != null; ) { + for (Result result; (result = scanner.next()) != null;) { if (!consumer.onNext(result)) { break; } @@ -312,7 +312,7 @@ public void onError(Throwable error) { } }; CoprocessorServiceBuilder builder = - rawTable.coprocessorService(stubMaker, callable, wrappedCallback); + rawTable.coprocessorService(stubMaker, callable, wrappedCallback); return new CoprocessorServiceBuilder() { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java index 96e3ec4173a9..22114652f0be 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -143,8 +143,8 @@ default CompletableFuture> getEndKeys() { default CompletableFuture>> getStartEndKeys() { return getAllRegionLocations().thenApply( locs -> locs.stream().filter(loc -> RegionReplicaUtil.isDefaultReplica(loc.getRegion())) - .map(HRegionLocation::getRegion).map(r -> Pair.newPair(r.getStartKey(), r.getEndKey())) - .collect(Collectors.toList())); + .map(HRegionLocation::getRegion).map(r -> Pair.newPair(r.getStartKey(), r.getEndKey())) + .collect(Collectors.toList())); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java index 35bf0e0ea330..1da08ad912b5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public TableName getName() { @Override public CompletableFuture getRegionLocation(byte[] row, int replicaId, - boolean reload) { + boolean reload) { return conn.getLocator().getRegionLocation(tableName, row, replicaId, RegionLocateType.CURRENT, reload, -1L); } @@ -59,18 +59,18 @@ public CompletableFuture> getAllRegionLocations() { return tracedFuture(() -> { if (TableName.isMetaTableName(tableName)) { return conn.registry.getMetaRegionLocations() - .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); + .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); } return ClientMetaTableAccessor - .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName); + .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName); }, getClass().getSimpleName() + ".getAllRegionLocations"); } @Override public CompletableFuture> getRegionLocations(byte[] row, boolean reload) { return conn.getLocator() - .getRegionLocations(tableName, row, RegionLocateType.CURRENT, reload, -1L) - .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); + .getRegionLocations(tableName, row, RegionLocateType.CURRENT, reload, -1L) + .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java index 6462cd093f85..34ac43fdf38b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize; + import io.opentelemetry.api.trace.Span; import java.io.IOException; import java.io.InterruptedIOException; @@ -74,10 +75,10 @@ private void addToCache(Result result) { private void stopPrefetch(ScanController controller) { if (LOG.isDebugEnabled()) { - LOG.debug("{} stop prefetching when scanning {} as the cache size {}" + - " is greater than the maxCacheSize {}", - String.format("0x%x", System.identityHashCode(this)), tableName, cacheSize, - maxCacheSize); + LOG.debug( + "{} stop prefetching when scanning {} as the cache size {}" + + " is greater than the maxCacheSize {}", + String.format("0x%x", System.identityHashCode(this)), tableName, cacheSize, maxCacheSize); } resumer = controller.suspend(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java index d693cb329b30..c7ba64bd8c6d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public interface Attributes { /** - * Sets an attribute. - * In case value = null attribute is removed from the attributes map. - * Attribute names starting with _ indicate system attributes. + * Sets an attribute. In case value = null attribute is removed from the attributes map. Attribute + * names starting with _ indicate system attributes. * @param name attribute name * @param value attribute value */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java index 4e67bcedbd84..70a809da05bb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,19 +34,15 @@ public final static class Builder { private boolean dryRun = false; private boolean ignoreRegionsInTransition = false; - private Builder() {} + private Builder() { + } /** - * Updates BalancerRequest to run the balancer in dryRun mode. - * In this mode, the balancer will try to find a plan but WILL NOT - * execute any region moves or call any coprocessors. - * - * You can run in dryRun mode regardless of whether the balancer switch - * is enabled or disabled, but dryRun mode will not run over an existing - * request or chore. - * - * Dry run is useful for testing out new balance configs. See the logs - * on the active HMaster for the results of the dry run. + * Updates BalancerRequest to run the balancer in dryRun mode. In this mode, the balancer will + * try to find a plan but WILL NOT execute any region moves or call any coprocessors. You can + * run in dryRun mode regardless of whether the balancer switch is enabled or disabled, but + * dryRun mode will not run over an existing request or chore. Dry run is useful for testing out + * new balance configs. See the logs on the active HMaster for the results of the dry run. */ public Builder setDryRun(boolean dryRun) { this.dryRun = dryRun; @@ -55,10 +50,8 @@ public Builder setDryRun(boolean dryRun) { } /** - * Updates BalancerRequest to run the balancer even if there are regions - * in transition. - * - * WARNING: Advanced usage only, this could cause more issues than it fixes. + * Updates BalancerRequest to run the balancer even if there are regions in transition. WARNING: + * Advanced usage only, this could cause more issues than it fixes. */ public Builder setIgnoreRegionsInTransition(boolean ignoreRegionsInTransition) { this.ignoreRegionsInTransition = ignoreRegionsInTransition; @@ -81,8 +74,8 @@ public static Builder newBuilder() { } /** - * Get a BalanceRequest for a default run of the balancer. The default mode executes - * any moves calculated and will not run if regions are already in transition. + * Get a BalanceRequest for a default run of the balancer. The default mode executes any moves + * calculated and will not run if regions are already in transition. 
*/ public static BalanceRequest defaultInstance() { return DEFAULT; @@ -97,16 +90,16 @@ private BalanceRequest(boolean dryRun, boolean ignoreRegionsInTransition) { } /** - * Returns true if the balancer should run in dry run mode, otherwise false. In - * dry run mode, moves will be calculated but not executed. + * Returns true if the balancer should run in dry run mode, otherwise false. In dry run mode, + * moves will be calculated but not executed. */ public boolean isDryRun() { return dryRun; } /** - * Returns true if the balancer should execute even if regions are in transition, otherwise - * false. This is an advanced usage feature, as it can cause more issues than it fixes. + * Returns true if the balancer should execute even if regions are in transition, otherwise false. + * This is an advanced usage feature, as it can cause more issues than it fixes. */ public boolean isIgnoreRegionsInTransition() { return ignoreRegionsInTransition; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java index 143878209d11..c7914f150de8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +26,8 @@ public final class BalanceResponse { /** - * Used in HMaster to build a {@link BalanceResponse} for returning results of a balance invocation to callers + * Used in HMaster to build a {@link BalanceResponse} for returning results of a balance + * invocation to callers */ @InterfaceAudience.Private public final static class Builder { @@ -35,13 +35,13 @@ public final static class Builder { private int movesCalculated; private int movesExecuted; - private Builder() {} + private Builder() { + } /** * Set true if the balancer ran, otherwise false. The balancer may not run in some - * circumstances, such as if a balance is already running or there are regions already - * in transition. - * + * circumstances, such as if a balance is already running or there are regions already in + * transition. * @param balancerRan true if balancer ran, false otherwise */ public Builder setBalancerRan(boolean balancerRan) { @@ -52,7 +52,6 @@ public Builder setBalancerRan(boolean balancerRan) { /** * Set how many moves were calculated by the balancer. This will be zero if the cluster is * already balanced. - * * @param movesCalculated moves calculated by the balance run */ public Builder setMovesCalculated(int movesCalculated) { @@ -64,7 +63,6 @@ public Builder setMovesCalculated(int movesCalculated) { * Set how many of the calculated moves were actually executed by the balancer. This should be * zero if the balancer is run with {@link BalanceRequest#isDryRun()}. It may also not equal * movesCalculated if the balancer ran out of time while executing the moves. - * * @param movesExecuted moves executed by the balance run */ public Builder setMovesExecuted(int movesExecuted) { @@ -98,9 +96,9 @@ private BalanceResponse(boolean balancerRan, int movesCalculated, int movesExecu } /** - * Returns true if the balancer ran, otherwise false. 
The balancer may not run for a - * variety of reasons, such as: another balance is running, there are regions in - * transition, the cluster is in maintenance mode, etc. + * Returns true if the balancer ran, otherwise false. The balancer may not run for a variety of + * reasons, such as: another balance is running, there are regions in transition, the cluster is + * in maintenance mode, etc. */ public boolean isBalancerRan() { return balancerRan; @@ -115,10 +113,10 @@ public int getMovesCalculated() { } /** - * The number of moves actually executed by the balancer if it ran. This will be - * zero if {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} - * was true. It may also not be equal to {@link #getMovesCalculated()} if the balancer - * was interrupted midway through executing the moves due to max run time. + * The number of moves actually executed by the balancer if it ran. This will be zero if + * {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} was true. It may + * also not be equal to {@link #getMovesCalculated()} if the balancer was interrupted midway + * through executing the moves due to max run time. */ public int getMovesExecuted() { return movesExecuted; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java index e2bf2e28e0e7..1c22203b6dc1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import java.util.List; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -45,13 +42,12 @@ final public class BalancerDecision extends LogEntry { // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .registerTypeAdapter(BalancerDecision.class, (JsonSerializer) - (balancerDecision, type, jsonSerializationContext) -> { - Gson gson = new Gson(); - return gson.toJsonTree(balancerDecision); - }).create(); + private static final Gson GSON = + GsonUtil.createGson().setPrettyPrinting().registerTypeAdapter(BalancerDecision.class, + (JsonSerializer) (balancerDecision, type, jsonSerializationContext) -> { + Gson gson = new Gson(); + return gson.toJsonTree(balancerDecision); + }).create(); private BalancerDecision(String initialFunctionCosts, String finalFunctionCosts, double initTotalCost, double computedTotalCost, List regionPlans, @@ -90,14 +86,10 @@ public long getComputedSteps() { @Override public String toString() { - return new ToStringBuilder(this) - .append("initialFunctionCosts", initialFunctionCosts) - .append("finalFunctionCosts", finalFunctionCosts) - .append("initTotalCost", initTotalCost) - .append("computedTotalCost", computedTotalCost) - .append("computedSteps", computedSteps) - .append("regionPlans", regionPlans) - .toString(); + return new ToStringBuilder(this).append("initialFunctionCosts", initialFunctionCosts) + .append("finalFunctionCosts", finalFunctionCosts).append("initTotalCost", initTotalCost) + .append("computedTotalCost", computedTotalCost).append("computedSteps", computedSteps) + .append("regionPlans", regionPlans).toString(); } @Override @@ -144,8 +136,8 @@ public Builder setComputedSteps(long computedSteps) { } public BalancerDecision build() { - return new BalancerDecision(initialFunctionCosts, finalFunctionCosts, - initTotalCost, computedTotalCost, regionPlans, computedSteps); + return new BalancerDecision(initialFunctionCosts, finalFunctionCosts, initTotalCost, + computedTotalCost, regionPlans, computedSteps); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java index d6e6cee20fc8..3bc114d7beec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -37,27 +34,25 @@ @InterfaceAudience.Public @InterfaceStability.Evolving final public class BalancerRejection extends LogEntry { - //The reason why balancer was rejected + // The reason why balancer was rejected private final String reason; private final List costFuncInfoList; // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .disableHtmlEscaping() - .registerTypeAdapter(BalancerRejection.class, (JsonSerializer) - (balancerRejection, type, jsonSerializationContext) -> { - Gson gson = new Gson(); - return gson.toJsonTree(balancerRejection); - }).create(); + private static final Gson GSON = GsonUtil.createGson().setPrettyPrinting().disableHtmlEscaping() + .registerTypeAdapter(BalancerRejection.class, + (JsonSerializer) (balancerRejection, type, jsonSerializationContext) -> { + Gson gson = new Gson(); + return gson.toJsonTree(balancerRejection); + }) + .create(); private BalancerRejection(String reason, List costFuncInfoList) { this.reason = reason; - if(costFuncInfoList == null){ + if (costFuncInfoList == null) { this.costFuncInfoList = Collections.emptyList(); - } - else { + } else { this.costFuncInfoList = costFuncInfoList; } } @@ -72,10 +67,8 @@ public List getCostFuncInfoList() { @Override public String toString() { - return new ToStringBuilder(this) - .append("reason", reason) - .append("costFuncInfoList", costFuncInfoList.toString()) - .toString(); + return new ToStringBuilder(this).append("reason", reason) + .append("costFuncInfoList", costFuncInfoList.toString()).toString(); } @Override @@ -92,19 +85,15 @@ public Builder setReason(String reason) { return this; } - public void addCostFuncInfo(String funcName, double cost, float multiplier){ - if(costFuncInfoList == null){ + public void addCostFuncInfo(String funcName, double cost, float multiplier) { + if (costFuncInfoList == null) { costFuncInfoList = new ArrayList<>(); } - costFuncInfoList.add( - new StringBuilder() - .append(funcName) - .append(" cost:").append(cost) - .append(" multiplier:").append(multiplier) - .toString()); + costFuncInfoList.add(new StringBuilder().append(funcName).append(" cost:").append(cost) + .append(" multiplier:").append(multiplier).toString()); } - public Builder setCostFuncInfoList(List costFuncInfoList){ + public Builder setCostFuncInfoList(List costFuncInfoList) { this.costFuncInfoList = costFuncInfoList; return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java index 3b27298585e9..8cbb8dcec4bb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,11 +24,10 @@ import java.util.ArrayList; import java.util.Deque; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A scan result cache for batched scan, i.e, @@ -142,8 +141,8 @@ public Result[] addAndGet(Result[] results, boolean isHeartbeatMessage) throws I numberOfCompleteRows++; } // check if we have a row change - if (!partialResults.isEmpty() && - !Bytes.equals(partialResults.peek().getRow(), result.getRow())) { + if (!partialResults.isEmpty() + && !Bytes.equals(partialResults.peek().getRow(), result.getRow())) { regroupedResults.add(createCompletedResult()); } Result regroupedResult = regroupResults(result); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java index 8ad6a7922303..a007e3ecc52f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,34 +25,38 @@ import org.apache.yetus.audience.InterfaceAudience; /** - *

    Used to communicate with a single HBase table similar to {@link Table} but meant for - * batched, asynchronous puts. Obtain an instance from a {@link Connection} and call - * {@link #close()} afterwards. Customizations can be applied to the {@code BufferedMutator} via - * the {@link BufferedMutatorParams}. + *

    + * Used to communicate with a single HBase table similar to {@link Table} but meant for batched, + * asynchronous puts. Obtain an instance from a {@link Connection} and call {@link #close()} + * afterwards. Customizations can be applied to the {@code BufferedMutator} via the + * {@link BufferedMutatorParams}. *

    - * - *

    Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}. - * The default implementation is to throw the exception upon receipt. This behavior can be - * overridden with a custom implementation, provided as a parameter with - * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}.

    - * - *

    Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs - * benefit from batching, but have no natural flush point. {@code BufferedMutator} receives the - * puts from the M/R job and will batch puts based on some heuristic, such as the accumulated size - * of the puts, and submit batches of puts asynchronously so that the M/R logic can continue - * without interruption. + *

    + * Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}. The + * default implementation is to throw the exception upon receipt. This behavior can be overridden + * with a custom implementation, provided as a parameter with + * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}. *

    - * - *

    {@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs - * will have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can - * also be effectively used in high volume online systems to batch puts, with the caveat that - * extreme circumstances, such as JVM or machine failure, may cause some data loss.

    - * - *

    NOTE: This class replaces the functionality that used to be available via + *

    + * Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs benefit + * from batching, but have no natural flush point. {@code BufferedMutator} receives the puts from + * the M/R job and will batch puts based on some heuristic, such as the accumulated size of the + * puts, and submit batches of puts asynchronously so that the M/R logic can continue without + * interruption. + *

    + *

    + * {@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs will + * have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can also be + * effectively used in high volume online systems to batch puts, with the caveat that extreme + * circumstances, such as JVM or machine failure, may cause some data loss. + *

    + *

    + * NOTE: This class replaces the functionality that used to be available via * HTable#setAutoFlush(boolean) set to {@code false}. *

    - * - *

    See also the {@code BufferedMutatorExample} in the hbase-examples module.

    + *

    + * See also the {@code BufferedMutatorExample} in the hbase-examples module. + *

    * @see ConnectionFactory * @see Connection * @since 1.0.0 @@ -70,8 +73,8 @@ public interface BufferedMutator extends Closeable { String CLASSNAME_KEY = "hbase.client.bufferedmutator.classname"; /** - * Having the timer tick run more often that once every 100ms is needless and will - * probably cause too many timer events firing having a negative impact on performance. + * Having the timer tick run more often that once every 100ms is needless and will probably cause + * too many timer events firing having a negative impact on performance. */ long MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS = 100; @@ -83,25 +86,22 @@ public interface BufferedMutator extends Closeable { /** * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance. *

    - * The reference returned is not a copy, so any change made to it will - * affect this instance. + * The reference returned is not a copy, so any change made to it will affect this instance. */ Configuration getConfiguration(); /** - * Sends a {@link Mutation} to the table. The mutations will be buffered and sent over the - * wire as part of a batch. Currently only supports {@link Put} and {@link Delete} mutations. - * + * Sends a {@link Mutation} to the table. The mutations will be buffered and sent over the wire as + * part of a batch. Currently only supports {@link Put} and {@link Delete} mutations. * @param mutation The data to send. * @throws IOException if a remote or network exception occurs. */ void mutate(Mutation mutation) throws IOException; /** - * Send some {@link Mutation}s to the table. The mutations will be buffered and sent over the - * wire as part of a batch. There is no guarantee of sending entire content of {@code mutations} - * in a single batch; it will be broken up according to the write buffer capacity. - * + * Send some {@link Mutation}s to the table. The mutations will be buffered and sent over the wire + * as part of a batch. There is no guarantee of sending entire content of {@code mutations} in a + * single batch; it will be broken up according to the write buffer capacity. * @param mutations The data to send. * @throws IOException if a remote or network exception occurs. */ @@ -109,24 +109,22 @@ public interface BufferedMutator extends Closeable { /** * Performs a {@link #flush()} and releases any resources held. - * * @throws IOException if a remote or network exception occurs. */ @Override void close() throws IOException; /** - * Executes all the buffered, asynchronous {@link Mutation} operations and waits until they - * are done. - * + * Executes all the buffered, asynchronous {@link Mutation} operations and waits until they are + * done. * @throws IOException if a remote or network exception occurs. */ void flush() throws IOException; /** * Sets the maximum time before the buffer is automatically flushed checking once per second. - * @param timeoutMs The maximum number of milliseconds how long records may be buffered - * before they are flushed. Set to 0 to disable. + * @param timeoutMs The maximum number of milliseconds how long records may be buffered before + * they are flushed. Set to 0 to disable. */ default void setWriteBufferPeriodicFlush(long timeoutMs) { setWriteBufferPeriodicFlush(timeoutMs, 1000L); @@ -134,16 +132,16 @@ default void setWriteBufferPeriodicFlush(long timeoutMs) { /** * Sets the maximum time before the buffer is automatically flushed. - * @param timeoutMs The maximum number of milliseconds how long records may be buffered - * before they are flushed. Set to 0 to disable. - * @param timerTickMs The number of milliseconds between each check if the - * timeout has been exceeded. Must be 100ms (as defined in - * {@link #MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS}) - * or larger to avoid performance problems. + * @param timeoutMs The maximum number of milliseconds how long records may be buffered before + * they are flushed. Set to 0 to disable. + * @param timerTickMs The number of milliseconds between each check if the timeout has been + * exceeded. Must be 100ms (as defined in + * {@link #MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS}) or larger to avoid performance + * problems. 
*/ default void setWriteBufferPeriodicFlush(long timeoutMs, long timerTickMs) { throw new UnsupportedOperationException( - "The BufferedMutator::setWriteBufferPeriodicFlush has not been implemented"); + "The BufferedMutator::setWriteBufferPeriodicFlush has not been implemented"); } /** @@ -155,22 +153,22 @@ default void disableWriteBufferPeriodicFlush() { /** * Returns the current periodic flush timeout value in milliseconds. - * @return The maximum number of milliseconds how long records may be buffered before they - * are flushed. The value 0 means this is disabled. + * @return The maximum number of milliseconds how long records may be buffered before they are + * flushed. The value 0 means this is disabled. */ default long getWriteBufferPeriodicFlushTimeoutMs() { throw new UnsupportedOperationException( - "The BufferedMutator::getWriteBufferPeriodicFlushTimeoutMs has not been implemented"); + "The BufferedMutator::getWriteBufferPeriodicFlushTimeoutMs has not been implemented"); } /** * Returns the current periodic flush timertick interval in milliseconds. - * @return The number of milliseconds between each check if the timeout has been exceeded. - * This value only has a real meaning if the timeout has been set to > 0 + * @return The number of milliseconds between each check if the timeout has been exceeded. This + * value only has a real meaning if the timeout has been set to > 0 */ default long getWriteBufferPeriodicFlushTimerTickMs() { throw new UnsupportedOperationException( - "The BufferedMutator::getWriteBufferPeriodicFlushTimerTickMs has not been implemented"); + "The BufferedMutator::getWriteBufferPeriodicFlushTimerTickMs has not been implemented"); } /** @@ -202,7 +200,7 @@ default long getWriteBufferPeriodicFlushTimerTickMs() { */ @InterfaceAudience.Public interface ExceptionListener { - public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator mutator) throws RetriesExhaustedWithDetailsException; + public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator) + throws RetriesExhaustedWithDetailsException; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java index b8bc55c47c37..bad00e8b7cdb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorOverAsyncBufferedMutator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ class BufferedMutatorOverAsyncBufferedMutator implements BufferedMutator { private static final Logger LOG = - LoggerFactory.getLogger(BufferedMutatorOverAsyncBufferedMutator.class); + LoggerFactory.getLogger(BufferedMutatorOverAsyncBufferedMutator.class); private final AsyncBufferedMutator mutator; @@ -56,7 +56,7 @@ class BufferedMutatorOverAsyncBufferedMutator implements BufferedMutator { private final AtomicLong bufferedSize = new AtomicLong(0); private final ConcurrentLinkedQueue> errors = - new ConcurrentLinkedQueue<>(); + new ConcurrentLinkedQueue<>(); BufferedMutatorOverAsyncBufferedMutator(AsyncBufferedMutator mutator, ExceptionListener listener) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java index 54c133b81bf8..07c33e47b3c4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.concurrent.ExecutorService; @@ -43,8 +41,7 @@ public class BufferedMutatorParams implements Cloneable { private BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() { @Override public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator bufferedMutator) - throws RetriesExhaustedWithDetailsException { + BufferedMutator bufferedMutator) throws RetriesExhaustedWithDetailsException { throw exception; } }; @@ -145,8 +142,8 @@ public BufferedMutatorParams maxKeyValueSize(int maxKeyValueSize) { } /** - * @deprecated Since 3.0.0-alpha-2, will be removed in 4.0.0. You can not set it anymore. - * BufferedMutator will use Connection's ExecutorService. + * @deprecated Since 3.0.0-alpha-2, will be removed in 4.0.0. You can not set it anymore. + * BufferedMutator will use Connection's ExecutorService. */ @Deprecated public ExecutorService getPool() { @@ -154,8 +151,8 @@ public ExecutorService getPool() { } /** - * Override the default executor pool defined by the {@code hbase.htable.threads.*} - * configuration values. + * Override the default executor pool defined by the {@code hbase.htable.threads.*} configuration + * values. * @deprecated Since 3.0.0-alpha-2, will be removed in 4.0.0. You can not set it anymore. * BufferedMutator will use Connection's ExecutorService. 
*/ @@ -200,18 +197,18 @@ public BufferedMutatorParams listener(BufferedMutator.ExceptionListener listener return this; } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="CN_IDIOM_NO_SUPER_CALL", - justification="The clone below is complete") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "CN_IDIOM_NO_SUPER_CALL", + justification = "The clone below is complete") @Override public BufferedMutatorParams clone() { BufferedMutatorParams clone = new BufferedMutatorParams(this.tableName); - clone.writeBufferSize = this.writeBufferSize; - clone.writeBufferPeriodicFlushTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs; + clone.writeBufferSize = this.writeBufferSize; + clone.writeBufferPeriodicFlushTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs; clone.writeBufferPeriodicFlushTimerTickMs = this.writeBufferPeriodicFlushTimerTickMs; - clone.maxKeyValueSize = this.maxKeyValueSize; - clone.pool = this.pool; - clone.listener = this.listener; - clone.implementationClassName = this.implementationClassName; + clone.maxKeyValueSize = this.maxKeyValueSize; + clone.pool = this.pool; + clone.listener = this.listener; + clone.implementationClassName = this.implementationClassName; return clone; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index 27be88a9def2..7c4896af2e74 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -39,10 +39,9 @@ interface CatalogReplicaLoadBalanceSelector { /** * Select a catalog replica region where client go to loop up the input row key. - * * @param tablename table name - * @param row key to look up - * @param locateType locate type + * @param row key to look up + * @param locateType locate type * @return replica id */ int select(TableName tablename, byte[] row, RegionLocateType locateType); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java index fe686f79ab8a..121f23b040f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java @@ -35,13 +35,13 @@ private CatalogReplicaLoadBalanceSelectorFactory() { /** * Create a CatalogReplicaLoadBalanceReplicaSelector based on input config. - * @param replicaSelectorClass Selector classname. - * @param tableName System table name. + * @param replicaSelectorClass Selector classname. + * @param tableName System table name. 
* @param conn {@link AsyncConnectionImpl} - * @return {@link CatalogReplicaLoadBalanceSelector} + * @return {@link CatalogReplicaLoadBalanceSelector} */ public static CatalogReplicaLoadBalanceSelector createSelector(String replicaSelectorClass, - TableName tableName, AsyncConnectionImpl conn, IntSupplier getReplicaCount) { + TableName tableName, AsyncConnectionImpl conn, IntSupplier getReplicaCount) { return ReflectionUtils.instantiateWithCustomCtor(replicaSelectorClass, new Class[] { TableName.class, AsyncConnectionImpl.class, IntSupplier.class }, new Object[] { tableName, conn, getReplicaCount }); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index 01996b34e2ef..2abc01f6532f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow; import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR; import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; + import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -38,34 +39,36 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + /** - *

    CatalogReplicaLoadBalanceReplicaSimpleSelector implements a simple catalog replica load - * balancing algorithm. It maintains a stale location cache for each table. Whenever client looks - * up location, it first check if the row is the stale location cache. If yes, the location from - * catalog replica is stale, it will go to the primary region to look up update-to-date location; - * otherwise, it will randomly pick up a replica region for lookup. When clients receive - * RegionNotServedException from region servers, it will add these region locations to the stale - * location cache. The stale cache will be cleaned up periodically by a chore.

    - * + *

    + * CatalogReplicaLoadBalanceReplicaSimpleSelector implements a simple catalog replica load balancing + * algorithm. It maintains a stale location cache for each table. Whenever client looks up location, + * it first check if the row is the stale location cache. If yes, the location from catalog replica + * is stale, it will go to the primary region to look up update-to-date location; otherwise, it will + * randomly pick up a replica region for lookup. When clients receive RegionNotServedException from + * region servers, it will add these region locations to the stale location cache. The stale cache + * will be cleaned up periodically by a chore. + *

    * It follows a simple algorithm to choose a replica to go: - * *
 * <ol>
- * <li>If there is no stale location entry for rows it looks up, it will randomly
- * pick a replica region to do lookup.</li>
- * <li>If the location from the replica region is stale, client gets RegionNotServedException
- * from region server, in this case, it will create StaleLocationCacheEntry in
- * CatalogReplicaLoadBalanceReplicaSimpleSelector.</li>
- * <li>When client tries to do location lookup, it checks StaleLocationCache first for rows it
- * tries to lookup, if entry exists, it will go with primary meta region to do lookup;
- * otherwise, it will follow step 1.</li>
- * <li>A chore will periodically run to clean up cache entries in the StaleLocationCache.</li>
+ * <li>If there is no stale location entry for rows it looks up, it will randomly pick a replica
+ * region to do lookup.</li>
+ * <li>If the location from the replica region is stale, client gets RegionNotServedException from
+ * region server, in this case, it will create StaleLocationCacheEntry in
+ * CatalogReplicaLoadBalanceReplicaSimpleSelector.</li>
+ * <li>When client tries to do location lookup, it checks StaleLocationCache first for rows it tries
+ * to lookup, if entry exists, it will go with primary meta region to do lookup; otherwise, it will
+ * follow step 1.</li>
+ * <li>A chore will periodically run to clean up cache entries in the StaleLocationCache.</li>
 * </ol>
    */ -class CatalogReplicaLoadBalanceSimpleSelector implements - CatalogReplicaLoadBalanceSelector, Stoppable { +class CatalogReplicaLoadBalanceSimpleSelector + implements CatalogReplicaLoadBalanceSelector, Stoppable { private static final Logger LOG = - LoggerFactory.getLogger(CatalogReplicaLoadBalanceSimpleSelector.class); + LoggerFactory.getLogger(CatalogReplicaLoadBalanceSimpleSelector.class); private final long STALE_CACHE_TIMEOUT_IN_MILLISECONDS = 3000; // 3 seconds private final int STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS = 1500; // 1.5 seconds private final int REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS = 60000; // 1 minute @@ -94,15 +97,13 @@ public long getTimestamp() { @Override public String toString() { - return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("endKey", endKey) - .append("timestamp", timestamp) - .toString(); + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("endKey", endKey) + .append("timestamp", timestamp).toString(); } } - private final ConcurrentMap> - staleCache = new ConcurrentHashMap<>(); + private final ConcurrentMap> staleCache = + new ConcurrentHashMap<>(); private volatile int numOfReplicas; private final AsyncConnectionImpl conn; private final TableName tableName; @@ -110,7 +111,7 @@ public String toString() { private volatile boolean isStopped = false; CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, AsyncConnectionImpl conn, - IntSupplier getNumOfReplicas) { + IntSupplier getNumOfReplicas) { this.conn = conn; this.tableName = tableName; this.getNumOfReplicas = getNumOfReplicas; @@ -123,24 +124,22 @@ public String toString() { } /** - * When a client runs into RegionNotServingException, it will call this method to - * update Selector's internal state. + * When a client runs into RegionNotServingException, it will call this method to update + * Selector's internal state. * @param loc the location which causes exception. */ public void onError(HRegionLocation loc) { - ConcurrentNavigableMap tableCache = - computeIfAbsent(staleCache, loc.getRegion().getTable(), - () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); + ConcurrentNavigableMap tableCache = computeIfAbsent(staleCache, + loc.getRegion().getTable(), () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); byte[] startKey = loc.getRegion().getStartKey(); - tableCache.putIfAbsent(startKey, - new StaleLocationCacheEntry(loc.getRegion().getEndKey())); + tableCache.putIfAbsent(startKey, new StaleLocationCacheEntry(loc.getRegion().getEndKey())); LOG.debug("Add entry to stale cache for table {} with startKey {}, {}", loc.getRegion().getTable(), startKey, loc.getRegion().getEndKey()); } /** - * Select an random replica id. In case there is no replica region configured, return - * the primary replica id. + * Select an random replica id. In case there is no replica region configured, return the primary + * replica id. * @return Replica id */ private int getRandomReplicaId() { @@ -157,20 +156,18 @@ private int getRandomReplicaId() { } /** - * When it looks up a location, it will call this method to find a replica region to go. - * For a normal case, > 99% of region locations from catalog/meta replica will be up to date. - * In extreme cases such as region server crashes, it will depends on how fast replication - * catches up. - * + * When it looks up a location, it will call this method to find a replica region to go. For a + * normal case, > 99% of region locations from catalog/meta replica will be up to date. 
In extreme + * cases such as region server crashes, it will depends on how fast replication catches up. * @param tablename table name it looks up * @param row key it looks up. * @param locateType locateType, Only BEFORE and CURRENT will be passed in. * @return catalog replica id */ public int select(final TableName tablename, final byte[] row, - final RegionLocateType locateType) { - Preconditions.checkArgument(locateType == RegionLocateType.BEFORE || - locateType == RegionLocateType.CURRENT, + final RegionLocateType locateType) { + Preconditions.checkArgument( + locateType == RegionLocateType.BEFORE || locateType == RegionLocateType.CURRENT, "Expected type BEFORE or CURRENT but got: %s", locateType); ConcurrentNavigableMap tableCache = staleCache.get(tablename); @@ -198,15 +195,15 @@ public int select(final TableName tablename, final byte[] row, // long comparing is faster than comparing byte arrays(in most cases). It could remove // stale entries faster. If the possible match entry does not time out, it will check if // the entry is a match for the row passed in and select the replica id accordingly. - if ((EnvironmentEdgeManager.currentTime() - entry.getValue().getTimestamp()) >= - STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + if ((EnvironmentEdgeManager.currentTime() + - entry.getValue().getTimestamp()) >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { LOG.debug("Entry for table {} with startKey {}, {} times out", tablename, entry.getKey(), entry); tableCache.remove(entry.getKey()); return getRandomReplicaId(); } - byte[] endKey = entry.getValue().getEndKey(); + byte[] endKey = entry.getValue().getEndKey(); // The following logic is borrowed from AsyncNonMetaRegionLocator. if (isEmptyStopRow(endKey)) { @@ -245,12 +242,11 @@ public boolean isStopped() { private void cleanupReplicaReplicaStaleCache() { long curTimeInMills = EnvironmentEdgeManager.currentTime(); for (ConcurrentNavigableMap tableCache : staleCache.values()) { - Iterator> it = - tableCache.entrySet().iterator(); + Iterator> it = tableCache.entrySet().iterator(); while (it.hasNext()) { Map.Entry entry = it.next(); - if (curTimeInMills - entry.getValue().getTimestamp() >= - STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + if (curTimeInMills + - entry.getValue().getTimestamp() >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { LOG.debug("clean entry {}, {} from stale cache", entry.getKey(), entry.getValue()); it.remove(); } @@ -269,17 +265,17 @@ private int refreshCatalogReplicaCount() { } int cachedNumOfReplicas = this.numOfReplicas; - if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - (cachedNumOfReplicas != newNumOfReplicas)) { + if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) + || (cachedNumOfReplicas != newNumOfReplicas)) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; } - private ScheduledChore getCacheCleanupChore( - final CatalogReplicaLoadBalanceSimpleSelector selector) { + private ScheduledChore + getCacheCleanupChore(final CatalogReplicaLoadBalanceSimpleSelector selector) { return new ScheduledChore("CleanupCatalogReplicaStaleCache", this, - STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS) { + STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS) { @Override protected void chore() { selector.cleanupReplicaReplicaStaleCache(); @@ -287,10 +283,10 @@ protected void chore() { }; } - private ScheduledChore getRefreshReplicaCountChore( - final CatalogReplicaLoadBalanceSimpleSelector selector) { + private ScheduledChore + getRefreshReplicaCountChore(final CatalogReplicaLoadBalanceSimpleSelector selector) { 
return new ScheduledChore("RefreshReplicaCountChore", this, - REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS) { + REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS) { @Override protected void chore() { selector.refreshCatalogReplicaCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java index 40062e32e83c..647d5dcf38f5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,18 +20,16 @@ import org.apache.yetus.audience.InterfaceAudience; /** - *

- * <p>There are two modes with catalog replica support.</p>
- *
+ * <p>
+ * There are two modes with catalog replica support.
+ * </p>
 * <ul>
- * <li>HEDGED_READ - Client sends requests to the primary region first, within a
- * configured amount of time, if there is no response coming back,
- * client sends requests to all replica regions and takes the first
- * response.</li>
- *
- * <li>LOAD_BALANCE - Client sends requests to replica regions in a round-robin mode,
- * if results from replica regions are stale, next time, client sends requests for
- * these stale locations to the primary region. In this mode, scan
- * requests are load balanced across all replica regions.</li>
+ * <li>HEDGED_READ - Client sends requests to the primary region first, within a configured amount
+ * of time, if there is no response coming back, client sends requests to all replica regions and
+ * takes the first response.</li>
+ * <li>LOAD_BALANCE - Client sends requests to replica regions in a round-robin mode, if results
+ * from replica regions are stale, next time, client sends requests for these stale locations to the
+ * primary region. In this mode, scan requests are load balanced across all replica regions.</li>
 * </ul>
    */ @InterfaceAudience.Private @@ -54,7 +54,7 @@ public String toString() { }; public static CatalogReplicaMode fromString(final String value) { - for(CatalogReplicaMode mode : values()) { + for (CatalogReplicaMode mode : values()) { if (mode.toString().equalsIgnoreCase(value)) { return mode; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java index b7f17f310fd8..ce13acf7f9f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java @@ -23,13 +23,15 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** * Used to perform CheckAndMutate operations. *

- * Use the builder class to instantiate a CheckAndMutate object.
- * This builder class is fluent style APIs, the code are like:
+ * Use the builder class to instantiate a CheckAndMutate object. This builder class is fluent style
+ * APIs, the code are like:
+ *
 * <pre>
 * <code>
      * // A CheckAndMutate operation where do the specified action if the column (specified by the
    @@ -75,7 +77,6 @@ private Builder(byte[] row) {
     
         /**
          * Check for lack of column
    -     *
          * @param family family to check
          * @param qualifier qualifier to check
          * @return the CheckAndMutate object
    @@ -86,7 +87,6 @@ public Builder ifNotExists(byte[] family, byte[] qualifier) {
     
         /**
          * Check for equality
    -     *
          * @param family family to check
          * @param qualifier qualifier to check
          * @param value the expected value
    @@ -104,7 +104,7 @@ public Builder ifEquals(byte[] family, byte[] qualifier, byte[] value) {
          * @return the CheckAndMutate object
          */
         public Builder ifMatches(byte[] family, byte[] qualifier, CompareOperator compareOp,
    -      byte[] value) {
    +        byte[] value) {
           this.family = Preconditions.checkNotNull(family, "family is null");
           this.qualifier = qualifier;
           this.op = Preconditions.checkNotNull(compareOp, "compareOp is null");
    @@ -133,13 +133,14 @@ public Builder timeRange(TimeRange timeRange) {
         private void preCheck(Row action) {
           Preconditions.checkNotNull(action, "action is null");
           if (!Bytes.equals(row, action.getRow())) {
    -        throw new IllegalArgumentException("The row of the action <" +
    -          Bytes.toStringBinary(action.getRow()) + "> doesn't match the original one <" +
    -          Bytes.toStringBinary(this.row) + ">");
    +        throw new IllegalArgumentException(
    +            "The row of the action <" + Bytes.toStringBinary(action.getRow())
    +                + "> doesn't match the original one <" + Bytes.toStringBinary(this.row) + ">");
           }
    -      Preconditions.checkState(op != null || filter != null, "condition is null. You need to"
    -        + " specify the condition by calling ifNotExists/ifEquals/ifMatches before building a"
    -        + " CheckAndMutate object");
    +      Preconditions.checkState(op != null || filter != null,
    +        "condition is null. You need to"
    +            + " specify the condition by calling ifNotExists/ifEquals/ifMatches before building a"
    +            + " CheckAndMutate object");
         }
     
         /**
    @@ -210,7 +211,6 @@ public CheckAndMutate build(RowMutations mutations) {
     
       /**
        * returns a builder object to build a CheckAndMutate object
    -   *
        * @param row row
        * @return a builder object
        */
    @@ -227,8 +227,8 @@ public static Builder newBuilder(byte[] row) {
       private final TimeRange timeRange;
       private final Row action;
     
    -  private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier,final CompareOperator op,
    -    byte[] value, TimeRange timeRange, Row action) {
    +  private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier, final CompareOperator op,
    +      byte[] value, TimeRange timeRange, Row action) {
         this.row = row;
         this.family = family;
         this.qualifier = qualifier;
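Editor's note: the hunk above only rewraps the CheckAndMutate builder; the fluent API itself is unchanged. As a rough usage sketch (the table, row, family, qualifier and value names below are invented for illustration, and "table" is assumed to be an already-obtained synchronous Table instance):

  // Illustrative sketch only: hypothetical names, error handling omitted.
  static CheckAndMutateResult putIfStillExpected(Table table) throws IOException {
    byte[] row = Bytes.toBytes("row-1");
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("q");
    // Apply the Put only if the cell currently holds the expected value.
    CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
      .ifEquals(family, qualifier, Bytes.toBytes("expected"))
      .build(new Put(row).addColumn(family, qualifier, Bytes.toBytes("updated")));
    // The condition and the mutation are evaluated atomically on the region server.
    return table.checkAndMutate(checkAndMutate);
  }

Note that build() rejects an action whose row differs from the builder's row, as the preCheck shown in this hunk enforces.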
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    index 69aa120b6b99..ed198f3b7fe1 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -17,7 +17,6 @@
      */
     package org.apache.hadoop.hbase.client;
     
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    index 9125132e66c5..758cf508578a 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -20,27 +19,27 @@
     
     import java.io.IOException;
     import java.lang.management.ManagementFactory;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
     import org.apache.hadoop.hbase.util.Addressing;
     import org.apache.hadoop.hbase.util.Bytes;
     import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    +import org.apache.yetus.audience.InterfaceAudience;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
     
     /**
    - * The class that is able to determine some unique strings for the client,
    - * such as an IP address, PID, and composite deterministic ID.
    + * The class that is able to determine some unique strings for the client, such as an IP address,
    + * PID, and composite deterministic ID.
      */
     @InterfaceAudience.Private
     final class ClientIdGenerator {
       private static final Logger LOG = LoggerFactory.getLogger(ClientIdGenerator.class);
     
    -  private ClientIdGenerator() {}
    +  private ClientIdGenerator() {
    +  }
     
       /**
    -   * @return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill...
    -   * Note though that new UUID in java by default is just a random number.
    +   * @return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill... Note
    +   *         though that new UUID in java by default is just a random number.
        */
       public static byte[] generateClientId() {
         byte[] selfBytes = getIpAddressBytes();
    @@ -78,8 +77,8 @@ public static Long getPid() {
       }
     
       /**
    -   * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual
    -   *         and not a loopback address. Empty array if none can be found or error occurred.
    +   * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual and not
    +   *         a loopback address. Empty array if none can be found or error occurred.
        */
       public static byte[] getIpAddressBytes() {
         try {
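
The javadoc above describes the generated client id as a composition of IP address, PID, TID and a timer value. The sketch below only mirrors that composition for illustration; it is not the HBase implementation (ClientIdGenerator itself is package-private and @InterfaceAudience.Private), and the class and method names in it are hypothetical:

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;

// Hypothetical stand-in: ip bytes + pid + thread id + current time, in that order.
public final class ClientIdCompositionSketch {
  private ClientIdCompositionSketch() {
  }

  public static byte[] compose() throws UnknownHostException {
    byte[] ip = InetAddress.getLocalHost().getAddress(); // IP address part
    long pid = ProcessHandle.current().pid();            // process id part
    long tid = Thread.currentThread().getId();           // thread id part
    long now = System.currentTimeMillis();               // timer part
    ByteBuffer buf = ByteBuffer.allocate(ip.length + 3 * Long.BYTES);
    buf.put(ip).putLong(pid).putLong(tid).putLong(now);
    return buf.array();
  }
}
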
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    index ba447d5a81ba..44eef0668f03 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -25,7 +25,6 @@
     @InterfaceAudience.Private
     public class ClientUtil {
     
    -
       public static boolean areScanStartRowAndStopRowEqual(byte[] startRow, byte[] stopRow) {
         return startRow != null && startRow.length > 0 && Bytes.equals(startRow, stopRow);
       }
    @@ -35,19 +34,23 @@ public static Cursor createCursor(byte[] row) {
       }
     
       /**
    -   * <p>When scanning for a prefix the scan should stop immediately after the the last row that
    -   * has the specified prefix. This method calculates the closest next rowKey immediately following
    -   * the given rowKeyPrefix.</p>
    -   * <p>IMPORTANT: This converts a rowKeyPrefix into a rowKey.</p>
    -   * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
    -   * simply increment the last byte of the array.
    -   * But if your application uses real binary rowids you may run into the scenario that your
    -   * prefix is something like:</p>
    +   * <p>
    +   * When scanning for a prefix the scan should stop immediately after the the last row that has the
    +   * specified prefix. This method calculates the closest next rowKey immediately following the
    +   * given rowKeyPrefix.
    +   * </p>
    +   * <p>
    +   * IMPORTANT: This converts a rowKeyPrefix into a rowKey.
    +   * </p>
    +   * <p>
    +   * If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can simply
    +   * increment the last byte of the array. But if your application uses real binary rowids you may
    +   * run into the scenario that your prefix is something like:
    +   * </p>
    *    { 0x12, 0x23, 0xFF, 0xFF }
    * Then this stopRow needs to be fed into the actual scan
    *    { 0x12, 0x24 } (Notice that it is shorter now)
     * This method calculates the correct stop row value for this usecase.
    -   *
       * @param rowKeyPrefix the rowKeyPrefix.
       * @return the closest next rowKey immediately following the given rowKeyPrefix.
       */
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
    index 1370d07c5fb3..3eab89fee810 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -16,7 +15,6 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -
     package org.apache.hadoop.hbase.client;
     
     import java.io.Closeable;
    @@ -37,6 +35,10 @@
     import org.apache.hadoop.hbase.util.Addressing;
     import org.apache.hadoop.hbase.util.ExceptionUtil;
     import org.apache.hadoop.hbase.util.Threads;
    +import org.apache.yetus.audience.InterfaceAudience;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
    +
     import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
     import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap;
     import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream;
    @@ -48,15 +50,13 @@
     import org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramChannel;
     import org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramPacket;
     import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioDatagramChannel;
    +
     import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
    -import org.apache.yetus.audience.InterfaceAudience;
    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
     
     /**
      * A class that receives the cluster status, and provide it as a set of service to the client.
    - * Today, manages only the dead server list.
    - * The class is abstract to allow multiple implementations, from ZooKeeper to multicast based.
    + * Today, manages only the dead server list. The class is abstract to allow multiple
    + * implementations, from ZooKeeper to multicast based.
      */
     @InterfaceAudience.Private
     class ClusterStatusListener implements Closeable {
    @@ -80,13 +80,11 @@ public interface DeadServerHandler {
         /**
          * Called when a server is identified as dead. Called only once even if we receive the
          * information multiple times.
    -     *
          * @param sn - the server name
          */
         void newDead(ServerName sn);
       }
     
    -
       /**
        * The interface to be implemented by a listener of a cluster status event.
        */
    @@ -99,7 +97,6 @@ interface Listener extends Closeable {
         /**
          * Called to connect.
    -     *
          * @param conf Configuration to use.
          * @throws IOException if failing to connect
          */
    @@ -107,7 +104,7 @@
       }
     
       public ClusterStatusListener(DeadServerHandler dsh, Configuration conf,
    -      Class listenerClass) throws IOException {
    +    Class listenerClass) throws IOException {
         this.deadServerHandler = dsh;
         try {
           Constructor ctor =
    @@ -128,7 +125,6 @@ public ClusterStatusListener(DeadServerHandler dsh, Configuration conf,
       /**
        * Acts upon the reception of a new cluster status.
    -   *
        * @param ncs the cluster status
        */
       public void receive(ClusterMetrics ncs) {
    @@ -152,7 +148,6 @@ public void close() {
       /**
        * Check if we know if a server is dead.
    -   *
        * @param sn the server name to check.
* @return true if we know for sure that the server is dead, false otherwise. */ @@ -162,9 +157,8 @@ public boolean isDeadServer(ServerName sn) { } for (ServerName dead : deadServers) { - if (dead.getStartcode() >= sn.getStartcode() && - dead.getPort() == sn.getPort() && - dead.getHostname().equals(sn.getHostname())) { + if (dead.getStartcode() >= sn.getStartcode() && dead.getPort() == sn.getPort() + && dead.getHostname().equals(sn.getHostname())) { return true; } } @@ -172,7 +166,6 @@ public boolean isDeadServer(ServerName sn) { return false; } - /** * An implementation using a multicast message between the master & the client. */ @@ -180,8 +173,9 @@ public boolean isDeadServer(ServerName sn) { class MulticastListener implements Listener { private DatagramChannel channel; private final EventLoopGroup group = new NioEventLoopGroup(1, - new ThreadFactoryBuilder().setNameFormat("hbase-client-clusterStatusListener-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadFactoryBuilder().setNameFormat("hbase-client-clusterStatusListener-pool-%d") + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER) + .build()); public MulticastListener() { } @@ -190,11 +184,11 @@ public MulticastListener() { public void connect(Configuration conf) throws IOException { String mcAddress = conf.get(HConstants.STATUS_MULTICAST_ADDRESS, - HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); + HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); String bindAddress = conf.get(HConstants.STATUS_MULTICAST_BIND_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_BIND_ADDRESS); - int port = conf.getInt(HConstants.STATUS_MULTICAST_PORT, - HConstants.DEFAULT_STATUS_MULTICAST_PORT); + int port = + conf.getInt(HConstants.STATUS_MULTICAST_PORT, HConstants.DEFAULT_STATUS_MULTICAST_PORT); String niName = conf.get(HConstants.STATUS_MULTICAST_NI_NAME); InetAddress ina; @@ -207,11 +201,9 @@ public void connect(Configuration conf) throws IOException { try { Bootstrap b = new Bootstrap(); - b.group(group) - .channel(NioDatagramChannel.class) - .option(ChannelOption.SO_REUSEADDR, true) - .handler(new ClusterStatusHandler()); - channel = (DatagramChannel)b.bind(bindAddress, port).sync().channel(); + b.group(group).channel(NioDatagramChannel.class).option(ChannelOption.SO_REUSEADDR, true) + .handler(new ClusterStatusHandler()); + channel = (DatagramChannel) b.bind(bindAddress, port).sync().channel(); } catch (InterruptedException e) { close(); throw ExceptionUtil.asInterrupt(e); @@ -228,7 +220,6 @@ public void connect(Configuration conf) throws IOException { channel.joinGroup(ina, ni, null, channel.newPromise()); } - @Override public void close() { if (channel != null) { @@ -238,17 +229,13 @@ public void close() { group.shutdownGracefully(); } - - /** * Class, conforming to the Netty framework, that manages the message received. 
*/ private class ClusterStatusHandler extends SimpleChannelInboundHandler { @Override - public void exceptionCaught( - ChannelHandlerContext ctx, Throwable cause) - throws Exception { + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { LOG.error("Unexpected exception, continuing.", cause); } @@ -257,7 +244,6 @@ public boolean acceptInboundMessage(Object msg) throws Exception { return super.acceptInboundMessage(msg); } - @Override protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket dp) throws Exception { ByteBufInputStream bis = new ByteBufInputStream(dp.content()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java index 001d672620ea..806b7d4ec371 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,71 +20,68 @@ import java.util.Comparator; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.MemoryCompactionPolicy; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * An ColumnFamilyDescriptor contains information about a column family such as the - * number of versions, compression settings, etc. - * - * It is used as input when creating a table or adding a column. - * - * To construct a new instance, use the {@link ColumnFamilyDescriptorBuilder} methods + * An ColumnFamilyDescriptor contains information about a column family such as the number of + * versions, compression settings, etc. It is used as input when creating a table or adding a + * column. To construct a new instance, use the {@link ColumnFamilyDescriptorBuilder} methods * @since 2.0.0 */ @InterfaceAudience.Public public interface ColumnFamilyDescriptor { @InterfaceAudience.Private - static final Comparator COMPARATOR - = (ColumnFamilyDescriptor lhs, ColumnFamilyDescriptor rhs) -> { - int result = Bytes.compareTo(lhs.getName(), rhs.getName()); - if (result != 0) { - return result; - } - // punt on comparison for ordering, just calculate difference. - result = lhs.getValues().hashCode() - rhs.getValues().hashCode(); - if (result != 0) { - return result; - } - return lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode(); - }; - - static final Bytes REPLICATION_SCOPE_BYTES = new Bytes( - Bytes.toBytes(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE)); + static final Comparator COMPARATOR = + (ColumnFamilyDescriptor lhs, ColumnFamilyDescriptor rhs) -> { + int result = Bytes.compareTo(lhs.getName(), rhs.getName()); + if (result != 0) { + return result; + } + // punt on comparison for ordering, just calculate difference. 
+ result = lhs.getValues().hashCode() - rhs.getValues().hashCode(); + if (result != 0) { + return result; + } + return lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode(); + }; + + static final Bytes REPLICATION_SCOPE_BYTES = + new Bytes(Bytes.toBytes(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE)); @InterfaceAudience.Private - static final Comparator COMPARATOR_IGNORE_REPLICATION = ( - ColumnFamilyDescriptor lcf, ColumnFamilyDescriptor rcf) -> { - int result = Bytes.compareTo(lcf.getName(), rcf.getName()); - if (result != 0) { - return result; - } - // ColumnFamilyDescriptor.getValues is a immutable map, so copy it and remove - // REPLICATION_SCOPE_BYTES - Map lValues = new HashMap<>(); - lValues.putAll(lcf.getValues()); - lValues.remove(REPLICATION_SCOPE_BYTES); - Map rValues = new HashMap<>(); - rValues.putAll(rcf.getValues()); - rValues.remove(REPLICATION_SCOPE_BYTES); - result = lValues.hashCode() - rValues.hashCode(); - if (result != 0) { - return result; - } - return lcf.getConfiguration().hashCode() - rcf.getConfiguration().hashCode(); - }; + static final Comparator COMPARATOR_IGNORE_REPLICATION = + (ColumnFamilyDescriptor lcf, ColumnFamilyDescriptor rcf) -> { + int result = Bytes.compareTo(lcf.getName(), rcf.getName()); + if (result != 0) { + return result; + } + // ColumnFamilyDescriptor.getValues is a immutable map, so copy it and remove + // REPLICATION_SCOPE_BYTES + Map lValues = new HashMap<>(); + lValues.putAll(lcf.getValues()); + lValues.remove(REPLICATION_SCOPE_BYTES); + Map rValues = new HashMap<>(); + rValues.putAll(rcf.getValues()); + rValues.remove(REPLICATION_SCOPE_BYTES); + result = lValues.hashCode() - rValues.hashCode(); + if (result != 0) { + return result; + } + return lcf.getConfiguration().hashCode() - rcf.getConfiguration().hashCode(); + }; /** * @return The storefile/hfile blocksize for this column family. */ int getBlocksize(); + /** * @return bloom filter type used for new StoreFiles in ColumnFamily */ @@ -114,20 +111,23 @@ public interface ColumnFamilyDescriptor { * @return an unmodifiable map. */ Map getConfiguration(); + /** * @param key the key whose associated value is to be returned * @return accessing the configuration value by key. */ String getConfigurationValue(String key); + /** * @return replication factor set for this CF */ short getDFSReplication(); + /** - * @return the data block encoding algorithm used in block cache and - * optionally on disk + * @return the data block encoding algorithm used in block cache and optionally on disk */ DataBlockEncoding getDataBlockEncoding(); + /** * @return Return the raw crypto key attribute for the family, or null if not set */ @@ -137,35 +137,41 @@ public interface ColumnFamilyDescriptor { * @return Return the encryption algorithm in use by this family */ String getEncryptionType(); + /** - * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for - * for this column family + * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for for + * this column family */ MemoryCompactionPolicy getInMemoryCompaction(); + /** * @return return the KeepDeletedCells */ KeepDeletedCells getKeepDeletedCells(); + /** * @return maximum number of versions */ int getMaxVersions(); + /** * @return The minimum number of versions to keep. 
*/ int getMinVersions(); + /** * Get the mob compact partition policy for this family * @return MobCompactPartitionPolicy */ MobCompactPartitionPolicy getMobCompactPartitionPolicy(); + /** - * Gets the mob threshold of the family. - * If the size of a cell value is larger than this threshold, it's regarded as a mob. - * The default threshold is 1024*100(100K)B. + * Gets the mob threshold of the family. If the size of a cell value is larger than this + * threshold, it's regarded as a mob. The default threshold is 1024*100(100K)B. * @return The mob threshold. */ long getMobThreshold(); + /** * @return a copy of Name of this column family */ @@ -176,45 +182,53 @@ public interface ColumnFamilyDescriptor { */ String getNameAsString(); - /** - * @return the scope tag - */ + /** + * @return the scope tag + */ int getScope(); + /** * Not using {@code enum} here because HDFS is not using {@code enum} for storage policy, see * org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite for more details. * @return Return the storage policy in use by this family */ String getStoragePolicy(); - /** + + /** * @return Time-to-live of cell contents, in seconds. */ int getTimeToLive(); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ Bytes getValue(Bytes key); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ String getValue(String key); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ byte[] getValue(byte[] key); + /** * It clone all bytes of all elements. * @return All values */ Map getValues(); + /** * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX - * and BLOOM type blocks). + * and BLOOM type blocks). */ boolean isBlockCacheEnabled(); + /** * @return true if we should cache bloomfilter blocks on write */ @@ -224,29 +238,35 @@ public interface ColumnFamilyDescriptor { * @return true if we should cache data blocks on write */ boolean isCacheDataOnWrite(); + /** * @return true if we should cache index blocks on write */ boolean isCacheIndexesOnWrite(); + /** * @return Whether KV tags should be compressed along with DataBlockEncoding. When no * DataBlockEncoding is been used, this is having no effect. */ boolean isCompressTags(); + /** * @return true if we should evict cached blocks from the blockcache on close */ boolean isEvictBlocksOnClose(); + /** - * @return True if we are to favor keeping all values for this column family in the - * HRegionServer cache. + * @return True if we are to favor keeping all values for this column family in the HRegionServer + * cache. */ boolean isInMemory(); + /** * Gets whether the mob is enabled for the family. * @return True if the mob is enabled for the family. */ boolean isMobEnabled(); + /** * @return true if we should prefetch blocks into the blockcache on open */ @@ -258,9 +278,9 @@ public interface ColumnFamilyDescriptor { String toStringCustomizedValues(); /** - * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts - * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. - * We will also consider mvcc in versions. See HBASE-15968 for details. + * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts will + * mask a later Put with lower ts. Set this to true to enable new semantics of versions. We will + * also consider mvcc in versions. See HBASE-15968 for details. 
*/ boolean isNewVersionBehavior(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 6d85cb439c3e..1f39ce2ce5b5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,20 +49,21 @@ public class ColumnFamilyDescriptorBuilder { // For future backward compatibility - // Version 3 was when column names become byte arrays and when we picked up - // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. - // Version 5 was when bloom filter descriptors were removed. - // Version 6 adds metadata as a map where keys and values are byte[]. - // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) - // Version 8 -- reintroduction of bloom filters, changed from boolean to enum - // Version 9 -- add data block encoding + // Version 3 was when column names become byte arrays and when we picked up + // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. + // Version 5 was when bloom filter descriptors were removed. + // Version 6 adds metadata as a map where keys and values are byte[]. + // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) + // Version 8 -- reintroduction of bloom filters, changed from boolean to enum + // Version 9 -- add data block encoding // Version 10 -- change metadata to standard type. // Version 11 -- add column family level configuration. 
private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11; @InterfaceAudience.Private public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION"; - private static final Bytes IN_MEMORY_COMPACTION_BYTES = new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION)); + private static final Bytes IN_MEMORY_COMPACTION_BYTES = + new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION)); @InterfaceAudience.Private public static final String IN_MEMORY = HConstants.IN_MEMORY; @@ -74,53 +75,59 @@ public class ColumnFamilyDescriptorBuilder { private static final Bytes COMPRESSION_BYTES = new Bytes(Bytes.toBytes(COMPRESSION)); @InterfaceAudience.Private public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; - private static final Bytes COMPRESSION_COMPACT_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT)); + private static final Bytes COMPRESSION_COMPACT_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT)); public static final String COMPRESSION_COMPACT_MAJOR = "COMPRESSION_COMPACT_MAJOR"; - private static final Bytes COMPRESSION_COMPACT_MAJOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MAJOR)); + private static final Bytes COMPRESSION_COMPACT_MAJOR_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MAJOR)); public static final String COMPRESSION_COMPACT_MINOR = "COMPRESSION_COMPACT_MINOR"; - private static final Bytes COMPRESSION_COMPACT_MINOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MINOR)); + private static final Bytes COMPRESSION_COMPACT_MINOR_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MINOR)); @InterfaceAudience.Private public static final String DATA_BLOCK_ENCODING = "DATA_BLOCK_ENCODING"; - private static final Bytes DATA_BLOCK_ENCODING_BYTES = new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING)); + private static final Bytes DATA_BLOCK_ENCODING_BYTES = + new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING)); /** - * Key for the BLOCKCACHE attribute. A more exact name would be - * CACHE_DATA_ON_READ because this flag sets whether or not we cache DATA - * blocks. We always cache INDEX and BLOOM blocks; caching these blocks cannot - * be disabled. + * Key for the BLOCKCACHE attribute. A more exact name would be CACHE_DATA_ON_READ because this + * flag sets whether or not we cache DATA blocks. We always cache INDEX and BLOOM blocks; caching + * these blocks cannot be disabled. 
*/ @InterfaceAudience.Private public static final String BLOCKCACHE = "BLOCKCACHE"; private static final Bytes BLOCKCACHE_BYTES = new Bytes(Bytes.toBytes(BLOCKCACHE)); @InterfaceAudience.Private public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE"; - private static final Bytes CACHE_DATA_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_DATA_ON_WRITE)); + private static final Bytes CACHE_DATA_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_DATA_ON_WRITE)); @InterfaceAudience.Private public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE"; - private static final Bytes CACHE_INDEX_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_INDEX_ON_WRITE)); + private static final Bytes CACHE_INDEX_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_INDEX_ON_WRITE)); @InterfaceAudience.Private public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE"; - private static final Bytes CACHE_BLOOMS_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_BLOOMS_ON_WRITE)); + private static final Bytes CACHE_BLOOMS_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_BLOOMS_ON_WRITE)); @InterfaceAudience.Private public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE"; - private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE)); + private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = + new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE)); /** - * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, - * and DATA blocks of HFiles belonging to this family will be loaded into the - * cache as soon as the file is opened. These loads will not count as cache - * misses. + * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, and DATA blocks of + * HFiles belonging to this family will be loaded into the cache as soon as the file is opened. + * These loads will not count as cache misses. */ @InterfaceAudience.Private public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN"; - private static final Bytes PREFETCH_BLOCKS_ON_OPEN_BYTES = new Bytes(Bytes.toBytes(PREFETCH_BLOCKS_ON_OPEN)); + private static final Bytes PREFETCH_BLOCKS_ON_OPEN_BYTES = + new Bytes(Bytes.toBytes(PREFETCH_BLOCKS_ON_OPEN)); /** - * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. - * Use smaller block sizes for faster random-access at expense of larger - * indices (more memory consumption). Note that this is a soft limit and that - * blocks have overhead (metadata, CRCs) so blocks will tend to be the size - * specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k - * means hbase data will align with an SSDs 4k page accesses (TODO). + * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. Use smaller block + * sizes for faster random-access at expense of larger indices (more memory consumption). Note + * that this is a soft limit and that blocks have overhead (metadata, CRCs) so blocks will tend to + * be the size specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k means + * hbase data will align with an SSDs 4k page accesses (TODO). 
*/ @InterfaceAudience.Private public static final String BLOCKSIZE = "BLOCKSIZE"; @@ -141,13 +148,14 @@ public class ColumnFamilyDescriptorBuilder { public static final String MIN_VERSIONS = "MIN_VERSIONS"; private static final Bytes MIN_VERSIONS_BYTES = new Bytes(Bytes.toBytes(MIN_VERSIONS)); /** - * Retain all cells across flushes and compactions even if they fall behind a - * delete tombstone. To see all retained cells, do a 'raw' scan; see - * Scan#setRaw or pass RAW => true attribute in the shell. + * Retain all cells across flushes and compactions even if they fall behind a delete tombstone. To + * see all retained cells, do a 'raw' scan; see Scan#setRaw or pass RAW => true attribute in + * the shell. */ @InterfaceAudience.Private public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS"; - private static final Bytes KEEP_DELETED_CELLS_BYTES = new Bytes(Bytes.toBytes(KEEP_DELETED_CELLS)); + private static final Bytes KEEP_DELETED_CELLS_BYTES = + new Bytes(Bytes.toBytes(KEEP_DELETED_CELLS)); @InterfaceAudience.Private public static final String COMPRESS_TAGS = "COMPRESS_TAGS"; private static final Bytes COMPRESS_TAGS_BYTES = new Bytes(Bytes.toBytes(COMPRESS_TAGS)); @@ -168,9 +176,10 @@ public class ColumnFamilyDescriptorBuilder { public static final long DEFAULT_MOB_THRESHOLD = 100 * 1024; // 100k @InterfaceAudience.Private public static final String MOB_COMPACT_PARTITION_POLICY = "MOB_COMPACT_PARTITION_POLICY"; - private static final Bytes MOB_COMPACT_PARTITION_POLICY_BYTES = new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)); - public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY - = MobCompactPartitionPolicy.DAILY; + private static final Bytes MOB_COMPACT_PARTITION_POLICY_BYTES = + new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)); + public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY = + MobCompactPartitionPolicy.DAILY; @InterfaceAudience.Private public static final String DFS_REPLICATION = "DFS_REPLICATION"; private static final Bytes DFS_REPLICATION_BYTES = new Bytes(Bytes.toBytes(DFS_REPLICATION)); @@ -180,7 +189,8 @@ public class ColumnFamilyDescriptorBuilder { private static final Bytes STORAGE_POLICY_BYTES = new Bytes(Bytes.toBytes(STORAGE_POLICY)); public static final String NEW_VERSION_BEHAVIOR = "NEW_VERSION_BEHAVIOR"; - private static final Bytes NEW_VERSION_BEHAVIOR_BYTES = new Bytes(Bytes.toBytes(NEW_VERSION_BEHAVIOR)); + private static final Bytes NEW_VERSION_BEHAVIOR_BYTES = + new Bytes(Bytes.toBytes(NEW_VERSION_BEHAVIOR)); public static final boolean DEFAULT_NEW_VERSION_BEHAVIOR = false; /** * Default compression type. @@ -203,8 +213,7 @@ public class ColumnFamilyDescriptorBuilder { public static final int DEFAULT_MIN_VERSIONS = 0; /** - * Default setting for whether to try and serve this column family from memory - * or not. + * Default setting for whether to try and serve this column family from memory or not. */ public static final boolean DEFAULT_IN_MEMORY = false; @@ -219,14 +228,12 @@ public class ColumnFamilyDescriptorBuilder { public static final boolean DEFAULT_BLOCKCACHE = true; /** - * Default setting for whether to cache data blocks on write if block caching - * is enabled. + * Default setting for whether to cache data blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; /** - * Default setting for whether to cache index blocks on write if block caching - * is enabled. 
+ * Default setting for whether to cache index blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false; @@ -241,8 +248,7 @@ public class ColumnFamilyDescriptorBuilder { public static final BloomType DEFAULT_BLOOMFILTER = BloomType.ROW; /** - * Default setting for whether to cache bloom filter blocks on write if block - * caching is enabled. + * Default setting for whether to cache bloom filter blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false; @@ -257,8 +263,7 @@ public class ColumnFamilyDescriptorBuilder { public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL; /** - * Default setting for whether to evict cached blocks from the blockcache on - * close. + * Default setting for whether to evict cached blocks from the blockcache on close. */ public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false; @@ -276,7 +281,8 @@ public class ColumnFamilyDescriptorBuilder { private static Map getDefaultValuesBytes() { Map values = new HashMap<>(); - DEFAULT_VALUES.forEach((k, v) -> values.put(new Bytes(Bytes.toBytes(k)), new Bytes(Bytes.toBytes(v)))); + DEFAULT_VALUES + .forEach((k, v) -> values.put(new Bytes(Bytes.toBytes(k)), new Bytes(Bytes.toBytes(v)))); return values; } @@ -326,10 +332,10 @@ public static Unit getUnit(String key) { /** * @param b Family name. * @return b - * @throws IllegalArgumentException If not null and not a legitimate family - * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because - * b can be null when deserializing). Cannot start with a '.' - * either. Also Family can not be an empty value or equal "recovered.edits". + * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. 'printable' + * and ends in a ':' (Null passes are allowed because b can be null when + * deserializing). Cannot start with a '.' either. Also Family can not be an empty value + * or equal "recovered.edits". */ public static byte[] isLegalColumnFamilyName(final byte[] b) { if (b == null) { @@ -337,27 +343,28 @@ public static byte[] isLegalColumnFamilyName(final byte[] b) { } Preconditions.checkArgument(b.length != 0, "Column Family name can not be empty"); if (b[0] == '.') { - throw new IllegalArgumentException("Column Family names cannot start with a " - + "period: " + Bytes.toString(b)); + throw new IllegalArgumentException( + "Column Family names cannot start with a " + "period: " + Bytes.toString(b)); } for (int i = 0; i < b.length; i++) { if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') { throw new IllegalArgumentException("Illegal character <" + b[i] - + ">. Column Family names cannot contain control characters or colons: " - + Bytes.toString(b)); + + ">. 
Column Family names cannot contain control characters or colons: " + + Bytes.toString(b)); } } byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR); if (Bytes.equals(recoveredEdit, b)) { - throw new IllegalArgumentException("Column Family name cannot be: " - + HConstants.RECOVERED_EDITS_DIR); + throw new IllegalArgumentException( + "Column Family name cannot be: " + HConstants.RECOVERED_EDITS_DIR); } return b; } private final ModifyableColumnFamilyDescriptor desc; - public static ColumnFamilyDescriptor parseFrom(final byte[] pbBytes) throws DeserializationException { + public static ColumnFamilyDescriptor parseFrom(final byte[] pbBytes) + throws DeserializationException { return ModifyableColumnFamilyDescriptor.parseFrom(pbBytes); } @@ -453,12 +460,14 @@ public ColumnFamilyDescriptorBuilder setCompactionCompressionType(Compression.Al return this; } - public ColumnFamilyDescriptorBuilder setMajorCompactionCompressionType(Compression.Algorithm value) { + public ColumnFamilyDescriptorBuilder + setMajorCompactionCompressionType(Compression.Algorithm value) { desc.setMajorCompactionCompressionType(value); return this; } - public ColumnFamilyDescriptorBuilder setMinorCompactionCompressionType(Compression.Algorithm value) { + public ColumnFamilyDescriptorBuilder + setMinorCompactionCompressionType(Compression.Algorithm value) { desc.setMinorCompactionCompressionType(value); return this; } @@ -532,7 +541,8 @@ public ColumnFamilyDescriptorBuilder setMinVersions(final int value) { return this; } - public ColumnFamilyDescriptorBuilder setMobCompactPartitionPolicy(final MobCompactPartitionPolicy value) { + public ColumnFamilyDescriptorBuilder + setMobCompactPartitionPolicy(final MobCompactPartitionPolicy value) { desc.setMobCompactPartitionPolicy(value); return this; } @@ -599,10 +609,9 @@ public ColumnFamilyDescriptorBuilder setVersionsWithTimeToLive(final int retenti } /** - * An ModifyableFamilyDescriptor contains information about a column family such as the - * number of versions, compression settings, etc. - * - * It is used as input when creating a table or adding a column. + * An ModifyableFamilyDescriptor contains information about a column family such as the number of + * versions, compression settings, etc. It is used as input when creating a table or adding a + * column. */ private static final class ModifyableColumnFamilyDescriptor implements ColumnFamilyDescriptor, Comparable { @@ -614,20 +623,17 @@ private static final class ModifyableColumnFamilyDescriptor private final Map values = new HashMap<>(); /** - * A map which holds the configuration specific to the column family. The - * keys of the map have the same names as config keys and override the - * defaults with cf-specific settings. Example usage may be for compactions, - * etc. + * A map which holds the configuration specific to the column family. The keys of the map have + * the same names as config keys and override the defaults with cf-specific settings. Example + * usage may be for compactions, etc. */ private final Map configuration = new HashMap<>(); /** - * Construct a column descriptor specifying only the family name The other - * attributes are defaulted. - * - * @param name Column family name. Must be 'printable' -- digit or - * letter -- and may not contain a : - * TODO: make this private after the HCD is removed. + * Construct a column descriptor specifying only the family name The other attributes are + * defaulted. + * @param name Column family name. 
Must be 'printable' -- digit or letter -- and may not contain + * a : TODO: make this private after the HCD is removed. */ @InterfaceAudience.Private public ModifyableColumnFamilyDescriptor(final byte[] name) { @@ -635,8 +641,8 @@ public ModifyableColumnFamilyDescriptor(final byte[] name) { } /** - * Constructor. Makes a deep copy of the supplied descriptor. - * TODO: make this private after the HCD is removed. + * Constructor. Makes a deep copy of the supplied descriptor. TODO: make this private after the + * HCD is removed. * @param desc The descriptor. */ @InterfaceAudience.Private @@ -644,7 +650,8 @@ public ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc) { this(desc.getName(), desc.getValues(), desc.getConfiguration()); } - private ModifyableColumnFamilyDescriptor(byte[] name, Map values, Map config) { + private ModifyableColumnFamilyDescriptor(byte[] name, Map values, + Map config) { this.name = name; this.values.putAll(values); this.configuration.putAll(config); @@ -688,7 +695,8 @@ public Map getValues() { * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setValue(byte[] key, byte[] value) { - return setValue(toBytesOrNull(key, Function.identity()), toBytesOrNull(value, Function.identity())); + return setValue(toBytesOrNull(key, Function.identity()), + toBytesOrNull(value, Function.identity())); } public ModifyableColumnFamilyDescriptor setValue(String key, String value) { @@ -698,6 +706,7 @@ public ModifyableColumnFamilyDescriptor setValue(String key, String value) { private ModifyableColumnFamilyDescriptor setValue(Bytes key, String value) { return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } + /** * @param key The key. * @param value The value. @@ -749,8 +758,8 @@ public ModifyableColumnFamilyDescriptor setMaxVersions(int maxVersions) { throw new IllegalArgumentException("Maximum versions must be positive"); } if (maxVersions < this.getMinVersions()) { - throw new IllegalArgumentException("Set MaxVersion to " + maxVersions - + " while minVersion is " + this.getMinVersions() + throw new IllegalArgumentException( + "Set MaxVersion to " + maxVersions + " while minVersion is " + this.getMinVersions() + ". Maximum versions must be >= minimum versions "); } setValue(MAX_VERSIONS_BYTES, Integer.toString(maxVersions)); @@ -759,7 +768,6 @@ public ModifyableColumnFamilyDescriptor setMaxVersions(int maxVersions) { /** * Set minimum and maximum versions to keep - * * @param minVersions minimal number of versions * @param maxVersions maximum number of versions * @return this (for chained invocation) @@ -772,8 +780,8 @@ public ModifyableColumnFamilyDescriptor setVersions(int minVersions, int maxVers } if (maxVersions < minVersions) { - throw new IllegalArgumentException("Unable to set MaxVersion to " + maxVersions - + " and set MinVersion to " + minVersions + throw new IllegalArgumentException( + "Unable to set MaxVersion to " + maxVersions + " and set MinVersion to " + minVersions + ", as maximum versions must be >= minimum versions."); } setMinVersions(minVersions); @@ -781,15 +789,13 @@ public ModifyableColumnFamilyDescriptor setVersions(int minVersions, int maxVers return this; } - @Override public int getBlocksize() { return getStringOrDefault(BLOCKSIZE_BYTES, Integer::valueOf, DEFAULT_BLOCKSIZE); } /** - * @param s Blocksize to use when writing out storefiles/hfiles on this - * column family. + * @param s Blocksize to use when writing out storefiles/hfiles on this column family. 
* @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setBlocksize(int s) { @@ -797,8 +803,8 @@ public ModifyableColumnFamilyDescriptor setBlocksize(int s) { } public ModifyableColumnFamilyDescriptor setBlocksize(String blocksize) throws HBaseException { - return setBlocksize(Integer.parseInt(PrettyPrinter. - valueOf(blocksize, PrettyPrinter.Unit.BYTE))); + return setBlocksize( + Integer.parseInt(PrettyPrinter.valueOf(blocksize, PrettyPrinter.Unit.BYTE))); } @Override @@ -808,11 +814,9 @@ public Compression.Algorithm getCompressionType() { } /** - * Compression types supported in hbase. LZO is not bundled as part of the - * hbase distribution. See - * See LZO Compression - * for how to enable it. - * + * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. + * See See LZO Compression for + * how to enable it. * @param type Compression type setting. * @return this (for chained invocation) */ @@ -828,18 +832,17 @@ public DataBlockEncoding getDataBlockEncoding() { /** * Set data block encoding algorithm used in block cache. - * * @param type What kind of data block encoding will be used. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setDataBlockEncoding(DataBlockEncoding type) { - return setValue(DATA_BLOCK_ENCODING_BYTES, type == null ? DataBlockEncoding.NONE.name() : type.name()); + return setValue(DATA_BLOCK_ENCODING_BYTES, + type == null ? DataBlockEncoding.NONE.name() : type.name()); } /** - * Set whether the tags should be compressed along with DataBlockEncoding. - * When no DataBlockEncoding is been used, this is having no effect. - * + * Set whether the tags should be compressed along with DataBlockEncoding. When no + * DataBlockEncoding is been used, this is having no effect. * @param compressTags * @return this (for chained invocation) */ @@ -849,8 +852,7 @@ public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) { @Override public boolean isCompressTags() { - return getStringOrDefault(COMPRESS_TAGS_BYTES, Boolean::valueOf, - DEFAULT_COMPRESS_TAGS); + return getStringOrDefault(COMPRESS_TAGS_BYTES, Boolean::valueOf, DEFAULT_COMPRESS_TAGS); } @Override @@ -872,26 +874,24 @@ public Compression.Algorithm getMinorCompactionCompressionType() { } /** - * Compression types supported in hbase. LZO is not bundled as part of the - * hbase distribution. See - * See LZO Compression - * for how to enable it. - * + * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. + * See See LZO Compression for + * how to enable it. * @param type Compression type setting. 
* @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_BYTES, type.name()); } - public ModifyableColumnFamilyDescriptor setMajorCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setMajorCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_MAJOR_BYTES, type.name()); } - public ModifyableColumnFamilyDescriptor setMinorCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setMinorCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_MINOR_BYTES, type.name()); } @@ -901,8 +901,8 @@ public boolean isInMemory() { } /** - * @param inMemory True if we are to favor keeping all values for this - * column family in the HRegionServer cache + * @param inMemory True if we are to favor keeping all values for this column family in the + * HRegionServer cache * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setInMemory(boolean inMemory) { @@ -916,23 +916,22 @@ public MemoryCompactionPolicy getInMemoryCompaction() { } /** - * @param inMemoryCompaction the prefered in-memory compaction policy for - * this column family + * @param inMemoryCompaction the prefered in-memory compaction policy for this column family * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { + public ModifyableColumnFamilyDescriptor + setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { return setValue(IN_MEMORY_COMPACTION_BYTES, inMemoryCompaction.name()); } @Override public KeepDeletedCells getKeepDeletedCells() { - return getStringOrDefault(KEEP_DELETED_CELLS_BYTES, - KeepDeletedCells::getValue, DEFAULT_KEEP_DELETED); + return getStringOrDefault(KEEP_DELETED_CELLS_BYTES, KeepDeletedCells::getValue, + DEFAULT_KEEP_DELETED); } /** - * @param keepDeletedCells True if deleted rows should not be collected - * immediately. + * @param keepDeletedCells True if deleted rows should not be collected immediately. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) { @@ -941,13 +940,13 @@ public ModifyableColumnFamilyDescriptor setKeepDeletedCells(KeepDeletedCells kee /** * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts - * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. - * We will also consider mvcc in versions. See HBASE-15968 for details. + * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. We + * will also consider mvcc in versions. See HBASE-15968 for details. */ @Override public boolean isNewVersionBehavior() { - return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, - Boolean::parseBoolean, DEFAULT_NEW_VERSION_BEHAVIOR); + return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, Boolean::parseBoolean, + DEFAULT_NEW_VERSION_BEHAVIOR); } public ModifyableColumnFamilyDescriptor setNewVersionBehavior(boolean newVersionBehavior) { @@ -982,8 +981,7 @@ public int getMinVersions() { } /** - * @param minVersions The minimum number of versions to keep. 
(used when - * timeToLive is set) + * @param minVersions The minimum number of versions to keep. (used when timeToLive is set) * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setMinVersions(int minVersions) { @@ -991,17 +989,16 @@ public ModifyableColumnFamilyDescriptor setMinVersions(int minVersions) { } /** - * Retain all versions for a given TTL(retentionInterval), and then only a specific number - * of versions(versionAfterInterval) after that interval elapses. - * + * Retain all versions for a given TTL(retentionInterval), and then only a specific number of + * versions(versionAfterInterval) after that interval elapses. * @param retentionInterval Retain all versions for this interval * @param versionAfterInterval Retain no of versions to retain after retentionInterval * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive( - final int retentionInterval, final int versionAfterInterval) { + public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive(final int retentionInterval, + final int versionAfterInterval) { ModifyableColumnFamilyDescriptor modifyableColumnFamilyDescriptor = - setVersions(versionAfterInterval, Integer.MAX_VALUE); + setVersions(versionAfterInterval, Integer.MAX_VALUE); modifyableColumnFamilyDescriptor.setTimeToLive(retentionInterval); modifyableColumnFamilyDescriptor.setKeepDeletedCells(KeepDeletedCells.TTL); return modifyableColumnFamilyDescriptor; @@ -1013,8 +1010,8 @@ public boolean isBlockCacheEnabled() { } /** - * @param blockCacheEnabled True if hfile DATA type blocks should be cached - * (We always cache INDEX and BLOOM blocks; you cannot turn this off). + * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache + * INDEX and BLOOM blocks; you cannot turn this off). 
* @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { @@ -1033,7 +1030,8 @@ public ModifyableColumnFamilyDescriptor setBloomFilterType(final BloomType bt) { @Override public int getScope() { - return getStringOrDefault(REPLICATION_SCOPE_BYTES, Integer::valueOf, DEFAULT_REPLICATION_SCOPE); + return getStringOrDefault(REPLICATION_SCOPE_BYTES, Integer::valueOf, + DEFAULT_REPLICATION_SCOPE); } /** @@ -1046,7 +1044,8 @@ public ModifyableColumnFamilyDescriptor setScope(int scope) { @Override public boolean isCacheDataOnWrite() { - return getStringOrDefault(CACHE_DATA_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_DATA_ON_WRITE); + return getStringOrDefault(CACHE_DATA_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_DATA_ON_WRITE); } /** @@ -1059,7 +1058,8 @@ public ModifyableColumnFamilyDescriptor setCacheDataOnWrite(boolean value) { @Override public boolean isCacheIndexesOnWrite() { - return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_INDEX_ON_WRITE); + return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_INDEX_ON_WRITE); } /** @@ -1072,7 +1072,8 @@ public ModifyableColumnFamilyDescriptor setCacheIndexesOnWrite(boolean value) { @Override public boolean isCacheBloomsOnWrite() { - return getStringOrDefault(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_BLOOMS_ON_WRITE); + return getStringOrDefault(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_BLOOMS_ON_WRITE); } /** @@ -1085,12 +1086,12 @@ public ModifyableColumnFamilyDescriptor setCacheBloomsOnWrite(boolean value) { @Override public boolean isEvictBlocksOnClose() { - return getStringOrDefault(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean::valueOf, DEFAULT_EVICT_BLOCKS_ON_CLOSE); + return getStringOrDefault(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean::valueOf, + DEFAULT_EVICT_BLOCKS_ON_CLOSE); } /** - * @param value true if we should evict cached blocks from the blockcache on - * close + * @param value true if we should evict cached blocks from the blockcache on close * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEvictBlocksOnClose(boolean value) { @@ -1099,12 +1100,12 @@ public ModifyableColumnFamilyDescriptor setEvictBlocksOnClose(boolean value) { @Override public boolean isPrefetchBlocksOnOpen() { - return getStringOrDefault(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean::valueOf, DEFAULT_PREFETCH_BLOCKS_ON_OPEN); + return getStringOrDefault(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean::valueOf, + DEFAULT_PREFETCH_BLOCKS_ON_OPEN); } /** - * @param value true if we should prefetch blocks into the blockcache on - * open + * @param value true if we should prefetch blocks into the blockcache on open * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setPrefetchBlocksOnOpen(boolean value) { @@ -1124,7 +1125,6 @@ public String toString() { return s.toString(); } - @Override public String toStringCustomizedValues() { StringBuilder s = new StringBuilder(); @@ -1151,9 +1151,8 @@ private StringBuilder getValues(boolean printDefaults) { } String key = Bytes.toString(entry.getKey().get()); String value = Bytes.toStringBinary(entry.getValue().get()); - if (printDefaults - || !DEFAULT_VALUES.containsKey(key) - || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { + if (printDefaults || !DEFAULT_VALUES.containsKey(key) + || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { s.append(", "); s.append(key); s.append(" => "); @@ -1197,7 
+1196,8 @@ private StringBuilder getValues(boolean printDefaults) { printCommaForConfiguration = true; s.append('\'').append(e.getKey()).append('\''); s.append(" => "); - s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\''); + s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))) + .append('\''); } s.append("}"); } @@ -1210,7 +1210,8 @@ public boolean equals(Object obj) { return true; } if (obj instanceof ModifyableColumnFamilyDescriptor) { - return ColumnFamilyDescriptor.COMPARATOR.compare(this, (ModifyableColumnFamilyDescriptor) obj) == 0; + return ColumnFamilyDescriptor.COMPARATOR.compare(this, + (ModifyableColumnFamilyDescriptor) obj) == 0; } return false; } @@ -1234,19 +1235,18 @@ public int compareTo(ModifyableColumnFamilyDescriptor other) { * @see #parseFrom(byte[]) */ private byte[] toByteArray() { - return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this) - .toByteArray()); + return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this).toByteArray()); } /** - * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb - * magic prefix - * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from - * bytes + * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb magic + * prefix + * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from bytes * @throws DeserializationException * @see #toByteArray() */ - private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) throws DeserializationException { + private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) + throws DeserializationException { if (!ProtobufUtil.isPBMagicPrefix(bytes)) { throw new DeserializationException("No magic"); } @@ -1275,9 +1275,7 @@ public Map getConfiguration() { /** * Setter for storing a configuration setting in {@link #configuration} map. - * - * @param key Config key. Same as XML config key e.g. - * hbase.something.or.other. + * @param key Config key. Same as XML config key e.g. hbase.something.or.other. * @param value String value. If null, removes the configuration. * @return this (for chained invocation) */ @@ -1291,9 +1289,7 @@ public ModifyableColumnFamilyDescriptor setConfiguration(String key, String valu } /** - * Remove a configuration setting represented by the key from the - * {@link #configuration} map. - * + * Remove a configuration setting represented by the key from the {@link #configuration} map. * @param key * @return this (for chained invocation) */ @@ -1308,7 +1304,6 @@ public String getEncryptionType() { /** * Set the encryption algorithm for use with this family - * * @param algorithm * @return this (for chained invocation) */ @@ -1323,7 +1318,6 @@ public byte[] getEncryptionKey() { /** * Set the raw crypto key attribute for the family - * * @param keyBytes * @return this (for chained invocation) */ @@ -1338,7 +1332,6 @@ public long getMobThreshold() { /** * Sets the mob threshold of the family. - * * @param threshold The mob threshold. * @return this (for chained invocation) */ @@ -1353,7 +1346,6 @@ public boolean isMobEnabled() { /** * Enables the mob for the family. - * * @param isMobEnabled Whether to enable the mob for the family. * @return this (for chained invocation) */ @@ -1370,32 +1362,30 @@ public MobCompactPartitionPolicy getMobCompactPartitionPolicy() { /** * Set the mob compact partition policy for the family. 
- * * @param policy policy type * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) { + public ModifyableColumnFamilyDescriptor + setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) { return setValue(MOB_COMPACT_PARTITION_POLICY_BYTES, policy.name()); } @Override public short getDFSReplication() { - return getStringOrDefault(DFS_REPLICATION_BYTES, - Short::valueOf, DEFAULT_DFS_REPLICATION); + return getStringOrDefault(DFS_REPLICATION_BYTES, Short::valueOf, DEFAULT_DFS_REPLICATION); } /** * Set the replication factor to hfile(s) belonging to this family - * - * @param replication number of replicas the blocks(s) belonging to this CF - * should have, or {@link #DEFAULT_DFS_REPLICATION} for the default - * replication factor set in the filesystem + * @param replication number of replicas the blocks(s) belonging to this CF should have, or + * {@link #DEFAULT_DFS_REPLICATION} for the default replication factor set in the + * filesystem * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setDFSReplication(short replication) { if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) { throw new IllegalArgumentException( - "DFS replication factor cannot be less than 1 if explicitly set."); + "DFS replication factor cannot be less than 1 if explicitly set."); } return setValue(DFS_REPLICATION_BYTES, Short.toString(replication)); } @@ -1407,11 +1397,8 @@ public String getStoragePolicy() { /** * Set the storage policy for use with this family - * - * @param policy the policy to set, valid setting includes: - * "LAZY_PERSIST", - * "ALL_SSD", "ONE_SSD", "HOT", "WARM", - * "COLD" + * @param policy the policy to set, valid setting includes: "LAZY_PERSIST", + * "ALL_SSD", "ONE_SSD", "HOT", "WARM", "COLD" * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setStoragePolicy(String policy) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java index 018cfef02605..25b45f6c0d51 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +16,18 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; /** - * Currently, there are only two compact types: - * {@code NORMAL} means do store files compaction; + * Currently, there are only two compact types: {@code NORMAL} means do store files compaction; * {@code MOB} means do mob files compaction. 
- * */ + */ @InterfaceAudience.Public public enum CompactType { - NORMAL (0), - MOB (1); + NORMAL(0), MOB(1); - CompactType(int value) {} + CompactType(int value) { + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java index 51f7d071e4ac..b70dce458441 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java index 08afeb61b558..592a99b0584d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A scan result cache that only returns complete result. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java index b638e72a46db..5e3468628e1a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,23 +29,22 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A cluster connection encapsulating lower level individual connections to actual servers and - * a connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory} - * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()} - * the connection to release the resources. - * - *

    The connection object contains logic to find the master, locate regions out on the cluster, - * keeps a cache of locations and then knows how to re-calibrate after they move. The individual - * connections to servers, meta cache, zookeeper connection, etc are all shared by the - * {@link Table} and {@link Admin} instances obtained from this connection. - * - *

    Connection creation is a heavy-weight operation. Connection implementations are thread-safe, - * so that the client can create a connection once, and share it with different threads. - * {@link Table} and {@link Admin} instances, on the other hand, are light-weight and are not - * thread-safe. Typically, a single connection per client application is instantiated and every - * thread will obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} - * is not recommended. - * + * A cluster connection encapsulating lower level individual connections to actual servers and a + * connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory} + * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()} the + * connection to release the resources. + *

    + * The connection object contains logic to find the master, locate regions out on the cluster, keeps + * a cache of locations and then knows how to re-calibrate after they move. The individual + * connections to servers, meta cache, zookeeper connection, etc are all shared by the {@link Table} + * and {@link Admin} instances obtained from this connection. + *

    + * Connection creation is a heavy-weight operation. Connection implementations are thread-safe, so + * that the client can create a connection once, and share it with different threads. {@link Table} + * and {@link Admin} instances, on the other hand, are light-weight and are not thread-safe. + * Typically, a single connection per client application is instantiated and every thread will + * obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} is not + * recommended. * @see ConnectionFactory * @since 0.99.0 */ @@ -53,13 +52,11 @@ public interface Connection extends Abortable, Closeable { /* - * Implementation notes: - * - Only allow new style of interfaces: - * -- All table names are passed as TableName. No more byte[] and string arguments - * -- Most of the classes with names H is deprecated in favor of non-H versions - * (Table, Connection, etc) - * -- Only real client-facing public methods are allowed - * - Connection should contain only getTable(), getAdmin() kind of general methods. + * Implementation notes: - Only allow new style of interfaces: -- All table names are passed as + * TableName. No more byte[] and string arguments -- Most of the classes with names H is + * deprecated in favor of non-H versions (Table, Connection, etc) -- Only real client-facing + * public methods are allowed - Connection should contain only getTable(), getAdmin() kind of + * general methods. */ /** @@ -68,17 +65,14 @@ public interface Connection extends Abortable, Closeable { Configuration getConfiguration(); /** - * Retrieve a Table implementation for accessing a table. - * The returned Table is not thread safe, a new instance should be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned Table - * is neither required nor desired. + * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a + * new instance should be created for each using thread. This is a lightweight operation, pooling + * or caching of the returned Table is neither required nor desired. *

    - * The caller is responsible for calling {@link Table#close()} on the returned - * table instance. + * The caller is responsible for calling {@link Table#close()} on the returned table instance. *

    - * Since 0.98.1 this method no longer checks table existence. An exception - * will be thrown if the table does not exist only when the first operation is - * attempted. + * Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the + * table does not exist only when the first operation is attempted. * @param tableName the name of the table * @return a Table to use for interactions with this table */ @@ -87,18 +81,14 @@ default Table getTable(TableName tableName) throws IOException { } /** - * Retrieve a Table implementation for accessing a table. - * The returned Table is not thread safe, a new instance should be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned Table - * is neither required nor desired. + * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a + * new instance should be created for each using thread. This is a lightweight operation, pooling + * or caching of the returned Table is neither required nor desired. *

    - * The caller is responsible for calling {@link Table#close()} on the returned - * table instance. + * The caller is responsible for calling {@link Table#close()} on the returned table instance. *

    - * Since 0.98.1 this method no longer checks table existence. An exception - * will be thrown if the table does not exist only when the first operation is - * attempted. - * + * Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the + * table does not exist only when the first operation is attempted. * @param tableName the name of the table * @param pool The thread pool to use for batch operations, null to use a default pool. * @return a Table to use for interactions with this table @@ -110,19 +100,17 @@ default Table getTable(TableName tableName, ExecutorService pool) throws IOExcep /** *
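The Connection javadoc reflowed above describes the intended lifecycle: one heavyweight, shared Connection and cheap per-thread Table instances. A small, hedged usage sketch (table, family and row names are illustrative only):

    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo"))) {
      // Table is lightweight: create it per thread, use it, close it; the Connection is shared.
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println("found=" + !result.isEmpty());
    } // closing the Table is cheap; closing the Connection releases the shared resources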

    * Retrieve a {@link BufferedMutator} for performing client-side buffering of writes. The - * {@link BufferedMutator} returned by this method is thread-safe. This BufferedMutator will - * use the Connection's ExecutorService. This object can be used for long lived operations. + * {@link BufferedMutator} returned by this method is thread-safe. This BufferedMutator will use + * the Connection's ExecutorService. This object can be used for long lived operations. *

    *

    - * The caller is responsible for calling {@link BufferedMutator#close()} on - * the returned {@link BufferedMutator} instance. + * The caller is responsible for calling {@link BufferedMutator#close()} on the returned + * {@link BufferedMutator} instance. *

    *

    - * This accessor will use the connection's ExecutorService and will throw an - * exception in the main thread when an asynchronous exception occurs. - * + * This accessor will use the connection's ExecutorService and will throw an exception in the main + * thread when an asynchronous exception occurs. * @param tableName the name of the table - * * @return a {@link BufferedMutator} for the supplied tableName. */ default BufferedMutator getBufferedMutator(TableName tableName) throws IOException { @@ -134,7 +122,6 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti * {@link BufferedMutator} returned by this method is thread-safe. This object can be used for * long lived table operations. The caller is responsible for calling * {@link BufferedMutator#close()} on the returned {@link BufferedMutator} instance. - * * @param params details on how to instantiate the {@code BufferedMutator}. * @return a {@link BufferedMutator} for the supplied tableName. */ @@ -143,15 +130,10 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti /** * Retrieve a RegionLocator implementation to inspect region information on a table. The returned * RegionLocator is not thread-safe, so a new instance should be created for each using thread. - * - * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither - * required nor desired. - *
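The getBufferedMutator documentation above covers client-side write buffering; a minimal sketch of typical use (assuming the standard BufferedMutator API; names and counts are illustrative):

    // Assumes the shared Connection `connection` from the previous sketch; throws IOException.
    try (BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf("demo"))) {
      for (int i = 0; i < 1000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(i));
        mutator.mutate(put);   // buffered locally and sent to the servers in batches
      }
      mutator.flush();         // push out anything still sitting in the buffer
      // getBufferedMutator(new BufferedMutatorParams(tableName).writeBufferSize(...)) allows tuning
    }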
    + * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither + * required nor desired.
    * The caller is responsible for calling {@link RegionLocator#close()} on the returned - * RegionLocator instance. - * - * RegionLocator needs to be unmanaged - * + * RegionLocator instance. RegionLocator needs to be unmanaged * @param tableName Name of the table who's region is to be examined * @return A RegionLocator instance */ @@ -168,14 +150,10 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti void clearRegionLocationCache(); /** - * Retrieve an Admin implementation to administer an HBase cluster. - * The returned Admin is not guaranteed to be thread-safe. A new instance should be created for - * each using thread. This is a lightweight operation. Pooling or caching of the returned - * Admin is not recommended. - *
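And for the RegionLocator accessor documented in the same hunk, a short illustrative sketch (again assuming an open Connection `connection`):

    try (RegionLocator locator = connection.getRegionLocator(TableName.valueOf("demo"))) {
      HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("row1"));
      System.out.println("row1 is served by " + location.getServerName());
      System.out.println("region count: " + locator.getAllRegionLocations().size());
    }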
    - * The caller is responsible for calling {@link Admin#close()} on the returned - * Admin instance. - * + * Retrieve an Admin implementation to administer an HBase cluster. The returned Admin is not + * guaranteed to be thread-safe. A new instance should be created for each using thread. This is a + * lightweight operation. Pooling or caching of the returned Admin is not recommended.
    + * The caller is responsible for calling {@link Admin#close()} on the returned Admin instance. * @return an Admin instance for cluster administration */ Admin getAdmin() throws IOException; @@ -210,15 +188,11 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti String getClusterId(); /** - * Retrieve an Hbck implementation to fix an HBase cluster. - * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by - * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance - * is not recommended. - *
    - * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. - *
    + * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to + * be thread-safe. A new instance should be created by each thread. This is a lightweight + * operation. Pooling or caching of the returned Hbck instance is not recommended.
    + * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
    * This will be used mostly by hbck tool. - * * @return an Hbck instance for active master. Active master is fetched from the zookeeper. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) @@ -227,18 +201,13 @@ default Hbck getHbck() throws IOException { } /** - * Retrieve an Hbck implementation to fix an HBase cluster. - * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by - * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance - * is not recommended. - *
    - * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. - *
    - * This will be used mostly by hbck tool. This may only be used to by pass getting - * registered master from ZK. In situations where ZK is not available or active master is not - * registered with ZK and user can get master address by other means, master can be explicitly - * specified. - * + * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to + * be thread-safe. A new instance should be created by each thread. This is a lightweight + * operation. Pooling or caching of the returned Hbck instance is not recommended.
    + * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
    + * This will be used mostly by hbck tool. This may only be used to by pass getting registered + * master from ZK. In situations where ZK is not available or active master is not registered with + * ZK and user can get master address by other means, master can be explicitly specified. * @param masterServer explicit {@link ServerName} for master server * @return an Hbck instance for a specified master server */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java index 19ca9adbf3f4..817605877d12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java @@ -1,14 +1,20 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; @@ -16,12 +22,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Configuration parameters for the connection. - * Configuration is a heavy weight registry that does a lot of string operations and regex matching. - * Method calls into Configuration account for high CPU usage and have huge performance impact. - * This class caches connection-related configuration values in the ConnectionConfiguration - * object so that expensive conf.getXXX() calls are avoided every time HTable, etc is instantiated. - * see HBASE-12128 + * Configuration parameters for the connection. Configuration is a heavy weight registry that does a + * lot of string operations and regex matching. Method calls into Configuration account for high CPU + * usage and have huge performance impact. This class caches connection-related configuration values + * in the ConnectionConfiguration object so that expensive conf.getXXX() calls are avoided every + * time HTable, etc is instantiated. 
see HBASE-12128 */ @InterfaceAudience.Private public class ConnectionConfiguration { @@ -29,18 +34,18 @@ public class ConnectionConfiguration { public static final String WRITE_BUFFER_SIZE_KEY = "hbase.client.write.buffer"; public static final long WRITE_BUFFER_SIZE_DEFAULT = 2097152; public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS = - "hbase.client.write.buffer.periodicflush.timeout.ms"; + "hbase.client.write.buffer.periodicflush.timeout.ms"; public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS = - "hbase.client.write.buffer.periodicflush.timertick.ms"; + "hbase.client.write.buffer.periodicflush.timertick.ms"; public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT = 0; // 0 == Disabled public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT = 1000L; // 1 second public static final String MAX_KEYVALUE_SIZE_KEY = "hbase.client.keyvalue.maxsize"; public static final int MAX_KEYVALUE_SIZE_DEFAULT = 10485760; public static final String PRIMARY_CALL_TIMEOUT_MICROSECOND = - "hbase.client.primaryCallTimeout.get"; + "hbase.client.primaryCallTimeout.get"; public static final int PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT = 10000; // 10ms public static final String PRIMARY_SCAN_TIMEOUT_MICROSECOND = - "hbase.client.replicaCallTimeout.scan"; + "hbase.client.replicaCallTimeout.scan"; public static final int PRIMARY_SCAN_TIMEOUT_MICROSECOND_DEFAULT = 1000000; // 1s public static final String LOG_SCANNER_ACTIVITY = "hbase.client.log.scanner.activity"; @@ -62,49 +67,46 @@ public class ConnectionConfiguration { // toggle for async/sync prefetch private final boolean clientScannerAsyncPrefetch; - /** + /** * Constructor * @param conf Configuration object */ ConnectionConfiguration(Configuration conf) { this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, WRITE_BUFFER_SIZE_DEFAULT); - this.writeBufferPeriodicFlushTimeoutMs = conf.getLong( - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT); + this.writeBufferPeriodicFlushTimeoutMs = conf.getLong(WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, + WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT); this.writeBufferPeriodicFlushTimerTickMs = conf.getLong( - WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT); + WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT); this.metaOperationTimeout = conf.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.operationTimeout = conf.getInt( - HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.scannerCaching = conf.getInt( - HConstants.HBASE_CLIENT_SCANNER_CACHING, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + this.scannerCaching = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); - this.scannerMaxResultSize = - conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); + this.scannerMaxResultSize = conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); this.primaryCallTimeoutMicroSecond = - conf.getInt(PRIMARY_CALL_TIMEOUT_MICROSECOND, 
PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT); + conf.getInt(PRIMARY_CALL_TIMEOUT_MICROSECOND, PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT); this.replicaCallTimeoutMicroSecondScan = - conf.getInt(PRIMARY_SCAN_TIMEOUT_MICROSECOND, PRIMARY_SCAN_TIMEOUT_MICROSECOND_DEFAULT); + conf.getInt(PRIMARY_SCAN_TIMEOUT_MICROSECOND, PRIMARY_SCAN_TIMEOUT_MICROSECOND_DEFAULT); this.metaReplicaCallTimeoutMicroSecondScan = - conf.getInt(HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, - HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT); + conf.getInt(HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, + HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT); - this.retries = conf.getInt( - HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + this.retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - this.clientScannerAsyncPrefetch = conf.getBoolean( - Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH); + this.clientScannerAsyncPrefetch = conf.getBoolean(Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, + Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH); this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT); @@ -112,16 +114,15 @@ public class ConnectionConfiguration { conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); this.writeRpcTimeout = conf.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); } /** - * Constructor - * This is for internal testing purpose (using the default value). - * In real usage, we should read the configuration from the Configuration object. + * Constructor This is for internal testing purpose (using the default value). In real usage, we + * should read the configuration from the Configuration object. */ protected ConnectionConfiguration() { this.writeBufferSize = WRITE_BUFFER_SIZE_DEFAULT; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index a3cf55715bdf..e417a65ac9c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,13 +54,15 @@ * Since 2.2.0, Connection created by ConnectionFactory can contain user-specified kerberos * credentials if caller has following two configurations set: *

      - *
    • hbase.client.keytab.file, points to a valid keytab on the local filesystem - *
    • hbase.client.kerberos.principal, gives the Kerberos principal to use + *
    • hbase.client.keytab.file, points to a valid keytab on the local filesystem + *
    • hbase.client.kerberos.principal, gives the Kerberos principal to use *
    * By this way, caller can directly connect to kerberized cluster without caring login and * credentials renewal logic in application. + * *
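A hedged sketch of the keytab-based login described above; the keytab path and principal are placeholders, only the two configuration keys come from the documentation:

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.client.keytab.file", "/path/to/client.keytab");          // placeholder path
    conf.set("hbase.client.kerberos.principal", "client/_HOST@EXAMPLE.COM"); // placeholder principal
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // login and credential renewal are handled by the client when both keys are present
    }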
      * 
    + * * Similarly, {@link Connection} also returns {@link Admin} and {@link RegionLocator} * implementations. * @see Connection @@ -70,7 +71,8 @@ @InterfaceAudience.Public public class ConnectionFactory { - public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = "hbase.client.async.connection.impl"; + public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = + "hbase.client.async.connection.impl"; /** No public c.tors */ protected ConnectionFactory() { @@ -220,10 +222,10 @@ public static Connection createConnection(Configuration conf, ExecutorService po try { // Default HCM#HCI is not accessible; make it so before invoking. Constructor constructor = - clazz.getDeclaredConstructor(Configuration.class, ExecutorService.class, User.class); + clazz.getDeclaredConstructor(Configuration.class, ExecutorService.class, User.class); constructor.setAccessible(true); return user.runAs((PrivilegedExceptionAction) () -> (Connection) constructor - .newInstance(conf, pool, user)); + .newInstance(conf, pool, user)); } catch (Exception e) { throw new IOException(e); } @@ -278,7 +280,7 @@ public static CompletableFuture createAsyncConnection(Configura * @return AsyncConnection object wrapped by CompletableFuture */ public static CompletableFuture createAsyncConnection(Configuration conf, - final User user) { + final User user) { return TraceUtil.tracedFuture(() -> { CompletableFuture future = new CompletableFuture<>(); ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf); @@ -298,7 +300,7 @@ public static CompletableFuture createAsyncConnection(Configura try { future.complete( user.runAs((PrivilegedExceptionAction) () -> ReflectionUtils - .newInstance(clazz, conf, registry, clusterId, null, user))); + .newInstance(clazz, conf, registry, clusterId, null, user))); } catch (Exception e) { registry.close(); future.completeExceptionally(e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java index e50d308ec46d..0d73c343ec14 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -154,11 +154,11 @@ private ThreadPoolExecutor createThreadPool() { int threads = conf.getInt("hbase.hconnection.threads.max", 256); long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60); BlockingQueue workQueue = - new LinkedBlockingQueue<>(threads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); + new LinkedBlockingQueue<>(threads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); ThreadPoolExecutor tpe = new ThreadPoolExecutor(threads, threads, keepAliveTime, - TimeUnit.SECONDS, workQueue, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat(toString() + "-shared-%d").build()); + TimeUnit.SECONDS, workQueue, new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat(toString() + "-shared-%d").build()); tpe.allowCoreThreadTimeOut(true); return tpe; } @@ -188,13 +188,13 @@ public TableBuilder getTableBuilder(TableName tableName, ExecutorService pool) { @Override public Table build() { IOExceptionSupplier poolSupplier = - pool != null ? () -> pool : ConnectionOverAsyncConnection.this::getBatchPool; + pool != null ? () -> pool : ConnectionOverAsyncConnection.this::getBatchPool; return new TableOverAsyncTable(conn, - conn.getTableBuilder(tableName).setRpcTimeout(rpcTimeout, TimeUnit.MILLISECONDS) - .setReadRpcTimeout(readRpcTimeout, TimeUnit.MILLISECONDS) - .setWriteRpcTimeout(writeRpcTimeout, TimeUnit.MILLISECONDS) - .setOperationTimeout(operationTimeout, TimeUnit.MILLISECONDS).build(), - poolSupplier); + conn.getTableBuilder(tableName).setRpcTimeout(rpcTimeout, TimeUnit.MILLISECONDS) + .setReadRpcTimeout(readRpcTimeout, TimeUnit.MILLISECONDS) + .setWriteRpcTimeout(writeRpcTimeout, TimeUnit.MILLISECONDS) + .setOperationTimeout(operationTimeout, TimeUnit.MILLISECONDS).build(), + poolSupplier); } }; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java index 975d8df71808..2ace3959ffa6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java @@ -25,8 +25,8 @@ /** * Registry for meta information needed for connection setup to a HBase cluster. Implementations - * hold cluster information such as this cluster's id, location of hbase:meta, etc.. - * Internal use only. + * hold cluster information such as this cluster's id, location of hbase:meta, etc.. Internal use + * only. 
*/ @InterfaceAudience.Private public interface ConnectionRegistry extends Closeable { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java index 92c075c0f18c..06728574d504 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java @@ -37,8 +37,8 @@ private ConnectionRegistryFactory() { */ static ConnectionRegistry getRegistry(Configuration conf) { Class clazz = - conf.getClass(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, RpcConnectionRegistry.class, - ConnectionRegistry.class); + conf.getClass(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, RpcConnectionRegistry.class, + ConnectionRegistry.class); return ReflectionUtils.newInstance(clazz, conf); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 70312aa4de46..5b6db64169df 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -310,7 +310,7 @@ static boolean noMoreResultsForReverseScan(Scan scan, RegionInfo info) { static CompletableFuture> allOf(List> futures) { return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])) - .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList())); + .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList())); } public static ScanResultCache createScanResultCache(Scan scan) { @@ -458,8 +458,8 @@ static CompletableFuture timelineConsistentRead(AsyncRegionLocator locato (locs, error) -> { if (error != null) { LOG.warn( - "Failed to locate all the replicas for table={}, row='{}', locateType={}" + - " give up timeline consistent read", + "Failed to locate all the replicas for table={}, row='{}', locateType={}" + + " give up timeline consistent read", tableName, Bytes.toStringBinary(row), locateType, error); return; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java index 533bd0f41b6d..45dec17a6958 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -27,22 +26,18 @@ public enum Consistency { // developer note: Do not reorder. Client.proto#Consistency depends on this order /** - * Strong consistency is the default consistency model in HBase, - * where reads and writes go through a single server which serializes - * the updates, and returns all data that was written and ack'd. + * Strong consistency is the default consistency model in HBase, where reads and writes go through + * a single server which serializes the updates, and returns all data that was written and ack'd. */ STRONG, /** - * Timeline consistent reads might return values that may not see - * the most recent updates. Write transactions are always performed - * in strong consistency model in HBase which guarantees that transactions - * are ordered, and replayed in the same order by all copies of the data. - * In timeline consistency, the get and scan requests can be answered from data - * that may be stale. - *
    - * The client may still observe transactions out of order if the requests are - * responded from different servers. + * Timeline consistent reads might return values that may not see the most recent updates. Write + * transactions are always performed in strong consistency model in HBase which guarantees that + * transactions are ordered, and replayed in the same order by all copies of the data. In timeline + * consistency, the get and scan requests can be answered from data that may be stale.
    + * The client may still observe transactions out of order if the requests are responded from + * different servers. */ TIMELINE, } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java index 56091ff6ec0d..66cafc94cb2a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorBlockingRpcCallback.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java index 72d588bc9763..3331c8107009 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,9 +22,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * CoprocessorDescriptor contains the details about how to build a coprocessor. - * This class is a pojo so there are no checks for the details carried by this class. - * Use {@link CoprocessorDescriptorBuilder} to instantiate a CoprocessorDescriptor + * CoprocessorDescriptor contains the details about how to build a coprocessor. This class is a pojo + * so there are no checks for the details carried by this class. Use + * {@link CoprocessorDescriptorBuilder} to instantiate a CoprocessorDescriptor */ @InterfaceAudience.Public public interface CoprocessorDescriptor { @@ -45,7 +44,7 @@ public interface CoprocessorDescriptor { int getPriority(); /** - * @return Arbitrary key-value parameter pairs passed into the coprocessor. + * @return Arbitrary key-value parameter pairs passed into the coprocessor. */ Map getProperties(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java index 71d1264c0741..1bc64d01fbe1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
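The Consistency javadoc reflowed above distinguishes STRONG from TIMELINE reads; a small usage sketch (illustrative row name, assumes an open Table `table`):

    Get get = new Get(Bytes.toBytes("row1"));
    get.setConsistency(Consistency.TIMELINE);   // allow the read to be served by secondary replicas
    Result result = table.get(get);
    if (result.isStale()) {
      // served from a secondary replica and may lag the primary;
      // writes still always go through the strong-consistency path
    }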
See the NOTICE file * distributed with this work for additional information @@ -80,7 +79,7 @@ private static final class CoprocessorDescriptorImpl implements CoprocessorDescr private final Map properties; private CoprocessorDescriptorImpl(String className, String jarPath, int priority, - Map properties) { + Map properties) { this.className = className; this.jarPath = jarPath; this.priority = priority; @@ -109,10 +108,8 @@ public Map getProperties() { @Override public String toString() { - return "class:" + className - + ", jarPath:" + jarPath - + ", priority:" + priority - + ", properties:" + properties; + return "class:" + className + ", jarPath:" + jarPath + ", priority:" + priority + + ", properties:" + properties; } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java index 837e72d109c2..73e128dfd8f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Scan cursor to tell client where server is scanning - * {@link Scan#setNeedCursorResult(boolean)} - * {@link Result#isCursor()} - * {@link Result#getCursor()} + * Scan cursor to tell client where server is scanning {@link Scan#setNeedCursorResult(boolean)} + * {@link Result#isCursor()} {@link Result#getCursor()} */ @InterfaceAudience.Public public class Cursor { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java index 2a219f6a39c9..5ec4f14c2c12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -37,76 +35,68 @@ /** * Used to perform Delete operations on a single row. *

    - * To delete an entire row, instantiate a Delete object with the row - * to delete. To further define the scope of what to delete, perform - * additional methods as outlined below. + * To delete an entire row, instantiate a Delete object with the row to delete. To further define + * the scope of what to delete, perform additional methods as outlined below. + *

    + * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily} for each family to + * delete. *

    - * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily} - * for each family to delete. + * To delete multiple versions of specific columns, execute {@link #addColumns(byte[], byte[]) + * deleteColumns} for each column to delete. *

    - * To delete multiple versions of specific columns, execute - * {@link #addColumns(byte[], byte[]) deleteColumns} - * for each column to delete. + * To delete specific versions of specific columns, execute {@link #addColumn(byte[], byte[], long) + * deleteColumn} for each column version to delete. *

    - * To delete specific versions of specific columns, execute - * {@link #addColumn(byte[], byte[], long) deleteColumn} - * for each column version to delete. + * Specifying timestamps, deleteFamily and deleteColumns will delete all versions with a timestamp + * less than or equal to that passed. If no timestamp is specified, an entry is added with a + * timestamp of 'now' where 'now' is the servers's EnvironmentEdgeManager.currentTime(). Specifying + * a timestamp to the deleteColumn method will delete versions only with a timestamp equal to that + * specified. If no timestamp is passed to deleteColumn, internally, it figures the most recent + * cell's timestamp and adds a delete at that timestamp; i.e. it deletes the most recently added + * cell. *

    - * Specifying timestamps, deleteFamily and deleteColumns will delete all - * versions with a timestamp less than or equal to that passed. If no - * timestamp is specified, an entry is added with a timestamp of 'now' - * where 'now' is the servers's EnvironmentEdgeManager.currentTime(). - * Specifying a timestamp to the deleteColumn method will - * delete versions only with a timestamp equal to that specified. - * If no timestamp is passed to deleteColumn, internally, it figures the - * most recent cell's timestamp and adds a delete at that timestamp; i.e. - * it deletes the most recently added cell. - *

    The timestamp passed to the constructor is used ONLY for delete of - * rows. For anything less -- a deleteColumn, deleteColumns or - * deleteFamily -- then you need to use the method overrides that take a - * timestamp. The constructor timestamp is not referenced. + * The timestamp passed to the constructor is used ONLY for delete of rows. For anything less -- a + * deleteColumn, deleteColumns or deleteFamily -- then you need to use the method overrides that + * take a timestamp. The constructor timestamp is not referenced. */ @InterfaceAudience.Public public class Delete extends Mutation { /** * Create a Delete operation for the specified row. *
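The Delete javadoc above spells out the timestamp semantics of the various add* methods; a short sketch of the calls it refers to (row, family and timestamp values are illustrative, assumes an open Table `table`):

    // Delete an entire row as of 'now'.
    table.delete(new Delete(Bytes.toBytes("row1")));

    // Scope the delete; each call carries its own timestamp semantics.
    Delete d = new Delete(Bytes.toBytes("row2"));
    d.addFamily(Bytes.toBytes("cf"));                                     // all cells in the family
    d.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q1"), 1234567890L);  // all versions <= ts
    d.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"));                // latest version only
    table.delete(d);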

    - * If no further operations are done, this will delete everything - * associated with the specified row (all versions of all columns in all - * families), with timestamp from current point in time to the past. - * Cells defining timestamp for a future point in time - * (timestamp > current time) will not be deleted. + * If no further operations are done, this will delete everything associated with the specified + * row (all versions of all columns in all families), with timestamp from current point in time to + * the past. Cells defining timestamp for a future point in time (timestamp > current time) will + * not be deleted. * @param row row key */ - public Delete(byte [] row) { + public Delete(byte[] row) { this(row, HConstants.LATEST_TIMESTAMP); } /** - * Create a Delete operation for the specified row and timestamp.

    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. + * Create a Delete operation for the specified row and timestamp. + *

    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *

    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. * @param row row key * @param timestamp maximum version timestamp (only for delete row) */ - public Delete(byte [] row, long timestamp) { + public Delete(byte[] row, long timestamp) { this(row, 0, row.length, timestamp); } /** - * Create a Delete operation for the specified row and timestamp.

    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. + * Create a Delete operation for the specified row and timestamp. + *

    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *

    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. * @param row We make a local copy of this passed in row. * @param rowOffset * @param rowLength @@ -116,14 +106,13 @@ public Delete(final byte[] row, final int rowOffset, final int rowLength) { } /** - * Create a Delete operation for the specified row and timestamp.

    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. + * Create a Delete operation for the specified row and timestamp. + *

    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *

    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. * @param row We make a local copy of this passed in row. * @param rowOffset * @param rowLength @@ -143,15 +132,14 @@ public Delete(final Delete deleteToCopy) { } /** - * Construct the Delete with user defined data. NOTED: - * 1) all cells in the familyMap must have the delete type. - * see {@link org.apache.hadoop.hbase.Cell.Type} - * 2) the row of each cell must be same with passed row. + * Construct the Delete with user defined data. NOTED: 1) all cells in the familyMap must have the + * delete type. see {@link org.apache.hadoop.hbase.Cell.Type} 2) the row of each cell must be same + * with passed row. * @param row row. CAN'T be null * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Delete(byte[] row, long ts, NavigableMap> familyMap) { + public Delete(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } @@ -169,32 +157,30 @@ public Delete add(Cell cell) throws IOException { /** * Delete all versions of all columns of the specified family. *

    - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. + * Overrides previous calls to deleteColumn and deleteColumns for the specified family. * @param family family name * @return this for invocation chaining */ - public Delete addFamily(final byte [] family) { + public Delete addFamily(final byte[] family) { this.addFamily(family, this.ts); return this; } /** - * Delete all columns of the specified family with a timestamp less than - * or equal to the specified timestamp. + * Delete all columns of the specified family with a timestamp less than or equal to the specified + * timestamp. *

    - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. + * Overrides previous calls to deleteColumn and deleteColumns for the specified family. * @param family family name * @param timestamp maximum version timestamp * @return this for invocation chaining */ - public Delete addFamily(final byte [] family, final long timestamp) { + public Delete addFamily(final byte[] family, final long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } List list = getCellList(family); - if(!list.isEmpty()) { + if (!list.isEmpty()) { list.clear(); } KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily); @@ -203,16 +189,14 @@ public Delete addFamily(final byte [] family, final long timestamp) { } /** - * Delete all columns of the specified family with a timestamp equal to - * the specified timestamp. + * Delete all columns of the specified family with a timestamp equal to the specified timestamp. * @param family family name * @param timestamp version timestamp * @return this for invocation chaining */ - public Delete addFamilyVersion(final byte [] family, final long timestamp) { + public Delete addFamilyVersion(final byte[] family, final long timestamp) { List list = getCellList(family); - list.add(new KeyValue(row, family, null, timestamp, - KeyValue.Type.DeleteFamilyVersion)); + list.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamilyVersion)); return this; } @@ -222,39 +206,37 @@ public Delete addFamilyVersion(final byte [] family, final long timestamp) { * @param qualifier column qualifier * @return this for invocation chaining */ - public Delete addColumns(final byte [] family, final byte [] qualifier) { + public Delete addColumns(final byte[] family, final byte[] qualifier) { addColumns(family, qualifier, this.ts); return this; } /** - * Delete all versions of the specified column with a timestamp less than - * or equal to the specified timestamp. + * Delete all versions of the specified column with a timestamp less than or equal to the + * specified timestamp. * @param family family name * @param qualifier column qualifier * @param timestamp maximum version timestamp * @return this for invocation chaining */ - public Delete addColumns(final byte [] family, final byte [] qualifier, final long timestamp) { + public Delete addColumns(final byte[] family, final byte[] qualifier, final long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } List list = getCellList(family); - list.add(new KeyValue(this.row, family, qualifier, timestamp, - KeyValue.Type.DeleteColumn)); + list.add(new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.DeleteColumn)); return this; } /** - * Delete the latest version of the specified column. - * This is an expensive call in that on the server-side, it first does a - * get to find the latest versions timestamp. Then it adds a delete using - * the fetched cells timestamp. + * Delete the latest version of the specified column. This is an expensive call in that on the + * server-side, it first does a get to find the latest versions timestamp. Then it adds a delete + * using the fetched cells timestamp. 
* @param family family name * @param qualifier column qualifier * @return this for invocation chaining */ - public Delete addColumn(final byte [] family, final byte [] qualifier) { + public Delete addColumn(final byte[] family, final byte[] qualifier) { this.addColumn(family, qualifier, this.ts); return this; } @@ -266,7 +248,7 @@ public Delete addColumn(final byte [] family, final byte [] qualifier) { * @param timestamp version timestamp * @return this for invocation chaining */ - public Delete addColumn(byte [] family, byte [] qualifier, long timestamp) { + public Delete addColumn(byte[] family, byte[] qualifier, long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java index 9419137842f7..4bc7a76514a2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.DoNotRetryIOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java index aaf0b5cc7320..7ee451b982cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java @@ -15,22 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Enum describing the durability guarantees for tables and {@link Mutation}s - * Note that the items must be sorted in order of increasing durability + * Enum describing the durability guarantees for tables and {@link Mutation}s Note that the items + * must be sorted in order of increasing durability */ @InterfaceAudience.Public public enum Durability { /* Developer note: Do not rename the enum field names. They are serialized in HTableDescriptor */ /** - * If this is for tables durability, use HBase's global default value (SYNC_WAL). - * Otherwise, if this is for mutation, use the table's default setting to determine durability. - * This must remain the first option. + * If this is for tables durability, use HBase's global default value (SYNC_WAL). Otherwise, if + * this is for mutation, use the table's default setting to determine durability. This must remain + * the first option. */ USE_DEFAULT, /** @@ -42,15 +41,15 @@ public enum Durability { */ ASYNC_WAL, /** - * Write the Mutation to the WAL synchronously. - * The data is flushed to the filesystem implementation, but not necessarily to disk. - * For HDFS this will flush the data to the designated number of DataNodes. - * See HADOOP-6313 + * Write the Mutation to the WAL synchronously. The data is flushed to the filesystem + * implementation, but not necessarily to disk. 
For HDFS this will flush the data to the + * designated number of DataNodes. See + * HADOOP-6313 */ SYNC_WAL, /** - * Write the Mutation to the WAL synchronously and force the entries to disk. - * See HADOOP-6313 + * Write the Mutation to the WAL synchronously and force the entries to disk. See + * HADOOP-6313 */ FSYNC_WAL } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index 0f04407ac3e3..09242d3db48e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; - import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -29,37 +27,36 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Used to perform Get operations on a single row. *
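The Durability levels above are normally chosen per mutation; a minimal sketch (illustrative names, assumes an open Table `table`):

    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    put.setDurability(Durability.SYNC_WAL);   // flush to the WAL (not necessarily to disk) before acking
    table.put(put);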
<p>
    - * To get everything for a row, instantiate a Get object with the row to get. - * To further narrow the scope of what to Get, use the methods below. + * To get everything for a row, instantiate a Get object with the row to get. To further narrow the + * scope of what to Get, use the methods below. *
<p>
    - * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} - * for each family to retrieve. + * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} for each + * family to retrieve. *
<p>
    - * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} - * for each column to retrieve. + * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} for each column to + * retrieve. *
<p>
    - * To only retrieve columns within a specific range of version timestamps, - * execute {@link #setTimeRange(long, long) setTimeRange}. + * To only retrieve columns within a specific range of version timestamps, execute + * {@link #setTimeRange(long, long) setTimeRange}. *
<p>
    - * To only retrieve columns with a specific timestamp, execute - * {@link #setTimestamp(long) setTimestamp}. + * To only retrieve columns with a specific timestamp, execute {@link #setTimestamp(long) + * setTimestamp}. *
<p>
    - * To limit the number of versions of each column to be returned, execute - * {@link #readVersions(int) readVersions}. + * To limit the number of versions of each column to be returned, execute {@link #readVersions(int) + * readVersions}. *
<p>
    * To add a filter, call {@link #setFilter(Filter) setFilter}. */ @@ -67,30 +64,29 @@ public class Get extends Query implements Row { private static final Logger LOG = LoggerFactory.getLogger(Get.class); - private byte [] row = null; + private byte[] row = null; private int maxVersions = 1; private boolean cacheBlocks = true; private int storeLimit = -1; private int storeOffset = 0; private TimeRange tr = TimeRange.allTime(); private boolean checkExistenceOnly = false; - private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Create a Get operation for the specified row. *
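A hypothetical usage sketch (not part of this patch) tying together the narrowing methods listed in the class javadoc above; the table, family, and qualifier names are illustrative only.
  // Assumes the usual org.apache.hadoop.hbase.client.* and org.apache.hadoop.hbase.util.Bytes imports.
  static Result narrowedGet(Table table) throws IOException {
    Get get = new Get(Bytes.toBytes("row-1"));
    get.addFamily(Bytes.toBytes("cf1"));                      // every column in one family
    get.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q1")); // plus one specific column
    get.setTimeRange(0L, System.currentTimeMillis());         // versions in [minStamp, maxStamp)
    get.readVersions(2);                                      // at most two versions per column
    return table.get(get);
  }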
<p>
    - * If no further operations are done, this will get the latest version of - * all columns in all families of the specified row. + * If no further operations are done, this will get the latest version of all columns in all + * families of the specified row. * @param row row key */ - public Get(byte [] row) { + public Get(byte[] row) { Mutation.checkRow(row); this.row = row; } /** * Copy-constructor - * * @param get */ public Get(Get get) { @@ -108,8 +104,8 @@ public Get(Get get) { this.checkExistenceOnly = get.isCheckExistenceOnly(); this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); Map> fams = get.getFamilyMap(); - for (Map.Entry> entry : fams.entrySet()) { - byte [] fam = entry.getKey(); + for (Map.Entry> entry : fams.entrySet()) { + byte[] fam = entry.getKey(); NavigableSet cols = entry.getValue(); if (cols != null && cols.size() > 0) { for (byte[] col : cols) { @@ -166,7 +162,7 @@ public Get setCheckExistenceOnly(boolean checkExistenceOnly) { * @param family family name * @return the Get object */ - public Get addFamily(byte [] family) { + public Get addFamily(byte[] family) { familyMap.remove(family); familyMap.put(family, null); return this; @@ -180,9 +176,9 @@ public Get addFamily(byte [] family) { * @param qualifier column qualifier * @return the Get objec */ - public Get addColumn(byte [] family, byte [] qualifier) { - NavigableSet set = familyMap.get(family); - if(set == null) { + public Get addColumn(byte[] family, byte[] qualifier) { + NavigableSet set = familyMap.get(family); + if (set == null) { set = new TreeSet<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, set); } @@ -194,8 +190,7 @@ public Get addColumn(byte [] family, byte [] qualifier) { } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp). + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive * @return this for invocation chaining @@ -213,7 +208,7 @@ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { public Get setTimestamp(long timestamp) { try { tr = TimeRange.at(timestamp); - } catch(Exception e) { + } catch (Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); throw e; @@ -286,12 +281,9 @@ public Get setFilter(Filter filter) { /** * Set whether blocks should be cached for this Get. *
<p>
    - * This is true by default. When true, default settings of the table and - * family are used (this will never override caching blocks if the block - * cache is disabled for that family or entirely). - * - * @param cacheBlocks if false, default settings are overridden and blocks - * will not be cached + * This is true by default. When true, default settings of the table and family are used (this + * will never override caching blocks if the block cache is disabled for that family or entirely). + * @param cacheBlocks if false, default settings are overridden and blocks will not be cached */ public Get setCacheBlocks(boolean cacheBlocks) { this.cacheBlocks = cacheBlocks; @@ -300,8 +292,7 @@ public Get setCacheBlocks(boolean cacheBlocks) { /** * Get whether blocks should be cached for this Get. - * @return true if default caching should be used, false if blocks should not - * be cached + * @return true if default caching should be used, false if blocks should not be cached */ public boolean getCacheBlocks() { return cacheBlocks; @@ -312,7 +303,7 @@ public boolean getCacheBlocks() { * @return row */ @Override - public byte [] getRow() { + public byte[] getRow() { return this.row; } @@ -325,8 +316,7 @@ public int getMaxVersions() { } /** - * Method for retrieving the get's maximum number of values - * to return per Column Family + * Method for retrieving the get's maximum number of values to return per Column Family * @return the maximum number of values to fetch per CF */ public int getMaxResultsPerColumnFamily() { @@ -334,8 +324,7 @@ public int getMaxResultsPerColumnFamily() { } /** - * Method for retrieving the get's offset per row per column - * family (#kvs to be skipped) + * Method for retrieving the get's offset per row per column family (#kvs to be skipped) * @return the row offset */ public int getRowOffsetPerColumnFamily() { @@ -378,14 +367,13 @@ public boolean hasFamilies() { * Method for retrieving the get's familyMap * @return familyMap */ - public Map> getFamilyMap() { + public Map> getFamilyMap() { return this.familyMap; } /** - * Compile the table and column family (i.e. schema) information - * into a String. Useful for parsing and aggregation by debugging, - * logging, and administration tools. + * Compile the table and column family (i.e. schema) information into a String. Useful for parsing + * and aggregation by debugging, logging, and administration tools. * @return Map */ @Override @@ -393,17 +381,16 @@ public Map getFingerprint() { Map map = new HashMap<>(); List families = new ArrayList<>(this.familyMap.entrySet().size()); map.put("families", families); - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. 
* @param maxCols a limit on the number of columns output prior to truncation * @return Map */ @@ -425,11 +412,10 @@ public Map toMap(int maxCols) { map.put("timeRange", timeRange); int colCount = 0; // iterate through affected families and add details - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { List familyList = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), familyList); - if(entry.getValue() == null) { + if (entry.getValue() == null) { colCount++; --maxCols; familyList.add("ALL"); @@ -438,7 +424,7 @@ public Map toMap(int maxCols) { if (maxCols <= 0) { continue; } - for (byte [] column : entry.getValue()) { + for (byte[] column : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -459,7 +445,7 @@ public Map toMap(int maxCols) { @Override public int hashCode() { - // TODO: This is wrong. Can't have two gets the same just because on same row. But it + // TODO: This is wrong. Can't have two gets the same just because on same row. But it // matches how equals works currently and gets rid of the findbugs warning. return Bytes.hashCode(this.getRow()); } @@ -473,7 +459,7 @@ public boolean equals(Object obj) { return false; } Row other = (Row) obj; - // TODO: This is wrong. Can't have two gets the same just because on same row. + // TODO: This is wrong. Can't have two gets the same just because on same row. return Row.COMPARATOR.compare(this, other) == 0; } @@ -514,7 +500,7 @@ public Get setReplicaId(int Id) { @Override public Get setIsolationLevel(IsolationLevel level) { - return (Get) super.setIsolationLevel(level); + return (Get) super.setIsolationLevel(level); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java index d153ef7dd771..12c4e9458816 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,20 +51,21 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignsResponse; /** - * Use {@link Connection#getHbck()} to obtain an instance of {@link Hbck} instead of - * constructing an HBaseHbck directly. - * - *
<p>
    Connection should be an unmanaged connection obtained via - * {@link ConnectionFactory#createConnection(Configuration)}.
</p>
    - * - *
<p>
    NOTE: The methods in here can do damage to a cluster if applied in the wrong sequence or at - * the wrong time. Use with caution. For experts only. These methods are only for the - * extreme case where the cluster has been damaged or has achieved an inconsistent state because - * of some unforeseen circumstance or bug and requires manual intervention. - * - *
<p>
    An instance of this class is lightweight and not-thread safe. A new instance should be created - * by each thread. Pooling or caching of the instance is not recommended.
</p>
    - * + * Use {@link Connection#getHbck()} to obtain an instance of {@link Hbck} instead of constructing an + * HBaseHbck directly. + *
<p>
    + * Connection should be an unmanaged connection obtained via + * {@link ConnectionFactory#createConnection(Configuration)}. + *
</p>
    + *
<p>
    + * NOTE: The methods in here can do damage to a cluster if applied in the wrong sequence or at the + * wrong time. Use with caution. For experts only. These methods are only for the extreme case where + * the cluster has been damaged or has achieved an inconsistent state because of some unforeseen + * circumstance or bug and requires manual intervention. + *
<p>
    + * An instance of this class is lightweight and not-thread safe. A new instance should be created by + * each thread. Pooling or caching of the instance is not recommended. + *
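A hypothetical sketch (not part of this patch) of the calling pattern described above: obtain the Hbck handle from an unmanaged connection and schedule a fix-up, here an assign. The configuration object and the encoded region name are made-up example values.
  // Assumes the usual org.apache.hadoop.hbase.client imports plus java.util.Arrays and java.util.List.
  static List<Long> scheduleAssign(Configuration conf) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Hbck hbck = connection.getHbck()) {
      // Returns the procedure ids scheduled by the master for the given encoded region name.
      return hbck.assigns(Arrays.asList("de00010733901a05f5a2a3a382e27dd4"));
    }
  }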
</p>
    * @see ConnectionFactory * @see Hbck */ @@ -102,9 +103,9 @@ public boolean isAborted() { @Override public TableState setTableStateInMeta(TableState state) throws IOException { try { - GetTableStateResponse response = hbck.setTableStateInMeta( - rpcControllerFactory.newController(), - RequestConverter.buildSetTableStateInMetaRequest(state)); + GetTableStateResponse response = + hbck.setTableStateInMeta(rpcControllerFactory.newController(), + RequestConverter.buildSetTableStateInMetaRequest(state)); return TableState.convert(state.getTableName(), response.getTableState()); } catch (ServiceException se) { LOG.debug("table={}, state={}", state.getTableName(), state.getState(), se); @@ -114,14 +115,14 @@ public TableState setTableStateInMeta(TableState state) throws IOException { @Override public Map setRegionStateInMeta( - Map nameOrEncodedName2State) throws IOException { + Map nameOrEncodedName2State) throws IOException { try { if (LOG.isDebugEnabled()) { nameOrEncodedName2State.forEach((k, v) -> LOG.debug("region={}, state={}", k, v)); } MasterProtos.SetRegionStateInMetaResponse response = - hbck.setRegionStateInMeta(rpcControllerFactory.newController(), - RequestConverter.buildSetRegionStateInMetaRequest(nameOrEncodedName2State)); + hbck.setRegionStateInMeta(rpcControllerFactory.newController(), + RequestConverter.buildSetRegionStateInMetaRequest(nameOrEncodedName2State)); Map result = new HashMap<>(); for (RegionSpecifierAndState nameAndState : response.getStatesList()) { result.put(nameAndState.getRegionSpecifier().getValue().toStringUtf8(), @@ -134,11 +135,10 @@ public Map setRegionStateInMeta( } @Override - public List assigns(List encodedRegionNames, boolean override) - throws IOException { + public List assigns(List encodedRegionNames, boolean override) throws IOException { try { AssignsResponse response = this.hbck.assigns(rpcControllerFactory.newController(), - RequestConverter.toAssignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toAssignRegionsRequest(encodedRegionNames, override)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); @@ -151,7 +151,7 @@ public List unassigns(List encodedRegionNames, boolean override) throws IOException { try { UnassignsResponse response = this.hbck.unassigns(rpcControllerFactory.newController(), - RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); @@ -165,29 +165,25 @@ private static String toCommaDelimitedString(List list) { @Override public List bypassProcedure(List pids, long waitTime, boolean override, - boolean recursive) - throws IOException { - BypassProcedureResponse response = ProtobufUtil.call( - new Callable() { - @Override - public BypassProcedureResponse call() throws Exception { - try { - return hbck.bypassProcedure(rpcControllerFactory.newController(), - BypassProcedureRequest.newBuilder().addAllProcId(pids). - setWaitTime(waitTime).setOverride(override).setRecursive(recursive).build()); - } catch (Throwable t) { - LOG.error(pids.stream().map(i -> i.toString()). 
- collect(Collectors.joining(", ")), t); - throw t; - } - } - }); + boolean recursive) throws IOException { + BypassProcedureResponse response = ProtobufUtil.call(new Callable() { + @Override + public BypassProcedureResponse call() throws Exception { + try { + return hbck.bypassProcedure(rpcControllerFactory.newController(), + BypassProcedureRequest.newBuilder().addAllProcId(pids).setWaitTime(waitTime) + .setOverride(override).setRecursive(recursive).build()); + } catch (Throwable t) { + LOG.error(pids.stream().map(i -> i.toString()).collect(Collectors.joining(", ")), t); + throw t; + } + } + }); return response.getBypassedList(); } @Override - public List scheduleServerCrashProcedures(List serverNames) - throws IOException { + public List scheduleServerCrashProcedures(List serverNames) throws IOException { try { ScheduleServerCrashProcedureResponse response = this.hbck.scheduleServerCrashProcedure(rpcControllerFactory.newController(), @@ -206,9 +202,8 @@ public List scheduleServerCrashProcedures(List serverNames) public List scheduleSCPsForUnknownServers() throws IOException { try { ScheduleSCPsForUnknownServersResponse response = - this.hbck.scheduleSCPsForUnknownServers( - rpcControllerFactory.newController(), - ScheduleSCPsForUnknownServersRequest.newBuilder().build()); + this.hbck.scheduleSCPsForUnknownServers(rpcControllerFactory.newController(), + ScheduleSCPsForUnknownServersRequest.newBuilder().build()); return response.getPidList(); } catch (ServiceException se) { LOG.debug("Failed to run ServerCrashProcedures for unknown servers", se); @@ -220,7 +215,7 @@ public List scheduleSCPsForUnknownServers() throws IOException { public boolean runHbckChore() throws IOException { try { RunHbckChoreResponse response = this.hbck.runHbckChore(rpcControllerFactory.newController(), - RunHbckChoreRequest.newBuilder().build()); + RunHbckChoreRequest.newBuilder().build()); return response.getRan(); } catch (ServiceException se) { LOG.debug("Failed to run HBCK chore", se); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java index 7e9a519b95f1..d9769742ba40 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java @@ -30,19 +30,19 @@ /** * Hbck fixup tool APIs. Obtain an instance from {@link Connection#getHbck()} and call * {@link #close()} when done. - *
<p>
    WARNING: the below methods can damage the cluster. It may leave the cluster in an - * indeterminate state, e.g. region not assigned, or some hdfs files left behind. After running - * any of the below, operators may have to do some clean up on hdfs or schedule some assign - * procedures to get regions back online. DO AT YOUR OWN RISK. For experienced users only. - * + *
<p>
    + * WARNING: the below methods can damage the cluster. It may leave the cluster in an indeterminate + * state, e.g. region not assigned, or some hdfs files left behind. After running any of the below, + * operators may have to do some clean up on hdfs or schedule some assign procedures to get regions + * back online. DO AT YOUR OWN RISK. For experienced users only. * @see ConnectionFactory * @since 2.0.2, 2.1.1 */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) public interface Hbck extends Abortable, Closeable { /** - * Update table state in Meta only. No procedures are submitted to open/assign or - * close/unassign regions of the table. + * Update table state in Meta only. No procedures are submitted to open/assign or close/unassign + * regions of the table. * @param state table state * @return previous state of the table in Meta */ @@ -54,21 +54,21 @@ public interface Hbck extends Abortable, Closeable { * @param nameOrEncodedName2State list of all region states to be updated in meta * @return previous state of the region in Meta */ - Map - setRegionStateInMeta(Map nameOrEncodedName2State) throws IOException; + Map setRegionStateInMeta( + Map nameOrEncodedName2State) throws IOException; /** - * Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time - * -- good if many Regions to online -- and it will schedule the assigns even in the case where + * Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time -- + * good if many Regions to online -- and it will schedule the assigns even in the case where * Master is initializing (as long as the ProcedureExecutor is up). Does NOT call Coprocessor * hooks. * @param override You need to add the override for case where a region has previously been - * bypassed. When a Procedure has been bypassed, a Procedure will have completed - * but no other Procedure will be able to make progress on the target entity - * (intentionally). This override flag will override this fencing mechanism. - * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding - * for hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an - * example of what a random user-space encoded Region name looks like. + * bypassed. When a Procedure has been bypassed, a Procedure will have completed but no + * other Procedure will be able to make progress on the target entity (intentionally). + * This override flag will override this fencing mechanism. + * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for + * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example of what a random + * user-space encoded Region name looks like. */ List assigns(List encodedRegionNames, boolean override) throws IOException; @@ -82,12 +82,12 @@ default List assigns(List encodedRegionNames) throws IOException { * case where Master is initializing (as long as the ProcedureExecutor is up). Does NOT call * Coprocessor hooks. * @param override You need to add the override for case where a region has previously been - * bypassed. When a Procedure has been bypassed, a Procedure will have completed - * but no other Procedure will be able to make progress on the target entity - * (intentionally). This override flag will override this fencing mechanism. - * @param encodedRegionNames Region encoded names; e.g. 
1588230740 is the hard-coded encoding - * for hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an - * example of what a random user-space encoded Region name looks like. + * bypassed. When a Procedure has been bypassed, a Procedure will have completed but no + * other Procedure will be able to make progress on the target entity (intentionally). + * This override flag will override this fencing mechanism. + * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for + * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example of what a random + * user-space encoded Region name looks like. */ List unassigns(List encodedRegionNames, boolean override) throws IOException; @@ -96,17 +96,16 @@ default List unassigns(List encodedRegionNames) throws IOException } /** - * Bypass specified procedure and move it to completion. Procedure is marked completed but - * no actual work is done from the current state/step onwards. Parents of the procedure are - * also marked for bypass. - * + * Bypass specified procedure and move it to completion. Procedure is marked completed but no + * actual work is done from the current state/step onwards. Parents of the procedure are also + * marked for bypass. * @param pids of procedures to complete. * @param waitTime wait time in ms for acquiring lock for a procedure * @param override if override set to true, we will bypass the procedure even if it is executing. - * This is for procedures which can't break out during execution (bugs?). - * @param recursive If set, if a parent procedure, we will find and bypass children and then - * the parent procedure (Dangerous but useful in case where child procedure has been 'lost'). - * Does not always work. Experimental. + * This is for procedures which can't break out during execution (bugs?). + * @param recursive If set, if a parent procedure, we will find and bypass children and then the + * parent procedure (Dangerous but useful in case where child procedure has been 'lost'). + * Does not always work. Experimental. * @return true if procedure is marked for bypass successfully, false otherwise */ List bypassProcedure(List pids, long waitTime, boolean override, boolean recursive) @@ -118,7 +117,6 @@ List bypassProcedure(List pids, long waitTime, boolean override, /** * Request HBCK chore to run at master side. - * * @return true if HBCK chore ran, false if HBCK chore already running * @throws IOException if a remote or network exception occurs */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java index 01ec316c798a..4d0d4d9a2b4f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -41,7 +39,6 @@ public final class ImmutableScan extends Scan { /** * Create Immutable instance of Scan from given Scan object - * * @param scan Copy all values from Scan */ public ImmutableScan(Scan scan) { @@ -71,7 +68,7 @@ public Scan setTimestamp(long timestamp) { @Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setColumnFamilyTimeRange"); + "ImmutableScan does not allow access to setColumnFamilyTimeRange"); } @Override @@ -97,13 +94,13 @@ public Scan withStopRow(byte[] stopRow, boolean inclusive) { @Override public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setStartStopRowForPrefixScan"); + "ImmutableScan does not allow access to setStartStopRowForPrefixScan"); } @Override public Scan readAllVersions() { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to readAllVersions"); + "ImmutableScan does not allow access to readAllVersions"); } @Override @@ -119,13 +116,13 @@ public Scan setBatch(int batch) { @Override public Scan setMaxResultsPerColumnFamily(int limit) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setMaxResultsPerColumnFamily"); + "ImmutableScan does not allow access to setMaxResultsPerColumnFamily"); } @Override public Scan setRowOffsetPerColumnFamily(int offset) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setRowOffsetPerColumnFamily"); + "ImmutableScan does not allow access to setRowOffsetPerColumnFamily"); } @Override @@ -136,7 +133,7 @@ public Scan setCaching(int caching) { @Override public Scan setMaxResultSize(long maxResultSize) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setMaxResultSize"); + "ImmutableScan does not allow access to setMaxResultSize"); } @Override @@ -152,7 +149,7 @@ public Scan setFamilyMap(Map> familyMap) { @Override public Scan setCacheBlocks(boolean cacheBlocks) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setCacheBlocks"); + "ImmutableScan does not allow access to setCacheBlocks"); } @Override @@ -163,13 +160,13 @@ public Scan setReversed(boolean reversed) { @Override public Scan setAllowPartialResults(final boolean allowPartialResults) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setAllowPartialResults"); + "ImmutableScan does not allow access to setAllowPartialResults"); } @Override public Scan setLoadColumnFamiliesOnDemand(boolean value) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setLoadColumnFamiliesOnDemand"); + "ImmutableScan does not allow access to setLoadColumnFamiliesOnDemand"); } @Override @@ -190,7 +187,7 @@ public Scan setId(String id) { @Override public Scan setAuthorizations(Authorizations authorizations) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setAuthorizations"); + "ImmutableScan does not allow access to setAuthorizations"); } @Override @@ -206,7 +203,7 @@ public Scan setACL(String user, Permission perms) { @Override public Scan setConsistency(Consistency consistency) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setConsistency"); + "ImmutableScan does not allow access to setConsistency"); } 
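A hypothetical sketch (not part of this patch): wrapping a configured Scan so later code cannot mutate it; any of the overridden setters above would throw. Row keys here are illustrative.
  // Assumes org.apache.hadoop.hbase.client.* and org.apache.hadoop.hbase.util.Bytes imports.
  Scan template = new Scan().withStartRow(Bytes.toBytes("a")).withStopRow(Bytes.toBytes("z"));
  Scan readOnly = new ImmutableScan(template);
  readOnly.setCacheBlocks(false); // throws UnsupportedOperationException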
@Override @@ -217,7 +214,7 @@ public Scan setReplicaId(int id) { @Override public Scan setIsolationLevel(IsolationLevel level) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setIsolationLevel"); + "ImmutableScan does not allow access to setIsolationLevel"); } @Override @@ -228,14 +225,14 @@ public Scan setPriority(int priority) { @Override public Scan setScanMetricsEnabled(final boolean enabled) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setScanMetricsEnabled"); + "ImmutableScan does not allow access to setScanMetricsEnabled"); } @Override @Deprecated public Scan setAsyncPrefetch(boolean asyncPrefetch) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setAsyncPrefetch"); + "ImmutableScan does not allow access to setAsyncPrefetch"); } @Override @@ -246,7 +243,7 @@ public Scan setLimit(int limit) { @Override public Scan setOneRowLimit() { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setOneRowLimit"); + "ImmutableScan does not allow access to setOneRowLimit"); } @Override @@ -257,19 +254,19 @@ public Scan setReadType(ReadType readType) { @Override Scan setMvccReadPoint(long mvccReadPoint) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setMvccReadPoint"); + "ImmutableScan does not allow access to setMvccReadPoint"); } @Override Scan resetMvccReadPoint() { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to resetMvccReadPoint"); + "ImmutableScan does not allow access to resetMvccReadPoint"); } @Override public Scan setNeedCursorResult(boolean needCursorResult) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setNeedCursorResult"); + "ImmutableScan does not allow access to setNeedCursorResult"); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index bd824d4a855f..71c2e0386412 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,12 +38,12 @@ /** * Used to perform Increment operations on a single row. *
<p>
    - * This operation ensures atomicity to readers. Increments are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. + * This operation ensures atomicity to readers. Increments are done under a single row lock, so + * write operations to a row are synchronized, and readers are guaranteed to see this operation + * fully completed. *
<p>
    - * To increment columns of a row, instantiate an Increment object with the row - * to increment. At least one column to increment must be specified using the + * To increment columns of a row, instantiate an Increment object with the row to increment. At + * least one column to increment must be specified using the * {@link #addColumn(byte[], byte[], long)} method. */ @InterfaceAudience.Public @@ -58,7 +57,7 @@ public class Increment extends Mutation { * At least one column must be incremented. * @param row row key (we will make a copy of this). */ - public Increment(byte [] row) { + public Increment(byte[] row) { this(row, 0, row.length); } @@ -68,10 +67,11 @@ public Increment(byte [] row) { * At least one column must be incremented. * @param row row key (we will make a copy of this). */ - public Increment(final byte [] row, final int offset, final int length) { + public Increment(final byte[] row, final int offset, final int length) { checkRow(row, offset, length); this.row = Bytes.copy(row, offset, length); } + /** * Copy constructor * @param incrementToCopy increment to copy @@ -82,14 +82,13 @@ public Increment(Increment incrementToCopy) { } /** - * Construct the Increment with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. + * Construct the Increment with user defined data. NOTED: 1) all cells in the familyMap must have + * the Type.Put 2) the row of each cell must be same with passed row. * @param row row. CAN'T be null * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Increment(byte[] row, long ts, NavigableMap> familyMap) { + public Increment(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } @@ -99,14 +98,14 @@ public Increment(byte[] row, long ts, NavigableMap> familyMa * @return this * @throws java.io.IOException e */ - public Increment add(Cell cell) throws IOException{ + public Increment add(Cell cell) throws IOException { super.add(cell); return this; } /** - * Increment the column from the specific family with the specified qualifier - * by the specified amount. + * Increment the column from the specific family with the specified qualifier by the specified + * amount. *
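A hypothetical sketch (not part of this patch) of the pattern described above: one Increment carrying two counter columns, applied atomically under the row lock. Table, family, and qualifier names are assumptions.
  // Assumes the usual org.apache.hadoop.hbase.client.* and org.apache.hadoop.hbase.util.Bytes imports.
  static Result bumpCounters(Table table) throws IOException {
    Increment increment = new Increment(Bytes.toBytes("row-1"));
    increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
    increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("bytes"), 512L);
    return table.increment(increment);
  }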
<p>
    * Overrides previous calls to addColumn for this family and qualifier. * @param family family name @@ -114,7 +113,7 @@ public Increment add(Cell cell) throws IOException{ * @param amount amount to increment by * @return the Increment object */ - public Increment addColumn(byte [] family, byte [] qualifier, long amount) { + public Increment addColumn(byte[] family, byte[] qualifier, long amount) { if (family == null) { throw new IllegalArgumentException("family cannot be null"); } @@ -135,12 +134,11 @@ public TimeRange getTimeRange() { /** * Sets the TimeRange to be used on the Get for this increment. *
<p>
    - * This is useful for when you have counters that only last for specific - * periods of time (ie. counters that are partitioned by time). By setting - * the range of valid times for this increment, you can potentially gain - * some performance with a more optimal Get operation. - * Be careful adding the time range to this class as you will update the old cell if the - * time range doesn't include the latest cells. + * This is useful for when you have counters that only last for specific periods of time (ie. + * counters that are partitioned by time). By setting the range of valid times for this increment, + * you can potentially gain some performance with a more optimal Get operation. Be careful adding + * the time range to this class as you will update the old cell if the time range doesn't include + * the latest cells. *
<p>
    * This range is used as [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive @@ -161,8 +159,8 @@ public Increment setTimestamp(long timestamp) { /** * @param returnResults True (default) if the increment operation should return the results. A - * client that is not interested in the result can save network bandwidth setting this - * to false. + * client that is not interested in the result can save network bandwidth setting this to + * false. */ @Override public Increment setReturnResults(boolean returnResults) { @@ -197,21 +195,20 @@ public boolean hasFamilies() { } /** - * Before 0.95, when you called Increment#getFamilyMap(), you got back - * a map of families to a list of Longs. Now, {@link #getFamilyCellMap()} returns - * families by list of Cells. This method has been added so you can have the - * old behavior. + * Before 0.95, when you called Increment#getFamilyMap(), you got back a map of families to a list + * of Longs. Now, {@link #getFamilyCellMap()} returns families by list of Cells. This method has + * been added so you can have the old behavior. * @return Map of families to a Map of qualifiers and their Long increments. * @since 0.95.0 */ - public Map> getFamilyMapOfLongs() { + public Map> getFamilyMapOfLongs() { NavigableMap> map = super.getFamilyCellMap(); - Map> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (Map.Entry> entry: map.entrySet()) { - NavigableMap longs = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (Cell cell: entry.getValue()) { + Map> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Map.Entry> entry : map.entrySet()) { + NavigableMap longs = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Cell cell : entry.getValue()) { longs.put(CellUtil.cloneQualifier(cell), - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } results.put(entry.getKey(), longs); } @@ -226,14 +223,14 @@ public String toString() { StringBuilder sb = new StringBuilder(); sb.append("row="); sb.append(Bytes.toStringBinary(this.row)); - if(this.familyMap.isEmpty()) { + if (this.familyMap.isEmpty()) { sb.append(", no columns set to be incremented"); return sb.toString(); } sb.append(", families="); boolean moreThanOne = false; - for(Map.Entry> entry: this.familyMap.entrySet()) { - if(moreThanOne) { + for (Map.Entry> entry : this.familyMap.entrySet()) { + if (moreThanOne) { sb.append("), "); } else { moreThanOne = true; @@ -242,19 +239,19 @@ public String toString() { sb.append("(family="); sb.append(Bytes.toString(entry.getKey())); sb.append(", columns="); - if(entry.getValue() == null) { + if (entry.getValue() == null) { sb.append("NONE"); } else { sb.append("{"); boolean moreThanOneB = false; - for(Cell cell : entry.getValue()) { - if(moreThanOneB) { + for (Cell cell : entry.getValue()) { + if (moreThanOneB) { sb.append(", "); } else { moreThanOneB = true; } - sb.append(CellUtil.getCellKeyAsString(cell) + "+=" + - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + sb.append(CellUtil.getCellKeyAsString(cell) + "+=" + + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } sb.append("}"); } @@ -264,7 +261,7 @@ public String toString() { } @Override - protected long extraHeapSize(){ + protected long extraHeapSize() { return HEAP_OVERHEAD; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java index ba7609087001..f77a13b2406b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -25,29 +22,27 @@ /** * Specify Isolation levels in Scan operations. *
<p>
    - * There are two isolation levels. A READ_COMMITTED isolation level - * indicates that only data that is committed be returned in a scan. - * An isolation level of READ_UNCOMMITTED indicates that a scan - * should return data that is being modified by transactions that might - * not have been committed yet. + * There are two isolation levels. A READ_COMMITTED isolation level indicates that only data that is + * committed be returned in a scan. An isolation level of READ_UNCOMMITTED indicates that a scan + * should return data that is being modified by transactions that might not have been committed yet. */ @InterfaceAudience.Public public enum IsolationLevel { - READ_COMMITTED(1), - READ_UNCOMMITTED(2); + READ_COMMITTED(1), READ_UNCOMMITTED(2); - IsolationLevel(int value) {} + IsolationLevel(int value) { + } - public byte [] toBytes() { - return new byte [] { toByte() }; + public byte[] toBytes() { + return new byte[] { toByte() }; } public byte toByte() { - return (byte)this.ordinal(); + return (byte) this.ordinal(); } - public static IsolationLevel fromBytes(byte [] bytes) { + public static IsolationLevel fromBytes(byte[] bytes) { return IsolationLevel.fromByte(bytes[0]); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java index 41f79cf8e813..807c7f1f435d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Abstract response class representing online logs response from ring-buffer use-cases - * e.g slow/large RPC logs, balancer decision logs + * Abstract response class representing online logs response from ring-buffer use-cases e.g + * slow/large RPC logs, balancer decision logs */ @InterfaceAudience.Public @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java index 506fc4f76521..74db8afe4628 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -26,8 +24,8 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Slow/Large Log Query Filter with all filter and limit parameters - * Extends generic LogRequest used by Admin API getLogEntries + * Slow/Large Log Query Filter with all filter and limit parameters Extends generic LogRequest used + * by Admin API getLogEntries * @deprecated as of 2.4.0. 
Will be removed in 4.0.0. */ @InterfaceAudience.Public @@ -44,13 +42,11 @@ public class LogQueryFilter { private FilterByOperator filterByOperator = FilterByOperator.OR; public enum Type { - SLOW_LOG, - LARGE_LOG + SLOW_LOG, LARGE_LOG } public enum FilterByOperator { - AND, - OR + AND, OR } public String getRegionName() { @@ -121,41 +117,24 @@ public boolean equals(Object o) { LogQueryFilter that = (LogQueryFilter) o; - return new EqualsBuilder() - .append(limit, that.limit) - .append(regionName, that.regionName) - .append(clientAddress, that.clientAddress) - .append(tableName, that.tableName) - .append(userName, that.userName) - .append(type, that.type) - .append(filterByOperator, that.filterByOperator) - .isEquals(); + return new EqualsBuilder().append(limit, that.limit).append(regionName, that.regionName) + .append(clientAddress, that.clientAddress).append(tableName, that.tableName) + .append(userName, that.userName).append(type, that.type) + .append(filterByOperator, that.filterByOperator).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(regionName) - .append(clientAddress) - .append(tableName) - .append(userName) - .append(limit) - .append(type) - .append(filterByOperator) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(regionName).append(clientAddress).append(tableName) + .append(userName).append(limit).append(type).append(filterByOperator).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this) - .append("regionName", regionName) - .append("clientAddress", clientAddress) - .append("tableName", tableName) - .append("userName", userName) - .append("limit", limit) - .append("type", type) - .append("filterByOperator", filterByOperator) - .toString(); + return new ToStringBuilder(this).append("regionName", regionName) + .append("clientAddress", clientAddress).append("tableName", tableName) + .append("userName", userName).append("limit", limit).append("type", type) + .append("filterByOperator", filterByOperator).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java index d85971bdb057..7ff1323c5cee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,9 +53,7 @@ private CompletableFuture rpcCall(MethodDescriptor method, Message requ CompletableFuture future = new CompletableFuture<>(); CoprocessorServiceRequest csr = CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); - stub.execMasterService( - controller, - csr, + stub.execMasterService(controller, csr, new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback() { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index 05773d0b4195..1d8bdd70f646 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -57,16 +57,16 @@ public class MasterRegistry extends AbstractRpcBasedConnectionRegistry { /** Configuration key that controls the fan out of requests **/ public static final String MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY = - "hbase.client.master_registry.hedged.fanout"; + "hbase.client.master_registry.hedged.fanout"; public static final String MASTER_REGISTRY_INITIAL_REFRESH_DELAY_SECS = - "hbase.client.master_registry.initial_refresh_delay_secs"; + "hbase.client.master_registry.initial_refresh_delay_secs"; public static final String MASTER_REGISTRY_PERIODIC_REFRESH_INTERVAL_SECS = - "hbase.client.master_registry.refresh_interval_secs"; + "hbase.client.master_registry.refresh_interval_secs"; public static final String MASTER_REGISTRY_MIN_SECS_BETWEEN_REFRESHES = - "hbase.client.master_registry.min_secs_between_refreshes"; + "hbase.client.master_registry.min_secs_between_refreshes"; private static final String MASTER_ADDRS_CONF_SEPARATOR = ","; @@ -80,7 +80,7 @@ public static Set parseMasterAddrs(Configuration conf) throws Unknow String configuredMasters = getMasterAddr(conf); for (String masterAddr : configuredMasters.split(MASTER_ADDRS_CONF_SEPARATOR)) { HostAndPort masterHostPort = - HostAndPort.fromString(masterAddr.trim()).withDefaultPort(HConstants.DEFAULT_MASTER_PORT); + HostAndPort.fromString(masterAddr.trim()).withDefaultPort(HConstants.DEFAULT_MASTER_PORT); masterAddrs.add(ServerName.valueOf(masterHostPort.toString(), ServerName.NON_STARTCODE)); } Preconditions.checkArgument(!masterAddrs.isEmpty(), "At least one master address is needed"); @@ -91,7 +91,7 @@ public static Set parseMasterAddrs(Configuration conf) throws Unknow MasterRegistry(Configuration conf) throws IOException { super(conf, MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, MASTER_REGISTRY_INITIAL_REFRESH_DELAY_SECS, - MASTER_REGISTRY_PERIODIC_REFRESH_INTERVAL_SECS, MASTER_REGISTRY_MIN_SECS_BETWEEN_REFRESHES); + MASTER_REGISTRY_PERIODIC_REFRESH_INTERVAL_SECS, MASTER_REGISTRY_MIN_SECS_BETWEEN_REFRESHES); connectionString = getConnectionString(conf); } @@ -131,16 +131,16 @@ public static String getMasterAddr(Configuration conf) throws UnknownHostExcepti private static Set transformServerNames(GetMastersResponse resp) { return resp.getMasterServersList().stream() - .map(s -> ProtobufUtil.toServerName(s.getServerName())).collect(Collectors.toSet()); + .map(s -> ProtobufUtil.toServerName(s.getServerName())).collect(Collectors.toSet()); } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/(.*/MasterRegistry.java|src/test/.*)") + allowedOnPath = ".*/(.*/MasterRegistry.java|src/test/.*)") CompletableFuture> getMasters() { return this - . 
call( - (c, s, d) -> s.getMasters(c, GetMastersRequest.getDefaultInstance(), d), - r -> r.getMasterServersCount() != 0, "getMasters()") - .thenApply(MasterRegistry::transformServerNames); + . call( + (c, s, d) -> s.getMasters(c, GetMastersRequest.getDefaultInstance(), d), + r -> r.getMasterServersCount() != 0, "getMasters()") + .thenApply(MasterRegistry::transformServerNames); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java index 6d4b85cfc51e..127c22bd36af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,12 +16,13 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; + /** * Represents the master switch type */ @InterfaceAudience.Public public enum MasterSwitchType { - SPLIT, - MERGE + SPLIT, MERGE } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index 9db8b6090e10..7bb89c650b64 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.RatioGauge; import com.codahale.metrics.Timer; - import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; @@ -34,20 +33,20 @@ import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; -import org.apache.hadoop.hbase.util.Bytes; /** - * This class is for maintaining the various connection statistics and publishing them through - * the metrics interfaces. - * - * This class manages its own {@link MetricRegistry} and {@link JmxReporter} so as to not - * conflict with other uses of Yammer Metrics within the client application. Instantiating + * This class is for maintaining the various connection statistics and publishing them through the + * metrics interfaces. This class manages its own {@link MetricRegistry} and {@link JmxReporter} so + * as to not conflict with other uses of Yammer Metrics within the client application. 
Instantiating * this class implicitly creates and "starts" instances of these classes; be sure to call * {@link #shutdown()} to terminate the thread pools they allocate. */ @@ -139,12 +138,10 @@ private CallTracker(MetricRegistry registry, String name, String subName, String sb.append("(").append(subName).append(")"); } this.name = sb.toString(); - this.callTimer = registry.timer(name(MetricsConnection.class, - DRTN_BASE + this.name, scope)); - this.reqHist = registry.histogram(name(MetricsConnection.class, - REQ_BASE + this.name, scope)); - this.respHist = registry.histogram(name(MetricsConnection.class, - RESP_BASE + this.name, scope)); + this.callTimer = registry.timer(name(MetricsConnection.class, DRTN_BASE + this.name, scope)); + this.reqHist = registry.histogram(name(MetricsConnection.class, REQ_BASE + this.name, scope)); + this.respHist = + registry.histogram(name(MetricsConnection.class, RESP_BASE + this.name, scope)); } private CallTracker(MetricRegistry registry, String name, String scope) { @@ -170,10 +167,10 @@ protected static class RegionStats { public RegionStats(MetricRegistry registry, String name) { this.name = name; - this.memstoreLoadHist = registry.histogram(name(MetricsConnection.class, - MEMLOAD_BASE + this.name)); - this.heapOccupancyHist = registry.histogram(name(MetricsConnection.class, - HEAP_BASE + this.name)); + this.memstoreLoadHist = + registry.histogram(name(MetricsConnection.class, MEMLOAD_BASE + this.name)); + this.heapOccupancyHist = + registry.histogram(name(MetricsConnection.class, HEAP_BASE + this.name)); } public void update(RegionLoadStats regionStatistics) { @@ -188,12 +185,10 @@ protected static class RunnerStats { final Histogram delayIntevalHist; public RunnerStats(MetricRegistry registry) { - this.normalRunners = registry.counter( - name(MetricsConnection.class, "normalRunnersCount")); - this.delayRunners = registry.counter( - name(MetricsConnection.class, "delayRunnersCount")); - this.delayIntevalHist = registry.histogram( - name(MetricsConnection.class, "delayIntervalHist")); + this.normalRunners = registry.counter(name(MetricsConnection.class, "normalRunnersCount")); + this.delayRunners = registry.counter(name(MetricsConnection.class, "delayRunnersCount")); + this.delayIntevalHist = + registry.histogram(name(MetricsConnection.class, "delayIntervalHist")); } public void incrNormalRunners() { @@ -209,11 +204,10 @@ public void updateDelayInterval(long interval) { } } - protected ConcurrentHashMap> serverStats - = new ConcurrentHashMap<>(); + protected ConcurrentHashMap> serverStats = + new ConcurrentHashMap<>(); - public void updateServerStats(ServerName serverName, byte[] regionName, - Object r) { + public void updateServerStats(ServerName serverName, byte[] regionName, Object r) { if (!(r instanceof Result)) { return; } @@ -254,19 +248,22 @@ private static interface NewMetric { private final String scope; private final NewMetric timerFactory = new NewMetric() { - @Override public Timer newMetric(Class clazz, String name, String scope) { + @Override + public Timer newMetric(Class clazz, String name, String scope) { return registry.timer(name(clazz, name, scope)); } }; private final NewMetric histogramFactory = new NewMetric() { - @Override public Histogram newMetric(Class clazz, String name, String scope) { + @Override + public Histogram newMetric(Class clazz, String name, String scope) { return registry.histogram(name(clazz, name, scope)); } }; private final NewMetric counterFactory = new NewMetric() { - @Override public Counter 
newMetric(Class clazz, String name, String scope) { + @Override + public Counter newMetric(Class clazz, String name, String scope) { return registry.counter(name(clazz, name, scope)); } }; @@ -299,46 +296,43 @@ private static interface NewMetric { // a big improvement over calling registry.newMetric each time. protected final ConcurrentMap rpcTimers = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - protected final ConcurrentMap rpcHistograms = - new ConcurrentHashMap<>(CAPACITY * 2 /* tracking both request and response sizes */, - LOAD_FACTOR, CONCURRENCY_LEVEL); + protected final ConcurrentMap rpcHistograms = new ConcurrentHashMap<>( + CAPACITY * 2 /* tracking both request and response sizes */, LOAD_FACTOR, CONCURRENCY_LEVEL); private final ConcurrentMap cacheDroppingExceptions = - new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - protected final ConcurrentMap rpcCounters = + new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); + protected final ConcurrentMap rpcCounters = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); MetricsConnection(String scope, Supplier batchPool, Supplier metaPool) { this.scope = scope; this.registry = new MetricRegistry(); - this.registry.register(getExecutorPoolName(), - new RatioGauge() { - @Override - protected Ratio getRatio() { - ThreadPoolExecutor pool = batchPool.get(); - if (pool == null) { - return Ratio.of(0, 0); - } - return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); - } - }); - this.registry.register(getMetaPoolName(), - new RatioGauge() { - @Override - protected Ratio getRatio() { - ThreadPoolExecutor pool = metaPool.get(); - if (pool == null) { - return Ratio.of(0, 0); - } - return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); - } - }); + this.registry.register(getExecutorPoolName(), new RatioGauge() { + @Override + protected Ratio getRatio() { + ThreadPoolExecutor pool = batchPool.get(); + if (pool == null) { + return Ratio.of(0, 0); + } + return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); + } + }); + this.registry.register(getMetaPoolName(), new RatioGauge() { + @Override + protected Ratio getRatio() { + ThreadPoolExecutor pool = metaPool.get(); + if (pool == null) { + return Ratio.of(0, 0); + } + return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); + } + }); this.metaCacheHits = registry.counter(name(this.getClass(), "metaCacheHits", scope)); this.metaCacheMisses = registry.counter(name(this.getClass(), "metaCacheMisses", scope)); - this.metaCacheNumClearServer = registry.counter(name(this.getClass(), - "metaCacheNumClearServer", scope)); - this.metaCacheNumClearRegion = registry.counter(name(this.getClass(), - "metaCacheNumClearRegion", scope)); + this.metaCacheNumClearServer = + registry.counter(name(this.getClass(), "metaCacheNumClearServer", scope)); + this.metaCacheNumClearRegion = + registry.counter(name(this.getClass(), "metaCacheNumClearRegion", scope)); this.hedgedReadOps = registry.counter(name(this.getClass(), "hedgedReadOps", scope)); this.hedgedReadWin = registry.counter(name(this.getClass(), "hedgedReadWin", scope)); this.getTracker = new CallTracker(this.registry, "Get", scope); @@ -349,10 +343,10 @@ protected Ratio getRatio() { this.putTracker = new CallTracker(this.registry, "Mutate", "Put", scope); this.multiTracker = new CallTracker(this.registry, "Multi", scope); this.runnerStats = new RunnerStats(this.registry); - this.concurrentCallsPerServerHist = 
registry.histogram(name(MetricsConnection.class, - "concurrentCallsPerServer", scope)); - this.numActionsPerServerHist = registry.histogram(name(MetricsConnection.class, - "numActionsPerServer", scope)); + this.concurrentCallsPerServerHist = + registry.histogram(name(MetricsConnection.class, "concurrentCallsPerServer", scope)); + this.numActionsPerServerHist = + registry.histogram(name(MetricsConnection.class, "numActionsPerServer", scope)); this.nsLookups = registry.counter(name(this.getClass(), NS_LOOKUPS, scope)); this.nsLookupsFailed = registry.counter(name(this.getClass(), NS_LOOKUPS_FAILED, scope)); @@ -437,8 +431,8 @@ private T getMetric(String key, ConcurrentMap map, NewMetric f /** Update call stats for non-critical-path methods */ private void updateRpcGeneric(String methodName, CallStats stats) { - getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory) - .update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS); + getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory).update(stats.getCallTimeMs(), + TimeUnit.MILLISECONDS); getMetric(REQ_BASE + methodName, rpcHistograms, histogramFactory) .update(stats.getRequestSizeBytes()); getMetric(RESP_BASE + methodName, rpcHistograms, histogramFactory) @@ -457,7 +451,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { // this implementation is tied directly to protobuf implementation details. would be better // if we could dispatch based on something static, ie, request Message type. if (method.getService() == ClientService.getDescriptor()) { - switch(method.getIndex()) { + switch (method.getIndex()) { case 0: assert "Get".equals(method.getName()); getTracker.updateRpc(stats); @@ -465,7 +459,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { case 1: assert "Mutate".equals(method.getName()); final MutationType mutationType = ((MutateRequest) param).getMutation().getMutateType(); - switch(mutationType) { + switch (mutationType) { case APPEND: appendTracker.updateRpc(stats); return; @@ -519,8 +513,8 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { } public void incrCacheDroppingExceptions(Object exception) { - getMetric(CACHE_BASE + - (exception == null? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), + getMetric( + CACHE_BASE + (exception == null ? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), cacheDroppingExceptions, counterFactory).inc(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java index 6ad44f08a60d..fc473bdbb709 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
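Side note (illustration only, not part of this diff): the MetricsConnection hunks above merely reflow calls to Dropwizard's MetricRegistry. A minimal standalone sketch of how the name(...) helper composes those metric keys; the scope string, the "rpcCallDurationMs_Get" key and the sketch class are hypothetical, not HBase code.

import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import java.util.concurrent.TimeUnit;

// Standalone sketch: MetricRegistry.name(Class, String...) joins the class name and the
// remaining parts with dots, which is all the reflowed registry.timer()/histogram() calls rely on.
public class MetricNamingSketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    String scope = "hconnection-0x1234";   // hypothetical per-connection scope
    String key = "rpcCallDurationMs_Get";  // illustrative DRTN_BASE + tracker name

    Timer callTimer = registry.timer(MetricRegistry.name(MetricNamingSketch.class, key, scope));
    Histogram reqHist = registry
      .histogram(MetricRegistry.name(MetricNamingSketch.class, "rpcCallRequestSizeBytes_Get", scope));

    callTimer.update(12, TimeUnit.MILLISECONDS);
    reqHist.update(256);
    // Registered under e.g. "MetricNamingSketch.rpcCallDurationMs_Get.hconnection-0x1234"
    System.out.println(registry.getTimers().keySet());
  }
}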
*/ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java index 03f168893a71..1117b3bcd952 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.HashMap; import java.util.Map; import java.util.TreeMap; - +import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.util.Bytes; /** * A container for Result objects, grouped by regionName. @@ -37,11 +35,10 @@ public class MultiResponse extends AbstractResponse { private Map results = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** - * The server can send us a failure for the region itself, instead of individual failure. - * It's a part of the protobuf definition. + * The server can send us a failure for the region itself, instead of individual failure. It's a + * part of the protobuf definition. */ - private Map exceptions = - new TreeMap<>(Bytes.BYTES_COMPARATOR); + private Map exceptions = new TreeMap<>(Bytes.BYTES_COMPARATOR); public MultiResponse() { super(); @@ -52,7 +49,7 @@ public MultiResponse() { */ public int size() { int size = 0; - for (RegionResult result: results.values()) { + for (RegionResult result : results.values()) { size += result.size(); } return size; @@ -60,7 +57,6 @@ public int size() { /** * Add the pair to the container, grouped by the regionName - * * @param regionName * @param originalIndex the original index of the Action (request). * @param resOrEx the result or error; will be empty for successful Put and Delete actions. @@ -69,14 +65,14 @@ public void add(byte[] regionName, int originalIndex, Object resOrEx) { getResult(regionName).addResult(originalIndex, resOrEx); } - public void addException(byte []regionName, Throwable ie){ + public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } /** * @return the exception for the region, if any. Null otherwise. 
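Side note (illustration only, not part of this diff): the MultiResponse hunks above reflow an internal container whose results and per-region failures are both keyed by region name. A small sketch of that grouping; the region names are hypothetical, and a String stands in for the Result/Throwable a real call would store.

import java.io.IOException;
import org.apache.hadoop.hbase.client.MultiResponse;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: per-action results and whole-region exceptions live in separate maps, both keyed by region.
public class MultiResponseSketch {
  public static void main(String[] args) {
    byte[] regionA = Bytes.toBytes("table,,1.abc");      // hypothetical region names
    byte[] regionB = Bytes.toBytes("table,row5,2.def");

    MultiResponse response = new MultiResponse();
    response.add(regionA, 0, "ok");                                   // result for action index 0
    response.addException(regionB, new IOException("region moved"));  // whole-region failure

    System.out.println(response.size());                // 1: only per-action results are counted
    System.out.println(response.getException(regionB)); // java.io.IOException: region moved
    System.out.println(response.getException(regionA)); // null
  }
}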
*/ - public Throwable getException(byte []regionName){ + public Throwable getException(byte[] regionName) { return exceptions.get(regionName); } @@ -88,7 +84,7 @@ public void addStatistic(byte[] regionName, ClientProtos.RegionLoadStats stat) { getResult(regionName).setStat(stat); } - private RegionResult getResult(byte[] region){ + private RegionResult getResult(byte[] region) { RegionResult rs = results.get(region); if (rs == null) { rs = new RegionResult(); @@ -97,7 +93,7 @@ private RegionResult getResult(byte[] region){ return rs; } - public Map getResults(){ + public Map getResults() { return this.results; } @@ -106,15 +102,15 @@ public ResponseType type() { return ResponseType.MULTI; } - static class RegionResult{ + static class RegionResult { Map result = new HashMap<>(); ClientProtos.RegionLoadStats stat; - public void addResult(int index, Object result){ + public void addResult(int index, Object result) { this.result.put(index, result); } - public void setStat(ClientProtos.RegionLoadStats stat){ + public void setStat(ClientProtos.RegionLoadStats stat) { this.stat = stat; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index 0aa301c4c8cd..503de6994214 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -16,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import java.util.Arrays; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; @@ -36,26 +37,16 @@ class MutableRegionInfo implements RegionInfo { private static final int MAX_REPLICA_ID = 0xFFFF; /** - * The new format for a region name contains its encodedName at the end. - * The encoded name also serves as the directory name for the region - * in the filesystem. - * - * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. - * where, - * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> - * - * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> - * For region names in the old format, the encoded name is a 32-bit - * JenkinsHash integer value (in its decimal notation, string form). - *

- * **NOTE**
- *
- * The first hbase:meta region, and regions created by an older
- * version of HBase (0.20 or prior) will continue to use the
- * old region name format.
+ * The new format for a region name contains its encodedName at the end. The encoded name also
+ * serves as the directory name for the region in the filesystem. New region name format:
+ * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. where, <encodedName>
+ * is a hex version of the MD5 hash of <tablename>,<startkey>,<regionIdTimestamp> The old
+ * region name format: <tablename>,<startkey>,<regionIdTimestamp> For region names in the
+ * old format, the encoded name is a 32-bit JenkinsHash integer value (in its decimal notation,
+ * string form).
+ *

    + * **NOTE** The first hbase:meta region, and regions created by an older version of HBase (0.20 or + * prior) will continue to use the old region name format. */ // This flag is in the parent of a split while the parent is still referenced by daughter @@ -76,8 +67,8 @@ class MutableRegionInfo implements RegionInfo { private final TableName tableName; private static int generateHashCode(final TableName tableName, final byte[] startKey, - final byte[] endKey, final long regionId, - final int replicaId, boolean offLine, byte[] regionName) { + final byte[] endKey, final long regionId, final int replicaId, boolean offLine, + byte[] regionName) { int result = Arrays.hashCode(regionName); result = (int) (result ^ regionId); result ^= Arrays.hashCode(checkStartKey(startKey)); @@ -89,11 +80,11 @@ private static int generateHashCode(final TableName tableName, final byte[] star } private static byte[] checkStartKey(byte[] startKey) { - return startKey == null? HConstants.EMPTY_START_ROW: startKey; + return startKey == null ? HConstants.EMPTY_START_ROW : startKey; } private static byte[] checkEndKey(byte[] endKey) { - return endKey == null? HConstants.EMPTY_END_ROW: endKey; + return endKey == null ? HConstants.EMPTY_END_ROW : endKey; } private static TableName checkTableName(TableName tableName) { @@ -115,7 +106,7 @@ private static int checkReplicaId(int regionId) { */ MutableRegionInfo(long regionId, TableName tableName, int replicaId) { this(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, - replicaId, false); + replicaId, false); } MutableRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, @@ -145,11 +136,10 @@ public String getShortNameToLog() { /** @return the regionId */ @Override - public long getRegionId(){ + public long getRegionId() { return regionId; } - /** * @return the regionName as an array of bytes. * @see #getRegionNameAsString() @@ -200,25 +190,22 @@ public TableName getTable() { } /** - * Returns true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. + * Returns true if the given inclusive range of rows is fully contained by this region. For + * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return + * true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. 
end < start) */ @Override public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName); if (cellComparator.compareRows(rangeStartKey, rangeEndKey) > 0) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + - " > " + Bytes.toStringBinary(rangeEndKey)); + throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(rangeStartKey) + + " > " + Bytes.toStringBinary(rangeEndKey)); } boolean firstKeyInRange = cellComparator.compareRows(rangeStartKey, startKey) >= 0; - boolean lastKeyInRange = - cellComparator.compareRows(rangeEndKey, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); + boolean lastKeyInRange = cellComparator.compareRows(rangeEndKey, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); return firstKeyInRange && lastKeyInRange; } @@ -228,9 +215,9 @@ public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { @Override public boolean containsRow(byte[] row) { CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName); - return cellComparator.compareRows(row, startKey) >= 0 && - (cellComparator.compareRows(row, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); + return cellComparator.compareRows(row, startKey) >= 0 + && (cellComparator.compareRows(row, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); } /** @return true if this region is a meta region */ @@ -268,8 +255,8 @@ public boolean isOffline() { } /** - * The parent of a region split is offline while split daughters hold - * references to the parent. Offlined regions are closed. + * The parent of a region split is offline while split daughters hold references to the parent. + * Offlined regions are closed. * @param offLine Set online/offline status. * @return MutableRegionInfo */ @@ -309,14 +296,11 @@ public int getReplicaId() { */ @Override public String toString() { - return "{ENCODED => " + getEncodedName() + ", " + - HConstants.NAME + " => '" + Bytes.toStringBinary(this.regionName) - + "', STARTKEY => '" + - Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + - Bytes.toStringBinary(this.endKey) + "'" + - (isOffline()? ", OFFLINE => true": "") + - (isSplit()? ", SPLIT => true": "") + - ((replicaId > 0)? ", REPLICA_ID => " + replicaId : "") + "}"; + return "{ENCODED => " + getEncodedName() + ", " + HConstants.NAME + " => '" + + Bytes.toStringBinary(this.regionName) + "', STARTKEY => '" + + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + + "'" + (isOffline() ? ", OFFLINE => true" : "") + (isSplit() ? ", SPLIT => true" : "") + + ((replicaId > 0) ? ", REPLICA_ID => " + replicaId : "") + "}"; } /** @@ -333,7 +317,7 @@ public boolean equals(Object o) { if (!(o instanceof RegionInfo)) { return false; } - return compareTo((RegionInfo)o) == 0; + return compareTo((RegionInfo) o) == 0; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index ab6fc9475142..cc7e5c0c2190 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
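Side note (illustration only, not part of this diff): the containsRange() javadoc reflowed above keeps its foo,a,g example. A minimal sketch of that contract through the public RegionInfoBuilder API, since MutableRegionInfo itself is package-private; the table name and keys are the hypothetical ones from the javadoc.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the containsRange()/containsRow() contract for a region spanning [a, g).
public class ContainsRangeSketch {
  public static void main(String[] args) {
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("foo"))
        .setStartKey(Bytes.toBytes("a"))
        .setEndKey(Bytes.toBytes("g"))
        .build();

    System.out.println(region.containsRange(Bytes.toBytes("b"), Bytes.toBytes("c"))); // true
    System.out.println(region.containsRange(Bytes.toBytes("a"), Bytes.toBytes("c"))); // true
    System.out.println(region.containsRange(Bytes.toBytes("b"), Bytes.toBytes("z"))); // false: "z" >= end key "g"
    System.out.println(region.containsRow(Bytes.toBytes("f")));                       // true
    // Passing rangeStartKey > rangeEndKey throws IllegalArgumentException, as documented above.
  }
}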
*/ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -66,24 +65,23 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @InterfaceAudience.Public -public abstract class Mutation extends OperationWithAttributes implements Row, CellScannable, - HeapSize { +public abstract class Mutation extends OperationWithAttributes + implements Row, CellScannable, HeapSize { public static final long MUTATION_OVERHEAD = ClassSize.align( - // This - ClassSize.OBJECT + - // row + OperationWithAttributes.attributes - 2 * ClassSize.REFERENCE + - // Timestamp - 1 * Bytes.SIZEOF_LONG + - // durability - ClassSize.REFERENCE + - // familyMap - ClassSize.REFERENCE + - // familyMap - ClassSize.TREEMAP + - // priority - ClassSize.INTEGER - ); + // This + ClassSize.OBJECT + + // row + OperationWithAttributes.attributes + 2 * ClassSize.REFERENCE + + // Timestamp + 1 * Bytes.SIZEOF_LONG + + // durability + ClassSize.REFERENCE + + // familyMap + ClassSize.REFERENCE + + // familyMap + ClassSize.TREEMAP + + // priority + ClassSize.INTEGER); /** * The attribute for storing the list of clusters that have consumed the change. @@ -98,17 +96,16 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C private static final String RETURN_RESULTS = "_rr_"; // TODO: row should be final - protected byte [] row = null; + protected byte[] row = null; protected long ts = HConstants.LATEST_TIMESTAMP; protected Durability durability = Durability.USE_DEFAULT; // TODO: familyMap should be final // A Map sorted by column family. - protected NavigableMap> familyMap; + protected NavigableMap> familyMap; /** - * empty construction. - * We need this empty construction to keep binary compatibility. + * empty construction. We need this empty construction to keep binary compatibility. */ protected Mutation() { this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); @@ -118,10 +115,10 @@ protected Mutation(Mutation clone) { super(clone); this.row = clone.getRow(); this.ts = clone.getTimestamp(); - this.familyMap = clone.getFamilyCellMap().entrySet().stream(). - collect(Collectors.toMap(e -> e.getKey(), e -> new ArrayList<>(e.getValue()), (k, v) -> { - throw new RuntimeException("collisions!!!"); - }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); + this.familyMap = clone.getFamilyCellMap().entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> new ArrayList<>(e.getValue()), (k, v) -> { + throw new RuntimeException("collisions!!!"); + }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); } /** @@ -130,7 +127,7 @@ protected Mutation(Mutation clone) { * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - protected Mutation(byte[] row, long ts, NavigableMap> familyMap) { + protected Mutation(byte[] row, long ts, NavigableMap> familyMap) { this.row = Preconditions.checkNotNull(row); if (row.length == 0) { throw new IllegalArgumentException("Row can't be empty"); @@ -145,9 +142,8 @@ public CellScanner cellScanner() { } /** - * Creates an empty list if one doesn't exist for the given column family - * or else it returns the associated list of Cell objects. - * + * Creates an empty list if one doesn't exist for the given column family or else it returns the + * associated list of Cell objects. * @param family column family * @return a list of Cell objects, returns an empty list if one doesn't exist. */ @@ -162,7 +158,6 @@ List getCellList(byte[] family) { /* * Create a KeyValue with this objects row key and the Put identifier. 
- * * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value) { @@ -185,20 +180,18 @@ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] valu /* * Create a KeyValue with this objects row key and the Put identifier. - * * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value, Tag[] tags) { - return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, - family, 0, family == null ? 0 : family.length, - qualifier, ts, KeyValue.Type.Put, value, tags != null ? Arrays.asList(tags) : null); + return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, family, 0, + family == null ? 0 : family.length, qualifier, ts, KeyValue.Type.Put, value, + tags != null ? Arrays.asList(tags) : null); } /** - * Compile the column family (i.e. schema) information - * into a Map. Useful for parsing and aggregation by debugging, - * logging, and administration tools. + * Compile the column family (i.e. schema) information into a Map. Useful for parsing and + * aggregation by debugging, logging, and administration tools. * @return Map */ @Override @@ -208,16 +201,16 @@ public Map getFingerprint() { // ideally, we would also include table information, but that information // is not stored in each Operation instance. map.put("families", families); - for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. 
* @param maxCols a limit on the number of columns output prior to truncation * @return Map */ @@ -232,7 +225,7 @@ public Map toMap(int maxCols) { map.put("row", Bytes.toStringBinary(this.row)); int colCount = 0; // iterate through all column families affected - for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { // map from this family to details for each cell affected within the family List> qualifierDetails = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails); @@ -241,7 +234,7 @@ public Map toMap(int maxCols) { continue; } // add details for each cell - for (Cell cell: entry.getValue()) { + for (Cell cell : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -266,16 +259,15 @@ public Map toMap(int maxCols) { private static Map cellToStringMap(Cell c) { Map stringMap = new HashMap<>(); - stringMap.put("qualifier", Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), - c.getQualifierLength())); + stringMap.put("qualifier", + Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength())); stringMap.put("timestamp", c.getTimestamp()); stringMap.put("vlen", c.getValueLength()); List tags = PrivateCellUtil.getTags(c); if (tags != null) { List tagsString = new ArrayList<>(tags.size()); for (Tag t : tags) { - tagsString - .add((t.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); + tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); } stringMap.put("tag", tagsString); } @@ -300,7 +292,7 @@ public Durability getDurability() { * Method for retrieving the put's familyMap * @return familyMap */ - public NavigableMap> getFamilyCellMap() { + public NavigableMap> getFamilyCellMap() { return this.familyMap; } @@ -317,13 +309,12 @@ public boolean isEmpty() { * @return row */ @Override - public byte [] getRow() { + public byte[] getRow() { return this.row; } /** * Method for retrieving the timestamp. - * * @return timestamp */ public long getTimestamp() { @@ -351,10 +342,10 @@ public Mutation setClusterIds(List clusterIds) { public List getClusterIds() { List clusterIds = new ArrayList<>(); byte[] bytes = getAttribute(CONSUMED_CLUSTER_IDS); - if(bytes != null) { + if (bytes != null) { ByteArrayDataInput in = ByteStreams.newDataInput(bytes); int numClusters = in.readInt(); - for(int i=0; i getClusterIds() { */ public Mutation setCellVisibility(CellVisibility expression) { this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, - toCellVisibility(expression).toByteArray()); + toCellVisibility(expression).toByteArray()); return this; } @@ -383,7 +374,6 @@ public CellVisibility getCellVisibility() throws DeserializationException { /** * Create a protocol buffer CellVisibility based on a client CellVisibility. 
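Side note (illustration only, not part of this diff): the hunks above touch setCellVisibility()/toCellVisibility(), which round-trip a client CellVisibility through its protobuf form stored as a mutation attribute. A hedged sketch of the client-side API; the label expression is hypothetical and only meaningful on a cluster where those visibility labels exist.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: the expression is serialized into an attribute on set and parsed back on get.
public class CellVisibilitySketch {
  public static void main(String[] args) throws Exception {
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    put.setCellVisibility(new CellVisibility("secret & !public")); // hypothetical labels
    System.out.println(put.getCellVisibility().getExpression());   // secret & !public
  }
}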
- * * @param cellVisibility * @return a protocol buffer CellVisibility */ @@ -395,7 +385,6 @@ static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibilit /** * Convert a protocol buffer CellVisibility to a client CellVisibility - * * @param proto * @return the converted client CellVisibility */ @@ -406,12 +395,12 @@ private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto /** * Convert a protocol buffer CellVisibility bytes to a client CellVisibility - * * @param protoBytes * @return the converted client CellVisibility * @throws DeserializationException */ - private static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException { + private static CellVisibility toCellVisibility(byte[] protoBytes) + throws DeserializationException { if (protoBytes == null) return null; ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder(); ClientProtos.CellVisibility proto = null; @@ -453,20 +442,17 @@ public long heapSize() { heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length); // Adding map overhead - heapsize += - ClassSize.align(getFamilyCellMap().size() * ClassSize.MAP_ENTRY); - for(Map.Entry> entry : getFamilyCellMap().entrySet()) { - //Adding key overhead - heapsize += - ClassSize.align(ClassSize.ARRAY + entry.getKey().length); - - //This part is kinds tricky since the JVM can reuse references if you - //store the same value, but have a good match with SizeOf at the moment - //Adding value overhead + heapsize += ClassSize.align(getFamilyCellMap().size() * ClassSize.MAP_ENTRY); + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + // Adding key overhead + heapsize += ClassSize.align(ClassSize.ARRAY + entry.getKey().length); + + // This part is kinds tricky since the JVM can reuse references if you + // store the same value, but have a good match with SizeOf at the moment + // Adding value overhead heapsize += ClassSize.align(ClassSize.ARRAYLIST); int size = entry.getValue().size(); - heapsize += ClassSize.align(ClassSize.ARRAY + - size * ClassSize.REFERENCE); + heapsize += ClassSize.align(ClassSize.ARRAY + size * ClassSize.REFERENCE); for (Cell cell : entry.getValue()) { heapsize += cell.heapSize(); @@ -509,8 +495,8 @@ public Mutation setACL(Map perms) { /** * Return the TTL requested for the result of the mutation, in milliseconds. - * @return the TTL requested for the result of the mutation, in milliseconds, - * or Long.MAX_VALUE if unset + * @return the TTL requested for the result of the mutation, in milliseconds, or Long.MAX_VALUE if + * unset */ public long getTTL() { byte[] ttlBytes = getAttribute(OP_ATTRIBUTE_TTL); @@ -551,7 +537,7 @@ protected Mutation setReturnResults(boolean returnResults) { * Subclasses should override this method to add the heap size of their own fields. * @return the heap size to add (will be aligned). */ - protected long extraHeapSize(){ + protected long extraHeapSize() { return 0L; } @@ -567,76 +553,71 @@ public Mutation setTimestamp(long timestamp) { } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family & qualifier. - * Both given arguments must match the KeyValue object to return true. - * + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family & qualifier. Both given arguments must match the KeyValue object to return + * true. 
* @param family column family * @param qualifier column qualifier - * @return returns true if the given family and qualifier already has an - * existing KeyValue object in the family map. + * @return returns true if the given family and qualifier already has an existing KeyValue object + * in the family map. */ - public boolean has(byte [] family, byte [] qualifier) { + public boolean has(byte[] family, byte[] qualifier) { return has(family, qualifier, this.ts, HConstants.EMPTY_BYTE_ARRAY, true, true); } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. - * + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family, qualifier and timestamp. All 3 given arguments must match the KeyValue object to + * return true. * @param family column family * @param qualifier column qualifier * @param ts timestamp - * @return returns true if the given family, qualifier and timestamp already has an - * existing KeyValue object in the family map. + * @return returns true if the given family, qualifier and timestamp already has an existing + * KeyValue object in the family map. */ - public boolean has(byte [] family, byte [] qualifier, long ts) { + public boolean has(byte[] family, byte[] qualifier, long ts) { return has(family, qualifier, ts, HConstants.EMPTY_BYTE_ARRAY, false, true); } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. - * + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family, qualifier and timestamp. All 3 given arguments must match the KeyValue object to + * return true. * @param family column family * @param qualifier column qualifier * @param value value to check - * @return returns true if the given family, qualifier and value already has an - * existing KeyValue object in the family map. + * @return returns true if the given family, qualifier and value already has an existing KeyValue + * object in the family map. */ - public boolean has(byte [] family, byte [] qualifier, byte [] value) { + public boolean has(byte[] family, byte[] qualifier, byte[] value) { return has(family, qualifier, this.ts, value, true, false); } /** - * A convenience method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp. - * All 4 given arguments must match the KeyValue object to return true. - * + * A convenience method to determine if this object's familyMap contains the given value assigned + * to the given family, qualifier and timestamp. All 4 given arguments must match the KeyValue + * object to return true. * @param family column family * @param qualifier column qualifier * @param ts timestamp * @param value value to check - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. + * @return returns true if the given family, qualifier timestamp and value already has an existing + * KeyValue object in the family map. 
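Side note (illustration only, not part of this diff): the has(...) javadoc reflowed above describes overloads that differ only in which parts of a stored cell must match. A minimal sketch using Put, a concrete Mutation; the row, family and qualifier names are hypothetical.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: every argument passed to has(...) must match an existing cell in the family map.
public class MutationHasSketch {
  public static void main(String[] args) {
    byte[] cf = Bytes.toBytes("cf");
    Put put = new Put(Bytes.toBytes("row1"))
        .addColumn(cf, Bytes.toBytes("q1"), 42L, Bytes.toBytes("v1"));

    System.out.println(put.has(cf, Bytes.toBytes("q1")));                          // true: family + qualifier
    System.out.println(put.has(cf, Bytes.toBytes("q1"), 42L));                     // true: timestamp also matches
    System.out.println(put.has(cf, Bytes.toBytes("q1"), Bytes.toBytes("v1")));     // true: value also matches
    System.out.println(put.has(cf, Bytes.toBytes("q1"), 7L, Bytes.toBytes("v1"))); // false: timestamp differs
  }
}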
*/ - public boolean has(byte [] family, byte [] qualifier, long ts, byte [] value) { + public boolean has(byte[] family, byte[] qualifier, long ts, byte[] value) { return has(family, qualifier, ts, value, false, false); } /** * Returns a list of all KeyValue objects with matching column family and qualifier. - * * @param family column family * @param qualifier column qualifier - * @return a list of KeyValue objects with the matching family and qualifier, - * returns an empty list if one doesn't exist for the given family. + * @return a list of KeyValue objects with the matching family and qualifier, returns an empty + * list if one doesn't exist for the given family. */ public List get(byte[] family, byte[] qualifier) { List filteredList = new ArrayList<>(); - for (Cell cell: getCellList(family)) { + for (Cell cell : getCellList(family)) { if (CellUtil.matchingQualifier(cell, qualifier)) { filteredList.add(cell); } @@ -645,21 +626,19 @@ public List get(byte[] family, byte[] qualifier) { } /* - * Private method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp - * respecting the 2 boolean arguments - * + * Private method to determine if this object's familyMap contains the given value assigned to the + * given family, qualifier and timestamp respecting the 2 boolean arguments * @param family * @param qualifier * @param ts * @param value * @param ignoreTS * @param ignoreValue - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. + * @return returns true if the given family, qualifier timestamp and value already has an existing + * KeyValue object in the family map. */ - protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, - boolean ignoreTS, boolean ignoreValue) { + protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS, + boolean ignoreValue) { List list = getCellList(family); if (list.isEmpty()) { return false; @@ -671,10 +650,8 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, // F F => 1 if (!ignoreTS && !ignoreValue) { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && - CellUtil.matchingQualifier(cell, qualifier) && - CellUtil.matchingValue(cell, value) && - cell.getTimestamp() == ts) { + if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) + && CellUtil.matchingValue(cell, value) && cell.getTimestamp() == ts) { return true; } } @@ -694,8 +671,7 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, } } else { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && - CellUtil.matchingQualifier(cell, qualifier)) { + if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)) { return true; } } @@ -705,23 +681,23 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, /** * @param row Row to check - * @throws IllegalArgumentException Thrown if row is empty or null or - * > {@link HConstants#MAX_ROW_LENGTH} + * @throws IllegalArgumentException Thrown if row is empty or null or > + * {@link HConstants#MAX_ROW_LENGTH} * @return row */ - static byte [] checkRow(final byte [] row) { - return checkRow(row, 0, row == null? 0: row.length); + static byte[] checkRow(final byte[] row) { + return checkRow(row, 0, row == null ? 
0 : row.length); } /** * @param row Row to check * @param offset * @param length - * @throws IllegalArgumentException Thrown if row is empty or null or - * > {@link HConstants#MAX_ROW_LENGTH} + * @throws IllegalArgumentException Thrown if row is empty or null or > + * {@link HConstants#MAX_ROW_LENGTH} * @return row */ - static byte [] checkRow(final byte [] row, final int offset, final int length) { + static byte[] checkRow(final byte[] row, final int offset, final int length) { if (row == null) { throw new IllegalArgumentException("Row buffer is null"); } @@ -729,8 +705,8 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, throw new IllegalArgumentException("Row length is 0"); } if (length > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row length " + length + " is > " + - HConstants.MAX_ROW_LENGTH); + throw new IllegalArgumentException( + "Row length " + length + " is > " + HConstants.MAX_ROW_LENGTH); } return row; } @@ -743,18 +719,18 @@ static void checkRow(ByteBuffer row) { throw new IllegalArgumentException("Row length is 0"); } if (row.remaining() > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row length " + row.remaining() + " is > " + - HConstants.MAX_ROW_LENGTH); + throw new IllegalArgumentException( + "Row length " + row.remaining() + " is > " + HConstants.MAX_ROW_LENGTH); } } Mutation add(Cell cell) throws IOException { - //Checking that the row of the kv is the same as the mutation + // Checking that the row of the kv is the same as the mutation // TODO: It is fraught with risk if user pass the wrong row. // Throwing the IllegalArgumentException is more suitable I'd say. if (!CellUtil.matchingRows(cell, this.row)) { - throw new WrongRowIOException("The row in " + cell.toString() + - " doesn't match the original one " + Bytes.toStringBinary(this.row)); + throw new WrongRowIOException("The row in " + cell.toString() + + " doesn't match the original one " + Bytes.toStringBinary(this.row)); } byte[] family; @@ -785,8 +761,8 @@ Mutation add(Cell cell) throws IOException { public abstract CellBuilder getCellBuilder(CellBuilderType cellBuilderType); /** - * get a CellBuilder instance that already has relevant Type and Row set. - * the default CellBuilderType is CellBuilderType.SHALLOW_COPY + * get a CellBuilder instance that already has relevant Type and Row set. the default + * CellBuilderType is CellBuilderType.SHALLOW_COPY * @return CellBuilder which already has relevant Type and Row set. */ public CellBuilder getCellBuilder() { @@ -798,7 +774,7 @@ public CellBuilder getCellBuilder() { * @param cellBuilderType e.g CellBuilderType.SHALLOW_COPY * @param cellType e.g Cell.Type.Put * @return CellBuilder which already has relevant Type and Row set. 
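Side note (illustration only, not part of this diff): getCellBuilder(), whose javadoc is reflowed above, returns a CellBuilder already bound to this mutation's row and type, so only the remaining fields need to be set. A hedged sketch, assuming Put.add(Cell) is the public way to attach the built cell in this version; names are hypothetical.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: the returned builder already carries the row and Cell.Type.Put.
public class CellBuilderSketch {
  public static void main(String[] args) throws Exception {
    Put put = new Put(Bytes.toBytes("row1"));
    Cell cell = put.getCellBuilder(CellBuilderType.SHALLOW_COPY)
        .setFamily(Bytes.toBytes("cf"))
        .setQualifier(Bytes.toBytes("q"))
        .setTimestamp(1L)
        .setValue(Bytes.toBytes("v"))
        .build();
    put.add(cell); // the row already matches, so the WrongRowIOException check in add(Cell) passes
  }
}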
- */ + */ protected CellBuilder getCellBuilder(CellBuilderType cellBuilderType, Cell.Type cellType) { CellBuilder builder = CellBuilderFactory.create(cellBuilderType).setRow(row).setType(cellType); return new CellBuilder() { @@ -876,11 +852,10 @@ public CellBuilder clear() { } private static final class CellWrapper implements ExtendedCell { - private static final long FIXED_OVERHEAD = ClassSize.align( - ClassSize.OBJECT // object header - + KeyValue.TIMESTAMP_SIZE // timestamp - + Bytes.SIZEOF_LONG // sequence id - + 1 * ClassSize.REFERENCE); // references to cell + private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT // object header + + KeyValue.TIMESTAMP_SIZE // timestamp + + Bytes.SIZEOF_LONG // sequence id + + 1 * ClassSize.REFERENCE); // references to cell private final Cell cell; private long sequenceId; private long timestamp; @@ -1013,22 +988,19 @@ public byte[] cloneTags() { } private long heapOverhead() { - return FIXED_OVERHEAD - + ClassSize.ARRAY // row - + getFamilyLength() == 0 ? 0 : ClassSize.ARRAY - + getQualifierLength() == 0 ? 0 : ClassSize.ARRAY - + getValueLength() == 0 ? 0 : ClassSize.ARRAY - + getTagsLength() == 0 ? 0 : ClassSize.ARRAY; + return FIXED_OVERHEAD + ClassSize.ARRAY // row + + getFamilyLength() == 0 + ? 0 + : ClassSize.ARRAY + getQualifierLength() == 0 ? 0 + : ClassSize.ARRAY + getValueLength() == 0 ? 0 + : ClassSize.ARRAY + getTagsLength() == 0 ? 0 : ClassSize.ARRAY; } @Override public long heapSize() { - return heapOverhead() - + ClassSize.align(getRowLength()) - + ClassSize.align(getFamilyLength()) - + ClassSize.align(getQualifierLength()) - + ClassSize.align(getValueLength()) - + ClassSize.align(getTagsLength()); + return heapOverhead() + ClassSize.align(getRowLength()) + ClassSize.align(getFamilyLength()) + + ClassSize.align(getQualifierLength()) + ClassSize.align(getValueLength()) + + ClassSize.align(getTagsLength()); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java index 184f0c0bc0f4..6d5d94802d0e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java index 70fa36a5afa6..3020be221059 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * NonceGenerator interface. - * In general, nonce group is an ID (one per client, or region+client, or whatever) that - * could be used to reduce collision potential, or be used by compatible server nonce manager - * to optimize nonce storage and removal. See HBASE-3787. + * NonceGenerator interface. 
In general, nonce group is an ID (one per client, or region+client, or + * whatever) that could be used to reduce collision potential, or be used by compatible server nonce + * manager to optimize nonce storage and removal. See HBASE-3787. */ @InterfaceAudience.Private public interface NonceGenerator { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java index 982ec5b0065b..5aeb1663694a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java @@ -24,25 +24,16 @@ /** * A collection of criteria used for table selection. The logic of table selection is as follows: *

      - *
    • - * When no parameter values are provided, an unfiltered list of all user tables is returned. - *
    • - *
    • - * When a list of {@link TableName TableNames} are provided, the filter starts with any of - * these tables that exist. - *
    • - *
    • - * When a {@code namespace} name is provided, the filter starts with all the tables present in - * that namespace. - *
    • - *
    • - * If both a list of {@link TableName TableNames} and a {@code namespace} name are provided, - * the {@link TableName} list is honored and the {@code namespace} name is ignored. - *
    • - *
    • - * If a {@code regex} is provided, this subset of {@link TableName TableNames} is further - * reduced to those that match the provided regular expression. - *
    • + *
    • When no parameter values are provided, an unfiltered list of all user tables is returned. + *
    • + *
    • When a list of {@link TableName TableNames} are provided, the filter starts with any of these + * tables that exist.
    • + *
    • When a {@code namespace} name is provided, the filter starts with all the tables present in + * that namespace.
    • + *
    • If both a list of {@link TableName TableNames} and a {@code namespace} name are provided, the + * {@link TableName} list is honored and the {@code namespace} name is ignored.
    • + *
    • If a {@code regex} is provided, this subset of {@link TableName TableNames} is further + * reduced to those that match the provided regular expression.
    • *
    */ @InterfaceAudience.Public @@ -52,7 +43,7 @@ public final class NormalizeTableFilterParams { private final String namespace; private NormalizeTableFilterParams(final List tableNames, final String regex, - final String namespace) { + final String namespace) { this.tableNames = tableNames; this.regex = regex; this.namespace = namespace; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java index 115e55f336f6..71e200cf2d48 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -40,23 +38,22 @@ final public class OnlineLogRecord extends LogEntry { // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .registerTypeAdapter(OnlineLogRecord.class, (JsonSerializer) - (slowLogPayload, type, jsonSerializationContext) -> { - Gson gson = new Gson(); - JsonObject jsonObj = (JsonObject) gson.toJsonTree(slowLogPayload); - if (slowLogPayload.getMultiGetsCount() == 0) { - jsonObj.remove("multiGetsCount"); - } - if (slowLogPayload.getMultiMutationsCount() == 0) { - jsonObj.remove("multiMutationsCount"); - } - if (slowLogPayload.getMultiServiceCalls() == 0) { - jsonObj.remove("multiServiceCalls"); - } - return jsonObj; - }).create(); + private static final Gson GSON = + GsonUtil.createGson().setPrettyPrinting().registerTypeAdapter(OnlineLogRecord.class, + (JsonSerializer) (slowLogPayload, type, jsonSerializationContext) -> { + Gson gson = new Gson(); + JsonObject jsonObj = (JsonObject) gson.toJsonTree(slowLogPayload); + if (slowLogPayload.getMultiGetsCount() == 0) { + jsonObj.remove("multiGetsCount"); + } + if (slowLogPayload.getMultiMutationsCount() == 0) { + jsonObj.remove("multiMutationsCount"); + } + if (slowLogPayload.getMultiServiceCalls() == 0) { + jsonObj.remove("multiServiceCalls"); + } + return jsonObj; + }).create(); private final long startTime; private final int processingTime; @@ -239,9 +236,9 @@ public OnlineLogRecordBuilder setMultiServiceCalls(int multiServiceCalls) { } public OnlineLogRecord build() { - return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, - clientAddress, serverClass, methodName, callDetails, param, regionName, - userName, multiGetsCount, multiMutationsCount, multiServiceCalls); + return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, clientAddress, + serverClass, methodName, callDetails, param, regionName, userName, multiGetsCount, + multiMutationsCount, multiServiceCalls); } } @@ -257,42 +254,22 @@ public boolean equals(Object o) { OnlineLogRecord that = (OnlineLogRecord) o; - return new EqualsBuilder() - .append(startTime, that.startTime) - .append(processingTime, that.processingTime) - .append(queueTime, that.queueTime) - .append(responseSize, that.responseSize) - .append(multiGetsCount, that.multiGetsCount) - .append(multiMutationsCount, 
that.multiMutationsCount) - .append(multiServiceCalls, that.multiServiceCalls) - .append(clientAddress, that.clientAddress) - .append(serverClass, that.serverClass) - .append(methodName, that.methodName) - .append(callDetails, that.callDetails) - .append(param, that.param) - .append(regionName, that.regionName) - .append(userName, that.userName) - .isEquals(); + return new EqualsBuilder().append(startTime, that.startTime) + .append(processingTime, that.processingTime).append(queueTime, that.queueTime) + .append(responseSize, that.responseSize).append(multiGetsCount, that.multiGetsCount) + .append(multiMutationsCount, that.multiMutationsCount) + .append(multiServiceCalls, that.multiServiceCalls).append(clientAddress, that.clientAddress) + .append(serverClass, that.serverClass).append(methodName, that.methodName) + .append(callDetails, that.callDetails).append(param, that.param) + .append(regionName, that.regionName).append(userName, that.userName).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(startTime) - .append(processingTime) - .append(queueTime) - .append(responseSize) - .append(clientAddress) - .append(serverClass) - .append(methodName) - .append(callDetails) - .append(param) - .append(regionName) - .append(userName) - .append(multiGetsCount) - .append(multiMutationsCount) - .append(multiServiceCalls) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(startTime).append(processingTime).append(queueTime) + .append(responseSize).append(clientAddress).append(serverClass).append(methodName) + .append(callDetails).append(param).append(regionName).append(userName) + .append(multiGetsCount).append(multiMutationsCount).append(multiServiceCalls).toHashCode(); } @Override @@ -302,22 +279,14 @@ public String toJsonPrettyPrint() { @Override public String toString() { - return new ToStringBuilder(this) - .append("startTime", startTime) - .append("processingTime", processingTime) - .append("queueTime", queueTime) - .append("responseSize", responseSize) - .append("clientAddress", clientAddress) - .append("serverClass", serverClass) - .append("methodName", methodName) - .append("callDetails", callDetails) - .append("param", param) - .append("regionName", regionName) - .append("userName", userName) - .append("multiGetsCount", multiGetsCount) - .append("multiMutationsCount", multiMutationsCount) - .append("multiServiceCalls", multiServiceCalls) - .toString(); + return new ToStringBuilder(this).append("startTime", startTime) + .append("processingTime", processingTime).append("queueTime", queueTime) + .append("responseSize", responseSize).append("clientAddress", clientAddress) + .append("serverClass", serverClass).append("methodName", methodName) + .append("callDetails", callDetails).append("param", param).append("regionName", regionName) + .append("userName", userName).append("multiGetsCount", multiGetsCount) + .append("multiMutationsCount", multiMutationsCount) + .append("multiServiceCalls", multiServiceCalls).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java index 3b6a6f5e51c4..5acd3ba282a4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,12 @@ import java.io.IOException; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.JsonMapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Superclass for any type that maps to a potentially application-level query. - * (e.g. Put, Get, Delete, Scan, Next, etc.) - * Contains methods for exposure to logging and debugging tools. + * Superclass for any type that maps to a potentially application-level query. (e.g. Put, Get, + * Delete, Scan, Next, etc.) Contains methods for exposure to logging and debugging tools. */ @InterfaceAudience.Public public abstract class Operation { @@ -36,15 +33,15 @@ public abstract class Operation { private static final int DEFAULT_MAX_COLS = 5; /** - * Produces a Map containing a fingerprint which identifies the type and - * the static schema components of a query (i.e. column families) + * Produces a Map containing a fingerprint which identifies the type and the static schema + * components of a query (i.e. column families) * @return a map containing fingerprint information (i.e. column families) */ public abstract Map getFingerprint(); /** - * Produces a Map containing a summary of the details of a query - * beyond the scope of the fingerprint (i.e. columns, rows...) + * Produces a Map containing a summary of the details of a query beyond the scope of the + * fingerprint (i.e. columns, rows...) * @param maxCols a limit on the number of columns output prior to truncation * @return a map containing parameters of a query (i.e. rows, columns...) */ @@ -59,8 +56,7 @@ public Map toMap() { } /** - * Produces a JSON object for fingerprint and details exposure in a - * parseable format. + * Produces a JSON object for fingerprint and details exposure in a parseable format. * @param maxCols a limit on the number of columns to include in the JSON * @return a JSONObject containing this Operation's information, as a string */ @@ -69,8 +65,7 @@ public String toJSON(int maxCols) throws IOException { } /** - * Produces a JSON object sufficient for description of a query - * in a debugging or logging context. + * Produces a JSON object sufficient for description of a query in a debugging or logging context. * @return the produced JSON object, as a string */ public String toJSON() throws IOException { @@ -78,17 +73,16 @@ public String toJSON() throws IOException { } /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. - * @param maxCols a limit on the number of columns output in the summary - * prior to truncation + * Produces a string representation of this Operation. It defaults to a JSON representation, but + * falls back to a string representation of the fingerprint and details in the case of a JSON + * encoding failure. + * @param maxCols a limit on the number of columns output in the summary prior to truncation * @return a JSON-parseable String */ public String toString(int maxCols) { - /* for now this is merely a wrapper from producing a JSON string, but - * toJSON is kept separate in case this is changed to be a less parsable - * pretty printed representation. + /* + * for now this is merely a wrapper from producing a JSON string, but toJSON is kept separate in + * case this is changed to be a less parsable pretty printed representation. 
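Side note (illustration only, not part of this diff): the Operation javadoc reflowed above describes the fingerprint/JSON exposure shared by Get, Put, Scan and the other query types. A minimal sketch with a Get; the column family and qualifier are hypothetical.

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: fingerprint = static schema parts (families); toJSON/toString = bounded detail dump.
public class OperationJsonSketch {
  public static void main(String[] args) throws Exception {
    Get get = new Get(Bytes.toBytes("row1"));
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));

    System.out.println(get.getFingerprint()); // e.g. {families=[cf]}
    System.out.println(get.toJSON(5));        // details limited to 5 columns before truncation
    System.out.println(get);                  // toString(): JSON, or fingerprint text if encoding fails
  }
}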
*/ try { return toJSON(maxCols); @@ -98,9 +92,9 @@ public String toString(int maxCols) { } /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. + * Produces a string representation of this Operation. It defaults to a JSON representation, but + * falls back to a string representation of the fingerprint and details in the case of a JSON + * encoding failure. * @return String */ @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java index 7342e65bb316..d710533125b7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.Collections; @@ -39,18 +37,17 @@ public abstract class OperationWithAttributes extends Operation implements Attri private int priority = HConstants.PRIORITY_UNSET; /** - * empty construction. - * We need this empty construction to keep binary compatibility. + * empty construction. We need this empty construction to keep binary compatibility. */ protected OperationWithAttributes() { } protected OperationWithAttributes(OperationWithAttributes clone) { - this.attributes = clone.getAttributesMap() == null ? null : - clone.getAttributesMap().entrySet().stream() - .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue(), (k, v) -> { - throw new RuntimeException("collisions!!!"); - }, () -> new TreeMap<>())); + this.attributes = clone.getAttributesMap() == null ? null + : clone.getAttributesMap().entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue(), (k, v) -> { + throw new RuntimeException("collisions!!!"); + }, () -> new TreeMap<>())); this.priority = clone.getPriority(); } @@ -96,7 +93,7 @@ protected long getAttributeSize() { long size = 0; if (attributes != null) { size += ClassSize.align(this.attributes.size() * ClassSize.MAP_ENTRY); - for(Map.Entry entry : this.attributes.entrySet()) { + for (Map.Entry entry : this.attributes.entrySet()) { size += ClassSize.align(ClassSize.STRING + entry.getKey().length()); size += ClassSize.align(ClassSize.ARRAY + entry.getValue().length); } @@ -105,13 +102,11 @@ protected long getAttributeSize() { } /** - * This method allows you to set an identifier on an operation. The original - * motivation for this was to allow the identifier to be used in slow query - * logging, but this could obviously be useful in other places. One use of - * this could be to put a class.method identifier in here to see where the - * slow query is coming from. - * @param id - * id to set for the scan + * This method allows you to set an identifier on an operation. The original motivation for this + * was to allow the identifier to be used in slow query logging, but this could obviously be + * useful in other places. 
One use of this could be to put a class.method identifier in here to + * see where the slow query is coming from. + * @param id id to set for the scan */ public OperationWithAttributes setId(String id) { setAttribute(ID_ATRIBUTE, Bytes.toBytes(id)); @@ -119,13 +114,12 @@ public OperationWithAttributes setId(String id) { } /** - * This method allows you to retrieve the identifier for the operation if one - * was set. + * This method allows you to retrieve the identifier for the operation if one was set. * @return the id or null if not set */ public String getId() { byte[] attr = getAttribute(ID_ATRIBUTE); - return attr == null? null: Bytes.toString(attr); + return attr == null ? null : Bytes.toString(attr); } public OperationWithAttributes setPriority(int priority) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java index 1b1ded9953bb..56a8dd19fcc5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java index 8aedc4d2205c..7d9d27907631 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +35,8 @@ public final class PerClientRandomNonceGenerator implements NonceGenerator { private PerClientRandomNonceGenerator() { byte[] clientIdBase = ClientIdGenerator.generateClientId(); - this.clientId = (((long) Arrays.hashCode(clientIdBase)) << 32) + - ThreadLocalRandom.current().nextInt(); + this.clientId = + (((long) Arrays.hashCode(clientIdBase)) << 32) + ThreadLocalRandom.current().nextInt(); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index 719251ff1f09..660720000019 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -39,10 +37,9 @@ /** * Used to perform Put operations for a single row. *

    - * To perform a Put, instantiate a Put object with the row to insert to, and - * for each column to be inserted, execute {@link #addColumn(byte[], byte[], - * byte[]) add} or {@link #addColumn(byte[], byte[], long, byte[]) add} if - * setting the timestamp. + * To perform a Put, instantiate a Put object with the row to insert to, and for each column to be + * inserted, execute {@link #addColumn(byte[], byte[], byte[]) add} or + * {@link #addColumn(byte[], byte[], long, byte[]) add} if setting the timestamp. */ @InterfaceAudience.Public public class Put extends Mutation implements HeapSize { @@ -50,13 +47,12 @@ public class Put extends Mutation implements HeapSize { * Create a Put operation for the specified row. * @param row row key */ - public Put(byte [] row) { + public Put(byte[] row) { this(row, HConstants.LATEST_TIMESTAMP); } /** * Create a Put operation for the specified row, using a given timestamp. - * * @param row row key; we make a copy of what we are passed to keep local. * @param ts timestamp */ @@ -70,13 +66,13 @@ public Put(byte[] row, long ts) { * @param rowOffset * @param rowLength */ - public Put(byte [] rowArray, int rowOffset, int rowLength) { + public Put(byte[] rowArray, int rowOffset, int rowLength) { this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP); } /** * @param row row key; we make a copy of what we are passed to keep local. - * @param ts timestamp + * @param ts timestamp */ public Put(ByteBuffer row, long ts) { if (ts < 0) { @@ -102,7 +98,7 @@ public Put(ByteBuffer row) { * @param rowLength * @param ts */ - public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) { + public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) { checkRow(rowArray, rowOffset, rowLength); this.row = Bytes.copy(rowArray, rowOffset, rowLength); this.ts = ts; @@ -113,24 +109,20 @@ public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) { /** * Create a Put operation for an immutable row key. - * * @param row row key - * @param rowIsImmutable whether the input row is immutable. - * Set to true if the caller can guarantee that - * the row will not be changed for the Put duration. + * @param rowIsImmutable whether the input row is immutable. Set to true if the caller can + * guarantee that the row will not be changed for the Put duration. */ - public Put(byte [] row, boolean rowIsImmutable) { + public Put(byte[] row, boolean rowIsImmutable) { this(row, HConstants.LATEST_TIMESTAMP, rowIsImmutable); } /** * Create a Put operation for an immutable row key, using a given timestamp. - * * @param row row key * @param ts timestamp - * @param rowIsImmutable whether the input row is immutable. - * Set to true if the caller can guarantee that - * the row will not be changed for the Put duration. + * @param rowIsImmutable whether the input row is immutable. Set to true if the caller can + * guarantee that the row will not be changed for the Put duration. 
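A short sketch of the two row-key handling modes described above, using a hypothetical row key; with rowIsImmutable the caller must not modify the array while the Put is in use:

  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;

  byte[] rowKey = Bytes.toBytes("row-0001");
  // Default constructor copies the row key, so the caller may reuse rowKey freely.
  Put copying = new Put(rowKey);
  // rowIsImmutable = true keeps a direct reference and skips the defensive copy;
  // only safe when rowKey is guaranteed not to change for the duration of the Put.
  Put zeroCopy = new Put(rowKey, true);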
*/ public Put(byte[] row, long ts, boolean rowIsImmutable) { // Check and set timestamp @@ -141,15 +133,15 @@ public Put(byte[] row, long ts, boolean rowIsImmutable) { // Deal with row according to rowIsImmutable checkRow(row); - if (rowIsImmutable) { // Row is immutable - this.row = row; // Do not make a local copy, but point to the provided byte array directly - } else { // Row is not immutable - this.row = Bytes.copy(row, 0, row.length); // Make a local copy + if (rowIsImmutable) { // Row is immutable + this.row = row; // Do not make a local copy, but point to the provided byte array directly + } else { // Row is not immutable + this.row = Bytes.copy(row, 0, row.length); // Make a local copy } } /** - * Copy constructor. Creates a Put operation cloned from the specified Put. + * Copy constructor. Creates a Put operation cloned from the specified Put. * @param putToCopy put to copy */ public Put(Put putToCopy) { @@ -157,14 +149,13 @@ public Put(Put putToCopy) { } /** - * Construct the Put with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. + * Construct the Put with user defined data. NOTED: 1) all cells in the familyMap must have the + * Type.Put 2) the row of each cell must be same with passed row. * @param row row. CAN'T be null * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Put(byte[] row, long ts, NavigableMap> familyMap) { + public Put(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } @@ -175,20 +166,20 @@ public Put(byte[] row, long ts, NavigableMap> familyMap) { * @param value column value * @return this */ - public Put addColumn(byte [] family, byte [] qualifier, byte [] value) { + public Put addColumn(byte[] family, byte[] qualifier, byte[] value) { return addColumn(family, qualifier, this.ts, value); } /** - * Add the specified column and value, with the specified timestamp as - * its version to this Put operation. + * Add the specified column and value, with the specified timestamp as its version to this Put + * operation. * @param family family name * @param qualifier column qualifier * @param ts version timestamp * @param value column value * @return this */ - public Put addColumn(byte [] family, byte [] qualifier, long ts, byte [] value) { + public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) { if (ts < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts); } @@ -199,8 +190,8 @@ public Put addColumn(byte [] family, byte [] qualifier, long ts, byte [] value) } /** - * Add the specified column and value, with the specified timestamp as - * its version to this Put operation. + * Add the specified column and value, with the specified timestamp as its version to this Put + * operation. * @param family family name * @param qualifier column qualifier * @param ts version timestamp @@ -218,9 +209,8 @@ public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer va } /** - * Add the specified KeyValue to this Put operation. Operation assumes that - * the passed KeyValue is immutable and its backing array will not be modified - * for the duration of this Put. + * Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is + * immutable and its backing array will not be modified for the duration of this Put. 
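A minimal sketch of the addColumn usage described in the class comment, assuming an open Table named table, a column family "cf", and an illustrative timestamp; IOException handling is omitted:

  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;

  Put put = new Put(Bytes.toBytes("row-0001"));
  // value stored under the Put's default timestamp (HConstants.LATEST_TIMESTAMP)
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
  // value stored with an explicit version timestamp
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"), 1_600_000_000_000L, Bytes.toBytes("v2"));
  table.put(put);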
* @param cell individual cell * @return this * @throws java.io.IOException e diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index 919513ceb622..a344f482540d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Map; - -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; @@ -29,11 +26,14 @@ import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.VisibilityConstants; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** * Base class for HBase read operations; e.g. Scan and Get. @@ -46,6 +46,7 @@ public abstract class Query extends OperationWithAttributes { protected Consistency consistency = Consistency.STRONG; protected Map colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); protected Boolean loadColumnFamiliesOnDemand = null; + /** * @return Filter */ @@ -70,8 +71,8 @@ public Query setFilter(Filter filter) { * @param authorizations */ public Query setAuthorizations(Authorizations authorizations) { - this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, ProtobufUtil - .toAuthorizations(authorizations).toByteArray()); + this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, + ProtobufUtil.toAuthorizations(authorizations).toByteArray()); return this; } @@ -111,7 +112,7 @@ public Query setACL(Map perms) { permMap.put(entry.getKey(), entry.getValue()); } setAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL, - AccessControlUtil.toUsersAndPermissions(permMap).toByteArray()); + AccessControlUtil.toUsersAndPermissions(permMap).toByteArray()); return this; } @@ -134,9 +135,9 @@ public Query setConsistency(Consistency consistency) { /** * Specify region replica id where Query will fetch data from. Use this together with - * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from - * a specific replicaId. - *
    Expert: This is an advanced API exposed. Only use it if you know what you are doing + * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a + * specific replicaId.
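A small sketch of the TIMELINE/replica combination just described, assuming an open Table named table and a hypothetical row key; IOException handling is omitted:

  import org.apache.hadoop.hbase.client.Consistency;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.util.Bytes;

  Get get = new Get(Bytes.toBytes("row-0001"));
  get.setConsistency(Consistency.TIMELINE); // allow possibly stale reads from replicas
  get.setReplicaId(1);                      // pin the read to secondary replica 1
  Result result = table.get(get);
  // result.isStale() reports whether the data was served by a non-primary replica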
    + * Expert: This is an advanced API exposed. Only use it if you know what you are doing * @param Id */ public Query setReplicaId(int Id) { @@ -153,14 +154,10 @@ public int getReplicaId() { } /** - * Set the isolation level for this query. If the - * isolation level is set to READ_UNCOMMITTED, then - * this query will return data from committed and - * uncommitted transactions. If the isolation level - * is set to READ_COMMITTED, then this query will return - * data from committed transactions only. If a isolation - * level is not explicitly set on a Query, then it - * is assumed to be READ_COMMITTED. + * Set the isolation level for this query. If the isolation level is set to READ_UNCOMMITTED, then + * this query will return data from committed and uncommitted transactions. If the isolation level + * is set to READ_COMMITTED, then this query will return data from committed transactions only. If + * a isolation level is not explicitly set on a Query, then it is assumed to be READ_COMMITTED. * @param level IsolationLevel for this query */ public Query setIsolationLevel(IsolationLevel level) { @@ -169,32 +166,28 @@ public Query setIsolationLevel(IsolationLevel level) { } /** - * @return The isolation level of this query. - * If no isolation level was set for this query object, - * then it returns READ_COMMITTED. + * @return The isolation level of this query. If no isolation level was set for this query object, + * then it returns READ_COMMITTED. * @return The IsolationLevel for this query */ public IsolationLevel getIsolationLevel() { byte[] attr = getAttribute(ISOLATION_LEVEL); - return attr == null ? IsolationLevel.READ_COMMITTED : - IsolationLevel.fromBytes(attr); - } - - /** - * Set the value indicating whether loading CFs on demand should be allowed (cluster - * default is false). On-demand CF loading doesn't load column families until necessary, e.g. - * if you filter on one column, the other column family data will be loaded only for the rows - * that are included in result, not all rows like in normal case. - * With column-specific filters, like SingleColumnValueFilter w/filterIfMissing == true, - * this can deliver huge perf gains when there's a cf with lots of data; however, it can - * also lead to some inconsistent results, as follows: - * - if someone does a concurrent update to both column families in question you may get a row - * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } } - * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan - * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, - * { video => "my dog" } }. - * - if there's a concurrent split and you have more than 2 column families, some rows may be - * missing some column families. + return attr == null ? IsolationLevel.READ_COMMITTED : IsolationLevel.fromBytes(attr); + } + + /** + * Set the value indicating whether loading CFs on demand should be allowed (cluster default is + * false). On-demand CF loading doesn't load column families until necessary, e.g. if you filter + * on one column, the other column family data will be loaded only for the rows that are included + * in result, not all rows like in normal case. 
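For the isolation-level setting above, a brief sketch on a Scan (any Query subclass behaves the same way):

  import org.apache.hadoop.hbase.client.IsolationLevel;
  import org.apache.hadoop.hbase.client.Scan;

  Scan scan = new Scan();
  // also return data from transactions that are still uncommitted on the server
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // when never set, getIsolationLevel() falls back to READ_COMMITTED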
With column-specific filters, like + * SingleColumnValueFilter w/filterIfMissing == true, this can deliver huge perf gains when + * there's a cf with lots of data; however, it can also lead to some inconsistent results, as + * follows: - if someone does a concurrent update to both column families in question you may get + * a row that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" + * } } someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent + * scan filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, { video => + * "my dog" } }. - if there's a concurrent split and you have more than 2 column families, some + * rows may be missing some column families. */ public Query setLoadColumnFamiliesOnDemand(boolean value) { this.loadColumnFamiliesOnDemand = value; @@ -212,18 +205,15 @@ public Boolean getLoadColumnFamiliesOnDemandValue() { * Get the logical value indicating whether on-demand CF loading should be allowed. */ public boolean doLoadColumnFamiliesOnDemand() { - return (this.loadColumnFamiliesOnDemand != null) - && this.loadColumnFamiliesOnDemand; + return (this.loadColumnFamiliesOnDemand != null) && this.loadColumnFamiliesOnDemand; } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp) on a per CF bases. Note, default maximum versions to return is 1. If - * your time range spans more than one version and you want all versions - * returned, up the number of versions beyond the default. + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp) on a + * per CF bases. Note, default maximum versions to return is 1. If your time range spans more than + * one version and you want all versions returned, up the number of versions beyond the default. * Column Family time ranges take precedence over the global time range. - * - * @param cf the column family for which you want to restrict + * @param cf the column family for which you want to restrict * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive * @return this diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 572eb0960ea1..ed35b5434d01 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -382,8 +382,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { this.pauseNs = builder.pauseNs; if (builder.pauseForCQTBENs < builder.pauseNs) { LOG.warn( - "Configured value of pauseForCQTBENs is {} ms, which is less than" + - " the normal pause value {} ms, use the greater one instead", + "Configured value of pauseForCQTBENs is {} ms, which is less than" + + " the normal pause value {} ms, use the greater one instead", TimeUnit.NANOSECONDS.toMillis(builder.pauseForCQTBENs), TimeUnit.NANOSECONDS.toMillis(builder.pauseNs)); this.pauseForCQTBENs = builder.pauseNs; @@ -397,18 +397,18 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { MasterRequestCallerBuilder newMasterCaller() { return this.connection.callerFactory. 
masterRequest() - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) - .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt); + .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) + .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS) + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt); } private AdminRequestCallerBuilder newAdminCaller() { return this.connection.callerFactory. adminRequest() - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) - .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt); + .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) + .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS) + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt); } @FunctionalInterface @@ -513,8 +513,8 @@ public CompletableFuture tableExists(TableName tableName) { @Override public CompletableFuture> listTableDescriptors(boolean includeSysTables) { - return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(null, - includeSysTables)); + return getTableDescriptors( + RequestConverter.buildGetTableDescriptorsRequest(null, includeSysTables)); } /** @@ -523,18 +523,16 @@ public CompletableFuture> listTableDescriptors(boolean inc @Override public CompletableFuture> listTableDescriptors(Pattern pattern, boolean includeSysTables) { - Preconditions.checkNotNull(pattern, - "pattern is null. If you don't specify a pattern, " - + "use listTableDescriptors(boolean) instead"); - return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(pattern, - includeSysTables)); + Preconditions.checkNotNull(pattern, "pattern is null. If you don't specify a pattern, " + + "use listTableDescriptors(boolean) instead"); + return getTableDescriptors( + RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables)); } @Override public CompletableFuture> listTableDescriptors(List tableNames) { - Preconditions.checkNotNull(tableNames, - "tableNames is null. If you don't specify tableNames, " - + "use listTableDescriptors(boolean) instead"); + Preconditions.checkNotNull(tableNames, "tableNames is null. If you don't specify tableNames, " + + "use listTableDescriptors(boolean) instead"); if (tableNames.isEmpty()) { return CompletableFuture.completedFuture(Collections.emptyList()); } @@ -557,28 +555,26 @@ public CompletableFuture> listTableNames(boolean includeSysTable } @Override - public CompletableFuture> - listTableNames(Pattern pattern, boolean includeSysTables) { + public CompletableFuture> listTableNames(Pattern pattern, + boolean includeSysTables) { Preconditions.checkNotNull(pattern, - "pattern is null. If you don't specify a pattern, use listTableNames(boolean) instead"); + "pattern is null. 
If you don't specify a pattern, use listTableNames(boolean) instead"); return getTableNames(RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables)); } private CompletableFuture> getTableNames(GetTableNamesRequest request) { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this - .> call(controller, - stub, request, (s, c, req, done) -> s.getTableNames(c, req, done), - (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))).call(); + return this.> newMasterCaller() + .action((controller, stub) -> this + .> call(controller, stub, + request, (s, c, req, done) -> s.getTableNames(c, req, done), + (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))) + .call(); } @Override public CompletableFuture> listTableDescriptorsByNamespace(String name) { return this.> newMasterCaller().action((controller, stub) -> this - .> call( + .> call( controller, stub, ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name).build(), (s, c, req, done) -> s.listTableDescriptorsByNamespace(c, req, done), @@ -589,8 +585,7 @@ List> call( @Override public CompletableFuture> listTableNamesByNamespace(String name) { return this.> newMasterCaller().action((controller, stub) -> this - .> call( + .> call( controller, stub, ListTableNamesByNamespaceRequest.newBuilder().setNamespaceName(name).build(), (s, c, req, done) -> s.listTableNamesByNamespace(c, req, done), @@ -602,12 +597,13 @@ List> call( public CompletableFuture getDescriptor(TableName tableName) { CompletableFuture future = new CompletableFuture<>(); addListener(this.> newMasterCaller().priority(tableName) - .action((controller, stub) -> this - .> call( - controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), - (s, c, req, done) -> s.getTableDescriptors(c, req, done), - (resp) -> resp.getTableSchemaList())) - .call(), (tableSchemas, error) -> { + .action((controller, stub) -> this + .> call( + controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), + (s, c, req, done) -> s.getTableDescriptors(c, req, done), + (resp) -> resp.getTableSchemaList())) + .call(), + (tableSchemas, error) -> { if (error != null) { future.completeExceptionally(error); return; @@ -661,20 +657,21 @@ private CompletableFuture createTable(TableName tableName, CreateTableRequ public CompletableFuture modifyTable(TableDescriptor desc) { return this. procedureCall(desc.getTableName(), RequestConverter.buildModifyTableRequest(desc.getTableName(), desc, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.modifyTable(c, req, done), - (resp) -> resp.getProcId(), new ModifyTableProcedureBiConsumer(this, desc.getTableName())); + ng.newNonce()), + (s, c, req, done) -> s.modifyTable(c, req, done), (resp) -> resp.getProcId(), + new ModifyTableProcedureBiConsumer(this, desc.getTableName())); } @Override public CompletableFuture modifyTableStoreFileTracker(TableName tableName, String dstSFT) { return this - . procedureCall( - tableName, - RequestConverter.buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, - ng.getNonceGroup(), ng.newNonce()), - (s, c, req, done) -> s.modifyTableStoreFileTracker(c, req, done), - (resp) -> resp.getProcId(), - new ModifyTableStoreFileTrackerProcedureBiConsumer(this, tableName)); + . 
procedureCall( + tableName, + RequestConverter.buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, + ng.getNonceGroup(), ng.newNonce()), + (s, c, req, done) -> s.modifyTableStoreFileTracker(c, req, done), + (resp) -> resp.getProcId(), + new ModifyTableStoreFileTrackerProcedureBiConsumer(this, tableName)); } @Override @@ -689,8 +686,9 @@ public CompletableFuture deleteTable(TableName tableName) { public CompletableFuture truncateTable(TableName tableName, boolean preserveSplits) { return this. procedureCall(tableName, RequestConverter.buildTruncateTableRequest(tableName, preserveSplits, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.truncateTable(c, req, done), - (resp) -> resp.getProcId(), new TruncateTableProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.truncateTable(c, req, done), (resp) -> resp.getProcId(), + new TruncateTableProcedureBiConsumer(tableName)); } @Override @@ -710,9 +708,9 @@ public CompletableFuture disableTable(TableName tableName) { } /** - * Utility for completing passed TableState {@link CompletableFuture} future - * using passed parameters. Sets error or boolean result ('true' if table matches - * the passed-in targetState). + * Utility for completing passed TableState {@link CompletableFuture} future using + * passed parameters. Sets error or boolean result ('true' if table matches the passed-in + * targetState). */ private static CompletableFuture completeCheckTableState( CompletableFuture future, TableState tableState, Throwable error, @@ -760,8 +758,9 @@ public CompletableFuture isTableDisabled(TableName tableName) { @Override public CompletableFuture isTableAvailable(TableName tableName) { if (TableName.isMetaTableName(tableName)) { - return connection.registry.getMetaRegionLocations().thenApply(locs -> Stream - .of(locs.getRegionLocations()).allMatch(loc -> loc != null && loc.getServerName() != null)); + return connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .allMatch(loc -> loc != null && loc.getServerName() != null)); } CompletableFuture future = new CompletableFuture<>(); addListener(isTableEnabled(tableName), (enabled, error) -> { @@ -776,15 +775,14 @@ public CompletableFuture isTableAvailable(TableName tableName) { if (!enabled) { future.complete(false); } else { - addListener( - ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName), + addListener(ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName), (locations, error1) -> { if (error1 != null) { future.completeExceptionally(error1); return; } List notDeployedRegions = locations.stream() - .filter(loc -> loc.getServerName() == null).collect(Collectors.toList()); + .filter(loc -> loc.getServerName() == null).collect(Collectors.toList()); if (notDeployedRegions.size() > 0) { if (LOG.isDebugEnabled()) { LOG.debug("Table " + tableName + " has " + notDeployedRegions.size() + " regions"); @@ -800,11 +798,12 @@ public CompletableFuture isTableAvailable(TableName tableName) { } @Override - public CompletableFuture addColumnFamily( - TableName tableName, ColumnFamilyDescriptor columnFamily) { + public CompletableFuture addColumnFamily(TableName tableName, + ColumnFamilyDescriptor columnFamily) { return this. 
procedureCall(tableName, RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), + ng.newNonce()), + (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), new AddColumnFamilyProcedureBiConsumer(tableName)); } @@ -812,8 +811,9 @@ public CompletableFuture addColumnFamily( public CompletableFuture deleteColumnFamily(TableName tableName, byte[] columnFamily) { return this. procedureCall(tableName, RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.deleteColumn(c, req, done), - (resp) -> resp.getProcId(), new DeleteColumnFamilyProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.deleteColumn(c, req, done), (resp) -> resp.getProcId(), + new DeleteColumnFamilyProcedureBiConsumer(tableName)); } @Override @@ -821,21 +821,22 @@ public CompletableFuture modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) { return this. procedureCall(tableName, RequestConverter.buildModifyColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.modifyColumn(c, req, done), - (resp) -> resp.getProcId(), new ModifyColumnFamilyProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.modifyColumn(c, req, done), (resp) -> resp.getProcId(), + new ModifyColumnFamilyProcedureBiConsumer(tableName)); } @Override public CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, - byte[] family, String dstSFT) { + byte[] family, String dstSFT) { return this - . procedureCall( - tableName, - RequestConverter.buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, - ng.getNonceGroup(), ng.newNonce()), - (s, c, req, done) -> s.modifyColumnStoreFileTracker(c, req, done), - (resp) -> resp.getProcId(), - new ModifyColumnFamilyStoreFileTrackerProcedureBiConsumer(tableName)); + . procedureCall( + tableName, + RequestConverter.buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, + ng.getNonceGroup(), ng.newNonce()), + (s, c, req, done) -> s.modifyColumnStoreFileTracker(c, req, done), + (resp) -> resp.getProcId(), + new ModifyColumnFamilyStoreFileTrackerProcedureBiConsumer(tableName)); } @Override @@ -864,37 +865,31 @@ public CompletableFuture deleteNamespace(String name) { @Override public CompletableFuture getNamespaceDescriptor(String name) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . - call(controller, stub, RequestConverter.buildGetNamespaceDescriptorRequest(name), - (s, c, req, done) -> s.getNamespaceDescriptor(c, req, done), (resp) - -> ProtobufUtil.toNamespaceDescriptor(resp.getNamespaceDescriptor()))).call(); + return this. newMasterCaller().action((controller, stub) -> this + . 
call( + controller, stub, RequestConverter.buildGetNamespaceDescriptorRequest(name), + (s, c, req, done) -> s.getNamespaceDescriptor(c, req, done), + (resp) -> ProtobufUtil.toNamespaceDescriptor(resp.getNamespaceDescriptor()))) + .call(); } @Override public CompletableFuture> listNamespaces() { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this - .> call( - controller, stub, ListNamespacesRequest.newBuilder().build(), (s, c, req, - done) -> s.listNamespaces(c, req, done), - (resp) -> resp.getNamespaceNameList())).call(); + return this.> newMasterCaller().action( + (controller, stub) -> this.> call( + controller, stub, ListNamespacesRequest.newBuilder().build(), + (s, c, req, done) -> s.listNamespaces(c, req, done), (resp) -> resp.getNamespaceNameList())) + .call(); } @Override public CompletableFuture> listNamespaceDescriptors() { - return this - .> newMasterCaller().action((controller, stub) -> this - .> call(controller, stub, - ListNamespaceDescriptorsRequest.newBuilder().build(), (s, c, req, done) -> - s.listNamespaceDescriptors(c, req, done), - (resp) -> ProtobufUtil.toNamespaceDescriptorList(resp))).call(); + return this.> newMasterCaller().action((controller, stub) -> this + .> call( + controller, stub, ListNamespaceDescriptorsRequest.newBuilder().build(), + (s, c, req, done) -> s.listNamespaceDescriptors(c, req, done), + (resp) -> ProtobufUtil.toNamespaceDescriptorList(resp))) + .call(); } @Override @@ -912,14 +907,14 @@ public CompletableFuture> getRegions(ServerName serverName) { public CompletableFuture> getRegions(TableName tableName) { if (tableName.equals(META_TABLE_NAME)) { return connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion) - .collect(Collectors.toList())); + .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion) + .collect(Collectors.toList())); } else { - return ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName) - .thenApply( - locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList())); + return ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).thenApply( + locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList())); } } + @Override public CompletableFuture flush(TableName tableName) { return flush(tableName, null); @@ -944,8 +939,9 @@ public CompletableFuture flush(TableName tableName, byte[] columnFamily) { if (columnFamily != null) { props.put(HConstants.FAMILY_KEY_STR, Bytes.toString(columnFamily)); } - addListener(execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), - props), (ret, err3) -> { + addListener( + execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), props), + (ret, err3) -> { if (err3 != null) { future.completeExceptionally(err3); } else { @@ -968,9 +964,9 @@ public CompletableFuture flushRegion(byte[] regionName) { @Override public CompletableFuture flushRegion(byte[] regionName, byte[] columnFamily) { Preconditions.checkNotNull(columnFamily, "columnFamily is null." 
- + "If you don't specify a columnFamily, use flushRegion(regionName) instead"); - return flushRegionInternal(regionName, columnFamily, false) - .thenAccept(r -> {}); + + "If you don't specify a columnFamily, use flushRegion(regionName) instead"); + return flushRegionInternal(regionName, columnFamily, false).thenAccept(r -> { + }); } /** @@ -979,8 +975,8 @@ public CompletableFuture flushRegion(byte[] regionName, byte[] columnFamil * As it exposes the protobuf message, please do NOT try to expose it as a public * API. */ - CompletableFuture flushRegionInternal(byte[] regionName, - byte[] columnFamily, boolean writeFlushWALMarker) { + CompletableFuture flushRegionInternal(byte[] regionName, byte[] columnFamily, + boolean writeFlushWALMarker) { CompletableFuture future = new CompletableFuture<>(); addListener(getRegionLocation(regionName), (location, err) -> { if (err != null) { @@ -989,31 +985,32 @@ CompletableFuture flushRegionInternal(byte[] regionName, } ServerName serverName = location.getServerName(); if (serverName == null) { - future - .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); + future.completeExceptionally( + new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } - addListener( - flush(serverName, location.getRegion(), columnFamily, writeFlushWALMarker), + addListener(flush(serverName, location.getRegion(), columnFamily, writeFlushWALMarker), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { future.complete(ret); - }}); + } + }); }); return future; } private CompletableFuture flush(ServerName serverName, RegionInfo regionInfo, - byte[] columnFamily, boolean writeFlushWALMarker) { + byte[] columnFamily, boolean writeFlushWALMarker) { return this. newAdminCaller().serverName(serverName) - .action((controller, stub) -> this - . adminCall(controller, stub, - RequestConverter.buildFlushRegionRequest(regionInfo.getRegionName(), - columnFamily, writeFlushWALMarker), - (s, c, req, done) -> s.flushRegion(c, req, done), resp -> resp)) - .call(); + .action((controller, stub) -> this + . 
adminCall(controller, + stub, + RequestConverter.buildFlushRegionRequest(regionInfo.getRegionName(), columnFamily, + writeFlushWALMarker), + (s, c, req, done) -> s.flushRegion(c, req, done), resp -> resp)) + .call(); } @Override @@ -1026,11 +1023,9 @@ public CompletableFuture flushRegionServer(ServerName sn) { } List> compactFutures = new ArrayList<>(); if (hRegionInfos != null) { - hRegionInfos.forEach( - region -> compactFutures.add( - flush(sn, region, null, false).thenAccept(r -> {}) - ) - ); + hRegionInfos + .forEach(region -> compactFutures.add(flush(sn, region, null, false).thenAccept(r -> { + }))); } addListener(CompletableFuture.allOf( compactFutures.toArray(new CompletableFuture[compactFutures.size()])), (ret, err2) -> { @@ -1137,8 +1132,8 @@ private CompletableFuture compactRegion(byte[] regionName, byte[] columnFa } ServerName serverName = location.getServerName(); if (serverName == null) { - future - .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); + future.completeExceptionally( + new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } addListener(compact(location.getServerName(), location.getRegion(), major, columnFamily), @@ -1162,12 +1157,12 @@ private CompletableFuture> getTableHRegionLocations(TableN addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> { if (err != null) { future.completeExceptionally(err); - } else if (metaRegions == null || metaRegions.isEmpty() || - metaRegions.getDefaultRegionLocation() == null) { - future.completeExceptionally(new IOException("meta region does not found")); - } else { - future.complete(Collections.singletonList(metaRegions.getDefaultRegionLocation())); - } + } else if (metaRegions == null || metaRegions.isEmpty() + || metaRegions.getDefaultRegionLocation() == null) { + future.completeExceptionally(new IOException("meta region does not found")); + } else { + future.complete(Collections.singletonList(metaRegions.getDefaultRegionLocation())); + } }); return future; } else { @@ -1210,10 +1205,10 @@ private CompletableFuture compact(TableName tableName, byte[] columnFamily future.completeExceptionally(new TableNotFoundException(tableName)); } CompletableFuture[] compactFutures = - locations.stream().filter(l -> l.getRegion() != null) - .filter(l -> !l.getRegion().isOffline()).filter(l -> l.getServerName() != null) - .map(l -> compact(l.getServerName(), l.getRegion(), major, columnFamily)) - .toArray(CompletableFuture[]::new); + locations.stream().filter(l -> l.getRegion() != null) + .filter(l -> !l.getRegion().isOffline()).filter(l -> l.getServerName() != null) + .map(l -> compact(l.getServerName(), l.getRegion(), major, columnFamily)) + .toArray(CompletableFuture[]::new); // future complete unless all of the compact futures are completed. addListener(CompletableFuture.allOf(compactFutures), (ret, err2) -> { if (err2 != null) { @@ -1235,19 +1230,19 @@ private CompletableFuture compact(TableName tableName, byte[] columnFamily */ private CompletableFuture compact(final ServerName sn, final RegionInfo hri, final boolean major, byte[] columnFamily) { - return this - . newAdminCaller() - .serverName(sn) + return this. newAdminCaller().serverName(sn) .action( - (controller, stub) -> this. adminCall( - controller, stub, RequestConverter.buildCompactRegionRequest(hri.getRegionName(), - major, columnFamily), (s, c, req, done) -> s.compactRegion(c, req, done), - resp -> null)).call(); + (controller, stub) -> this + . 
adminCall(controller, stub, + RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, + columnFamily), + (s, c, req, done) -> s.compactRegion(c, req, done), resp -> null)) + .call(); } private byte[] toEncodeRegionName(byte[] regionName) { - return RegionInfo.isEncodedRegionName(regionName) ? regionName : - Bytes.toBytes(RegionInfo.encodeRegionName(regionName)); + return RegionInfo.isEncodedRegionName(regionName) ? regionName + : Bytes.toBytes(RegionInfo.encodeRegionName(regionName)); } private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference tableName, @@ -1267,8 +1262,8 @@ private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference

    isSplitEnabled() { private CompletableFuture setSplitOrMergeOn(boolean enabled, boolean synchronous, MasterSwitchType switchType) { SetSplitOrMergeEnabledRequest request = - RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType); - return this. newMasterCaller() - .action((controller, stub) -> this + RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType); + return this. newMasterCaller().action((controller, stub) -> this . call(controller, stub, request, (s, c, req, done) -> s.setSplitOrMergeEnabled(c, req, done), (resp) -> resp.getPrevValueList().get(0))) - .call(); + .call(); } private CompletableFuture isSplitOrMergeOn(MasterSwitchType switchType) { IsSplitOrMergeEnabledRequest request = RequestConverter.buildIsSplitOrMergeEnabledRequest(switchType); - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . call( - controller, stub, request, - (s, c, req, done) -> s.isSplitOrMergeEnabled(c, req, done), - (resp) -> resp.getEnabled())).call(); + return this. newMasterCaller() + .action((controller, stub) -> this + . call(controller, + stub, request, (s, c, req, done) -> s.isSplitOrMergeEnabled(c, req, done), + (resp) -> resp.getEnabled())) + .call(); } @Override public CompletableFuture mergeRegions(List nameOfRegionsToMerge, boolean forcible) { if (nameOfRegionsToMerge.size() < 2) { return failedFuture(new IllegalArgumentException( - "Can not merge only " + nameOfRegionsToMerge.size() + " region")); + "Can not merge only " + nameOfRegionsToMerge.size() + " region")); } CompletableFuture future = new CompletableFuture<>(); byte[][] encodedNameOfRegionsToMerge = - nameOfRegionsToMerge.stream().map(this::toEncodeRegionName).toArray(byte[][]::new); + nameOfRegionsToMerge.stream().map(this::toEncodeRegionName).toArray(byte[][]::new); addListener(checkRegionsAndGetTableName(encodedNameOfRegionsToMerge), (tableName, err) -> { if (err != null) { @@ -1356,9 +1348,8 @@ public CompletableFuture mergeRegions(List nameOfRegionsToMerge, b } addListener( - this.procedureCall(tableName, request, - MasterService.Interface::mergeTableRegions, MergeTableRegionsResponse::getProcId, - new MergeTableRegionProcedureBiConsumer(tableName)), + this.procedureCall(tableName, request, MasterService.Interface::mergeTableRegions, + MergeTableRegionsResponse::getProcId, new MergeTableRegionProcedureBiConsumer(tableName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1383,11 +1374,11 @@ public CompletableFuture split(TableName tableName) { return; } addListener(metaTable - .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY) - .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(tableName, - ClientMetaTableAccessor.QueryType.REGION)) - .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(tableName, - ClientMetaTableAccessor.QueryType.REGION))), + .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY) + .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(tableName, + ClientMetaTableAccessor.QueryType.REGION)) + .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(tableName, + ClientMetaTableAccessor.QueryType.REGION))), (results, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1404,8 +1395,8 @@ public CompletableFuture split(TableName tableName) { for (HRegionLocation h : rl.getRegionLocations()) { if (h != null && h.getServerName() != null) { RegionInfo hri = h.getRegion(); - if (hri == 
null || hri.isSplitParent() || - hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { + if (hri == null || hri.isSplitParent() + || hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { continue; } splitFutures.add(split(hri, null)); @@ -1415,7 +1406,7 @@ public CompletableFuture split(TableName tableName) { } addListener( CompletableFuture - .allOf(splitFutures.toArray(new CompletableFuture[splitFutures.size()])), + .allOf(splitFutures.toArray(new CompletableFuture[splitFutures.size()])), (ret, exception) -> { if (exception != null) { future.completeExceptionally(exception); @@ -1443,7 +1434,7 @@ public CompletableFuture split(TableName tableName, byte[] splitPoint) { result.completeExceptionally(err); } else if (loc == null || loc.getRegion() == null) { result.completeExceptionally(new IllegalArgumentException( - "Region does not found: rowKey=" + Bytes.toStringBinary(splitPoint))); + "Region does not found: rowKey=" + Bytes.toStringBinary(splitPoint))); } else { addListener(splitRegion(loc.getRegion().getRegionName(), splitPoint), (ret, err2) -> { if (err2 != null) { @@ -1468,15 +1459,14 @@ public CompletableFuture splitRegion(byte[] regionName) { } RegionInfo regionInfo = location.getRegion(); if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - future - .completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + - "Replicas are auto-split when their primary is split.")); + future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + + "Replicas are auto-split when their primary is split.")); return; } ServerName serverName = location.getServerName(); if (serverName == null) { - future - .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); + future.completeExceptionally( + new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } addListener(split(regionInfo, null), (ret, err2) -> { @@ -1502,19 +1492,18 @@ public CompletableFuture splitRegion(byte[] regionName, byte[] splitPoint) } RegionInfo regionInfo = location.getRegion(); if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - future - .completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + - "Replicas are auto-split when their primary is split.")); + future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. 
" + + "Replicas are auto-split when their primary is split.")); return; } ServerName serverName = location.getServerName(); if (serverName == null) { - future - .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); + future.completeExceptionally( + new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } - if (regionInfo.getStartKey() != null && - Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0) { + if (regionInfo.getStartKey() != null + && Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0) { future.completeExceptionally( new IllegalArgumentException("should not give a splitkey which equals to startkey!")); return; @@ -1543,9 +1532,8 @@ private CompletableFuture split(final RegionInfo hri, byte[] splitPoint) { } addListener( - this.procedureCall(tableName, - request, MasterService.Interface::splitRegion, SplitTableRegionResponse::getProcId, - new SplitTableRegionProcedureBiConsumer(tableName)), + this.procedureCall(tableName, request, MasterService.Interface::splitRegion, + SplitTableRegionResponse::getProcId, new SplitTableRegionProcedureBiConsumer(tableName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1564,11 +1552,14 @@ public CompletableFuture assign(byte[] regionName) { future.completeExceptionally(err); return; } - addListener(this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this. call( - controller, stub, RequestConverter.buildAssignRegionRequest(regionInfo.getRegionName()), - (s, c, req, done) -> s.assignRegion(c, req, done), resp -> null))) - .call(), (ret, err2) -> { + addListener( + this. newMasterCaller().priority(regionInfo.getTable()) + .action(((controller, stub) -> this + . call(controller, stub, + RequestConverter.buildAssignRegionRequest(regionInfo.getRegionName()), + (s, c, req, done) -> s.assignRegion(c, req, done), resp -> null))) + .call(), + (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { @@ -1589,11 +1580,11 @@ public CompletableFuture unassign(byte[] regionName) { } addListener( this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this - . call(controller, stub, - RequestConverter.buildUnassignRegionRequest(regionInfo.getRegionName()), - (s, c, req, done) -> s.unassignRegion(c, req, done), resp -> null))) - .call(), + .action(((controller, stub) -> this + . call(controller, stub, + RequestConverter.buildUnassignRegionRequest(regionInfo.getRegionName()), + (s, c, req, done) -> s.unassignRegion(c, req, done), resp -> null))) + .call(), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1615,11 +1606,11 @@ public CompletableFuture offline(byte[] regionName) { } addListener( this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this - . call(controller, stub, - RequestConverter.buildOfflineRegionRequest(regionInfo.getRegionName()), - (s, c, req, done) -> s.offlineRegion(c, req, done), resp -> null))) - .call(), + .action(((controller, stub) -> this + . 
call(controller, stub, + RequestConverter.buildOfflineRegionRequest(regionInfo.getRegionName()), + (s, c, req, done) -> s.offlineRegion(c, req, done), resp -> null))) + .call(), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1665,7 +1656,7 @@ public CompletableFuture move(byte[] regionName, ServerName destServerName } addListener( moveRegion(regionInfo, RequestConverter - .buildMoveRegionRequest(regionInfo.getEncodedNameAsBytes(), destServerName)), + .buildMoveRegionRequest(regionInfo.getEncodedNameAsBytes(), destServerName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1679,58 +1670,57 @@ public CompletableFuture move(byte[] regionName, ServerName destServerName private CompletableFuture moveRegion(RegionInfo regionInfo, MoveRegionRequest request) { return this. newMasterCaller().priority(regionInfo.getTable()) - .action( - (controller, stub) -> this. call(controller, - stub, request, (s, c, req, done) -> s.moveRegion(c, req, done), resp -> null)) - .call(); + .action( + (controller, stub) -> this. call(controller, + stub, request, (s, c, req, done) -> s.moveRegion(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture setQuota(QuotaSettings quota) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call(controller, - stub, QuotaSettings.buildSetQuotaRequestProto(quota), - (s, c, req, done) -> s.setQuota(c, req, done), (resp) -> null)).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. call( + controller, stub, QuotaSettings.buildSetQuotaRequestProto(quota), + (s, c, req, done) -> s.setQuota(c, req, done), (resp) -> null)) + .call(); } @Override public CompletableFuture> getQuota(QuotaFilter filter) { CompletableFuture> future = new CompletableFuture<>(); Scan scan = QuotaTableUtil.makeScan(filter); - this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build() - .scan(scan, new AdvancedScanResultConsumer() { - List settings = new ArrayList<>(); + this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build().scan(scan, + new AdvancedScanResultConsumer() { + List settings = new ArrayList<>(); - @Override - public void onNext(Result[] results, ScanController controller) { - for (Result result : results) { - try { - QuotaTableUtil.parseResultToCollection(result, settings); - } catch (IOException e) { - controller.terminate(); - future.completeExceptionally(e); - } + @Override + public void onNext(Result[] results, ScanController controller) { + for (Result result : results) { + try { + QuotaTableUtil.parseResultToCollection(result, settings); + } catch (IOException e) { + controller.terminate(); + future.completeExceptionally(e); } } + } - @Override - public void onError(Throwable error) { - future.completeExceptionally(error); - } + @Override + public void onError(Throwable error) { + future.completeExceptionally(error); + } - @Override - public void onComplete() { - future.complete(settings); - } - }); + @Override + public void onComplete() { + future.complete(settings); + } + }); return future; } @Override - public CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled) { + public CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) { return this. 
procedureCall( RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled), (s, c, req, done) -> s.addReplicationPeer(c, req, done), (resp) -> resp.getProcId(), @@ -1764,33 +1754,34 @@ public CompletableFuture disableReplicationPeer(String peerId) { @Override public CompletableFuture getReplicationPeerConfig(String peerId) { return this. newMasterCaller().action((controller, stub) -> this - . - call(controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId), - (s, c, req, done) -> s.getReplicationPeerConfig(c, req, done), - (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))).call(); + . call( + controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId), + (s, c, req, done) -> s.getReplicationPeerConfig(c, req, done), + (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))) + .call(); } @Override public CompletableFuture updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig) { return this - . procedureCall( - RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig), - (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done), - (resp) -> resp.getProcId(), - new ReplicationProcedureBiConsumer(peerId, () -> "UPDATE_REPLICATION_PEER_CONFIG")); + . procedureCall( + RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig), + (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done), + (resp) -> resp.getProcId(), + new ReplicationProcedureBiConsumer(peerId, () -> "UPDATE_REPLICATION_PEER_CONFIG")); } @Override public CompletableFuture transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState clusterState) { - return this. procedureCall( - RequestConverter.buildTransitReplicationPeerSyncReplicationStateRequest(peerId, - clusterState), + return this + . 
procedureCall( + RequestConverter.buildTransitReplicationPeerSyncReplicationStateRequest(peerId, + clusterState), (s, c, req, done) -> s.transitReplicationPeerSyncReplicationState(c, req, done), (resp) -> resp.getProcId(), new ReplicationProcedureBiConsumer(peerId, - () -> "TRANSIT_REPLICATION_PEER_SYNCHRONOUS_REPLICATION_STATE")); + () -> "TRANSIT_REPLICATION_PEER_SYNCHRONOUS_REPLICATION_STATE")); } @Override @@ -1804,7 +1795,7 @@ public CompletableFuture appendReplicationPeerTableCFs(String id, addListener(getReplicationPeerConfig(id), (peerConfig, error) -> { if (!completeExceptionally(future, error)) { ReplicationPeerConfig newPeerConfig = - ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig); + ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig); addListener(updateReplicationPeerConfig(id, newPeerConfig), (result, err) -> { if (!completeExceptionally(future, error)) { future.complete(result); @@ -1828,7 +1819,7 @@ public CompletableFuture removeReplicationPeerTableCFs(String id, ReplicationPeerConfig newPeerConfig = null; try { newPeerConfig = ReplicationPeerConfigUtil - .removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id); + .removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id); } catch (ReplicationException e) { future.completeExceptionally(e); return; @@ -1855,17 +1846,16 @@ public CompletableFuture> listReplicationPeers( return listReplicationPeers(RequestConverter.buildListReplicationPeersRequest(pattern)); } - private CompletableFuture> listReplicationPeers( - ListReplicationPeersRequest request) { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this.> call(controller, stub, request, - (s, c, req, done) -> s.listReplicationPeers(c, req, done), - (resp) -> resp.getPeerDescList().stream() - .map(ReplicationPeerConfigUtil::toReplicationPeerDescription) - .collect(Collectors.toList()))).call(); + private CompletableFuture> + listReplicationPeers(ListReplicationPeersRequest request) { + return this.> newMasterCaller().action( + (controller, stub) -> this + .> call( + controller, stub, request, (s, c, req, done) -> s.listReplicationPeers(c, req, done), + (resp) -> resp.getPeerDescList().stream() + .map(ReplicationPeerConfigUtil::toReplicationPeerDescription) + .collect(Collectors.toList()))) + .call(); } @Override @@ -1877,10 +1867,10 @@ public CompletableFuture> listReplicatedTableCFs() { tables.forEach(table -> { Map cfs = new HashMap<>(); Stream.of(table.getColumnFamilies()) - .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) - .forEach(column -> { - cfs.put(column.getNameAsString(), column.getScope()); - }); + .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) + .forEach(column -> { + cfs.put(column.getNameAsString(), column.getScope()); + }); if (!cfs.isEmpty()) { replicatedTableCFs.add(new TableCFs(table.getTableName(), cfs)); } @@ -1894,21 +1884,22 @@ public CompletableFuture> listReplicatedTableCFs() { @Override public CompletableFuture snapshot(SnapshotDescription snapshotDesc) { SnapshotProtos.SnapshotDescription snapshot = - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc); + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc); try { ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot); } catch (IllegalArgumentException e) { return failedFuture(e); } CompletableFuture future = new CompletableFuture<>(); - final SnapshotRequest request = - 
SnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) - .setNonce(ng.newNonce()).build(); - addListener(this. newMasterCaller() - .action((controller, stub) -> - this. call(controller, stub, - request, (s, c, req, done) -> s.snapshot(c, req, done), resp -> resp)) - .call(), (resp, err) -> { + final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot) + .setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()).build(); + addListener( + this. newMasterCaller() + .action((controller, stub) -> this + . call(controller, stub, request, + (s, c, req, done) -> s.snapshot(c, req, done), resp -> resp)) + .call(), + (resp, err) -> { if (err != null) { future.completeExceptionally(err); return; @@ -1921,8 +1912,8 @@ this. call(controller, stub // This is for keeping compatibility with old implementation. // If there is a procId field in the response, then the snapshot will be operated with a // SnapshotProcedure, otherwise the snapshot will be coordinated by zk. - private void waitSnapshotFinish(SnapshotDescription snapshot, - CompletableFuture future, SnapshotResponse resp) { + private void waitSnapshotFinish(SnapshotDescription snapshot, CompletableFuture future, + SnapshotResponse resp) { if (resp.hasProcId()) { getProcedureResult(resp.getProcId(), future, 0); addListener(future, new SnapshotProcedureBiConsumer(snapshot.getTableName())); @@ -1944,17 +1935,16 @@ public void run(Timeout timeout) throws Exception { future.complete(null); } else { // retry again after pauseTime. - long pauseTime = ConnectionUtils - .getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); + long pauseTime = + ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); pauseTime = Math.min(pauseTime, maxPauseTime); - AsyncConnectionImpl.RETRY_TIMER - .newTimeout(this, pauseTime, TimeUnit.MILLISECONDS); + AsyncConnectionImpl.RETRY_TIMER.newTimeout(this, pauseTime, TimeUnit.MILLISECONDS); } }); } else { - future.completeExceptionally(new SnapshotCreationException( - "Snapshot '" + snapshot.getName() + "' wasn't completed in expectedTime:" - + expectedTimeout + " ms", snapshot)); + future.completeExceptionally( + new SnapshotCreationException("Snapshot '" + snapshot.getName() + + "' wasn't completed in expectedTime:" + expectedTimeout + " ms", snapshot)); } } }; @@ -1964,15 +1954,13 @@ public void run(Timeout timeout) throws Exception { @Override public CompletableFuture isSnapshotFinished(SnapshotDescription snapshot) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, - stub, - IsSnapshotDoneRequest.newBuilder() - .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), (s, c, - req, done) -> s.isSnapshotDone(c, req, done), resp -> resp.getDone())).call(); + return this. newMasterCaller() + .action((controller, stub) -> this + . 
call(controller, stub, + IsSnapshotDoneRequest.newBuilder() + .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), + (s, c, req, done) -> s.isSnapshotDone(c, req, done), resp -> resp.getDone())) + .call(); } @Override @@ -2003,7 +1991,7 @@ public CompletableFuture restoreSnapshot(String snapshotName, boolean take } if (tableName == null) { future.completeExceptionally(new RestoreSnapshotException( - "Unable to find the table name for snapshot=" + snapshotName)); + "Unable to find the table name for snapshot=" + snapshotName)); return; } final TableName finalTableName = tableName; @@ -2037,12 +2025,12 @@ private CompletableFuture restoreSnapshot(String snapshotName, TableName t CompletableFuture future = new CompletableFuture<>(); // Step.1 Take a snapshot of the current state String failSafeSnapshotSnapshotNameFormat = - this.connection.getConfiguration().get(HConstants.SNAPSHOT_RESTORE_FAILSAFE_NAME, - HConstants.DEFAULT_SNAPSHOT_RESTORE_FAILSAFE_NAME); + this.connection.getConfiguration().get(HConstants.SNAPSHOT_RESTORE_FAILSAFE_NAME, + HConstants.DEFAULT_SNAPSHOT_RESTORE_FAILSAFE_NAME); final String failSafeSnapshotSnapshotName = - failSafeSnapshotSnapshotNameFormat.replace("{snapshot.name}", snapshotName) - .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.')) - .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime())); + failSafeSnapshotSnapshotNameFormat.replace("{snapshot.name}", snapshotName) + .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.')) + .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime())); LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName); addListener(snapshot(failSafeSnapshotSnapshotName, tableName), (ret, err) -> { if (err != null) { @@ -2053,16 +2041,14 @@ private CompletableFuture restoreSnapshot(String snapshotName, TableName t (void2, err2) -> { if (err2 != null) { // Step.3.a Something went wrong during the restore and try to rollback. - addListener( - internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl, - null), - (void3, err3) -> { + addListener(internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, + restoreAcl, null), (void3, err3) -> { if (err3 != null) { future.completeExceptionally(err3); } else { String msg = - "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot=" + - failSafeSnapshotSnapshotName + " succeeded."; + "Restore snapshot=" + snapshotName + " failed. 
Rollback to snapshot=" + + failSafeSnapshotSnapshotName + " succeeded."; future.completeExceptionally(new RestoreSnapshotException(msg, err2)); } }); @@ -2118,23 +2104,24 @@ public CompletableFuture cloneSnapshot(String snapshotName, TableName tabl private CompletableFuture internalRestoreSnapshot(String snapshotName, TableName tableName, boolean restoreAcl, String customSFT) { SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder() - .setName(snapshotName).setTable(tableName.getNameAsString()).build(); + .setName(snapshotName).setTable(tableName.getNameAsString()).build(); try { ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot); } catch (IllegalArgumentException e) { return failedFuture(e); } RestoreSnapshotRequest.Builder builder = - RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) - .setNonce(ng.newNonce()).setRestoreACL(restoreAcl); - if(customSFT != null){ + RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) + .setNonce(ng.newNonce()).setRestoreACL(restoreAcl); + if (customSFT != null) { builder.setCustomSFT(customSFT); } - return waitProcedureResult(this. newMasterCaller().action((controller, stub) -> this - . call(controller, stub, - builder.build(), - (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())) - .call()); + return waitProcedureResult(this. newMasterCaller() + .action( + (controller, stub) -> this. call( + controller, stub, builder.build(), (s, c, req, done) -> s.restoreSnapshot(c, req, done), + (resp) -> resp.getProcId())) + .call()); } @Override @@ -2151,8 +2138,8 @@ public CompletableFuture> listSnapshots(Pattern patter private CompletableFuture> getCompletedSnapshots(Pattern pattern) { return this.> newMasterCaller().action((controller, stub) -> this - .> - call(controller, stub, GetCompletedSnapshotsRequest.newBuilder().build(), + .> call( + controller, stub, GetCompletedSnapshotsRequest.newBuilder().build(), (s, c, req, done) -> s.getCompletedSnapshots(c, req, done), resp -> ProtobufUtil.toSnapshotDescriptionList(resp, pattern))) .call(); @@ -2175,8 +2162,8 @@ public CompletableFuture> listTableSnapshots(Pattern t return getCompletedSnapshots(tableNamePattern, snapshotNamePattern); } - private CompletableFuture> getCompletedSnapshots( - Pattern tableNamePattern, Pattern snapshotNamePattern) { + private CompletableFuture> + getCompletedSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) { CompletableFuture> future = new CompletableFuture<>(); addListener(listTableNames(tableNamePattern, false), (tableNames, err) -> { if (err != null) { @@ -2197,8 +2184,8 @@ private CompletableFuture> getCompletedSnapshots( return; } future.complete(snapshotDescList.stream() - .filter(snap -> (snap != null && tableNames.contains(snap.getTableName()))) - .collect(Collectors.toList())); + .filter(snap -> (snap != null && tableNames.contains(snap.getTableName()))) + .collect(Collectors.toList())); }); }); return future; @@ -2257,7 +2244,8 @@ private CompletableFuture internalDeleteSnapshots(Pattern tableNamePattern return; } addListener(CompletableFuture.allOf(snapshotDescriptions.stream() - .map(this::internalDeleteSnapshot).toArray(CompletableFuture[]::new)), (v, e) -> { + .map(this::internalDeleteSnapshot).toArray(CompletableFuture[]::new)), + (v, e) -> { if (e != null) { future.completeExceptionally(e); } else { @@ -2269,15 +2257,13 @@ private CompletableFuture internalDeleteSnapshots(Pattern 
tableNamePattern } private CompletableFuture internalDeleteSnapshot(SnapshotDescription snapshot) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, - stub, - DeleteSnapshotRequest.newBuilder() - .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), (s, c, - req, done) -> s.deleteSnapshot(c, req, done), resp -> null)).call(); + return this. newMasterCaller() + .action((controller, stub) -> this + . call(controller, stub, + DeleteSnapshotRequest.newBuilder() + .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), + (s, c, req, done) -> s.deleteSnapshot(c, req, done), resp -> null)) + .call(); } @Override @@ -2285,12 +2271,13 @@ public CompletableFuture execProcedure(String signature, String instance, Map props) { CompletableFuture future = new CompletableFuture<>(); ProcedureDescription procDesc = - ProtobufUtil.buildProcedureDescription(signature, instance, props); + ProtobufUtil.buildProcedureDescription(signature, instance, props); addListener(this. newMasterCaller() - .action((controller, stub) -> this. call( - controller, stub, ExecProcedureRequest.newBuilder().setProcedure(procDesc).build(), - (s, c, req, done) -> s.execProcedure(c, req, done), resp -> resp.getExpectedTimeout())) - .call(), (expectedTimeout, err) -> { + .action((controller, stub) -> this. call( + controller, stub, ExecProcedureRequest.newBuilder().setProcedure(procDesc).build(), + (s, c, req, done) -> s.execProcedure(c, req, done), resp -> resp.getExpectedTimeout())) + .call(), + (expectedTimeout, err) -> { if (err != null) { future.completeExceptionally(err); return; @@ -2314,15 +2301,15 @@ public void run(Timeout timeout) throws Exception { } else { // retry again after pauseTime. long pauseTime = - ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); + ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); pauseTime = Math.min(pauseTime, maxPauseTime); AsyncConnectionImpl.RETRY_TIMER.newTimeout(this, pauseTime, TimeUnit.MICROSECONDS); } }); } else { - future.completeExceptionally(new IOException("Procedure '" + signature + " : " + - instance + "' wasn't completed in expectedTime:" + expectedTimeout + " ms")); + future.completeExceptionally(new IOException("Procedure '" + signature + " : " + + instance + "' wasn't completed in expectedTime:" + expectedTimeout + " ms")); } } }; @@ -2370,61 +2357,56 @@ public CompletableFuture abortProcedure(long procId, boolean mayInterru @Override public CompletableFuture getProcedures() { - return this - . newMasterCaller() + return this. newMasterCaller() .action( - (controller, stub) -> this - . call( - controller, stub, GetProceduresRequest.newBuilder().build(), - (s, c, req, done) -> s.getProcedures(c, req, done), - resp -> ProtobufUtil.toProcedureJson(resp.getProcedureList()))).call(); + (controller, stub) -> this. call( + controller, stub, GetProceduresRequest.newBuilder().build(), + (s, c, req, done) -> s.getProcedures(c, req, done), + resp -> ProtobufUtil.toProcedureJson(resp.getProcedureList()))) + .call(); } @Override public CompletableFuture getLocks() { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, stub, GetLocksRequest.newBuilder().build(), - (s, c, req, done) -> s.getLocks(c, req, done), - resp -> ProtobufUtil.toLockJson(resp.getLockList()))).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call( + controller, stub, GetLocksRequest.newBuilder().build(), + (s, c, req, done) -> s.getLocks(c, req, done), + resp -> ProtobufUtil.toLockJson(resp.getLockList()))) + .call(); } @Override - public CompletableFuture decommissionRegionServers( - List servers, boolean offload) { + public CompletableFuture decommissionRegionServers(List servers, + boolean offload) { return this. newMasterCaller() .action((controller, stub) -> this - . call( - controller, stub, + . call( + controller, stub, RequestConverter.buildDecommissionRegionServersRequest(servers, offload), - (s, c, req, done) -> s.decommissionRegionServers(c, req, done), resp -> null)) + (s, c, req, done) -> s.decommissionRegionServers(c, req, done), resp -> null)) .call(); } @Override public CompletableFuture> listDecommissionedRegionServers() { - return this.> newMasterCaller() - .action((controller, stub) -> this - .> call( - controller, stub, ListDecommissionedRegionServersRequest.newBuilder().build(), - (s, c, req, done) -> s.listDecommissionedRegionServers(c, req, done), - resp -> resp.getServerNameList().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList()))) + return this.> newMasterCaller().action((controller, stub) -> this + .> call( + controller, stub, ListDecommissionedRegionServersRequest.newBuilder().build(), + (s, c, req, done) -> s.listDecommissionedRegionServers(c, req, done), + resp -> resp.getServerNameList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList()))) .call(); } @Override public CompletableFuture recommissionRegionServer(ServerName server, List encodedRegionNames) { - return this. newMasterCaller() - .action((controller, stub) -> - this. call( - controller, stub, RequestConverter.buildRecommissionRegionServerRequest( - server, encodedRegionNames), (s, c, req, done) -> s.recommissionRegionServer( - c, req, done), resp -> null)).call(); + return this. newMasterCaller().action((controller, stub) -> this + . call(controller, + stub, RequestConverter.buildRecommissionRegionServerRequest(server, encodedRegionNames), + (s, c, req, done) -> s.recommissionRegionServer(c, req, done), resp -> null)) + .call(); } /** @@ -2445,8 +2427,8 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { // old format encodedName, should be meta region future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); } else { future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, regionNameOrEncodedRegionName); @@ -2456,20 +2438,20 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR // it needs to throw out IllegalArgumentException in case tableName is passed in. 
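getRegionLocation() here has to tell a full region name from an encoded one and reject a plain table name. A minimal, illustrative sketch of that parsing step (not part of the patch; the sample value and the import locations are assumptions):

    import java.io.IOException;
    import org.apache.hadoop.hbase.CatalogFamilyFormat;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionNameParseSketch {
      public static void main(String[] args) {
        // A bare table name is not a valid region name, so parsing is expected to fail.
        byte[] regionNameOrEncodedRegionName = Bytes.toBytes("my-table");
        try {
          RegionInfo regionInfo =
            CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName);
          System.out.println("Parsed region: " + regionInfo.getRegionNameAsString());
        } catch (IOException ioe) {
          // The admin code in this hunk rethrows this as an IllegalArgumentException.
          System.out.println("Not a region name: " + ioe.getMessage());
        }
      }
    }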
RegionInfo regionInfo; try { - regionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName( - regionNameOrEncodedRegionName); + regionInfo = + CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName); } catch (IOException ioe) { return failedFuture(new IllegalArgumentException(ioe.getMessage())); } if (regionInfo.isMetaRegion()) { future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) - .findFirst()); + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) + .findFirst()); } else { future = - ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); + ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); } } @@ -2481,8 +2463,8 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR } if (!location.isPresent() || location.get().getRegion() == null) { returnedFuture.completeExceptionally( - new UnknownRegionException("Invalid region name or encoded region name: " + - Bytes.toStringBinary(regionNameOrEncodedRegionName))); + new UnknownRegionException("Invalid region name or encoded region name: " + + Bytes.toStringBinary(regionNameOrEncodedRegionName))); } else { returnedFuture.complete(location.get()); } @@ -2502,9 +2484,9 @@ private CompletableFuture getRegionInfo(byte[] regionNameOrEncodedRe } if (Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) || - Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { + RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) + || Bytes.equals(regionNameOrEncodedRegionName, + RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { return CompletableFuture.completedFuture(RegionInfoBuilder.FIRST_META_REGIONINFO); } @@ -2641,7 +2623,7 @@ String getOperationType() { } private static class ModifyTableStoreFileTrackerProcedureBiConsumer - extends TableProcedureBiConsumer { + extends TableProcedureBiConsumer { ModifyTableStoreFileTrackerProcedureBiConsumer(AsyncAdmin admin, TableName tableName) { super(tableName); @@ -2744,7 +2726,7 @@ String getOperationType() { } private static class ModifyColumnFamilyStoreFileTrackerProcedureBiConsumer - extends TableProcedureBiConsumer { + extends TableProcedureBiConsumer { ModifyColumnFamilyStoreFileTrackerProcedureBiConsumer(TableName tableName) { super(tableName); @@ -2827,7 +2809,6 @@ String getOperationType() { } } - private static class ReplicationProcedureBiConsumer extends ProcedureBiConsumer { private final String peerId; private final Supplier getOperation; @@ -2866,12 +2847,11 @@ private CompletableFuture waitProcedureResult(CompletableFuture proc private void getProcedureResult(long procId, CompletableFuture future, int retries) { addListener( - this. newMasterCaller() - .action((controller, stub) -> this + this. newMasterCaller().action((controller, stub) -> this . 
call( controller, stub, GetProcedureResultRequest.newBuilder().setProcId(procId).build(), (s, c, req, done) -> s.getProcedureResult(c, req, done), (resp) -> resp)) - .call(), + .call(), (response, error) -> { if (error != null) { LOG.warn("failed to get the procedure result procId={}", procId, @@ -2915,54 +2895,51 @@ public CompletableFuture getClusterMetrics() { @Override public CompletableFuture getClusterMetrics(EnumSet
 *   ROW-KEY          FAM/QUAL               DATA                       DESC
 *   n.<namespace>    q:s                    <global-quotas>
 *   n.<namespace>    u:p                    <namespace-quota policy>
 *   n.<namespace>    u:s                    <SpaceQuotaSnapshot>       The size of all snapshots against tables in the namespace
 *   t.<table>        q:s                    <global-quotas>
 *   t.<table>        u:p                    <table-quota policy>
 *   t.<table>        u:ss.<snapshot name>   <SpaceQuotaSnapshot>       The size of a snapshot against a table
 *   u.<user>         q:s                    <global-quotas>
 *   u.<user>         q:s.<table>            <table-quotas>
 *   u.<user>         q:s.<ns>               <namespace-quotas>
    */ @InterfaceAudience.Private @@ -116,8 +155,9 @@ public class QuotaTableUtil { */ public static final String QUOTA_REGION_SERVER_ROW_KEY = "all"; - /* ========================================================================= - * Quota "settings" helpers + /* + * ========================================================================= Quota "settings" + * helpers */ public static Quotas getTableQuota(final Connection connection, final TableName table) throws IOException { @@ -188,10 +228,10 @@ public static Get makeGetForUserQuotas(final String user, final Iterable namespaces) { Get get = new Get(getUserRowKey(user)); get.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - for (final TableName table: tables) { + for (final TableName table : tables) { get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserTable(table)); } - for (final String ns: namespaces) { + for (final String ns : namespaces) { get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserNamespace(ns)); } return get; @@ -219,9 +259,8 @@ public static Filter makeFilter(final QuotaFilter filter) { FilterList nsFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); nsFilters.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); - nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator( - getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); + nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator( + getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); userFilters.addFilter(nsFilters); hasFilter = true; } @@ -229,9 +268,8 @@ public static Filter makeFilter(final QuotaFilter filter) { FilterList tableFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); tableFilters.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); - tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator( - getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); + tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator( + getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); userFilters.addFilter(tableFilters); hasFilter = true; } @@ -263,12 +301,12 @@ public static Scan makeQuotaSnapshotScan() { /** * Fetches all {@link SpaceQuotaSnapshot} objects from the {@code hbase:quota} table. - * * @param conn The HBase connection * @return A map of table names and their computed snapshot. */ - public static Map getSnapshots(Connection conn) throws IOException { - Map snapshots = new HashMap<>(); + public static Map getSnapshots(Connection conn) + throws IOException { + Map snapshots = new HashMap<>(); try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); ResultScanner rs = quotaTable.getScanner(makeQuotaSnapshotScan())) { for (Result r : rs) { @@ -311,15 +349,14 @@ public static Get makeQuotaSnapshotGetForTable(TableName tn) { /** * Extracts the {@link SpaceViolationPolicy} and {@link TableName} from the provided - * {@link Result} and adds them to the given {@link Map}. If the result does not contain - * the expected information or the serialized policy in the value is invalid, this method - * will throw an {@link IllegalArgumentException}. - * + * {@link Result} and adds them to the given {@link Map}. 
If the result does not contain the + * expected information or the serialized policy in the value is invalid, this method will throw + * an {@link IllegalArgumentException}. * @param result A row from the quota table. * @param snapshots A map of snapshots to add the result of this method into. */ - public static void extractQuotaSnapshot( - Result result, Map snapshots) { + public static void extractQuotaSnapshot(Result result, + Map snapshots) { byte[] row = Objects.requireNonNull(result).getRow(); if (row == null || row.length == 0) { throw new IllegalArgumentException("Provided result had a null row"); @@ -330,8 +367,8 @@ public static void extractQuotaSnapshot( throw new IllegalArgumentException("Result did not contain the expected column " + QUOTA_POLICY_COLUMN + ", " + result.toString()); } - ByteString buffer = UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength()); + ByteString buffer = + UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); try { QuotaProtos.SpaceQuotaSnapshot snapshot = QuotaProtos.SpaceQuotaSnapshot.parseFrom(buffer); snapshots.put(targetTableName, SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot)); @@ -342,27 +379,25 @@ public static void extractQuotaSnapshot( } public static interface UserQuotasVisitor { - void visitUserQuotas(final String userName, final Quotas quotas) - throws IOException; + void visitUserQuotas(final String userName, final Quotas quotas) throws IOException; + void visitUserQuotas(final String userName, final TableName table, final Quotas quotas) - throws IOException; + throws IOException; + void visitUserQuotas(final String userName, final String namespace, final Quotas quotas) - throws IOException; + throws IOException; } public static interface TableQuotasVisitor { - void visitTableQuotas(final TableName tableName, final Quotas quotas) - throws IOException; + void visitTableQuotas(final TableName tableName, final Quotas quotas) throws IOException; } public static interface NamespaceQuotasVisitor { - void visitNamespaceQuotas(final String namespace, final Quotas quotas) - throws IOException; + void visitNamespaceQuotas(final String namespace, final Quotas quotas) throws IOException; } private static interface RegionServerQuotasVisitor { - void visitRegionServerQuotas(final String regionServer, final Quotas quotas) - throws IOException; + void visitRegionServerQuotas(final String regionServer, final Quotas quotas) throws IOException; } public static interface QuotasVisitor extends UserQuotasVisitor, TableQuotasVisitor, @@ -426,8 +461,8 @@ public void visitRegionServerQuotas(String regionServer, Quotas quotas) { }); } - public static void parseNamespaceResult(final Result result, - final NamespaceQuotasVisitor visitor) throws IOException { + public static void parseNamespaceResult(final Result result, final NamespaceQuotasVisitor visitor) + throws IOException { String namespace = getNamespaceFromRowKey(result.getRow()); parseNamespaceResult(namespace, result, visitor); } @@ -482,7 +517,7 @@ protected static void parseUserResult(final String userName, final Result result Map familyMap = result.getFamilyMap(QUOTA_FAMILY_INFO); if (familyMap == null || familyMap.isEmpty()) return; - for (Map.Entry entry: familyMap.entrySet()) { + for (Map.Entry entry : familyMap.entrySet()) { Quotas quotas = quotasFromData(entry.getValue()); if (Bytes.startsWith(entry.getKey(), QUOTA_QUALIFIER_SETTINGS_PREFIX)) { String name = Bytes.toString(entry.getKey(), 
QUOTA_QUALIFIER_SETTINGS_PREFIX.length); @@ -505,9 +540,8 @@ protected static void parseUserResult(final String userName, final Result result */ static Put createPutForSpaceSnapshot(TableName tableName, SpaceQuotaSnapshot snapshot) { Put p = new Put(getTableRowKey(tableName)); - p.addColumn( - QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY, - SpaceQuotaSnapshot.toProtoSnapshot(snapshot).toByteArray()); + p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY, + SpaceQuotaSnapshot.toProtoSnapshot(snapshot).toByteArray()); return p; } @@ -516,23 +550,22 @@ static Put createPutForSpaceSnapshot(TableName tableName, SpaceQuotaSnapshot sna */ static Get makeGetForSnapshotSize(TableName tn, String snapshot) { Get g = new Get(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(tn.toString()))); - g.addColumn( - QUOTA_FAMILY_USAGE, - Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); + g.addColumn(QUOTA_FAMILY_USAGE, + Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); return g; } /** - * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to - * the given {@code table}. + * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to the + * given {@code table}. */ static Put createPutForSnapshotSize(TableName tableName, String snapshot, long size) { // We just need a pb message with some `long usage`, so we can just reuse the // SpaceQuotaSnapshot message instead of creating a new one. Put p = new Put(getTableRowKey(tableName)); p.addColumn(QUOTA_FAMILY_USAGE, getSnapshotSizeQualifier(snapshot), - org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot - .newBuilder().setQuotaUsage(size).build().toByteArray()); + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.newBuilder() + .setQuotaUsage(size).build().toByteArray()); return p; } @@ -542,14 +575,14 @@ static Put createPutForSnapshotSize(TableName tableName, String snapshot, long s static Put createPutForNamespaceSnapshotSize(String namespace, long size) { Put p = new Put(getNamespaceRowKey(namespace)); p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER, - org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot - .newBuilder().setQuotaUsage(size).build().toByteArray()); + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.newBuilder() + .setQuotaUsage(size).build().toByteArray()); return p; } /** - * Returns a list of {@code Delete} to remove given table snapshot - * entries to remove from quota table + * Returns a list of {@code Delete} to remove given table snapshot entries to remove from quota + * table * @param snapshotEntriesToRemove the entries to remove */ static List createDeletesForExistingTableSnapshotSizes( @@ -560,7 +593,7 @@ static List createDeletesForExistingTableSnapshotSizes( for (String snapshot : entry.getValue()) { Delete d = new Delete(getTableRowKey(entry.getKey())); d.addColumns(QUOTA_FAMILY_USAGE, - Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); + Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); deletes.add(d); } } @@ -577,12 +610,12 @@ static List createDeletesForExistingTableSnapshotSizes(Connection connec } /** - * Returns a list of {@code Delete} to remove given namespace snapshot - * entries to removefrom quota table + * Returns a list of {@code Delete} to remove given namespace snapshot entries to removefrom quota + * table * @param snapshotEntriesToRemove the 
entries to remove */ - static List createDeletesForExistingNamespaceSnapshotSizes( - Set snapshotEntriesToRemove) { + static List + createDeletesForExistingNamespaceSnapshotSizes(Set snapshotEntriesToRemove) { List deletes = new ArrayList<>(); for (String snapshot : snapshotEntriesToRemove) { Delete d = new Delete(getNamespaceRowKey(snapshot)); @@ -599,7 +632,7 @@ static List createDeletesForExistingNamespaceSnapshotSizes( static List createDeletesForExistingNamespaceSnapshotSizes(Connection connection) throws IOException { return createDeletesForExistingSnapshotsFromScan(connection, - createScanForNamespaceSnapshotSizes()); + createScanForNamespaceSnapshotSizes()); } /** @@ -634,23 +667,23 @@ static List createDeletesForExistingSnapshotsFromScan(Connection connect * @param namespace the namespace to fetch the list of table usage snapshots */ static void deleteTableUsageSnapshotsForNamespace(Connection connection, String namespace) - throws IOException { + throws IOException { Scan s = new Scan(); - //Get rows for all tables in namespace + // Get rows for all tables in namespace s.setStartStopRowForPrefixScan( Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM))); - //Scan for table usage column (u:p) in quota table - s.addColumn(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY); - //Scan for table quota column (q:s) if table has a space quota defined - s.addColumn(QUOTA_FAMILY_INFO,QUOTA_QUALIFIER_SETTINGS); + // Scan for table usage column (u:p) in quota table + s.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); + // Scan for table quota column (q:s) if table has a space quota defined + s.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(s)) { + ResultScanner rs = quotaTable.getScanner(s)) { for (Result r : rs) { byte[] data = r.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - //if table does not have a table space quota defined, delete table usage column (u:p) + // if table does not have a table space quota defined, delete table usage column (u:p) if (data == null) { Delete delete = new Delete(r.getRow()); - delete.addColumns(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY); + delete.addColumns(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); quotaTable.delete(delete); } } @@ -660,8 +693,7 @@ static void deleteTableUsageSnapshotsForNamespace(Connection connection, String /** * Fetches the computed size of all snapshots against tables in a namespace for space quotas. */ - static long getNamespaceSnapshotSize( - Connection conn, String namespace) throws IOException { + static long getNamespaceSnapshotSize(Connection conn, String namespace) throws IOException { try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { Result r = quotaTable.get(createGetNamespaceSnapshotSize(namespace)); if (r.isEmpty()) { @@ -687,8 +719,8 @@ static Get createGetNamespaceSnapshotSize(String namespace) { * Parses the snapshot size from the given Cell's value. 
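createPutForSnapshotSize() above and parseSnapshotSize() just below agree on one encoding: a snapshot's size is stored as a SpaceQuotaSnapshot protobuf whose only populated field is quotaUsage. A small round-trip sketch of that encoding (illustrative only, not part of the patch; it assumes the shaded protobuf classes are on the classpath):

    import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;

    public class SnapshotSizeEncodingSketch {
      public static void main(String[] args) throws Exception {
        long size = 42L * 1024 * 1024; // bytes used by a snapshot
        // What createPutForSnapshotSize() stores in the cell value.
        byte[] cellValue =
          QuotaProtos.SpaceQuotaSnapshot.newBuilder().setQuotaUsage(size).build().toByteArray();
        // What parseSnapshotSize() reads back from that cell.
        long parsed = QuotaProtos.SpaceQuotaSnapshot.parseFrom(cellValue).getQuotaUsage();
        System.out.println(parsed == size); // true
      }
    }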
*/ static long parseSnapshotSize(Cell c) throws InvalidProtocolBufferException { - ByteString bs = UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength()); + ByteString bs = + UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); return QuotaProtos.SpaceQuotaSnapshot.parseFrom(bs).getQuotaUsage(); } @@ -737,22 +769,21 @@ static Scan createScanForSpaceSnapshotSizes(TableName table) { } // Just the usage family and only the snapshot size qualifiers - return s.addFamily(QUOTA_FAMILY_USAGE).setFilter( - new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); + return s.addFamily(QUOTA_FAMILY_USAGE) + .setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); } /** * Fetches any persisted HBase snapshot sizes stored in the quota table. The sizes here are - * computed relative to the table which the snapshot was created from. A snapshot's size will - * not include the size of files which the table still refers. These sizes, in bytes, are what - * is used internally to compute quota violation for tables and namespaces. - * + * computed relative to the table which the snapshot was created from. A snapshot's size will not + * include the size of files which the table still refers. These sizes, in bytes, are what is used + * internally to compute quota violation for tables and namespaces. * @return A map of snapshot name to size in bytes per space quota computations */ - public static Map getObservedSnapshotSizes(Connection conn) throws IOException { + public static Map getObservedSnapshotSizes(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { - final Map snapshotSizes = new HashMap<>(); + final Map snapshotSizes = new HashMap<>(); for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { @@ -828,15 +859,16 @@ public static SpaceQuotaSnapshot getCurrentSnapshotFromQuotaTable(Connection con } } - /* ========================================================================= - * Quotas protobuf helpers + /* + * ========================================================================= Quotas protobuf + * helpers */ protected static Quotas quotasFromData(final byte[] data) throws IOException { return quotasFromData(data, 0, data.length); } - protected static Quotas quotasFromData( - final byte[] data, int offset, int length) throws IOException { + protected static Quotas quotasFromData(final byte[] data, int offset, int length) + throws IOException { int magicLen = ProtobufMagic.lengthOfPBMagic(); if (!ProtobufMagic.isPBMagicPrefix(data, offset, magicLen)) { throw new IOException("Missing pb magic prefix"); @@ -863,11 +895,10 @@ public static boolean isEmptyQuota(final Quotas quotas) { return !hasSettings; } - /* ========================================================================= - * HTable helpers + /* + * ========================================================================= HTable helpers */ - protected static Result doGet(final Connection connection, final Get get) - throws IOException { + protected static Result doGet(final Connection connection, final Get get) throws IOException { try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(get); } @@ -880,8 +911,9 @@ protected static Result[] doGet(final Connection connection, final List get } } - /* ========================================================================= - * Quota table row key 
helpers + /* + * ========================================================================= Quota table row key + * helpers */ protected static byte[] getUserRowKey(final String user) { return Bytes.add(QUOTA_USER_ROW_KEY_PREFIX, Bytes.toBytes(user)); @@ -905,7 +937,7 @@ protected static byte[] getSettingsQualifierForUserTable(final TableName tableNa protected static byte[] getSettingsQualifierForUserNamespace(final String namespace) { return Bytes.add(QUOTA_QUALIFIER_SETTINGS_PREFIX, - Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)); + Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)); } protected static String getUserRowKeyRegex(final String user) { @@ -933,13 +965,13 @@ private static String getRowKeyRegEx(final byte[] prefix, final String regex) { } protected static String getSettingsQualifierRegexForUserTable(final String table) { - return '^' + Pattern.quote(Bytes.toString(QUOTA_QUALIFIER_SETTINGS_PREFIX)) + - table + "(?> tableCFsMap) { + public ReplicationPeerConfigBuilder setTableCFsMap(Map> tableCFsMap) { this.tableCFsMap = tableCFsMap; return this; } @@ -311,9 +310,9 @@ public boolean needToReplicate(TableName table) { * this peer config. * @param table name of the table * @param family family name - * @return true if (the family of) the table need replicate to the peer cluster. - * If passed family is null, return true if any CFs of the table need replicate; - * If passed family is not null, return true if the passed family need replicate. + * @return true if (the family of) the table need replicate to the peer cluster. If passed family + * is null, return true if any CFs of the table need replicate; If passed family is not + * null, return true if the passed family need replicate. */ public boolean needToReplicate(TableName table, byte[] family) { String namespace = table.getNamespaceAsString(); @@ -330,9 +329,9 @@ public boolean needToReplicate(TableName table, byte[] family) { // If cfs is null or empty then we can make sure that we do not need to replicate this table, // otherwise, we may still need to replicate the table but filter out some families. return cfs != null && !cfs.isEmpty() - // If exclude-table-cfs contains passed family then we make sure that we do not need to - // replicate this family. - && (family == null || !cfs.contains(Bytes.toString(family))); + // If exclude-table-cfs contains passed family then we make sure that we do not need to + // replicate this family. + && (family == null || !cfs.contains(Bytes.toString(family))); } else { // Not replicate all user tables, so filter by namespaces and table-cfs config if (namespaces == null && tableCFsMap == null) { @@ -346,9 +345,9 @@ public boolean needToReplicate(TableName table, byte[] family) { // If table-cfs contains this table then we can make sure that we need replicate some CFs of // this table. Further we need all CFs if tableCFsMap.get(table) is null or empty. return tableCFsMap != null && tableCFsMap.containsKey(table) - && (family == null || CollectionUtils.isEmpty(tableCFsMap.get(table)) - // If table-cfs must contain passed family then we need to replicate this family. - || tableCFsMap.get(table).contains(Bytes.toString(family))); + && (family == null || CollectionUtils.isEmpty(tableCFsMap.get(table)) + // If table-cfs must contain passed family then we need to replicate this family. 
+ || tableCFsMap.get(table).contains(Bytes.toString(family))); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java index c6a97fad9e81..d9cb7f1fa16f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -60,7 +58,6 @@ public interface ReplicationPeerConfigBuilder { @InterfaceAudience.Private ReplicationPeerConfigBuilder removeConfiguration(String key); - /** * Adds all of the provided "raw" configuration entries to {@code this}. * @param configuration A collection of raw configuration entries @@ -90,17 +87,15 @@ default ReplicationPeerConfigBuilder putAllPeerData(Map peerData } /** - * Sets an explicit map of tables and column families in those tables that should be replicated - * to the given peer. Use {@link #setReplicateAllUserTables(boolean)} to replicate all tables - * to a peer. - * + * Sets an explicit map of tables and column families in those tables that should be replicated to + * the given peer. Use {@link #setReplicateAllUserTables(boolean)} to replicate all tables to a + * peer. * @param tableCFsMap A map from tableName to column family names. An empty collection can be - * passed to indicate replicating all column families. + * passed to indicate replicating all column families. * @return {@code this} * @see #setReplicateAllUserTables(boolean) */ - ReplicationPeerConfigBuilder - setTableCFsMap(Map> tableCFsMap); + ReplicationPeerConfigBuilder setTableCFsMap(Map> tableCFsMap); /** * Sets a unique collection of HBase namespaces that should be replicated to this peer. @@ -125,12 +120,11 @@ default ReplicationPeerConfigBuilder putAllPeerData(Map peerData ReplicationPeerConfigBuilder setReplicateAllUserTables(boolean replicateAllUserTables); /** - * Sets the mapping of table name to column families which should not be replicated. This - * method sets state which is mutually exclusive to {@link #setTableCFsMap(Map)}. Invoking this - * method is only relevant when all user tables are being replicated. - * - * @param tableCFsMap A mapping of table names to column families which should not be - * replicated. An empty list of column families implies all families for the table. + * Sets the mapping of table name to column families which should not be replicated. This method + * sets state which is mutually exclusive to {@link #setTableCFsMap(Map)}. Invoking this method is + * only relevant when all user tables are being replicated. + * @param tableCFsMap A mapping of table names to column families which should not be replicated. + * An empty list of column families implies all families for the table. * @return {@code this}. 
*/ ReplicationPeerConfigBuilder setExcludeTableCFsMap(Map> tableCFsMap); @@ -140,7 +134,6 @@ default ReplicationPeerConfigBuilder putAllPeerData(Map peerData * configured to be replicated. This method sets state which is mutually exclusive to * {@link #setNamespaces(Set)}. Invoking this method is only relevant when all user tables are * being replicated. - * * @param namespaces A set of namespaces whose tables should not be replicated. * @return {@code this} */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java index b0c27bb704a0..330f75ea7186 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java index de9576caebdb..6af63dce1bc8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,11 +64,11 @@ public int value() { public static byte[] toByteArray(SyncReplicationState state) { return ProtobufUtil - .prependPBMagic(ReplicationPeerConfigUtil.toSyncReplicationState(state).toByteArray()); + .prependPBMagic(ReplicationPeerConfigUtil.toSyncReplicationState(state).toByteArray()); } public static SyncReplicationState parseFrom(byte[] bytes) throws InvalidProtocolBufferException { return ReplicationPeerConfigUtil.toSyncReplicationState(ReplicationProtos.SyncReplicationState - .parseFrom(Arrays.copyOfRange(bytes, ProtobufUtil.lengthOfPBMagic(), bytes.length))); + .parseFrom(Arrays.copyOfRange(bytes, ProtobufUtil.lengthOfPBMagic(), bytes.length))); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java index e8316d1cce79..07904d83223c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,8 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.token.Token; @@ -58,7 +56,7 @@ public abstract class AbstractHBaseSaslRpcClient { protected AbstractHBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, Token token, InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed) - throws IOException { + throws IOException { this(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, "authentication"); } @@ -80,11 +78,11 @@ protected AbstractHBaseSaslRpcClient(Configuration conf, this.fallbackAllowed = fallbackAllowed; saslProps = SaslUtil.initSaslProperties(rpcProtection); - saslClient = provider.createClient( - conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); + saslClient = + provider.createClient(conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); if (saslClient == null) { - throw new IOException("Authentication provider " + provider.getClass() - + " returned a null SaslClient"); + throw new IOException( + "Authentication provider " + provider.getClass() + " returned a null SaslClient"); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java index 259a0a4d651d..873132899d98 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.yetus.audience.InterfaceAudience; - /** * Exception thrown by access-related methods. */ @@ -33,7 +32,7 @@ public AccessDeniedException() { } public AccessDeniedException(Class clazz, String s) { - super( "AccessDenied [" + clazz.getName() + "]: " + s); + super("AccessDenied [" + clazz.getName() + "]: " + s); } public AccessDeniedException(String s) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java index 65fc6172236d..e6c4822d0809 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.yetus.audience.InterfaceAudience; /** Authentication method */ @InterfaceAudience.Private @@ -39,7 +36,7 @@ public enum AuthMethod { public final UserGroupInformation.AuthenticationMethod authenticationMethod; AuthMethod(byte code, String mechanismName, - UserGroupInformation.AuthenticationMethod authMethod) { + UserGroupInformation.AuthenticationMethod authMethod) { this.code = code; this.mechanismName = mechanismName; this.authenticationMethod = authMethod; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java index 97be44fff10d..31ed191f91a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; /** * Unwrap messages with Crypto AES. Should be placed after a diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java index ceb3f35c0c75..40ce32073f8b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,10 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; @@ -27,9 +29,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.CoalescingBufferQueue; import org.apache.hbase.thirdparty.io.netty.util.ReferenceCountUtil; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; - /** * wrap messages with Crypto AES. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java index 74ad96e2cbda..ed163dcc31ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java @@ -36,7 +36,9 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.EncryptionProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; @@ -55,8 +57,8 @@ private EncryptionUtil() { } /** - * Protect a key by encrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. + * Protect a key by encrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. * @param conf configuration * @param key the raw key bytes * @param algorithm the algorithm to use with this key material @@ -71,18 +73,16 @@ public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm) } /** - * Protect a key by encrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. + * Protect a key by encrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. * @param conf configuration * @param subject subject key alias * @param key the key * @return the encrypted key bytes */ - public static byte[] wrapKey(Configuration conf, String subject, Key key) - throws IOException { + public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException { // Wrap the key with the configured encryption algorithm. 
- String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); @@ -98,11 +98,11 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) byte[] keyBytes = key.getEncoded(); builder.setLength(keyBytes.length); builder.setHashAlgorithm(Encryption.getConfiguredHashAlgorithm(conf)); - builder.setHash( - UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); + builder + .setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); ByteArrayOutputStream out = new ByteArrayOutputStream(); - Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, - conf, cipher, iv); + Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher, + iv); builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray())); // Build and return the protobuf message out.reset(); @@ -111,8 +111,8 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) } /** - * Unwrap a key by decrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. + * Unwrap a key by decrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. * @param conf configuration * @param subject subject key alias * @param value the encrypted key bytes @@ -122,10 +122,9 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) */ public static Key unwrapKey(Configuration conf, String subject, byte[] value) throws IOException, KeyException { - EncryptionProtos.WrappedKey wrappedKey = EncryptionProtos.WrappedKey.PARSER - .parseDelimitedFrom(new ByteArrayInputStream(value)); - String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, - HConstants.CIPHER_AES); + EncryptionProtos.WrappedKey wrappedKey = + EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value)); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); @@ -137,22 +136,22 @@ private static Key getUnwrapKey(Configuration conf, String subject, EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException { String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf); String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim(); - if(!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { + if (!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { String msg = String.format("Unexpected encryption key hash algorithm: %s (expecting: %s)", wrappedHashAlgorithm, configuredHashAlgorithm); - if(Encryption.failOnHashAlgorithmMismatch(conf)) { + if (Encryption.failOnHashAlgorithmMismatch(conf)) { throw new KeyException(msg); } LOG.debug(msg); } ByteArrayOutputStream out = new ByteArrayOutputStream(); byte[] iv = wrappedKey.hasIv() ? 
wrappedKey.getIv().toByteArray() : null; - Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), - wrappedKey.getLength(), subject, conf, cipher, iv); + Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(), + subject, conf, cipher, iv); byte[] keyBytes = out.toByteArray(); if (wrappedKey.hasHash()) { if (!Bytes.equals(wrappedKey.getHash().toByteArray(), - Encryption.hashWithAlg(wrappedHashAlgorithm, keyBytes))) { + Encryption.hashWithAlg(wrappedHashAlgorithm, keyBytes))) { throw new KeyException("Key was not successfully unwrapped"); } } @@ -183,7 +182,6 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) /** * Helper to create an encyption context. - * * @param conf The current configuration. * @param family The current column descriptor. * @return The created encryption context. @@ -191,13 +189,13 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) * @throws IllegalStateException in case of encryption related configuration errors */ public static Encryption.Context createEncryptionContext(Configuration conf, - ColumnFamilyDescriptor family) throws IOException { + ColumnFamilyDescriptor family) throws IOException { Encryption.Context cryptoContext = Encryption.Context.NONE; String cipherName = family.getEncryptionType(); if (cipherName != null) { - if(!Encryption.isEncryptionEnabled(conf)) { + if (!Encryption.isEncryptionEnabled(conf)) { throw new IllegalStateException("Encryption for family '" + family.getNameAsString() - + "' configured with type '" + cipherName + "' but the encryption feature is disabled"); + + "' configured with type '" + cipherName + "' but the encryption feature is disabled"); } Cipher cipher; Key key; @@ -214,9 +212,9 @@ public static Encryption.Context createEncryptionContext(Configuration conf, // We use the encryption type specified in the column schema as a sanity check on // what the wrapped key is telling us if (!cipher.getName().equalsIgnoreCase(cipherName)) { - throw new IllegalStateException("Encryption for family '" + family.getNameAsString() - + "' configured with type '" + cipherName + "' but key specifies algorithm '" - + cipher.getName() + "'"); + throw new IllegalStateException( + "Encryption for family '" + family.getNameAsString() + "' configured with type '" + + cipherName + "' but key specifies algorithm '" + cipher.getName() + "'"); } } else { // Family does not provide key material, create a random key @@ -236,10 +234,7 @@ public static Encryption.Context createEncryptionContext(Configuration conf, /** * Helper for {@link #unwrapKey(Configuration, String, byte[])} which automatically uses the * configured master and alternative keys, rather than having to specify a key type to unwrap - * with. - * - * The configuration must be set up correctly for key alias resolution. - * + * with. The configuration must be set up correctly for key alias resolution. 
* @param conf the current configuration * @param keyBytes the key encrypted by master (or alternative) to unwrap * @return the key bytes, decrypted @@ -247,8 +242,8 @@ public static Encryption.Context createEncryptionContext(Configuration conf, */ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOException { Key key; - String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()); + String masterKeyName = + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()); try { // First try the master key key = unwrapKey(conf, masterKeyName, keyBytes); @@ -258,8 +253,7 @@ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOExcept if (LOG.isDebugEnabled()) { LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'"); } - String alternateKeyName = - conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); + String alternateKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); if (alternateKeyName != null) { try { key = unwrapKey(conf, alternateKeyName, keyBytes); @@ -275,7 +269,6 @@ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOExcept /** * Helper to create an instance of CryptoAES. - * * @param conf The current configuration. * @param cryptoCipherMeta The metadata for create CryptoAES. * @return The instance of CryptoAES. @@ -286,13 +279,11 @@ public static CryptoAES createCryptoAES(RPCProtos.CryptoCipherMeta cryptoCipherM Properties properties = new Properties(); // the property for cipher class properties.setProperty(CryptoCipherFactory.CLASSES_KEY, - conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", - "org.apache.commons.crypto.cipher.JceCipher")); + conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", + "org.apache.commons.crypto.cipher.JceCipher")); // create SaslAES for client return new CryptoAES(cryptoCipherMeta.getTransformation(), properties, - cryptoCipherMeta.getInKey().toByteArray(), - cryptoCipherMeta.getOutKey().toByteArray(), - cryptoCipherMeta.getInIv().toByteArray(), - cryptoCipherMeta.getOutIv().toByteArray()); + cryptoCipherMeta.getInKey().toByteArray(), cryptoCipherMeta.getOutKey().toByteArray(), + cryptoCipherMeta.getInIv().toByteArray(), cryptoCipherMeta.getOutIv().toByteArray()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java index 03af94ddad96..fde71630a1af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; import java.io.BufferedInputStream; @@ -29,14 +28,11 @@ import java.io.OutputStream; import java.net.InetAddress; import java.nio.ByteBuffer; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.SaslInputStream; @@ -47,6 +43,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; + /** * A utility class that encapsulates SASL logic for RPC client. Copied from * org.apache.hadoop.security @@ -72,7 +70,7 @@ public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider p public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, Token token, InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, String rpcProtection, boolean initStreamForCrypto) - throws IOException { + throws IOException { super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, rpcProtection); this.initStreamForCrypto = initStreamForCrypto; } @@ -151,9 +149,8 @@ public boolean saslConnect(InputStream inS, OutputStream outS) throws IOExceptio try { readStatus(inStream); - } - catch (IOException e){ - if(e instanceof RemoteException){ + } catch (IOException e) { + if (e instanceof RemoteException) { LOG.debug("Sasl connection failed: ", e); throw e; } @@ -189,8 +186,8 @@ public String getSaslQOP() { return (String) saslClient.getNegotiatedProperty(Sasl.QOP); } - public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, - Configuration conf) throws IOException { + public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, Configuration conf) + throws IOException { // create SaslAES for client cryptoAES = EncryptionUtil.createCryptoAES(cryptoCipherMeta, conf); cryptoAesEnable = true; @@ -214,6 +211,7 @@ public InputStream getInputStream() throws IOException { class WrappedInputStream extends FilterInputStream { private ByteBuffer unwrappedRpcBuffer = ByteBuffer.allocate(0); + public WrappedInputStream(InputStream in) throws IOException { super(in); } @@ -279,6 +277,7 @@ class WrappedOutputStream extends FilterOutputStream { public WrappedOutputStream(OutputStream out) throws IOException { super(out); } + @Override public void write(byte[] buf, int off, int len) throws IOException { if (LOG.isDebugEnabled()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java index e4611d181378..8c67e851ce65 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; -import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; /** @@ -43,7 +44,7 @@ public class NettyHBaseRpcConnectionHeaderHandler extends SimpleChannelInboundHa private final ByteBuf connectionHeaderWithLength; public NettyHBaseRpcConnectionHeaderHandler(Promise saslPromise, Configuration conf, - ByteBuf connectionHeaderWithLength) { + ByteBuf connectionHeaderWithLength) { this.saslPromise = saslPromise; this.conf = conf; this.connectionHeaderWithLength = connectionHeaderWithLength; @@ -61,8 +62,8 @@ protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Excep // Get the CryptoCipherMeta, update the HBaseSaslRpcClient for Crypto Cipher if (connectionHeaderResponse.hasCryptoCipherMeta()) { - CryptoAES cryptoAES = EncryptionUtil.createCryptoAES( - connectionHeaderResponse.getCryptoCipherMeta(), conf); + CryptoAES cryptoAES = + EncryptionUtil.createCryptoAES(connectionHeaderResponse.getCryptoCipherMeta(), conf); // replace the Sasl handler with Crypto AES handler setupCryptoAESHandler(ctx.pipeline(), cryptoAES); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java index a5b980350d15..a5293f2c1aff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,9 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; -import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; - import java.io.IOException; import java.net.InetAddress; - import javax.security.sasl.Sasl; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.token.Token; @@ -33,6 +28,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; +import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; + /** * Implement SASL logic for netty rpc client. 
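The SASL client classes in these hunks react to a handful of client-side settings. A small sketch of those settings; the property names are taken verbatim from the hunks above, the values are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcEncryptionSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // SASL QOP: "authentication", "integrity" or "privacy" (auth-conf).
    conf.set("hbase.rpc.protection", "privacy");
    // With privacy negotiated, switch wire encryption to Crypto AES
    // (see initCryptoCipher and NettyHBaseRpcConnectionHeaderHandler above).
    conf.setBoolean("hbase.rpc.crypto.encryption.aes.enabled", true);
    // Underlying commons-crypto cipher implementation (this is the default).
    conf.set("hbase.rpc.crypto.encryption.aes.cipher.class",
      "org.apache.commons.crypto.cipher.JceCipher");
    System.out.println(conf.get("hbase.rpc.protection"));
  }
}
```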
* @since 2.0.0 diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java index 2dd80ab1ca02..ac28c69de0b4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,25 +17,24 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; -import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; - import java.io.IOException; import java.net.InetAddress; import java.security.PrivilegedExceptionAction; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.hadoop.hbase.ipc.FallbackDisallowedException; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; +import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; /** * Implement SASL logic for netty rpc client. @@ -72,8 +71,8 @@ public NettyHBaseSaslRpcClientHandler(Promise saslPromise, UserGroupInf this.conf = conf; this.provider = provider; this.saslRpcClient = new NettyHBaseSaslRpcClient(conf, provider, token, serverAddr, - securityInfo, fallbackAllowed, conf.get( - "hbase.rpc.protection", SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); + securityInfo, fallbackAllowed, conf.get("hbase.rpc.protection", + SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); } private void writeResponse(ChannelHandlerContext ctx, byte[] response) { @@ -99,10 +98,10 @@ private void tryComplete(ChannelHandlerContext ctx) { } private void setCryptoAESOption() { - boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY. 
- getSaslQop().equalsIgnoreCase(saslRpcClient.getSaslQOP()); - needProcessConnectionHeader = saslEncryptionEnabled && conf.getBoolean( - "hbase.rpc.crypto.encryption.aes.enabled", false); + boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop() + .equalsIgnoreCase(saslRpcClient.getSaslQOP()); + needProcessConnectionHeader = + saslEncryptionEnabled && conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false); } public boolean isNeedProcessConnectionHeader() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java index cbbcb0e77616..952550ef4c8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; -import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; - import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; /** * Decode the sasl challenge sent by RpcServer. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java index 332bc1933d6e..90012c8bb1b6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum SaslStatus { - SUCCESS (0), - ERROR (1); + SUCCESS(0), ERROR(1); public final int state; + SaslStatus(int state) { this.state = state; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java index 00d0c41240ac..dfc36e4ba314 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.security; +import javax.security.sasl.SaslClient; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import javax.security.sasl.SaslClient; - -import org.apache.yetus.audience.InterfaceAudience; - /** * Unwrap sasl messages. Should be placed after a * io.netty.handler.codec.LengthFieldBasedFrameDecoder diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java index ad2067f2cf22..e2c77845df92 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +20,10 @@ import java.util.Base64; import java.util.Map; import java.util.TreeMap; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; import javax.security.sasl.SaslServer; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -39,9 +36,7 @@ public class SaslUtil { public static final int SWITCH_TO_SIMPLE_AUTH = -88; public enum QualityOfProtection { - AUTHENTICATION("auth"), - INTEGRITY("auth-int"), - PRIVACY("auth-conf"); + AUTHENTICATION("auth"), INTEGRITY("auth-int"), PRIVACY("auth-conf"); private final String saslQop; @@ -81,8 +76,8 @@ public static char[] encodePassword(byte[] password) { } /** - * Returns {@link org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection} - * corresponding to the given {@code stringQop} value. + * Returns {@link org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection} corresponding to + * the given {@code stringQop} value. * @throws IllegalArgumentException If stringQop doesn't match any QOP. */ public static QualityOfProtection getQop(String stringQop) { @@ -91,7 +86,7 @@ public static QualityOfProtection getQop(String stringQop) { return qop; } } - throw new IllegalArgumentException("Invalid qop: " + stringQop + throw new IllegalArgumentException("Invalid qop: " + stringQop + ". 
It must be one of 'authentication', 'integrity', 'privacy'."); } @@ -110,7 +105,7 @@ public static Map initSaslProperties(String rpcProtection) { QualityOfProtection qop = getQop(qops[i]); saslQopBuilder.append(",").append(qop.getSaslQop()); } - saslQop = saslQopBuilder.substring(1); // remove first ',' + saslQop = saslQopBuilder.substring(1); // remove first ',' } Map saslProps = new TreeMap<>(); saslProps.put(Sasl.QOP, saslQop); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java index 62c127e2dfb3..006a24f0fd07 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security; import javax.security.sasl.SaslClient; - import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.yetus.audience.InterfaceAudience; @@ -31,7 +30,6 @@ import org.apache.hbase.thirdparty.io.netty.util.ReferenceCountUtil; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; - /** * wrap sasl messages. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java index 749190a6bbc9..dbb4c83844a4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -35,30 +35,28 @@ @InterfaceAudience.Private public class SecurityInfo { /** Maps RPC service names to authentication information */ - private static ConcurrentMap infos = new ConcurrentHashMap<>(); + private static ConcurrentMap infos = new ConcurrentHashMap<>(); // populate info for known services static { infos.put(AdminProtos.AdminService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, - Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(ClientProtos.ClientService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, - Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(MasterService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(MasterProtos.HbckService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegistryProtos.ClientMetaService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, 
Kind.HBASE_AUTH_TOKEN)); // NOTE: IF ADDING A NEW SERVICE, BE SURE TO UPDATE HBasePolicyProvider ALSO ELSE // new Service will not be found when all is Kerberized!!!! } /** - * Adds a security configuration for a new service name. Note that this will have no effect if - * the service name was already registered. + * Adds a security configuration for a new service name. Note that this will have no effect if the + * service name was already registered. */ public static void addInfo(String serviceName, SecurityInfo securityInfo) { infos.putIfAbsent(serviceName, securityInfo); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index 67546b78b001..fc8e23747229 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -69,11 +69,9 @@ public static boolean isCellAuthorizationEnabled(Connection connection) throws I .contains(SecurityCapability.CELL_AUTHORIZATION); } - private static BlockingInterface getAccessControlServiceStub(Table ht) - throws IOException { + private static BlockingInterface getAccessControlServiceStub(Table ht) throws IOException { CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); - BlockingInterface protocol = - AccessControlProtos.AccessControlService.newBlockingStub(service); + BlockingInterface protocol = AccessControlProtos.AccessControlService.newBlockingStub(service); return protocol; } @@ -90,8 +88,8 @@ private static BlockingInterface getAccessControlServiceStub(Table ht) * @param actions * @throws Throwable */ - private static void grant(Connection connection, final TableName tableName, - final String userName, final byte[] family, final byte[] qual, boolean mergeExistingPermissions, + private static void grant(Connection connection, final TableName tableName, final String userName, + final byte[] family, final byte[] qual, boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { connection.getAdmin().grant(new UserPermission(userName, Permission.newBuilder(tableName) .withFamily(family).withQualifier(qual).withActions(actions).build()), @@ -99,8 +97,8 @@ private static void grant(Connection connection, final TableName tableName, } /** - * Grants permission on the specified table for the specified user. - * If permissions for a specified user exists, later granted permissions will override previous granted permissions. + * Grants permission on the specified table for the specified user. If permissions for a specified + * user exists, later granted permissions will override previous granted permissions. 
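AccessControlClient is the public entry point for the grant/revoke calls documented above. A usage sketch, assuming the AccessController coprocessor is enabled on the cluster; the table, family and user names are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class GrantRevokeExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      TableName table = TableName.valueOf("demo_table");
      // Grant READ/WRITE on one column family to user "bob"; as the javadoc above notes,
      // a later grant for the same user replaces earlier ones.
      AccessControlClient.grant(connection, table, "bob", Bytes.toBytes("cf"), null,
        Permission.Action.READ, Permission.Action.WRITE);
      // Revoke them again.
      AccessControlClient.revoke(connection, table, "bob", Bytes.toBytes("cf"), null,
        Permission.Action.READ, Permission.Action.WRITE);
    }
  }
}
```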
* @param connection The Connection instance to use * @param tableName * @param userName @@ -133,8 +131,8 @@ private static void grant(Connection connection, final String namespace, final S } /** - * Grants permission on the specified namespace for the specified user. - * If permissions on the specified namespace exists, later granted permissions will override previous granted + * Grants permission on the specified namespace for the specified user. If permissions on the + * specified namespace exists, later granted permissions will override previous granted * permissions. * @param connection The Connection instance to use * @param namespace @@ -165,9 +163,8 @@ private static void grant(Connection connection, final String userName, } /** - * Grant global permissions for the specified user. - * If permissions for the specified user exists, later granted permissions will override previous granted - * permissions. + * Grant global permissions for the specified user. If permissions for the specified user exists, + * later granted permissions will override previous granted permissions. * @param connection * @param userName * @param actions @@ -195,9 +192,9 @@ public static boolean isAccessControllerRunning(Connection connection) * @param actions * @throws Throwable */ - public static void revoke(Connection connection, final TableName tableName, - final String username, final byte[] family, final byte[] qualifier, - final Permission.Action... actions) throws Throwable { + public static void revoke(Connection connection, final TableName tableName, final String username, + final byte[] family, final byte[] qualifier, final Permission.Action... actions) + throws Throwable { connection.getAdmin().revoke(new UserPermission(username, Permission.newBuilder(tableName) .withFamily(family).withQualifier(qualifier).withActions(actions).build())); } @@ -210,8 +207,8 @@ public static void revoke(Connection connection, final TableName tableName, * @param actions * @throws Throwable */ - public static void revoke(Connection connection, final String namespace, - final String userName, final Permission.Action... actions) throws Throwable { + public static void revoke(Connection connection, final String namespace, final String userName, + final Permission.Action... actions) throws Throwable { connection.getAdmin().revoke( new UserPermission(userName, Permission.newBuilder(namespace).withActions(actions).build())); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java index e0c4d99dfca5..a795d296fe7c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import org.apache.yetus.audience.InterfaceAudience; @@ -24,16 +23,16 @@ public interface AccessControlConstants { /** - * Configuration option that toggles whether EXEC permission checking is - * performed during coprocessor endpoint invocations. + * Configuration option that toggles whether EXEC permission checking is performed during + * coprocessor endpoint invocations. 
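A sketch of how the two AccessControlConstants options above are typically set (server-side configuration; the values shown are illustrative, EXEC checks default to false):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.access.AccessControlConstants;

public class AccessCheckOptions {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Require EXEC permission for coprocessor endpoint invocations (off by default).
    conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
    // Early-out of access checks once a table/CF-level grant allows the operation
    // (pre-0.98 compatible behavior); also settable as a CF schema attribute.
    conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, true);
    System.out.println(conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
      AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS));
  }
}
```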
*/ public static final String EXEC_PERMISSION_CHECKS_KEY = "hbase.security.exec.permission.checks"; /** Default setting for hbase.security.exec.permission.checks; false */ public static final boolean DEFAULT_EXEC_PERMISSION_CHECKS = false; /** - * Configuration or CF schema option for early termination of access checks - * if table or CF permissions grant access. Pre-0.98 compatible behavior + * Configuration or CF schema option for early termination of access checks if table or CF + * permissions grant access. Pre-0.98 compatible behavior */ public static final String CF_ATTRIBUTE_EARLY_OUT = "hbase.security.access.early_out"; /** Default setting for hbase.security.access.early_out */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java index 236191f1d66c..31b4233a270e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,11 +47,11 @@ */ @InterfaceAudience.Private public class AccessControlUtil { - private AccessControlUtil() {} + private AccessControlUtil() { + } /** * Create a request to grant user table permissions. - * * @param username the short user name who to grant permissions * @param tableName optional table name the permissions apply * @param family optional column family @@ -60,11 +60,10 @@ private AccessControlUtil() {} * @return A {@link AccessControlProtos} GrantRequest * @throws NullPointerException if {@code tableName} is {@code null} */ - public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, TableName tableName, byte[] family, byte[] qualifier, - boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.GrantRequest buildGrantRequest(String username, + TableName tableName, byte[] family, byte[] qualifier, boolean mergeExistingPermissions, + AccessControlProtos.Permission.Action... 
actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.TablePermission.Builder permissionBuilder = AccessControlProtos.TablePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { @@ -81,29 +80,24 @@ public static AccessControlProtos.GrantRequest buildGrantRequest( if (qualifier != null) { permissionBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } - ret.setType(AccessControlProtos.Permission.Type.Table) - .setTablePermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Table).setTablePermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } /** * Create a request to grant user namespace permissions. - * * @param username the short user name who to grant permissions * @param namespace optional table name the permissions apply * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ - public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, String namespace, boolean mergeExistingPermissions, + public static AccessControlProtos.GrantRequest buildGrantRequest(String username, + String namespace, boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.NamespacePermission.Builder permissionBuilder = AccessControlProtos.NamespacePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { @@ -113,54 +107,44 @@ public static AccessControlProtos.GrantRequest buildGrantRequest( permissionBuilder.setNamespaceName(ByteString.copyFromUtf8(namespace)); } ret.setType(AccessControlProtos.Permission.Type.Namespace) - .setNamespacePermission(permissionBuilder); + .setNamespacePermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } /** * Create a request to revoke user global permissions. - * * @param username the short user name whose permissions to be revoked * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + AccessControlProtos.Permission.Action... 
actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.GlobalPermission.Builder permissionBuilder = AccessControlProtos.GlobalPermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - ret.setType(AccessControlProtos.Permission.Type.Global) - .setGlobalPermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Global).setGlobalPermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } /** * Create a request to revoke user namespace permissions. - * * @param username the short user name whose permissions to be revoked * @param namespace optional table name the permissions apply * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, String namespace, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + String namespace, AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.NamespacePermission.Builder permissionBuilder = AccessControlProtos.NamespacePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { @@ -170,60 +154,51 @@ public static AccessControlProtos.RevokeRequest buildRevokeRequest( permissionBuilder.setNamespaceName(ByteString.copyFromUtf8(namespace)); } ret.setType(AccessControlProtos.Permission.Type.Namespace) - .setNamespacePermission(permissionBuilder); + .setNamespacePermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } /** * Create a request to grant user global permissions. - * * @param username the short user name who to grant permissions * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ public static AccessControlProtos.GrantRequest buildGrantRequest(String username, boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... 
actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.GlobalPermission.Builder permissionBuilder = AccessControlProtos.GlobalPermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - ret.setType(AccessControlProtos.Permission.Type.Global) - .setGlobalPermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Global).setGlobalPermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions(String user, Permission perms) { return AccessControlProtos.UsersAndPermissions.newBuilder() .addUserPermissions(AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder() - .setUser(ByteString.copyFromUtf8(user)) - .addPermissions(toPermission(perms)) - .build()) + .setUser(ByteString.copyFromUtf8(user)).addPermissions(toPermission(perms)).build()) .build(); } - public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions( - ListMultimap perms) { + public static AccessControlProtos.UsersAndPermissions + toUsersAndPermissions(ListMultimap perms) { AccessControlProtos.UsersAndPermissions.Builder builder = AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perms.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); - for (Permission perm: entry.getValue()) { + for (Permission perm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(perm)); } builder.addUserPermissions(userPermBuilder.build()); @@ -231,13 +206,13 @@ public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions( return builder.build(); } - public static ListMultimap toUsersAndPermissions( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUsersAndPermissions(AccessControlProtos.UsersAndPermissions proto) { ListMultimap result = ArrayListMultimap.create(); - for (AccessControlProtos.UsersAndPermissions.UserPermissions userPerms: - proto.getUserPermissionsList()) { + for (AccessControlProtos.UsersAndPermissions.UserPermissions userPerms : proto + .getUserPermissionsList()) { String user = userPerms.getUser().toStringUtf8(); - for (AccessControlProtos.Permission perm: userPerms.getPermissionsList()) { + for (AccessControlProtos.Permission perm : userPerms.getPermissionsList()) { result.put(user, toPermission(perm)); } } @@ -311,7 +286,6 @@ public static Permission toPermission(AccessControlProtos.Permission proto) { /** * Convert a client Permission to a Permission proto - * * @param perm the client Permission * @return the protobuf Permission */ @@ -321,7 +295,7 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { NamespacePermission namespace = (NamespacePermission) perm; 
ret.setType(AccessControlProtos.Permission.Type.Namespace); AccessControlProtos.NamespacePermission.Builder builder = - AccessControlProtos.NamespacePermission.newBuilder(); + AccessControlProtos.NamespacePermission.newBuilder(); builder.setNamespaceName(ByteString.copyFromUtf8(namespace.getNamespace())); Permission.Action[] actions = perm.getActions(); if (actions != null) { @@ -334,7 +308,7 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { TablePermission table = (TablePermission) perm; ret.setType(AccessControlProtos.Permission.Type.Table); AccessControlProtos.TablePermission.Builder builder = - AccessControlProtos.TablePermission.newBuilder(); + AccessControlProtos.TablePermission.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(table.getTableName())); if (table.hasFamily()) { builder.setFamily(UnsafeByteOperations.unsafeWrap(table.getFamily())); @@ -353,10 +327,10 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { // perm instanceof GlobalPermission ret.setType(AccessControlProtos.Permission.Type.Global); AccessControlProtos.GlobalPermission.Builder builder = - AccessControlProtos.GlobalPermission.newBuilder(); + AccessControlProtos.GlobalPermission.newBuilder(); Permission.Action[] actions = perm.getActions(); if (actions != null) { - for (Permission.Action a: actions) { + for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a)); } } @@ -367,7 +341,6 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { /** * Converts a list of Permission.Action proto to an array of client Permission.Action objects. - * * @param protoActions the list of protobuf Actions * @return the converted array of Actions */ @@ -382,68 +355,62 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { /** * Converts a Permission.Action proto to a client Permission.Action object. 
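The conversion helpers above translate between client-side Permission objects and their protobuf form. A round-trip sketch, assuming the shaded AccessControlProtos used on master (adjust the import for branches that still ship the unshaded protos); table and family names are illustrative:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.AccessControlUtil;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class PermissionProtoRoundTrip {
  public static void main(String[] args) {
    // Client-side table permission, narrowed to one column family.
    Permission perm = Permission.newBuilder(TableName.valueOf("demo_table"))
      .withFamily(Bytes.toBytes("cf"))
      .withActions(Permission.Action.READ, Permission.Action.WRITE)
      .build();

    // Client object -> protobuf message -> client object again.
    AccessControlProtos.Permission proto = AccessControlUtil.toPermission(perm);
    Permission back = AccessControlUtil.toPermission(proto);

    System.out.println(proto.getType() + " -> " + back);
  }
}
```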
- * * @param action the protobuf Action * @return the converted Action */ - public static Permission.Action toPermissionAction( - AccessControlProtos.Permission.Action action) { + public static Permission.Action toPermissionAction(AccessControlProtos.Permission.Action action) { switch (action) { - case READ: - return Permission.Action.READ; - case WRITE: - return Permission.Action.WRITE; - case EXEC: - return Permission.Action.EXEC; - case CREATE: - return Permission.Action.CREATE; - case ADMIN: - return Permission.Action.ADMIN; + case READ: + return Permission.Action.READ; + case WRITE: + return Permission.Action.WRITE; + case EXEC: + return Permission.Action.EXEC; + case CREATE: + return Permission.Action.CREATE; + case ADMIN: + return Permission.Action.ADMIN; } - throw new IllegalArgumentException("Unknown action value "+action.name()); + throw new IllegalArgumentException("Unknown action value " + action.name()); } /** * Convert a client Permission.Action to a Permission.Action proto - * * @param action the client Action * @return the protobuf Action */ - public static AccessControlProtos.Permission.Action toPermissionAction( - Permission.Action action) { + public static AccessControlProtos.Permission.Action toPermissionAction(Permission.Action action) { switch (action) { - case READ: - return AccessControlProtos.Permission.Action.READ; - case WRITE: - return AccessControlProtos.Permission.Action.WRITE; - case EXEC: - return AccessControlProtos.Permission.Action.EXEC; - case CREATE: - return AccessControlProtos.Permission.Action.CREATE; - case ADMIN: - return AccessControlProtos.Permission.Action.ADMIN; + case READ: + return AccessControlProtos.Permission.Action.READ; + case WRITE: + return AccessControlProtos.Permission.Action.WRITE; + case EXEC: + return AccessControlProtos.Permission.Action.EXEC; + case CREATE: + return AccessControlProtos.Permission.Action.CREATE; + case ADMIN: + return AccessControlProtos.Permission.Action.ADMIN; } - throw new IllegalArgumentException("Unknown action value "+action.name()); + throw new IllegalArgumentException("Unknown action value " + action.name()); } /** * Convert a client user permission to a user permission proto - * * @param perm the client UserPermission * @return the protobuf UserPermission */ public static AccessControlProtos.UserPermission toUserPermission(UserPermission perm) { return AccessControlProtos.UserPermission.newBuilder() .setUser(ByteString.copyFromUtf8(perm.getUser())) - .setPermission(toPermission(perm.getPermission())) - .build(); + .setPermission(toPermission(perm.getPermission())).build(); } /** * Converts the permissions list into a protocol buffer GetUserPermissionsResponse */ - public static GetUserPermissionsResponse buildGetUserPermissionsResponse( - final List permissions) { + public static GetUserPermissionsResponse + buildGetUserPermissionsResponse(final List permissions) { GetUserPermissionsResponse.Builder builder = GetUserPermissionsResponse.newBuilder(); for (UserPermission perm : permissions) { builder.addUserPermission(toUserPermission(perm)); @@ -453,7 +420,6 @@ public static GetUserPermissionsResponse buildGetUserPermissionsResponse( /** * Converts a user permission proto to a client user permission object. 
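Likewise for the user-plus-permission pairing: a small sketch of toUserPermission in both directions, under the same shaded-protobuf assumption (the user name is illustrative; the no-argument Permission.newBuilder() yields a global-scope permission):

```java
import org.apache.hadoop.hbase.security.access.AccessControlUtil;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;

public class UserPermissionProtoRoundTrip {
  public static void main(String[] args) {
    // Global ADMIN permission for user "alice".
    UserPermission up = new UserPermission("alice",
      Permission.newBuilder().withActions(Permission.Action.ADMIN).build());

    AccessControlProtos.UserPermission proto = AccessControlUtil.toUserPermission(up);
    UserPermission back = AccessControlUtil.toUserPermission(proto);

    System.out.println(back.getUser() + ": " + back.getPermission());
  }
}
```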
- * * @param proto the protobuf UserPermission * @return the converted UserPermission */ @@ -462,21 +428,20 @@ public static UserPermission toUserPermission(AccessControlProtos.UserPermission } /** - * Convert a ListMultimap<String, TablePermission> where key is username - * to a protobuf UserPermission - * + * Convert a ListMultimap<String, TablePermission> where key is username to a protobuf + * UserPermission * @param perm the list of user and table permissions * @return the protobuf UserTablePermissions */ - public static AccessControlProtos.UsersAndPermissions toUserTablePermissions( - ListMultimap perm) { + public static AccessControlProtos.UsersAndPermissions + toUserTablePermissions(ListMultimap perm) { AccessControlProtos.UsersAndPermissions.Builder builder = AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perm.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); - for (UserPermission userPerm: entry.getValue()) { + for (UserPermission userPerm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(userPerm.getPermission())); } builder.addUserPermissions(userPermBuilder.build()); @@ -488,7 +453,6 @@ public static AccessControlProtos.UsersAndPermissions toUserTablePermissions( * A utility used to grant a user global permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to grant permissions * @param actions the permissions to be granted @@ -497,24 +461,24 @@ public static AccessControlProtos.UsersAndPermissions toUserTablePermissions( */ @Deprecated public static void grant(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, boolean mergeExistingPermissions, - Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, + boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException { List permActions = Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } - AccessControlProtos.GrantRequest request = buildGrantRequest(userShortName, mergeExistingPermissions, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + AccessControlProtos.GrantRequest request = + buildGrantRequest(userShortName, mergeExistingPermissions, + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } /** - * A utility used to grant a user table permissions. The permissions will - * be for a table table/column family/qualifier. + * A utility used to grant a user table permissions. The permissions will be for a table + * table/column family/qualifier. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to grant permissions * @param tableName optional table name @@ -544,7 +508,6 @@ public static void grant(RpcController controller, * A utility used to grant a user namespace permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param namespace the short name of the user to grant permissions @@ -561,8 +524,9 @@ public static void grant(RpcController controller, for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } - AccessControlProtos.GrantRequest request = buildGrantRequest(userShortName, namespace, mergeExistingPermissions, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + AccessControlProtos.GrantRequest request = + buildGrantRequest(userShortName, namespace, mergeExistingPermissions, + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } @@ -570,7 +534,6 @@ public static void grant(RpcController controller, * A utility used to revoke a user's global permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions @@ -588,16 +551,15 @@ public static void revoke(RpcController controller, permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } /** - * A utility used to revoke a user's table permissions. The permissions will - * be for a table/column family/qualifier. + * A utility used to revoke a user's table permissions. The permissions will be for a table/column + * family/qualifier. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions @@ -618,7 +580,7 @@ public static void revoke(RpcController controller, permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, tableName, f, q, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } @@ -626,7 +588,6 @@ public static void revoke(RpcController controller, * A utility used to revoke a user's namespace permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions @@ -645,7 +606,7 @@ public static void revoke(RpcController controller, permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, namespace, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } @@ -653,7 +614,6 @@ public static void revoke(RpcController controller, * A utility used to get user's global permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @throws ServiceException on failure @@ -697,7 +657,6 @@ public static List getUserPermissions(RpcController controller, * A utility used to get user table permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param t optional table name @@ -706,8 +665,7 @@ public static List getUserPermissions(RpcController controller, */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, - TableName t) throws ServiceException { + AccessControlService.BlockingInterface protocol, TableName t) throws ServiceException { return getUserPermissions(controller, protocol, t, null, null, HConstants.EMPTY_STRING); } @@ -757,7 +715,6 @@ public static List getUserPermissions(RpcController controller, * A utility used to get permissions for selected namespace. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param namespace name of the namespace @@ -766,8 +723,7 @@ public static List getUserPermissions(RpcController controller, */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, - byte[] namespace) throws ServiceException { + AccessControlService.BlockingInterface protocol, byte[] namespace) throws ServiceException { return getUserPermissions(controller, protocol, namespace, HConstants.EMPTY_STRING); } @@ -827,8 +783,7 @@ public static boolean hasPermission(RpcController controller, throws ServiceException { AccessControlProtos.TablePermission.Builder tablePermissionBuilder = AccessControlProtos.TablePermission.newBuilder(); - tablePermissionBuilder - .setTableName(ProtobufUtil.toProtoTableName(tableName)); + tablePermissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName)); if (Bytes.len(columnFamily) > 0) { tablePermissionBuilder.setFamily(UnsafeByteOperations.unsafeWrap(columnFamily)); } @@ -851,8 +806,8 @@ public static boolean hasPermission(RpcController controller, * @param proto the proto UsersAndPermissions * @return a ListMultimap with user and its permissions */ - public static ListMultimap toUserPermission( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUserPermission(AccessControlProtos.UsersAndPermissions proto) { ListMultimap userPermission = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { @@ -871,8 +826,8 @@ public static ListMultimap toUserPermission( * @param proto the proto UsersAndPermissions * @return a ListMultimap with user and its permissions */ - public static ListMultimap toPermission( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toPermission(AccessControlProtos.UsersAndPermissions proto) { ListMultimap perms = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { @@ -887,7 +842,6 @@ public static ListMultimap toPermission( /** * Create a request to revoke user table permissions. - * * @param username the short user name whose permissions to be revoked * @param tableName optional table name the permissions apply * @param family optional column family @@ -895,11 +849,10 @@ public static ListMultimap toPermission( * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, TableName tableName, byte[] family, byte[] qualifier, + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + TableName tableName, byte[] family, byte[] qualifier, AccessControlProtos.Permission.Action... 
actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.TablePermission.Builder permissionBuilder = AccessControlProtos.TablePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { @@ -914,13 +867,10 @@ public static AccessControlProtos.RevokeRequest buildRevokeRequest( if (qualifier != null) { permissionBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } - ret.setType(AccessControlProtos.Permission.Type.Table) - .setTablePermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Table).setTablePermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java index 8e1767cce944..cb45087e1018 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Objects; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java index 01d53ebb37f7..570c543b4b53 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.access; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java index 7781d2295693..721530101835 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java index 49f2432ffa58..834641194596 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; @@ -27,20 +26,17 @@ import java.util.List; import java.util.Map; import java.util.Objects; - import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.VersionedWritable; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.VersionedWritable; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; /** - * Base permissions instance representing the ability to perform a given set - * of actions. - * + * Base permissions instance representing the ability to perform a given set of actions. 
* @see TablePermission */ @InterfaceAudience.Public @@ -52,11 +48,14 @@ public enum Action { READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A'); private final byte code; + Action(char code) { this.code = (byte) code; } - public byte code() { return code; } + public byte code() { + return code; + } } @InterfaceAudience.Private @@ -64,6 +63,7 @@ protected enum Scope { GLOBAL('G'), NAMESPACE('N'), TABLE('T'), EMPTY('E'); private final byte code; + Scope(char code) { this.code = (byte) code; } @@ -82,23 +82,15 @@ public byte code() { protected Scope scope = Scope.EMPTY; static { - ACTION_BY_CODE = ImmutableMap.of( - Action.READ.code, Action.READ, - Action.WRITE.code, Action.WRITE, - Action.EXEC.code, Action.EXEC, - Action.CREATE.code, Action.CREATE, - Action.ADMIN.code, Action.ADMIN - ); - - SCOPE_BY_CODE = ImmutableMap.of( - Scope.GLOBAL.code, Scope.GLOBAL, - Scope.NAMESPACE.code, Scope.NAMESPACE, - Scope.TABLE.code, Scope.TABLE, - Scope.EMPTY.code, Scope.EMPTY - ); + ACTION_BY_CODE = ImmutableMap.of(Action.READ.code, Action.READ, Action.WRITE.code, Action.WRITE, + Action.EXEC.code, Action.EXEC, Action.CREATE.code, Action.CREATE, Action.ADMIN.code, + Action.ADMIN); + + SCOPE_BY_CODE = ImmutableMap.of(Scope.GLOBAL.code, Scope.GLOBAL, Scope.NAMESPACE.code, + Scope.NAMESPACE, Scope.TABLE.code, Scope.TABLE, Scope.EMPTY.code, Scope.EMPTY); } - /** Empty constructor for Writable implementation. Do not use. */ + /** Empty constructor for Writable implementation. Do not use. */ public Permission() { super(); } @@ -114,8 +106,8 @@ public Permission(byte[] actionCodes) { for (byte code : actionCodes) { Action action = ACTION_BY_CODE.get(code); if (action == null) { - LOG.error("Ignoring unknown action code '" + - Bytes.toStringBinary(new byte[] { code }) + "'"); + LOG.error( + "Ignoring unknown action code '" + Bytes.toStringBinary(new byte[] { code }) + "'"); continue; } actions.add(action); @@ -146,9 +138,8 @@ public void setActions(Action[] assigned) { } /** - * Check if two permission equals regardless of actions. It is useful when - * merging a new permission with an existed permission which needs to check two permissions's - * fields. + * Check if two permission equals regardless of actions. It is useful when merging a new + * permission with an existed permission which needs to check two permissions's fields. * @param obj instance * @return true if equals, false otherwise */ @@ -221,8 +212,8 @@ public void readFields(DataInput in) throws IOException { byte b = in.readByte(); Action action = ACTION_BY_CODE.get(b); if (action == null) { - throw new IOException("Unknown action code '" + - Bytes.toStringBinary(new byte[] { b }) + "' in input"); + throw new IOException( + "Unknown action code '" + Bytes.toStringBinary(new byte[] { b }) + "' in input"); } actions.add(action); } @@ -235,7 +226,7 @@ public void write(DataOutput out) throws IOException { super.write(out); out.writeByte(actions != null ? 
actions.size() : 0); if (actions != null) { - for (Action a: actions) { + for (Action a : actions) { out.writeByte(a.code()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java index 661bcc842a8d..38e4167d81b9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Collection; @@ -28,6 +27,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest; @@ -39,7 +39,6 @@ /** * Convert protobuf objects in AccessControl.proto under hbase-protocol-shaded to user-oriented * objects and vice versa.
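Editor's note: the Permission.java hunks just above only reformat the Action enum (READ 'R', WRITE 'W', EXEC 'X', CREATE 'C', ADMIN 'A'), its code() accessor and the byte[]-of-codes constructor that skips unknown codes. A small sketch using only the members visible in those hunks:

    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PermissionCodesSketch {
      public static void main(String[] args) {
        // Every Action carries a single-byte code, per the reformatted enum above.
        for (Permission.Action a : Permission.Action.values()) {
          System.out.println(a + " -> " + (char) a.code());
        }
        // The byte[] constructor maps codes back to actions; unknown codes are logged and skipped.
        Permission readWrite = new Permission(Bytes.toBytes("RW"));
        System.out.println(readWrite);
      }
    }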
    - * * In HBASE-15638, we create a hbase-protocol-shaded module for upgrading protobuf version to 3.x, * but there are still some coprocessor endpoints(such as AccessControl, Authentication, * MulitRowMutation) which depend on hbase-protocol module for CPEP compatibility. In fact, we use @@ -73,16 +72,16 @@ public static AccessControlProtos.Permission.Action toPermissionAction(Permissio */ public static Permission.Action toPermissionAction(AccessControlProtos.Permission.Action action) { switch (action) { - case READ: - return Permission.Action.READ; - case WRITE: - return Permission.Action.WRITE; - case EXEC: - return Permission.Action.EXEC; - case CREATE: - return Permission.Action.CREATE; - case ADMIN: - return Permission.Action.ADMIN; + case READ: + return Permission.Action.READ; + case WRITE: + return Permission.Action.WRITE; + case EXEC: + return Permission.Action.EXEC; + case CREATE: + return Permission.Action.CREATE; + case ADMIN: + return Permission.Action.ADMIN; } throw new IllegalArgumentException("Unknown action value " + action.name()); } @@ -218,8 +217,8 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { * @param proto the protobuf UserPermission * @return the converted UserPermission */ - public static ListMultimap toUserTablePermissions( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUserTablePermissions(AccessControlProtos.UsersAndPermissions proto) { ListMultimap perms = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java index f17919f70bf9..b78728ce9013 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java @@ -15,24 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Represents an authorization for access for the given actions, optionally - * restricted to the given column family or column qualifier, over the - * given table. If the family property is null, it implies - * full table access. + * Represents an authorization for access for the given actions, optionally restricted to the given + * column family or column qualifier, over the given table. If the family property is + * null, it implies full table access. */ @InterfaceAudience.Public public class TablePermission extends Permission { @@ -131,13 +128,13 @@ private boolean failCheckQualifier(byte[] qual) { } /** - * Checks if this permission grants access to perform the given action on - * the given table and key value. + * Checks if this permission grants access to perform the given action on the given table and key + * value. 
* @param table the table on which the operation is being performed * @param kv the KeyValue on which the operation is being requested * @param action the action requested - * @return true if the action is allowed over the given scope - * by this permission, otherwise false + * @return true if the action is allowed over the given scope by this permission, + * otherwise false */ public boolean implies(TableName table, KeyValue kv, Action action) { if (failCheckTable(table)) { @@ -168,8 +165,8 @@ public boolean tableFieldsEqual(TablePermission tp) { boolean tEq = (table == null && tp.table == null) || (table != null && table.equals(tp.table)); boolean fEq = (family == null && tp.family == null) || Bytes.equals(family, tp.family); - boolean qEq = (qualifier == null && tp.qualifier == null) || - Bytes.equals(qualifier, tp.qualifier); + boolean qEq = + (qualifier == null && tp.qualifier == null) || Bytes.equals(qualifier, tp.qualifier); return tEq && fEq && qEq; } @@ -212,10 +209,9 @@ public String toString() { protected String rawExpression() { StringBuilder raw = new StringBuilder(); if (table != null) { - raw.append("table=").append(table) - .append(", family=").append(family == null ? null : Bytes.toString(family)) - .append(", qualifier=").append(qualifier == null ? null : Bytes.toString(qualifier)) - .append(", "); + raw.append("table=").append(table).append(", family=") + .append(family == null ? null : Bytes.toString(family)).append(", qualifier=") + .append(qualifier == null ? null : Bytes.toString(qualifier)).append(", "); } return raw.toString() + super.rawExpression(); } @@ -224,7 +220,7 @@ protected String rawExpression() { public void readFields(DataInput in) throws IOException { super.readFields(in); byte[] tableBytes = Bytes.readByteArray(in); - if(tableBytes.length > 0) { + if (tableBytes.length > 0) { table = TableName.valueOf(tableBytes); } if (in.readBoolean()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java index 896ba5251a3c..874495126de1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** - * UserPermission consists of a user name and a permission. - * Permission can be one of [Global, Namespace, Table] permission. + * UserPermission consists of a user name and a permission. Permission can be one of [Global, + * Namespace, Table] permission. 
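Editor's note: as the reflowed javadoc above states, a TablePermission optionally narrows access to a column family or qualifier (a null family implies full table access), and a UserPermission simply pairs a user name with a permission. A hedged sketch of building one; the Permission.newBuilder(...) fluent API and the UserPermission(String, Permission) constructor used here are assumptions drawn from the wider client API, not from these hunks:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;
    import org.apache.hadoop.hbase.util.Bytes;

    public class UserPermissionSketch {
      public static void main(String[] args) {
        // Table-scoped READ permission narrowed to cf:q1; dropping withFamily/withQualifier
        // would widen it to the whole table, matching the javadoc above.
        Permission readColumn = Permission.newBuilder(TableName.valueOf("demo_table"))
            .withFamily(Bytes.toBytes("cf"))
            .withQualifier(Bytes.toBytes("q1"))
            .withActions(Permission.Action.READ)
            .build();
        System.out.println(new UserPermission("bob", readColumn));
      }
    }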
*/ @InterfaceAudience.Public public class UserPermission { @@ -87,8 +85,7 @@ public int hashCode() { @Override public String toString() { - StringBuilder str = new StringBuilder("UserPermission: ") - .append("user=").append(user) + StringBuilder str = new StringBuilder("UserPermission: ").append("user=").append(user) .append(", ").append(permission.toString()); return str.toString(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java index d018ce19921b..375ad68deab0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java @@ -27,11 +27,10 @@ */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving -public abstract class AbstractSaslClientAuthenticationProvider implements - SaslClientAuthenticationProvider { +public abstract class AbstractSaslClientAuthenticationProvider + implements SaslClientAuthenticationProvider { public static final String AUTH_TOKEN_TYPE = "HBASE_AUTH_TOKEN"; - @Override public final String getTokenKind() { // All HBase authentication tokens are "HBASE_AUTH_TOKEN"'s. We differentiate between them diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java index a681d53719d0..e5fe8f9b11b4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.provider; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -41,9 +40,9 @@ void configure(Configuration conf, Collection availableProviders); /** - * Chooses the authentication provider which should be used given the provided client context - * from the authentication providers passed in via {@link #configure(Configuration, Collection)}. + * Chooses the authentication provider which should be used given the provided client context from + * the authentication providers passed in via {@link #configure(Configuration, Collection)}. 
*/ - Pair> selectProvider( - String clusterId, User user); + Pair> + selectProvider(String clusterId, User user); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java index 752003dad8c6..cc957805f48a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java @@ -21,9 +21,7 @@ import java.util.Collection; import java.util.Objects; - import net.jcip.annotations.NotThreadSafe; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -39,14 +37,12 @@ /** * Default implementation of {@link AuthenticationProviderSelector} which can choose from the * authentication implementations which HBase provides out of the box: Simple, Kerberos, and - * Delegation Token authentication. - * - * This implementation will ignore any {@link SaslAuthenticationProvider}'s which are available - * on the classpath or specified in the configuration because HBase cannot correctly choose which - * token should be returned to a client when multiple are present. It is expected that users - * implement their own {@link AuthenticationProviderSelector} when writing a custom provider. - * - * This implementation is not thread-safe. {@link #configure(Configuration, Collection)} and + * Delegation Token authentication. This implementation will ignore any + * {@link SaslAuthenticationProvider}'s which are available on the classpath or specified in the + * configuration because HBase cannot correctly choose which token should be returned to a client + * when multiple are present. It is expected that users implement their own + * {@link AuthenticationProviderSelector} when writing a custom provider. This implementation is not + * thread-safe. {@link #configure(Configuration, Collection)} and * {@link #selectProvider(String, User)} is not safe if they are called concurrently. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @@ -61,8 +57,8 @@ public class BuiltInProviderSelector implements AuthenticationProviderSelector { Text digestAuthTokenKind = null; @Override - public void configure( - Configuration conf, Collection providers) { + public void configure(Configuration conf, + Collection providers) { if (this.conf != null) { throw new IllegalStateException("configure() should only be called once"); } @@ -100,8 +96,8 @@ public void configure( } @Override - public Pair> selectProvider( - String clusterId, User user) { + public Pair> + selectProvider(String clusterId, User user) { requireNonNull(clusterId, "Null clusterId was given"); requireNonNull(user, "Null user was given"); @@ -117,10 +113,10 @@ public Pair> // (for whatever that's worth). for (Token token : user.getTokens()) { // We need to check for two things: - // 1. This token is for the HBase cluster we want to talk to - // 2. We have suppporting client implementation to handle the token (the "kind" of token) - if (clusterIdAsText.equals(token.getService()) && - digestAuthTokenKind.equals(token.getKind())) { + // 1. This token is for the HBase cluster we want to talk to + // 2. 
We have suppporting client implementation to handle the token (the "kind" of token) + if (clusterIdAsText.equals(token.getService()) + && digestAuthTokenKind.equals(token.getKind())) { return new Pair<>(digestAuth, token); } } @@ -128,15 +124,16 @@ public Pair> final UserGroupInformation currentUser = user.getUGI(); // May be null if Hadoop AuthenticationMethod is PROXY final UserGroupInformation realUser = currentUser.getRealUser(); - if (currentUser.hasKerberosCredentials() || - (realUser != null && realUser.hasKerberosCredentials())) { + if (currentUser.hasKerberosCredentials() + || (realUser != null && realUser.hasKerberosCredentials())) { return new Pair<>(krbAuth, null); } // This indicates that a client is requesting some authentication mechanism which the servers // don't know how to process (e.g. there is no provider which can support it). This may be // a bug or simply a misconfiguration of client *or* server. LOG.warn("No matching SASL authentication provider and supporting token found from providers" - + " for user: {}", user); + + " for user: {}", + user); return null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java index c1b7ddb7c554..712d4035448b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Base class for all Apache HBase, built-in {@link SaslAuthenticationProvider}'s to extend. - * - * HBase users should take care to note that this class (and its sub-classes) are marked with the + * Base class for all Apache HBase, built-in {@link SaslAuthenticationProvider}'s to extend. HBase + * users should take care to note that this class (and its sub-classes) are marked with the * {@code InterfaceAudience.Private} annotation. These implementations are available for users to * read, copy, and modify, but should not be extended or re-used in binary form. There are no * compatibility guarantees provided for implementations of this class. 
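Editor's note: the BuiltInProviderSelector hunk above only reflows code and javadoc; the selection order is unchanged. Condensed, and using only calls that appear in the hunk (the String return value is a stand-in for the provider/token pair the real selector returns), the order is: a DIGEST delegation token matching this cluster's id wins, then Kerberos credentials on the current or real user, otherwise nothing:

    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class SelectionOrderSketch {
      // digestAuthTokenKind is the token kind advertised by the digest provider.
      static String choose(String clusterId, User user, Text digestAuthTokenKind) {
        Text clusterIdAsText = new Text(clusterId);
        // 1) A delegation token scoped to this cluster, of a kind we can handle.
        for (Token<? extends TokenIdentifier> token : user.getTokens()) {
          if (clusterIdAsText.equals(token.getService())
              && digestAuthTokenKind.equals(token.getKind())) {
            return "DIGEST";
          }
        }
        // 2) Kerberos credentials, either on the current user or on its real user.
        UserGroupInformation currentUser = user.getUGI();
        UserGroupInformation realUser = currentUser.getRealUser();
        if (currentUser.hasKerberosCredentials()
            || (realUser != null && realUser.hasKerberosCredentials())) {
          return "KERBEROS";
        }
        // 3) Nothing usable; the real selector logs a warning and returns null here.
        return null;
      }
    }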
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java index 7cbdecd642be..98e6605413c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java @@ -26,8 +26,8 @@ @InterfaceAudience.Private public class DigestSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "DIGEST", (byte)82, "DIGEST-MD5", AuthenticationMethod.TOKEN); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("DIGEST", (byte) 82, "DIGEST-MD5", AuthenticationMethod.TOKEN); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java index a84f24b9080e..735d2ece1965 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.NameCallback; @@ -30,7 +29,6 @@ import javax.security.sasl.RealmChoiceCallback; import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -52,7 +50,7 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo securityInfo, Token token, boolean fallbackAllowed, Map saslProps) throws IOException { return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, - null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new DigestSaslClientCallbackHandler(token)); + null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new DigestSaslClientCallbackHandler(token)); } public static class DigestSaslClientCallbackHandler implements CallbackHandler { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java index 07101848e507..5ddb54fdad34 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java @@ -26,8 +26,8 @@ @InterfaceAudience.Private public class GssSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "KERBEROS", (byte)81, "GSSAPI", AuthenticationMethod.KERBEROS); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("KERBEROS", (byte) 81, "GSSAPI", AuthenticationMethod.KERBEROS); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java index 21a4828b49e9..1ebd62a02ca5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java @@ -20,10 +20,8 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SecurityConstants; @@ -42,8 +40,8 @@ @InterfaceAudience.Private public class GssSaslClientAuthenticationProvider extends GssSaslAuthenticationProvider implements SaslClientAuthenticationProvider { - private static final Logger LOG = LoggerFactory.getLogger( - GssSaslClientAuthenticationProvider.class); + private static final Logger LOG = + LoggerFactory.getLogger(GssSaslClientAuthenticationProvider.class); private static boolean useCanonicalHostname(Configuration conf) { return !conf.getBoolean( @@ -57,10 +55,9 @@ public static String getHostnameForServerPrincipal(Configuration conf, InetAddre if (useCanonicalHostname(conf)) { hostname = addr.getCanonicalHostName(); if (hostname.equals(addr.getHostAddress())) { - LOG.warn("Canonical hostname for SASL principal is the same with IP address: " - + hostname + ", " + addr.getHostName() + ". Check DNS configuration or consider " - + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS - + "=true"); + LOG.warn("Canonical hostname for SASL principal is the same with IP address: " + hostname + + ", " + addr.getHostName() + ". Check DNS configuration or consider " + + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS + "=true"); } } else { hostname = addr.getHostName(); @@ -89,11 +86,11 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddr, LOG.debug("Setting up Kerberos RPC to server={}", serverPrincipal); String[] names = SaslUtil.splitKerberosName(serverPrincipal); if (names.length != 3) { - throw new IOException("Kerberos principal '" + serverPrincipal - + "' does not have the expected format"); + throw new IOException( + "Kerberos principal '" + serverPrincipal + "' does not have the expected format"); } return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, - names[0], names[1], saslProps, null); + names[0], names[1], saslProps, null); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java index 7930564cb9f6..0303e8c48de8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.provider; import java.util.Objects; - import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; @@ -78,19 +77,13 @@ public boolean equals(Object o) { return false; } SaslAuthMethod other = (SaslAuthMethod) o; - return Objects.equals(name, other.name) && - code == other.code && - Objects.equals(saslMech, other.saslMech) && - Objects.equals(method, other.method); + return Objects.equals(name, other.name) && code == other.code + && Objects.equals(saslMech, other.saslMech) && Objects.equals(method, other.method); } @Override public int hashCode() { - return new HashCodeBuilder() - .append(name) - .append(code) - .append(saslMech) - .append(method) + return new HashCodeBuilder().append(name).append(code).append(saslMech).append(method) .toHashCode(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java index 1f6d821ce953..99e2916fa513 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java @@ -22,13 +22,11 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. - * It is suggested that custom implementations extend the abstract class in the type hierarchy - * instead of directly implementing this interface (clients have a base class available, but - * servers presently do not). - * - * Implementations of this interface must be unique among each other via the {@code byte} - * returned by {@link SaslAuthMethod#getCode()} on {@link #getSaslAuthMethod()}. + * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. It is + * suggested that custom implementations extend the abstract class in the type hierarchy instead of + * directly implementing this interface (clients have a base class available, but servers presently + * do not). Implementations of this interface must be unique among each other via the + * {@code byte} returned by {@link SaslAuthMethod#getCode()} on {@link #getSaslAuthMethod()}. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java index 4b1cabcfc494..52f873f71aba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java @@ -20,9 +20,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -38,10 +36,9 @@ /** * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. 
* Implementations should not directly implement this interface, but instead extend - * {@link AbstractSaslClientAuthenticationProvider}. - * - * Implementations of this interface must make an implementation of {@code hashCode()} - * which returns the same value across multiple instances of the provider implementation. + * {@link AbstractSaslClientAuthenticationProvider}. Implementations of this interface must make an + * implementation of {@code hashCode()} which returns the same value across multiple instances of + * the provider implementation. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving @@ -60,18 +57,15 @@ SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo UserInformation getUserInfo(User user); /** - * Returns the "real" user, the user who has the credentials being authenticated by the - * remote service, in the form of an {@link UserGroupInformation} object. - * - * It is common in the Hadoop "world" to have distinct notions of a "real" user and a "proxy" - * user. A "real" user is the user which actually has the credentials (often, a Kerberos ticket), - * but some code may be running as some other user who has no credentials. This method gives - * the authentication provider a chance to acknowledge this is happening and ensure that any - * RPCs are executed with the real user's credentials, because executing them as the proxy user - * would result in failure because no credentials exist to authenticate the RPC. - * - * Not all implementations will need to implement this method. By default, the provided User's - * UGI is returned directly. + * Returns the "real" user, the user who has the credentials being authenticated by the remote + * service, in the form of an {@link UserGroupInformation} object. It is common in the Hadoop + * "world" to have distinct notions of a "real" user and a "proxy" user. A "real" user is the user + * which actually has the credentials (often, a Kerberos ticket), but some code may be running as + * some other user who has no credentials. This method gives the authentication provider a chance + * to acknowledge this is happening and ensure that any RPCs are executed with the real user's + * credentials, because executing them as the proxy user would result in failure because no + * credentials exist to authenticate the RPC. Not all implementations will need to implement this + * method. By default, the provided User's UGI is returned directly. */ default UserGroupInformation getRealUser(User ugi) { return ugi.getUGI(); @@ -86,8 +80,9 @@ default boolean canRetry() { } /** - * Executes any necessary logic to re-login the client. Not all implementations will have - * any logic that needs to be executed. + * Executes any necessary logic to re-login the client. Not all implementations will have any + * logic that needs to be executed. 
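Editor's note: the reflowed getRealUser javadoc above leans on Hadoop's distinction between a "real" user (the one holding credentials) and a "proxy" user (running without credentials of its own). A minimal illustration with plain Hadoop UGI calls; the user name is made up and this is not code from the patch:

    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ProxyUserSketch {
      public static void main(String[] args) throws IOException {
        // The "real" user owns the credentials (for example a Kerberos login).
        UserGroupInformation realUser = UserGroupInformation.getLoginUser();
        // The "proxy" user runs on top of those credentials without any of its own,
        // which is why RPCs must ultimately authenticate as the real user.
        UserGroupInformation proxyUser =
            UserGroupInformation.createProxyUser("some_app_user", realUser);
        System.out.println("real=" + realUser.getShortUserName()
            + ", proxy=" + proxyUser.getShortUserName()
            + ", proxy's real=" + proxyUser.getRealUser().getShortUserName());
      }
    }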
*/ - default void relogin() throws IOException {} + default void relogin() throws IOException { + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java index aaaee003c595..9ab989be940c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.ServiceLoader; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -43,8 +42,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving public final class SaslClientAuthenticationProviders { - private static final Logger LOG = LoggerFactory.getLogger( - SaslClientAuthenticationProviders.class); + private static final Logger LOG = + LoggerFactory.getLogger(SaslClientAuthenticationProviders.class); public static final String SELECTOR_KEY = "hbase.client.sasl.provider.class"; public static final String EXTRA_PROVIDERS_KEY = "hbase.client.sasl.provider.extras"; @@ -55,8 +54,7 @@ public final class SaslClientAuthenticationProviders { private final Collection providers; private final AuthenticationProviderSelector selector; - private SaslClientAuthenticationProviders( - Collection providers, + private SaslClientAuthenticationProviders(Collection providers, AuthenticationProviderSelector selector) { this.providers = providers; this.selector = selector; @@ -90,11 +88,11 @@ public static synchronized void reset() { } /** - * Adds the given {@code provider} to the set, only if an equivalent provider does not - * already exist in the set. + * Adds the given {@code provider} to the set, only if an equivalent provider does not already + * exist in the set. 
*/ static void addProviderIfNotExists(SaslClientAuthenticationProvider provider, - HashMap providers) { + HashMap providers) { Byte code = provider.getSaslAuthMethod().getCode(); SaslClientAuthenticationProvider existingProvider = providers.get(code); if (existingProvider != null) { @@ -109,8 +107,8 @@ static void addProviderIfNotExists(SaslClientAuthenticationProvider provider, */ static AuthenticationProviderSelector instantiateSelector(Configuration conf, Collection providers) { - Class clz = conf.getClass( - SELECTOR_KEY, BuiltInProviderSelector.class, AuthenticationProviderSelector.class); + Class clz = conf.getClass(SELECTOR_KEY, + BuiltInProviderSelector.class, AuthenticationProviderSelector.class); try { AuthenticationProviderSelector selector = clz.getConstructor().newInstance(); selector.configure(conf, providers); @@ -118,10 +116,11 @@ static AuthenticationProviderSelector instantiateSelector(Configuration conf, LOG.trace("Loaded ProviderSelector {}", selector.getClass()); } return selector; - } catch (InstantiationException | IllegalAccessException | NoSuchMethodException | - InvocationTargetException e) { - throw new RuntimeException("Failed to instantiate " + clz + - " as the ProviderSelector defined by " + SELECTOR_KEY, e); + } catch (InstantiationException | IllegalAccessException | NoSuchMethodException + | InvocationTargetException e) { + throw new RuntimeException( + "Failed to instantiate " + clz + " as the ProviderSelector defined by " + SELECTOR_KEY, + e); } } @@ -129,8 +128,8 @@ static AuthenticationProviderSelector instantiateSelector(Configuration conf, * Extracts and instantiates authentication providers from the configuration. */ static void addExplicitProviders(Configuration conf, - HashMap providers) { - for(String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { + HashMap providers) { + for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { Class clz; // Load the class from the config try { @@ -143,7 +142,8 @@ static void addExplicitProviders(Configuration conf, // Make sure it's the right type if (!SaslClientAuthenticationProvider.class.isAssignableFrom(clz)) { LOG.warn("Ignoring SaslClientAuthenticationProvider {} because it is not an instance of" - + " SaslClientAuthenticationProvider", clz); + + " SaslClientAuthenticationProvider", + clz); continue; } @@ -170,19 +170,18 @@ static void addExplicitProviders(Configuration conf, static SaslClientAuthenticationProviders instantiate(Configuration conf) { ServiceLoader loader = ServiceLoader.load(SaslClientAuthenticationProvider.class); - HashMap providerMap = new HashMap<>(); + HashMap providerMap = new HashMap<>(); for (SaslClientAuthenticationProvider provider : loader) { addProviderIfNotExists(provider, providerMap); } addExplicitProviders(conf, providerMap); - Collection providers = Collections.unmodifiableCollection( - providerMap.values()); + Collection providers = + Collections.unmodifiableCollection(providerMap.values()); if (LOG.isTraceEnabled()) { - String loadedProviders = providers.stream() - .map((provider) -> provider.getClass().getName()) + String loadedProviders = providers.stream().map((provider) -> provider.getClass().getName()) .collect(Collectors.joining(", ")); LOG.trace("Found SaslClientAuthenticationProviders {}", loadedProviders); } @@ -192,16 +191,13 @@ static SaslClientAuthenticationProviders instantiate(Configuration conf) { } /** - * Returns the provider and token pair for SIMPLE authentication. 
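Editor's note: the SaslClientAuthenticationProviders hunks around here keep the same two client-side knobs while reflowing the loading code: "hbase.client.sasl.provider.extras" lists extra provider classes to load next to the ServiceLoader-discovered ones (duplicates by auth code are ignored), and "hbase.client.sasl.provider.class" swaps in a custom selector (BuiltInProviderSelector when unset). A configuration sketch; the com.example class names are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ProviderConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Additional SaslClientAuthenticationProvider implementations (placeholder class name).
        conf.set("hbase.client.sasl.provider.extras",
            "com.example.MyTokenAuthenticationProvider");
        // Custom AuthenticationProviderSelector (placeholder class name).
        conf.set("hbase.client.sasl.provider.class",
            "com.example.MyProviderSelector");
      }
    }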
- * - * This method is a "hack" while SIMPLE authentication for HBase does not flow through - * the SASL codepath. + * Returns the provider and token pair for SIMPLE authentication. This method is a "hack" while + * SIMPLE authentication for HBase does not flow through the SASL codepath. */ public Pair> getSimpleProvider() { Optional optional = providers.stream() - .filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider) - .findFirst(); + .filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider).findFirst(); return new Pair<>(optional.get(), null); } @@ -209,15 +205,14 @@ static SaslClientAuthenticationProviders instantiate(Configuration conf) { * Chooses the best authentication provider and corresponding token given the HBase cluster * identifier and the user. */ - public Pair> selectProvider( - String clusterId, User clientUser) { + public Pair> + selectProvider(String clusterId, User clientUser) { return selector.selectProvider(clusterId, clientUser); } @Override public String toString() { - return providers.stream() - .map((p) -> p.getClass().getName()) + return providers.stream().map((p) -> p.getClass().getName()) .collect(Collectors.joining(", ", "providers=[", "], selector=")) + selector.getClass(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java index 3f1122c75413..4b2ecb2cf476 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java @@ -25,8 +25,8 @@ */ @InterfaceAudience.Private public class SimpleSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "SIMPLE", (byte)80, "", AuthenticationMethod.SIMPLE); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("SIMPLE", (byte) 80, "", AuthenticationMethod.SIMPLE); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java index 3a9142f34c44..d1098c81f61f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java @@ -20,9 +20,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; @@ -34,8 +32,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; @InterfaceAudience.Private -public class SimpleSaslClientAuthenticationProvider extends - SimpleSaslAuthenticationProvider implements SaslClientAuthenticationProvider { +public class SimpleSaslClientAuthenticationProvider extends SimpleSaslAuthenticationProvider + implements SaslClientAuthenticationProvider { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddress, diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java index 0e0a2500a54f..eab9bbac5c38 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; import java.io.DataInput; @@ -51,8 +50,8 @@ public AuthenticationTokenIdentifier(String username) { this.username = username; } - public AuthenticationTokenIdentifier(String username, int keyId, - long issueDate, long expirationDate) { + public AuthenticationTokenIdentifier(String username, int keyId, long issueDate, + long expirationDate) { this.username = username; this.keyId = keyId; this.issueDate = issueDate; @@ -119,9 +118,7 @@ public byte[] toBytes() { if (username != null) { builder.setUsername(ByteString.copyFromUtf8(username)); } - builder.setIssueDate(issueDate) - .setExpirationDate(expirationDate) - .setKeyId(keyId) + builder.setIssueDate(issueDate).setExpirationDate(expirationDate).setKeyId(keyId) .setSequenceNumber(sequenceNumber); return builder.build().toByteArray(); } @@ -139,13 +136,13 @@ public void readFields(DataInput in) throws IOException { byte[] inBytes = new byte[len]; in.readFully(inBytes); AuthenticationProtos.TokenIdentifier.Builder builder = - AuthenticationProtos.TokenIdentifier.newBuilder(); + AuthenticationProtos.TokenIdentifier.newBuilder(); ProtobufUtil.mergeFrom(builder, inBytes); AuthenticationProtos.TokenIdentifier identifier = builder.build(); // sanity check on type - if (!identifier.hasKind() || - identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN) { - throw new IOException("Invalid TokenIdentifier kind from input "+identifier.getKind()); + if (!identifier.hasKind() + || identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN) { + throw new IOException("Invalid TokenIdentifier kind from input " + identifier.getKind()); } // copy the field values @@ -172,26 +169,23 @@ public boolean equals(Object other) { return false; } if (other instanceof AuthenticationTokenIdentifier) { - AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier)other; - return sequenceNumber == ident.getSequenceNumber() - && keyId == ident.getKeyId() - && issueDate == ident.getIssueDate() - && expirationDate == ident.getExpirationDate() - && (username == null ? ident.getUsername() == null : - username.equals(ident.getUsername())); + AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier) other; + return sequenceNumber == ident.getSequenceNumber() && keyId == ident.getKeyId() + && issueDate == ident.getIssueDate() && expirationDate == ident.getExpirationDate() + && (username == null ? 
ident.getUsername() == null + : username.equals(ident.getUsername())); } return false; } @Override public int hashCode() { - return (int)sequenceNumber; + return (int) sequenceNumber; } @Override public String toString() { - return "(username=" + username + ", keyId=" - + keyId + ", issueDate=" + issueDate - + ", expirationDate=" + expirationDate + ", sequenceNumber=" + sequenceNumber + ")"; + return "(username=" + username + ", keyId=" + keyId + ", issueDate=" + issueDate + + ", expirationDate=" + expirationDate + ", sequenceNumber=" + sequenceNumber + ")"; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java index 39959ef61db4..709279b73604 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java @@ -15,22 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; import java.util.Collection; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.Private -public class AuthenticationTokenSelector - implements TokenSelector { +public class AuthenticationTokenSelector implements TokenSelector { private static final Logger LOG = LoggerFactory.getLogger(AuthenticationTokenSelector.class); public AuthenticationTokenSelector() { @@ -41,12 +38,12 @@ public Token selectToken(Text serviceName, Collection> tokens) { if (serviceName != null) { for (Token ident : tokens) { - if (serviceName.equals(ident.getService()) && - AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) { + if (serviceName.equals(ident.getService()) + && AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) { if (LOG.isDebugEnabled()) { - LOG.debug("Returning token "+ident); + LOG.debug("Returning token " + ident); } - return (Token)ident; + return (Token) ident; } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java index a29c47c5f6fb..d43ac0ee437f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
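Editor's note: the AuthenticationTokenIdentifier hunks above reflow the four-argument constructor, the protobuf-backed toBytes() and the equals/hashCode/toString implementations without behaviour changes. A tiny sketch using only members visible in those hunks; the user name, key id and lifetime are made up:

    import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;

    public class TokenIdentifierSketch {
      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        AuthenticationTokenIdentifier id =
            new AuthenticationTokenIdentifier("alice", 7, now, now + 86_400_000L);
        // toBytes() serializes to the protobuf TokenIdentifier message handled in readFields().
        byte[] serialized = id.toBytes();
        System.out.println(id + " -> " + serialized.length + " bytes");
      }
    }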
*/ - package org.apache.hadoop.hbase.security.token; import java.io.IOException; @@ -52,7 +51,8 @@ public final class ClientTokenUtil { // Set in TestClientTokenUtil via reflection private static ServiceException injectedException; - private ClientTokenUtil() {} + private ClientTokenUtil() { + } private static void injectFault() throws ServiceException { if (injectedException != null) { @@ -66,19 +66,18 @@ private static void injectFault() throws ServiceException { * @return the authentication token instance, wrapped by a {@link CompletableFuture}. */ @InterfaceAudience.Private - public static CompletableFuture> obtainToken( - AsyncConnection conn) { + public static CompletableFuture> + obtainToken(AsyncConnection conn) { CompletableFuture> future = new CompletableFuture<>(); if (injectedException != null) { future.completeExceptionally(ProtobufUtil.handleRemoteException(injectedException)); return future; } AsyncTable table = conn.getTable(TableName.META_TABLE_NAME); - table. coprocessorService( + table. coprocessorService( AuthenticationProtos.AuthenticationService::newStub, - (s, c, r) -> s.getAuthenticationToken(c, - AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance(), r), + (s, c, r) -> s.getAuthenticationToken(c, + AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance(), r), HConstants.EMPTY_START_ROW).whenComplete((resp, error) -> { if (error != null) { future.completeExceptionally(ProtobufUtil.handleRemoteException(error)); @@ -96,20 +95,17 @@ AuthenticationProtos.GetAuthenticationTokenResponse> coprocessorService( * @return the authentication token instance */ @InterfaceAudience.Private - static Token obtainToken( - Connection conn) throws IOException { + static Token obtainToken(Connection conn) throws IOException { Table meta = null; try { injectFault(); meta = conn.getTable(TableName.META_TABLE_NAME); - CoprocessorRpcChannel rpcChannel = meta.coprocessorService( - HConstants.EMPTY_START_ROW); + CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); AuthenticationProtos.AuthenticationService.BlockingInterface service = AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); - AuthenticationProtos.GetAuthenticationTokenResponse response = - service.getAuthenticationToken(null, - AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); + AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken( + null, AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); return toToken(response.getToken()); } catch (ServiceException se) { @@ -123,7 +119,6 @@ static Token obtainToken( /** * Converts a Token instance (with embedded identifier) to the protobuf representation. - * * @param token the Token instance to copy * @return the protobuf Token message */ @@ -140,14 +135,12 @@ static AuthenticationProtos.Token toToken(Token t /** * Converts a protobuf Token message back into a Token instance. - * * @param proto the protobuf Token message * @return the Token instance */ @InterfaceAudience.Private static Token toToken(AuthenticationProtos.Token proto) { - return new Token<>( - proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, + return new Token<>(proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, proto.hasPassword() ? proto.getPassword().toByteArray() : null, AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, proto.hasService() ? 
new Text(proto.getService().toStringUtf8()) : null); @@ -160,8 +153,8 @@ static Token toToken(AuthenticationProtos.Token p * @return the authentication token instance */ @InterfaceAudience.Private - static Token obtainToken( - final Connection conn, User user) throws IOException, InterruptedException { + static Token obtainToken(final Connection conn, User user) + throws IOException, InterruptedException { return user.runAs(new PrivilegedExceptionAction>() { @Override public Token run() throws Exception { @@ -171,15 +164,13 @@ public Token run() throws Exception { } /** - * Obtain an authentication token for the given user and add it to the - * user's credentials. + * Obtain an authentication token for the given user and add it to the user's credentials. * @param conn The HBase cluster connection * @param user The user for whom to obtain the token * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ - public static void obtainAndCacheToken(final Connection conn, - User user) + public static void obtainAndCacheToken(final Connection conn, User user) throws IOException, InterruptedException { try { Token token = obtainToken(conn, user); @@ -188,8 +179,7 @@ public static void obtainAndCacheToken(final Connection conn, throw new IOException("No token returned for user " + user.getName()); } if (LOG.isDebugEnabled()) { - LOG.debug("Obtained token " + token.getKind().toString() + " for user " + - user.getName()); + LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName()); } user.addToken(token); } catch (IOException | InterruptedException | RuntimeException e) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java index f8ac1b966097..f15bab6c0951 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -31,6 +30,7 @@ public class Authorizations { private List labels; + public Authorizations(String... 
labels) { this.labels = new ArrayList<>(labels.length); Collections.addAll(this.labels, labels); @@ -43,12 +43,12 @@ public Authorizations(List labels) { public List getLabels() { return Collections.unmodifiableList(this.labels); } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("[ "); - for (String label: labels) { + for (String label : labels) { sb.append(label); sb.append(' '); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java index 6cf8fb748dfd..8abaee005094 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hbase.security.visibility; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * This contains a visibility expression which can be associated with a cell. When it is set with a * Mutation, all the cells in that mutation will get associated with this expression. A visibility - * expression can contain visibility labels combined with logical - * operators AND(&), OR(|) and NOT(!) + * expression can contain visibility labels combined with logical operators AND(&), OR(|) and + * NOT(!) */ @InterfaceAudience.Public public class CellVisibility { @@ -48,25 +48,22 @@ public String toString() { } /** - * Helps in quoting authentication Strings. Use this if unicode characters to - * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * Helps in quoting authentication Strings. Use this if unicode characters to be used in + * expression or special characters like '(', ')', '"','\','&','|','!' */ public static String quote(String auth) { return quote(Bytes.toBytes(auth)); } /** - * Helps in quoting authentication Strings. Use this if unicode characters to - * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * Helps in quoting authentication Strings. Use this if unicode characters to be used in + * expression or special characters like '(', ')', '"','\','&','|','!' 
*/ public static String quote(byte[] auth) { int escapeChars = 0; for (int i = 0; i < auth.length; i++) - if (auth[i] == '"' || auth[i] == '\\') - escapeChars++; + if (auth[i] == '"' || auth[i] == '\\') escapeChars++; byte[] escapedAuth = new byte[auth.length + escapeChars + 2]; int index = 1; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java index 778288d4c03f..e9160ec976c2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java @@ -28,4 +28,3 @@ public InvalidLabelException(String msg) { super(msg); } } - diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index 3d3d081ad481..feca76ed1956 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService; - /** * Utility client for doing visibility labels admin operations. */ @@ -66,7 +65,6 @@ public static boolean isCellVisibilityEnabled(Connection connection) throws IOEx /** * Utility method for adding label to the system. - * * @param connection * @param label * @return VisibilityLabelsResponse @@ -79,7 +77,6 @@ public static VisibilityLabelsResponse addLabel(Connection connection, final Str /** * Utility method for adding labels to the system. 
- * * @param connection * @param labels * @return VisibilityLabelsResponse @@ -90,31 +87,32 @@ public static VisibilityLabelsResponse addLabels(Connection connection, final St try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); - for (String label : labels) { - if (label.length() > 0) { - VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); - newBuilder.setLabel(UnsafeByteOperations.unsafeWrap((Bytes.toBytes(label)))); - builder.addVisLabel(newBuilder.build()); + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) + throws IOException { + VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); + for (String label : labels) { + if (label.length() > 0) { + VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); + newBuilder.setLabel(UnsafeByteOperations.unsafeWrap((Bytes.toBytes(label)))); + builder.addVisLabel(newBuilder.build()); + } + } + service.addLabels(controller, builder.build(), rpcCallback); + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - } - service.addLabels(controller, builder.build(), rpcCallback); - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; + }; Map result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. 
} @@ -144,25 +142,24 @@ public static GetAuthsResponse getAuths(Connection connection, final String user try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { - GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); - getAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user))); - service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); - GetAuthsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = - table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + @Override + public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { + GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); + getAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user))); + service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); + GetAuthsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; + } + }; + Map result = table.coprocessorService(VisibilityLabelsService.class, + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } @@ -171,7 +168,7 @@ public GetAuthsResponse call(VisibilityLabelsService service) throws IOException /** * Retrieve the list of visibility labels defined in the system. * @param connection The Connection instance to use. - * @param regex The regular expression to filter which labels are returned. + * @param regex The regular expression to filter which labels are returned. * @return labels The list of visibility labels defined in the system. * @throws Throwable */ @@ -180,29 +177,29 @@ public static ListLabelsResponse listLabels(Connection connection, final String try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { - ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); - if (regex != null) { - // Compile the regex here to catch any regex exception earlier. 
- Pattern pattern = Pattern.compile(regex); - listAuthLabelsReqBuilder.setRegex(pattern.toString()); - } - service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); - ListLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; + @Override + public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { + ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); + if (regex != null) { + // Compile the regex here to catch any regex exception earlier. + Pattern pattern = Pattern.compile(regex); + listAuthLabelsReqBuilder.setRegex(pattern.toString()); + } + service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); + ListLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; + } + }; Map result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } @@ -228,34 +225,35 @@ private static VisibilityLabelsResponse setOrClearAuths(Connection connection, try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); - setAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user))); - for (String auth : auths) { - if (auth.length() > 0) { - setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) + throws IOException { + SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); + setAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user))); + for (String auth : auths) { + if (auth.length() > 0) { + setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); + } + } + if (setOrClear) { + service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } else { + service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - } - if (setOrClear) { - service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } else { - service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = table.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + 
HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java index 0945dd98afc2..18bf96a28d4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,8 +19,8 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public final class VisibilityConstants { @@ -31,8 +31,8 @@ public final class VisibilityConstants { public static final String VISIBILITY_LABELS_ATTR_KEY = "VISIBILITY"; /** Internal storage table for visibility labels */ - public static final TableName LABELS_TABLE_NAME = TableName.valueOf( - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "labels"); + public static final TableName LABELS_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "labels"); /** Family for the internal storage table for visibility labels */ public static final byte[] LABELS_TABLE_FAMILY = Bytes.toBytes("f"); @@ -41,15 +41,15 @@ public final class VisibilityConstants { public static final byte[] LABEL_QUALIFIER = new byte[1]; /** - * Visibility serialization version format. It indicates the visibility labels - * are sorted based on ordinal + * Visibility serialization version format. 
It indicates the visibility labels are sorted based on + * ordinal **/ public static final byte SORTED_ORDINAL_SERIALIZATION_FORMAT = 1; /** Byte representation of the visibility_serialization_version **/ public static final byte[] SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL = new byte[] { SORTED_ORDINAL_SERIALIZATION_FORMAT }; - public static final String CHECK_AUTHS_FOR_MUTATION = + public static final String CHECK_AUTHS_FOR_MUTATION = "hbase.security.visibility.mutations.checkauths"; public static final String NOT_OPERATOR = "!"; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java index a73d47501912..7d8d550e82e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /* diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java index 874b2b42cec3..dfbb0b9d02b2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.regex.Pattern; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -54,7 +53,7 @@ public class VisibilityLabelsValidator { validAuthChars['.'] = true; validAuthChars['/'] = true; } - + static final boolean isValidAuthChar(byte b) { return validAuthChars[0xff & b]; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 4ba0631c4e9d..982e923a5704 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -47,7 +47,6 @@ import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.CacheEvictionStatsBuilder; @@ -71,9 +70,10 @@ import org.apache.hadoop.hbase.ServerTaskBuilder; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.client.BalanceResponse; -import org.apache.hadoop.hbase.client.BalancerRejection; import org.apache.hadoop.hbase.client.BalancerDecision; +import org.apache.hadoop.hbase.client.BalancerRejection; import org.apache.hadoop.hbase.client.CheckAndMutate; import 
org.apache.hadoop.hbase.client.ClientUtil; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -122,7 +122,6 @@ import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.CellVisibility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.DynamicClassLoader; @@ -203,6 +202,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RecentLogs; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; @@ -220,12 +220,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; /** - * Protobufs utility. - * Be aware that a class named org.apache.hadoop.hbase.protobuf.ProtobufUtil (i.e. no 'shaded' in - * the package name) carries a COPY of a subset of this class for non-shaded - * users; e.g. Coprocessor Endpoints. If you make change in here, be sure to make change in - * the companion class too (not the end of the world, especially if you are adding new functionality - * but something to be aware of. + * Protobufs utility. Be aware that a class named org.apache.hadoop.hbase.protobuf.ProtobufUtil + * (i.e. no 'shaded' in the package name) carries a COPY of a subset of this class for non-shaded + * users; e.g. Coprocessor Endpoints. If you make change in here, be sure to make change in the + * companion class too (not the end of the world, especially if you are adding new functionality but + * something to be aware of. */ @InterfaceAudience.Private // TODO: some clients (Hive, etc) use this class public final class ProtobufUtil { @@ -234,18 +233,18 @@ private ProtobufUtil() { } /** - * Many results are simple: no cell, exists true or false. To save on object creations, - * we reuse them across calls. + * Many results are simple: no cell, exists true or false. To save on object creations, we reuse + * them across calls. 
*/ - private final static Cell[] EMPTY_CELL_ARRAY = new Cell[]{}; + private final static Cell[] EMPTY_CELL_ARRAY = new Cell[] {}; private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY); final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false); private final static Result EMPTY_RESULT_STALE = Result.create(EMPTY_CELL_ARRAY, null, true); - private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE - = Result.create((Cell[])null, true, true); - private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE - = Result.create((Cell[])null, false, true); + private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE = + Result.create((Cell[]) null, true, true); + private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE = + Result.create((Cell[]) null, false, true); private final static ClientProtos.Result EMPTY_RESULT_PB; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE; @@ -254,13 +253,12 @@ private ProtobufUtil() { private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE_STALE; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_FALSE_STALE; - static { ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); builder.setExists(true); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); + EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_TRUE_STALE = builder.build(); @@ -268,13 +266,13 @@ private ProtobufUtil() { builder.setExists(false); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); + EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_FALSE_STALE = builder.build(); builder.clear(); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB = builder.build(); + EMPTY_RESULT_PB = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_STALE = builder.build(); } @@ -290,9 +288,8 @@ private final static class ClassLoaderHolder { static { ClassLoader parent = ProtobufUtil.class.getClassLoader(); Configuration conf = HBaseConfiguration.create(); - CLASS_LOADER = AccessController.doPrivileged((PrivilegedAction) - () -> new DynamicClassLoader(conf, parent) - ); + CLASS_LOADER = AccessController + .doPrivileged((PrivilegedAction) () -> new DynamicClassLoader(conf, parent)); classLoaderLoaded = true; } } @@ -302,14 +299,13 @@ public static boolean isClassLoaderLoaded() { } /** - * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, - * to flag what follows as a protobuf in hbase. Prepend these bytes to all content written to - * znodes, etc. + * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, to flag what + * follows as a protobuf in hbase. Prepend these bytes to all content written to znodes, etc. * @param bytes Bytes to decorate - * @return The passed bytes with magic prepended (Creates a new - * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. + * @return The passed bytes with magic prepended (Creates a new byte array that is + * bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. */ - public static byte [] prependPBMagic(final byte [] bytes) { + public static byte[] prependPBMagic(final byte[] bytes) { return Bytes.add(PB_MAGIC, bytes); } @@ -317,7 +313,7 @@ public static boolean isClassLoaderLoaded() { * @param bytes Bytes to check. 
* @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes) { + public static boolean isPBMagicPrefix(final byte[] bytes) { return ProtobufMagic.isPBMagicPrefix(bytes); } @@ -327,7 +323,7 @@ public static boolean isPBMagicPrefix(final byte [] bytes) { * @param len length to use * @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) { + public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) { return ProtobufMagic.isPBMagicPrefix(bytes, offset, len); } @@ -350,20 +346,18 @@ public static int lengthOfPBMagic() { return ProtobufMagic.lengthOfPBMagic(); } - public static ComparatorProtos.ByteArrayComparable toByteArrayComparable(final byte [] value) { + public static ComparatorProtos.ByteArrayComparable toByteArrayComparable(final byte[] value) { ComparatorProtos.ByteArrayComparable.Builder builder = - ComparatorProtos.ByteArrayComparable.newBuilder(); + ComparatorProtos.ByteArrayComparable.newBuilder(); if (value != null) builder.setValue(UnsafeByteOperations.unsafeWrap(value)); return builder.build(); } /** - * Return the IOException thrown by the remote server wrapped in - * ServiceException as cause. - * + * Return the IOException thrown by the remote server wrapped in ServiceException as cause. * @param se ServiceException that wraps IO exception thrown by the server - * @return Exception wrapped in ServiceException or - * a new IOException that wraps the unexpected ServiceException. + * @return Exception wrapped in ServiceException or a new IOException that wraps the unexpected + * ServiceException. */ public static IOException getRemoteException(ServiceException se) { return makeIOExceptionOfException(se); @@ -372,8 +366,8 @@ public static IOException getRemoteException(ServiceException se) { /** * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than * just {@link ServiceException}. Prefer this method to - * {@link #getRemoteException(ServiceException)} because trying to - * contain direct protobuf references. + * {@link #getRemoteException(ServiceException)} because trying to contain direct protobuf + * references. */ public static IOException handleRemoteException(Throwable e) { return makeIOExceptionOfException(e); @@ -388,14 +382,13 @@ private static IOException makeIOExceptionOfException(Throwable e) { return ExceptionUtil.asInterrupt(t); } if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); + t = ((RemoteException) t).unwrapRemoteException(); } - return t instanceof IOException? (IOException)t: new HBaseIOException(t); + return t instanceof IOException ? 
(IOException) t : new HBaseIOException(t); } /** * Convert a ServerName to a protocol buffer ServerName - * * @param serverName the ServerName to convert * @return the converted protocol buffer ServerName * @see #toServerName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName) @@ -404,8 +397,7 @@ public static HBaseProtos.ServerName toServerName(final ServerName serverName) { if (serverName == null) { return null; } - HBaseProtos.ServerName.Builder builder = - HBaseProtos.ServerName.newBuilder(); + HBaseProtos.ServerName.Builder builder = HBaseProtos.ServerName.newBuilder(); builder.setHostName(serverName.getHostname()); if (serverName.getPort() >= 0) { builder.setPort(serverName.getPort()); @@ -418,7 +410,6 @@ public static HBaseProtos.ServerName toServerName(final ServerName serverName) { /** * Convert a protocol buffer ServerName to a ServerName - * * @param proto the protocol buffer ServerName to convert * @return the converted ServerName */ @@ -453,7 +444,7 @@ public static ServerName toServerName(final byte[] data) throws DeserializationE int prefixLen = ProtobufMagic.lengthOfPBMagic(); try { ZooKeeperProtos.Master rss = - ZooKeeperProtos.Master.parser().parseFrom(data, prefixLen, data.length - prefixLen); + ZooKeeperProtos.Master.parser().parseFrom(data, prefixLen, data.length - prefixLen); HBaseProtos.ServerName sn = rss.getMaster(); return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); } catch (/* InvalidProtocolBufferException */IOException e) { @@ -485,10 +476,8 @@ public static ServerName toServerName(final byte[] data) throws DeserializationE * @param proto protocol buffer ServerNameList * @return a list of ServerName */ - public static List toServerNameList( - List proto) { - return proto.stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList()); + public static List toServerNameList(List proto) { + return proto.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList()); } /** @@ -496,8 +485,8 @@ public static List toServerNameList( * @param proto the ListNamespaceDescriptorsResponse * @return a list of NamespaceDescriptor */ - public static List toNamespaceDescriptorList( - ListNamespaceDescriptorsResponse proto) { + public static List + toNamespaceDescriptorList(ListNamespaceDescriptorsResponse proto) { return proto.getNamespaceDescriptorList().stream().map(ProtobufUtil::toNamespaceDescriptor) .collect(Collectors.toList()); } @@ -529,12 +518,11 @@ public static List toTableDescriptorList(GetTableDescriptorsRes /** * get the split keys in form "byte [][]" from a CreateTableRequest proto - * * @param proto the CreateTableRequest * @return the split keys */ - public static byte [][] getSplitKeysArray(final CreateTableRequest proto) { - byte [][] splitKeys = new byte[proto.getSplitKeysCount()][]; + public static byte[][] getSplitKeysArray(final CreateTableRequest proto) { + byte[][] splitKeys = new byte[proto.getSplitKeysCount()][]; for (int i = 0; i < proto.getSplitKeysCount(); ++i) { splitKeys[i] = proto.getSplitKeys(i).toByteArray(); } @@ -544,48 +532,45 @@ public static List toTableDescriptorList(GetTableDescriptorsRes /** * Convert a protobuf Durability into a client Durability */ - public static Durability toDurability( - final ClientProtos.MutationProto.Durability proto) { - switch(proto) { - case USE_DEFAULT: - return Durability.USE_DEFAULT; - case SKIP_WAL: - return Durability.SKIP_WAL; - case ASYNC_WAL: - return Durability.ASYNC_WAL; - case SYNC_WAL: - return Durability.SYNC_WAL; - case FSYNC_WAL: 
- return Durability.FSYNC_WAL; - default: - return Durability.USE_DEFAULT; + public static Durability toDurability(final ClientProtos.MutationProto.Durability proto) { + switch (proto) { + case USE_DEFAULT: + return Durability.USE_DEFAULT; + case SKIP_WAL: + return Durability.SKIP_WAL; + case ASYNC_WAL: + return Durability.ASYNC_WAL; + case SYNC_WAL: + return Durability.SYNC_WAL; + case FSYNC_WAL: + return Durability.FSYNC_WAL; + default: + return Durability.USE_DEFAULT; } } /** * Convert a client Durability into a protbuf Durability */ - public static ClientProtos.MutationProto.Durability toDurability( - final Durability d) { - switch(d) { - case USE_DEFAULT: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; - case SKIP_WAL: - return ClientProtos.MutationProto.Durability.SKIP_WAL; - case ASYNC_WAL: - return ClientProtos.MutationProto.Durability.ASYNC_WAL; - case SYNC_WAL: - return ClientProtos.MutationProto.Durability.SYNC_WAL; - case FSYNC_WAL: - return ClientProtos.MutationProto.Durability.FSYNC_WAL; - default: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; + public static ClientProtos.MutationProto.Durability toDurability(final Durability d) { + switch (d) { + case USE_DEFAULT: + return ClientProtos.MutationProto.Durability.USE_DEFAULT; + case SKIP_WAL: + return ClientProtos.MutationProto.Durability.SKIP_WAL; + case ASYNC_WAL: + return ClientProtos.MutationProto.Durability.ASYNC_WAL; + case SYNC_WAL: + return ClientProtos.MutationProto.Durability.SYNC_WAL; + case FSYNC_WAL: + return ClientProtos.MutationProto.Durability.FSYNC_WAL; + default: + return ClientProtos.MutationProto.Durability.USE_DEFAULT; } } /** * Convert a protocol buffer Get to a client Get - * * @param proto the protocol buffer Get to convert * @return the converted client Get * @throws IOException @@ -609,8 +594,8 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = toTimeRange(cftr.getTimeRange()); - get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -621,14 +606,14 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { FilterProtos.Filter filter = proto.getFilter(); get.setFilter(ProtobufUtil.toFilter(filter)); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { get.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { get.addColumn(family, qualifier.toByteArray()); } } else { @@ -636,7 +621,7 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { } } } - if (proto.hasExistenceOnly() && proto.getExistenceOnly()){ + if (proto.hasExistenceOnly() && proto.getExistenceOnly()) { get.setCheckExistenceOnly(true); } if (proto.hasConsistency()) { @@ -650,58 +635,61 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { public static Consistency 
toConsistency(ClientProtos.Consistency consistency) { switch (consistency) { - case STRONG : return Consistency.STRONG; - case TIMELINE : return Consistency.TIMELINE; - default : return Consistency.STRONG; + case STRONG: + return Consistency.STRONG; + case TIMELINE: + return Consistency.TIMELINE; + default: + return Consistency.STRONG; } } public static ClientProtos.Consistency toConsistency(Consistency consistency) { switch (consistency) { - case STRONG : return ClientProtos.Consistency.STRONG; - case TIMELINE : return ClientProtos.Consistency.TIMELINE; - default : return ClientProtos.Consistency.STRONG; + case STRONG: + return ClientProtos.Consistency.STRONG; + case TIMELINE: + return ClientProtos.Consistency.TIMELINE; + default: + return ClientProtos.Consistency.STRONG; } } /** * Convert a protocol buffer Mutate to a Put. - * * @param proto The protocol buffer MutationProto to convert * @return A client Put. * @throws IOException */ - public static Put toPut(final MutationProto proto) - throws IOException { + public static Put toPut(final MutationProto proto) throws IOException { return toPut(proto, null); } /** * Convert a protocol buffer Mutate to a Put. - * * @param proto The protocol buffer MutationProto to convert * @param cellScanner If non-null, the Cell data that goes with this proto. * @return A client Put. * @throws IOException */ public static Put toPut(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? + throws IOException { + // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? MutationType type = proto.getMutateType(); - assert type == MutationType.PUT: type.name(); - long timestamp = proto.hasTimestamp()? proto.getTimestamp(): HConstants.LATEST_TIMESTAMP; + assert type == MutationType.PUT : type.name(); + long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. 
if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (put == null) { @@ -714,13 +702,13 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner throw new IllegalArgumentException("row cannot be null"); } // The proto has the metadata and the data itself - ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (ColumnValue column: proto.getColumnValueList()) { + ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } long ts = timestamp; if (qv.hasTimestamp()) { @@ -729,51 +717,35 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner byte[] allTagsBytes; if (qv.hasTags()) { allTagsBytes = qv.getTags().toByteArray(); - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(proto.getRow().toByteArray()) - .setFamily(family) + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(proto.getRow().toByteArray()).setFamily(family) .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .setTags(allTagsBytes) - .build()); + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()) + .setTags(allTagsBytes).build()); } else { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Cell.Type.Put) + .setTimestamp(ts).setType(Cell.Type.Put) .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null) - .setTags(allTagsBytes) - .build()); + .setTags(allTagsBytes).build()); } } else { - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .build()); - } else{ - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()).build()); + } else { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Type.Put) - .setValue(qv.hasValue() ? 
qv.getValue().toByteArray() : null) - .build()); + .setTimestamp(ts).setType(Type.Put) + .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null).build()); } } } } } put.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { put.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return put; @@ -781,48 +753,45 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner /** * Convert a protocol buffer Mutate to a Delete - * * @param proto the protocol buffer Mutate to convert * @return the converted client Delete * @throws IOException */ - public static Delete toDelete(final MutationProto proto) - throws IOException { + public static Delete toDelete(final MutationProto proto) throws IOException { return toDelete(proto, null); } /** * Convert a protocol buffer Mutate to a Delete - * * @param proto the protocol buffer Mutate to convert * @param cellScanner if non-null, the data that goes with this delete. * @return the converted client Delete * @throws IOException */ public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.DELETE : type.name(); long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. if (cellScanner == null) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + + TextFormat.shortDebugString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. 
- throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + TextFormat.shortDebugString(proto)); } Cell cell = cellScanner.current(); if (delete == null) { delete = - new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp); + new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp); } delete.add(cell); } @@ -830,9 +799,9 @@ public static Delete toDelete(final MutationProto proto, final CellScanner cellS if (delete == null) { throw new IllegalArgumentException("row cannot be null"); } - for (ColumnValue column: proto.getColumnValueList()) { + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { DeleteType deleteType = qv.getDeleteType(); byte[] qualifier = null; if (qv.hasQualifier()) { @@ -852,35 +821,38 @@ public static Delete toDelete(final MutationProto proto, final CellScanner cellS } } delete.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { delete.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return delete; } + @FunctionalInterface - private interface ConsumerWithException { + private interface ConsumerWithException { void accept(T t, U u) throws IOException; } - private static T toDelta(Function supplier, ConsumerWithException consumer, - final MutationProto proto, final CellScanner cellScanner) throws IOException { + private static T toDelta(Function supplier, + ConsumerWithException consumer, final MutationProto proto, + final CellScanner cellScanner) throws IOException { byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null; T mutation = row == null ? null : supplier.apply(new Bytes(row)); int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. 
if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (mutation == null) { - mutation = supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + mutation = supplier + .apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } consumer.accept(mutation, cell); } @@ -893,23 +865,18 @@ private static T toDelta(Function supplier, Consu for (QualifierValue qv : column.getQualifierValueList()) { byte[] qualifier = qv.getQualifier().toByteArray(); if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } byte[] value = qv.getValue().toByteArray(); byte[] tags = null; if (qv.hasTags()) { tags = qv.getTags().toByteArray(); } - consumer.accept(mutation, ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(mutation.getRow()) - .setFamily(family) - .setQualifier(qualifier) - .setTimestamp(cellTimestampOrLatest(qv)) - .setType(KeyValue.Type.Put.getCode()) - .setValue(value) - .setTags(tags) - .build()); + consumer.accept(mutation, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setRow(mutation.getRow()).setFamily(family).setQualifier(qualifier) + .setTimestamp(cellTimestampOrLatest(qv)).setType(KeyValue.Type.Put.getCode()) + .setValue(value).setTags(tags).build()); } } } @@ -936,11 +903,11 @@ private static long cellTimestampOrLatest(QualifierValue cell) { * @throws IOException */ public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.APPEND : type.name(); Append append = toDelta((Bytes row) -> new Append(row.get(), row.getOffset(), row.getLength()), - Append::add, proto, cellScanner); + Append::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = toTimeRange(proto.getTimeRange()); append.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -950,17 +917,17 @@ public static Append toAppend(final MutationProto proto, final CellScanner cellS /** * Convert a protocol buffer Mutate to an Increment - * * @param proto the protocol buffer Mutate to convert * @return the converted client Increment * @throws IOException */ public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.INCREMENT : type.name(); - Increment increment = toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), - Increment::add, proto, cellScanner); + Increment increment = + toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), + Increment::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = toTimeRange(proto.getTimeRange()); 
increment.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -970,7 +937,6 @@ public static Increment toIncrement(final MutationProto proto, final CellScanner /** * Convert a MutateRequest to Mutation - * * @param proto the protocol buffer Mutate to convert * @return the converted Mutation * @throws IOException @@ -1020,15 +986,12 @@ public static Scan.ReadType toReadType(ClientProtos.Scan.ReadType readType) { /** * Convert a client Scan to a protocol buffer Scan - * * @param scan the client Scan to convert * @return the converted protocol buffer Scan * @throws IOException */ - public static ClientProtos.Scan toScan( - final Scan scan) throws IOException { - ClientProtos.Scan.Builder scanBuilder = - ClientProtos.Scan.newBuilder(); + public static ClientProtos.Scan toScan(final Scan scan) throws IOException { + ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder(); scanBuilder.setCacheBlocks(scan.getCacheBlocks()); if (scan.getBatch() > 0) { scanBuilder.setBatchSize(scan.getBatch()); @@ -1046,15 +1009,14 @@ public static ClientProtos.Scan toScan( scanBuilder.setMaxVersions(scan.getMaxVersions()); scan.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { scanBuilder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)) - .setTimeRange(toTimeRange(timeRange)) - .build()); + .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)).setTimeRange(toTimeRange(timeRange)) + .build()); }); scanBuilder.setTimeRange(ProtobufUtil.toTimeRange(scan.getTimeRange())); Map attributes = scan.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); scanBuilder.addAttribute(attributeBuilder.build()); @@ -1073,13 +1035,12 @@ public static ClientProtos.Scan toScan( } if (scan.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); - for (Map.Entry> - family: scan.getFamilyMap().entrySet()) { + for (Map.Entry> family : scan.getFamilyMap().entrySet()) { columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); - NavigableSet qualifiers = family.getValue(); + NavigableSet qualifiers = family.getValue(); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte [] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } } @@ -1120,13 +1081,11 @@ public static ClientProtos.Scan toScan( /** * Convert a protocol buffer Scan to a client Scan - * * @param proto the protocol buffer Scan to convert * @return the converted client Scan * @throws IOException */ - public static Scan toScan( - final ClientProtos.Scan proto) throws IOException { + public static Scan toScan(final ClientProtos.Scan proto) throws IOException { byte[] startRow = HConstants.EMPTY_START_ROW; byte[] stopRow = HConstants.EMPTY_END_ROW; boolean includeStartRow = true; @@ -1168,8 +1127,8 @@ public static Scan toScan( if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = toTimeRange(cftr.getTimeRange()); - scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + 
scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -1189,14 +1148,14 @@ public static Scan toScan( if (proto.hasAllowPartialResults()) { scan.setAllowPartialResults(proto.getAllowPartialResults()); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { scan.addColumn(family, qualifier.toByteArray()); } } else { @@ -1243,15 +1202,12 @@ public static Cursor toCursor(ClientProtos.Cursor cursor) { /** * Create a protocol buffer Get based on a client Get. - * * @param get the client Get * @return a protocol buffer Get * @throws IOException */ - public static ClientProtos.Get toGet( - final Get get) throws IOException { - ClientProtos.Get.Builder builder = - ClientProtos.Get.newBuilder(); + public static ClientProtos.Get toGet(final Get get) throws IOException { + ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder(); builder.setRow(UnsafeByteOperations.unsafeWrap(get.getRow())); builder.setCacheBlocks(get.getCacheBlocks()); builder.setMaxVersions(get.getMaxVersions()); @@ -1260,15 +1216,14 @@ public static ClientProtos.Get toGet( } get.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { builder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)) - .setTimeRange(toTimeRange(timeRange)) - .build()); + .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)).setTimeRange(toTimeRange(timeRange)) + .build()); }); builder.setTimeRange(ProtobufUtil.toTimeRange(get.getTimeRange())); Map attributes = get.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1277,12 +1232,12 @@ public static ClientProtos.Get toGet( if (get.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); Map> families = get.getFamilyMap(); - for (Map.Entry> family: families.entrySet()) { + for (Map.Entry> family : families.entrySet()) { NavigableSet qualifiers = family.getValue(); columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte[] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } } @@ -1295,7 +1250,7 @@ public static ClientProtos.Get toGet( if (get.getRowOffsetPerColumnFamily() > 0) { builder.setStoreOffset(get.getRowOffsetPerColumnFamily()); } - if (get.isCheckExistenceOnly()){ + if (get.isCheckExistenceOnly()) { builder.setExistenceOnly(true); } if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) { @@ -1310,20 +1265,19 @@ public static ClientProtos.Get toGet( } public static MutationProto 
toMutation(final MutationType type, final Mutation mutation) - throws IOException { + throws IOException { return toMutation(type, mutation, HConstants.NO_NONCE); } /** * Create a protocol buffer Mutate based on a client Mutation - * * @param type * @param mutation * @return a protobuf'd Mutation * @throws IOException */ public static MutationProto toMutation(final MutationType type, final Mutation mutation, - final long nonce) throws IOException { + final long nonce) throws IOException { return toMutation(type, mutation, MutationProto.newBuilder(), nonce); } @@ -1333,8 +1287,7 @@ public static MutationProto toMutation(final MutationType type, final Mutation m } public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder, long nonce) - throws IOException { + MutationProto.Builder builder, long nonce) throws IOException { builder = getMutationBuilderAndSetCommonFields(type, mutation, builder); if (nonce != HConstants.NO_NONCE) { builder.setNonce(nonce); @@ -1347,15 +1300,15 @@ public static MutationProto toMutation(final MutationType type, final Mutation m } ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - for (Map.Entry> family: mutation.getFamilyCellMap().entrySet()) { + for (Map.Entry> family : mutation.getFamilyCellMap().entrySet()) { columnBuilder.clear(); columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); - for (Cell cell: family.getValue()) { + for (Cell cell : family.getValue()) { valueBuilder.clear(); - valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); - valueBuilder.setValue(UnsafeByteOperations.unsafeWrap( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength())); + valueBuilder.setValue(UnsafeByteOperations.unsafeWrap(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); valueBuilder.setTimestamp(cell.getTimestamp()); if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) { KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte()); @@ -1378,12 +1331,12 @@ public static MutationProto toMutation(final MutationType type, final Mutation m * @throws IOException */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder) throws IOException { + final MutationProto.Builder builder) throws IOException { return toMutationNoData(type, mutation, builder, HConstants.NO_NONCE); } /** - * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. + * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. * Understanding is that the Cell will be transported other than via protobuf. 
* @param type * @param mutation @@ -1391,8 +1344,8 @@ public static MutationProto toMutationNoData(final MutationType type, final Muta * @throws IOException */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation) - throws IOException { - MutationProto.Builder builder = MutationProto.newBuilder(); + throws IOException { + MutationProto.Builder builder = MutationProto.newBuilder(); return toMutationNoData(type, mutation, builder); } @@ -1428,7 +1381,7 @@ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final Map attributes = mutation.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1439,7 +1392,6 @@ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final /** * Convert a client Result to a protocol buffer Result - * * @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1448,14 +1400,13 @@ public static ClientProtos.Result toResult(final Result result) { } /** - * Convert a client Result to a protocol buffer Result + * Convert a client Result to a protocol buffer Result * @param result the client Result to convert - * @param encodeTags whether to includeTags in converted protobuf result or not - * When @encodeTags is set to true, it will return all the tags in the response. - * These tags may contain some sensitive data like acl permissions, etc. - * Only the tools like Export, Import which needs to take backup needs to set - * it to true so that cell tags are persisted in backup. - * Refer to HBASE-25246 for more context. + * @param encodeTags whether to includeTags in converted protobuf result or not When @encodeTags + * is set to true, it will return all the tags in the response. These tags may contain + * some sensitive data like acl permissions, etc. Only the tools like Export, Import + * which needs to take backup needs to set it to true so that cell tags are persisted in + * backup. Refer to HBASE-25246 for more context. * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final Result result, boolean encodeTags) { @@ -1481,12 +1432,11 @@ public static ClientProtos.Result toResult(final Result result, boolean encodeTa /** * Convert a client Result to a protocol buffer Result - * * @param existence the client existence to send * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final boolean existence, boolean stale) { - if (stale){ + if (stale) { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE_STALE : EMPTY_RESULT_PB_EXISTS_FALSE_STALE; } else { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE : EMPTY_RESULT_PB_EXISTS_FALSE; @@ -1494,9 +1444,8 @@ public static ClientProtos.Result toResult(final boolean existence, boolean stal } /** - * Convert a client Result to a protocol buffer Result. - * The pb Result does not include the Cell data. That is for transport otherwise. - * + * Convert a client Result to a protocol buffer Result. The pb Result does not include the Cell + * data. That is for transport otherwise. 
* @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1512,7 +1461,6 @@ public static ClientProtos.Result toResultNoData(final Result result) { /** * Convert a protocol buffer Result to a client Result - * * @param proto the protocol buffer Result to convert * @return the converted client Result */ @@ -1522,26 +1470,24 @@ public static Result toResult(final ClientProtos.Result proto) { /** * Convert a protocol buffer Result to a client Result - * * @param proto the protocol buffer Result to convert - * @param decodeTags whether to decode tags into converted client Result - * When @decodeTags is set to true, it will decode all the tags from the - * response. These tags may contain some sensitive data like acl permissions, - * etc. Only the tools like Export, Import which needs to take backup needs to - * set it to true so that cell tags are persisted in backup. - * Refer to HBASE-25246 for more context. + * @param decodeTags whether to decode tags into converted client Result When @decodeTags is set + * to true, it will decode all the tags from the response. These tags may contain some + * sensitive data like acl permissions, etc. Only the tools like Export, Import which + * needs to take backup needs to set it to true so that cell tags are persisted in + * backup. Refer to HBASE-25246 for more context. * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto, boolean decodeTags) { if (proto.hasExists()) { if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } List values = proto.getCellList(); - if (values.isEmpty()){ + if (values.isEmpty()) { return proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT; } @@ -1555,23 +1501,22 @@ public static Result toResult(final ClientProtos.Result proto, boolean decodeTag /** * Convert a protocol buffer Result to a client Result - * * @param proto the protocol buffer Result to convert * @param scanner Optional cell scanner. * @return the converted client Result * @throws IOException */ public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner) - throws IOException { + throws IOException { List values = proto.getCellList(); if (proto.hasExists()) { - if ((values != null && !values.isEmpty()) || - (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0)) { + if ((values != null && !values.isEmpty()) + || (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0)) { throw new IllegalArgumentException("bad proto: exists with cells is no allowed " + proto); } if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? 
EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } @@ -1587,10 +1532,10 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner } } - if (!values.isEmpty()){ + if (!values.isEmpty()) { if (cells == null) cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (CellProtos.Cell c: values) { + for (CellProtos.Cell c : values) { cells.add(toCell(builder, c, false)); } } @@ -1600,10 +1545,8 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner : Result.create(cells, null, proto.getStale()); } - /** * Convert a ByteArrayComparable to a protocol buffer Comparator - * * @param comparator the ByteArrayComparable to convert * @return the converted protocol buffer Comparator */ @@ -1616,23 +1559,22 @@ public static ComparatorProtos.Comparator toComparator(ByteArrayComparable compa /** * Convert a protocol buffer Comparator to a ByteArrayComparable - * * @param proto the protocol buffer Comparator to convert * @return the converted ByteArrayComparable */ @SuppressWarnings("unchecked") public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) - throws IOException { + throws IOException { String type = proto.getName(); String funcName = "parseFrom"; - byte [] value = proto.getSerializedComparator().toByteArray(); + byte[] value = proto.getSerializedComparator().toByteArray(); try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Method parseFrom = c.getMethod(funcName, byte[].class); if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (ByteArrayComparable)parseFrom.invoke(null, value); + return (ByteArrayComparable) parseFrom.invoke(null, value); } catch (Exception e) { throw new IOException(e); } @@ -1640,14 +1582,13 @@ public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto /** * Convert a protocol buffer Filter to a client Filter - * * @param proto the protocol buffer Filter to convert * @return the converted Filter */ @SuppressWarnings("unchecked") public static Filter toFilter(FilterProtos.Filter proto) throws IOException { String type = proto.getName(); - final byte [] value = proto.getSerializedFilter().toByteArray(); + final byte[] value = proto.getSerializedFilter().toByteArray(); String funcName = "parseFrom"; try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); @@ -1655,7 +1596,7 @@ public static Filter toFilter(FilterProtos.Filter proto) throws IOException { if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (Filter)parseFrom.invoke(c, value); + return (Filter) parseFrom.invoke(c, value); } catch (Exception e) { // Either we couldn't instantiate the method object, or "parseFrom" failed. // In either case, let's not retry. @@ -1665,7 +1606,6 @@ public static Filter toFilter(FilterProtos.Filter proto) throws IOException { /** * Convert a client Filter to a protocol buffer Filter - * * @param filter the Filter to convert * @return the converted protocol buffer Filter */ @@ -1678,53 +1618,48 @@ public static FilterProtos.Filter toFilter(Filter filter) throws IOException { /** * Convert a delete KeyValue type to protocol buffer DeleteType. 
- * * @param type * @return protocol buffer DeleteType * @throws IOException */ - public static DeleteType toDeleteType( - KeyValue.Type type) throws IOException { + public static DeleteType toDeleteType(KeyValue.Type type) throws IOException { switch (type) { - case Delete: - return DeleteType.DELETE_ONE_VERSION; - case DeleteColumn: - return DeleteType.DELETE_MULTIPLE_VERSIONS; - case DeleteFamily: - return DeleteType.DELETE_FAMILY; - case DeleteFamilyVersion: - return DeleteType.DELETE_FAMILY_VERSION; - default: + case Delete: + return DeleteType.DELETE_ONE_VERSION; + case DeleteColumn: + return DeleteType.DELETE_MULTIPLE_VERSIONS; + case DeleteFamily: + return DeleteType.DELETE_FAMILY; + case DeleteFamilyVersion: + return DeleteType.DELETE_FAMILY_VERSION; + default: throw new IOException("Unknown delete type: " + type); } } /** * Convert a protocol buffer DeleteType to delete KeyValue type. - * * @param type The DeleteType * @return The type. * @throws IOException */ - public static KeyValue.Type fromDeleteType( - DeleteType type) throws IOException { + public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException { switch (type) { - case DELETE_ONE_VERSION: - return KeyValue.Type.Delete; - case DELETE_MULTIPLE_VERSIONS: - return KeyValue.Type.DeleteColumn; - case DELETE_FAMILY: - return KeyValue.Type.DeleteFamily; - case DELETE_FAMILY_VERSION: - return KeyValue.Type.DeleteFamilyVersion; - default: - throw new IOException("Unknown delete type: " + type); + case DELETE_ONE_VERSION: + return KeyValue.Type.Delete; + case DELETE_MULTIPLE_VERSIONS: + return KeyValue.Type.DeleteColumn; + case DELETE_FAMILY: + return KeyValue.Type.DeleteFamily; + case DELETE_FAMILY_VERSION: + return KeyValue.Type.DeleteFamilyVersion; + default: + throw new IOException("Unknown delete type: " + type); } } /** * Convert a stringified protocol buffer exception Parameter to a Java Exception - * * @param parameter the protocol buffer Parameter to convert * @return the converted Exception * @throws IOException if failed to deserialize the parameter @@ -1736,7 +1671,7 @@ public static Throwable toException(final NameBytesPair parameter) throws IOExce String type = parameter.getName(); try { Class c = - (Class)Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); + (Class) Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Constructor cn = null; try { cn = c.getDeclaredConstructor(String.class); @@ -1751,22 +1686,21 @@ public static Throwable toException(final NameBytesPair parameter) throws IOExce } } -// Start helpers for Client + // Start helpers for Client @SuppressWarnings("unchecked") public static T newServiceStub(Class service, RpcChannel channel) throws Exception { - return (T)Methods.call(service, null, "newStub", - new Class[]{ RpcChannel.class }, new Object[]{ channel }); + return (T) Methods.call(service, null, "newStub", new Class[] { RpcChannel.class }, + new Object[] { channel }); } -// End helpers for Client -// Start helpers for Admin + // End helpers for Client + // Start helpers for Admin /** - * A helper to retrieve region info given a region name or an - * encoded region name using admin protocol. - * + * A helper to retrieve region info given a region name or an encoded region name using admin + * protocol. 
* @return the retrieved region info */ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( @@ -1774,8 +1708,8 @@ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( final byte[] regionName) throws IOException { try { GetRegionInfoRequest request = getGetRegionInfoRequest(regionName); - GetRegionInfoResponse response = admin.getRegionInfo(controller, - getGetRegionInfoRequest(regionName)); + GetRegionInfoResponse response = + admin.getRegionInfo(controller, getGetRegionInfoRequest(regionName)); return toRegionInfo(response.getRegionInfo()); } catch (ServiceException se) { throw getRemoteException(se); @@ -1785,27 +1719,27 @@ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( /** * @return A GetRegionInfoRequest for the passed in regionName. */ - public static GetRegionInfoRequest getGetRegionInfoRequest(final byte [] regionName) - throws IOException { - return org.apache.hadoop.hbase.client.RegionInfo.isEncodedRegionName(regionName)? - GetRegionInfoRequest.newBuilder().setRegion(RequestConverter. - buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, regionName)).build(): - RequestConverter.buildGetRegionInfoRequest(regionName); + public static GetRegionInfoRequest getGetRegionInfoRequest(final byte[] regionName) + throws IOException { + return org.apache.hadoop.hbase.client.RegionInfo.isEncodedRegionName(regionName) + ? GetRegionInfoRequest.newBuilder() + .setRegion(RequestConverter + .buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, regionName)) + .build() + : RequestConverter.buildGetRegionInfoRequest(regionName); } /** - * A helper to close a region given a region name - * using admin protocol. - * + * A helper to close a region given a region name using admin protocol. 
* @param admin * @param regionName * @throws IOException */ public static void closeRegion(final RpcController controller, final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName) - throws IOException { + throws IOException { CloseRegionRequest closeRegionRequest = - ProtobufUtil.buildCloseRegionRequest(server, regionName); + ProtobufUtil.buildCloseRegionRequest(server, regionName); try { admin.closeRegion(controller, closeRegionRequest); } catch (ServiceException se) { @@ -1814,19 +1748,17 @@ public static void closeRegion(final RpcController controller, } /** - * A helper to warmup a region given a region name - * using admin protocol - * + * A helper to warmup a region given a region name using admin protocol * @param admin * @param regionInfo - * */ public static void warmupRegion(final RpcController controller, - final AdminService.BlockingInterface admin, final org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException { + final AdminService.BlockingInterface admin, + final org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException { try { WarmupRegionRequest warmupRegionRequest = - RequestConverter.buildWarmupRegionRequest(regionInfo); + RequestConverter.buildWarmupRegionRequest(regionInfo); admin.warmupRegion(controller, warmupRegionRequest); } catch (ServiceException e) { @@ -1841,10 +1773,9 @@ public static void warmupRegion(final RpcController controller, * @throws IOException */ public static void openRegion(final RpcController controller, - final AdminService.BlockingInterface admin, ServerName server, final org.apache.hadoop.hbase.client.RegionInfo region) - throws IOException { - OpenRegionRequest request = - RequestConverter.buildOpenRegionRequest(server, region, null); + final AdminService.BlockingInterface admin, ServerName server, + final org.apache.hadoop.hbase.client.RegionInfo region) throws IOException { + OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, region, null); try { admin.openRegion(controller, request); } catch (ServiceException se) { @@ -1853,26 +1784,23 @@ public static void openRegion(final RpcController controller, } /** - * A helper to get the all the online regions on a region - * server using admin protocol. - * + * A helper to get the all the online regions on a region server using admin protocol. * @param admin * @return a list of online region info * @throws IOException */ - public static List getOnlineRegions(final AdminService.BlockingInterface admin) - throws IOException { + public static List + getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException { return getOnlineRegions(null, admin); } /** - * A helper to get the all the online regions on a region - * server using admin protocol. + * A helper to get the all the online regions on a region server using admin protocol. 
* @return a list of online region info */ - public static List getOnlineRegions(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { + public static List + getOnlineRegions(final RpcController controller, final AdminService.BlockingInterface admin) + throws IOException { GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest(); GetOnlineRegionResponse response = null; try { @@ -1885,14 +1813,15 @@ public static List getOnlineRegions(f /** * Get the list of region info from a GetOnlineRegionResponse - * * @param proto the GetOnlineRegionResponse * @return the list of region info or empty if proto is null */ - public static List getRegionInfos(final GetOnlineRegionResponse proto) { + public static List + getRegionInfos(final GetOnlineRegionResponse proto) { if (proto == null) return Collections.EMPTY_LIST; - List regionInfos = new ArrayList<>(proto.getRegionInfoList().size()); - for (RegionInfo regionInfo: proto.getRegionInfoList()) { + List regionInfos = + new ArrayList<>(proto.getRegionInfoList().size()); + for (RegionInfo regionInfo : proto.getRegionInfoList()) { regionInfos.add(toRegionInfo(regionInfo)); } return regionInfos; @@ -1903,8 +1832,7 @@ public static List getRegionInfos(fin * @return the server name */ public static ServerInfo getServerInfo(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { + final AdminService.BlockingInterface admin) throws IOException { GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest(); try { GetServerInfoResponse response = admin.getServerInfo(controller, request); @@ -1915,28 +1843,22 @@ public static ServerInfo getServerInfo(final RpcController controller, } /** - * A helper to get the list of files of a column family - * on a given region using admin protocol. - * + * A helper to get the list of files of a column family on a given region using admin protocol. * @return the list of store files */ public static List getStoreFiles(final AdminService.BlockingInterface admin, - final byte[] regionName, final byte[] family) - throws IOException { + final byte[] regionName, final byte[] family) throws IOException { return getStoreFiles(null, admin, regionName, family); } /** - * A helper to get the list of files of a column family - * on a given region using admin protocol. - * + * A helper to get the list of files of a column family on a given region using admin protocol. 
* @return the list of store files */ public static List getStoreFiles(final RpcController controller, final AdminService.BlockingInterface admin, final byte[] regionName, final byte[] family) - throws IOException { - GetStoreFileRequest request = - ProtobufUtil.buildGetStoreFileRequest(regionName, family); + throws IOException { + GetStoreFileRequest request = ProtobufUtil.buildGetStoreFileRequest(regionName, family); try { GetStoreFileResponse response = admin.getStoreFile(controller, request); return response.getStoreFileList(); @@ -1945,7 +1867,7 @@ public static List getStoreFiles(final RpcController controller, } } -// End helpers for Admin + // End helpers for Admin /* * Get the total (read + write) requests from a RegionLoad pb @@ -1960,11 +1882,10 @@ public static long getTotalRequestsCount(RegionLoad rl) { return rl.getReadRequestsCount() + rl.getWriteRequestsCount(); } - /** * @param m Message to get delimited pb serialization of (with pb magic prefix) */ - public static byte [] toDelimitedByteArray(final Message m) throws IOException { + public static byte[] toDelimitedByteArray(final Message m) throws IOException { // Allocate arbitrary big size so we avoid resizing. ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); baos.write(PB_MAGIC); @@ -1974,13 +1895,12 @@ public static long getTotalRequestsCount(RegionLoad rl) { /** * Find the HRegion encoded name based on a region specifier - * * @param regionSpecifier the region specifier * @return the corresponding region's encoded name * @throws DoNotRetryIOException if the specifier type is unsupported */ - public static String getRegionEncodedName( - final RegionSpecifier regionSpecifier) throws DoNotRetryIOException { + public static String getRegionEncodedName(final RegionSpecifier regionSpecifier) + throws DoNotRetryIOException { ByteString value = regionSpecifier.getValue(); RegionSpecifierType type = regionSpecifier.getType(); switch (type) { @@ -1989,8 +1909,7 @@ public static String getRegionEncodedName( case ENCODED_REGION_NAME: return value.toStringUtf8(); default: - throw new DoNotRetryIOException( - "Unsupported region specifier type: " + type); + throw new DoNotRetryIOException("Unsupported region specifier type: " + type); } } @@ -2034,7 +1953,7 @@ public static void toIOException(ServiceException se) throws IOException { Throwable cause = se.getCause(); if (cause != null && cause instanceof IOException) { - throw (IOException)cause; + throw (IOException) cause; } throw new IOException(se); } @@ -2085,13 +2004,10 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { } public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell, - boolean decodeTags) { - ExtendedCellBuilder builder = cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) + boolean decodeTags) { + ExtendedCellBuilder builder = cellBuilder.clear().setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()).setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()).setType((byte) cell.getCellType().getNumber()) .setValue(cell.getValue().toByteArray()); if (decodeTags && cell.hasTags()) { builder.setTags(cell.getTags().toByteArray()); @@ -2101,12 +2017,10 @@ public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell public static 
HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { HBaseProtos.NamespaceDescriptor.Builder b = - HBaseProtos.NamespaceDescriptor.newBuilder() - .setName(ByteString.copyFromUtf8(ns.getName())); - for(Map.Entry entry: ns.getConfiguration().entrySet()) { - b.addConfiguration(HBaseProtos.NameStringPair.newBuilder() - .setName(entry.getKey()) - .setValue(entry.getValue())); + HBaseProtos.NamespaceDescriptor.newBuilder().setName(ByteString.copyFromUtf8(ns.getName())); + for (Map.Entry entry : ns.getConfiguration().entrySet()) { + b.addConfiguration( + HBaseProtos.NameStringPair.newBuilder().setName(entry.getKey()).setValue(entry.getValue())); } return b.build(); } @@ -2120,25 +2034,25 @@ public static NamespaceDescriptor toNamespaceDescriptor(HBaseProtos.NamespaceDes } public static CompactionDescriptor toCompactionDescriptor( - org.apache.hadoop.hbase.client.RegionInfo info, byte[] family, - List inputPaths, List outputPaths, Path storeDir) { + org.apache.hadoop.hbase.client.RegionInfo info, byte[] family, List inputPaths, + List outputPaths, Path storeDir) { return toCompactionDescriptor(info, null, family, inputPaths, outputPaths, storeDir); } public static CompactionDescriptor toCompactionDescriptor( - org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName, - byte[] family, List inputPaths, List outputPaths, Path storeDir) { + org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName, byte[] family, + List inputPaths, List outputPaths, Path storeDir) { // compaction descriptor contains relative paths. // input / output paths are relative to the store dir // store dir is relative to region dir CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder() .setTableName(UnsafeByteOperations.unsafeWrap(info.getTable().toBytes())) - .setEncodedRegionName(UnsafeByteOperations.unsafeWrap( - regionName == null ? info.getEncodedNameAsBytes() : regionName)) - .setFamilyName(UnsafeByteOperations.unsafeWrap(family)) - .setStoreHomeDir(storeDir.getName()); //make relative + .setEncodedRegionName(UnsafeByteOperations + .unsafeWrap(regionName == null ? 
info.getEncodedNameAsBytes() : regionName)) + .setFamilyName(UnsafeByteOperations.unsafeWrap(family)).setStoreHomeDir(storeDir.getName()); // make + // relative for (Path inputPath : inputPaths) { - builder.addCompactionInput(inputPath.getName()); //relative path + builder.addCompactionInput(inputPath.getName()); // relative path } for (Path outputPath : outputPaths) { builder.addCompactionOutput(outputPath.getName()); @@ -2147,10 +2061,10 @@ public static CompactionDescriptor toCompactionDescriptor( return builder.build(); } - public static FlushDescriptor toFlushDescriptor(FlushAction action, org.apache.hadoop.hbase.client.RegionInfo hri, - long flushSeqId, Map> committedFiles) { - FlushDescriptor.Builder desc = FlushDescriptor.newBuilder() - .setAction(action) + public static FlushDescriptor toFlushDescriptor(FlushAction action, + org.apache.hadoop.hbase.client.RegionInfo hri, long flushSeqId, + Map> committedFiles) { + FlushDescriptor.Builder desc = FlushDescriptor.newBuilder().setAction(action) .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes())) .setRegionName(UnsafeByteOperations.unsafeWrap(hri.getRegionName())) .setFlushSequenceNumber(flushSeqId) @@ -2159,8 +2073,8 @@ public static FlushDescriptor toFlushDescriptor(FlushAction action, org.apache.h for (Map.Entry> entry : committedFiles.entrySet()) { WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builder = WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) - .setStoreHomeDir(Bytes.toString(entry.getKey())); //relative to region + .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) + .setStoreHomeDir(Bytes.toString(entry.getKey())); // relative to region if (entry.getValue() != null) { for (Path path : entry.getValue()) { builder.addFlushOutput(path.getName()); @@ -2171,37 +2085,27 @@ public static FlushDescriptor toFlushDescriptor(FlushAction action, org.apache.h return desc.build(); } - public static RegionEventDescriptor toRegionEventDescriptor( - EventType eventType, org.apache.hadoop.hbase.client.RegionInfo hri, long seqId, ServerName server, + public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, + org.apache.hadoop.hbase.client.RegionInfo hri, long seqId, ServerName server, Map> storeFiles) { final byte[] tableNameAsBytes = hri.getTable().getName(); final byte[] encodedNameAsBytes = hri.getEncodedNameAsBytes(); final byte[] regionNameAsBytes = hri.getRegionName(); - return toRegionEventDescriptor(eventType, - tableNameAsBytes, - encodedNameAsBytes, - regionNameAsBytes, - seqId, + return toRegionEventDescriptor(eventType, tableNameAsBytes, encodedNameAsBytes, + regionNameAsBytes, seqId, - server, - storeFiles); + server, storeFiles); } public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, - byte[] tableNameAsBytes, - byte[] encodedNameAsBytes, - byte[] regionNameAsBytes, - long seqId, - - ServerName server, - Map> storeFiles) { - RegionEventDescriptor.Builder desc = RegionEventDescriptor.newBuilder() - .setEventType(eventType) + byte[] tableNameAsBytes, byte[] encodedNameAsBytes, byte[] regionNameAsBytes, long seqId, + + ServerName server, Map> storeFiles) { + RegionEventDescriptor.Builder desc = RegionEventDescriptor.newBuilder().setEventType(eventType) .setTableName(UnsafeByteOperations.unsafeWrap(tableNameAsBytes)) .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedNameAsBytes)) 
.setRegionName(UnsafeByteOperations.unsafeWrap(regionNameAsBytes)) - .setLogSequenceNumber(seqId) - .setServer(toServerName(server)); + .setLogSequenceNumber(seqId).setServer(toServerName(server)); for (Entry> entry : storeFiles.entrySet()) { StoreDescriptor.Builder builder = StoreDescriptor.newBuilder() @@ -2217,55 +2121,54 @@ public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, } /** - * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. - * Tries to NOT print out data both because it can be big but also so we do not have data in our - * logs. Use judiciously. + * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to + * NOT print out data both because it can be big but also so we do not have data in our logs. Use + * judiciously. * @param m * @return toString of passed m */ public static String getShortTextFormat(Message m) { if (m == null) return "null"; if (m instanceof ScanRequest) { - // This should be small and safe to output. No data. + // This should be small and safe to output. No data. return TextFormat.shortDebugString(m); } else if (m instanceof RegionServerReportRequest) { // Print a short message only, just the servername and the requests, not the full load. - RegionServerReportRequest r = (RegionServerReportRequest)m; - return "server " + TextFormat.shortDebugString(r.getServer()) + - " load { numberOfRequests: " + r.getLoad().getNumberOfRequests() + " }"; + RegionServerReportRequest r = (RegionServerReportRequest) m; + return "server " + TextFormat.shortDebugString(r.getServer()) + " load { numberOfRequests: " + + r.getLoad().getNumberOfRequests() + " }"; } else if (m instanceof RegionServerStartupRequest) { // Should be small enough. return TextFormat.shortDebugString(m); } else if (m instanceof MutationProto) { - return toShortString((MutationProto)m); + return toShortString((MutationProto) m); } else if (m instanceof GetRequest) { GetRequest r = (GetRequest) m; - return "region= " + getStringForByteString(r.getRegion().getValue()) + - ", row=" + getStringForByteString(r.getGet().getRow()); + return "region= " + getStringForByteString(r.getRegion().getValue()) + ", row=" + + getStringForByteString(r.getGet().getRow()); } else if (m instanceof ClientProtos.MultiRequest) { ClientProtos.MultiRequest r = (ClientProtos.MultiRequest) m; // Get the number of Actions - int actionsCount = r.getRegionActionList() - .stream() - .mapToInt(ClientProtos.RegionAction::getActionCount) - .sum(); + int actionsCount = r.getRegionActionList().stream() + .mapToInt(ClientProtos.RegionAction::getActionCount).sum(); // Get first set of Actions. ClientProtos.RegionAction actions = r.getRegionActionList().get(0); - String row = actions.getActionCount() <= 0? "": - getStringForByteString(actions.getAction(0).hasGet()? - actions.getAction(0).getGet().getRow(): - actions.getAction(0).getMutation().getRow()); - return "region= " + getStringForByteString(actions.getRegion().getValue()) + - ", for " + actionsCount + " action(s) and 1st row key=" + row; + String row = actions.getActionCount() <= 0 ? "" + : getStringForByteString( + actions.getAction(0).hasGet() ? 
actions.getAction(0).getGet().getRow() + : actions.getAction(0).getMutation().getRow()); + return "region= " + getStringForByteString(actions.getRegion().getValue()) + ", for " + + actionsCount + " action(s) and 1st row key=" + row; } else if (m instanceof ClientProtos.MutateRequest) { ClientProtos.MutateRequest r = (ClientProtos.MutateRequest) m; - return "region= " + getStringForByteString(r.getRegion().getValue()) + - ", row=" + getStringForByteString(r.getMutation().getRow()); + return "region= " + getStringForByteString(r.getRegion().getValue()) + ", row=" + + getStringForByteString(r.getMutation().getRow()); } else if (m instanceof ClientProtos.CoprocessorServiceRequest) { ClientProtos.CoprocessorServiceRequest r = (ClientProtos.CoprocessorServiceRequest) m; - return "coprocessorService= " + r.getCall().getServiceName() + ":" + r.getCall().getMethodName(); + return "coprocessorService= " + r.getCall().getServiceName() + ":" + + r.getCall().getMethodName(); } return "TODO: " + m.getClass().toString(); } @@ -2276,7 +2179,6 @@ private static String getStringForByteString(ByteString bs) { /** * Return SlowLogParams to maintain recent online slowlog responses - * * @param message Message object {@link Message} * @return SlowLogParams with regionName(for filter queries) and params */ @@ -2297,14 +2199,12 @@ public static SlowLogParams getSlowLogParams(Message message) { GetRequest getRequest = (GetRequest) message; String regionName = getStringForByteString(getRequest.getRegion().getValue()); String params = "region= " + regionName + ", row= " - + getStringForByteString(getRequest.getGet().getRow()); + + getStringForByteString(getRequest.getGet().getRow()); return new SlowLogParams(regionName, params); } else if (message instanceof MultiRequest) { MultiRequest multiRequest = (MultiRequest) message; - int actionsCount = multiRequest.getRegionActionList() - .stream() - .mapToInt(ClientProtos.RegionAction::getActionCount) - .sum(); + int actionsCount = multiRequest.getRegionActionList().stream() + .mapToInt(ClientProtos.RegionAction::getActionCount).sum(); RegionAction actions = multiRequest.getRegionActionList().get(0); String regionName = getStringForByteString(actions.getRegion().getValue()); String params = "region= " + regionName + ", for " + actionsCount + " action(s)"; @@ -2316,9 +2216,8 @@ public static SlowLogParams getSlowLogParams(Message message) { return new SlowLogParams(regionName, params); } else if (message instanceof CoprocessorServiceRequest) { CoprocessorServiceRequest coprocessorServiceRequest = (CoprocessorServiceRequest) message; - String params = "coprocessorService= " - + coprocessorServiceRequest.getCall().getServiceName() - + ":" + coprocessorServiceRequest.getCall().getMethodName(); + String params = "coprocessorService= " + coprocessorServiceRequest.getCall().getServiceName() + + ":" + coprocessorServiceRequest.getCall().getMethodName(); return new SlowLogParams(params); } String params = message.getClass().toString(); @@ -2331,13 +2230,13 @@ public static SlowLogParams getSlowLogParams(Message message) { * @return Short String of mutation proto */ static String toShortString(final MutationProto proto) { - return "row=" + Bytes.toString(proto.getRow().toByteArray()) + - ", type=" + proto.getMutateType().toString(); + return "row=" + Bytes.toString(proto.getRow().toByteArray()) + ", type=" + + proto.getMutateType().toString(); } public static TableName toTableName(HBaseProtos.TableName tableNamePB) { return 
TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(), - tableNamePB.getQualifier().asReadOnlyByteBuffer()); + tableNamePB.getQualifier().asReadOnlyByteBuffer()); } public static HBaseProtos.TableName toProtoTableName(TableName tableName) { @@ -2373,7 +2272,6 @@ public static TableName[] getTableNameArray(List tableNam /** * Convert a protocol buffer CellVisibility to a client CellVisibility - * * @param proto * @return the converted client CellVisibility */ @@ -2384,7 +2282,6 @@ public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) /** * Convert a protocol buffer CellVisibility bytes to a client CellVisibility - * * @param protoBytes * @return the converted client CellVisibility * @throws DeserializationException @@ -2404,7 +2301,6 @@ public static CellVisibility toCellVisibility(byte[] protoBytes) throws Deserial /** * Create a protocol buffer CellVisibility based on a client CellVisibility. - * * @param cellVisibility * @return a protocol buffer CellVisibility */ @@ -2416,7 +2312,6 @@ public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVi /** * Convert a protocol buffer Authorizations to a client Authorizations - * * @param proto * @return the converted client Authorizations */ @@ -2427,7 +2322,6 @@ public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) /** * Convert a protocol buffer Authorizations bytes to a client Authorizations - * * @param protoBytes * @return the converted client Authorizations * @throws DeserializationException @@ -2447,7 +2341,6 @@ public static Authorizations toAuthorizations(byte[] protoBytes) throws Deserial /** * Create a protocol buffer Authorizations based on a client Authorizations. - * * @param authorizations * @return a protocol buffer Authorizations */ @@ -2461,45 +2354,56 @@ public static ClientProtos.Authorizations toAuthorizations(Authorizations author /** * Convert a protocol buffer TimeUnit to a client TimeUnit - * * @param proto * @return the converted client TimeUnit */ public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) { switch (proto) { - case NANOSECONDS: return TimeUnit.NANOSECONDS; - case MICROSECONDS: return TimeUnit.MICROSECONDS; - case MILLISECONDS: return TimeUnit.MILLISECONDS; - case SECONDS: return TimeUnit.SECONDS; - case MINUTES: return TimeUnit.MINUTES; - case HOURS: return TimeUnit.HOURS; - case DAYS: return TimeUnit.DAYS; + case NANOSECONDS: + return TimeUnit.NANOSECONDS; + case MICROSECONDS: + return TimeUnit.MICROSECONDS; + case MILLISECONDS: + return TimeUnit.MILLISECONDS; + case SECONDS: + return TimeUnit.SECONDS; + case MINUTES: + return TimeUnit.MINUTES; + case HOURS: + return TimeUnit.HOURS; + case DAYS: + return TimeUnit.DAYS; } throw new RuntimeException("Invalid TimeUnit " + proto); } /** * Convert a client TimeUnit to a protocol buffer TimeUnit - * * @param timeUnit * @return the converted protocol buffer TimeUnit */ public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) { switch (timeUnit) { - case NANOSECONDS: return HBaseProtos.TimeUnit.NANOSECONDS; - case MICROSECONDS: return HBaseProtos.TimeUnit.MICROSECONDS; - case MILLISECONDS: return HBaseProtos.TimeUnit.MILLISECONDS; - case SECONDS: return HBaseProtos.TimeUnit.SECONDS; - case MINUTES: return HBaseProtos.TimeUnit.MINUTES; - case HOURS: return HBaseProtos.TimeUnit.HOURS; - case DAYS: return HBaseProtos.TimeUnit.DAYS; + case NANOSECONDS: + return HBaseProtos.TimeUnit.NANOSECONDS; + case MICROSECONDS: + return 
HBaseProtos.TimeUnit.MICROSECONDS; + case MILLISECONDS: + return HBaseProtos.TimeUnit.MILLISECONDS; + case SECONDS: + return HBaseProtos.TimeUnit.SECONDS; + case MINUTES: + return HBaseProtos.TimeUnit.MINUTES; + case HOURS: + return HBaseProtos.TimeUnit.HOURS; + case DAYS: + return HBaseProtos.TimeUnit.DAYS; } throw new RuntimeException("Invalid TimeUnit " + timeUnit); } /** * Convert a protocol buffer ThrottleType to a client ThrottleType - * * @param proto * @return the converted client ThrottleType */ @@ -2530,7 +2434,6 @@ public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) /** * Convert a client ThrottleType to a protocol buffer ThrottleType - * * @param type * @return the converted protocol buffer ThrottleType */ @@ -2561,97 +2464,107 @@ public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType ty /** * Convert a protocol buffer QuotaScope to a client QuotaScope - * * @param proto * @return the converted client QuotaScope */ public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) { switch (proto) { - case CLUSTER: return QuotaScope.CLUSTER; - case MACHINE: return QuotaScope.MACHINE; + case CLUSTER: + return QuotaScope.CLUSTER; + case MACHINE: + return QuotaScope.MACHINE; } throw new RuntimeException("Invalid QuotaScope " + proto); } /** * Convert a client QuotaScope to a protocol buffer QuotaScope - * * @param scope * @return the converted protocol buffer QuotaScope */ public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) { switch (scope) { - case CLUSTER: return QuotaProtos.QuotaScope.CLUSTER; - case MACHINE: return QuotaProtos.QuotaScope.MACHINE; + case CLUSTER: + return QuotaProtos.QuotaScope.CLUSTER; + case MACHINE: + return QuotaProtos.QuotaScope.MACHINE; } throw new RuntimeException("Invalid QuotaScope " + scope); } /** * Convert a protocol buffer QuotaType to a client QuotaType - * * @param proto * @return the converted client QuotaType */ public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) { switch (proto) { - case THROTTLE: return QuotaType.THROTTLE; - case SPACE: return QuotaType.SPACE; + case THROTTLE: + return QuotaType.THROTTLE; + case SPACE: + return QuotaType.SPACE; } throw new RuntimeException("Invalid QuotaType " + proto); } /** * Convert a client QuotaType to a protocol buffer QuotaType - * * @param type * @return the converted protocol buffer QuotaType */ public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) { switch (type) { - case THROTTLE: return QuotaProtos.QuotaType.THROTTLE; - case SPACE: return QuotaProtos.QuotaType.SPACE; - default: throw new RuntimeException("Invalid QuotaType " + type); + case THROTTLE: + return QuotaProtos.QuotaType.THROTTLE; + case SPACE: + return QuotaProtos.QuotaType.SPACE; + default: + throw new RuntimeException("Invalid QuotaType " + type); } } /** * Converts a protocol buffer SpaceViolationPolicy to a client SpaceViolationPolicy. - * * @param proto The protocol buffer space violation policy. * @return The corresponding client SpaceViolationPolicy. 
*/ - public static SpaceViolationPolicy toViolationPolicy( - final QuotaProtos.SpaceViolationPolicy proto) { + public static SpaceViolationPolicy + toViolationPolicy(final QuotaProtos.SpaceViolationPolicy proto) { switch (proto) { - case DISABLE: return SpaceViolationPolicy.DISABLE; - case NO_WRITES_COMPACTIONS: return SpaceViolationPolicy.NO_WRITES_COMPACTIONS; - case NO_WRITES: return SpaceViolationPolicy.NO_WRITES; - case NO_INSERTS: return SpaceViolationPolicy.NO_INSERTS; + case DISABLE: + return SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: + return SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: + return SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: + return SpaceViolationPolicy.NO_INSERTS; } throw new RuntimeException("Invalid SpaceViolationPolicy " + proto); } /** * Converts a client SpaceViolationPolicy to a protocol buffer SpaceViolationPolicy. - * * @param policy The client SpaceViolationPolicy object. * @return The corresponding protocol buffer SpaceViolationPolicy. */ - public static QuotaProtos.SpaceViolationPolicy toProtoViolationPolicy( - final SpaceViolationPolicy policy) { + public static QuotaProtos.SpaceViolationPolicy + toProtoViolationPolicy(final SpaceViolationPolicy policy) { switch (policy) { - case DISABLE: return QuotaProtos.SpaceViolationPolicy.DISABLE; - case NO_WRITES_COMPACTIONS: return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS; - case NO_WRITES: return QuotaProtos.SpaceViolationPolicy.NO_WRITES; - case NO_INSERTS: return QuotaProtos.SpaceViolationPolicy.NO_INSERTS; + case DISABLE: + return QuotaProtos.SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: + return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: + return QuotaProtos.SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: + return QuotaProtos.SpaceViolationPolicy.NO_INSERTS; } throw new RuntimeException("Invalid SpaceViolationPolicy " + policy); } /** * Build a protocol buffer TimedQuota - * * @param limit the allowed number of request/data per timeUnit * @param timeUnit the limit time unit * @param scope the quota scope @@ -2659,58 +2572,48 @@ public static QuotaProtos.SpaceViolationPolicy toProtoViolationPolicy( */ public static QuotaProtos.TimedQuota toTimedQuota(final long limit, final TimeUnit timeUnit, final QuotaScope scope) { - return QuotaProtos.TimedQuota.newBuilder() - .setSoftLimit(limit) - .setTimeUnit(toProtoTimeUnit(timeUnit)) - .setScope(toProtoQuotaScope(scope)) - .build(); + return QuotaProtos.TimedQuota.newBuilder().setSoftLimit(limit) + .setTimeUnit(toProtoTimeUnit(timeUnit)).setScope(toProtoQuotaScope(scope)).build(); } /** * Builds a protocol buffer SpaceQuota. - * * @param limit The maximum space usage for the quota in bytes. * @param violationPolicy The policy to apply when the quota is violated. * @return The protocol buffer SpaceQuota. */ - public static QuotaProtos.SpaceQuota toProtoSpaceQuota( - final long limit, final SpaceViolationPolicy violationPolicy) { - return QuotaProtos.SpaceQuota.newBuilder() - .setSoftLimit(limit) - .setViolationPolicy(toProtoViolationPolicy(violationPolicy)) - .build(); + public static QuotaProtos.SpaceQuota toProtoSpaceQuota(final long limit, + final SpaceViolationPolicy violationPolicy) { + return QuotaProtos.SpaceQuota.newBuilder().setSoftLimit(limit) + .setViolationPolicy(toProtoViolationPolicy(violationPolicy)).build(); } /** - * Generates a marker for the WAL so that we propagate the notion of a bulk region load - * throughout the WAL. 
- * - * @param tableName The tableName into which the bulk load is being imported into. + * Generates a marker for the WAL so that we propagate the notion of a bulk region load throughout + * the WAL. + * @param tableName The tableName into which the bulk load is being imported into. * @param encodedRegionName Encoded region name of the region which is being bulk loaded. - * @param storeFiles A set of store files of a column family are bulk loaded. - * @param storeFilesSize Map of store files and their lengths - * @param bulkloadSeqId sequence ID (by a force flush) used to create bulk load hfile - * name + * @param storeFiles A set of store files of a column family are bulk loaded. + * @param storeFilesSize Map of store files and their lengths + * @param bulkloadSeqId sequence ID (by a force flush) used to create bulk load hfile name * @return The WAL log marker for bulk loads. */ public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableName, - ByteString encodedRegionName, Map> storeFiles, - Map storeFilesSize, long bulkloadSeqId) { - return toBulkLoadDescriptor(tableName, encodedRegionName, storeFiles, - storeFilesSize, bulkloadSeqId, null, true); + ByteString encodedRegionName, Map> storeFiles, + Map storeFilesSize, long bulkloadSeqId) { + return toBulkLoadDescriptor(tableName, encodedRegionName, storeFiles, storeFilesSize, + bulkloadSeqId, null, true); } public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableName, ByteString encodedRegionName, Map> storeFiles, - Map storeFilesSize, long bulkloadSeqId, - List clusterIds, boolean replicate) { + Map storeFilesSize, long bulkloadSeqId, List clusterIds, + boolean replicate) { BulkLoadDescriptor.Builder desc = - BulkLoadDescriptor.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)) - .setEncodedRegionName(encodedRegionName) - .setBulkloadSeqNum(bulkloadSeqId) - .setReplicate(replicate); - if(clusterIds != null) { + BulkLoadDescriptor.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setEncodedRegionName(encodedRegionName).setBulkloadSeqNum(bulkloadSeqId) + .setReplicate(replicate); + if (clusterIds != null) { desc.addAllClusterIds(clusterIds); } @@ -2738,7 +2641,7 @@ public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableN * @throws IOException */ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in) - throws IOException { + throws IOException { // This used to be builder.mergeDelimitedFrom(in); // but is replaced to allow us to bump the protobuf size limit. 
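For context on this hunk: the mergeDelimitedFrom/mergeFrom helpers being re-wrapped here exist to work around protobuf's default 64MB decode limit, which they do by wrapping the input in a CodedInputStream and raising its size limit before merging. A minimal sketch of that pattern, assuming the plain com.google.protobuf API (HBase itself uses its shaded thirdparty copy) and with a hypothetical class and method name:

import java.io.IOException;
import java.io.InputStream;

import com.google.protobuf.CodedInputStream;
import com.google.protobuf.Message;

public final class ProtobufMergeSketch {

  private ProtobufMergeSketch() {
  }

  // Parse a message from a stream without tripping protobuf's default 64MB
  // decode ceiling: wrap the stream, raise the limit, merge, then confirm the
  // parser stopped at end-of-message (tag 0).
  public static void mergeUnbounded(Message.Builder builder, InputStream in) throws IOException {
    CodedInputStream coded = CodedInputStream.newInstance(in);
    coded.setSizeLimit(Integer.MAX_VALUE); // lift the 64MB default
    builder.mergeFrom(coded);
    coded.checkLastTagWas(0); // 0 = clean end of message
  }
}

The delimited variant in the hunk additionally reads the length prefix by hand (starting with the in.read() that follows) so the same raised-limit parse can be applied to exactly that many bytes.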
final int firstByte = in.read(); @@ -2753,8 +2656,8 @@ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in) } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers where the message size is known + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers + * where the message size is known * @param builder current message builder * @param in InputStream containing protobuf data * @param size known size of protobuf data @@ -2769,14 +2672,13 @@ public static void mergeFrom(Message.Builder builder, InputStream in, int size) } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers where the message size is not known + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers + * where the message size is not known * @param builder current message builder * @param in InputStream containing protobuf data * @throws IOException */ - public static void mergeFrom(Message.Builder builder, InputStream in) - throws IOException { + public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(in); codedInput.setSizeLimit(Integer.MAX_VALUE); builder.mergeFrom(codedInput); @@ -2784,8 +2686,8 @@ public static void mergeFrom(Message.Builder builder, InputStream in) } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with ByteStrings + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with ByteStrings * @param builder current message builder * @param bs ByteString containing the * @throws IOException @@ -2798,8 +2700,8 @@ public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOEx } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder * @param b byte array * @throws IOException @@ -2812,8 +2714,8 @@ public static void mergeFrom(Message.Builder builder, byte[] b) throws IOExcepti } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder * @param b byte array * @param offset @@ -2841,37 +2743,32 @@ public static void mergeFrom(Message.Builder builder, CodedInputStream codedInpu codedInput.setSizeLimit(prevLimit); } - public static ReplicationLoadSink toReplicationLoadSink( - ClusterStatusProtos.ReplicationLoadSink rls) { + public static ReplicationLoadSink + toReplicationLoadSink(ClusterStatusProtos.ReplicationLoadSink rls) { ReplicationLoadSink.ReplicationLoadSinkBuilder builder = ReplicationLoadSink.newBuilder(); - builder.setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()). - setTimestampsOfLastAppliedOp(rls.getTimeStampsOfLastAppliedOp()). - setTimestampStarted(rls.hasTimestampStarted()? rls.getTimestampStarted(): -1L). - setTotalOpsProcessed(rls.hasTotalOpsProcessed()? 
rls.getTotalOpsProcessed(): -1L); + builder.setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) + .setTimestampsOfLastAppliedOp(rls.getTimeStampsOfLastAppliedOp()) + .setTimestampStarted(rls.hasTimestampStarted() ? rls.getTimestampStarted() : -1L) + .setTotalOpsProcessed(rls.hasTotalOpsProcessed() ? rls.getTotalOpsProcessed() : -1L); return builder.build(); } - public static ReplicationLoadSource toReplicationLoadSource( - ClusterStatusProtos.ReplicationLoadSource rls) { + public static ReplicationLoadSource + toReplicationLoadSource(ClusterStatusProtos.ReplicationLoadSource rls) { ReplicationLoadSource.ReplicationLoadSourceBuilder builder = ReplicationLoadSource.newBuilder(); - builder.setPeerID(rls.getPeerID()). - setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()). - setSizeOfLogQueue(rls.getSizeOfLogQueue()). - setTimestampOfLastShippedOp(rls.getTimeStampOfLastShippedOp()). - setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()). - setReplicationLag(rls.getReplicationLag()). - setQueueId(rls.getQueueId()). - setRecovered(rls.getRecovered()). - setRunning(rls.getRunning()). - setEditsSinceRestart(rls.getEditsSinceRestart()). - setEditsRead(rls.getEditsRead()). - setoPsShipped(rls.getOPsShipped()); + builder.setPeerID(rls.getPeerID()).setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) + .setSizeOfLogQueue(rls.getSizeOfLogQueue()) + .setTimestampOfLastShippedOp(rls.getTimeStampOfLastShippedOp()) + .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) + .setReplicationLag(rls.getReplicationLag()).setQueueId(rls.getQueueId()) + .setRecovered(rls.getRecovered()).setRunning(rls.getRunning()) + .setEditsSinceRestart(rls.getEditsSinceRestart()).setEditsRead(rls.getEditsRead()) + .setoPsShipped(rls.getOPsShipped()); return builder.build(); } /** * Get a protocol buffer VersionInfo - * * @return the converted protocol buffer VersionInfo */ public static HBaseProtos.VersionInfo getVersionInfo() { @@ -2899,7 +2796,7 @@ public static HBaseProtos.VersionInfo getVersionInfo() { public static List toSecurityCapabilityList( List capabilities) { List scList = new ArrayList<>(capabilities.size()); - for (MasterProtos.SecurityCapabilitiesResponse.Capability c: capabilities) { + for (MasterProtos.SecurityCapabilitiesResponse.Capability c : capabilities) { try { scList.add(SecurityCapability.valueOf(c.getNumber())); } catch (IllegalArgumentException e) { @@ -2955,11 +2852,12 @@ public static ColumnFamilySchema toColumnFamilySchema(ColumnFamilyDescriptor hcd */ public static ColumnFamilyDescriptor toColumnFamilyDescriptor(final ColumnFamilySchema cfs) { // Use the empty constructor so we preserve the initial values set on construction for things - // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for + // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for // unrelated-looking test failures that are hard to trace back to here. 
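The comment this hunk preserves is worth illustrating: toColumnFamilyDescriptor deliberately starts from the empty builder so that construction-time defaults are only overwritten by attributes actually present in the protobuf message. A minimal round-trip sketch, assuming the hbase-client and shaded-protobuf classes named in this hunk (the wrapper class and its main method are hypothetical):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.util.Bytes;

public final class ColumnFamilyRoundTripSketch {
  public static void main(String[] args) {
    // Override a single setting; everything else keeps its default.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .build();

    // Serialize to the protobuf form and back.
    ColumnFamilySchema pb = ProtobufUtil.toColumnFamilySchema(cf);
    ColumnFamilyDescriptor back = ProtobufUtil.toColumnFamilyDescriptor(pb);

    // The explicitly set attribute survives the round trip, while settings
    // that were never touched keep their construction-time defaults rather
    // than being reset to zero values.
    if (back.getMaxVersions() != 3) {
      throw new IllegalStateException("round trip lost maxVersions");
    }
  }
}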
- ColumnFamilyDescriptorBuilder builder - = ColumnFamilyDescriptorBuilder.newBuilder(cfs.getName().toByteArray()); - cfs.getAttributesList().forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(cfs.getName().toByteArray()); + cfs.getAttributesList() + .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); cfs.getConfigurationList().forEach(a -> builder.setConfiguration(a.getName(), a.getValue())); return builder.build(); } @@ -2990,16 +2888,13 @@ public static TableSchema toTableSchema(TableDescriptor htd) { * @return An {@link TableDescriptor} made from the passed in pb ts. */ public static TableDescriptor toTableDescriptor(final TableSchema ts) { - TableDescriptorBuilder builder - = TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName())); - ts.getColumnFamiliesList() - .stream() - .map(ProtobufUtil::toColumnFamilyDescriptor) - .forEach(builder::setColumnFamily); + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName())); + ts.getColumnFamiliesList().stream().map(ProtobufUtil::toColumnFamilyDescriptor) + .forEach(builder::setColumnFamily); ts.getAttributesList() - .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); - ts.getConfigurationList() - .forEach(a -> builder.setValue(a.getName(), a.getValue())); + .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); + ts.getConfigurationList().forEach(a -> builder.setValue(a.getName(), a.getValue())); return builder.build(); } @@ -3020,18 +2915,18 @@ public static GetRegionInfoResponse.CompactionState createCompactionState(Compac /** * Creates {@link CompactionState} from - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos - * .RegionLoad.CompactionState} state + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos .RegionLoad.CompactionState} + * state * @param state the protobuf CompactionState * @return CompactionState */ - public static CompactionState createCompactionStateForRegionLoad( - RegionLoad.CompactionState state) { + public static CompactionState + createCompactionStateForRegionLoad(RegionLoad.CompactionState state) { return CompactionState.valueOf(state.toString()); } - public static RegionLoad.CompactionState createCompactionStateForRegionLoad( - CompactionState state) { + public static RegionLoad.CompactionState + createCompactionStateForRegionLoad(CompactionState state) { return RegionLoad.CompactionState.valueOf(state.toString()); } @@ -3082,7 +2977,8 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription */ public static SnapshotProtos.SnapshotDescription createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) { - SnapshotProtos.SnapshotDescription.Builder builder = SnapshotProtos.SnapshotDescription.newBuilder(); + SnapshotProtos.SnapshotDescription.Builder builder = + SnapshotProtos.SnapshotDescription.newBuilder(); if (snapshotDesc.getTableName() != null) { builder.setTable(snapshotDesc.getTableNameAsString()); } @@ -3095,8 +2991,8 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription if (snapshotDesc.getCreationTime() != -1L) { builder.setCreationTime(snapshotDesc.getCreationTime()); } - if (snapshotDesc.getTtl() != -1L && - snapshotDesc.getTtl() < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { 
+ if (snapshotDesc.getTtl() != -1L + && snapshotDesc.getTtl() < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { builder.setTtl(snapshotDesc.getTtl()); } if (snapshotDesc.getVersion() != -1) { @@ -3122,9 +3018,9 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription snapshotProps.put("TTL", snapshotDesc.getTtl()); snapshotProps.put(TableDescriptorBuilder.MAX_FILESIZE, snapshotDesc.getMaxFileSize()); return new SnapshotDescription(snapshotDesc.getName(), - snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null, - createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(), - snapshotDesc.getCreationTime(), snapshotDesc.getVersion(), snapshotProps); + snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null, + createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(), + snapshotDesc.getCreationTime(), snapshotDesc.getVersion(), snapshotProps); } public static RegionLoadStats createRegionLoadStats(ClientProtos.RegionLoadStats stats) { @@ -3140,7 +3036,7 @@ public static String toText(Message msg) { return TextFormat.shortDebugString(msg); } - public static byte [] toBytes(ByteString bs) { + public static byte[] toBytes(ByteString bs) { return bs.toByteArray(); } @@ -3157,21 +3053,20 @@ public static T call(Callable callable) throws IOException { } /** - * Create a protocol buffer GetStoreFileRequest for a given region name - * - * @param regionName the name of the region to get info - * @param family the family to get store file list - * @return a protocol buffer GetStoreFileRequest - */ - public static GetStoreFileRequest - buildGetStoreFileRequest(final byte[] regionName, final byte[] family) { - GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); - RegionSpecifier region = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.addFamily(UnsafeByteOperations.unsafeWrap(family)); - return builder.build(); - } + * Create a protocol buffer GetStoreFileRequest for a given region name + * @param regionName the name of the region to get info + * @param family the family to get store file list + * @return a protocol buffer GetStoreFileRequest + */ + public static GetStoreFileRequest buildGetStoreFileRequest(final byte[] regionName, + final byte[] family) { + GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.addFamily(UnsafeByteOperations.unsafeWrap(family)); + return builder.build(); + } /** * Create a CloseRegionRequest for a given region name @@ -3191,7 +3086,7 @@ public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte ServerName destinationServer, long closeProcId) { CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); RegionSpecifier region = - RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (destinationServer != null) { builder.setDestinationServer(toServerName(destinationServer)); @@ -3215,8 +3110,8 @@ public static ProcedureDescription buildProcedureDescription(String signature, S } /** - * Get the Meta region state from the passed data bytes. Can handle both old and new style - * server names. + * Get the Meta region state from the passed data bytes. 
Can handle both old and new style server + * names. * @param data protobuf serialized data with meta server name. * @param replicaId replica ID for this region * @return RegionState instance corresponding to the serialized data. @@ -3229,15 +3124,13 @@ public static RegionState parseMetaRegionStateFrom(final byte[] data, int replic if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) { try { int prefixLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.MetaRegionServer rl = - ZooKeeperProtos.MetaRegionServer.parser().parseFrom(data, prefixLen, - data.length - prefixLen); + ZooKeeperProtos.MetaRegionServer rl = ZooKeeperProtos.MetaRegionServer.parser() + .parseFrom(data, prefixLen, data.length - prefixLen); if (rl.hasState()) { state = RegionState.State.convert(rl.getState()); } HBaseProtos.ServerName sn = rl.getServer(); - serverName = ServerName.valueOf( - sn.getHostName(), sn.getPort(), sn.getStartCode()); + serverName = ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); } catch (InvalidProtocolBufferException e) { throw new DeserializationException("Unable to parse meta region location"); } @@ -3249,34 +3142,34 @@ public static RegionState parseMetaRegionStateFrom(final byte[] data, int replic state = RegionState.State.OFFLINE; } return new RegionState(RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName); + RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName); } /** * Get a ServerName from the passed in data bytes. - * @param data Data with a serialize server name in it; can handle the old style - * servername where servername was host and port. Works too with data that - * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that - * has a serialized {@link ServerName} in it. - * @return Returns null if data is null else converts passed data - * to a ServerName instance. + * @param data Data with a serialize server name in it; can handle the old style servername where + * servername was host and port. Works too with data that begins w/ the pb 'PBUF' magic + * and that is then followed by a protobuf that has a serialized {@link ServerName} in + * it. + * @return Returns null if data is null else converts passed data to a ServerName + * instance. * @throws DeserializationException */ - public static ServerName parseServerNameFrom(final byte [] data) throws DeserializationException { + public static ServerName parseServerNameFrom(final byte[] data) throws DeserializationException { if (data == null || data.length <= 0) return null; if (ProtobufMagic.isPBMagicPrefix(data)) { int prefixLen = ProtobufMagic.lengthOfPBMagic(); try { ZooKeeperProtos.Master rss = - ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); + ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName sn = rss.getMaster(); return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); - } catch (/*InvalidProtocolBufferException*/IOException e) { + } catch (/* InvalidProtocolBufferException */IOException e) { // A failed parse of the znode is pretty catastrophic. Rather than loop // retrying hoping the bad bytes will changes, and rather than change // the signature on this method to add an IOE which will send ripples all - // over the code base, throw a RuntimeException. This should "never" happen. 
+ // over the code base, throw a RuntimeException. This should "never" happen. // Fail fast if it does. throw new DeserializationException(e); } @@ -3317,7 +3210,8 @@ public static String toLockJson(List lockedRes JsonArray lockedResourceJsons = new JsonArray(lockedResourceProtos.size()); for (LockServiceProtos.LockedResource lockedResourceProto : lockedResourceProtos) { try { - JsonElement lockedResourceJson = ProtobufMessageConverter.toJsonElement(lockedResourceProto); + JsonElement lockedResourceJson = + ProtobufMessageConverter.toJsonElement(lockedResourceProto); lockedResourceJsons.add(lockedResourceJson); } catch (InvalidProtocolBufferException e) { lockedResourceJsons.add(e.toString()); @@ -3328,11 +3222,11 @@ public static String toLockJson(List lockedRes /** * Convert a RegionInfo to a Proto RegionInfo - * * @param info the RegionInfo to convert * @return the converted Proto RegionInfo */ - public static HBaseProtos.RegionInfo toRegionInfo(final org.apache.hadoop.hbase.client.RegionInfo info) { + public static HBaseProtos.RegionInfo + toRegionInfo(final org.apache.hadoop.hbase.client.RegionInfo info) { if (info == null) { return null; } @@ -3353,18 +3247,18 @@ public static HBaseProtos.RegionInfo toRegionInfo(final org.apache.hadoop.hbase. /** * Convert HBaseProto.RegionInfo to a RegionInfo - * * @param proto the RegionInfo to convert * @return the converted RegionInfo */ - public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBaseProtos.RegionInfo proto) { + public static org.apache.hadoop.hbase.client.RegionInfo + toRegionInfo(final HBaseProtos.RegionInfo proto) { if (proto == null) { return null; } TableName tableName = ProtobufUtil.toTableName(proto.getTableName()); long regionId = proto.getRegionId(); int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; - int replicaId = proto.hasReplicaId()? proto.getReplicaId(): defaultReplicaId; + int replicaId = proto.hasReplicaId() ? proto.getReplicaId() : defaultReplicaId; if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) { return RegionInfoBuilder.FIRST_META_REGIONINFO; } @@ -3380,12 +3274,8 @@ public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBase if (proto.hasSplit()) { split = proto.getSplit(); } - RegionInfoBuilder rib = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(startKey) - .setEndKey(endKey) - .setRegionId(regionId) - .setReplicaId(replicaId) - .setSplit(split); + RegionInfoBuilder rib = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey) + .setEndKey(endKey).setRegionId(regionId).setReplicaId(replicaId).setSplit(split); if (proto.hasOffline()) { rib.setOffline(proto.getOffline()); } @@ -3408,18 +3298,17 @@ public static HRegionLocation toRegionLocation(HBaseProtos.RegionLocation proto) return new HRegionLocation(regionInfo, serverName, proto.getSeqNum()); } - public static List toSnapshotDescriptionList( - GetCompletedSnapshotsResponse response, Pattern pattern) { + public static List + toSnapshotDescriptionList(GetCompletedSnapshotsResponse response, Pattern pattern) { return response.getSnapshotsList().stream().map(ProtobufUtil::createSnapshotDesc) .filter(snap -> pattern != null ? 
pattern.matcher(snap.getName()).matches() : true) .collect(Collectors.toList()); } - public static CacheEvictionStats toCacheEvictionStats( - HBaseProtos.CacheEvictionStats stats) throws IOException{ + public static CacheEvictionStats toCacheEvictionStats(HBaseProtos.CacheEvictionStats stats) + throws IOException { CacheEvictionStatsBuilder builder = CacheEvictionStats.builder(); - builder.withEvictedBlocks(stats.getEvictedBlocks()) - .withMaxCacheSize(stats.getMaxCacheSize()); + builder.withEvictedBlocks(stats.getEvictedBlocks()).withMaxCacheSize(stats.getMaxCacheSize()); if (stats.getExceptionCount() > 0) { for (HBaseProtos.RegionExceptionMessage exception : stats.getExceptionList()) { HBaseProtos.RegionSpecifier rs = exception.getRegion(); @@ -3430,60 +3319,47 @@ public static CacheEvictionStats toCacheEvictionStats( return builder.build(); } - public static HBaseProtos.CacheEvictionStats toCacheEvictionStats( - CacheEvictionStats cacheEvictionStats) { - HBaseProtos.CacheEvictionStats.Builder builder - = HBaseProtos.CacheEvictionStats.newBuilder(); + public static HBaseProtos.CacheEvictionStats + toCacheEvictionStats(CacheEvictionStats cacheEvictionStats) { + HBaseProtos.CacheEvictionStats.Builder builder = HBaseProtos.CacheEvictionStats.newBuilder(); for (Map.Entry entry : cacheEvictionStats.getExceptions().entrySet()) { - builder.addException( - RegionExceptionMessage.newBuilder() - .setRegion(RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, entry.getKey())) - .setException(ResponseConverter.buildException(entry.getValue())) - .build() - ); - } - return builder - .setEvictedBlocks(cacheEvictionStats.getEvictedBlocks()) - .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize()) - .build(); + builder.addException(RegionExceptionMessage.newBuilder() + .setRegion( + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, entry.getKey())) + .setException(ResponseConverter.buildException(entry.getValue())).build()); + } + return builder.setEvictedBlocks(cacheEvictionStats.getEvictedBlocks()) + .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize()).build(); } - public static ClusterStatusProtos.ReplicationLoadSource toReplicationLoadSource( - ReplicationLoadSource rls) { - return ClusterStatusProtos.ReplicationLoadSource.newBuilder() - .setPeerID(rls.getPeerID()) + public static ClusterStatusProtos.ReplicationLoadSource + toReplicationLoadSource(ReplicationLoadSource rls) { + return ClusterStatusProtos.ReplicationLoadSource.newBuilder().setPeerID(rls.getPeerID()) .setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) .setSizeOfLogQueue((int) rls.getSizeOfLogQueue()) .setTimeStampOfLastShippedOp(rls.getTimestampOfLastShippedOp()) - .setReplicationLag(rls.getReplicationLag()) - .setQueueId(rls.getQueueId()) - .setRecovered(rls.isRecovered()) - .setRunning(rls.isRunning()) + .setReplicationLag(rls.getReplicationLag()).setQueueId(rls.getQueueId()) + .setRecovered(rls.isRecovered()).setRunning(rls.isRunning()) .setEditsSinceRestart(rls.hasEditsSinceRestart()) .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) - .setOPsShipped(rls.getOPsShipped()) - .setEditsRead(rls.getEditsRead()) - .build(); + .setOPsShipped(rls.getOPsShipped()).setEditsRead(rls.getEditsRead()).build(); } - public static ClusterStatusProtos.ReplicationLoadSink toReplicationLoadSink( - ReplicationLoadSink rls) { + public static ClusterStatusProtos.ReplicationLoadSink + toReplicationLoadSink(ReplicationLoadSink rls) { return ClusterStatusProtos.ReplicationLoadSink.newBuilder() 
.setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) .setTimeStampsOfLastAppliedOp(rls.getTimestampsOfLastAppliedOp()) .setTimestampStarted(rls.getTimestampStarted()) - .setTotalOpsProcessed(rls.getTotalOpsProcessed()) - .build(); + .setTotalOpsProcessed(rls.getTotalOpsProcessed()).build(); } public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) { if (timeRange == null) { timeRange = TimeRange.allTime(); } - return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()) - .setTo(timeRange.getMax()) - .build(); + return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()).setTo(timeRange.getMax()) + .build(); } public static byte[] toCompactionEventTrackerBytes(Set storeFiles) { @@ -3509,8 +3385,8 @@ public static Set toCompactedStoreFiles(byte[] bytes) throws IOException return Collections.emptySet(); } - public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount( - RegionStatesCount regionStatesCount) { + public static ClusterStatusProtos.RegionStatesCount + toTableRegionStatesCount(RegionStatesCount regionStatesCount) { int openRegions = 0; int splitRegions = 0; int closedRegions = 0; @@ -3523,17 +3399,13 @@ public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount( regionsInTransition = regionStatesCount.getRegionsInTransition(); totalRegions = regionStatesCount.getTotalRegions(); } - return ClusterStatusProtos.RegionStatesCount.newBuilder() - .setOpenRegions(openRegions) - .setSplitRegions(splitRegions) - .setClosedRegions(closedRegions) - .setRegionsInTransition(regionsInTransition) - .setTotalRegions(totalRegions) - .build(); + return ClusterStatusProtos.RegionStatesCount.newBuilder().setOpenRegions(openRegions) + .setSplitRegions(splitRegions).setClosedRegions(closedRegions) + .setRegionsInTransition(regionsInTransition).setTotalRegions(totalRegions).build(); } - public static RegionStatesCount toTableRegionStatesCount( - ClusterStatusProtos.RegionStatesCount regionStatesCount) { + public static RegionStatesCount + toTableRegionStatesCount(ClusterStatusProtos.RegionStatesCount regionStatesCount) { int openRegions = 0; int splitRegions = 0; int closedRegions = 0; @@ -3546,64 +3418,52 @@ public static RegionStatesCount toTableRegionStatesCount( splitRegions = regionStatesCount.getSplitRegions(); totalRegions = regionStatesCount.getTotalRegions(); } - return new RegionStatesCount.RegionStatesCountBuilder() - .setOpenRegions(openRegions) - .setSplitRegions(splitRegions) - .setClosedRegions(closedRegions) - .setRegionsInTransition(regionsInTransition) - .setTotalRegions(totalRegions) - .build(); + return new RegionStatesCount.RegionStatesCountBuilder().setOpenRegions(openRegions) + .setSplitRegions(splitRegions).setClosedRegions(closedRegions) + .setRegionsInTransition(regionsInTransition).setTotalRegions(totalRegions).build(); } /** * Convert Protobuf class - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload} - * To client SlowLog Payload class {@link OnlineLogRecord} - * + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload} To client + * SlowLog Payload class {@link OnlineLogRecord} * @param slowLogPayload SlowLog Payload protobuf instance * @return SlowLog Payload for client usecase */ - private static LogEntry getSlowLogRecord( - final TooSlowLog.SlowLogPayload slowLogPayload) { + private static LogEntry getSlowLogRecord(final TooSlowLog.SlowLogPayload slowLogPayload) { OnlineLogRecord onlineLogRecord = new OnlineLogRecord.OnlineLogRecordBuilder() - 
.setCallDetails(slowLogPayload.getCallDetails()) - .setClientAddress(slowLogPayload.getClientAddress()) - .setMethodName(slowLogPayload.getMethodName()) - .setMultiGetsCount(slowLogPayload.getMultiGets()) - .setMultiMutationsCount(slowLogPayload.getMultiMutations()) - .setMultiServiceCalls(slowLogPayload.getMultiServiceCalls()) - .setParam(slowLogPayload.getParam()) - .setProcessingTime(slowLogPayload.getProcessingTime()) - .setQueueTime(slowLogPayload.getQueueTime()) - .setRegionName(slowLogPayload.getRegionName()) - .setResponseSize(slowLogPayload.getResponseSize()) - .setServerClass(slowLogPayload.getServerClass()) - .setStartTime(slowLogPayload.getStartTime()) - .setUserName(slowLogPayload.getUserName()) - .build(); + .setCallDetails(slowLogPayload.getCallDetails()) + .setClientAddress(slowLogPayload.getClientAddress()) + .setMethodName(slowLogPayload.getMethodName()) + .setMultiGetsCount(slowLogPayload.getMultiGets()) + .setMultiMutationsCount(slowLogPayload.getMultiMutations()) + .setMultiServiceCalls(slowLogPayload.getMultiServiceCalls()) + .setParam(slowLogPayload.getParam()).setProcessingTime(slowLogPayload.getProcessingTime()) + .setQueueTime(slowLogPayload.getQueueTime()).setRegionName(slowLogPayload.getRegionName()) + .setResponseSize(slowLogPayload.getResponseSize()) + .setServerClass(slowLogPayload.getServerClass()).setStartTime(slowLogPayload.getStartTime()) + .setUserName(slowLogPayload.getUserName()).build(); return onlineLogRecord; } /** - * Convert AdminProtos#SlowLogResponses to list of {@link OnlineLogRecord} - * + * Convert AdminProtos#SlowLogResponses to list of {@link OnlineLogRecord} * @param logEntry slowlog response protobuf instance * @return list of SlowLog payloads for client usecase */ - public static List toSlowLogPayloads( - final HBaseProtos.LogEntry logEntry) { + public static List toSlowLogPayloads(final HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("SlowLogResponses")) { - AdminProtos.SlowLogResponses slowLogResponses = (AdminProtos.SlowLogResponses) method - .invoke(null, logEntry.getLogMessage()); + AdminProtos.SlowLogResponses slowLogResponses = + (AdminProtos.SlowLogResponses) method.invoke(null, logEntry.getLogMessage()); return slowLogResponses.getSlowLogPayloadsList().stream() - .map(ProtobufUtil::getSlowLogRecord).collect(Collectors.toList()); + .map(ProtobufUtil::getSlowLogRecord).collect(Collectors.toList()); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { + | InvocationTargetException e) { throw new RuntimeException("Error while retrieving response from server"); } throw new RuntimeException("Invalid response from server"); @@ -3611,7 +3471,6 @@ public static List toSlowLogPayloads( /** * Convert {@link ClearSlowLogResponses} to boolean - * * @param clearSlowLogResponses Clear slowlog response protobuf instance * @return boolean representing clear slowlog response */ @@ -3619,50 +3478,46 @@ public static boolean toClearSlowLogPayload(final ClearSlowLogResponses clearSlo return clearSlowLogResponses.getIsCleaned(); } - public static void populateBalanceRSGroupResponse(RSGroupAdminProtos.BalanceRSGroupResponse.Builder responseBuilder, BalanceResponse response) { - responseBuilder - .setBalanceRan(response.isBalancerRan()) - 
.setMovesCalculated(response.getMovesCalculated()) - .setMovesExecuted(response.getMovesExecuted()); + public static void populateBalanceRSGroupResponse( + RSGroupAdminProtos.BalanceRSGroupResponse.Builder responseBuilder, BalanceResponse response) { + responseBuilder.setBalanceRan(response.isBalancerRan()) + .setMovesCalculated(response.getMovesCalculated()) + .setMovesExecuted(response.getMovesExecuted()); } - public static BalanceResponse toBalanceResponse(RSGroupAdminProtos.BalanceRSGroupResponse response) { - return BalanceResponse.newBuilder() - .setBalancerRan(response.getBalanceRan()) - .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0) - .setMovesCalculated(response.hasMovesCalculated() ? response.getMovesCalculated() : 0) - .build(); + public static BalanceResponse + toBalanceResponse(RSGroupAdminProtos.BalanceRSGroupResponse response) { + return BalanceResponse.newBuilder().setBalancerRan(response.getBalanceRan()) + .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0) + .setMovesCalculated(response.hasMovesCalculated() ? response.getMovesCalculated() : 0) + .build(); } - public static RSGroupAdminProtos.BalanceRSGroupRequest createBalanceRSGroupRequest(String groupName, BalanceRequest request) { - return RSGroupAdminProtos.BalanceRSGroupRequest.newBuilder() - .setRSGroupName(groupName) - .setDryRun(request.isDryRun()) - .setIgnoreRit(request.isIgnoreRegionsInTransition()) - .build(); + public static RSGroupAdminProtos.BalanceRSGroupRequest + createBalanceRSGroupRequest(String groupName, BalanceRequest request) { + return RSGroupAdminProtos.BalanceRSGroupRequest.newBuilder().setRSGroupName(groupName) + .setDryRun(request.isDryRun()).setIgnoreRit(request.isIgnoreRegionsInTransition()).build(); } public static BalanceRequest toBalanceRequest(RSGroupAdminProtos.BalanceRSGroupRequest request) { - return BalanceRequest.newBuilder() - .setDryRun(request.hasDryRun() && request.getDryRun()) - .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()) - .build(); + return BalanceRequest.newBuilder().setDryRun(request.hasDryRun() && request.getDryRun()) + .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()).build(); } public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) { RSGroupInfo rsGroupInfo = new RSGroupInfo(proto.getName()); Collection
<Address>
    addresses = proto.getServersList().parallelStream() - .map(serverName -> Address.fromParts(serverName.getHostName(), serverName.getPort())) - .collect(Collectors.toList()); + .map(serverName -> Address.fromParts(serverName.getHostName(), serverName.getPort())) + .collect(Collectors.toList()); rsGroupInfo.addAllServers(addresses); Collection tables = proto.getTablesList().parallelStream() - .map(ProtobufUtil::toTableName).collect(Collectors.toList()); + .map(ProtobufUtil::toTableName).collect(Collectors.toList()); rsGroupInfo.addAllTables(tables); - proto.getConfigurationList().forEach(pair -> - rsGroupInfo.setConfiguration(pair.getName(), pair.getValue())); + proto.getConfigurationList() + .forEach(pair -> rsGroupInfo.setConfiguration(pair.getName(), pair.getValue())); return rsGroupInfo; } @@ -3676,8 +3531,8 @@ public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) { hostports.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) .setPort(el.getPort()).build()); } - List configuration = pojo.getConfiguration().entrySet() - .stream().map(entry -> NameStringPair.newBuilder() + List configuration = pojo + .getConfiguration().entrySet().stream().map(entry -> NameStringPair.newBuilder() .setName(entry.getKey()).setValue(entry.getValue()).build()) .collect(Collectors.toList()); return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()).addAllServers(hostports) @@ -3685,20 +3540,20 @@ public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) { } public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, - MutationProto mutation, CellScanner cellScanner) throws IOException { + MutationProto mutation, CellScanner cellScanner) throws IOException { byte[] row = condition.getRow().toByteArray(); CheckAndMutate.Builder builder = CheckAndMutate.newBuilder(row); Filter filter = condition.hasFilter() ? ProtobufUtil.toFilter(condition.getFilter()) : null; if (filter != null) { builder.ifMatches(filter); } else { - builder.ifMatches(condition.getFamily().toByteArray(), - condition.getQualifier().toByteArray(), + builder.ifMatches(condition.getFamily().toByteArray(), condition.getQualifier().toByteArray(), CompareOperator.valueOf(condition.getCompareType().name()), ProtobufUtil.toComparator(condition.getComparator()).getValue()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = + condition.hasTimeRange() ? 
ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); builder.timeRange(timeRange); try { @@ -3721,7 +3576,7 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, } public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, - List mutations) throws IOException { + List mutations) throws IOException { assert mutations.size() > 0; byte[] row = condition.getRow().toByteArray(); CheckAndMutate.Builder builder = CheckAndMutate.newBuilder(row); @@ -3729,13 +3584,13 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, if (filter != null) { builder.ifMatches(filter); } else { - builder.ifMatches(condition.getFamily().toByteArray(), - condition.getQualifier().toByteArray(), + builder.ifMatches(condition.getFamily().toByteArray(), condition.getQualifier().toByteArray(), CompareOperator.valueOf(condition.getCompareType().name()), ProtobufUtil.toComparator(condition.getComparator()).getValue()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = + condition.hasTimeRange() ? ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); builder.timeRange(timeRange); try { @@ -3750,8 +3605,8 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, } else if (m instanceof Append) { return builder.build((Append) m); } else { - throw new DoNotRetryIOException("Unsupported mutate type: " + m.getClass() - .getSimpleName().toUpperCase()); + throw new DoNotRetryIOException( + "Unsupported mutate type: " + m.getClass().getSimpleName().toUpperCase()); } } else { return builder.build(new RowMutations(mutations.get(0).getRow()).add(mutations)); @@ -3762,169 +3617,150 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, } public static ClientProtos.Condition toCondition(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, - final TimeRange timeRange) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, + final TimeRange timeRange) throws IOException { - ClientProtos.Condition.Builder builder = ClientProtos.Condition.newBuilder() - .setRow(UnsafeByteOperations.unsafeWrap(row)); + ClientProtos.Condition.Builder builder = + ClientProtos.Condition.newBuilder().setRow(UnsafeByteOperations.unsafeWrap(row)); if (filter != null) { builder.setFilter(ProtobufUtil.toFilter(filter)); } else { builder.setFamily(UnsafeByteOperations.unsafeWrap(family)) - .setQualifier(UnsafeByteOperations.unsafeWrap( - qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier)) - .setComparator(ProtobufUtil.toComparator(new BinaryComparator(value))) - .setCompareType(HBaseProtos.CompareType.valueOf(op.name())); + .setQualifier(UnsafeByteOperations + .unsafeWrap(qualifier == null ? 
HConstants.EMPTY_BYTE_ARRAY : qualifier)) + .setComparator(ProtobufUtil.toComparator(new BinaryComparator(value))) + .setCompareType(HBaseProtos.CompareType.valueOf(op.name())); } return builder.setTimeRange(ProtobufUtil.toTimeRange(timeRange)).build(); } public static ClientProtos.Condition toCondition(final byte[] row, final Filter filter, - final TimeRange timeRange) throws IOException { + final TimeRange timeRange) throws IOException { return toCondition(row, null, null, null, null, filter, timeRange); } public static ClientProtos.Condition toCondition(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, - final TimeRange timeRange) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, + final TimeRange timeRange) throws IOException { return toCondition(row, family, qualifier, op, value, null, timeRange); } - public static List toBalancerDecisionResponse( - HBaseProtos.LogEntry logEntry) { + public static List toBalancerDecisionResponse(HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("BalancerDecisionsResponse")) { MasterProtos.BalancerDecisionsResponse response = - (MasterProtos.BalancerDecisionsResponse) method - .invoke(null, logEntry.getLogMessage()); + (MasterProtos.BalancerDecisionsResponse) method.invoke(null, logEntry.getLogMessage()); return getBalancerDecisionEntries(response); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { + | InvocationTargetException e) { throw new RuntimeException("Error while retrieving response from server"); } throw new RuntimeException("Invalid response from server"); } - public static List toBalancerRejectionResponse( - HBaseProtos.LogEntry logEntry) { + public static List toBalancerRejectionResponse(HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("BalancerRejectionsResponse")) { MasterProtos.BalancerRejectionsResponse response = - (MasterProtos.BalancerRejectionsResponse) method - .invoke(null, logEntry.getLogMessage()); + (MasterProtos.BalancerRejectionsResponse) method.invoke(null, logEntry.getLogMessage()); return getBalancerRejectionEntries(response); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { + | InvocationTargetException e) { throw new RuntimeException("Error while retrieving response from server"); } throw new RuntimeException("Invalid response from server"); } - public static List getBalancerDecisionEntries( - MasterProtos.BalancerDecisionsResponse response) { + public static List + getBalancerDecisionEntries(MasterProtos.BalancerDecisionsResponse response) { List balancerDecisions = response.getBalancerDecisionList(); if (CollectionUtils.isEmpty(balancerDecisions)) { return Collections.emptyList(); } - return balancerDecisions.stream().map(balancerDecision -> new BalancerDecision.Builder() - .setInitTotalCost(balancerDecision.getInitTotalCost()) - .setInitialFunctionCosts(balancerDecision.getInitialFunctionCosts()) - .setComputedTotalCost(balancerDecision.getComputedTotalCost()) - 
.setFinalFunctionCosts(balancerDecision.getFinalFunctionCosts()) - .setComputedSteps(balancerDecision.getComputedSteps()) - .setRegionPlans(balancerDecision.getRegionPlansList()).build()) - .collect(Collectors.toList()); + return balancerDecisions.stream() + .map(balancerDecision -> new BalancerDecision.Builder() + .setInitTotalCost(balancerDecision.getInitTotalCost()) + .setInitialFunctionCosts(balancerDecision.getInitialFunctionCosts()) + .setComputedTotalCost(balancerDecision.getComputedTotalCost()) + .setFinalFunctionCosts(balancerDecision.getFinalFunctionCosts()) + .setComputedSteps(balancerDecision.getComputedSteps()) + .setRegionPlans(balancerDecision.getRegionPlansList()).build()) + .collect(Collectors.toList()); } - public static List getBalancerRejectionEntries( - MasterProtos.BalancerRejectionsResponse response) { + public static List + getBalancerRejectionEntries(MasterProtos.BalancerRejectionsResponse response) { List balancerRejections = response.getBalancerRejectionList(); if (CollectionUtils.isEmpty(balancerRejections)) { return Collections.emptyList(); } - return balancerRejections.stream().map(balancerRejection -> new BalancerRejection.Builder() - .setReason(balancerRejection.getReason()) - .setCostFuncInfoList(balancerRejection.getCostFuncInfoList()) - .build()) - .collect(Collectors.toList()); + return balancerRejections.stream() + .map(balancerRejection -> new BalancerRejection.Builder() + .setReason(balancerRejection.getReason()) + .setCostFuncInfoList(balancerRejection.getCostFuncInfoList()).build()) + .collect(Collectors.toList()); } public static HBaseProtos.LogRequest toBalancerDecisionRequest(int limit) { MasterProtos.BalancerDecisionsRequest balancerDecisionsRequest = - MasterProtos.BalancerDecisionsRequest.newBuilder().setLimit(limit).build(); + MasterProtos.BalancerDecisionsRequest.newBuilder().setLimit(limit).build(); return HBaseProtos.LogRequest.newBuilder() - .setLogClassName(balancerDecisionsRequest.getClass().getName()) - .setLogMessage(balancerDecisionsRequest.toByteString()) - .build(); + .setLogClassName(balancerDecisionsRequest.getClass().getName()) + .setLogMessage(balancerDecisionsRequest.toByteString()).build(); } public static HBaseProtos.LogRequest toBalancerRejectionRequest(int limit) { MasterProtos.BalancerRejectionsRequest balancerRejectionsRequest = - MasterProtos.BalancerRejectionsRequest.newBuilder().setLimit(limit).build(); + MasterProtos.BalancerRejectionsRequest.newBuilder().setLimit(limit).build(); return HBaseProtos.LogRequest.newBuilder() - .setLogClassName(balancerRejectionsRequest.getClass().getName()) - .setLogMessage(balancerRejectionsRequest.toByteString()) - .build(); + .setLogClassName(balancerRejectionsRequest.getClass().getName()) + .setLogMessage(balancerRejectionsRequest.toByteString()).build(); } public static MasterProtos.BalanceRequest toBalanceRequest(BalanceRequest request) { - return MasterProtos.BalanceRequest.newBuilder() - .setDryRun(request.isDryRun()) - .setIgnoreRit(request.isIgnoreRegionsInTransition()) - .build(); + return MasterProtos.BalanceRequest.newBuilder().setDryRun(request.isDryRun()) + .setIgnoreRit(request.isIgnoreRegionsInTransition()).build(); } public static BalanceRequest toBalanceRequest(MasterProtos.BalanceRequest request) { - return BalanceRequest.newBuilder() - .setDryRun(request.hasDryRun() && request.getDryRun()) - .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()) - .build(); + return BalanceRequest.newBuilder().setDryRun(request.hasDryRun() && 
request.getDryRun()) + .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()).build(); } public static MasterProtos.BalanceResponse toBalanceResponse(BalanceResponse response) { - return MasterProtos.BalanceResponse.newBuilder() - .setBalancerRan(response.isBalancerRan()) - .setMovesCalculated(response.getMovesCalculated()) - .setMovesExecuted(response.getMovesExecuted()) - .build(); + return MasterProtos.BalanceResponse.newBuilder().setBalancerRan(response.isBalancerRan()) + .setMovesCalculated(response.getMovesCalculated()) + .setMovesExecuted(response.getMovesExecuted()).build(); } public static BalanceResponse toBalanceResponse(MasterProtos.BalanceResponse response) { return BalanceResponse.newBuilder() - .setBalancerRan(response.hasBalancerRan() && response.getBalancerRan()) - .setMovesCalculated(response.hasMovesCalculated() ? response.getMovesExecuted() : 0) - .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0) - .build(); + .setBalancerRan(response.hasBalancerRan() && response.getBalancerRan()) + .setMovesCalculated(response.hasMovesCalculated() ? response.getMovesExecuted() : 0) + .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0).build(); } public static ServerTask getServerTask(ClusterStatusProtos.ServerTask task) { - return ServerTaskBuilder.newBuilder() - .setDescription(task.getDescription()) - .setStatus(task.getStatus()) - .setState(ServerTask.State.valueOf(task.getState().name())) - .setStartTime(task.getStartTime()) - .setCompletionTime(task.getCompletionTime()) - .build(); + return ServerTaskBuilder.newBuilder().setDescription(task.getDescription()) + .setStatus(task.getStatus()).setState(ServerTask.State.valueOf(task.getState().name())) + .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTime()).build(); } public static ClusterStatusProtos.ServerTask toServerTask(ServerTask task) { - return ClusterStatusProtos.ServerTask.newBuilder() - .setDescription(task.getDescription()) - .setStatus(task.getStatus()) - .setState(ClusterStatusProtos.ServerTask.State.valueOf(task.getState().name())) - .setStartTime(task.getStartTime()) - .setCompletionTime(task.getCompletionTime()) - .build(); + return ClusterStatusProtos.ServerTask.newBuilder().setDescription(task.getDescription()) + .setStatus(task.getStatus()) + .setState(ClusterStatusProtos.ServerTask.State.valueOf(task.getState().name())) + .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTime()).build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 3008956d7517..69640bbb0d8b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -167,8 +167,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; /** - * Helper utility to build protocol buffer requests, - * or build components for protocol buffer requests. + * Helper utility to build protocol buffer requests, or build components for protocol buffer + * requests. 
*/ @InterfaceAudience.Private public final class RequestConverter { @@ -176,20 +176,18 @@ public final class RequestConverter { private RequestConverter() { } -// Start utilities for Client + // Start utilities for Client /** * Create a protocol buffer GetRequest for a client Get - * * @param regionName the name of the region to get * @param get the client Get * @return a protocol buffer GetRequest */ - public static GetRequest buildGetRequest(final byte[] regionName, - final Get get) throws IOException { + public static GetRequest buildGetRequest(final byte[] regionName, final Get get) + throws IOException { GetRequest.Builder builder = GetRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setGet(ProtobufUtil.toGet(get)); return builder.build(); @@ -197,61 +195,60 @@ public static GetRequest buildGetRequest(final byte[] regionName, /** * Create a protocol buffer MutateRequest for a conditioned put/delete/increment/append - * * @return a mutate request * @throws IOException */ public static MutateRequest buildMutateRequest(final byte[] regionName, final byte[] row, - final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value, - final Filter filter, final TimeRange timeRange, final Mutation mutation, long nonceGroup, - long nonce) throws IOException { + final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value, + final Filter filter, final TimeRange timeRange, final Mutation mutation, long nonceGroup, + long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); if (mutation instanceof Increment || mutation instanceof Append) { builder.setMutation(ProtobufUtil.toMutation(getMutationType(mutation), mutation, nonce)) - .setNonceGroup(nonceGroup); + .setNonceGroup(nonceGroup); } else { builder.setMutation(ProtobufUtil.toMutation(getMutationType(mutation), mutation)); } return builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)) - .setCondition(ProtobufUtil.toCondition(row, family, qualifier, op, value, filter, timeRange)) - .build(); + .setCondition( + ProtobufUtil.toCondition(row, family, qualifier, op, value, filter, timeRange)) + .build(); } /** * Create a protocol buffer MultiRequest for conditioned row mutations - * * @return a multi request * @throws IOException */ public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, - final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value, final Filter filter, final TimeRange timeRange, - final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { - return buildMultiRequest(regionName, rowMutations, ProtobufUtil.toCondition(row, family, - qualifier, op, value, filter, timeRange), nonceGroup, nonce); + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final byte[] value, final Filter filter, final TimeRange timeRange, + final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { + return buildMultiRequest(regionName, rowMutations, + ProtobufUtil.toCondition(row, family, qualifier, op, value, filter, timeRange), nonceGroup, + nonce); } /** * Create a protocol buffer MultiRequest for row mutations - * * @return a multi request */ public static ClientProtos.MultiRequest 
buildMultiRequest(final byte[] regionName, - final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { + final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { return buildMultiRequest(regionName, rowMutations, null, nonceGroup, nonce); } private static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, - final RowMutations rowMutations, final Condition condition, long nonceGroup, long nonce) - throws IOException { + final RowMutations rowMutations, final Condition condition, long nonceGroup, long nonce) + throws IOException { RegionAction.Builder builder = - getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName); + getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName); builder.setAtomic(true); boolean hasNonce = false; ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder(); MutationProto.Builder mutationBuilder = MutationProto.newBuilder(); - for (Mutation mutation: rowMutations.getMutations()) { + for (Mutation mutation : rowMutations.getMutations()) { mutationBuilder.clear(); MutationProto mp; if (mutation instanceof Increment || mutation instanceof Append) { @@ -279,17 +276,15 @@ private static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionNa /** * Create a protocol buffer MutateRequest for a put - * * @param regionName * @param put * @return a mutate request * @throws IOException */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Put put) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Put put) + throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setMutation(ProtobufUtil.toMutation(MutationType.PUT, put, MutationProto.newBuilder())); return builder.build(); @@ -297,68 +292,62 @@ public static MutateRequest buildMutateRequest( /** * Create a protocol buffer MutateRequest for an append - * * @param regionName * @param append * @return a mutate request * @throws IOException */ - public static MutateRequest buildMutateRequest(final byte[] regionName, - final Append append, long nonceGroup, long nonce) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Append append, + long nonceGroup, long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) { builder.setNonceGroup(nonceGroup); } - builder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, append, - MutationProto.newBuilder(), nonce)); + builder.setMutation( + ProtobufUtil.toMutation(MutationType.APPEND, append, MutationProto.newBuilder(), nonce)); return builder.build(); } /** * Create a protocol buffer MutateRequest for a client increment - * * @param regionName * @param increment * @return a mutate request */ - public static MutateRequest buildMutateRequest(final byte[] regionName, - final Increment increment, final long nonceGroup, final long nonce) throws IOException { + public 
static MutateRequest buildMutateRequest(final byte[] regionName, final Increment increment, + final long nonceGroup, final long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) { builder.setNonceGroup(nonceGroup); } builder.setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, increment, - MutationProto.newBuilder(), nonce)); + MutationProto.newBuilder(), nonce)); return builder.build(); } /** * Create a protocol buffer MutateRequest for a delete - * * @param regionName * @param delete * @return a mutate request * @throws IOException */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Delete delete) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete) + throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); - builder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, - MutationProto.newBuilder())); + builder.setMutation( + ProtobufUtil.toMutation(MutationType.DELETE, delete, MutationProto.newBuilder())); return builder.build(); } public static RegionAction.Builder getRegionActionBuilderWithRegion( - final RegionAction.Builder regionActionBuilder, final byte [] regionName) { + final RegionAction.Builder regionActionBuilder, final byte[] regionName) { RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); regionActionBuilder.setRegion(region); return regionActionBuilder; @@ -366,7 +355,6 @@ public static RegionAction.Builder getRegionActionBuilderWithRegion( /** * Create a protocol buffer ScanRequest for a client Scan - * * @param regionName * @param scan * @param numberOfRows @@ -437,7 +425,6 @@ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boo /** * Create a protocol buffer bulk load request - * * @param familyPaths * @param regionName * @param assignSeqNum @@ -448,38 +435,34 @@ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boo */ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( final List> familyPaths, final byte[] regionName, boolean assignSeqNum, - final Token userToken, final String bulkToken, boolean copyFiles, - List clusterIds, boolean replicate) { - RegionSpecifier region = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + final Token userToken, final String bulkToken, boolean copyFiles, List clusterIds, + boolean replicate) { + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); ClientProtos.DelegationToken protoDT = null; if (userToken != null) { - protoDT = - ClientProtos.DelegationToken.newBuilder() - .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) - .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); + protoDT = 
ClientProtos.DelegationToken.newBuilder() + .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) + .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) + .setKind(userToken.getKind().toString()).setService(userToken.getService().toString()) + .build(); } - List protoFamilyPaths = new ArrayList<>(familyPaths.size()); + List protoFamilyPaths = + new ArrayList<>(familyPaths.size()); if (!familyPaths.isEmpty()) { - ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder - = ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder(); - for(Pair el: familyPaths) { - protoFamilyPaths.add(pathBuilder - .setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst())) - .setPath(el.getSecond()).build()); + ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder = + ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder(); + for (Pair el : familyPaths) { + protoFamilyPaths.add(pathBuilder.setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst())) + .setPath(el.getSecond()).build()); } pathBuilder.clear(); } - BulkLoadHFileRequest.Builder request = - ClientProtos.BulkLoadHFileRequest.newBuilder() - .setRegion(region) - .setAssignSeqNum(assignSeqNum) - .addAllFamilyPath(protoFamilyPaths); + BulkLoadHFileRequest.Builder request = ClientProtos.BulkLoadHFileRequest.newBuilder() + .setRegion(region).setAssignSeqNum(assignSeqNum).addAllFamilyPath(protoFamilyPaths); if (userToken != null) { request.setFsToken(protoDT); } @@ -496,13 +479,15 @@ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( /** * Create a protocol buffer multirequest with NO data for a list of actions (data is carried - * otherwise than via protobuf). This means it just notes attributes, whether to write the - * WAL, etc., and the presence in protobuf serves as place holder for the data which is - * coming along otherwise. Note that Get is different. It does not contain 'data' and is always - * carried by protobuf. We return references to the data by adding them to the passed in - * data param. - *

<p>Propagates Actions original index. - * <p>The passed in multiRequestBuilder will be populated with region actions. + * otherwise than via protobuf). This means it just notes attributes, whether to write the WAL, + * etc., and the presence in protobuf serves as place holder for the data which is coming along + * otherwise. Note that Get is different. It does not contain 'data' and is always carried by + * protobuf. We return references to the data by adding them to the passed in <code>data</code> + * param. + * <p> + * Propagates Actions original index. + * <p>
    + * The passed in multiRequestBuilder will be populated with region actions. * @param regionName The region name of the actions. * @param actions The actions that are grouped by the same region name. * @param cells Place to stuff references to actual data. @@ -512,31 +497,30 @@ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( * @param mutationBuilder mutationBuilder to be used to build mutation. * @param nonceGroup nonceGroup to be applied. * @param indexMap Map of created RegionAction to the original index for a - * RowMutations/CheckAndMutate within the original list of actions + * RowMutations/CheckAndMutate within the original list of actions * @throws IOException */ public static void buildNoDataRegionActions(final byte[] regionName, final Iterable actions, final List cells, final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder, long nonceGroup, final Map indexMap) throws IOException { regionActionBuilder.clear(); - RegionAction.Builder builder = getRegionActionBuilderWithRegion( - regionActionBuilder, regionName); + RegionAction.Builder builder = + getRegionActionBuilderWithRegion(regionActionBuilder, regionName); ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null; boolean hasNonce = false; List rowMutationsList = new ArrayList<>(); List checkAndMutates = new ArrayList<>(); - for (Action action: actions) { + for (Action action : actions) { Row row = action.getAction(); actionBuilder.clear(); actionBuilder.setIndex(action.getOriginalIndex()); mutationBuilder.clear(); if (row instanceof Get) { - Get g = (Get)row; + Get g = (Get) row; builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g))); } else if (row instanceof Put) { buildNoDataRegionAction((Put) row, cells, builder, actionBuilder, mutationBuilder); @@ -554,18 +538,17 @@ public static void buildNoDataRegionActions(final byte[] regionName, RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row; // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString. 
org.apache.hbase.thirdparty.com.google.protobuf.ByteString value = - org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.unsafeWrap( - exec.getRequest().toByteArray()); + org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations + .unsafeWrap(exec.getRequest().toByteArray()); if (cpBuilder == null) { cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder(); } else { cpBuilder.clear(); } - builder.addAction(actionBuilder.setServiceCall( - cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) - .setServiceName(exec.getMethod().getService().getFullName()) - .setMethodName(exec.getMethod().getName()) - .setRequest(value))); + builder.addAction(actionBuilder + .setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) + .setServiceName(exec.getMethod().getService().getFullName()) + .setMethodName(exec.getMethod().getName()).setRequest(value))); } else if (row instanceof RowMutations) { rowMutationsList.add(action); } else if (row instanceof CheckAndMutate) { @@ -609,9 +592,9 @@ public static void buildNoDataRegionActions(final byte[] regionName, getRegionActionBuilderWithRegion(builder, regionName); CheckAndMutate cam = (CheckAndMutate) action.getAction(); - builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), - cam.getQualifier(), cam.getCompareOp(), cam.getValue(), cam.getFilter(), - cam.getTimeRange())); + builder + .setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), cam.getQualifier(), + cam.getCompareOp(), cam.getValue(), cam.getFilter(), cam.getTimeRange())); if (cam.getAction() instanceof Put) { actionBuilder.clear(); @@ -643,8 +626,8 @@ public static void buildNoDataRegionActions(final byte[] regionName, } builder.setAtomic(true); } else { - throw new DoNotRetryIOException("CheckAndMutate doesn't support " + - cam.getAction().getClass().getName()); + throw new DoNotRetryIOException( + "CheckAndMutate doesn't support " + cam.getAction().getClass().getName()); } multiRequestBuilder.addRegionAction(builder.build()); @@ -660,61 +643,61 @@ public static void buildNoDataRegionActions(final byte[] regionName, } private static void buildNoDataRegionAction(final Put put, final List cells, - final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(put); - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, put, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, put, mutationBuilder))); } - private static void buildNoDataRegionAction(final Delete delete, - final List cells, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { + private static void buildNoDataRegionAction(final Delete delete, final List cells, + final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { int size = delete.size(); // Note that a legitimate Delete may have a size of zero; i.e. a Delete that has nothing - // in it but the row to delete. 
In this case, the current implementation does not make + // in it but the row to delete. In this case, the current implementation does not make // a KeyValue to represent a delete-of-all-the-row until we serialize... For such cases // where the size returned is zero, we will send the Delete fully pb'd rather than have // metadata only in the pb and then send the kv along the side in cells. if (size > 0) { cells.add(delete); - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutationNoData(MutationType.DELETE, delete, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.DELETE, delete, mutationBuilder))); } else { - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, mutationBuilder))); } } private static void buildNoDataRegionAction(final Increment increment, - final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + final List cells, long nonce, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(increment); - regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData( - MutationType.INCREMENT, increment, mutationBuilder, nonce))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.INCREMENT, increment, mutationBuilder, nonce))); } - private static void buildNoDataRegionAction(final Append append, - final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + private static void buildNoDataRegionAction(final Append append, final List cells, + long nonce, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(append); - regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData( - MutationType.APPEND, append, mutationBuilder, nonce))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.APPEND, append, mutationBuilder, nonce))); } /** * @return whether or not the rowMutations has a Increment or Append */ private static boolean buildNoDataRegionAction(final RowMutations rowMutations, - final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { + final List cells, long nonce, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { boolean ret = false; - for (Mutation mutation: rowMutations.getMutations()) { + for (Mutation mutation : rowMutations.getMutations()) { mutationBuilder.clear(); MutationProto mp; if (mutation instanceof Increment || mutation instanceof Append) { @@ -743,45 +726,39 @@ private static MutationType getMutationType(Mutation mutation) { } } -// End utilities for Client 
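The "no data" convention documented in the javadoc above can be summarized with a short usage sketch, assuming only the buildNoDataRegionActions(...) signature shown in this hunk: the protobuf MultiRequest carries mutation metadata, while the mutations themselves are collected into the passed-in cells list for the caller to ship separately as a cellblock; Gets are the exception and are fully serialized into the request. The wrapper below is illustrative only (its class and method names are not HBase API), and using HConstants.NO_NONCE with an empty index map is simply the most basic configuration.

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Action;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;

final class NoDataMultiRequestSketch { // illustrative wrapper, not HBase API
  static MultiRequest toNoDataMultiRequest(byte[] regionName, List<Action> actions,
      List<Action> cellsOut) throws IOException {
    MultiRequest.Builder multi = MultiRequest.newBuilder();
    // Metadata-only region actions go into the protobuf builder; the actual row data is
    // appended to cellsOut so the caller can encode it separately as a cellblock.
    // NO_NONCE and the empty index map are assumptions for the simplest case.
    RequestConverter.buildNoDataRegionActions(regionName, actions, cellsOut, multi,
      RegionAction.newBuilder(), ClientProtos.Action.newBuilder(), MutationProto.newBuilder(),
      HConstants.NO_NONCE, new HashMap<>());
    return multi.build();
  }
}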
-//Start utilities for Admin + // End utilities for Client + // Start utilities for Admin /** * Create a protocol buffer GetRegionInfoRequest for a given region name - * * @param regionName the name of the region to get info * @return a protocol buffer GetRegionInfoRequest */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName) { + public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName) { return buildGetRegionInfoRequest(regionName, false); } /** * Create a protocol buffer GetRegionInfoRequest for a given region name - * * @param regionName the name of the region to get info * @param includeCompactionState indicate if the compaction state is requested * @return a protocol buffer GetRegionInfoRequest */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName, - final boolean includeCompactionState) { + public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, + final boolean includeCompactionState) { return buildGetRegionInfoRequest(regionName, includeCompactionState, false); } /** - * - * @param regionName the name of the region to get info - * @param includeCompactionState indicate if the compaction state is requested - * @param includeBestSplitRow indicate if the bestSplitRow is requested + * @param regionName the name of the region to get info + * @param includeCompactionState indicate if the compaction state is requested + * @param includeBestSplitRow indicate if the bestSplitRow is requested * @return protocol buffer GetRegionInfoRequest */ public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, final boolean includeCompactionState, boolean includeBestSplitRow) { GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (includeCompactionState) { builder.setCompactionState(includeCompactionState); @@ -829,7 +806,7 @@ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName * @return a protocol buffer FlushRegionRequest */ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName, - byte[] columnFamily, boolean writeFlushWALMarker) { + byte[] columnFamily, boolean writeFlushWALMarker) { FlushRegionRequest.Builder builder = FlushRegionRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); @@ -847,8 +824,8 @@ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName * @param favoredNodes a list of favored nodes * @return a protocol buffer OpenRegionRequest */ - public static OpenRegionRequest buildOpenRegionRequest(ServerName server, - final RegionInfo region, List favoredNodes) { + public static OpenRegionRequest buildOpenRegionRequest(ServerName server, final RegionInfo region, + List favoredNodes) { OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); builder.addOpenInfo(buildRegionOpenInfo(region, favoredNodes, -1L)); if (server != null) { @@ -912,8 +889,8 @@ public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, /** * @see #buildRollWALWriterRequest() */ - private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = RollWALWriterRequest.newBuilder() - .build(); + private static RollWALWriterRequest 
ROLL_WAL_WRITER_REQUEST = + RollWALWriterRequest.newBuilder().build(); /** * Create a new RollWALWriterRequest @@ -926,8 +903,8 @@ public static RollWALWriterRequest buildRollWALWriterRequest() { /** * @see #buildGetServerInfoRequest() */ - private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = GetServerInfoRequest.newBuilder() - .build(); + private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = + GetServerInfoRequest.newBuilder().build(); /** * Create a new GetServerInfoRequest @@ -948,17 +925,16 @@ public static StopServerRequest buildStopServerRequest(final String reason) { return builder.build(); } -//End utilities for Admin + // End utilities for Admin /** * Convert a byte array to a protocol buffer RegionSpecifier - * * @param type the region specifier type * @param value the region specifier byte array value * @return a protocol buffer RegionSpecifier */ - public static RegionSpecifier buildRegionSpecifier( - final RegionSpecifierType type, final byte[] value) { + public static RegionSpecifier buildRegionSpecifier(final RegionSpecifierType type, + final byte[] value) { RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); regionBuilder.setValue(UnsafeByteOperations.unsafeWrap(value)); regionBuilder.setType(type); @@ -967,16 +943,12 @@ public static RegionSpecifier buildRegionSpecifier( /** * Create a protocol buffer AddColumnRequest - * * @param tableName * @param column * @return an AddColumnRequest */ - public static AddColumnRequest buildAddColumnRequest( - final TableName tableName, - final ColumnFamilyDescriptor column, - final long nonceGroup, - final long nonce) { + public static AddColumnRequest buildAddColumnRequest(final TableName tableName, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column)); @@ -987,16 +959,12 @@ public static AddColumnRequest buildAddColumnRequest( /** * Create a protocol buffer DeleteColumnRequest - * * @param tableName * @param columnName * @return a DeleteColumnRequest */ - public static DeleteColumnRequest buildDeleteColumnRequest( - final TableName tableName, - final byte [] columnName, - final long nonceGroup, - final long nonce) { + public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName, + final byte[] columnName, final long nonceGroup, final long nonce) { DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setColumnName(UnsafeByteOperations.unsafeWrap(columnName)); @@ -1007,16 +975,12 @@ public static DeleteColumnRequest buildDeleteColumnRequest( /** * Create a protocol buffer ModifyColumnRequest - * * @param tableName * @param column * @return an ModifyColumnRequest */ - public static ModifyColumnRequest buildModifyColumnRequest( - final TableName tableName, - final ColumnFamilyDescriptor column, - final long nonceGroup, - final long nonce) { + public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column)); @@ -1025,11 +989,11 @@ public static ModifyColumnRequest 
buildModifyColumnRequest( return builder.build(); } - public static ModifyColumnStoreFileTrackerRequest - buildModifyColumnStoreFileTrackerRequest(final TableName tableName, final byte[] family, - final String dstSFT, final long nonceGroup, final long nonce) { + public static ModifyColumnStoreFileTrackerRequest buildModifyColumnStoreFileTrackerRequest( + final TableName tableName, final byte[] family, final String dstSFT, final long nonceGroup, + final long nonce) { ModifyColumnStoreFileTrackerRequest.Builder builder = - ModifyColumnStoreFileTrackerRequest.newBuilder(); + ModifyColumnStoreFileTrackerRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setFamily(ByteString.copyFrom(family)); builder.setDstSft(dstSFT); @@ -1047,8 +1011,8 @@ public static ModifyColumnRequest buildModifyColumnRequest( public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName, ServerName destServerName) { MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, - encodedRegionName)); + builder.setRegion( + buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, encodedRegionName)); if (destServerName != null) { builder.setDestServerName(ProtobufUtil.toServerName(destServerName)); } @@ -1056,14 +1020,12 @@ public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName, } public static MergeTableRegionsRequest buildMergeTableRegionsRequest( - final byte[][] encodedNameOfdaughaterRegions, - final boolean forcible, - final long nonceGroup, + final byte[][] encodedNameOfdaughaterRegions, final boolean forcible, final long nonceGroup, final long nonce) throws DeserializationException { MergeTableRegionsRequest.Builder builder = MergeTableRegionsRequest.newBuilder(); - for (int i = 0; i< encodedNameOfdaughaterRegions.length; i++) { - builder.addRegion(buildRegionSpecifier( - RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfdaughaterRegions[i])); + for (int i = 0; i < encodedNameOfdaughaterRegions.length; i++) { + builder.addRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, + encodedNameOfdaughaterRegions[i])); } builder.setForcible(forcible); builder.setNonceGroup(nonceGroup); @@ -1086,51 +1048,44 @@ public static SplitTableRegionRequest buildSplitTableRegionRequest(final RegionI /** * Create a protocol buffer AssignRegionRequest - * * @param regionName * @return an AssignRegionRequest */ - public static AssignRegionRequest buildAssignRegionRequest(final byte [] regionName) { + public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) { AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** * Creates a protocol buffer UnassignRegionRequest - * * @param regionName * @return an UnassignRegionRequest */ - public static UnassignRegionRequest buildUnassignRegionRequest( - final byte [] regionName) { + public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) { UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** * Creates a 
protocol buffer OfflineRegionRequest - * * @param regionName * @return an OfflineRegionRequest */ - public static OfflineRegionRequest buildOfflineRegionRequest(final byte [] regionName) { + public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) { OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** * Creates a protocol buffer DeleteTableRequest - * * @param tableName * @return a DeleteTableRequest */ - public static DeleteTableRequest buildDeleteTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); @@ -1140,16 +1095,12 @@ public static DeleteTableRequest buildDeleteTableRequest( /** * Creates a protocol buffer TruncateTableRequest - * * @param tableName name of table to truncate * @param preserveSplits True if the splits should be preserved * @return a TruncateTableRequest */ - public static TruncateTableRequest buildTruncateTableRequest( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) { + public static TruncateTableRequest buildTruncateTableRequest(final TableName tableName, + final boolean preserveSplits, final long nonceGroup, final long nonce) { TruncateTableRequest.Builder builder = TruncateTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setPreserveSplits(preserveSplits); @@ -1160,14 +1111,11 @@ public static TruncateTableRequest buildTruncateTableRequest( /** * Creates a protocol buffer EnableTableRequest - * * @param tableName * @return an EnableTableRequest */ - public static EnableTableRequest buildEnableTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static EnableTableRequest buildEnableTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { EnableTableRequest.Builder builder = EnableTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); @@ -1177,14 +1125,11 @@ public static EnableTableRequest buildEnableTableRequest( /** * Creates a protocol buffer DisableTableRequest - * * @param tableName * @return a DisableTableRequest */ - public static DisableTableRequest buildDisableTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static DisableTableRequest buildDisableTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { DisableTableRequest.Builder builder = DisableTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setNonceGroup(nonceGroup); @@ -1194,20 +1139,16 @@ public static DisableTableRequest buildDisableTableRequest( /** * Creates a protocol buffer CreateTableRequest - * * @param tableDescriptor * @param splitKeys * @return a CreateTableRequest */ - public static CreateTableRequest buildCreateTableRequest( - final TableDescriptor tableDescriptor, - final byte [][] splitKeys, - final long nonceGroup, - final long 
nonce) { + public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor, + final byte[][] splitKeys, final long nonceGroup, final long nonce) { CreateTableRequest.Builder builder = CreateTableRequest.newBuilder(); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor)); if (splitKeys != null) { - for(byte[] key : splitKeys) { + for (byte[] key : splitKeys) { builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key)); } } @@ -1218,16 +1159,12 @@ public static CreateTableRequest buildCreateTableRequest( /** * Creates a protocol buffer ModifyTableRequest - * * @param tableName * @param tableDesc * @return a ModifyTableRequest */ - public static ModifyTableRequest buildModifyTableRequest( - final TableName tableName, - final TableDescriptor tableDesc, - final long nonceGroup, - final long nonce) { + public static ModifyTableRequest buildModifyTableRequest(final TableName tableName, + final TableDescriptor tableDesc, final long nonceGroup, final long nonce) { ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc)); @@ -1237,9 +1174,9 @@ public static ModifyTableRequest buildModifyTableRequest( } public static ModifyTableStoreFileTrackerRequest buildModifyTableStoreFileTrackerRequest( - final TableName tableName, final String dstSFT, final long nonceGroup, final long nonce) { + final TableName tableName, final String dstSFT, final long nonceGroup, final long nonce) { ModifyTableStoreFileTrackerRequest.Builder builder = - ModifyTableStoreFileTrackerRequest.newBuilder(); + ModifyTableStoreFileTrackerRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setDstSft(dstSFT); builder.setNonceGroup(nonceGroup); @@ -1249,12 +1186,11 @@ public static ModifyTableStoreFileTrackerRequest buildModifyTableStoreFileTracke /** * Creates a protocol buffer GetTableDescriptorsRequest - * * @param tableNames * @return a GetTableDescriptorsRequest */ - public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final List tableNames) { + public static GetTableDescriptorsRequest + buildGetTableDescriptorsRequest(final List tableNames) { GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder(); if (tableNames != null) { for (TableName tableName : tableNames) { @@ -1266,7 +1202,6 @@ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( /** * Creates a protocol buffer GetTableDescriptorsRequest - * * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a GetTableDescriptorsRequest @@ -1283,7 +1218,6 @@ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(final P /** * Creates a protocol buffer GetTableNamesRequest - * * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a GetTableNamesRequest @@ -1314,7 +1248,7 @@ public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final T * @return a SetRegionStateInMetaRequest */ public static SetRegionStateInMetaRequest - buildSetRegionStateInMetaRequest(Map nameOrEncodedName2State) { + buildSetRegionStateInMetaRequest(Map nameOrEncodedName2State) { SetRegionStateInMetaRequest.Builder builder = SetRegionStateInMetaRequest.newBuilder(); nameOrEncodedName2State.forEach((name, 
state) -> { byte[] bytes = Bytes.toBytes(name); @@ -1325,27 +1259,24 @@ public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final T spec = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, bytes); } builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec) - .setState(state.convert()).build()); + .setState(state.convert()).build()); }); return builder.build(); } /** * Creates a protocol buffer GetTableDescriptorsRequest for a single table - * * @param tableName the table name * @return a GetTableDescriptorsRequest */ - public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final TableName tableName) { + public static GetTableDescriptorsRequest + buildGetTableDescriptorsRequest(final TableName tableName) { return GetTableDescriptorsRequest.newBuilder() - .addTableNames(ProtobufUtil.toProtoTableName(tableName)) - .build(); + .addTableNames(ProtobufUtil.toProtoTableName(tableName)).build(); } /** * Creates a protocol buffer IsMasterRunningRequest - * * @return a IsMasterRunningRequest */ public static IsMasterRunningRequest buildIsMasterRunningRequest() { @@ -1354,20 +1285,17 @@ public static IsMasterRunningRequest buildIsMasterRunningRequest() { /** * Creates a protocol buffer SetBalancerRunningRequest - * * @param on * @param synchronous * @return a SetBalancerRunningRequest */ - public static SetBalancerRunningRequest buildSetBalancerRunningRequest( - boolean on, + public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on, boolean synchronous) { return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build(); } /** * Creates a protocol buffer IsBalancerEnabledRequest - * * @return a IsBalancerEnabledRequest */ public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() { @@ -1376,35 +1304,30 @@ public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() { /** * Creates a protocol buffer ClearRegionBlockCacheRequest - * * @return a ClearRegionBlockCacheRequest */ public static ClearRegionBlockCacheRequest buildClearRegionBlockCacheRequest(List hris) { ClearRegionBlockCacheRequest.Builder builder = ClearRegionBlockCacheRequest.newBuilder(); - hris.forEach( - hri -> builder.addRegion( - buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName()) - )); + hris.forEach(hri -> builder + .addRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName()))); return builder.build(); } /** * Creates a protocol buffer GetClusterStatusRequest - * * @return A GetClusterStatusRequest */ public static GetClusterStatusRequest buildGetClusterStatusRequest(EnumSet

    servers, String targetGroup) { Set hostPorts = Sets.newHashSet(); for (Address el : servers) { - hostPorts.add( - HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()).setPort(el.getPort()) - .build()); + hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + .setPort(el.getPort()).build()); } return MoveServersRequest.newBuilder().setTargetGroup(targetGroup).addAllServers(hostPorts) - .build(); + .build(); } public static RemoveServersRequest buildRemoveServersRequest(Set
    servers) { Set hostPorts = Sets.newHashSet(); - for(Address el: servers) { - hostPorts.add(HBaseProtos.ServerName.newBuilder() - .setHostName(el.getHostname()) - .setPort(el.getPort()) - .build()); + for (Address el : servers) { + hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) + .setPort(el.getPort()).build()); } - return RemoveServersRequest.newBuilder() - .addAllServers(hostPorts) - .build(); + return RemoveServersRequest.newBuilder().addAllServers(hostPorts).build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java index d62f0ac74e22..bb6eb13e089c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; /** - * Helper utility to build protocol buffer responses, - * or retrieve data from protocol buffer responses. + * Helper utility to build protocol buffer responses, or retrieve data from protocol buffer + * responses. */ @InterfaceAudience.Private public final class ResponseConverter { @@ -76,9 +76,7 @@ private ResponseConverter() { // Start utilities for Client public static SingleResponse getResult(final ClientProtos.MutateRequest request, - final ClientProtos.MutateResponse response, - final CellScanner cells) - throws IOException { + final ClientProtos.MutateResponse response, final CellScanner cells) throws IOException { SingleResponse singleResponse = new SingleResponse(); SingleResponse.Entry entry = new SingleResponse.Entry(); entry.setResult(ProtobufUtil.toResult(response.getResult(), cells)); @@ -89,63 +87,60 @@ public static SingleResponse getResult(final ClientProtos.MutateRequest request, /** * Get the results from a protocol buffer MultiResponse - * * @param request the original protocol buffer MultiRequest * @param response the protocol buffer MultiResponse to convert - * @param cells Cells to go with the passed in proto. Can be null. + * @param cells Cells to go with the passed in proto. Can be null. * @return the results that were in the MultiResponse (a Result or an Exception). * @throws IOException */ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request, - final MultiResponse response, final CellScanner cells) - throws IOException { + final MultiResponse response, final CellScanner cells) throws IOException { return getResults(request, null, response, cells); } /** * Get the results from a protocol buffer MultiResponse - * * @param request the original protocol buffer MultiRequest * @param indexMap Used to support RowMutations/CheckAndMutate in batch * @param response the protocol buffer MultiResponse to convert - * @param cells Cells to go with the passed in proto. Can be null. + * @param cells Cells to go with the passed in proto. Can be null. * @return the results that were in the MultiResponse (a Result or an Exception). 
* @throws IOException */ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request, - final Map indexMap, final MultiResponse response, - final CellScanner cells) throws IOException { + final Map indexMap, final MultiResponse response, final CellScanner cells) + throws IOException { int requestRegionActionCount = request.getRegionActionCount(); int responseRegionActionResultCount = response.getRegionActionResultCount(); if (requestRegionActionCount != responseRegionActionResultCount) { - throw new IllegalStateException("Request mutation count=" + requestRegionActionCount + - " does not match response mutation result count=" + responseRegionActionResultCount); + throw new IllegalStateException("Request mutation count=" + requestRegionActionCount + + " does not match response mutation result count=" + responseRegionActionResultCount); } org.apache.hadoop.hbase.client.MultiResponse results = - new org.apache.hadoop.hbase.client.MultiResponse(); + new org.apache.hadoop.hbase.client.MultiResponse(); for (int i = 0; i < responseRegionActionResultCount; i++) { RegionAction actions = request.getRegionAction(i); RegionActionResult actionResult = response.getRegionActionResult(i); HBaseProtos.RegionSpecifier rs = actions.getRegion(); - if (rs.hasType() && - (rs.getType() != HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)){ + if (rs.hasType() + && (rs.getType() != HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)) { throw new IllegalArgumentException( "We support only encoded types for protobuf multi response."); } byte[] regionName = rs.getValue().toByteArray(); if (actionResult.hasException()) { - Throwable regionException = ProtobufUtil.toException(actionResult.getException()); + Throwable regionException = ProtobufUtil.toException(actionResult.getException()); results.addException(regionName, regionException); continue; } if (actions.getActionCount() != actionResult.getResultOrExceptionCount()) { - throw new IllegalStateException("actions.getActionCount=" + actions.getActionCount() + - ", actionResult.getResultOrExceptionCount=" + - actionResult.getResultOrExceptionCount() + " for region " + actions.getRegion()); + throw new IllegalStateException("actions.getActionCount=" + actions.getActionCount() + + ", actionResult.getResultOrExceptionCount=" + actionResult.getResultOrExceptionCount() + + " for region " + actions.getRegion()); } // For RowMutations/CheckAndMutate action, if there is an exception, the exception is set @@ -197,7 +192,7 @@ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final Mult } private static CheckAndMutateResult getCheckAndMutateResult(RegionActionResult actionResult, - CellScanner cells) throws IOException { + CellScanner cells) throws IOException { Result result = null; if (actionResult.getResultOrExceptionCount() > 0) { // Get the result of the Increment/Append operations from the first element of the @@ -214,7 +209,7 @@ private static CheckAndMutateResult getCheckAndMutateResult(RegionActionResult a } private static Result getMutateRowResult(RegionActionResult actionResult, CellScanner cells) - throws IOException { + throws IOException { if (actionResult.getProcessed()) { Result result = null; if (actionResult.getResultOrExceptionCount() > 0) { @@ -239,11 +234,10 @@ private static Result getMutateRowResult(RegionActionResult actionResult, CellSc /** * Create a CheckAndMutateResult object from a protocol buffer MutateResponse - * * @return a CheckAndMutateResult object */ public static 
CheckAndMutateResult getCheckAndMutateResult( - ClientProtos.MutateResponse mutateResponse, CellScanner cells) throws IOException { + ClientProtos.MutateResponse mutateResponse, CellScanner cells) throws IOException { boolean success = mutateResponse.getProcessed(); Result result = null; if (mutateResponse.hasResult()) { @@ -254,7 +248,6 @@ public static CheckAndMutateResult getCheckAndMutateResult( /** * Wrap a throwable to an action result. - * * @param t * @return an action result builder */ @@ -266,7 +259,6 @@ public static ResultOrException.Builder buildActionResult(final Throwable t) { /** * Wrap a throwable to an action result. - * * @param r * @return an action result builder */ @@ -283,8 +275,7 @@ public static ResultOrException.Builder buildActionResult(final ClientProtos.Res public static NameBytesPair buildException(final Throwable t) { NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder(); parameterBuilder.setName(t.getClass().getName()); - parameterBuilder.setValue( - ByteString.copyFromUtf8(StringUtils.stringifyException(t))); + parameterBuilder.setValue(ByteString.copyFromUtf8(StringUtils.stringifyException(t))); return parameterBuilder.build(); } @@ -297,12 +288,11 @@ public static HasPermissionResponse buildHasPermissionResponse(boolean hasPermis return builder.build(); } -// End utilities for Client -// Start utilities for Admin + // End utilities for Client + // Start utilities for Admin /** * Get the list of region info from a GetOnlineRegionResponse - * * @param proto the GetOnlineRegionResponse * @return the list of region info */ @@ -313,25 +303,22 @@ public static List getRegionInfos(final GetOnlineRegionResponse prot /** * Check if the region is closed from a CloseRegionResponse - * * @param proto the CloseRegionResponse * @return the region close state */ - public static boolean isClosed - (final CloseRegionResponse proto) { + public static boolean isClosed(final CloseRegionResponse proto) { if (proto == null || !proto.hasClosed()) return false; return proto.getClosed(); } /** * A utility to build a GetServerInfoResponse. - * * @param serverName * @param webuiPort * @return the response */ - public static GetServerInfoResponse buildGetServerInfoResponse( - final ServerName serverName, final int webuiPort) { + public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName, + final int webuiPort) { GetServerInfoResponse.Builder builder = GetServerInfoResponse.newBuilder(); ServerInfo.Builder serverInfoBuilder = ServerInfo.newBuilder(); serverInfoBuilder.setServerName(ProtobufUtil.toServerName(serverName)); @@ -344,14 +331,13 @@ public static GetServerInfoResponse buildGetServerInfoResponse( /** * A utility to build a GetOnlineRegionResponse. 
- * * @param regions * @return the response */ - public static GetOnlineRegionResponse buildGetOnlineRegionResponse( - final List regions) { + public static GetOnlineRegionResponse + buildGetOnlineRegionResponse(final List regions) { GetOnlineRegionResponse.Builder builder = GetOnlineRegionResponse.newBuilder(); - for (RegionInfo region: regions) { + for (RegionInfo region : regions) { builder.addRegionInfo(ProtobufUtil.toRegionInfo(region)); } return builder.build(); @@ -381,30 +367,29 @@ public static RunCleanerChoreResponse buildRunCleanerChoreResponse(boolean ran) return RunCleanerChoreResponse.newBuilder().setCleanerChoreRan(ran).build(); } -// End utilities for Admin + // End utilities for Admin /** * Creates a response for the last flushed sequence Id request * @return A GetLastFlushedSequenceIdResponse */ - public static GetLastFlushedSequenceIdResponse buildGetLastFlushedSequenceIdResponse( - RegionStoreSequenceIds ids) { + public static GetLastFlushedSequenceIdResponse + buildGetLastFlushedSequenceIdResponse(RegionStoreSequenceIds ids) { return GetLastFlushedSequenceIdResponse.newBuilder() .setLastFlushedSequenceId(ids.getLastFlushedSequenceId()) .addAllStoreLastFlushedSequenceId(ids.getStoreSequenceIdList()).build(); } /** - * Stores an exception encountered during RPC invocation so it can be passed back - * through to the client. + * Stores an exception encountered during RPC invocation so it can be passed back through to the + * client. * @param controller the controller instance provided by the client when calling the service * @param ioe the exception encountered */ - public static void setControllerException(RpcController controller, - IOException ioe) { + public static void setControllerException(RpcController controller, IOException ioe) { if (controller != null) { if (controller instanceof ServerRpcController) { - ((ServerRpcController)controller).setFailedOn(ioe); + ((ServerRpcController) controller).setFailedOn(ioe); } else { controller.setFailed(StringUtils.stringifyException(ioe)); } @@ -415,13 +400,13 @@ public static void setControllerException(RpcController controller, * Retreivies exception stored during RPC invocation. * @param controller the controller instance provided by the client when calling the service * @return exception if any, or null; Will return DoNotRetryIOException for string represented - * failure causes in controller. + * failure causes in controller. */ @Nullable public static IOException getControllerException(RpcController controller) throws IOException { if (controller != null && controller.failed()) { if (controller instanceof ServerRpcController) { - return ((ServerRpcController)controller).getFailedOn(); + return ((ServerRpcController) controller).getFailedOn(); } else { return new DoNotRetryIOException(controller.errorText()); } @@ -429,7 +414,6 @@ public static IOException getControllerException(RpcController controller) throw return null; } - /** * Create Results from the cells using the cells meta data. * @param cellScanner @@ -440,27 +424,28 @@ public static Result[] getResults(CellScanner cellScanner, ScanResponse response throws IOException { if (response == null) return null; // If cellscanner, then the number of Results to return is the count of elements in the - // cellsPerResult list. Otherwise, it is how many results are embedded inside the response. - int noOfResults = cellScanner != null? - response.getCellsPerResultCount(): response.getResultsCount(); + // cellsPerResult list. 
Otherwise, it is how many results are embedded inside the response. + int noOfResults = + cellScanner != null ? response.getCellsPerResultCount() : response.getResultsCount(); Result[] results = new Result[noOfResults]; for (int i = 0; i < noOfResults; i++) { if (cellScanner != null) { - // Cells are out in cellblocks. Group them up again as Results. How many to read at a + // Cells are out in cellblocks. Group them up again as Results. How many to read at a // time will be found in getCellsLength -- length here is how many Cells in the i'th Result int noOfCells = response.getCellsPerResult(i); boolean isPartial = - response.getPartialFlagPerResultCount() > i ? - response.getPartialFlagPerResult(i) : false; + response.getPartialFlagPerResultCount() > i ? response.getPartialFlagPerResult(i) + : false; List cells = new ArrayList<>(noOfCells); for (int j = 0; j < noOfCells; j++) { try { if (cellScanner.advance() == false) { // We are not able to retrieve the exact number of cells which ResultCellMeta says us. - // We have to scan for the same results again. Throwing DNRIOE as a client retry on the + // We have to scan for the same results again. Throwing DNRIOE as a client retry on + // the // same scanner will result in OutOfOrderScannerNextException String msg = "Results sent from server=" + noOfResults + ". But only got " + i - + " results completely at client. Resetting the scanner to scan again."; + + " results completely at client. Resetting the scanner to scan again."; LOG.error(msg); throw new DoNotRetryIOException(msg); } @@ -468,8 +453,9 @@ public static Result[] getResults(CellScanner cellScanner, ScanResponse response // We are getting IOE while retrieving the cells for Results. // We have to scan for the same results again. Throwing DNRIOE as a client retry on the // same scanner will result in OutOfOrderScannerNextException - LOG.error("Exception while reading cells from result." - + "Resetting the scanner to scan again.", ioe); + LOG.error( + "Exception while reading cells from result." + "Resetting the scanner to scan again.", + ioe); throw new DoNotRetryIOException("Resetting the scanner.", ioe); } cells.add(cellScanner.current()); @@ -507,11 +493,11 @@ public static Map getScanMetrics(ScanResponse response) { /** * Creates a protocol buffer ClearRegionBlockCacheResponse - * * @return a ClearRegionBlockCacheResponse */ - public static AdminProtos.ClearRegionBlockCacheResponse buildClearRegionBlockCacheResponse(final HBaseProtos.CacheEvictionStats - cacheEvictionStats) { - return AdminProtos.ClearRegionBlockCacheResponse.newBuilder().setStats(cacheEvictionStats).build(); + public static AdminProtos.ClearRegionBlockCacheResponse + buildClearRegionBlockCacheResponse(final HBaseProtos.CacheEvictionStats cacheEvictionStats) { + return AdminProtos.ClearRegionBlockCacheResponse.newBuilder().setStats(cacheEvictionStats) + .build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java index 771ee8cffbec..a4b20f2402cf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.slowlog; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -33,17 +30,18 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog; + /** * Slowlog Accessor to record slow/large RPC log identified at each RegionServer RpcServer level. - * This can be done only optionally to record the entire history of slow/large rpc calls - * since RingBuffer can handle only limited latest records. + * This can be done only optionally to record the entire history of slow/large rpc calls since + * RingBuffer can handle only limited latest records. */ @InterfaceAudience.Private public class SlowLogTableAccessor { @@ -53,14 +51,13 @@ public class SlowLogTableAccessor { private static Connection connection; /** - * hbase:slowlog table name - can be enabled - * with config - hbase.regionserver.slowlog.systable.enabled + * hbase:slowlog table name - can be enabled with config - + * hbase.regionserver.slowlog.systable.enabled */ public static final TableName SLOW_LOG_TABLE_NAME = - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "slowlog"); + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "slowlog"); - private static void doPut(final Connection connection, final List puts) - throws IOException { + private static void doPut(final Connection connection, final List puts) throws IOException { try (Table table = connection.getTable(SLOW_LOG_TABLE_NAME)) { table.put(puts); } @@ -76,32 +73,32 @@ public static void addSlowLogRecords(final List slowL List puts = new ArrayList<>(slowLogPayloads.size()); for (TooSlowLog.SlowLogPayload slowLogPayload : slowLogPayloads) { final byte[] rowKey = getRowKey(slowLogPayload); - final Put put = new Put(rowKey).setDurability(Durability.SKIP_WAL) - .setPriority(HConstants.NORMAL_QOS) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("call_details"), - Bytes.toBytes(slowLogPayload.getCallDetails())) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("client_address"), - Bytes.toBytes(slowLogPayload.getClientAddress())) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("method_name"), - Bytes.toBytes(slowLogPayload.getMethodName())) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("param"), - Bytes.toBytes(slowLogPayload.getParam())) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("processing_time"), - Bytes.toBytes(Integer.toString(slowLogPayload.getProcessingTime()))) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("queue_time"), - Bytes.toBytes(Integer.toString(slowLogPayload.getQueueTime()))) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("region_name"), - Bytes.toBytes(slowLogPayload.getRegionName())) - 
.addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("response_size"), - Bytes.toBytes(Long.toString(slowLogPayload.getResponseSize()))) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("server_class"), - Bytes.toBytes(slowLogPayload.getServerClass())) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("start_time"), - Bytes.toBytes(Long.toString(slowLogPayload.getStartTime()))) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("type"), - Bytes.toBytes(slowLogPayload.getType().name())) - .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("username"), - Bytes.toBytes(slowLogPayload.getUserName())); + final Put put = + new Put(rowKey).setDurability(Durability.SKIP_WAL).setPriority(HConstants.NORMAL_QOS) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("call_details"), + Bytes.toBytes(slowLogPayload.getCallDetails())) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("client_address"), + Bytes.toBytes(slowLogPayload.getClientAddress())) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("method_name"), + Bytes.toBytes(slowLogPayload.getMethodName())) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("param"), + Bytes.toBytes(slowLogPayload.getParam())) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("processing_time"), + Bytes.toBytes(Integer.toString(slowLogPayload.getProcessingTime()))) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("queue_time"), + Bytes.toBytes(Integer.toString(slowLogPayload.getQueueTime()))) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("region_name"), + Bytes.toBytes(slowLogPayload.getRegionName())) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("response_size"), + Bytes.toBytes(Long.toString(slowLogPayload.getResponseSize()))) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("server_class"), + Bytes.toBytes(slowLogPayload.getServerClass())) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("start_time"), + Bytes.toBytes(Long.toString(slowLogPayload.getStartTime()))) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("type"), + Bytes.toBytes(slowLogPayload.getType().name())) + .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("username"), + Bytes.toBytes(slowLogPayload.getUserName())); puts.add(put); } try { @@ -126,17 +123,15 @@ private static synchronized void createConnection(Configuration configuration) } /** - * Create rowKey: currentTime APPEND slowLogPayload.hashcode - * Scan on slowlog table should keep records with sorted order of time, however records - * added at the very same time could be in random order. - * + * Create rowKey: currentTime APPEND slowLogPayload.hashcode Scan on slowlog table should keep + * records with sorted order of time, however records added at the very same time could be in + * random order. * @param slowLogPayload SlowLogPayload to process * @return rowKey byte[] */ private static byte[] getRowKey(final TooSlowLog.SlowLogPayload slowLogPayload) { String hashcode = String.valueOf(slowLogPayload.hashCode()); - String lastFiveDig = - hashcode.substring((hashcode.length() > 5) ? (hashcode.length() - 5) : 0); + String lastFiveDig = hashcode.substring((hashcode.length() > 5) ? 
(hashcode.length() - 5) : 0); if (lastFiveDig.startsWith("-")) { lastFiveDig = String.valueOf(ThreadLocalRandom.current().nextInt(99999)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java index f2f917e011a7..d7efaeac21a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,8 +24,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; /** - * Class to help with dealing with a snapshot description on the client side. - * There is a corresponding class on the server side. + * Class to help with dealing with a snapshot description on the client side. There is a + * corresponding class on the server side. */ @InterfaceAudience.Private public final class ClientSnapshotDescriptionUtils { @@ -68,15 +67,9 @@ public static String toString(SnapshotProtos.SnapshotDescription snapshot) { return null; } - return new StringBuilder("{ ss=") - .append(snapshot.getName()) - .append(" table=") - .append(snapshot.hasTable() ? TableName.valueOf(snapshot.getTable()) : "") - .append(" type=") - .append(snapshot.getType()) - .append(" ttl=") - .append(snapshot.getTtl()) - .append(" }") - .toString(); + return new StringBuilder("{ ss=").append(snapshot.getName()).append(" table=") + .append(snapshot.hasTable() ? TableName.valueOf(snapshot.getTable()) : "").append(" type=") + .append(snapshot.getType()).append(" ttl=").append(snapshot.getTtl()).append(" }") + .toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/CorruptedSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/CorruptedSnapshotException.java index c8ba848d28f1..6d126d4a9fbf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/CorruptedSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/CorruptedSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ public class CorruptedSnapshotException extends HBaseSnapshotException { /** * Snapshot was corrupt for some reason. - * * @param message message describing the exception * @param e the actual cause of the exception */ @@ -38,7 +37,6 @@ public CorruptedSnapshotException(String message, Exception e) { /** * Snapshot was corrupt for some reason. - * * @param message full description of the failure * @param snapshotDescription snapshot that was expected */ @@ -48,10 +46,9 @@ public CorruptedSnapshotException(String message, SnapshotDescription snapshotDe /** * Snapshot was corrupt for some reason. 
- * * @param message message describing the exception */ public CorruptedSnapshotException(String message) { - super(message, (SnapshotDescription)null); + super(message, (SnapshotDescription) null); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java index df5f9255e4da..c2e1949224e7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java index 1f50b5ce53fa..e4f9469d1fff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,6 @@ public class HBaseSnapshotException extends DoNotRetryIOException { /** * Some exception happened for a snapshot and don't even know the snapshot that it was about. - * * @param message the full description of the failure */ public HBaseSnapshotException(String message) { @@ -40,7 +39,6 @@ public HBaseSnapshotException(String message) { /** * Exception for the given snapshot that has no previous root cause. - * * @param message the reason why the snapshot failed * @param snapshotDescription the description of the snapshot that is failing */ @@ -51,7 +49,6 @@ public HBaseSnapshotException(String message, SnapshotDescription snapshotDescri /** * Exception for the given snapshot due to another exception. - * * @param message the reason why the snapshot failed * @param cause the root cause of the failure * @param snapshotDescription the description of the snapshot that is being failed @@ -65,7 +62,6 @@ public HBaseSnapshotException(String message, Throwable cause, /** * Exception when the description of the snapshot cannot be determined, due to some root other * root cause. - * * @param message description of what caused the failure * @param cause the root cause */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java index 029450994e0b..5b7d2560d2f2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java index d4f672b1e0d5..3d370896412f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +21,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when a snapshot could not be created due to a server-side error when - * taking the snapshot. + * Thrown when a snapshot could not be created due to a server-side error when taking the snapshot. */ @SuppressWarnings("serial") @InterfaceAudience.Public public class SnapshotCreationException extends HBaseSnapshotException { /** * Used internally by the RPC engine to pass the exception back to the client. - * * @param message error message to pass back */ public SnapshotCreationException(String message) { @@ -38,7 +36,6 @@ public SnapshotCreationException(String message) { /** * Failure to create the specified snapshot. - * * @param message reason why the snapshot couldn't be completed * @param snapshotDescription description of the snapshot attempted */ @@ -48,7 +45,6 @@ public SnapshotCreationException(String message, SnapshotDescription snapshotDes /** * Failure to create the specified snapshot due to an external cause. - * * @param message reason why the snapshot couldn't be completed * @param cause the root cause of the failure * @param snapshotDescription description of the snapshot attempted diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java index 9c0e51c39e5b..923c3b46d1d7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDoesNotExistException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java index 6942b691939b..80de174d3a3c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ public class SnapshotExistsException extends HBaseSnapshotException { /** * Failure due to the snapshot already existing. 
- * * @param message the full description of the failure */ public SnapshotExistsException(String message) { @@ -37,7 +36,6 @@ public SnapshotExistsException(String message) { /** * Failure due to the snapshot already existing. - * * @param message the full description of the failure * @param snapshotDescription snapshot that was attempted */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java index 5de352108fa3..d6e36552105b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.snapshot; import java.io.IOException; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java index 7951eafb4104..741646d6981d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/UnknownSnapshotException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java index 4d8d38c44ded..a4d5fccc1be7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/FileSystemVersionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** Thrown when the file system needs to be upgraded */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java index 77cbf387148c..0ff131f23bf2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
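[Reading aid, not part of the patch] The snapshot exception hunks above only reflow javadoc; behaviour is unchanged. As a hedged sketch of how this exception family surfaces on the client side (the table and snapshot names are invented; Admin.snapshot and HBaseSnapshotException.getSnapshotDescription are existing client APIs):

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;

public class SnapshotFailureSketch {
  void takeSnapshot(Admin admin) throws IOException {
    try {
      admin.snapshot("usertable_backup", TableName.valueOf("usertable"));
    } catch (HBaseSnapshotException e) {
      // SnapshotCreationException, SnapshotExistsException, CorruptedSnapshotException, ... all
      // extend HBaseSnapshotException; the attached description may be null when the failure
      // happened before the snapshot was registered.
      SnapshotDescription failed = e.getSnapshotDescription();
      throw e;
    }
  }
}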
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java index 057cb7e37555..0664a2a0dde8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,31 +25,23 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** - * - * The PoolMap maps a key to a collection of values, the elements - * of which are managed by a pool. In effect, that collection acts as a shared - * pool of resources, access to which is closely controlled as per the semantics - * of the pool. - * + * The PoolMap maps a key to a collection of values, the elements of which are managed + * by a pool. In effect, that collection acts as a shared pool of resources, access to which is + * closely controlled as per the semantics of the pool. *
- * In case the size of the pool is set to a non-zero positive number, that is
- * used to cap the number of resources that a pool may contain for any given
- * key. A size of {@link Integer#MAX_VALUE} is interpreted as an unbounded pool.
+ * In case the size of the pool is set to a non-zero positive number, that is used to cap the number
+ * of resources that a pool may contain for any given key. A size of {@link Integer#MAX_VALUE} is
+ * interpreted as an unbounded pool.
 * </p>
- *
 * <p>
- * PoolMap is thread-safe. It does not remove elements automatically. Unused resources
- * must be closed and removed explicitly.
+ * PoolMap is thread-safe. It does not remove elements automatically. Unused resources must be
+ * closed and removed explicitly.
 * </p>
    - * - * @param - * the type of the key to the resource - * @param - * the type of the resource being pooled + * @param the type of the key to the resource + * @param the type of the resource being pooled */ @InterfaceAudience.Private public class PoolMap { @@ -58,32 +49,33 @@ public class PoolMap { private final PoolType poolType; private final int poolMaxSize; - public PoolMap(PoolType poolType, int poolMaxSize) { - pools = new HashMap<>(); - this.poolType = poolType; - this.poolMaxSize = poolMaxSize; + public PoolMap(PoolType poolType, int poolMaxSize) { + pools = new HashMap<>(); + this.poolType = poolType; + this.poolMaxSize = poolMaxSize; } public V getOrCreate(K key, PoolResourceSupplier supplier) throws IOException { - synchronized (pools) { - Pool pool = pools.get(key); - - if (pool == null) { - pool = createPool(); - pools.put(key, pool); - } - - try { - return pool.getOrCreate(supplier); - } catch (IOException | RuntimeException | Error e) { - if (pool.size() == 0) { - pools.remove(key); - } - - throw e; - } - } + synchronized (pools) { + Pool pool = pools.get(key); + + if (pool == null) { + pool = createPool(); + pools.put(key, pool); + } + + try { + return pool.getOrCreate(supplier); + } catch (IOException | RuntimeException | Error e) { + if (pool.size() == 0) { + pools.remove(key); + } + + throw e; + } + } } + public boolean remove(K key, V value) { synchronized (pools) { Pool pool = pools.get(key); @@ -128,7 +120,7 @@ public void clear() { } public interface PoolResourceSupplier { - R get() throws IOException; + R get() throws IOException; } protected static V createResource(PoolResourceSupplier supplier) throws IOException { @@ -172,30 +164,25 @@ public static PoolType fuzzyMatch(String name) { protected Pool createPool() { switch (poolType) { - case RoundRobin: - return new RoundRobinPool<>(poolMaxSize); - case ThreadLocal: - return new ThreadLocalPool<>(); - default: - return new RoundRobinPool<>(poolMaxSize); + case RoundRobin: + return new RoundRobinPool<>(poolMaxSize); + case ThreadLocal: + return new ThreadLocalPool<>(); + default: + return new RoundRobinPool<>(poolMaxSize); } } /** - * The RoundRobinPool represents a {@link PoolMap.Pool}, which - * stores its resources in an {@link ArrayList}. It load-balances access to - * its resources by returning a different resource every time a given key is - * looked up. - * + * The RoundRobinPool represents a {@link PoolMap.Pool}, which stores its resources + * in an {@link ArrayList}. It load-balances access to its resources by returning a different + * resource every time a given key is looked up. *
- * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of
- * the pool is unbounded. Otherwise, it caps the number of resources in this
- * pool to the (non-zero positive) value specified in {@link #maxSize}.
+ * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of the pool is
+ * unbounded. Otherwise, it caps the number of resources in this pool to the (non-zero positive)
+ * value specified in {@link #maxSize}.
 * </p>
    - * - * @param - * the type of the resource - * + * @param the type of the resource */ @SuppressWarnings("serial") static class RoundRobinPool implements Pool { @@ -254,18 +241,15 @@ public int size() { } /** - * The ThreadLocalPool represents a {@link PoolMap.Pool} that - * works similarly to {@link ThreadLocal} class. It essentially binds the resource - * to the thread from which it is accessed. It doesn't remove resources when a thread exits, - * those resources must be closed manually. - * + * The ThreadLocalPool represents a {@link PoolMap.Pool} that works similarly to + * {@link ThreadLocal} class. It essentially binds the resource to the thread from which it is + * accessed. It doesn't remove resources when a thread exits, those resources must be closed + * manually. *
- * Note that the size of the pool is essentially bounded by the number of threads
- * that add resources to this pool.
+ * Note that the size of the pool is essentially bounded by the number of threads that add
+ * resources to this pool.
 * </p>
    - * - * @param - * the type of the resource + * @param the type of the resource */ static class ThreadLocalPool implements Pool { private final Map resources; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java index 698330acc921..4143df9b75d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,10 +24,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility class with methods for manipulating Writable objects @@ -38,11 +36,11 @@ public class Writables { /** * @param w writable * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. + * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException e * @see #getWritable(byte[], Writable) */ - public static byte [] getBytes(final Writable w) throws IOException { + public static byte[] getBytes(final Writable w) throws IOException { if (w == null) { throw new IllegalArgumentException("Writable cannot be null"); } @@ -64,20 +62,20 @@ public class Writables { * Put a bunch of Writables as bytes all into the one byte array. * @param ws writable * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. + * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException e */ - public static byte [] getBytes(final Writable... ws) throws IOException { - List bytes = new ArrayList<>(ws.length); + public static byte[] getBytes(final Writable... ws) throws IOException { + List bytes = new ArrayList<>(ws.length); int size = 0; - for (Writable w: ws) { - byte [] b = getBytes(w); + for (Writable w : ws) { + byte[] b = getBytes(w); size += b.length; bytes.add(b); } - byte [] result = new byte[size]; + byte[] result = new byte[size]; int offset = 0; - for (byte [] b: bytes) { + for (byte[] b : bytes) { System.arraycopy(b, 0, result, offset, b.length); offset += b.length; } @@ -88,16 +86,14 @@ public class Writables { * Set bytes into the passed Writable by calling its * {@link Writable#readFields(java.io.DataInput)}. * @param bytes serialized bytes - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. + * @param w An empty Writable (usually made by calling the null-arg constructor). + * @return The passed Writable after its readFields has been called fed by the passed + * bytes array or IllegalArgumentException if passed null or an empty + * bytes array. 
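[Reading aid, not part of the patch] The PoolMap hunks above are formatting-only. A minimal usage sketch, assuming PoolMap is generic in a key type K and a resource type V, and using only the methods visible in this patch (getOrCreate, remove, clear); the Socket resource and "host:port" key are hypothetical:

import java.io.IOException;
import java.net.Socket;

import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;

public class PoolMapSketch {
  void useSharedSockets() throws IOException {
    // At most 5 resources per key, handed out round-robin across callers.
    PoolMap<String, Socket> pool = new PoolMap<>(PoolType.RoundRobin, 5);
    Socket s = pool.getOrCreate("host1:16020", () -> new Socket("host1", 16020));
    try {
      // ... use the socket ...
    } finally {
      // PoolMap never evicts or closes resources on its own; callers must do both explicitly.
      pool.remove("host1:16020", s);
      s.close();
    }
    pool.clear();
  }
}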
* @throws IOException e * @throws IllegalArgumentException */ - public static Writable getWritable(final byte [] bytes, final Writable w) - throws IOException { + public static Writable getWritable(final byte[] bytes, final Writable w) throws IOException { return getWritable(bytes, 0, bytes.length, w); } @@ -107,20 +103,17 @@ public static Writable getWritable(final byte [] bytes, final Writable w) * @param bytes serialized bytes * @param offset offset into array * @param length length of data - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. + * @param w An empty Writable (usually made by calling the null-arg constructor). + * @return The passed Writable after its readFields has been called fed by the passed + * bytes array or IllegalArgumentException if passed null or an empty + * bytes array. * @throws IOException e * @throws IllegalArgumentException */ - public static Writable getWritable(final byte [] bytes, final int offset, - final int length, final Writable w) - throws IOException { - if (bytes == null || length <=0) { - throw new IllegalArgumentException("Can't build a writable with empty " + - "bytes array"); + public static Writable getWritable(final byte[] bytes, final int offset, final int length, + final Writable w) throws IOException { + if (bytes == null || length <= 0) { + throw new IllegalArgumentException("Can't build a writable with empty " + "bytes array"); } if (w == null) { throw new IllegalArgumentException("Writable cannot be null"); @@ -136,26 +129,24 @@ public static Writable getWritable(final byte [] bytes, final int offset, } /** - * Copy one Writable to another. Copies bytes using data streams. + * Copy one Writable to another. Copies bytes using data streams. * @param src Source Writable * @param tgt Target Writable * @return The target Writable. * @throws IOException e */ - public static Writable copyWritable(final Writable src, final Writable tgt) - throws IOException { + public static Writable copyWritable(final Writable src, final Writable tgt) throws IOException { return copyWritable(getBytes(src), tgt); } /** - * Copy one Writable to another. Copies bytes using data streams. + * Copy one Writable to another. Copies bytes using data streams. * @param bytes Source Writable * @param tgt Target Writable * @return The target Writable. * @throws IOException e */ - public static Writable copyWritable(final byte [] bytes, final Writable tgt) - throws IOException { + public static Writable copyWritable(final byte[] bytes, final Writable tgt) throws IOException { DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes)); try { tgt.readFields(dis); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index 0447e31fdd09..59834b26961a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
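[Reading aid, not part of the patch] The Writables changes above are likewise formatting-only. A small round-trip sketch using the methods shown in the hunks; Hadoop's Text is used purely as a convenient Writable:

import java.io.IOException;

import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

public class WritablesSketch {
  void roundTrip() throws IOException {
    Text src = new Text("hello");
    byte[] bytes = Writables.getBytes(src);   // serializes via Writable#write
    Text dst = new Text();
    Writables.getWritable(bytes, dst);        // fills dst via Writable#readFields
    Text copy = new Text();
    Writables.copyWritable(src, copy);        // the two steps above in one call
  }
}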
See the NOTICE file * distributed with this work for additional information @@ -137,8 +137,8 @@ public ReadOnlyZKClient(Configuration conf) { conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); LOG.debug( - "Connect {} to {} with session timeout={}ms, retries {}, " + - "retry interval {}ms, keepAlive={}ms", + "Connect {} to {} with session timeout={}ms, retries {}, " + + "retry interval {}ms, keepAlive={}ms", getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs); Threads.setDaemonThreadRunning(new Thread(this::run), "ReadOnlyZKClient-" + connectString + "@" + getId()); @@ -260,8 +260,8 @@ public CompletableFuture get(String path) { @Override protected void doExec(ZooKeeper zk) { - zk.getData(path, false, - (rc, path, ctx, data, stat) -> onComplete(zk, rc, data, true), null); + zk.getData(path, false, (rc, path, ctx, data, stat) -> onComplete(zk, rc, data, true), + null); } }); return future; @@ -311,7 +311,8 @@ private void closeZk() { private ZooKeeper getZk() throws IOException { // may be closed when session expired if (zookeeper == null || !zookeeper.getState().isAlive()) { - zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {}); + zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { + }); } return zookeeper; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java index 5072706cb5ae..f0fae958a66a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.zookeeper; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index 4d3e7b3c50ba..319b9f09a044 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -43,9 +43,8 @@ public class ZNodePaths { public final String baseZNode; /** - * The prefix of meta znode. Does not include baseZNode. - * Its a 'prefix' because meta replica id integer can be tagged on the end (if - * no number present, it is 'default' replica). + * The prefix of meta znode. Does not include baseZNode. Its a 'prefix' because meta replica id + * integer can be tagged on the end (if no number present, it is 'default' replica). 
*/ private final String metaZNodePrefix; @@ -114,31 +113,24 @@ public ZNodePaths(Configuration conf) { hfileRefsZNode = joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.hfile.refs", "hfile-refs")); snapshotCleanupZNode = joinZNode(baseZNode, - conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE)); + conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE)); } @Override public String toString() { - return new StringBuilder() - .append("ZNodePaths [baseZNode=").append(baseZNode) - .append(", rsZNode=").append(rsZNode) - .append(", drainingZNode=").append(drainingZNode) + return new StringBuilder().append("ZNodePaths [baseZNode=").append(baseZNode) + .append(", rsZNode=").append(rsZNode).append(", drainingZNode=").append(drainingZNode) .append(", masterAddressZNode=").append(masterAddressZNode) .append(", backupMasterAddressesZNode=").append(backupMasterAddressesZNode) - .append(", clusterStateZNode=").append(clusterStateZNode) - .append(", tableZNode=").append(tableZNode) - .append(", clusterIdZNode=").append(clusterIdZNode) - .append(", splitLogZNode=").append(splitLogZNode) - .append(", balancerZNode=").append(balancerZNode) - .append(", regionNormalizerZNode=").append(regionNormalizerZNode) - .append(", switchZNode=").append(switchZNode) - .append(", masterMaintZNode=").append(masterMaintZNode) - .append(", replicationZNode=").append(replicationZNode) - .append(", peersZNode=").append(peersZNode) - .append(", queuesZNode=").append(queuesZNode) - .append(", hfileRefsZNode=").append(hfileRefsZNode) - .append(", snapshotCleanupZNode=").append(snapshotCleanupZNode) - .append("]").toString(); + .append(", clusterStateZNode=").append(clusterStateZNode).append(", tableZNode=") + .append(tableZNode).append(", clusterIdZNode=").append(clusterIdZNode) + .append(", splitLogZNode=").append(splitLogZNode).append(", balancerZNode=") + .append(balancerZNode).append(", regionNormalizerZNode=").append(regionNormalizerZNode) + .append(", switchZNode=").append(switchZNode).append(", masterMaintZNode=") + .append(masterMaintZNode).append(", replicationZNode=").append(replicationZNode) + .append(", peersZNode=").append(peersZNode).append(", queuesZNode=").append(queuesZNode) + .append(", hfileRefsZNode=").append(hfileRefsZNode).append(", snapshotCleanupZNode=") + .append(snapshotCleanupZNode).append("]").toString(); } /** @@ -170,9 +162,8 @@ public int getMetaReplicaIdFromPath(String path) { * @return replicaId */ public int getMetaReplicaIdFromZNode(String znode) { - return znode.equals(metaZNodePrefix)? - RegionInfo.DEFAULT_REPLICA_ID: - Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); + return znode.equals(metaZNodePrefix) ? RegionInfo.DEFAULT_REPLICA_ID + : Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); } /** @@ -198,10 +189,10 @@ public boolean isClientReadable(String path) { // Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS // all clients need to access this data to work. Using zk for sharing data to clients (other // than service lookup case is not a recommended design pattern. 
- return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) || - path.equals(clusterIdZNode) || path.equals(rsZNode) || - // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not - path.equals(tableZNode) || path.startsWith(tableZNode + "/"); + return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) + || path.equals(clusterIdZNode) || path.equals(rsZNode) || + // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not + path.equals(tableZNode) || path.startsWith(tableZNode + "/"); } public String getRsPath(ServerName sn) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java index dd26ed5f2091..6f1fd7f18635 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java @@ -19,13 +19,12 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.ZooKeeper; +import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; /** * Methods that help working with ZooKeeper @@ -39,11 +38,12 @@ private ZooKeeperHelper() { /** * Get a ZooKeeper instance and wait until it connected before returning. * @param sessionTimeoutMs Used as session timeout passed to the created ZooKeeper AND as the - * timeout to wait on connection establishment. + * timeout to wait on connection establishment. */ public static ZooKeeper getConnectedZooKeeper(String connectString, int sessionTimeoutMs) throws IOException { - ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {}); + ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { + }); return ensureConnectedZooKeeper(zookeeper, sessionTimeoutMs); } @@ -58,12 +58,11 @@ public static ZooKeeper ensureConnectedZooKeeper(ZooKeeper zookeeper, int timeou } Stopwatch stopWatch = Stopwatch.createStarted(); // Make sure we are connected before we hand it back. - while(!zookeeper.getState().isConnected()) { + while (!zookeeper.getState().isConnected()) { Threads.sleep(1); if (stopWatch.elapsed(TimeUnit.MILLISECONDS) > timeout) { - throw new ZooKeeperConnectionException("Failed connect after waiting " + - stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + - zookeeper); + throw new ZooKeeperConnectionException("Failed connect after waiting " + + stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + zookeeper); } } return zookeeper; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java index 628655a083c2..c35267889725 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestCatalogFamilyFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
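[Reading aid, not part of the patch] The ZooKeeper-facing classes above (ZNodePaths, ZooKeeperHelper) are only reformatted. A hedged sketch of how they are typically used; the connect string is invented and the meta znode name assumes the default "zookeeper.znode.metaserver" prefix:

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperHelper;
import org.apache.zookeeper.ZooKeeper;

public class ZkSketch {
  void connectAndInspect() throws IOException, InterruptedException {
    // Blocks until the session is connected, or fails with ZooKeeperConnectionException once
    // the timeout elapses.
    ZooKeeper zk = ZooKeeperHelper.getConnectedZooKeeper("localhost:2181", 30_000);
    try {
      ZNodePaths paths = new ZNodePaths(HBaseConfiguration.create());
      // With the default prefix, "meta-region-server" is the default replica and
      // "meta-region-server-1" parses to replica id 1.
      int replicaId = paths.getMetaReplicaIdFromZNode("meta-region-server-1");
    } finally {
      zk.close();
    }
  }
}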
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ public class TestCatalogFamilyFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCatalogFamilyFormat.class); + HBaseClassTestRule.forClass(TestCatalogFamilyFormat.class); @Rule public TestName name = new TestName(); @@ -69,8 +69,8 @@ public void testMetaReaderGetColumnMethods() { assertArrayEquals(HConstants.STARTCODE_QUALIFIER, CatalogFamilyFormat.getStartCodeColumn(0)); assertArrayEquals( - Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + - CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"), + Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + + CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"), CatalogFamilyFormat.getStartCodeColumn(42)); assertArrayEquals(HConstants.SEQNUM_QUALIFIER, CatalogFamilyFormat.getSeqNumColumn(0)); @@ -84,19 +84,18 @@ public void testMetaReaderGetColumnMethods() { * The info we can get from the regionName is: table name, start key, regionId, replicaId. */ @Test - public void testParseRegionInfoFromRegionName() throws IOException { - RegionInfo originalRegionInfo = RegionInfoBuilder.newBuilder( - TableName.valueOf(name.getMethodName())).setRegionId(999999L) - .setStartKey(Bytes.toBytes("2")).setEndKey(Bytes.toBytes("3")) - .setReplicaId(1).build(); - RegionInfo newParsedRegionInfo = CatalogFamilyFormat - .parseRegionInfoFromRegionName(originalRegionInfo.getRegionName()); + public void testParseRegionInfoFromRegionName() throws IOException { + RegionInfo originalRegionInfo = + RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setRegionId(999999L) + .setStartKey(Bytes.toBytes("2")).setEndKey(Bytes.toBytes("3")).setReplicaId(1).build(); + RegionInfo newParsedRegionInfo = + CatalogFamilyFormat.parseRegionInfoFromRegionName(originalRegionInfo.getRegionName()); assertEquals("Parse TableName error", originalRegionInfo.getTable(), newParsedRegionInfo.getTable()); assertEquals("Parse regionId error", originalRegionInfo.getRegionId(), newParsedRegionInfo.getRegionId()); - assertTrue("Parse startKey error", Bytes.equals(originalRegionInfo.getStartKey(), - newParsedRegionInfo.getStartKey())); + assertTrue("Parse startKey error", + Bytes.equals(originalRegionInfo.getStartKey(), newParsedRegionInfo.getStartKey())); assertEquals("Parse replicaId error", originalRegionInfo.getReplicaId(), newParsedRegionInfo.getReplicaId()); assertTrue("We can't parse endKey from regionName only", diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java index db46768be315..0ef8e9d2dedc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestRegionLocations { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionLocations.class); + HBaseClassTestRule.forClass(TestRegionLocations.class); ServerName sn0 = ServerName.valueOf("host0", 10, 10); ServerName sn1 = ServerName.valueOf("host1", 10, 10); @@ -90,7 +90,7 @@ private RegionInfo hri(long regionId, int replicaId) { byte[] startKey = HConstants.EMPTY_START_ROW; byte[] endKey = HConstants.EMPTY_END_ROW; RegionInfo info = RegionInfoBuilder.newBuilder(table).setStartKey(startKey).setEndKey(endKey) - .setRegionId(regionId).setReplicaId(replicaId).build(); + .setRegionId(regionId).setReplicaId(replicaId).build(); return info; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java index fd0183ee32b1..4a337fca7f1b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,7 +73,7 @@ public class TestAsyncAdminRpcPriority { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncAdminRpcPriority.class); + HBaseClassTestRule.forClass(TestAsyncAdminRpcPriority.class); private static Configuration CONF = HBaseConfiguration.create(); @@ -96,7 +96,7 @@ public void setUp() throws IOException { public Void answer(InvocationOnMock invocation) throws Throwable { RpcCallback done = invocation.getArgument(2); done.run(GetProcedureResultResponse.newBuilder() - .setState(GetProcedureResultResponse.State.FINISHED).build()); + .setState(GetProcedureResultResponse.State.FINISHED).build()); return null; } }).when(masterStub).getProcedureResult(any(HBaseRpcController.class), @@ -143,7 +143,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { any()); conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", null, - UserProvider.instantiate(CONF).getCurrent()) { + UserProvider.instantiate(CONF).getCurrent()) { @Override CompletableFuture getMasterStub() { @@ -170,9 +170,9 @@ public boolean matches(HBaseRpcController controller) { @Test public void testCreateNormalTable() { conn.getAdmin() - .createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()) - .join(); + .createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()) + .join(); verify(masterStub, times(1)).createTable(assertPriority(NORMAL_QOS), any(CreateTableRequest.class), any()); } @@ -182,10 +182,10 @@ public void testCreateNormalTable() { @Test public void testCreateSystemTable() { conn.getAdmin() - .createTable(TableDescriptorBuilder - .newBuilder(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()) - .join(); + .createTable(TableDescriptorBuilder + .newBuilder(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) + 
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()) + .join(); verify(masterStub, times(1)).createTable(assertPriority(SYSTEMTABLE_QOS), any(CreateTableRequest.class), any()); } @@ -195,7 +195,7 @@ public void testCreateSystemTable() { @Test public void testCreateMetaTable() { conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()).join(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()).join(); verify(masterStub, times(1)).createTable(assertPriority(SYSTEMTABLE_QOS), any(CreateTableRequest.class), any()); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java index b2d5b872e757..7f9a2c2a0a1d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ public class TestAsyncConnectionConfiguration { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncConnectionConfiguration.class); + HBaseClassTestRule.forClass(TestAsyncConnectionConfiguration.class); @Test public void testDefaultReadWriteRpcTimeout() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java index b344ff5febc5..9955a928aa79 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,12 +49,12 @@ public class TestAsyncConnectionTracing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncConnectionTracing.class); + HBaseClassTestRule.forClass(TestAsyncConnectionTracing.class); private static Configuration CONF = HBaseConfiguration.create(); private ServerName masterServer = - ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()); + ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()); private AsyncConnection conn; @@ -71,7 +71,7 @@ public CompletableFuture getActiveMaster() { } }; conn = new AsyncConnectionImpl(CONF, registry, "test", null, - UserProvider.instantiate(CONF).getCurrent()); + UserProvider.instantiate(CONF).getCurrent()); } @After @@ -82,10 +82,10 @@ public void tearDown() throws IOException { private void assertTrace(String methodName, ServerName serverName) { Waiter.waitFor(CONF, 1000, () -> traceRule.getSpans().stream() - .anyMatch(span -> span.getName().equals("AsyncConnection." + methodName) && - span.getKind() == SpanKind.INTERNAL && span.hasEnded())); + .anyMatch(span -> span.getName().equals("AsyncConnection." 
+ methodName) + && span.getKind() == SpanKind.INTERNAL && span.hasEnded())); SpanData data = traceRule.getSpans().stream() - .filter(s -> s.getName().equals("AsyncConnection." + methodName)).findFirst().get(); + .filter(s -> s.getName().equals("AsyncConnection." + methodName)).findFirst().get(); assertEquals(StatusCode.OK, data.getStatus().getStatusCode()); if (serverName != null) { assertEquals(serverName.getServerName(), @@ -101,8 +101,8 @@ public void testHbck() { @Test public void testHbckWithServerName() throws IOException { - ServerName serverName = ServerName.valueOf("localhost", 23456, - EnvironmentEdgeManager.currentTime()); + ServerName serverName = + ServerName.valueOf("localhost", 23456, EnvironmentEdgeManager.currentTime()); conn.getHbck(serverName); assertTrace("getHbck", serverName); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java index b306500c8b13..eca59331cc49 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java @@ -37,7 +37,7 @@ public class TestAsyncMetaRegionLocatorFailFast { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncMetaRegionLocatorFailFast.class); + HBaseClassTestRule.forClass(TestAsyncMetaRegionLocatorFailFast.class); private static Configuration CONF = HBaseConfiguration.create(); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java index 1a3feb735da8..de5098a89a77 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java @@ -30,6 +30,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasItem; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; @@ -61,6 +62,7 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, MediumTests.class }) @@ -69,7 +71,7 @@ public class TestAsyncRegionLocatorTracing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncRegionLocatorTracing.class); + HBaseClassTestRule.forClass(TestAsyncRegionLocatorTracing.class); private static final Configuration CONF = HBaseConfiguration.create(); @@ -84,12 +86,12 @@ public class TestAsyncRegionLocatorTracing { public void setUp() throws IOException { RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); locs = new RegionLocations( - new HRegionLocation(metaRegionInfo, - ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())), - new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 1), - ServerName.valueOf("127.0.0.2", 12345, EnvironmentEdgeManager.currentTime())), - new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 2), - 
ServerName.valueOf("127.0.0.3", 12345, EnvironmentEdgeManager.currentTime()))); + new HRegionLocation(metaRegionInfo, + ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())), + new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 1), + ServerName.valueOf("127.0.0.2", 12345, EnvironmentEdgeManager.currentTime())), + new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 2), + ServerName.valueOf("127.0.0.3", 12345, EnvironmentEdgeManager.currentTime()))); conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF) { @Override @@ -111,53 +113,44 @@ private SpanData waitSpan(String name) { private SpanData waitSpan(Matcher matcher) { Matcher spanLocator = allOf(matcher, hasEnded()); try { - Waiter.waitFor(CONF, 1000, new MatcherPredicate<>( - "waiting for span", - () -> traceRule.getSpans(), hasItem(spanLocator))); + Waiter.waitFor(CONF, 1000, new MatcherPredicate<>("waiting for span", + () -> traceRule.getSpans(), hasItem(spanLocator))); } catch (AssertionError e) { LOG.error("AssertionError while waiting for matching span. Span reservoir contains: {}", traceRule.getSpans()); throw e; } - return traceRule.getSpans() - .stream() - .filter(spanLocator::matches) - .findFirst() - .orElseThrow(AssertionError::new); + return traceRule.getSpans().stream().filter(spanLocator::matches).findFirst() + .orElseThrow(AssertionError::new); } @Test public void testClearCache() { conn.getLocator().clearCache(); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn))); } @Test public void testClearCacheServerName() { - ServerName sn = ServerName.valueOf("127.0.0.1", 12345, - EnvironmentEdgeManager.currentTime()); + ServerName sn = ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime()); conn.getLocator().clearCache(sn); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - hasAttributes(containsEntry("db.hbase.server.name", sn.getServerName())))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + hasAttributes(containsEntry("db.hbase.server.name", sn.getServerName())))); } @Test public void testClearCacheTableName() { conn.getLocator().clearCache(TableName.META_TABLE_NAME); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME))); } @Test @@ -165,13 +158,11 @@ public void testGetRegionLocation() { conn.getLocator().getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocation"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), 
- hasAttributes( - containsEntryWithStringValuesOf("db.hbase.regions", + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME), + hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions", locs.getDefaultRegionLocation().getRegion().getRegionNameAsString())))); } @@ -180,16 +171,12 @@ public void testGetRegionLocations() { conn.getLocator().getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, false, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocations"); - String[] expectedRegions = Arrays.stream(locs.getRegionLocations()) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .toArray(String[]::new); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + String[] expectedRegions = + Arrays.stream(locs.getRegionLocations()).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).toArray(String[]::new); + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( + buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java index bc5ebf4e9fff..3320bc1288e6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell.Type; @@ -68,7 +67,9 @@ import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; @@ -92,7 +93,7 @@ public class TestAsyncTableRpcPriority { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRpcPriority.class); + HBaseClassTestRule.forClass(TestAsyncTableRpcPriority.class); private static Configuration CONF = HBaseConfiguration.create(); @@ -114,8 +115,7 @@ public void setUp() throws IOException { @Override public Void answer(InvocationOnMock invocation) throws Throwable { - ClientProtos.MultiResponse resp = - ClientProtos.MultiResponse.newBuilder() + ClientProtos.MultiResponse resp = ClientProtos.MultiResponse.newBuilder() .addRegionActionResult(RegionActionResult.newBuilder().addResultOrException( ResultOrException.newBuilder().setResult(ProtobufUtil.toResult(new Result())))) .build(); @@ -135,11 +135,11 @@ public Void answer(InvocationOnMock invocation) throws Throwable { ColumnValue value = req.getColumnValue(0); QualifierValue qvalue = value.getQualifierValue(0); Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) - .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()).build(); + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); resp = MutateResponse.newBuilder() - .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); + .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); break; default: resp = MutateResponse.getDefaultInstance(); @@ -160,24 +160,24 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any()); conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", null, - UserProvider.instantiate(CONF).getCurrent()) { + UserProvider.instantiate(CONF).getCurrent()) { @Override AsyncRegionLocator getLocator() { AsyncRegionLocator locator = mock(AsyncRegionLocator.class); Answer> answer = - new Answer>() { - - @Override - public CompletableFuture answer(InvocationOnMock invocation) - throws Throwable { - TableName tableName = invocation.getArgument(0); - RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); - ServerName serverName = ServerName.valueOf("rs", 16010, 12345); - HRegionLocation loc = new HRegionLocation(info, serverName); - return CompletableFuture.completedFuture(loc); - } - }; + new Answer>() { + + @Override + public CompletableFuture answer(InvocationOnMock invocation) + throws Throwable { + TableName tableName = invocation.getArgument(0); + RegionInfo info = 
RegionInfoBuilder.newBuilder(tableName).build(); + ServerName serverName = ServerName.valueOf("rs", 16010, 12345); + HRegionLocation loc = new HRegionLocation(info, serverName); + return CompletableFuture.completedFuture(loc); + } + }; doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), any(RegionLocateType.class), anyLong()); doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), @@ -215,7 +215,7 @@ public boolean matches(ScanRequest request) { @Test public void testGet() { conn.getTable(TableName.valueOf(name.getMethodName())) - .get(new Get(Bytes.toBytes(0)).setPriority(11)).join(); + .get(new Get(Bytes.toBytes(0)).setPriority(11)).join(); verify(stub, times(1)).get(assertPriority(11), any(GetRequest.class), any()); } @@ -228,7 +228,7 @@ public void testGetNormalTable() { @Test public void testGetSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .get(new Get(Bytes.toBytes(0))).join(); + .get(new Get(Bytes.toBytes(0))).join(); verify(stub, times(1)).get(assertPriority(SYSTEMTABLE_QOS), any(GetRequest.class), any()); } @@ -240,54 +240,53 @@ public void testGetMetaTable() { @Test public void testPut() { - conn - .getTable(TableName.valueOf(name.getMethodName())).put(new Put(Bytes.toBytes(0)) + conn.getTable(TableName.valueOf(name.getMethodName())).put(new Put(Bytes.toBytes(0)) .setPriority(12).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .join(); verify(stub, times(1)).mutate(assertPriority(12), any(MutateRequest.class), any()); } @Test public void testPutNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())).put(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testPutSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .put(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), - Bytes.toBytes("v"))) - .join(); + .put(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), + Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testPutMetaTable() { conn.getTable(TableName.META_TABLE_NAME).put(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testDelete() { conn.getTable(TableName.valueOf(name.getMethodName())) - .delete(new Delete(Bytes.toBytes(0)).setPriority(13)).join(); + .delete(new Delete(Bytes.toBytes(0)).setPriority(13)).join(); verify(stub, times(1)).mutate(assertPriority(13), any(MutateRequest.class), any()); } @Test public void testDeleteNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())).delete(new Delete(Bytes.toBytes(0))) - .join(); + .join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testDeleteSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .delete(new 
Delete(Bytes.toBytes(0))).join(); + .delete(new Delete(Bytes.toBytes(0))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ -299,154 +298,155 @@ public void testDeleteMetaTable() { @Test public void testAppend() { - conn - .getTable(TableName.valueOf(name.getMethodName())).append(new Append(Bytes.toBytes(0)) + conn.getTable(TableName.valueOf(name.getMethodName())).append(new Append(Bytes.toBytes(0)) .setPriority(14).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .join(); verify(stub, times(1)).mutate(assertPriority(14), any(MutateRequest.class), any()); } @Test public void testAppendNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())).append(new Append(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testAppendSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .append(new Append(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), - Bytes.toBytes("v"))) - .join(); + .append(new Append(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), + Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testAppendMetaTable() { conn.getTable(TableName.META_TABLE_NAME).append(new Append(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testIncrement() { conn.getTable(TableName.valueOf(name.getMethodName())).increment(new Increment(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).setPriority(15)).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).setPriority(15)).join(); verify(stub, times(1)).mutate(assertPriority(15), any(MutateRequest.class), any()); } @Test public void testIncrementNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())) - .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); + .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testIncrementSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); + .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testIncrementMetaTable() { conn.getTable(TableName.META_TABLE_NAME) - .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); + .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndPut() { 
conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifNotExists() - .thenPut(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")).setPriority(16)) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifNotExists() + .thenPut(new Put(Bytes.toBytes(0)) + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .setPriority(16)) + .join(); verify(stub, times(1)).mutate(assertPriority(16), any(MutateRequest.class), any()); } @Test public void testCheckAndPutNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifNotExists().thenPut(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), - Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifNotExists().thenPut(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), + Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndPutSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifNotExists().thenPut(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), - Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifNotExists().thenPut(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), + Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndPutMetaTable() { conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) - .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndDelete() { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0)).setPriority(17)).join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0)).setPriority(17)) + .join(); verify(stub, times(1)).mutate(assertPriority(17), any(MutateRequest.class), any()); } @Test public void testCheckAndDeleteNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0))).join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0))).join(); verify(stub, 
times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndDeleteSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0))).join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndDeleteMetaTable() { conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) - .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndMutate() throws IOException { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")).thenMutate(new RowMutations(Bytes.toBytes(0)) - .add((Mutation) new Delete(Bytes.toBytes(0)).setPriority(18))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")).thenMutate(new RowMutations(Bytes.toBytes(0)) + .add((Mutation) new Delete(Bytes.toBytes(0)).setPriority(18))) + .join(); verify(stub, times(1)).multi(assertPriority(18), any(ClientProtos.MultiRequest.class), any()); } @Test public void testCheckAndMutateNormalTable() throws IOException { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")) - .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")) + .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) + .join(); verify(stub, times(1)).multi(assertPriority(NORMAL_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -454,10 +454,10 @@ public void testCheckAndMutateNormalTable() throws IOException { @Test public void testCheckAndMutateSystemTable() throws IOException { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")) - .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")) + .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) + .join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -465,9 +465,9 @@ public void testCheckAndMutateSystemTable() throws IOException { @Test public void testCheckAndMutateMetaTable() throws 
IOException { conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) - .qualifier(Bytes.toBytes("cq")).ifEquals(Bytes.toBytes("v")) - .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) - .join(); + .qualifier(Bytes.toBytes("cq")).ifEquals(Bytes.toBytes("v")) + .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) + .join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -485,28 +485,24 @@ public Void answer(InvocationOnMock invocation) throws Throwable { ScanRequest req = invocation.getArgument(1); RpcCallback done = invocation.getArgument(2); if (!req.hasScannerId()) { - done.run(ScanResponse.newBuilder() - .setScannerId(scannerId).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true) - .build()); + done.run(ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) + .setMoreResultsInRegion(true).setMoreResults(true).build()); } else { if (req.hasRenew() && req.getRenew()) { future.complete(null); } assertFalse("close scanner should not come in with scan priority " + scanPriority, - req.hasCloseScanner() && req.getCloseScanner()); + req.hasCloseScanner() && req.getCloseScanner()); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) + .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); - done.run( - ScanResponse.newBuilder() - .setScannerId(scannerId).setTtl(800).setMoreResultsInRegion(true) - .setMoreResults(true).addResults(ProtobufUtil.toResult(result)) - .build()); + done.run(ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) + .setMoreResultsInRegion(true).setMoreResults(true) + .addResults(ProtobufUtil.toResult(result)).build()); } }); return null; @@ -518,13 +514,13 @@ public Void answer(InvocationOnMock invocation) throws Throwable { @SuppressWarnings("FutureReturnValueIgnored") @Override public Void answer(InvocationOnMock invocation) throws Throwable { - threadPool.submit(() ->{ + threadPool.submit(() -> { ScanRequest req = invocation.getArgument(1); RpcCallback done = invocation.getArgument(2); assertTrue("close request should have scannerId", req.hasScannerId()); assertEquals("close request's scannerId should match", scannerId, req.getScannerId()); assertTrue("close request should have closerScanner set", - req.hasCloseScanner() && req.getCloseScanner()); + req.hasCloseScanner() && req.getCloseScanner()); done.run(ScanResponse.getDefaultInstance()); }); @@ -549,8 +545,8 @@ public void testScanNormalTable() throws Exception { @Test public void testScanSystemTable() throws Exception { CompletableFuture renewFuture = mockScanReturnRenewFuture(SYSTEMTABLE_QOS); - testForTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName()), - renewFuture, Optional.empty()); + testForTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName()), renewFuture, + Optional.empty()); } @Test @@ -560,7 +556,7 @@ public void testScanMetaTable() throws Exception { } private void testForTable(TableName tableName, CompletableFuture renewFuture, - Optional priority) throws Exception { + Optional priority) throws Exception { Scan scan 
= new Scan().setCaching(1).setMaxResultSize(1); priority.ifPresent(scan::setPriority); @@ -584,7 +580,7 @@ private void testForTable(TableName tableName, CompletableFuture renewFutu @Test public void testBatchNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())) - .batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); + .batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); verify(stub, times(1)).multi(assertPriority(NORMAL_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -592,7 +588,7 @@ public void testBatchNormalTable() { @Test public void testBatchSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); + .batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -600,7 +596,7 @@ public void testBatchSystemTable() { @Test public void testBatchMetaTable() { conn.getTable(TableName.META_TABLE_NAME).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))) - .join(); + .join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java index 69cd77668dc7..00042e8d155d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java @@ -38,6 +38,7 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; @@ -80,8 +81,10 @@ import org.junit.experimental.categories.Category; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; @@ -102,7 +105,7 @@ public class TestAsyncTableTracing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableTracing.class); + HBaseClassTestRule.forClass(TestAsyncTableTracing.class); private static Configuration CONF = HBaseConfiguration.create(); @@ -127,18 +130,18 @@ public Void answer(InvocationOnMock invocation) throws Throwable { RpcCallback done = invocation.getArgument(2); if (!req.hasScannerId()) { done.run(ScanResponse.newBuilder().setScannerId(1).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true).build()); + .setMoreResultsInRegion(true).setMoreResults(true).build()); } else { if (req.hasCloseScanner() && req.getCloseScanner()) { done.run(ScanResponse.getDefaultInstance()); } else { Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) - .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) - .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) - .setValue(Bytes.toBytes("v")).build(); + 
.setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) + .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); ScanResponse.Builder builder = ScanResponse.newBuilder().setScannerId(1).setTtl(800) - .addResults(ProtobufUtil.toResult(result)); + .addResults(ProtobufUtil.toResult(result)); if (req.getLimitOfRows() == 1) { builder.setMoreResultsInRegion(false).setMoreResults(false); } else { @@ -181,11 +184,11 @@ public Void answer(InvocationOnMock invocation) throws Throwable { ColumnValue value = req.getColumnValue(0); QualifierValue qvalue = value.getQualifierValue(0); Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) - .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()).build(); + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); resp = MutateResponse.newBuilder() - .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); + .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); break; default: resp = MutateResponse.getDefaultInstance(); @@ -206,37 +209,37 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any()); final User user = UserProvider.instantiate(CONF).getCurrent(); - conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", null, - user) { - - @Override - AsyncRegionLocator getLocator() { - AsyncRegionLocator locator = mock(AsyncRegionLocator.class); - Answer> answer = - new Answer>() { - - @Override - public CompletableFuture answer(InvocationOnMock invocation) - throws Throwable { - TableName tableName = invocation.getArgument(0); - RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); - ServerName serverName = ServerName.valueOf("rs", 16010, 12345); - HRegionLocation loc = new HRegionLocation(info, serverName); - return CompletableFuture.completedFuture(loc); - } - }; - doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), - any(RegionLocateType.class), anyLong()); - doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), - anyInt(), any(RegionLocateType.class), anyLong()); - return locator; - } + conn = + new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", null, user) { + + @Override + AsyncRegionLocator getLocator() { + AsyncRegionLocator locator = mock(AsyncRegionLocator.class); + Answer> answer = + new Answer>() { + + @Override + public CompletableFuture answer(InvocationOnMock invocation) + throws Throwable { + TableName tableName = invocation.getArgument(0); + RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); + ServerName serverName = ServerName.valueOf("rs", 16010, 12345); + HRegionLocation loc = new HRegionLocation(info, serverName); + return CompletableFuture.completedFuture(loc); + } + }; + doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), + any(byte[].class), any(RegionLocateType.class), anyLong()); + doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), + any(byte[].class), anyInt(), any(RegionLocateType.class), anyLong()); + return locator; + } - 
@Override - ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { - return stub; - } - }; + @Override + ClientService.Interface getRegionServerStub(ServerName serverName) throws IOException { + return stub; + } + }; table = conn.getTable(TableName.valueOf("table"), ForkJoinPool.commonPool()); } @@ -251,26 +254,19 @@ private void assertTrace(String tableOperation) { private void assertTrace(String tableOperation, Matcher matcher) { final TableName tableName = table.getName(); - final Matcher spanLocator = allOf( - hasName(containsString(tableOperation)), hasEnded()); + final Matcher spanLocator = + allOf(hasName(containsString(tableOperation)), hasEnded()); final String expectedName = tableOperation + " " + tableName.getNameWithNamespaceInclAsString(); - Waiter.waitFor(CONF, 1000, new MatcherPredicate<>( - "waiting for span to emit", - () -> traceRule.getSpans(), hasItem(spanLocator))); - List candidateSpans = traceRule.getSpans() - .stream() - .filter(spanLocator::matches) - .collect(Collectors.toList()); + Waiter.waitFor(CONF, 1000, new MatcherPredicate<>("waiting for span to emit", + () -> traceRule.getSpans(), hasItem(spanLocator))); + List candidateSpans = + traceRule.getSpans().stream().filter(spanLocator::matches).collect(Collectors.toList()); assertThat(candidateSpans, hasSize(1)); SpanData data = candidateSpans.iterator().next(); - assertThat(data, allOf( - hasName(expectedName), - hasKind(SpanKind.CLIENT), - hasStatusWithCode(StatusCode.OK), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(tableName), - matcher)); + assertThat(data, + allOf(hasName(expectedName), hasKind(SpanKind.CLIENT), hasStatusWithCode(StatusCode.OK), + buildConnectionAttributesMatcher(conn), buildTableAttributesMatcher(tableName), matcher)); } @Test @@ -308,16 +304,16 @@ public void testAppend() { @Test public void testIncrement() { table - .increment( - new Increment(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1)) - .join(); + .increment( + new Increment(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1)) + .join(); assertTrace("INCREMENT"); } @Test public void testIncrementColumnValue1() { table.incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1) - .join(); + .join(); assertTrace("INCREMENT"); } @@ -331,38 +327,37 @@ public void testIncrementColumnValue2() { @Test public void testCheckAndMutate() { table.checkAndMutate(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0)))).join(); + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0)))).join(); assertTrace("CHECK_AND_MUTATE"); } @Test public void testCheckAndMutateList() { CompletableFuture - .allOf(table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0))))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + .allOf(table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0))))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + 
hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } @Test public void testCheckAndMutateAll() { table.checkAndMutateAll(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0))))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0))))).join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } private void testCheckAndMutateBuilder(Row op) { AsyncTable.CheckAndMutateBuilder builder = - table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) - .qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")); + table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")); if (op instanceof Put) { Put put = (Put) op; builder.thenPut(put).join(); @@ -380,8 +375,8 @@ private void testCheckAndMutateBuilder(Row op) { @Test public void testCheckAndMutateBuilderThenPut() { - Put put = new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v")); + Put put = new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), + Bytes.toBytes("v")); testCheckAndMutateBuilder(put); } @@ -392,17 +387,16 @@ public void testCheckAndMutateBuilderThenDelete() { @Test public void testCheckAndMutateBuilderThenMutations() throws IOException { - RowMutations mutations = new RowMutations(Bytes.toBytes(0)) - .add(new Put(Bytes.toBytes(0)) + RowMutations mutations = new RowMutations(Bytes.toBytes(0)).add(new Put(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .add(new Delete(Bytes.toBytes(0))); + .add(new Delete(Bytes.toBytes(0))); testCheckAndMutateBuilder(mutations); } private void testCheckAndMutateWithFilterBuilder(Row op) { // use of `PrefixFilter` is completely arbitrary here. 
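/*
 * Illustrative aside, not part of the patch: the filter-based check-and-mutate that the
 * helper below drives through the builder, written out as a single chained call. Row,
 * family and qualifier values are arbitrary, in the same style as this test.
 */
CompletableFuture<Boolean> applied = table
    .checkAndMutate(Bytes.toBytes(0), new PrefixFilter(Bytes.toBytes(0)))
    .thenPut(new Put(Bytes.toBytes(0))
        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v")));
// The tests call join() so the asynchronous call has completed before assertTrace(...)
// inspects the spans exported by the OpenTelemetryRule.
applied.join();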
AsyncTable.CheckAndMutateWithFilterBuilder builder = - table.checkAndMutate(Bytes.toBytes(0), new PrefixFilter(Bytes.toBytes(0))); + table.checkAndMutate(Bytes.toBytes(0), new PrefixFilter(Bytes.toBytes(0))); if (op instanceof Put) { Put put = (Put) op; builder.thenPut(put).join(); @@ -420,8 +414,8 @@ private void testCheckAndMutateWithFilterBuilder(Row op) { @Test public void testCheckAndMutateWithFilterBuilderThenPut() { - Put put = new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v")); + Put put = new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), + Bytes.toBytes("v")); testCheckAndMutateWithFilterBuilder(put); } @@ -432,19 +426,17 @@ public void testCheckAndMutateWithFilterBuilderThenDelete() { @Test public void testCheckAndMutateWithFilterBuilderThenMutations() throws IOException { - RowMutations mutations = new RowMutations(Bytes.toBytes(0)) - .add(new Put(Bytes.toBytes(0)) + RowMutations mutations = new RowMutations(Bytes.toBytes(0)).add(new Put(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .add(new Delete(Bytes.toBytes(0))); + .add(new Delete(Bytes.toBytes(0))); testCheckAndMutateWithFilterBuilder(mutations); } @Test public void testMutateRow() throws IOException { - final RowMutations mutations = new RowMutations(Bytes.toBytes(0)) - .add(new Put(Bytes.toBytes(0)) + final RowMutations mutations = new RowMutations(Bytes.toBytes(0)).add(new Put(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .add(new Delete(Bytes.toBytes(0))); + .add(new Delete(Bytes.toBytes(0))); table.mutateRow(mutations).join(); assertTrace("BATCH", hasAttributes( containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE", "PUT"))); @@ -463,19 +455,22 @@ public void testScan() throws Throwable { final AtomicReference throwable = new AtomicReference<>(); final Scan scan = new Scan().setCaching(1).setMaxResultSize(1).setLimit(1); table.scan(scan, new ScanResultConsumer() { - @Override public boolean onNext(Result result) { + @Override + public boolean onNext(Result result) { if (result.getRow() != null) { count.incrementAndGet(); } return true; } - @Override public void onError(Throwable error) { + @Override + public void onError(Throwable error) { throwable.set(error); doneSignal.countDown(); } - @Override public void onComplete() { + @Override + public void onComplete() { doneSignal.countDown(); } }); @@ -506,85 +501,84 @@ public void testGetScanner() { @Test public void testExistsList() { CompletableFuture - .allOf( - table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + .allOf( + table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testExistsAll() { table.existsAll(Arrays.asList(new Get(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testGetList() { CompletableFuture - .allOf(table.get(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); 
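/*
 * Illustrative aside, not part of the patch: every list/batch entry point in this test is
 * traced as a single "BATCH" client span, and the db.hbase.container_operations attribute
 * records which operation types that batch contained. A RowMutations holding a Put and a
 * Delete therefore yields both values, as in testMutateRow above:
 */
RowMutations mutations = new RowMutations(Bytes.toBytes(0))
    .add(new Put(Bytes.toBytes(0))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))
    .add(new Delete(Bytes.toBytes(0)));
table.mutateRow(mutations).join();
assertTrace("BATCH", hasAttributes(
    containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE", "PUT")));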
- assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + .allOf( + table.get(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testGetAll() { table.getAll(Arrays.asList(new Get(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testPutList() { CompletableFuture - .allOf(table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), - Bytes.toBytes("cq"), Bytes.toBytes("v")))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); + .allOf(table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), + Bytes.toBytes("cq"), Bytes.toBytes("v")))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); } @Test public void testPutAll() { table.putAll(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); } @Test public void testDeleteList() { - CompletableFuture - .allOf( - table.delete(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + CompletableFuture.allOf( + table.delete(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testDeleteAll() { table.deleteAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testBatch() { - CompletableFuture - .allOf( - table.batch(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + CompletableFuture.allOf( + table.batch(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testBatchAll() { table.batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } } diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java index 15d5104730a4..a962cf7c91f2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,13 +27,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestAttributes { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAttributes.class); - private static final byte [] ROW = new byte [] {'r'}; + private static final byte[] ROW = new byte[] { 'r' }; + @Test public void testPutAttributes() { Put put = new Put(ROW); @@ -48,22 +49,22 @@ public void testPutAttributes() { put.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttribute("attribute1"))); Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - put.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), put.getAttributesMap().get("attribute1"))); // overriding attribute value put.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttribute("attribute1"))); Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - put.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), put.getAttributesMap().get("attribute1"))); // adding another attribute put.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttribute("attribute2"))); Assert.assertEquals(2, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - put.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), put.getAttributesMap().get("attribute2"))); // removing attribute put.setAttribute("attribute2", null); @@ -86,7 +87,7 @@ public void testPutAttributes() { @Test public void testDeleteAttributes() { - Delete del = new Delete(new byte [] {'r'}); + Delete del = new Delete(new byte[] { 'r' }); Assert.assertTrue(del.getAttributesMap().isEmpty()); Assert.assertNull(del.getAttribute("absent")); @@ -98,22 +99,22 @@ public void testDeleteAttributes() { del.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttribute("attribute1"))); Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - del.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), del.getAttributesMap().get("attribute1"))); // overriding attribute value del.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttribute("attribute1"))); Assert.assertEquals(1, del.getAttributesMap().size()); - 
Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - del.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), del.getAttributesMap().get("attribute1"))); // adding another attribute del.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttribute("attribute2"))); Assert.assertEquals(2, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - del.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), del.getAttributesMap().get("attribute2"))); // removing attribute del.setAttribute("attribute2", null); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java index 73953d0db75e..f30b14e0b0ba 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -98,15 +98,14 @@ public Future submit(Runnable task) { } @Override - public List> invokeAll( - Collection> tasks) throws InterruptedException { + public List> invokeAll(Collection> tasks) + throws InterruptedException { return null; } @Override - public List> invokeAll( - Collection> tasks, long timeout, TimeUnit unit) - throws InterruptedException { + public List> invokeAll(Collection> tasks, long timeout, + TimeUnit unit) throws InterruptedException { return null; } @@ -117,8 +116,7 @@ public T invokeAny(Collection> tasks) } @Override - public T invokeAny(Collection> tasks, - long timeout, TimeUnit unit) + public T invokeAny(Collection> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { return null; } @@ -129,8 +127,8 @@ public T invokeAny(Collection> tasks, */ private static class MockExceptionListener implements BufferedMutator.ExceptionListener { @Override - public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator mutator) throws RetriesExhaustedWithDetailsException { + public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator) + throws RetriesExhaustedWithDetailsException { } } @@ -141,13 +139,9 @@ public void testClone() { BufferedMutatorParams bmp = new BufferedMutatorParams(TableName.valueOf(tableName)); BufferedMutator.ExceptionListener listener = new MockExceptionListener(); - bmp - .writeBufferSize(17) - .setWriteBufferPeriodicFlushTimeoutMs(123) - .setWriteBufferPeriodicFlushTimerTickMs(456) - .maxKeyValueSize(13) - .pool(pool) - .listener(listener); + bmp.writeBufferSize(17).setWriteBufferPeriodicFlushTimeoutMs(123) + .setWriteBufferPeriodicFlushTimerTickMs(456).maxKeyValueSize(13).pool(pool) + .listener(listener); bmp.implementationClassName("someClassName"); BufferedMutatorParams clone = bmp.clone(); @@ -175,16 +169,14 @@ public void testClone() { * @param some some instance * @param clone a clone of that instance, but not the same instance. 
*/ - private void cloneTest(BufferedMutatorParams some, - BufferedMutatorParams clone) { + private void cloneTest(BufferedMutatorParams some, BufferedMutatorParams clone) { assertFalse(some == clone); - assertEquals(some.getTableName().toString(), - clone.getTableName().toString()); + assertEquals(some.getTableName().toString(), clone.getTableName().toString()); assertEquals(some.getWriteBufferSize(), clone.getWriteBufferSize()); assertEquals(some.getWriteBufferPeriodicFlushTimeoutMs(), - clone.getWriteBufferPeriodicFlushTimeoutMs()); + clone.getWriteBufferPeriodicFlushTimeoutMs()); assertEquals(some.getWriteBufferPeriodicFlushTimerTickMs(), - clone.getWriteBufferPeriodicFlushTimerTickMs()); + clone.getWriteBufferPeriodicFlushTimerTickMs()); assertEquals(some.getMaxKeyValueSize(), clone.getMaxKeyValueSize()); assertTrue(some.getListener() == clone.getListener()); assertTrue(some.getPool() == clone.getPool()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java index 0df04b8043f8..b2e9abfc2c5e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestClientExponentialBackoff { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -67,8 +67,8 @@ public void testMaxLoad() { ServerStatistics stats = new ServerStatistics(); update(stats, 100); - assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, - regionname, stats)); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, + backoff.getBackoffTime(server, regionname, stats)); // another policy with a different max timeout long max = 100; @@ -78,20 +78,20 @@ public void testMaxLoad() { // test beyond 100 still doesn't exceed the max update(stats, 101); - assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, - regionname, stats)); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, + backoff.getBackoffTime(server, regionname, stats)); assertEquals(max, backoffShortTimeout.getBackoffTime(server, regionname, stats)); // and that when we are below 100, its less than the max timeout update(stats, 99); - assertTrue(backoff.getBackoffTime(server, - regionname, stats) < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); + assertTrue(backoff.getBackoffTime(server, regionname, + stats) < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); assertTrue(backoffShortTimeout.getBackoffTime(server, regionname, stats) < max); } /** - * Make sure that we get results in the order that we expect - backoff for a load of 1 should - * less than backoff for 10, which should be less than that for 50. + * Make sure that we get results in the order that we expect - backoff for a load of 1 should less + * than backoff for 10, which should be less than that for 50. 
*/ @Test public void testResultOrdering() { @@ -105,9 +105,9 @@ public void testResultOrdering() { for (int i = 1; i <= 100; i++) { update(stats, i); long next = backoff.getBackoffTime(server, regionname, stats); - assertTrue( - "Previous backoff time" + previous + " >= " + next + ", the next backoff time for " + - "load " + i, previous < next); + assertTrue("Previous backoff time" + previous + " >= " + next + ", the next backoff time for " + + "load " + i, + previous < next); previous = next; } } @@ -151,8 +151,7 @@ public void testCompactionPressurePolicy() { long previous = backoffTime; update(stats, 0, 0, 50); backoffTime = backoff.getBackoffTime(server, regionname, stats); - assertTrue("Compaction pressure should be bigger", - backoffTime > previous); + assertTrue("Compaction pressure should be bigger", backoffTime > previous); update(stats, 0, 0, 100); backoffTime = backoff.getBackoffTime(server, regionname, stats); @@ -161,18 +160,16 @@ public void testCompactionPressurePolicy() { } private void update(ServerStatistics stats, int load) { - ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder() - .setMemStoreLoad(load).build(); + ClientProtos.RegionLoadStats stat = + ClientProtos.RegionLoadStats.newBuilder().setMemStoreLoad(load).build(); stats.update(regionname, ProtobufUtil.createRegionLoadStats(stat)); } private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy, - int compactionPressure) { - ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder() - .setMemStoreLoad(memstoreLoad) - .setHeapOccupancy(heapOccupancy) - .setCompactionPressure(compactionPressure) - .build(); + int compactionPressure) { + ClientProtos.RegionLoadStats stat = + ClientProtos.RegionLoadStats.newBuilder().setMemStoreLoad(memstoreLoad) + .setHeapOccupancy(heapOccupancy).setCompactionPressure(compactionPressure).build(); stats.update(regionname, ProtobufUtil.createRegionLoadStats(stat)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java index ea9a36171d49..37c935ba47af 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,6 +22,7 @@ import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; @@ -40,19 +41,18 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import java.util.Map; @Category({ MiscTests.class, SmallTests.class }) public class TestColumnFamilyDescriptorBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestColumnFamilyDescriptorBuilder.class); + HBaseClassTestRule.forClass(TestColumnFamilyDescriptorBuilder.class); @Test public void testBuilder() throws DeserializationException { ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY).setInMemory(true) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.NONE); + ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY).setInMemory(true) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.NONE); final int v = 123; builder.setBlocksize(v); builder.setTimeToLive(v); @@ -109,7 +109,7 @@ public void testHColumnDescriptorShouldThrowIAEWhenFamilyNameEmpty() throws Exce @Test public void testAddGetRemoveConfiguration() { ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); String key = "Some"; String value = "value"; builder.setConfiguration(key, value); @@ -131,7 +131,7 @@ public void testMobValuesInHColumnDescriptorShouldReadable() { ColumnFamilyDescriptorBuilder.getUnit(ColumnFamilyDescriptorBuilder.MOB_THRESHOLD)); String policyString = PrettyPrinter.format(Bytes.toStringBinary(Bytes.toBytes(policy)), ColumnFamilyDescriptorBuilder - .getUnit(ColumnFamilyDescriptorBuilder.MOB_COMPACT_PARTITION_POLICY)); + .getUnit(ColumnFamilyDescriptorBuilder.MOB_COMPACT_PARTITION_POLICY)); assertEquals(String.valueOf(isMob), isMobString); assertEquals(String.valueOf(threshold), thresholdString); assertEquals(String.valueOf(policy), policyString); @@ -153,7 +153,7 @@ public void testClassMethodsAreBuilderStyle() { public void testSetTimeToLive() throws HBaseException { String ttl; ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); ttl = "50000"; builder.setTimeToLive(ttl); @@ -188,7 +188,7 @@ public void testSetTimeToLive() throws HBaseException { public void testSetBlocksize() throws HBaseException { String blocksize; ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); blocksize = "131072"; builder.setBlocksize(blocksize); @@ -244,7 +244,7 @@ public void testDefaultBuilder() { @Test public void testSetEmptyValue() { ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY); + ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY); String testConf = "TestConfiguration"; String testValue = "TestValue"; // test set value diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java index ac8aed866e68..7d62d5e129a0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,10 +43,10 @@ public class TestColumnFamilyDescriptorLowerCaseEnum { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestColumnFamilyDescriptorLowerCaseEnum.class); + HBaseClassTestRule.forClass(TestColumnFamilyDescriptorLowerCaseEnum.class); private static final Logger LOG = - LoggerFactory.getLogger(TestColumnFamilyDescriptorLowerCaseEnum.class); + LoggerFactory.getLogger(TestColumnFamilyDescriptorLowerCaseEnum.class); private Method getSetMethod(Method getMethod, Class enumType) throws NoSuchMethodException { String methodName = getMethod.getName().replaceFirst("get", "set"); @@ -71,7 +71,7 @@ public void test() throws IllegalAccessException, InvocationTargetException, NoSuchMethodException { Map> getMethod2Value = new HashMap<>(); ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")); for (Method method : ColumnFamilyDescriptor.class.getMethods()) { if (method.getParameterCount() == 0 && method.getReturnType().isEnum()) { LOG.info("Checking " + method); @@ -85,7 +85,7 @@ public void test() } ColumnFamilyDescriptor desc = builder.build(); ColumnFamilyDescriptorBuilder builder2 = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test2")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test2")); desc.getValues().forEach((k, v) -> { LOG.info(k.toString() + "=>" + v.toString()); String str = Bytes.toString(v.get(), v.getOffset(), v.getLength()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java index 561b1f5715fd..c29724d4048c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
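/*
 * Illustrative aside, not part of the patch: the TestConnectionRegistryLeak diff that
 * starts above swaps in a test-only registry purely through configuration, roughly as its
 * setUp() does. ConnectionRegistryForTest is the registry defined inside that test class.
 */
Configuration conf = HBaseConfiguration.create();
conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY,
    ConnectionRegistryForTest.class, ConnectionRegistry.class);
// Connections created from this configuration then instantiate ConnectionRegistryForTest,
// which the test uses to observe registry creation and close() calls and so detect leaks.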
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public class TestConnectionRegistryLeak { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionRegistryLeak.class); + HBaseClassTestRule.forClass(TestConnectionRegistryLeak.class); public static final class ConnectionRegistryForTest extends DoNothingConnectionRegistry { @@ -72,7 +72,7 @@ public void close() { @BeforeClass public static void setUp() { CONF.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - ConnectionRegistryForTest.class, ConnectionRegistry.class); + ConnectionRegistryForTest.class, ConnectionRegistry.class); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java index b288f98f1f92..6e64e1ec9782 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ public class TestCoprocessorDescriptor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorDescriptor.class); + HBaseClassTestRule.forClass(TestCoprocessorDescriptor.class); private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorDescriptor.class); @@ -55,9 +55,8 @@ public void testBuild() { int priority = 100; String propertyKey = "propertyKey"; String propertyValue = "propertyValue"; - CoprocessorDescriptor cp = - CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path).setPriority(priority) - .setProperty(propertyKey, propertyValue).build(); + CoprocessorDescriptor cp = CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperty(propertyKey, propertyValue).build(); assertEquals(className, cp.getClassName()); assertEquals(path, cp.getJarPath().get()); assertEquals(priority, cp.getPriority()); @@ -73,13 +72,11 @@ public void testSetCoprocessor() throws IOException { String path = "path"; int priority = Math.abs(className.hashCode()); String propertyValue = "propertyValue"; - cps.add( - CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path).setPriority(priority) - .setProperty(propertyKey, propertyValue).build()); + cps.add(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperty(propertyKey, propertyValue).build()); } - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setCoprocessors(cps).build(); + TableDescriptor tableDescriptor = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())).setCoprocessors(cps).build(); for (CoprocessorDescriptor cp : cps) { boolean match = false; for (CoprocessorDescriptor that : tableDescriptor.getCoprocessorDescriptors()) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java index e855055fd889..50a51ad9f249 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java +++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestDeleteTimeStamp { @ClassRule diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java index b52ad3e8d2a4..3a646088f221 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,43 +52,42 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; // TODO: cover more test cases -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestGet { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGet.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGet.class); - private static final byte [] ROW = new byte [] {'r'}; + private static final byte[] ROW = new byte[] { 'r' }; private static final String PB_GET = "CgNyb3ciEwoPdGVzdC5Nb2NrRmlsdGVyEgAwATgB"; private static final String PB_GET_WITH_FILTER_LIST = - "CgFyIosBCilvcmcuYXBhY2hlLmhhZG9vcC5oYmFzZS5maWx0ZXIuRmlsdGVyTGlzdBJeCAESEwoP" + - "dGVzdC5Nb2NrRmlsdGVyEgASEQoNbXkuTW9ja0ZpbHRlchIAEjIKLG9yZy5hcGFjaGUuaGFkb29w" + - "LmhiYXNlLmZpbHRlci5LZXlPbmx5RmlsdGVyEgIIADABOAE="; + "CgFyIosBCilvcmcuYXBhY2hlLmhhZG9vcC5oYmFzZS5maWx0ZXIuRmlsdGVyTGlzdBJeCAESEwoP" + + "dGVzdC5Nb2NrRmlsdGVyEgASEQoNbXkuTW9ja0ZpbHRlchIAEjIKLG9yZy5hcGFjaGUuaGFkb29w" + + "LmhiYXNlLmZpbHRlci5LZXlPbmx5RmlsdGVyEgIIADABOAE="; private static final String MOCK_FILTER_JAR = - "UEsDBBQACAgIANWDlEMAAAAAAAAAAAAAAAAJAAQATUVUQS1JTkYv/soAAAMAUEsHCAAAAAACAAAA" + - "AAAAAFBLAwQUAAgICADVg5RDAAAAAAAAAAAAAAAAFAAAAE1FVEEtSU5GL01BTklGRVNULk1G803M" + - "y0xLLS7RDUstKs7Mz7NSMNQz4OVyLkpNLElN0XWqBAmY6xnEG1gqaPgXJSbnpCo45xcV5BcllgCV" + - "a/Jy8XIBAFBLBwgxyqRbQwAAAEQAAABQSwMEFAAICAgAUoOUQwAAAAAAAAAAAAAAABMAAABteS9N" + - "b2NrRmlsdGVyLmNsYXNzdZHPTsJAEMa/LYVCRVFQMd68gQc38YrxUJUTetGQGE7bstrVwjbbYsSn" + - "0hOJJj6AD2WcFoP/4iYzX+bb32xmd9/en18B7GPLhY11BxsurEw3GUoHaqzSQ4ZCq91nsI/0UDLU" + - "emoszyYjX5oL4Ufk1Hs6EFFfGJXVn6adhirJ6NGUn+rgtquiVJoOQyUWJpFdo0cMjdbAa/8hnNj3" + - "pqmkbmvgMbgn94GMU6XHiYMm1ed6YgJJeDbNV+fejbgTVRRRYlj+cSZDW5trLmIRhJKHYqh1zENf" + - "JJJf5QCfcx45DJ3/WLmYgx/LRNJ1I/UgMmMxIXbo9WxkywLLZqHsUMVJGWlxdwb2lG+XKZdys4kK" + - "5eocgIsl0grVy0Q5+e9Y+V75BdblDIXHX/3b3/rLWEGNdJXCJmeNop7zjQ9QSwcI1kzyMToBAADs" + - "AQAAUEsDBBQACAgIAFKDlEMAAAAAAAAAAAAAAAAVAAAAdGVzdC9Nb2NrRmlsdGVyLmNsYXNzdVHB" + - "TsJAFJwthUJFERQx3ryBBzfxivFQlRN60ZAYTtuy2tXCNtti1K/SE4kmfoAfZXwtBg3RTd6bzOy8" + - "zezux+frO4ADbLuwsemg6cLKcIuhdKgmKj1iKLQ7Awb7WI8kQ62vJvJ8OvaluRR+REqjrwMRDYRR" + - "Gf8W7TRUCUO9n8ok5Wc6uOupKJWmy1CJhUlkz+gxQ7M99Dp/eJzY9x5JZrCGHoN7+hDIOFV6kjho" + - "Eb/QUxNIsmeJfib3b8W9qKKIEslLpzJ0tLnhIhZBKHkoRlrHPPRFIvl1buBzn0cKQ/c/r1wk4Scy" + - 
"kXTpSD2JTFhkxC69oY1sWWBZGuoOMU7ICIt7M7CXfLtMvZSLLVSoV+cGuFghrBBfJZeT/5GV75Xf" + - "YF3NUHhemt/5NV/GGmqE61Q2KXWqRu7f+AJQSwcIrS5nKDoBAADyAQAAUEsBAhQAFAAICAgA1YOU" + - "QwAAAAACAAAAAAAAAAkABAAAAAAAAAAAAAAAAAAAAE1FVEEtSU5GL/7KAABQSwECFAAUAAgICADV" + - "g5RDMcqkW0MAAABEAAAAFAAAAAAAAAAAAAAAAAA9AAAATUVUQS1JTkYvTUFOSUZFU1QuTUZQSwEC" + - "FAAUAAgICABSg5RD1kzyMToBAADsAQAAEwAAAAAAAAAAAAAAAADCAAAAbXkvTW9ja0ZpbHRlci5j" + - "bGFzc1BLAQIUABQACAgIAFKDlEOtLmcoOgEAAPIBAAAVAAAAAAAAAAAAAAAAAD0CAAB0ZXN0L01v" + - "Y2tGaWx0ZXIuY2xhc3NQSwUGAAAAAAQABAABAQAAugMAAAAA"; + "UEsDBBQACAgIANWDlEMAAAAAAAAAAAAAAAAJAAQATUVUQS1JTkYv/soAAAMAUEsHCAAAAAACAAAA" + + "AAAAAFBLAwQUAAgICADVg5RDAAAAAAAAAAAAAAAAFAAAAE1FVEEtSU5GL01BTklGRVNULk1G803M" + + "y0xLLS7RDUstKs7Mz7NSMNQz4OVyLkpNLElN0XWqBAmY6xnEG1gqaPgXJSbnpCo45xcV5BcllgCV" + + "a/Jy8XIBAFBLBwgxyqRbQwAAAEQAAABQSwMEFAAICAgAUoOUQwAAAAAAAAAAAAAAABMAAABteS9N" + + "b2NrRmlsdGVyLmNsYXNzdZHPTsJAEMa/LYVCRVFQMd68gQc38YrxUJUTetGQGE7bstrVwjbbYsSn" + + "0hOJJj6AD2WcFoP/4iYzX+bb32xmd9/en18B7GPLhY11BxsurEw3GUoHaqzSQ4ZCq91nsI/0UDLU" + + "emoszyYjX5oL4Ufk1Hs6EFFfGJXVn6adhirJ6NGUn+rgtquiVJoOQyUWJpFdo0cMjdbAa/8hnNj3" + + "pqmkbmvgMbgn94GMU6XHiYMm1ed6YgJJeDbNV+fejbgTVRRRYlj+cSZDW5trLmIRhJKHYqh1zENf" + + "JJJf5QCfcx45DJ3/WLmYgx/LRNJ1I/UgMmMxIXbo9WxkywLLZqHsUMVJGWlxdwb2lG+XKZdys4kK" + + "5eocgIsl0grVy0Q5+e9Y+V75BdblDIXHX/3b3/rLWEGNdJXCJmeNop7zjQ9QSwcI1kzyMToBAADs" + + "AQAAUEsDBBQACAgIAFKDlEMAAAAAAAAAAAAAAAAVAAAAdGVzdC9Nb2NrRmlsdGVyLmNsYXNzdVHB" + + "TsJAFJwthUJFERQx3ryBBzfxivFQlRN60ZAYTtuy2tXCNtti1K/SE4kmfoAfZXwtBg3RTd6bzOy8" + + "zezux+frO4ADbLuwsemg6cLKcIuhdKgmKj1iKLQ7Awb7WI8kQ62vJvJ8OvaluRR+REqjrwMRDYRR" + + "Gf8W7TRUCUO9n8ok5Wc6uOupKJWmy1CJhUlkz+gxQ7M99Dp/eJzY9x5JZrCGHoN7+hDIOFV6kjho" + + "Eb/QUxNIsmeJfib3b8W9qKKIEslLpzJ0tLnhIhZBKHkoRlrHPPRFIvl1buBzn0cKQ/c/r1wk4Scy" + + "kXTpSD2JTFhkxC69oY1sWWBZGuoOMU7ICIt7M7CXfLtMvZSLLVSoV+cGuFghrBBfJZeT/5GV75Xf" + + "YF3NUHhemt/5NV/GGmqE61Q2KXWqRu7f+AJQSwcIrS5nKDoBAADyAQAAUEsBAhQAFAAICAgA1YOU" + + "QwAAAAACAAAAAAAAAAkABAAAAAAAAAAAAAAAAAAAAE1FVEEtSU5GL/7KAABQSwECFAAUAAgICADV" + + "g5RDMcqkW0MAAABEAAAAFAAAAAAAAAAAAAAAAAA9AAAATUVUQS1JTkYvTUFOSUZFU1QuTUZQSwEC" + + "FAAUAAgICABSg5RD1kzyMToBAADsAQAAEwAAAAAAAAAAAAAAAADCAAAAbXkvTW9ja0ZpbHRlci5j" + + "bGFzc1BLAQIUABQACAgIAFKDlEOtLmcoOgEAAPIBAAAVAAAAAAAAAAAAAAAAAD0CAAB0ZXN0L01v" + + "Y2tGaWx0ZXIuY2xhc3NQSwUGAAAAAAQABAABAQAAugMAAAAA"; @Test public void testAttributesSerialization() throws IOException { @@ -121,22 +120,22 @@ public void testGetAttributes() { get.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttribute("attribute1"))); Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - get.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), get.getAttributesMap().get("attribute1"))); // overriding attribute value get.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttribute("attribute1"))); Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - get.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), get.getAttributesMap().get("attribute1"))); // adding another attribute get.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttribute("attribute2"))); Assert.assertEquals(2, 
get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - get.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), get.getAttributesMap().get("attribute2"))); // removing attribute get.setAttribute("attribute2", null); @@ -209,16 +208,14 @@ public void TestGetRowFromGetCopyConstructor() throws Exception { @Test public void testDynamicFilter() throws Exception { Configuration conf = HBaseConfiguration.create(); - String localPath = conf.get("hbase.local.dir") - + File.separator + "jars" + File.separator; + String localPath = conf.get("hbase.local.dir") + File.separator + "jars" + File.separator; File jarFile = new File(localPath, "MockFilter.jar"); jarFile.delete(); assertFalse("Should be deleted: " + jarFile.getPath(), jarFile.exists()); - ClientProtos.Get getProto1 = - ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET)); + ClientProtos.Get getProto1 = ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET)); ClientProtos.Get getProto2 = - ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET_WITH_FILTER_LIST)); + ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET_WITH_FILTER_LIST)); try { ProtobufUtil.toGet(getProto1); fail("Should not be able to load the filter class"); @@ -230,9 +227,8 @@ public void testDynamicFilter() throws Exception { fail("Should not be able to load the filter class"); } catch (IOException ioe) { assertTrue(ioe.getCause() instanceof InvocationTargetException); - InvocationTargetException ite = (InvocationTargetException)ioe.getCause(); - assertTrue(ite.getTargetException() - instanceof DeserializationException); + InvocationTargetException ite = (InvocationTargetException) ioe.getCause(); + assertTrue(ite.getTargetException() instanceof DeserializationException); } FileOutputStream fos = new FileOutputStream(jarFile); fos.write(Base64.getDecoder().decode(MOCK_FILTER_JAR)); @@ -243,7 +239,7 @@ public void testDynamicFilter() throws Exception { Get get2 = ProtobufUtil.toGet(getProto2); assertTrue(get2.getFilter() instanceof FilterList); - List filters = ((FilterList)get2.getFilter()).getFilters(); + List filters = ((FilterList) get2.getFilter()).getFilters(); assertEquals(3, filters.size()); assertEquals("test.MockFilter", filters.get(0).getClass().getName()); assertEquals("my.MockFilter", filters.get(1).getClass().getName()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java index 7a36696d1544..b8b663351a7c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertArrayEquals; @@ -51,7 +49,7 @@ public class TestImmutableScan { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImmutableScan.class); + HBaseClassTestRule.forClass(TestImmutableScan.class); private static final Logger LOG = LoggerFactory.getLogger(TestImmutableScan.class); @@ -60,36 +58,18 @@ public void testScanCopyConstructor() throws Exception { Scan scan = new Scan(); scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) - .setACL("test_user2", new Permission(Permission.Action.READ)) - .setAllowPartialResults(true) - .setAsyncPrefetch(false) - .setAttribute("test_key", Bytes.toBytes("test_value")) - .setAuthorizations(new Authorizations("test_label")) - .setBatch(10) - .setCacheBlocks(false) - .setCaching(10) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("scan_copy_constructor") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLimit(100) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultSize(100) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setMvccReadPoint(5) - .setNeedCursorResult(true) - .setPriority(1) - .setRaw(true) - .setReplicaId(3) - .setReversed(true) - .setRowOffsetPerColumnFamily(5) - .setStartStopRowForPrefixScan(Bytes.toBytes("row_")) - .setScanMetricsEnabled(true) - .setReadType(Scan.ReadType.STREAM) - .withStartRow(Bytes.toBytes("row_1")) - .withStopRow(Bytes.toBytes("row_2")) - .setTimeRange(0, 13); + .setACL("test_user2", new Permission(Permission.Action.READ)).setAllowPartialResults(true) + .setAsyncPrefetch(false).setAttribute("test_key", Bytes.toBytes("test_value")) + .setAuthorizations(new Authorizations("test_label")).setBatch(10).setCacheBlocks(false) + .setCaching(10).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("scan_copy_constructor").setIsolationLevel(IsolationLevel.READ_COMMITTED) + .setLimit(100).setLoadColumnFamiliesOnDemand(false).setMaxResultSize(100) + .setMaxResultsPerColumnFamily(1000).readVersions(9999).setMvccReadPoint(5) + .setNeedCursorResult(true).setPriority(1).setRaw(true).setReplicaId(3).setReversed(true) + .setRowOffsetPerColumnFamily(5).setStartStopRowForPrefixScan(Bytes.toBytes("row_")) + .setScanMetricsEnabled(true).setReadType(Scan.ReadType.STREAM) + .withStartRow(Bytes.toBytes("row_1")).withStopRow(Bytes.toBytes("row_2")) + .setTimeRange(0, 13); // create a copy of existing scan object Scan scanCopy = new ImmutableScan(scan); @@ -210,8 +190,7 @@ private void testUnmodifiableSetters(Scan scanCopy) throws IOException { scanCopy.setCaching(1); throw new RuntimeException("Should not reach here"); } catch (UnsupportedOperationException e) { - assertEquals("ImmutableScan does not allow access to setCaching", - e.getMessage()); + assertEquals("ImmutableScan does not allow access to setCaching", e.getMessage()); } try { scanCopy.setLoadColumnFamiliesOnDemand(true); @@ -302,8 +281,7 @@ private void testUnmodifiableSetters(Scan scanCopy) throws IOException { scanCopy.setAllowPartialResults(true); throw new RuntimeException("Should not reach here"); } catch (UnsupportedOperationException e) { - assertEquals("ImmutableScan does not allow access to setAllowPartialResults", - e.getMessage()); + assertEquals("ImmutableScan does not allow access to setAllowPartialResults", e.getMessage()); } try { scanCopy.setId("id"); @@ -386,8 +364,7 @@ private static boolean isGetter(Method method) { || method.getName().startsWith("set")) { 
return false; } - return !void.class.equals(method.getReturnType()) - && !Scan.class.equals(method.getReturnType()); + return !void.class.equals(method.getReturnType()) && !Scan.class.equals(method.getReturnType()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java index 75bad5ea416f..f63ffc2fdefa 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestIncrement { @ClassRule @@ -39,17 +39,17 @@ public class TestIncrement { @Test public void testIncrementInstance() { final long expected = 13; - Increment inc = new Increment(new byte [] {'r'}); + Increment inc = new Increment(new byte[] { 'r' }); int total = 0; for (int i = 0; i < 2; i++) { - byte [] bytes = Bytes.toBytes(i); + byte[] bytes = Bytes.toBytes(i); inc.addColumn(bytes, bytes, expected); total++; } - Map> familyMapOfLongs = inc.getFamilyMapOfLongs(); + Map> familyMapOfLongs = inc.getFamilyMapOfLongs(); int found = 0; - for (Map.Entry> entry: familyMapOfLongs.entrySet()) { - for (Map.Entry e: entry.getValue().entrySet()) { + for (Map.Entry> entry : familyMapOfLongs.entrySet()) { + for (Map.Entry e : entry.getValue().entrySet()) { assertEquals(expected, e.getValue().longValue()); found++; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java index 3c8b04dde174..52bf8d9c1253 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestInterfaceAlign { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestInterfaceAlign.class); + HBaseClassTestRule.forClass(TestInterfaceAlign.class); /** * Test methods name match up @@ -78,8 +78,8 @@ public void testAdminWithAsyncAdmin() { private List getMethodNames(Class c) { // DON'T use the getDeclaredMethods as we want to check the Public APIs only. 
return Arrays.asList(c.getMethods()).stream().filter(m -> !isDeprecated(m)) - .filter(m -> !Modifier.isStatic(m.getModifiers())).map(Method::getName).distinct() - .collect(Collectors.toList()); + .filter(m -> !Modifier.isStatic(m.getModifiers())).map(Method::getName).distinct() + .collect(Collectors.toList()); } private boolean isDeprecated(Method method) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java index d48806def23d..e76ff892ae45 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -@Category({ClientTests.class, MetricsTests.class, SmallTests.class}) +@Category({ ClientTests.class, MetricsTests.class, SmallTests.class }) public class TestMetricsConnection { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -56,7 +56,8 @@ public class TestMetricsConnection { private static MetricsConnection METRICS; private static final ThreadPoolExecutor BATCH_POOL = - (ThreadPoolExecutor) Executors.newFixedThreadPool(2); + (ThreadPoolExecutor) Executors.newFixedThreadPool(2); + @BeforeClass public static void beforeClass() { METRICS = new MetricsConnection("mocked-connection", () -> BATCH_POOL, () -> null); @@ -70,71 +71,54 @@ public static void afterClass() { @Test public void testStaticMetrics() throws IOException { final byte[] foo = Bytes.toBytes("foo"); - final RegionSpecifier region = RegionSpecifier.newBuilder() - .setValue(ByteString.EMPTY) - .setType(RegionSpecifierType.REGION_NAME) - .build(); + final RegionSpecifier region = RegionSpecifier.newBuilder().setValue(ByteString.EMPTY) + .setType(RegionSpecifierType.REGION_NAME).build(); final int loop = 5; for (int i = 0; i < loop; i++) { - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Get"), - GetRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Scan"), - ScanRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Multi"), - MultiRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.APPEND, new Append(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, new Increment(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - 
METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Get"), + GetRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Scan"), + ScanRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Multi"), + MultiRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.APPEND, new Append(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, new Increment(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))).setRegion(region) + .build(), + MetricsConnection.newCallStats()); } - for (String method: new String[]{"Get", "Scan", "Mutate"}) { + for (String method : new String[] { "Get", "Scan", "Mutate" }) { final String metricKey = "rpcCount_" + ClientService.getDescriptor().getName() + "_" + method; final long metricVal = METRICS.rpcCounters.get(metricKey).getCount(); assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal >= loop); } - for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { - METRICS.getTracker, METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker, - METRICS.deleteTracker, METRICS.incrementTracker, METRICS.putTracker - }) { + for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { METRICS.getTracker, + METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker, METRICS.deleteTracker, + METRICS.incrementTracker, METRICS.putTracker }) { assertEquals("Failed to invoke callTimer on " + t, loop, t.callTimer.getCount()); assertEquals("Failed to invoke reqHist on " + t, loop, t.reqHist.getCount()); assertEquals("Failed to invoke respHist on " + t, loop, t.respHist.getCount()); } - RatioGauge executorMetrics = (RatioGauge) METRICS.getMetricRegistry() - .getMetrics().get(METRICS.getExecutorPoolName()); - RatioGauge metaMetrics = (RatioGauge) METRICS.getMetricRegistry() - .getMetrics().get(METRICS.getMetaPoolName()); + RatioGauge executorMetrics = + (RatioGauge) METRICS.getMetricRegistry().getMetrics().get(METRICS.getExecutorPoolName()); + RatioGauge metaMetrics = + (RatioGauge) METRICS.getMetricRegistry().getMetrics().get(METRICS.getMetaPoolName()); assertEquals(Ratio.of(0, 3).getValue(), executorMetrics.getValue(), 0); assertEquals(Double.NaN, metaMetrics.getValue(), 0); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java index 99699a4fea6f..c197eb35decc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,20 +50,16 @@ public void testAppendCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Type.Put) - .setValue(Bytes.toBytes(100)) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Put) + .setValue(Bytes.toBytes(100)).build()); origin.addColumn(family, Bytes.toBytes("q0"), Bytes.toBytes("value")); origin.setTimeRange(100, 1000); Append clone = new Append(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("value")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -73,20 +69,16 @@ public void testIncrementCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(100)) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(100)).build()); origin.addColumn(family, Bytes.toBytes("q0"), 4); origin.setTimeRange(100, 1000); Increment clone = new Increment(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q1"), 3); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -96,12 +88,8 @@ public void testDeleteCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Type.Delete) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Delete).build()); origin.addColumn(family, Bytes.toBytes("q0")); origin.addColumns(family, Bytes.toBytes("q1")); origin.addFamily(family); @@ -111,7 +99,7 @@ public void testDeleteCopyConstructor() throws IOException { assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q3")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -121,20 +109,16 @@ public void testPutCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - 
origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes("value")) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put) + .setValue(Bytes.toBytes("value")).build()); origin.addColumn(family, Bytes.toBytes("q0"), Bytes.toBytes("V-01")); origin.addColumn(family, Bytes.toBytes("q1"), 100, Bytes.toBytes("V-01")); Put clone = new Put(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q2"), Bytes.toBytes("V-02")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -160,10 +144,10 @@ private void assertEquals(Mutation origin, Mutation clone) { Assert.assertEquals(origin.getTimestamp(), clone.getTimestamp()); Assert.assertEquals(origin.getPriority(), clone.getPriority()); if (origin instanceof Append) { - assertEquals(((Append)origin).getTimeRange(), ((Append)clone).getTimeRange()); + assertEquals(((Append) origin).getTimeRange(), ((Append) clone).getTimeRange()); } if (origin instanceof Increment) { - assertEquals(((Increment)origin).getTimeRange(), ((Increment)clone).getTimeRange()); + assertEquals(((Increment) origin).getTimeRange(), ((Increment) clone).getTimeRange()); } } @@ -179,65 +163,54 @@ public void testRowIsImmutableOrNot() { // Test when row key is immutable Put putRowIsImmutable = new Put(rowKey, true); - assertTrue(rowKey == putRowIsImmutable.getRow()); // No local copy is made + assertTrue(rowKey == putRowIsImmutable.getRow()); // No local copy is made // Test when row key is not immutable Put putRowIsNotImmutable = new Put(rowKey, 1000L, false); - assertTrue(rowKey != putRowIsNotImmutable.getRow()); // A local copy is made + assertTrue(rowKey != putRowIsNotImmutable.getRow()); // A local copy is made } // HBASE-14882 @Test public void testAddImmutableToPut() throws IOException { - byte[] row = Bytes.toBytes("immutable-row"); - byte[] family = Bytes.toBytes("immutable-family"); + byte[] row = Bytes.toBytes("immutable-row"); + byte[] family = Bytes.toBytes("immutable-family"); byte[] qualifier0 = Bytes.toBytes("immutable-qualifier-0"); - byte[] value0 = Bytes.toBytes("immutable-value-0"); + byte[] value0 = Bytes.toBytes("immutable-value-0"); byte[] qualifier1 = Bytes.toBytes("immutable-qualifier-1"); - byte[] value1 = Bytes.toBytes("immutable-value-1"); - long ts1 = 5000L; + byte[] value1 = Bytes.toBytes("immutable-value-1"); + long ts1 = 5000L; // "true" indicates that the input row is immutable Put put = new Put(row, true); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier0) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(value0) - .build()) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier1) - .setTimestamp(ts1) - .setType(Type.Put) - .setValue(value1) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family) + .setQualifier(qualifier0).setTimestamp(put.getTimestamp()).setType(Type.Put) + .setValue(value0).build()) + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family) + 
.setQualifier(qualifier1).setTimestamp(ts1).setType(Type.Put).setValue(value1).build()); // Verify the cell of family:qualifier0 Cell cell0 = put.get(family, qualifier0).get(0); // Verify no local copy is made for family, qualifier or value - assertTrue(cell0.getFamilyArray() == family); + assertTrue(cell0.getFamilyArray() == family); assertTrue(cell0.getQualifierArray() == qualifier0); - assertTrue(cell0.getValueArray() == value0); + assertTrue(cell0.getValueArray() == value0); // Verify timestamp - assertTrue(cell0.getTimestamp() == put.getTimestamp()); + assertTrue(cell0.getTimestamp() == put.getTimestamp()); // Verify the cell of family:qualifier1 Cell cell1 = put.get(family, qualifier1).get(0); // Verify no local copy is made for family, qualifier or value - assertTrue(cell1.getFamilyArray() == family); + assertTrue(cell1.getFamilyArray() == family); assertTrue(cell1.getQualifierArray() == qualifier1); - assertTrue(cell1.getValueArray() == value1); + assertTrue(cell1.getValueArray() == value1); // Verify timestamp - assertTrue(cell1.getTimestamp() == ts1); + assertTrue(cell1.getTimestamp() == ts1); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index 600c444ad463..432ed6e686f2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,19 +71,19 @@ import org.apache.hbase.thirdparty.com.google.gson.Gson; /** - * Run tests that use the functionality of the Operation superclass for - * Puts, Gets, Deletes, Scans, and MultiPuts. + * Run tests that use the functionality of the Operation superclass for Puts, Gets, Deletes, Scans, + * and MultiPuts. 
*/ -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestOperation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestOperation.class); - private static byte [] ROW = Bytes.toBytes("testRow"); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - private static byte [] VALUE = Bytes.toBytes("testValue"); + private static byte[] ROW = Bytes.toBytes("testRow"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte[] VALUE = Bytes.toBytes("testValue"); private static Gson GSON = GsonUtil.createGson().create(); @@ -105,8 +105,8 @@ public class TestOperation { private static String COL_NAME_2 = "col2"; private static ColumnRangeFilter CR_FILTER = new ColumnRangeFilter(Bytes.toBytes(COL_NAME_1), true, Bytes.toBytes(COL_NAME_2), false); - private static String STR_CR_FILTER = CR_FILTER.getClass().getSimpleName() - + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; + private static String STR_CR_FILTER = + CR_FILTER.getClass().getSimpleName() + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; private static int COL_COUNT = 9; private static ColumnCountGetFilter CCG_FILTER = new ColumnCountGetFilter(COL_COUNT); @@ -115,14 +115,13 @@ public class TestOperation { private static int LIMIT = 3; private static int OFFSET = 4; private static ColumnPaginationFilter CP_FILTER = new ColumnPaginationFilter(LIMIT, OFFSET); - private static String STR_CP_FILTER = CP_FILTER.getClass().getSimpleName() - + " (" + LIMIT + ", " + OFFSET + ")"; + private static String STR_CP_FILTER = + CP_FILTER.getClass().getSimpleName() + " (" + LIMIT + ", " + OFFSET + ")"; private static String STOP_ROW_KEY = "stop"; private static InclusiveStopFilter IS_FILTER = new InclusiveStopFilter(Bytes.toBytes(STOP_ROW_KEY)); - private static String STR_IS_FILTER = - IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; + private static String STR_IS_FILTER = IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; private static String PREFIX = "prefix"; private static PrefixFilter PREFIX_FILTER = new PrefixFilter(Bytes.toBytes(PREFIX)); @@ -133,9 +132,9 @@ public class TestOperation { private static String STR_MCP_FILTER = MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]"; - private static byte[][] L_PREFIXES = { - Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2"), Bytes.toBytes("3"), - Bytes.toBytes("4"), Bytes.toBytes("5"), Bytes.toBytes("6"), Bytes.toBytes("7") }; + private static byte[][] L_PREFIXES = + { Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2"), Bytes.toBytes("3"), + Bytes.toBytes("4"), Bytes.toBytes("5"), Bytes.toBytes("6"), Bytes.toBytes("7") }; private static MultipleColumnPrefixFilter L_MCP_FILTER = new MultipleColumnPrefixFilter(L_PREFIXES); private static String STR_L_MCP_FILTER = @@ -165,10 +164,9 @@ public class TestOperation { private static BinaryComparator BC = new BinaryComparator(CMP_VALUE); private static DependentColumnFilter DC_FILTER = new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC); - private static String STR_DC_FILTER = String.format( - "%s (%s, %s, %s, %s, %s)", DC_FILTER.getClass().getSimpleName(), - Bytes.toStringBinary(FAMILY), Bytes.toStringBinary(QUALIFIER), true, - CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); + private static String STR_DC_FILTER = 
String.format("%s (%s, %s, %s, %s, %s)", + DC_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), true, CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); private static FamilyFilter FAMILY_FILTER = new FamilyFilter(CMP_OP, BC); private static String STR_FAMILY_FILTER = @@ -188,102 +186,93 @@ public class TestOperation { private static SingleColumnValueFilter SCV_FILTER = new SingleColumnValueFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); private static String STR_SCV_FILTER = String.format("%s (%s, %s, %s, %s)", - SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), - Bytes.toStringBinary(CMP_VALUE)); + SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); private static SingleColumnValueExcludeFilter SCVE_FILTER = new SingleColumnValueExcludeFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); private static String STR_SCVE_FILTER = String.format("%s (%s, %s, %s, %s)", - SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); - - private static FilterList AND_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ALL, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); - private static String STR_AND_FILTER_LIST = String.format( - "%s AND (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - - private static FilterList OR_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ONE, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); - private static String STR_OR_FILTER_LIST = String.format( - "%s OR (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - - private static FilterList L_FILTER_LIST = new FilterList( - Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER, COL_PRE_FILTER, - CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); - private static String STR_L_FILTER_LIST = String.format( - "%s AND (5/8): [%s, %s, %s, %s, %s, %s]", - L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, - STR_CR_FILTER, STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); - - private static Filter[] FILTERS = { - TS_FILTER, // TimestampsFilter - L_TS_FILTER, // TimestampsFilter - COL_PRE_FILTER, // ColumnPrefixFilter - CP_FILTER, // ColumnPaginationFilter - CR_FILTER, // ColumnRangeFilter - CCG_FILTER, // ColumnCountGetFilter - IS_FILTER, // InclusiveStopFilter - PREFIX_FILTER, // PrefixFilter - PAGE_FILTER, // PageFilter - SKIP_FILTER, // SkipFilter - WHILE_FILTER, // WhileMatchFilter - KEY_ONLY_FILTER, // KeyOnlyFilter - FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - MCP_FILTER, // MultipleColumnPrefixFilter - L_MCP_FILTER, // MultipleColumnPrefixFilter - DC_FILTER, // DependentColumnFilter - FAMILY_FILTER, // FamilyFilter - QUALIFIER_FILTER, // QualifierFilter - ROW_FILTER, // RowFilter - VALUE_FILTER, // ValueFilter - SCV_FILTER, // SingleColumnValueFilter - SCVE_FILTER, // SingleColumnValueExcludeFilter - AND_FILTER_LIST, // FilterList - OR_FILTER_LIST, // FilterList - L_FILTER_LIST, // FilterList + SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); + + private static FilterList AND_FILTER_LIST = new FilterList(Operator.MUST_PASS_ALL, + 
Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); + private static String STR_AND_FILTER_LIST = String.format("%s AND (3/3): [%s, %s, %s]", + AND_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); + + private static FilterList OR_FILTER_LIST = new FilterList(Operator.MUST_PASS_ONE, + Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); + private static String STR_OR_FILTER_LIST = String.format("%s OR (3/3): [%s, %s, %s]", + AND_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); + + private static FilterList L_FILTER_LIST = new FilterList(Arrays.asList((Filter) TS_FILTER, + L_TS_FILTER, CR_FILTER, COL_PRE_FILTER, CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); + private static String STR_L_FILTER_LIST = String.format("%s AND (5/8): [%s, %s, %s, %s, %s, %s]", + L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER, + STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); + + private static Filter[] FILTERS = { TS_FILTER, // TimestampsFilter + L_TS_FILTER, // TimestampsFilter + COL_PRE_FILTER, // ColumnPrefixFilter + CP_FILTER, // ColumnPaginationFilter + CR_FILTER, // ColumnRangeFilter + CCG_FILTER, // ColumnCountGetFilter + IS_FILTER, // InclusiveStopFilter + PREFIX_FILTER, // PrefixFilter + PAGE_FILTER, // PageFilter + SKIP_FILTER, // SkipFilter + WHILE_FILTER, // WhileMatchFilter + KEY_ONLY_FILTER, // KeyOnlyFilter + FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter + MCP_FILTER, // MultipleColumnPrefixFilter + L_MCP_FILTER, // MultipleColumnPrefixFilter + DC_FILTER, // DependentColumnFilter + FAMILY_FILTER, // FamilyFilter + QUALIFIER_FILTER, // QualifierFilter + ROW_FILTER, // RowFilter + VALUE_FILTER, // ValueFilter + SCV_FILTER, // SingleColumnValueFilter + SCVE_FILTER, // SingleColumnValueExcludeFilter + AND_FILTER_LIST, // FilterList + OR_FILTER_LIST, // FilterList + L_FILTER_LIST, // FilterList }; - private static String[] FILTERS_INFO = { - STR_TS_FILTER, // TimestampsFilter - STR_L_TS_FILTER, // TimestampsFilter - STR_COL_PRE_FILTER, // ColumnPrefixFilter - STR_CP_FILTER, // ColumnPaginationFilter - STR_CR_FILTER, // ColumnRangeFilter - STR_CCG_FILTER, // ColumnCountGetFilter - STR_IS_FILTER, // InclusiveStopFilter - STR_PREFIX_FILTER, // PrefixFilter - STR_PAGE_FILTER, // PageFilter - STR_SKIP_FILTER, // SkipFilter - STR_WHILE_FILTER, // WhileMatchFilter - STR_KEY_ONLY_FILTER, // KeyOnlyFilter - STR_FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - STR_MCP_FILTER, // MultipleColumnPrefixFilter - STR_L_MCP_FILTER, // MultipleColumnPrefixFilter - STR_DC_FILTER, // DependentColumnFilter - STR_FAMILY_FILTER, // FamilyFilter - STR_QUALIFIER_FILTER, // QualifierFilter - STR_ROW_FILTER, // RowFilter - STR_VALUE_FILTER, // ValueFilter - STR_SCV_FILTER, // SingleColumnValueFilter - STR_SCVE_FILTER, // SingleColumnValueExcludeFilter - STR_AND_FILTER_LIST, // FilterList - STR_OR_FILTER_LIST, // FilterList - STR_L_FILTER_LIST, // FilterList + private static String[] FILTERS_INFO = { STR_TS_FILTER, // TimestampsFilter + STR_L_TS_FILTER, // TimestampsFilter + STR_COL_PRE_FILTER, // ColumnPrefixFilter + STR_CP_FILTER, // ColumnPaginationFilter + STR_CR_FILTER, // ColumnRangeFilter + STR_CCG_FILTER, // ColumnCountGetFilter + STR_IS_FILTER, // InclusiveStopFilter + STR_PREFIX_FILTER, // PrefixFilter + STR_PAGE_FILTER, // PageFilter + STR_SKIP_FILTER, // SkipFilter + STR_WHILE_FILTER, // WhileMatchFilter + STR_KEY_ONLY_FILTER, // KeyOnlyFilter + STR_FIRST_KEY_ONLY_FILTER, // 
FirstKeyOnlyFilter + STR_MCP_FILTER, // MultipleColumnPrefixFilter + STR_L_MCP_FILTER, // MultipleColumnPrefixFilter + STR_DC_FILTER, // DependentColumnFilter + STR_FAMILY_FILTER, // FamilyFilter + STR_QUALIFIER_FILTER, // QualifierFilter + STR_ROW_FILTER, // RowFilter + STR_VALUE_FILTER, // ValueFilter + STR_SCV_FILTER, // SingleColumnValueFilter + STR_SCVE_FILTER, // SingleColumnValueExcludeFilter + STR_AND_FILTER_LIST, // FilterList + STR_OR_FILTER_LIST, // FilterList + STR_L_FILTER_LIST, // FilterList }; static { - assertEquals("The sizes of static arrays do not match: " - + "[FILTERS: %d <=> FILTERS_INFO: %d]", - FILTERS.length, FILTERS_INFO.length); + assertEquals("The sizes of static arrays do not match: " + "[FILTERS: %d <=> FILTERS_INFO: %d]", + FILTERS.length, FILTERS_INFO.length); } /** - * Test the client Operations' JSON encoding to ensure that produced JSON is - * parseable and that the details are present and not corrupted. - * + * Test the client Operations' JSON encoding to ensure that produced JSON is parseable and that + * the details are present and not corrupted. * @throws IOException if the JSON conversion fails */ @Test @@ -297,16 +286,14 @@ public void testOperationJSON() throws IOException { }.getType(); Map parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("startRow incorrect in Scan.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("startRow")); + assertEquals("startRow incorrect in Scan.toJSON()", Bytes.toStringBinary(ROW), + parsedJSON.get("startRow")); // check for the family and the qualifier. - List familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + List familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Scan.toJSON()", familyInfo); assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Scan.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); + assertEquals("Qualifier incorrect in Scan.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); // produce a Get Operation Get get = new Get(ROW); @@ -315,16 +302,13 @@ public void testOperationJSON() throws IOException { json = get.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row incorrect in Get.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row incorrect in Get.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. 
- familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Get.toJSON()", familyInfo); assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Get.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); + assertEquals("Qualifier incorrect in Get.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); // produce a Put operation Put put = new Put(ROW); @@ -333,17 +317,14 @@ public void testOperationJSON() throws IOException { json = put.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row absent in Put.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row absent in Put.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Put.toJSON()", familyInfo); assertEquals("KeyValue absent in Put.toJSON()", 1, familyInfo.size()); Map kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Put.toJSON()", - Bytes.toStringBinary(QUALIFIER), - kvMap.get("qualifier")); + assertEquals("Qualifier incorrect in Put.toJSON()", Bytes.toStringBinary(QUALIFIER), + kvMap.get("qualifier")); assertEquals("Value length incorrect in Put.toJSON()", VALUE.length, ((Number) kvMap.get("vlen")).intValue()); @@ -354,16 +335,14 @@ public void testOperationJSON() throws IOException { json = delete.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row absent in Delete.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row absent in Delete.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. 
- familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Delete.toJSON()", familyInfo); assertEquals("KeyValue absent in Delete.toJSON()", 1, familyInfo.size()); kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Delete.toJSON()", - Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier")); + assertEquals("Qualifier incorrect in Delete.toJSON()", Bytes.toStringBinary(QUALIFIER), + kvMap.get("qualifier")); } @Test @@ -386,7 +365,7 @@ public void testPutCreationWithByteBuffer() { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2013L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -395,7 +374,7 @@ public void testPutCreationWithByteBuffer() { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2001L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -405,7 +384,7 @@ public void testPutCreationWithByteBuffer() { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2001L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(1970L, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -414,29 +393,17 @@ public void testPutCreationWithByteBuffer() { @Test @SuppressWarnings("rawtypes") public void testOperationSubClassMethodsAreBuilderStyle() { - /* All Operation subclasses should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * Scan scan = new Scan() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * All Operation subclasses should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: Scan scan = new Scan() .setFoo(foo) .setBar(bar) + * .setBuz(buz) This test ensures that all methods starting with "set" returns the declaring + * object */ // TODO: We should ensure all subclasses of Operation is checked. 
- Class[] classes = new Class[] { - Operation.class, - OperationWithAttributes.class, - Mutation.class, - Query.class, - Delete.class, - Increment.class, - Append.class, - Put.class, - Get.class, - Scan.class}; + Class[] classes = + new Class[] { Operation.class, OperationWithAttributes.class, Mutation.class, Query.class, + Delete.class, Increment.class, Append.class, Put.class, Get.class, Scan.class }; BuilderStyleTest.assertClassesAreBuilderStyle(classes); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java index ef9d4c96d282..b1f560f5509f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,10 +27,9 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) /** - * Addresses HBASE-6047 - * We test put.has call with all of its polymorphic magic + * Addresses HBASE-6047 We test put.has call with all of its polymorphic magic */ public class TestPutDotHas { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java index 8572c0b47a1a..f57145cdb1b2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.client; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java index 3b66f7eb2e60..f7c36d00c28d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestRegionInfoBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionInfoBuilder.class); + HBaseClassTestRule.forClass(TestRegionInfoBuilder.class); @Rule public TableNameTestRule name = new TableNameTestRule(); @@ -114,7 +114,7 @@ public void testCreateRegionInfoName() throws Exception { public void testContainsRange() { TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(name.getTableName()).build(); RegionInfo ri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("g")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("g")).build(); // Single row range at start of region assertTrue(ri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("a"))); // Fully contained range @@ -175,9 +175,9 @@ public void testContainsRangeForMetaTable() { public void testLastRegionCompare() { TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(name.getTableName()).build(); RegionInfo rip = RegionInfoBuilder.newBuilder(tableDesc.getTableName()) - .setStartKey(Bytes.toBytes("a")).setEndKey(new byte[0]).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(new byte[0]).build(); RegionInfo ric = RegionInfoBuilder.newBuilder(tableDesc.getTableName()) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); assertTrue(RegionInfo.COMPARATOR.compare(rip, ric) > 0); } @@ -191,9 +191,9 @@ public void testComparator() { final TableName tableName = name.getTableName(); byte[] empty = new byte[0]; RegionInfo older = RegionInfoBuilder.newBuilder(tableName).setStartKey(empty).setEndKey(empty) - .setSplit(false).setRegionId(0L).build(); + .setSplit(false).setRegionId(0L).build(); RegionInfo newer = RegionInfoBuilder.newBuilder(tableName).setStartKey(empty).setEndKey(empty) - .setSplit(false).setRegionId(1L).build(); + .setSplit(false).setRegionId(1L).build(); assertTrue(RegionInfo.COMPARATOR.compare(older, newer) < 0); assertTrue(RegionInfo.COMPARATOR.compare(newer, older) > 0); assertTrue(RegionInfo.COMPARATOR.compare(older, older) == 0); @@ -259,7 +259,7 @@ public void testParseName() throws IOException { @Test public void testConvert() { final TableName tableName = - TableName.valueOf("ns1:" + name.getTableName().getQualifierAsString()); + TableName.valueOf("ns1:" + name.getTableName().getQualifierAsString()); byte[] startKey = Bytes.toBytes("startKey"); byte[] endKey = Bytes.toBytes("endKey"); boolean split = false; @@ -267,7 +267,7 @@ public void testConvert() { int replicaId = 42; RegionInfo ri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey) - .setSplit(split).setRegionId(regionId).setReplicaId(replicaId).build(); + .setSplit(split).setRegionId(regionId).setReplicaId(replicaId).build(); // convert two times, compare RegionInfo convertedRi = ProtobufUtil.toRegionInfo(ProtobufUtil.toRegionInfo(ri)); @@ -276,16 +276,16 @@ public void testConvert() { // test convert RegionInfo without replicaId HBaseProtos.RegionInfo info = HBaseProtos.RegionInfo.newBuilder() - .setTableName(HBaseProtos.TableName.newBuilder() - .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())) - .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())).build()) - .setStartKey(UnsafeByteOperations.unsafeWrap(startKey)) - 
.setEndKey(UnsafeByteOperations.unsafeWrap(endKey)).setSplit(split).setRegionId(regionId) - .build(); + .setTableName(HBaseProtos.TableName.newBuilder() + .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())) + .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())).build()) + .setStartKey(UnsafeByteOperations.unsafeWrap(startKey)) + .setEndKey(UnsafeByteOperations.unsafeWrap(endKey)).setSplit(split).setRegionId(regionId) + .build(); convertedRi = ProtobufUtil.toRegionInfo(info); RegionInfo expectedRi = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey) - .setEndKey(endKey).setSplit(split).setRegionId(regionId).setReplicaId(0).build(); + .setEndKey(endKey).setSplit(split).setRegionId(regionId).setReplicaId(0).build(); assertEquals(expectedRi, convertedRi); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java index 5a211719227b..497875dba551 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,36 +34,34 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionInfoDisplay { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRegionInfoDisplay.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); @Test public void testRegionDetailsForDisplay() throws IOException { - byte[] startKey = new byte[] {0x01, 0x01, 0x02, 0x03}; - byte[] endKey = new byte[] {0x01, 0x01, 0x02, 0x04}; + byte[] startKey = new byte[] { 0x01, 0x01, 0x02, 0x03 }; + byte[] endKey = new byte[] { 0x01, 0x01, 0x02, 0x04 }; Configuration conf = new Configuration(); conf.setBoolean("hbase.display.keys", false); RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(startKey).setEndKey(endKey).build(); + .setStartKey(startKey).setEndKey(endKey).build(); checkEquality(ri, conf); // check HRIs with non-default replicaId - ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(startKey) - .setEndKey(endKey) - .setSplit(false) - .setRegionId(EnvironmentEdgeManager.currentTime()) - .setReplicaId(1).build(); + ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setStartKey(startKey) + .setEndKey(endKey).setSplit(false).setRegionId(EnvironmentEdgeManager.currentTime()) + .setReplicaId(1).build(); checkEquality(ri, conf); Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY, - RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); + RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_START_KEY, - RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); + RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); RegionState state = RegionState.createForTesting(ri, RegionState.State.OPEN); String descriptiveNameForDisplay = @@ -75,25 +73,22 @@ public void testRegionDetailsForDisplay() throws IOException { Assert.assertArrayEquals(endKey, 
RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); Assert.assertArrayEquals(startKey, RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); Assert.assertEquals(originalDescriptive, - RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf)); + RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf)); } private void checkDescriptiveNameEquality(String descriptiveNameForDisplay, String origDesc, byte[] startKey) { // except for the "hidden-start-key" substring everything else should exactly match - String firstPart = descriptiveNameForDisplay.substring(0, - descriptiveNameForDisplay.indexOf( - new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8))); - String secondPart = descriptiveNameForDisplay.substring( - descriptiveNameForDisplay.indexOf( - new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8)) + - RegionInfoDisplay.HIDDEN_START_KEY.length); + String firstPart = descriptiveNameForDisplay.substring(0, descriptiveNameForDisplay + .indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8))); + String secondPart = descriptiveNameForDisplay.substring(descriptiveNameForDisplay + .indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8)) + + RegionInfoDisplay.HIDDEN_START_KEY.length); String firstPartOrig = origDesc.substring(0, origDesc.indexOf(Bytes.toStringBinary(startKey))); String secondPartOrig = origDesc.substring( - origDesc.indexOf(Bytes.toStringBinary(startKey)) + - Bytes.toStringBinary(startKey).length()); - assert(firstPart.equals(firstPartOrig)); - assert(secondPart.equals(secondPartOrig)); + origDesc.indexOf(Bytes.toStringBinary(startKey)) + Bytes.toStringBinary(startKey).length()); + assert (firstPart.equals(firstPartOrig)); + assert (secondPart.equals(secondPartOrig)); } private void checkEquality(RegionInfo ri, Configuration conf) throws IOException { @@ -102,18 +97,18 @@ private void checkEquality(RegionInfo ri, Configuration conf) throws IOException byte[][] modifiedRegionNameParts = RegionInfo.parseRegionName(modifiedRegionName); byte[][] regionNameParts = RegionInfo.parseRegionName(ri.getRegionName()); - //same number of parts - assert(modifiedRegionNameParts.length == regionNameParts.length); + // same number of parts + assert (modifiedRegionNameParts.length == regionNameParts.length); for (int i = 0; i < regionNameParts.length; i++) { // all parts should match except for [1] where in the modified one, // we should have "hidden_start_key" if (i != 1) { - System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + - Bytes.toString(modifiedRegionNameParts[i])); + System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + + Bytes.toString(modifiedRegionNameParts[i])); Assert.assertArrayEquals(regionNameParts[i], modifiedRegionNameParts[i]); } else { - System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + - Bytes.toString(modifiedRegionNameParts[i])); + System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + + Bytes.toString(modifiedRegionNameParts[i])); Assert.assertNotEquals(regionNameParts[i], modifiedRegionNameParts[i]); Assert.assertArrayEquals(modifiedRegionNameParts[1], RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegistryEndpointsRefresher.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegistryEndpointsRefresher.java index 3d6fe1563b8c..a20b4c0aeaa1 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegistryEndpointsRefresher.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegistryEndpointsRefresher.java @@ -44,14 +44,14 @@ public class TestRegistryEndpointsRefresher { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegistryEndpointsRefresher.class); + HBaseClassTestRule.forClass(TestRegistryEndpointsRefresher.class); private static final String INITIAL_DELAY_SECS_CONFIG_NAME = - "hbase.test.registry.initial.delay.secs"; + "hbase.test.registry.initial.delay.secs"; private static final String INTERVAL_SECS_CONFIG_NAME = - "hbase.test.registry.refresh.interval.secs"; + "hbase.test.registry.refresh.interval.secs"; private static final String MIN_INTERVAL_SECS_CONFIG_NAME = - "hbase.test.registry.refresh.min.interval.secs"; + "hbase.test.registry.refresh.min.interval.secs"; private Configuration conf; private RegistryEndpointsRefresher refresher; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java index 5b591030c966..ded41dd58a70 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,15 +29,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestResultStatsUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResultStatsUtil.class); + HBaseClassTestRule.forClass(TestResultStatsUtil.class); - private static final RegionLoadStats regionLoadStats = new RegionLoadStats(100, - 10,90); - private static final byte[] regionName = {80}; + private static final RegionLoadStats regionLoadStats = new RegionLoadStats(100, 10, 90); + private static final byte[] regionName = { 80 }; private static final ServerName server = ServerName.parseServerName("3.1.yg.n,50,1"); @Test @@ -51,12 +50,12 @@ public void testUpdateStats() { // Check that the tracker was updated as expected ServerStatistics stats = serverStatisticTracker.getStats(server); - assertEquals(regionLoadStats.memstoreLoad, stats.getStatsForRegion(regionName) - .getMemStoreLoadPercent()); - assertEquals(regionLoadStats.compactionPressure, stats.getStatsForRegion(regionName) - .getCompactionPressure()); - assertEquals(regionLoadStats.heapOccupancy, stats.getStatsForRegion(regionName) - .getHeapOccupancyPercent()); + assertEquals(regionLoadStats.memstoreLoad, + stats.getStatsForRegion(regionName).getMemStoreLoadPercent()); + assertEquals(regionLoadStats.compactionPressure, + stats.getStatsForRegion(regionName).getCompactionPressure()); + assertEquals(regionLoadStats.heapOccupancy, + stats.getStatsForRegion(regionName).getHeapOccupancyPercent()); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java index 7b584e948610..463036cf8903 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,14 +31,15 @@ import org.junit.rules.TestName; import org.mockito.Mockito; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRetriesExhaustedWithDetailsException { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRetriesExhaustedWithDetailsException.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); /** * Assert that a RetriesExhaustedException that has RegionTooBusyException outputs region name. diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java index 64983089ae06..eb7cde981a2c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,15 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestRowComparator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRowComparator.class); - private static final List DEFAULT_ROWS = IntStream.range(1, 9) - .mapToObj(String::valueOf).map(Bytes::toBytes).collect(Collectors.toList()); + private static final List DEFAULT_ROWS = IntStream.range(1, 9).mapToObj(String::valueOf) + .map(Bytes::toBytes).collect(Collectors.toList()); @Test public void testPut() { @@ -71,8 +71,7 @@ public void testGet() { } private static void test(Function f) { - List rows = new ArrayList(DEFAULT_ROWS.stream() - .map(f).collect(Collectors.toList())); + List rows = new ArrayList(DEFAULT_ROWS.stream().map(f).collect(Collectors.toList())); do { Collections.shuffle(rows); } while (needShuffle(rows)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java index 146895aca166..010c39536b8b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -64,22 +64,22 @@ public class TestRpcBasedRegistryHedgedReads { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcBasedRegistryHedgedReads.class); + HBaseClassTestRule.forClass(TestRpcBasedRegistryHedgedReads.class); private static final Logger LOG = LoggerFactory.getLogger(TestRpcBasedRegistryHedgedReads.class); private static final String HEDGED_REQS_FANOUT_CONFIG_NAME = "hbase.test.hedged.reqs.fanout"; private static final String INITIAL_DELAY_SECS_CONFIG_NAME = - "hbase.test.refresh.initial.delay.secs"; + "hbase.test.refresh.initial.delay.secs"; private static final String REFRESH_INTERVAL_SECS_CONFIG_NAME = - "hbase.test.refresh.interval.secs"; + "hbase.test.refresh.interval.secs"; private static final String MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME = - "hbase.test.min.refresh.interval.secs"; + "hbase.test.min.refresh.interval.secs"; private static final HBaseCommonTestingUtil UTIL = new HBaseCommonTestingUtil(); private static final ExecutorService EXECUTOR = - Executors.newCachedThreadPool(new ThreadFactoryBuilder().setDaemon(true).build()); + Executors.newCachedThreadPool(new ThreadFactoryBuilder().setDaemon(true).build()); private static Set BOOTSTRAP_NODES; @@ -90,12 +90,12 @@ public class TestRpcBasedRegistryHedgedReads { private static volatile Set GOOD_RESP_INDEXS; private static GetClusterIdResponse RESP = - GetClusterIdResponse.newBuilder().setClusterId("id").build(); + GetClusterIdResponse.newBuilder().setClusterId("id").build(); public static final class RpcClientImpl implements RpcClient { public RpcClientImpl(Configuration configuration, String clusterId, SocketAddress localAddress, - MetricsConnection metrics) { + MetricsConnection metrics) { } @Override @@ -130,7 +130,7 @@ public static final class RpcChannelImpl implements RpcChannel { @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { if (!method.getName().equals("GetClusterId")) { // On RPC failures, MasterRegistry internally runs getMasters() RPC to keep the master list // fresh. We do not want to intercept those RPCs here and double count. 
@@ -155,8 +155,8 @@ private AbstractRpcBasedConnectionRegistry createRegistry(int hedged) throws IOE Configuration conf = UTIL.getConfiguration(); conf.setInt(HEDGED_REQS_FANOUT_CONFIG_NAME, hedged); return new AbstractRpcBasedConnectionRegistry(conf, HEDGED_REQS_FANOUT_CONFIG_NAME, - INITIAL_DELAY_SECS_CONFIG_NAME, REFRESH_INTERVAL_SECS_CONFIG_NAME, - MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME) { + INITIAL_DELAY_SECS_CONFIG_NAME, REFRESH_INTERVAL_SECS_CONFIG_NAME, + MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME) { @Override protected Set getBootstrapNodes(Configuration conf) throws IOException { @@ -168,7 +168,8 @@ protected CompletableFuture> fetchEndpoints() { return CompletableFuture.completedFuture(BOOTSTRAP_NODES); } - @Override public String getConnectionString() { + @Override + public String getConnectionString() { return "unimplemented"; } }; @@ -184,8 +185,8 @@ public static void setUpBeforeClass() { conf.setLong(REFRESH_INTERVAL_SECS_CONFIG_NAME, Integer.MAX_VALUE); conf.setLong(MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME, Integer.MAX_VALUE - 1); BOOTSTRAP_NODES = IntStream.range(0, 10) - .mapToObj(i -> ServerName.valueOf("localhost", (10000 + 100 * i), ServerName.NON_STARTCODE)) - .collect(Collectors.toSet()); + .mapToObj(i -> ServerName.valueOf("localhost", (10000 + 100 * i), ServerName.NON_STARTCODE)) + .collect(Collectors.toSet()); } @AfterClass @@ -229,7 +230,7 @@ public void testAllFailHedged3() throws IOException { @Test public void testFirstSucceededNoHedge() throws IOException { GOOD_RESP_INDEXS = - IntStream.range(0, 10).mapToObj(Integer::valueOf).collect(Collectors.toSet()); + IntStream.range(0, 10).mapToObj(Integer::valueOf).collect(Collectors.toSet()); // will be set to 1 try (AbstractRpcBasedConnectionRegistry registry = createRegistry(0)) { String clusterId = logIfError(registry.getClusterId()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java index 0fbf4bb07962..9d821331661c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,11 +43,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; // TODO: cover more test cases -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestScan { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScan.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestScan.class); @Test public void testAttributesSerialization() throws IOException { @@ -70,22 +69,14 @@ public void testAttributesSerialization() throws IOException { @Test public void testGetToScan() throws Exception { Get get = new Get(Bytes.toBytes(1)); - get.setCacheBlocks(true) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("get") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setRowOffsetPerColumnFamily(5) - .setTimeRange(0, 13) - .setAttribute("att_v0", Bytes.toBytes("att_v0")) - .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123) - .setReplicaId(3) - .setACL("test_user", new Permission(Permission.Action.READ)) - .setAuthorizations(new Authorizations("test_label")) - .setPriority(3); + get.setCacheBlocks(true).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("get").setIsolationLevel(IsolationLevel.READ_COMMITTED) + .setLoadColumnFamiliesOnDemand(false).setMaxResultsPerColumnFamily(1000).readVersions(9999) + .setRowOffsetPerColumnFamily(5).setTimeRange(0, 13) + .setAttribute("att_v0", Bytes.toBytes("att_v0")) + .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123).setReplicaId(3) + .setACL("test_user", new Permission(Permission.Action.READ)) + .setAuthorizations(new Authorizations("test_label")).setPriority(3); Scan scan = new Scan(get); assertEquals(get.getCacheBlocks(), scan.getCacheBlocks()); @@ -94,7 +85,7 @@ public void testGetToScan() throws Exception { assertEquals(get.getId(), scan.getId()); assertEquals(get.getIsolationLevel(), scan.getIsolationLevel()); assertEquals(get.getLoadColumnFamiliesOnDemandValue(), - scan.getLoadColumnFamiliesOnDemandValue()); + scan.getLoadColumnFamiliesOnDemandValue()); assertEquals(get.getMaxResultsPerColumnFamily(), scan.getMaxResultsPerColumnFamily()); assertEquals(get.getMaxVersions(), scan.getMaxVersions()); assertEquals(get.getRowOffsetPerColumnFamily(), scan.getRowOffsetPerColumnFamily()); @@ -102,9 +93,9 @@ public void testGetToScan() throws Exception { assertEquals(get.getTimeRange().getMax(), scan.getTimeRange().getMax()); assertTrue(Bytes.equals(get.getAttribute("att_v0"), scan.getAttribute("att_v0"))); assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin(), - scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin()); + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin()); assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax(), - scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax()); + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax()); assertEquals(get.getReplicaId(), scan.getReplicaId()); assertEquals(get.getACL(), scan.getACL()); assertEquals(get.getAuthorizations().getLabels(), scan.getAuthorizations().getLabels()); @@ -125,22 +116,22 @@ public void testScanAttributes() { scan.setAttribute("attribute1", Bytes.toBytes("value1")); 
Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttribute("attribute1"))); Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - scan.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), scan.getAttributesMap().get("attribute1"))); // overriding attribute value scan.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttribute("attribute1"))); Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - scan.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), scan.getAttributesMap().get("attribute1"))); // adding another attribute scan.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttribute("attribute2"))); Assert.assertEquals(2, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - scan.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), scan.getAttributesMap().get("attribute2"))); // removing attribute scan.setAttribute("attribute2", null); @@ -198,7 +189,7 @@ public void testSetStartRowAndSetStopRow() { scan.withStartRow(new byte[1]); scan.withStartRow(new byte[HConstants.MAX_ROW_LENGTH]); try { - scan.withStartRow(new byte[HConstants.MAX_ROW_LENGTH+1]); + scan.withStartRow(new byte[HConstants.MAX_ROW_LENGTH + 1]); fail("should've thrown exception"); } catch (IllegalArgumentException iae) { } catch (Exception e) { @@ -209,7 +200,7 @@ public void testSetStartRowAndSetStopRow() { scan.withStopRow(new byte[1]); scan.withStopRow(new byte[HConstants.MAX_ROW_LENGTH]); try { - scan.withStopRow(new byte[HConstants.MAX_ROW_LENGTH+1]); + scan.withStopRow(new byte[HConstants.MAX_ROW_LENGTH + 1]); fail("should've thrown exception"); } catch (IllegalArgumentException iae) { } catch (Exception e) { @@ -222,35 +213,17 @@ public void testScanCopyConstructor() throws Exception { Scan scan = new Scan(); scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) - .setACL("test_user", new Permission(Permission.Action.READ)) - .setAllowPartialResults(true) - .setAsyncPrefetch(false) - .setAttribute("test_key", Bytes.toBytes("test_value")) - .setAuthorizations(new Authorizations("test_label")) - .setBatch(10) - .setCacheBlocks(false) - .setCaching(10) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("scan_copy_constructor") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLimit(100) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultSize(100) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setMvccReadPoint(5) - .setNeedCursorResult(true) - .setPriority(1) - .setRaw(true) - .setReplicaId(3) - .setReversed(true) - .setRowOffsetPerColumnFamily(5) - .setStartStopRowForPrefixScan(Bytes.toBytes("row_")) - .setScanMetricsEnabled(true) - .setReadType(ReadType.STREAM) - .withStartRow(Bytes.toBytes("row_1")) - .withStopRow(Bytes.toBytes("row_2")) + .setACL("test_user", new Permission(Permission.Action.READ)).setAllowPartialResults(true) + .setAsyncPrefetch(false).setAttribute("test_key", Bytes.toBytes("test_value")) + .setAuthorizations(new Authorizations("test_label")).setBatch(10).setCacheBlocks(false) + .setCaching(10).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + 
.setId("scan_copy_constructor").setIsolationLevel(IsolationLevel.READ_COMMITTED) + .setLimit(100).setLoadColumnFamiliesOnDemand(false).setMaxResultSize(100) + .setMaxResultsPerColumnFamily(1000).readVersions(9999).setMvccReadPoint(5) + .setNeedCursorResult(true).setPriority(1).setRaw(true).setReplicaId(3).setReversed(true) + .setRowOffsetPerColumnFamily(5).setStartStopRowForPrefixScan(Bytes.toBytes("row_")) + .setScanMetricsEnabled(true).setReadType(ReadType.STREAM) + .withStartRow(Bytes.toBytes("row_1")).withStopRow(Bytes.toBytes("row_2")) .setTimeRange(0, 13); // create a copy of existing scan object diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java index 4b63f5b2168d..74bde5cb25b6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,26 +47,25 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestSimpleRequestController { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSimpleRequestController.class); - private static final TableName DUMMY_TABLE - = TableName.valueOf("DUMMY_TABLE"); + private static final TableName DUMMY_TABLE = TableName.valueOf("DUMMY_TABLE"); private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1"); private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2"); private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3"); private static final ServerName SN = ServerName.valueOf("s1,1,1"); private static final ServerName SN2 = ServerName.valueOf("s2,2,2"); private static final RegionInfo HRI1 = RegionInfoBuilder.newBuilder(DUMMY_TABLE) - .setStartKey(DUMMY_BYTES_1).setEndKey(DUMMY_BYTES_2).setRegionId(1).build(); + .setStartKey(DUMMY_BYTES_1).setEndKey(DUMMY_BYTES_2).setRegionId(1).build(); private static final RegionInfo HRI2 = RegionInfoBuilder.newBuilder(DUMMY_TABLE) - .setStartKey(DUMMY_BYTES_2).setEndKey(HConstants.EMPTY_END_ROW).setRegionId(2).build(); + .setStartKey(DUMMY_BYTES_2).setEndKey(HConstants.EMPTY_END_ROW).setRegionId(2).build(); private static final RegionInfo HRI3 = RegionInfoBuilder.newBuilder(DUMMY_TABLE) - .setStartKey(DUMMY_BYTES_3).setEndKey(HConstants.EMPTY_END_ROW).setRegionId(3).build(); + .setStartKey(DUMMY_BYTES_3).setEndKey(HConstants.EMPTY_END_ROW).setRegionId(3).build(); private static final HRegionLocation LOC1 = new HRegionLocation(HRI1, SN); private static final HRegionLocation LOC2 = new HRegionLocation(HRI2, SN); private static final HRegionLocation LOC3 = new HRegionLocation(HRI3, SN2); @@ -124,11 +123,9 @@ public void testTaskCheckerHost() throws IOException { final Map taskCounterPerServer = new HashMap<>(); final Map taskCounterPerRegion = new HashMap<>(); SimpleRequestController.TaskCountChecker countChecker = - new SimpleRequestController.TaskCountChecker( - maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, taskCounterPerServer, 
taskCounterPerRegion); + new SimpleRequestController.TaskCountChecker(maxTotalConcurrentTasks, + maxConcurrentTasksPerServer, maxConcurrentTasksPerRegion, tasksInProgress, + taskCounterPerServer, taskCounterPerRegion); final long maxHeapSizePerRequest = 2 * 1024 * 1024; // unlimiited SimpleRequestController.RequestHeapSizeChecker sizeChecker = @@ -165,8 +162,8 @@ public void testTaskCheckerHost() throws IOException { @Test public void testRequestHeapSizeChecker() throws IOException { final long maxHeapSizePerRequest = 2 * 1024 * 1024; - SimpleRequestController.RequestHeapSizeChecker checker - = new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); + SimpleRequestController.RequestHeapSizeChecker checker = + new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); // inner state is unchanged. for (int i = 0; i != 10; ++i) { @@ -207,10 +204,10 @@ public void testRequestHeapSizeChecker() throws IOException { @Test public void testRequestRowsChecker() throws IOException { final long maxRowCount = 100; - SimpleRequestController.RequestRowsChecker checker - = new SimpleRequestController.RequestRowsChecker(maxRowCount); + SimpleRequestController.RequestRowsChecker checker = + new SimpleRequestController.RequestRowsChecker(maxRowCount); - final long heapSizeOfRow = 100; //unused + final long heapSizeOfRow = 100; // unused // inner state is unchanged. for (int i = 0; i != 10; ++i) { ReturnCode code = checker.canTakeOperation(LOC1, heapSizeOfRow); @@ -252,8 +249,8 @@ public void testRequestRowsChecker() throws IOException { @Test public void testSubmittedSizeChecker() { final long maxHeapSizeSubmit = 2 * 1024 * 1024; - SimpleRequestController.SubmittedSizeChecker checker - = new SimpleRequestController.SubmittedSizeChecker(maxHeapSizeSubmit); + SimpleRequestController.SubmittedSizeChecker checker = + new SimpleRequestController.SubmittedSizeChecker(maxHeapSizeSubmit); for (int i = 0; i != 10; ++i) { ReturnCode include = checker.canTakeOperation(LOC1, 100000); @@ -289,10 +286,8 @@ public void testTaskCountChecker() throws InterruptedIOException { Map taskCounterPerServer = new HashMap<>(); Map taskCounterPerRegion = new HashMap<>(); SimpleRequestController.TaskCountChecker checker = new SimpleRequestController.TaskCountChecker( - maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, taskCounterPerServer, taskCounterPerRegion); + maxTotalConcurrentTasks, maxConcurrentTasksPerServer, maxConcurrentTasksPerRegion, + tasksInProgress, taskCounterPerServer, taskCounterPerRegion); // inner state is unchanged. 
for (int i = 0; i != 10; ++i) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index a9b7cd99fe37..0cb80c14ce82 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -49,7 +49,7 @@ public class TestTableDescriptorBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableDescriptorBuilder.class); + HBaseClassTestRule.forClass(TestTableDescriptorBuilder.class); private static final Logger LOG = LoggerFactory.getLogger(TestTableDescriptorBuilder.class); @@ -60,15 +60,15 @@ public class TestTableDescriptorBuilder { public void testAddCoprocessorTwice() throws IOException { String cpName = "a.b.c.d"; TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setCoprocessor(cpName) - .setCoprocessor(cpName).build(); + .setCoprocessor(cpName).build(); } @Test public void testPb() throws DeserializationException, IOException { final int v = 123; TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setMaxFileSize(v) - .setDurability(Durability.ASYNC_WAL).setReadOnly(true).setRegionReplication(2).build(); + TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setMaxFileSize(v) + .setDurability(Durability.ASYNC_WAL).setReadOnly(true).setRegionReplication(2).build(); byte[] bytes = TableDescriptorBuilder.toByteArray(htd); TableDescriptor deserializedHtd = TableDescriptorBuilder.parseFrom(bytes); @@ -88,16 +88,16 @@ public void testGetSetRemoveCP() throws Exception { // simple CP String className = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())).setCoprocessor(className) // add and - // check that - // it is - // present - .build(); + .newBuilder(TableName.valueOf(name.getMethodName())).setCoprocessor(className) // add and + // check that + // it is + // present + .build(); assertTrue(desc.hasCoprocessor(className)); desc = TableDescriptorBuilder.newBuilder(desc).removeCoprocessor(className) // remove it and // check that it is // gone - .build(); + .build(); assertFalse(desc.hasCoprocessor(className)); } @@ -108,7 +108,7 @@ public void testGetSetRemoveCP() throws Exception { @Test public void testSetListRemoveCP() throws Exception { TableDescriptor desc = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); // Check that any coprocessor is present. assertTrue(desc.getCoprocessorDescriptors().isEmpty()); @@ -119,33 +119,33 @@ public void testSetListRemoveCP() throws Exception { desc = TableDescriptorBuilder.newBuilder(desc).setCoprocessor(className1).build(); assertTrue(desc.getCoprocessorDescriptors().size() == 1); assertTrue(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className1))); + .anyMatch(name -> name.equals(className1))); // Add the 2nd coprocessor and check if present. 
// remove it and check that it is gone desc = TableDescriptorBuilder.newBuilder(desc) - .setCoprocessor(className2).build(); + .setCoprocessor(className2).build(); assertTrue(desc.getCoprocessorDescriptors().size() == 2); assertTrue(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className2))); + .anyMatch(name -> name.equals(className2))); // Remove one and check desc = TableDescriptorBuilder.newBuilder(desc) - .removeCoprocessor(className1).build(); + .removeCoprocessor(className1).build(); assertTrue(desc.getCoprocessorDescriptors().size() == 1); assertFalse(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className1))); + .anyMatch(name -> name.equals(className1))); assertTrue(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className2))); + .anyMatch(name -> name.equals(className2))); // Remove the last and check desc = TableDescriptorBuilder.newBuilder(desc) - .removeCoprocessor(className2).build(); + .removeCoprocessor(className2).build(); assertTrue(desc.getCoprocessorDescriptors().isEmpty()); assertFalse(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className1))); + .anyMatch(name -> name.equals(className1))); assertFalse(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className2))); + .anyMatch(name -> name.equals(className2))); } /** @@ -155,9 +155,8 @@ public void testSetListRemoveCP() throws Exception { @Test(expected = IllegalArgumentException.class) public void testRemoveNonExistingCoprocessor() throws Exception { String className = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertFalse(desc.hasCoprocessor(className)); TableDescriptorBuilder.newBuilder(desc).removeCoprocessor(className).build(); } @@ -170,19 +169,19 @@ public void testRemoveString() { byte[] key = Bytes.toBytes("Some"); byte[] value = Bytes.toBytes("value"); TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())).setValue(key, value).build(); + .newBuilder(TableName.valueOf(name.getMethodName())).setValue(key, value).build(); assertTrue(Bytes.equals(value, desc.getValue(key))); desc = TableDescriptorBuilder.newBuilder(desc).removeValue(key).build(); assertTrue(desc.getValue(key) == null); } String[] legalTableNames = { "foo", "with-dash_under.dot", "_under_start_ok", - "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02", - "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2", - "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02" }; + "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02", + "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2", + "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02" }; String[] illegalTableNames = { ".dot_start_illegal", "-dash_start_illegal", "spaces not ok", - "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash", - 
"new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2" }; + "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash", + "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2" }; @Test public void testLegalTableNames() { @@ -226,17 +225,17 @@ public void testIllegalTableNamesRegex() { @Test public void testGetMaxFileSize() { TableDescriptor desc = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertEquals(-1, desc.getMaxFileSize()); desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setMaxFileSize(1111L).build(); + .setMaxFileSize(1111L).build(); assertEquals(1111L, desc.getMaxFileSize()); } @Test public void testSetMaxFileSize() throws HBaseException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); String maxFileSize = "1073741824"; builder.setMaxFileSize(maxFileSize); @@ -266,17 +265,17 @@ public void testSetMaxFileSize() throws HBaseException { @Test public void testGetMemStoreFlushSize() { TableDescriptor desc = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertEquals(-1, desc.getMemStoreFlushSize()); desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setMemStoreFlushSize(1111L).build(); + .setMemStoreFlushSize(1111L).build(); assertEquals(1111L, desc.getMemStoreFlushSize()); } @Test public void testSetMemStoreFlushSize() throws HBaseException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); String memstoreFlushSize = "1073741824"; builder.setMemStoreFlushSize(memstoreFlushSize); @@ -309,14 +308,14 @@ public void testClassMethodsAreBuilderStyle() { public void testModifyFamily() { byte[] familyName = Bytes.toBytes("cf"); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(1000).setDFSReplication((short) 3).build(); + .setBlocksize(1000).setDFSReplication((short) 3).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(hcd).build(); + .setColumnFamily(hcd).build(); assertEquals(1000, htd.getColumnFamily(familyName).getBlocksize()); assertEquals(3, htd.getColumnFamily(familyName).getDFSReplication()); hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(2000) - .setDFSReplication((short) 1).build(); + .setDFSReplication((short) 1).build(); htd = TableDescriptorBuilder.newBuilder(htd).modifyColumnFamily(hcd).build(); assertEquals(2000, htd.getColumnFamily(familyName).getBlocksize()); assertEquals(1, htd.getColumnFamily(familyName).getDFSReplication()); @@ -326,16 +325,16 @@ public void testModifyFamily() { public void testModifyInexistentFamily() { byte[] familyName = Bytes.toBytes("cf"); TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .modifyColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName)).build(); + .modifyColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName)).build(); } @Test(expected = IllegalArgumentException.class) public void testAddDuplicateFamilies() { byte[] familyName = 
Bytes.toBytes("cf"); ColumnFamilyDescriptor hcd = - ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(1000).build(); + ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(1000).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(hcd).build(); + .setColumnFamily(hcd).build(); assertEquals(1000, htd.getColumnFamily(familyName).getBlocksize()); hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(2000).build(); // add duplicate column @@ -345,7 +344,7 @@ public void testAddDuplicateFamilies() { @Test public void testPriority() { TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setPriority(42).build(); + .setPriority(42).build(); assertEquals(42, htd.getPriority()); } @@ -353,27 +352,22 @@ public void testPriority() { public void testStringCustomizedValues() throws HBaseException { byte[] familyName = Bytes.toBytes("cf"); ColumnFamilyDescriptor hcd = - ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(131072).build(); - TableDescriptor htd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(hcd).setDurability(Durability.ASYNC_WAL).build(); + ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(131072).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(hcd).setDurability(Durability.ASYNC_WAL).build(); assertEquals( - "'testStringCustomizedValues', " + - "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, " - + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", + "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, " + + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", htd.toStringCustomizedValues()); - htd = TableDescriptorBuilder.newBuilder(htd) - .setMaxFileSize("10737942528") - .setMemStoreFlushSize("256MB") - .build(); + htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize("10737942528") + .setMemStoreFlushSize("256MB").build(); assertEquals( - "'testStringCustomizedValues', " + - "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " - + "MAX_FILESIZE => '10737942528 B (10GB 512KB)', " - + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, " - + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", + "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " + + "MAX_FILESIZE => '10737942528 B (10GB 512KB)', " + + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, " + + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", htd.toStringCustomizedValues()); } @@ -390,7 +384,7 @@ public void testGetSetRegionServerGroup() { @Test public void testSetEmptyValue() { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); String testValue = "TestValue"; // test setValue builder.setValue(testValue, "2"); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java index 44d199764c5b..532d4a805db2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) 
under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import static org.junit.Assert.assertEquals; import java.util.Arrays; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableDescriptorUtils.TableDescriptorDelta; @@ -43,32 +41,25 @@ public void testDelta() { ColumnFamilyDescriptor cf2 = ColumnFamilyDescriptorBuilder.of("cf2"); ColumnFamilyDescriptor cf3 = ColumnFamilyDescriptorBuilder.of("cf3"); ColumnFamilyDescriptor cf4 = ColumnFamilyDescriptorBuilder.of("cf4"); - TableDescriptor td = TableDescriptorBuilder - .newBuilder(TableName.valueOf("test")) - .setColumnFamilies(Arrays.asList(cf1, cf2, cf3, cf4)) - .build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("test")) + .setColumnFamilies(Arrays.asList(cf1, cf2, cf3, cf4)).build(); TableDescriptorDelta selfCompare = TableDescriptorUtils.computeDelta(td, td); assertEquals(0, selfCompare.getColumnsAdded().size()); assertEquals(0, selfCompare.getColumnsDeleted().size()); assertEquals(0, selfCompare.getColumnsModified().size()); - ColumnFamilyDescriptor modCf2 = ColumnFamilyDescriptorBuilder - .newBuilder(cf2).setMaxVersions(5).build(); - ColumnFamilyDescriptor modCf3 = ColumnFamilyDescriptorBuilder - .newBuilder(cf3).setMaxVersions(5).build(); + ColumnFamilyDescriptor modCf2 = + ColumnFamilyDescriptorBuilder.newBuilder(cf2).setMaxVersions(5).build(); + ColumnFamilyDescriptor modCf3 = + ColumnFamilyDescriptorBuilder.newBuilder(cf3).setMaxVersions(5).build(); ColumnFamilyDescriptor cf5 = ColumnFamilyDescriptorBuilder.of("cf5"); ColumnFamilyDescriptor cf6 = ColumnFamilyDescriptorBuilder.of("cf6"); ColumnFamilyDescriptor cf7 = ColumnFamilyDescriptorBuilder.of("cf7"); - TableDescriptor newTd = TableDescriptorBuilder - .newBuilder(td) - .removeColumnFamily(Bytes.toBytes("cf1")) - .modifyColumnFamily(modCf2) - .modifyColumnFamily(modCf3) - .setColumnFamily(cf5) - .setColumnFamily(cf6) - .setColumnFamily(cf7) - .build(); + TableDescriptor newTd = + TableDescriptorBuilder.newBuilder(td).removeColumnFamily(Bytes.toBytes("cf1")) + .modifyColumnFamily(modCf2).modifyColumnFamily(modCf3).setColumnFamily(cf5) + .setColumnFamily(cf6).setColumnFamily(cf7).build(); TableDescriptorDelta delta = TableDescriptorUtils.computeDelta(td, newTd); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java index 2c7061259f90..df6ffbbb2b18 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java @@ -75,56 +75,43 @@ private static void populateChildren(final Map spansById) { } private static List findRoots(final Map spansById) { - return spansById.values() - .stream() - .filter(node -> Objects.equals(node.spanData.getParentSpanId(), SpanId.getInvalid())) - .collect(Collectors.toList()); + return spansById.values().stream() + .filter(node -> Objects.equals(node.spanData.getParentSpanId(), SpanId.getInvalid())) + .collect(Collectors.toList()); } public void render(final Consumer writer) { - for (ListIterator iter = graphs.listIterator(); iter.hasNext(); ) { + for (ListIterator iter = graphs.listIterator(); iter.hasNext();) { final int idx = iter.nextIndex(); final Node node = iter.next(); 
render(writer, node, 0, idx == 0); } } - private static void render( - final Consumer writer, - final Node node, - final int indent, - final boolean isFirst - ) { + private static void render(final Consumer writer, final Node node, final int indent, + final boolean isFirst) { writer.accept(render(node.spanData, indent, isFirst)); final List children = new ArrayList<>(node.children.values()); - for (ListIterator iter = children.listIterator(); iter.hasNext(); ) { + for (ListIterator iter = children.listIterator(); iter.hasNext();) { final int idx = iter.nextIndex(); final Node child = iter.next(); render(writer, child, indent + 2, idx == 0); } } - private static String render( - final SpanData spanData, - final int indent, - final boolean isFirst - ) { + private static String render(final SpanData spanData, final int indent, final boolean isFirst) { final StringBuilder sb = new StringBuilder(); for (int i = 0; i < indent; i++) { sb.append(' '); } - return sb.append(isFirst ? "└─ " : "├─ ") - .append(render(spanData)) - .toString(); + return sb.append(isFirst ? "└─ " : "├─ ").append(render(spanData)).toString(); } private static String render(final SpanData spanData) { return new ToStringBuilder(spanData, ToStringStyle.NO_CLASS_NAME_STYLE) - .append("spanId", spanData.getSpanId()) - .append("name", spanData.getName()) - .append("hasEnded", spanData.hasEnded()) - .toString(); + .append("spanId", spanData.getSpanId()).append("name", spanData.getName()) + .append("hasEnded", spanData.hasEnded()).toString(); } private static class Node { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java index c7bb205076cd..1a2d6ea4ccdc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java @@ -20,6 +20,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasProperty; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import java.util.Arrays; @@ -32,12 +33,11 @@ */ public final class AttributesMatchers { - private AttributesMatchers() { } + private AttributesMatchers() { + } - public static Matcher containsEntry( - Matcher> keyMatcher, - Matcher valueMatcher - ) { + public static Matcher containsEntry(Matcher> keyMatcher, + Matcher valueMatcher) { return new IsAttributesContaining<>(keyMatcher, valueMatcher); } @@ -53,10 +53,8 @@ public static Matcher containsEntryWithStringValuesOf(String key, St return containsEntry(AttributeKey.stringArrayKey(key), Arrays.asList(values)); } - public static Matcher containsEntryWithStringValuesOf( - String key, - Matcher> matcher - ) { + public static Matcher containsEntryWithStringValuesOf(String key, + Matcher> matcher) { return new IsAttributesContaining<>(equalTo(AttributeKey.stringArrayKey(key)), matcher); } @@ -64,37 +62,28 @@ private static final class IsAttributesContaining extends TypeSafeMatcher> keyMatcher; private final Matcher valueMatcher; - private IsAttributesContaining( - final Matcher> keyMatcher, - final Matcher valueMatcher - ) { + private IsAttributesContaining(final Matcher> keyMatcher, + final Matcher valueMatcher) { this.keyMatcher = keyMatcher; this.valueMatcher = valueMatcher; } @Override protected boolean 
matchesSafely(Attributes item) { - return item.asMap().entrySet().stream().anyMatch(e -> allOf( - hasProperty("key", keyMatcher), - hasProperty("value", valueMatcher)) - .matches(e)); + return item.asMap().entrySet().stream().anyMatch( + e -> allOf(hasProperty("key", keyMatcher), hasProperty("value", valueMatcher)).matches(e)); } @Override public void describeMismatchSafely(Attributes item, Description mismatchDescription) { - mismatchDescription - .appendText("Attributes was ") - .appendValueList("[", ", ", "]", item.asMap().entrySet()); + mismatchDescription.appendText("Attributes was ").appendValueList("[", ", ", "]", + item.asMap().entrySet()); } @Override public void describeTo(Description description) { - description - .appendText("Attributes containing [") - .appendDescriptionOf(keyMatcher) - .appendText("->") - .appendDescriptionOf(valueMatcher) - .appendText("]"); + description.appendText("Attributes containing [").appendDescriptionOf(keyMatcher) + .appendText("->").appendDescriptionOf(valueMatcher).appendText("]"); } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java index e24245fb4c62..106e52cc0f91 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client.trace.hamcrest; import static org.hamcrest.Matchers.equalTo; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.sdk.trace.data.EventData; import org.hamcrest.FeatureMatcher; @@ -28,12 +29,14 @@ */ public final class EventMatchers { - private EventMatchers() { } + private EventMatchers() { + } public static Matcher hasAttributes(Matcher matcher) { - return new FeatureMatcher( - matcher, "EventData having attributes that ", "attributes") { - @Override protected Attributes featureValueOf(EventData actual) { + return new FeatureMatcher(matcher, "EventData having attributes that ", + "attributes") { + @Override + protected Attributes featureValueOf(EventData actual) { return actual.getAttributes(); } }; @@ -45,7 +48,8 @@ public static Matcher hasName(String name) { public static Matcher hasName(Matcher matcher) { return new FeatureMatcher(matcher, "EventData with a name that ", "name") { - @Override protected String featureValueOf(EventData actual) { + @Override + protected String featureValueOf(EventData actual) { return actual.getName(); } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java index 026deb0afe45..c7a9d9029fc9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.client.trace.hamcrest.AttributesMatchers.containsEntry; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; @@ -39,21 +40,22 @@ */ public final class SpanDataMatchers { - private SpanDataMatchers() { } + private SpanDataMatchers() { + } public static Matcher 
hasAttributes(Matcher matcher) { - return new FeatureMatcher( - matcher, "SpanData having attributes that ", "attributes" - ) { - @Override protected Attributes featureValueOf(SpanData item) { + return new FeatureMatcher(matcher, "SpanData having attributes that ", + "attributes") { + @Override + protected Attributes featureValueOf(SpanData item) { return item.getAttributes(); } }; } public static Matcher hasDuration(Matcher matcher) { - return new FeatureMatcher( - matcher, "SpanData having duration that ", "duration") { + return new FeatureMatcher(matcher, "SpanData having duration that ", + "duration") { @Override protected Duration featureValueOf(SpanData item) { return Duration.ofNanos(item.getEndEpochNanos() - item.getStartEpochNanos()); @@ -63,19 +65,23 @@ protected Duration featureValueOf(SpanData item) { public static Matcher hasEnded() { return new TypeSafeMatcher() { - @Override protected boolean matchesSafely(SpanData item) { + @Override + protected boolean matchesSafely(SpanData item) { return item.hasEnded(); } - @Override public void describeTo(Description description) { + + @Override + public void describeTo(Description description) { description.appendText("SpanData that hasEnded"); } }; } public static Matcher hasEvents(Matcher> matcher) { - return new FeatureMatcher>( - matcher, "SpanData having events that", "events") { - @Override protected Iterable featureValueOf(SpanData item) { + return new FeatureMatcher>(matcher, + "SpanData having events that", "events") { + @Override + protected Iterable featureValueOf(SpanData item) { return item.getEvents(); } }; @@ -87,22 +93,21 @@ public static Matcher hasExceptionWithType(Matcher mat public static Matcher hasException(Matcher matcher) { return new FeatureMatcher(matcher, - "SpanData having Exception with Attributes that", "exception attributes") { - @Override protected Attributes featureValueOf(SpanData actual) { - return actual.getEvents() - .stream() - .filter(e -> Objects.equals(SemanticAttributes.EXCEPTION_EVENT_NAME, e.getName())) - .map(EventData::getAttributes) - .findFirst() - .orElse(null); + "SpanData having Exception with Attributes that", "exception attributes") { + @Override + protected Attributes featureValueOf(SpanData actual) { + return actual.getEvents().stream() + .filter(e -> Objects.equals(SemanticAttributes.EXCEPTION_EVENT_NAME, e.getName())) + .map(EventData::getAttributes).findFirst().orElse(null); } }; } public static Matcher hasKind(SpanKind kind) { - return new FeatureMatcher( - equalTo(kind), "SpanData with kind that", "SpanKind") { - @Override protected SpanKind featureValueOf(SpanData item) { + return new FeatureMatcher(equalTo(kind), "SpanData with kind that", + "SpanKind") { + @Override + protected SpanKind featureValueOf(SpanData item) { return item.getKind(); } }; @@ -114,7 +119,8 @@ public static Matcher hasName(String name) { public static Matcher hasName(Matcher matcher) { return new FeatureMatcher(matcher, "SpanKind with a name that", "name") { - @Override protected String featureValueOf(SpanData item) { + @Override + protected String featureValueOf(SpanData item) { return item.getName(); } }; @@ -130,9 +136,9 @@ public static Matcher hasParentSpanId(SpanData parent) { public static Matcher hasParentSpanId(Matcher matcher) { return new FeatureMatcher(matcher, "SpanKind with a parentSpanId that", - "parentSpanId" - ) { - @Override protected String featureValueOf(SpanData item) { + "parentSpanId") { + @Override + protected String featureValueOf(SpanData item) { return 
item.getParentSpanId(); } }; @@ -141,13 +147,15 @@ public static Matcher hasParentSpanId(Matcher matcher) { public static Matcher hasStatusWithCode(StatusCode statusCode) { final Matcher matcher = is(equalTo(statusCode)); return new TypeSafeMatcher() { - @Override protected boolean matchesSafely(SpanData item) { + @Override + protected boolean matchesSafely(SpanData item) { final StatusData statusData = item.getStatus(); - return statusData != null - && statusData.getStatusCode() != null - && matcher.matches(statusData.getStatusCode()); + return statusData != null && statusData.getStatusCode() != null + && matcher.matches(statusData.getStatusCode()); } - @Override public void describeTo(Description description) { + + @Override + public void describeTo(Description description) { description.appendText("SpanData with StatusCode that ").appendDescriptionOf(matcher); } }; @@ -158,9 +166,10 @@ public static Matcher hasTraceId(String traceId) { } public static Matcher hasTraceId(Matcher matcher) { - return new FeatureMatcher( - matcher, "SpanData with a traceId that ", "traceId") { - @Override protected String featureValueOf(SpanData item) { + return new FeatureMatcher(matcher, "SpanData with a traceId that ", + "traceId") { + @Override + protected String featureValueOf(SpanData item) { return item.getTraceId(); } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java index 98f1ffd9c913..3b5e453856ec 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.trace.hamcrest; import static org.apache.hadoop.hbase.client.trace.hamcrest.AttributesMatchers.containsEntry; import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasAttributes; import static org.hamcrest.Matchers.allOf; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.sdk.trace.data.SpanData; import org.apache.hadoop.hbase.TableName; @@ -29,24 +29,23 @@ public final class TraceTestUtil { - private TraceTestUtil() { } + private TraceTestUtil() { + } /** * All {@link Span}s involving {@code conn} should include these attributes. */ public static Matcher buildConnectionAttributesMatcher(AsyncConnectionImpl conn) { - return hasAttributes(allOf( - containsEntry("db.system", "hbase"), - containsEntry("db.connection_string", "nothing"), - containsEntry("db.user", conn.getUser().toString()))); + return hasAttributes( + allOf(containsEntry("db.system", "hbase"), containsEntry("db.connection_string", "nothing"), + containsEntry("db.user", conn.getUser().toString()))); } /** * All {@link Span}s involving {@code tableName} should include these attributes. 
*/ public static Matcher buildTableAttributesMatcher(TableName tableName) { - return hasAttributes(allOf( - containsEntry("db.name", tableName.getNamespaceAsString()), + return hasAttributes(allOf(containsEntry("db.name", tableName.getNamespaceAsString()), containsEntry("db.hbase.table", tableName.getNameAsString()))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java index 275fb0931aec..fd2cd40421b7 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java index 868f3b7fda43..7ad129f8e417 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestComparators { @ClassRule @@ -105,55 +105,55 @@ public void testCellFieldsCompare() throws Exception { assertFalse(PrivateCellUtil.qualifierStartsWith(kv, q2)); assertFalse(PrivateCellUtil.qualifierStartsWith(kv, Bytes.toBytes("longerthanthequalifier"))); - //Binary component comparisons + // Binary component comparisons byte[] val = Bytes.toBytes("abcd"); kv = new KeyValue(r0, f, q1, val); buffer = ByteBuffer.wrap(kv.getBuffer()); bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); - //equality check - //row comparison - //row is "row0"(set by variable r0) - //and we are checking for equality to 'o' at position 1 - //'r' is at position 0. + // equality check + // row comparison + // row is "row0"(set by variable r0) + // and we are checking for equality to 'o' at position 1 + // 'r' is at position 0. byte[] component = Bytes.toBytes("o"); comparable = new BinaryComponentComparator(component, 1); assertEquals(0, PrivateCellUtil.compareRow(bbCell, comparable)); assertEquals(0, PrivateCellUtil.compareRow(kv, comparable)); - //value comparison - //value is "abcd"(set by variable val). - //and we are checking for equality to 'c' at position 2. - //'a' is at position 0. + // value comparison + // value is "abcd"(set by variable val). + // and we are checking for equality to 'c' at position 2. + // 'a' is at position 0. 
component = Bytes.toBytes("c"); comparable = new BinaryComponentComparator(component, 2); - assertEquals(0,PrivateCellUtil.compareValue(bbCell, comparable)); - assertEquals(0,PrivateCellUtil.compareValue(kv, comparable)); + assertEquals(0, PrivateCellUtil.compareValue(bbCell, comparable)); + assertEquals(0, PrivateCellUtil.compareValue(kv, comparable)); - //greater than + // greater than component = Bytes.toBytes("z"); - //checking for greater than at position 1. - //for both row("row0") and value("abcd") - //'z' > 'r' + // checking for greater than at position 1. + // for both row("row0") and value("abcd") + // 'z' > 'r' comparable = new BinaryComponentComparator(component, 1); - //row comparison + // row comparison assertTrue(PrivateCellUtil.compareRow(bbCell, comparable) > 0); assertTrue(PrivateCellUtil.compareRow(kv, comparable) > 0); - //value comparison - //'z' > 'a' + // value comparison + // 'z' > 'a' assertTrue(PrivateCellUtil.compareValue(bbCell, comparable) > 0); assertTrue(PrivateCellUtil.compareValue(kv, comparable) > 0); - //less than + // less than component = Bytes.toBytes("a"); - //checking for less than at position 1 for row ("row0") + // checking for less than at position 1 for row ("row0") comparable = new BinaryComponentComparator(component, 1); - //row comparison - //'a' < 'r' + // row comparison + // 'a' < 'r' assertTrue(PrivateCellUtil.compareRow(bbCell, comparable) < 0); assertTrue(PrivateCellUtil.compareRow(kv, comparable) < 0); - //value comparison - //checking for less than at position 2 for value("abcd") - //'a' < 'c' + // value comparison + // checking for less than at position 2 for value("abcd") + // 'a' < 'c' comparable = new BinaryComponentComparator(component, 2); assertTrue(PrivateCellUtil.compareValue(bbCell, comparable) < 0); assertTrue(PrivateCellUtil.compareValue(kv, comparable) < 0); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java index df63523b24cd..90d5e2d1d980 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -64,12 +64,10 @@ public void testKeyOnly() throws Exception { byte[] q = Bytes.toBytes("qual1"); byte[] v = Bytes.toBytes("val1"); byte[] tags = Bytes.toBytes("tag1"); - KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, - v.length, tags); + KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, v.length, tags); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); - ByteBufferKeyValue bbCell = new ByteBufferKeyValue(buffer, 0, - buffer.remaining()); + ByteBufferKeyValue bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); // KV format: // Rebuild as: <0:4> @@ -86,41 +84,34 @@ public void testKeyOnly() throws Exception { KeyValue KeyOnlyKeyValue = new KeyValue(newBuffer); KeyOnlyCell keyOnlyCell = new KeyOnlyCell(kv, lenAsVal); - KeyOnlyByteBufferExtendedCell keyOnlyByteBufferedCell = new KeyOnlyByteBufferExtendedCell( - bbCell, lenAsVal); + KeyOnlyByteBufferExtendedCell keyOnlyByteBufferedCell = + new KeyOnlyByteBufferExtendedCell(bbCell, lenAsVal); assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyCell)); assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingFamily(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(CellUtil - .matchingFamily(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingFamily(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, - keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(KeyOnlyKeyValue.getValueLength() == keyOnlyByteBufferedCell - .getValueLength()); + assertTrue(KeyOnlyKeyValue.getValueLength() == keyOnlyByteBufferedCell.getValueLength()); assertEquals(8 + keyLen + (lenAsVal ? 4 : 0), KeyOnlyKeyValue.getSerializedSize()); assertEquals(8 + keyLen + (lenAsVal ? 
4 : 0), keyOnlyCell.getSerializedSize()); if (keyOnlyByteBufferedCell.getValueLength() > 0) { - assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, - keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); } assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyCell.getTimestamp()); - assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyByteBufferedCell - .getTimestamp()); + assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyByteBufferedCell.getTimestamp()); assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyCell.getTypeByte()); - assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyByteBufferedCell - .getTypeByte()); + assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyByteBufferedCell.getTypeByte()); assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyCell.getTagsLength()); - assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyByteBufferedCell - .getTagsLength()); + assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyByteBufferedCell.getTagsLength()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java index 60c8cd084997..6e1e89634a36 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,12 +33,12 @@ public class TestLongComparator { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestLongComparator.class); - private long[] values = { Long.MIN_VALUE, -10000000000L, -1000000L, 0L, 1000000L, 10000000000L, - Long.MAX_VALUE }; + private long[] values = + { Long.MIN_VALUE, -10000000000L, -1000000L, 0L, 1000000L, 10000000000L, Long.MAX_VALUE }; @Test public void testSimple() { - for (int i = 1; i < values.length ; i++) { + for (int i = 1; i < values.length; i++) { for (int j = 0; j < i; j++) { LongComparator cp = new LongComparator(values[i]); assertEquals(1, cp.compareTo(Bytes.toBytes(values[j]))); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java index 62eba1ecea5c..f0d2b55ca188 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -82,8 +82,8 @@ static void doBuildCellBlockUndoCellBlock(final CellBlockBuilder builder, final CellScanner cellScanner = sized ? 
getSizedCellScanner(cells) : CellUtil.createCellScanner(Arrays.asList(cells).iterator()); ByteBuffer bb = builder.buildCellBlock(codec, compressor, cellScanner); - cellScanner = builder.createCellScannerReusingBuffers(codec, compressor, - new SingleByteBuff(bb)); + cellScanner = + builder.createCellScannerReusingBuffers(codec, compressor, new SingleByteBuff(bb)); int i = 0; while (cellScanner.advance()) { i++; @@ -176,7 +176,6 @@ private static void timerTest(final CellBlockBuilder builder, final StopWatch ti /** * For running a few tests of methods herein. - * * @param args the arguments to use for the timer test * @throws IOException if creating the build fails */ diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java index 48a079d3e75b..33b41ef9f969 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,15 +33,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestConnectionId { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionId.class); + HBaseClassTestRule.forClass(TestConnectionId.class); private Configuration testConfig = HBaseConfiguration.create(); - private User testUser1 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); - private User testUser2 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); + private User testUser1 = + User.createUserForTesting(testConfig, "test", new String[] { "testgroup" }); + private User testUser2 = + User.createUserForTesting(testConfig, "test", new String[] { "testgroup" }); private String serviceName = "test"; private Address address = Address.fromParts("localhost", 999); private ConnectionId connectionId1 = new ConnectionId(testUser1, serviceName, address); @@ -71,9 +73,8 @@ public void testToString() { } /** - * Test if the over-ridden equals method satisfies all the properties - * (reflexive, symmetry, transitive and null) - * along with their hashcode + * Test if the over-ridden equals method satisfies all the properties (reflexive, symmetry, + * transitive and null) along with their hashcode */ @Test public void testEqualsWithHashCode() { @@ -87,8 +88,8 @@ public void testEqualsWithHashCode() { // Test the Transitive Property ConnectionId connectionId3 = new ConnectionId(testUser1, serviceName, address); - assertTrue(connectionId1.equals(connectionId) && connectionId.equals(connectionId3) && - connectionId1.equals(connectionId3)); + assertTrue(connectionId1.equals(connectionId) && connectionId.equals(connectionId3) + && connectionId1.equals(connectionId3)); assertEquals(connectionId.hashCode(), connectionId3.hashCode()); // Test For null @@ -99,8 +100,8 @@ public void testEqualsWithHashCode() { } /** - * Test the hashcode for same object and different object with both hashcode - * function and static hashcode function + * Test the hashcode for same object and different object with both hashcode function and static + * hashcode function */ @Test public void 
testHashCode() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java index dc94e91f4fde..1a41710aaeec 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestFailedServersLog { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFailedServersLog.class); + HBaseClassTestRule.forClass(TestFailedServersLog.class); static final int TEST_PORT = 9999; @@ -58,14 +58,14 @@ public void setup() { when(mockAppender.getName()).thenReturn("mockAppender"); when(mockAppender.isStarted()).thenReturn(true); ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger(FailedServers.class)).addAppender(mockAppender); + .getLogger(FailedServers.class)).addAppender(mockAppender); } @After public void teardown() { ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger(FailedServers.class)).removeAppender(mockAppender); + .getLogger(FailedServers.class)).removeAppender(mockAppender); } @Test @@ -77,7 +77,7 @@ public void testAddToFailedServersLogging() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { org.apache.logging.log4j.core.LogEvent logEvent = - invocation.getArgument(0, org.apache.logging.log4j.core.LogEvent.class); + invocation.getArgument(0, org.apache.logging.log4j.core.LogEvent.class); level.set(logEvent.getLevel()); msg.set(logEvent.getMessage().getFormattedMessage()); return null; @@ -92,7 +92,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { verify(mockAppender, times(1)).append(any(org.apache.logging.log4j.core.LogEvent.class)); assertEquals(org.apache.logging.log4j.Level.DEBUG, level.get()); - assertEquals("Added failed server with address " + addr.toString() + " to list caused by " + - nullException.toString(), msg.get()); + assertEquals("Added failed server with address " + addr.toString() + " to list caused by " + + nullException.toString(), + msg.get()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java index d829b4bfd654..f4e53bfdb7b8 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java index 45da1e8560df..bcb5e87d7b24 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,10 +49,10 @@ public class TestIPCUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIPCUtil.class); + HBaseClassTestRule.forClass(TestIPCUtil.class); private static Throwable create(Class clazz) throws InstantiationException, - IllegalAccessException, InvocationTargetException, NoSuchMethodException { + IllegalAccessException, InvocationTargetException, NoSuchMethodException { try { Constructor c = clazz.getDeclaredConstructor(); c.setAccessible(true); @@ -79,7 +79,7 @@ private static Throwable create(Class clazz) throws Instant try { Constructor c = - clazz.getDeclaredConstructor(String.class, Throwable.class); + clazz.getDeclaredConstructor(String.class, Throwable.class); c.setAccessible(true); return c.newInstance("error", new Exception("error")); } catch (NoSuchMethodException e) { @@ -87,7 +87,7 @@ private static Throwable create(Class clazz) throws Instant } Constructor c = - clazz.getDeclaredConstructor(Throwable.class, Throwable.class); + clazz.getDeclaredConstructor(Throwable.class, Throwable.class); c.setAccessible(true); return c.newInstance(new Exception("error"), "error"); } @@ -104,15 +104,16 @@ public void testWrapConnectionException() throws Exception { Address addr = Address.fromParts("127.0.0.1", 12345); for (Throwable exception : exceptions) { if (exception instanceof TimeoutException) { - assertThat(IPCUtil.wrapException(addr, null, exception), instanceOf(TimeoutIOException.class)); + assertThat(IPCUtil.wrapException(addr, null, exception), + instanceOf(TimeoutIOException.class)); } else { - IOException ioe = IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO, - exception); + IOException ioe = + IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO, exception); // Assert that the exception contains the Region name if supplied. HBASE-25735. // Not all exceptions get the region stuffed into it. if (ioe.getMessage() != null) { - assertTrue(ioe.getMessage(). 
- contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString())); + assertTrue(ioe.getMessage() + .contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString())); } assertThat(ioe, instanceOf(exception.getClass())); } @@ -135,8 +136,8 @@ public void run() { if (depth <= IPCUtil.MAX_DEPTH) { if (numElements <= numStackTraceElements.intValue()) { future.completeExceptionally( - new AssertionError("should call run directly but stack trace decreased from " + - numStackTraceElements.intValue() + " to " + numElements)); + new AssertionError("should call run directly but stack trace decreased from " + + numStackTraceElements.intValue() + " to " + numElements)); return; } numStackTraceElements.setValue(numElements); @@ -144,9 +145,9 @@ public void run() { } else { if (numElements >= numStackTraceElements.intValue()) { future.completeExceptionally( - new AssertionError("should call eventLoop.execute to prevent stack overflow but" + - " stack trace increased from " + numStackTraceElements.intValue() + " to " + - numElements)); + new AssertionError("should call eventLoop.execute to prevent stack overflow but" + + " stack trace increased from " + numStackTraceElements.intValue() + " to " + + numElements)); } else { future.complete(null); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java index 8782fe116b07..e91b0132e787 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestNettyRpcConnection { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNettyRpcConnection.class); + HBaseClassTestRule.forClass(TestNettyRpcConnection.class); private static final Logger LOG = LoggerFactory.getLogger(TestNettyRpcConnection.class); @@ -59,7 +59,7 @@ public class TestNettyRpcConnection { public static void setUp() throws IOException { CLIENT = new NettyRpcClient(HBaseConfiguration.create()); CONN = new NettyRpcConnection(CLIENT, - new ConnectionId(User.getCurrent(), "test", Address.fromParts("localhost", 1234))); + new ConnectionId(User.getCurrent(), "test", Address.fromParts("localhost", 1234))); } @AfterClass diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java index ba1e27258d2d..3b05391b42d9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java index 62e204a65a2c..3e59a09fae2e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestQuotaFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -33,16 +33,11 @@ public class TestQuotaFilter { @Test public void testClassMethodsAreBuilderStyle() { - /* ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * QuotaFilter qf - * = new QuotaFilter() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: QuotaFilter qf = new QuotaFilter() .setFoo(foo) + * .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" returns the + * declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(QuotaFilter.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java index 37a21dc2b18b..afc8e24d60f4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +22,6 @@ import static org.junit.Assert.fail; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory.QuotaGlobalsSettingsBypass; @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestQuotaGlobalsSettingsBypass { @ClassRule @@ -142,6 +142,7 @@ void expectFailure(QuotaSettings one, QuotaSettings two) throws IOException { try { one.merge(two); fail("Expected to see an Exception merging " + two + " into " + one); - } catch (IllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java index 6b9212f6260f..d7c21c8fda35 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,8 +53,7 @@ public class TestQuotaSettingsFactory { @Test public void testAllQuotasAddedToList() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) // Disable the table .build(); final long readLimit = 1000; @@ -67,8 +66,7 @@ public void testAllQuotasAddedToList() { .setWriteNum(TimedQuota.newBuilder().setSoftLimit(writeLimit) .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) .build(); - final Quotas quotas = Quotas.newBuilder() - .setSpace(spaceQuota) // Set the FS quotas + final Quotas quotas = Quotas.newBuilder().setSpace(spaceQuota) // Set the FS quotas .setThrottle(throttle) // Set some RPC limits .build(); final TableName tn = TableName.valueOf("my_table"); @@ -125,19 +123,15 @@ public void testAllQuotasAddedToList() { @Test(expected = IllegalArgumentException.class) public void testNeitherTableNorNamespace() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1L) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build(); QuotaSettingsFactory.fromSpace(null, null, spaceQuota); } @Test(expected = IllegalArgumentException.class) public void testBothTableAndNamespace() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1L) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build(); QuotaSettingsFactory.fromSpace(TableName.valueOf("foo"), "bar", spaceQuota); } @@ -150,7 
+144,7 @@ public void testSpaceLimitSettings() { QuotaSettingsFactory.limitTableSpace(tableName, sizeLimit, violationPolicy); assertNotNull("QuotaSettings should not be null", settings); assertTrue("Should be an instance of SpaceLimitSettings", - settings instanceof SpaceLimitSettings); + settings instanceof SpaceLimitSettings); SpaceLimitSettings spaceLimitSettings = (SpaceLimitSettings) settings; SpaceLimitRequest protoRequest = spaceLimitSettings.getProto(); assertTrue("Request should have a SpaceQuota", protoRequest.hasQuota()); @@ -167,7 +161,7 @@ public void testSpaceLimitSettingsForDeletes() { QuotaSettings nsSettings = QuotaSettingsFactory.removeNamespaceSpaceLimit(ns); assertNotNull("QuotaSettings should not be null", nsSettings); assertTrue("Should be an instance of SpaceLimitSettings", - nsSettings instanceof SpaceLimitSettings); + nsSettings instanceof SpaceLimitSettings); SpaceLimitRequest nsProto = ((SpaceLimitSettings) nsSettings).getProto(); assertTrue("Request should have a SpaceQuota", nsProto.hasQuota()); assertTrue("The remove attribute should be true", nsProto.getQuota().getRemove()); @@ -175,7 +169,7 @@ public void testSpaceLimitSettingsForDeletes() { QuotaSettings tableSettings = QuotaSettingsFactory.removeTableSpaceLimit(tn); assertNotNull("QuotaSettings should not be null", tableSettings); assertTrue("Should be an instance of SpaceLimitSettings", - tableSettings instanceof SpaceLimitSettings); + tableSettings instanceof SpaceLimitSettings); SpaceLimitRequest tableProto = ((SpaceLimitSettings) tableSettings).getProto(); assertTrue("Request should have a SpaceQuota", tableProto.hasQuota()); assertTrue("The remove attribute should be true", tableProto.getQuota().getRemove()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java index 2406d10ed0a0..d14c4f539af1 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ /** * Test class for {@link SpaceLimitSettings}. 
*/ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestSpaceLimitSettings { @ClassRule @@ -130,14 +130,14 @@ public void testNamespaceQuota() { @Test public void testQuotaMerging() throws IOException { TableName tn = TableName.valueOf("foo"); - QuotaSettings originalSettings = QuotaSettingsFactory.limitTableSpace( - tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE); - QuotaSettings largerSizeLimit = QuotaSettingsFactory.limitTableSpace( - tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE); - QuotaSettings differentPolicy = QuotaSettingsFactory.limitTableSpace( - tn, 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); - QuotaSettings incompatibleSettings = QuotaSettingsFactory.limitNamespaceSpace( - "ns1", 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); + QuotaSettings originalSettings = + QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE); + QuotaSettings largerSizeLimit = + QuotaSettingsFactory.limitTableSpace(tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE); + QuotaSettings differentPolicy = + QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); + QuotaSettings incompatibleSettings = QuotaSettingsFactory.limitNamespaceSpace("ns1", + 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); assertEquals(originalSettings.merge(largerSizeLimit), largerSizeLimit); assertEquals(originalSettings.merge(differentPolicy), differentPolicy); @@ -145,7 +145,7 @@ public void testQuotaMerging() throws IOException { originalSettings.merge(incompatibleSettings); fail("Should not be able to merge a Table space quota with a namespace space quota."); } catch (IllegalArgumentException e) { - //pass + // pass } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java index 53fb9bd3e927..0d75ea261e75 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestThrottleSettings { @ClassRule @@ -44,15 +44,13 @@ public class TestThrottleSettings { @Test public void testMerge() throws IOException { TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1) .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, tr1); TimedQuota tq2 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); ThrottleRequest tr2 = ThrottleRequest.newBuilder().setTimedQuota(tq2) .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); @@ -66,15 +64,13 @@ public void testMerge() throws IOException { @Test public void testIncompatibleThrottleTypes() throws IOException { TimedQuota requestsQuota = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest requestsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(requestsQuota) .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, requestsQuotaReq); TimedQuota readsQuota = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); ThrottleRequest readsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(readsQuota) .setType(QuotaProtos.ThrottleType.READ_NUMBER).build(); @@ -89,17 +85,15 @@ public void testIncompatibleThrottleTypes() throws IOException { @Test public void testNoThrottleReturnsOriginal() throws IOException { TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1) .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, tr1); - ThrottleRequest tr2 = ThrottleRequest.newBuilder() - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + ThrottleRequest tr2 = + ThrottleRequest.newBuilder().setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); - assertTrue( - "The same object should be returned by merge, but it wasn't", + assertTrue("The same object should be returned by merge, but it wasn't", orig == orig.merge(new ThrottleSettings("joe", null, null, null, tr2))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java index ae2d4262e647..bfb5d1642b73 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -37,7 +36,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestReplicationPeerConfig { @ClassRule @@ -53,16 +52,11 @@ public class TestReplicationPeerConfig { @Test public void testClassMethodsAreBuilderStyle() { - /* ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * ReplicationPeerConfig htd - * = new ReplicationPeerConfig() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: ReplicationPeerConfig htd = new ReplicationPeerConfig() + * .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" + * returns the declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(ReplicationPeerConfig.class); @@ -72,48 +66,39 @@ public void testClassMethodsAreBuilderStyle() { public void testNeedToReplicateWithReplicatingAll() { // 1. replication_all flag is true, no namespaces and table-cfs config ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .build(); + .setReplicateAllUserTables(true).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // 2. replicate_all flag is true, and config in excludedTableCfs // Exclude empty table-cfs map peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(Maps.newHashMap()) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(Maps.newHashMap()).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude table B Map> tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_B)); // 3. 
replicate_all flag is true, and config in excludeNamespaces // Exclude empty namespace set peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet()) - .build(); + .setReplicateAllUserTables(true).setExcludeNamespaces(Sets.newHashSet()).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude namespace other - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(true) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude namespace replication - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(true) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // 4. replicate_all flag is true, and config excludeNamespaces and excludedTableCfs both @@ -121,30 +106,24 @@ public void testNeedToReplicateWithReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .setExcludeTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) + .setExcludeTableCFsMap(tableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Namespaces config conflicts with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_B)); tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_B)); } @@ -156,48 +135,38 @@ public void testNeedToReplicateWithoutReplicatingAll() { // 1. replication_all flag is false, no namespaces and table-cfs config peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .build(); + .setReplicateAllUserTables(false).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // 2. 
replicate_all flag is false, and only config table-cfs in peer // Set empty table-cfs map peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(Maps.newHashMap()) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(Maps.newHashMap()).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set table B tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_B)); // 3. replication_all flag is false, and only config namespace in peer // Set empty namespace set peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet()) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet()).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set namespace other peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set namespace replication peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) + .build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // 4. 
replicate_all flag is false, and config namespaces and table-cfs both @@ -205,29 +174,23 @@ public void testNeedToReplicateWithoutReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs) + .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Namespaces config conflicts with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs) + .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) + .setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); } @@ -236,9 +199,7 @@ public void testNeedToReplicateCFWithReplicatingAll() { Map> excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, null); ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -246,9 +207,7 @@ public void testNeedToReplicateCFWithReplicatingAll() { excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, Lists.newArrayList()); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -256,9 +215,7 @@ public void testNeedToReplicateCFWithReplicatingAll() { excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, Lists.newArrayList(Bytes.toString(FAMILY1))); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -269,9 +226,7 @@ public void testNeedToReplicateCFWithoutReplicatingAll() { Map> tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) 
- .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -279,9 +234,7 @@ public void testNeedToReplicateCFWithoutReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, Lists.newArrayList()); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -289,9 +242,7 @@ public void testNeedToReplicateCFWithoutReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, Lists.newArrayList(Bytes.toString(FAMILY1))); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java index cf5939031b02..70b3a1a6e12d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.security.Key; import java.security.KeyException; - import javax.crypto.spec.SecretKeySpec; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -38,7 +37,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestEncryptionUtil { private static final String INVALID_HASH_ALG = "this-hash-algorithm-not-exists hopefully... :)"; @@ -50,7 +49,7 @@ public class TestEncryptionUtil { // There does not seem to be a ready way to test either getKeyFromBytesOrMasterKey // or createEncryptionContext, and the existing code under MobUtils appeared to be - // untested. Not ideal! + // untested. Not ideal! 
@Test public void testKeyWrappingUsingHashAlgDefault() throws Exception { @@ -146,15 +145,14 @@ private void testKeyWrapping(String hashAlgorithm) throws Exception { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + if (!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); } // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key @@ -168,7 +166,7 @@ private void testKeyWrapping(String hashAlgorithm) throws Exception { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); // unwrap with an incorrect key try { @@ -183,7 +181,7 @@ private void testWALKeyWrapping(String hashAlgorithm) throws Exception { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + if (!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); } @@ -204,7 +202,7 @@ private void testWALKeyWrapping(String hashAlgorithm) throws Exception { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); } private void testKeyWrappingWithMismatchingAlgorithms(Configuration conf) throws Exception { @@ -215,8 +213,7 @@ private void testKeyWrappingWithMismatchingAlgorithms(Configuration conf) throws // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java index 538a9b91c3c5..8de94c9e6649 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Strings; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestHBaseSaslRpcClient { @ClassRule @@ -82,14 +82,13 @@ public class TestHBaseSaslRpcClient { private static final Logger LOG = LoggerFactory.getLogger(TestHBaseSaslRpcClient.class); - @Rule public ExpectedException exception = ExpectedException.none(); @Test public void testSaslClientUsesGivenRpcProtection() throws Exception { - Token token = createTokenMockWithCredentials(DEFAULT_USER_NAME, - DEFAULT_USER_PASSWORD); + Token token = + createTokenMockWithCredentials(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD); DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider(); for (SaslUtil.QualityOfProtection qop : SaslUtil.QualityOfProtection.values()) { String negotiatedQop = new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, token, @@ -114,7 +113,7 @@ public void testDigestSaslClientCallbackHandler() throws UnsupportedCallbackExce final RealmCallback realmCallback = mock(RealmCallback.class); // We can provide a realmCallback, but HBase presently does nothing with it. - Callback[] callbackArray = {nameCallback, passwordCallback, realmCallback}; + Callback[] callbackArray = { nameCallback, passwordCallback, realmCallback }; final DigestSaslClientCallbackHandler saslClCallbackHandler = new DigestSaslClientCallbackHandler(token); saslClCallbackHandler.handle(callbackArray); @@ -132,7 +131,7 @@ public void testDigestSaslClientCallbackHandlerWithException() { try { saslClCallbackHandler.handle(new Callback[] { mock(TextOutputCallback.class) }); } catch (UnsupportedCallbackException expEx) { - //expected + // expected } catch (Exception ex) { fail("testDigestSaslClientCallbackHandlerWithException error : " + ex.getMessage()); } @@ -140,7 +139,7 @@ public void testDigestSaslClientCallbackHandlerWithException() { @Test public void testHBaseSaslRpcClientCreation() throws Exception { - //creation kerberos principal check section + // creation kerberos principal check section assertFalse(assertSuccessCreationKerberosPrincipal(null)); assertFalse(assertSuccessCreationKerberosPrincipal("DOMAIN.COM")); assertFalse(assertSuccessCreationKerberosPrincipal("principal/DOMAIN.COM")); @@ -150,22 +149,22 @@ public void testHBaseSaslRpcClientCreation() throws Exception { LOG.warn("Could not create a SASL client with valid Kerberos credential"); } - //creation digest principal check section + // creation digest principal check section assertFalse(assertSuccessCreationDigestPrincipal(null, null)); assertFalse(assertSuccessCreationDigestPrincipal("", "")); assertFalse(assertSuccessCreationDigestPrincipal("", null)); assertFalse(assertSuccessCreationDigestPrincipal(null, "")); assertTrue(assertSuccessCreationDigestPrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - //creation simple principal check section + // creation simple principal check section assertFalse(assertSuccessCreationSimplePrincipal("", "")); assertFalse(assertSuccessCreationSimplePrincipal(null, null)); assertFalse(assertSuccessCreationSimplePrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - //exceptions check section + // exceptions check section assertTrue(assertIOExceptionThenSaslClientIsNull(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - assertTrue(assertIOExceptionWhenGetStreamsBeforeConnectCall( - DEFAULT_USER_NAME, 
DEFAULT_USER_PASSWORD)); + assertTrue( + assertIOExceptionWhenGetStreamsBeforeConnectCall(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); } @Test @@ -182,9 +181,8 @@ public void testAuthMethodReadWrite() throws IOException { assertAuthMethodWrite(out, AuthMethod.DIGEST); } - private void assertAuthMethodRead(DataInputBuffer in, AuthMethod authMethod) - throws IOException { - in.reset(new byte[] {authMethod.code}, 1); + private void assertAuthMethodRead(DataInputBuffer in, AuthMethod authMethod) throws IOException { + in.reset(new byte[] { authMethod.code }, 1); assertEquals(authMethod, AuthMethod.read(in)); } @@ -214,15 +212,15 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddress, try { rpcClient.getInputStream(); - } catch(IOException ex) { - //Sasl authentication exchange hasn't completed yet + } catch (IOException ex) { + // Sasl authentication exchange hasn't completed yet inState = true; } try { rpcClient.getOutputStream(); - } catch(IOException ex) { - //Sasl authentication exchange hasn't completed yet + } catch (IOException ex) { + // Sasl authentication exchange hasn't completed yet outState = true; } @@ -233,14 +231,13 @@ private boolean assertIOExceptionThenSaslClientIsNull(String principal, String p try { DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider() { - @Override - public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, - Token token, boolean fallbackAllowed, - Map saslProps) { - return null; - } - }; + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddress, + SecurityInfo securityInfo, Token token, + boolean fallbackAllowed, Map saslProps) { + return null; + } + }; new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); @@ -254,7 +251,7 @@ private boolean assertSuccessCreationKerberosPrincipal(String principal) { HBaseSaslRpcClient rpcClient = null; try { rpcClient = createSaslRpcClientForKerberos(principal); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; @@ -267,7 +264,7 @@ private boolean assertSuccessCreationDigestPrincipal(String principal, String pa new DigestSaslClientAuthenticationProvider(), createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; @@ -277,22 +274,20 @@ private boolean assertSuccessCreationSimplePrincipal(String principal, String pa HBaseSaslRpcClient rpcClient = null; try { rpcClient = createSaslRpcClientSimple(principal, password); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; } - private HBaseSaslRpcClient createSaslRpcClientForKerberos(String principal) - throws IOException { + private HBaseSaslRpcClient createSaslRpcClientForKerberos(String principal) throws IOException { return new HBaseSaslRpcClient(HBaseConfiguration.create(), new GssSaslClientAuthenticationProvider(), createTokenMock(), Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); } - private Token createTokenMockWithCredentials( - String principal, String password) - throws IOException { + private Token createTokenMockWithCredentials(String principal, + String 
password) throws IOException { Token token = createTokenMock(); if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(password)) { when(token.getIdentifier()).thenReturn(Bytes.toBytes(DEFAULT_USER_NAME)); @@ -314,20 +309,20 @@ private Token createTokenMock() { } @Test(expected = IOException.class) - public void testFailedEvaluateResponse() throws IOException { - //prep mockin the SaslClient + public void testFailedEvaluateResponse() throws IOException { + // prep mockin the SaslClient SimpleSaslClientAuthenticationProvider mockProvider = - Mockito.mock(SimpleSaslClientAuthenticationProvider.class); + Mockito.mock(SimpleSaslClientAuthenticationProvider.class); SaslClient mockClient = Mockito.mock(SaslClient.class); Assert.assertNotNull(mockProvider); Assert.assertNotNull(mockClient); Mockito.when(mockProvider.createClient(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.anyBoolean(), Mockito.any())).thenReturn(mockClient); - HBaseSaslRpcClient rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), - mockProvider, createTokenMock(), - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); + HBaseSaslRpcClient rpcClient = + new HBaseSaslRpcClient(HBaseConfiguration.create(), mockProvider, createTokenMock(), + Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); - //simulate getting an error from a failed saslServer.evaluateResponse + // simulate getting an error from a failed saslServer.evaluateResponse DataOutputBuffer errorBuffer = new DataOutputBuffer(); errorBuffer.writeInt(SaslStatus.ERROR.state); WritableUtils.writeString(errorBuffer, IOException.class.getName()); @@ -337,7 +332,7 @@ mockProvider, createTokenMock(), in.reset(errorBuffer.getData(), 0, errorBuffer.getLength()); DataOutputBuffer out = new DataOutputBuffer(); - //simulate that authentication exchange has completed quickly after sending the token + // simulate that authentication exchange has completed quickly after sending the token Mockito.when(mockClient.isComplete()).thenReturn(true); rpcClient.saslConnect(in, out); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java index 36f29dec240e..2b5df2734da4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
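The reformatted testFailedEvaluateResponse above fabricates the frame a server would send back when saslServer.evaluateResponse fails: a status integer (SaslStatus.ERROR.state) followed by strings written with WritableUtils, which the client then rejects with an IOException. The standalone sketch below only approximates that frame for illustration; the literal status value and the trailing message field are assumptions, not taken from the HBase wire code.

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableUtils;

public class SaslErrorFrameSketch {
  public static void main(String[] args) throws IOException {
    // Build an error frame the way the test does: a status code, then string fields.
    DataOutputBuffer frame = new DataOutputBuffer();
    frame.writeInt(1); // stand-in for SaslStatus.ERROR.state; the real value is assumed here
    WritableUtils.writeString(frame, IOException.class.getName()); // exception class name
    WritableUtils.writeString(frame, "injected failure"); // message field, assumed for illustration

    // Read it back the way a client-side receive loop would.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(frame.getData(), 0, frame.getLength());
    int status = in.readInt();
    String exceptionClass = WritableUtils.readString(in);
    String message = WritableUtils.readString(in);
    System.out.println(status + " " + exceptionClass + ": " + message);
  }
}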
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.ExpectedException; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestSaslUtil { @ClassRule diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java index eff3b5f8dd0a..b684ff7f8adc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java @@ -23,7 +23,6 @@ import java.util.Collections; import java.util.HashSet; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -32,7 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestDefaultProviderSelector { @ClassRule @@ -40,6 +39,7 @@ public class TestDefaultProviderSelector { HBaseClassTestRule.forClass(TestDefaultProviderSelector.class); BuiltInProviderSelector selector; + @Before public void setup() { selector = new BuiltInProviderSelector(); @@ -70,9 +70,9 @@ public void testDuplicateProviders() { @Test public void testExpectedProviders() { - HashSet providers = new HashSet<>(Arrays.asList( - new SimpleSaslClientAuthenticationProvider(), new GssSaslClientAuthenticationProvider(), - new DigestSaslClientAuthenticationProvider())); + HashSet providers = + new HashSet<>(Arrays.asList(new SimpleSaslClientAuthenticationProvider(), + new GssSaslClientAuthenticationProvider(), new DigestSaslClientAuthenticationProvider())); selector.configure(new Configuration(false), providers); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java index 2b399593e7c1..37f6e9df3852 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java @@ -25,9 +25,7 @@ import java.net.InetAddress; import java.util.HashMap; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -44,7 +42,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; -@Category({SmallTests.class, SecurityTests.class}) +@Category({ SmallTests.class, SecurityTests.class }) public class TestSaslClientAuthenticationProviders { @ClassRule @@ -53,7 +51,7 @@ public class TestSaslClientAuthenticationProviders { @Test public void testCannotAddTheSameProviderTwice() { - HashMap registeredProviders = new HashMap<>(); + HashMap registeredProviders = new HashMap<>(); SaslClientAuthenticationProvider p1 = new SimpleSaslClientAuthenticationProvider(); SaslClientAuthenticationProvider p2 = new SimpleSaslClientAuthenticationProvider(); @@ -62,10 +60,11 @@ public void 
testCannotAddTheSameProviderTwice() { try { SaslClientAuthenticationProviders.addProviderIfNotExists(p2, registeredProviders); - } catch (RuntimeException e) {} + } catch (RuntimeException e) { + } assertSame("Expected the original provider to be present", p1, - registeredProviders.entrySet().iterator().next().getValue()); + registeredProviders.entrySet().iterator().next().getValue()); } @Test @@ -89,58 +88,66 @@ public void testInstanceIsCached() { public void testDifferentConflictingImplementationsFail() { Configuration conf = HBaseConfiguration.create(); conf.setStrings(SaslClientAuthenticationProviders.EXTRA_PROVIDERS_KEY, - ConflictingProvider1.class.getName(), ConflictingProvider2.class.getName()); + ConflictingProvider1.class.getName(), ConflictingProvider2.class.getName()); SaslClientAuthenticationProviders.getInstance(conf); } static class ConflictingProvider1 implements SaslClientAuthenticationProvider { - static final SaslAuthMethod METHOD1 = new SaslAuthMethod( - "FOO", (byte)12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); + static final SaslAuthMethod METHOD1 = + new SaslAuthMethod("FOO", (byte) 12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); public ConflictingProvider1() { } - @Override public SaslAuthMethod getSaslAuthMethod() { + @Override + public SaslAuthMethod getSaslAuthMethod() { return METHOD1; } - @Override public String getTokenKind() { + @Override + public String getTokenKind() { return null; } - @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo securityInfo, Token token, boolean fallbackAllowed, Map saslProps) throws IOException { return null; } - @Override public UserInformation getUserInfo(User user) { + @Override + public UserInformation getUserInfo(User user) { return null; } } static class ConflictingProvider2 implements SaslClientAuthenticationProvider { - static final SaslAuthMethod METHOD2 = new SaslAuthMethod( - "BAR", (byte)12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); + static final SaslAuthMethod METHOD2 = + new SaslAuthMethod("BAR", (byte) 12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); public ConflictingProvider2() { } - @Override public SaslAuthMethod getSaslAuthMethod() { + @Override + public SaslAuthMethod getSaslAuthMethod() { return METHOD2; } - @Override public String getTokenKind() { + @Override + public String getTokenKind() { return null; } - @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo securityInfo, Token token, boolean fallbackAllowed, Map saslProps) throws IOException { return null; } - @Override public UserInformation getUserInfo(User user) { + @Override + public UserInformation getUserInfo(User user) { return null; } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java index c78c765aca1f..d01621483092 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class TestClientTokenUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientTokenUtil.class); + HBaseClassTestRule.forClass(TestClientTokenUtil.class); private URLClassLoader cl; @@ -81,13 +81,13 @@ public void testObtainToken() throws Exception { shouldInjectFault.set(null, new ServiceException(injected)); try { - ClientTokenUtil.obtainToken((Connection)null); + ClientTokenUtil.obtainToken((Connection) null); fail("Should have injected exception."); } catch (IOException e) { assertException(injected, e); } - CompletableFuture future = ClientTokenUtil.obtainToken((AsyncConnection)null); + CompletableFuture future = ClientTokenUtil.obtainToken((AsyncConnection) null); try { future.get(); fail("Should have injected exception."); @@ -95,7 +95,7 @@ public void testObtainToken() throws Exception { assertException(injected, e); } Boolean loaded = (Boolean) cl.loadClass(ProtobufUtil.class.getCanonicalName()) - .getDeclaredMethod("isClassLoaderLoaded").invoke(null); + .getDeclaredMethod("isClassLoaderLoaded").invoke(null); assertFalse("Should not have loaded DynamicClassLoader", loaded); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 317dff9efebc..7c4166c59b77 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,7 +72,8 @@ public class TestProtobufUtil { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestProtobufUtil.class); private static final String TAG_STR = "tag-1"; - private static final byte TAG_TYPE = (byte)10; + private static final byte TAG_TYPE = (byte) 10; + public TestProtobufUtil() { } @@ -93,7 +94,6 @@ public void testException() throws IOException { /** * Test basic Get conversions. - * * @throws IOException if the conversion to a {@link Get} fails */ @Test @@ -126,9 +126,8 @@ public void testGet() throws IOException { /** * Test Delete Mutate conversions. - * * @throws IOException if the conversion to a {@link Delete} or a - * {@link org.apache.hadoop.hbase.client.Mutation} fails + * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @Test public void testDelete() throws IOException { @@ -161,22 +160,18 @@ public void testDelete() throws IOException { // delete always have empty value, // add empty value to the original mutate - for (ColumnValue.Builder column: - mutateBuilder.getColumnValueBuilderList()) { - for (QualifierValue.Builder qualifier: - column.getQualifierValueBuilderList()) { + for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) { + for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) { qualifier.setValue(ByteString.EMPTY); } } - assertEquals(mutateBuilder.build(), - ProtobufUtil.toMutation(MutationType.DELETE, delete)); + assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.DELETE, delete)); } /** * Test Put Mutate conversions. 
- * * @throws IOException if the conversion to a {@link Put} or a - * {@link org.apache.hadoop.hbase.client.Mutation} fails + * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @Test public void testPut() throws IOException { @@ -210,22 +205,18 @@ public void testPut() throws IOException { // value level timestamp specified, // add the timestamp to the original mutate long timestamp = put.getTimestamp(); - for (ColumnValue.Builder column: - mutateBuilder.getColumnValueBuilderList()) { - for (QualifierValue.Builder qualifier: - column.getQualifierValueBuilderList()) { + for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) { + for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) { if (!qualifier.hasTimestamp()) { qualifier.setTimestamp(timestamp); } } } - assertEquals(mutateBuilder.build(), - ProtobufUtil.toMutation(MutationType.PUT, put)); + assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.PUT, put)); } /** * Test basic Scan conversions. - * * @throws IOException if the conversion to a {@link org.apache.hadoop.hbase.client.Scan} fails */ @Test @@ -259,8 +250,7 @@ public void testScan() throws IOException { scanBuilder.setIncludeStopRow(false); ClientProtos.Scan expectedProto = scanBuilder.build(); - ClientProtos.Scan actualProto = ProtobufUtil.toScan( - ProtobufUtil.toScan(expectedProto)); + ClientProtos.Scan actualProto = ProtobufUtil.toScan(ProtobufUtil.toScan(expectedProto)); assertEquals(expectedProto, actualProto); } @@ -281,17 +271,15 @@ public void testToCell() { dbb.put(arr); ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength()); CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV, false); - Cell newOffheapKV = - ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, - false); + Cell newOffheapKV = ProtobufUtil + .toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, false); assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0); } /** * Test Increment Mutate conversions. - * * @throws IOException if converting to an {@link Increment} or - * {@link org.apache.hadoop.hbase.client.Mutation} fails + * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @Test public void testIncrement() throws IOException { @@ -334,23 +322,20 @@ private MutationProto getIncrementMutation(Long timestamp) { } /** - * Older clients may not send along a timestamp in the MutationProto. Check that we - * default correctly. + * Older clients may not send along a timestamp in the MutationProto. Check that we default + * correctly. */ @Test public void testIncrementNoTimestamp() throws IOException { MutationProto mutation = getIncrementMutation(null); Increment increment = ProtobufUtil.toIncrement(mutation, null); assertEquals(HConstants.LATEST_TIMESTAMP, increment.getTimestamp()); - increment.getFamilyCellMap().values() - .forEach(cells -> - cells.forEach(cell -> - assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); + increment.getFamilyCellMap().values().forEach(cells -> cells + .forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); } /** * Test Append Mutate conversions. - * * @throws IOException if converting to an {@link Append} fails */ @Test @@ -373,15 +358,16 @@ public void testAppend() throws IOException { } /** - * Older clients may not send along a timestamp in the MutationProto. Check that we - * default correctly. 
+ * Older clients may not send along a timestamp in the MutationProto. Check that we default + * correctly. */ @Test public void testAppendNoTimestamp() throws IOException { MutationProto mutation = getAppendMutation(null); Append append = ProtobufUtil.toAppend(mutation, null); assertEquals(HConstants.LATEST_TIMESTAMP, append.getTimestamp()); - append.getFamilyCellMap().values().forEach(cells -> cells.forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); + append.getFamilyCellMap().values().forEach(cells -> cells + .forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); } private MutationProto getAppendMutation(Long timestamp) { @@ -425,8 +411,8 @@ private static ProcedureProtos.Procedure createProcedure(long procId) { private static LockServiceProtos.LockedResource createLockedResource( LockServiceProtos.LockedResourceType resourceType, String resourceName, - LockServiceProtos.LockType lockType, - ProcedureProtos.Procedure exclusiveLockOwnerProcedure, int sharedLockCount) { + LockServiceProtos.LockType lockType, ProcedureProtos.Procedure exclusiveLockOwnerProcedure, + int sharedLockCount) { LockServiceProtos.LockedResource.Builder build = LockServiceProtos.LockedResource.newBuilder(); build.setResourceType(resourceType); build.setResourceName(resourceName); @@ -448,94 +434,70 @@ public void testProcedureInfo() { ProcedureProtos.Procedure procedure = builder.build(); String procJson = ProtobufUtil.toProcedureJson(Lists.newArrayList(procedure)); - assertEquals("[{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"1\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"," - + "\"stateMessage\":[{\"value\":\"QQ==\"}]" - + "}]", procJson); + assertEquals("[{" + "\"className\":\"java.lang.Object\"," + "\"procId\":\"1\"," + + "\"submittedTime\":\"0\"," + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"," + + "\"stateMessage\":[{\"value\":\"QQ==\"}]" + "}]", + procJson); } @Test public void testServerLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.SERVER, "server", - LockServiceProtos.LockType.SHARED, null, 2); + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.SERVER, "server", + LockServiceProtos.LockType.SHARED, null, 2); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"SERVER\"," - + "\"resourceName\":\"server\"," - + "\"lockType\":\"SHARED\"," - + "\"sharedLockCount\":2" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"SERVER\"," + "\"resourceName\":\"server\"," + + "\"lockType\":\"SHARED\"," + "\"sharedLockCount\":2" + "}]", + lockJson); } @Test public void testNamespaceLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.NAMESPACE, "ns", - LockServiceProtos.LockType.EXCLUSIVE, createProcedure(2), 0); + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.NAMESPACE, "ns", + LockServiceProtos.LockType.EXCLUSIVE, createProcedure(2), 0); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"NAMESPACE\"," - + "\"resourceName\":\"ns\"," - + "\"lockType\":\"EXCLUSIVE\"," - + "\"exclusiveLockOwnerProcedure\":{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"2\"," - + "\"submittedTime\":\"0\"," - + 
"\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"" - + "}," - + "\"sharedLockCount\":0" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"NAMESPACE\"," + "\"resourceName\":\"ns\"," + + "\"lockType\":\"EXCLUSIVE\"," + "\"exclusiveLockOwnerProcedure\":{" + + "\"className\":\"java.lang.Object\"," + "\"procId\":\"2\"," + "\"submittedTime\":\"0\"," + + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"" + "}," + "\"sharedLockCount\":0" + + "}]", + lockJson); } @Test public void testTableLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.TABLE, "table", - LockServiceProtos.LockType.SHARED, null, 2); + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.TABLE, "table", + LockServiceProtos.LockType.SHARED, null, 2); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"TABLE\"," - + "\"resourceName\":\"table\"," - + "\"lockType\":\"SHARED\"," - + "\"sharedLockCount\":2" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"TABLE\"," + "\"resourceName\":\"table\"," + + "\"lockType\":\"SHARED\"," + "\"sharedLockCount\":2" + "}]", + lockJson); } @Test public void testRegionLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.REGION, "region", - LockServiceProtos.LockType.EXCLUSIVE, createProcedure(3), 0); + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.REGION, "region", + LockServiceProtos.LockType.EXCLUSIVE, createProcedure(3), 0); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"REGION\"," - + "\"resourceName\":\"region\"," - + "\"lockType\":\"EXCLUSIVE\"," - + "\"exclusiveLockOwnerProcedure\":{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"3\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"" - + "}," - + "\"sharedLockCount\":0" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"REGION\"," + "\"resourceName\":\"region\"," + + "\"lockType\":\"EXCLUSIVE\"," + "\"exclusiveLockOwnerProcedure\":{" + + "\"className\":\"java.lang.Object\"," + "\"procId\":\"3\"," + "\"submittedTime\":\"0\"," + + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"" + "}," + "\"sharedLockCount\":0" + + "}]", + lockJson); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encode/decode tags is set to true. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encode/decode tags is set to true. 
*/ @Test public void testCellConversionWithTags() { @@ -546,7 +508,7 @@ public void testCellConversionWithTags() { Cell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(1, decodedTags.size()); + assertEquals(1, decodedTags.size()); Tag decodedTag = decodedTags.get(0); assertEquals(TAG_TYPE, decodedTag.getType()); assertEquals(TAG_STR, Tag.getValueAsString(decodedTag)); @@ -566,14 +528,14 @@ private Cell getCellWithTags() { private Cell getCellFromProtoResult(CellProtos.Cell protoCell, boolean decodeTags) { ExtendedCellBuilder decodedBuilder = - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); return ProtobufUtil.toCell(decodedBuilder, protoCell, decodeTags); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encode/decode tags is set to false. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encode/decode tags is set to false. */ @Test public void testCellConversionWithoutTags() { @@ -583,14 +545,13 @@ public void testCellConversionWithoutTags() { Cell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encoding of tags is set to false - * and decoding of tags is set to true. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encoding of tags is set to false and decoding of tags is set to true. */ @Test public void testTagEncodeFalseDecodeTrue() { @@ -600,14 +561,13 @@ public void testTagEncodeFalseDecodeTrue() { Cell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encoding of tags is set to true - * and decoding of tags is set to false. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encoding of tags is set to true and decoding of tags is set to false. 
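The four tag-conversion tests in this file boil down to one rule: tags survive the protobuf round trip only when encodeTags is true in ProtobufUtil.toCell(Cell, boolean) and decodeTags is true in ProtobufUtil.toCell(ExtendedCellBuilder, CellProtos.Cell, boolean); any other combination yields a cell with zero tags. A minimal sketch of the surviving combination follows; the cell and tag construction (ExtendedCellBuilderFactory, ArrayBackedTag, Cell.Type.Put) mirrors the helpers the tests use but is an assumption in its details.

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.CellProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class CellTagRoundTripSketch {
  public static void main(String[] args) {
    List<Tag> tags = Collections.singletonList(new ArrayBackedTag((byte) 10, "tag-1"));
    Cell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
      .setRow(Bytes.toBytes("row"))
      .setFamily(Bytes.toBytes("f"))
      .setQualifier(Bytes.toBytes("q"))
      .setTimestamp(1L)
      .setType(Cell.Type.Put)
      .setValue(Bytes.toBytes("v"))
      .setTags(tags)
      .build();

    // encodeTags = true when serializing, decodeTags = true when rebuilding: the tag survives.
    CellProtos.Cell proto = ProtobufUtil.toCell(cell, true);
    Cell decoded = ProtobufUtil
      .toCell(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY), proto, true);
    System.out.println(PrivateCellUtil.getTags(decoded).size()); // 1

    // With encodeTags = false and/or decodeTags = false the decoded cell carries no tags,
    // which is what the other three tests assert.
  }
}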
*/ @Test public void testTagEncodeTrueDecodeFalse() { @@ -617,6 +577,6 @@ public void testTagEncodeTrueDecodeFalse() { Cell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java index 808e245062a1..d2d68a4ad375 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,32 +27,17 @@ import java.util.Set; /** - * Utility class to check whether a given class conforms to builder-style: - * Foo foo = - * new Foo() - * .setBar(bar) - * .setBaz(baz) + * Utility class to check whether a given class conforms to builder-style: Foo foo = new Foo() + * .setBar(bar) .setBaz(baz) */ public final class BuilderStyleTest { - private BuilderStyleTest() {} + private BuilderStyleTest() { + } /* * If a base class Foo declares a method setFoo() returning Foo, then the subclass should - * re-declare the methods overriding the return class with the subclass: - * - * class Foo { - * Foo setFoo() { - * .. - * return this; - * } - * } - * - * class Bar { - * Bar setFoo() { - * return (Bar) super.setFoo(); - * } - * } - * + * re-declare the methods overriding the return class with the subclass: class Foo { Foo setFoo() + * { .. return this; } } class Bar { Bar setFoo() { return (Bar) super.setFoo(); } } */ @SuppressWarnings("rawtypes") public static void assertClassesAreBuilderStyle(Class... classes) { @@ -66,13 +51,13 @@ public static void assertClassesAreBuilderStyle(Class... classes) { } Class ret = method.getReturnType(); if (method.getName().startsWith("set") || method.getName().startsWith("add")) { - System.out.println(" " + clazz.getSimpleName() + "." + method.getName() + "() : " - + ret.getSimpleName()); + System.out.println( + " " + clazz.getSimpleName() + "." + method.getName() + "() : " + ret.getSimpleName()); // because of subclass / super class method overrides, we group the methods fitting the // same signatures because we get two method definitions from java reflection: // Mutation.setDurability() : Mutation - // Delete.setDurability() : Mutation + // Delete.setDurability() : Mutation // Delete.setDurability() : Delete String sig = method.getName(); for (Class param : method.getParameterTypes()) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java index 314cae9e175b..dd335f9b1b41 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
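The collapsed BuilderStyleTest comment above is easier to follow when expanded. Its point is that when a base class declares a fluent setter returning itself, each subclass should re-declare that setter with a covariant return type so chained calls stay typed as the subclass. The Foo/SubFoo pair below is a purely hypothetical illustration of the pattern the utility checks for.

class Foo {
  private String bar;

  Foo setBar(String bar) {
    this.bar = bar;
    return this;
  }
}

class SubFoo extends Foo {
  private String baz;

  @Override
  SubFoo setBar(String bar) {
    // Covariant return: delegate to the parent but narrow the return type to SubFoo.
    return (SubFoo) super.setBar(bar);
  }

  SubFoo setBaz(String baz) {
    this.baz = baz;
    return this;
  }
}

Without the override, a chain such as new SubFoo().setBar("a").setBaz("b") would not compile, because setBar would still return Foo; that is the mistake assertClassesAreBuilderStyle is meant to catch.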
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java index 2fd73caea46a..a7ae74922635 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java index 2f497c6fdfb5..50bb5ebd2d49 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java index a8b7644c52af..a5177abd6f88 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 70f3b75b3e8d..e3e09b534b0a 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,113 +31,6 @@ Apache HBase - Common Common functionality for HBase - - - - src/main/resources/ - - hbase-default.xml - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - - maven-assembly-plugin - - true - - - - maven-antrun-plugin - - - process-resources - - - - - - - run - - - - - generate-Version-information - generate-sources - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - versionInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - hbase-default.xml - - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase @@ -261,6 +154,112 @@ + + + + src/main/resources/ + + hbase-default.xml + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + + maven-assembly-plugin + + true + + + + maven-antrun-plugin + + + + run + + process-resources + + + + + + + + + generate-Version-information + + run + + generate-sources + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + versionInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-sources/java + + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + hbase-default.xml + + + + + net.revelc.code + warbucks-maven-plugin + + + + @@ -273,10 +272,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -313,7 +312,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -328,10 +329,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. 
hbase-compression-aircompressor Apache HBase - Compression - Aircompressor Pure Java compression support using Aircompressor codecs - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -165,6 +131,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java index 43231c35b92e..b3518c3d87fc 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java @@ -1,21 +1,23 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.Compressor; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; @@ -24,7 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.airlift.compress.Compressor; /** * Hadoop compressor glue for aircompressor compressors. 
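The glue classes in this module adapt aircompressor's block compressors to Hadoop's Compressor/Decompressor SPI, and the codec classes that follow (Lz4Codec, LzoCodec, SnappyCodec, ZstdCodec) expose them through the standard CompressionCodec stream API. As a rough usage sketch, assuming the codec can be instantiated standalone with a default Configuration, a round trip through one of these codecs looks roughly like this:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.compress.aircompressor.Lz4Codec;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class AircompressorCodecStreamSketch {
  public static void main(String[] args) throws Exception {
    // ReflectionUtils injects the Configuration if the codec implements Configurable.
    CompressionCodec codec = ReflectionUtils.newInstance(Lz4Codec.class, new Configuration());

    byte[] data = "hello hello hello hello".getBytes(StandardCharsets.UTF_8);

    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (OutputStream out = codec.createOutputStream(compressed)) {
      out.write(data); // closing the stream finishes the compressed block
    }

    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    try (InputStream in =
      codec.createInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
      IOUtils.copyBytes(in, restored, 4096, false);
    }
    System.out.println(new String(restored.toByteArray(), StandardCharsets.UTF_8));
  }
}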
@@ -163,7 +164,7 @@ public void reinit(Configuration conf) { public void reset() { LOG.trace("reset"); try { - compressor = (T)compressor.getClass().getDeclaredConstructor().newInstance(); + compressor = (T) compressor.getClass().getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java index b0e444fd2584..33eaf4bec0cd 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java @@ -1,28 +1,29 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.Decompressor; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.airlift.compress.Decompressor; /** * Hadoop decompressor glue for aircompressor decompressors. 
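The reset() shown above does not keep a factory around; it simply re-instantiates the wrapped aircompressor object through its no-arg constructor via reflection and replaces the old instance. A plain-Java sketch of that idiom, using a stand-in type rather than a real compressor:

public class ReflectiveResetSketch {

  // Re-create an object of the same runtime class; requires an accessible no-arg constructor.
  static <T> T freshInstance(T current) {
    try {
      @SuppressWarnings("unchecked")
      T next = (T) current.getClass().getDeclaredConstructor().newInstance();
      return next;
    } catch (Exception e) {
      // Same strategy as the glue code: surface reflection failures as unchecked exceptions.
      throw new RuntimeException(e);
    }
  }

  public static void main(String[] args) {
    StringBuilder replacement = freshInstance(new StringBuilder("accumulated state"));
    System.out.println(replacement.length()); // 0, the replacement starts empty
  }
}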
@@ -102,7 +103,7 @@ public boolean needsDictionary() { public void reset() { LOG.trace("reset"); try { - decompressor = (T)decompressor.getClass().getDeclaredConstructor().newInstance(); + decompressor = (T) decompressor.getClass().getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java index bc83234e4746..6bb7b1c721a7 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.lz4.Lz4Compressor; +import io.airlift.compress.lz4.Lz4Decompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -30,8 +33,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.lz4.Lz4Compressor; -import io.airlift.compress.lz4.Lz4Decompressor; /** * Hadoop Lz4 codec implemented with aircompressor. 
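Lz4Codec above pulls in io.airlift.compress.lz4.Lz4Compressor and Lz4Decompressor, which are block (one-shot) compressors rather than streaming ones. The following round trip is written against the airlift Compressor/Decompressor interfaces as I understand them and is worth verifying against the aircompressor version actually on the classpath:

import io.airlift.compress.lz4.Lz4Compressor;
import io.airlift.compress.lz4.Lz4Decompressor;
import java.nio.charset.StandardCharsets;

public class AirliftLz4RoundTripSketch {
  public static void main(String[] args) {
    byte[] input = "hello hello hello hello".getBytes(StandardCharsets.UTF_8);

    // Compress into a buffer sized by the library's worst-case estimate.
    Lz4Compressor compressor = new Lz4Compressor();
    byte[] compressed = new byte[compressor.maxCompressedLength(input.length)];
    int compressedLength =
      compressor.compress(input, 0, input.length, compressed, 0, compressed.length);

    // Decompress back into a buffer of the original size.
    Lz4Decompressor decompressor = new Lz4Decompressor();
    byte[] restored = new byte[input.length];
    int restoredLength =
      decompressor.decompress(compressed, 0, compressedLength, restored, 0, restored.length);

    System.out.println(new String(restored, 0, restoredLength, StandardCharsets.UTF_8));
  }
}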
diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java index dbff7d1bdf3d..31f1bda30e6d 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.lzo.LzoCompressor; +import io.airlift.compress.lzo.LzoDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -30,8 +33,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.lzo.LzoCompressor; -import io.airlift.compress.lzo.LzoDecompressor; /** * Hadoop Lzo codec implemented with aircompressor. diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java index c3ef34c79d6d..2f066dd34375 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.snappy.SnappyCompressor; +import io.airlift.compress.snappy.SnappyDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -30,8 +33,6 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.snappy.SnappyCompressor; -import io.airlift.compress.snappy.SnappyDecompressor; /** * Hadoop snappy codec implemented with aircompressor. diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java index 599ceefa6aef..0fc35b46583e 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.zstd.ZstdCompressor; +import io.airlift.compress.zstd.ZstdDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -30,21 +33,18 @@ import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.zstd.ZstdCompressor; -import io.airlift.compress.zstd.ZstdDecompressor; /** * Hadoop codec implementation for Zstandard, implemented with aircompressor. *

    - * Unlike the other codecs this one should be considered as under development and unstable - * (as in changing), reflecting the status of aircompressor's zstandard implementation. + * Unlike the other codecs this one should be considered as under development and unstable (as in + * changing), reflecting the status of aircompressor's zstandard implementation. *

    - * NOTE: This codec is NOT data format compatible with the Hadoop native zstandard codec. - * There are issues with both framing and limitations of the aircompressor zstandard - * compressor. This codec can be used as an alternative to the native codec, if the native - * codec cannot be made available and/or an eventual migration will never be necessary - * (i.e. this codec's performance meets anticipated requirements). Once you begin using this - * alternative you will be locked into it. + * NOTE: This codec is NOT data format compatible with the Hadoop native zstandard codec. There are + * issues with both framing and limitations of the aircompressor zstandard compressor. This codec + * can be used as an alternative to the native codec, if the native codec cannot be made available + * and/or an eventual migration will never be necessary (i.e. this codec's performance meets + * anticipated requirements). Once you begin using this alternative you will be locked into it. */ @InterfaceAudience.Private public class ZstdCodec implements Configurable, CompressionCodec { diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java index 1defda25a593..52165eb9b59a 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLz4 extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZ4); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java index 98ee5c04bafe..6cd4eee3cfc7 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
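The rewrapped warning above states that this zstandard implementation is not data format compatible with Hadoop's native zstd codec, so choosing one is effectively a one-way decision for existing HFiles. For completeness, a hedged sketch of how a deployment might point HBase at this implementation; the property name hbase.io.compress.zstd.codec is an assumption based on the pluggable compression support these modules belong to and should be checked against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ZstdCodecSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property name: route Compression.Algorithm.ZSTD to the aircompressor codec.
    conf.set("hbase.io.compress.zstd.codec",
      "org.apache.hadoop.hbase.io.compress.aircompressor.ZstdCodec");
    System.out.println(conf.get("hbase.io.compress.zstd.codec"));
  }
}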
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLzo extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZO); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java index a6d863b61a5e..66ed1c40f4a8 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionSnappy extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.SNAPPY); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java index de0f4575e62a..02673dfe8686 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionZstd extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.ZSTD); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java index db1cc7214fd1..45f7c9058d6a 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -39,7 +40,7 @@ public void testLz4CodecSmall() throws Exception { public void testLz4CodecLarge() throws Exception { codecLargeTest(new Lz4Codec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new Lz4Codec(), 2); - codecLargeTest(new Lz4Codec(), 10); // high compressability + codecLargeTest(new Lz4Codec(), 10); // high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java index bd1b75aecc1b..59f37c861643 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -39,7 +40,7 @@ public void testLzoCodecSmall() throws Exception { public void testLzoCodecLarge() throws Exception { codecLargeTest(new LzoCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new LzoCodec(), 2); - codecLargeTest(new LzoCodec(), 10); // very high compressability + codecLargeTest(new LzoCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java index 2646b1942025..667268a46c25 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -39,7 +40,7 @@ public void testSnappyCodecSmall() throws Exception { public void testSnappyCodecLarge() throws Exception { codecLargeTest(new SnappyCodec(), 1.1); // poor compressability codecLargeTest(new SnappyCodec(), 2); - codecLargeTest(new SnappyCodec(), 10); // very high compressability + codecLargeTest(new SnappyCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java index 23d7777f07c7..710bd5750428 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java index 997d6873c617..f3da86794ee0 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java index 924e46a77eee..e1bde41687ba 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java index 0de6de2b027c..4fff65da43ed 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java index 6b924c5ff9e8..3f9c89272829 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -39,7 +40,7 @@ public void testZstdCodecSmall() throws Exception { public void testZstdCodecLarge() throws Exception { codecLargeTest(new ZstdCodec(), 1.1); // poor compressability codecLargeTest(new ZstdCodec(), 2); - codecLargeTest(new ZstdCodec(), 10); // very high compressability + codecLargeTest(new ZstdCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-lz4/pom.xml b/hbase-compression/hbase-compression-lz4/pom.xml index c023da743d8f..21f4bea7f25d 100644 --- a/hbase-compression/hbase-compression-lz4/pom.xml +++ b/hbase-compression/hbase-compression-lz4/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. hbase-compression-lz4 Apache HBase - Compression - LZ4 Pure Java compression support using lz4-java - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +115,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java index 288861f2c95f..e6420e9f8b3d 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.lz4; diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java index b2bf0a247a27..649cf4908b04 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java @@ -1,23 +1,26 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.nio.ByteBuffer; +import net.jpountz.lz4.LZ4Compressor; +import net.jpountz.lz4.LZ4Factory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -25,8 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import net.jpountz.lz4.LZ4Compressor; -import net.jpountz.lz4.LZ4Factory; /** * Hadoop compressor glue for lz4-java. diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java index d67bb2555222..b897423f761e 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java @@ -1,30 +1,31 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.nio.ByteBuffer; +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4SafeDecompressor; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import net.jpountz.lz4.LZ4Factory; -import net.jpountz.lz4.LZ4SafeDecompressor; /** * Hadoop decompressor glue for lz4-java. diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java index 06c113dea5ee..5d547d064784 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLz4 extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZ4); } diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java index 0c237e105bac..ff9cec67285e 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.lz4; @@ -38,8 +39,8 @@ public void testLz4CodecSmall() throws Exception { @Test public void testLz4CodecLarge() throws Exception { codecLargeTest(new Lz4Codec(), 1.1); // poor compressability, expansion with this codec - codecLargeTest(new Lz4Codec(), 2); - codecLargeTest(new Lz4Codec(), 10); // very high compressability + codecLargeTest(new Lz4Codec(), 2); + codecLargeTest(new Lz4Codec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java index fdf9b0a9cc14..734be0c2851f 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-snappy/pom.xml b/hbase-compression/hbase-compression-snappy/pom.xml index 63e099853f83..d7232174c6e1 100644 --- a/hbase-compression/hbase-compression-snappy/pom.xml +++ b/hbase-compression/hbase-compression-snappy/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. hbase-compression-snappy Apache HBase - Compression - Snappy Pure Java compression support using Xerial Snappy - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +115,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java index 35902ba238ef..6e4b951d294f 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xerial; diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java index 082a52e1e723..3493a804f38e 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xerial; diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java index 02952bd3b861..0987e550b74f 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xerial; diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java index 15ed99ef65f1..ea527212bfb7 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionSnappy extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.SNAPPY); } diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java index 99fd09dc013c..2f022117f169 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xerial; @@ -39,7 +40,7 @@ public void testSnappyCodecSmall() throws Exception { public void testSnappyCodecLarge() throws Exception { codecLargeTest(new SnappyCodec(), 1.1); // poor compressability codecLargeTest(new SnappyCodec(), 2); - codecLargeTest(new SnappyCodec(), 10); // very high compressability + codecLargeTest(new SnappyCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java index ba59b6525340..5afd5b90811b 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-xz/pom.xml b/hbase-compression/hbase-compression-xz/pom.xml index 4efd0504f4ea..980cb30fe44a 100644 --- a/hbase-compression/hbase-compression-xz/pom.xml +++ b/hbase-compression/hbase-compression-xz/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. hbase-compression-xz Apache HBase - Compression - XZ Pure Java compression support using XZ for Java - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +115,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java index 82c65a9ddb55..4e15c1405ed4 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xz; diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java index d0aab7f5a74d..3551003b2cb3 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xz; @@ -20,8 +21,8 @@ import java.nio.BufferOverflowException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.hbase.io.ByteBufferOutputStream; +import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Compressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -235,7 +236,7 @@ public void setInput(byte[] b, int off, int len) { // Package private int maxCompressedLength(int len) { - return len + 32 + (len/6); + return len + 32 + (len / 6); } } diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java index 759307bba988..27a14bcb23e7 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xz; diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java index 481c7287aa38..f645b7544f5e 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLzma extends HFileTestBase { @ClassRule @@ -52,16 +52,16 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZMA); } @Test public void testReconfLevels() throws Exception { Path path_1 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".1.hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".1.hfile"); Path path_2 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".2.hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".2.hfile"); conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 1); doTest(conf, path_1, Compression.Algorithm.LZMA); long len_1 = FS.getFileStatus(path_1).getLen(); diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java index 63978abe838b..d13e80a134ea 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xz; @@ -39,8 +40,8 @@ public void testLzmaCodecSmall() throws Exception { @Test public void testLzmaCodecLarge() throws Exception { codecLargeTest(new LzmaCodec(), 1.1); // poor compressability - codecLargeTest(new LzmaCodec(), 2); - codecLargeTest(new LzmaCodec(), 10); // very high compressability + codecLargeTest(new LzmaCodec(), 2); + codecLargeTest(new LzmaCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java index 89ce68b0600e..396b01402d09 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-zstd/pom.xml b/hbase-compression/hbase-compression-zstd/pom.xml index 55f38c73ccdc..ef1437e09d90 100644 --- a/hbase-compression/hbase-compression-zstd/pom.xml +++ b/hbase-compression/hbase-compression-zstd/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 3.0.0-alpha-3-SNAPSHOT .. hbase-compression-zstd Apache HBase - Compression - ZStandard Pure Java compression support using zstd-jni - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -138,6 +115,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java index 07b26d0c4bf0..f7af083ddd88 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; @@ -115,8 +116,7 @@ public String getDefaultExtension() { static int getLevel(Configuration conf) { return conf.getInt(ZSTD_LEVEL_KEY, - conf.getInt( - CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, + conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT)); } @@ -141,10 +141,8 @@ static byte[] getDictionary(final Configuration conf) { // Reference: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md static boolean isDictionary(byte[] dictionary) { - return (dictionary[0] == (byte)0x37 && - dictionary[1] == (byte)0xA4 && - dictionary[2] == (byte)0x30 && - dictionary[3] == (byte)0xEC); + return (dictionary[0] == (byte) 0x37 && dictionary[1] == (byte) 0xA4 + && dictionary[2] == (byte) 0x30 && dictionary[3] == (byte) 0xEC); } static int getDictionaryId(byte[] dictionary) { diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java index 3ac514a5fb94..e2c483cbcf18 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
+ * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictCompress; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; @@ -25,8 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdDictCompress; /** * Hadoop compressor glue for zstd-jni. diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java index dfa37db636ae..f7934d9f5500 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictDecompress; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; @@ -25,8 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdDictDecompress; /** * Hadoop decompressor glue for zstd-java. 
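As background on the ZstdCodec.isDictionary hunk shown a little earlier: the check compares the first four bytes of the configured dictionary blob against 0x37 0xA4 0x30 0xEC, which is the little-endian encoding of the zstd dictionary magic number 0xEC30A437 from the zstd format specification. The standalone sketch below illustrates the same magic-number test; the class and method names are invented for the example and the use of java.nio.ByteBuffer is an assumption, not the HBase implementation.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Minimal sketch of a zstd dictionary magic-number check (hypothetical helper,
// not the HBase class). The zstd format stores the dictionary magic 0xEC30A437
// little-endian, so the first bytes of a dictionary file are 37 A4 30 EC.
final class ZstdDictMagicCheck {
  static final int ZSTD_DICT_MAGIC = 0xEC30A437;

  static boolean looksLikeDictionary(byte[] data) {
    if (data == null || data.length < 4) {
      return false;
    }
    // Read the first four bytes as a little-endian int and compare to the magic.
    int magic = ByteBuffer.wrap(data, 0, 4).order(ByteOrder.LITTLE_ENDIAN).getInt();
    return magic == ZSTD_DICT_MAGIC;
  }

  public static void main(String[] args) {
    byte[] header = { (byte) 0x37, (byte) 0xA4, (byte) 0x30, (byte) 0xEC, 0x00 };
    System.out.println(looksLikeDictionary(header)); // prints true
  }
}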
diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java index 42c56a822d4d..7d9be7a7079a 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionZstd extends HFileTestBase { @ClassRule @@ -52,16 +52,16 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.ZSTD); } @Test public void testReconfLevels() throws Exception { Path path_1 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".1.hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".1.hfile"); Path path_2 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtil.getRandomUUID().toString() + ".2.hfile"); + HBaseTestingUtil.getRandomUUID().toString() + ".2.hfile"); conf.setInt(ZstdCodec.ZSTD_LEVEL_KEY, 1); doTest(conf, path_1, Compression.Algorithm.ZSTD); long len_1 = FS.getFileStatus(path_1).getLen(); diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java index e75de9b9c466..ffce70943963 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java index bf1c78cbc17f..bdff5da87c9c 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; @@ -41,8 +42,8 @@ public void testZstdCodecSmall() throws Exception { @Test public void testZstdCodecLarge() throws Exception { codecLargeTest(new ZstdCodec(), 1.1); // poor compressability - codecLargeTest(new ZstdCodec(), 2); - codecLargeTest(new ZstdCodec(), 10); // very high compressability + codecLargeTest(new ZstdCodec(), 2); + codecLargeTest(new ZstdCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java index 0a17ef997d20..ef3f339160eb 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; @@ -40,7 +41,7 @@ public class TestZstdDictionary extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZstdDictionary.class); + HBaseClassTestRule.forClass(TestZstdDictionary.class); private static final String DICTIONARY_PATH = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict"; // zstd.test.data compressed with zstd.test.dict at level 3 will produce a result of @@ -53,7 +54,7 @@ public class TestZstdDictionary extends CompressionTestBase { public static void setUp() throws Exception { Configuration conf = new Configuration(); TEST_DATA = DictionaryCache.loadFromResource(conf, - DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024*1024); + DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024 * 1024); assertNotNull("Failed to load test data", TEST_DATA); } @@ -76,13 +77,13 @@ public void test() throws Exception { public static void main(String[] args) throws IOException { // Write 1000 1k blocks for training to the specified file // Train with: - // zstd --train -B1024 -o + // zstd --train -B1024 -o if (args.length < 1) { System.err.println("Usage: TestZstdCodec "); System.exit(-1); } final RandomDistribution.DiscreteRNG rng = - new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, 2); + new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, 2); final File outFile = new File(args[0]); final byte[] buffer = new byte[1024]; System.out.println("Generating " + outFile); diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java index 6d850114bbbd..8cb13812e3b9 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java @@ -53,7 +53,7 @@ public class TestZstdDictionarySplitMerge { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZstdDictionarySplitMerge.class); + HBaseClassTestRule.forClass(TestZstdDictionarySplitMerge.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Configuration conf; @@ -83,11 +83,10 @@ public void test() throws Exception { final byte[] cfName = Bytes.toBytes("info"); final String dictionaryPath = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict"; final TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName) - .setCompressionType(Compression.Algorithm.ZSTD) - .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath) - .build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName) + .setCompressionType(Compression.Algorithm.ZSTD) + .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath).build()) + .build(); 
final Admin admin = TEST_UTIL.getAdmin(); admin.createTable(td, new byte[][] { Bytes.toBytes(1) }); TEST_UTIL.waitTableAvailable(tableName); @@ -108,6 +107,7 @@ public void test() throws Exception { public boolean evaluate() throws Exception { return TEST_UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3; } + @Override public String explainFailure() throws Exception { return "Split has not finished yet"; @@ -120,7 +120,7 @@ public String explainFailure() throws Exception { RegionInfo regionA = null; RegionInfo regionB = null; - for (RegionInfo region: admin.getRegions(tableName)) { + for (RegionInfo region : admin.getRegions(tableName)) { if (region.getStartKey().length == 0) { regionA = region; } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) { @@ -129,18 +129,16 @@ public String explainFailure() throws Exception { } assertNotNull(regionA); assertNotNull(regionB); - admin.mergeRegionsAsync(new byte[][] { - regionA.getRegionName(), - regionB.getRegionName() - }, false).get(30, TimeUnit.SECONDS); + admin + .mergeRegionsAsync(new byte[][] { regionA.getRegionName(), regionB.getRegionName() }, false) + .get(30, TimeUnit.SECONDS); assertEquals(2, admin.getRegions(tableName).size()); ServerName expected = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName(); assertEquals(expected, TEST_UTIL.getConnection().getRegionLocator(tableName) - .getRegionLocation(Bytes.toBytes(1), true).getServerName()); - try (AsyncConnection asyncConn = - ConnectionFactory.createAsyncConnection(conf).get()) { + .getRegionLocation(Bytes.toBytes(1), true).getServerName()); + try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(conf).get()) { assertEquals(expected, asyncConn.getRegionLocator(tableName) - .getRegionLocation(Bytes.toBytes(1), true).get().getServerName()); + .getRegionLocation(Bytes.toBytes(1), true).get().getServerName()); } TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0); } diff --git a/hbase-compression/pom.xml b/hbase-compression/pom.xml index 1193bfbc1348..47b5eda08e50 100644 --- a/hbase-compression/pom.xml +++ b/hbase-compression/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-compression + pom Apache HBase - Compression Pure Java compression support parent - pom hbase-compression-aircompressor @@ -80,10 +80,10 @@ spotbugs-maven-plugin - false spotbugs + false ${project.basedir}/../dev-support/spotbugs-exclude.xml diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml index ab1a4e3bdef7..058c6d8186d9 100644 --- a/hbase-endpoint/pom.xml +++ b/hbase-endpoint/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -33,33 +33,6 @@ true - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - org.apache.hbase.thirdparty @@ -237,6 +210,33 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + @@ -258,7 +258,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 8df15417fa31..0ee432e7c025 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -58,29 +58,26 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateService; /** - * This client class is for invoking the aggregate functions deployed on the - * Region Server side via the AggregateService. This class will implement the - * supporting functionality for summing/processing the individual results - * obtained from the AggregateService for each region. + * This client class is for invoking the aggregate functions deployed on the Region Server side via + * the AggregateService. This class will implement the supporting functionality for + * summing/processing the individual results obtained from the AggregateService for each region. *

 * <p>
- * This will serve as the client side handler for invoking the aggregate
- * functions.
- * For all aggregate functions,
+ * This will serve as the client side handler for invoking the aggregate functions. For all
+ * aggregate functions,
 * <ul>
 * <li>start row < end row is an essential condition (if they are not
 * {@link HConstants#EMPTY_BYTE_ARRAY})
- * <li>Column family can't be null. In case where multiple families are
- * provided, an IOException will be thrown. An optional column qualifier can
- * also be defined.</li>
- * <li>For methods to find maximum, minimum, sum, rowcount, it returns the
- * parameter type. For average and std, it returns a double value. For row
- * count, it returns a long value.</li>
+ * <li>Column family can't be null. In case where multiple families are provided, an IOException
+ * will be thrown. An optional column qualifier can also be defined.</li>
+ * <li>For methods to find maximum, minimum, sum, rowcount, it returns the parameter type. For
+ * average and std, it returns a double value. For row count, it returns a long value.</li>
 * </ul>
- * <p>Call {@link #close()} when done.
+ * <p>

    + * Call {@link #close()} when done. */ @InterfaceAudience.Public public class AggregationClient implements Closeable { - // TODO: This class is not used. Move to examples? + // TODO: This class is not used. Move to examples? private static final Logger log = LoggerFactory.getLogger(AggregationClient.class); private final Connection connection; @@ -152,18 +149,17 @@ public void close() throws IOException { } /** - * It gives the maximum value of a column for a given column family for the - * given range. In case qualifier is null, a max of all values for the given - * family is returned. + * It gives the maximum value of a column for a given column family for the given range. In case + * qualifier is null, a max of all values for the given family is returned. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return max val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public R max( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) + public R + max(final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return max(table, ci, scan); @@ -171,19 +167,17 @@ public R max( } /** - * It gives the maximum value of a column for a given column family for the - * given range. In case qualifier is null, a max of all values for the given - * family is returned. + * It gives the maximum value of a column for a given column family for the given range. In case + * qualifier is null, a max of all values for the given family is returned. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return max val <> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
*/ - public - R max(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R max(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MaxCallBack implements Batch.Callback { R max = null; @@ -199,41 +193,40 @@ public synchronized void update(byte[] region, byte[] row, R result) { } MaxCallBack aMaxCallBack = new MaxCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public R call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMax(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() > 0) { - ByteString b = response.getFirstPart(0); - Q q = getParsedGenericInstance(ci.getClass(), 3, b); - return ci.getCellValueFromProto(q); - } - return null; + new Batch.Call() { + @Override + public R call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMax(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + if (response.getFirstPartCount() > 0) { + ByteString b = response.getFirstPart(0); + Q q = getParsedGenericInstance(ci.getClass(), 3, b); + return ci.getCellValueFromProto(q); } - }, aMaxCallBack); + return null; + } + }, aMaxCallBack); return aMaxCallBack.getMax(); } /** - * It gives the minimum value of a column for a given column family for the - * given range. In case qualifier is null, a min of all values for the given - * family is returned. + * It gives the minimum value of a column for a given column family for the given range. In case + * qualifier is null, a min of all values for the given family is returned. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return min val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public R min( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) + public R + min(final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return min(table, ci, scan); @@ -241,19 +234,17 @@ public R min( } /** - * It gives the minimum value of a column for a given column family for the - * given range. In case qualifier is null, a min of all values for the given - * family is returned. + * It gives the minimum value of a column for a given column family for the given range. In case + * qualifier is null, a min of all values for the given family is returned. * @param table table to scan. 
* @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return min val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R min(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R min(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MinCallBack implements Batch.Callback { private R min = null; @@ -270,67 +261,65 @@ public synchronized void update(byte[] region, byte[] row, R result) { MinCallBack minCallBack = new MinCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public R call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMin(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() > 0) { - ByteString b = response.getFirstPart(0); - Q q = getParsedGenericInstance(ci.getClass(), 3, b); - return ci.getCellValueFromProto(q); - } - return null; + new Batch.Call() { + @Override + public R call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMin(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + if (response.getFirstPartCount() > 0) { + ByteString b = response.getFirstPart(0); + Q q = getParsedGenericInstance(ci.getClass(), 3, b); + return ci.getCellValueFromProto(q); } - }, minCallBack); + return null; + } + }, minCallBack); log.debug("Min fom all regions is: " + minCallBack.getMinimum()); return minCallBack.getMinimum(); } /** - * It gives the row count, by summing up the individual results obtained from - * regions. In case the qualifier is null, FirstKeyValueFilter is used to - * optimised the operation. In case qualifier is provided, I can't use the - * filter as it may set the flag to skip to next row, but the value read is - * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * It gives the row count, by summing up the individual results obtained from regions. In case the + * qualifier is null, FirstKeyValueFilter is used to optimised the operation. In case qualifier is + * provided, I can't use the filter as it may set the flag to skip to next row, but the value read + * is not of the given filter: in this case, this particular row will not be counted ==> an + * error. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. 
+ * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ public long rowCount( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return rowCount(table, ci, scan); } } /** - * It gives the row count, by summing up the individual results obtained from - * regions. In case the qualifier is null, FirstKeyValueFilter is used to - * optimised the operation. In case qualifier is provided, I can't use the - * filter as it may set the flag to skip to next row, but the value read is - * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * It gives the row count, by summing up the individual results obtained from regions. In case the + * qualifier is null, FirstKeyValueFilter is used to optimised the operation. In case qualifier is + * provided, I can't use the filter as it may set the flag to skip to next row, but the value read + * is not of the given filter: in this case, this particular row will not be counted ==> an + * error. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - long rowCount(final Table table, final ColumnInterpreter ci, final Scan scan) + public long + rowCount(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, true); class RowNumCallback implements Batch.Callback { @@ -348,57 +337,56 @@ public void update(byte[] region, byte[] row, Long result) { RowNumCallback rowNum = new RowNumCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public Long call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getRowNum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); - ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); - bb.rewind(); - return bb.getLong(); + new Batch.Call() { + @Override + public Long call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getRowNum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, rowNum); + byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); + ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); + bb.rewind(); + return bb.getLong(); + } + }, rowNum); return rowNum.getRowNumCount(); } /** - * It sums up the value returned from various regions. 
In case qualifier is - * null, summation of all the column qualifiers in the given family is done. + * It sums up the value returned from various regions. In case qualifier is null, summation of all + * the column qualifiers in the given family is done. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return sum <S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public S sum( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public S + sum(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return sum(table, ci, scan); } } /** - * It sums up the value returned from various regions. In case qualifier is - * null, summation of all the column qualifiers in the given family is done. + * It sums up the value returned from various regions. In case qualifier is null, summation of all + * the column qualifiers in the given family is done. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return sum <S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - S sum(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public S sum(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class SumCallBack implements Batch.Callback { @@ -415,38 +403,38 @@ public synchronized void update(byte[] region, byte[] row, S result) { } SumCallBack sumCallBack = new SumCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public S call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - // Not sure what is going on here why I have to do these casts. TODO. - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getSum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() == 0) { - return null; - } - ByteString b = response.getFirstPart(0); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - return s; + new Batch.Call() { + @Override + public S call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + // Not sure what is going on here why I have to do these casts. TODO. 
+ CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getSum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + if (response.getFirstPartCount() == 0) { + return null; } - }, sumCallBack); + ByteString b = response.getFirstPart(0); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + return s; + } + }, sumCallBack); return sumCallBack.getSumResult(); } /** - * It computes average while fetching sum and row count from all the - * corresponding regions. Approach is to compute a global sum of region level - * sum and rowcount and then compute the average. + * It computes average while fetching sum and row count from all the corresponding regions. + * Approach is to compute a global sum of region level sum and rowcount and then compute the + * average. * @param tableName the name of the table to scan * @param scan the HBase scan object to use to read data from HBase - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ private Pair getAvgArgs( final TableName tableName, final ColumnInterpreter ci, final Scan scan) @@ -457,17 +445,17 @@ private Pair - Pair getAvgArgs(final Table table, final ColumnInterpreter ci, - final Scan scan) throws Throwable { + private Pair + getAvgArgs(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class AvgCallBack implements Batch.Callback> { S sum = null; @@ -486,90 +474,85 @@ public synchronized void update(byte[] region, byte[] row, Pair result) AvgCallBack avgCallBack = new AvgCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call>() { - @Override - public Pair call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getAvg(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - Pair pair = new Pair<>(null, 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - ByteString b = response.getFirstPart(0); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - pair.setFirst(s); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); + new Batch.Call>() { + @Override + public Pair call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getAvg(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + Pair pair = new Pair<>(null, 0L); + if (response.getFirstPartCount() == 0) { return pair; } - }, avgCallBack); + ByteString b = response.getFirstPart(0); + T t = 
getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + pair.setFirst(s); + ByteBuffer bb = + ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, avgCallBack); return avgCallBack.getAvgArgs(); } /** - * This is the client side interface/handle for calling the average method for - * a given cf-cq combination. It was necessary to add one more call stack as - * its return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the average and returs the double value. + * This is the client side interface/handle for calling the average method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the average and returs the double value. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - double avg(final TableName tableName, final ColumnInterpreter ci, - Scan scan) throws Throwable { + public double + avg(final TableName tableName, final ColumnInterpreter ci, Scan scan) + throws Throwable { Pair p = getAvgArgs(tableName, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); } /** - * This is the client side interface/handle for calling the average method for - * a given cf-cq combination. It was necessary to add one more call stack as - * its return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the average and returs the double value. + * This is the client side interface/handle for calling the average method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the average and returs the double value. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ public double avg( - final Table table, final ColumnInterpreter ci, Scan scan) - throws Throwable { + final Table table, final ColumnInterpreter ci, Scan scan) throws Throwable { Pair p = getAvgArgs(table, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); } /** - * It computes a global standard deviation for a given column and its value. - * Standard deviation is square root of (average of squares - - * average*average). From individual regions, it obtains sum, square sum and - * number of rows. With these, the above values are computed to get the global - * std. 
+ * It computes a global standard deviation for a given column and its value. Standard deviation is + * square root of (average of squares - average*average). From individual regions, it obtains sum, + * square sum and number of rows. With these, the above values are computed to get the global std. * @param table table to scan. * @param scan the HBase scan object to use to read data from HBase * @return standard deviations - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - private - Pair, Long> getStdArgs(final Table table, final ColumnInterpreter ci, - final Scan scan) throws Throwable { + private Pair, Long> + getStdArgs(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class StdCallback implements Batch.Callback, Long>> { long rowCountVal = 0L; @@ -595,75 +578,72 @@ public synchronized void update(byte[] region, byte[] row, Pair, Long> r StdCallback stdCallback = new StdCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call, Long>>() { - @Override - public Pair, Long> call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getStd(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - Pair, Long> pair = new Pair<>(new ArrayList<>(), 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - List list = new ArrayList<>(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - ByteString b = response.getFirstPart(i); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - list.add(s); - } - pair.setFirst(list); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); + new Batch.Call, Long>>() { + @Override + public Pair, Long> call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getStd(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + Pair, Long> pair = new Pair<>(new ArrayList<>(), 0L); + if (response.getFirstPartCount() == 0) { return pair; } - }, stdCallback); + List list = new ArrayList<>(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + ByteString b = response.getFirstPart(i); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + list.add(s); + } + pair.setFirst(list); + ByteBuffer bb = + ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, stdCallback); return stdCallback.getStdParams(); } /** - * This is the client side interface/handle for calling the std method for a - * given cf-cq combination. 
It was necessary to add one more call stack as its - * return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the std and returns the double value. + * This is the client side interface/handle for calling the std method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the std and returns the double value. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - double std(final TableName tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { + public double std( + final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return std(table, ci, scan); } } /** - * This is the client side interface/handle for calling the std method for a - * given cf-cq combination. It was necessary to add one more call stack as its - * return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the std and returns the double value. + * This is the client side interface/handle for calling the std method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the std and returns the double value. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public double std( - final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { + public double + std(final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { Pair, Long> p = getStdArgs(table, ci, scan); double res = 0d; double avg = ci.divideForAvg(p.getFirst().get(0), p.getSecond()); @@ -674,22 +654,20 @@ public double st } /** - * It helps locate the region with median for a given column whose weight - * is specified in an optional column. - * From individual regions, it obtains sum of values and sum of weights. + * It helps locate the region with median for a given column whose weight is specified in an + * optional column. From individual regions, it obtains sum of values and sum of weights. * @param table table to scan. 
* @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase - * @return pair whose first element is a map between start row of the region - * and (sum of values, sum of weights) for the region, the second element is - * (sum of values, sum of weights) for all the regions chosen - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @return pair whose first element is a map between start row of the region and (sum of values, + * sum of weights) for the region, the second element is (sum of values, sum of weights) + * for all the regions chosen + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ private - Pair>, List> - getMedianArgs(final Table table, - final ColumnInterpreter ci, final Scan scan) throws Throwable { + Pair>, List> getMedianArgs(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); final NavigableMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); class StdCallback implements Batch.Callback> { @@ -712,64 +690,63 @@ public synchronized void update(byte[] region, byte[] row, List result) { } StdCallback stdCallback = new StdCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call>() { - @Override - public List call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMedian(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } + new Batch.Call>() { + @Override + public List call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMedian(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } - List list = new ArrayList<>(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - ByteString b = response.getFirstPart(i); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - list.add(s); - } - return list; + List list = new ArrayList<>(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + ByteString b = response.getFirstPart(i); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + list.add(s); } + return list; + } - }, stdCallback); + }, stdCallback); return stdCallback.getMedianParams(); } /** - * This is the client side interface/handler for calling the median method for a - * given cf-cq combination. This method collects the necessary parameters - * to compute the median and returns the median. + * This is the client side interface/handler for calling the median method for a given cf-cq + * combination. This method collects the necessary parameters to compute the median and returns + * the median. 
* @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return R the median - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R median(final TableName tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { + public R median( + final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return median(table, ci, scan); } } /** - * This is the client side interface/handler for calling the median method for a - * given cf-cq combination. This method collects the necessary parameters - * to compute the median and returns the median. + * This is the client side interface/handler for calling the median method for a given cf-cq + * combination. This method collects the necessary parameters to compute the median and returns + * the median. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return R the median - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R median(final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { + public R median(final Table table, + ColumnInterpreter ci, Scan scan) throws Throwable { Pair>, List> p = getMedianArgs(table, ci, scan); byte[] startRow = null; byte[] colFamily = scan.getFamilies()[0]; diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java index 953fd6e2dfeb..59c2d28cbb5b 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client.coprocessor; - import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; @@ -40,7 +39,8 @@ */ @InterfaceAudience.Private public final class AggregationHelper { - private AggregationHelper() {} + private AggregationHelper() { + } /** * @param scan the HBase scan object to use to read data from HBase diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java index ee4eeee99b76..145779d13da1 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,8 @@ */ @InterfaceAudience.Public public final class AsyncAggregationClient { - private AsyncAggregationClient() {} + private AsyncAggregationClient() { + } private static abstract class AbstractAggregationCallback implements CoprocessorCallback { @@ -84,8 +85,7 @@ public synchronized void onError(Throwable error) { completeExceptionally(error); } - protected abstract void aggregate(RegionInfo region, AggregateResponse resp) - throws IOException; + protected abstract void aggregate(RegionInfo region, AggregateResponse resp) throws IOException; @Override public synchronized void onRegionComplete(RegionInfo region, AggregateResponse resp) { @@ -202,8 +202,8 @@ protected R getFinalResult() { } public static - CompletableFuture rowCount(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + rowCount(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -269,8 +269,8 @@ protected S getFinalResult() { } public static - CompletableFuture avg(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + avg(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -306,8 +306,8 @@ protected Double getFinalResult() { } public static - CompletableFuture std(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + std(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -365,20 +365,20 @@ protected Double getFinalResult() { AbstractAggregationCallback> callback = new AbstractAggregationCallback>(future) { - private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); - @Override - protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException { - if (resp.getFirstPartCount() > 0) { - map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex)); + @Override + protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException { + if (resp.getFirstPartCount() > 0) { + map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex)); + } } - } - @Override - protected NavigableMap getFinalResult() { - return map; - } - }; + @Override + protected NavigableMap getFinalResult() { + return map; + } + }; table . 
coprocessorService(AggregateService::newStub, (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback) @@ -388,8 +388,8 @@ protected NavigableMap getFinalResult() { } private static void findMedian( - CompletableFuture future, AsyncTable table, - ColumnInterpreter ci, Scan scan, NavigableMap sumByRegion) { + CompletableFuture future, AsyncTable table, + ColumnInterpreter ci, Scan scan, NavigableMap sumByRegion) { double halfSum = ci.divideForAvg(sumByRegion.values().stream().reduce(ci::add).get(), 2L); S movingSum = null; byte[] startRow = null; @@ -455,9 +455,9 @@ public void onComplete() { }); } - public static - CompletableFuture median(AsyncTable table, - ColumnInterpreter ci, Scan scan) { + public static CompletableFuture + median(AsyncTable table, ColumnInterpreter ci, + Scan scan) { CompletableFuture future = new CompletableFuture<>(); addListener(sumByRegion(table, ci, scan), (sumByRegion, error) -> { if (error != null) { diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index 73eef3104265..6b6b8f517a91 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -48,11 +48,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateService; /** - * A concrete AggregateProtocol implementation. Its system level coprocessor - * that computes the aggregate function at a region level. - * {@link ColumnInterpreter} is used to interpret column value. This class is - * parameterized with the following (these are the types with which the {@link ColumnInterpreter} - * is parameterized, and for more description on these, refer to {@link ColumnInterpreter}): + * A concrete AggregateProtocol implementation. Its system level coprocessor that computes the + * aggregate function at a region level. {@link ColumnInterpreter} is used to interpret column + * value. This class is parameterized with the following (these are the types with which the + * {@link ColumnInterpreter} is parameterized, and for more description on these, refer to + * {@link ColumnInterpreter}): * @param Cell value data type * @param Promoted data type * @param
<P>
    PB message that is used to transport initializer specific bytes @@ -61,20 +61,19 @@ */ @InterfaceAudience.Private public class AggregateImplementation - extends AggregateService implements RegionCoprocessor { + extends AggregateService implements RegionCoprocessor { protected static final Logger log = LoggerFactory.getLogger(AggregateImplementation.class); private RegionCoprocessorEnvironment env; /** - * Gives the maximum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, maximum value for the - * entire column family will be returned. + * Gives the maximum for a given combination of column qualifier and column family, in the given + * row range as defined in the Scan object. In its current implementation, it takes one column + * family and one column qualifier (if provided). In case of null column qualifier, maximum value + * for the entire column family will be returned. */ @Override public void getMax(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { InternalScanner scanner = null; AggregateResponse response = null; T max = null; @@ -112,7 +111,8 @@ public void getMax(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } log.info("Maximum from this region is " @@ -121,15 +121,14 @@ public void getMax(RpcController controller, AggregateRequest request, } /** - * Gives the minimum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, minimum value for the - * entire column family will be returned. + * Gives the minimum for a given combination of column qualifier and column family, in the given + * row range as defined in the Scan object. In its current implementation, it takes one column + * family and one column qualifier (if provided). In case of null column qualifier, minimum value + * for the entire column family will be returned. */ @Override public void getMin(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; T min = null; @@ -156,8 +155,8 @@ public void getMin(RpcController controller, AggregateRequest request, results.clear(); } while (hasMoreRows); if (min != null) { - response = AggregateResponse.newBuilder().addFirstPart( - ci.getProtoForCellType(min).toByteString()).build(); + response = AggregateResponse.newBuilder() + .addFirstPart(ci.getProtoForCellType(min).toByteString()).build(); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -165,7 +164,8 @@ public void getMin(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } log.info("Minimum from this region is " @@ -174,15 +174,14 @@ public void getMin(RpcController controller, AggregateRequest request, } /** - * Gives the sum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. 
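For orientation, the region-level getMax/getMin/getSum endpoints reformatted here are normally driven from the client through AggregationClient. A minimal sketch, assuming a hypothetical table "t1" with family "d" and qualifier "v" holding 8-byte long values, and assuming AggregateImplementation is loaded on that table:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class AggregationClientSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("t1"); // hypothetical table name
    Scan scan = new Scan().addColumn(Bytes.toBytes("d"), Bytes.toBytes("v"));
    try (AggregationClient client = new AggregationClient(conf)) {
      // Each call fans out to the AggregateImplementation endpoint on every region
      // covered by the scan and merges the per-region results on the client side.
      long rows = client.rowCount(table, new LongColumnInterpreter(), scan);
      Long sum = client.sum(table, new LongColumnInterpreter(), scan);
      Long max = client.max(table, new LongColumnInterpreter(), scan);
      System.out.println("rows=" + rows + ", sum=" + sum + ", max=" + max);
    }
  }
}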
In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, sum for the entire column - * family will be returned. + * Gives the sum for a given combination of column qualifier and column family, in the given row + * range as defined in the Scan object. In its current implementation, it takes one column family + * and one column qualifier (if provided). In case of null column qualifier, sum for the entire + * column family will be returned. */ @Override public void getSum(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; long sum = 0L; @@ -212,8 +211,8 @@ public void getSum(RpcController controller, AggregateRequest request, results.clear(); } while (hasMoreRows); if (sumVal != null) { - response = AggregateResponse.newBuilder().addFirstPart( - ci.getProtoForPromotedType(sumVal).toByteString()).build(); + response = AggregateResponse.newBuilder() + .addFirstPart(ci.getProtoForPromotedType(sumVal).toByteString()).build(); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -221,21 +220,22 @@ public void getSum(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } - log.debug("Sum from this region is " - + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + sum); + log.debug("Sum from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString() + + ": " + sum); done.run(response); } /** - * Gives the row count for the given column family and column qualifier, in - * the given row range as defined in the Scan object. + * Gives the row count for the given column family and column qualifier, in the given row range as + * defined in the Scan object. */ @Override public void getRowNum(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; long counter = 0L; List results = new ArrayList<>(); @@ -244,8 +244,8 @@ public void getRowNum(RpcController controller, AggregateRequest request, Scan scan = ProtobufUtil.toScan(request.getScan()); byte[][] colFamilies = scan.getFamilies(); byte[] colFamily = colFamilies != null ? colFamilies[0] : null; - NavigableSet qualifiers = colFamilies != null ? - scan.getFamilyMap().get(colFamily) : null; + NavigableSet qualifiers = + colFamilies != null ? 
scan.getFamilyMap().get(colFamily) : null; byte[] qualifier = null; if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); @@ -264,15 +264,15 @@ public void getRowNum(RpcController controller, AggregateRequest request, } while (hasMoreRows); ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter); bb.rewind(); - response = AggregateResponse.newBuilder().addFirstPart( - ByteString.copyFrom(bb)).build(); + response = AggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build(); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); } finally { if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } log.info("Row counter from this region is " @@ -281,21 +281,18 @@ public void getRowNum(RpcController controller, AggregateRequest request, } /** - * Gives a Pair with first object as Sum and second object as row count, - * computed for a given combination of column qualifier and column family in - * the given row range as defined in the Scan object. In its current - * implementation, it takes one column family and one column qualifier (if - * provided). In case of null column qualifier, an aggregate sum over all the - * entire column family will be returned. + * Gives a Pair with first object as Sum and second object as row count, computed for a given + * combination of column qualifier and column family in the given row range as defined in the Scan + * object. In its current implementation, it takes one column family and one column qualifier (if + * provided). In case of null column qualifier, an aggregate sum over all the entire column family + * will be returned. *
<p>
    - * The average is computed in - * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by - * processing results from all regions, so its "ok" to pass sum and a Long - * type. + * The average is computed in AggregationClient#avg(byte[], ColumnInterpreter, Scan) by processing + * results from all regions, so its "ok" to pass sum and a Long type. */ @Override public void getAvg(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; try { @@ -318,8 +315,8 @@ public void getAvg(RpcController controller, AggregateRequest request, hasMoreRows = scanner.next(results); int listSize = results.size(); for (int i = 0; i < listSize; i++) { - sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, - qualifier, results.get(i)))); + sumVal = ci.add(sumVal, + ci.castToReturnType(ci.getValue(colFamily, qualifier, results.get(i)))); } rowCountVal++; } while (hasMoreRows); @@ -338,24 +335,24 @@ public void getAvg(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } done.run(response); } /** - * Gives a Pair with first object a List containing Sum and sum of squares, - * and the second object as row count. It is computed for a given combination of - * column qualifier and column family in the given row range as defined in the - * Scan object. In its current implementation, it takes one column family and - * one column qualifier (if provided). The idea is get the value of variance first: - * the average of the squares less the square of the average a standard - * deviation is square root of variance. + * Gives a Pair with first object a List containing Sum and sum of squares, and the second object + * as row count. It is computed for a given combination of column qualifier and column family in + * the given row range as defined in the Scan object. In its current implementation, it takes one + * column family and one column qualifier (if provided). The idea is get the value of variance + * first: the average of the squares less the square of the average a standard deviation is square + * root of variance. */ @Override public void getStd(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { InternalScanner scanner = null; AggregateResponse response = null; try { @@ -379,8 +376,8 @@ public void getStd(RpcController controller, AggregateRequest request, hasMoreRows = scanner.next(results); int listSize = results.size(); for (int i = 0; i < listSize; i++) { - tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, - qualifier, results.get(i)))); + tempVal = ci.add(tempVal, + ci.castToReturnType(ci.getValue(colFamily, qualifier, results.get(i)))); } results.clear(); sumVal = ci.add(sumVal, tempVal); @@ -404,23 +401,22 @@ public void getStd(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } done.run(response); } /** - * Gives a List containing sum of values and sum of weights. - * It is computed for the combination of column - * family and column qualifier(s) in the given row range as defined in the - * Scan object. In its current implementation, it takes one column family and - * two column qualifiers. 
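The getMedian contract described here (the first qualifier is the value column, an optional second qualifier is the weight column) looks roughly like this from the client side; names are hypothetical, imports are as in the previous sketch, and the endpoint appears to pick qualifiers up in sorted byte order:

// "value" sorts before "weight", so it is taken as the value column and
// "weight" as the optional weight column for the weighted median.
Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("d"), Bytes.toBytes("value"));
scan.addColumn(Bytes.toBytes("d"), Bytes.toBytes("weight"));
try (AggregationClient client = new AggregationClient(HBaseConfiguration.create())) {
  Long median = client.median(TableName.valueOf("t1"), new LongColumnInterpreter(), scan);
  System.out.println("weighted median = " + median);
}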
The first qualifier is for values column and - * the second qualifier (optional) is for weight column. + * Gives a List containing sum of values and sum of weights. It is computed for the combination of + * column family and column qualifier(s) in the given row range as defined in the Scan object. In + * its current implementation, it takes one column family and two column qualifiers. The first + * qualifier is for values column and the second qualifier (optional) is for weight column. */ @Override public void getMedian(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; try { @@ -447,11 +443,10 @@ public void getMedian(RpcController controller, AggregateRequest request, int listSize = results.size(); for (int i = 0; i < listSize; i++) { Cell kv = results.get(i); - tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, - valQualifier, kv))); + tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv))); if (weightQualifier != null) { tempWeight = ci.add(tempWeight, - ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv))); + ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv))); } } results.clear(); @@ -471,7 +466,8 @@ public void getMedian(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } done.run(response); @@ -479,11 +475,11 @@ public void getMedian(RpcController controller, AggregateRequest request, @SuppressWarnings("unchecked") // Used server-side too by Aggregation Coprocesor Endpoint. Undo this interdependence. TODO. - ColumnInterpreter constructColumnInterpreterFromRequest( - AggregateRequest request) throws IOException { + ColumnInterpreter constructColumnInterpreterFromRequest(AggregateRequest request) + throws IOException { String className = request.getInterpreterClassName(); try { - ColumnInterpreter ci; + ColumnInterpreter ci; Class cls = Class.forName(className); ci = (ColumnInterpreter) cls.getDeclaredConstructor().newInstance(); @@ -493,8 +489,8 @@ ColumnInterpreter constructColumnInterpreterFromRequest( ci.initialize(initMsg); } return ci; - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | - NoSuchMethodException | InvocationTargetException e) { + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException + | NoSuchMethodException | InvocationTargetException e) { throw new IOException(e); } } @@ -507,17 +503,17 @@ public Iterable getServices() { /** * Stores a reference to the coprocessor environment provided by the * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this - * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded - * on a table region, so always expects this to be an instance of + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on + * a table region, so always expects this to be an instance of * {@link RegionCoprocessorEnvironment}. 
* @param env the environment provided by the coprocessor host * @throws IOException if the provided environment is not an instance of - * {@code RegionCoprocessorEnvironment} + * {@code RegionCoprocessorEnvironment} */ @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; } else { throw new CoprocessorException("Must be loaded on a table region!"); } diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java index 2b38dcbaae48..e6d58d7ddbd9 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.coprocessor; @@ -81,10 +80,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ExportProtos; /** - * Export an HBase table. Writes content to sequence files up in HDFS. Use - * {@link Import} to read it back in again. It is implemented by the endpoint - * technique. - * + * Export an HBase table. Writes content to sequence files up in HDFS. Use {@link Import} to read it + * back in again. It is implemented by the endpoint technique. 
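A hedged usage sketch for this endpoint-based Export; the table name and output directory are hypothetical, the Export coprocessor is assumed to be loaded on the table, and the per-region Response is assumed to expose the rowCount/cellCount counters that appear later in this patch:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.Export;
import org.apache.hadoop.hbase.util.Bytes;

public class ExportSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    Scan scan = new Scan(); // full-table scan here; narrow it as needed
    // Each region writes its own sequence file under the output directory and
    // reports how many rows and cells it exported.
    Map<byte[], Export.Response> perRegion =
        Export.run(conf, TableName.valueOf("t1"), scan, new Path("/tmp/export-t1"));
    perRegion.forEach((region, resp) -> System.out.println(Bytes.toStringBinary(region)
        + " rows=" + resp.getRowCount() + " cells=" + resp.getCellCount()));
  }
}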
* @see org.apache.hadoop.hbase.mapreduce.Export */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -114,27 +111,25 @@ static Map run(final Configuration conf, final String[] args) return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); } - public static Map run(final Configuration conf, TableName tableName, - Scan scan, Path dir) throws Throwable { + public static Map run(final Configuration conf, TableName tableName, Scan scan, + Path dir) throws Throwable { FileSystem fs = dir.getFileSystem(conf); UserProvider userProvider = UserProvider.instantiate(conf); checkDir(fs, dir); FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); fsDelegationToken.acquireDelegationToken(fs); try { - final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, - scan, fsDelegationToken.getUserToken()); + final ExportProtos.ExportRequest request = + getConfiguredRequest(conf, dir, scan, fsDelegationToken.getUserToken()); try (Connection con = ConnectionFactory.createConnection(conf); - Table table = con.getTable(tableName)) { + Table table = con.getTable(tableName)) { Map result = new TreeMap<>(Bytes.BYTES_COMPARATOR); - table.coprocessorService(ExportProtos.ExportService.class, - scan.getStartRow(), - scan.getStopRow(), - (ExportProtos.ExportService service) -> { + table.coprocessorService(ExportProtos.ExportService.class, scan.getStartRow(), + scan.getStopRow(), (ExportProtos.ExportService service) -> { ServerRpcController controller = new ServerRpcController(); Map rval = new TreeMap<>(Bytes.BYTES_COMPARATOR); - CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); service.export(controller, request, rpcCallback); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -159,8 +154,8 @@ private static boolean getCompression(final ExportProtos.ExportRequest request) } } - private static SequenceFile.CompressionType getCompressionType( - final ExportProtos.ExportRequest request) { + private static SequenceFile.CompressionType + getCompressionType(final ExportProtos.ExportRequest request) { if (request.hasCompressType()) { return SequenceFile.CompressionType.valueOf(request.getCompressType()); } else { @@ -173,20 +168,20 @@ private static CompressionCodec getCompressionCodec(final Configuration conf, try { Class codecClass; if (request.hasCompressCodec()) { - codecClass = conf.getClassByName(request.getCompressCodec()) - .asSubclass(CompressionCodec.class); + codecClass = + conf.getClassByName(request.getCompressCodec()).asSubclass(CompressionCodec.class); } else { codecClass = DEFAULT_CODEC; } return ReflectionUtils.newInstance(codecClass, conf); } catch (ClassNotFoundException e) { - throw new IllegalArgumentException("Compression codec " - + request.getCompressCodec() + " was not found.", e); + throw new IllegalArgumentException( + "Compression codec " + request.getCompressCodec() + " was not found.", e); } } private static SequenceFile.Writer.Option getOutputPath(final Configuration conf, - final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { + final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { Path file = new Path(request.getOutputPath(), "export-" + info.getEncodedName()); FileSystem fs = file.getFileSystem(conf); if (fs.exists(file)) { @@ -196,14 +191,14 @@ private 
static SequenceFile.Writer.Option getOutputPath(final Configuration conf } private static List getWriterOptions(final Configuration conf, - final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { + final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { List rval = new LinkedList<>(); rval.add(SequenceFile.Writer.keyClass(ImmutableBytesWritable.class)); rval.add(SequenceFile.Writer.valueClass(Result.class)); rval.add(getOutputPath(conf, info, request)); if (getCompression(request)) { rval.add(SequenceFile.Writer.compression(getCompressionType(request), - getCompressionCodec(conf, request))); + getCompressionCodec(conf, request))); } else { rval.add(SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE)); } @@ -216,7 +211,7 @@ private static ExportProtos.ExportResponse processData(final Region region, ScanCoprocessor cp = new ScanCoprocessor(region); RegionScanner scanner = null; try (RegionOp regionOp = new RegionOp(region); - SecureWriter out = new SecureWriter(conf, userProvider, userToken, opts)) { + SecureWriter out = new SecureWriter(conf, userProvider, userToken, opts)) { scanner = cp.checkScannerOpen(scan); ImmutableBytesWritable key = new ImmutableBytesWritable(); long rowCount = 0; @@ -236,8 +231,8 @@ private static ExportProtos.ExportResponse processData(final Region region, Cell firstCell = cells.get(0); for (Cell cell : cells) { if (Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), - firstCell.getRowLength(), cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()) != 0) { + firstCell.getRowLength(), cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength()) != 0) { throw new IOException("Why the RegionScanner#nextRaw returns the data of different" + " rows?? 
first row=" + Bytes.toHex(firstCell.getRowArray(), firstCell.getRowOffset(), @@ -258,10 +253,8 @@ private static ExportProtos.ExportResponse processData(final Region region, } results.clear(); } while (hasMore); - return ExportProtos.ExportResponse.newBuilder() - .setRowCount(rowCount) - .setCellCount(cellCount) - .build(); + return ExportProtos.ExportResponse.newBuilder().setRowCount(rowCount).setCellCount(cellCount) + .build(); } finally { cp.checkScannerClose(scanner); } @@ -276,31 +269,24 @@ private static void checkDir(final FileSystem fs, final Path dir) throws IOExcep } } - private static ExportProtos.ExportRequest getConfiguredRequest(Configuration conf, - Path dir, final Scan scan, final Token userToken) throws IOException { + private static ExportProtos.ExportRequest getConfiguredRequest(Configuration conf, Path dir, + final Scan scan, final Token userToken) throws IOException { boolean compressed = conf.getBoolean(FileOutputFormat.COMPRESS, false); - String compressionType = conf.get(FileOutputFormat.COMPRESS_TYPE, - DEFAULT_TYPE.toString()); - String compressionCodec = conf.get(FileOutputFormat.COMPRESS_CODEC, - DEFAULT_CODEC.getName()); + String compressionType = conf.get(FileOutputFormat.COMPRESS_TYPE, DEFAULT_TYPE.toString()); + String compressionCodec = conf.get(FileOutputFormat.COMPRESS_CODEC, DEFAULT_CODEC.getName()); DelegationToken protoToken = null; if (userToken != null) { protoToken = DelegationToken.newBuilder() - .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) - .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); - } - LOG.info("compressed=" + compressed - + ", compression type=" + compressionType - + ", compression codec=" + compressionCodec - + ", userToken=" + userToken); + .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) + .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) + .setKind(userToken.getKind().toString()).setService(userToken.getService().toString()) + .build(); + } + LOG.info("compressed=" + compressed + ", compression type=" + compressionType + + ", compression codec=" + compressionCodec + ", userToken=" + userToken); ExportProtos.ExportRequest.Builder builder = ExportProtos.ExportRequest.newBuilder() - .setScan(ProtobufUtil.toScan(scan)) - .setOutputPath(dir.toString()) - .setCompressed(compressed) - .setCompressCodec(compressionCodec) - .setCompressType(compressionType); + .setScan(ProtobufUtil.toScan(scan)).setOutputPath(dir.toString()).setCompressed(compressed) + .setCompressCodec(compressionCodec).setCompressType(compressionType); if (protoToken != null) { builder.setFsToken(protoToken); } @@ -328,11 +314,11 @@ public Iterable getServices() { @Override public void export(RpcController controller, ExportProtos.ExportRequest request, - RpcCallback done) { + RpcCallback done) { Region region = env.getRegion(); Configuration conf = HBaseConfiguration.create(env.getConfiguration()); conf.setStrings("io.serializations", conf.get("io.serializations"), - ResultSerialization.class.getName()); + ResultSerialization.class.getName()); try { Scan scan = validateKey(region.getRegionInfo(), request); Token userToken = null; @@ -340,12 +326,11 @@ public void export(RpcController controller, ExportProtos.ExportRequest request, LOG.warn("Hadoop security is enable, but no found of user token"); } else if (userProvider.isHadoopSecurityEnabled()) { userToken = new 
Token(request.getFsToken().getIdentifier().toByteArray(), - request.getFsToken().getPassword().toByteArray(), - new Text(request.getFsToken().getKind()), - new Text(request.getFsToken().getService())); + request.getFsToken().getPassword().toByteArray(), + new Text(request.getFsToken().getKind()), new Text(request.getFsToken().getService())); } - ExportProtos.ExportResponse response = processData(region, conf, userProvider, - scan, userToken, getWriterOptions(conf, region.getRegionInfo(), request)); + ExportProtos.ExportResponse response = processData(region, conf, userProvider, scan, + userToken, getWriterOptions(conf, region.getRegionInfo(), request)); done.run(response); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -358,14 +343,12 @@ private Scan validateKey(final RegionInfo region, final ExportProtos.ExportReque Scan scan = ProtobufUtil.toScan(request.getScan()); byte[] regionStartKey = region.getStartKey(); byte[] originStartKey = scan.getStartRow(); - if (originStartKey == null - || Bytes.compareTo(originStartKey, regionStartKey) < 0) { + if (originStartKey == null || Bytes.compareTo(originStartKey, regionStartKey) < 0) { scan.withStartRow(regionStartKey); } byte[] regionEndKey = region.getEndKey(); byte[] originEndKey = scan.getStopRow(); - if (originEndKey == null - || Bytes.compareTo(originEndKey, regionEndKey) > 0) { + if (originEndKey == null || Bytes.compareTo(originEndKey, regionEndKey) > 0) { scan.withStartRow(regionEndKey); } return scan; @@ -423,8 +406,8 @@ void checkScannerClose(final InternalScanner s) throws IOException { } } - boolean preScannerNext(final InternalScanner s, - final List results, final int limit) throws IOException { + boolean preScannerNext(final InternalScanner s, final List results, final int limit) + throws IOException { if (region.getCoprocessorHost() == null) { return false; } else { @@ -433,9 +416,8 @@ boolean preScannerNext(final InternalScanner s, } } - boolean postScannerNext(final InternalScanner s, - final List results, final int limit, boolean hasMore) - throws IOException { + boolean postScannerNext(final InternalScanner s, final List results, final int limit, + boolean hasMore) throws IOException { if (region.getCoprocessorHost() == null) { return false; } else { @@ -447,15 +429,13 @@ boolean postScannerNext(final InternalScanner s, private static class SecureWriter implements Closeable { private final PrivilegedWriter privilegedWriter; - SecureWriter(final Configuration conf, final UserProvider userProvider, - final Token userToken, final List opts) - throws IOException { + SecureWriter(final Configuration conf, final UserProvider userProvider, final Token userToken, + final List opts) throws IOException { User user = getActiveUser(userProvider, userToken); try { SequenceFile.Writer sequenceFileWriter = - user.runAs((PrivilegedExceptionAction) () -> - SequenceFile.createWriter(conf, - opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); + user.runAs((PrivilegedExceptionAction) () -> SequenceFile + .createWriter(conf, opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); privilegedWriter = new PrivilegedWriter(user, sequenceFileWriter); } catch (InterruptedException e) { throw new IOException(e); @@ -483,8 +463,7 @@ public void close() throws IOException { } } - private static class PrivilegedWriter implements PrivilegedExceptionAction, - Closeable { + private static class PrivilegedWriter implements PrivilegedExceptionAction, Closeable { private final User user; private final 
SequenceFile.Writer out; private Object key; @@ -541,11 +520,8 @@ public long getCellCount() { @Override public String toString() { StringBuilder builder = new StringBuilder(35); - return builder.append("rowCount=") - .append(rowCount) - .append(", cellCount=") - .append(cellCount) - .toString(); + return builder.append("rowCount=").append(rowCount).append(", cellCount=").append(cellCount) + .toString(); } } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java index e2f036043407..3d7968837216 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java index 587fd70753f6..4bc4de34adbc 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ public class TestRpcControllerFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcControllerFactory.class); + HBaseClassTestRule.forClass(TestRpcControllerFactory.class); public static class StaticRpcControllerFactory extends RpcControllerFactory { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java index 9dd8e243bdb8..48205f697ff4 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java @@ -43,7 +43,7 @@ * The aggregation implementation at a region. 
*/ public class ColumnAggregationEndpoint extends ColumnAggregationService - implements RegionCoprocessor { + implements RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(ColumnAggregationEndpoint.class); private RegionCoprocessorEnvironment env = null; @@ -55,7 +55,7 @@ public Iterable getServices() { @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -71,8 +71,8 @@ public void sum(RpcController controller, SumRequest request, RpcCallback getServices() { @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -74,7 +74,7 @@ public void stop(CoprocessorEnvironment env) throws IOException { @Override public void sum(RpcController controller, ColumnAggregationNullResponseSumRequest request, - RpcCallback done) { + RpcCallback done) { // aggregate at each region Scan scan = new Scan(); // Family is required in pb. Qualifier is not. @@ -122,9 +122,8 @@ public void sum(RpcController controller, ColumnAggregationNullResponseSumReques } } } - done.run(ColumnAggregationNullResponseSumResponse.newBuilder().setSum(sumResult) - .build()); - LOG.info("Returning sum " + sumResult + " for region " + - Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName())); + done.run(ColumnAggregationNullResponseSumResponse.newBuilder().setSum(sumResult).build()); + LOG.info("Returning sum " + sumResult + " for region " + + Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName())); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java index 1ab9b5eca5e0..ccff20ac7ad6 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java @@ -43,13 +43,13 @@ import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.ColumnAggregationWithErrorsProtos.ColumnAggregationWithErrorsSumResponse; /** - * Test coprocessor endpoint that always throws a {@link DoNotRetryIOException} for requests on - * the last region in the table. This allows tests to ensure correct error handling of - * coprocessor endpoints throwing exceptions. + * Test coprocessor endpoint that always throws a {@link DoNotRetryIOException} for requests on the + * last region in the table. This allows tests to ensure correct error handling of coprocessor + * endpoints throwing exceptions. 
*/ public class ColumnAggregationEndpointWithErrors - extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors - implements RegionCoprocessor { + extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors + implements RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(ColumnAggregationEndpointWithErrors.class); @@ -63,7 +63,7 @@ public Iterable getServices() { @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -76,7 +76,7 @@ public void stop(CoprocessorEnvironment env) throws IOException { @Override public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request, - RpcCallback done) { + RpcCallback done) { // aggregate at each region Scan scan = new Scan(); // Family is required in pb. Qualifier is not. diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java index 65130cfbb0e9..3f6a815b876d 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.coprocessor; - import java.io.IOException; import java.util.Collections; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -42,8 +41,9 @@ * service methods. For internal use by unit tests only. 
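The TestAsyncCoprocessorEndpoint hunks further down call this service through AsyncAdmin.coprocessorService; spelled out with the generic type witness that was lost in this copy of the patch, the echo call reads roughly as follows (admin is an AsyncAdmin, and the request/response types come from the test protos):

TestProtos.EchoRequestProto request =
    TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build();
TestProtos.EchoResponseProto response =
    admin.<TestRpcServiceProtos.TestProtobufRpcProto.Stub, TestProtos.EchoResponseProto> coprocessorService(
        TestRpcServiceProtos.TestProtobufRpcProto::newStub,
        (stub, controller, done) -> stub.echo(controller, request, done))
      .get();
assertEquals("hello", response.getMessage());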
*/ public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobufRpcProto - implements MasterCoprocessor, RegionCoprocessor { - public ProtobufCoprocessorService() {} + implements MasterCoprocessor, RegionCoprocessor { + public ProtobufCoprocessorService() { + } @Override public Iterable getServices() { @@ -52,34 +52,34 @@ public Iterable getServices() { @Override public void ping(RpcController controller, TestProtos.EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { done.run(TestProtos.EmptyResponseProto.getDefaultInstance()); } @Override public void echo(RpcController controller, TestProtos.EchoRequestProto request, - RpcCallback done) { + RpcCallback done) { String message = request.getMessage(); done.run(TestProtos.EchoResponseProto.newBuilder().setMessage(message).build()); } @Override public void error(RpcController controller, TestProtos.EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, new IOException("Test exception")); done.run(null); } @Override public void pause(RpcController controller, PauseRequestProto request, - RpcCallback done) { + RpcCallback done) { Threads.sleepWithoutInterrupt(request.getMs()); done.run(EmptyResponseProto.getDefaultInstance()); } @Override public void addr(RpcController controller, EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { done.run(AddrResponseProto.newBuilder() .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build()); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java index 584c1a4d5565..2bbb2cb2c85c 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java @@ -80,10 +80,9 @@ public void testMasterCoprocessorService() throws Exception { TestProtos.EchoRequestProto request = TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); TestProtos.EchoResponseProto response = - admin - . - coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, - (s, c, done) -> s.echo(c, request, done)).get(); + admin. coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto::newStub, + (s, c, done) -> s.echo(c, request, done)).get(); assertEquals("hello", response.getMessage()); } @@ -91,10 +90,9 @@ public void testMasterCoprocessorService() throws Exception { public void testMasterCoprocessorError() throws Exception { TestProtos.EmptyRequestProto emptyRequest = TestProtos.EmptyRequestProto.getDefaultInstance(); try { - admin - . - coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, - (s, c, done) -> s.error(c, emptyRequest, done)).get(); + admin. coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto::newStub, + (s, c, done) -> s.error(c, emptyRequest, done)).get(); fail("Should have thrown an exception"); } catch (Exception e) { } @@ -106,11 +104,9 @@ public void testRegionServerCoprocessorService() throws Exception { DummyRegionServerEndpointProtos.DummyRequest request = DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); DummyRegionServerEndpointProtos.DummyResponse response = - admin - . 
coprocessorService( - DummyRegionServerEndpointProtos.DummyService::newStub, - (s, c, done) -> s.dummyCall(c, request, done), serverName).get(); + admin. coprocessorService( + DummyRegionServerEndpointProtos.DummyService::newStub, + (s, c, done) -> s.dummyCall(c, request, done), serverName).get(); assertEquals(DUMMY_VALUE, response.getValue()); } @@ -120,11 +116,9 @@ public void testRegionServerCoprocessorServiceError() throws Exception { DummyRegionServerEndpointProtos.DummyRequest request = DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); try { - admin - . coprocessorService( - DummyRegionServerEndpointProtos.DummyService::newStub, - (s, c, done) -> s.dummyThrow(c, request, done), serverName).get(); + admin. coprocessorService( + DummyRegionServerEndpointProtos.DummyService::newStub, + (s, c, done) -> s.dummyThrow(c, request, done), serverName).get(); fail("Should have thrown an exception"); } catch (Exception e) { assertTrue(e.getCause() instanceof RetriesExhaustedException); @@ -133,8 +127,9 @@ DummyRegionServerEndpointProtos.DummyResponse> coprocessorService( } public static class DummyRegionServerEndpoint extends DummyService - implements RegionServerCoprocessor { - public DummyRegionServerEndpoint() {} + implements RegionServerCoprocessor { + public DummyRegionServerEndpoint() { + } @Override public Iterable getServices() { @@ -156,8 +151,7 @@ public void dummyCall(RpcController controller, DummyRequest request, } @Override - public void dummyThrow(RpcController controller, - DummyRequest request, + public void dummyThrow(RpcController controller, DummyRequest request, RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, WHAT_TO_THROW); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java index 67e4ff1bb297..b0f975c80044 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ /** * TestEndpoint: test cases to verify the batch execution of coprocessor Endpoint */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestBatchCoprocessorEndpoint { @ClassRule @@ -69,8 +69,7 @@ public class TestBatchCoprocessorEndpoint { private static final Logger LOG = LoggerFactory.getLogger(TestBatchCoprocessorEndpoint.class); - private static final TableName TEST_TABLE = - TableName.valueOf("TestTable"); + private static final TableName TEST_TABLE = TableName.valueOf("TestTable"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -87,17 +86,17 @@ public static void setupBeforeClass() throws Exception { // set configure to indicate which cp should be loaded Configuration conf = util.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), - ProtobufCoprocessorService.class.getName(), - ColumnAggregationEndpointWithErrors.class.getName(), - ColumnAggregationEndpointNullResponse.class.getName()); + org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), + ProtobufCoprocessorService.class.getName(), + ColumnAggregationEndpointWithErrors.class.getName(), + ColumnAggregationEndpointNullResponse.class.getName()); conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); + ProtobufCoprocessorService.class.getName()); util.startMiniCluster(2); Admin admin = util.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); admin.createTable(tableDescriptor, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] }); util.waitUntilAllRegionsAssigned(TEST_TABLE); admin.close(); @@ -120,24 +119,21 @@ public static void tearDownAfterClass() throws Exception { public void testAggregationNullResponse() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); ColumnAggregationNullResponseSumRequest.Builder builder = - ColumnAggregationNullResponseSumRequest - .newBuilder(); + ColumnAggregationNullResponseSumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(TEST_FAMILY)); if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { builder.setQualifier(ByteString.copyFrom(TEST_QUALIFIER)); } - Map results = - table.batchCoprocessorService( - ColumnAggregationServiceNullResponse.getDescriptor().findMethodByName("sum"), - builder.build(), ROWS[0], ROWS[ROWS.length - 1], - ColumnAggregationNullResponseSumResponse.getDefaultInstance()); + Map results = table.batchCoprocessorService( + ColumnAggregationServiceNullResponse.getDescriptor().findMethodByName("sum"), builder.build(), + ROWS[0], ROWS[ROWS.length - 1], + ColumnAggregationNullResponseSumResponse.getDefaultInstance()); int sumResult = 0; int expectedResult = 0; - for (Map.Entry e : - results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + for (Map.Entry e : results.entrySet()) { + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + 
Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < rowSeperator2; i++) { @@ -156,29 +152,29 @@ private static byte[][] makeN(byte[] base, int n) { } private Map sum(final Table table, final byte[] family, - final byte[] qualifier, final byte[] start, final byte[] end) throws ServiceException, - Throwable { - ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest - .newBuilder(); + final byte[] qualifier, final byte[] start, final byte[] end) + throws ServiceException, Throwable { + ColumnAggregationProtos.SumRequest.Builder builder = + ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(family)); if (qualifier != null && qualifier.length > 0) { builder.setQualifier(ByteString.copyFrom(qualifier)); } return table.batchCoprocessorService( - ColumnAggregationProtos.ColumnAggregationService.getDescriptor().findMethodByName("sum"), - builder.build(), start, end, ColumnAggregationProtos.SumResponse.getDefaultInstance()); + ColumnAggregationProtos.ColumnAggregationService.getDescriptor().findMethodByName("sum"), + builder.build(), start, end, ColumnAggregationProtos.SumResponse.getDefaultInstance()); } @Test public void testAggregationWithReturnValue() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], - ROWS[ROWS.length - 1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < ROWSIZE; i++) { @@ -189,13 +185,12 @@ public void testAggregationWithReturnValue() throws Throwable { results.clear(); // scan: for region 2 and region 3 - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], - ROWS[ROWS.length - 1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -208,13 +203,13 @@ public void testAggregationWithReturnValue() throws Throwable { @Test public void testAggregation() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[0], ROWS[ROWS.length - 1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < ROWSIZE; i++) { @@ -227,8 +222,8 @@ public void testAggregation() throws Throwable { sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for 
region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -241,14 +236,10 @@ public void testAggregation() throws Throwable { @Test public void testAggregationWithErrors() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - final Map results = - Collections.synchronizedMap( - new TreeMap( - Bytes.BYTES_COMPARATOR - )); + final Map results = Collections.synchronizedMap( + new TreeMap(Bytes.BYTES_COMPARATOR)); ColumnAggregationWithErrorsSumRequest.Builder builder = - ColumnAggregationWithErrorsSumRequest - .newBuilder(); + ColumnAggregationWithErrorsSumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(TEST_FAMILY)); if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { builder.setQualifier(ByteString.copyFrom(TEST_QUALIFIER)); @@ -257,18 +248,18 @@ public void testAggregationWithErrors() throws Throwable { boolean hasError = false; try { table.batchCoprocessorService( - ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors.getDescriptor() - .findMethodByName("sum"), - builder.build(), ROWS[0], ROWS[ROWS.length - 1], - ColumnAggregationWithErrorsSumResponse.getDefaultInstance(), - new Batch.Callback() { + ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors.getDescriptor() + .findMethodByName("sum"), + builder.build(), ROWS[0], ROWS[ROWS.length - 1], + ColumnAggregationWithErrorsSumResponse.getDefaultInstance(), + new Batch.Callback() { - @Override - public void update(byte[] region, byte[] row, - ColumnAggregationWithErrorsSumResponse result) { - results.put(region, result); - } - }); + @Override + public void update(byte[] region, byte[] row, + ColumnAggregationWithErrorsSumResponse result) { + results.put(region, result); + } + }); } catch (Throwable t) { LOG.info("Exceptions in coprocessor service", t); hasError = true; @@ -277,8 +268,8 @@ public void update(byte[] region, byte[] row, int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < rowSeperator2; i++) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 705bf626f5e2..56e10f8ae092 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -69,7 +69,7 @@ /** * Test coprocessors class loading. */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestClassLoading { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -96,17 +96,17 @@ public Optional getMasterObserver() { static final String cpName6 = "TestCP6"; private static Class regionCoprocessor1 = ColumnAggregationEndpoint.class; - // TOOD: Fix the import of this handler. It is coming in from a package that is far away. + // TOOD: Fix the import of this handler. It is coming in from a package that is far away. 
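The TestClassLoading hunks below attach coprocessors to tables through a COPROCESSOR$N table attribute of the form "path|class|priority|key=value". A small sketch of that wiring, with a hypothetical jar path and class name (the builder classes come from org.apache.hadoop.hbase.client, and admin is an Admin instance):

// Attach a coprocessor packaged in a jar (local file or HDFS) to a new table.
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("cp_table"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("test"))
    .setValue("COPROCESSOR$1",
        "hdfs:///user/hbase/TestCP1.jar|TestCP1|" + Coprocessor.PRIORITY_USER + "|k1=v1")
    .build();
admin.createTable(td);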
private static Class regionCoprocessor2 = TestServerCustomProtocol.PingHandler.class; private static Class regionServerCoprocessor = SampleRegionWALCoprocessor.class; private static Class masterCoprocessor = TestMasterCoprocessor.class; private static final String[] regionServerSystemCoprocessors = - new String[]{ regionServerCoprocessor.getSimpleName() }; + new String[] { regionServerCoprocessor.getSimpleName() }; - private static final String[] masterRegionServerSystemCoprocessors = new String[] { - regionCoprocessor1.getSimpleName(), MultiRowMutationEndpoint.class.getSimpleName(), - regionServerCoprocessor.getSimpleName() }; + private static final String[] masterRegionServerSystemCoprocessors = + new String[] { regionCoprocessor1.getSimpleName(), + MultiRowMutationEndpoint.class.getSimpleName(), regionServerCoprocessor.getSimpleName() }; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -114,19 +114,15 @@ public static void setUpBeforeClass() throws Exception { // regionCoprocessor1 will be loaded on all regionservers, since it is // loaded for any tables (user or meta). - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - regionCoprocessor1.getName()); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, regionCoprocessor1.getName()); // regionCoprocessor2 will be loaded only on regionservers that serve a // user table region. Therefore, if there are no user tables loaded, // this coprocessor will not be loaded on any regionserver. - conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - regionCoprocessor2.getName()); + conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, regionCoprocessor2.getName()); - conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - regionServerCoprocessor.getName()); - conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - masterCoprocessor.getName()); + conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, regionServerCoprocessor.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, masterCoprocessor.getName()); TEST_UTIL.startMiniCluster(1); cluster = TEST_UTIL.getDFSCluster(); } @@ -137,11 +133,9 @@ public static void tearDownAfterClass() throws Exception { } static File buildCoprocessorJar(String className) throws Exception { - String code = - "import org.apache.hadoop.hbase.coprocessor.*;" + - "public class " + className + " implements RegionCoprocessor {}"; - return ClassLoaderTestHelper.buildJar( - TEST_UTIL.getDataTestDir().toString(), className, code); + String code = "import org.apache.hadoop.hbase.coprocessor.*;" + "public class " + className + + " implements RegionCoprocessor {}"; + return ClassLoaderTestHelper.buildJar(TEST_UTIL.getDataTestDir().toString(), className, code); } @Test @@ -155,32 +149,26 @@ public void testClassLoadingFromHDFS() throws Exception { // copy the jars into dfs fs.copyFromLocalFile(new Path(jarFile1.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + - jarFile1.getName(); + String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + jarFile1.getName(); Path pathOnHDFS1 = new Path(jarFileOnHDFS1); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(pathOnHDFS1)); + assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS1)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1); fs.copyFromLocalFile(new Path(jarFile2.getPath()), - new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS2 = 
fs.getUri().toString() + Path.SEPARATOR + - jarFile2.getName(); + new Path(fs.getUri().toString() + Path.SEPARATOR)); + String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR + jarFile2.getName(); Path pathOnHDFS2 = new Path(jarFileOnHDFS2); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(pathOnHDFS2)); + assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS2)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2); // create a table that references the coprocessors TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); // without configuration values - tdb.setValue("COPROCESSOR$1", jarFileOnHDFS1 + "|" + cpName1 - + "|" + Coprocessor.PRIORITY_USER); + tdb.setValue("COPROCESSOR$1", jarFileOnHDFS1 + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER); // with configuration values - tdb.setValue("COPROCESSOR$2", jarFileOnHDFS2 + "|" + cpName2 - + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); + tdb.setValue("COPROCESSOR$2", + jarFileOnHDFS2 + "|" + cpName2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { if (admin.isTableEnabled(tableName)) { @@ -189,19 +177,18 @@ public void testClassLoadingFromHDFS() throws Exception { admin.deleteTable(tableName); } CoprocessorClassLoader.clearCache(); - byte[] startKey = {10, 63}; - byte[] endKey = {12, 43}; + byte[] startKey = { 10, 63 }; + byte[] endKey = { 12, 43 }; TableDescriptor tableDescriptor = tdb.build(); admin.createTable(tableDescriptor, startKey, endKey, 4); waitForTable(tableDescriptor.getTableName()); // verify that the coprocessors were loaded - boolean foundTableRegion=false; + boolean foundTableRegion = false; boolean found1 = true, found2 = true, found2_k1 = true, found2_k2 = true, found2_k3 = true; Map> regionsActiveClassLoaders = new HashMap<>(); SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: - hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { foundTableRegion = true; CoprocessorEnvironment env; @@ -219,8 +206,8 @@ public void testClassLoadingFromHDFS() throws Exception { found2_k2 = false; found2_k3 = false; } - regionsActiveClassLoaders - .put(region, ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders()); + regionsActiveClassLoaders.put(region, + ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders()); } } @@ -235,17 +222,16 @@ public void testClassLoadingFromHDFS() throws Exception { CoprocessorClassLoader.getIfCached(pathOnHDFS1)); assertNotNull(jarFileOnHDFS2 + " was not cached", CoprocessorClassLoader.getIfCached(pathOnHDFS2)); - //two external jar used, should be one classloader per jar - assertEquals("The number of cached classloaders should be equal to the number" + - " of external jar files", + // two external jar used, should be one classloader per jar + assertEquals( + "The number of cached classloaders should be equal to the number" + " of external jar files", 2, CoprocessorClassLoader.getAllCached().size()); - //check if region active classloaders are shared across all RS regions - Set externalClassLoaders = new HashSet<>( - 
CoprocessorClassLoader.getAllCached()); + // check if region active classloaders are shared across all RS regions + Set externalClassLoaders = new HashSet<>(CoprocessorClassLoader.getAllCached()); for (Map.Entry> regionCP : regionsActiveClassLoaders.entrySet()) { assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached." - + " ClassLoader Cache:" + externalClassLoaders - + " Region ClassLoaders:" + regionCP.getValue(), + + " ClassLoader Cache:" + externalClassLoaders + " Region ClassLoaders:" + + regionCP.getValue(), externalClassLoaders.containsAll(regionCP.getValue())); } } @@ -261,10 +247,9 @@ public void testClassLoadingFromLocalFS() throws Exception { // create a table that references the jar TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName3)); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); - tdb.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" + - Coprocessor.PRIORITY_USER); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); + tdb.setValue("COPROCESSOR$1", + getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER); TableDescriptor tableDescriptor = tdb.build(); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptor); @@ -273,7 +258,7 @@ public void testClassLoadingFromLocalFS() throws Exception { // verify that the coprocessor was loaded boolean found = false; SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) { found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null); } @@ -288,10 +273,9 @@ public void testPrivateClassLoader() throws Exception { // create a table that references the jar TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName4)); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); - tdb.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" + - Coprocessor.PRIORITY_USER); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); + tdb.setValue("COPROCESSOR$1", + getLocalPath(jarFile) + "|" + cpName4 + "|" + Coprocessor.PRIORITY_USER); TableDescriptor tableDescriptor = tdb.build(); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptor); @@ -300,7 +284,7 @@ public void testPrivateClassLoader() throws Exception { // verify that the coprocessor was loaded correctly boolean found = false; SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName4)) { Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4); if (cp != null) { @@ -328,17 +312,14 @@ public void testHBase3810() throws Exception { String cpKey2 = " Coprocessor$2 "; String cpKey3 = " coprocessor$03 "; - String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + - Coprocessor.PRIORITY_USER; + String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER; String cpValue2 = getLocalPath(jarFile2) + " | " + cpName2 + " 
| "; // load from default class loader - String cpValue3 = - " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; + String cpValue3 = " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; // create a table that references the jar TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); // add 3 coprocessors by setting htd attributes directly. tdb.setValue(cpKey1, cpValue1); @@ -346,24 +327,18 @@ public void testHBase3810() throws Exception { tdb.setValue(cpKey3, cpValue3); // add 2 coprocessor by using new htd.setCoprocessor() api - CoprocessorDescriptor coprocessorDescriptor = CoprocessorDescriptorBuilder - .newBuilder(cpName5) - .setJarPath(new Path(getLocalPath(jarFile5)).toString()) - .setPriority(Coprocessor.PRIORITY_USER) - .setProperties(Collections.emptyMap()) - .build(); + CoprocessorDescriptor coprocessorDescriptor = CoprocessorDescriptorBuilder.newBuilder(cpName5) + .setJarPath(new Path(getLocalPath(jarFile5)).toString()) + .setPriority(Coprocessor.PRIORITY_USER).setProperties(Collections.emptyMap()).build(); tdb.setCoprocessor(coprocessorDescriptor); Map kvs = new HashMap<>(); kvs.put("k1", "v1"); kvs.put("k2", "v2"); kvs.put("k3", "v3"); - coprocessorDescriptor = CoprocessorDescriptorBuilder - .newBuilder(cpName6) - .setJarPath(new Path(getLocalPath(jarFile6)).toString()) - .setPriority(Coprocessor.PRIORITY_USER) - .setProperties(kvs) - .build(); + coprocessorDescriptor = CoprocessorDescriptorBuilder.newBuilder(cpName6) + .setJarPath(new Path(getLocalPath(jarFile6)).toString()) + .setPriority(Coprocessor.PRIORITY_USER).setProperties(kvs).build(); tdb.setCoprocessor(coprocessorDescriptor); Admin admin = TEST_UTIL.getAdmin(); @@ -379,23 +354,17 @@ public void testHBase3810() throws Exception { waitForTable(tableDescriptor.getTableName()); // verify that the coprocessor was loaded - boolean found_2 = false, found_1 = false, found_3 = false, - found_5 = false, found_6 = false; - boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, - found6_k4 = false; + boolean found_2 = false, found_1 = false, found_3 = false, found_5 = false, found_6 = false; + boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, found6_k4 = false; SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { - found_1 = found_1 || - (region.getCoprocessorHost().findCoprocessor(cpName1) != null); - found_2 = found_2 || - (region.getCoprocessorHost().findCoprocessor(cpName2) != null); - found_3 = found_3 || - (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") - != null); - found_5 = found_5 || - (region.getCoprocessorHost().findCoprocessor(cpName5) != null); + found_1 = found_1 || (region.getCoprocessorHost().findCoprocessor(cpName1) != null); + found_2 = found_2 || (region.getCoprocessorHost().findCoprocessor(cpName2) != null); + found_3 = found_3 + || (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") != null); + found_5 = found_5 || (region.getCoprocessorHost().findCoprocessor(cpName5) != null); CoprocessorEnvironment env = 
region.getCoprocessorHost().findCoprocessorEnvironment(cpName6); @@ -438,28 +407,23 @@ void loadingClassFromLibDirInJar(String libPrefix) throws Exception { File innerJarFile2 = buildCoprocessorJar(cpName2); File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar"); - ClassLoaderTestHelper.addJarFilesToJar( - outerJarFile, libPrefix, innerJarFile1, innerJarFile2); + ClassLoaderTestHelper.addJarFilesToJar(outerJarFile, libPrefix, innerJarFile1, innerJarFile2); // copy the jars into dfs fs.copyFromLocalFile(new Path(outerJarFile.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + - outerJarFile.getName(); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(new Path(jarFileOnHDFS))); + String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + outerJarFile.getName(); + assertTrue("Copy jar file to HDFS failed.", fs.exists(new Path(jarFileOnHDFS))); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS); // create a table that references the coprocessors TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tableName); - tdb.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("test")).build()); - // without configuration values - tdb.setValue("COPROCESSOR$1", jarFileOnHDFS + "|" + cpName1 - + "|" + Coprocessor.PRIORITY_USER); - // with configuration values - tdb.setValue("COPROCESSOR$2", jarFileOnHDFS + "|" + cpName2 - + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); + tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")).build()); + // without configuration values + tdb.setValue("COPROCESSOR$1", jarFileOnHDFS + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER); + // with configuration values + tdb.setValue("COPROCESSOR$2", + jarFileOnHDFS + "|" + cpName2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { if (admin.isTableEnabled(tableName)) { @@ -473,10 +437,9 @@ void loadingClassFromLibDirInJar(String libPrefix) throws Exception { waitForTable(tableDescriptor.getTableName()); // verify that the coprocessors were loaded - boolean found1 = false, found2 = false, found2_k1 = false, - found2_k2 = false, found2_k3 = false; + boolean found1 = false, found2 = false, found2_k1 = false, found2_k2 = false, found2_k3 = false; SingleProcessHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { CoprocessorEnvironment env; env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1); @@ -509,24 +472,21 @@ public void testRegionServerCoprocessorsReported() throws Exception { } /** - * return the subset of all regionservers - * (actually returns set of ServerLoads) - * which host some region in a given table. - * used by assertAllRegionServers() below to - * test reporting of loaded coprocessors. + * return the subset of all regionservers (actually returns set of ServerLoads) which host some + * region in a given table. used by assertAllRegionServers() below to test reporting of loaded + * coprocessors. * @param tableName : given table. * @return subset of all servers. 
*/ Map serversForTable(String tableName) { Map serverLoadHashMap = new HashMap<>(); - for(Map.Entry server: - TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager(). - getOnlineServers().entrySet()) { - for(Map.Entry region: - server.getValue().getRegionMetrics().entrySet()) { + for (Map.Entry server : TEST_UTIL.getMiniHBaseCluster().getMaster() + .getServerManager().getOnlineServers().entrySet()) { + for (Map.Entry region : server.getValue().getRegionMetrics() + .entrySet()) { if (region.getValue().getNameAsString().equals(tableName)) { // this server hosts a region of tableName: add this server.. - serverLoadHashMap.put(server.getKey(),server.getValue()); + serverLoadHashMap.put(server.getKey(), server.getValue()); // .. and skip the rest of the regions that it hosts. break; } @@ -547,13 +507,12 @@ void assertAllRegionServers(String tableName) throws InterruptedException { } for (int i = 0; i < 5; i++) { boolean any_failed = false; - for(Map.Entry server: servers.entrySet()) { + for (Map.Entry server : servers.entrySet()) { String[] actualCoprocessors = - server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]); + server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]); if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) { - LOG.debug("failed comparison: actual: " + - Arrays.toString(actualCoprocessors) + - " ; expected: " + Arrays.toString(expectedCoprocessors)); + LOG.debug("failed comparison: actual: " + Arrays.toString(actualCoprocessors) + + " ; expected: " + Arrays.toString(expectedCoprocessors)); any_failed = true; expectedCoprocessors = switchExpectedCoprocessors(expectedCoprocessors); break; @@ -584,11 +543,9 @@ public void testMasterCoprocessorsReported() { // HBASE 4070: Improve region server metrics to report loaded coprocessors // to master: verify that the master is reporting the correct set of // loaded coprocessors. 
- final String loadedMasterCoprocessorsVerify = - "[" + masterCoprocessor.getSimpleName() + "]"; + final String loadedMasterCoprocessorsVerify = "[" + masterCoprocessor.getSimpleName() + "]"; String loadedMasterCoprocessors = - java.util.Arrays.toString( - TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors()); + java.util.Arrays.toString(TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors()); assertEquals(loadedMasterCoprocessorsVerify, loadedMasterCoprocessors); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java index d6d0d4ce43dd..b697b61de10f 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java @@ -68,7 +68,7 @@ /** * TestEndpoint: test cases to verify coprocessor Endpoint */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -76,8 +76,7 @@ public class TestCoprocessorEndpoint { private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorEndpoint.class); - private static final TableName TEST_TABLE = - TableName.valueOf("TestCoprocessorEndpoint"); + private static final TableName TEST_TABLE = TableName.valueOf("TestCoprocessorEndpoint"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -95,15 +94,15 @@ public static void setupBeforeClass() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), - ProtobufCoprocessorService.class.getName()); + org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), + ProtobufCoprocessorService.class.getName()); conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); + ProtobufCoprocessorService.class.getName()); util.startMiniCluster(2); Admin admin = util.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); admin.createTable(tableDescriptor, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] }); util.waitUntilAllRegionsAssigned(TEST_TABLE); @@ -121,19 +120,17 @@ public static void tearDownAfterClass() throws Exception { util.shutdownMiniCluster(); } - private Map sum(final Table table, final byte [] family, - final byte [] qualifier, final byte [] start, final byte [] end) - throws ServiceException, Throwable { - return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, - start, end, - new Batch.Call() { + private Map sum(final Table table, final byte[] family, final byte[] qualifier, + final byte[] start, final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, start, + end, new Batch.Call() { @Override public 
Long call(ColumnAggregationProtos.ColumnAggregationService instance) - throws IOException { + throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = - ColumnAggregationProtos.SumRequest.newBuilder(); + ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(UnsafeByteOperations.unsafeWrap(family)); if (qualifier != null && qualifier.length > 0) { builder.setQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); @@ -147,12 +144,12 @@ public Long call(ColumnAggregationProtos.ColumnAggregationService instance) @Test public void testAggregation() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[0], ROWS[ROWS.length-1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue(); } for (int i = 0; i < ROWSIZE; i++) { @@ -163,12 +160,11 @@ public void testAggregation() throws Throwable { results.clear(); // scan: for region 2 and region 3 - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[rowSeperator1], ROWS[ROWS.length-1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -183,22 +179,22 @@ public void testCoprocessorService() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); List regions; - try(RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { + try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { regions = rl.getAllRegionLocations(); } final TestProtos.EchoRequestProto request = TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); - final Map results = Collections.synchronizedMap( - new TreeMap(Bytes.BYTES_COMPARATOR)); + final Map results = + Collections.synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR)); try { // scan: for all regions final RpcController controller = new ServerRpcController(); - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[0], ROWS[ROWS.length - 1], + table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[0], + ROWS[ROWS.length - 1], new Batch.Call() { @Override - public TestProtos.EchoResponseProto call( - TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + public TestProtos.EchoResponseProto + call(TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); @@ -207,33 +203,31 @@ public TestProtos.EchoResponseProto call( LOG.debug("Batch.Call returning result " + response); return response; } - }, - new Batch.Callback() { + }, new Batch.Callback() { @Override public void update(byte[] 
region, byte[] row, TestProtos.EchoResponseProto result) { assertNotNull(result); assertEquals("hello", result.getMessage()); results.put(region, result.getMessage()); } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(3, results.size()); for (HRegionLocation info : regions) { - LOG.info("Region info is "+info.getRegion().getRegionNameAsString()); + LOG.info("Region info is " + info.getRegion().getRegionNameAsString()); assertTrue(results.containsKey(info.getRegion().getRegionName())); } results.clear(); // scan: for region 2 and region 3 - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[rowSeperator1], ROWS[ROWS.length - 1], + table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[rowSeperator1], + ROWS[ROWS.length - 1], new Batch.Call() { @Override - public TestProtos.EchoResponseProto call( - TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + public TestProtos.EchoResponseProto + call(TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); @@ -242,18 +236,16 @@ public TestProtos.EchoResponseProto call( LOG.debug("Batch.Call returning result " + response); return response; } - }, - new Batch.Callback() { + }, new Batch.Callback() { @Override public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { assertNotNull(result); assertEquals("hello", result.getMessage()); results.put(region, result.getMessage()); } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(2, results.size()); } finally { @@ -265,7 +257,7 @@ public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto resul public void testCoprocessorServiceNullResponse() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); List regions; - try(RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { + try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { regions = rl.getAllRegionLocations(); } @@ -275,28 +267,26 @@ public void testCoprocessorServiceNullResponse() throws Throwable { // scan: for all regions final RpcController controller = new ServerRpcController(); // test that null results are supported - Map results = - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[0], ROWS[ROWS.length - 1], - new Batch.Call() { - public String call(TestRpcServiceProtos.TestProtobufRpcProto instance) - throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.echo(controller, request, callback); - TestProtos.EchoResponseProto response = callback.get(); - LOG.debug("Batch.Call got result " + response); - return null; - } + Map results = table.coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[0], ROWS[ROWS.length - 1], + new Batch.Call() { + public String call(TestRpcServiceProtos.TestProtobufRpcProto instance) + throws IOException { + 
CoprocessorRpcUtils.BlockingRpcCallback callback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.echo(controller, request, callback); + TestProtos.EchoResponseProto response = callback.get(); + LOG.debug("Batch.Call got result " + response); + return null; } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(3, results.size()); for (HRegionLocation region : regions) { RegionInfo info = region.getRegion(); - LOG.info("Region info is "+info.getRegionNameAsString()); + LOG.info("Region info is " + info.getRegionNameAsString()); assertTrue(results.containsKey(info.getRegionName())); assertNull(results.get(info.getRegionName())); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java index b183d6b3cb29..70745e26f782 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java @@ -33,6 +33,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; + import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; import java.util.List; @@ -84,10 +85,12 @@ import org.junit.rules.TestRule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; + import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoRequestProto; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoResponseProto; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto; @@ -95,29 +98,28 @@ /** * Test cases to verify tracing coprocessor Endpoint execution */ -@Category({ CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorEndpointTracing { private static final Logger logger = - LoggerFactory.getLogger(TestCoprocessorEndpointTracing.class); + LoggerFactory.getLogger(TestCoprocessorEndpointTracing.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorEndpointTracing.class); + HBaseClassTestRule.forClass(TestCoprocessorEndpointTracing.class); private static final OpenTelemetryClassRule otelClassRule = OpenTelemetryClassRule.create(); - private static final MiniClusterRule miniclusterRule = MiniClusterRule.newBuilder() - .setConfiguration(() -> { - final Configuration conf = HBaseConfiguration.create(); - conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); - conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); - return conf; - }) - .build(); + private static final MiniClusterRule miniclusterRule = + 
MiniClusterRule.newBuilder().setConfiguration(() -> { + final Configuration conf = HBaseConfiguration.create(); + conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + ProtobufCoprocessorService.class.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + ProtobufCoprocessorService.class.getName()); + return conf; + }).build(); private static final ConnectionRule connectionRule = - ConnectionRule.createAsyncConnectionRule(miniclusterRule::createAsyncConnection); + ConnectionRule.createAsyncConnectionRule(miniclusterRule::createAsyncConnection); private static final class Setup extends ExternalResource { @Override @@ -126,20 +128,18 @@ protected void before() throws Throwable { final AsyncConnection connection = connectionRule.getAsyncConnection(); final AsyncAdmin admin = connection.getAdmin(); final TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); admin.createTable(tableDescriptor).get(); util.waitUntilAllRegionsAssigned(TEST_TABLE); } } @ClassRule - public static final TestRule testRule = RuleChain.outerRule(otelClassRule) - .around(miniclusterRule) - .around(connectionRule) - .around(new Setup()); + public static final TestRule testRule = RuleChain.outerRule(otelClassRule).around(miniclusterRule) + .around(connectionRule).around(new Setup()); private static final TableName TEST_TABLE = - TableName.valueOf(TestCoprocessorEndpointTracing.class.getSimpleName()); + TableName.valueOf(TestCoprocessorEndpointTracing.class.getSimpleName()); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); @Rule @@ -155,42 +155,41 @@ public void traceAsyncTableEndpoint() { final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final CompletableFuture> future = new CompletableFuture<>(); final AsyncTable.CoprocessorCallback callback = - new AsyncTable.CoprocessorCallback() { - final ConcurrentMap results = new ConcurrentHashMap<>(); - - @Override - public void onRegionComplete(RegionInfo region, EchoResponseProto resp) { - if (!future.isDone()) { - results.put(region.getRegionName(), resp.getMessage()); + new AsyncTable.CoprocessorCallback() { + final ConcurrentMap results = new ConcurrentHashMap<>(); + + @Override + public void onRegionComplete(RegionInfo region, EchoResponseProto resp) { + if (!future.isDone()) { + results.put(region.getRegionName(), resp.getMessage()); + } } - } - @Override - public void onRegionError(RegionInfo region, Throwable error) { - if (!future.isDone()) { - future.completeExceptionally(error); + @Override + public void onRegionError(RegionInfo region, Throwable error) { + if (!future.isDone()) { + future.completeExceptionally(error); + } } - } - @Override - public void onComplete() { - if (!future.isDone()) { - future.complete(results); + @Override + public void onComplete() { + if (!future.isDone()) { + future.complete(results); + } } - } - @Override - public void onError(Throwable error) { - if (!future.isDone()) { - future.completeExceptionally(error); + @Override + public void onError(Throwable error) { + if (!future.isDone()) { + future.completeExceptionally(error); + } } - } - }; + }; final Map results = TraceUtil.trace(() -> { table.coprocessorService(TestProtobufRpcProto::newStub, - (stub, controller, cb) -> stub.echo(controller, request, cb), 
callback) - .execute(); + (stub, controller, cb) -> stub.echo(controller, request, cb), callback).execute(); try { return future.get(); } catch (InterruptedException | ExecutionException e) { @@ -199,31 +198,21 @@ public void onError(Throwable error) { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - equalTo("hello")))); + assertThat(results.values(), everyItem(allOf(notNullValue(), equalTo("hello")))); final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -234,48 +223,36 @@ public void traceSyncTableEndpointCall() throws Exception { final RpcController controller = new ServerRpcController(); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); final Map results = TraceUtil.trace(() -> { try { - return table.coprocessorService(TestProtobufRpcProto.class, null, null, - t -> { - t.echo(controller, request, callback); - return callback.get(); - }); + return table.coprocessorService(TestProtobufRpcProto.class, null, null, t -> { + t.echo(controller, request, callback); + return callback.get(); + }); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - 
hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -286,52 +263,39 @@ public void traceSyncTableEndpointCallAndCallback() throws Exception { final RpcController controller = new ServerRpcController(); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); final ConcurrentMap results = new ConcurrentHashMap<>(); TraceUtil.trace(() -> { try { - table.coprocessorService(TestProtobufRpcProto.class, null, null, - t -> { - t.echo(controller, request, callback); - return callback.get(); - }, - (region, row, result) -> { - results.put(region, result); - }); + table.coprocessorService(TestProtobufRpcProto.class, null, null, t -> { + t.echo(controller, request, callback); + return callback.get(); + }, (region, row, result) -> { + results.put(region, result); + }); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + 
spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -344,7 +308,7 @@ public void traceSyncTableRegionCoprocessorRpcChannel() throws Exception { try { final CoprocessorRpcChannel channel = table.coprocessorService(new byte[] {}); final TestProtobufRpcProto.BlockingInterface service = - TestProtobufRpcProto.newBlockingStub(channel); + TestProtobufRpcProto.newBlockingStub(channel); return service.echo(null, request); } catch (Throwable t) { throw new RuntimeException(t); @@ -354,9 +318,7 @@ public void traceSyncTableRegionCoprocessorRpcChannel() throws Exception { assertEquals("hello", response.getMessage()); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); @@ -365,13 +327,10 @@ public void traceSyncTableRegionCoprocessorRpcChannel() throws Exception { * The Table instance isn't issuing a command here, it's not a table operation, so don't expect * there to be a span like `COPROC_EXEC table`. */ - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = + allOf(hasName(containsString("COPROC_EXEC")), hasParentSpanId(testSpan)); assertThat(spans, not(hasItem(tableOpMatcher))); } @@ -380,45 +339,34 @@ public void traceSyncTableBatchEndpoint() throws Exception { final Connection connection = connectionRule.getConnection(); try (final Table table = connection.getTable(TEST_TABLE)) { final Descriptors.MethodDescriptor descriptor = - TestProtobufRpcProto.getDescriptor().findMethodByName("echo"); + TestProtobufRpcProto.getDescriptor().findMethodByName("echo"); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final Map response = TraceUtil.trace(() -> { try { - return table.batchCoprocessorService( - descriptor, request, null, null, EchoResponseProto.getDefaultInstance()); + return table.batchCoprocessorService(descriptor, request, null, null, + EchoResponseProto.getDefaultInstance()); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(response); - assertThat(response.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(response.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + 
spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -427,7 +375,7 @@ public void traceSyncTableBatchEndpointCallback() throws Exception { final Connection connection = connectionRule.getConnection(); try (final Table table = connection.getTable(TEST_TABLE)) { final Descriptors.MethodDescriptor descriptor = - TestProtobufRpcProto.getDescriptor().findMethodByName("echo"); + TestProtobufRpcProto.getDescriptor().findMethodByName("echo"); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final ConcurrentMap results = new ConcurrentHashMap<>(); TraceUtil.trace(() -> { @@ -440,34 +388,23 @@ public void traceSyncTableBatchEndpointCallback() throws Exception { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -477,29 +414,22 @@ public void traceAsyncAdminEndpoint() throws Exception { final AsyncAdmin admin = connection.getAdmin(); final EchoRequestProto request = 
EchoRequestProto.newBuilder().setMessage("hello").build(); final ServiceCaller callback = - (stub, controller, cb) -> stub.echo(controller, request, cb); + (stub, controller, cb) -> stub.echo(controller, request, cb); - final String response = TraceUtil.tracedFuture( - () -> admin.coprocessorService(TestProtobufRpcProto::newStub, callback), - testName.getMethodName()) - .get() - .getMessage(); + final String response = TraceUtil + .tracedFuture(() -> admin.coprocessorService(TestProtobufRpcProto::newStub, callback), + testName.getMethodName()) + .get().getMessage(); assertEquals("hello", response); - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.MasterService/ExecMasterService"), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.MasterService/ExecMasterService"), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -508,7 +438,7 @@ public void traceSyncAdminEndpoint() throws Exception { final Connection connection = connectionRule.getConnection(); try (final Admin admin = connection.getAdmin()) { final TestProtobufRpcProto.BlockingInterface service = - TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); + TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final String response = TraceUtil.trace(() -> { try { @@ -520,27 +450,21 @@ public void traceSyncAdminEndpoint() throws Exception { assertEquals("hello", response); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.MasterService/ExecMasterService"), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.MasterService/ExecMasterService"), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } private void waitForAndLog(Matcher spanMatcher) { final Configuration conf = connectionRule.getAsyncConnection().getConfiguration(); - Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>( - otelClassRule::getSpans, hasItem(spanMatcher))); + Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), + new MatcherPredicate<>(otelClassRule::getSpans, hasItem(spanMatcher))); final List spans = otelClassRule.getSpans(); if (logger.isDebugEnabled()) { StringTraceRenderer renderer = new StringTraceRenderer(spans); diff --git 
a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java index 5a0827c2d75b..08b78505f0cd 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.ColumnAggregationProtos; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorTableEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -82,8 +82,8 @@ public void testCoprocessorTableEndpoint() throws Throwable { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)) - .setCoprocessor(ColumnAggregationEndpoint.class.getName()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)) + .setCoprocessor(ColumnAggregationEndpoint.class.getName()).build(); createTable(tableDescriptor); verifyTable(tableName); @@ -94,11 +94,11 @@ public void testDynamicCoprocessorTableEndpoint() throws Throwable { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); createTable(tableDescriptor); updateTable(TableDescriptorBuilder.newBuilder(tableDescriptor) - .setCoprocessor(ColumnAggregationEndpoint.class.getName()).build()); + .setCoprocessor(ColumnAggregationEndpoint.class.getName()).build()); verifyTable(tableName); } @@ -111,19 +111,18 @@ private static byte[][] makeN(byte[] base, int n) { return ret; } - private static Map sum(final Table table, final byte [] family, - final byte [] qualifier, final byte [] start, final byte [] end) + private static Map sum(final Table table, final byte[] family, + final byte[] qualifier, final byte[] start, final byte[] end) throws ServiceException, Throwable { - return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, - start, end, - new Batch.Call() { + return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, start, + end, new Batch.Call() { @Override public Long call(ColumnAggregationProtos.ColumnAggregationService instance) - throws IOException { + throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = - ColumnAggregationProtos.SumRequest.newBuilder(); + ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(family)); if (qualifier != null && qualifier.length > 0) { builder.setQualifier(ByteString.copyFrom(qualifier)); @@ -160,8 +159,8 @@ private static void updateTable(TableDescriptor tableDescriptor) throws Exceptio private static final void verifyTable(TableName tableName) throws Throwable { Table table = TEST_UTIL.getConnection().getTable(tableName); try { - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], - ROWS[ROWS.length-1]); + Map results = + sum(table, TEST_FAMILY, 
TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { @@ -174,7 +173,7 @@ private static final void verifyTable(TableName tableName) throws Throwable { // scan: for region 2 and region 3 results.clear(); - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length-1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java index 8a4c7b21b553..007bcf64c3b5 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestImportExport extends org.apache.hadoop.hbase.mapreduce.TestImportExport { @ClassRule diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java index 81b14b949be4..c87568c4cd4e 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyService; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRegionServerCoprocessorEndpoint { @ClassRule @@ -76,13 +76,13 @@ public static void tearDownAfterClass() throws Exception { public void testEndpoint() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); - final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + final CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, TEST_UTIL.getAdmin().coprocessorService(serverName)); - service.dummyCall(controller, - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); + service.dummyCall(controller, DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), + rpcCallback); assertEquals(DUMMY_VALUE, rpcCallback.get().getValue()); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -93,13 +93,13 @@ public void testEndpoint() throws Exception { public void testEndpointExceptions() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); - final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + final CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, - TEST_UTIL.getAdmin().coprocessorService(serverName)); + TEST_UTIL.getAdmin().coprocessorService(serverName)); service.dummyThrow(controller, - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); + DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); assertEquals(null, rpcCallback.get()); assertTrue(controller.failedOnException()); assertEquals(WHAT_TO_THROW.getClass(), controller.getFailedOn().getCause().getClass()); @@ -120,8 +120,7 @@ public void dummyCall(RpcController controller, DummyRequest request, } @Override - public void dummyThrow(RpcController controller, - DummyRequest request, + public void dummyThrow(RpcController controller, DummyRequest request, RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, WHAT_TO_THROW); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index a13cb5b5bf20..f657cd79b2d0 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -84,7 +84,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) 
public class TestSecureExport { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -125,25 +125,20 @@ public class TestSecureExport { private static final String TOPSECRET = "topsecret"; @Rule public final TestName name = new TestName(); + private static void setUpKdcServer() throws Exception { KDC = UTIL.setupMiniKdc(KEYTAB_FILE); USERNAME = UserGroupInformation.getLoginUser().getShortUserName(); SERVER_PRINCIPAL = USERNAME + "/" + LOCALHOST; HTTP_PRINCIPAL = "HTTP/" + LOCALHOST; - KDC.createPrincipal(KEYTAB_FILE, - SERVER_PRINCIPAL, - HTTP_PRINCIPAL, - USER_ADMIN + "/" + LOCALHOST, - USER_OWNER + "/" + LOCALHOST, - USER_RX + "/" + LOCALHOST, - USER_RO + "/" + LOCALHOST, - USER_XO + "/" + LOCALHOST, - USER_NONE + "/" + LOCALHOST); + KDC.createPrincipal(KEYTAB_FILE, SERVER_PRINCIPAL, HTTP_PRINCIPAL, USER_ADMIN + "/" + LOCALHOST, + USER_OWNER + "/" + LOCALHOST, USER_RX + "/" + LOCALHOST, USER_RO + "/" + LOCALHOST, + USER_XO + "/" + LOCALHOST, USER_NONE + "/" + LOCALHOST); } private static User getUserByLogin(final String user) throws IOException { - return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI( - getPrinciple(user), KEYTAB_FILE.getAbsolutePath())); + return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI(getPrinciple(user), + KEYTAB_FILE.getAbsolutePath())); } private static String getPrinciple(final String user) { @@ -152,28 +147,27 @@ private static String getPrinciple(final String user) { private static void setUpClusterKdc() throws Exception { HBaseKerberosUtils.setSecuredConfiguration(UTIL.getConfiguration(), - SERVER_PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); + SERVER_PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); HBaseKerberosUtils.setSSLConfiguration(UTIL, TestSecureExport.class); UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - UTIL.getConfiguration().get( - CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + Export.class.getName()); + UTIL.getConfiguration().get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + + Export.class.getName()); } private static void addLabels(final Configuration conf, final List users, final List labels) throws Exception { - PrivilegedExceptionAction action - = () -> { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels.toArray(new String[labels.size()])); - for (String user : users) { - VisibilityClient.setAuths(conn, labels.toArray(new String[labels.size()]), user); - } - } catch (Throwable t) { - throw new IOException(t); + PrivilegedExceptionAction action = () -> { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels.toArray(new String[labels.size()])); + for (String user : users) { + VisibilityClient.setAuths(conn, labels.toArray(new String[labels.size()]), user); } - return null; - }; + } catch (Throwable t) { + throw new IOException(t); + } + return null; + }; getUserByLogin(USER_ADMIN).runAs(action); } @@ -199,7 +193,7 @@ private static void clearOutput(Path path) throws IOException { @BeforeClass public static void beforeClass() throws Exception { UserProvider.setUserProviderForTesting(UTIL.getConfiguration(), - HadoopSecurityEnabledUserProviderForTesting.class); + HadoopSecurityEnabledUserProviderForTesting.class); setUpKdcServer(); SecureTestUtil.enableSecurity(UTIL.getConfiguration()); UTIL.getConfiguration().setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); @@ 
-211,15 +205,11 @@ public static void beforeClass() throws Exception { UTIL.waitUntilAllRegionsAssigned(VisibilityConstants.LABELS_TABLE_NAME); UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME, 50000); UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME, 50000); - SecureTestUtil.grantGlobal(UTIL, USER_ADMIN, - Permission.Action.ADMIN, - Permission.Action.CREATE, - Permission.Action.EXEC, - Permission.Action.READ, - Permission.Action.WRITE); + SecureTestUtil.grantGlobal(UTIL, USER_ADMIN, Permission.Action.ADMIN, Permission.Action.CREATE, + Permission.Action.EXEC, Permission.Action.READ, Permission.Action.WRITE); SecureTestUtil.grantGlobal(UTIL, USER_OWNER, Permission.Action.CREATE); addLabels(UTIL.getConfiguration(), Arrays.asList(USER_OWNER), - Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); + Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); } @AfterClass @@ -231,28 +221,22 @@ public static void afterClass() throws Exception { } /** - * Test the ExportEndpoint's access levels. The {@link Export} test is ignored - * since the access exceptions cannot be collected from the mappers. + * Test the ExportEndpoint's access levels. The {@link Export} test is ignored since the access + * exceptions cannot be collected from the mappers. */ @Test public void testAccessCase() throws Throwable { final String exportTable = name.getMethodName(); - TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(exportTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .build(); + TableDescriptor exportHtd = TableDescriptorBuilder.newBuilder(TableName.valueOf(exportTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)).build(); User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); - SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); - SecureTestUtil.grantOnTable(UTIL, USER_RO, - TableName.valueOf(exportTable), null, null, - Permission.Action.READ); - SecureTestUtil.grantOnTable(UTIL, USER_RX, - TableName.valueOf(exportTable), null, null, - Permission.Action.READ, - Permission.Action.EXEC); - SecureTestUtil.grantOnTable(UTIL, USER_XO, - TableName.valueOf(exportTable), null, null, - Permission.Action.EXEC); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][] { Bytes.toBytes("s") }); + SecureTestUtil.grantOnTable(UTIL, USER_RO, TableName.valueOf(exportTable), null, null, + Permission.Action.READ); + SecureTestUtil.grantOnTable(UTIL, USER_RX, TableName.valueOf(exportTable), null, null, + Permission.Action.READ, Permission.Action.EXEC); + SecureTestUtil.grantOnTable(UTIL, USER_XO, TableName.valueOf(exportTable), null, null, + Permission.Action.EXEC); assertEquals(4, PermissionStorage .getTablePermissions(UTIL.getConfiguration(), TableName.valueOf(exportTable)).size()); AccessTestAction putAction = () -> { @@ -260,20 +244,15 @@ public void testAccessCase() throws Throwable { p.addColumn(FAMILYA, Bytes.toBytes("qual_0"), NOW, QUAL); p.addColumn(FAMILYA, Bytes.toBytes("qual_1"), NOW, QUAL); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table t = conn.getTable(TableName.valueOf(exportTable))) { + Table t = conn.getTable(TableName.valueOf(exportTable))) { t.put(p); } return null; }; // no hdfs access. 
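The testAccessCase body that continues below drives the endpoint-backed Export tool directly through Export.run and then checks which principals may execute it. A minimal sketch of that entry point; the getRowCount accessor on Export.Response is assumed from the way the surrounding test sums row and cell counts:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.coprocessor.Export;

public class EndpointExportRunner {
  /** Runs the coprocessor-backed export; the result holds one Response per region. */
  static long exportRows(Configuration conf, String table, String outputDir) throws Throwable {
    String[] args = new String[] { table, outputDir };
    Map<byte[], Export.Response> perRegion = Export.run(new Configuration(conf), args);
    long rows = 0;
    for (Export.Response r : perRegion.values()) {
      rows += r.getRowCount(); // accessor name assumed, not shown verbatim in the hunk
    }
    return rows;
  }
}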
- SecureTestUtil.verifyAllowed(putAction, - getUserByLogin(USER_ADMIN), - getUserByLogin(USER_OWNER)); - SecureTestUtil.verifyDenied(putAction, - getUserByLogin(USER_RO), - getUserByLogin(USER_XO), - getUserByLogin(USER_RX), - getUserByLogin(USER_NONE)); + SecureTestUtil.verifyAllowed(putAction, getUserByLogin(USER_ADMIN), getUserByLogin(USER_OWNER)); + SecureTestUtil.verifyDenied(putAction, getUserByLogin(USER_RO), getUserByLogin(USER_XO), + getUserByLogin(USER_RX), getUserByLogin(USER_NONE)); final FileSystem fs = UTIL.getDFSCluster().getFileSystem(); final Path openDir = fs.makeQualified(new Path("testAccessCase")); @@ -282,9 +261,9 @@ public void testAccessCase() throws Throwable { final Path output = fs.makeQualified(new Path(openDir, "output")); AccessTestAction exportAction = () -> { try { - String[] args = new String[]{exportTable, output.toString()}; - Map result - = Export.run(new Configuration(UTIL.getConfiguration()), args); + String[] args = new String[] { exportTable, output.toString() }; + Map result = + Export.run(new Configuration(UTIL.getConfiguration()), args); long rowCount = 0; long cellCount = 0; for (Export.Response r : result.values()) { @@ -308,7 +287,7 @@ public void testAccessCase() throws Throwable { assertEquals("Unexpected file owner", currentUserName, outputDirFileStatus.getOwner()); FileStatus[] outputFileStatus = fs.listStatus(new Path(openDir, "output")); - for (FileStatus fileStatus: outputFileStatus) { + for (FileStatus fileStatus : outputFileStatus) { assertEquals("Unexpected file owner", currentUserName, fileStatus.getOwner()); } } else { @@ -318,14 +297,10 @@ public void testAccessCase() throws Throwable { clearOutput(output); } }; - SecureTestUtil.verifyDenied(exportAction, - getUserByLogin(USER_RO), - getUserByLogin(USER_XO), + SecureTestUtil.verifyDenied(exportAction, getUserByLogin(USER_RO), getUserByLogin(USER_XO), getUserByLogin(USER_NONE)); - SecureTestUtil.verifyAllowed(exportAction, - getUserByLogin(USER_ADMIN), - getUserByLogin(USER_OWNER), - getUserByLogin(USER_RX)); + SecureTestUtil.verifyAllowed(exportAction, getUserByLogin(USER_ADMIN), + getUserByLogin(USER_OWNER), getUserByLogin(USER_RX)); AccessTestAction deleteAction = () -> { UTIL.deleteTable(TableName.valueOf(exportTable)); return null; @@ -339,12 +314,11 @@ public void testAccessCase() throws Throwable { public void testVisibilityLabels() throws IOException, Throwable { final String exportTable = name.getMethodName() + "_export"; final String importTable = name.getMethodName() + "_import"; - final TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(exportTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .build(); + final TableDescriptor exportHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(exportTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)).build(); User owner = User.createUserForTesting(UTIL.getConfiguration(), USER_OWNER, new String[0]); - SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][]{Bytes.toBytes("s")}); + SecureTestUtil.createTable(UTIL, owner, exportHtd, new byte[][] { Bytes.toBytes("s") }); AccessTestAction putAction = () -> { Put p1 = new Put(ROW1); p1.addColumn(FAMILYA, QUAL, NOW, QUAL); @@ -356,7 +330,7 @@ public void testVisibilityLabels() throws IOException, Throwable { p3.addColumn(FAMILYA, QUAL, NOW, QUAL); p3.setCellVisibility(new CellVisibility("!" 
+ CONFIDENTIAL + " & " + TOPSECRET)); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table t = conn.getTable(TableName.valueOf(exportTable))) { + Table t = conn.getTable(TableName.valueOf(exportTable))) { t.put(p1); t.put(p2); t.put(p3); @@ -373,7 +347,7 @@ public void testVisibilityLabels() throws IOException, Throwable { for (final Pair, Integer> labelsAndRowCount : labelsAndRowCounts) { final List labels = labelsAndRowCount.getFirst(); final int rowCount = labelsAndRowCount.getSecond(); - //create a open permission directory. + // create a open permission directory. final Path openDir = new Path("testAccessCase"); final FileSystem fs = openDir.getFileSystem(UTIL.getConfiguration()); fs.mkdirs(openDir); @@ -384,10 +358,9 @@ public void testVisibilityLabels() throws IOException, Throwable { labels.forEach(v -> buf.append(v).append(",")); buf.deleteCharAt(buf.length() - 1); try { - String[] args = new String[]{ - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + buf.toString(), - exportTable, - output.toString(),}; + String[] args = + new String[] { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + buf.toString(), + exportTable, output.toString(), }; Export.run(new Configuration(UTIL.getConfiguration()), args); return null; } catch (ServiceException | IOException ex) { @@ -397,19 +370,16 @@ public void testVisibilityLabels() throws IOException, Throwable { } }; SecureTestUtil.verifyAllowed(exportAction, getUserByLogin(USER_OWNER)); - final TableDescriptor importHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)) - .build(); - SecureTestUtil.createTable(UTIL, owner, importHtd, new byte[][]{Bytes.toBytes("s")}); + final TableDescriptor importHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(importTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)).build(); + SecureTestUtil.createTable(UTIL, owner, importHtd, new byte[][] { Bytes.toBytes("s") }); AccessTestAction importAction = () -> { - String[] args = new String[]{ - "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, - importTable, - output.toString() - }; - assertEquals(0, ToolRunner.run( - new Configuration(UTIL.getConfiguration()), new Import(), args)); + String[] args = new String[] { + "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, importTable, + output.toString() }; + assertEquals(0, + ToolRunner.run(new Configuration(UTIL.getConfiguration()), new Import(), args)); return null; }; SecureTestUtil.verifyAllowed(importAction, getUserByLogin(USER_OWNER)); @@ -417,8 +387,8 @@ public void testVisibilityLabels() throws IOException, Throwable { Scan scan = new Scan(); scan.setAuthorizations(new Authorizations(labels)); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table table = conn.getTable(importHtd.getTableName()); - ResultScanner scanner = table.getScanner(scan)) { + Table table = conn.getTable(importHtd.getTableName()); + ResultScanner scanner = table.getScanner(scan)) { int count = 0; for (Result r : scanner) { ++count; diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java index 63b2b1d68544..4f5f3d9ed9df 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java +++ 
b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,13 +40,11 @@ public class TestCoprocessorRpcUtils { @Test public void testServiceName() throws Exception { // verify that we de-namespace build in HBase rpc services - ServiceDescriptor authService = - AuthenticationProtos.AuthenticationService.getDescriptor(); + ServiceDescriptor authService = AuthenticationProtos.AuthenticationService.getDescriptor(); assertEquals(authService.getName(), CoprocessorRpcUtils.getServiceName(authService)); // non-hbase rpc services should remain fully qualified - ServiceDescriptor dummyService = - DummyRegionServerEndpointProtos.DummyService.getDescriptor(); + ServiceDescriptor dummyService = DummyRegionServerEndpointProtos.DummyService.getDescriptor(); assertEquals(dummyService.getFullName(), CoprocessorRpcUtils.getServiceName(dummyService)); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java index fca207570936..af70212d942c 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingResponse; import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingService; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestServerCustomProtocol { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -110,8 +110,8 @@ public void count(RpcController controller, CountRequest request, } @Override - public void increment(RpcController controller, - IncrementCountRequest request, RpcCallback done) { + public void increment(RpcController controller, IncrementCountRequest request, + RpcCallback done) { this.counter += request.getDiff(); done.run(IncrementCountResponse.newBuilder().setCount(this.counter).build()); } @@ -190,10 +190,10 @@ public static void tearDownAfterClass() throws Exception { @Test public void testSingleProxy() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = ping(table, null, null); + Map results = ping(table, null, null); // There are three regions so should get back three results. 
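The TestServerCustomProtocol hunks that follow reflow many Batch.Call blocks whose generic parameters the diff rendering drops. For readability, the fan-out pattern they all share, with the types restored (PingService, PingRequest, and PingResponse come from the test-only PingProtos referenced in the imports):

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingRequest;
import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingResponse;
import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.PingService;

public class PingFanOut {
  /** Invokes PingService on every region overlapping [start, end); one map entry per region. */
  static Map<byte[], String> ping(Table table, byte[] start, byte[] end) throws Throwable {
    return table.coprocessorService(PingService.class, start, end,
      new Batch.Call<PingService, String>() {
        @Override
        public String call(PingService instance) throws IOException {
          CoprocessorRpcUtils.BlockingRpcCallback<PingResponse> rpcCallback =
            new CoprocessorRpcUtils.BlockingRpcCallback<>();
          instance.ping(null, PingRequest.newBuilder().build(), rpcCallback);
          return rpcCallback.get().getPong();
        }
      });
  }
}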
assertEquals(3, results.size()); - for (Map.Entry e: results.entrySet()) { + for (Map.Entry e : results.entrySet()) { assertEquals("Invalid custom protocol response", "pong", e.getValue()); } hello(table, "George", HELLO + "George"); @@ -202,125 +202,119 @@ public void testSingleProxy() throws Throwable { LOG.info("Who are you"); hello(table, NOBODY, null); LOG.info(NOBODY); - Map intResults = table.coprocessorService(PingService.class, - null, null, + Map intResults = table.coprocessorService(PingService.class, null, null, new Batch.Call() { @Override public Integer call(PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.count(null, CountRequest.newBuilder().build(), rpcCallback); return rpcCallback.get().getCount(); } }); int count = -1; - for (Map.Entry e: intResults.entrySet()) { + for (Map.Entry e : intResults.entrySet()) { assertTrue(e.getValue() > 0); count = e.getValue(); } final int diff = 5; - intResults = table.coprocessorService(PingService.class, - null, null, + intResults = table.coprocessorService(PingService.class, null, null, new Batch.Call() { @Override public Integer call(PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.increment(null, - IncrementCountRequest.newBuilder().setDiff(diff).build(), + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.increment(null, IncrementCountRequest.newBuilder().setDiff(diff).build(), rpcCallback); return rpcCallback.get().getCount(); } }); // There are three regions so should get back three results. assertEquals(3, results.size()); - for (Map.Entry e: intResults.entrySet()) { + for (Map.Entry e : intResults.entrySet()) { assertEquals(e.getValue().intValue(), count + diff); } table.close(); } - private Map hello(final Table table, final String send, final String response) - throws ServiceException, Throwable { - Map results = hello(table, send); - for (Map.Entry e: results.entrySet()) { + private Map hello(final Table table, final String send, final String response) + throws ServiceException, Throwable { + Map results = hello(table, send); + for (Map.Entry e : results.entrySet()) { assertEquals("Invalid custom protocol response", response, e.getValue()); } return results; } - private Map hello(final Table table, final String send) - throws ServiceException, Throwable { + private Map hello(final Table table, final String send) + throws ServiceException, Throwable { return hello(table, send, null, null); } - private Map hello(final Table table, final String send, final byte [] start, - final byte [] end) throws ServiceException, Throwable { - return table.coprocessorService(PingService.class, - start, end, - new Batch.Call() { - @Override - public String call(PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + private Map hello(final Table table, final String send, final byte[] start, + final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(PingService.class, start, end, + new Batch.Call() { + @Override + public String call(PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - HelloRequest.Builder builder = HelloRequest.newBuilder(); - if (send != null) { - builder.setName(send); - } - 
instance.hello(null, builder.build(), rpcCallback); - HelloResponse r = rpcCallback.get(); - return r != null && r.hasResponse()? r.getResponse(): null; + HelloRequest.Builder builder = HelloRequest.newBuilder(); + if (send != null) { + builder.setName(send); } - }); + instance.hello(null, builder.build(), rpcCallback); + HelloResponse r = rpcCallback.get(); + return r != null && r.hasResponse() ? r.getResponse() : null; + } + }); } - private Map compoundOfHelloAndPing(final Table table, final byte [] start, - final byte [] end) throws ServiceException, Throwable { - return table.coprocessorService(PingService.class, - start, end, - new Batch.Call() { - @Override - public String call(PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + private Map compoundOfHelloAndPing(final Table table, final byte[] start, + final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(PingService.class, start, end, + new Batch.Call() { + @Override + public String call(PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - HelloRequest.Builder builder = HelloRequest.newBuilder(); - // Call ping on same instance. Use result calling hello on same instance. - builder.setName(doPing(instance)); - instance.hello(null, builder.build(), rpcCallback); - HelloResponse r = rpcCallback.get(); - return r != null && r.hasResponse()? r.getResponse(): null; - } - }); + HelloRequest.Builder builder = HelloRequest.newBuilder(); + // Call ping on same instance. Use result calling hello on same instance. + builder.setName(doPing(instance)); + instance.hello(null, builder.build(), rpcCallback); + HelloResponse r = rpcCallback.get(); + return r != null && r.hasResponse() ? r.getResponse() : null; + } + }); } - private Map noop(final Table table, final byte [] start, final byte [] end) - throws ServiceException, Throwable { + private Map noop(final Table table, final byte[] start, final byte[] end) + throws ServiceException, Throwable { return table.coprocessorService(PingService.class, start, end, - new Batch.Call() { - @Override - public String call(PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + @Override + public String call(PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - NoopRequest.Builder builder = NoopRequest.newBuilder(); - instance.noop(null, builder.build(), rpcCallback); - rpcCallback.get(); - // Looks like null is expected when void. That is what the test below is looking for - return null; - } - }); + NoopRequest.Builder builder = NoopRequest.newBuilder(); + instance.noop(null, builder.build(), rpcCallback); + rpcCallback.get(); + // Looks like null is expected when void. 
That is what the test below is looking for + return null; + } + }); } @Test public void testSingleMethod() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = table.coprocessorService(PingService.class, - null, ROW_A, + Map results = table.coprocessorService(PingService.class, null, ROW_A, new Batch.Call() { @Override public String call(PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.ping(null, PingRequest.newBuilder().build(), rpcCallback); return rpcCallback.get().getPong(); } @@ -343,9 +337,9 @@ public String call(PingService instance) throws IOException { public void testRowRange() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - for (HRegionLocation e: locator.getAllRegionLocations()) { - LOG.info("Region " + e.getRegion().getRegionNameAsString() - + ", servername=" + e.getServerName()); + for (HRegionLocation e : locator.getAllRegionLocations()) { + LOG.info( + "Region " + e.getRegion().getRegionNameAsString() + ", servername=" + e.getServerName()); } // Here are what regions looked like on a run: // @@ -353,7 +347,7 @@ public void testRowRange() throws Throwable { // test,bbb,1355943549661.110393b070dd1ed93441e0bc9b3ffb7e. // test,ccc,1355943549665.c3d6d125141359cbbd2a43eaff3cdf74. - Map results = ping(table, null, ROW_A); + Map results = ping(table, null, ROW_A); // Should contain first region only. assertEquals(1, results.size()); verifyRegionResults(locator, results, ROW_A); @@ -376,7 +370,7 @@ public void testRowRange() throws Throwable { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegion().getRegionName())); + results.get(loc.getRegion().getRegionName())); // test explicit start + end results = ping(table, ROW_AB, ROW_BC); @@ -386,7 +380,7 @@ public void testRowRange() throws Throwable { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegion().getRegionName())); + results.get(loc.getRegion().getRegionName())); // test single region results = ping(table, ROW_B, ROW_BC); @@ -395,15 +389,15 @@ public void testRowRange() throws Throwable { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_A, true); assertNull("Should be missing region for row aaa (prior to start)", - results.get(loc.getRegion().getRegionName())); + results.get(loc.getRegion().getRegionName())); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegion().getRegionName())); + results.get(loc.getRegion().getRegionName())); } } - private Map ping(final Table table, final byte [] start, final byte [] end) - throws ServiceException, Throwable { + private Map ping(final Table table, final byte[] start, final byte[] end) + throws ServiceException, Throwable { return table.coprocessorService(PingService.class, start, end, new Batch.Call() { @Override @@ -424,7 +418,7 @@ private static String doPing(PingService instance) throws IOException { public void 
testCompoundCall() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = compoundOfHelloAndPing(table, ROW_A, ROW_C); + Map results = compoundOfHelloAndPing(table, ROW_A, ROW_C); verifyRegionResults(locator, results, "Hello, pong", ROW_A); verifyRegionResults(locator, results, "Hello, pong", ROW_B); verifyRegionResults(locator, results, "Hello, pong", ROW_C); @@ -435,7 +429,7 @@ public void testCompoundCall() throws Throwable { public void testNullCall() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = hello(table, null, ROW_A, ROW_C); + Map results = hello(table, null, ROW_A, ROW_C); verifyRegionResults(locator, results, "Who are you?", ROW_A); verifyRegionResults(locator, results, "Who are you?", ROW_B); verifyRegionResults(locator, results, "Who are you?", ROW_C); @@ -446,7 +440,7 @@ public void testNullCall() throws Throwable { public void testNullReturn() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = hello(table, "nobody", ROW_A, ROW_C); + Map results = hello(table, "nobody", ROW_A, ROW_C); verifyRegionResults(locator, results, null, ROW_A); verifyRegionResults(locator, results, null, ROW_B); verifyRegionResults(locator, results, null, ROW_C); @@ -456,7 +450,7 @@ public void testNullReturn() throws Throwable { @Test public void testEmptyReturnType() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE)) { - Map results = noop(table, ROW_A, ROW_C); + Map results = noop(table, ROW_A, ROW_C); assertEquals("Should have results from three regions", 3, results.size()); // all results should be null for (Object v : results.values()) { @@ -465,24 +459,23 @@ public void testEmptyReturnType() throws Throwable { } } - private void verifyRegionResults(RegionLocator table, Map results, byte[] row) - throws Exception { + private void verifyRegionResults(RegionLocator table, Map results, byte[] row) + throws Exception { verifyRegionResults(table, results, "pong", row); } private void verifyRegionResults(RegionLocator regionLocator, Map results, - String expected, byte[] row) throws Exception { - for (Map.Entry e: results.entrySet()) { - LOG.info("row=" + Bytes.toString(row) + ", expected=" + expected + - ", result key=" + Bytes.toString(e.getKey()) + - ", value=" + e.getValue()); + String expected, byte[] row) throws Exception { + for (Map.Entry e : results.entrySet()) { + LOG.info("row=" + Bytes.toString(row) + ", expected=" + expected + ", result key=" + + Bytes.toString(e.getKey()) + ", value=" + e.getValue()); } HRegionLocation loc = regionLocator.getRegionLocation(row, true); byte[] region = loc.getRegion().getRegionName(); - assertTrue("Results should contain region " + - Bytes.toStringBinary(region) + " for row '" + Bytes.toStringBinary(row)+ "'", + assertTrue("Results should contain region " + Bytes.toStringBinary(region) + " for row '" + + Bytes.toStringBinary(row) + "'", results.containsKey(region)); - assertEquals("Invalid result for row '"+Bytes.toStringBinary(row)+"'", - expected, results.get(region)); + assertEquals("Invalid result for row '" + Bytes.toStringBinary(row) + "'", expected, + results.get(region)); } } diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index 
a3cebc055a7a..396c2949dee2 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -35,95 +35,6 @@ --> 3.17.3 - - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - ${surefire.firstPartGroups} - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier} - true - - - - - - com.google.code.maven-replacer-plugin - replacer - 1.5.3 - - - process-sources - - replace - - - - - ${basedir}/target/generated-sources/ - - **/*.java - - - true - - - ([^\.])com.google.protobuf - $1org.apache.hbase.thirdparty.com.google.protobuf - - - (public)(\W+static)?(\W+final)?(\W+class) - @javax.annotation.Generated("proto") $1$2$3$4 - - - - (@javax.annotation.Generated\("proto"\) ){2} - $1 - - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - org.apache.hbase.thirdparty @@ -258,8 +169,8 @@ org.apache.hbase hbase-http - test test-jar + test org.slf4j @@ -292,6 +203,95 @@ test + + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + ${surefire.firstPartGroups} + + + + + org.apache.maven.plugins + maven-source-plugin + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier} + true + + + + + + com.google.code.maven-replacer-plugin + replacer + 1.5.3 + + ${basedir}/target/generated-sources/ + + **/*.java + + + true + + + ([^\.])com.google.protobuf + $1org.apache.hbase.thirdparty.com.google.protobuf + + + (public)(\W+static)?(\W+final)?(\W+class) + @javax.annotation.Generated("proto") $1$2$3$4 + + + + (@javax.annotation.Generated\("proto"\) ){2} + $1 + + + + + + + replace + + process-sources + + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + @@ -313,7 +313,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -329,8 +331,8 @@ jaxb-api - javax.ws.rs - jsr311-api + javax.ws.rs + jsr311-api @@ -342,10 +344,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-external-blockcache Apache HBase - External Block Cache - - HBase module that provides out of process block cache. + HBase module that provides out of process block cache. Currently Memcached is the reference implementation for external block cache. External block caches allow HBase to take advantage of other more complex caches that can live longer than the HBase regionserver process and are not necessarily tied to a single computer - life time. However external block caches add in extra operational overhead. - - + life time. However external block caches add in extra operational overhead. 
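The hbase-external-blockcache description above is the one piece of prose in this pom: the module lets a region server back its block cache with an out-of-process store, memcached being the reference implementation. A minimal configuration sketch follows; the property names are assumptions drawn from memory of the module, not from this diff, and should be checked against the module's documentation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ExternalBlockCacheConfig {
  static Configuration withMemcached() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key: enable an external (out-of-process) block cache ...
    conf.setBoolean("hbase.blockcache.use.external", true);
    // ... assumed key: point the memcached-backed implementation at the cache servers.
    conf.set("hbase.cache.memcached.servers", "cache1:11211,cache2:11211");
    return conf;
  }
}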
+ + + + org.apache.hbase + hbase-common + + + org.apache.hbase + hbase-server + + + net.spy + spymemcached + true + + + org.slf4j + slf4j-api + + + junit + junit + test + + + @@ -60,10 +81,10 @@ versionInfo-source - generate-sources add-source + generate-sources ${project.build.directory}/generated-sources/java @@ -91,31 +112,6 @@ - - - org.apache.hbase - hbase-common - - - org.apache.hbase - hbase-server - - - net.spy - spymemcached - true - - - org.slf4j - slf4j-api - - - junit - junit - test - - - @@ -128,10 +124,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -167,7 +163,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -182,10 +180,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-hadoop-compat Apache HBase - Hadoop Compatibility - - Interfaces to be implemented in order to smooth - over hadoop version differences - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - + Interfaces to be implemented in order to smooth + over hadoop version differences @@ -166,8 +134,36 @@ + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + - + skipHadoopCompatTests @@ -190,15 +186,14 @@ - org.eclipse.m2e lifecycle-mapping - - + diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java index d29e7bc1d3b3..220faa5b9753 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Iterator; import java.util.ServiceLoader; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +36,8 @@ public class CompatibilityFactory { /** * This is a static only class don't let any instance be created. 
*/ - protected CompatibilityFactory() {} + protected CompatibilityFactory() { + } public static synchronized T getInstance(Class klass) { T instance = null; @@ -48,10 +47,9 @@ public static synchronized T getInstance(Class klass) { instance = it.next(); if (it.hasNext()) { StringBuilder msg = new StringBuilder(); - msg.append("ServiceLoader provided more than one implementation for class: ") - .append(klass) - .append(", using implementation: ").append(instance.getClass()) - .append(", other implementations: {"); + msg.append("ServiceLoader provided more than one implementation for class: ").append(klass) + .append(", using implementation: ").append(instance.getClass()) + .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java index 0e633b8b15f4..bacdc11c300e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,39 +15,39 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.ServiceLoader; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Factory for classes supplied by hadoop compatibility modules. Only one of each class will be - * created. + * Factory for classes supplied by hadoop compatibility modules. Only one of each class will be + * created. */ @InterfaceAudience.Private public class CompatibilitySingletonFactory extends CompatibilityFactory { public static enum SingletonStorage { INSTANCE; + private final Object lock = new Object(); private final Map instances = new HashMap<>(); } + private static final Logger LOG = LoggerFactory.getLogger(CompatibilitySingletonFactory.class); /** * This is a static only class don't let anyone create an instance. 
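CompatibilitySingletonFactory, whose header is reformatted above, resolves exactly one implementation per requested class via ServiceLoader and caches it for the life of the JVM. A minimal sketch of the lookup as callers use it; MetricsHBaseServerSourceFactory (reformatted later in this diff) is only an illustrative argument:

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactory;

public class CompatLookupExample {
  static MetricsHBaseServerSourceFactory metricsFactory() {
    // First call walks ServiceLoader and caches the result; later calls return the same instance.
    return CompatibilitySingletonFactory.getInstance(MetricsHBaseServerSourceFactory.class);
  }
}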
*/ - protected CompatibilitySingletonFactory() { } + protected CompatibilitySingletonFactory() { + } /** * Get the singleton instance of Any classes defined by compatibiliy jar's - * * @return the singleton */ @SuppressWarnings("unchecked") @@ -62,8 +62,7 @@ public static T getInstance(Class klass) { if (it.hasNext()) { StringBuilder msg = new StringBuilder(); msg.append("ServiceLoader provided more than one implementation for class: ") - .append(klass) - .append(", using implementation: ").append(instance.getClass()) + .append(klass).append(", using implementation: ").append(instance.getClass()) .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java index c0a8519c10cd..b9258965eb42 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -44,24 +43,22 @@ public interface MetricsIOSource extends BaseSource { */ String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String FS_READ_TIME_HISTO_KEY = "fsReadTime"; String FS_PREAD_TIME_HISTO_KEY = "fsPReadTime"; String FS_WRITE_HISTO_KEY = "fsWriteTime"; String CHECKSUM_FAILURES_KEY = "fsChecksumFailureCount"; - String FS_READ_TIME_HISTO_DESC - = "Latency of HFile's sequential reads on this region server in milliseconds"; - String FS_PREAD_TIME_HISTO_DESC - = "Latency of HFile's positional reads on this region server in milliseconds"; - String FS_WRITE_TIME_HISTO_DESC - = "Latency of HFile's writes on this region server in milliseconds"; + String FS_READ_TIME_HISTO_DESC = + "Latency of HFile's sequential reads on this region server in milliseconds"; + String FS_PREAD_TIME_HISTO_DESC = + "Latency of HFile's positional reads on this region server in milliseconds"; + String FS_WRITE_TIME_HISTO_DESC = + "Latency of HFile's writes on this region server in milliseconds"; String CHECKSUM_FAILURES_DESC = "Number of checksum failures for the HBase HFile checksums at the" + " HBase level (separate from HDFS checksums)"; - /** * Update the fs sequential read time histogram * @param t time it took, in milliseconds diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java index fdb318adaac3..5b9a8c5efb0a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -38,21 +37,18 @@ public MetricsIOSourceImpl(MetricsIOWrapper wrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); } - public MetricsIOSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsIOWrapper wrapper) { + public MetricsIOSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext, MetricsIOWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; - fsReadTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_READ_TIME_HISTO_KEY, FS_READ_TIME_HISTO_DESC); - fsPReadTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC); - fsWriteTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC); + fsReadTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_READ_TIME_HISTO_KEY, FS_READ_TIME_HISTO_DESC); + fsPReadTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC); + fsWriteTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java index 3ba8cd5d0ae8..e3dc724d8b7d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java index 69bd040e7f95..2db18d56e0c2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource; @@ -25,20 +23,15 @@ @InterfaceAudience.Private public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String AUTHORIZATION_SUCCESSES_NAME = "authorizationSuccesses"; - String AUTHORIZATION_SUCCESSES_DESC = - "Number of authorization successes."; + String AUTHORIZATION_SUCCESSES_DESC = "Number of authorization successes."; String AUTHORIZATION_FAILURES_NAME = "authorizationFailures"; - String AUTHORIZATION_FAILURES_DESC = - "Number of authorization failures."; + String AUTHORIZATION_FAILURES_DESC = "Number of authorization failures."; String AUTHENTICATION_SUCCESSES_NAME = "authenticationSuccesses"; - String AUTHENTICATION_SUCCESSES_DESC = - "Number of authentication successes."; + String AUTHENTICATION_SUCCESSES_DESC = "Number of authentication successes."; String AUTHENTICATION_FAILURES_NAME = "authenticationFailures"; - String AUTHENTICATION_FAILURES_DESC = - "Number of authentication failures."; + String AUTHENTICATION_FAILURES_DESC = "Number of authentication failures."; String AUTHENTICATION_FALLBACKS_NAME = "authenticationFallbacks"; - String AUTHENTICATION_FALLBACKS_DESC = - "Number of fallbacks to insecure authentication."; + String AUTHENTICATION_FALLBACKS_DESC = "Number of fallbacks to insecure authentication."; String SENT_BYTES_NAME = "sentBytes"; String SENT_BYTES_DESC = "Number of bytes sent."; String RECEIVED_BYTES_NAME = "receivedBytes"; @@ -54,27 +47,26 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String TOTAL_CALL_TIME_NAME = "totalCallTime"; String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time."; String QUEUE_SIZE_NAME = "queueSize"; - String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + - "parsed and is waiting to run or is currently being executed."; + String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + + "parsed and is waiting to run or is currently being executed."; String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue"; - String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " + - "parsed requests waiting in scheduler to be executed"; + String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " + + "parsed requests waiting in scheduler to be executed"; String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue"; String METAPRIORITY_QUEUE_NAME = "numCallsInMetaPriorityQueue"; String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue"; - String REPLICATION_QUEUE_DESC = - "Number of calls in the replication call queue waiting to be run"; + String REPLICATION_QUEUE_DESC = "Number of calls in the replication call queue waiting to be run"; String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run"; String METAPRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run"; String WRITE_QUEUE_NAME = "numCallsInWriteQueue"; - String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " + - "parsed requests waiting in scheduler to be executed"; + String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " + + "parsed requests waiting in scheduler to be executed"; String READ_QUEUE_NAME = "numCallsInReadQueue"; - String READ_QUEUE_DESC = "Number of calls in the read call queue; " + - "parsed requests waiting in scheduler to be executed"; + String READ_QUEUE_DESC = "Number of calls in the read call queue; 
" + + "parsed requests waiting in scheduler to be executed"; String SCAN_QUEUE_NAME = "numCallsInScanQueue"; - String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " + - "parsed requests waiting in scheduler to be executed"; + String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " + + "parsed requests waiting in scheduler to be executed"; String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections"; String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections."; String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler"; @@ -92,17 +84,16 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String NUM_ACTIVE_SCAN_HANDLER_NAME = "numActiveScanHandler"; String NUM_ACTIVE_SCAN_HANDLER_DESC = "Number of active scan rpc handlers."; String NUM_GENERAL_CALLS_DROPPED_NAME = "numGeneralCallsDropped"; - String NUM_GENERAL_CALLS_DROPPED_DESC = "Total number of calls in general queue which " + - "were dropped by CoDel RPC executor"; + String NUM_GENERAL_CALLS_DROPPED_DESC = + "Total number of calls in general queue which " + "were dropped by CoDel RPC executor"; String NUM_LIFO_MODE_SWITCHES_NAME = "numLifoModeSwitches"; - String NUM_LIFO_MODE_SWITCHES_DESC = "Total number of calls in general queue which " + - "were served from the tail of the queue"; + String NUM_LIFO_MODE_SWITCHES_DESC = + "Total number of calls in general queue which " + "were served from the tail of the queue"; // Direct Memory Usage metrics String NETTY_DM_USAGE_NAME = "nettyDirectMemoryUsage"; String NETTY_DM_USAGE_DESC = "Current Netty direct memory usage."; - void authorizationSuccess(); void authorizationFailure(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java index 7f1415ae86f2..027c197333a3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; @@ -34,18 +32,16 @@ public abstract class MetricsHBaseServerSourceFactory { static final String METRICS_DESCRIPTION = "Metrics about HBase Server IPC"; /** - * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under. - * - * JMX_CONTEXT will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX + * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under. JMX_CONTEXT + * will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX */ static final String METRICS_JMX_CONTEXT_SUFFIX = ",sub=" + METRICS_NAME; abstract MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrapper wrapper); /** - * From the name of the class that's starting up create the - * context that an IPC source should register itself. - * + * From the name of the class that's starting up create the context that an IPC source should + * register itself. * @param serverName The name of the class that's starting up. 
* @return The Camel Cased context name. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java index 67325c0728e5..118ed939b265 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.util.HashMap; import java.util.Locale; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsHBaseServerSourceFactoryImpl extends MetricsHBaseServerSourceFactory { private enum SourceStorage { INSTANCE; + HashMap sources = new HashMap<>(); } @@ -37,19 +35,16 @@ public MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrap } private static synchronized MetricsHBaseServerSource getSource(String serverName, - MetricsHBaseServerWrapper wrap) { + MetricsHBaseServerWrapper wrap) { String context = createContextName(serverName); MetricsHBaseServerSource source = SourceStorage.INSTANCE.sources.get(context); if (source == null) { - //Create the source. - source = new MetricsHBaseServerSourceImpl( - context, - METRICS_DESCRIPTION, - context.toLowerCase(Locale.ROOT), - context + METRICS_JMX_CONTEXT_SUFFIX, wrap); - - //Store back in storage + // Create the source. + source = new MetricsHBaseServerSourceImpl(context, METRICS_DESCRIPTION, + context.toLowerCase(Locale.ROOT), context + METRICS_JMX_CONTEXT_SUFFIX, wrap); + + // Store back in storage SourceStorage.INSTANCE.sources.put(context, source); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java index e4fee95e2c4d..ede600928ea0 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSourceImpl; @@ -39,45 +37,40 @@ public class MetricsHBaseServerSourceImpl extends ExceptionTrackingSourceImpl private final MutableFastCounter sentBytes; private final MutableFastCounter receivedBytes; - private MetricHistogram queueCallTime; private MetricHistogram processCallTime; private MetricHistogram totalCallTime; private MetricHistogram requestSize; private MetricHistogram responseSize; - public MetricsHBaseServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsHBaseServerWrapper wrapper) { + public MetricsHBaseServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsHBaseServerWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; this.authorizationSuccesses = this.getMetricsRegistry().newCounter(AUTHORIZATION_SUCCESSES_NAME, - AUTHORIZATION_SUCCESSES_DESC, 0L); + AUTHORIZATION_SUCCESSES_DESC, 0L); this.authorizationFailures = this.getMetricsRegistry().newCounter(AUTHORIZATION_FAILURES_NAME, - AUTHORIZATION_FAILURES_DESC, 0L); - this.authenticationSuccesses = this.getMetricsRegistry().newCounter( - AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); + AUTHORIZATION_FAILURES_DESC, 0L); + this.authenticationSuccesses = this.getMetricsRegistry() + .newCounter(AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); this.authenticationFailures = this.getMetricsRegistry().newCounter(AUTHENTICATION_FAILURES_NAME, - AUTHENTICATION_FAILURES_DESC, 0L); - this.authenticationFallbacks = this.getMetricsRegistry().newCounter( - AUTHENTICATION_FALLBACKS_NAME, AUTHENTICATION_FALLBACKS_DESC, 0L); - this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME, - SENT_BYTES_DESC, 0L); - this.receivedBytes = this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME, - RECEIVED_BYTES_DESC, 0L); - this.queueCallTime = this.getMetricsRegistry().newTimeHistogram(QUEUE_CALL_TIME_NAME, - QUEUE_CALL_TIME_DESC); - this.processCallTime = this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, - PROCESS_CALL_TIME_DESC); - this.totalCallTime = this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, - TOTAL_CALL_TIME_DESC); - this.requestSize = this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, - REQUEST_SIZE_DESC); - this.responseSize = this.getMetricsRegistry().newSizeHistogram(RESPONSE_SIZE_NAME, - RESPONSE_SIZE_DESC); + AUTHENTICATION_FAILURES_DESC, 0L); + this.authenticationFallbacks = this.getMetricsRegistry() + .newCounter(AUTHENTICATION_FALLBACKS_NAME, AUTHENTICATION_FALLBACKS_DESC, 0L); + this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME, SENT_BYTES_DESC, 0L); + this.receivedBytes = + this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME, RECEIVED_BYTES_DESC, 0L); + this.queueCallTime = + this.getMetricsRegistry().newTimeHistogram(QUEUE_CALL_TIME_NAME, QUEUE_CALL_TIME_DESC); + this.processCallTime = + this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, PROCESS_CALL_TIME_DESC); + this.totalCallTime = + this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, TOTAL_CALL_TIME_DESC); + this.requestSize = + this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, REQUEST_SIZE_DESC); + this.responseSize = + this.getMetricsRegistry().newSizeHistogram(RESPONSE_SIZE_NAME, RESPONSE_SIZE_DESC); } @Override 
@@ -147,17 +140,17 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { if (wrapper != null) { mrb.addGauge(Interns.info(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC), wrapper.getTotalQueueSize()) .addGauge(Interns.info(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC), - wrapper.getGeneralQueueLength()) - .addGauge(Interns.info(REPLICATION_QUEUE_NAME, - REPLICATION_QUEUE_DESC), wrapper.getReplicationQueueLength()) + wrapper.getGeneralQueueLength()) + .addGauge(Interns.info(REPLICATION_QUEUE_NAME, REPLICATION_QUEUE_DESC), + wrapper.getReplicationQueueLength()) .addGauge(Interns.info(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC), - wrapper.getPriorityQueueLength()) + wrapper.getPriorityQueueLength()) .addGauge(Interns.info(METAPRIORITY_QUEUE_NAME, METAPRIORITY_QUEUE_DESC), - wrapper.getMetaPriorityQueueLength()) - .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME, - NUM_OPEN_CONNECTIONS_DESC), wrapper.getNumOpenConnections()) - .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME, - NUM_ACTIVE_HANDLER_DESC), wrapper.getActiveRpcHandlerCount()) + wrapper.getMetaPriorityQueueLength()) + .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME, NUM_OPEN_CONNECTIONS_DESC), + wrapper.getNumOpenConnections()) + .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME, NUM_ACTIVE_HANDLER_DESC), + wrapper.getActiveRpcHandlerCount()) .addGauge(Interns.info(NUM_ACTIVE_GENERAL_HANDLER_NAME, NUM_ACTIVE_GENERAL_HANDLER_DESC), wrapper.getActiveGeneralRpcHandlerCount()) .addGauge( @@ -166,16 +159,13 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { .addGauge( Interns.info(NUM_ACTIVE_REPLICATION_HANDLER_NAME, NUM_ACTIVE_REPLICATION_HANDLER_DESC), wrapper.getActiveReplicationRpcHandlerCount()) - .addCounter(Interns.info(NUM_GENERAL_CALLS_DROPPED_NAME, - NUM_GENERAL_CALLS_DROPPED_DESC), wrapper.getNumGeneralCallsDropped()) - .addCounter(Interns.info(NUM_LIFO_MODE_SWITCHES_NAME, - NUM_LIFO_MODE_SWITCHES_DESC), wrapper.getNumLifoModeSwitches()) - .addGauge(Interns.info(WRITE_QUEUE_NAME, WRITE_QUEUE_DESC), - wrapper.getWriteQueueLength()) - .addGauge(Interns.info(READ_QUEUE_NAME, READ_QUEUE_DESC), - wrapper.getReadQueueLength()) - .addGauge(Interns.info(SCAN_QUEUE_NAME, SCAN_QUEUE_DESC), - wrapper.getScanQueueLength()) + .addCounter(Interns.info(NUM_GENERAL_CALLS_DROPPED_NAME, NUM_GENERAL_CALLS_DROPPED_DESC), + wrapper.getNumGeneralCallsDropped()) + .addCounter(Interns.info(NUM_LIFO_MODE_SWITCHES_NAME, NUM_LIFO_MODE_SWITCHES_DESC), + wrapper.getNumLifoModeSwitches()) + .addGauge(Interns.info(WRITE_QUEUE_NAME, WRITE_QUEUE_DESC), wrapper.getWriteQueueLength()) + .addGauge(Interns.info(READ_QUEUE_NAME, READ_QUEUE_DESC), wrapper.getReadQueueLength()) + .addGauge(Interns.info(SCAN_QUEUE_NAME, SCAN_QUEUE_DESC), wrapper.getScanQueueLength()) .addGauge(Interns.info(NUM_ACTIVE_WRITE_HANDLER_NAME, NUM_ACTIVE_WRITE_HANDLER_DESC), wrapper.getActiveWriteRpcHandlerCount()) .addGauge(Interns.info(NUM_ACTIVE_READ_HANDLER_NAME, NUM_ACTIVE_READ_HANDLER_DESC), diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java index db30c0348c35..136294883b69 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java index b4f62b3970b7..413ea5399214 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.Cluster; @@ -43,27 +41,24 @@ protected JobUtil() { /** * Initializes the staging directory and returns the path. - * * @param conf system configuration * @return staging directory path * @throws IOException if the ownership on the staging directory is not as expected * @throws InterruptedException if the thread getting the staging directory is interrupted */ - public static Path getStagingDir(Configuration conf) - throws IOException, InterruptedException { + public static Path getStagingDir(Configuration conf) throws IOException, InterruptedException { return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf); } /** * Initializes the staging directory and returns the qualified path. - * * @param conf conf system configuration * @return qualified staging directory path * @throws IOException if the ownership on the staging directory is not as expected * @throws InterruptedException if the thread getting the staging directory is interrupted */ public static Path getQualifiedStagingDir(Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Cluster cluster = new Cluster(conf); Path stagingDir = JobSubmissionFiles.getStagingDir(cluster, conf); return cluster.getFileSystem().makeQualified(stagingDir); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java index 4487021fac16..f1692edffaf2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -95,21 +94,18 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of regions in transition. 
- * * @param ritCount count of the regions in transition. */ void setRIT(int ritCount); /** * Set the count of the number of regions that have been in transition over the threshold time. - * * @param ritCountOverThreshold number of regions in transition for longer than threshold. */ void setRITCountOverThreshold(int ritCountOverThreshold); /** * Set the oldest region in transition. - * * @param age age of the oldest RIT. */ void setRITOldestAge(long age); @@ -122,35 +118,30 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of orphan regions on RS. - * * @param orphanRegionsOnRs count of the orphan regions on RS in HBCK chore report. */ void setOrphanRegionsOnRs(int orphanRegionsOnRs); /** * Set the number of orphan regions on FS. - * * @param orphanRegionsOnFs count of the orphan regions on FS in HBCK chore report. */ void setOrphanRegionsOnFs(int orphanRegionsOnFs); /** * Set the number of inconsistent regions. - * * @param inconsistentRegions count of the inconsistent regions in HBCK chore report. */ void setInconsistentRegions(int inconsistentRegions); /** * Set the number of holes. - * * @param holes count of the holes in CatalogJanitor Consistency report. */ void setHoles(int holes); /** * Set the number of overlaps. - * * @param overlaps count of the overlaps in CatalogJanitor Consistency report. */ void setOverlaps(int overlaps); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java index a2b2897b94b5..fba6e9ad8d39 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -28,8 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsAssignmentManagerSourceImpl - extends BaseSourceImpl +public class MetricsAssignmentManagerSourceImpl extends BaseSourceImpl implements MetricsAssignmentManagerSource { private MutableGaugeLong ritGauge; @@ -63,16 +61,15 @@ public MetricsAssignmentManagerSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsAssignmentManagerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsAssignmentManagerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } public void init() { ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, RIT_COUNT_DESC, 0L); - ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, - RIT_COUNT_OVER_THRESHOLD_DESC,0L); + ritCountOverThresholdGauge = + metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, RIT_COUNT_OVER_THRESHOLD_DESC, 0L); ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, RIT_OLDEST_AGE_DESC, 0L); ritDurationHisto = metricsRegistry.newTimeHistogram(RIT_DURATION_NAME, RIT_DURATION_DESC); operationCounter = metricsRegistry.getCounter(OPERATION_COUNT_NAME, 0L); @@ -94,8 +91,8 @@ public void init() { metricsRegistry.newGauge(EMPTY_REGION_INFO_REGIONS, EMPTY_REGION_INFO_REGIONS_DESC, 0L); /** - * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is - * moving away from using Hadoop's metric2 to having independent HBase specific Metrics. Use + * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is moving + * away from using Hadoop's metric2 to having independent HBase specific Metrics. Use * {@link BaseSourceImpl#registry} to register the new metrics. */ assignMetrics = new OperationMetrics(registry, ASSIGN_METRIC_PREFIX); @@ -222,7 +219,7 @@ public OperationMetrics getCloseMetrics() { public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName); metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java index 91dc71a034cc..53ed8a25ed0e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -54,7 +53,6 @@ public interface MetricsMasterFileSystemSource extends BaseSource { String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()"; String SPLIT_SIZE_DESC = "Size of WAL files being split"; - void updateMetaWALSplitTime(long time); void updateMetaWALSplitSize(long size); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java index d78efce2add9..b9a093c759e2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -23,8 +22,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsMasterFilesystemSourceImpl - extends BaseSourceImpl +public class MetricsMasterFilesystemSourceImpl extends BaseSourceImpl implements MetricsMasterFileSystemSource { private MetricHistogram splitSizeHisto; @@ -36,9 +34,8 @@ public MetricsMasterFilesystemSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsMasterFilesystemSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsMasterFilesystemSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -62,7 +59,6 @@ public void updateSplitSize(long size) { splitSizeHisto.add(size); } - @Override public void updateMetaWALSplitTime(long time) { metaSplitTimeHisto.add(time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java index db4f25ec03e3..07ceaaf2e241 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java index 197f9f9fe754..a399e53b4fb3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java index 6fd254e9a690..dc5773cb9046 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java index 69e7d7958fab..c1195c8c61b1 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,29 +24,20 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsMasterSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsMasterSource. 
Implements BaseSource through BaseSourceImpl, + * following the pattern */ @InterfaceAudience.Private -public class MetricsMasterProcSourceImpl - extends BaseSourceImpl implements MetricsMasterProcSource { +public class MetricsMasterProcSourceImpl extends BaseSourceImpl implements MetricsMasterProcSource { private final MetricsMasterWrapper masterWrapper; public MetricsMasterProcSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper); } - public MetricsMasterProcSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { + public MetricsMasterProcSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; @@ -64,13 +54,12 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { // masterWrapper can be null because this function is called inside of init. if (masterWrapper != null) { - metricsRecordBuilder - .addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC), - masterWrapper.getNumWALFiles()); + metricsRecordBuilder.addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC), + masterWrapper.getNumWALFiles()); } metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java index 8450432ade67..270e4e49f3f2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -57,40 +58,35 @@ public interface MetricsMasterQuotaSource extends BaseSource { /** * Updates the metric tracking the number of space quotas defined in the system. 
- * * @param numSpaceQuotas The number of space quotas defined */ void updateNumSpaceQuotas(long numSpaceQuotas); /** - * Updates the metric tracking the number of tables the master has computed to be in - * violation of their space quota. - * + * Updates the metric tracking the number of tables the master has computed to be in violation of + * their space quota. * @param numTablesInViolation The number of tables violating a space quota */ void updateNumTablesInSpaceQuotaViolation(long numTablesInViolation); /** - * Updates the metric tracking the number of namespaces the master has computed to be in - * violation of their space quota. - * + * Updates the metric tracking the number of namespaces the master has computed to be in violation + * of their space quota. * @param numNamespacesInViolation The number of namespaces violating a space quota */ void updateNumNamespacesInSpaceQuotaViolation(long numNamespacesInViolation); /** - * Updates the metric tracking the number of region size reports the master is currently - * retaining in memory. - * + * Updates the metric tracking the number of region size reports the master is currently retaining + * in memory. * @param numCurrentRegionSizeReports The number of region size reports the master is holding in - * memory + * memory */ void updateNumCurrentSpaceQuotaRegionSizeReports(long numCurrentRegionSizeReports); /** - * Updates the metric tracking the amount of time taken by the {@code QuotaObserverChore} - * which runs periodically. - * + * Updates the metric tracking the amount of time taken by the {@code QuotaObserverChore} which + * runs periodically. * @param time The execution time of the chore in milliseconds */ void incrementSpaceQuotaObserverChoreTime(long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java index 2dcd945ea811..a53652b0f3dc 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java index 0fae0e744059..6a489eb70019 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java index 750c1c959fcb..7c28e22035be 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +19,6 @@ import java.util.Map; import java.util.Map.Entry; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; @@ -33,7 +33,7 @@ */ @InterfaceAudience.Private public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl - implements MetricsMasterQuotaSource { + implements MetricsMasterQuotaSource { private final MetricsMasterWrapper wrapper; private final MutableGaugeLong spaceQuotasGauge; private final MutableGaugeLong tablesViolatingQuotasGauge; @@ -48,30 +48,29 @@ public MetricsMasterQuotaSourceImpl(MetricsMasterWrapper wrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); } - public MetricsMasterQuotaSourceImpl( - String metricsName, String metricsDescription, String metricsContext, - String metricsJmxContext, MetricsMasterWrapper wrapper) { + public MetricsMasterQuotaSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; - spaceQuotasGauge = getMetricsRegistry().newGauge( - NUM_SPACE_QUOTAS_NAME, NUM_SPACE_QUOTAS_DESC, 0L); - tablesViolatingQuotasGauge = getMetricsRegistry().newGauge( - NUM_TABLES_QUOTA_VIOLATIONS_NAME, NUM_TABLES_QUOTA_VIOLATIONS_DESC, 0L); - namespacesViolatingQuotasGauge = getMetricsRegistry().newGauge( - NUM_NS_QUOTA_VIOLATIONS_NAME, NUM_NS_QUOTA_VIOLATIONS_DESC, 0L); - regionSpaceReportsGauge = getMetricsRegistry().newGauge( - NUM_REGION_SIZE_REPORTS_NAME, NUM_REGION_SIZE_REPORTS_DESC, 0L); + spaceQuotasGauge = + getMetricsRegistry().newGauge(NUM_SPACE_QUOTAS_NAME, NUM_SPACE_QUOTAS_DESC, 0L); + tablesViolatingQuotasGauge = getMetricsRegistry().newGauge(NUM_TABLES_QUOTA_VIOLATIONS_NAME, + NUM_TABLES_QUOTA_VIOLATIONS_DESC, 0L); + namespacesViolatingQuotasGauge = getMetricsRegistry().newGauge(NUM_NS_QUOTA_VIOLATIONS_NAME, + NUM_NS_QUOTA_VIOLATIONS_DESC, 0L); + regionSpaceReportsGauge = getMetricsRegistry().newGauge(NUM_REGION_SIZE_REPORTS_NAME, + NUM_REGION_SIZE_REPORTS_DESC, 0L); - quotaObserverTimeHisto = getMetricsRegistry().newTimeHistogram( - QUOTA_OBSERVER_CHORE_TIME_NAME, QUOTA_OBSERVER_CHORE_TIME_DESC); - snapshotObserverTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_CHORE_TIME_NAME, SNAPSHOT_OBSERVER_CHORE_TIME_DESC); + quotaObserverTimeHisto = getMetricsRegistry().newTimeHistogram(QUOTA_OBSERVER_CHORE_TIME_NAME, + QUOTA_OBSERVER_CHORE_TIME_DESC); + snapshotObserverTimeHisto = getMetricsRegistry() + .newTimeHistogram(SNAPSHOT_OBSERVER_CHORE_TIME_NAME, SNAPSHOT_OBSERVER_CHORE_TIME_DESC); snapshotObserverSizeComputationTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME, SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC); - snapshotObserverSnapshotFetchTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_FETCH_TIME_NAME, SNAPSHOT_OBSERVER_FETCH_TIME_DESC); + SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME, SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC); + snapshotObserverSnapshotFetchTimeHisto = getMetricsRegistry() + 
.newTimeHistogram(SNAPSHOT_OBSERVER_FETCH_TIME_NAME, SNAPSHOT_OBSERVER_FETCH_TIME_DESC); } @Override @@ -109,7 +108,7 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder record = metricsCollector.addRecord(metricsRegistry.info()); if (wrapper != null) { // Summarize the tables - Map> tableUsages = wrapper.getTableSpaceUtilization(); + Map> tableUsages = wrapper.getTableSpaceUtilization(); String tableSummary = "[]"; if (tableUsages != null && !tableUsages.isEmpty()) { tableSummary = generateJsonQuotaSummary(tableUsages.entrySet(), "table"); @@ -118,7 +117,7 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { // Summarize the namespaces String nsSummary = "[]"; - Map> namespaceUsages = wrapper.getNamespaceSpaceUtilization(); + Map> namespaceUsages = wrapper.getNamespaceSpaceUtilization(); if (namespaceUsages != null && !namespaceUsages.isEmpty()) { nsSummary = generateJsonQuotaSummary(namespaceUsages.entrySet(), "namespace"); } @@ -130,10 +129,10 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { /** * Summarizes the usage and limit for many targets (table or namespace) into JSON. */ - private String generateJsonQuotaSummary( - Iterable>> data, String target) { + private String generateJsonQuotaSummary(Iterable>> data, + String target) { StringBuilder sb = new StringBuilder(); - for (Entry> tableUsage : data) { + for (Entry> tableUsage : data) { String tableName = tableUsage.getKey(); long usage = tableUsage.getValue().getKey(); long limit = tableUsage.getValue().getValue(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java index 5f275e847dc6..8f512ddb6572 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -74,7 +73,7 @@ public interface MetricsMasterSource extends BaseSource { String MASTER_ACTIVE_TIME_DESC = "Master Active Time"; String MASTER_START_TIME_DESC = "Master Start Time"; String MASTER_FINISHED_INITIALIZATION_TIME_DESC = - "Timestamp when Master has finished initializing"; + "Timestamp when Master has finished initializing"; String AVERAGE_LOAD_DESC = "AverageLoad"; String LIVE_REGION_SERVERS_DESC = "Names of live RegionServers"; String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers"; @@ -95,27 +94,22 @@ public interface MetricsMasterSource extends BaseSource { /** * Increment the number of requests the cluster has seen. - * * @param inc Ammount to increment the total by. */ void incRequests(final long inc); /** * Increment the number of read requests the cluster has seen. - * * @param inc Ammount to increment the total by. */ void incReadRequests(final long inc); - /** * Increment the number of write requests the cluster has seen. - * * @param inc Ammount to increment the total by. 
*/ void incWriteRequests(final long inc); - /** * @return {@link OperationMetrics} containing common metrics for server crash operation */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java index fce574a2cf07..bfdf348b34f7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java index a4b3fa194f9c..84c49062f03a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; /** - * Factory to create MetricsMasterSource when given a MetricsMasterWrapper + * Factory to create MetricsMasterSource when given a MetricsMasterWrapper */ @InterfaceAudience.Private public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory { private static enum FactoryStorage { INSTANCE; + MetricsMasterSource masterSource; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java index 4072d8d20835..e366f6ad5b40 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -28,13 +27,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsMasterSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsMasterSource. 
Implements BaseSource through BaseSourceImpl, + * following the pattern */ @InterfaceAudience.Private -public class MetricsMasterSourceImpl - extends BaseSourceImpl implements MetricsMasterSource { +public class MetricsMasterSourceImpl extends BaseSourceImpl implements MetricsMasterSource { private final MetricsMasterWrapper masterWrapper; private MutableFastCounter clusterRequestsCounter; @@ -44,18 +41,11 @@ public class MetricsMasterSourceImpl private OperationMetrics serverCrashMetrics; public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper); } - public MetricsMasterSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { + public MetricsMasterSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; @@ -69,9 +59,9 @@ public void init() { clusterWriteRequestsCounter = metricsRegistry.newCounter(CLUSTER_WRITE_REQUESTS_NAME, "", 0L); /* - * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is - * moving away from using Hadoop's metric2 to having independent HBase specific Metrics. Use - * {@link BaseSourceImpl#registry} to register the new metrics. + * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is moving + * away from using Hadoop's metric2 to having independent HBase specific Metrics. Use {@link + * BaseSourceImpl#registry} to register the new metrics. 
*/ serverCrashMetrics = new OperationMetrics(registry, SERVER_CRASH_METRIC_PREFIX); } @@ -106,45 +96,45 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { metricsRecordBuilder .addGauge(Interns.info(MERGE_PLAN_COUNT_NAME, MERGE_PLAN_COUNT_DESC), - masterWrapper.getMergePlanCount()) + masterWrapper.getMergePlanCount()) .addGauge(Interns.info(SPLIT_PLAN_COUNT_NAME, SPLIT_PLAN_COUNT_DESC), - masterWrapper.getSplitPlanCount()) - .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, - MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime()) - .addGauge(Interns.info(MASTER_START_TIME_NAME, - MASTER_START_TIME_DESC), masterWrapper.getStartTime()) - .addGauge(Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, - MASTER_FINISHED_INITIALIZATION_TIME_DESC), - masterWrapper.getMasterInitializationTime()) + masterWrapper.getSplitPlanCount()) + .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, MASTER_ACTIVE_TIME_DESC), + masterWrapper.getActiveTime()) + .addGauge(Interns.info(MASTER_START_TIME_NAME, MASTER_START_TIME_DESC), + masterWrapper.getStartTime()) + .addGauge( + Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, + MASTER_FINISHED_INITIALIZATION_TIME_DESC), + masterWrapper.getMasterInitializationTime()) .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC), - masterWrapper.getAverageLoad()) + masterWrapper.getAverageLoad()) .addGauge(Interns.info(ONLINE_REGION_COUNT_NAME, ONLINE_REGION_COUNT_DESC), - regionNumberPair.getFirst()) + regionNumberPair.getFirst()) .addGauge(Interns.info(OFFLINE_REGION_COUNT_NAME, OFFLINE_REGION_COUNT_DESC), - regionNumberPair.getSecond()) + regionNumberPair.getSecond()) .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC), - masterWrapper.getRegionServers()) - .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, - NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getNumRegionServers()) + masterWrapper.getRegionServers()) + .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), + masterWrapper.getNumRegionServers()) .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC), - masterWrapper.getDeadRegionServers()) - .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, - NUMBER_OF_DEAD_REGION_SERVERS_DESC), - masterWrapper.getNumDeadRegionServers()) + masterWrapper.getDeadRegionServers()) + .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, NUMBER_OF_DEAD_REGION_SERVERS_DESC), + masterWrapper.getNumDeadRegionServers()) .tag(Interns.info(DRAINING_REGION_SERVER_NAME, DRAINING_REGION_SERVER_DESC), - masterWrapper.getDrainingRegionServers()) + masterWrapper.getDrainingRegionServers()) .addGauge(Interns.info(NUM_DRAINING_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), - masterWrapper.getNumDrainingRegionServers()) + masterWrapper.getNumDrainingRegionServers()) .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - masterWrapper.getZookeeperQuorum()) + masterWrapper.getZookeeperQuorum()) .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName()) .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId()) .tag(Interns.info(IS_ACTIVE_MASTER_NAME, IS_ACTIVE_MASTER_DESC), - String.valueOf(masterWrapper.getIsActiveMaster())); + String.valueOf(masterWrapper.getIsActiveMaster())); } metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java index 1b3a75c3b84e..a900edf115e3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import java.util.Map; @@ -42,63 +41,54 @@ public interface MetricsMasterWrapper { /** * Get Average Load - * * @return Average Load */ double getAverageLoad(); /** * Get the Cluster ID - * * @return Cluster ID */ String getClusterId(); /** * Get the ZooKeeper Quorum Info - * * @return ZooKeeper Quorum Info */ String getZookeeperQuorum(); /** * Get the co-processors - * * @return Co-processors */ String[] getCoprocessors(); /** * Get hbase master start time - * * @return Start time of master in milliseconds */ long getStartTime(); /** * Get the hbase master active time - * * @return Time in milliseconds when master became active */ long getActiveTime(); /** * Whether this master is the active master - * * @return True if this is the active master */ boolean getIsActiveMaster(); /** * Get the live region servers - * * @return Live region servers */ String getRegionServers(); /** * Get the number of live region servers - * * @return number of Live region servers */ @@ -106,28 +96,24 @@ public interface MetricsMasterWrapper { /** * Get the dead region servers - * * @return Dead region Servers */ String getDeadRegionServers(); /** * Get the number of dead region servers - * * @return number of Dead region Servers */ int getNumDeadRegionServers(); /** * Get the draining region servers - * * @return Draining region server */ String getDrainingRegionServers(); /** * Get the number of draining region servers - * * @return number of draining region servers */ int getNumDrainingRegionServers(); @@ -150,12 +136,12 @@ public interface MetricsMasterWrapper { /** * Gets the space usage and limit for each table. */ - Map> getTableSpaceUtilization(); + Map> getTableSpaceUtilization(); /** * Gets the space usage and limit for each namespace. */ - Map> getNamespaceSpaceUtilization(); + Map> getNamespaceSpaceUtilization(); /** * Get the time in Millis when the master finished initializing/becoming the active master @@ -164,7 +150,6 @@ public interface MetricsMasterWrapper { /** * Get the online and offline region counts - * * @return pair of count for online regions and offline regions */ PairOfSameType getRegionCounts(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java index 15315b6c3ef8..88e21621f100 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java index 7077f73ea47b..f84911a199b0 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -33,20 +32,18 @@ public MetricsSnapshotSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsSnapshotSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsSnapshotSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @Override public void init() { - snapshotTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC); - snapshotCloneTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC); - snapshotRestoreTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC); + snapshotTimeHisto = metricsRegistry.newTimeHistogram(SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC); + snapshotCloneTimeHisto = + metricsRegistry.newTimeHistogram(SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC); + snapshotRestoreTimeHisto = + metricsRegistry.newTimeHistogram(SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java index 6b8c40ba5127..502de8859ae9 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public interface MetricsBalancerSource extends BaseSource { +public interface MetricsBalancerSource extends BaseSource { /** * The name of the metrics diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java index 7bccbb70d584..1e06514d502c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -32,9 +31,8 @@ public MetricsBalancerSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsBalancerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsBalancerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); updateBalancerStatus(true); } @@ -57,6 +55,6 @@ public void incrMiscInvocations() { @Override public void updateBalancerStatus(boolean status) { - metricsRegistry.tag(BALANCER_STATUS,"", String.valueOf(status), true); + metricsRegistry.tag(BALANCER_STATUS, "", String.valueOf(status), true); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java index 6eecc1233fd3..dac3d31781a7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface extends the basic metrics balancer source to add a function - * to report metrics that related to stochastic load balancer. The purpose is to - * offer an insight to the internal cost calculations that can be useful to tune - * the balancer. For details, refer to HBASE-13965 + * This interface extends the basic metrics balancer source to add a function to report metrics that + * related to stochastic load balancer. The purpose is to offer an insight to the internal cost + * calculations that can be useful to tune the balancer. 
For details, refer to HBASE-13965 */ @InterfaceAudience.Private public interface MetricsStochasticBalancerSource extends MetricsBalancerSource { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java index de1dd81b17fa..8546799eb26f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceImpl implements - MetricsStochasticBalancerSource { +public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceImpl + implements MetricsStochasticBalancerSource { private static final String TABLE_FUNCTION_SEP = "_"; // Most Recently Used(MRU) cache @@ -38,14 +36,14 @@ public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceIm private int mruCap = calcMruCap(metricsSize); private final Map> stochasticCosts = - new LinkedHashMap>(mruCap, MRU_LOAD_FACTOR, true) { - private static final long serialVersionUID = 8204713453436906599L; + new LinkedHashMap>(mruCap, MRU_LOAD_FACTOR, true) { + private static final long serialVersionUID = 8204713453436906599L; - @Override - protected boolean removeEldestEntry(Map.Entry> eldest) { - return size() > mruCap; - } - }; + @Override + protected boolean removeEldestEntry(Map.Entry> eldest) { + return size() > mruCap; + } + }; private Map costFunctionDescs = new ConcurrentHashMap<>(); /** diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java index 76391bb8d7b7..a2f2bb9d17f8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; /** - * BaseSource for dynamic metrics to announce to Metrics2. - * In hbase-hadoop{1|2}-compat there is an implementation of this interface. + * BaseSource for dynamic metrics to announce to Metrics2. In hbase-hadoop{1|2}-compat there is an + * implementation of this interface. 
*/ @InterfaceAudience.Private public interface BaseSource { @@ -36,56 +35,48 @@ public interface BaseSource { /** * Set a gauge to a specific value. - * * @param gaugeName the name of the gauge - * @param value the value + * @param value the value */ void setGauge(String gaugeName, long value); /** * Add some amount to a gauge. - * * @param gaugeName the name of the gauge - * @param delta the amount to change the gauge by. + * @param delta the amount to change the gauge by. */ void incGauge(String gaugeName, long delta); /** * Subtract some amount from a gauge. - * * @param gaugeName the name of the gauge - * @param delta the amount to change the gauge by. + * @param delta the amount to change the gauge by. */ void decGauge(String gaugeName, long delta); /** * Remove a metric and no longer announce it. - * * @param key Name of the gauge to remove. */ void removeMetric(String key); /** * Add some amount to a counter. - * * @param counterName the name of the counter - * @param delta the amount to change the counter by. + * @param delta the amount to change the counter by. */ void incCounters(String counterName, long delta); /** * Add some value to a histogram. - * * @param name the name of the histogram * @param value the value to add to the histogram */ void updateHistogram(String name, long value); - /** - * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. + * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. * eg. regionserver, master, thriftserver - * * @return The string context used to register this source to hadoop's metrics2 system. */ String getMetricsContext(); @@ -96,20 +87,19 @@ public interface BaseSource { String getMetricsDescription(); /** - * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase + * Get the name of the context in JMX that this source will be exposed through. This is in + * ObjectName format. With the default context being Hadoop -> HBase */ String getMetricsJmxContext(); /** - * Get the name of the metrics that are being exported by this source. - * Eg. IPC, GC, WAL + * Get the name of the metrics that are being exported by this source. Eg. IPC, GC, WAL */ String getMetricsName(); default MetricRegistryInfo getMetricRegistryInfo() { - return new MetricRegistryInfo(getMetricsName(), getMetricsDescription(), - getMetricsContext(), getMetricsJmxContext(), true); + return new MetricRegistryInfo(getMetricsName(), getMetricsDescription(), getMetricsContext(), + getMetricsJmxContext(), true); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java index a90d810701c5..653982c8082e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.hbase.metrics.impl.GlobalMetricRegistriesAdapter; @@ -33,16 +32,16 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to - * DefaultMetricsSystem and creation of the metrics registry. - * - * All MetricsSource's in hbase-hadoop2-compat should derive from this class. + * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to + * DefaultMetricsSystem and creation of the metrics registry. All MetricsSource's in + * hbase-hadoop2-compat should derive from this class. */ @InterfaceAudience.Private public class BaseSourceImpl implements BaseSource, MetricsSource { private static enum DefaultMetricsSystemInitializer { INSTANCE; + private boolean inited = false; synchronized void init(String name) { @@ -62,10 +61,10 @@ synchronized void init(String name) { } /** - * @deprecated Use hbase-metrics/hbase-metrics-api module interfaces for new metrics. - * Defining BaseSources for new metric groups (WAL, RPC, etc) is not needed anymore, - * however, for existing {@link BaseSource} implementations, please use the field - * named "registry" which is a {@link MetricRegistry} instance together with the + * @deprecated Use hbase-metrics/hbase-metrics-api module interfaces for new metrics. Defining + * BaseSources for new metric groups (WAL, RPC, etc) is not needed anymore, however, + * for existing {@link BaseSource} implementations, please use the field named + * "registry" which is a {@link MetricRegistry} instance together with the * {@link HBaseMetrics2HadoopMetricsAdapter}. */ @Deprecated @@ -77,17 +76,16 @@ synchronized void init(String name) { /** * Note that there are at least 4 MetricRegistry definitions in the source code. The first one is - * Hadoop Metrics2 MetricRegistry, second one is DynamicMetricsRegistry which is HBase's fork - * of the Hadoop metrics2 class. The third one is the dropwizard metrics implementation of + * Hadoop Metrics2 MetricRegistry, second one is DynamicMetricsRegistry which is HBase's fork of + * the Hadoop metrics2 class. The third one is the dropwizard metrics implementation of * MetricRegistry, and finally a new API abstraction in HBase that is the * o.a.h.h.metrics.MetricRegistry class. This last one is the new way to use metrics within the - * HBase code. However, the others are in play because of existing metrics2 based code still - * needs to coexists until we get rid of all of our BaseSource and convert them to the new - * framework. Until that happens, new metrics can use the new API, but will be collected - * through the HBaseMetrics2HadoopMetricsAdapter class. - * - * BaseSourceImpl has two MetricRegistries. metricRegistry is for hadoop Metrics2 based - * metrics, while the registry is for hbase-metrics based metrics. + * HBase code. However, the others are in play because of existing metrics2 based code still needs + * to coexists until we get rid of all of our BaseSource and convert them to the new framework. + * Until that happens, new metrics can use the new API, but will be collected through the + * HBaseMetrics2HadoopMetricsAdapter class. BaseSourceImpl has two MetricRegistries. + * metricRegistry is for hadoop Metrics2 based metrics, while the registry is for hbase-metrics + * based metrics. 
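The BaseSource and BaseSourceImpl hunks above and below cover the metrics2-facing API (setGauge, incGauge, incCounters, updateHistogram) and the four-argument constructor. As a rough usage sketch, a source built on this class would look roughly as follows; the class name, package and metric names here are invented for illustration:

package org.apache.hadoop.hbase.metrics.example; // hypothetical package, for illustration only

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;

// Hypothetical source; it only uses the BaseSourceImpl constructor and the BaseSource
// methods visible in this diff (setGauge, incCounters, updateHistogram).
public class MetricsExampleSourceImpl extends BaseSourceImpl {

  public MetricsExampleSourceImpl() {
    // metricsName, metricsDescription, metricsContext, metricsJmxContext
    super("Example", "Metrics about an example subsystem", "example", "Example,sub=Example");
  }

  public void updateQueueSize(long size) {
    setGauge("exampleQueueSize", size); // set a gauge to an absolute value
  }

  public void incRequests() {
    incCounters("exampleRequestCount", 1); // add to a counter
  }

  public void updateLatency(long millis) {
    updateHistogram("exampleLatencyMs", millis); // add a value to a histogram
  }
}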
*/ protected final MetricRegistry registry; @@ -101,10 +99,7 @@ synchronized void init(String name) { */ protected final HBaseMetrics2HadoopMetricsAdapter metricsAdapter; - public BaseSourceImpl( - String metricsName, - String metricsDescription, - String metricsContext, + public BaseSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { this.metricsName = metricsName; @@ -115,7 +110,7 @@ public BaseSourceImpl( metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext); DefaultMetricsSystemInitializer.INSTANCE.init(metricsName); - //Register this instance. + // Register this instance. DefaultMetricsSystem.instance().register(metricsJmxContext, metricsDescription, this); // hbase-metrics module based metrics are registered in the hbase MetricsRegistry. @@ -132,9 +127,8 @@ public void init() { /** * Set a single gauge to a value. - * * @param gaugeName gauge name - * @param value the new value of the gauge. + * @param value the new value of the gauge. */ public void setGauge(String gaugeName, long value) { MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, value); @@ -143,9 +137,8 @@ public void setGauge(String gaugeName, long value) { /** * Add some amount to a gauge. - * * @param gaugeName The name of the gauge to increment. - * @param delta The amount to increment the gauge by. + * @param delta The amount to increment the gauge by. */ public void incGauge(String gaugeName, long delta) { MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, 0L); @@ -154,9 +147,8 @@ public void incGauge(String gaugeName, long delta) { /** * Decrease the value of a named gauge. - * * @param gaugeName The name of the gauge. - * @param delta the ammount to subtract from a gauge value. + * @param delta the ammount to subtract from a gauge value. */ public void decGauge(String gaugeName, long delta) { MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, 0L); @@ -165,8 +157,7 @@ public void decGauge(String gaugeName, long delta) { /** * Increment a named counter by some value. - * - * @param key the name of the counter + * @param key the name of the counter * @param delta the ammount to increment */ public void incCounters(String key, long delta) { @@ -183,7 +174,6 @@ public void updateHistogram(String name, long value) { /** * Remove a named gauge. - * * @param key the key of the gauge to remove */ public void removeMetric(String key) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java index 3c5f898fc290..e582a57ad502 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -26,19 +25,20 @@ */ @InterfaceAudience.Private public interface ExceptionTrackingSource extends BaseSource { - String EXCEPTIONS_NAME="exceptions"; - String EXCEPTIONS_DESC="Exceptions caused by requests"; - String EXCEPTIONS_TYPE_DESC="Number of requests that resulted in the specified type of Exception"; - String EXCEPTIONS_OOO_NAME="exceptions.OutOfOrderScannerNextException"; - String EXCEPTIONS_BUSY_NAME="exceptions.RegionTooBusyException"; - String EXCEPTIONS_UNKNOWN_NAME="exceptions.UnknownScannerException"; - String EXCEPTIONS_SCANNER_RESET_NAME="exceptions.ScannerResetException"; - String EXCEPTIONS_SANITY_NAME="exceptions.FailedSanityCheckException"; - String EXCEPTIONS_MOVED_NAME="exceptions.RegionMovedException"; - String EXCEPTIONS_NSRE_NAME="exceptions.NotServingRegionException"; + String EXCEPTIONS_NAME = "exceptions"; + String EXCEPTIONS_DESC = "Exceptions caused by requests"; + String EXCEPTIONS_TYPE_DESC = + "Number of requests that resulted in the specified type of Exception"; + String EXCEPTIONS_OOO_NAME = "exceptions.OutOfOrderScannerNextException"; + String EXCEPTIONS_BUSY_NAME = "exceptions.RegionTooBusyException"; + String EXCEPTIONS_UNKNOWN_NAME = "exceptions.UnknownScannerException"; + String EXCEPTIONS_SCANNER_RESET_NAME = "exceptions.ScannerResetException"; + String EXCEPTIONS_SANITY_NAME = "exceptions.FailedSanityCheckException"; + String EXCEPTIONS_MOVED_NAME = "exceptions.RegionMovedException"; + String EXCEPTIONS_NSRE_NAME = "exceptions.NotServingRegionException"; String EXCEPTIONS_MULTI_TOO_LARGE_NAME = "exceptions.multiResponseTooLarge"; - String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " + - "rest of the requests will have to be retried."; + String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " + + "rest of the requests will have to be retried."; String EXCEPTIONS_CALL_QUEUE_TOO_BIG = "exceptions.callQueueTooBig"; String EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC = "Call queue is full"; String EXCEPTIONS_QUOTA_EXCEEDED = "exceptions.quotaExceeded"; @@ -54,18 +54,32 @@ public interface ExceptionTrackingSource extends BaseSource { * Different types of exceptions */ void outOfOrderException(); + void failedSanityException(); + void movedRegionException(); + void notServingRegionException(); + void unknownScannerException(); + void scannerResetException(); + void tooBusyException(); + void multiActionTooLargeException(); + void callQueueTooBigException(); + void quotaExceededException(); + void rpcThrottlingException(); + void callDroppedException(); + void callTimedOut(); + void requestTooBigException(); + void otherExceptions(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java index a4e75ba0137e..624d5be7b4f3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java @@ -15,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
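ExceptionTrackingSource above exposes one increment method per well-known exception type plus otherExceptions() as a catch-all. The dispatch from a caught exception to the matching method lives in the RPC server code and is not part of this diff; purely as an illustration of the shape of that mapping, keyed on the exception's simple class name so the sketch stays self-contained:

import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource;

// Illustrative only: routes a caught Throwable to the matching ExceptionTrackingSource
// method. HBase's real dispatch is done elsewhere (in the RPC layer), not in this diff.
public final class ExceptionDispatchSketch {
  private ExceptionDispatchSketch() {
  }

  public static void record(ExceptionTrackingSource source, Throwable t) {
    switch (t.getClass().getSimpleName()) {
      case "OutOfOrderScannerNextException":
        source.outOfOrderException();
        break;
      case "RegionTooBusyException":
        source.tooBusyException();
        break;
      case "NotServingRegionException":
        source.notServingRegionException();
        break;
      case "UnknownScannerException":
        source.unknownScannerException();
        break;
      case "CallQueueTooBigException":
        source.callQueueTooBigException();
        break;
      default:
        source.otherExceptions();
    }
  }
}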
*/ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.yetus.audience.InterfaceAudience; /** - * Common base implementation for metrics sources which need to track exceptions thrown or - * received. + * Common base implementation for metrics sources which need to track exceptions thrown or received. */ @InterfaceAudience.Private -public class ExceptionTrackingSourceImpl extends BaseSourceImpl - implements ExceptionTrackingSource { +public class ExceptionTrackingSourceImpl extends BaseSourceImpl implements ExceptionTrackingSource { protected MutableFastCounter exceptions; protected MutableFastCounter exceptionsOOO; protected MutableFastCounter exceptionsBusy; @@ -46,7 +43,7 @@ public class ExceptionTrackingSourceImpl extends BaseSourceImpl protected MutableFastCounter otherExceptions; public ExceptionTrackingSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -54,36 +51,36 @@ public ExceptionTrackingSourceImpl(String metricsName, String metricsDescription public void init() { super.init(); this.exceptions = this.getMetricsRegistry().newCounter(EXCEPTIONS_NAME, EXCEPTIONS_DESC, 0L); - this.exceptionsOOO = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_OOO_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsBusy = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_BUSY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsUnknown = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_UNKNOWN_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsOOO = + this.getMetricsRegistry().newCounter(EXCEPTIONS_OOO_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsBusy = + this.getMetricsRegistry().newCounter(EXCEPTIONS_BUSY_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsUnknown = + this.getMetricsRegistry().newCounter(EXCEPTIONS_UNKNOWN_NAME, EXCEPTIONS_TYPE_DESC, 0L); this.exceptionsScannerReset = this.getMetricsRegistry() .newCounter(EXCEPTIONS_SCANNER_RESET_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsSanity = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_SANITY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsMoved = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsNSRE = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsSanity = + this.getMetricsRegistry().newCounter(EXCEPTIONS_SANITY_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsMoved = + this.getMetricsRegistry().newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsNSRE = + this.getMetricsRegistry().newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); this.exceptionsMultiTooLarge = this.getMetricsRegistry() .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L); this.exceptionsCallQueueTooBig = this.getMetricsRegistry() .newCounter(EXCEPTIONS_CALL_QUEUE_TOO_BIG, EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC, 0L); - this.exceptionsQuotaExceeded = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_QUOTA_EXCEEDED, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsRpcThrottling = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_RPC_THROTTLING, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsCallDropped = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_DROPPED, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsCallTimedOut = 
this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_TIMED_OUT, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionRequestTooBig = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_REQUEST_TOO_BIG, EXCEPTIONS_TYPE_DESC, 0L); - this.otherExceptions = this.getMetricsRegistry() - .newCounter(OTHER_EXCEPTIONS, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsQuotaExceeded = + this.getMetricsRegistry().newCounter(EXCEPTIONS_QUOTA_EXCEEDED, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsRpcThrottling = + this.getMetricsRegistry().newCounter(EXCEPTIONS_RPC_THROTTLING, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsCallDropped = + this.getMetricsRegistry().newCounter(EXCEPTIONS_CALL_DROPPED, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsCallTimedOut = + this.getMetricsRegistry().newCounter(EXCEPTIONS_CALL_TIMED_OUT, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionRequestTooBig = + this.getMetricsRegistry().newCounter(EXCEPTIONS_REQUEST_TOO_BIG, EXCEPTIONS_TYPE_DESC, 0L); + this.otherExceptions = + this.getMetricsRegistry().newCounter(OTHER_EXCEPTIONS, EXCEPTIONS_TYPE_DESC, 0L); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java index 254d3b4a9719..cd7cc1ce244c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsTag; import org.apache.yetus.audience.InterfaceAudience; @@ -53,11 +51,11 @@ public ConcurrentHashMap load(MetricsInfo key) { } }); - private Interns(){} + private Interns() { + } /** * Get a metric info object - * * @return an interned metric info object */ public static MetricsInfo info(String name, String description) { @@ -72,8 +70,7 @@ public static MetricsInfo info(String name, String description) { /** * Get a metrics tag - * - * @param info of the tag + * @param info of the tag * @param value of the tag * @return an interned metrics tag */ @@ -89,10 +86,9 @@ public static MetricsTag tag(MetricsInfo info, String value) { /** * Get a metrics tag - * - * @param name of the tag + * @param name of the tag * @param description of the tag - * @param value of the tag + * @param value of the tag * @return an interned metrics tag */ public static MetricsTag tag(String name, String description, String value) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java index 6cb542586c98..8c89eb577614 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -24,7 +23,7 @@ * Interface for sources that will export JvmPauseMonitor metrics */ @InterfaceAudience.Private -public interface JvmPauseMonitorSource { +public interface JvmPauseMonitorSource { String INFO_THRESHOLD_COUNT_KEY = "pauseInfoThresholdExceeded"; String INFO_THRESHOLD_COUNT_DESC = "Count of INFO level pause threshold alerts"; @@ -52,14 +51,12 @@ public interface JvmPauseMonitorSource { /** * Update the pause time histogram where GC activity was detected. - * * @param t time it took */ void updatePauseTimeWithGc(long t); /** * Update the pause time histogram where GC activity was not detected. - * * @param t time it took */ void updatePauseTimeWithoutGc(long t); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java index 575ca31c6442..d2e4baceafc8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,17 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import javax.management.ObjectName; import org.apache.yetus.audience.InterfaceAudience; /** - * Object that will register an mbean with the underlying metrics implementation. + * Object that will register an mbean with the underlying metrics implementation. */ @InterfaceAudience.Private -public interface MBeanSource { +public interface MBeanSource { /** * Register an mbean with the underlying metrics system @@ -34,7 +33,6 @@ public interface MBeanSource { * @param theMbean the actual MBean * @return ObjectName from jmx */ - ObjectName register(String serviceName, String metricsName, - Object theMbean); + ObjectName register(String serviceName, String metricsName, Object theMbean); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java index a5ffe8fb5e2c..e383b0e1bbd7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.metrics; import javax.management.ObjectName; - import org.apache.hadoop.metrics2.util.MBeans; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java index 42d139cb4e5a..a1c3c7fd8320 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.MetricsInfo; @@ -37,30 +36,33 @@ class MetricsInfoImpl implements MetricsInfo { this.description = Preconditions.checkNotNull(description, "description"); } - @Override public String name() { + @Override + public String name() { return name; } - @Override public String description() { + @Override + public String description() { return description; } - @Override public boolean equals(Object obj) { + @Override + public boolean equals(Object obj) { if (obj instanceof MetricsInfo) { MetricsInfo other = (MetricsInfo) obj; - return Objects.equal(name, other.name()) && - Objects.equal(description, other.description()); + return Objects.equal(name, other.name()) && Objects.equal(description, other.description()); } return false; } - @Override public int hashCode() { + @Override + public int hashCode() { return Objects.hashCode(name, description); } - @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name).add("description", description) + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).add("description", description) .toString(); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java index 064c9ca3f9a1..b90b6a3c674b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -41,9 +40,9 @@ public OperationMetrics(final MetricRegistry registry, final String metricNamePr Preconditions.checkNotNull(metricNamePrefix); /** - * TODO: As of now, Metrics description cannot be added/ registered with - * {@link MetricRegistry}. As metric names are unambiguous but concise, descriptions of - * metrics need to be made available someplace for users. + * TODO: As of now, Metrics description cannot be added/ registered with {@link MetricRegistry}. + * As metric names are unambiguous but concise, descriptions of metrics need to be made + * available someplace for users. 
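OperationMetrics above wires a submitted counter and a time histogram off the new hbase-metrics-api MetricRegistry with a caller-supplied name prefix (the registry.counter(...) and registry.histogram(...) calls follow just below). A minimal sketch of the same pattern; the metric name suffixes and the Counter.increment()/Histogram.update() calls are assumptions about the hbase-metrics-api, not spelled out in this diff:

import org.apache.hadoop.hbase.metrics.Counter;
import org.apache.hadoop.hbase.metrics.Histogram;
import org.apache.hadoop.hbase.metrics.MetricRegistry;

// Sketch of the OperationMetrics pattern: one counter and one latency histogram per
// operation, named off a shared prefix. Names and the calls on Counter/Histogram are
// assumptions for illustration, not taken from this diff.
public class OperationMetricsSketch {
  private final Counter submittedCounter;
  private final Histogram timeHisto;

  public OperationMetricsSketch(MetricRegistry registry, String metricNamePrefix) {
    this.submittedCounter = registry.counter(metricNamePrefix + "SubmittedCount");
    this.timeHisto = registry.histogram(metricNamePrefix + "Time");
  }

  public void submitted() {
    submittedCounter.increment();
  }

  public void finished(long timeMillis) {
    timeHisto.update(timeMillis);
  }
}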
*/ submittedCounter = registry.counter(metricNamePrefix + SUBMITTED_COUNT); timeHisto = registry.histogram(metricNamePrefix + TIME); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java index a816d4970449..a3f87818ce84 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,13 +40,11 @@ /** * This class acts as an adapter to export the MetricRegistry's in the global registry. Each - * MetricRegistry will be registered or unregistered from the metric2 system. The collection will - * be performed via the MetricsSourceAdapter and the MetricRegistry will collected like a - * BaseSource instance for a group of metrics (like WAL, RPC, etc) with the MetricRegistryInfo's - * JMX context. - * - *

    Developer note: - * Unlike the current metrics2 based approach, the new metrics approach + * MetricRegistry will be registered or unregistered from the metric2 system. The collection will be + * performed via the MetricsSourceAdapter and the MetricRegistry will collected like a BaseSource + * instance for a group of metrics (like WAL, RPC, etc) with the MetricRegistryInfo's JMX context. + *

    + * Developer note: Unlike the current metrics2 based approach, the new metrics approach * (hbase-metrics-api and hbase-metrics modules) work by having different MetricRegistries that are * initialized and used from the code that lives in their respective modules (hbase-server, etc). * There is no need to define BaseSource classes and do a lot of indirection. The MetricRegistry'es @@ -54,7 +52,6 @@ * MetricRegistries.global() and register adapters to the metrics2 subsystem. These adapters then * report the actual values by delegating to * {@link HBaseMetrics2HadoopMetricsAdapter#snapshotAllMetrics(MetricRegistry, MetricsCollector)}. - * * We do not initialize the Hadoop Metrics2 system assuming that other BaseSources already do so * (see BaseSourceImpl). Once the last BaseSource is moved to the new system, the metric2 * initialization should be moved here. @@ -67,6 +64,7 @@ public final class GlobalMetricRegistriesAdapter { private class MetricsSourceAdapter implements MetricsSource { private final MetricRegistry registry; + MetricsSourceAdapter(MetricRegistry registry) { this.registry = registry; } @@ -135,7 +133,7 @@ private void doRun() { MetricsSourceAdapter adapter = new MetricsSourceAdapter(registry); LOG.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription()); DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), - info.getMetricsDescription(), adapter); + info.getMetricsDescription(), adapter); registeredSources.put(info, adapter); // next collection will collect the newly registered MetricSource. Doing this here leads to // ConcurrentModificationException. @@ -145,7 +143,7 @@ private void doRun() { boolean removed = false; // Remove registered sources if it is removed from the global registry for (Iterator> it = - registeredSources.entrySet().iterator(); it.hasNext();) { + registeredSources.entrySet().iterator(); it.hasNext();) { Entry entry = it.next(); MetricRegistryInfo info = entry.getKey(); Optional found = MetricRegistries.global().get(info); @@ -153,7 +151,7 @@ private void doRun() { if (LOG.isDebugEnabled()) { LOG.debug("Removing adapter for the MetricRegistry: " + info.getMetricsJmxContext()); } - synchronized(DefaultMetricsSystem.instance()) { + synchronized (DefaultMetricsSystem.instance()) { DefaultMetricsSystem.instance().unregisterSource(info.getMetricsJmxContext()); helper.removeSourceName(info.getMetricsJmxContext()); helper.removeObjectName(info.getMetricsJmxContext()); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java index 5fc2450cdb5e..d708a8296212 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -/* - * Copyright 2016 Josh Elser - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ package org.apache.hadoop.hbase.metrics.impl; import java.util.Map; @@ -53,16 +38,15 @@ /** * This is the adapter from "HBase Metrics Framework", implemented in hbase-metrics-api and - * hbase-metrics modules to the Hadoop Metrics2 framework. This adapter is not a metric source, - * but a helper to be able to collect all of the Metric's in the MetricRegistry using the - * MetricsCollector and MetricsRecordBuilder. - * - * Some of the code is forked from https://github.com/joshelser/dropwizard-hadoop-metrics2. + * hbase-metrics modules to the Hadoop Metrics2 framework. This adapter is not a metric source, but + * a helper to be able to collect all of the Metric's in the MetricRegistry using the + * MetricsCollector and MetricsRecordBuilder. Some of the code is forked from + * https://github.com/joshelser/dropwizard-hadoop-metrics2. */ @InterfaceAudience.Private public class HBaseMetrics2HadoopMetricsAdapter { - private static final Logger LOG - = LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); + private static final Logger LOG = + LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); private static final String EMPTY_STRING = ""; public HBaseMetrics2HadoopMetricsAdapter() { @@ -70,14 +54,12 @@ public HBaseMetrics2HadoopMetricsAdapter() { /** * Iterates over the MetricRegistry and adds them to the {@code collector}. - * * @param collector A metrics collector */ - public void snapshotAllMetrics(MetricRegistry metricRegistry, - MetricsCollector collector) { + public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) { MetricRegistryInfo info = metricRegistry.getMetricRegistryInfo(); - MetricsRecordBuilder builder = collector.addRecord(Interns.info(info.getMetricsName(), - info.getMetricsDescription())); + MetricsRecordBuilder builder = + collector.addRecord(Interns.info(info.getMetricsName(), info.getMetricsDescription())); builder.setContext(info.getMetricsContext()); snapshotAllMetrics(metricRegistry, builder); @@ -85,13 +67,12 @@ public void snapshotAllMetrics(MetricRegistry metricRegistry, /** * Iterates over the MetricRegistry and adds them to the {@code builder}. 
- * * @param builder A record builder */ public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) { Map metrics = metricRegistry.getMetrics(); - for (Map.Entry e: metrics.entrySet()) { + for (Map.Entry e : metrics.entrySet()) { // Always capitalize the name String name = StringUtils.capitalize(e.getKey()); Metric metric = e.getValue(); @@ -99,13 +80,13 @@ public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuild if (metric instanceof Gauge) { addGauge(name, (Gauge) metric, builder); } else if (metric instanceof Counter) { - addCounter(name, (Counter)metric, builder); + addCounter(name, (Counter) metric, builder); } else if (metric instanceof Histogram) { - addHistogram(name, (Histogram)metric, builder); + addHistogram(name, (Histogram) metric, builder); } else if (metric instanceof Meter) { - addMeter(name, (Meter)metric, builder); + addMeter(name, (Meter) metric, builder); } else if (metric instanceof Timer) { - addTimer(name, (Timer)metric, builder); + addTimer(name, (Timer) metric, builder); } else { LOG.info("Ignoring unknown Metric class " + metric.getClass().getName()); } @@ -137,7 +118,6 @@ private void addCounter(String name, Counter counter, MetricsRecordBuilder build /** * Add Histogram value-distribution data to a Hadoop-Metrics2 record building. - * * @param name A base name for this record. * @param histogram A histogram to measure distribution of values. * @param builder A Hadoop-Metrics2 record builder. @@ -149,7 +129,6 @@ private void addHistogram(String name, Histogram histogram, MetricsRecordBuilder /** * Add Dropwizard-Metrics rate information to a Hadoop-Metrics2 record builder, converting the * rates to the appropriate unit. - * * @param builder A Hadoop-Metrics2 record builder. * @param name A base name for this record. */ @@ -159,7 +138,7 @@ private void addMeter(String name, Meter meter, MetricsRecordBuilder builder) { builder.addGauge(Interns.info(name + "_1min_rate", EMPTY_STRING), meter.getOneMinuteRate()); builder.addGauge(Interns.info(name + "_5min_rate", EMPTY_STRING), meter.getFiveMinuteRate()); builder.addGauge(Interns.info(name + "_15min_rate", EMPTY_STRING), - meter.getFifteenMinuteRate()); + meter.getFifteenMinuteRate()); } private void addTimer(String name, Timer timer, MetricsRecordBuilder builder) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java index 12fb43fce350..47ebdbbc7cf3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java @@ -1,20 +1,19 @@ /* - * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; @@ -125,8 +124,8 @@ public interface MetricsHeapMemoryManagerSource extends BaseSource { // Counters String DO_NOTHING_COUNTER_NAME = "tunerDoNothingCounter"; String DO_NOTHING_COUNTER_DESC = - "The number of times that tuner neither expands memstore global size limit nor expands " + - "blockcache max size"; + "The number of times that tuner neither expands memstore global size limit nor expands " + + "blockcache max size"; String ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME = "aboveHeapOccupancyLowWaterMarkCounter"; String ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC = "The number of times that heap occupancy percent is above low watermark"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java index 047f8e13b1e1..4fbeefc3ea8e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java @@ -1,20 +1,19 @@ /* - * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
+ * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; @@ -29,8 +28,8 @@ * BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl implements - MetricsHeapMemoryManagerSource { +public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl + implements MetricsHeapMemoryManagerSource { private final MetricHistogram blockedFlushHistogram; private final MetricHistogram unblockedFlushHistogram; @@ -56,35 +55,34 @@ public MetricsHeapMemoryManagerSourceImpl(String metricsName, String metricsDesc super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // Histograms - blockedFlushHistogram = getMetricsRegistry() - .newSizeHistogram(BLOCKED_FLUSH_NAME, BLOCKED_FLUSH_DESC); - unblockedFlushHistogram = getMetricsRegistry() - .newSizeHistogram(UNBLOCKED_FLUSH_NAME, UNBLOCKED_FLUSH_DESC); - incMemStoreSizeHistogram = getMetricsRegistry() - .newSizeHistogram(INC_MEMSTORE_TUNING_NAME, INC_MEMSTORE_TUNING_DESC); - decMemStoreSizeHistogram = getMetricsRegistry() - .newSizeHistogram(DEC_MEMSTORE_TUNING_NAME, DEC_MEMSTORE_TUNING_DESC); - incBlockCacheSizeHistogram = getMetricsRegistry() - .newSizeHistogram(INC_BLOCKCACHE_TUNING_NAME, INC_BLOCKCACHE_TUNING_DESC); - decBlockCacheSizeHistogram = getMetricsRegistry() - .newSizeHistogram(DEC_BLOCKCACHE_TUNING_NAME, DEC_BLOCKCACHE_TUNING_DESC); + blockedFlushHistogram = + getMetricsRegistry().newSizeHistogram(BLOCKED_FLUSH_NAME, BLOCKED_FLUSH_DESC); + unblockedFlushHistogram = + getMetricsRegistry().newSizeHistogram(UNBLOCKED_FLUSH_NAME, UNBLOCKED_FLUSH_DESC); + incMemStoreSizeHistogram = + getMetricsRegistry().newSizeHistogram(INC_MEMSTORE_TUNING_NAME, INC_MEMSTORE_TUNING_DESC); + decMemStoreSizeHistogram = + getMetricsRegistry().newSizeHistogram(DEC_MEMSTORE_TUNING_NAME, DEC_MEMSTORE_TUNING_DESC); + incBlockCacheSizeHistogram = getMetricsRegistry().newSizeHistogram(INC_BLOCKCACHE_TUNING_NAME, + INC_BLOCKCACHE_TUNING_DESC); + decBlockCacheSizeHistogram = getMetricsRegistry().newSizeHistogram(DEC_BLOCKCACHE_TUNING_NAME, + DEC_BLOCKCACHE_TUNING_DESC); // Gauges - blockedFlushGauge = getMetricsRegistry() - .newGauge(BLOCKED_FLUSH_GAUGE_NAME, BLOCKED_FLUSH_GAUGE_DESC, 0L); - unblockedFlushGauge = getMetricsRegistry() - .newGauge(UNBLOCKED_FLUSH_GAUGE_NAME, UNBLOCKED_FLUSH_GAUGE_DESC, 0L); - memStoreSizeGauge = getMetricsRegistry() - .newGauge(MEMSTORE_SIZE_GAUGE_NAME, MEMSTORE_SIZE_GAUGE_DESC, 0L); - blockCacheSizeGauge = getMetricsRegistry() - .newGauge(BLOCKCACHE_SIZE_GAUGE_NAME, BLOCKCACHE_SIZE_GAUGE_DESC, 0L); + blockedFlushGauge = + getMetricsRegistry().newGauge(BLOCKED_FLUSH_GAUGE_NAME, BLOCKED_FLUSH_GAUGE_DESC, 0L); + unblockedFlushGauge = + getMetricsRegistry().newGauge(UNBLOCKED_FLUSH_GAUGE_NAME, UNBLOCKED_FLUSH_GAUGE_DESC, 0L); + memStoreSizeGauge = + getMetricsRegistry().newGauge(MEMSTORE_SIZE_GAUGE_NAME, MEMSTORE_SIZE_GAUGE_DESC, 0L); + blockCacheSizeGauge = + getMetricsRegistry().newGauge(BLOCKCACHE_SIZE_GAUGE_NAME, BLOCKCACHE_SIZE_GAUGE_DESC, 0L); // Counters - doNothingCounter = getMetricsRegistry() - .newCounter(DO_NOTHING_COUNTER_NAME, DO_NOTHING_COUNTER_DESC, 0L); - aboveHeapOccupancyLowWatermarkCounter = getMetricsRegistry() - .newCounter(ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME, - 
ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC, 0L); + doNothingCounter = + getMetricsRegistry().newCounter(DO_NOTHING_COUNTER_NAME, DO_NOTHING_COUNTER_DESC, 0L); + aboveHeapOccupancyLowWatermarkCounter = getMetricsRegistry().newCounter( + ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME, ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC, 0L); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java index 23d02598a3d9..74edf74c456e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface will be implemented by a MetricsSource that will export metrics from - * multiple regions into the hadoop metrics system. + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * regions into the hadoop metrics system. */ @InterfaceAudience.Private public interface MetricsRegionAggregateSource extends BaseSource { @@ -53,14 +52,12 @@ public interface MetricsRegionAggregateSource extends BaseSource { /** * Register a MetricsRegionSource as being open. - * * @param source the source for the region being opened. */ void register(MetricsRegionSource source); /** * Remove a region's source. This is called when a region is closed. - * * @param source The region to remove. */ void deregister(MetricsRegionSource source); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java index 044d6b8bb0a1..dee63d2b0cf8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
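MetricsRegionAggregateSource above is the register/deregister pair through which per-region sources join and leave a single exported record, and the Impl in the next hunk keeps them in a concurrent set. The aggregation shape, reduced to a self-contained sketch with invented names (the real class pushes each region's metrics into a Hadoop MetricsCollector rather than counting them):

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Self-contained sketch of the aggregate-source shape: sources register and deregister
// themselves, and a snapshot walks whatever is registered at that moment. RegionSource is
// a stand-in type parameter; names here are illustrative, not HBase's.
public class RegionAggregateSketch<RegionSource> {
  private final Set<RegionSource> regionSources =
    Collections.newSetFromMap(new ConcurrentHashMap<RegionSource, Boolean>());

  public void register(RegionSource source) {
    regionSources.add(source);
  }

  public void deregister(RegionSource source) {
    regionSources.remove(source);
  }

  public int snapshot() {
    // A real implementation iterates regionSources and emits each region's metrics
    // into the record; here we just report how many sources are registered.
    return regionSources.size();
  }
}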
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsCollector; @@ -48,11 +46,8 @@ public MetricsRegionAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - - public MetricsRegionAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsRegionAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // Every few mins clean the JMX cache. @@ -76,9 +71,8 @@ public void deregister(MetricsRegionSource toRemove) { } catch (Exception e) { // Ignored. If this errors out it means that someone is double // closing the region source and the region is already nulled out. - LOG.info( - "Error trying to remove " + toRemove + " from " + this.getClass().getSimpleName(), - e); + LOG.info("Error trying to remove " + toRemove + " from " + this.getClass().getSimpleName(), + e); } clearCache(); } @@ -88,12 +82,11 @@ private synchronized void clearCache() { } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. - * * @param collector the collector - * @param all get all the metrics regardless of when they last changed. + * @param all get all the metrics regardless of when they last changed. */ @Override public void getMetrics(MetricsCollector collector, boolean all) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java index 93990ef1bd4e..991187bc98eb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
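The getMetrics javadoc above describes the metrics2 push model: the method returns nothing and instead writes a record into the supplied collector. A bare-bones sketch of that contract, using only the collector/builder calls that appear elsewhere in this diff (addRecord, setContext, addGauge, Interns.info); the record and gauge names are placeholders:

import org.apache.hadoop.hbase.metrics.Interns;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;

// Minimal push-model sketch: nothing is returned, the record is pushed into the collector.
// "ExampleRecord", "example" and "exampleGauge" are placeholder names.
public class PushModelSketch implements MetricsSource {
  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    MetricsRecordBuilder builder =
      collector.addRecord(Interns.info("ExampleRecord", "A record pushed by this sketch"));
    builder.setContext("example");
    builder.addGauge(Interns.info("exampleGauge", "A placeholder gauge"), 42L);
  }
}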
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,31 +40,28 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { String REGION_SIZE_REPORTING_CHORE_TIME_NAME = "regionSizeReportingChoreTime"; /** - * Updates the metric tracking how many tables this RegionServer has marked as in violation - * of their space quota. + * Updates the metric tracking how many tables this RegionServer has marked as in violation of + * their space quota. */ void updateNumTablesInSpaceQuotaViolation(long tablesInViolation); /** * Updates the metric tracking how many tables this RegionServer has received * {@code SpaceQuotaSnapshot}s for. - * * @param numSnapshots The number of {@code SpaceQuotaSnapshot}s received from the Master. */ void updateNumTableSpaceQuotaSnapshots(long numSnapshots); /** - * Updates the metric tracking how much time was spent scanning the filesystem to compute - * the size of each region hosted by this RegionServer. - * + * Updates the metric tracking how much time was spent scanning the filesystem to compute the size + * of each region hosted by this RegionServer. * @param time The execution time of the chore in milliseconds. */ void incrementSpaceQuotaFileSystemScannerChoreTime(long time); /** - * Updates the metric tracking how much time was spent updating the RegionServer with the - * latest information on space quotas from the {@code hbase:quota} table. - * + * Updates the metric tracking how much time was spent updating the RegionServer with the latest + * information on space quotas from the {@code hbase:quota} table. * @param time The execution time of the chore in milliseconds. */ void incrementSpaceQuotaRefresherChoreTime(long time); @@ -71,7 +69,6 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { /** * Updates the metric tracking how many region size reports were sent from this RegionServer to * the Master. These reports contain information on the size of each Region hosted locally. - * * @param numReportsSent The number of region size reports sent */ void incrementNumRegionSizeReportsSent(long numReportsSent); @@ -79,7 +76,6 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { /** * Updates the metric tracking how much time was spent sending region size reports to the Master * by the RegionSizeReportingChore. - * * @param time The execution time in milliseconds. */ void incrementRegionSizeReportingChoreTime(long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java index 3a796ddf0c5f..7c1cc0670455 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Meter; @@ -28,8 +28,8 @@ * Implementation of {@link MetricsRegionServerQuotaSource}. */ @InterfaceAudience.Private -public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl implements - MetricsRegionServerQuotaSource { +public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl + implements MetricsRegionServerQuotaSource { private final Meter tablesInViolationCounter; private final Meter spaceQuotaSnapshotsReceived; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 97896d41022f..62ce3bd8f16d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -50,7 +49,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Put time histogram - * * @param t time it took */ void updatePut(long t); @@ -63,7 +61,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Delete time histogram - * * @param t time it took */ void updateDelete(long t); @@ -94,42 +91,37 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Get time histogram . - * * @param t time it took */ void updateGet(long t); /** * Update the Increment time histogram. - * * @param t time it took */ void updateIncrement(long t); /** * Update the Append time histogram. - * * @param t time it took */ void updateAppend(long t); /** * Update the Replay time histogram. - * * @param t time it took */ void updateReplay(long t); /** * Update the scan size. - * * @param scanSize size of the scan */ void updateScanSize(long scanSize); /** * Update the scan time. 
- * */ + */ void updateScanTime(long t); /** @@ -256,17 +248,17 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String NUM_REFERENCE_FILES_DESC = "Number of reference file on this RegionServer"; String STOREFILE_SIZE_DESC = "Size of storefiles being served."; String STOREFILE_SIZE_GROWTH_RATE_DESC = - "Bytes per second by which the size of storefiles being served grows."; + "Bytes per second by which the size of storefiles being served grows."; String TOTAL_REQUEST_COUNT = "totalRequestCount"; String TOTAL_REQUEST_COUNT_DESC = - "Total number of requests this RegionServer has answered; increments the count once for " + - "EVERY access whether an admin operation, a Scan, a Put or Put of 1M rows, or a Get " + - "of a non-existent row"; + "Total number of requests this RegionServer has answered; increments the count once for " + + "EVERY access whether an admin operation, a Scan, a Put or Put of 1M rows, or a Get " + + "of a non-existent row"; String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount"; String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC = - "Total number of region requests this RegionServer has answered; counts by row-level " + - "action at the RPC Server (Sums 'readRequestsCount' and 'writeRequestsCount'); counts" + - "once per access whether a Put of 1M rows or a Get that returns 1M Results"; + "Total number of region requests this RegionServer has answered; counts by row-level " + + "action at the RPC Server (Sums 'readRequestsCount' and 'writeRequestsCount'); counts" + + "once per access whether a Put of 1M rows or a Get that returns 1M Results"; String READ_REQUEST_COUNT = "readRequestCount"; String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount"; String FILTERED_READ_REQUEST_COUNT_DESC = @@ -280,8 +272,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String CP_REQUEST_COUNT_DESC = "Number of coprocessor service requests this region server has answered."; String WRITE_REQUEST_COUNT = "writeRequestCount"; - String WRITE_REQUEST_COUNT_DESC = - "Number of mutation requests this RegionServer has answered."; + String WRITE_REQUEST_COUNT_DESC = "Number of mutation requests this RegionServer has answered."; String WRITE_REQUEST_RATE_PER_SECOND = "writeRequestRatePerSecond"; String WRITE_REQUEST_RATE_DESC = "Rate of answering the mutation requests by this region server per second."; @@ -296,8 +287,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String STATIC_INDEX_SIZE = "staticIndexSize"; String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes."; String STATIC_BLOOM_SIZE = "staticBloomSize"; - String STATIC_BLOOM_SIZE_DESC = - "Uncompressed size of the static bloom filters."; + String STATIC_BLOOM_SIZE_DESC = "Uncompressed size of the static bloom filters."; String NUMBER_OF_MUTATIONS_WITHOUT_WAL = "mutationsWithoutWALCount"; String NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC = "Number of mutations that have been sent by clients with the write ahead logging turned off."; @@ -309,7 +299,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo "The percent of HFiles that are stored on the local hdfs data node."; String PERCENT_FILES_LOCAL_SECONDARY_REGIONS = "percentFilesLocalSecondaryRegions"; String PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC = - "The percent of HFiles used by secondary regions that are stored on the local hdfs data node."; + "The percent of HFiles used by secondary regions that are stored on the local 
hdfs data node."; String SPLIT_QUEUE_LENGTH = "splitQueueLength"; String SPLIT_QUEUE_LENGTH_DESC = "Length of the queue for splits."; String COMPACTION_QUEUE_LENGTH = "compactionQueueLength"; @@ -323,8 +313,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String FLUSH_QUEUE_LENGTH = "flushQueueLength"; String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes"; String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize"; - String BLOCK_CACHE_FREE_DESC = - "Size of the block cache that is not occupied."; + String BLOCK_CACHE_FREE_DESC = "Size of the block cache that is not occupied."; String BLOCK_CACHE_COUNT = "blockCacheCount"; String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache."; String BLOCK_CACHE_SIZE = "blockCacheSize"; @@ -342,19 +331,18 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount"; String BLOCK_CACHE_EVICTION_COUNT_DESC = "Count of the number of blocks evicted from the block cache." - + "(Not including blocks evicted because of HFile removal)"; + + "(Not including blocks evicted because of HFile removal)"; String BLOCK_CACHE_PRIMARY_EVICTION_COUNT = "blockCacheEvictionCountPrimary"; String BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC = "Count of the number of blocks evicted from primary replica in the block cache."; String BLOCK_CACHE_HIT_PERCENT = "blockCacheCountHitPercent"; - String BLOCK_CACHE_HIT_PERCENT_DESC = - "Percent of block cache requests that are hits"; + String BLOCK_CACHE_HIT_PERCENT_DESC = "Percent of block cache requests that are hits"; String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent"; String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC = "The percent of the time that requests with the cache turned on hit the cache."; String BLOCK_CACHE_FAILED_INSERTION_COUNT = "blockCacheFailedInsertionCount"; - String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = "Number of times that a block cache " + - "insertion failed. Usually due to size restrictions."; + String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = + "Number of times that a block cache " + "insertion failed. 
Usually due to size restrictions."; String BLOCK_CACHE_DATA_MISS_COUNT = "blockCacheDataMissCount"; String BLOCK_CACHE_ENCODED_DATA_MISS_COUNT = "blockCacheEncodedDataMissCount"; String BLOCK_CACHE_LEAF_INDEX_MISS_COUNT = "blockCacheLeafIndexMissCount"; @@ -425,15 +413,12 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String SLOW_DELETE_KEY = "slowDeleteCount"; String SLOW_INCREMENT_KEY = "slowIncrementCount"; String SLOW_APPEND_KEY = "slowAppendCount"; - String SLOW_PUT_DESC = - "The number of batches containing puts that took over 1000ms to complete"; + String SLOW_PUT_DESC = "The number of batches containing puts that took over 1000ms to complete"; String SLOW_DELETE_DESC = "The number of batches containing delete(s) that took over 1000ms to complete"; String SLOW_GET_DESC = "The number of Gets that took over 1000ms to complete"; - String SLOW_INCREMENT_DESC = - "The number of Increments that took over 1000ms to complete"; - String SLOW_APPEND_DESC = - "The number of Appends that took over 1000ms to complete"; + String SLOW_INCREMENT_DESC = "The number of Increments that took over 1000ms to complete"; + String SLOW_APPEND_DESC = "The number of Appends that took over 1000ms to complete"; String FLUSHED_CELLS = "flushedCellsCount"; String FLUSHED_CELLS_DESC = "The number of cells flushed to disk"; @@ -445,14 +430,12 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String COMPACTED_CELLS_SIZE_DESC = "The total amount of data processed during minor compactions, in bytes"; String MAJOR_COMPACTED_CELLS = "majorCompactedCellsCount"; - String MAJOR_COMPACTED_CELLS_DESC = - "The number of cells processed during major compactions"; + String MAJOR_COMPACTED_CELLS_DESC = "The number of cells processed during major compactions"; String MAJOR_COMPACTED_CELLS_SIZE = "majorCompactedCellsSize"; String MAJOR_COMPACTED_CELLS_SIZE_DESC = "The total amount of data processed during major compactions, in bytes"; String CELLS_COUNT_COMPACTED_TO_MOB = "cellsCountCompactedToMob"; - String CELLS_COUNT_COMPACTED_TO_MOB_DESC = - "The number of cells moved to mob during compaction"; + String CELLS_COUNT_COMPACTED_TO_MOB_DESC = "The number of cells moved to mob during compaction"; String CELLS_COUNT_COMPACTED_FROM_MOB = "cellsCountCompactedFromMob"; String CELLS_COUNT_COMPACTED_FROM_MOB_DESC = "The number of cells moved from mob during compaction"; @@ -490,18 +473,16 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo "The number of times we started a hedged read and a hedged read won"; String HEDGED_READ_IN_CUR_THREAD = "hedgedReadOpsInCurThread"; String HEDGED_READ_IN_CUR_THREAD_DESC = - "The number of times we execute a hedged read in current thread as a fallback for task rejection"; + "The number of times we execute a hedged read in current thread as a fallback for task rejection"; String TOTAL_BYTES_READ = "totalBytesRead"; String TOTAL_BYTES_READ_DESC = "The total number of bytes read from HDFS"; String LOCAL_BYTES_READ = "localBytesRead"; - String LOCAL_BYTES_READ_DESC = - "The number of bytes read from the local HDFS DataNode"; + String LOCAL_BYTES_READ_DESC = "The number of bytes read from the local HDFS DataNode"; String SHORTCIRCUIT_BYTES_READ = "shortCircuitBytesRead"; String SHORTCIRCUIT_BYTES_READ_DESC = "The number of bytes read through HDFS short circuit read"; String ZEROCOPY_BYTES_READ = "zeroCopyBytesRead"; - String ZEROCOPY_BYTES_READ_DESC = - "The number of bytes read through HDFS zero copy"; + 
String ZEROCOPY_BYTES_READ_DESC = "The number of bytes read through HDFS zero copy"; String BLOCKED_REQUESTS_COUNT = "blockedRequestCount"; String BLOCKED_REQUESTS_COUNT_DESC = "The number of blocked requests because of memstore size is " @@ -525,48 +506,47 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String FLUSHED_MEMSTORE_BYTES_DESC = "Total number of bytes of cells in memstore from flush"; String COMPACTION_TIME = "compactionTime"; - String COMPACTION_TIME_DESC - = "Histogram for the time in millis for compaction, both major and minor"; + String COMPACTION_TIME_DESC = + "Histogram for the time in millis for compaction, both major and minor"; String COMPACTION_INPUT_FILE_COUNT = "compactionInputFileCount"; - String COMPACTION_INPUT_FILE_COUNT_DESC - = "Histogram for the compaction input number of files, both major and minor"; + String COMPACTION_INPUT_FILE_COUNT_DESC = + "Histogram for the compaction input number of files, both major and minor"; String COMPACTION_INPUT_SIZE = "compactionInputSize"; - String COMPACTION_INPUT_SIZE_DESC - = "Histogram for the compaction total input file sizes, both major and minor"; + String COMPACTION_INPUT_SIZE_DESC = + "Histogram for the compaction total input file sizes, both major and minor"; String COMPACTION_OUTPUT_FILE_COUNT = "compactionOutputFileCount"; - String COMPACTION_OUTPUT_FILE_COUNT_DESC - = "Histogram for the compaction output number of files, both major and minor"; + String COMPACTION_OUTPUT_FILE_COUNT_DESC = + "Histogram for the compaction output number of files, both major and minor"; String COMPACTION_OUTPUT_SIZE = "compactionOutputSize"; - String COMPACTION_OUTPUT_SIZE_DESC - = "Histogram for the compaction total output file sizes, both major and minor"; + String COMPACTION_OUTPUT_SIZE_DESC = + "Histogram for the compaction total output file sizes, both major and minor"; String COMPACTED_INPUT_BYTES = "compactedInputBytes"; - String COMPACTED_INPUT_BYTES_DESC - = "Total number of bytes that is read for compaction, both major and minor"; + String COMPACTED_INPUT_BYTES_DESC = + "Total number of bytes that is read for compaction, both major and minor"; String COMPACTED_OUTPUT_BYTES = "compactedOutputBytes"; - String COMPACTED_OUTPUT_BYTES_DESC - = "Total number of bytes that is output from compaction, both major and minor"; + String COMPACTED_OUTPUT_BYTES_DESC = + "Total number of bytes that is output from compaction, both major and minor"; String MAJOR_COMPACTION_TIME = "majorCompactionTime"; - String MAJOR_COMPACTION_TIME_DESC - = "Histogram for the time in millis for compaction, major only"; + String MAJOR_COMPACTION_TIME_DESC = "Histogram for the time in millis for compaction, major only"; String MAJOR_COMPACTION_INPUT_FILE_COUNT = "majorCompactionInputFileCount"; - String MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC - = "Histogram for the compaction input number of files, major only"; + String MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC = + "Histogram for the compaction input number of files, major only"; String MAJOR_COMPACTION_INPUT_SIZE = "majorCompactionInputSize"; - String MAJOR_COMPACTION_INPUT_SIZE_DESC - = "Histogram for the compaction total input file sizes, major only"; + String MAJOR_COMPACTION_INPUT_SIZE_DESC = + "Histogram for the compaction total input file sizes, major only"; String MAJOR_COMPACTION_OUTPUT_FILE_COUNT = "majorCompactionOutputFileCount"; - String MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC - = "Histogram for the compaction output number of files, major only"; + String 
MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC = + "Histogram for the compaction output number of files, major only"; String MAJOR_COMPACTION_OUTPUT_SIZE = "majorCompactionOutputSize"; - String MAJOR_COMPACTION_OUTPUT_SIZE_DESC - = "Histogram for the compaction total output file sizes, major only"; + String MAJOR_COMPACTION_OUTPUT_SIZE_DESC = + "Histogram for the compaction total output file sizes, major only"; String MAJOR_COMPACTED_INPUT_BYTES = "majorCompactedInputBytes"; - String MAJOR_COMPACTED_INPUT_BYTES_DESC - = "Total number of bytes that is read for compaction, major only"; + String MAJOR_COMPACTED_INPUT_BYTES_DESC = + "Total number of bytes that is read for compaction, major only"; String MAJOR_COMPACTED_OUTPUT_BYTES = "majorCompactedOutputBytes"; - String MAJOR_COMPACTED_OUTPUT_BYTES_DESC - = "Total number of bytes that is output from compaction, major only"; + String MAJOR_COMPACTED_OUTPUT_BYTES_DESC = + "Total number of bytes that is output from compaction, major only"; String RPC_GET_REQUEST_COUNT = "rpcGetRequestCount"; String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc get requests this RegionServer has answered."; @@ -605,5 +585,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String ACTIVE_SCANNERS_DESC = "Gauge of currently active scanners"; String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount"; - String SCANNER_LEASE_EXPIRED_COUNT_DESC = "Count of scanners which were expired due to scanner lease timeout"; + String SCANNER_LEASE_EXPIRED_COUNT_DESC = + "Count of scanners which were expired due to scanner lease timeout"; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java index ef33909839ce..d477b64609ff 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.io.MetricsIOSource; @@ -30,7 +29,6 @@ public interface MetricsRegionServerSourceFactory { /** * Given a wrapper create a MetricsRegionServerSource. - * * @param regionServerWrapper The wrapped region server * @return a Metrics Source. */ @@ -38,7 +36,6 @@ public interface MetricsRegionServerSourceFactory { /** * Create a MetricsRegionSource from a MetricsRegionWrapper. - * * @param wrapper The wrapped region * @return A metrics region source */ @@ -58,7 +55,6 @@ public interface MetricsRegionServerSourceFactory { /** * Create a MetricsTableSource from a MetricsTableWrapper. - * * @param table The table name * @param wrapper The wrapped table aggregate * @return A metrics table source @@ -67,7 +63,6 @@ public interface MetricsRegionServerSourceFactory { /** * Get a MetricsTableAggregateSource - * * @return A metrics table aggregate source */ MetricsTableAggregateSource getTableAggregate(); @@ -80,7 +75,6 @@ public interface MetricsRegionServerSourceFactory { /** * Create a MetricsIOSource from a MetricsIOWrapper. 
- * * @return A metrics IO source */ MetricsIOSource createIO(MetricsIOWrapper wrapper); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java index ccc17492dba5..1a9c8a9e42f1 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,12 +23,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper + * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper */ @InterfaceAudience.Private public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory { public static enum FactoryStorage { INSTANCE; + private Object aggLock = new Object(); private MetricsRegionAggregateSourceImpl regionAggImpl; private MetricsUserAggregateSourceImpl userAggImpl; @@ -75,8 +76,8 @@ public synchronized MetricsHeapMemoryManagerSource getHeapMemoryManager() { } @Override - public synchronized MetricsRegionServerSource createServer( - MetricsRegionServerWrapper regionServerWrapper) { + public synchronized MetricsRegionServerSource + createServer(MetricsRegionServerWrapper regionServerWrapper) { return new MetricsRegionServerSourceImpl(regionServerWrapper); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index 966d75ac9fc4..ee19d4c4e967 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -27,13 +26,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsRegionServerSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsRegionServerSource. 
Implements BaseSource through + * BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsRegionServerSourceImpl - extends BaseSourceImpl implements MetricsRegionServerSource { +public class MetricsRegionServerSourceImpl extends BaseSourceImpl + implements MetricsRegionServerSource { final MetricsRegionServerWrapper rsWrap; private final MetricHistogram putHisto; @@ -97,11 +95,8 @@ public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap); } - public MetricsRegionServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsRegionServerWrapper rsWrap) { + public MetricsRegionServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsRegionServerWrapper rsWrap) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.rsWrap = rsWrap; @@ -131,58 +126,59 @@ public MetricsRegionServerSourceImpl(String metricsName, scanTimeHisto = getMetricsRegistry().newTimeHistogram(SCAN_TIME_KEY); flushTimeHisto = getMetricsRegistry().newTimeHistogram(FLUSH_TIME, FLUSH_TIME_DESC); - flushMemstoreSizeHisto = getMetricsRegistry() - .newSizeHistogram(FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); - flushOutputSizeHisto = getMetricsRegistry().newSizeHistogram(FLUSH_OUTPUT_SIZE, - FLUSH_OUTPUT_SIZE_DESC); - flushedOutputBytes = getMetricsRegistry().newCounter(FLUSHED_OUTPUT_BYTES, - FLUSHED_OUTPUT_BYTES_DESC, 0L); - flushedMemstoreBytes = getMetricsRegistry().newCounter(FLUSHED_MEMSTORE_BYTES, - FLUSHED_MEMSTORE_BYTES_DESC, 0L); - - compactionTimeHisto = getMetricsRegistry() - .newTimeHistogram(COMPACTION_TIME, COMPACTION_TIME_DESC); - compactionInputFileCountHisto = getMetricsRegistry() - .newHistogram(COMPACTION_INPUT_FILE_COUNT, COMPACTION_INPUT_FILE_COUNT_DESC); - compactionInputSizeHisto = getMetricsRegistry() - .newSizeHistogram(COMPACTION_INPUT_SIZE, COMPACTION_INPUT_SIZE_DESC); - compactionOutputFileCountHisto = getMetricsRegistry() - .newHistogram(COMPACTION_OUTPUT_FILE_COUNT, COMPACTION_OUTPUT_FILE_COUNT_DESC); - compactionOutputSizeHisto = getMetricsRegistry() - .newSizeHistogram(COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); - compactedInputBytes = getMetricsRegistry() - .newCounter(COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); - compactedOutputBytes = getMetricsRegistry() - .newCounter(COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); - - majorCompactionTimeHisto = getMetricsRegistry() - .newTimeHistogram(MAJOR_COMPACTION_TIME, MAJOR_COMPACTION_TIME_DESC); + flushMemstoreSizeHisto = + getMetricsRegistry().newSizeHistogram(FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); + flushOutputSizeHisto = + getMetricsRegistry().newSizeHistogram(FLUSH_OUTPUT_SIZE, FLUSH_OUTPUT_SIZE_DESC); + flushedOutputBytes = + getMetricsRegistry().newCounter(FLUSHED_OUTPUT_BYTES, FLUSHED_OUTPUT_BYTES_DESC, 0L); + flushedMemstoreBytes = + getMetricsRegistry().newCounter(FLUSHED_MEMSTORE_BYTES, FLUSHED_MEMSTORE_BYTES_DESC, 0L); + + compactionTimeHisto = + getMetricsRegistry().newTimeHistogram(COMPACTION_TIME, COMPACTION_TIME_DESC); + compactionInputFileCountHisto = getMetricsRegistry().newHistogram(COMPACTION_INPUT_FILE_COUNT, + COMPACTION_INPUT_FILE_COUNT_DESC); + compactionInputSizeHisto = + getMetricsRegistry().newSizeHistogram(COMPACTION_INPUT_SIZE, COMPACTION_INPUT_SIZE_DESC); + compactionOutputFileCountHisto = 
getMetricsRegistry().newHistogram(COMPACTION_OUTPUT_FILE_COUNT, + COMPACTION_OUTPUT_FILE_COUNT_DESC); + compactionOutputSizeHisto = + getMetricsRegistry().newSizeHistogram(COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); + compactedInputBytes = + getMetricsRegistry().newCounter(COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); + compactedOutputBytes = + getMetricsRegistry().newCounter(COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); + + majorCompactionTimeHisto = + getMetricsRegistry().newTimeHistogram(MAJOR_COMPACTION_TIME, MAJOR_COMPACTION_TIME_DESC); majorCompactionInputFileCountHisto = getMetricsRegistry() - .newHistogram(MAJOR_COMPACTION_INPUT_FILE_COUNT, MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC); + .newHistogram(MAJOR_COMPACTION_INPUT_FILE_COUNT, MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC); majorCompactionInputSizeHisto = getMetricsRegistry() .newSizeHistogram(MAJOR_COMPACTION_INPUT_SIZE, MAJOR_COMPACTION_INPUT_SIZE_DESC); majorCompactionOutputFileCountHisto = getMetricsRegistry() .newHistogram(MAJOR_COMPACTION_OUTPUT_FILE_COUNT, MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); majorCompactionOutputSizeHisto = getMetricsRegistry() - .newSizeHistogram(MAJOR_COMPACTION_OUTPUT_SIZE, MAJOR_COMPACTION_OUTPUT_SIZE_DESC); - majorCompactedInputBytes = getMetricsRegistry() - .newCounter(MAJOR_COMPACTED_INPUT_BYTES, MAJOR_COMPACTED_INPUT_BYTES_DESC, 0L); - majorCompactedOutputBytes = getMetricsRegistry() - .newCounter(MAJOR_COMPACTED_OUTPUT_BYTES, MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); + .newSizeHistogram(MAJOR_COMPACTION_OUTPUT_SIZE, MAJOR_COMPACTION_OUTPUT_SIZE_DESC); + majorCompactedInputBytes = getMetricsRegistry().newCounter(MAJOR_COMPACTED_INPUT_BYTES, + MAJOR_COMPACTED_INPUT_BYTES_DESC, 0L); + majorCompactedOutputBytes = getMetricsRegistry().newCounter(MAJOR_COMPACTED_OUTPUT_BYTES, + MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); splitTimeHisto = getMetricsRegistry().newTimeHistogram(SPLIT_KEY); splitRequest = getMetricsRegistry().newCounter(SPLIT_REQUEST_KEY, SPLIT_REQUEST_DESC, 0L); splitSuccess = getMetricsRegistry().newCounter(SPLIT_SUCCESS_KEY, SPLIT_SUCCESS_DESC, 0L); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); - scannerLeaseExpiredCount = getMetricsRegistry().newCounter(SCANNER_LEASE_EXPIRED_COUNT, SCANNER_LEASE_EXPIRED_COUNT_DESC, 0L); + scannerLeaseExpiredCount = getMetricsRegistry().newCounter(SCANNER_LEASE_EXPIRED_COUNT, + SCANNER_LEASE_EXPIRED_COUNT_DESC, 0L); } @Override @@ -332,12 +328,11 @@ public void incrScannerLeaseExpired() { } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. 
- * * @param metricsCollector Collector to accept metrics - * @param all push all or only changed? + * @param all push all or only changed? */ @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { @@ -346,137 +341,135 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { // rsWrap can be null because this function is called inside of init. if (rsWrap != null) { addGaugesToMetricsRecordBuilder(mrb) - .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), - rsWrap.getTotalRequestCount()) - .addCounter(Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, - TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), rsWrap.getTotalRowActionRequestCount()) - .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), - rsWrap.getReadRequestsCount()) - .addCounter(Interns.info(CP_REQUEST_COUNT, CP_REQUEST_COUNT_DESC), - rsWrap.getCpRequestsCount()) - .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, - FILTERED_READ_REQUEST_COUNT_DESC), rsWrap.getFilteredReadRequestsCount()) - .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), - rsWrap.getWriteRequestsCount()) - .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), - rsWrap.getRpcGetRequestsCount()) - .addCounter(Interns.info(RPC_FULL_SCAN_REQUEST_COUNT, RPC_FULL_SCAN_REQUEST_COUNT_DESC), - rsWrap.getRpcFullScanRequestsCount()) - .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), - rsWrap.getRpcScanRequestsCount()) - .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), - rsWrap.getRpcMultiRequestsCount()) - .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), - rsWrap.getRpcMutateRequestsCount()) - .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksFailed()) - .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksPassed()) - .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), - rsWrap.getBlockCacheHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, - BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), rsWrap.getBlockCachePrimaryHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), - rsWrap.getBlockCacheMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, - BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), rsWrap.getBlockCachePrimaryMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), - rsWrap.getBlockCacheEvictedCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, - BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), - rsWrap.getBlockCachePrimaryEvictedCount()) - .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, - BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), - rsWrap.getBlockCacheFailedInsertions()) - .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), - rsWrap.getDataMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), - rsWrap.getLeafIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), - rsWrap.getBloomChunkMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), - rsWrap.getMetaMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), - rsWrap.getRootIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), - rsWrap.getIntermediateIndexMissCount()) - 
.addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), - rsWrap.getFileInfoMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), - rsWrap.getGeneralBloomMetaMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), - rsWrap.getDeleteFamilyBloomMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), - rsWrap.getTrailerMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), - rsWrap.getDataHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), - rsWrap.getLeafIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), - rsWrap.getBloomChunkHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), - rsWrap.getMetaHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), - rsWrap.getRootIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), - rsWrap.getIntermediateIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), - rsWrap.getFileInfoHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), - rsWrap.getGeneralBloomMetaHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), - rsWrap.getDeleteFamilyBloomHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), - rsWrap.getTrailerHitCount()) - .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), - rsWrap.getUpdatesBlockedTime()) - .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), - rsWrap.getFlushedCellsCount()) - .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), - rsWrap.getCompactedCellsCount()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), - rsWrap.getMajorCompactedCellsCount()) - .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), - rsWrap.getFlushedCellsSize()) - .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), - rsWrap.getCompactedCellsSize()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), - rsWrap.getMajorCompactedCellsSize()) - .addCounter(Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, - CELLS_COUNT_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsCountCompactedFromMob()) - .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, - CELLS_COUNT_COMPACTED_TO_MOB_DESC), rsWrap.getCellsCountCompactedToMob()) - .addCounter(Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, - CELLS_SIZE_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsSizeCompactedFromMob()) - .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, - CELLS_SIZE_COMPACTED_TO_MOB_DESC), rsWrap.getCellsSizeCompactedToMob()) - .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), - rsWrap.getMobFlushCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), - rsWrap.getMobFlushedCellsCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), - rsWrap.getMobFlushedCellsSize()) - .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, MOB_SCAN_CELLS_COUNT_DESC), - rsWrap.getMobScanCellsCount()) - .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), - rsWrap.getMobScanCellsSize()) - .addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, - MOB_FILE_CACHE_ACCESS_COUNT_DESC), rsWrap.getMobFileCacheAccessCount()) - .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, MOB_FILE_CACHE_MISS_COUNT_DESC), - rsWrap.getMobFileCacheMissCount()) - 
.addCounter(Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, - MOB_FILE_CACHE_EVICTED_COUNT_DESC), rsWrap.getMobFileCacheEvictedCount()) - .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps()) - .addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), - rsWrap.getHedgedReadWins()) - .addCounter(Interns.info(HEDGED_READ_IN_CUR_THREAD, HEDGED_READ_IN_CUR_THREAD_DESC), - rsWrap.getHedgedReadOpsInCurThread()) - .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), - rsWrap.getBlockedRequestsCount()) - .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - rsWrap.getZookeeperQuorum()) - .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) - .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); + .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), + rsWrap.getTotalRequestCount()) + .addCounter( + Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), + rsWrap.getTotalRowActionRequestCount()) + .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), + rsWrap.getReadRequestsCount()) + .addCounter(Interns.info(CP_REQUEST_COUNT, CP_REQUEST_COUNT_DESC), + rsWrap.getCpRequestsCount()) + .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, FILTERED_READ_REQUEST_COUNT_DESC), + rsWrap.getFilteredReadRequestsCount()) + .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), + rsWrap.getWriteRequestsCount()) + .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), + rsWrap.getRpcGetRequestsCount()) + .addCounter(Interns.info(RPC_FULL_SCAN_REQUEST_COUNT, RPC_FULL_SCAN_REQUEST_COUNT_DESC), + rsWrap.getRpcFullScanRequestsCount()) + .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), + rsWrap.getRpcScanRequestsCount()) + .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), + rsWrap.getRpcMultiRequestsCount()) + .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), + rsWrap.getRpcMutateRequestsCount()) + .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksFailed()) + .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksPassed()) + .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), + rsWrap.getBlockCacheHitCount()) + .addCounter( + Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), + rsWrap.getBlockCachePrimaryHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), + rsWrap.getBlockCacheMissCount()) + .addCounter( + Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), + rsWrap.getBlockCachePrimaryMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), + rsWrap.getBlockCacheEvictedCount()) + .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, + BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), rsWrap.getBlockCachePrimaryEvictedCount()) + .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, + BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), rsWrap.getBlockCacheFailedInsertions()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), rsWrap.getDataMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), + rsWrap.getLeafIndexMissCount()) + 
.addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), + rsWrap.getBloomChunkMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), rsWrap.getMetaMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), + rsWrap.getRootIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), + rsWrap.getIntermediateIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), + rsWrap.getFileInfoMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), + rsWrap.getGeneralBloomMetaMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), + rsWrap.getDeleteFamilyBloomMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), + rsWrap.getTrailerMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), rsWrap.getDataHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), + rsWrap.getLeafIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), + rsWrap.getBloomChunkHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), rsWrap.getMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), + rsWrap.getRootIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), + rsWrap.getIntermediateIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), + rsWrap.getFileInfoHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), + rsWrap.getGeneralBloomMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), + rsWrap.getDeleteFamilyBloomHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), rsWrap.getTrailerHitCount()) + .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), + rsWrap.getUpdatesBlockedTime()) + .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), + rsWrap.getFlushedCellsCount()) + .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), + rsWrap.getCompactedCellsCount()) + .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), + rsWrap.getMajorCompactedCellsCount()) + .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), + rsWrap.getFlushedCellsSize()) + .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), + rsWrap.getCompactedCellsSize()) + .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), + rsWrap.getMajorCompactedCellsSize()) + .addCounter( + Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, CELLS_COUNT_COMPACTED_FROM_MOB_DESC), + rsWrap.getCellsCountCompactedFromMob()) + .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, CELLS_COUNT_COMPACTED_TO_MOB_DESC), + rsWrap.getCellsCountCompactedToMob()) + .addCounter( + Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, CELLS_SIZE_COMPACTED_FROM_MOB_DESC), + rsWrap.getCellsSizeCompactedFromMob()) + .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, CELLS_SIZE_COMPACTED_TO_MOB_DESC), + rsWrap.getCellsSizeCompactedToMob()) + .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), + rsWrap.getMobFlushCount()) + .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), + rsWrap.getMobFlushedCellsCount()) + .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), + rsWrap.getMobFlushedCellsSize()) + .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, 
MOB_SCAN_CELLS_COUNT_DESC), + rsWrap.getMobScanCellsCount()) + .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), + rsWrap.getMobScanCellsSize()) + .addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, MOB_FILE_CACHE_ACCESS_COUNT_DESC), + rsWrap.getMobFileCacheAccessCount()) + .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, MOB_FILE_CACHE_MISS_COUNT_DESC), + rsWrap.getMobFileCacheMissCount()) + .addCounter(Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, MOB_FILE_CACHE_EVICTED_COUNT_DESC), + rsWrap.getMobFileCacheEvictedCount()) + .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps()) + .addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), + rsWrap.getHedgedReadWins()) + .addCounter(Interns.info(HEDGED_READ_IN_CUR_THREAD, HEDGED_READ_IN_CUR_THREAD_DESC), + rsWrap.getHedgedReadOpsInCurThread()) + .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), + rsWrap.getBlockedRequestsCount()) + .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), + rsWrap.getZookeeperQuorum()) + .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) + .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); } metricsRegistry.snapshot(mrb, all); @@ -490,114 +483,113 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { private MetricsRecordBuilder addGaugesToMetricsRecordBuilder(MetricsRecordBuilder mrb) { return mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions()) - .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) - .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles()) - .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize()) - .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), - rsWrap.getNumStoreFiles()) - .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize()) - .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) - .addGauge(Interns.info(STOREFILE_SIZE_GROWTH_RATE, STOREFILE_SIZE_GROWTH_RATE_DESC), - rsWrap.getStoreFileSizeGrowthRate()) - .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC), - rsWrap.getMaxStoreFileAge()) - .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC), - rsWrap.getMinStoreFileAge()) - .addGauge(Interns.info(AVG_STORE_FILE_AGE, AVG_STORE_FILE_AGE_DESC), - rsWrap.getAvgStoreFileAge()) - .addGauge(Interns.info(NUM_REFERENCE_FILES, NUM_REFERENCE_FILES_DESC), - rsWrap.getNumReferenceFiles()) - .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), rsWrap.getStartCode()) - .addGauge(Interns.info(AVERAGE_REGION_SIZE, AVERAGE_REGION_SIZE_DESC), - rsWrap.getAverageRegionSize()) - .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), - rsWrap.getStoreFileIndexSize()) - .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), - rsWrap.getTotalStaticIndexSize()) - .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), - rsWrap.getTotalStaticBloomSize()) - .addGauge(Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, - NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), rsWrap.getNumMutationsWithoutWAL()) - .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), - rsWrap.getDataInMemoryWithoutWAL()) - .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), - rsWrap.getPercentFileLocal()) - 
.addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, - PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), - rsWrap.getPercentFileLocalSecondaryRegions()) - .addGauge(Interns.info(TOTAL_BYTES_READ, - TOTAL_BYTES_READ_DESC), - rsWrap.getTotalBytesRead()) - .addGauge(Interns.info(LOCAL_BYTES_READ, - LOCAL_BYTES_READ_DESC), - rsWrap.getLocalBytesRead()) - .addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, - SHORTCIRCUIT_BYTES_READ_DESC), - rsWrap.getShortCircuitBytesRead()) - .addGauge(Interns.info(ZEROCOPY_BYTES_READ, - ZEROCOPY_BYTES_READ_DESC), - rsWrap.getZeroCopyBytesRead()) - .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), - rsWrap.getSplitQueueSize()) - .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), - rsWrap.getCompactionQueueSize()) - .addGauge(Interns.info(SMALL_COMPACTION_QUEUE_LENGTH, - SMALL_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getSmallCompactionQueueSize()) - .addGauge(Interns.info(LARGE_COMPACTION_QUEUE_LENGTH, - LARGE_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getLargeCompactionQueueSize()) - .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), - rsWrap.getFlushQueueSize()) - .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), - rsWrap.getBlockCacheFreeSize()) - .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), - rsWrap.getBlockCacheCount()) - .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), - rsWrap.getBlockCacheSize()) - .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), - rsWrap.getBlockCacheHitPercent()) - .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, - BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent()) - .addGauge(Interns.info(L1_CACHE_HIT_COUNT, L1_CACHE_HIT_COUNT_DESC), - rsWrap.getL1CacheHitCount()) - .addGauge(Interns.info(L1_CACHE_MISS_COUNT, L1_CACHE_MISS_COUNT_DESC), - rsWrap.getL1CacheMissCount()) - .addGauge(Interns.info(L1_CACHE_HIT_RATIO, L1_CACHE_HIT_RATIO_DESC), - rsWrap.getL1CacheHitRatio()) - .addGauge(Interns.info(L1_CACHE_MISS_RATIO, L1_CACHE_MISS_RATIO_DESC), - rsWrap.getL1CacheMissRatio()) - .addGauge(Interns.info(L2_CACHE_HIT_COUNT, L2_CACHE_HIT_COUNT_DESC), - rsWrap.getL2CacheHitCount()) - .addGauge(Interns.info(L2_CACHE_MISS_COUNT, L2_CACHE_MISS_COUNT_DESC), - rsWrap.getL2CacheMissCount()) - .addGauge(Interns.info(L2_CACHE_HIT_RATIO, L2_CACHE_HIT_RATIO_DESC), - rsWrap.getL2CacheHitRatio()) - .addGauge(Interns.info(L2_CACHE_MISS_RATIO, L2_CACHE_MISS_RATIO_DESC), - rsWrap.getL2CacheMissRatio()) - .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), - rsWrap.getMobFileCacheCount()) - .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), - rsWrap.getMobFileCacheHitPercent()) - .addGauge(Interns.info(READ_REQUEST_RATE_PER_SECOND, READ_REQUEST_RATE_DESC), - rsWrap.getReadRequestsRatePerSecond()) - .addGauge(Interns.info(WRITE_REQUEST_RATE_PER_SECOND, WRITE_REQUEST_RATE_DESC), - rsWrap.getWriteRequestsRatePerSecond()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES, - BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC), - rsWrap.getByteBuffAllocatorHeapAllocationBytes()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES, - BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC), - rsWrap.getByteBuffAllocatorPoolAllocationBytes()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO, - BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC), - rsWrap.getByteBuffAllocatorHeapAllocRatio()) - 
.addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT, - BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC), - rsWrap.getByteBuffAllocatorTotalBufferCount()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT, - BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT_DESC), - rsWrap.getByteBuffAllocatorUsedBufferCount()) - .addGauge(Interns.info(ACTIVE_SCANNERS, ACTIVE_SCANNERS_DESC), - rsWrap.getActiveScanners()); + .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) + .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles()) + .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize()) + .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles()) + .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize()) + .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) + .addGauge(Interns.info(STOREFILE_SIZE_GROWTH_RATE, STOREFILE_SIZE_GROWTH_RATE_DESC), + rsWrap.getStoreFileSizeGrowthRate()) + .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC), + rsWrap.getMaxStoreFileAge()) + .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC), + rsWrap.getMinStoreFileAge()) + .addGauge(Interns.info(AVG_STORE_FILE_AGE, AVG_STORE_FILE_AGE_DESC), + rsWrap.getAvgStoreFileAge()) + .addGauge(Interns.info(NUM_REFERENCE_FILES, NUM_REFERENCE_FILES_DESC), + rsWrap.getNumReferenceFiles()) + .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), rsWrap.getStartCode()) + .addGauge(Interns.info(AVERAGE_REGION_SIZE, AVERAGE_REGION_SIZE_DESC), + rsWrap.getAverageRegionSize()) + .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), + rsWrap.getStoreFileIndexSize()) + .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), + rsWrap.getTotalStaticIndexSize()) + .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), + rsWrap.getTotalStaticBloomSize()) + .addGauge( + Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), + rsWrap.getNumMutationsWithoutWAL()) + .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), + rsWrap.getDataInMemoryWithoutWAL()) + .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), + rsWrap.getPercentFileLocal()) + .addGauge( + Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, + PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), + rsWrap.getPercentFileLocalSecondaryRegions()) + .addGauge(Interns.info(TOTAL_BYTES_READ, TOTAL_BYTES_READ_DESC), rsWrap.getTotalBytesRead()) + .addGauge(Interns.info(LOCAL_BYTES_READ, LOCAL_BYTES_READ_DESC), rsWrap.getLocalBytesRead()) + .addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, SHORTCIRCUIT_BYTES_READ_DESC), + rsWrap.getShortCircuitBytesRead()) + .addGauge(Interns.info(ZEROCOPY_BYTES_READ, ZEROCOPY_BYTES_READ_DESC), + rsWrap.getZeroCopyBytesRead()) + .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), + rsWrap.getSplitQueueSize()) + .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getCompactionQueueSize()) + .addGauge(Interns.info(SMALL_COMPACTION_QUEUE_LENGTH, SMALL_COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getSmallCompactionQueueSize()) + .addGauge(Interns.info(LARGE_COMPACTION_QUEUE_LENGTH, LARGE_COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getLargeCompactionQueueSize()) + .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), + rsWrap.getFlushQueueSize()) + .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, 
BLOCK_CACHE_FREE_DESC), + rsWrap.getBlockCacheFreeSize()) + .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), + rsWrap.getBlockCacheCount()) + .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), rsWrap.getBlockCacheSize()) + .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), + rsWrap.getBlockCacheHitPercent()) + .addGauge( + Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), + rsWrap.getBlockCacheHitCachingPercent()) + .addGauge(Interns.info(L1_CACHE_HIT_COUNT, L1_CACHE_HIT_COUNT_DESC), + rsWrap.getL1CacheHitCount()) + .addGauge(Interns.info(L1_CACHE_MISS_COUNT, L1_CACHE_MISS_COUNT_DESC), + rsWrap.getL1CacheMissCount()) + .addGauge(Interns.info(L1_CACHE_HIT_RATIO, L1_CACHE_HIT_RATIO_DESC), + rsWrap.getL1CacheHitRatio()) + .addGauge(Interns.info(L1_CACHE_MISS_RATIO, L1_CACHE_MISS_RATIO_DESC), + rsWrap.getL1CacheMissRatio()) + .addGauge(Interns.info(L2_CACHE_HIT_COUNT, L2_CACHE_HIT_COUNT_DESC), + rsWrap.getL2CacheHitCount()) + .addGauge(Interns.info(L2_CACHE_MISS_COUNT, L2_CACHE_MISS_COUNT_DESC), + rsWrap.getL2CacheMissCount()) + .addGauge(Interns.info(L2_CACHE_HIT_RATIO, L2_CACHE_HIT_RATIO_DESC), + rsWrap.getL2CacheHitRatio()) + .addGauge(Interns.info(L2_CACHE_MISS_RATIO, L2_CACHE_MISS_RATIO_DESC), + rsWrap.getL2CacheMissRatio()) + .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), + rsWrap.getMobFileCacheCount()) + .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), + rsWrap.getMobFileCacheHitPercent()) + .addGauge(Interns.info(READ_REQUEST_RATE_PER_SECOND, READ_REQUEST_RATE_DESC), + rsWrap.getReadRequestsRatePerSecond()) + .addGauge(Interns.info(WRITE_REQUEST_RATE_PER_SECOND, WRITE_REQUEST_RATE_DESC), + rsWrap.getWriteRequestsRatePerSecond()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES, + BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC), + rsWrap.getByteBuffAllocatorHeapAllocationBytes()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES, + BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC), + rsWrap.getByteBuffAllocatorPoolAllocationBytes()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO, + BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC), + rsWrap.getByteBuffAllocatorHeapAllocRatio()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT, + BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC), + rsWrap.getByteBuffAllocatorTotalBufferCount()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT, + BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT_DESC), + rsWrap.getByteBuffAllocatorUsedBufferCount()) + .addGauge(Interns.info(ACTIVE_SCANNERS, ACTIVE_SCANNERS_DESC), rsWrap.getActiveScanners()); } @Override diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index b424cdb21dbb..f2ad7b48cc86 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.List; @@ -35,28 +34,24 @@ public interface MetricsRegionServerWrapper { /** * Get the Cluster ID - * * @return Cluster ID */ String getClusterId(); /** * Get the ZooKeeper Quorum Info - * * @return ZooKeeper Quorum Info */ String getZookeeperQuorum(); /** * Get the co-processors - * * @return Co-processors */ String getCoprocessors(); /** * Get HRegionServer start time - * * @return Start time of RegionServer in milliseconds */ long getStartCode(); @@ -91,9 +86,9 @@ public interface MetricsRegionServerWrapper { */ long getNumWALSlowAppend(); - /** - * Get the number of store files hosted on this region server. - */ + /** + * Get the number of store files hosted on this region server. + */ long getNumStoreFiles(); /** @@ -122,12 +117,12 @@ public interface MetricsRegionServerWrapper { long getMinStoreFileAge(); /** - * @return Average age of store files hosted on this region server + * @return Average age of store files hosted on this region server */ long getAvgStoreFileAge(); /** - * @return Number of reference files on this region server + * @return Number of reference files on this region server */ long getNumReferenceFiles(); @@ -202,8 +197,8 @@ public interface MetricsRegionServerWrapper { long getNumMutationsWithoutWAL(); /** - * Ammount of data in the memstore but not in the WAL because mutations explicitly had their - * WAL turned off. + * Amount of data in the memstore but not in the WAL because mutations explicitly had their WAL + * turned off. */ long getDataInMemoryWithoutWAL(); @@ -237,6 +232,7 @@ public interface MetricsRegionServerWrapper { int getFlushQueueSize(); long getMemStoreLimit(); + /** * Get the size (in bytes) of the block cache that is free. */ @@ -282,7 +278,6 @@ public interface MetricsRegionServerWrapper { */ long getBlockCachePrimaryEvictedCount(); - /** * Get the percent of all requests that hit the block cache. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java index b3a556e3d9f2..386cb66ac866 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - package org.apache.hadoop.hbase.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -45,7 +44,7 @@ public interface MetricsRegionSource extends Comparable { String MAX_COMPACTION_QUEUE_DESC = "Max number of compactions queued for this region"; String FLUSHES_QUEUED_DESC = "Number flushes requested/queued for this region"; String MAX_FLUSH_QUEUE_DESC = "Max number of flushes queued for this region"; - String NUM_BYTES_COMPACTED_DESC = + String NUM_BYTES_COMPACTED_DESC = "Sum of filesize on all files entering a finished, successful or aborted, compaction"; String NUM_FILES_COMPACTED_DESC = "Number of files that were input for finished, successful or aborted, compactions"; @@ -81,7 +80,7 @@ public interface MetricsRegionSource extends Comparable { /** * Update time used of resultScanner.next(). - * */ + */ void updateScanTime(long mills); /** @@ -99,5 +98,4 @@ public interface MetricsRegionSource extends Comparable { */ MetricsRegionAggregateSource getAggregateSource(); - } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index 2f7f8074c9df..b0582054e515 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; @@ -72,14 +70,14 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { private final int hashCode; public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper, - MetricsRegionAggregateSourceImpl aggregate) { + MetricsRegionAggregateSourceImpl aggregate) { this.regionWrapper = regionWrapper; agg = aggregate; hashCode = regionWrapper.getRegionHashCode(); agg.register(this); - LOG.debug("Creating new MetricsRegionSourceImpl for table " + - regionWrapper.getTableName() + " " + regionWrapper.getRegionName()); + LOG.debug("Creating new MetricsRegionSourceImpl for table " + regionWrapper.getTableName() + " " + + regionWrapper.getRegionName()); registry = agg.getMetricsRegistry(); @@ -204,110 +202,86 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) { return; } + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, + MetricsRegionServerSource.STORE_COUNT_DESC), this.regionWrapper.getNumStores()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + MetricsRegionServerSource.STOREFILE_COUNT_DESC), this.regionWrapper.getNumStoreFiles()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_REF_COUNT, + MetricsRegionServerSource.STORE_REF_COUNT), this.regionWrapper.getStoreRefCount()); + mrb.addGauge( + Interns.info( + regionNamePrefix + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT, + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT), + this.regionWrapper.getMaxCompactedStoreFileRefCount()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + MetricsRegionServerSource.MEMSTORE_SIZE_DESC), this.regionWrapper.getMemStoreSize()); mrb.addGauge( - Interns.info( - regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, - MetricsRegionServerSource.STORE_COUNT_DESC), - this.regionWrapper.getNumStores()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, - MetricsRegionServerSource.STOREFILE_COUNT_DESC), - this.regionWrapper.getNumStoreFiles()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STORE_REF_COUNT, - MetricsRegionServerSource.STORE_REF_COUNT), - this.regionWrapper.getStoreRefCount()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT, - MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT), - this.regionWrapper.getMaxCompactedStoreFileRefCount() - ); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, - MetricsRegionServerSource.MEMSTORE_SIZE_DESC), - this.regionWrapper.getMemStoreSize()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, - MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), + Interns.info(regionNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, + MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), this.regionWrapper.getMaxStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, - MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + 
MetricsRegionServerSource.MIN_STORE_FILE_AGE, + MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), this.regionWrapper.getMinStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, - MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, + MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), this.regionWrapper.getAvgStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, - MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, + MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), this.regionWrapper.getNumReferenceFiles()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, - MetricsRegionServerSource.STOREFILE_SIZE_DESC), - this.regionWrapper.getStoreFileSize()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT, - MetricsRegionSource.COMPACTIONS_COMPLETED_DESC), - this.regionWrapper.getNumCompactionsCompleted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_FAILED_COUNT, + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + MetricsRegionServerSource.STOREFILE_SIZE_DESC), this.regionWrapper.getStoreFileSize()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT, + MetricsRegionSource.COMPACTIONS_COMPLETED_DESC), + this.regionWrapper.getNumCompactionsCompleted()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_FAILED_COUNT, MetricsRegionSource.COMPACTIONS_FAILED_DESC), - this.regionWrapper.getNumCompactionsFailed()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.LAST_MAJOR_COMPACTION_AGE, - MetricsRegionSource.LAST_MAJOR_COMPACTION_DESC), - this.regionWrapper.getLastMajorCompactionAge()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT, - MetricsRegionSource.NUM_BYTES_COMPACTED_DESC), - this.regionWrapper.getNumBytesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT, - MetricsRegionSource.NUM_FILES_COMPACTED_DESC), - this.regionWrapper.getNumFilesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, - MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), - this.regionWrapper.getReadRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.CP_REQUEST_COUNT, - MetricsRegionServerSource.CP_REQUEST_COUNT_DESC), - this.regionWrapper.getCpRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, - MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), - this.regionWrapper.getFilteredReadRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, - MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), - this.regionWrapper.getWriteRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.REPLICA_ID, - MetricsRegionSource.REPLICA_ID_DESC), - this.regionWrapper.getReplicaId()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_QUEUED_COUNT, 
- MetricsRegionSource.COMPACTIONS_QUEUED_DESC), - this.regionWrapper.getNumCompactionsQueued()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.FLUSHES_QUEUED_COUNT, - MetricsRegionSource.FLUSHES_QUEUED_DESC), - this.regionWrapper.getNumFlushesQueued()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.MAX_COMPACTION_QUEUE_SIZE, - MetricsRegionSource.MAX_COMPACTION_QUEUE_DESC), - this.regionWrapper.getMaxCompactionQueueSize()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.MAX_FLUSH_QUEUE_SIZE, - MetricsRegionSource.MAX_FLUSH_QUEUE_DESC), - this.regionWrapper.getMaxFlushQueueSize()); + this.regionWrapper.getNumCompactionsFailed()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.LAST_MAJOR_COMPACTION_AGE, + MetricsRegionSource.LAST_MAJOR_COMPACTION_DESC), + this.regionWrapper.getLastMajorCompactionAge()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT, + MetricsRegionSource.NUM_BYTES_COMPACTED_DESC), this.regionWrapper.getNumBytesCompacted()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT, + MetricsRegionSource.NUM_FILES_COMPACTED_DESC), this.regionWrapper.getNumFilesCompacted()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, + MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), + this.regionWrapper.getReadRequestCount()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionServerSource.CP_REQUEST_COUNT, + MetricsRegionServerSource.CP_REQUEST_COUNT_DESC), this.regionWrapper.getCpRequestCount()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), + this.regionWrapper.getFilteredReadRequestCount()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, + MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), + this.regionWrapper.getWriteRequestCount()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.REPLICA_ID, + MetricsRegionSource.REPLICA_ID_DESC), this.regionWrapper.getReplicaId()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_QUEUED_COUNT, + MetricsRegionSource.COMPACTIONS_QUEUED_DESC), + this.regionWrapper.getNumCompactionsQueued()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.FLUSHES_QUEUED_COUNT, + MetricsRegionSource.FLUSHES_QUEUED_DESC), this.regionWrapper.getNumFlushesQueued()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.MAX_COMPACTION_QUEUE_SIZE, + MetricsRegionSource.MAX_COMPACTION_QUEUE_DESC), + this.regionWrapper.getMaxCompactionQueueSize()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.MAX_FLUSH_QUEUE_SIZE, + MetricsRegionSource.MAX_FLUSH_QUEUE_DESC), this.regionWrapper.getMaxFlushQueueSize()); addCounter(mrb, this.regionWrapper.getMemstoreOnlyRowReadsCount(), MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE, MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC); addCounter(mrb, this.regionWrapper.getMixedRowReadsCount(), - MetricsRegionSource.MIXED_ROW_READS, - MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); + MetricsRegionSource.MIXED_ROW_READS, MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); } } @@ -330,7 +304,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - return obj == this || - (obj 
instanceof MetricsRegionSourceImpl && compareTo((MetricsRegionSourceImpl) obj) == 0); + return obj == this || (obj instanceof MetricsRegionSourceImpl + && compareTo((MetricsRegionSourceImpl) obj) == 0); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java index 6bf010ce91b4..5714a0542776 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,36 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Interface of class that will wrap an HRegion and export numbers so they can be - * used in MetricsRegionSource + * Interface of class that will wrap an HRegion and export numbers so they can be used in + * MetricsRegionSource */ @InterfaceAudience.Private public interface MetricsRegionWrapper { /** * Get the name of the table the region belongs to. - * * @return The string version of the table name. */ String getTableName(); /** * Get the name of the namespace this table is in. - * @return String version of the namespace. Can't be empty. + * @return String version of the namespace. Can't be empty. */ String getNamespace(); /** * Get the name of the region. - * * @return The encoded name of the region. */ String getRegionName(); @@ -95,12 +91,12 @@ public interface MetricsRegionWrapper { long getMinStoreFileAge(); /** - * @return Average age of store files under this region + * @return Average age of store files under this region */ long getAvgStoreFileAge(); /** - * @return Number of reference files under this region + * @return Number of reference files under this region */ long getNumReferenceFiles(); @@ -118,14 +114,14 @@ public interface MetricsRegionWrapper { long getNumCompactionsCompleted(); /** - * @return Age of the last major compaction + * @return Age of the last major compaction */ long getLastMajorCompactionAge(); /** - * Returns the total number of compactions that have been reported as failed on this region. - * Note that a given compaction can be reported as both completed and failed if an exception - * is thrown in the processing after {@code HRegion.compact()}. + * Returns the total number of compactions that have been reported as failed on this region. Note + * that a given compaction can be reported as both completed and failed if an exception is thrown + * in the processing after {@code HRegion.compact()}. */ long getNumCompactionsFailed(); @@ -143,14 +139,12 @@ public interface MetricsRegionWrapper { /** * Note that this metric is updated periodically and hence might miss some data points. - * * @return the max number of compactions queued for this region */ long getMaxCompactionQueueSize(); /** * Note that this metric is updated periodically and hence might miss some data points. 
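
Most of the snapshot() hunks above only re-wrap calls, but the naming convention they rely on is easy to miss: every per-region value is emitted under a shared suffix constant (storeCount, memStoreSize, and so on) prefixed with a region-specific string, so one aggregate record can carry the same metric for many regions. A minimal, self-contained sketch of that idea (the exact prefix layout shown here is an assumption, not taken from this diff):

// Simplified sketch: region-specific prefix plus shared metric suffixes.
import java.util.LinkedHashMap;
import java.util.Map;

final class RegionMetricNames {
  public static void main(String[] args) {
    // Illustrative prefix layout; the real string is built by the region wrapper.
    String regionNamePrefix =
        "Namespace_default_table_usertable_region_abc123def456_metric_";

    Map<String, Long> record = new LinkedHashMap<>();
    // The same suffixes are reused for every region; only the prefix differs.
    record.put(regionNamePrefix + "storeCount", 3L);
    record.put(regionNamePrefix + "storeFileCount", 12L);
    record.put(regionNamePrefix + "memStoreSize", 64L * 1024 * 1024);

    record.forEach((name, value) -> System.out.println(name + " = " + value));
  }
}
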
- * * @return the max number of flushes queued for this region */ long getMaxFlushQueueSize(); @@ -168,8 +162,8 @@ public interface MetricsRegionWrapper { long getStoreRefCount(); /** - * @return the max number of references active on any store file among - * all compacted store files that belong to this region + * @return the max number of references active on any store file among all compacted store files + * that belong to this region */ long getMaxCompactedStoreFileRefCount(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java index f746c98c5458..e11f1864f484 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface will be implemented by a MetricsSource that will export metrics from - * multiple regions of a table into the hadoop metrics system. + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * regions of a table into the hadoop metrics system. */ @InterfaceAudience.Private public interface MetricsTableAggregateSource extends BaseSource { @@ -59,7 +58,6 @@ public interface MetricsTableAggregateSource extends BaseSource { /** * Remove a table's source. This is called when regions of a table are closed. - * * @param table The table name */ void deleteTableSource(String table); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java index 0b13e5c8dfed..06a5bc82284f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Interns; @@ -31,7 +29,7 @@ @InterfaceAudience.Private public class MetricsTableAggregateSourceImpl extends BaseSourceImpl - implements MetricsTableAggregateSource { + implements MetricsTableAggregateSource { private static final Logger LOG = LoggerFactory.getLogger(MetricsTableAggregateSourceImpl.class); private ConcurrentHashMap tableSources = new ConcurrentHashMap<>(); @@ -40,10 +38,8 @@ public MetricsTableAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsTableAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsTableAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -73,7 +69,7 @@ public MetricsTableSource getOrCreateTableSource(String table, return source; } MetricsTableSource newSource = CompatibilitySingletonFactory - .getInstance(MetricsRegionServerSourceFactory.class).createTable(table, wrapper); + .getInstance(MetricsRegionServerSourceFactory.class).createTable(table, wrapper); return tableSources.computeIfAbsent(table, k -> { // register the new metrics now newSource.registerMetrics(); @@ -82,12 +78,11 @@ public MetricsTableSource getOrCreateTableSource(String table, } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. - * * @param collector the collector - * @param all get all the metrics regardless of when they last changed. + * @param all get all the metrics regardless of when they last changed. */ @Override public void getMetrics(MetricsCollector collector, boolean all) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java index 2aeb82b0d64d..aab2abdc4217 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
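
The getOrCreateTableSource hunk above keeps its create-then-computeIfAbsent shape so a table's source is registered exactly once even under concurrent callers, and the Javadoc it touches notes that Hadoop's getMetrics pushes values into a collector rather than returning them. A self-contained sketch of both points, using placeholder types instead of the real HBase/Hadoop classes:

// Simplified sketch: register-once table sources plus push-style getMetrics.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class TableSourceRegistry {
  interface Collector { void add(String name, long value); }

  static final class TableSource {
    final String table;
    TableSource(String table) { this.table = table; }
    void registerMetrics() { System.out.println("registered " + table); }
    // "Get" that returns nothing: metrics are pushed into the collector instead.
    void getMetrics(Collector collector) { collector.add(table + "_readRequestCount", 0L); }
  }

  private final Map<String, TableSource> tableSources = new ConcurrentHashMap<>();

  TableSource getOrCreateTableSource(String table) {
    TableSource existing = tableSources.get(table);
    if (existing != null) {
      return existing;
    }
    TableSource created = new TableSource(table);
    return tableSources.computeIfAbsent(table, k -> {
      created.registerMetrics(); // only the instance that wins the race gets registered
      return created;
    });
  }

  public static void main(String[] args) {
    TableSourceRegistry registry = new TableSourceRegistry();
    registry.getOrCreateTableSource("t1").getMetrics((name, value) ->
        System.out.println(name + " = " + value));
  }
}
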
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -59,7 +60,6 @@ public interface MetricsTableLatencies { /** * Update the Put time histogram - * * @param tableName The table the metric is for * @param t time it took */ @@ -67,7 +67,6 @@ public interface MetricsTableLatencies { /** * Update the batch Put time histogram - * * @param tableName The table the metric is for * @param t time it took */ @@ -75,7 +74,6 @@ public interface MetricsTableLatencies { /** * Update the Delete time histogram - * * @param tableName The table the metric is for * @param t time it took */ @@ -83,7 +81,6 @@ public interface MetricsTableLatencies { /** * Update the batch Delete time histogram - * * @param tableName The table the metric is for * @param t time it took */ @@ -91,7 +88,6 @@ public interface MetricsTableLatencies { /** * Update the Get time histogram . - * * @param tableName The table the metric is for * @param t time it took */ @@ -99,7 +95,6 @@ public interface MetricsTableLatencies { /** * Update the Increment time histogram. - * * @param tableName The table the metric is for * @param t time it took */ @@ -107,7 +102,6 @@ public interface MetricsTableLatencies { /** * Update the Append time histogram. - * * @param tableName The table the metric is for * @param t time it took */ @@ -115,7 +109,6 @@ public interface MetricsTableLatencies { /** * Update the scan size. - * * @param tableName The table the metric is for * @param scanSize size of the scan */ @@ -123,7 +116,6 @@ public interface MetricsTableLatencies { /** * Update the scan time. - * * @param tableName The table the metric is for * @param t time it took */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java index dd143d4c6f5d..0db5dd510628 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,7 +33,7 @@ @InterfaceAudience.Private public class MetricsTableLatenciesImpl extends BaseSourceImpl implements MetricsTableLatencies { - private final HashMap histogramsByTable = new HashMap<>(); + private final HashMap histogramsByTable = new HashMap<>(); public static class TableHistograms { final MetricHistogram getTimeHisto; @@ -50,22 +51,19 @@ public static class TableHistograms { TableHistograms(DynamicMetricsRegistry registry, TableName tn) { getTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, GET_TIME)); - incrementTimeHisto = registry.newTimeHistogram( - qualifyMetricsName(tn, INCREMENT_TIME)); + incrementTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, INCREMENT_TIME)); appendTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, APPEND_TIME)); putTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_TIME)); putBatchTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_BATCH_TIME)); deleteTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, DELETE_TIME)); - deleteBatchTimeHisto = registry.newTimeHistogram( - qualifyMetricsName(tn, DELETE_BATCH_TIME)); + deleteBatchTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, DELETE_BATCH_TIME)); scanTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, SCAN_TIME)); scanSizeHisto = registry.newSizeHistogram(qualifyMetricsName(tn, SCAN_SIZE)); checkAndDeleteTimeHisto = - registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_DELETE_TIME)); - checkAndPutTimeHisto = - registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_DELETE_TIME)); + checkAndPutTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); checkAndMutateTimeHisto = - registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_MUTATE_TIME)); + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_MUTATE_TIME)); } public void updatePut(long time) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java index c3b819228fe4..1847d407a010 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java index 6b1d323dc19a..f93e425baadd 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,15 +19,14 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.Meter; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in - * a RegionServer. + * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in a + * RegionServer. 
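
The class comment re-wrapped just above describes MetricsTableQueryMeterImpl as tracking queries per second for each table in a RegionServer; the implementation that follows keeps one read meter and one write meter per table under table-qualified names. A rough, self-contained sketch of that shape (LongAdder stands in for the real Meter, and the qualified-name layout is only illustrative):

// Simplified sketch: per-table read/write query meters keyed by table name.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

final class TableQueryMeters {
  static final class Meters {
    final LongAdder readQueries = new LongAdder();
    final LongAdder writeQueries = new LongAdder();
  }

  private final Map<String, Meters> metersByTable = new ConcurrentHashMap<>();

  private static String qualifyMetricsName(String table, String metric) {
    return "Namespace_default_table_" + table + "_metric_" + metric; // assumed layout
  }

  void updateTableReadQueryMeter(String table, long count) {
    metersByTable.computeIfAbsent(table, t -> new Meters()).readQueries.add(count);
  }

  void updateTableWriteQueryMeter(String table, long count) {
    metersByTable.computeIfAbsent(table, t -> new Meters()).writeQueries.add(count);
  }

  public static void main(String[] args) {
    TableQueryMeters meters = new TableQueryMeters();
    meters.updateTableReadQueryMeter("usertable", 5);
    meters.updateTableWriteQueryMeter("usertable", 2);
    System.out.println(qualifyMetricsName("usertable", "tableReadQueryPerSecond")
        + " count = " + meters.metersByTable.get("usertable").readQueries.sum());
  }
}
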
*/ @InterfaceAudience.Private public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter { @@ -42,10 +42,10 @@ private static class TableMeters { final Meter tableWriteQueryMeter; TableMeters(MetricRegistry metricRegistry, TableName tableName) { - this.tableReadQueryMeter = metricRegistry.meter(qualifyMetricsName(tableName, - TABLE_READ_QUERY_PER_SECOND)); + this.tableReadQueryMeter = + metricRegistry.meter(qualifyMetricsName(tableName, TABLE_READ_QUERY_PER_SECOND)); this.tableWriteQueryMeter = - metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND)); + metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND)); } public void updateTableReadQueryMeter(long count) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java index 9fc606257e0c..bd7be1783834 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.Closeable; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java index 85f5bded98a8..146eb20de490 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -64,7 +64,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricHistogram; @@ -126,19 +125,18 @@ public class MetricsTableSourceImpl implements MetricsTableSource { private MutableFastCounter majorCompactedInputBytes; private MutableFastCounter majorCompactedOutputBytes; - public MetricsTableSourceImpl(String tblName, - MetricsTableAggregateSourceImpl aggregate, MetricsTableWrapperAggregate tblWrapperAgg) { + public MetricsTableSourceImpl(String tblName, MetricsTableAggregateSourceImpl aggregate, + MetricsTableWrapperAggregate tblWrapperAgg) { LOG.debug("Creating new MetricsTableSourceImpl for table '{}'", tblName); this.tableName = TableName.valueOf(tblName); this.agg = aggregate; this.tableWrapperAgg = tblWrapperAgg; this.registry = agg.getMetricsRegistry(); - this.tableNamePrefixPart1 = "Namespace_" + this.tableName.getNamespaceAsString() + - "_table_" + this.tableName.getQualifierAsString(); + this.tableNamePrefixPart1 = "Namespace_" + this.tableName.getNamespaceAsString() + "_table_" + + this.tableName.getQualifierAsString(); this.tableNamePrefixPart2 = "_metric_"; - this.tableNamePrefix = tableNamePrefixPart1 + - tableNamePrefixPart2; + this.tableNamePrefix = tableNamePrefixPart1 + tableNamePrefixPart2; this.hashCode = this.tableName.hashCode(); } @@ -238,6 +236,7 @@ public void close() { tableWrapperAgg = null; } } + @Override public MetricsTableAggregateSource getAggregateSource() { return agg; @@ -272,61 +271,75 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) { } if (this.tableWrapperAgg != null) { - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.CP_REQUEST_COUNT, + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.CP_REQUEST_COUNT, MetricsRegionServerSource.CP_REQUEST_COUNT_DESC), tableWrapperAgg.getCpRequestsCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), - tableWrapperAgg.getReadRequestCount(tableName.getNameAsString())); + tableWrapperAgg.getReadRequestCount(tableName.getNameAsString())); + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), + tableWrapperAgg.getFilteredReadRequestCount(tableName.getNameAsString())); mrb.addCounter( - Interns.info(tableNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, - MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), - tableWrapperAgg.getFilteredReadRequestCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, + Interns.info(tableNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), - tableWrapperAgg.getWriteRequestCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.TOTAL_REQUEST_COUNT, + tableWrapperAgg.getWriteRequestCount(tableName.getNameAsString())); + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.TOTAL_REQUEST_COUNT, 
MetricsRegionServerSource.TOTAL_REQUEST_COUNT_DESC), - tableWrapperAgg.getTotalRequestsCount(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + tableWrapperAgg.getTotalRequestsCount(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, MetricsRegionServerSource.MEMSTORE_SIZE_DESC), - tableWrapperAgg.getMemStoreSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + tableWrapperAgg.getMemStoreSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, MetricsRegionServerSource.STOREFILE_COUNT_DESC), - tableWrapperAgg.getNumStoreFiles(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + tableWrapperAgg.getNumStoreFiles(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, MetricsRegionServerSource.STOREFILE_SIZE_DESC), - tableWrapperAgg.getStoreFileSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE, - MetricsTableSource.TABLE_SIZE_DESC), + tableWrapperAgg.getStoreFileSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE, + MetricsTableSource.TABLE_SIZE_DESC), tableWrapperAgg.getTableSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVERAGE_REGION_SIZE, + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.AVERAGE_REGION_SIZE, MetricsRegionServerSource.AVERAGE_REGION_SIZE_DESC), - tableWrapperAgg.getAvgRegionSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.REGION_COUNT, + tableWrapperAgg.getAvgRegionSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.REGION_COUNT, MetricsRegionServerSource.REGION_COUNT_DESC), - tableWrapperAgg.getNumRegions(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STORE_COUNT, + tableWrapperAgg.getNumRegions(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STORE_COUNT, MetricsRegionServerSource.STORE_COUNT_DESC), - tableWrapperAgg.getNumStores(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, + tableWrapperAgg.getNumStores(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), - tableWrapperAgg.getMaxStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, + tableWrapperAgg.getMaxStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), - tableWrapperAgg.getMinStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, + tableWrapperAgg.getMinStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + 
MetricsRegionServerSource.AVG_STORE_FILE_AGE, MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), - tableWrapperAgg.getAvgStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, + tableWrapperAgg.getAvgStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), - tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString())); + tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString())); addGauge(mrb, tableWrapperAgg.getMemstoreOnlyRowReadsCount(tableName.getNameAsString()), MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE, MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC); addGauge(mrb, tableWrapperAgg.getMixedRowReadsCount(tableName.getNameAsString()), - MetricsRegionSource.MIXED_ROW_READS, - MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); + MetricsRegionSource.MIXED_ROW_READS, MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); } } } @@ -337,8 +350,8 @@ private void addGauge(MetricsRecordBuilder mrb, Map metricMap, Str for (Entry entry : metricMap.entrySet()) { // append 'store' and its name to the metric mrb.addGauge(Interns.info(this.tableNamePrefixPart1 + _COLUMNFAMILY - + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] - + this.tableNamePrefixPart2 + metricName, + + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] + this.tableNamePrefixPart2 + + metricName, metricDesc), entry.getValue()); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java index 40fd6d8effaf..284fb57cd231 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Interface of class that will wrap a MetricsTableSource and export numbers so they can be - * used in MetricsTableSource + * Interface of class that will wrap a MetricsTableSource and export numbers so they can be used in + * MetricsTableSource */ @InterfaceAudience.Private public interface MetricsTableWrapperAggregate { public String HASH = "#"; + /** * Get the number of read requests that have been issued against this table */ @@ -43,6 +42,7 @@ public interface MetricsTableWrapperAggregate { * Get the total number of filtered read requests that have been issued against this table */ long getFilteredReadRequestCount(String table); + /** * Get the number of write requests that have been issued for this table */ @@ -68,7 +68,6 @@ public interface MetricsTableWrapperAggregate { */ long getTableSize(String table); - /** * Get the average region size for this table */ @@ -100,12 +99,12 @@ public interface MetricsTableWrapperAggregate { long getMinStoreFileAge(String table); /** - * @return Average age of store files for this table + * @return Average age of store files for this table */ long getAvgStoreFileAge(String table); /** - * @return Number of reference files for this table + * @return Number of reference files for this table */ long getNumReferenceFiles(String table); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java index ee570f00d999..fe5b2ab47536 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** -* This interface will be implemented by a MetricsSource that will export metrics from -* multiple users into the hadoop metrics system. -*/ + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * users into the hadoop metrics system. + */ @InterfaceAudience.Private public interface MetricsUserAggregateSource extends BaseSource { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java index 28726c4ee1f1..85ace54b0826 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -32,7 +30,7 @@ @InterfaceAudience.Private public class MetricsUserAggregateSourceImpl extends BaseSourceImpl - implements MetricsUserAggregateSource { + implements MetricsUserAggregateSource { private static final Logger LOG = LoggerFactory.getLogger(MetricsUserAggregateSourceImpl.class); @@ -43,10 +41,8 @@ public MetricsUserAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsUserAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsUserAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java index 96173669bbc3..2d75c9246ba2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface MetricsUserSource extends Comparable { - //These client metrics will be reported through clusterStatus and hbtop only + // These client metrics will be reported through clusterStatus and hbtop only interface ClientMetrics { void incrementReadRequest(); @@ -66,15 +64,14 @@ interface ClientMetrics { void getMetrics(MetricsCollector metricsCollector, boolean all); /** - * Metrics collected at client level for a user(needed for reporting through clusterStatus - * and hbtop currently) + * Metrics collected at client level for a user(needed for reporting through clusterStatus and + * hbtop currently) * @return metrics per hostname */ Map getClientMetrics(); /** * Create a instance of ClientMetrics if not present otherwise return the previous one - * * @param hostName hostname of the client * @return Instance of ClientMetrics */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java index ef0eb7bf4620..871a3e42550c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; @@ -23,7 +22,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -74,32 +72,39 @@ public ClientMetricsImpl(String hostName) { this.hostName = hostName; } - @Override public void incrementReadRequest() { + @Override + public void incrementReadRequest() { readRequestsCount.increment(); } - @Override public void incrementWriteRequest() { + @Override + public void incrementWriteRequest() { writeRequestsCount.increment(); } - @Override public String getHostName() { + @Override + public String getHostName() { return hostName; } - @Override public long getReadRequestsCount() { + @Override + public long getReadRequestsCount() { return readRequestsCount.sum(); } - @Override public long getWriteRequestsCount() { + @Override + public long getWriteRequestsCount() { return writeRequestsCount.sum(); } - @Override public void incrementFilteredReadRequests() { + @Override + public void incrementFilteredReadRequests() { filteredRequestsCount.increment(); } - @Override public long getFilteredReadRequests() { + @Override + public long getFilteredReadRequests() { return filteredRequestsCount.sum(); } } @@ -191,8 +196,8 @@ public int hashCode() { @Override public boolean equals(Object obj) { - return obj == this || - (obj instanceof MetricsUserSourceImpl && compareTo((MetricsUserSourceImpl) obj) == 0); + return obj == this + || (obj instanceof MetricsUserSourceImpl && compareTo((MetricsUserSourceImpl) obj) == 0); } void snapshot(MetricsRecordBuilder mrb, boolean ignored) { @@ -252,16 +257,19 @@ public void updateScanTime(long t) { scanTimeHisto.add(t); } - @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { + @Override + public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder mrb = metricsCollector.addRecord(this.userNamePrefix); registry.snapshot(mrb, all); } - @Override public Map getClientMetrics() { + @Override + public Map getClientMetrics() { return Collections.unmodifiableMap(clientMetricsMap); } - @Override public ClientMetrics getOrCreateMetricsClient(String client) { + @Override + public ClientMetrics getOrCreateMetricsClient(String client) { ClientMetrics source = clientMetricsMap.get(client); if (source != null) { return source; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java index 4a430cdc434e..cd2e339b5452 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import org.apache.hadoop.hbase.TableName; @@ -28,7 +27,6 @@ @InterfaceAudience.Private public interface MetricsWALSource extends BaseSource { - /** * The name of the metrics */ @@ -49,7 +47,6 @@ public interface MetricsWALSource extends BaseSource { */ String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String APPEND_TIME = "appendTime"; String APPEND_TIME_DESC = "Time an append to the log took."; String APPEND_COUNT = "appendCount"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java index 4f71681113c5..501e02c7f156 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.util.concurrent.ConcurrentHashMap; @@ -27,9 +26,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Class that transitions metrics from MetricsWAL into the metrics subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern. + * Class that transitions metrics from MetricsWAL into the metrics subsystem. Implements BaseSource + * through BaseSourceImpl, following the pattern. * @see org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource */ @InterfaceAudience.Private @@ -55,13 +53,11 @@ public MetricsWALSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsWALSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsWALSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - //Create and store the metrics that will be used. + // Create and store the metrics that will be used. 
appendTimeHisto = this.getMetricsRegistry().newTimeHistogram(APPEND_TIME, APPEND_TIME_DESC); appendSizeHisto = this.getMetricsRegistry().newSizeHistogram(APPEND_SIZE, APPEND_SIZE_DESC); appendCount = this.getMetricsRegistry().newCounter(APPEND_COUNT, APPEND_COUNT_DESC, 0L); @@ -70,17 +66,17 @@ public MetricsWALSourceImpl(String metricsName, syncTimeHisto = this.getMetricsRegistry().newTimeHistogram(SYNC_TIME, SYNC_TIME_DESC); logRollRequested = this.getMetricsRegistry().newCounter(ROLL_REQUESTED, ROLL_REQUESTED_DESC, 0L); - errorRollRequested = this.getMetricsRegistry() - .newCounter(ERROR_ROLL_REQUESTED, ERROR_ROLL_REQUESTED_DESC, 0L); - lowReplicationRollRequested = this.getMetricsRegistry() - .newCounter(LOW_REPLICA_ROLL_REQUESTED, LOW_REPLICA_ROLL_REQUESTED_DESC, 0L); - slowSyncRollRequested = this.getMetricsRegistry() - .newCounter(SLOW_SYNC_ROLL_REQUESTED, SLOW_SYNC_ROLL_REQUESTED_DESC, 0L); - sizeRollRequested = this.getMetricsRegistry() - .newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L); + errorRollRequested = + this.getMetricsRegistry().newCounter(ERROR_ROLL_REQUESTED, ERROR_ROLL_REQUESTED_DESC, 0L); + lowReplicationRollRequested = this.getMetricsRegistry().newCounter(LOW_REPLICA_ROLL_REQUESTED, + LOW_REPLICA_ROLL_REQUESTED_DESC, 0L); + slowSyncRollRequested = this.getMetricsRegistry().newCounter(SLOW_SYNC_ROLL_REQUESTED, + SLOW_SYNC_ROLL_REQUESTED_DESC, 0L); + sizeRollRequested = + this.getMetricsRegistry().newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L); writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, WRITTEN_BYTES_DESC, 0L); - successfulLogRolls = this.getMetricsRegistry() - .newCounter(SUCCESSFUL_LOG_ROLLS, SUCCESSFUL_LOG_ROLLS_DESC, 0L); + successfulLogRolls = + this.getMetricsRegistry().newCounter(SUCCESSFUL_LOG_ROLLS, SUCCESSFUL_LOG_ROLLS_DESC, 0L); perTableAppendCount = new ConcurrentHashMap<>(); perTableAppendSize = new ConcurrentHashMap<>(); } @@ -93,8 +89,8 @@ public void incrementAppendSize(TableName tableName, long size) { // Ideally putIfAbsent is atomic and we don't need a branch check but we still do it to avoid // expensive string construction for every append. 
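
The comment re-wrapped just above spells out why incrementAppendSize checks the map before falling back to putIfAbsent: the String.format key is only needed the first time a table is seen, so the common path stays cheap. A self-contained sketch of that hot-path optimization (LongAdder replaces MutableFastCounter, and counter registration is reduced to a print statement):

// Simplified sketch: cheap lookup first, key construction and registration only on a miss.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

final class PerTableAppendSize {
  private final ConcurrentHashMap<String, LongAdder> perTableAppendSize = new ConcurrentHashMap<>();

  void incrementAppendSize(String tableName, long size) {
    LongAdder counter = perTableAppendSize.get(tableName); // hot path: no formatting
    if (counter == null) {
      String metricsKey = String.format("%s.%s", tableName, "appendSize"); // first use only
      System.out.println("registering counter " + metricsKey);
      perTableAppendSize.putIfAbsent(tableName, new LongAdder());
      counter = perTableAppendSize.get(tableName);
    }
    counter.add(size);
  }

  public static void main(String[] args) {
    PerTableAppendSize metrics = new PerTableAppendSize();
    metrics.incrementAppendSize("usertable", 128);
    metrics.incrementAppendSize("usertable", 256);
    System.out.println("appendSize(usertable) = "
        + metrics.perTableAppendSize.get("usertable").sum());
  }
}
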
String metricsKey = String.format("%s.%s", tableName, APPEND_SIZE); - perTableAppendSize.putIfAbsent( - tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L)); + perTableAppendSize.putIfAbsent(tableName, + getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L)); tableAppendSizeCounter = perTableAppendSize.get(tableName); } tableAppendSizeCounter.incr(size); @@ -111,8 +107,8 @@ public void incrementAppendCount(TableName tableName) { MutableFastCounter tableAppendCounter = perTableAppendCount.get(tableName); if (tableAppendCounter == null) { String metricsKey = String.format("%s.%s", tableName, APPEND_COUNT); - perTableAppendCount.putIfAbsent( - tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L)); + perTableAppendCount.putIfAbsent(tableName, + getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L)); tableAppendCounter = perTableAppendCount.get(tableName); } tableAppendCounter.incr(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java index 547617a1669f..57fb3923483e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.metrics2.lib.MutableFastCounter; @@ -26,7 +25,7 @@ @InterfaceAudience.Private public class MetricsReplicationGlobalSourceSourceImpl - implements MetricsReplicationGlobalSourceSource { + implements MetricsReplicationGlobalSourceSource { private static final String KEY_PREFIX = "source."; private final MetricsReplicationSourceImpl rms; @@ -56,7 +55,7 @@ public MetricsReplicationGlobalSourceSourceImpl(MetricsReplicationSourceImpl rms this.rms = rms; ageOfLastShippedOpHist = - rms.getMetricsRegistry().newTimeHistogram(SOURCE_AGE_OF_LAST_SHIPPED_OP); + rms.getMetricsRegistry().newTimeHistogram(SOURCE_AGE_OF_LAST_SHIPPED_OP); sizeOfLogQueueGauge = rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_LOG_QUEUE, 0L); @@ -77,61 +76,70 @@ public MetricsReplicationGlobalSourceSourceImpl(MetricsReplicationSourceImpl rms sizeOfHFileRefsQueueGauge = rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_HFILE_REFS_QUEUE, 0L); - unknownFileLengthForClosedWAL = rms.getMetricsRegistry() - .getCounter(SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH, 0L); + unknownFileLengthForClosedWAL = + rms.getMetricsRegistry().getCounter(SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH, 0L); uncleanlyClosedWAL = rms.getMetricsRegistry().getCounter(SOURCE_UNCLEANLY_CLOSED_LOGS, 0L); - uncleanlyClosedSkippedBytes = rms.getMetricsRegistry() - .getCounter(SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES, 0L); + uncleanlyClosedSkippedBytes = + rms.getMetricsRegistry().getCounter(SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES, 0L); restartWALReading = rms.getMetricsRegistry().getCounter(SOURCE_RESTARTED_LOG_READING, 0L); repeatedFileBytes = rms.getMetricsRegistry().getCounter(SOURCE_REPEATED_LOG_FILE_BYTES, 0L); completedWAL = rms.getMetricsRegistry().getCounter(SOURCE_COMPLETED_LOGS, 0L); - completedRecoveryQueue = rms.getMetricsRegistry() - .getCounter(SOURCE_COMPLETED_RECOVERY_QUEUES, 0L); - failedRecoveryQueue = rms.getMetricsRegistry() - .getCounter(SOURCE_FAILED_RECOVERY_QUEUES, 0L); + completedRecoveryQueue = + rms.getMetricsRegistry().getCounter(SOURCE_COMPLETED_RECOVERY_QUEUES, 0L); + failedRecoveryQueue = rms.getMetricsRegistry().getCounter(SOURCE_FAILED_RECOVERY_QUEUES, 0L); - walReaderBufferUsageBytes = rms.getMetricsRegistry() - .getGauge(SOURCE_WAL_READER_EDITS_BUFFER, 0L); + walReaderBufferUsageBytes = + rms.getMetricsRegistry().getGauge(SOURCE_WAL_READER_EDITS_BUFFER, 0L); sourceInitializing = rms.getMetricsRegistry().getGaugeInt(SOURCE_INITIALIZING, 0); } - @Override public void setLastShippedAge(long age) { + @Override + public void setLastShippedAge(long age) { ageOfLastShippedOpHist.add(age); } - @Override public void incrSizeOfLogQueue(int size) { + @Override + public void incrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.incr(size); } - @Override public void decrSizeOfLogQueue(int size) { + @Override + public void decrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.decr(size); } - @Override public void incrLogReadInEdits(long size) { + @Override + public void incrLogReadInEdits(long size) { logReadInEditsCounter.incr(size); } - @Override public void incrLogEditsFiltered(long size) { + @Override + public void incrLogEditsFiltered(long size) { walEditsFilteredCounter.incr(size); } - @Override public void incrBatchesShipped(int batches) { + @Override + public void incrBatchesShipped(int batches) { shippedBatchesCounter.incr(batches); } - @Override public void incrOpsShipped(long ops) { + @Override + public void incrOpsShipped(long ops) { 
shippedOpsCounter.incr(ops); } - @Override public void incrShippedBytes(long size) { + @Override + public void incrShippedBytes(long size) { shippedBytesCounter.incr(size); } - @Override public void incrLogReadInBytes(long size) { + @Override + public void incrLogReadInBytes(long size) { logReadInBytesCounter.incr(size); } - @Override public void clear() { + @Override + public void clear() { } @Override @@ -139,7 +147,8 @@ public long getLastShippedAge() { return ageOfLastShippedOpHist.getMax(); } - @Override public void incrHFilesShipped(long hfiles) { + @Override + public void incrHFilesShipped(long hfiles) { shippedHFilesCounter.incr(hfiles); } @@ -155,13 +164,14 @@ public void decrSizeOfHFileRefsQueue(long size) { @Override public int getSizeOfLogQueue() { - return (int)sizeOfLogQueueGauge.value(); + return (int) sizeOfLogQueueGauge.value(); } @Override public void incrUnknownFileLengthForClosedWAL() { unknownFileLengthForClosedWAL.incr(1L); } + @Override public void incrUncleanlyClosedWALs() { uncleanlyClosedWAL.incr(1L); @@ -176,22 +186,27 @@ public long getUncleanlyClosedWALs() { public void incrBytesSkippedInUncleanlyClosedWALs(final long bytes) { uncleanlyClosedSkippedBytes.incr(bytes); } + @Override public void incrRestartedWALReading() { restartWALReading.incr(1L); } + @Override public void incrRepeatedFileBytes(final long bytes) { repeatedFileBytes.incr(bytes); } + @Override public void incrCompletedWAL() { completedWAL.incr(1L); } + @Override public void incrCompletedRecoveryQueue() { completedRecoveryQueue.incr(1L); } + @Override public void incrFailedRecoveryQueue() { failedRecoveryQueue.incr(1L); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java index 2498e3426a5d..ff594412fe9a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -28,9 +27,14 @@ public interface MetricsReplicationSinkSource { public static final String SINK_APPLIED_HFILES = "sink.appliedHFiles"; void setLastAppliedOpAge(long age); + void incrAppliedBatches(long batches); + void incrAppliedOps(long batchsize); + long getLastAppliedOpAge(); + void incrAppliedHFiles(long hfileSize); + long getSinkAppliedOps(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java index ce45af5ccec7..84a7458a257c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.metrics2.lib.MutableFastCounter; @@ -37,15 +36,18 @@ public MetricsReplicationSinkSourceImpl(MetricsReplicationSourceImpl rms) { hfilesCounter = rms.getMetricsRegistry().getCounter(SINK_APPLIED_HFILES, 0L); } - @Override public void setLastAppliedOpAge(long age) { + @Override + public void setLastAppliedOpAge(long age) { ageHist.add(age); } - @Override public void incrAppliedBatches(long batches) { + @Override + public void incrAppliedBatches(long batches) { batchesCounter.incr(batches); } - @Override public void incrAppliedOps(long batchsize) { + @Override + public void incrAppliedOps(long batchsize) { opsCounter.incr(batchsize); } @@ -59,7 +61,8 @@ public void incrAppliedHFiles(long hfiles) { hfilesCounter.incr(hfiles); } - @Override public long getSinkAppliedOps() { + @Override + public long getSinkAppliedOps() { return opsCounter.value(); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java index 6fb5d71ef02f..a891b7732880 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java index 73d2cfd62f49..ef72ce756e55 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -23,7 +22,10 @@ @InterfaceAudience.Private public interface MetricsReplicationSourceFactory { public MetricsReplicationSinkSource getSink(); + public MetricsReplicationSourceSource getSource(String id); + public MetricsReplicationTableSource getTableSource(String tableName); + public MetricsReplicationGlobalSourceSourceImpl getGlobalSource(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java index 061fc58296e0..1362a9022f86 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java @@ -24,22 +24,27 @@ public class MetricsReplicationSourceFactoryImpl implements MetricsReplicationSo private static enum SourceHolder { INSTANCE; + final MetricsReplicationSourceImpl source = new MetricsReplicationSourceImpl(); } - @Override public MetricsReplicationSinkSource getSink() { + @Override + public MetricsReplicationSinkSource getSink() { return new MetricsReplicationSinkSourceImpl(SourceHolder.INSTANCE.source); } - @Override public MetricsReplicationSourceSource getSource(String id) { + @Override + public MetricsReplicationSourceSource getSource(String id) { return new MetricsReplicationSourceSourceImpl(SourceHolder.INSTANCE.source, id); } - @Override public MetricsReplicationTableSource getTableSource(String tableName) { + @Override + public MetricsReplicationTableSource getTableSource(String tableName) { return new MetricsReplicationTableSourceImpl(SourceHolder.INSTANCE.source, tableName); } - @Override public MetricsReplicationGlobalSourceSourceImpl getGlobalSource() { + @Override + public MetricsReplicationGlobalSourceSourceImpl getGlobalSource() { return new MetricsReplicationGlobalSourceSourceImpl(SourceHolder.INSTANCE.source); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java index 02045f8bbd13..f841a97929e3 100644 --- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -23,23 +22,18 @@ /** * Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and - * counters. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * counters. Implements BaseSource through BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsReplicationSourceImpl extends BaseSourceImpl implements - MetricsReplicationSource { - +public class MetricsReplicationSourceImpl extends BaseSourceImpl + implements MetricsReplicationSource { public MetricsReplicationSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - MetricsReplicationSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + MetricsReplicationSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java index 42e28f5d0f31..d9bbeb1555c5 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -53,35 +52,66 @@ public interface MetricsReplicationSourceSource extends BaseSource { public static final String SOURCE_INITIALIZING = "source.numInitializing"; void setLastShippedAge(long age); + void incrSizeOfLogQueue(int size); + void decrSizeOfLogQueue(int size); + void incrLogEditsFiltered(long size); + void incrBatchesShipped(int batches); + void incrOpsShipped(long ops); + void incrShippedBytes(long size); + void incrLogReadInBytes(long size); + void incrLogReadInEdits(long size); + void clear(); + long getLastShippedAge(); + int getSizeOfLogQueue(); + void incrHFilesShipped(long hfiles); + void incrSizeOfHFileRefsQueue(long size); + void decrSizeOfHFileRefsQueue(long size); + void incrUnknownFileLengthForClosedWAL(); + void incrUncleanlyClosedWALs(); + long getUncleanlyClosedWALs(); + void incrBytesSkippedInUncleanlyClosedWALs(final long bytes); + void incrRestartedWALReading(); + void incrRepeatedFileBytes(final long bytes); + void incrCompletedWAL(); + void incrCompletedRecoveryQueue(); + void incrFailedRecoveryQueue(); + long getWALEditsRead(); + long getShippedOps(); + long getEditsFiltered(); + void setOldestWalAge(long age); + long getOldestWalAge(); + void incrSourceInitializing(); + void decrSourceInitializing(); + int getSourceInitializing(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java index faf14f79cfb7..795f81c0df85 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java @@ -134,43 +134,53 @@ public MetricsReplicationSourceSourceImpl(MetricsReplicationSourceImpl rms, Stri sourceInitializing = rms.getMetricsRegistry().getGaugeInt(sourceInitializingKey, 0); } - @Override public void setLastShippedAge(long age) { + @Override + public void setLastShippedAge(long age) { ageOfLastShippedOpHist.add(age); } - @Override public void incrSizeOfLogQueue(int size) { + @Override + public void incrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.incr(size); } - @Override public void decrSizeOfLogQueue(int size) { + @Override + public void decrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.decr(size); } - @Override public void incrLogReadInEdits(long size) { + @Override + public void incrLogReadInEdits(long size) { logReadInEditsCounter.incr(size); } - @Override public void incrLogEditsFiltered(long size) { + @Override + public void incrLogEditsFiltered(long size) { walEditsFilteredCounter.incr(size); } - @Override public void incrBatchesShipped(int batches) { + @Override + public void incrBatchesShipped(int batches) { shippedBatchesCounter.incr(batches); } - @Override public void incrOpsShipped(long ops) { + @Override + public void incrOpsShipped(long ops) { shippedOpsCounter.incr(ops); } - @Override public void incrShippedBytes(long size) { + @Override + public void incrShippedBytes(long size) { shippedBytesCounter.incr(size); } - @Override public void incrLogReadInBytes(long size) { + @Override + public void incrLogReadInBytes(long size) { logReadInBytesCounter.incr(size); } - @Override public void clear() { + @Override + public void clear() { 
rms.removeMetric(ageOfLastShippedOpKey); rms.removeMetric(sizeOfLogQueueKey); @@ -220,7 +230,7 @@ public void decrSizeOfHFileRefsQueue(long size) { @Override public int getSizeOfLogQueue() { - return (int)sizeOfLogQueueGauge.value(); + return (int) sizeOfLogQueueGauge.value(); } @Override @@ -264,13 +274,16 @@ public void incrCompletedRecoveryQueue() { } @Override - public void incrFailedRecoveryQueue() {/*no op*/} + public void incrFailedRecoveryQueue() { + /* no op */} - @Override public void setOldestWalAge(long age) { + @Override + public void setOldestWalAge(long age) { oldestWalAge.set(age); } - @Override public long getOldestWalAge() { + @Override + public long getOldestWalAge() { return oldestWalAge.value(); } @@ -284,7 +297,8 @@ public int getSourceInitializing() { return sourceInitializing.value(); } - @Override public void decrSourceInitializing() { + @Override + public void decrSourceInitializing() { sourceInitializing.decr(1); } @@ -343,15 +357,18 @@ public String getMetricsName() { return rms.getMetricsName(); } - @Override public long getWALEditsRead() { + @Override + public long getWALEditsRead() { return this.logReadInEditsCounter.value(); } - @Override public long getShippedOps() { + @Override + public long getShippedOps() { return this.shippedOpsCounter.value(); } - @Override public long getEditsFiltered() { + @Override + public long getEditsFiltered() { return this.walEditsFilteredCounter.value(); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java index faa944a6870d..c4550abb6e83 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -25,8 +24,12 @@ public interface MetricsReplicationTableSource extends BaseSource { void setLastShippedAge(long age); + void incrShippedBytes(long size); + long getShippedBytes(); + void clear(); + long getLastShippedAge(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java index 9ca0cd1a94ef..244298faff66 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java @@ -22,9 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This is the metric source for table level replication metrics. - * We can easy monitor some useful table level replication metrics such as - * ageOfLastShippedOp and shippedBytes + * This is the metric source for table level replication metrics. 
We can easy monitor some useful + * table level replication metrics such as ageOfLastShippedOp and shippedBytes */ @InterfaceAudience.Private public class MetricsReplicationTableSourceImpl implements MetricsReplicationTableSource { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java index 6a672f8cf9ff..72d7ad83821d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -64,91 +63,78 @@ public interface MetricsRESTSource extends BaseSource, JvmPauseMonitorSource { /** * Increment the number of requests - * * @param inc Ammount to increment by */ void incrementRequests(int inc); /** * Increment the number of successful Get requests. - * * @param inc Number of successful get requests. */ void incrementSucessfulGetRequests(int inc); /** * Increment the number of successful Put requests. - * * @param inc Number of successful put requests. */ void incrementSucessfulPutRequests(int inc); /** * Increment the number of successful Delete requests. - * * @param inc number of successful delete requests */ void incrementSucessfulDeleteRequests(int inc); /** * Increment the number of failed Put Requests. - * * @param inc Number of failed Put requests. */ void incrementFailedPutRequests(int inc); /** * Increment the number of failed Get requests. - * * @param inc The number of failed Get Requests. */ void incrementFailedGetRequests(int inc); /** * Increment the number of failed Delete requests. - * * @param inc The number of failed delete requests. */ void incrementFailedDeleteRequests(int inc); /** * Increment the number of successful scan requests. - * * @param inc Number of successful scan requests. */ void incrementSucessfulScanRequests(final int inc); /** * Increment the number failed scan requests. - * * @param inc Number of failed scan requests. */ void incrementFailedScanRequests(final int inc); /** * Increment the number of successful append requests. - * * @param inc Number of successful append requests. */ void incrementSucessfulAppendRequests(final int inc); /** * Increment the number failed append requests. - * * @param inc Number of failed append requests. */ void incrementFailedAppendRequests(final int inc); /** * Increment the number of successful increment requests. - * * @param inc Number of successful increment requests. */ void incrementSucessfulIncrementRequests(final int inc); /** * Increment the number failed increment requests. - * * @param inc Number of failed increment requests. 
*/ void incrementFailedIncrementRequests(final int inc); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java index 3474265ee26c..45df51579762 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,9 +24,8 @@ /** * Hadoop Two implementation of a metrics2 source that will export metrics from the Rest server to - * the hadoop metrics2 subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * the hadoop metrics2 subsystem. Implements BaseSource through BaseSourceImpl, following the + * pattern */ @InterfaceAudience.Private public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource { @@ -55,17 +53,15 @@ public MetricsRESTSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT); } - public MetricsRESTSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsRESTSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java index 3fbf15caebfa..2becf3aff52b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java index 760376cfe206..dadc2d0a5e42 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.thrift; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java index 3ce2d5d1fdc1..7eb9adfb89c1 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.thrift; import org.apache.yetus.audience.InterfaceAudience; @@ -32,6 +31,7 @@ public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServer */ private enum FactoryStorage { INSTANCE; + MetricsThriftServerSourceImpl thriftOne; MetricsThriftServerSourceImpl thriftTwo; } @@ -40,9 +40,7 @@ private enum FactoryStorage { public MetricsThriftServerSource createThriftOneSource() { if (FactoryStorage.INSTANCE.thriftOne == null) { FactoryStorage.INSTANCE.thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_ONE_METRICS_CONTEXT, - THRIFT_ONE_JMX_CONTEXT); + METRICS_DESCRIPTION, THRIFT_ONE_METRICS_CONTEXT, THRIFT_ONE_JMX_CONTEXT); } return FactoryStorage.INSTANCE.thriftOne; } @@ -51,9 +49,7 @@ public MetricsThriftServerSource createThriftOneSource() { public MetricsThriftServerSource createThriftTwoSource() { if (FactoryStorage.INSTANCE.thriftTwo == null) { FactoryStorage.INSTANCE.thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_TWO_METRICS_CONTEXT, - THRIFT_TWO_JMX_CONTEXT); + METRICS_DESCRIPTION, THRIFT_TWO_METRICS_CONTEXT, THRIFT_TWO_JMX_CONTEXT); } return FactoryStorage.INSTANCE.thriftTwo; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java index 4ed974c95dce..81a54d31767b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSourceImpl; @@ -26,13 +25,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} Implements + * BaseSource through BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl implements - MetricsThriftServerSource { +public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl + implements MetricsThriftServerSource { private MetricHistogram batchGetStat; private MetricHistogram batchMutateStat; @@ -51,17 +49,15 @@ public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl i private final MetricHistogram pausesWithGc; private final MetricHistogram pausesWithoutGc; - public MetricsThriftServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsThriftServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java index 3133472a8d33..ce93143d884a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,17 +52,17 @@ public interface MetricsZooKeeperSource extends BaseSource { String EXCEPTION_CONNECTIONLOSS_DESC = "Number of failed ops due to a CONNECTIONLOSS exception."; String EXCEPTION_DATAINCONSISTENCY = "DATAINCONSISTENCY Exception"; String EXCEPTION_DATAINCONSISTENCY_DESC = - "Number of failed ops due to a DATAINCONSISTENCY exception."; + "Number of failed ops due to a DATAINCONSISTENCY exception."; String EXCEPTION_INVALIDACL = "INVALIDACL Exception"; String EXCEPTION_INVALIDACL_DESC = "Number of failed ops due to an INVALIDACL exception"; String EXCEPTION_NOAUTH = "NOAUTH Exception"; String EXCEPTION_NOAUTH_DESC = "Number of failed ops due to a NOAUTH exception."; String EXCEPTION_OPERATIONTIMEOUT = "OPERATIONTIMEOUT Exception"; String EXCEPTION_OPERATIONTIMEOUT_DESC = - "Number of failed ops due to an OPERATIONTIMEOUT exception."; + "Number of failed ops due to an OPERATIONTIMEOUT exception."; String EXCEPTION_RUNTIMEINCONSISTENCY = "RUNTIMEINCONSISTENCY Exception"; String EXCEPTION_RUNTIMEINCONSISTENCY_DESC = - "Number of failed ops due to a RUNTIMEINCONSISTENCY exception."; + "Number of failed ops due to a RUNTIMEINCONSISTENCY exception."; String EXCEPTION_SESSIONEXPIRED = "SESSIONEXPIRED Exception"; String EXCEPTION_SESSIONEXPIRED_DESC = "Number of failed ops due to a SESSIONEXPIRED exception."; String EXCEPTION_SYSTEMERROR = "SYSTEMERROR Exception"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java index 50ebd46b7166..7b1efb399af1 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,9 +24,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Class that transitions metrics from MetricsZooKeeper into the metrics subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern. + * Class that transitions metrics from MetricsZooKeeper into the metrics subsystem. Implements + * BaseSource through BaseSourceImpl, following the pattern. */ @InterfaceAudience.Private public class MetricsZooKeeperSourceImpl extends BaseSourceImpl implements MetricsZooKeeperSource { @@ -55,34 +53,34 @@ public MetricsZooKeeperSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - //Create and store the metrics that will be used. 
- authFailedFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_AUTHFAILED, EXCEPTION_AUTHFAILED_DESC, 0L); - connectionLossFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_CONNECTIONLOSS, EXCEPTION_CONNECTIONLOSS_DESC, 0L); - dataInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_DATAINCONSISTENCY, EXCEPTION_DATAINCONSISTENCY_DESC, 0L); - invalidACLFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_INVALIDACL, EXCEPTION_INVALIDACL_DESC, 0L); - noAuthFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_NOAUTH, EXCEPTION_NOAUTH_DESC, 0L); - operationTimeOutFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_OPERATIONTIMEOUT, EXCEPTION_OPERATIONTIMEOUT_DESC, 0L); - runtimeInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_RUNTIMEINCONSISTENCY, EXCEPTION_RUNTIMEINCONSISTENCY_DESC, 0L); - sessionExpiredFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_SESSIONEXPIRED, EXCEPTION_SESSIONEXPIRED_DESC, 0L); - systemErrorFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_SYSTEMERROR, EXCEPTION_SYSTEMERROR_DESC, 0L); - totalFailedZKCalls = this.getMetricsRegistry().newGauge( - TOTAL_FAILED_ZK_CALLS, TOTAL_FAILED_ZK_CALLS_DESC, 0L); - - readOpLatency = this.getMetricsRegistry().newHistogram( - READ_OPERATION_LATENCY_NAME, READ_OPERATION_LATENCY_DESC); - writeOpLatency = this.getMetricsRegistry().newHistogram( - WRITE_OPERATION_LATENCY_NAME, WRITE_OPERATION_LATENCY_DESC); - syncOpLatency = this.getMetricsRegistry().newHistogram( - SYNC_OPERATION_LATENCY_NAME, SYNC_OPERATION_LATENCY_DESC); + // Create and store the metrics that will be used. + authFailedFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_AUTHFAILED, EXCEPTION_AUTHFAILED_DESC, 0L); + connectionLossFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_CONNECTIONLOSS, + EXCEPTION_CONNECTIONLOSS_DESC, 0L); + dataInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_DATAINCONSISTENCY, + EXCEPTION_DATAINCONSISTENCY_DESC, 0L); + invalidACLFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_INVALIDACL, EXCEPTION_INVALIDACL_DESC, 0L); + noAuthFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_NOAUTH, EXCEPTION_NOAUTH_DESC, 0L); + operationTimeOutFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_OPERATIONTIMEOUT, + EXCEPTION_OPERATIONTIMEOUT_DESC, 0L); + runtimeInconsistencyFailedOpCount = this.getMetricsRegistry() + .newGauge(EXCEPTION_RUNTIMEINCONSISTENCY, EXCEPTION_RUNTIMEINCONSISTENCY_DESC, 0L); + sessionExpiredFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_SESSIONEXPIRED, + EXCEPTION_SESSIONEXPIRED_DESC, 0L); + systemErrorFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_SYSTEMERROR, EXCEPTION_SYSTEMERROR_DESC, 0L); + totalFailedZKCalls = + this.getMetricsRegistry().newGauge(TOTAL_FAILED_ZK_CALLS, TOTAL_FAILED_ZK_CALLS_DESC, 0L); + + readOpLatency = this.getMetricsRegistry().newHistogram(READ_OPERATION_LATENCY_NAME, + READ_OPERATION_LATENCY_DESC); + writeOpLatency = this.getMetricsRegistry().newHistogram(WRITE_OPERATION_LATENCY_NAME, + WRITE_OPERATION_LATENCY_DESC); + syncOpLatency = this.getMetricsRegistry().newHistogram(SYNC_OPERATION_LATENCY_NAME, + SYNC_OPERATION_LATENCY_DESC); } public void getMetrics(MetricsCollector metricsCollector, boolean all) { @@ -91,7 +89,7 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { } private void clearZKExceptionMetrics() { - //Reset the 
exception metrics. + // Reset the exception metrics. clearMetricIfNotNull(authFailedFailedOpCount); clearMetricIfNotNull(connectionLossFailedOpCount); clearMetricIfNotNull(dataInconsistencyFailedOpCount); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java index 9aa12bab5200..306c0c761aa6 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,19 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2; import org.apache.yetus.audience.InterfaceAudience; /** - * Metrics Histogram interface. Implementing classes will expose computed - * quartile values through the metrics system. + * Metrics Histogram interface. Implementing classes will expose computed quartile values through + * the metrics system. */ @InterfaceAudience.Private public interface MetricHistogram { - //Strings used to create metrics names. + // Strings used to create metrics names. String NUM_OPS_METRIC_NAME = "_num_ops"; String MIN_METRIC_NAME = "_min"; String MAX_METRIC_NAME = "_max"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java index 1366fd0b9205..33b6c0d9a934 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2; import java.util.concurrent.ScheduledExecutorService; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java index 88b491ba3ea1..9b62cd898f61 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl; @@ -32,11 +31,9 @@ /** * JMX caches the beans that have been exported; even after the values are removed from hadoop's - * metrics system the keys and old values will still remain. 
This class stops and restarts the - * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. - * - * This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used - * are package private. + * metrics system the keys and old values will still remain. This class stops and restarts the + * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. This class need to be + * in the o.a.h.metrics2.impl namespace as many of the variables/calls used are package private. */ @InterfaceAudience.Private public final class JmxCacheBuster { @@ -56,7 +53,7 @@ public static void clearJmxCache() { if (LOG.isTraceEnabled()) { LOG.trace("clearing JMX Cache" + StringUtils.stringifyException(new Exception())); } - //If there are more then 100 ms before the executor will run then everything should be merged. + // If there are more then 100 ms before the executor will run then everything should be merged. ScheduledFuture future = fut.get(); if ((future != null && (!future.isDone() && future.getDelay(TimeUnit.MILLISECONDS) > 100))) { // BAIL OUT @@ -104,9 +101,9 @@ public void run() { Thread.sleep(500); DefaultMetricsSystem.instance().start(); } - } catch (Exception exception) { + } catch (Exception exception) { LOG.debug("error clearing the jmx it appears the metrics system hasn't been started", - exception); + exception); } } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java index 723e6d34c1d7..09556707648e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,8 +77,8 @@ public boolean removeObjectName(final String name) { * so far as a Source, thus preventing further re-registration of the source with the same name. * In case of dynamic metrics tied to region-lifecycles, this becomes a problem because we would * like to be able to re-register and remove with the same name. Otherwise, it is resource leak. - * This ugly code manually removes the name from the UniqueNames map. - * TODO: May not be needed for Hadoop versions after YARN-5190. + * This ugly code manually removes the name from the UniqueNames map. TODO: May not be needed for + * Hadoop versions after YARN-5190. 
*/ public void removeSourceName(String name) { if (sourceNamesField == null || mapField == null) { @@ -92,8 +92,9 @@ public void removeSourceName(String name) { } } catch (Exception ex) { if (LOG.isTraceEnabled()) { - LOG.trace("Received exception while trying to access Hadoop Metrics classes via " + - "reflection.", ex); + LOG.trace( + "Received exception while trying to access Hadoop Metrics classes via " + "reflection.", + ex); } } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java index 7a791c92bc1e..354e0e25627f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import java.util.Collection; @@ -29,51 +28,41 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * An optional metrics registry class for creating and maintaining a - * collection of MetricsMutables, making writing metrics source easier. - * NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry with added one - * feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class - * should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry. - * This implementation also provides handy methods for creating metrics - * dynamically. - * Another difference is that metricsMap implementation is substituted with - * thread-safe map, as we allow dynamic metrics additions/removals. + * An optional metrics registry class for creating and maintaining a collection of MetricsMutables, + * making writing metrics source easier. NOTE: this is a copy of + * org.apache.hadoop.metrics2.lib.MetricsRegistry with added one feature: metrics can be removed. + * When HADOOP-8313 is fixed, usages of this class should be substituted with + * org.apache.hadoop.metrics2.lib.MetricsRegistry. This implementation also provides handy methods + * for creating metrics dynamically. Another difference is that metricsMap implementation is + * substituted with thread-safe map, as we allow dynamic metrics additions/removals. 
*/ @InterfaceAudience.Private public class DynamicMetricsRegistry { private static final Logger LOG = LoggerFactory.getLogger(DynamicMetricsRegistry.class); - private final ConcurrentMap metricsMap = - Maps.newConcurrentMap(); - private final ConcurrentMap tagsMap = - Maps.newConcurrentMap(); + private final ConcurrentMap metricsMap = Maps.newConcurrentMap(); + private final ConcurrentMap tagsMap = Maps.newConcurrentMap(); private final MetricsInfo metricsInfo; private final DefaultMetricsSystemHelper helper = new DefaultMetricsSystemHelper(); - private final static String[] histogramSuffixes = new String[]{ - "_num_ops", - "_min", - "_max", - "_median", - "_75th_percentile", - "_90th_percentile", - "_95th_percentile", - "_99th_percentile"}; + private final static String[] histogramSuffixes = new String[] { "_num_ops", "_min", "_max", + "_median", "_75th_percentile", "_90th_percentile", "_95th_percentile", "_99th_percentile" }; /** * Construct the registry with a record name - * @param name of the record of the metrics + * @param name of the record of the metrics */ public DynamicMetricsRegistry(String name) { - this(Interns.info(name,name)); + this(Interns.info(name, name)); } /** * Construct the registry with a metadata object - * @param info the info object for the metrics record/group + * @param info the info object for the metrics record/group */ public DynamicMetricsRegistry(MetricsInfo info) { metricsInfo = info; @@ -88,7 +77,7 @@ public MetricsInfo info() { /** * Get a metric by name - * @param name of the metric + * @param name of the metric * @return the metric object */ public MutableMetric get(String name) { @@ -97,7 +86,7 @@ public MutableMetric get(String name) { /** * Get a tag by name - * @param name of the tag + * @param name of the tag * @return the tag object */ public MetricsTag getTag(String name) { @@ -106,9 +95,9 @@ public MetricsTag getTag(String name) { /** * Create a mutable long integer counter - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new counter object */ public MutableFastCounter newCounter(String name, String desc, long iVal) { @@ -117,8 +106,8 @@ public MutableFastCounter newCounter(String name, String desc, long iVal) { /** * Create a mutable long integer counter - * @param info metadata of the metric - * @param iVal initial value + * @param info metadata of the metric + * @param iVal initial value * @return a new counter object */ public MutableFastCounter newCounter(MetricsInfo info, long iVal) { @@ -128,9 +117,9 @@ public MutableFastCounter newCounter(MetricsInfo info, long iVal) { /** * Create a mutable long integer gauge - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(String name, String desc, long iVal) { @@ -139,8 +128,8 @@ public MutableGaugeLong newGauge(String name, String desc, long iVal) { /** * Create a mutable long integer gauge - * @param info metadata of the metric - * @param iVal initial value + * @param info metadata of the metric + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(MetricsInfo info, long iVal) { @@ -150,36 +139,34 @@ public MutableGaugeLong newGauge(MetricsInfo info, long iVal) { /** * Create a mutable metric with stats 
- * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") - * @param extended produce extended stat (stdev, min/max etc.) if true. + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") + * @param extended produce extended stat (stdev, min/max etc.) if true. * @return a new mutable stat metric object */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName, boolean extended) { - MutableStat ret = - new MutableStat(name, desc, sampleName, valueName, extended); + public MutableStat newStat(String name, String desc, String sampleName, String valueName, + boolean extended) { + MutableStat ret = new MutableStat(name, desc, sampleName, valueName, extended); return addNewMetricIfAbsent(name, ret, MutableStat.class); } /** * Create a mutable metric with stats - * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") * @return a new mutable metric object */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName) { + public MutableStat newStat(String name, String desc, String sampleName, String valueName) { return newStat(name, desc, sampleName, valueName, false); } /** * Create a mutable rate metric - * @param name of the metric + * @param name of the metric * @return a new mutable metric object */ public MutableRate newRate(String name) { @@ -188,7 +175,7 @@ public MutableRate newRate(String name) { /** * Create a mutable rate metric - * @param name of the metric + * @param name of the metric * @param description of the metric * @return a new mutable rate metric object */ @@ -198,9 +185,9 @@ public MutableRate newRate(String name, String description) { /** * Create a mutable rate metric (for throughput measurement) - * @param name of the metric - * @param desc description - * @param extended produce extended stat (stdev/min/max etc.) if true + * @param name of the metric + * @param desc description + * @param extended produce extended stat (stdev/min/max etc.) 
if true * @return a new mutable rate metric object */ public MutableRate newRate(String name, String desc, boolean extended) { @@ -208,8 +195,7 @@ public MutableRate newRate(String name, String desc, boolean extended) { } @InterfaceAudience.Private - public MutableRate newRate(String name, String desc, - boolean extended, boolean returnExisting) { + public MutableRate newRate(String name, String desc, boolean extended, boolean returnExisting) { if (returnExisting) { MutableMetric rate = metricsMap.get(name); if (rate != null) { @@ -217,8 +203,7 @@ public MutableRate newRate(String name, String desc, return (MutableRate) rate; } - throw new MetricsException("Unexpected metrics type "+ rate.getClass() - +" for "+ name); + throw new MetricsException("Unexpected metrics type " + rate.getClass() + " for " + name); } } MutableRate ret = new MutableRate(name, desc, extended); @@ -244,7 +229,7 @@ public MutableHistogram newHistogram(String name, String desc) { MutableHistogram histo = new MutableHistogram(name, desc); return addNewMetricIfAbsent(name, histo, MutableHistogram.class); } - + /** * Create a new histogram with time range counts. * @param name Name of the histogram. @@ -264,7 +249,7 @@ public MutableTimeHistogram newTimeHistogram(String name, String desc) { MutableTimeHistogram histo = new MutableTimeHistogram(name, desc); return addNewMetricIfAbsent(name, histo, MutableTimeHistogram.class); } - + /** * Create a new histogram with size range counts. * @param name Name of the histogram. @@ -285,14 +270,13 @@ public MutableSizeHistogram newSizeHistogram(String name, String desc) { return addNewMetricIfAbsent(name, histo, MutableSizeHistogram.class); } - synchronized void add(String name, MutableMetric metric) { addNewMetricIfAbsent(name, metric, MutableMetric.class); } /** * Add sample to a stat metric by name. - * @param name of the metric + * @param name of the metric * @param value of the snapshot to add */ public void add(String name, long value) { @@ -301,12 +285,10 @@ public void add(String name, long value) { if (m != null) { if (m instanceof MutableStat) { ((MutableStat) m).add(value); + } else { + throw new MetricsException("Unsupported add(value) for metric " + name); } - else { - throw new MetricsException("Unsupported add(value) for metric "+ name); - } - } - else { + } else { metricsMap.put(name, newRate(name)); // default is a rate metric add(name, value); } @@ -323,7 +305,7 @@ public DynamicMetricsRegistry setContext(String name) { /** * Add a tag to the metrics - * @param name of the tag + * @param name of the tag * @param description of the tag * @param value of the tag * @return the registry (for keep adding tags) @@ -334,20 +316,20 @@ public DynamicMetricsRegistry tag(String name, String description, String value) /** * Add a tag to the metrics - * @param name of the tag + * @param name of the tag * @param description of the tag * @param value of the tag - * @param override existing tag if true + * @param override existing tag if true * @return the registry (for keep adding tags) */ public DynamicMetricsRegistry tag(String name, String description, String value, - boolean override) { + boolean override) { return tag(new MetricsInfoImpl(name, description), value, override); } /** * Add a tag to the metrics - * @param info metadata of the tag + * @param info metadata of the tag * @param value of the tag * @param override existing tag if true * @return the registry (for keep adding tags etc.) 
@@ -358,7 +340,7 @@ public DynamicMetricsRegistry tag(MetricsInfo info, String value, boolean overri if (!override) { MetricsTag existing = tagsMap.putIfAbsent(info.name(), tag); if (existing != null) { - throw new MetricsException("Tag "+ info.name() +" already exists!"); + throw new MetricsException("Tag " + info.name() + " already exists!"); } return this; } @@ -394,10 +376,10 @@ public void snapshot(MetricsRecordBuilder builder, boolean all) { } } - @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics()) - .toString(); + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("info", metricsInfo).add("tags", tags()) + .add("metrics", metrics()).toString(); } /** @@ -410,131 +392,125 @@ public void removeMetric(String name) { } public void removeHistogramMetrics(String baseName) { - for (String suffix:histogramSuffixes) { - removeMetric(baseName+suffix); + for (String suffix : histogramSuffixes) { + removeMetric(baseName + suffix); } } /** - * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. - * - * @param gaugeName name of the gauge to create or get. + * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. + * @param gaugeName name of the gauge to create or get. * @param potentialStartingValue value of the new gauge if we have to create it. */ public MutableGaugeLong getGauge(String gaugeName, long potentialStartingValue) { - //Try and get the guage. + // Try and get the guage. MutableMetric metric = metricsMap.get(gaugeName); - //If it's not there then try and put a new one in the storage. + // If it's not there then try and put a new one in the storage. if (metric == null) { - //Create the potential new gauge. - MutableGaugeLong newGauge = new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), - potentialStartingValue); + // Create the potential new gauge. + MutableGaugeLong newGauge = + new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue); - // Try and put the gauge in. This is atomic. + // Try and put the gauge in. This is atomic. metric = metricsMap.putIfAbsent(gaugeName, newGauge); - //If the value we get back is null then the put was successful and we will return that. - //otherwise gaugeLong should contain the thing that was in before the put could be completed. + // If the value we get back is null then the put was successful and we will return that. + // otherwise gaugeLong should contain the thing that was in before the put could be completed. if (metric == null) { return newGauge; } } if (!(metric instanceof MutableGaugeLong)) { - throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeLong"); + throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + + " and not of type MetricMutableGaugeLong"); } return (MutableGaugeLong) metric; } /** - * Get a MetricMutableGaugeInt from the storage. If it is not there atomically put it. - * - * @param gaugeName name of the gauge to create or get. + * Get a MetricMutableGaugeInt from the storage. If it is not there atomically put it. + * @param gaugeName name of the gauge to create or get. * @param potentialStartingValue value of the new gauge if we have to create it. */ public MutableGaugeInt getGaugeInt(String gaugeName, int potentialStartingValue) { - //Try and get the guage. 
+ // Try and get the guage. MutableMetric metric = metricsMap.get(gaugeName); - //If it's not there then try and put a new one in the storage. + // If it's not there then try and put a new one in the storage. if (metric == null) { - //Create the potential new gauge. - MutableGaugeInt newGauge = new MutableGaugeInt(new MetricsInfoImpl(gaugeName, ""), - potentialStartingValue); + // Create the potential new gauge. + MutableGaugeInt newGauge = + new MutableGaugeInt(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue); - // Try and put the gauge in. This is atomic. + // Try and put the gauge in. This is atomic. metric = metricsMap.putIfAbsent(gaugeName, newGauge); - //If the value we get back is null then the put was successful and we will return that. - //otherwise gaugeInt should contain the thing that was in before the put could be completed. + // If the value we get back is null then the put was successful and we will return that. + // otherwise gaugeInt should contain the thing that was in before the put could be completed. if (metric == null) { return newGauge; } } if (!(metric instanceof MutableGaugeInt)) { - throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeInr"); + throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + + " and not of type MetricMutableGaugeInr"); } return (MutableGaugeInt) metric; } /** - * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. - * - * @param counterName Name of the counter to get + * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. + * @param counterName Name of the counter to get * @param potentialStartingValue starting value if we have to create a new counter */ public MutableFastCounter getCounter(String counterName, long potentialStartingValue) { - //See getGauge for description on how this works. + // See getGauge for description on how this works. MutableMetric counter = metricsMap.get(counterName); if (counter == null) { MutableFastCounter newCounter = - new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue); + new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue); counter = metricsMap.putIfAbsent(counterName, newCounter); if (counter == null) { return newCounter; } } - if (!(counter instanceof MutableCounter)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - counterName + " and not of type MutableCounter"); + throw new MetricsException("Metric already exists in registry for metric name: " + counterName + + " and not of type MutableCounter"); } return (MutableFastCounter) counter; } public MutableHistogram getHistogram(String histoName) { - //See getGauge for description on how this works. + // See getGauge for description on how this works. 
MutableMetric histo = metricsMap.get(histoName); if (histo == null) { - MutableHistogram newCounter = - new MutableHistogram(new MetricsInfoImpl(histoName, "")); + MutableHistogram newCounter = new MutableHistogram(new MetricsInfoImpl(histoName, "")); histo = metricsMap.putIfAbsent(histoName, newCounter); if (histo == null) { return newCounter; } } - if (!(histo instanceof MutableHistogram)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - histoName + " and not of type MutableHistogram"); + throw new MetricsException("Metric already exists in registry for metric name: " + histoName + + " and not of type MutableHistogram"); } return (MutableHistogram) histo; } - private T addNewMetricIfAbsent(String name, T ret, + private T addNewMetricIfAbsent(String name, T ret, Class metricClass) { - //If the value we get back is null then the put was successful and we will + // If the value we get back is null then the put was successful and we will // return that. Otherwise metric should contain the thing that was in // before the put could be completed. MutableMetric metric = metricsMap.putIfAbsent(name, ret); @@ -546,19 +522,17 @@ private T addNewMetricIfAbsent(String name, T ret, } @SuppressWarnings("unchecked") - private T returnExistingWithCast(MutableMetric metric, - Class metricClass, String name) { + private T returnExistingWithCast(MutableMetric metric, Class metricClass, String name) { if (!metricClass.isAssignableFrom(metric.getClass())) { - throw new MetricsException("Metric already exists in registry for metric name: " + - name + " and not of type " + metricClass + - " but instead of type " + metric.getClass()); + throw new MetricsException("Metric already exists in registry for metric name: " + name + + " and not of type " + metricClass + " but instead of type " + metric.getClass()); } return (T) metric; } public void clearMetrics() { - for (String name:metricsMap.keySet()) { + for (String name : metricsMap.keySet()) { helper.removeObjectName(name); } metricsMap.clear(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java index d24f23f7f359..68b871ff4a4e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
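The getGauge, getGaugeInt, getCounter and getHistogram methods above all use the same lock-free registration idiom: look the metric up, optimistically build a new one, publish it with ConcurrentMap.putIfAbsent, keep whichever instance won the race, and fail fast if the existing entry has the wrong type. A minimal standalone sketch of that idiom (generic names, not the HBase classes):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;

public final class LazyRegistry<V> {
  private final ConcurrentMap<String, V> metrics = new ConcurrentHashMap<>();

  /** Return the metric registered under name, creating and publishing it atomically if absent. */
  public <T extends V> T getOrCreate(String name, Supplier<T> factory, Class<T> type) {
    V existing = metrics.get(name);
    if (existing == null) {
      T created = factory.get();
      // putIfAbsent returns null when our instance won the race, otherwise the prior value.
      existing = metrics.putIfAbsent(name, created);
      if (existing == null) {
        return created;
      }
    }
    if (!type.isInstance(existing)) {
      throw new IllegalStateException("Metric " + name + " already registered as "
        + existing.getClass() + ", not " + type);
    }
    return type.cast(existing);
  }
}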
*/ - package org.apache.hadoop.metrics2.lib; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.yetus.audience.InterfaceAudience; /** - * Class to handle the ScheduledExecutorService{@link ScheduledExecutorService} used by - * MetricsRegionAggregateSourceImpl, and - * JmxCacheBuster + * Class to handle the ScheduledExecutorService{@link ScheduledExecutorService} used by + * MetricsRegionAggregateSourceImpl, and JmxCacheBuster */ @InterfaceAudience.Private public class MetricsExecutorImpl implements MetricsExecutor { @@ -48,8 +45,9 @@ public void stop() { private enum ExecutorSingleton { INSTANCE; - private final transient ScheduledExecutorService scheduler = new ScheduledThreadPoolExecutor(1, - new ThreadPoolExecutorThreadFactory("HBase-Metrics2-")); + + private final transient ScheduledExecutorService scheduler = + new ScheduledThreadPoolExecutor(1, new ThreadPoolExecutorThreadFactory("HBase-Metrics2-")); } private final static class ThreadPoolExecutorThreadFactory implements ThreadFactory { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java index 7b5ec024a508..f8f8aee35501 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java index dc86ebe8bf76..fc7ab8cd4c85 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
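MetricsExecutorImpl above keeps a single shared ScheduledThreadPoolExecutor inside an enum constant, the standard Java idiom for a lazily initialized, thread-safe singleton. A rough sketch of the same pattern with a daemon thread factory (class and thread names here are illustrative, not the HBase ones):

import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public final class SharedScheduler {
  private enum Holder {
    INSTANCE;

    // Created once, when the enum class is initialized; the JVM guarantees thread safety.
    private final ScheduledExecutorService scheduler =
      new ScheduledThreadPoolExecutor(1, new DaemonFactory("Metrics-Scheduler-"));
  }

  private static final class DaemonFactory implements ThreadFactory {
    private final String prefix;
    private final AtomicInteger count = new AtomicInteger(0);

    DaemonFactory(String prefix) {
      this.prefix = prefix;
    }

    @Override
    public Thread newThread(Runnable r) {
      Thread t = new Thread(r, prefix + count.incrementAndGet());
      t.setDaemon(true); // do not keep the JVM alive for metrics housekeeping
      return t;
    }
  }

  public static ScheduledExecutorService get() {
    return Holder.INSTANCE.scheduler;
  }

  private SharedScheduler() {
  }
}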
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.commons.lang3.StringUtils; @@ -51,7 +50,8 @@ public void add(final long val) { histogram.update(val); } - @Override public long getCount() { + @Override + public long getCount() { return histogram.getCount(); } @@ -65,7 +65,7 @@ public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boo } public static void snapshot(String name, String desc, Histogram histogram, - MetricsRecordBuilder metricsRecordBuilder, boolean all) { + MetricsRecordBuilder metricsRecordBuilder, boolean all) { // Get a reference to the old histogram. Snapshot snapshot = histogram.snapshot(); if (snapshot != null) { @@ -76,27 +76,27 @@ public static void snapshot(String name, String desc, Histogram histogram, protected static void updateSnapshotMetrics(String name, String desc, Histogram histogram, Snapshot snapshot, MetricsRecordBuilder metricsRecordBuilder) { metricsRecordBuilder.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc), - histogram.getCount()); + histogram.getCount()); metricsRecordBuilder.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), snapshot.getMin()); metricsRecordBuilder.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), snapshot.getMax()); metricsRecordBuilder.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), snapshot.getMean()); metricsRecordBuilder.addGauge(Interns.info(name + TWENTY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get25thPercentile()); + snapshot.get25thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc), - snapshot.getMedian()); + snapshot.getMedian()); metricsRecordBuilder.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get75thPercentile()); + snapshot.get75thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get90thPercentile()); + snapshot.get90thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get95thPercentile()); + snapshot.get95thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_EIGHTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get98thPercentile()); + snapshot.get98thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get99thPercentile()); + snapshot.get99thPercentile()); metricsRecordBuilder.addGauge( - Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get999thPercentile()); + Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc), + snapshot.get999thPercentile()); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java index 507e95400264..6146c53e1404 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
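updateSnapshotMetrics above fans one histogram snapshot out into a family of suffixed metrics (_num_ops as a counter, then min/max/mean/median and the percentile gauges), which is also why DynamicMetricsRegistry.removeHistogramMetrics has to strip a matching list of suffixes. A hedged sketch of that fan-out, using a plain Map in place of MetricsRecordBuilder and an invented stats holder in place of the snapshot class:

import java.util.LinkedHashMap;
import java.util.Map;

public final class HistogramFanOut {
  /** Minimal stand-in for the snapshot values we need; the real class exposes similar getters. */
  public static final class Stats {
    final long count;
    final long min;
    final long max;
    final double p50;
    final double p99;

    public Stats(long count, long min, long max, double p50, double p99) {
      this.count = count;
      this.min = min;
      this.max = max;
      this.p50 = p50;
      this.p99 = p99;
    }
  }

  /** Emit one value per suffix so that "fooTime" becomes fooTime_num_ops, fooTime_min, ... */
  public static Map<String, Number> emit(String name, Stats s) {
    Map<String, Number> out = new LinkedHashMap<>();
    out.put(name + "_num_ops", s.count);
    out.put(name + "_min", s.min);
    out.put(name + "_max", s.max);
    out.put(name + "_median", s.p50);
    out.put(name + "_99th_percentile", s.p99);
    return out;
  }
}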
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.hbase.metrics.Interns; @@ -40,16 +39,15 @@ public MutableRangeHistogram(String name, String description) { } /** - * Returns the type of range histogram size or time + * Returns the type of range histogram size or time */ public abstract String getRangeType(); - + /** - * Returns the ranges to be counted + * Returns the ranges to be counted */ public abstract long[] getRanges(); - @Override public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) { // Get a reference to the old histogram. @@ -61,7 +59,7 @@ public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boo } public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder, - Snapshot snapshot) { + Snapshot snapshot) { long priorRange = 0; long cumNum = 0; @@ -71,8 +69,8 @@ public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder long val = snapshot.getCountAtOrBelow(ranges[i]); if (val - cumNum > 0) { metricsRecordBuilder.addCounter( - Interns.info(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], desc), - val - cumNum); + Interns.info(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], desc), + val - cumNum); } priorRange = ranges[i]; cumNum = val; @@ -80,12 +78,12 @@ public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder long val = snapshot.getCount(); if (val - cumNum > 0) { metricsRecordBuilder.addCounter( - Interns.info(name + "_" + rangeType + "_" + priorRange + "-inf", desc), - val - cumNum); + Interns.info(name + "_" + rangeType + "_" + priorRange + "-inf", desc), val - cumNum); } } - @Override public long getCount() { + @Override + public long getCount() { return histogram.getCount(); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java index b02efb76f9d8..b682042ac1cc 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
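updateSnapshotRangeMetrics above converts cumulative counts (how many samples fell at or below each boundary) into disjoint per-bucket counters by subtracting the running total, skips empty buckets, and labels the overflow bucket priorRange-inf. A self-contained sketch of that arithmetic, with a hypothetical cumulative-count function standing in for the snapshot:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.LongUnaryOperator;

public final class RangeBuckets {
  /**
   * @param name           metric base name, e.g. "flushTime"
   * @param rangeType      e.g. "TimeRangeCount" or "SizeRangeCount"
   * @param ranges         ascending bucket boundaries
   * @param countAtOrBelow cumulative count of samples at or below the given boundary
   * @param total          total number of samples in the snapshot
   */
  public static Map<String, Long> bucketCounters(String name, String rangeType, long[] ranges,
      LongUnaryOperator countAtOrBelow, long total) {
    Map<String, Long> counters = new LinkedHashMap<>();
    long priorRange = 0;
    long cumulative = 0;
    for (long boundary : ranges) {
      long atOrBelow = countAtOrBelow.applyAsLong(boundary);
      if (atOrBelow - cumulative > 0) { // skip empty buckets entirely
        counters.put(name + "_" + rangeType + "_" + priorRange + "-" + boundary,
          atOrBelow - cumulative);
      }
      priorRange = boundary;
      cumulative = atOrBelow;
    }
    if (total - cumulative > 0) { // overflow bucket for samples above the last boundary
      counters.put(name + "_" + rangeType + "_" + priorRange + "-inf", total - cumulative);
    }
    return counters;
  }
}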
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.metrics2.MetricsInfo; @@ -28,7 +27,8 @@ public class MutableSizeHistogram extends MutableRangeHistogram { private final static String RANGE_TYPE = "SizeRangeCount"; - private final static long[] RANGES = {10,100,1000,10000,100000,1000000,10000000,100000000}; + private final static long[] RANGES = + { 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000 }; public MutableSizeHistogram(MetricsInfo info) { this(info.name(), info.description()); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java index 7c6dfbbd5776..03cf8bd291ba 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.metrics2.MetricsInfo; @@ -28,7 +27,7 @@ public class MutableTimeHistogram extends MutableRangeHistogram { private final static String RANGE_TYPE = "TimeRangeCount"; private final static long[] RANGES = - { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 }; + { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 }; public MutableTimeHistogram(MetricsInfo info) { this(info.name(), info.description()); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java index 84a76edf72e9..237454e4d7f8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java @@ -20,8 +20,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Specifies a quantile (with error bounds) to be watched by a - * {@link MetricSampleQuantiles} object. + * Specifies a quantile (with error bounds) to be watched by a {@link MetricSampleQuantiles} object. */ @InterfaceAudience.Private public class MetricQuantile { @@ -59,7 +58,6 @@ public boolean equals(Object aThat) { @Override public int hashCode() { - return (int) (Double.doubleToLongBits(quantile) ^ Double - .doubleToLongBits(error)); + return (int) (Double.doubleToLongBits(quantile) ^ Double.doubleToLongBits(error)); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java index c1880f8203ba..e023a34d10b0 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
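A MetricQuantile pairs a target quantile with an allowed rank error, e.g. "the 99th percentile, accurate to within 0.1 % of the item count". The arithmetic below, using an illustrative holder type rather than the real class, shows what that tolerance means for a query over n items (the acceptable answer is any sample whose rank falls inside the window):

public final class QuantileWindow {
  /** Illustrative counterpart of MetricQuantile: target quantile plus allowed rank error. */
  public static final class Target {
    final double quantile;
    final double error;

    public Target(double quantile, double error) {
      this.quantile = quantile;
      this.error = error;
    }
  }

  /** Lowest and highest acceptable rank for the target over n observed items. */
  public static long[] acceptableRanks(Target t, long n) {
    long desired = (long) (t.quantile * n);
    long slack = (long) (t.error * n);
    return new long[] { desired - slack, desired + slack };
  }

  public static void main(String[] args) {
    // p99 with 0.1% rank error over one million samples: any sample ranked
    // between 989,000 and 991,000 is an acceptable answer.
    long[] window = acceptableRanks(new Target(0.99, 0.001), 1_000_000L);
    System.out.println(window[0] + " .. " + window[1]);
  }
}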
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.util; import java.io.IOException; @@ -24,24 +23,16 @@ import java.util.LinkedList; import java.util.ListIterator; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm - * for streaming calculation of targeted high-percentile epsilon-approximate - * quantiles. - * - * This is a generalization of the earlier work by Greenwald and Khanna (GK), - * which essentially allows different error bounds on the targeted quantiles, - * which allows for far more efficient calculation of high-percentiles. - * - * See: Cormode, Korn, Muthukrishnan, and Srivastava - * "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005 - * - * Greenwald and Khanna, - * "Space-efficient online computation of quantile summaries" in SIGMOD 2001 - * + * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm for streaming + * calculation of targeted high-percentile epsilon-approximate quantiles. This is a generalization + * of the earlier work by Greenwald and Khanna (GK), which essentially allows different error bounds + * on the targeted quantiles, which allows for far more efficient calculation of high-percentiles. + * See: Cormode, Korn, Muthukrishnan, and Srivastava "Effective Computation of Biased Quantiles over + * Data Streams" in ICDE 2005 Greenwald and Khanna, "Space-efficient online computation of quantile + * summaries" in SIGMOD 2001 */ @InterfaceAudience.Private public class MetricSampleQuantiles { @@ -57,9 +48,8 @@ public class MetricSampleQuantiles { private LinkedList samples; /** - * Buffers incoming items to be inserted in batch. Items are inserted into - * the buffer linearly. When the buffer fills, it is flushed into the samples - * array in its entirety. + * Buffers incoming items to be inserted in batch. Items are inserted into the buffer linearly. + * When the buffer fills, it is flushed into the samples array in its entirety. */ private long[] buffer = new long[500]; private int bufferCount = 0; @@ -75,14 +65,10 @@ public MetricSampleQuantiles(MetricQuantile[] quantiles) { } /** - * Specifies the allowable error for this rank, depending on which quantiles - * are being targeted. - * - * This is the f(r_i, n) function from the CKMS paper. It's basically how wide - * the range of this rank can be. - * - * @param rank - * the index in the list of samples + * Specifies the allowable error for this rank, depending on which quantiles are being targeted. + * This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this + * rank can be. + * @param rank the index in the list of samples */ private double allowableError(int rank) { int size = samples.size(); @@ -104,7 +90,6 @@ private double allowableError(int rank) { /** * Add a new value from the stream. - * * @param v the value to insert */ synchronized public void insert(long v) { @@ -120,8 +105,8 @@ synchronized public void insert(long v) { } /** - * Merges items from buffer into the samples array in one pass. - * This is more efficient than doing an insert on every item. + * Merges items from buffer into the samples array in one pass. This is more efficient than doing + * an insert on every item. 
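The insertBatch described above amortises insertion cost: the 500-element buffer is sorted once and then merged into the already-sorted sample list in a single forward pass, instead of walking the list separately for every new value. A generic sketch of such a one-pass merge, assuming only that both sequences are kept ascending (this shows the technique, not the HBase method verbatim):

import java.util.Arrays;
import java.util.LinkedList;
import java.util.ListIterator;

public final class SortedMerge {
  /** Merge bufferCount pending values into the sorted sample list in one pass. */
  public static void insertBatch(LinkedList<Long> samples, long[] buffer, int bufferCount) {
    if (bufferCount == 0) {
      return;
    }
    Arrays.sort(buffer, 0, bufferCount); // sort pending values once
    ListIterator<Long> it = samples.listIterator();
    int i = 0;
    while (i < bufferCount) {
      long value = buffer[i];
      // Advance through the samples until we reach the insertion point for this value.
      while (it.hasNext()) {
        long next = it.next();
        if (next >= value) {
          it.previous(); // step back so add() inserts before the larger element
          break;
        }
      }
      it.add(value);
      i++;
    }
  }
}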
*/ private void insertBatch() { if (bufferCount == 0) { @@ -166,9 +151,8 @@ private void insertBatch() { } /** - * Try to remove extraneous items from the set of sampled items. This checks - * if an item is unnecessary based on the desired error bounds, and merges it - * with the adjacent item if it is. + * Try to remove extraneous items from the set of sampled items. This checks if an item is + * unnecessary based on the desired error bounds, and merges it with the adjacent item if it is. */ private void compress() { if (samples.size() < 2) { @@ -196,7 +180,6 @@ private void compress() { /** * Get the estimated value at the specified quantile. - * * @param quantile Queried quantile, e.g. 0.50 or 0.99. * @return Estimated value at that quantile. */ @@ -225,10 +208,8 @@ private long query(double quantile) throws IOException { /** * Get a snapshot of the current values of all the tracked quantiles. - * * @return snapshot of the tracked quantiles - * @throws IOException - * if no items have been added to the estimator + * @throws IOException if no items have been added to the estimator */ synchronized public Map snapshot() throws IOException { // flush the buffer first for best results @@ -243,7 +224,6 @@ synchronized public Map snapshot() throws IOException { /** * Returns the number of items that the estimator has processed - * * @return count total number of items processed */ synchronized public long getCount() { @@ -252,7 +232,6 @@ synchronized public long getCount() { /** * Returns the number of samples kept by the estimator - * * @return count current number of samples */ synchronized public int getSampleCount() { @@ -269,27 +248,24 @@ synchronized public void clear() { } /** - * Describes a measured value passed to the estimator, tracking additional - * metadata required by the CKMS algorithm. + * Describes a measured value passed to the estimator, tracking additional metadata required by + * the CKMS algorithm. */ private static class SampleItem { - + /** * Value of the sampled item (e.g. a measured latency value) */ private final long value; - + /** - * Difference between the lowest possible rank of the previous item, and - * the lowest possible rank of this item. - * - * The sum of the g of all previous items yields this item's lower bound. + * Difference between the lowest possible rank of the previous item, and the lowest possible + * rank of this item. The sum of the g of all previous items yields this item's lower bound. */ private int g; - + /** - * Difference between the item's greatest possible rank and lowest possible - * rank. + * Difference between the item's greatest possible rank and lowest possible rank. */ private final int delta; diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java index 157327babb28..cb56b7faf58c 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,23 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; - /** * A compatibility shim layer for interacting with different versions of Hadoop. 
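Putting MetricSampleQuantiles together, a typical use is to declare the targeted quantiles, feed observations through insert(), and read snapshot(). The constructor argument and method names below appear in the diff, but the exact generic types and the MetricQuantile constructor signature are assumptions:

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.metrics2.util.MetricQuantile;
import org.apache.hadoop.metrics2.util.MetricSampleQuantiles;

public final class QuantileEstimatorExample {
  public static void main(String[] args) throws IOException {
    // Track the median, 90th and 99th percentiles, with tighter error for higher quantiles.
    MetricQuantile[] targets = new MetricQuantile[] { new MetricQuantile(0.50, 0.050),
      new MetricQuantile(0.90, 0.010), new MetricQuantile(0.99, 0.001) };
    MetricSampleQuantiles estimator = new MetricSampleQuantiles(targets);

    // Feed a stream of (fake) latency observations; the estimator buffers and compresses them.
    for (long latencyMs = 1; latencyMs <= 10_000; latencyMs++) {
      estimator.insert(latencyMs);
    }

    // snapshot() flushes the buffer and returns one estimated value per configured quantile.
    Map<MetricQuantile, Long> estimates = estimator.snapshot();
    estimates.forEach((q, v) -> System.out.println(q + " -> " + v));
  }
}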
*/ -//NOTE: we can move this under src/main if main code wants to use this shim layer +// NOTE: we can move this under src/main if main code wants to use this shim layer public interface HadoopShims { /** * Returns a TaskAttemptContext instance created from the given parameters. * @param job an instance of o.a.h.mapreduce.Job * @param taskId an identifier for the task attempt id. Should be parsable by - * TaskAttemptId.forName() + * TaskAttemptId.forName() * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext */ - T createTestTaskAttemptContext(final J job, final String taskId); + T createTestTaskAttemptContext(final J job, final String taskId); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java index a022ef3e0183..ee9206af566f 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.hadoop.mapreduce.Job; @@ -30,13 +29,13 @@ public class HadoopShimsImpl implements HadoopShims { * Returns a TaskAttemptContext instance created from the given parameters. * @param job an instance of o.a.h.mapreduce.Job * @param taskId an identifier for the task attempt id. Should be parsable by - * {@link TaskAttemptID#forName(String)} + * {@link TaskAttemptID#forName(String)} * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext */ @Override @SuppressWarnings("unchecked") public T createTestTaskAttemptContext(J job, String taskId) { - Job j = (Job)job; - return (T)new TaskAttemptContextImpl(j.getConfiguration(), TaskAttemptID.forName(taskId)); + Job j = (Job) job; + return (T) new TaskAttemptContextImpl(j.getConfiguration(), TaskAttemptID.forName(taskId)); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java index f72843cc4b01..8ea3da856af0 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
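HadoopShimsImpl above simply adapts the generic shim contract onto the concrete mapreduce classes. A hedged usage sketch; the task attempt id string is only an illustrative well-formed value of the attempt_... format that TaskAttemptID.forName() expects:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HadoopShims;
import org.apache.hadoop.hbase.HadoopShimsImpl;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public final class ShimExample {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration());
    HadoopShims shims = new HadoopShimsImpl();
    // The shim hides which concrete TaskAttemptContext implementation the Hadoop version provides.
    TaskAttemptContext context =
      shims.createTestTaskAttemptContext(job, "attempt_200707121733_0003_m_000005_0");
    System.out.println(context.getTaskAttemptID());
  }
}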
*/ - package org.apache.hadoop.hbase; public interface RandomStringGenerator { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java index 91cd19ef009c..eb9083a43021 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,10 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; - import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; @@ -27,8 +25,8 @@ public class RandomStringGeneratorImpl implements RandomStringGenerator { private final String s; public RandomStringGeneratorImpl() { - s = new UUID(ThreadLocalRandom.current().nextLong(), - ThreadLocalRandom.current().nextLong()).toString(); + s = new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()) + .toString(); } @Override diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java index 27888db0f6d2..eec399c1ca68 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestCompatibilitySingletonFactory { @ClassRule @@ -47,8 +47,7 @@ private class TestCompatibilitySingletonFactoryCallable implements Callable callables = new ArrayList<>(ITERATIONS); List resultStrings = new ArrayList<>(ITERATIONS); - // Create the callables. for (int i = 0; i < ITERATIONS; i++) { callables.add(new TestCompatibilitySingletonFactoryCallable()); @@ -77,7 +75,6 @@ public void testGetInstance() throws Exception { // Get the first string. String firstString = resultStrings.get(0); - // Assert that all the strings are equal to the fist. for (String s : resultStrings) { assertEquals(firstString, s); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java index d95c282ecf99..ee1092d6fafe 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
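TestCompatibilitySingletonFactory above checks thread safety by racing many callables at getInstance() and requiring that every thread observed the same instance (identified by its random string). A reduced sketch of that technique, with an invented interface standing in for RandomStringGenerator and the factory lookup passed in as a callable:

import static org.junit.Assert.assertEquals;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public final class SingletonRaceCheck {
  /** Stand-in for CompatibilitySingletonFactory.getInstance(RandomStringGenerator.class). */
  interface IdSource {
    String id();
  }

  static void assertSingleInstance(Callable<IdSource> lookup, int threads) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    try {
      List<Future<String>> results = new ArrayList<>();
      for (int i = 0; i < threads; i++) {
        results.add(pool.submit(() -> lookup.call().id())); // every thread resolves the singleton
      }
      String first = results.get(0).get();
      for (Future<String> f : results) {
        assertEquals(first, f.get()); // all threads must have seen the same instance's id
      }
    } finally {
      pool.shutdown();
    }
  }
}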
See the NOTICE file * distributed with this work for additional information @@ -29,9 +29,9 @@ import org.junit.experimental.categories.Category; /** - * Test for MetricsMasterProcSourceImpl + * Test for MetricsMasterProcSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterProcSourceImpl { @ClassRule @@ -40,12 +40,12 @@ public class TestMetricsMasterProcSourceImpl { @Test public void testGetInstance() throws Exception { - MetricsMasterProcSourceFactory metricsMasterProcSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterProcSourceFactory.class); + MetricsMasterProcSourceFactory metricsMasterProcSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class); MetricsMasterProcSource masterProcSource = metricsMasterProcSourceFactory.create(null); assertTrue(masterProcSource instanceof MetricsMasterProcSourceImpl); assertSame(metricsMasterProcSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class)); + CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class)); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java index 7d35e846b5c0..3837b5e20107 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMetricsMasterSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsMasterSourceFactory.class); + HBaseClassTestRule.forClass(TestMetricsMasterSourceFactory.class); @Test public void testGetInstance() throws Exception { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java index 70ec90ab39a2..cbb031a7e6b2 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java @@ -29,9 +29,9 @@ import org.junit.experimental.categories.Category; /** - * Test for MetricsMasterSourceImpl + * Test for MetricsMasterSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -39,11 +39,11 @@ public class TestMetricsMasterSourceImpl { @Test public void testGetInstance() { - MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterSourceFactory.class); + MetricsMasterSourceFactory metricsMasterSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class); MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null); assertTrue(masterSource instanceof MetricsMasterSourceImpl); - assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance( - MetricsMasterSourceFactory.class)); + assertSame(metricsMasterSourceFactory, + CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class)); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java index 063071b43173..8a249e550892 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,9 +31,9 @@ import org.junit.experimental.categories.Category; /** - * Test of default BaseSource for hadoop 2 + * Test of default BaseSource for hadoop 2 */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestBaseSourceImpl { @ClassRule diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java index dbdc92da8ac4..029b75e52cf3 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.HashMap; diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java index c7594b4ff3b3..d29e6736cc5d 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ public class TestMetricsRegionServerSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRegionServerSourceFactory.class); + HBaseClassTestRule.forClass(TestMetricsRegionServerSourceFactory.class); @Test public void testGetInstance() throws Exception { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java index 86a94baf72fd..b67162fdf99b 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java @@ -28,7 +28,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionServerSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -38,14 +38,12 @@ public class TestMetricsRegionServerSourceImpl { public void testGetInstance() { MetricsRegionServerSourceFactory metricsRegionServerSourceFactory = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - MetricsRegionServerSource serverSource = - metricsRegionServerSourceFactory.createServer(null); + MetricsRegionServerSource serverSource = metricsRegionServerSourceFactory.createServer(null); assertTrue(serverSource instanceof MetricsRegionServerSourceImpl); assertSame(metricsRegionServerSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); } - @Test(expected = RuntimeException.class) public void testNoGetRegionServerMetricsSourceImpl() { // This should throw an exception because MetricsRegionServerSourceImpl should only diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java index 598658a56ccc..d6df5038eeba 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import 
org.apache.hadoop.hbase.testclassification.MetricsTests; @@ -31,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -40,8 +39,8 @@ public class TestMetricsRegionSourceImpl { @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCodeEquals() { - MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance( - MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST")); MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST")); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java index 11177edcafb3..b6e8b17d7dcf 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java @@ -30,9 +30,9 @@ import org.junit.experimental.categories.Category; /** - * Test for MetricsTableSourceImpl + * Test for MetricsTableSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsTableSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -44,13 +44,12 @@ public void testCompareToHashCode() throws Exception { MetricsRegionServerSourceFactory metricsFact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - MetricsTableSource one = metricsFact.createTable( - "ONETABLE", new MetricsTableWrapperStub("ONETABLE")); - MetricsTableSource oneClone = metricsFact.createTable( - "ONETABLE", - new MetricsTableWrapperStub("ONETABLE")); - MetricsTableSource two = metricsFact.createTable( - "TWOTABLE", new MetricsTableWrapperStub("TWOTABLE")); + MetricsTableSource one = + metricsFact.createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + MetricsTableSource oneClone = + metricsFact.createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + MetricsTableSource two = + metricsFact.createTable("TWOTABLE", new MetricsTableWrapperStub("TWOTABLE")); assertEquals(0, one.compareTo(oneClone)); assertEquals(one.hashCode(), oneClone.hashCode()); @@ -73,7 +72,7 @@ public void testNoGetTableMetricsSourceImpl() { public void testGetTableMetrics() { MetricsTableSource oneTbl = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) - .createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + .createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); assertEquals("ONETABLE", oneTbl.getTableName()); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java index 8a72961edadc..e770c840eb17 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java +++ 
b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; @@ -30,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsUserSourceImpl { @ClassRule @@ -40,8 +39,8 @@ public class TestMetricsUserSourceImpl { @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCodeEquals() throws Exception { - MetricsRegionServerSourceFactory fact - = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsUserSource one = fact.createUser("ONE"); MetricsUserSource oneClone = fact.createUser("ONE"); @@ -57,8 +56,7 @@ public void testCompareToHashCodeEquals() throws Exception { assertTrue(two.compareTo(two) == 0); } - - @Test (expected = RuntimeException.class) + @Test(expected = RuntimeException.class) public void testNoGetRegionServerMetricsSourceImpl() throws Exception { // This should throw an exception because MetricsUserSourceImpl should only // be created by a factory. @@ -67,8 +65,8 @@ public void testNoGetRegionServerMetricsSourceImpl() throws Exception { @Test public void testGetUser() { - MetricsRegionServerSourceFactory fact - = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsUserSource one = fact.createUser("ONE"); assertEquals("ONE", one.getUser()); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java index e6ffbc98f321..d3ad7431cefc 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ public class TestMetricsWALSource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsWALSource.class); + HBaseClassTestRule.forClass(TestMetricsWALSource.class); @Test public void testGetInstance() throws Exception { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java index d8ec0af92bb0..e6c785ac9500 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsWALSourceImpl { @ClassRule @@ -37,10 +37,8 @@ public class TestMetricsWALSourceImpl { @Test public void testGetInstance() throws Exception { - MetricsWALSource walSource = - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); + MetricsWALSource walSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); assertTrue(walSource instanceof MetricsWALSourceImpl); - assertSame(walSource, - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); + assertSame(walSource, CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java index 68f6fda9ee80..2a580f597201 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMetricsReplicationSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsReplicationSourceFactory.class); + HBaseClassTestRule.forClass(TestMetricsReplicationSourceFactory.class); @Test public void testGetInstance() throws Exception { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java index 6cc26e2a4dd1..3b2403c23d20 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceFactoryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -35,8 +35,8 @@ public class TestMetricsReplicationSourceFactoryImpl { @Test public void testGetInstance() { - MetricsReplicationSourceFactory rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class); + MetricsReplicationSourceFactory rms = + CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class); assertTrue(rms instanceof MetricsReplicationSourceFactoryImpl); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java index faff4b389176..b29280166964 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -35,8 +35,8 @@ public class TestMetricsReplicationSourceImpl { @Test public void testGetInstance() throws Exception { - MetricsReplicationSource rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSource.class); + MetricsReplicationSource rms = + CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); assertTrue(rms instanceof MetricsReplicationSourceImpl); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java index 57acdcb4539f..f508aead1072 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMetricsRESTSource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsRESTSource.class); + HBaseClassTestRule.forClass(TestMetricsRESTSource.class); @Test public void testGetInstanceNoHadoopCompat() throws Exception { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java index 2ac7996485e4..950d8ba2bcb5 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java @@ -31,7 +31,7 @@ /** * Test for hadoop 2's version of {@link MetricsRESTSource}. */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRESTSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -40,7 +40,7 @@ public class TestMetricsRESTSourceImpl { @Test public void ensureCompatRegistered() { assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) - instanceof MetricsRESTSourceImpl); + assertTrue(CompatibilitySingletonFactory + .getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java index 49d25723b880..bcd7d9dfcefb 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.test; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -24,157 +23,141 @@ public interface MetricsAssertHelper { /** - * Init helper. This method will make sure that the metrics system is set - * up for tests. + * Init helper. This method will make sure that the metrics system is set up for tests. */ void init(); /** * Assert that a tag exists and has a given value. - * - * @param name The name of the tag. + * @param name The name of the tag. * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertTag(String name, String expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. 
*/ void assertGauge(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeGt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeLt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGauge(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeGt(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeLt(String name, double expected, BaseSource source); /** * Assert that a counter exists and that it's value is equal to the expected value. - * - * @param name The name of the counter. + * @param name The name of the counter. * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounter(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is greater than the given value. - * - * @param name The name of the counter. + * @param name The name of the counter. * @param expected The value the counter is expected to be greater than. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. 
*/ void assertCounterGt(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is less than the given value. - * - * @param name The name of the counter. + * @param name The name of the counter. * @param expected The value the counter is expected to be less than. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounterLt(String name, long expected, BaseSource source); /** * Get the value of a counter. - * - * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the counter. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return long value of the counter. */ long getCounter(String name, BaseSource source); /** * Check if a dynamic counter exists. - * - * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the counter. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return boolean true if counter metric exists. */ boolean checkCounterExists(String name, BaseSource source); /** * Check if a gauge exists. - * - * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the gauge. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return boolean true if gauge metric exists. */ boolean checkGaugeExists(String name, BaseSource source); /** * Get the value of a gauge as a double. - * - * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the gauge. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return double value of the gauge. */ double getGaugeDouble(String name, BaseSource source); /** * Get the value of a gauge as a long. - * - * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the gauge. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return long value of the gauge. */ long getGaugeLong(String name, BaseSource source); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java index 83e25a636f07..7c7357c4f049 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
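Editor's note: MetricsAssertHelper above only declares the assertion surface, so a short usage sketch may help. It assumes the helper is looked up through CompatibilitySingletonFactory (the same mechanism the other compat classes in this patch use); the metric names are hypothetical, while the method signatures are the ones visible in the hunks above:

    import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
    import org.apache.hadoop.hbase.metrics.BaseSource;
    import org.apache.hadoop.hbase.test.MetricsAssertHelper;

    public final class MetricsAssertionsSketch {

      private static final MetricsAssertHelper HELPER =
        CompatibilitySingletonFactory.getInstance(MetricsAssertHelper.class);

      private MetricsAssertionsSketch() {
      }

      // Verifies a few metrics on the given source; the metric names are made up for the example.
      public static void verify(BaseSource source) {
        HELPER.init(); // make sure the metrics system is set up for tests
        HELPER.assertTag("context", "regionserver", source); // tag equals the expected string
        HELPER.assertCounter("totalRequestCount", 42L, source); // counter equals the expected value
        HELPER.assertGaugeGt("usedHeapSize", 0L, source); // gauge strictly greater than zero
        if (HELPER.checkCounterExists("readRequestCount", source)) {
          long reads = HELPER.getCounter("readRequestCount", source); // raw counter value
          System.out.println("readRequestCount=" + reads);
        }
      }
    }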
*/ - package org.apache.hadoop.hbase.test; import static org.junit.Assert.assertEquals; @@ -25,7 +24,6 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; - import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsCollector; @@ -36,7 +34,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; /** - * A helper class that will allow tests to get into hadoop2's metrics2 values. + * A helper class that will allow tests to get into hadoop2's metrics2 values. */ public class MetricsAssertHelperImpl implements MetricsAssertHelper { private Map tags = new HashMap<>(); @@ -203,8 +201,8 @@ public void assertCounterLt(String name, long expected, BaseSource source) { public long getCounter(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); - assertNotNull("Should get counter "+cName + " but did not",counters.get(cName)); - return counters.get(cName).longValue(); + assertNotNull("Should get counter " + cName + " but did not", counters.get(cName)); + return counters.get(cName).longValue(); } @Override @@ -225,8 +223,8 @@ public boolean checkGaugeExists(String name, BaseSource source) { public double getGaugeDouble(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); - assertNotNull("Should get gauge "+cName + " but did not",gauges.get(cName)); - return gauges.get(cName).doubleValue(); + assertNotNull("Should get gauge " + cName + " but did not", gauges.get(cName)); + return gauges.get(cName).doubleValue(); } @Override diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java index 0b9c0f12e719..533fcbbe9d35 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMetricsThriftServerSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsThriftServerSourceFactory.class); + HBaseClassTestRule.forClass(TestMetricsThriftServerSourceFactory.class); @Test public void testGetInstance() { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java index 7206810ab138..295d6f800f59 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java @@ -32,7 +32,7 @@ /** * Test for hadoop 2's version of MetricsThriftServerSourceFactory. 
*/ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsThriftServerSourceFactoryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -40,24 +40,24 @@ public class TestMetricsThriftServerSourceFactoryImpl { @Test public void testCompatabilityRegistered() { - assertNotNull(CompatibilitySingletonFactory.getInstance( - MetricsThriftServerSourceFactory.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) - instanceof MetricsThriftServerSourceFactoryImpl); + assertNotNull( + CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)); + assertTrue(CompatibilitySingletonFactory.getInstance( + MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl); } @Test public void testCreateThriftOneSource() { - //Make sure that the factory gives back a singleton. + // Make sure that the factory gives back a singleton. assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); + new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); } @Test public void testCreateThriftTwoSource() { - //Make sure that the factory gives back a singleton. + // Make sure that the factory gives back a singleton. assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); + new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java index 11e984bbe4d1..1ed5640f24df 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ public class TestMetricsZooKeeperSource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsZooKeeperSource.class); + HBaseClassTestRule.forClass(TestMetricsZooKeeperSource.class); @Test public void testGetInstanceNoHadoopCompat() throws Exception { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java index a199a78938a8..adb86b87facc 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java @@ -28,7 +28,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsZooKeeperSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -37,7 +37,7 @@ public class TestMetricsZooKeeperSourceImpl { @Test public void testGetInstance() { MetricsZooKeeperSource zkSource = - CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); + CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); assertTrue(zkSource instanceof MetricsZooKeeperSourceImpl); assertSame(zkSource, CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class)); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java index 1cf8702b7b12..96fe04b82e17 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.lib; import static org.junit.Assert.assertEquals; +import java.util.ArrayList; +import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MetricsTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -30,15 +31,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import java.util.ArrayList; -import java.util.List; - @Category({ MetricsTests.class, SmallTests.class }) public class TestMutableRangeHistogram { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMutableRangeHistogram.class); + HBaseClassTestRule.forClass(TestMutableRangeHistogram.class); private static final String RECORD_NAME = "test"; private static final String SIZE_HISTOGRAM_NAME = "TestSize"; diff --git a/hbase-hbtop/pom.xml b/hbase-hbtop/pom.xml index 08f1c07fbde6..41ecdc536ac2 100644 --- a/hbase-hbtop/pom.xml +++ b/hbase-hbtop/pom.xml @@ -1,7 +1,5 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-hbtop Apache HBase - HBTop A real-time monitoring tool for HBase like Unix's top command - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.apache.hbase @@ -107,4 +96,13 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java index 9c1a000831a2..c725fe02859a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,6 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; - /** * A real-time monitoring tool for HBase like Unix top command. 
*/ @@ -154,7 +153,7 @@ public int run(String[] args) throws Exception { } Optional fieldInfo = initialMode.getFieldInfos().stream() - .filter(f -> f.getField().getHeader().equals(field)).findFirst(); + .filter(f -> f.getField().getHeader().equals(field)).findFirst(); if (fieldInfo.isPresent()) { initialSortField = fieldInfo.get().getField(); initialAscendingSort = ascendingSort; @@ -168,7 +167,7 @@ public int run(String[] args) throws Exception { initialFields = new ArrayList<>(); for (String field : fields) { Optional fieldInfo = initialMode.getFieldInfos().stream() - .filter(f -> f.getField().getHeader().equals(field)).findFirst(); + .filter(f -> f.getField().getHeader().equals(field)).findFirst(); if (fieldInfo.isPresent()) { initialFields.add(fieldInfo.get().getField()); } else { @@ -180,7 +179,7 @@ public int run(String[] args) throws Exception { if (commandLine.hasOption("filters")) { String[] filters = commandLine.getOptionValue("filters").split(","); List fields = initialMode.getFieldInfos().stream().map(FieldInfo::getField) - .collect(Collectors.toList()); + .collect(Collectors.toList()); for (String filter : filters) { RecordFilter f = RecordFilter.parse(filter, fields, false); if (f != null) { @@ -203,7 +202,7 @@ public int run(String[] args) throws Exception { } try (Screen screen = new Screen(getConf(), initialRefreshDelay, initialMode, initialFields, - initialSortField, initialAscendingSort, initialFilters, numberOfIterations, batchMode)) { + initialSortField, initialAscendingSort, initialFilters, numberOfIterations, batchMode)) { screen.run(); } @@ -212,19 +211,16 @@ public int run(String[] args) throws Exception { private Options getOptions() { Options opts = new Options(); - opts.addOption("h", "help", false, - "Print usage; for help while the tool is running press 'h'"); - opts.addOption("d", "delay", true, - "The refresh delay (in seconds); default is 3 seconds"); + opts.addOption("h", "help", false, "Print usage; for help while the tool is running press 'h'"); + opts.addOption("d", "delay", true, "The refresh delay (in seconds); default is 3 seconds"); opts.addOption("m", "mode", true, "The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)|u (User)" - + "|c (Client), default is r"); - opts.addOption("n", "numberOfIterations", true, - "The number of iterations"); + + "|c (Client), default is r"); + opts.addOption("n", "numberOfIterations", true, "The number of iterations"); opts.addOption("s", "sortField", true, "The initial sort field. You can prepend a `+' or `-' to the field name to also override" - + " the sort direction. A leading `+' will force sorting high to low, whereas a `-' will" - + " ensure a low to high ordering"); + + " the sort direction. A leading `+' will force sorting high to low, whereas a `-' will" + + " ensure a low to high ordering"); opts.addOption("O", "outputFieldNames", false, "Print each of the available field names on a separate line, then quit"); opts.addOption("f", "fields", true, @@ -233,8 +229,8 @@ private Options getOptions() { "The initial filters. Specify comma separated filters to set multiple filters"); opts.addOption("b", "batchMode", false, "Starts hbtop in Batch mode, which could be useful for sending output from hbtop to other" - + " programs or to a file. In this mode, hbtop will not accept input and runs until the" - + " iterations limit you've set with the `-n' command-line option or until killed"); + + " programs or to a file. 
In this mode, hbtop will not accept input and runs until the" + + " iterations limit you've set with the `-n' command-line option or until killed"); return opts; } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java index 577172a38cb2..3331cd03550f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -94,7 +94,8 @@ public static Record ofEntries(Entry... entries) { } public static Record ofEntries(Stream entries) { - return entries.collect(Record::builder, Builder::put, (r1, r2) -> {}).build(); + return entries.collect(Record::builder, Builder::put, (r1, r2) -> { + }).build(); } private Record(ImmutableMap values) { @@ -165,12 +166,11 @@ public Set> entrySet() { } public Record combine(Record o) { - return ofEntries(values.keySet().stream() - .map(k -> { - if (k.getFieldValueType() == FieldValueType.STRING) { - return entry(k, values.get(k)); - } - return entry(k, values.get(k).plus(o.values.get(k))); - })); + return ofEntries(values.keySet().stream().map(k -> { + if (k.getFieldValueType() == FieldValueType.STRING) { + return entry(k, values.get(k)); + } + return entry(k, values.get(k).plus(o.values.get(k))); + })); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java index 78adf7cce009..c6d4399b8322 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.hbtop.field.FieldValue; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a filter that's filtering the metric {@link Record}s. 
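Editor's note on the HBTop option handling reformatted above: the flags enumerated in getOptions() combine into invocations like the following. This is a hypothetical example; it assumes the standard `hbase hbtop` launcher, the flag meanings are taken from the option descriptions in the hunks above, and the --filters value follows the field/operator/value grammar that RecordFilter parses.

    # Region mode, 5 second refresh, sort descending by request rate, pre-filter to one
    # namespace, run 10 iterations in batch mode and exit.
    hbase hbtop -m r -d 5 -s '+#REQ/S' -n 10 --filters 'NAMESPACE==default' -b

The filter string uses the comparison operators handled by RecordFilter (=, ==, >, >=, <, <=) against a field header such as NAMESPACE or #REQ/S.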
*/ @@ -32,11 +31,7 @@ public final class RecordFilter { private enum Operator { - EQUAL("="), - DOUBLE_EQUALS("=="), - GREATER(">"), - GREATER_OR_EQUAL(">="), - LESS("<"), + EQUAL("="), DOUBLE_EQUALS("=="), GREATER(">"), GREATER_OR_EQUAL(">="), LESS("<"), LESS_OR_EQUAL("<="); private final String operator; @@ -68,7 +63,7 @@ public static RecordFilter parse(String filterString, List fields, boolea StringBuilder fieldString = new StringBuilder(); while (filterString.length() > index && filterString.charAt(index) != '<' - && filterString.charAt(index) != '>' && filterString.charAt(index) != '=') { + && filterString.charAt(index) != '>' && filterString.charAt(index) != '=') { fieldString.append(filterString.charAt(index++)); } @@ -82,8 +77,8 @@ public static RecordFilter parse(String filterString, List fields, boolea } StringBuilder operatorString = new StringBuilder(); - while (filterString.length() > index && (filterString.charAt(index) == '<' || - filterString.charAt(index) == '>' || filterString.charAt(index) == '=')) { + while (filterString.length() > index && (filterString.charAt(index) == '<' + || filterString.charAt(index) == '>' || filterString.charAt(index) == '=')) { operatorString.append(filterString.charAt(index++)); } @@ -138,7 +133,7 @@ private static Operator getOperator(String operatorString) { private final FieldValue value; private RecordFilter(boolean ignoreCase, boolean not, Field field, Operator operator, - FieldValue value) { + FieldValue value) { this.ignoreCase = ignoreCase; this.not = not; this.field = Objects.requireNonNull(field); @@ -166,8 +161,7 @@ public boolean execute(Record record) { return not != ret; } - int compare = ignoreCase ? - fieldValue.compareToIgnoreCase(value) : fieldValue.compareTo(value); + int compare = ignoreCase ? fieldValue.compareToIgnoreCase(value) : fieldValue.compareTo(value); boolean ret; switch (operator) { @@ -212,7 +206,7 @@ public boolean equals(Object o) { } RecordFilter filter = (RecordFilter) o; return ignoreCase == filter.ignoreCase && not == filter.not && field == filter.field - && operator == filter.operator && value.equals(filter.value); + && operator == filter.operator && value.equals(filter.value); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java index df460dd31cf2..8874bc6853e0 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents fields that are displayed in the top screen. 
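Editor's note: the filter grammar handled by RecordFilter.parse() in the hunks above can be exercised programmatically as well. A minimal sketch, assuming region-mode field definitions; the parse() and execute() calls mirror the usage visible in HBTop.run() in this patch, and the third parse() argument is assumed to be the ignore-case flag:

    import java.util.List;
    import java.util.stream.Collectors;
    import org.apache.hadoop.hbase.hbtop.Record;
    import org.apache.hadoop.hbase.hbtop.RecordFilter;
    import org.apache.hadoop.hbase.hbtop.field.Field;
    import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
    import org.apache.hadoop.hbase.hbtop.mode.Mode;

    public final class FilterSketch {
      private FilterSketch() {
      }

      // Returns true when the record's #REQ/S field is greater than 100.
      public static boolean busyRegion(Record record) {
        List<Field> fields = Mode.REGION.getFieldInfos().stream()
          .map(FieldInfo::getField).collect(Collectors.toList());
        // "field header" + operator (=, ==, >, >=, <, <=) + value; false = case-sensitive (assumed).
        RecordFilter filter = RecordFilter.parse("#REQ/S>100", fields, false);
        return filter != null && filter.execute(record);
      }
    }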
*/ @@ -34,27 +33,25 @@ public enum Field { REGION("REGION", "Encoded Region Name", false, true, FieldValueType.STRING), REGION_SERVER("RS", "Short Region Server Name", true, true, FieldValueType.STRING), LONG_REGION_SERVER("LRS", "Long Region Server Name", true, true, FieldValueType.STRING), - REQUEST_COUNT_PER_SECOND("#REQ/S", "Request Count per second", false, false, - FieldValueType.LONG), + REQUEST_COUNT_PER_SECOND("#REQ/S", "Request Count per second", false, false, FieldValueType.LONG), READ_REQUEST_COUNT_PER_SECOND("#READ/S", "Read Request Count per second", false, false, - FieldValueType.LONG), + FieldValueType.LONG), FILTERED_READ_REQUEST_COUNT_PER_SECOND("#FREAD/S", "Filtered Read Request Count per second", - false, false, FieldValueType.LONG), + false, false, FieldValueType.LONG), WRITE_REQUEST_COUNT_PER_SECOND("#WRITE/S", "Write Request Count per second", false, false, - FieldValueType.LONG), + FieldValueType.LONG), STORE_FILE_SIZE("SF", "StoreFile Size", false, false, FieldValueType.SIZE), UNCOMPRESSED_STORE_FILE_SIZE("USF", "Uncompressed StoreFile Size", false, false, - FieldValueType.SIZE), + FieldValueType.SIZE), NUM_STORE_FILES("#SF", "Number of StoreFiles", false, false, FieldValueType.INTEGER), MEM_STORE_SIZE("MEMSTORE", "MemStore Size", false, false, FieldValueType.SIZE), LOCALITY("LOCALITY", "Block Locality", false, false, FieldValueType.FLOAT), START_KEY("SKEY", "Start Key", true, true, FieldValueType.STRING), - COMPACTING_CELL_COUNT("#COMPingCELL", "Compacting Cell Count", false, false, - FieldValueType.LONG), + COMPACTING_CELL_COUNT("#COMPingCELL", "Compacting Cell Count", false, false, FieldValueType.LONG), COMPACTED_CELL_COUNT("#COMPedCELL", "Compacted Cell Count", false, false, FieldValueType.LONG), COMPACTION_PROGRESS("%COMP", "Compaction Progress", false, false, FieldValueType.PERCENT), LAST_MAJOR_COMPACTION_TIME("LASTMCOMP", "Last Major Compaction Time", false, true, - FieldValueType.STRING), + FieldValueType.STRING), REGION_COUNT("#REGION", "Region Count", false, false, FieldValueType.INTEGER), USED_HEAP_SIZE("UHEAP", "Used Heap Size", false, false, FieldValueType.SIZE), USER("USER", "user Name", true, true, FieldValueType.STRING), @@ -70,7 +67,7 @@ public enum Field { private final FieldValueType fieldValueType; Field(String header, String description, boolean autoAdjust, boolean leftJustify, - FieldValueType fieldValueType) { + FieldValueType fieldValueType) { this.header = Objects.requireNonNull(header); this.description = Objects.requireNonNull(description); this.autoAdjust = autoAdjust; diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java index 3f0e5f7ad1d3..ad153210dd9e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,11 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** - * Information about a field. - * - * This has a {@link Field} itself and additional information (e.g. {@code defaultLength} and - * {@code displayByDefault}). This additional information is different between the - * {@link org.apache.hadoop.hbase.hbtop.mode.Mode}s even when the field is the same. 
That's why the - * additional information is separated from {@link Field}. + * Information about a field. This has a {@link Field} itself and additional information (e.g. + * {@code defaultLength} and {@code displayByDefault}). This additional information is different + * between the {@link org.apache.hadoop.hbase.hbtop.mode.Mode}s even when the field is the same. + * That's why the additional information is separated from {@link Field}. */ @InterfaceAudience.Private public class FieldInfo { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java index 086dadc3e290..43fbf6498384 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,11 +22,8 @@ import org.apache.hadoop.hbase.Size; import org.apache.yetus.audience.InterfaceAudience; - /** - * Represents a value of a field. - * - * The type of a value is defined by {@link FieldValue}. + * Represents a value of a field. The type of a value is defined by {@link FieldValue}. */ @InterfaceAudience.Private public final class FieldValue implements Comparable { @@ -103,23 +100,23 @@ public final class FieldValue implements Comparable { private Size optimizeSize(Size size) { if (size.get(Size.Unit.BYTE) < 1024d) { - return size.getUnit() == Size.Unit.BYTE ? - size : new Size(size.get(Size.Unit.BYTE), Size.Unit.BYTE); + return size.getUnit() == Size.Unit.BYTE ? size + : new Size(size.get(Size.Unit.BYTE), Size.Unit.BYTE); } else if (size.get(Size.Unit.KILOBYTE) < 1024d) { - return size.getUnit() == Size.Unit.KILOBYTE ? - size : new Size(size.get(Size.Unit.KILOBYTE), Size.Unit.KILOBYTE); + return size.getUnit() == Size.Unit.KILOBYTE ? size + : new Size(size.get(Size.Unit.KILOBYTE), Size.Unit.KILOBYTE); } else if (size.get(Size.Unit.MEGABYTE) < 1024d) { - return size.getUnit() == Size.Unit.MEGABYTE ? - size : new Size(size.get(Size.Unit.MEGABYTE), Size.Unit.MEGABYTE); + return size.getUnit() == Size.Unit.MEGABYTE ? size + : new Size(size.get(Size.Unit.MEGABYTE), Size.Unit.MEGABYTE); } else if (size.get(Size.Unit.GIGABYTE) < 1024d) { - return size.getUnit() == Size.Unit.GIGABYTE ? - size : new Size(size.get(Size.Unit.GIGABYTE), Size.Unit.GIGABYTE); + return size.getUnit() == Size.Unit.GIGABYTE ? size + : new Size(size.get(Size.Unit.GIGABYTE), Size.Unit.GIGABYTE); } else if (size.get(Size.Unit.TERABYTE) < 1024d) { - return size.getUnit() == Size.Unit.TERABYTE ? - size : new Size(size.get(Size.Unit.TERABYTE), Size.Unit.TERABYTE); + return size.getUnit() == Size.Unit.TERABYTE ? size + : new Size(size.get(Size.Unit.TERABYTE), Size.Unit.TERABYTE); } - return size.getUnit() == Size.Unit.PETABYTE ? - size : new Size(size.get(Size.Unit.PETABYTE), Size.Unit.PETABYTE); + return size.getUnit() == Size.Unit.PETABYTE ? 
size + : new Size(size.get(Size.Unit.PETABYTE), Size.Unit.PETABYTE); } private Size parseSizeString(String sizeString) { @@ -133,7 +130,7 @@ private Size parseSizeString(String sizeString) { } private Size.Unit convertToUnit(String unitSimpleName) { - for (Size.Unit unit: Size.Unit.values()) { + for (Size.Unit unit : Size.Unit.values()) { if (unitSimpleName.equals(unit.getSimpleName())) { return unit; } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java index e2edae87b800..63597da584b9 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the type of a {@link FieldValue}. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java index fe3edd1b2544..9b8f9a518d37 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.UserMetrics; @@ -41,87 +40,88 @@ /** * Implementation for {@link ModeStrategy} for client Mode. 
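Editor's note: the optimizeSize() chain reformatted above normalizes a Size to the largest unit that keeps the numeric part under 1024, so a value reported as 2048 KB ends up stored as 2.0 MB while 512 bytes stays at 512 B. A small sketch of that behaviour under stated assumptions: Field.newValue() accepting a Size for a SIZE-typed field is inferred from how this patch builds records, and the values here are hypothetical:

    import org.apache.hadoop.hbase.Size;
    import org.apache.hadoop.hbase.hbtop.field.Field;
    import org.apache.hadoop.hbase.hbtop.field.FieldValue;

    public final class SizeNormalizationSketch {
      private SizeNormalizationSketch() {
      }

      public static void main(String[] args) {
        // STORE_FILE_SIZE has FieldValueType.SIZE, so its FieldValue goes through optimizeSize():
        // 2048 KB is >= 1024 at the KB step but 2.0 < 1024 at the MB step, so it is kept as 2.0 MB.
        FieldValue twoMegabytes = Field.STORE_FILE_SIZE.newValue(new Size(2048, Size.Unit.KILOBYTE));
        FieldValue small = Field.STORE_FILE_SIZE.newValue(new Size(512, Size.Unit.BYTE));
        // FieldValue implements Comparable (shown in this patch); 512 B sorts below 2.0 MB.
        System.out.println(small.compareTo(twoMegabytes) < 0);
      }
    }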
*/ -@InterfaceAudience.Private public final class ClientModeStrategy implements ModeStrategy { +@InterfaceAudience.Private +public final class ClientModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays - .asList(new FieldInfo(Field.CLIENT, 0, true), - new FieldInfo(Field.USER_COUNT, 5, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.CLIENT, 0, true), new FieldInfo(Field.USER_COUNT, 5, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); private final Map requestCountPerSecondMap = new HashMap<>(); ClientModeStrategy() { } - @Override public List getFieldInfos() { + @Override + public List getFieldInfos() { return fieldInfos; } - @Override public Field getDefaultSortField() { + @Override + public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { List records = createRecords(clusterMetrics); return aggregateRecordsAndAddDistinct( - ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.CLIENT, Field.USER, - Field.USER_COUNT); + ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.CLIENT, Field.USER, + Field.USER_COUNT); } List createRecords(ClusterMetrics clusterMetrics) { List ret = new ArrayList<>(); for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) { long lastReportTimestamp = serverMetrics.getLastReportTimestamp(); - serverMetrics.getUserMetrics().values().forEach(um -> um.getClientMetrics().values().forEach( - clientMetrics -> ret.add( - createRecord(um.getNameAsString(), clientMetrics, lastReportTimestamp, - serverMetrics.getServerName().getServerName())))); + serverMetrics.getUserMetrics().values() + .forEach(um -> um.getClientMetrics().values() + .forEach(clientMetrics -> ret.add(createRecord(um.getNameAsString(), clientMetrics, + lastReportTimestamp, serverMetrics.getServerName().getServerName())))); } return ret; } /** * Aggregate the records and count the unique values for the given distinctField - * - * @param records records to be processed - * @param groupBy Field on which group by needs to be done - * @param distinctField Field whose unique values needs to be counted + * @param records records to be processed + * @param groupBy Field on which group by needs to be done + * @param distinctField Field whose unique values needs to be counted * @param uniqueCountAssignedTo a target field to which the unique count is assigned to * @return aggregated records */ List aggregateRecordsAndAddDistinct(List records, Field groupBy, Field distinctField, Field uniqueCountAssignedTo) { List result = new ArrayList<>(); - records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).values() - .forEach(val -> { - Set distinctValues = new HashSet<>(); - Map map = new HashMap<>(); - for (Record record : val) { - for (Map.Entry field : record.entrySet()) { - if (distinctField.equals(field.getKey())) { - //We will not be adding the 
field in the new record whose distinct count is required - distinctValues.add(record.get(distinctField)); + records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).values().forEach(val -> { + Set distinctValues = new HashSet<>(); + Map map = new HashMap<>(); + for (Record record : val) { + for (Map.Entry field : record.entrySet()) { + if (distinctField.equals(field.getKey())) { + // We will not be adding the field in the new record whose distinct count is required + distinctValues.add(record.get(distinctField)); + } else { + if (field.getKey().getFieldValueType() == FieldValueType.STRING) { + map.put(field.getKey(), field.getValue()); + } else { + if (map.get(field.getKey()) == null) { + map.put(field.getKey(), field.getValue()); } else { - if (field.getKey().getFieldValueType() == FieldValueType.STRING) { - map.put(field.getKey(), field.getValue()); - } else { - if (map.get(field.getKey()) == null) { - map.put(field.getKey(), field.getValue()); - } else { - map.put(field.getKey(), map.get(field.getKey()).plus(field.getValue())); - } - } + map.put(field.getKey(), map.get(field.getKey()).plus(field.getValue())); } } } - // Add unique count field - map.put(uniqueCountAssignedTo, uniqueCountAssignedTo.newValue(distinctValues.size())); - result.add(Record.ofEntries(map.entrySet().stream() - .map(k -> Record.entry(k.getKey(), k.getValue())))); - }); + } + } + // Add unique count field + map.put(uniqueCountAssignedTo, uniqueCountAssignedTo.newValue(distinctValues.size())); + result.add( + Record.ofEntries(map.entrySet().stream().map(k -> Record.entry(k.getKey(), k.getValue())))); + }); return result; } @@ -137,21 +137,22 @@ Record createRecord(String user, UserMetrics.ClientMetrics clientMetrics, requestCountPerSecondMap.put(mapKey, requestCountPerSecond); } requestCountPerSecond.refresh(lastReportTimestamp, clientMetrics.getReadRequestsCount(), - clientMetrics.getFilteredReadRequestsCount(), clientMetrics.getWriteRequestsCount()); + clientMetrics.getFilteredReadRequestsCount(), clientMetrics.getWriteRequestsCount()); builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getRequestCountPerSecond()); builder.put(Field.READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getReadRequestCountPerSecond()); + requestCountPerSecond.getReadRequestCountPerSecond()); builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getWriteRequestCountPerSecond()); + requestCountPerSecond.getWriteRequestCountPerSecond()); builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getFilteredReadRequestCountPerSecond()); + requestCountPerSecond.getFilteredReadRequestCountPerSecond()); builder.put(Field.USER, user); return builder.build(); } - @Override public DrillDownInfo drillDown(Record selectedRecord) { + @Override + public DrillDownInfo drillDown(Record selectedRecord) { List initialFilters = Collections.singletonList( - RecordFilter.newBuilder(Field.CLIENT).doubleEquals(selectedRecord.get(Field.CLIENT))); + RecordFilter.newBuilder(Field.CLIENT).doubleEquals(selectedRecord.get(Field.CLIENT))); return new DrillDownInfo(Mode.USER, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java index de3d582fb9f1..7061d5374e88 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java @@ -1,4 +1,4 @@ 
-/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +21,12 @@ import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.RecordFilter; import org.apache.yetus.audience.InterfaceAudience; - /** - * Information about drilling down. - * - * When drilling down, going to next {@link Mode} with initial {@link RecordFilter}s. + * Information about drilling down. When drilling down, going to next {@link Mode} with initial + * {@link RecordFilter}s. */ @InterfaceAudience.Private public class DrillDownInfo { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java index ffd98dfd6837..58ed8428c18e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a display mode in the top screen. */ @@ -45,7 +44,7 @@ public enum Mode { private final ModeStrategy modeStrategy; Mode(String header, String description, ModeStrategy modeStrategy) { - this.header = Objects.requireNonNull(header); + this.header = Objects.requireNonNull(header); this.description = Objects.requireNonNull(description); this.modeStrategy = Objects.requireNonNull(modeStrategy); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java index 021cee25810a..db58f1facae5 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,14 +26,17 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * An interface for strategy logic for {@link Mode}. */ @InterfaceAudience.Private interface ModeStrategy { List getFieldInfos(); + Field getDefaultSortField(); + List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters); - @Nullable DrillDownInfo drillDown(Record selectedRecord); + + @Nullable + DrillDownInfo drillDown(Record selectedRecord); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java index 9175820e0cae..b426c0b949b9 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; import org.apache.hadoop.hbase.hbtop.field.Field; @@ -36,8 +35,7 @@ private ModeStrategyUtils() { * @param filters List of filters * @return filtered records */ - public static List applyFilterAndGet(List records, - List filters) { + public static List applyFilterAndGet(List records, List filters) { if (filters != null && !filters.isEmpty()) { return records.stream().filter(r -> filters.stream().allMatch(f -> f.execute(r))) .collect(Collectors.toList()); @@ -45,19 +43,18 @@ public static List applyFilterAndGet(List records, return records; } - /** - * Group by records on the basis of supplied groupBy field and - * Aggregate records using {@link Record#combine(Record)} - * + * Group by records on the basis of supplied groupBy field and Aggregate records using + * {@link Record#combine(Record)} * @param records records needs to be processed * @param groupBy Field to be used for group by * @return aggregated records */ public static List aggregateRecords(List records, Field groupBy) { return records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).entrySet().stream() - .flatMap(e -> e.getValue().stream().reduce(Record::combine).map(Stream::of) - .orElse(Stream.empty())).collect(Collectors.toList()); + .flatMap( + e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty())) + .collect(Collectors.toList()); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java index f74d8bf22ebc..7be2518fe362 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; @@ -28,15 +27,13 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Namespace Mode. 
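Editor's note: ModeStrategyUtils above is the shared reduction step for the aggregate modes, and NamespaceModeStrategy below chains its two helpers. A minimal sketch of that chaining with hypothetical inputs; it is placed in the hbtop mode package since the utility class's visibility is not shown in these hunks:

    package org.apache.hadoop.hbase.hbtop.mode;

    import java.util.List;
    import org.apache.hadoop.hbase.hbtop.Record;
    import org.apache.hadoop.hbase.hbtop.RecordFilter;
    import org.apache.hadoop.hbase.hbtop.field.Field;

    public final class AggregationSketch {
      private AggregationSketch() {
      }

      // Reduces per-region records to one record per namespace, after applying push-down filters.
      public static List<Record> perNamespace(List<Record> regionRecords,
        List<RecordFilter> pushDownFilters) {
        List<Record> filtered = ModeStrategyUtils.applyFilterAndGet(regionRecords, pushDownFilters);
        // Groups by NAMESPACE and merges each group with Record::combine: STRING fields are kept
        // as-is, numeric field values are summed via plus().
        return ModeStrategyUtils.aggregateRecords(filtered, Field.NAMESPACE);
      }
    }

For instance, two region records in namespace "default" with request rates of 40 and 60 would reduce to a single record for that namespace with a combined rate of 100, and the per-record REGION_COUNT of 1 added by selectModeFieldsAndAddCountField() would accumulate to 2 (values hypothetical).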
*/ @InterfaceAudience.Private public final class NamespaceModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.NAMESPACE, 0, true), + private final List fieldInfos = Arrays.asList(new FieldInfo(Field.NAMESPACE, 0, true), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), @@ -44,13 +41,11 @@ public final class NamespaceModeStrategy implements ModeStrategy { new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); - NamespaceModeStrategy(){ + NamespaceModeStrategy() { } @Override @@ -63,11 +58,12 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by NAMESPACE field return ModeStrategyUtils.aggregateRecords(records, Field.NAMESPACE); @@ -75,9 +71,8 @@ public Field getDefaultSortField() { @Override public DrillDownInfo drillDown(Record selectedRecord) { - List initialFilters = - Collections.singletonList(RecordFilter.newBuilder(Field.NAMESPACE) - .doubleEquals(selectedRecord.get(Field.NAMESPACE))); + List initialFilters = Collections.singletonList( + RecordFilter.newBuilder(Field.NAMESPACE).doubleEquals(selectedRecord.get(Field.NAMESPACE))); return new DrillDownInfo(Mode.TABLE, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java index 0adbc823bf4c..ff8802b4dbbc 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.RegionMetrics; @@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Region Mode. 
*/ @@ -47,29 +45,22 @@ public final class RegionModeStrategy implements ModeStrategy { private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.REGION_NAME, 0, false), - new FieldInfo(Field.NAMESPACE, 0, true), - new FieldInfo(Field.TABLE, 0, true), - new FieldInfo(Field.START_CODE, 13, false), - new FieldInfo(Field.REPLICA_ID, 5, false), - new FieldInfo(Field.REGION, 32, true), - new FieldInfo(Field.REGION_SERVER, 0, true), - new FieldInfo(Field.LONG_REGION_SERVER, 0, false), + new FieldInfo(Field.REGION_NAME, 0, false), new FieldInfo(Field.NAMESPACE, 0, true), + new FieldInfo(Field.TABLE, 0, true), new FieldInfo(Field.START_CODE, 13, false), + new FieldInfo(Field.REPLICA_ID, 5, false), new FieldInfo(Field.REGION, 32, true), + new FieldInfo(Field.REGION_SERVER, 0, true), new FieldInfo(Field.LONG_REGION_SERVER, 0, false), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.STORE_FILE_SIZE, 10, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 12, false), - new FieldInfo(Field.NUM_STORE_FILES,4, true), - new FieldInfo(Field.MEM_STORE_SIZE, 8, true), - new FieldInfo(Field.LOCALITY, 8, true), - new FieldInfo(Field.START_KEY, 0, false), + new FieldInfo(Field.NUM_STORE_FILES, 4, true), new FieldInfo(Field.MEM_STORE_SIZE, 8, true), + new FieldInfo(Field.LOCALITY, 8, true), new FieldInfo(Field.START_KEY, 0, false), new FieldInfo(Field.COMPACTING_CELL_COUNT, 12, false), new FieldInfo(Field.COMPACTED_CELL_COUNT, 12, false), new FieldInfo(Field.COMPACTION_PROGRESS, 7, false), - new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false) - ); + new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false)); private final Map requestCountPerSecondMap = new HashMap<>(); @@ -86,7 +77,8 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { List ret = new ArrayList<>(); for (ServerMetrics sm : clusterMetrics.getLiveServerMetrics().values()) { @@ -99,7 +91,7 @@ public Field getDefaultSortField() { } private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMetrics, - long lastReportTimestamp) { + long lastReportTimestamp) { Record.Builder builder = Record.builder(); @@ -119,8 +111,8 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet tableName = tn.getQualifierAsString(); startKey = Bytes.toStringBinary(elements[1]); startCode = Bytes.toString(elements[2]); - replicaId = elements.length == 4 ? - Integer.valueOf(Bytes.toString(elements[3])).toString() : ""; + replicaId = + elements.length == 4 ? 
Integer.valueOf(Bytes.toString(elements[3])).toString() : ""; region = RegionInfo.encodeRegionName(regionMetrics.getRegionName()); } catch (IOException ignored) { } @@ -145,11 +137,10 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet builder.put(Field.READ_REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getReadRequestCountPerSecond()); builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getFilteredReadRequestCountPerSecond()); + requestCountPerSecond.getFilteredReadRequestCountPerSecond()); builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getWriteRequestCountPerSecond()); - builder.put(Field.REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getRequestCountPerSecond()); + builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getRequestCountPerSecond()); builder.put(Field.STORE_FILE_SIZE, regionMetrics.getStoreFileSize()); builder.put(Field.UNCOMPRESSED_STORE_FILE_SIZE, regionMetrics.getUncompressedStoreFileSize()); @@ -160,7 +151,7 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet long compactingCellCount = regionMetrics.getCompactingCellCount(); long compactedCellCount = regionMetrics.getCompactedCellCount(); float compactionProgress = 0; - if (compactedCellCount > 0) { + if (compactedCellCount > 0) { compactionProgress = 100 * ((float) compactedCellCount / compactingCellCount); } @@ -178,22 +169,20 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet } /** - * Form new record list with records formed by only fields provided through fieldInfo and - * add a count field for each record with value 1 - * We are doing two operation of selecting and adding new field - * because of saving some CPU cycles on rebuilding the record again - * + * Form new record list with records formed by only fields provided through fieldInfo and add a + * count field for each record with value 1 We are doing two operation of selecting and adding new + * field because of saving some CPU cycles on rebuilding the record again * @param fieldInfos List of FieldInfos required in the record - * @param records List of records which needs to be processed + * @param records List of records which needs to be processed * @param countField Field which needs to be added with value 1 for each record * @return records after selecting required fields and adding count field */ List selectModeFieldsAndAddCountField(List fieldInfos, List records, Field countField) { - return records.stream().map(record -> Record.ofEntries( - fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) - .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) + return records.stream().map( + record -> Record.ofEntries(fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) + .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) .map(record -> Record.builder().putAll(record).put(countField, 1).build()) .collect(Collectors.toList()); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java index 44a9a2c82711..1618bf3fc801 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * 
or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.hbtop.Record; @@ -32,7 +31,6 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for RegionServer Mode. */ @@ -40,8 +38,7 @@ public final class RegionServerModeStrategy implements ModeStrategy { private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.REGION_SERVER, 0, true), - new FieldInfo(Field.LONG_REGION_SERVER, 0, false), + new FieldInfo(Field.REGION_SERVER, 0, true), new FieldInfo(Field.LONG_REGION_SERVER, 0, false), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), @@ -49,15 +46,12 @@ public final class RegionServerModeStrategy implements ModeStrategy { new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true), - new FieldInfo(Field.USED_HEAP_SIZE, 11, true), - new FieldInfo(Field.MAX_HEAP_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true), + new FieldInfo(Field.USED_HEAP_SIZE, 11, true), new FieldInfo(Field.MAX_HEAP_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); - RegionServerModeStrategy(){ + RegionServerModeStrategy() { } @Override @@ -70,11 +64,12 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by LONG_REGION_SERVER field Map retMap = ModeStrategyUtils.aggregateRecords(records, Field.LONG_REGION_SERVER).stream() @@ -87,9 +82,9 @@ public Field getDefaultSortField() { continue; } - Record newRecord = Record.builder().putAll(record) - .put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize()) - .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize()).build(); + Record newRecord = + Record.builder().putAll(record).put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize()) + .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize()).build(); retMap.put(sm.getServerName().getServerName(), newRecord); } @@ -100,8 +95,7 @@ public Field getDefaultSortField() { @Override public DrillDownInfo drillDown(Record selectedRecord) { List initialFilters = Collections.singletonList(RecordFilter - .newBuilder(Field.REGION_SERVER) - .doubleEquals(selectedRecord.get(Field.REGION_SERVER))); + .newBuilder(Field.REGION_SERVER).doubleEquals(selectedRecord.get(Field.REGION_SERVER))); return new DrillDownInfo(Mode.REGION, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java index d546070db71d..ade3f7d5e433 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for calculating request counts per second. */ @@ -34,7 +33,7 @@ public class RequestCountPerSecond { private long writeRequestCountPerSecond; public void refresh(long lastReportTimestamp, long readRequestCount, - long filteredReadRequestCount, long writeRequestCount) { + long filteredReadRequestCount, long writeRequestCount) { if (previousLastReportTimestamp == 0) { previousLastReportTimestamp = lastReportTimestamp; previousReadRequestCount = readRequestCount; @@ -47,7 +46,7 @@ public void refresh(long lastReportTimestamp, long readRequestCount, } readRequestCountPerSecond = (readRequestCount - previousReadRequestCount) / delta; filteredReadRequestCountPerSecond = - (filteredReadRequestCount - previousFilteredReadRequestCount) / delta; + (filteredReadRequestCount - previousFilteredReadRequestCount) / delta; writeRequestCountPerSecond = (writeRequestCount - previousWriteRequestCount) / delta; previousLastReportTimestamp = lastReportTimestamp; diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java index 4acc34412584..954401352e2d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,26 +29,21 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Table Mode. 
*/ @InterfaceAudience.Private public final class TableModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.NAMESPACE, 0, true), - new FieldInfo(Field.TABLE, 0, true), - new FieldInfo(Field.REGION_COUNT, 7, true), + private final List fieldInfos = Arrays.asList(new FieldInfo(Field.NAMESPACE, 0, true), + new FieldInfo(Field.TABLE, 0, true), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); @@ -65,26 +60,22 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by NAMESPACE field and TABLE field - return records.stream() - .collect(Collectors.groupingBy(r -> { - String namespace = r.get(Field.NAMESPACE).asString(); - String table = r.get(Field.TABLE).asString(); - return TableName.valueOf(namespace, table); - })) - .entrySet().stream() - .flatMap( - e -> e.getValue().stream() - .reduce(Record::combine) - .map(Stream::of) - .orElse(Stream.empty())) - .collect(Collectors.toList()); + return records.stream().collect(Collectors.groupingBy(r -> { + String namespace = r.get(Field.NAMESPACE).asString(); + String table = r.get(Field.TABLE).asString(); + return TableName.valueOf(namespace, table); + })).entrySet().stream() + .flatMap( + e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty())) + .collect(Collectors.toList()); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java index 605376e12218..bf57daf96b48 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; @@ -31,40 +30,44 @@ /** * Implementation for {@link ModeStrategy} for User Mode. 
*/ -@InterfaceAudience.Private public final class UserModeStrategy implements ModeStrategy { +@InterfaceAudience.Private +public final class UserModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays - .asList(new FieldInfo(Field.USER, 0, true), - new FieldInfo(Field.CLIENT_COUNT, 7, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.USER, 0, true), new FieldInfo(Field.CLIENT_COUNT, 7, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); private final ClientModeStrategy clientModeStrategy = new ClientModeStrategy(); UserModeStrategy() { } - @Override public List getFieldInfos() { + @Override + public List getFieldInfos() { return fieldInfos; } - @Override public Field getDefaultSortField() { + @Override + public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { List records = clientModeStrategy.createRecords(clusterMetrics); return clientModeStrategy.aggregateRecordsAndAddDistinct( - ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.USER, Field.CLIENT, - Field.CLIENT_COUNT); + ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.USER, Field.CLIENT, + Field.CLIENT_COUNT); } - @Override public DrillDownInfo drillDown(Record selectedRecord) { - //Drill down to client and using selected USER as a filter + @Override + public DrillDownInfo drillDown(Record selectedRecord) { + // Drill down to client and using selected USER as a filter List initialFilters = Collections.singletonList( - RecordFilter.newBuilder(Field.USER).doubleEquals(selectedRecord.get(Field.USER))); + RecordFilter.newBuilder(Field.USER).doubleEquals(selectedRecord.get(Field.USER))); return new DrillDownInfo(Mode.CLIENT, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java index 8b55d6ec0df3..4620d0896c2c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; import org.apache.yetus.audience.InterfaceAudience; - /** * An abstract class for {@link ScreenView} that has the common useful methods and the default * implementations for the abstract methods. 
diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java index 2846c25d1cc4..893a64dd465d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * This dispatches key presses and timers to the current {@link ScreenView}. */ @@ -56,10 +55,9 @@ public class Screen implements Closeable { private Long timerTimestamp; public Screen(Configuration conf, long initialRefreshDelay, Mode initialMode, - @Nullable List initialFields, @Nullable Field initialSortField, - @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, - long numberOfIterations, boolean batchMode) - throws IOException { + @Nullable List initialFields, @Nullable Field initialSortField, + @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, + long numberOfIterations, boolean batchMode) throws IOException { connection = ConnectionFactory.createConnection(conf); admin = connection.getAdmin(); @@ -69,9 +67,8 @@ public Screen(Configuration conf, long initialRefreshDelay, Mode initialMode, } else { terminal = new TerminalImpl("hbtop"); } - currentScreenView = new TopScreenView(this, terminal, initialRefreshDelay, admin, - initialMode, initialFields, initialSortField, initialAscendingSort, initialFilters, - numberOfIterations); + currentScreenView = new TopScreenView(this, terminal, initialRefreshDelay, admin, initialMode, + initialFields, initialSortField, initialAscendingSort, initialFilters, numberOfIterations); } @Override @@ -106,7 +103,7 @@ public void run() { nextScreenView = currentScreenView.handleTimer(); } else { TimeUnit.MILLISECONDS - .sleep(Math.min(timerTimestamp - now, SLEEP_TIMEOUT_MILLISECONDS)); + .sleep(Math.min(timerTimestamp - now, SLEEP_TIMEOUT_MILLISECONDS)); continue; } } else { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java index f061bff831d4..9291cedb7db4 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,16 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.yetus.audience.InterfaceAudience; - /** * An interface for a screen view that handles key presses and timers. 
*/ @InterfaceAudience.Private public interface ScreenView { void init(); - @Nullable ScreenView handleKeyPress(KeyPress keyPress); - @Nullable ScreenView handleTimer(); + + @Nullable + ScreenView handleKeyPress(KeyPress keyPress); + + @Nullable + ScreenView handleTimer(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java index 45f5fd01efb7..f9f386ba1723 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,10 @@ import java.util.EnumMap; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the field screen. */ @@ -52,8 +50,8 @@ public interface ResultListener { private boolean moveMode; public FieldScreenPresenter(FieldScreenView fieldScreenView, Field sortField, List fields, - EnumMap fieldDisplayMap, ResultListener resultListener, - ScreenView nextScreenView) { + EnumMap fieldDisplayMap, ResultListener resultListener, + ScreenView nextScreenView) { this.fieldScreenView = Objects.requireNonNull(fieldScreenView); this.sortField = Objects.requireNonNull(sortField); this.fields = new ArrayList<>(Objects.requireNonNull(fields)); @@ -63,7 +61,7 @@ public FieldScreenPresenter(FieldScreenView fieldScreenView, Field sortField, Li int headerLength = 0; int descriptionLength = 0; - for (int i = 0; i < fields.size(); i ++) { + for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); if (field == sortField) { @@ -86,8 +84,8 @@ public FieldScreenPresenter(FieldScreenView fieldScreenView, Field sortField, Li public void init() { fieldScreenView.hideCursor(); fieldScreenView.clearTerminal(); - fieldScreenView.showFieldScreen(sortField.getHeader(), fields, fieldDisplayMap, - currentPosition, headerMaxLength, descriptionMaxLength, moveMode); + fieldScreenView.showFieldScreen(sortField.getHeader(), fields, fieldDisplayMap, currentPosition, + headerMaxLength, descriptionMaxLength, moveMode); fieldScreenView.refreshTerminal(); } @@ -132,7 +130,7 @@ public void pageUp() { } public void pageDown() { - if (currentPosition < fields.size() - 1 && !moveMode) { + if (currentPosition < fields.size() - 1 && !moveMode) { int previousPosition = currentPosition; currentPosition = fields.size() - 1; showField(previousPosition); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java index 165850142247..e13431550f4a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The screen where we can change the displayed fields, the sort key and the order of the fields. */ @@ -41,11 +40,11 @@ public class FieldScreenView extends AbstractScreenView { private final FieldScreenPresenter fieldScreenPresenter; public FieldScreenView(Screen screen, Terminal terminal, Field sortField, List fields, - EnumMap fieldDisplayMap, FieldScreenPresenter.ResultListener resultListener, - ScreenView nextScreenView) { + EnumMap fieldDisplayMap, FieldScreenPresenter.ResultListener resultListener, + ScreenView nextScreenView) { super(screen, terminal); this.fieldScreenPresenter = new FieldScreenPresenter(this, sortField, fields, fieldDisplayMap, - resultListener, nextScreenView); + resultListener, nextScreenView); } @Override @@ -118,11 +117,11 @@ public ScreenView handleKeyPress(KeyPress keyPress) { } public void showFieldScreen(String sortFieldHeader, List fields, - EnumMap fieldDisplayMap, int currentPosition, int headerMaxLength, - int descriptionMaxLength, boolean moveMode) { + EnumMap fieldDisplayMap, int currentPosition, int headerMaxLength, + int descriptionMaxLength, boolean moveMode) { showScreenDescription(sortFieldHeader); - for (int i = 0; i < fields.size(); i ++) { + for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); showField(i, field, fieldDisplayMap.get(field), i == currentPosition, headerMaxLength, descriptionMaxLength, moveMode); @@ -134,17 +133,17 @@ public void showScreenDescription(String sortFieldHeader) { printer.startBold().print("Fields Management").stopBold().endOfLine(); printer.print("Current Sort Field: ").startBold().print(sortFieldHeader).stopBold().endOfLine(); printer.print("Navigate with up/down, Right selects for move then or Left commits,") - .endOfLine(); + .endOfLine(); printer.print("'d' or toggles display, 's' sets sort. 
Use 'q' or to end!") - .endOfLine(); + .endOfLine(); } public void showField(int pos, Field field, boolean display, boolean selected, - int fieldHeaderMaxLength, int fieldDescriptionMaxLength, boolean moveMode) { + int fieldHeaderMaxLength, int fieldDescriptionMaxLength, boolean moveMode) { String fieldHeader = String.format("%-" + fieldHeaderMaxLength + "s", field.getHeader()); - String fieldDescription = String.format("%-" + fieldDescriptionMaxLength + "s", - field.getDescription()); + String fieldDescription = + String.format("%-" + fieldDescriptionMaxLength + "s", field.getDescription()); int row = FIELD_START_ROW + pos; TerminalPrinter printer = getTerminalPrinter(row); @@ -157,8 +156,8 @@ public void showField(int pos, Field field, boolean display, boolean selected, printer.startBold(); } - printer.startHighlight() - .printFormat("%s = %s", fieldHeader, fieldDescription).stopHighlight(); + printer.startHighlight().printFormat("%s = %s", fieldHeader, fieldDescription) + .stopHighlight(); if (display) { printer.stopBold(); @@ -172,8 +171,8 @@ public void showField(int pos, Field field, boolean display, boolean selected, printer.startBold(); } - printer.startHighlight().print(fieldHeader).stopHighlight() - .printFormat(" = %s", fieldDescription); + printer.startHighlight().print(fieldHeader).stopHighlight().printFormat(" = %s", + fieldDescription); if (display) { printer.stopBold(); @@ -184,7 +183,7 @@ public void showField(int pos, Field field, boolean display, boolean selected, } else { if (display) { printer.print("* ").startBold().printFormat("%s = %s", fieldHeader, fieldDescription) - .stopBold().endOfLine(); + .stopBold().endOfLine(); } else { printer.printFormat(" %s = %s", fieldHeader, fieldDescription).endOfLine(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java index 5002ab8f6c18..218de676d4ec 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,8 @@ import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a description of a command that we can execute in the top screen. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java index f170fc57fde1..bf72e58e7830 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,42 +19,39 @@ import java.util.Arrays; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the help screen. 
*/ @InterfaceAudience.Private public class HelpScreenPresenter { - private static final CommandDescription[] COMMAND_DESCRIPTIONS = new CommandDescription[] { - new CommandDescription("f", "Add/Remove/Order/Sort the fields"), - new CommandDescription("R", "Toggle the sort order (ascending/descending)"), - new CommandDescription("m", "Select mode"), - new CommandDescription("o", "Add a filter with ignoring case"), - new CommandDescription("O", "Add a filter with case sensitive"), - new CommandDescription("^o", "Show the current filters"), - new CommandDescription("=", "Clear the current filters"), - new CommandDescription("i", "Drill down"), - new CommandDescription( - Arrays.asList("up", "down", "left", "right", "pageUp", "pageDown", "home", "end"), - "Scroll the metrics"), - new CommandDescription("d", "Change the refresh delay"), - new CommandDescription("X", "Adjust the field length"), - new CommandDescription("", "Refresh the display"), - new CommandDescription("h", "Display this screen"), - new CommandDescription(Arrays.asList("q", ""), "Quit") - }; + private static final CommandDescription[] COMMAND_DESCRIPTIONS = + new CommandDescription[] { new CommandDescription("f", "Add/Remove/Order/Sort the fields"), + new CommandDescription("R", "Toggle the sort order (ascending/descending)"), + new CommandDescription("m", "Select mode"), + new CommandDescription("o", "Add a filter with ignoring case"), + new CommandDescription("O", "Add a filter with case sensitive"), + new CommandDescription("^o", "Show the current filters"), + new CommandDescription("=", "Clear the current filters"), + new CommandDescription("i", "Drill down"), + new CommandDescription( + Arrays.asList("up", "down", "left", "right", "pageUp", "pageDown", "home", "end"), + "Scroll the metrics"), + new CommandDescription("d", "Change the refresh delay"), + new CommandDescription("X", "Adjust the field length"), + new CommandDescription("", "Refresh the display"), + new CommandDescription("h", "Display this screen"), + new CommandDescription(Arrays.asList("q", ""), "Quit") }; private final HelpScreenView helpScreenView; private final long refreshDelay; private final ScreenView nextScreenView; public HelpScreenPresenter(HelpScreenView helpScreenView, long refreshDelay, - ScreenView nextScreenView) { + ScreenView nextScreenView) { this.helpScreenView = Objects.requireNonNull(helpScreenView); this.refreshDelay = refreshDelay; this.nextScreenView = Objects.requireNonNull(nextScreenView); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java index ccdc15737d17..e8c6a9c7efed 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The help screen. 
*/ @@ -38,7 +37,7 @@ public class HelpScreenView extends AbstractScreenView { private final HelpScreenPresenter helpScreenPresenter; public HelpScreenView(Screen screen, Terminal terminal, long refreshDelay, - ScreenView nextScreenView) { + ScreenView nextScreenView) { super(screen, terminal); this.helpScreenPresenter = new HelpScreenPresenter(this, refreshDelay, nextScreenView); } @@ -68,12 +67,12 @@ public void showHelpScreen(long refreshDelay, CommandDescription[] commandDescri private void showScreenDescription(long refreshDelay) { TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW); printer.startBold().print("Help for Interactive Commands").stopBold().endOfLine(); - printer.print("Refresh delay: ").startBold() - .print((double) refreshDelay / 1000).stopBold().endOfLine(); + printer.print("Refresh delay: ").startBold().print((double) refreshDelay / 1000).stopBold() + .endOfLine(); } private void showCommandDescription(TerminalPrinter terminalPrinter, - CommandDescription commandDescription) { + CommandDescription commandDescription) { terminalPrinter.print(" "); boolean first = true; for (String key : commandDescription.getKeys()) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java index 8cd9879b0ede..cca1a996c879 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the mode screen. */ @@ -44,7 +43,7 @@ public class ModeScreenPresenter { private int currentPosition; public ModeScreenPresenter(ModeScreenView modeScreenView, Mode currentMode, - Consumer resultListener, ScreenView nextScreenView) { + Consumer resultListener, ScreenView nextScreenView) { this.modeScreenView = Objects.requireNonNull(modeScreenView); this.currentMode = Objects.requireNonNull(currentMode); this.resultListener = Objects.requireNonNull(resultListener); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java index bda9853028b7..d5af995e9dd0 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The screen where we can choose the {@link Mode} in the top screen. 
*/ @@ -41,10 +40,10 @@ public class ModeScreenView extends AbstractScreenView { private final ModeScreenPresenter modeScreenPresenter; public ModeScreenView(Screen screen, Terminal terminal, Mode currentMode, - Consumer resultListener, ScreenView nextScreenView) { + Consumer resultListener, ScreenView nextScreenView) { super(screen, terminal); - this.modeScreenPresenter = new ModeScreenPresenter(this, currentMode, resultListener, - nextScreenView); + this.modeScreenPresenter = + new ModeScreenPresenter(this, currentMode, resultListener, nextScreenView); } @Override @@ -102,35 +101,35 @@ public ScreenView handleKeyPress(KeyPress keyPress) { } public void showModeScreen(Mode currentMode, List modes, int currentPosition, - int modeHeaderMaxLength, int modeDescriptionMaxLength) { + int modeHeaderMaxLength, int modeDescriptionMaxLength) { showScreenDescription(currentMode); for (int i = 0; i < modes.size(); i++) { - showMode(i, modes.get(i), i == currentPosition, - modeHeaderMaxLength, modeDescriptionMaxLength); + showMode(i, modes.get(i), i == currentPosition, modeHeaderMaxLength, + modeDescriptionMaxLength); } } private void showScreenDescription(Mode currentMode) { TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW); printer.startBold().print("Mode Management").stopBold().endOfLine(); - printer.print("Current mode: ") - .startBold().print(currentMode.getHeader()).stopBold().endOfLine(); + printer.print("Current mode: ").startBold().print(currentMode.getHeader()).stopBold() + .endOfLine(); printer.print("Select mode followed by ").endOfLine(); } public void showMode(int pos, Mode mode, boolean selected, int modeHeaderMaxLength, - int modeDescriptionMaxLength) { + int modeDescriptionMaxLength) { String modeHeader = String.format("%-" + modeHeaderMaxLength + "s", mode.getHeader()); - String modeDescription = String.format("%-" + modeDescriptionMaxLength + "s", - mode.getDescription()); + String modeDescription = + String.format("%-" + modeDescriptionMaxLength + "s", mode.getDescription()); int row = MODE_START_ROW + pos; TerminalPrinter printer = getTerminalPrinter(row); if (selected) { printer.startHighlight().print(modeHeader).stopHighlight() - .printFormat(" = %s", modeDescription).endOfLine(); + .printFormat(" = %s", modeDescription).endOfLine(); } else { printer.printFormat("%s = %s", modeHeader, modeDescription).endOfLine(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java index 6c6bf1c1b215..3fe4da280542 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the filter display mode. 
*/ @@ -37,7 +36,7 @@ public class FilterDisplayModeScreenPresenter { private final ScreenView nextScreenView; public FilterDisplayModeScreenPresenter(FilterDisplayModeScreenView filterDisplayModeScreenView, - List filters, ScreenView nextScreenView) { + List filters, ScreenView nextScreenView) { this.filterDisplayModeScreenView = Objects.requireNonNull(filterDisplayModeScreenView); this.filters = Collections.unmodifiableList(new ArrayList<>(Objects.requireNonNull(filters))); this.nextScreenView = Objects.requireNonNull(nextScreenView); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java index e85a4b7df42c..9bf16a73c273 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,11 +27,8 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** - * The filter display mode in the top screen. - * - * Exit if Enter key is pressed. + * The filter display mode in the top screen. Exit if Enter key is pressed. */ @InterfaceAudience.Private public class FilterDisplayModeScreenView extends AbstractScreenView { @@ -40,11 +37,11 @@ public class FilterDisplayModeScreenView extends AbstractScreenView { private final FilterDisplayModeScreenPresenter filterDisplayModeScreenPresenter; public FilterDisplayModeScreenView(Screen screen, Terminal terminal, int row, - List filters, ScreenView nextScreenView) { + List filters, ScreenView nextScreenView) { super(screen, terminal); this.row = row; this.filterDisplayModeScreenPresenter = - new FilterDisplayModeScreenPresenter(this, filters, nextScreenView); + new FilterDisplayModeScreenPresenter(this, filters, nextScreenView); } @Override @@ -68,6 +65,6 @@ public void showFilters(List filters) { } getTerminalPrinter(row).startBold().print(" to resume, filters: " + filtersString) - .stopBold().endOfLine(); + .stopBold().endOfLine(); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java index df672e9695d9..98a059faacc7 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents headers for the metrics in the top screen. */ @@ -36,7 +35,7 @@ public Header(Field field, int length) { } public String format() { - return "%" + (field.isLeftJustify() ? "-" : "") + length + "s"; + return "%" + (field.isLeftJustify() ? 
"-" : "") + length + "s"; } public Field getField() { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java index 8ab858b995f3..5551f3bc1701 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the input mode. */ @@ -42,7 +41,7 @@ public class InputModeScreenPresenter { private int historyPosition = -1; public InputModeScreenPresenter(InputModeScreenView inputModeScreenView, String message, - @Nullable List histories, Function resultListener) { + @Nullable List histories, Function resultListener) { this.inputModeScreenView = Objects.requireNonNull(inputModeScreenView); this.message = Objects.requireNonNull(message); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java index ab64a8ade227..311d86611ec7 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * The input mode in the top screen. 
*/ @@ -37,11 +36,11 @@ public class InputModeScreenView extends AbstractScreenView { private final InputModeScreenPresenter inputModeScreenPresenter; public InputModeScreenView(Screen screen, Terminal terminal, int row, String message, - List histories, Function resultListener) { + List histories, Function resultListener) { super(screen, terminal); this.row = row; - this.inputModeScreenPresenter = new InputModeScreenPresenter(this, message, histories, - resultListener); + this.inputModeScreenPresenter = + new InputModeScreenPresenter(this, message, histories, resultListener); } @Override @@ -100,7 +99,7 @@ public ScreenView handleKeyPress(KeyPress keyPress) { public void showInput(String message, String inputString, int cursorPosition) { getTerminalPrinter(row).startBold().print(message).stopBold().print(" ").print(inputString) - .endOfLine(); + .endOfLine(); setCursorPosition(message.length() + 1 + cursorPosition, row); refreshTerminal(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java index 174a15a48432..ec83634566ee 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,8 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** - * The presentation logic for the message mode. - * - * Exit after 2 seconds or if any key is pressed. + * The presentation logic for the message mode. Exit after 2 seconds or if any key is pressed. */ @InterfaceAudience.Private public class MessageModeScreenPresenter { @@ -35,7 +32,7 @@ public class MessageModeScreenPresenter { private final ScreenView nextScreenView; public MessageModeScreenPresenter(MessageModeScreenView messageModeScreenView, String message, - ScreenView nextScreenView) { + ScreenView nextScreenView) { this.messageModeScreenView = Objects.requireNonNull(messageModeScreenView); this.message = Objects.requireNonNull(message); this.nextScreenView = Objects.requireNonNull(nextScreenView); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java index 0dfa388fad0c..9da5d27bbb97 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * The message mode in the top screen. 
*/ @@ -35,11 +34,10 @@ public class MessageModeScreenView extends AbstractScreenView { private final MessageModeScreenPresenter messageModeScreenPresenter; public MessageModeScreenView(Screen screen, Terminal terminal, int row, String message, - ScreenView nextScreenView) { + ScreenView nextScreenView) { super(screen, terminal); this.row = row; - this.messageModeScreenPresenter = - new MessageModeScreenPresenter(this, message, nextScreenView); + this.messageModeScreenPresenter = new MessageModeScreenPresenter(this, message, nextScreenView); } @Override @@ -61,6 +59,6 @@ public ScreenView handleKeyPress(KeyPress keyPress) { public void showMessage(String message) { getTerminalPrinter(row).startHighlight().print(" ").print(message).print(" ").stopHighlight() - .endOfLine(); + .endOfLine(); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java index b95e6f480e6e..4f93dda8ec5f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for paging for the metrics. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java index 03598f66fb48..66d64a7cc66a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the summary of the metrics. */ @@ -37,9 +36,9 @@ public class Summary { private final double averageLoad; private final long aggregateRequestPerSecond; - public Summary(String currentTime, String version, String clusterId, int servers, - int liveServers, int deadServers, int regionCount, int ritCount, double averageLoad, - long aggregateRequestPerSecond) { + public Summary(String currentTime, String version, String clusterId, int servers, int liveServers, + int deadServers, int regionCount, int ritCount, double averageLoad, + long aggregateRequestPerSecond) { this.currentTime = Objects.requireNonNull(currentTime); this.version = Objects.requireNonNull(version); this.clusterId = Objects.requireNonNull(clusterId); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java index 9cbcd18e885f..b312addc6b59 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * The data and business logic for the top screen. */ @@ -66,33 +65,32 @@ public class TopScreenModel { private boolean ascendingSort; public TopScreenModel(Admin admin, Mode initialMode, @Nullable List initialFields, - @Nullable Field initialSortField, @Nullable Boolean initialAscendingSort, - @Nullable List initialFilters) { + @Nullable Field initialSortField, @Nullable Boolean initialAscendingSort, + @Nullable List initialFilters) { this.admin = Objects.requireNonNull(admin); switchMode(Objects.requireNonNull(initialMode), initialSortField, false, initialFields, initialAscendingSort, initialFilters); } public void switchMode(Mode nextMode, boolean keepSortFieldAndSortOrderIfPossible, - List initialFilters) { + List initialFilters) { switchMode(nextMode, null, keepSortFieldAndSortOrderIfPossible, null, null, initialFilters); } public void switchMode(Mode nextMode, Field initialSortField, - boolean keepSortFieldAndSortOrderIfPossible, @Nullable List initialFields, - @Nullable Boolean initialAscendingSort, @Nullable List initialFilters) { + boolean keepSortFieldAndSortOrderIfPossible, @Nullable List initialFields, + @Nullable Boolean initialAscendingSort, @Nullable List initialFilters) { currentMode = nextMode; fieldInfos = Collections.unmodifiableList(new ArrayList<>(currentMode.getFieldInfos())); if (initialFields != null) { List tmp = new ArrayList<>(initialFields); tmp.addAll(currentMode.getFieldInfos().stream().map(FieldInfo::getField) - .filter(f -> !initialFields.contains(f)) - .collect(Collectors.toList())); + .filter(f -> !initialFields.contains(f)).collect(Collectors.toList())); fields = Collections.unmodifiableList(tmp); } else { - fields = Collections.unmodifiableList(currentMode.getFieldInfos().stream() - .map(FieldInfo::getField).collect(Collectors.toList())); + fields = Collections.unmodifiableList( + currentMode.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList())); } if (keepSortFieldAndSortOrderIfPossible) { @@ -146,8 +144,7 @@ public void refreshMetricsData() { } private void refreshSummary(ClusterMetrics clusterMetrics) { - String currentTime = ISO_8601_EXTENDED_TIME_FORMAT - .format(EnvironmentEdgeManager.currentTime()); + String currentTime = ISO_8601_EXTENDED_TIME_FORMAT.format(EnvironmentEdgeManager.currentTime()); String version = clusterMetrics.getHBaseVersion(); String clusterId = clusterMetrics.getClusterId(); int liveServers = clusterMetrics.getLiveServerMetrics().size(); @@ -156,23 +153,22 @@ private void refreshSummary(ClusterMetrics clusterMetrics) { int ritCount = clusterMetrics.getRegionStatesInTransition().size(); double averageLoad = clusterMetrics.getAverageLoad(); long aggregateRequestPerSecond = clusterMetrics.getLiveServerMetrics().entrySet().stream() - .mapToLong(e -> e.getValue().getRequestCountPerSecond()).sum(); + .mapToLong(e -> e.getValue().getRequestCountPerSecond()).sum(); - summary = new Summary(currentTime, version, clusterId, liveServers + deadServers, - liveServers, deadServers, regionCount, ritCount, averageLoad, aggregateRequestPerSecond); + summary = new Summary(currentTime, version, clusterId, liveServers + deadServers, liveServers, + deadServers, regionCount, ritCount, averageLoad, aggregateRequestPerSecond); } private void refreshRecords(ClusterMetrics clusterMetrics) { List records = currentMode.getRecords(clusterMetrics, pushDownFilters); // Filter and 
sort - records = records.stream() - .filter(r -> filters.stream().allMatch(f -> f.execute(r))) - .sorted((recordLeft, recordRight) -> { - FieldValue left = recordLeft.get(currentSortField); - FieldValue right = recordRight.get(currentSortField); - return (ascendingSort ? 1 : -1) * left.compareTo(right); - }).collect(Collectors.toList()); + records = records.stream().filter(r -> filters.stream().allMatch(f -> f.execute(r))) + .sorted((recordLeft, recordRight) -> { + FieldValue left = recordLeft.get(currentSortField); + FieldValue right = recordRight.get(currentSortField); + return (ascendingSort ? 1 : -1) * left.compareTo(right); + }).collect(Collectors.toList()); this.records = Collections.unmodifiableList(records); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java index e4e3caee5940..ffc02142ed03 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the top screen. */ @@ -63,7 +62,7 @@ public class TopScreenPresenter { private long iterations; public TopScreenPresenter(TopScreenView topScreenView, long initialRefreshDelay, - TopScreenModel topScreenModel, @Nullable List initialFields, long numberOfIterations) { + TopScreenModel topScreenModel, @Nullable List initialFields, long numberOfIterations) { this.topScreenView = Objects.requireNonNull(topScreenView); this.refreshDelay = new AtomicLong(initialRefreshDelay); this.topScreenModel = Objects.requireNonNull(topScreenModel); @@ -78,7 +77,7 @@ public void init() { } private void updateTerminalLengthAndPageSize(@Nullable TerminalSize terminalSize, - @Nullable Integer pageSize) { + @Nullable Integer pageSize) { if (terminalSize != null) { terminalLength = terminalSize.getColumns(); } else { @@ -132,8 +131,7 @@ private void adjustFieldLengthIfNeeded() { for (Field f : topScreenModel.getFields()) { if (f.isAutoAdjust()) { int maxLength = topScreenModel.getRecords().stream() - .map(r -> r.get(f).asString().length()) - .max(Integer::compareTo).orElse(0); + .map(r -> r.get(f).asString().length()).max(Integer::compareTo).orElse(0); fieldLengthMap.put(f, Math.max(maxLength, f.getHeader().length())); } } @@ -141,9 +139,8 @@ private void adjustFieldLengthIfNeeded() { } private List
<Header>
    getDisplayedHeaders() { - List displayFields = - topScreenModel.getFields().stream() - .filter(fieldDisplayMap::get).collect(Collectors.toList()); + List displayFields = topScreenModel.getFields().stream().filter(fieldDisplayMap::get) + .collect(Collectors.toList()); if (displayFields.isEmpty()) { horizontalScroll = 0; @@ -231,8 +228,7 @@ public void end() { } private int getHeaderSize() { - return (int) topScreenModel.getFields().stream() - .filter(fieldDisplayMap::get).count(); + return (int) topScreenModel.getFields().stream().filter(fieldDisplayMap::get).count(); } public void switchSortOrder() { @@ -246,18 +242,16 @@ public ScreenView transitionToHelpScreen(Screen screen, Terminal terminal) { public ScreenView transitionToModeScreen(Screen screen, Terminal terminal) { return new ModeScreenView(screen, terminal, topScreenModel.getCurrentMode(), this::switchMode, - topScreenView); + topScreenView); } public ScreenView transitionToFieldScreen(Screen screen, Terminal terminal) { - return new FieldScreenView(screen, terminal, - topScreenModel.getCurrentSortField(), topScreenModel.getFields(), - fieldDisplayMap, - (sortField, fields, fieldDisplayMap) -> { - topScreenModel.setSortFieldAndFields(sortField, fields); - this.fieldDisplayMap.clear(); - this.fieldDisplayMap.putAll(fieldDisplayMap); - }, topScreenView); + return new FieldScreenView(screen, terminal, topScreenModel.getCurrentSortField(), + topScreenModel.getFields(), fieldDisplayMap, (sortField, fields, fieldDisplayMap) -> { + topScreenModel.setSortFieldAndFields(sortField, fields); + this.fieldDisplayMap.clear(); + this.fieldDisplayMap.putAll(fieldDisplayMap); + }, topScreenView); } private void switchMode(Mode nextMode) { @@ -303,42 +297,41 @@ public ScreenView goToMessageMode(Screen screen, Terminal terminal, int row, Str public ScreenView goToInputModeForRefreshDelay(Screen screen, Terminal terminal, int row) { return new InputModeScreenView(screen, terminal, row, - "Change refresh delay from " + (double) refreshDelay.get() / 1000 + " to", null, - (inputString) -> { - if (inputString.isEmpty()) { + "Change refresh delay from " + (double) refreshDelay.get() / 1000 + " to", null, + (inputString) -> { + if (inputString.isEmpty()) { + return topScreenView; + } + + double delay; + try { + delay = Double.parseDouble(inputString); + } catch (NumberFormatException e) { + return goToMessageMode(screen, terminal, row, "Unacceptable floating point"); + } + + refreshDelay.set((long) (delay * 1000)); return topScreenView; - } - - double delay; - try { - delay = Double.parseDouble(inputString); - } catch (NumberFormatException e) { - return goToMessageMode(screen, terminal, row, "Unacceptable floating point"); - } - - refreshDelay.set((long) (delay * 1000)); - return topScreenView; - }); + }); } public ScreenView goToInputModeForFilter(Screen screen, Terminal terminal, int row, - boolean ignoreCase) { + boolean ignoreCase) { return new InputModeScreenView(screen, terminal, row, - "add filter #" + (topScreenModel.getFilters().size() + 1) + - " (" + (ignoreCase ? "ignoring case" : "case sensitive") + ") as: [!]FLD?VAL", - topScreenModel.getFilterHistories(), - (inputString) -> { - if (inputString.isEmpty()) { + "add filter #" + (topScreenModel.getFilters().size() + 1) + " (" + + (ignoreCase ? 
"ignoring case" : "case sensitive") + ") as: [!]FLD?VAL", + topScreenModel.getFilterHistories(), (inputString) -> { + if (inputString.isEmpty()) { + return topScreenView; + } + + if (!topScreenModel.addFilter(inputString, ignoreCase)) { + return goToMessageMode(screen, terminal, row, "Unacceptable filter expression"); + } + + paging.init(); return topScreenView; - } - - if (!topScreenModel.addFilter(inputString, ignoreCase)) { - return goToMessageMode(screen, terminal, row, "Unacceptable filter expression"); - } - - paging.init(); - return topScreenView; - }); + }); } public void clearFilters() { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java index da5c88360d19..123c7217f28e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,10 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; import org.apache.yetus.audience.InterfaceAudience; - /** - * The screen that provides a dynamic real-time view for the HBase metrics. - * - * This shows the metric {@link Summary} and the metric {@link Record}s. The summary and the - * metrics are updated periodically (3 seconds by default). + * The screen that provides a dynamic real-time view for the HBase metrics. This shows the metric + * {@link Summary} and the metric {@link Record}s. The summary and the metrics are updated + * periodically (3 seconds by default). */ @InterfaceAudience.Private public class TopScreenView extends AbstractScreenView { @@ -55,13 +53,14 @@ public class TopScreenView extends AbstractScreenView { private Integer pageSize; public TopScreenView(Screen screen, Terminal terminal, long initialRefreshDelay, Admin admin, - Mode initialMode, @Nullable List initialFields, @Nullable Field initialSortField, - @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, - long numberOfIterations) { + Mode initialMode, @Nullable List initialFields, @Nullable Field initialSortField, + @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, + long numberOfIterations) { super(screen, terminal); - this.topScreenPresenter = new TopScreenPresenter(this, initialRefreshDelay, - new TopScreenModel(admin, initialMode, initialFields, initialSortField, - initialAscendingSort, initialFilters), initialFields, numberOfIterations); + this.topScreenPresenter = new TopScreenPresenter( + this, initialRefreshDelay, new TopScreenModel(admin, initialMode, initialFields, + initialSortField, initialAscendingSort, initialFilters), + initialFields, numberOfIterations); } @Override @@ -223,7 +222,7 @@ public Integer getPageSize() { } public void showTopScreen(Summary summary, List
    headers, List records, - Record selectedRecord) { + Record selectedRecord) { showSummary(summary); clearMessage(); showHeaders(headers); @@ -235,23 +234,17 @@ private void showSummary(Summary summary) { printer.print(String.format("HBase hbtop - %s", summary.getCurrentTime())).endOfLine(); printer.print(String.format("Version: %s", summary.getVersion())).endOfLine(); printer.print(String.format("Cluster ID: %s", summary.getClusterId())).endOfLine(); - printer.print("RegionServer(s): ") - .startBold().print(Integer.toString(summary.getServers())).stopBold() - .print(" total, ") - .startBold().print(Integer.toString(summary.getLiveServers())).stopBold() - .print(" live, ") - .startBold().print(Integer.toString(summary.getDeadServers())).stopBold() - .print(" dead").endOfLine(); - printer.print("RegionCount: ") - .startBold().print(Integer.toString(summary.getRegionCount())).stopBold() - .print(" total, ") - .startBold().print(Integer.toString(summary.getRitCount())).stopBold() - .print(" rit").endOfLine(); - printer.print("Average Cluster Load: ") - .startBold().print(String.format("%.2f", summary.getAverageLoad())).stopBold().endOfLine(); - printer.print("Aggregate Request/s: ") - .startBold().print(Long.toString(summary.getAggregateRequestPerSecond())).stopBold() - .endOfLine(); + printer.print("RegionServer(s): ").startBold().print(Integer.toString(summary.getServers())) + .stopBold().print(" total, ").startBold().print(Integer.toString(summary.getLiveServers())) + .stopBold().print(" live, ").startBold().print(Integer.toString(summary.getDeadServers())) + .stopBold().print(" dead").endOfLine(); + printer.print("RegionCount: ").startBold().print(Integer.toString(summary.getRegionCount())) + .stopBold().print(" total, ").startBold().print(Integer.toString(summary.getRitCount())) + .stopBold().print(" rit").endOfLine(); + printer.print("Average Cluster Load: ").startBold() + .print(String.format("%.2f", summary.getAverageLoad())).stopBold().endOfLine(); + printer.print("Aggregate Request/s: ").startBold() + .print(Long.toString(summary.getAggregateRequestPerSecond())).stopBold().endOfLine(); } private void showRecords(List
    headers, List records, Record selectedRecord) { @@ -264,7 +257,7 @@ private void showRecords(List
    headers, List records, Record sele } List buf = new ArrayList<>(headers.size()); for (int i = 0; i < size; i++) { - if(i < records.size()) { + if (i < records.size()) { Record record = records.get(i); buf.clear(); for (Header header : headers) { @@ -293,16 +286,15 @@ private void showRecords(List
    headers, List records, Record sele } private void showHeaders(List
    headers) { - String header = headers.stream() - .map(h -> String.format(h.format(), h.getField().getHeader())) - .collect(Collectors.joining(" ")); + String header = headers.stream().map(h -> String.format(h.format(), h.getField().getHeader())) + .collect(Collectors.joining(" ")); if (!header.isEmpty()) { header += " "; } getTerminalPrinter(RECORD_HEADER_ROW).startHighlight().print(header).stopHighlight() - .endOfLine(); + .endOfLine(); } private String limitLineLength(String line, int length) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java index 9322aaa8157f..a0fba228e72f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * The attributes of text in the terminal. */ @@ -117,8 +116,8 @@ public boolean equals(Object o) { } Attributes that = (Attributes) o; return bold == that.bold && blink == that.blink && reverse == that.reverse - && underline == that.underline && foregroundColor == that.foregroundColor - && backgroundColor == that.backgroundColor; + && underline == that.underline && foregroundColor == that.foregroundColor + && backgroundColor == that.backgroundColor; } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java index 843a315ab716..e0a1643a34f3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Terminal color definitions. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java index 775ff3d72e6a..11da1b58c6e3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * A 2-d position in 'terminal space'. 
*/ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java index d0be00c5868d..53c4da17e96a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,42 +21,15 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the user pressing a key on the keyboard. */ @InterfaceAudience.Private public class KeyPress { public enum Type { - Character, - Escape, - Backspace, - ArrowLeft, - ArrowRight, - ArrowUp, - ArrowDown, - Insert, - Delete, - Home, - End, - PageUp, - PageDown, - ReverseTab, - Tab, - Enter, - F1, - F2, - F3, - F4, - F5, - F6, - F7, - F8, - F9, - F10, - F11, - F12, - Unknown + Character, Escape, Backspace, ArrowLeft, ArrowRight, ArrowUp, ArrowDown, Insert, Delete, Home, + End, PageUp, PageDown, ReverseTab, Tab, Enter, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, + F12, Unknown } private final Type type; @@ -66,7 +39,7 @@ public enum Type { private final boolean shift; public KeyPress(Type type, @Nullable Character character, boolean alt, boolean ctrl, - boolean shift) { + boolean shift) { this.type = Objects.requireNonNull(type); this.character = character; this.alt = alt; @@ -97,13 +70,8 @@ public boolean isShift() { @Override public String toString() { - return "KeyPress{" + - "type=" + type + - ", character=" + escape(character) + - ", alt=" + alt + - ", ctrl=" + ctrl + - ", shift=" + shift + - '}'; + return "KeyPress{" + "type=" + type + ", character=" + escape(character) + ", alt=" + alt + + ", ctrl=" + ctrl + ", shift=" + shift + '}'; } private String escape(Character character) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java index c834b7515c24..f34cfc298c62 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,19 +21,29 @@ import java.io.Closeable; import org.apache.yetus.audience.InterfaceAudience; - /** * The terminal interface that is an abstraction of terminal screen. 
*/ @InterfaceAudience.Private public interface Terminal extends Closeable { void clear(); + void refresh(); - @Nullable TerminalSize getSize(); - @Nullable TerminalSize doResizeIfNecessary(); - @Nullable KeyPress pollKeyPress(); + + @Nullable + TerminalSize getSize(); + + @Nullable + TerminalSize doResizeIfNecessary(); + + @Nullable + KeyPress pollKeyPress(); + CursorPosition getCursorPosition(); + void setCursorPosition(int column, int row); + void hideCursor(); + TerminalPrinter getTerminalPrinter(int startRow); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java index 66fb55875b0e..52818e42a7d3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * The interface responsible for printing to the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java index f7e55dde7b54..7aea3dac115b 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Terminal dimensions in 2-d space, measured in number of rows and columns. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java index de61477ce33a..6cd9475c6d0f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Color; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a single text cell of the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java index 52f8e374364e..4133d6cb6e4b 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Color; import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for escape sequences. */ @@ -54,7 +53,7 @@ public static String clearRemainingLine() { } public static String color(Color foregroundColor, Color backgroundColor, boolean bold, - boolean reverse, boolean blink, boolean underline) { + boolean reverse, boolean blink, boolean underline) { int foregroundColorValue = getColorValue(foregroundColor, true); int backgroundColorValue = getColorValue(backgroundColor, false); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java index a20222c3eb5b..937dbed63d27 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,10 +37,9 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; - /** - * This generates {@link KeyPress} objects from the given input stream and offers them to the - * given queue. + * This generates {@link KeyPress} objects from the given input stream and offers them to the given + * queue. */ @InterfaceAudience.Private public class KeyPressGenerator { @@ -67,9 +66,9 @@ public KeyPressGenerator(InputStream inputStream, Queue keyPressQueue) input = new InputStreamReader(inputStream, StandardCharsets.UTF_8); this.keyPressQueue = keyPressQueue; - executorService = Executors.newFixedThreadPool(2, new ThreadFactoryBuilder() - .setNameFormat("KeyPressGenerator-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + executorService = Executors.newFixedThreadPool(2, + new ThreadFactoryBuilder().setNameFormat("KeyPressGenerator-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); initState(); } @@ -469,8 +468,8 @@ private boolean isCtrl(int param) { private void offer(KeyPress keyPress) { // Handle ctrl + c - if (keyPress.isCtrl() && keyPress.getType() == KeyPress.Type.Character && - keyPress.getCharacter() == 'c') { + if (keyPress.isCtrl() && keyPress.getType() == KeyPress.Type.Character + && keyPress.getCharacter() == 'c') { System.exit(0); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java index 8752c5fe689a..b00769c0401d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.CursorPosition; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a buffer of the terminal screen for double-buffering. 
*/ @@ -78,8 +77,8 @@ public void flush(PrintWriter output) { flushRow(row, sb, attributes); } - if (cursorVisible && cursorRow >= 0 && cursorColumn >= 0 && cursorRow < rows && - cursorColumn < columns) { + if (cursorVisible && cursorRow >= 0 && cursorColumn >= 0 && cursorRow < rows + && cursorColumn < columns) { sb.append(cursor(true)); sb.append(moveCursor(cursorColumn, cursorRow)); } else { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java index c6b74afcbfa5..a4416a82fac8 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,10 +43,9 @@ import org.slf4j.LoggerFactory; /** - * An implementation of the {@link Terminal} interface for normal display mode. - * - * This implementation produces output intended for human viewing. In particular, it only displays - * one screenful of data. The output contains some escape sequences for formatting. + * An implementation of the {@link Terminal} interface for normal display mode. This implementation + * produces output intended for human viewing. In particular, it only displays one screenful of + * data. The output contains some escape sequences for formatting. */ @InterfaceAudience.Private public class TerminalImpl implements Terminal { @@ -181,8 +180,8 @@ private TerminalSize queryTerminalSize() { } private void sttyRaw() { - doStty("-ignbrk -brkint -parmrk -istrip -inlcr -igncr -icrnl -ixon -opost " + - "-echo -echonl -icanon -isig -iexten -parenb cs8 min 1"); + doStty("-ignbrk -brkint -parmrk -istrip -inlcr -igncr -icrnl -ixon -opost " + + "-echo -echonl -icanon -isig -iexten -parenb cs8 min 1"); } private void sttyCooked() { @@ -190,7 +189,7 @@ private void sttyCooked() { } private String doStty(String sttyOptionsString) { - String [] cmd = {"/bin/sh", "-c", "stty " + sttyOptionsString + " < /dev/tty"}; + String[] cmd = { "/bin/sh", "-c", "stty " + sttyOptionsString + " < /dev/tty" }; try { Process process = Runtime.getRuntime().exec(cmd); @@ -198,14 +197,14 @@ private String doStty(String sttyOptionsString) { String ret; // stdout - try (BufferedReader stdout = new BufferedReader(new InputStreamReader( - process.getInputStream(), StandardCharsets.UTF_8))) { + try (BufferedReader stdout = new BufferedReader( + new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { ret = stdout.readLine(); } // stderr - try (BufferedReader stderr = new BufferedReader(new InputStreamReader( - process.getErrorStream(), StandardCharsets.UTF_8))) { + try (BufferedReader stderr = new BufferedReader( + new InputStreamReader(process.getErrorStream(), StandardCharsets.UTF_8))) { String line = stderr.readLine(); if ((line != null) && (line.length() > 0)) { LOGGER.error("Error output from stty: " + line); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java index 788d26799581..05e0b5611533 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java +++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java index 60f550289e26..ba7a5de40a5c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,11 +25,9 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; /** - * An implementation of the {@link Terminal} interface for batch mode. - * - * This implementation produces output that's more sensible for collecting to a log file or for - * parsing. There is no limit on the number of output lines, and the output doesn't contain any - * escape sequences for formatting. + * An implementation of the {@link Terminal} interface for batch mode. This implementation produces + * output that's more sensible for collecting to a log file or for parsing. There is no limit on the + * number of output lines, and the output doesn't contain any escape sequences for formatting. */ public class BatchTerminal implements Terminal { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java index 60316669daaf..ed216a164926 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java index 339cc40847d3..09722639cada 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,20 +28,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRecord { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRecord.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRecord.class); @Test public void testBuilder() { Record actual1 = Record.builder().put(Field.TABLE, "tableName") - .put(entry(Field.REGION_COUNT, 3)) - .put(Field.REQUEST_COUNT_PER_SECOND, Field.REQUEST_COUNT_PER_SECOND.newValue(100L)) - .build(); + .put(entry(Field.REGION_COUNT, 3)) + .put(Field.REQUEST_COUNT_PER_SECOND, Field.REQUEST_COUNT_PER_SECOND.newValue(100L)).build(); assertThat(actual1.size(), is(3)); assertThat(actual1.get(Field.TABLE).asString(), is("tableName")); @@ -58,11 +55,8 @@ public void testBuilder() { @Test public void testOfEntries() { - Record actual = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 3), - entry(Field.REQUEST_COUNT_PER_SECOND, 100L) - ); + Record actual = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 3), + entry(Field.REQUEST_COUNT_PER_SECOND, 100L)); assertThat(actual.size(), is(3)); assertThat(actual.get(Field.TABLE).asString(), is("tableName")); @@ -72,17 +66,11 @@ public void testOfEntries() { @Test public void testCombine() { - Record record1 = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 3), - entry(Field.REQUEST_COUNT_PER_SECOND, 100L) - ); + Record record1 = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 3), + entry(Field.REQUEST_COUNT_PER_SECOND, 100L)); - Record record2 = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 5), - entry(Field.REQUEST_COUNT_PER_SECOND, 500L) - ); + Record record2 = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 5), + entry(Field.REQUEST_COUNT_PER_SECOND, 500L)); Record actual = record1.combine(record2); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java index 2807fd8ef61e..e4805336a7fc 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,21 +36,19 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRecordFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRecordFilter.class); + HBaseClassTestRule.forClass(TestRecordFilter.class); @Test public void testParseAndBuilder() { testParseAndBuilder("REGION=region1", false, RecordFilter.newBuilder(Field.REGION).equal("region1")); - testParseAndBuilder("REGION=", false, - RecordFilter.newBuilder(Field.REGION).equal("")); + testParseAndBuilder("REGION=", false, RecordFilter.newBuilder(Field.REGION).equal("")); testParseAndBuilder("!REGION=region1", false, RecordFilter.newBuilder(Field.REGION).notEqual("region1")); @@ -132,8 +130,8 @@ private void testToString(String filterString) { public void testFilters() { List records = createTestRecords(); - testFilter(records, "REGION=region", false, - "region1", "region2", "region3", "region4", "region5"); + testFilter(records, "REGION=region", false, "region1", "region2", "region3", "region4", + "region5"); testFilter(records, "!REGION=region", false); testFilter(records, "REGION=Region", false); @@ -148,8 +146,7 @@ public void testFilters() { testFilter(records, "LOCALITY<0.5", false, "region5"); testFilter(records, "%COMP<=50%", false, "region2", "region3", "region4", "region5"); - testFilters(records, Arrays.asList("SF>=100MB", "#REQ/S>100"), false, - "region2", "region5"); + testFilters(records, Arrays.asList("SF>=100MB", "#REQ/S>100"), false, "region2", "region5"); testFilters(records, Arrays.asList("%COMP<=50%", "!#SF>=10"), false, "region4"); testFilters(records, Arrays.asList("!REGION==region1", "LOCALITY<0.5", "#REQ/S>100"), false, "region5"); @@ -159,10 +156,10 @@ public void testFilters() { public void testFiltersIgnoreCase() { List records = createTestRecords(); - testFilter(records, "REGION=Region", true, - "region1", "region2", "region3", "region4", "region5"); - testFilter(records, "REGION=REGION", true, - "region1", "region2", "region3", "region4", "region5"); + testFilter(records, "REGION=Region", true, "region1", "region2", "region3", "region4", + "region5"); + testFilter(records, "REGION=REGION", true, "region1", "region2", "region3", "region4", + "region5"); } private List createTestRecords() { @@ -175,8 +172,8 @@ private List createTestRecords() { return ret; } - private Record createTestRecord(String region, long requestCountPerSecond, - Size storeFileSize, int numStoreFiles, float locality, float compactionProgress) { + private Record createTestRecord(String region, long requestCountPerSecond, Size storeFileSize, + int numStoreFiles, float locality, float compactionProgress) { Record.Builder builder = Record.builder(); builder.put(Field.REGION, region); builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond); @@ -188,18 +185,16 @@ private Record createTestRecord(String region, long requestCountPerSecond, } private void testFilter(List records, String filterString, boolean ignoreCase, - String... expectedRegions) { + String... expectedRegions) { testFilters(records, Collections.singletonList(filterString), ignoreCase, expectedRegions); } private void testFilters(List records, List filterStrings, boolean ignoreCase, - String... 
expectedRegions) { - List actual = - records.stream().filter(r -> filterStrings.stream() - .map(f -> RecordFilter.parse(f, ignoreCase)) - .allMatch(f -> f.execute(r))) - .map(r -> r.get(Field.REGION).asString()) - .collect(Collectors.toList()); + String... expectedRegions) { + List actual = records.stream() + .filter(r -> filterStrings.stream().map(f -> RecordFilter.parse(f, ignoreCase)) + .allMatch(f -> f.execute(r))) + .map(r -> r.get(Field.REGION).asString()).collect(Collectors.toList()); assertThat(actual, hasItems(expectedRegions)); assertThat(actual.size(), is(expectedRegions.length)); } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java index c633e37825ea..7db264d38dd8 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.Bytes; - public final class TestUtils { private TestUtils() { @@ -57,114 +56,92 @@ public static ClusterMetrics createDummyClusterMetrics() { // host1 List regionMetricsList = new ArrayList<>(); List userMetricsList = new ArrayList<>(); - userMetricsList.add(createUserMetrics("FOO",1,2, 4)); - userMetricsList.add(createUserMetrics("BAR",2,3, 3)); - regionMetricsList.add(createRegionMetrics( - "table1,,1.00000000000000000000000000000000.", - 100, 50, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + userMetricsList.add(createUserMetrics("FOO", 1, 2, 4)); + userMetricsList.add(createUserMetrics("BAR", 2, 3, 3)); + regionMetricsList.add(createRegionMetrics("table1,,1.00000000000000000000000000000000.", 100, + 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.1f, 100, 100, "2019-07-22 00:00:00")); - regionMetricsList.add(createRegionMetrics( - "table2,1,2.00000000000000000000000000000001.", - 200, 100, 200, - new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + regionMetricsList.add(createRegionMetrics("table2,1,2.00000000000000000000000000000001.", 200, + 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.2f, 50, 200, "2019-07-22 00:00:01")); - regionMetricsList.add(createRegionMetrics( - "namespace:table3,,3_0001.00000000000000000000000000000002.", - 300, 150, 300, - new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, - new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02")); + regionMetricsList + .add(createRegionMetrics("namespace:table3,,3_0001.00000000000000000000000000000002.", 300, + 150, 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02")); ServerName host1 = ServerName.valueOf("host1.apache.com", 1000, 1); - serverMetricsMap.put(host1, createServerMetrics(host1, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 100, - regionMetricsList, userMetricsList)); + serverMetricsMap.put(host1, createServerMetrics(host1, 100, new Size(100, Size.Unit.MEGABYTE), + new Size(200, Size.Unit.MEGABYTE), 100, 
regionMetricsList, userMetricsList)); // host2 regionMetricsList.clear(); userMetricsList.clear(); - userMetricsList.add(createUserMetrics("FOO",5,7, 3)); - userMetricsList.add(createUserMetrics("BAR",4,8, 4)); - regionMetricsList.add(createRegionMetrics( - "table1,1,4.00000000000000000000000000000003.", - 100, 50, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + userMetricsList.add(createUserMetrics("FOO", 5, 7, 3)); + userMetricsList.add(createUserMetrics("BAR", 4, 8, 4)); + regionMetricsList.add(createRegionMetrics("table1,1,4.00000000000000000000000000000003.", 100, + 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.4f, 50, 100, "2019-07-22 00:00:03")); - regionMetricsList.add(createRegionMetrics( - "table2,,5.00000000000000000000000000000004.", - 200, 100, 200, - new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + regionMetricsList.add(createRegionMetrics("table2,,5.00000000000000000000000000000004.", 200, + 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.5f, 150, 200, "2019-07-22 00:00:04")); - regionMetricsList.add(createRegionMetrics( - "namespace:table3,,6.00000000000000000000000000000005.", - 300, 150, 300, - new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, - new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05")); + regionMetricsList + .add(createRegionMetrics("namespace:table3,,6.00000000000000000000000000000005.", 300, 150, + 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05")); ServerName host2 = ServerName.valueOf("host2.apache.com", 1001, 2); - serverMetricsMap.put(host2, createServerMetrics(host2, 200, - new Size(16, Size.Unit.GIGABYTE), new Size(32, Size.Unit.GIGABYTE), 200, - regionMetricsList, userMetricsList)); + serverMetricsMap.put(host2, createServerMetrics(host2, 200, new Size(16, Size.Unit.GIGABYTE), + new Size(32, Size.Unit.GIGABYTE), 200, regionMetricsList, userMetricsList)); ServerName host3 = ServerName.valueOf("host3.apache.com", 1002, 3); - return ClusterMetricsBuilder.newBuilder() - .setHBaseVersion("3.0.0-SNAPSHOT") - .setClusterId("01234567-89ab-cdef-0123-456789abcdef") - .setLiveServerMetrics(serverMetricsMap) - .setDeadServerNames(Collections.singletonList(host3)) - .setRegionsInTransition(Collections.singletonList( - new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4")) - .setStartKey(new byte [0]) - .setEndKey(new byte [0]) - .setOffline(true) - .setReplicaId(0) - .setRegionId(0) - .setSplit(false) - .build(), - RegionState.State.OFFLINE, host3))) - .build(); + return ClusterMetricsBuilder.newBuilder().setHBaseVersion("3.0.0-SNAPSHOT") + .setClusterId("01234567-89ab-cdef-0123-456789abcdef").setLiveServerMetrics(serverMetricsMap) + .setDeadServerNames(Collections.singletonList(host3)) + .setRegionsInTransition(Collections + .singletonList(new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4")) + .setStartKey(new byte[0]).setEndKey(new byte[0]).setOffline(true).setReplicaId(0) + .setRegionId(0).setSplit(false).build(), RegionState.State.OFFLINE, host3))) + .build(); } private static UserMetrics createUserMetrics(String user, long readRequestCount, long writeRequestCount, long filteredReadRequestsCount) { - return UserMetricsBuilder.newBuilder(Bytes.toBytes(user)).addClientMetris( 
- new UserMetricsBuilder.ClientMetricsImpl("CLIENT_A_" + user, readRequestCount, - writeRequestCount, filteredReadRequestsCount)).addClientMetris( - new UserMetricsBuilder.ClientMetricsImpl("CLIENT_B_" + user, readRequestCount, - writeRequestCount, filteredReadRequestsCount)).build(); + return UserMetricsBuilder.newBuilder(Bytes.toBytes(user)) + .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_A_" + user, + readRequestCount, writeRequestCount, filteredReadRequestsCount)) + .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_B_" + user, + readRequestCount, writeRequestCount, filteredReadRequestsCount)) + .build(); } private static RegionMetrics createRegionMetrics(String regionName, long readRequestCount, - long filteredReadRequestCount, long writeRequestCount, Size storeFileSize, - Size uncompressedStoreFileSize, int storeFileCount, Size memStoreSize, float locality, - long compactedCellCount, long compactingCellCount, String lastMajorCompactionTime) { + long filteredReadRequestCount, long writeRequestCount, Size storeFileSize, + Size uncompressedStoreFileSize, int storeFileCount, Size memStoreSize, float locality, + long compactedCellCount, long compactingCellCount, String lastMajorCompactionTime) { FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss"); try { return RegionMetricsBuilder.newBuilder(Bytes.toBytes(regionName)) - .setReadRequestCount(readRequestCount) - .setFilteredReadRequestCount(filteredReadRequestCount) - .setWriteRequestCount(writeRequestCount).setStoreFileSize(storeFileSize) - .setUncompressedStoreFileSize(uncompressedStoreFileSize).setStoreFileCount(storeFileCount) - .setMemStoreSize(memStoreSize).setDataLocality(locality) - .setCompactedCellCount(compactedCellCount).setCompactingCellCount(compactingCellCount) - .setLastMajorCompactionTimestamp(df.parse(lastMajorCompactionTime).getTime()).build(); + .setReadRequestCount(readRequestCount) + .setFilteredReadRequestCount(filteredReadRequestCount) + .setWriteRequestCount(writeRequestCount).setStoreFileSize(storeFileSize) + .setUncompressedStoreFileSize(uncompressedStoreFileSize).setStoreFileCount(storeFileCount) + .setMemStoreSize(memStoreSize).setDataLocality(locality) + .setCompactedCellCount(compactedCellCount).setCompactingCellCount(compactingCellCount) + .setLastMajorCompactionTimestamp(df.parse(lastMajorCompactionTime).getTime()).build(); } catch (ParseException e) { throw new IllegalArgumentException(e); } } private static ServerMetrics createServerMetrics(ServerName serverName, long reportTimestamp, - Size usedHeapSize, Size maxHeapSize, long requestCountPerSecond, - List regionMetricsList, List userMetricsList) { - - return ServerMetricsBuilder.newBuilder(serverName) - .setReportTimestamp(reportTimestamp) - .setUsedHeapSize(usedHeapSize) - .setMaxHeapSize(maxHeapSize) - .setRequestCountPerSecond(requestCountPerSecond) - .setRegionMetrics(regionMetricsList) - .setUserMetrics(userMetricsList).build(); + Size usedHeapSize, Size maxHeapSize, long requestCountPerSecond, + List regionMetricsList, List userMetricsList) { + + return ServerMetricsBuilder.newBuilder(serverName).setReportTimestamp(reportTimestamp) + .setUsedHeapSize(usedHeapSize).setMaxHeapSize(maxHeapSize) + .setRequestCountPerSecond(requestCountPerSecond).setRegionMetrics(regionMetricsList) + .setUserMetrics(userMetricsList).build(); } public static void assertRecordsInRegionMode(List records) { @@ -174,48 +151,44 @@ public static void assertRecordsInRegionMode(List records) { switch 
(record.get(Field.REGION_NAME).asString()) { case "table1,,1.00000000000000000000000000000000.": assertRecordInRegionMode(record, "default", "1", "", "table1", - "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, - new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f, - "2019-07-22 00:00:00"); + "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f, "2019-07-22 00:00:00"); break; case "table1,1,4.00000000000000000000000000000003.": assertRecordInRegionMode(record, "default", "4", "", "table1", - "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, - new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f, - "2019-07-22 00:00:03"); + "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f, "2019-07-22 00:00:03"); break; case "table2,,5.00000000000000000000000000000004.": assertRecordInRegionMode(record, "default", "5", "", "table2", - "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, - new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f, - "2019-07-22 00:00:04"); + "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f, "2019-07-22 00:00:04"); break; case "table2,1,2.00000000000000000000000000000001.": assertRecordInRegionMode(record, "default", "2", "", "table2", - "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, - new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f, - "2019-07-22 00:00:01"); + "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f, "2019-07-22 00:00:01"); break; case "namespace:table3,,6.00000000000000000000000000000005.": assertRecordInRegionMode(record, "namespace", "6", "", "table3", - "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.6f, "", 300L, 200L, 66.66667f, "2019-07-22 00:00:05"); break; case "namespace:table3,,3_0001.00000000000000000000000000000002.": assertRecordInRegionMode(record, "namespace", "3", "1", "table3", - "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1", 0L, 
0L, 0L, + 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.3f, "", 300L, 100L, 33.333336f, "2019-07-22 00:00:02"); break; @@ -227,12 +200,12 @@ public static void assertRecordsInRegionMode(List records) { } private static void assertRecordInRegionMode(Record record, String namespace, String startCode, - String replicaId, String table, String region, String regionServer, String longRegionServer, - long requestCountPerSecond, long readRequestCountPerSecond, - long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, - Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, - Size memStoreSize, float locality, String startKey, long compactingCellCount, - long compactedCellCount, float compactionProgress, String lastMajorCompactionTime) { + String replicaId, String table, String region, String regionServer, String longRegionServer, + long requestCountPerSecond, long readRequestCountPerSecond, + long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize, + Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, float locality, + String startKey, long compactingCellCount, long compactedCellCount, float compactionProgress, + String lastMajorCompactionTime) { assertThat(record.size(), is(22)); assertThat(record.get(Field.NAMESPACE).asString(), is(namespace)); assertThat(record.get(Field.START_CODE).asString(), is(startCode)); @@ -241,8 +214,7 @@ private static void assertRecordInRegionMode(Record record, String namespace, St assertThat(record.get(Field.REGION).asString(), is(region)); assertThat(record.get(Field.REGION_SERVER).asString(), is(regionServer)); assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -285,12 +257,11 @@ public static void assertRecordsInNamespaceMode(List records) { } private static void assertRecordInNamespaceMode(Record record, long requestCountPerSecond, - long readRequestCountPerSecond, long filteredReadRequestCountPerSecond, - long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, - int numStoreFiles, Size memStoreSize, int regionCount) { + long readRequestCountPerSecond, long filteredReadRequestCountPerSecond, + long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, + int numStoreFiles, Size memStoreSize, int regionCount) { assertThat(record.size(), is(10)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -339,7 +310,7 @@ public static void assertRecordsInUserMode(List records) { for (Record record : records) { String user = record.get(Field.USER).asString(); switch (user) { - //readRequestPerSecond and writeRequestPerSecond will be zero + // readRequestPerSecond and writeRequestPerSecond will be zero // because there is no change or new metrics during 
refresh case "FOO": assertRecordInUserMode(record, 0L, 0L, 0L); @@ -358,8 +329,8 @@ public static void assertRecordsInClientMode(List records) { for (Record record : records) { String client = record.get(Field.CLIENT).asString(); switch (client) { - //readRequestPerSecond and writeRequestPerSecond will be zero - // because there is no change or new metrics during refresh + // readRequestPerSecond and writeRequestPerSecond will be zero + // because there is no change or new metrics during refresh case "CLIENT_A_FOO": assertRecordInClientMode(record, 0L, 0L, 0L); break; @@ -382,11 +353,11 @@ private static void assertRecordInUserMode(Record record, long readRequestCountP long writeCountRequestPerSecond, long filteredReadRequestsCount) { assertThat(record.size(), is(6)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(readRequestCountPerSecond)); + is(readRequestCountPerSecond)); assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(), - is(writeCountRequestPerSecond)); + is(writeCountRequestPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(filteredReadRequestsCount)); + is(filteredReadRequestsCount)); assertThat(record.get(Field.CLIENT_COUNT).asInt(), is(2)); } @@ -394,11 +365,11 @@ private static void assertRecordInClientMode(Record record, long readRequestCoun long writeCountRequestPerSecond, long filteredReadRequestsCount) { assertThat(record.size(), is(6)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(readRequestCountPerSecond)); + is(readRequestCountPerSecond)); assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(), - is(writeCountRequestPerSecond)); + is(writeCountRequestPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(filteredReadRequestsCount)); + is(filteredReadRequestsCount)); assertThat(record.get(Field.USER_COUNT).asInt(), is(1)); } @@ -407,8 +378,7 @@ private static void assertRecordInTableMode(Record record, long requestCountPerS long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount) { assertThat(record.size(), is(11)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -449,15 +419,13 @@ public static void assertRecordsInRegionServerMode(List records) { } private static void assertRecordInRegionServerMode(Record record, String longRegionServer, - long requestCountPerSecond, long readRequestCountPerSecond, - long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, - Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, - Size memStoreSize, int regionCount, Size usedHeapSize, Size maxHeapSize) { + long requestCountPerSecond, long readRequestCountPerSecond, + long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize, + Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount, + Size usedHeapSize, Size maxHeapSize) { assertThat(record.size(), is(13)); - assertThat(record.get(Field.LONG_REGION_SERVER).asString(), - is(longRegionServer)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - 
is(requestCountPerSecond)); + assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java index dcbdb6b9b8ab..f71125ff10e4 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,13 +28,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestFieldValue { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFieldValue.class); + HBaseClassTestRule.forClass(TestFieldValue.class); @Test public void testParseAndAsSomethingMethod() { @@ -101,7 +100,7 @@ public void testParseAndAsSomethingMethod() { // Size FieldValue sizeFieldValue = - new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("100.0MB")); assertThat(sizeFieldValue.asSize(), is(new Size(100, Size.Unit.MEGABYTE))); @@ -122,8 +121,7 @@ public void testParseAndAsSomethingMethod() { } // Percent - FieldValue percentFieldValue = - new FieldValue(100f, FieldValueType.PERCENT); + FieldValue percentFieldValue = new FieldValue(100f, FieldValueType.PERCENT); assertThat(percentFieldValue.asString(), is("100.00%")); assertThat(percentFieldValue.asFloat(), is(100f)); @@ -184,11 +182,11 @@ public void testCompareTo() { // Size FieldValue size100MBFieldValue = - new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); FieldValue size100MBFieldValue2 = - new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); FieldValue size200MBFieldValue = - new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(size100MBFieldValue.compareTo(size100MBFieldValue2), is(0)); assertThat(size200MBFieldValue.compareTo(size100MBFieldValue), is(1)); @@ -228,9 +226,9 @@ public void testPlus() { // Size FieldValue sizeFieldValue = - new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); FieldValue sizeFieldValue2 = - new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.plus(sizeFieldValue2).asString(), is("300.0MB")); assertThat(sizeFieldValue.plus(sizeFieldValue2).asSize(), is(new Size(300, Size.Unit.MEGABYTE))); @@ -255,44 +253,35 @@ public void testCompareToIgnoreCase() { @Test public void testOptimizeSize() { - FieldValue sizeFieldValue = - new 
FieldValue(new Size(1, Size.Unit.BYTE), FieldValueType.SIZE); + FieldValue sizeFieldValue = new FieldValue(new Size(1, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0B")); - sizeFieldValue = - new FieldValue(new Size(1024, Size.Unit.BYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0KB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.BYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0KB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0MB")); - sizeFieldValue = - new FieldValue(new Size(1024 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0GB")); sizeFieldValue = - new FieldValue(new Size(2 * 1024 * 1024, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(2 * 1024 * 1024, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0TB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0PB")); - sizeFieldValue = - new FieldValue(new Size(1024 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1024.0PB")); - sizeFieldValue = - new FieldValue(new Size(1, Size.Unit.PETABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1, Size.Unit.PETABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0PB")); - sizeFieldValue = - new FieldValue(new Size(1024, Size.Unit.PETABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024, Size.Unit.PETABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1024.0PB")); } } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java index 4f0864838532..2a958cc8a857 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,18 +33,22 @@ @Category(SmallTests.class) public class TestClientMode extends TestModeBase { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestClientMode.class); - @Override protected Mode getMode() { + @Override + protected Mode getMode() { return Mode.CLIENT; } - @Override protected void assertRecords(List records) { + @Override + protected void assertRecords(List records) { TestUtils.assertRecordsInClientMode(records); } - @Override protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) { + @Override + protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) { assertThat(drillDownInfo.getNextMode(), is(Mode.USER)); assertThat(drillDownInfo.getInitialFilters().size(), is(1)); String client = currentRecord.get(Field.CLIENT).asString(); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java index a52b332265b0..2d29fc414605 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,23 +22,21 @@ import org.apache.hadoop.hbase.hbtop.TestUtils; import org.junit.Test; - public abstract class TestModeBase { @Test public void testGetRecords() { - List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), - null); + List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), null); assertRecords(records); } protected abstract Mode getMode(); + protected abstract void assertRecords(List records); @Test public void testDrillDown() { - List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), - null); + List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), null); for (Record record : records) { assertDrillDown(record, getMode().drillDown(record)); } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java index 6c498e94eb1d..3a40401b7721 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,13 +30,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestNamespaceMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespaceMode.class); + HBaseClassTestRule.forClass(TestNamespaceMode.class); @Override protected Mode getMode() { @@ -59,8 +58,7 @@ protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo break; case "namespace": - assertThat(drillDownInfo.getInitialFilters().get(0).toString(), - is("NAMESPACE==namespace")); + assertThat(drillDownInfo.getInitialFilters().get(0).toString(), is("NAMESPACE==namespace")); break; default: diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java index b705531475f3..8480ed946aed 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,13 +29,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRegionMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionMode.class); + HBaseClassTestRule.forClass(TestRegionMode.class); @Override protected Mode getMode() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java index cbfc7283fc64..9065ee5da2bf 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,13 +30,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRegionServerMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionServerMode.class); + HBaseClassTestRule.forClass(TestRegionServerMode.class); @Override protected Mode getMode() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java index a73d54ea6bb9..705687d9146d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,13 +26,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRequestCountPerSecond { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRequestCountPerSecond.class); + HBaseClassTestRule.forClass(TestRequestCountPerSecond.class); @Test public void test() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java index f718304671c4..574d9acb1c9f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,13 +30,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestTableMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMode.class); + HBaseClassTestRule.forClass(TestTableMode.class); @Override protected Mode getMode() { @@ -68,8 +67,7 @@ protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo break; case "namespace:table3": - assertThat(drillDownInfo.getInitialFilters().get(0).toString(), - is("NAMESPACE==namespace")); + assertThat(drillDownInfo.getInitialFilters().get(0).toString(), is("NAMESPACE==namespace")); assertThat(drillDownInfo.getInitialFilters().get(1).toString(), is("TABLE==table3")); break; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java index f094c85f5481..05a2b5a8ad00 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestUserMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestUserMode.class); + HBaseClassTestRule.forClass(TestUserMode.class); @Override protected Mode getMode() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java index cbf740430b0a..9c41595e2625 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,14 +44,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestFieldScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFieldScreenPresenter.class); + HBaseClassTestRule.forClass(TestFieldScreenPresenter.class); @Mock private FieldScreenView fieldScreenView; @@ -71,17 +70,15 @@ public class TestFieldScreenPresenter { @Before public void setup() { Field sortField = Mode.REGION.getDefaultSortField(); - fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); - fieldDisplayMap = Mode.REGION.getFieldInfos().stream() - .collect(() -> new EnumMap<>(Field.class), - (r, fi) -> r.put(fi.getField(), fi.isDisplayByDefault()), (r1, r2) -> {}); + fieldDisplayMap = Mode.REGION.getFieldInfos().stream().collect(() -> new EnumMap<>(Field.class), + (r, fi) -> r.put(fi.getField(), fi.isDisplayByDefault()), (r1, r2) -> { + }); - fieldScreenPresenter = - new FieldScreenPresenter(fieldScreenView, sortField, fields, fieldDisplayMap, resultListener, - topScreenView); + fieldScreenPresenter = new FieldScreenPresenter(fieldScreenView, sortField, fields, + fieldDisplayMap, resultListener, topScreenView); for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); @@ -122,8 +119,8 @@ public void testChangeSortField() { inOrder.verify(fieldScreenView).showScreenDescription(eq("LRS")); inOrder.verify(fieldScreenView).showScreenDescription(eq("#READ/S")); inOrder.verify(fieldScreenView).showScreenDescription(eq(fields.get(0).getHeader())); - inOrder.verify(fieldScreenView).showScreenDescription( - eq(fields.get(fields.size() - 1).getHeader())); + inOrder.verify(fieldScreenView) + .showScreenDescription(eq(fields.get(fields.size() - 1).getHeader())); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java index 245bf615e731..4d2e9a092674 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,14 +34,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestHelpScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHelpScreenPresenter.class); + HBaseClassTestRule.forClass(TestHelpScreenPresenter.class); private static final long TEST_REFRESH_DELAY = 5; @@ -55,8 +54,8 @@ public class TestHelpScreenPresenter { @Before public void setup() { - helpScreenPresenter = new HelpScreenPresenter(helpScreenView, TEST_REFRESH_DELAY, - topScreenView); + helpScreenPresenter = + new HelpScreenPresenter(helpScreenView, TEST_REFRESH_DELAY, topScreenView); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java index 1b7e12a6240f..d077792a1de1 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,14 +37,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestModeScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestModeScreenPresenter.class); + HBaseClassTestRule.forClass(TestModeScreenPresenter.class); @Mock private ModeScreenView modeScreenView; @@ -69,7 +68,7 @@ public void testInit() { int modeDescriptionMaxLength = Mode.REGION_SERVER.getDescription().length(); verify(modeScreenView).showModeScreen(eq(Mode.REGION), eq(Arrays.asList(Mode.values())), - eq(Mode.REGION.ordinal()) , eq(modeHeaderMaxLength), eq(modeDescriptionMaxLength)); + eq(Mode.REGION.ordinal()), eq(modeHeaderMaxLength), eq(modeDescriptionMaxLength)); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java index 414b5b0702c5..48aa7af7680d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,14 +39,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestFilterDisplayModeScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFilterDisplayModeScreenPresenter.class); + HBaseClassTestRule.forClass(TestFilterDisplayModeScreenPresenter.class); @Mock private FilterDisplayModeScreenView filterDisplayModeScreenView; @@ -58,24 +57,23 @@ public class TestFilterDisplayModeScreenPresenter { @Before public void setup() { - List fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + List fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); - List filters = new ArrayList<>(); + List filters = new ArrayList<>(); filters.add(RecordFilter.parse("NAMESPACE==namespace", fields, true)); filters.add(RecordFilter.parse("TABLE==table", fields, true)); - filterDisplayModeScreenPresenter = new FilterDisplayModeScreenPresenter( - filterDisplayModeScreenView, filters, topScreenView); + filterDisplayModeScreenPresenter = + new FilterDisplayModeScreenPresenter(filterDisplayModeScreenView, filters, topScreenView); } @Test public void testInit() { filterDisplayModeScreenPresenter.init(); - verify(filterDisplayModeScreenView).showFilters(argThat(filters -> filters.size() == 2 - && filters.get(0).toString().equals("NAMESPACE==namespace") - && filters.get(1).toString().equals("TABLE==table"))); + verify(filterDisplayModeScreenView).showFilters(argThat( + filters -> filters.size() == 2 && filters.get(0).toString().equals("NAMESPACE==namespace") + && filters.get(1).toString().equals("TABLE==table"))); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java index b5e9bb9f3ba6..1044b116bc8e 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,14 +40,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestInputModeScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestInputModeScreenPresenter.class); + HBaseClassTestRule.forClass(TestInputModeScreenPresenter.class); private static final String TEST_INPUT_MESSAGE = "test input message"; @@ -68,8 +67,8 @@ public void setup() { histories.add("history1"); histories.add("history2"); - inputModeScreenPresenter = new InputModeScreenPresenter(inputModeScreenView, - TEST_INPUT_MESSAGE, histories, resultListener); + inputModeScreenPresenter = new InputModeScreenPresenter(inputModeScreenView, TEST_INPUT_MESSAGE, + histories, resultListener); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java index 0acd79c56d2d..018d7d03252f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,14 +32,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestMessageModeScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMessageModeScreenPresenter.class); + HBaseClassTestRule.forClass(TestMessageModeScreenPresenter.class); private static final String TEST_MESSAGE = "test message"; @@ -53,8 +52,8 @@ public class TestMessageModeScreenPresenter { @Before public void setup() { - messageModeScreenPresenter = new MessageModeScreenPresenter(messageModeScreenView, - TEST_MESSAGE, topScreenView); + messageModeScreenPresenter = + new MessageModeScreenPresenter(messageModeScreenView, TEST_MESSAGE, topScreenView); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java index e0c09dfe1673..4b55cb2e787b 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,13 +26,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestPaging { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPaging.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestPaging.class); @Test public void testArrowUpAndArrowDown() { @@ -292,7 +290,7 @@ public void testWhenChangingRecordsSizeDynamically() { } private void assertPaging(Paging paging, int currentPosition, int pageStartPosition, - int pageEndPosition) { + int pageEndPosition) { assertThat(paging.getCurrentPosition(), is(currentPosition)); assertThat(paging.getPageStartPosition(), is(pageStartPosition)); assertThat(paging.getPageEndPosition(), is(pageEndPosition)); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java index 44a8878407a0..a57a15db0aff 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,14 +44,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestTopScreenModel { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTopScreenModel.class); + HBaseClassTestRule.forClass(TestTopScreenModel.class); @Mock private Admin admin; @@ -65,9 +64,8 @@ public void setup() throws IOException { when(admin.getClusterMetrics()).thenReturn(TestUtils.createDummyClusterMetrics()); topScreenModel = new TopScreenModel(admin, Mode.REGION, null, null, null, null); - fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); } @Test @@ -172,9 +170,9 @@ public void testSwitchMode() { assertThat(topScreenModel.getCurrentMode(), is(Mode.TABLE)); // Test for initialFilters - List initialFilters = Arrays.asList( - RecordFilter.parse("TABLE==table1", fields, true), - RecordFilter.parse("TABLE==table2", fields, true)); + List initialFilters = + Arrays.asList(RecordFilter.parse("TABLE==table1", fields, true), + RecordFilter.parse("TABLE==table2", fields, true)); topScreenModel.switchMode(Mode.TABLE, false, initialFilters); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java index d218dd52950d..c10413da9bbb 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,39 +42,28 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestTopScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTopScreenPresenter.class); + HBaseClassTestRule.forClass(TestTopScreenPresenter.class); private static final List TEST_FIELD_INFOS = Arrays.asList( - new FieldInfo(Field.REGION, 10, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.LOCALITY, 10, true) - ); + new FieldInfo(Field.REGION, 10, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.LOCALITY, 10, true)); private static final List TEST_RECORDS = Arrays.asList( - Record.ofEntries( - entry(Field.REGION, "region1"), - entry(Field.REQUEST_COUNT_PER_SECOND, 1L), + Record.ofEntries(entry(Field.REGION, "region1"), entry(Field.REQUEST_COUNT_PER_SECOND, 1L), entry(Field.LOCALITY, 0.3f)), - Record.ofEntries( - entry(Field.REGION, "region2"), - entry(Field.REQUEST_COUNT_PER_SECOND, 2L), + Record.ofEntries(entry(Field.REGION, "region2"), entry(Field.REQUEST_COUNT_PER_SECOND, 2L), entry(Field.LOCALITY, 0.2f)), - Record.ofEntries( - entry(Field.REGION, "region3"), - entry(Field.REQUEST_COUNT_PER_SECOND, 3L), - entry(Field.LOCALITY, 0.1f)) - ); + Record.ofEntries(entry(Field.REGION, "region3"), entry(Field.REQUEST_COUNT_PER_SECOND, 3L), + entry(Field.LOCALITY, 0.1f))); - private static final Summary TEST_SUMMARY = new Summary( - "00:00:01", "3.0.0-SNAPSHOT", "01234567-89ab-cdef-0123-456789abcdef", - 3, 2, 1, 6, 1, 3.0, 300); + private static final Summary TEST_SUMMARY = new Summary("00:00:01", "3.0.0-SNAPSHOT", + "01234567-89ab-cdef-0123-456789abcdef", 3, 2, 1, 6, 1, 3.0, 300); @Mock private TopScreenView topScreenView; @@ -90,13 +79,13 @@ public void setup() { when(topScreenView.getPageSize()).thenReturn(100); when(topScreenModel.getFieldInfos()).thenReturn(TEST_FIELD_INFOS); - when(topScreenModel.getFields()).thenReturn(TEST_FIELD_INFOS.stream() - .map(FieldInfo::getField).collect(Collectors.toList())); + when(topScreenModel.getFields()).thenReturn( + TEST_FIELD_INFOS.stream().map(FieldInfo::getField).collect(Collectors.toList())); when(topScreenModel.getRecords()).thenReturn(TEST_RECORDS); when(topScreenModel.getSummary()).thenReturn(TEST_SUMMARY); - topScreenPresenter = new TopScreenPresenter(topScreenView, 3000, topScreenModel, - null, Long.MAX_VALUE); + topScreenPresenter = + new TopScreenPresenter(topScreenView, 3000, topScreenModel, null, Long.MAX_VALUE); } @Test @@ -104,8 +93,8 @@ public void testRefresh() { topScreenPresenter.init(); topScreenPresenter.refresh(true); - verify(topScreenView).showTopScreen(argThat(this::assertSummary), - argThat(this::assertHeaders), argThat(this::assertRecords), + verify(topScreenView).showTopScreen(argThat(this::assertSummary), argThat(this::assertHeaders), + argThat(this::assertRecords), argThat(selectedRecord -> assertSelectedRecord(selectedRecord, 0))); } @@ -199,21 +188,20 @@ private void verifyHorizontalScrolling(InOrder inOrder, int expectedHeaderCount) private boolean assertSummary(Summary actual) { return actual.getCurrentTime().equals(TEST_SUMMARY.getCurrentTime()) - && actual.getVersion().equals(TEST_SUMMARY.getVersion()) - && actual.getClusterId().equals(TEST_SUMMARY.getClusterId()) - && actual.getServers() == TEST_SUMMARY.getServers() - && 
actual.getLiveServers() == TEST_SUMMARY.getLiveServers() - && actual.getDeadServers() == TEST_SUMMARY.getDeadServers() - && actual.getRegionCount() == TEST_SUMMARY.getRegionCount() - && actual.getRitCount() == TEST_SUMMARY.getRitCount() - && actual.getAverageLoad() == TEST_SUMMARY.getAverageLoad() - && actual.getAggregateRequestPerSecond() == TEST_SUMMARY.getAggregateRequestPerSecond(); + && actual.getVersion().equals(TEST_SUMMARY.getVersion()) + && actual.getClusterId().equals(TEST_SUMMARY.getClusterId()) + && actual.getServers() == TEST_SUMMARY.getServers() + && actual.getLiveServers() == TEST_SUMMARY.getLiveServers() + && actual.getDeadServers() == TEST_SUMMARY.getDeadServers() + && actual.getRegionCount() == TEST_SUMMARY.getRegionCount() + && actual.getRitCount() == TEST_SUMMARY.getRitCount() + && actual.getAverageLoad() == TEST_SUMMARY.getAverageLoad() + && actual.getAggregateRequestPerSecond() == TEST_SUMMARY.getAggregateRequestPerSecond(); } private boolean assertHeaders(List
<Header> actual) { - List<Header>
    expected = - TEST_FIELD_INFOS.stream().map(fi -> new Header(fi.getField(), fi.getDefaultLength())) - .collect(Collectors.toList()); + List<Header>
    expected = TEST_FIELD_INFOS.stream() + .map(fi -> new Header(fi.getField(), fi.getDefaultLength())).collect(Collectors.toList()); if (actual.size() != expected.size()) { return false; @@ -250,8 +238,9 @@ private boolean assertSelectedRecord(Record actual, int expectedSelectedRecodeIn } private boolean assertRecord(Record actual, Record expected) { - return actual.get(Field.REGION).equals(expected.get(Field.REGION)) && actual - .get(Field.REQUEST_COUNT_PER_SECOND).equals(expected.get(Field.REQUEST_COUNT_PER_SECOND)) - && actual.get(Field.LOCALITY).equals(expected.get(Field.LOCALITY)); + return actual.get(Field.REGION).equals(expected.get(Field.REGION)) + && actual.get(Field.REQUEST_COUNT_PER_SECOND) + .equals(expected.get(Field.REQUEST_COUNT_PER_SECOND)) + && actual.get(Field.LOCALITY).equals(expected.get(Field.LOCALITY)); } } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java index 304c92b8497e..3458e7ee31b4 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; - public final class TestCursor { private TestCursor() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java index ebfe56981c49..6295cd0166aa 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; - public final class TestKeyPress { private TestKeyPress() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java index 212395fecaf5..a6a79c4e67de 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,10 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; - public final class TestTerminalPrinter { private TestTerminalPrinter() { @@ -38,8 +36,8 @@ public static void main(String[] args) throws Exception { printer.print("Normal string").endOfLine(); printer.startHighlight().print("Highlighted string").stopHighlight().endOfLine(); printer.startBold().print("Bold string").stopBold().endOfLine(); - printer.startHighlight().startBold().print("Highlighted bold string") - .stopBold().stopHighlight().endOfLine(); + printer.startHighlight().startBold().print("Highlighted bold string").stopBold() + .stopHighlight().endOfLine(); printer.endOfLine(); printer.print("Press any key to finish").endOfLine(); diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml index e36989ba83ef..34eb95d8da4a 100644 --- a/hbase-http/pom.xml +++ b/hbase-http/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-http Apache HBase - HTTP HTTP functionality for HBase Servers - - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - default - - false - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - package - - jar - test-jar - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -262,6 +162,106 @@ test + + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + default + + false + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + jar + test-jar + + package + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + build-with-jdk11 @@ -286,10 +286,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -331,7 +331,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -341,6 +343,7 @@ org.apache.hadoop hadoop-minicluster + test com.google.guava @@ -355,7 +358,6 @@ jsr311-api - 
test com.fasterxml.jackson.core @@ -369,10 +371,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources @@ -431,7 +433,7 @@ - + diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java index 215ff37e3bf5..833207793352 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -27,7 +26,6 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.yetus.audience.InterfaceAudience; @@ -38,11 +36,12 @@ public class AdminAuthorizedFilter implements Filter { private Configuration conf; private AccessControlList adminsAcl; - @Override public void init(FilterConfig filterConfig) throws ServletException { - adminsAcl = (AccessControlList) filterConfig.getServletContext().getAttribute( - HttpServer.ADMINS_ACL); - conf = (Configuration) filterConfig.getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + @Override + public void init(FilterConfig filterConfig) throws ServletException { + adminsAcl = + (AccessControlList) filterConfig.getServletContext().getAttribute(HttpServer.ADMINS_ACL); + conf = (Configuration) filterConfig.getServletContext() + .getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); } @Override @@ -61,5 +60,7 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha chain.doFilter(request, response); } - @Override public void destroy() {} + @Override + public void destroy() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java index 10156f43b445..c1dab3027b08 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,10 +37,9 @@ public class AdminAuthorizedServlet extends DefaultServlet { @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { + throws ServletException, IOException { // Do the authorization - if (HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (HttpServer.hasAdministratorAccess(getServletContext(), request, response)) { // Authorization is done. Just call super. 
super.doGet(request, response); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java index 0f0c7150c417..5dce5960d071 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -28,10 +27,8 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -46,7 +43,7 @@ public void init(FilterConfig filterConfig) throws ServletException { @Override public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { HttpServletResponse httpRes = (HttpServletResponse) res; httpRes.addHeader("X-Frame-Options", filterConfig.getInitParameter("xframeoptions")); chain.doFilter(req, res); @@ -58,8 +55,8 @@ public void destroy() { public static Map getDefaultParameters(Configuration conf) { Map params = new HashMap<>(); - params.put("xframeoptions", conf.get("hbase.http.filter.xframeoptions.mode", - DEFAULT_XFRAMEOPTIONS)); + params.put("xframeoptions", + conf.get("hbase.http.filter.xframeoptions.mode", DEFAULT_XFRAMEOPTIONS)); return params; } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java index 5869ce3f92e8..ed41bab54cd5 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,9 +32,10 @@ public interface FilterContainer { * @param parameters a map from parameter names to initial values */ void addFilter(String name, String classname, Map parameters); + /** - * Add a global filter to the container - This global filter will be - * applied to all available web contexts. + * Add a global filter to the container - This global filter will be applied to all available web + * contexts. 
* @param name filter name * @param classname filter class name * @param parameters a map from parameter names to initial values diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java index 7e8595e7d043..917fe24291f0 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java index ad584c9d1166..0cfc97c69894 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -43,12 +42,12 @@ public final class HtmlQuoting { * @return does the string contain any of the active html characters? */ public static boolean needsQuoting(byte[] data, int off, int len) { - if (off+len > data.length) { - throw new IllegalStateException("off+len=" + off+len + " should be lower" - + " than data length=" + data.length); + if (off + len > data.length) { + throw new IllegalStateException( + "off+len=" + off + len + " should be lower" + " than data length=" + data.length); } - for(int i=off; i< off+len; ++i) { - switch(data[i]) { + for (int i = off; i < off + len; ++i) { + switch (data[i]) { case '&': case '<': case '>': @@ -72,20 +71,19 @@ public static boolean needsQuoting(String str) { return false; } byte[] bytes = Bytes.toBytes(str); - return needsQuoting(bytes, 0 , bytes.length); + return needsQuoting(bytes, 0, bytes.length); } /** - * Quote all of the active HTML characters in the given string as they - * are added to the buffer. + * Quote all of the active HTML characters in the given string as they are added to the buffer. 
* @param output the stream to write the output to * @param buffer the byte array to take the characters from * @param off the index of the first byte to quote * @param len the number of bytes to quote */ public static void quoteHtmlChars(OutputStream output, byte[] buffer, int off, int len) - throws IOException { - for(int i=off; i < off+len; i++) { + throws IOException { + for (int i = off; i < off + len; i++) { switch (buffer[i]) { case '&': output.write(ampBytes); @@ -140,6 +138,7 @@ public static String quoteHtmlChars(String item) { public static OutputStream quoteOutputStream(final OutputStream out) { return new OutputStream() { private byte[] data = new byte[1]; + @Override public void write(byte[] data, int off, int len) throws IOException { quoteHtmlChars(out, data, off, len); @@ -198,12 +197,11 @@ public static String unquoteHtmlChars(String item) { buffer.append('"'); next += 6; } else { - int end = item.indexOf(';', next)+1; + int end = item.indexOf(';', next) + 1; if (end == 0) { end = len; } - throw new IllegalArgumentException("Bad HTML quoting for " + - item.substring(next,end)); + throw new IllegalArgumentException("Bad HTML quoting for " + item.substring(next, end)); } posn = next; next = item.indexOf('&', posn); @@ -216,15 +214,16 @@ public static void main(String[] args) { if (args.length == 0) { throw new IllegalArgumentException("Please provide some arguments"); } - for(String arg:args) { + for (String arg : args) { System.out.println("Original: " + arg); String quoted = quoteHtmlChars(arg); - System.out.println("Quoted: "+ quoted); + System.out.println("Quoted: " + quoted); String unquoted = unquoteHtmlChars(quoted); System.out.println("Unquoted: " + unquoted); System.out.println(); } } - private HtmlQuoting() {} + private HtmlQuoting() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java index 52c9133dcf63..07de1f7d2963 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import org.apache.hadoop.conf.Configuration; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -29,10 +28,9 @@ @InterfaceStability.Unstable public class HttpConfig { private Policy policy; + public enum Policy { - HTTP_ONLY, - HTTPS_ONLY, - HTTP_AND_HTTPS; + HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS; public Policy fromString(String value) { if (HTTPS_ONLY.name().equalsIgnoreCase(value)) { @@ -53,8 +51,7 @@ public boolean isHttpsEnabled() { } public HttpConfig(final Configuration conf) { - boolean sslEnabled = conf.getBoolean( - ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, + boolean sslEnabled = conf.getBoolean(ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, ServerConfigurationKeys.HBASE_SSL_ENABLED_DEFAULT); policy = sslEnabled ? 
Policy.HTTPS_ONLY : Policy.HTTP_ONLY; if (sslEnabled) { diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java index d3e8005eb9c0..017c45b4f590 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ public final class HttpRequestLog { private static final ImmutableMap SERVER_TO_COMPONENT = - ImmutableMap.of("master", "master", "region", "regionserver"); + ImmutableMap.of("master", "master", "region", "regionserver"); public static RequestLog getRequestLog(String name) { String lookup = SERVER_TO_COMPONENT.get(name); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java index f8c04bac9715..a8ae50aa14d6 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,6 +68,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.org.eclipse.jetty.http.HttpVersion; @@ -96,12 +97,10 @@ import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; /** - * Create a Jetty embedded server to answer http requests. The primary goal - * is to serve up status information for the server. - * There are three contexts: - * "/logs/" -> points to the log directory - * "/static/" -> points to common static files (src/webapps/static) - * "/" -> the jsp server code from (src/webapps/<name>) + * Create a Jetty embedded server to answer http requests. The primary goal is to serve up status + * information for the server. 
There are three contexts: "/logs/" -> points to the log directory + * "/static/" -> points to common static files (src/webapps/static) "/" -> the jsp server code + * from (src/webapps/<name>) */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -111,14 +110,12 @@ public class HttpServer implements FilterContainer { private static final int DEFAULT_MAX_HEADER_SIZE = 64 * 1024; // 64K - static final String FILTER_INITIALIZERS_PROPERTY - = "hbase.http.filter.initializers"; + static final String FILTER_INITIALIZERS_PROPERTY = "hbase.http.filter.initializers"; static final String HTTP_MAX_THREADS = "hbase.http.max.threads"; public static final String HTTP_UI_AUTHENTICATION = "hbase.security.authentication.ui"; static final String HTTP_AUTHENTICATION_PREFIX = "hbase.security.authentication."; - static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX - + "spnego."; + static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX + "spnego."; static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX = "kerberos.principal"; public static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY = HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX; @@ -128,12 +125,12 @@ public class HttpServer implements FilterContainer { static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules"; public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY = HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX; - static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable"; + static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = + "kerberos.proxyuser.enable"; public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY = HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX; - public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false; - static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX = - "signature.secret.file"; + public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false; + static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX = "signature.secret.file"; public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY = HTTP_AUTHENTICATION_PREFIX + HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX; public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY = @@ -162,11 +159,11 @@ public class HttpServer implements FilterContainer { private static final class ListenerInfo { /** - * Boolean flag to determine whether the HTTP server should clean up the - * listener in stop(). + * Boolean flag to determine whether the HTTP server should clean up the listener in stop(). */ private final boolean isManaged; private final ServerConnector listener; + private ListenerInfo(boolean isManaged, ServerConnector listener) { this.isManaged = isManaged; this.listener = listener; @@ -241,12 +238,9 @@ public static class Builder { /** * Add an endpoint that the HTTP server should listen to. - * - * @param endpoint - * the endpoint of that the HTTP server should listen to. The - * scheme specifies the protocol (i.e. HTTP / HTTPS), the host - * specifies the binding address, and the port specifies the - * listening port. Unspecified or zero port means that the server + * @param endpoint the endpoint of that the HTTP server should listen to. 
The scheme specifies + * the protocol (i.e. HTTP / HTTPS), the host specifies the binding address, and the + * port specifies the listening port. Unspecified or zero port means that the server * can listen to any port. */ public Builder addEndpoint(URI endpoint) { @@ -255,9 +249,9 @@ public Builder addEndpoint(URI endpoint) { } /** - * Set the hostname of the http server. The host name is used to resolve the - * _HOST field in Kerberos principals. The hostname of the first listener - * will be used if the name is unspecified. + * Set the hostname of the http server. The host name is used to resolve the _HOST field in + * Kerberos principals. The hostname of the first listener will be used if the name is + * unspecified. */ public Builder hostName(String hostName) { this.hostName = hostName; @@ -284,8 +278,7 @@ public Builder keyPassword(String password) { } /** - * Specify whether the server should authorize the client in SSL - * connections. + * Specify whether the server should authorize the client in SSL connections. */ public Builder needsClientAuth(boolean value) { this.needsClientAuth = value; @@ -297,7 +290,7 @@ public Builder needsClientAuth(boolean value) { * @deprecated Since 0.99.0. Use {@link #setAppDir(String)} instead. */ @Deprecated - public Builder setName(String name){ + public Builder setName(String name) { this.name = name; return this; } @@ -307,7 +300,7 @@ public Builder setName(String name){ * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead. */ @Deprecated - public Builder setBindAddress(String bindAddress){ + public Builder setBindAddress(String bindAddress) { this.bindAddress = bindAddress; return this; } @@ -393,7 +386,7 @@ public HttpServer build() throws IOException { try { endpoints.add(0, new URI("http", "", bindAddress, port, "", "", "")); } catch (URISyntaxException e) { - throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e); + throw new HadoopIllegalArgumentException("Invalid endpoint: " + e); } } @@ -447,11 +440,11 @@ public HttpServer build() throws IOException { LOG.debug("Excluded SSL Cipher List:" + excludeCiphers); } - listener = new ServerConnector(server.webServer, new SslConnectionFactory(sslCtxFactory, - HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig)); + listener = new ServerConnector(server.webServer, + new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), + new HttpConnectionFactory(httpsConfig)); } else { - throw new HadoopIllegalArgumentException( - "unknown scheme for endpoint:" + ep); + throw new HadoopIllegalArgumentException("unknown scheme for endpoint:" + ep); } // default settings for connector @@ -482,90 +475,83 @@ public HttpServer build() throws IOException { */ @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort) - throws IOException { + throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } /** - * Create a status server on the given port. Allows you to specify the - * path specifications that this server will be serving so that they will be - * added to the filters properly. - * + * Create a status server on the given port. Allows you to specify the path specifications that + * this server will be serving so that they will be added to the filters properly. 
* @param name The name of the server * @param bindAddress The address for this server * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. + * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port. * @param conf Configuration - * @param pathSpecs Path specifications that this httpserver will be serving. - * These will be added to any filters. + * @param pathSpecs Path specifications that this httpserver will be serving. These will be added + * to any filters. * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, String[] pathSpecs) throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + String[] pathSpecs) throws IOException { this(name, bindAddress, port, findPort, conf, null, pathSpecs); } /** - * Create a status server on the given port. - * The jsp scripts are taken from src/webapps/<name>. + * Create a status server on the given port. The jsp scripts are taken from + * src/webapps/<name>. * @param name The name of the server * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. + * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port. * @param conf Configuration * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf) throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf) + throws IOException { this(name, bindAddress, port, findPort, conf, null, null); } /** - * Creates a status server on the given port. The JSP scripts are taken - * from src/webapp<name>. - * + * Creates a status server on the given port. The JSP scripts are taken from + * src/webapp<name>. * @param name the name of the server * @param bindAddress the address for this server * @param port the port to use on the server * @param findPort whether the server should start at the given port and increment by 1 until it - * finds a free port + * finds a free port * @param conf the configuration to use * @param adminsAcl {@link AccessControlList} of the admins * @throws IOException when creating the server fails * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, AccessControlList adminsAcl) - throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + AccessControlList adminsAcl) throws IOException { this(name, bindAddress, port, findPort, conf, adminsAcl, null); } /** - * Create a status server on the given port. - * The jsp scripts are taken from src/webapps/<name>. + * Create a status server on the given port. The jsp scripts are taken from + * src/webapps/<name>. * @param name The name of the server * @param bindAddress The address for this server * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. 
+ * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port. * @param conf Configuration * @param adminsAcl {@link AccessControlList} of the admins - * @param pathSpecs Path specifications that this httpserver will be serving. - * These will be added to any filters. + * @param pathSpecs Path specifications that this httpserver will be serving. These will be added + * to any filters. * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, AccessControlList adminsAcl, - String[] pathSpecs) throws IOException { - this(new Builder().setName(name) - .addEndpoint(URI.create("http://" + bindAddress + ":" + port)) - .setFindPort(findPort).setConf(conf).setACL(adminsAcl) - .setPathSpec(pathSpecs)); + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + AccessControlList adminsAcl, String[] pathSpecs) throws IOException { + this(new Builder().setName(name).addEndpoint(URI.create("http://" + bindAddress + ":" + port)) + .setFindPort(findPort).setConf(conf).setACL(adminsAcl).setPathSpec(pathSpecs)); } private HttpServer(final Builder b) throws IOException { @@ -573,12 +559,11 @@ private HttpServer(final Builder b) throws IOException { this.logDir = b.logDir; final String appDir = getWebAppsPath(b.name); - int maxThreads = b.conf.getInt(HTTP_MAX_THREADS, 16); // If HTTP_MAX_THREADS is less than or equal to 0, QueueThreadPool() will use the // default value (currently 200). - QueuedThreadPool threadPool = maxThreads <= 0 ? new QueuedThreadPool() - : new QueuedThreadPool(maxThreads); + QueuedThreadPool threadPool = + maxThreads <= 0 ? new QueuedThreadPool() : new QueuedThreadPool(maxThreads); threadPool.setDaemon(true); this.webServer = new Server(threadPool); @@ -590,9 +575,8 @@ private HttpServer(final Builder b) throws IOException { this.webServer.setHandler(buildGzipHandler(this.webServer.getHandler())); } - private void initializeWebServer(String name, String hostName, - Configuration conf, String[] pathSpecs, HttpServer.Builder b) - throws FileNotFoundException, IOException { + private void initializeWebServer(String name, String hostName, Configuration conf, + String[] pathSpecs, HttpServer.Builder b) throws FileNotFoundException, IOException { Preconditions.checkNotNull(webAppContext); @@ -623,20 +607,18 @@ private void initializeWebServer(String name, String hostName, addGlobalFilter("safety", QuotingInputFilter.class.getName(), null); - addGlobalFilter("clickjackingprevention", - ClickjackingPreventionFilter.class.getName(), - ClickjackingPreventionFilter.getDefaultParameters(conf)); + addGlobalFilter("clickjackingprevention", ClickjackingPreventionFilter.class.getName(), + ClickjackingPreventionFilter.getDefaultParameters(conf)); HttpConfig httpConfig = new HttpConfig(conf); - addGlobalFilter("securityheaders", - SecurityHeadersFilter.class.getName(), - SecurityHeadersFilter.getDefaultParameters(conf, httpConfig.isSecure())); + addGlobalFilter("securityheaders", SecurityHeadersFilter.class.getName(), + SecurityHeadersFilter.getDefaultParameters(conf, httpConfig.isSecure())); // But security needs to be enabled prior to adding the other servlets if (authenticationEnabled) { initSpnego(conf, hostName, b.usernameConfKey, b.keytabConfKey, b.kerberosNameRulesKey, - b.signatureSecretFileKey); + b.signatureSecretFileKey); } final FilterInitializer[] initializers = getFilterInitializers(conf); @@ -662,16 
+644,16 @@ private void addManagedListener(ServerConnector connector) { listeners.add(new ListenerInfo(true, connector)); } - private static WebAppContext createWebAppContext(String name, - Configuration conf, AccessControlList adminsAcl, final String appDir) { + private static WebAppContext createWebAppContext(String name, Configuration conf, + AccessControlList adminsAcl, final String appDir) { WebAppContext ctx = new WebAppContext(); ctx.setDisplayName(name); ctx.setContextPath("/"); ctx.setWar(appDir + "/" + name); ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf); // for org.apache.hadoop.metrics.MetricsServlet - ctx.getServletContext().setAttribute( - org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf); + ctx.getServletContext().setAttribute(org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, + conf); ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl); addNoCacheFilter(ctx); return ctx; @@ -681,11 +663,12 @@ private static WebAppContext createWebAppContext(String name, * Construct and configure an instance of {@link GzipHandler}. With complex * multi-{@link WebAppContext} configurations, it's easiest to apply this handler directly to the * instance of {@link Server} near the end of its configuration, something like + * *
    -   *    Server server = new Server();
    -   *    //...
    -   *    server.setHandler(buildGzipHandler(server.getHandler()));
    -   *    server.start();
    +   * Server server = new Server();
    +   * // ...
    +   * server.setHandler(buildGzipHandler(server.getHandler()));
    +   * server.start();
        * </pre>
    */ public static GzipHandler buildGzipHandler(final Handler wrapped) { @@ -696,7 +679,7 @@ public static GzipHandler buildGzipHandler(final Handler wrapped) { private static void addNoCacheFilter(WebAppContext ctxt) { defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(), - Collections. emptyMap(), new String[] { "/*" }); + Collections. emptyMap(), new String[] { "/*" }); } /** Get an array of FilterConfiguration specified in the conf */ @@ -711,8 +694,8 @@ private static FilterInitializer[] getFilterInitializers(Configuration conf) { } FilterInitializer[] initializers = new FilterInitializer[classes.length]; - for(int i = 0; i < classes.length; i++) { - initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(classes[i]); + for (int i = 0; i < classes.length; i++) { + initializers[i] = (FilterInitializer) ReflectionUtils.newInstance(classes[i]); } return initializers; } @@ -721,8 +704,8 @@ private static FilterInitializer[] getFilterInitializers(Configuration conf) { * Add default apps. * @param appDir The application directory */ - protected void addDefaultApps(ContextHandlerCollection parent, - final String appDir, Configuration conf) { + protected void addDefaultApps(ContextHandlerCollection parent, final String appDir, + Configuration conf) { // set up the context for "/logs/" if "hadoop.log.dir" property is defined. String logDir = this.logDir; if (logDir == null) { @@ -733,12 +716,10 @@ protected void addDefaultApps(ContextHandlerCollection parent, logContext.addServlet(AdminAuthorizedServlet.class, "/*"); logContext.setResourceBase(logDir); - if (conf.getBoolean( - ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, - ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) { + if (conf.getBoolean(ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, + ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) { Map params = logContext.getInitParams(); - params.put( - "org.mortbay.jetty.servlet.Default.aliases", "true"); + params.put("org.mortbay.jetty.servlet.Default.aliases", "true"); } logContext.setDisplayName("logs"); setContextAttributes(logContext, conf); @@ -761,13 +742,13 @@ private void setContextAttributes(ServletContextHandler context, Configuration c /** * Add default servlets. */ - protected void addDefaultServlets( - ContextHandlerCollection contexts, Configuration conf) throws IOException { + protected void addDefaultServlets(ContextHandlerCollection contexts, Configuration conf) + throws IOException { // set up default servlets addPrivilegedServlet("stacks", "/stacks", StackServlet.class); addPrivilegedServlet("logLevel", "/logLevel", LogLevel.Servlet.class); - // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's - // MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2. + // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's + // MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2. // Remove when we drop support for hbase on hadoop2.x. try { Class clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet"); @@ -796,14 +777,14 @@ protected void addDefaultServlets( genCtx.setDisplayName("prof-output-hbase"); } else { addUnprivilegedServlet("prof", "/prof", ProfileServlet.DisabledServlet.class); - LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + - "not specified. 
Disabling /prof endpoint."); + LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + + "not specified. Disabling /prof endpoint."); } } /** - * Set a value in the webapp context. These values are available to the jsp - * pages as "application.getAttribute(name)". + * Set a value in the webapp context. These values are available to the jsp pages as + * "application.getAttribute(name)". * @param name The name of the attribute * @param value The value of the attribute */ @@ -816,10 +797,8 @@ public void setAttribute(String name, Object value) { * @param packageName The Java package name containing the Jersey resource. * @param pathSpec The path spec for the servlet */ - public void addJerseyResourcePackage(final String packageName, - final String pathSpec) { - LOG.info("addJerseyResourcePackage: packageName=" + packageName - + ", pathSpec=" + pathSpec); + public void addJerseyResourcePackage(final String packageName, final String pathSpec) { + LOG.info("addJerseyResourcePackage: packageName=" + packageName + ", pathSpec=" + pathSpec); ResourceConfig application = new ResourceConfig().packages(packageName); final ServletHolder sh = new ServletHolder(new ServletContainer(application)); @@ -828,8 +807,8 @@ public void addJerseyResourcePackage(final String packageName, /** * Adds a servlet in the server that any user can access. This method differs from - * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user - * can interact with the servlet added by this method. + * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user can + * interact with the servlet added by this method. * @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet * @param clazz The servlet class @@ -841,8 +820,8 @@ public void addUnprivilegedServlet(String name, String pathSpec, /** * Adds a servlet in the server that any user can access. This method differs from - * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user - * can interact with the servlet added by this method. + * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user can + * interact with the servlet added by this method. * @param pathSpec The path spec for the servlet * @param holder The servlet holder */ @@ -862,9 +841,8 @@ public void addPrivilegedServlet(String name, String pathSpec, /** * Adds a servlet in the server that only administrators can access. This method differs from - * {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those - * authenticated user who are identified as administrators can interact with the servlet added by - * this method. + * {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those authenticated user + * who are identified as administrators can interact with the servlet added by this method. */ public void addPrivilegedServlet(String pathSpec, ServletHolder holder) { addServletWithAuth(pathSpec, holder, true); @@ -875,8 +853,8 @@ public void addPrivilegedServlet(String pathSpec, ServletHolder holder) { * directly, but invoke it via {@link #addUnprivilegedServlet(String, String, Class)} or * {@link #addPrivilegedServlet(String, String, Class)}. 
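(A usage sketch for the two public registration paths documented above; the servlet class and path specs are hypothetical and not part of this change.)

    package org.apache.hadoop.hbase.http;

    import java.io.IOException;
    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    public class StatusServletSketch extends HttpServlet {
      private static final long serialVersionUID = 1L;

      @Override
      protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
        resp.setContentType("text/plain; charset=UTF-8");
        resp.getWriter().write("ok");
      }

      // Hypothetical wiring against an already-built HttpServer: the first registration is reachable
      // by any authenticated user, the second only by users who pass the administrator checks below.
      static void register(HttpServer httpServer) {
        httpServer.addUnprivilegedServlet("status", "/status", StatusServletSketch.class);
        httpServer.addPrivilegedServlet("status-admin", "/status-admin", StatusServletSketch.class);
      }
    }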
*/ - void addServletWithAuth(String name, String pathSpec, - Class clazz, boolean requireAuthz) { + void addServletWithAuth(String name, String pathSpec, Class clazz, + boolean requireAuthz) { addInternalServlet(name, pathSpec, clazz, requireAuthz); addFilterPathMapping(pathSpec, webAppContext); } @@ -892,20 +870,17 @@ void addServletWithAuth(String pathSpec, ServletHolder holder, boolean requireAu } /** - * Add an internal servlet in the server, specifying whether or not to - * protect with Kerberos authentication. - * Note: This method is to be used for adding servlets that facilitate - * internal communication and not for user facing functionality. For - * servlets added using this method, filters (except internal Kerberos - * filters) are not enabled. - * + * Add an internal servlet in the server, specifying whether or not to protect with Kerberos + * authentication. Note: This method is to be used for adding servlets that facilitate internal + * communication and not for user facing functionality. For servlets added using this method, + * filters (except internal Kerberos filters) are not enabled. * @param name The name of the {@link Servlet} (can be passed as null) * @param pathSpec The path spec for the {@link Servlet} * @param clazz The {@link Servlet} class * @param requireAuthz Require Kerberos authenticate to access servlet */ - void addInternalServlet(String name, String pathSpec, - Class clazz, boolean requireAuthz) { + void addInternalServlet(String name, String pathSpec, Class clazz, + boolean requireAuthz) { ServletHolder holder = new ServletHolder(clazz); if (name != null) { holder.setName(name); @@ -914,13 +889,10 @@ void addInternalServlet(String name, String pathSpec, } /** - * Add an internal servlet in the server, specifying whether or not to - * protect with Kerberos authentication. - * Note: This method is to be used for adding servlets that facilitate - * internal communication and not for user facing functionality. For - * servlets added using this method, filters (except internal Kerberos - * filters) are not enabled. - * + * Add an internal servlet in the server, specifying whether or not to protect with Kerberos + * authentication. Note: This method is to be used for adding servlets that facilitate internal + * communication and not for user facing functionality. For servlets added using this method, + * filters (except internal Kerberos filters) are not enabled. 
* @param pathSpec The path spec for the {@link Servlet} * @param holder The object providing the {@link Servlet} instance * @param requireAuthz Require Kerberos authenticate to access servlet @@ -944,15 +916,15 @@ void addInternalServlet(String pathSpec, ServletHolder holder, boolean requireAu public void addFilter(String name, String classname, Map parameters) { final String[] USER_FACING_URLS = { "*.html", "*.jsp" }; defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS); - LOG.info("Added filter " + name + " (class=" + classname - + ") to context " + webAppContext.getDisplayName()); + LOG.info("Added filter " + name + " (class=" + classname + ") to context " + + webAppContext.getDisplayName()); final String[] ALL_URLS = { "/*" }; for (Map.Entry e : defaultContexts.entrySet()) { if (e.getValue()) { ServletContextHandler handler = e.getKey(); defineFilter(handler, name, classname, parameters, ALL_URLS); - LOG.info("Added filter " + name + " (class=" + classname - + ") to context " + handler.getDisplayName()); + LOG.info("Added filter " + name + " (class=" + classname + ") to context " + + handler.getDisplayName()); } } filterNames.add(name); @@ -971,8 +943,8 @@ public void addGlobalFilter(String name, String classname, Map p /** * Define a filter for a context and set up default url mappings. */ - public static void defineFilter(ServletContextHandler handler, String name, - String classname, Map parameters, String[] urls) { + public static void defineFilter(ServletContextHandler handler, String name, String classname, + Map parameters, String[] urls) { FilterHolder holder = new FilterHolder(); holder.setName(name); holder.setClassName(classname); @@ -991,9 +963,8 @@ public static void defineFilter(ServletContextHandler handler, String name, * @param pathSpec The path spec * @param webAppCtx The WebApplicationContext to add to */ - protected void addFilterPathMapping(String pathSpec, - WebAppContext webAppCtx) { - for(String name : filterNames) { + protected void addFilterPathMapping(String pathSpec, WebAppContext webAppCtx) { + for (String name : filterNames) { FilterMapping fmap = new FilterMapping(); fmap.setPathSpec(pathSpec); fmap.setFilterName(name); @@ -1011,7 +982,7 @@ public Object getAttribute(String name) { return webAppContext.getAttribute(name); } - public WebAppContext getWebAppContext(){ + public WebAppContext getWebAppContext() { return this.webAppContext; } @@ -1029,8 +1000,7 @@ protected String getWebAppsPath(String webapps, String appName) throws FileNotFo URL url = getClass().getClassLoader().getResource(webapps + "/" + appName); if (url == null) { - throw new FileNotFoundException(webapps + "/" + appName - + " not found in CLASSPATH"); + throw new FileNotFoundException(webapps + "/" + appName + " not found in CLASSPATH"); } String urlString = url.toString(); @@ -1044,14 +1014,13 @@ protected String getWebAppsPath(String webapps, String appName) throws FileNotFo */ @Deprecated public int getPort() { - return ((ServerConnector)webServer.getConnectors()[0]).getLocalPort(); + return ((ServerConnector) webServer.getConnectors()[0]).getLocalPort(); } /** * Get the address that corresponds to a particular connector. - * - * @return the corresponding address for the connector, or null if there's no - * such connector or the connector is not bounded. + * @return the corresponding address for the connector, or null if there's no such connector or + * the connector is not bounded. 
*/ public InetSocketAddress getConnectorAddress(int index) { Preconditions.checkArgument(index >= 0); @@ -1060,7 +1029,7 @@ public InetSocketAddress getConnectorAddress(int index) { return null; } - ServerConnector c = (ServerConnector)webServer.getConnectors()[index]; + ServerConnector c = (ServerConnector) webServer.getConnectors()[index]; if (c.getLocalPort() == -1 || c.getLocalPort() == -2) { // -1 if the connector has not been opened // -2 if it has been closed @@ -1079,14 +1048,14 @@ public void setThreads(int min, int max) { pool.setMaxThreads(max); } - private void initSpnego(Configuration conf, String hostName, - String usernameConfKey, String keytabConfKey, String kerberosNameRuleKey, - String signatureSecretKeyFileKey) throws IOException { + private void initSpnego(Configuration conf, String hostName, String usernameConfKey, + String keytabConfKey, String kerberosNameRuleKey, String signatureSecretKeyFileKey) + throws IOException { Map params = new HashMap<>(); String principalInConf = getOrEmptyString(conf, usernameConfKey); if (!principalInConf.isEmpty()) { - params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, SecurityUtil.getServerPrincipal( - principalInConf, hostName)); + params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, + SecurityUtil.getServerPrincipal(principalInConf, hostName)); } String httpKeytab = getOrEmptyString(conf, keytabConfKey); if (!httpKeytab.isEmpty()) { @@ -1098,30 +1067,30 @@ private void initSpnego(Configuration conf, String hostName, } String signatureSecretKeyFile = getOrEmptyString(conf, signatureSecretKeyFileKey); if (!signatureSecretKeyFile.isEmpty()) { - params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX, - signatureSecretKeyFile); + params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX, signatureSecretKeyFile); } params.put(AuthenticationFilter.AUTH_TYPE, "kerberos"); // Verify that the required options were provided - if (isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) || - isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX))) { - throw new IllegalArgumentException(usernameConfKey + " and " - + keytabConfKey + " are both required in the configuration " - + "to enable SPNEGO/Kerberos authentication for the Web UI"); + if (isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) + || isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX))) { + throw new IllegalArgumentException( + usernameConfKey + " and " + keytabConfKey + " are both required in the configuration " + + "to enable SPNEGO/Kerberos authentication for the Web UI"); } if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY, - HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) { - //Copy/rename standard hadoop proxyuser settings to filter - for(Map.Entry proxyEntry : - conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { - params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(), - proxyEntry.getValue()); - } - addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), params); + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) { + // Copy/rename standard hadoop proxyuser settings to filter + for (Map.Entry proxyEntry : conf + .getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { + params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(), + proxyEntry.getValue()); + } + addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), + params); } else { - 
addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params); + addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params); } } @@ -1136,8 +1105,7 @@ private boolean isMissing(String value) { } /** - * Extracts the value for the given key from the configuration of returns a string of - * zero length. + * Extracts the value for the given key from the configuration of returns a string of zero length. */ private String getOrEmptyString(Configuration conf, String key) { if (null == key) { @@ -1166,8 +1134,7 @@ public void start() throws IOException { Handler[] handlers = webServer.getHandlers(); for (int i = 0; i < handlers.length; i++) { if (handlers[i].isFailed()) { - throw new IOException( - "Problem in starting http server. Server handlers failed"); + throw new IOException("Problem in starting http server. Server handlers failed"); } } // Make sure there are no errors initializing the context. @@ -1176,14 +1143,13 @@ public void start() throws IOException { // Have to stop the webserver, or else its non-daemon threads // will hang forever. webServer.stop(); - throw new IOException("Unable to initialize WebAppContext", - unavailableException); + throw new IOException("Unable to initialize WebAppContext", unavailableException); } } catch (IOException e) { throw e; } catch (InterruptedException e) { - throw (IOException) new InterruptedIOException( - "Interrupted while starting HTTP server").initCause(e); + throw (IOException) new InterruptedIOException("Interrupted while starting HTTP server") + .initCause(e); } catch (Exception e) { throw new IOException("Problem starting http server", e); } @@ -1216,12 +1182,12 @@ void openListeners() throws Exception { LOG.info("Jetty bound to port " + listener.getLocalPort()); break; } catch (IOException ex) { - if(!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) { + if (!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) { throw ex; } if (port == 0 || !findPort) { - BindException be = new BindException("Port in use: " - + listener.getHost() + ":" + listener.getPort()); + BindException be = + new BindException("Port in use: " + listener.getHost() + ":" + listener.getPort()); be.initCause(ex); throw be; } @@ -1246,9 +1212,7 @@ public void stop() throws Exception { try { li.listener.close(); } catch (Exception e) { - LOG.error( - "Error while stopping listener for webapp" - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping listener for webapp" + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } } @@ -1258,16 +1222,15 @@ public void stop() throws Exception { webAppContext.clearAttributes(); webAppContext.stop(); } catch (Exception e) { - LOG.error("Error while stopping web app context for webapp " - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping web app context for webapp " + webAppContext.getDisplayName(), + e); exception = addMultiException(exception, e); } try { webServer.stop(); } catch (Exception e) { - LOG.error("Error while stopping web server for webapp " - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping web server for webapp " + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } @@ -1278,7 +1241,7 @@ public void stop() throws Exception { } private MultiException addMultiException(MultiException exception, Exception e) { - if(exception == null){ + if (exception == null) { exception = new MultiException(); } exception.add(e); @@ 
-1307,8 +1270,8 @@ public String toString() { return "Inactive HttpServer"; } else { StringBuilder sb = new StringBuilder("HttpServer (") - .append(isAlive() ? STATE_DESCRIPTION_ALIVE : - STATE_DESCRIPTION_NOT_LIVE).append("), listening at:"); + .append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE) + .append("), listening at:"); for (ListenerInfo li : listeners) { ServerConnector l = li.listener; sb.append(l.getHost()).append(":").append(l.getPort()).append("/,"); @@ -1320,29 +1283,26 @@ public String toString() { /** * Checks the user has privileges to access to instrumentation servlets. *

    - * If hadoop.security.instrumentation.requires.admin is set to FALSE - * (default value) it always returns TRUE. - *

    - * If hadoop.security.instrumentation.requires.admin is set to TRUE - * it will check that if the current user is in the admin ACLS. If the user is - * in the admin ACLs it returns TRUE, otherwise it returns FALSE. + * If hadoop.security.instrumentation.requires.admin is set to FALSE (default value) + * it always returns TRUE. + *

    + *

    + * If hadoop.security.instrumentation.requires.admin is set to TRUE it will check + * that if the current user is in the admin ACLS. If the user is in the admin ACLs it returns + * TRUE, otherwise it returns FALSE. *
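(For reference, the check documented above is driven by the Hadoop key it names; a small sketch, not part of the patch.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class InstrumentationAccessSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // false (the default): isInstrumentationAccessAllowed() lets every caller through.
        // true: only users that pass hasAdministratorAccess() may reach the instrumentation servlets.
        conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
        System.out.println(
          conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false));
      }
    }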

    - * * @param servletContext the servlet context. * @param request the servlet request. * @param response the servlet response. * @return TRUE/FALSE based on the logic decribed above. */ - public static boolean isInstrumentationAccessAllowed( - ServletContext servletContext, HttpServletRequest request, - HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + public static boolean isInstrumentationAccessAllowed(ServletContext servletContext, + HttpServletRequest request, HttpServletResponse response) throws IOException { + Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); boolean access = true; - boolean adminAccess = conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, - false); + boolean adminAccess = conf + .getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false); if (adminAccess) { access = hasAdministratorAccess(servletContext, request, response); } @@ -1350,20 +1310,17 @@ public static boolean isInstrumentationAccessAllowed( } /** - * Does the user sending the HttpServletRequest has the administrator ACLs? If - * it isn't the case, response will be modified to send an error to the user. - * + * Does the user sending the HttpServletRequest has the administrator ACLs? If it isn't the case, + * response will be modified to send an error to the user. * @param servletContext the {@link ServletContext} to use * @param request the {@link HttpServletRequest} to check * @param response used to send the error response if user does not have admin access. * @return true if admin-authorized, false otherwise * @throws IOException if an unauthenticated or unauthorized user tries to access the page */ - public static boolean hasAdministratorAccess( - ServletContext servletContext, HttpServletRequest request, - HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + public static boolean hasAdministratorAccess(ServletContext servletContext, + HttpServletRequest request, HttpServletResponse response) throws IOException { + Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); AccessControlList acl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); return hasAdministratorAccess(conf, acl, request, response); @@ -1372,22 +1329,20 @@ public static boolean hasAdministratorAccess( public static boolean hasAdministratorAccess(Configuration conf, AccessControlList acl, HttpServletRequest request, HttpServletResponse response) throws IOException { // If there is no authorization, anybody has administrator access. 
- if (!conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { + if (!conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { return true; } String remoteUser = request.getRemoteUser(); if (remoteUser == null) { response.sendError(HttpServletResponse.SC_UNAUTHORIZED, - "Unauthenticated users are not " + - "authorized to access this page."); + "Unauthenticated users are not " + "authorized to access this page."); return false; } if (acl != null && !userHasAdministratorAccess(acl, remoteUser)) { - response.sendError(HttpServletResponse.SC_FORBIDDEN, "User " - + remoteUser + " is unauthorized to access this page."); + response.sendError(HttpServletResponse.SC_FORBIDDEN, + "User " + remoteUser + " is unauthorized to access this page."); return false; } @@ -1395,46 +1350,39 @@ public static boolean hasAdministratorAccess(Configuration conf, AccessControlLi } /** - * Get the admin ACLs from the given ServletContext and check if the given - * user is in the ACL. - * + * Get the admin ACLs from the given ServletContext and check if the given user is in the ACL. * @param servletContext the context containing the admin ACL. * @param remoteUser the remote user to check for. - * @return true if the user is present in the ACL, false if no ACL is set or - * the user is not present + * @return true if the user is present in the ACL, false if no ACL is set or the user is not + * present */ public static boolean userHasAdministratorAccess(ServletContext servletContext, String remoteUser) { - AccessControlList adminsAcl = (AccessControlList) servletContext - .getAttribute(ADMINS_ACL); + AccessControlList adminsAcl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); return userHasAdministratorAccess(adminsAcl, remoteUser); } public static boolean userHasAdministratorAccess(AccessControlList acl, String remoteUser) { - UserGroupInformation remoteUserUGI = - UserGroupInformation.createRemoteUser(remoteUser); + UserGroupInformation remoteUserUGI = UserGroupInformation.createRemoteUser(remoteUser); return acl != null && acl.isUserAllowed(remoteUserUGI); } /** - * A very simple servlet to serve up a text representation of the current - * stack traces. It both returns the stacks to the caller and logs them. - * Currently the stack traces are done sequentially rather than exactly the - * same data. + * A very simple servlet to serve up a text representation of the current stack traces. It both + * returns the stacks to the caller and logs them. Currently the stack traces are done + * sequentially rather than exactly the same data. 
*/ public static class StackServlet extends HttpServlet { private static final long serialVersionUID = -6284183679759467039L; @Override public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { + throws ServletException, IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } response.setContentType("text/plain; charset=UTF-8"); - try (PrintStream out = new PrintStream( - response.getOutputStream(), false, "UTF-8")) { + try (PrintStream out = new PrintStream(response.getOutputStream(), false, "UTF-8")) { Threads.printThreadInfo(out, ""); out.flush(); } @@ -1443,9 +1391,9 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) } /** - * A Servlet input filter that quotes all HTML active characters in the - * parameter names and values. The goal is to quote the characters to make - * all of the servlets resistant to cross-site scripting attacks. + * A Servlet input filter that quotes all HTML active characters in the parameter names and + * values. The goal is to quote the characters to make all of the servlets resistant to cross-site + * scripting attacks. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public static class QuotingInputFilter implements Filter { @@ -1453,6 +1401,7 @@ public static class QuotingInputFilter implements Filter { public static class RequestQuoter extends HttpServletRequestWrapper { private final HttpServletRequest rawRequest; + public RequestQuoter(HttpServletRequest rawRequest) { super(rawRequest); this.rawRequest = rawRequest; @@ -1464,8 +1413,8 @@ public RequestQuoter(HttpServletRequest rawRequest) { @Override public Enumeration getParameterNames() { return new Enumeration() { - private Enumeration rawIterator = - rawRequest.getParameterNames(); + private Enumeration rawIterator = rawRequest.getParameterNames(); + @Override public boolean hasMoreElements() { return rawIterator.hasMoreElements(); @@ -1483,8 +1432,8 @@ public String nextElement() { */ @Override public String getParameter(String name) { - return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter( - HtmlQuoting.unquoteHtmlChars(name))); + return HtmlQuoting + .quoteHtmlChars(rawRequest.getParameter(HtmlQuoting.unquoteHtmlChars(name))); } @Override @@ -1495,7 +1444,7 @@ public String[] getParameterValues(String name) { return null; } String[] result = new String[unquoteValue.length]; - for(int i=0; i < result.length; ++i) { + for (int i = 0; i < result.length; ++i) { result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]); } return result; @@ -1505,10 +1454,10 @@ public String[] getParameterValues(String name) { public Map getParameterMap() { Map result = new HashMap<>(); Map raw = rawRequest.getParameterMap(); - for (Map.Entry item: raw.entrySet()) { + for (Map.Entry item : raw.entrySet()) { String[] rawValue = item.getValue(); String[] cookedValue = new String[rawValue.length]; - for(int i=0; i< rawValue.length; ++i) { + for (int i = 0; i < rawValue.length; ++i) { cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]); } result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue); @@ -1517,18 +1466,16 @@ public Map getParameterMap() { } /** - * Quote the url so that users specifying the HOST HTTP header - * can't inject attacks. + * Quote the url so that users specifying the HOST HTTP header can't inject attacks. 
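(A direct look at the quoting that the RequestQuoter wrapper above applies to parameter names, values and the request URL; a sketch, and the exact escaped output is whatever HtmlQuoting produces for HTML-active characters.)

    package org.apache.hadoop.hbase.http;

    public class QuotingSketch {
      public static void main(String[] args) {
        // Same static helper the filter uses when wrapping incoming requests.
        String raw = "<script>alert('x')</script>";
        System.out.println(HtmlQuoting.quoteHtmlChars(raw));
      }
    }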
*/ @Override - public StringBuffer getRequestURL(){ + public StringBuffer getRequestURL() { String url = rawRequest.getRequestURL().toString(); return new StringBuffer(HtmlQuoting.quoteHtmlChars(url)); } /** - * Quote the server name so that users specifying the HOST HTTP header - * can't inject attacks. + * Quote the server name so that users specifying the HOST HTTP header can't inject attacks. */ @Override public String getServerName() { @@ -1546,12 +1493,9 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, - ServletResponse response, - FilterChain chain - ) throws IOException, ServletException { - HttpServletRequestWrapper quoted = - new RequestQuoter((HttpServletRequest) request); + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + HttpServletRequestWrapper quoted = new RequestQuoter((HttpServletRequest) request); HttpServletResponse httpResponse = (HttpServletResponse) response; String mime = inferMimeType(request); @@ -1570,11 +1514,11 @@ public void doFilter(ServletRequest request, } /** - * Infer the mime type for the response based on the extension of the request - * URI. Returns null if unknown. + * Infer the mime type for the response based on the extension of the request URI. Returns null + * if unknown. */ private String inferMimeType(ServletRequest request) { - String path = ((HttpServletRequest)request).getRequestURI(); + String path = ((HttpServletRequest) request).getRequestURI(); ServletContext context = config.getServletContext(); return context.getMimeType(path); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java index 94269719aa42..8ff9d9691924 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,5 +59,6 @@ public static void constrainHttpMethods(ServletContextHandler ctxHandler, ctxHandler.setSecurityHandler(securityHandler); } - private HttpServerUtil() {} + private HttpServerUtil() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index 8b13e2b22053..06949929baad 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -27,16 +27,15 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder; /** - * Create a Jetty embedded server to answer http requests. The primary goal - * is to serve up status information for the server. - * There are three contexts: - * "/stacks/" -> points to stack trace - * "/static/" -> points to common static files (src/hbase-webapps/static) - * "/" -> the jsp server code from (src/hbase-webapps/<name>) + * Create a Jetty embedded server to answer http requests. 
The primary goal is to serve up status + * information for the server. There are three contexts: "/stacks/" -> points to stack trace + * "/static/" -> points to common static files (src/hbase-webapps/static) "/" -> the jsp + * server code from (src/hbase-webapps/<name>) */ @InterfaceAudience.Private public class InfoServer { @@ -44,48 +43,47 @@ public class InfoServer { private final org.apache.hadoop.hbase.http.HttpServer httpServer; /** - * Create a status server on the given port. - * The jsp scripts are taken from src/hbase-webapps/name. + * Create a status server on the given port. The jsp scripts are taken from + * src/hbase-webapps/name. * @param name The name of the server * @param bindAddress address to bind to * @param port The port to use on the server * @param findPort whether the server should start at the given port and increment by 1 until it - * finds a free port. + * finds a free port. * @param c the {@link Configuration} to build the server * @throws IOException if getting one of the password fails or the server cannot be created */ public InfoServer(String name, String bindAddress, int port, boolean findPort, final Configuration c) throws IOException { HttpConfig httpConfig = new HttpConfig(c); - HttpServer.Builder builder = - new org.apache.hadoop.hbase.http.HttpServer.Builder(); + HttpServer.Builder builder = new org.apache.hadoop.hbase.http.HttpServer.Builder(); - builder.setName(name).addEndpoint(URI.create(httpConfig.getSchemePrefix() + - HostAndPort.fromParts(bindAddress,port).toString())). - setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c); + builder.setName(name) + .addEndpoint(URI.create( + httpConfig.getSchemePrefix() + HostAndPort.fromParts(bindAddress, port).toString())) + .setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c); String logDir = System.getProperty("hbase.log.dir"); if (logDir != null) { builder.setLogDir(logDir); } if (httpConfig.isSecure()) { - builder.keyPassword(HBaseConfiguration - .getPassword(c, "ssl.server.keystore.keypassword", null)) - .keyStore(c.get("ssl.server.keystore.location"), - HBaseConfiguration.getPassword(c,"ssl.server.keystore.password", null), - c.get("ssl.server.keystore.type", "jks")) - .trustStore(c.get("ssl.server.truststore.location"), - HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null), - c.get("ssl.server.truststore.type", "jks")); + builder + .keyPassword(HBaseConfiguration.getPassword(c, "ssl.server.keystore.keypassword", null)) + .keyStore(c.get("ssl.server.keystore.location"), + HBaseConfiguration.getPassword(c, "ssl.server.keystore.password", null), + c.get("ssl.server.keystore.type", "jks")) + .trustStore(c.get("ssl.server.truststore.location"), + HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null), + c.get("ssl.server.truststore.type", "jks")); builder.excludeCiphers(c.get("ssl.server.exclude.cipher.list")); } // Enable SPNEGO authentication if ("kerberos".equalsIgnoreCase(c.get(HttpServer.HTTP_UI_AUTHENTICATION, null))) { builder.setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .setKerberosNameRulesKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY) - .setSignatureSecretFileKey( - HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY) - .setSecurityEnabled(true); + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) + .setKerberosNameRulesKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY) + 
.setSignatureSecretFileKey(HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY) + .setSecurityEnabled(true); // Set an admin ACL on sensitive webUI endpoints AccessControlList acl = buildAdminAcl(c); @@ -95,13 +93,13 @@ public InfoServer(String name, String bindAddress, int port, boolean findPort, } /** - * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI - * which are meant only for administrators. + * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which + * are meant only for administrators. */ AccessControlList buildAdminAcl(Configuration conf) { final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null); - final String adminGroups = conf.get( - HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); + final String adminGroups = + conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); if (userGroups == null && adminGroups == null) { // Backwards compatibility - if the user doesn't have anything set, allow all users in. return new AccessControlList("*", null); @@ -111,17 +109,14 @@ AccessControlList buildAdminAcl(Configuration conf) { /** * Explicitly invoke {@link #addPrivilegedServlet(String, String, Class)} or - * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. - * This method will add a servlet which any authenticated user can access. - * + * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. This method will + * add a servlet which any authenticated user can access. * @deprecated Use {@link #addUnprivilegedServlet(String, String, Class)} or - * {@link #addPrivilegedServlet(String, String, Class)} instead of this - * method which does not state outwardly what kind of authz rules will - * be applied to this servlet. + * {@link #addPrivilegedServlet(String, String, Class)} instead of this method which + * does not state outwardly what kind of authz rules will be applied to this servlet. */ @Deprecated - public void addServlet(String name, String pathSpec, - Class clazz) { + public void addServlet(String name, String pathSpec, Class clazz) { addUnprivilegedServlet(name, pathSpec, clazz); } @@ -130,7 +125,7 @@ public void addServlet(String name, String pathSpec, * @see HttpServer#addUnprivilegedServlet(String, String, Class) */ public void addUnprivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { this.httpServer.addUnprivilegedServlet(name, pathSpec, clazz); } @@ -150,7 +145,7 @@ public void addUnprivilegedServlet(String name, String pathSpec, ServletHolder h * @see HttpServer#addPrivilegedServlet(String, String, Class) */ public void addPrivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { this.httpServer.addPrivilegedServlet(name, pathSpec, clazz); } @@ -175,21 +170,20 @@ public void stop() throws Exception { this.httpServer.stop(); } - /** * Returns true if and only if UI authentication (spnego) is enabled, UI authorization is enabled, * and the requesting user is defined as an administrator. If the UI is set to readonly, this * method always returns false. 
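(Tying the InfoServer pieces together, a configuration sketch that is not part of the change; all values are placeholders and only keys and constants visible in this file are used. The constructed server would normally be started and stopped by the hosting master or region server process.)

    package org.apache.hadoop.hbase.http;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class SecureInfoServerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // SPNEGO for the web UI: InfoServer enables it only when this key is "kerberos".
        conf.set(HttpServer.HTTP_UI_AUTHENTICATION, "kerberos");
        conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY, "HTTP/_HOST@EXAMPLE.COM");  // placeholder
        conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY, "/etc/security/http.keytab");  // placeholder
        // Admin ACL consulted by buildAdminAcl(); when neither key is set, every authenticated
        // user is treated as an admin (backwards compatibility).
        conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, "hbase");                 // placeholder
        conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, "hbase-admins");         // placeholder
        // canUserModifyUI() additionally requires authorization to be on and the UI not read-only.
        conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
        conf.setBoolean("hbase.master.ui.readonly", false);

        InfoServer infoServer = new InfoServer("master", "0.0.0.0", 16010, true, conf);
        System.out.println(infoServer);
      }
    }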
*/ - public static boolean canUserModifyUI( - HttpServletRequest req, ServletContext ctx, Configuration conf) { + public static boolean canUserModifyUI(HttpServletRequest req, ServletContext ctx, + Configuration conf) { if (conf.getBoolean("hbase.master.ui.readonly", false)) { return false; } String remoteUser = req.getRemoteUser(); - if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && - conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && - remoteUser != null) { + if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) + && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) + && remoteUser != null) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); } return false; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java index cd49f7e16baf..0a9beb2ec241 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -26,7 +25,6 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @@ -38,9 +36,8 @@ public void init(FilterConfig filterConfig) throws ServletException { } @Override - public void doFilter(ServletRequest req, ServletResponse res, - FilterChain chain) - throws IOException, ServletException { + public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) + throws IOException, ServletException { HttpServletResponse httpRes = (HttpServletResponse) res; httpRes.setHeader("Cache-Control", "no-cache"); long now = EnvironmentEdgeManager.currentTime(); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java index d77ea9b14cec..3e84aeed0390 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java @@ -42,14 +42,14 @@ public class ProfileOutputServlet extends DefaultServlet { @Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) - throws ServletException, IOException { + throws ServletException, IOException { String absoluteDiskPath = getServletContext().getRealPath(req.getPathInfo()); File requestedFile = new File(absoluteDiskPath); // async-profiler version 1.4 writes 'Started [cpu] profiling' to output file when profiler is // running which gets replaced by final output. If final output is not ready yet, the file size // will be <100 bytes (in all modes). if (requestedFile.length() < 100) { - LOG.info(requestedFile + " is incomplete. Sending auto-refresh header."); + LOG.info(requestedFile + " is incomplete. 
Sending auto-refresh header."); String refreshUrl = req.getRequestURI(); // Rebuild the query string (if we have one) if (req.getQueryString() != null) { @@ -57,8 +57,8 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res } ProfileServlet.setResponseHeader(resp); resp.setHeader("Refresh", REFRESH_PERIOD + ";" + refreshUrl); - resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD + - " seconds until the output file is ready. Redirecting to " + refreshUrl); + resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD + + " seconds until the output file is ready. Redirecting to " + refreshUrl); } else { super.doGet(req, resp); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java index 0fbe31ae4c99..da1cd3eecc4b 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java @@ -25,63 +25,35 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.util.ProcessUtils; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; /** - * Servlet that runs async-profiler as web-endpoint. - * Following options from async-profiler can be specified as query paramater. - * // -e event profiling event: cpu|alloc|lock|cache-misses etc. - * // -d duration run profiling for 'duration' seconds (integer) - * // -i interval sampling interval in nanoseconds (long) - * // -j jstackdepth maximum Java stack depth (integer) - * // -b bufsize frame buffer size (long) - * // -t profile different threads separately - * // -s simple class names instead of FQN - * // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr|html - * // --width px SVG width pixels (integer) - * // --height px SVG frame height pixels (integer) - * // --minwidth px skip frames smaller than px (double) - * // --reverse generate stack-reversed FlameGraph / Call tree - * Example: - * - To collect 30 second CPU profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof" - * - To collect 1 minute CPU profile of current process and output in tree format (html) - * curl "http://localhost:10002/prof?output=tree&duration=60" - * - To collect 30 second heap allocation profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof?event=alloc" - * - To collect lock contention profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof?event=lock" - * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all events) - * // Perf events: - * // cpu - * // page-faults - * // context-switches - * // cycles - * // instructions - * // cache-references - * // cache-misses - * // branches - * // branch-misses - * // bus-cycles - * // L1-dcache-load-misses - * // LLC-load-misses - * // dTLB-load-misses - * // mem:breakpoint - * // trace:tracepoint - * // Java events: - * // alloc - * // lock + * Servlet that runs async-profiler as web-endpoint. 
Following options from async-profiler can be + * specified as query paramater. // -e event profiling event: cpu|alloc|lock|cache-misses etc. // -d + * duration run profiling for 'duration' seconds (integer) // -i interval sampling interval in + * nanoseconds (long) // -j jstackdepth maximum Java stack depth (integer) // -b bufsize frame + * buffer size (long) // -t profile different threads separately // -s simple class names instead of + * FQN // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr|html // --width + * px SVG width pixels (integer) // --height px SVG frame height pixels (integer) // --minwidth px + * skip frames smaller than px (double) // --reverse generate stack-reversed FlameGraph / Call tree + * Example: - To collect 30 second CPU profile of current process (returns FlameGraph svg) curl + * "http://localhost:10002/prof" - To collect 1 minute CPU profile of current process and output in + * tree format (html) curl "http://localhost:10002/prof?output=tree&duration=60" - To collect 30 + * second heap allocation profile of current process (returns FlameGraph svg) curl + * "http://localhost:10002/prof?event=alloc" - To collect lock contention profile of current process + * (returns FlameGraph svg) curl "http://localhost:10002/prof?event=lock" Following event types are + * supported (default is 'cpu') (NOTE: not all OS'es support all events) // Perf events: // cpu // + * page-faults // context-switches // cycles // instructions // cache-references // cache-misses // + * branches // branch-misses // bus-cycles // L1-dcache-load-misses // LLC-load-misses // + * dTLB-load-misses // mem:breakpoint // trace:tracepoint // Java events: // alloc // lock */ @InterfaceAudience.Private public class ProfileServlet extends HttpServlet { @@ -101,22 +73,12 @@ public class ProfileServlet extends HttpServlet { static final String OUTPUT_DIR = System.getProperty("java.io.tmpdir") + "/prof-output-hbase"; enum Event { - CPU("cpu"), - ALLOC("alloc"), - LOCK("lock"), - PAGE_FAULTS("page-faults"), - CONTEXT_SWITCHES("context-switches"), - CYCLES("cycles"), - INSTRUCTIONS("instructions"), - CACHE_REFERENCES("cache-references"), - CACHE_MISSES("cache-misses"), - BRANCHES("branches"), - BRANCH_MISSES("branch-misses"), - BUS_CYCLES("bus-cycles"), - L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"), - LLC_LOAD_MISSES("LLC-load-misses"), - DTLB_LOAD_MISSES("dTLB-load-misses"), - MEM_BREAKPOINT("mem:breakpoint"), + CPU("cpu"), ALLOC("alloc"), LOCK("lock"), PAGE_FAULTS("page-faults"), + CONTEXT_SWITCHES("context-switches"), CYCLES("cycles"), INSTRUCTIONS("instructions"), + CACHE_REFERENCES("cache-references"), CACHE_MISSES("cache-misses"), BRANCHES("branches"), + BRANCH_MISSES("branch-misses"), BUS_CYCLES("bus-cycles"), + L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"), LLC_LOAD_MISSES("LLC-load-misses"), + DTLB_LOAD_MISSES("dTLB-load-misses"), MEM_BREAKPOINT("mem:breakpoint"), TRACE_TRACEPOINT("trace:tracepoint"),; private final String internalName; @@ -141,20 +103,15 @@ public static Event fromInternalName(final String name) { } enum Output { - SUMMARY, - TRACES, - FLAT, - COLLAPSED, + SUMMARY, TRACES, FLAT, COLLAPSED, // No SVG in 2.x asyncprofiler. - SVG, - TREE, - JFR, + SVG, TREE, JFR, // In 2.x asyncprofiler, this is how you get flamegraphs. 
HTML } @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SE_TRANSIENT_FIELD_NOT_RESTORED", - justification = "This class is never serialized nor restored.") + justification = "This class is never serialized nor restored.") private transient Lock profilerLock = new ReentrantLock(); private transient volatile Process process; private String asyncProfilerHome; @@ -180,10 +137,10 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" + - "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + - "environment is properly configured. For more information please see\n" + - "http://hbase.apache.org/book.html#profiler\n"); + resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" + + "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); return; } @@ -217,9 +174,9 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res int lockTimeoutSecs = 3; if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) { try { - File outputFile = new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + - event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet() + "." + - output.name().toLowerCase()); + File outputFile = + new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + event.name().toLowerCase() + + "-" + ID_GEN.incrementAndGet() + "." + output.name().toLowerCase()); List cmd = new ArrayList<>(); cmd.add(asyncProfilerHome + PROFILER_SCRIPT); cmd.add("-e"); @@ -270,14 +227,13 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res setResponseHeader(resp); resp.setStatus(HttpServletResponse.SC_ACCEPTED); String relativeUrl = "/prof-output-hbase/" + outputFile.getName(); - resp.getWriter().write( - "Started [" + event.getInternalName() + - "] profiling. This page will automatically redirect to " + - relativeUrl + " after " + duration + " seconds. " + - "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async " + - "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler." + - "\n\nCommand:\n" + - Joiner.on(" ").join(cmd)); + resp.getWriter() + .write("Started [" + event.getInternalName() + + "] profiling. This page will automatically redirect to " + relativeUrl + + " after " + duration + " seconds. " + + "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async " + + "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler." + + "\n\nCommand:\n" + Joiner.on(" ").join(cmd)); // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified // via url param @@ -293,10 +249,10 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res } else { setResponseHeader(resp); resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - resp.getWriter().write( - "Unable to acquire lock. Another instance of profiler might be running."); - LOG.warn("Unable to acquire lock in " + lockTimeoutSecs + - " seconds. Another instance of profiler might be running."); + resp.getWriter() + .write("Unable to acquire lock. 
Another instance of profiler might be running."); + LOG.warn("Unable to acquire lock in " + lockTimeoutSecs + + " seconds. Another instance of profiler might be running."); } } catch (InterruptedException e) { LOG.warn("Interrupted while acquiring profile lock.", e); @@ -392,10 +348,10 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res throws IOException { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write("The profiler servlet was disabled at startup.\n\n" + - "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + - "environment is properly configured. For more information please see\n" + - "http://hbase.apache.org/book.html#profiler\n"); + resp.getWriter().write("The profiler servlet was disabled at startup.\n\n" + + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); return; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java index 182a4e10996d..a8a561c97dea 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java @@ -17,18 +17,6 @@ */ package org.apache.hadoop.hbase.http; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.apache.hadoop.util.HttpExceptionUtils; -import org.apache.hadoop.util.StringUtils; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.IOException; import java.security.Principal; import java.util.ArrayList; @@ -43,30 +31,32 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This file has been copied directly (changing only the package name and and the ASF license - * text format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase - * depends on doesn't have it yet - * (as of 2020 Apr 24, there is no Hadoop release that has it either). 
- * - * Hadoop version: - * unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49 - * - * Haddop path: + * This file has been copied directly (changing only the package name and and the ASF license text + * format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase depends + * on doesn't have it yet (as of 2020 Apr 24, there is no Hadoop release that has it either). Hadoop + * version: unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49 Haddop path: * hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/ - * server/ProxyUserAuthenticationFilter.java - * - * AuthenticationFilter which adds support to perform operations - * using end user instead of proxy user. Fetches the end user from - * doAs Query Parameter. + * server/ProxyUserAuthenticationFilter.java AuthenticationFilter which adds support to perform + * operations using end user instead of proxy user. Fetches the end user from doAs Query Parameter. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class ProxyUserAuthenticationFilter extends AuthenticationFilter { - private static final Logger LOG = LoggerFactory.getLogger( - ProxyUserAuthenticationFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(ProxyUserAuthenticationFilter.class); private static final String DO_AS = "doas"; public static final String PROXYUSER_PREFIX = "proxyuser"; @@ -85,14 +75,13 @@ protected void doFilter(FilterChain filterChain, HttpServletRequest request, String doAsUser = lowerCaseRequest.getParameter(DO_AS); if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) { - LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", - doAsUser, request.getRemoteUser(), request.getRemoteAddr()); - UserGroupInformation requestUgi = (request.getUserPrincipal() != null) ? - UserGroupInformation.createRemoteUser(request.getRemoteUser()) + LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", doAsUser, + request.getRemoteUser(), request.getRemoteAddr()); + UserGroupInformation requestUgi = (request.getUserPrincipal() != null) + ? 
UserGroupInformation.createRemoteUser(request.getRemoteUser()) : null; if (requestUgi != null) { - requestUgi = UserGroupInformation.createProxyUser(doAsUser, - requestUgi); + requestUgi = UserGroupInformation.createProxyUser(doAsUser, requestUgi); try { ProxyUsers.authorize(requestUgi, request.getRemoteAddr()); @@ -116,7 +105,7 @@ public String getName() { LOG.debug("Proxy user Authentication successful"); } catch (AuthorizationException ex) { HttpExceptionUtils.createServletExceptionResponse(response, - HttpServletResponse.SC_FORBIDDEN, ex); + HttpServletResponse.SC_FORBIDDEN, ex); LOG.warn("Proxy user Authentication exception", ex); return; } @@ -140,8 +129,8 @@ protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) } static boolean containsUpperCase(final Iterable strings) { - for(String s : strings) { - for(int i = 0; i < s.length(); i++) { + for (String s : strings) { + for (int i = 0; i < s.length(); i++) { if (Character.isUpperCase(s.charAt(i))) { return true; } @@ -151,17 +140,16 @@ static boolean containsUpperCase(final Iterable strings) { } /** - * The purpose of this function is to get the doAs parameter of a http request - * case insensitively + * The purpose of this function is to get the doAs parameter of a http request case insensitively * @param request * @return doAs parameter if exists or null otherwise */ - public static String getDoasFromHeader(final HttpServletRequest request) { + public static String getDoasFromHeader(final HttpServletRequest request) { String doas = null; final Enumeration headers = request.getHeaderNames(); - while (headers.hasMoreElements()){ + while (headers.hasMoreElements()) { String header = headers.nextElement(); - if (header.toLowerCase().equals("doas")){ + if (header.toLowerCase().equals("doas")) { doas = request.getHeader(header); break; } @@ -169,11 +157,9 @@ public static String getDoasFromHeader(final HttpServletRequest request) { return doas; } - public static HttpServletRequest toLowerCase( - final HttpServletRequest request) { + public static HttpServletRequest toLowerCase(final HttpServletRequest request) { @SuppressWarnings("unchecked") - final Map original = (Map) - request.getParameterMap(); + final Map original = (Map) request.getParameterMap(); if (!containsUpperCase(original.keySet())) { return request; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java index f00f2a195af0..77fca8421710 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,16 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http; import java.io.IOException; @@ -37,10 +36,10 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class SecurityHeadersFilter implements Filter { - private static final Logger LOG = - LoggerFactory.getLogger(SecurityHeadersFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(SecurityHeadersFilter.class); private static final String DEFAULT_HSTS = "max-age=63072000;includeSubDomains;preload"; - private static final String DEFAULT_CSP = "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; + private static final String DEFAULT_CSP = + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; private FilterConfig filterConfig; @Override diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java index 8f338a7af68a..9c99b0ab8dc7 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,7 @@ import org.apache.yetus.audience.InterfaceStability; /** - * This interface contains constants for configuration keys used - * in the hbase http server code. + * This interface contains constants for configuration keys used in the hbase http server code. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -34,11 +33,9 @@ public interface ServerConfigurationKeys { public static final boolean HBASE_SSL_ENABLED_DEFAULT = false; /** Enable/Disable aliases serving from jetty */ - public static final String HBASE_JETTY_LOGS_SERVE_ALIASES = - "hbase.jetty.logs.serve.aliases"; + public static final String HBASE_JETTY_LOGS_SERVE_ALIASES = "hbase.jetty.logs.serve.aliases"; - public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES = - true; + public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES = true; public static final String HBASE_HTTP_STATIC_USER = "hbase.http.staticuser.user"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java index 05ca9a3abd19..bb545e99f0ba 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,22 +19,19 @@ import java.io.IOException; import java.io.Writer; - import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.http.HttpServer; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * A servlet to print out the running configuration data. */ -@InterfaceAudience.LimitedPrivate({"HBase"}) +@InterfaceAudience.LimitedPrivate({ "HBase" }) @InterfaceStability.Unstable public class ConfServlet extends HttpServlet { private static final long serialVersionUID = 1L; @@ -44,12 +41,12 @@ public class ConfServlet extends HttpServlet { private static final String FORMAT_PARAM = "format"; /** - * Return the Configuration of the daemon hosting this servlet. - * This is populated when the HttpServer starts. + * Return the Configuration of the daemon hosting this servlet. This is populated when the + * HttpServer starts. */ private Configuration getConfFromContext() { - Configuration conf = (Configuration)getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + Configuration conf = + (Configuration) getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); assert conf != null; return conf; } @@ -57,8 +54,7 @@ private Configuration getConfFromContext() { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } @@ -86,7 +82,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) * Guts of the servlet - extracted for easy testing. 
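As a pointer for the ConfServlet hunk above: the JSON branch of writeResponse delegates to Hadoop's Configuration.dumpConfiguration. A small standalone sketch of that same call, outside the servlet; the property key is a made-up example.

```java
import java.io.StringWriter;
import org.apache.hadoop.conf.Configuration;

public class DumpConfExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("hbase.example.key", "value"); // illustrative key only
    StringWriter out = new StringWriter();
    // Same call the servlet makes for the JSON format: emits a {"properties":[...]} document.
    Configuration.dumpConfiguration(conf, out);
    System.out.println(out);
  }
}
```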
*/ static void writeResponse(Configuration conf, Writer out, String format) - throws IOException, BadFormatException { + throws IOException, BadFormatException { if (FORMAT_JSON.equals(format)) { Configuration.dumpConfiguration(conf, out); } else if (FORMAT_XML.equals(format)) { diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java index fdcd34783c04..f501e1648599 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java @@ -20,6 +20,7 @@ import java.lang.reflect.Type; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.gson.JsonElement; import org.apache.hbase.thirdparty.com.google.gson.JsonPrimitive; import org.apache.hbase.thirdparty.com.google.gson.JsonSerializationContext; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java index c75113ded730..3379c6e25562 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java @@ -32,6 +32,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.javax.ws.rs.Produces; import org.apache.hbase.thirdparty.javax.ws.rs.WebApplicationException; @@ -56,20 +57,14 @@ public GsonMessageBodyWriter(Gson gson) { @Override public boolean isWriteable(Class type, Type genericType, Annotation[] annotations, - MediaType mediaType) { + MediaType mediaType) { return mediaType == null || MediaType.APPLICATION_JSON_TYPE.isCompatible(mediaType); } @Override - public void writeTo( - T t, - Class type, - Type genericType, - Annotation[] annotations, - MediaType mediaType, - MultivaluedMap httpHeaders, - OutputStream entityStream - ) throws IOException, WebApplicationException { + public void writeTo(T t, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType, MultivaluedMap httpHeaders, OutputStream entityStream) + throws IOException, WebApplicationException { final Charset outputCharset = requestedCharset(mediaType); try (Writer writer = new OutputStreamWriter(entityStream, outputCharset)) { gson.toJson(t, writer); @@ -77,23 +72,20 @@ public void writeTo( } private static Charset requestedCharset(MediaType mediaType) { - return Optional.ofNullable(mediaType) - .map(MediaType::getParameters) - .map(params -> params.get("charset")) - .map(c -> { - try { - return Charset.forName(c); - } catch (IllegalCharsetNameException e) { - logger.debug("Client requested illegal Charset '{}'", c); - return null; - } catch (UnsupportedCharsetException e) { - logger.debug("Client requested unsupported Charset '{}'", c); - return null; - } catch (Exception e) { - logger.debug("Error while resolving Charset '{}'", c, e); - return null; - } - }) - .orElse(StandardCharsets.UTF_8); + return Optional.ofNullable(mediaType).map(MediaType::getParameters) + .map(params -> params.get("charset")).map(c -> { + try { + return Charset.forName(c); + } catch (IllegalCharsetNameException e) { + logger.debug("Client requested illegal 
Charset '{}'", c); + return null; + } catch (UnsupportedCharsetException e) { + logger.debug("Client requested unsupported Charset '{}'", c); + return null; + } catch (Exception e) { + logger.debug("Error while resolving Charset '{}'", c, e); + return null; + } + }).orElse(StandardCharsets.UTF_8); } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java index dc3f8a7bf430..d59ec3846da7 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java @@ -19,6 +19,7 @@ import java.io.IOException; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerRequestContext; import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerResponseContext; @@ -34,10 +35,8 @@ public class ResponseEntityMapper implements ContainerResponseFilter { @Override - public void filter( - ContainerRequestContext requestContext, - ContainerResponseContext responseContext - ) throws IOException { + public void filter(ContainerRequestContext requestContext, + ContainerResponseContext responseContext) throws IOException { /* * Follows very loosely the top-level document specification described in by JSON API. Only * handles 200 response codes; leaves room for errors and other response types. diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java index 57a7e930905b..0c7b869fece5 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java @@ -19,6 +19,7 @@ import java.util.function.Supplier; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.org.glassfish.hk2.api.Factory; /** @@ -34,9 +35,12 @@ public SupplierFactoryAdapter(Supplier supplier) { this.supplier = supplier; } - @Override public T provide() { + @Override + public T provide() { return supplier.get(); } - @Override public void dispose(T instance) { } + @Override + public void dispose(T instance) { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java index a61e61684dac..8df988f6e378 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http.jmx; import java.io.IOException; @@ -36,33 +36,27 @@ import org.slf4j.LoggerFactory; /* - * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has - * been rewritten to be read only and to output in a JSON format so it is not - * really that close to the original. + * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has been rewritten to be + * read only and to output in a JSON format so it is not really that close to the original. */ /** * Provides Read only web access to JMX. *

    - * This servlet generally will be placed under the /jmx URL for each - * HttpServer. It provides read only - * access to JMX metrics. The optional qry parameter - * may be used to query only a subset of the JMX Beans. This query - * functionality is provided through the - * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)} - * method. + * This servlet generally will be placed under the /jmx URL for each HttpServer. It provides read + * only access to JMX metrics. The optional qry parameter may be used to query only a + * subset of the JMX Beans. This query functionality is provided through the + * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)} method. *

    *

    - * For example http://.../jmx?qry=Hadoop:* will return - * all hadoop metrics exposed through JMX. + * For example http://.../jmx?qry=Hadoop:* will return all hadoop metrics exposed + * through JMX. *

    *

    - * The optional get parameter is used to query an specific - * attribute of a JMX bean. The format of the URL is - * http://.../jmx?get=MXBeanName::AttributeName + * The optional get parameter is used to query an specific attribute of a JMX bean. The + * format of the URL is http://.../jmx?get=MXBeanName::AttributeName *

    *

    - * For example - * + * For example * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId * will return the cluster id of the namenode mxbean. *

    @@ -72,8 +66,7 @@ * http://.../jmx?get=MXBeanName::*[RegExp1],*[RegExp2] *

    *

    - * For example - * + * For example *

    * http://../jmx?get=Hadoop:service=HBase,name=RegionServer,sub=Tables::[a-zA-z_0-9]*memStoreSize *

    @@ -82,17 +75,19 @@ *

    *
    *

    - * If the qry or the get parameter is not formatted - * correctly then a 400 BAD REQUEST http response code will be returned. + * If the qry or the get parameter is not formatted correctly then a 400 + * BAD REQUEST http response code will be returned. *

    *

    - * If a resouce such as a mbean or attribute can not be found, - * a 404 SC_NOT_FOUND http response code will be returned. + * If a resouce such as a mbean or attribute can not be found, a 404 SC_NOT_FOUND http response code + * will be returned. *

    *

    * The return format is JSON and in the form *

    - *
    
    + * 
    + * 
    + * 
      *  {
      *    "beans" : [
      *      {
    @@ -101,28 +96,18 @@
      *      }
      *    ]
      *  }
    - *  
    - *

    - * The servlet attempts to convert the the JMXBeans into JSON. Each - * bean's attributes will be converted to a JSON object member. - * - * If the attribute is a boolean, a number, a string, or an array - * it will be converted to the JSON equivalent. - * - * If the value is a {@link CompositeData} then it will be converted - * to a JSON object with the keys as the name of the JSON member and - * the value is converted following these same rules. - * - * If the value is a {@link TabularData} then it will be converted - * to an array of the {@link CompositeData} elements that it contains. - * - * All other objects will be converted to a string and output as such. - * - * The bean's name and modelerType will be returned for all beans. - * - * Optional paramater "callback" should be used to deliver JSONP response. + * + *

    + *

    + * The servlet attempts to convert the the JMXBeans into JSON. Each bean's attributes will be + * converted to a JSON object member. If the attribute is a boolean, a number, a string, or an array + * it will be converted to the JSON equivalent. If the value is a {@link CompositeData} then it will + * be converted to a JSON object with the keys as the name of the JSON member and the value is + * converted following these same rules. If the value is a {@link TabularData} then it will be + * converted to an array of the {@link CompositeData} elements that it contains. All other objects + * will be converted to a string and output as such. The bean's name and modelerType will be + * returned for all beans. Optional paramater "callback" should be used to deliver JSONP response. *
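To make the qry/get parameters described above concrete, a standalone Java sketch that issues the qry form of the request. The localhost:16030 address is an illustrative region server info port, not something this patch defines; the query string is taken from the javadoc example.

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class JmxQueryExample {
  public static void main(String[] args) throws Exception {
    // Same query shown in the javadoc above: every MBean in the Hadoop domain.
    URL url = new URL("http://localhost:16030/jmx?qry=Hadoop:*");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
      try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
        // Body has the {"beans":[ ... ]} shape described above.
        in.lines().forEach(System.out::println);
      }
    } else {
      // Per the javadoc: 400 for a malformed qry/get value, 404 for a missing MBean/attribute.
      System.err.println("Request failed: HTTP " + conn.getResponseCode());
    }
    conn.disconnect();
  }
}
```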

    - * */ @InterfaceAudience.Private public class JMXJsonServlet extends HttpServlet { @@ -157,11 +142,8 @@ public void init() throws ServletException { /** * Process a GET request for the specified resource. - * - * @param request - * The servlet request we are processing - * @param response - * The servlet response we are creating + * @param request The servlet request we are processing + * @param response The servlet response we are creating */ @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { @@ -199,8 +181,8 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro response.setStatus(HttpServletResponse.SC_BAD_REQUEST); return; } - if (beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]), - splitStrings[1], description) != 0) { + if (beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]), splitStrings[1], + description) != 0) { beanWriter.flush(); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); } @@ -215,8 +197,8 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro String excl = request.getParameter("excl"); ObjectName excluded = excl == null ? null : new ObjectName(excl); - if (beanWriter.write(this.mBeanServer, new ObjectName(qry), - null, description, excluded) != 0) { + if (beanWriter.write(this.mBeanServer, new ObjectName(qry), null, description, + excluded) != 0) { beanWriter.flush(); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); } @@ -241,10 +223,9 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro } /** - * Verifies that the callback property, if provided, is purely alphanumeric. - * This prevents a malicious callback name (that is javascript code) from being - * returned by the UI to an unsuspecting user. - * + * Verifies that the callback property, if provided, is purely alphanumeric. This prevents a + * malicious callback name (that is javascript code) from being returned by the UI to an + * unsuspecting user. * @param callbackName The callback name, can be null. * @return The callback name * @throws IOException If the name is disallowed. diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java index 21667d779a34..cfda42dda9e8 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java @@ -1,18 +1,12 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. 
See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java index 72cedddd686b..4d40c9fb3d26 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.io.IOException; import java.security.Principal; import java.util.HashMap; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -32,7 +31,6 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.http.FilterContainer; @@ -42,8 +40,8 @@ import org.slf4j.LoggerFactory; /** - * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who) - * so that the web UI is usable for a secure cluster without authentication. + * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who) so that the web UI + * is usable for a secure cluster without authentication. 
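A short sketch of wiring this initializer up through configuration: the filter-initializer key is referenced via the HttpServer constant used by TestGlobalFilter later in this diff, the static-user key comes from the ServerConfigurationKeys hunk above, and the user name itself is a made-up example.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.http.HttpServer;
import org.apache.hadoop.hbase.http.lib.StaticUserWebFilter;

public class StaticUserConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Register the initializer and choose the user the web UI should report.
    conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, StaticUserWebFilter.class.getName());
    conf.set("hbase.http.staticuser.user", "webui-reader"); // illustrative user name
    System.out.println("Static web UI user: " + conf.get("hbase.http.staticuser.user"));
  }
}
```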
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class StaticUserWebFilter extends FilterInitializer { @@ -53,17 +51,21 @@ public class StaticUserWebFilter extends FilterInitializer { static class User implements Principal { private final String name; + public User(String name) { this.name = name; } + @Override public String getName() { return name; } + @Override public int hashCode() { return name.hashCode(); } + @Override public boolean equals(Object other) { if (other == this) { @@ -73,6 +75,7 @@ public boolean equals(Object other) { } return ((User) other).name.equals(name); } + @Override public String toString() { return name; @@ -90,20 +93,19 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain - ) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { HttpServletRequest httpRequest = (HttpServletRequest) request; // if the user is already authenticated, don't override it if (httpRequest.getRemoteUser() != null) { chain.doFilter(request, response); } else { - HttpServletRequestWrapper wrapper = - new HttpServletRequestWrapper(httpRequest) { + HttpServletRequestWrapper wrapper = new HttpServletRequestWrapper(httpRequest) { @Override public Principal getUserPrincipal() { return user; } + @Override public String getRemoteUser() { return username; @@ -128,9 +130,7 @@ public void initFilter(FilterContainer container, Configuration conf) { String username = getUsernameFromConf(conf); options.put(HBASE_HTTP_STATIC_USER, username); - container.addFilter("static_user_filter", - StaticUserFilter.class.getName(), - options); + container.addFilter("static_user_filter", StaticUserFilter.class.getName(), options); } /** @@ -141,13 +141,12 @@ static String getUsernameFromConf(Configuration conf) { if (oldStyleUgi != null) { // We can't use the normal configuration deprecation mechanism here // since we need to split out the username from the configured UGI. - LOG.warn(DEPRECATED_UGI_KEY + " should not be used. Instead, use " + - HBASE_HTTP_STATIC_USER + "."); + LOG.warn( + DEPRECATED_UGI_KEY + " should not be used. Instead, use " + HBASE_HTTP_STATIC_USER + "."); String[] parts = oldStyleUgi.split(","); return parts[0]; } else { - return conf.get(HBASE_HTTP_STATIC_USER, - DEFAULT_HBASE_HTTP_STATIC_USER); + return conf.get(HBASE_HTTP_STATIC_USER, DEFAULT_HBASE_HTTP_STATIC_USER); } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java index 7bb9a0faa7d5..734534c33f94 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java @@ -1,36 +1,29 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** *

    - * This package provides user-selectable (via configuration) classes that add - * functionality to the web UI. They are configured as a list of classes in the - * configuration parameter hadoop.http.filter.initializers. + * This package provides user-selectable (via configuration) classes that add functionality to the + * web UI. They are configured as a list of classes in the configuration parameter + * hadoop.http.filter.initializers. *

    *
      - *
    • StaticUserWebFilter - An authorization plugin that makes all - * users a static configured user. + *
    • StaticUserWebFilter - An authorization plugin that makes all users a static configured + * user. *
    *

    * Copied from hadoop source code.
    * See https://issues.apache.org/jira/browse/HADOOP-10232 to know why *

    */ -@InterfaceAudience.LimitedPrivate({"HBase"}) +@InterfaceAudience.LimitedPrivate({ "HBase" }) @InterfaceStability.Unstable package org.apache.hadoop.hbase.http.lib; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java index 611316d9ec67..ddcccc858689 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -74,9 +74,7 @@ public static void main(String[] args) throws Exception { * Valid command line options. */ private enum Operations { - GETLEVEL, - SETLEVEL, - UNKNOWN + GETLEVEL, SETLEVEL, UNKNOWN } private static void printUsage() { @@ -85,8 +83,7 @@ private static void printUsage() { } public static boolean isValidProtocol(String protocol) { - return ((protocol.equals(PROTOCOL_HTTP) || - protocol.equals(PROTOCOL_HTTPS))); + return ((protocol.equals(PROTOCOL_HTTP) || protocol.equals(PROTOCOL_HTTPS))); } static class CLI extends Configured implements Tool { @@ -116,8 +113,7 @@ public int run(String[] args) throws Exception { * @throws HadoopIllegalArgumentException if arguments are invalid. * @throws Exception if unable to connect */ - private void sendLogLevelRequest() - throws HadoopIllegalArgumentException, Exception { + private void sendLogLevelRequest() throws HadoopIllegalArgumentException, Exception { switch (operation) { case GETLEVEL: doGetLevel(); @@ -126,13 +122,11 @@ private void sendLogLevelRequest() doSetLevel(); break; default: - throw new HadoopIllegalArgumentException( - "Expect either -getlevel or -setlevel"); + throw new HadoopIllegalArgumentException("Expect either -getlevel or -setlevel"); } } - public void parseArguments(String[] args) throws - HadoopIllegalArgumentException { + public void parseArguments(String[] args) throws HadoopIllegalArgumentException { if (args.length == 0) { throw new HadoopIllegalArgumentException("No arguments specified"); } @@ -149,15 +143,13 @@ public void parseArguments(String[] args) throws nextArgIndex = parseProtocolArgs(args, nextArgIndex); break; default: - throw new HadoopIllegalArgumentException( - "Unexpected argument " + args[nextArgIndex]); + throw new HadoopIllegalArgumentException("Unexpected argument " + args[nextArgIndex]); } } // if operation is never specified in the arguments if (operation == Operations.UNKNOWN) { - throw new HadoopIllegalArgumentException( - "Must specify either -getlevel or -setlevel"); + throw new HadoopIllegalArgumentException("Must specify either -getlevel or -setlevel"); } // if protocol is unspecified, set it as http. 
@@ -166,8 +158,7 @@ public void parseArguments(String[] args) throws } } - private int parseGetLevelArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseGetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { // fail if multiple operations are specified in the arguments if (operation != Operations.UNKNOWN) { throw new HadoopIllegalArgumentException("Redundant -getlevel command"); @@ -182,8 +173,7 @@ private int parseGetLevelArgs(String[] args, int index) throws return index + 3; } - private int parseSetLevelArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseSetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { // fail if multiple operations are specified in the arguments if (operation != Operations.UNKNOWN) { throw new HadoopIllegalArgumentException("Redundant -setlevel command"); @@ -199,30 +189,25 @@ private int parseSetLevelArgs(String[] args, int index) throws return index + 4; } - private int parseProtocolArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseProtocolArgs(String[] args, int index) throws HadoopIllegalArgumentException { // make sure only -protocol is specified if (protocol != null) { - throw new HadoopIllegalArgumentException( - "Redundant -protocol command"); + throw new HadoopIllegalArgumentException("Redundant -protocol command"); } // check number of arguments is sufficient if (index + 1 >= args.length) { - throw new HadoopIllegalArgumentException( - "-protocol needs one parameter"); + throw new HadoopIllegalArgumentException("-protocol needs one parameter"); } // check protocol is valid protocol = args[index + 1]; if (!isValidProtocol(protocol)) { - throw new HadoopIllegalArgumentException( - "Invalid protocol: " + protocol); + throw new HadoopIllegalArgumentException("Invalid protocol: " + protocol); } return index + 2; } /** * Send HTTP request to get log level. - * * @throws HadoopIllegalArgumentException if arguments are invalid. * @throws Exception if unable to connect */ @@ -232,20 +217,16 @@ private void doGetLevel() throws Exception { /** * Send HTTP request to set log level. - * * @throws HadoopIllegalArgumentException if arguments are invalid. * @throws Exception if unable to connect */ private void doSetLevel() throws Exception { - process(protocol + "://" + hostName + "/logLevel?log=" + className - + "&level=" + level); + process(protocol + "://" + hostName + "/logLevel?log=" + className + "&level=" + level); } /** - * Connect to the URL. Supports HTTP and supports SPNEGO - * authentication. It falls back to simple authentication if it fails to - * initiate SPNEGO. - * + * Connect to the URL. Supports HTTP and supports SPNEGO authentication. It falls back to simple + * authentication if it fails to initiate SPNEGO. * @param url the URL address of the daemon servlet * @return a connected connection * @throws Exception if it can not establish a connection. @@ -274,8 +255,7 @@ private HttpURLConnection connect(URL url) throws Exception { } /** - * Configures the client to send HTTP request to the URL. - * Supports SPENGO for authentication. + * Configures the client to send HTTP request to the URL. Supports SPENGO for authentication. 
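For reference, the URL shape built by doSetLevel() above can also be driven directly. A sketch only, assuming an HBase master info server on localhost:16010 and illustrative logger/level values; note that the servlet hunk that follows rejects changes when hbase.master.ui.readonly is set.

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class SetLogLevelExample {
  public static void main(String[] args) throws Exception {
    // Same URL shape that doSetLevel() builds above; host, logger and level are assumptions.
    URL url = new URL("http://localhost:16010/logLevel"
      + "?log=org.apache.hadoop.hbase.http.HttpServer&level=DEBUG");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
      new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      // The servlet echoes the submitted logger, its class and the effective level.
      in.lines().forEach(System.out::println);
    }
    conn.disconnect();
  }
}
```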
* @param urlString URL and query string to the daemon's web UI * @throws Exception if unable to connect */ @@ -289,9 +269,10 @@ private void process(String urlString) throws Exception { // read from the servlet - try (InputStreamReader streamReader = - new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); - BufferedReader bufferedReader = new BufferedReader(streamReader)) { + try ( + InputStreamReader streamReader = + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); + BufferedReader bufferedReader = new BufferedReader(streamReader)) { bufferedReader.lines().filter(Objects::nonNull).filter(line -> line.startsWith(MARKER)) .forEach(line -> System.out.println(TAG.matcher(line).replaceAll(""))); } catch (IOException ioe) { @@ -312,19 +293,16 @@ public static class Servlet extends HttpServlet { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { + throws ServletException, IOException { // Do the authorization - if (!HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (!HttpServer.hasAdministratorAccess(getServletContext(), request, response)) { return; } // Disallow modification of the LogLevel if explicitly set to readonly - Configuration conf = (Configuration) getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + Configuration conf = + (Configuration) getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); if (conf.getBoolean("hbase.master.ui.readonly", false)) { - sendError( - response, - HttpServletResponse.SC_FORBIDDEN, + sendError(response, HttpServletResponse.SC_FORBIDDEN, "Modification of HBase via the UI is disallowed in configuration."); return; } @@ -347,17 +325,13 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) if (logName != null) { out.println("

    Results:

    "); - out.println(MARKER - + "Submitted Log Name: " + logName + "
    "); + out.println(MARKER + "Submitted Log Name: " + logName + "
    "); Logger log = LoggerFactory.getLogger(logName); - out.println(MARKER - + "Log Class: " + log.getClass().getName() +"
    "); + out.println(MARKER + "Log Class: " + log.getClass().getName() + "
    "); if (level != null) { if (!isLogLevelChangeAllowed(logName, readOnlyLogLevels)) { - sendError( - response, - HttpServletResponse.SC_PRECONDITION_FAILED, + sendError(response, HttpServletResponse.SC_PRECONDITION_FAILED, "Modification of logger " + logName + " is disallowed in configuration."); return; } @@ -390,7 +364,7 @@ private boolean isLogLevelChangeAllowed(String logger, String[] readOnlyLogLevel } private void sendError(HttpServletResponse response, int code, String message) - throws IOException { + throws IOException { response.setStatus(code, message); response.sendError(code, message); } @@ -420,17 +394,18 @@ private static void process(Logger logger, String levelName, PrintWriter out) { if (levelName != null) { try { Log4jUtils.setLogLevel(logger.getName(), levelName); - out.println(MARKER + "
    " + "Setting Level to " + - levelName + " ...
    " + "
    "); + out.println(MARKER + "
    " + "Setting Level to " + + levelName + " ...
    " + "
    "); } catch (IllegalArgumentException e) { - out.println(MARKER + "
    " + "Bad level : " + levelName + - "
    " + "
    "); + out.println(MARKER + "
    " + "Bad level : " + levelName + + "
    " + "
    "); } } - out.println(MARKER + "Effective level: " + Log4jUtils.getEffectiveLevel(logger.getName()) + - "
    "); + out.println(MARKER + "Effective level: " + Log4jUtils.getEffectiveLevel(logger.getName()) + + "
    "); } } - private LogLevel() {} + private LogLevel() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/package-info.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/package-info.java index f55e24baa952..d70b57755444 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/package-info.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/package-info.java @@ -1,19 +1,12 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** *

    diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java index 560985b73e09..ee961e78673f 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +27,6 @@ import java.util.Iterator; import java.util.Set; import java.util.regex.Pattern; - import javax.management.AttributeNotFoundException; import javax.management.InstanceNotFoundException; import javax.management.IntrospectionException; @@ -42,13 +42,13 @@ import javax.management.openmbean.CompositeData; import javax.management.openmbean.CompositeType; import javax.management.openmbean.TabularData; - -import org.apache.hbase.thirdparty.com.google.gson.Gson; -import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; + /** * Utility for doing JSON and MBeans. */ @@ -396,7 +396,7 @@ private static void writeObject(JsonWriter writer, Object value) throws IOExcept */ public static void dumpAllBeans() throws IOException, MalformedObjectNameException { try (PrintWriter writer = - new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8))) { + new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8))) { JSONBean dumper = new JSONBean(); try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) { MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java index 6e155ae39616..3d57c8dcd3f6 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -14,7 +14,7 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. - * */ + */ package org.apache.hadoop.hbase.util; import java.beans.IntrospectionException; @@ -78,8 +78,8 @@ public static Object getValueFromMBean(ObjectName bean, String attribute) { try { value = mbServer.getAttribute(bean, attribute); } catch (Exception e) { - LOG.error("Unable to get value from MBean= " + bean.toString() + "for attribute=" + - attribute + " " + e.getMessage()); + LOG.error("Unable to get value from MBean= " + bean.toString() + "for attribute=" + attribute + + " " + e.getMessage()); } return value; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java index 92dc20d35b59..ddcd4b54d5fa 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +35,7 @@ @InterfaceAudience.Private public abstract class LogMonitoring { - public static void dumpTailOfLogs( - PrintWriter out, long tailKb) throws IOException { + public static void dumpTailOfLogs(PrintWriter out, long tailKb) throws IOException { Set logs = Log4jUtils.getActiveLogFiles(); for (File f : logs) { out.println("+++++++++++++++++++++++++++++++"); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java index 7ed09468cb67..fc1d523b0ef1 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java @@ -20,9 +20,7 @@ import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,7 +31,8 @@ public final class ProcessUtils { private static Logger LOG = LoggerFactory.getLogger(ProcessUtils.class); - private ProcessUtils() { } + private ProcessUtils() { + } public static Integer getPid() { // JVM_PID is exported by bin/hbase run script diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java index 7f1223980e3d..43be7ccb4f4c 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http; import java.io.BufferedReader; @@ -38,8 +37,8 @@ import org.slf4j.LoggerFactory; /** - * This is a base class for functional tests of the {@link HttpServer}. - * The methods are static for other classes to import statically. + * This is a base class for functional tests of the {@link HttpServer}. 
The methods are static for + * other classes to import statically. */ public class HttpServerFunctionalTest extends Assert { private static final Logger LOG = LoggerFactory.getLogger(HttpServerFunctionalTest.class); @@ -52,11 +51,9 @@ public class HttpServerFunctionalTest extends Assert { private static final String TEST = "test"; /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. - * + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @return the server instance - * * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ @@ -66,16 +63,14 @@ public static HttpServer createTestServer() throws IOException { } /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @param conf the server configuration to use * @return the server instance - * * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ - public static HttpServer createTestServer(Configuration conf) - throws IOException { + public static HttpServer createTestServer(Configuration conf) throws IOException { prepareTestWebapp(); return createServer(TEST, conf); } @@ -87,55 +82,50 @@ public static HttpServer createTestServer(Configuration conf, AccessControlList } /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @param conf the server configuration to use * @return the server instance - * * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ - public static HttpServer createTestServer(Configuration conf, - String[] pathSpecs) throws IOException { + public static HttpServer createTestServer(Configuration conf, String[] pathSpecs) + throws IOException { prepareTestWebapp(); return createServer(TEST, conf, pathSpecs); } public static HttpServer createTestServerWithSecurity(Configuration conf) throws IOException { - prepareTestWebapp(); - return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) - // InfoServer normally sets these for us - .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .build(); - } + prepareTestWebapp(); + return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) + // InfoServer normally sets these for us + .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY).build(); + } - public static HttpServer createTestServerWithSecurityAndAcl(Configuration conf, AccessControlList acl) throws IOException { + public static HttpServer createTestServerWithSecurityAndAcl(Configuration conf, + AccessControlList acl) throws IOException { prepareTestWebapp(); return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) // InfoServer normally sets these for us .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .setSecurityEnabled(true) - .setACL(acl) - .build(); + 
.setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY).setSecurityEnabled(true) + .setACL(acl).build(); } /** - * Prepare the test webapp by creating the directory from the test properties - * fail if the directory cannot be created. + * Prepare the test webapp by creating the directory from the test properties fail if the + * directory cannot be created. * @throws AssertionError if a condition was not met */ protected static void prepareTestWebapp() { String webapps = System.getProperty(TEST_BUILD_WEBAPPS, BUILD_WEBAPPS_DIR); - File testWebappDir = new File(webapps + - File.separatorChar + TEST); + File testWebappDir = new File(webapps + File.separatorChar + TEST); try { if (!testWebappDir.exists()) { fail("Test webapp dir " + testWebappDir.getCanonicalPath() + " missing"); } - } - catch (IOException e) { + } catch (IOException e) { } } @@ -146,12 +136,10 @@ protected static void prepareTestWebapp() { * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String host, int port) - throws IOException { + public static HttpServer createServer(String host, int port) throws IOException { prepareTestWebapp(); return new HttpServer.Builder().setName(TEST) - .addEndpoint(URI.create("http://" + host + ":" + port)) - .setFindPort(true).build(); + .addEndpoint(URI.create("http://" + host + ":" + port)).setFindPort(true).build(); } /** @@ -163,6 +151,7 @@ public static HttpServer createServer(String host, int port) public static HttpServer createServer(String webapp) throws IOException { return localServerBuilder(webapp).setFindPort(true).build(); } + /** * Create an HttpServer instance for the given webapp * @param webapp the webapp to work with @@ -170,8 +159,7 @@ public static HttpServer createServer(String webapp) throws IOException { * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String webapp, Configuration conf) - throws IOException { + public static HttpServer createServer(String webapp, Configuration conf) throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).build(); } @@ -181,8 +169,7 @@ public static HttpServer createServer(String webapp, Configuration conf, } private static Builder localServerBuilder(String webapp) { - return new HttpServer.Builder().setName(webapp).addEndpoint( - URI.create("http://localhost:0")); + return new HttpServer.Builder().setName(webapp).addEndpoint(URI.create("http://localhost:0")); } /** @@ -193,17 +180,15 @@ private static Builder localServerBuilder(String webapp) { * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String webapp, Configuration conf, - String[] pathSpecs) throws IOException { + public static HttpServer createServer(String webapp, Configuration conf, String[] pathSpecs) + throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs) - .build(); + .build(); } /** * Create and start a server with the test webapp - * * @return the newly started server - * * @throws IOException on any failure * @throws AssertionError if a condition was not met */ @@ -230,11 +215,9 @@ public static void stop(HttpServer server) throws Exception { * @return a URL bonded to the base of the server * @throws MalformedURLException if the URL cannot be created. 
*/ - public static URL getServerURL(HttpServer server) - throws MalformedURLException { + public static URL getServerURL(HttpServer server) throws MalformedURLException { assertNotNull("No server", server); - return new URL("http://" - + NetUtils.getHostPortString(server.getConnectorAddress(0))); + return new URL("http://" + NetUtils.getHostPortString(server.getConnectorAddress(0))); } /** @@ -297,15 +280,14 @@ public static void access(String urlstring) throws IOException { URLConnection connection = url.openConnection(); connection.connect(); - try (BufferedReader in = new BufferedReader(new InputStreamReader( - connection.getInputStream(), StandardCharsets.UTF_8))){ - for(; in.readLine() != null;) { + try (BufferedReader in = new BufferedReader( + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) { + for (; in.readLine() != null;) { continue; } - } catch(IOException ioe) { + } catch (IOException ioe) { LOG.info("Got exception: ", ioe); } } - } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java index 1917655d3426..cbac5b193943 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestGlobalFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -63,12 +63,12 @@ public void destroy() { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { if (filterConfig == null) { return; } - String uri = ((HttpServletRequest)request).getRequestURI(); + String uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); RECORDS.add(uri); chain.doFilter(request, response); @@ -76,7 +76,8 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha /** Configuration for RecordingFilter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -89,9 +90,8 @@ public void initFilter(FilterContainer container, Configuration conf) { public void testServletFilter() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - RecordingFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, RecordingFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); http.start(); @@ -106,14 +106,11 @@ public void testServletFilter() throws Exception { final String outURL = "/static/a.out"; final String logURL = "/logs/a.log"; - final String[] urls = { - fsckURL, stacksURL, ajspURL, listPathsURL, dataURL, streamFile, rootURL, allURL, - outURL, logURL - }; + final String[] urls = { fsckURL, stacksURL, ajspURL, listPathsURL, 
dataURL, streamFile, rootURL, + allURL, outURL, logURL }; - //access the urls - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + // access the urls + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (String url : urls) { access(prefix + url); @@ -124,7 +121,7 @@ public void testServletFilter() throws Exception { LOG.info("RECORDS = " + RECORDS); - //verify records + // verify records for (String url : urls) { assertTrue(RECORDS.remove(url)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java index 0f4c4d5d2a14..85c11e9a648f 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,13 +31,14 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHtmlQuoting { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHtmlQuoting.class); - @Test public void testNeedsQuoting() throws Exception { + @Test + public void testNeedsQuoting() throws Exception { assertTrue(HtmlQuoting.needsQuoting("abcde>")); assertTrue(HtmlQuoting.needsQuoting("")); assertEquals("&&&", HtmlQuoting.quoteHtmlChars("&&&")); @@ -58,18 +60,18 @@ public class TestHtmlQuoting { } private void runRoundTrip(String str) throws Exception { - assertEquals(str, - HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str))); + assertEquals(str, HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str))); } - @Test public void testRoundtrip() throws Exception { + @Test + public void testRoundtrip() throws Exception { runRoundTrip(""); runRoundTrip("<>&'\""); runRoundTrip("ab>cd params = request.getParameterMap(); SortedSet keys = new TreeSet<>(params.keySet()); - for(String key: keys) { + for (String key : keys) { out.print(key); out.print(':'); String[] values = params.get(key); if (values.length > 0) { out.print(values[0]); - for(int i=1; i < values.length; ++i) { + for (int i = 1; i < values.length; ++i) { out.print(','); out.print(values[i]); } @@ -120,15 +122,14 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro @SuppressWarnings("serial") public static class EchoServlet extends HttpServlet { @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws IOException { + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { PrintWriter out = response.getWriter(); SortedSet sortedKeys = new TreeSet<>(); Enumeration keys = request.getParameterNames(); - while(keys.hasMoreElements()) { + while (keys.hasMoreElements()) { sortedKeys.add(keys.nextElement()); } - for(String key: sortedKeys) { + for (String key : sortedKeys) { out.print(key); out.print(':'); out.print(request.getParameter(key)); @@ -158,7 +159,8 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro } } - @BeforeClass public static void setup() throws Exception { + @BeforeClass + public static void setup() throws Exception { 
Configuration conf = new Configuration(); conf.setInt(HttpServer.HTTP_MAX_THREADS, MAX_THREADS); server = createTestServer(conf); @@ -166,14 +168,14 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro server.addUnprivilegedServlet("echomap", "/echomap", EchoMapServlet.class); server.addUnprivilegedServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class); server.addUnprivilegedServlet("longheader", "/longheader", LongHeaderServlet.class); - server.addJerseyResourcePackage( - JerseyResource.class.getPackage().getName(), "/jersey/*"); + server.addJerseyResourcePackage(JerseyResource.class.getPackage().getName(), "/jersey/*"); server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } - @AfterClass public static void cleanup() throws Exception { + @AfterClass + public static void cleanup() throws Exception { server.stop(); } @@ -192,13 +194,13 @@ public void testMaxThreads() throws Exception { ready.countDown(); try { start.await(); - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); int serverThreads = server.webServer.getThreadPool().getThreads(); - assertTrue("More threads are started than expected, Server Threads count: " - + serverThreads, serverThreads <= MAX_THREADS); - LOG.info("Number of threads = " + serverThreads + - " which is less or equal than the max = " + MAX_THREADS); + assertTrue( + "More threads are started than expected, Server Threads count: " + serverThreads, + serverThreads <= MAX_THREADS); + LOG.info("Number of threads = " + serverThreads + + " which is less or equal than the max = " + MAX_THREADS); } catch (Exception e) { // do nothing } @@ -209,31 +211,30 @@ public void testMaxThreads() throws Exception { start.countDown(); } - @Test public void testEcho() throws Exception { - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); - assertEquals("a:b\nc<:d\ne:>\n", - readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); + @Test + public void testEcho() throws Exception { + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc<:d\ne:>\n", readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); } /** Test the echo map servlet that uses getParameterMap. */ - @Test public void testEchoMap() throws Exception { - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echomap?a=b&c=d"))); - assertEquals("a:b,>\nc<:d\n", - readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); + @Test + public void testEchoMap() throws Exception { + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echomap?a=b&c=d"))); + assertEquals("a:b,>\nc<:d\n", readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); } /** - * Test that verifies headers can be up to 64K long. - * The test adds a 63K header leaving 1K for other headers. - * This is because the header buffer setting is for ALL headers, - * names and values included. */ - @Test public void testLongHeader() throws Exception { + * Test that verifies headers can be up to 64K long. The test adds a 63K header leaving 1K for + * other headers. This is because the header buffer setting is for ALL headers, names and values + * included. 
+ */ + @Test + public void testLongHeader() throws Exception { URL url = new URL(baseUrl, "/longheader"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); StringBuilder sb = new StringBuilder(); - for (int i = 0 ; i < 63 * 1024; i++) { + for (int i = 0; i < 63 * 1024; i++) { sb.append("a"); } conn.setRequestProperty("longheader", sb.toString()); @@ -244,14 +245,14 @@ public void testMaxThreads() throws Exception { public void testContentTypes() throws Exception { // Static CSS files should have text/css URL cssUrl = new URL(baseUrl, "/static/test.css"); - HttpURLConnection conn = (HttpURLConnection)cssUrl.openConnection(); + HttpURLConnection conn = (HttpURLConnection) cssUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/css", conn.getContentType()); // Servlets should have text/plain with proper encoding by default URL servletUrl = new URL(baseUrl, "/echo?a=b"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/plain;charset=utf-8", conn.getContentType()); @@ -259,14 +260,14 @@ public void testContentTypes() throws Exception { // We should ignore parameters for mime types - ie a parameter // ending in .css should not change mime type servletUrl = new URL(baseUrl, "/echo?a=b.css"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/plain;charset=utf-8", conn.getContentType()); // Servlets that specify text/html should get that content type servletUrl = new URL(baseUrl, "/htmlcontent"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/html;charset=utf-8", conn.getContentType()); @@ -335,21 +336,20 @@ private static String readFully(final InputStream input) throws IOException { } /** - * Dummy filter that mimics as an authentication filter. Obtains user identity - * from the request parameter user.name. Wraps around the request so that - * request.getRemoteUser() returns the user identity. - * + * Dummy filter that mimics as an authentication filter. Obtains user identity from the request + * parameter user.name. Wraps around the request so that request.getRemoteUser() returns the user + * identity. */ public static class DummyServletFilter implements Filter { @Override - public void destroy() { } + public void destroy() { + } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain filterChain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain) + throws IOException, ServletException { final String userName = request.getParameter("user.name"); - ServletRequest requestModified = - new HttpServletRequestWrapper((HttpServletRequest) request) { + ServletRequest requestModified = new HttpServletRequestWrapper((HttpServletRequest) request) { @Override public String getRemoteUser() { return userName; @@ -359,12 +359,12 @@ public String getRemoteUser() { } @Override - public void init(FilterConfig arg0) { } + public void init(FilterConfig arg0) { + } } /** * FilterInitializer that initialized the DummyFilter. 
- * */ public static class DummyFilterInitializer extends FilterInitializer { public DummyFilterInitializer() { @@ -377,10 +377,8 @@ public void initFilter(FilterContainer container, Configuration conf) { } /** - * Access a URL and get the corresponding return Http status code. The URL - * will be accessed as the passed user, by sending user.name request - * parameter. - * + * Access a URL and get the corresponding return Http status code. The URL will be accessed as the + * passed user, by sending user.name request parameter. * @param urlstring The url to access * @param userName The user to perform access as * @return The HTTP response code @@ -389,7 +387,7 @@ public void initFilter(FilterContainer container, Configuration conf) { private static int getHttpStatusCode(String urlstring, String userName) throws IOException { URL url = new URL(urlstring + "?user.name=" + userName); System.out.println("Accessing " + url + " as user " + userName); - HttpURLConnection connection = (HttpURLConnection)url.openConnection(); + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.connect(); return connection.getResponseCode(); } @@ -411,9 +409,8 @@ public List getGroups(String user) { } /** - * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics - * servlets, when authentication filters are set, but authorization is not - * enabled. + * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics servlets, when + * authentication filters are set, but authorization is not enabled. */ @Test @Ignore @@ -421,10 +418,9 @@ public void testDisabledAuthorizationOfDefaultServlets() throws Exception { Configuration conf = new Configuration(); // Authorization is disabled by default - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - DummyFilterInitializer.class.getName()); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - MyGroupsProvider.class.getName()); + MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA", Collections.singletonList("groupA")); @@ -434,33 +430,29 @@ public void testDisabledAuthorizationOfDefaultServlets() throws Exception { .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; + String serverURL = + "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", "logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB" }) { - assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL - + servlet, user)); + assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user)); } } myServer.stop(); } /** - * Verify the administrator access for /logs, /stacks, /conf, /logLevel and - * /metrics servlets. + * Verify the administrator access for /logs, /stacks, /conf, /logLevel and /metrics servlets. 
*/ @Test @Ignore public void testAuthorizationOfDefaultServlets() throws Exception { Configuration conf = new Configuration(); - conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, - true); - conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, - true); - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - DummyFilterInitializer.class.getName()); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - MyGroupsProvider.class.getName()); + MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA", Collections.singletonList("groupA")); @@ -475,15 +467,14 @@ public void testAuthorizationOfDefaultServlets() throws Exception { myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - String serverURL = "http://" - + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; + String serverURL = + "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", "logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB", "userC", "userD" }) { - assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL - + servlet, user)); + assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user)); } - assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, getHttpStatusCode( - serverURL + servlet, "userE")); + assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, + getHttpStatusCode(serverURL + servlet, "userE")); } myServer.stop(); } @@ -494,8 +485,8 @@ public void testRequestQuoterWithNull() { Mockito.doReturn(null).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter = new RequestQuoter(request); String[] parameterValues = requestQuoter.getParameterValues("dummy"); - Assert.assertNull("It should return null " - + "when there are no values for the parameter", parameterValues); + Assert.assertNull("It should return null " + "when there are no values for the parameter", + parameterValues); } @Test @@ -505,16 +496,16 @@ public void testRequestQuoterWithNotNull() { Mockito.doReturn(values).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter = new RequestQuoter(request); String[] parameterValues = requestQuoter.getParameterValues("dummy"); - Assert.assertTrue("It should return Parameter Values", Arrays.equals( - values, parameterValues)); + Assert.assertTrue("It should return Parameter Values", Arrays.equals(values, parameterValues)); } @SuppressWarnings("unchecked") private static Map parse(String jsonString) { - return (Map)JSON.parse(jsonString); + return (Map) JSON.parse(jsonString); } - @Test public void testJersey() throws Exception { + @Test + public void testJersey() throws Exception { LOG.info("BEGIN testJersey()"); final String js = readOutput(new URL(baseUrl, "/jersey/foo?op=bar")); final Map m = parse(js); @@ -535,33 +526,33 @@ public void testHasAdministratorAccess() throws Exception { Mockito.when(request.getRemoteUser()).thenReturn(null); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - //authorization OFF + // authorization OFF 
Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); - //authorization ON & user NULL + // authorization ON & user NULL response = Mockito.mock(HttpServletResponse.class); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), - Mockito.anyString()); + Mockito.anyString()); - //authorization ON & user NOT NULL & ACLs NULL + // authorization ON & user NOT NULL & ACLs NULL response = Mockito.mock(HttpServletResponse.class); Mockito.when(request.getRemoteUser()).thenReturn("foo"); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); - //authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs + // authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs response = Mockito.mock(HttpServletResponse.class); AccessControlList acls = Mockito.mock(AccessControlList.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); + Mockito.when(acls.isUserAllowed(Mockito. any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), - Mockito.anyString()); + Mockito.anyString()); - //authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs + // authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs response = Mockito.mock(HttpServletResponse.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(true); + Mockito.when(acls.isUserAllowed(Mockito. any())).thenReturn(true); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); @@ -575,14 +566,14 @@ public void testRequiresAuthorizationAccess() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - //requires admin access to instrumentation, FALSE by default + // requires admin access to instrumentation, FALSE by default Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response)); - //requires admin access to instrumentation, TRUE + // requires admin access to instrumentation, TRUE conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); AccessControlList acls = Mockito.mock(AccessControlList.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); + Mockito.when(acls.isUserAllowed(Mockito. 
any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response)); } @@ -611,8 +602,7 @@ public void testBindAddress() throws Exception { } } - private HttpServer checkBindAddress(String host, int port, boolean findPort) - throws Exception { + private HttpServer checkBindAddress(String host, int port, boolean findPort) throws Exception { HttpServer server = createServer(host, port); try { // not bound, ephemeral should return requested port (0 for ephemeral) @@ -645,14 +635,12 @@ public void testXFrameHeaderSameOrigin() throws Exception { conf.set("hbase.http.filter.xframeoptions.mode", "SAMEORIGIN"); HttpServer myServer = new HttpServer.Builder().setName("test") - .addEndpoint(new URI("http://localhost:0")) - .setFindPort(true).setConf(conf).build(); + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.addUnprivilegedServlet("echo", "/echo", EchoServlet.class); myServer.start(); - String serverURL = "http://" - + NetUtils.getHostPortString(myServer.getConnectorAddress(0)); + String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)); URL url = new URL(new URL(serverURL), "/echo?a=b&c=d"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java index ce0d6d6bc327..e2513bcfe76c 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHttpServerLifecycle extends HttpServerFunctionalTest { @ClassRule @@ -33,8 +33,8 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest { HBaseClassTestRule.forClass(TestHttpServerLifecycle.class); /** - * Check that a server is alive by probing the {@link HttpServer#isAlive()} method - * and the text of its toString() description + * Check that a server is alive by probing the {@link HttpServer#isAlive()} method and the text of + * its toString() description * @param server server */ private void assertAlive(HttpServer server) { @@ -49,16 +49,17 @@ private void assertNotLive(HttpServer server) { /** * Test that the server is alive once started - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testCreatedServerIsNotAlive() throws Throwable { HttpServer server = createTestServer(); assertNotLive(server); } - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStopUnstartedServer() throws Throwable { HttpServer server = createTestServer(); stop(server); @@ -66,10 +67,10 @@ public void testStopUnstartedServer() throws Throwable { /** * Test that the server is alive once started - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStartedServerIsAlive() throws Throwable { HttpServer server = null; server = createTestServer(); @@ -87,15 +88,15 @@ public void testStartedServerIsAlive() throws Throwable { private void assertToStringContains(HttpServer server, String text) { String description = server.toString(); assertTrue("Did not find \"" + text + "\" in \"" + description + "\"", - description.contains(text)); + description.contains(text)); } /** * Test that the server is not alive once stopped - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStoppedServerIsNotAlive() throws Throwable { HttpServer server = createAndStartTestServer(); assertAlive(server); @@ -105,10 +106,10 @@ public void testStoppedServerIsNotAlive() throws Throwable { /** * Test that the server is not alive once stopped - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStoppingTwiceServerIsAllowed() throws Throwable { HttpServer server = createAndStartTestServer(); assertAlive(server); @@ -120,11 +121,10 @@ public void testStoppingTwiceServerIsAllowed() throws Throwable { /** * Test that the server is alive once started - * - * @throws Throwable - * on failure + * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testWepAppContextAfterServerStop() throws Throwable { HttpServer server = null; String key = "test.attribute.key"; diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java index 
11a7db2fbf05..8a86c7f3833e 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ /** * Test webapp loading */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHttpServerWebapps extends HttpServerFunctionalTest { @ClassRule @@ -61,8 +61,8 @@ public void testValidServerResource() throws Throwable { public void testMissingServerResource() throws Throwable { try { HttpServer server = createServer("NoSuchWebapp"); - //should not have got here. - //close the server + // should not have got here. + // close the server String serverDescription = server.toString(); stop(server); fail("Expected an exception, got " + serverDescription); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java index 7737b298b6a6..36579a651c1d 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestPathFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -62,13 +62,13 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { if (filterConfig == null) { return; } - String uri = ((HttpServletRequest)request).getRequestURI(); + String uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); RECORDS.add(uri); chain.doFilter(request, response); @@ -76,7 +76,8 @@ public void doFilter(ServletRequest request, ServletResponse response, /** Configuration for RecordingFilter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -89,9 +90,8 @@ public void initFilter(FilterContainer container, Configuration conf) { public void testPathSpecFilters() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - RecordingFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, RecordingFilter.Initializer.class.getName()); String[] pathSpecs = { "/path", "/path/*" }; HttpServer http = createTestServer(conf, pathSpecs); http.start(); @@ -105,12 +105,11 @@ public void testPathSpecFilters() throws Exception { final String allURL = "/*"; final String[] filteredUrls = { baseURL, baseSlashURL, 
addedURL, addedSlashURL, longURL }; - final String[] notFilteredUrls = {rootURL, allURL}; + final String[] notFilteredUrls = { rootURL, allURL }; // access the urls and verify our paths specs got added to the // filters - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (String filteredUrl : filteredUrls) { access(prefix + filteredUrl); @@ -124,7 +123,7 @@ public void testPathSpecFilters() throws Exception { LOG.info("RECORDS = " + RECORDS); - //verify records + // verify records for (String filteredUrl : filteredUrls) { assertTrue(RECORDS.remove(filteredUrl)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java index 7723e6e78871..498bf7c6d3be 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,14 +22,13 @@ import java.util.Arrays; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestProfileOutputServlet { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -37,8 +36,8 @@ public class TestProfileOutputServlet { @Test public void testSanitization() { - List good = Arrays.asList("abcd", "key=value", "key1=value&key2=value2", "", - "host=host-1.example.com"); + List good = + Arrays.asList("abcd", "key=value", "key1=value&key2=value2", "", "host=host-1.example.com"); for (String input : good) { assertEquals(input, ProfileOutputServlet.sanitize(input)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java index e4ecaedaa3e8..7c38cb8b3e41 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,10 +23,8 @@ import java.security.Principal; import java.security.PrivilegedExceptionAction; import java.util.Set; - import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosTicket; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; @@ -72,7 +70,7 @@ * HttpComponents to verify that the doas= mechanicsm works, and that the proxyuser settings are * observed. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -94,7 +92,6 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { private static File privilegedKeytab; private static File privileged2Keytab; - @BeforeClass public static void setupServer() throws Exception { Configuration conf = new Configuration(); @@ -132,7 +129,7 @@ public static void setupServer() throws Exception { server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } @AfterClass @@ -159,7 +156,6 @@ private static void setupUser(SimpleKdcServer kdc, File keytab, String principal kdc.exportPrincipal(principal, keytab); } - protected static Configuration buildSpnegoConfiguration(Configuration conf, String serverPrincipal, File serverKeytab) { KerberosName.setRules("DEFAULT"); @@ -182,13 +178,13 @@ protected static Configuration buildSpnegoConfiguration(Configuration conf, } /** - * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI - * which are meant only for administrators. + * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which + * are meant only for administrators. */ public static AccessControlList buildAdminAcl(Configuration conf) { final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null); - final String adminGroups = conf.get( - HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); + final String adminGroups = + conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); if (userGroups == null && adminGroups == null) { // Backwards compatibility - if the user doesn't have anything set, allow all users in. 
return new AccessControlList("*", null); @@ -198,20 +194,23 @@ public static AccessControlList buildAdminAcl(Configuration conf) { @Test public void testProxyAllowed() throws Exception { - testProxy(WHEEL_PRINCIPAL, PRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_OK, null); + testProxy(WHEEL_PRINCIPAL, PRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_OK, null); } @Test public void testProxyDisallowedForUnprivileged() throws Exception { - testProxy(WHEEL_PRINCIPAL, UNPRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, "403 User unprivileged is unauthorized to access this page."); + testProxy(WHEEL_PRINCIPAL, UNPRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, + "403 User unprivileged is unauthorized to access this page."); } @Test public void testProxyDisallowedForNotSudoAble() throws Exception { - testProxy(WHEEL_PRINCIPAL, PRIVILEGED2_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, "403 Forbidden"); + testProxy(WHEEL_PRINCIPAL, PRIVILEGED2_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, + "403 Forbidden"); } - public void testProxy(String clientPrincipal, String doAs, int responseCode, String statusLine) throws Exception { + public void testProxy(String clientPrincipal, String doAs, int responseCode, String statusLine) + throws Exception { // Create the subject for the client final Subject clientSubject = JaasKrbUtil.loginUsingKeytab(WHEEL_PRINCIPAL, wheelKeytab); final Set clientPrincipals = clientSubject.getPrincipals(); @@ -221,7 +220,7 @@ public void testProxy(String clientPrincipal, String doAs, int responseCode, Str // Get a TGT for the subject (might have many, different encryption types). The first should // be the default encryption type. Set privateCredentials = - clientSubject.getPrivateCredentials(KerberosTicket.class); + clientSubject.getPrivateCredentials(KerberosTicket.class); assertFalse(privateCredentials.isEmpty()); KerberosTicket tgt = privateCredentials.iterator().next(); assertNotNull(tgt); @@ -231,34 +230,32 @@ public void testProxy(String clientPrincipal, String doAs, int responseCode, Str // Run this code, logged in as the subject (the client) HttpResponse resp = Subject.doAs(clientSubject, new PrivilegedExceptionAction() { - @Override - public HttpResponse run() throws Exception { - // Logs in with Kerberos via GSS - GSSManager gssManager = GSSManager.getInstance(); - // jGSS Kerberos login constant - Oid oid = new Oid("1.2.840.113554.1.2.2"); - GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); - GSSCredential credential = gssManager.createCredential(gssClient, - GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); - - HttpClientContext context = HttpClientContext.create(); - Lookup authRegistry = RegistryBuilder.create() - .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)) - .build(); - - HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) - .build(); - BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); - - URL url = new URL(getServerURL(server), "/echo?doAs=" + doAs + "&a=b"); - context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); - context.setCredentialsProvider(credentialsProvider); - context.setAuthSchemeRegistry(authRegistry); - - HttpGet get = new HttpGet(url.toURI()); - return client.execute(get, context); - } + @Override + public HttpResponse run() throws Exception { + // Logs in with Kerberos via GSS + GSSManager gssManager = 
GSSManager.getInstance(); + // jGSS Kerberos login constant + Oid oid = new Oid("1.2.840.113554.1.2.2"); + GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); + GSSCredential credential = gssManager.createCredential(gssClient, + GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); + + HttpClientContext context = HttpClientContext.create(); + Lookup authRegistry = RegistryBuilder. create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); + + HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build(); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); + + URL url = new URL(getServerURL(server), "/echo?doAs=" + doAs + "&a=b"); + context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); + context.setCredentialsProvider(credentialsProvider); + context.setAuthSchemeRegistry(authRegistry); + + HttpGet get = new HttpGet(url.toURI()); + return client.execute(get, context); + } }); assertNotNull(resp); @@ -266,8 +263,8 @@ public HttpResponse run() throws Exception { if (responseCode == HttpURLConnection.HTTP_OK) { assertTrue(EntityUtils.toString(resp.getEntity()).trim().contains("a:b")); } else { - assertTrue(resp.getStatusLine().toString().contains(statusLine) || - EntityUtils.toString(resp.getEntity()).contains(statusLine)); + assertTrue(resp.getStatusLine().toString().contains(statusLine) + || EntityUtils.toString(resp.getEntity()).contains(statusLine)); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java index cbb5635690b9..d86831e3db7c 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,19 +45,18 @@ import org.slf4j.LoggerFactory; /** - * This testcase issues SSL certificates configures the HttpServer to serve - * HTTPS using the created certficates and calls an echo servlet using the - * corresponding HTTPS URL. + * This testcase issues SSL certificates configures the HttpServer to serve HTTPS using the created + * certficates and calls an echo servlet using the corresponding HTTPS URL. 
*/ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestSSLHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSSLHttpServer.class); - private static final String BASEDIR = System.getProperty("test.build.dir", - "target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName(); + private static final String BASEDIR = System.getProperty("test.build.dir", "target/test-dir") + + "/" + TestSSLHttpServer.class.getSimpleName(); private static final Logger LOG = LoggerFactory.getLogger(TestSSLHttpServer.class); private static Configuration serverConf; @@ -87,26 +86,24 @@ public static void setup() throws Exception { clientConf.addResource(serverConf.get(SSLFactory.SSL_CLIENT_CONF_KEY)); serverConf.addResource(serverConf.get(SSLFactory.SSL_SERVER_CONF_KEY)); clientConf.set(SSLFactory.SSL_CLIENT_CONF_KEY, serverConf.get(SSLFactory.SSL_CLIENT_CONF_KEY)); - + clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, clientConf); clientSslFactory.init(); - server = new HttpServer.Builder() - .setName("test") - .addEndpoint(new URI("https://localhost")) - .setConf(serverConf) - .keyPassword(HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.keypassword", - null)) - .keyStore(serverConf.get("ssl.server.keystore.location"), - HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.password", null), - clientConf.get("ssl.server.keystore.type", "jks")) - .trustStore(serverConf.get("ssl.server.truststore.location"), - HBaseConfiguration.getPassword(serverConf, "ssl.server.truststore.password", null), - serverConf.get("ssl.server.truststore.type", "jks")).build(); + server = new HttpServer.Builder().setName("test").addEndpoint(new URI("https://localhost")) + .setConf(serverConf) + .keyPassword( + HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.keypassword", null)) + .keyStore(serverConf.get("ssl.server.keystore.location"), + HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.password", null), + clientConf.get("ssl.server.keystore.type", "jks")) + .trustStore(serverConf.get("ssl.server.truststore.location"), + HBaseConfiguration.getPassword(serverConf, "ssl.server.truststore.password", null), + serverConf.get("ssl.server.truststore.type", "jks")) + .build(); server.addUnprivilegedServlet("echo", "/echo", TestHttpServer.EchoServlet.class); server.start(); - baseUrl = new URL("https://" - + NetUtils.getHostPortString(server.getConnectorAddress(0))); + baseUrl = new URL("https://" + NetUtils.getHostPortString(server.getConnectorAddress(0))); LOG.info("HTTP server started: " + baseUrl); } @@ -121,8 +118,7 @@ public static void cleanup() throws Exception { @Test public void testEcho() throws Exception { assertEquals("a:b\nc:d\n", readOut(new URL(baseUrl, "/echo?a=b&c=d"))); - assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl, - "/echo?a=b&c<=d&e=>"))); + assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); } @Test diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java index 6b9d2c341ed7..a9e1028923ab 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({HttpServerFunctionalTest.class, MediumTests.class}) +@Category({ HttpServerFunctionalTest.class, MediumTests.class }) public class TestSecurityHeadersFilter { private static URL baseUrl; private HttpServer http; @@ -62,28 +62,27 @@ public void testDefaultValues() throws Exception { assertThat(conn.getResponseCode(), equalTo(HttpURLConnection.HTTP_OK)); assertThat("Header 'X-Content-Type-Options' is missing", - conn.getHeaderField("X-Content-Type-Options"), is(not((String)null))); + conn.getHeaderField("X-Content-Type-Options"), is(not((String) null))); assertThat(conn.getHeaderField("X-Content-Type-Options"), equalTo("nosniff")); - assertThat("Header 'X-XSS-Protection' is missing", - conn.getHeaderField("X-XSS-Protection"), is(not((String)null))); + assertThat("Header 'X-XSS-Protection' is missing", conn.getHeaderField("X-XSS-Protection"), + is(not((String) null))); assertThat("Header 'X-XSS-Protection' has invalid value", - conn.getHeaderField("X-XSS-Protection"), equalTo("1; mode=block")); + conn.getHeaderField("X-XSS-Protection"), equalTo("1; mode=block")); - assertThat("Header 'Strict-Transport-Security' should be missing from response," + - "but it's present", - conn.getHeaderField("Strict-Transport-Security"), is((String)null)); - assertThat("Header 'Content-Security-Policy' should be missing from response," + - "but it's present", - conn.getHeaderField("Content-Security-Policy"), is((String)null)); + assertThat( + "Header 'Strict-Transport-Security' should be missing from response," + "but it's present", + conn.getHeaderField("Strict-Transport-Security"), is((String) null)); + assertThat( + "Header 'Content-Security-Policy' should be missing from response," + "but it's present", + conn.getHeaderField("Content-Security-Policy"), is((String) null)); } @Test public void testHstsAndCspSettings() throws IOException { Configuration conf = new Configuration(); - conf.set("hbase.http.filter.hsts.value", - "max-age=63072000;includeSubDomains;preload"); + conf.set("hbase.http.filter.hsts.value", "max-age=63072000;includeSubDomains;preload"); conf.set("hbase.http.filter.csp.value", - "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); http = createTestServer(conf); http.start(); baseUrl = getServerURL(http); @@ -93,15 +92,15 @@ public void testHstsAndCspSettings() throws IOException { assertThat(conn.getResponseCode(), equalTo(HttpURLConnection.HTTP_OK)); assertThat("Header 'Strict-Transport-Security' is missing from Rest response", - conn.getHeaderField("Strict-Transport-Security"), Is.is(not((String)null))); + conn.getHeaderField("Strict-Transport-Security"), Is.is(not((String) null))); assertThat("Header 'Strict-Transport-Security' has invalid value", - conn.getHeaderField("Strict-Transport-Security"), - IsEqual.equalTo("max-age=63072000;includeSubDomains;preload")); + conn.getHeaderField("Strict-Transport-Security"), + IsEqual.equalTo("max-age=63072000;includeSubDomains;preload")); assertThat("Header 'Content-Security-Policy' is missing from Rest response", - conn.getHeaderField("Content-Security-Policy"), Is.is(not((String)null))); + conn.getHeaderField("Content-Security-Policy"), Is.is(not((String) null))); assertThat("Header 'Content-Security-Policy' has invalid value", - 
conn.getHeaderField("Content-Security-Policy"), - IsEqual.equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); + conn.getHeaderField("Content-Security-Policy"), + IsEqual.equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java index 1e9a2861c9ef..699ccbc2939a 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -42,7 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestServletFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -66,20 +65,21 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { if (filterConfig == null) { return; } - uri = ((HttpServletRequest)request).getRequestURI(); + uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); chain.doFilter(request, response); } /** Configuration for the filter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -90,22 +90,21 @@ public void initFilter(FilterContainer container, Configuration conf) { private static void assertExceptionContains(String string, Throwable t) { String msg = t.getMessage(); - Assert.assertTrue( - "Expected to find '" + string + "' but got unexpected exception:" - + StringUtils.stringifyException(t), msg.contains(string)); + Assert.assertTrue("Expected to find '" + string + "' but got unexpected exception:" + + StringUtils.stringifyException(t), + msg.contains(string)); } @Test @Ignore - //From stack + // From stack // Its a 'foreign' test, one that came in from hadoop when we copy/pasted http // It's second class. 
Could comment it out if only failing test (as per @nkeywal – sort of) public void testServletFilter() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - SimpleFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, SimpleFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); http.start(); @@ -115,23 +114,22 @@ public void testServletFilter() throws Exception { final String logURL = "/logs/a.log"; final String hadooplogoURL = "/static/hadoop-logo.jpg"; - final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL}; + final String[] urls = { fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL }; final Random rand = ThreadLocalRandom.current(); final int[] sequence = new int[50]; - //generate a random sequence and update counts - for(int i = 0; i < sequence.length; i++) { + // generate a random sequence and update counts + for (int i = 0; i < sequence.length; i++) { sequence[i] = rand.nextInt(urls.length); } - //access the urls as the sequence - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + // access the urls as the sequence + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (int aSequence : sequence) { access(prefix + urls[aSequence]); - //make sure everything except fsck get filtered + // make sure everything except fsck get filtered if (aSequence == 0) { assertNull(uri); } else { @@ -166,8 +164,7 @@ public void initFilter(FilterContainer container, Configuration conf) { public void testServletFilterWhenInitThrowsException() throws Exception { Configuration conf = new Configuration(); // start an http server with ErrorFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - ErrorFilter.Initializer.class.getName()); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, ErrorFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); try { http.start(); @@ -178,17 +175,15 @@ public void testServletFilterWhenInitThrowsException() throws Exception { } /** - * Similar to the above test case, except that it uses a different API to add the - * filter. Regression test for HADOOP-8786. + * Similar to the above test case, except that it uses a different API to add the filter. + * Regression test for HADOOP-8786. */ @Test - public void testContextSpecificServletFilterWhenInitThrowsException() - throws Exception { + public void testContextSpecificServletFilterWhenInitThrowsException() throws Exception { Configuration conf = new Configuration(); HttpServer http = createTestServer(conf); - HttpServer.defineFilter(http.webAppContext, - "ErrorFilter", ErrorFilter.class.getName(), - null, null); + HttpServer.defineFilter(http.webAppContext, "ErrorFilter", ErrorFilter.class.getName(), null, + null); try { http.start(); fail("expecting exception"); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java index eb3394300011..a8684ae03211 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,7 +69,7 @@ * Test class for SPNEGO authentication on the HttpServer. Uses Kerby's MiniKDC and Apache * HttpComponents to verify that a simple Servlet is reachable via SPNEGO and unreachable w/o. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestSpnegoHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -114,7 +114,7 @@ public static void setupServer() throws Exception { server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } @AfterClass @@ -174,7 +174,7 @@ public void testAllowedClient() throws Exception { // Get a TGT for the subject (might have many, different encryption types). The first should // be the default encryption type. Set privateCredentials = - clientSubject.getPrivateCredentials(KerberosTicket.class); + clientSubject.getPrivateCredentials(KerberosTicket.class); assertFalse(privateCredentials.isEmpty()); KerberosTicket tgt = privateCredentials.iterator().next(); assertNotNull(tgt); @@ -184,34 +184,32 @@ public void testAllowedClient() throws Exception { // Run this code, logged in as the subject (the client) HttpResponse resp = Subject.doAs(clientSubject, new PrivilegedExceptionAction() { - @Override - public HttpResponse run() throws Exception { - // Logs in with Kerberos via GSS - GSSManager gssManager = GSSManager.getInstance(); - // jGSS Kerberos login constant - Oid oid = new Oid("1.2.840.113554.1.2.2"); - GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); - GSSCredential credential = gssManager.createCredential(gssClient, - GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); - - HttpClientContext context = HttpClientContext.create(); - Lookup authRegistry = RegistryBuilder.create() - .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)) - .build(); - - HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) - .build(); - BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); - - URL url = new URL(getServerURL(server), "/echo?a=b"); - context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); - context.setCredentialsProvider(credentialsProvider); - context.setAuthSchemeRegistry(authRegistry); - - HttpGet get = new HttpGet(url.toURI()); - return client.execute(get, context); - } + @Override + public HttpResponse run() throws Exception { + // Logs in with Kerberos via GSS + GSSManager gssManager = GSSManager.getInstance(); + // jGSS Kerberos login constant + Oid oid = new Oid("1.2.840.113554.1.2.2"); + GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); + GSSCredential credential = gssManager.createCredential(gssClient, + GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); + + HttpClientContext context = HttpClientContext.create(); + Lookup authRegistry = RegistryBuilder. 
create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); + + HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build(); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); + + URL url = new URL(getServerURL(server), "/echo?a=b"); + context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); + context.setCredentialsProvider(credentialsProvider); + context.setAuthSchemeRegistry(authRegistry); + + HttpGet get = new HttpGet(url.toURI()); + return client.execute(get, context); + } }); assertNotNull(resp); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java index ac2ef8f66497..8ba4f72ce71d 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,8 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.util.ajax.JSON; /** - * Basic test case that the ConfServlet can write configuration - * to its output in XML and JSON format. + * Basic test case that the ConfServlet can write configuration to its output in XML and JSON + * format. */ @Category({ MiscTests.class, SmallTests.class }) public class TestConfServlet { @@ -74,15 +74,14 @@ public void testWriteJson() throws Exception { programSet.add("programatically"); programSet.add("programmatically"); Object parsed = JSON.parse(json); - Object[] properties = ((Map)parsed).get("properties"); + Object[] properties = ((Map) parsed).get("properties"); for (Object o : properties) { - Map propertyInfo = (Map)o; - String key = (String)propertyInfo.get("key"); - String val = (String)propertyInfo.get("value"); - String resource = (String)propertyInfo.get("resource"); + Map propertyInfo = (Map) o; + String key = (String) propertyInfo.get("key"); + String val = (String) propertyInfo.get("value"); + String resource = (String) propertyInfo.get("resource"); System.err.println("k: " + key + " v: " + val + " r: " + resource); - if (TEST_KEY.equals(key) && TEST_VAL.equals(val) - && programSet.contains(resource)) { + if (TEST_KEY.equals(key) && TEST_VAL.equals(val) && programSet.contains(resource)) { foundSetting = true; } } @@ -95,8 +94,7 @@ public void testWriteXml() throws Exception { ConfServlet.writeResponse(getTestConf(), sw, "xml"); String xml = sw.toString(); - DocumentBuilderFactory docBuilderFactory - = DocumentBuilderFactory.newInstance(); + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); Document doc = builder.parse(new InputSource(new StringReader(xml))); NodeList nameNodes = doc.getElementsByTagName("name"); @@ -107,7 +105,7 @@ public void testWriteXml() throws Exception { System.err.println("xml key: " + key); if (TEST_KEY.equals(key)) { foundSetting = true; - Element propertyElem = (Element)nameNode.getParentNode(); + Element propertyElem = (Element) nameNode.getParentNode(); String val = propertyElem.getElementsByTagName("value").item(0).getTextContent(); assertEquals(TEST_VAL, val); } diff --git 
a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java index 02248d6bcd29..a4d5cd86f0ef 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestJMXJsonServlet extends HttpServerFunctionalTest { @ClassRule @@ -47,7 +47,8 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { private static HttpServer server; private static URL baseUrl; - @BeforeClass public static void setup() throws Exception { + @BeforeClass + public static void setup() throws Exception { // Eclipse doesn't pick this up correctly from the plugin // configuration in the pom. System.setProperty(HttpServerFunctionalTest.TEST_BUILD_WEBAPPS, "target/test-classes/webapps"); @@ -56,78 +57,78 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { baseUrl = getServerURL(server); } - @AfterClass public static void cleanup() throws Exception { + @AfterClass + public static void cleanup() throws Exception { server.stop(); } public static void assertReFind(String re, String value) { Pattern p = Pattern.compile(re); Matcher m = p.matcher(value); - assertTrue("'"+p+"' does not match "+value, m.find()); + assertTrue("'" + p + "' does not match " + value, m.find()); } public static void assertNotFind(String re, String value) { Pattern p = Pattern.compile(re); Matcher m = p.matcher(value); - assertFalse("'"+p+"' should not match "+value, m.find()); + assertFalse("'" + p + "' should not match " + value, m.find()); } - @Test public void testQuery() throws Exception { + @Test + public void testQuery() throws Exception { String result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Runtime")); - LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Runtime\"", result); assertReFind("\"modelerType\"", result); result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory")); - LOG.info("/jmx?qry=java.lang:type=Memory RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Memory RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"modelerType\"", result); result = readOutput(new URL(baseUrl, "/jmx")); - LOG.info("/jmx RESULT: "+result); + LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); // test to get an attribute of a mbean - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::HeapMemoryUsage")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::HeapMemoryUsage")); + LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); // negative test to get an attribute of a mbean - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::")); - LOG.info("/jmx RESULT: "+result); + result 
= readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::")); + LOG.info("/jmx RESULT: " + result); assertReFind("\"ERROR\"", result); // test to get JSONP result result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory&callback=mycallback1")); - LOG.info("/jmx?qry=java.lang:type=Memory&callback=mycallback RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Memory&callback=mycallback RESULT: " + result); assertReFind("^mycallback1\\(\\{", result); assertReFind("\\}\\);$", result); // negative test to get an attribute of a mbean as JSONP - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::&callback=mycallback2")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::&callback=mycallback2")); + LOG.info("/jmx RESULT: " + result); assertReFind("^mycallback2\\(\\{", result); assertReFind("\"ERROR\"", result); assertReFind("\\}\\);$", result); // test to get an attribute of a mbean as JSONP - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::HeapMemoryUsage&callback=mycallback3")); - LOG.info("/jmx RESULT: "+result); + result = readOutput( + new URL(baseUrl, "/jmx?get=java.lang:type=Memory::HeapMemoryUsage&callback=mycallback3")); + LOG.info("/jmx RESULT: " + result); assertReFind("^mycallback3\\(\\{", result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); assertReFind("\\}\\);$", result); // test exclude the specific mbean - result = readOutput(new URL(baseUrl, - "/jmx?excl=Hadoop:service=HBase,name=RegionServer,sub=Regions")); + result = readOutput( + new URL(baseUrl, "/jmx?excl=Hadoop:service=HBase,name=RegionServer,sub=Regions")); LOG.info("/jmx RESULT: " + result); - assertNotFind("\"name\"\\s*:\\s*\"Hadoop:service=HBase,name=RegionServer,sub=Regions\"",result); + assertNotFind("\"name\"\\s*:\\s*\"Hadoop:service=HBase,name=RegionServer,sub=Regions\"", + result); } @Test @@ -141,8 +142,7 @@ public void testGetPattern() throws Exception { assertReFind("\"NonHeapMemoryUsage\"\\s*:", result); assertNotFind("\"HeapMemoryUsage\"\\s*:", result); - result = - readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[^Non]*HeapMemoryUsage")); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[^Non]*HeapMemoryUsage")); LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); @@ -166,8 +166,8 @@ public void testPatternMatching() throws Exception { @Test public void testDisallowedJSONPCallback() throws Exception { String callback = "function(){alert('bigproblems!')};foo"; - URL url = new URL( - baseUrl, "/jmx?qry=java.lang:type=Memory&callback="+URLEncoder.encode(callback, "UTF-8")); + URL url = new URL(baseUrl, + "/jmx?qry=java.lang:type=Memory&callback=" + URLEncoder.encode(callback, "UTF-8")); HttpURLConnection cnxn = (HttpURLConnection) url.openConnection(); assertEquals(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, cnxn.getResponseCode()); } @@ -175,8 +175,8 @@ public void testDisallowedJSONPCallback() throws Exception { @Test public void testUnderscoresInJSONPCallback() throws Exception { String callback = "my_function"; - URL url = new URL( - baseUrl, "/jmx?qry=java.lang:type=Memory&callback="+URLEncoder.encode(callback, "UTF-8")); + URL url = new URL(baseUrl, + "/jmx?qry=java.lang:type=Memory&callback=" + URLEncoder.encode(callback, "UTF-8")); HttpURLConnection cnxn = (HttpURLConnection) 
url.openConnection(); assertEquals(HttpServletResponse.SC_OK, cnxn.getResponseCode()); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java index 39855ee86eff..ada47b8d38eb 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestStaticUserWebFilter { @ClassRule @@ -47,8 +47,8 @@ public class TestStaticUserWebFilter { private FilterConfig mockConfig(String username) { FilterConfig mock = Mockito.mock(FilterConfig.class); - Mockito.doReturn(username).when(mock).getInitParameter( - ServerConfigurationKeys.HBASE_HTTP_STATIC_USER); + Mockito.doReturn(username).when(mock) + .getInitParameter(ServerConfigurationKeys.HBASE_HTTP_STATIC_USER); return mock; } @@ -59,14 +59,13 @@ public void testFilter() throws Exception { suf.init(config); ArgumentCaptor wrapperArg = - ArgumentCaptor.forClass(HttpServletRequestWrapper.class); + ArgumentCaptor.forClass(HttpServletRequestWrapper.class); FilterChain chain = mock(FilterChain.class); - suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class), - chain); + suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class), chain); - Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito.anyObject()); + Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito. anyObject()); HttpServletRequestWrapper wrapper = wrapperArg.getValue(); assertEquals("myuser", wrapper.getUserPrincipal().getName()); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java index acbcb55d5dbb..b8446c26d937 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,7 +67,7 @@ public class TestLogLevel { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLogLevel.class); + HBaseClassTestRule.forClass(TestLogLevel.class); private static String keystoresDir; private static String sslConfDir; @@ -78,7 +78,7 @@ public class TestLogLevel { private static final String protectedPrefix = "protected"; private static final String protectedLogName = protectedPrefix + "." 
+ logName; private static final org.apache.logging.log4j.Logger log = - org.apache.logging.log4j.LogManager.getLogger(logName); + org.apache.logging.log4j.LogManager.getLogger(logName); private final static String PRINCIPAL = "loglevel.principal"; private final static String KEYTAB = "loglevel.keytab"; @@ -205,7 +205,7 @@ public void testCommandOptions() throws Exception { assertFalse( validateCommand(new String[] { "-setlevel", "foo.bar:8080", className, "DEBUG", "blah" })); assertFalse(validateCommand(new String[] { "-getlevel", "foo.bar:8080", className, "-setlevel", - "foo.bar:8080", className })); + "foo.bar:8080", className })); } /** @@ -236,24 +236,24 @@ private boolean validateCommand(String[] args) { */ private HttpServer createServer(String protocol, boolean isSpnego) throws Exception { HttpServer.Builder builder = new HttpServer.Builder().setName("..") - .addEndpoint(new URI(protocol + "://localhost:0")).setFindPort(true).setConf(serverConf); + .addEndpoint(new URI(protocol + "://localhost:0")).setFindPort(true).setConf(serverConf); if (isSpnego) { // Set up server Kerberos credentials. // Since the server may fall back to simple authentication, // use ACL to make sure the connection is Kerberos/SPNEGO authenticated. builder.setSecurityEnabled(true).setUsernameConfKey(PRINCIPAL).setKeytabConfKey(KEYTAB) - .setACL(new AccessControlList("client")); + .setACL(new AccessControlList("client")); } // if using HTTPS, configure keystore/truststore properties. if (protocol.equals(LogLevel.PROTOCOL_HTTPS)) { builder = builder.keyPassword(sslConf.get("ssl.server.keystore.keypassword")) - .keyStore(sslConf.get("ssl.server.keystore.location"), - sslConf.get("ssl.server.keystore.password"), - sslConf.get("ssl.server.keystore.type", "jks")) - .trustStore(sslConf.get("ssl.server.truststore.location"), - sslConf.get("ssl.server.truststore.password"), - sslConf.get("ssl.server.truststore.type", "jks")); + .keyStore(sslConf.get("ssl.server.keystore.location"), + sslConf.get("ssl.server.keystore.password"), + sslConf.get("ssl.server.keystore.type", "jks")) + .trustStore(sslConf.get("ssl.server.truststore.location"), + sslConf.get("ssl.server.truststore.password"), + sslConf.get("ssl.server.truststore.type", "jks")); } HttpServer server = builder.build(); @@ -262,17 +262,14 @@ private HttpServer createServer(String protocol, boolean isSpnego) throws Except } private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, - final boolean isSpnego) throws Exception { - testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, - logName, + final boolean isSpnego) throws Exception { + testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, logName, org.apache.logging.log4j.Level.DEBUG.toString()); } private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, - final boolean isSpnego, final String newLevel) throws Exception { - testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, - logName, - newLevel); + final boolean isSpnego, final String newLevel) throws Exception { + testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, logName, newLevel); } /** @@ -283,7 +280,7 @@ private void testDynamicLogLevel(final String bindProtocol, final String connect * @throws Exception if client can't accesss server. 
*/ private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, - final boolean isSpnego, final String loggerName, final String newLevel) throws Exception { + final boolean isSpnego, final String loggerName, final String newLevel) throws Exception { if (!LogLevel.isValidProtocol(bindProtocol)) { throw new Exception("Invalid server protocol " + bindProtocol); } @@ -315,7 +312,7 @@ private void testDynamicLogLevel(final String bindProtocol, final String connect String keytabFilePath = keyTabFile.getAbsolutePath(); UserGroupInformation clientUGI = - UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal, keytabFilePath); + UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal, keytabFilePath); try { clientUGI.doAs((PrivilegedExceptionAction) () -> { // client command line @@ -350,7 +347,8 @@ private void getLevel(String protocol, String authority, String logName) throws * @param authority daemon's web UI address * @throws Exception if unable to run or log level does not change as expected */ - private void setLevel(String protocol, String authority, String logName, String newLevel) throws Exception { + private void setLevel(String protocol, String authority, String logName, String newLevel) + throws Exception { String[] setLevelArgs = { "-setlevel", authority, logName, newLevel, "-protocol", protocol }; CLI cli = new CLI(protocol.equalsIgnoreCase("https") ? sslConf : clientConf); cli.run(setLevelArgs); @@ -369,7 +367,8 @@ public void testSettingProtectedLogLevel() throws Exception { fail("Expected IO exception due to protected logger"); } catch (IOException e) { assertTrue(e.getMessage().contains("" + HttpServletResponse.SC_PRECONDITION_FAILED)); - assertTrue(e.getMessage().contains("Modification of logger " + protectedLogName + " is disallowed in configuration.")); + assertTrue(e.getMessage().contains( + "Modification of logger " + protectedLogName + " is disallowed in configuration.")); } } @@ -472,7 +471,7 @@ private static void exceptionShouldContains(String substr, Throwable throwable) } t = t.getCause(); } - throw new AssertionError("Expected to find '" + substr + "' but got unexpected exception:" + - StringUtils.stringifyException(throwable), throwable); + throw new AssertionError("Expected to find '" + substr + "' but got unexpected exception:" + + StringUtils.stringifyException(throwable), throwable); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java index ee900db62301..b683539cc7db 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,9 +34,8 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.util.ajax.JSON; /** - * A simple Jersey resource class TestHttpServer. - * The servlet simply puts the path and the op parameter in a map - * and return it in JSON format in the response. + * A simple Jersey resource class TestHttpServer. The servlet simply puts the path and the op + * parameter in a map and return it in JSON format in the response. 
*/ @Path("") public class JerseyResource { @@ -47,11 +46,9 @@ public class JerseyResource { @GET @Path("{" + PATH + ":.*}") - @Produces({MediaType.APPLICATION_JSON}) - public Response get( - @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path, - @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op - ) throws IOException { + @Produces({ MediaType.APPLICATION_JSON }) + public Response get(@PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path, + @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op) throws IOException { LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op); final Map m = new TreeMap<>(); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java index e09f64204a6d..23a3a0ba84bd 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http.ssl; import java.io.File; @@ -63,7 +62,6 @@ public static String getClasspathDir(Class klass) throws Exception { /** * Create a self-signed X.509 Certificate. - * * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" * @param pair the KeyPair * @param days how many days from now the Certificate is valid for @@ -72,14 +70,13 @@ public static String getClasspathDir(Class klass) throws Exception { */ public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, String algorithm) throws CertificateEncodingException, InvalidKeyException, - IllegalStateException, NoSuchProviderException, NoSuchAlgorithmException, - SignatureException { + IllegalStateException, NoSuchProviderException, NoSuchAlgorithmException, SignatureException { Date from = new Date(); Date to = new Date(from.getTime() + days * 86400000L); BigInteger sn = new BigInteger(64, new SecureRandom()); KeyPair keyPair = pair; X509V1CertificateGenerator certGen = new X509V1CertificateGenerator(); - X500Principal dnName = new X500Principal(dn); + X500Principal dnName = new X500Principal(dn); certGen.setSerialNumber(sn); certGen.setIssuerDN(dnName); @@ -92,28 +89,25 @@ public static X509Certificate generateCertificate(String dn, KeyPair pair, int d return cert; } - public static KeyPair generateKeyPair(String algorithm) - throws NoSuchAlgorithmException { + public static KeyPair generateKeyPair(String algorithm) throws NoSuchAlgorithmException { KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm); keyGen.initialize(1024); return keyGen.genKeyPair(); } - private static KeyStore createEmptyKeyStore() - throws GeneralSecurityException, IOException { + private static KeyStore createEmptyKeyStore() throws GeneralSecurityException, IOException { return createEmptyKeyStore("jks"); } private static KeyStore createEmptyKeyStore(String keyStoreType) - throws GeneralSecurityException, IOException { + throws GeneralSecurityException, IOException { KeyStore ks = KeyStore.getInstance(keyStoreType); ks.load(null, null); // initialize return ks; } - private static void saveKeyStore(KeyStore ks, String 
filename, - String password) - throws GeneralSecurityException, IOException { + private static void saveKeyStore(KeyStore ks, String filename, String password) + throws GeneralSecurityException, IOException { FileOutputStream out = new FileOutputStream(filename); try { ks.store(out, password.toCharArray()); @@ -123,10 +117,9 @@ private static void saveKeyStore(KeyStore ks, String filename, } /** - * Creates a keystore with a single key and saves it to a file. - * This method will use the same password for the keystore and for the key. - * This method will always generate a keystore file in JKS format. - * + * Creates a keystore with a single key and saves it to a file. This method will use the same + * password for the keystore and for the key. This method will always generate a keystore file in + * JKS format. * @param filename String file to save * @param password String store password to set on keystore * @param alias String alias to use for the key @@ -135,17 +128,14 @@ private static void saveKeyStore(KeyStore ks, String filename, * @throws GeneralSecurityException for any error with the security APIs * @throws IOException if there is an I/O error saving the file */ - public static void createKeyStore(String filename, - String password, String alias, - Key privateKey, Certificate cert) - throws GeneralSecurityException, IOException { + public static void createKeyStore(String filename, String password, String alias, Key privateKey, + Certificate cert) throws GeneralSecurityException, IOException { createKeyStore(filename, password, password, alias, privateKey, cert); } /** - * Creates a keystore with a single key and saves it to a file. - * This method will always generate a keystore file in JKS format. - * + * Creates a keystore with a single key and saves it to a file. This method will always generate a + * keystore file in JKS format. * @param filename String file to save * @param password String store password to set on keystore * @param keyPassword String key password to set on key @@ -155,17 +145,13 @@ public static void createKeyStore(String filename, * @throws GeneralSecurityException for any error with the security APIs * @throws IOException if there is an I/O error saving the file */ - public static void createKeyStore(String filename, - String password, String keyPassword, String alias, - Key privateKey, Certificate cert) - throws GeneralSecurityException, IOException { + public static void createKeyStore(String filename, String password, String keyPassword, + String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { createKeyStore(filename, password, keyPassword, alias, privateKey, cert, "JKS"); } - /** * Creates a keystore with a single key and saves it to a file. 
- * * @param filename String file to save * @param password String store password to set on keystore * @param keyPassword String key password to set on key @@ -177,19 +163,16 @@ public static void createKeyStore(String filename, * @throws IOException if there is an I/O error saving the file */ public static void createKeyStore(String filename, String password, String keyPassword, - String alias, Key privateKey, Certificate cert, - String keystoreType) - throws GeneralSecurityException, IOException { + String alias, Key privateKey, Certificate cert, String keystoreType) + throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(keystoreType); - ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), - new Certificate[]{cert}); + ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), new Certificate[] { cert }); saveKeyStore(ks, filename, password); } /** - * Creates a truststore with a single certificate and saves it to a file. - * This method uses the default JKS truststore type. - * + * Creates a truststore with a single certificate and saves it to a file. This method uses the + * default JKS truststore type. * @param filename String file to save * @param password String store password to set on truststore * @param alias String alias to use for the certificate @@ -197,16 +180,13 @@ public static void createKeyStore(String filename, String password, String keyPa * @throws GeneralSecurityException for any error with the security APIs * @throws IOException if there is an I/O error saving the file */ - public static void createTrustStore(String filename, - String password, String alias, - Certificate cert) - throws GeneralSecurityException, IOException { + public static void createTrustStore(String filename, String password, String alias, + Certificate cert) throws GeneralSecurityException, IOException { createTrustStore(filename, password, alias, cert, "JKS"); } /** * Creates a truststore with a single certificate and saves it to a file. 
- * * @param filename String file to save * @param password String store password to set on truststore * @param alias String alias to use for the certificate @@ -216,16 +196,14 @@ public static void createTrustStore(String filename, * @throws IOException if there is an I/O error saving the file */ public static void createTrustStore(String filename, String password, String alias, - Certificate cert, String trustStoreType) - throws GeneralSecurityException, IOException { + Certificate cert, String trustStoreType) throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(trustStoreType); ks.setCertificateEntry(alias, cert); saveKeyStore(ks, filename, password); } - public static void createTrustStore( - String filename, String password, Map certs) - throws GeneralSecurityException, IOException { + public static void createTrustStore(String filename, String password, + Map certs) throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(); for (Map.Entry cert : certs.entrySet()) { ks.setCertificateEntry(cert.getKey(), cert.getValue()); @@ -233,46 +211,41 @@ public static void createTrustStore( saveKeyStore(ks, filename, password); } - public static void cleanupSSLConfig(Configuration conf) - throws Exception { + public static void cleanupSSLConfig(Configuration conf) throws Exception { File f = new File(conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER, - FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY))); + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY))); f.delete(); f = new File(conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER, - FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY))); + FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY))); f.delete(); - String clientKeyStore = conf.get(FileBasedKeyStoresFactory - .resolvePropertyName(SSLFactory.Mode.CLIENT, - FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY)); + String clientKeyStore = + conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.CLIENT, + FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY)); if (clientKeyStore != null) { f = new File(clientKeyStore); f.delete(); } - f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + conf - .get(SSLFactory.SSL_CLIENT_CONF_KEY)); + f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + + conf.get(SSLFactory.SSL_CLIENT_CONF_KEY)); f.delete(); - f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + conf - .get(SSLFactory.SSL_SERVER_CONF_KEY)); + f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + + conf.get(SSLFactory.SSL_SERVER_CONF_KEY)); f.delete(); } /** - * Performs complete setup of SSL configuration in preparation for testing an - * SSLFactory. This includes keys, certs, keystores, truststores, the server - * SSL configuration file, the client SSL configuration file, and the master - * configuration file read by the SSLFactory. - * + * Performs complete setup of SSL configuration in preparation for testing an SSLFactory. This + * includes keys, certs, keystores, truststores, the server SSL configuration file, the client SSL + * configuration file, and the master configuration file read by the SSLFactory. 
* @param keystoresDir String directory to save keystores * @param sslConfDir String directory to save SSL configuration files - * @param conf Configuration master configuration to be used by an SSLFactory, - * which will be mutated by this method - * @param useClientCert boolean true to make the client present a cert in the - * SSL handshake + * @param conf Configuration master configuration to be used by an SSLFactory, which will be + * mutated by this method + * @param useClientCert boolean true to make the client present a cert in the SSL handshake */ - public static void setupSSLConfig(String keystoresDir, String sslConfDir, - Configuration conf, boolean useClientCert) - throws Exception { + public static void setupSSLConfig(String keystoresDir, String sslConfDir, Configuration conf, + boolean useClientCert) throws Exception { String clientKS = keystoresDir + "/clientKS.jks"; String clientPassword = "clientP"; String serverKS = keystoresDir + "/serverKS.jks"; @@ -280,39 +253,33 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir, String trustKS = keystoresDir + "/trustKS.jks"; String trustPassword = "trustP"; - File sslClientConfFile = new File( - sslConfDir + "/ssl-client-" + System.nanoTime() + "-" + HBaseCommonTestingUtil - .getRandomUUID() + ".xml"); - File sslServerConfFile = new File( - sslConfDir + "/ssl-server-" + System.nanoTime() + "-" + HBaseCommonTestingUtil - .getRandomUUID() + ".xml"); + File sslClientConfFile = new File(sslConfDir + "/ssl-client-" + System.nanoTime() + "-" + + HBaseCommonTestingUtil.getRandomUUID() + ".xml"); + File sslServerConfFile = new File(sslConfDir + "/ssl-server-" + System.nanoTime() + "-" + + HBaseCommonTestingUtil.getRandomUUID() + ".xml"); Map certs = new HashMap<>(); if (useClientCert) { KeyPair cKP = KeyStoreTestUtil.generateKeyPair("RSA"); X509Certificate cCert = - KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30, - "SHA1withRSA"); - KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client", - cKP.getPrivate(), cCert); + KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30, "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client", cKP.getPrivate(), cCert); certs.put("client", cCert); } KeyPair sKP = KeyStoreTestUtil.generateKeyPair("RSA"); X509Certificate sCert = - KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30, - "SHA1withRSA"); - KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server", - sKP.getPrivate(), sCert); + KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30, "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server", sKP.getPrivate(), sCert); certs.put("server", sCert); KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs); - Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword, - clientPassword, trustKS); - Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword, - serverPassword, trustKS); + Configuration clientSSLConf = + createClientSSLConfig(clientKS, clientPassword, clientPassword, trustKS); + Configuration serverSSLConf = + createServerSSLConfig(serverKS, serverPassword, serverPassword, trustKS); saveConfig(sslClientConfFile, clientSSLConf); saveConfig(sslServerConfFile, serverSSLConf); @@ -322,60 +289,50 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir, conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName()); 
conf.set("dfs.https.server.keystore.resource", sslServerConfFile.getName()); - conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert); } /** * Creates SSL configuration for a client. - * * @param clientKS String client keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password * @param trustKS String truststore file * @return Configuration for client SSL */ - public static Configuration createClientSSLConfig(String clientKS, - String password, String keyPassword, String trustKS) { - Configuration clientSSLConf = createSSLConfig(SSLFactory.Mode.CLIENT, - clientKS, password, keyPassword, trustKS); + public static Configuration createClientSSLConfig(String clientKS, String password, + String keyPassword, String trustKS) { + Configuration clientSSLConf = + createSSLConfig(SSLFactory.Mode.CLIENT, clientKS, password, keyPassword, trustKS); return clientSSLConf; } /** * Creates SSL configuration for a server. - * * @param serverKS String server keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password * @param trustKS String truststore file * @return Configuration for server SSL */ - public static Configuration createServerSSLConfig(String serverKS, - String password, String keyPassword, String trustKS) throws IOException { - Configuration serverSSLConf = createSSLConfig(SSLFactory.Mode.SERVER, - serverKS, password, keyPassword, trustKS); + public static Configuration createServerSSLConfig(String serverKS, String password, + String keyPassword, String trustKS) throws IOException { + Configuration serverSSLConf = + createSSLConfig(SSLFactory.Mode.SERVER, serverKS, password, keyPassword, trustKS); return serverSSLConf; } /** * Creates SSL configuration. 
- * * @param mode SSLFactory.Mode mode to configure * @param keystore String keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password * @param trustKS String truststore file * @return Configuration for SSL */ - private static Configuration createSSLConfig(SSLFactory.Mode mode, - String keystore, String password, String keyPassword, String trustKS) { + private static Configuration createSSLConfig(SSLFactory.Mode mode, String keystore, + String password, String keyPassword, String trustKS) { String trustPassword = "trustP"; Configuration sslConf = new Configuration(false); @@ -389,8 +346,7 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, } if (keyPassword != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, - FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), - keyPassword); + FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), keyPassword); } if (trustKS != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, @@ -398,8 +354,7 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, } if (trustPassword != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, - FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), - trustPassword); + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword); } sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000"); @@ -409,13 +364,11 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, /** * Saves configuration to a file. - * * @param file File to save * @param conf Configuration contents to write to file * @throws IOException if there is an I/O error saving the file */ - public static void saveConfig(File file, Configuration conf) - throws IOException { + public static void saveConfig(File file, Configuration conf) throws IOException { Writer writer = new FileWriter(file); try { conf.writeXml(writer); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java index c277cd068da3..81c5eec8a559 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,6 +22,7 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.PrintWriter; import java.io.StringWriter; import java.lang.reflect.Type; @@ -39,17 +39,18 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.reflect.TypeToken; import org.apache.hbase.thirdparty.com.google.gson.Gson; /** * Test {@link JSONBean}. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestJSONBean { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestJSONBean.class); + HBaseClassTestRule.forClass(TestJSONBean.class); private MBeanServer getMockMBeanServer() throws Exception { MBeanServer mbeanServer = mock(MBeanServer.class); @@ -58,23 +59,17 @@ private MBeanServer getMockMBeanServer() throws Exception { when(mbeanServer.queryNames(any(), any())).thenReturn(names); MBeanInfo mbeanInfo = mock(MBeanInfo.class); when(mbeanInfo.getClassName()).thenReturn("testClassName"); - String[] attributeNames = new String[] {"intAttr", "nanAttr", "infinityAttr", - "strAttr", "boolAttr", "test:Attr"}; + String[] attributeNames = + new String[] { "intAttr", "nanAttr", "infinityAttr", "strAttr", "boolAttr", "test:Attr" }; MBeanAttributeInfo[] attributeInfos = new MBeanAttributeInfo[attributeNames.length]; for (int i = 0; i < attributeInfos.length; i++) { - attributeInfos[i] = new MBeanAttributeInfo(attributeNames[i], - null, - null, - true, - false, - false); + attributeInfos[i] = new MBeanAttributeInfo(attributeNames[i], null, null, true, false, false); } when(mbeanInfo.getAttributes()).thenReturn(attributeInfos); when(mbeanServer.getMBeanInfo(any())).thenReturn(mbeanInfo); when(mbeanServer.getAttribute(any(), eq("intAttr"))).thenReturn(3); when(mbeanServer.getAttribute(any(), eq("nanAttr"))).thenReturn(Double.NaN); - when(mbeanServer.getAttribute(any(), eq("infinityAttr"))). - thenReturn(Double.POSITIVE_INFINITY); + when(mbeanServer.getAttribute(any(), eq("infinityAttr"))).thenReturn(Double.POSITIVE_INFINITY); when(mbeanServer.getAttribute(any(), eq("strAttr"))).thenReturn("aString"); when(mbeanServer.getAttribute(any(), eq("boolAttr"))).thenReturn(true); when(mbeanServer.getAttribute(any(), eq("test:Attr"))).thenReturn("aString"); @@ -105,14 +100,14 @@ private String getExpectedJSON() { public void testJSONBeanValueTypes() throws Exception { JSONBean bean = new JSONBean(); StringWriter stringWriter = new StringWriter(); - try ( - PrintWriter printWriter = new PrintWriter(stringWriter); - JSONBean.Writer jsonWriter = bean.open(printWriter)) { + try (PrintWriter printWriter = new PrintWriter(stringWriter); + JSONBean.Writer jsonWriter = bean.open(printWriter)) { jsonWriter.write(getMockMBeanServer(), null, null, false); } final Gson gson = GsonUtil.createGson().create(); - Type typeOfHashMap = new TypeToken>() {}.getType(); + Type typeOfHashMap = new TypeToken>() { + }.getType(); Map expectedJson = gson.fromJson(getExpectedJSON(), typeOfHashMap); Map actualJson = gson.fromJson(stringWriter.toString(), typeOfHashMap); assertEquals(expectedJson, actualJson); diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 890e24f74d78..5d1f920fd6e3 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -40,105 +40,6 @@ - - - - - ../hbase-server/src/test/resources - - META-INF/NOTICE - META-INF/LICENSE - - - - src/test/resources - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-failsafe-plugin - ${surefire.version} - - - org.apache.maven.surefire - surefire-junit4 - ${surefire.version} - - - - - ${integrationtest.include} - - - ${unittest.include} - **/*$* - - ${test.output.tofile} - - 
${env.LD_LIBRARY_PATH}:${project.build.directory}/nativelib - ${env.DYLD_LIBRARY_PATH}:${project.build.directory}/nativelib - 4 - - false - false - - - - integration-test - integration-test - - integration-test - - - - verify - verify - - verify - - - - - - - - - - - org.apache.maven.plugins - maven-failsafe-plugin - - false - always - - 1800 - -enableassertions -Xmx${failsafe.Xmx} - -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled - -verbose:gc -XX:+PrintCommandLineFlags -XX:+PrintFlagsFinal - - - - net.revelc.code - warbucks-maven-plugin - - - - + org.apache.hbase hbase-annotations @@ -198,8 +99,8 @@ which pulls in the below. It messes up this build at assembly time. See HBASE-22029--> - com.sun.jersey - jersey-core + com.sun.jersey + jersey-core @@ -269,8 +170,8 @@ test - javax.servlet-api javax.servlet + javax.servlet-api test @@ -295,6 +196,129 @@ + + + + + ../hbase-server/src/test/resources + + META-INF/NOTICE + META-INF/LICENSE + + + + src/test/resources + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-failsafe-plugin + ${surefire.version} + + + ${integrationtest.include} + + + ${unittest.include} + **/*$* + + ${test.output.tofile} + + ${env.LD_LIBRARY_PATH}:${project.build.directory}/nativelib + ${env.DYLD_LIBRARY_PATH}:${project.build.directory}/nativelib + 4 + + false + false + + + + org.apache.maven.surefire + surefire-junit4 + ${surefire.version} + + + + + integration-test + + integration-test + + integration-test + + + verify + + verify + + verify + + + + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + false + always + + 1800 + -enableassertions -Xmx${failsafe.Xmx} + -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled + -verbose:gc -XX:+PrintCommandLineFlags -XX:+PrintFlagsFinal + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + + org.apache.maven.plugins + maven-surefire-report-plugin + ${surefire.version} + + + integration-tests + + report-only + + + failsafe-report + + ${project.build.directory}/failsafe-reports + + + + + + + + @@ -325,13 +349,15 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + ${hadoop-three.version} - + org.apache.hadoop hadoop-common @@ -361,10 +387,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -33,35 +31,6 @@ Apache HBase - Logging Logging Support for HBase - - - - src/test/resources - - log4j2.properties - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase @@ -109,4 +78,33 @@ test + + + + + src/test/resources + + log4j2.properties + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java index b0711d7e8f1a..d1cf2bf7cc46 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor 
license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ private InternalLog4jUtils() { static void setLogLevel(String loggerName, String levelName) { org.apache.logging.log4j.Level level = - org.apache.logging.log4j.Level.toLevel(levelName.toUpperCase()); + org.apache.logging.log4j.Level.toLevel(levelName.toUpperCase()); if (!level.toString().equalsIgnoreCase(levelName)) { throw new IllegalArgumentException("Unsupported log level " + levelName); } @@ -47,7 +47,7 @@ static void setLogLevel(String loggerName, String levelName) { static String getEffectiveLevel(String loggerName) { org.apache.logging.log4j.Logger logger = - org.apache.logging.log4j.LogManager.getLogger(loggerName); + org.apache.logging.log4j.LogManager.getLogger(loggerName); return logger.getLevel().name(); } @@ -61,27 +61,28 @@ static Set getActiveLogFiles() throws IOException { for (org.apache.logging.log4j.core.Appender appender : coreLogger.getAppenders().values()) { if (appender instanceof org.apache.logging.log4j.core.appender.FileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.FileAppender) appender).getFileName(); + ((org.apache.logging.log4j.core.appender.FileAppender) appender).getFileName(); ret.add(new File(fileName)); } else if (appender instanceof org.apache.logging.log4j.core.appender.AbstractFileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.AbstractFileAppender) appender).getFileName(); + ((org.apache.logging.log4j.core.appender.AbstractFileAppender) appender) + .getFileName(); ret.add(new File(fileName)); } else if (appender instanceof org.apache.logging.log4j.core.appender.RollingFileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.RollingFileAppender) appender).getFileName(); + ((org.apache.logging.log4j.core.appender.RollingFileAppender) appender).getFileName(); ret.add(new File(fileName)); } else if (appender instanceof org.apache.logging.log4j.core.appender.RandomAccessFileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.RandomAccessFileAppender) appender) - .getFileName(); + ((org.apache.logging.log4j.core.appender.RandomAccessFileAppender) appender) + .getFileName(); ret.add(new File(fileName)); } else if (appender instanceof org.apache.logging.log4j.core.appender.MemoryMappedFileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.MemoryMappedFileAppender) appender) - .getFileName(); + ((org.apache.logging.log4j.core.appender.MemoryMappedFileAppender) appender) + .getFileName(); ret.add(new File(fileName)); } } diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java index e7b5fdd39356..2909b4191383 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,6 +37,6 @@ public class JulToSlf4jInitializer { public JulToSlf4jInitializer() throws IOException { LogManager.getLogManager() - .readConfiguration(new ByteArrayInputStream(PROPERTIES.getBytes(StandardCharsets.UTF_8))); + .readConfiguration(new ByteArrayInputStream(PROPERTIES.getBytes(StandardCharsets.UTF_8))); } } diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java index 9b3459194ab6..36c054b2e6d1 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ public final class Log4jUtils { private static final String INTERNAL_UTILS_CLASS_NAME = - "org.apache.hadoop.hbase.logging.InternalLog4jUtils"; + "org.apache.hadoop.hbase.logging.InternalLog4jUtils"; private Log4jUtils() { } diff --git a/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java b/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java index 7b3876ce0833..1c3a4bae01aa 100644 --- a/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java +++ b/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.log4j; import java.io.BufferedWriter; @@ -74,7 +73,7 @@ public FileAppender() { * write to the output file. */ public FileAppender(Layout layout, String fileName, boolean append, boolean bufferedIO, - int bufferSize) throws IOException { + int bufferSize) throws IOException { this.layout = layout; this.setFile(fileName, append, bufferedIO, bufferSize); } @@ -225,7 +224,7 @@ public void setBufferSize(int bufferSize) { * @param append If true will append to fileName. Otherwise will truncate fileName. */ public synchronized void setFile(String fileName, boolean append, boolean bufferedIO, - int bufferSize) throws IOException { + int bufferSize) throws IOException { // It does not make sense to have immediate flush and bufferedIO. if (bufferedIO) { diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index 68963a0253dd..a0c952e6ea31 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-mapreduce Apache HBase - MapReduce - - This module contains implementations of InputFormat, OutputFormat, Mapper, Reducer, etc which + This module contains implementations of InputFormat, OutputFormat, Mapper, Reducer, etc which are needed for running MR jobs on tables, WALs, HFiles and other HBase specific constructs. 
It also contains a bunch of tools: RowCounter, ImportTsv, Import, Export, CompactionTool, - ExportSnapshot, WALPlayer, etc - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-jar-plugin - - - - - org/apache/hadoop/hbase/mapreduce/Driver - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - + ExportSnapshot, WALPlayer, etc @@ -284,6 +247,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + + org/apache/hadoop/hbase/mapreduce/Driver + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + @@ -302,7 +299,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -344,8 +343,7 @@ lifecycle-mapping - - + diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java index 0484fbbf239a..cd442b46d5c1 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +18,12 @@ package org.apache.hadoop.hbase.mapred; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.util.ProgramDriver; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.util.ProgramDriver; /** - * Driver for hbase mapreduce jobs. Select which to run by passing name of job - * to this main. + * Driver for hbase mapreduce jobs. Select which to run by passing name of job to this main. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable @@ -44,7 +42,7 @@ static void setProgramDriver(ProgramDriver pgd0) { */ public static void main(String[] args) throws Throwable { pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table"); - ProgramDriver.class.getMethod("driver", new Class[] { String[].class }) - .invoke(pgd, new Object[] { args }); + ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd, + new Object[] { args }); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java index 594816fcf503..04b627718e6f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,6 @@ import java.io.IOException; import java.util.ArrayList; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Result; @@ -31,42 +28,37 @@ import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; - +import org.apache.yetus.audience.InterfaceAudience; /** * Extract grouping columns from input record */ @InterfaceAudience.Public -public class GroupingTableMap -extends MapReduceBase -implements TableMap { +public class GroupingTableMap extends MapReduceBase + implements TableMap { /** - * JobConf parameter to specify the columns used to produce the key passed to - * collect from the map phase + * JobConf parameter to specify the columns used to produce the key passed to collect from the map + * phase */ - public static final String GROUP_COLUMNS = - "hbase.mapred.groupingtablemap.columns"; + public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; - protected byte [][] columns; + protected byte[][] columns; /** - * Use this before submitting a TableMap job. It will appropriately set up the - * JobConf. - * + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. * @param table table to be processed * @param columns space separated list of columns to fetch - * @param groupColumns space separated list of columns used to form the key - * used in collect + * @param groupColumns space separated list of columns used to form the key used in collect * @param mapper map class * @param job job configuration object */ @SuppressWarnings("unchecked") public static void initJob(String table, String columns, String groupColumns, - Class mapper, JobConf job) { + Class mapper, JobConf job) { - TableMapReduceUtil.initTableMapJob(table, columns, mapper, - ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, + Result.class, job); job.set(GROUP_COLUMNS, groupColumns); } @@ -75,16 +67,14 @@ public void configure(JobConf job) { super.configure(job); String[] cols = job.get(GROUP_COLUMNS, "").split(" "); columns = new byte[cols.length][]; - for(int i = 0; i < cols.length; i++) { + for (int i = 0; i < cols.length; i++) { columns[i] = Bytes.toBytes(cols[i]); } } /** - * Extract the grouping columns from value to construct a new key. - * - * Pass the new key and value to reduce. - * If any of the grouping columns are not found in the value, the record is skipped. + * Extract the grouping columns from value to construct a new key. Pass the new key and value to + * reduce. If any of the grouping columns are not found in the value, the record is skipped. * @param key * @param value * @param output @@ -92,22 +82,19 @@ public void configure(JobConf job) { * @throws IOException */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) + throws IOException { byte[][] keyVals = extractKeyValues(value); - if(keyVals != null) { + if (keyVals != null) { ImmutableBytesWritable tKey = createGroupKey(keyVals); output.collect(tKey, value); } } /** - * Extract columns values from the current record. This method returns - * null if any of the columns are not found. 
- * - * Override this method if you want to deal with nulls differently. - * + * Extract columns values from the current record. This method returns null if any of the columns + * are not found. Override this method if you want to deal with nulls differently. * @param r * @return array of byte values */ @@ -116,9 +103,9 @@ protected byte[][] extractKeyValues(Result r) { ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { - for (Cell value: r.listCells()) { - byte [] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), - CellUtil.cloneQualifier(value)); + for (Cell value : r.listCells()) { + byte[] column = + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)); for (int i = 0; i < numCols; i++) { if (Bytes.equals(column, columns[i])) { foundList.add(CellUtil.cloneValue(value)); @@ -126,7 +113,7 @@ protected byte[][] extractKeyValues(Result r) { } } } - if(foundList.size() == numCols) { + if (foundList.size() == numCols) { keyVals = foundList.toArray(new byte[numCols][]); } } @@ -134,19 +121,18 @@ protected byte[][] extractKeyValues(Result r) { } /** - * Create a key by concatenating multiple column values. - * Override this function in order to produce different types of keys. - * + * Create a key by concatenating multiple column values. Override this function in order to + * produce different types of keys. * @param vals * @return key generated by concatenating multiple column values */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { - if(vals == null) { + if (vals == null) { return null; } - StringBuilder sb = new StringBuilder(); - for(int i = 0; i < vals.length; i++) { - if(i > 0) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < vals.length; i++) { + if (i > 0) { sb.append(" "); } sb.append(Bytes.toString(vals[i])); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java index b777f7ae24ff..a600f7fe85b0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +20,6 @@ import java.io.IOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; @@ -31,18 +27,18 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Partitioner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is used to partition the output keys into groups of keys. - * Keys are grouped according to the regions that currently exist - * so that each reducer fills a single region so load is distributed. - * + * This is used to partition the output keys into groups of keys. 
Keys are grouped according to the + * regions that currently exist so that each reducer fills a single region so load is distributed. * @param * @param */ @InterfaceAudience.Public -public class HRegionPartitioner -implements Partitioner { +public class HRegionPartitioner implements Partitioner { private static final Logger LOG = LoggerFactory.getLogger(HRegionPartitioner.class); // Connection and locator are not cleaned up; they just die when partitioner is done. private Connection connection; @@ -70,7 +66,7 @@ public void configure(JobConf job) { public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) { byte[] region = null; // Only one region return 0 - if (this.startKeys.length == 1){ + if (this.startKeys.length == 1) { return 0; } try { @@ -80,12 +76,11 @@ public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) } catch (IOException e) { LOG.error(e.toString(), e); } - for (int i = 0; i < this.startKeys.length; i++){ - if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ - if (i >= numPartitions){ + for (int i = 0; i < this.startKeys.length; i++) { + if (Bytes.compareTo(region, this.startKeys[i]) == 0) { + if (i >= numPartitions) { // cover if we have less reduces then regions. - return (Integer.toString(i).hashCode() - & Integer.MAX_VALUE) % numPartitions; + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java index c97bcc025230..fd3eb2a4d153 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,22 +18,20 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; /** * Pass the given key and record as-is to reduce */ @InterfaceAudience.Public -public class IdentityTableMap -extends MapReduceBase -implements TableMap { +public class IdentityTableMap extends MapReduceBase + implements TableMap { /** constructor */ public IdentityTableMap() { @@ -42,19 +39,16 @@ public IdentityTableMap() { } /** - * Use this before submitting a TableMap job. It will - * appropriately set up the JobConf. - * + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. 
* @param table table name * @param columns columns to scan * @param mapper mapper class * @param job job configuration */ @SuppressWarnings("unchecked") - public static void initJob(String table, String columns, - Class mapper, JobConf job) { - TableMapReduceUtil.initTableMapJob(table, columns, mapper, - ImmutableBytesWritable.class, + public static void initJob(String table, String columns, Class mapper, + JobConf job) { + TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, Result.class, job); } @@ -67,8 +61,8 @@ public static void initJob(String table, String columns, * @throws IOException */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) + throws IOException { // convert output.collect(key, value); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java index ba1df4c3a835..94c6d248e437 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,26 +19,23 @@ import java.io.IOException; import java.util.Iterator; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Write to table each key, record pair */ @InterfaceAudience.Public -public class IdentityTableReduce -extends MapReduceBase -implements TableReduce { +public class IdentityTableReduce extends MapReduceBase + implements TableReduce { @SuppressWarnings("unused") - private static final Logger LOG = - LoggerFactory.getLogger(IdentityTableReduce.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReduce.class.getName()); /** * No aggregation, output pairs of (key, record) @@ -50,11 +46,9 @@ public class IdentityTableReduce * @throws IOException */ public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector output, - Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { - while(values.hasNext()) { + while (values.hasNext()) { output.collect(key, values.next()); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java index 7902d1a3b4c3..a415c5dbe663 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapred; import edu.umd.cs.findbugs.annotations.SuppressWarnings; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -32,33 +34,25 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; +import org.apache.yetus.audience.InterfaceAudience; /** * MultiTableSnapshotInputFormat generalizes - * {@link org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat} - * allowing a MapReduce job to run over one or more table snapshots, with one or more scans - * configured for each. - * Internally, the input format delegates to - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * and thus has the same performance advantages; see - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * for more details. - * Usage is similar to TableSnapshotInputFormat, with the following exception: - * initMultiTableSnapshotMapperJob takes in a map - * from snapshot name to a collection of scans. For each snapshot in the map, each corresponding - * scan will be applied; - * the overall dataset for the job is defined by the concatenation of the regions and tables - * included in each snapshot/scan + * {@link org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat} allowing a MapReduce job to run + * over one or more table snapshots, with one or more scans configured for each. Internally, the + * input format delegates to {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} and + * thus has the same performance advantages; see + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more details. Usage is + * similar to TableSnapshotInputFormat, with the following exception: + * initMultiTableSnapshotMapperJob takes in a map from snapshot name to a collection of scans. For + * each snapshot in the map, each corresponding scan will be applied; the overall dataset for the + * job is defined by the concatenation of the regions and tables included in each snapshot/scan * pair. - * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map, - * Class, Class, Class, JobConf, boolean, Path)} + * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map, Class, Class, Class, JobConf, boolean, Path)} * can be used to configure the job. - *

    {@code
    + * 
    + * 
    + * {@code
      * Job job = new Job(conf);
      * Map> snapshotScans = ImmutableMap.of(
      *    "snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("a"), Bytes.toBytes("b"))),
    @@ -70,15 +64,12 @@
      *      MyMapOutputValueWritable.class, job, true, restoreDir);
      * }
      * 
    + * * Internally, this input format restores each snapshot into a subdirectory of the given tmp - * directory. Input splits and - * record readers are created as described in - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * (one per region). - * See {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more notes on - * permissioning; the - * same caveats apply here. - * + * directory. Input splits and record readers are created as described in + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} (one per region). See + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more notes on + * permissioning; the same caveats apply here. * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat * @see org.apache.hadoop.hbase.client.TableSnapshotScanner */ @@ -111,11 +102,9 @@ public RecordReader getRecordReader(InputSplit s @SuppressWarnings("checkstyle:linelength") /** * Configure conf to read from snapshotScans, with snapshots restored to a subdirectory of - * restoreDir. - * Sets: + * restoreDir. Sets: * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#RESTORE_DIRS_KEY}, * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#SNAPSHOT_TO_SCANS_KEY} - * * @param conf * @param snapshotScans * @param restoreDir diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java index 75b221c5526b..4d1206e9b690 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +18,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,11 +30,11 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; /** - * A job with a map to count rows. - * Map outputs table rows IF the input row has columns that have content. - * Uses a org.apache.hadoop.mapred.lib.IdentityReducer + * A job with a map to count rows. Map outputs table rows IF the input row has columns that have + * content. Uses a org.apache.hadoop.mapred.lib.IdentityReducer */ @InterfaceAudience.Public public class RowCounter extends Configured implements Tool { @@ -47,16 +44,16 @@ public class RowCounter extends Configured implements Tool { /** * Mapper that runs the count. 
*/ - static class RowCounterMapper - implements TableMap { - private static enum Counters {ROWS} + static class RowCounterMapper implements TableMap { + private static enum Counters { + ROWS + } public void map(ImmutableBytesWritable row, Result values, - OutputCollector output, - Reporter reporter) - throws IOException { - // Count every row containing data, whether it's in qualifiers or values - reporter.incrCounter(Counters.ROWS, 1); + OutputCollector output, Reporter reporter) + throws IOException { + // Count every row containing data, whether it's in qualifiers or values + reporter.incrCounter(Counters.ROWS, 1); } public void configure(JobConf jc) { @@ -86,8 +83,8 @@ public JobConf createSubmittableJob(String[] args) throws IOException { sb.append(args[i]); } // Second argument is the table name. - TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, c); + TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, c); c.setNumReduceTasks(0); // First arg is the output directory. FileOutputFormat.setOutputPath(c, new Path(args[0])); @@ -95,8 +92,7 @@ public JobConf createSubmittableJob(String[] args) throws IOException { } static int printUsage() { - System.out.println(NAME + - " [...]"); + System.out.println(NAME + " [...]"); return -1; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java index d9bb66bdf07f..3e38b0172ca0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +18,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -32,13 +27,15 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobConfigurable; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. 
*/ @InterfaceAudience.Public -public class TableInputFormat extends TableInputFormatBase implements - JobConfigurable { +public class TableInputFormat extends TableInputFormatBase implements JobConfigurable { private static final Logger LOG = LoggerFactory.getLogger(TableInputFormat.class); /** @@ -59,7 +56,7 @@ protected void initialize(JobConf job) throws IOException { Path[] tableNames = FileInputFormat.getInputPaths(job); String colArg = job.get(COLUMN_LIST); String[] colNames = colArg.split(" "); - byte [][] m_cols = new byte[colNames.length][]; + byte[][] m_cols = new byte[colNames.length][]; for (int i = 0; i < m_cols.length; i++) { m_cols[i] = Bytes.toBytes(colNames[i]); } @@ -70,15 +67,14 @@ protected void initialize(JobConf job) throws IOException { public void validateInput(JobConf job) throws IOException { // expecting exactly one path - Path [] tableNames = FileInputFormat.getInputPaths(job); + Path[] tableNames = FileInputFormat.getInputPaths(job); if (tableNames == null || tableNames.length > 1) { throw new IOException("expecting one table name"); } // connected to table? if (getTable() == null) { - throw new IOException("could not connect to table '" + - tableNames[0].getName() + "'"); + throw new IOException("could not connect to table '" + tableNames[0].getName() + "'"); } // expecting at least one column diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java index 509972e92aa5..cbd7882ad7ef 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +19,6 @@ import java.io.Closeable; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -37,21 +32,22 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A Base for {@link TableInputFormat}s. Receives a {@link Table}, a - * byte[] of input columns and optionally a {@link Filter}. - * Subclasses may use other TableRecordReader implementations. - * + * A Base for {@link TableInputFormat}s. Receives a {@link Table}, a byte[] of input columns and + * optionally a {@link Filter}. Subclasses may use other TableRecordReader implementations. * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to * function properly. Each of the entry points to this class used by the MapReduce framework, * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)}, - * will call {@link #initialize(JobConf)} as a convenient centralized location to handle - * retrieving the necessary configuration information. If your subclass overrides either of these - * methods, either call the parent version or call initialize yourself. 
- * + * will call {@link #initialize(JobConf)} as a convenient centralized location to handle retrieving + * the necessary configuration information. If your subclass overrides either of these methods, + * either call the parent version or call initialize yourself. *

    * An example of a subclass: + * *

      *   class ExampleTIF extends TableInputFormatBase {
      *
    @@ -77,33 +73,28 @@
      */
     
     @InterfaceAudience.Public
    -public abstract class TableInputFormatBase
    -implements InputFormat {
    +public abstract class TableInputFormatBase implements InputFormat {
       private static final Logger LOG = LoggerFactory.getLogger(TableInputFormatBase.class);
    -  private byte [][] inputColumns;
    +  private byte[][] inputColumns;
       private Table table;
       private RegionLocator regionLocator;
       private Connection connection;
       private TableRecordReader tableRecordReader;
       private Filter rowFilter;
     
    -  private static final String NOT_INITIALIZED = "The input format instance has not been properly " +
    -      "initialized. Ensure you call initializeTable either in your constructor or initialize " +
    -      "method";
    -  private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" +
    -            " previous error. Please look at the previous logs lines from" +
    -            " the task's full log for more details.";
    +  private static final String NOT_INITIALIZED = "The input format instance has not been properly "
    +      + "initialized. Ensure you call initializeTable either in your constructor or initialize "
    +      + "method";
    +  private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a"
    +      + " previous error. Please look at the previous logs lines from"
    +      + " the task's full log for more details.";
     
       /**
    -   * Builds a TableRecordReader. If no TableRecordReader was provided, uses
    -   * the default.
    -   *
    -   * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
    -   *      JobConf, Reporter)
    +   * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default.
    +   * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit, JobConf, Reporter)
        */
    -  public RecordReader getRecordReader(
    -      InputSplit split, JobConf job, Reporter reporter)
    -  throws IOException {
    +  public RecordReader getRecordReader(InputSplit split, JobConf job,
    +      Reporter reporter) throws IOException {
         // In case a subclass uses the deprecated approach or calls initializeTable directly
         if (table == null) {
           initialize(job);
    @@ -120,8 +111,8 @@ public RecordReader getRecordReader(
     
         TableSplit tSplit = (TableSplit) split;
         // if no table record reader was provided use default
    -    final TableRecordReader trr = this.tableRecordReader == null ? new TableRecordReader() :
    -        this.tableRecordReader;
    +    final TableRecordReader trr =
    +        this.tableRecordReader == null ? new TableRecordReader() : this.tableRecordReader;
         trr.setStartRow(tSplit.getStartRow());
         trr.setEndRow(tSplit.getEndRow());
         trr.setHTable(this.table);
    @@ -164,22 +155,16 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException
       }
     
       /**
    -   * Calculates the splits that will serve as input for the map tasks.
    -   *
    -   * Splits are created in number equal to the smallest between numSplits and
    -   * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
    -   * If the number of splits is smaller than the number of
    -   * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across
    -   * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s
    -   * and are grouped the most evenly possible. In the
    -   * case splits are uneven the bigger splits are placed first in the
    -   * {@link InputSplit} array.
    -   *
+   * Calculates the splits that will serve as input for the map tasks. Splits are created in a
+   * number equal to the smaller of numSplits and the number of
+   * {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. If the number of splits is
+   * smaller than the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s, then splits
+   * are spanned across multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s and are
+   * grouped as evenly as possible. When splits are uneven, the bigger splits are placed first in
+   * the {@link InputSplit} array.
        * @param job the map task {@link JobConf}
        * @param numSplits a hint to calculate the number of splits (mapred.map.tasks).
    -   *
        * @return the input splits
    -   *
        * @see org.apache.hadoop.mapred.InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
        */
       public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    @@ -196,26 +181,24 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
           throw new IOException(INITIALIZATION_ERROR, exception);
         }
     
    -    byte [][] startKeys = this.regionLocator.getStartKeys();
    +    byte[][] startKeys = this.regionLocator.getStartKeys();
         if (startKeys == null || startKeys.length == 0) {
           throw new IOException("Expecting at least one region");
         }
         if (this.inputColumns == null || this.inputColumns.length == 0) {
           throw new IOException("Expecting at least one column");
         }
    -    int realNumSplits = numSplits > startKeys.length? startKeys.length:
    -      numSplits;
    +    int realNumSplits = numSplits > startKeys.length ? startKeys.length : numSplits;
         InputSplit[] splits = new InputSplit[realNumSplits];
         int middle = startKeys.length / realNumSplits;
         int startPos = 0;
         for (int i = 0; i < realNumSplits; i++) {
           int lastPos = startPos + middle;
           lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos;
    -      String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).
    -        getHostname();
    -      splits[i] = new TableSplit(this.table.getName(),
    -        startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]:
    -          HConstants.EMPTY_START_ROW, regionLocation);
    +      String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).getHostname();
    +      splits[i] = new TableSplit(this.table.getName(), startKeys[startPos],
    +          ((i + 1) < realNumSplits) ? startKeys[lastPos] : HConstants.EMPTY_START_ROW,
    +          regionLocation);
           LOG.info("split: " + i + "->" + splits[i]);
           startPos = lastPos;
         }
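
As a side note for readers of this hunk (not part of the patch): the grouping described in the getSplits javadoc is easier to see with concrete numbers. The standalone sketch below replays the same arithmetic with an assumed table of 10 regions and a hint of 4 splits; both numbers are illustrative only.

// Sketch only; mirrors the arithmetic in getSplits() above with made-up numbers.
public class SplitMathExample {
  public static void main(String[] args) {
    int regions = 10;   // pretend the table has 10 regions
    int numSplits = 4;  // hint from mapred.map.tasks
    int realNumSplits = numSplits > regions ? regions : numSplits; // 4
    int middle = regions / realNumSplits;                          // 2
    int startPos = 0;
    for (int i = 0; i < realNumSplits; i++) {
      int lastPos = startPos + middle;
      // the first (regions % realNumSplits) splits each take one extra region
      lastPos = regions % realNumSplits > i ? lastPos + 1 : lastPos;
      System.out.println("split " + i + " covers regions [" + startPos + ", " + lastPos + ")");
      startPos = lastPos;
    }
    // Prints [0,3), [3,6), [6,8), [8,10): the two bigger splits come first, as the javadoc says.
  }
}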
    @@ -224,15 +207,14 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     
       /**
        * Allows subclasses to initialize the table information.
    -   *
    -   * @param connection  The Connection to the HBase cluster. MUST be unmanaged. We will close.
    -   * @param tableName  The {@link TableName} of the table to process.
    +   * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
    +   * @param tableName The {@link TableName} of the table to process.
        * @throws IOException
        */
       protected void initializeTable(Connection connection, TableName tableName) throws IOException {
         if (this.table != null || this.connection != null) {
    -      LOG.warn("initializeTable called multiple times. Overwriting connection and table " +
    -          "reference; TableInputFormatBase will not close these old references when done.");
    +      LOG.warn("initializeTable called multiple times. Overwriting connection and table "
    +          + "reference; TableInputFormatBase will not close these old references when done.");
         }
         this.table = connection.getTable(tableName);
         this.regionLocator = connection.getRegionLocator(tableName);
    @@ -242,7 +224,7 @@ protected void initializeTable(Connection connection, TableName tableName) throw
       /**
        * @param inputColumns to be passed in {@link Result} to the map task.
        */
    -  protected void setInputColumns(byte [][] inputColumns) {
    +  protected void setInputColumns(byte[][] inputColumns) {
         this.inputColumns = inputColumns;
       }
     
    @@ -258,9 +240,7 @@ protected Table getTable() {
     
       /**
        * Allows subclasses to set the {@link TableRecordReader}.
    -   *
    -   * @param tableRecordReader
    -   *                to provide other {@link TableRecordReader} implementations.
    +   * @param tableRecordReader to provide other {@link TableRecordReader} implementations.
        */
       protected void setTableRecordReader(TableRecordReader tableRecordReader) {
         this.tableRecordReader = tableRecordReader;
    @@ -268,7 +248,6 @@ protected void setTableRecordReader(TableRecordReader tableRecordReader) {
     
       /**
        * Allows subclasses to set the {@link Filter} to be used.
    -   *
        * @param rowFilter
        */
       protected void setRowFilter(Filter rowFilter) {
    @@ -276,19 +255,15 @@ protected void setRowFilter(Filter rowFilter) {
       }
     
       /**
    -   * Handle subclass specific set up.
    -   * Each of the entry points used by the MapReduce framework,
    +   * Handle subclass specific set up. Each of the entry points used by the MapReduce framework,
        * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)},
        * will call {@link #initialize(JobConf)} as a convenient centralized location to handle
        * retrieving the necessary configuration information and calling
    -   * {@link #initializeTable(Connection, TableName)}.
    -   *
    -   * Subclasses should implement their initialize call such that it is safe to call multiple times.
    -   * The current TableInputFormatBase implementation relies on a non-null table reference to decide
    -   * if an initialize call is needed, but this behavior may change in the future. In particular,
    -   * it is critical that initializeTable not be called multiple times since this will leak
    -   * Connection instances.
    -   *
    +   * {@link #initializeTable(Connection, TableName)}. Subclasses should implement their initialize
    +   * call such that it is safe to call multiple times. The current TableInputFormatBase
    +   * implementation relies on a non-null table reference to decide if an initialize call is needed,
    +   * but this behavior may change in the future. In particular, it is critical that initializeTable
    +   * not be called multiple times since this will leak Connection instances.
        */
       protected void initialize(JobConf job) throws IOException {
       }
    @@ -296,7 +271,6 @@ protected void initialize(JobConf job) throws IOException {
       /**
        * Close the Table and related objects that were initialized via
        * {@link #initializeTable(Connection, TableName)}.
    -   *
        * @throws IOException
        */
       protected void closeTable() throws IOException {
    @@ -307,7 +281,9 @@ protected void closeTable() throws IOException {
     
       private void close(Closeable... closables) throws IOException {
         for (Closeable c : closables) {
    -      if(c != null) { c.close(); }
    +      if (c != null) {
    +        c.close();
    +      }
         }
       }
     }
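
For readers following the TableInputFormatBase javadoc above, a minimal sketch of the ExampleTIF subclass it references might look like the following. This is illustrative only and not part of the patch; the table name "exampleTable" and column family "columnA" are assumptions.

// Minimal sketch of an ExampleTIF subclass; names are assumptions, not from the patch.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapred.TableInputFormatBase;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.JobConf;

public class ExampleTIF extends TableInputFormatBase {

  @Override
  protected void initialize(JobConf job) throws IOException {
    // initialize() may be invoked more than once, but initializeTable() must only run once
    // or Connection instances will leak, so bail out if the table is already set up.
    if (getTable() != null) {
      return;
    }
    Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
    // The input format takes ownership of this unmanaged connection and closes it via closeTable().
    initializeTable(connection, TableName.valueOf("exampleTable"));
    setInputColumns(new byte[][] { Bytes.toBytes("columnA") });
  }
}

Guarding on getTable() follows the javadoc's advice that initialize must be safe to call multiple times while initializeTable must not be.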
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
    index d76572722b6f..5b3d088cb5a3 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -18,21 +17,20 @@
      */
     package org.apache.hadoop.hbase.mapred;
     
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.Result;
     import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
     import org.apache.hadoop.io.WritableComparable;
     import org.apache.hadoop.mapred.Mapper;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Scan an HBase table to sort by a specified sort column.
    - * If the column does not exist, the record is not passed to Reduce.
    - *
    + * Scan an HBase table to sort by a specified sort column. If the column does not exist, the record
    + * is not passed to Reduce.
      * @param  WritableComparable key class
      * @param  Writable value class
      */
     @InterfaceAudience.Public
     public interface TableMap, V>
    -extends Mapper {
    +    extends Mapper {
     
     }
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
    index 99f6eb4b92ee..9d5f8994b8ea 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -18,11 +17,13 @@
      */
     package org.apache.hadoop.hbase.mapred;
     
    +import java.io.IOException;
    +import java.util.Collection;
    +import java.util.Map;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hbase.HBaseConfiguration;
     import org.apache.hadoop.hbase.TableName;
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.Connection;
     import org.apache.hadoop.hbase.client.ConnectionFactory;
     import org.apache.hadoop.hbase.client.Put;
    @@ -41,13 +42,10 @@
     import org.apache.hadoop.mapred.OutputFormat;
     import org.apache.hadoop.mapred.TextInputFormat;
     import org.apache.hadoop.mapred.TextOutputFormat;
    +import org.apache.yetus.audience.InterfaceAudience;
     import org.slf4j.Logger;
     import org.slf4j.LoggerFactory;
     
    -import java.io.IOException;
    -import java.util.Collection;
    -import java.util.Map;
    -
     /**
      * Utility for {@link TableMap} and {@link TableReduce}
      */
    @@ -57,57 +55,47 @@ public class TableMapReduceUtil {
       private static final Logger LOG = LoggerFactory.getLogger(TableMapReduceUtil.class);
     
       /**
    -   * Use this before submitting a TableMap job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The table name to read from.
    -   * @param columns  The columns to scan.
    -   * @param mapper  The mapper class to use.
    -   * @param outputKeyClass  The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param job  The current job configuration to adjust.
    +   * Use this before submitting a TableMap job. It will appropriately set up the JobConf.
    +   * @param table The table name to read from.
    +   * @param columns The columns to scan.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param job The current job configuration to adjust.
        */
    -  public static void initTableMapJob(String table, String columns,
    -    Class mapper,
    -    Class outputKeyClass,
    -    Class outputValueClass, JobConf job) {
    -    initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job,
    -      true, TableInputFormat.class);
    +  public static void initTableMapJob(String table, String columns, Class mapper,
    +      Class outputKeyClass, Class outputValueClass, JobConf job) {
    +    initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job, true,
    +      TableInputFormat.class);
       }
     
    -  public static void initTableMapJob(String table, String columns,
    -    Class mapper,
    -    Class outputKeyClass,
    -    Class outputValueClass, JobConf job, boolean addDependencyJars) {
    +  public static void initTableMapJob(String table, String columns, Class mapper,
    +      Class outputKeyClass, Class outputValueClass, JobConf job, boolean addDependencyJars) {
         initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job,
           addDependencyJars, TableInputFormat.class);
       }
     
       /**
    -   * Use this before submitting a TableMap job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The table name to read from.
    -   * @param columns  The columns to scan.
    -   * @param mapper  The mapper class to use.
    -   * @param outputKeyClass  The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param job  The current job configuration to adjust.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *           job classes via the distributed cache (tmpjars).
    +   * Use this before submitting a TableMap job. It will appropriately set up the JobConf.
    +   * @param table The table name to read from.
    +   * @param columns The columns to scan.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param job The current job configuration to adjust.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        */
    -  public static void initTableMapJob(String table, String columns,
    -    Class mapper,
    -    Class outputKeyClass,
    -    Class outputValueClass, JobConf job, boolean addDependencyJars,
    -    Class inputFormat) {
    +  public static void initTableMapJob(String table, String columns, Class mapper,
    +      Class outputKeyClass, Class outputValueClass, JobConf job, boolean addDependencyJars,
    +      Class inputFormat) {
     
         job.setInputFormat(inputFormat);
         job.setMapOutputValueClass(outputValueClass);
         job.setMapOutputKeyClass(outputKeyClass);
         job.setMapperClass(mapper);
         job.setStrings("io.serializations", job.get("io.serializations"),
    -        MutationSerialization.class.getName(), ResultSerialization.class.getName());
    +      MutationSerialization.class.getName(), ResultSerialization.class.getName());
         FileInputFormat.addInputPaths(job, table);
         job.set(TableInputFormat.COLUMN_LIST, columns);
         if (addDependencyJars) {
    @@ -120,24 +108,22 @@ public static void initTableMapJob(String table, String columns,
         try {
           initCredentials(job);
         } catch (IOException ioe) {
    -      // just spit out the stack trace?  really?
    +      // just spit out the stack trace? really?
           LOG.error("IOException encountered while initializing credentials", ioe);
         }
       }
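
To make the initTableMapJob contract above concrete, here is a hedged driver sketch (not part of the patch) that wires a map-only scan job, much like the RowCounter example earlier in this patch. The table name, column, and output path are hypothetical; IdentityTableMap is the mapper shown elsewhere in this diff.

// Illustrative driver only; table, column, and path names are placeholders.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.IdentityTableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class ExampleTableMapDriver {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create(), ExampleTableMapDriver.class);
    job.setJobName("exampleTableMap");
    // Scan "exampleTable", fetching the single column "cf:qual", mapping each row through IdentityTableMap.
    TableMapReduceUtil.initTableMapJob("exampleTable", "cf:qual", IdentityTableMap.class,
      ImmutableBytesWritable.class, Result.class, job);
    job.setNumReduceTasks(0); // map-only, as in RowCounter
    FileOutputFormat.setOutputPath(job, new Path("/tmp/example-output"));
    JobClient.runJob(job);
  }
}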
     
       /**
        * Sets up the job for reading from one or more multiple table snapshots, with one or more scans
    -   * per snapshot.
    -   * It bypasses hbase servers and read directly from snapshot files.
    -   *
    -   * @param snapshotScans     map of snapshot name to scans on that snapshot.
    -   * @param mapper            The mapper class to use.
    -   * @param outputKeyClass    The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param job               The current job to adjust.  Make sure the passed job is
    -   *                          carrying all necessary HBase configuration.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *                          job classes via the distributed cache (tmpjars).
+   * per snapshot. It bypasses hbase servers and reads directly from snapshot files.
    +   * @param snapshotScans map of snapshot name to scans on that snapshot.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase
    +   *          configuration.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        */
       public static void initMultiTableSnapshotMapperJob(Map> snapshotScans,
           Class mapper, Class outputKeyClass, Class outputValueClass,
    @@ -160,30 +146,26 @@ public static void initMultiTableSnapshotMapperJob(Map>
       }
     
       /**
    -   * Sets up the job for reading from a table snapshot. It bypasses hbase servers
    -   * and read directly from snapshot files.
    -   *
+   * Sets up the job for reading from a table snapshot. It bypasses hbase servers and reads directly
    +   * from snapshot files.
        * @param snapshotName The name of the snapshot (of a table) to read from.
    -   * @param columns  The columns to scan.
    -   * @param mapper  The mapper class to use.
    -   * @param outputKeyClass  The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param job  The current job to adjust.  Make sure the passed job is
    -   * carrying all necessary HBase configuration.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *           job classes via the distributed cache (tmpjars).
    +   * @param columns The columns to scan.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase
    +   *          configuration.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should
    -   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
    -   * After the job is finished, restore directory can be deleted.
    +   *          have write permissions to this directory, and this should not be a subdirectory of
+   *          rootdir. After the job is finished, the restore directory can be deleted.
        * @throws IOException When setting up the details fails.
        * @see TableSnapshotInputFormat
        */
       public static void initTableSnapshotMapJob(String snapshotName, String columns,
    -      Class mapper,
    -      Class outputKeyClass,
    -      Class outputValueClass, JobConf job,
    -      boolean addDependencyJars, Path tmpRestoreDir)
    -  throws IOException {
    +      Class mapper, Class outputKeyClass, Class outputValueClass,
    +      JobConf job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException {
         TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir);
         initTableMapJob(snapshotName, columns, mapper, outputKeyClass, outputValueClass, job,
           addDependencyJars, TableSnapshotInputFormat.class);
    @@ -191,97 +173,80 @@ public static void initTableSnapshotMapJob(String snapshotName, String columns,
       }
     
       /**
    -   * Sets up the job for reading from a table snapshot. It bypasses hbase servers
    -   * and read directly from snapshot files.
    -   *
+   * Sets up the job for reading from a table snapshot. It bypasses hbase servers and reads directly
    +   * from snapshot files.
        * @param snapshotName The name of the snapshot (of a table) to read from.
    -   * @param columns  The columns to scan.
    -   * @param mapper  The mapper class to use.
    -   * @param outputKeyClass  The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param jobConf  The current job to adjust.  Make sure the passed job is
    -   * carrying all necessary HBase configuration.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *           job classes via the distributed cache (tmpjars).
    +   * @param columns The columns to scan.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param jobConf The current job to adjust. Make sure the passed job is carrying all necessary
    +   *          HBase configuration.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should
    -   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
    -   * After the job is finished, restore directory can be deleted.
    +   *          have write permissions to this directory, and this should not be a subdirectory of
    +   *          rootdir. After the job is finished, restore directory can be deleted.
        * @param splitAlgo algorithm to split
        * @param numSplitsPerRegion how many input splits to generate per one region
        * @throws IOException When setting up the details fails.
        * @see TableSnapshotInputFormat
        */
       public static void initTableSnapshotMapJob(String snapshotName, String columns,
    -                                             Class mapper,
    -                                             Class outputKeyClass,
    -                                             Class outputValueClass, JobConf jobConf,
    -                                             boolean addDependencyJars, Path tmpRestoreDir,
    -                                             RegionSplitter.SplitAlgorithm splitAlgo,
    -                                             int numSplitsPerRegion)
    -          throws IOException {
    +      Class mapper, Class outputKeyClass, Class outputValueClass,
    +      JobConf jobConf, boolean addDependencyJars, Path tmpRestoreDir,
    +      RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
         TableSnapshotInputFormat.setInput(jobConf, snapshotName, tmpRestoreDir, splitAlgo,
    -            numSplitsPerRegion);
    +      numSplitsPerRegion);
         initTableMapJob(snapshotName, columns, mapper, outputKeyClass, outputValueClass, jobConf,
    -            addDependencyJars, TableSnapshotInputFormat.class);
    +      addDependencyJars, TableSnapshotInputFormat.class);
         org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.resetCacheConfig(jobConf);
       }
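As a variation on the sketch above, the overload that takes a split algorithm lets one region feed several map tasks. The algorithm and the count of four splits per region below are purely illustrative.

```java
// Drop-in replacement for the initTableSnapshotMapJob call in the previous sketch;
// it additionally requires importing org.apache.hadoop.hbase.util.RegionSplitter.
TableMapReduceUtil.initTableSnapshotMapJob("mySnapshot", "cf:qual", IdentityTableMap.class,
  ImmutableBytesWritable.class, Result.class, job, true, new Path("/tmp/snapshot-restore"),
  new RegionSplitter.UniformSplit(), 4);
```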
     
    -
       /**
    -   * Use this before submitting a TableReduce job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The output table.
    -   * @param reducer  The reducer class to use.
    -   * @param job  The current job configuration to adjust.
    +   * Use this before submitting a TableReduce job. It will appropriately set up the JobConf.
    +   * @param table The output table.
    +   * @param reducer The reducer class to use.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When determining the region count fails.
        */
    -  public static void initTableReduceJob(String table,
    -    Class reducer, JobConf job)
    -  throws IOException {
    +  public static void initTableReduceJob(String table, Class reducer,
    +      JobConf job) throws IOException {
         initTableReduceJob(table, reducer, job, null);
       }
     
       /**
    -   * Use this before submitting a TableReduce job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The output table.
    -   * @param reducer  The reducer class to use.
    -   * @param job  The current job configuration to adjust.
    -   * @param partitioner  Partitioner to use. Pass null to use
    -   * default partitioner.
    +   * Use this before submitting a TableReduce job. It will appropriately set up the JobConf.
    +   * @param table The output table.
    +   * @param reducer The reducer class to use.
    +   * @param job The current job configuration to adjust.
    +   * @param partitioner Partitioner to use. Pass null to use default partitioner.
        * @throws IOException When determining the region count fails.
        */
    -  public static void initTableReduceJob(String table,
    -    Class reducer, JobConf job, Class partitioner)
    -  throws IOException {
    +  public static void initTableReduceJob(String table, Class reducer,
    +      JobConf job, Class partitioner) throws IOException {
         initTableReduceJob(table, reducer, job, partitioner, true);
       }
     
       /**
    -   * Use this before submitting a TableReduce job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The output table.
    -   * @param reducer  The reducer class to use.
    -   * @param job  The current job configuration to adjust.
    -   * @param partitioner  Partitioner to use. Pass null to use
    -   * default partitioner.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *           job classes via the distributed cache (tmpjars).
    +   * Use this before submitting a TableReduce job. It will appropriately set up the JobConf.
    +   * @param table The output table.
    +   * @param reducer The reducer class to use.
    +   * @param job The current job configuration to adjust.
    +   * @param partitioner Partitioner to use. Pass null to use default partitioner.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        * @throws IOException When determining the region count fails.
        */
    -  public static void initTableReduceJob(String table,
    -    Class reducer, JobConf job, Class partitioner,
    -    boolean addDependencyJars) throws IOException {
    +  public static void initTableReduceJob(String table, Class reducer,
    +      JobConf job, Class partitioner, boolean addDependencyJars) throws IOException {
         job.setOutputFormat(TableOutputFormat.class);
         job.setReducerClass(reducer);
         job.set(TableOutputFormat.OUTPUT_TABLE, table);
         job.setOutputKeyClass(ImmutableBytesWritable.class);
         job.setOutputValueClass(Put.class);
         job.setStrings("io.serializations", job.get("io.serializations"),
    -        MutationSerialization.class.getName(), ResultSerialization.class.getName());
    +      MutationSerialization.class.getName(), ResultSerialization.class.getName());
         if (partitioner == HRegionPartitioner.class) {
           job.setPartitionerClass(HRegionPartitioner.class);
           int regions = getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
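To illustrate the reduce-side setup, a hypothetical driver could wire an identity reducer to a target table as below; the table name is a placeholder and the map phase is elided.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapred.HRegionPartitioner;
import org.apache.hadoop.hbase.mapred.IdentityTableReduce;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

public class ReduceToTableDriver {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create(), ReduceToTableDriver.class);
    // The map phase (not shown) must emit (ImmutableBytesWritable, Put) pairs.
    // Passing HRegionPartitioner keeps the reduce count within the table's region count.
    TableMapReduceUtil.initTableReduceJob("targetTable", IdentityTableReduce.class, job,
      HRegionPartitioner.class, true);
    // ... configure the map side, then submit with JobClient.runJob(job).
  }
}
```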
    @@ -322,11 +287,10 @@ public static void initCredentials(JobConf job) throws IOException {
       }
     
       /**
    -   * Ensures that the given number of reduce tasks for the given job
    -   * configuration does not exceed the number of regions for the given table.
    -   *
    -   * @param table  The table to get the region count for.
    -   * @param job  The current job configuration to adjust.
    +   * Ensures that the given number of reduce tasks for the given job configuration does not exceed
    +   * the number of regions for the given table.
    +   * @param table The table to get the region count for.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When retrieving the table details fails.
        */
       // Used by tests.
    @@ -338,11 +302,10 @@ public static void limitNumReduceTasks(String table, JobConf job) throws IOExcep
       }
     
       /**
    -   * Ensures that the given number of map tasks for the given job
    -   * configuration does not exceed the number of regions for the given table.
    -   *
    -   * @param table  The table to get the region count for.
    -   * @param job  The current job configuration to adjust.
    +   * Ensures that the given number of map tasks for the given job configuration does not exceed the
    +   * number of regions for the given table.
    +   * @param table The table to get the region count for.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When retrieving the table details fails.
        */
       // Used by tests.
    @@ -354,11 +317,10 @@ public static void limitNumMapTasks(String table, JobConf job) throws IOExceptio
       }
     
       /**
    -   * Sets the number of reduce tasks for the given job configuration to the
    -   * number of regions the given table has.
    -   *
    -   * @param table  The table to get the region count for.
    -   * @param job  The current job configuration to adjust.
    +   * Sets the number of reduce tasks for the given job configuration to the number of regions the
    +   * given table has.
    +   * @param table The table to get the region count for.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When retrieving the table details fails.
        */
       public static void setNumReduceTasks(String table, JobConf job) throws IOException {
    @@ -366,11 +328,10 @@ public static void setNumReduceTasks(String table, JobConf job) throws IOExcepti
       }
     
       /**
    -   * Sets the number of map tasks for the given job configuration to the
    -   * number of regions the given table has.
    -   *
    -   * @param table  The table to get the region count for.
    -   * @param job  The current job configuration to adjust.
    +   * Sets the number of map tasks for the given job configuration to the number of regions the given
    +   * table has.
    +   * @param table The table to get the region count for.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When retrieving the table details fails.
        */
       public static void setNumMapTasks(String table, JobConf job) throws IOException {
    @@ -378,13 +339,11 @@ public static void setNumMapTasks(String table, JobConf job) throws IOException
       }
     
       /**
    -   * Sets the number of rows to return and cache with each scanner iteration.
    -   * Higher caching values will enable faster mapreduce jobs at the expense of
    -   * requiring more heap to contain the cached rows.
    -   *
    +   * Sets the number of rows to return and cache with each scanner iteration. Higher caching values
    +   * will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached
    +   * rows.
        * @param job The current job configuration to adjust.
    -   * @param batchSize The number of rows to return in batch with each scanner
    -   * iteration.
    +   * @param batchSize The number of rows to return in batch with each scanner iteration.
        */
       public static void setScannerCaching(JobConf job, int batchSize) {
         job.setInt("hbase.client.scanner.caching", batchSize);
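The trade-off described above is the only knob here; a driver might set it like this (the value 500 is illustrative, not a recommendation).

```java
// Fewer RPC round trips per map task, at the cost of more heap held by each open scanner.
TableMapReduceUtil.setScannerCaching(job, 500);
```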
    @@ -395,22 +354,17 @@ public static void setScannerCaching(JobConf job, int batchSize) {
        */
       public static void addDependencyJars(JobConf job) throws IOException {
         org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
    -    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(
    -      job,
    -      job.getMapOutputKeyClass(),
    -      job.getMapOutputValueClass(),
    -      job.getOutputKeyClass(),
    -      job.getOutputValueClass(),
    -      job.getPartitionerClass(),
    +    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(job,
    +      job.getMapOutputKeyClass(), job.getMapOutputValueClass(), job.getOutputKeyClass(),
    +      job.getOutputValueClass(), job.getPartitionerClass(),
           job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
           job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
           job.getCombinerClass());
       }
     
    -
       private static int getRegionCount(Configuration conf, TableName tableName) throws IOException {
         try (Connection conn = ConnectionFactory.createConnection(conf);
    -      RegionLocator locator = conn.getRegionLocator(tableName)) {
    +        RegionLocator locator = conn.getRegionLocator(tableName)) {
           return locator.getAllRegionLocations().size();
         }
       }
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    index a55784729c0b..48c2ab67bc19 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,11 +18,9 @@
     package org.apache.hadoop.hbase.mapred;
     
     import java.io.IOException;
    -
     import org.apache.hadoop.fs.FileAlreadyExistsException;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.hbase.TableName;
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.BufferedMutator;
     import org.apache.hadoop.hbase.client.Connection;
     import org.apache.hadoop.hbase.client.ConnectionFactory;
    @@ -35,6 +32,7 @@
     import org.apache.hadoop.mapred.RecordWriter;
     import org.apache.hadoop.mapred.Reporter;
     import org.apache.hadoop.util.Progressable;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * Convert Map/Reduce output and write it to an HBase table
    @@ -46,8 +44,8 @@ public class TableOutputFormat extends FileOutputFormat {
         private BufferedMutator m_mutator;
    @@ -88,13 +86,10 @@ public void write(ImmutableBytesWritable key, Put value) throws IOException {
       }
     
       /**
    -   * Creates a new record writer.
    -   *
    -   * Be aware that the baseline javadoc gives the impression that there is a single
    -   * {@link RecordWriter} per job but in HBase, it is more natural if we give you a new
    +   * Creates a new record writer. Be aware that the baseline javadoc gives the impression that there
    +   * is a single {@link RecordWriter} per job but in HBase, it is more natural if we give you a new
        * RecordWriter per call of this method. You must close the returned RecordWriter when done.
        * Failure to do so will drop writes.
    -   *
        * @param ignored Ignored filesystem
        * @param job Current JobConf
        * @param name Name of the job
    @@ -104,15 +99,14 @@ public void write(ImmutableBytesWritable key, Put value) throws IOException {
        */
       @Override
       public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name,
    -      Progressable progress)
    -  throws IOException {
    +      Progressable progress) throws IOException {
         // Clear write buffer on fail is true by default so no need to reset it.
         return new TableRecordWriter(job);
       }
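The close-or-lose-writes contract above matters mostly when the record writer is obtained outside the normal framework flow. A hypothetical direct use, with a placeholder table, family and row, might look like this:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;

public class DirectTableWrite {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create());
    job.set(TableOutputFormat.OUTPUT_TABLE, "targetTable");
    // The FileSystem argument is documented as ignored, so null is passed here.
    RecordWriter<ImmutableBytesWritable, Put> writer =
      new TableOutputFormat().getRecordWriter(null, job, "direct-write", Reporter.NULL);
    try {
      Put put = new Put(Bytes.toBytes("row-1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      writer.write(new ImmutableBytesWritable(put.getRow()), put);
    } finally {
      writer.close(Reporter.NULL); // flushes the buffered mutations; skipping this drops them
    }
  }
}
```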
     
       @Override
       public void checkOutputSpecs(FileSystem ignored, JobConf job)
    -  throws FileAlreadyExistsException, InvalidJobConfException, IOException {
    +      throws FileAlreadyExistsException, InvalidJobConfException, IOException {
         String tableName = job.get(OUTPUT_TABLE);
         if (tableName == null) {
           throw new IOException("Must specify table name");
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
    index 3d41d8c5fcf8..a0e75abceb88 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,27 +18,23 @@
     package org.apache.hadoop.hbase.mapred;
     
     import java.io.IOException;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.Result;
     import org.apache.hadoop.hbase.client.Table;
     import org.apache.hadoop.hbase.filter.Filter;
     import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
     import org.apache.hadoop.mapred.RecordReader;
    -
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * Iterate over an HBase table data, return (Text, RowResult) pairs
      */
     @InterfaceAudience.Public
    -public class TableRecordReader
    -implements RecordReader {
    +public class TableRecordReader implements RecordReader {
     
       private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl();
     
       /**
        * Restart from survivable exceptions by creating a new scanner.
    -   *
        * @param firstRow
        * @throws IOException
        */
    @@ -49,7 +44,6 @@ public void restart(byte[] firstRow) throws IOException {
     
       /**
        * Build the scanner. Not done in constructor to allow for extension.
    -   *
        * @throws IOException
        */
       public void init() throws IOException {
    @@ -66,22 +60,21 @@ public void setHTable(Table htable) {
       /**
        * @param inputColumns the columns to be placed in {@link Result}.
        */
    -  public void setInputColumns(final byte [][] inputColumns) {
    +  public void setInputColumns(final byte[][] inputColumns) {
         this.recordReaderImpl.setInputColumns(inputColumns);
       }
     
       /**
        * @param startRow the first row in the split
        */
    -  public void setStartRow(final byte [] startRow) {
    +  public void setStartRow(final byte[] startRow) {
         this.recordReaderImpl.setStartRow(startRow);
       }
     
       /**
    -   *
        * @param endRow the last row in the split
        */
    -  public void setEndRow(final byte [] endRow) {
    +  public void setEndRow(final byte[] endRow) {
         this.recordReaderImpl.setEndRow(endRow);
       }
     
    @@ -98,7 +91,6 @@ public void close() {
     
       /**
        * @return ImmutableBytesWritable
    -   *
        * @see org.apache.hadoop.mapred.RecordReader#createKey()
        */
       public ImmutableBytesWritable createKey() {
    @@ -107,7 +99,6 @@ public ImmutableBytesWritable createKey() {
     
       /**
        * @return RowResult
    -   *
        * @see org.apache.hadoop.mapred.RecordReader#createValue()
        */
       public Result createValue() {
    @@ -132,8 +123,7 @@ public float getProgress() {
        * @return true if there was more data
        * @throws IOException
        */
    -  public boolean next(ImmutableBytesWritable key, Result value)
    -  throws IOException {
    +  public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
         return this.recordReaderImpl.next(key, value);
       }
     }
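Inside a job the framework drives this reader itself, but a minimal manual loop shows how the pieces fit together. The sketch below assumes an open `Table` handle for an existing table with a family named "cf"; both are placeholders.

```java
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableRecordReader;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanWithRecordReader {
  static void dumpRows(Table table) throws Exception {
    TableRecordReader reader = new TableRecordReader();
    reader.setHTable(table);
    reader.setInputColumns(new byte[][] { Bytes.toBytes("cf") }); // placeholder family
    reader.setStartRow(HConstants.EMPTY_START_ROW); // no end row set, so scan to the end
    reader.init();
    ImmutableBytesWritable key = reader.createKey();
    Result value = reader.createValue();
    try {
      while (reader.next(key, value)) {
        System.out.println(Bytes.toStringBinary(key.get()) + ": " + value.size() + " cells");
      }
    } finally {
      reader.close();
    }
  }
}
```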
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
    index aff83ddcefe1..b7f7412b7008 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
    @@ -1,5 +1,4 @@
     /*
    - *
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,6 +18,7 @@
     package org.apache.hadoop.hbase.mapred;
     
     import static org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl.LOG_PER_ROW_COUNT;
    +
     import java.io.IOException;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hbase.DoNotRetryIOException;
    @@ -44,13 +44,13 @@
     public class TableRecordReaderImpl {
       private static final Logger LOG = LoggerFactory.getLogger(TableRecordReaderImpl.class);
     
    -  private byte [] startRow;
    -  private byte [] endRow;
    -  private byte [] lastSuccessfulRow;
    +  private byte[] startRow;
    +  private byte[] endRow;
    +  private byte[] lastSuccessfulRow;
       private Filter trrRowFilter;
       private ResultScanner scanner;
       private Table htable;
    -  private byte [][] trrInputColumns;
    +  private byte[][] trrInputColumns;
       private long timestamp;
       private int rowcount;
       private boolean logScannerActivity = false;
    @@ -70,17 +70,15 @@ public void restart(byte[] firstRow) throws IOException {
             this.scanner = this.htable.getScanner(scan);
             currentScan = scan;
           } else {
    -        LOG.debug("TIFB.restart, firstRow: " +
    -            Bytes.toStringBinary(firstRow) + ", endRow: " +
    -            Bytes.toStringBinary(endRow));
    +        LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", endRow: "
    +            + Bytes.toStringBinary(endRow));
             Scan scan = new Scan().withStartRow(firstRow).withStopRow(endRow);
             TableInputFormat.addColumns(scan, trrInputColumns);
             this.scanner = this.htable.getScanner(scan);
             currentScan = scan;
           }
         } else {
    -      LOG.debug("TIFB.restart, firstRow: " +
    -          Bytes.toStringBinary(firstRow) + ", no endRow");
    +      LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", no endRow");
     
           Scan scan = new Scan().withStartRow(firstRow);
           TableInputFormat.addColumns(scan, trrInputColumns);
    @@ -119,22 +117,21 @@ public void setHTable(Table htable) {
       /**
        * @param inputColumns the columns to be placed in {@link Result}.
        */
    -  public void setInputColumns(final byte [][] inputColumns) {
    +  public void setInputColumns(final byte[][] inputColumns) {
         this.trrInputColumns = inputColumns;
       }
     
       /**
        * @param startRow the first row in the split
        */
    -  public void setStartRow(final byte [] startRow) {
    +  public void setStartRow(final byte[] startRow) {
         this.startRow = startRow;
       }
     
       /**
    -   *
        * @param endRow the last row in the split
        */
    -  public void setEndRow(final byte [] endRow) {
    +  public void setEndRow(final byte[] endRow) {
         this.endRow = endRow;
       }
     
    @@ -158,7 +155,6 @@ public void close() {
     
       /**
        * @return ImmutableBytesWritable
    -   *
        * @see org.apache.hadoop.mapred.RecordReader#createKey()
        */
       public ImmutableBytesWritable createKey() {
    @@ -167,7 +163,6 @@ public ImmutableBytesWritable createKey() {
     
       /**
        * @return RowResult
    -   *
        * @see org.apache.hadoop.mapred.RecordReader#createValue()
        */
       public Result createValue() {
    @@ -196,11 +191,10 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException
           try {
             result = this.scanner.next();
             if (logScannerActivity) {
    -          rowcount ++;
    +          rowcount++;
               if (rowcount >= logPerRowCount) {
                 long now = EnvironmentEdgeManager.currentTime();
    -            LOG.info("Mapper took " + (now-timestamp)
    -              + "ms to process " + rowcount + " rows");
    +            LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows");
                 timestamp = now;
                 rowcount = 0;
               }
    @@ -214,16 +208,16 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException
             // the scanner, if the second call fails, it will be rethrown
             LOG.debug("recovered from " + StringUtils.stringifyException(e));
             if (lastSuccessfulRow == null) {
    -          LOG.warn("We are restarting the first next() invocation," +
    -              " if your mapper has restarted a few other times like this" +
    -              " then you should consider killing this job and investigate" +
    -              " why it's taking so long.");
    +          LOG.warn("We are restarting the first next() invocation,"
    +              + " if your mapper has restarted a few other times like this"
    +              + " then you should consider killing this job and investigate"
    +              + " why it's taking so long.");
             }
             if (lastSuccessfulRow == null) {
               restart(startRow);
             } else {
               restart(lastSuccessfulRow);
    -          this.scanner.next();    // skip presumed already mapped row
    +          this.scanner.next(); // skip presumed already mapped row
             }
             result = this.scanner.next();
           }
    @@ -238,11 +232,10 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException
         } catch (IOException ioe) {
           if (logScannerActivity) {
             long now = EnvironmentEdgeManager.currentTime();
    -        LOG.info("Mapper took " + (now-timestamp)
    -          + "ms to process " + rowcount + " rows");
    +        LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows");
             LOG.info(ioe.toString(), ioe);
    -        String lastRow = lastSuccessfulRow == null ?
    -          "null" : Bytes.toStringBinary(lastSuccessfulRow);
    +        String lastRow =
    +            lastSuccessfulRow == null ? "null" : Bytes.toStringBinary(lastSuccessfulRow);
             LOG.info("lastSuccessfulRow=" + lastRow);
           }
           throw ioe;
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java
    index a64e4cdc82f9..c1f55f3c16ee 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -18,21 +17,20 @@
      */
     package org.apache.hadoop.hbase.mapred;
     
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.Put;
     import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
     import org.apache.hadoop.io.WritableComparable;
     import org.apache.hadoop.mapred.Reducer;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * Write a table, sorting by the input key
    - *
      * @param  key class
      * @param  value class
      */
     @InterfaceAudience.Public
     @SuppressWarnings("unchecked")
     public interface TableReduce
    -extends Reducer {
    +    extends Reducer {
     
     }
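The interface is just the old-API Reducer specialised to emit (ImmutableBytesWritable, Put) pairs. A minimal pass-through implementation, roughly what IdentityTableReduce already provides, could look like this sketch:

```java
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableReduce;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

/** Passes every Put through unchanged, keyed by row. */
public class PassThroughTableReduce implements TableReduce<ImmutableBytesWritable, Put> {
  @Override
  public void configure(JobConf job) {
  }

  @Override
  public void close() throws IOException {
  }

  @Override
  public void reduce(ImmutableBytesWritable key, Iterator<Put> values,
      OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter) throws IOException {
    while (values.hasNext()) {
      output.collect(key, values.next());
    }
  }
}
```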
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java
    index 4506b597164e..311f548e2533 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -15,7 +15,6 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -
     package org.apache.hadoop.hbase.mapred;
     
     import java.io.DataInput;
    @@ -40,7 +39,6 @@
     /**
      * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. Further
      * documentation available on {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}.
    - *
      * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
      */
     @InterfaceAudience.Public
    @@ -85,8 +83,7 @@ public void readFields(DataInput in) throws IOException {
         }
       }
     
    -  static class TableSnapshotRecordReader
    -    implements RecordReader {
    +  static class TableSnapshotRecordReader implements RecordReader {
     
         private TableSnapshotInputFormatImpl.RecordReader delegate;
     
    @@ -136,7 +133,7 @@ public float getProgress() throws IOException {
       @Override
       public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
         List splits =
    -      TableSnapshotInputFormatImpl.getSplits(job);
    +        TableSnapshotInputFormatImpl.getSplits(job);
         InputSplit[] results = new InputSplit[splits.size()];
         for (int i = 0; i < splits.size(); i++) {
           results[i] = new TableSnapshotRegionSplit(splits.get(i));
    @@ -145,8 +142,8 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
       }
     
       @Override
    -  public RecordReader
    -  getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
    +  public RecordReader getRecordReader(InputSplit split, JobConf job,
    +      Reporter reporter) throws IOException {
         return new TableSnapshotRecordReader((TableSnapshotRegionSplit) split, job);
       }
     
    @@ -154,9 +151,9 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
        * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
        * @param job the job to configure
        * @param snapshotName the name of the snapshot to read from
    -   * @param restoreDir a temporary directory to restore the snapshot into. Current user should
    -   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
    -   * After the job is finished, restoreDir can be deleted.
    +   * @param restoreDir a temporary directory to restore the snapshot into. Current user should have
    +   *          write permissions to this directory, and this should not be a subdirectory of rootdir.
    +   *          After the job is finished, restoreDir can be deleted.
        * @throws IOException if an error occurs
        */
       public static void setInput(JobConf job, String snapshotName, Path restoreDir)
    @@ -168,15 +165,16 @@ public static void setInput(JobConf job, String snapshotName, Path restoreDir)
        * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
        * @param job the job to configure
        * @param snapshotName the name of the snapshot to read from
    -   * @param restoreDir a temporary directory to restore the snapshot into. Current user should
    -   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
    -   * After the job is finished, restoreDir can be deleted.
    +   * @param restoreDir a temporary directory to restore the snapshot into. Current user should have
    +   *          write permissions to this directory, and this should not be a subdirectory of rootdir.
    +   *          After the job is finished, restoreDir can be deleted.
        * @param splitAlgo split algorithm to generate splits from region
        * @param numSplitsPerRegion how many input splits to generate per one region
        * @throws IOException if an error occurs
        */
       public static void setInput(JobConf job, String snapshotName, Path restoreDir,
    -                              RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
    -    TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo, numSplitsPerRegion);
    +      RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
    +    TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo,
    +      numSplitsPerRegion);
       }
     }
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
    index d6e663730a7b..0e3ca25de812 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -22,12 +21,11 @@
     import java.io.DataOutput;
     import java.io.IOException;
     import java.util.Arrays;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
    -import org.apache.hadoop.hbase.TableName;
     import org.apache.hadoop.hbase.HConstants;
    +import org.apache.hadoop.hbase.TableName;
     import org.apache.hadoop.hbase.util.Bytes;
     import org.apache.hadoop.mapred.InputSplit;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * A table split corresponds to a key range [low, high)
    @@ -35,14 +33,13 @@
     @InterfaceAudience.Public
     public class TableSplit implements InputSplit, Comparable {
       private TableName m_tableName;
    -  private byte [] m_startRow;
    -  private byte [] m_endRow;
    +  private byte[] m_startRow;
    +  private byte[] m_endRow;
       private String m_regionLocation;
     
       /** default constructor */
       public TableSplit() {
    -    this((TableName)null, HConstants.EMPTY_BYTE_ARRAY,
    -      HConstants.EMPTY_BYTE_ARRAY, "");
    +    this((TableName) null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, "");
       }
     
       /**
    @@ -52,18 +49,15 @@ public TableSplit() {
        * @param endRow
        * @param location
        */
    -  public TableSplit(TableName tableName, byte [] startRow, byte [] endRow,
    -      final String location) {
    +  public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) {
         this.m_tableName = tableName;
         this.m_startRow = startRow;
         this.m_endRow = endRow;
         this.m_regionLocation = location;
       }
     
    -  public TableSplit(byte [] tableName, byte [] startRow, byte [] endRow,
    -      final String location) {
    -    this(TableName.valueOf(tableName), startRow, endRow,
    -      location);
    +  public TableSplit(byte[] tableName, byte[] startRow, byte[] endRow, final String location) {
    +    this(TableName.valueOf(tableName), startRow, endRow, location);
       }
     
       /** @return table name */
    @@ -72,17 +66,17 @@ public TableName getTable() {
       }
     
       /** @return table name */
    -   public byte [] getTableName() {
    -     return this.m_tableName.getName();
    -   }
    +  public byte[] getTableName() {
    +    return this.m_tableName.getName();
    +  }
     
       /** @return starting row key */
    -  public byte [] getStartRow() {
    +  public byte[] getStartRow() {
         return this.m_startRow;
       }
     
       /** @return end row key */
    -  public byte [] getEndRow() {
    +  public byte[] getEndRow() {
         return this.m_endRow;
       }
     
    @@ -92,7 +86,7 @@ public String getRegionLocation() {
       }
     
       public String[] getLocations() {
    -    return new String[] {this.m_regionLocation};
    +    return new String[] { this.m_regionLocation };
       }
     
       public long getLength() {
    @@ -116,14 +110,14 @@ public void write(DataOutput out) throws IOException {
     
       @Override
       public String toString() {
    -      StringBuilder sb = new StringBuilder();
    -      sb.append("HBase table split(");
    -      sb.append("table name: ").append(m_tableName);
    -      sb.append(", start row: ").append(Bytes.toStringBinary(m_startRow));
    -      sb.append(", end row: ").append(Bytes.toStringBinary(m_endRow));
    -      sb.append(", region location: ").append(m_regionLocation);
    -      sb.append(")");
    -      return sb.toString();
    +    StringBuilder sb = new StringBuilder();
    +    sb.append("HBase table split(");
    +    sb.append("table name: ").append(m_tableName);
    +    sb.append(", start row: ").append(Bytes.toStringBinary(m_startRow));
    +    sb.append(", end row: ").append(Bytes.toStringBinary(m_endRow));
    +    sb.append(", region location: ").append(m_regionLocation);
    +    sb.append(")");
    +    return sb.toString();
       }
     
       @Override
    @@ -136,11 +130,10 @@ public boolean equals(Object o) {
         if (o == null || !(o instanceof TableSplit)) {
           return false;
         }
    -    TableSplit other = (TableSplit)o;
    -    return m_tableName.equals(other.m_tableName) &&
    -      Bytes.equals(m_startRow, other.m_startRow) &&
    -      Bytes.equals(m_endRow, other.m_endRow) &&
    -      m_regionLocation.equals(other.m_regionLocation);
    +    TableSplit other = (TableSplit) o;
    +    return m_tableName.equals(other.m_tableName) && Bytes.equals(m_startRow, other.m_startRow)
    +        && Bytes.equals(m_endRow, other.m_endRow)
    +        && m_regionLocation.equals(other.m_regionLocation);
       }
     
       @Override
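For illustration, the split is just a value object tying a key range to a preferred host. TableInputFormat normally builds these from region boundaries; the table name, keys and host below are invented, and imports for TableName and Bytes are assumed.

```java
// Hand-built split, useful mainly in tests or for illustration.
TableSplit split = new TableSplit(TableName.valueOf("myTable"), Bytes.toBytes("aaa"),
  Bytes.toBytes("mmm"), "rs1.example.com");
System.out.println(split); // "HBase table split(table name: myTable, start row: aaa, ...)"
System.out.println(split.getLocations()[0]); // hint used to schedule the map task near the data
```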
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
    index b375b3980b5c..03abf7bdd87e 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
    @@ -1,26 +1,19 @@
     /*
    - *
    - * Licensed to the Apache Software Foundation (ASF) under one
    - * or more contributor license agreements.  See the NOTICE file
    - * distributed with this work for additional information
    - * regarding copyright ownership.  The ASF licenses this file
    - * to you under the Apache License, Version 2.0 (the
    - * "License"); you may not use this file except in compliance
    - * with the License.  You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
    + * agreements. See the NOTICE file distributed with this work for additional information regarding
    + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance with the License. You may obtain a
    + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
    + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
    + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
    + * for the specific language governing permissions and limitations under the License.
      */
     /**
    -Provides HBase MapReduce
    -Input/OutputFormats, a table indexing MapReduce job, and utility methods.
    -
    -

     -See HBase and MapReduce
     -in the HBase Reference Guide for mapreduce over hbase documentation.
     -*/
     + * Provides HBase
     + * MapReduce
     + * Input/OutputFormats, a table indexing MapReduce job, and utility methods.
     + *
     + * See HBase and MapReduce in the HBase
     + * Reference Guide for mapreduce over hbase documentation.
     + */
      package org.apache.hadoop.hbase.mapred;
     diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
     index c244d8b7bd91..8a2fffddd37b 100644
     --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
     +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
     @@ -1,5 +1,4 @@
     -/**
     - *
     +/*
       * Licensed to the Apache Software Foundation (ASF) under one
       * or more contributor license agreements.  See the NOTICE file
       * distributed with this work for additional information
     @@ -19,18 +18,14 @@
      package org.apache.hadoop.hbase.mapreduce;
     
      import java.io.IOException;
     -
     -import org.apache.hadoop.hbase.CompareOperator;
     -import org.apache.hadoop.hbase.HConstants;
     -import org.apache.yetus.audience.InterfaceAudience;
     -import org.slf4j.Logger;
     -import org.slf4j.LoggerFactory;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.conf.Configured;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hbase.Cell;
      import org.apache.hadoop.hbase.CellUtil;
     +import org.apache.hadoop.hbase.CompareOperator;
      import org.apache.hadoop.hbase.HBaseConfiguration;
     +import org.apache.hadoop.hbase.HConstants;
      import org.apache.hadoop.hbase.client.Result;
      import org.apache.hadoop.hbase.client.Scan;
      import org.apache.hadoop.hbase.filter.Filter;
     @@ -47,12 +42,16 @@
      import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
      import org.apache.hadoop.util.Tool;
      import org.apache.hadoop.util.ToolRunner;
     +import org.apache.yetus.audience.InterfaceAudience;
     +import org.slf4j.Logger;
     +import org.slf4j.LoggerFactory;
     
      import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
     
      /**
     - * A job with a a map and reduce phase to count cells in a table.
     - * The counter lists the following stats for a given table:
     + * A job with a a map and reduce phase to count cells in a table. The counter lists the following
     + * stats for a given table:
     + *
     + *

      * 1. Total number of rows in the table
      * 2. Total number of CFs across all rows
    @@ -65,17 +64,14 @@
      * 9. Total size of serialized cells across all rows.
      * 
    * - * The cellcounter can take optional parameters to use a user - * supplied row/family/qualifier string to use in the report and - * second a regex based or prefix based row filter to restrict the - * count operation to a limited subset of rows from the table or a - * start time and/or end time to limit the count to a time range. + * The cellcounter can take optional parameters to use a user supplied row/family/qualifier string + * to use in the report and second a regex based or prefix based row filter to restrict the count + * operation to a limited subset of rows from the table or a start time and/or end time to limit the + * count to a time range. */ @InterfaceAudience.Public public class CellCounter extends Configured implements Tool { - private static final Logger LOG = - LoggerFactory.getLogger(CellCounter.class.getName()); - + private static final Logger LOG = LoggerFactory.getLogger(CellCounter.class.getName()); /** * Name of this 'program'. @@ -87,15 +83,12 @@ public class CellCounter extends Configured implements Tool { /** * Mapper that runs the count. */ - static class CellCounterMapper - extends TableMapper { + static class CellCounterMapper extends TableMapper { /** * Counter enumeration to count the actual rows. */ public static enum Counters { - ROWS, - CELLS, - SIZE + ROWS, CELLS, SIZE } private Configuration conf; @@ -117,26 +110,22 @@ public static enum Counters { @Override protected void setup(Context context) throws IOException, InterruptedException { conf = context.getConfiguration(); - separator = conf.get("ReportSeparator",":"); + separator = conf.get("ReportSeparator", ":"); } /** * Maps the data. - * - * @param row The current table row key. - * @param values The columns. + * @param row The current table row key. + * @param values The columns. * @param context The current context. * @throws IOException When something is broken with the data. 
*/ @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="Findbugs is blind to the Precondition null check") - public void map(ImmutableBytesWritable row, Result values, - Context context) - throws IOException { - Preconditions.checkState(values != null, - "values passed to the map is null"); + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "Findbugs is blind to the Precondition null check") + public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException { + Preconditions.checkState(values != null, "values passed to the map is null"); try { byte[] currentRow = values.getRow(); @@ -167,14 +156,13 @@ public void map(ImmutableBytesWritable row, Result values, context.getCounter("CF", currentFamilyName + "_Size").increment(size); context.write(new Text(currentFamilyName + "_Size"), new LongWritable(size)); } - if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)){ + if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)) { currentQualifier = CellUtil.cloneQualifier(value); - currentQualifierName = currentFamilyName + separator + - Bytes.toStringBinary(currentQualifier); + currentQualifierName = + currentFamilyName + separator + Bytes.toStringBinary(currentQualifier); currentRowQualifierName = currentRowKey + separator + currentQualifierName; - context.write(new Text("Total Qualifiers across all Rows"), - new LongWritable(1)); + context.write(new Text("Total Qualifiers across all Rows"), new LongWritable(1)); context.write(new Text(currentQualifierName), new LongWritable(1)); context.getCounter("Q", currentQualifierName + "_Size").increment(size); context.write(new Text(currentQualifierName + "_Size"), new LongWritable(size)); @@ -209,23 +197,21 @@ public void reduce(Key key, Iterable values, Context context) /** * Sets up the actual job. - * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; Path outputDir = new Path(args[1]); - String reportSeparatorString = (args.length > 2) ? args[2]: ":"; + String reportSeparatorString = (args.length > 2) ? args[2] : ":"; conf.set("ReportSeparator", reportSeparatorString); Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName)); job.setJarByClass(CellCounter.class); Scan scan = getConfiguredScanForJob(conf, args); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - CellCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, CellCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setMapOutputKeyClass(Text.class); job.setMapOutputValueClass(LongWritable.class); job.setOutputFormatClass(TextOutputFormat.class); @@ -249,7 +235,7 @@ private static Scan getConfiguredScanForJob(Configuration conf, String[] args) s.setCacheBlocks(false); // Set RowFilter or Prefix Filter if applicable. 
Filter rowFilter = getRowFilter(args); - if (rowFilter!= null) { + if (rowFilter != null) { LOG.info("Setting Row Filter for counter."); s.setFilter(rowFilter); } @@ -262,10 +248,9 @@ private static Scan getConfiguredScanForJob(Configuration conf, String[] args) return s; } - private static Filter getRowFilter(String[] args) { Filter rowFilter = null; - String filterCriteria = (args.length > 3) ? args[3]: null; + String filterCriteria = (args.length > 3) ? args[3] : null; if (filterCriteria == null) return null; if (filterCriteria.startsWith("^")) { String regexPattern = filterCriteria.substring(1, filterCriteria.length()); @@ -292,11 +277,10 @@ private static long[] getTimeRange(String[] args) throws IOException { } } - if (startTime == 0 && endTime == 0) - return null; + if (startTime == 0 && endTime == 0) return null; endTime = endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime; - return new long [] {startTime, endTime}; + return new long[] { startTime, endTime }; } @Override @@ -319,8 +303,7 @@ private void printUsage(int parameterCount) { System.err.println(" -D" + TableInputFormat.SCAN_ROW_START + "="); System.err.println(" -D" + TableInputFormat.SCAN_ROW_STOP + "="); System.err.println(" -D" + TableInputFormat.SCAN_COLUMNS + "=\" ...\""); - System.err.println(" -D" + TableInputFormat.SCAN_COLUMN_FAMILY - + "=,, ..."); + System.err.println(" -D" + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); System.err.println(" -D" + TableInputFormat.SCAN_TIMESTAMP + "="); System.err.println(" -D" + TableInputFormat.SCAN_TIMERANGE_START + "="); System.err.println(" -D" + TableInputFormat.SCAN_TIMERANGE_END + "="); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java index 6c69651d0a43..38959964ef44 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +19,12 @@ import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Facade to create Cells for HFileOutputFormat. The created Cells are of Put type. 
@@ -39,9 +38,9 @@ public class CellCreator { private VisibilityExpressionResolver visExpResolver; public CellCreator(Configuration conf) { - Class clazz = conf.getClass( - VISIBILITY_EXP_RESOLVER_CLASS, DefaultVisibilityExpressionResolver.class, - VisibilityExpressionResolver.class); + Class clazz = + conf.getClass(VISIBILITY_EXP_RESOLVER_CLASS, DefaultVisibilityExpressionResolver.class, + VisibilityExpressionResolver.class); this.visExpResolver = ReflectionUtils.newInstance(clazz, conf); this.visExpResolver.init(); } @@ -67,7 +66,7 @@ public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foff byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, int vlength) throws IOException { return create(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, - timestamp, value, voffset, vlength, (List)null); + timestamp, value, voffset, vlength, (List) null); } /** diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java index 2e7e020986ff..9d567f95a0e0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,16 +22,15 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public class CellSerialization implements Serialization { @@ -60,7 +59,7 @@ public void close() throws IOException { @Override public KeyValue deserialize(Cell ignore) throws IOException { - // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO + // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO return KeyValueUtil.create(this.dis); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java index de961cf35458..5cc1e8cce848 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.io.IOException; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -30,10 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted Cells. 
- * Reads in all Cells from passed Iterator, sorts them, then emits - * Cells in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted Cells. Reads in all Cells from passed Iterator, sorts them, then emits Cells in + * sorted order. If lots of columns per row, it will use lots of memory sorting. * @see HFileOutputFormat2 */ @InterfaceAudience.Public @@ -41,7 +37,7 @@ public class CellSortReducer extends Reducer { protected void reduce(ImmutableBytesWritable row, Iterable kvs, Reducer.Context context) - throws java.io.IOException, InterruptedException { + throws java.io.IOException, InterruptedException { TreeSet map = new TreeSet<>(CellComparator.getInstance()); for (Cell kv : kvs) { try { @@ -52,7 +48,7 @@ protected void reduce(ImmutableBytesWritable row, Iterable kvs, } context.setStatus("Read " + map.getClass()); int index = 0; - for (Cell kv: map) { + for (Cell kv : map) { context.write(row, new MapReduceExtendedCell(kv)); if (++index % 100 == 0) context.setStatus("Wrote " + index); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java index 9344400e4458..f48edadce70f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,9 +45,9 @@ import org.slf4j.LoggerFactory; /** - * Tool used to copy a table to another one which can be on a different setup. - * It is also configurable with a start and time as well as a specification - * of the region server implementation if different from the local cluster. + * Tool used to copy a table to another one which can be on a different setup. It is also + * configurable with a start and time as well as a specification of the region server implementation + * if different from the local cluster. */ @InterfaceAudience.Public public class CopyTable extends Configured implements Tool { @@ -102,8 +101,7 @@ private void initCopyTableMapperReducerJob(Job job, Scan scan) throws IOExceptio /** * Sets up the actual job. - * - * @param args The command line parameters. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. 
*/ @@ -146,20 +144,20 @@ public Job createSubmittableJob(String[] args) throws IOException { scan.withStopRow(Bytes.toBytesBinary(stopRow)); } - if(families != null) { + if (families != null) { String[] fams = families.split(","); - Map cfRenameMap = new HashMap<>(); - for(String fam : fams) { + Map cfRenameMap = new HashMap<>(); + for (String fam : fams) { String sourceCf; - if(fam.contains(":")) { - // fam looks like "sourceCfName:destCfName" - String[] srcAndDest = fam.split(":", 2); - sourceCf = srcAndDest[0]; - String destCf = srcAndDest[1]; - cfRenameMap.put(sourceCf, destCf); + if (fam.contains(":")) { + // fam looks like "sourceCfName:destCfName" + String[] srcAndDest = fam.split(":", 2); + sourceCf = srcAndDest[0]; + String destCf = srcAndDest[1]; + cfRenameMap.put(sourceCf, destCf); } else { - // fam is just "sourceCf" - sourceCf = fam; + // fam is just "sourceCf" + sourceCf = fam; } scan.addFamily(Bytes.toBytes(sourceCf)); } @@ -191,14 +189,14 @@ public Job createSubmittableJob(String[] args) throws IOException { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] " + - "[--new.name=NEW] [--peer.adr=ADR] "); + System.err.println("Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] " + + "[--new.name=NEW] [--peer.adr=ADR] "); System.err.println(); System.err.println("Options:"); System.err.println(" rs.class hbase.regionserver.class of the peer cluster"); @@ -218,18 +216,19 @@ private static void printUsage(final String errorMsg) { System.err.println(" To copy from cf1 to cf2, give sourceCfName:destCfName. 
"); System.err.println(" To keep the same name, just give \"cfName\""); System.err.println(" all.cells also copy delete markers and deleted cells"); - System.err.println(" bulkload Write input into HFiles and bulk load to the destination " - + "table"); + System.err.println( + " bulkload Write input into HFiles and bulk load to the destination " + "table"); System.err.println(" snapshot Copy the data from snapshot to destination table."); System.err.println(); System.err.println("Args:"); System.err.println(" tablename Name of the table to copy"); System.err.println(); System.err.println("Examples:"); - System.err.println(" To copy 'TestTable' to a cluster that uses replication for a 1 hour window:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 " + - "--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable "); + System.err + .println(" To copy 'TestTable' to a cluster that uses replication for a 1 hour window:"); + System.err.println(" $ hbase " + + "org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 " + + "--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable "); System.err.println(" To copy data from 'sourceTableSnapshot' to 'destTable': "); System.err.println(" $ hbase org.apache.hadoop.hbase.mapreduce.CopyTable " + "--snapshot --new.name=destTable sourceTableSnapshot"); @@ -241,8 +240,7 @@ private static void printUsage(final String errorMsg) { + " decreases the round trip time to the server and may increase performance.\n" + " -Dhbase.client.scanner.caching=100\n" + " The following should always be set to false, to prevent writing data twice, which may produce \n" - + " inaccurate results.\n" - + " -Dmapreduce.map.speculative=false"); + + " inaccurate results.\n" + " -Dmapreduce.map.speculative=false"); } private boolean doCommandLine(final String[] args) { @@ -333,7 +331,7 @@ private boolean doCommandLine(final String[] args) { continue; } - if(cmd.startsWith("--snapshot")){ + if (cmd.startsWith("--snapshot")) { readingSnapshot = true; continue; } @@ -393,8 +391,7 @@ private boolean doCommandLine(final String[] args) { /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. * @throws Exception When running the job fails. */ public static void main(String[] args) throws Exception { @@ -419,7 +416,7 @@ public int run(String[] args) throws Exception { LOG.info("command: ./bin/hbase {} {} {}", BulkLoadHFilesTool.NAME, this.bulkloadDir.toString(), this.dstTableName); if (!BulkLoadHFiles.create(getConf()).bulkLoad(TableName.valueOf(dstTableName), bulkloadDir) - .isEmpty()) { + .isEmpty()) { // bulkloadDir is deleted only BulkLoadHFiles was successful so that one can rerun // BulkLoadHFiles. 
FileSystem fs = CommonFSUtils.getCurrentFileSystem(getConf()); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java index 07f05dd79804..df1433086d85 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,13 +25,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Tag; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; @@ -43,6 +39,9 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityLabelOrdinalProvider; import org.apache.hadoop.hbase.security.visibility.VisibilityUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This implementation creates tags by expanding expression using label ordinal. Labels will be @@ -111,12 +110,11 @@ public void init() { LOG.warn("Error closing 'labels' table", ioe); } } - if (connection != null) - try { - connection.close(); - } catch (IOException ioe) { - LOG.warn("Failed close of temporary connection", ioe); - } + if (connection != null) try { + connection.close(); + } catch (IOException ioe) { + LOG.warn("Failed close of temporary connection", ioe); + } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index ed31c8422e7e..fa6957e2ed1a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,39 +27,36 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Driver for hbase mapreduce jobs. Select which to run by passing - * name of job to this main. + * Driver for hbase mapreduce jobs. Select which to run by passing name of job to this main. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable public class Driver { - private Driver() {} + private Driver() { + } public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); - pgd.addClass(RowCounter.NAME, RowCounter.class, - "Count rows in HBase table."); - pgd.addClass(CellCounter.NAME, CellCounter.class, - "Count cells in HBase table."); + pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table."); + pgd.addClass(CellCounter.NAME, CellCounter.class, "Count cells in HBase table."); pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS."); pgd.addClass(Import.NAME, Import.class, "Import data written by Export."); pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format."); - pgd.addClass(BulkLoadHFilesTool.NAME, BulkLoadHFilesTool.class, - "Complete a bulk data load."); + pgd.addClass(BulkLoadHFilesTool.NAME, BulkLoadHFilesTool.class, "Complete a bulk data load."); pgd.addClass(CopyTable.NAME, CopyTable.class, - "Export a table from local cluster to peer cluster."); - pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" + - " data from tables in two different clusters. It" + - " doesn't work for incrementColumnValues'd cells since" + - " timestamp is changed after appending to WAL."); + "Export a table from local cluster to peer cluster."); + pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, + "Compare" + " data from tables in two different clusters. It" + + " doesn't work for incrementColumnValues'd cells since" + + " timestamp is changed after appending to WAL."); pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files."); - pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" + - " the specific snapshot to a given FileSystem."); - pgd.addClass(MobRefReporter.NAME, MobRefReporter.class, "Check the mob cells in a particular " + - "table and cf and confirm that the files they point to are correct."); + pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, + "Export" + " the specific snapshot to a given FileSystem."); + pgd.addClass(MobRefReporter.NAME, MobRefReporter.class, "Check the mob cells in a particular " + + "table and cf and confirm that the files they point to are correct."); - ProgramDriver.class.getMethod("driver", new Class [] {String[].class}). - invoke(pgd, new Object[]{args}); + ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd, + new Object[] { args }); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java index eb0f649e643b..3e02114a3bb0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java @@ -1,33 +1,31 @@ -/** -* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. 
You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.mapreduce.Job; @@ -38,8 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Export an HBase table. - * Writes content to sequence files up in HDFS. Use {@link Import} to read it + * Export an HBase table. Writes content to sequence files up in HDFS. Use {@link Import} to read it * back in again. */ @InterfaceAudience.Public @@ -49,14 +46,12 @@ public class Export extends Configured implements Tool { /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { Triple arguments = ExportUtils.getArgumentsFromCommandLine(conf, args); String tableName = arguments.getFirst().getNameAsString(); Path outputDir = arguments.getThird(); @@ -66,12 +61,13 @@ public static Job createSubmittableJob(Configuration conf, String[] args) // Set optional scan parameters Scan s = arguments.getSecond(); IdentityTableMapper.initJob(tableName, s, IdentityTableMapper.class, job); - // No reducers. Just write straight to output files. + // No reducers. Just write straight to output files. job.setNumReduceTasks(0); job.setOutputFormatClass(SequenceFileOutputFormat.class); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(Result.class); - FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't have a default fs. 
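For context (an illustrative sketch, not part of this patch): Export is likewise a Tool, so the job built by createSubmittableJob can be submitted from code as well as from the command line. The table name and output directory below are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.Export;
import org.apache.hadoop.util.ToolRunner;

public class ExportLauncher {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Arguments follow the usage string: <tablename> <outputdir> [<versions> [<starttime> [<endtime>]]]
    int exitCode = ToolRunner.run(conf, new Export(),
      new String[] { "TestTable", "/export/TestTable" });
    System.exit(exitCode);
  }
}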
+ FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't + // have a default fs. return job; } @@ -80,7 +76,7 @@ public int run(String[] args) throws Exception { if (!ExportUtils.isValidArguements(args)) { ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.getLength(args)); System.err.println(" -D " + JOB_NAME_CONF_KEY - + "=jobName - use the specified mapreduce job name for the export"); + + "=jobName - use the specified mapreduce job name for the export"); System.err.println("For MR performance consider the following properties:"); System.err.println(" -D mapreduce.map.speculative=false"); System.err.println(" -D mapreduce.reduce.speculative=false"); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java index 568c47fd6e53..f0653fa68a0b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +20,11 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.IncompatibleFilterException; @@ -40,10 +35,13 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Some helper methods are used by {@link org.apache.hadoop.hbase.mapreduce.Export} - * and org.apache.hadoop.hbase.coprocessor.Export (in hbase-endpooint). + * Some helper methods are used by {@link org.apache.hadoop.hbase.mapreduce.Export} and + * org.apache.hadoop.hbase.coprocessor.Export (in hbase-endpooint). */ @InterfaceAudience.Private public final class ExportUtils { @@ -52,37 +50,39 @@ public final class ExportUtils { public static final String EXPORT_BATCHING = "hbase.export.scanner.batch"; public static final String EXPORT_CACHING = "hbase.export.scanner.caching"; public static final String EXPORT_VISIBILITY_LABELS = "hbase.export.visibility.labels"; + /** * Common usage for other export tools. - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ public static void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]\n"); + System.err.println("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]\n"); System.err.println(" Note: -D properties will be applied to the conf used. 
"); System.err.println(" For example: "); System.err.println(" -D " + FileOutputFormat.COMPRESS + "=true"); - System.err.println(" -D " + FileOutputFormat.COMPRESS_CODEC + "=org.apache.hadoop.io.compress.GzipCodec"); + System.err.println( + " -D " + FileOutputFormat.COMPRESS_CODEC + "=org.apache.hadoop.io.compress.GzipCodec"); System.err.println(" -D " + FileOutputFormat.COMPRESS_TYPE + "=BLOCK"); System.err.println(" Additionally, the following SCAN properties can be specified"); System.err.println(" to control/limit what is exported.."); - System.err.println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); + System.err + .println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); System.err.println(" -D " + RAW_SCAN + "=true"); System.err.println(" -D " + TableInputFormat.SCAN_ROW_START + "="); System.err.println(" -D " + TableInputFormat.SCAN_ROW_STOP + "="); System.err.println(" -D " + HConstants.HBASE_CLIENT_SCANNER_CACHING + "=100"); System.err.println(" -D " + EXPORT_VISIBILITY_LABELS + "="); System.err.println("For tables with very wide rows consider setting the batch size as below:\n" - + " -D " + EXPORT_BATCHING + "=10\n" - + " -D " + EXPORT_CACHING + "=100"); + + " -D " + EXPORT_BATCHING + "=10\n" + " -D " + EXPORT_CACHING + "=100"); } private static Filter getExportFilter(String[] args) { Filter exportFilter; - String filterCriteria = (args.length > 5) ? args[5]: null; + String filterCriteria = (args.length > 5) ? args[5] : null; if (filterCriteria == null) return null; if (filterCriteria.startsWith("^")) { String regexPattern = filterCriteria.substring(1, filterCriteria.length()); @@ -97,23 +97,24 @@ public static boolean isValidArguements(String[] args) { return args != null && args.length >= 2; } - public static Triple getArgumentsFromCommandLine( - Configuration conf, String[] args) throws IOException { + public static Triple getArgumentsFromCommandLine(Configuration conf, + String[] args) throws IOException { if (!isValidArguements(args)) { return null; } - return new Triple<>(TableName.valueOf(args[0]), getScanFromCommandLine(conf, args), new Path(args[1])); + return new Triple<>(TableName.valueOf(args[0]), getScanFromCommandLine(conf, args), + new Path(args[1])); } static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOException { Scan s = new Scan(); // Optional arguments. // Set Scan Versions - int versions = args.length > 2? Integer.parseInt(args[2]): 1; + int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1; s.readVersions(versions); // Set Scan Range - long startTime = args.length > 3? Long.parseLong(args[3]): 0L; - long endTime = args.length > 4? Long.parseLong(args[4]): Long.MAX_VALUE; + long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L; + long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE; s.setTimeRange(startTime, endTime); // Set cache blocks s.setCacheBlocks(false); @@ -134,8 +135,8 @@ static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOE } // Set RowFilter or Prefix Filter if applicable. 
Filter exportFilter = getExportFilter(args); - if (exportFilter!= null) { - LOG.info("Setting Scan Filter for Export."); + if (exportFilter != null) { + LOG.info("Setting Scan Filter for Export."); s.setFilter(exportFilter); } List labels = null; @@ -163,9 +164,8 @@ static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOE LOG.error("Caching could not be set", e); } } - LOG.info("versions=" + versions + ", starttime=" + startTime - + ", endtime=" + endTime + ", keepDeletedCells=" + raw - + ", visibility labels=" + labels); + LOG.info("versions=" + versions + ", starttime=" + startTime + ", endtime=" + endTime + + ", keepDeletedCells=" + raw + ", visibility labels=" + labels); return s; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java index 1909b2d57b38..61ad4a944714 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,6 @@ import java.io.IOException; import java.util.ArrayList; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -31,74 +28,68 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; +import org.apache.yetus.audience.InterfaceAudience; /** * Extract grouping columns from input record. */ @InterfaceAudience.Public -public class GroupingTableMapper -extends TableMapper implements Configurable { +public class GroupingTableMapper extends TableMapper + implements Configurable { /** - * JobConf parameter to specify the columns used to produce the key passed to - * collect from the map phase. + * JobConf parameter to specify the columns used to produce the key passed to collect from the map + * phase. */ - public static final String GROUP_COLUMNS = - "hbase.mapred.groupingtablemap.columns"; + public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; /** The grouping columns. */ - protected byte [][] columns; + protected byte[][] columns; /** The current configuration. */ private Configuration conf = null; /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table The table to be processed. - * @param scan The scan with the columns etc. - * @param groupColumns A space separated list of columns used to form the - * key used in collect. - * @param mapper The mapper class. - * @param job The current job. + * @param scan The scan with the columns etc. + * @param groupColumns A space separated list of columns used to form the key used in collect. + * @param mapper The mapper class. + * @param job The current job. * @throws IOException When setting up the job fails. 
*/ @SuppressWarnings("unchecked") public static void initJob(String table, Scan scan, String groupColumns, - Class mapper, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(table, scan, mapper, - ImmutableBytesWritable.class, Result.class, job); + Class mapper, Job job) throws IOException { + TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, + Result.class, job); job.getConfiguration().set(GROUP_COLUMNS, groupColumns); } /** - * Extract the grouping columns from value to construct a new key. Pass the - * new key and value to reduce. If any of the grouping columns are not found - * in the value, the record is skipped. - * - * @param key The current key. - * @param value The current value. - * @param context The current context. + * Extract the grouping columns from value to construct a new key. Pass the new key and value to + * reduce. If any of the grouping columns are not found in the value, the record is skipped. + * @param key The current key. + * @param value The current value. + * @param context The current context. * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ @Override public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { byte[][] keyVals = extractKeyValues(value); - if(keyVals != null) { + if (keyVals != null) { ImmutableBytesWritable tKey = createGroupKey(keyVals); context.write(tKey, value); } } /** - * Extract columns values from the current record. This method returns - * null if any of the columns are not found. + * Extract columns values from the current record. This method returns null if any of the columns + * are not found. *

    * Override this method if you want to deal with nulls differently. - * - * @param r The current values. + * @param r The current values. * @return Array of byte values. */ protected byte[][] extractKeyValues(Result r) { @@ -106,9 +97,9 @@ protected byte[][] extractKeyValues(Result r) { ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { - for (Cell value: r.listCells()) { - byte [] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), - CellUtil.cloneQualifier(value)); + for (Cell value : r.listCells()) { + byte[] column = + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)); for (int i = 0; i < numCols; i++) { if (Bytes.equals(column, columns[i])) { foundList.add(CellUtil.cloneValue(value)); @@ -116,7 +107,7 @@ protected byte[][] extractKeyValues(Result r) { } } } - if(foundList.size() == numCols) { + if (foundList.size() == numCols) { keyVals = foundList.toArray(new byte[numCols][]); } } @@ -127,17 +118,16 @@ protected byte[][] extractKeyValues(Result r) { * Create a key by concatenating multiple column values. *

    * Override this function in order to produce different types of keys. - * - * @param vals The current key/values. + * @param vals The current key/values. * @return A key generated by concatenating multiple column values. */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { - if(vals == null) { + if (vals == null) { return null; } - StringBuilder sb = new StringBuilder(); - for(int i = 0; i < vals.length; i++) { - if(i > 0) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < vals.length; i++) { + if (i > 0) { sb.append(" "); } sb.append(Bytes.toString(vals[i])); @@ -147,7 +137,6 @@ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -158,17 +147,15 @@ public Configuration getConf() { /** * Sets the configuration. This is used to set up the grouping details. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * @param configuration The configuration to set. + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { this.conf = configuration; String[] cols = conf.get(GROUP_COLUMNS, "").split(" "); columns = new byte[cols.length][]; - for(int i = 0; i < cols.length; i++) { + for (int i = 0; i < cols.length; i++) { columns[i] = Bytes.toBytes(cols[i]); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java index 03254feec042..24e164c550fd 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,9 +41,8 @@ import org.slf4j.LoggerFactory; /** - * Simple MR input format for HFiles. - * This code was borrowed from Apache Crunch project. - * Updated to the recent version of HBase. + * Simple MR input format for HFiles. This code was borrowed from Apache Crunch project. Updated to + * the recent version of HBase. */ @InterfaceAudience.Private public class HFileInputFormat extends FileInputFormat { @@ -51,9 +50,9 @@ public class HFileInputFormat extends FileInputFormat { private static final Logger LOG = LoggerFactory.getLogger(HFileInputFormat.class); /** - * File filter that removes all "hidden" files. This might be something worth removing from - * a more general purpose utility; it accounts for the presence of metadata files created - * in the way we're doing exports. + * File filter that removes all "hidden" files. This might be something worth removing from a more + * general purpose utility; it accounts for the presence of metadata files created in the way + * we're doing exports. 
*/ static final PathFilter HIDDEN_FILE_FILTER = new PathFilter() { @Override @@ -95,7 +94,6 @@ public void initialize(InputSplit split, TaskAttemptContext context) } - @Override public boolean nextKeyValue() throws IOException, InterruptedException { boolean hasNext; @@ -161,8 +159,8 @@ protected List listStatus(JobContext job) throws IOException { } @Override - public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { return new HFileRecordReader(); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index d68ee88fe4a8..c8233be1eeb5 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -93,18 +93,17 @@ import org.slf4j.LoggerFactory; /** - * Writes HFiles. Passed Cells must arrive in order. - * Writes current time as the sequence id for the file. Sets the major compacted - * attribute on created {@link HFile}s. Calling write(null,null) will forcibly roll - * all HFiles being written. + * Writes HFiles. Passed Cells must arrive in order. Writes current time as the sequence id for the + * file. Sets the major compacted attribute on created {@link HFile}s. Calling write(null,null) will + * forcibly roll all HFiles being written. *

    - * Using this class as part of a MapReduce job is best done - * using {@link #configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}. + * Using this class as part of a MapReduce job is best done using + * {@link #configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}. */ @InterfaceAudience.Public -public class HFileOutputFormat2 - extends FileOutputFormat { +public class HFileOutputFormat2 extends FileOutputFormat { private static final Logger LOG = LoggerFactory.getLogger(HFileOutputFormat2.class); + static class TableInfo { private TableDescriptor tableDesctiptor; private RegionLocator regionLocator; @@ -135,12 +134,9 @@ protected static byte[] combineTableNameSuffix(byte[] tableName, byte[] suffix) // These should not be changed by the client. static final String COMPRESSION_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.compression"; - static final String BLOOM_TYPE_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.bloomtype"; - static final String BLOOM_PARAM_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.bloomparam"; - static final String BLOCK_SIZE_FAMILIES_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.blocksize"; + static final String BLOOM_TYPE_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.bloomtype"; + static final String BLOOM_PARAM_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.bloomparam"; + static final String BLOCK_SIZE_FAMILIES_CONF_KEY = "hbase.mapreduce.hfileoutputformat.blocksize"; static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY = "hbase.mapreduce.hfileoutputformat.families.datablock.encoding"; @@ -159,26 +155,24 @@ protected static byte[] combineTableNameSuffix(byte[] tableName, byte[] suffix) public static final String LOCALITY_SENSITIVE_CONF_KEY = "hbase.bulkload.locality.sensitive.enabled"; private static final boolean DEFAULT_LOCALITY_SENSITIVE = true; - static final String OUTPUT_TABLE_NAME_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.table.name"; + static final String OUTPUT_TABLE_NAME_CONF_KEY = "hbase.mapreduce.hfileoutputformat.table.name"; static final String MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY = - "hbase.mapreduce.use.multi.table.hfileoutputformat"; + "hbase.mapreduce.use.multi.table.hfileoutputformat"; - public static final String REMOTE_CLUSTER_CONF_PREFIX = - "hbase.hfileoutputformat.remote.cluster."; + public static final String REMOTE_CLUSTER_CONF_PREFIX = "hbase.hfileoutputformat.remote.cluster."; public static final String REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY = - REMOTE_CLUSTER_CONF_PREFIX + "zookeeper.quorum"; + REMOTE_CLUSTER_CONF_PREFIX + "zookeeper.quorum"; public static final String REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY = - REMOTE_CLUSTER_CONF_PREFIX + "zookeeper." + HConstants.CLIENT_PORT_STR; + REMOTE_CLUSTER_CONF_PREFIX + "zookeeper." 
+ HConstants.CLIENT_PORT_STR; public static final String REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY = - REMOTE_CLUSTER_CONF_PREFIX + HConstants.ZOOKEEPER_ZNODE_PARENT; + REMOTE_CLUSTER_CONF_PREFIX + HConstants.ZOOKEEPER_ZNODE_PARENT; public static final String STORAGE_POLICY_PROPERTY = HStore.BLOCK_STORAGE_POLICY_KEY; public static final String STORAGE_POLICY_PROPERTY_CF_PREFIX = STORAGE_POLICY_PROPERTY + "."; @Override - public RecordWriter getRecordWriter( - final TaskAttemptContext context) throws IOException, InterruptedException { + public RecordWriter + getRecordWriter(final TaskAttemptContext context) throws IOException, InterruptedException { return createRecordWriter(context, this.getOutputCommitter(context)); } @@ -187,32 +181,32 @@ protected static byte[] getTableNameSuffixedWithFamily(byte[] tableName, byte[] } static RecordWriter createRecordWriter( - final TaskAttemptContext context, final OutputCommitter committer) throws IOException { + final TaskAttemptContext context, final OutputCommitter committer) throws IOException { // Get the path of the temporary output file - final Path outputDir = ((FileOutputCommitter)committer).getWorkPath(); + final Path outputDir = ((FileOutputCommitter) committer).getWorkPath(); final Configuration conf = context.getConfiguration(); final boolean writeMultipleTables = - conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); + conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); final String writeTableNames = conf.get(OUTPUT_TABLE_NAME_CONF_KEY); if (writeTableNames == null || writeTableNames.isEmpty()) { throw new IllegalArgumentException("" + OUTPUT_TABLE_NAME_CONF_KEY + " cannot be empty"); } final FileSystem fs = outputDir.getFileSystem(conf); // These configs. are from hbase-*.xml - final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, - HConstants.DEFAULT_MAX_FILE_SIZE); - // Invented config. Add to hbase-*.xml if other than default compression. - final String defaultCompressionStr = conf.get("hfile.compression", - Compression.Algorithm.NONE.getName()); + final long maxsize = + conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE); + // Invented config. Add to hbase-*.xml if other than default compression. + final String defaultCompressionStr = + conf.get("hfile.compression", Compression.Algorithm.NONE.getName()); final Algorithm defaultCompression = HFileWriterImpl.compressionByName(defaultCompressionStr); String compressionStr = conf.get(COMPRESSION_OVERRIDE_CONF_KEY); - final Algorithm overriddenCompression = compressionStr != null ? - Compression.getCompressionAlgorithmByName(compressionStr): null; - final boolean compactionExclude = conf.getBoolean( - "hbase.mapreduce.hfileoutputformat.compaction.exclude", false); - final Set allTableNames = Arrays.stream(writeTableNames.split( - Bytes.toString(tableSeparator))).collect(Collectors.toSet()); + final Algorithm overriddenCompression = + compressionStr != null ? 
Compression.getCompressionAlgorithmByName(compressionStr) : null; + final boolean compactionExclude = + conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", false); + final Set allTableNames = Arrays + .stream(writeTableNames.split(Bytes.toString(tableSeparator))).collect(Collectors.toSet()); // create a map from column family to the compression algorithm final Map compressionMap = createFamilyCompressionMap(conf); @@ -221,10 +215,10 @@ static RecordWriter createRecordWrit final Map blockSizeMap = createFamilyBlockSizeMap(conf); String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY); - final Map datablockEncodingMap - = createFamilyDataBlockEncodingMap(conf); - final DataBlockEncoding overriddenEncoding = dataBlockEncodingStr != null ? - DataBlockEncoding.valueOf(dataBlockEncodingStr) : null; + final Map datablockEncodingMap = + createFamilyDataBlockEncodingMap(conf); + final DataBlockEncoding overriddenEncoding = + dataBlockEncodingStr != null ? DataBlockEncoding.valueOf(dataBlockEncodingStr) : null; return new RecordWriter() { // Map of families to writers and how much has been output on the writer. @@ -250,8 +244,8 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { tableNameBytes = TableName.valueOf(tableNameBytes).getNameWithNamespaceInclAsString() .getBytes(Charset.defaultCharset()); if (!allTableNames.contains(Bytes.toString(tableNameBytes))) { - throw new IllegalArgumentException("TableName " + Bytes.toString(tableNameBytes) + - " not expected"); + throw new IllegalArgumentException( + "TableName " + Bytes.toString(tableNameBytes) + " not expected"); } } byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableNameBytes, family); @@ -273,7 +267,7 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { // This can only happen once a row is finished though if (wl != null && wl.written + length >= maxsize - && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0) { + && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0) { rollWriters(wl); } @@ -284,8 +278,9 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { HRegionLocation loc = null; String tableName = Bytes.toString(tableNameBytes); if (tableName != null) { - try (Connection connection = ConnectionFactory.createConnection( - createRemoteClusterConf(conf)); + try ( + Connection connection = + ConnectionFactory.createConnection(createRemoteClusterConf(conf)); RegionLocator locator = connection.getRegionLocator(TableName.valueOf(tableName))) { loc = locator.getRegionLocation(rowKey); @@ -331,6 +326,7 @@ private Path getTableRelativePath(byte[] tableNameBytes) { } return tableRelPath; } + private void rollWriters(WriterLength writerLength) throws IOException { if (writerLength != null) { closeWriter(writerLength); @@ -343,8 +339,8 @@ private void rollWriters(WriterLength writerLength) throws IOException { private void closeWriter(WriterLength wl) throws IOException { if (wl.writer != null) { - LOG.info("Writer=" + wl.writer.getPath() + - ((wl.written == 0)? "": ", wrote=" + wl.written)); + LOG.info( + "Writer=" + wl.writer.getPath() + ((wl.written == 0) ? 
"" : ", wrote=" + wl.written)); close(wl.writer); wl.writer = null; } @@ -366,9 +362,9 @@ private Configuration createRemoteClusterConf(Configuration conf) { for (Entry entry : conf) { String key = entry.getKey(); - if (REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) || - REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) || - REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key)) { + if (REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) + || REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) + || REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key)) { // Handled them above continue; } @@ -388,15 +384,15 @@ private Configuration createRemoteClusterConf(Configuration conf) { * Create a new StoreFile.Writer. * @return A WriterLength, containing a new StoreFile.Writer. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED", - justification="Not important") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "BX_UNBOXING_IMMEDIATELY_REBOXED", + justification = "Not important") private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration conf, InetSocketAddress[] favoredNodes) throws IOException { byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableName, family); Path familydir = new Path(outputDir, Bytes.toString(family)); if (writeMultipleTables) { familydir = new Path(outputDir, - new Path(getTableRelativePath(tableName), Bytes.toString(family))); + new Path(getTableRelativePath(tableName), Bytes.toString(family))); } WriterLength wl = new WriterLength(); Algorithm compression = overriddenCompression; @@ -424,13 +420,13 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration HFileContext hFileContext = contextBuilder.build(); if (null == favoredNodes) { - wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs) - .withOutputDir(familydir).withBloomType(bloomType) - .withFileContext(hFileContext).build(); + wl.writer = + new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs).withOutputDir(familydir) + .withBloomType(bloomType).withFileContext(hFileContext).build(); } else { wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new HFileSystem(fs)) - .withOutputDir(familydir).withBloomType(bloomType) - .withFileContext(hFileContext).withFavoredNodes(favoredNodes).build(); + .withOutputDir(familydir).withBloomType(bloomType).withFileContext(hFileContext) + .withFavoredNodes(favoredNodes).build(); } this.writers.put(tableAndFamily, wl); @@ -439,10 +435,8 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration private void close(final StoreFileWriter w) throws IOException { if (w != null) { - w.appendFileInfo(BULKLOAD_TIME_KEY, - Bytes.toBytes(EnvironmentEdgeManager.currentTime())); - w.appendFileInfo(BULKLOAD_TASK_KEY, - Bytes.toBytes(context.getTaskAttemptID().toString())); + w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime())); + w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString())); w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true)); w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude)); w.appendTrackedTimestampsToMetadata(); @@ -452,7 +446,7 @@ private void close(final StoreFileWriter w) throws IOException { @Override public void close(TaskAttemptContext c) throws IOException, InterruptedException { - for (WriterLength wl: this.writers.values()) { + for (WriterLength wl : 
this.writers.values()) { close(wl.writer); } } @@ -468,9 +462,8 @@ static void configureStoragePolicy(final Configuration conf, final FileSystem fs return; } - String policy = - conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily), - conf.get(STORAGE_POLICY_PROPERTY)); + String policy = conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily), + conf.get(STORAGE_POLICY_PROPERTY)); CommonFSUtils.setStoragePolicy(fs, cfPath, policy); } @@ -483,22 +476,20 @@ static class WriterLength { } /** - * Return the start keys of all of the regions in this table, - * as a list of ImmutableBytesWritable. + * Return the start keys of all of the regions in this table, as a list of ImmutableBytesWritable. */ private static List getRegionStartKeys(List regionLocators, - boolean writeMultipleTables) - throws IOException { + boolean writeMultipleTables) throws IOException { ArrayList ret = new ArrayList<>(); - for(RegionLocator regionLocator : regionLocators) { + for (RegionLocator regionLocator : regionLocators) { TableName tableName = regionLocator.getName(); LOG.info("Looking up current regions for table " + tableName); byte[][] byteKeys = regionLocator.getStartKeys(); for (byte[] byteKey : byteKeys) { - byte[] fullKey = byteKey; //HFileOutputFormat2 use case + byte[] fullKey = byteKey; // HFileOutputFormat2 use case if (writeMultipleTables) { - //MultiTableHFileOutputFormat use case + // MultiTableHFileOutputFormat use case fullKey = combineTableNameSuffix(tableName.getName(), byteKey); } if (LOG.isDebugEnabled()) { @@ -511,8 +502,8 @@ private static List getRegionStartKeys(List - *

  • Inspects the table to configure a total order partitioner
  • - *
  • Uploads the partitions file to the cluster and adds it to the DistributedCache
  • - *
  • Sets the number of reduce tasks to match the current number of regions
  • - *
  • Sets the output key/value class to match HFileOutputFormat2's requirements
  • - *
  • Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or - * PutSortReducer)
  • - *
  • Sets the HBase cluster key to load region locations for locality-sensitive
  • + *
  • Inspects the table to configure a total order partitioner
  • + *
  • Uploads the partitions file to the cluster and adds it to the DistributedCache
  • + *
  • Sets the number of reduce tasks to match the current number of regions
  • + *
  • Sets the output key/value class to match HFileOutputFormat2's requirements
  • + *
  • Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or + * PutSortReducer)
  • + *
  • Sets the HBase cluster key to load region locations for locality-sensitive
  • *
* The user should be sure to set the map output value class to either KeyValue or Put before * running this function. @@ -576,15 +565,14 @@ public static void configureIncrementalLoad(Job job, Table table, RegionLocator } /** - * Configure a MapReduce Job to perform an incremental load into the given - * table. This + * Configure a MapReduce Job to perform an incremental load into the given table. This *
<ul>
- *   <li>Inspects the table to configure a total order partitioner</li>
- *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
- *   <li>Sets the number of reduce tasks to match the current number of regions</li>
- *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
- *   <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
- *   PutSortReducer)</li>
+ * <li>Inspects the table to configure a total order partitioner</li>
+ * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
+ * <li>Sets the number of reduce tasks to match the current number of regions</li>
+ * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
+ * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
+ * PutSortReducer)</li>
* </ul>
* The user should be sure to set the map output value class to either KeyValue or Put before * running this function. @@ -626,8 +614,8 @@ static void configureIncrementalLoad(Job job, List multiTableInfo, } conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) { LOG.info("bulkload locality sensitive enabled"); @@ -638,46 +626,46 @@ static void configureIncrementalLoad(Job job, List multiTableInfo, List regionLocators = new ArrayList<>(multiTableInfo.size()); List tableDescriptors = new ArrayList<>(multiTableInfo.size()); - for(TableInfo tableInfo : multiTableInfo) { + for (TableInfo tableInfo : multiTableInfo) { regionLocators.add(tableInfo.getRegionLocator()); - String tn = writeMultipleTables? - tableInfo.getRegionLocator().getName().getNameWithNamespaceInclAsString(): - tableInfo.getRegionLocator().getName().getNameAsString(); + String tn = writeMultipleTables + ? tableInfo.getRegionLocator().getName().getNameWithNamespaceInclAsString() + : tableInfo.getRegionLocator().getName().getNameAsString(); allTableNames.add(tn); tableDescriptors.add(tableInfo.getTableDescriptor()); } // Record tablenames for creating writer by favored nodes, and decoding compression, // block size and other attributes of columnfamily per table - conf.set(OUTPUT_TABLE_NAME_CONF_KEY, StringUtils.join(allTableNames, Bytes - .toString(tableSeparator))); + conf.set(OUTPUT_TABLE_NAME_CONF_KEY, + StringUtils.join(allTableNames, Bytes.toString(tableSeparator))); List startKeys = - getRegionStartKeys(regionLocators, writeMultipleTables); + getRegionStartKeys(regionLocators, writeMultipleTables); // Use table's region boundaries for TOP split points. 
- LOG.info("Configuring " + startKeys.size() + " reduce partitions " + - "to match current region count for all tables"); + LOG.info("Configuring " + startKeys.size() + " reduce partitions " + + "to match current region count for all tables"); job.setNumReduceTasks(startKeys.size()); configurePartitioner(job, startKeys, writeMultipleTables); // Set compression algorithms based on column families - conf.set(COMPRESSION_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(compressionDetails, - tableDescriptors)); - conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(blockSizeDetails, - tableDescriptors)); - conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(bloomTypeDetails, - tableDescriptors)); - conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(bloomParamDetails, - tableDescriptors)); + conf.set(COMPRESSION_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(compressionDetails, tableDescriptors)); + conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(blockSizeDetails, tableDescriptors)); + conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(bloomTypeDetails, tableDescriptors)); + conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(bloomParamDetails, tableDescriptors)); conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(dataBlockEncodingDetails, tableDescriptors)); + serializeColumnFamilyAttribute(dataBlockEncodingDetails, tableDescriptors)); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); LOG.info("Incremental output configured for tables: " + StringUtils.join(allTableNames, ",")); } - public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDescriptor) throws - IOException { + public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDescriptor) + throws IOException { Configuration conf = job.getConfiguration(); job.setOutputKeyClass(ImmutableBytesWritable.class); @@ -690,15 +678,15 @@ public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDes conf.set(OUTPUT_TABLE_NAME_CONF_KEY, tableDescriptor.getTableName().getNameAsString()); // Set compression algorithms based on column families conf.set(COMPRESSION_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(compressionDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(compressionDetails, singleTableDescriptor)); conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(blockSizeDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(blockSizeDetails, singleTableDescriptor)); conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(bloomTypeDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(bloomTypeDetails, singleTableDescriptor)); conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(bloomParamDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(bloomParamDetails, singleTableDescriptor)); conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(dataBlockEncodingDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(dataBlockEncodingDetails, singleTableDescriptor)); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); @@ -707,21 +695,16 @@ public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDes /** * Configure HBase cluster key for remote cluster to load region location for locality-sensitive - * if it's 
enabled. - * It's not necessary to call this method explicitly when the cluster key for HBase cluster to be - * used to load region location is configured in the job configuration. - * Call this method when another HBase cluster key is configured in the job configuration. - * For example, you should call when you load data from HBase cluster A using - * {@link TableInputFormat} and generate hfiles for HBase cluster B. - * Otherwise, HFileOutputFormat2 fetch location from cluster A and locality-sensitive won't - * working correctly. + * if it's enabled. It's not necessary to call this method explicitly when the cluster key for + * HBase cluster to be used to load region location is configured in the job configuration. Call + * this method when another HBase cluster key is configured in the job configuration. For example, + * you should call when you load data from HBase cluster A using {@link TableInputFormat} and + * generate hfiles for HBase cluster B. Otherwise, HFileOutputFormat2 fetch location from cluster + * A and locality-sensitive won't working correctly. * {@link #configureIncrementalLoad(Job, Table, RegionLocator)} calls this method using - * {@link Table#getConfiguration} as clusterConf. - * See HBASE-25608. - * + * {@link Table#getConfiguration} as clusterConf. See HBASE-25608. * @param job which has configuration to be updated * @param clusterConf which contains cluster key of the HBase cluster to be locality-sensitive - * * @see #configureIncrementalLoad(Job, Table, RegionLocator) * @see #LOCALITY_SENSITIVE_CONF_KEY * @see #REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY @@ -736,31 +719,28 @@ public static void configureRemoteCluster(Job job, Configuration clusterConf) { } final String quorum = clusterConf.get(HConstants.ZOOKEEPER_QUORUM); - final int clientPort = clusterConf.getInt( - HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); - final String parent = clusterConf.get( - HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + final int clientPort = clusterConf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, + HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); + final String parent = clusterConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, + HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); conf.set(REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY, quorum); conf.setInt(REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY, clientPort); conf.set(REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY, parent); - LOG.info("ZK configs for remote cluster of bulkload is configured: " + - quorum + ":" + clientPort + "/" + parent); + LOG.info("ZK configs for remote cluster of bulkload is configured: " + quorum + ":" + clientPort + + "/" + parent); } /** - * Runs inside the task to deserialize column family to compression algorithm - * map from the configuration. - * + * Runs inside the task to deserialize column family to compression algorithm map from the + * configuration. 
* @param conf to read the serialized values from * @return a map from column family to the configured compression algorithm */ @InterfaceAudience.Private - static Map createFamilyCompressionMap(Configuration - conf) { - Map stringMap = createFamilyConfValueMap(conf, - COMPRESSION_FAMILIES_CONF_KEY); + static Map createFamilyCompressionMap(Configuration conf) { + Map stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY); Map compressionMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); @@ -770,16 +750,14 @@ static Map createFamilyCompressionMap(Configuration } /** - * Runs inside the task to deserialize column family to bloom filter type - * map from the configuration. - * + * Runs inside the task to deserialize column family to bloom filter type map from the + * configuration. * @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter type */ @InterfaceAudience.Private static Map createFamilyBloomTypeMap(Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - BLOOM_TYPE_FAMILIES_CONF_KEY); + Map stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY); Map bloomTypeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { BloomType bloomType = BloomType.valueOf(e.getValue()); @@ -789,9 +767,8 @@ static Map createFamilyBloomTypeMap(Configuration conf) { } /** - * Runs inside the task to deserialize column family to bloom filter param - * map from the configuration. - * + * Runs inside the task to deserialize column family to bloom filter param map from the + * configuration. * @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter param */ @@ -801,16 +778,13 @@ static Map createFamilyBloomParamMap(Configuration conf) { } /** - * Runs inside the task to deserialize column family to block size - * map from the configuration. - * + * Runs inside the task to deserialize column family to block size map from the configuration. * @param conf to read the serialized values from * @return a map from column family to the configured block size */ @InterfaceAudience.Private static Map createFamilyBlockSizeMap(Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - BLOCK_SIZE_FAMILIES_CONF_KEY); + Map stringMap = createFamilyConfValueMap(conf, BLOCK_SIZE_FAMILIES_CONF_KEY); Map blockSizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Integer blockSize = Integer.parseInt(e.getValue()); @@ -820,18 +794,16 @@ static Map createFamilyBlockSizeMap(Configuration conf) { } /** - * Runs inside the task to deserialize column family to data block encoding - * type map from the configuration. - * + * Runs inside the task to deserialize column family to data block encoding type map from the + * configuration. 
* @param conf to read the serialized values from - * @return a map from column family to HFileDataBlockEncoder for the - * configured data block type for the family + * @return a map from column family to HFileDataBlockEncoder for the configured data block type + * for the family */ @InterfaceAudience.Private - static Map createFamilyDataBlockEncodingMap( - Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - DATABLOCK_ENCODING_FAMILIES_CONF_KEY); + static Map createFamilyDataBlockEncodingMap(Configuration conf) { + Map stringMap = + createFamilyConfValueMap(conf, DATABLOCK_ENCODING_FAMILIES_CONF_KEY); Map encoderMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { encoderMap.put(e.getKey(), DataBlockEncoding.valueOf((e.getValue()))); @@ -841,13 +813,11 @@ static Map createFamilyDataBlockEncodingMap( /** * Run inside the task to deserialize column family to given conf value map. - * * @param conf to read the serialized values from * @param confName conf key to read from the configuration * @return a map of column family to the given configuration value */ - private static Map createFamilyConfValueMap( - Configuration conf, String confName) { + private static Map createFamilyConfValueMap(Configuration conf, String confName) { Map confValMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); String confVal = conf.get(confName, ""); for (String familyConf : confVal.split("&")) { @@ -857,7 +827,7 @@ private static Map createFamilyConfValueMap( } try { confValMap.put(Bytes.toBytes(URLDecoder.decode(familySplit[0], "UTF-8")), - URLDecoder.decode(familySplit[1], "UTF-8")); + URLDecoder.decode(familySplit[1], "UTF-8")); } catch (UnsupportedEncodingException e) { // will not happen with UTF-8 encoding throw new AssertionError(e); @@ -870,15 +840,13 @@ private static Map createFamilyConfValueMap( * Configure job with a TotalOrderPartitioner, partitioning against * splitPoints. Cleans up the partitions file after job exists. 
*/ - static void configurePartitioner(Job job, List splitPoints, boolean - writeMultipleTables) - throws IOException { + static void configurePartitioner(Job job, List splitPoints, + boolean writeMultipleTables) throws IOException { Configuration conf = job.getConfiguration(); // create the partitions file FileSystem fs = FileSystem.get(conf); String hbaseTmpFsDir = - conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, - fs.getHomeDirectory() + "/hbase-staging"); + conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging"); Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID()); fs.makeQualified(partitionsPath); writePartitions(conf, partitionsPath, splitPoints, writeMultipleTables); @@ -889,12 +857,11 @@ static void configurePartitioner(Job job, List splitPoin TotalOrderPartitioner.setPartitionFile(conf, partitionsPath); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = - "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") + @edu.umd.cs.findbugs.annotations.SuppressWarnings( + value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") @InterfaceAudience.Private static String serializeColumnFamilyAttribute(Function fn, - List allTables) - throws UnsupportedEncodingException { + List allTables) throws UnsupportedEncodingException { StringBuilder attributeValue = new StringBuilder(); int i = 0; for (TableDescriptor tableDescriptor : allTables) { @@ -907,9 +874,9 @@ static String serializeColumnFamilyAttribute(Function 0) { attributeValue.append('&'); } - attributeValue.append(URLEncoder.encode( - Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(), - familyDescriptor.getName())), "UTF-8")); + attributeValue.append(URLEncoder + .encode(Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(), + familyDescriptor.getName())), "UTF-8")); attributeValue.append('='); attributeValue.append(URLEncoder.encode(fn.apply(familyDescriptor), "UTF-8")); } @@ -919,24 +886,24 @@ static String serializeColumnFamilyAttribute(Function compressionDetails = familyDescriptor -> - familyDescriptor.getCompressionType().getName(); + static Function compressionDetails = + familyDescriptor -> familyDescriptor.getCompressionType().getName(); /** - * Serialize column family to block size map to configuration. Invoked while - * configuring the MR job for incremental load. + * Serialize column family to block size map to configuration. Invoked while configuring the MR + * job for incremental load. */ @InterfaceAudience.Private - static Function blockSizeDetails = familyDescriptor -> String - .valueOf(familyDescriptor.getBlocksize()); + static Function blockSizeDetails = + familyDescriptor -> String.valueOf(familyDescriptor.getBlocksize()); /** - * Serialize column family to bloom type map to configuration. Invoked while - * configuring the MR job for incremental load. + * Serialize column family to bloom type map to configuration. Invoked while configuring the MR + * job for incremental load. 
*/ @InterfaceAudience.Private static Function bloomTypeDetails = familyDescriptor -> { @@ -948,8 +915,8 @@ static String serializeColumnFamilyAttribute(Function bloomParamDetails = familyDescriptor -> { @@ -962,8 +929,8 @@ static String serializeColumnFamilyAttribute(Function dataBlockEncodingDetails = familyDescriptor -> { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java index fad91599b6f1..8b64f8754162 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,24 +29,25 @@ import org.apache.hadoop.hbase.mapred.TableOutputFormat; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Partitioner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is used to partition the output keys into groups of keys. - * Keys are grouped according to the regions that currently exist - * so that each reducer fills a single region so load is distributed. - * - *

This class is not suitable as partitioner creating hfiles - * for incremental bulk loads as region spread will likely change between time of - * hfile creation and load time. See {@link org.apache.hadoop.hbase.tool.BulkLoadHFiles} - * and Bulk Load.

- * - * @param The type of the key. - * @param The type of the value. + * This is used to partition the output keys into groups of keys. Keys are grouped according to the + * regions that currently exist so that each reducer fills a single region so load is distributed. + *

+ * This class is not suitable as partitioner creating hfiles for incremental bulk loads as region + * spread will likely change between time of hfile creation and load time. See + * {@link org.apache.hadoop.hbase.tool.BulkLoadHFiles} and + * Bulk Load. + *
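For the supported (non-bulk-load) case, a rough sketch of wiring this partitioner into a job that writes straight to an existing table; the table name and MyTableReducer are hypothetical placeholders.

  // Illustrative sketch only; "usertable" and MyTableReducer are hypothetical.
  Configuration conf = HBaseConfiguration.create();
  conf.set(TableOutputFormat.OUTPUT_TABLE, "usertable");
  Job job = Job.getInstance(conf, "write-to-usertable");
  job.setOutputFormatClass(TableOutputFormat.class);
  job.setReducerClass(MyTableReducer.class);
  // Group reduce output by the table's current regions.
  job.setPartitionerClass(HRegionPartitioner.class);
  // One reducer per region, so each reducer fills a single region.
  try (Connection conn = ConnectionFactory.createConnection(conf);
      RegionLocator locator = conn.getRegionLocator(TableName.valueOf("usertable"))) {
    job.setNumReduceTasks(locator.getStartKeys().length);
  }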

+ * @param The type of the key. + * @param The type of the value. */ @InterfaceAudience.Public -public class HRegionPartitioner -extends Partitioner -implements Configurable { +public class HRegionPartitioner extends Partitioner + implements Configurable { private static final Logger LOG = LoggerFactory.getLogger(HRegionPartitioner.class); private Configuration conf = null; @@ -60,24 +57,23 @@ public class HRegionPartitioner private byte[][] startKeys; /** - * Gets the partition number for a given key (hence record) given the total - * number of partitions i.e. number of reduce-tasks for the job. - * - *

Typically a hash function on a all or a subset of the key.

- * - * @param key The key to be partitioned. - * @param value The entry value. - * @param numPartitions The total number of partitions. + * Gets the partition number for a given key (hence record) given the total number of partitions + * i.e. number of reduce-tasks for the job. + *

+ * Typically a hash function on all or a subset of the key. + *

+ * @param key The key to be partitioned. + * @param value The entry value. + * @param numPartitions The total number of partitions. * @return The partition number for the key. - * @see org.apache.hadoop.mapreduce.Partitioner#getPartition( - * java.lang.Object, java.lang.Object, int) + * @see org.apache.hadoop.mapreduce.Partitioner#getPartition( java.lang.Object, java.lang.Object, + * int) */ @Override - public int getPartition(ImmutableBytesWritable key, - VALUE value, int numPartitions) { + public int getPartition(ImmutableBytesWritable key, VALUE value, int numPartitions) { byte[] region = null; // Only one region return 0 - if (this.startKeys.length == 1){ + if (this.startKeys.length == 1) { return 0; } try { @@ -87,12 +83,11 @@ public int getPartition(ImmutableBytesWritable key, } catch (IOException e) { LOG.error(e.toString(), e); } - for (int i = 0; i < this.startKeys.length; i++){ - if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ - if (i >= numPartitions){ + for (int i = 0; i < this.startKeys.length; i++) { + if (Bytes.compareTo(region, this.startKeys[i]) == 0) { + if (i >= numPartitions) { // cover if we have less reduces then regions. - return (Integer.toString(i).hashCode() - & Integer.MAX_VALUE) % numPartitions; + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; } @@ -103,7 +98,6 @@ public int getPartition(ImmutableBytesWritable key, /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -113,12 +107,9 @@ public Configuration getConf() { } /** - * Sets the configuration. This is used to determine the start keys for the - * given table. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * Sets the configuration. This is used to determine the start keys for the given table. + * @param configuration The configuration to set. + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java index 0a779618eac9..67bf29c3cb50 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -220,9 +220,9 @@ Scan initScan() throws IOException { } /** - * Choose partitions between row ranges to hash to a single output file - * Selects region boundaries that fall within the scan range, and groups them - * into the desired number of partitions. + * Choose partitions between row ranges to hash to a single output file Selects region + * boundaries that fall within the scan range, and groups them into the desired number of + * partitions. 
*/ void selectPartitions(Pair regionStartEndKeys) { List startKeys = new ArrayList<>(); @@ -232,13 +232,13 @@ void selectPartitions(Pair regionStartEndKeys) { // if scan begins after this region, or starts before this region, then drop this region // in other words: - // IF (scan begins before the end of this region - // AND scan ends before the start of this region) - // THEN include this region + // IF (scan begins before the end of this region + // AND scan ends before the start of this region) + // THEN include this region if ((isTableStartRow(startRow) || isTableEndRow(regionEndKey) || Bytes.compareTo(startRow, regionEndKey) < 0) - && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey) - || Bytes.compareTo(stopRow, regionStartKey) > 0)) { + && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey) + || Bytes.compareTo(stopRow, regionStartKey) > 0)) { startKeys.add(regionStartKey); } } @@ -267,8 +267,8 @@ void selectPartitions(Pair regionStartEndKeys) { void writePartitionFile(Configuration conf, Path path) throws IOException { FileSystem fs = path.getFileSystem(conf); @SuppressWarnings("deprecation") - SequenceFile.Writer writer = SequenceFile.createWriter( - fs, conf, path, ImmutableBytesWritable.class, NullWritable.class); + SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path, + ImmutableBytesWritable.class, NullWritable.class); for (int i = 0; i < partitions.size(); i++) { writer.append(partitions.get(i), NullWritable.get()); @@ -277,7 +277,7 @@ void writePartitionFile(Configuration conf, Path path) throws IOException { } private void readPartitionFile(FileSystem fs, Configuration conf, Path path) - throws IOException { + throws IOException { @SuppressWarnings("deprecation") SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf); ImmutableBytesWritable key = new ImmutableBytesWritable(); @@ -351,15 +351,15 @@ public class Reader implements java.io.Closeable { int partitionIndex = Collections.binarySearch(partitions, startKey); if (partitionIndex >= 0) { // if the key is equal to a partition, then go the file after that partition - hashFileIndex = partitionIndex+1; + hashFileIndex = partitionIndex + 1; } else { // if the key is between partitions, then go to the file between those partitions - hashFileIndex = -1-partitionIndex; + hashFileIndex = -1 - partitionIndex; } openHashFile(); // MapFile's don't make it easy to seek() so that the subsequent next() returns - // the desired key/value pair. So we cache it for the first call of next(). + // the desired key/value pair. So we cache it for the first call of next(). hash = new ImmutableBytesWritable(); key = (ImmutableBytesWritable) mapFileReader.getClosest(startKey, hash); if (key == null) { @@ -371,8 +371,8 @@ public class Reader implements java.io.Closeable { } /** - * Read the next key/hash pair. - * Returns true if such a pair exists and false when at the end of the data. + * Read the next key/hash pair. Returns true if such a pair exists and false when at the end + * of the data. 
*/ public boolean next() throws IOException { if (cachedNext) { @@ -443,19 +443,19 @@ public Job createSubmittableJob(String[] args) throws IOException { generatePartitions(partitionsPath); Job job = Job.getInstance(getConf(), - getConf().get("mapreduce.job.name", "hashTable_" + tableHash.tableName)); + getConf().get("mapreduce.job.name", "hashTable_" + tableHash.tableName)); Configuration jobConf = job.getConfiguration(); jobConf.setLong(HASH_BATCH_SIZE_CONF_KEY, tableHash.batchSize); jobConf.setBoolean(IGNORE_TIMESTAMPS, tableHash.ignoreTimestamps); job.setJarByClass(HashTable.class); TableMapReduceUtil.initTableMapperJob(tableHash.tableName, tableHash.initScan(), - HashMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); + HashMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); // use a TotalOrderPartitioner and reducers to group region output into hash files job.setPartitionerClass(TotalOrderPartitioner.class); TotalOrderPartitioner.setPartitionFile(jobConf, partitionsPath); - job.setReducerClass(Reducer.class); // identity reducer + job.setReducerClass(Reducer.class); // identity reducer job.setNumReduceTasks(tableHash.numHashFiles); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(ImmutableBytesWritable.class); @@ -467,8 +467,8 @@ public Job createSubmittableJob(String[] args) throws IOException { private void generatePartitions(Path partitionsPath) throws IOException { Connection connection = ConnectionFactory.createConnection(getConf()); - Pair regionKeys - = connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys(); + Pair regionKeys = + connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys(); connection.close(); tableHash.selectPartitions(regionKeys); @@ -556,7 +556,7 @@ public long getBatchSize() { } public static class HashMapper - extends TableMapper { + extends TableMapper { private ResultHasher hasher; private long targetBatchSize; @@ -565,11 +565,10 @@ public static class HashMapper @Override protected void setup(Context context) throws IOException, InterruptedException { - targetBatchSize = context.getConfiguration() - .getLong(HASH_BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); + targetBatchSize = + context.getConfiguration().getLong(HASH_BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); hasher = new ResultHasher(); - hasher.ignoreTimestamps = context.getConfiguration(). 
- getBoolean(IGNORE_TIMESTAMPS, false); + hasher.ignoreTimestamps = context.getConfiguration().getBoolean(IGNORE_TIMESTAMPS, false); TableSplit split = (TableSplit) context.getInputSplit(); hasher.startBatch(new ImmutableBytesWritable(split.getStartRow())); } @@ -612,6 +611,7 @@ private void completeManifest() throws IOException { } private static final int NUM_ARGS = 2; + private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); @@ -646,8 +646,8 @@ private static void printUsage(final String errorMsg) { System.err.println(); System.err.println("Examples:"); System.err.println(" To hash 'TestTable' in 32kB batches for a 1 hour window into 50 files:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.HashTable --batchsize=32000 --numhashfiles=50" + System.err.println(" $ hbase " + + "org.apache.hadoop.hbase.mapreduce.HashTable --batchsize=32000 --numhashfiles=50" + " --starttime=1265875194289 --endtime=1265878794289 --families=cf2,cf3" + " TestTable /hashes/testTable"); } @@ -659,8 +659,8 @@ private boolean doCommandLine(final String[] args) { } try { - tableHash.tableName = args[args.length-2]; - destPath = new Path(args[args.length-1]); + tableHash.tableName = args[args.length - 2]; + destPath = new Path(args[args.length - 1]); for (int i = 0; i < args.length - NUM_ARGS; i++) { String cmd = args[i]; @@ -731,8 +731,8 @@ private boolean doCommandLine(final String[] args) { final String ignoreTimestampsKey = "--ignoreTimestamps="; if (cmd.startsWith(ignoreTimestampsKey)) { - tableHash.ignoreTimestamps = Boolean. - parseBoolean(cmd.substring(ignoreTimestampsKey.length())); + tableHash.ignoreTimestamps = + Boolean.parseBoolean(cmd.substring(ignoreTimestampsKey.length())); continue; } @@ -741,8 +741,8 @@ private boolean doCommandLine(final String[] args) { } if ((tableHash.startTime != 0 || tableHash.endTime != 0) && (tableHash.startTime >= tableHash.endTime)) { - printUsage("Invalid time range filter: starttime=" - + tableHash.startTime + " >= endtime=" + tableHash.endTime); + printUsage("Invalid time range filter: starttime=" + tableHash.startTime + " >= endtime=" + + tableHash.endTime); return false; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java index 831607c730c5..1e896d301dfa 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,48 +18,43 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Job; +import org.apache.yetus.audience.InterfaceAudience; /** * Pass the given key and record as-is to the reduce phase. */ @InterfaceAudience.Public -public class IdentityTableMapper -extends TableMapper { +public class IdentityTableMapper extends TableMapper { /** - * Use this before submitting a TableMap job. 
It will appropriately set up - * the job. - * - * @param table The table name. - * @param scan The scan with the columns to scan. - * @param mapper The mapper class. - * @param job The job configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name. + * @param scan The scan with the columns to scan. + * @param mapper The mapper class. + * @param job The job configuration. * @throws IOException When setting up the job fails. */ @SuppressWarnings("rawtypes") - public static void initJob(String table, Scan scan, - Class mapper, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(table, scan, mapper, - ImmutableBytesWritable.class, Result.class, job); + public static void initJob(String table, Scan scan, Class mapper, Job job) + throws IOException { + TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, + Result.class, job); } /** * Pass the key, value to reduce. - * - * @param key The current key. - * @param value The current value. - * @param context The current context. + * @param key The current key. + * @param value The current value. + * @param context The current context. * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { context.write(key, value); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java index 876953c862b3..e014f9e0c60c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,60 +18,50 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.io.Writable; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.io.Writable; /** * Convenience class that simply writes all values (which must be - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} instances) - * passed to it out to the configured HBase table. This works in combination - * with {@link TableOutputFormat} which actually does the writing to HBase.

- * - * Keys are passed along but ignored in TableOutputFormat. However, they can - * be used to control how your values will be divided up amongst the specified - * number of reducers.

- * - * You can also use the {@link TableMapReduceUtil} class to set up the two - * classes in one step: + * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete + * Delete} instances) passed to it out to the configured HBase table. This works in combination with + * {@link TableOutputFormat} which actually does the writing to HBase. + *

+ * Keys are passed along but ignored in TableOutputFormat. However, they can be used to control how + * your values will be divided up amongst the specified number of reducers. + *

+ * You can also use the {@link TableMapReduceUtil} class to set up the two classes in one step: *

* TableMapReduceUtil.initTableReducerJob("table", IdentityTableReducer.class, job); - *
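A slightly fuller sketch around that call; MyPutEmittingMapper, the input path and the table name are hypothetical placeholders.

  // Illustrative sketch only; MyPutEmittingMapper is a hypothetical mapper that emits
  // (ImmutableBytesWritable, Put) pairs which the identity reducer then writes out.
  Job job = Job.getInstance(HBaseConfiguration.create(), "load-mytable");
  job.setJarByClass(MyPutEmittingMapper.class);
  FileInputFormat.addInputPath(job, new Path("/data/input"));
  job.setMapperClass(MyPutEmittingMapper.class);
  job.setMapOutputKeyClass(ImmutableBytesWritable.class);
  job.setMapOutputValueClass(Put.class);
  // Sets TableOutputFormat, the output table and IdentityTableReducer in one step.
  TableMapReduceUtil.initTableReducerJob("mytable", IdentityTableReducer.class, job);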
- * This will also set the proper {@link TableOutputFormat} which is given the - * table parameter. The - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} define the - * row and columns implicitly. + *
This will also set the proper {@link TableOutputFormat} which is given the + * table parameter. The {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} define the row and columns implicitly. */ @InterfaceAudience.Public -public class IdentityTableReducer -extends TableReducer { +public class IdentityTableReducer extends TableReducer { @SuppressWarnings("unused") private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReducer.class); /** - * Writes each given record, consisting of the row key and the given values, - * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}. - * It is emitting the row key and each {@link org.apache.hadoop.hbase.client.Put Put} - * or {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs. - * - * @param key The current row key. - * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given - * row. - * @param context The context of the reduce. + * Writes each given record, consisting of the row key and the given values, to the configured + * {@link org.apache.hadoop.mapreduce.OutputFormat}. It is emitting the row key and each + * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete + * Delete} as separate pairs. + * @param key The current row key. + * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given row. + * @param context The context of the reduce. * @throws IOException When writing the record fails. * @throws InterruptedException When the job gets interrupted. */ @Override public void reduce(Writable key, Iterable values, Context context) - throws IOException, InterruptedException { - for(Mutation putOrDelete : values) { + throws IOException, InterruptedException { + for (Mutation putOrDelete : values) { context.write(key, putOrDelete); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 30071fdfd809..9f9d0179d37a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +31,6 @@ import java.util.Map; import java.util.TreeMap; import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -41,15 +39,12 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.util.MapReduceExtendedCell; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -63,7 +58,9 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.WritableComparator; @@ -77,11 +74,11 @@ import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Import data written by {@link Export}. 
*/ @@ -95,16 +92,16 @@ public class Import extends Configured implements Tool { public final static String FILTER_ARGS_CONF_KEY = "import.filter.args"; public final static String TABLE_NAME = "import.table.name"; public final static String WAL_DURABILITY = "import.wal.durability"; - public final static String HAS_LARGE_RESULT= "import.bulk.hasLargeResult"; + public final static String HAS_LARGE_RESULT = "import.bulk.hasLargeResult"; private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; public static class CellWritableComparablePartitioner extends Partitioner { private static CellWritableComparable[] START_KEYS = null; + @Override - public int getPartition(CellWritableComparable key, Cell value, - int numPartitions) { + public int getPartition(CellWritableComparable key, Cell value, int numPartitions) { for (int i = 0; i < START_KEYS.length; ++i) { if (key.compareTo(START_KEYS[i]) <= 0) { return i; @@ -115,15 +112,13 @@ public int getPartition(CellWritableComparable key, Cell value, } - public static class CellWritableComparable - implements WritableComparable { + public static class CellWritableComparable implements WritableComparable { private Cell kv = null; static { // register this comparator - WritableComparator.define(CellWritableComparable.class, - new CellWritableComparator()); + WritableComparator.define(CellWritableComparable.class, new CellWritableComparator()); } public CellWritableComparable() { @@ -172,45 +167,37 @@ public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { } public static class CellReducer - extends - Reducer { - protected void reduce( - CellWritableComparable row, - Iterable kvs, - Reducer.Context context) + extends Reducer { + protected void reduce(CellWritableComparable row, Iterable kvs, + Reducer.Context context) throws java.io.IOException, InterruptedException { int index = 0; for (Cell kv : kvs) { context.write(new ImmutableBytesWritable(CellUtil.cloneRow(kv)), new MapReduceExtendedCell(kv)); - if (++index % 100 == 0) - context.setStatus("Wrote " + index + " KeyValues, " - + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); + if (++index % 100 == 0) context.setStatus("Wrote " + index + " KeyValues, " + + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); } } } - public static class CellSortImporter - extends TableMapper { + public static class CellSortImporter extends TableMapper { private Map cfRenameMap; private Filter filter; private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class); /** - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. */ @Override - public void map(ImmutableBytesWritable row, Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException { try { if (LOG.isTraceEnabled()) { - LOG.trace("Considering the row." - + Bytes.toString(row.get(), row.getOffset(), row.getLength())); + LOG.trace( + "Considering the row." 
+ Bytes.toString(row.get(), row.getOffset(), row.getLength())); } if (filter == null || !filter.filterRowKey( PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength()))) { @@ -241,8 +228,7 @@ public void setup(Context context) throws IOException { if (startKeys.length != reduceNum) { throw new IOException("Region split after job initialization"); } - CellWritableComparable[] startKeyWraps = - new CellWritableComparable[startKeys.length - 1]; + CellWritableComparable[] startKeyWraps = new CellWritableComparable[startKeys.length - 1]; for (int i = 1; i < startKeys.length; ++i) { startKeyWraps[i - 1] = new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i])); @@ -255,31 +241,28 @@ public void setup(Context context) throws IOException { /** * A mapper that just writes out KeyValues. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS", - justification="Writables are going away and this has been this way forever") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_COMPARETO_USE_OBJECT_EQUALS", + justification = "Writables are going away and this has been this way forever") public static class CellImporter extends TableMapper { private Map cfRenameMap; private Filter filter; private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class); /** - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. */ @Override - public void map(ImmutableBytesWritable row, Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException { try { if (LOG.isTraceEnabled()) { - LOG.trace("Considering the row." - + Bytes.toString(row.get(), row.getOffset(), row.getLength())); + LOG.trace( + "Considering the row." 
+ Bytes.toString(row.get(), row.getOffset(), row.getLength())); } - if (filter == null - || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), - (short) row.getLength()))) { + if (filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength()))) { for (Cell kv : value.rawCells()) { kv = filterKv(filter, kv); // skip if we filtered it out @@ -310,15 +293,13 @@ public static class Importer extends TableMapper cfRenameMap) { - if(cfRenameMap != null) { + if (cfRenameMap != null) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer - kv.getRowOffset(), // row offset - kv.getRowLength(), // row length - newCfName, // CF buffer - 0, // CF offset - newCfName.length, // CF length - kv.getQualifierArray(), // qualifier buffer - kv.getQualifierOffset(), // qualifier offset - kv.getQualifierLength(), // qualifier length - kv.getTimestamp(), // timestamp + kv.getRowOffset(), // row offset + kv.getRowLength(), // row length + newCfName, // CF buffer + 0, // CF offset + newCfName.length, // CF length + kv.getQualifierArray(), // qualifier buffer + kv.getQualifierOffset(), // qualifier offset + kv.getQualifierLength(), // qualifier length + kv.getTimestamp(), // timestamp KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type - kv.getValueArray(), // value buffer - kv.getValueOffset(), // value offset - kv.getValueLength(), // value length - tags.size() == 0 ? null: tags); + kv.getValueArray(), // value buffer + kv.getValueOffset(), // value offset + kv.getValueLength(), // value length + tags.size() == 0 ? null : tags); } } return kv; @@ -537,16 +517,16 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { private static Map createCfRenameMap(Configuration conf) { Map cfRenameMap = null; String allMappingsPropVal = conf.get(CF_RENAME_PROP); - if(allMappingsPropVal != null) { + if (allMappingsPropVal != null) { // The conf value format should be sourceCf1:destCf1,sourceCf2:destCf2,... String[] allMappings = allMappingsPropVal.split(","); - for (String mapping: allMappings) { - if(cfRenameMap == null) { - cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (String mapping : allMappings) { + if (cfRenameMap == null) { + cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); } - String [] srcAndDest = mapping.split(":"); - if(srcAndDest.length != 2) { - continue; + String[] srcAndDest = mapping.split(":"); + if (srcAndDest.length != 2) { + continue; } cfRenameMap.put(Bytes.toBytes(srcAndDest[0]), Bytes.toBytes(srcAndDest[1])); } @@ -555,32 +535,34 @@ private static Map createCfRenameMap(Configuration conf) { } /** - *

Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells - * the mapper how to rename column families. - * - *

Alternately, instead of calling this function, you could set the configuration key + *

+ * Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells the mapper + * how to rename column families. + *

+ * Alternately, instead of calling this function, you could set the configuration key * {@link #CF_RENAME_PROP} yourself. The value should look like - *

srcCf1:destCf1,srcCf2:destCf2,....
. This would have the same effect on - * the mapper behavior. - * - * @param conf the Configuration in which the {@link #CF_RENAME_PROP} key will be - * set + * + *
+   * srcCf1:destCf1,srcCf2:destCf2,....
+   * 
+ * + * . This would have the same effect on the mapper behavior. + * @param conf the Configuration in which the {@link #CF_RENAME_PROP} key will be set * @param renameMap a mapping from source CF names to destination CF names */ - static public void configureCfRenaming(Configuration conf, - Map renameMap) { + static public void configureCfRenaming(Configuration conf, Map renameMap) { StringBuilder sb = new StringBuilder(); - for(Map.Entry entry: renameMap.entrySet()) { + for (Map.Entry entry : renameMap.entrySet()) { String sourceCf = entry.getKey(); String destCf = entry.getValue(); - if(sourceCf.contains(":") || sourceCf.contains(",") || - destCf.contains(":") || destCf.contains(",")) { - throw new IllegalArgumentException("Illegal character in CF names: " - + sourceCf + ", " + destCf); + if (sourceCf.contains(":") || sourceCf.contains(",") || destCf.contains(":") + || destCf.contains(",")) { + throw new IllegalArgumentException( + "Illegal character in CF names: " + sourceCf + ", " + destCf); } - if(sb.length() != 0) { + if (sb.length() != 0) { sb.append(","); } sb.append(sourceCf + ":" + destCf); @@ -607,8 +589,7 @@ public static void addFilterAndArguments(Configuration conf, Class 0) { @@ -708,11 +688,10 @@ private static void usage(final String errorMsg) { System.err.println(" -D " + JOB_NAME_CONF_KEY + "=jobName - use the specified mapreduce job name for the import"); System.err.println("For performance consider the following options:\n" - + " -Dmapreduce.map.speculative=false\n" - + " -Dmapreduce.reduce.speculative=false\n" + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false\n" + " -D" + WAL_DURABILITY + "="); + + " Allowed values are the supported durability values" + + " like SKIP_WAL/ASYNC_WAL/SYNC_WAL/...>"); } /** @@ -721,8 +700,8 @@ private static void usage(final String errorMsg) { * present in the Write Ahead Log to replay in scenarios of a crash. 
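A small sketch of calling the configureCfRenaming helper above; the family names are hypothetical placeholders.

  // Illustrative sketch only; renames data from source family "cf_old" into "cf_new" during import.
  Configuration conf = HBaseConfiguration.create();
  Map<String, String> renameMap = new HashMap<>();
  renameMap.put("cf_old", "cf_new");
  Import.configureCfRenaming(conf, renameMap);
  // Equivalent to setting the CF_RENAME_PROP key yourself with the value "cf_old:cf_new".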
This method flushes all the * regions of the table in the scenarios of import data to hbase with {@link Durability#SKIP_WAL} */ - public static void flushRegionsIfNecessary(Configuration conf) throws IOException, - InterruptedException { + public static void flushRegionsIfNecessary(Configuration conf) + throws IOException, InterruptedException { String tableName = conf.get(TABLE_NAME); Admin hAdmin = null; Connection connection = null; @@ -758,7 +737,7 @@ public int run(String[] args) throws Exception { } Job job = createSubmittableJob(getConf(), args); boolean isJobSuccessful = job.waitForCompletion(true); - if(isJobSuccessful){ + if (isJobSuccessful) { // Flush all the regions of the table flushRegionsIfNecessary(getConf()); } @@ -767,8 +746,8 @@ public int run(String[] args) throws Exception { if (outputRecords < inputRecords) { System.err.println("Warning, not all records were imported (maybe filtered out)."); if (outputRecords == 0) { - System.err.println("If the data was exported from HBase 0.94 "+ - "consider using -Dhbase.import.version=0.94."); + System.err.println("If the data was exported from HBase 0.94 " + + "consider using -Dhbase.import.version=0.94."); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java index 2e94a906f289..dbd93155b1d7 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,11 +66,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Tool to import data from a TSV file. - * - * This tool is rather simplistic - it doesn't do any quoting or - * escaping, but is useful for many data loads. - * + * Tool to import data from a TSV file. This tool is rather simplistic - it doesn't do any quoting + * or escaping, but is useful for many data loads. * @see ImportTsv#usage(String) */ @InterfaceAudience.Public @@ -95,8 +91,8 @@ public class ImportTsv extends Configured implements Tool { public final static String COLUMNS_CONF_KEY = "importtsv.columns"; public final static String SEPARATOR_CONF_KEY = "importtsv.separator"; public final static String ATTRIBUTE_SEPERATOR_CONF_KEY = "attributes.seperator"; - //This config is used to propagate credentials from parent MR jobs which launch - //ImportTSV jobs. SEE IntegrationTestImportTsv. + // This config is used to propagate credentials from parent MR jobs which launch + // ImportTSV jobs. SEE IntegrationTestImportTsv. public final static String CREDENTIALS_LOCATION = "credentials_location"; final static String DEFAULT_SEPARATOR = "\t"; final static String DEFAULT_ATTRIBUTES_SEPERATOR = "=>"; @@ -105,8 +101,8 @@ public class ImportTsv extends Configured implements Tool { public final static String CREATE_TABLE_CONF_KEY = "create.table"; public final static String NO_STRICT_COL_FAMILY = "no.strict"; /** - * If table didn't exist and was created in dry-run mode, this flag is - * flipped to delete it when MR ends. + * If table didn't exist and was created in dry-run mode, this flag is flipped to delete it when + * MR ends. 
*/ private static boolean DRY_RUN_TABLE_CREATED; @@ -151,8 +147,8 @@ public static class TsvParser { private int cellTTLColumnIndex = DEFAULT_CELL_TTL_COLUMN_INDEX; /** - * @param columnsSpecification the list of columns to parser out, comma separated. - * The row key should be the special token TsvParser.ROWKEY_COLUMN_SPEC + * @param columnsSpecification the list of columns to parser out, comma separated. The row key + * should be the special token TsvParser.ROWKEY_COLUMN_SPEC * @param separatorStr */ public TsvParser(String columnsSpecification, String separatorStr) { @@ -163,8 +159,8 @@ public TsvParser(String columnsSpecification, String separatorStr) { separatorByte = separator[0]; // Configure columns - ArrayList columnStrings = Lists.newArrayList( - Splitter.on(',').trimResults().split(columnsSpecification)); + ArrayList columnStrings = + Lists.newArrayList(Splitter.on(',').trimResults().split(columnsSpecification)); maxColumnCount = columnStrings.size(); families = new byte[maxColumnCount][]; @@ -242,12 +238,12 @@ public int getRowKeyColumnIndex() { public byte[] getFamily(int idx) { return families[idx]; } + public byte[] getQualifier(int idx) { return qualifiers[idx]; } - public ParsedLine parse(byte[] lineBytes, int length) - throws BadTsvLineException { + public ParsedLine parse(byte[] lineBytes, int length) throws BadTsvLineException { // Enumerate separator offsets ArrayList tabOffsets = new ArrayList<>(maxColumnCount); for (int i = 0; i < length; i++) { @@ -265,8 +261,7 @@ public ParsedLine parse(byte[] lineBytes, int length) throw new BadTsvLineException("Excessive columns"); } else if (tabOffsets.size() <= getRowKeyColumnIndex()) { throw new BadTsvLineException("No row key"); - } else if (hasTimestamp() - && tabOffsets.size() <= getTimestampKeyColumnIndex()) { + } else if (hasTimestamp() && tabOffsets.size() <= getTimestampKeyColumnIndex()) { throw new BadTsvLineException("No timestamp"); } else if (hasAttributes() && tabOffsets.size() <= getAttributesKeyColumnIndex()) { throw new BadTsvLineException("No attributes specified"); @@ -290,6 +285,7 @@ class ParsedLine { public int getRowKeyOffset() { return getColumnOffset(rowKeyColumnIndex); } + public int getRowKeyLength() { return getColumnLength(rowKeyColumnIndex); } @@ -300,9 +296,8 @@ public long getTimestamp(long ts) throws BadTsvLineException { return ts; } - String timeStampStr = Bytes.toString(lineBytes, - getColumnOffset(timestampKeyColumnIndex), - getColumnLength(timestampKeyColumnIndex)); + String timeStampStr = Bytes.toString(lineBytes, getColumnOffset(timestampKeyColumnIndex), + getColumnLength(timestampKeyColumnIndex)); try { return Long.parseLong(timeStampStr); } catch (NumberFormatException nfe) { @@ -316,7 +311,7 @@ private String getAttributes() { return null; } else { return Bytes.toString(lineBytes, getColumnOffset(attrKeyColumnIndex), - getColumnLength(attrKeyColumnIndex)); + getColumnLength(attrKeyColumnIndex)); } } @@ -366,7 +361,7 @@ public String getCellVisibility() { return null; } else { return Bytes.toString(lineBytes, getColumnOffset(cellVisibilityColumnIndex), - getColumnLength(cellVisibilityColumnIndex)); + getColumnLength(cellVisibilityColumnIndex)); } } @@ -391,22 +386,23 @@ public long getCellTTL() { return 0; } else { return Bytes.toLong(lineBytes, getColumnOffset(cellTTLColumnIndex), - getColumnLength(cellTTLColumnIndex)); + getColumnLength(cellTTLColumnIndex)); } } public int getColumnOffset(int idx) { - if (idx > 0) - return tabOffsets.get(idx - 1) + 1; - else - return 0; + if (idx > 0) 
return tabOffsets.get(idx - 1) + 1; + else return 0; } + public int getColumnLength(int idx) { return tabOffsets.get(idx) - getColumnOffset(idx); } + public int getColumnCount() { return tabOffsets.size(); } + public byte[] getLineBytes() { return lineBytes; } @@ -416,6 +412,7 @@ public static class BadTsvLineException extends Exception { public BadTsvLineException(String err) { super(err); } + private static final long serialVersionUID = 1L; } @@ -443,9 +440,8 @@ public Pair parseRowKey(byte[] lineBytes, int length) } } if (i == length) { - throw new BadTsvLineException( - "Row key does not exist as number of columns in the line" - + " are less than row key position."); + throw new BadTsvLineException("Row key does not exist as number of columns in the line" + + " are less than row key position."); } } return new Pair<>(startPos, endPos - startPos + 1); @@ -454,9 +450,8 @@ public Pair parseRowKey(byte[] lineBytes, int length) /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ @@ -471,16 +466,17 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) String actualSeparator = conf.get(SEPARATOR_CONF_KEY); if (actualSeparator != null) { conf.set(SEPARATOR_CONF_KEY, - Bytes.toString(Base64.getEncoder().encode(Bytes.toBytes(actualSeparator)))); + Bytes.toString(Base64.getEncoder().encode(Bytes.toBytes(actualSeparator)))); } // See if a non-default Mapper was set String mapperClassName = conf.get(MAPPER_CONF_KEY); - Class mapperClass = mapperClassName != null? Class.forName(mapperClassName): DEFAULT_MAPPER; + Class mapperClass = + mapperClassName != null ? 
Class.forName(mapperClassName) : DEFAULT_MAPPER; TableName tableName = TableName.valueOf(args[0]); Path inputDir = new Path(args[1]); - String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString()); + String jobName = conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName.getNameAsString()); job = Job.getInstance(conf, jobName); job.setJarByClass(mapperClass); FileInputFormat.setInputPaths(job, inputDir); @@ -489,7 +485,7 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) job.setMapOutputKeyClass(ImmutableBytesWritable.class); String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); String[] columns = conf.getStrings(COLUMNS_CONF_KEY); - if(StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { + if (StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { String fileLoc = conf.get(CREDENTIALS_LOCATION); Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf); job.getCredentials().addAll(cred); @@ -509,9 +505,8 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) } } } else { - String errorMsg = - format("Table '%s' does not exist and '%s' is set to no.", tableName, - CREATE_TABLE_CONF_KEY); + String errorMsg = format("Table '%s' does not exist and '%s' is set to no.", + tableName, CREATE_TABLE_CONF_KEY); LOG.error(errorMsg); throw new TableNotFoundException(errorMsg); } @@ -520,26 +515,24 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) RegionLocator regionLocator = connection.getRegionLocator(tableName)) { boolean noStrict = conf.getBoolean(NO_STRICT_COL_FAMILY, false); // if no.strict is false then check column family - if(!noStrict) { + if (!noStrict) { ArrayList unmatchedFamilies = new ArrayList<>(); Set cfSet = getColumnFamilies(columns); TableDescriptor tDesc = table.getDescriptor(); for (String cf : cfSet) { - if(!tDesc.hasColumnFamily(Bytes.toBytes(cf))) { + if (!tDesc.hasColumnFamily(Bytes.toBytes(cf))) { unmatchedFamilies.add(cf); } } - if(unmatchedFamilies.size() > 0) { + if (unmatchedFamilies.size() > 0) { ArrayList familyNames = new ArrayList<>(); for (ColumnFamilyDescriptor family : table.getDescriptor().getColumnFamilies()) { familyNames.add(family.getNameAsString()); } - String msg = - "Column Families " + unmatchedFamilies + " specified in " + COLUMNS_CONF_KEY - + " does not match with any of the table " + tableName + String msg = "Column Families " + unmatchedFamilies + " specified in " + + COLUMNS_CONF_KEY + " does not match with any of the table " + tableName + " column families " + familyNames + ".\n" - + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY - + "=true.\n"; + + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY + "=true.\n"; usage(msg); System.exit(-1); } @@ -556,7 +549,7 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), - regionLocator); + regionLocator); } } } else { @@ -582,13 +575,15 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) if (isDryRun) { job.setOutputFormatClass(NullOutputFormat.class); job.getConfiguration().setStrings("io.serializations", - job.getConfiguration().get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + job.getConfiguration().get("io.serializations"), 
MutationSerialization.class.getName(), + ResultSerialization.class.getName(), CellSerialization.class.getName()); } TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - org.apache.hbase.thirdparty.com.google.common.base.Function.class /* Guava used by TsvParser */); + org.apache.hbase.thirdparty.com.google.common.base.Function.class /* + * Guava used by + * TsvParser + */); } } return job; @@ -596,21 +591,20 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) private static void createTable(Admin admin, TableName tableName, String[] columns) throws IOException { - TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); Set cfSet = getColumnFamilies(columns); for (String cf : cfSet) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)); } - LOG.warn(format("Creating table '%s' with '%s' columns and default descriptors.", - tableName, cfSet)); + LOG.warn( + format("Creating table '%s' with '%s' columns and default descriptors.", tableName, cfSet)); admin.createTable(builder.build()); } private static void deleteTable(Configuration conf, String[] args) { TableName tableName = TableName.valueOf(args[0]); try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { try { admin.disableTable(tableName); } catch (TableNotEnabledException e) { @@ -618,8 +612,7 @@ private static void deleteTable(Configuration conf, String[] args) { } admin.deleteTable(tableName); } catch (IOException e) { - LOG.error(format("***Dry run: Failed to delete table '%s'.***%n%s", tableName, - e.toString())); + LOG.error(format("***Dry run: Failed to delete table '%s'.***%n%s", tableName, e.toString())); return; } LOG.info(format("Dry run: Deleted table '%s'.", tableName)); @@ -641,64 +634,57 @@ private static Set getColumnFamilies(String[] columns) { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ private static void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - String usage = - "Usage: " + NAME + " -D"+ COLUMNS_CONF_KEY + "=a,b,c \n" + - "\n" + - "Imports the given input directory of TSV data into the specified table.\n" + - "\n" + - "The column names of the TSV data must be specified using the -D" + COLUMNS_CONF_KEY + "\n" + - "option. This option takes the form of comma-separated column names, where each\n" + - "column name is either a simple column family, or a columnfamily:qualifier. The special\n" + - "column name " + TsvParser.ROWKEY_COLUMN_SPEC + " is used to designate that this column should be used\n" + - "as the row key for each imported record. You must specify exactly one column\n" + - "to be the row key, and you must specify a column name for every column that exists in the\n" + - "input data. Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + - " designates that this column should be\n" + - "used as timestamp for each record. Unlike " + TsvParser.ROWKEY_COLUMN_SPEC + ", " + - TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." 
+ "\n" + - "You must specify at most one column as timestamp key for each imported record.\n" + - "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" + - "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + "' option will be ignored.\n" + - "\n" + - "Other special columns that can be specified are " + TsvParser.CELL_TTL_COLUMN_SPEC + - " and " + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + ".\n" + - TsvParser.CELL_TTL_COLUMN_SPEC + " designates that this column will be used " + - "as a Cell's Time To Live (TTL) attribute.\n" + - TsvParser.CELL_VISIBILITY_COLUMN_SPEC + " designates that this column contains the " + - "visibility label expression.\n" + - "\n" + - TsvParser.ATTRIBUTES_COLUMN_SPEC+" can be used to specify Operation Attributes per record.\n"+ - " Should be specified as key=>value where "+TsvParser.DEFAULT_ATTRIBUTES_COLUMN_INDEX+ " is used \n"+ - " as the seperator. Note that more than one OperationAttributes can be specified.\n"+ - "By default importtsv will load data directly into HBase. To instead generate\n" + - "HFiles of data to prepare for a bulk data load, pass the option:\n" + - " -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output\n" + - " Note: if you do not use this option, then the target table must already exist in HBase\n" + - "\n" + - "Other options that may be specified with -D include:\n" + - " -D" + DRY_RUN_CONF_KEY + "=true - Dry run mode. Data is not actually populated into" + - " table. If table does not exist, it is created but deleted in the end.\n" + - " -D" + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" + - " -D" + LOG_BAD_LINES_CONF_KEY + "=true - logs invalid lines to stderr\n" + - " -D" + SKIP_EMPTY_COLUMNS + "=false - If true then skip empty columns in bulk import\n" + - " '-D" + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" + - " -D" + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import\n" + - " -D" + MAPPER_CONF_KEY + "=my.Mapper - A user-defined Mapper to use instead of " + - DEFAULT_MAPPER.getName() + "\n" + - " -D" + JOB_NAME_CONF_KEY + "=jobName - use the specified mapreduce job name for the import\n" + - " -D" + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" + - " Note: if you set this to 'no', then the target table must already exist in HBase\n" + - " -D" + NO_STRICT_COL_FAMILY + "=true - ignore column family check in hbase table. " + - "Default is false\n\n" + - "For performance consider the following options:\n" + - " -Dmapreduce.map.speculative=false\n" + - " -Dmapreduce.reduce.speculative=false"; + String usage = "Usage: " + NAME + " -D" + COLUMNS_CONF_KEY + "=a,b,c \n" + + "\n" + "Imports the given input directory of TSV data into the specified table.\n" + "\n" + + "The column names of the TSV data must be specified using the -D" + COLUMNS_CONF_KEY + + "\n" + "option. This option takes the form of comma-separated column names, where each\n" + + "column name is either a simple column family, or a columnfamily:qualifier. The special\n" + + "column name " + TsvParser.ROWKEY_COLUMN_SPEC + + " is used to designate that this column should be used\n" + + "as the row key for each imported record. You must specify exactly one column\n" + + "to be the row key, and you must specify a column name for every column that exists in the\n" + + "input data. 
Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + + " designates that this column should be\n" + "used as timestamp for each record. Unlike " + + TsvParser.ROWKEY_COLUMN_SPEC + ", " + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." + + "\n" + "You must specify at most one column as timestamp key for each imported record.\n" + + "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" + + "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + + "' option will be ignored.\n" + "\n" + "Other special columns that can be specified are " + + TsvParser.CELL_TTL_COLUMN_SPEC + " and " + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + ".\n" + + TsvParser.CELL_TTL_COLUMN_SPEC + " designates that this column will be used " + + "as a Cell's Time To Live (TTL) attribute.\n" + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + + " designates that this column contains the " + "visibility label expression.\n" + "\n" + + TsvParser.ATTRIBUTES_COLUMN_SPEC + + " can be used to specify Operation Attributes per record.\n" + + " Should be specified as key=>value where " + TsvParser.DEFAULT_ATTRIBUTES_COLUMN_INDEX + + " is used \n" + + " as the seperator. Note that more than one OperationAttributes can be specified.\n" + + "By default importtsv will load data directly into HBase. To instead generate\n" + + "HFiles of data to prepare for a bulk data load, pass the option:\n" + " -D" + + BULK_OUTPUT_CONF_KEY + "=/path/for/output\n" + + " Note: if you do not use this option, then the target table must already exist in HBase\n" + + "\n" + "Other options that may be specified with -D include:\n" + " -D" + + DRY_RUN_CONF_KEY + "=true - Dry run mode. Data is not actually populated into" + + " table. If table does not exist, it is created but deleted in the end.\n" + " -D" + + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" + " -D" + + LOG_BAD_LINES_CONF_KEY + "=true - logs invalid lines to stderr\n" + " -D" + + SKIP_EMPTY_COLUMNS + "=false - If true then skip empty columns in bulk import\n" + " '-D" + + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" + " -D" + + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import\n" + + " -D" + MAPPER_CONF_KEY + "=my.Mapper - A user-defined Mapper to use instead of " + + DEFAULT_MAPPER.getName() + "\n" + " -D" + JOB_NAME_CONF_KEY + + "=jobName - use the specified mapreduce job name for the import\n" + " -D" + + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" + + " Note: if you set this to 'no', then the target table must already exist in HBase\n" + + " -D" + NO_STRICT_COL_FAMILY + "=true - ignore column family check in hbase table. " + + "Default is false\n\n" + "For performance consider the following options:\n" + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"; System.err.println(usage); } @@ -718,8 +704,7 @@ public int run(String[] args) throws Exception { // Make sure columns are specified String[] columns = getConf().getStrings(COLUMNS_CONF_KEY); if (columns == null) { - usage("No columns specified. Please specify with -D" + - COLUMNS_CONF_KEY+"=..."); + usage("No columns specified. 
Please specify with -D" + COLUMNS_CONF_KEY + "=..."); return -1; } @@ -736,30 +721,27 @@ public int run(String[] args) throws Exception { // Make sure we have at most one column as the timestamp key int tskeysFound = 0; for (String col : columns) { - if (col.equals(TsvParser.TIMESTAMPKEY_COLUMN_SPEC)) - tskeysFound++; + if (col.equals(TsvParser.TIMESTAMPKEY_COLUMN_SPEC)) tskeysFound++; } if (tskeysFound > 1) { - usage("Must specify at most one column as " - + TsvParser.TIMESTAMPKEY_COLUMN_SPEC); + usage("Must specify at most one column as " + TsvParser.TIMESTAMPKEY_COLUMN_SPEC); return -1; } int attrKeysFound = 0; for (String col : columns) { - if (col.equals(TsvParser.ATTRIBUTES_COLUMN_SPEC)) - attrKeysFound++; + if (col.equals(TsvParser.ATTRIBUTES_COLUMN_SPEC)) attrKeysFound++; } if (attrKeysFound > 1) { - usage("Must specify at most one column as " - + TsvParser.ATTRIBUTES_COLUMN_SPEC); + usage("Must specify at most one column as " + TsvParser.ATTRIBUTES_COLUMN_SPEC); return -1; } // Make sure one or more columns are specified excluding rowkey and // timestamp key if (columns.length - (rowkeysFound + tskeysFound + attrKeysFound) < 1) { - usage("One or more columns in addition to the row key and timestamp(optional) are required"); + usage( + "One or more columns in addition to the row key and timestamp(optional) are required"); return -1; } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java index 0127b51ab3fe..91a3a9175b5f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.mapreduce; @@ -38,18 +37,16 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Finds the Jar for a class. If the class is in a directory in the - * classpath, it creates a Jar on the fly with the contents of the directory - * and returns the path to that Jar. If a Jar is created, it is created in - * the system temporary directory. - * - * This file was forked from hadoop/common/branches/branch-2@1377176. + * Finds the Jar for a class. If the class is in a directory in the classpath, it creates a Jar on + * the fly with the contents of the directory and returns the path to that Jar. If a Jar is created, + * it is created in the system temporary directory. This file was forked from + * hadoop/common/branches/branch-2@1377176. 
*/ @InterfaceAudience.Private public final class JarFinder { - private static void copyToZipStream(File file, ZipEntry entry, - ZipOutputStream zos) throws IOException { + private static void copyToZipStream(File file, ZipEntry entry, ZipOutputStream zos) + throws IOException { InputStream is = new FileInputStream(file); try { zos.putNextEntry(entry); @@ -68,8 +65,7 @@ private static void copyToZipStream(File file, ZipEntry entry, } } - public static void jarDir(File dir, String relativePath, ZipOutputStream zos) - throws IOException { + public static void jarDir(File dir, String relativePath, ZipOutputStream zos) throws IOException { Preconditions.checkNotNull(relativePath, "relativePath"); Preconditions.checkNotNull(zos, "zos"); @@ -89,8 +85,8 @@ public static void jarDir(File dir, String relativePath, ZipOutputStream zos) zos.close(); } - private static void zipDir(File dir, String relativePath, ZipOutputStream zos, - boolean start) throws IOException { + private static void zipDir(File dir, String relativePath, ZipOutputStream zos, boolean start) + throws IOException { String[] dirList = dir.list(); if (dirList == null) { return; @@ -107,8 +103,7 @@ private static void zipDir(File dir, String relativePath, ZipOutputStream zos, String filePath = f.getPath(); File file = new File(filePath); zipDir(file, relativePath + f.getName() + "/", zos, false); - } - else { + } else { String path = relativePath + f.getName(); if (!path.equals(JarFile.MANIFEST_NAME)) { ZipEntry anEntry = new ZipEntry(path); @@ -125,22 +120,18 @@ private static void createJar(File dir, File jarFile) throws IOException { File jarDir = jarFile.getParentFile(); if (!jarDir.exists()) { if (!jarDir.mkdirs()) { - throw new IOException(MessageFormat.format("could not create dir [{0}]", - jarDir)); + throw new IOException(MessageFormat.format("could not create dir [{0}]", jarDir)); } } try (FileOutputStream fos = new FileOutputStream(jarFile); - JarOutputStream jos = new JarOutputStream(fos)) { + JarOutputStream jos = new JarOutputStream(fos)) { jarDir(dir, "", jos); } } /** - * Returns the full path to the Jar containing the class. It always return a - * JAR. - * + * Returns the full path to the Jar containing the class. It always return a JAR. * @param klass class. - * * @return path to the Jar containing the class. 
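As a point of reference, getJar above is typically consumed when shipping job dependencies. A minimal sketch of that pattern follows; the Put class and the "tmpjars" key are only illustrative of the approach TableMapReduceUtil takes, not code from this patch.

// Sketch only: resolve the jar (or build a temporary one from a classpath
// directory) that provides a class, then ship it with the job.
Configuration conf = job.getConfiguration();           // 'job' assumed to exist
String jar = JarFinder.getJar(org.apache.hadoop.hbase.client.Put.class);
if (jar != null) {
  conf.set("tmpjars", jar);                            // comma-separated jars to ship with the job
}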
*/ public static String getJar(Class klass) { @@ -149,8 +140,7 @@ public static String getJar(Class klass) { if (loader != null) { String class_file = klass.getName().replaceAll("\\.", "/") + ".class"; try { - for (Enumeration itr = loader.getResources(class_file); - itr.hasMoreElements(); ) { + for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements();) { URL url = (URL) itr.nextElement(); String path = url.getPath(); if (path.startsWith("file:")) { @@ -160,8 +150,7 @@ public static String getJar(Class klass) { if ("jar".equals(url.getProtocol())) { path = URLDecoder.decode(path, "UTF-8"); return path.replaceAll("!.*$", ""); - } - else if ("file".equals(url.getProtocol())) { + } else if ("file".equals(url.getProtocol())) { String klassName = klass.getName(); klassName = klassName.replace(".", "/") + ".class"; path = path.substring(0, path.length() - klassName.length()); @@ -178,13 +167,13 @@ else if ("file".equals(url.getProtocol())) { return tempJar.getAbsolutePath(); } } - } - catch (IOException e) { + } catch (IOException e) { throw new RuntimeException(e); } } return null; } - private JarFinder() {} + private JarFinder() { + } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java index 6410bf8726c6..67b2e8cd434d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,42 +6,33 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

+ * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.List; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; - -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Create 3 level tree directory, first level is using table name as parent - * directory and then use family name as child directory, and all related HFiles - * for one family are under child directory - * -tableName1 - * -columnFamilyName1 - * -columnFamilyName2 - * -HFiles - * -tableName2 - * -columnFamilyName1 - * -HFiles - * -columnFamilyName2 + * Create 3 level tree directory, first level is using table name as parent directory and then use + * family name as child directory, and all related HFiles for one family are under child directory + * -tableName1 -columnFamilyName1 -columnFamilyName2 -HFiles -tableName2 -columnFamilyName1 -HFiles + * -columnFamilyName2 */ @InterfaceAudience.Public public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { @@ -50,13 +41,11 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { /** * Creates a composite key to use as a mapper output key when using * MultiTableHFileOutputFormat.configureIncrementaLoad to set up bulk ingest job - * * @param tableName Name of the Table - Eg: TableName.getNameAsString() - * @param suffix Usually represents a rowkey when creating a mapper key or column family - * @return byte[] representation of composite key + * @param suffix Usually represents a rowkey when creating a mapper key or column family + * @return byte[] representation of composite key */ - public static byte[] createCompositeKey(byte[] tableName, - byte[] suffix) { + public static byte[] createCompositeKey(byte[] tableName, byte[] suffix) { return combineTableNameSuffix(tableName, suffix); } @@ -64,8 +53,7 @@ public static byte[] createCompositeKey(byte[] tableName, * Alternate api which accepts an ImmutableBytesWritable for the suffix * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[]) */ - public static byte[] createCompositeKey(byte[] tableName, - ImmutableBytesWritable suffix) { + public static byte[] createCompositeKey(byte[] tableName, ImmutableBytesWritable suffix) { return combineTableNameSuffix(tableName, suffix.get()); } @@ -74,26 +62,23 @@ public static byte[] createCompositeKey(byte[] tableName, * suffix * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[]) */ - public static byte[] createCompositeKey(String tableName, - ImmutableBytesWritable suffix) { + public static byte[] createCompositeKey(String tableName, ImmutableBytesWritable suffix) { return combineTableNameSuffix(tableName.getBytes(Charset.forName("UTF-8")), suffix.get()); } /** * Analogous to - * {@link 
HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, - * this function will configure the requisite number of reducers to write HFiles for multple - * tables simultaneously - * - * @param job See {@link org.apache.hadoop.mapreduce.Job} + * {@link HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, this + * function will configure the requisite number of reducers to write HFiles for multple tables + * simultaneously + * @param job See {@link org.apache.hadoop.mapreduce.Job} * @param multiTableDescriptors Table descriptor and region locator pairs * @throws IOException */ - public static void configureIncrementalLoad(Job job, List - multiTableDescriptors) + public static void configureIncrementalLoad(Job job, List multiTableDescriptors) throws IOException { MultiTableHFileOutputFormat.configureIncrementalLoad(job, multiTableDescriptors, - MultiTableHFileOutputFormat.class); + MultiTableHFileOutputFormat.class); } final private static int validateCompositeKey(byte[] keyBytes) { @@ -102,8 +87,8 @@ final private static int validateCompositeKey(byte[] keyBytes) { // Either the separator was not found or a tablename wasn't present or a key wasn't present if (separatorIdx == -1) { - throw new IllegalArgumentException("Invalid format for composite key [" + Bytes - .toStringBinary(keyBytes) + "]. Cannot extract tablename and suffix from key"); + throw new IllegalArgumentException("Invalid format for composite key [" + + Bytes.toStringBinary(keyBytes) + "]. Cannot extract tablename and suffix from key"); } return separatorIdx; } @@ -115,6 +100,6 @@ protected static byte[] getTableName(byte[] keyBytes) { protected static byte[] getSuffix(byte[] keyBytes) { int separatorIdx = validateCompositeKey(keyBytes); - return Bytes.copy(keyBytes, separatorIdx+1, keyBytes.length - separatorIdx - 1); + return Bytes.copy(keyBytes, separatorIdx + 1, keyBytes.length - separatorIdx - 1); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java index b69b486ba277..01fc40900062 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,16 +20,13 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Scan; +import org.apache.yetus.audience.InterfaceAudience; /** - * Convert HBase tabular data from multiple scanners into a format that - * is consumable by Map/Reduce. - * + * Convert HBase tabular data from multiple scanners into a format that is consumable by Map/Reduce. *

* Usage example *

@@ -49,13 +46,12 @@ * scan1.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, table2); * scans.add(scan2); * - * TableMapReduceUtil.initTableMapperJob(scans, TableMapper.class, Text.class, - * IntWritable.class, job); + * TableMapReduceUtil.initTableMapperJob(scans, TableMapper.class, Text.class, IntWritable.class, + * job); * */ @InterfaceAudience.Public -public class MultiTableInputFormat extends MultiTableInputFormatBase implements - Configurable { +public class MultiTableInputFormat extends MultiTableInputFormatBase implements Configurable { /** Job parameter that specifies the scan list. */ public static final String SCANS = "hbase.mapreduce.scans"; @@ -65,7 +61,6 @@ public class MultiTableInputFormat extends MultiTableInputFormatBase implements /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -75,20 +70,17 @@ public Configuration getConf() { } /** - * Sets the configuration. This is used to set the details for the tables to - * be scanned. - * + * Sets the configuration. This is used to set the details for the tables to be scanned. * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { this.conf = configuration; String[] rawScans = conf.getStrings(SCANS); if (rawScans.length <= 0) { - throw new IllegalArgumentException("There must be at least 1 scan configuration set to : " - + SCANS); + throw new IllegalArgumentException( + "There must be at least 1 scan configuration set to : " + SCANS); } List scans = new ArrayList<>(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java index 314b3a6310bf..47e431dba3b4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -47,13 +46,12 @@ import org.slf4j.LoggerFactory; /** - * A base for {@link MultiTableInputFormat}s. Receives a list of - * {@link Scan} instances that define the input tables and - * filters etc. Subclasses may use other TableRecordReader implementations. + * A base for {@link MultiTableInputFormat}s. Receives a list of {@link Scan} instances that define + * the input tables and filters etc. Subclasses may use other TableRecordReader implementations. 
*/ @InterfaceAudience.Public -public abstract class MultiTableInputFormatBase extends - InputFormat { +public abstract class MultiTableInputFormatBase + extends InputFormat { private static final Logger LOG = LoggerFactory.getLogger(MultiTableInputFormatBase.class); @@ -64,22 +62,18 @@ public abstract class MultiTableInputFormatBase extends private TableRecordReader tableRecordReader = null; /** - * Builds a TableRecordReader. If no TableRecordReader was provided, uses the - * default. - * + * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default. * @param split The split to work with. * @param context The current context. * @return The newly created record reader. * @throws IOException When creating the reader fails. * @throws InterruptedException when record reader initialization fails * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { TableSplit tSplit = (TableSplit) split; LOG.info(MessageFormat.format("Input split length: {0} bytes.", tSplit.getLength())); @@ -146,9 +140,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException { } /** - * Calculates the splits that will serve as input for the map tasks. The - * number of splits matches the number of regions in a table. - * + * Calculates the splits that will serve as input for the map tasks. The number of splits matches + * the number of regions in a table. * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. 
@@ -163,8 +156,7 @@ public List getSplits(JobContext context) throws IOException { Map> tableMaps = new HashMap<>(); for (Scan scan : scans) { byte[] tableNameBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME); - if (tableNameBytes == null) - throw new IOException("A scan object did not have a table name"); + if (tableNameBytes == null) throw new IOException("A scan object did not have a table name"); TableName tableName = TableName.valueOf(tableNameBytes); @@ -185,14 +177,14 @@ public List getSplits(JobContext context) throws IOException { TableName tableName = entry.getKey(); List scanList = entry.getValue(); try (Table table = conn.getTable(tableName); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { - RegionSizeCalculator sizeCalculator = new RegionSizeCalculator( - regionLocator, conn.getAdmin()); + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + RegionSizeCalculator sizeCalculator = + new RegionSizeCalculator(regionLocator, conn.getAdmin()); Pair keys = regionLocator.getStartEndKeys(); for (Scan scan : scanList) { if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { - throw new IOException("Expecting at least one region for table : " - + tableName.getNameAsString()); + throw new IOException( + "Expecting at least one region for table : " + tableName.getNameAsString()); } int count = 0; @@ -204,29 +196,26 @@ public List getSplits(JobContext context) throws IOException { continue; } - if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || - Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || Bytes.compareTo(stopRow, - keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? - keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || - Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? - keys.getSecond()[i] : stopRow; - - HRegionLocation hregionLocation = regionLocator.getRegionLocation( - keys.getFirst()[i], false); + if ((startRow.length == 0 || keys.getSecond()[i].length == 0 + || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) + && (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { + byte[] splitStart = + startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 + ? keys.getFirst()[i] + : startRow; + byte[] splitStop = + (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) + && keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; + + HRegionLocation hregionLocation = + regionLocator.getRegionLocation(keys.getFirst()[i], false); String regionHostname = hregionLocation.getHostname(); RegionInfo regionInfo = hregionLocation.getRegion(); String encodedRegionName = regionInfo.getEncodedName(); - long regionSize = sizeCalculator.getRegionSize( - regionInfo.getRegionName()); + long regionSize = sizeCalculator.getRegionSize(regionInfo.getRegionName()); - TableSplit split = new TableSplit(table.getName(), - scan, splitStart, splitStop, regionHostname, - encodedRegionName, regionSize); + TableSplit split = new TableSplit(table.getName(), scan, splitStart, splitStop, + regionHostname, encodedRegionName, regionSize); splits.add(split); @@ -244,29 +233,25 @@ public List getSplits(JobContext context) throws IOException { } /** - * Test if the given region is to be included in the InputSplit while - * splitting the regions of a table. 
+ * Test if the given region is to be included in the InputSplit while splitting the regions of a + * table. *
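The override hook described in this javadoc is easiest to see with a short sketch; the subclass name and checkpoint key below are hypothetical.

// Hypothetical subclass: drop regions that end at or before a remembered checkpoint
// so they never become input splits. An empty end key marks the last region of the
// table, so it is always kept.
public class CheckpointedMultiTableInputFormat extends MultiTableInputFormat {
  private static final byte[] CHECKPOINT = Bytes.toBytes("row-00042");   // illustrative

  @Override
  protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) {
    return endKey.length == 0 || Bytes.compareTo(endKey, CHECKPOINT) > 0;
  }
}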

- * This optimization is effective when there is a specific reasoning to - * exclude an entire region from the M-R job, (and hence, not contributing to - * the InputSplit), given the start and end keys of the same.
- * Useful when we need to remember the last-processed top record and revisit - * the [last, current) interval for M-R processing, continuously. In addition - * to reducing InputSplits, reduces the load on the region server as well, due - * to the ordering of the keys.
+ * This optimization is effective when there is a specific reasoning to exclude an entire region + * from the M-R job, (and hence, not contributing to the InputSplit), given the start and end keys + * of the same.
+ * Useful when we need to remember the last-processed top record and revisit the [last, current) + * interval for M-R processing, continuously. In addition to reducing InputSplits, reduces the + * load on the region server as well, due to the ordering of the keys.
+ *
+ * Note: It is possible that endKey.length() == 0 , for the last (recent) region. *
- * Note: It is possible that endKey.length() == 0 , for the last - * (recent) region.
- * Override this method, if you want to bulk exclude regions altogether from - * M-R. By default, no region is excluded( i.e. all regions are included). - * + * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no + * region is excluded( i.e. all regions are included). * @param startKey Start key of the region * @param endKey End key of the region - * @return true, if this region needs to be included as part of the input - * (default). + * @return true, if this region needs to be included as part of the input (default). */ - protected boolean includeRegionInSplit(final byte[] startKey, - final byte[] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { return true; } @@ -279,7 +264,6 @@ protected List getScans() { /** * Allows subclasses to set the list of {@link Scan} objects. - * * @param scans The list of {@link Scan} used to define the input */ protected void setScans(List scans) { @@ -288,9 +272,7 @@ protected void setScans(List scans) { /** * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader A different {@link TableRecordReader} - * implementation. + * @param tableRecordReader A different {@link TableRecordReader} implementation. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java index 2a4fae944095..ed7bc0706e85 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; @@ -32,9 +27,9 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.JobContext; @@ -42,21 +37,22 @@ import org.apache.hadoop.mapreduce.OutputFormat; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** *

- * Hadoop output format that writes to one or more HBase tables. The key is - * taken to be the table name while the output value must be either a - * {@link Put} or a {@link Delete} instance. All tables must already exist, and - * all Puts and Deletes must reference only valid column families. + * Hadoop output format that writes to one or more HBase tables. The key is taken to be the table + * name while the output value must be either a {@link Put} or a {@link Delete} instance. + * All tables must already exist, and all Puts and Deletes must reference only valid column + * families. *

- * *

- * Write-ahead logging (WAL) for Puts can be disabled by setting - * {@link #WAL_PROPERTY} to {@link #WAL_OFF}. Default value is {@link #WAL_ON}. - * Note that disabling write-ahead logging is only appropriate for jobs where - * loss of data due to region server failure can be tolerated (for example, - * because it is easy to rerun a bulk import). + * Write-ahead logging (WAL) for Puts can be disabled by setting {@link #WAL_PROPERTY} to + * {@link #WAL_OFF}. Default value is {@link #WAL_ON}. Note that disabling write-ahead logging is + * only appropriate for jobs where loss of data due to region server failure can be tolerated (for + * example, because it is easy to rerun a bulk import). *
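A compact sketch of the setup this javadoc describes, with the WAL turned off for a re-runnable bulk job; the table name, row key, and column parts are placeholders.

// Sketch: one job writing to several tables. The output key names the destination
// table; the value is a Put or Delete destined for that table.
Job job = Job.getInstance(conf, "multi-table-writer");
job.setOutputFormatClass(MultiTableOutputFormat.class);
job.getConfiguration().setBoolean(MultiTableOutputFormat.WAL_PROPERTY,
    MultiTableOutputFormat.WAL_OFF);   // acceptable only when the job can simply be rerun

// ... inside a Mapper or Reducer:
Put put = new Put(Bytes.toBytes("row-1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
context.write(new ImmutableBytesWritable(Bytes.toBytes("table1")), put);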

*/ @InterfaceAudience.Public @@ -67,11 +63,12 @@ public class MultiTableOutputFormat extends OutputFormat { + protected static class MultiTableRecordWriter + extends RecordWriter { private static final Logger LOG = LoggerFactory.getLogger(MultiTableRecordWriter.class); Connection connection; Map mutatorMap = new HashMap<>(); @@ -79,36 +76,31 @@ protected static class MultiTableRecordWriter extends boolean useWriteAheadLogging; /** - * @param conf - * HBaseConfiguration to used - * @param useWriteAheadLogging - * whether to use write ahead logging. This can be turned off ( + * @param conf HBaseConfiguration to used + * @param useWriteAheadLogging whether to use write ahead logging. This can be turned off ( * false) to improve performance when bulk loading data. */ - public MultiTableRecordWriter(Configuration conf, - boolean useWriteAheadLogging) throws IOException { - LOG.debug("Created new MultiTableRecordReader with WAL " - + (useWriteAheadLogging ? "on" : "off")); + public MultiTableRecordWriter(Configuration conf, boolean useWriteAheadLogging) + throws IOException { + LOG.debug( + "Created new MultiTableRecordReader with WAL " + (useWriteAheadLogging ? "on" : "off")); this.conf = conf; this.useWriteAheadLogging = useWriteAheadLogging; } /** - * @param tableName - * the name of the table, as a string + * @param tableName the name of the table, as a string * @return the named mutator - * @throws IOException - * if there is a problem opening a table + * @throws IOException if there is a problem opening a table */ BufferedMutator getBufferedMutator(ImmutableBytesWritable tableName) throws IOException { - if(this.connection == null){ + if (this.connection == null) { this.connection = ConnectionFactory.createConnection(conf); } if (!mutatorMap.containsKey(tableName)) { - LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get())+ "\" for writing"); + LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get()) + "\" for writing"); - BufferedMutator mutator = - connection.getBufferedMutator(TableName.valueOf(tableName.get())); + BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf(tableName.get())); mutatorMap.put(tableName, mutator); } return mutatorMap.get(tableName); @@ -126,13 +118,9 @@ public void close(TaskAttemptContext context) throws IOException { /** * Writes an action (Put or Delete) to the specified table. - * - * @param tableName - * the table being updated. - * @param action - * the update, either a put or a delete. - * @throws IllegalArgumentException - * if the action is not a put or a delete. + * @param tableName the table being updated. + * @param action the update, either a put or a delete. + * @throws IllegalArgumentException if the action is not a put or a delete. */ @Override public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException { @@ -140,21 +128,17 @@ public void write(ImmutableBytesWritable tableName, Mutation action) throws IOEx // The actions are not immutable, so we defensively copy them if (action instanceof Put) { Put put = new Put((Put) action); - put.setDurability(useWriteAheadLogging ? Durability.SYNC_WAL - : Durability.SKIP_WAL); + put.setDurability(useWriteAheadLogging ? 
Durability.SYNC_WAL : Durability.SKIP_WAL); mutator.mutate(put); } else if (action instanceof Delete) { Delete delete = new Delete((Delete) action); mutator.mutate(delete); - } else - throw new IllegalArgumentException( - "action must be either Delete or Put"); + } else throw new IllegalArgumentException("action must be either Delete or Put"); } } @Override - public void checkOutputSpecs(JobContext context) throws IOException, - InterruptedException { + public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException { // we can't know ahead of time if it's going to blow up when the user // passes a table name that doesn't exist, so nothing useful here. } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java index fa7129030402..99a8054c8dff 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,44 +15,35 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; +import org.apache.yetus.audience.InterfaceAudience; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * MultiTableSnapshotInputFormat generalizes - * {@link TableSnapshotInputFormat} - * allowing a MapReduce job to run over one or more table snapshots, with one or more scans - * configured for each. - * Internally, the input format delegates to - * {@link TableSnapshotInputFormat} - * and thus has the same performance advantages; - * see {@link TableSnapshotInputFormat} for - * more details. - * Usage is similar to TableSnapshotInputFormat, with the following exception: - * initMultiTableSnapshotMapperJob takes in a map - * from snapshot name to a collection of scans. For each snapshot in the map, each corresponding - * scan will be applied; - * the overall dataset for the job is defined by the concatenation of the regions and tables - * included in each snapshot/scan - * pair. - * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob - * (java.util.Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, org.apache - * .hadoop.fs.Path)} + * MultiTableSnapshotInputFormat generalizes {@link TableSnapshotInputFormat} allowing a MapReduce + * job to run over one or more table snapshots, with one or more scans configured for each. 
+ * Internally, the input format delegates to {@link TableSnapshotInputFormat} and thus has the same + * performance advantages; see {@link TableSnapshotInputFormat} for more details. Usage is similar + * to TableSnapshotInputFormat, with the following exception: initMultiTableSnapshotMapperJob takes + * in a map from snapshot name to a collection of scans. For each snapshot in the map, each + * corresponding scan will be applied; the overall dataset for the job is defined by the + * concatenation of the regions and tables included in each snapshot/scan pair. + * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob (java.util.Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, org.apache .hadoop.fs.Path)} * can be used to configure the job. - *
{@code
+ * 
+ * 
+ * {@code
  * Job job = new Job(conf);
  * Map<String, Collection<Scan>> snapshotScans = ImmutableMap.of(
  *    "snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("a"), Bytes.toBytes("b"))),
@@ -64,14 +55,11 @@
  *      MyMapOutputValueWritable.class, job, true, restoreDir);
  * }
  * 
+ * * Internally, this input format restores each snapshot into a subdirectory of the given tmp - * directory. Input splits and - * record readers are created as described in - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * (one per region). - * See {@link TableSnapshotInputFormat} for more notes on - * permissioning; the same caveats apply here. - * + * directory. Input splits and record readers are created as described in + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} (one per region). See + * {@link TableSnapshotInputFormat} for more notes on permissioning; the same caveats apply here. * @see TableSnapshotInputFormat * @see org.apache.hadoop.hbase.client.TableSnapshotScanner */ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java index 866fef86f9bb..be7b1672b901 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; @@ -42,8 +41,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * Shared implementation of mapreduce code over multiple table snapshots. - * Utilized by both mapreduce + * Shared implementation of mapreduce code over multiple table snapshots. Utilized by both mapreduce * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormat} and mapred * {@link org.apache.hadoop.hbase.mapred.MultiTableSnapshotInputFormat} implementations. */ @@ -78,9 +76,7 @@ public void setInput(Configuration conf, Map> snapshotS /** * Return the list of splits extracted from the scans/snapshots pushed to conf by - * {@link - * #setInput(org.apache.hadoop.conf.Configuration, java.util.Map, org.apache.hadoop.fs.Path)} - * + * {@link #setInput(org.apache.hadoop.conf.Configuration, java.util.Map, org.apache.hadoop.fs.Path)} * @param conf Configuration to determine splits from * @return Return the list of splits extracted from the scans/snapshots pushed to conf * @throws IOException @@ -116,7 +112,6 @@ public List getSplits(Configuration con /** * Retrieve the snapshot name -> list<scan> mapping pushed to configuration by * {@link #setSnapshotToScans(org.apache.hadoop.conf.Configuration, java.util.Map)} - * * @param conf Configuration to extract name -> list<scan> mappings from. 
* @return the snapshot name -> list<scan> mapping pushed to configuration * @throws IOException @@ -125,8 +120,8 @@ public Map> getSnapshotsToScans(Configuration conf) thr Map> rtn = Maps.newHashMap(); - for (Map.Entry entry : ConfigurationUtil - .getKeyValues(conf, SNAPSHOT_TO_SCANS_KEY)) { + for (Map.Entry entry : ConfigurationUtil.getKeyValues(conf, + SNAPSHOT_TO_SCANS_KEY)) { String snapshotName = entry.getKey(); String scan = entry.getValue(); @@ -144,7 +139,6 @@ public Map> getSnapshotsToScans(Configuration conf) thr /** * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) - * * @param conf * @param snapshotScans * @throws IOException @@ -171,7 +165,6 @@ public void setSnapshotToScans(Configuration conf, Map> /** * Retrieve the directories into which snapshots have been restored from * ({@link #RESTORE_DIRS_KEY}) - * * @param conf Configuration to extract restore directories from * @return the directories into which snapshots have been restored from * @throws IOException @@ -198,10 +191,9 @@ public void setSnapshotDirs(Configuration conf, Map snapshotDirs) } /** - * Generate a random path underneath baseRestoreDir for each snapshot in snapshots and - * return a map from the snapshot to the restore directory. - * - * @param snapshots collection of snapshot names to restore + * Generate a random path underneath baseRestoreDir for each snapshot in snapshots and return a + * map from the snapshot to the restore directory. + * @param snapshots collection of snapshot names to restore * @param baseRestoreDir base directory under which all snapshots in snapshots will be restored * @return a mapping from snapshot name to the directory in which that snapshot has been restored */ @@ -220,10 +212,9 @@ private Map generateSnapshotToRestoreDirMapping(Collection /** * Restore each (snapshot name, restore directory) pair in snapshotToDir - * - * @param conf configuration to restore with + * @param conf configuration to restore with * @param snapshotToDir mapping from snapshot names to restore directories - * @param fs filesystem to do snapshot restoration on + * @param fs filesystem to do snapshot restoration on */ public void restoreSnapshots(Configuration conf, Map snapshotToDir, FileSystem fs) throws IOException { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java index ca82e2a58ee9..16f45816552a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,25 +42,23 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Multithreaded implementation for @link org.apache.hbase.mapreduce.TableMapper *

- * It can be used instead when the Map operation is not CPU - * bound in order to improve throughput. + * It can be used instead when the Map operation is not CPU bound in order to improve throughput. *

* Mapper implementations using this MapRunnable must be thread-safe. *

- * The Map-Reduce job has to be configured with the mapper to use via - * {@link #setMapperClass} and the number of thread the thread-pool can use with the - * {@link #getNumberOfThreads} method. The default value is 10 threads. + * The Map-Reduce job has to be configured with the mapper to use via {@link #setMapperClass} and + * the number of thread the thread-pool can use with the {@link #getNumberOfThreads} method. The + * default value is 10 threads. *
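In practice the configuration described above looks roughly like the following; MyThreadSafeMapper stands in for a real, thread-safe TableMapper implementation.

// Sketch: wrap a thread-safe mapper so each input split is processed by a pool of
// worker threads instead of a single map thread.
TableMapReduceUtil.initTableMapperJob("mytable", new Scan(),
    MultithreadedTableMapper.class, Text.class, IntWritable.class, job);
MultithreadedTableMapper.setMapperClass(job, MyThreadSafeMapper.class);
MultithreadedTableMapper.setNumberOfThreads(job, 16);   // default is 10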

*/ @InterfaceAudience.Private public class MultithreadedTableMapper extends TableMapper { private static final Logger LOG = LoggerFactory.getLogger(MultithreadedTableMapper.class); - private Class> mapClass; + private Class> mapClass; private Context outer; private ExecutorService executor; public static final String NUMBER_OF_THREADS = "hbase.mapreduce.multithreadedmapper.threads"; @@ -72,8 +70,7 @@ public class MultithreadedTableMapper extends TableMapper { * @return the number of threads */ public static int getNumberOfThreads(JobContext job) { - return job.getConfiguration(). - getInt(NUMBER_OF_THREADS, 10); + return job.getConfiguration().getInt(NUMBER_OF_THREADS, 10); } /** @@ -82,8 +79,7 @@ public static int getNumberOfThreads(JobContext job) { * @param threads the new number of threads */ public static void setNumberOfThreads(Job job, int threads) { - job.getConfiguration().setInt(NUMBER_OF_THREADS, - threads); + job.getConfiguration().setInt(NUMBER_OF_THREADS, threads); } /** @@ -94,11 +90,10 @@ public static void setNumberOfThreads(Job job, int threads) { * @return the mapper class to run */ @SuppressWarnings("unchecked") - public static - Class> getMapperClass(JobContext job) { - return (Class>) - job.getConfiguration().getClass( MAPPER_CLASS, - Mapper.class); + public static Class> + getMapperClass(JobContext job) { + return (Class>) job.getConfiguration() + .getClass(MAPPER_CLASS, Mapper.class); } /** @@ -108,15 +103,13 @@ Class> getMapperClass(JobContext jo * @param job the job to modify * @param cls the class to use as the mapper */ - public static - void setMapperClass(Job job, - Class> cls) { + public static void setMapperClass(Job job, + Class> cls) { if (MultithreadedTableMapper.class.isAssignableFrom(cls)) { - throw new IllegalArgumentException("Can't have recursive " + - "MultithreadedTableMapper instances."); + throw new IllegalArgumentException( + "Can't have recursive " + "MultithreadedTableMapper instances."); } - job.getConfiguration().setClass(MAPPER_CLASS, - cls, Mapper.class); + job.getConfiguration().setClass(MAPPER_CLASS, cls, Mapper.class); } /** @@ -128,11 +121,10 @@ public void run(Context context) throws IOException, InterruptedException { int numberOfThreads = getNumberOfThreads(context); mapClass = getMapperClass(context); if (LOG.isDebugEnabled()) { - LOG.debug("Configuring multithread runner to use " + numberOfThreads + - " threads"); + LOG.debug("Configuring multithread runner to use " + numberOfThreads + " threads"); } executor = Executors.newFixedThreadPool(numberOfThreads); - for(int i=0; i < numberOfThreads; ++i) { + for (int i = 0; i < numberOfThreads; ++i) { MapRunner thread = new MapRunner(context); executor.execute(thread); } @@ -143,8 +135,7 @@ public void run(Context context) throws IOException, InterruptedException { } } - private class SubMapRecordReader - extends RecordReader { + private class SubMapRecordReader extends RecordReader { private ImmutableBytesWritable key; private Result value; private Configuration conf; @@ -159,9 +150,8 @@ public float getProgress() throws IOException, InterruptedException { } @Override - public void initialize(InputSplit split, - TaskAttemptContext context - ) throws IOException, InterruptedException { + public void initialize(InputSplit split, TaskAttemptContext context) + throws IOException, InterruptedException { conf = context.getConfiguration(); } @@ -171,8 +161,7 @@ public boolean nextKeyValue() throws IOException, InterruptedException { if (!outer.nextKeyValue()) { return false; } - key = 
ReflectionUtils.copy(outer.getConfiguration(), - outer.getCurrentKey(), key); + key = ReflectionUtils.copy(outer.getConfiguration(), outer.getCurrentKey(), key); value = ReflectionUtils.copy(conf, outer.getCurrentValue(), value); return true; } @@ -188,16 +177,14 @@ public Result getCurrentValue() { } } - private class SubMapRecordWriter extends RecordWriter { + private class SubMapRecordWriter extends RecordWriter { @Override - public void close(TaskAttemptContext context) throws IOException, - InterruptedException { + public void close(TaskAttemptContext context) throws IOException, InterruptedException { } @Override - public void write(K2 key, V2 value) throws IOException, - InterruptedException { + public void write(K2 key, V2 value) throws IOException, InterruptedException { synchronized (outer) { outer.write(key, value); } @@ -231,59 +218,37 @@ public float getProgress() { } } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION", - justification="Don't understand why FB is complaining about this one. We do throw exception") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", + justification = "Don't understand why FB is complaining about this one. We do throw exception") private class MapRunner implements Runnable { - private Mapper mapper; + private Mapper mapper; private Context subcontext; @SuppressWarnings({ "rawtypes", "unchecked" }) MapRunner(Context context) throws IOException, InterruptedException { - mapper = ReflectionUtils.newInstance(mapClass, - context.getConfiguration()); + mapper = ReflectionUtils.newInstance(mapClass, context.getConfiguration()); try { - Constructor c = context.getClass().getConstructor( - Mapper.class, - Configuration.class, - TaskAttemptID.class, - RecordReader.class, - RecordWriter.class, - OutputCommitter.class, - StatusReporter.class, - InputSplit.class); + Constructor c = context.getClass().getConstructor(Mapper.class, Configuration.class, + TaskAttemptID.class, RecordReader.class, RecordWriter.class, OutputCommitter.class, + StatusReporter.class, InputSplit.class); c.setAccessible(true); - subcontext = (Context) c.newInstance( - mapper, - outer.getConfiguration(), - outer.getTaskAttemptID(), - new SubMapRecordReader(), - new SubMapRecordWriter(), - context.getOutputCommitter(), - new SubMapStatusReporter(), - outer.getInputSplit()); + subcontext = (Context) c.newInstance(mapper, outer.getConfiguration(), + outer.getTaskAttemptID(), new SubMapRecordReader(), new SubMapRecordWriter(), + context.getOutputCommitter(), new SubMapStatusReporter(), outer.getInputSplit()); } catch (Exception e) { try { - Constructor c = Class.forName("org.apache.hadoop.mapreduce.task.MapContextImpl").getConstructor( - Configuration.class, - TaskAttemptID.class, - RecordReader.class, - RecordWriter.class, - OutputCommitter.class, - StatusReporter.class, - InputSplit.class); + Constructor c = Class.forName("org.apache.hadoop.mapreduce.task.MapContextImpl") + .getConstructor(Configuration.class, TaskAttemptID.class, RecordReader.class, + RecordWriter.class, OutputCommitter.class, StatusReporter.class, InputSplit.class); c.setAccessible(true); - MapContext mc = (MapContext) c.newInstance( - outer.getConfiguration(), - outer.getTaskAttemptID(), - new SubMapRecordReader(), - new SubMapRecordWriter(), - context.getOutputCommitter(), - new SubMapStatusReporter(), - outer.getInputSplit()); - Class wrappedMapperClass = Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper"); + MapContext mc = (MapContext) 
c.newInstance(outer.getConfiguration(), + outer.getTaskAttemptID(), new SubMapRecordReader(), new SubMapRecordWriter(), + context.getOutputCommitter(), new SubMapStatusReporter(), outer.getInputSplit()); + Class wrappedMapperClass = + Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper"); Method getMapContext = wrappedMapperClass.getMethod("getMapContext", MapContext.class); - subcontext = (Context) getMapContext.invoke( - wrappedMapperClass.getDeclaredConstructor().newInstance(), mc); + subcontext = (Context) getMapContext + .invoke(wrappedMapperClass.getDeclaredConstructor().newInstance(), mc); } catch (Exception ee) { // FindBugs: REC_CATCH_EXCEPTION // rethrow as IOE throw new IOException(e); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java index 7859afa496c4..63ed8d1fdc15 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,17 +20,17 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; @InterfaceAudience.Public public class MutationSerialization implements Serialization { @@ -69,6 +69,7 @@ public void open(InputStream in) throws IOException { } } + private static class MutationSerializer implements Serializer { private OutputStream out; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java index 317b328df782..4a56b3d2fe63 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
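Note: a minimal sketch of how the MultithreadedTableMapper hooks touched above (setMapperClass, setNumberOfThreads, the hbase.mapreduce.multithreadedmapper.threads property) are typically wired from a job driver. The table name "orders" and OrderMapper are hypothetical names used only for illustration.

  Configuration conf = HBaseConfiguration.create();
  Job job = Job.getInstance(conf, "multithreaded-scan");
  // Register the multithreaded wrapper as the job's mapper ...
  TableMapReduceUtil.initTableMapperJob("orders", new Scan(),
    MultithreadedTableMapper.class, ImmutableBytesWritable.class, Result.class, job);
  // ... then tell it which real mapper to fan out to and with how many threads
  // (the default of 10 is read from hbase.mapreduce.multithreadedmapper.threads).
  MultithreadedTableMapper.setMapperClass(job, OrderMapper.class);
  MultithreadedTableMapper.setNumberOfThreads(job, 20);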
See the NOTICE file * distributed with this work for additional information @@ -20,21 +19,19 @@ import java.io.IOException; import java.util.List; -import java.util.Map.Entry; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.Map.Entry; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Combine Puts. Merges Put instances grouped by K into a single - * instance. + * Combine Puts. Merges Put instances grouped by K into a single instance. * @see TableMapReduceUtil */ @InterfaceAudience.Public @@ -49,8 +46,8 @@ protected void reduce(K row, Iterable vals, Context context) // flush could result in multiple Puts for a single rowkey. That is // acceptable because Combiner is run as an optimization and it's not // critical that all Puts are grouped perfectly. - long threshold = context.getConfiguration().getLong( - "putcombiner.row.threshold", 1L * (1<<30)); + long threshold = + context.getConfiguration().getLong("putcombiner.row.threshold", 1L * (1 << 30)); int cnt = 0; long curSize = 0; Put put = null; @@ -61,8 +58,7 @@ protected void reduce(K row, Iterable vals, Context context) put = p; familyMap = put.getFamilyCellMap(); } else { - for (Entry> entry : p.getFamilyCellMap() - .entrySet()) { + for (Entry> entry : p.getFamilyCellMap().entrySet()) { List cells = familyMap.get(entry.getKey()); List kvs = (cells != null) ? (List) cells : null; for (Cell cell : entry.getValue()) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java index f4ad1f25fe4b..45b43e0c7545 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.Iterator; import java.util.List; import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; @@ -33,7 +31,6 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -41,18 +38,17 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted Puts. - * Reads in all Puts from passed Iterator, sorts them, then emits - * Puts in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted Puts. Reads in all Puts from passed Iterator, sorts them, then emits Puts in sorted + * order. If lots of columns per row, it will use lots of memory sorting. 
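Note: both PutCombiner and PutSortReducer flush a row's accumulated cells once they pass a byte threshold that defaults to 1 GB (1L * (1 << 30)). A sketch of lowering it for memory-constrained tasks; the 256 MB figure is only an example, and job is assumed to be an existing Job.

  Configuration conf = job.getConfiguration();
  // Flush buffered cells for a row after ~256 MB instead of the 1 GB default.
  conf.setLong("putcombiner.row.threshold", 256L * 1024 * 1024);
  conf.setLong("putsortreducer.row.threshold", 256L * 1024 * 1024);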
* @see HFileOutputFormat2 * @see CellSortReducer */ @InterfaceAudience.Public -public class PutSortReducer extends - Reducer { +public class PutSortReducer + extends Reducer { // the cell creator private CellCreator kvCreator; @@ -65,16 +61,12 @@ public class PutSortReducer extends } @Override - protected void reduce( - ImmutableBytesWritable row, - java.lang.Iterable puts, - Reducer.Context context) - throws java.io.IOException, InterruptedException - { + protected void reduce(ImmutableBytesWritable row, java.lang.Iterable puts, + Reducer.Context context) + throws java.io.IOException, InterruptedException { // although reduce() is called per-row, handle pathological case - long threshold = context.getConfiguration().getLong( - "putsortreducer.row.threshold", 1L * (1<<30)); + long threshold = + context.getConfiguration().getLong("putsortreducer.row.threshold", 1L * (1 << 30)); Iterator iter = puts.iterator(); while (iter.hasNext()) { TreeSet map = new TreeSet<>(CellComparator.getInstance()); @@ -107,8 +99,8 @@ protected void reduce( // just ignoring the bad one? throw new IOException("Invalid visibility expression found in mutation " + p, e); } - for (List cells: p.getFamilyCellMap().values()) { - for (Cell cell: cells) { + for (List cells : p.getFamilyCellMap().values()) { + for (Cell cell : cells) { // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. KeyValue kv = null; @@ -128,13 +120,12 @@ protected void reduce( } } } - context.setStatus("Read " + map.size() + " entries of " + map.getClass() - + "(" + StringUtils.humanReadableInt(curSize) + ")"); + context.setStatus("Read " + map.size() + " entries of " + map.getClass() + "(" + + StringUtils.humanReadableInt(curSize) + ")"); int index = 0; for (KeyValue kv : map) { context.write(row, kv); - if (++index % 100 == 0) - context.setStatus("Wrote " + index); + if (++index % 100 == 0) context.setStatus("Wrote " + index); } // if we have more entries to process diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java index 40cd34f3844a..5e5e3b5ad9b5 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,9 +38,9 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * Computes size of each region for given table and given column families. - * The value is used by MapReduce for better scheduling. - * */ + * Computes size of each region for given table and given column families. The value is used by + * MapReduce for better scheduling. + */ @InterfaceAudience.Private public class RegionSizeCalculator { @@ -48,7 +48,7 @@ public class RegionSizeCalculator { /** * Maps each region to its size in bytes. - * */ + */ private final Map sizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); static final String ENABLE_REGIONSIZECALCULATOR = "hbase.regionsizecalculator.enable"; @@ -56,13 +56,12 @@ public class RegionSizeCalculator { /** * Computes size of each region for table and given column families. 
- * */ + */ public RegionSizeCalculator(RegionLocator regionLocator, Admin admin) throws IOException { init(regionLocator, admin); } - private void init(RegionLocator regionLocator, Admin admin) - throws IOException { + private void init(RegionLocator regionLocator, Admin admin) throws IOException { if (!enabled(admin.getConfiguration())) { LOG.info("Region size calculation disabled."); return; @@ -79,12 +78,12 @@ private void init(RegionLocator regionLocator, Admin admin) Set tableServers = getRegionServersOfTable(regionLocator); for (ServerName tableServerName : tableServers) { - for (RegionMetrics regionLoad : admin.getRegionMetrics( - tableServerName,regionLocator.getName())) { + for (RegionMetrics regionLoad : admin.getRegionMetrics(tableServerName, + regionLocator.getName())) { byte[] regionId = regionLoad.getRegionName(); - long regionSizeBytes - = ((long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE; + long regionSizeBytes = + ((long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE; sizeMap.put(regionId, regionSizeBytes); @@ -96,8 +95,7 @@ private void init(RegionLocator regionLocator, Admin admin) LOG.debug("Region sizes calculated"); } - private Set getRegionServersOfTable(RegionLocator regionLocator) - throws IOException { + private Set getRegionServersOfTable(RegionLocator regionLocator) throws IOException { Set tableServers = Sets.newHashSet(); for (HRegionLocation regionLocation : regionLocator.getAllRegionLocations()) { @@ -112,7 +110,7 @@ boolean enabled(Configuration configuration) { /** * Returns size of given region in bytes. Returns 0 if region was not found. - * */ + */ public long getRegionSize(byte[] regionId) { Long size = sizeMap.get(regionId); if (size == null) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java index 9fdaa7b78f75..782621e120af 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
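Note: a sketch of RegionSizeCalculator used the same way the input formats use it, built from a RegionLocator plus Admin and queried by region name. The table name is hypothetical, conf is an existing Configuration, and the class is @InterfaceAudience.Private, so this is illustration rather than supported public API.

  try (Connection connection = ConnectionFactory.createConnection(conf);
      Admin admin = connection.getAdmin();
      RegionLocator locator = connection.getRegionLocator(TableName.valueOf("orders"))) {
    RegionSizeCalculator sizes = new RegionSizeCalculator(locator, admin);
    for (HRegionLocation location : locator.getAllRegionLocations()) {
      byte[] regionName = location.getRegion().getRegionName();
      // getRegionSize returns 0 for regions it has no metrics for
      LOG.info("{} is ~{} bytes", location.getRegion().getRegionNameAsString(),
        sizes.getRegionSize(regionName));
    }
  }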
See the NOTICE file * distributed with this work for additional information @@ -24,21 +24,21 @@ import java.io.OutputStream; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @InterfaceAudience.Public public class ResultSerialization extends Configured implements Serialization { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java index 2427e909ff23..5f6233f286ed 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java @@ -50,11 +50,11 @@ public class RoundRobinTableInputFormat extends TableInputFormat { private Boolean hbaseRegionsizecalculatorEnableOriginalValue = null; /** * Boolean config for whether superclass should produce InputSplits with 'lengths'. If true, TIF - * will query every RegionServer to get the 'size' of all involved Regions and this 'size' will - * be used the the InputSplit length. If false, we skip this query and the super-classes - * returned InputSplits will have lenghths of zero. This override will set the flag to false. - * All returned lengths will be zero. Makes it so sorting on 'length' becomes a noop. The sort - * returned by this override will prevail. Thats what we want. + * will query every RegionServer to get the 'size' of all involved Regions and this 'size' will be + * used the the InputSplit length. If false, we skip this query and the super-classes returned + * InputSplits will have lenghths of zero. This override will set the flag to false. All returned + * lengths will be zero. Makes it so sorting on 'length' becomes a noop. The sort returned by this + * override will prevail. Thats what we want. */ static String HBASE_REGIONSIZECALCULATOR_ENABLE = "hbase.regionsizecalculator.enable"; @@ -116,26 +116,26 @@ List roundRobin(List inputs) throws IOException { } /** - * Adds a configuration to the Context disabling remote rpc'ing to figure Region size - * when calculating InputSplits. See up in super-class TIF where we rpc to every server to find - * the size of all involved Regions. Here we disable this super-class action. This means - * InputSplits will have a length of zero. If all InputSplits have zero-length InputSplits, the - * ordering done in here will 'pass-through' Hadoop's length-first sort. The superclass TIF will - * ask every node for the current size of each of the participating Table Regions. 
It does this - * because it wants to schedule the biggest Regions first (This fixation comes of hadoop itself - * -- see JobSubmitter where it sorts inputs by size). This extra diligence takes time and is of - * no utility in this RRTIF where spread is of more import than size-first. Also, if a rolling - * restart is happening when we go to launch the job, the job launch may fail because the request - * for Region size fails -- even after retries -- because rolled RegionServer may take a while to - * come online: e.g. it takes java 90 seconds to allocate a 160G. RegionServer is offline during - * this time. The job launch will fail with 'Connection rejected'. So, we set - * 'hbase.regionsizecalculator.enable' to false here in RRTIF. + * Adds a configuration to the Context disabling remote rpc'ing to figure Region size when + * calculating InputSplits. See up in super-class TIF where we rpc to every server to find the + * size of all involved Regions. Here we disable this super-class action. This means InputSplits + * will have a length of zero. If all InputSplits have zero-length InputSplits, the ordering done + * in here will 'pass-through' Hadoop's length-first sort. The superclass TIF will ask every node + * for the current size of each of the participating Table Regions. It does this because it wants + * to schedule the biggest Regions first (This fixation comes of hadoop itself -- see JobSubmitter + * where it sorts inputs by size). This extra diligence takes time and is of no utility in this + * RRTIF where spread is of more import than size-first. Also, if a rolling restart is happening + * when we go to launch the job, the job launch may fail because the request for Region size fails + * -- even after retries -- because rolled RegionServer may take a while to come online: e.g. it + * takes java 90 seconds to allocate a 160G. RegionServer is offline during this time. The job + * launch will fail with 'Connection rejected'. So, we set 'hbase.regionsizecalculator.enable' to + * false here in RRTIF. * @see #unconfigure() */ void configure() { if (getConf().get(HBASE_REGIONSIZECALCULATOR_ENABLE) != null) { - this.hbaseRegionsizecalculatorEnableOriginalValue = getConf(). - getBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, true); + this.hbaseRegionsizecalculatorEnableOriginalValue = + getConf().getBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, true); } getConf().setBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, false); } @@ -165,7 +165,7 @@ public static void main(String[] args) throws IOException { configuration.set(TableInputFormat.INPUT_TABLE, args[0]); tif.setConf(configuration); List splits = tif.getSplits(new JobContextImpl(configuration, new JobID())); - for (InputSplit split: splits) { + for (InputSplit split : splits) { System.out.println(split); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java index 9c3ab4801f56..138b92c95bf2 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
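Note: a sketch of swapping the round-robin variant into an otherwise standard scan job, per the javadoc above. "orders" and MyMapper are hypothetical, and conf is an existing Configuration.

  Job job = Job.getInstance(conf, "round-robin-scan");
  TableMapReduceUtil.initTableMapperJob("orders", new Scan(), MyMapper.class,
    ImmutableBytesWritable.class, Result.class, job);
  // Prefer split spread over size-ordering; RoundRobinTableInputFormat also sets
  // hbase.regionsizecalculator.enable=false itself while computing splits.
  job.setInputFormatClass(RoundRobinTableInputFormat.class);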
See the NOTICE file * distributed with this work for additional information @@ -19,37 +18,37 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import java.util.List; import java.util.ArrayList; - +import java.util.List; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.util.AbstractHBaseTool; -import org.apache.hbase.thirdparty.com.google.common.base.Splitter; -import org.apache.hbase.thirdparty.org.apache.commons.cli.BasicParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.hbase.thirdparty.org.apache.commons.cli.MissingOptionException; -import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.filter.MultiRowRangeFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.org.apache.commons.cli.BasicParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.hbase.thirdparty.org.apache.commons.cli.MissingOptionException; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; /** - * A job with a just a map phase to count rows. Map outputs table rows IF the - * input row has columns that have content. + * A job with a just a map phase to count rows. Map outputs table rows IF the input row has columns + * that have content. */ @InterfaceAudience.Public public class RowCounter extends AbstractHBaseTool { @@ -77,25 +76,23 @@ public class RowCounter extends AbstractHBaseTool { /** * Mapper that runs the count. */ - static class RowCounterMapper - extends TableMapper { + static class RowCounterMapper extends TableMapper { /** Counter enumeration to count the actual rows. */ - public static enum Counters {ROWS} + public static enum Counters { + ROWS + } /** * Maps the data. - * - * @param row The current table row key. - * @param values The columns. - * @param context The current context. + * @param row The current table row key. + * @param values The columns. + * @param context The current context. * @throws IOException When something is broken with the data. 
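Note: for reference, a standalone mapper in the same shape as RowCounterMapper; the class name is made up. It only bumps a counter per mapped row, so it never emits key/value output.

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
  import org.apache.hadoop.hbase.mapreduce.TableMapper;

  public class MyRowCountMapper extends TableMapper<ImmutableBytesWritable, Result> {
    public enum Counters { ROWS }

    @Override
    public void map(ImmutableBytesWritable row, Result values, Context context)
      throws IOException {
      // every invocation is one row that had at least one cell
      context.getCounter(Counters.ROWS).increment(1);
    }
  }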
* @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, Context) */ @Override - public void map(ImmutableBytesWritable row, Result values, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException { // Count every row containing data, whether it's in qualifiers or values context.getCounter(Counters.ROWS).increment(1); } @@ -103,8 +100,7 @@ public void map(ImmutableBytesWritable row, Result values, /** * Sets up the actual job. - * - * @param conf The current configuration. + * @param conf The current configuration. * @return The newly created job. * @throws IOException When setting up the job fails. */ @@ -125,30 +121,28 @@ public Job createSubmittableJob(Configuration conf) throws IOException { } } - if(this.expectedCount >= 0) { + if (this.expectedCount >= 0) { conf.setLong(EXPECTED_COUNT_KEY, this.expectedCount); } scan.setTimeRange(startTime, endTime); job.setOutputFormatClass(NullOutputFormat.class); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setNumReduceTasks(0); return job; } /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. * @deprecated as of release 2.3.0. Will be removed on 4.0.0. Please use main method instead. */ @Deprecated - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; List rowRangeList = null; long startTime = 0; @@ -166,7 +160,7 @@ public static Job createSubmittableJob(Configuration conf, String[] args) if (args[i].startsWith(rangeSwitch)) { try { rowRangeList = parseRowRangeParameter( - args[i].substring(args[1].indexOf(rangeSwitch)+rangeSwitch.length())); + args[i].substring(args[1].indexOf(rangeSwitch) + rangeSwitch.length())); } catch (IllegalArgumentException e) { return null; } @@ -206,58 +200,55 @@ public static Job createSubmittableJob(Configuration conf, String[] args) if (StringUtils.isBlank(qualifier)) { scan.addFamily(Bytes.toBytes(family)); - } - else { + } else { scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier)); } } } scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime); job.setOutputFormatClass(NullOutputFormat.class); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setNumReduceTasks(0); return job; } /** - * Prints usage without error message. - * Note that we don't document --expected-count, because it's intended for test. + * Prints usage without error message. Note that we don't document --expected-count, because it's + * intended for test. 
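Note: a sketch of the matching driver wiring, mirroring createSubmittableJob above: map-only, NullOutputFormat, counters as the only output. "orders" and MyRowCountMapper are the hypothetical names from the previous sketch, and conf is an existing Configuration.

  Job job = Job.getInstance(conf, "row-count");
  Scan scan = new Scan();
  scan.setCaching(500);        // fewer RPCs for a pure scan
  scan.setCacheBlocks(false);  // don't churn the region server block cache
  TableMapReduceUtil.initTableMapperJob("orders", scan, MyRowCountMapper.class,
    ImmutableBytesWritable.class, Result.class, job);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setNumReduceTasks(0);
  if (job.waitForCompletion(true)) {
    long rows = job.getCounters()
      .findCounter(MyRowCountMapper.Counters.ROWS).getValue();
    System.out.println("rows=" + rows);
  }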
*/ private static void printUsage(String errorMessage) { System.err.println("ERROR: " + errorMessage); - System.err.println("Usage: hbase rowcounter [options] " - + "[--starttime= --endtime=] " - + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [ ...]"); + System.err.println( + "Usage: hbase rowcounter [options] " + "[--starttime= --endtime=] " + + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [ ...]"); System.err.println("For performance consider the following options:\n" - + "-Dhbase.client.scanner.caching=100\n" - + "-Dmapreduce.map.speculative=false"); + + "-Dhbase.client.scanner.caching=100\n" + "-Dmapreduce.map.speculative=false"); } private static List parseRowRangeParameter(String arg) { final List rangesSplit = Splitter.on(";").splitToList(arg); final List rangeList = new ArrayList<>(); for (String range : rangesSplit) { - if(range!=null && !range.isEmpty()) { + if (range != null && !range.isEmpty()) { List startEnd = Splitter.on(",").splitToList(range); if (startEnd.size() != 2 || startEnd.get(1).contains(",")) { throw new IllegalArgumentException("Wrong range specification: " + range); } String startKey = startEnd.get(0); String endKey = startEnd.get(1); - rangeList.add(new MultiRowRangeFilter.RowRange(Bytes.toBytesBinary(startKey), - true, Bytes.toBytesBinary(endKey), false)); + rangeList.add(new MultiRowRangeFilter.RowRange(Bytes.toBytesBinary(startKey), true, + Bytes.toBytesBinary(endKey), false)); } } return rangeList; } /** - * Sets filter {@link FilterBase} to the {@link Scan} instance. - * If provided rowRangeList contains more than one element, - * method sets filter which is instance of {@link MultiRowRangeFilter}. - * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. - * If rowRangeList contains exactly one element, startRow and stopRow are set to the scan. + * Sets filter {@link FilterBase} to the {@link Scan} instance. If provided rowRangeList contains + * more than one element, method sets filter which is instance of {@link MultiRowRangeFilter}. + * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. If rowRangeList + * contains exactly one element, startRow and stopRow are set to the scan. * @param scan * @param rowRangeList */ @@ -268,8 +259,8 @@ private static void setScanFilter(Scan scan, List } if (size == 1) { MultiRowRangeFilter.RowRange range = rowRangeList.get(0); - scan.withStartRow(range.getStartRow()); //inclusive - scan.withStopRow(range.getStopRow()); //exclusive + scan.withStartRow(range.getStartRow()); // inclusive + scan.withStopRow(range.getStopRow()); // exclusive } else if (size > 1) { scan.setFilter(new MultiRowRangeFilter(rowRangeList)); } @@ -281,8 +272,8 @@ protected void printUsage() { footerBuilder.append("For performance, consider the following configuration properties:\n"); footerBuilder.append("-Dhbase.client.scanner.caching=100\n"); footerBuilder.append("-Dmapreduce.map.speculative=false\n"); - printUsage("hbase rowcounter [options] [ ...]", - "Options:", footerBuilder.toString()); + printUsage("hbase rowcounter [options] [ ...]", "Options:", + footerBuilder.toString()); } @Override @@ -297,15 +288,15 @@ protected void printUsage(final String usageStr, final String usageHeader, @Override protected void addOptions() { - Option startTimeOption = Option.builder(null).valueSeparator('=').hasArg(true). 
- desc("starting time filter to start counting rows from.").longOpt(OPT_START_TIME).build(); - Option endTimeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("end time filter limit, to only count rows up to this timestamp."). - longOpt(OPT_END_TIME).build(); - Option rangeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("[startKey],[endKey][;[startKey],[endKey]...]]").longOpt(OPT_RANGE).build(); - Option expectedOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("expected number of rows to be count.").longOpt(OPT_EXPECTED_COUNT).build(); + Option startTimeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("starting time filter to start counting rows from.").longOpt(OPT_START_TIME).build(); + Option endTimeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("end time filter limit, to only count rows up to this timestamp.") + .longOpt(OPT_END_TIME).build(); + Option rangeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("[startKey],[endKey][;[startKey],[endKey]...]]").longOpt(OPT_RANGE).build(); + Option expectedOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("expected number of rows to be count.").longOpt(OPT_EXPECTED_COUNT).build(); addOption(startTimeOption); addOption(endTimeOption); addOption(rangeOption); @@ -313,28 +304,28 @@ protected void addOptions() { } @Override - protected void processOptions(CommandLine cmd) throws IllegalArgumentException{ + protected void processOptions(CommandLine cmd) throws IllegalArgumentException { this.tableName = cmd.getArgList().get(0); - if(cmd.getOptionValue(OPT_RANGE)!=null) { + if (cmd.getOptionValue(OPT_RANGE) != null) { this.rowRangeList = parseRowRangeParameter(cmd.getOptionValue(OPT_RANGE)); } - this.endTime = cmd.getOptionValue(OPT_END_TIME) == null ? HConstants.LATEST_TIMESTAMP : - Long.parseLong(cmd.getOptionValue(OPT_END_TIME)); - this.expectedCount = cmd.getOptionValue(OPT_EXPECTED_COUNT) == null ? Long.MIN_VALUE : - Long.parseLong(cmd.getOptionValue(OPT_EXPECTED_COUNT)); - this.startTime = cmd.getOptionValue(OPT_START_TIME) == null ? 0 : - Long.parseLong(cmd.getOptionValue(OPT_START_TIME)); - - for(int i=1; ihbase.simpletotalorder.start - * and hbase.simpletotalorder.end. The end key needs to be - * exclusive; i.e. one larger than the biggest key in your key space. - * You may be surprised at how this class partitions the space; it may not - * align with preconceptions; e.g. a start key of zero and an end key of 100 - * divided in ten will not make regions whose range is 0-10, 10-20, and so on. - * Make your own partitioner if you need the region spacing to come out a + * A partitioner that takes start and end keys and uses bigdecimal to figure which reduce a key + * belongs to. Pass the start and end keys in the Configuration using + * hbase.simpletotalorder.start and hbase.simpletotalorder.end. The end + * key needs to be exclusive; i.e. one larger than the biggest key in your key space. You may be + * surprised at how this class partitions the space; it may not align with preconceptions; e.g. a + * start key of zero and an end key of 100 divided in ten will not make regions whose range is 0-10, + * 10-20, and so on. Make your own partitioner if you need the region spacing to come out a * particular way. 
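Note: a sketch of feeding SimpleTotalOrderPartitioner its start and end keys through the two properties named in the javadoc above. The key values and reducer count are arbitrary examples, job is an existing Job, and the end key is exclusive.

  Configuration conf = job.getConfiguration();
  conf.set("hbase.simpletotalorder.start", "row-a");
  conf.set("hbase.simpletotalorder.end", "row-n");  // exclusive upper bound
  job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
  job.setNumReduceTasks(10);  // the key space is carved into 10 contiguous slices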
* @param * @see #START @@ -46,7 +42,7 @@ */ @InterfaceAudience.Public public class SimpleTotalOrderPartitioner extends Partitioner -implements Configurable { + implements Configurable { private final static Logger LOG = LoggerFactory.getLogger(SimpleTotalOrderPartitioner.class); /** @@ -67,9 +63,9 @@ public class SimpleTotalOrderPartitioner extends PartitioneremptyIterator()); + private static final CellScanner EMPTY_CELL_SCANNER = + new CellScanner(Collections. emptyIterator()); /** - * Rescan the given range directly from the source and target tables. - * Count and log differences, and if this is not a dry run, output Puts and Deletes - * to make the target table match the source table for this range + * Rescan the given range directly from the source and target tables. Count and log differences, + * and if this is not a dry run, output Puts and Deletes to make the target table match the + * source table for this range */ private void syncRange(Context context, ImmutableBytesWritable startRow, ImmutableBytesWritable stopRow) throws IOException, InterruptedException { @@ -361,7 +360,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, boolean rangeMatched = true; byte[] nextSourceRow = sourceCells.nextRow(); byte[] nextTargetRow = targetCells.nextRow(); - while(nextSourceRow != null || nextTargetRow != null) { + while (nextSourceRow != null || nextTargetRow != null) { boolean rowMatched; int rowComparison = compareRowKeys(nextSourceRow, nextTargetRow); if (rowComparison < 0) { @@ -371,7 +370,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, context.getCounter(Counter.TARGETMISSINGROWS).increment(1); rowMatched = syncRowCells(context, nextSourceRow, sourceCells, EMPTY_CELL_SCANNER); - nextSourceRow = sourceCells.nextRow(); // advance only source to next row + nextSourceRow = sourceCells.nextRow(); // advance only source to next row } else if (rowComparison > 0) { if (LOG.isDebugEnabled()) { LOG.debug("Source missing row: " + Bytes.toString(nextTargetRow)); @@ -379,7 +378,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, context.getCounter(Counter.SOURCEMISSINGROWS).increment(1); rowMatched = syncRowCells(context, nextTargetRow, EMPTY_CELL_SCANNER, targetCells); - nextTargetRow = targetCells.nextRow(); // advance only target to next row + nextTargetRow = targetCells.nextRow(); // advance only target to next row } else { // current row is the same on both sides, compare cell by cell rowMatched = syncRowCells(context, nextSourceRow, sourceCells, targetCells); @@ -396,7 +395,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, targetScanner.close(); context.getCounter(rangeMatched ? Counter.RANGESMATCHED : Counter.RANGESNOTMATCHED) - .increment(1); + .increment(1); } private static class CellScanner { @@ -413,8 +412,7 @@ public CellScanner(Iterator results) { } /** - * Advance to the next row and return its row key. - * Returns null iff there are no more rows. + * Advance to the next row and return its row key. Returns null iff there are no more rows. 
*/ public byte[] nextRow() { if (nextRowResult == null) { @@ -422,9 +420,8 @@ public byte[] nextRow() { while (results.hasNext()) { nextRowResult = results.next(); Cell nextCell = nextRowResult.rawCells()[0]; - if (currentRow == null - || !Bytes.equals(currentRow, 0, currentRow.length, nextCell.getRowArray(), - nextCell.getRowOffset(), nextCell.getRowLength())) { + if (currentRow == null || !Bytes.equals(currentRow, 0, currentRow.length, + nextCell.getRowArray(), nextCell.getRowOffset(), nextCell.getRowLength())) { // found next row break; } else { @@ -465,7 +462,7 @@ public Cell nextCellInRow() { Result result = results.next(); Cell cell = result.rawCells()[0]; if (Bytes.equals(currentRow, 0, currentRow.length, cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength())) { + cell.getRowOffset(), cell.getRowLength())) { // result is part of current row currentRowResult = result; nextCellInRow = 0; @@ -484,28 +481,26 @@ public Cell nextCellInRow() { } } - private Cell checkAndResetTimestamp(Cell sourceCell){ + private Cell checkAndResetTimestamp(Cell sourceCell) { if (ignoreTimestamp) { sourceCell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(sourceCell.getType()) - .setRow(sourceCell.getRowArray(), - sourceCell.getRowOffset(), sourceCell.getRowLength()) - .setFamily(sourceCell.getFamilyArray(), - sourceCell.getFamilyOffset(), sourceCell.getFamilyLength()) - .setQualifier(sourceCell.getQualifierArray(), - sourceCell.getQualifierOffset(), sourceCell.getQualifierLength()) - .setTimestamp(EnvironmentEdgeManager.currentTime()) - .setValue(sourceCell.getValueArray(), - sourceCell.getValueOffset(), sourceCell.getValueLength()).build(); + .setType(sourceCell.getType()) + .setRow(sourceCell.getRowArray(), sourceCell.getRowOffset(), sourceCell.getRowLength()) + .setFamily(sourceCell.getFamilyArray(), sourceCell.getFamilyOffset(), + sourceCell.getFamilyLength()) + .setQualifier(sourceCell.getQualifierArray(), sourceCell.getQualifierOffset(), + sourceCell.getQualifierLength()) + .setTimestamp(EnvironmentEdgeManager.currentTime()).setValue(sourceCell.getValueArray(), + sourceCell.getValueOffset(), sourceCell.getValueLength()) + .build(); } return sourceCell; } /** - * Compare the cells for the given row from the source and target tables. - * Count and log any differences. - * If not a dry run, output a Put and/or Delete needed to sync the target table - * to match the source table. + * Compare the cells for the given row from the source and target tables. Count and log any + * differences. If not a dry run, output a Put and/or Delete needed to sync the target table to + * match the source table. 
*/ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceCells, CellScanner targetCells) throws IOException, InterruptedException { @@ -546,8 +541,8 @@ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceC delete = new Delete(rowKey); } // add a tombstone to exactly match the target cell that is missing on the source - delete.addColumn(CellUtil.cloneFamily(targetCell), - CellUtil.cloneQualifier(targetCell), targetCell.getTimestamp()); + delete.addColumn(CellUtil.cloneFamily(targetCell), CellUtil.cloneQualifier(targetCell), + targetCell.getTimestamp()); } targetCell = targetCells.nextCellInRow(); @@ -558,12 +553,12 @@ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceC } else { if (LOG.isDebugEnabled()) { LOG.debug("Different values: "); - LOG.debug(" source cell: " + sourceCell - + " value: " + Bytes.toString(sourceCell.getValueArray(), - sourceCell.getValueOffset(), sourceCell.getValueLength())); - LOG.debug(" target cell: " + targetCell - + " value: " + Bytes.toString(targetCell.getValueArray(), - targetCell.getValueOffset(), targetCell.getValueLength())); + LOG.debug(" source cell: " + sourceCell + " value: " + + Bytes.toString(sourceCell.getValueArray(), sourceCell.getValueOffset(), + sourceCell.getValueLength())); + LOG.debug(" target cell: " + targetCell + " value: " + + Bytes.toString(targetCell.getValueArray(), targetCell.getValueOffset(), + targetCell.getValueLength())); } context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1); matchingRow = false; @@ -615,12 +610,11 @@ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceC } /** - * Compare row keys of the given Result objects. - * Nulls are after non-nulls + * Compare row keys of the given Result objects. Nulls are after non-nulls */ private static int compareRowKeys(byte[] r1, byte[] r2) { if (r1 == null) { - return 1; // source missing row + return 1; // source missing row } else if (r2 == null) { return -1; // target missing row } else { @@ -631,11 +625,10 @@ private static int compareRowKeys(byte[] r1, byte[] r2) { } /** - * Compare families, qualifiers, and timestamps of the given Cells. - * They are assumed to be of the same row. - * Nulls are after non-nulls. + * Compare families, qualifiers, and timestamps of the given Cells. They are assumed to be of + * the same row. Nulls are after non-nulls. 
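Note: the null-after-non-null convention above is what lets the sync loop decide which side ran out of rows. A paraphrase of compareRowKeys, shown in isolation and not the exact source:

  // null means "this side has no more rows", so the non-null side's row is
  // reported as missing from the null side.
  static int compareRowKeys(byte[] sourceRow, byte[] targetRow) {
    if (sourceRow == null) {
      return 1;   // source exhausted -> row missing from source
    } else if (targetRow == null) {
      return -1;  // target exhausted -> row missing from target
    }
    return Bytes.compareTo(sourceRow, targetRow);
  }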
*/ - private int compareCellKeysWithinRow(Cell c1, Cell c2) { + private int compareCellKeysWithinRow(Cell c1, Cell c2) { if (c1 == null) { return 1; // source missing cell } @@ -662,8 +655,7 @@ private int compareCellKeysWithinRow(Cell c1, Cell c2) { } @Override - protected void cleanup(Context context) - throws IOException, InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { if (mapperException == null) { try { finishRemainingHashRanges(context); @@ -693,8 +685,8 @@ protected void cleanup(Context context) } } - private void finishRemainingHashRanges(Context context) throws IOException, - InterruptedException { + private void finishRemainingHashRanges(Context context) + throws IOException, InterruptedException { TableSplit split = (TableSplit) context.getInputSplit(); byte[] splitEndRow = split.getEndRow(); boolean reachedEndOfTable = HashTable.isTableEndRow(splitEndRow); @@ -709,7 +701,7 @@ private void finishRemainingHashRanges(Context context) throws IOException, // need to complete the final open hash batch if ((nextSourceKey != null && nextSourceKey.compareTo(splitEndRow) > 0) - || (nextSourceKey == null && !Bytes.equals(splitEndRow, sourceTableHash.stopRow))) { + || (nextSourceKey == null && !Bytes.equals(splitEndRow, sourceTableHash.stopRow))) { // the open hash range continues past the end of this region // add a scan to complete the current hash range Scan scan = sourceTableHash.initScan(); @@ -739,6 +731,7 @@ private void finishRemainingHashRanges(Context context) throws IOException, } private static final int NUM_ARGS = 3; + private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); @@ -772,8 +765,7 @@ private static void printUsage(final String errorMsg) { System.err.println("Examples:"); System.err.println(" For a dry run SyncTable of tableA from a remote source cluster"); System.err.println(" to a local target cluster:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true" + System.err.println(" $ hbase " + "org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true" + " --sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase" + " hdfs://nn:9000/hashes/tableA tableA tableA"); } @@ -835,7 +827,6 @@ private boolean doCommandLine(final String[] args) { return false; } - } catch (Exception e) { LOG.error("Failed to parse commandLine arguments", e); printUsage("Can't start because " + e.getMessage()); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index c2351b91fe66..9151ba5b4346 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,31 +21,29 @@ import java.util.Collections; import java.util.List; import java.util.Locale; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. */ @InterfaceAudience.Public -public class TableInputFormat extends TableInputFormatBase -implements Configurable { +public class TableInputFormat extends TableInputFormatBase implements Configurable { @SuppressWarnings("hiding") private static final Logger LOG = LoggerFactory.getLogger(TableInputFormat.class); @@ -54,12 +51,13 @@ public class TableInputFormat extends TableInputFormatBase /** Job parameter that specifies the input table. */ public static final String INPUT_TABLE = "hbase.mapreduce.inputtable"; /** - * If specified, use start keys of this table to split. - * This is useful when you are preparing data for bulkload. + * If specified, use start keys of this table to split. This is useful when you are preparing data + * for bulkload. */ private static final String SPLIT_TABLE = "hbase.mapreduce.splittable"; - /** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified. - * See {@link TableMapReduceUtil#convertScanToString(Scan)} for more details. + /** + * Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified. See + * {@link TableMapReduceUtil#convertScanToString(Scan)} for more details. */ public static final String SCAN = "hbase.mapreduce.scan"; /** Scan start row */ @@ -92,7 +90,6 @@ public class TableInputFormat extends TableInputFormatBase /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -102,16 +99,13 @@ public Configuration getConf() { } /** - * Sets the configuration. This is used to set the details for the table to - * be scanned. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * Sets the configuration. This is used to set the details for the table to be scanned. + * @param configuration The configuration to set. 
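Note: a sketch of driving TableInputFormat purely through its configuration properties rather than through TableMapReduceUtil; the table name is hypothetical and the timerange values are arbitrary.

  Configuration conf = HBaseConfiguration.create();
  conf.set(TableInputFormat.INPUT_TABLE, "orders");
  // optional scan narrowing; both ends must be set for the timerange to apply
  conf.set(TableInputFormat.SCAN_TIMERANGE_START, "0");
  conf.set(TableInputFormat.SCAN_TIMERANGE_END, String.valueOf(System.currentTimeMillis()));
  Job job = Job.getInstance(conf, "scan-orders");
  job.setInputFormatClass(TableInputFormat.class);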
+ * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", + justification = "Intentional") public void setConf(Configuration configuration) { this.conf = configuration; @@ -127,7 +121,7 @@ public void setConf(Configuration configuration) { try { scan = createScanFromConfiguration(conf); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error(StringUtils.stringifyException(e)); } } @@ -135,13 +129,13 @@ public void setConf(Configuration configuration) { } /** - * Sets up a {@link Scan} instance, applying settings from the configuration property - * constants defined in {@code TableInputFormat}. This allows specifying things such as: + * Sets up a {@link Scan} instance, applying settings from the configuration property constants + * defined in {@code TableInputFormat}. This allows specifying things such as: *

- *   <li>start and stop rows</li>
- *   <li>column qualifiers or families</li>
- *   <li>timestamps or timerange</li>
- *   <li>scanner caching and batch size</li>
+ * <li>start and stop rows</li>
+ * <li>column qualifiers or families</li>
+ * <li>timestamps or timerange</li>
+ * <li>scanner caching and batch size</li>
 * </ul>
*/ public static Scan createScanFromConfiguration(Configuration conf) throws IOException { @@ -168,9 +162,8 @@ public static Scan createScanFromConfiguration(Configuration conf) throws IOExce } if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) { - scan.setTimeRange( - Long.parseLong(conf.get(SCAN_TIMERANGE_START)), - Long.parseLong(conf.get(SCAN_TIMERANGE_END))); + scan.setTimeRange(Long.parseLong(conf.get(SCAN_TIMERANGE_START)), + Long.parseLong(conf.get(SCAN_TIMERANGE_END))); } if (conf.get(SCAN_MAXVERSIONS) != null) { @@ -204,16 +197,14 @@ protected void initialize(JobContext context) throws IOException { } /** - * Parses a combined family and qualifier and adds either both or just the - * family in case there is no qualifier. This assumes the older colon - * divided notation, e.g. "family:qualifier". - * + * Parses a combined family and qualifier and adds either both or just the family in case there is + * no qualifier. This assumes the older colon divided notation, e.g. "family:qualifier". * @param scan The Scan to update. * @param familyAndQualifier family and qualifier * @throws IllegalArgumentException When familyAndQualifier is invalid. */ private static void addColumn(Scan scan, byte[] familyAndQualifier) { - byte [][] fq = CellUtil.parseColumn(familyAndQualifier); + byte[][] fq = CellUtil.parseColumn(familyAndQualifier); if (fq.length == 1) { scan.addFamily(fq[0]); } else if (fq.length == 2) { @@ -228,31 +219,29 @@ private static void addColumn(Scan scan, byte[] familyAndQualifier) { *

* Overrides previous calls to {@link Scan#addColumn(byte[], byte[])}for any families in the * input. - * * @param scan The Scan to update. * @param columns array of columns, formatted as family:qualifier * @see Scan#addColumn(byte[], byte[]) */ - public static void addColumns(Scan scan, byte [][] columns) { + public static void addColumns(Scan scan, byte[][] columns) { for (byte[] column : columns) { addColumn(scan, column); } } /** - * Calculates the splits that will serve as input for the map tasks. The - * number of splits matches the number of regions in a table. Splits are shuffled if - * required. - * @param context The current job context. + * Calculates the splits that will serve as input for the map tasks. The number of splits matches + * the number of regions in a table. Splits are shuffled if required. + * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - * org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ @Override public List getSplits(JobContext context) throws IOException { List splits = super.getSplits(context); - if ((conf.get(SHUFFLE_MAPS) != null) && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))) { + if ((conf.get(SHUFFLE_MAPS) != null) + && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))) { Collections.shuffle(splits); } return splits; @@ -260,9 +249,8 @@ public List getSplits(JobContext context) throws IOException { /** * Convenience method to parse a string representation of an array of column specifiers. - * * @param scan The Scan to update. - * @param columns The columns to parse. + * @param columns The columns to parse. */ private static void addColumns(Scan scan, String columns) { String[] cols = columns.split(" "); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index 265d30068d3f..6143f2d22748 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,19 +52,18 @@ import org.slf4j.LoggerFactory; /** - * A base for {@link TableInputFormat}s. Receives a {@link Connection}, a {@link TableName}, - * an {@link Scan} instance that defines the input columns etc. Subclasses may use - * other TableRecordReader implementations. - * - * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to - * function properly. Each of the entry points to this class used by the MapReduce framework, - * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, - * will call {@link #initialize(JobContext)} as a convenient centralized location to handle - * retrieving the necessary configuration information. If your subclass overrides either of these - * methods, either call the parent version or call initialize yourself. - * + * A base for {@link TableInputFormat}s. 
Receives a {@link Connection}, a {@link TableName}, an + * {@link Scan} instance that defines the input columns etc. Subclasses may use other + * TableRecordReader implementations. Subclasses MUST ensure initializeTable(Connection, TableName) + * is called for an instance to function properly. Each of the entry points to this class used by + * the MapReduce framework, {@link #createRecordReader(InputSplit, TaskAttemptContext)} and + * {@link #getSplits(JobContext)}, will call {@link #initialize(JobContext)} as a convenient + * centralized location to handle retrieving the necessary configuration information. If your + * subclass overrides either of these methods, either call the parent version or call initialize + * yourself. *

* An example of a subclass: + * *

 * <pre>
 *   class ExampleTIF extends TableInputFormatBase {
 *
@@ -92,42 +90,43 @@
 *   }
 * </pre>
* - * - * The number of InputSplits(mappers) match the number of regions in a table by default. - * Set "hbase.mapreduce.tableinput.mappers.per.region" to specify how many mappers per region, set - * this property will disable autobalance below.\ - * Set "hbase.mapreduce.tif.input.autobalance" to enable autobalance, hbase will assign mappers - * based on average region size; For regions, whose size larger than average region size may assigned - * more mappers, and for smaller one, they may group together to use one mapper. If actual average - * region size is too big, like 50G, it is not good to only assign 1 mapper for those large regions. - * Use "hbase.mapreduce.tif.ave.regionsize" to set max average region size when enable "autobalanece", - * default mas average region size is 8G. + * The number of InputSplits(mappers) match the number of regions in a table by default. Set + * "hbase.mapreduce.tableinput.mappers.per.region" to specify how many mappers per region, set this + * property will disable autobalance below.\ Set "hbase.mapreduce.tif.input.autobalance" to enable + * autobalance, hbase will assign mappers based on average region size; For regions, whose size + * larger than average region size may assigned more mappers, and for smaller one, they may group + * together to use one mapper. If actual average region size is too big, like 50G, it is not good to + * only assign 1 mapper for those large regions. Use "hbase.mapreduce.tif.ave.regionsize" to set max + * average region size when enable "autobalanece", default mas average region size is 8G. */ @InterfaceAudience.Public -public abstract class TableInputFormatBase - extends InputFormat { +public abstract class TableInputFormatBase extends InputFormat { private static final Logger LOG = LoggerFactory.getLogger(TableInputFormatBase.class); - private static final String NOT_INITIALIZED = "The input format instance has not been properly " + - "initialized. Ensure you call initializeTable either in your constructor or initialize " + - "method"; - private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + - " previous error. Please look at the previous logs lines from" + - " the task's full log for more details."; + private static final String NOT_INITIALIZED = "The input format instance has not been properly " + + "initialized. Ensure you call initializeTable either in your constructor or initialize " + + "method"; + private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + + " previous error. Please look at the previous logs lines from" + + " the task's full log for more details."; /** Specify if we enable auto-balance to set number of mappers in M/R jobs. */ public static final String MAPREDUCE_INPUT_AUTOBALANCE = "hbase.mapreduce.tif.input.autobalance"; - /** In auto-balance, we split input by ave region size, if calculated region size is too big, we can set it. */ + /** + * In auto-balance, we split input by ave region size, if calculated region size is too big, we + * can set it. + */ public static final String MAX_AVERAGE_REGION_SIZE = "hbase.mapreduce.tif.ave.regionsize"; /** Set the number of Mappers for each region, all regions have same number of Mappers */ - public static final String NUM_MAPPERS_PER_REGION = "hbase.mapreduce.tableinput.mappers.per.region"; + public static final String NUM_MAPPERS_PER_REGION = + "hbase.mapreduce.tableinput.mappers.per.region"; - - /** Holds the details for the internal scanner. 
- * - * @see Scan */ + /** + * Holds the details for the internal scanner. + * @see Scan + */ private Scan scan = null; /** The {@link Admin}. */ private Admin admin; @@ -142,27 +141,22 @@ public abstract class TableInputFormatBase /** Used to generate splits based on region size. */ private RegionSizeCalculator regionSizeCalculator; - /** The reverse DNS lookup cache mapping: IPAddress => HostName */ - private HashMap reverseDNSCacheMap = - new HashMap<>(); + private HashMap reverseDNSCacheMap = new HashMap<>(); /** - * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses - * the default. - * - * @param split The split to work with. - * @param context The current context. + * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses the + * default. + * @param split The split to work with. + * @param context The current context. * @return The newly created record reader. * @throws IOException When creating the reader fails. * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) - throws IOException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException { // Just in case a subclass is relying on JobConfigurable magic. if (table == null) { initialize(context); @@ -209,8 +203,8 @@ public float getProgress() throws IOException, InterruptedException { } @Override - public void initialize(InputSplit inputsplit, TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { trr.initialize(inputsplit, context); } @@ -221,17 +215,16 @@ public boolean nextKeyValue() throws IOException, InterruptedException { }; } - protected Pair getStartEndKeys() throws IOException { + protected Pair getStartEndKeys() throws IOException { return getRegionLocator().getStartEndKeys(); } /** * Calculates the splits that will serve as input for the map tasks. - * @param context The current job context. + * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - * org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ @Override public List getSplits(JobContext context) throws IOException { @@ -267,10 +260,10 @@ public List getSplits(JobContext context) throws IOException { return res; } - //The default value of "hbase.mapreduce.input.autobalance" is false. + // The default value of "hbase.mapreduce.input.autobalance" is false. 
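Editorial sketch (not part of this patch) of how a driver might opt into the auto-balance knobs described in the class comment above; the 4 GB cap and the job name are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.mapreduce.Job;

    public class AutoBalanceConfigSketch {
      public static Job newJob() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Opt in to auto-balance: large regions may get several mappers, small ones get grouped.
        conf.setBoolean("hbase.mapreduce.tif.input.autobalance", true);
        // Cap the average region size used for balancing (illustrative 4 GB; the default is 8 GB).
        conf.setLong("hbase.mapreduce.tif.ave.regionsize", 4L * 1024 * 1024 * 1024);
        // Or pin the mapper count per region instead; setting this disables auto-balance.
        // conf.setInt("hbase.mapreduce.tableinput.mappers.per.region", 2);
        return Job.getInstance(conf, "scan-with-autobalance");
      }
    }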
if (context.getConfiguration().getBoolean(MAPREDUCE_INPUT_AUTOBALANCE, false)) { - long maxAveRegionSize = context.getConfiguration() - .getLong(MAX_AVERAGE_REGION_SIZE, 8L*1073741824); //8GB + long maxAveRegionSize = + context.getConfiguration().getLong(MAX_AVERAGE_REGION_SIZE, 8L * 1073741824); // 8GB return calculateAutoBalancedSplits(splits, maxAveRegionSize); } @@ -285,7 +278,6 @@ public List getSplits(JobContext context) throws IOException { /** * Create one InputSplit per region - * * @return The list of InputSplit for all the regions * @throws IOException throws IOException */ @@ -299,8 +291,7 @@ private List oneInputSplitPerRegion() throws IOException { TableName tableName = getTable().getName(); Pair keys = getStartEndKeys(); - if (keys == null || keys.getFirst() == null || - keys.getFirst().length == 0) { + if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { HRegionLocation regLoc = getRegionLocator().getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false); if (null == regLoc) { @@ -311,9 +302,9 @@ private List oneInputSplitPerRegion() throws IOException { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 - TableSplit split = new TableSplit(tableName, null, - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc - .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); + TableSplit split = + new TableSplit(tableName, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, + regLoc.getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); splits.add(split); return splits; } @@ -326,17 +317,16 @@ private List oneInputSplitPerRegion() throws IOException { byte[] startRow = scan.getStartRow(); byte[] stopRow = scan.getStopRow(); // determine if the given start an stop key fall into the region - if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || - Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || - Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? - keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || - Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? - keys.getSecond()[i] : stopRow; + if ((startRow.length == 0 || keys.getSecond()[i].length == 0 + || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) + && (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { + byte[] splitStart = + startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 + ? keys.getFirst()[i] + : startRow; + byte[] splitStop = + (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) + && keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; HRegionLocation location = getRegionLocator().getRegionLocation(keys.getFirst()[i], false); // The below InetSocketAddress creation does a name resolution. @@ -354,8 +344,8 @@ private List oneInputSplitPerRegion() throws IOException { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. 
See HBASE-25212 - TableSplit split = new TableSplit(tableName, null, - splitStart, splitStop, regionLocation, encodedRegionName, regionSize); + TableSplit split = new TableSplit(tableName, null, splitStart, splitStop, regionLocation, + encodedRegionName, regionSize); splits.add(split); if (LOG.isDebugEnabled()) { LOG.debug("getSplits: split -> " + i + " -> " + split); @@ -368,8 +358,8 @@ private List oneInputSplitPerRegion() throws IOException { /** * Create n splits for one InputSplit, For now only support uniform distribution * @param split A TableSplit corresponding to a range of rowkeys - * @param n Number of ranges after splitting. Pass 1 means no split for the range - * Pass 2 if you want to split the range in two; + * @param n Number of ranges after splitting. Pass 1 means no split for the range Pass 2 if you + * want to split the range in two; * @return A list of TableSplit, the size of the list is n * @throws IllegalArgumentIOException throws IllegalArgumentIOException */ @@ -380,7 +370,7 @@ protected List createNInputSplitsUniform(InputSplit split, int n) "InputSplit for CreateNSplitsPerRegion can not be null + " + "and should be instance of TableSplit"); } - //if n < 1, then still continue using n = 1 + // if n < 1, then still continue using n = 1 n = n < 1 ? 1 : n; List res = new ArrayList<>(n); if (n == 1) { @@ -398,51 +388,48 @@ protected List createNInputSplitsUniform(InputSplit split, int n) byte[] endRow = ts.getEndRow(); // For special case: startRow or endRow is empty - if (startRow.length == 0 && endRow.length == 0){ + if (startRow.length == 0 && endRow.length == 0) { startRow = new byte[1]; endRow = new byte[1]; startRow[0] = 0; endRow[0] = -1; } - if (startRow.length == 0 && endRow.length != 0){ + if (startRow.length == 0 && endRow.length != 0) { startRow = new byte[1]; startRow[0] = 0; } - if (startRow.length != 0 && endRow.length == 0){ - endRow =new byte[startRow.length]; - for (int k = 0; k < startRow.length; k++){ + if (startRow.length != 0 && endRow.length == 0) { + endRow = new byte[startRow.length]; + for (int k = 0; k < startRow.length; k++) { endRow[k] = -1; } } // Split Region into n chunks evenly - byte[][] splitKeys = Bytes.split(startRow, endRow, true, n-1); + byte[][] splitKeys = Bytes.split(startRow, endRow, true, n - 1); for (int i = 0; i < splitKeys.length - 1; i++) { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 - //notice that the regionSize parameter may be not very accurate - TableSplit tsplit = - new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], regionLocation, - encodedRegionName, regionSize / n); + // notice that the regionSize parameter may be not very accurate + TableSplit tsplit = new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], + regionLocation, encodedRegionName, regionSize / n); res.add(tsplit); } return res; } + /** - * Calculates the number of MapReduce input splits for the map tasks. The number of - * MapReduce input splits depends on the average region size. - * Make it 'public' for testing - * + * Calculates the number of MapReduce input splits for the map tasks. The number of MapReduce + * input splits depends on the average region size. Make it 'public' for testing * @param splits The list of input splits before balance. * @param maxAverageRegionSize max Average region size for one mapper * @return The list of input splits. 
* @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - *org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ - public List calculateAutoBalancedSplits(List splits, long maxAverageRegionSize) - throws IOException { + public List calculateAutoBalancedSplits(List splits, + long maxAverageRegionSize) throws IOException { if (splits.size() == 0) { return splits; } @@ -455,15 +442,16 @@ public List calculateAutoBalancedSplits(List splits, lon long averageRegionSize = totalRegionSize / splits.size(); // totalRegionSize might be overflow, and the averageRegionSize must be positive. if (averageRegionSize <= 0) { - LOG.warn("The averageRegionSize is not positive: " + averageRegionSize + ", " + - "set it to Long.MAX_VALUE " + splits.size()); + LOG.warn("The averageRegionSize is not positive: " + averageRegionSize + ", " + + "set it to Long.MAX_VALUE " + splits.size()); averageRegionSize = Long.MAX_VALUE / splits.size(); } - //if averageRegionSize is too big, change it to default as 1 GB, + // if averageRegionSize is too big, change it to default as 1 GB, if (averageRegionSize > maxAverageRegionSize) { averageRegionSize = maxAverageRegionSize; } - // if averageRegionSize is too small, we do not need to allocate more mappers for those 'large' region + // if averageRegionSize is too small, we do not need to allocate more mappers for those 'large' + // region // set default as 16M = (default hdfs block size) / 4; if (averageRegionSize < 16 * 1048576) { return splits; @@ -477,7 +465,8 @@ public List calculateAutoBalancedSplits(List splits, lon if (regionSize >= averageRegionSize) { // make this region as multiple MapReduce input split. - int n = (int) Math.round(Math.log(((double) regionSize) / ((double) averageRegionSize)) + 1.0); + int n = + (int) Math.round(Math.log(((double) regionSize) / ((double) averageRegionSize)) + 1.0); List temp = createNInputSplitsUniform(ts, n); resultList.addAll(temp); } else { @@ -533,26 +522,25 @@ String reverseDNS(InetAddress ipAddress) throws UnknownHostException { } /** - * Test if the given region is to be included in the InputSplit while splitting - * the regions of a table. + * Test if the given region is to be included in the InputSplit while splitting the regions of a + * table. *

- * This optimization is effective when there is a specific reasoning to exclude an entire region from the M-R job, - * (and hence, not contributing to the InputSplit), given the start and end keys of the same.
- * Useful when we need to remember the last-processed top record and revisit the [last, current) interval for M-R processing, - * continuously. In addition to reducing InputSplits, reduces the load on the region server as well, due to the ordering of the keys. - *
+ * This optimization is effective when there is a specific reasoning to exclude an entire region + * from the M-R job, (and hence, not contributing to the InputSplit), given the start and end keys + * of the same.
+ * Useful when we need to remember the last-processed top record and revisit the [last, current) + * interval for M-R processing, continuously. In addition to reducing InputSplits, reduces the + * load on the region server as well, due to the ordering of the keys.
*
* Note: It is possible that endKey.length() == 0 , for the last (recent) region. *
- * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no region is excluded( i.e. all regions are included). - * - * + * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no + * region is excluded( i.e. all regions are included). * @param startKey Start key of the region * @param endKey End key of the region * @return true, if this region needs to be included as part of the input (default). - * */ - protected boolean includeRegionInSplit(final byte[] startKey, final byte [] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { return true; } @@ -588,15 +576,14 @@ protected Admin getAdmin() { /** * Allows subclasses to initialize the table information. - * - * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. - * @param tableName The {@link TableName} of the table to process. + * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. + * @param tableName The {@link TableName} of the table to process. * @throws IOException */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { if (this.table != null || this.connection != null) { - LOG.warn("initializeTable called multiple times. Overwriting connection and table " + - "reference; TableInputFormatBase will not close these old references when done."); + LOG.warn("initializeTable called multiple times. Overwriting connection and table " + + "reference; TableInputFormatBase will not close these old references when done."); } this.table = connection.getTable(tableName); this.regionLocator = connection.getRegionLocator(tableName); @@ -613,7 +600,6 @@ protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, /** * Gets the scan defining the actual details like columns etc. - * * @return The internal scan instance. */ public Scan getScan() { @@ -623,8 +609,7 @@ public Scan getScan() { /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.scan = scan; @@ -632,28 +617,22 @@ public void setScan(Scan scan) { /** * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader A different {@link TableRecordReader} - * implementation. + * @param tableRecordReader A different {@link TableRecordReader} implementation. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; } /** - * Handle subclass specific set up. - * Each of the entry points used by the MapReduce framework, + * Handle subclass specific set up. Each of the entry points used by the MapReduce framework, * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, * will call {@link #initialize(JobContext)} as a convenient centralized location to handle * retrieving the necessary configuration information and calling - * {@link #initializeTable(Connection, TableName)}. - * - * Subclasses should implement their initialize call such that it is safe to call multiple times. - * The current TableInputFormatBase implementation relies on a non-null table reference to decide - * if an initialize call is needed, but this behavior may change in the future. In particular, - * it is critical that initializeTable not be called multiple times since this will leak - * Connection instances. 
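Editorial sketch of the kind of includeRegionInSplit override the comment above invites; the subclass name and the boundary row key are hypothetical and not part of this change.

    import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Skips every region that lies entirely below a fixed boundary row key. */
    public class BoundedTableInputFormat extends TableInputFormat {
      private static final byte[] BOUNDARY = Bytes.toBytes("row-00042"); // illustrative

      @Override
      protected boolean includeRegionInSplit(byte[] startKey, byte[] endKey) {
        // An empty end key marks the last region of the table; always keep it.
        if (endKey.length == 0) {
          return true;
        }
        // Keep the region only if some of its rows can fall at or above the boundary.
        return Bytes.compareTo(endKey, BOUNDARY) > 0;
      }
    }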
- * + * {@link #initializeTable(Connection, TableName)}. Subclasses should implement their initialize + * call such that it is safe to call multiple times. The current TableInputFormatBase + * implementation relies on a non-null table reference to decide if an initialize call is needed, + * but this behavior may change in the future. In particular, it is critical that initializeTable + * not be called multiple times since this will leak Connection instances. */ protected void initialize(JobContext context) throws IOException { } @@ -661,7 +640,6 @@ protected void initialize(JobContext context) throws IOException { /** * Close the Table and related objects that were initialized via * {@link #initializeTable(Connection, TableName)}. - * * @throws IOException */ protected void closeTable() throws IOException { @@ -675,7 +653,9 @@ protected void closeTable() throws IOException { private void close(Closeable... closables) throws IOException { for (Closeable c : closables) { - if(c != null) { c.close(); } + if (c != null) { + c.close(); + } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index 8ba2c5e983fa..d57a803b981e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +17,7 @@ */ package org.apache.hadoop.hbase.mapreduce; +import com.codahale.metrics.MetricRegistry; import java.io.File; import java.io.IOException; import java.net.URL; @@ -33,24 +33,18 @@ import java.util.Set; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.TokenUtil; @@ -61,8 +55,12 @@ import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import com.codahale.metrics.MetricRegistry; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** * Utility for {@link TableMapper} and {@link TableReducer} @@ -74,128 +72,98 @@ public class TableMapReduceUtil { public static final String TABLE_INPUT_CLASS_KEY = 
"hbase.table.input.class"; /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, - job, true); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, true); } - /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(TableName table, - Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, + public static void initTableMapperJob(TableName table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, Job job) throws IOException { - initTableMapperJob(table.getNameAsString(), - scan, - mapper, - outputKeyClass, - outputValueClass, - job, - true); + initTableMapperJob(table.getNameAsString(), scan, mapper, outputKeyClass, outputValueClass, job, + true); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. 
Make sure the passed job is - * carrying all necessary HBase configuration. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, - job, true); + public static void initTableMapperJob(byte[] table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + true); } - /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @throws IOException When setting up the details fails. - */ - public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Class inputFormatClass) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, true, inputFormatClass); - } - + /** + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @throws IOException When setting up the details fails. + */ + public static void initTableMapperJob(String table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Class inputFormatClass) + throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, true, inputFormatClass); + } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. 
Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @param initCredentials whether to initialize hbase auth credentials for the job * @param inputFormatClass the input format * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, boolean initCredentials, - Class inputFormatClass) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, boolean initCredentials, + Class inputFormatClass) throws IOException { job.setInputFormatClass(inputFormatClass); if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass); if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass); @@ -208,8 +176,8 @@ public static void initTableMapperJob(String table, Scan scan, conf.set(TableInputFormat.INPUT_TABLE, table); conf.set(TableInputFormat.SCAN, convertScanToString(scan)); conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); if (addDependencyJars) { addDependencyJars(job); } @@ -219,116 +187,99 @@ public static void initTableMapperJob(String table, Scan scan, } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). 
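Editorial sketch of a typical map-only driver built on the initTableMapperJob overloads above; the table name, mapper, and caching value are illustrative assumptions, not part of this patch.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

    public class ScanDriverSketch {
      /** Counts rows via a job counter; nothing is written out. */
      static class RowCountMapper extends TableMapper<ImmutableBytesWritable, Result> {
        @Override
        protected void map(ImmutableBytesWritable row, Result value, Context context) {
          context.getCounter("sketch", "rows").increment(1);
        }
      }

      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(HBaseConfiguration.create(), "scan-driver-sketch");
        job.setJarByClass(ScanDriverSketch.class);
        Scan scan = new Scan();
        scan.setCaching(500);       // fewer scanner RPCs per mapper, at the cost of more heap
        scan.setCacheBlocks(false); // keep MR scans from churning the region server block cache
        TableMapReduceUtil.initTableMapperJob("my_table", scan, RowCountMapper.class, null, null,
          job);
        job.setNumReduceTasks(0);
        job.setOutputFormatClass(NullOutputFormat.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }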
* @param inputFormatClass The class of the input format * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Class inputFormatClass) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, inputFormatClass); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Class inputFormatClass) + throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, inputFormatClass); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, getConfiguredInputFormat(job)); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars) throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, getConfiguredInputFormat(job)); } /** * @return {@link TableInputFormat} .class unless Configuration has something else at - * {@link #TABLE_INPUT_CLASS_KEY}. + * {@link #TABLE_INPUT_CLASS_KEY}. */ private static Class getConfiguredInputFormat(Job job) { - return (Class)job.getConfiguration(). - getClass(TABLE_INPUT_CLASS_KEY, TableInputFormat.class); + return (Class) job.getConfiguration().getClass(TABLE_INPUT_CLASS_KEY, + TableInputFormat.class); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. 
Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, getConfiguredInputFormat(job)); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars) throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, getConfiguredInputFormat(job)); } /** - * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on - * direct memory will likely cause the map tasks to OOM when opening the region. This - * is done here instead of in TableSnapshotRegionRecordReader in case an advanced user - * wants to override this behavior in their job. + * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on direct + * memory will likely cause the map tasks to OOM when opening the region. This is done here + * instead of in TableSnapshotRegionRecordReader in case an advanced user wants to override this + * behavior in their job. */ public static void resetCacheConfig(Configuration conf) { - conf.setFloat( - HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); + conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f); conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY); } /** - * Sets up the job for reading from one or more table snapshots, with one or more scans - * per snapshot. - * It bypasses hbase servers and read directly from snapshot files. - * - * @param snapshotScans map of snapshot name to scans on that snapshot. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * Sets up the job for reading from one or more table snapshots, with one or more scans per + * snapshot. It bypasses hbase servers and read directly from snapshot files. + * @param snapshotScans map of snapshot name to scans on that snapshot. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. 
+ * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). */ public static void initMultiTableSnapshotMapperJob(Map> snapshotScans, Class mapper, Class outputKeyClass, Class outputValueClass, @@ -373,11 +324,8 @@ public static void initMultiTableSnapshotMapperJob(Map> * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Path tmpRestoreDir) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir); initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class); @@ -385,105 +333,85 @@ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, } /** - * Sets up the job for reading from a table snapshot. It bypasses hbase servers - * and read directly from snapshot files. - * + * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly + * from snapshot files. * @param snapshotName The name of the snapshot (of a table) to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restore directory can be deleted. + * have write permissions to this directory, and this should not be a subdirectory of + * rootdir. After the job is finished, restore directory can be deleted. * @param splitAlgo algorithm to split * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException When setting up the details fails. 
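Editorial sketch of driving the snapshot-reading setup described above; the snapshot name, restore directory, and mapper are illustrative assumptions, not part of this patch.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

    public class SnapshotScanSketch {
      static class NoOpMapper extends TableMapper<ImmutableBytesWritable, Result> {
        @Override
        protected void map(ImmutableBytesWritable row, Result value, Context context) {
          // Rows are read straight from snapshot files; a real job would do work here.
        }
      }

      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(HBaseConfiguration.create(), "snapshot-scan-sketch");
        job.setJarByClass(SnapshotScanSketch.class);
        // Must be writable by the submitting user and must not live under hbase.rootdir.
        Path restoreDir = new Path("/tmp/snapshot-restore");
        TableMapReduceUtil.initTableSnapshotMapperJob("my_snapshot", new Scan(), NoOpMapper.class,
          null, null, job, true, restoreDir);
        job.setNumReduceTasks(0);
        job.setOutputFormatClass(NullOutputFormat.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }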
* @see TableSnapshotInputFormat */ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Path tmpRestoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, - int numSplitsPerRegion) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir, + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir, splitAlgo, - numSplitsPerRegion); - initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class); + numSplitsPerRegion); + initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, false, TableSnapshotInputFormat.class); resetCacheConfig(job.getConfiguration()); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. * @param scans The list of {@link Scan} objects to read from. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) throws IOException { - initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, - true); + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job) throws IOException { + initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, true); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. * @param scans The list of {@link Scan} objects to read from. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the - * configured job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. 
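Editorial sketch for the List-of-Scan overloads just described: by convention each Scan is tagged with the table it targets before the job is initialized. Table names and the mapper are assumptions, not part of this patch.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapreduce.Job;

    public class MultiScanSetupSketch {
      static class RowMapper extends TableMapper<ImmutableBytesWritable, NullWritable> {
        @Override
        protected void map(ImmutableBytesWritable row, Result value, Context context)
            throws IOException, InterruptedException {
          context.write(row, NullWritable.get());
        }
      }

      /** Wires one scan per source table into a single multi-table job. */
      public static void configure(Job job) throws IOException {
        List<Scan> scans = new ArrayList<>();
        for (String table : new String[] { "table_a", "table_b" }) {
          Scan scan = new Scan();
          // The input format resolves the target table from this per-scan attribute.
          scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(table));
          scans.add(scan);
        }
        TableMapReduceUtil.initTableMapperJob(scans, RowMapper.class,
          ImmutableBytesWritable.class, NullWritable.class, job);
      }
    }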
*/ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) throws IOException { - initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, true); + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job, boolean addDependencyJars) + throws IOException { + initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, + true); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. * @param scans The list of {@link Scan} objects to read from. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the - * configured job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @param initCredentials whether to initialize hbase auth credentials for the job * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job, boolean addDependencyJars, boolean initCredentials) throws IOException { job.setInputFormatClass(MultiTableInputFormat.class); if (outputValueClass != null) { @@ -518,7 +446,7 @@ public static void initCredentials(Job job) throws IOException { // propagate delegation related props from launcher job to MR job if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) { job.getConfiguration().set("mapreduce.job.credentials.binary", - System.getenv("HADOOP_TOKEN_FILE_LOCATION")); + System.getenv("HADOOP_TOKEN_FILE_LOCATION")); } } @@ -529,7 +457,7 @@ public static void initCredentials(Job job) throws IOException { User user = userProvider.getCurrent(); if (quorumAddress != null) { Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), - quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); + quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); Connection peerConn = ConnectionFactory.createConnection(peerConf); try { TokenUtil.addTokenForJob(peerConn, user, job); @@ -552,39 +480,33 @@ public static void initCredentials(Job job) throws IOException { } /** - * Obtain an authentication token, for the specified cluster, on behalf of the current user - * and add it to the credentials for the given map reduce job. - * - * The quorumAddress is the key to the ZK ensemble, which contains: - * hbase.zookeeper.quorum, hbase.zookeeper.client.port and + * Obtain an authentication token, for the specified cluster, on behalf of the current user and + * add it to the credentials for the given map reduce job. 
The quorumAddress is the key to the ZK + * ensemble, which contains: hbase.zookeeper.quorum, hbase.zookeeper.client.port and * zookeeper.znode.parent - * * @param job The job that requires the permission. * @param quorumAddress string that contains the 3 required configuratins * @throws IOException When the authentication token cannot be obtained. * @deprecated Since 1.2.0 and will be removed in 3.0.0. Use - * {@link #initCredentialsForCluster(Job, Configuration)} instead. + * {@link #initCredentialsForCluster(Job, Configuration)} instead. * @see #initCredentialsForCluster(Job, Configuration) * @see HBASE-14886 */ @Deprecated - public static void initCredentialsForCluster(Job job, String quorumAddress) - throws IOException { - Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), - quorumAddress); + public static void initCredentialsForCluster(Job job, String quorumAddress) throws IOException { + Configuration peerConf = + HBaseConfiguration.createClusterConf(job.getConfiguration(), quorumAddress); initCredentialsForCluster(job, peerConf); } /** - * Obtain an authentication token, for the specified cluster, on behalf of the current user - * and add it to the credentials for the given map reduce job. - * + * Obtain an authentication token, for the specified cluster, on behalf of the current user and + * add it to the credentials for the given map reduce job. * @param job The job that requires the permission. * @param conf The configuration to use in connecting to the peer cluster * @throws IOException When the authentication token cannot be obtained. */ - public static void initCredentialsForCluster(Job job, Configuration conf) - throws IOException { + public static void initCredentialsForCluster(Job job, Configuration conf) throws IOException { UserProvider userProvider = UserProvider.instantiate(conf); if (userProvider.isHBaseSecurityEnabled()) { try { @@ -603,8 +525,7 @@ public static void initCredentialsForCluster(Job job, Configuration conf) /** * Writes the given scan into a Base64 encoded string. - * - * @param scan The scan to write out. + * @param scan The scan to write out. * @return The scan saved in a Base64 encoded string. * @throws IOException When writing the scan fails. */ @@ -615,110 +536,92 @@ public static String convertScanToString(Scan scan) throws IOException { /** * Converts the given Base64 string back into a Scan instance. - * - * @param base64 The scan details. + * @param base64 The scan details. * @return The newly created Scan instance. * @throws IOException When reading the scan instance fails. */ public static Scan convertStringToScan(String base64) throws IOException { - byte [] decoded = Base64.getDecoder().decode(base64); + byte[] decoded = Base64.getDecoder().decode(base64); return ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(decoded)); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. * @throws IOException When determining the region count fails. 
*/ - public static void initTableReducerJob(String table, - Class reducer, Job job) - throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job) throws IOException { initTableReducerJob(table, reducer, job, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. + * @param partitioner Partitioner to use. Pass null to use default partitioner. * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner) throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner) throws IOException { initTableReducerJob(table, reducer, job, partitioner, null, null, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. - * @param quorumAddress Distant cluster to write to; default is null for - * output to the cluster that is designated in hbase-site.xml. - * Set this String to the zookeeper ensemble of an alternate remote cluster - * when you would have the reduce write a cluster that is other than the - * default; e.g. copying tables between clusters, the source would be - * designated by hbase-site.xml and this param would have the - * ensemble address of the remote cluster. The format to pass is particular. - * Pass <hbase.zookeeper.quorum>:< + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to the zookeeper + * ensemble of an alternate remote cluster when you would have the reduce write a cluster + * that is other than the default; e.g. copying tables between clusters, the source would + * be designated by hbase-site.xml and this param would have the ensemble + * address of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> * such as server,server2,server3:2181:/hbase. * @param serverClass redefined hbase.regionserver.class * @param serverImpl redefined hbase.regionserver.impl * @throws IOException When determining the region count fails. 
*/ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner, String quorumAddress, String serverClass, - String serverImpl) throws IOException { - initTableReducerJob(table, reducer, job, partitioner, quorumAddress, - serverClass, serverImpl, true); + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl) + throws IOException { + initTableReducerJob(table, reducer, job, partitioner, quorumAddress, serverClass, serverImpl, + true); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. - * @param quorumAddress Distant cluster to write to; default is null for - * output to the cluster that is designated in hbase-site.xml. - * Set this String to the zookeeper ensemble of an alternate remote cluster - * when you would have the reduce write a cluster that is other than the - * default; e.g. copying tables between clusters, the source would be - * designated by hbase-site.xml and this param would have the - * ensemble address of the remote cluster. The format to pass is particular. - * Pass <hbase.zookeeper.quorum>:< + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to the zookeeper + * ensemble of an alternate remote cluster when you would have the reduce write a cluster + * that is other than the default; e.g. copying tables between clusters, the source would + * be designated by hbase-site.xml and this param would have the ensemble + * address of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> * such as server,server2,server3:2181:/hbase. * @param serverClass redefined hbase.regionserver.class * @param serverImpl redefined hbase.regionserver.impl - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When determining the region count fails. 
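Editorial sketch of the write side set up via initTableReducerJob; the output table, column family, and reducer are illustrative assumptions, not part of this patch.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableReducer;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;

    public class WriteDriverSketch {
      static class SumReducer extends TableReducer<Text, IntWritable, ImmutableBytesWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
          long sum = 0;
          for (IntWritable v : values) {
            sum += v.get();
          }
          Put put = new Put(Bytes.toBytes(key.toString()));
          put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("sum"), Bytes.toBytes(sum));
          // TableOutputFormat ignores the key; the Put carries the row.
          context.write(null, put);
        }
      }

      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(HBaseConfiguration.create(), "write-driver-sketch");
        job.setJarByClass(WriteDriverSketch.class);
        // ... configure the map side first, e.g. via TableMapReduceUtil.initTableMapperJob(...).
        // The longer overloads also accept a quorum string such as "zk1,zk2,zk3:2181:/hbase"
        // to write to a remote cluster instead of the one in hbase-site.xml.
        TableMapReduceUtil.initTableReducerJob("output_table", SumReducer.class, job);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }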
*/ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner, String quorumAddress, String serverClass, - String serverImpl, boolean addDependencyJars) throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl, + boolean addDependencyJars) throws IOException { Configuration conf = job.getConfiguration(); HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); @@ -726,12 +629,12 @@ public static void initTableReducerJob(String table, if (reducer != null) job.setReducerClass(reducer); conf.set(TableOutputFormat.OUTPUT_TABLE, table); conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName()); // If passed a quorum/ensemble address, pass it on to TableOutputFormat. if (quorumAddress != null) { // Calling this will validate the format ZKConfig.validateClusterKey(quorumAddress); - conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress); + conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress); } if (serverClass != null && serverImpl != null) { conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass); @@ -757,11 +660,10 @@ public static void initTableReducerJob(String table, } /** - * Ensures that the given number of reduce tasks for the given job - * configuration does not exceed the number of regions for the given table. - * - * @param table The table to get the region count for. - * @param job The current job to adjust. + * Ensures that the given number of reduce tasks for the given job configuration does not exceed + * the number of regions for the given table. + * @param table The table to get the region count for. + * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ public static void limitNumReduceTasks(String table, Job job) throws IOException { @@ -772,11 +674,10 @@ public static void limitNumReduceTasks(String table, Job job) throws IOException } /** - * Sets the number of reduce tasks for the given job configuration to the - * number of regions the given table has. - * - * @param table The table to get the region count for. - * @param job The current job to adjust. + * Sets the number of reduce tasks for the given job configuration to the number of regions the + * given table has. + * @param table The table to get the region count for. + * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ public static void setNumReduceTasks(String table, Job job) throws IOException { @@ -784,13 +685,11 @@ public static void setNumReduceTasks(String table, Job job) throws IOException { } /** - * Sets the number of rows to return and cache with each scanner iteration. - * Higher caching values will enable faster mapreduce jobs at the expense of - * requiring more heap to contain the cached rows. - * + * Sets the number of rows to return and cache with each scanner iteration. Higher caching values + * will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached + * rows. * @param job The current job to adjust. - * @param batchSize The number of rows to return in batch with each scanner - * iteration. + * @param batchSize The number of rows to return in batch with each scanner iteration. 
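A short sketch of how the reducer-sizing helpers and setScannerCaching described above are typically combined in a driver; the table name, job name, and wrapper class are illustrative placeholders.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ReduceSizingExample {                  // hypothetical wrapper class
  static Job configure() throws IOException {
    Job job = Job.getInstance(HBaseConfiguration.create(), "table-reduce-sizing");
    TableMapReduceUtil.initTableReducerJob("my_table", IdentityTableReducer.class, job);
    // Never run more reducers than "my_table" has regions.
    TableMapReduceUtil.limitNumReduceTasks("my_table", job);
    // Or match the region count exactly.
    TableMapReduceUtil.setNumReduceTasks("my_table", job);
    // Fetch 500 rows per scanner round trip (sets hbase.client.scanner.caching for this job).
    TableMapReduceUtil.setScannerCaching(job, 500);
    return job;
  }
}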
*/ public static void setScannerCaching(Job job, int batchSize) { job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize); @@ -799,10 +698,9 @@ public static void setScannerCaching(Job job, int batchSize) { /** * Add HBase and its dependencies (only) to the job configuration. *

- * This is intended as a low-level API, facilitating code reuse between this - * class and its mapred counterpart. It also of use to external tools that - * need to build a MapReduce job that interacts with HBase but want - * fine-grained control over the jars shipped to the cluster. + * This is intended as a low-level API, facilitating code reuse between this class and its mapred + * counterpart. It is also of use to external tools that need to build a MapReduce job that interacts + * with HBase but want fine-grained control over the jars shipped to the cluster. *

* @param conf The Configuration object to extend with dependencies. * @see org.apache.hadoop.hbase.mapred.TableMapReduceUtil @@ -811,34 +709,34 @@ public static void setScannerCaching(Job job, int batchSize) { public static void addHBaseDependencyJars(Configuration conf) throws IOException { addDependencyJarsForClasses(conf, // explicitly pull a class from each module - org.apache.hadoop.hbase.HConstants.class, // hbase-common + org.apache.hadoop.hbase.HConstants.class, // hbase-common org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, // hbase-protocol-shaded - org.apache.hadoop.hbase.client.Put.class, // hbase-client - org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server - org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat - org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat - org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-mapreduce - org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics - org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api - org.apache.hadoop.hbase.replication.ReplicationUtils.class, // hbase-replication - org.apache.hadoop.hbase.http.HttpServer.class, // hbase-http - org.apache.hadoop.hbase.procedure2.Procedure.class, // hbase-procedure - org.apache.hadoop.hbase.zookeeper.ZKWatcher.class, // hbase-zookeeper + org.apache.hadoop.hbase.client.Put.class, // hbase-client + org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server + org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat + org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat + org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-mapreduce + org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics + org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api + org.apache.hadoop.hbase.replication.ReplicationUtils.class, // hbase-replication + org.apache.hadoop.hbase.http.HttpServer.class, // hbase-http + org.apache.hadoop.hbase.procedure2.Procedure.class, // hbase-procedure + org.apache.hadoop.hbase.zookeeper.ZKWatcher.class, // hbase-zookeeper org.apache.hbase.thirdparty.com.google.common.collect.Lists.class, // hb-shaded-miscellaneous org.apache.hbase.thirdparty.com.google.gson.GsonBuilder.class, // hbase-shaded-gson org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.class, // hb-sh-protobuf - org.apache.hbase.thirdparty.io.netty.channel.Channel.class, // hbase-shaded-netty - org.apache.zookeeper.ZooKeeper.class, // zookeeper - org.apache.htrace.core.Tracer.class, // htrace - com.codahale.metrics.MetricRegistry.class, // metrics-core - org.apache.commons.lang3.ArrayUtils.class, // commons-lang - io.opentelemetry.api.trace.Span.class, // opentelemetry-api + org.apache.hbase.thirdparty.io.netty.channel.Channel.class, // hbase-shaded-netty + org.apache.zookeeper.ZooKeeper.class, // zookeeper + org.apache.htrace.core.Tracer.class, // htrace + com.codahale.metrics.MetricRegistry.class, // metrics-core + org.apache.commons.lang3.ArrayUtils.class, // commons-lang + io.opentelemetry.api.trace.Span.class, // opentelemetry-api io.opentelemetry.semconv.trace.attributes.SemanticAttributes.class); // opentelemetry-semconv } /** - * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. - * Also exposed to shell scripts via `bin/hbase mapredcp`. + * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. 
Also + * exposed to shell scripts via `bin/hbase mapredcp`. */ public static String buildDependencyClasspath(Configuration conf) { if (conf == null) { @@ -860,63 +758,52 @@ public static String buildDependencyClasspath(Configuration conf) { } /** - * Add the HBase dependency jars as well as jars for any of the configured - * job classes to the job configuration, so that JobClient will ship them - * to the cluster and add them to the DistributedCache. + * Add the HBase dependency jars as well as jars for any of the configured job classes to the job + * configuration, so that JobClient will ship them to the cluster and add them to the + * DistributedCache. */ public static void addDependencyJars(Job job) throws IOException { addHBaseDependencyJars(job.getConfiguration()); try { addDependencyJarsForClasses(job.getConfiguration(), - // when making changes here, consider also mapred.TableMapReduceUtil - // pull job classes - job.getMapOutputKeyClass(), - job.getMapOutputValueClass(), - job.getInputFormatClass(), - job.getOutputKeyClass(), - job.getOutputValueClass(), - job.getOutputFormatClass(), - job.getPartitionerClass(), - job.getCombinerClass()); + // when making changes here, consider also mapred.TableMapReduceUtil + // pull job classes + job.getMapOutputKeyClass(), job.getMapOutputValueClass(), job.getInputFormatClass(), + job.getOutputKeyClass(), job.getOutputValueClass(), job.getOutputFormatClass(), + job.getPartitionerClass(), job.getCombinerClass()); } catch (ClassNotFoundException e) { throw new IOException(e); } } /** - * Add the jars containing the given classes to the job's configuration - * such that JobClient will ship them to the cluster and add them to - * the DistributedCache. + * Add the jars containing the given classes to the job's configuration such that JobClient will + * ship them to the cluster and add them to the DistributedCache. * @deprecated since 1.3.0 and will be removed in 3.0.0. Use {@link #addDependencyJars(Job)} - * instead. + * instead. * @see #addDependencyJars(Job) * @see HBASE-8386 */ @Deprecated - public static void addDependencyJars(Configuration conf, - Class... classes) throws IOException { + public static void addDependencyJars(Configuration conf, Class... classes) throws IOException { LOG.warn("The addDependencyJars(Configuration, Class...) method has been deprecated since it" - + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " + - "instead. See HBASE-8386 for more details."); + + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " + + "instead. See HBASE-8386 for more details."); addDependencyJarsForClasses(conf, classes); } /** - * Add the jars containing the given classes to the job's configuration - * such that JobClient will ship them to the cluster and add them to - * the DistributedCache. - * - * N.B. that this method at most adds one jar per class given. If there is more than one - * jar available containing a class with the same name as a given class, we don't define - * which of those jars might be chosen. - * + * Add the jars containing the given classes to the job's configuration such that JobClient will + * ship them to the cluster and add them to the DistributedCache. N.B. that this method at most + * adds one jar per class given. If there is more than one jar available containing a class with + * the same name as a given class, we don't define which of those jars might be chosen. 
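For context, a minimal sketch of the intended call pattern: addDependencyJars(Job) is the non-deprecated entry point for normal jobs, while buildDependencyClasspath backs the `bin/hbase mapredcp` shell command; the class and job names here are hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsExample {                // hypothetical
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(HBaseConfiguration.create(), "with-hbase-deps");
    // Ships the HBase module jars plus jars for the job's own key/value/format classes via tmpjars.
    TableMapReduceUtil.addDependencyJars(job);
    // Classpath string built from tmpjars; the same helper backs `bin/hbase mapredcp`.
    String classpath = TableMapReduceUtil.buildDependencyClasspath(job.getConfiguration());
    System.out.println(classpath);
  }
}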
* @param conf The Hadoop Configuration to modify * @param classes will add just those dependencies needed to find the given classes * @throws IOException if an underlying library call fails. */ @InterfaceAudience.Private - public static void addDependencyJarsForClasses(Configuration conf, - Class... classes) throws IOException { + public static void addDependencyJarsForClasses(Configuration conf, Class... classes) + throws IOException { FileSystem localFs = FileSystem.getLocal(conf); Set jars = new HashSet<>(); @@ -933,13 +820,11 @@ public static void addDependencyJarsForClasses(Configuration conf, Path path = findOrCreateJar(clazz, localFs, packagedClasses); if (path == null) { - LOG.warn("Could not find jar for class " + clazz + - " in order to ship it to the cluster."); + LOG.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster."); continue; } if (!localFs.exists(path)) { - LOG.warn("Could not validate jar file " + path + " for class " - + clazz); + LOG.warn("Could not validate jar file " + path + " for class " + clazz); continue; } jars.add(path.toString()); @@ -950,12 +835,11 @@ public static void addDependencyJarsForClasses(Configuration conf, } /** - * Finds the Jar for a class or creates it if it doesn't exist. If the class is in - * a directory in the classpath, it creates a Jar on the fly with the - * contents of the directory and returns the path to that Jar. If a Jar is - * created, it is created in the system temporary directory. Otherwise, - * returns an existing jar that contains a class of the same name. Maintains - * a mapping from jar contents to the tmp jar created. + * Finds the Jar for a class or creates it if it doesn't exist. If the class is in a directory in + * the classpath, it creates a Jar on the fly with the contents of the directory and returns the + * path to that Jar. If a Jar is created, it is created in the system temporary directory. + * Otherwise, returns an existing jar that contains a class of the same name. Maintains a mapping + * from jar contents to the tmp jar created. * @param my_class the class to find. * @param fs the FileSystem with which to qualify the returned path. * @param packagedClasses a map of class name to path. @@ -963,8 +847,7 @@ public static void addDependencyJarsForClasses(Configuration conf, * @throws IOException */ private static Path findOrCreateJar(Class my_class, FileSystem fs, - Map packagedClasses) - throws IOException { + Map packagedClasses) throws IOException { // attempt to locate an existing jar for the class. String jar = findContainingJar(my_class, packagedClasses); if (null == jar || jar.isEmpty()) { @@ -981,12 +864,13 @@ private static Path findOrCreateJar(Class my_class, FileSystem fs, } /** - * Add entries to packagedClasses corresponding to class files - * contained in jar. + * Add entries to packagedClasses corresponding to class files contained in + * jar. * @param jar The jar who's content to list. * @param packagedClasses map[class -> jar] */ - private static void updateMap(String jar, Map packagedClasses) throws IOException { + private static void updateMap(String jar, Map packagedClasses) + throws IOException { if (null == jar || jar.isEmpty()) { return; } @@ -1005,10 +889,9 @@ private static void updateMap(String jar, Map packagedClasses) t } /** - * Find a jar that contains a class of the same name, if any. It will return - * a jar file, even if that is not the first thing on the class path that - * has a class with the same name. 
Looks first on the classpath and then in - * the packagedClasses map. + * Find a jar that contains a class of the same name, if any. It will return a jar file, even if + * that is not the first thing on the class path that has a class with the same name. Looks first + * on the classpath and then in the packagedClasses map. * @param my_class the class to find. * @return a jar file that contains the class, or null. * @throws IOException @@ -1047,9 +930,8 @@ private static String findContainingJar(Class my_class, Map p } /** - * Invoke 'getJar' on a custom JarFinder implementation. Useful for some job - * configuration contexts (HBASE-8140) and also for testing on MRv2. - * check if we have HADOOP-9426. + * Invoke 'getJar' on a custom JarFinder implementation. Useful for some job configuration + * contexts (HBASE-8140) and also for testing on MRv2. check if we have HADOOP-9426. * @param my_class the class to find. * @return a jar file that contains the class, or null. */ @@ -1067,7 +949,7 @@ private static String getJar(Class my_class) { private static int getRegionCount(Configuration conf, TableName tableName) throws IOException { try (Connection conn = ConnectionFactory.createConnection(conf); - RegionLocator locator = conn.getRegionLocator(tableName)) { + RegionLocator locator = conn.getRegionLocator(tableName)) { return locator.getAllRegionLocations().size(); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java index 3a63bc60ab25..8ff8c240a6d3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,19 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Extends the base Mapper class to add the required input key - * and value classes. - * - * @param The type of the key. - * @param The type of the value. + * Extends the base Mapper class to add the required input key and value classes. + * @param The type of the key. + * @param The type of the value. * @see org.apache.hadoop.mapreduce.Mapper */ @InterfaceAudience.Public public abstract class TableMapper -extends Mapper { + extends Mapper { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java index e02ba5f54357..a59659534913 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,11 +18,10 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; /** * Small committer class that does not do anything. @@ -60,8 +58,6 @@ public boolean isRecoverySupported() { return true; } - public void recoverTask(TaskAttemptContext taskContext) - throws IOException - { + public void recoverTask(TaskAttemptContext taskContext) throws IOException { } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index 8da8d83d9231..a0121cd90bc8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,13 +42,11 @@ import org.slf4j.LoggerFactory; /** - * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored - * while the output value must be either a {@link Put} or a - * {@link Delete} instance. + * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored while the output + * value must be either a {@link Put} or a {@link Delete} instance. */ @InterfaceAudience.Public -public class TableOutputFormat extends OutputFormat -implements Configurable { +public class TableOutputFormat extends OutputFormat implements Configurable { private static final Logger LOG = LoggerFactory.getLogger(TableOutputFormat.class); @@ -57,20 +54,19 @@ public class TableOutputFormat extends OutputFormat public static final String OUTPUT_TABLE = "hbase.mapred.outputtable"; /** - * Prefix for configuration property overrides to apply in {@link #setConf(Configuration)}. - * For keys matching this prefix, the prefix is stripped, and the value is set in the - * configuration with the resulting key, ie. the entry "hbase.mapred.output.key1 = value1" - * would be set in the configuration as "key1 = value1". Use this to set properties - * which should only be applied to the {@code TableOutputFormat} configuration and not the - * input configuration. + * Prefix for configuration property overrides to apply in {@link #setConf(Configuration)}. For + * keys matching this prefix, the prefix is stripped, and the value is set in the configuration + * with the resulting key, ie. the entry "hbase.mapred.output.key1 = value1" would be set in the + * configuration as "key1 = value1". Use this to set properties which should only be applied to + * the {@code TableOutputFormat} configuration and not the input configuration. */ public static final String OUTPUT_CONF_PREFIX = "hbase.mapred.output."; /** - * Optional job parameter to specify a peer cluster. - * Used specifying remote cluster when copying between hbase clusters (the - * source is picked up from hbase-site.xml). - * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, Class, String, String, String) + * Optional job parameter to specify a peer cluster. 
Used specifying remote cluster when copying + * between hbase clusters (the source is picked up from hbase-site.xml). + * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, + * Class, String, String, String) */ public static final String QUORUM_ADDRESS = OUTPUT_CONF_PREFIX + "quorum"; @@ -78,11 +74,9 @@ public class TableOutputFormat extends OutputFormat public static final String QUORUM_PORT = OUTPUT_CONF_PREFIX + "quorum.port"; /** Optional specification of the rs class name of the peer cluster */ - public static final String - REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class"; + public static final String REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class"; /** Optional specification of the rs impl name of the peer cluster */ - public static final String - REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl"; + public static final String REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl"; /** The configuration. */ private Configuration conf = null; @@ -90,26 +84,24 @@ public class TableOutputFormat extends OutputFormat /** * Writes the reducer output to an HBase table. */ - protected class TableRecordWriter - extends RecordWriter { + protected class TableRecordWriter extends RecordWriter { private Connection connection; private BufferedMutator mutator; /** * @throws IOException - * */ public TableRecordWriter() throws IOException { String tableName = conf.get(OUTPUT_TABLE); this.connection = ConnectionFactory.createConnection(conf); this.mutator = connection.getBufferedMutator(TableName.valueOf(tableName)); - LOG.info("Created table instance for " + tableName); + LOG.info("Created table instance for " + tableName); } + /** * Closes the writer, in this case flush table commits. - * - * @param context The context. + * @param context The context. * @throws IOException When closing the writer fails. * @see RecordWriter#close(TaskAttemptContext) */ @@ -128,15 +120,13 @@ public void close(TaskAttemptContext context) throws IOException { /** * Writes a key/value pair into the table. - * - * @param key The key. - * @param value The value. + * @param key The key. + * @param value The value. * @throws IOException When writing fails. * @see RecordWriter#write(Object, Object) */ @Override - public void write(KEY key, Mutation value) - throws IOException { + public void write(KEY key, Mutation value) throws IOException { if (!(value instanceof Put) && !(value instanceof Delete)) { throw new IOException("Pass a Delete or a Put"); } @@ -145,14 +135,11 @@ public void write(KEY key, Mutation value) } /** - * Creates a new record writer. - * - * Be aware that the baseline javadoc gives the impression that there is a single - * {@link RecordWriter} per job but in HBase, it is more natural if we give you a new + * Creates a new record writer. Be aware that the baseline javadoc gives the impression that there + * is a single {@link RecordWriter} per job but in HBase, it is more natural if we give you a new * RecordWriter per call of this method. You must close the returned RecordWriter when done. * Failure to do so will drop writes. - * - * @param context The current task context. + * @param context The current task context. * @return The newly created writer instance. * @throws IOException When creating the writer fails. * @throws InterruptedException When the job is cancelled. @@ -165,8 +152,7 @@ public RecordWriter getRecordWriter(TaskAttemptContext context) /** * Checks if the output table exists and is enabled. - * - * @param context The current context. 
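To illustrate the OUTPUT_CONF_PREFIX override mechanism described above, here is a hand-wired output-configuration sketch (bypassing initTableReducerJob); the table name and quorum value are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.mapreduce.Job;

public class ManualTableOutputExample {             // hypothetical
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(HBaseConfiguration.create(), "manual-table-output");
    Configuration conf = job.getConfiguration();
    conf.set(TableOutputFormat.OUTPUT_TABLE, "my_table");           // hypothetical table name
    // Keys under the "hbase.mapred.output." prefix are stripped and applied only to the
    // output-side configuration, e.g. to point writes at a different quorum:
    conf.set(TableOutputFormat.OUTPUT_CONF_PREFIX + HConstants.ZOOKEEPER_QUORUM, "zk-remote");
    job.setOutputFormatClass(TableOutputFormat.class);
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(Mutation.class);   // actual values must be Put or Delete instances
  }
}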
+ * @param context The current context. * @throws IOException When the check fails. * @throws InterruptedException When the job is aborted. * @see OutputFormat#checkOutputSpecs(JobContext) @@ -179,24 +165,23 @@ public void checkOutputSpecs(JobContext context) throws IOException, Interrupted } try (Connection connection = ConnectionFactory.createConnection(hConf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE)); if (!admin.tableExists(tableName)) { - throw new TableNotFoundException("Can't write, table does not exist:" + - tableName.getNameAsString()); + throw new TableNotFoundException( + "Can't write, table does not exist:" + tableName.getNameAsString()); } if (!admin.isTableEnabled(tableName)) { - throw new TableNotEnabledException("Can't write, table is not enabled: " + - tableName.getNameAsString()); + throw new TableNotEnabledException( + "Can't write, table is not enabled: " + tableName.getNameAsString()); } } } /** * Returns the output committer. - * - * @param context The current context. + * @param context The current context. * @return The committer. * @throws IOException When creating the committer fails. * @throws InterruptedException When the job is aborted. @@ -204,7 +189,7 @@ public void checkOutputSpecs(JobContext context) throws IOException, Interrupted */ @Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { return new TableOutputCommitter(); } @@ -216,7 +201,7 @@ public Configuration getConf() { @Override public void setConf(Configuration otherConf) { String tableName = otherConf.get(OUTPUT_TABLE); - if(tableName == null || tableName.length() <= 0) { + if (tableName == null || tableName.length() <= 0) { throw new IllegalArgumentException("Must specify table name"); } @@ -234,7 +219,7 @@ public void setConf(Configuration otherConf) { if (zkClientPort != 0) { this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort); } - } catch(IOException e) { + } catch (IOException e) { LOG.error(e.toString(), e); throw new RuntimeException(e); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java index 512c22f9cc9c..997ea0775097 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; @@ -28,21 +25,19 @@ import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) - * pairs. + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) pairs. 
*/ @InterfaceAudience.Public -public class TableRecordReader -extends RecordReader { +public class TableRecordReader extends RecordReader { private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl(); /** * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow The first row to start at. + * @param firstRow The first row to start at. * @throws IOException When restarting fails. */ public void restart(byte[] firstRow) throws IOException { @@ -58,8 +53,7 @@ public void setTable(Table table) { /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.recordReaderImpl.setScan(scan); @@ -67,7 +61,6 @@ public void setScan(Scan scan) { /** * Closes the split. - * * @see org.apache.hadoop.mapreduce.RecordReader#close() */ @Override @@ -77,21 +70,18 @@ public void close() { /** * Returns the current key. - * * @return The current key. * @throws IOException * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey() */ @Override - public ImmutableBytesWritable getCurrentKey() throws IOException, - InterruptedException { + public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException { return this.recordReaderImpl.getCurrentKey(); } /** * Returns the current value. - * * @return The current value. * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. @@ -104,25 +94,21 @@ public Result getCurrentValue() throws IOException, InterruptedException { /** * Initializes the reader. - * - * @param inputsplit The split to work with. - * @param context The current task context. + * @param inputsplit The split to work with. + * @param context The current task context. * @throws IOException When setting up the reader fails. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#initialize( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public void initialize(InputSplit inputsplit, - TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { this.recordReaderImpl.initialize(inputsplit, context); } /** * Positions the record reader to the next record. - * * @return true if there was another record. * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. @@ -135,7 +121,6 @@ public boolean nextKeyValue() throws IOException, InterruptedException { /** * The current progress of the record reader through its data. - * * @return A number between 0.0 and 1.0, the fraction of the data read. 
* @see org.apache.hadoop.mapreduce.RecordReader#getProgress() */ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java index 097b436f5664..55f891f7b47e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java @@ -40,13 +40,11 @@ import org.slf4j.LoggerFactory; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) - * pairs. + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) pairs. */ @InterfaceAudience.Public public class TableRecordReaderImpl { - public static final String LOG_PER_ROW_COUNT - = "hbase.mapreduce.log.scanner.rowcount"; + public static final String LOG_PER_ROW_COUNT = "hbase.mapreduce.log.scanner.rowcount"; private static final Logger LOG = LoggerFactory.getLogger(TableRecordReaderImpl.class); @@ -71,8 +69,7 @@ public class TableRecordReaderImpl { /** * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow The first row to start at. + * @param firstRow The first row to start at. * @throws IOException When restarting fails. */ public void restart(byte[] firstRow) throws IOException { @@ -98,18 +95,17 @@ public void restart(byte[] firstRow) throws IOException { } /** - * In new mapreduce APIs, TaskAttemptContext has two getCounter methods - * Check if getCounter(String, String) method is available. + * In new mapreduce APIs, TaskAttemptContext has two getCounter methods Check if + * getCounter(String, String) method is available. * @return The getCounter method or null if not available. * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 */ @Deprecated protected static Method retrieveGetCounterWithStringsParams(TaskAttemptContext context) - throws IOException { + throws IOException { Method m = null; try { - m = context.getClass().getMethod("getCounter", - new Class [] {String.class, String.class}); + m = context.getClass().getMethod("getCounter", new Class[] { String.class, String.class }); } catch (SecurityException e) { throw new IOException("Failed test for getCounter", e); } catch (NoSuchMethodException e) { @@ -131,8 +127,7 @@ public void setHTable(Table htable) { /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.scan = scan; @@ -141,9 +136,8 @@ public void setScan(Scan scan) { /** * Build the scanner. Not done in constructor to allow for extension. */ - public void initialize(InputSplit inputsplit, - TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { if (context != null) { this.context = context; } @@ -152,8 +146,6 @@ public void initialize(InputSplit inputsplit, /** * Closes the split. - * - * */ public void close() { if (this.scanner != null) { @@ -168,18 +160,15 @@ public void close() { /** * Returns the current key. - * * @return The current key. * @throws InterruptedException When the job is aborted. 
*/ - public ImmutableBytesWritable getCurrentKey() throws IOException, - InterruptedException { + public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException { return key; } /** * Returns the current value. - * * @return The current value. * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. @@ -188,10 +177,8 @@ public Result getCurrentValue() throws IOException, InterruptedException { return value; } - /** * Positions the record reader to the next record. - * * @return true if there was another record. * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. @@ -210,7 +197,7 @@ public boolean nextKeyValue() throws IOException, InterruptedException { numStale++; } if (logScannerActivity) { - rowcount ++; + rowcount++; if (rowcount >= logPerRowCount) { long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount); @@ -228,16 +215,16 @@ public boolean nextKeyValue() throws IOException, InterruptedException { // the scanner, if the second call fails, it will be rethrown LOG.info("recovered from " + StringUtils.stringifyException(e)); if (lastSuccessfulRow == null) { - LOG.warn("We are restarting the first next() invocation," + - " if your mapper has restarted a few other times like this" + - " then you should consider killing this job and investigate" + - " why it's taking so long."); + LOG.warn("We are restarting the first next() invocation," + + " if your mapper has restarted a few other times like this" + + " then you should consider killing this job and investigate" + + " why it's taking so long."); } if (lastSuccessfulRow == null) { restart(scan.getStartRow()); } else { restart(lastSuccessfulRow); - scanner.next(); // skip presumed already mapped row + scanner.next(); // skip presumed already mapped row } value = scanner.next(); if (value != null && value.isStale()) { @@ -267,8 +254,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException { long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount); LOG.info(ioe.toString(), ioe); - String lastRow = lastSuccessfulRow == null ? - "null" : Bytes.toStringBinary(lastSuccessfulRow); + String lastRow = + lastSuccessfulRow == null ? "null" : Bytes.toStringBinary(lastSuccessfulRow); LOG.info("lastSuccessfulRow=" + lastRow); } throw ioe; @@ -276,10 +263,9 @@ public boolean nextKeyValue() throws IOException, InterruptedException { } /** - * If hbase runs on new version of mapreduce, RecordReader has access to - * counters thus can update counters based on scanMetrics. - * If hbase runs on old version of mapreduce, it won't be able to get - * access to counters and TableRecorderReader can't update counter values. + * If hbase runs on new version of mapreduce, RecordReader has access to counters thus can update + * counters based on scanMetrics. If hbase runs on old version of mapreduce, it won't be able to + * get access to counters and TableRecorderReader can't update counter values. */ private void updateCounters() { ScanMetrics scanMetrics = scanner.getScanMetrics(); @@ -291,8 +277,8 @@ private void updateCounters() { } /** - * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 - * Use {@link #updateCounters(ScanMetrics, long, TaskAttemptContext, long)} instead. 
+ * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 Use + * {@link #updateCounters(ScanMetrics, long, TaskAttemptContext, long)} instead. */ @Deprecated protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRestarts, @@ -307,29 +293,28 @@ protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRes return; } - for (Map.Entry entry : scanMetrics.getMetricsMap().entrySet()) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, entry.getKey()); - if (counter != null) { - counter.increment(entry.getValue()); - } + for (Map.Entry entry : scanMetrics.getMetricsMap().entrySet()) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, entry.getKey()); + if (counter != null) { + counter.increment(entry.getValue()); } - if (numScannerRestarts != 0L) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCANNER_RESTARTS"); - if (counter != null) { - counter.increment(numScannerRestarts); - } + } + if (numScannerRestarts != 0L) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCANNER_RESTARTS"); + if (counter != null) { + counter.increment(numScannerRestarts); } - if (numStale != 0L) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCAN_RESULTS_STALE"); - if (counter != null) { - counter.increment(numStale); - } + } + if (numStale != 0L) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCAN_RESULTS_STALE"); + if (counter != null) { + counter.increment(numStale); } + } } /** * The current progress of the record reader through its data. - * * @return A number between 0.0 and 1.0, the fraction of the data read. */ public float getProgress() { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java index 07e44cbc28be..6c249abe3d11 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,28 +17,26 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; /** - * Extends the basic Reducer class to add the required key and - * value input/output classes. While the input key and value as well as the - * output key can be anything handed in from the previous map phase the output - * value must be either a {@link org.apache.hadoop.hbase.client.Put Put} - * or a {@link org.apache.hadoop.hbase.client.Delete Delete} instance when - * using the {@link TableOutputFormat} class. + * Extends the basic Reducer class to add the required key and value input/output + * classes. While the input key and value as well as the output key can be anything handed in from + * the previous map phase the output value must be either a + * {@link org.apache.hadoop.hbase.client.Put Put} or a {@link org.apache.hadoop.hbase.client.Delete + * Delete} instance when using the {@link TableOutputFormat} class. *

- * This class is extended by {@link IdentityTableReducer} but can also be - * subclassed to implement similar features or any custom code needed. It has - * the advantage to enforce the output value to a specific basic type. - * - * @param The type of the input key. - * @param The type of the input value. - * @param The type of the output key. + * This class is extended by {@link IdentityTableReducer} but can also be subclassed to implement + * similar features or any custom code needed. It has the advantage to enforce the output value to a + * specific basic type. + * @param The type of the input key. + * @param The type of the input value. + * @param The type of the output key. * @see org.apache.hadoop.mapreduce.Reducer */ @InterfaceAudience.Public public abstract class TableReducer -extends Reducer { + extends Reducer { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java index 23a39a4192db..8fa6d8ba4925 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.DataInput; @@ -41,40 +40,41 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job - * bypasses HBase servers, and directly accesses the underlying files (hfile, recovered edits, - * wals, etc) directly to provide maximum performance. The snapshot is not required to be - * restored to the live cluster or cloned. This also allows to run the mapreduce job from an - * online or offline hbase cluster. The snapshot files can be exported by using the - * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, - * and this InputFormat can be used to run the mapreduce job directly over the snapshot files. - * The snapshot should not be deleted while there are jobs reading from snapshot files. + * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job bypasses + * HBase servers, and directly accesses the underlying files (hfile, recovered edits, wals, etc) + * directly to provide maximum performance. The snapshot is not required to be restored to the live + * cluster or cloned. This also allows to run the mapreduce job from an online or offline hbase + * cluster. The snapshot files can be exported by using the + * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, and this + * InputFormat can be used to run the mapreduce job directly over the snapshot files. The snapshot + * should not be deleted while there are jobs reading from snapshot files. *

* Usage is similar to TableInputFormat, and * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, boolean, Path)} * can be used to configure the job. - *

{@code
- * Job job = new Job(conf);
- * Scan scan = new Scan();
- * TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
- *      scan, MyTableMapper.class, MyMapKeyOutput.class,
- *      MyMapOutputValueWritable.class, job, true);
+ * 
+ * 
+ * {
+ *   @code
+ *   Job job = new Job(conf);
+ *   Scan scan = new Scan();
+ *   TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, MyTableMapper.class,
+ *     MyMapKeyOutput.class, MyMapOutputValueWritable.class, job, true);
  * }
  * 
*

- * Internally, this input format restores the snapshot into the given tmp directory. By default, - * and similar to {@link TableInputFormat} an InputSplit is created per region, but optionally you - * can run N mapper tasks per every region, in which case the region key range will be split to - * N sub-ranges and an InputSplit will be created per sub-range. The region is opened for reading - * from each RecordReader. An internal RegionScanner is used to execute the + * Internally, this input format restores the snapshot into the given tmp directory. By default, and + * similar to {@link TableInputFormat} an InputSplit is created per region, but optionally you can + * run N mapper tasks per every region, in which case the region key range will be split to N + * sub-ranges and an InputSplit will be created per sub-range. The region is opened for reading from + * each RecordReader. An internal RegionScanner is used to execute the * {@link org.apache.hadoop.hbase.CellScanner} obtained from the user. *

* HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from - * snapshot files and data files. - * To read from snapshot files directly from the file system, the user who is running the MR job - * must have sufficient permissions to access snapshot and reference files. - * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase - * user or the user must have group or other privileges in the filesystem (See HBASE-8369). + * snapshot files and data files. To read from snapshot files directly from the file system, the + * user who is running the MR job must have sufficient permissions to access snapshot and reference + * files. This means that to run mapreduce over snapshot files, the MR job has to be run as the + * HBase user or the user must have group or other privileges in the filesystem (See HBASE-8369). * Note that, given other users access to read from snapshot/data files will completely circumvent * the access control enforced by HBase. * @see org.apache.hadoop.hbase.client.TableSnapshotScanner @@ -130,19 +130,17 @@ TableSnapshotInputFormatImpl.InputSplit getDelegate() { } @InterfaceAudience.Private - static class TableSnapshotRegionRecordReader extends - RecordReader { + static class TableSnapshotRegionRecordReader + extends RecordReader { private TableSnapshotInputFormatImpl.RecordReader delegate = - new TableSnapshotInputFormatImpl.RecordReader(); + new TableSnapshotInputFormatImpl.RecordReader(); private TaskAttemptContext context; @Override - public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit split, TaskAttemptContext context) + throws IOException, InterruptedException { this.context = context; - delegate.initialize( - ((TableSnapshotRegionSplit) split).delegate, - context.getConfiguration()); + delegate.initialize(((TableSnapshotRegionSplit) split).delegate, context.getConfiguration()); } @Override @@ -179,16 +177,16 @@ public void close() throws IOException { } @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) throws IOException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException { return new TableSnapshotRegionRecordReader(); } @Override public List getSplits(JobContext job) throws IOException, InterruptedException { List results = new ArrayList<>(); - for (TableSnapshotInputFormatImpl.InputSplit split : - TableSnapshotInputFormatImpl.getSplits(job.getConfiguration())) { + for (TableSnapshotInputFormatImpl.InputSplit split : TableSnapshotInputFormatImpl + .getSplits(job.getConfiguration())) { results.add(new TableSnapshotRegionSplit(split)); } return results; @@ -198,13 +196,12 @@ public List getSplits(JobContext job) throws IOException, Interrupte * Configures the job to use TableSnapshotInputFormat to read from a snapshot. * @param job the job to configure * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param restoreDir a temporary directory to restore the snapshot into. Current user should have + * write permissions to this directory, and this should not be a subdirectory of rootdir. 
+ * After the job is finished, restoreDir can be deleted. * @throws IOException if an error occurs */ - public static void setInput(Job job, String snapshotName, Path restoreDir) - throws IOException { + public static void setInput(Job job, String snapshotName, Path restoreDir) throws IOException { TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir); } @@ -212,21 +209,21 @@ public static void setInput(Job job, String snapshotName, Path restoreDir) * Configures the job to use TableSnapshotInputFormat to read from a snapshot. * @param job the job to configure * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param restoreDir a temporary directory to restore the snapshot into. Current user should have + * write permissions to this directory, and this should not be a subdirectory of rootdir. + * After the job is finished, restoreDir can be deleted. * @param splitAlgo split algorithm to generate splits from region * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException if an error occurs */ - public static void setInput(Job job, String snapshotName, Path restoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { - TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir, - splitAlgo, numSplitsPerRegion); - } + public static void setInput(Job job, String snapshotName, Path restoreDir, + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { + TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir, + splitAlgo, numSplitsPerRegion); + } /** - * clean restore directory after snapshot scan job + * clean restore directory after snapshot scan job * @param job the snapshot scan job * @param snapshotName the name of the snapshot to read from * @throws IOException if an error occurs diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java index e454157da269..1d1c0a8f2e27 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
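A minimal end-to-end sketch of the snapshot-reading workflow this class provides, assuming a hypothetical snapshot name and restore directory; IdentityTableMapper is used so the example stays self-contained, and the multi-splits-per-region variant is shown only as a comment.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class SnapshotScanDriver {                   // hypothetical driver
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "scan-snapshot");
    Scan scan = new Scan();
    Path restoreDir = new Path("/tmp/snapshot-restore");  // writable, not under hbase.rootdir
    TableMapReduceUtil.initTableSnapshotMapperJob("my_snapshot", scan, IdentityTableMapper.class,
        ImmutableBytesWritable.class, Result.class, job, true, restoreDir);
    // For N input splits per region, the setInput overload could be used instead, e.g.:
    // TableSnapshotInputFormat.setInput(job, "my_snapshot", restoreDir,
    //     new RegionSplitter.UniformSplit(), 4);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);
    boolean ok = job.waitForCompletion(true);
    // Remove the restored snapshot files once the job is finished.
    TableSnapshotInputFormat.cleanRestoreDir(job, "my_snapshot");
    System.exit(ok ? 0 : 1);
  }
}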
*/ - package org.apache.hadoop.hbase.mapreduce; import java.io.ByteArrayOutputStream; @@ -80,39 +79,37 @@ public class TableSnapshotInputFormatImpl { /** See {@link #getBestLocations(Configuration, HDFSBlocksDistribution, int)} */ private static final String LOCALITY_CUTOFF_MULTIPLIER = - "hbase.tablesnapshotinputformat.locality.cutoff.multiplier"; + "hbase.tablesnapshotinputformat.locality.cutoff.multiplier"; private static final float DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f; /** - * For MapReduce jobs running multiple mappers per region, determines - * what split algorithm we should be using to find split points for scanners. + * For MapReduce jobs running multiple mappers per region, determines what split algorithm we + * should be using to find split points for scanners. */ public static final String SPLIT_ALGO = "hbase.mapreduce.split.algorithm"; /** - * For MapReduce jobs running multiple mappers per region, determines - * number of splits to generate per region. + * For MapReduce jobs running multiple mappers per region, determines number of splits to generate + * per region. */ public static final String NUM_SPLITS_PER_REGION = "hbase.mapreduce.splits.per.region"; /** - * Whether to calculate the block location for splits. Default to true. - * If the computing layer runs outside of HBase cluster, the block locality does not master. - * Setting this value to false could skip the calculation and save some time. - * - * Set access modifier to "public" so that these could be accessed by test classes of - * both org.apache.hadoop.hbase.mapred - * and org.apache.hadoop.hbase.mapreduce. + * Whether to calculate the block location for splits. Default to true. If the computing layer + * runs outside of HBase cluster, the block locality does not master. Setting this value to false + * could skip the calculation and save some time. Set access modifier to "public" so that these + * could be accessed by test classes of both org.apache.hadoop.hbase.mapred and + * org.apache.hadoop.hbase.mapreduce. */ - public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY = + public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY = "hbase.TableSnapshotInputFormat.locality.enabled"; public static final boolean SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT = true; /** - * Whether to calculate the Snapshot region location by region location from meta. - * It is much faster than computing block locations for splits. + * Whether to calculate the Snapshot region location by region location from meta. It is much + * faster than computing block locations for splits. 
*/ - public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION = - "hbase.TableSnapshotInputFormat.locality.by.region.location"; + public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION = + "hbase.TableSnapshotInputFormat.locality.by.region.location"; public static final boolean SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT = false; @@ -125,8 +122,8 @@ public class TableSnapshotInputFormatImpl { /** * Whether to enable scan metrics on Scan, default to true */ - public static final String SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED = - "hbase.TableSnapshotInputFormat.scan_metrics.enabled"; + public static final String SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED = + "hbase.TableSnapshotInputFormat.scan_metrics.enabled"; public static final boolean SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT = true; @@ -150,10 +147,11 @@ public static class InputSplit implements Writable { private String restoreDir; // constructor for mapreduce framework / Writable - public InputSplit() {} + public InputSplit() { + } - public InputSplit(TableDescriptor htd, RegionInfo regionInfo, List locations, - Scan scan, Path restoreDir) { + public InputSplit(TableDescriptor htd, RegionInfo regionInfo, List locations, Scan scan, + Path restoreDir) { this.htd = htd; this.regionInfo = regionInfo; if (locations == null || locations.isEmpty()) { @@ -183,7 +181,7 @@ public String getRestoreDir() { } public long getLength() { - //TODO: We can obtain the file sizes of the snapshot here. + // TODO: We can obtain the file sizes of the snapshot here. return 0; } @@ -203,9 +201,9 @@ public RegionInfo getRegionInfo() { // doing this wrapping with Writables. @Override public void write(DataOutput out) throws IOException { - TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder() - .setTable(ProtobufUtil.toTableSchema(htd)) - .setRegion(ProtobufUtil.toRegionInfo(regionInfo)); + TableSnapshotRegionSplit.Builder builder = + TableSnapshotRegionSplit.newBuilder().setTable(ProtobufUtil.toTableSchema(htd)) + .setRegion(ProtobufUtil.toRegionInfo(regionInfo)); for (String location : locations) { builder.addLocations(location); @@ -265,7 +263,6 @@ public void initialize(InputSplit split, Configuration conf) throws IOException RegionInfo hri = this.split.getRegionInfo(); FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf); - // region is immutable, this should be fine, // otherwise we have to set the thread read point scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); @@ -279,7 +276,7 @@ public void initialize(InputSplit split, Configuration conf) throws IOException public boolean nextKeyValue() throws IOException { result = scanner.next(); if (result == null) { - //we are done + // we are done return false; } @@ -346,13 +343,12 @@ public static RegionSplitter.SplitAlgorithm getSplitAlgo(Configuration conf) thr try { return Class.forName(splitAlgoClassName).asSubclass(RegionSplitter.SplitAlgorithm.class) .getDeclaredConstructor().newInstance(); - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | - NoSuchMethodException | InvocationTargetException e) { + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException + | NoSuchMethodException | InvocationTargetException e) { throw new IOException("SplitAlgo class " + splitAlgoClassName + " is not found", e); } } - public static List getRegionInfosFromManifest(SnapshotManifest manifest) { List regionManifests = manifest.getRegionManifests(); if 
(regionManifests == null) { @@ -384,7 +380,7 @@ public static Scan extractScanFromConf(Configuration conf) throws IOException { scan = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN)); } else if (conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST) != null) { String[] columns = - conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" "); + conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" "); scan = new Scan(); for (String col : columns) { scan.addFamily(Bytes.toBytes(col)); @@ -394,11 +390,11 @@ public static Scan extractScanFromConf(Configuration conf) throws IOException { } if (scan.getReadType() == ReadType.DEFAULT) { - LOG.info("Provided Scan has DEFAULT ReadType," - + " updating STREAM for Snapshot-based InputFormat"); + LOG.info( + "Provided Scan has DEFAULT ReadType," + " updating STREAM for Snapshot-based InputFormat"); // Update the "DEFAULT" ReadType to be "STREAM" to try to improve the default case. scan.setReadType(conf.getEnum(SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE, - SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT)); + SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT)); } return scan; @@ -410,15 +406,15 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, } public static List getSplits(Scan scan, SnapshotManifest manifest, - List regionManifests, Path restoreDir, - Configuration conf, RegionSplitter.SplitAlgorithm sa, int numSplits) throws IOException { + List regionManifests, Path restoreDir, Configuration conf, + RegionSplitter.SplitAlgorithm sa, int numSplits) throws IOException { // load table descriptor TableDescriptor htd = manifest.getTableDescriptor(); Path tableDir = CommonFSUtils.getTableDir(restoreDir, htd.getTableName()); boolean localityEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, - SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); + SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); boolean scanMetricsEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED, SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT); @@ -452,8 +448,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, if (localityEnabled) { if (regionLocator != null) { /* Get Location from the local cache */ - HRegionLocation - location = regionLocator.getRegionLocation(hri.getStartKey(), false); + HRegionLocation location = regionLocator.getRegionLocation(hri.getStartKey(), false); hosts = new ArrayList<>(1); hosts.add(location.getHostname()); @@ -466,7 +461,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, byte[][] sp = sa.split(hri.getStartKey(), hri.getEndKey(), numSplits, true); for (int i = 0; i < sp.length - 1; i++) { if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), sp[i], - sp[i + 1])) { + sp[i + 1])) { Scan boundedScan = new Scan(scan); if (scan.getStartRow().length == 0) { @@ -488,7 +483,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, } } else { if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), - hri.getStartKey(), hri.getEndKey())) { + hri.getStartKey(), hri.getEndKey())) { splits.add(new InputSplit(htd, hri, hosts, scan, restoreDir)); } @@ -503,8 +498,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, * only when localityEnabled is true. 
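A minimal sketch of the read-type adjustment described above, using only the public Scan API; the input format performs the equivalent switch when it sees a DEFAULT read type:

    import org.apache.hadoop.hbase.client.Scan;

    public class SnapshotScanReadType {
      // Switch a DEFAULT read type to STREAM, which suits the long sequential scans
      // typical of snapshot-based MapReduce input.
      public static Scan streamingScan(Scan scan) {
        if (scan.getReadType() == Scan.ReadType.DEFAULT) {
          scan.setReadType(Scan.ReadType.STREAM);
        }
        return scan;
      }
    }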
*/ private static List calculateLocationsForInputSplit(Configuration conf, - TableDescriptor htd, RegionInfo hri, Path tableDir) - throws IOException { + TableDescriptor htd, RegionInfo hri, Path tableDir) throws IOException { return getBestLocations(conf, HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir)); } @@ -514,12 +508,11 @@ private static List calculateLocationsForInputSplit(Configuration conf, * do not want to blindly pass all the locations, since we are creating one split per region, and * the region's blocks are all distributed throughout the cluster unless favorite node assignment * is used. On the expected stable case, only one location will contain most of the blocks as - * local. - * On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. Here - * we are doing a simple heuristic, where we will pass all hosts which have at least 80% + * local. On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. + * Here we are doing a simple heuristic, where we will pass all hosts which have at least 80% * (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top - * host with the best locality. - * Return at most numTopsAtMost locations if there are more than that. + * host with the best locality. Return at most numTopsAtMost locations if there are more than + * that. */ private static List getBestLocations(Configuration conf, HDFSBlocksDistribution blockDistribution, int numTopsAtMost) { @@ -543,8 +536,8 @@ private static List getBestLocations(Configuration conf, // When top >= 2, // do the heuristic: filter all hosts which have at least cutoffMultiplier % of block locality - double cutoffMultiplier - = conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER); + double cutoffMultiplier = + conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER); double filterWeight = topHost.getWeight() * cutoffMultiplier; @@ -601,12 +594,11 @@ public static void setInput(Configuration conf, String snapshotName, Path restor * @throws IOException if an error occurs */ public static void setInput(Configuration conf, String snapshotName, Path restoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) - throws IOException { + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { conf.set(SNAPSHOT_NAME_KEY, snapshotName); if (numSplitsPerRegion < 1) { - throw new IllegalArgumentException("numSplits must be >= 1, " + - "illegal numSplits : " + numSplitsPerRegion); + throw new IllegalArgumentException( + "numSplits must be >= 1, " + "illegal numSplits : " + numSplitsPerRegion); } if (splitAlgo == null && numSplitsPerRegion > 1) { throw new IllegalArgumentException("Split algo can't be null when numSplits > 1"); @@ -625,7 +617,7 @@ public static void setInput(Configuration conf, String snapshotName, Path restor } /** - * clean restore directory after snapshot scan job + * clean restore directory after snapshot scan job * @param job the snapshot scan job * @param snapshotName the name of the snapshot to read from * @throws IOException if an error occurs @@ -641,6 +633,6 @@ public static void cleanRestoreDir(Job job, String snapshotName) throws IOExcept if (!fs.delete(restoreDir, true)) { LOG.warn("Failed clean restore dir {} for snapshot {}", restoreDir, snapshotName); } - LOG.debug("Clean restore directory {} for {}", restoreDir, snapshotName); + LOG.debug("Clean restore directory {} for {}", restoreDir, 
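The cutoff heuristic documented above can be sketched independently of HDFSBlocksDistribution; this assumes the hosts are already sorted by descending block weight, with weights in a parallel list:

    import java.util.ArrayList;
    import java.util.List;

    public class LocalityCutoff {
      // Keep the top host plus every host within cutoff (default 0.8) of the top host's
      // block weight, returning at most maxHosts entries.
      static List<String> bestHosts(List<String> hosts, List<Long> weights,
          double cutoff, int maxHosts) {
        List<String> result = new ArrayList<>();
        if (hosts.isEmpty()) {
          return result;
        }
        double filterWeight = weights.get(0) * cutoff;
        for (int i = 0; i < hosts.size() && result.size() < maxHosts; i++) {
          if (i == 0 || weights.get(i) >= filterWeight) {
            result.add(hosts.get(i));
          }
        }
        return result;
      }
    }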
snapshotName); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index 93300ebb0f39..939fc5811eb3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +33,11 @@ import org.slf4j.LoggerFactory; /** - * A table split corresponds to a key range (low, high) and an optional scanner. - * All references to row below refer to the key of the row. + * A table split corresponds to a key range (low, high) and an optional scanner. All references to + * row below refer to the key of the row. */ @InterfaceAudience.Public -public class TableSplit extends InputSplit - implements Writable, Comparable { +public class TableSplit extends InputSplit implements Writable, Comparable { /** @deprecated LOG variable would be made private. fix in hbase 3.0 */ @Deprecated public static final Logger LOG = LoggerFactory.getLogger(TableSplit.class); @@ -79,76 +77,68 @@ static Version fromCode(int code) { private static final Version VERSION = Version.WITH_ENCODED_REGION_NAME; private TableName tableName; - private byte [] startRow; - private byte [] endRow; + private byte[] startRow; + private byte[] endRow; private String regionLocation; private String encodedRegionName = ""; /** - * The scan object may be null but the serialized form of scan is never null - * or empty since we serialize the scan object with default values then. - * Having no scanner in TableSplit doesn't necessarily mean there is no scanner - * for mapreduce job, it just means that we do not need to set it for each split. - * For example, it is not required to have a scan object for - * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the - * job conf and scanner is supposed to be same for all the splits of table. + * The scan object may be null but the serialized form of scan is never null or empty since we + * serialize the scan object with default values then. Having no scanner in TableSplit doesn't + * necessarily mean there is no scanner for mapreduce job, it just means that we do not need to + * set it for each split. For example, it is not required to have a scan object for + * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the job + * conf and scanner is supposed to be same for all the splits of table. */ private String scan = ""; // stores the serialized form of the Scan private long length; // Contains estimation of region size in bytes /** Default constructor. */ public TableSplit() { - this((TableName)null, null, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, ""); + this((TableName) null, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ""); } /** - * Creates a new instance while assigning all variables. - * Length of region is set to 0 - * Encoded name of the region is set to blank - * - * @param tableName The name of the current table. + * Creates a new instance while assigning all variables. Length of region is set to 0 Encoded name + * of the region is set to blank + * @param tableName The name of the current table. * @param scan The scan associated with this split. 
- * @param startRow The start row of the split. - * @param endRow The end row of the split. - * @param location The location of the region. + * @param startRow The start row of the split. + * @param endRow The end row of the split. + * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, - final String location) { + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, + final String location) { this(tableName, scan, startRow, endRow, location, 0L); } /** - * Creates a new instance while assigning all variables. - * Encoded name of region is set to blank - * - * @param tableName The name of the current table. + * Creates a new instance while assigning all variables. Encoded name of region is set to blank + * @param tableName The name of the current table. * @param scan The scan associated with this split. - * @param startRow The start row of the split. - * @param endRow The end row of the split. - * @param location The location of the region. + * @param startRow The start row of the split. + * @param endRow The end row of the split. + * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, final String location, long length) { this(tableName, scan, startRow, endRow, location, "", length); } /** * Creates a new instance while assigning all variables. - * - * @param tableName The name of the current table. + * @param tableName The name of the current table. * @param scan The scan associated with this split. - * @param startRow The start row of the split. - * @param endRow The end row of the split. + * @param startRow The start row of the split. + * @param endRow The end row of the split. * @param encodedRegionName The region ID. - * @param location The location of the region. + * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, final String location, final String encodedRegionName, long length) { this.tableName = tableName; try { - this.scan = - (null == scan) ? "" : TableMapReduceUtil.convertScanToString(scan); + this.scan = (null == scan) ? "" : TableMapReduceUtil.convertScanToString(scan); } catch (IOException e) { LOG.warn("Failed to convert Scan to String", e); } @@ -160,36 +150,31 @@ public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endR } /** - * Creates a new instance without a scanner. - * Length of region is set to 0 - * + * Creates a new instance without a scanner. Length of region is set to 0 * @param tableName The name of the current table. * @param startRow The start row of the split. * @param endRow The end row of the split. * @param location The location of the region. */ - public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, - final String location) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) { this(tableName, null, startRow, endRow, location); } /** * Creates a new instance without a scanner. - * * @param tableName The name of the current table. * @param startRow The start row of the split. * @param endRow The end row of the split. * @param location The location of the region. 
* @param length Size of region in bytes */ - public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, - final String location, long length) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location, + long length) { this(tableName, null, startRow, endRow, location, length); } /** * Returns a Scan object from the stored string representation. - * * @return Returns a Scan object based on the stored scanner. * @throws IOException throws IOException if deserialization fails */ @@ -199,9 +184,9 @@ public Scan getScan() throws IOException { /** * Returns a scan string - * @return scan as string. Should be noted that this is not same as getScan().toString() - * because Scan object will have the default values when empty scan string is - * deserialized. Thus, getScan().toString() can never be empty + * @return scan as string. Should be noted that this is not same as getScan().toString() because + * Scan object will have the default values when empty scan string is deserialized. Thus, + * getScan().toString() can never be empty */ @InterfaceAudience.Private public String getScanAsString() { @@ -213,17 +198,16 @@ public String getScanAsString() { * @see #getTable() * @return The table name. */ - public byte [] getTableName() { + public byte[] getTableName() { return tableName.getName(); } /** * Returns the table name. - * * @return The table name. */ public TableName getTable() { - // It is ugly that usually to get a TableName, the method is called getTableName. We can't do + // It is ugly that usually to get a TableName, the method is called getTableName. We can't do // that in here though because there was an existing getTableName in place already since // deprecated. return tableName; @@ -231,25 +215,22 @@ public TableName getTable() { /** * Returns the start row. - * * @return The start row. */ - public byte [] getStartRow() { + public byte[] getStartRow() { return startRow; } /** * Returns the end row. - * * @return The end row. */ - public byte [] getEndRow() { + public byte[] getEndRow() { return endRow; } /** * Returns the region location. - * * @return The region's location. */ public String getRegionLocation() { @@ -258,18 +239,16 @@ public String getRegionLocation() { /** * Returns the region's location as an array. - * * @return The array containing the region location. * @see org.apache.hadoop.mapreduce.InputSplit#getLocations() */ @Override public String[] getLocations() { - return new String[] {regionLocation}; + return new String[] { regionLocation }; } /** * Returns the region's encoded name. - * * @return The region's encoded name. */ public String getEncodedRegionName() { @@ -278,7 +257,6 @@ public String getEncodedRegionName() { /** * Returns the length of the split. - * * @return The length of the split. * @see org.apache.hadoop.mapreduce.InputSplit#getLength() */ @@ -289,8 +267,7 @@ public long getLength() { /** * Reads the values of each field. - * - * @param in The input to read from. + * @param in The input to read from. * @throws IOException When reading the input fails. */ @Override @@ -327,8 +304,7 @@ public void readFields(DataInput in) throws IOException { /** * Writes the field values to the output. - * - * @param out The output to write to. + * @param out The output to write to. * @throws IOException When writing the values to the output fails. */ @Override @@ -345,7 +321,6 @@ public void write(DataOutput out) throws IOException { /** * Returns the details about this instance as a string. 
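A short usage sketch of the constructors and accessors above; the table name, row keys, and region server host are hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.mapreduce.TableSplit;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableSplitExample {
      public static void main(String[] args) {
        TableSplit split = new TableSplit(TableName.valueOf("demo"), new Scan(),
          Bytes.toBytes("aaa"), Bytes.toBytes("mmm"), "rs1.example.org", 0L);
        System.out.println(Bytes.toString(split.getStartRow()) + " - "
          + Bytes.toString(split.getEndRow()));
        System.out.println(split.getLocations()[0]); // the single region location
        System.out.println(split.getLength());       // estimated region size in bytes, 0 here
      }
    }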
- * * @return The values of this instance as a string. * @see java.lang.Object#toString() */ @@ -360,8 +335,7 @@ public String toString() { try { // get the real scan here in toString, not the Base64 string printScan = TableMapReduceUtil.convertStringToScan(scan).toString(); - } - catch (IOException e) { + } catch (IOException e) { printScan = ""; } sb.append(", scan=").append(printScan); @@ -376,8 +350,7 @@ public String toString() { /** * Compares this split against the given one. - * - * @param split The split to compare to. + * @param split The split to compare to. * @return The result of the comparison. * @see java.lang.Comparable#compareTo(java.lang.Object) */ @@ -385,10 +358,9 @@ public String toString() { public int compareTo(TableSplit split) { // If The table name of the two splits is the same then compare start row // otherwise compare based on table names - int tableNameComparison = - getTable().compareTo(split.getTable()); - return tableNameComparison != 0 ? tableNameComparison : Bytes.compareTo( - getStartRow(), split.getStartRow()); + int tableNameComparison = getTable().compareTo(split.getTable()); + return tableNameComparison != 0 ? tableNameComparison + : Bytes.compareTo(getStartRow(), split.getStartRow()); } @Override @@ -396,10 +368,10 @@ public boolean equals(Object o) { if (o == null || !(o instanceof TableSplit)) { return false; } - return tableName.equals(((TableSplit)o).tableName) && - Bytes.equals(startRow, ((TableSplit)o).startRow) && - Bytes.equals(endRow, ((TableSplit)o).endRow) && - regionLocation.equals(((TableSplit)o).regionLocation); + return tableName.equals(((TableSplit) o).tableName) + && Bytes.equals(startRow, ((TableSplit) o).startRow) + && Bytes.equals(endRow, ((TableSplit) o).endRow) + && regionLocation.equals(((TableSplit) o).regionLocation); } @Override diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java index 667ca97e3f1b..4376bee85f91 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.List; import java.util.Set; import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; @@ -33,7 +32,6 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.security.visibility.InvalidLabelException; import org.apache.hadoop.hbase.util.Bytes; @@ -41,6 +39,7 @@ import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Emits Sorted KeyValues. Parse the passed text and creates KeyValues. Sorts them before emit. 
@@ -49,8 +48,8 @@ * @see PutSortReducer */ @InterfaceAudience.Public -public class TextSortReducer extends - Reducer { +public class TextSortReducer + extends Reducer { /** Timestamp for all inserted rows */ private long ts; @@ -90,11 +89,10 @@ public void incrementBadLineCount(int count) { } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subsclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subsclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. * @param context */ @Override @@ -132,16 +130,11 @@ protected void doSetup(Context context, Configuration conf) { } @Override - protected void reduce( - ImmutableBytesWritable rowKey, - java.lang.Iterable lines, - Reducer.Context context) - throws java.io.IOException, InterruptedException - { + protected void reduce(ImmutableBytesWritable rowKey, java.lang.Iterable lines, + Reducer.Context context) + throws java.io.IOException, InterruptedException { // although reduce() is called per-row, handle pathological case - long threshold = context.getConfiguration().getLong( - "reducer.row.threshold", 1L * (1<<30)); + long threshold = context.getConfiguration().getLong("reducer.row.threshold", 1L * (1 << 30)); Iterator iter = lines.iterator(); while (iter.hasNext()) { Set kvs = new TreeSet<>(CellComparator.getInstance()); @@ -160,8 +153,8 @@ protected void reduce( // create tags for the parsed line List tags = new ArrayList<>(); if (cellVisibilityExpr != null) { - tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags( - cellVisibilityExpr)); + tags.addAll(kvCreator.getVisibilityExpressionResolver() + .createVisibilityExpTags(cellVisibilityExpr)); } // Add TTL directly to the KV so we can vary them when packing more than one KV // into puts @@ -170,16 +163,17 @@ protected void reduce( } for (int i = 0; i < parsed.getColumnCount(); i++) { if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() - || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex() + || i == parser.getAttributesKeyColumnIndex() + || i == parser.getCellVisibilityColumnIndex() || i == parser.getCellTTLColumnIndex()) { continue; } // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. 
Cell cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), - parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, - parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, - parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); + parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, + parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, + parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); KeyValue kv = KeyValueUtil.ensureKeyValue(cell); kvs.add(kv); curSize += kv.heapSize(); @@ -194,13 +188,12 @@ protected void reduce( throw new IOException(badLine); } } - context.setStatus("Read " + kvs.size() + " entries of " + kvs.getClass() - + "(" + StringUtils.humanReadableInt(curSize) + ")"); + context.setStatus("Read " + kvs.size() + " entries of " + kvs.getClass() + "(" + + StringUtils.humanReadableInt(curSize) + ")"); int index = 0; for (KeyValue kv : kvs) { context.write(rowKey, kv); - if (++index > 0 && index % 100 == 0) - context.setStatus("Wrote " + index + " key values."); + if (++index > 0 && index % 100 == 0) context.setStatus("Wrote " + index + " key values."); } // if we have more entries to process diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java index 5d406195d40b..f49d5a143567 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,14 +21,12 @@ import java.util.ArrayList; import java.util.Base64; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException; @@ -39,6 +37,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,8 +45,7 @@ * Write table content out to files in hdfs. */ @InterfaceAudience.Public -public class TsvImporterMapper - extends Mapper { +public class TsvImporterMapper extends Mapper { private static final Logger LOG = LoggerFactory.getLogger(TsvImporterMapper.class); /** Timestamp for all inserted rows */ @@ -58,7 +56,7 @@ public class TsvImporterMapper /** Should skip bad lines */ private boolean skipBadLines; - /** Should skip empty columns*/ + /** Should skip empty columns */ private boolean skipEmptyColumns; private Counter badLineCount; private boolean logBadLines; @@ -95,11 +93,10 @@ public void incrementBadLineCount(int count) { } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subsclass is done in - * doSetup. 
Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subsclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. * @param context */ @Override @@ -107,8 +104,7 @@ protected void setup(Context context) { doSetup(context); conf = context.getConfiguration(); - parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), - separator); + parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), separator); if (parser.getRowKeyColumnIndex() == -1) { throw new RuntimeException("No row key column specified"); } @@ -135,10 +131,8 @@ protected void doSetup(Context context) { // configuration. ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0); - skipEmptyColumns = context.getConfiguration().getBoolean( - ImportTsv.SKIP_EMPTY_COLUMNS, false); - skipBadLines = context.getConfiguration().getBoolean( - ImportTsv.SKIP_LINES_CONF_KEY, true); + skipEmptyColumns = context.getConfiguration().getBoolean(ImportTsv.SKIP_EMPTY_COLUMNS, false); + skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true); badLineCount = context.getCounter("ImportTsv", "Bad Lines"); logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false); hfileOutPath = conf.get(ImportTsv.BULK_OUTPUT_CONF_KEY); @@ -148,18 +142,13 @@ protected void doSetup(Context context) { * Convert a line of TSV text into an HBase table row. */ @Override - public void map(LongWritable offset, Text value, - Context context) - throws IOException { + public void map(LongWritable offset, Text value, Context context) throws IOException { byte[] lineBytes = value.getBytes(); try { - ImportTsv.TsvParser.ParsedLine parsed = parser.parse( - lineBytes, value.getLength()); + ImportTsv.TsvParser.ParsedLine parsed = parser.parse(lineBytes, value.getLength()); ImmutableBytesWritable rowKey = - new ImmutableBytesWritable(lineBytes, - parsed.getRowKeyOffset(), - parsed.getRowKeyLength()); + new ImmutableBytesWritable(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength()); // Retrieve timestamp if exists ts = parsed.getTimestamp(ts); cellVisibilityExpr = parsed.getCellVisibility(); @@ -169,8 +158,8 @@ public void map(LongWritable offset, Text value, if (hfileOutPath != null) { tags.clear(); if (cellVisibilityExpr != null) { - tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags( - cellVisibilityExpr)); + tags.addAll(kvCreator.getVisibilityExpressionResolver() + .createVisibilityExpTags(cellVisibilityExpr)); } // Add TTL directly to the KV so we can vary them when packing more than one KV // into puts @@ -181,9 +170,9 @@ public void map(LongWritable offset, Text value, Put put = new Put(rowKey.copyBytes()); for (int i = 0; i < parsed.getColumnCount(); i++) { if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() - || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex() - || i == parser.getCellTTLColumnIndex() || (skipEmptyColumns - && parsed.getColumnLength(i) == 0)) { + || i == parser.getAttributesKeyColumnIndex() + || i == parser.getCellVisibilityColumnIndex() || i == parser.getCellTTLColumnIndex() + || (skipEmptyColumns && parsed.getColumnLength(i) == 0)) { continue; } populatePut(lineBytes, 
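A sketch of the configuration this mapper reads in doSetup; the column specification (including the HBASE_ROW_KEY marker) and the family:qualifier names are illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.ImportTsv;

    public class ImportTsvConfExample {
      public static Configuration importConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.set(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,d:c1,d:c2");
        conf.setBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);    // skip bad lines instead of failing
        conf.setBoolean(ImportTsv.SKIP_EMPTY_COLUMNS, true);     // drop cells with empty values
        conf.setBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, true); // echo rejected lines to stderr
        return conf;
      }
    }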
parsed, put, i); @@ -226,9 +215,9 @@ protected void populatePut(byte[] lineBytes, ImportTsv.TsvParser.ParsedLine pars // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength(), - parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, - parser.getQualifier(i).length, ts, lineBytes, parsed.getColumnOffset(i), - parsed.getColumnLength(i), tags); + parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, + parser.getQualifier(i).length, ts, lineBytes, parsed.getColumnOffset(i), + parsed.getColumnLength(i), tags); } put.add(cell); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java index 0127f26955c3..600418e04aae 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,15 +19,14 @@ import java.io.IOException; import java.util.Base64; - -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Mapper; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -62,11 +61,10 @@ public void incrementBadLineCount(int count) { } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. 
* @param context */ @Override @@ -108,11 +106,12 @@ protected void doSetup(Context context) { @Override public void map(LongWritable offset, Text value, Context context) throws IOException { try { - Pair rowKeyOffests = parser.parseRowKey(value.getBytes(), value.getLength()); - ImmutableBytesWritable rowKey = new ImmutableBytesWritable( - value.getBytes(), rowKeyOffests.getFirst(), rowKeyOffests.getSecond()); + Pair rowKeyOffests = + parser.parseRowKey(value.getBytes(), value.getLength()); + ImmutableBytesWritable rowKey = new ImmutableBytesWritable(value.getBytes(), + rowKeyOffests.getFirst(), rowKeyOffests.getSecond()); context.write(rowKey, value); - } catch (ImportTsv.TsvParser.BadTsvLineException|IllegalArgumentException badLine) { + } catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException badLine) { if (logBadLines) { System.err.println(value); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java index f0f4c82a5ad8..b42c0d9116d2 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,9 @@ import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.Tag; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface to convert visibility expressions into Tags for storing along with Cells in HFiles. diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index 30d112fd1c0c..0f155346a732 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -62,8 +62,7 @@ public class WALInputFormat extends InputFormat { public static final String END_TIME_KEY = "wal.end.time"; /** - * {@link InputSplit} for {@link WAL} files. Each split represent - * exactly one log file. + * {@link InputSplit} for {@link WAL} files. Each split represent exactly one log file. */ static class WALSplit extends InputSplit implements Writable { private String logFileName; @@ -72,12 +71,12 @@ static class WALSplit extends InputSplit implements Writable { private long endTime; /** for serialization */ - public WALSplit() {} + public WALSplit() { + } /** - * Represent an WALSplit, i.e. a single WAL file. - * Start- and EndTime are managed by the split, so that WAL files can be - * filtered before WALEdits are passed to the mapper(s). + * Represent an WALSplit, i.e. a single WAL file. Start- and EndTime are managed by the split, + * so that WAL files can be filtered before WALEdits are passed to the mapper(s). */ public WALSplit(String logFileName, long fileSize, long startTime, long endTime) { this.logFileName = logFileName; @@ -132,8 +131,8 @@ public String toString() { } /** - * {@link RecordReader} for an {@link WAL} file. - * Implementation shared with deprecated HLogInputFormat. 
+ * {@link RecordReader} for an {@link WAL} file. Implementation shared with deprecated + * HLogInputFormat. */ static abstract class WALRecordReader extends RecordReader { private Reader reader = null; @@ -148,7 +147,7 @@ static abstract class WALRecordReader extends RecordReader { @Override @@ -262,8 +261,7 @@ public WALKey getCurrentKey() throws IOException, InterruptedException { } @Override - public List getSplits(JobContext context) throws IOException, - InterruptedException { + public List getSplits(JobContext context) throws IOException, InterruptedException { return getSplits(context, START_TIME_KEY, END_TIME_KEY); } @@ -281,14 +279,14 @@ List getSplits(final JobContext context, final String startKey, fina long endTime = conf.getLong(endKey, Long.MAX_VALUE); List allFiles = new ArrayList(); - for(Path inputPath: inputPaths){ + for (Path inputPath : inputPaths) { FileSystem fs = inputPath.getFileSystem(conf); try { List files = getFiles(fs, inputPath, startTime, endTime); allFiles.addAll(files); } catch (FileNotFoundException e) { if (ignoreMissing) { - LOG.warn("File "+ inputPath +" is missing. Skipping it."); + LOG.warn("File " + inputPath + " is missing. Skipping it."); continue; } throw e; @@ -303,17 +301,17 @@ List getSplits(final JobContext context, final String startKey, fina private Path[] getInputPaths(Configuration conf) { String inpDirs = conf.get(FileInputFormat.INPUT_DIR); - return StringUtils.stringToPath( - inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); + return StringUtils + .stringToPath(inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); } /** - * @param startTime If file looks like it has a timestamp in its name, we'll check if newer - * or equal to this value else we will filter out the file. If name does not - * seem to have a timestamp, we will just return it w/o filtering. + * @param startTime If file looks like it has a timestamp in its name, we'll check if newer or + * equal to this value else we will filter out the file. If name does not seem to have a + * timestamp, we will just return it w/o filtering. * @param endTime If file looks like it has a timestamp in its name, we'll check if older or equal - * to this value else we will filter out the file. If name does not seem to - * have a timestamp, we will just return it w/o filtering. + * to this value else we will filter out the file. If name does not seem to have a + * timestamp, we will just return it w/o filtering. */ private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) throws IOException { @@ -347,8 +345,8 @@ static void addFile(List result, LocatedFileStatus lfs, long startTi LOG.info("Found {}", lfs.getPath()); result.add(lfs); } else { - LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), - startTime, Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); + LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), startTime, + Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); } } else { // If no timestamp, add it regardless. 
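A minimal sketch of restricting WAL splits to a time range with the keys above; the values are milliseconds since the epoch, and files whose names carry a timestamp outside the range are filtered out before any WALEdits reach the mappers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.WALInputFormat;

    public class WalTimeRangeExample {
      public static Configuration walRange(long startMs, long endMs) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong(WALInputFormat.START_TIME_KEY, startMs);
        conf.setLong(WALInputFormat.END_TIME_KEY, endMs);
        return conf;
      }
    }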
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index e3c4d7a328f6..1ddb8ebd5a1b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -58,17 +58,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - - /** - * A tool to replay WAL files as a M/R job. - * The WAL can be replayed for a set of tables or all tables, - * and a time range can be provided (in milliseconds). - * The WAL is filtered to the passed set of tables and the output - * can optionally be mapped to another set of tables. - * - * WAL replay can also generate HFiles for later bulk importing, - * in that case the WAL is replayed for a single table only. + * A tool to replay WAL files as a M/R job. The WAL can be replayed for a set of tables or all + * tables, and a time range can be provided (in milliseconds). The WAL is filtered to the passed set + * of tables and the output can optionally be mapped to another set of tables. WAL replay can also + * generate HFiles for later bulk importing, in that case the WAL is replayed for a single table + * only. */ @InterfaceAudience.Public public class WALPlayer extends Configured implements Tool { @@ -134,22 +129,18 @@ public void setup(Context context) throws IOException { } /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. */ protected static enum Counter { /** Number of aggregated writes */ PUTS, /** Number of aggregated deletes */ - DELETES, - CELLS_READ, - CELLS_WRITTEN, - WALEDITS + DELETES, CELLS_READ, CELLS_WRITTEN, WALEDITS } /** - * A mapper that writes out {@link Mutation} to be directly applied to - * a running HBase instance. + * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance. */ protected static class WALMapper extends Mapper { @@ -287,7 +278,7 @@ public Job createSubmittableJob(String[] args) throws IOException { setupTime(conf, WALInputFormat.START_TIME_KEY); setupTime(conf, WALInputFormat.END_TIME_KEY); String inputDirs = args[0]; - String[] tables = args.length == 1? new String [] {}: args[1].split(","); + String[] tables = args.length == 1 ? new String[] {} : args[1].split(","); String[] tableMap; if (args.length > 2) { tableMap = args[2].split(","); @@ -301,8 +292,8 @@ public Job createSubmittableJob(String[] args) throws IOException { conf.setStrings(TABLES_KEY, tables); conf.setStrings(TABLE_MAP_KEY, tableMap); conf.set(FileInputFormat.INPUT_DIR, inputDirs); - Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + - EnvironmentEdgeManager.currentTime())); + Job job = Job.getInstance(conf, + conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); job.setJarByClass(WALPlayer.class); job.setInputFormatClass(WALInputFormat.class); @@ -370,12 +361,12 @@ private void usage(final String errorMsg) { System.err.println(" directory of WALs to replay."); System.err.println(" comma separated list of tables. 
If no tables specified,"); System.err.println(" all are imported (even hbase:meta if present)."); - System.err.println(" WAL entries can be mapped to a new set of tables by " + - "passing"); - System.err.println(" , a comma separated list of target " + - "tables."); - System.err.println(" If specified, each table in must have a " + - "mapping."); + System.err.println( + " WAL entries can be mapped to a new set of tables by " + "passing"); + System.err.println( + " , a comma separated list of target " + "tables."); + System.err.println( + " If specified, each table in must have a " + "mapping."); System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println(" Only one table can be specified, and no mapping allowed!"); @@ -383,8 +374,8 @@ private void usage(final String errorMsg) { System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); System.err.println(" The start and the end date of timerange (inclusive). The dates can be"); - System.err.println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + - "format."); + System.err + .println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + "format."); System.err.println(" E.g. 1234567890120 or 2009-02-13T23:32:30.12"); System.err.println("Other options:"); System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); @@ -392,8 +383,7 @@ private void usage(final String errorMsg) { System.err.println(" -Dwal.input.separator=' '"); System.err.println(" Change WAL filename separator (WAL dir names use default ','.)"); System.err.println("For performance also consider the following options:\n" - + " -Dmapreduce.map.speculative=false\n" - + " -Dmapreduce.reduce.speculative=false"); + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); } /** diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java index 29b63096902a..85b0e1d275fd 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java @@ -1,26 +1,19 @@ /* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. 
The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** -Provides HBase MapReduce -Input/OutputFormats, a table indexing MapReduce job, and utility methods. - -

See HBase and MapReduce -in the HBase Reference Guide for mapreduce over hbase documentation. -*/ + * Provides HBase + * MapReduce + * Input/OutputFormats, a table indexing MapReduce job, and utility methods. + *

+ * See HBase and MapReduce in the HBase + * Reference Guide for mapreduce over hbase documentation. + */ package org.apache.hadoop.hbase.mapreduce; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 747b56474128..760f8a6c95de 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,20 +69,18 @@ import org.slf4j.LoggerFactory; /** - * This map-only job compares the data from a local table with a remote one. - * Every cell is compared and must have exactly the same keys (even timestamp) - * as well as same value. It is possible to restrict the job by time range and - * families. The peer id that's provided must match the one given when the - * replication stream was setup. + * This map-only job compares the data from a local table with a remote one. Every cell is compared + * and must have exactly the same keys (even timestamp) as well as same value. It is possible to + * restrict the job by time range and families. The peer id that's provided must match the one given + * when the replication stream was setup. *

- * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The reason - * for a why a row is different is shown in the map's log. + * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The reason for a why a row is + * different is shown in the map's log. */ @InterfaceAudience.Private public class VerifyReplication extends Configured implements Tool { - private static final Logger LOG = - LoggerFactory.getLogger(VerifyReplication.class); + private static final Logger LOG = LoggerFactory.getLogger(VerifyReplication.class); public final static String NAME = "verifyrep"; private final static String PEER_CONFIG_PREFIX = NAME + ".peer."; @@ -100,29 +97,27 @@ public class VerifyReplication extends Configured implements Tool { int sleepMsBeforeReCompare = 0; boolean verbose = false; boolean includeDeletedCells = false; - //Source table snapshot name + // Source table snapshot name String sourceSnapshotName = null; - //Temp location in source cluster to restore source snapshot + // Temp location in source cluster to restore source snapshot String sourceSnapshotTmpDir = null; - //Peer table snapshot name + // Peer table snapshot name String peerSnapshotName = null; - //Temp location in peer cluster to restore peer snapshot + // Temp location in peer cluster to restore peer snapshot String peerSnapshotTmpDir = null; - //Peer cluster Hadoop FS address + // Peer cluster Hadoop FS address String peerFSAddress = null; - //Peer cluster HBase root dir location + // Peer cluster HBase root dir location String peerHBaseRootAddress = null; - //Peer Table Name + // Peer Table Name String peerTableName = null; - private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; /** * Map-only comparator for 2 tables */ - public static class Verifier - extends TableMapper { + public static class Verifier extends TableMapper { public enum Counters { GOODROWS, BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, CONTENT_DIFFERENT_ROWS @@ -140,22 +135,20 @@ public enum Counters { private int batch = -1; /** - * Map method that compares every scanned row with the equivalent from - * a distant cluster. - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * Map method that compares every scanned row with the equivalent from a distant cluster. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. 
*/ @Override - public void map(ImmutableBytesWritable row, final Result value, - Context context) + public void map(ImmutableBytesWritable row, final Result value, Context context) throws IOException { if (replicatedScanner == null) { Configuration conf = context.getConfiguration(); - sleepMsBeforeReCompare = conf.getInt(NAME +".sleepMsBeforeReCompare", 0); + sleepMsBeforeReCompare = conf.getInt(NAME + ".sleepMsBeforeReCompare", 0); delimiter = conf.get(NAME + ".delimiter", ""); - verbose = conf.getBoolean(NAME +".verbose", false); + verbose = conf.getBoolean(NAME + ".verbose", false); batch = conf.getInt(NAME + ".batch", -1); final Scan scan = new Scan(); if (batch > 0) { @@ -166,9 +159,9 @@ public void map(ImmutableBytesWritable row, final Result value, long startTime = conf.getLong(NAME + ".startTime", 0); long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE); String families = conf.get(NAME + ".families", null); - if(families != null) { + if (families != null) { String[] fams = families.split(","); - for(String fam : fams) { + for (String fam : fams) { scan.addFamily(Bytes.toBytes(fam)); } } @@ -177,7 +170,7 @@ public void map(ImmutableBytesWritable row, final Result value, String rowPrefixes = conf.get(NAME + ".rowPrefixes", null); setRowPrefixFilter(scan, rowPrefixes); scan.setTimeRange(startTime, endTime); - int versions = conf.getInt(NAME+".versions", -1); + int versions = conf.getInt(NAME + ".versions", -1); LOG.info("Setting number of version inside map as: " + versions); if (versions >= 0) { scan.readVersions(versions); @@ -189,8 +182,8 @@ public void map(ImmutableBytesWritable row, final Result value, final InputSplit tableSplit = context.getInputSplit(); String zkClusterKey = conf.get(NAME + ".peerQuorumAddress"); - Configuration peerConf = HBaseConfiguration.createClusterConf(conf, - zkClusterKey, PEER_CONFIG_PREFIX); + Configuration peerConf = + HBaseConfiguration.createClusterConf(conf, zkClusterKey, PEER_CONFIG_PREFIX); String peerName = peerConf.get(NAME + ".peerTableName", tableName.getNameAsString()); TableName peerTableName = TableName.valueOf(peerName); @@ -201,7 +194,7 @@ public void map(ImmutableBytesWritable row, final Result value, byte[] endRow = null; if (tableSplit instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit) { endRow = ((TableSnapshotInputFormat.TableSnapshotRegionSplit) tableSplit).getRegion() - .getEndKey(); + .getEndKey(); } else { endRow = ((TableSplit) tableSplit).getEndRow(); } @@ -215,12 +208,12 @@ public void map(ImmutableBytesWritable row, final Result value, String peerHBaseRootAddress = conf.get(NAME + ".peerHBaseRootAddress", null); FileSystem.setDefaultUri(peerConf, peerFSAddress); CommonFSUtils.setRootDir(peerConf, new Path(peerHBaseRootAddress)); - LOG.info("Using peer snapshot:" + peerSnapshotName + " with temp dir:" + - peerSnapshotTmpDir + " peer root uri:" + CommonFSUtils.getRootDir(peerConf) + - " peerFSAddress:" + peerFSAddress); + LOG.info("Using peer snapshot:" + peerSnapshotName + " with temp dir:" + + peerSnapshotTmpDir + " peer root uri:" + CommonFSUtils.getRootDir(peerConf) + + " peerFSAddress:" + peerFSAddress); replicatedScanner = new TableSnapshotScanner(peerConf, CommonFSUtils.getRootDir(peerConf), - new Path(peerFSAddress, peerSnapshotTmpDir), peerSnapshotName, scan, true); + new Path(peerFSAddress, peerSnapshotTmpDir), peerSnapshotName, scan, true); } else { replicatedScanner = replicatedTable.getScanner(scan); } @@ -239,8 +232,8 @@ public void map(ImmutableBytesWritable row, final Result value, 
Result.compareResults(value, currentCompareRowInPeerTable, false); context.getCounter(Counters.GOODROWS).increment(1); if (verbose) { - LOG.info("Good row key: " + delimiter - + Bytes.toStringBinary(value.getRow()) + delimiter); + LOG.info( + "Good row key: " + delimiter + Bytes.toStringBinary(value.getRow()) + delimiter); } } catch (Exception e) { logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value); @@ -270,21 +263,20 @@ private void logFailRowAndIncreaseCounter(Context context, Counters counter, Res if (!sourceResult.isEmpty()) { context.getCounter(Counters.GOODROWS).increment(1); if (verbose) { - LOG.info("Good row key (with recompare): " + delimiter + - Bytes.toStringBinary(row.getRow()) - + delimiter); + LOG.info("Good row key (with recompare): " + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } return; } catch (Exception e) { - LOG.error("recompare fail after sleep, rowkey=" + delimiter + - Bytes.toStringBinary(row.getRow()) + delimiter); + LOG.error("recompare fail after sleep, rowkey=" + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } context.getCounter(counter).increment(1); context.getCounter(Counters.BADROWS).increment(1); - LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) + - delimiter); + LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) + + delimiter); } @Override @@ -311,7 +303,7 @@ protected void cleanup(Context context) { LOG.error("fail to close source table in cleanup", e); } } - if(sourceConnection != null){ + if (sourceConnection != null) { try { sourceConnection.close(); } catch (Exception e) { @@ -319,14 +311,14 @@ protected void cleanup(Context context) { } } - if(replicatedTable != null){ - try{ + if (replicatedTable != null) { + try { replicatedTable.close(); } catch (Exception e) { LOG.error("fail to close replicated table in cleanup", e); } } - if(replicatedConnection != null){ + if (replicatedConnection != null) { try { replicatedConnection.close(); } catch (Exception e) { @@ -336,8 +328,8 @@ protected void cleanup(Context context) { } } - private static Pair getPeerQuorumConfig( - final Configuration conf, String peerId) throws IOException { + private static Pair + getPeerQuorumConfig(final Configuration conf, String peerId) throws IOException { ZKWatcher localZKW = null; try { localZKW = new ZKWatcher(conf, "VerifyReplication", new Abortable() { @@ -351,7 +343,7 @@ public boolean isAborted() { } }); ReplicationPeerStorage storage = - ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf); + ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf); ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId); return Pair.newPair(peerConfig, ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf)); @@ -366,9 +358,9 @@ public boolean isAborted() { } private void restoreSnapshotForPeerCluster(Configuration conf, String peerQuorumAddress) - throws IOException { + throws IOException { Configuration peerConf = - HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); + HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); FileSystem.setDefaultUri(peerConf, peerFSAddress); CommonFSUtils.setRootDir(peerConf, new Path(peerFSAddress, peerHBaseRootAddress)); FileSystem fs = FileSystem.get(peerConf); @@ -378,30 +370,28 @@ private void restoreSnapshotForPeerCluster(Configuration conf, String peerQuorum /** * Sets up the actual job. 
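Once a verification job finishes, the counters incremented above can be read back; a sketch assuming a Job handle for which waitForCompletion has already returned:

    import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.Job;

    public class VerifyReplicationCounters {
      // Per-row verdicts are exposed as MapReduce counters on the completed job.
      static void report(Job job) throws Exception {
        Counters counters = job.getCounters();
        long good = counters.findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue();
        long bad = counters.findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue();
        System.out.println("good=" + good + " bad=" + bad);
      }
    }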
- * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws java.io.IOException When setting up the job fails. */ - public Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public Job createSubmittableJob(Configuration conf, String[] args) throws IOException { if (!doCommandLine(args)) { return null; } - conf.set(NAME+".tableName", tableName); - conf.setLong(NAME+".startTime", startTime); - conf.setLong(NAME+".endTime", endTime); - conf.setInt(NAME +".sleepMsBeforeReCompare", sleepMsBeforeReCompare); + conf.set(NAME + ".tableName", tableName); + conf.setLong(NAME + ".startTime", startTime); + conf.setLong(NAME + ".endTime", endTime); + conf.setInt(NAME + ".sleepMsBeforeReCompare", sleepMsBeforeReCompare); conf.set(NAME + ".delimiter", delimiter); conf.setInt(NAME + ".batch", batch); - conf.setBoolean(NAME +".verbose", verbose); - conf.setBoolean(NAME +".includeDeletedCells", includeDeletedCells); + conf.setBoolean(NAME + ".verbose", verbose); + conf.setBoolean(NAME + ".includeDeletedCells", includeDeletedCells); if (families != null) { - conf.set(NAME+".families", families); + conf.set(NAME + ".families", families); } - if (rowPrefixes != null){ - conf.set(NAME+".rowPrefixes", rowPrefixes); + if (rowPrefixes != null) { + conf.set(NAME + ".rowPrefixes", rowPrefixes); } String peerQuorumAddress; @@ -410,8 +400,8 @@ public Job createSubmittableJob(Configuration conf, String[] args) peerConfigPair = getPeerQuorumConfig(conf, peerId); ReplicationPeerConfig peerConfig = peerConfigPair.getFirst(); peerQuorumAddress = peerConfig.getClusterKey(); - LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + - peerConfig.getConfiguration()); + LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + + peerConfig.getConfiguration()); conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress); HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX, peerConfig.getConfiguration().entrySet()); @@ -430,7 +420,7 @@ public Job createSubmittableJob(Configuration conf, String[] args) conf.setInt(NAME + ".versions", versions); LOG.info("Number of version: " + versions); - //Set Snapshot specific parameters + // Set Snapshot specific parameters if (peerSnapshotName != null) { conf.set(NAME + ".peerSnapshotName", peerSnapshotName); @@ -461,9 +451,9 @@ public Job createSubmittableJob(Configuration conf, String[] args) scan.readVersions(versions); LOG.info("Number of versions set to " + versions); } - if(families != null) { + if (families != null) { String[] fams = families.split(","); - for(String fam : fams) { + for (String fam : fams) { scan.addFamily(Bytes.toBytes(fam)); } } @@ -486,8 +476,8 @@ public Job createSubmittableJob(Configuration conf, String[] args) assert peerConfigPair != null; peerClusterConf = peerConfigPair.getSecond(); } else { - peerClusterConf = HBaseConfiguration.createClusterConf(conf, - peerQuorumAddress, PEER_CONFIG_PREFIX); + peerClusterConf = + HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); } // Obtain the auth token from peer cluster TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf); @@ -508,7 +498,7 @@ private static void setRowPrefixFilter(Scan scan, String rowPrefixes) { } scan.setFilter(filterList); byte[] startPrefixRow = Bytes.toBytes(rowPrefixArray[0]); - byte[] lastPrefixRow = 
Bytes.toBytes(rowPrefixArray[rowPrefixArray.length -1]); + byte[] lastPrefixRow = Bytes.toBytes(rowPrefixArray[rowPrefixArray.length - 1]); setStartAndStopRows(scan, startPrefixRow, lastPrefixRow); } } @@ -516,7 +506,7 @@ private static void setRowPrefixFilter(Scan scan, String rowPrefixes) { private static void setStartAndStopRows(Scan scan, byte[] startPrefixRow, byte[] lastPrefixRow) { scan.withStartRow(startPrefixRow); byte[] stopRow = Bytes.add(Bytes.head(lastPrefixRow, lastPrefixRow.length - 1), - new byte[]{(byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1)}); + new byte[] { (byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1) }); scan.withStopRow(stopRow); } @@ -570,7 +560,7 @@ public boolean doCommandLine(final String[] args) { } final String rowPrefixesKey = "--row-prefixes="; - if (cmd.startsWith(rowPrefixesKey)){ + if (cmd.startsWith(rowPrefixesKey)) { rowPrefixes = cmd.substring(rowPrefixesKey.length()); continue; } @@ -639,7 +629,7 @@ public boolean doCommandLine(final String[] args) { return false; } - if (i == args.length-2) { + if (i == args.length - 2) { if (isPeerQuorumAddress(cmd)) { peerQuorumAddress = cmd; } else { @@ -647,7 +637,7 @@ public boolean doCommandLine(final String[] args) { } } - if (i == args.length-1) { + if (i == args.length - 1) { tableName = cmd; } } @@ -674,7 +664,7 @@ public boolean doCommandLine(final String[] args) { if ((sourceSnapshotName != null || peerSnapshotName != null) && sleepMsBeforeReCompare > 0) { printUsage( "Using sleepMsBeforeReCompare along with snapshots is not allowed as snapshots are" - + " immutable"); + + " immutable"); return false; } @@ -697,7 +687,7 @@ private boolean isPeerQuorumAddress(String cmd) { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { @@ -715,13 +705,13 @@ private static void printUsage(final String errorMsg) { System.err.println(" endtime end of the time range"); System.err.println(" versions number of cell versions to verify"); System.err.println(" batch batch count for scan, note that" - + " result row counts will no longer be actual number of rows when you use this option"); + + " result row counts will no longer be actual number of rows when you use this option"); System.err.println(" raw includes raw scan if given in options"); System.err.println(" families comma-separated list of families to copy"); System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on "); System.err.println(" delimiter the delimiter used in display around rowkey"); - System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + - "default value is 0 which disables the recompare."); + System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + + "default value is 0 which disables the recompare."); System.err.println(" verbose logs row keys of good rows"); System.err.println(" peerTableName Peer Table Name"); System.err.println(" sourceSnapshotName Source Snapshot Name"); @@ -733,63 +723,59 @@ private static void printUsage(final String errorMsg) { System.err.println(); System.err.println("Args:"); System.err.println(" peerid Id of the peer used for verification," - + " must match the one given for replication"); + + " must match the one given for replication"); System.err.println(" peerQuorumAddress quorumAdress of the peer used for verification. 
The " - + "format is zk_quorum:zk_port:zk_hbase_path"); + + "format is zk_quorum:zk_port:zk_hbase_path"); System.err.println(" tablename Name of the table to verify"); System.err.println(); System.err.println("Examples:"); - System.err.println( - " To verify the data replicated from TestTable for a 1 hour window with peer #5 "); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" + - " --starttime=1265875194289 --endtime=1265878794289 5 TestTable "); + System.err + .println(" To verify the data replicated from TestTable for a 1 hour window with peer #5 "); + System.err + .println(" $ hbase " + "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" + + " --starttime=1265875194289 --endtime=1265878794289 5 TestTable "); System.err.println(); System.err.println( " To verify the data in TestTable between the cluster runs VerifyReplication and cluster-b"); System.err.println(" Assume quorum address for cluster-b is" - + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:2181:/cluster-b"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:2181:/cluster-b"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); System.err.println( " To verify the data in TestTable between the secured cluster runs VerifyReplication" - + " and insecure cluster-b"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.security.authentication=simple \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + + " and insecure cluster-b"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.security.authentication=simple \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); - System.err.println(" To verify the data in TestTable between" + - " the secured cluster runs VerifyReplication and secured cluster-b"); - System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" + - ", for master and regionserver kerberos principal from another cluster"); + System.err.println(" To verify the data in TestTable between" + + " the secured cluster runs VerifyReplication and secured cluster-b"); + System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" + + ", for master and regionserver kerberos principal from another cluster"); System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" - + "cluster-b/_HOST@EXAMPLE.COM \\\n" + - " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + " $ hbase 
org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" + + "cluster-b/_HOST@EXAMPLE.COM \\\n" + + " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); System.err.println( " To verify the data in TestTable between the insecure cluster runs VerifyReplication" - + " and secured cluster-b"); + + " and secured cluster-b"); System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.security.authentication=kerberos \\\n" + - " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" - + "cluster-b/_HOST@EXAMPLE.COM \\\n" + - " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.security.authentication=kerberos \\\n" + + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" + + "cluster-b/_HOST@EXAMPLE.COM \\\n" + + " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); } @Override @@ -804,8 +790,7 @@ public int run(String[] args) throws Exception { /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. * @throws Exception When running the job fails. */ public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java index c74c6ed5eaba..fb63bc6d7075 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,13 +62,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * Scans a given table + CF for all mob reference cells to get the list of backing mob files. - * For each referenced file we attempt to verify that said file is on the FileSystem in a place - * that the MOB system will look when attempting to resolve the actual value. - * - * The job includes counters that can help provide a rough sketch of the mob data. + * Scans a given table + CF for all mob reference cells to get the list of backing mob files. For + * each referenced file we attempt to verify that said file is on the FileSystem in a place that the + * MOB system will look when attempting to resolve the actual value. The job includes counters that + * can help provide a rough sketch of the mob data. * *
  * <pre>
  * Map-Reduce Framework
@@ -95,30 +92,25 @@
  *         Number of rows with total size in the 1,000,000s of bytes=3162
  * </pre>
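For context, the bucket names in the counter example above (e.g. "in the 1,000,000s of bytes") come from an order-of-magnitude grouping. A standalone sketch of that grouping, mirroring the log10GroupedString helper that appears later in this file's diff (the sample inputs are made up):

public class Log10BucketSketch {
  // 0-9 -> 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000 (same formula as log10GroupedString).
  static String log10GroupedString(long number) {
    return String.format("%,d", (long) (Math.pow(10d, Math.floor(Math.log10(number)))));
  }

  public static void main(String[] args) {
    System.out.println(log10GroupedString(3162));   // 1,000   -> the "1,000s of bytes" bucket
    System.out.println(log10GroupedString(523144)); // 100,000 -> the "100,000s of bytes" bucket
  }
}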
* - * * Map-Reduce Framework:Map input records - the number of rows with mob references - * * Map-Reduce Framework:Reduce output records - the number of unique hfiles referenced - * * MOB:NUM_CELLS - the total number of mob reference cells - * * PROBLEM:Affected rows - the number of rows that reference hfiles with an issue - * * PROBLEM:Problem MOB files - the number of unique hfiles that have an issue - * * CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the - * number of cells in a given row by grouping by the number of digits used in each count. - * This allows us to see more about the distribution of cells than what we can determine - * with just the cell count and the row count. In this particular example we can see that - * all of our rows have somewhere between 1 - 9 cells. - * * ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order of - * magnitude of the number of rows in each of the hfiles with a problem. e.g. in the - * example there are 2 hfiles and they each have the same order of magnitude number of rows, - * specifically between 100 and 999. - * * SIZES OF CELLS: - this counter group gives a histogram of the order of magnitude of - * the size of mob values according to our reference cells. e.g. in the example above we - * have cell sizes that are all between 10,000 bytes and 9,999,999 bytes. From this - * histogram we can also see that _most_ cells are 100,000 - 999,000 bytes and the smaller - * and bigger ones are outliers making up less than 2% of mob cells. - * * SIZES OF ROWS: - this counter group gives a histogram of the order of magnitude of the - * size of mob values across each row according to our reference cells. In the example above - * we have rows that are are between 100,000 bytes and 9,999,999 bytes. We can also see that - * about 2/3rd of our rows are 100,000 - 999,999 bytes. - * + * * Map-Reduce Framework:Map input records - the number of rows with mob references * Map-Reduce + * Framework:Reduce output records - the number of unique hfiles referenced * MOB:NUM_CELLS - the + * total number of mob reference cells * PROBLEM:Affected rows - the number of rows that reference + * hfiles with an issue * PROBLEM:Problem MOB files - the number of unique hfiles that have an issue + * * CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the number + * of cells in a given row by grouping by the number of digits used in each count. This allows us to + * see more about the distribution of cells than what we can determine with just the cell count and + * the row count. In this particular example we can see that all of our rows have somewhere between + * 1 - 9 cells. * ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order + * of magnitude of the number of rows in each of the hfiles with a problem. e.g. in the example + * there are 2 hfiles and they each have the same order of magnitude number of rows, specifically + * between 100 and 999. * SIZES OF CELLS: - this counter group gives a histogram of the order of + * magnitude of the size of mob values according to our reference cells. e.g. in the example above + * we have cell sizes that are all between 10,000 bytes and 9,999,999 bytes. From this histogram we + * can also see that _most_ cells are 100,000 - 999,000 bytes and the smaller and bigger ones are + * outliers making up less than 2% of mob cells. 
* SIZES OF ROWS: - this counter group gives a + * histogram of the order of magnitude of the size of mob values across each row according to our + * reference cells. In the example above we have rows that are are between 100,000 bytes and + * 9,999,999 bytes. We can also see that about 2/3rd of our rows are 100,000 - 999,999 bytes. * Generates a report that gives one file status per line, with tabs dividing fields. * *
@@ -132,33 +124,28 @@
  * MISSING FILE    28e252d7f013973174750d483d358fa020191101f73536e7133f4cd3ab1065edf588d509        MmJiMjMyYzBiMTNjNzc0OTY1ZWY4NTU4ZjBmYmQ2MTUtNTIz,MmEzOGE0YTkzMTZjNDllNWE4MzM1MTdjNDVkMzEwNzAtODg=
  * </pre>
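Since the report described above is plain text with tab-separated fields (the reducer's separator defaults to "\t" via TextOutputFormat.SEPERATOR further down), here is a tiny sketch, not from this patch, of pulling one line apart; the line itself is invented, shaped like the MISSING FILE example above:

public class MobReportLineSketch {
  public static void main(String[] args) {
    String line = "MISSING FILE\td41d8cd98f00b204e9800998ecf8427e2019110100000000\tENCODEDROWS";
    String[] fields = line.split("\t");
    System.out.println("status = " + fields[0]); // e.g. MOB DIR, MISSING FILE, ...
    System.out.println("file   = " + fields[1]); // the referenced mob hfile name
    if (fields.length > 2) {
      System.out.println("rows   = " + fields[2]); // encoded row keys, present for the problem statuses in the example
    }
  }
}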
* - * Possible results are listed; the first three indicate things are working properly. - * * MOB DIR - the reference is in the normal MOB area for the given table and CF - * * HLINK TO ARCHIVE FOR SAME TABLE - the reference is present in the archive area for this - * table and CF - * * HLINK TO ARCHIVE FOR OTHER TABLE - the reference is present in a different table and CF, - * either in the MOB or archive areas (e.g. from a snapshot restore or clone) - * * ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the reference is currently present in the archive - * area for this table and CF, but it is kept there because a _different_ table has a - * reference to it (e.g. from a snapshot clone). If these other tables are removed then - * the file will likely be deleted unless there is a snapshot also referencing it. - * * ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for this table and - * CF, but there are no references present to prevent its removal. Unless it is newer than - * the general TTL (default 5 minutes) or referenced in a snapshot it will be subject to - * cleaning. - * * ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things failed while - * looking for why this file is being kept around. - * * MISSING FILE - We couldn't find the reference on the FileSystem. Either there is dataloss due - * to a bug in the MOB storage system or the MOB storage is damaged but in an edge case that - * allows it to work for now. You can verify which by doing a raw reference scan to get the - * referenced hfile and check the underlying filesystem. See the ref guide section on mob - * for details. - * * HLINK BUT POINT TO MISSING FILE - There is a pointer in our mob area for this table and CF - * to a file elsewhere on the FileSystem, however the file it points to no longer exists. - * * MISSING FILE BUT FAILURE WHILE CHECKING HLINKS - We could not find the referenced file, - * however you should check the job logs to see why we couldn't check to see if there is a - * pointer to the referenced file in our archive or another table's archive or mob area. - * + * Possible results are listed; the first three indicate things are working properly. * MOB DIR - + * the reference is in the normal MOB area for the given table and CF * HLINK TO ARCHIVE FOR SAME + * TABLE - the reference is present in the archive area for this table and CF * HLINK TO ARCHIVE FOR + * OTHER TABLE - the reference is present in a different table and CF, either in the MOB or archive + * areas (e.g. from a snapshot restore or clone) * ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the + * reference is currently present in the archive area for this table and CF, but it is kept there + * because a _different_ table has a reference to it (e.g. from a snapshot clone). If these other + * tables are removed then the file will likely be deleted unless there is a snapshot also + * referencing it. * ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for + * this table and CF, but there are no references present to prevent its removal. Unless it is newer + * than the general TTL (default 5 minutes) or referenced in a snapshot it will be subject to + * cleaning. * ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things + * failed while looking for why this file is being kept around. * MISSING FILE - We couldn't find + * the reference on the FileSystem. 
Either there is dataloss due to a bug in the MOB storage system + * or the MOB storage is damaged but in an edge case that allows it to work for now. You can verify + * which by doing a raw reference scan to get the referenced hfile and check the underlying + * filesystem. See the ref guide section on mob for details. * HLINK BUT POINT TO MISSING FILE - + * There is a pointer in our mob area for this table and CF to a file elsewhere on the FileSystem, + * however the file it points to no longer exists. * MISSING FILE BUT FAILURE WHILE CHECKING HLINKS + * - We could not find the referenced file, however you should check the job logs to see why we + * couldn't check to see if there is a pointer to the referenced file in our archive or another + * table's archive or mob area. */ @InterfaceAudience.Private public class MobRefReporter extends Configured implements Tool { @@ -169,8 +156,8 @@ public class MobRefReporter extends Configured implements Tool { public static class MobRefMapper extends TableMapper { @Override - public void map(ImmutableBytesWritable r, Result columns, Context context) throws IOException, - InterruptedException { + public void map(ImmutableBytesWritable r, Result columns, Context context) + throws IOException, InterruptedException { if (columns == null) { return; } @@ -190,24 +177,29 @@ public void map(ImmutableBytesWritable r, Result columns, Context context) throw files.add(fileName); } final int cellsize = MobUtils.getMobValueLength(c); - context.getCounter("SIZES OF CELLS", "Number of cells with size in the " + - log10GroupedString(cellsize) + "s of bytes").increment(1L); + context + .getCounter("SIZES OF CELLS", + "Number of cells with size in the " + log10GroupedString(cellsize) + "s of bytes") + .increment(1L); size += cellsize; count++; } else { LOG.debug("cell is not a mob ref, even though we asked for only refs. 
cell={}", c); } } - context.getCounter("CELLS PER ROW", "Number of rows with " + log10GroupedString(count) + - "s of cells per row").increment(1L); - context.getCounter("SIZES OF ROWS", "Number of rows with total size in the " + - log10GroupedString(size) + "s of bytes").increment(1L); - context.getCounter("MOB","NUM_CELLS").increment(count); + context + .getCounter("CELLS PER ROW", + "Number of rows with " + log10GroupedString(count) + "s of cells per row") + .increment(1L); + context + .getCounter("SIZES OF ROWS", + "Number of rows with total size in the " + log10GroupedString(size) + "s of bytes") + .increment(1L); + context.getCounter("MOB", "NUM_CELLS").increment(count); } } - public static class MobRefReducer extends - Reducer { + public static class MobRefReducer extends Reducer { TableName table; String mobRegion; @@ -246,7 +238,7 @@ public void setup(Context context) throws IOException, InterruptedException { mob = MobUtils.getMobFamilyPath(conf, table, family); LOG.info("Using active mob area '{}'", mob); archive = HFileArchiveUtil.getStoreArchivePath(conf, table, - MobUtils.getMobRegionInfo(table).getEncodedName(), family); + MobUtils.getMobRegionInfo(table).getEncodedName(), family); LOG.info("Using archive mob area '{}'", archive); seperator = conf.get(TextOutputFormat.SEPERATOR, "\t"); } @@ -260,7 +252,7 @@ public void reduce(Text key, Iterable rows, Context cont if (mob.getFileSystem(conf).exists(new Path(mob, file))) { LOG.debug("Found file '{}' in mob area", file); context.write(OK_MOB_DIR, key); - // archive area - is there an hlink back reference (from a snapshot from same table) + // archive area - is there an hlink back reference (from a snapshot from same table) } else if (archive.getFileSystem(conf).exists(new Path(archive, file))) { Path backRefDir = HFileLink.getBackReferencesDir(archive, file); @@ -269,37 +261,41 @@ public void reduce(Text key, Iterable rows, Context cont if (backRefs != null) { boolean found = false; for (FileStatus backRef : backRefs) { - Pair refParts = HFileLink.parseBackReferenceName( - backRef.getPath().getName()); + Pair refParts = + HFileLink.parseBackReferenceName(backRef.getPath().getName()); if (table.equals(refParts.getFirst()) && mobRegion.equals(refParts.getSecond())) { Path hlinkPath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(conf), - backRef.getPath()); + backRef.getPath()); if (hlinkPath.getFileSystem(conf).exists(hlinkPath)) { found = true; } else { LOG.warn("Found file '{}' in archive area with a back reference to the mob area " + "for our table, but the mob area does not have a corresponding hfilelink.", - file); + file); } } } if (found) { LOG.debug("Found file '{}' in archive area. has proper hlink back references to " - + "suggest it is from a restored snapshot for this table.", file); + + "suggest it is from a restored snapshot for this table.", + file); context.write(OK_HLINK_RESTORE, key); } else { LOG.warn("Found file '{}' in archive area, but the hlink back references do not " - + "properly point to the mob area for our table.", file); + + "properly point to the mob area for our table.", + file); context.write(INCONSISTENT_ARCHIVE_BAD_LINK, encodeRows(context, key, rows)); } } else { LOG.warn("Found file '{}' in archive area, but there are no hlinks pointing to it. 
Not " - + "yet used snapshot or an error.", file); + + "yet used snapshot or an error.", + file); context.write(INCONSISTENT_ARCHIVE_STALE, encodeRows(context, key, rows)); } } catch (IOException e) { LOG.warn("Found file '{}' in archive area, but got an error while checking " - + "on back references.", file, e); + + "on back references.", + file, e); context.write(INCONSISTENT_ARCHIVE_IOE, encodeRows(context, key, rows)); } @@ -307,19 +303,19 @@ public void reduce(Text key, Iterable rows, Context cont // check for an hlink in the active mob area (from a snapshot of a different table) try { /** - * we are doing this ourselves instead of using FSUtils.getReferenceFilePaths because - * we know the mob region never splits, so we can only have HFileLink references - * and looking for just them is cheaper then listing everything. - * - * This glob should match the naming convention for HFileLinks to our referenced hfile. - * As simplified explanation those file names look like "table=region-hfile". For details - * see the {@link HFileLink#createHFileLinkName HFileLink implementation}. + * we are doing this ourselves instead of using FSUtils.getReferenceFilePaths because we + * know the mob region never splits, so we can only have HFileLink references and looking + * for just them is cheaper then listing everything. This glob should match the naming + * convention for HFileLinks to our referenced hfile. As simplified explanation those file + * names look like "table=region-hfile". For details see the + * {@link HFileLink#createHFileLinkName HFileLink implementation}. */ FileStatus[] hlinks = mob.getFileSystem(conf).globStatus(new Path(mob + "/*=*-" + file)); if (hlinks != null && hlinks.length != 0) { if (hlinks.length != 1) { - LOG.warn("Found file '{}' as hfilelinks in the mob area, but there are more than " + - "one: {}", file, Arrays.deepToString(hlinks)); + LOG.warn("Found file '{}' as hfilelinks in the mob area, but there are more than " + + "one: {}", + file, Arrays.deepToString(hlinks)); } HFileLink found = null; for (FileStatus hlink : hlinks) { @@ -336,7 +332,8 @@ public void reduce(Text key, Iterable rows, Context cont context.write(OK_HLINK_CLONE, key); } else { LOG.warn("Found file '{}' as ref(s) in the mob area but they do not point to an hfile" - + " that exists.", file); + + " that exists.", + file); context.write(DATALOSS_HLINK_DANGLING, encodeRows(context, key, rows)); } } else { @@ -352,8 +349,8 @@ public void reduce(Text key, Iterable rows, Context cont } } catch (IOException e) { LOG.error( - "Exception while checking mob area of our table for HFileLinks that point to {}", - file, e); + "Exception while checking mob area of our table for HFileLinks that point to {}", file, + e); context.write(DATALOSS_MISSING_IOE, encodeRows(context, key, rows)); } } @@ -379,25 +376,27 @@ private Text encodeRows(Context context, Text key, Iterable 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000, etc. + * Returns the string representation of the given number after grouping it into log10 buckets. + * e.g. 0-9 -> 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000, etc. */ static String log10GroupedString(long number) { - return String.format("%,d", (long)(Math.pow(10d, Math.floor(Math.log10(number))))); + return String.format("%,d", (long) (Math.pow(10d, Math.floor(Math.log10(number))))); } /** * Main method for the tool. - * @return 0 if success, 1 for bad args. 2 if job aborted with an exception, - * 3 if mr job was unsuccessful + * @return 0 if success, 1 for bad args. 
2 if job aborted with an exception, 3 if mr job was + * unsuccessful */ public int run(String[] args) throws IOException, InterruptedException { // TODO make family and table optional @@ -427,7 +426,7 @@ public int run(String[] args) throws IOException, InterruptedException { } } else { LOG.error("The passed configs point to an HBase dir does not exist: {}", - conf.get(HConstants.HBASE_DIR)); + conf.get(HConstants.HBASE_DIR)); throw new IOException("The target HBase does not exist"); } @@ -435,7 +434,7 @@ public int run(String[] args) throws IOException, InterruptedException { int maxVersions; TableName tn = TableName.valueOf(tableName); try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { TableDescriptor htd = admin.getDescriptor(tn); ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(familyName)); if (hcd == null || !hcd.isMobEnabled()) { @@ -445,7 +444,6 @@ public int run(String[] args) throws IOException, InterruptedException { maxVersions = hcd.getMaxVersions(); } - String id = getClass().getSimpleName() + UUID.randomUUID().toString().replace("-", ""); Job job = null; Scan scan = new Scan(); @@ -462,8 +460,8 @@ public int run(String[] args) throws IOException, InterruptedException { job = Job.getInstance(conf); job.setJarByClass(getClass()); - TableMapReduceUtil.initTableMapperJob(tn, scan, - MobRefMapper.class, Text.class, ImmutableBytesWritable.class, job); + TableMapReduceUtil.initTableMapperJob(tn, scan, MobRefMapper.class, Text.class, + ImmutableBytesWritable.class, job); job.setReducerClass(MobRefReducer.class); job.setOutputFormatClass(TextOutputFormat.class); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index fd09e34fde16..837b682a4c3b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,12 +66,9 @@ import org.slf4j.LoggerFactory; /* - * The CompactionTool allows to execute a compaction specifying a: - *
    - *
  • table folder (all regions and families will be compacted) - *
  • region folder (all families in the region will be compacted) - *
  • family folder (the store files will be compacted) - *
+ * The CompactionTool allows to execute a compaction specifying a:
  • table folder (all + * regions and families will be compacted)
  • region folder (all families in the region will be + * compacted)
  • family folder (the store files will be compacted)
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class CompactionTool extends Configured implements Tool { @@ -82,8 +79,8 @@ public class CompactionTool extends Configured implements Tool { private final static String CONF_DELETE_COMPACTED = "hbase.compactiontool.delete"; /** - * Class responsible to execute the Compaction on the specified path. - * The path can be a table, region or family directory. + * Class responsible to execute the Compaction on the specified path. The path can be a table, + * region or family directory. */ private static class CompactionWorker { private final boolean deleteCompacted; @@ -98,7 +95,6 @@ public CompactionWorker(final FileSystem fs, final Configuration conf) { /** * Execute the compaction on the specified path. - * * @param path Directory path on which to run compaction. * @param compactOnce Execute just a single step of compaction. * @param major Request major compaction. @@ -110,8 +106,7 @@ public void compact(final Path path, final boolean compactOnce, final boolean ma Path tableDir = regionDir.getParent(); TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - compactStoreFiles(tableDir, htd, hri, - path.getName(), compactOnce, major); + compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major); } else if (isRegionDir(fs, path)) { Path tableDir = path.getParent(); TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); @@ -120,39 +115,36 @@ public void compact(final Path path, final boolean compactOnce, final boolean ma compactTable(path, compactOnce, major); } else { throw new IOException( - "Specified path is not a table, region or family directory. path=" + path); + "Specified path is not a table, region or family directory. path=" + path); } } private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major) throws IOException { TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); - for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { + for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) { compactRegion(tableDir, htd, regionDir, compactOnce, major); } } - private void compactRegion(final Path tableDir, final TableDescriptor htd, - final Path regionDir, final boolean compactOnce, final boolean major) - throws IOException { + private void compactRegion(final Path tableDir, final TableDescriptor htd, final Path regionDir, + final boolean compactOnce, final boolean major) throws IOException { RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { + for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) { compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major); } } /** - * Execute the actual compaction job. - * If the compact once flag is not specified, execute the compaction until - * no more compactions are needed. Uses the Configuration settings provided. + * Execute the actual compaction job. If the compact once flag is not specified, execute the + * compaction until no more compactions are needed. Uses the Configuration settings provided. 
*/ private void compactStoreFiles(final Path tableDir, final TableDescriptor htd, final RegionInfo hri, final String familyName, final boolean compactOnce, final boolean major) throws IOException { HStore store = getStore(conf, fs, tableDir, htd, hri, familyName); - LOG.info("Compact table=" + htd.getTableName() + - " region=" + hri.getRegionNameAsString() + - " family=" + familyName); + LOG.info("Compact table=" + htd.getTableName() + " region=" + hri.getRegionNameAsString() + + " family=" + familyName); if (major) { store.triggerMajorCompaction(); } @@ -166,13 +158,13 @@ private void compactStoreFiles(final Path tableDir, final TableDescriptor htd, store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null); if (storeFiles != null && !storeFiles.isEmpty()) { if (deleteCompacted) { - for (HStoreFile storeFile: storeFiles) { + for (HStoreFile storeFile : storeFiles) { fs.delete(storeFile.getPath(), false); } } } } while (store.needsCompaction() && !compactOnce); - //We need to close the store properly, to make sure it will archive compacted files + // We need to close the store properly, to make sure it will archive compacted files store.close(); } @@ -236,8 +228,8 @@ protected boolean isSplitable(JobContext context, Path file) { } /** - * Returns a split for each store files directory using the block location - * of each file as locality reference. + * Returns a split for each store files directory using the block location of each file as + * locality reference. */ @Override public List getSplits(JobContext job) throws IOException { @@ -245,7 +237,7 @@ public List getSplits(JobContext job) throws IOException { List files = listStatus(job); Text key = new Text(); - for (FileStatus file: files) { + for (FileStatus file : files) { Path path = file.getPath(); FileSystem fs = path.getFileSystem(job.getConfiguration()); LineReader reader = new LineReader(fs.open(path)); @@ -276,9 +268,9 @@ private static String[] getStoreDirHosts(final FileSystem fs, final Path path) } HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); - for (FileStatus hfileStatus: files) { + for (FileStatus hfileStatus : files) { HDFSBlocksDistribution storeFileBlocksDistribution = - FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen()); + FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen()); hdfsBlocksDistribution.add(storeFileBlocksDistribution); } @@ -287,27 +279,26 @@ private static String[] getStoreDirHosts(final FileSystem fs, final Path path) } /** - * Create the input file for the given directories to compact. - * The file is a TextFile with each line corrisponding to a - * store files directory to compact. + * Create the input file for the given directories to compact. The file is a TextFile with each + * line corrisponding to a store files directory to compact. 
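As an illustration, the generated input file is just one store-files directory path per line; with the TestTable example used in this tool's usage text below, its contents might look like the following (the second region/family pair is hypothetical):

hdfs://hbase/data/default/TestTable/abc/x
hdfs://hbase/data/default/TestTable/def/y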
*/ public static List createInputFile(final FileSystem fs, final FileSystem stagingFs, final Path path, final Set toCompactDirs) throws IOException { // Extract the list of store dirs List storeDirs = new LinkedList<>(); - for (Path compactDir: toCompactDirs) { + for (Path compactDir : toCompactDirs) { if (isFamilyDir(fs, compactDir)) { storeDirs.add(compactDir); } else if (isRegionDir(fs, compactDir)) { storeDirs.addAll(FSUtils.getFamilyDirs(fs, compactDir)); } else if (isTableDir(fs, compactDir)) { // Lookup regions - for (Path regionDir: FSUtils.getRegionDirs(fs, compactDir)) { + for (Path regionDir : FSUtils.getRegionDirs(fs, compactDir)) { storeDirs.addAll(FSUtils.getFamilyDirs(fs, regionDir)); } } else { throw new IOException( - "Specified path is not a table, region or family directory. path=" + compactDir); + "Specified path is not a table, region or family directory. path=" + compactDir); } } @@ -316,7 +307,7 @@ public static List createInputFile(final FileSystem fs, final FileSystem s LOG.info("Create input file=" + path + " with " + storeDirs.size() + " dirs to compact."); try { final byte[] newLine = Bytes.toBytes("\n"); - for (Path storeDir: storeDirs) { + for (Path storeDir : storeDirs) { stream.write(Bytes.toBytes(storeDir.toString())); stream.write(newLine); } @@ -352,16 +343,16 @@ private int doMapReduce(final FileSystem fs, final Set toCompactDirs, FileSystem stagingFs = stagingDir.getFileSystem(conf); try { // Create input file with the store dirs - Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTime()); - List storeDirs = CompactionInputFormat.createInputFile(fs, stagingFs, - inputPath, toCompactDirs); + Path inputPath = new Path(stagingDir, "compact-" + EnvironmentEdgeManager.currentTime()); + List storeDirs = + CompactionInputFormat.createInputFile(fs, stagingFs, inputPath, toCompactDirs); CompactionInputFormat.addInputPath(job, inputPath); // Initialize credential for secure cluster TableMapReduceUtil.initCredentials(job); // Despite the method name this will get delegation token for the filesystem - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - storeDirs.toArray(new Path[0]), conf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), storeDirs.toArray(new Path[0]), + conf); // Start the MR Job and wait return job.waitForCompletion(true) ? 0 : 1; @@ -376,7 +367,7 @@ private int doMapReduce(final FileSystem fs, final Set toCompactDirs, private int doClient(final FileSystem fs, final Set toCompactDirs, final boolean compactOnce, final boolean major) throws IOException { CompactionWorker worker = new CompactionWorker(fs, getConf()); - for (Path path: toCompactDirs) { + for (Path path : toCompactDirs) { worker.compact(path, compactOnce, major); } return 0; @@ -449,16 +440,17 @@ private void printUsage(final String message) { System.err.println(); System.err.println("Note: -D properties will be applied to the conf used. 
"); System.err.println("For example: "); - System.err.println(" To stop delete of compacted file, pass -D"+CONF_DELETE_COMPACTED+"=false"); + System.err + .println(" To stop delete of compacted file, pass -D" + CONF_DELETE_COMPACTED + "=false"); System.err.println(); System.err.println("Examples:"); System.err.println(" To compact the full 'TestTable' using MapReduce:"); - System.err.println(" $ hbase " + this.getClass().getName() + - " -mapred hdfs://hbase/data/default/TestTable"); + System.err.println( + " $ hbase " + this.getClass().getName() + " -mapred hdfs://hbase/data/default/TestTable"); System.err.println(); System.err.println(" To compact column family 'x' of the table 'TestTable' region 'abc':"); - System.err.println(" $ hbase " + this.getClass().getName() + - " hdfs://hbase/data/default/TestTable/abc/x"); + System.err.println( + " $ hbase " + this.getClass().getName() + " hdfs://hbase/data/default/TestTable/abc/x"); } public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index 54c92c5ab6a0..4217256f061c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.BufferedInputStream; @@ -86,11 +85,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * Export the specified snapshot to a given FileSystem. - * - * The .snapshot/name folder is copied to the destination cluster - * and then all the hfiles/wals are copied using a Map-Reduce Job in the .archive/ location. - * When everything is done, the second cluster can restore the snapshot. + * Export the specified snapshot to a given FileSystem. The .snapshot/name folder is copied to the + * destination cluster and then all the hfiles/wals are copied using a Map-Reduce Job in the + * .archive/ location. When everything is done, the second cluster can restore the snapshot. */ @InterfaceAudience.Public public class ExportSnapshot extends AbstractHBaseTool implements Tool { @@ -133,40 +130,40 @@ static class Testing { // Command line options and defaults. 
static final class Options { static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore."); - static final Option TARGET_NAME = new Option(null, "target", true, - "Target name for the snapshot."); - static final Option COPY_TO = new Option(null, "copy-to", true, "Remote " - + "destination hdfs://"); - static final Option COPY_FROM = new Option(null, "copy-from", true, - "Input folder hdfs:// (default hbase.rootdir)"); + static final Option TARGET_NAME = + new Option(null, "target", true, "Target name for the snapshot."); + static final Option COPY_TO = + new Option(null, "copy-to", true, "Remote " + "destination hdfs://"); + static final Option COPY_FROM = + new Option(null, "copy-from", true, "Input folder hdfs:// (default hbase.rootdir)"); static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false, "Do not verify checksum, use name+length only."); static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false, "Do not verify the integrity of the exported snapshot."); - static final Option NO_SOURCE_VERIFY = new Option(null, "no-source-verify", false, - "Do not verify the source of the snapshot."); - static final Option OVERWRITE = new Option(null, "overwrite", false, - "Rewrite the snapshot manifest if already exists."); - static final Option CHUSER = new Option(null, "chuser", true, - "Change the owner of the files to the specified one."); - static final Option CHGROUP = new Option(null, "chgroup", true, - "Change the group of the files to the specified one."); - static final Option CHMOD = new Option(null, "chmod", true, - "Change the permission of the files to the specified one."); + static final Option NO_SOURCE_VERIFY = + new Option(null, "no-source-verify", false, "Do not verify the source of the snapshot."); + static final Option OVERWRITE = + new Option(null, "overwrite", false, "Rewrite the snapshot manifest if already exists."); + static final Option CHUSER = + new Option(null, "chuser", true, "Change the owner of the files to the specified one."); + static final Option CHGROUP = + new Option(null, "chgroup", true, "Change the group of the files to the specified one."); + static final Option CHMOD = + new Option(null, "chmod", true, "Change the permission of the files to the specified one."); static final Option MAPPERS = new Option(null, "mappers", true, "Number of mappers to use during the copy (mapreduce.job.maps)."); - static final Option BANDWIDTH = new Option(null, "bandwidth", true, - "Limit bandwidth to this value in MB/second."); + static final Option BANDWIDTH = + new Option(null, "bandwidth", true, "Limit bandwidth to this value in MB/second."); } // Export Map-Reduce Counters, to keep track of the progress public enum Counter { - MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, - BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED + MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, BYTES_EXPECTED, BYTES_SKIPPED, + BYTES_COPIED } - private static class ExportMapper extends Mapper { + private static class ExportMapper + extends Mapper { private static final Logger LOG = LoggerFactory.getLogger(ExportMapper.class); final static int REPORT_SIZE = 1 * 1024 * 1024; final static int BUFFER_SIZE = 64 * 1024; @@ -199,7 +196,7 @@ public void setup(Context context) throws IOException { filesGroup = conf.get(CONF_FILES_GROUP); filesUser = conf.get(CONF_FILES_USER); - filesMode = (short)conf.getInt(CONF_FILES_MODE, 0); + filesMode = (short) conf.getInt(CONF_FILES_MODE, 0); outputRoot = new 
Path(conf.get(CONF_OUTPUT_ROOT)); inputRoot = new Path(conf.get(CONF_INPUT_ROOT)); @@ -217,7 +214,7 @@ public void setup(Context context) throws IOException { destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); outputFs = FileSystem.get(outputRoot.toUri(), destConf); } catch (IOException e) { - throw new IOException("Could not get the output FileSystem with root="+ outputRoot, e); + throw new IOException("Could not get the output FileSystem with root=" + outputRoot, e); } // Use the default block size of the outputFs if bigger @@ -261,7 +258,7 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException case HFILE: Path inputPath = new Path(inputInfo.getHfile()); String family = inputPath.getParent().getName(); - TableName table =HFileLink.getReferencedTableName(inputPath.getName()); + TableName table = HFileLink.getReferencedTableName(inputPath.getName()); String region = HFileLink.getReferencedRegionName(inputPath.getName()); String hfile = HFileLink.getReferencedHFileName(inputPath.getName()); path = new Path(CommonFSUtils.getTableDir(new Path("./"), table), @@ -278,8 +275,8 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException @SuppressWarnings("checkstyle:linelength") /** - * Used by TestExportSnapshot to test for retries when failures happen. - * Failure is injected in {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}. + * Used by TestExportSnapshot to test for retries when failures happen. Failure is injected in + * {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}. */ private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo) throws IOException { @@ -289,7 +286,7 @@ private void injectTestFailure(final Context context, final SnapshotFileInfo inp context.getCounter(Counter.COPY_FAILED).increment(1); LOG.debug("Injecting failure. Count: " + testing.injectedFailureCount); throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s", - testing.injectedFailureCount, testing.failuresCountToInject, inputInfo)); + testing.injectedFailureCount, testing.failuresCountToInject, inputInfo)); } private void copyFile(final Context context, final SnapshotFileInfo inputInfo, @@ -361,10 +358,8 @@ private void createOutputPath(final Path path) throws IOException { /** * Try to Preserve the files attribute selected by the user copying them from the source file * This is only required when you are exporting as a different user than "hbase" or on a system - * that doesn't have the "hbase" user. - * - * This is not considered a blocking failure since the user can force a chmod with the user - * that knows is available on the system. + * that doesn't have the "hbase" user. This is not considered a blocking failure since the user + * can force a chmod with the user that knows is available on the system. 
*/ private boolean preserveAttributes(final Path path, final FileStatus refStat) { FileStatus stat; @@ -382,7 +377,7 @@ private boolean preserveAttributes(final Path path, final FileStatus refStat) { outputFs.setPermission(path, refStat.getPermission()); } } catch (IOException e) { - LOG.warn("Unable to set the permission for file="+ stat.getPath() +": "+ e.getMessage()); + LOG.warn("Unable to set the permission for file=" + stat.getPath() + ": " + e.getMessage()); return false; } @@ -395,9 +390,10 @@ private boolean preserveAttributes(final Path path, final FileStatus refStat) { outputFs.setOwner(path, user, group); } } catch (IOException e) { - LOG.warn("Unable to set the owner/group for file="+ stat.getPath() +": "+ e.getMessage()); - LOG.warn("The user/group may not exist on the destination cluster: user=" + - user + " group=" + group); + LOG.warn( + "Unable to set the owner/group for file=" + stat.getPath() + ": " + e.getMessage()); + LOG.warn("The user/group may not exist on the destination cluster: user=" + user + + " group=" + group); return false; } } @@ -409,13 +405,11 @@ private boolean stringIsNotEmpty(final String str) { return str != null && str.length() > 0; } - private void copyData(final Context context, - final Path inputPath, final InputStream in, - final Path outputPath, final FSDataOutputStream out, - final long inputFileSize) + private void copyData(final Context context, final Path inputPath, final InputStream in, + final Path outputPath, final FSDataOutputStream out, final long inputFileSize) throws IOException { - final String statusMessage = "copied %s/" + StringUtils.humanReadableInt(inputFileSize) + - " (%.1f%%)"; + final String statusMessage = + "copied %s/" + StringUtils.humanReadableInt(inputFileSize) + " (%.1f%%)"; try { byte[] buffer = new byte[bufferSize]; @@ -431,33 +425,33 @@ private void copyData(final Context context, if (reportBytes >= reportSize) { context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); - context.setStatus(String.format(statusMessage, - StringUtils.humanReadableInt(totalBytesWritten), - (totalBytesWritten/(float)inputFileSize) * 100.0f) + - " from " + inputPath + " to " + outputPath); + context.setStatus( + String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten), + (totalBytesWritten / (float) inputFileSize) * 100.0f) + " from " + inputPath + + " to " + outputPath); reportBytes = 0; } } long etime = EnvironmentEdgeManager.currentTime(); context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); - context.setStatus(String.format(statusMessage, - StringUtils.humanReadableInt(totalBytesWritten), - (totalBytesWritten/(float)inputFileSize) * 100.0f) + - " from " + inputPath + " to " + outputPath); + context + .setStatus(String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten), + (totalBytesWritten / (float) inputFileSize) * 100.0f) + " from " + inputPath + " to " + + outputPath); // Verify that the written size match if (totalBytesWritten != inputFileSize) { - String msg = "number of bytes copied not matching copied=" + totalBytesWritten + - " expected=" + inputFileSize + " for file=" + inputPath; + String msg = "number of bytes copied not matching copied=" + totalBytesWritten + + " expected=" + inputFileSize + " for file=" + inputPath; throw new IOException(msg); } LOG.info("copy completed for input=" + inputPath + " output=" + outputPath); - LOG.info("size=" + totalBytesWritten + - " (" + StringUtils.humanReadableInt(totalBytesWritten) + ")" + - " time=" + 
StringUtils.formatTimeDiff(etime, stime) + - String.format(" %.3fM/sec", (totalBytesWritten / ((etime - stime)/1000.0))/1048576.0)); + LOG.info( + "size=" + totalBytesWritten + " (" + StringUtils.humanReadableInt(totalBytesWritten) + ")" + + " time=" + StringUtils.formatTimeDiff(etime, stime) + String.format(" %.3fM/sec", + (totalBytesWritten / ((etime - stime) / 1000.0)) / 1048576.0)); context.getCounter(Counter.FILES_COPIED).increment(1); } catch (IOException e) { LOG.error("Error copying " + inputPath + " to " + outputPath, e); @@ -467,12 +461,11 @@ private void copyData(final Context context, } /** - * Try to open the "source" file. - * Throws an IOException if the communication with the inputFs fail or - * if the file is not found. + * Try to open the "source" file. Throws an IOException if the communication with the inputFs + * fail or if the file is not found. */ private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo) - throws IOException { + throws IOException { try { Configuration conf = context.getConfiguration(); FileLink link = null; @@ -524,12 +517,12 @@ private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo f } } - private FileLink getFileLink(Path path, Configuration conf) throws IOException{ + private FileLink getFileLink(Path path, Configuration conf) throws IOException { String regionName = HFileLink.getReferencedRegionName(path.getName()); TableName tableName = HFileLink.getReferencedTableName(path.getName()); - if(MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) { + if (MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) { return HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), path); + HFileArchiveUtil.getArchivePath(conf), path); } return HFileLink.buildFromHFileLinkPattern(inputRoot, inputArchive, path); } @@ -544,8 +537,8 @@ private FileChecksum getFileChecksum(final FileSystem fs, final Path path) { } /** - * Check if the two files are equal by looking at the file length, - * and at the checksum (if user has specified the verifyChecksum flag). + * Check if the two files are equal by looking at the file length, and at the checksum (if user + * has specified the verifyChecksum flag). */ private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat) { // Not matching length @@ -566,7 +559,7 @@ private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat } // ========================================================================== - // Input Format + // Input Format // ========================================================================== /** @@ -624,12 +617,11 @@ private static Pair getSnapshotFileAndSize(FileSystem fs * Given a list of file paths and sizes, create around ngroups in as balanced a way as possible. * The groups created will have similar amounts of bytes. *

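The javadoc around this point describes the grouping strategy behind getBalancedSplits: sort the files by size, then hand the largest remaining file to each group while sweeping the group index back and forth so the byte totals stay close. A minimal, hypothetical sketch of that idea in plain Java, using bare long sizes instead of the Pair<BytesWritable, Long> entries the real ExportSnapshot method works with:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    // Hypothetical sketch, not the ExportSnapshot implementation: sort sizes
    // descending, then deal the largest remaining item to the current group
    // while bouncing the group index back and forth ("serpentine" dealing),
    // which keeps the per-group byte totals roughly balanced.
    public final class BalancedSplitsSketch {
      static List<List<Long>> getBalancedSplits(List<Long> sizes, int ngroups) {
        List<Long> sorted = new ArrayList<>(sizes);
        sorted.sort(Comparator.reverseOrder());
        List<List<Long>> groups = new ArrayList<>(ngroups);
        for (int i = 0; i < ngroups; i++) {
          groups.add(new ArrayList<>());
        }
        int g = 0;   // current group index
        int dir = 1; // +1 forward, -1 backward
        for (Long size : sorted) {
          groups.get(g).add(size);
          if (g + dir < 0 || g + dir >= ngroups) {
            dir = -dir; // bounce at the ends, revisiting the edge group
          } else {
            g += dir;
          }
        }
        return groups;
      }

      public static void main(String[] args) {
        // Totals come out as 10, 9, 8 for three groups: [[9, 1], [7, 2], [5, 3]]
        System.out.println(getBalancedSplits(List.of(9L, 7L, 5L, 3L, 2L, 1L), 3));
      }
    }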
- * The algorithm used is pretty straightforward; the file list is sorted by size, - * and then each group fetch the bigger file available, iterating through groups - * alternating the direction. + * The algorithm used is pretty straightforward; the file list is sorted by size, and then each + * group fetch the bigger file available, iterating through groups alternating the direction. */ - static List>> getBalancedSplits( - final List> files, final int ngroups) { + static List>> + getBalancedSplits(final List> files, final int ngroups) { // Sort files by size, from small to big Collections.sort(files, new Comparator>() { public int compare(Pair a, Pair b) { @@ -686,7 +678,7 @@ private static class ExportSnapshotInputFormat extends InputFormat createRecordReader(InputSplit split, TaskAttemptContext tac) throws IOException, InterruptedException { - return new ExportSnapshotRecordReader(((ExportSnapshotInputSplit)split).getSplitKeys()); + return new ExportSnapshotRecordReader(((ExportSnapshotInputSplit) split).getSplitKeys()); } @Override @@ -706,7 +698,7 @@ public List getSplits(JobContext context) throws IOException, Interr List>> groups = getBalancedSplits(snapshotFiles, mappers); List splits = new ArrayList(groups.size()); - for (List> files: groups) { + for (List> files : groups) { splits.add(new ExportSnapshotInputSplit(files)); } return splits; @@ -722,9 +714,9 @@ public ExportSnapshotInputSplit() { public ExportSnapshotInputSplit(final List> snapshotFiles) { this.files = new ArrayList(snapshotFiles.size()); - for (Pair fileInfo: snapshotFiles) { - this.files.add(new Pair<>( - new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond())); + for (Pair fileInfo : snapshotFiles) { + this.files.add( + new Pair<>(new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond())); this.length += fileInfo.getSecond(); } } @@ -760,7 +752,7 @@ public void readFields(DataInput in) throws IOException { @Override public void write(DataOutput out) throws IOException { out.writeInt(files.size()); - for (final Pair fileInfo: files) { + for (final Pair fileInfo : files) { fileInfo.getFirst().write(out); out.writeLong(fileInfo.getSecond()); } @@ -776,48 +768,55 @@ private static class ExportSnapshotRecordReader ExportSnapshotRecordReader(final List> files) { this.files = files; - for (Pair fileInfo: files) { + for (Pair fileInfo : files) { totalSize += fileInfo.getSecond(); } } @Override - public void close() { } + public void close() { + } @Override - public BytesWritable getCurrentKey() { return files.get(index).getFirst(); } + public BytesWritable getCurrentKey() { + return files.get(index).getFirst(); + } @Override - public NullWritable getCurrentValue() { return NullWritable.get(); } + public NullWritable getCurrentValue() { + return NullWritable.get(); + } @Override - public float getProgress() { return (float)procSize / totalSize; } + public float getProgress() { + return (float) procSize / totalSize; + } @Override - public void initialize(InputSplit split, TaskAttemptContext tac) { } + public void initialize(InputSplit split, TaskAttemptContext tac) { + } @Override public boolean nextKeyValue() { if (index >= 0) { procSize += files.get(index).getSecond(); } - return(++index < files.size()); + return (++index < files.size()); } } } // ========================================================================== - // Tool + // Tool // ========================================================================== /** * Run Map-Reduce Job to perform the files copy. 
*/ - private void runCopyJob(final Path inputRoot, final Path outputRoot, - final String snapshotName, final Path snapshotDir, final boolean verifyChecksum, - final String filesUser, final String filesGroup, final int filesMode, - final int mappers, final int bandwidthMB) - throws IOException, InterruptedException, ClassNotFoundException { + private void runCopyJob(final Path inputRoot, final Path outputRoot, final String snapshotName, + final Path snapshotDir, final boolean verifyChecksum, final String filesUser, + final String filesGroup, final int filesMode, final int mappers, final int bandwidthMB) + throws IOException, InterruptedException, ClassNotFoundException { Configuration conf = getConf(); if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup); if (filesUser != null) conf.set(CONF_FILES_USER, filesUser); @@ -846,11 +845,9 @@ private void runCopyJob(final Path inputRoot, final Path outputRoot, // Acquire the delegation Tokens Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - new Path[] { inputRoot }, srcConf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { inputRoot }, srcConf); Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - new Path[] { outputRoot }, destConf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { outputRoot }, destConf); // Run the MR Job if (!job.waitForCompletion(true)) { @@ -858,8 +855,8 @@ private void runCopyJob(final Path inputRoot, final Path outputRoot, } } - private void verifySnapshot(final Configuration baseConf, - final FileSystem fs, final Path rootDir, final Path snapshotDir) throws IOException { + private void verifySnapshot(final Configuration baseConf, final FileSystem fs, final Path rootDir, + final Path snapshotDir) throws IOException { // Update the conf with the current root dir, since may be a different cluster Configuration conf = new Configuration(baseConf); CommonFSUtils.setRootDir(conf, rootDir); @@ -968,8 +965,8 @@ public int doWork() throws IOException { } if (outputRoot == null) { - System.err.println("Destination file-system (--" + Options.COPY_TO.getLongOpt() - + ") not provided."); + System.err.println( + "Destination file-system (--" + Options.COPY_TO.getLongOpt() + ") not provided."); LOG.error("Use -h or --help for usage instructions."); return 0; } @@ -989,16 +986,17 @@ public int doWork() throws IOException { Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); destConf.setBoolean("fs." 
+ outputRoot.toUri().getScheme() + ".impl.disable.cache", true); FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf); - boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) || - conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; + boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) + || conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot); - Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, - destConf); - Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot); + Path snapshotTmpDir = + SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, destConf); + Path outputSnapshotDir = + SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot); Path initialOutputSnapshotDir = skipTmp ? outputSnapshotDir : snapshotTmpDir; LOG.debug("inputFs={}, inputRoot={}", inputFs.getUri().toString(), inputRoot); - LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", - outputFs, outputRoot.toString(), skipTmp, initialOutputSnapshotDir); + LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", outputFs, + outputRoot.toString(), skipTmp, initialOutputSnapshotDir); // Verify snapshot source before copying files if (verifySource) { @@ -1028,8 +1026,8 @@ public int doWork() throws IOException { return 1; } } else { - System.err.println("The snapshot '" + targetName + - "' already exists in the destination: " + outputSnapshotDir); + System.err.println("The snapshot '" + targetName + "' already exists in the destination: " + + outputSnapshotDir); return 1; } } @@ -1039,19 +1037,23 @@ public int doWork() throws IOException { if (outputFs.exists(snapshotTmpDir)) { if (overwrite) { if (!outputFs.delete(snapshotTmpDir, true)) { - System.err.println("Unable to remove existing snapshot tmp directory: "+snapshotTmpDir); + System.err + .println("Unable to remove existing snapshot tmp directory: " + snapshotTmpDir); return 1; } } else { - System.err.println("A snapshot with the same name '"+ targetName +"' may be in-progress"); - System.err.println("Please check "+snapshotTmpDir+". If the snapshot has completed, "); - System.err.println("consider removing "+snapshotTmpDir+" by using the -overwrite option"); + System.err + .println("A snapshot with the same name '" + targetName + "' may be in-progress"); + System.err + .println("Please check " + snapshotTmpDir + ". If the snapshot has completed, "); + System.err + .println("consider removing " + snapshotTmpDir + " by using the -overwrite option"); return 1; } } } - // Step 1 - Copy fs1:/.snapshot/ to fs2:/.snapshot/.tmp/ + // Step 1 - Copy fs1:/.snapshot/ to fs2:/.snapshot/.tmp/ // The snapshot references must be copied before the hfiles otherwise the cleaner // will remove them because they are unreferenced. 
List travesedPaths = new ArrayList<>(); @@ -1060,42 +1062,41 @@ public int doWork() throws IOException { LOG.info("Copy Snapshot Manifest from " + snapshotDir + " to " + initialOutputSnapshotDir); travesedPaths = FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, initialOutputSnapshotDir, conf, - conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); + conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); copySucceeded = true; } catch (IOException e) { - throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" + - snapshotDir + " to=" + initialOutputSnapshotDir, e); + throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" + snapshotDir + + " to=" + initialOutputSnapshotDir, e); } finally { if (copySucceeded) { if (filesUser != null || filesGroup != null) { - LOG.warn((filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to " - + filesUser) - + (filesGroup == null ? "" : ", Change the group of " + needSetOwnerDir + " to " - + filesGroup)); + LOG.warn( + (filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to " + filesUser) + + (filesGroup == null ? "" + : ", Change the group of " + needSetOwnerDir + " to " + filesGroup)); setOwnerParallel(outputFs, filesUser, filesGroup, conf, travesedPaths); } if (filesMode > 0) { LOG.warn("Change the permission of " + needSetOwnerDir + " to " + filesMode); - setPermissionParallel(outputFs, (short)filesMode, travesedPaths, conf); + setPermissionParallel(outputFs, (short) filesMode, travesedPaths, conf); } } } // Write a new .snapshotinfo if the target name is different from the source name if (!targetName.equals(snapshotName)) { - SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(inputFs, snapshotDir) - .toBuilder() - .setName(targetName) - .build(); + SnapshotDescription snapshotDesc = SnapshotDescriptionUtils + .readSnapshotInfo(inputFs, snapshotDir).toBuilder().setName(targetName).build(); SnapshotDescriptionUtils.writeSnapshotInfo(snapshotDesc, initialOutputSnapshotDir, outputFs); if (filesUser != null || filesGroup != null) { - outputFs.setOwner(new Path(initialOutputSnapshotDir, - SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser, filesGroup); + outputFs.setOwner( + new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser, + filesGroup); } if (filesMode > 0) { - outputFs.setPermission(new Path(initialOutputSnapshotDir, - SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), new FsPermission((short)filesMode)); + outputFs.setPermission( + new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), + new FsPermission((short) filesMode)); } } @@ -1103,15 +1104,15 @@ public int doWork() throws IOException { // The snapshot references must be copied before the files otherwise the files gets removed // by the HFileArchiver, since they have no references. 
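For orientation while reading these hunks: the comments above spell out why the ordering matters. The snapshot references are copied before the hfiles so the destination cleaner and HFileArchiver never see unreferenced files, and the temporary directory is only published once the copy succeeds. A hedged outline of that flow against the public Hadoop FileSystem API; copyManifest and runBulkCopy are placeholder names standing in for FSUtils.copyFilesParallel and the MapReduce copy job, not real HBase methods:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical outline of the three-step export ordering described in the
    // surrounding comments; the helper bodies are intentionally placeholders.
    public final class ExportOrderingSketch {
      static void export(Configuration conf, Path srcSnapshotDir, Path tmpDir, Path finalDir)
        throws IOException {
        FileSystem outputFs = finalDir.getFileSystem(conf);
        // Step 1: copy the snapshot manifest/references into a temporary directory first,
        // so the destination side never sees data files without references.
        copyManifest(conf, srcSnapshotDir, tmpDir);
        // Step 2: copy the actual hfiles/WALs (ExportSnapshot drives this with a MapReduce job).
        runBulkCopy(conf, srcSnapshotDir, finalDir);
        // Step 3: publish the snapshot by renaming the temporary directory into place.
        if (!outputFs.rename(tmpDir, finalDir)) {
          throw new IOException("Unable to rename " + tmpDir + " to " + finalDir);
        }
      }

      private static void copyManifest(Configuration conf, Path from, Path to) {
        // placeholder: the real code uses FSUtils.copyFilesParallel(...)
      }

      private static void runBulkCopy(Configuration conf, Path from, Path to) {
        // placeholder: the real code submits the ExportSnapshot copy job
      }
    }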
try { - runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, - filesUser, filesGroup, filesMode, mappers, bandwidthMB); + runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, filesUser, + filesGroup, filesMode, mappers, bandwidthMB); LOG.info("Finalize the Snapshot Export"); if (!skipTmp) { // Step 3 - Rename fs2:/.snapshot/.tmp/ fs2:/.snapshot/ if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) { - throw new ExportSnapshotException("Unable to rename snapshot directory from=" + - snapshotTmpDir + " to=" + outputSnapshotDir); + throw new ExportSnapshotException("Unable to rename snapshot directory from=" + + snapshotTmpDir + " to=" + outputSnapshotDir); } } @@ -1139,18 +1140,16 @@ public int doWork() throws IOException { @Override protected void printUsage() { super.printUsage(); - System.out.println("\n" - + "Examples:\n" - + " hbase snapshot export \\\n" + System.out.println("\n" + "Examples:\n" + " hbase snapshot export \\\n" + " --snapshot MySnapshot --copy-to hdfs://srv2:8082/hbase \\\n" - + " --chuser MyUser --chgroup MyGroup --chmod 700 --mappers 16\n" - + "\n" + + " --chuser MyUser --chgroup MyGroup --chmod 700 --mappers 16\n" + "\n" + " hbase snapshot export \\\n" + " --snapshot MySnapshot --copy-from hdfs://srv2:8082/hbase \\\n" + " --copy-to hdfs://srv1:50070/hbase"); } - @Override protected void addOptions() { + @Override + protected void addOptions() { addRequiredOption(Options.SNAPSHOT); addOption(Options.COPY_TO); addOption(Options.COPY_FROM); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java index 9432f309adb6..c97da439e751 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,8 +27,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Generate a classpath string containing any jars required by mapreduce jobs. Specify - * additional values by providing a comma-separated list of paths via -Dtmpjars. + * Generate a classpath string containing any jars required by mapreduce jobs. Specify additional + * values by providing a comma-separated list of paths via -Dtmpjars. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class MapreduceDependencyClasspathTool implements Tool { @@ -49,8 +49,10 @@ public Configuration getConf() { public int run(String[] args) throws Exception { if (args.length > 0) { System.err.println("Usage: hbase mapredcp [-Dtmpjars=...]"); - System.err.println(" Construct a CLASSPATH containing dependency jars required to run a mapreduce"); - System.err.println(" job. By default, includes any jars detected by TableMapReduceUtils. Provide"); + System.err.println( + " Construct a CLASSPATH containing dependency jars required to run a mapreduce"); + System.err + .println(" job. By default, includes any jars detected by TableMapReduceUtils. 
Provide"); System.err.println(" additional entries by specifying a comma-separated list in tmpjars."); return 0; } @@ -63,7 +65,7 @@ public int run(String[] args) throws Exception { public static void main(String[] argv) throws Exception { // Silence the usual noise. This is probably fragile... Log4jUtils.setLogLevel("org.apache.hadoop.hbase", "WARN"); - System.exit(ToolRunner.run( - HBaseConfiguration.create(), new MapreduceDependencyClasspathTool(), argv)); + System.exit( + ToolRunner.run(HBaseConfiguration.create(), new MapreduceDependencyClasspathTool(), argv)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 0a24cbe1edc9..a70c60383674 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -117,21 +116,18 @@ import org.apache.hbase.thirdparty.com.google.gson.Gson; /** - * Script used evaluating HBase performance and scalability. Runs a HBase - * client that steps through one of a set of hardcoded tests or 'experiments' - * (e.g. a random reads test, a random writes test, etc.). Pass on the - * command-line which test to run and how many clients are participating in - * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage. - * - *

This class sets up and runs the evaluation programs described in - * Section 7, Performance Evaluation, of the Bigtable - * paper, pages 8-10. - * - *

By default, runs as a mapreduce job where each mapper runs a single test - * client. Can also run as a non-mapreduce, multithreaded application by - * specifying {@code --nomapred}. Each client does about 1GB of data, unless - * specified otherwise. + * Script used evaluating HBase performance and scalability. Runs a HBase client that steps through + * one of a set of hardcoded tests or 'experiments' (e.g. a random reads test, a random writes test, + * etc.). Pass on the command-line which test to run and how many clients are participating in this + * experiment. Run {@code PerformanceEvaluation --help} to obtain usage. + *

+ * This class sets up and runs the evaluation programs described in Section 7, Performance + * Evaluation, of the Bigtable paper, + * pages 8-10. + *

+ * By default, runs as a mapreduce job where each mapper runs a single test client. Can also run as + * a non-mapreduce, multithreaded application by specifying {@code --nomapred}. Each client does + * about 1GB of data, unless specified otherwise. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class PerformanceEvaluation extends Configured implements Tool { @@ -170,11 +166,9 @@ public class PerformanceEvaluation extends Configured implements Tool { "Run async sequential read test"); addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite", "Run async sequential write test"); - addCommandDescriptor(AsyncScanTest.class, "asyncScan", - "Run async scan test (read every row)"); + addCommandDescriptor(AsyncScanTest.class, "asyncScan", "Run async scan test (read every row)"); addCommandDescriptor(RandomReadTest.class, RANDOM_READ, "Run random read test"); - addCommandDescriptor(MetaRandomReadTest.class, "metaRandomRead", - "Run getRegionLocation test"); + addCommandDescriptor(MetaRandomReadTest.class, "metaRandomRead", "Run getRegionLocation test"); addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN, "Run random seek and scan 100 test"); addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10", @@ -185,18 +179,15 @@ public class PerformanceEvaluation extends Configured implements Tool { "Run random seek scan with both start and stop row (max 1000 rows)"); addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000", "Run random seek scan with both start and stop row (max 10000 rows)"); - addCommandDescriptor(RandomWriteTest.class, "randomWrite", - "Run random write test"); - addCommandDescriptor(SequentialReadTest.class, "sequentialRead", - "Run sequential read test"); - addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", - "Run sequential write test"); + addCommandDescriptor(RandomWriteTest.class, "randomWrite", "Run random write test"); + addCommandDescriptor(SequentialReadTest.class, "sequentialRead", "Run sequential read test"); + addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", "Run sequential write test"); addCommandDescriptor(MetaWriteTest.class, "metaWrite", "Populate meta table;used with 1 thread; to be cleaned up by cleanMeta"); addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); addCommandDescriptor(FilteredScanTest.class, "filterScan", - "Run scan test using a filter to find a specific row based on it's value " + - "(make sure to use --rows=20)"); + "Run scan test using a filter to find a specific row based on it's value " + + "(make sure to use --rows=20)"); addCommandDescriptor(IncrementTest.class, "increment", "Increment on each row; clients overlap on keyspace so some concurrent operations"); addCommandDescriptor(AppendTest.class, "append", @@ -212,8 +203,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. 
*/ protected static enum Counter { /** elapsed time */ @@ -231,7 +222,7 @@ public RunResult(long duration, Histogram hist) { } public RunResult(long duration, long numbOfReplyOverThreshold, long numOfReplyFromReplica, - Histogram hist) { + Histogram hist) { this.duration = duration; this.hist = hist; this.numbOfReplyOverThreshold = numbOfReplyOverThreshold; @@ -248,7 +239,8 @@ public String toString() { return Long.toString(duration); } - @Override public int compareTo(RunResult o) { + @Override + public int compareTo(RunResult o) { return Long.compare(this.duration, o.duration); } } @@ -261,8 +253,8 @@ public PerformanceEvaluation(final Configuration conf) { super(conf); } - protected static void addCommandDescriptor(Class cmdClass, - String name, String description) { + protected static void addCommandDescriptor(Class cmdClass, String name, + String description) { CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description); COMMANDS.put(name, cmdDescriptor); } @@ -317,12 +309,12 @@ private Class forName(String className, Class type) @Override protected void map(LongWritable key, Text value, final Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Status status = new Status() { @Override public void setStatus(String msg) { - context.setStatus(msg); + context.setStatus(msg); } }; @@ -337,7 +329,8 @@ public void setStatus(String msg) { } // Evaluation task - RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status); + RunResult result = + PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status); // Collect how much time the thing took. Report as map output and // to the ELAPSED_TIME counter. context.getCounter(Counter.ELAPSED_TIME).increment(result.duration); @@ -348,43 +341,36 @@ public void setStatus(String msg) { } /* - * If table does not already exist, create. Also create a table when - * {@code opts.presplitRegions} is specified or when the existing table's - * region replica count doesn't match {@code opts.replicas}. + * If table does not already exist, create. Also create a table when {@code opts.presplitRegions} + * is specified or when the existing table's region replica count doesn't match {@code + * opts.replicas}. */ static boolean checkTable(Admin admin, TestOptions opts) throws IOException { TableName tableName = TableName.valueOf(opts.tableName); boolean needsDelete = false, exists = admin.tableExists(tableName); boolean isReadCmd = opts.cmdName.toLowerCase(Locale.ROOT).contains("read") - || opts.cmdName.toLowerCase(Locale.ROOT).contains("scan"); + || opts.cmdName.toLowerCase(Locale.ROOT).contains("scan"); if (!exists && isReadCmd) { throw new IllegalStateException( - "Must specify an existing table for read commands. Run a write command first."); + "Must specify an existing table for read commands. Run a write command first."); } - TableDescriptor desc = - exists ? admin.getDescriptor(TableName.valueOf(opts.tableName)) : null; + TableDescriptor desc = exists ? admin.getDescriptor(TableName.valueOf(opts.tableName)) : null; byte[][] splits = getSplits(opts); // recreate the table when user has requested presplit or when existing // {RegionSplitPolicy,replica count} does not match requested, or when the // number of column families does not match requested. 
if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions) - || (!isReadCmd && desc != null && - !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) - || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) - || (desc != null && desc.getColumnFamilyCount() != opts.families)) { + || (!isReadCmd && desc != null + && !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) + || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) + || (desc != null && desc.getColumnFamilyCount() != opts.families)) { needsDelete = true; // wait, why did it delete my table?!? - LOG.debug(MoreObjects.toStringHelper("needsDelete") - .add("needsDelete", needsDelete) - .add("isReadCmd", isReadCmd) - .add("exists", exists) - .add("desc", desc) - .add("presplit", opts.presplitRegions) - .add("splitPolicy", opts.splitPolicy) - .add("replicas", opts.replicas) - .add("families", opts.families) - .toString()); + LOG.debug(MoreObjects.toStringHelper("needsDelete").add("needsDelete", needsDelete) + .add("isReadCmd", isReadCmd).add("exists", exists).add("desc", desc) + .add("presplit", opts.presplitRegions).add("splitPolicy", opts.splitPolicy) + .add("replicas", opts.replicas).add("families", opts.families).toString()); } // remove an existing table @@ -420,12 +406,12 @@ static boolean checkTable(Admin admin, TestOptions opts) throws IOException { */ protected static TableDescriptor getTableDescriptor(TestOptions opts) { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(opts.tableName)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(opts.tableName)); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); ColumnFamilyDescriptorBuilder cfBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(familyName); + ColumnFamilyDescriptorBuilder.newBuilder(familyName); cfBuilder.setDataBlockEncoding(opts.blockEncoding); cfBuilder.setCompressionType(opts.compression); cfBuilder.setBloomFilterType(opts.bloomType); @@ -449,8 +435,7 @@ protected static TableDescriptor getTableDescriptor(TestOptions opts) { * generates splits based on total number of rows and specified split regions */ protected static byte[][] getSplits(TestOptions opts) { - if (opts.presplitRegions == DEFAULT_OPTS.presplitRegions) - return null; + if (opts.presplitRegions == DEFAULT_OPTS.presplitRegions) return null; int numSplitPoints = opts.presplitRegions - 1; byte[][] splits = new byte[numSplitPoints][]; @@ -492,8 +477,8 @@ static RunResult[] doLocalClients(final TestOptions opts, final Configuration co cons[i] = ConnectionFactory.createConnection(conf); asyncCons[i] = ConnectionFactory.createAsyncConnection(conf).get(); } - LOG.info("Created " + opts.connCount + " connections for " + - opts.numClientThreads + " threads"); + LOG.info( + "Created " + opts.connCount + " connections for " + opts.numClientThreads + " threads"); for (int i = 0; i < threads.length; i++) { final int index = i; threads[i] = pool.submit(new Callable() { @@ -509,11 +494,11 @@ public void setStatus(final String msg) throws IOException { LOG.info(msg); } }); - LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration + - "ms over " + threadOpts.perClientRunRows + " rows"); + LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration + + "ms over " + threadOpts.perClientRunRows + " rows"); if (opts.latencyThreshold > 0) { - LOG.info("Number of 
replies over latency threshold " + opts.latencyThreshold + - "(ms) is " + run.numbOfReplyOverThreshold); + LOG.info("Number of replies over latency threshold " + opts.latencyThreshold + + "(ms) is " + run.numbOfReplyOverThreshold); } return run; } @@ -529,11 +514,10 @@ public void setStatus(final String msg) throws IOException { } } final String test = cmd.getSimpleName(); - LOG.info("[" + test + "] Summary of timings (ms): " - + Arrays.toString(results)); + LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(results)); Arrays.sort(results); long total = 0; - float avgLatency = 0 ; + float avgLatency = 0; float avgTPS = 0; long replicaWins = 0; for (RunResult result : results) { @@ -544,10 +528,8 @@ public void setStatus(final String msg) throws IOException { } avgTPS *= 1000; // ms to second avgLatency = avgLatency / results.length; - LOG.info("[" + test + " duration ]" - + "\tMin: " + results[0] + "ms" - + "\tMax: " + results[results.length - 1] + "ms" - + "\tAvg: " + (total / results.length) + "ms"); + LOG.info("[" + test + " duration ]" + "\tMin: " + results[0] + "ms" + "\tMax: " + + results[results.length - 1] + "ms" + "\tAvg: " + (total / results.length) + "ms"); LOG.info("[ Avg latency (us)]\t" + Math.round(avgLatency)); LOG.info("[ Avg TPS/QPS]\t" + Math.round(avgTPS) + "\t row per second"); if (opts.replicas > 1) { @@ -563,9 +545,8 @@ public void setStatus(final String msg) throws IOException { } /* - * Run a mapreduce job. Run as many maps as asked-for clients. - * Before we start up the job, write out an input file with instruction - * per client regards which row they are to start on. + * Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write + * out an input file with instruction per client regards which row they are to start on. * @param cmd Command to run. * @throws IOException */ @@ -597,11 +578,11 @@ static Job doMapReduce(TestOptions opts, final Configuration conf) TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs")); TableMapReduceUtil.addDependencyJars(job); - TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - Histogram.class, // yammer metrics - Gson.class, // gson + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Histogram.class, // yammer + // metrics + Gson.class, // gson FilterAllFilter.class // hbase-server tests jar - ); + ); TableMapReduceUtil.initCredentials(job); @@ -610,7 +591,7 @@ static Job doMapReduce(TestOptions opts, final Configuration conf) } /** - * Each client has one mapper to do the work, and client do the resulting count in a map task. + * Each client has one mapper to do the work, and client do the resulting count in a map task. 
*/ static String JOB_INPUT_FILENAME = "input.txt"; @@ -626,7 +607,7 @@ static Path writeInputFile(final Configuration c, final TestOptions opts) throws } static Path writeInputFile(final Configuration c, final TestOptions opts, final Path basedir) - throws IOException { + throws IOException { SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss"); Path jobdir = new Path(new Path(basedir, PERF_EVAL_DIR), formatter.format(new Date())); Path inputDir = new Path(jobdir, "inputs"); @@ -651,7 +632,7 @@ static Path writeInputFile(final Configuration c, final TestOptions opts, final int hash = h.hash(new ByteArrayHashKey(b, 0, b.length), -1); m.put(hash, s); } - for (Map.Entry e: m.entrySet()) { + for (Map.Entry e : m.entrySet()) { out.println(e.getValue()); } } finally { @@ -688,11 +669,11 @@ public String getDescription() { } /** - * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation}. - * This makes tracking all these arguments a little easier. - * NOTE: ADDING AN OPTION, you need to add a data member, a getter/setter (to make JSON - * serialization of this TestOptions class behave), and you need to add to the clone constructor - * below copying your new option from the 'that' to the 'this'. Look for 'clone' below. + * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation}. This makes + * tracking all these arguments a little easier. NOTE: ADDING AN OPTION, you need to add a data + * member, a getter/setter (to make JSON serialization of this TestOptions class behave), and you + * need to add to the clone constructor below copying your new option from the 'that' to the + * 'this'. Look for 'clone' below. */ static class TestOptions { String cmdName = null; @@ -715,7 +696,7 @@ static class TestOptions { boolean writeToWAL = true; boolean autoFlush = false; boolean oneCon = false; - int connCount = -1; //wil decide the actual num later + int connCount = -1; // wil decide the actual num later boolean useTags = false; int noOfTags = 1; boolean reportLatency = false; @@ -733,7 +714,7 @@ static class TestOptions { boolean valueRandom = false; boolean valueZipf = false; int valueSize = DEFAULT_VALUE_LENGTH; - int period = (this.perClientRunRows / 10) == 0? perClientRunRows: perClientRunRows / 10; + int period = (this.perClientRunRows / 10) == 0 ? perClientRunRows : perClientRunRows / 10; int cycles = 1; int columns = 1; int families = 1; @@ -741,14 +722,14 @@ static class TestOptions { int latencyThreshold = 0; // in millsecond boolean addColumns = true; MemoryCompactionPolicy inMemoryCompaction = - MemoryCompactionPolicy.valueOf( - CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT); + MemoryCompactionPolicy.valueOf(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT); boolean asyncPrefetch = false; boolean cacheBlocks = true; Scan.ReadType scanReadType = Scan.ReadType.DEFAULT; long bufferSize = 2l * 1024l * 1024l; - public TestOptions() {} + public TestOptions() { + } /** * Clone constructor. @@ -1141,8 +1122,7 @@ public long getBufferSize() { } /* - * A test. - * Subclass to particularize what happens per row. + * A test. Subclass to particularize what happens per row. 
*/ static abstract class TestBase { // Below is make it so when Tests are all running in the one @@ -1152,6 +1132,7 @@ static abstract class TestBase { private static long nextRandomSeed() { return randomSeed.nextLong(); } + private final int everyN; protected final Random rand = new Random(nextRandomSeed()); @@ -1175,8 +1156,8 @@ private static long nextRandomSeed() { private long numOfReplyFromReplica = 0; /** - * Note that all subclasses of this class must provide a public constructor - * that has the exact same list of arguments. + * Note that all subclasses of this class must provide a public constructor that has the exact + * same list of arguments. */ TestBase(final Configuration conf, final TestOptions options, final Status status) { this.conf = conf; @@ -1200,13 +1181,14 @@ int getValueLength(final Random r) { } } - void updateValueSize(final Result [] rs) throws IOException { + void updateValueSize(final Result[] rs) throws IOException { updateValueSize(rs, 0); } - void updateValueSize(final Result [] rs, final long latency) throws IOException { + void updateValueSize(final Result[] rs, final long latency) throws IOException { if (rs == null || (latency == 0)) return; - for (Result r: rs) updateValueSize(r, latency); + for (Result r : rs) + updateValueSize(r, latency); } void updateValueSize(final Result r) throws IOException { @@ -1219,7 +1201,7 @@ void updateValueSize(final Result r, final long latency) throws IOException { // update replicaHistogram if (r.isStale()) { replicaLatencyHistogram.update(latency / 1000); - numOfReplyFromReplica ++; + numOfReplyFromReplica++; } if (!isRandomValueSize()) return; @@ -1236,7 +1218,7 @@ void updateValueSize(final int valueSize) { void updateScanMetrics(final ScanMetrics metrics) { if (metrics == null) return; - Map metricsMap = metrics.getMetricsMap(); + Map metricsMap = metrics.getMetricsMap(); Long rpcCalls = metricsMap.get(ScanMetrics.RPC_CALLS_METRIC_NAME); if (rpcCalls != null) { this.rpcCallsHistogram.update(rpcCalls.longValue()); @@ -1264,8 +1246,8 @@ void updateScanMetrics(final ScanMetrics metrics) { } String generateStatus(final int sr, final int i, final int lr) { - return sr + "/" + i + "/" + lr + ", latency " + getShortLatencyReport() + - (!isRandomValueSize()? "": ", value size " + getShortValueSizeReport()); + return sr + "/" + i + "/" + lr + ", latency " + getShortLatencyReport() + + (!isRandomValueSize() ? "" : ", value size " + getShortValueSizeReport()); } boolean isRandomValueSize() { @@ -1288,16 +1270,19 @@ void testSetup() throws IOException { latencyHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); // If it is a replica test, set up histogram for replica. 
if (opts.replicas > 1) { - replicaLatencyHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + replicaLatencyHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); } valueSizeHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); // scan metrics rpcCallsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); remoteRpcCallsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); - millisBetweenNextHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + millisBetweenNextHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); regionsScannedHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); bytesInResultsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); - bytesInRemoteResultsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + bytesInRemoteResultsHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); onStartup(); } @@ -1311,52 +1296,51 @@ void testTakedown() throws IOException { // output. We can't use 'this' here because each thread has its own instance of Test class. synchronized (Test.class) { status.setStatus("Test : " + testName + ", Thread : " + Thread.currentThread().getName()); - status.setStatus("Latency (us) : " + YammerHistogramUtils.getHistogramReport( - latencyHistogram)); + status.setStatus( + "Latency (us) : " + YammerHistogramUtils.getHistogramReport(latencyHistogram)); if (opts.replicas > 1) { - status.setStatus("Latency (us) from Replica Regions: " + - YammerHistogramUtils.getHistogramReport(replicaLatencyHistogram)); + status.setStatus("Latency (us) from Replica Regions: " + + YammerHistogramUtils.getHistogramReport(replicaLatencyHistogram)); } status.setStatus("Num measures (latency) : " + latencyHistogram.getCount()); status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(latencyHistogram)); if (valueSizeHistogram.getCount() > 0) { - status.setStatus("ValueSize (bytes) : " - + YammerHistogramUtils.getHistogramReport(valueSizeHistogram)); + status.setStatus( + "ValueSize (bytes) : " + YammerHistogramUtils.getHistogramReport(valueSizeHistogram)); status.setStatus("Num measures (ValueSize): " + valueSizeHistogram.getCount()); status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(valueSizeHistogram)); } else { status.setStatus("No valueSize statistics available"); } if (rpcCallsHistogram.getCount() > 0) { - status.setStatus("rpcCalls (count): " + - YammerHistogramUtils.getHistogramReport(rpcCallsHistogram)); + status.setStatus( + "rpcCalls (count): " + YammerHistogramUtils.getHistogramReport(rpcCallsHistogram)); } if (remoteRpcCallsHistogram.getCount() > 0) { - status.setStatus("remoteRpcCalls (count): " + - YammerHistogramUtils.getHistogramReport(remoteRpcCallsHistogram)); + status.setStatus("remoteRpcCalls (count): " + + YammerHistogramUtils.getHistogramReport(remoteRpcCallsHistogram)); } if (millisBetweenNextHistogram.getCount() > 0) { - status.setStatus("millisBetweenNext (latency): " + - YammerHistogramUtils.getHistogramReport(millisBetweenNextHistogram)); + status.setStatus("millisBetweenNext (latency): " + + YammerHistogramUtils.getHistogramReport(millisBetweenNextHistogram)); } if (regionsScannedHistogram.getCount() > 0) { - status.setStatus("regionsScanned (count): " + - YammerHistogramUtils.getHistogramReport(regionsScannedHistogram)); + 
status.setStatus("regionsScanned (count): " + + YammerHistogramUtils.getHistogramReport(regionsScannedHistogram)); } if (bytesInResultsHistogram.getCount() > 0) { - status.setStatus("bytesInResults (size): " + - YammerHistogramUtils.getHistogramReport(bytesInResultsHistogram)); + status.setStatus("bytesInResults (size): " + + YammerHistogramUtils.getHistogramReport(bytesInResultsHistogram)); } if (bytesInRemoteResultsHistogram.getCount() > 0) { - status.setStatus("bytesInRemoteResults (size): " + - YammerHistogramUtils.getHistogramReport(bytesInRemoteResultsHistogram)); + status.setStatus("bytesInRemoteResults (size): " + + YammerHistogramUtils.getHistogramReport(bytesInRemoteResultsHistogram)); } } } abstract void onTakedown() throws IOException; - /* * Run test * @return Elapsed time. @@ -1396,12 +1380,12 @@ void testTimed() throws IOException, InterruptedException { long startTime = System.nanoTime(); boolean requestSent = false; Span span = TraceUtil.getGlobalTracer().spanBuilder("test row").startSpan(); - try (Scope scope = span.makeCurrent()){ + try (Scope scope = span.makeCurrent()) { requestSent = testRow(i, startTime); } finally { span.end(); } - if ( (i - startRow) > opts.measureAfter) { + if ((i - startRow) > opts.measureAfter) { // If multiget or multiput is enabled, say set to 10, testRow() returns immediately // first 9 times and sends the actual get request in the 10th iteration. // We should only set latency when actual request is sent because otherwise @@ -1410,7 +1394,7 @@ void testTimed() throws IOException, InterruptedException { long latency = (System.nanoTime() - startTime) / 1000; latencyHistogram.update(latency); if ((opts.latencyThreshold > 0) && (latency / 1000 >= opts.latencyThreshold)) { - numOfReplyOverLatencyThreshold ++; + numOfReplyOverLatencyThreshold++; } } if (status != null && i > 0 && (i % getReportingPeriod()) == 0) { @@ -1435,15 +1419,14 @@ public String getShortValueSizeReport() { return YammerHistogramUtils.getShortHistogramReport(this.valueSizeHistogram); } - /** * Test for individual row. * @param i Row index. - * @return true if the row was sent to server and need to record metrics. - * False if not, multiGet and multiPut e.g., the rows are sent - * to server only if enough gets/puts are gathered. + * @return true if the row was sent to server and need to record metrics. False if not, multiGet + * and multiPut e.g., the rows are sent to server only if enough gets/puts are gathered. 
*/ - abstract boolean testRow(final int i, final long startTime) throws IOException, InterruptedException; + abstract boolean testRow(final int i, final long startTime) + throws IOException, InterruptedException; } static abstract class Test extends TestBase { @@ -1483,8 +1466,8 @@ void onTakedown() throws IOException { } /* - * Parent class for all meta tests: MetaWriteTest, MetaRandomReadTest and CleanMetaTest - */ + * Parent class for all meta tests: MetaWriteTest, MetaRandomReadTest and CleanMetaTest + */ static abstract class MetaTest extends TableTest { protected int keyLength; @@ -1499,7 +1482,7 @@ void onTakedown() throws IOException { } /* - Generates Lexicographically ascending strings + * Generates Lexicographically ascending strings */ protected byte[] getSplitKey(final int i) { return Bytes.toBytes(String.format("%0" + keyLength + "d", i)); @@ -1547,7 +1530,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1563,8 +1546,8 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt if (opts.multiGet > 0) { this.gets.add(get); if (this.gets.size() == opts.multiGet) { - Result[] rs = - this.table.get(this.gets).stream().map(f -> propagate(f::get)).toArray(Result[]::new); + Result[] rs = this.table.get(this.gets).stream().map(f -> propagate(f::get)) + .toArray(Result[]::new); updateValueSize(rs); this.gets.clear(); } else { @@ -1632,9 +1615,8 @@ static class AsyncScanTest extends AsyncTableTest { @Override void onStartup() throws IOException { - this.asyncTable = - connection.getTable(TableName.valueOf(opts.tableName), - Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())); + this.asyncTable = connection.getTable(TableName.valueOf(opts.tableName), + Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())); } @Override @@ -1649,15 +1631,14 @@ void testTakedown() throws IOException { @Override boolean testRow(final int i, final long startTime) throws IOException { if (this.testScanner == null) { - Scan scan = - new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) - .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) - .setReadType(opts.scanReadType).setScanMetricsEnabled(true); + Scan scan = new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1687,7 +1668,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? 
COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1729,7 +1710,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); byte[] value = generateData(this.rand, getValueLength(this.rand)); if (opts.useTags) { byte[] tag = generateData(this.rand, TAG_LENGTH); @@ -1738,8 +1719,8 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); updateValueSize(kv.getValueLength()); } else { @@ -1799,16 +1780,16 @@ static class RandomSeekScanTest extends TableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - Scan scan = new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)) - .setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks) - .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType) - .setScanMetricsEnabled(true); + Scan scan = + new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); FilterList list = new FilterList(); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1856,7 +1837,7 @@ boolean testRow(final int i, final long startTime) throws IOException { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1876,8 +1857,8 @@ boolean testRow(final int i, final long startTime) throws IOException { } if (i % 100 == 0) { LOG.info(String.format("Scan for key range %s - %s returned %s rows", - Bytes.toString(startAndStopRow.getFirst()), - Bytes.toString(startAndStopRow.getSecond()), count)); + Bytes.toString(startAndStopRow.getFirst()), Bytes.toString(startAndStopRow.getSecond()), + count)); } } finally { updateScanMetrics(s.getScanMetrics()); @@ -1886,7 +1867,7 @@ boolean testRow(final int i, final long startTime) throws IOException { return true; } - protected abstract Pair getStartAndStopRow(); + protected abstract Pair getStartAndStopRow(); protected Pair generateStartAndStopRows(int maxRange) { int start = this.rand.nextInt(Integer.MAX_VALUE) % opts.totalRows; @@ -1897,7 +1878,7 @@ protected Pair generateStartAndStopRows(int maxRange) { @Override protected int getReportingPeriod() { int period = opts.perClientRunRows / 100; - return period == 0? opts.perClientRunRows: period; + return period == 0 ? opts.perClientRunRows : period; } } @@ -1968,7 +1949,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1983,7 +1964,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt if (opts.multiGet > 0) { this.gets.add(get); if (this.gets.size() == opts.multiGet) { - Result [] rs = this.table.get(this.gets); + Result[] rs = this.table.get(this.gets); if (opts.replicas > 1) { long latency = System.nanoTime() - startTime; updateValueSize(rs, latency); @@ -2023,8 +2004,8 @@ protected void testTakedown() throws IOException { } /* - * Send random reads against fake regions inserted by MetaWriteTest - */ + * Send random reads against fake regions inserted by MetaWriteTest + */ static class MetaRandomReadTest extends MetaTest { private RegionLocator regionLocator; @@ -2044,8 +2025,8 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt if (opts.randomSleep > 0) { Thread.sleep(rand.nextInt(opts.randomSleep)); } - HRegionLocation hRegionLocation = regionLocator.getRegionLocation( - getSplitKey(rand.nextInt(opts.perClientRunRows)), true); + HRegionLocation hRegionLocation = + regionLocator.getRegionLocation(getSplitKey(rand.nextInt(opts.perClientRunRows)), true); LOG.debug("get location for region: " + hRegionLocation); return true; } @@ -2072,7 +2053,6 @@ protected byte[] generateRow(final int i) { return getRandomRow(this.rand, opts.totalRows); } - } static class ScanTest extends TableTest { @@ -2090,7 +2070,6 @@ void testTakedown() throws IOException { super.testTakedown(); } - @Override boolean testRow(final int i, final long startTime) throws IOException { if (this.testScanner == null) { @@ -2101,7 +2080,7 @@ boolean testRow(final int i, final long startTime) throws IOException { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -2122,19 +2101,20 @@ boolean testRow(final int i, final long startTime) throws IOException { /** * Base class for operations that are CAS-like; that read a value and then set it based off what * they read. In this category is increment, append, checkAndPut, etc. - * - *

These operations also want some concurrency going on. Usually when these tests run, they + * <p>
+ * These operations also want some concurrency going on. Usually when these tests run, they * operate in their own part of the key range. In CASTest, we will have them all overlap on the * same key space. We do this with our getStartRow and getLastRow overrides. */ static abstract class CASTableTest extends TableTest { - private final byte [] qualifier; + private final byte[] qualifier; + CASTableTest(Connection con, TestOptions options, Status status) { super(con, options, status); qualifier = Bytes.toBytes(this.getClass().getSimpleName()); } - byte [] getQualifier() { + byte[] getQualifier() { return this.qualifier; } @@ -2176,7 +2156,7 @@ static class AppendTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - byte [] bytes = format(i); + byte[] bytes = format(i); Append append = new Append(bytes); // unlike checkAndXXX tests, which make most sense to do on a single value, // if multiple families are specified for an append test we assume it is @@ -2197,7 +2177,7 @@ static class CheckAndMutateTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); @@ -2205,8 +2185,8 @@ boolean testRow(final int i, final long startTime) throws IOException { this.table.put(put); RowMutations mutations = new RowMutations(bytes); mutations.add(put); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenMutate(mutations); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenMutate(mutations); return true; } } @@ -2218,14 +2198,14 @@ static class CheckAndPutTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); put.addColumn(FAMILY_ZERO, getQualifier(), bytes); this.table.put(put); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenPut(put); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenPut(put); return true; } } @@ -2237,7 +2217,7 @@ static class CheckAndDeleteTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); @@ -2245,15 +2225,15 @@ boolean testRow(final int i, final long startTime) throws IOException { this.table.put(put); Delete delete = new Delete(put.getRow()); delete.addColumn(FAMILY_ZERO, getQualifier()); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenDelete(delete); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenDelete(delete); return true; } } /* - * Delete all fake regions inserted to meta table by MetaWriteTest. - */ + * Delete all fake regions inserted to meta table by MetaWriteTest. 
+ */ static class CleanMetaTest extends MetaTest { CleanMetaTest(Connection con, TestOptions options, Status status) { super(con, options, status); @@ -2263,11 +2243,11 @@ static class CleanMetaTest extends MetaTest { boolean testRow(final int i, final long startTime) throws IOException { try { RegionInfo regionInfo = connection.getRegionLocator(table.getName()) - .getRegionLocation(getSplitKey(i), false).getRegion(); + .getRegionLocation(getSplitKey(i), false).getRegion(); LOG.debug("deleting region from meta: " + regionInfo); - Delete delete = MetaTableAccessor - .makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP); + Delete delete = + MetaTableAccessor.makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP); try (Table t = MetaTableAccessor.getMetaHTable(connection)) { t.delete(delete); } @@ -2291,7 +2271,7 @@ boolean testRow(final int i, final long startTime) throws IOException { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -2309,7 +2289,6 @@ boolean testRow(final int i, final long startTime) throws IOException { static class SequentialWriteTest extends BufferedMutatorTest { private ArrayList puts; - SequentialWriteTest(Connection con, TestOptions options, Status status) { super(con, options, status); if (opts.multiPut > 0) { @@ -2329,7 +2308,7 @@ boolean testRow(final int i, final long startTime) throws IOException { for (int family = 0; family < opts.families; family++) { byte familyName[] = Bytes.toBytes(FAMILY_NAME_BASE + family); for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); byte[] value = generateData(this.rand, getValueLength(this.rand)); if (opts.useTags) { byte[] tag = generateData(this.rand, TAG_LENGTH); @@ -2338,8 +2317,8 @@ boolean testRow(final int i, final long startTime) throws IOException { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); updateValueSize(kv.getValueLength()); } else { @@ -2369,8 +2348,8 @@ boolean testRow(final int i, final long startTime) throws IOException { } /* - * Insert fake regions into meta table with contiguous split keys. - */ + * Insert fake regions into meta table with contiguous split keys. 
+ */ static class MetaWriteTest extends MetaTest { MetaWriteTest(Connection con, TestOptions options, Status status) { @@ -2381,27 +2360,26 @@ static class MetaWriteTest extends MetaTest { boolean testRow(final int i, final long startTime) throws IOException { List regionInfos = new ArrayList(); RegionInfo regionInfo = (RegionInfoBuilder.newBuilder(TableName.valueOf(TABLE_NAME)) - .setStartKey(getSplitKey(i)) - .setEndKey(getSplitKey(i + 1)) - .build()); + .setStartKey(getSplitKey(i)).setEndKey(getSplitKey(i + 1)).build()); regionInfos.add(regionInfo); MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1); // write the serverName columns - MetaTableAccessor.updateRegionLocation(connection, - regionInfo, ServerName.valueOf("localhost", 60010, rand.nextLong()), i, + MetaTableAccessor.updateRegionLocation(connection, regionInfo, + ServerName.valueOf("localhost", 60010, rand.nextLong()), i, EnvironmentEdgeManager.currentTime()); return true; } } + static class FilteredScanTest extends TableTest { protected static final Logger LOG = LoggerFactory.getLogger(FilteredScanTest.class.getName()); FilteredScanTest(Connection con, TestOptions options, Status status) { super(con, options, status); if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) { - LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB + - ". This could take a very long time."); + LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB + + ". This could take a very long time."); } } @@ -2426,8 +2404,8 @@ boolean testRow(int i, final long startTime) throws IOException { protected Scan constructScan(byte[] valuePrefix) throws IOException { FilterList list = new FilterList(); - Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO, - CompareOperator.EQUAL, new BinaryComparator(valuePrefix)); + Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO, CompareOperator.EQUAL, + new BinaryComparator(valuePrefix)); list.addFilter(filter); if (opts.filterAll) { list.addFilter(new FilterAllFilter()); @@ -2437,7 +2415,7 @@ protected Scan constructScan(byte[] valuePrefix) throws IOException { .setScanMetricsEnabled(true); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(FAMILY_ZERO, qualifier); } } else { @@ -2454,60 +2432,61 @@ protected Scan constructScan(byte[] valuePrefix) throws IOException { * @param timeMs Time taken in milliseconds. 
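For readers skimming the reflowed constructScan above, the same filter construction reads as follows when pulled out into a standalone, compilable sketch; the family and qualifier literals here are placeholders, not the FAMILY_ZERO/COLUMN_ZERO constants.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilteredScanSketch {
  static Scan buildScan(byte[] valuePrefix) {
    byte[] family = Bytes.toBytes("info0"); // illustrative family name
    byte[] qualifier = Bytes.toBytes("0");  // illustrative qualifier
    FilterList list = new FilterList();
    // Keep only rows whose cell value equals the generated prefix.
    Filter filter = new SingleColumnValueFilter(family, qualifier, CompareOperator.EQUAL,
      new BinaryComparator(valuePrefix));
    list.addFilter(filter);
    return new Scan().addColumn(family, qualifier).setFilter(list).setScanMetricsEnabled(true);
  }
}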
* @return String value with label, ie '123.76 MB/s' */ - private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, int columns) { - BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH + - ((valueSize + (FAMILY_NAME_BASE.length()+1) + COLUMN_ZERO.length) * columns) * families); - BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT) - .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT) - .divide(BYTES_PER_MB, CXT); + private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, + int columns) { + BigDecimal rowSize = BigDecimal.valueOf( + ROW_LENGTH + ((valueSize + (FAMILY_NAME_BASE.length() + 1) + COLUMN_ZERO.length) * columns) + * families); + BigDecimal mbps = + BigDecimal.valueOf(rows).multiply(rowSize, CXT).divide(BigDecimal.valueOf(timeMs), CXT) + .multiply(MS_PER_SEC, CXT).divide(BYTES_PER_MB, CXT); return FMT.format(mbps) + " MB/s"; } /* * Format passed integer. * @param number - * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed - * number (Does absolute in case number is negative). + * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed number (Does + * absolute in case number is negative). */ - public static byte [] format(final int number) { - byte [] b = new byte[ROW_LENGTH]; + public static byte[] format(final int number) { + byte[] b = new byte[ROW_LENGTH]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return b; } /* - * This method takes some time and is done inline uploading data. For - * example, doing the mapfile test, generation of the key and value - * consumes about 30% of CPU time. + * This method takes some time and is done inline uploading data. For example, doing the mapfile + * test, generation of the key and value consumes about 30% of CPU time. * @return Generated random value to insert into a table cell. 
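A back-of-the-envelope run of the calculateMbps arithmetic above may help; every number in this sketch is made up for illustration, and the row-key and family/qualifier overheads merely stand in for ROW_LENGTH and the real constants.

public class MbpsSketch {
  public static void main(String[] args) {
    int rows = 1_048_576;        // rows written by one client (illustrative)
    long timeMs = 60_000;        // elapsed time in milliseconds (illustrative)
    int valueSize = 1000;        // bytes per value (illustrative)
    int families = 1, columns = 1;
    int rowKeyLength = 26;       // stands in for ROW_LENGTH; an assumption, not the real constant
    int familyAndQualifier = 6;  // stands in for family-name + qualifier overhead; an assumption
    double rowSize = rowKeyLength + (double) (valueSize + familyAndQualifier) * columns * families;
    double mbps = rows * rowSize / timeMs * 1000.0 / (1024 * 1024);
    System.out.printf("%.2f MB/s%n", mbps); // prints 17.20 MB/s for these numbers
  }
}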
*/ public static byte[] generateData(final Random r, int length) { - byte [] b = new byte [length]; + byte[] b = new byte[length]; int i; - for(i = 0; i < (length-8); i += 8) { + for (i = 0; i < (length - 8); i += 8) { b[i] = (byte) (65 + r.nextInt(26)); - b[i+1] = b[i]; - b[i+2] = b[i]; - b[i+3] = b[i]; - b[i+4] = b[i]; - b[i+5] = b[i]; - b[i+6] = b[i]; - b[i+7] = b[i]; + b[i + 1] = b[i]; + b[i + 2] = b[i]; + b[i + 3] = b[i]; + b[i + 4] = b[i]; + b[i + 5] = b[i]; + b[i + 6] = b[i]; + b[i + 7] = b[i]; } byte a = (byte) (65 + r.nextInt(26)); - for(; i < length; i++) { + for (; i < length; i++) { b[i] = a; } return b; } - static byte [] getRandomRow(final Random random, final int totalRows) { + static byte[] getRandomRow(final Random random, final int totalRows) { return format(generateRandomRow(random, totalRows)); } @@ -2518,8 +2497,8 @@ static int generateRandomRow(final Random random, final int totalRows) { static RunResult runOneClient(final Class cmd, Configuration conf, Connection con, AsyncConnection asyncCon, TestOptions opts, final Status status) throws IOException, InterruptedException { - status.setStatus("Start " + cmd + " at offset " + opts.startRow + " for " - + opts.perClientRunRows + " rows"); + status.setStatus( + "Start " + cmd + " at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows"); long totalElapsedTime; final TestBase t; @@ -2545,21 +2524,22 @@ static RunResult runOneClient(final Class cmd, Configuration } totalElapsedTime = t.test(); - status.setStatus("Finished " + cmd + " in " + totalElapsedTime + - "ms at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows" + - " (" + calculateMbps((int)(opts.perClientRunRows * opts.sampleRate), totalElapsedTime, - getAverageValueLength(opts), opts.families, opts.columns) + ")"); + status.setStatus("Finished " + cmd + " in " + totalElapsedTime + "ms at offset " + opts.startRow + + " for " + opts.perClientRunRows + " rows" + " (" + + calculateMbps((int) (opts.perClientRunRows * opts.sampleRate), totalElapsedTime, + getAverageValueLength(opts), opts.families, opts.columns) + + ")"); return new RunResult(totalElapsedTime, t.numOfReplyOverLatencyThreshold, - t.numOfReplyFromReplica, t.getLatencyHistogram()); + t.numOfReplyFromReplica, t.getLatencyHistogram()); } private static int getAverageValueLength(final TestOptions opts) { - return opts.valueRandom? opts.valueSize/2: opts.valueSize; + return opts.valueRandom ? opts.valueSize / 2 : opts.valueSize; } - private void runTest(final Class cmd, TestOptions opts) throws IOException, - InterruptedException, ClassNotFoundException, ExecutionException { + private void runTest(final Class cmd, TestOptions opts) + throws IOException, InterruptedException, ClassNotFoundException, ExecutionException { // Log the configuration we're going to run with. Uses JSON mapper because lazy. It'll do // the TestOptions introspection for us and dump the output in a readable format. LOG.info(cmd.getSimpleName() + " test run options=" + GSON.toJson(opts)); @@ -2601,86 +2581,91 @@ protected static void printUsage(final String shortName, final String message) { System.err.println(" [-D]* "); System.err.println(); System.err.println("General Options:"); - System.err.println(" nomapred Run multiple clients using threads " + - "(rather than use mapreduce)"); - System.err.println(" oneCon all the threads share the same connection. 
Default: False"); + System.err.println( + " nomapred Run multiple clients using threads " + "(rather than use mapreduce)"); + System.err + .println(" oneCon all the threads share the same connection. Default: False"); System.err.println(" connCount connections all threads share. " + "For example, if set to 2, then all thread share 2 connection. " + "Default: depend on oneCon parameter. if oneCon set to true, then connCount=1, " + "if not, connCount=thread number"); - System.err.println(" sampleRate Execute test on a sample of total " + - "rows. Only supported by randomRead. Default: 1.0"); - System.err.println(" period Report every 'period' rows: " + - "Default: opts.perClientRunRows / 10 = " + DEFAULT_OPTS.getPerClientRunRows()/10); + System.err.println(" sampleRate Execute test on a sample of total " + + "rows. Only supported by randomRead. Default: 1.0"); + System.err.println(" period Report every 'period' rows: " + + "Default: opts.perClientRunRows / 10 = " + DEFAULT_OPTS.getPerClientRunRows() / 10); System.err.println(" cycles How many times to cycle the test. Defaults: 1."); - System.err.println(" traceRate Enable HTrace spans. Initiate tracing every N rows. " + - "Default: 0"); + System.err.println( + " traceRate Enable HTrace spans. Initiate tracing every N rows. " + "Default: 0"); System.err.println(" latency Set to report operation latencies. Default: False"); - System.err.println(" latencyThreshold Set to report number of operations with latency " + - "over lantencyThreshold, unit in millisecond, default 0"); - System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" + - " rows have been treated. Default: 0"); - System.err.println(" valueSize Pass value size to use: Default: " - + DEFAULT_OPTS.getValueSize()); - System.err.println(" valueRandom Set if we should vary value size between 0 and " + - "'valueSize'; set on read for stats on size: Default: Not set."); + System.err.println(" latencyThreshold Set to report number of operations with latency " + + "over lantencyThreshold, unit in millisecond, default 0"); + System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" + + " rows have been treated. Default: 0"); + System.err.println( + " valueSize Pass value size to use: Default: " + DEFAULT_OPTS.getValueSize()); + System.err.println(" valueRandom Set if we should vary value size between 0 and " + + "'valueSize'; set on read for stats on size: Default: Not set."); System.err.println(" blockEncoding Block encoding to use. Value should be one of " + Arrays.toString(DataBlockEncoding.values()) + ". Default: NONE"); System.err.println(); System.err.println("Table Creation / Write Tests:"); System.err.println(" table Alternate table name. Default: 'TestTable'"); - System.err.println(" rows Rows each client runs. Default: " - + DEFAULT_OPTS.getPerClientRunRows() - + ". In case of randomReads and randomSeekScans this could" - + " be specified along with --size to specify the number of rows to be scanned within" - + " the total range specified by the size."); + System.err.println( + " rows Rows each client runs. Default: " + DEFAULT_OPTS.getPerClientRunRows() + + ". In case of randomReads and randomSeekScans this could" + + " be specified along with --size to specify the number of rows to be scanned within" + + " the total range specified by the size."); System.err.println( " size Total size in GiB. Mutually exclusive with --rows for writes and scans" + ". 
But for randomReads and randomSeekScans when you use size with --rows you could" + " use size to specify the end range and --rows" + " specifies the number of rows within that range. " + "Default: 1.0."); System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'"); - System.err.println(" flushCommits Used to determine if the test should flush the table. " + - "Default: false"); - System.err.println(" valueZipf Set if we should vary value size between 0 and " + - "'valueSize' in zipf form: Default: Not set."); + System.err.println( + " flushCommits Used to determine if the test should flush the table. " + "Default: false"); + System.err.println(" valueZipf Set if we should vary value size between 0 and " + + "'valueSize' in zipf form: Default: Not set."); System.err.println(" writeToWAL Set writeToWAL on puts. Default: True"); System.err.println(" autoFlush Set autoFlush on htable. Default: False"); - System.err.println(" multiPut Batch puts together into groups of N. Only supported " + - "by write. If multiPut is bigger than 0, autoFlush need to set to true. Default: 0"); + System.err.println(" multiPut Batch puts together into groups of N. Only supported " + + "by write. If multiPut is bigger than 0, autoFlush need to set to true. Default: 0"); System.err.println(" presplit Create presplit table. If a table with same name exists," + " it'll be deleted and recreated (instead of verifying count of its existing regions). " + "Recommended for accurate perf analysis (see guide). Default: disabled"); - System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " + - "Default: false"); - System.err.println(" numoftags Specify the no of tags that would be needed. " + - "This works only if usetags is true. Default: " + DEFAULT_OPTS.noOfTags); + System.err.println( + " usetags Writes tags along with KVs. Use with HFile V3. " + "Default: false"); + System.err.println(" numoftags Specify the no of tags that would be needed. " + + "This works only if usetags is true. Default: " + DEFAULT_OPTS.noOfTags); System.err.println(" splitPolicy Specify a custom RegionSplitPolicy for the table."); System.err.println(" columns Columns to write per row. Default: 1"); - System.err.println(" families Specify number of column families for the table. Default: 1"); + System.err + .println(" families Specify number of column families for the table. Default: 1"); System.err.println(); System.err.println("Read Tests:"); System.err.println(" filterAll Helps to filter out all the rows on the server side" + " there by not returning any thing back to the client. Helps to check the server side" + " performance. Uses FilterAllFilter internally. "); - System.err.println(" multiGet Batch gets together into groups of N. Only supported " + - "by randomRead. Default: disabled"); - System.err.println(" inmemory Tries to keep the HFiles of the CF " + - "inmemory as far as possible. Not guaranteed that reads are always served " + - "from memory. Default: false"); - System.err.println(" bloomFilter Bloom filter type, one of " - + Arrays.toString(BloomType.values())); + System.err.println(" multiGet Batch gets together into groups of N. Only supported " + + "by randomRead. Default: disabled"); + System.err.println(" inmemory Tries to keep the HFiles of the CF " + + "inmemory as far as possible. Not guaranteed that reads are always served " + + "from memory. 
Default: false"); + System.err.println( + " bloomFilter Bloom filter type, one of " + Arrays.toString(BloomType.values())); System.err.println(" blockSize Blocksize to use when writing out hfiles. "); - System.err.println(" inmemoryCompaction Makes the column family to do inmemory flushes/compactions. " - + "Uses the CompactingMemstore"); + System.err + .println(" inmemoryCompaction Makes the column family to do inmemory flushes/compactions. " + + "Uses the CompactingMemstore"); System.err.println(" addColumns Adds columns to scans/gets explicitly. Default: true"); System.err.println(" replicas Enable region replica testing. Defaults: 1."); - System.err.println(" randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); + System.err.println( + " randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); System.err.println(" caching Scan caching to use. Default: 30"); System.err.println(" asyncPrefetch Enable asyncPrefetch for scan"); System.err.println(" cacheBlocks Set the cacheBlocks option for scan. Default: true"); - System.err.println(" scanReadType Set the readType option for scan, stream/pread/default. Default: default"); + System.err.println( + " scanReadType Set the readType option for scan, stream/pread/default. Default: default"); System.err.println(" bufferSize Set the value of client side buffering. Default: 2MB"); System.err.println(); System.err.println(" Note: -D properties will be applied to the conf used. "); @@ -2704,10 +2689,10 @@ protected static void printUsage(final String shortName, final String message) { } /** - * Parse options passed in via an arguments array. Assumes that array has been split - * on white-space and placed into a {@code Queue}. Any unknown arguments will remain - * in the queue at the conclusion of this method call. It's up to the caller to deal - * with these unrecognized arguments. + * Parse options passed in via an arguments array. Assumes that array has been split on + * white-space and placed into a {@code Queue}. Any unknown arguments will remain in the queue at + * the conclusion of this method call. It's up to the caller to deal with these unrecognized + * arguments. 
*/ static TestOptions parseOpts(Queue args) { TestOptions opts = new TestOptions(); @@ -2896,7 +2881,7 @@ static TestOptions parseOpts(Queue args) { } final String blockSize = "--blockSize="; - if(cmd.startsWith(blockSize) ) { + if (cmd.startsWith(blockSize)) { opts.blockSize = Integer.parseInt(cmd.substring(blockSize.length())); continue; } @@ -3005,17 +2990,17 @@ static TestOptions parseOpts(Queue args) { } /** - * Validates opts after all the opts are parsed, so that caller need not to maintain order of opts - */ - private static void validateParsedOpts(TestOptions opts) { + * Validates opts after all the opts are parsed, so that caller need not to maintain order of opts + */ + private static void validateParsedOpts(TestOptions opts) { if (!opts.autoFlush && opts.multiPut > 0) { throw new IllegalArgumentException("autoFlush must be true when multiPut is more than 0"); } if (opts.oneCon && opts.connCount > 1) { - throw new IllegalArgumentException("oneCon is set to true, " - + "connCount should not bigger than 1"); + throw new IllegalArgumentException( + "oneCon is set to true, " + "connCount should not bigger than 1"); } if (opts.valueZipf && opts.valueRandom) { @@ -3042,8 +3027,8 @@ static TestOptions calculateRowsAndSize(final TestOptions opts) { } static int getRowsPerGB(final TestOptions opts) { - return ONE_GB / ((opts.valueRandom? opts.valueSize/2: opts.valueSize) * opts.getFamilies() * - opts.getColumns()); + return ONE_GB / ((opts.valueRandom ? opts.valueSize / 2 : opts.valueSize) * opts.getFamilies() + * opts.getColumns()); } @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java index a9ce959c6f9a..e2244750c241 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; @@ -49,8 +48,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** - * A simple performance evaluation tool for single client and MR scans - * and snapshot scans. + * A simple performance evaluation tool for single client and MR scans and snapshot scans. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ScanPerformanceEvaluation extends AbstractHBaseTool { @@ -78,7 +76,8 @@ public void setConf(Configuration conf) { @Override protected void addOptions() { - this.addRequiredOptWithArg("t", "type", "the type of the test. One of the following: streaming|scan|snapshotscan|scanmapreduce|snapshotscanmapreduce"); + this.addRequiredOptWithArg("t", "type", + "the type of the test. 
One of the following: streaming|scan|snapshotscan|scanmapreduce|snapshotscanmapreduce"); this.addOptWithArg("f", "file", "the filename to read from"); this.addOptWithArg("tn", "table", "the tablename to read from"); this.addOptWithArg("sn", "snapshot", "the snapshot name to read from"); @@ -119,15 +118,15 @@ protected void testHdfsStreaming(Path filename) throws IOException { } streamTimer.stop(); - double throughput = (double)totalBytes / streamTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / streamTimer.elapsed(TimeUnit.SECONDS); System.out.println("HDFS streaming: "); - System.out.println("total time to open: " + - fileOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out + .println("total time to open: " + fileOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to read: " + streamTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throghput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throghput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); } private Scan getScan() { @@ -176,30 +175,30 @@ public void testScan() throws IOException { ScanMetrics metrics = scanner.getScanMetrics(); long totalBytes = metrics.countOfBytesInResults.get(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan: "); - System.out.println("total time to open table: " + - tableOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to scan: " + - scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open table: " + tableOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("Scan metrics:\n" + metrics.getMetricsMap()); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + 
StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } - public void testSnapshotScan() throws IOException { Stopwatch snapshotRestoreTimer = Stopwatch.createUnstarted(); Stopwatch scanOpenTimer = Stopwatch.createUnstarted(); @@ -233,40 +232,39 @@ public void testSnapshotScan() throws IOException { ScanMetrics metrics = scanner.getScanMetrics(); long totalBytes = metrics.countOfBytesInResults.get(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan snapshot: "); - System.out.println("total time to restore snapshot: " + - snapshotRestoreTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to scan: " + - scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to restore snapshot: " + + snapshotRestoreTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("Scan metrics:\n" + metrics.getMetricsMap()); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } public static enum ScanCounter { - NUM_ROWS, - NUM_CELLS, + NUM_ROWS, NUM_CELLS, } public static class MyMapper extends TableMapper { @Override - protected void map(ImmutableBytesWritable key, Result value, - Context context) throws IOException, - InterruptedException { + protected void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { context.getCounter(ScanCounter.NUM_ROWS).increment(1); context.getCounter(ScanCounter.NUM_CELLS).increment(value.rawCells().length); } @@ -285,14 +283,8 @@ public void testScanMapReduce() throws IOException, InterruptedException, ClassN job.setJarByClass(getClass()); - TableMapReduceUtil.initTableMapperJob( - this.tablename, - scan, - MyMapper.class, - NullWritable.class, - NullWritable.class, - job - ); + 
TableMapReduceUtil.initTableMapperJob(this.tablename, scan, MyMapper.class, NullWritable.class, + NullWritable.class, job); job.setNumReduceTasks(0); job.setOutputKeyClass(NullWritable.class); @@ -308,25 +300,28 @@ public void testScanMapReduce() throws IOException, InterruptedException, ClassN long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue(); long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan mapreduce: "); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } - public void testSnapshotScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException { + public void testSnapshotScanMapReduce() + throws IOException, InterruptedException, ClassNotFoundException { Stopwatch scanOpenTimer = Stopwatch.createUnstarted(); Stopwatch scanTimer = Stopwatch.createUnstarted(); @@ -339,16 +334,8 @@ public void testSnapshotScanMapReduce() throws IOException, InterruptedException job.setJarByClass(getClass()); - TableMapReduceUtil.initTableSnapshotMapperJob( - this.snapshotName, - scan, - MyMapper.class, - NullWritable.class, - NullWritable.class, - job, - true, - new Path(restoreDir) - ); + TableMapReduceUtil.initTableSnapshotMapperJob(this.snapshotName, scan, MyMapper.class, + NullWritable.class, NullWritable.class, job, true, new Path(restoreDir)); job.setNumReduceTasks(0); job.setOutputKeyClass(NullWritable.class); @@ -364,29 +351,31 @@ public void testSnapshotScanMapReduce() throws IOException, InterruptedException long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue(); long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / 
scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan mapreduce: "); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } @Override protected int doWork() throws Exception { if (type.equals("streaming")) { testHdfsStreaming(new Path(file)); - } else if (type.equals("scan")){ + } else if (type.equals("scan")) { testScan(); } else if (type.equals("snapshotscan")) { testSnapshotScan(); @@ -398,7 +387,7 @@ protected int doWork() throws Exception { return 0; } - public static void main (String[] args) throws Exception { + public static void main(String[] args) throws Exception { int ret = ToolRunner.run(HBaseConfiguration.create(), new ScanPerformanceEvaluation(), args); System.exit(ret); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java index d1f8cc08b269..415300a89694 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,6 @@ import java.util.Queue; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -56,7 +55,7 @@ import org.apache.hbase.thirdparty.com.google.gson.Gson; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestPerformanceEvaluation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -66,10 +65,9 @@ public class TestPerformanceEvaluation { @Test public void testDefaultInMemoryCompaction() { - PerformanceEvaluation.TestOptions defaultOpts = - new PerformanceEvaluation.TestOptions(); + PerformanceEvaluation.TestOptions defaultOpts = new PerformanceEvaluation.TestOptions(); assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT, - defaultOpts.getInMemoryCompaction().toString()); + defaultOpts.getInMemoryCompaction().toString()); TableDescriptor tableDescriptor = PerformanceEvaluation.getTableDescriptor(defaultOpts); for (ColumnFamilyDescriptor familyDescriptor : tableDescriptor.getColumnFamilies()) { assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT, @@ -85,7 +83,7 @@ public void testSerialization() { Gson gson = GsonUtil.createGson().create(); String optionsString = gson.toJson(options); PerformanceEvaluation.TestOptions optionsDeserialized = - gson.fromJson(optionsString, PerformanceEvaluation.TestOptions.class); + gson.fromJson(optionsString, PerformanceEvaluation.TestOptions.class); assertTrue(optionsDeserialized.isAutoFlush()); } @@ -99,7 +97,7 @@ public void testWriteInputFile() throws IOException { opts.setNumClientThreads(clients); opts.setPerClientRunRows(10); Path dir = - PerformanceEvaluation.writeInputFile(HTU.getConfiguration(), opts, HTU.getDataTestDir()); + PerformanceEvaluation.writeInputFile(HTU.getConfiguration(), opts, HTU.getDataTestDir()); FileSystem fs = FileSystem.get(HTU.getConfiguration()); Path p = new Path(dir, PerformanceEvaluation.JOB_INPUT_FILENAME); long len = fs.getFileStatus(p).getLen(); @@ -108,7 +106,7 @@ public void testWriteInputFile() throws IOException { try (FSDataInputStream dis = fs.open(p)) { dis.readFully(content); BufferedReader br = new BufferedReader( - new InputStreamReader(new ByteArrayInputStream(content), StandardCharsets.UTF_8)); + new InputStreamReader(new ByteArrayInputStream(content), StandardCharsets.UTF_8)); int count = 0; while (br.readLine() != null) { count++; @@ -179,9 +177,9 @@ public void testZipfian() throws NoSuchMethodException, SecurityException, Insta opts.setValueSize(valueSize); RandomReadTest rrt = new RandomReadTest(null, opts, null); Constructor ctor = - Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class); + Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class); ctor.setAccessible(true); - Histogram histogram = (Histogram)ctor.newInstance(new UniformReservoir(1024 * 500)); + Histogram histogram = (Histogram) ctor.newInstance(new UniformReservoir(1024 * 500)); for (int i = 0; i < 100; i++) { histogram.update(rrt.getValueLength(null)); } @@ -256,11 +254,11 @@ public void testParseOptsMultiPuts() { try { options = PerformanceEvaluation.parseOpts(opts); fail("should fail"); - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException e) { System.out.println(e.getMessage()); } - //Re-create options + // Re-create options opts = new 
LinkedList<>(); opts.offer("--autoFlush=true"); opts.offer("--multiPut=10"); @@ -316,7 +314,7 @@ public void testParseOptsConnCount() { try { options = PerformanceEvaluation.parseOpts(opts); fail("should fail"); - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException e) { System.out.println(e.getMessage()); } @@ -344,7 +342,7 @@ public void testParseOptsValueRandom() { try { options = PerformanceEvaluation.parseOpts(opts); fail("should fail"); - } catch (IllegalStateException e) { + } catch (IllegalStateException e) { System.out.println(e.getMessage()); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java index 327b7afec2fb..6c49a43bf463 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,18 +29,17 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestDriver { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDriver.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestDriver.class); @Test public void testDriverMainMethod() throws Throwable { ProgramDriver programDriverMock = mock(ProgramDriver.class); Driver.setProgramDriver(programDriverMock); - Driver.main(new String[]{}); + Driver.main(new String[] {}); verify(programDriverMock).driver(Mockito.any()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java index 76e3c73e2d50..05f096ae5955 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestGroupingTableMap { @ClassRule @@ -58,8 +58,7 @@ public class TestGroupingTableMap { @Test @SuppressWarnings({ "deprecation", "unchecked" }) - public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() - throws Exception { + public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() throws Exception { GroupingTableMap gTableMap = null; try { Result result = mock(Result.class); @@ -71,13 +70,13 @@ public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() gTableMap.configure(jobConf); byte[] row = {}; - List keyValues = ImmutableList.of( - new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), - Bytes.toBytes("1111")), - new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), - Bytes.toBytes("2222")), - new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), - Bytes.toBytes("3333"))); + List keyValues = ImmutableList. of( + new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), + Bytes.toBytes("1111")), + new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), + Bytes.toBytes("2222")), + new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), + Bytes.toBytes("3333"))); when(result.listCells()).thenReturn(keyValues); OutputCollector outputCollectorMock = mock(OutputCollector.class); @@ -85,8 +84,7 @@ public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() verify(result).listCells(); verifyZeroInteractions(outputCollectorMock); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -104,24 +102,22 @@ public void shouldCreateNewKeyAlthoughExtraKey() throws Exception { gTableMap.configure(jobConf); byte[] row = {}; - List keyValues = ImmutableList.of( - new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), - Bytes.toBytes("1111")), - new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), - Bytes.toBytes("2222")), - new KeyValue(row, Bytes.toBytes("familyC"), Bytes.toBytes("qualifierC"), - Bytes.toBytes("3333"))); + List keyValues = ImmutableList. 
of( + new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), + Bytes.toBytes("1111")), + new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), + Bytes.toBytes("2222")), + new KeyValue(row, Bytes.toBytes("familyC"), Bytes.toBytes("qualifierC"), + Bytes.toBytes("3333"))); when(result.listCells()).thenReturn(keyValues); OutputCollector outputCollectorMock = mock(OutputCollector.class); gTableMap.map(null, result, outputCollectorMock, reporter); verify(result).listCells(); - verify(outputCollectorMock, times(1)) - .collect(any(), any()); + verify(outputCollectorMock, times(1)).collect(any(), any()); verifyNoMoreInteractions(outputCollectorMock); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -142,24 +138,23 @@ public void shouldCreateNewKey() throws Exception { final byte[] firstPartKeyValue = Bytes.toBytes("34879512738945"); final byte[] secondPartKeyValue = Bytes.toBytes("35245142671437"); byte[] row = {}; - List cells = ImmutableList.of( - new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), - firstPartKeyValue), - new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), - secondPartKeyValue)); + List cells = ImmutableList. of( + new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), firstPartKeyValue), + new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), + secondPartKeyValue)); when(result.listCells()).thenReturn(cells); final AtomicBoolean outputCollected = new AtomicBoolean(); OutputCollector outputCollector = new OutputCollector() { - @Override - public void collect(ImmutableBytesWritable arg, Result result) throws IOException { - assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives. - Bytes.concat(firstPartKeyValue, bSeparator, - secondPartKeyValue), arg.copyBytes()); - outputCollected.set(true); - } - }; + @Override + public void collect(ImmutableBytesWritable arg, Result result) throws IOException { + assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives.Bytes + .concat(firstPartKeyValue, bSeparator, secondPartKeyValue), + arg.copyBytes()); + outputCollected.set(true); + } + }; gTableMap.map(null, result, outputCollector, reporter); verify(result).listCells(); @@ -169,12 +164,11 @@ public void collect(ImmutableBytesWritable arg, Result result) throws IOExceptio final byte[] secondPartValue = Bytes.toBytes("4678456942345"); byte[][] data = { firstPartValue, secondPartValue }; ImmutableBytesWritable byteWritable = gTableMap.createGroupKey(data); - assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives. 
- Bytes.concat(firstPartValue, - bSeparator, secondPartValue), byteWritable.get()); + assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives.Bytes + .concat(firstPartValue, bSeparator, secondPartValue), + byteWritable.get()); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -186,8 +180,7 @@ public void shouldReturnNullFromCreateGroupKey() throws Exception { gTableMap = new GroupingTableMap(); assertNull(gTableMap.createGroupKey(null)); } finally { - if(gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java index 25576c1ef420..f90a45701eb0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestIdentityTableMap { @ClassRule @@ -55,14 +55,11 @@ public void shouldCollectPredefinedTimes() throws IOException { mock(OutputCollector.class); for (int i = 0; i < recordNumber; i++) - identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock, - reporterMock); + identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock, reporterMock); - verify(outputCollectorMock, times(recordNumber)).collect( - Mockito.any(), Mockito.any()); + verify(outputCollectorMock, times(recordNumber)).collect(Mockito.any(), Mockito.any()); } finally { - if (identityTableMap != null) - identityTableMap.close(); + if (identityTableMap != null) identityTableMap.close(); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java index 1dd3e69f9775..64cd2f035933 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ protected void runJob(String jobName, Configuration c, List scans) job.setReducerClass(Reducer.class); TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); + ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); TableMapReduceUtil.addDependencyJars(job); @@ -92,10 +92,8 @@ public void map(ImmutableBytesWritable key, Result value, } /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. 
- * + * Closes this stream and releases any system resources associated with it. If the stream is + * already closed then invoking this method has no effect. * @throws IOException if an I/O error occurs */ @Override @@ -109,8 +107,7 @@ public void configure(JobConf jobConf) { } public static class Reducer extends TestMultiTableSnapshotInputFormat.ScanReducer implements - org.apache.hadoop.mapred.Reducer { + org.apache.hadoop.mapred.Reducer { private JobConf jobConf; @@ -122,10 +119,8 @@ public void reduce(ImmutableBytesWritable key, Iterator } /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - * + * Closes this stream and releases any system resources associated with it. If the stream is + * already closed then invoking this method has no effect. * @throws IOException if an I/O error occurs */ @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java index 13913e5fc24a..828008645527 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner; -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestRowCounter { @ClassRule @@ -68,8 +68,7 @@ void doRead() { @Test @SuppressWarnings("deprecation") - public void shouldExitAndPrintUsageSinceParameterNumberLessThanThree() - throws Exception { + public void shouldExitAndPrintUsageSinceParameterNumberLessThanThree() throws Exception { final String[] args = new String[] { "one", "two" }; String line = "ERROR: Wrong number of parameters: " + args.length; String result = new OutputReader(System.err) { @@ -90,10 +89,9 @@ public void shouldRegInReportEveryIncomingRow() throws IOException { Reporter reporter = mock(Reporter.class); for (int i = 0; i < iterationNumber; i++) mapper.map(mock(ImmutableBytesWritable.class), mock(Result.class), - mock(OutputCollector.class), reporter); + mock(OutputCollector.class), reporter); - Mockito.verify(reporter, times(iterationNumber)).incrCounter( - any(), anyLong()); + Mockito.verify(reporter, times(iterationNumber)).incrCounter(any(), anyLong()); } @Test @@ -101,8 +99,7 @@ public void shouldRegInReportEveryIncomingRow() throws IOException { public void shouldCreateAndRunSubmittableJob() throws Exception { RowCounter rCounter = new RowCounter(); rCounter.setConf(HBaseConfiguration.create()); - String[] args = new String[] { "\temp", "tableA", "column1", "column2", - "column3" }; + String[] args = new String[] { "\temp", "tableA", "column1", "column2", "column3" }; JobConf jobConfig = rCounter.createSubmittableJob(args); assertNotNull(jobConfig); @@ -110,8 +107,8 @@ public void shouldCreateAndRunSubmittableJob() throws Exception { assertEquals("rowcounter", jobConfig.getJobName()); assertEquals(jobConfig.getMapOutputValueClass(), Result.class); assertEquals(jobConfig.getMapperClass(), RowCounterMapper.class); - assertEquals(jobConfig.get(TableInputFormat.COLUMN_LIST), Joiner.on(' ') - .join("column1", "column2", 
"column3")); + assertEquals(jobConfig.get(TableInputFormat.COLUMN_LIST), + Joiner.on(' ').join("column1", "column2", "column3")); assertEquals(jobConfig.getMapOutputKeyClass(), ImmutableBytesWritable.class); } @@ -147,17 +144,16 @@ protected String read() throws Exception { return new String(outBytes.toByteArray()); } finally { switch (outs) { - case OUT: { - System.setOut(oldPrintStream); - break; - } - case ERR: { - System.setErr(oldPrintStream); - break; - } - default: - throw new IllegalStateException( - "OutputReader: unsupported PrintStream"); + case OUT: { + System.setOut(oldPrintStream); + break; + } + case ERR: { + System.setErr(oldPrintStream); + break; + } + default: + throw new IllegalStateException("OutputReader: unsupported PrintStream"); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java index bf46a7ac6d88..4475a7fabdea 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestSplitTable { @ClassRule @@ -44,16 +44,16 @@ public class TestSplitTable { public TestName name = new TestName(); @Test - @SuppressWarnings({"deprecation", "SelfComparison"}) + @SuppressWarnings({ "deprecation", "SelfComparison" }) public void testSplitTableCompareTo() { - TableSplit aTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("aaa"), Bytes.toBytes("ddd"), "locationA"); + TableSplit aTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("aaa"), + Bytes.toBytes("ddd"), "locationA"); - TableSplit bTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("iii"), Bytes.toBytes("kkk"), "locationA"); + TableSplit bTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("iii"), + Bytes.toBytes("kkk"), "locationA"); - TableSplit cTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("lll"), Bytes.toBytes("zzz"), "locationA"); + TableSplit cTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("lll"), + Bytes.toBytes("zzz"), "locationA"); assertEquals(0, aTableSplit.compareTo(aTableSplit)); assertEquals(0, bTableSplit.compareTo(bTableSplit)); @@ -105,18 +105,15 @@ public void testSplitTableEquals() { @Test @SuppressWarnings("deprecation") public void testToString() { - TableSplit split = - new TableSplit(TableName.valueOf(name.getMethodName()), Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location"); - String str = - "HBase table split(table name: " + name.getMethodName() + ", start row: row-start, " - + "end row: row-end, region location: location)"; + TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); + String str = "HBase table split(table name: " + name.getMethodName() + + ", start row: row-start, " + "end row: row-end, region location: location)"; Assert.assertEquals(str, split.toString()); split = new TableSplit((TableName) null, null, null, 
null); - str = - "HBase table split(table name: null, start row: null, " - + "end row: null, region location: null)"; + str = "HBase table split(table name: null, start row: null, " + + "end row: null, region location: null)"; Assert.assertEquals(str, split.toString()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java index 2c5abec8ddec..8df16da01445 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -75,7 +75,7 @@ /** * This tests the TableInputFormat and its recovery semantics */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTableInputFormat { @ClassRule @@ -109,7 +109,6 @@ public void before() throws IOException { /** * Setup a table with two rows and values. - * * @param tableName the name of the table to create * @return A Table instance for the created table. * @throws IOException @@ -120,7 +119,6 @@ public static Table createTable(byte[] tableName) throws IOException { /** * Setup a table with two rows and values per column family. - * * @param tableName * @return A Table instance for the created table. * @throws IOException @@ -142,15 +140,14 @@ public static Table createTable(byte[] tableName, byte[][] families) throws IOEx /** * Verify that the result and key have expected values. - * * @param r single row result * @param key the row key * @param expectedKey the expected key * @param expectedValue the expected value * @return true if the result contains the expected key and value, false otherwise. */ - static boolean checkResult(Result r, ImmutableBytesWritable key, - byte[] expectedKey, byte[] expectedValue) { + static boolean checkResult(Result r, ImmutableBytesWritable key, byte[] expectedKey, + byte[] expectedValue) { assertEquals(0, key.compareTo(expectedKey)); Map vals = r.getFamilyMap(FAMILY); byte[] value = vals.values().iterator().next(); @@ -159,9 +156,7 @@ static boolean checkResult(Result r, ImmutableBytesWritable key, } /** - * Create table data and run tests on specified htable using the - * o.a.h.hbase.mapred API. - * + * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API. 
* @param table * @throws IOException */ @@ -192,11 +187,9 @@ static void runTestMapred(Table table) throws IOException { /** * Create a table that IOE's on first scanner next call - * * @throws IOException */ - static Table createIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -225,13 +218,10 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { } /** - * Create a table that throws a DoNoRetryIOException on first scanner next - * call - * + * Create a table that throws a DoNotRetryIOException on first scanner next call * @throws IOException */ - static Table createDNRIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -246,8 +236,7 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { ResultScanner scanner = mock(ResultScanner.class); invocation.callRealMethod(); // simulate NotServingRegionException - doThrow( - new NotServingRegionException("Injected simulated TimeoutException")) + doThrow(new NotServingRegionException("Injected simulated TimeoutException")) .when(scanner).next(); return scanner; } @@ -264,7 +253,6 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { /** * Run test assuming no errors using mapred api. - * * @throws IOException */ @Test @@ -275,7 +263,6 @@ public void testTableRecordReader() throws IOException { /** * Run test assuming Scanner IOException failure using mapred api, - * * @throws IOException */ @Test @@ -286,7 +273,6 @@ public void testTableRecordReaderScannerFail() throws IOException { /** * Run test assuming Scanner IOException failure using mapred api, - * * @throws IOException */ @Test(expected = IOException.class) @@ -297,7 +283,6 @@ public void testTableRecordReaderScannerFailTwice() throws IOException { /** * Run test assuming NotServingRegionException using mapred api. - * * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @Test @@ -308,7 +293,6 @@ public void testTableRecordReaderScannerTimeout() throws IOException { /** * Run test assuming NotServingRegionException using mapred api. 
- * * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class) @@ -330,8 +314,8 @@ public void testExtensionOfTableInputFormatBase() throws IOException { @Test public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " - + "as it was given in 0.98."); + LOG.info( + "testing use of an InputFormat that extends InputFormatBase, " + "as it was given in 0.98."); final Table table = createTable(Bytes.toBytes("exampleDeprecatedTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleDeprecatedTIF.class); @@ -339,8 +323,8 @@ public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException { @Test public void testJobConfigurableExtensionOfTableInputFormatBase() throws IOException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " - + "using JobConfigurable."); + LOG.info( + "testing use of an InputFormat that extends InputFormatBase, " + "using JobConfigurable."); final Table table = createTable(Bytes.toBytes("exampleJobConfigurableTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleJobConfigurableTIF.class); @@ -378,17 +362,19 @@ public void configure(JobConf conf) { @Override public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) throws IOException { for (Cell cell : value.listCells()) { - reporter.getCounter(TestTableInputFormat.class.getName() + ":row", - Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + reporter + .getCounter(TestTableInputFormat.class.getName() + ":row", + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) .increment(1l); - reporter.getCounter(TestTableInputFormat.class.getName() + ":family", - Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) + reporter + .getCounter(TestTableInputFormat.class.getName() + ":family", + Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) .increment(1l); - reporter.getCounter(TestTableInputFormat.class.getName() + ":value", - Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) + reporter + .getCounter(TestTableInputFormat.class.getName() + ":value", + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) .increment(1l); } } @@ -408,12 +394,11 @@ public void configure(JobConf job) { Table exampleTable = connection.getTable(TableName.valueOf("exampleDeprecatedTable")); // mandatory initializeTable(connection, exampleTable.getName()); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // mandatory setInputColumns(inputColumns); Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); // optional setRowFilter(exampleFilter); } catch (IOException exception) { @@ -440,7 +425,6 @@ protected void initialize(JobConf job) throws IOException { } } - public static class ExampleTIF extends TableInputFormatBase { @Override @@ -453,12 +437,11 @@ protected void initialize(JobConf job, String table) throws 
IOException { TableName tableName = TableName.valueOf(table); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // mandatory setInputColumns(inputColumns); Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); // optional setRowFilter(exampleFilter); } @@ -466,4 +449,3 @@ protected void initialize(JobConf job, String table) throws IOException { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java index e36847613062..cff3a831facd 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,11 +43,11 @@ import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) @SuppressWarnings("deprecation") public class TestTableMapReduce extends TestTableMapReduceBase { @@ -55,24 +55,23 @@ public class TestTableMapReduce extends TestTableMapReduceBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestTableMapReduce.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestTableMapReduce.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class.getName()); - protected Logger getLog() { return LOG; } + protected Logger getLog() { + return LOG; + } /** * Pass the given key and processed record reduce */ - static class ProcessContentsMapper extends MapReduceBase implements - TableMap { + static class ProcessContentsMapper extends MapReduceBase + implements TableMap { /** * Pass the key, and reversed value to reduce */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { output.collect(key, TestTableMapReduceBase.map(key, value)); } } @@ -86,8 +85,8 @@ protected void runTestOnTable(Table table) throws IOException { jobConf.setJobName("process column contents"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(table.getName().getNameAsString(), - Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class, - ImmutableBytesWritable.class, Put.class, jobConf); + Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class, ImmutableBytesWritable.class, + Put.class, jobConf); TableMapReduceUtil.initTableReduceJob(table.getName().getNameAsString(), IdentityTableReduce.class, jobConf); 
@@ -105,4 +104,3 @@ protected void runTestOnTable(Table table) throws IOException { } } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java index e28cbb63d104..4374fdcf06b9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -57,15 +57,14 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTableMapReduceUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestTableMapReduceUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduceUtil.class); private static Table presidentsTable; private static final String TABLE_NAME = "People"; @@ -73,20 +72,19 @@ public class TestTableMapReduceUtil { private static final byte[] COLUMN_FAMILY = Bytes.toBytes("info"); private static final byte[] COLUMN_QUALIFIER = Bytes.toBytes("name"); - private static ImmutableSet presidentsRowKeys = ImmutableSet.of( - "president1", "president2", "president3"); - private static Iterator presidentNames = ImmutableSet.of( - "John F. Kennedy", "George W. Bush", "Barack Obama").iterator(); + private static ImmutableSet presidentsRowKeys = + ImmutableSet.of("president1", "president2", "president3"); + private static Iterator presidentNames = + ImmutableSet.of("John F. Kennedy", "George W. Bush", "Barack Obama").iterator(); - private static ImmutableSet actorsRowKeys = ImmutableSet.of("actor1", - "actor2"); - private static Iterator actorNames = ImmutableSet.of( - "Jack Nicholson", "Martin Freeman").iterator(); + private static ImmutableSet actorsRowKeys = ImmutableSet.of("actor1", "actor2"); + private static Iterator actorNames = + ImmutableSet.of("Jack Nicholson", "Martin Freeman").iterator(); private static String PRESIDENT_PATTERN = "president"; private static String ACTOR_PATTERN = "actor"; - private static ImmutableMap> relation = ImmutableMap - .of(PRESIDENT_PATTERN, presidentsRowKeys, ACTOR_PATTERN, actorsRowKeys); + private static ImmutableMap> relation = + ImmutableMap.of(PRESIDENT_PATTERN, presidentsRowKeys, ACTOR_PATTERN, actorsRowKeys); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -133,12 +131,11 @@ private static void createPutCommand(Table table) throws IOException { } /** - * Check what the given number of reduce tasks for the given job configuration - * does not exceed the number of regions for the given table. + * Check that the given number of reduce tasks for the given job configuration does not exceed the + * number of regions for the given table. 
*/ @Test - public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() - throws IOException { + public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Assert.assertNotNull(presidentsTable); Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); @@ -155,8 +152,7 @@ public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() } @Test - public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() - throws IOException { + public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); TableMapReduceUtil.setNumReduceTasks(TABLE_NAME, jobConf); @@ -178,49 +174,42 @@ public void shoudBeValidMapReduceEvaluation() throws Exception { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), - ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, - jobConf); - TableMapReduceUtil.initTableReduceJob(TABLE_NAME, - ClassificatorRowReduce.class, jobConf); + ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); + TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { - if (jobConf != null) - FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); + if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } } @Test @SuppressWarnings("deprecation") - public void shoudBeValidMapReduceWithPartitionerEvaluation() - throws IOException { + public void shoudBeValidMapReduceWithPartitionerEvaluation() throws IOException { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); try { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(2); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), - ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, - jobConf); + ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); - TableMapReduceUtil.initTableReduceJob(TABLE_NAME, - ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class); + TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf, + HRegionPartitioner.class); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { - if (jobConf != null) - FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); + if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } } @SuppressWarnings("deprecation") - static class ClassificatorRowReduce extends MapReduceBase implements - TableReduce { + static class ClassificatorRowReduce extends MapReduceBase + implements TableReduce { @Override public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector output, Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { String strKey = Bytes.toString(key.get()); List result = new ArrayList<>(); while (values.hasNext()) @@ -244,18 +233,17 @@ private void throwAccertionError(String errorMessage) throws AssertionError { } @SuppressWarnings("deprecation") - static class ClassificatorMapper extends MapReduceBase implements - TableMap { + static class ClassificatorMapper extends MapReduceBase + implements TableMap { @Override public void 
map(ImmutableBytesWritable row, Result result, - OutputCollector outCollector, - Reporter reporter) throws IOException { + OutputCollector outCollector, Reporter reporter) + throws IOException { String rowKey = Bytes.toString(result.getRow()); - final ImmutableBytesWritable pKey = new ImmutableBytesWritable( - Bytes.toBytes(PRESIDENT_PATTERN)); - final ImmutableBytesWritable aKey = new ImmutableBytesWritable( - Bytes.toBytes(ACTOR_PATTERN)); + final ImmutableBytesWritable pKey = + new ImmutableBytesWritable(Bytes.toBytes(PRESIDENT_PATTERN)); + final ImmutableBytesWritable aKey = new ImmutableBytesWritable(Bytes.toBytes(ACTOR_PATTERN)); ImmutableBytesWritable outKey = null; if (rowKey.startsWith(PRESIDENT_PATTERN)) { @@ -266,11 +254,9 @@ public void map(ImmutableBytesWritable row, Result result, throw new AssertionError("unexpected rowKey"); } - String name = Bytes.toString(result.getValue(COLUMN_FAMILY, - COLUMN_QUALIFIER)); - outCollector.collect(outKey, - new Put(Bytes.toBytes("rowKey2")) - .addColumn(COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(name))); + String name = Bytes.toString(result.getValue(COLUMN_FAMILY, COLUMN_QUALIFIER)); + outCollector.collect(outKey, new Put(Bytes.toBytes("rowKey2")).addColumn(COLUMN_FAMILY, + COLUMN_QUALIFIER, Bytes.toBytes(name))); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java index b14bc9aac24c..cac9179f4cc9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,10 +37,9 @@ import org.slf4j.LoggerFactory; /** - * Spark creates many instances of TableOutputFormat within a single process. We need to make - * sure we can have many instances and not leak connections. - * - * This test creates a few TableOutputFormats and shouldn't fail due to ZK connection exhaustion. + * Spark creates many instances of TableOutputFormat within a single process. We need to make sure + * we can have many instances and not leak connections. This test creates a few TableOutputFormats + * and shouldn't fail due to ZK connection exhaustion. */ @Category(MediumTests.class) public class TestTableOutputFormatConnectionExhaust { @@ -77,16 +76,16 @@ public void before() throws IOException { } /** - * Open and close a TableOutputFormat. The closing the RecordWriter should release HBase + * Open and close a TableOutputFormat. Closing the RecordWriter should release HBase * Connection (ZK) resources, and will throw exception if they are exhausted. 
*/ - static void openCloseTableOutputFormat(int iter) throws IOException { + static void openCloseTableOutputFormat(int iter) throws IOException { LOG.info("Instantiating TableOutputFormat connection " + iter); JobConf conf = new JobConf(); conf.addResource(UTIL.getConfiguration()); conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE); - TableMapReduceUtil.initTableMapJob(TABLE, FAMILY, TableMap.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, conf); + TableMapReduceUtil.initTableMapJob(TABLE, FAMILY, TableMap.class, ImmutableBytesWritable.class, + ImmutableBytesWritable.class, conf); TableOutputFormat tof = new TableOutputFormat(); RecordWriter rw = tof.getRecordWriter(null, conf, TABLE, null); rw.close(null); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java index f0556ca8ee7d..166467ff4c59 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { @ClassRule @@ -63,7 +63,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa private static final byte[] aaa = Bytes.toBytes("aaa"); private static final byte[] after_zzz = Bytes.toBytes("zz{"); // 'z' + 1 => '{' private static final String COLUMNS = - Bytes.toString(FAMILIES[0]) + " " + Bytes.toString(FAMILIES[1]); + Bytes.toString(FAMILIES[0]) + " " + Bytes.toString(FAMILIES[1]); @Rule public TestName name = new TestName(); @@ -92,7 +92,7 @@ public void map(ImmutableBytesWritable key, Result value, public static class TestTableSnapshotReducer extends MapReduceBase implements Reducer { HBaseTestingUtil.SeenRowTracker rowTracker = - new HBaseTestingUtil.SeenRowTracker(aaa, after_zzz); + new HBaseTestingUtil.SeenRowTracker(aaa, after_zzz); @Override public void reduce(ImmutableBytesWritable key, Iterator values, @@ -117,19 +117,17 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { JobConf job = new JobConf(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. 
- Assert.assertEquals( - "Snapshot job should be configured for default LruBlockCache.", + Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals( - "Snapshot job should not use BucketCache.", - 0, job.getFloat("hbase.bucketcache.size", -1), 0.01); + Assert.assertEquals("Snapshot job should not use BucketCache.", 0, + job.getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -142,10 +140,9 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { @Test @Override public void testWithMockedMapReduceMultiRegion() throws Exception { - testWithMockedMapReduce( - UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10, true); - // It does not matter whether true or false is given to setLocalityEnabledTo, - // because it is not read in testWithMockedMapReduce(). + testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10, true); + // It does not matter whether true or false is given to setLocalityEnabledTo, + // because it is not read in testWithMockedMapReduce(). } @Test @@ -165,19 +162,17 @@ public void testWithMapReduceAndOfflineHBaseMultiRegion() throws Exception { public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, String snapshotName, Path tmpTableDir) throws Exception { JobConf job = new JobConf(UTIL.getConfiguration()); - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, TestTableSnapshotMapper.class, + ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir); } @Override - protected void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, - int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) + protected void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { - createTableAndSnapshot( - util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); + createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); JobConf job = new JobConf(util.getConfiguration()); // setLocalityEnabledTo is ignored no matter what is specified, so as to test the case that @@ -186,14 +181,13 @@ protected void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotNam Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName); if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, 
tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir); } // mapred doesn't support start and end keys? o.O @@ -213,7 +207,7 @@ private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expected Assert.assertEquals(expectedNumSplits, splits.length); HBaseTestingUtil.SeenRowTracker rowTracker = - new HBaseTestingUtil.SeenRowTracker(startRow, stopRow); + new HBaseTestingUtil.SeenRowTracker(startRow, stopRow); // SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY is not explicitly specified, // so the default value is taken. @@ -226,7 +220,7 @@ private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expected if (localityEnabled) { // When localityEnabled is true, meant to verify split.getLocations() // by the following statement: - // Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0); + // Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0); // However, getLocations() of some splits could return an empty array (length is 0), // so drop the verification on length. // TODO: investigate how to verify split.getLocations() when localityEnabled is true @@ -266,9 +260,9 @@ protected void testWithMapReduceImpl(HBaseTestingUtil util, TableName tableName, // this is also called by the IntegrationTestTableSnapshotInputFormat public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableName, String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, - int numSplitsPerRegion,int expectedNumSplits, boolean shutdownCluster) throws Exception { + int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { - //create the table and snapshot + // create the table and snapshot createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions); if (shutdownCluster) { @@ -283,15 +277,14 @@ public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableNam org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(jobConf, TestTableSnapshotInputFormat.class); - if(numSplitsPerRegion > 1) { + if (numSplitsPerRegion > 1) { TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, - TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, jobConf, true, tableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, jobConf, + true, tableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, - TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, jobConf, true, tableDir); + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, jobConf, + true, tableDir); } jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index 28d44edb76b4..911c73f3f675 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -1,4 +1,4 @@ -/** +/* * 
Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; @@ -80,8 +79,7 @@ public static void setUpBeforeClass() throws Exception { // create and fill table for (String tableName : TABLES) { try (Table table = - TEST_UTIL.createMultiRegionTable(TableName.valueOf(tableName), - INPUT_FAMILY, 4)) { + TEST_UTIL.createMultiRegionTable(TableName.valueOf(tableName), INPUT_FAMILY, 4)) { TEST_UTIL.loadTable(table, INPUT_FAMILY, false); } } @@ -101,11 +99,10 @@ public void tearDown() throws Exception { /** * Pass the key and value to reducer. */ - public static class ScanMapper extends - TableMapper { + public static class ScanMapper + extends TableMapper { /** * Pass the key and value to reduce. - * * @param key The key, here "aaa", "aab" etc. * @param value The value is the same as the key. * @param context The task context. @@ -122,15 +119,13 @@ public void makeAssertions(ImmutableBytesWritable key, Result value) throws IOEx if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> cf = - value.getMap(); + Map>> cf = value.getMap(); if (!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } String val = Bytes.toStringBinary(value.getValue(INPUT_FAMILY, null)); - LOG.debug("map: key -> " + Bytes.toStringBinary(key.get()) + - ", value -> " + val); + LOG.debug("map: key -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); } } @@ -138,16 +133,13 @@ public void makeAssertions(ImmutableBytesWritable key, Result value) throws IOEx * Checks the last and first keys seen against the scanner boundaries. 
*/ public static class ScanReducer - extends - Reducer { + extends Reducer { private String first = null; private String last = null; @Override - protected void reduce(ImmutableBytesWritable key, - Iterable values, Context context) - throws IOException, InterruptedException { + protected void reduce(ImmutableBytesWritable key, Iterable values, + Context context) throws IOException, InterruptedException { makeAssertions(key, values); } @@ -156,8 +148,8 @@ protected void makeAssertions(ImmutableBytesWritable key, int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); - LOG.debug("reduce: key[" + count + "] -> " + - Bytes.toStringBinary(key.get()) + ", value -> " + val); + LOG.debug( + "reduce: key[" + count + "] -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); if (first == null) first = val; last = val; count++; @@ -166,8 +158,7 @@ protected void makeAssertions(ImmutableBytesWritable key, } @Override - protected void cleanup(Context context) throws IOException, - InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { Configuration c = context.getConfiguration(); cleanup(c); } @@ -175,10 +166,8 @@ protected void cleanup(Context context) throws IOException, protected void cleanup(Configuration c) { String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); - LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + - startRow + "\""); - LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + - "\""); + LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + startRow + "\""); + LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + "\""); if (startRow != null && startRow.length() > 0) { assertEquals(startRow, first); } @@ -189,41 +178,38 @@ protected void cleanup(Configuration c) { } @Test - public void testScanEmptyToEmpty() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanEmptyToEmpty() + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, null, null); } @Test - public void testScanEmptyToAPP() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanEmptyToAPP() + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "app", "apo"); } @Test - public void testScanOBBToOPP() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanOBBToOPP() throws IOException, InterruptedException, ClassNotFoundException { testScan("obb", "opp", "opo"); } @Test - public void testScanYZYToEmpty() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanYZYToEmpty() + throws IOException, InterruptedException, ClassNotFoundException { testScan("yzy", null, "zzz"); } /** * Tests a MR scan using specific start and stop rows. - * * @throws IOException * @throws ClassNotFoundException * @throws InterruptedException */ private void testScan(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { - String jobName = - "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + - (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + + (stop != null ? 
stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -269,5 +255,4 @@ protected void runJob(String jobName, Configuration c, List scans) protected abstract void initJob(List scans, Job job) throws IOException; - } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java index e022bfdbd494..091bc2197830 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.io.DataOutput; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; @@ -33,17 +31,16 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; /** - * Input format that creates a configurable number of map tasks - * each provided with a single row of NullWritables. This can be - * useful when trying to write mappers which don't have any real - * input (eg when the mapper is simply producing random data as output) + * Input format that creates a configurable number of map tasks each provided with a single row of + * NullWritables. This can be useful when trying to write mappers which don't have any real input + * (eg when the mapper is simply producing random data as output) */ public class NMapInputFormat extends InputFormat { private static final String NMAPS_KEY = "nmapinputformat.num.maps"; @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext tac) { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext tac) { return new SingleRecordReader<>(NullWritable.get(), NullWritable.get()); } @@ -85,8 +82,7 @@ public void write(DataOutput out) { } } - private static class SingleRecordReader - extends RecordReader { + private static class SingleRecordReader extends RecordReader { private final K key; private final V value; @@ -107,7 +103,7 @@ public K getCurrentKey() { } @Override - public V getCurrentValue(){ + public V getCurrentValue() { return value; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java index 63e9cdb48688..bb6ed42dca5b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ public abstract class TableSnapshotInputFormatTestBase { private static final Logger LOG = LoggerFactory.getLogger(TableSnapshotInputFormatTestBase.class); protected final HBaseTestingUtil UTIL = new HBaseTestingUtil(); protected static final int NUM_REGION_SERVERS = 2; - protected static final byte[][] FAMILIES = {Bytes.toBytes("f1"), Bytes.toBytes("f2")}; + protected static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2") }; protected FileSystem fs; protected Path rootDir; @@ -61,9 +61,9 @@ public abstract class TableSnapshotInputFormatTestBase { @Before public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numRegionServers(NUM_REGION_SERVERS).numDataNodes(NUM_REGION_SERVERS) - .createRootDir(true).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().numRegionServers(NUM_REGION_SERVERS) + .numDataNodes(NUM_REGION_SERVERS).createRootDir(true).build(); UTIL.startMiniCluster(option); rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); fs = rootDir.getFileSystem(UTIL.getConfiguration()); @@ -80,12 +80,12 @@ private static void setupConf(Configuration conf) { } protected abstract void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, - int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) - throws Exception; + int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) + throws Exception; protected abstract void testWithMapReduceImpl(HBaseTestingUtil util, TableName tableName, - String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, - int expectedNumSplits, boolean shutdownCluster) throws Exception; + String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, + int expectedNumSplits, boolean shutdownCluster) throws Exception; protected abstract byte[] getStartRow(); @@ -128,7 +128,7 @@ public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception { Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName,tmpTableDir); + testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName, tmpTableDir); Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); for (Path regionDir : FSUtils.getRegionDirs(fs, @@ -169,32 +169,31 @@ protected void testWithMapReduce(HBaseTestingUtil util, String snapshotName, int } protected static void verifyRowFromMap(ImmutableBytesWritable key, Result result) - throws IOException { + throws IOException { byte[] row = key.get(); CellScanner scanner = result.cellScanner(); while (scanner.advance()) { Cell cell = scanner.current(); - //assert that all Cells in the Result have the same key - Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + // assert that all Cells in the Result have the same key + Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength())); } for (byte[] family : FAMILIES) { byte[] actual = result.getValue(family, family); - Assert.assertArrayEquals( - "Row in snapshot does not match, expected:" + Bytes.toString(row) + " ,actual:" + Bytes - .toString(actual), row, actual); + Assert.assertArrayEquals("Row in 
snapshot does not match, expected:" + Bytes.toString(row) + + " ,actual:" + Bytes.toString(actual), + row, actual); } } protected static void createTableAndSnapshot(HBaseTestingUtil util, TableName tableName, - String snapshotName, byte[] startRow, byte[] endRow, int numRegions) - throws Exception { + String snapshotName, byte[] startRow, byte[] endRow, int numRegions) throws Exception { try { LOG.debug("Ensuring table doesn't exist."); util.deleteTable(tableName); - } catch(Exception ex) { + } catch (Exception ex) { // ignore } @@ -214,8 +213,8 @@ protected static void createTableAndSnapshot(HBaseTestingUtil util, TableName ta FileSystem fs = rootDir.getFileSystem(util.getConfiguration()); LOG.info("snapshot"); - SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, - Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true); + SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES), null, + snapshotName, rootDir, fs, true); LOG.info("load different values"); byte[] value = Bytes.toBytes("after_snapshot_value"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java index 5bc548b1e871..1177007f03eb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCellCounter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -66,8 +66,8 @@ public class TestCellCounter { private static final byte[] QUALIFIER = Bytes.toBytes("q"); private static Path FQ_OUTPUT_DIR; - private static final String OUTPUT_DIR = "target" + File.separator + "test-data" + File.separator - + "output"; + private static final String OUTPUT_DIR = + "target" + File.separator + "test-data" + File.separator + "output"; private static long now = EnvironmentEdgeManager.currentTime(); @Rule @@ -87,7 +87,6 @@ public static void afterClass() throws Exception { /** * Test CellCounter all data should print to output - * */ @Test public void testCellCounter() throws Exception { @@ -107,7 +106,7 @@ public void testCellCounter() throws Exception { String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1" }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -143,7 +142,7 @@ public void testCellCounterPrefix() throws Exception { String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "\\x01row1" }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); 
assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -177,10 +176,10 @@ public void testCellCounterStartTimeRange() throws Exception { p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23")); t.put(p); String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1", - "--starttime=" + now, "--endtime=" + now + 2 }; + "--starttime=" + now, "--endtime=" + now + 2 }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -214,10 +213,10 @@ public void testCellCounteEndTimeRange() throws Exception { p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23")); t.put(p); String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1", - "--endtime=" + now + 1 }; + "--endtime=" + now + 1 }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -250,13 +249,12 @@ public void testCellCounteOutOfTimeRange() throws Exception { p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22")); p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23")); t.put(p); - String[] args = - { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "--starttime=" + now + 1, - "--endtime=" + now + 2 }; + String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", + "--starttime=" + now + 1, "--endtime=" + now + 2 }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); // nothing should hace been emitted to the reducer @@ -269,8 +267,8 @@ public void testCellCounteOutOfTimeRange() throws Exception { private boolean runCount(String[] args) throws Exception { // need to make a copy of the configuration because to make sure // different temp dirs are used. 
- int status = ToolRunner.run(new Configuration(UTIL.getConfiguration()), new CellCounter(), - args); + int status = + ToolRunner.run(new Configuration(UTIL.getConfiguration()), new CellCounter(), args); return status == 0; } @@ -281,7 +279,7 @@ private boolean runCount(String[] args) throws Exception { public void testCellCounterMain() throws Exception { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -313,9 +311,8 @@ public void testCellCounterForCompleteTable() throws Exception { final TableName sourceTable = TableName.valueOf(name.getMethodName()); String outputPath = OUTPUT_DIR + sourceTable; LocalFileSystem localFileSystem = new LocalFileSystem(); - Path outputDir = - new Path(outputPath).makeQualified(localFileSystem.getUri(), - localFileSystem.getWorkingDirectory()); + Path outputDir = new Path(outputPath).makeQualified(localFileSystem.getUri(), + localFileSystem.getWorkingDirectory()); byte[][] families = { FAMILY_A, FAMILY_B }; Table t = UTIL.createTable(sourceTable, families); try { @@ -347,7 +344,7 @@ public void testCellCounterForCompleteTable() throws Exception { FileUtil.fullyDelete(new File(outputPath)); args = new String[] { "-D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=a, b", - sourceTable.getNameAsString(), outputDir.toString(), ";"}; + sourceTable.getNameAsString(), outputDir.toString(), ";" }; runCount(args); inputStream = new FileInputStream(outputPath + File.separator + "part-r-00000"); String data2 = IOUtils.toString(inputStream); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index f25a9862d63a..5f60dcf1603d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; import org.apache.hadoop.util.ToolRunner; - import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -60,7 +59,7 @@ /** * Basic test for the CopyTable M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCopyTable { @ClassRule @@ -96,20 +95,19 @@ private void doCopyTableTest(boolean bulkload) throws Exception { final byte[] COLUMN1 = Bytes.toBytes("c1"); try (Table t1 = TEST_UTIL.createTable(tableName1, FAMILY); - Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { + Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { // put rows into the first table loadData(t1, FAMILY, COLUMN1); CopyTable copy = new CopyTable(); int code; if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - "--bulkload", tableName1.getNameAsString() }); + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", + tableName1.getNameAsString() }); } else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - tableName1.getNameAsString() }); + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); } assertEquals("copy job failed", 0, code); @@ -131,15 +129,13 @@ private void doCopyTableTestWithMob(boolean bulkload) throws Exception { cfd.setMobEnabled(true); cfd.setMobThreshold(5); - TableDescriptor desc1 = TableDescriptorBuilder.newBuilder(tableName1) - .setColumnFamily(cfd.build()) - .build(); - TableDescriptor desc2 = TableDescriptorBuilder.newBuilder(tableName2) - .setColumnFamily(cfd.build()) - .build(); + TableDescriptor desc1 = + TableDescriptorBuilder.newBuilder(tableName1).setColumnFamily(cfd.build()).build(); + TableDescriptor desc2 = + TableDescriptorBuilder.newBuilder(tableName2).setColumnFamily(cfd.build()).build(); try (Table t1 = TEST_UTIL.createTable(desc1, null); - Table t2 = TEST_UTIL.createTable(desc2, null);) { + Table t2 = TEST_UTIL.createTable(desc2, null);) { // put rows into the first table for (int i = 0; i < 10; i++) { @@ -152,13 +148,12 @@ private void doCopyTableTestWithMob(boolean bulkload) throws Exception { int code; if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - "--bulkload", tableName1.getNameAsString() }); + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", + tableName1.getNameAsString() }); } else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - tableName1.getNameAsString() }); + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); } assertEquals("copy job failed", 0, code); @@ -169,17 +164,14 @@ private void 
doCopyTableTestWithMob(boolean bulkload) throws Exception { assertEquals(1, r.size()); assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1)); assertEquals("compare row values between two tables", - t1.getDescriptor().getValue("row" + i), - t2.getDescriptor().getValue("row" + i)); + t1.getDescriptor().getValue("row" + i), t2.getDescriptor().getValue("row" + i)); } assertEquals("compare count of mob rows after table copy", MobTestUtil.countMobRows(t1), - MobTestUtil.countMobRows(t2)); + MobTestUtil.countMobRows(t2)); assertEquals("compare count of mob row values between two tables", - t1.getDescriptor().getValues().size(), - t2.getDescriptor().getValues().size()); - assertTrue("The mob row count is 0 but should be > 0", - MobTestUtil.countMobRows(t2) > 0); + t1.getDescriptor().getValues().size(), t2.getDescriptor().getValues().size()); + assertTrue("The mob row count is 0 but should be > 0", MobTestUtil.countMobRows(t2) > 0); } finally { TEST_UTIL.deleteTable(tableName1); TEST_UTIL.deleteTable(tableName2); @@ -229,7 +221,7 @@ public void testStartStopRow() throws Exception { final byte[] row2 = Bytes.toBytesBinary("\\x01row2"); try (Table t1 = TEST_UTIL.createTable(tableName1, FAMILY); - Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { + Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) { // put rows into the first table Put p = new Put(row0); @@ -243,9 +235,10 @@ public void testStartStopRow() throws Exception { t1.put(p); CopyTable copy = new CopyTable(); - assertEquals(0, ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[]{"--new.name=" + tableName2, "--startrow=\\x01row1", - "--stoprow=\\x01row2", tableName1.getNameAsString()})); + assertEquals(0, + ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2, "--startrow=\\x01row1", "--stoprow=\\x01row2", + tableName1.getNameAsString() })); // verify the data was copied into table 2 // row1 exist, row0, row2 do not exist @@ -293,8 +286,8 @@ public void testRenameFamily() throws Exception { long currentTime = EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--new.name=" + targetTable, "--families=a:b", "--all.cells", - "--starttime=" + (currentTime - 100000), "--endtime=" + (currentTime + 100000), - "--versions=1", sourceTable.getNameAsString() }; + "--starttime=" + (currentTime - 100000), "--endtime=" + (currentTime + 100000), + "--versions=1", sourceTable.getNameAsString() }; assertNull(t2.get(new Get(ROW1)).getRow()); assertTrue(runCopy(args)); @@ -322,7 +315,7 @@ public void testMainMethod() throws Exception { PrintStream writer = new PrintStream(data); System.setErr(writer); SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); try { CopyTable.main(emptyArgs); @@ -339,8 +332,8 @@ public void testMainMethod() throws Exception { } private boolean runCopy(String[] args) throws Exception { - int status = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), new CopyTable(), - args); + int status = + ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), new CopyTable(), args); return status == 0; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java index 46a449a43599..d1126ce59223 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestGroupingTableMapper { @ClassRule @@ -60,10 +60,10 @@ public void testGroupingTableMapper() throws Exception { context.write(any(), any()); List keyValue = new ArrayList<>(); byte[] row = {}; - keyValue.add(new KeyValue(row, Bytes.toBytes("family2"), Bytes.toBytes("clm"), Bytes - .toBytes("value1"))); - keyValue.add(new KeyValue(row, Bytes.toBytes("family1"), Bytes.toBytes("clm"), Bytes - .toBytes("value2"))); + keyValue.add( + new KeyValue(row, Bytes.toBytes("family2"), Bytes.toBytes("clm"), Bytes.toBytes("value1"))); + keyValue.add( + new KeyValue(row, Bytes.toBytes("family1"), Bytes.toBytes("clm"), Bytes.toBytes("value2"))); when(result.listCells()).thenReturn(keyValue); mapper.map(null, result, context); // template data diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java index 09b9b5eea646..e76e46b02698 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -25,14 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestHBaseMRTestingUtility { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -55,17 +54,21 @@ public void testMRYarnConfigsPopulation() throws IOException { hbt.getConfiguration().set(entry.getKey(), entry.getValue()); } - for (Map.Entry entry : dummyProps.entrySet()) { - assertTrue("The Configuration for key " + entry.getKey() +" and value: " + entry.getValue() + - " is not populated correctly", hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + for (Map.Entry entry : dummyProps.entrySet()) { + assertTrue( + "The Configuration for key " + entry.getKey() + " and value: " + entry.getValue() + + " is not populated correctly", + hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); } hbt.startMiniMapReduceCluster(); // Confirm that MiniMapReduceCluster overwrites the mr properties and updates the Configuration - for (Map.Entry entry : dummyProps.entrySet()) { - assertFalse("The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini"+ - "cluster is started", hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + for (Map.Entry entry : dummyProps.entrySet()) { + assertFalse( + "The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini" + + "cluster is started", + hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); } hbt.shutdownMiniMapReduceCluster(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 7adbbc62821a..91bd2ed7f2b3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -134,13 +134,12 @@ import org.slf4j.LoggerFactory; /** - * Simple test for {@link HFileOutputFormat2}. - * Sets up and runs a mapreduce job that writes hfile output. - * Creates a few inner classes to implement splits and an inputformat that - * emits keys and values like those of {@link PerformanceEvaluation}. + * Simple test for {@link HFileOutputFormat2}. Sets up and runs a mapreduce job that writes hfile + * output. Creates a few inner classes to implement splits and an inputformat that emits keys and + * values like those of {@link PerformanceEvaluation}. 
*/ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) -public class TestHFileOutputFormat2 { +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +public class TestHFileOutputFormat2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -149,10 +148,10 @@ public class TestHFileOutputFormat2 { private final static int ROWSPERSPLIT = 1024; public static final byte[] FAMILY_NAME = TestHRegionFileSystem.FAMILY_NAME; - private static final byte[][] FAMILIES = { - Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B"))}; - private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", - "TestTable3").map(TableName::valueOf).toArray(TableName[]::new); + private static final byte[][] FAMILIES = + { Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; + private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", "TestTable3") + .map(TableName::valueOf).toArray(TableName[]::new); private HBaseTestingUtil util = new HBaseTestingUtil(); @@ -162,45 +161,39 @@ public class TestHFileOutputFormat2 { * Simple mapper that makes KeyValue output. */ static class RandomKVGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; - private static final int KEYLEN_DEFAULT=10; - private static final String KEYLEN_CONF="randomkv.key.length"; + private static final int KEYLEN_DEFAULT = 10; + private static final String KEYLEN_CONF = "randomkv.key.length"; private int valLength; - private static final int VALLEN_DEFAULT=10; - private static final String VALLEN_CONF="randomkv.val.length"; - private static final byte [] QUALIFIER = Bytes.toBytes("data"); + private static final int VALLEN_DEFAULT = 10; + private static final String VALLEN_CONF = "randomkv.val.length"; + private static final byte[] QUALIFIER = Bytes.toBytes("data"); private boolean multiTableMapper = false; private TableName[] tables = null; - @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException ,InterruptedException - { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -232,8 +225,7 @@ protected void map( * Simple mapper that makes Put output. 
*/ static class RandomPutGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; private static final int KEYLEN_DEFAULT = 10; @@ -247,28 +239,25 @@ static class RandomPutGeneratingMapper private TableName[] tables = null; @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException, InterruptedException { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -315,28 +304,27 @@ private void setupRandomGeneratorMapper(Job job, boolean putSortReducer) { } /** - * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if - * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}. + * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if passed a keyvalue whose + * timestamp is {@link HConstants#LATEST_TIMESTAMP}. * @see HBASE-2615 */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test - public void test_LATEST_TIMESTAMP_isReplaced() - throws Exception { + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test + public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); + Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be - // changed by call to write. Check all in kv is same but ts. + // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be + // changed by call to write. Check all in kv is same but ts. KeyValue kv = new KeyValue(b, b, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); @@ -347,7 +335,7 @@ public void test_LATEST_TIMESTAMP_isReplaced() assertNotSame(original.getTimestamp(), kv.getTimestamp()); assertNotSame(HConstants.LATEST_TIMESTAMP, kv.getTimestamp()); - // Test 2. Now test passing a kv that has explicit ts. It should not be + // Test 2. Now test passing a kv that has explicit ts. It should not be // changed by call to record write. 
kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b); original = kv.clone(); @@ -359,26 +347,25 @@ public void test_LATEST_TIMESTAMP_isReplaced() } } - private TaskAttemptContext createTestTaskAttemptContext(final Job job) - throws Exception { + private TaskAttemptContext createTestTaskAttemptContext(final Job job) throws Exception { HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class); - TaskAttemptContext context = hadoop.createTestTaskAttemptContext( - job, "attempt_201402131733_0001_m_000000_0"); + TaskAttemptContext context = + hadoop.createTestTaskAttemptContext(job, "attempt_201402131733_0001_m_000000_0"); return context; } /* - * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE - * metadata used by time-restricted scans. + * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE metadata used by + * time-restricted scans. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void test_TIMERANGE() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_TIMERANGE_present"); - LOG.info("Timerange dir writing to dir: "+ dir); + Path dir = util.getDataTestDir("test_TIMERANGE_present"); + LOG.info("Timerange dir writing to dir: " + dir); try { // build a record writer using HFileOutputFormat2 Job job = new Job(conf); @@ -388,13 +375,13 @@ public void test_TIMERANGE() throws Exception { writer = hof.getRecordWriter(context); // Pass two key values with explicit times stamps - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); // value 1 with timestamp 2000 KeyValue kv = new KeyValue(b, b, b, 2000, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); - assertEquals(original,kv); + assertEquals(original, kv); // value 2 with timestamp 1000 kv = new KeyValue(b, b, b, 1000, b); @@ -416,14 +403,13 @@ public void test_TIMERANGE() throws Exception { // open as HFile Reader and pull out TIMERANGE FileInfo. HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf); - Map finfo = rd.getHFileInfo(); + Map finfo = rd.getHFileInfo(); byte[] range = finfo.get(Bytes.toBytes("TIMERANGE")); assertNotNull(range); // unmarshall and check values. - TimeRangeTracker timeRangeTracker =TimeRangeTracker.parseFrom(range); - LOG.info(timeRangeTracker.getMin() + - "...." + timeRangeTracker.getMax()); + TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(range); + LOG.info(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax()); assertEquals(1000, timeRangeTracker.getMin()); assertEquals(2000, timeRangeTracker.getMax()); rd.close(); @@ -436,7 +422,8 @@ public void test_TIMERANGE() throws Exception { /** * Run small MR job. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testWritingPEData() throws Exception { Configuration conf = util.getConfiguration(); Path testDir = util.getDataTestDirOnTestFS("testWritingPEData"); @@ -455,8 +442,8 @@ public void testWritingPEData() throws Exception { byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; - Arrays.fill(startKey, (byte)0); - Arrays.fill(endKey, (byte)0xff); + Arrays.fill(startKey, (byte) 0); + Arrays.fill(endKey, (byte) 0xff); job.setPartitionerClass(SimpleTotalOrderPartitioner.class); // Set start and end rows for partitioner. @@ -466,49 +453,46 @@ public void testWritingPEData() throws Exception { job.setOutputFormatClass(HFileOutputFormat2.class); job.setNumReduceTasks(4); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); FileOutputFormat.setOutputPath(job, testDir); assertTrue(job.waitForCompletion(false)); - FileStatus [] files = fs.listStatus(testDir); + FileStatus[] files = fs.listStatus(testDir); assertTrue(files.length > 0); - //check output file num and size. + // check output file num and size. for (byte[] family : FAMILIES) { - long kvCount= 0; + long kvCount = 0; RemoteIterator iterator = - fs.listFiles(testDir.suffix("/" + new String(family)), true); + fs.listFiles(testDir.suffix("/" + new String(family)), true); while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = - HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); + HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); HFileScanner scanner = reader.getScanner(conf, false, false, false); kvCount += reader.getEntries(); scanner.seekTo(); long perKVSize = scanner.getCell().getSerializedSize(); assertTrue("Data size of each file should not be too large.", - perKVSize * reader.getEntries() <= hregionMaxFilesize); + perKVSize * reader.getEntries() <= hregionMaxFilesize); } assertEquals("Should write expected data in output file.", ROWSPERSPLIT, kvCount); } } /** - * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into - * hfile. + * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into hfile. 
*/ @Test - public void test_WritingTagData() - throws Exception { + public void test_WritingTagData() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); final String HFILE_FORMAT_VERSION_CONF_KEY = "hfile.format.version"; conf.setInt(HFILE_FORMAT_VERSION_CONF_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("WritingTagData"); + Path dir = util.getDataTestDir("WritingTagData"); try { conf.set(HFileOutputFormat2.OUTPUT_TABLE_NAME_CONF_KEY, TABLE_NAMES[0].getNameAsString()); // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs @@ -518,9 +502,9 @@ public void test_WritingTagData() context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - List< Tag > tags = new ArrayList<>(); + List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(978670))); KeyValue kv = new KeyValue(b, b, b, HConstants.LATEST_TIMESTAMP, b, tags); writer.write(new ImmutableBytesWritable(), kv); @@ -528,7 +512,7 @@ public void test_WritingTagData() writer = null; FileSystem fs = dir.getFileSystem(conf); RemoteIterator iterator = fs.listFiles(dir, true); - while(iterator.hasNext()) { + while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); @@ -547,11 +531,12 @@ public void test_WritingTagData() } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testJobConfiguration() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); - conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration") - .toString()); + conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, + util.getDataTestDir("testJobConfiguration").toString()); Job job = new Job(conf); job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration")); Table table = Mockito.mock(Table.class); @@ -562,14 +547,14 @@ public void testJobConfiguration() throws Exception { assertEquals(job.getNumReduceTasks(), 4); } - private byte [][] generateRandomStartKeys(int numKeys) { + private byte[][] generateRandomStartKeys(int numKeys) { Random random = ThreadLocalRandom.current(); byte[][] ret = new byte[numKeys][]; // first region start key is always empty ret[0] = HConstants.EMPTY_BYTE_ARRAY; for (int i = 1; i < numKeys; i++) { ret[i] = - PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); + PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); } return ret; } @@ -584,34 +569,37 @@ private byte[][] generateRandomSplitKeys(int numKeys) { return ret; } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoad() throws Exception { LOG.info("\nStarting test testMRIncrementalLoad\n"); doIncrementalLoadTest(false, false, false, "testMRIncrementalLoad"); } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testMRIncrementalLoadWithSplit() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithSplit\n"); doIncrementalLoadTest(true, false, false, "testMRIncrementalLoadWithSplit"); } /** - * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true - * This test could only check the correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY - * is set to true. Because MiniHBaseCluster always run with single hostname (and different ports), - * it's not possible to check the region locality by comparing region locations and DN hostnames. - * When MiniHBaseCluster supports explicit hostnames parameter (just like MiniDFSCluster does), - * we could test region locality features more easily. + * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true This test could only check the + * correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY is set to true. Because + * MiniHBaseCluster always run with single hostname (and different ports), it's not possible to + * check the region locality by comparing region locations and DN hostnames. When MiniHBaseCluster + * supports explicit hostnames parameter (just like MiniDFSCluster does), we could test region + * locality features more easily. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoadWithLocality() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithLocality\n"); doIncrementalLoadTest(false, true, false, "testMRIncrementalLoadWithLocality1"); doIncrementalLoadTest(true, true, false, "testMRIncrementalLoadWithLocality2"); } - //@Ignore("Wahtevs") + // @Ignore("Wahtevs") @Test public void testMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithPutSortReducer\n"); @@ -619,17 +607,16 @@ public void testMRIncrementalLoadWithPutSortReducer() throws Exception { } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, - boolean putSortReducer, String tableStr) throws Exception { - doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, - Arrays.asList(tableStr)); + boolean putSortReducer, String tableStr) throws Exception { + doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, + Arrays.asList(tableStr)); } @Test public void testMultiMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMultiMRIncrementalLoadWithPutSortReducer\n"); doIncrementalLoadTest(false, false, true, - Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList - ())); + Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList())); } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, @@ -682,16 +669,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe assertEquals("HFOF should not touch actual table", 0, util.countRows(tableSingle)); } int numTableDirs = 0; - FileStatus[] fss = - testDir.getFileSystem(conf).listStatus(testDir); - for (FileStatus tf: fss) { + FileStatus[] fss = testDir.getFileSystem(conf).listStatus(testDir); + for (FileStatus tf : fss) { Path tablePath = testDir; if (writeMultipleTables) { if (allTables.containsKey(tf.getPath().getName())) { ++numTableDirs; tablePath = tf.getPath(); - } - else { + } else { continue; } } @@ -699,7 +684,7 @@ private 
void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe // Make sure that a directory was created for every CF int dir = 0; fss = tablePath.getFileSystem(conf).listStatus(tablePath); - for (FileStatus f: fss) { + for (FileStatus f : fss) { for (byte[] family : FAMILIES) { if (Bytes.toString(family).equals(f.getPath().getName())) { ++dir; @@ -726,9 +711,8 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe byte[][] newSplitKeys = generateRandomSplitKeys(14); Table table = util.createTable(chosenTable.getName(), FAMILIES, newSplitKeys); - while (util.getConnection().getRegionLocator(chosenTable.getName()) - .getAllRegionLocations().size() != 15 || - !admin.isTableAvailable(table.getName())) { + while (util.getConnection().getRegionLocator(chosenTable.getName()).getAllRegionLocations() + .size() != 15 || !admin.isTableAvailable(table.getName())) { Thread.sleep(200); LOG.info("Waiting for new region assignment to happen"); } @@ -751,11 +735,11 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe if (putSortReducer) { // no rows should be extracted assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); } else { expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); Scan scan = new Scan(); ResultScanner results = currentTable.getScanner(scan); for (Result res : results) { @@ -788,14 +772,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } admin.enableTable(currentTableName); util.waitTableAvailable(currentTableName); - assertEquals("Data should remain after reopening of regions", - tableDigestBefore, util.checksumRows(currentTable)); + assertEquals("Data should remain after reopening of regions", tableDigestBefore, + util.checksumRows(currentTable)); } } finally { for (HFileOutputFormat2.TableInfo tableInfoSingle : tableInfo) { - tableInfoSingle.getRegionLocator().close(); + tableInfoSingle.getRegionLocator().close(); } - for (Entry singleTable : allTables.entrySet() ) { + for (Entry singleTable : allTables.entrySet()) { singleTable.getValue().close(); util.deleteTable(singleTable.getValue().getName()); } @@ -804,14 +788,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } } - private void runIncrementalPELoad(Configuration conf, List tableInfo, Path outDir, - boolean putSortReducer) throws IOException, - InterruptedException, ClassNotFoundException { + private void runIncrementalPELoad(Configuration conf, + List tableInfo, Path outDir, boolean putSortReducer) + throws IOException, InterruptedException, ClassNotFoundException { Job job = new Job(conf, "testLocalMRIncrementalLoad"); job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad")); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); setupRandomGeneratorMapper(job, putSortReducer); if (tableInfo.size() > 1) { MultiTableHFileOutputFormat.configureIncrementalLoad(job, tableInfo); @@ -820,29 +804,27 @@ private void runIncrementalPELoad(Configuration conf, List 
retrievedFamilyToCompressionMap = HFileOutputFormat2 - .createFamilyCompressionMap(conf); + Map retrievedFamilyToCompressionMap = + HFileOutputFormat2.createFamilyCompressionMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToCompression.entrySet()) { - assertEquals("Compression configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToCompressionMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToCompressionMap.get(Bytes.toBytes(entry.getKey()))); } } } @@ -872,16 +852,12 @@ public void testSerializeDeserializeFamilyCompressionMap() throws IOException { private void setupMockColumnFamiliesForCompression(Table table, Map familyToCompression) throws IOException { - TableDescriptorBuilder mockTableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); + TableDescriptorBuilder mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); for (Entry entry : familyToCompression.entrySet()) { - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(entry.getKey())) - .setMaxVersions(1) - .setCompressionType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0) - .build(); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(entry.getKey())).setMaxVersions(1) + .setCompressionType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0) + .build(); mockTableDescriptor.setColumnFamily(columnFamilyDescriptor); } @@ -889,11 +865,10 @@ private void setupMockColumnFamiliesForCompression(Table table, } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForCompression (int numCfs) { + private Map getMockColumnFamiliesForCompression(int numCfs) { Map familyToCompression = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { @@ -911,73 +886,62 @@ private void setupMockColumnFamiliesForCompression(Table table, return familyToCompression; } - /** - * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. - * Tests that the family bloom type map is correctly serialized into - * and deserialized from configuration - * + * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the + * family bloom type map is correctly serialized into and deserialized from configuration * @throws IOException */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException { for (int numCfs = 0; numCfs <= 2; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBloomType = - getMockColumnFamiliesForBloomType(numCfs); + Map familyToBloomType = getMockColumnFamiliesForBloomType(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBloomType(table, - familyToBloomType); + setupMockColumnFamiliesForBloomType(table, familyToBloomType); conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, - Arrays.asList(table.getDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, + Arrays.asList(table.getDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBloomTypeMap = - HFileOutputFormat2 - .createFamilyBloomTypeMap(conf); + HFileOutputFormat2.createFamilyBloomTypeMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToBloomType.entrySet()) { - assertEquals("BloomType configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBloomTypeMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals("BloomType configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBloomTypeMap.get(Bytes.toBytes(entry.getKey()))); } } } private void setupMockColumnFamiliesForBloomType(Table table, Map familyToDataBlockEncoding) throws IOException { - TableDescriptorBuilder mockTableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); + TableDescriptorBuilder mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(entry.getKey())) - .setMaxVersions(1) - .setBloomFilterType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0).build(); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(entry.getKey())).setMaxVersions(1) + .setBloomFilterType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0) + .build(); mockTableDescriptor.setColumnFamily(columnFamilyDescriptor); } Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. 
Column family names have special characters */ - private Map - getMockColumnFamiliesForBloomType (int numCfs) { + private Map getMockColumnFamiliesForBloomType(int numCfs) { Map familyToBloomType = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBloomType.put("Family1!@#!@#&", BloomType.ROW); } if (numCfs-- > 0) { - familyToBloomType.put("Family2=asdads&!AASD", - BloomType.ROWCOL); + familyToBloomType.put("Family2=asdads&!AASD", BloomType.ROWCOL); } if (numCfs-- > 0) { familyToBloomType.put("Family3", BloomType.NONE); @@ -986,77 +950,63 @@ private void setupMockColumnFamiliesForBloomType(Table table, } /** - * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. - * Tests that the family block size map is correctly serialized into - * and deserialized from configuration - * + * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the + * family block size map is correctly serialized into and deserialized from configuration * @throws IOException */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBlockSize = - getMockColumnFamiliesForBlockSize(numCfs); + Map familyToBlockSize = getMockColumnFamiliesForBlockSize(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBlockSize(table, - familyToBlockSize); + setupMockColumnFamiliesForBlockSize(table, familyToBlockSize); conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.blockSizeDetails, Arrays.asList(table - .getDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.blockSizeDetails, + Arrays.asList(table.getDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBlockSizeMap = - HFileOutputFormat2 - .createFamilyBlockSizeMap(conf); + HFileOutputFormat2.createFamilyBlockSizeMap(conf); // test that we have a value for all column families that matches with the // used mock values - for (Entry entry : familyToBlockSize.entrySet() - ) { - assertEquals("BlockSize configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBlockSizeMap.get(Bytes.toBytes(entry.getKey()))); + for (Entry entry : familyToBlockSize.entrySet()) { + assertEquals("BlockSize configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBlockSizeMap.get(Bytes.toBytes(entry.getKey()))); } } } private void setupMockColumnFamiliesForBlockSize(Table table, Map familyToDataBlockEncoding) throws IOException { - TableDescriptorBuilder mockTableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); + TableDescriptorBuilder mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(entry.getKey())) - .setMaxVersions(1) - .setBlocksize(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0).build(); + ColumnFamilyDescriptor columnFamilyDescriptor = + 
ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(entry.getKey())).setMaxVersions(1) + .setBlocksize(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0).build(); mockTableDescriptor.setColumnFamily(columnFamilyDescriptor); } Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBlockSize (int numCfs) { + private Map getMockColumnFamiliesForBlockSize(int numCfs) { Map familyToBlockSize = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBlockSize.put("Family1!@#!@#&", 1234); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { familyToBlockSize.put("Family3", 0); @@ -1065,77 +1015,69 @@ private void setupMockColumnFamiliesForBlockSize(Table table, } /** - * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. - * Tests that the family data block encoding map is correctly serialized into - * and deserialized from configuration - * + * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that + * the family data block encoding map is correctly serialized into and deserialized from + * configuration * @throws IOException */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToDataBlockEncoding = getMockColumnFamiliesForDataBlockEncoding(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForDataBlockEncoding(table, - familyToDataBlockEncoding); + setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding); TableDescriptor tableDescriptor = table.getDescriptor(); conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.dataBlockEncodingDetails, Arrays - .asList(tableDescriptor))); + HFileOutputFormat2.serializeColumnFamilyAttribute( + HFileOutputFormat2.dataBlockEncodingDetails, Arrays.asList(tableDescriptor))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToDataBlockEncodingMap = - HFileOutputFormat2 - .createFamilyDataBlockEncodingMap(conf); + HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToDataBlockEncoding.entrySet()) { - assertEquals("DataBlockEncoding configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToDataBlockEncodingMap.get(Bytes.toBytes(entry.getKey()))); + assertEquals( + "DataBlockEncoding configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), + retrievedFamilyToDataBlockEncodingMap.get(Bytes.toBytes(entry.getKey()))); } } } private void setupMockColumnFamiliesForDataBlockEncoding(Table table, Map familyToDataBlockEncoding) throws IOException { - TableDescriptorBuilder mockTableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); + TableDescriptorBuilder mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(entry.getKey())) - .setMaxVersions(1) - .setDataBlockEncoding(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0).build(); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(entry.getKey())).setMaxVersions(1) + .setDataBlockEncoding(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0) + .build(); mockTableDescriptor.setColumnFamily(columnFamilyDescriptor); } Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. 
Column family names have special characters */ - private Map - getMockColumnFamiliesForDataBlockEncoding (int numCfs) { + private Map getMockColumnFamiliesForDataBlockEncoding(int numCfs) { Map familyToDataBlockEncoding = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.FAST_DIFF); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.FAST_DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.PREFIX); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.PREFIX); } if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE); @@ -1144,12 +1086,8 @@ private void setupMockColumnFamiliesForDataBlockEncoding(Table table, } private void setupMockStartKeys(RegionLocator table) throws IOException { - byte[][] mockKeys = new byte[][] { - HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaa"), - Bytes.toBytes("ggg"), - Bytes.toBytes("zzz") - }; + byte[][] mockKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaa"), + Bytes.toBytes("ggg"), Bytes.toBytes("zzz") }; Mockito.doReturn(mockKeys).when(table).getStartKeys(); } @@ -1159,10 +1097,11 @@ private void setupMockTableName(RegionLocator table) throws IOException { } /** - * Test that {@link HFileOutputFormat2} RecordWriter uses compression and - * bloom filter settings from the column family descriptor + * Test that {@link HFileOutputFormat2} RecordWriter uses compression and bloom filter settings + * from the column family descriptor */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testColumnFamilySettings() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; @@ -1173,7 +1112,7 @@ public void testColumnFamilySettings() throws Exception { Table table = Mockito.mock(Table.class); RegionLocator regionLocator = Mockito.mock(RegionLocator.class); TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); + TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]); Mockito.doReturn(tableDescriptorBuilder.build()).when(table).getDescriptor(); for (ColumnFamilyDescriptor hcd : HBaseTestingUtil.generateColumnDescriptors()) { @@ -1202,8 +1141,8 @@ public void testColumnFamilySettings() throws Exception { writer = hof.getRecordWriter(context); // write out random rows - writeRandomKeyValues(writer, context, - tableDescriptorBuilder.build().getColumnFamilyNames(), ROWSPERSPLIT); + writeRandomKeyValues(writer, context, tableDescriptorBuilder.build().getColumnFamilyNames(), + ROWSPERSPLIT); writer.close(context); // Make sure that a directory was created for every CF @@ -1216,8 +1155,8 @@ public void testColumnFamilySettings() throws Exception { assertEquals(tableDescriptorBuilder.build().getColumnFamilies().length, families.length); for (FileStatus f : families) { String familyStr = f.getPath().getName(); - ColumnFamilyDescriptor hcd = tableDescriptorBuilder.build() - .getColumnFamily(Bytes.toBytes(familyStr)); + ColumnFamilyDescriptor hcd = + tableDescriptorBuilder.build().getColumnFamily(Bytes.toBytes(familyStr)); // verify that the compression on this file matches the configured // compression Path dataFilePath = fs.listStatus(f.getPath())[0].getPath(); @@ -1226,8 +1165,8 @@ public void testColumnFamilySettings() throws Exception { byte[] bloomFilter = fileInfo.get(BLOOM_FILTER_TYPE_KEY); if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE"); - assertEquals("Incorrect bloom filter used for column family " + familyStr + - "(reader: " + reader + ")", + assertEquals( + "Incorrect bloom filter used for column family " + familyStr + "(reader: " + reader + ")", hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter))); assertEquals( "Incorrect compression used for column family " + familyStr + "(reader: " + reader + ")", @@ -1239,8 +1178,8 @@ public void testColumnFamilySettings() throws Exception { } /** - * Write random values to the writer assuming a table created using - * {@link #FAMILIES} as column family descriptors + * Write random values to the writer assuming a table created using {@link #FAMILIES} as column + * family descriptors */ private void writeRandomKeyValues(RecordWriter writer, TaskAttemptContext context, Set families, int numRows) @@ -1251,7 +1190,7 @@ private void writeRandomKeyValues(RecordWriter wri int taskId = context.getTaskAttemptID().getTaskID().getId(); assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; - final byte [] qualifier = Bytes.toBytes("data"); + final byte[] qualifier = Bytes.toBytes("data"); for (int i = 0; i < numRows; i++) { Bytes.putInt(keyBytes, 0, i); Bytes.random(valBytes); @@ -1264,12 +1203,12 @@ private void writeRandomKeyValues(RecordWriter wri } /** - * This test is to test the scenario happened in HBASE-6901. - * All files are bulk loaded and excluded from minor compaction. - * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException - * will be thrown. + * This test is to test the scenario happened in HBASE-6901. 
All files are bulk loaded and + * excluded from minor compaction. Without the fix of HBASE-6901, an + * ArrayIndexOutOfBoundsException will be thrown. */ - @Ignore ("Flakey: See HBASE-9051") @Test + @Ignore("Flakey: See HBASE-9051") + @Test public void testExcludeAllFromMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); @@ -1284,28 +1223,29 @@ public void testExcludeAllFromMinorCompaction() throws Exception { assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), - new Path(admin.getRegions(TABLE_NAMES[0]).get(0).getEncodedName(), - Bytes.toString(FAMILIES[0]))); + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + new Path(admin.getRegions(TABLE_NAMES[0]).get(0).getEncodedName(), + Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // Generate two bulk load files - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getDescriptor(), + conn.getRegionLocator(TABLE_NAMES[0]))), + testDir, false); // Perform the actual load BulkLoadHFiles.create(conf).bulkLoad(table.getName(), testDir); } // Ensure data shows up int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("BulkLoadHFiles should put expected data in table", - expectedRows, util.countRows(table)); + assertEquals("BulkLoadHFiles should put expected data in table", expectedRows, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1350,7 +1290,8 @@ public Boolean call() throws Exception { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testExcludeMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); @@ -1358,17 +1299,17 @@ public void testExcludeMinorCompaction() throws Exception { util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()){ + Admin admin = conn.getAdmin()) { Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); final FileSystem fs = util.getDFSCluster().getFileSystem(); Table table = util.createTable(TABLE_NAMES[0], FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), - new Path(admin.getRegions(TABLE_NAMES[0]).get(0).getEncodedName(), - Bytes.toString(FAMILIES[0]))); + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + new Path(admin.getRegions(TABLE_NAMES[0]).get(0).getEncodedName(), + Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // put some data in it and flush to create a storefile @@ -1385,8 +1326,7 @@ public Boolean call() throws Exception { }, 5000); // Generate a bulk load file with more rows - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]); runIncrementalPELoad(conf, @@ -1398,8 +1338,8 @@ public Boolean call() throws Exception { // Ensure data shows up int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("BulkLoadHFiles should put expected data in table", - expectedRows + 1, util.countRows(table)); + assertEquals("BulkLoadHFiles should put expected data in table", expectedRows + 1, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1457,16 +1397,17 @@ public void manualTest(String args[]) throws Exception { Table table = util.createTable(tname, FAMILIES, splitKeys); } else if ("incremental".equals(args[0])) { TableName tname = TableName.valueOf(args[1]); - try(Connection c = ConnectionFactory.createConnection(conf); + try (Connection c = ConnectionFactory.createConnection(conf); Admin admin = c.getAdmin(); RegionLocator regionLocator = c.getRegionLocator(tname)) { Path outDir = new Path("incremental-out"); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(admin - .getDescriptor(tname), regionLocator)), outDir, false); + runIncrementalPELoad(conf, + Arrays + .asList(new HFileOutputFormat2.TableInfo(admin.getDescriptor(tname), regionLocator)), + outDir, false); } } else { - throw new RuntimeException( - "usage: TestHFileOutputFormat2 newtable | incremental"); + throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental"); } } @@ -1476,9 +1417,10 @@ public void testBlockStoragePolicy() throws Exception { Configuration conf = util.getConfiguration(); conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD"); - conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + - Bytes.toString(HFileOutputFormat2.combineTableNameSuffix( - TABLE_NAMES[0].getName(), FAMILIES[0])), "ONE_SSD"); + conf.set( + HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString( + 
HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0])), + "ONE_SSD"); Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0])); Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1])); util.startMiniDFSCluster(3); @@ -1497,9 +1439,9 @@ public void testBlockStoragePolicy() throws Exception { // alter table cf schema to change storage policies HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); spA = getStoragePolicyName(fs, cf1Dir); spB = getStoragePolicyName(fs, cf2Dir); LOG.debug("Storage policy of cf 0: [" + spA + "]."); @@ -1560,11 +1502,12 @@ public void TestConfigurePartitioner() throws IOException { // Create a user who is not the current user String fooUserName = "foo1234"; String fooGroupName = "group1"; - UserGroupInformation - ugi = UserGroupInformation.createUserForTesting(fooUserName, new String[]{fooGroupName}); + UserGroupInformation ugi = + UserGroupInformation.createUserForTesting(fooUserName, new String[] { fooGroupName }); // Get user's home directory Path fooHomeDirectory = ugi.doAs(new PrivilegedAction() { - @Override public Path run() { + @Override + public Path run() { try (FileSystem fs = FileSystem.get(conf)) { return fs.makeQualified(fs.getHomeDirectory()); } catch (IOException ioe) { @@ -1581,7 +1524,8 @@ public void TestConfigurePartitioner() throws IOException { splitPoints.add(writable); ugi.doAs(new PrivilegedAction() { - @Override public Void run() { + @Override + public Void run() { try { HFileOutputFormat2.configurePartitioner(job, splitPoints, false); } catch (IOException ioe) { @@ -1634,7 +1578,7 @@ public void TestConfigureCompression() throws Exception { HFile.Reader reader = HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); assertEquals(reader.getTrailer().getCompressionCodec().getName(), - hfileoutputformatCompression); + hfileoutputformatCompression); } } finally { if (writer != null && context != null) { @@ -1657,7 +1601,7 @@ public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception { hostnames[i] = "datanode_" + i; } StartTestingClusterOption option = StartTestingClusterOption.builder() - .numRegionServers(hostCount).dataNodeHosts(hostnames).build(); + .numRegionServers(hostCount).dataNodeHosts(hostnames).build(); util.startMiniCluster(option); // Start cluster B @@ -1671,7 +1615,7 @@ public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception { TableName tableName = TableName.valueOf("table"); // Create table in cluster B try (Table table = utilB.createTable(tableName, FAMILIES, splitKeys); - RegionLocator r = utilB.getConnection().getRegionLocator(tableName)) { + RegionLocator r = utilB.getConnection().getRegionLocator(tableName)) { // Generate the bulk load files // Job has zookeeper configuration for cluster A // Assume reading from cluster A by TableInputFormat and creating hfiles to cluster B @@ -1701,7 +1645,7 @@ public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception { assertTrue(job.waitForCompletion(true)); final List configs = - 
ConfigurationCaptorConnection.getCapturedConfigarutions(key); + ConfigurationCaptorConnection.getCapturedConfigarutions(key); assertFalse(configs.isEmpty()); for (Configuration config : configs) { @@ -1712,8 +1656,7 @@ public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception { assertEquals(confB.get(HConstants.ZOOKEEPER_ZNODE_PARENT), config.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); - assertEquals(bSpecificConfigValue, - config.get(bSpecificConfigKey)); + assertEquals(bSpecificConfigValue, config.get(bSpecificConfigKey)); } } finally { utilB.deleteTable(tableName); @@ -1731,7 +1674,7 @@ private static class ConfigurationCaptorConnection implements Connection { private final Connection delegate; public ConfigurationCaptorConnection(Configuration conf, ExecutorService es, User user) - throws IOException { + throws IOException { delegate = FutureUtils.get(createAsyncConnection(conf, user)).toConnection(); final String uuid = conf.get(UUID_KEY); @@ -1819,8 +1762,7 @@ public String getClusterId() { } @Override - public Hbck getHbck() - throws IOException { + public Hbck getHbck() throws IOException { return delegate.getHbck(); } @@ -1841,4 +1783,3 @@ public boolean isAborted() { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java index a2c4fbaf87e8..9f9377f10716 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestHRegionPartitioner { @ClassRule @@ -65,8 +65,8 @@ public void testHRegionPartitioner() throws Exception { byte[][] families = { Bytes.toBytes("familyA"), Bytes.toBytes("familyB") }; - UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, - Bytes.toBytes("aa"), Bytes.toBytes("cc"), 3); + UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, Bytes.toBytes("aa"), + Bytes.toBytes("cc"), 3); HRegionPartitioner partitioner = new HRegionPartitioner<>(); Configuration configuration = UTIL.getConfiguration(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java index ff1ac7461fa6..dd754b4a2ead 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -85,9 +85,9 @@ public void testHashTable() throws Exception { int numRegions = 10; int numHashFiles = 3; - byte[][] splitRows = new byte[numRegions-1][]; + byte[][] splitRows = new byte[numRegions - 1][]; for (int i = 1; i < numRegions; i++) { - splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions); + splitRows[i - 1] = Bytes.toBytes(numRows * i / numRegions); } long timestamp = 1430764183454L; @@ -107,13 +107,9 @@ public void testHashTable() throws Exception { Path testDir = TEST_UTIL.getDataTestDirOnTestFS(tableName.getNameAsString()); long batchSize = 300; - int code = hashTable.run(new String[] { - "--batchsize=" + batchSize, - "--numhashfiles=" + numHashFiles, - "--scanbatch=2", - tableName.getNameAsString(), - testDir.toString() - }); + int code = + hashTable.run(new String[] { "--batchsize=" + batchSize, "--numhashfiles=" + numHashFiles, + "--scanbatch=2", tableName.getNameAsString(), testDir.toString() }); assertEquals("test job failed", 0, code); FileSystem fs = TEST_UTIL.getTestFileSystem(); @@ -127,29 +123,29 @@ public void testHashTable() throws Exception { LOG.debug("partition: " + Bytes.toInt(bytes.get())); } - ImmutableMap expectedHashes - = ImmutableMap.builder() - .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f"))) - .put(5, new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96"))) - .put(10, new ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa"))) - .put(15, new ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881"))) - .put(20, new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352"))) - .put(25, new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93"))) - .put(30, new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666"))) - .put(35, new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090"))) - .put(40, new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3"))) - .put(45, new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb"))) - .put(50, new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc"))) - .put(55, new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4"))) - .put(60, new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b"))) - .put(65, new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59"))) - .put(70, new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f"))) - .put(75, new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56"))) - .put(80, new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095"))) - .put(85, new ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91"))) - .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38"))) - .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) - .build(); + ImmutableMap expectedHashes = + ImmutableMap. 
builder() + .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f"))) + .put(5, new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96"))) + .put(10, new ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa"))) + .put(15, new ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881"))) + .put(20, new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352"))) + .put(25, new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93"))) + .put(30, new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666"))) + .put(35, new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090"))) + .put(40, new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3"))) + .put(45, new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb"))) + .put(50, new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc"))) + .put(55, new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4"))) + .put(60, new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b"))) + .put(65, new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59"))) + .put(70, new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f"))) + .put(75, new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56"))) + .put(80, new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095"))) + .put(85, new ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91"))) + .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38"))) + .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) + .build(); Map actualHashes = new HashMap<>(); Path dataDir = new Path(testDir, HashTable.HASH_DATA_DIR); @@ -166,7 +162,7 @@ public void testHashTable() throws Exception { int intKey = -1; if (key.getLength() > 0) { - intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); + intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); } if (actualHashes.containsKey(intKey)) { Assert.fail("duplicate key in data files: " + intKey); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index a68ef63bc175..b825d4eb9bdb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -110,7 +110,7 @@ /** * Tests the table import and table export MR job functionality */ -@Category({VerySlowMapReduceTests.class, MediumTests.class}) +@Category({ VerySlowMapReduceTests.class, MediumTests.class }) public class TestImportExport { @ClassRule @@ -134,7 +134,7 @@ public class TestImportExport { private static final long now = EnvironmentEdgeManager.currentTime(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); - public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); + public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); public static final String TEST_ATTR = "source_op"; public static final String TEST_TAG = "test_tag"; @@ -144,7 +144,7 @@ public static void beforeClass() throws Throwable { UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); UTIL.startMiniCluster(); FQ_OUTPUT_DIR = - new Path(OUTPUT_DIR).makeQualified(FileSystem.get(UTIL.getConfiguration())).toString(); + new Path(OUTPUT_DIR).makeQualified(FileSystem.get(UTIL.getConfiguration())).toString(); } @AfterClass @@ -229,22 +229,18 @@ public void testSimpleCase() throws Throwable { } String[] args = new String[] { - // Only export row1 & row2. - "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", - "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + // Only export row1 & row2. + "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", + "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", name.getMethodName(), FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export }; assertTrue(runExport(args)); final String IMPORT_TABLE = name.getMethodName() + "import"; try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3)) { - args = new String[] { - "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING, - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; + args = + new String[] { "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, + IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Get g = new Get(ROW1); @@ -263,13 +259,12 @@ public void testSimpleCase() throws Throwable { /** * Test export hbase:meta table - * * @throws Throwable */ @Test public void testMetaExport() throws Throwable { - String[] args = new String[] { TableName.META_TABLE_NAME.getNameAsString(), - FQ_OUTPUT_DIR, "1", "0", "0" }; + String[] args = + new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" }; assertTrue(runExport(args)); } @@ -293,34 +288,26 @@ public void testImport94Table() throws Throwable { fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name)); String IMPORT_TABLE = name; try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3)) { - String[] args = new String[] { - "-Dhbase.import.version=0.94" , - IMPORT_TABLE, FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-Dhbase.import.version=0.94", IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - /* exportedTableIn94Format contains 5 rows - ROW COLUMN+CELL - r1 column=f1:c1, timestamp=1383766761171, value=val1 - r2 column=f1:c1, timestamp=1383766771642, value=val2 - r3 column=f1:c1, timestamp=1383766777615, value=val3 - r4 column=f1:c1, 
timestamp=1383766785146, value=val4 - r5 column=f1:c1, timestamp=1383766791506, value=val5 - */ - assertEquals(5, UTIL.countRows(t)); + /* + * exportedTableIn94Format contains 5 rows ROW COLUMN+CELL r1 column=f1:c1, + * timestamp=1383766761171, value=val1 r2 column=f1:c1, timestamp=1383766771642, value=val2 r3 + * column=f1:c1, timestamp=1383766777615, value=val3 r4 column=f1:c1, timestamp=1383766785146, + * value=val4 r5 column=f1:c1, timestamp=1383766791506, value=val5 + */ + assertEquals(5, UTIL.countRows(t)); } } /** * Test export scanner batching */ - @Test - public void testExportScannerBatching() throws Throwable { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(1) - .build()) - .build(); + @Test + public void testExportScannerBatching() throws Throwable { + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(1).build()).build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName())) { Put p = new Put(ROW1); @@ -331,11 +318,11 @@ public void testExportScannerBatching() throws Throwable { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - String[] args = new String[] { - "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added scanner batching arg. - name.getMethodName(), - FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added + // scanner + // batching + // arg. + name.getMethodName(), FQ_OUTPUT_DIR }; assertTrue(runExport(args)); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); @@ -345,12 +332,10 @@ public void testExportScannerBatching() throws Throwable { @Test public void testWithDeletes() throws Throwable { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName())) { @@ -362,35 +347,26 @@ public void testWithDeletes() throws Throwable { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - Delete d = new Delete(ROW1, now+3); + Delete d = new Delete(ROW1, now + 3); t.delete(d); d = new Delete(ROW1); - d.addColumns(FAMILYA, QUAL, now+2); + d.addColumns(FAMILYA, QUAL, now + 2); t.delete(d); } - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", name.getMethodName(), + FQ_OUTPUT_DIR, "1000", // max number of key versions per key to export }; assertTrue(runExport(args)); final String IMPORT_TABLE = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - 
.build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName())) { - args = new String[] { - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; + args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -400,71 +376,60 @@ public void testWithDeletes() throws Throwable { Result r = scanner.next(); Cell[] res = r.rawCells(); assertTrue(PrivateCellUtil.isDeleteFamily(res[0])); - assertEquals(now+4, res[1].getTimestamp()); - assertEquals(now+3, res[2].getTimestamp()); + assertEquals(now + 4, res[1].getTimestamp()); + assertEquals(now + 3, res[2].getTimestamp()); assertTrue(CellUtil.isDelete(res[3])); - assertEquals(now+2, res[4].getTimestamp()); - assertEquals(now+1, res[5].getTimestamp()); + assertEquals(now + 2, res[4].getTimestamp()); + assertEquals(now + 1, res[5].getTimestamp()); assertEquals(now, res[6].getTimestamp()); } } - @Test public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) .build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); exportT.delete(d); - //Add second version of QUAL + // Add second version of QUAL p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now + 5, Bytes.toBytes("s")); exportT.put(p); - //Add second Delete family marker - d = new Delete(ROW1, now+7); + // Add second Delete family marker + d = new Delete(ROW1, now + 7); exportT.delete(d); - - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key + // to export }; assertTrue(runExport(args)); final String importTable = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(importTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); Table importT = 
UTIL.getConnection().getTable(TableName.valueOf(importTable)); - args = new String[] { - importTable, - FQ_OUTPUT_DIR - }; + args = new String[] { importTable, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -475,11 +440,11 @@ public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Thro Result importedTResult = importedTScanner.next(); ResultScanner exportedTScanner = exportT.getScanner(s); - Result exportedTResult = exportedTScanner.next(); + Result exportedTResult = exportedTScanner.next(); try { Result.compareResults(exportedTResult, importedTResult); } catch (Throwable e) { - fail("Original and imported tables data comparision failed with error:"+e.getMessage()); + fail("Original and imported tables data comparision failed with error:" + e.getMessage()); } finally { exportT.close(); importT.close(); @@ -487,18 +452,15 @@ public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Thro } /** - * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, + * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, * attempt with invalid values. */ @Test public void testWithFilter() throws Throwable { // Create simple table to export - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()).build(); UTIL.getAdmin().createTable(desc); Table exportTable = UTIL.getConnection().getTable(desc.getTableName()); @@ -521,19 +483,14 @@ public void testWithFilter() throws Throwable { // Import to a new table final String IMPORT_TABLE = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE)).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()).build(); UTIL.getAdmin().createTable(desc); Table importTable = UTIL.getConnection().getTable(desc.getTableName()); args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + PrefixFilter.class.getName(), "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, - FQ_OUTPUT_DIR, - "1000" }; + FQ_OUTPUT_DIR, "1000" }; assertTrue(runImport(args)); // get the count of the source table for that time range @@ -581,7 +538,7 @@ private int getCount(Table table, Filter filter) throws IOException { public void testImportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -611,29 +568,19 @@ public void testExportScan() throws Exception { String prefix = "row"; String label_0 = "label_0"; String label_1 = "label_1"; - String[] args = { - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - 
String.valueOf(endTime), - prefix - }; + String[] args = { "table", "outputDir", String.valueOf(version), String.valueOf(startTime), + String.valueOf(endTime), prefix }; Scan scan = ExportUtils.getScanFromCommandLine(UTIL.getConfiguration(), args); assertEquals(version, scan.getMaxVersions()); assertEquals(startTime, scan.getTimeRange().getMin()); assertEquals(endTime, scan.getTimeRange().getMax()); assertEquals(true, (scan.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); - String[] argsWithLabels = { - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + assertEquals(0, + Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + String[] argsWithLabels = + { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, "table", + "outputDir", String.valueOf(version), String.valueOf(startTime), + String.valueOf(endTime), prefix }; Configuration conf = new Configuration(UTIL.getConfiguration()); // parse the "-D" options String[] otherArgs = new GenericOptionsParser(conf, argsWithLabels).getRemainingArgs(); @@ -642,7 +589,8 @@ public void testExportScan() throws Exception { assertEquals(startTime, scanWithLabels.getTimeRange().getMin()); assertEquals(endTime, scanWithLabels.getTimeRange().getMax()); assertEquals(true, (scanWithLabels.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), + Bytes.toBytesBinary(prefix))); assertEquals(2, scanWithLabels.getAuthorizations().getLabels().size()); assertEquals(label_0, scanWithLabels.getAuthorizations().getLabels().get(0)); assertEquals(label_1, scanWithLabels.getAuthorizations().getLabels().get(1)); @@ -655,7 +603,7 @@ public void testExportScan() throws Exception { public void testExportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -668,11 +616,10 @@ public void testExportMain() throws Throwable { assertEquals(-1, newSecurityManager.getExitCode()); String errMsg = data.toString(); assertTrue(errMsg.contains("Wrong number of arguments:")); - assertTrue(errMsg.contains( - "Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]")); assertTrue( - errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); + errMsg.contains("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]")); + assertTrue(errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); assertTrue(errMsg.contains("-D hbase.mapreduce.include.deleted.rows=true")); assertTrue(errMsg.contains("-D hbase.client.scanner.caching=100")); assertTrue(errMsg.contains("-D hbase.export.scanner.batch=10")); @@ -719,8 +666,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } /** - * Test addFilterAndArguments method of Import This method set couple - * parameters 
into Configuration + * Test addFilterAndArguments method of Import This method set couple parameters into + * Configuration */ @Test public void testAddFilterAndArguments() throws IOException { @@ -732,7 +679,7 @@ public void testAddFilterAndArguments() throws IOException { Import.addFilterAndArguments(configuration, FilterBase.class, args); assertEquals("org.apache.hadoop.hbase.filter.FilterBase", - configuration.get(Import.FILTER_CLASS_CONF_KEY)); + configuration.get(Import.FILTER_CLASS_CONF_KEY)); assertEquals("param1,param2", configuration.get(Import.FILTER_ARGS_CONF_KEY)); } @@ -755,7 +702,7 @@ public void testDurability() throws Throwable { exportTable.put(put); // Run the export - String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000"}; + String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000" }; assertTrue(runExport(args)); // Create the table for import @@ -770,13 +717,12 @@ public void testDurability() throws Throwable { wal.registerWALActionsListener(walListener); // Run the import with SKIP_WAL - args = - new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), - importTableName, FQ_OUTPUT_DIR }; + args = new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), + importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is not visisted + // Assert that the wal is not visisted assertTrue(!walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); // Run the import with the default durability option @@ -789,16 +735,16 @@ public void testDurability() throws Throwable { wal.registerWALActionsListener(walListener); args = new String[] { importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is visisted + // Assert that the wal is visisted assertTrue(walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); } } /** - * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to - * identify that an entry is written to the Write Ahead Log for the given table. + * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to identify + * that an entry is written to the Write Ahead Log for the given table. */ private static class TableWALActionListener implements WALActionsListener { @@ -812,7 +758,7 @@ public TableWALActionListener(RegionInfo region) { @Override public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { if (logKey.getTableName().getNameAsString().equalsIgnoreCase( - this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit())) { + this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit())) { isVisited = true; } } @@ -823,45 +769,39 @@ public boolean isWALVisited() { } /** - * Add cell tags to delete mutations, run export and import tool and - * verify that tags are present in import table also. + * Add cell tags to delete mutations, run export and import tool and verify that tags are present + * in import table also. * @throws Throwable throws Throwable. 
*/ @Test public void testTagsAddition() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(exportTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor desc = TableDescriptorBuilder.newBuilder(exportTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); // Add test attribute to delete mutation. d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); exportT.delete(d); // Run export tool with KeyValueCodecWithTags as Codec. This will ensure that export tool // will use KeyValueCodecWithTags. - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - // This will make sure that codec will encode and decode tags in rpc call. - "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key + // to export }; assertTrue(runExport(args)); // Assert tag exists in exportTable @@ -869,23 +809,17 @@ public void testTagsAddition() throws Throwable { // Create an import table with MetadataController. final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); - TableDescriptor importTableDesc = TableDescriptorBuilder - .newBuilder(importTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor importTableDesc = TableDescriptorBuilder.newBuilder(importTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(importTableDesc); // Run import tool. args = new String[] { - // This will make sure that codec will encode and decode tags in rpc call. - "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - importTable.getNameAsString(), - FQ_OUTPUT_DIR - }; + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + importTable.getNameAsString(), FQ_OUTPUT_DIR }; assertTrue(runImport(args)); // Make sure that tags exists in imported table. 
checkWhetherTagExists(importTable, true); @@ -908,7 +842,7 @@ private void checkWhetherTagExists(TableName table, boolean tagExists) throws IO } } boolean deleteFound = false; - for (Cell cell: values) { + for (Cell cell : values) { if (PrivateCellUtil.isDelete(cell.getType().getCode())) { deleteFound = true; List tags = PrivateCellUtil.getTags(cell); @@ -928,7 +862,7 @@ private void checkWhetherTagExists(TableName table, boolean tagExists) throws IO } /* - This co-proc will add a cell tag to delete mutation. + * This co-proc will add a cell tag to delete mutation. */ public static class MetadataController implements RegionCoprocessor, RegionObserver { @Override @@ -938,8 +872,7 @@ public Optional getRegionObserver() { @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) - throws IOException { + MiniBatchOperationInProgress miniBatchOp) throws IOException { if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { return; } @@ -954,7 +887,7 @@ public void preBatchMutate(ObserverContext c, } Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); List updatedCells = new ArrayList<>(); - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); List tags = PrivateCellUtil.getTags(cell); tags.add(sourceOpTag); @@ -972,34 +905,30 @@ public void preBatchMutate(ObserverContext c, } /** - * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string - * This means it will use no Codec. Make sure that we don't return Tags in response. + * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string This means + * it will use no Codec. Make sure that we don't return Tags in response. * @throws Exception Exception */ @Test public void testTagsWithEmptyCodec() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); - TableDescriptor tableDesc = TableDescriptorBuilder - .newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(tableDesc); Configuration conf = new Configuration(UTIL.getConfiguration()); conf.set(RPC_CODEC_CONF_KEY, ""); conf.set(DEFAULT_CODEC_CLASS, ""); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { - //Add first version of QUAL + Table table = connection.getTable(tableName)) { + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); table.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); // Add test attribute to delete mutation. 
d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); table.delete(d); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index 3e17bd963674..35d0bfd504c5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithOperationAttributes implements Configurable { @ClassRule @@ -78,8 +78,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable { protected static HBaseTestingUtil util = new HBaseTestingUtil(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. */ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -153,13 +152,10 @@ public void testMROnTableWithInvalidOperationAttr() throws Exception { } /** - * Run an ImportTsv job and perform basic validation on the results. Returns - * the ImportTsv Tool instance so that other tests can inspect it - * for further validation as necessary. This method is static to insure - * non-reliance on instance's util/conf facilities. - * - * @param args - * Any arguments to pass BEFORE inputFile path is appended. + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. + * @param args Any arguments to pass BEFORE inputFile path is appended. * @param dataAvailable * @return The Tool instance used to run the test. */ @@ -199,7 +195,6 @@ private Tool doMROnTableTest(HBaseTestingUtil util, String family, String data, /** * Confirm ImportTsv via data in online table. - * * @param dataAvailable */ private static void validateTable(Configuration conf, TableName tableName, String family, @@ -224,9 +219,10 @@ private static void validateTable(Configuration conf, TableName tableName, Strin List kvs = res.listCells(); assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); - assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), - Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. 
verified = true; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index f981ffc222a4..de76cb8e508b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithTTLs implements Configurable { @ClassRule @@ -67,8 +67,7 @@ public class TestImportTSVWithTTLs implements Configurable { protected static HBaseTestingUtil util = new HBaseTestingUtil(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. */ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -114,8 +113,7 @@ public void testMROnTable() throws Exception { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_TTL", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001b1000000\n"; @@ -131,8 +129,8 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, String family, Stri // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified(new Path(util - .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = fs + .makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); op.write(Bytes.toBytes(data)); op.close(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java index 910f4f6836c8..8ccad720bfb9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -76,7 +76,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithVisibilityLabels implements Configurable { @ClassRule @@ -89,8 +89,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { protected static HBaseTestingUtil util = new HBaseTestingUtil(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. */ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -125,10 +124,10 @@ public void setConf(Configuration conf) { public static void provisionCluster() throws Exception { conf = util.getConfiguration(); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); - conf.set("hbase.superuser", "admin,"+User.getCurrent().getName()); + conf.set("hbase.superuser", "admin," + User.getCurrent().getName()); VisibilityTestUtil.enableVisiblityLabels(conf); conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, - ScanLabelGenerator.class); + ScanLabelGenerator.class); util.startMiniCluster(); // Wait for the labels table to become available util.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000); @@ -138,19 +137,19 @@ public static void provisionCluster() throws Exception { private static void createLabels() throws IOException, InterruptedException { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - LOG.info("Added labels "); - } catch (Throwable t) { - LOG.error("Error in adding labels" , t); - throw new IOException(t); - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + LOG.info("Added labels "); + } catch (Throwable t) { + LOG.error("Error in adding labels", t); + throw new IOException(t); + } + return null; + } + }; SUPERUSER.runAs(action); } @@ -165,8 +164,7 @@ public void testMROnTable() throws Exception { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; @@ -232,10 +230,8 @@ public void testMROnTableWithBulkload() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
- String[] args = new String[] { - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; util.createTable(tableName, FAMILY); @@ -247,18 +243,15 @@ public void testMROnTableWithBulkload() throws Exception { public void testBulkOutputWithTsvImporterTextMapper() throws Exception { final TableName table = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); String FAMILY = "FAM"; - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. - String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - table.getNameAsString() - }; + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), + table.getNameAsString() }; String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n"; doMROnTableTest(util, FAMILY, data, args, 4); util.deleteTable(table); @@ -270,8 +263,7 @@ public void testMRWithOutputFormat() throws Exception { Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; @@ -286,10 +278,9 @@ public void testBulkOutputWithInvalidLabels() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
- String[] args = - new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; // 2 Data rows, one with valid label and one with invalid label String data = @@ -304,13 +295,12 @@ public void testBulkOutputWithTsvImporterTextMapperWithInvalidLabels() throws Ex final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. - String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; // 2 Data rows, one with valid label and one with invalid label String data = @@ -326,27 +316,22 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, String family, Stri } /** - * Run an ImportTsv job and perform basic validation on the results. Returns - * the ImportTsv Tool instance so that other tests can inspect it - * for further validation as necessary. This method is static to insure - * non-reliance on instance's util/conf facilities. - * - * @param args - * Any arguments to pass BEFORE inputFile path is appended. - * + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. + * @param args Any arguments to pass BEFORE inputFile path is appended. * @param expectedKVCount Expected KV count. pass -1 to skip the kvcount check - * * @return The Tool instance used to run the test. 
*/ protected static Tool doMROnTableTest(HBaseTestingUtil util, String family, String data, - String[] args, int valueMultiplier,int expectedKVCount) throws Exception { + String[] args, int valueMultiplier, int expectedKVCount) throws Exception { TableName table = TableName.valueOf(args[args.length - 1]); Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified(new Path(util - .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = fs + .makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); if (data == null) { data = "KEY\u001bVALUE1\u001bVALUE2\n"; @@ -381,10 +366,8 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, String family, Stri } } LOG.debug("validating the table " + createdHFiles); - if (createdHFiles) - validateHFiles(fs, outputPath, family,expectedKVCount); - else - validateTable(conf, table, family, valueMultiplier); + if (createdHFiles) validateHFiles(fs, outputPath, family, expectedKVCount); + else validateTable(conf, table, family, valueMultiplier); if (conf.getBoolean(DELETE_AFTER_LOAD_CONF, true)) { LOG.debug("Deleting test subdirectory"); @@ -411,20 +394,21 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami String cf = elements[elements.length - 1]; foundFamilies.add(cf); assertTrue(String.format( - "HFile ouput contains a column family (%s) not present in input families (%s)", cf, - configFamilies), configFamilies.contains(cf)); + "HFile ouput contains a column family (%s) not present in input families (%s)", cf, + configFamilies), configFamilies.contains(cf)); for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) { assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()), - hfile.getLen() > 0); + hfile.getLen() > 0); if (expectedKVCount > -1) { actualKVCount += getKVCountFromHfile(fs, hfile.getPath()); } } } if (expectedKVCount > -1) { - assertTrue(String.format( - "KV count in output hfile=<%d> doesn't match with expected KV count=<%d>", actualKVCount, - expectedKVCount), actualKVCount == expectedKVCount); + assertTrue( + String.format("KV count in output hfile=<%d> doesn't match with expected KV count=<%d>", + actualKVCount, expectedKVCount), + actualKVCount == expectedKVCount); } } @@ -444,7 +428,7 @@ private static void validateTable(Configuration conf, TableName tableName, Strin Scan scan = new Scan(); // Scan entire family. scan.addFamily(Bytes.toBytes(family)); - scan.setAuthorizations(new Authorizations("secret","private")); + scan.setAuthorizations(new Authorizations("secret", "private")); ResultScanner resScanner = table.getScanner(scan); Result[] next = resScanner.next(5); assertEquals(1, next.length); @@ -455,8 +439,8 @@ private static void validateTable(Configuration conf, TableName tableName, Strin assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), - Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. 
} verified = true; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index a3427f2a5ec6..a02ff64afd99 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,7 +71,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestImportTsv implements Configurable { @ClassRule @@ -143,11 +143,10 @@ public void testMROnTableWithTimestamp() throws Exception { } @Test - public void testMROnTableWithCustomMapper() - throws Exception { + public void testMROnTableWithCustomMapper() throws Exception { util.createTable(tn, FAMILY); args.put(ImportTsv.MAPPER_CONF_KEY, - "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper"); + "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper"); doMROnTableTest(null, 3); util.deleteTable(tn); @@ -189,39 +188,34 @@ public void testBulkOutputWithAnExistingTableNoStrictTrue() throws Exception { @Test public void testJobConfigurationsWithTsvImporterTextMapper() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); String INPUT_FILE = "InputFile1.csv"; // Prepare the arguments required for the test. 
- String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - tn.getNameAsString(), - INPUT_FILE - }; - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); - assertTrue(job.getReducerClass().equals(TextSortReducer.class)); - assertTrue(job.getMapOutputValueClass().equals(Text.class)); - return 0; - } - }, args)); + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), + tn.getNameAsString(), INPUT_FILE }; + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); + assertTrue(job.getReducerClass().equals(TextSortReducer.class)); + assertTrue(job.getMapOutputValueClass().equals(Text.class)); + return 0; + } + }, args)); // Delete table created by createSubmittableJob. util.deleteTable(tn); } @Test public void testBulkOutputWithTsvImporterTextMapper() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.MAPPER_CONF_KEY, "org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); String data = "KEY\u001bVALUE4\u001bVALUE8\n"; @@ -239,53 +233,49 @@ public void testWithoutAnExistingTableAndCreateTableSetToNo() throws Exception { conf.set(ImportTsv.CREATE_TABLE_CONF_KEY, "no"); exception.expect(TableNotFoundException.class); assertEquals("running test job configuration failed.", 0, - ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { - @Override public int run(String[] args) throws Exception { - createSubmittableJob(getConf(), args); - return 0; - } - }, args)); + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + createSubmittableJob(getConf(), args); + return 0; + } + }, args)); } @Test public void testMRWithoutAnExistingTable() throws Exception { - String[] args = - new String[] { tn.getNameAsString(), "/inputFile" }; + String[] args = new String[] { tn.getNameAsString(), "/inputFile" }; exception.expect(TableNotFoundException.class); - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - createSubmittableJob(getConf(), args); - return 0; - } - }, args)); + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new 
Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + createSubmittableJob(getConf(), args); + return 0; + } + }, args)); } @Test public void testJobConfigurationsWithDryMode() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); String INPUT_FILE = "InputFile1.csv"; // Prepare the arguments required for the test. - String[] argsArray = new String[] { - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true", - tn.getNameAsString(), - INPUT_FILE }; - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); - return 0; - } - }, argsArray)); + String[] argsArray = + new String[] { "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), + "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true", tn.getNameAsString(), INPUT_FILE }; + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); + return 0; + } + }, argsArray)); // Delete table created by createSubmittableJob. util.deleteTable(tn); } @@ -301,8 +291,7 @@ public void testDryModeWithoutBulkOutputAndTableExists() throws Exception { } /** - * If table is not present in non-bulk mode, dry run should fail just like - * normal mode. + * If table is not present in non-bulk mode, dry run should fail just like normal mode. */ @Test public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception { @@ -311,7 +300,8 @@ public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception doMROnTableTest(null, 1); } - @Test public void testDryModeWithBulkOutputAndTableExists() throws Exception { + @Test + public void testDryModeWithBulkOutputAndTableExists() throws Exception { util.createTable(tn, FAMILY); // Prepare the arguments required for the test. Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); @@ -324,12 +314,11 @@ public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception } /** - * If table is not present in bulk mode and create.table is not set to yes, - * import should fail with TableNotFoundException. + * If table is not present in bulk mode and create.table is not set to yes, import should fail + * with TableNotFoundException. */ @Test - public void testDryModeWithBulkOutputAndTableDoesNotExistsCreateTableSetToNo() throws - Exception { + public void testDryModeWithBulkOutputAndTableDoesNotExistsCreateTableSetToNo() throws Exception { // Prepare the arguments required for the test. 
Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); @@ -382,31 +371,30 @@ public void testSkipEmptyColumns() throws Exception { } private Tool doMROnTableTest(String data, int valueMultiplier) throws Exception { - return doMROnTableTest(util, tn, FAMILY, data, args, valueMultiplier,-1); + return doMROnTableTest(util, tn, FAMILY, data, args, valueMultiplier, -1); } - protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, - String family, String data, Map args) throws Exception { - return doMROnTableTest(util, table, family, data, args, 1,-1); + protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, String family, + String data, Map args) throws Exception { + return doMROnTableTest(util, table, family, data, args, 1, -1); } /** - * Run an ImportTsv job and perform basic validation on the results. - * Returns the ImportTsv Tool instance so that other tests can - * inspect it for further validation as necessary. This method is static to - * insure non-reliance on instance's util/conf facilities. + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. * @param args Any arguments to pass BEFORE inputFile path is appended. * @return The Tool instance used to run the test. */ - protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, - String family, String data, Map args, int valueMultiplier,int expectedKVCount) - throws Exception { + protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, String family, + String data, Map args, int valueMultiplier, int expectedKVCount) + throws Exception { Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified( - new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = fs + .makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); if (data == null) { data = "KEY\u001bVALUE1\u001bVALUE2\n"; @@ -440,15 +428,14 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, // Perform basic validation. If the input args did not include // ImportTsv.BULK_OUTPUT_CONF_KEY then validate data in the table. // Otherwise, validate presence of hfiles. 
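
The tests above all drive ImportTsv through ToolRunner with -D options and then validate either the table contents or the generated HFiles. Below is a minimal sketch of that invocation pattern outside the test harness; the paths, table name, and column mapping are placeholders, and the class sits in the org.apache.hadoop.hbase.mapreduce package only so that the ImportTsv configuration keys used by the tests are accessible regardless of their declared visibility.

package org.apache.hadoop.hbase.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.ToolRunner;

public class ImportTsvDryRunSketch {
  public static void main(String[] ignored) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String[] args = new String[] {
      // Column mapping: row key plus two qualifiers in a placeholder family.
      "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B",
      "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,",
      // With a bulk output directory the job writes HFiles instead of mutating the table.
      "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=/tmp/hfiles",
      // In dry-run mode the job is wired up but configured to produce no output.
      "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true",
      "someTable", "/tmp/input.csv" };
    // ToolRunner's generic option parsing folds the -D entries into the Configuration
    // before ImportTsv.run() sees the remaining arguments, exactly as in the tests above.
    System.exit(ToolRunner.run(conf, new ImportTsv(), args));
  }
}
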
- boolean isDryRun = args.containsKey(ImportTsv.DRY_RUN_CONF_KEY) && - "true".equalsIgnoreCase(args.get(ImportTsv.DRY_RUN_CONF_KEY)); + boolean isDryRun = args.containsKey(ImportTsv.DRY_RUN_CONF_KEY) + && "true".equalsIgnoreCase(args.get(ImportTsv.DRY_RUN_CONF_KEY)); if (args.containsKey(ImportTsv.BULK_OUTPUT_CONF_KEY)) { if (isDryRun) { assertFalse(String.format("Dry run mode, %s should not have been created.", - ImportTsv.BULK_OUTPUT_CONF_KEY), - fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY))); + ImportTsv.BULK_OUTPUT_CONF_KEY), fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY))); } else { - validateHFiles(fs, args.get(ImportTsv.BULK_OUTPUT_CONF_KEY), family,expectedKVCount); + validateHFiles(fs, args.get(ImportTsv.BULK_OUTPUT_CONF_KEY), family, expectedKVCount); } } else { validateTable(conf, table, family, valueMultiplier, isDryRun); @@ -464,8 +451,8 @@ protected static Tool doMROnTableTest(HBaseTestingUtil util, TableName table, /** * Confirm ImportTsv via data in online table. */ - private static void validateTable(Configuration conf, TableName tableName, - String family, int valueMultiplier, boolean isDryRun) throws IOException { + private static void validateTable(Configuration conf, TableName tableName, String family, + int valueMultiplier, boolean isDryRun) throws IOException { LOG.debug("Validating table."); Connection connection = ConnectionFactory.createConnection(conf); @@ -487,7 +474,8 @@ private static void validateTable(Configuration conf, TableName tableName, assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. 
} if (isDryRun) { @@ -527,14 +515,11 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami String[] elements = cfStatus.getPath().toString().split(Path.SEPARATOR); String cf = elements[elements.length - 1]; foundFamilies.add(cf); - assertTrue( - String.format( - "HFile output contains a column family (%s) not present in input families (%s)", - cf, configFamilies), - configFamilies.contains(cf)); + assertTrue(String.format( + "HFile output contains a column family (%s) not present in input families (%s)", cf, + configFamilies), configFamilies.contains(cf)); for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) { - assertTrue( - String.format("HFile %s appears to contain no data.", hfile.getPath()), + assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()), hfile.getLen() > 0); // count the number of KVs from all the hfiles if (expectedKVCount > -1) { @@ -543,11 +528,12 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami } } assertTrue(String.format("HFile output does not contain the input family '%s'.", family), - foundFamilies.contains(family)); + foundFamilies.contains(family)); if (expectedKVCount > -1) { - assertTrue(String.format( - "KV count in ouput hfile=<%d> doesn't match with expected KV count=<%d>", actualKVCount, - expectedKVCount), actualKVCount == expectedKVCount); + assertTrue( + String.format("KV count in ouput hfile=<%d> doesn't match with expected KV count=<%d>", + actualKVCount, expectedKVCount), + actualKVCount == expectedKVCount); } } @@ -571,4 +557,3 @@ private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException return count; } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java index a0d1cf7b6cf9..573cf3eee7d7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ /** * Tests for {@link TsvParser}. 
*/ -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestImportTsvParser { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -58,7 +58,7 @@ private void checkParsing(ParsedLine parsed, Iterable expected) { ArrayList parsedCols = new ArrayList<>(); for (int i = 0; i < parsed.getColumnCount(); i++) { parsedCols.add(Bytes.toString(parsed.getLineBytes(), parsed.getColumnOffset(i), - parsed.getColumnLength(i))); + parsed.getColumnLength(i))); } if (!Iterables.elementsEqual(parsedCols, expected)) { fail("Expected: " + Joiner.on(",").join(expected) + "\n" + "Got:" @@ -293,7 +293,7 @@ public void testTsvParseAttributesKey() throws BadTsvLineException { assertEquals(6, parse.getAttributeKeyOffset()); String[] attr = parse.getIndividualAttributes(); int i = 0; - for (String str : attr) { + for (String str : attr) { assertEquals(("key" + i + "=>" + "value" + i), str); i++; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java index d1f48bb299ed..c0ff107df973 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ public class TestJarFinder { @Test public void testJar() throws Exception { - //picking a class that is for sure in a JAR in the classpath + // picking a class that is for sure in a JAR in the classpath String jar = JarFinder.getJar(LoggerFactory.class); Assert.assertTrue(new File(jar).exists()); } @@ -59,8 +59,7 @@ public void testJar() throws Exception { private static void delete(File file) throws IOException { if (file.getAbsolutePath().length() < 5) { throw new IllegalArgumentException( - MessageFormat.format("Path [{0}] is too short, not deleting", - file.getAbsolutePath())); + MessageFormat.format("Path [{0}] is too short, not deleting", file.getAbsolutePath())); } if (file.exists()) { if (file.isDirectory()) { @@ -73,16 +72,15 @@ private static void delete(File file) throws IOException { } if (!file.delete()) { throw new RuntimeException( - MessageFormat.format("Could not delete path [{0}]", - file.getAbsolutePath())); + MessageFormat.format("Could not delete path [{0}]", file.getAbsolutePath())); } } } @Test public void testExpandedClasspath() throws Exception { - //picking a class that is for sure in a directory in the classpath - //in this case the JAR is created on the fly + // picking a class that is for sure in a directory in the classpath + // in this case the JAR is created on the fly String jar = JarFinder.getJar(TestJarFinder.class); Assert.assertTrue(new File(jar).exists()); } @@ -90,7 +88,7 @@ public void testExpandedClasspath() throws Exception { @Test public void testExistingManifest() throws Exception { File dir = new File(System.getProperty("test.build.dir", "target/test-dir"), - TestJarFinder.class.getName() + "-testExistingManifest"); + TestJarFinder.class.getName() + "-testExistingManifest"); delete(dir); dir.mkdirs(); @@ -109,8 +107,7 @@ public void testExistingManifest() throws Exception { ByteArrayOutputStream baos = new ByteArrayOutputStream(); JarOutputStream zos = new JarOutputStream(baos); 
JarFinder.jarDir(dir, "", zos); - JarInputStream jis = - new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); + JarInputStream jis = new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); Assert.assertNotNull(jis.getManifest()); jis.close(); } @@ -118,7 +115,7 @@ public void testExistingManifest() throws Exception { @Test public void testNoManifest() throws Exception { File dir = new File(System.getProperty("test.build.dir", "target/test-dir"), - TestJarFinder.class.getName() + "-testNoManifest"); + TestJarFinder.class.getName() + "-testNoManifest"); delete(dir); dir.mkdirs(); File propsFile = new File(dir, "props.properties"); @@ -128,8 +125,7 @@ public void testNoManifest() throws Exception { ByteArrayOutputStream baos = new ByteArrayOutputStream(); JarOutputStream zos = new JarOutputStream(baos); JarFinder.jarDir(dir, "", zos); - JarInputStream jis = - new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); + JarInputStream jis = new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); Assert.assertNotNull(jis.getManifest()); jis.close(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java index eca7ca6f32d6..4cd86aebe2ab 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,11 +31,10 @@ import org.junit.experimental.categories.Category; /** - * Tests various scan start and stop row scenarios. This is set in a scan and - * tested in a MapReduce job to see if that is handed over and done properly - * too. + * Tests various scan start and stop row scenarios. This is set in a scan and tested in a MapReduce + * job to see if that is handed over and done properly too. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestMultiTableInputFormat extends MultiTableInputFormatTestBase { @ClassRule @@ -47,9 +46,9 @@ public static void setupLogging() { Log4jUtils.enableDebug(MultiTableInputFormat.class); } - @Override + @Override protected void initJob(List scans, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); + TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class, ImmutableBytesWritable.class, + ImmutableBytesWritable.class, job); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index 716d60356183..1f516fe693d0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -67,20 +67,20 @@ /** * Tests of MultiTableInputFormatBase. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestMultiTableInputFormatBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMultiTableInputFormatBase.class); - @Rule public final TestName name = new TestName(); + @Rule + public final TestName name = new TestName(); /** - * Test getSplits only puts up one Connection. - * In past it has put up many Connections. Each Connection setup comes with a fresh new cache - * so we have to do fresh hit on hbase:meta. Should only do one Connection when doing getSplits - * even if a MultiTableInputFormat. + * Test getSplits only puts up one Connection. In past it has put up many Connections. Each + * Connection setup comes with a fresh new cache so we have to do fresh hit on hbase:meta. Should + * only do one Connection when doing getSplits even if a MultiTableInputFormat. * @throws IOException */ @Test @@ -89,8 +89,7 @@ public void testMRSplitsConnectionCount() throws IOException { MultiTableInputFormatBase mtif = new MultiTableInputFormatBase() { @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) - throws IOException, InterruptedException { + TaskAttemptContext context) throws IOException, InterruptedException { return super.createRecordReader(split, context); } }; @@ -125,7 +124,7 @@ public static class MRSplitsConnection implements Connection { private final Configuration configuration; static final AtomicInteger creations = new AtomicInteger(0); - MRSplitsConnection (Configuration conf, ExecutorService pool, User user) throws IOException { + MRSplitsConnection(Configuration conf, ExecutorService pool, User user) throws IOException { this.configuration = conf; creations.incrementAndGet(); } @@ -158,31 +157,25 @@ public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws I @Override public RegionLocator getRegionLocator(final TableName tableName) throws IOException { // Make up array of start keys. We start off w/ empty byte array. - final byte [][] startKeys = new byte [][] {HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), - Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), - Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), - Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), - Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), - Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("zzz")}; + final byte[][] startKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaaa"), + Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), + Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), + Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), + Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), + Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("zzz") }; // Make an array of end keys. We end with the empty byte array. 
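
The point of testMRSplitsConnectionCount above is that getSplits() should open exactly one Connection, because every new Connection starts with an empty region cache and forces another round trip to hbase:meta. MRSplitsConnection verifies that by bumping a static counter in its constructor. A stripped-down sketch of just that counting pattern (without the full Connection interface, which the test stubs out) looks like this:

import java.util.concurrent.atomic.AtomicInteger;

public class CreationCountingStub {
  // Shared across all instances so a test can ask how many were ever constructed.
  static final AtomicInteger creations = new AtomicInteger(0);

  CreationCountingStub() {
    creations.incrementAndGet();
  }

  public static void main(String[] unused) {
    new CreationCountingStub();
    // After exercising the code path under test, assert on the counter; the test
    // above does the equivalent with MRSplitsConnection.creations after getSplits().
    if (creations.get() != 1) {
      throw new AssertionError("expected exactly one creation, got " + creations.get());
    }
  }
}
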
- final byte [][] endKeys = new byte[][] { - Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), - Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), - Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), - Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), - Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), - Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("zzz"), - HConstants.EMPTY_BYTE_ARRAY}; + final byte[][] endKeys = new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), + Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), + Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("lll"), + Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), + Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"), + Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("zzz"), + HConstants.EMPTY_BYTE_ARRAY }; // Now make a map of start keys to HRegionLocations. Let the server namber derive from // the start key. - final Map map = - new TreeMap(Bytes.BYTES_COMPARATOR); - for (byte [] startKey: startKeys) { + final Map map = + new TreeMap(Bytes.BYTES_COMPARATOR); + for (byte[] startKey : startKeys) { HRegionLocation hrl = new HRegionLocation( RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), ServerName.valueOf(Bytes.toString(startKey), 0, 0)); @@ -192,19 +185,20 @@ public RegionLocator getRegionLocator(final TableName tableName) throws IOExcept final List locations = new ArrayList(map.values()); // Now make a RegionLocator mock backed by the abpve map and list of locations. RegionLocator mockedRegionLocator = Mockito.mock(RegionLocator.class); - Mockito.when(mockedRegionLocator.getRegionLocation(Mockito.any(byte [].class), - Mockito.anyBoolean())). - thenAnswer(new Answer() { + Mockito + .when( + mockedRegionLocator.getRegionLocation(Mockito.any(byte[].class), Mockito.anyBoolean())) + .thenAnswer(new Answer() { @Override public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] key = (byte [])args[0]; + Object[] args = invocationOnMock.getArguments(); + byte[] key = (byte[]) args[0]; return map.get(key); } }); Mockito.when(mockedRegionLocator.getAllRegionLocations()).thenReturn(locations); - Mockito.when(mockedRegionLocator.getStartEndKeys()). - thenReturn(new Pair(startKeys, endKeys)); + Mockito.when(mockedRegionLocator.getStartEndKeys()) + .thenReturn(new Pair(startKeys, endKeys)); Mockito.when(mockedRegionLocator.getName()).thenReturn(tableName); return mockedRegionLocator; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java index c9ba9badfa2a..63064f856a14 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -73,9 +73,9 @@ public void setUp() throws Exception { @Override protected void initJob(List scans, Job job) throws IOException { - TableMapReduceUtil - .initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), ScanMapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); + TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), + ScanMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, + restoreDir); } protected Map> getSnapshotScanMapping(final List scans) { @@ -84,7 +84,7 @@ protected Map> getSnapshotScanMapping(final List @Override public String apply(Scan input) { return snapshotNameForTable( - Bytes.toStringBinary(input.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME))); + Bytes.toStringBinary(input.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME))); } }).asMap(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java index 455b64b915b7..1a8c3c46eba7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,19 +71,16 @@ public void setUp() throws Exception { // feels weird to introduce a RestoreSnapshotHelperFactory and inject that, which would // probably be the more "pure" // way of doing things. This is the lesser of two evils, perhaps? - doNothing().when(this.subject). - restoreSnapshot(any(), any(), any(), - any(), any()); + doNothing().when(this.subject).restoreSnapshot(any(), any(), any(), any(), any()); this.conf = new Configuration(); this.rootDir = new Path("file:///test-root-dir"); CommonFSUtils.setRootDir(conf, rootDir); - this.snapshotScans = ImmutableMap.>of("snapshot1", - ImmutableList.of(new Scan().withStartRow(Bytes.toBytes("1")) - .withStopRow(Bytes.toBytes("2"))), "snapshot2", - ImmutableList.of(new Scan().withStartRow(Bytes.toBytes("3")) - .withStopRow(Bytes.toBytes("4")), - new Scan().withStartRow(Bytes.toBytes("5")).withStopRow(Bytes.toBytes("6")))); + this.snapshotScans = ImmutableMap.> of("snapshot1", + ImmutableList.of(new Scan().withStartRow(Bytes.toBytes("1")).withStopRow(Bytes.toBytes("2"))), + "snapshot2", + ImmutableList.of(new Scan().withStartRow(Bytes.toBytes("3")).withStopRow(Bytes.toBytes("4")), + new Scan().withStartRow(Bytes.toBytes("5")).withStopRow(Bytes.toBytes("6")))); this.restoreDir = new Path(CommonFSUtils.getRootDir(conf), "restore-dir"); @@ -93,8 +90,8 @@ public void callSetInput() throws IOException { subject.setInput(this.conf, snapshotScans, restoreDir); } - public Map> toScanWithEquals( - Map> snapshotScans) throws IOException { + public Map> + toScanWithEquals(Map> snapshotScans) throws IOException { Map> rtn = Maps.newHashMap(); for (Map.Entry> entry : snapshotScans.entrySet()) { @@ -116,7 +113,6 @@ public static class ScanWithEquals { /** * Creates a new instance of this class while copying all values. - * * @param scan The scan instance to copy from. 
* @throws java.io.IOException When copying the values fails. */ @@ -131,8 +127,8 @@ public boolean equals(Object obj) { return false; } ScanWithEquals otherScan = (ScanWithEquals) obj; - return Objects.equals(this.startRow, otherScan.startRow) && Objects - .equals(this.stopRow, otherScan.stopRow); + return Objects.equals(this.startRow, otherScan.startRow) + && Objects.equals(this.stopRow, otherScan.stopRow); } @Override @@ -142,9 +138,8 @@ public int hashCode() { @Override public String toString() { - return org.apache.hbase.thirdparty.com.google.common.base.MoreObjects. - toStringHelper(this).add("startRow", startRow) - .add("stopRow", stopRow).toString(); + return org.apache.hbase.thirdparty.com.google.common.base.MoreObjects.toStringHelper(this) + .add("startRow", startRow).add("stopRow", stopRow).toString(); } } @@ -179,7 +174,7 @@ public void testSetInputCreatesRestoreDirectoriesUnderRootRestoreDir() throws Ex for (Path snapshotDir : restoreDirs.values()) { assertEquals("Expected " + snapshotDir + " to be a child of " + restoreDir, restoreDir, - snapshotDir.getParent()); + snapshotDir.getParent()); } } @@ -191,7 +186,7 @@ public void testSetInputRestoresSnapshots() throws Exception { for (Map.Entry entry : snapshotDirs.entrySet()) { verify(this.subject).restoreSnapshot(eq(this.conf), eq(entry.getKey()), eq(this.rootDir), - eq(entry.getValue()), any()); + eq(entry.getValue()), any()); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java index d55fc829bfef..47f4d3eaa048 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,11 +53,11 @@ import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestMultithreadedTableMapper { @ClassRule @@ -65,21 +65,19 @@ public class TestMultithreadedTableMapper { HBaseClassTestRule.forClass(TestMultithreadedTableMapper.class); private static final Logger LOG = LoggerFactory.getLogger(TestMultithreadedTableMapper.class); - private static final HBaseTestingUtil UTIL = - new HBaseTestingUtil(); + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); static final TableName MULTI_REGION_TABLE_NAME = TableName.valueOf("mrtest"); static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); - static final int NUMBER_OF_THREADS = 10; + static final int NUMBER_OF_THREADS = 10; @BeforeClass public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. 
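
The class comment above describes the job: read every row, reverse one cell's value, and write it back, with the per-row work done by an inner mapper that MultithreadedTableMapper runs on a pool of worker threads. The following sketch shows only the job wiring, mirroring the calls that appear in runTestOnTable() further down; the table name, families, thread count, and the trivial placeholder mapper are illustrative and not taken from the patch.

package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MultithreadedMapperWiringSketch {

  /** Placeholder mapper: copies each row's contents:contents cell into a Put for the text family. */
  public static class CopyMapper extends TableMapper<ImmutableBytesWritable, Put> {
    @Override
    public void map(ImmutableBytesWritable key, Result value, Context context)
      throws IOException, InterruptedException {
      // Assumes every input row carries a contents:contents cell, as the test data does.
      byte[] cellValue = value.getValue(Bytes.toBytes("contents"), Bytes.toBytes("contents"));
      Put put = new Put(key.get());
      put.addColumn(Bytes.toBytes("text"), Bytes.toBytes("text"), cellValue);
      context.write(key, put);
    }
  }

  public static void main(String[] unused) throws Exception {
    TableName tableName = TableName.valueOf("mrtest");
    Job job = Job.getInstance(HBaseConfiguration.create(), "multithreaded-mapper-sketch");
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("contents"));
    // The outer mapper is MultithreadedTableMapper; CopyMapper does the actual work
    // on a pool of worker threads inside each map task.
    TableMapReduceUtil.initTableMapperJob(tableName, scan, MultithreadedTableMapper.class,
      ImmutableBytesWritable.class, Put.class, job);
    MultithreadedTableMapper.setMapperClass(job, CopyMapper.class);
    MultithreadedTableMapper.setNumberOfThreads(job, 10);
    TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(),
      IdentityTableReducer.class, job);
    FileOutputFormat.setOutputPath(job, new Path("/tmp/multithreaded-mapper-sketch"));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
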
UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); UTIL.startMiniCluster(); - Table table = - UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, - OUTPUT_FAMILY }); + Table table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, + new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME); } @@ -92,29 +90,25 @@ public static void afterClass() throws Exception { /** * Pass the given key and processed record reduce */ - public static class ProcessContentsMapper - extends TableMapper { + public static class ProcessContentsMapper extends TableMapper { /** * Pass the key, and reversed value to reduce - * * @param key * @param value * @param context * @throws IOException */ @Override - public void map(ImmutableBytesWritable key, Result value, - Context context) - throws IOException, InterruptedException { + public void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, INPUT_FAMILY)); @@ -148,15 +142,12 @@ private void runTestOnTable(Table table) job.setNumReduceTasks(1); Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); - TableMapReduceUtil.initTableMapperJob( - table.getName(), scan, - MultithreadedTableMapper.class, ImmutableBytesWritable.class, - Put.class, job); + TableMapReduceUtil.initTableMapperJob(table.getName(), scan, MultithreadedTableMapper.class, + ImmutableBytesWritable.class, Put.class, job); MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class); MultithreadedTableMapper.setNumberOfThreads(job, NUMBER_OF_THREADS); - TableMapReduceUtil.initTableReducerJob( - table.getName().getNameAsString(), - IdentityTableReducer.class, job); + TableMapReduceUtil.initTableReducerJob(table.getName().getNameAsString(), + IdentityTableReducer.class, job); FileOutputFormat.setOutputPath(job, new Path("test")); LOG.info("Started " + table.getName()); assertTrue(job.waitForCompletion(true)); @@ -166,8 +157,7 @@ private void runTestOnTable(Table table) } finally { table.close(); if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -184,8 +174,8 @@ private void verify(TableName tableName) throws IOException { verified = true; break; } catch (NullPointerException e) { - // If here, a cell was empty. Presume its because updates came in - // after the scanner had been opened. Wait a while and retry. + // If here, a cell was empty. Presume its because updates came in + // after the scanner had been opened. Wait a while and retry. LOG.debug("Verification attempt failed: " + e.getMessage()); } try { @@ -199,15 +189,13 @@ private void verify(TableName tableName) throws IOException { } /** - * Looks at every value of the mapreduce output and verifies that indeed - * the values have been reversed. 
- * + * Looks at every value of the mapreduce output and verifies that indeed the values have been + * reversed. * @param table Table to scan. * @throws IOException * @throws NullPointerException if we failed to find a cell value */ - private void verifyAttempt(final Table table) - throws IOException, NullPointerException { + private void verifyAttempt(final Table table) throws IOException, NullPointerException { Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); scan.addFamily(OUTPUT_FAMILY); @@ -215,37 +203,34 @@ private void verifyAttempt(final Table table) try { Iterator itr = scanner.iterator(); assertTrue(itr.hasNext()); - while(itr.hasNext()) { + while (itr.hasNext()) { Result r = itr.next(); if (LOG.isDebugEnabled()) { - if (r.size() > 2 ) { - throw new IOException("Too many results, expected 2 got " + - r.size()); + if (r.size() > 2) { + throw new IOException("Too many results, expected 2 got " + r.size()); } } byte[] firstValue = null; byte[] secondValue = null; int count = 0; - for(Cell kv : r.listCells()) { + for (Cell kv : r.listCells()) { if (count == 0) { firstValue = CellUtil.cloneValue(kv); - }else if (count == 1) { + } else if (count == 1) { secondValue = CellUtil.cloneValue(kv); - }else if (count == 2) { + } else if (count == 2) { break; } count++; } String first = ""; if (firstValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": first value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": first value is null"); } first = Bytes.toString(firstValue); String second = ""; if (secondValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": second value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": second value is null"); } byte[] secondReversed = new byte[secondValue.length]; for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) { @@ -254,9 +239,9 @@ private void verifyAttempt(final Table table) second = Bytes.toString(secondReversed); if (first.compareTo(second) != 0) { if (LOG.isDebugEnabled()) { - LOG.debug("second key is not the reverse of first. row=" + - Bytes.toStringBinary(r.getRow()) + ", first value=" + first + - ", second value=" + second); + LOG.debug( + "second key is not the reverse of first. row=" + Bytes.toStringBinary(r.getRow()) + + ", first value=" + first + ", second value=" + second); } fail(); } @@ -267,4 +252,3 @@ private void verifyAttempt(final Table table) } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java index 6cce69660895..4c4d9bf3dda1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestRegionSizeCalculator { @ClassRule @@ -51,19 +51,16 @@ public class TestRegionSizeCalculator { private Configuration configuration = new Configuration(); private final long megabyte = 1024L * 1024L; - private final ServerName sn = ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT, - ServerName.NON_STARTCODE); + private final ServerName sn = + ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT, ServerName.NON_STARTCODE); @Test public void testSimpleTestCase() throws Exception { RegionLocator regionLocator = mockRegionLocator("region1", "region2", "region3"); - Admin admin = mockAdmin( - mockRegion("region1", 123), - mockRegion("region3", 1232), - mockRegion("region2", 54321) - ); + Admin admin = mockAdmin(mockRegion("region1", 123), mockRegion("region3", 1232), + mockRegion("region2", 54321)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); @@ -76,41 +73,36 @@ public void testSimpleTestCase() throws Exception { assertEquals(3, calculator.getRegionSizeMap().size()); } - /** - * When size of region in megabytes is larger than largest possible integer there could be - * error caused by lost of precision. - * */ + * When size of region in megabytes is larger than largest possible integer there could be error + * caused by lost of precision. + */ @Test public void testLargeRegion() throws Exception { RegionLocator regionLocator = mockRegionLocator("largeRegion"); - Admin admin = mockAdmin( - mockRegion("largeRegion", Integer.MAX_VALUE) - ); + Admin admin = mockAdmin(mockRegion("largeRegion", Integer.MAX_VALUE)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); assertEquals(((long) Integer.MAX_VALUE) * megabyte, - calculator.getRegionSize(Bytes.toBytes("largeRegion"))); + calculator.getRegionSize(Bytes.toBytes("largeRegion"))); } - /** When calculator is disabled, it should return 0 for each request.*/ + /** When calculator is disabled, it should return 0 for each request. */ @Test public void testDisabled() throws Exception { String regionName = "cz.goout:/index.html"; RegionLocator table = mockRegionLocator(regionName); - Admin admin = mockAdmin( - mockRegion(regionName, 999) - ); + Admin admin = mockAdmin(mockRegion(regionName, 999)); - //first request on enabled calculator + // first request on enabled calculator RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin); assertEquals(999 * megabyte, calculator.getRegionSize(Bytes.toBytes(regionName))); - //then disabled calculator. + // then disabled calculator. configuration.setBoolean(RegionSizeCalculator.ENABLE_REGIONSIZECALCULATOR, false); RegionSizeCalculator disabledCalculator = new RegionSizeCalculator(table, admin); assertEquals(0 * megabyte, disabledCalculator.getRegionSize(Bytes.toBytes(regionName))); @@ -120,7 +112,7 @@ public void testDisabled() throws Exception { /** * Makes some table with given region names. - * */ + */ private RegionLocator mockRegionLocator(String... regionNames) throws IOException { RegionLocator mockedTable = Mockito.mock(RegionLocator.class); when(mockedTable.getName()).thenReturn(TableName.valueOf("sizeTestTable")); @@ -138,7 +130,7 @@ private RegionLocator mockRegionLocator(String... 
regionNames) throws IOExceptio /** * Creates mock returning RegionLoad info about given servers. - */ + */ private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception { Admin mockAdmin = Mockito.mock(Admin.class); List regionLoads = new ArrayList<>(); @@ -153,9 +145,8 @@ private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception { /** * Creates mock of region with given name and size. - * - * @param fileSizeMb number of megabytes occupied by region in file store in megabytes - * */ + * @param fileSizeMb number of megabytes occupied by region in file store in megabytes + */ private RegionMetrics mockRegion(String regionName, int fileSizeMb) { RegionMetrics region = Mockito.mock(RegionMetrics.class); when(region.getRegionName()).thenReturn(Bytes.toBytes(regionName)); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java index c3abf4d544e0..71acc0c11557 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -43,25 +44,17 @@ /** * Basic test of {@link RoundRobinTableInputFormat}; i.e. RRTIF. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRoundRobinTableInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRoundRobinTableInputFormat.class); private static final int SERVERS_COUNT = 5; - private static final String[] KEYS = { - "aa", "ab", "ac", "ad", "ae", - "ba", "bb", "bc", "bd", "be", - "ca", "cb", "cc", "cd", "ce", - "da", "db", "dc", "dd", "de", - "ea", "eb", "ec", "ed", "ee", - "fa", "fb", "fc", "fd", "fe", - "ga", "gb", "gc", "gd", "ge", - "ha", "hb", "hc", "hd", "he", - "ia", "ib", "ic", "id", "ie", - "ja", "jb", "jc", "jd", "je", "jf" - }; + private static final String[] KEYS = { "aa", "ab", "ac", "ad", "ae", "ba", "bb", "bc", "bd", "be", + "ca", "cb", "cc", "cd", "ce", "da", "db", "dc", "dd", "de", "ea", "eb", "ec", "ed", "ee", + "fa", "fb", "fc", "fd", "fe", "ga", "gb", "gc", "gd", "ge", "ha", "hb", "hc", "hd", "he", + "ia", "ib", "ic", "id", "ie", "ja", "jb", "jc", "jd", "je", "jf" }; /** * Test default behavior. @@ -78,8 +71,8 @@ public void testRoundRobinSplit() throws IOException, InterruptedException { Arrays.sort(copy.toArray(new InputSplit[0]), new SplitComparator()); // Assert the sort is retained even after passing through SplitComparator. 
for (int i = 0; i < sortedSplits.size(); i++) { - TableSplit sortedTs = (TableSplit)sortedSplits.get(i); - TableSplit copyTs = (TableSplit)copy.get(i); + TableSplit sortedTs = (TableSplit) sortedSplits.get(i); + TableSplit copyTs = (TableSplit) copy.get(i); assertEquals(sortedTs.getEncodedRegionName(), copyTs.getEncodedRegionName()); } } @@ -90,17 +83,17 @@ public void testRoundRobinSplit() throws IOException, InterruptedException { private List createSplits() { List splits = new ArrayList<>(KEYS.length - 1); for (int i = 0; i < KEYS.length - 1; i++) { - InputSplit split = new TableSplit(TableName.valueOf("test"), new Scan(), - Bytes.toBytes(KEYS[i]), Bytes.toBytes(KEYS[i + 1]), String.valueOf(i % SERVERS_COUNT + 1), - "", 0); + InputSplit split = + new TableSplit(TableName.valueOf("test"), new Scan(), Bytes.toBytes(KEYS[i]), + Bytes.toBytes(KEYS[i + 1]), String.valueOf(i % SERVERS_COUNT + 1), "", 0); splits.add(split); } return splits; } private void testDistribution(List list) throws IOException, InterruptedException { - for (int i = 0; i < KEYS.length/SERVERS_COUNT; i++) { - int [] counts = new int[SERVERS_COUNT]; + for (int i = 0; i < KEYS.length / SERVERS_COUNT; i++) { + int[] counts = new int[SERVERS_COUNT]; for (int j = i * SERVERS_COUNT; j < i * SERVERS_COUNT + SERVERS_COUNT; j++) { counts[Integer.parseInt(list.get(j).getLocations()[0]) - 1]++; } @@ -120,21 +113,21 @@ private static class SplitComparator implements Comparator { public int compare(InputSplit o1, InputSplit o2) { try { return Long.compare(o1.getLength(), o2.getLength()); - } catch (IOException|InterruptedException e) { + } catch (IOException | InterruptedException e) { throw new RuntimeException("exception in compare", e); } } } /** - * Assert that lengths are descending. RRTIF writes lengths in descending order so any - * subsequent sort using dump SplitComparator as is done in JobSubmitter up in Hadoop keeps - * our RRTIF ordering. + * Assert that lengths are descending. RRTIF writes lengths in descending order so any subsequent + * sort using dump SplitComparator as is done in JobSubmitter up in Hadoop keeps our RRTIF + * ordering. */ private void assertLengthDescending(List list) - throws IOException, InterruptedException { + throws IOException, InterruptedException { long previousLength = Long.MAX_VALUE; - for (InputSplit is: list) { + for (InputSplit is : list) { long length = is.getLength(); assertTrue(previousLength + " " + length, previousLength > length); previousLength = length; @@ -166,12 +159,12 @@ public void testConfigureUnconfigure() { private void checkRetainsBooleanValue(JobContext jobContext, RoundRobinTableInputFormat rrtif, final boolean b) { - jobContext.getConfiguration(). - setBoolean(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE, b); + jobContext.getConfiguration() + .setBoolean(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE, b); rrtif.configure(); rrtif.unconfigure(); - String value = jobContext.getConfiguration(). 
- get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE); + String value = jobContext.getConfiguration() + .get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE); assertEquals(b, Boolean.valueOf(value)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index d33f30a70759..88913f7f0910 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -50,12 +49,12 @@ /** * Test the rowcounter map reduce job. */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestRowCounter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowCounter.class); + HBaseClassTestRule.forClass(TestRowCounter.class); private static final Logger LOG = LoggerFactory.getLogger(TestRowCounter.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -89,71 +88,54 @@ public static void tearDownAfterClass() throws Exception { /** * Test a case when no column was specified in command line arguments. - * * @throws Exception */ @Test public void testRowCounterNoColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME - }; + String[] args = new String[] { TABLE_NAME }; runRowCount(args, 10); } /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows. - * + * Test a case when the column specified in command line arguments is exclusive for few rows. * @throws Exception */ @Test public void testRowCounterExclusiveColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL1 }; runRowCount(args, 8); } /** - * Test a case when the column specified in command line arguments is - * one for which the qualifier contains colons. - * + * Test a case when the column specified in command line arguments is one for which the qualifier + * contains colons. * @throws Exception */ @Test public void testRowCounterColumnWithColonInQualifier() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN }; runRowCount(args, 8); } /** - * Test a case when the column specified in command line arguments is not part - * of first KV for a row. - * + * Test a case when the column specified in command line arguments is not part of first KV for a + * row. 
* @throws Exception */ @Test public void testRowCounterHiddenColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL2 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL2 }; runRowCount(args, 10); } - /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows and also a row range filter is specified - * + * Test a case when the column specified in command line arguments is exclusive for few rows and + * also a row range filter is specified * @throws Exception */ @Test public void testRowCounterColumnAndRowRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 }; runRowCount(args, 8); } @@ -163,9 +145,7 @@ public void testRowCounterColumnAndRowRange() throws Exception { */ @Test public void testRowCounterRowSingleRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3" }; runRowCount(args, 2); } @@ -175,9 +155,7 @@ public void testRowCounterRowSingleRange() throws Exception { */ @Test public void testRowCounterRowSingleRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3" }; runRowCount(args, 3); } @@ -187,9 +165,7 @@ public void testRowCounterRowSingleRangeUpperBound() throws Exception { */ @Test public void testRowCounterRowMultiRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" }; runRowCount(args, 5); } @@ -199,22 +175,18 @@ public void testRowCounterRowMultiRangeUpperBound() throws Exception { */ @Test public void testRowCounterRowMultiRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" }; runRowCount(args, 5); } /** - * Test a case when a range is specified with multiple ranges of start-end keys; - * one range is filled, another two are not + * Test a case when a range is specified with multiple ranges of start-end keys; one range is + * filled, another two are not * @throws Exception */ @Test public void testRowCounterRowMultiEmptyRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;;" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;;" }; runRowCount(args, 2); } @@ -222,19 +194,16 @@ public void testRowCounterRowMultiEmptyRange() throws Exception { public void testRowCounter10kRowRange() throws Exception { String tableName = TABLE_NAME + "10k"; - try (Table table = TEST_UTIL.createTable( - TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { + try ( + Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { writeRows(table, 10000, 0); } - String[] args = new String[] { - tableName, "--range=\\x00row9872,\\x00row9875" - }; + String[] args = new String[] { tableName, "--range=\\x00row9872,\\x00row9875" }; runRowCount(args, 3); } /** * Test a case when the timerange is specified with --starttime and --endtime options - * * 
@throws Exception */ @Test @@ -248,7 +217,8 @@ public void testRowCounterTimeRange() throws Exception { long ts; // clean up content of TABLE_NAME - Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM)); + Table table = + TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM)); ts = EnvironmentEdgeManager.currentTime(); put1.addColumn(family, col1, ts, Bytes.toBytes("val1")); @@ -262,38 +232,25 @@ public void testRowCounterTimeRange() throws Exception { table.put(put3); table.close(); - String[] args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + ts - }; + String[] args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + ts }; runRowCount(args, 1); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + (ts - 10) - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + (ts - 10) }; runRowCount(args, 1); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + ts, - "--endtime=" + (ts + 1000) - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + ts, + "--endtime=" + (ts + 1000) }; runRowCount(args, 2); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + (ts - 30 * 1000), - "--endtime=" + (ts + 30 * 1000), - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, + "--starttime=" + (ts - 30 * 1000), "--endtime=" + (ts + 30 * 1000), }; runRowCount(args, 3); } /** * Run the RowCounter map reduce job and verify the row count. - * * @param args the command line arguments to be used for rowcounter job. * @param expectedCount the expected row count (result of map reduce job). * @throws Exception @@ -301,18 +258,17 @@ public void testRowCounterTimeRange() throws Exception { private void runRowCount(String[] args, int expectedCount) throws Exception { RowCounter rowCounter = new RowCounter(); rowCounter.setConf(TEST_UTIL.getConfiguration()); - args = Arrays.copyOf(args, args.length+1); - args[args.length-1]="--expectedCount=" + expectedCount; + args = Arrays.copyOf(args, args.length + 1); + args[args.length - 1] = "--expectedCount=" + expectedCount; long start = EnvironmentEdgeManager.currentTime(); int result = rowCounter.run(args); long duration = EnvironmentEdgeManager.currentTime() - start; LOG.debug("row count duration (ms): " + duration); - assertTrue(result==0); + assertTrue(result == 0); } /** * Run the RowCounter map reduce job and verify the row count. - * * @param args the command line arguments to be used for rowcounter job. * @param expectedCount the expected row count (result of map reduce job). * @throws Exception in case of any unexpected error. @@ -330,66 +286,50 @@ private void runCreateSubmittableJobWithArgs(String[] args, int expectedCount) t @Test public void testCreateSubmittableJobWithArgsNoColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME - }; + String[] args = new String[] { TABLE_NAME }; runCreateSubmittableJobWithArgs(args, 10); } /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows. - * + * Test a case when the column specified in command line arguments is exclusive for few rows. * @throws Exception in case of any unexpected error. 
*/ @Test public void testCreateSubmittableJobWithArgsExclusiveColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL1 }; runCreateSubmittableJobWithArgs(args, 8); } /** - * Test a case when the column specified in command line arguments is - * one for which the qualifier contains colons. - * + * Test a case when the column specified in command line arguments is one for which the qualifier + * contains colons. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsColumnWithColonInQualifier() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN }; runCreateSubmittableJobWithArgs(args, 8); } /** - * Test a case when the column specified in command line arguments is not part - * of first KV for a row. - * + * Test a case when the column specified in command line arguments is not part of first KV for a + * row. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsHiddenColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL2 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL2 }; runCreateSubmittableJobWithArgs(args, 10); } - /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows and also a row range filter is specified - * + * Test a case when the column specified in command line arguments is exclusive for few rows and + * also a row range filter is specified * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsColumnAndRowRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 }; runCreateSubmittableJobWithArgs(args, 8); } @@ -399,9 +339,7 @@ public void testCreateSubmittableJobWithArgsColumnAndRowRange() throws Exception */ @Test public void testCreateSubmittableJobWithArgsRowSingleRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3" }; runCreateSubmittableJobWithArgs(args, 2); } @@ -411,9 +349,7 @@ public void testCreateSubmittableJobWithArgsRowSingleRange() throws Exception { */ @Test public void testCreateSubmittableJobWithArgsRowSingleRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3" }; runCreateSubmittableJobWithArgs(args, 3); } @@ -423,9 +359,7 @@ public void testCreateSubmittableJobWithArgsRowSingleRangeUpperBound() throws Ex */ @Test public void testCreateSubmittableJobWithArgsRowMultiRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" }; runCreateSubmittableJobWithArgs(args, 5); } @@ -435,22 +369,18 @@ public void testCreateSubmittableJobWithArgsRowMultiRangeUpperBound() throws Exc */ @Test public void testCreateSubmittableJobWithArgsRowMultiRange() throws Exception { - String[] args = new String[] { - 
TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" }; runCreateSubmittableJobWithArgs(args, 5); } /** - * Test a case when a range is specified with multiple ranges of start-end keys; - * one range is filled, another two are not + * Test a case when a range is specified with multiple ranges of start-end keys; one range is + * filled, another two are not * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsRowMultiEmptyRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;;" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;;" }; runCreateSubmittableJobWithArgs(args, 2); } @@ -458,19 +388,16 @@ public void testCreateSubmittableJobWithArgsRowMultiEmptyRange() throws Exceptio public void testCreateSubmittableJobWithArgs10kRowRange() throws Exception { String tableName = TABLE_NAME + "CreateSubmittableJobWithArgs10kRowRange"; - try (Table table = TEST_UTIL.createTable( - TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { + try ( + Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { writeRows(table, 10000, 0); } - String[] args = new String[] { - tableName, "--range=\\x00row9872,\\x00row9875" - }; + String[] args = new String[] { tableName, "--range=\\x00row9872,\\x00row9875" }; runCreateSubmittableJobWithArgs(args, 3); } /** * Test a case when the timerange is specified with --starttime and --endtime options - * * @throws Exception in case of any unexpected error. */ @Test @@ -483,7 +410,7 @@ public void testCreateSubmittableJobWithArgsTimeRange() throws Exception { long ts; - String tableName = TABLE_NAME_TS_RANGE+"CreateSubmittableJobWithArgs"; + String tableName = TABLE_NAME_TS_RANGE + "CreateSubmittableJobWithArgs"; // clean up content of TABLE_NAME Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM)); @@ -499,39 +426,26 @@ public void testCreateSubmittableJobWithArgsTimeRange() throws Exception { table.put(put3); table.close(); - String[] args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + ts - }; + String[] args = + new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + 0, "--endtime=" + ts }; runCreateSubmittableJobWithArgs(args, 1); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + (ts - 10) - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + (ts - 10) }; runCreateSubmittableJobWithArgs(args, 1); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + ts, - "--endtime=" + (ts + 1000) - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + ts, + "--endtime=" + (ts + 1000) }; runCreateSubmittableJobWithArgs(args, 2); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + (ts - 30 * 1000), - "--endtime=" + (ts + 30 * 1000), - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + (ts - 30 * 1000), + "--endtime=" + (ts + 30 * 1000), }; runCreateSubmittableJobWithArgs(args, 3); } /** - * Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have - * two columns, Few have one. - * + * Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have two columns, Few have + * one. 
* @param table * @throws IOException */ @@ -570,7 +484,7 @@ private static void writeRows(Table table, int totalRows, int rowsWithOneCol) th @Test public void testImportMain() throws Exception { SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); String[] args = {}; try { @@ -602,13 +516,13 @@ public void testHelp() throws Exception { ByteArrayOutputStream data = new ByteArrayOutputStream(); PrintStream stream = new PrintStream(data); System.setOut(stream); - String[] args = {"-h"}; + String[] args = { "-h" }; runRowCount(args, 0); assertUsageContent(data.toString()); - args = new String[]{"--help"}; + args = new String[] { "--help" }; runRowCount(args, 0); assertUsageContent(data.toString()); - }finally { + } finally { System.setOut(oldPrintStream); } } @@ -616,27 +530,27 @@ public void testHelp() throws Exception { @Test public void testInvalidTable() throws Exception { try { - String[] args = {"invalid"}; + String[] args = { "invalid" }; runRowCount(args, 0); fail("RowCounter should had failed with invalid table."); - }catch (Throwable e){ + } catch (Throwable e) { assertTrue(e instanceof AssertionError); } } private void assertUsageContent(String usage) { - assertTrue(usage.contains("usage: hbase rowcounter " - + " [options] [ ...]")); + assertTrue(usage + .contains("usage: hbase rowcounter " + " [options] [ ...]")); assertTrue(usage.contains("Options:\n")); - assertTrue(usage.contains("--starttime= " - + "starting time filter to start counting rows from.\n")); + assertTrue(usage.contains( + "--starttime= " + "starting time filter to start counting rows from.\n")); assertTrue(usage.contains("--endtime= " - + "end time filter limit, to only count rows up to this timestamp.\n")); - assertTrue(usage.contains("--range= " - + "[startKey],[endKey][;[startKey],[endKey]...]]\n")); + + "end time filter limit, to only count rows up to this timestamp.\n")); + assertTrue(usage + .contains("--range= " + "[startKey],[endKey][;[startKey],[endKey]...]]\n")); assertTrue(usage.contains("--expectedCount= expected number of rows to be count.\n")); - assertTrue(usage.contains("For performance, " - + "consider the following configuration properties:\n")); + assertTrue( + usage.contains("For performance, " + "consider the following configuration properties:\n")); assertTrue(usage.contains("-Dhbase.client.scanner.caching=100\n")); assertTrue(usage.contains("-Dmapreduce.map.speculative=false\n")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java index f93e76ae8031..efc97e82dabf 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ /** * Test of simple partitioner. 
*/ -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestSimpleTotalOrderPartitioner { @ClassRule @@ -48,7 +48,7 @@ public class TestSimpleTotalOrderPartitioner { public void testSplit() throws Exception { String start = "a"; String end = "{"; - SimpleTotalOrderPartitioner p = new SimpleTotalOrderPartitioner<>(); + SimpleTotalOrderPartitioner p = new SimpleTotalOrderPartitioner<>(); this.conf.set(SimpleTotalOrderPartitioner.START, start); this.conf.set(SimpleTotalOrderPartitioner.END, end); @@ -69,14 +69,12 @@ public void testSplit() throws Exception { partition = p.getPartition(q, HConstants.EMPTY_BYTE_ARRAY, 3); assertEquals(2, partition); // What about end and start keys. - ImmutableBytesWritable startBytes = - new ImmutableBytesWritable(Bytes.toBytes(start)); + ImmutableBytesWritable startBytes = new ImmutableBytesWritable(Bytes.toBytes(start)); partition = p.getPartition(startBytes, HConstants.EMPTY_BYTE_ARRAY, 2); assertEquals(0, partition); partition = p.getPartition(startBytes, HConstants.EMPTY_BYTE_ARRAY, 3); assertEquals(0, partition); - ImmutableBytesWritable endBytes = - new ImmutableBytesWritable(Bytes.toBytes("z")); + ImmutableBytesWritable endBytes = new ImmutableBytesWritable(Bytes.toBytes("z")); partition = p.getPartition(endBytes, HConstants.EMPTY_BYTE_ARRAY, 2); assertEquals(1, partition); partition = p.getPartition(endBytes, HConstants.EMPTY_BYTE_ARRAY, 3); @@ -84,4 +82,3 @@ public void testSplit() throws Exception { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index da0d7b121d71..b8731913f191 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertEquals; import java.util.Arrays; - import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -80,9 +79,9 @@ public static void afterClass() throws Exception { } private static byte[][] generateSplits(int numRows, int numRegions) { - byte[][] splitRows = new byte[numRegions-1][]; + byte[][] splitRows = new byte[numRegions - 1][]; for (int i = 1; i < numRegions; i++) { - splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions); + splitRows[i - 1] = Bytes.toBytes(numRows * i / numRegions); } return splitRows; } @@ -117,8 +116,8 @@ public void testSyncTableDoDeletesFalse() throws Exception { writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--doDeletes=false"); + Counters syncCounters = + syncTables(sourceTableName, targetTableName, testDir, "--doDeletes=false"); assertTargetDoDeletesFalse(100, sourceTableName, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -140,8 +139,7 @@ public void testSyncTableDoPutsFalse() throws Exception { writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--doPuts=false"); + Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir, "--doPuts=false"); assertTargetDoPutsFalse(70, sourceTableName, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -163,8 +161,8 @@ public void testSyncTableIgnoreTimestampsTrue() throws Exception { long current = EnvironmentEdgeManager.currentTime(); writeTestData(sourceTableName, targetTableName, current - 1000, current); hashSourceTable(sourceTableName, testDir, "--ignoreTimestamps=true"); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--ignoreTimestamps=true"); + Counters syncCounters = + syncTables(sourceTableName, targetTableName, testDir, "--ignoreTimestamps=true"); assertEqualTables(90, sourceTableName, targetTableName, true); assertEquals(50, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -196,22 +194,18 @@ private void assertEqualTables(int expectedRows, TableName sourceTableName, + " cells:" + targetRow); if (sourceRow == null) { - Assert.fail("Expected " + expectedRows - + " source rows but only found " + i); + Assert.fail("Expected " + expectedRows + " source rows but only found " + i); } if (targetRow == null) { - Assert.fail("Expected " + expectedRows - + " target rows but only found " + i); + Assert.fail("Expected " + expectedRows + " target rows but only found " + i); } Cell[] sourceCells = sourceRow.rawCells(); Cell[] targetCells = targetRow.rawCells(); if (sourceCells.length != targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " has " + sourceCells.length - + " cells in source table but " + targetCells.length - + " cells in target table"); + Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + + " cells in source table but " + targetCells.length + " cells in target table"); } for (int j = 0; j < sourceCells.length; j++) { Cell sourceCell 
= sourceCells[j]; @@ -240,13 +234,13 @@ private void assertEqualTables(int expectedRows, TableName sourceTableName, } Result sourceRow = sourceScanner.next(); if (sourceRow != null) { - Assert.fail("Source table has more than " + expectedRows - + " rows. Next row: " + Bytes.toInt(sourceRow.getRow())); + Assert.fail("Source table has more than " + expectedRows + " rows. Next row: " + + Bytes.toInt(sourceRow.getRow())); } Result targetRow = targetScanner.next(); if (targetRow != null) { - Assert.fail("Target table has more than " + expectedRows - + " rows. Next row: " + Bytes.toInt(targetRow.getRow())); + Assert.fail("Target table has more than " + expectedRows + " rows. Next row: " + + Bytes.toInt(targetRow.getRow())); } sourceScanner.close(); targetScanner.close(); @@ -266,18 +260,16 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN int rowsCount = 0; while (targetRow != null) { rowsCount++; - //only compares values for existing rows, skipping rows existing on - //target only that were not deleted given --doDeletes=false + // only compares values for existing rows, skipping rows existing on + // target only that were not deleted given --doDeletes=false if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { targetRow = targetScanner.next(); continue; } - LOG.debug("SOURCE row: " + (sourceRow == null ? "null" - : Bytes.toInt(sourceRow.getRow())) + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? "null" - : Bytes.toInt(targetRow.getRow())) + LOG.debug("TARGET row: " + (targetRow == null ? "null" : Bytes.toInt(targetRow.getRow())) + " cells:" + targetRow); Cell[] sourceCells = sourceRow.rawCells(); @@ -287,18 +279,16 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN if (sourceCells.length == targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + targetRowKey + " should have more cells in " - + "target than in source"); + Assert.fail( + "Row " + targetRowKey + " should have more cells in " + "target than in source"); } } else { if (sourceCells.length != targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " has " + sourceCells.length - + " cells in source table but " + targetCells.length - + " cells in target table"); + Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + + " cells in source table but " + targetCells.length + " cells in target table"); } } for (int j = 0; j < sourceCells.length; j++) { @@ -314,7 +304,7 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN if (!CellUtil.matchingQualifier(sourceCell, targetCell)) { Assert.fail("Qualifiers don't match"); } - if (targetRowKey < 80 && targetRowKey >= 90){ + if (targetRowKey < 80 && targetRowKey >= 90) { if (!CellUtil.matchingTimestamp(sourceCell, targetCell)) { Assert.fail("Timestamps don't match"); } @@ -323,16 +313,14 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN Assert.fail("Values don't match"); } } catch (Throwable t) { - LOG.debug("Source cell: " + sourceCell + " target cell: " - + targetCell); + LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); 
Throwables.propagate(t); } } targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.",expectedRows, - rowsCount); + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); sourceScanner.close(); targetScanner.close(); sourceTable.close(); @@ -350,21 +338,17 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName Result sourceRow = sourceScanner.next(); int rowsCount = 0; - while (targetRow!=null) { - //only compares values for existing rows, skipping rows existing on - //source only that were not added to target given --doPuts=false + while (targetRow != null) { + // only compares values for existing rows, skipping rows existing on + // source only that were not added to target given --doPuts=false if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { sourceRow = sourceScanner.next(); continue; } - LOG.debug("SOURCE row: " + (sourceRow == null ? - "null" : - Bytes.toInt(sourceRow.getRow())) + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? - "null" : - Bytes.toInt(targetRow.getRow())) + LOG.debug("TARGET row: " + (targetRow == null ? "null" : Bytes.toInt(targetRow.getRow())) + " cells:" + targetRow); LOG.debug("rowsCount: " + rowsCount); @@ -381,8 +365,8 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName if (sourceCells.length == targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " shouldn't have same number of cells."); + Assert.fail( + "Row " + Bytes.toInt(sourceRow.getRow()) + " shouldn't have same number of cells."); } } else if (targetRowKey >= 80 && targetRowKey < 90) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); @@ -395,8 +379,7 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName Cell targetCell = targetCells[j]; if (CellUtil.matchingValue(sourceCell, targetCell)) { Assert.fail("Cells values should not match for rows between " - + "90 and 100. Target row id: " + (Bytes.toInt(targetRow - .getRow()))); + + "90 and 100. Target row id: " + (Bytes.toInt(targetRow.getRow()))); } } } else { @@ -420,8 +403,7 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName Assert.fail("Values don't match"); } } catch (Throwable t) { - LOG.debug( - "Source cell: " + sourceCell + " target cell: " + targetCell); + LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); Throwables.propagate(t); } } @@ -430,21 +412,20 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.",expectedRows, - rowsCount); + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); sourceScanner.close(); targetScanner.close(); sourceTable.close(); targetTable.close(); } - private Counters syncTables(TableName sourceTableName, TableName targetTableName, - Path testDir, String... options) throws Exception { + private Counters syncTables(TableName sourceTableName, TableName targetTableName, Path testDir, + String... 
options) throws Exception { SyncTable syncTable = new SyncTable(TEST_UTIL.getConfiguration()); - String[] args = Arrays.copyOf(options, options.length+3); + String[] args = Arrays.copyOf(options, options.length + 3); args[options.length] = testDir.toString(); - args[options.length+1] = sourceTableName.getNameAsString(); - args[options.length+2] = targetTableName.getNameAsString(); + args[options.length + 1] = sourceTableName.getNameAsString(); + args[options.length + 2] = targetTableName.getNameAsString(); int code = syncTable.run(args); assertEquals("sync table job failed", 0, code); @@ -455,10 +436,10 @@ private Counters syncTables(TableName sourceTableName, TableName targetTableName private void hashSourceTable(TableName sourceTableName, Path testDir, String... options) throws Exception { int numHashFiles = 3; - long batchSize = 100; // should be 2 batches per region + long batchSize = 100; // should be 2 batches per region int scanBatch = 1; HashTable hashTable = new HashTable(TEST_UTIL.getConfiguration()); - String[] args = Arrays.copyOf(options, options.length+5); + String[] args = Arrays.copyOf(options, options.length + 5); args[options.length] = "--batchsize=" + batchSize; args[options.length + 1] = "--numhashfiles=" + numHashFiles; args[options.length + 2] = "--scanbatch=" + scanBatch; @@ -492,14 +473,14 @@ private void writeTestData(TableName sourceTableName, TableName targetTableName, int targetRegions = 6; if (ArrayUtils.isEmpty(timestamps)) { long current = EnvironmentEdgeManager.currentTime(); - timestamps = new long[]{current,current}; + timestamps = new long[] { current, current }; } - Table sourceTable = TEST_UTIL.createTable(sourceTableName, - family, generateSplits(numRows, sourceRegions)); + Table sourceTable = + TEST_UTIL.createTable(sourceTableName, family, generateSplits(numRows, sourceRegions)); - Table targetTable = TEST_UTIL.createTable(targetTableName, - family, generateSplits(numRows, targetRegions)); + Table targetTable = + TEST_UTIL.createTable(targetTableName, family, generateSplits(numRows, targetRegions)); int rowIndex = 0; // a bunch of identical rows @@ -571,8 +552,8 @@ private void writeTestData(TableName sourceTableName, TableName targetTableName, sourceTable.put(sourcePut); Put targetPut = new Put(Bytes.toBytes(rowIndex)); - targetPut.addColumn(family, column1, timestamps[1]+1, column1); - targetPut.addColumn(family, column2, timestamps[1]-1, value2); + targetPut.addColumn(family, column1, timestamps[1] + 1, column1); + targetPut.addColumn(family, column2, timestamps[1] - 1, value2); targetTable.put(targetPut); } // some rows with different values diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java index 197060d1b20c..bc390c082b7a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,7 +71,6 @@ /** * This tests the TableInputFormat and its recovery semantics - * */ @Category(LargeTests.class) public class TestTableInputFormat { @@ -107,7 +106,6 @@ public void before() throws IOException { /** * Setup a table with two rows and values. 
- * * @param tableName * @return A Table instance for the created table. * @throws IOException @@ -118,7 +116,6 @@ public static Table createTable(byte[] tableName) throws IOException { /** * Setup a table with two rows and values per column family. - * * @param tableName * @return A Table instance for the created table. * @throws IOException @@ -140,15 +137,14 @@ public static Table createTable(byte[] tableName, byte[][] families) throws IOEx /** * Verify that the result and key have expected values. - * * @param r single row result * @param key the row key * @param expectedKey the expected key * @param expectedValue the expected value * @return true if the result contains the expected key and value, false otherwise. */ - static boolean checkResult(Result r, ImmutableBytesWritable key, - byte[] expectedKey, byte[] expectedValue) { + static boolean checkResult(Result r, ImmutableBytesWritable key, byte[] expectedKey, + byte[] expectedValue) { assertEquals(0, key.compareTo(expectedKey)); Map vals = r.getFamilyMap(FAMILY); byte[] value = vals.values().iterator().next(); @@ -157,15 +153,12 @@ static boolean checkResult(Result r, ImmutableBytesWritable key, } /** - * Create table data and run tests on specified htable using the - * o.a.h.hbase.mapreduce API. - * + * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API. * @param table * @throws IOException * @throws InterruptedException */ - static void runTestMapreduce(Table table) throws IOException, - InterruptedException { + static void runTestMapreduce(Table table) throws IOException, InterruptedException { org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr = new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); Scan s = new Scan(); @@ -198,11 +191,9 @@ static void runTestMapreduce(Table table) throws IOException, /** * Create a table that IOE's on first scanner next call - * * @throws IOException */ - static Table createIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -231,13 +222,10 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { } /** - * Create a table that throws a NotServingRegionException on first scanner - * next call - * + * Create a table that throws a NotServingRegionException on first scanner next call * @throws IOException */ - static Table createDNRIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -252,8 +240,7 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { ResultScanner scanner = mock(ResultScanner.class); invocation.callRealMethod(); // simulate NotServingRegionException - doThrow( - new NotServingRegionException("Injected simulated TimeoutException")) + doThrow(new NotServingRegionException("Injected simulated TimeoutException")) .when(scanner).next(); return scanner; } @@ -270,46 +257,40 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { /** * Run test assuming no errors using newer mapreduce api - * * @throws IOException * @throws InterruptedException */ @Test - public void testTableRecordReaderMapreduce() throws IOException, - InterruptedException { + public void 
testTableRecordReaderMapreduce() throws IOException, InterruptedException { Table table = createTable(Bytes.toBytes("table1-mr")); runTestMapreduce(table); } /** * Run test assuming Scanner IOException failure using newer mapreduce api - * * @throws IOException * @throws InterruptedException */ @Test - public void testTableRecordReaderScannerFailMapreduce() throws IOException, - InterruptedException { + public void testTableRecordReaderScannerFailMapreduce() throws IOException, InterruptedException { Table htable = createIOEScannerTable(Bytes.toBytes("table2-mr"), 1); runTestMapreduce(htable); } /** * Run test assuming Scanner IOException failure using newer mapreduce api - * * @throws IOException * @throws InterruptedException */ @Test(expected = IOException.class) - public void testTableRecordReaderScannerFailMapreduceTwice() throws IOException, - InterruptedException { + public void testTableRecordReaderScannerFailMapreduceTwice() + throws IOException, InterruptedException { Table htable = createIOEScannerTable(Bytes.toBytes("table3-mr"), 2); runTestMapreduce(htable); } /** * Run test assuming NotServingRegionException using newer mapreduce api - * * @throws InterruptedException * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @@ -322,7 +303,6 @@ public void testTableRecordReaderScannerTimeoutMapreduce() /** * Run test assuming NotServingRegionException using newer mapreduce api - * * @throws InterruptedException * @throws org.apache.hadoop.hbase.NotServingRegionException */ @@ -348,8 +328,8 @@ public void testExtensionOfTableInputFormatBase() @Test public void testJobConfigurableExtensionOfTableInputFormatBase() throws IOException, InterruptedException, ClassNotFoundException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + - "using JobConfigurable."); + LOG.info( + "testing use of an InputFormat that extends InputFormatBase, " + "using JobConfigurable."); final Table htable = createTable(Bytes.toBytes("exampleJobConfigurableTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleJobConfigurableTIF.class); @@ -358,8 +338,8 @@ public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException, InterruptedException, ClassNotFoundException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + - "using the approach documented in 0.98."); + LOG.info("testing use of an InputFormat that extends InputFormatBase, " + + "using the approach documented in 0.98."); final Table htable = createTable(Bytes.toBytes("exampleDeprecatedTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleDeprecatedTIF.class); @@ -392,17 +372,19 @@ void testInputFormat(Class clazz) public static class ExampleVerifier extends TableMapper { @Override - public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException { + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException { for (Cell cell : value.listCells()) { - context.getCounter(TestTableInputFormat.class.getName() + ":row", - Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + context + .getCounter(TestTableInputFormat.class.getName() + ":row", + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) .increment(1l); - context.getCounter(TestTableInputFormat.class.getName() + ":family", -
Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) + context + .getCounter(TestTableInputFormat.class.getName() + ":family", + Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) .increment(1l); - context.getCounter(TestTableInputFormat.class.getName() + ":value", - Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) + context + .getCounter(TestTableInputFormat.class.getName() + ":value", + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) .increment(1l); } } @@ -418,15 +400,14 @@ public void configure(JobConf job) { Table exampleTable = connection.getTable(TableName.valueOf(("exampleDeprecatedTable"))); // mandatory initializeTable(connection, exampleTable.getName()); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); } Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); scan.setFilter(exampleFilter); setScan(scan); } catch (IOException exception) { @@ -436,7 +417,6 @@ public void configure(JobConf job) { } - public static class ExampleJobConfigurableTIF extends TableInputFormatBase implements JobConfigurable { @@ -447,15 +427,14 @@ public void configure(JobConf job) { TableName tableName = TableName.valueOf("exampleJobConfigurableTable"); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; - //optional + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; + // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); } Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); scan.setFilter(exampleFilter); setScan(scan); } catch (IOException exception) { @@ -464,29 +443,26 @@ public void configure(JobConf job) { } } - public static class ExampleTIF extends TableInputFormatBase { @Override protected void initialize(JobContext job) throws IOException { - Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create( - job.getConfiguration())); + Connection connection = + ConnectionFactory.createConnection(HBaseConfiguration.create(job.getConfiguration())); TableName tableName = TableName.valueOf("exampleTable"); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; - //optional + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; + // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); } Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); scan.setFilter(exampleFilter); setScan(scan); } } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index 
12b17f925d99..2d9d5622355e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestTableInputFormatBase { @ClassRule @@ -92,13 +92,12 @@ public void testReuseRegionSizeCalculator() throws IOException { format.getSplits(context); // should only be 2 despite calling getSplits 4 times - Mockito.verify(format, Mockito.times(2)) - .createRegionSizeCalculator(Mockito.any(), Mockito.any()); + Mockito.verify(format, Mockito.times(2)).createRegionSizeCalculator(Mockito.any(), + Mockito.any()); } @Test - public void testTableInputFormatBaseReverseDNSForIPv6() - throws UnknownHostException { + public void testTableInputFormatBaseReverseDNSForIPv6() throws UnknownHostException { String address = "ipv6.google.com"; String localhost = null; InetAddress addr = null; @@ -110,11 +109,11 @@ public void testTableInputFormatBaseReverseDNSForIPv6() // google.com is down, we can probably forgive this test. return; } - System.out.println("Should retrun the hostname for this host " + - localhost + " addr : " + addr); + System.out.println("Should return the hostname for this host " + localhost + " addr : " + addr); String actualHostName = inputFormat.reverseDNS(addr); - assertEquals("Should retrun the hostname for this host. Expected : " + - localhost + " Actual : " + actualHostName, localhost, actualHostName); + assertEquals("Should return the hostname for this host. Expected : " + localhost + " Actual : " + + actualHostName, + localhost, actualHostName); } @Test @@ -122,7 +121,7 @@ public void testNonSuccessiveSplitsAreNotMerged() throws IOException { JobContext context = mock(JobContext.class); Configuration conf = HBaseConfiguration.create(); conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, - ConnectionForMergeTesting.class.getName()); + ConnectionForMergeTesting.class.getName()); conf.set(TableInputFormat.INPUT_TABLE, "testTable"); conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true); when(context.getConfiguration()).thenReturn(conf); @@ -132,13 +131,13 @@ public void testNonSuccessiveSplitsAreNotMerged() throws IOException { // split["b", "c"] is excluded, split["o", "p"] and split["p", "q"] are merged, // but split["a", "b"] and split["c", "d"] are not merged. assertEquals(ConnectionForMergeTesting.START_KEYS.length - 1 - 1, - tifExclude.getSplits(context).size()); + tifExclude.getSplits(context).size()); } /** * Subclass of {@link TableInputFormat} to use in {@link #testNonSuccessiveSplitsAreNotMerged}. * This class overrides {@link TableInputFormatBase#includeRegionInSplit} - * to exclude specific splits. + * This class overrides {@link TableInputFormatBase#includeRegionInSplit} to exclude specific + * splits. */ private static class TableInputFormatForMergeTesting extends TableInputFormat { private byte[] prefixStartKey = Bytes.toBytes("b"); @@ -149,7 +148,7 @@ private static class TableInputFormatForMergeTesting extends TableInputFormat { * Exclude regions which contain rows starting with "b". 
*/ @Override - protected boolean includeRegionInSplit(final byte[] startKey, final byte [] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { if (Bytes.compareTo(startKey, prefixEndKey) < 0 && (Bytes.compareTo(prefixStartKey, endKey) < 0 || Bytes.equals(endKey, HConstants.EMPTY_END_ROW))) { @@ -168,26 +167,23 @@ protected void initializeTable(Connection connection, TableName tableName) throw @Override protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, Admin admin) - throws IOException { + throws IOException { return sizeCalculator; } } /** - * Connection class to use in {@link #testNonSuccessiveSplitsAreNotMerged}. - * This class returns mocked {@link Table}, {@link RegionLocator}, {@link RegionSizeCalculator}, - * and {@link Admin}. + * Connection class to use in {@link #testNonSuccessiveSplitsAreNotMerged}. This class returns + * mocked {@link Table}, {@link RegionLocator}, {@link RegionSizeCalculator}, and {@link Admin}. */ private static class ConnectionForMergeTesting implements Connection { - public static final byte[][] SPLITS = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d"), - Bytes.toBytes("e"), Bytes.toBytes("f"), Bytes.toBytes("g"), Bytes.toBytes("h"), - Bytes.toBytes("i"), Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"), - Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"), Bytes.toBytes("p"), - Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"), Bytes.toBytes("t"), - Bytes.toBytes("u"), Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"), - Bytes.toBytes("y"), Bytes.toBytes("z") - }; + public static final byte[][] SPLITS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), + Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"), + Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j"), + Bytes.toBytes("k"), Bytes.toBytes("l"), Bytes.toBytes("m"), Bytes.toBytes("n"), + Bytes.toBytes("o"), Bytes.toBytes("p"), Bytes.toBytes("q"), Bytes.toBytes("r"), + Bytes.toBytes("s"), Bytes.toBytes("t"), Bytes.toBytes("u"), Bytes.toBytes("v"), + Bytes.toBytes("w"), Bytes.toBytes("x"), Bytes.toBytes("y"), Bytes.toBytes("z") }; public static final byte[][] START_KEYS; public static final byte[][] END_KEYS; @@ -268,32 +264,31 @@ public RegionLocator getRegionLocator(TableName tableName) throws IOException { } RegionLocator locator = mock(RegionLocator.class); - when(locator.getRegionLocation(any(byte [].class), anyBoolean())). - thenAnswer(new Answer() { - @Override - public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] key = (byte [])args[0]; - return locationMap.get(key); - } - }); - when(locator.getStartEndKeys()). - thenReturn(new Pair(START_KEYS, END_KEYS)); + when(locator.getRegionLocation(any(byte[].class), anyBoolean())) + .thenAnswer(new Answer() { + @Override + public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { + Object[] args = invocationOnMock.getArguments(); + byte[] key = (byte[]) args[0]; + return locationMap.get(key); + } + }); + when(locator.getStartEndKeys()) + .thenReturn(new Pair(START_KEYS, END_KEYS)); return locator; } public RegionSizeCalculator getRegionSizeCalculator() { RegionSizeCalculator sizeCalculator = mock(RegionSizeCalculator.class); - when(sizeCalculator.getRegionSize(any(byte[].class))). 
- thenAnswer(new Answer() { - @Override - public Long answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] regionId = (byte [])args[0]; - byte[] startKey = RegionInfo.getStartKey(regionId); - return SIZE_MAP.get(startKey); - } - }); + when(sizeCalculator.getRegionSize(any(byte[].class))).thenAnswer(new Answer() { + @Override + public Long answer(InvocationOnMock invocationOnMock) throws Throwable { + Object[] args = invocationOnMock.getArguments(); + byte[] regionId = (byte[]) args[0]; + byte[] startKey = RegionInfo.getStartKey(regionId); + return SIZE_MAP.get(startKey); + } + }); return sizeCalculator; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java index a116ecb72fa6..2129b42b53ec 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScan extends TestTableInputFormatScanBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScan.class); + HBaseClassTestRule.forClass(TestTableInputFormatScan.class); /** * Tests a MR scan using specific number of mappers. The test table has 26 regions, diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index eab1d871a606..f00987e962da 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -50,7 +50,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Tests various scan start and stop row scenarios. This is set in a scan and tested in a MapReduce * job to see if that is handed over and done properly too. @@ -61,7 +60,7 @@ public abstract class TestTableInputFormatScanBase { static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); static final TableName TABLE_NAME = TableName.valueOf("scantest"); - static final byte[][] INPUT_FAMILYS = {Bytes.toBytes("content1"), Bytes.toBytes("content2")}; + static final byte[][] INPUT_FAMILYS = { Bytes.toBytes("content1"), Bytes.toBytes("content2") }; static final String KEY_STARTROW = "startRow"; static final String KEY_LASTROW = "stpRow"; @@ -85,35 +84,32 @@ public static void tearDownAfterClass() throws Exception { * Pass the key and value to reduce. 
*/ public static class ScanMapper - extends TableMapper { + extends TableMapper { /** * Pass the key and value to reduce. - * - * @param key The key, here "aaa", "aab" etc. - * @param value The value is the same as the key. - * @param context The task context. + * @param key The key, here "aaa", "aab" etc. + * @param value The value is the same as the key. + * @param context The task context. * @throws IOException When reading the rows fails. */ @Override - public void map(ImmutableBytesWritable key, Result value, - Context context) - throws IOException, InterruptedException { + public void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { if (value.size() != 2) { throw new IOException("There should be two input columns"); } - Map>> - cfMap = value.getMap(); + Map>> cfMap = value.getMap(); if (!cfMap.containsKey(INPUT_FAMILYS[0]) || !cfMap.containsKey(INPUT_FAMILYS[1])) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILYS[0]) + "' or '" + Bytes.toString(INPUT_FAMILYS[1]) + "'."); + throw new IOException("Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILYS[0]) + + "' or '" + Bytes.toString(INPUT_FAMILYS[1]) + "'."); } String val0 = Bytes.toStringBinary(value.getValue(INPUT_FAMILYS[0], null)); String val1 = Bytes.toStringBinary(value.getValue(INPUT_FAMILYS[1], null)); - LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + - ", value -> (" + val0 + ", " + val1 + ")"); + LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + ", value -> (" + val0 + ", " + + val1 + ")"); context.write(key, key); } } @@ -122,28 +118,25 @@ public void map(ImmutableBytesWritable key, Result value, * Checks the last and first key seen against the scanner boundaries. */ public static class ScanReducer - extends Reducer { + extends Reducer { private String first = null; private String last = null; - protected void reduce(ImmutableBytesWritable key, - Iterable values, Context context) - throws IOException ,InterruptedException { + protected void reduce(ImmutableBytesWritable key, Iterable values, + Context context) throws IOException, InterruptedException { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); - LOG.info("reduce: key[" + count + "] -> " + - Bytes.toStringBinary(key.get()) + ", value -> " + val); + LOG.info( + "reduce: key[" + count + "] -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); if (first == null) first = val; last = val; count++; } } - protected void cleanup(Context context) - throws IOException, InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { Configuration c = context.getConfiguration(); String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); @@ -164,8 +157,8 @@ protected void cleanup(Context context) */ protected void testScanFromConfiguration(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + - "To" + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + + "To" + (stop != null ? 
stop.toUpperCase(Locale.ROOT) : "Empty"); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); c.set(TableInputFormat.INPUT_TABLE, TABLE_NAME.getNameAsString()); c.set(TableInputFormat.SCAN_COLUMN_FAMILY, @@ -198,8 +191,8 @@ protected void testScanFromConfiguration(String start, String stop, String last) */ protected void testScan(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + - (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); Scan scan = new Scan(); @@ -225,7 +218,6 @@ protected void testScan(String start, String stop, String last) LOG.info("After map/reduce completion - job " + jobName); } - /** * Tests Number of inputSplits for MR job when specify number of mappers for TableInputFormatXXX * This test does not run MR job @@ -294,7 +286,7 @@ protected void testAutobalanceNumOfSplit() throws IOException { int[] regionLen = { 10, 20, 20, 40, 60 }; for (int i = 0; i < 5; i++) { InputSplit split = new TableSplit(TABLE_NAME, new Scan(), Bytes.toBytes(i), - Bytes.toBytes(i + 1), "", "", regionLen[i] * 1048576); + Bytes.toBytes(i + 1), "", "", regionLen[i] * 1048576); splits.add(split); } TableInputFormat tif = new TableInputFormat(); @@ -311,4 +303,3 @@ protected void testAutobalanceNumOfSplit() throws IOException { assertNotEquals("The seventh split start key should not be", 4, Bytes.toInt(ts4.getStartRow())); } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java index d7cefd61b148..0653e7e109d8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToAPP extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToAPP.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToAPP.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java index 598a345834d8..d1f42f256255 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToBBA extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBA.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBA.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java index 6d3674caad86..22cfb2cd5c62 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToBBB extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBB.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBB.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java index f5d4de10a88a..2e62e4d9c7c6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToEmpty extends TestTableInputFormatSc @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToEmpty.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java index 939fc936f955..45fc2208e22c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToOPP extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToOPP.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToOPP.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java index 32f768c00fb8..0126ef3b82c9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanOBBToOPP extends TestTableInputFormatScanBa @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToOPP.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToOPP.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java index 5ecb4e60f4e0..5de7e14ced43 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanOBBToQPP extends TestTableInputFormatScanBa @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToQPP.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToQPP.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java index 7b2ccded7e19..5874d893b453 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanOPPToEmpty extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanOPPToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanOPPToEmpty.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java index 2801f4eb8bf7..b1dc43efed38 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanYYXToEmpty extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanYYXToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanYYXToEmpty.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java index 97a4998e5537..49a7f4fed407 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanYYYToEmpty extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanYYYToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanYYYToEmpty.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java index 3d91ff2b7b3c..524ea567ba3b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanYZYToEmpty extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanYZYToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanYZYToEmpty.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java index 786da1a02049..8e69b39f60bf 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,12 +51,12 @@ import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableMapReduce extends TestTableMapReduceBase { @ClassRule @@ -66,7 +66,9 @@ public class TestTableMapReduce extends TestTableMapReduceBase { private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class); @Override - protected Logger getLog() { return LOG; } + protected Logger getLog() { + return LOG; + } /** * Pass the given key and processed record reduce @@ -75,24 +77,21 @@ static class ProcessContentsMapper extends TableMapper>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. 
Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it @@ -115,12 +114,9 @@ protected void runTestOnTable(Table table) throws IOException { job.setNumReduceTasks(1); Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); - TableMapReduceUtil.initTableMapperJob( - table.getName().getNameAsString(), scan, - ProcessContentsMapper.class, ImmutableBytesWritable.class, - Put.class, job); - TableMapReduceUtil.initTableReducerJob( - table.getName().getNameAsString(), + TableMapReduceUtil.initTableMapperJob(table.getName().getNameAsString(), scan, + ProcessContentsMapper.class, ImmutableBytesWritable.class, Put.class, job); + TableMapReduceUtil.initTableReducerJob(table.getName().getNameAsString(), IdentityTableReducer.class, job); FileOutputFormat.setOutputPath(job, new Path("test")); LOG.info("Started " + table.getName().getNameAsString()); @@ -138,8 +134,7 @@ protected void runTestOnTable(Table table) throws IOException { } finally { table.close(); if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -151,8 +146,8 @@ protected void runTestOnTable(Table table) throws IOException { */ private void verifyJobCountersAreEmitted(Job job) throws IOException { Counters counters = job.getCounters(); - Counter counter - = counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS"); + Counter counter = + counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS"); assertNotNull("Unable to find Job counter for HBase scan metrics, RPC_CALLS", counter); assertTrue("Counter value for RPC_CALLS should be larger than 0", counter.getValue() > 0); } @@ -161,7 +156,7 @@ private void verifyJobCountersAreEmitted(Job job) throws IOException { public void testWritingToDisabledTable() throws IOException { try (Admin admin = UTIL.getConnection().getAdmin(); - Table table = UTIL.getConnection().getTable(TABLE_FOR_NEGATIVE_TESTS)) { + Table table = UTIL.getConnection().getTable(TABLE_FOR_NEGATIVE_TESTS)) { admin.disableTable(table.getName()); runTestOnTable(table); fail("Should not have reached here, should have thrown an exception"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java index bca27ec28f6a..7e227836035f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,9 +43,9 @@ import org.slf4j.Logger; /** - * A base class for a test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of a particular cell, - * and write it back to the table. Implements common components between mapred and mapreduce + * A base class for a test Map/Reduce job over HBase tables. The map/reduce process we're testing on + * our tables is simple - take every row in the table, reverse the value of a particular cell, and + * write it back to the table. Implements common components between mapred and mapreduce * implementations. 
*/ public abstract class TestTableMapReduceBase { @@ -56,10 +55,7 @@ public abstract class TestTableMapReduceBase { protected static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); protected static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); - protected static final byte[][] columns = new byte[][] { - INPUT_FAMILY, - OUTPUT_FAMILY - }; + protected static final byte[][] columns = new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }; /** * Retrieve my logger instance. @@ -74,9 +70,8 @@ public abstract class TestTableMapReduceBase { @BeforeClass public static void beforeClass() throws Exception { UTIL.startMiniCluster(); - Table table = - UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, - OUTPUT_FAMILY }); + Table table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, + new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); UTIL.createTable(TABLE_FOR_NEGATIVE_TESTS, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); } @@ -111,11 +106,10 @@ protected static Put map(ImmutableBytesWritable key, Result value) throws IOExce if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it @@ -157,8 +151,8 @@ protected void verify(TableName tableName) throws IOException { } /** - * Looks at every value of the mapreduce output and verifies that indeed - * the values have been reversed. + * Looks at every value of the mapreduce output and verifies that indeed the values have been + * reversed. * @param table Table to scan. 
* @throws IOException * @throws NullPointerException if we failed to find a cell value @@ -170,18 +164,17 @@ private void verifyAttempt(final Table table) throws IOException, NullPointerExc try { Iterator itr = scanner.iterator(); assertTrue(itr.hasNext()); - while(itr.hasNext()) { + while (itr.hasNext()) { Result r = itr.next(); if (getLog().isDebugEnabled()) { - if (r.size() > 2 ) { - throw new IOException("Too many results, expected 2 got " + - r.size()); + if (r.size() > 2) { + throw new IOException("Too many results, expected 2 got " + r.size()); } } byte[] firstValue = null; byte[] secondValue = null; int count = 0; - for(Cell kv : r.listCells()) { + for (Cell kv : r.listCells()) { if (count == 0) { firstValue = CellUtil.cloneValue(kv); } @@ -194,16 +187,13 @@ private void verifyAttempt(final Table table) throws IOException, NullPointerExc } } - if (firstValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": first value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": first value is null"); } String first = Bytes.toString(firstValue); if (secondValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": second value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": second value is null"); } byte[] secondReversed = new byte[secondValue.length]; for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) { @@ -213,9 +203,9 @@ private void verifyAttempt(final Table table) throws IOException, NullPointerExc if (first.compareTo(second) != 0) { if (getLog().isDebugEnabled()) { - getLog().debug("second key is not the reverse of first. row=" + - Bytes.toStringBinary(r.getRow()) + ", first value=" + first + - ", second value=" + second); + getLog().debug( + "second key is not the reverse of first. row=" + Bytes.toStringBinary(r.getRow()) + + ", first value=" + first + ", second value=" + second); } fail(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java index 09cdc279bc8a..f71c85da6b0b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.Closeable; import java.io.File; import java.util.Collection; @@ -57,7 +58,7 @@ /** * Test different variants of initTableMapperJob method */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestTableMapReduceUtil { private static final String HTTP_PRINCIPAL = "HTTP/localhost"; @@ -71,8 +72,8 @@ public void after() { } /* - * initTableSnapshotMapperJob is tested in {@link TestTableSnapshotInputFormat} because - * the method depends on an online cluster. + * initTableSnapshotMapperJob is tested in {@link TestTableSnapshotInputFormat} because the method + * depends on an online cluster. 
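As a rough usage sketch of the initTableMapperJob / initTableReducerJob wiring these variants test (table name is a placeholder, and the mapper is the illustrative one sketched earlier; the tested overloads additionally accept an addDependencyJars flag and a custom InputFormat class):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
  import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
  import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.mapreduce.Job;

  public class ReverseValuesJob {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      Job job = Job.getInstance(conf, "reverse-values");
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("contents"));
      // Scan "myTable" through the mapper, then write the emitted Puts back via the reducer.
      TableMapReduceUtil.initTableMapperJob("myTable", scan, ReverseValueMapper.class,
        ImmutableBytesWritable.class, Put.class, job);
      TableMapReduceUtil.initTableReducerJob("myTable", IdentityTableReducer.class, job);
      System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
  }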
*/ @Test @@ -80,9 +81,8 @@ public void testInitTableMapperJob1() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); // test - TableMapReduceUtil.initTableMapperJob( - "Table", new Scan(), Import.Importer.class, Text.class, Text.class, job, - false, WALInputFormat.class); + TableMapReduceUtil.initTableMapperJob("Table", new Scan(), Import.Importer.class, Text.class, + Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -95,9 +95,8 @@ public void testInitTableMapperJob1() throws Exception { public void testInitTableMapperJob2() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job, false, WALInputFormat.class); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -110,9 +109,8 @@ public void testInitTableMapperJob2() throws Exception { public void testInitTableMapperJob3() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -125,9 +123,8 @@ public void testInitTableMapperJob3() throws Exception { public void testInitTableMapperJob4() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job, false); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job, false); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -136,8 +133,8 @@ public void testInitTableMapperJob4() throws Exception { assertEquals("Table", job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); } - private static Closeable startSecureMiniCluster( - HBaseTestingUtil util, MiniKdc kdc, String principal) throws Exception { + private static Closeable startSecureMiniCluster(HBaseTestingUtil util, MiniKdc kdc, + String principal) throws Exception { Configuration conf = util.getConfiguration(); SecureTestUtil.enableSecurity(conf); @@ -147,8 +144,8 @@ private static Closeable startSecureMiniCluster( conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + ',' + TokenProvider.class.getName()); - HBaseKerberosUtils.setSecuredConfiguration(conf, - 
principal + '@' + kdc.getRealm(), HTTP_PRINCIPAL + '@' + kdc.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, principal + '@' + kdc.getRealm(), + HTTP_PRINCIPAL + '@' + kdc.getRealm()); util.startMiniCluster(); try { @@ -161,7 +158,8 @@ private static Closeable startSecureMiniCluster( return util::shutdownMiniCluster; } - @Test public void testInitCredentialsForCluster1() throws Exception { + @Test + public void testInitCredentialsForCluster1() throws Exception { HBaseTestingUtil util1 = new HBaseTestingUtil(); HBaseTestingUtil util2 = new HBaseTestingUtil(); @@ -185,8 +183,9 @@ private static Closeable startSecureMiniCluster( } } - @Test @SuppressWarnings("unchecked") public void testInitCredentialsForCluster2() - throws Exception { + @Test + @SuppressWarnings("unchecked") + public void testInitCredentialsForCluster2() throws Exception { HBaseTestingUtil util1 = new HBaseTestingUtil(); HBaseTestingUtil util2 = new HBaseTestingUtil(); @@ -199,7 +198,7 @@ private static Closeable startSecureMiniCluster( loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath()); try (Closeable util1Closeable = startSecureMiniCluster(util1, kdc, userPrincipal); - Closeable util2Closeable = startSecureMiniCluster(util2, kdc, userPrincipal)) { + Closeable util2Closeable = startSecureMiniCluster(util2, kdc, userPrincipal)) { Configuration conf1 = util1.getConfiguration(); Job job = Job.getInstance(conf1); @@ -211,7 +210,7 @@ private static Closeable startSecureMiniCluster( String clusterId = ZKClusterId.readClusterIdZNode(util2.getZooKeeperWatcher()); Token tokenForCluster = - (Token) credentials.getToken(new Text(clusterId)); + (Token) credentials.getToken(new Text(clusterId)); assertEquals(userPrincipal + '@' + kdc.getRealm(), tokenForCluster.decodeIdentifier().getUsername()); } @@ -220,7 +219,8 @@ private static Closeable startSecureMiniCluster( } } - @Test public void testInitCredentialsForCluster3() throws Exception { + @Test + public void testInitCredentialsForCluster3() throws Exception { HBaseTestingUtil util1 = new HBaseTestingUtil(); File keytab = new File(util1.getDataTestDir("keytab").toUri().getPath()); @@ -251,8 +251,9 @@ private static Closeable startSecureMiniCluster( } } - @Test @SuppressWarnings("unchecked") public void testInitCredentialsForCluster4() - throws Exception { + @Test + @SuppressWarnings("unchecked") + public void testInitCredentialsForCluster4() throws Exception { HBaseTestingUtil util1 = new HBaseTestingUtil(); // Assume util1 is insecure cluster // Do not start util1 because cannot boot secured mini cluster and insecure mini cluster at once @@ -278,7 +279,7 @@ private static Closeable startSecureMiniCluster( String clusterId = ZKClusterId.readClusterIdZNode(util2.getZooKeeperWatcher()); Token tokenForCluster = - (Token) credentials.getToken(new Text(clusterId)); + (Token) credentials.getToken(new Text(clusterId)); assertEquals(userPrincipal + '@' + kdc.getRealm(), tokenForCluster.decodeIdentifier().getUsername()); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java index 1905beba3b18..01429d68fcc2 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more 
contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index 3165d459f85b..a72a39bb2a8e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION; +import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT; -import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION; -import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; @@ -70,7 +70,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { @ClassRule @@ -97,7 +97,6 @@ protected byte[] getEndRow() { return yyy; } - @Test public void testGetBestLocations() throws IOException { TableSnapshotInputFormatImpl tsif = new TableSnapshotInputFormatImpl(); @@ -107,36 +106,36 @@ public void testGetBestLocations() throws IOException { Assert.assertEquals(null, TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 1); 
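For orientation, a hedged test-scope fragment of the behaviour those assertions pin down: hosts carrying more HFile block weight rank earlier in the computed locality hints (same-package access to getBestLocations is assumed, as the test has).

  import java.io.IOException;
  import java.util.List;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HDFSBlocksDistribution;
  import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl;

  class BestLocationsSketch {
    static List<String> bestLocationsExample() throws IOException {
      Configuration conf = new Configuration();
      HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
      dist.addHostsAndBlockWeight(new String[] { "h1" }, 10);
      dist.addHostsAndBlockWeight(new String[] { "h2" }, 7);
      dist.addHostsAndBlockWeight(new String[] { "h3" }, 1);
      // Expected ordering starts with "h1"; hosts with a negligible share may be dropped.
      return TableSnapshotInputFormatImpl.getBestLocations(conf, dist);
    }
  }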
Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution = new HDFSBlocksDistribution(); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 10); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 7); - blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 5); - blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 10); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 7); + blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 5); + blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 2); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 2); Assert.assertEquals(Lists.newArrayList("h1", "h2"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 3); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 3); Assert.assertEquals(Lists.newArrayList("h2", "h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 6); - blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 9); + blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 6); + blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 9); Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); @@ -147,10 +146,10 @@ public static enum TestTableSnapshotCounters { } public static class TestTableSnapshotMapper - extends TableMapper { + extends TableMapper { @Override - protected void map(ImmutableBytesWritable key, Result value, - Context context) throws IOException, InterruptedException { + protected void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { // Validate a single row coming from the snapshot, and emit the row key verifyRowFromMap(key, value); context.write(key, NullWritable.get()); @@ -158,18 +157,17 @@ protected void map(ImmutableBytesWritable key, Result value, } public static class TestTableSnapshotReducer - extends Reducer { - HBaseTestingUtil.SeenRowTracker rowTracker = - new HBaseTestingUtil.SeenRowTracker(bbb, yyy); + extends Reducer { + HBaseTestingUtil.SeenRowTracker rowTracker = new HBaseTestingUtil.SeenRowTracker(bbb, yyy); + @Override protected void reduce(ImmutableBytesWritable key, Iterable values, - Context context) throws IOException, InterruptedException { + Context context) throws IOException, InterruptedException { rowTracker.addRow(key.get()); } @Override - protected void cleanup(Context context) throws IOException, - InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { rowTracker.validate(); } } @@ -184,19 +182,17 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { Job job = new Job(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + 
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. - Assert.assertEquals( - "Snapshot job should be configured for default LruBlockCache.", + Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals( - "Snapshot job should not use BucketCache.", - 0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); + Assert.assertEquals("Snapshot job should not use BucketCache.", 0, + job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -208,8 +204,7 @@ public void testWithMockedMapReduceSingleRegionByRegionLocation() throws Excepti Configuration conf = UTIL.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, true); try { - testWithMockedMapReduce(UTIL, name.getMethodName() + "Snapshot", 1, 1, 1, - true); + testWithMockedMapReduce(UTIL, name.getMethodName() + "Snapshot", 1, 1, 1, true); } finally { conf.unset(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION); } @@ -219,19 +214,18 @@ public void testWithMockedMapReduceSingleRegionByRegionLocation() throws Excepti public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, String snapshotName, Path tmpTableDir) throws Exception { Job job = new Job(UTIL.getConfiguration()); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); } @Override - public void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, - int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) + public void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { - createTableAndSnapshot( - util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); + createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); Configuration conf = util.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, setLocalityEnabledTo); @@ -242,14 +236,13 @@ public void testWithMockedMapReduce(HBaseTestingUtil util, String snapshotName, Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow()); // limit the scan if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, 
NullWritable.class, job, + false, tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir); } verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow()); @@ -418,7 +411,7 @@ public void testScannerReadTypeConfiguration() throws IOException { Scan scanWithReadType = new Scan(); scanWithReadType.setReadType(readType); assertEquals(scanWithReadType.getReadType(), - serializeAndReturn(conf, scanWithReadType).getReadType()); + serializeAndReturn(conf, scanWithReadType).getReadType()); } // We should only see the DEFAULT ReadType getting updated to STREAM. Scan scanWithoutReadType = new Scan(); @@ -432,8 +425,8 @@ public void testScannerReadTypeConfiguration() throws IOException { } /** - * Serializes and deserializes the given scan in the same manner that - * TableSnapshotInputFormat does. + * Serializes and deserializes the given scan in the same manner that TableSnapshotInputFormat + * does. */ private Scan serializeAndReturn(Configuration conf, Scan s) throws IOException { conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(s)); @@ -441,8 +434,7 @@ private Scan serializeAndReturn(Configuration conf, Scan s) throws IOException { } private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumSplits, - byte[] startRow, byte[] stopRow) - throws IOException, InterruptedException { + byte[] startRow, byte[] stopRow) throws IOException, InterruptedException { TableSnapshotInputFormat tsif = new TableSnapshotInputFormat(); List splits = tsif.getSplits(job); @@ -451,13 +443,12 @@ private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumS HBaseTestingUtil.SeenRowTracker rowTracker = new HBaseTestingUtil.SeenRowTracker(startRow, stopRow.length > 0 ? 
stopRow : Bytes.toBytes("\uffff")); - boolean localityEnabled = - job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, - SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); + boolean localityEnabled = job.getConfiguration().getBoolean( + SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); boolean byRegionLoc = - job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, - SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT); + job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, + SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT); for (int i = 0; i < splits.size(); i++) { // validate input split InputSplit split = splits.get(i); @@ -545,17 +536,16 @@ public static void doTestWithMapReduce(HBaseTestingUtil util, TableName tableNam job.setJarByClass(util.getClass()); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - TestTableSnapshotInputFormat.class); + TestTableSnapshotInputFormat.class); if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, true, tableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + true, tableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, true, tableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + true, tableDir); } job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class); @@ -583,12 +573,12 @@ public void testCleanRestoreDir() throws Exception { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); Job job = Job.getInstance(UTIL.getConfiguration()); Path workingDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, workingDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + workingDir); FileSystem fs = workingDir.getFileSystem(job.getConfiguration()); - Path restorePath = new Path(job.getConfiguration() - .get("hbase.TableSnapshotInputFormat.restore.dir")); + Path restorePath = + new Path(job.getConfiguration().get("hbase.TableSnapshotInputFormat.restore.dir")); Assert.assertTrue(fs.exists(restorePath)); TableSnapshotInputFormat.cleanRestoreDir(job, snapshotName); Assert.assertFalse(fs.exists(restorePath)); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java index 37feec3f78d0..35e23d326947 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or 
more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestTableSplit { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -47,11 +47,9 @@ public class TestTableSplit { @Test public void testHashCode() { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location"); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location"); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); assertEquals(split1, split2); assertTrue(split1.hashCode() == split2.hashCode()); HashSet set = new HashSet<>(2); @@ -62,15 +60,13 @@ public void testHashCode() { /** * length of region should not influence hashcode - * */ + */ @Test public void testHashCode_length() { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location", 1984); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", 1984); TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location", 1982); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", 1982); assertEquals(split1, split2); assertTrue(split1.hashCode() == split2.hashCode()); @@ -82,16 +78,14 @@ public void testHashCode_length() { /** * Length of region need to be properly serialized. 
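A small illustrative round trip of the property tested above, namely that a split's length survives Writable serialization; the table name, rows, and lengths are arbitrary.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.mapreduce.TableSplit;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.apache.hadoop.util.ReflectionUtils;

  class TableSplitLengthSketch {
    static long roundTripLength() throws Exception {
      TableSplit original = new TableSplit(TableName.valueOf("demo"),
        Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "host1", 666);
      TableSplit copy = new TableSplit(TableName.valueOf("demo"),
        Bytes.toBytes("a"), Bytes.toBytes("b"), "host2");
      // Writable copy: serializes 'original' and deserializes it into 'copy'.
      ReflectionUtils.copy(new Configuration(), original, copy);
      return copy.getLength(); // expected to still report 666
    }
  }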
- * */ + */ @Test public void testLengthIsSerialized() throws Exception { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location", 666); + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", 666); TableSplit deserialized = new TableSplit(TableName.valueOf(name.getMethodName()), - Bytes.toBytes("row-start2"), - Bytes.toBytes("row-end2"), "location1"); + Bytes.toBytes("row-start2"), Bytes.toBytes("row-end2"), "location1"); ReflectionUtils.copy(new Configuration(), split1, deserialized); Assert.assertEquals(666, deserialized.getLength()); @@ -99,37 +93,27 @@ public void testLengthIsSerialized() throws Exception { @Test public void testToString() { - TableSplit split = - new TableSplit(TableName.valueOf(name.getMethodName()), Bytes.toBytes("row-start"), - Bytes.toBytes("row-end"), "location"); - String str = - "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " - + "endrow=row-end, regionLocation=location, " - + "regionname=)"; + TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location"); + String str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " + + "endrow=row-end, regionLocation=location, " + "regionname=)"; Assert.assertEquals(str, split.toString()); split = new TableSplit(TableName.valueOf(name.getMethodName()), null, Bytes.toBytes("row-start"), Bytes.toBytes("row-end"), "location", "encoded-region-name", 1000L); - str = - "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " - + "endrow=row-end, regionLocation=location, " - + "regionname=encoded-region-name)"; + str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " + + "endrow=row-end, regionLocation=location, " + "regionname=encoded-region-name)"; Assert.assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null); - str = - "Split(tablename=null, startrow=null, " - + "endrow=null, regionLocation=null, " - + "regionname=)"; + str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + + "regionname=)"; Assert.assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null, null, null, 1000L); - str = - "Split(tablename=null, startrow=null, " - + "endrow=null, regionLocation=null, " - + "regionname=null)"; + str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + + "regionname=null)"; Assert.assertEquals(str, split.toString()); } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index bf25c1caac30..2957d5dbf7c8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTimeRangeMapRed { @ClassRule @@ -69,20 +69,19 @@ public class TestTimeRangeMapRed { HBaseClassTestRule.forClass(TestTimeRangeMapRed.class); private final static Logger log = LoggerFactory.getLogger(TestTimeRangeMapRed.class); - private static final HBaseTestingUtil UTIL = - new HBaseTestingUtil(); + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private Admin admin; - private static final byte [] KEY = Bytes.toBytes("row1"); + private static final byte[] KEY = Bytes.toBytes("row1"); private static final NavigableMap TIMESTAMP = new TreeMap<>(); static { - TIMESTAMP.put((long)1245620000, false); - TIMESTAMP.put((long)1245620005, true); // include - TIMESTAMP.put((long)1245620010, true); // include - TIMESTAMP.put((long)1245620055, true); // include - TIMESTAMP.put((long)1245620100, true); // include - TIMESTAMP.put((long)1245620150, false); - TIMESTAMP.put((long)1245620250, false); + TIMESTAMP.put((long) 1245620000, false); + TIMESTAMP.put((long) 1245620005, true); // include + TIMESTAMP.put((long) 1245620010, true); // include + TIMESTAMP.put((long) 1245620055, true); // include + TIMESTAMP.put((long) 1245620100, true); // include + TIMESTAMP.put((long) 1245620150, false); + TIMESTAMP.put((long) 1245620250, false); } static final long MINSTAMP = 1245620005; static final long MAXSTAMP = 1245620100 + 1; // maxStamp itself is excluded. so increment it. @@ -107,16 +106,13 @@ public void before() throws Exception { } private static class ProcessTimeRangeMapper - extends TableMapper - implements Configurable { + extends TableMapper implements Configurable { private Configuration conf = null; private Table table = null; @Override - public void map(ImmutableBytesWritable key, Result result, - Context context) - throws IOException { + public void map(ImmutableBytesWritable key, Result result, Context context) throws IOException { List tsList = new ArrayList<>(); for (Cell kv : result.listCells()) { tsList.add(kv.getTimestamp()); @@ -151,10 +147,10 @@ public void setConf(Configuration configuration) { @Test public void testTimeRangeMapRed() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { final TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(FAMILY_NAME).setMaxVersions(Integer.MAX_VALUE).build()).build(); + TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILY_NAME).setMaxVersions(Integer.MAX_VALUE).build()).build(); admin.createTable(tableDescriptor); List puts = new ArrayList<>(); for (Map.Entry entry : TIMESTAMP.entrySet()) { @@ -170,8 +166,7 @@ public void testTimeRangeMapRed() table.close(); } - private void runTestOnTable() - throws IOException, InterruptedException, ClassNotFoundException { + private void runTestOnTable() throws IOException, InterruptedException, ClassNotFoundException { Job job = null; try { job = new Job(UTIL.getConfiguration(), "test123"); @@ -181,16 +176,15 @@ private void runTestOnTable() scan.addColumn(FAMILY_NAME, COLUMN_NAME); scan.setTimeRange(MINSTAMP, MAXSTAMP); scan.readAllVersions(); - 
TableMapReduceUtil.initTableMapperJob(TABLE_NAME, - scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job); + TableMapReduceUtil.initTableMapperJob(TABLE_NAME, scan, ProcessTimeRangeMapper.class, + Text.class, Text.class, job); job.waitForCompletion(true); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } finally { if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -200,11 +194,11 @@ private void verify(final Table table) throws IOException { scan.addColumn(FAMILY_NAME, COLUMN_NAME); scan.readVersions(1); ResultScanner scanner = table.getScanner(scan); - for (Result r: scanner) { + for (Result r : scanner) { for (Cell kv : r.listCells()) { log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv)) - + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) - + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv))); + + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) + "\t" + kv.getTimestamp() + "\t" + + Bytes.toBoolean(CellUtil.cloneValue(kv))); org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()), Bytes.toBoolean(CellUtil.cloneValue(kv))); } @@ -213,4 +207,3 @@ private void verify(final Table table) throws IOException { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java index 48e85183923e..531a454cd005 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; + import java.util.ArrayList; import java.util.List; import org.apache.hadoop.fs.FileStatus; @@ -32,11 +33,11 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({ MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestWALInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALInputFormat.class); + HBaseClassTestRule.forClass(TestWALInputFormat.class); /** * Test the primitive start/end time filtering. 
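A hedged sketch of the start/end time filtering being tested: the time window is read from the job configuration, so a caller restricts which WAL entries a job sees roughly as below. The input-dir property and END_TIME_KEY appear in this patch; START_TIME_KEY is assumed by analogy.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.mapreduce.WALInputFormat;

  class WalTimeWindowSketch {
    // Restrict a WAL-reading job to edits whose timestamps fall inside [start, end].
    static Configuration withTimeWindow(Configuration base, String walDir, long start, long end) {
      Configuration jobConf = new Configuration(base);
      jobConf.set("mapreduce.input.fileinputformat.inputdir", walDir);
      jobConf.setLong(WALInputFormat.START_TIME_KEY, start); // assumed constant, see note above
      jobConf.setLong(WALInputFormat.END_TIME_KEY, end);
      return jobConf;
    }
  }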
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index aac4ae5271b8..e8a69512fc0b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -24,6 +24,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.ByteArrayOutputStream; import java.io.File; import java.io.PrintStream; @@ -72,7 +73,7 @@ /** * Basic test for the WALPlayer M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestWALPlayer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -115,9 +116,9 @@ public void testPlayingRecoveredEdit() throws Exception { TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY); // Copy testing recovered.edits file that is over under hbase-server test resources // up into a dir in our little hdfs cluster here. - String hbaseServerTestResourcesEdits = System.getProperty("test.build.classes") + - "/../../../hbase-server/src/test/resources/" + - TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); + String hbaseServerTestResourcesEdits = + System.getProperty("test.build.classes") + "/../../../hbase-server/src/test/resources/" + + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); assertTrue(new File(hbaseServerTestResourcesEdits).exists()); FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); // Target dir. @@ -125,7 +126,7 @@ public void testPlayingRecoveredEdit() throws Exception { assertTrue(dfs.mkdirs(targetDir)); dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); assertEquals(0, - ToolRunner.run(new WALPlayer(this.conf), new String [] {targetDir.toString()})); + ToolRunner.run(new WALPlayer(this.conf), new String[] { targetDir.toString() })); // I don't know how many edits are in this file for this table... so just check more than 1. 
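For context, an illustrative invocation of the tool under test, mirroring the ToolRunner call in the hunks that follow; the WAL directory and table names are placeholders.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.mapreduce.WALPlayer;
  import org.apache.hadoop.util.ToolRunner;

  class WalPlayerSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Replay every WAL under the given directory, mapping edits from table1 onto table2.
      int exit = ToolRunner.run(conf, new WALPlayer(conf),
        new String[] { "/hbase/WALs", "table1", "table2" });
      System.exit(exit);
    }
  }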
assertTrue(TEST_UTIL.countRows(tn) > 0); } @@ -157,19 +158,17 @@ public void testWALPlayer() throws Exception { // replay the WAL, map table 1 to table 2 WAL log = cluster.getRegionServer(0).getWAL(null); log.rollWriter(); - String walInputDir = new Path(cluster.getMaster().getMasterFileSystem() - .getWALRootDir(), HConstants.HREGION_LOGDIR_NAME).toString(); + String walInputDir = new Path(cluster.getMaster().getMasterFileSystem().getWALRootDir(), + HConstants.HREGION_LOGDIR_NAME).toString(); - Configuration configuration= TEST_UTIL.getConfiguration(); + Configuration configuration = TEST_UTIL.getConfiguration(); WALPlayer player = new WALPlayer(configuration); - String optionName="_test_.name"; + String optionName = "_test_.name"; configuration.set(optionName, "1000"); player.setupTime(configuration, optionName); - assertEquals(1000,configuration.getLong(optionName,0)); + assertEquals(1000, configuration.getLong(optionName, 0)); assertEquals(0, ToolRunner.run(configuration, player, - new String[] {walInputDir, tableName1.getNameAsString(), - tableName2.getNameAsString() })); - + new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() })); // verify the WAL was player into table 2 Get g = new Get(ROW); @@ -233,7 +232,7 @@ public void testMainMethod() throws Exception { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -246,8 +245,8 @@ public void testMainMethod() throws Exception { } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of arguments:")); - assertTrue(data.toString().contains("Usage: WALPlayer [options] " + - " [ ]")); + assertTrue(data.toString() + .contains("Usage: WALPlayer [options] " + " [ ]")); assertTrue(data.toString().contains("-Dwal.bulk.output=/path/for/output")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index c8ff9042932f..16c366a8f5a1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.List; import java.util.NavigableMap; @@ -82,7 +83,7 @@ public class TestWALRecordReader { private static Path walRootDir; // visible for TestHLogRecordReader static final TableName tableName = TableName.valueOf(getName()); - private static final byte [] rowName = tableName.getName(); + private static final byte[] rowName = tableName.getName(); // visible for TestHLogRecordReader static final RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); private static final byte[] family = Bytes.toBytes("column"); @@ -145,8 +146,8 @@ public void testPartialRead() throws Exception { edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value)); log.appendData(info, getWalKeyImpl(ts, scopes), edit); edit = new WALEdit(); - 
edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts+1, value)); - log.appendData(info, getWalKeyImpl(ts+1, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts + 1, value)); + log.appendData(info, getWalKeyImpl(ts + 1, scopes), edit); log.sync(); Threads.sleep(10); LOG.info("Before 1st WAL roll " + log.toString()); @@ -157,17 +158,16 @@ public void testPartialRead() throws Exception { long ts1 = EnvironmentEdgeManager.currentTime(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1+1, value)); - log.appendData(info, getWalKeyImpl(ts1+1, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1 + 1, value)); + log.appendData(info, getWalKeyImpl(ts1 + 1, scopes), edit); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1+2, value)); - log.appendData(info, getWalKeyImpl(ts1+2, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1 + 2, value)); + log.appendData(info, getWalKeyImpl(ts1 + 2, scopes), edit); log.sync(); log.shutdown(); walfactory.shutdown(); LOG.info("Closed WAL " + log.toString()); - WALInputFormat input = new WALInputFormat(); Configuration jobConf = new Configuration(conf); jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString()); @@ -178,7 +178,7 @@ public void testPartialRead() throws Exception { assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); - jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1+1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1 + 1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(2, splits.size()); // Both entries from first file are in-range. @@ -201,12 +201,12 @@ public void testPartialRead() throws Exception { public void testWALRecordReader() throws Exception { final WALFactory walfactory = new WALFactory(conf, getName()); WAL log = walfactory.getWAL(info); - byte [] value = Bytes.toBytes("value"); + byte[] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), - EnvironmentEdgeManager.currentTime(), value)); - long txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(), + value)); + long txid = + log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); Thread.sleep(1); // make sure 2nd log gets a later timestamp @@ -214,10 +214,9 @@ public void testWALRecordReader() throws Exception { log.rollWriter(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), - EnvironmentEdgeManager.currentTime(), value)); - txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(), + value)); + txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); log.shutdown(); walfactory.shutdown(); @@ -240,7 +239,7 @@ public void testWALRecordReader() throws Exception { // now test basic time ranges: // set an endtime, the 2nd log file can be ignored completely. 
- jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs-1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs - 1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); @@ -253,29 +252,27 @@ public void testWALRecordReader() throws Exception { } /** - * Test WALRecordReader tolerance to moving WAL from active - * to archive directory + * Test WALRecordReader tolerance to moving WAL from active to archive directory * @throws Exception exception */ @Test public void testWALRecordReaderActiveArchiveTolerance() throws Exception { final WALFactory walfactory = new WALFactory(conf, getName()); WAL log = walfactory.getWAL(info); - byte [] value = Bytes.toBytes("value"); + byte[] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), - EnvironmentEdgeManager.currentTime(), value)); - long txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(), + value)); + long txid = + log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); Thread.sleep(10); // make sure 2nd edit gets a later timestamp edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), - EnvironmentEdgeManager.currentTime(), value)); - txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(), + value)); + txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); log.shutdown(); @@ -287,7 +284,7 @@ public void testWALRecordReaderActiveArchiveTolerance() throws Exception { List splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(1, splits.size()); WALInputFormat.WALSplit split = (WALInputFormat.WALSplit) splits.get(0); - LOG.debug("log="+logDir+" file="+ split.getLogFileName()); + LOG.debug("log=" + logDir + " file=" + split.getLogFileName()); testSplitWithMovingWAL(splits.get(0), Bytes.toBytes("1"), Bytes.toBytes("2")); } @@ -323,8 +320,8 @@ private void testSplit(InputSplit split, byte[]... columns) throws Exception { } /** - * Create a new reader from the split, match the edits against the passed columns, - * moving WAL to archive in between readings + * Create a new reader from the split, match the edits against the passed columns, moving WAL to + * archive in between readings */ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) throws Exception { WALRecordReader reader = getReader(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java index c674af3e76d8..81bb4573e340 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -29,9 +28,9 @@ import org.apache.hadoop.io.Text; /** - * Dummy mapper used for unit tests to verify that the mapper can be injected. - * This approach would be used if a custom transformation needed to be done after - * reading the input data before writing it to HFiles. + * Dummy mapper used for unit tests to verify that the mapper can be injected. This approach would + * be used if a custom transformation needed to be done after reading the input data before writing + * it to HFiles. */ public class TsvImporterCustomTestMapper extends TsvImporterMapper { @Override @@ -40,12 +39,11 @@ protected void setup(Context context) { } /** - * Convert a line of TSV text into an HBase table row after transforming the - * values by multiplying them by 3. + * Convert a line of TSV text into an HBase table row after transforming the values by multiplying + * them by 3. */ @Override - public void map(LongWritable offset, Text value, Context context) - throws IOException { + public void map(LongWritable offset, Text value, Context context) throws IOException { byte[] family = Bytes.toBytes("FAM"); final byte[][] qualifiers = { Bytes.toBytes("A"), Bytes.toBytes("B") }; @@ -54,20 +52,19 @@ public void map(LongWritable offset, Text value, Context context) String[] valueTokens = new String(lineBytes, StandardCharsets.UTF_8).split("\u001b"); // create the rowKey and Put - ImmutableBytesWritable rowKey = - new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0])); + ImmutableBytesWritable rowKey = new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0])); Put put = new Put(rowKey.copyBytes()); put.setDurability(Durability.SKIP_WAL); - //The value should look like this: VALUE1 or VALUE2. Let's multiply - //the integer by 3 - for(int i = 1; i < valueTokens.length; i++) { + // The value should look like this: VALUE1 or VALUE2. Let's multiply + // the integer by 3 + for (int i = 1; i < valueTokens.length; i++) { String prefix = valueTokens[i].substring(0, "VALUE".length()); String suffix = valueTokens[i].substring("VALUE".length()); String newValue = prefix + Integer.parseInt(suffix) * 3; - KeyValue kv = new KeyValue(rowKey.copyBytes(), family, - qualifiers[i-1], Bytes.toBytes(newValue)); + KeyValue kv = + new KeyValue(rowKey.copyBytes(), family, qualifiers[i - 1], Bytes.toBytes(newValue)); put.add(kv); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java index 850d4abac80b..cc38ebd58207 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.Arrays; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException; @@ -27,8 +26,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Just shows a simple example of how the attributes can be extracted and added - * to the puts + * Just shows a simple example of how the attributes can be extracted and added to the puts */ public class TsvImporterCustomTestMapperForOprAttr extends TsvImporterMapper { @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java index f286c63fb546..5acf89b71e69 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestCompactionTool { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCompactionTool.class); + HBaseClassTestRule.forClass(TestCompactionTool.class); private final HBaseTestingUtil testUtil = new HBaseTestingUtil(); @@ -77,20 +77,20 @@ public void testCompactedFilesArchived() throws Exception { Path tableDir = CommonFSUtils.getTableDir(rootDir, region.getRegionInfo().getTable()); FileSystem fs = store.getFileSystem(); String storePath = tableDir + "/" + region.getRegionInfo().getEncodedName() + "/" - + Bytes.toString(HBaseTestingUtil.fam1); + + Bytes.toString(HBaseTestingUtil.fam1); FileStatus[] regionDirFiles = fs.listStatus(new Path(storePath)); assertEquals(10, regionDirFiles.length); String defaultFS = testUtil.getMiniHBaseCluster().getConfiguration().get("fs.defaultFS"); Configuration config = HBaseConfiguration.create(); config.set("fs.defaultFS", defaultFS); int result = ToolRunner.run(config, new CompactionTool(), - new String[]{"-compactOnce", "-major", storePath}); - assertEquals(0,result); + new String[] { "-compactOnce", "-major", storePath }); + assertEquals(0, result); regionDirFiles = fs.listStatus(new Path(storePath)); assertEquals(1, regionDirFiles.length); } - private void putAndFlush(int key) throws Exception{ + private void putAndFlush(int key) throws Exception { Put put = new Put(Bytes.toBytes(key)); put.addColumn(HBaseTestingUtil.fam1, qualifier, Bytes.toBytes("val" + key)); region.put(put); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java index c614b4400051..6888cd6aaaf3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java @@ -95,9 +95,10 @@ public void setUp() throws Exception { public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); - TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily( - 
ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100) - .build()).build(); + TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100).build()) + .build(); Connection connection2 = ConnectionFactory.createConnection(CONF2); try (Admin admin2 = connection2.getAdmin()) { @@ -249,7 +250,6 @@ static void checkRestoreTmpDir(Configuration conf, String restoreTmpDir, int exp } } - @Test public void testVerifyRepJobWithQuorumAddress() throws Exception { // Populate the tables, at the same time it guarantees that the tables are @@ -300,10 +300,10 @@ public void testVerifyRepJobWithQuorumAddressAndSnapshotSupport() throws Excepti String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), - tableName.getNameAsString() }; + "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); checkRestoreTmpDir(CONF1, tmpPath1, 1); checkRestoreTmpDir(CONF2, tmpPath2, 1); @@ -330,10 +330,10 @@ public void testVerifyRepJobWithQuorumAddressAndSnapshotSupport() throws Excepti Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true); args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), - tableName.getNameAsString() }; + "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + tableName.getNameAsString() }; runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); checkRestoreTmpDir(CONF1, tmpPath1, 2); checkRestoreTmpDir(CONF2, tmpPath2, 2); @@ -391,25 +391,25 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti FileSystem fs = rootDir.getFileSystem(CONF1); String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, - Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(CONF2); FileSystem peerFs = peerRootDir.getFileSystem(CONF2); String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, - Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); + Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); String peerFSAddress = peerFs.getUri().toString(); String tmpPath1 = UTIL1.getRandomDir().toString(); String tmpPath2 = "/tmp" + 
EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), - tableName.getNameAsString() }; + "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, + "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, + "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); checkRestoreTmpDir(CONF1, tmpPath1, 1); checkRestoreTmpDir(CONF2, tmpPath2, 1); @@ -421,7 +421,7 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti put = new Put(result.getRow()); Cell firstVal = result.rawCells()[0]; put.addColumn(CellUtil.cloneFamily(firstVal), CellUtil.cloneQualifier(firstVal), - Bytes.toBytes("diff data")); + Bytes.toBytes("diff data")); htable3.put(put); } Delete delete = new Delete(put.getRow()); @@ -429,18 +429,18 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, - Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, - Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); + Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), - tableName.getNameAsString() }; + "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, + "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, + "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + tableName.getNameAsString() }; runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); checkRestoreTmpDir(CONF1, tmpPath1, 2); checkRestoreTmpDir(CONF2, tmpPath2, 2); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java index fdbf7ac0db04..eb8d76e5155c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java @@ -90,9 +90,10 @@ public void setUp() throws Exception { @BeforeClass public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); - TableDescriptor peerTable = 
TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100) - .build()).build(); + TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100).build()) + .build(); Connection connection2 = ConnectionFactory.createConnection(CONF2); try (Admin admin2 = connection2.getAdmin()) { admin2.createTable(peerTable, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); @@ -303,10 +304,10 @@ public void testVerifyReplicationWithSnapshotSupport() throws Exception { String temPath2 = "/tmp" + EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", - tableName.getNameAsString() }; + "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", + tableName.getNameAsString() }; TestVerifyReplication.runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); TestVerifyReplication.checkRestoreTmpDir(CONF1, temPath1, 1); TestVerifyReplication.checkRestoreTmpDir(CONF2, temPath2, 1); @@ -333,10 +334,10 @@ public void testVerifyReplicationWithSnapshotSupport() throws Exception { Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true); args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", - tableName.getNameAsString() }; + "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", + tableName.getNameAsString() }; TestVerifyReplication.runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); TestVerifyReplication.checkRestoreTmpDir(CONF1, temPath1, 2); TestVerifyReplication.checkRestoreTmpDir(CONF2, temPath2, 2); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java index 2fe843ba62d1..7266ec090e80 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertEquals; @@ -168,23 +167,24 @@ public void testVerifyRepBySnapshot() throws Exception { FileSystem fs = rootDir.getFileSystem(conf1); String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME, - Bytes.toString(FAMILY), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(FAMILY), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(conf2); FileSystem peerFs = peerRootDir.getFileSystem(conf2); String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME, - Bytes.toString(FAMILY), peerSnapshotName, peerRootDir, peerFs, true); + Bytes.toString(FAMILY), peerSnapshotName, peerRootDir, peerFs, true); String peerFSAddress = peerFs.getUri().toString(); String temPath1 = new Path(fs.getUri().toString(), "/tmp1").toString(); String temPath2 = "/tmp2"; String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(conf2), PEER_ID, TABLE_NAME.toString() }; + "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(conf2), PEER_ID, + TABLE_NAME.toString() }; // Use the yarn's config override the source cluster's config. Configuration newConf = HBaseConfiguration.create(conf1); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java index 0d1cead33016..10b5a5338beb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,6 +18,7 @@ package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertEquals; + import java.io.File; import java.io.IOException; import java.util.Arrays; @@ -62,14 +62,14 @@ public class TestVerifyReplicationSecureClusterCredentials { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationSecureClusterCredentials.class); + HBaseClassTestRule.forClass(TestVerifyReplicationSecureClusterCredentials.class); private static MiniKdc KDC; private static final HBaseTestingUtil UTIL1 = new HBaseTestingUtil(); private static final HBaseTestingUtil UTIL2 = new HBaseTestingUtil(); private static final File KEYTAB_FILE = - new File(UTIL1.getDataTestDir("keytab").toUri().getPath()); + new File(UTIL1.getDataTestDir("keytab").toUri().getPath()); private static final String LOCALHOST = "localhost"; private static String CLUSTER_PRINCIPAL; @@ -96,8 +96,8 @@ private static void setupCluster(HBaseTestingUtil util) throws Exception { conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + ',' + TokenProvider.class.getName()); - HBaseKerberosUtils.setSecuredConfiguration(conf, - CLUSTER_PRINCIPAL + '@' + KDC.getRealm(), HTTP_PRINCIPAL + '@' + KDC.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, CLUSTER_PRINCIPAL + '@' + KDC.getRealm(), + HTTP_PRINCIPAL + '@' + KDC.getRealm()); util.startMiniCluster(); } @@ -112,13 +112,14 @@ public static void beforeClass() throws Exception { setupCluster(UTIL2); try (Admin admin = UTIL1.getAdmin()) { - admin.addReplicationPeer("1", ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())) - .putConfiguration(HBaseKerberosUtils.KRB_PRINCIPAL, - UTIL2.getConfiguration().get(HBaseKerberosUtils.KRB_PRINCIPAL)) - .putConfiguration(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL, - UTIL2.getConfiguration().get(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL)) - .build()); + admin.addReplicationPeer("1", + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())) + .putConfiguration(HBaseKerberosUtils.KRB_PRINCIPAL, + UTIL2.getConfiguration().get(HBaseKerberosUtils.KRB_PRINCIPAL)) + .putConfiguration(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL, + UTIL2.getConfiguration().get(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL)) + .build()); } } @@ -130,10 +131,8 @@ public static void cleanup() throws IOException { @Parameters public static Collection> peer() { - return Arrays.asList( - () -> "1", - () -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration()) - ); + return Arrays.asList(() -> "1", + () -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())); } @Parameter @@ -143,11 +142,7 @@ public static Collection> peer() { @SuppressWarnings("unchecked") public void testJobCredentials() throws Exception { Job job = new VerifyReplication().createSubmittableJob( - new Configuration(UTIL1.getConfiguration()), - new String[] { - peer.get(), - "table" - }); + new Configuration(UTIL1.getConfiguration()), new String[] { peer.get(), "table" }); Credentials credentials = job.getCredentials(); Collection> tokens = credentials.getAllTokens(); @@ -155,12 +150,12 @@ public void testJobCredentials() throws Exception { String clusterId1 = ZKClusterId.readClusterIdZNode(UTIL1.getZooKeeperWatcher()); Token tokenForCluster1 = - (Token) credentials.getToken(new Text(clusterId1)); + (Token) 
credentials.getToken(new Text(clusterId1)); assertEquals(FULL_USER_PRINCIPAL, tokenForCluster1.decodeIdentifier().getUsername()); String clusterId2 = ZKClusterId.readClusterIdZNode(UTIL2.getZooKeeperWatcher()); Token tokenForCluster2 = - (Token) credentials.getToken(new Text(clusterId2)); + (Token) credentials.getToken(new Text(clusterId2)); assertEquals(FULL_USER_PRINCIPAL, tokenForCluster2.decodeIdentifier().getUsername()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 02aae1b341bd..07a48448cc65 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -64,15 +64,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** * Test Export Snapshot Tool */ @Ignore // HBASE-24493 -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestExportSnapshot { @ClassRule @@ -232,30 +233,30 @@ public void testExportWithTargetName() throws Exception { private void testExportFileSystemState(final TableName tableName, final String snapshotName, final String targetName, int filesExpected) throws Exception { - testExportFileSystemState(tableName, snapshotName, targetName, - filesExpected, getHdfsDestinationDir(), false); + testExportFileSystemState(tableName, snapshotName, targetName, filesExpected, + getHdfsDestinationDir(), false); } - protected void testExportFileSystemState(final TableName tableName, - final String snapshotName, final String targetName, int filesExpected, - Path copyDir, boolean overwrite) throws Exception { + protected void testExportFileSystemState(final TableName tableName, final String snapshotName, + final String targetName, int filesExpected, Path copyDir, boolean overwrite) + throws Exception { testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, targetName, - filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, - overwrite, getBypassRegionPredicate(), true); + filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, overwrite, + getBypassRegionPredicate(), true); } /** * Creates destination directory, runs ExportSnapshot() tool, and runs some verifications. 
*/ - protected static void testExportFileSystemState(final Configuration conf, final TableName tableName, - final String snapshotName, final String targetName, final int filesExpected, - final Path srcDir, Path rawTgtDir, final boolean overwrite, + protected static void testExportFileSystemState(final Configuration conf, + final TableName tableName, final String snapshotName, final String targetName, + final int filesExpected, final Path srcDir, Path rawTgtDir, final boolean overwrite, final RegionPredicate bypassregionPredicate, boolean success) throws Exception { FileSystem tgtFs = rawTgtDir.getFileSystem(conf); FileSystem srcFs = srcDir.getFileSystem(conf); Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory()); - LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", - tgtFs.getUri(), tgtDir, rawTgtDir, srcFs.getUri(), srcDir); + LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", tgtFs.getUri(), tgtDir, + rawTgtDir, srcFs.getUri(), srcDir); List opts = new ArrayList<>(); opts.add("--snapshot"); opts.add(snapshotName); @@ -283,11 +284,11 @@ protected static void testExportFileSystemState(final Configuration conf, final // Verify File-System state FileStatus[] rootFiles = tgtFs.listStatus(tgtDir); assertEquals(filesExpected > 0 ? 2 : 1, rootFiles.length); - for (FileStatus fileStatus: rootFiles) { + for (FileStatus fileStatus : rootFiles) { String name = fileStatus.getPath().getName(); assertTrue(fileStatus.toString(), fileStatus.isDirectory()); - assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) || - name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); + assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) + || name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); } LOG.info("Verified filesystem state"); @@ -295,8 +296,8 @@ protected static void testExportFileSystemState(final Configuration conf, final final Path snapshotDir = new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName); final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, targetName); verifySnapshotDir(srcFs, new Path(srcDir, snapshotDir), tgtFs, new Path(tgtDir, targetDir)); - Set snapshotFiles = verifySnapshot(conf, tgtFs, tgtDir, tableName, - targetName, bypassregionPredicate); + Set snapshotFiles = + verifySnapshot(conf, tgtFs, tgtDir, tableName, targetName, bypassregionPredicate); assertEquals(filesExpected, snapshotFiles.size()); } @@ -314,12 +315,12 @@ protected static void verifySnapshotDir(final FileSystem fs1, final Path root1, protected static Set verifySnapshot(final Configuration conf, final FileSystem fs, final Path rootDir, final TableName tableName, final String snapshotName, final RegionPredicate bypassregionPredicate) throws IOException { - final Path exportedSnapshot = new Path(rootDir, - new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); + final Path exportedSnapshot = + new Path(rootDir, new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); final Set snapshotFiles = new HashSet<>(); final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); SnapshotReferenceUtil.visitReferencedFiles(conf, fs, exportedSnapshot, - new SnapshotReferenceUtil.SnapshotVisitor() { + new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { @@ -331,8 +332,8 @@ public void storeFile(final RegionInfo regionInfo, final String family, String hfile = 
storeFile.getName(); snapshotFiles.add(hfile); verifyNonEmptyFile(new Path(exportedArchive, - new Path(CommonFSUtils.getTableDir(new Path("./"), tableName), - new Path(regionInfo.getEncodedName(), new Path(family, hfile))))); + new Path(CommonFSUtils.getTableDir(new Path("./"), tableName), + new Path(regionInfo.getEncodedName(), new Path(family, hfile))))); } else { Pair referredToRegionAndFile = StoreFileInfo.getReferredToRegionAndFile(storeFile.getName()); @@ -340,8 +341,8 @@ public void storeFile(final RegionInfo regionInfo, final String family, String hfile = referredToRegionAndFile.getSecond(); snapshotFiles.add(hfile); verifyNonEmptyFile(new Path(exportedArchive, - new Path(CommonFSUtils.getTableDir(new Path("./"), tableName), - new Path(region, new Path(family, hfile))))); + new Path(CommonFSUtils.getTableDir(new Path("./"), tableName), + new Path(region, new Path(family, hfile))))); } } @@ -365,7 +366,7 @@ private static Set listFiles(final FileSystem fs, final Path root, final int rootPrefix = root.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString().length(); FileStatus[] list = CommonFSUtils.listStatus(fs, dir); if (list != null) { - for (FileStatus fstat: list) { + for (FileStatus fstat : list) { LOG.debug(Objects.toString(fstat.getPath())); if (fstat.isDirectory()) { files.addAll(listFiles(fs, root, fstat.getPath())); @@ -379,8 +380,8 @@ private static Set listFiles(final FileSystem fs, final Path root, final private Path getHdfsDestinationDir() { Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - Path path = new Path(new Path(rootDir, "export-test"), "export-" + - EnvironmentEdgeManager.currentTime()); + Path path = new Path(new Path(rootDir, "export-test"), + "export-" + EnvironmentEdgeManager.currentTime()); LOG.info("HDFS export destination path: " + path); return path; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java index 6569767ea3fc..062731d4a8e7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.snapshot; import static org.junit.Assert.assertFalse; + import java.util.Iterator; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -43,13 +44,13 @@ import org.slf4j.LoggerFactory; /** - * Tests that are adjunct to {@link TestExportSnapshot}. They used to be in same test suite but - * the test suite ran too close to the maximum time limit so we split these out. Uses - * facility from TestExportSnapshot where possible. + * Tests that are adjunct to {@link TestExportSnapshot}. They used to be in same test suite but the + * test suite ran too close to the maximum time limit so we split these out. Uses facility from + * TestExportSnapshot where possible. * @see TestExportSnapshot */ @Ignore // HBASE-24493 -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestExportSnapshotAdjunct { private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotAdjunct.class); @@ -75,12 +76,11 @@ public static void setUpBeforeClass() throws Exception { } /** - * Check for references to '/tmp'. 
We are trying to avoid having references to outside of the - * test data dir when running tests. References outside of the test dir makes it so concurrent - * tests can stamp on each other by mistake. This check is for references to the 'tmp'. - * - * This is a strange place for this test but I want somewhere where the configuration is - * full -- filed w/ hdfs and mapreduce configurations. + * Check for references to '/tmp'. We are trying to avoid having references to outside of the test + * data dir when running tests. References outside of the test dir makes it so concurrent tests + * can stamp on each other by mistake. This check is for references to the 'tmp'. This is a + * strange place for this test but I want somewhere where the configuration is full -- filed w/ + * hdfs and mapreduce configurations. */ private void checkForReferencesToTmpDir() { Configuration conf = TEST_UTIL.getConfiguration(); @@ -126,8 +126,7 @@ public void setUp() throws Exception { admin.snapshot(emptySnapshotName, tableName); // Add some rows - SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, - TestExportSnapshot.FAMILY); + SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, TestExportSnapshot.FAMILY); tableNumFiles = admin.getRegions(tableName).size(); // take a snapshot @@ -151,9 +150,8 @@ public void testExportRetry() throws Exception { conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true); conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2); conf.setInt("mapreduce.map.maxattempts", 3); - TestExportSnapshot.testExportFileSystemState(conf, tableName, - snapshotName, snapshotName, tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), - copyDir, true, null, true); + TestExportSnapshot.testExportFileSystemState(conf, tableName, snapshotName, snapshotName, + tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, true); } /** @@ -168,8 +166,7 @@ public void testExportFailure() throws Exception { conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true); conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4); conf.setInt("mapreduce.map.maxattempts", 3); - TestExportSnapshot.testExportFileSystemState(conf, tableName, - snapshotName, snapshotName, tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), - copyDir, true, null, false); + TestExportSnapshot.testExportFileSystemState(conf, tableName, snapshotName, snapshotName, + tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, false); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java index d104d830985b..faa1094147ce 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ /** * Test Export Snapshot Tool helpers */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestExportSnapshotHelpers { @ClassRule @@ -42,44 +42,39 @@ public class TestExportSnapshotHelpers { HBaseClassTestRule.forClass(TestExportSnapshotHelpers.class); /** - * Verfy the result of getBalanceSplits() method. 
- * The result are groups of files, used as input list for the "export" mappers. - * All the groups should have similar amount of data. - * - * The input list is a pair of file path and length. - * The getBalanceSplits() function sort it by length, - * and assign to each group a file, going back and forth through the groups. + * Verfy the result of getBalanceSplits() method. The result are groups of files, used as input + * list for the "export" mappers. All the groups should have similar amount of data. The input + * list is a pair of file path and length. The getBalanceSplits() function sort it by length, and + * assign to each group a file, going back and forth through the groups. */ @Test public void testBalanceSplit() throws Exception { // Create a list of files List> files = new ArrayList<>(21); for (long i = 0; i <= 20; i++) { - SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() - .setType(SnapshotFileInfo.Type.HFILE) - .setHfile("file-" + i) - .build(); + SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder().setType(SnapshotFileInfo.Type.HFILE) + .setHfile("file-" + i).build(); files.add(new Pair<>(fileInfo, i)); } // Create 5 groups (total size 210) - // group 0: 20, 11, 10, 1 (total size: 42) - // group 1: 19, 12, 9, 2 (total size: 42) - // group 2: 18, 13, 8, 3 (total size: 42) - // group 3: 17, 12, 7, 4 (total size: 42) - // group 4: 16, 11, 6, 5 (total size: 42) + // group 0: 20, 11, 10, 1 (total size: 42) + // group 1: 19, 12, 9, 2 (total size: 42) + // group 2: 18, 13, 8, 3 (total size: 42) + // group 3: 17, 12, 7, 4 (total size: 42) + // group 4: 16, 11, 6, 5 (total size: 42) List>> splits = ExportSnapshot.getBalancedSplits(files, 5); assertEquals(5, splits.size()); - String[] split0 = new String[] {"file-20", "file-11", "file-10", "file-1", "file-0"}; + String[] split0 = new String[] { "file-20", "file-11", "file-10", "file-1", "file-0" }; verifyBalanceSplit(splits.get(0), split0, 42); - String[] split1 = new String[] {"file-19", "file-12", "file-9", "file-2"}; + String[] split1 = new String[] { "file-19", "file-12", "file-9", "file-2" }; verifyBalanceSplit(splits.get(1), split1, 42); - String[] split2 = new String[] {"file-18", "file-13", "file-8", "file-3"}; + String[] split2 = new String[] { "file-18", "file-13", "file-8", "file-3" }; verifyBalanceSplit(splits.get(2), split2, 42); - String[] split3 = new String[] {"file-17", "file-14", "file-7", "file-4"}; + String[] split3 = new String[] { "file-17", "file-14", "file-7", "file-4" }; verifyBalanceSplit(splits.get(3), split3, 42); - String[] split4 = new String[] {"file-16", "file-15", "file-6", "file-5"}; + String[] split4 = new String[] { "file-16", "file-15", "file-6", "file-5" }; verifyBalanceSplit(splits.get(4), split4, 42); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java index dd5ed0cc9655..1b31b3b94b61 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java @@ -44,12 +44,11 @@ import org.slf4j.LoggerFactory; /** - * Test Export Snapshot Tool - * Tests V1 snapshots only. Used to ALSO test v2 but strange failure so separate the tests. - * See companion file for test of v2 snapshot. + * Test Export Snapshot Tool Tests V1 snapshots only. 
Used to ALSO test v2 but strange failure so + * separate the tests. See companion file for test of v2 snapshot. * @see TestExportSnapshotV2NoCluster */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestExportSnapshotV1NoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -91,30 +90,30 @@ static Path setup(FileSystem fs, HBaseCommonTestingUtil hctu) throws IOException */ @Test public void testSnapshotWithRefsExportFileSystemState() throws Exception { - final SnapshotMock snapshotMock = new SnapshotMock(testUtil.getConfiguration(), - this.fs, testDir); - final SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV1("tableWithRefsV1", - "tableWithRefsV1"); + final SnapshotMock snapshotMock = + new SnapshotMock(testUtil.getConfiguration(), this.fs, testDir); + final SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV1("tableWithRefsV1", "tableWithRefsV1"); testSnapshotWithRefsExportFileSystemState(this.fs, builder, testUtil, testDir); } /** - * Generates a couple of regions for the specified SnapshotMock, - * and then it will run the export and verification. + * Generates a couple of regions for the specified SnapshotMock, and then it will run the export + * and verification. */ static void testSnapshotWithRefsExportFileSystemState(FileSystem fs, - SnapshotMock.SnapshotBuilder builder, HBaseCommonTestingUtil testUtil, Path testDir) - throws Exception { + SnapshotMock.SnapshotBuilder builder, HBaseCommonTestingUtil testUtil, Path testDir) + throws Exception { Path[] r1Files = builder.addRegion(); Path[] r2Files = builder.addRegion(); builder.commit(); // remove references, only keep data files Set dataFiles = new HashSet<>(); - for (Path[] files: new Path[][]{r1Files, r2Files}) { + for (Path[] files : new Path[][] { r1Files, r2Files }) { for (Path file : files) { if (StoreFileInfo.isReference(file.getName())) { Pair referredToRegionAndFile = - StoreFileInfo.getReferredToRegionAndFile(file.getName()); + StoreFileInfo.getReferredToRegionAndFile(file.getName()); dataFiles.add(referredToRegionAndFile.getSecond()); } else { dataFiles.add(file.getName()); @@ -124,16 +123,16 @@ static void testSnapshotWithRefsExportFileSystemState(FileSystem fs, int snapshotFilesCount = dataFiles.size(); String snapshotName = builder.getSnapshotDescription().getName(); TableName tableName = builder.getTableDescriptor().getTableName(); - TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), - tableName, snapshotName, snapshotName, snapshotFilesCount, - testDir, getDestinationDir(fs, testUtil, testDir), false, null, true); + TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), tableName, + snapshotName, snapshotName, snapshotFilesCount, testDir, + getDestinationDir(fs, testUtil, testDir), false, null, true); } static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtil hctu, Path testDir) throws IOException { - Path path = new Path(new Path(testDir, "export-test"), - "export-" + EnvironmentEdgeManager.currentTime()).makeQualified(fs.getUri(), - fs.getWorkingDirectory()); + Path path = + new Path(new Path(testDir, "export-test"), "export-" + EnvironmentEdgeManager.currentTime()) + .makeQualified(fs.getUri(), fs.getWorkingDirectory()); LOG.info("Export destination={}, fs={}, fsurl={}, fswd={}, testDir={}", path, fs, fs.getUri(), fs.getWorkingDirectory(), testDir); return path; diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java index f2d3f627bae7..d433cf5809c8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java @@ -16,7 +16,9 @@ * limitations under the License. */ package org.apache.hadoop.hbase.snapshot; + import static org.junit.Assert.assertTrue; + import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; @@ -36,7 +38,7 @@ * Test Export Snapshot Tool; tests v2 snapshots. * @see TestExportSnapshotV1NoCluster */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestExportSnapshotV2NoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -60,9 +62,9 @@ public void before() throws Exception { @Test public void testSnapshotWithRefsExportFileSystemState() throws Exception { final SnapshotMock snapshotMock = new SnapshotMock(testUtil.getConfiguration(), - testDir.getFileSystem(testUtil.getConfiguration()), testDir); - final SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("tableWithRefsV2", - "tableWithRefsV2"); + testDir.getFileSystem(testUtil.getConfiguration()), testDir); + final SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV2("tableWithRefsV2", "tableWithRefsV2"); TestExportSnapshotV1NoCluster.testSnapshotWithRefsExportFileSystemState(this.fs, builder, this.testUtil, this.testDir); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java index 5560555e9f33..0e21f9c6b9d5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
+ * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -31,7 +31,7 @@ import org.junit.experimental.categories.Category; @Ignore // HBASE-24493 -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestExportSnapshotWithTemporaryDirectory extends TestExportSnapshot { @ClassRule @@ -54,8 +54,8 @@ public static void setUpBaseConf(Configuration conf) { Path tmpDir = null; try { FileSystem localFs = FileSystem.getLocal(conf); - tmpDir = TEST_UTIL.getDataTestDir(UUID.randomUUID().toString()). - makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); + tmpDir = TEST_UTIL.getDataTestDir(UUID.randomUUID().toString()) + .makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); } catch (IOException ioe) { throw new RuntimeException(ioe); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java index 4f0d3deebe20..dc92c03c4f65 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ * Test Export Snapshot Tool */ @Ignore // HBASE-24493 -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestMobExportSnapshot extends TestExportSnapshot { @ClassRule diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java index 484f88afecf4..c57dea3cd36e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ /** * Reruns TestMobExportSnapshot using MobExportSnapshot in secure mode. */ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestMobSecureExportSnapshot extends TestMobExportSnapshot { @ClassRule diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java index ce1c4cb39a04..9d813a173724 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ /** * Reruns TestExportSnapshot using ExportSnapshot in secure mode. */ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestSecureExportSnapshot extends TestExportSnapshot { @ClassRule diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 336816e2b49e..3ef5afe9cdd3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -68,8 +69,8 @@ /** * A command-line utility that reads, writes, and verifies data. Unlike - * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written, - * and supports simultaneously writing and reading the same set of keys. + * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written, and + * supports simultaneously writing and reading the same set of keys. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class LoadTestTool extends AbstractHBaseTool { @@ -94,23 +95,21 @@ public class LoadTestTool extends AbstractHBaseTool { /** Usage string for the load option */ protected static final String OPT_USAGE_LOAD = - ":" + - "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; + ":" + "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; /** Usage string for the read option */ protected static final String OPT_USAGE_READ = "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; /** Usage string for the update option */ - protected static final String OPT_USAGE_UPDATE = - "[:<#threads=" + DEFAULT_NUM_THREADS - + ">][:<#whether to ignore nonce collisions=0>]"; + protected static final String OPT_USAGE_UPDATE = "[:<#threads=" + + DEFAULT_NUM_THREADS + ">][:<#whether to ignore nonce collisions=0>]"; - protected static final String OPT_USAGE_BLOOM = "Bloom filter type, one of " + - Arrays.toString(BloomType.values()); + protected static final String OPT_USAGE_BLOOM = + "Bloom filter type, one of " + Arrays.toString(BloomType.values()); - protected static final String OPT_USAGE_COMPRESSION = "Compression type, " + - "one of " + Arrays.toString(Compression.Algorithm.values()); + protected static final String OPT_USAGE_COMPRESSION = + "Compression type, " + "one of " + Arrays.toString(Compression.Algorithm.values()); protected static final String OPT_VERBOSE = "verbose"; @@ -121,8 +120,8 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_DEFERRED_LOG_FLUSH_USAGE = "Enable deferred log flush."; public static final String OPT_INMEMORY = "in_memory"; - public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + - "inmemory as far as possible. Not guaranteed that reads are always served from inmemory"; + public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + + "inmemory as far as possible. Not guaranteed that reads are always served from inmemory"; public static final String OPT_GENERATOR = "generator"; public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool." @@ -157,12 +156,12 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_ENCRYPTION = "encryption"; protected static final String OPT_ENCRYPTION_USAGE = - "Enables transparent encryption on the test table, one of " + - Arrays.toString(Encryption.getSupportedCiphers()); + "Enables transparent encryption on the test table, one of " + + Arrays.toString(Encryption.getSupportedCiphers()); public static final String OPT_NUM_REGIONS_PER_SERVER = "num_regions_per_server"; - protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE - = "Desired number of regions per region server. Defaults to 5."; + protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE = + "Desired number of regions per region server. Defaults to 5."; public static int DEFAULT_NUM_REGIONS_PER_SERVER = 5; public static final String OPT_REGION_REPLICATION = "region_replication"; @@ -222,7 +221,7 @@ public class LoadTestTool extends AbstractHBaseTool { private String superUser; private String userNames; - //This file is used to read authentication information in secure clusters. + // This file is used to read authentication information in secure clusters. 
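Note that the angle-bracket placeholders in the usage constants above (average columns per key, average data size, and so on) did not survive rendering here, which makes the colon-separated option syntax easy to misread. Below is one plausible invocation passed straight to the tool's main method; the flag spellings and values are illustrative only and should be checked against the tool's own help output rather than taken from this sketch.

import org.apache.hadoop.hbase.util.LoadTestTool;

public class LoadTestToolInvocationSketch {
  public static void main(String[] args) {
    // Assumed syntax: -write <avg cols per key>:<avg data size>[:<#threads>],
    // -read <verify percent>[:<#threads>]. Values below are arbitrary.
    String[] toolArgs = {
      "-tn", "load_test_example",
      "-write", "3:1024:10",
      "-read", "100:20",
      "-num_keys", "100000"
    };
    LoadTestTool.main(toolArgs);
  }
}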
private String authnFileName; private int numRegionsPerServer = DEFAULT_NUM_REGIONS_PER_SERVER; @@ -232,21 +231,19 @@ public class LoadTestTool extends AbstractHBaseTool { private int mobThreshold = -1; // not set // TODO: refactor LoadTestToolImpl somewhere to make the usage from tests less bad, - // console tool itself should only be used from console. + // console tool itself should only be used from console. protected boolean isSkipInit = false; protected boolean isInitOnly = false; protected Cipher cipher = null; - protected String[] splitColonSeparated(String option, - int minNumCols, int maxNumCols) { + protected String[] splitColonSeparated(String option, int minNumCols, int maxNumCols) { String optVal = cmd.getOptionValue(option); String[] cols = optVal.split(COLON); if (cols.length < minNumCols || cols.length > maxNumCols) { - throw new IllegalArgumentException("Expected at least " - + minNumCols + " columns but no more than " + maxNumCols + - " in the colon-separated value '" + optVal + "' of the " + - "-" + option + " option"); + throw new IllegalArgumentException("Expected at least " + minNumCols + + " columns but no more than " + maxNumCols + " in the colon-separated value '" + optVal + + "' of the " + "-" + option + " option"); } return cols; } @@ -260,11 +257,10 @@ public byte[][] getColumnFamilies() { } /** - * Apply column family options such as Bloom filters, compression, and data - * block encoding. + * Apply column family options such as Bloom filters, compression, and data block encoding. */ - protected void applyColumnFamilyOptions(TableName tableName, - byte[][] columnFamilies) throws IOException { + protected void applyColumnFamilyOptions(TableName tableName, byte[][] columnFamilies) + throws IOException { try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) { TableDescriptor tableDesc = admin.getDescriptor(tableName); @@ -273,9 +269,9 @@ protected void applyColumnFamilyOptions(TableName tableName, for (byte[] cf : columnFamilies) { ColumnFamilyDescriptor columnDesc = tableDesc.getColumnFamily(cf); boolean isNewCf = columnDesc == null; - ColumnFamilyDescriptorBuilder columnDescBuilder = isNewCf ? - ColumnFamilyDescriptorBuilder.newBuilder(cf) : - ColumnFamilyDescriptorBuilder.newBuilder(columnDesc); + ColumnFamilyDescriptorBuilder columnDescBuilder = + isNewCf ? 
ColumnFamilyDescriptorBuilder.newBuilder(cf) + : ColumnFamilyDescriptorBuilder.newBuilder(columnDesc); if (bloomType != null) { columnDescBuilder.setBloomFilterType(bloomType); } @@ -292,11 +288,8 @@ protected void applyColumnFamilyOptions(TableName tableName, byte[] keyBytes = new byte[cipher.getKeyLength()]; Bytes.secureRandom(keyBytes); columnDescBuilder.setEncryptionType(cipher.getName()); - columnDescBuilder.setEncryptionKey( - EncryptionUtil.wrapKey(conf, - User.getCurrent().getShortName(), - new SecretKeySpec(keyBytes, - cipher.getName()))); + columnDescBuilder.setEncryptionKey(EncryptionUtil.wrapKey(conf, + User.getCurrent().getShortName(), new SecretKeySpec(keyBytes, cipher.getName()))); } if (mobThreshold >= 0) { columnDescBuilder.setMobEnabled(true); @@ -317,8 +310,8 @@ protected void applyColumnFamilyOptions(TableName tableName, @Override protected void addOptions() { addOptNoArg("v", OPT_VERBOSE, "Will display a full readout of logs, including ZooKeeper"); - addOptWithArg(OPT_ZK_QUORUM, "ZK quorum as comma-separated host names " + - "without port numbers"); + addOptWithArg(OPT_ZK_QUORUM, + "ZK quorum as comma-separated host names " + "without port numbers"); addOptWithArg(OPT_ZK_PARENT_NODE, "name of parent znode in zookeeper"); addOptWithArg(OPT_TABLE_NAME, "The name of the table to read or write"); addOptWithArg(OPT_COLUMN_FAMILIES, "The name of the column families to use separated by comma"); @@ -329,20 +322,23 @@ protected void addOptions() { addOptWithArg(OPT_BLOOM, OPT_USAGE_BLOOM); addOptWithArg(OPT_BLOOM_PARAM, "the parameter of bloom filter type"); addOptWithArg(OPT_COMPRESSION, OPT_USAGE_COMPRESSION); - addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); - addOptWithArg(OPT_MAX_READ_ERRORS, "The maximum number of read errors " + - "to tolerate before terminating all reader threads. The default is " + - MultiThreadedReader.DEFAULT_MAX_ERRORS + "."); - addOptWithArg(OPT_MULTIGET, "Whether to use multi-gets as opposed to " + - "separate gets for every column in a row"); - addOptWithArg(OPT_KEY_WINDOW, "The 'key window' to maintain between " + - "reads and writes for concurrent write/read workload. The default " + - "is " + MultiThreadedReader.DEFAULT_KEY_WINDOW + "."); - - addOptNoArg(OPT_MULTIPUT, "Whether to use multi-puts as opposed to " + - "separate puts for every column in a row"); - addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " + - "separate updates for every column in a row"); + addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, + HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); + addOptWithArg(OPT_MAX_READ_ERRORS, + "The maximum number of read errors " + + "to tolerate before terminating all reader threads. The default is " + + MultiThreadedReader.DEFAULT_MAX_ERRORS + "."); + addOptWithArg(OPT_MULTIGET, + "Whether to use multi-gets as opposed to " + "separate gets for every column in a row"); + addOptWithArg(OPT_KEY_WINDOW, + "The 'key window' to maintain between " + + "reads and writes for concurrent write/read workload. 
The default " + "is " + + MultiThreadedReader.DEFAULT_KEY_WINDOW + "."); + + addOptNoArg(OPT_MULTIPUT, + "Whether to use multi-puts as opposed to " + "separate puts for every column in a row"); + addOptNoArg(OPT_BATCHUPDATE, + "Whether to use batch as opposed to " + "separate updates for every column in a row"); addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY); addOptWithArg(OPT_GENERATOR, OPT_GENERATOR_USAGE); addOptWithArg(OPT_WRITER, OPT_WRITER_USAGE); @@ -350,11 +346,9 @@ protected void addOptions() { addOptWithArg(OPT_READER, OPT_READER_USAGE); addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write"); - addOptWithArg(OPT_START_KEY, "The first key to read/write " + - "(a 0-based index). The default value is " + - DEFAULT_START_KEY + "."); - addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table " - + "already exists"); + addOptWithArg(OPT_START_KEY, "The first key to read/write " + + "(a 0-based index). The default value is " + DEFAULT_START_KEY + "."); + addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table " + "already exists"); addOptWithArg(NUM_TABLES, "A positive integer number. When a number n is specified, load test " @@ -379,9 +373,8 @@ public CommandLine parse(Options opts, String[] args, Properties props, boolean throws ParseException { CommandLine cl = super.parse(opts, args, props, stop); - boolean isReadWriteUpdate = cmd.hasOption(OPT_READ) - || cmd.hasOption(OPT_WRITE) - || cmd.hasOption(OPT_UPDATE); + boolean isReadWriteUpdate = + cmd.hasOption(OPT_READ) || cmd.hasOption(OPT_WRITE) || cmd.hasOption(OPT_UPDATE); boolean isInitOnly = cmd.hasOption(OPT_INIT_ONLY); if (!isInitOnly && !isReadWriteUpdate) { @@ -407,8 +400,7 @@ public CommandLine parse(Options opts, String[] args, Properties props, boolean protected void processOptions(CommandLine cmd) { this.cmd = cmd; - tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, - DEFAULT_TABLE_NAME)); + tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME)); if (cmd.hasOption(OPT_COLUMN_FAMILIES)) { String[] list = cmd.getOptionValue(OPT_COLUMN_FAMILIES).split(","); @@ -428,10 +420,9 @@ protected void processOptions(CommandLine cmd) { deferredLogFlush = cmd.hasOption(OPT_DEFERRED_LOG_FLUSH); if (!isInitOnly) { - startKey = parseLong(cmd.getOptionValue(OPT_START_KEY, - String.valueOf(DEFAULT_START_KEY)), 0, Long.MAX_VALUE); - long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1, - Long.MAX_VALUE - startKey); + startKey = parseLong(cmd.getOptionValue(OPT_START_KEY, String.valueOf(DEFAULT_START_KEY)), 0, + Long.MAX_VALUE); + long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1, Long.MAX_VALUE - startKey); endKey = startKey + numKeys; isSkipInit = cmd.hasOption(OPT_SKIP_INIT); System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]"); @@ -445,8 +436,7 @@ protected void processOptions(CommandLine cmd) { int colIndex = 0; minColsPerKey = 1; maxColsPerKey = 2 * Integer.parseInt(writeOpts[colIndex++]); - int avgColDataSize = - parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE); + int avgColDataSize = parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE); minColDataSize = avgColDataSize / 2; maxColDataSize = avgColDataSize * 3 / 2; @@ -462,10 +452,8 @@ protected void processOptions(CommandLine cmd) { } System.out.println("Multi-puts: " + isMultiPut); - System.out.println("Columns per key: " + minColsPerKey + ".." - + maxColsPerKey); - System.out.println("Data size per column: " + minColDataSize + ".." 
- + maxColDataSize); + System.out.println("Columns per key: " + minColsPerKey + ".." + maxColsPerKey); + System.out.println("Data size per column: " + minColDataSize + ".." + maxColDataSize); } if (isUpdate) { @@ -496,18 +484,15 @@ protected void processOptions(CommandLine cmd) { } if (cmd.hasOption(OPT_MAX_READ_ERRORS)) { - maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS), - 0, Integer.MAX_VALUE); + maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS), 0, Integer.MAX_VALUE); } if (cmd.hasOption(OPT_KEY_WINDOW)) { - keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW), - 0, Integer.MAX_VALUE); + keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW), 0, Integer.MAX_VALUE); } if (cmd.hasOption(OPT_MULTIGET)) { - multiGetBatchSize = parseInt(cmd.getOptionValue(OPT_MULTIGET), - 0, Integer.MAX_VALUE); + multiGetBatchSize = parseInt(cmd.getOptionValue(OPT_MULTIGET), 0, Integer.MAX_VALUE); } System.out.println("Multi-gets (value of 1 means no multigets): " + multiGetBatchSize); @@ -538,16 +523,15 @@ protected void processOptions(CommandLine cmd) { private void parseColumnFamilyOptions(CommandLine cmd) { String dataBlockEncodingStr = cmd.getOptionValue(HFileTestUtil.OPT_DATA_BLOCK_ENCODING); - dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null : - DataBlockEncoding.valueOf(dataBlockEncodingStr); + dataBlockEncodingAlgo = + dataBlockEncodingStr == null ? null : DataBlockEncoding.valueOf(dataBlockEncodingStr); String compressStr = cmd.getOptionValue(OPT_COMPRESSION); - compressAlgo = compressStr == null ? Compression.Algorithm.NONE : - Compression.Algorithm.valueOf(compressStr); + compressAlgo = compressStr == null ? Compression.Algorithm.NONE + : Compression.Algorithm.valueOf(compressStr); String bloomStr = cmd.getOptionValue(OPT_BLOOM); - bloomType = bloomStr == null ? BloomType.ROW : - BloomType.valueOf(bloomStr); + bloomType = bloomStr == null ? BloomType.ROW : BloomType.valueOf(bloomStr); if (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH) { if (!cmd.hasOption(OPT_BLOOM_PARAM)) { @@ -570,9 +554,8 @@ public void initTestTable() throws IOException { durability = Durability.ASYNC_WAL; } - HBaseTestingUtil.createPreSplitLoadTestTable(conf, tableName, - getColumnFamilies(), compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer, - regionReplication, durability); + HBaseTestingUtil.createPreSplitLoadTestTable(conf, tableName, getColumnFamilies(), compressAlgo, + dataBlockEncodingAlgo, numRegionsPerServer, regionReplication, durability); applyColumnFamilyOptions(tableName, getColumnFamilies()); } @@ -634,8 +617,8 @@ protected int loadTable() throws IOException { userOwner = User.createUserForTesting(conf, superUser, new String[0]); } } else { - args = clazzAndArgs.length == 1 ? new String[0] : Arrays.copyOfRange(clazzAndArgs, 1, - clazzAndArgs.length); + args = clazzAndArgs.length == 1 ? 
new String[0] + : Arrays.copyOfRange(clazzAndArgs, 1, clazzAndArgs.length); } dataGen.initialize(args); } else { @@ -646,15 +629,14 @@ protected int loadTable() throws IOException { if (userOwner != null) { LOG.info("Granting permissions for user " + userOwner.getShortName()); - Permission.Action[] actions = { - Permission.Action.ADMIN, Permission.Action.CREATE, - Permission.Action.READ, Permission.Action.WRITE }; + Permission.Action[] actions = { Permission.Action.ADMIN, Permission.Action.CREATE, + Permission.Action.READ, Permission.Action.WRITE }; try { - AccessControlClient.grant(ConnectionFactory.createConnection(conf), - tableName, userOwner.getShortName(), null, null, actions); + AccessControlClient.grant(ConnectionFactory.createConnection(conf), tableName, + userOwner.getShortName(), null, null, actions); } catch (Throwable e) { - LOG.error(HBaseMarkers.FATAL, "Error in granting permission for the user " + - userOwner.getShortName(), e); + LOG.error(HBaseMarkers.FATAL, + "Error in granting permission for the user " + userOwner.getShortName(), e); return EXIT_FAILURE; } } @@ -707,8 +689,8 @@ protected int loadTable() throws IOException { if (isRead) { if (userOwner != null) { - readerThreads = new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent, - userNames); + readerThreads = + new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent, userNames); } else { String readerClass = null; if (cmd.hasOption(OPT_READER)) { @@ -725,14 +707,12 @@ protected int loadTable() throws IOException { } if (isUpdate && isWrite) { - LOG.info("Concurrent write/update workload: making updaters aware of the " + - "write point"); + LOG.info("Concurrent write/update workload: making updaters aware of the " + "write point"); updaterThreads.linkToWriter(writerThreads); } if (isRead && (isUpdate || isWrite)) { - LOG.info("Concurrent write/read workload: making readers aware of the " + - "write point"); + LOG.info("Concurrent write/read workload: making readers aware of the " + "write point"); readerThreads.linkToWriter(isUpdate ? 
updaterThreads : writerThreads); } @@ -783,46 +763,46 @@ protected int loadTable() throws IOException { private LoadTestDataGenerator getLoadGeneratorInstance(String clazzName) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor(int.class, int.class, int.class, int.class, - byte[][].class); + Constructor constructor = + clazz.getConstructor(int.class, int.class, int.class, int.class, byte[][].class); return (LoadTestDataGenerator) constructor.newInstance(minColDataSize, maxColDataSize, - minColsPerKey, maxColsPerKey, families); + minColsPerKey, maxColsPerKey, families); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class); + Constructor constructor = + clazz.getConstructor(LoadTestDataGenerator.class, Configuration.class, TableName.class); return (MultiThreadedWriter) constructor.newInstance(dataGen, conf, tableName); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class); - return (MultiThreadedUpdater) constructor.newInstance( - dataGen, conf, tableName, updatePercent); + Constructor constructor = clazz.getConstructor(LoadTestDataGenerator.class, + Configuration.class, TableName.class, double.class); + return (MultiThreadedUpdater) constructor.newInstance(dataGen, conf, tableName, + updatePercent); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class); + Constructor constructor = clazz.getConstructor(LoadTestDataGenerator.class, + Configuration.class, TableName.class, double.class); return (MultiThreadedReader) constructor.newInstance(dataGen, conf, tableName, verifyPercent); } catch (Exception e) { throw new IOException(e); @@ -834,15 +814,12 @@ public static void main(String[] args) { } /** - * When NUM_TABLES is specified, the function starts multiple worker threads - * which individually start a LoadTestTool instance to load a table. Each - * table name is in format <tn>_<index>. For example, "-tn test -num_tables 2" - * , table names will be "test_1", "test_2" - * + * When NUM_TABLES is specified, the function starts multiple worker threads which individually + * start a LoadTestTool instance to load a table. Each table name is in format <tn>_<index>. 
+ * For example, "-tn test -num_tables 2" , table names will be "test_1", "test_2" * @throws IOException if one of the load tasks is unable to complete */ - private int parallelLoadTables() - throws IOException { + private int parallelLoadTables() throws IOException { // create new command args String tableName = cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME); String[] newArgs = null; @@ -869,7 +846,7 @@ private int parallelLoadTables() List workers = new ArrayList<>(); for (int i = 0; i < numTables; i++) { String[] workerArgs = newArgs.clone(); - workerArgs[tableNameValueIndex] = tableName + "_" + (i+1); + workerArgs[tableNameValueIndex] = tableName + "_" + (i + 1); WorkerThread worker = new WorkerThread(i, workerArgs); workers.add(worker); LOG.info(worker + " starting"); diff --git a/hbase-metrics-api/pom.xml b/hbase-metrics-api/pom.xml index 088a6c010bc0..160588d3131f 100644 --- a/hbase-metrics-api/pom.xml +++ b/hbase-metrics-api/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,50 +31,6 @@ Apache HBase - Metrics API HBase Metrics API descriptions - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +105,50 @@ + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + + test + + test + + true + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java index 6e041590ee3a..78d9ade04236 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ public interface Counter extends Metric { /** * Increment {@code this} by {@code n}. - * * @param n The amount to increment. */ void increment(long n); @@ -47,7 +46,6 @@ public interface Counter extends Metric { /** * Decrement {@code this} by {@code n}. - * * @param n The amount to decrement. */ void decrement(long n); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java index ba171c2cab24..b20da2426296 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ /** * A metrics which measures a discrete value. - * * @param The value of the Gauge. 
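The type parameter of the @param tag above was lost in rendering; it documents the value type of the Gauge. A minimal registration sketch follows, assuming Gauge exposes a single getValue() accessor (not visible in this diff) and using the register(String, Gauge) overload that appears later in this patch; the metric name is made up.

import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.metrics.Gauge;
import org.apache.hadoop.hbase.metrics.MetricRegistry;

public class GaugeRegistrationSketch {
  private static final AtomicLong queueDepth = new AtomicLong();

  static void registerQueueDepthGauge(MetricRegistry registry) {
    // Exposes a point-in-time reading of queueDepth under an illustrative name.
    registry.register("exampleQueueDepth", new Gauge<Long>() {
      @Override
      public Long getValue() {
        return queueDepth.get();
      }
    });
  }
}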
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java index 891bc6df2ea1..da4ff89c59b7 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,14 +30,12 @@ public interface Histogram extends Metric { /** * Adds a new value to the distribution. - * * @param value The value to add */ void update(int value); /** * Adds a new value to the distribution. - * * @param value The value to add */ void update(long value); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java index 5f38a005b3e1..9217a2af4a4e 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ public interface Meter extends Metric { /** * Records {@code events} occurrences. - * * @param events Number of occurrences to record. */ void mark(long events); @@ -53,14 +52,13 @@ public interface Meter extends Metric { double getMeanRate(); /** - * Returns the one-minute exponentially-weighted moving average rate at which events have - * occurred since the meter was created. + * Returns the one-minute exponentially-weighted moving average rate at which events have occurred + * since the meter was created. *

* This rate has the same exponential decay factor as the one-minute load average in the {@code * top} Unix command. - * - * @return the one-minute exponentially-weighted moving average rate at which events have - * occurred since the meter was created + * @return the one-minute exponentially-weighted moving average rate at which events have occurred + * since the meter was created */ double getOneMinuteRate(); @@ -70,7 +68,6 @@ public interface Meter extends Metric { * * This rate has the same exponential decay factor as the five-minute load average in the {@code * top} Unix command. - * * @return the five-minute exponentially-weighted moving average rate at which events have * occurred since the meter was created */ @@ -82,7 +79,6 @@ public interface Meter extends Metric { *
* This rate has the same exponential decay factor as the fifteen-minute load average in the * {@code top} Unix command. - * * @return the fifteen-minute exponentially-weighted moving average rate at which events have * occurred since the meter was created */ diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java index 2f6d49e01fc9..e79a9f3631ac 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java index 33e989cfe015..9e7b13d89c8b 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Collection; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -53,8 +50,8 @@ public static MetricRegistries global() { public abstract void clear(); /** - * Create or return MetricRegistry with the given info. MetricRegistry will only be created - * if current reference count is 0. Otherwise ref counted is incremented, and an existing instance + * Create or return MetricRegistry with the given info. MetricRegistry will only be created if + * current reference count is 0. Otherwise ref counted is incremented, and an existing instance * will be returned. * @param info the info object for the MetricRegistrytry. * @return created or existing MetricRegistry. diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java index edc813d95b99..737ab0e5abcb 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
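As a reading aid for the Meter hunks above, a short sketch of the intended call pattern; the rate getter names follow the Javadoc, and the no-argument mark() overload is an assumption since only mark(long) is visible in this diff.

import org.apache.hadoop.hbase.metrics.Meter;

public class MeterUsageSketch {
  static void recordBatch(Meter requestMeter, int batchSize) {
    requestMeter.mark(batchSize); // record batchSize occurrences at once
    requestMeter.mark();          // record a single occurrence (no-arg overload assumed)
    // Rates are smoothed like the 1/5/15 minute load averages described above.
    double oneMinute = requestMeter.getOneMinuteRate();
    double fifteenMinute = requestMeter.getFifteenMinuteRate();
    System.out.printf("1m=%.2f/s 15m=%.2f/s%n", oneMinute, fifteenMinute);
  }
}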
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.ArrayList; import java.util.List; import java.util.ServiceLoader; - import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -32,8 +29,8 @@ public final class MetricRegistriesLoader { private static final Logger LOG = LoggerFactory.getLogger(MetricRegistries.class); - private static final String defaultClass - = "org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl"; + private static final String defaultClass = + "org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl"; private MetricRegistriesLoader() { } @@ -64,7 +61,7 @@ static MetricRegistries load(List availableImplementations) { return impl; } else if (availableImplementations.isEmpty()) { try { - return ReflectionUtils.newInstance((Class)Class.forName(defaultClass)); + return ReflectionUtils.newInstance((Class) Class.forName(defaultClass)); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java index 3bd5f6cd844c..78179ebcee95 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics; import java.util.Optional; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,7 +31,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Timer} used to measure durations and report rates. - * * @param name the name of the timer. * @return An instance of {@link Timer}. */ @@ -40,7 +38,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Histogram} used to measure a distribution of values. - * * @param name The name of the Histogram. * @return An instance of {@link Histogram}. */ @@ -49,7 +46,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Meter} used to measure durations and report distributions (a * combination of a {@link Timer} and a {@link Histogram}. - * * @param name The name of the Meter. * @return An instance of {@link Meter}. */ @@ -57,7 +53,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Counter} used to track a mutable number. - * * @param name The name of the Counter * @return An instance of {@link Counter}. 
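The MetricRegistry hunks above show only Javadoc, not the accessor signatures. The sketch below assumes conventional get-or-create accessors named counter(name), histogram(name) and timer(name), plus an update(long, TimeUnit) method on Timer; none of these signatures are visible in this diff, and the metric names are made up.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.metrics.Counter;
import org.apache.hadoop.hbase.metrics.Histogram;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.Timer;

public class RegistryUsageSketch {
  static void instrument(MetricRegistry registry, long latencyNanos, int rowsScanned) {
    Counter requests = registry.counter("exampleRequests"); // get-or-create by name
    requests.increment();

    Histogram rows = registry.histogram("exampleRowsPerScan");
    rows.update(rowsScanned);

    Timer latency = registry.timer("exampleLatency");
    latency.update(latencyNanos, TimeUnit.NANOSECONDS); // update(long, TimeUnit) assumed
  }
}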
*/ @@ -96,7 +91,6 @@ public interface MetricRegistry extends MetricSet { /** * Removes the metric with the given name. - * * @param name the name of the metric * @return true if the metric is removed. */ diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java index be77c42985de..9d53a8cbf539 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java index c4396bd24d5a..abac13b8ace5 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; - import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.yetus.audience.InterfaceAudience; /** * HBase Metrics are grouped in different MetricRegistry'ies. All metrics that correspond to a - * subcomponent (like RPC, GC, WAL) are managed in a single MetricRegistry. - * This class holds the name and description and JMX related context names for such group of - * metrics. + * subcomponent (like RPC, GC, WAL) are managed in a single MetricRegistry. This class holds the + * name and description and JMX related context names for such group of metrics. */ @InterfaceAudience.Private public class MetricRegistryInfo { @@ -37,12 +34,8 @@ public class MetricRegistryInfo { protected final String metricsJmxContext; protected final boolean existingSource; - public MetricRegistryInfo( - String metricsName, - String metricsDescription, - String metricsJmxContext, - String metricsContext, - boolean existingSource) { + public MetricRegistryInfo(String metricsName, String metricsDescription, String metricsJmxContext, + String metricsContext, boolean existingSource) { this.metricsName = metricsName; this.metricsDescription = metricsDescription; this.metricsContext = metricsContext; @@ -51,9 +44,8 @@ public MetricRegistryInfo( } /** - * Get the metrics context. 
For hadoop metrics2 system this is usually an all lowercased string. + * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. * eg. regionserver, master, thriftserver - * * @return The string context used to register this source to hadoop's metrics2 system. */ public String getMetricsContext() { @@ -68,16 +60,15 @@ public String getMetricsDescription() { } /** - * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase + * Get the name of the context in JMX that this source will be exposed through. This is in + * ObjectName format. With the default context being Hadoop -> HBase */ public String getMetricsJmxContext() { return metricsJmxContext; } /** - * Get the name of the metrics that are being exported by this source. - * Eg. IPC, GC, WAL + * Get the name of the metrics that are being exported by this source. Eg. IPC, GC, WAL */ public String getMetricsName() { return metricsName; @@ -102,11 +93,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return new HashCodeBuilder() - .append(metricsName) - .append(metricsDescription) - .append(metricsContext) - .append(metricsJmxContext) - .toHashCode(); + return new HashCodeBuilder().append(metricsName).append(metricsDescription) + .append(metricsContext).append(metricsJmxContext).toHashCode(); } } diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java index 5e1c873ce8bd..60d7e9e39ddb 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Map; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * A set of named metrics. - * * @see MetricRegistry#registerAll(MetricSet) */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -34,7 +31,6 @@ public interface MetricSet extends Metric { /** * A map of metric names to metrics. - * * @return the metrics */ Map getMetrics(); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java index ecb01ad57c0e..e38302360696 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,19 +19,16 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; - import org.apache.yetus.audience.InterfaceAudience; /** - * This is a dummy annotation that forces javac to produce output for - * otherwise empty package-info.java. - * - *

The result is maven-compiler-plugin can properly identify the scope of - * changed files - * - * See more details in - * - * maven-compiler-plugin: incremental compilation broken + * This is a dummy annotation that forces javac to produce output for otherwise empty + * package-info.java. * + * The result is maven-compiler-plugin can properly identify the scope of changed files *
+ * See more details in + * maven-compiler-plugin: incremental compilation broken */ @Retention(RetentionPolicy.SOURCE) @InterfaceAudience.Private diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java index a7b9869a0d2d..26aee2804eea 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -43,7 +40,6 @@ public interface Snapshot { /** * Returns the number of values in the snapshot. - * * @return the number of values */ long getCount(); @@ -57,77 +53,66 @@ public interface Snapshot { /** * Returns the value at the 25th percentile in the distribution. - * * @return the value at the 25th percentile */ long get25thPercentile(); /** * Returns the value at the 75th percentile in the distribution. - * * @return the value at the 75th percentile */ long get75thPercentile(); /** * Returns the value at the 90th percentile in the distribution. - * * @return the value at the 90th percentile */ long get90thPercentile(); /** * Returns the value at the 95th percentile in the distribution. - * * @return the value at the 95th percentile */ long get95thPercentile(); /** * Returns the value at the 98th percentile in the distribution. - * * @return the value at the 98th percentile */ long get98thPercentile(); /** * Returns the value at the 99th percentile in the distribution. - * * @return the value at the 99th percentile */ long get99thPercentile(); /** * Returns the value at the 99.9th percentile in the distribution. - * * @return the value at the 99.9th percentile */ long get999thPercentile(); /** * Returns the median value in the distribution. - * * @return the median value */ long getMedian(); /** * Returns the highest value in the snapshot. - * * @return the highest value */ long getMax(); /** * Returns the arithmetic mean of the values in the snapshot. - * * @return the arithmetic mean */ long getMean(); /** * Returns the lowest value in the snapshot. - * * @return the lowest value */ long getMin(); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java index 30c64fb5ce4b..aeb6adf5163c 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
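All of the Snapshot accessors touched above keep their behaviour; only blank separator lines are dropped from the Javadoc. For reference, a small sketch of consuming a Snapshot; how the Snapshot is obtained from a Histogram is left out, since that accessor is not part of this diff.

import org.apache.hadoop.hbase.metrics.Snapshot;

public class SnapshotSummarySketch {
  static String summarize(Snapshot snap) {
    // Every accessor used here appears in the Snapshot interface above.
    return String.format("count=%d min=%d median=%d p99=%d p99.9=%d max=%d mean=%d",
      snap.getCount(), snap.getMin(), snap.getMedian(), snap.get99thPercentile(),
      snap.get999thPercentile(), snap.getMax(), snap.getMean());
  }
}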
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java index e79451fe6ad4..3acaa96d871d 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java @@ -1,19 +1,12 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** diff --git a/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java index 59f26999bd2c..85fff81fc9be 100644 --- a/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java +++ b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,8 +51,8 @@ public void testLoadMultipleInstances() { MetricRegistries loader1 = mock(MetricRegistries.class); MetricRegistries loader2 = mock(MetricRegistries.class); MetricRegistries loader3 = mock(MetricRegistries.class); - MetricRegistries instance = MetricRegistriesLoader.load(Lists.newArrayList(loader1, loader2, - loader3)); + MetricRegistries instance = + MetricRegistriesLoader.load(Lists.newArrayList(loader1, loader2, loader3)); // the load() returns the first instance assertEquals(loader1, instance); diff --git a/hbase-metrics/pom.xml b/hbase-metrics/pom.xml index 5d7c247b81c0..30d8aea5931d 100644 --- a/hbase-metrics/pom.xml +++ b/hbase-metrics/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,50 +31,6 @@ Apache HBase - Metrics Implementation HBase Metrics Implementation - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -157,6 +113,50 @@ + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + + test + + test + + true + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java index 8021c0689398..ad30fbe1674d 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics.impl; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.hbase.metrics.Counter; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java index b5c52cf840cc..d2723a22e942 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,7 @@ package org.apache.hadoop.hbase.metrics.impl; import com.codahale.metrics.Meter; - import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -39,11 +37,13 @@ public DropwizardMeter(Meter meter) { this.meter = Objects.requireNonNull(meter); } - @Override public void mark() { + @Override + public void mark() { this.meter.mark(); } - @Override public void mark(long count) { + @Override + public void mark(long count) { this.meter.mark(count); } diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java index 81544607f5f7..f9cc1a6ca2f6 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public class FastLongHistogram { public static final int DEFAULT_NBINS = 255; public static final double[] DEFAULT_QUANTILES = - new double[]{0.25, 0.5, 0.75, 0.90, 0.95, 0.98, 0.99, 0.999}; + new double[] { 0.25, 0.5, 0.75, 0.90, 0.95, 0.98, 0.99, 0.999 }; /** * Bins is a class containing a list of buckets(or bins) for estimation histogram of some data. @@ -105,8 +105,8 @@ private int getIndex(long value) { return this.counts.length - 2; } // compute the position - return 1 + (int) ((value - this.binsMin) * (this.counts.length - 3) / - (this.binsMax - this.binsMin)); + return 1 + (int) ((value - this.binsMin) * (this.counts.length - 3) + / (this.binsMax - this.binsMin)); } diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java index 2e0aa55808f6..c29b267e347a 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,9 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Custom histogram implementation based on FastLongHistogram. Dropwizard-based histograms are - * slow compared to this implementation, so we are using our implementation here. - * See HBASE-15222. + * Custom histogram implementation based on FastLongHistogram. Dropwizard-based histograms are slow + * compared to this implementation, so we are using our implementation here. See HBASE-15222. 
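The getIndex() change above only re-wraps the division; the arithmetic is unchanged. The interior bin index is a plain linear interpolation between binsMin and binsMax, as this standalone sketch with made-up bounds shows (only the interior formula is taken from the hunk; the boundary handling here is simplified).

public class LinearBinIndexSketch {
  static int getIndex(long value, long binsMin, long binsMax, int numCounts) {
    if (value < binsMin) {
      return 0; // underflow bin (simplified)
    }
    if (value >= binsMax) {
      return numCounts - 2; // overflow bin, as in the hunk above
    }
    // Interior bins: bin 0 and the last two counters are reserved, so the
    // remaining (numCounts - 3) bins cover [binsMin, binsMax).
    return 1 + (int) ((value - binsMin) * (numCounts - 3) / (binsMax - binsMin));
  }

  public static void main(String[] args) {
    // With binsMin=0, binsMax=100 and 13 counters (10 interior bins),
    // value 55 maps to interior bin 1 + 55 * 10 / 100 = 6.
    System.out.println(getIndex(55, 0, 100, 13));
  }
}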
*/ @InterfaceAudience.Private public class HistogramImpl implements Histogram { diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java index 3826e66093b6..39da41eeec00 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +21,6 @@ import java.util.Collections; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryFactory; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java index 6f9e16366aa7..5ebdf0d479a0 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java index 05e096304cf8..1c8927b15b3a 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Gauge; import org.apache.hadoop.hbase.metrics.Histogram; @@ -97,7 +96,7 @@ public Metric register(String name, Metric metric) { @Override public Gauge register(String name, Gauge gauge) { - return (Gauge) register(name, (Metric)gauge); + return (Gauge) register(name, (Metric) gauge); } @Override diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java index 63131a100e92..19ec192211a8 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,22 +22,23 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.function.Supplier; import java.util.stream.Collectors; - import org.apache.yetus.audience.InterfaceAudience; /** - * A map of K to V, but does ref counting for added and removed values. The values are - * not added directly, but instead requested from the given Supplier if ref count == 0. Each put() - * call will increment the ref count, and each remove() will decrement it. The values are removed - * from the map iff ref count == 0. + * A map of K to V, but does ref counting for added and removed values. The values are not added + * directly, but instead requested from the given Supplier if ref count == 0. Each put() call will + * increment the ref count, and each remove() will decrement it. The values are removed from the map + * iff ref count == 0. */ @InterfaceAudience.Private class RefCountingMap { private ConcurrentHashMap> map = new ConcurrentHashMap<>(); + private static class Payload { V v; int refCount; + Payload(V v) { this.v = v; this.refCount = 1; // create with ref count = 1 @@ -46,7 +46,7 @@ private static class Payload { } V put(K k, Supplier supplier) { - return ((Payload)map.compute(k, (k1, oldValue) -> { + return ((Payload) map.compute(k, (k1, oldValue) -> { if (oldValue != null) { oldValue.refCount++; return oldValue; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java index 03a8c65915e2..3ad560a3d74f 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
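Illustrative aside, not part of the diff: the RefCountingMap javadoc rewrapped above describes a map whose values are created only when the reference count is zero and dropped only when it falls back to zero. A minimal standalone sketch of that idea (the class and method names below are invented for illustration; the HBase class itself is package-private):

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

class RefCountedValues<K, V> {
  // Each entry carries its value plus the number of put() calls minus remove() calls.
  private static final class Entry<V> {
    V v;
    int refs;
    Entry(V v) { this.v = v; this.refs = 1; }
  }

  private final ConcurrentHashMap<K, Entry<V>> map = new ConcurrentHashMap<>();

  // Creates the value via the Supplier only on the first put; later puts just bump the count.
  V put(K key, Supplier<V> supplier) {
    return map.compute(key, (k, e) -> {
      if (e != null) {
        e.refs++;
        return e;
      }
      return new Entry<>(supplier.get());
    }).v;
  }

  // Decrements the count and drops the entry once no callers reference it anymore.
  V remove(K key) {
    Entry<V> e = map.computeIfPresent(key, (k, old) -> --old.refs <= 0 ? null : old);
    return e == null ? null : e.v;
  }
}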
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.Timer; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/package-info.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/package-info.java index 0df119e2ee45..91a3af5b9cb9 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/package-info.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/package-info.java @@ -1,19 +1,12 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java index 5b5e26f13a80..651f560c5c7b 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,11 +39,13 @@ public class TestCounterImpl { private Counter counter; - @Before public void setup() { + @Before + public void setup() { this.counter = new CounterImpl(); } - @Test public void testCounting() { + @Test + public void testCounting() { counter.increment(); assertEquals(1L, counter.getCount()); counter.increment(); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java index 072f18a3b155..9f5415c67e37 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,13 @@ public class TestDropwizardMeter { private Meter meter; - @Before public void setup() { + @Before + public void setup() { this.meter = Mockito.mock(Meter.class); } - @Test public void test() { + @Test + public void test() { DropwizardMeter dwMeter = new DropwizardMeter(this.meter); dwMeter.mark(); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java index 120f91169c5a..78c2afc1ec92 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.Arrays; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -34,7 +33,7 @@ /** * Testcases for FastLongHistogram. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestFastLongHistogram { @ClassRule @@ -97,7 +96,6 @@ public void testAdaptionOfChange() { } } - @Test public void testGetNumAtOrBelow() { long[] VALUES = { 1, 10, 20, 30, 40, 50 }; @@ -126,7 +124,6 @@ public void testGetNumAtOrBelow() { assertEquals(601, h.getNumAtOrBelow(Long.MAX_VALUE)); } - @Test public void testSameValues() { FastLongHistogram hist = new FastLongHistogram(100); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java index 52d29fc700cc..e1ed9cf6a5be 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,18 +34,17 @@ public class TestGauge { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGauge.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGauge.class); @Test public void testGetValue() { SimpleGauge gauge = new SimpleGauge(); - assertEquals(0, (long)gauge.getValue()); + assertEquals(0, (long) gauge.getValue()); gauge.setValue(1000L); - assertEquals(1000L, (long)gauge.getValue()); + assertEquals(1000L, (long) gauge.getValue()); } /** @@ -55,7 +54,8 @@ private static class SimpleGauge implements Gauge { private final AtomicLong value = new AtomicLong(0L); - @Override public Long getValue() { + @Override + public Long getValue() { return this.value.get(); } diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java index 9be3fcee20f4..cbce953b69ba 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java index 1115529a051c..43db251b8bfb 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public void testCounter() { counter.increment(42L); Optional metric = registry.get("mycounter"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Counter)metric.get()).getCount()); + assertEquals(42L, (long) ((Counter) metric.get()).getCount()); } @Test @@ -72,7 +72,7 @@ public Long getValue() { }); Optional metric = registry.get("mygauge"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); } @Test @@ -81,7 +81,7 @@ public void testRegisterGaugeLambda() { registry.register("gaugeLambda", () -> 42L); Optional metric = registry.get("gaugeLambda"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); } @Test @@ -106,7 +106,7 @@ public void testRegister() { Optional metric = registry.get("mycounter"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Counter)metric.get()).getCount()); + assertEquals(42L, (long) ((Counter) metric.get()).getCount()); } @Test @@ -119,8 +119,7 @@ public void testDoubleRegister() { Optional metric = registry.get("mygauge"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); - + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); Counter c1 = registry.counter("mycounter"); Counter c2 = registry.counter("mycounter"); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java index c5ed1edb9eb8..6478639cf509 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public void testPutGet() { @Test public void testPutMulti() { String v1 = map.put("foo", () -> "foovalue"); - String v2 = map.put("foo", () -> "foovalue2"); + String v2 = map.put("foo", () -> "foovalue2"); String v3 = map.put("foo", () -> "foovalue3"); String v = map.get("foo"); @@ -127,7 +127,6 @@ public void testClear() { assertEquals(0, map.size()); } - @Test public void testKeySet() { map.put("foo", () -> "foovalue"); @@ -151,6 +150,6 @@ public void testValues() { assertEquals(3, values.size()); Lists.newArrayList("foovalue", "foovalue3", "foovalue4").stream() - .forEach(v -> assertTrue(values.contains(v))); + .forEach(v -> assertTrue(values.contains(v))); } } diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java index d9d3632b7310..749bd63c7e5a 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml index 21e4ff479277..f3f692856679 100644 --- a/hbase-procedure/pom.xml +++ b/hbase-procedure/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -30,35 +30,6 @@ hbase-procedure Apache HBase - Procedure Procedure Framework - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -145,14 +116,43 @@ test - hadoop-hdfs-client org.apache.hadoop + hadoop-hdfs-client + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + - - + + skipProcedureTests @@ -169,7 +169,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java index 53bfba62daf8..61dea04eb214 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,11 +68,10 @@ public void signalAll() { } // ========================================================================== - // Add related + // Add related // ========================================================================== /** - * Add the procedure to the queue. - * NOTE: this method is called with the sched lock held. + * Add the procedure to the queue. NOTE: this method is called with the sched lock held. * @param procedure the Procedure to add * @param addFront true if the item should be added to the front of the queue */ @@ -131,11 +129,10 @@ protected void push(final Procedure procedure, final boolean addFront, final boo } // ========================================================================== - // Poll related + // Poll related // ========================================================================== /** - * Fetch one Procedure from the queue - * NOTE: this method is called with the sched lock held. + * Fetch one Procedure from the queue NOTE: this method is called with the sched lock held. * @return the Procedure to execute, or null if nothing is available. */ protected abstract Procedure dequeue(); @@ -187,18 +184,18 @@ public Procedure poll(final long nanos) { } // ========================================================================== - // Utils + // Utils // ========================================================================== /** - * Returns the number of elements in this queue. - * NOTE: this method is called with the sched lock held. 
+ * Returns the number of elements in this queue. NOTE: this method is called with the sched lock + * held. * @return the number of elements in this queue. */ protected abstract int queueSize(); /** - * Returns true if there are procedures available to process. - * NOTE: this method is called with the sched lock held. + * Returns true if there are procedures available to process. NOTE: this method is called with the + * sched lock held. * @return true if there are procedures available to process, otherwise false. */ protected abstract boolean queueHasRunnables(); @@ -224,7 +221,7 @@ public boolean hasRunnables() { } // ============================================================================ - // TODO: Metrics + // TODO: Metrics // ============================================================================ public long getPollCalls() { return pollCalls; @@ -235,13 +232,13 @@ public long getNullPollCalls() { } // ========================================================================== - // Procedure Events + // Procedure Events // ========================================================================== /** - * Wake up all of the given events. - * Note that we first take scheduler lock and then wakeInternal() synchronizes on the event. - * Access should remain package-private. Use ProcedureEvent class to wake/suspend events. + * Wake up all of the given events. Note that we first take scheduler lock and then wakeInternal() + * synchronizes on the event. Access should remain package-private. Use ProcedureEvent class to + * wake/suspend events. * @param events the list of events to wake */ public void wakeEvents(ProcedureEvent[] events) { @@ -276,7 +273,7 @@ protected void wakeProcedure(final Procedure procedure) { } // ========================================================================== - // Internal helpers + // Internal helpers // ========================================================================== protected void schedLock() { schedulerLock.lock(); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java index 796a8e47c918..69f4fa52034d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
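Illustrative aside, not part of the diff: the "Add related" and "Poll related" sections reflowed above follow the usual lock-and-condition scheduler shape, where push() signals a waiting poll() and poll() waits with a timeout while the queue is empty. A self-contained sketch of that shape (the ArrayDeque backing and all names here are assumptions for the example, not the HBase scheduler):

import java.util.ArrayDeque;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

class SimpleScheduler<T> {
  private final ReentrantLock schedLock = new ReentrantLock();
  private final Condition schedWaitCond = schedLock.newCondition();
  private final ArrayDeque<T> runnables = new ArrayDeque<>();

  // Enqueue an item (optionally at the front) and wake one waiting poller.
  void push(T item, boolean addFront) {
    schedLock.lock();
    try {
      if (addFront) {
        runnables.addFirst(item);
      } else {
        runnables.addLast(item);
      }
      schedWaitCond.signal();
    } finally {
      schedLock.unlock();
    }
  }

  // Wait up to the given number of nanoseconds for an item; returns null on timeout.
  T poll(long nanos) throws InterruptedException {
    schedLock.lock();
    try {
      while (runnables.isEmpty()) {
        if (nanos <= 0) {
          return null;
        }
        nanos = schedWaitCond.awaitNanos(nanos);
      }
      return runnables.pollFirst();
    } finally {
      schedLock.unlock();
    }
  }
}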
See the NOTICE file * distributed with this work for additional information @@ -87,7 +87,7 @@ protected void periodicExecute(final TEnvironment env) { } final long evictTtl = - conf.getInt(ProcedureExecutor.EVICT_TTL_CONF_KEY, ProcedureExecutor.DEFAULT_EVICT_TTL); + conf.getInt(ProcedureExecutor.EVICT_TTL_CONF_KEY, ProcedureExecutor.DEFAULT_EVICT_TTL); final long evictAckTtl = conf.getInt(ProcedureExecutor.EVICT_ACKED_TTL_CONF_KEY, ProcedureExecutor.DEFAULT_ACKED_EVICT_TTL); final int batchSize = conf.getInt(BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); @@ -97,7 +97,7 @@ protected void periodicExecute(final TEnvironment env) { final long now = EnvironmentEdgeManager.currentTime(); final Iterator>> it = - completed.entrySet().iterator(); + completed.entrySet().iterator(); while (it.hasNext() && store.isRunning()) { final Map.Entry> entry = it.next(); final CompletedProcedureRetainer retainer = entry.getValue(); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java index d5f1ee7f6c3f..6ba261b8e01b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public void setClientAckTime(long clientAckTime) { } public boolean isExpired(long now, long evictTtl, long evictAckTtl) { - return (hasClientAckTime() && (now - getClientAckTime()) >= evictAckTtl) || - (now - procedure.getLastUpdate()) >= evictTtl; + return (hasClientAckTime() && (now - getClientAckTime()) >= evictAckTtl) + || (now - procedure.getLastUpdate()) >= evictTtl; } } \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java index 3fc975078604..a2e4e659e9f8 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java index 40eb22c3b56e..1bd4490da863 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
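Illustrative aside, not part of the diff: the isExpired() predicate rewrapped above combines the two eviction TTLs read in periodicExecute(). A small worked example with made-up timestamps (only the two default TTLs come from the patch; every other value is invented for the arithmetic):

// Evictable once either TTL has elapsed:
// - evictAckTtl since the client acknowledged the result, or
// - evictTtl since the procedure's last update.
long now = 10_000_000L;         // hypothetical current time, ms
long lastUpdate = 9_000_000L;   // last update 1,000,000 ms (~16.7 min) ago
long evictTtl = 15 * 60_000L;   // DEFAULT_EVICT_TTL, 15 min
long evictAckTtl = 5 * 60_000L; // DEFAULT_ACKED_EVICT_TTL, 5 min
boolean hasClientAckTime = false;
long clientAckTime = -1L;
boolean expired = (hasClientAckTime && (now - clientAckTime) >= evictAckTtl)
  || (now - lastUpdate) >= evictTtl;
// expired == true: 16.7 minutes since the last update exceeds the 15 minute TTL.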
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java index dfe8e7d3c537..5561661d73b7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java @@ -21,8 +21,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure - * operation. + * Used internally signaling failed queue of a remote procedure operation. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java index 32b4922a0b17..21350b56c23d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java index bfeb7398fa06..c07dcf2dfe44 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.util.function.Function; @@ -142,8 +141,8 @@ public boolean tryExclusiveLock(Procedure proc) { * @return whether we should wake the procedures waiting on the lock here. */ public boolean releaseExclusiveLock(Procedure proc) { - if (exclusiveLockOwnerProcedure == null || - exclusiveLockOwnerProcedure.getProcId() != proc.getProcId()) { + if (exclusiveLockOwnerProcedure == null + || exclusiveLockOwnerProcedure.getProcId() != proc.getProcId()) { // We are not the lock owner, it is probably inherited from the parent procedures. return false; } @@ -187,7 +186,7 @@ public Stream filterWaitingQueue(Predicate predicate) { @Override public String toString() { - return "exclusiveLockOwner=" + (hasExclusiveLock() ? getExclusiveLockProcIdOwner() : "NONE") + - ", sharedLockCount=" + getSharedLockCount() + ", waitingProcCount=" + queue.size(); + return "exclusiveLockOwner=" + (hasExclusiveLock() ? 
getExclusiveLockProcIdOwner() : "NONE") + + ", sharedLockCount=" + getSharedLockCount() + ", waitingProcCount=" + queue.size(); } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java index 33d2a38c80aa..d3723e1a35a7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java index 8599af90d387..e1938ae9573e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java index 81d1e7212299..0b1ce49e526d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -30,9 +29,9 @@ public class LockedResource { private final int sharedLockCount; private final List> waitingProcedures; - public LockedResource(LockedResourceType resourceType, String resourceName, - LockType lockType, Procedure exclusiveLockOwnerProcedure, - int sharedLockCount, List> waitingProcedures) { + public LockedResource(LockedResourceType resourceType, String resourceName, LockType lockType, + Procedure exclusiveLockOwnerProcedure, int sharedLockCount, + List> waitingProcedures) { this.resourceType = resourceType; this.resourceName = resourceName; this.lockType = lockType; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java index 55d195b3920f..d948b68c7d4f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java index d2e13f135361..a6faf501682b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * In particular, no dispatch Node was found for the passed server name - * key AFTER queuing dispatch. + * Used internally signaling failed queue of a remote procedure operation. In particular, no + * dispatch Node was found for the passed server name key AFTER queuing dispatch. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java index 5cdbcd417dea..95265d00a7ba 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * In particular, no dispatch Node was found for the passed server name - * key. + * Used internally signaling failed queue of a remote procedure operation. In particular, no + * dispatch Node was found for the passed server name key. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java index 9deac23e1546..502d7ee0b6e1 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * The target server passed is null. + * Used internally signaling failed queue of a remote procedure operation. The target server passed + * is null. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java index 2d6e065da675..4f0bc6ce6b29 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index 579c60998765..b2da26336545 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -113,9 +113,9 @@ public abstract class Procedure implements Comparable

Bypassing a procedure is not like aborting. Aborting a procedure will trigger - * a rollback. And since the {@link #abort(Object)} method is overrideable - * Some procedures may have chosen to ignore the aborting. + * If bypass is set to true, when executing it will return null when {@link #doExecute(Object)} is + * called to finish the procedure and release any locks it may currently hold. The bypass does + * cleanup around the Procedure as far as the Procedure framework is concerned. It does not clean + * any internal state that the Procedure's themselves may have set. That is for the Procedures to + * do themselves when bypass is called. They should override bypass and do their cleanup in the + * overridden bypass method (be sure to call the parent bypass to ensure proper processing). + *

+ *

+ * Bypassing a procedure is not like aborting. Aborting a procedure will trigger a rollback. And + * since the {@link #abort(Object)} method is overrideable Some procedures may have chosen to + * ignore the aborting. */ private volatile boolean bypass = false; @@ -176,13 +176,13 @@ public boolean isBypass() { } /** - * Set the bypass to true. - * Only called in {@link ProcedureExecutor#bypassProcedure(long, long, boolean, boolean)} for now. - * DO NOT use this method alone, since we can't just bypass one single procedure. We need to - * bypass its ancestor too. If your Procedure has set state, it needs to undo it in here. - * @param env Current environment. May be null because of context; e.g. pretty-printing - * procedure WALs where there is no 'environment' (and where Procedures that require - * an 'environment' won't be run. + * Set the bypass to true. Only called in + * {@link ProcedureExecutor#bypassProcedure(long, long, boolean, boolean)} for now. DO NOT use + * this method alone, since we can't just bypass one single procedure. We need to bypass its + * ancestor too. If your Procedure has set state, it needs to undo it in here. + * @param env Current environment. May be null because of context; e.g. pretty-printing procedure + * WALs where there is no 'environment' (and where Procedures that require an + * 'environment' won't be run. */ protected void bypass(TEnvironment env) { this.bypass = true; @@ -201,64 +201,56 @@ protected final void skipPersistence() { } /** - * The main code of the procedure. It must be idempotent since execute() - * may be called multiple times in case of machine failure in the middle - * of the execution. + * The main code of the procedure. It must be idempotent since execute() may be called multiple + * times in case of machine failure in the middle of the execution. * @param env the environment passed to the ProcedureExecutor * @return a set of sub-procedures to run or ourselves if there is more work to do or null if the * procedure is done. * @throws ProcedureYieldException the procedure will be added back to the queue and retried - * later. + * later. * @throws InterruptedException the procedure will be added back to the queue and retried later. * @throws ProcedureSuspendedException Signal to the executor that Procedure has suspended itself - * and has set itself up waiting for an external event to wake it back up again. + * and has set itself up waiting for an external event to wake it back up again. */ protected abstract Procedure[] execute(TEnvironment env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException; + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException; /** - * The code to undo what was done by the execute() code. - * It is called when the procedure or one of the sub-procedures failed or an - * abort was requested. It should cleanup all the resources created by - * the execute() call. The implementation must be idempotent since rollback() - * may be called multiple time in case of machine failure in the middle - * of the execution. + * The code to undo what was done by the execute() code. It is called when the procedure or one of + * the sub-procedures failed or an abort was requested. It should cleanup all the resources + * created by the execute() call. The implementation must be idempotent since rollback() may be + * called multiple time in case of machine failure in the middle of the execution. 
* @param env the environment passed to the ProcedureExecutor * @throws IOException temporary failure, the rollback will retry later * @throws InterruptedException the procedure will be added back to the queue and retried later */ - protected abstract void rollback(TEnvironment env) - throws IOException, InterruptedException; + protected abstract void rollback(TEnvironment env) throws IOException, InterruptedException; /** - * The abort() call is asynchronous and each procedure must decide how to deal - * with it, if they want to be abortable. The simplest implementation - * is to have an AtomicBoolean set in the abort() method and then the execute() - * will check if the abort flag is set or not. - * abort() may be called multiple times from the client, so the implementation - * must be idempotent. - * - *

NOTE: abort() is not like Thread.interrupt(). It is just a notification - * that allows the procedure implementor abort. + * The abort() call is asynchronous and each procedure must decide how to deal with it, if they + * want to be abortable. The simplest implementation is to have an AtomicBoolean set in the + * abort() method and then the execute() will check if the abort flag is set or not. abort() may + * be called multiple times from the client, so the implementation must be idempotent. + *

+ * NOTE: abort() is not like Thread.interrupt(). It is just a notification that allows the + * procedure implementor abort. */ protected abstract boolean abort(TEnvironment env); /** - * The user-level code of the procedure may have some state to - * persist (e.g. input arguments or current position in the processing state) to - * be able to resume on failure. + * The user-level code of the procedure may have some state to persist (e.g. input arguments or + * current position in the processing state) to be able to resume on failure. * @param serializer stores the serializable state */ protected abstract void serializeStateData(ProcedureStateSerializer serializer) - throws IOException; + throws IOException; /** - * Called on store load to allow the user to decode the previously serialized - * state. + * Called on store load to allow the user to decode the previously serialized state. * @param serializer contains the serialized state */ protected abstract void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException; + throws IOException; /** * The {@link #doAcquireLock(Object, ProcedureStore)} will be split into two steps, first, it will @@ -321,9 +313,9 @@ protected boolean holdLock(TEnvironment env) { /** * This is used in conjunction with {@link #holdLock(Object)}. If {@link #holdLock(Object)} - * returns true, the procedure executor will call acquireLock() once and thereafter - * not call {@link #releaseLock(Object)} until the Procedure is done (Normally, it calls - * release/acquire around each invocation of {@link #execute(Object)}. + * returns true, the procedure executor will call acquireLock() once and thereafter not call + * {@link #releaseLock(Object)} until the Procedure is done (Normally, it calls release/acquire + * around each invocation of {@link #execute(Object)}. * @see #holdLock(Object) * @return true if the procedure has the lock, false otherwise. */ @@ -332,61 +324,57 @@ public final boolean hasLock() { } /** - * Called when the procedure is loaded for replay. - * The procedure implementor may use this method to perform some quick - * operation before replay. - * e.g. failing the procedure if the state on replay may be unknown. + * Called when the procedure is loaded for replay. The procedure implementor may use this method + * to perform some quick operation before replay. e.g. failing the procedure if the state on + * replay may be unknown. */ protected void beforeReplay(TEnvironment env) { // no-op } /** - * Called when the procedure is ready to be added to the queue after - * the loading/replay operation. + * Called when the procedure is ready to be added to the queue after the loading/replay operation. */ protected void afterReplay(TEnvironment env) { // no-op } /** - * Called when the procedure is marked as completed (success or rollback). - * The procedure implementor may use this method to cleanup in-memory states. - * This operation will not be retried on failure. If a procedure took a lock, - * it will have been released when this method runs. + * Called when the procedure is marked as completed (success or rollback). The procedure + * implementor may use this method to cleanup in-memory states. This operation will not be retried + * on failure. If a procedure took a lock, it will have been released when this method runs. */ protected void completionCleanup(TEnvironment env) { // no-op } /** - * By default, the procedure framework/executor will try to run procedures start to finish. 
- * Return true to make the executor yield between each execution step to - * give other procedures a chance to run. + * By default, the procedure framework/executor will try to run procedures start to finish. Return + * true to make the executor yield between each execution step to give other procedures a chance + * to run. * @param env the environment passed to the ProcedureExecutor - * @return Return true if the executor should yield on completion of an execution step. - * Defaults to return false. + * @return Return true if the executor should yield on completion of an execution step. Defaults + * to return false. */ protected boolean isYieldAfterExecutionStep(TEnvironment env) { return false; } /** - * By default, the executor will keep the procedure result around util - * the eviction TTL is expired. The client can cut down the waiting time - * by requesting that the result is removed from the executor. - * In case of system started procedure, we can force the executor to auto-ack. + * By default, the executor will keep the procedure result around util the eviction TTL is + * expired. The client can cut down the waiting time by requesting that the result is removed from + * the executor. In case of system started procedure, we can force the executor to auto-ack. * @param env the environment passed to the ProcedureExecutor - * @return true if the executor should wait the client ack for the result. - * Defaults to return true. + * @return true if the executor should wait the client ack for the result. Defaults to return + * true. */ protected boolean shouldWaitClientAck(TEnvironment env) { return true; } /** - * Override this method to provide procedure specific counters for submitted count, failed - * count and time histogram. + * Override this method to provide procedure specific counters for submitted count, failed count + * and time histogram. * @param env The environment passed to the procedure executor * @return Container object for procedure related metric */ @@ -467,13 +455,9 @@ protected StringBuilder toStringSimpleSB() { } /* - * TODO - * Enable later when this is being used. - * Currently owner not used. - if (hasOwner()) { - sb.append(", owner="); - sb.append(getOwner()); - }*/ + * TODO Enable later when this is being used. Currently owner not used. if (hasOwner()) { + * sb.append(", owner="); sb.append(getOwner()); } + */ sb.append(", state="); // pState for Procedure State as opposed to any other kind. toStringState(sb); @@ -532,8 +516,7 @@ protected void toStringState(StringBuilder builder) { } /** - * Extend the toString() information with the procedure details - * e.g. className and parameters + * Extend the toString() information with the procedure details e.g. className and parameters * @param builder the string builder to use to append the proc specific information */ protected void toStringClassDetails(StringBuilder builder) { @@ -541,11 +524,11 @@ protected void toStringClassDetails(StringBuilder builder) { } // ========================================================================== - // Those fields are unchanged after initialization. + // Those fields are unchanged after initialization. // - // Each procedure will get created from the user or during - // ProcedureExecutor.start() during the load() phase and then submitted - // to the executor. these fields will never be changed after initialization + // Each procedure will get created from the user or during + // ProcedureExecutor.start() during the load() phase and then submitted + // to the executor. 
these fields will never be changed after initialization // ========================================================================== public long getProcId() { return procId; @@ -620,15 +603,14 @@ public void setOwner(User owner) { } /** - * Called on store load to initialize the Procedure internals after - * the creation/deserialization. + * Called on store load to initialize the Procedure internals after the creation/deserialization. */ protected void setSubmittedTime(long submittedTime) { this.submittedTime = submittedTime; } // ========================================================================== - // runtime state - timeout related + // runtime state - timeout related // ========================================================================== /** * @param timeout timeout interval in msec @@ -649,8 +631,7 @@ public int getTimeout() { } /** - * Called on store load to initialize the Procedure internals after - * the creation/deserialization. + * Called on store load to initialize the Procedure internals after the creation/deserialization. */ protected void setLastUpdate(long lastUpdate) { this.lastUpdate = lastUpdate; @@ -668,9 +649,8 @@ public long getLastUpdate() { } /** - * Timeout of the next timeout. - * Called by the ProcedureExecutor if the procedure has timeout set and - * the procedure is in the waiting queue. + * Timeout of the next timeout. Called by the ProcedureExecutor if the procedure has timeout set + * and the procedure is in the waiting queue. * @return the timestamp of the next timeout. */ protected long getTimeoutTimestamp() { @@ -678,7 +658,7 @@ protected long getTimeoutTimestamp() { } // ========================================================================== - // runtime state + // runtime state // ========================================================================== /** * @return the time elapsed between the last update and the start time of the procedure. @@ -704,8 +684,8 @@ protected void setResult(byte[] result) { /** * Will only be called when loading procedures from procedure store, where we need to record - * whether the procedure has already held a lock. Later we will call - * {@link #restoreLock(Object)} to actually acquire the lock. + * whether the procedure has already held a lock. Later we will call {@link #restoreLock(Object)} + * to actually acquire the lock. */ final void lockedWhenLoading() { this.lockedWhenLoading = true; @@ -724,12 +704,12 @@ public boolean isLockedWhenLoading() { } // ============================================================================================== - // Runtime state, updated every operation by the ProcedureExecutor + // Runtime state, updated every operation by the ProcedureExecutor // - // There is always 1 thread at the time operating on the state of the procedure. - // The ProcedureExecutor may check and set states, or some Procecedure may - // update its own state. but no concurrent updates. we use synchronized here - // just because the procedure can get scheduled on different executor threads on each step. + // There is always 1 thread at the time operating on the state of the procedure. + // The ProcedureExecutor may check and set states, or some Procecedure may + // update its own state. but no concurrent updates. we use synchronized here + // just because the procedure can get scheduled on different executor threads on each step. 
// ============================================================================================== /** @@ -839,8 +819,7 @@ public synchronized RemoteProcedureException getException() { protected synchronized void setChildrenLatch(int numChildren) { this.childrenLatch = numChildren; if (LOG.isTraceEnabled()) { - LOG.trace("CHILD LATCH INCREMENT SET " + - this.childrenLatch, new Throwable(this.toString())); + LOG.trace("CHILD LATCH INCREMENT SET " + this.childrenLatch, new Throwable(this.toString())); } } @@ -859,7 +838,7 @@ protected synchronized void incChildrenLatch() { * Called by the ProcedureExecutor to notify that one of the sub-procedures has completed. */ private synchronized boolean childrenCountDown() { - assert childrenLatch > 0: this; + assert childrenLatch > 0 : this; boolean b = --childrenLatch == 0; if (LOG.isTraceEnabled()) { LOG.trace("CHILD LATCH DECREMENT " + childrenLatch, new Throwable(this.toString())); @@ -868,8 +847,7 @@ private synchronized boolean childrenCountDown() { } /** - * Try to set this procedure into RUNNABLE state. - * Succeeds if all subprocedures/children are done. + * Try to set this procedure into RUNNABLE state. Succeeds if all subprocedures/children are done. * @return True if we were able to move procedure to RUNNABLE state. */ synchronized boolean tryRunnable() { @@ -891,8 +869,8 @@ protected synchronized int getChildrenLatch() { } /** - * Called by the RootProcedureState on procedure execution. - * Each procedure store its stack-index positions. + * Called by the RootProcedureState on procedure execution. Each procedure store its stack-index + * positions. */ protected synchronized void addStackIndex(final int index) { if (stackIndexes == null) { @@ -915,8 +893,7 @@ protected synchronized boolean removeStackIndex() { } /** - * Called on store load to initialize the Procedure internals after - * the creation/deserialization. + * Called on store load to initialize the Procedure internals after the creation/deserialization. */ protected synchronized void setStackIndexes(final List stackIndexes) { this.stackIndexes = new int[stackIndexes.size()]; @@ -934,7 +911,7 @@ protected synchronized int[] getStackIndexes() { } // ========================================================================== - // Internal methods - called by the ProcedureExecutor + // Internal methods - called by the ProcedureExecutor // ========================================================================== /** @@ -959,8 +936,7 @@ protected Procedure[] doExecute(TEnvironment env) /** * Internal method called by the ProcedureExecutor that starts the user-level code rollback(). 
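Illustrative aside, not part of the diff: taken together, the abstract methods whose javadoc is reflowed in this file (execute, rollback, abort, serializeStateData, deserializeStateData) are the contract a concrete procedure fills in. A minimal no-op subclass sketched from the signatures visible in this patch (the class name is an assumption; real state handling is omitted):

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;

// Finishes in a single step, cannot be aborted, and keeps no persistent state.
public class NoopProcedure<TEnv> extends Procedure<TEnv> {
  @Override
  protected Procedure<TEnv>[] execute(TEnv env)
    throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
    return null; // no sub-procedures and no more work: the procedure is done
  }

  @Override
  protected void rollback(TEnv env) throws IOException, InterruptedException {
    // nothing to undo
  }

  @Override
  protected boolean abort(TEnv env) {
    return false; // not abortable
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // no state to persist
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // no state to restore
  }
}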
*/ - protected void doRollback(TEnvironment env) - throws IOException, InterruptedException { + protected void doRollback(TEnvironment env) throws IOException, InterruptedException { try { updateTimestamp(); if (bypass) { @@ -1052,7 +1028,7 @@ public int compareTo(final Procedure other) { } // ========================================================================== - // misc utils + // misc utils // ========================================================================== /** diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java index 1b6b93db70c2..9d6f9a4965c0 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java index c5f02e950bc5..ad42634edb95 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,20 +11,18 @@ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUTKey WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.procedure2; import java.util.ArrayDeque; - import org.apache.yetus.audience.InterfaceAudience; /** - * Type class. - * For conceptual purpose only. Seeing ProcedureDeque as type instead of just ArrayDeque gives - * more understanding that it's a queue of waiting procedures. + * Type class. For conceptual purpose only. Seeing ProcedureDeque as type instead of just ArrayDeque + * gives more understanding that it's a queue of waiting procedures. */ @InterfaceAudience.Private public class ProcedureDeque extends ArrayDeque { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java index e9bc91986b86..b9e556c1ecd2 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; @@ -61,12 +60,12 @@ public synchronized void suspend() { } /** - * Wakes up the suspended procedures by pushing them back into scheduler queues and sets the - * event as ready. - * See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not synchronized. + * Wakes up the suspended procedures by pushing them back into scheduler queues and sets the event + * as ready. See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not + * synchronized. */ public void wake(AbstractProcedureScheduler procedureScheduler) { - procedureScheduler.wakeEvents(new ProcedureEvent[]{this}); + procedureScheduler.wakeEvents(new ProcedureEvent[] { this }); } /** @@ -89,22 +88,19 @@ public synchronized boolean wakeIfSuspended(AbstractProcedureScheduler procedure * Wakes up all the given events and puts the procedures waiting on them back into * ProcedureScheduler queues. */ - public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent ... events) { + public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent... events) { scheduler.wakeEvents(events); } /** - * Only to be used by ProcedureScheduler implementations. - * Reason: To wake up multiple events, locking sequence is - * schedLock --> synchronized (event) - * To wake up an event, both schedLock() and synchronized(event) are required. - * The order is schedLock() --> synchronized(event) because when waking up multiple events - * simultaneously, we keep the scheduler locked until all procedures suspended on these events - * have been added back to the queue (Maybe it's not required? Evaluate!) - * To avoid deadlocks, we want to keep the locking order same even when waking up single event. - * That's why, {@link #wake(AbstractProcedureScheduler)} above uses the same code path as used - * when waking up multiple events. - * Access should remain package-private. + * Only to be used by ProcedureScheduler implementations. Reason: To wake up multiple events, + * locking sequence is schedLock --> synchronized (event) To wake up an event, both schedLock() + * and synchronized(event) are required. The order is schedLock() --> synchronized(event) because + * when waking up multiple events simultaneously, we keep the scheduler locked until all + * procedures suspended on these events have been added back to the queue (Maybe it's not + * required? Evaluate!) To avoid deadlocks, we want to keep the locking order same even when + * waking up single event. That's why, {@link #wake(AbstractProcedureScheduler)} above uses the + * same code path as used when waking up multiple events. Access should remain package-private. */ public synchronized void wakeInternal(AbstractProcedureScheduler procedureScheduler) { if (ready && !suspendedProcedures.isEmpty()) { @@ -122,8 +118,8 @@ public synchronized void wakeInternal(AbstractProcedureScheduler procedureSchedu } /** - * Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it - * here for tests. + * Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it here + * for tests. 
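The wakeInternal() javadoc above pins a single locking order, the scheduler lock first and then the event monitor, and routes the single-event wake() through the same path as the batch wake so the order never flips. A small self-contained sketch of that ordering, assuming a plain ReentrantLock stands in for schedLock() and strings stand in for suspended procedures (not the AbstractProcedureScheduler implementation):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.locks.ReentrantLock;

public class EventWakeSketch {
  static class Event {
    boolean ready;
    final Deque<String> suspended = new ArrayDeque<>(); // "procedures" parked on this event
  }

  private final ReentrantLock schedLock = new ReentrantLock();
  private final Deque<String> runQueue = new ArrayDeque<>();

  /** Wake one or more events while holding the scheduler lock for the whole batch. */
  public void wakeEvents(Event... events) {
    schedLock.lock();                   // 1. scheduler lock first
    try {
      for (Event event : events) {
        synchronized (event) {          // 2. then the event monitor
          event.ready = true;
          while (!event.suspended.isEmpty()) {
            runQueue.addLast(event.suspended.pollFirst());
          }
        }
      }
    } finally {
      schedLock.unlock();
    }
  }

  /** Waking a single event uses the same code path, so the lock order stays identical. */
  public void wake(Event event) {
    wakeEvents(event);
  }

  public static void main(String[] args) {
    EventWakeSketch scheduler = new EventWakeSketch();
    Event event = new Event();
    event.suspended.add("pid=1");
    scheduler.wake(event);
    System.out.println(scheduler.runQueue); // [pid=1]
  }
}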
*/ public ProcedureDeque getSuspendedProcedures() { return suspendedProcedures; @@ -131,7 +127,7 @@ public ProcedureDeque getSuspendedProcedures() { @Override public synchronized String toString() { - return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() + ", " + - suspendedProcedures; + return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() + ", " + + suspendedProcedures; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java index 93cd355c4e0d..b52510286d96 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index b4e3d1e03e49..abf51cf3929c 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -62,17 +62,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** - * Thread Pool that executes the submitted procedures. - * The executor has a ProcedureStore associated. - * Each operation is logged and on restart the pending procedures are resumed. - * - * Unless the Procedure code throws an error (e.g. invalid user input) - * the procedure will complete (at some point in time), On restart the pending - * procedures are resumed and the once failed will be rolledback. - * - * The user can add procedures to the executor via submitProcedure(proc) - * check for the finished state via isFinished(procId) - * and get the result via getResult(procId) + * Thread Pool that executes the submitted procedures. The executor has a ProcedureStore associated. + * Each operation is logged and on restart the pending procedures are resumed. Unless the Procedure + * code throws an error (e.g. invalid user input) the procedure will complete (at some point in + * time), On restart the pending procedures are resumed and the once failed will be rolledback. The + * user can add procedures to the executor via submitProcedure(proc) check for the finished state + * via isFinished(procId) and get the result via getResult(procId) */ @InterfaceAudience.Private public class ProcedureExecutor { @@ -88,13 +83,13 @@ public class ProcedureExecutor { public static final String EVICT_TTL_CONF_KEY = "hbase.procedure.cleaner.evict.ttl"; static final int DEFAULT_EVICT_TTL = 15 * 60000; // 15min - public static final String EVICT_ACKED_TTL_CONF_KEY ="hbase.procedure.cleaner.acked.evict.ttl"; + public static final String EVICT_ACKED_TTL_CONF_KEY = "hbase.procedure.cleaner.acked.evict.ttl"; static final int DEFAULT_ACKED_EVICT_TTL = 5 * 60000; // 5min /** - * {@link #testing} is non-null when ProcedureExecutor is being tested. 
Tests will try to - * break PE having it fail at various junctures. When non-null, testing is set to an instance of - * the below internal {@link Testing} class with flags set for the particular test. + * {@link #testing} is non-null when ProcedureExecutor is being tested. Tests will try to break PE + * having it fail at various junctures. When non-null, testing is set to an instance of the below + * internal {@link Testing} class with flags set for the particular test. */ volatile Testing testing = null; @@ -114,8 +109,8 @@ public static class Testing { /** * Set when we want to fail AFTER state has been stored into the WAL. Rarely used. HBASE-20978 - * is about a case where memory-state was being set after store to WAL where a crash could - * cause us to get stuck. This flag allows killing at what was a vulnerable time. + * is about a case where memory-state was being set after store to WAL where a crash could cause + * us to get stuck. This flag allows killing at what was a vulnerable time. */ protected volatile boolean killAfterStoreUpdate = false; protected volatile boolean toggleKillAfterStoreUpdate = false; @@ -155,32 +150,34 @@ protected boolean shouldKillAfterStoreUpdate(final boolean isSuspended) { public interface ProcedureExecutorListener { void procedureLoaded(long procId); + void procedureAdded(long procId); + void procedureFinished(long procId); } /** - * Map the the procId returned by submitProcedure(), the Root-ProcID, to the Procedure. - * Once a Root-Procedure completes (success or failure), the result will be added to this map. - * The user of ProcedureExecutor should call getResult(procId) to get the result. + * Map the the procId returned by submitProcedure(), the Root-ProcID, to the Procedure. Once a + * Root-Procedure completes (success or failure), the result will be added to this map. The user + * of ProcedureExecutor should call getResult(procId) to get the result. */ private final ConcurrentHashMap> completed = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); /** * Map the the procId returned by submitProcedure(), the Root-ProcID, to the RootProcedureState. - * The RootProcedureState contains the execution stack of the Root-Procedure, - * It is added to the map by submitProcedure() and removed on procedure completion. + * The RootProcedureState contains the execution stack of the Root-Procedure, It is added to the + * map by submitProcedure() and removed on procedure completion. */ private final ConcurrentHashMap> rollbackStack = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); /** - * Helper map to lookup the live procedures by ID. - * This map contains every procedure. root-procedures and subprocedures. + * Helper map to lookup the live procedures by ID. This map contains every procedure. + * root-procedures and subprocedures. */ private final ConcurrentHashMap> procedures = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); /** * Helper map to lookup whether the procedure already issued from the same client. This map @@ -189,40 +186,37 @@ public interface ProcedureExecutorListener { private final ConcurrentHashMap nonceKeysToProcIdsMap = new ConcurrentHashMap<>(); private final CopyOnWriteArrayList listeners = - new CopyOnWriteArrayList<>(); + new CopyOnWriteArrayList<>(); private Configuration conf; /** * Created in the {@link #init(int, boolean)} method. Destroyed in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). 
- * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private ThreadGroup threadGroup; /** - * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private CopyOnWriteArrayList workerThreads; /** * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private TimeoutExecutorThread timeoutExecutor; /** * WorkerMonitor check for stuck workers and new worker thread when necessary, for example if * there is no worker to assign meta, it will new worker thread for it, so it is very important. - * TimeoutExecutor execute many tasks like DeadServerMetricRegionChore RegionInTransitionChore - * and so on, some tasks may execute for a long time so will block other tasks like - * WorkerMonitor, so use a dedicated thread for executing WorkerMonitor. + * TimeoutExecutor execute many tasks like DeadServerMetricRegionChore RegionInTransitionChore and + * so on, some tasks may execute for a long time so will block other tasks like WorkerMonitor, so + * use a dedicated thread for executing WorkerMonitor. 
*/ private TimeoutExecutorThread workerMonitorExecutor; @@ -272,8 +266,9 @@ private void forceUpdateProcedure(long procId) throws IOException { Procedure proc = procedures.get(procId); if (proc != null) { if (proc.isFinished() && proc.hasParent() && isRootFinished(proc)) { - LOG.debug("Procedure {} has already been finished and parent is succeeded," + - " skip force updating", proc); + LOG.debug("Procedure {} has already been finished and parent is succeeded," + + " skip force updating", + proc); return; } } else { @@ -566,7 +561,7 @@ public void init(int numThreads, boolean abortOnCorruption) throws IOException { this.corePoolSize = numThreads; this.maxPoolSize = 10 * numThreads; LOG.info("Starting {} core workers (bigger of cpus/4 or 16) with max (burst) worker count={}", - corePoolSize, maxPoolSize); + corePoolSize, maxPoolSize); this.threadGroup = new ThreadGroup("PEWorkerGroup"); this.timeoutExecutor = new TimeoutExecutorThread<>(this, threadGroup, "ProcExecTimeout"); @@ -615,7 +610,7 @@ public void startWorkers() throws IOException { LOG.trace("Start workers {}", workerThreads.size()); timeoutExecutor.start(); workerMonitorExecutor.start(); - for (WorkerThread worker: workerThreads) { + for (WorkerThread worker : workerThreads) { worker.start(); } @@ -624,7 +619,7 @@ public void startWorkers() throws IOException { // Add completed cleaner chore addChore(new CompletedProcedureCleaner<>(conf, store, procExecutionLock, completed, - nonceKeysToProcIdsMap)); + nonceKeysToProcIdsMap)); } public void stop() { @@ -647,7 +642,7 @@ public void join() { workerMonitorExecutor.awaitTermination(); // stop the worker threads - for (WorkerThread worker: workerThreads) { + for (WorkerThread worker : workerThreads) { worker.awaitTermination(); } @@ -656,8 +651,8 @@ public void join() { try { threadGroup.destroy(); } catch (IllegalThreadStateException e) { - LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", - this.threadGroup, e.getMessage()); + LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", this.threadGroup, + e.getMessage()); // This dumps list of threads on STDOUT. this.threadGroup.list(); } @@ -673,12 +668,12 @@ public void join() { public void refreshConfiguration(final Configuration conf) { this.conf = conf; - setKeepAliveTime(conf.getLong(WORKER_KEEP_ALIVE_TIME_CONF_KEY, - DEFAULT_WORKER_KEEP_ALIVE_TIME), TimeUnit.MILLISECONDS); + setKeepAliveTime(conf.getLong(WORKER_KEEP_ALIVE_TIME_CONF_KEY, DEFAULT_WORKER_KEEP_ALIVE_TIME), + TimeUnit.MILLISECONDS); } // ========================================================================== - // Accessors + // Accessors // ========================================================================== public boolean isRunning() { return running.get(); @@ -724,7 +719,7 @@ public long getKeepAliveTime(final TimeUnit timeUnit) { } // ========================================================================== - // Submit/Remove Chores + // Submit/Remove Chores // ========================================================================== /** @@ -753,7 +748,7 @@ public boolean removeChore(@Nullable ProcedureInMemoryChore chore) } // ========================================================================== - // Nonce Procedure helpers + // Nonce Procedure helpers // ========================================================================== /** * Create a NonceKey from the specified nonceGroup and nonce. 
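The init() hunk earlier in this file's diff sizes the worker pool from the submitted thread count, and its log line describes the default core worker count as the bigger of cpus/4 or 16, with the burst maximum set to ten times the core count. A rough standalone illustration of that arithmetic (hypothetical class, not the ProcedureExecutor code itself):

public class WorkerSizingSketch {
  public static void main(String[] args) {
    int cpus = Runtime.getRuntime().availableProcessors();
    int corePoolSize = Math.max(cpus / 4, 16); // "bigger of cpus/4 or 16"
    int maxPoolSize = 10 * corePoolSize;       // max (burst) worker count
    System.out.println("cpus=" + cpus + ", corePoolSize=" + corePoolSize
      + ", maxPoolSize=" + maxPoolSize);
  }
}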
@@ -766,13 +761,10 @@ public NonceKey createNonceKey(final long nonceGroup, final long nonce) { } /** - * Register a nonce for a procedure that is going to be submitted. - * A procId will be reserved and on submitProcedure(), - * the procedure with the specified nonce will take the reserved ProcId. - * If someone already reserved the nonce, this method will return the procId reserved, - * otherwise an invalid procId will be returned. and the caller should procede - * and submit the procedure. - * + * Register a nonce for a procedure that is going to be submitted. A procId will be reserved and + * on submitProcedure(), the procedure with the specified nonce will take the reserved ProcId. If + * someone already reserved the nonce, this method will return the procId reserved, otherwise an + * invalid procId will be returned. and the caller should procede and submit the procedure. * @param nonceKey A unique identifier for this operation from the client or process. * @return the procId associated with the nonce, if any otherwise an invalid procId. */ @@ -796,9 +788,8 @@ public long registerNonce(final NonceKey nonceKey) { // we found a registered nonce, but the procedure may not have been submitted yet. // since the client expect the procedure to be submitted, spin here until it is. final boolean traceEnabled = LOG.isTraceEnabled(); - while (isRunning() && - !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) && - nonceKeysToProcIdsMap.containsKey(nonceKey)) { + while (isRunning() && !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) + && nonceKeysToProcIdsMap.containsKey(nonceKey)) { if (traceEnabled) { LOG.trace("Waiting for pid=" + oldProcId.longValue() + " to be submitted"); } @@ -828,9 +819,8 @@ public void unregisterNonceIfProcedureWasNotSubmitted(final NonceKey nonceKey) { } /** - * If the failure failed before submitting it, we may want to give back the - * same error to the requests with the same nonceKey. - * + * If the failure failed before submitting it, we may want to give back the same error to the + * requests with the same nonceKey. * @param nonceKey A unique identifier for this operation from the client or process * @param procName name of the procedure, used to inform the user * @param procOwner name of the owner of the procedure, used to inform the user @@ -848,15 +838,15 @@ public void setFailureResultForNonce(NonceKey nonceKey, String procName, User pr } completed.computeIfAbsent(procId, (key) -> { - Procedure proc = new FailedProcedure<>(procId.longValue(), - procName, procOwner, nonceKey, exception); + Procedure proc = + new FailedProcedure<>(procId.longValue(), procName, procOwner, nonceKey, exception); return new CompletedProcedureRetainer<>(proc); }); } // ========================================================================== - // Submit/Abort Procedure + // Submit/Abort Procedure // ========================================================================== /** * Add a new root-procedure to the executor. @@ -868,45 +858,39 @@ public long submitProcedure(Procedure proc) { } /** - * Bypass a procedure. If the procedure is set to bypass, all the logic in - * execute/rollback will be ignored and it will return success, whatever. - * It is used to recover buggy stuck procedures, releasing the lock resources - * and letting other procedures run. Bypassing one procedure (and its ancestors will - * be bypassed automatically) may leave the cluster in a middle state, e.g. region - * not assigned, or some hdfs files left behind. 
After getting rid of those stuck procedures, - * the operators may have to do some clean up on hdfs or schedule some assign procedures - * to let region online. DO AT YOUR OWN RISK. + * Bypass a procedure. If the procedure is set to bypass, all the logic in execute/rollback will + * be ignored and it will return success, whatever. It is used to recover buggy stuck procedures, + * releasing the lock resources and letting other procedures run. Bypassing one procedure (and its + * ancestors will be bypassed automatically) may leave the cluster in a middle state, e.g. region + * not assigned, or some hdfs files left behind. After getting rid of those stuck procedures, the + * operators may have to do some clean up on hdfs or schedule some assign procedures to let region + * online. DO AT YOUR OWN RISK. *
<p/>
- * A procedure can be bypassed only if - * 1. The procedure is in state of RUNNABLE, WAITING, WAITING_TIMEOUT - * or it is a root procedure without any child. - * 2. No other worker thread is executing it - * 3. No child procedure has been submitted - * + * A procedure can be bypassed only if 1. The procedure is in state of RUNNABLE, WAITING, + * WAITING_TIMEOUT or it is a root procedure without any child. 2. No other worker thread is + * executing it 3. No child procedure has been submitted *
<p/>
- * If all the requirements are meet, the procedure and its ancestors will be - * bypassed and persisted to WAL. - * + * If all the requirements are meet, the procedure and its ancestors will be bypassed and + * persisted to WAL. *
<p/>
- * If the procedure is in WAITING state, will set it to RUNNABLE add it to run queue. - * TODO: What about WAITING_TIMEOUT? + * If the procedure is in WAITING state, will set it to RUNNABLE add it to run queue. TODO: What + * about WAITING_TIMEOUT? * @param pids the procedure id * @param lockWait time to wait lock - * @param force if force set to true, we will bypass the procedure even if it is executing. - * This is for procedures which can't break out during executing(due to bug, mostly) - * In this case, bypassing the procedure is not enough, since it is already stuck - * there. We need to restart the master after bypassing, and letting the problematic - * procedure to execute wth bypass=true, so in that condition, the procedure can be - * successfully bypassed. + * @param force if force set to true, we will bypass the procedure even if it is executing. This + * is for procedures which can't break out during executing(due to bug, mostly) In this + * case, bypassing the procedure is not enough, since it is already stuck there. We need + * to restart the master after bypassing, and letting the problematic procedure to + * execute wth bypass=true, so in that condition, the procedure can be successfully + * bypassed. * @param recursive We will do an expensive search for children of each pid. EXPENSIVE! * @return true if bypass success * @throws IOException IOException */ public List bypassProcedure(List pids, long lockWait, boolean force, - boolean recursive) - throws IOException { + boolean recursive) throws IOException { List result = new ArrayList(pids.size()); - for(long pid: pids) { + for (long pid : pids) { result.add(bypassProcedure(pid, lockWait, force, recursive)); } return result; @@ -921,16 +905,16 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur return false; } - LOG.debug("Begin bypass {} with lockWait={}, override={}, recursive={}", - procedure, lockWait, override, recursive); + LOG.debug("Begin bypass {} with lockWait={}, override={}, recursive={}", procedure, lockWait, + override, recursive); IdLock.Entry lockEntry = procExecutionLock.tryLockEntry(procedure.getProcId(), lockWait); if (lockEntry == null && !override) { - LOG.debug("Waited {} ms, but {} is still running, skipping bypass with force={}", - lockWait, procedure, override); + LOG.debug("Waited {} ms, but {} is still running, skipping bypass with force={}", lockWait, + procedure, override); return false; } else if (lockEntry == null) { - LOG.debug("Waited {} ms, but {} is still running, begin bypass with force={}", - lockWait, procedure, override); + LOG.debug("Waited {} ms, but {} is still running, begin bypass with force={}", lockWait, + procedure, override); } try { // check whether the procedure is already finished @@ -944,9 +928,9 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur // EXPENSIVE. Checks each live procedure of which there could be many!!! // Is there another way to get children of a procedure? LOG.info("Recursive bypass on children of pid={}", procedure.getProcId()); - this.procedures.forEachValue(1 /*Single-threaded*/, + this.procedures.forEachValue(1 /* Single-threaded */, // Transformer - v -> v.getParentProcId() == procedure.getProcId()? v: null, + v -> v.getParentProcId() == procedure.getProcId() ? 
v : null, // Consumer v -> { try { @@ -966,8 +950,8 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur && procedure.getState() != ProcedureState.WAITING && procedure.getState() != ProcedureState.WAITING_TIMEOUT) { LOG.debug("Bypassing procedures in RUNNABLE, WAITING and WAITING_TIMEOUT states " - + "(with no parent), {}", - procedure); + + "(with no parent), {}", + procedure); // Question: how is the bypass done here? return false; } @@ -984,7 +968,7 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur current = getProcedure(parentID); } - //wake up waiting procedure, already checked there is no child + // wake up waiting procedure, already checked there is no child if (procedure.getState() == ProcedureState.WAITING) { procedure.setState(ProcedureState.RUNNABLE); store.update(procedure); @@ -1025,7 +1009,7 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur * @param nonceKey the registered unique identifier for this operation from the client or process. * @return the procedure id, that can be used to monitor the operation */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "FindBugs is blind to the check-for-null") public long submitProcedure(Procedure proc, NonceKey nonceKey) { Preconditions.checkArgument(lastProcId.get() >= 0); @@ -1109,8 +1093,8 @@ private long pushProcedure(Procedure proc) { } /** - * Send an abort notification the specified procedure. - * Depending on the procedure implementation the abort can be considered or ignored. + * Send an abort notification the specified procedure. Depending on the procedure implementation + * the abort can be considered or ignored. * @param procId the procedure to abort * @return true if the procedure exists and has received the abort, otherwise false. */ @@ -1119,8 +1103,8 @@ public boolean abort(long procId) { } /** - * Send an abort notification to the specified procedure. - * Depending on the procedure implementation, the abort can be considered or ignored. + * Send an abort notification to the specified procedure. Depending on the procedure + * implementation, the abort can be considered or ignored. * @param procId the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if the procedure exists and has received the abort, otherwise false. @@ -1137,7 +1121,7 @@ public boolean abort(long procId, boolean mayInterruptIfRunning) { } // ========================================================================== - // Executor query helpers + // Executor query helpers // ========================================================================== public Procedure getProcedure(final long procId) { return procedures.get(procId); @@ -1161,9 +1145,8 @@ public Procedure getResult(long procId) { } /** - * Return true if the procedure is finished. - * The state may be "completed successfully" or "failed and rolledback". - * Use getResult() to check the state or get the result data. + * Return true if the procedure is finished. The state may be "completed successfully" or "failed + * and rolledback". Use getResult() to check the state or get the result data. * @param procId the ID of the procedure to check * @return true if the procedure execution is finished, otherwise false. 
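The submitProcedure()/isFinished()/getResult() javadoc above spells out the client-side contract: submit the root procedure, poll until it is finished (either completed successfully or failed and rolled back), then read the result. A hedged usage sketch follows; the MiniExecutor interface is a stand-in so the example compiles on its own, whereas a real caller would make the same three calls on a ProcedureExecutor instance:

public class SubmitAndWaitSketch {
  interface MiniExecutor {
    long submitProcedure(Runnable proc);
    boolean isFinished(long procId);
    String getResult(long procId);
  }

  static String submitAndWait(MiniExecutor executor, Runnable proc) throws InterruptedException {
    long procId = executor.submitProcedure(proc); // returns the root proc id
    while (!executor.isFinished(procId)) {
      Thread.sleep(100);                          // success and failed-and-rolled-back both count as finished
    }
    return executor.getResult(procId);
  }

  public static void main(String[] args) throws InterruptedException {
    // Trivial in-memory executor so the sketch runs end to end.
    MiniExecutor executor = new MiniExecutor() {
      private long lastId = 0;
      @Override public long submitProcedure(Runnable proc) { proc.run(); return ++lastId; }
      @Override public boolean isFinished(long procId) { return true; }
      @Override public String getResult(long procId) { return "pid=" + procId + " done"; }
    };
    System.out.println(submitAndWait(executor, () -> System.out.println("executing")));
  }
}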
*/ @@ -1213,8 +1196,8 @@ public Procedure getResultOrProcedure(long procId) { * Check if the user is this procedure's owner * @param procId the target procedure * @param user the user - * @return true if the user is the owner of the procedure, - * false otherwise or the owner is unknown. + * @return true if the user is the owner of the procedure, false otherwise or the owner is + * unknown. */ public boolean isProcedureOwner(long procId, User user) { if (user == null) { @@ -1252,19 +1235,19 @@ public Collection> getActiveProceduresNoCopy() { */ public List> getProcedures() { List> procedureList = - new ArrayList<>(procedures.size() + completed.size()); + new ArrayList<>(procedures.size() + completed.size()); procedureList.addAll(procedures.values()); // Note: The procedure could show up twice in the list with different state, as // it could complete after we walk through procedures list and insert into // procedureList - it is ok, as we will use the information in the Procedure // to figure it out; to prevent this would increase the complexity of the logic. completed.values().stream().map(CompletedProcedureRetainer::getProcedure) - .forEach(procedureList::add); + .forEach(procedureList::add); return procedureList; } // ========================================================================== - // Listeners helpers + // Listeners helpers // ========================================================================== public void registerListener(ProcedureExecutorListener listener) { this.listeners.add(listener); @@ -1276,7 +1259,7 @@ public boolean unregisterListener(ProcedureExecutorListener listener) { private void sendProcedureLoadedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureLoaded(procId); } catch (Throwable e) { @@ -1288,7 +1271,7 @@ private void sendProcedureLoadedNotification(final long procId) { private void sendProcedureAddedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureAdded(procId); } catch (Throwable e) { @@ -1300,7 +1283,7 @@ private void sendProcedureAddedNotification(final long procId) { private void sendProcedureFinishedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureFinished(procId); } catch (Throwable e) { @@ -1311,7 +1294,7 @@ private void sendProcedureFinishedNotification(final long procId) { } // ========================================================================== - // Procedure IDs helpers + // Procedure IDs helpers // ========================================================================== private long nextProcId() { long procId = lastProcId.incrementAndGet(); @@ -1343,7 +1326,7 @@ Long getRootProcedureId(Procedure proc) { } // ========================================================================== - // Executions + // Executions // ========================================================================== private void executeProcedure(Procedure proc) { if (proc.isFinished()) { @@ -1579,9 +1562,8 @@ private void cleanupAfterRollbackOneStep(Procedure proc) { } /** - * Execute the rollback of the procedure step. 
- * It updates the store with the new state (stack index) - * or will remove completly the procedure in case it is a child. + * Execute the rollback of the procedure step. It updates the store with the new state (stack + * index) or will remove completly the procedure in case it is a child. */ private LockState executeRollback(Procedure proc) { try { @@ -1619,36 +1601,38 @@ private void yieldProcedure(Procedure proc) { /** * Executes procedure *

- * <ul>
- *  <li>Calls the doExecute() of the procedure
- *  <li>If the procedure execution didn't fail (i.e. valid user input)
- *  <ul>
- *    <li>...and returned subprocedures
- *    <ul><li>The subprocedures are initialized.
- *      <li>The subprocedures are added to the store
- *      <li>The subprocedures are added to the runnable queue
- *      <li>The procedure is now in a WAITING state, waiting for the subprocedures to complete
- *    </ul>
- *    </li>
- *   <li>...if there are no subprocedure
- *    <ul><li>the procedure completed successfully
- *      <li>if there is a parent (WAITING)
- *      <li>the parent state will be set to RUNNABLE
- *    </ul>
- *   </li>
- *  </ul>
- *  </li>
- *  <li>In case of failure
- *   <ul>
- *    <li>The store is updated with the new state</li>
- *    <li>The executor (caller of this method) will start the rollback of the procedure</li>
- *   </ul>
- *  </li>
- * </ul>
+ * <ul>
+ * <li>Calls the doExecute() of the procedure
+ * <li>If the procedure execution didn't fail (i.e. valid user input)
+ * <ul>
+ * <li>...and returned subprocedures
+ * <ul>
+ * <li>The subprocedures are initialized.
+ * <li>The subprocedures are added to the store
+ * <li>The subprocedures are added to the runnable queue
+ * <li>The procedure is now in a WAITING state, waiting for the subprocedures to complete
+ * </ul>
+ * </li>
+ * <li>...if there are no subprocedure
+ * <ul>
+ * <li>the procedure completed successfully
+ * <li>if there is a parent (WAITING)
+ * <li>the parent state will be set to RUNNABLE
+ * </ul>
+ * </li>
+ * </ul>
+ * </li>
+ * <li>In case of failure
+ * <ul>
+ * <li>The store is updated with the new state</li>
+ * <li>The executor (caller of this method) will start the rollback of the procedure</li>
+ * </ul>
+ * </li>
+ * </ul>
  • + * */ private void execProcedure(RootProcedureState procStack, Procedure procedure) { Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE, - "NOT RUNNABLE! " + procedure.toString()); + "NOT RUNNABLE! " + procedure.toString()); // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException. // The exception is caught below and then we hurry to the exit without disturbing state. The @@ -1699,10 +1683,9 @@ private void execProcedure(RootProcedureState procStack, // Yield the current procedure, and make the subprocedure runnable // subprocs may come back 'null'. subprocs = initializeChildren(procStack, procedure, subprocs); - LOG.info("Initialized subprocedures=" + - (subprocs == null? null: - Stream.of(subprocs).map(e -> "{" + e.toString() + "}"). - collect(Collectors.toList()).toString())); + LOG.info("Initialized subprocedures=" + (subprocs == null ? null + : Stream.of(subprocs).map(e -> "{" + e.toString() + "}") + .collect(Collectors.toList()).toString())); } } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) { LOG.trace("Added to timeoutExecutor {}", procedure); @@ -1718,8 +1701,8 @@ private void execProcedure(RootProcedureState procStack, // allows to kill the executor before something is stored to the wal. // useful to test the procedure recovery. - if (testing != null && - testing.shouldKillBeforeStoreUpdate(suspended, procedure.hasParent())) { + if (testing != null + && testing.shouldKillBeforeStoreUpdate(suspended, procedure.hasParent())) { kill("TESTING: Kill BEFORE store update: " + procedure); } @@ -1740,8 +1723,8 @@ private void execProcedure(RootProcedureState procStack, return; } // if the procedure is kind enough to pass the slot to someone else, yield - if (procedure.isRunnable() && !suspended && - procedure.isYieldAfterExecutionStep(getEnvironment())) { + if (procedure.isRunnable() && !suspended + && procedure.isYieldAfterExecutionStep(getEnvironment())) { yieldProcedure(procedure); return; } @@ -1787,8 +1770,8 @@ private Procedure[] initializeChildren(RootProcedureState subproc = subprocs[i]; if (subproc == null) { String msg = "subproc[" + i + "] is null, aborting the procedure"; - procedure.setFailure(new RemoteProcedureException(msg, - new IllegalArgumentIOException(msg))); + procedure + .setFailure(new RemoteProcedureException(msg, new IllegalArgumentIOException(msg))); return null; } @@ -1839,8 +1822,8 @@ private void countDownChildren(RootProcedureState procStack, // children have completed, move parent to front of the queue. store.update(parent); scheduler.addFront(parent); - LOG.info("Finished subprocedure pid={}, resume processing ppid={}", - procedure.getProcId(), parent.getProcId()); + LOG.info("Finished subprocedure pid={}, resume processing ppid={}", procedure.getProcId(), + parent.getProcId()); return; } } @@ -1883,10 +1866,10 @@ private void handleInterruptedException(Procedure proc, Interrupte private void execCompletionCleanup(Procedure proc) { final TEnvironment env = getEnvironment(); if (proc.hasLock()) { - LOG.warn("Usually this should not happen, we will release the lock before if the procedure" + - " is finished, even if the holdLock is true, arrive here means we have some holes where" + - " we do not release the lock. 
And the releaseLock below may fail since the procedure may" + - " have already been deleted from the procedure store."); + LOG.warn("Usually this should not happen, we will release the lock before if the procedure" + + " is finished, even if the holdLock is true, arrive here means we have some holes where" + + " we do not release the lock. And the releaseLock below may fail since the procedure may" + + " have already been deleted from the procedure store."); releaseLock(proc, true); } try { @@ -1941,7 +1924,7 @@ public IdLock getProcExecutionLock() { } // ========================================================================== - // Worker Thread + // Worker Thread // ========================================================================== private class WorkerThread extends StoppableThread { private final AtomicLong executionStartTime = new AtomicLong(Long.MAX_VALUE); @@ -1960,6 +1943,7 @@ protected WorkerThread(ThreadGroup group, String prefix) { public void sendStopSignal() { scheduler.signalAll(); } + @Override public void run() { long lastUpdate = EnvironmentEdgeManager.currentTime(); @@ -1986,8 +1970,8 @@ public void run() { procExecutionLock.releaseLockEntry(lockEntry); activeCount = activeExecutorCount.decrementAndGet(); runningCount = store.setRunningProcedureCount(activeCount); - LOG.trace("Halt pid={} runningCount={}, activeCount={}", proc.getProcId(), - runningCount, activeCount); + LOG.trace("Halt pid={} runningCount={}, activeCount={}", proc.getProcId(), runningCount, + activeCount); this.activeProcedure = null; lastUpdate = EnvironmentEdgeManager.currentTime(); executionStartTime.set(Long.MAX_VALUE); @@ -2004,7 +1988,7 @@ public void run() { @Override public String toString() { Procedure p = this.activeProcedure; - return getName() + "(pid=" + (p == null? Procedure.NO_PROC_ID: p.getProcId() + ")"); + return getName() + "(pid=" + (p == null ? Procedure.NO_PROC_ID : p.getProcId() + ")"); } /** @@ -2107,12 +2091,11 @@ private void checkThreadCount(final int stuckCount) { } private void refreshConfig() { - addWorkerStuckPercentage = conf.getFloat(WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY, - DEFAULT_WORKER_ADD_STUCK_PERCENTAGE); - timeoutInterval = conf.getInt(WORKER_MONITOR_INTERVAL_CONF_KEY, - DEFAULT_WORKER_MONITOR_INTERVAL); - stuckThreshold = conf.getInt(WORKER_STUCK_THRESHOLD_CONF_KEY, - DEFAULT_WORKER_STUCK_THRESHOLD); + addWorkerStuckPercentage = + conf.getFloat(WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY, DEFAULT_WORKER_ADD_STUCK_PERCENTAGE); + timeoutInterval = + conf.getInt(WORKER_MONITOR_INTERVAL_CONF_KEY, DEFAULT_WORKER_MONITOR_INTERVAL); + stuckThreshold = conf.getInt(WORKER_STUCK_THRESHOLD_CONF_KEY, DEFAULT_WORKER_STUCK_THRESHOLD); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java index cd65c1f74aed..f8232cce950e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; @@ -23,13 +22,10 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Special procedure used as a chore. - * Instead of bringing the Chore class in (dependencies reason), - * we reuse the executor timeout thread for this special case. - * - * The assumption is that procedure is used as hook to dispatch other procedures - * or trigger some cleanups. It does not store state in the ProcedureStore. - * this is just for in-memory chore executions. + * Special procedure used as a chore. Instead of bringing the Chore class in (dependencies reason), + * we reuse the executor timeout thread for this special case. The assumption is that procedure is + * used as hook to dispatch other procedures or trigger some cleanups. It does not store state in + * the ProcedureStore. this is just for in-memory chore executions. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -56,12 +52,10 @@ protected boolean abort(final TEnvironment env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java index 48413928e5b7..f86a2b2d00a5 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.hadoop.hbase.metrics.Counter; @@ -26,12 +25,11 @@ * With this interface, the procedure framework provides means to collect following set of metrics * per procedure type for all procedures: *
- *   <ul>
- *     <li>Count of submitted procedure instances</li>
- *     <li>Time histogram for successfully completed procedure instances</li>
- *     <li>Count of failed procedure instances</li>
- *   </ul>
- *
- * Please implement this interface to return appropriate metrics.
+ * <ul>
+ * <li>Count of submitted procedure instances</li>
+ * <li>Time histogram for successfully completed procedure instances</li>
+ * <li>Count of failed procedure instances</li>
  • + * + * Please implement this interface to return appropriate metrics. */ @InterfaceAudience.Private public interface ProcedureMetrics { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java index 72b2b284ca19..53a3714f268e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,8 +38,8 @@ public interface ProcedureScheduler { void stop(); /** - * In case the class is blocking on poll() waiting for items to be added, - * this method should awake poll() and poll() should return. + * In case the class is blocking on poll() waiting for items to be added, this method should awake + * poll() and poll() should return. */ void signalAll(); @@ -75,15 +75,14 @@ public interface ProcedureScheduler { void addBack(Procedure proc, boolean notify); /** - * The procedure can't run at the moment. - * add it back to the queue, giving priority to someone else. + * The procedure can't run at the moment. add it back to the queue, giving priority to someone + * else. * @param proc the Procedure to add back to the list */ void yield(Procedure proc); /** - * The procedure in execution completed. - * This can be implemented to perform cleanups. + * The procedure in execution completed. This can be implemented to perform cleanups. * @param proc the Procedure that completed the execution. */ void completionCleanup(Procedure proc); @@ -126,9 +125,9 @@ public interface ProcedureScheduler { int size(); /** - * Clear current state of scheduler such that it is equivalent to newly created scheduler. - * Used for testing failure and recovery. To emulate server crash/restart, - * {@link ProcedureExecutor} resets its own state and calls clear() on scheduler. + * Clear current state of scheduler such that it is equivalent to newly created scheduler. Used + * for testing failure and recovery. To emulate server crash/restart, {@link ProcedureExecutor} + * resets its own state and calls clear() on scheduler. 
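The ProcedureScheduler javadoc above requires that signalAll() wake any thread blocked in poll(), for example when the executor is stopping, and that clear() return the scheduler to a freshly created state for crash/restart testing. A minimal sketch of the poll()/signalAll() part of that contract using plain wait/notify (an illustration only, not the AbstractProcedureScheduler implementation):

import java.util.ArrayDeque;
import java.util.Deque;

public class PollSignalSketch {
  private final Deque<String> queue = new ArrayDeque<>();
  private boolean running = true;

  public synchronized void addBack(String proc) {
    queue.addLast(proc);
    notifyAll();                // wake a blocked poller
  }

  /** Blocks while the queue is empty; returns null once the scheduler was signalled to stop. */
  public synchronized String poll() throws InterruptedException {
    while (running && queue.isEmpty()) {
      wait();                   // blocked pollers park here
    }
    return queue.pollFirst();   // null once stopped and drained
  }

  /** Wake every thread blocked in poll(), e.g. when the executor stops. */
  public synchronized void signalAll() {
    running = false;
    notifyAll();
  }

  public static void main(String[] args) throws InterruptedException {
    PollSignalSketch scheduler = new PollSignalSketch();
    scheduler.addBack("pid=7");
    System.out.println(scheduler.poll()); // pid=7
    scheduler.signalAll();
    System.out.println(scheduler.poll()); // null, scheduler stopped
  }
}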
*/ void clear(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java index 216022f1c798..fc4eb1532ee4 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.Message; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java index 9f521214f075..95fafae72665 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java index c557c2021b40..ebf7c922bde4 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,10 +46,11 @@ */ @InterfaceAudience.Private public final class ProcedureUtil { - private ProcedureUtil() { } + private ProcedureUtil() { + } // ========================================================================== - // Reflection helpers to create/validate a Procedure object + // Reflection helpers to create/validate a Procedure object // ========================================================================== private static Procedure newProcedure(String className) throws BadProcedureException { try { @@ -67,8 +68,8 @@ private static Procedure newProcedure(String className) throws BadProcedureEx return ctor.newInstance(); } catch (Exception e) { throw new BadProcedureException( - "The procedure class " + className + " must be accessible and have an empty constructor", - e); + "The procedure class " + className + " must be accessible and have an empty constructor", + e); } } @@ -85,18 +86,18 @@ static void validateClass(Procedure proc) throws BadProcedureException { throw new Exception("the " + clazz + " constructor is not public"); } } catch (Exception e) { - throw new BadProcedureException("The procedure class " + proc.getClass().getName() + - " must be accessible and have an empty constructor", e); + throw new BadProcedureException("The procedure class " + proc.getClass().getName() + + " must be accessible and have an empty constructor", e); } } // ========================================================================== - // convert to and from Procedure object + // convert to and from Procedure object // ========================================================================== /** - * A serializer for our Procedures. Instead of the previous serializer, it - * uses the stateMessage list to store the internal state of the Procedures. + * A serializer for our Procedures. Instead of the previous serializer, it uses the stateMessage + * list to store the internal state of the Procedures. */ private static class StateSerializer implements ProcedureStateSerializer { private final ProcedureProtos.Procedure.Builder builder; @@ -113,8 +114,7 @@ public void serialize(Message message) throws IOException { } @Override - public M deserialize(Class clazz) - throws IOException { + public M deserialize(Class clazz) throws IOException { if (deserializeIndex >= builder.getStateMessageCount()) { throw new IOException("Invalid state message index: " + deserializeIndex); } @@ -129,8 +129,8 @@ public M deserialize(Class clazz) } /** - * A serializer (deserializer) for those Procedures which were serialized - * before this patch. It deserializes the old, binary stateData field. + * A serializer (deserializer) for those Procedures which were serialized before this patch. It + * deserializes the old, binary stateData field. 
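A little further down in this file, the createRetryCounter() hunk configures a retry counter with an exponential backoff limited by a maximum sleep time, defaulting to a 1 second interval and a 10 minute cap. As a rough, hypothetical illustration of that backoff shape (standalone code, not the actual RetryCounter or ExponentialBackoffPolicyWithLimit API):

public class BackoffSketch {
  /** Exponential growth from the base interval, capped at the maximum sleep time. */
  static long backoffMillis(long sleepIntervalMs, long maxSleepTimeMs, int attempts) {
    long sleep = sleepIntervalMs * (1L << Math.min(attempts, 30));
    return Math.min(sleep, maxSleepTimeMs);
  }

  public static void main(String[] args) {
    long intervalMs = 1000;           // default retry sleep interval: 1 second
    long maxMs = 10 * 60 * 1000;      // default max sleep time: 10 minutes
    for (int attempt = 0; attempt < 12; attempt++) {
      System.out.println("attempt " + attempt + " -> "
        + backoffMillis(intervalMs, maxMs, attempt) + " ms");
    }
  }
}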
*/ private static class CompatStateSerializer implements ProcedureStateSerializer { private InputStream inputStream; @@ -146,8 +146,7 @@ public void serialize(Message message) throws IOException { @SuppressWarnings("unchecked") @Override - public M deserialize(Class clazz) - throws IOException { + public M deserialize(Class clazz) throws IOException { Parser parser = (Parser) Internal.getDefaultInstance(clazz).getParserForType(); try { return parser.parseDelimitedFrom(inputStream); @@ -167,12 +166,10 @@ public static ProcedureProtos.Procedure convertToProtoProcedure(Procedure pro Preconditions.checkArgument(proc != null); validateClass(proc); - final ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder() - .setClassName(proc.getClass().getName()) - .setProcId(proc.getProcId()) - .setState(proc.getState()) - .setSubmittedTime(proc.getSubmittedTime()) - .setLastUpdate(proc.getLastUpdate()); + final ProcedureProtos.Procedure.Builder builder = + ProcedureProtos.Procedure.newBuilder().setClassName(proc.getClass().getName()) + .setProcId(proc.getProcId()).setState(proc.getState()) + .setSubmittedTime(proc.getSubmittedTime()).setLastUpdate(proc.getLastUpdate()); if (proc.hasParent()) { builder.setParentId(proc.getParentProcId()); @@ -259,9 +256,9 @@ public static Procedure convertToProcedure(ProcedureProtos.Procedure proto) } if (proto.hasException()) { - assert proc.getState() == ProcedureProtos.ProcedureState.FAILED || - proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK : - "The procedure must be failed (waiting to rollback) or rolledback"; + assert proc.getState() == ProcedureProtos.ProcedureState.FAILED + || proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK + : "The procedure must be failed (waiting to rollback) or rolledback"; proc.setFailure(RemoteProcedureException.fromProto(proto.getException())); } @@ -298,11 +295,11 @@ public static Procedure convertToProcedure(ProcedureProtos.Procedure proto) } // ========================================================================== - // convert from LockedResource object + // convert from LockedResource object // ========================================================================== - public static LockServiceProtos.LockedResourceType convertToProtoResourceType( - LockedResourceType resourceType) { + public static LockServiceProtos.LockedResourceType + convertToProtoResourceType(LockedResourceType resourceType) { return LockServiceProtos.LockedResourceType.valueOf(resourceType.name()); } @@ -310,13 +307,12 @@ public static LockServiceProtos.LockType convertToProtoLockType(LockType lockTyp return LockServiceProtos.LockType.valueOf(lockType.name()); } - public static LockServiceProtos.LockedResource convertToProtoLockedResource( - LockedResource lockedResource) throws IOException { + public static LockServiceProtos.LockedResource + convertToProtoLockedResource(LockedResource lockedResource) throws IOException { LockServiceProtos.LockedResource.Builder builder = LockServiceProtos.LockedResource.newBuilder(); - builder - .setResourceType(convertToProtoResourceType(lockedResource.getResourceType())) + builder.setResourceType(convertToProtoResourceType(lockedResource.getResourceType())) .setResourceName(lockedResource.getResourceName()) .setLockType(convertToProtoLockType(lockedResource.getLockType())); @@ -331,8 +327,7 @@ public static LockServiceProtos.LockedResource convertToProtoLockedResource( builder.setSharedLockCount(lockedResource.getSharedLockCount()); for (Procedure waitingProcedure : 
lockedResource.getWaitingProcedures()) { - ProcedureProtos.Procedure waitingProcedureProto = - convertToProtoProcedure(waitingProcedure); + ProcedureProtos.Procedure waitingProcedureProto = convertToProtoProcedure(waitingProcedure); builder.addWaitingProcedures(waitingProcedureProto); } @@ -340,17 +335,17 @@ public static LockServiceProtos.LockedResource convertToProtoLockedResource( } public static final String PROCEDURE_RETRY_SLEEP_INTERVAL_MS = - "hbase.procedure.retry.sleep.interval.ms"; + "hbase.procedure.retry.sleep.interval.ms"; // default to 1 second public static final long DEFAULT_PROCEDURE_RETRY_SLEEP_INTERVAL_MS = 1000; public static final String PROCEDURE_RETRY_MAX_SLEEP_TIME_MS = - "hbase.procedure.retry.max.sleep.time.ms"; + "hbase.procedure.retry.max.sleep.time.ms"; // default to 10 minutes public static final long DEFAULT_PROCEDURE_RETRY_MAX_SLEEP_TIME_MS = - TimeUnit.MINUTES.toMillis(10); + TimeUnit.MINUTES.toMillis(10); /** * Get a retry counter for getting the backoff time. We will use the @@ -363,11 +358,11 @@ public static LockServiceProtos.LockedResource convertToProtoLockedResource( */ public static RetryCounter createRetryCounter(Configuration conf) { long sleepIntervalMs = - conf.getLong(PROCEDURE_RETRY_SLEEP_INTERVAL_MS, DEFAULT_PROCEDURE_RETRY_SLEEP_INTERVAL_MS); + conf.getLong(PROCEDURE_RETRY_SLEEP_INTERVAL_MS, DEFAULT_PROCEDURE_RETRY_SLEEP_INTERVAL_MS); long maxSleepTimeMs = - conf.getLong(PROCEDURE_RETRY_MAX_SLEEP_TIME_MS, DEFAULT_PROCEDURE_RETRY_MAX_SLEEP_TIME_MS); + conf.getLong(PROCEDURE_RETRY_MAX_SLEEP_TIME_MS, DEFAULT_PROCEDURE_RETRY_MAX_SLEEP_TIME_MS); RetryConfig retryConfig = new RetryConfig().setSleepInterval(sleepIntervalMs) - .setMaxSleepTime(maxSleepTimeMs).setBackoffPolicy(new ExponentialBackoffPolicyWithLimit()); + .setMaxSleepTime(maxSleepTimeMs).setBackoffPolicy(new ExponentialBackoffPolicyWithLimit()); return new RetryCounter(retryConfig); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java index dbb998132be5..fc564711e883 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 03702e6f64bb..2ed563aeffb9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; @@ -36,22 +35,23 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * A procedure dispatcher that aggregates and sends after elapsed time or after we hit - * count threshold. Creates its own threadpool to run RPCs with timeout. + * A procedure dispatcher that aggregates and sends after elapsed time or after we hit count + * threshold. Creates its own threadpool to run RPCs with timeout. *
 * <ul>
 * <li>Each server queue has a dispatch buffer</li>
- * <li>Once the dispatch buffer reaches a threshold-size/time we send</li>
+ * <li>Once the dispatch buffer reaches a threshold-size/time we send</li>
 * </ul>
- * <p>
- * Call {@link #start()} and then {@link #submitTask(Runnable)}. When done,
- * call {@link #stop()}.
+ * <p>
    + * Call {@link #start()} and then {@link #submitTask(Runnable)}. When done, call {@link #stop()}. */ @InterfaceAudience.Private public abstract class RemoteProcedureDispatcher> { @@ -92,8 +92,9 @@ public boolean start() { return false; } - LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), queueMaxSize={}, " + - "operationDelay={}", this.corePoolSize, this.queueMaxSize, this.operationDelay); + LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), queueMaxSize={}, " + + "operationDelay={}", + this.corePoolSize, this.queueMaxSize, this.operationDelay); // Create the timeout executor timeoutExecutor = new TimeoutExecutorThread(); @@ -102,7 +103,7 @@ public boolean start() { // Create the thread pool that will execute RPCs threadPool = Threads.getBoundedCachedThreadPool(corePoolSize, 60L, TimeUnit.SECONDS, new ThreadFactoryBuilder().setNameFormat(this.getClass().getSimpleName() + "-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(getUncaughtExceptionHandler()).build()); + .setDaemon(true).setUncaughtExceptionHandler(getUncaughtExceptionHandler()).build()); return true; } @@ -144,14 +145,14 @@ public void join() { protected abstract UncaughtExceptionHandler getUncaughtExceptionHandler(); // ============================================================================================ - // Node Helpers + // Node Helpers // ============================================================================================ /** * Add a node that will be able to execute remote procedures * @param key the node identifier */ public void addNode(final TRemote key) { - assert key != null: "Tried to add a node with a null key"; + assert key != null : "Tried to add a node with a null key"; nodeMap.computeIfAbsent(key, k -> new BufferNode(k)); } @@ -160,8 +161,7 @@ public void addNode(final TRemote key) { * @param key the node identifier */ public void addOperationToNode(final TRemote key, RemoteProcedure rp) - throws NullTargetServerDispatchException, NoServerDispatchException, - NoNodeDispatchException { + throws NullTargetServerDispatchException, NoServerDispatchException, NoNodeDispatchException { if (key == null) { throw new NullTargetServerDispatchException(rp.toString()); } @@ -203,7 +203,7 @@ public boolean removeNode(final TRemote key) { } // ============================================================================================ - // Task Helpers + // Task Helpers // ============================================================================================ protected final void submitTask(Runnable task) { threadPool.execute(task); @@ -214,6 +214,7 @@ protected final void submitTask(Runnable task, long delay, TimeUnit unit) { } protected abstract void remoteDispatch(TRemote key, Set operations); + protected abstract void abortPendingOperations(TRemote key, Set operations); /** @@ -236,11 +237,11 @@ public RemoteProcedure getRemoteProcedure() { */ public interface RemoteProcedure { /** - * For building the remote operation. - * May be empty if no need to send remote call. Usually, this means the RemoteProcedure has been - * finished already. This is possible, as we may have already sent the procedure to RS but then - * the rpc connection is broken so the executeProcedures call fails, but the RS does receive the - * procedure and execute it and then report back, before we retry again. + * For building the remote operation. May be empty if no need to send remote call. Usually, this + * means the RemoteProcedure has been finished already. 
This is possible, as we may have already + * sent the procedure to RS but then the rpc connection is broken so the executeProcedures call + * fails, but the RS does receive the procedure and execute it and then report back, before we + * retry again. */ Optional remoteCallBuild(TEnv env, TRemote remote); @@ -262,9 +263,8 @@ public interface RemoteProcedure { void remoteOperationFailed(TEnv env, RemoteProcedureException error); /** - * Whether store this remote procedure in dispatched queue - * only OpenRegionProcedure and CloseRegionProcedure return false since they are - * not fully controlled by dispatcher + * Whether store this remote procedure in dispatched queue only OpenRegionProcedure and + * CloseRegionProcedure return false since they are not fully controlled by dispatcher */ default boolean storeInDispatchedQueue() { return true; @@ -294,11 +294,11 @@ protected ArrayListMultimap, RemoteOperation> buildAndGroupRequestByTyp protected List fetchType( final ArrayListMultimap, RemoteOperation> requestByType, final Class type) { - return (List)requestByType.removeAll(type); + return (List) requestByType.removeAll(type); } // ============================================================================================ - // Timeout Helpers + // Timeout Helpers // ============================================================================================ private final class TimeoutExecutorThread extends Thread { private final DelayQueue queue = new DelayQueue(); @@ -310,13 +310,14 @@ public TimeoutExecutorThread() { @Override public void run() { while (running.get()) { - final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, - 20, TimeUnit.SECONDS); + final DelayedWithTimeout task = + DelayedUtil.takeWithoutInterrupt(queue, 20, TimeUnit.SECONDS); if (task == null || task == DelayedUtil.DELAYED_POISON) { if (task == null && queue.size() > 0) { LOG.error("DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting" - + " elapsed. If this is repeated consistently, it means no element is getting expired" - + " from the queue and it might freeze the system. Queue: {}", queue); + + " elapsed. If this is repeated consistently, it means no element is getting expired" + + " from the queue and it might freeze the system. 
Queue: {}", + queue); } // the executor may be shutting down, and the task is just the shutdown request continue; @@ -348,8 +349,8 @@ public void awaitTermination() { sendStopSignal(); join(250); if (i > 0 && (i % 8) == 0) { - LOG.warn("Waiting termination of thread " + getName() + ", " + - StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime)); + LOG.warn("Waiting termination of thread " + getName() + ", " + + StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime)); } } } catch (InterruptedException e) { @@ -359,7 +360,7 @@ public void awaitTermination() { } // ============================================================================================ - // Internals Helpers + // Internals Helpers // ============================================================================================ /** @@ -412,7 +413,7 @@ public synchronized void abortOperationsInQueue() { this.dispatchedOperations.clear(); } - public synchronized void operationCompleted(final RemoteProcedure remoteProcedure){ + public synchronized void operationCompleted(final RemoteProcedure remoteProcedure) { this.dispatchedOperations.remove(remoteProcedure); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java index 91ad920f27f8..7d9dd6deb4c7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; @@ -32,23 +31,23 @@ * RemoteProcedureExceptions are sent to 'remote' peers to signal an abort in the face of failures. * When serialized for transmission we encode using Protobufs to ensure version compatibility. *

    - * RemoteProcedureException exceptions contain a Throwable as its cause. - * This can be a "regular" exception generated locally or a ProxyThrowable that is a representation - * of the original exception created on original 'remote' source. These ProxyThrowables have their - * their stacks traces and messages overridden to reflect the original 'remote' exception. + * RemoteProcedureException exceptions contain a Throwable as its cause. This can be a "regular" + * exception generated locally or a ProxyThrowable that is a representation of the original + * exception created on original 'remote' source. These ProxyThrowables have their their stacks + * traces and messages overridden to reflect the original 'remote' exception. */ @InterfaceAudience.Private @InterfaceStability.Evolving @SuppressWarnings("serial") public class RemoteProcedureException extends ProcedureException { /** - * Name of the throwable's source such as a host or thread name. Must be non-null. + * Name of the throwable's source such as a host or thread name. Must be non-null. */ private final String source; /** - * Create a new RemoteProcedureException that can be serialized. - * It is assumed that this came form a local source. + * Create a new RemoteProcedureException that can be serialized. It is assumed that this came form + * a local source. * @param source the host or thread name of the source * @param cause the actual cause of the exception */ @@ -66,10 +65,10 @@ public String getSource() { public Exception unwrapRemoteException() { final Throwable cause = getCause(); if (cause instanceof RemoteException) { - return ((RemoteException)cause).unwrapRemoteException(); + return ((RemoteException) cause).unwrapRemoteException(); } if (cause instanceof Exception) { - return (Exception)cause; + return (Exception) cause; } return new Exception(cause); } @@ -81,7 +80,7 @@ public Exception unwrapRemoteException() { public IOException unwrapRemoteIOException() { final Exception cause = unwrapRemoteException(); if (cause instanceof IOException) { - return (IOException)cause; + return (IOException) cause; } return new IOException(cause); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java index 440f9e7d6ec1..9990bdeb4306 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,15 +29,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** - * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". - * A "Root Procedure" is a Procedure without parent, each subprocedure will be - * added to the "Root Procedure" stack (or rollback-stack). - * - * RootProcedureState is used and managed only by the ProcedureExecutor. - * Long rootProcId = getRootProcedureId(proc); - * rollbackStack.get(rootProcId).acquire(proc) - * rollbackStack.get(rootProcId).release(proc) - * ... + * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". 
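The unwrapRemoteException and unwrapRemoteIOException hunks above only change cast spacing, but the pattern they implement, peeling a wrapped remote cause back to a usable exception type, is easy to lose in the reflow. A small generic sketch of that pattern (not the HBase class itself):

import java.io.IOException;

/** Illustrative helpers for unwrapping a wrapped remote failure. */
public final class CauseUnwrapper {
  private CauseUnwrapper() {
  }

  /** Returns the cause as an Exception, wrapping non-Exception throwables. */
  public static Exception asException(Throwable cause) {
    if (cause instanceof Exception) {
      return (Exception) cause;
    }
    return new Exception(cause);
  }

  /** Returns the cause as an IOException, wrapping anything else. */
  public static IOException asIOException(Throwable cause) {
    Exception e = asException(cause);
    if (e instanceof IOException) {
      return (IOException) e;
    }
    return new IOException(e);
  }

  public static void main(String[] args) {
    System.out.println(asIOException(new RuntimeException("remote failure")));
  }
}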
A "Root + * Procedure" is a Procedure without parent, each subprocedure will be added to the "Root Procedure" + * stack (or rollback-stack). RootProcedureState is used and managed only by the ProcedureExecutor. + * Long rootProcId = getRootProcedureId(proc); rollbackStack.get(rootProcId).acquire(proc) + * rollbackStack.get(rootProcId).release(proc) ... */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -45,9 +41,9 @@ class RootProcedureState { private static final Logger LOG = LoggerFactory.getLogger(RootProcedureState.class); private enum State { - RUNNING, // The Procedure is running or ready to run - FAILED, // The Procedure failed, waiting for the rollback executing - ROLLINGBACK, // The Procedure failed and the execution was rolledback + RUNNING, // The Procedure is running or ready to run + FAILED, // The Procedure failed, waiting for the rollback executing + ROLLINGBACK, // The Procedure failed and the execution was rolledback } private Set> subprocs = null; @@ -102,7 +98,7 @@ protected synchronized List> getSubproceduresStack() { protected synchronized RemoteProcedureException getException() { if (subprocStack != null) { - for (Procedure proc: subprocStack) { + for (Procedure proc : subprocStack) { if (proc.hasException()) { return proc.getException(); } @@ -137,8 +133,8 @@ protected synchronized void abort() { } /** - * Called by the ProcedureExecutor after the procedure step is completed, - * to add the step to the rollback list (or procedure stack) + * Called by the ProcedureExecutor after the procedure step is completed, to add the step to the + * rollback list (or procedure stack) */ protected synchronized void addRollbackStep(Procedure proc) { if (proc.isFailed()) { @@ -163,11 +159,10 @@ protected synchronized void addSubProcedure(Procedure proc) { } /** - * Called on store load by the ProcedureExecutor to load part of the stack. - * - * Each procedure has its own stack-positions. Which means we have to write - * to the store only the Procedure we executed, and nothing else. - * on load we recreate the full stack by aggregating each procedure stack-positions. + * Called on store load by the ProcedureExecutor to load part of the stack. Each procedure has its + * own stack-positions. Which means we have to write to the store only the Procedure we executed, + * and nothing else. on load we recreate the full stack by aggregating each procedure + * stack-positions. */ protected synchronized void loadStack(Procedure proc) { addSubProcedure(proc); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java index 20abf651e306..d40986b1e12b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,24 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.SequentialProcedureData; /** * A SequentialProcedure describes one step in a procedure chain: + * *

 * <pre>
 *   -> Step 1 -> Step 2 -> Step 3
 * </pre>
    - * The main difference from a base Procedure is that the execute() of a - * SequentialProcedure will be called only once; there will be no second - * execute() call once the children are finished. which means once the child - * of a SequentialProcedure are completed the SequentialProcedure is completed too. + * + * The main difference from a base Procedure is that the execute() of a SequentialProcedure will be + * called only once; there will be no second execute() call once the children are finished. which + * means once the child of a SequentialProcedure are completed the SequentialProcedure is completed + * too. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -53,8 +54,7 @@ protected Procedure[] doExecute(final TEnvironment env) } @Override - protected void doRollback(final TEnvironment env) - throws IOException, InterruptedException { + protected void doRollback(final TEnvironment env) throws IOException, InterruptedException { updateTimestamp(); if (executed) { try { @@ -67,16 +67,14 @@ protected void doRollback(final TEnvironment env) } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { SequentialProcedureData.Builder data = SequentialProcedureData.newBuilder(); data.setExecuted(executed); serializer.serialize(data.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { SequentialProcedureData data = serializer.deserialize(SequentialProcedureData.class); executed = data.getExecuted(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java index 2b043d472d0e..f2b4d4820da7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.util.Collections; @@ -80,8 +79,7 @@ public List getLocks() { } @Override - public LockedResource getLockResource(LockedResourceType resourceType, - String resourceName) { + public LockedResource getLockResource(LockedResourceType resourceType, String resourceName) { return null; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java index d1af4969141a..5503eaa2f254 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,19 +31,17 @@ /** * Procedure described by a series of steps. - * - *

    The procedure implementor must have an enum of 'states', describing - * the various step of the procedure. - * Once the procedure is running, the procedure-framework will call executeFromState() - * using the 'state' provided by the user. The first call to executeFromState() - * will be performed with 'state = null'. The implementor can jump between - * states using setNextState(MyStateEnum.ordinal()). - * The rollback will call rollbackState() for each state that was executed, in reverse order. + *

    + * The procedure implementor must have an enum of 'states', describing the various step of the + * procedure. Once the procedure is running, the procedure-framework will call executeFromState() + * using the 'state' provided by the user. The first call to executeFromState() will be performed + * with 'state = null'. The implementor can jump between states using + * setNextState(MyStateEnum.ordinal()). The rollback will call rollbackState() for each state that + * was executed, in reverse order. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public abstract class StateMachineProcedure - extends Procedure { +public abstract class StateMachineProcedure extends Procedure { private static final Logger LOG = LoggerFactory.getLogger(StateMachineProcedure.class); private static final int EOF_STATE = Integer.MIN_VALUE; @@ -71,18 +69,17 @@ protected final int getCycles() { private int previousState; public enum Flow { - HAS_MORE_STATE, - NO_MORE_STATE, + HAS_MORE_STATE, NO_MORE_STATE, } /** * called to perform a single step of the specified 'state' of the procedure * @param state state to execute - * @return Flow.NO_MORE_STATE if the procedure is completed, - * Flow.HAS_MORE_STATE if there is another step. + * @return Flow.NO_MORE_STATE if the procedure is completed, Flow.HAS_MORE_STATE if there is + * another step. */ protected abstract Flow executeFromState(TEnvironment env, TState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException; + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException; /** * called to perform the rollback of the specified state @@ -90,7 +87,7 @@ protected abstract Flow executeFromState(TEnvironment env, TState state) * @throws IOException temporary failure, the rollback will retry later */ protected abstract void rollbackState(TEnvironment env, TState state) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** * Convert an ordinal (or state id) to an Enum (or more descriptive) state object. @@ -122,9 +119,9 @@ protected void setNextState(final TState state) { } /** - * By default, the executor will try ro run all the steps of the procedure start to finish. - * Return true to make the executor yield between execution steps to - * give other procedures time to run their steps. + * By default, the executor will try ro run all the steps of the procedure start to finish. Return + * true to make the executor yield between execution steps to give other procedures time to run + * their steps. * @param state the state we are going to execute next. * @return Return true if the executor should yield before the execution of the specified step. * Defaults to return false. @@ -137,8 +134,8 @@ protected boolean isYieldBeforeExecuteFromState(TEnvironment env, TState state) * Add a child procedure to execute * @param subProcedure the child procedure */ - protected > void addChildProcedure( - @SuppressWarnings("unchecked") T... subProcedure) { + protected > void + addChildProcedure(@SuppressWarnings("unchecked") T... 
subProcedure) { if (subProcedure == null) { return; } @@ -161,7 +158,7 @@ protected > void addChildProcedure( @Override protected Procedure[] execute(final TEnvironment env) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { updateTimestamp(); try { failIfAborted(); @@ -176,7 +173,7 @@ protected Procedure[] execute(final TEnvironment env) } if (LOG.isTraceEnabled()) { - LOG.trace(state + " " + this + "; cycles=" + this.cycles); + LOG.trace(state + " " + this + "; cycles=" + this.cycles); } // Keep running count of cycles if (getStateId(state) != this.previousState) { @@ -197,15 +194,14 @@ protected Procedure[] execute(final TEnvironment env) subProcList = null; return subProcedures; } - return (isWaiting() || isFailed() || !hasMoreState()) ? null : new Procedure[] {this}; + return (isWaiting() || isFailed() || !hasMoreState()) ? null : new Procedure[] { this }; } finally { updateTimestamp(); } } @Override - protected void rollback(final TEnvironment env) - throws IOException, InterruptedException { + protected void rollback(final TEnvironment env) throws IOException, InterruptedException { if (isEofState()) { stateCount--; } @@ -220,7 +216,7 @@ protected void rollback(final TEnvironment env) } protected boolean isEofState() { - return stateCount > 0 && states[stateCount-1] == EOF_STATE; + return stateCount > 0 && states[stateCount - 1] == EOF_STATE; } @Override @@ -253,8 +249,8 @@ protected final void failIfAborted() { } /** - * Used by the default implementation of abort() to know if the current state can be aborted - * and rollback can be triggered. + * Used by the default implementation of abort() to know if the current state can be aborted and + * rollback can be triggered. */ protected boolean isRollbackSupported(final TState state) { return false; @@ -270,7 +266,7 @@ private boolean hasMoreState() { } protected TState getCurrentState() { - return stateCount > 0 ? getState(states[stateCount-1]) : getInitialState(); + return stateCount > 0 ? 
getState(states[stateCount - 1]) : getInitialState(); } /** @@ -307,8 +303,7 @@ protected void toStringState(StringBuilder builder) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { StateMachineProcedureData.Builder data = StateMachineProcedureData.newBuilder(); for (int i = 0; i < stateCount; ++i) { data.addState(states[i]); @@ -317,8 +312,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { StateMachineProcedureData data = serializer.deserialize(StateMachineProcedureData.class); stateCount = data.getStateCount(); if (stateCount > 0) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java index b58b571a9345..4d0d8941dedf 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java index fc917b6f36ed..a95fba7160ad 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ class TimeoutExecutorThread extends StoppableThread { private final DelayQueue queue = new DelayQueue<>(); public TimeoutExecutorThread(ProcedureExecutor executor, ThreadGroup group, - String name) { + String name) { super(group, name); setDaemon(true); this.executor = executor; @@ -53,8 +53,7 @@ public void sendStopSignal() { @Override public void run() { while (executor.isRunning()) { - final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, 20, - TimeUnit.SECONDS); + final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, 20, TimeUnit.SECONDS); if (task == null || task == DelayedUtil.DELAYED_POISON) { // the executor may be shutting down, // and the task is just the shutdown request diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java index b8ddad21866e..7a15ebfc494c 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
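The StateMachineProcedure javadoc reflowed a few hunks above describes the contract: an enum of states, executeFromState() driven by the framework, setNextState() to advance, Flow.HAS_MORE_STATE/NO_MORE_STATE as the return value, and rollbackState() invoked in reverse order on failure. A skeletal subclass along those lines might look as follows; the environment type MyEnv is hypothetical, and the getState/getStateId/getInitialState helpers are assumed to be the remaining abstract methods of the surrounding class.

import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;

// MyEnv is a hypothetical stand-in for whatever environment type the executor supplies.
class MyEnv {
}

/** Hypothetical three-step procedure illustrating the state-machine contract described above. */
public class CreateWidgetProcedure
  extends StateMachineProcedure<MyEnv, CreateWidgetProcedure.State> {

  public enum State {
    PREPARE, CREATE, FINISH
  }

  @Override
  protected Flow executeFromState(MyEnv env, State state) {
    if (state == null) {
      state = getInitialState(); // the javadoc above notes the first call may pass null
    }
    switch (state) {
      case PREPARE:
        // validate input, then advance to the next state
        setNextState(State.CREATE);
        return Flow.HAS_MORE_STATE;
      case CREATE:
        // do the actual work, then advance
        setNextState(State.FINISH);
        return Flow.HAS_MORE_STATE;
      case FINISH:
      default:
        // no further states: the procedure is complete
        return Flow.NO_MORE_STATE;
    }
  }

  @Override
  protected void rollbackState(MyEnv env, State state) {
    // called once per executed state, in reverse order, when the procedure fails
  }

  @Override
  protected State getState(int stateId) {
    return State.values()[stateId];
  }

  @Override
  protected int getStateId(State state) {
    return state.ordinal();
  }

  @Override
  protected State getInitialState() {
    return State.PREPARE;
  }
}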
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java index aba71b95d6da..ec0915063185 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java index 7a9ea1b0d314..9d668dc1688b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java index 8fbc1473ed7e..8a4dd403cd20 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2.store; import java.io.IOException; - import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java index c1eaa73230fc..1010e0e5dc18 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2.store; import java.io.IOException; - import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -85,9 +84,8 @@ public interface ProcedureIterator { void reset(); /** - * Returns true if the iterator has more elements. 
- * (In other words, returns true if next() would return a Procedure - * rather than throwing an exception.) + * Returns true if the iterator has more elements. (In other words, returns true if next() would + * return a Procedure rather than throwing an exception.) * @return true if the iterator has more procedures */ boolean hasNext(); @@ -135,8 +133,8 @@ public interface ProcedureLoader { void load(ProcedureIterator procIter) throws IOException; /** - * Called by the ProcedureStore.load() in case we have procedures not-ready to be added to - * the executor, which probably means they are corrupted since some information/link is missing. + * Called by the ProcedureStore.load() in case we have procedures not-ready to be added to the + * executor, which probably means they are corrupted since some information/link is missing. * @param procIter iterator over the procedures not ready to be added to the executor, corrupted */ void handleCorrupted(ProcedureIterator procIter) throws IOException; @@ -178,8 +176,8 @@ public interface ProcedureLoader { int getNumThreads(); /** - * Set the number of procedure running. - * This can be used, for example, by the store to know how long to wait before a sync. + * Set the number of procedure running. This can be used, for example, by the store to know how + * long to wait before a sync. * @return how many procedures are running (may not be same as count). */ int setRunningProcedureCount(int count); @@ -201,54 +199,45 @@ public interface ProcedureLoader { void load(ProcedureLoader loader) throws IOException; /** - * When a procedure is submitted to the executor insert(proc, null) will be called. - * 'proc' has a 'RUNNABLE' state and the initial information required to start up. - * - * When a procedure is executed and it returns children insert(proc, subprocs) will be called. - * 'proc' has a 'WAITING' state and an update state. - * 'subprocs' are the children in 'RUNNABLE' state with the initial information. - * + * When a procedure is submitted to the executor insert(proc, null) will be called. 'proc' has a + * 'RUNNABLE' state and the initial information required to start up. When a procedure is executed + * and it returns children insert(proc, subprocs) will be called. 'proc' has a 'WAITING' state and + * an update state. 'subprocs' are the children in 'RUNNABLE' state with the initial information. * @param proc the procedure to serialize and write to the store. * @param subprocs the newly created child of the proc. */ void insert(Procedure proc, Procedure[] subprocs); /** - * Serialize a set of new procedures. - * These procedures are freshly submitted to the executor and each procedure - * has a 'RUNNABLE' state and the initial information required to start up. - * + * Serialize a set of new procedures. These procedures are freshly submitted to the executor and + * each procedure has a 'RUNNABLE' state and the initial information required to start up. * @param procs the procedures to serialize and write to the store. */ void insert(Procedure[] procs); /** - * The specified procedure was executed, - * and the new state should be written to the store. + * The specified procedure was executed, and the new state should be written to the store. * @param proc the procedure to serialize and write to the store. */ void update(Procedure proc); /** - * The specified procId was removed from the executor, - * due to completion, abort or failure. - * The store implementor should remove all the information about the specified procId. 
+ * The specified procId was removed from the executor, due to completion, abort or failure. The + * store implementor should remove all the information about the specified procId. * @param procId the ID of the procedure to remove. */ void delete(long procId); /** - * The parent procedure completed. - * Update the state and mark all the child deleted. + * The parent procedure completed. Update the state and mark all the child deleted. * @param parentProc the parent procedure to serialize and write to the store. * @param subProcIds the IDs of the sub-procedure to remove. */ void delete(Procedure parentProc, long[] subProcIds); /** - * The specified procIds were removed from the executor, - * due to completion, abort or failure. - * The store implementor should remove all the information about the specified procIds. + * The specified procIds were removed from the executor, due to completion, abort or failure. The + * store implementor should remove all the information about the specified procIds. * @param procIds the IDs of the procedures to remove. * @param offset the array offset from where to start to delete * @param count the number of IDs to delete diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java index a5c04fab200c..be0a148d6379 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,14 +27,13 @@ @InterfaceAudience.Private public abstract class ProcedureStoreBase implements ProcedureStore { private final CopyOnWriteArrayList listeners = - new CopyOnWriteArrayList<>(); + new CopyOnWriteArrayList<>(); private final AtomicBoolean running = new AtomicBoolean(false); /** - * Change the state to 'isRunning', - * returns true if the store state was changed, - * false if the store was already in that state. + * Change the state to 'isRunning', returns true if the store state was changed, false if the + * store was already in that state. * @param isRunning the state to set. * @return true if the store state was changed, otherwise false. */ diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java index 4e615b971d8a..7fe234a0f5b7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
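The ProcedureStore javadoc above spells out the write-path contract: insert(proc, null) on submission, insert(proc, subprocs) when a step spawns children, update(proc) after each executed step, and delete(...) once a procedure completes, aborts or fails. A toy in-memory store, shown only to make that call sequence concrete (it is not the HBase interface):

import java.util.HashMap;
import java.util.Map;

/** Toy write-path log illustrating the store contract described above. */
public class ToyProcedureStore {
  private final Map<Long, String> state = new HashMap<>();

  /** Submission: the procedure is RUNNABLE with its initial information. */
  public void insert(long procId) {
    state.put(procId, "RUNNABLE");
  }

  /** A step produced children: the parent becomes WAITING, the children start RUNNABLE. */
  public void insertChildren(long parentId, long... childIds) {
    state.put(parentId, "WAITING");
    for (long child : childIds) {
      state.put(child, "RUNNABLE");
    }
  }

  /** A step completed: persist the procedure's updated state. */
  public void update(long procId, String newState) {
    state.put(procId, newState);
  }

  /** Completion, abort or failure: forget everything about the procedure. */
  public void delete(long procId) {
    state.remove(procId);
  }

  public static void main(String[] args) {
    ToyProcedureStore store = new ToyProcedureStore();
    store.insert(1);                 // submitted
    store.insertChildren(1, 2, 3);   // a step returned two children
    store.update(2, "SUCCESS");      // a child finished a step
    store.delete(2);                 // children removed as they complete
    store.delete(3);
    store.delete(1);                 // parent removed once the children are gone
    System.out.println(store.state); // {}
  }
}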
See the NOTICE file * distributed with this work for additional information @@ -86,7 +86,7 @@ private ProcedureTree(Map procMap) { } checkOrphan(procMap); Comparator cmp = - (p1, p2) -> Long.compare(p1.getProto().getProcId(), p2.getProto().getProcId()); + (p1, p2) -> Long.compare(p1.getProto().getProcId(), p2.getProto().getProcId()); Collections.sort(validProcs, cmp); Collections.sort(corruptedProcs, cmp); } @@ -109,7 +109,7 @@ private List buildTree(Map procMap) { } private void collectStackId(Entry entry, Map> stackId2Proc, - MutableInt maxStackId) { + MutableInt maxStackId) { if (LOG.isDebugEnabled()) { LOG.debug("Procedure {} stack ids={}", entry, entry.proc.getStackIdList()); } @@ -124,7 +124,7 @@ private void collectStackId(Entry entry, Map> stackId2Proc, } private void addAllToCorruptedAndRemoveFromProcMap(Entry entry, - Map remainingProcMap) { + Map remainingProcMap) { corruptedProcs.add(new ProtoAndProcedure(entry.proc)); remainingProcMap.remove(entry.proc.getProcId()); for (Entry e : entry.subProcs) { @@ -167,8 +167,9 @@ private void checkReady(Entry rootEntry, Map remainingProcMap) { rootEntry); valid = false; } else if (entries.size() > 1) { - LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," + - " root procedure is {}", entries, i, maxStackId, rootEntry); + LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," + + " root procedure is {}", + entries, i, maxStackId, rootEntry); valid = false; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java index 0cdc48041003..593f1f967e3f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java index 98416a527b8e..fc82cc88727b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -271,7 +271,7 @@ public void unsetPartialFlag() { */ public ProcedureProtos.ProcedureStoreTracker.TrackerNode convert() { ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builder = - ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder(); + ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder(); builder.setStartId(start); for (int i = 0; i < modified.length; ++i) { builder.addUpdated(modified[i]); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java index dc9d16c41f8e..a47b2664a9e7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java index 3436e8b76697..dc33ab8d91a7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,10 +32,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; /** - * Keeps track of live procedures. - * - * It can be used by the ProcedureStore to identify which procedures are already - * deleted/completed to avoid the deserialization step on restart + * Keeps track of live procedures. It can be used by the ProcedureStore to identify which procedures + * are already deleted/completed to avoid the deserialization step on restart * @deprecated Since 2.3.0, will be removed in 4.0.0. Keep here only for rolling upgrading, now we * use the new region based procedure store. */ @@ -48,29 +46,30 @@ class ProcedureStoreTracker { private final TreeMap map = new TreeMap<>(); /** - * If true, do not remove bits corresponding to deleted procedures. Note that this can result - * in huge bitmaps overtime. - * Currently, it's set to true only when building tracker state from logs during recovery. During - * recovery, if we are sure that a procedure has been deleted, reading its old update entries - * can be skipped. + * If true, do not remove bits corresponding to deleted procedures. Note that this can result in + * huge bitmaps overtime. Currently, it's set to true only when building tracker state from logs + * during recovery. During recovery, if we are sure that a procedure has been deleted, reading its + * old update entries can be skipped. */ private boolean keepDeletes = false; /** - * If true, it means tracker has incomplete information about the active/deleted procedures. - * It's set to true only when recovering from old logs. 
See {@link #isDeleted(long)} docs to - * understand it's real use. + * If true, it means tracker has incomplete information about the active/deleted procedures. It's + * set to true only when recovering from old logs. See {@link #isDeleted(long)} docs to understand + * it's real use. */ boolean partial = false; private long minModifiedProcId = Long.MAX_VALUE; private long maxModifiedProcId = Long.MIN_VALUE; - public enum DeleteState { YES, NO, MAYBE } + public enum DeleteState { + YES, NO, MAYBE + } public void resetToProto(ProcedureProtos.ProcedureStoreTracker trackerProtoBuf) { reset(); - for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode : - trackerProtoBuf.getNodeList()) { + for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode : trackerProtoBuf + .getNodeList()) { final BitSetNode node = new BitSetNode(protoNode); map.put(node.getStart(), node); } @@ -182,6 +181,7 @@ public void setMinMaxModifiedProcIds(long min, long max) { this.minModifiedProcId = min; this.maxModifiedProcId = max; } + /** * This method is used when restarting where we need to rebuild the ProcedureStoreTracker. The * {@link #delete(long)} method above assume that the {@link BitSetNode} exists, but when restart @@ -236,8 +236,8 @@ private void setDeleteIf(ProcedureStoreTracker tracker, * @see #setDeletedIfModifiedInBoth(ProcedureStoreTracker) */ public void setDeletedIfDeletedByThem(ProcedureStoreTracker tracker) { - setDeleteIf(tracker, (node, procId) -> node == null || !node.contains(procId) || - node.isDeleted(procId) == DeleteState.YES); + setDeleteIf(tracker, (node, procId) -> node == null || !node.contains(procId) + || node.isDeleted(procId) == DeleteState.YES); } /** @@ -288,16 +288,15 @@ public void reset() { public boolean isModified(long procId) { final Map.Entry entry = map.floorEntry(procId); - return entry != null && entry.getValue().contains(procId) && - entry.getValue().isModified(procId); + return entry != null && entry.getValue().contains(procId) + && entry.getValue().isModified(procId); } /** * If {@link #partial} is false, returns state from the bitmap. If no state is found for - * {@code procId}, returns YES. - * If partial is true, tracker doesn't have complete view of system state, so it returns MAYBE - * if there is no update for the procedure or if it doesn't have a state in bitmap. Otherwise, - * returns state from the bitmap. + * {@code procId}, returns YES. If partial is true, tracker doesn't have complete view of system + * state, so it returns MAYBE if there is no update for the procedure or if it doesn't have a + * state in bitmap. Otherwise, returns state from the bitmap. */ public DeleteState isDeleted(long procId) { Map.Entry entry = map.floorEntry(procId); @@ -374,12 +373,12 @@ public boolean isAllModified() { */ public long[] getAllActiveProcIds() { return map.values().stream().map(BitSetNode::getActiveProcIds).filter(p -> p.length > 0) - .flatMapToLong(LongStream::of).toArray(); + .flatMapToLong(LongStream::of).toArray(); } /** - * Clears the list of updated procedure ids. This doesn't affect global list of active - * procedure ids. + * Clears the list of updated procedure ids. This doesn't affect global list of active procedure + * ids. */ public void resetModified() { for (Map.Entry entry : map.entrySet()) { @@ -472,12 +471,11 @@ public void dump() { } // ======================================================================== - // Convert to/from Protocol Buffer. + // Convert to/from Protocol Buffer. 
// ======================================================================== /** - * Builds - * org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker + * Builds org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker * protocol buffer from current state. */ public ProcedureProtos.ProcedureStoreTracker toProto() throws IOException { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java index 947d5bd9d650..6734f50695a7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ public ProcedureWALFile(final FileSystem fs, final FileStatus logStatus) { tracker.setPartialFlag(true); } - public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, - long startPos, long timestamp) { + public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, long startPos, + long timestamp) { this.fs = fs; this.header = header; this.logFile = logFile; @@ -205,7 +205,7 @@ public boolean equals(Object o) { return false; } - return compareTo((ProcedureWALFile)o) == 0; + return compareTo((ProcedureWALFile) o) == 0; } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java index bc60584126fb..1dafb3cdac19 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
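The ProcedureStoreTracker hunks above describe isDeleted(): with a complete view the answer comes from the bitmap, defaulting to YES when nothing is recorded, but while the tracker is still partial during log replay an unknown procedure can only be answered with MAYBE. A simplified sketch of that decision, assuming a plain map instead of the real BitSetNode bitmap:

import java.util.HashMap;
import java.util.Map;

/** Simplified delete-state lookup mirroring the semantics described above. */
public class ToyTracker {
  public enum DeleteState {
    YES, NO, MAYBE
  }

  /** procId -> deleted? (absent means no information recorded) */
  private final Map<Long, Boolean> known = new HashMap<>();
  /** true while rebuilding from old logs, i.e. while the view is incomplete. */
  private boolean partial = true;

  public void setDeleted(long procId, boolean deleted) {
    known.put(procId, deleted);
  }

  public void setPartialFlag(boolean partial) {
    this.partial = partial;
  }

  public DeleteState isDeleted(long procId) {
    Boolean deleted = known.get(procId);
    if (deleted == null) {
      // No record: with a complete view the procedure must be gone,
      // with a partial view we simply do not know yet.
      return partial ? DeleteState.MAYBE : DeleteState.YES;
    }
    return deleted ? DeleteState.YES : DeleteState.NO;
  }

  public static void main(String[] args) {
    ToyTracker t = new ToyTracker();
    System.out.println(t.isDeleted(42)); // MAYBE: still partial, no info
    t.setPartialFlag(false);
    System.out.println(t.isDeleted(42)); // YES: full view, nothing recorded
    t.setDeleted(42, false);
    System.out.println(t.isDeleted(42)); // NO: known to be live
  }
}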
*/ - package org.apache.hadoop.hbase.procedure2.store.wal; import java.io.IOException; @@ -73,7 +72,8 @@ interface Loader extends ProcedureLoader { void markCorruptedWAL(ProcedureWALFile log, IOException e); } - private ProcedureWALFormat() {} + private ProcedureWALFormat() { + } /** * Load all the procedures in these ProcedureWALFiles, and rebuild the given {@code tracker} if @@ -116,28 +116,17 @@ public static void writeHeader(OutputStream stream, ProcedureWALHeader header) } /* - * +-----------------+ - * | END OF WAL DATA | <---+ - * +-----------------+ | - * | | | - * | Tracker | | - * | | | - * +-----------------+ | - * | version | | - * +-----------------+ | - * | TRAILER_MAGIC | | - * +-----------------+ | - * | offset |-----+ - * +-----------------+ + * +-----------------+ | END OF WAL DATA | <---+ +-----------------+ | | | | | Tracker | | | | | + * +-----------------+ | | version | | +-----------------+ | | TRAILER_MAGIC | | + * +-----------------+ | | offset |-----+ +-----------------+ */ public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker) throws IOException { long offset = stream.getPos(); // Write EOF Entry - ProcedureWALEntry.newBuilder() - .setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF) - .build().writeDelimitedTo(stream); + ProcedureWALEntry.newBuilder().setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF).build() + .writeDelimitedTo(stream); // Write Tracker tracker.toProto().writeDelimitedTo(stream); @@ -148,8 +137,7 @@ public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker return stream.getPos() - offset; } - public static ProcedureWALHeader readHeader(InputStream stream) - throws IOException { + public static ProcedureWALHeader readHeader(InputStream stream) throws IOException { ProcedureWALHeader header; try { header = ProcedureWALHeader.parseDelimitedFrom(stream); @@ -162,8 +150,8 @@ public static ProcedureWALHeader readHeader(InputStream stream) } if (header.getVersion() < 0 || header.getVersion() != HEADER_VERSION) { - throw new InvalidWALDataException("Invalid Header version. got " + header.getVersion() + - " expected " + HEADER_VERSION); + throw new InvalidWALDataException( + "Invalid Header version. got " + header.getVersion() + " expected " + HEADER_VERSION); } if (header.getType() < 0 || header.getType() > LOG_TYPE_MAX_VALID) { @@ -185,14 +173,14 @@ public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long sta stream.seek(trailerPos); int version = stream.read(); if (version != TRAILER_VERSION) { - throw new InvalidWALDataException("Invalid Trailer version. got " + version + - " expected " + TRAILER_VERSION); + throw new InvalidWALDataException( + "Invalid Trailer version. got " + version + " expected " + TRAILER_VERSION); } long magic = StreamUtils.readLong(stream); if (magic != TRAILER_MAGIC) { - throw new InvalidWALDataException("Invalid Trailer magic. got " + magic + - " expected " + TRAILER_MAGIC); + throw new InvalidWALDataException( + "Invalid Trailer magic. 
got " + magic + " expected " + TRAILER_MAGIC); } long trailerOffset = StreamUtils.readLong(stream); @@ -203,10 +191,8 @@ public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long sta throw new InvalidWALDataException("Invalid Trailer begin"); } - ProcedureWALTrailer trailer = ProcedureWALTrailer.newBuilder() - .setVersion(version) - .setTrackerPos(stream.getPos()) - .build(); + ProcedureWALTrailer trailer = + ProcedureWALTrailer.newBuilder().setVersion(version).setTrackerPos(stream.getPos()).build(); return trailer; } @@ -214,8 +200,8 @@ public static ProcedureWALEntry readEntry(InputStream stream) throws IOException return ProcedureWALEntry.parseDelimitedFrom(stream); } - public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, - Procedure proc, Procedure[] subprocs) throws IOException { + public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, Procedure proc, + Procedure[] subprocs) throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(type); builder.addProcedure(ProcedureUtil.convertToProtoProcedure(proc)); @@ -227,8 +213,7 @@ public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, builder.build().writeDelimitedTo(slot); } - public static void writeInsert(ByteSlot slot, Procedure proc) - throws IOException { + public static void writeInsert(ByteSlot slot, Procedure proc) throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_INIT, proc, null); } @@ -237,13 +222,11 @@ public static void writeInsert(ByteSlot slot, Procedure proc, Procedure[] writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_INSERT, proc, subprocs); } - public static void writeUpdate(ByteSlot slot, Procedure proc) - throws IOException { + public static void writeUpdate(ByteSlot slot, Procedure proc) throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_UPDATE, proc, null); } - public static void writeDelete(ByteSlot slot, long procId) - throws IOException { + public static void writeDelete(ByteSlot slot, long procId) throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(ProcedureWALEntry.Type.PROCEDURE_WAL_DELETE); builder.setProcId(procId); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java index 31150cad8fb2..f12abeb3ee47 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,11 +56,10 @@ class ProcedureWALFormatReader { private final ProcedureWALFormat.Loader loader; /** - * Global tracker that will be used by the WALProcedureStore after load. - * If the last WAL was closed cleanly we already have a full tracker ready to be used. - * If the last WAL was truncated (e.g. master killed) the tracker will be empty - * and the 'partial' flag will be set. In this case, on WAL replay we are going - * to rebuild the tracker. + * Global tracker that will be used by the WALProcedureStore after load. 
If the last WAL was + * closed cleanly we already have a full tracker ready to be used. If the last WAL was truncated + * (e.g. master killed) the tracker will be empty and the 'partial' flag will be set. In this + * case, on WAL replay we are going to rebuild the tracker. */ private final ProcedureStoreTracker tracker; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java index 41fcc186ad34..f70f0666a95f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,13 +67,11 @@ public ProcedureWALPrettyPrinter() { /** * Reads a log file and outputs its contents. - * - * @param conf HBase configuration relevant to this log file - * @param p path of the log file to be read - * @throws IOException IOException + * @param conf HBase configuration relevant to this log file + * @param p path of the log file to be read + * @throws IOException IOException */ - public void processFile(final Configuration conf, final Path p) - throws IOException { + public void processFile(final Configuration conf, final Path p) throws IOException { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { @@ -121,8 +119,7 @@ public void processProcedureWALFile(ProcedureWALFile log) throws IOException { } } catch (IOException e) { out.println("got an exception while reading the procedure WAL " + e.getMessage()); - } - finally { + } finally { log.close(); } } @@ -150,13 +147,10 @@ private void printHeader(ProcedureWALHeader header) { } /** - * Pass one or more log file names and formatting options and it will dump out - * a text version of the contents on stdout. - * - * @param args - * Command line arguments - * @throws IOException - * Thrown upon file system errors etc. + * Pass one or more log file names and formatting options and it will dump out a text version of + * the contents on stdout. + * @param args Command line arguments + * @throws IOException Thrown upon file system errors etc. 
*/ @Override public int run(final String[] args) throws IOException { @@ -176,19 +170,19 @@ public int run(final String[] args) throws IOException { if (files.isEmpty() || cmd.hasOption("h")) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("ProcedureWALPrettyPrinter ", options, true); - return(-1); + return (-1); } } catch (ParseException e) { LOG.error("Failed to parse commandLine arguments", e); HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("ProcedureWALPrettyPrinter ", options, true); - return(-1); + return (-1); } // get configuration, file system, and process the given files for (Path file : files) { processFile(getConf(), file); } - return(0); + return (0); } public static void main(String[] args) throws Exception { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java index 5e1983f46968..1e3423e56a27 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java index 29bda4732d0f..632c98061796 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -101,8 +101,7 @@ * will first be initialized to the oldest file's tracker(which is stored in the trailer), using the * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it * with the tracker of every newer wal files, using the - * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. - * If we find out + * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we find out * that all the modified procedures for the oldest wal file are modified or deleted in newer wal * files, then we can delete it. 
This is because that, every time we call * {@link ProcedureStore#insert(Procedure[])} or {@link ProcedureStore#update(Procedure)}, we will @@ -121,33 +120,31 @@ public class WALProcedureStore extends ProcedureStoreBase { /** Used to construct the name of the log directory for master procedures */ public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcWALs"; - public static final String WAL_COUNT_WARN_THRESHOLD_CONF_KEY = - "hbase.procedure.store.wal.warn.threshold"; + "hbase.procedure.store.wal.warn.threshold"; private static final int DEFAULT_WAL_COUNT_WARN_THRESHOLD = 10; public static final String EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY = - "hbase.procedure.store.wal.exec.cleanup.on.load"; + "hbase.procedure.store.wal.exec.cleanup.on.load"; private static final boolean DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY = true; public static final String MAX_RETRIES_BEFORE_ROLL_CONF_KEY = - "hbase.procedure.store.wal.max.retries.before.roll"; + "hbase.procedure.store.wal.max.retries.before.roll"; private static final int DEFAULT_MAX_RETRIES_BEFORE_ROLL = 3; public static final String WAIT_BEFORE_ROLL_CONF_KEY = - "hbase.procedure.store.wal.wait.before.roll"; + "hbase.procedure.store.wal.wait.before.roll"; private static final int DEFAULT_WAIT_BEFORE_ROLL = 500; - public static final String ROLL_RETRIES_CONF_KEY = - "hbase.procedure.store.wal.max.roll.retries"; + public static final String ROLL_RETRIES_CONF_KEY = "hbase.procedure.store.wal.max.roll.retries"; private static final int DEFAULT_ROLL_RETRIES = 3; public static final String MAX_SYNC_FAILURE_ROLL_CONF_KEY = - "hbase.procedure.store.wal.sync.failure.roll.max"; + "hbase.procedure.store.wal.sync.failure.roll.max"; private static final int DEFAULT_MAX_SYNC_FAILURE_ROLL = 3; public static final String PERIODIC_ROLL_CONF_KEY = - "hbase.procedure.store.wal.periodic.roll.msec"; + "hbase.procedure.store.wal.periodic.roll.msec"; private static final int DEFAULT_PERIODIC_ROLL = 60 * 60 * 1000; // 1h public static final String SYNC_WAIT_MSEC_CONF_KEY = "hbase.procedure.store.wal.sync.wait.msec"; @@ -238,8 +235,8 @@ public float getSyncedPerSec() { public WALProcedureStore(Configuration conf, LeaseRecovery leaseRecovery) throws IOException { this(conf, new Path(CommonFSUtils.getWALRootDir(conf), MASTER_PROCEDURE_LOGDIR), - new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_OLDLOGDIR_NAME), - leaseRecovery); + new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_OLDLOGDIR_NAME), + leaseRecovery); } public WALProcedureStore(final Configuration conf, final Path walDir, final Path walArchiveDir, @@ -249,8 +246,8 @@ public WALProcedureStore(final Configuration conf, final Path walDir, final Path this.walDir = walDir; this.walArchiveDir = walArchiveDir; this.fs = CommonFSUtils.getWALFileSystem(conf); - this.enforceStreamCapability = conf.getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, - true); + this.enforceStreamCapability = + conf.getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true); // Create the log directory for the procedure store if (!fs.exists(walDir)) { @@ -291,9 +288,9 @@ public void start(int numSlots) throws IOException { // Tunings walCountWarnThreshold = - conf.getInt(WAL_COUNT_WARN_THRESHOLD_CONF_KEY, DEFAULT_WAL_COUNT_WARN_THRESHOLD); + conf.getInt(WAL_COUNT_WARN_THRESHOLD_CONF_KEY, DEFAULT_WAL_COUNT_WARN_THRESHOLD); maxRetriesBeforeRoll = - conf.getInt(MAX_RETRIES_BEFORE_ROLL_CONF_KEY, DEFAULT_MAX_RETRIES_BEFORE_ROLL); + conf.getInt(MAX_RETRIES_BEFORE_ROLL_CONF_KEY, 
DEFAULT_MAX_RETRIES_BEFORE_ROLL); maxSyncFailureRoll = conf.getInt(MAX_SYNC_FAILURE_ROLL_CONF_KEY, DEFAULT_MAX_SYNC_FAILURE_ROLL); waitBeforeRoll = conf.getInt(WAIT_BEFORE_ROLL_CONF_KEY, DEFAULT_WAIT_BEFORE_ROLL); rollRetries = conf.getInt(ROLL_RETRIES_CONF_KEY, DEFAULT_ROLL_RETRIES); @@ -303,8 +300,8 @@ public void start(int numSlots) throws IOException { useHsync = conf.getBoolean(USE_HSYNC_CONF_KEY, DEFAULT_USE_HSYNC); // WebUI - syncMetricsQueue = new CircularFifoQueue<>( - conf.getInt(STORE_WAL_SYNC_STATS_COUNT, DEFAULT_SYNC_STATS_COUNT)); + syncMetricsQueue = + new CircularFifoQueue<>(conf.getInt(STORE_WAL_SYNC_STATS_COUNT, DEFAULT_SYNC_STATS_COUNT)); // Init sync thread syncThread = new Thread("WALProcedureStoreSyncThread") { @@ -329,8 +326,8 @@ public void stop(final boolean abort) { return; } - LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort + - (isSyncAborted() ? " (self aborting)" : "")); + LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort + + (isSyncAborted() ? " (self aborting)" : "")); sendStopSignal(); if (!isSyncAborted()) { try { @@ -350,7 +347,7 @@ public void stop(final boolean abort) { // Close the old logs // they should be already closed, this is just in case the load fails // and we call start() and then stop() - for (ProcedureWALFile log: logs) { + for (ProcedureWALFile log : logs) { log.close(); } logs.clear(); @@ -405,8 +402,7 @@ public void recoverLease() throws IOException { while (isRunning()) { // Don't sleep before first attempt if (afterFirstAttempt) { - LOG.trace("Sleep {} ms after first lease recovery attempt.", - waitBeforeRoll); + LOG.trace("Sleep {} ms after first lease recovery attempt.", waitBeforeRoll); Threads.sleepWithoutInterrupt(waitBeforeRoll); } else { afterFirstAttempt = true; @@ -552,8 +548,9 @@ public void insert(Procedure proc, Procedure[] subprocs) { } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. - LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: proc=" + - proc + ", subprocs=" + Arrays.toString(subprocs), e); + LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: proc=" + proc + + ", subprocs=" + Arrays.toString(subprocs), + e); throw new RuntimeException(e); } finally { releaseSlot(slot); @@ -581,8 +578,8 @@ public void insert(Procedure[] procs) { } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. 
- LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: " + - Arrays.toString(procs), e); + LOG.error(HBaseMarkers.FATAL, + "Unable to serialize one of the procedure: " + Arrays.toString(procs), e); throw new RuntimeException(e); } finally { releaseSlot(slot); @@ -706,10 +703,12 @@ private void releaseSlot(final ByteSlot slot) { slotsCache.offer(slot); } - private enum PushType { INSERT, UPDATE, DELETE } + private enum PushType { + INSERT, UPDATE, DELETE + } - private long pushData(final PushType type, final ByteSlot slot, - final long procId, final long[] subProcIds) { + private long pushData(final PushType type, final ByteSlot slot, final long procId, + final long[] subProcIds) { if (!isRunning()) { throw new RuntimeException("the store must be running before inserting data"); } @@ -768,8 +767,7 @@ private long pushData(final PushType type, final ByteSlot slot, return logId; } - private void updateStoreTracker(final PushType type, - final long procId, final long[] subProcIds) { + private void updateStoreTracker(final PushType type, final long procId, final long[] subProcIds) { switch (type) { case INSERT: if (subProcIds == null) { @@ -819,8 +817,8 @@ private void syncLoop() throws Throwable { if (LOG.isTraceEnabled()) { float rollTsSec = getMillisFromLastRoll() / 1000.0f; LOG.trace(String.format("Waiting for data. flushed=%s (%s/sec)", - StringUtils.humanSize(totalSynced.get()), - StringUtils.humanSize(totalSynced.get() / rollTsSec))); + StringUtils.humanSize(totalSynced.get()), + StringUtils.humanSize(totalSynced.get() / rollTsSec))); } waitCond.await(getMillisToNextPeriodicRoll(), TimeUnit.MILLISECONDS); @@ -843,9 +841,8 @@ private void syncLoop() throws Throwable { final float syncedPerSec = totalSyncedToStore / rollSec; if (LOG.isTraceEnabled() && (syncWaitMs > 10 || slotIndex < syncMaxSlot)) { LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s (%s/sec)", - StringUtils.humanTimeDiff(syncWaitMs), slotIndex, - StringUtils.humanSize(totalSyncedToStore), - StringUtils.humanSize(syncedPerSec))); + StringUtils.humanTimeDiff(syncWaitMs), slotIndex, + StringUtils.humanSize(totalSyncedToStore), StringUtils.humanSize(syncedPerSec))); } // update webui circular buffers (TODO: get rid of allocations) @@ -933,8 +930,8 @@ protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots sendPostSyncSignal(); if (LOG.isTraceEnabled()) { - LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + - ", flushed=" + StringUtils.humanSize(totalSynced)); + LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + ", flushed=" + + StringUtils.humanSize(totalSynced)); } return totalSynced; } @@ -1007,7 +1004,7 @@ void removeInactiveLogsForTesting() throws Exception { lock.lock(); try { removeInactiveLogs(); - } finally { + } finally { lock.unlock(); } } @@ -1061,11 +1058,8 @@ boolean rollWriter(long logId) throws IOException { assert lock.isHeldByCurrentThread() : "expected to be the lock owner. 
" + lock.isLocked(); ProcedureWALHeader header = ProcedureWALHeader.newBuilder() - .setVersion(ProcedureWALFormat.HEADER_VERSION) - .setType(ProcedureWALFormat.LOG_TYPE_STREAM) - .setMinProcId(storeTracker.getActiveMinProcId()) - .setLogId(logId) - .build(); + .setVersion(ProcedureWALFormat.HEADER_VERSION).setType(ProcedureWALFormat.LOG_TYPE_STREAM) + .setMinProcId(storeTracker.getActiveMinProcId()).setLogId(logId).build(); FSDataOutputStream newStream = null; Path newLogFile = null; @@ -1074,8 +1068,8 @@ boolean rollWriter(long logId) throws IOException { try { FSDataOutputStreamBuilder builder = fs.createFile(newLogFile).overwrite(false); if (builder instanceof DistributedFileSystem.HdfsDataOutputStreamBuilder) { - newStream = ((DistributedFileSystem.HdfsDataOutputStreamBuilder) builder) - .replicate().build(); + newStream = + ((DistributedFileSystem.HdfsDataOutputStreamBuilder) builder).replicate().build(); } else { newStream = builder.build(); } @@ -1091,11 +1085,11 @@ boolean rollWriter(long logId) throws IOException { // to provide. final String durability = useHsync ? StreamCapabilities.HSYNC : StreamCapabilities.HFLUSH; if (enforceStreamCapability && !newStream.hasCapability(durability)) { - throw new IllegalStateException("The procedure WAL relies on the ability to " + durability + - " for proper operation during component failures, but the underlying filesystem does " + - "not support doing so. Please check the config value of '" + USE_HSYNC_CONF_KEY + - "' to set the desired level of robustness and ensure the config value of '" + - CommonFSUtils.HBASE_WAL_DIR + "' points to a FileSystem mount that can provide it."); + throw new IllegalStateException("The procedure WAL relies on the ability to " + durability + + " for proper operation during component failures, but the underlying filesystem does " + + "not support doing so. Please check the config value of '" + USE_HSYNC_CONF_KEY + + "' to set the desired level of robustness and ensure the config value of '" + + CommonFSUtils.HBASE_WAL_DIR + "' points to a FileSystem mount that can provide it."); } try { ProcedureWALFormat.writeHeader(newStream, header); @@ -1120,8 +1114,9 @@ boolean rollWriter(long logId) throws IOException { if (logs.size() == 2) { buildHoldingCleanupTracker(); } else if (logs.size() > walCountWarnThreshold) { - LOG.warn("procedure WALs count={} above the warning threshold {}. check running procedures" + - " to see if something is stuck.", logs.size(), walCountWarnThreshold); + LOG.warn("procedure WALs count={} above the warning threshold {}. check running procedures" + + " to see if something is stuck.", + logs.size(), walCountWarnThreshold); // This is just like what we have done at RS side when there are too many wal files. For RS, // if there are too many wal files, we will find out the wal entries in the oldest file, and // tell the upper layer to flush these regions so the wal entries will be useless and then we @@ -1168,7 +1163,7 @@ private void closeCurrentLogStream(boolean abort) { } // ========================================================================== - // Log Files cleaner helpers + // Log Files cleaner helpers // ========================================================================== private void removeInactiveLogs() throws IOException { // We keep track of which procedures are holding the oldest WAL in 'holdingCleanupTracker'. 
@@ -1254,7 +1249,7 @@ private boolean removeLogFile(final ProcedureWALFile log, final Path walArchiveD } // ========================================================================== - // FileSystem Log Files helpers + // FileSystem Log Files helpers // ========================================================================== public Path getWALDir() { return this.walDir; @@ -1288,13 +1283,13 @@ public boolean accept(Path path) { private static final Comparator FILE_STATUS_ID_COMPARATOR = new Comparator() { - @Override - public int compare(FileStatus a, FileStatus b) { - final long aId = getLogIdFromName(a.getPath().getName()); - final long bId = getLogIdFromName(b.getPath().getName()); - return Long.compare(aId, bId); - } - }; + @Override + public int compare(FileStatus a, FileStatus b) { + final long aId = getLogIdFromName(a.getPath().getName()); + final long bId = getLogIdFromName(b.getPath().getName()); + return Long.compare(aId, bId); + } + }; private FileStatus[] getLogFiles() throws IOException { try { @@ -1400,27 +1395,27 @@ private ProcedureWALFile initOldLog(final FileStatus logFile, final Path walArch } /** - * Parses a directory of WALs building up ProcedureState. - * For testing parse and profiling. + * Parses a directory of WALs building up ProcedureState. For testing parse and profiling. * @param args Include pointer to directory of WAL files for a store instance to parse & load. */ - public static void main(String [] args) throws IOException { + public static void main(String[] args) throws IOException { Configuration conf = HBaseConfiguration.create(); if (args == null || args.length != 1) { System.out.println("ERROR: Empty arguments list; pass path to MASTERPROCWALS_DIR."); System.out.println("Usage: WALProcedureStore MASTERPROCWALS_DIR"); System.exit(-1); } - WALProcedureStore store = new WALProcedureStore(conf, new Path(args[0]), null, - new LeaseRecovery() { - @Override - public void recoverFileLease(FileSystem fs, Path path) throws IOException { - // no-op - } - }); + WALProcedureStore store = + new WALProcedureStore(conf, new Path(args[0]), null, new LeaseRecovery() { + @Override + public void recoverFileLease(FileSystem fs, Path path) throws IOException { + // no-op + } + }); try { store.start(16); - ProcedureExecutor pe = new ProcedureExecutor<>(conf, new Object()/*Pass anything*/, store); + ProcedureExecutor pe = + new ProcedureExecutor<>(conf, new Object()/* Pass anything */, store); pe.init(1, true); } finally { store.stop(true); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java index 3e95de56f255..0a88b3fc2066 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2.util; import java.io.IOException; import java.io.OutputStream; import java.util.Arrays; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Similar to the ByteArrayOutputStream, with the exception that we can prepend an header. - * e.g. you write some data and you want to prepend an header that contains the data len or cksum. - * + * Similar to the ByteArrayOutputStream, with the exception that we can prepend an header. e.g. you + * write some data and you want to prepend an header that contains the data len or cksum. * ByteSlot slot = new ByteSlot(); * // write data * slot.write(...); @@ -78,13 +75,13 @@ public byte[] getBuffer() { public void writeAt(int offset, int b) { head = Math.min(head, offset); - buf[offset] = (byte)b; + buf[offset] = (byte) b; } @Override public void write(int b) { ensureCapacity(size + 1); - buf[size++] = (byte)b; + buf[size++] = (byte) b; } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java index fa796ae97426..32e5f5632bdb 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.DelayQueue; import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -30,7 +29,8 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public final class DelayedUtil { - private DelayedUtil() { } + private DelayedUtil() { + } /** * Add a timeout to a Delay @@ -104,7 +104,7 @@ public static int compareDelayed(final Delayed o1, final Delayed o2) { private static long getTimeout(final Delayed o) { assert o instanceof DelayedWithTimeout : "expected DelayedWithTimeout instance, got " + o; - return ((DelayedWithTimeout)o).getTimeout(); + return ((DelayedWithTimeout) o).getTimeout(); } public static abstract class DelayedObject implements DelayedWithTimeout { @@ -146,7 +146,7 @@ public boolean equals(final Object other) { return false; } - return Objects.equals(getObject(), ((DelayedContainer)other).getObject()); + return Objects.equals(getObject(), ((DelayedContainer) other).getObject()); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java index fddc999bec3c..cddfd94d3da9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,8 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public final class StringUtils { - private StringUtils() {} + private StringUtils() { + } public static String humanTimeDiff(long timeDiff) { if (timeDiff < 1000) { @@ -31,17 +32,17 @@ public static String humanTimeDiff(long timeDiff) { } StringBuilder buf = new StringBuilder(); - long hours = timeDiff / (60*60*1000); - long rem = (timeDiff % (60*60*1000)); - long minutes = rem / (60*1000); - rem = rem % (60*1000); + long hours = timeDiff / (60 * 60 * 1000); + long rem = (timeDiff % (60 * 60 * 1000)); + long minutes = rem / (60 * 1000); + rem = rem % (60 * 1000); float seconds = rem / 1000.0f; - if (hours != 0){ + if (hours != 0) { buf.append(hours); buf.append(" hrs, "); } - if (minutes != 0){ + if (minutes != 0) { buf.append(minutes); buf.append(" mins, "); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java index 6c66a49c2018..cb9b91f69673 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,12 +53,12 @@ private ProcedureTestingUtility() { } public static ProcedureStore createStore(final Configuration conf, final Path dir) - throws IOException { + throws IOException { return createWalStore(conf, dir); } public static WALProcedureStore createWalStore(final Configuration conf, final Path dir) - throws IOException { + throws IOException { return new WALProcedureStore(conf, dir, null, new LeaseRecovery() { @Override public void recoverFileLease(FileSystem fs, Path path) throws IOException { @@ -68,12 +68,12 @@ public void recoverFileLease(FileSystem fs, Path path) throws IOException { } public static void restart(final ProcedureExecutor procExecutor, boolean abort, - boolean startWorkers) throws Exception { + boolean startWorkers) throws Exception { restart(procExecutor, false, true, null, null, null, abort, startWorkers); } public static void restart(final ProcedureExecutor procExecutor, boolean abort) - throws Exception { + throws Exception { restart(procExecutor, false, true, null, null, null, abort, true); } @@ -82,12 +82,12 @@ public static void restart(final ProcedureExecutor procExecutor) th } public static void initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads, - boolean abortOnCorruption) throws IOException { + boolean abortOnCorruption) throws IOException { initAndStartWorkers(procExecutor, numThreads, abortOnCorruption, true); } public static void initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads, - boolean abortOnCorruption, boolean startWorkers) throws IOException { + boolean abortOnCorruption, boolean startWorkers) throws IOException { procExecutor.init(numThreads, abortOnCorruption); if (startWorkers) { procExecutor.startWorkers(); @@ -95,16 +95,16 @@ public static void initAndStartWorkers(ProcedureExecutor procExecutor, int nu } public static void restart(ProcedureExecutor procExecutor, - boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable stopAction, - Callable 
actionBeforeStartWorker, Callable startAction) throws Exception { + boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable stopAction, + Callable actionBeforeStartWorker, Callable startAction) throws Exception { restart(procExecutor, avoidTestKillDuringRestart, failOnCorrupted, stopAction, actionBeforeStartWorker, startAction, false, true); } public static void restart(ProcedureExecutor procExecutor, - boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable stopAction, - Callable actionBeforeStartWorker, Callable startAction, boolean abort, - boolean startWorkers) throws Exception { + boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable stopAction, + Callable actionBeforeStartWorker, Callable startAction, boolean abort, + boolean startWorkers) throws Exception { final ProcedureStore procStore = procExecutor.getStore(); final int storeThreads = procExecutor.getCorePoolSize(); final int execThreads = procExecutor.getCorePoolSize(); @@ -145,12 +145,12 @@ public static void restart(ProcedureExecutor procExecutor, } public static void storeRestart(ProcedureStore procStore, ProcedureStore.ProcedureLoader loader) - throws Exception { + throws Exception { storeRestart(procStore, false, loader); } public static void storeRestart(ProcedureStore procStore, boolean abort, - ProcedureStore.ProcedureLoader loader) throws Exception { + ProcedureStore.ProcedureLoader loader) throws Exception { procStore.stop(abort); procStore.start(procStore.getNumThreads()); procStore.recoverLease(); @@ -158,7 +158,7 @@ public static void storeRestart(ProcedureStore procStore, boolean abort, } public static LoadCounter storeRestartAndAssert(ProcedureStore procStore, long maxProcId, - long runnableCount, int completedCount, int corruptedCount) throws Exception { + long runnableCount, int completedCount, int corruptedCount) throws Exception { final LoadCounter loader = new LoadCounter(); storeRestart(procStore, loader); assertEquals(maxProcId, loader.getMaxProcId()); @@ -175,19 +175,19 @@ private static void createExecutorTesting(final ProcedureExecutor p } public static void setKillIfHasParent(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { createExecutorTesting(procExecutor); procExecutor.testing.killIfHasParent = value; } public static void setKillIfSuspended(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { createExecutorTesting(procExecutor); procExecutor.testing.killIfSuspended = value; } public static void setKillBeforeStoreUpdate(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { createExecutorTesting(procExecutor); procExecutor.testing.killBeforeStoreUpdate = value; LOG.warn("Set Kill before store update to: " + procExecutor.testing.killBeforeStoreUpdate); @@ -195,7 +195,7 @@ public static void setKillBeforeStoreUpdate(ProcedureExecutor procE } public static void setToggleKillBeforeStoreUpdate(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { createExecutorTesting(procExecutor); procExecutor.testing.toggleKillBeforeStoreUpdate = value; assertSingleExecutorForKillTests(procExecutor); @@ -216,27 +216,27 @@ public static void toggleKillAfterStoreUpdate(ProcedureExecutor pro } public static void setKillAndToggleBeforeStoreUpdate(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, value); ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, value); assertSingleExecutorForKillTests(procExecutor); } 
private static void - assertSingleExecutorForKillTests(final ProcedureExecutor procExecutor) { + assertSingleExecutorForKillTests(final ProcedureExecutor procExecutor) { if (procExecutor.testing == null) { return; } - if (procExecutor.testing.killBeforeStoreUpdate || - procExecutor.testing.toggleKillBeforeStoreUpdate) { + if (procExecutor.testing.killBeforeStoreUpdate + || procExecutor.testing.toggleKillBeforeStoreUpdate) { assertEquals("expected only one executor running during test with kill/restart", 1, procExecutor.getCorePoolSize()); } } public static long submitAndWait(Configuration conf, TEnv env, Procedure proc) - throws IOException { + throws IOException { NoopProcedureStore procStore = new NoopProcedureStore(); ProcedureExecutor procExecutor = new ProcedureExecutor<>(conf, env, procStore); procStore.start(1); @@ -254,14 +254,14 @@ public static long submitAndWait(ProcedureExecutor procExecutor, Pr } public static long submitAndWait(ProcedureExecutor procExecutor, Procedure proc, - final long nonceGroup, final long nonce) { + final long nonceGroup, final long nonce) { long procId = submitProcedure(procExecutor, proc, nonceGroup, nonce); waitProcedure(procExecutor, procId); return procId; } public static long submitProcedure(ProcedureExecutor procExecutor, Procedure proc, - final long nonceGroup, final long nonce) { + final long nonceGroup, final long nonce) { final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce); long procId = procExecutor.registerNonce(nonceKey); assertFalse(procId >= 0); @@ -307,7 +307,7 @@ public static void waitNoProcedureRunning(ProcedureExecutor procExe } public static void assertProcNotYetCompleted(ProcedureExecutor procExecutor, - long procId) { + long procId) { assertFalse("expected a running proc", procExecutor.isFinished(procId)); assertEquals(null, procExecutor.getResult(procId)); } @@ -323,7 +323,7 @@ public static void assertProcNotFailed(final Procedure result) { } public static Throwable assertProcFailed(final ProcedureExecutor procExecutor, - final long procId) { + final long procId) { Procedure result = procExecutor.getResult(procId); assertTrue("expected procedure result", result != null); return assertProcFailed(result); @@ -372,17 +372,18 @@ public static Throwable getExceptionCause(final Procedure procInfo) { * This is a good test for finding state that needs persisting and steps that are not idempotent. 
*/ public static void testRecoveryAndDoubleExecution(final ProcedureExecutor procExec, - final long procId) throws Exception { + final long procId) throws Exception { testRecoveryAndDoubleExecution(procExec, procId, false); } public static void testRecoveryAndDoubleExecution(final ProcedureExecutor procExec, - final long procId, final boolean expectFailure) throws Exception { + final long procId, final boolean expectFailure) throws Exception { testRecoveryAndDoubleExecution(procExec, procId, expectFailure, null); } public static void testRecoveryAndDoubleExecution(final ProcedureExecutor procExec, - final long procId, final boolean expectFailure, final Runnable customRestart) throws Exception { + final long procId, final boolean expectFailure, final Runnable customRestart) + throws Exception { Procedure proc = procExec.getProcedure(procId); waitProcedure(procExec, procId); assertEquals(false, procExec.isRunning()); @@ -411,7 +412,7 @@ public NoopProcedure() { @Override protected Procedure[] execute(TEnv env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { return null; } @@ -434,7 +435,7 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws } public static class NoopStateMachineProcedure - extends StateMachineProcedure { + extends StateMachineProcedure { private TState initialState; private TEnv env; @@ -448,7 +449,7 @@ public NoopStateMachineProcedure(TEnv env, TState initialState) { @Override protected Flow executeFromState(TEnv env, TState tState) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { return null; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java index 6c69853b511b..37cc7779b9e4 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestChildProcedures { @ClassRule @@ -109,11 +109,9 @@ public void testChildLoadWithSteppedRestart() throws Exception { ProcedureTestingUtility.assertProcNotFailed(procExecutor, procId); } - /** - * Test the state setting that happens after store to WAL; in particular the bit where we - * set the parent runnable again after its children have all completed successfully. - * See HBASE-20978. + * Test the state setting that happens after store to WAL; in particular the bit where we set the + * parent runnable again after its children have all completed successfully. See HBASE-20978. 
*/ @Test public void testChildLoadWithRestartAfterChildSuccess() throws Exception { @@ -170,7 +168,8 @@ private void assertProcFailed(long procId) { } public static class TestRootProcedure extends SequentialProcedure { - public TestRootProcedure() {} + public TestRootProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) { @@ -194,7 +193,8 @@ public boolean abort(TestProcEnv env) { } public static class TestChildProcedure extends SequentialProcedure { - public TestChildProcedure() {} + public TestChildProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java index d0d6864ab6cf..1b34c224236c 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ public class TestForceUpdateProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestForceUpdateProcedure.class); + HBaseClassTestRule.forClass(TestForceUpdateProcedure.class); private static HBaseCommonTestingUtil UTIL = new HBaseCommonTestingUtil(); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java index 9f24403dc7d4..2def0769ae90 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestLockAndQueue { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLockAndQueue.class); + HBaseClassTestRule.forClass(TestLockAndQueue.class); @Test public void testHasLockAccess() { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java index fa8db418aece..dfdf42cdc1e0 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -40,12 +39,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; - -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureBypass { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule - .forClass(TestProcedureBypass.class); + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestProcedureBypass.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureBypass.class); @@ -77,11 +76,9 @@ public static void setUp() throws Exception { logDir = new Path(testDir, "proc-logs"); procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir); - procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv, - procStore); + procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv, procStore); procStore.start(PROCEDURE_EXECUTOR_SLOTS); - ProcedureTestingUtility - .initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); + ProcedureTestingUtility.initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); } @Test @@ -89,7 +86,7 @@ public void testBypassSuspendProcedure() throws Exception { final SuspendProcedure proc = new SuspendProcedure(); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); - //bypass the procedure + // bypass the procedure assertTrue(procExecutor.bypassProcedure(id, 30000, false, false)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -100,9 +97,9 @@ public void testStuckProcedure() throws Exception { final StuckProcedure proc = new StuckProcedure(); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); - //bypass the procedure + // bypass the procedure assertTrue(procExecutor.bypassProcedure(id, 1000, true, false)); - //Since the procedure is stuck there, we need to restart the executor to recovery. + // Since the procedure is stuck there, we need to restart the executor to recovery. 
ProcedureTestingUtility.restart(procExecutor); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -113,9 +110,8 @@ public void testBypassingProcedureWithParent() throws Exception { final RootProcedure proc = new RootProcedure(); long rootId = procExecutor.submitProcedure(proc); htu.waitFor(5000, () -> procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()) - .size() > 0); - SuspendProcedure suspendProcedure = (SuspendProcedure)procExecutor.getProcedures().stream() + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).size() > 0); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().stream() .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); assertTrue(procExecutor.bypassProcedure(suspendProcedure.getProcId(), 1000, false, false)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); @@ -141,9 +137,8 @@ public void testBypassingProcedureWithParentRecursive() throws Exception { final RootProcedure proc = new RootProcedure(); long rootId = procExecutor.submitProcedure(proc); htu.waitFor(5000, () -> procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()) - .size() > 0); - SuspendProcedure suspendProcedure = (SuspendProcedure)procExecutor.getProcedures().stream() + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).size() > 0); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().stream() .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); assertTrue(procExecutor.bypassProcedure(rootId, 1000, false, true)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); @@ -176,8 +171,7 @@ public SuspendProcedure() { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { // Always suspend the procedure throw new ProcedureSuspendedException(); } @@ -201,7 +195,6 @@ protected Procedure[] execute(final TestProcEnv env) { } - public static class RootProcedure extends ProcedureTestingUtility.NoopProcedure { private boolean childSpwaned = false; @@ -210,11 +203,10 @@ public RootProcedure() { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { if (!childSpwaned) { childSpwaned = true; - return new Procedure[] {new SuspendProcedure()}; + return new Procedure[] { new SuspendProcedure() }; } else { return null; } @@ -228,8 +220,7 @@ public WaitingTimeoutProcedure() { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { // Always suspend the procedure setTimeout(50000); setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); @@ -263,7 +254,7 @@ public StuckStateMachineProcedure(TestProcEnv env, StuckStateMachineState initia @Override protected Flow executeFromState(TestProcEnv env, StuckStateMachineState tState) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { switch (tState) { case START: LOG.info("PHASE 1: START"); @@ -292,5 
+283,4 @@ protected int getStateId(StuckStateMachineState tState) { } } - } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java index 07dd8d8ae907..e340a1a77d5a 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,8 +49,7 @@ public class TestProcedureCleanup { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureCleanup.class); - + HBaseClassTestRule.forClass(TestProcedureCleanup.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureCleanup.class); @@ -95,8 +94,7 @@ public void testProcedureShouldNotCleanOnLoad() throws Exception { LOG.info("Begin to execute " + rootProc); // wait until the child procedure arrival htu.waitFor(10000, () -> procExecutor.getProcedures().size() >= 2); - SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor - .getProcedures().get(1); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().get(1); // wait until the suspendProcedure executed suspendProcedure.latch.countDown(); Thread.sleep(100); @@ -181,14 +179,13 @@ private void corrupt(FileStatus file) throws IOException { Path tmpFile = file.getPath().suffix(".tmp"); // remove the last byte to make the trailer corrupted try (FSDataInputStream in = fs.open(file.getPath()); - FSDataOutputStream out = fs.create(tmpFile)) { + FSDataOutputStream out = fs.create(tmpFile)) { ByteStreams.copy(ByteStreams.limit(in, file.getLen() - 1), out); } fs.delete(file.getPath(), false); fs.rename(tmpFile, file.getPath()); } - public static final class ExchangeProcedure extends ProcedureTestingUtility.NoopProcedure { private final Exchanger exchanger = new Exchanger<>(); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java index f8cd787e98f5..944c7fbaebb8 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureEvents { @ClassRule @@ -81,11 +81,10 @@ public void tearDown() throws IOException { } /** - * Tests being able to suspend a Procedure for N timeouts and then failing.s - * Resets the timeout after each elapses. See {@link TestTimeoutEventProcedure} for example - * of how to do this sort of trickery with the ProcedureExecutor; i.e. suspend for a while, - * check for a condition and if not set, suspend again, etc., ultimately failing or succeeding - * eventually. 
+ * Tests being able to suspend a Procedure for N timeouts and then failing.s Resets the timeout + * after each elapses. See {@link TestTimeoutEventProcedure} for example of how to do this sort of + * trickery with the ProcedureExecutor; i.e. suspend for a while, check for a condition and if not + * set, suspend again, etc., ultimately failing or succeeding eventually. */ @Test public void testTimeoutEventProcedure() throws Exception { @@ -122,20 +121,19 @@ private void testTimeoutEventProcedureDoubleExecution(final boolean killIfSuspen /** * This Event+Procedure exhibits following behavior: *

<ul>
- *   <li>On procedure execute()
- *     <ul>
- *       <li>If had enough timeouts, abort the procedure. Else....</li>
- *       <li>Suspend the event and add self to its suspend queue</li>
- *       <li>Go into waiting state</li>
- *     </ul>
- *   </li>
- *   <li>
- *     On waiting timeout
- *     <ul>
- *       <li>Wake the event (which adds this procedure back into scheduler queue), and set own's
- * state to RUNNABLE (so can be executed again).</li>
- *     </ul>
- *   </li>
+ * <li>On procedure execute()
+ * <ul>
+ * <li>If had enough timeouts, abort the procedure. Else....</li>
+ * <li>Suspend the event and add self to its suspend queue</li>
+ * <li>Go into waiting state</li>
+ * </ul>
+ * </li>
+ * <li>On waiting timeout
+ * <ul>
+ * <li>Wake the event (which adds this procedure back into scheduler queue), and set own's state
+ * to RUNNABLE (so can be executed again).</li>
+ * </ul>
+ * </li>
 * </ul>
    */ public static class TestTimeoutEventProcedure extends NoopProcedure { @@ -144,7 +142,8 @@ public static class TestTimeoutEventProcedure extends NoopProcedure private final AtomicInteger ntimeouts = new AtomicInteger(0); private int maxTimeouts = 1; - public TestTimeoutEventProcedure() {} + public TestTimeoutEventProcedure() { + } public TestTimeoutEventProcedure(final int timeoutMsec, final int maxTimeouts) { this.maxTimeouts = maxTimeouts; @@ -190,8 +189,7 @@ protected void afterReplay(final TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { Int32Value.Builder ntimeoutsBuilder = Int32Value.newBuilder().setValue(ntimeouts.get()); serializer.serialize(ntimeoutsBuilder.build()); @@ -200,8 +198,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { Int32Value ntimeoutsValue = serializer.deserialize(Int32Value.class); ntimeouts.set(ntimeoutsValue.getValue()); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java index ba52975cd6f5..1b7f9aeefe3b 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureExecution { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -178,7 +178,7 @@ public void testSingleSequentialProc() { public void testSingleSequentialProcRollback() { List state = new ArrayList<>(); Procedure subProc2 = - new TestSequentialProcedure("subProc2", state, new TestProcedureException("fail test")); + new TestSequentialProcedure("subProc2", state, new TestProcedureException("fail test")); Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2); Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc); @@ -204,7 +204,8 @@ public void testSingleSequentialProcRollback() { public static class TestFaultyRollback extends SequentialProcedure { private int retries = 0; - public TestFaultyRollback() { } + public TestFaultyRollback() { + } @Override protected Procedure[] execute(Void env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java index 01d25acc1793..efee14ddf508 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureExecutor { @ClassRule @@ -155,8 +155,8 @@ private int waitThreadCount(final int expectedThreads) { if (procExecutor.getWorkerThreadCount() == expectedThreads) { break; } - LOG.debug("waiting for thread count=" + expectedThreads + - " current=" + procExecutor.getWorkerThreadCount()); + LOG.debug("waiting for thread count=" + expectedThreads + " current=" + + procExecutor.getWorkerThreadCount()); Threads.sleepWithoutInterrupt(250); } return procExecutor.getWorkerThreadCount(); @@ -189,5 +189,6 @@ protected Procedure[] execute(final TestProcEnv env) { } } - private static class TestProcEnv { } + private static class TestProcEnv { + } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java index 454b188280d4..a8762bf99186 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureInMemoryChore { @ClassRule diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java index a2f833d62d90..60e4902ee775 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureMetrics { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -204,7 +204,7 @@ public ProcedureMetrics(boolean success, ProcedureMetrics[] subprocs) { } public ProcedureMetrics(boolean success, boolean yield, int yieldCount, - ProcedureMetrics[] subprocs) { + ProcedureMetrics[] subprocs) { this.success = success; this.yield = yield; this.yieldCount = yieldCount; @@ -218,8 +218,8 @@ protected void updateMetricsOnSubmit(TestProcEnv env) { } @Override - protected Procedure[] execute(TestProcEnv env) throws ProcedureYieldException, - ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(TestProcEnv env) + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (this.yield) { if (yieldNum < yieldCount) { yieldNum++; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java index 2a5c5ade1e83..8ba44419420b 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureNonce { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -174,8 +174,8 @@ public void testConcurrentNonceRegistrationWithRollback() throws IOException { testConcurrentNonceRegistration(false, 890, 55555); } - private void testConcurrentNonceRegistration(final boolean submitProcedure, - final long nonceGroup, final long nonce) throws IOException { + private void testConcurrentNonceRegistration(final boolean submitProcedure, final long nonceGroup, + final long nonce) throws IOException { // register the nonce final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce); @@ -229,8 +229,7 @@ public void run() { // register the nonce t2BeforeNonceRegisteredLatch.countDown(); - assertFalse("unexpected non registered nonce", - procExecutor.registerNonce(nonceKey) < 0); + assertFalse("unexpected non registered nonce", procExecutor.registerNonce(nonceKey) < 0); } catch (Throwable e) { t2Exception.set(e); } finally { @@ -256,7 +255,8 @@ public void run() { public static class TestSingleStepProcedure extends SequentialProcedure { private int step = 0; - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -269,7 +269,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { } @Override - protected void rollback(TestProcEnv env) { } + protected void rollback(TestProcEnv env) { + } @Override protected boolean abort(TestProcEnv env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java index 686b10dbf42b..46c625e41c2f 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Int32Value; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureRecovery { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -98,7 +98,8 @@ private void restart() throws Exception { public static class TestSingleStepProcedure extends SequentialProcedure { private int step = 0; - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -110,7 +111,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { } @Override - protected void rollback(TestProcEnv env) { } + protected void rollback(TestProcEnv env) { + } @Override protected boolean abort(TestProcEnv env) { @@ -130,8 +132,7 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { step++; Threads.sleepWithoutInterrupt(procSleepInterval); if (isAborted()) { - setFailure(new RemoteProcedureException(getClass().getName(), - new ProcedureAbortedException( + setFailure(new RemoteProcedureException(getClass().getName(), new ProcedureAbortedException( "got an abort at " + getClass().getName() + " step=" + step))); return null; } @@ -155,7 +156,7 @@ private boolean isAborted() { boolean aborted = abort.get(); BaseTestStepProcedure proc = this; while (proc.hasParent() && !aborted) { - proc = (BaseTestStepProcedure)procExecutor.getProcedure(proc.getParentProcId()); + proc = (BaseTestStepProcedure) procExecutor.getProcedure(proc.getParentProcId()); aborted = proc.isAborted(); } return aborted; @@ -163,7 +164,8 @@ private boolean isAborted() { } public static class TestMultiStepProcedure extends BaseTestStepProcedure { - public TestMultiStepProcedure() { } + public TestMultiStepProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -172,7 +174,8 @@ public Procedure[] execute(TestProcEnv env) throws InterruptedException { } public static class Step1Procedure extends BaseTestStepProcedure { - public Step1Procedure() { } + public Step1Procedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -182,7 +185,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { } public static class Step2Procedure extends BaseTestStepProcedure { - public Step2Procedure() { } + public Step2Procedure() { + } } } @@ -295,9 +299,12 @@ public void testMultiStepRollbackRecovery() throws Exception { public static class TestStateMachineProcedure extends StateMachineProcedure { - enum State { STATE_1, STATE_2, STATE_3, DONE } + enum State { + STATE_1, STATE_2, STATE_3, DONE + } - public TestStateMachineProcedure() {} + public TestStateMachineProcedure() { + } public TestStateMachineProcedure(final boolean testSubmitChildProc) { this.submitChildProc = testSubmitChildProc; @@ -388,16 +395,14 @@ protected boolean abort(TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { super.serializeStateData(serializer); Int32Value.Builder builder = Int32Value.newBuilder().setValue(iResult); serializer.serialize(builder.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer 
serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { super.deserializeStateData(serializer); Int32Value value = serializer.deserialize(Int32Value.class); iResult = value.getValue(); @@ -515,7 +520,7 @@ private void dumpLogDirState() { try { FileStatus[] files = fs.listStatus(logDir); if (files != null && files.length > 0) { - for (FileStatus file: files) { + for (FileStatus file : files) { assertTrue(file.toString(), file.isFile()); LOG.debug("log file " + file.getPath() + " size=" + file.getLen()); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java index 4d7d45de3ac7..6d85a1175b83 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; @@ -141,8 +142,8 @@ private void submitProcedures(final int nthreads, final int nprocPerThread, public void run() { for (int i = 0; i < nprocPerThread; ++i) { try { - procExecutor.submitProcedure((Procedure) - procClazz.getDeclaredConstructor().newInstance()); + procExecutor + .submitProcedure((Procedure) procClazz.getDeclaredConstructor().newInstance()); } catch (Exception e) { LOG.error("unable to instantiate the procedure", e); fail("failure during the proc.newInstance(): " + e.getMessage()); @@ -197,7 +198,8 @@ public long getExecId() { } @Override - protected void rollback(TestProcedureEnv env) { } + protected void rollback(TestProcedureEnv env) { + } @Override protected boolean abort(TestProcedureEnv env) { @@ -205,15 +207,13 @@ protected boolean abort(TestProcedureEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { Int64Value.Builder builder = Int64Value.newBuilder().setValue(execId); serializer.serialize(builder.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { Int64Value value = serializer.deserialize(Int64Value.class); execId = value.getValue(); step = 2; @@ -221,7 +221,8 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) } public static class TestSingleStepProcedure extends TestProcedure { - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcedureEnv env) throws ProcedureYieldException { @@ -244,7 +245,8 @@ public String toString() { } public static class TestTwoStepProcedure extends TestProcedure { - public TestTwoStepProcedure() { } + public TestTwoStepProcedure() { + } @Override protected Procedure[] execute(TestProcedureEnv env) throws ProcedureYieldException { diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java index 098c53fff28d..b3688ba4f865 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java index f56cdb31b6b8..52e768ee96fe 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureSchedulerConcurrency { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -105,10 +105,10 @@ public void run() { } if (wakeCount.get() != oldWakeCount) { lastUpdate = EnvironmentEdgeManager.currentTime(); - } else if (wakeCount.get() >= NRUNS && - (EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD) { - break; - } + } else if (wakeCount.get() >= NRUNS + && (EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD) { + break; + } Threads.sleepWithoutInterrupt(25); } } @@ -119,7 +119,7 @@ public void run() { @Override public void run() { while (true) { - TestProcedureWithEvent proc = (TestProcedureWithEvent)sched.poll(); + TestProcedureWithEvent proc = (TestProcedureWithEvent) sched.poll(); if (proc == null) { continue; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java index 266082e04487..2304a1e931fc 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ public class TestProcedureSkipPersistence { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureSkipPersistence.class); + HBaseClassTestRule.forClass(TestProcedureSkipPersistence.class); private ProcedureExecutor procExecutor; private ProcedureStore procStore; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java index 38aef16cffea..d6c600ce87da 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureSuspended { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -181,8 +181,8 @@ public static class TestLockProcedure extends Procedure { private AtomicBoolean lock = null; private boolean hasLock = false; - public TestLockProcedure(final AtomicBoolean lock, final String key, - final boolean throwYield, final boolean throwSuspend) { + public TestLockProcedure(final AtomicBoolean lock, final String key, final boolean throwYield, + final boolean throwSuspend) { this.lock = lock; this.key = key; this.throwYield = throwYield; @@ -259,13 +259,11 @@ protected boolean abort(TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java index 07eacfeb7c7f..817e455bb448 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureToString { @ClassRule @@ -40,7 +40,8 @@ public class TestProcedureToString { /** * A do-nothing environment for BasicProcedure. */ - static class BasicProcedureEnv {} + static class BasicProcedureEnv { + } /** * A do-nothing basic procedure just for testing toString. 
@@ -49,7 +50,7 @@ static class BasicProcedure extends Procedure { @Override protected Procedure[] execute(BasicProcedureEnv env) throws ProcedureYieldException, InterruptedException { - return new Procedure [] {this}; + return new Procedure[] { this }; } @Override @@ -62,13 +63,11 @@ protected boolean abort(BasicProcedureEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } @@ -106,17 +105,17 @@ public void testBasicToString() { * Do-nothing SimpleMachineProcedure for checking its toString. */ static class SimpleStateMachineProcedure - extends StateMachineProcedure { + extends StateMachineProcedure { @Override - protected org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow executeFromState( - BasicProcedureEnv env, ServerCrashState state) + protected org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow + executeFromState(BasicProcedureEnv env, ServerCrashState state) throws ProcedureYieldException, InterruptedException { return null; } @Override - protected void rollbackState(BasicProcedureEnv env, ServerCrashState state) throws IOException, - InterruptedException { + protected void rollbackState(BasicProcedureEnv env, ServerCrashState state) + throws IOException, InterruptedException { } @Override diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java index 4d57c37ac619..cea18cb47030 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public class TestProcedureUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureUtil.class); + HBaseClassTestRule.forClass(TestProcedureUtil.class); @Test public void testValidation() throws Exception { @@ -51,13 +51,14 @@ public void testConvert() throws Exception { // check Procedure to protobuf conversion final TestProcedure proc1 = new TestProcedure(10, 1, new byte[] { 65 }); final ProcedureProtos.Procedure proto1 = ProcedureUtil.convertToProtoProcedure(proc1); - final TestProcedure proc2 = (TestProcedure)ProcedureUtil.convertToProcedure(proto1); + final TestProcedure proc2 = (TestProcedure) ProcedureUtil.convertToProcedure(proto1); final ProcedureProtos.Procedure proto2 = ProcedureUtil.convertToProtoProcedure(proc2); assertEquals(false, proto2.hasResult()); assertEquals("Procedure protobuf does not match", proto1, proto2); } public static class TestProcedureNoDefaultConstructor extends TestProcedure { - public TestProcedureNoDefaultConstructor(int x) {} + public TestProcedureNoDefaultConstructor(int x) { + } } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java index ec001d1e3373..ba072da68ea3 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public class TestRemoteProcedureDispatcherUncaughtExceptionHandler { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRemoteProcedureDispatcherUncaughtExceptionHandler.class); + HBaseClassTestRule.forClass(TestRemoteProcedureDispatcherUncaughtExceptionHandler.class); private static final class ExceptionHandler implements UncaughtExceptionHandler { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java index cc3c765885d2..9d1fe2bbd31c 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestStateMachineProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -62,7 +62,7 @@ public boolean equals(final Object other) { // we are going to serialize the exception in the test, // so the instance comparison will not match - return getMessage().equals(((Exception)other).getMessage()); + return getMessage().equals(((Exception) other).getMessage()); } @Override @@ -179,7 +179,9 @@ public void testChildOnLastStepWithRollbackDoubleExecution() throws Exception { assertEquals(TEST_FAILURE_EXCEPTION, cause); } - public enum TestSMProcedureState { STEP_1, STEP_2 } + public enum TestSMProcedureState { + STEP_1, STEP_2 + } public static class TestSMProcedure extends StateMachineProcedure { @@ -228,7 +230,7 @@ protected TestSMProcedureState getInitialState() { } public static class TestSMProcedureBadRollback - extends StateMachineProcedure { + extends StateMachineProcedure { @Override protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { LOG.info("EXEC " + state + " " + this); @@ -245,6 +247,7 @@ protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { } return Flow.HAS_MORE_STATE; } + @Override protected void rollbackState(TestProcEnv env, TestSMProcedureState state) { LOG.info("ROLLBACK " + state + " " + this); @@ -267,8 +270,7 @@ protected TestSMProcedureState getInitialState() { } @Override - protected void rollback(final TestProcEnv env) - throws IOException, InterruptedException { + protected void rollback(final TestProcEnv env) throws IOException, InterruptedException { if (isEofState()) { stateCount--; } @@ -276,8 +278,8 @@ protected void rollback(final TestProcEnv env) updateTimestamp(); rollbackState(env, getCurrentState()); throw new IOException(); - } catch(IOException e) { - //do nothing for now + } catch (IOException e) { + // do nothing for now } finally { stateCount--; updateTimestamp(); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java index 890bbd1871cf..37a7b717f154 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestYieldProcedures { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -70,8 +70,8 @@ public void setUp() throws IOException { logDir = new Path(testDir, "proc-logs"); procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir); procRunnables = new TestScheduler(); - procExecutor = - new ProcedureExecutor<>(htu.getConfiguration(), new TestProcEnv(), procStore, procRunnables); + procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), new TestProcEnv(), procStore, + procRunnables); procStore.start(PROCEDURE_EXECUTOR_SLOTS); ProcedureTestingUtility.initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); } @@ -189,7 +189,9 @@ public long nextTimestamp() { public static class TestStateMachineProcedure extends StateMachineProcedure { - enum State { STATE_1, STATE_2, STATE_3 } + enum State { + STATE_1, STATE_2, STATE_3 + } public static class ExecutionInfo { private final boolean rollback; @@ -266,8 +268,7 @@ protected StateMachineProcedure.Flow executeFromState(TestProcEnv env, State sta } @Override - protected void rollbackState(TestProcEnv env, final State state) - throws InterruptedException { + protected void rollbackState(TestProcEnv env, final State state) throws InterruptedException { final long ts = env.nextTimestamp(); LOG.debug(getProcId() + " rollback state " + state + " ts=" + ts); executionInfo.add(new ExecutionInfo(ts, state, true)); @@ -347,13 +348,11 @@ protected boolean isYieldAfterExecutionStep(final TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } @@ -364,7 +363,8 @@ private static class TestScheduler extends SimpleProcedureScheduler { private int yieldCalls; private int pollCalls; - public TestScheduler() {} + public TestScheduler() { + } @Override public void addFront(final Procedure proc) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java index d88d93e571f2..9e1b4f3c722f 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -39,35 +40,35 @@ * Base class for testing procedure store performance. 
*/ public abstract class ProcedureStorePerformanceEvaluation - extends AbstractHBaseTool { + extends AbstractHBaseTool { // Command line options and defaults. public static String DEFAULT_OUTPUT_PATH = "proc-store"; public static Option OUTPUT_PATH_OPTION = - new Option("output", true, "The output path. Default: " + DEFAULT_OUTPUT_PATH); + new Option("output", true, "The output path. Default: " + DEFAULT_OUTPUT_PATH); public static int DEFAULT_NUM_THREADS = 20; public static Option NUM_THREADS_OPTION = new Option("threads", true, - "Number of parallel threads which will write insert/updates/deletes to store. Default: " + - DEFAULT_NUM_THREADS); + "Number of parallel threads which will write insert/updates/deletes to store. Default: " + + DEFAULT_NUM_THREADS); public static int DEFAULT_NUM_PROCS = 1000000; // 1M public static Option NUM_PROCS_OPTION = new Option("procs", true, - "Total number of procedures. Each procedure writes one insert and one update. Default: " + - DEFAULT_NUM_PROCS); + "Total number of procedures. Each procedure writes one insert and one update. Default: " + + DEFAULT_NUM_PROCS); public static int DEFAULT_STATE_SIZE = 1024; // 1KB public static Option STATE_SIZE_OPTION = new Option("state_size", true, - "Size of serialized state in bytes to write on update. Default: " + DEFAULT_STATE_SIZE + - "bytes"); + "Size of serialized state in bytes to write on update. Default: " + DEFAULT_STATE_SIZE + + "bytes"); public static Option SYNC_OPTION = new Option("sync", true, - "Type of sync to use when writing WAL contents to file system. Accepted values: hflush, " + - "hsync, nosync. Default: hflush"); + "Type of sync to use when writing WAL contents to file system. Accepted values: hflush, " + + "hsync, nosync. Default: hflush"); public static String DEFAULT_SYNC_OPTION = "hflush"; @@ -102,8 +103,8 @@ protected void processOptions(CommandLine cmd) { numThreads = getOptionAsInt(cmd, NUM_THREADS_OPTION.getOpt(), DEFAULT_NUM_THREADS); numProcs = getOptionAsInt(cmd, NUM_PROCS_OPTION.getOpt(), DEFAULT_NUM_PROCS); syncType = cmd.getOptionValue(SYNC_OPTION.getOpt(), DEFAULT_SYNC_OPTION); - assert "hsync".equals(syncType) || "hflush".equals(syncType) || "nosync".equals( - syncType) : "sync argument can only accept one of these three values: hsync, hflush, nosync"; + assert "hsync".equals(syncType) || "hflush".equals(syncType) || "nosync".equals(syncType) + : "sync argument can only accept one of these three values: hsync, hflush, nosync"; stateSize = getOptionAsInt(cmd, STATE_SIZE_OPTION.getOpt(), DEFAULT_STATE_SIZE); SERIALIZED_STATE = new byte[stateSize]; Bytes.random(SERIALIZED_STATE); @@ -137,8 +138,8 @@ private void tearDownProcedureStore() { storeDir = fs.makeQualified(new Path(outputPath)); fs.delete(storeDir, true); } catch (IOException e) { - System.err.println("Error: Couldn't delete log dir. You can delete it manually to free up " + - "disk space. Location: " + storeDir); + System.err.println("Error: Couldn't delete log dir. You can delete it manually to free up " + + "disk space. 
Location: " + storeDir); e.printStackTrace(); } } @@ -159,8 +160,8 @@ protected int doWork() throws Exception { boolean failure = false; try { for (Future future : futures) { - long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 - - EnvironmentEdgeManager.currentTime(); + long timeout = + start + WORKER_THREADS_TIMEOUT_SEC * 1000 - EnvironmentEdgeManager.currentTime(); failure |= (future.get(timeout, TimeUnit.MILLISECONDS).equals(EXIT_FAILURE)); } } catch (Exception e) { @@ -219,8 +220,8 @@ public Integer call() throws IOException { } if (procId != 0 && procId % 10000 == 0) { long ns = System.nanoTime() - start; - System.out.println("Wrote " + procId + " procedures in " + - StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(ns))); + System.out.println("Wrote " + procId + " procedures in " + + StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(ns))); } try { preWrite(procId); @@ -232,7 +233,7 @@ public Integer call() throws IOException { return EXIT_FAILURE; } ProcedureTestingUtility.TestProcedure proc = - new ProcedureTestingUtility.TestProcedure(procId); + new ProcedureTestingUtility.TestProcedure(procId); proc.setData(SERIALIZED_STATE); store.insert(proc, null); store.update(proc); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java index 29d114af7212..278258fdfb12 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public class TestProcedureTree { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureTree.class); + HBaseClassTestRule.forClass(TestProcedureTree.class); public static final class TestProcedure extends Procedure { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java index 2866b21518ec..cd4db73614b9 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.List; import java.util.Set; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -42,14 +42,14 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { protected static final HBaseCommonTestingUtil UTIL = new HBaseCommonTestingUtil(); // Command line options and defaults. - public static int DEFAULT_NUM_PROCS = 1000000; // 1M - public static Option NUM_PROCS_OPTION = new Option("procs", true, - "Total number of procedures. Default: " + DEFAULT_NUM_PROCS); + public static int DEFAULT_NUM_PROCS = 1000000; // 1M + public static Option NUM_PROCS_OPTION = + new Option("procs", true, "Total number of procedures. Default: " + DEFAULT_NUM_PROCS); public static int DEFAULT_NUM_WALS = 0; public static Option NUM_WALS_OPTION = new Option("wals", true, - "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + - " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); - public static int DEFAULT_STATE_SIZE = 1024; // 1KB + "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + + " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); + public static int DEFAULT_STATE_SIZE = 1024; // 1KB public static Option STATE_SIZE_OPTION = new Option("state_size", true, "Size of serialized state in bytes to write on update. Default: " + DEFAULT_STATE_SIZE + " bytes"); @@ -69,7 +69,8 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { static byte[] serializedState; private static class LoadCounter implements ProcedureStore.ProcedureLoader { - public LoadCounter() {} + public LoadCounter() { + } @Override public void setMaxProcId(long maxProcId) { @@ -105,10 +106,10 @@ protected void processOptions(CommandLine cmd) { numWals = getOptionAsInt(cmd, NUM_WALS_OPTION.getOpt(), DEFAULT_NUM_WALS); int stateSize = getOptionAsInt(cmd, STATE_SIZE_OPTION.getOpt(), DEFAULT_STATE_SIZE); serializedState = new byte[stateSize]; - updatesPerProc = getOptionAsInt(cmd, UPDATES_PER_PROC_OPTION.getOpt(), - DEFAULT_UPDATES_PER_PROC); + updatesPerProc = + getOptionAsInt(cmd, UPDATES_PER_PROC_OPTION.getOpt(), DEFAULT_UPDATES_PER_PROC); deleteProcsFraction = getOptionAsDouble(cmd, DELETE_PROCS_FRACTION_OPTION.getOpt(), - DEFAULT_DELETE_PROCS_FRACTION); + DEFAULT_DELETE_PROCS_FRACTION); setupConf(); } @@ -140,7 +141,7 @@ private List shuffleProcWriteSequence() { Set toBeDeletedProcs = new HashSet<>(); // Add n + 1 entries of the proc id for insert + updates. If proc is chosen for delete, add // extra entry which is marked -ve in the loop after shuffle. 
- for (int procId = 1; procId <= numProcs; ++procId) { + for (int procId = 1; procId <= numProcs; ++procId) { procStatesSequence.addAll(Collections.nCopies(updatesPerProc + 1, procId)); if (ThreadLocalRandom.current().nextFloat() < deleteProcsFraction) { procStatesSequence.add(procId); @@ -161,7 +162,7 @@ private List shuffleProcWriteSequence() { private void writeWals() throws IOException { List procStates = shuffleProcWriteSequence(); - TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used. + TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used. int numProcsPerWal = numWals > 0 ? procStates.size() / numWals : Integer.MAX_VALUE; long startTime = EnvironmentEdgeManager.currentTime(); long lastTime = startTime; @@ -179,8 +180,8 @@ private void writeWals() throws IOException { } if (i > 0 && i % numProcsPerWal == 0) { long currentTime = EnvironmentEdgeManager.currentTime(); - System.out.println("Forcing wall roll. Time taken on last WAL: " + - (currentTime - lastTime) / 1000.0f + " sec"); + System.out.println("Forcing wall roll. Time taken on last WAL: " + + (currentTime - lastTime) / 1000.0f + " sec"); store.rollWriterForTesting(); lastTime = currentTime; } @@ -203,8 +204,8 @@ private void storeRestart(ProcedureStore.ProcedureLoader loader) throws IOExcept System.out.println("Load time : " + (timeTaken / 1000.0f) + "sec"); System.out.println("******************************************"); System.out.println("Raw format for scripts"); - System.out.println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " - + "total_time_ms=%s]", + System.out + .println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " + "total_time_ms=%s]", NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), serializedState.length, UPDATES_PER_PROC_OPTION.getOpt(), updatesPerProc, DELETE_PROCS_FRACTION_OPTION.getOpt(), deleteProcsFraction, NUM_WALS_OPTION.getOpt(), numWals, timeTaken)); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java index cab44264f295..ba7a1577875d 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,13 +31,13 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; public class ProcedureWALPerformanceEvaluation - extends ProcedureStorePerformanceEvaluation { + extends ProcedureStorePerformanceEvaluation { // Command line options and defaults. public static int DEFAULT_NUM_WALS = 0; public static Option NUM_WALS_OPTION = new Option("wals", true, - "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + - " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); + "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + + " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); private long numProcsPerWal = Long.MAX_VALUE; // never roll wall based on this value. 
private int numWals; @@ -79,10 +79,10 @@ protected WALProcedureStore createProcedureStore(Path storeDir) throws IOExcepti @Override protected void printRawFormatResult(long timeTakenNs) { System.out - .println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " + "total_time_ms=%s]", - NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), stateSize, - SYNC_OPTION.getOpt(), syncType, NUM_THREADS_OPTION.getOpt(), numThreads, - NUM_WALS_OPTION.getOpt(), numWals, timeTakenNs)); + .println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " + "total_time_ms=%s]", + NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), stateSize, + SYNC_OPTION.getOpt(), syncType, NUM_THREADS_OPTION.getOpt(), numThreads, + NUM_WALS_OPTION.getOpt(), numWals, timeTakenNs)); } @Override diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java index 9d897cf878c5..16f8293ab8fc 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestBitSetNode { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBitSetNode.class); + HBaseClassTestRule.forClass(TestBitSetNode.class); @Test public void testGetActiveMaxMinProcId() { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java index e3064c9ab823..89697a7ffb68 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -33,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureStoreTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java index 12ea02adf9c6..97d1018ac1a5 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; @@ -43,7 +44,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestStressWALProcedureStore { @ClassRule @@ -115,7 +116,8 @@ public void run() { for (int i = 0, nupdates = rand.nextInt(10); i <= nupdates; ++i) { try { Thread.sleep(0, rand.nextInt(15)); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + } procStore.update(proc); } // Delete @@ -136,7 +138,8 @@ public void run() { assertEquals(1, procStore.getActiveLogs().size()); } - @Ignore @Test // REENABLE after merge of + @Ignore + @Test // REENABLE after merge of // https://github.com/google/protobuf/issues/2228#issuecomment-252058282 public void testEntrySizeLimit() throws Exception { final int NITEMS = 20; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java index 915d6190b815..484f771c11a9 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Int64Value; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestWALProcedureStore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -161,7 +161,7 @@ public void testWalCleanerSequentialClean() throws Exception { procStore.insert(procs[i], null); procStore.rollWriterForTesting(); logs = procStore.getActiveLogs(); - assertEquals(logs.size(), i + 2); // Extra 1 for current ongoing wal. + assertEquals(logs.size(), i + 2); // Extra 1 for current ongoing wal. } // Delete procedures in sequential order make sure that only the corresponding wal is deleted @@ -176,7 +176,6 @@ public void testWalCleanerSequentialClean() throws Exception { } } - // Test that wal cleaner doesn't create holes in wal files list i.e. it only deletes files if // they are in the starting of the list. @Test @@ -189,7 +188,7 @@ public void testWalCleanerNoHoles() throws Exception { procStore.insert(procs[i], null); procStore.rollWriterForTesting(); logs = procStore.getActiveLogs(); - assertEquals(i + 2, logs.size()); // Extra 1 for current ongoing wal. + assertEquals(i + 2, logs.size()); // Extra 1 for current ongoing wal. } for (int i = 1; i < procs.length; i++) { @@ -222,18 +221,18 @@ public void testWalCleanerUpdatesDontLeaveHoles() throws Exception { TestSequentialProcedure p2 = new TestSequentialProcedure(); procStore.insert(p1, null); procStore.insert(p2, null); - procStore.rollWriterForTesting(); // generates first log with p1 + p2 + procStore.rollWriterForTesting(); // generates first log with p1 + p2 ProcedureWALFile log1 = procStore.getActiveLogs().get(0); procStore.update(p2); - procStore.rollWriterForTesting(); // generates second log with p2 + procStore.rollWriterForTesting(); // generates second log with p2 ProcedureWALFile log2 = procStore.getActiveLogs().get(1); procStore.update(p2); - procStore.rollWriterForTesting(); // generates third log with p2 - procStore.removeInactiveLogsForTesting(); // Shouldn't remove 2nd log. + procStore.rollWriterForTesting(); // generates third log with p2 + procStore.removeInactiveLogsForTesting(); // Shouldn't remove 2nd log. assertEquals(4, procStore.getActiveLogs().size()); procStore.update(p1); - procStore.rollWriterForTesting(); // generates fourth log with p1 - procStore.removeInactiveLogsForTesting(); // Should remove first two logs. + procStore.rollWriterForTesting(); // generates fourth log with p1 + procStore.removeInactiveLogsForTesting(); // Should remove first two logs. 
assertEquals(3, procStore.getActiveLogs().size()); assertFalse(procStore.getActiveLogs().contains(log1)); assertFalse(procStore.getActiveLogs().contains(log2)); @@ -418,8 +417,8 @@ public void testCorruptedTrailer() throws Exception { assertEquals(0, loader.getCorruptedCount()); } - private static void assertUpdated(final ProcedureStoreTracker tracker, - final Procedure[] procs, final int[] updatedProcs, final int[] nonUpdatedProcs) { + private static void assertUpdated(final ProcedureStoreTracker tracker, final Procedure[] procs, + final int[] updatedProcs, final int[] nonUpdatedProcs) { for (int index : updatedProcs) { long procId = procs[index].getProcId(); assertTrue("Procedure id : " + procId, tracker.isModified(procId)); @@ -430,17 +429,17 @@ private static void assertUpdated(final ProcedureStoreTracker tracker, } } - private static void assertDeleted(final ProcedureStoreTracker tracker, - final Procedure[] procs, final int[] deletedProcs, final int[] nonDeletedProcs) { + private static void assertDeleted(final ProcedureStoreTracker tracker, final Procedure[] procs, + final int[] deletedProcs, final int[] nonDeletedProcs) { for (int index : deletedProcs) { long procId = procs[index].getProcId(); - assertEquals("Procedure id : " + procId, - ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(procId)); + assertEquals("Procedure id : " + procId, ProcedureStoreTracker.DeleteState.YES, + tracker.isDeleted(procId)); } for (int index : nonDeletedProcs) { long procId = procs[index].getProcId(); - assertEquals("Procedure id : " + procId, - ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(procId)); + assertEquals("Procedure id : " + procId, ProcedureStoreTracker.DeleteState.NO, + tracker.isDeleted(procId)); } } @@ -451,13 +450,13 @@ public void testCorruptedTrailersRebuild() throws Exception { procs[i] = new TestSequentialProcedure(); } // Log State (I=insert, U=updated, D=delete) - // | log 1 | log 2 | log 3 | - // 0 | I, D | | | - // 1 | I | | | - // 2 | I | D | | - // 3 | I | U | | - // 4 | | I | D | - // 5 | | | I | + // | log 1 | log 2 | log 3 | + // 0 | I, D | | | + // 1 | I | | | + // 2 | I | D | | + // 3 | I | U | | + // 4 | | I | D | + // 5 | | | I | procStore.insert(procs[0], null); procStore.insert(procs[1], null); procStore.insert(procs[2], null); @@ -485,7 +484,7 @@ public void testCorruptedTrailersRebuild() throws Exception { htu.getConfiguration().setBoolean(WALProcedureStore.EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, false); final LoadCounter loader = new LoadCounter(); storeRestart(loader); - assertEquals(3, loader.getLoadedCount()); // procs 1, 3 and 5 + assertEquals(3, loader.getLoadedCount()); // procs 1, 3 and 5 assertEquals(0, loader.getCorruptedCount()); // Check the Trackers @@ -493,13 +492,16 @@ public void testCorruptedTrailersRebuild() throws Exception { LOG.info("WALs " + walFiles); assertEquals(4, walFiles.size()); LOG.info("Checking wal " + walFiles.get(0)); - assertUpdated(walFiles.get(0).getTracker(), procs, new int[]{0, 1, 2, 3}, new int[] {4, 5}); + assertUpdated(walFiles.get(0).getTracker(), procs, new int[] { 0, 1, 2, 3 }, + new int[] { 4, 5 }); LOG.info("Checking wal " + walFiles.get(1)); - assertUpdated(walFiles.get(1).getTracker(), procs, new int[]{2, 3, 4}, new int[] {0, 1, 5}); + assertUpdated(walFiles.get(1).getTracker(), procs, new int[] { 2, 3, 4 }, + new int[] { 0, 1, 5 }); LOG.info("Checking wal " + walFiles.get(2)); - assertUpdated(walFiles.get(2).getTracker(), procs, new int[]{4, 5}, new int[] {0, 1, 2, 3}); + 
assertUpdated(walFiles.get(2).getTracker(), procs, new int[] { 4, 5 }, + new int[] { 0, 1, 2, 3 }); LOG.info("Checking global tracker "); - assertDeleted(procStore.getStoreTracker(), procs, new int[]{0, 2, 4}, new int[] {1, 3, 5}); + assertDeleted(procStore.getStoreTracker(), procs, new int[] { 0, 2, 4 }, new int[] { 1, 3, 5 }); } @Test @@ -531,17 +533,17 @@ public void testCorruptedProcedures() throws Exception { // Insert root-procedures TestProcedure[] rootProcs = new TestProcedure[10]; for (int i = 1; i <= rootProcs.length; i++) { - rootProcs[i-1] = new TestProcedure(i, 0); - procStore.insert(rootProcs[i-1], null); - rootProcs[i-1].addStackId(0); - procStore.update(rootProcs[i-1]); + rootProcs[i - 1] = new TestProcedure(i, 0); + procStore.insert(rootProcs[i - 1], null); + rootProcs[i - 1].addStackId(0); + procStore.update(rootProcs[i - 1]); } // insert root-child txn procStore.rollWriterForTesting(); for (int i = 1; i <= rootProcs.length; i++) { TestProcedure b = new TestProcedure(rootProcs.length + i, i); - rootProcs[i-1].addStackId(1); - procStore.insert(rootProcs[i-1], new Procedure[] { b }); + rootProcs[i - 1].addStackId(1); + procStore.insert(rootProcs[i - 1], new Procedure[] { b }); } // insert child updates procStore.rollWriterForTesting(); @@ -629,20 +631,19 @@ public void testFileNotFoundDuringLeaseRecovery() throws IOException { assertEquals(procs.length + 1, status.length); // simulate another active master removing the wals - procStore = new WALProcedureStore(htu.getConfiguration(), logDir, null, - new LeaseRecovery() { - private int count = 0; - - @Override - public void recoverFileLease(FileSystem fs, Path path) throws IOException { - if (++count <= 2) { - fs.delete(path, false); - LOG.debug("Simulate FileNotFound at count=" + count + " for " + path); - throw new FileNotFoundException("test file not found " + path); - } - LOG.debug("Simulate recoverFileLease() at count=" + count + " for " + path); + procStore = new WALProcedureStore(htu.getConfiguration(), logDir, null, new LeaseRecovery() { + private int count = 0; + + @Override + public void recoverFileLease(FileSystem fs, Path path) throws IOException { + if (++count <= 2) { + fs.delete(path, false); + LOG.debug("Simulate FileNotFound at count=" + count + " for " + path); + throw new FileNotFoundException("test file not found " + path); } - }); + LOG.debug("Simulate recoverFileLease() at count=" + count + " for " + path); + } + }); final LoadCounter loader = new LoadCounter(); procStore.start(PROCEDURE_STORE_SLOTS); @@ -656,7 +657,7 @@ public void recoverFileLease(FileSystem fs, Path path) throws IOException { @Test public void testLogFileAlreadyExists() throws IOException { - final boolean[] tested = {false}; + final boolean[] tested = { false }; WALProcedureStore mStore = Mockito.spy(procStore); Answer ans = new Answer() { @@ -806,20 +807,19 @@ public void recoverFileLease(FileSystem fs, Path path) throws IOException { }); } - private LoadCounter restartAndAssert(long maxProcId, long runnableCount, - int completedCount, int corruptedCount) throws Exception { - return ProcedureTestingUtility.storeRestartAndAssert(procStore, maxProcId, - runnableCount, completedCount, corruptedCount); + private LoadCounter restartAndAssert(long maxProcId, long runnableCount, int completedCount, + int corruptedCount) throws Exception { + return ProcedureTestingUtility.storeRestartAndAssert(procStore, maxProcId, runnableCount, + completedCount, corruptedCount); } - private void corruptLog(final FileStatus logFile, final long dropBytes) - 
throws IOException { + private void corruptLog(final FileStatus logFile, final long dropBytes) throws IOException { assertTrue(logFile.getLen() > dropBytes); - LOG.debug("corrupt log " + logFile.getPath() + - " size=" + logFile.getLen() + " drop=" + dropBytes); + LOG.debug( + "corrupt log " + logFile.getPath() + " size=" + logFile.getLen() + " drop=" + dropBytes); Path tmpPath = new Path(testDir, "corrupted.log"); InputStream in = fs.open(logFile.getPath()); - OutputStream out = fs.create(tmpPath); + OutputStream out = fs.create(tmpPath); IOUtils.copyBytes(in, out, logFile.getLen() - dropBytes, true); if (!fs.rename(tmpPath, logFile.getPath())) { throw new IOException("Unable to rename"); @@ -856,8 +856,7 @@ protected boolean abort(Void env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { long procId = getProcId(); if (procId % 2 == 0) { Int64Value.Builder builder = Int64Value.newBuilder().setValue(procId); @@ -866,8 +865,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { long procId = getProcId(); if (procId % 2 == 0) { Int64Value value = serializer.deserialize(Int64Value.class); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java index 0d494fcdd6b3..93b85c8c2c86 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestDelayedUtil { @ClassRule @@ -50,9 +50,8 @@ public void testDelayedContainerEquals() { ZeroDelayContainer o1cb = new ZeroDelayContainer<>(o1); ZeroDelayContainer o2c = new ZeroDelayContainer<>(o2); - ZeroDelayContainer[] items = new ZeroDelayContainer[] { - lnull, l10a, l10b, l15, onull, o1ca, o1cb, o2c, - }; + ZeroDelayContainer[] items = + new ZeroDelayContainer[] { lnull, l10a, l10b, l15, onull, o1ca, o1cb, o2c, }; assertContainersEquals(lnull, items, lnull, onull); assertContainersEquals(l10a, items, l10a, l10b); @@ -75,8 +74,8 @@ private void assertContainersEquals(final ZeroDelayContainer src, } } boolean isMatching = src.equals(items[i]); - assertEquals(src.getObject() + " unexpectedly match " + items[i].getObject(), - shouldMatch, isMatching); + assertEquals(src.getObject() + " unexpectedly match " + items[i].getObject(), shouldMatch, + isMatching); } } diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index de40e56e20a5..82710d6bae59 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -36,13 +36,28 @@ --> 3.17.3 + + + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + + + junit + junit + test + + org.apache.maven.plugins maven-source-plugin - + maven-assembly-plugin @@ -56,10 +71,10 @@ secondPartTestsExecution - test test + test true @@ -72,10 +87,10 @@ compile-protoc - generate-sources compile + generate-sources com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier} true @@ -95,48 +110,48 @@ com.google.code.maven-replacer-plugin replacer 1.5.3 + + ${basedir}/target/generated-sources/ + + **/*.java + + + true + + + ([^\.])com.google.protobuf + $1org.apache.hbase.thirdparty.com.google.protobuf + + + (public)(\W+static)?(\W+final)?(\W+class) + @javax.annotation.Generated("proto") $1$2$3$4 + + + + (@javax.annotation.Generated\("proto"\) ){2} + $1 + + + - process-sources replace + process-sources - - ${basedir}/target/generated-sources/ - - **/*.java - - - true - - - ([^\.])com.google.protobuf - $1org.apache.hbase.thirdparty.com.google.protobuf - - - (public)(\W+static)?(\W+final)?(\W+class) - @javax.annotation.Generated("proto") $1$2$3$4 - - - - (@javax.annotation.Generated\("proto"\) ){2} - $1 - - - org.apache.maven.plugins maven-shade-plugin - package shade + package true true @@ -187,21 +202,6 @@ - - - - - org.apache.hbase.thirdparty - hbase-shaded-protobuf - - - junit - junit - test - - @@ -260,9 +260,7 @@ - - com.google.code.maven-replacer-plugin - + com.google.code.maven-replacer-plugin replacer [1.5.3,) @@ -271,7 +269,7 @@ - false + false diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java index f8cef893d7d7..9f839d3f6ecf 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor 
license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,19 +22,20 @@ import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage; /** - * Helper to convert Exceptions and StackTraces from/to protobuf. - * (see ErrorHandling.proto for the internal of the proto messages) + * Helper to convert Exceptions and StackTraces from/to protobuf. (see ErrorHandling.proto for the + * internal of the proto messages) */ @InterfaceAudience.Private public final class ForeignExceptionUtil { - private ForeignExceptionUtil() { } + private ForeignExceptionUtil() { + } public static Exception toException(final ForeignExceptionMessage eem) { Exception re; @@ -58,7 +59,7 @@ public static IOException toIOException(final ForeignExceptionMessage eem) { private static T createException(final Class clazz, final ForeignExceptionMessage eem) throws ClassNotFoundException, NoSuchMethodException, - InstantiationException, IllegalAccessException, InvocationTargetException { + InstantiationException, IllegalAccessException, InvocationTargetException { final GenericExceptionMessage gem = eem.getGenericException(); final Class realClass = Class.forName(gem.getClassName()); final Class cls = realClass.asSubclass(clazz); @@ -127,8 +128,7 @@ public static List toProtoStackTraceElement(StackTrace } /** - * Unwind a serialized array of {@link StackTraceElementMessage}s to a - * {@link StackTraceElement}s. + * Unwind a serialized array of {@link StackTraceElementMessage}s to a {@link StackTraceElement}s. * @param traceList list that was serialized * @return the deserialized list or null if it couldn't be unwound (e.g. wasn't set on * the sender). @@ -140,10 +140,8 @@ public static StackTraceElement[] toStackTrace(List tr StackTraceElement[] trace = new StackTraceElement[traceList.size()]; for (int i = 0; i < traceList.size(); i++) { StackTraceElementMessage elem = traceList.get(i); - trace[i] = new StackTraceElement( - elem.getDeclaringClass(), elem.getMethodName(), - elem.hasFileName() ? elem.getFileName() : null, - elem.getLineNumber()); + trace[i] = new StackTraceElement(elem.getDeclaringClass(), elem.getMethodName(), + elem.hasFileName() ? 
elem.getFileName() : null, elem.getLineNumber()); } return trace; } diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml index 9405d5367b04..d454075b80f3 100644 --- a/hbase-replication/pom.xml +++ b/hbase-replication/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -31,34 +30,6 @@ Apache HBase - Replication HBase Replication Support - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -159,12 +130,42 @@ + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + hadoop-3.0 - !hadoop.profile + + !hadoop.profile + ${hadoop-three.version} @@ -193,8 +194,7 @@ lifecycle-mapping - - + diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 83421600aa0d..6dba30a34c04 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java index 5c21e1e023ce..36b958d2fa2e 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java index 85b6c7626614..b0c4842948a0 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; @@ -37,8 +36,7 @@ public interface ReplicationPeer { */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) enum PeerState { - ENABLED, - DISABLED + ENABLED, DISABLED } /** @@ -108,7 +106,7 @@ default boolean isPeerEnabled() { /** * @deprecated since 2.1.0 and will be removed in 4.0.0. 
Use - * {@link #registerPeerConfigListener(ReplicationPeerConfigListener)} instead. + * {@link #registerPeerConfigListener(ReplicationPeerConfigListener)} instead. * @see #registerPeerConfigListener(ReplicationPeerConfigListener) * @see HBASE-19573 */ diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java index d4d8023ead76..d0bacda6d496 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -24,8 +22,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface ReplicationPeerConfigListener { - /** Callback method for when users update the ReplicationPeerConfig for this peer - * + /** + * Callback method for when users update the ReplicationPeerConfig for this peer * @param rpc The updated ReplicationPeerConfig */ void peerConfigUpdated(ReplicationPeerConfig rpc); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java index 08799856b754..ee10c03c7016 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java @@ -63,7 +63,7 @@ public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig setPeerState(peerState); this.peerConfig = peerConfig; this.syncReplicationStateBits = - syncReplicationState.value() | (newSyncReplicationState.value() << SHIFT); + syncReplicationState.value() | (newSyncReplicationState.value() << SHIFT); this.peerConfigListeners = new ArrayList<>(); } @@ -78,12 +78,12 @@ public void setPeerConfig(ReplicationPeerConfig peerConfig) { public void setNewSyncReplicationState(SyncReplicationState newState) { this.syncReplicationStateBits = - (this.syncReplicationStateBits & AND_BITS) | (newState.value() << SHIFT); + (this.syncReplicationStateBits & AND_BITS) | (newState.value() << SHIFT); } public void transitSyncReplicationState() { this.syncReplicationStateBits = - (this.syncReplicationStateBits >>> SHIFT) | (SyncReplicationState.NONE.value() << SHIFT); + (this.syncReplicationStateBits >>> SHIFT) | (SyncReplicationState.NONE.value() << SHIFT); } /** diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java index f74ac37187c4..6f2818f940cb 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more 
contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -92,8 +92,8 @@ void setPeerNewSyncReplicationState(String peerId, SyncReplicationState state) SyncReplicationState getPeerSyncReplicationState(String peerId) throws ReplicationException; /** - * Get the new sync replication state. Will return {@link SyncReplicationState#NONE} if we are - * not in a transition. + * Get the new sync replication state. Will return {@link SyncReplicationState#NONE} if we are not + * in a transition. * @throws ReplicationException if there are errors accessing the storage service. */ SyncReplicationState getPeerNewSyncReplicationState(String peerId) throws ReplicationException; diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index ebe99da3541b..0a2ab4e8780e 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -140,8 +140,8 @@ private ReplicationPeerImpl createPeer(String peerId) throws ReplicationExceptio boolean enabled = peerStorage.isPeerEnabled(peerId); SyncReplicationState syncReplicationState = peerStorage.getPeerSyncReplicationState(peerId); SyncReplicationState newSyncReplicationState = - peerStorage.getPeerNewSyncReplicationState(peerId); + peerStorage.getPeerNewSyncReplicationState(peerId); return new ReplicationPeerImpl(ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf), - peerId, peerConfig, enabled, syncReplicationState, newSyncReplicationState); + peerId, peerConfig, enabled, syncReplicationState, newSyncReplicationState); } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java index d39a37eca8f0..af56ff11f588 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,16 +20,15 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This class is responsible for the parsing logic for a queue id representing a queue. - * It will extract the peerId if it's recovered as well as the dead region servers - * that were part of the queue's history. + * This class is responsible for the parsing logic for a queue id representing a queue. It will + * extract the peerId if it's recovered as well as the dead region servers that were part of the + * queue's history. 
*/ @InterfaceAudience.Private public class ReplicationQueueInfo { @@ -43,8 +41,8 @@ public class ReplicationQueueInfo { private List deadRegionServers = new ArrayList<>(); /** - * The passed queueId will be either the id of the peer or the handling story of that queue - * in the form of id-servername-* + * The passed queueId will be either the id of the peer or the handling story of that queue in the + * form of id-servername-* */ public ReplicationQueueInfo(String queueId) { this.queueId = queueId; @@ -62,9 +60,9 @@ public ReplicationQueueInfo(String queueId) { * "ip-10-46-221-101.ec2.internal", so we need skip some "-" during parsing for the following * cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-<server name>-... */ - private static void - extractDeadServersFromZNodeString(String deadServerListStr, List result) { - if(deadServerListStr == null || result == null || deadServerListStr.isEmpty()) { + private static void extractDeadServersFromZNodeString(String deadServerListStr, + List result) { + if (deadServerListStr == null || result == null || deadServerListStr.isEmpty()) { return; } @@ -79,10 +77,10 @@ public ReplicationQueueInfo(String queueId) { seenCommaCnt += 1; break; case '-': - if(seenCommaCnt>=2) { + if (seenCommaCnt >= 2) { if (i > startIndex) { String serverName = deadServerListStr.substring(startIndex, i); - if(ServerName.isFullServerName(serverName)){ + if (ServerName.isFullServerName(serverName)) { result.add(ServerName.valueOf(serverName)); } else { LOG.error("Found invalid server name:" + serverName); @@ -98,9 +96,9 @@ public ReplicationQueueInfo(String queueId) { } // add tail - if(startIndex < len - 1){ + if (startIndex < len - 1) { String serverName = deadServerListStr.substring(startIndex, len); - if(ServerName.isFullServerName(serverName)){ + if (ServerName.isFullServerName(serverName)) { result.add(ServerName.valueOf(serverName)); } else { LOG.error("Found invalid server name at the end:" + serverName); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java index 59278e9807d5..90f380e2367a 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Map; import java.util.Set; import java.util.SortedSet; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Pair; diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java index 462cfedd0a04..429b44bdb542 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java index e8ecec262bf6..daff451bbf4e 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,7 @@ public final class ReplicationUtils { public static final String RENAME_WAL_SUFFIX = ".ren"; public static final String LEGACY_REGION_REPLICATION_ENDPOINT_NAME = - "org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint"; + "org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint"; private ReplicationUtils() { } @@ -140,11 +140,11 @@ public static boolean isNamespacesAndTableCFsEqual(ReplicationPeerConfig rpc1, return false; } if (rpc1.replicateAllUserTables()) { - return isNamespacesEqual(rpc1.getExcludeNamespaces(), rpc2.getExcludeNamespaces()) && - isTableCFsEqual(rpc1.getExcludeTableCFsMap(), rpc2.getExcludeTableCFsMap()); + return isNamespacesEqual(rpc1.getExcludeNamespaces(), rpc2.getExcludeNamespaces()) + && isTableCFsEqual(rpc1.getExcludeTableCFsMap(), rpc2.getExcludeTableCFsMap()); } else { - return isNamespacesEqual(rpc1.getNamespaces(), rpc2.getNamespaces()) && - isTableCFsEqual(rpc1.getTableCFsMap(), rpc2.getTableCFsMap()); + return isNamespacesEqual(rpc1.getNamespaces(), rpc2.getNamespaces()) + && isTableCFsEqual(rpc1.getTableCFsMap(), rpc2.getTableCFsMap()); } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java index 09aeee55cca8..18ddfeb1adf4 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,16 +47,16 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase public static final String PEERS_STATE_ZNODE_DEFAULT = "peer-state"; public static final byte[] ENABLED_ZNODE_BYTES = - toByteArray(ReplicationProtos.ReplicationState.State.ENABLED); + toByteArray(ReplicationProtos.ReplicationState.State.ENABLED); public static final byte[] DISABLED_ZNODE_BYTES = - toByteArray(ReplicationProtos.ReplicationState.State.DISABLED); + toByteArray(ReplicationProtos.ReplicationState.State.DISABLED); public static final String SYNC_REPLICATION_STATE_ZNODE = "sync-rep-state"; public static final String NEW_SYNC_REPLICATION_STATE_ZNODE = "new-sync-rep-state"; public static final byte[] NONE_STATE_ZNODE_BYTES = - SyncReplicationState.toByteArray(SyncReplicationState.NONE); + SyncReplicationState.toByteArray(SyncReplicationState.NONE); /** * The name of the znode that contains the replication status of a remote slave (i.e. 
peer) @@ -107,10 +107,9 @@ public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean ena ZKUtil.createWithParents(zookeeper, peersZNode); ZKUtil.multiOrSequential(zookeeper, multiOps, false); } catch (KeeperException e) { - throw new ReplicationException( - "Could not add peer with id=" + peerId + ", peerConfig=>" + peerConfig + ", state=" + - (enabled ? "ENABLED" : "DISABLED") + ", syncReplicationState=" + syncReplicationState, - e); + throw new ReplicationException("Could not add peer with id=" + peerId + ", peerConfig=>" + + peerConfig + ", state=" + (enabled ? "ENABLED" : "DISABLED") + ", syncReplicationState=" + + syncReplicationState, e); } } @@ -141,7 +140,7 @@ public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) ReplicationPeerConfigUtil.toByteArray(peerConfig)); } catch (KeeperException e) { throw new ReplicationException( - "There was a problem trying to save changes to the " + "replication peer " + peerId, e); + "There was a problem trying to save changes to the " + "replication peer " + peerId, e); } } @@ -175,13 +174,13 @@ public ReplicationPeerConfig getPeerConfig(String peerId) throws ReplicationExce } if (data == null || data.length == 0) { throw new ReplicationException( - "Replication peer config data shouldn't be empty, peerId=" + peerId); + "Replication peer config data shouldn't be empty, peerId=" + peerId); } try { return ReplicationPeerConfigUtil.parsePeerFrom(data); } catch (DeserializationException e) { throw new ReplicationException( - "Failed to parse replication peer config for peer with id=" + peerId, e); + "Failed to parse replication peer config for peer with id=" + peerId, e); } } @@ -193,7 +192,7 @@ public void setPeerNewSyncReplicationState(String peerId, SyncReplicationState s SyncReplicationState.toByteArray(state)); } catch (KeeperException e) { throw new ReplicationException( - "Unable to set the new sync replication state for peer with id=" + peerId, e); + "Unable to set the new sync replication state for peer with id=" + peerId, e); } } @@ -208,7 +207,7 @@ public void transitPeerSyncReplicationState(String peerId) throws ReplicationExc false); } catch (KeeperException | InterruptedException e) { throw new ReplicationException( - "Error transiting sync replication state for peer with id=" + peerId, e); + "Error transiting sync replication state for peer with id=" + peerId, e); } } @@ -223,13 +222,14 @@ private SyncReplicationState getSyncReplicationState(String peerId, String path) return SyncReplicationState.NONE; } else { throw new ReplicationException( - "Replication peer sync state shouldn't be empty, peerId=" + peerId); + "Replication peer sync state shouldn't be empty, peerId=" + peerId); } } return SyncReplicationState.parseFrom(data); } catch (KeeperException | InterruptedException | IOException e) { throw new ReplicationException( - "Error getting sync replication state of path " + path + " for peer with id=" + peerId, e); + "Error getting sync replication state of path " + path + " for peer with id=" + peerId, + e); } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java index c51bdfcc283e..8c99855a2552 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* 
* Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -162,10 +162,10 @@ String getSerialReplicationRegionPeerNode(String encodedRegionName, String peerI "Invalid encoded region name: " + encodedRegionName + ", length should be 32."); } return new StringBuilder(regionsZNode).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 0, 2).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 2, 4).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 4, encodedRegionName.length()).append("-").append(peerId) - .toString(); + .append(encodedRegionName, 0, 2).append(ZNodePaths.ZNODE_PATH_SEPARATOR) + .append(encodedRegionName, 2, 4).append(ZNodePaths.ZNODE_PATH_SEPARATOR) + .append(encodedRegionName, 4, encodedRegionName.length()).append("-").append(peerId) + .toString(); } @Override @@ -198,8 +198,8 @@ public void removeWAL(ServerName serverName, String queueId, String fileName) } catch (NoNodeException e) { LOG.warn("{} already deleted when removing log", fileNode); } catch (KeeperException e) { - throw new ReplicationException("Failed to remove wal from queue (serverName=" + serverName + - ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + throw new ReplicationException("Failed to remove wal from queue (serverName=" + serverName + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } } @@ -350,12 +350,12 @@ public void removeLastSequenceIds(String peerId, List encodedRegionNames throws ReplicationException { try { List listOfOps = - encodedRegionNames.stream().map(n -> getSerialReplicationRegionPeerNode(n, peerId)) - .map(ZKUtilOp::deleteNodeFailSilent).collect(Collectors.toList()); + encodedRegionNames.stream().map(n -> getSerialReplicationRegionPeerNode(n, peerId)) + .map(ZKUtilOp::deleteNodeFailSilent).collect(Collectors.toList()); ZKUtil.multiOrSequential(zookeeper, listOfOps, true); } catch (KeeperException e) { - throw new ReplicationException("Failed to remove last sequence ids, peerId=" + peerId + - ", encodedRegionNames.size=" + encodedRegionNames.size(), e); + throw new ReplicationException("Failed to remove last sequence ids, peerId=" + peerId + + ", encodedRegionNames.size=" + encodedRegionNames.size(), e); } } @@ -366,14 +366,14 @@ public long getWALPosition(ServerName serverName, String queueId, String fileNam try { bytes = ZKUtil.getData(zookeeper, getFileNode(serverName, queueId, fileName)); } catch (KeeperException | InterruptedException e) { - throw new ReplicationException("Failed to get log position (serverName=" + serverName + - ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + throw new ReplicationException("Failed to get log position (serverName=" + serverName + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } try { return ZKUtil.parseWALPositionFrom(bytes); } catch (DeserializationException de) { - LOG.warn("Failed parse log position (serverName={}, queueId={}, fileName={})", - serverName, queueId, fileName); + LOG.warn("Failed parse log position (serverName={}, queueId={}, fileName={})", serverName, + queueId, fileName); } // if we can not parse the position, start at the beginning of the wal file again return 0; @@ -391,10 +391,8 @@ public Pair> claimQueue(ServerName sourceServerName, S try { ZKUtil.createWithParents(zookeeper, getRsNode(destServerName)); } catch (KeeperException e) { - throw new ReplicationException( - "Claim queue queueId=" + 
queueId + " from " + sourceServerName + " to " + destServerName + - " failed when creating the node for " + destServerName, - e); + throw new ReplicationException("Claim queue queueId=" + queueId + " from " + sourceServerName + + " to " + destServerName + " failed when creating the node for " + destServerName, e); } String newQueueId = queueId + "-" + sourceServerName; try { @@ -440,11 +438,11 @@ public Pair> claimQueue(ServerName sourceServerName, S // queue to tell the upper layer that claim nothing. For other types of exception should be // thrown out to notify the upper layer. LOG.info("Claim queue queueId={} from {} to {} failed with {}, someone else took the log?", - queueId,sourceServerName, destServerName, e.toString()); + queueId, sourceServerName, destServerName, e.toString()); return new Pair<>(newQueueId, Collections.emptySortedSet()); } catch (KeeperException | InterruptedException e) { - throw new ReplicationException("Claim queue queueId=" + queueId + " from " + - sourceServerName + " to " + destServerName + " failed", e); + throw new ReplicationException("Claim queue queueId=" + queueId + " from " + sourceServerName + + " to " + destServerName + " failed", e); } } @@ -478,8 +476,8 @@ public List getListOfReplicators() throws ReplicationException { private List getWALsInQueue0(ServerName serverName, String queueId) throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName, - queueId)); + List children = + ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName, queueId)); return children != null ? children : Collections.emptyList(); } @@ -521,7 +519,7 @@ protected int getQueuesZNodeCversion() throws KeeperException { * Therefore, we must update the cversion of root {@link #queuesZNode} when migrate wal nodes to * other queues. * @see #claimQueue(ServerName, String, ServerName) as an example of updating root - * {@link #queuesZNode} cversion. + * {@link #queuesZNode} cversion. 
*/ @Override public Set getAllWALs() throws ReplicationException { @@ -543,8 +541,8 @@ public Set getAllWALs() throws ReplicationException { if (v0 == v1) { return wals; } - LOG.info("Replication queue node cversion changed from %d to %d, retry = %d", - v0, v1, retry); + LOG.info("Replication queue node cversion changed from %d to %d, retry = %d", v0, v1, + retry); } } catch (KeeperException e) { throw new ReplicationException("Failed to get all wals", e); @@ -597,8 +595,8 @@ public void addHFileRefs(String peerId, List> pairs) List listOfOps = pairs.stream().map(p -> p.getSecond().getName()) .map(n -> getHFileNode(peerNode, n)) .map(f -> ZKUtilOp.createAndFailSilent(f, HConstants.EMPTY_BYTE_ARRAY)).collect(toList()); - LOG.debug("The multi list size for adding hfile references in zk for node {} is {}", - peerNode, listOfOps.size()); + LOG.debug("The multi list size for adding hfile references in zk for node {} is {}", peerNode, + listOfOps.size()); try { ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { @@ -613,8 +611,8 @@ public void removeHFileRefs(String peerId, List files) throws Replicatio List listOfOps = files.stream().map(n -> getHFileNode(peerNode, n)) .map(ZKUtilOp::deleteNodeFailSilent).collect(toList()); - LOG.debug("The multi list size for removing hfile references in zk for node {} is {}", - peerNode, listOfOps.size()); + LOG.debug("The multi list size for removing hfile references in zk for node {} is {}", peerNode, + listOfOps.size()); try { ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { @@ -638,8 +636,8 @@ public List getAllPeersFromHFileRefsQueue() throws ReplicationException } private List getReplicableHFiles0(String peerId) throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(this.zookeeper, - getHFileRefsPeerNode(peerId)); + List children = + ZKUtil.listChildrenNoWatch(this.zookeeper, getHFileRefsPeerNode(peerId)); return children != null ? children : Collections.emptyList(); } @@ -683,7 +681,7 @@ public Set getAllHFileRefs() throws ReplicationException { return hfileRefs; } LOG.debug("Replication hfile references node cversion changed from %d to %d, retry = %d", - v0, v1, retry); + v0, v1, retry); } } catch (KeeperException e) { throw new ReplicationException("Failed to get all hfile refs", e); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java index 596167f9abfc..002ce6481a85 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index 4bb1021b7a42..67e77c837596 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -79,11 +79,11 @@ public void testReplicationQueueStorage() throws ReplicationException { */ rqs.addWAL(server1, "qId1", "trash"); rqs.removeWAL(server1, "qId1", "trash"); - rqs.addWAL(server1,"qId2", "filename1"); - rqs.addWAL(server1,"qId3", "filename2"); - rqs.addWAL(server1,"qId3", "filename3"); - rqs.addWAL(server2,"trash", "trash"); - rqs.removeQueue(server2,"trash"); + rqs.addWAL(server1, "qId2", "filename1"); + rqs.addWAL(server1, "qId3", "filename2"); + rqs.addWAL(server1, "qId3", "filename3"); + rqs.addWAL(server2, "trash", "trash"); + rqs.removeQueue(server2, "trash"); List reps = rqs.getListOfReplicators(); assertEquals(2, reps.size()); @@ -105,10 +105,11 @@ public void testReplicationQueueStorage() throws ReplicationException { } private void removeAllQueues(ServerName serverName) throws ReplicationException { - for (String queue: rqs.getAllQueues(serverName)) { + for (String queue : rqs.getAllQueues(serverName)) { rqs.removeQueue(serverName, queue); } } + @Test public void testReplicationQueues() throws ReplicationException { // Initialize ReplicationPeer so we can add peers (we don't transfer lone queues) @@ -373,8 +374,8 @@ protected void populateQueues() throws ReplicationException { } // Add peers for the corresponding queues so they are not orphans rp.getPeerStorage().addPeer("qId" + i, - ReplicationPeerConfig.newBuilder(). - setClusterKey(MiniZooKeeperCluster.HOST + ":2818:/bogus" + i).build(), + ReplicationPeerConfig.newBuilder() + .setClusterKey(MiniZooKeeperCluster.HOST + ":2818:/bogus" + i).build(), true, SyncReplicationState.NONE); } } diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index 3d67bd37a6a7..8d1b4fae0dfd 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,8 +69,8 @@ private static String initPeerClusterState(String baseZKNode) Configuration testConf = new Configuration(conf); testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode); ZKWatcher zkw1 = new ZKWatcher(testConf, "test1", null); - String fakeRs = ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode, - "hostname1.example.org:1234"); + String fakeRs = + ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode, "hostname1.example.org:1234"); ZKUtil.createWithParents(zkw1, fakeRs); ZKClusterId.setClusterId(zkw1, new ClusterId()); return ZKConfig.getZooKeeperClusterKey(testConf); diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java index 211576173308..2e34f97d1853 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -131,8 +131,8 @@ private void assertMapEquals(Map> expected, } else { assertNotNull(actualCFs); assertEquals(expectedCFs.size(), actualCFs.size()); - for (Iterator expectedIt = expectedCFs.iterator(), actualIt = actualCFs.iterator(); - expectedIt.hasNext();) { + for (Iterator expectedIt = expectedCFs.iterator(), + actualIt = actualCFs.iterator(); expectedIt.hasNext();) { assertEquals(expectedIt.next(), actualIt.next()); } } @@ -244,31 +244,32 @@ public void testBaseReplicationPeerConfig() throws ReplicationException { Configuration conf = UTIL.getConfiguration(); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, - customPeerConfigKey.concat("=").concat(customPeerConfigValue).concat(";"). - concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); + customPeerConfigKey.concat("=").concat(customPeerConfigValue).concat(";") + .concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig - assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigSecondKey)); + assertEquals(customPeerConfigValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); + assertEquals(customPeerConfigSecondValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigSecondKey)); // validates base configs get updated values even if config already present conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, - customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";"). 
- concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); + customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";") + .concat(customPeerConfigSecondKey).concat("=") + .concat(customPeerConfigSecondUpdatedValue)); - ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); - assertEquals(customPeerConfigUpdatedValue, replicationPeerConfigAfterValueUpdate. - getConfiguration().get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondUpdatedValue, replicationPeerConfigAfterValueUpdate. - getConfiguration().get(customPeerConfigSecondKey)); + assertEquals(customPeerConfigUpdatedValue, + replicationPeerConfigAfterValueUpdate.getConfiguration().get(customPeerConfigKey)); + assertEquals(customPeerConfigSecondUpdatedValue, + replicationPeerConfigAfterValueUpdate.getConfiguration().get(customPeerConfigSecondKey)); } @Test @@ -284,26 +285,26 @@ public void testBaseReplicationRemovePeerConfig() throws ReplicationException { conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat(customPeerConfigValue)); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig - assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigKey)); + assertEquals(customPeerConfigValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat("")); - ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); assertNull(replicationPeerConfigRemoved.getConfiguration().get(customPeerConfigKey)); } @Test public void testBaseReplicationRemovePeerConfigWithNoExistingConfig() - throws ReplicationException { + throws ReplicationException { String customPeerConfigKey = "hbase.xxx.custom_config"; ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); @@ -313,8 +314,8 @@ public void testBaseReplicationRemovePeerConfigWithNoExistingConfig() conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat("")); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. 
- updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); assertNull(updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); } diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java index a2ca0d96fa57..aea2f4d39ce9 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-resource-bundle/pom.xml b/hbase-resource-bundle/pom.xml index ac0c77e53197..59c2b3a0f055 100644 --- a/hbase-resource-bundle/pom.xml +++ b/hbase-resource-bundle/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -36,15 +36,15 @@ true - + - - maven-assembly-plugin - - true - + + maven-assembly-plugin + + true + org.apache.maven.plugins diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 169542891fd0..3787c7bcaf76 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-rest Apache HBase - Rest HBase Rest Server - - - - - - ${project.build.directory} - - hbase-webapps/** - - - - - - src/test/resources - - **/** - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-antrun-plugin - - - - generate - generate-sources - - - - - - - - - - - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - jspcSource-packageInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - net.revelc.code - warbucks-maven-plugin - - - - com.sun.jersey - jersey-core + com.sun.jersey + jersey-core @@ -286,12 +186,12 @@ --> org.codehaus.jettison jettison - - - stax - stax-api - - + + + stax + stax-api + + @@ -378,6 +278,106 @@ test + + + + + + ${project.build.directory} + + hbase-webapps/** + + + + + + src/test/resources + + **/** + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-antrun-plugin + + + + generate + + run + + generate-sources + + + + + + + + + + + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + jspcSource-packageInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-sources/java + + + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + net.revelc.code + warbucks-maven-plugin + + + @@ -396,7 +396,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index 56bc9297f85f..af8b9e303bdf 100644 --- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import org.apache.yetus.audience.InterfaceAudience; @@ -29,7 +28,7 @@ public interface Constants { String VERSION_STRING = "0.0.3"; - int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours + int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours int DEFAULT_LISTEN_PORT = 8080; @@ -83,11 +82,13 @@ public interface Constants { String SCAN_FILTER = "filter"; String SCAN_REVERSED = "reversed"; String SCAN_CACHE_BLOCKS = "cacheblocks"; - String CUSTOM_FILTERS = "hbase.rest.custom.filters"; + String CUSTOM_FILTERS = "hbase.rest.custom.filters"; String ROW_KEYS_PARAM_NAME = "row"; - /** If this query parameter is present when processing row or scanner resources, - it disables server side block caching */ + /** + * If this query parameter is present when processing row or scanner resources, it disables server + * side block caching + */ String NOCACHE_PARAM_NAME = "nocache"; /** Configuration parameter to set rest client connection timeout */ diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java index 0a6fd0e1d5ac..d49e0769d843 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -53,19 +51,17 @@ public ExistsResource(TableResource tableResource) throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF, MIMETYPE_BINARY}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF, + MIMETYPE_BINARY }) public Response get(final @Context UriInfo uriInfo) { try { if (!tableResource.exists()) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } ResponseBuilder response = Response.ok(); response.cacheControl(cacheControl); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java index f1b2cea6e952..ef42e9344fa6 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - import org.apache.hadoop.hbase.rest.MetricsRESTSource; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsREST { @@ -34,23 +31,23 @@ public MetricsRESTSource getSource() { private MetricsRESTSource source; public MetricsREST() { - source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); + source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); } - + /** * @param inc How much to add to requests. */ public void incrementRequests(final int inc) { source.incrementRequests(inc); } - + /** * @param inc How much to add to sucessfulGetCount. */ public void incrementSucessfulGetRequests(final int inc) { source.incrementSucessfulGetRequests(inc); } - + /** * @param inc How much to add to sucessfulPutCount. */ @@ -64,7 +61,7 @@ public void incrementSucessfulPutRequests(final int inc) { public void incrementFailedPutRequests(final int inc) { source.incrementFailedPutRequests(inc); } - + /** * @param inc How much to add to failedGetCount. 
*/ diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index 2d097752bd9b..ee221a907562 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +45,6 @@ public class MultiRowResource extends ResourceBase implements Constants { /** * Constructor - * * @param tableResource * @param versions * @throws java.io.IOException @@ -87,15 +85,14 @@ public Response get(final @Context UriInfo uriInfo) { } } - ResultGenerator generator = - ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(this.tableResource.getName(), + rowSpec, null, !params.containsKey(NOCACHE_PARAM_NAME)); Cell value = null; RowModel rowModel = new RowModel(rowSpec.getRow()); if (generator.hasNext()) { while ((value = generator.next()) != null) { - rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil - .cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), + CellUtil.cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); } model.addRow(rowModel); } else { @@ -106,11 +103,10 @@ public Response get(final @Context UriInfo uriInfo) { } if (model.getRows().isEmpty()) { - //If no rows found. + // If no rows found. servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("No rows found." + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("No rows found." + CRLF).build(); } else { servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(model).build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java index 6156b8aaf979..9617c936f7ed 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -85,31 +84,30 @@ public NamespacesInstanceResource(String namespace, boolean queryTables) throws * @param context servlet context * @param uriInfo (JAX-RS context variable) request URL * @return A response containing NamespacesInstanceModel for a namespace descriptions and - * TableListModel for a list of namespace tables. + * TableListModel for a list of namespace tables. 
*/ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context ServletContext context, - final @Context UriInfo uriInfo) { + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); // Respond to list of namespace tables requests. - if(queryTables){ + if (queryTables) { TableListModel tableModel = new TableListModel(); - try{ + try { List tables = - servlet.getAdmin().listTableDescriptorsByNamespace(Bytes.toBytes(namespace)); + servlet.getAdmin().listTableDescriptorsByNamespace(Bytes.toBytes(namespace)); for (TableDescriptor table : tables) { tableModel.add(new TableModel(table.getTableName().getQualifierAsString())); } servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(tableModel).build(); - }catch(IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); throw new RuntimeException("Cannot retrieve table list for '" + namespace + "'."); } @@ -117,8 +115,7 @@ public Response get(final @Context ServletContext context, // Respond to namespace description requests. try { - NamespacesInstanceModel rowModel = - new NamespacesInstanceModel(servlet.getAdmin(), namespace); + NamespacesInstanceModel rowModel = new NamespacesInstanceModel(servlet.getAdmin(), namespace); servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(rowModel).build(); } catch (IOException e) { @@ -134,8 +131,7 @@ public Response get(final @Context ServletContext context, * @return response code. */ @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) public Response put(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { return processUpdate(model, true, uriInfo); } @@ -147,14 +143,11 @@ public Response put(final NamespacesInstanceModel model, final @Context UriInfo * @return response code. */ @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final NamespacesInstanceModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { return processUpdate(model, false, uriInfo); } - // Check that POST or PUT is valid and then update namespace. 
private Response processUpdate(NamespacesInstanceModel model, final boolean updateExisting, final UriInfo uriInfo) { @@ -164,7 +157,7 @@ private Response processUpdate(NamespacesInstanceModel model, final boolean upda if (model == null) { try { model = new NamespacesInstanceModel(namespace); - } catch(IOException ioe) { + } catch (IOException ioe) { servlet.getMetrics().incrementFailedPutRequests(1); throw new RuntimeException("Cannot retrieve info for '" + namespace + "'."); } @@ -182,25 +175,25 @@ private Response processUpdate(NamespacesInstanceModel model, final boolean upda try { admin = servlet.getAdmin(); namespaceExists = doesNamespaceExist(admin, namespace); - }catch (IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } // Do not allow creation if namespace already exists. - if(!updateExisting && namespaceExists){ + if (!updateExisting && namespaceExists) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' already exists. Use REST PUT " + - "to alter the existing namespace.").build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Namespace '" + + namespace + "' already exists. Use REST PUT " + "to alter the existing namespace.") + .build(); } // Do not allow altering if namespace does not exist. - if (updateExisting && !namespaceExists){ + if (updateExisting && !namespaceExists) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' does not exist. Use " + - "REST POST to create the namespace.").build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity( + "Namespace '" + namespace + "' does not exist. Use " + "REST POST to create the namespace.") + .build(); } return createOrUpdate(model, uriInfo, admin, updateExisting); @@ -211,32 +204,32 @@ private Response createOrUpdate(final NamespacesInstanceModel model, final UriIn final Admin admin, final boolean updateExisting) { NamespaceDescriptor.Builder builder = NamespaceDescriptor.create(namespace); builder.addConfiguration(model.getProperties()); - if(model.getProperties().size() > 0){ + if (model.getProperties().size() > 0) { builder.addConfiguration(model.getProperties()); } NamespaceDescriptor nsd = builder.build(); - try{ - if(updateExisting){ + try { + if (updateExisting) { admin.modifyNamespace(nsd); - }else{ + } else { admin.createNamespace(nsd); } - }catch (IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } servlet.getMetrics().incrementSucessfulPutRequests(1); - return updateExisting ? Response.ok(uriInfo.getAbsolutePath()).build() : - Response.created(uriInfo.getAbsolutePath()).build(); + return updateExisting ? 
Response.ok(uriInfo.getAbsolutePath()).build() + : Response.created(uriInfo.getAbsolutePath()).build(); } - private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException{ + private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException { NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); - for(int i = 0; i < nd.length; i++){ - if(nd[i].getName().equals(namespaceName)){ + for (int i = 0; i < nd.length; i++) { + if (nd[i].getName().equals(namespaceName)) { return true; } } @@ -250,8 +243,8 @@ private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOE * @return response code. */ @DELETE - public Response deleteNoBody(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response deleteNoBody(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { LOG.trace("DELETE " + uriInfo.getAbsolutePath()); } @@ -261,12 +254,12 @@ public Response deleteNoBody(final byte[] message, .entity("Forbidden" + CRLF).build(); } - try{ + try { Admin admin = servlet.getAdmin(); - if (!doesNamespaceExist(admin, namespace)){ - return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' does not exists. Cannot " + - "drop namespace.").build(); + if (!doesNamespaceExist(admin, namespace)) { + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Namespace '" + namespace + "' does not exists. Cannot " + "drop namespace.") + .build(); } admin.deleteNamespace(namespace); @@ -283,8 +276,8 @@ public Response deleteNoBody(final byte[] message, * Dispatch to NamespaceInstanceResource for getting list of tables. */ @Path("tables") - public NamespacesInstanceResource getNamespaceInstanceResource( - final @PathParam("tables") String namespace) throws IOException { + public NamespacesInstanceResource + getNamespaceInstanceResource(final @PathParam("tables") String namespace) throws IOException { return new NamespacesInstanceResource(this.namespace, true); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java index e458d463f672..c83d41cec5a2 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
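The NamespacesInstanceResource hunks above are whitespace and brace reflow only; the behaviour stays as before: GET on a namespace returns its description, GET on the "tables" sub-resource lists its tables, POST creates, PUT alters, and DELETE drops the namespace. A minimal client-side sketch of the two GET forms, assuming a REST server on localhost:8080, an existing namespace named "ns_demo", and the usual /namespaces base path (none of which are shown in this diff; "application/json" matches MIMETYPE_JSON in the @Produces list):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class NamespaceGetExample {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    String base = "http://localhost:8080/namespaces/ns_demo";

    // Namespace description (NamespacesInstanceModel).
    HttpRequest describe = HttpRequest.newBuilder(URI.create(base))
        .header("Accept", "application/json").GET().build();
    System.out.println(client.send(describe, HttpResponse.BodyHandlers.ofString()).body());

    // Tables in the namespace (TableListModel), served by the "tables" sub-resource.
    HttpRequest listTables = HttpRequest.newBuilder(URI.create(base + "/tables"))
        .header("Accept", "application/json").GET().build();
    System.out.println(client.send(listTables, HttpResponse.BodyHandlers.ofString()).body());
  }
}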
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -60,8 +58,8 @@ public NamespacesResource() throws IOException { * @return a response for a version request */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java index d5e4354e4391..57150d42b2ad 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Common interface for models capable of supporting protobuf marshalling - * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and - * ProtobufMessageBodyProducer adapters. + * Common interface for models capable of supporting protobuf marshalling and unmarshalling. Hooks + * up to the ProtobufMessageBodyConsumer and ProtobufMessageBodyProducer adapters. */ @InterfaceAudience.Private public interface ProtobufMessageHandler { @@ -41,6 +37,5 @@ public interface ProtobufMessageHandler { * @return reference to self for convenience * @throws IOException */ - ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException; + ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java index d1ba5b7dd827..3ce1ff5c7b29 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
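The ProtobufMessageHandler changes above are likewise comment and wrapping cleanup; the interface still pairs createProtobufOutput() with getObjectFromMessage(byte[]), which returns the model itself. A minimal round-trip sketch using CellSetModel as the implementing class (any of the REST model classes follows the same pattern):

import org.apache.hadoop.hbase.rest.model.CellSetModel;

public class ProtobufRoundTrip {
  public static void main(String[] args) throws Exception {
    CellSetModel model = new CellSetModel();
    byte[] wire = model.createProtobufOutput();   // marshal to protobuf bytes
    CellSetModel parsed = new CellSetModel();
    parsed.getObjectFromMessage(wire);            // unmarshal; returns reference to self
    System.out.println("round-tripped " + wire.length + " bytes, "
        + parsed.getRows().size() + " rows");
  }
}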
See the NOTICE file * distributed with this work for additional information @@ -50,15 +50,15 @@ protected ProtobufStreamingOutput(ResultScanner scanner, String type, int limit, this.limit = limit; this.fetchSize = fetchSize; if (LOG.isTraceEnabled()) { - LOG.trace("Created StreamingOutput with content type = " + this.contentType - + " user limit : " + this.limit + " scan fetch size : " + this.fetchSize); + LOG.trace("Created StreamingOutput with content type = " + this.contentType + " user limit : " + + this.limit + " scan fetch size : " + this.fetchSize); } } @Override public void write(OutputStream outStream) throws IOException, WebApplicationException { Result[] rowsToSend; - if(limit < fetchSize){ + if (limit < fetchSize) { rowsToSend = this.resultScanner.next(limit); writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream); } else { @@ -69,7 +69,7 @@ public void write(OutputStream outStream) throws IOException, WebApplicationExce } else { rowsToSend = this.resultScanner.next(this.fetchSize); } - if(rowsToSend.length == 0){ + if (rowsToSend.length == 0) { break; } count = count - rowsToSend.length; @@ -81,7 +81,7 @@ public void write(OutputStream outStream) throws IOException, WebApplicationExce private void writeToStream(CellSetModel model, String contentType, OutputStream outStream) throws IOException { byte[] objectBytes = model.createProtobufOutput(); - outStream.write(Bytes.toBytes((short)objectBytes.length)); + outStream.write(Bytes.toBytes((short) objectBytes.length)); outStream.write(objectBytes); outStream.flush(); if (LOG.isTraceEnabled()) { @@ -96,8 +96,8 @@ private CellSetModel createModelFromResults(Result[] results) { RowModel rModel = new RowModel(rowKey); List kvs = rs.listCells(); for (Cell kv : kvs) { - rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv - .getTimestamp(), CellUtil.cloneValue(kv))); + rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), + kv.getTimestamp(), CellUtil.cloneValue(kv))); } cellSetModel.addRow(rModel); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 9c9275844275..abf5a056375a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
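The ProtobufStreamingOutput hunks above keep the wire format unchanged: each batch of scanner results is marshalled into one CellSetModel protobuf message and written with a 2-byte big-endian length prefix (Bytes.toBytes((short) objectBytes.length)). A reader sketch for that framing, assuming the stream carries nothing besides these length-prefixed chunks:

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.hbase.rest.model.CellSetModel;

public final class StreamedCellSetReader {
  /** Reads 2-byte-length-prefixed CellSetModel protobuf chunks until the stream ends. */
  public static void readAll(InputStream in) throws IOException {
    DataInputStream dis = new DataInputStream(in);
    while (true) {
      int len;
      try {
        len = dis.readShort() & 0xFFFF; // matches Bytes.toBytes((short) objectBytes.length)
      } catch (EOFException eof) {
        return; // no more chunks
      }
      byte[] chunk = new byte[len];
      dis.readFully(chunk);
      CellSetModel model = new CellSetModel();
      model.getObjectFromMessage(chunk); // unmarshal one batch of rows
      System.out.println("batch with " + model.getRows().size() + " rows");
    }
  }
}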
*/ - package org.apache.hadoop.hbase.rest; import java.lang.management.ManagementFactory; @@ -88,14 +87,14 @@ public class RESTServer implements Constants { static final String REST_CSRF_ENABLED_KEY = "hbase.rest.csrf.enabled"; static final boolean REST_CSRF_ENABLED_DEFAULT = false; boolean restCSRFEnabled = false; - static final String REST_CSRF_CUSTOM_HEADER_KEY ="hbase.rest.csrf.custom.header"; + static final String REST_CSRF_CUSTOM_HEADER_KEY = "hbase.rest.csrf.custom.header"; static final String REST_CSRF_CUSTOM_HEADER_DEFAULT = "X-XSRF-HEADER"; static final String REST_CSRF_METHODS_TO_IGNORE_KEY = "hbase.rest.csrf.methods.to.ignore"; static final String REST_CSRF_METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; public static final String SKIP_LOGIN_KEY = "hbase.rest.skip.login"; static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k static final String HTTP_HEADER_CACHE_SIZE = "hbase.rest.http.header.cache.size"; - static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE -1; + static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE - 1; private static final String PATH_SPEC_ANY = "/*"; @@ -103,12 +102,12 @@ public class RESTServer implements Constants { // HTTP OPTIONS method is commonly used in REST APIs for negotiation. So it is enabled by default. private static boolean REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT = true; static final String REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY = - "hbase.rest-csrf.browser-useragents-regex"; + "hbase.rest-csrf.browser-useragents-regex"; // HACK, making this static for AuthFilter to get at our configuration. Necessary for unit tests. @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value={"ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL"}, - justification="For testing") + value = { "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL" }, + justification = "For testing") public static Configuration conf = null; private final UserProvider userProvider; private Server server; @@ -122,16 +121,17 @@ public RESTServer(Configuration conf) { private static void printUsageAndExit(Options options, int exitCode) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("hbase rest start", "", options, - "\nTo run the REST server as a daemon, execute " + - "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", true); + "\nTo run the REST server as a daemon, execute " + + "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", + true); System.exit(exitCode); } void addCSRFFilter(ServletContextHandler ctxHandler, Configuration conf) { restCSRFEnabled = conf.getBoolean(REST_CSRF_ENABLED_KEY, REST_CSRF_ENABLED_DEFAULT); if (restCSRFEnabled) { - Map restCsrfParams = RestCsrfPreventionFilter - .getFilterParams(conf, "hbase.rest-csrf."); + Map restCsrfParams = + RestCsrfPreventionFilter.getFilterParams(conf, "hbase.rest-csrf."); FilterHolder holder = new FilterHolder(); holder.setName("csrf"); holder.setClassName(RestCsrfPreventionFilter.class.getName()); @@ -149,8 +149,8 @@ private void addClickjackingPreventionFilter(ServletContextHandler ctxHandler, ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class)); } - private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, - Configuration conf, boolean isSecure) { + private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, Configuration conf, + boolean isSecure) { FilterHolder holder = new FilterHolder(); holder.setName("securityheaders"); 
holder.setClassName(SecurityHeadersFilter.class.getName()); @@ -159,13 +159,12 @@ private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, } // login the server principal (if using secure Hadoop) - private static Pair> loginServerPrincipal( - UserProvider userProvider, Configuration conf) throws Exception { + private static Pair> + loginServerPrincipal(UserProvider userProvider, Configuration conf) throws Exception { Class containerClass = ServletContainer.class; if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) { - String machineName = Strings.domainNamePointerToHostName( - DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), - conf.get(REST_DNS_NAMESERVER, "default"))); + String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); String keytabFilename = conf.get(REST_KEYTAB_FILE); Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(), REST_KEYTAB_FILE + " should be set if security is enabled"); @@ -181,7 +180,7 @@ private static Pair> loginServer FilterHolder authFilter = new FilterHolder(); authFilter.setClassName(AuthFilter.class.getName()); authFilter.setName("AuthenticationFilter"); - return new Pair<>(authFilter,containerClass); + return new Pair<>(authFilter, containerClass); } } return new Pair<>(null, containerClass); @@ -190,8 +189,8 @@ private static Pair> loginServer private static void parseCommandLine(String[] args, Configuration conf) { Options options = new Options(); options.addOption("p", "port", true, "Port to bind to [default: " + DEFAULT_LISTEN_PORT + "]"); - options.addOption("ro", "readonly", false, "Respond only to GET HTTP " + - "method requests [default: false]"); + options.addOption("ro", "readonly", false, + "Respond only to GET HTTP " + "method requests [default: false]"); options.addOption("i", "infoport", true, "Port for WEB UI"); CommandLine commandLine = null; @@ -250,20 +249,19 @@ private static void parseCommandLine(String[] args, Configuration conf) { } } - /** * Runs the REST server. */ public synchronized void run() throws Exception { - Pair> pair = loginServerPrincipal( - userProvider, conf); + Pair> pair = + loginServerPrincipal(userProvider, conf); FilterHolder authFilter = pair.getFirst(); Class containerClass = pair.getSecond(); RESTServlet servlet = RESTServlet.getInstance(conf, userProvider); // set up the Jersey servlet container for Jetty - ResourceConfig application = new ResourceConfig(). - packages("org.apache.hadoop.hbase.rest").register(JacksonJaxbJsonProvider.class); + ResourceConfig application = new ResourceConfig().packages("org.apache.hadoop.hbase.rest") + .register(JacksonJaxbJsonProvider.class); // Using our custom ServletContainer is tremendously important. This is what makes sure the // UGI.doAs() is done for the remoteUser, and calls are not made as the REST server itself. ServletContainer servletContainer = ReflectionUtils.newInstance(containerClass, application); @@ -279,23 +277,24 @@ public synchronized void run() throws Exception { // Use the default queue (unbounded with Jetty 9.3) if the queue size is negative, otherwise use // bounded {@link ArrayBlockingQueue} with the given size int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1); - int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); - QueuedThreadPool threadPool = queueSize > 0 ? 
- new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) : - new QueuedThreadPool(maxThreads, minThreads, idleTimeout); + int idleTimeout = + servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); + QueuedThreadPool threadPool = queueSize > 0 + ? new QueuedThreadPool(maxThreads, minThreads, idleTimeout, + new ArrayBlockingQueue<>(queueSize)) + : new QueuedThreadPool(maxThreads, minThreads, idleTimeout); this.server = new Server(threadPool); // Setup JMX - MBeanContainer mbContainer=new MBeanContainer(ManagementFactory.getPlatformMBeanServer()); + MBeanContainer mbContainer = new MBeanContainer(ManagementFactory.getPlatformMBeanServer()); server.addEventListener(mbContainer); server.addBean(mbContainer); - String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"); int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080); - int httpHeaderCacheSize = servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, - DEFAULT_HTTP_HEADER_CACHE_SIZE); + int httpHeaderCacheSize = + servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, DEFAULT_HTTP_HEADER_CACHE_SIZE); HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSecureScheme("https"); httpConfig.setSecurePort(servicePort); @@ -315,49 +314,48 @@ public synchronized void run() throws Exception { SslContextFactory sslCtxFactory = new SslContextFactory(); String keystore = conf.get(REST_SSL_KEYSTORE_STORE); String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE); - String password = HBaseConfiguration.getPassword(conf, - REST_SSL_KEYSTORE_PASSWORD, null); - String keyPassword = HBaseConfiguration.getPassword(conf, - REST_SSL_KEYSTORE_KEYPASSWORD, password); + String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null); + String keyPassword = + HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password); sslCtxFactory.setKeyStorePath(keystore); - if(StringUtils.isNotBlank(keystoreType)) { + if (StringUtils.isNotBlank(keystoreType)) { sslCtxFactory.setKeyStoreType(keystoreType); } sslCtxFactory.setKeyStorePassword(password); sslCtxFactory.setKeyManagerPassword(keyPassword); String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE); - if(StringUtils.isNotBlank(trustStore)) { + if (StringUtils.isNotBlank(trustStore)) { sslCtxFactory.setTrustStorePath(trustStore); } String trustStorePassword = - HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null); - if(StringUtils.isNotBlank(trustStorePassword)) { + HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null); + if (StringUtils.isNotBlank(trustStorePassword)) { sslCtxFactory.setTrustStorePassword(trustStorePassword); } String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE); - if(StringUtils.isNotBlank(trustStoreType)) { + if (StringUtils.isNotBlank(trustStoreType)) { sslCtxFactory.setTrustStoreType(trustStoreType); } - String[] excludeCiphers = servlet.getConfiguration().getStrings( - REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); + String[] excludeCiphers = servlet.getConfiguration() + .getStrings(REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); if (excludeCiphers.length != 0) { sslCtxFactory.setExcludeCipherSuites(excludeCiphers); } - String[] includeCiphers = servlet.getConfiguration().getStrings( - REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); + String[] includeCiphers = servlet.getConfiguration() + .getStrings(REST_SSL_INCLUDE_CIPHER_SUITES, 
ArrayUtils.EMPTY_STRING_ARRAY); if (includeCiphers.length != 0) { sslCtxFactory.setIncludeCipherSuites(includeCiphers); } - String[] excludeProtocols = servlet.getConfiguration().getStrings( - REST_SSL_EXCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + String[] excludeProtocols = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_PROTOCOLS, + ArrayUtils.EMPTY_STRING_ARRAY); if (excludeProtocols.length != 0) { sslCtxFactory.setExcludeProtocols(excludeProtocols); } - String[] includeProtocols = servlet.getConfiguration().getStrings( - REST_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + String[] includeProtocols = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_PROTOCOLS, + ArrayUtils.EMPTY_STRING_ARRAY); if (includeProtocols.length != 0) { sslCtxFactory.setIncludeProtocols(includeProtocols); } @@ -381,15 +379,16 @@ public synchronized void run() throws Exception { server.setStopAtShutdown(true); // set up context - ServletContextHandler ctxHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); + ServletContextHandler ctxHandler = + new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); ctxHandler.addServlet(sh, PATH_SPEC_ANY); if (authFilter != null) { ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); } // Load filters from configuration. - String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES, - GzipFilter.class.getName()); + String[] filterClasses = + servlet.getConfiguration().getStrings(FILTER_CLASSES, GzipFilter.class.getName()); for (String filter : filterClasses) { filter = filter.trim(); ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java index 6c71bb6222e0..c58255c8bac6 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.ParseFilter; @@ -32,6 +27,9 @@ import org.apache.hadoop.hbase.util.JvmPauseMonitor; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Singleton class encapsulating global REST servlet state and functions. 
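Functionally the RESTServer hunks above are unchanged: the server is still assembled from a QueuedThreadPool, an optional SslContextFactory, the Jersey ServletContainer and the filters loaded from configuration. A minimal embedded-start sketch, assuming no Kerberos security; the hbase.rest.port key appears in the diff, while the read-only key name is an assumption, and in normal operation the daemon is launched via hbase-daemon.sh as the usage text above notes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.RESTServer;

public class EmbeddedRestServer {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.rest.port", 8080);         // service port read in run()
    conf.setBoolean("hbase.rest.readonly", true); // assumed key name: serve GET requests only
    RESTServer server = new RESTServer(conf);     // constructor also publishes the static conf
    server.run();                                 // builds Jetty + Jersey and starts them
  }
}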
@@ -58,7 +56,7 @@ UserGroupInformation getRealUser() { * @return the RESTServlet singleton instance */ public synchronized static RESTServlet getInstance() { - assert(INSTANCE != null); + assert (INSTANCE != null); return INSTANCE; } @@ -75,8 +73,8 @@ public ConnectionCache getConnectionCache() { * @return the RESTServlet singleton instance * @throws IOException */ - public synchronized static RESTServlet getInstance(Configuration conf, - UserProvider userProvider) throws IOException { + public synchronized static RESTServlet getInstance(Configuration conf, UserProvider userProvider) + throws IOException { if (INSTANCE == null) { INSTANCE = new RESTServlet(conf, userProvider); } @@ -96,16 +94,14 @@ public synchronized static void stop() { * @param userProvider the login user provider * @throws IOException */ - RESTServlet(final Configuration conf, - final UserProvider userProvider) throws IOException { + RESTServlet(final Configuration conf, final UserProvider userProvider) throws IOException { this.realUser = userProvider.getCurrent().getUGI(); this.conf = conf; registerCustomFilter(conf); int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000); int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000); - connectionCache = new ConnectionCache( - conf, userProvider, cleanInterval, maxIdleTime); + connectionCache = new ConnectionCache(conf, userProvider, cleanInterval, maxIdleTime); if (supportsProxyuser()) { ProxyUsers.refreshSuperUserGroupsConfiguration(conf); } @@ -136,8 +132,7 @@ MetricsREST getMetrics() { } /** - * Helper method to determine if server should - * only respond to GET HTTP method requests. + * Helper method to determine if server should only respond to GET HTTP method requests. * @return boolean for server read-only state */ boolean isReadOnly() { @@ -166,8 +161,7 @@ private void registerCustomFilter(Configuration conf) { for (String filterClass : filterList) { String[] filterPart = filterClass.split(":"); if (filterPart.length != 2) { - LOG.warn( - "Invalid filter specification " + filterClass + " - skipping"); + LOG.warn("Invalid filter specification " + filterClass + " - skipping"); } else { ParseFilter.registerFilter(filterPart[0], filterPart[1]); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java index 28cf4cba9fa7..a8b7b78aff77 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; +import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase; + import java.io.IOException; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; @@ -31,11 +31,10 @@ import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; -import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase; /** - * REST servlet container. It is used to get the remote request user - * without going through @HttpContext, so that we can minimize code changes. + * REST servlet container. It is used to get the remote request user without going + * through @HttpContext, so that we can minimize code changes. */ @InterfaceAudience.Private public class RESTServletContainer extends ServletContainer { @@ -46,13 +45,12 @@ public RESTServletContainer(ResourceConfig config) { } /** - * This container is used only if authentication and - * impersonation is enabled. The remote request user is used - * as a proxy user for impersonation in invoking any REST service. + * This container is used only if authentication and impersonation is enabled. The remote request + * user is used as a proxy user for impersonation in invoking any REST service. */ @Override - public void service(final HttpServletRequest request, - final HttpServletResponse response) throws ServletException, IOException { + public void service(final HttpServletRequest request, final HttpServletResponse response) + throws ServletException, IOException { final HttpServletRequest lowerCaseRequest = toLowerCase(request); final String doAsUserFromQuery = lowerCaseRequest.getParameter("doas"); RESTServlet servlet = RESTServlet.getInstance(); @@ -69,7 +67,7 @@ public void service(final HttpServletRequest request, // validate the proxy user authorization try { ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf); - } catch(AuthorizationException e) { + } catch (AuthorizationException e) { throw new ServletException(e.getMessage()); } servlet.setEffectiveUser(doAsUserFromQuery); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index 784894e27571..02197a1515d4 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
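The RESTServletContainer hunks above preserve the impersonation flow: with authentication and proxy-user support enabled, the container lower-cases the request parameters, reads "doas", authorizes it through ProxyUsers.authorize and then executes the call as that user. A request-shape sketch only; host, port, resource path and user are placeholders, and a real deployment also needs the authentication layer (for example SPNEGO) that this snippet does not set up:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DoAsRequestExample {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    // "doas" is the query parameter RESTServletContainer#service reads (case-insensitively).
    HttpRequest req = HttpRequest
        .newBuilder(URI.create("http://rest-host:8080/version/cluster?doas=alice"))
        .GET().build();
    System.out.println(client.send(req, HttpResponse.BodyHandlers.ofString()).statusCode());
  }
}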
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -67,8 +65,8 @@ public RegionsResource(TableResource tableResource) throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -83,14 +81,14 @@ public Response get(final @Context UriInfo uriInfo) { List locs; try (Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration()); - RegionLocator locator = connection.getRegionLocator(tableName)) { + RegionLocator locator = connection.getRegionLocator(tableName)) { locs = locator.getAllRegionLocations(); } for (HRegionLocation loc : locs) { RegionInfo hri = loc.getRegion(); ServerName addr = loc.getServerName(); model.add(new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(), - hri.getStartKey(), hri.getEndKey(), addr.getAddress().toString())); + hri.getStartKey(), hri.getEndKey(), addr.getAddress().toString())); } ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); @@ -98,14 +96,12 @@ public Response get(final @Context UriInfo uriInfo) { return response.build(); } catch (TableNotFoundException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java index 9beb69df682b..a597b491af68 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
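RegionsResource#get above still resolves the region list through a short-lived Connection and RegionLocator before adding one TableRegionModel per region. The same lookup done directly against the client API, as a sketch that assumes a reachable cluster configuration on the classpath and an existing table named "mytable":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        RegionLocator locator = connection.getRegionLocator(TableName.valueOf("mytable"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Same data the REST model exposes: region boundaries plus the hosting server.
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " on " + loc.getServerName());
      }
    }
  }
}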
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -32,7 +31,7 @@ public class ResourceBase implements Constants { RESTServlet servlet; - Class accessDeniedClazz; + Class accessDeniedClazz; public ResourceBase() throws IOException { servlet = RESTServlet.getInstance(); @@ -41,53 +40,42 @@ public ResourceBase() throws IOException { } catch (ClassNotFoundException e) { } } - + protected Response processException(Throwable exp) { Throwable curr = exp; - if(accessDeniedClazz != null) { - //some access denied exceptions are buried + if (accessDeniedClazz != null) { + // some access denied exceptions are buried while (curr != null) { - if(accessDeniedClazz.isAssignableFrom(curr.getClass())) { + if (accessDeniedClazz.isAssignableFrom(curr.getClass())) { throw new WebApplicationException( - Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } curr = curr.getCause(); } } - //TableNotFound may also be buried one level deep - if (exp instanceof TableNotFoundException || - exp.getCause() instanceof TableNotFoundException) { + // TableNotFound may also be buried one level deep + if (exp instanceof TableNotFoundException || exp.getCause() instanceof TableNotFoundException) { throw new WebApplicationException( - Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } - if (exp instanceof NoSuchColumnFamilyException){ + if (exp instanceof NoSuchColumnFamilyException) { throw new WebApplicationException( - Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } if (exp instanceof RuntimeException) { throw new WebApplicationException( - Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } if (exp instanceof RetriesExhaustedException) { RetriesExhaustedException retryException = (RetriesExhaustedException) exp; processException(retryException.getCause()); } throw new WebApplicationException( - Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java index 41135a814f38..a2d7ab3944a3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java @@ -1,5 +1,4 @@ 
/* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,19 +19,16 @@ import java.io.IOException; import java.util.Iterator; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.rest.model.ScannerModel; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public abstract class ResultGenerator implements Iterator { - public static ResultGenerator fromRowSpec(final String table, - final RowSpec rowspec, final Filter filter, final boolean cacheBlocks) - throws IOException { + public static ResultGenerator fromRowSpec(final String table, final RowSpec rowspec, + final Filter filter, final boolean cacheBlocks) throws IOException { if (rowspec.isSingleRow()) { return new RowResultGenerator(table, rowspec, filter, cacheBlocks); } else { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java index 3f5e1e1f6f82..814c72125438 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -60,15 +58,15 @@ public RootResource() throws IOException { private final TableListModel getTableList() throws IOException { TableListModel tableList = new TableListModel(); TableName[] tableNames = servlet.getAdmin().listTableNames(); - for (TableName name: tableNames) { + for (TableName name : tableNames) { tableList.add(new TableModel(name.getNameAsString())); } return tableList; } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -86,8 +84,7 @@ public Response get(final @Context UriInfo uriInfo) { } @Path("status/cluster") - public StorageClusterStatusResource getClusterStatusResource() - throws IOException { + public StorageClusterStatusResource getClusterStatusResource() throws IOException { return new StorageClusterStatusResource(); } @@ -97,8 +94,7 @@ public VersionResource getVersionResource() throws IOException { } @Path("{table}") - public TableResource getTableResource( - final @PathParam("table") String table) throws IOException { + public TableResource getTableResource(final @PathParam("table") String table) throws IOException { return new TableResource(table); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index ae8cc90eaba0..eb75f76e8964 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more 
contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -79,8 +77,8 @@ public class RowResource extends ResourceBase { * @param returnResult * @throws IOException */ - public RowResource(TableResource tableResource, String rowspec, - String versions, String check, String returnResult) throws IOException { + public RowResource(TableResource tableResource, String rowspec, String versions, String check, + String returnResult) throws IOException { super(); this.tableResource = tableResource; this.rowspec = new RowSpec(rowspec); @@ -94,8 +92,7 @@ public RowResource(TableResource tableResource, String rowspec, } @GET - @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -103,14 +100,12 @@ public Response get(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); MultivaluedMap params = uriInfo.getQueryParameters(); try { - ResultGenerator generator = - ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, + null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } int count = 0; CellSetModel model = new CellSetModel(); @@ -124,7 +119,7 @@ public Response get(final @Context UriInfo uriInfo) { rowModel = new RowModel(rowKey); } rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), - value.getTimestamp(), CellUtil.cloneValue(value))); + value.getTimestamp(), CellUtil.cloneValue(value))); if (++count > rowspec.getMaxValues()) { break; } @@ -143,7 +138,7 @@ public Response get(final @Context UriInfo uriInfo) { @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); // doesn't make sense to use a non specific coordinate as this can only @@ -151,24 +146,22 @@ public Response getBinary(final @Context UriInfo uriInfo) { if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) { servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) - .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " + - "in the row. Using the 'Accept' header with one of these formats lets you " + - "retrieve the entire row if it has multiple columns: " + - // Same as the @Produces list for the get method. 
- MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " + - MIMETYPE_PROTOBUF + ", " + MIMETYPE_PROTOBUF_IETF + - CRLF).build(); + .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " + + "in the row. Using the 'Accept' header with one of these formats lets you " + + "retrieve the entire row if it has multiple columns: " + + // Same as the @Produces list for the get method. + MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " + MIMETYPE_PROTOBUF + ", " + + MIMETYPE_PROTOBUF_IETF + CRLF) + .build(); } MultivaluedMap params = uriInfo.getQueryParameters(); try { - ResultGenerator generator = - ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, + null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } Cell value = generator.next(); ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); @@ -185,9 +178,8 @@ Response update(final CellSetModel model, final boolean replace) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } if (CHECK_PUT.equalsIgnoreCase(check)) { @@ -199,29 +191,27 @@ Response update(final CellSetModel model, final boolean replace) { } else if (CHECK_INCREMENT.equalsIgnoreCase(check)) { return increment(model); } else if (check != null && check.length() > 0) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Invalid check value '" + check + "'" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Invalid check value '" + check + "'" + CRLF).build(); } Table table = null; try { List rows = model.getRows(); List puts = new ArrayList<>(); - for (RowModel row: rows) { + for (RowModel row : rows) { byte[] key = row.getKey(); if (key == null) { key = rowspec.getRow(); } if (key == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key not specified." + CRLF).build(); } Put put = new Put(key); int i = 0; - for (CellModel cell: row.getCells()) { + for (CellModel cell : row.getCells()) { byte[] col = cell.getColumn(); if (col == null) try { col = rowspec.getColumns()[i++]; @@ -230,24 +220,17 @@ Response update(final CellSetModel model, final boolean replace) { } if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(cell.getTimestamp()) - .setType(Type.Put) - .setValue(cell.getValue()) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()) + .setType(Type.Put).setValue(cell.getValue()).build()); } puts.add(put); if (LOG.isTraceEnabled()) { @@ -272,14 +255,12 @@ Response update(final CellSetModel model, final boolean replace) { } // This currently supports only update of one row at a time. - Response updateBinary(final byte[] message, final HttpHeaders headers, - final boolean replace) { + Response updateBinary(final byte[] message, final HttpHeaders headers, final boolean replace) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } Table table = null; try { @@ -304,25 +285,18 @@ Response updateBinary(final byte[] message, final HttpHeaders headers, } if (column == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } Put put = new Put(row); byte parts[][] = CellUtil.parseColumn(column); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(timestamp) - .setType(Type.Put) - .setValue(message) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(timestamp).setType(Type.Put) + .setValue(message).build()); table = servlet.getTable(tableResource.getName()); table.put(put); if (LOG.isTraceEnabled()) { @@ -343,45 +317,39 @@ Response updateBinary(final byte[] message, final HttpHeaders headers, } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final CellSetModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final CellSetModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("PUT " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, true); } @PUT @Consumes(MIMETYPE_BINARY) - public Response putBinary(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response putBinary(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { - LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } return updateBinary(message, headers, true); } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final CellSetModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final CellSetModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("POST " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); + LOG.trace("POST " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, false); } @POST @Consumes(MIMETYPE_BINARY) - public Response postBinary(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response postBinary(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { - LOG.trace("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY); + LOG.trace("POST " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } return updateBinary(message, headers, false); } @@ -394,17 +362,14 @@ public Response delete(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + 
.entity("Forbidden" + CRLF).build(); } Delete delete = null; - if (rowspec.hasTimestamp()) - delete = new Delete(rowspec.getRow(), rowspec.getTimestamp()); - else - delete = new Delete(rowspec.getRow()); + if (rowspec.hasTimestamp()) delete = new Delete(rowspec.getRow(), rowspec.getTimestamp()); + else delete = new Delete(rowspec.getRow()); - for (byte[] column: rowspec.getColumns()) { + for (byte[] column : rowspec.getColumns()) { byte[][] split = CellUtil.parseColumn(column); if (rowspec.hasTimestamp()) { if (split.length == 1) { @@ -412,9 +377,8 @@ public Response delete(final @Context UriInfo uriInfo) { } else if (split.length == 2) { delete.addColumns(split[0], split[1], rowspec.getTimestamp()); } else { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } } else { if (split.length == 1) { @@ -422,9 +386,8 @@ public Response delete(final @Context UriInfo uriInfo) { } else if (split.length == 2) { delete.addColumns(split[0], split[1]); } else { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } } } @@ -450,9 +413,8 @@ public Response delete(final @Context UriInfo uriInfo) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes checkAndPut on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes + * checkAndPut on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -476,9 +438,7 @@ Response checkAndPut(final CellSetModel model) { int cellModelCount = cellModels.size(); if (key == null || cellModelCount <= 1) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response - .status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT) + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity( "Bad request: Either row key is null or no data found for columns specified." + CRLF) .build(); @@ -494,34 +454,26 @@ Response checkAndPut(final CellSetModel model) { // Copy all the cells to the Put request // and track if the check cell's latest value is also sent - for (int i = 0, n = cellModelCount - 1; i < n ; i++) { + for (int i = 0, n = cellModelCount - 1; i < n; i++) { CellModel cell = cellModels.get(i); byte[] col = cell.getColumn(); if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(cell.getTimestamp()) - .setType(Type.Put) - .setValue(cell.getValue()) - .build()); - if(Bytes.equals(col, - valueToCheckCell.getColumn())) { + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()) + .setType(Type.Put).setValue(cell.getValue()).build()); + if (Bytes.equals(col, valueToCheckCell.getColumn())) { valueToPutCell = cell; } } @@ -532,13 +484,12 @@ Response checkAndPut(final CellSetModel model) { .entity("Bad request: The column to put and check do not match." + CRLF).build(); } else { retValue = table.checkAndMutate(key, valueToPutParts[0]).qualifier(valueToPutParts[1]) - .ifEquals(valueToCheckCell.getValue()).thenPut(put); + .ifEquals(valueToCheckCell.getValue()).thenPut(put); } } else { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } if (LOG.isTraceEnabled()) { @@ -546,9 +497,8 @@ Response checkAndPut(final CellSetModel model) { } if (!retValue) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Value not Modified" + CRLF).build(); } ResponseBuilder response = Response.ok(); servlet.getMetrics().incrementSucessfulPutRequests(1); @@ -566,9 +516,8 @@ Response checkAndPut(final CellSetModel model) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes checkAndDelete on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes + * checkAndDelete on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -579,9 +528,8 @@ Response checkAndDelete(final CellSetModel model) { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -590,9 +538,8 @@ Response checkAndDelete(final CellSetModel model) { } if (key == null) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." 
+ CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } List cellModels = rowModel.getCells(); @@ -600,31 +547,29 @@ Response checkAndDelete(final CellSetModel model) { delete = new Delete(key); boolean retValue; - CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1); + CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount - 1); byte[] valueToDeleteColumn = valueToDeleteCell.getColumn(); if (valueToDeleteColumn == null) { try { valueToDeleteColumn = rowspec.getColumns()[0]; } catch (final ArrayIndexOutOfBoundsException e) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column not specified for check." + CRLF).build(); } } - byte[][] parts ; + byte[][] parts; // Copy all the cells to the Delete request if extra cells are sent - if(cellModelCount > 1) { + if (cellModelCount > 1) { for (int i = 0, n = cellModelCount - 1; i < n; i++) { CellModel cell = cellModels.get(i); byte[] col = cell.getColumn(); if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." + CRLF).build(); } parts = CellUtil.parseColumn(col); @@ -636,10 +581,8 @@ Response checkAndDelete(final CellSetModel model) { delete.addColumn(parts[0], parts[1], cell.getTimestamp()); } else { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT) - .entity("Bad request: Column to delete incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column to delete incorrectly specified." + CRLF).build(); } } } @@ -649,36 +592,33 @@ Response checkAndDelete(final CellSetModel model) { if (parts[1].length != 0) { // To support backcompat of deleting a cell // if that is the only cell passed to the rest api - if(cellModelCount == 1) { + if (cellModelCount == 1) { delete.addColumns(parts[0], parts[1]); } retValue = table.checkAndMutate(key, parts[0]).qualifier(parts[1]) .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); } else { // The case of empty qualifier. - if(cellModelCount == 1) { + if (cellModelCount == 1) { delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY)); } - retValue = table.checkAndMutate(key, parts[0]) - .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); + retValue = table.checkAndMutate(key, parts[0]).ifEquals(valueToDeleteCell.getValue()) + .thenDelete(delete); } } else { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column to check incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column to check incorrectly specified." 
+ CRLF).build(); } if (LOG.isTraceEnabled()) { - LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " - + retValue); + LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " + retValue); } if (!retValue) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity(" Delete check failed." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity(" Delete check failed." + CRLF).build(); } ResponseBuilder response = Response.ok(); servlet.getMetrics().incrementSucessfulDeleteRequests(1); @@ -696,9 +636,8 @@ Response checkAndDelete(final CellSetModel model) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes Append on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes Append on + * HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -709,9 +648,8 @@ Response append(final CellSetModel model) { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -720,15 +658,14 @@ Response append(final CellSetModel model) { } if (key == null) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } append = new Append(key); append.setReturnResults(returnResult); int i = 0; - for (CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -739,16 +676,14 @@ Response append(final CellSetModel model) { } if (col == null) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." + CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } append.addColumn(parts[0], parts[1], cell.getValue()); } @@ -760,16 +695,15 @@ Response append(final CellSetModel model) { if (returnResult) { if (result.isEmpty()) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Append return empty." 
+ CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Append return empty." + CRLF).build(); } CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + cell.getTimestamp(), CellUtil.cloneValue(cell))); } rModel.addRow(rRowModel); servlet.getMetrics().incrementSucessfulAppendRequests(1); @@ -790,9 +724,8 @@ Response append(final CellSetModel model) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes Increment on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes Increment + * on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -803,9 +736,8 @@ Response increment(final CellSetModel model) { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -814,15 +746,14 @@ Response increment(final CellSetModel model) { } if (key == null) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } increment = new Increment(key); increment.setReturnResults(returnResult); int i = 0; - for (CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -833,18 +764,17 @@ Response increment(final CellSetModel model) { } if (col == null) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." + CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." 
+ CRLF).build(); } - increment.addColumn(parts[0], parts[1], Long.parseLong(Bytes.toStringBinary(cell.getValue()))); + increment.addColumn(parts[0], parts[1], + Long.parseLong(Bytes.toStringBinary(cell.getValue()))); } if (LOG.isDebugEnabled()) { @@ -855,16 +785,15 @@ Response increment(final CellSetModel model) { if (returnResult) { if (result.isEmpty()) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Increment return empty." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Increment return empty." + CRLF).build(); } CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + cell.getTimestamp(), CellUtil.cloneValue(cell))); } rModel.addRow(rowModel); servlet.getMetrics().incrementSucessfulIncrementRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java index 3d81c414867d..844b217b7618 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import java.io.IOException; import java.util.Iterator; import java.util.NoSuchElementException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -30,11 +28,8 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.security.AccessDeniedException; - import org.apache.hadoop.util.StringUtils; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,9 +40,8 @@ public class RowResultGenerator extends ResultGenerator { private Iterator valuesI; private Cell cache; - public RowResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public RowResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final boolean cacheBlocks) throws IllegalArgumentException, IOException { try (Table table = RESTServlet.getInstance().getTable(tableName)) { Get get = new Get(rowspec.getRow()); if (rowspec.hasColumns()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java index c510c9ed797d..2798544406d8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.UnsupportedEncodingException; @@ -26,22 +24,19 @@ import java.util.Collections; import java.util.List; import java.util.TreeSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Parses a path based row/column/timestamp specification into its component - * elements. + * Parses a path based row/column/timestamp specification into its component elements. *

    - * */ @InterfaceAudience.Private public class RowSpec { public static final long DEFAULT_START_TIMESTAMP = 0; public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE; - + private byte[] row = HConstants.EMPTY_START_ROW; private byte[] endRow = null; private TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); @@ -62,8 +57,7 @@ public RowSpec(String path) throws IllegalArgumentException { i = parseQueryParams(path, i); } - private int parseRowKeys(final String path, int i) - throws IllegalArgumentException { + private int parseRowKeys(final String path, int i) throws IllegalArgumentException { String startRow = null, endRow = null; try { StringBuilder sb = new StringBuilder(); @@ -76,10 +70,8 @@ private int parseRowKeys(final String path, int i) String row = startRow = sb.toString(); int idx = startRow.indexOf(','); if (idx != -1) { - startRow = URLDecoder.decode(row.substring(0, idx), - HConstants.UTF8_ENCODING); - endRow = URLDecoder.decode(row.substring(idx + 1), - HConstants.UTF8_ENCODING); + startRow = URLDecoder.decode(row.substring(0, idx), HConstants.UTF8_ENCODING); + endRow = URLDecoder.decode(row.substring(idx + 1), HConstants.UTF8_ENCODING); } else { startRow = URLDecoder.decode(row, HConstants.UTF8_ENCODING); } @@ -93,13 +85,11 @@ private int parseRowKeys(final String path, int i) // table scanning if (startRow.charAt(startRow.length() - 1) == '*') { if (endRow != null) - throw new IllegalArgumentException("invalid path: start row "+ - "specified with wildcard"); - this.row = Bytes.toBytes(startRow.substring(0, - startRow.lastIndexOf("*"))); + throw new IllegalArgumentException("invalid path: start row " + "specified with wildcard"); + this.row = Bytes.toBytes(startRow.substring(0, startRow.lastIndexOf("*"))); this.endRow = new byte[this.row.length + 1]; System.arraycopy(this.row, 0, this.endRow, 0, this.row.length); - this.endRow[this.row.length] = (byte)255; + this.endRow[this.row.length] = (byte) 255; } else { this.row = Bytes.toBytes(startRow.toString()); if (endRow != null) { @@ -145,8 +135,7 @@ private int parseColumns(final String path, int i) throws IllegalArgumentExcepti return i; } - private int parseTimestamp(final String path, int i) - throws IllegalArgumentException { + private int parseTimestamp(final String path, int i) throws IllegalArgumentException { if (i >= path.length()) { return i; } @@ -163,8 +152,7 @@ private int parseTimestamp(final String path, int i) i++; } try { - time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), - HConstants.UTF8_ENCODING)); + time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); } @@ -176,8 +164,7 @@ private int parseTimestamp(final String path, int i) i++; } try { - time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), - HConstants.UTF8_ENCODING)); + time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); } @@ -206,8 +193,7 @@ private int parseQueryParams(final String path, int i) { } StringBuilder query = new StringBuilder(); try { - query.append(URLDecoder.decode(path.substring(i), - HConstants.UTF8_ENCODING)); + query.append(URLDecoder.decode(path.substring(i), HConstants.UTF8_ENCODING)); } catch (UnsupportedEncodingException e) { // should not happen throw new RuntimeException(e); @@ -234,39 +220,41 @@ private int parseQueryParams(final String path, int i) { break; } switch (what) { - 
case 'm': { - StringBuilder sb = new StringBuilder(); - while (j <= query.length()) { - c = query.charAt(j); - if (c < '0' || c > '9') { - j--; - break; + case 'm': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || c > '9') { + j--; + break; + } + sb.append(c); } - sb.append(c); + maxVersions = Integer.parseInt(sb.toString()); } - maxVersions = Integer.parseInt(sb.toString()); - } break; - case 'n': { - StringBuilder sb = new StringBuilder(); - while (j <= query.length()) { - c = query.charAt(j); - if (c < '0' || c > '9') { - j--; - break; + break; + case 'n': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || c > '9') { + j--; + break; + } + sb.append(c); } - sb.append(c); + maxValues = Integer.parseInt(sb.toString()); } - maxValues = Integer.parseInt(sb.toString()); - } break; - default: - throw new IllegalArgumentException("unknown parameter '" + c + "'"); + break; + default: + throw new IllegalArgumentException("unknown parameter '" + c + "'"); } } return i; } - public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, - long startTime, long endTime, int maxVersions) { + public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, long startTime, long endTime, + int maxVersions) { this.row = startRow; this.endRow = endRow; if (columns != null) { @@ -277,15 +265,16 @@ public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, this.maxVersions = maxVersions; } - public RowSpec(byte[] startRow, byte[] endRow, Collection columns, - long startTime, long endTime, int maxVersions, Collection labels) { + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, long startTime, + long endTime, int maxVersions, Collection labels) { this(startRow, endRow, columns, startTime, endTime, maxVersions); - if(labels != null) { + if (labels != null) { this.labels.addAll(labels); } } - public RowSpec(byte[] startRow, byte[] endRow, Collection columns, - long startTime, long endTime, int maxVersions) { + + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, long startTime, + long endTime, int maxVersions) { this.row = startRow; this.endRow = endRow; if (columns != null) { @@ -319,7 +308,7 @@ public void setMaxValues(final int maxValues) { public boolean hasColumns() { return !columns.isEmpty(); } - + public boolean hasLabels() { return !labels.isEmpty(); } @@ -347,7 +336,7 @@ public void addColumn(final byte[] column) { public byte[][] getColumns() { return columns.toArray(new byte[columns.size()][]); } - + public List getLabels() { return labels; } @@ -384,11 +373,11 @@ public String toString() { result.append(Bytes.toString(row)); } result.append("', endRow => '"); - if (endRow != null) { + if (endRow != null) { result.append(Bytes.toString(endRow)); } result.append("', columns => ["); - for (byte[] col: columns) { + for (byte[] col : columns) { result.append(" '"); result.append(Bytes.toString(col)); result.append("'"); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java index 4bbc2cf11261..bcc2ac49ddc3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -44,8 +42,7 @@ @InterfaceAudience.Private public class ScannerInstanceResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(ScannerInstanceResource.class); + private static final Logger LOG = LoggerFactory.getLogger(ScannerInstanceResource.class); static CacheControl cacheControl; static { @@ -58,29 +55,28 @@ public class ScannerInstanceResource extends ResourceBase { String id = null; int batch = 1; - public ScannerInstanceResource() throws IOException { } + public ScannerInstanceResource() throws IOException { + } - public ScannerInstanceResource(String table, String id, - ResultGenerator generator, int batch) throws IOException { + public ScannerInstanceResource(String table, String id, ResultGenerator generator, int batch) + throws IOException { this.id = id; this.generator = generator; this.batch = batch; } @GET - @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context UriInfo uriInfo, - @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) { + @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context UriInfo uriInfo, @QueryParam("n") int maxRows, + final @QueryParam("c") int maxValues) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); if (generator == null) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } else { // Updated the connection access time for each client next() call RESTServlet.getInstance().getConnectionCache().updateConnectionAccessTime(); @@ -104,15 +100,13 @@ public Response get(final @Context UriInfo uriInfo, servlet.getMetrics().incrementFailedDeleteRequests(1); } servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.GONE) - .type(MIMETYPE_TEXT).entity("Gone" + CRLF) - .build(); + return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT).entity("Gone" + CRLF) + .build(); } catch (IllegalArgumentException e) { Throwable t = e.getCause(); if (t instanceof TableNotFoundException) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } throw e; } @@ -144,8 +138,7 @@ public Response get(final @Context UriInfo uriInfo, rowKey = CellUtil.cloneRow(value); rowModel = new RowModel(rowKey); } - rowModel.addCell( - new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); } while (--count > 0); model.addRow(rowModel); @@ -159,8 +152,7 @@ public Response get(final @Context UriInfo uriInfo, @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + 
- MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); try { @@ -173,10 +165,10 @@ public Response getBinary(final @Context UriInfo uriInfo) { } ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); response.cacheControl(cacheControl); - response.header("X-Row", Bytes.toString(Base64.getEncoder().encode( - CellUtil.cloneRow(value)))); + response.header("X-Row", + Bytes.toString(Base64.getEncoder().encode(CellUtil.cloneRow(value)))); response.header("X-Column", Bytes.toString(Base64.getEncoder().encode( - CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))))); + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))))); response.header("X-Timestamp", value.getTimestamp()); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); @@ -187,9 +179,8 @@ public Response getBinary(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementFailedDeleteRequests(1); } servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.GONE) - .type(MIMETYPE_TEXT).entity("Gone" + CRLF) - .build(); + return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT).entity("Gone" + CRLF) + .build(); } } @@ -200,9 +191,8 @@ public Response delete(final @Context UriInfo uriInfo) { } servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } if (ScannerResource.delete(id)) { servlet.getMetrics().incrementSucessfulDeleteRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java index cd3af0bf9f5c..119d11c75a85 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import com.fasterxml.jackson.core.JsonParseException; @@ -48,8 +46,8 @@ public class ScannerResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(ScannerResource.class); - static final Map scanners = - Collections.synchronizedMap(new HashMap()); + static final Map scanners = + Collections.synchronizedMap(new HashMap()); TableResource tableResource; @@ -58,7 +56,7 @@ public class ScannerResource extends ResourceBase { * @param tableResource * @throws IOException */ - public ScannerResource(TableResource tableResource)throws IOException { + public ScannerResource(TableResource tableResource) throws IOException { super(); this.tableResource = tableResource; } @@ -73,13 +71,11 @@ static boolean delete(final String id) { } } - Response update(final ScannerModel model, final boolean replace, - final UriInfo uriInfo) { + Response update(final ScannerModel model, final boolean replace, final UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } byte[] endRow = model.hasEndRow() ? model.getEndRow() : null; RowSpec spec = null; @@ -94,12 +90,11 @@ Response update(final ScannerModel model, final boolean replace, try { Filter filter = ScannerResultGenerator.buildFilterFromModel(model); String tableName = tableResource.getName(); - ScannerResultGenerator gen = - new ScannerResultGenerator(tableName, spec, filter, model.getCaching(), - model.getCacheBlocks(), model.getLimit()); + ScannerResultGenerator gen = new ScannerResultGenerator(tableName, spec, filter, + model.getCaching(), model.getCacheBlocks(), model.getLimit()); String id = gen.getID(); ScannerInstanceResource instance = - new ScannerInstanceResource(tableName, id, gen, model.getBatch()); + new ScannerInstanceResource(tableName, id, gen, model.getBatch()); scanners.put(id, instance); if (LOG.isTraceEnabled()) { LOG.trace("new scanner: " + id); @@ -112,26 +107,21 @@ Response update(final ScannerModel model, final boolean replace, LOG.error("Exception occurred while processing " + uriInfo.getAbsolutePath() + " : ", e); servlet.getMetrics().incrementFailedPutRequests(1); if (e instanceof TableNotFoundException) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } else if (e instanceof RuntimeException || e instanceof JsonMappingException | e instanceof JsonParseException) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); - } - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); + } + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final ScannerModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response 
put(final ScannerModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -139,10 +129,8 @@ public Response put(final ScannerModel model, } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final ScannerModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final ScannerModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("POST " + uriInfo.getAbsolutePath()); } @@ -150,8 +138,8 @@ public Response post(final ScannerModel model, } @Path("{scanner: .+}") - public ScannerInstanceResource getScannerInstanceResource( - final @PathParam("scanner") String id) throws IOException { + public ScannerInstanceResource getScannerInstanceResource(final @PathParam("scanner") String id) + throws IOException { ScannerInstanceResource instance = scanners.get(id); if (instance == null) { servlet.getMetrics().incrementFailedGetRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java index 4a4e10efb029..70a5a0be2a5d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; import java.util.Iterator; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableNotEnabledException; @@ -43,11 +40,9 @@ @InterfaceAudience.Private public class ScannerResultGenerator extends ResultGenerator { - private static final Logger LOG = - LoggerFactory.getLogger(ScannerResultGenerator.class); + private static final Logger LOG = LoggerFactory.getLogger(ScannerResultGenerator.class); - public static Filter buildFilterFromModel(final ScannerModel model) - throws Exception { + public static Filter buildFilterFromModel(final ScannerModel model) throws Exception { String filter = model.getFilter(); if (filter == null || filter.length() == 0) { return null; @@ -61,20 +56,18 @@ public static Filter buildFilterFromModel(final ScannerModel model) private ResultScanner scanner; private Result cached; - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final boolean cacheBlocks) throws IllegalArgumentException, IOException { this(tableName, rowspec, filter, -1, cacheBlocks); } - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final int caching, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final int caching, final boolean cacheBlocks) throws IllegalArgumentException, IOException { this(tableName, rowspec, filter, caching, cacheBlocks, -1); } - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final int caching ,final boolean cacheBlocks, int limit) throws IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final int caching, final boolean cacheBlocks, int limit) throws IOException { Table table = RESTServlet.getInstance().getTable(tableName); try { Scan scan; @@ -85,7 +78,7 @@ public ScannerResultGenerator(final String tableName, final RowSpec rowspec, } if (rowspec.hasColumns()) { byte[][] columns = rowspec.getColumns(); - for (byte[] column: columns) { + for (byte[] column : columns) { byte[][] split = CellUtil.parseColumn(column); if (split.length == 1) { scan.addFamily(split[0]); @@ -101,7 +94,7 @@ public ScannerResultGenerator(final String tableName, final RowSpec rowspec, if (filter != null) { scan.setFilter(filter); } - if (caching > 0 ) { + if (caching > 0) { scan.setCaching(caching); } if (limit > 0) { @@ -113,8 +106,8 @@ public ScannerResultGenerator(final String tableName, final RowSpec rowspec, } scanner = table.getScanner(scan); cached = null; - id = Long.toString(EnvironmentEdgeManager.currentTime()) + - Integer.toHexString(scanner.hashCode()); + id = Long.toString(EnvironmentEdgeManager.currentTime()) + + Integer.toHexString(scanner.hashCode()); } finally { table.close(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java index 0cdc2867f351..39c9756d2be0 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java 
@@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -78,16 +78,15 @@ private TableDescriptor getTableSchema() throws IOException, TableNotFoundExcept } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { - ResponseBuilder response = - Response.ok(new TableSchemaModel(getTableSchema())); + ResponseBuilder response = Response.ok(new TableSchemaModel(getTableSchema())); response.cacheControl(cacheControl); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); @@ -100,19 +99,17 @@ public Response get(final @Context UriInfo uriInfo) { private Response replace(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, final Admin admin) { if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } try { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(name); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(name); for (Map.Entry e : model.getAny().entrySet()) { tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } for (ColumnSchemaModel family : model.getColumns()) { ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName())); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName())); for (Map.Entry e : family.getAny().entrySet()) { columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString()); @@ -131,9 +128,8 @@ private Response replace(final TableName name, final TableSchemaModel model, servlet.getMetrics().incrementSucessfulPutRequests(1); } catch (TableExistsException e) { // race, someone else created a table with the same name - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Not modified" + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Not modified" + CRLF).build(); } } return Response.created(uriInfo.getAbsolutePath()).build(); @@ -144,21 +140,20 @@ private Response replace(final TableName name, final TableSchemaModel model, } } - private Response update(final TableName name, final TableSchemaModel model, - final UriInfo uriInfo, final Admin admin) { + private Response update(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, + final Admin admin) { if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } try { TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(admin.getDescriptor(name)); + TableDescriptorBuilder.newBuilder(admin.getDescriptor(name)); admin.disableTable(name); try { for 
(ColumnSchemaModel family : model.getColumns()) { ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName())); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName())); for (Map.Entry e : family.getAny().entrySet()) { columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString()); @@ -172,9 +167,8 @@ private Response update(final TableName name, final TableSchemaModel model, } } } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } finally { admin.enableTable(TableName.valueOf(tableResource.getName())); } @@ -207,10 +201,8 @@ private Response update(final TableSchemaModel model, final boolean replace, } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final TableSchemaModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final TableSchemaModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -219,10 +211,8 @@ public Response put(final TableSchemaModel model, } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final TableSchemaModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final TableSchemaModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -230,8 +220,8 @@ public Response post(final TableSchemaModel model, return update(model, false, uriInfo); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", - justification="Expected") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DE_MIGHT_IGNORE", + justification = "Expected") @DELETE public Response delete(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { @@ -246,7 +236,8 @@ public Response delete(final @Context UriInfo uriInfo) { Admin admin = servlet.getAdmin(); try { admin.disableTable(TableName.valueOf(tableResource.getName())); - } catch (TableNotEnabledException e) { /* this is what we want anyway */ } + } catch (TableNotEnabledException e) { + /* this is what we want anyway */ } admin.deleteTable(TableName.valueOf(tableResource.getName())); servlet.getMetrics().incrementSucessfulDeleteRequests(1); return Response.ok().build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java index d60b8eed6600..2c549300188b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -43,8 +41,7 @@ @InterfaceAudience.Private public class StorageClusterStatusResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(StorageClusterStatusResource.class); + private static final Logger LOG = LoggerFactory.getLogger(StorageClusterStatusResource.class); static CacheControl cacheControl; static { @@ -62,47 +59,41 @@ public StorageClusterStatusResource() throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { - ClusterMetrics status = servlet.getAdmin().getClusterMetrics( - EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)); + ClusterMetrics status = servlet.getAdmin() + .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)); StorageClusterStatusModel model = new StorageClusterStatusModel(); model.setRegions(status.getRegionCount()); model.setRequests(status.getRequestCount()); model.setAverageLoad(status.getAverageLoad()); - for (Map.Entry entry: status.getLiveServerMetrics().entrySet()) { + for (Map.Entry entry : status.getLiveServerMetrics().entrySet()) { ServerName sn = entry.getKey(); ServerMetrics load = entry.getValue(); StorageClusterStatusModel.Node node = - model.addLiveNode( - sn.getHostname() + ":" + - Integer.toString(sn.getPort()), - sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE), - (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE)); + model.addLiveNode(sn.getHostname() + ":" + Integer.toString(sn.getPort()), + sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE), + (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE)); node.setRequests(load.getRequestCount()); - for (RegionMetrics region: load.getRegionMetrics().values()) { - node.addRegion(region.getRegionName(), region.getStoreCount(), - region.getStoreFileCount(), + for (RegionMetrics region : load.getRegionMetrics().values()) { + node.addRegion(region.getRegionName(), region.getStoreCount(), region.getStoreFileCount(), (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE), (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE), (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE), - region.getReadRequestCount(), - region.getCpRequestCount(), - region.getWriteRequestCount(), + region.getReadRequestCount(), region.getCpRequestCount(), region.getWriteRequestCount(), (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE), (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE), (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE), - region.getCompactingCellCount(), - region.getCompactedCellCount()); + region.getCompactingCellCount(), region.getCompactedCellCount()); } } - for (ServerName name: status.getDeadServerNames()) { + for (ServerName name : status.getDeadServerNames()) { model.addDeadNode(name.toString()); } ResponseBuilder response = Response.ok(model); @@ -111,9 +102,8 @@ public Response get(final @Context UriInfo uriInfo) { return response.build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - 
.type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java index ffa17e442394..7cfc52e4d9ad 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -37,8 +35,7 @@ @InterfaceAudience.Private public class StorageClusterVersionResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(StorageClusterVersionResource.class); + private static final Logger LOG = LoggerFactory.getLogger(StorageClusterVersionResource.class); static CacheControl cacheControl; static { @@ -56,7 +53,7 @@ public StorageClusterVersionResource() throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -65,17 +62,15 @@ public Response get(final @Context UriInfo uriInfo) { try { StorageClusterVersionModel model = new StorageClusterVersionModel(); model.setVersion( - servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION)) - .getHBaseVersion()); + servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION)).getHBaseVersion()); ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index a7a40b859a04..6166253db67c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -100,10 +98,9 @@ public MultiRowResource getMultipleRowResource(final @QueryParam("v") String ver public RowResource getRowResource( // We need the @Encoded decorator so Jersey won't urldecode before // the RowSpec constructor has a chance to parse - final @PathParam("rowspec") @Encoded String rowspec, - final @QueryParam("v") String versions, - final @QueryParam("check") String check, - final @QueryParam("rr") String returnResult) throws IOException { + final @PathParam("rowspec") @Encoded String rowspec, final @QueryParam("v") String versions, + final @QueryParam("check") String check, final @QueryParam("rr") String returnResult) + throws IOException { return new RowResource(this, rowspec, versions, check, returnResult); } @@ -112,17 +109,15 @@ public RowResource getRowResourceWithSuffixGlobbing( // We need the @Encoded decorator so Jersey won't urldecode before // the RowSpec constructor has a chance to parse final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec, - final @QueryParam("v") String versions, - final @QueryParam("check") String check, + final @QueryParam("v") String versions, final @QueryParam("check") String check, final @QueryParam("rr") String returnResult) throws IOException { return new RowResource(this, suffixglobbingspec, versions, check, returnResult); } @Path("{scanspec: .*[*]$}") - public TableScanResource getScanResource( - final @PathParam("scanspec") String scanSpec, - @DefaultValue(Integer.MAX_VALUE + "") - @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit, + public TableScanResource getScanResource(final @PathParam("scanspec") String scanSpec, + @DefaultValue(Integer.MAX_VALUE + + "") @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit, @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow, @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow, @QueryParam(Constants.SCAN_COLUMN) List column, @@ -159,7 +154,7 @@ public TableScanResource getScanResource( } tableScan.withStopRow(Bytes.toBytes(endRow)); for (String col : column) { - byte [][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim())); + byte[][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim())); if (parts.length == 1) { if (LOG.isTraceEnabled()) { LOG.trace("Scan family : " + Bytes.toStringBinary(parts[0])); @@ -167,8 +162,8 @@ public TableScanResource getScanResource( tableScan.addFamily(parts[0]); } else if (parts.length == 2) { if (LOG.isTraceEnabled()) { - LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0]) - + " " + Bytes.toStringBinary(parts[1])); + LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0]) + " " + + Bytes.toStringBinary(parts[1])); } tableScan.addColumn(parts[0], parts[1]); } else { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java index d31a346757b8..cadddc54dc13 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,7 +47,7 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.UriInfo; @InterfaceAudience.Private -public class TableScanResource extends ResourceBase { +public class TableScanResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(TableScanResource.class); TableResource tableResource; @@ -108,18 +107,16 @@ public RowModel next() { @GET @Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) - public Response getProtobuf( - final @Context UriInfo uriInfo, + public Response getProtobuf(final @Context UriInfo uriInfo, final @HeaderParam("Accept") String contentType) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + - MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); try { int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10); - StreamingOutput stream = new ProtobufStreamingOutput(this.results, contentType, - userRequestedLimit, fetchSize); + StreamingOutput stream = + new ProtobufStreamingOutput(this.results, contentType, userRequestedLimit, fetchSize); servlet.getMetrics().incrementSucessfulScanRequests(1); ResponseBuilder response = Response.ok(stream); response.header("content-type", contentType); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java index e12ff9907b86..2868c046d2c2 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -69,10 +67,9 @@ public VersionResource() throws IOException { * @return a response for a version request */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context ServletContext context, - final @Context UriInfo uriInfo) { + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } @@ -87,8 +84,7 @@ public Response get(final @Context ServletContext context, * Dispatch to StorageClusterVersionResource */ @Path("cluster") - public StorageClusterVersionResource getClusterVersionResource() - throws IOException { + public StorageClusterVersionResource getClusterVersionResource() throws IOException { return new StorageClusterVersionResource(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 7459f8af0ad7..d6814a2fb196 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.client; import java.io.BufferedInputStream; @@ -68,8 +66,8 @@ import org.slf4j.LoggerFactory; /** - * A wrapper around HttpClient which provides some useful function and - * semantics for interacting with the REST gateway. + * A wrapper around HttpClient which provides some useful function and semantics for interacting + * with the REST gateway. */ @InterfaceAudience.Public public class Client { @@ -111,11 +109,10 @@ private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, Constants.DEFAULT_REST_CLIENT_CONN_TIMEOUT); int socketTimeout = this.conf.getInt(Constants.REST_CLIENT_SOCKET_TIMEOUT, Constants.DEFAULT_REST_CLIENT_SOCKET_TIMEOUT); - RequestConfig requestConfig = RequestConfig.custom() - .setConnectTimeout(connTimeout) - .setSocketTimeout(socketTimeout) - .setNormalizeUri(false) // URIs should not be normalized, see HBASE-26903 - .build(); + RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(connTimeout) + .setSocketTimeout(socketTimeout).setNormalizeUri(false) // URIs should not be normalized, + // see HBASE-26903 + .build(); httpClientBuilder.setDefaultRequestConfig(requestConfig); // Since HBASE-25267 we don't use the deprecated DefaultHttpClient anymore. @@ -124,10 +121,10 @@ private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, // automatic content compression. 
httpClientBuilder.disableContentCompression(); - if(sslEnabled && trustStore.isPresent()) { + if (sslEnabled && trustStore.isPresent()) { try { SSLContext sslcontext = - SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build(); + SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build(); httpClientBuilder.setSSLContext(sslcontext); } catch (NoSuchAlgorithmException | KeyStoreException | KeyManagementException e) { throw new ClientTrustStoreInitializationException("Error while processing truststore", e); @@ -166,12 +163,10 @@ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { /** * Constructor, allowing to define custom trust store (only for SSL connections) - * * @param cluster the cluster definition * @param trustStorePath custom trust store to use for SSL connections * @param trustStorePassword password to use for custom trust store * @param trustStoreType type of custom trust store - * * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded */ public Client(Cluster cluster, String trustStorePath, Optional trustStorePassword, @@ -181,7 +176,6 @@ public Client(Cluster cluster, String trustStorePath, Optional trustStor /** * Constructor, allowing to define custom trust store (only for SSL connections) - * * @param cluster the cluster definition * @param conf Configuration * @param trustStorePath custom trust store to use for SSL connections @@ -201,12 +195,12 @@ public Client(Cluster cluster, Configuration conf, String trustStorePath, } catch (KeyStoreException e) { throw new ClientTrustStoreInitializationException("Invalid trust store type: " + type, e); } - try (InputStream inputStream = new BufferedInputStream( - Files.newInputStream(new File(trustStorePath).toPath()))) { + try (InputStream inputStream = + new BufferedInputStream(Files.newInputStream(new File(trustStorePath).toPath()))) { trustStore.load(inputStream, password); } catch (CertificateException | NoSuchAlgorithmException | IOException e) { throw new ClientTrustStoreInitializationException("Trust store load error: " + trustStorePath, - e); + e); } initialize(cluster, conf, true, Optional.of(trustStore)); @@ -226,9 +220,8 @@ public HttpClient getHttpClient() { } /** - * Add extra headers. These extra headers will be applied to all http - * methods before they are removed. If any header is not used any more, - * client needs to remove it explicitly. + * Add extra headers. These extra headers will be applied to all http methods before they are + * removed. If any header is not used any more, client needs to remove it explicitly. */ public void addExtraHeader(final String name, final String value) { extraHeaders.put(name, value); @@ -256,11 +249,10 @@ public void removeExtraHeader(final String name) { } /** - * Execute a transaction method given only the path. Will select at random - * one of the members of the supplied cluster definition and iterate through - * the list until a transaction can be successfully completed. The - * definition of success here is a complete HTTP transaction, irrespective - * of result code. + * Execute a transaction method given only the path. Will select at random one of the members of + * the supplied cluster definition and iterate through the list until a transaction can be + * successfully completed. The definition of success here is a complete HTTP transaction, + * irrespective of result code. 
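The constructors above allow a custom trust store for SSL connections, and addExtraHeader/removeExtraHeader attach a header to every request until it is removed. A hedged sketch, assuming the two Optional parameters are Optional<String> (the generics are not visible in the hunk) and using made-up host, path and header values.

import java.util.Optional;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;

public class SecureClientExample {
  public static void main(String[] args) {
    Cluster cluster = new Cluster().add("rest-gw.example.com", 8443);
    // Trust-store constructor from this patch; Optional<String> is assumed for both
    // the password and the store type, and the path/password are placeholders.
    Client client = new Client(cluster, "/etc/hbase/rest-truststore.jks",
      Optional.of("changeit"), Optional.of("JKS"));
    // Extra headers ride on every request until removed explicitly.
    client.addExtraHeader("X-Request-Source", "reporting-job");
    try {
      // ... issue requests here ...
    } finally {
      client.removeExtraHeader("X-Request-Source");
      client.shutdown();
    }
  }
}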
* @param cluster the cluster definition * @param method the transaction method * @param headers HTTP header values to send @@ -268,13 +260,13 @@ public void removeExtraHeader(final String name) { * @return the HTTP response code * @throws IOException */ - public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, - Header[] headers, String path) throws IOException { + public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers, + String path) throws IOException { IOException lastException; if (cluster.nodes.size() < 1) { throw new IOException("Cluster is empty"); } - int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random()); + int start = (int) Math.round((cluster.nodes.size() - 1) * Math.random()); int i = start; do { cluster.lastHost = cluster.nodes.get(i); @@ -326,11 +318,11 @@ public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri) throws IOException { // method.setURI(new URI(uri, true)); - for (Map.Entry e: extraHeaders.entrySet()) { + for (Map.Entry e : extraHeaders.entrySet()) { method.addHeader(e.getKey(), e.getValue()); } if (headers != null) { - for (Header header: headers) { + for (Header header : headers) { method.addHeader(header); } } @@ -346,16 +338,16 @@ public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String u long endTime = EnvironmentEdgeManager.currentTime(); if (LOG.isTraceEnabled()) { - LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + - resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); + LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + + resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); } return resp; } /** - * Execute a transaction method. Will call either executePathOnly - * or executeURI depending on whether a path only is supplied in - * 'path', or if a complete URI is passed instead, respectively. + * Execute a transaction method. Will call either executePathOnly or executeURI + * depending on whether a path only is supplied in 'path', or if a complete URI is passed instead, + * respectively. 
* @param cluster the cluster definition * @param method the HTTP method * @param headers HTTP header values to send @@ -363,8 +355,8 @@ public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String u * @return the HTTP response code * @throws IOException */ - public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, - String path) throws IOException { + public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path) + throws IOException { if (path.startsWith("/")) { return executePathOnly(cluster, method, headers, path); } @@ -437,8 +429,7 @@ public Response head(String path) throws IOException { * @return a Response object with response detail * @throws IOException */ - public Response head(Cluster cluster, String path, Header[] headers) - throws IOException { + public Response head(Cluster cluster, String path, Header[] headers) throws IOException { HttpHead method = new HttpHead(path); try { HttpResponse resp = execute(cluster, method, null, path); @@ -488,8 +479,7 @@ public Response get(String path, String accept) throws IOException { * @return a Response object with response detail * @throws IOException */ - public Response get(Cluster cluster, String path, String accept) - throws IOException { + public Response get(Cluster cluster, String path, String accept) throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Accept", accept); return get(cluster, path, headers); @@ -498,8 +488,7 @@ public Response get(Cluster cluster, String path, String accept) /** * Send a GET request * @param path the path or URI - * @param headers the HTTP headers to include in the request, - * Accept must be supplied + * @param headers the HTTP headers to include in the request, Accept must be supplied * @return a Response object with response detail * @throws IOException */ @@ -508,32 +497,28 @@ public Response get(String path, Header[] headers) throws IOException { } /** - * Returns the response body of the HTTPResponse, if any, as an array of bytes. - * If response body is not available or cannot be read, returns null - * - * Note: This will cause the entire response body to be buffered in memory. A - * malicious server may easily exhaust all the VM memory. It is strongly - * recommended, to use getResponseAsStream if the content length of the response - * is unknown or reasonably large. - * + * Returns the response body of the HTTPResponse, if any, as an array of bytes. If response body + * is not available or cannot be read, returns null Note: This will cause the entire + * response body to be buffered in memory. A malicious server may easily exhaust all the VM + * memory. It is strongly recommended, to use getResponseAsStream if the content length of the + * response is unknown or reasonably large. * @param resp HttpResponse * @return The response body, null if body is empty - * @throws IOException If an I/O (transport) problem occurs while obtaining the - * response body. + * @throws IOException If an I/O (transport) problem occurs while obtaining the response body. 
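As the Javadoc above warns, getResponseBody buffers the whole entity in memory. The stream-backed Response built by get(Cluster, String, Header[]) avoids that; a sketch of reading the body incrementally, with an illustrative gateway address and row path.

import java.io.InputStream;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class StreamingGetExample {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080));
    // Illustrative row path; the Response wraps the entity's InputStream.
    Response response = client.get("/mytable/row-1", Constants.MIMETYPE_PROTOBUF);
    try (InputStream in = response.getStream()) {
      byte[] buffer = new byte[8192];
      long total = 0;
      int read;
      while ((read = in.read(buffer)) != -1) {
        total += read; // process each chunk instead of accumulating the whole body
      }
      System.out.println("streamed " + total + " bytes");
    }
  }
}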
*/ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = - "NP_LOAD_OF_KNOWN_NULL_VALUE", justification = "null is possible return value") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_LOAD_OF_KNOWN_NULL_VALUE", + justification = "null is possible return value") public static byte[] getResponseBody(HttpResponse resp) throws IOException { if (resp.getEntity() == null) return null; try (InputStream instream = resp.getEntity().getContent()) { if (instream != null) { long contentLength = resp.getEntity().getContentLength(); if (contentLength > Integer.MAX_VALUE) { - //guard integer cast from overflow - throw new IOException("Content too large to be buffered: " + contentLength +" bytes"); + // guard integer cast from overflow + throw new IOException("Content too large to be buffered: " + contentLength + " bytes"); } - ByteArrayOutputStream outstream = new ByteArrayOutputStream( - contentLength > 0 ? (int) contentLength : 4*1024); + ByteArrayOutputStream outstream = + new ByteArrayOutputStream(contentLength > 0 ? (int) contentLength : 4 * 1024); byte[] buffer = new byte[4096]; int len; while ((len = instream.read(buffer)) > 0) { @@ -554,15 +539,14 @@ public static byte[] getResponseBody(HttpResponse resp) throws IOException { * @return a Response object with response detail * @throws IOException */ - public Response get(Cluster c, String path, Header[] headers) - throws IOException { + public Response get(Cluster c, String path, Header[] headers) throws IOException { if (httpGet != null) { httpGet.releaseConnection(); } httpGet = new HttpGet(path); HttpResponse resp = execute(c, httpGet, headers, path); - return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), - resp, resp.getEntity() == null ? null : resp.getEntity().getContent()); + return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), resp, + resp.getEntity() == null ? null : resp.getEntity().getContent()); } /** @@ -573,8 +557,7 @@ public Response get(Cluster c, String path, Header[] headers) * @return a Response object with response detail * @throws IOException */ - public Response put(String path, String contentType, byte[] content) - throws IOException { + public Response put(String path, String contentType, byte[] content) throws IOException { return put(cluster, path, contentType, content); } @@ -601,8 +584,8 @@ public Response put(String path, String contentType, byte[] content, Header extr * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, - byte[] content) throws IOException { + public Response put(Cluster cluster, String path, String contentType, byte[] content) + throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Content-Type", contentType); return put(cluster, path, headers, content); @@ -618,8 +601,8 @@ public Response put(Cluster cluster, String path, String contentType, * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, - byte[] content, Header extraHdr) throws IOException { + public Response put(Cluster cluster, String path, String contentType, byte[] content, + Header extraHdr) throws IOException { int cnt = extraHdr == null ? 
1 : 2; Header[] headers = new Header[cnt]; headers[0] = new BasicHeader("Content-Type", contentType); @@ -632,14 +615,12 @@ public Response put(Cluster cluster, String path, String contentType, /** * Send a PUT request * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes * @return a Response object with response detail * @throws IOException */ - public Response put(String path, Header[] headers, byte[] content) - throws IOException { + public Response put(String path, Header[] headers, byte[] content) throws IOException { return put(cluster, path, headers, content); } @@ -647,14 +628,13 @@ public Response put(String path, Header[] headers, byte[] content) * Send a PUT request * @param cluster the cluster definition * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes * @return a Response object with response detail * @throws IOException */ - public Response put(Cluster cluster, String path, Header[] headers, - byte[] content) throws IOException { + public Response put(Cluster cluster, String path, Header[] headers, byte[] content) + throws IOException { HttpPut method = new HttpPut(path); try { method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); @@ -675,8 +655,7 @@ public Response put(Cluster cluster, String path, Header[] headers, * @return a Response object with response detail * @throws IOException */ - public Response post(String path, String contentType, byte[] content) - throws IOException { + public Response post(String path, String contentType, byte[] content) throws IOException { return post(cluster, path, contentType, content); } @@ -703,8 +682,8 @@ public Response post(String path, String contentType, byte[] content, Header ext * @return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, - byte[] content) throws IOException { + public Response post(Cluster cluster, String path, String contentType, byte[] content) + throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Content-Type", contentType); return post(cluster, path, headers, content); @@ -720,8 +699,8 @@ public Response post(Cluster cluster, String path, String contentType, * @return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, - byte[] content, Header extraHdr) throws IOException { + public Response post(Cluster cluster, String path, String contentType, byte[] content, + Header extraHdr) throws IOException { int cnt = extraHdr == null ? 
1 : 2; Header[] headers = new Header[cnt]; headers[0] = new BasicHeader("Content-Type", contentType); @@ -734,14 +713,12 @@ public Response post(Cluster cluster, String path, String contentType, /** * Send a POST request * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes * @return a Response object with response detail * @throws IOException */ - public Response post(String path, Header[] headers, byte[] content) - throws IOException { + public Response post(String path, Header[] headers, byte[] content) throws IOException { return post(cluster, path, headers, content); } @@ -749,14 +726,13 @@ public Response post(String path, Header[] headers, byte[] content) * Send a POST request * @param cluster the cluster definition * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes * @return a Response object with response detail * @throws IOException */ - public Response post(Cluster cluster, String path, Header[] headers, - byte[] content) throws IOException { + public Response post(Cluster cluster, String path, Header[] headers, byte[] content) + throws IOException { HttpPost method = new HttpPost(path); try { method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); @@ -829,7 +805,6 @@ public Response delete(Cluster cluster, String path, Header extraHdr) throws IOE } } - public static class ClientTrustStoreInitializationException extends RuntimeException { public ClientTrustStoreInitializationException(String message, Throwable cause) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java index 008470826dea..dbb30adbc74b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,29 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.client; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** - * A list of 'host:port' addresses of HTTP servers operating as a single - * entity, for example multiple redundant web service gateways. + * A list of 'host:port' addresses of HTTP servers operating as a single entity, for example + * multiple redundant web service gateways. 
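A Cluster is a list of 'host:port' addresses treated as one logical endpoint, and executePathOnly above starts at a random node and iterates until one of them completes the HTTP exchange. A sketch of pointing the client at several redundant gateways; the hostnames and the HEAD path are illustrative.

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class MultiGatewayExample {
  public static void main(String[] args) throws Exception {
    // Three redundant gateways acting as one logical endpoint (hostnames assumed).
    Cluster cluster = new Cluster()
      .add("rest1.example.com", 8080)
      .add("rest2.example.com", 8080)
      .add("rest3.example.com", 8080);
    Client client = new Client(cluster);
    // Path-only requests go through executePathOnly: a random starting node,
    // then failover until a complete HTTP transaction succeeds.
    Response response = client.head("/mytable"); // existence check, path illustrative
    System.out.println("HEAD /mytable -> " + response.getCode());
  }
}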
*/ @InterfaceAudience.Public public class Cluster { - protected List nodes = - Collections.synchronizedList(new ArrayList()); + protected List nodes = Collections.synchronizedList(new ArrayList()); protected String lastHost; /** * Constructor */ - public Cluster() {} + public Cluster() { + } /** * Constructor @@ -99,10 +96,8 @@ public Cluster remove(String name, int port) { return remove(sb.toString()); } - @Override public String toString() { - return "Cluster{" + - "nodes=" + nodes + - ", lastHost='" + lastHost + '\'' + - '}'; + @Override + public String toString() { + return "Cluster{" + "nodes=" + nodes + ", lastHost='" + lastHost + '\'' + '}'; } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java index 0e91005ab2b8..fbffa71294b8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,12 +19,9 @@ import java.io.IOException; import java.io.InputStream; - import org.apache.http.Header; import org.apache.http.HttpResponse; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,10 +66,9 @@ public Response(int code, Header[] headers, byte[] body) { this.headers = headers; this.body = body; } - + /** * Constructor. Note: this is not thread-safe - * * @param code the HTTP response code * @param headers headers the HTTP response headers * @param resp the response @@ -93,13 +88,12 @@ public Response(int code, Header[] headers, HttpResponse resp, InputStream in) { public int getCode() { return code; } - + /** * Gets the input stream instance. - * * @return an instance of InputStream class. */ - public InputStream getStream(){ + public InputStream getStream() { return this.stream; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java index b9b8a006437c..d9dbd8707362 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,18 +44,16 @@ public class AuthFilter extends AuthenticationFilter { private static final int REST_PREFIX_LEN = REST_PREFIX.length(); /** - * Returns the configuration to be used by the authentication filter - * to initialize the authentication handler. - * - * This filter retrieves all HBase configurations and passes those started - * with REST_PREFIX to the authentication handler. It is useful to support - * plugging different authentication handlers. 
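AuthFilter.getConfiguration above forwards every property that starts with REST_PREFIX, with the prefix stripped, to the Hadoop AuthenticationFilter, which is how different authentication handlers are plugged in. A sketch of the usual Kerberos setup; the key names follow the commonly documented REST gateway settings and should be verified against Constants, and the principal and keytab values are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RestKerberosConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Properties prefixed with "hbase.rest.authentication." are handed (prefix stripped)
    // to the Hadoop AuthenticationFilter by getConfiguration(); key names and values
    // below are the usual documented setup and should be double-checked.
    conf.set("hbase.rest.authentication.type", "kerberos");
    conf.set("hbase.rest.authentication.kerberos.principal", "HTTP/_HOST@EXAMPLE.COM");
    conf.set("hbase.rest.authentication.kerberos.keytab", "/etc/security/keytabs/spnego.keytab");
    // The _HOST placeholder is resolved against the machine name in the same way the
    // filter does before calling SecurityUtil.getServerPrincipal(...).
  }
}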
- */ + * Returns the configuration to be used by the authentication filter to initialize the + * authentication handler. This filter retrieves all HBase configurations and passes those started + * with REST_PREFIX to the authentication handler. It is useful to support plugging different + * authentication handlers. + */ @Override - protected Properties getConfiguration( - String configPrefix, FilterConfig filterConfig) throws ServletException { + protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) + throws ServletException { Properties props = super.getConfiguration(configPrefix, filterConfig); - //setting the cookie path to root '/' so it is used for all resources. + // setting the cookie path to root '/' so it is used for all resources. props.setProperty(AuthenticationFilter.COOKIE_PATH, "/"); Configuration conf = null; @@ -70,11 +68,10 @@ protected Properties getConfiguration( String name = entry.getKey(); if (name.startsWith(REST_PREFIX)) { String value = entry.getValue(); - if(name.equals(REST_AUTHENTICATION_PRINCIPAL)) { + if (name.equals(REST_AUTHENTICATION_PRINCIPAL)) { try { - String machineName = Strings.domainNamePointerToHostName( - DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), - conf.get(REST_DNS_NAMESERVER, "default"))); + String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); value = SecurityUtil.getServerPrincipal(value, machineName); } catch (IOException ie) { throw new ServletException("Failed to retrieve server principal", ie); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java index f74e10cae74b..efb7e2a227aa 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,9 @@ import java.io.IOException; import java.util.zip.GZIPInputStream; - import javax.servlet.ReadListener; import javax.servlet.ServletInputStream; import javax.servlet.http.HttpServletRequest; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java index 51eba665f3fd..db41fbb5b847 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.filter; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; - import javax.servlet.ServletInputStream; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java index 3fa1ad6f857d..7c1a4f995472 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,9 @@ import java.io.IOException; import java.util.zip.GZIPOutputStream; - import javax.servlet.ServletOutputStream; import javax.servlet.WriteListener; import javax.servlet.http.HttpServletResponse; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java index 53a26ea1ac80..41342214100d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.filter; import java.io.IOException; import java.io.PrintWriter; - import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpServletResponseWrapper; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -80,7 +76,7 @@ public void flushBuffer() throws IOException { writer.flush(); } if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).finish(); + ((GZIPResponseStream) os).finish(); } else { getResponse().flushBuffer(); } @@ -90,7 +86,7 @@ public void flushBuffer() throws IOException { public void reset() { super.reset(); if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).resetBuffer(); + ((GZIPResponseStream) os).resetBuffer(); } writer = null; os = null; @@ -101,7 +97,7 @@ public void reset() { public void resetBuffer() { super.resetBuffer(); if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).resetBuffer(); + ((GZIPResponseStream) os).resetBuffer(); } writer = null; os = null; @@ -129,7 +125,7 @@ public void sendRedirect(String location) throws IOException { public ServletOutputStream getOutputStream() throws IOException { if (os == null) { if (!response.isCommitted() && compress) { - os = (ServletOutputStream)new GZIPResponseStream(response); + os = (ServletOutputStream) new GZIPResponseStream(response); } else { os = response.getOutputStream(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java index 4ba9eca302d0..9aecef5881e0 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.Locale; import java.util.Set; import java.util.StringTokenizer; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -33,9 +31,7 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -58,27 +54,25 @@ public void destroy() { } @Override - public void doFilter(ServletRequest req, ServletResponse rsp, - FilterChain chain) throws IOException, ServletException { - HttpServletRequest request = (HttpServletRequest)req; - HttpServletResponse response = (HttpServletResponse)rsp; + public void doFilter(ServletRequest req, ServletResponse rsp, FilterChain chain) + throws IOException, ServletException { + HttpServletRequest request = (HttpServletRequest) req; + HttpServletResponse response = (HttpServletResponse) rsp; String contentEncoding = request.getHeader("content-encoding"); String acceptEncoding = request.getHeader("accept-encoding"); String contentType = request.getHeader("content-type"); - if ((contentEncoding != null) && - (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) { + if ((contentEncoding != null) && (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) { request = new GZIPRequestWrapper(request); } - if (((acceptEncoding != null) && - (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) || - ((contentType != null) && mimeTypes.contains(contentType))) { + if (((acceptEncoding != null) && (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) + || ((contentType != null) && mimeTypes.contains(contentType))) { response = new GZIPResponseWrapper(response); } chain.doFilter(request, response); if (response instanceof GZIPResponseWrapper) { OutputStream os = response.getOutputStream(); if (os instanceof GZIPResponseStream) { - ((GZIPResponseStream)os).finish(); + ((GZIPResponseStream) os).finish(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java index 94eb314e01ab..9d0894f468d0 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -34,36 +33,29 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This filter provides protection against cross site request forgery (CSRF) - * attacks for REST APIs. 
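GzipFilter.doFilter above wraps the request in a GZIPRequestWrapper when Content-Encoding contains gzip, and wraps the response when the caller accepts gzip or the content type is registered. A sketch of a caller uploading a gzip-compressed body through the REST Client; the row path and payload are illustrative and the filter must be enabled on the gateway.

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;

public class GzipPutExample {
  public static void main(String[] args) throws Exception {
    byte[] xml = "<CellSet>...</CellSet>".getBytes(StandardCharsets.UTF_8); // placeholder body
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gzip = new GZIPOutputStream(compressed)) {
      gzip.write(xml);
    }
    Client client = new Client(new Cluster().add("localhost", 8080));
    // Content-Encoding: gzip makes the filter wrap the request in GZIPRequestWrapper;
    // Accept-Encoding: gzip asks it to compress the response on the way back.
    Header[] headers = new Header[] {
      new BasicHeader("Content-Type", Constants.MIMETYPE_XML),
      new BasicHeader("Content-Encoding", "gzip"),
      new BasicHeader("Accept-Encoding", "gzip") };
    Response response = client.put("/mytable/row-1/cf:qual", headers, compressed.toByteArray());
    System.out.println("PUT -> " + response.getCode());
  }
}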
Enabling this filter on an endpoint results in the - * requirement of all client to send a particular (configurable) HTTP header - * with every request. In the absense of this header the filter will reject the - * attempt as a bad request. + * This filter provides protection against cross site request forgery (CSRF) attacks for REST APIs. + * Enabling this filter on an endpoint results in the requirement of all client to send a particular + * (configurable) HTTP header with every request. In the absense of this header the filter will + * reject the attempt as a bad request. */ @InterfaceAudience.Public public class RestCsrfPreventionFilter implements Filter { - private static final Logger LOG = - LoggerFactory.getLogger(RestCsrfPreventionFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(RestCsrfPreventionFilter.class); public static final String HEADER_USER_AGENT = "User-Agent"; - public static final String BROWSER_USER_AGENT_PARAM = - "browser-useragents-regex"; + public static final String BROWSER_USER_AGENT_PARAM = "browser-useragents-regex"; public static final String CUSTOM_HEADER_PARAM = "custom-header"; - public static final String CUSTOM_METHODS_TO_IGNORE_PARAM = - "methods-to-ignore"; - static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*"; + public static final String CUSTOM_METHODS_TO_IGNORE_PARAM = "methods-to-ignore"; + static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*"; public static final String HEADER_DEFAULT = "X-XSRF-HEADER"; - static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; - private String headerName = HEADER_DEFAULT; + static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; + private String headerName = HEADER_DEFAULT; private Set methodsToIgnore = null; private Set browserUserAgents; @@ -73,8 +65,7 @@ public void init(FilterConfig filterConfig) { if (customHeader != null) { headerName = customHeader; } - String customMethodsToIgnore = - filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM); + String customMethodsToIgnore = filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM); if (customMethodsToIgnore != null) { parseMethodsToIgnore(customMethodsToIgnore); } else { @@ -86,13 +77,14 @@ public void init(FilterConfig filterConfig) { agents = BROWSER_USER_AGENTS_DEFAULT; } parseBrowserUserAgents(agents); - LOG.info(String.format("Adding cross-site request forgery (CSRF) protection, " - + "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s", - headerName, methodsToIgnore, browserUserAgents)); + LOG.info(String.format( + "Adding cross-site request forgery (CSRF) protection, " + + "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s", + headerName, methodsToIgnore, browserUserAgents)); } void parseBrowserUserAgents(String userAgents) { - String[] agentsArray = userAgents.split(","); + String[] agentsArray = userAgents.split(","); browserUserAgents = new HashSet<>(); for (String patternString : agentsArray) { browserUserAgents.add(Pattern.compile(patternString)); @@ -106,17 +98,14 @@ void parseMethodsToIgnore(String mti) { } /** - * This method interrogates the User-Agent String and returns whether it - * refers to a browser. If its not a browser, then the requirement for the - * CSRF header will not be enforced; if it is a browser, the requirement will - * be enforced. + * This method interrogates the User-Agent String and returns whether it refers to a browser. 
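With the CSRF filter active, browser-like User-Agents must send the configured header (X-XSRF-HEADER by default) on every method outside the methods-to-ignore set (GET, OPTIONS, HEAD and TRACE by default). A sketch of a REST Client that always sends the default header; the host and port are illustrative.

import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;

public class CsrfAwareClient {
  public static void main(String[] args) {
    Client client = new Client(new Cluster().add("localhost", 8080));
    // Sends the default header ("X-XSRF-HEADER") with every request; only browser-like
    // User-Agents and methods outside GET/OPTIONS/HEAD/TRACE are actually checked.
    client.addExtraHeader(RestCsrfPreventionFilter.HEADER_DEFAULT, "true");
    // ... PUT/POST/DELETE calls now pass the filter ...
  }
}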
If + * its not a browser, then the requirement for the CSRF header will not be enforced; if it is a + * browser, the requirement will be enforced. *

    - * A User-Agent String is considered to be a browser if it matches - * any of the regex patterns from browser-useragent-regex; the default - * behavior is to consider everything a browser that matches the following: - * "^Mozilla.*,^Opera.*". Subclasses can optionally override - * this method to use different behavior. - * + * A User-Agent String is considered to be a browser if it matches any of the regex patterns from + * browser-useragent-regex; the default behavior is to consider everything a browser that matches + * the following: "^Mozilla.*,^Opera.*". Subclasses can optionally override this method to use + * different behavior. * @param userAgent The User-Agent String, or null if there isn't one * @return true if the User-Agent String refers to a browser, false if not */ @@ -134,18 +123,16 @@ protected boolean isBrowser(String userAgent) { } /** - * Defines the minimal API requirements for the filter to execute its - * filtering logic. This interface exists to facilitate integration in - * components that do not run within a servlet container and therefore cannot - * rely on a servlet container to dispatch to the {@link #doFilter} method. - * Applications that do run inside a servlet container will not need to write - * code that uses this interface. Instead, they can use typical servlet - * container configuration mechanisms to insert the filter. + * Defines the minimal API requirements for the filter to execute its filtering logic. This + * interface exists to facilitate integration in components that do not run within a servlet + * container and therefore cannot rely on a servlet container to dispatch to the {@link #doFilter} + * method. Applications that do run inside a servlet container will not need to write code that + * uses this interface. Instead, they can use typical servlet container configuration mechanisms + * to insert the filter. */ public interface HttpInteraction { /** * Returns the value of a header. - * * @param header name of header * @return value of header */ @@ -153,24 +140,21 @@ public interface HttpInteraction { /** * Returns the method. - * * @return method */ String getMethod(); /** * Called by the filter after it decides that the request may proceed. - * * @throws IOException if there is an I/O error - * @throws ServletException if the implementation relies on the servlet API - * and a servlet API call has failed + * @throws ServletException if the implementation relies on the servlet API and a servlet API + * call has failed */ void proceed() throws IOException, ServletException; /** - * Called by the filter after it decides that the request is a potential - * CSRF attack and therefore must be rejected. - * + * Called by the filter after it decides that the request is a potential CSRF attack and + * therefore must be rejected. * @param code status code to send * @param message response message * @throws IOException if there is an I/O error @@ -180,31 +164,29 @@ public interface HttpInteraction { /** * Handles an {@link HttpInteraction} by applying the filtering logic. 
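HttpInteraction exists so the CSRF check can run outside a servlet container. A sketch of an adapter over a plain header map, using only the four methods declared above; the class and field names are made up.

import java.util.Map;
import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter.HttpInteraction;

public class InMemoryCsrfCheck {

  /** Adapts a plain header map and method name to the filter's HttpInteraction contract. */
  static final class MapBackedInteraction implements HttpInteraction {
    private final Map<String, String> headers;
    private final String method;
    boolean allowed;
    int errorCode;

    MapBackedInteraction(Map<String, String> headers, String method) {
      this.headers = headers;
      this.method = method;
    }

    @Override
    public String getHeader(String header) {
      return headers.get(header);
    }

    @Override
    public String getMethod() {
      return method;
    }

    @Override
    public void proceed() {
      allowed = true; // the filter decided the request may continue
    }

    @Override
    public void sendError(int code, String message) {
      errorCode = code; // the filter rejected the request as a potential CSRF attempt
    }
  }
}

An initialized RestCsrfPreventionFilter could then evaluate such a request with handleHttpInteraction(new MapBackedInteraction(headers, "POST")).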
- * * @param httpInteraction caller's HTTP interaction * @throws IOException if there is an I/O error - * @throws ServletException if the implementation relies on the servlet API - * and a servlet API call has failed + * @throws ServletException if the implementation relies on the servlet API and a servlet API call + * has failed */ public void handleHttpInteraction(HttpInteraction httpInteraction) throws IOException, ServletException { - if (!isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) || - methodsToIgnore.contains(httpInteraction.getMethod()) || - httpInteraction.getHeader(headerName) != null) { + if (!isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) + || methodsToIgnore.contains(httpInteraction.getMethod()) + || httpInteraction.getHeader(headerName) != null) { httpInteraction.proceed(); } else { httpInteraction.sendError(HttpServletResponse.SC_BAD_REQUEST, - "Missing Required Header for CSRF Vulnerability Protection"); + "Missing Required Header for CSRF Vulnerability Protection"); } } @Override - public void doFilter(ServletRequest request, ServletResponse response, - final FilterChain chain) throws IOException, ServletException { - final HttpServletRequest httpRequest = (HttpServletRequest)request; - final HttpServletResponse httpResponse = (HttpServletResponse)response; - handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest, - httpResponse, chain)); + public void doFilter(ServletRequest request, ServletResponse response, final FilterChain chain) + throws IOException, ServletException { + final HttpServletRequest httpRequest = (HttpServletRequest) request; + final HttpServletResponse httpResponse = (HttpServletResponse) response; + handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest, httpResponse, chain)); } @Override @@ -212,15 +194,12 @@ public void destroy() { } /** - * Constructs a mapping of configuration properties to be used for filter - * initialization. The mapping includes all properties that start with the - * specified configuration prefix. Property names in the mapping are trimmed - * to remove the configuration prefix. - * + * Constructs a mapping of configuration properties to be used for filter initialization. The + * mapping includes all properties that start with the specified configuration prefix. Property + * names in the mapping are trimmed to remove the configuration prefix. * @param conf configuration to read * @param confPrefix configuration prefix - * @return mapping of configuration properties to be used for filter - * initialization + * @return mapping of configuration properties to be used for filter initialization */ public static Map getFilterParams(Configuration conf, String confPrefix) { Map filterConfigMap = new HashMap<>(); @@ -245,7 +224,6 @@ private static final class ServletFilterHttpInteraction implements HttpInteracti /** * Creates a new ServletFilterHttpInteraction. 
- * * @param httpRequest request to process * @param httpResponse response to process * @param chain filter chain to forward to if HTTP interaction is allowed diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java index 128be02bb348..511f01f2dec8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; @@ -39,10 +37,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell; + /** - * Representation of a cell. A cell is a single value associated a column and - * optional qualifier, and either the timestamp when it was stored or the user- - * provided timestamp if one was explicitly supplied. + * Representation of a cell. A cell is a single value associated a column and optional qualifier, + * and either the timestamp when it was stored or the user- provided timestamp if one was explicitly + * supplied. * *

      * <complexType name="Cell">
    @@ -58,7 +57,7 @@
      * </complexType>
      * 
    */ -@XmlRootElement(name="Cell") +@XmlRootElement(name = "Cell") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class CellModel implements ProtobufMessageHandler, Serializable { @@ -79,7 +78,8 @@ public class CellModel implements ProtobufMessageHandler, Serializable { /** * Default constructor */ - public CellModel() {} + public CellModel() { + } /** * Constructor @@ -105,8 +105,8 @@ public CellModel(byte[] column, byte[] qualifier, byte[] value) { * @param cell */ public CellModel(org.apache.hadoop.hbase.Cell cell) { - this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), CellUtil - .cloneValue(cell)); + this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), + CellUtil.cloneValue(cell)); } /** @@ -128,8 +128,7 @@ public CellModel(byte[] column, long timestamp, byte[] value) { * @param timestamp * @param value */ - public CellModel(byte[] column, byte[] qualifier, long timestamp, - byte[] value) { + public CellModel(byte[] column, byte[] qualifier, long timestamp, byte[] value) { this.column = CellUtil.makeColumn(column, qualifier); this.timestamp = timestamp; this.value = value; @@ -150,8 +149,7 @@ public void setColumn(byte[] column) { } /** - * @return true if the timestamp property has been specified by the - * user + * @return true if the timestamp property has been specified by the user */ public boolean hasUserTimestamp() { return timestamp != HConstants.LATEST_TIMESTAMP; @@ -197,8 +195,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Cell.Builder builder = Cell.newBuilder(); ProtobufUtil.mergeFrom(builder, message); setColumn(builder.getColumn().toByteArray()); @@ -221,28 +218,18 @@ public boolean equals(Object obj) { return false; } CellModel cellModel = (CellModel) obj; - return new EqualsBuilder(). - append(column, cellModel.column). - append(timestamp, cellModel.timestamp). - append(value, cellModel.value). - isEquals(); + return new EqualsBuilder().append(column, cellModel.column) + .append(timestamp, cellModel.timestamp).append(value, cellModel.value).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(). - append(column). - append(timestamp). - append(value). - toHashCode(); + return new HashCodeBuilder().append(column).append(timestamp).append(value).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this). - append("column", column). - append("timestamp", timestamp). - append("value", value). - toString(); + return new ToStringBuilder(this).append("column", column).append("timestamp", timestamp) + .append("value", value).toString(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java index ebb2b1832fb1..b1287284f42b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,26 +21,23 @@ import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellSetMessage.CellSet; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; - -import org.apache.yetus.audience.InterfaceAudience; - /** - * Representation of a grouping of cells. May contain cells from more than - * one row. Encapsulates RowModel and CellModel models. + * Representation of a grouping of cells. May contain cells from more than one row. Encapsulates + * RowModel and CellModel models. * *
      * <complexType name="CellSet">
    @@ -72,13 +68,13 @@
      * </complexType>
      * 
    */ -@XmlRootElement(name="CellSet") +@XmlRootElement(name = "CellSet") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class CellSetModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; - @XmlElement(name="Row") + @XmlElement(name = "Row") private List rows; /** @@ -132,8 +128,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { CellSet.Builder builder = CellSet.newBuilder(); ProtobufUtil.mergeFrom(builder, message); for (CellSet.Row row : builder.getRowsList()) { @@ -144,8 +139,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) timestamp = cell.getTimestamp(); } rowModel.addCell( - new CellModel(cell.getColumn().toByteArray(), timestamp, - cell.getData().toByteArray())); + new CellModel(cell.getColumn().toByteArray(), timestamp, cell.getData().toByteArray())); } addRow(rowModel); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java index 6de4b7743ef6..fbefdb68cb5e 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonAnyGetter; @@ -42,7 +40,7 @@ * </complexType> * */ -@XmlRootElement(name="ColumnSchema") +@XmlRootElement(name = "ColumnSchema") @InterfaceAudience.Private public class ColumnSchemaModel implements Serializable { private static final long serialVersionUID = 1L; @@ -55,12 +53,13 @@ public class ColumnSchemaModel implements Serializable { private static QName VERSIONS = new QName(HConstants.VERSIONS); private String name; - private Map attrs = new LinkedHashMap<>(); + private Map attrs = new LinkedHashMap<>(); /** * Default constructor */ - public ColumnSchemaModel() {} + public ColumnSchemaModel() { + } /** * Add an attribute to the column family schema @@ -78,7 +77,7 @@ public void addAttribute(String name, Object value) { */ public String getAttribute(String name) { Object o = attrs.get(new QName(name)); - return o != null ? o.toString(): null; + return o != null ? o.toString() : null; } /** @@ -94,7 +93,7 @@ public String getName() { */ @XmlAnyAttribute @JsonAnyGetter - public Map getAny() { + public Map getAny() { return attrs; } @@ -105,7 +104,8 @@ public void setName(String name) { this.name = name; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -114,7 +114,7 @@ public String toString() { sb.append("{ NAME => '"); sb.append(name); sb.append('\''); - for (Map.Entry e: attrs.entrySet()) { + for (Map.Entry e : attrs.entrySet()) { sb.append(", "); sb.append(e.getKey().getLocalPart()); sb.append(" => '"); @@ -135,8 +135,8 @@ public String toString() { */ public boolean __getBlockcache() { Object o = attrs.get(BLOCKCACHE); - return o != null ? 
Boolean.parseBoolean(o.toString()) : - ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKCACHE; + return o != null ? Boolean.parseBoolean(o.toString()) + : ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKCACHE; } /** @@ -144,8 +144,8 @@ public boolean __getBlockcache() { */ public int __getBlocksize() { Object o = attrs.get(BLOCKSIZE); - return o != null ? Integer.parseInt(o.toString()) : - ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE; + return o != null ? Integer.parseInt(o.toString()) + : ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE; } /** @@ -169,8 +169,8 @@ public String __getCompression() { */ public boolean __getInMemory() { Object o = attrs.get(IN_MEMORY); - return o != null ? - Boolean.parseBoolean(o.toString()) : ColumnFamilyDescriptorBuilder.DEFAULT_IN_MEMORY; + return o != null ? Boolean.parseBoolean(o.toString()) + : ColumnFamilyDescriptorBuilder.DEFAULT_IN_MEMORY; } /** @@ -186,8 +186,8 @@ public int __getTTL() { */ public int __getVersions() { Object o = attrs.get(VERSIONS); - return o != null ? Integer.parseInt(o.toString()) : - ColumnFamilyDescriptorBuilder.DEFAULT_MAX_VERSIONS; + return o != null ? Integer.parseInt(o.toString()) + : ColumnFamilyDescriptorBuilder.DEFAULT_MAX_VERSIONS; } /** @@ -212,7 +212,7 @@ public void __setBloomfilter(String value) { * @param value the desired value of the COMPRESSION attribute */ public void __setCompression(String value) { - attrs.put(COMPRESSION, value); + attrs.put(COMPRESSION, value); } /** diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java index aa7df1e983ab..f4ff2bfded11 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; @@ -24,19 +22,16 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; - import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.rest.protobuf - .generated.NamespacePropertiesMessage.NamespaceProperties; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacePropertiesMessage.NamespaceProperties; /** * List a HBase namespace's key/value properties. @@ -48,7 +43,7 @@ *
  • value: property value
  • * */ -@XmlRootElement(name="NamespaceProperties") +@XmlRootElement(name = "NamespaceProperties") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class NamespacesInstanceModel implements Serializable, ProtobufMessageHandler { @@ -56,7 +51,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan private static final long serialVersionUID = 1L; // JAX-RS automatically converts Map to XMLAnyElement. - private Map properties = null; + private Map properties = null; @XmlTransient private String namespaceName; @@ -64,7 +59,8 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan /** * Default constructor. Do not use. */ - public NamespacesInstanceModel() {} + public NamespacesInstanceModel() { + } /** * Constructor to use if namespace does not exist in HBASE. @@ -83,12 +79,16 @@ public NamespacesInstanceModel(String namespaceName) throws IOException { */ public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOException { this.namespaceName = namespaceName; - if(admin == null) { return; } + if (admin == null) { + return; + } NamespaceDescriptor nd = admin.getNamespaceDescriptor(namespaceName); // For properly formed JSON, if no properties, field has to be null (not just no elements). - if(nd.getConfiguration().isEmpty()){ return; } + if (nd.getConfiguration().isEmpty()) { + return; + } properties = new HashMap<>(); properties.putAll(nd.getConfiguration()); @@ -100,7 +100,7 @@ public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOExcep * @param value attribute value */ public void addProperty(String key, String value) { - if(properties == null){ + if (properties == null) { properties = new HashMap<>(); } properties.put(key, value); @@ -109,18 +109,19 @@ public void addProperty(String key, String value) { /** * @return The map of uncategorized namespace properties. 
*/ - public Map getProperties() { - if(properties == null){ + public Map getProperties() { + if (properties == null) { properties = new HashMap<>(); } return properties; } - public String getNamespaceName(){ + public String getNamespaceName() { return namespaceName; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -129,7 +130,7 @@ public String toString() { sb.append("{NAME => \'"); sb.append(namespaceName); sb.append("\'"); - if(properties != null){ + if (properties != null) { for (Map.Entry entry : properties.entrySet()) { sb.append(", "); sb.append(entry.getKey()); @@ -145,7 +146,7 @@ public String toString() { @Override public byte[] createProtobufOutput() { NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); - if(properties != null){ + if (properties != null) { for (Map.Entry entry : properties.entrySet()) { String key = entry.getKey(); NamespaceProperties.Property.Builder property = NamespaceProperties.Property.newBuilder(); @@ -162,7 +163,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOExce NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); builder.mergeFrom(message); List properties = builder.getPropsList(); - for(NamespaceProperties.Property property: properties){ + for (NamespaceProperties.Property property : properties) { addProperty(property.getKey(), property.getValue()); } return this; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java index 0be558d22553..76e8bea1d894 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,36 +15,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; +import com.fasterxml.jackson.annotation.JsonProperty; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacesMessage.Namespaces; -import com.fasterxml.jackson.annotation.JsonProperty; - - /** * A list of HBase namespaces. *
 *
 * Namespace: namespace name
 *
    */ -@XmlRootElement(name="Namespaces") +@XmlRootElement(name = "Namespaces") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class NamespacesModel implements Serializable, ProtobufMessageHandler { @@ -53,13 +47,14 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @JsonProperty("Namespace") - @XmlElement(name="Namespace") + @XmlElement(name = "Namespace") private List namespaces = new ArrayList<>(); /** * Default constructor. Do not use. */ - public NamespacesModel() {} + public NamespacesModel() { + } /** * Constructor @@ -88,7 +83,8 @@ public void setNamespaces(List namespaces) { this.namespaces = namespaces; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java index b560f697dead..c8baf2e3467b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,22 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; @@ -40,9 +35,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a row. A row is a related set of cells, grouped by common - * row key. RowModels do not appear in results by themselves. They are always - * encapsulated within CellSetModels. + * Representation of a row. A row is a related set of cells, grouped by common row key. RowModels do + * not appear in results by themselves. They are always encapsulated within CellSetModels. * *
 * <complexType name="Row">
@@ -54,7 +48,7 @@
 * </complexType>
 *
    */ -@XmlRootElement(name="Row") +@XmlRootElement(name = "Row") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class RowModel implements ProtobufMessageHandler, Serializable { @@ -65,14 +59,14 @@ public class RowModel implements ProtobufMessageHandler, Serializable { private byte[] key; @JsonProperty("Cell") - @XmlElement(name="Cell") + @XmlElement(name = "Cell") private List cells = new ArrayList<>(); - /** * Default constructor */ - public RowModel() { } + public RowModel() { + } /** * Constructor @@ -81,7 +75,7 @@ public RowModel() { } public RowModel(final String key) { this(Bytes.toBytes(key)); } - + /** * Constructor * @param key the row key @@ -99,7 +93,7 @@ public RowModel(final byte[] key) { public RowModel(final String key, final List cells) { this(Bytes.toBytes(key), cells); } - + /** * Constructor * @param key the row key @@ -109,7 +103,7 @@ public RowModel(final byte[] key, final List cells) { this.key = key; this.cells = cells; } - + /** * Adds a cell to the list of cells for this row * @param cell the cell @@ -142,16 +136,13 @@ public List getCells() { @Override public byte[] createProtobufOutput() { // there is no standalone row protobuf message - throw new UnsupportedOperationException( - "no protobuf equivalent to RowModel"); + throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { // there is no standalone row protobuf message - throw new UnsupportedOperationException( - "no protobuf equivalent to RowModel"); + throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } @Override @@ -166,25 +157,16 @@ public boolean equals(Object obj) { return false; } RowModel rowModel = (RowModel) obj; - return new EqualsBuilder(). - append(key, rowModel.key). - append(cells, rowModel.cells). - isEquals(); + return new EqualsBuilder().append(key, rowModel.key).append(cells, rowModel.cells).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(). - append(key). - append(cells). - toHashCode(); + return new HashCodeBuilder().append(key).append(cells).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this). - append("key", key). - append("cells", cells). - toString(); + return new ToStringBuilder(this).append("key", key).append("cells", cells).toString(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index de1af216f8b3..ba05aeb0a7f5 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -97,7 +96,7 @@ * </complexType> * */ -@XmlRootElement(name="Scanner") +@XmlRootElement(name = "Scanner") @JsonInclude(JsonInclude.Include.NON_NULL) @InterfaceAudience.Private public class ScannerModel implements ProtobufMessageHandler, Serializable { @@ -118,8 +117,8 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { private int limit = -1; /** - * Implement lazily-instantiated singleton as per recipe - * here: http://literatejava.com/jvm/fastest-threadsafe-singleton-jvm/ + * Implement lazily-instantiated singleton as per recipe here: + * http://literatejava.com/jvm/fastest-threadsafe-singleton-jvm/ */ private static class JaxbJsonProviderHolder { static final JacksonJaxbJsonProvider INSTANCE = new JacksonJaxbJsonProvider(); @@ -130,23 +129,22 @@ static class FilterModel { @XmlRootElement static class ByteArrayComparableModel { - @XmlAttribute public String type; - @XmlAttribute public String value; - @XmlAttribute public String op; + @XmlAttribute + public String type; + @XmlAttribute + public String value; + @XmlAttribute + public String op; static enum ComparatorType { - BinaryComparator, - BinaryPrefixComparator, - BitComparator, - NullComparator, - RegexStringComparator, - SubstringComparator + BinaryComparator, BinaryPrefixComparator, BitComparator, NullComparator, + RegexStringComparator, SubstringComparator } - public ByteArrayComparableModel() { } + public ByteArrayComparableModel() { + } - public ByteArrayComparableModel( - ByteArrayComparable comparator) { + public ByteArrayComparableModel(ByteArrayComparable comparator) { String typeName = comparator.getClass().getSimpleName(); ComparatorType type = ComparatorType.valueOf(typeName); this.type = typeName; @@ -157,7 +155,7 @@ public ByteArrayComparableModel( break; case BitComparator: this.value = Bytes.toString(Base64.getEncoder().encode(comparator.getValue())); - this.op = ((BitComparator)comparator).getOperator().toString(); + this.op = ((BitComparator) comparator).getOperator().toString(); break; case NullComparator: break; @@ -202,54 +200,57 @@ public ByteArrayComparable build() { // A grab bag of fields, would have been a union if this were C. // These are null by default and will only be serialized if set (non null). 
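For reference, the JaxbJsonProviderHolder introduced a few hunks above relies on the lazy-holder (initialization-on-demand) singleton idiom cited in its reformatted comment. A minimal self-contained sketch of that idiom, using illustrative names rather than the classes from this patch:

public final class LazyProvider {
  private LazyProvider() {
  }

  // The nested holder class is not initialized until getInstance() is first
  // called, so the JVM's class-loading guarantees give lazy, thread-safe
  // construction without explicit synchronization.
  private static final class Holder {
    static final LazyProvider INSTANCE = new LazyProvider();
  }

  public static LazyProvider getInstance() {
    return Holder.INSTANCE;
  }
}
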
- @XmlAttribute public String type; - @XmlAttribute public String op; - @XmlElement ByteArrayComparableModel comparator; - @XmlAttribute public String value; - @XmlElement public List filters; - @XmlAttribute public Integer limit; - @XmlAttribute public Integer offset; - @XmlAttribute public String family; - @XmlAttribute public String qualifier; - @XmlAttribute public Boolean ifMissing; - @XmlAttribute public Boolean latestVersion; - @XmlAttribute public String minColumn; - @XmlAttribute public Boolean minColumnInclusive; - @XmlAttribute public String maxColumn; - @XmlAttribute public Boolean maxColumnInclusive; - @XmlAttribute public Boolean dropDependentColumn; - @XmlAttribute public Float chance; - @XmlElement public List prefixes; - @XmlElement private List ranges; - @XmlElement public List timestamps; + @XmlAttribute + public String type; + @XmlAttribute + public String op; + @XmlElement + ByteArrayComparableModel comparator; + @XmlAttribute + public String value; + @XmlElement + public List filters; + @XmlAttribute + public Integer limit; + @XmlAttribute + public Integer offset; + @XmlAttribute + public String family; + @XmlAttribute + public String qualifier; + @XmlAttribute + public Boolean ifMissing; + @XmlAttribute + public Boolean latestVersion; + @XmlAttribute + public String minColumn; + @XmlAttribute + public Boolean minColumnInclusive; + @XmlAttribute + public String maxColumn; + @XmlAttribute + public Boolean maxColumnInclusive; + @XmlAttribute + public Boolean dropDependentColumn; + @XmlAttribute + public Float chance; + @XmlElement + public List prefixes; + @XmlElement + private List ranges; + @XmlElement + public List timestamps; static enum FilterType { - ColumnCountGetFilter, - ColumnPaginationFilter, - ColumnPrefixFilter, - ColumnRangeFilter, - DependentColumnFilter, - FamilyFilter, - FilterList, - FirstKeyOnlyFilter, - InclusiveStopFilter, - KeyOnlyFilter, - MultipleColumnPrefixFilter, - MultiRowRangeFilter, - PageFilter, - PrefixFilter, - QualifierFilter, - RandomRowFilter, - RowFilter, - SingleColumnValueExcludeFilter, - SingleColumnValueFilter, - SkipFilter, - TimestampsFilter, - ValueFilter, - WhileMatchFilter - } - - public FilterModel() { } + ColumnCountGetFilter, ColumnPaginationFilter, ColumnPrefixFilter, ColumnRangeFilter, + DependentColumnFilter, FamilyFilter, FilterList, FirstKeyOnlyFilter, InclusiveStopFilter, + KeyOnlyFilter, MultipleColumnPrefixFilter, MultiRowRangeFilter, PageFilter, PrefixFilter, + QualifierFilter, RandomRowFilter, RowFilter, SingleColumnValueExcludeFilter, + SingleColumnValueFilter, SkipFilter, TimestampsFilter, ValueFilter, WhileMatchFilter + } + + public FilterModel() { + } public FilterModel(Filter filter) { String typeName = filter.getClass().getSimpleName(); @@ -257,25 +258,25 @@ public FilterModel(Filter filter) { this.type = typeName; switch (type) { case ColumnCountGetFilter: - this.limit = ((ColumnCountGetFilter)filter).getLimit(); + this.limit = ((ColumnCountGetFilter) filter).getLimit(); break; case ColumnPaginationFilter: - this.limit = ((ColumnPaginationFilter)filter).getLimit(); - this.offset = ((ColumnPaginationFilter)filter).getOffset(); + this.limit = ((ColumnPaginationFilter) filter).getLimit(); + this.offset = ((ColumnPaginationFilter) filter).getOffset(); break; case ColumnPrefixFilter: - byte[] src = ((ColumnPrefixFilter)filter).getPrefix(); + byte[] src = ((ColumnPrefixFilter) filter).getPrefix(); this.value = Bytes.toString(Base64.getEncoder().encode(src)); break; case ColumnRangeFilter: - ColumnRangeFilter 
crf = (ColumnRangeFilter)filter; + ColumnRangeFilter crf = (ColumnRangeFilter) filter; this.minColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMinColumn())); this.minColumnInclusive = crf.getMinColumnInclusive(); this.maxColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMaxColumn())); this.maxColumnInclusive = crf.getMaxColumnInclusive(); break; case DependentColumnFilter: { - DependentColumnFilter dcf = (DependentColumnFilter)filter; + DependentColumnFilter dcf = (DependentColumnFilter) filter; this.family = Bytes.toString(Base64.getEncoder().encode(dcf.getFamily())); byte[] qualifier = dcf.getQualifier(); if (qualifier != null) { @@ -284,11 +285,12 @@ public FilterModel(Filter filter) { this.op = dcf.getCompareOperator().toString(); this.comparator = new ByteArrayComparableModel(dcf.getComparator()); this.dropDependentColumn = dcf.dropDependentColumn(); - } break; + } + break; case FilterList: - this.op = ((FilterList)filter).getOperator().toString(); + this.op = ((FilterList) filter).getOperator().toString(); this.filters = new ArrayList<>(); - for (Filter child: ((FilterList)filter).getFilters()) { + for (Filter child : ((FilterList) filter).getFilters()) { this.filters.add(new FilterModel(child)); } break; @@ -296,40 +298,38 @@ public FilterModel(Filter filter) { case KeyOnlyFilter: break; case InclusiveStopFilter: - this.value = Bytes.toString(Base64.getEncoder().encode( - ((InclusiveStopFilter)filter).getStopRowKey())); + this.value = Bytes + .toString(Base64.getEncoder().encode(((InclusiveStopFilter) filter).getStopRowKey())); break; case MultipleColumnPrefixFilter: this.prefixes = new ArrayList<>(); - for (byte[] prefix: ((MultipleColumnPrefixFilter)filter).getPrefix()) { + for (byte[] prefix : ((MultipleColumnPrefixFilter) filter).getPrefix()) { this.prefixes.add(Bytes.toString(Base64.getEncoder().encode(prefix))); } break; case MultiRowRangeFilter: this.ranges = new ArrayList<>(); - for(RowRange range : ((MultiRowRangeFilter)filter).getRowRanges()) { + for (RowRange range : ((MultiRowRangeFilter) filter).getRowRanges()) { this.ranges.add(new RowRange(range.getStartRow(), range.isStartRowInclusive(), range.getStopRow(), range.isStopRowInclusive())); } break; case PageFilter: - this.value = Long.toString(((PageFilter)filter).getPageSize()); + this.value = Long.toString(((PageFilter) filter).getPageSize()); break; case PrefixFilter: - this.value = Bytes.toString(Base64.getEncoder().encode( - ((PrefixFilter)filter).getPrefix())); + this.value = + Bytes.toString(Base64.getEncoder().encode(((PrefixFilter) filter).getPrefix())); break; case FamilyFilter: case QualifierFilter: case RowFilter: case ValueFilter: - this.op = ((CompareFilter)filter).getCompareOperator().toString(); - this.comparator = - new ByteArrayComparableModel( - ((CompareFilter)filter).getComparator()); + this.op = ((CompareFilter) filter).getCompareOperator().toString(); + this.comparator = new ByteArrayComparableModel(((CompareFilter) filter).getComparator()); break; case RandomRowFilter: - this.chance = ((RandomRowFilter)filter).getChance(); + this.chance = ((RandomRowFilter) filter).getChance(); break; case SingleColumnValueExcludeFilter: case SingleColumnValueFilter: { @@ -340,26 +340,25 @@ public FilterModel(Filter filter) { this.qualifier = Bytes.toString(Base64.getEncoder().encode(qualifier)); } this.op = scvf.getCompareOperator().toString(); - this.comparator = - new ByteArrayComparableModel(scvf.getComparator()); + this.comparator = new ByteArrayComparableModel(scvf.getComparator()); if 
(scvf.getFilterIfMissing()) { this.ifMissing = true; } if (scvf.getLatestVersionOnly()) { this.latestVersion = true; } - } break; + } + break; case SkipFilter: this.filters = new ArrayList<>(); - this.filters.add(new FilterModel(((SkipFilter)filter).getFilter())); + this.filters.add(new FilterModel(((SkipFilter) filter).getFilter())); break; case TimestampsFilter: - this.timestamps = ((TimestampsFilter)filter).getTimestamps(); + this.timestamps = ((TimestampsFilter) filter).getTimestamps(); break; case WhileMatchFilter: this.filters = new ArrayList<>(); - this.filters.add( - new FilterModel(((WhileMatchFilter)filter).getFilter())); + this.filters.add(new FilterModel(((WhileMatchFilter) filter).getFilter())); break; default: throw new RuntimeException("unhandled filter type " + type); @@ -369,105 +368,107 @@ public FilterModel(Filter filter) { public Filter build() { Filter filter; switch (FilterType.valueOf(type)) { - case ColumnCountGetFilter: - filter = new ColumnCountGetFilter(limit); - break; - case ColumnPaginationFilter: - filter = new ColumnPaginationFilter(limit, offset); - break; - case ColumnPrefixFilter: - filter = new ColumnPrefixFilter(Base64.getDecoder().decode(value)); - break; - case ColumnRangeFilter: - filter = new ColumnRangeFilter(Base64.getDecoder().decode(minColumn), - minColumnInclusive, Base64.getDecoder().decode(maxColumn), - maxColumnInclusive); - break; - case DependentColumnFilter: - filter = new DependentColumnFilter(Base64.getDecoder().decode(family), - qualifier != null ? Base64.getDecoder().decode(qualifier) : null, - dropDependentColumn, CompareOperator.valueOf(op), comparator.build()); - break; - case FamilyFilter: - filter = new FamilyFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case FilterList: { - List list = new ArrayList<>(filters.size()); - for (FilterModel model: filters) { - list.add(model.build()); - } - filter = new FilterList(FilterList.Operator.valueOf(op), list); - } break; - case FirstKeyOnlyFilter: - filter = new FirstKeyOnlyFilter(); - break; - case InclusiveStopFilter: - filter = new InclusiveStopFilter(Base64.getDecoder().decode(value)); - break; - case KeyOnlyFilter: - filter = new KeyOnlyFilter(); - break; - case MultipleColumnPrefixFilter: { - byte[][] values = new byte[prefixes.size()][]; - for (int i = 0; i < prefixes.size(); i++) { - values[i] = Base64.getDecoder().decode(prefixes.get(i)); - } - filter = new MultipleColumnPrefixFilter(values); - } break; - case MultiRowRangeFilter: { - filter = new MultiRowRangeFilter(ranges); - } break; - case PageFilter: - filter = new PageFilter(Long.parseLong(value)); - break; - case PrefixFilter: - filter = new PrefixFilter(Base64.getDecoder().decode(value)); - break; - case QualifierFilter: - filter = new QualifierFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case RandomRowFilter: - filter = new RandomRowFilter(chance); - break; - case RowFilter: - filter = new RowFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case SingleColumnValueFilter: - filter = new SingleColumnValueFilter(Base64.getDecoder().decode(family), - qualifier != null ? 
Base64.getDecoder().decode(qualifier) : null, - CompareOperator.valueOf(op), comparator.build()); - if (ifMissing != null) { - ((SingleColumnValueFilter)filter).setFilterIfMissing(ifMissing); - } - if (latestVersion != null) { - ((SingleColumnValueFilter)filter).setLatestVersionOnly(latestVersion); + case ColumnCountGetFilter: + filter = new ColumnCountGetFilter(limit); + break; + case ColumnPaginationFilter: + filter = new ColumnPaginationFilter(limit, offset); + break; + case ColumnPrefixFilter: + filter = new ColumnPrefixFilter(Base64.getDecoder().decode(value)); + break; + case ColumnRangeFilter: + filter = new ColumnRangeFilter(Base64.getDecoder().decode(minColumn), minColumnInclusive, + Base64.getDecoder().decode(maxColumn), maxColumnInclusive); + break; + case DependentColumnFilter: + filter = new DependentColumnFilter(Base64.getDecoder().decode(family), + qualifier != null ? Base64.getDecoder().decode(qualifier) : null, dropDependentColumn, + CompareOperator.valueOf(op), comparator.build()); + break; + case FamilyFilter: + filter = new FamilyFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case FilterList: { + List list = new ArrayList<>(filters.size()); + for (FilterModel model : filters) { + list.add(model.build()); + } + filter = new FilterList(FilterList.Operator.valueOf(op), list); } - break; - case SingleColumnValueExcludeFilter: - filter = new SingleColumnValueExcludeFilter(Base64.getDecoder().decode(family), - qualifier != null ? Base64.getDecoder().decode(qualifier) : null, - CompareOperator.valueOf(op), comparator.build()); - if (ifMissing != null) { - ((SingleColumnValueExcludeFilter)filter).setFilterIfMissing(ifMissing); + break; + case FirstKeyOnlyFilter: + filter = new FirstKeyOnlyFilter(); + break; + case InclusiveStopFilter: + filter = new InclusiveStopFilter(Base64.getDecoder().decode(value)); + break; + case KeyOnlyFilter: + filter = new KeyOnlyFilter(); + break; + case MultipleColumnPrefixFilter: { + byte[][] values = new byte[prefixes.size()][]; + for (int i = 0; i < prefixes.size(); i++) { + values[i] = Base64.getDecoder().decode(prefixes.get(i)); + } + filter = new MultipleColumnPrefixFilter(values); } - if (latestVersion != null) { - ((SingleColumnValueExcludeFilter)filter).setLatestVersionOnly(latestVersion); + break; + case MultiRowRangeFilter: { + filter = new MultiRowRangeFilter(ranges); } - break; - case SkipFilter: - filter = new SkipFilter(filters.get(0).build()); - break; - case TimestampsFilter: - filter = new TimestampsFilter(timestamps); - break; - case ValueFilter: - filter = new ValueFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case WhileMatchFilter: - filter = new WhileMatchFilter(filters.get(0).build()); - break; - default: - throw new RuntimeException("unhandled filter type: " + type); + break; + case PageFilter: + filter = new PageFilter(Long.parseLong(value)); + break; + case PrefixFilter: + filter = new PrefixFilter(Base64.getDecoder().decode(value)); + break; + case QualifierFilter: + filter = new QualifierFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case RandomRowFilter: + filter = new RandomRowFilter(chance); + break; + case RowFilter: + filter = new RowFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case SingleColumnValueFilter: + filter = new SingleColumnValueFilter(Base64.getDecoder().decode(family), + qualifier != null ? 
Base64.getDecoder().decode(qualifier) : null, + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueFilter) filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueFilter) filter).setLatestVersionOnly(latestVersion); + } + break; + case SingleColumnValueExcludeFilter: + filter = new SingleColumnValueExcludeFilter(Base64.getDecoder().decode(family), + qualifier != null ? Base64.getDecoder().decode(qualifier) : null, + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueExcludeFilter) filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueExcludeFilter) filter).setLatestVersionOnly(latestVersion); + } + break; + case SkipFilter: + filter = new SkipFilter(filters.get(0).build()); + break; + case TimestampsFilter: + filter = new TimestampsFilter(timestamps); + break; + case ValueFilter: + filter = new ValueFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case WhileMatchFilter: + filter = new WhileMatchFilter(filters.get(0).build()); + break; + default: + throw new RuntimeException("unhandled filter type: " + type); } return filter; } @@ -476,7 +477,6 @@ public Filter build() { /** * Get the JacksonJaxbJsonProvider instance; - * * @return A JacksonJaxbJsonProvider. */ private static JacksonJaxbJsonProvider getJasonProvider() { @@ -489,8 +489,9 @@ private static JacksonJaxbJsonProvider getJasonProvider() { * @throws Exception */ public static Filter buildFilter(String s) throws Exception { - FilterModel model = getJasonProvider().locateMapper(FilterModel.class, - MediaType.APPLICATION_JSON_TYPE).readValue(s, FilterModel.class); + FilterModel model = + getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE) + .readValue(s, FilterModel.class); return model.build(); } @@ -500,8 +501,8 @@ public static Filter buildFilter(String s) throws Exception { * @throws Exception */ public static String stringifyFilter(final Filter filter) throws Exception { - return getJasonProvider().locateMapper(FilterModel.class, - MediaType.APPLICATION_JSON_TYPE).writeValueAsString(new FilterModel(filter)); + return getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE) + .writeValueAsString(new FilterModel(filter)); } private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":"); @@ -514,11 +515,11 @@ public static ScannerModel fromScan(Scan scan) throws Exception { ScannerModel model = new ScannerModel(); model.setStartRow(scan.getStartRow()); model.setEndRow(scan.getStopRow()); - Map> families = scan.getFamilyMap(); + Map> families = scan.getFamilyMap(); if (families != null) { - for (Map.Entry> entry : families.entrySet()) { + for (Map.Entry> entry : families.entrySet()) { if (entry.getValue() != null) { - for (byte[] qualifier: entry.getValue()) { + for (byte[] qualifier : entry.getValue()) { model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier)); } } else { @@ -561,7 +562,8 @@ public static ScannerModel fromScan(Scan scan) throws Exception { /** * Default constructor */ - public ScannerModel() {} + public ScannerModel() { + } /** * Constructor @@ -572,11 +574,10 @@ public ScannerModel() {} * @param caching the number of rows that the scanner will fetch at once * @param endTime the upper bound on timestamps of values of interest * @param maxVersions the maximum number of versions to return - * @param filter a filter specification - * (values with timestamps 
later than this are excluded) + * @param filter a filter specification (values with timestamps later than this are excluded) */ - public ScannerModel(byte[] startRow, byte[] endRow, List columns, - int batch, int caching, long endTime, int maxVersions, String filter) { + public ScannerModel(byte[] startRow, byte[] endRow, List columns, int batch, int caching, + long endTime, int maxVersions, String filter) { super(); this.startRow = startRow; this.endRow = endRow; @@ -595,14 +596,14 @@ public ScannerModel(byte[] startRow, byte[] endRow, List columns, * @param columns the columns to scan * @param batch the number of values to return in batch * @param caching the number of rows that the scanner will fetch at once - * @param startTime the lower bound on timestamps of values of interest - * (values with timestamps earlier than this are excluded) - * @param endTime the upper bound on timestamps of values of interest - * (values with timestamps later than this are excluded) + * @param startTime the lower bound on timestamps of values of interest (values with timestamps + * earlier than this are excluded) + * @param endTime the upper bound on timestamps of values of interest (values with timestamps + * later than this are excluded) * @param filter a filter specification */ - public ScannerModel(byte[] startRow, byte[] endRow, List columns, - int batch, int caching, long startTime, long endTime, String filter) { + public ScannerModel(byte[] startRow, byte[] endRow, List columns, int batch, int caching, + long startTime, long endTime, String filter) { super(); this.startRow = startRow; this.endRow = endRow; @@ -628,6 +629,7 @@ public void addColumn(byte[] column) { public void addLabel(String label) { labels.add(label); } + /** * @return true if a start row was specified */ @@ -661,12 +663,12 @@ public byte[] getEndRow() { /** * @return list of columns of interest in column:qualifier format, or empty for all */ - @XmlElement(name="column") + @XmlElement(name = "column") public List getColumns() { return columns; } - @XmlElement(name="labels") + @XmlElement(name = "labels") public List getLabels() { return labels; } @@ -771,7 +773,8 @@ public void setCaching(int caching) { } /** - * @param value true if HFile blocks should be cached on the servers for this scan, false otherwise + * @param value true if HFile blocks should be cached on the servers for this scan, false + * otherwise */ public void setCacheBlocks(boolean value) { this.cacheBlocks = value; @@ -821,7 +824,7 @@ public byte[] createProtobufOutput() { if (!Bytes.equals(endRow, HConstants.EMPTY_START_ROW)) { builder.setEndRow(UnsafeByteOperations.unsafeWrap(endRow)); } - for (byte[] column: columns) { + for (byte[] column : columns) { builder.addColumns(UnsafeByteOperations.unsafeWrap(column)); } if (startTime != 0) { @@ -834,7 +837,7 @@ public byte[] createProtobufOutput() { if (caching > 0) { builder.setCaching(caching); } - if (limit > 0){ + if (limit > 0) { builder.setLimit(limit); } builder.setMaxVersions(maxVersions); @@ -850,8 +853,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Scanner.Builder builder = Scanner.newBuilder(); ProtobufUtil.mergeFrom(builder, message); if (builder.hasStartRow()) { @@ -860,7 +862,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) if (builder.hasEndRow()) { endRow = 
builder.getEndRow().toByteArray(); } - for (ByteString column: builder.getColumnsList()) { + for (ByteString column : builder.getColumnsList()) { addColumn(column.toByteArray()); } if (builder.hasBatch()) { @@ -886,7 +888,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) } if (builder.getLabelsList() != null) { List labels = builder.getLabelsList(); - for(String label : labels) { + for (String label : labels) { addLabel(label); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java index cb1e7f76028a..8b9022d9f4c8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,34 +18,30 @@ package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElementWrapper; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.util.Bytes; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus; /** * Representation of the status of a storage cluster: *

 *
 * regions: the total number of regions served by the cluster
- * requests: the total number of requests per second handled by the
- * cluster in the last reporting interval
+ * requests: the total number of requests per second handled by the cluster in the last
+ * reporting interval
 * averageLoad: the average load of the region servers in the cluster
 * liveNodes: detailed status of the live region servers
 * deadNodes: the names of region servers declared dead
    • @@ -97,7 +92,7 @@ * </complexType> * */ -@XmlRootElement(name="ClusterStatus") +@XmlRootElement(name = "ClusterStatus") @InterfaceAudience.Private public class StorageClusterStatusModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -152,11 +147,11 @@ public Region(byte[] name) { * @param memstoreSizeMB total size of memstore, in MB * @param storefileIndexSizeKB total size of store file indexes, in KB */ - public Region(byte[] name, int stores, int storefiles, - int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB, - long readRequestsCount, long cpRequestsCount, long writeRequestsCount, - int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, - long totalCompactingKVs, long currentCompactedKVs) { + public Region(byte[] name, int stores, int storefiles, int storefileSizeMB, + int memstoreSizeMB, long storefileIndexSizeKB, long readRequestsCount, + long cpRequestsCount, long writeRequestsCount, int rootIndexSizeKB, + int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, long totalCompactingKVs, + long currentCompactedKVs) { this.name = name; this.stores = stores; this.storefiles = storefiles; @@ -300,8 +295,7 @@ public void setCpRequestsCount(long cpRequestsCount) { } /** - * @param rootIndexSizeKB The current total size of root-level indexes - * for the region, in KB + * @param rootIndexSizeKB The current total size of root-level indexes for the region, in KB */ public void setRootIndexSizeKB(int rootIndexSizeKB) { this.rootIndexSizeKB = rootIndexSizeKB; @@ -315,32 +309,31 @@ public void setWriteRequestsCount(long writeRequestsCount) { } /** - * @param currentCompactedKVs The completed count of key values - * in currently running compaction + * @param currentCompactedKVs The completed count of key values in currently running + * compaction */ public void setCurrentCompactedKVs(long currentCompactedKVs) { this.currentCompactedKVs = currentCompactedKVs; } /** - * @param totalCompactingKVs The total compacting key values - * in currently running compaction + * @param totalCompactingKVs The total compacting key values in currently running compaction */ public void setTotalCompactingKVs(long totalCompactingKVs) { this.totalCompactingKVs = totalCompactingKVs; } /** - * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, - * not just loaded into the block cache, in KB. + * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, not just loaded + * into the block cache, in KB. */ public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) { this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; } /** - * @param totalStaticIndexSizeKB The total size of all index blocks, - * not just the root level, in KB. + * @param totalStaticIndexSizeKB The total size of all index blocks, not just the root level, + * in KB. 
*/ public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) { this.totalStaticIndexSizeKB = totalStaticIndexSizeKB; @@ -400,15 +393,14 @@ public void setStorefileIndexSizeKB(long storefileIndexSizeKB) { * Add a region name to the list * @param name the region name */ - public void addRegion(byte[] name, int stores, int storefiles, - int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB, - long readRequestsCount, long cpRequestsCount, long writeRequestsCount, - int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, - long totalCompactingKVs, long currentCompactedKVs) { - regions.add(new Region(name, stores, storefiles, storefileSizeMB, - memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, cpRequestsCount, - writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, - totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs)); + public void addRegion(byte[] name, int stores, int storefiles, int storefileSizeMB, + int memstoreSizeMB, long storefileIndexSizeKB, long readRequestsCount, long cpRequestsCount, + long writeRequestsCount, int rootIndexSizeKB, int totalStaticIndexSizeKB, + int totalStaticBloomSizeKB, long totalCompactingKVs, long currentCompactedKVs) { + regions.add(new Region(name, stores, storefiles, storefileSizeMB, memstoreSizeMB, + storefileIndexSizeKB, readRequestsCount, cpRequestsCount, writeRequestsCount, + rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs, + currentCompactedKVs)); } /** @@ -422,7 +414,8 @@ public Region getRegion(int index) { /** * Default constructor */ - public Node() {} + public Node() { + } /** * Constructor @@ -469,7 +462,7 @@ public int getMaxHeapSizeMB() { /** * @return the list of regions served by the region server */ - @XmlElement(name="Region") + @XmlElement(name = "Region") public List getRegions() { return regions; } @@ -608,7 +601,7 @@ public int getRegions() { /** * @return the total number of requests per second handled by the cluster in the last reporting - * interval + * interval */ @XmlAttribute public long getRequests() { @@ -661,9 +654,8 @@ public void setAverageLoad(double averageLoad) { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append(String.format("%d live servers, %d dead servers, " + - "%.4f average load%n%n", liveNodes.size(), deadNodes.size(), - averageLoad)); + sb.append(String.format("%d live servers, %d dead servers, " + "%.4f average load%n%n", + liveNodes.size(), deadNodes.size(), averageLoad)); if (!liveNodes.isEmpty()) { sb.append(liveNodes.size()); sb.append(" live servers\n"); @@ -735,8 +727,7 @@ public byte[] createProtobufOutput() { builder.setRequests(requests); builder.setAverageLoad(averageLoad); for (Node node : liveNodes) { - StorageClusterStatus.Node.Builder nodeBuilder = - StorageClusterStatus.Node.newBuilder(); + StorageClusterStatus.Node.Builder nodeBuilder = StorageClusterStatus.Node.newBuilder(); nodeBuilder.setName(node.name); nodeBuilder.setStartCode(node.startCode); nodeBuilder.setRequests(node.requests); @@ -744,7 +735,7 @@ public byte[] createProtobufOutput() { nodeBuilder.setMaxHeapSizeMB(node.maxHeapSizeMB); for (Node.Region region : node.regions) { StorageClusterStatus.Region.Builder regionBuilder = - StorageClusterStatus.Region.newBuilder(); + StorageClusterStatus.Region.newBuilder(); regionBuilder.setName(UnsafeByteOperations.unsafeWrap(region.name)); regionBuilder.setStores(region.stores); regionBuilder.setStorefiles(region.storefiles); @@ -785,26 +776,16 @@ public 
ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOExce for (StorageClusterStatus.Node node : builder.getLiveNodesList()) { long startCode = node.hasStartCode() ? node.getStartCode() : -1; StorageClusterStatusModel.Node nodeModel = - addLiveNode(node.getName(), startCode, node.getHeapSizeMB(), - node.getMaxHeapSizeMB()); + addLiveNode(node.getName(), startCode, node.getHeapSizeMB(), node.getMaxHeapSizeMB()); long requests = node.hasRequests() ? node.getRequests() : 0; nodeModel.setRequests(requests); for (StorageClusterStatus.Region region : node.getRegionsList()) { - nodeModel.addRegion( - region.getName().toByteArray(), - region.getStores(), - region.getStorefiles(), - region.getStorefileSizeMB(), - region.getMemStoreSizeMB(), - region.getStorefileIndexSizeKB(), - region.getReadRequestsCount(), - region.getCpRequestsCount(), - region.getWriteRequestsCount(), - region.getRootIndexSizeKB(), - region.getTotalStaticIndexSizeKB(), - region.getTotalStaticBloomSizeKB(), - region.getTotalCompactingKVs(), - region.getCurrentCompactedKVs()); + nodeModel.addRegion(region.getName().toByteArray(), region.getStores(), + region.getStorefiles(), region.getStorefileSizeMB(), region.getMemStoreSizeMB(), + region.getStorefileIndexSizeKB(), region.getReadRequestsCount(), + region.getCpRequestsCount(), region.getWriteRequestsCount(), region.getRootIndexSizeKB(), + region.getTotalStaticIndexSizeKB(), region.getTotalStaticBloomSizeKB(), + region.getTotalCompactingKVs(), region.getCurrentCompactedKVs()); } } for (String node : builder.getDeadNodesList()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java index 584099765c7b..bdf76b157689 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,14 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
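For readers skimming the StorageClusterStatusModel hunks above: live servers are attached through addLiveNode and then populated per region, exactly as getObjectFromMessage does when decoding protobuf. A hedged usage sketch (the server name, start code and heap figures are invented for illustration):

import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;

public class ClusterStatusSketch {
  public static void main(String[] args) {
    StorageClusterStatusModel status = new StorageClusterStatusModel();
    // Arguments: server name, start code, used heap MB, max heap MB.
    StorageClusterStatusModel.Node node =
      status.addLiveNode("regionserver1.example.com:16020", 1700000000000L, 128, 1024);
    node.setRequests(0);
    System.out.println(status);
  }
}
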
*/ - package org.apache.hadoop.hbase.rest.model; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -35,7 +31,7 @@ * </complexType> * */ -@XmlRootElement(name="ClusterVersion") +@XmlRootElement(name = "ClusterVersion") @InterfaceAudience.Private public class StorageClusterVersionModel implements Serializable { private static final long serialVersionUID = 1L; @@ -45,7 +41,7 @@ public class StorageClusterVersionModel implements Serializable { /** * @return the storage cluster version */ - @XmlAttribute(name="Version") + @XmlAttribute(name = "Version") public String getVersion() { return version; } @@ -57,7 +53,8 @@ public void setVersion(String version) { this.version = version; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -65,10 +62,9 @@ public String toString() { return version; } - //needed for jackson deserialization + // needed for jackson deserialization private static StorageClusterVersionModel valueOf(String value) { - StorageClusterVersionModel versionModel - = new StorageClusterVersionModel(); + StorageClusterVersionModel versionModel = new StorageClusterVersionModel(); versionModel.setVersion(value); return versionModel; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java index 320062512152..e407b1383786 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,26 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableInfoMessage.TableInfo; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; - /** * Representation of a list of table regions. 
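As an aside on the JAXB annotations being normalized in these hunks (@XmlRootElement(name = "ClusterVersion"), @XmlAttribute(name = "Version")), a hedged sketch of marshalling the version model to XML; the version string is only an example and the exact output depends on the JAXB runtime:

import java.io.StringWriter;
import javax.xml.bind.JAXBContext;
import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;

public class ClusterVersionXmlSketch {
  public static void main(String[] args) throws Exception {
    StorageClusterVersionModel model = new StorageClusterVersionModel();
    model.setVersion("2.4.17");
    StringWriter out = new StringWriter();
    JAXBContext.newInstance(StorageClusterVersionModel.class)
      .createMarshaller().marshal(model, out);
    // Expect something like: <ClusterVersion Version="2.4.17"/>
    System.out.println(out);
  }
}
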
* @@ -49,7 +45,7 @@ * </complexType> * */ -@XmlRootElement(name="TableInfo") +@XmlRootElement(name = "TableInfo") @InterfaceAudience.Private public class TableInfoModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -60,7 +56,8 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler { /** * Default constructor */ - public TableInfoModel() {} + public TableInfoModel() { + } /** * Constructor @@ -97,7 +94,7 @@ public String getName() { /** * @return the regions */ - @XmlElement(name="Region") + @XmlElement(name = "Region") public List getRegions() { return regions; } @@ -116,13 +113,14 @@ public void setRegions(List regions) { this.regions = regions; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); - for(TableRegionModel aRegion : regions) { + for (TableRegionModel aRegion : regions) { sb.append(aRegion.toString()); sb.append('\n'); } @@ -133,7 +131,7 @@ public String toString() { public byte[] createProtobufOutput() { TableInfo.Builder builder = TableInfo.newBuilder(); builder.setName(name); - for (TableRegionModel aRegion: regions) { + for (TableRegionModel aRegion : regions) { TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder(); regionBuilder.setName(aRegion.getName()); regionBuilder.setId(aRegion.getId()); @@ -146,16 +144,14 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { TableInfo.Builder builder = TableInfo.newBuilder(); ProtobufUtil.mergeFrom(builder, message); setName(builder.getName()); - for (TableInfo.Region region: builder.getRegionsList()) { - add(new TableRegionModel(builder.getName(), region.getId(), - region.getStartKey().toByteArray(), - region.getEndKey().toByteArray(), - region.getLocation())); + for (TableInfo.Region region : builder.getRegionsList()) { + add( + new TableRegionModel(builder.getName(), region.getId(), region.getStartKey().toByteArray(), + region.getEndKey().toByteArray(), region.getLocation())); } return this; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java index 8d3e1ab04641..a092d179af8c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
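To make the TableInfoModel reshuffled above concrete, a hedged construction sketch; the table name, region boundaries and server location are invented for illustration:

import org.apache.hadoop.hbase.rest.model.TableInfoModel;
import org.apache.hadoop.hbase.rest.model.TableRegionModel;
import org.apache.hadoop.hbase.util.Bytes;

public class TableInfoSketch {
  public static void main(String[] args) {
    TableInfoModel info = new TableInfoModel("example_table");
    // Arguments: table, region id, start key, end key, hosting server.
    info.add(new TableRegionModel("example_table", 1700000000000L,
      Bytes.toBytes(""), Bytes.toBytes("row-9999"), "regionserver1.example.com:16020"));
    System.out.println(info);
  }
}
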
*/ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlElementRef; import javax.xml.bind.annotation.XmlRootElement; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableListMessage.TableList; @@ -36,7 +32,7 @@ /** * Simple representation of a list of table names. */ -@XmlRootElement(name="TableList") +@XmlRootElement(name = "TableList") @InterfaceAudience.Private public class TableListModel implements Serializable, ProtobufMessageHandler { @@ -47,7 +43,8 @@ public class TableListModel implements Serializable, ProtobufMessageHandler { /** * Default constructor */ - public TableListModel() {} + public TableListModel() { + } /** * Add the table name model to the list @@ -68,7 +65,7 @@ public TableModel get(int index) { /** * @return the tables */ - @XmlElementRef(name="table") + @XmlElementRef(name = "table") public List getTables() { return tables; } @@ -80,13 +77,14 @@ public void setTables(List tables) { this.tables = tables; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); - for(TableModel aTable : tables) { + for (TableModel aTable : tables) { sb.append(aTable.toString()); sb.append('\n'); } @@ -103,11 +101,10 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { TableList.Builder builder = TableList.newBuilder(); ProtobufUtil.mergeFrom(builder, message); - for (String table: builder.getNameList()) { + for (String table : builder.getNameList()) { this.add(new TableModel(table)); } return this; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java index 4628263e9922..d856db40f568 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,14 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
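The ProtobufMessageHandler contract that recurs through these files is a simple write/read pair: createProtobufOutput() emits the REST gateway's protobuf wire form and getObjectFromMessage(byte[]) repopulates a model from it. A hedged round-trip sketch with TableListModel (the table name is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.rest.model.TableListModel;
import org.apache.hadoop.hbase.rest.model.TableModel;

public class TableListRoundTripSketch {
  public static void main(String[] args) throws IOException {
    TableListModel tables = new TableListModel();
    tables.add(new TableModel("example_table"));
    byte[] wire = tables.createProtobufOutput();
    // getObjectFromMessage() returns the handler itself, so a cast back is safe here.
    TableListModel decoded = (TableListModel) new TableListModel().getObjectFromMessage(wire);
    System.out.println(decoded);
  }
}
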
*/ - package org.apache.hadoop.hbase.rest.model; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -37,18 +33,19 @@ * </complexType> * */ -@XmlRootElement(name="table") +@XmlRootElement(name = "table") @InterfaceAudience.Private public class TableModel implements Serializable { private static final long serialVersionUID = 1L; - + private String name; - + /** * Default constructor */ - public TableModel() {} + public TableModel() { + } /** * Constructor @@ -74,7 +71,8 @@ public void setName(String name) { this.name = name; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java index d794c44d7f71..3e743bdc8a5a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.Serializable; @@ -28,8 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a region of a table and its current location on the - * storage cluster. + * Representation of a region of a table and its current location on the storage cluster. * *
 * <complexType name="TableRegion">
@@ -41,7 +38,7 @@
 * </complexType>
 *
      */ -@XmlRootElement(name="Region") +@XmlRootElement(name = "Region") @InterfaceAudience.Private public class TableRegionModel implements Serializable { @@ -49,14 +46,15 @@ public class TableRegionModel implements Serializable { private String table; private long id; - private byte[] startKey; + private byte[] startKey; private byte[] endKey; private String location; /** * Constructor */ - public TableRegionModel() {} + public TableRegionModel() { + } /** * Constructor @@ -65,8 +63,7 @@ public TableRegionModel() {} * @param startKey the start key of the region * @param endKey the end key of the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey) { this(table, id, startKey, endKey, null); } @@ -78,8 +75,7 @@ public TableRegionModel(String table, long id, byte[] startKey, * @param endKey the end key of the region * @param location the name and port of the region server hosting the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey, String location) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey, String location) { this.table = table; this.id = id; this.startKey = startKey; @@ -92,17 +88,17 @@ public TableRegionModel(String table, long id, byte[] startKey, */ @XmlAttribute public String getName() { - byte [] tableNameAsBytes = Bytes.toBytes(this.table); + byte[] tableNameAsBytes = Bytes.toBytes(this.table); TableName tableName = TableName.valueOf(tableNameAsBytes); - byte [] nameAsBytes = RegionInfo.createRegionName( - tableName, this.startKey, this.id, !tableName.isSystemTable()); + byte[] nameAsBytes = + RegionInfo.createRegionName(tableName, this.startKey, this.id, !tableName.isSystemTable()); return Bytes.toString(nameAsBytes); } /** * @return the encoded region id */ - @XmlAttribute + @XmlAttribute public long getId() { return id; } @@ -110,7 +106,7 @@ public long getId() { /** * @return the start key */ - @XmlAttribute + @XmlAttribute public byte[] getStartKey() { return startKey; } @@ -118,7 +114,7 @@ public byte[] getStartKey() { /** * @return the end key */ - @XmlAttribute + @XmlAttribute public byte[] getEndKey() { return endKey; } @@ -126,7 +122,7 @@ public byte[] getEndKey() { /** * @return the name and port of the region server hosting the region */ - @XmlAttribute + @XmlAttribute public String getLocation() { return location; } @@ -171,7 +167,8 @@ public void setLocation(String location) { this.location = location; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java index b5578b70e8f3..e9512a71c625 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +60,7 @@ * </complexType> * */ -@XmlRootElement(name="TableSchema") +@XmlRootElement(name = "TableSchema") @InterfaceAudience.Private public class TableSchemaModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @@ -73,13 +72,14 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { private static final QName COMPRESSION = new QName(ColumnFamilyDescriptorBuilder.COMPRESSION); private String name; - private Map attrs = new LinkedHashMap<>(); + private Map attrs = new LinkedHashMap<>(); private List columns = new ArrayList<>(); /** * Default constructor. */ - public TableSchemaModel() {} + public TableSchemaModel() { + } /** * Constructor @@ -88,16 +88,14 @@ public TableSchemaModel() {} public TableSchemaModel(TableDescriptor tableDescriptor) { setName(tableDescriptor.getTableName().getNameAsString()); for (Map.Entry e : tableDescriptor.getValues().entrySet()) { - addAttribute(Bytes.toString(e.getKey().get()), - Bytes.toString(e.getValue().get())); + addAttribute(Bytes.toString(e.getKey().get()), Bytes.toString(e.getValue().get())); } for (ColumnFamilyDescriptor hcd : tableDescriptor.getColumnFamilies()) { ColumnSchemaModel columnModel = new ColumnSchemaModel(); columnModel.setName(hcd.getNameAsString()); - for (Map.Entry e: - hcd.getValues().entrySet()) { + for (Map.Entry e : hcd.getValues().entrySet()) { columnModel.addAttribute(Bytes.toString(e.getKey().get()), - Bytes.toString(e.getValue().get())); + Bytes.toString(e.getValue().get())); } addColumnFamily(columnModel); } @@ -114,8 +112,8 @@ public void addAttribute(String name, Object value) { } /** - * Return a table descriptor value as a string. Calls toString() on the - * object stored in the descriptor value map. + * Return a table descriptor value as a string. Calls toString() on the object stored in the + * descriptor value map. 
* @param name the attribute name * @return the attribute value */ @@ -154,14 +152,14 @@ public String getName() { */ @XmlAnyAttribute @JsonAnyGetter - public Map getAny() { + public Map getAny() { return attrs; } /** * @return the columns */ - @XmlElement(name="ColumnSchema") + @XmlElement(name = "ColumnSchema") public List getColumns() { return columns; } @@ -180,7 +178,8 @@ public void setColumns(List columns) { this.columns = columns; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -189,7 +188,7 @@ public String toString() { sb.append("{ NAME=> '"); sb.append(name); sb.append('\''); - for (Map.Entry e : attrs.entrySet()) { + for (Map.Entry e : attrs.entrySet()) { sb.append(", "); sb.append(e.getKey().getLocalPart()); sb.append(" => '"); @@ -265,8 +264,7 @@ public byte[] createProtobufOutput() { TableSchema.Builder builder = TableSchema.newBuilder(); builder.setName(name); for (Map.Entry e : attrs.entrySet()) { - TableSchema.Attribute.Builder attrBuilder = - TableSchema.Attribute.newBuilder(); + TableSchema.Attribute.Builder attrBuilder = TableSchema.Attribute.newBuilder(); attrBuilder.setName(e.getKey().getLocalPart()); attrBuilder.setValue(e.getValue().toString()); builder.addAttrs(attrBuilder); @@ -276,8 +274,7 @@ public byte[] createProtobufOutput() { ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder(); familyBuilder.setName(family.getName()); for (Map.Entry e : familyAttrs.entrySet()) { - ColumnSchema.Attribute.Builder attrBuilder = - ColumnSchema.Attribute.newBuilder(); + ColumnSchema.Attribute.Builder attrBuilder = ColumnSchema.Attribute.newBuilder(); attrBuilder.setName(e.getKey().getLocalPart()); attrBuilder.setValue(e.getValue().toString()); familyBuilder.addAttrs(attrBuilder); @@ -300,8 +297,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { TableSchema.Builder builder = TableSchema.newBuilder(); ProtobufUtil.mergeFrom(builder, message); this.setName(builder.getName()); @@ -321,8 +317,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) familyModel.addAttribute(ColumnFamilyDescriptorBuilder.TTL, family.getTtl()); } if (family.hasMaxVersions()) { - familyModel.addAttribute(HConstants.VERSIONS, - family.getMaxVersions()); + familyModel.addAttribute(HConstants.VERSIONS, family.getMaxVersions()); } if (family.hasCompression()) { familyModel.addAttribute(ColumnFamilyDescriptorBuilder.COMPRESSION, @@ -339,13 +334,13 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) @JsonIgnore public TableDescriptor getTableDescriptor() { TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(getName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(getName())); for (Map.Entry e : getAny().entrySet()) { tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } for (ColumnSchemaModel column : getColumns()) { - ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(column.getName())); + ColumnFamilyDescriptorBuilder cfdb = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column.getName())); for (Map.Entry e : column.getAny().entrySet()) { cfdb.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java index a3f2fa6a76b4..f6257dc4b1eb 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; @@ -34,8 +32,7 @@ import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.VersionMessage.Version; /** - * A representation of the collection of versions of the REST gateway software - * components. + * A representation of the collection of versions of the REST gateway software components. *
 * <ul>
 * <li>restVersion: REST gateway revision</li>
 * <li>jvmVersion: the JVM vendor and version information</li>
 @@ -44,7 +41,7 @@
 * <li>jerseyVersion: the version of the embedded Jersey framework</li>
 * </ul>
      */ -@XmlRootElement(name="Version") +@XmlRootElement(name = "Version") @InterfaceAudience.Private public class VersionModel implements Serializable, ProtobufMessageHandler { @@ -59,7 +56,8 @@ public class VersionModel implements Serializable, ProtobufMessageHandler { /** * Default constructor. Do not use. */ - public VersionModel() {} + public VersionModel() { + } /** * Constructor @@ -67,12 +65,10 @@ public VersionModel() {} */ public VersionModel(ServletContext context) { restVersion = RESTServlet.VERSION_STRING; - jvmVersion = System.getProperty("java.vm.vendor") + ' ' + - System.getProperty("java.version") + '-' + - System.getProperty("java.vm.version"); - osVersion = System.getProperty("os.name") + ' ' + - System.getProperty("os.version") + ' ' + - System.getProperty("os.arch"); + jvmVersion = System.getProperty("java.vm.vendor") + ' ' + System.getProperty("java.version") + + '-' + System.getProperty("java.vm.version"); + osVersion = System.getProperty("os.name") + ' ' + System.getProperty("os.version") + ' ' + + System.getProperty("os.arch"); serverVersion = context.getServerInfo(); jerseyVersion = ServletContainer.class.getPackage().getImplementationVersion(); // Currently, this will always be null because the manifest doesn't have any useful information @@ -82,7 +78,7 @@ public VersionModel(ServletContext context) { /** * @return the REST gateway version */ - @XmlAttribute(name="REST") + @XmlAttribute(name = "REST") public String getRESTVersion() { return restVersion; } @@ -90,7 +86,7 @@ public String getRESTVersion() { /** * @return the JVM vendor and version */ - @XmlAttribute(name="JVM") + @XmlAttribute(name = "JVM") public String getJVMVersion() { return jvmVersion; } @@ -98,7 +94,7 @@ public String getJVMVersion() { /** * @return the OS name, version, and hardware architecture */ - @XmlAttribute(name="OS") + @XmlAttribute(name = "OS") public String getOSVersion() { return osVersion; } @@ -106,7 +102,7 @@ public String getOSVersion() { /** * @return the servlet container version */ - @XmlAttribute(name="Server") + @XmlAttribute(name = "Server") public String getServerVersion() { return serverVersion; } @@ -114,7 +110,7 @@ public String getServerVersion() { /** * @return the version of the embedded Jersey framework */ - @XmlAttribute(name="Jersey") + @XmlAttribute(name = "Jersey") public String getJerseyVersion() { return jerseyVersion; } @@ -154,7 +150,8 @@ public void setJerseyVersion(String version) { this.jerseyVersion = version; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -186,8 +183,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Version.Builder builder = Version.newBuilder(); ProtobufUtil.mergeFrom(builder, message); if (builder.hasRestVersion()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java index 3aa81db5f03a..3acc57747150 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider; import java.util.Arrays; @@ -44,10 +42,9 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * Plumbing for hooking up Jersey's JSON entity body encoding and decoding - * support to JAXB. Modify how the context is created (by using e.g. a - * different configuration builder) to control how JSON is processed and - * created. + * Plumbing for hooking up Jersey's JSON entity body encoding and decoding support to JAXB. Modify + * how the context is created (by using e.g. a different configuration builder) to control how JSON + * is processed and created. */ @Provider @InterfaceAudience.Private @@ -57,23 +54,11 @@ public class JAXBContextResolver implements ContextResolver { private final Set> types; - private final Class[] cTypes = { - CellModel.class, - CellSetModel.class, - ColumnSchemaModel.class, - NamespacesModel.class, - NamespacesInstanceModel.class, - RowModel.class, - ScannerModel.class, - StorageClusterStatusModel.class, - StorageClusterVersionModel.class, - TableInfoModel.class, - TableListModel.class, - TableModel.class, - TableRegionModel.class, - TableSchemaModel.class, - VersionModel.class - }; + private final Class[] cTypes = { CellModel.class, CellSetModel.class, ColumnSchemaModel.class, + NamespacesModel.class, NamespacesInstanceModel.class, RowModel.class, ScannerModel.class, + StorageClusterStatusModel.class, StorageClusterVersionModel.class, TableInfoModel.class, + TableListModel.class, TableModel.class, TableRegionModel.class, TableSchemaModel.class, + VersionModel.class }; @SuppressWarnings("unchecked") public JAXBContextResolver() throws Exception { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java index 9c94611355ae..fc26d1bc3421 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider.consumer; import java.io.ByteArrayOutputStream; @@ -39,28 +37,25 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * Adapter for hooking up Jersey content processing dispatch to - * ProtobufMessageHandler interface capable handlers for decoding protobuf input. + * Adapter for hooking up Jersey content processing dispatch to ProtobufMessageHandler interface + * capable handlers for decoding protobuf input. 
*/ @Provider -@Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF}) +@Consumes({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) @InterfaceAudience.Private -public class ProtobufMessageBodyConsumer - implements MessageBodyReader { - private static final Logger LOG = - LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class); +public class ProtobufMessageBodyConsumer implements MessageBodyReader { + private static final Logger LOG = LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class); @Override - public boolean isReadable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + public boolean isReadable(Class type, Type genericType, Annotation[] annotations, + MediaType mediaType) { return ProtobufMessageHandler.class.isAssignableFrom(type); } @Override public ProtobufMessageHandler readFrom(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, InputStream inputStream) - throws IOException, WebApplicationException { + Annotation[] annotations, MediaType mediaType, MultivaluedMap httpHeaders, + InputStream inputStream) throws IOException, WebApplicationException { ProtobufMessageHandler obj = null; try { obj = type.getDeclaredConstructor().newInstance(); @@ -74,8 +69,7 @@ public ProtobufMessageHandler readFrom(Class type, Type } } while (read > 0); if (LOG.isTraceEnabled()) { - LOG.trace(getClass() + ": read " + baos.size() + " bytes from " + - inputStream); + LOG.trace(getClass() + ": read " + baos.size() + " bytes from " + inputStream); } obj = obj.getObjectFromMessage(baos.toByteArray()); } catch (InstantiationException | NoSuchMethodException | InvocationTargetException diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java index 9eef5bf3df47..56fc81f3ab26 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider.producer; import java.io.IOException; @@ -35,34 +33,30 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * An adapter between Jersey and Object.toString(). Hooks up plain text output - * to the Jersey content handling framework. - * Jersey will first call getSize() to learn the number of bytes that will be + * An adapter between Jersey and Object.toString(). Hooks up plain text output to the Jersey content + * handling framework. Jersey will first call getSize() to learn the number of bytes that will be * sent, then writeTo to perform the actual I/O. 
*/ @Provider @Produces(Constants.MIMETYPE_TEXT) @InterfaceAudience.Private -public class PlainTextMessageBodyProducer - implements MessageBodyWriter { +public class PlainTextMessageBodyProducer implements MessageBodyWriter { @Override - public boolean isWriteable(Class arg0, Type arg1, Annotation[] arg2, - MediaType arg3) { + public boolean isWriteable(Class arg0, Type arg1, Annotation[] arg2, MediaType arg3) { return true; } @Override - public long getSize(Object object, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + public long getSize(Object object, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType) { // deprecated by JAX-RS 2.0 and ignored by Jersey runtime return -1; } @Override - public void writeTo(Object object, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, OutputStream outStream) + public void writeTo(Object object, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType, MultivaluedMap httpHeaders, OutputStream outStream) throws IOException, WebApplicationException { outStream.write(Bytes.toBytes(object.toString())); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java index 3f21893470d1..5c66621f9fe4 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.provider.producer; import java.io.IOException; @@ -35,20 +33,18 @@ import org.apache.hbase.thirdparty.javax.ws.rs.ext.Provider; /** - * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up - * protobuf output producing methods to the Jersey content handling framework. - * Jersey will first call getSize() to learn the number of bytes that will be - * sent, then writeTo to perform the actual I/O. + * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up protobuf output + * producing methods to the Jersey content handling framework. Jersey will first call getSize() to + * learn the number of bytes that will be sent, then writeTo to perform the actual I/O. 
*/ @Provider -@Produces({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF}) +@Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) @InterfaceAudience.Private -public class ProtobufMessageBodyProducer - implements MessageBodyWriter { +public class ProtobufMessageBodyProducer implements MessageBodyWriter { @Override - public boolean isWriteable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { + public boolean isWriteable(Class type, Type genericType, Annotation[] annotations, + MediaType mediaType) { return ProtobufMessageHandler.class.isAssignableFrom(type); } @@ -61,9 +57,8 @@ public long getSize(ProtobufMessageHandler m, Class type, Type genericType, @Override public void writeTo(ProtobufMessageHandler m, Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, OutputStream entityStream) - throws IOException, WebApplicationException { + Annotation[] annotations, MediaType mediaType, MultivaluedMap httpHeaders, + OutputStream entityStream) throws IOException, WebApplicationException { entityStream.write(m.createProtobufOutput()); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java index 5af8ee2bfafc..995aa1b18aaf 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -27,7 +26,6 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java index 00b28c7534b4..e1e52d2c1848 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,8 +44,7 @@ public void startServletContainer(Configuration conf) throws Exception { server = new RESTServer(conf); server.run(); - LOG.info("started " + server.getClass().getName() + " on port " + - server.getPort()); + LOG.info("started " + server.getClass().getName() + " on port " + server.getPort()); } public void shutdownServletContainer() { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java index 0ed14f0dd1bf..6260b591ed7f 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -96,24 +95,20 @@ import org.slf4j.LoggerFactory; /** - * Script used evaluating Stargate performance and scalability. Runs a SG - * client that steps through one of a set of hardcoded tests or 'experiments' - * (e.g. a random reads test, a random writes test, etc.). Pass on the - * command-line which test to run and how many clients are participating in - * this experiment. Run java PerformanceEvaluation --help to - * obtain usage. - * - *

      This class sets up and runs the evaluation programs described in - * Section 7, Performance Evaluation, of the Bigtable - * paper, pages 8-10. - * - *

      If number of clients > 1, we start up a MapReduce job. Each map task - * runs an individual client. Each client does about 1GB of data. + * Script used evaluating Stargate performance and scalability. Runs a SG client that steps through + * one of a set of hardcoded tests or 'experiments' (e.g. a random reads test, a random writes test, + * etc.). Pass on the command-line which test to run and how many clients are participating in this + * experiment. Run java PerformanceEvaluation --help to obtain usage. + *

      + * This class sets up and runs the evaluation programs described in Section 7, Performance + * Evaluation, of the Bigtable paper, + * pages 8-10. + *

      + * If number of clients > 1, we start up a MapReduce job. Each map task runs an individual client. + * Each client does about 1GB of data. */ public class PerformanceEvaluation extends Configured implements Tool { - protected static final Logger LOG = - LoggerFactory.getLogger(PerformanceEvaluation.class); + protected static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class); private static final int DEFAULT_ROW_PREFIX_LENGTH = 16; private static final int ROW_LENGTH = 1000; @@ -149,20 +144,14 @@ public class PerformanceEvaluation extends Configured implements Tool { /** * Regex to parse lines in input file passed to mapreduce task. */ - public static final Pattern LINE_PATTERN = - Pattern.compile("tableName=(\\w+),\\s+" + - "startRow=(\\d+),\\s+" + - "perClientRunRows=(\\d+),\\s+" + - "totalRows=(\\d+),\\s+" + - "clients=(\\d+),\\s+" + - "flushCommits=(\\w+),\\s+" + - "writeToWAL=(\\w+),\\s+" + - "useTags=(\\w+),\\s+" + - "noOfTags=(\\d+)"); + public static final Pattern LINE_PATTERN = Pattern + .compile("tableName=(\\w+),\\s+" + "startRow=(\\d+),\\s+" + "perClientRunRows=(\\d+),\\s+" + + "totalRows=(\\d+),\\s+" + "clients=(\\d+),\\s+" + "flushCommits=(\\w+),\\s+" + + "writeToWAL=(\\w+),\\s+" + "useTags=(\\w+),\\s+" + "noOfTags=(\\d+)"); /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. */ protected enum Counter { /** elapsed time */ @@ -178,33 +167,28 @@ protected enum Counter { public PerformanceEvaluation(final Configuration c) { this.conf = c; - addCommandDescriptor(RandomReadTest.class, "randomRead", - "Run random read test"); + addCommandDescriptor(RandomReadTest.class, "randomRead", "Run random read test"); addCommandDescriptor(RandomSeekScanTest.class, "randomSeekScan", - "Run random seek and scan 100 test"); + "Run random seek and scan 100 test"); addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10", - "Run random seek scan with both start and stop row (max 10 rows)"); + "Run random seek scan with both start and stop row (max 10 rows)"); addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100", - "Run random seek scan with both start and stop row (max 100 rows)"); + "Run random seek scan with both start and stop row (max 100 rows)"); addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000", - "Run random seek scan with both start and stop row (max 1000 rows)"); + "Run random seek scan with both start and stop row (max 1000 rows)"); addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000", - "Run random seek scan with both start and stop row (max 10000 rows)"); - addCommandDescriptor(RandomWriteTest.class, "randomWrite", - "Run random write test"); - addCommandDescriptor(SequentialReadTest.class, "sequentialRead", - "Run sequential read test"); - addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", - "Run sequential write test"); - addCommandDescriptor(ScanTest.class, "scan", - "Run scan test (read every row)"); + "Run random seek scan with both start and stop row (max 10000 rows)"); + addCommandDescriptor(RandomWriteTest.class, "randomWrite", "Run random write test"); + addCommandDescriptor(SequentialReadTest.class, "sequentialRead", "Run sequential read test"); + addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", "Run sequential write test"); + 
addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); addCommandDescriptor(FilteredScanTest.class, "filterScan", - "Run scan test using a filter to find a specific row based " + - "on it's value (make sure to use --rows=20)"); + "Run scan test using a filter to find a specific row based " + + "on it's value (make sure to use --rows=20)"); } - protected void addCommandDescriptor(Class cmdClass, - String name, String description) { + protected void addCommandDescriptor(Class cmdClass, String name, + String description) { CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description); commands.put(name, cmdDescriptor); } @@ -222,10 +206,9 @@ interface Status { } /** - * This class works as the InputSplit of Performance Evaluation - * MapReduce InputFormat, and the Record Value of RecordReader. - * Each map task will only read one record from a PeInputSplit, - * the record value is the PeInputSplit itself. + * This class works as the InputSplit of Performance Evaluation MapReduce InputFormat, and the + * Record Value of RecordReader. Each map task will only read one record from a PeInputSplit, the + * record value is the PeInputSplit itself. */ public static class PeInputSplit extends InputSplit implements Writable { private TableName tableName; @@ -326,8 +309,8 @@ public int getNoOfTags() { } /** - * InputFormat of Performance Evaluation MapReduce job. - * It extends from FileInputFormat, want to use it's methods such as setInputPaths(). + * InputFormat of Performance Evaluation MapReduce job. It extends from FileInputFormat, want to + * use it's methods such as setInputPaths(). */ public static class PeInputFormat extends FileInputFormat { @Override @@ -362,20 +345,13 @@ public List getSplits(JobContext job) throws IOException { boolean useTags = Boolean.parseBoolean(m.group(8)); int noOfTags = Integer.parseInt(m.group(9)); - LOG.debug("tableName=" + tableName + - " split["+ splitList.size() + "] " + - " startRow=" + startRow + - " rows=" + rows + - " totalRows=" + totalRows + - " clients=" + clients + - " flushCommits=" + flushCommits + - " writeToWAL=" + writeToWAL + - " useTags=" + useTags + - " noOfTags=" + noOfTags); - - PeInputSplit newSplit = - new PeInputSplit(tableName, startRow, rows, totalRows, clients, - flushCommits, writeToWAL, useTags, noOfTags); + LOG.debug("tableName=" + tableName + " split[" + splitList.size() + "] " + " startRow=" + + startRow + " rows=" + rows + " totalRows=" + totalRows + " clients=" + clients + + " flushCommits=" + flushCommits + " writeToWAL=" + writeToWAL + " useTags=" + + useTags + " noOfTags=" + noOfTags); + + PeInputSplit newSplit = new PeInputSplit(tableName, startRow, rows, totalRows, clients, + flushCommits, writeToWAL, useTags, noOfTags); splitList.add(newSplit); } } @@ -401,7 +377,7 @@ public static class PeRecordReader extends RecordReader peClass = forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); try { - this.pe = peClass.getConstructor(Configuration.class) - .newInstance(context.getConfiguration()); + this.pe = + peClass.getConstructor(Configuration.class).newInstance(context.getConfiguration()); } catch (Exception e) { throw new IllegalStateException("Could not instantiate PE instance", e); } @@ -490,11 +466,10 @@ protected void map(NullWritable key, PeInputSplit value, final Context context) // Evaluation task pe.tableName = value.getTableName(); - long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(), - value.getRows(), value.getTotalRows(), - 
value.isFlushCommits(), value.isWriteToWAL(), - value.isUseTags(), value.getNoOfTags(), - ConnectionFactory.createConnection(context.getConfiguration()), status); + long elapsedTime = + this.pe.runOneClient(this.cmd, value.getStartRow(), value.getRows(), value.getTotalRows(), + value.isFlushCommits(), value.isWriteToWAL(), value.isUseTags(), value.getNoOfTags(), + ConnectionFactory.createConnection(context.getConfiguration()), status); // Collect how much time the thing took. Report as map output and // to the ELAPSED_TIME counter. context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime); @@ -519,7 +494,7 @@ private boolean checkTable(RemoteAdmin admin) throws IOException { } byte[][] splits = getSplits(); - for (int i=0; i < splits.length; i++) { + for (int i = 0; i < splits.length; i++) { LOG.debug(" split " + i + ": " + Bytes.toStringBinary(splits[i])); } admin.createTable(tableDescriptor); @@ -537,11 +512,10 @@ private boolean checkTable(RemoteAdmin admin) throws IOException { protected TableDescriptor getDescriptor() { if (TABLE_DESCRIPTOR == null) { - TABLE_DESCRIPTOR = - TableDescriptorBuilder.newBuilder(tableName) + TABLE_DESCRIPTOR = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_NAME) - .setDataBlockEncoding(blockEncoding).setCompressionType(compression) - .setInMemory(inMemoryCF).build()) + .setDataBlockEncoding(blockEncoding).setCompressionType(compression) + .setInMemory(inMemoryCF).build()) .build(); } return TABLE_DESCRIPTOR; @@ -549,17 +523,16 @@ protected TableDescriptor getDescriptor() { /** * Generates splits based on total number of rows and specified split regions - * * @return splits : array of byte [] */ - protected byte[][] getSplits() { + protected byte[][] getSplits() { if (this.presplitRegions == 0) { return new byte[0][]; } int numSplitPoints = presplitRegions - 1; byte[][] splits = new byte[numSplitPoints][]; - int jump = this.R / this.presplitRegions; + int jump = this.R / this.presplitRegions; for (int i = 0; i < numSplitPoints; i++) { int rowkey = jump * (1 + i); splits[i] = format(rowkey); @@ -568,8 +541,8 @@ protected byte[][] getSplits() { } /** - * We're to run multiple clients concurrently. Setup a mapreduce job. Run - * one map per client. Then run a single reduce to sum the elapsed times. + * We're to run multiple clients concurrently. Setup a mapreduce job. Run one map per client. Then + * run a single reduce to sum the elapsed times. * @param cmd Command to run. 
*/ private void runNIsMoreThanOne(final Class cmd) @@ -591,7 +564,7 @@ private void runNIsMoreThanOne(final Class cmd) private void doMultipleClients(final Class cmd) throws IOException { final List threads = new ArrayList<>(this.N); final long[] timings = new long[this.N]; - final int perClientRows = R/N; + final int perClientRows = R / N; final TableName tableName = this.tableName; final DataBlockEncoding encoding = this.blockEncoding; final boolean flushCommits = this.flushCommits; @@ -619,13 +592,12 @@ public void run() { pe.useTags = useTags; pe.noOfTags = numTags; try { - long elapsedTime = pe.runOneClient(cmd, index * perClientRows, - perClientRows, R, - flushCommits, writeToWAL, useTags, noOfTags, connection, + long elapsedTime = pe.runOneClient(cmd, index * perClientRows, perClientRows, R, + flushCommits, writeToWAL, useTags, noOfTags, connection, msg -> LOG.info("client-" + getName() + " " + msg)); timings[index] = elapsedTime; - LOG.info("Finished " + getName() + " in " + elapsedTime + - "ms writing " + perClientRows + " rows"); + LOG.info("Finished " + getName() + " in " + elapsedTime + "ms writing " + perClientRows + + " rows"); } catch (IOException e) { throw new RuntimeException(e); } @@ -646,23 +618,19 @@ public void run() { } } final String test = cmd.getSimpleName(); - LOG.info("[" + test + "] Summary of timings (ms): " - + Arrays.toString(timings)); + LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(timings)); Arrays.sort(timings); long total = 0; for (int i = 0; i < this.N; i++) { total += timings[i]; } - LOG.info("[" + test + "]" - + "\tMin: " + timings[0] + "ms" - + "\tMax: " + timings[this.N - 1] + "ms" - + "\tAvg: " + (total / this.N) + "ms"); + LOG.info("[" + test + "]" + "\tMin: " + timings[0] + "ms" + "\tMax: " + timings[this.N - 1] + + "ms" + "\tAvg: " + (total / this.N) + "ms"); } /** - * Run a mapreduce job. Run as many maps as asked-for clients. - * Before we start up the job, write out an input file with instruction - * per client regards which row they are to start on. + * Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write + * out an input file with instruction per client regards which row they are to start on. * @param cmd Command to run. */ private void doMapReduce(final Class cmd) @@ -764,8 +732,8 @@ public String getDescription() { } /** - * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation} tests - * This makes the reflection logic a little easier to understand... + * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation} tests This + * makes the reflection logic a little easier to understand... */ static class TestOptions { private int startRow; @@ -779,8 +747,8 @@ static class TestOptions { private Connection connection; TestOptions(int startRow, int perClientRunRows, int totalRows, TableName tableName, - boolean flushCommits, boolean writeToWAL, boolean useTags, - int noOfTags, Connection connection) { + boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, + Connection connection) { this.startRow = startRow; this.perClientRunRows = perClientRunRows; this.totalRows = totalRows; @@ -830,17 +798,17 @@ public int getNumTags() { } /* - * A test. - * Subclass to particularize what happens per row. + * A test. Subclass to particularize what happens per row. */ static abstract class Test { // Below is make it so when Tests are all running in the one // jvm, that they each have a differently seeded Random. 
- private static final Random randomSeed = - new Random(EnvironmentEdgeManager.currentTime()); + private static final Random randomSeed = new Random(EnvironmentEdgeManager.currentTime()); + private static long nextRandomSeed() { return randomSeed.nextLong(); } + protected final Random rand = new Random(nextRandomSeed()); protected final int startRow; @@ -855,8 +823,8 @@ private static long nextRandomSeed() { protected Connection connection; /** - * Note that all subclasses of this class must provide a public contructor - * that has the exact same list of arguments. + * Note that all subclasses of this class must provide a public contructor that has the exact + * same list of arguments. */ Test(final Configuration conf, final TestOptions options, final Status status) { super(); @@ -878,10 +846,10 @@ protected String generateStatus(final int sr, final int i, final int lr) { protected int getReportingPeriod() { int period = this.perClientRunRows / 10; - return period == 0? this.perClientRunRows: period; + return period == 0 ? this.perClientRunRows : period; } - abstract void testTakedown() throws IOException; + abstract void testTakedown() throws IOException; /** * Run test @@ -956,7 +924,7 @@ void testSetup() throws IOException { } @Override - void testTakedown() throws IOException { + void testTakedown() throws IOException { if (flushCommits) { this.mutator.flush(); } @@ -981,7 +949,7 @@ void testRow(final int i) throws IOException { @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; - return period == 0? this.perClientRunRows: period; + return period == 0 ? this.perClientRunRows : period; } } @@ -995,7 +963,7 @@ static abstract class RandomScanWithRangeTest extends TableTest { void testRow(final int i) throws IOException { Pair startAndStopRow = getStartAndStopRow(); Scan scan = new Scan().withStartRow(startAndStopRow.getFirst()) - .withStopRow(startAndStopRow.getSecond()); + .withStopRow(startAndStopRow.getSecond()); scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); ResultScanner s = this.table.getScanner(scan); int count = 0; @@ -1005,8 +973,8 @@ void testRow(final int i) throws IOException { if (i % 100 == 0) { LOG.info(String.format("Scan for key range %s - %s returned %s rows", - Bytes.toString(startAndStopRow.getFirst()), - Bytes.toString(startAndStopRow.getSecond()), count)); + Bytes.toString(startAndStopRow.getFirst()), Bytes.toString(startAndStopRow.getSecond()), + count)); } s.close(); @@ -1023,7 +991,7 @@ protected Pair generateStartAndStopRows(int maxRange) { @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; - return period == 0? this.perClientRunRows: period; + return period == 0 ? this.perClientRunRows : period; } } @@ -1086,7 +1054,7 @@ void testRow(final int i) throws IOException { @Override protected int getReportingPeriod() { int period = this.perClientRunRows / 100; - return period == 0? this.perClientRunRows: period; + return period == 0 ? 
this.perClientRunRows : period; } } @@ -1203,10 +1171,8 @@ void testRow(int i) throws IOException { } protected Scan constructScan(byte[] valuePrefix) { - Filter filter = new SingleColumnValueFilter( - FAMILY_NAME, QUALIFIER_NAME, CompareOperator.EQUAL, - new BinaryComparator(valuePrefix) - ); + Filter filter = new SingleColumnValueFilter(FAMILY_NAME, QUALIFIER_NAME, + CompareOperator.EQUAL, new BinaryComparator(valuePrefix)); Scan scan = new Scan(); scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); scan.setFilter(filter); @@ -1218,31 +1184,31 @@ protected Scan constructScan(byte[] valuePrefix) { * Format passed integer. * @param number the integer to format * @return Returns zero-prefixed 10-byte wide decimal version of passed number (Does absolute in - * case number is negative). + * case number is negative). */ - public static byte [] format(final int number) { + public static byte[] format(final int number) { byte[] b = new byte[DEFAULT_ROW_PREFIX_LENGTH + 10]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return b; } public static byte[] generateData(final Random r, int length) { - byte[] b = new byte [length]; + byte[] b = new byte[length]; int i; - for (i = 0; i < (length-8); i += 8) { + for (i = 0; i < (length - 8); i += 8) { b[i] = (byte) (65 + r.nextInt(26)); - b[i+1] = b[i]; - b[i+2] = b[i]; - b[i+3] = b[i]; - b[i+4] = b[i]; - b[i+5] = b[i]; - b[i+6] = b[i]; - b[i+7] = b[i]; + b[i + 1] = b[i]; + b[i + 2] = b[i]; + b[i + 3] = b[i]; + b[i + 4] = b[i]; + b[i + 5] = b[i]; + b[i + 6] = b[i]; + b[i + 7] = b[i]; } byte a = (byte) (65 + r.nextInt(26)); @@ -1253,7 +1219,7 @@ public static byte[] generateData(final Random r, int length) { } public static byte[] generateValue(final Random r) { - byte [] b = new byte [ROW_LENGTH]; + byte[] b = new byte[ROW_LENGTH]; r.nextBytes(b); return b; } @@ -1262,33 +1228,32 @@ static byte[] getRandomRow(final Random random, final int totalRows) { return format(random.nextInt(Integer.MAX_VALUE) % totalRows); } - long runOneClient(final Class cmd, final int startRow, - final int perClientRunRows, final int totalRows, - boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, + long runOneClient(final Class cmd, final int startRow, final int perClientRunRows, + final int totalRows, boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, Connection connection, final Status status) throws IOException { - status.setStatus("Start " + cmd + " at offset " + startRow + " for " + - perClientRunRows + " rows"); + status.setStatus( + "Start " + cmd + " at offset " + startRow + " for " + perClientRunRows + " rows"); long totalElapsedTime; - TestOptions options = new TestOptions(startRow, perClientRunRows, - totalRows, tableName, flushCommits, writeToWAL, useTags, noOfTags, connection); + TestOptions options = new TestOptions(startRow, perClientRunRows, totalRows, tableName, + flushCommits, writeToWAL, useTags, noOfTags, connection); final Test t; try { - Constructor constructor = cmd.getDeclaredConstructor( - Configuration.class, TestOptions.class, Status.class); + Constructor constructor = + cmd.getDeclaredConstructor(Configuration.class, TestOptions.class, Status.class); t = constructor.newInstance(this.conf, options, status); } catch (NoSuchMethodException e) { - throw new IllegalArgumentException("Invalid command class: " + - cmd.getName() + ". It does not provide a constructor as described by" + - "the javadoc comment. 
Available constructors are: " + - Arrays.toString(cmd.getConstructors())); + throw new IllegalArgumentException("Invalid command class: " + cmd.getName() + + ". It does not provide a constructor as described by" + + "the javadoc comment. Available constructors are: " + + Arrays.toString(cmd.getConstructors())); } catch (Exception e) { throw new IllegalStateException("Failed to construct command class", e); } totalElapsedTime = t.test(); - status.setStatus("Finished " + cmd + " in " + totalElapsedTime + - "ms at offset " + startRow + " for " + perClientRunRows + " rows"); + status.setStatus("Finished " + cmd + " in " + totalElapsedTime + "ms at offset " + startRow + + " for " + perClientRunRows + " rows"); return totalElapsedTime; } @@ -1300,8 +1265,8 @@ private void runNIsOne(final Class cmd) { Client client = new Client(cluster); admin = new RemoteAdmin(client, getConf()); checkTable(admin); - runOneClient(cmd, 0, this.R, this.R, this.flushCommits, this.writeToWAL, - this.useTags, this.noOfTags, this.connection, status); + runOneClient(cmd, 0, this.R, this.R, this.flushCommits, this.writeToWAL, this.useTags, + this.noOfTags, this.connection, status); } catch (Exception e) { LOG.error("Failed", e); } @@ -1329,30 +1294,30 @@ protected void printUsage(final String message) { } System.err.println("Usage: java " + this.getClass().getName() + " \\"); System.err.println(" [--nomapred] [--rows=ROWS] [--table=NAME] \\"); - System.err.println(" [--compress=TYPE] [--blockEncoding=TYPE] " + - "[-D]* "); + System.err.println( + " [--compress=TYPE] [--blockEncoding=TYPE] " + "[-D]* "); System.err.println(); System.err.println("General Options:"); - System.err.println(" nomapred Run multiple clients using threads " + - "(rather than use mapreduce)"); + System.err.println( + " nomapred Run multiple clients using threads " + "(rather than use mapreduce)"); System.err.println(" rows Rows each client runs. Default: One million"); System.err.println(); System.err.println("Table Creation / Write Tests:"); System.err.println(" table Alternate table name. Default: 'TestTable'"); System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'"); - System.err.println(" flushCommits Used to determine if the test should flush the table. " + - "Default: false"); + System.err.println( + " flushCommits Used to determine if the test should flush the table. " + "Default: false"); System.err.println(" writeToWAL Set writeToWAL on puts. Default: True"); - System.err.println(" presplit Create presplit table. Recommended for accurate perf " + - "analysis (see guide). Default: disabled"); - System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " + - "Default : false"); - System.err.println(" numoftags Specify the no of tags that would be needed. " + - "This works only if usetags is true."); + System.err.println(" presplit Create presplit table. Recommended for accurate perf " + + "analysis (see guide). Default: disabled"); + System.err.println( + " usetags Writes tags along with KVs. Use with HFile V3. " + "Default : false"); + System.err.println(" numoftags Specify the no of tags that would be needed. " + + "This works only if usetags is true."); System.err.println(); System.err.println("Read Tests:"); - System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " + - "possible. Not guaranteed that reads are always served from inmemory. Default: false"); + System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " + + "possible. 
Not guaranteed that reads are always served from inmemory. Default: false"); System.err.println(); System.err.println(" Note: -D properties will be applied to the conf used. "); System.err.println(" For example: "); @@ -1365,13 +1330,12 @@ protected void printUsage(final String message) { } System.err.println(); System.err.println("Args:"); - System.err.println(" nclients Integer. Required. Total number of " + - "clients (and HRegionServers)"); + System.err.println( + " nclients Integer. Required. Total number of " + "clients (and HRegionServers)"); System.err.println(" running: 1 <= value <= 500"); System.err.println("Examples:"); System.err.println(" To run a single evaluation client:"); - System.err.println(" $ hbase " + this.getClass().getName() - + " sequentialWrite 1"); + System.err.println(" $ hbase " + this.getClass().getName() + " sequentialWrite 1"); } private void getArgs(final int start, final String[] args) { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java index 53f2f14ec6ae..f1f5984d188a 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,8 +73,7 @@ public class RowResourceBase { protected static final String VALUE_6 = "6"; protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - protected static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + protected static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); protected static Client client; protected static JAXBContext context; protected static Marshaller xmlMarshaller; @@ -87,16 +86,12 @@ public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(conf); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class); xmlMarshaller = context.createMarshaller(); xmlUnmarshaller = context.createUnmarshaller(); - jsonMapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + jsonMapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); } @AfterClass @@ -112,12 +107,11 @@ public void beforeMethod() throws Exception { TEST_UTIL.deleteTable(TABLE_NAME); } TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE)); - ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(CFA)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE)); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - 
.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); } @@ -130,8 +124,8 @@ public void afterMethod() throws Exception { } } - static Response putValuePB(String table, String row, String column, - String value) throws IOException { + static Response putValuePB(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -142,39 +136,38 @@ static Response putValuePB(String table, String row, String column, return putValuePB(path.toString(), table, row, column, value); } - static Response putValuePB(String url, String table, String row, - String column, String value) throws IOException { + static Response putValuePB(String url, String table, String row, String column, String value) + throws IOException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(value))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); - Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(url, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); return response; } - protected static void checkValueXML(String url, String table, String row, - String column, String value) throws IOException, JAXBException { + protected static void checkValueXML(String url, String table, String row, String column, + String value) throws IOException, JAXBException { Response response = getValueXML(url); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); assertEquals(Bytes.toString(cell.getColumn()), column); assertEquals(Bytes.toString(cell.getValue()), value); } - protected static void checkValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static void checkValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { Response response = getValueXML(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); assertEquals(Bytes.toString(cell.getColumn()), column); @@ -186,8 +179,8 @@ protected static void checkIncrementValueXML(String table, String row, String co Response response1 = getValueXML(table, row, column); assertEquals(200, response1.getCode()); assertEquals(Constants.MIMETYPE_XML, response1.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - 
xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response1.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response1.getBody())); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = rowModel.getCells().get(0); assertEquals(Bytes.toString(cell.getColumn()), column); @@ -195,12 +188,12 @@ protected static void checkIncrementValueXML(String table, String row, String co } protected static Response getValuePB(String url) throws IOException { - Response response = client.get(url, Constants.MIMETYPE_PROTOBUF); + Response response = client.get(url, Constants.MIMETYPE_PROTOBUF); return response; } - protected static Response putValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response putValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -211,23 +204,20 @@ protected static Response putValueXML(String table, String row, String column, return putValueXML(path.toString(), table, row, column, value); } - protected static Response putValueXML(String url, String table, String row, - String column, String value) throws IOException, JAXBException { + protected static Response putValueXML(String url, String table, String row, String column, + String value) throws IOException, JAXBException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(value))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(url, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(url, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); return response; } - protected static Response getValuePB(String table, String row, String column) - throws IOException { + protected static Response getValuePB(String table, String row, String column) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -238,8 +228,8 @@ protected static Response getValuePB(String table, String row, String column) return getValuePB(path.toString()); } - protected static void checkValuePB(String table, String row, String column, - String value) throws IOException { + protected static void checkValuePB(String table, String row, String column, String value) + throws IOException { Response response = getValuePB(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); @@ -251,8 +241,8 @@ protected static void checkValuePB(String table, String row, String column, assertEquals(Bytes.toString(cell.getValue()), value); } - protected static void checkIncrementValuePB(String table, String row, String column, - long value) throws IOException { + protected static void checkIncrementValuePB(String table, String row, String column, long value) + throws IOException { Response response = getValuePB(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); @@ -265,38 +255,36 @@ protected static void checkIncrementValuePB(String table, String row, String 
col } protected static Response checkAndPutValuePB(String url, String table, String row, String column, - String valueToCheck, String valueToPut, HashMap otherCells) + String valueToCheck, String valueToPut, HashMap otherCells) throws IOException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToPut))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToPut))); if (otherCells != null) { - for (Map.Entry entry : otherCells.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : otherCells.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // This Cell need to be added as last cell. - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); - Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(url, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); return response; } - protected static Response checkAndPutValuePB(String table, String row, - String column, String valueToCheck, String valueToPut) throws IOException { - return checkAndPutValuePB(table,row,column,valueToCheck,valueToPut,null); + protected static Response checkAndPutValuePB(String table, String row, String column, + String valueToCheck, String valueToPut) throws IOException { + return checkAndPutValuePB(table, row, column, valueToCheck, valueToPut, null); } protected static Response checkAndPutValuePB(String table, String row, String column, - String valueToCheck, String valueToPut, HashMap otherCells) + String valueToCheck, String valueToPut, HashMap otherCells) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); @@ -304,86 +292,81 @@ protected static Response checkAndPutValuePB(String table, String row, String co path.append('/'); path.append(row); path.append("?check=put"); - return checkAndPutValuePB(path.toString(), table, row, column, - valueToCheck, valueToPut, otherCells); + return checkAndPutValuePB(path.toString(), table, row, column, valueToCheck, valueToPut, + otherCells); } protected static Response checkAndPutValueXML(String url, String table, String row, String column, - String valueToCheck, String valueToPut, HashMap otherCells) + String valueToCheck, String valueToPut, HashMap otherCells) throws IOException, JAXBException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToPut))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToPut))); if (otherCells != null) { - for (Map.Entry entry : otherCells.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : otherCells.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // This Cell need to be added as last cell. 
- rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(url, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(url, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); return response; } protected static Response checkAndPutValueXML(String table, String row, String column, String valueToCheck, String valueToPut) throws IOException, JAXBException { - return checkAndPutValueXML(table,row,column,valueToCheck,valueToPut, null); + return checkAndPutValueXML(table, row, column, valueToCheck, valueToPut, null); } - protected static Response checkAndPutValueXML(String table, String row, - String column, String valueToCheck, String valueToPut, HashMap otherCells) - throws IOException, JAXBException { + protected static Response checkAndPutValueXML(String table, String row, String column, + String valueToCheck, String valueToPut, HashMap otherCells) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); path.append('/'); path.append(row); path.append("?check=put"); - return checkAndPutValueXML(path.toString(), table, row, column, - valueToCheck, valueToPut, otherCells); + return checkAndPutValueXML(path.toString(), table, row, column, valueToCheck, valueToPut, + otherCells); } - protected static Response checkAndDeleteXML(String url, String table, - String row, String column, String valueToCheck, HashMap cellsToDelete) - throws IOException, JAXBException { + protected static Response checkAndDeleteXML(String url, String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) + throws IOException, JAXBException { RowModel rowModel = new RowModel(row); if (cellsToDelete != null) { - for (Map.Entry entry : cellsToDelete.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // Add this at the end - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(url, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(url, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); return response; } - protected static Response checkAndDeleteXML(String table, String row, - String column, String valueToCheck) throws IOException, JAXBException { + protected static Response checkAndDeleteXML(String table, String row, String column, + String valueToCheck) throws IOException, JAXBException { return checkAndDeleteXML(table, row, column, valueToCheck, null); } - protected static Response checkAndDeleteXML(String table, String row, - String column, String valueToCheck, HashMap cellsToDelete) + protected static Response checkAndDeleteXML(String table, String row, String 
column, + String valueToCheck, HashMap cellsToDelete) throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); @@ -394,14 +377,13 @@ protected static Response checkAndDeleteXML(String table, String row, return checkAndDeleteXML(path.toString(), table, row, column, valueToCheck, cellsToDelete); } - protected static Response checkAndDeleteJson(String table, String row, - String column, String valueToCheck) throws IOException { + protected static Response checkAndDeleteJson(String table, String row, String column, + String valueToCheck) throws IOException { return checkAndDeleteJson(table, row, column, valueToCheck, null); } - protected static Response checkAndDeleteJson(String table, String row, - String column, String valueToCheck, HashMap cellsToDelete) - throws IOException { + protected static Response checkAndDeleteJson(String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -411,25 +393,22 @@ protected static Response checkAndDeleteJson(String table, String row, return checkAndDeleteJson(path.toString(), table, row, column, valueToCheck, cellsToDelete); } - protected static Response checkAndDeleteJson(String url, String table, - String row, String column, String valueToCheck, HashMap cellsToDelete) - throws IOException { + protected static Response checkAndDeleteJson(String url, String table, String row, String column, + String valueToCheck, HashMap cellsToDelete) throws IOException { RowModel rowModel = new RowModel(row); if (cellsToDelete != null) { - for (Map.Entry entry : cellsToDelete.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // Add this at the end - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(url, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(url, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); Thread.yield(); return response; } @@ -439,8 +418,8 @@ protected static Response checkAndDeletePB(String table, String row, String colu return checkAndDeletePB(table, row, column, value, null); } - protected static Response checkAndDeletePB(String table, String row, - String column, String value, HashMap cellsToDelete) throws IOException { + protected static Response checkAndDeletePB(String table, String row, String column, String value, + HashMap cellsToDelete) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -449,30 +428,30 @@ protected static Response checkAndDeletePB(String table, String row, path.append("?check=delete"); return checkAndDeleteValuePB(path.toString(), table, row, column, value, cellsToDelete); } - protected static Response checkAndDeleteValuePB(String url, String table, - String row, String column, String valueToCheck, HashMap cellsToDelete) + + protected static Response checkAndDeleteValuePB(String url, String table, String row, + String column, String valueToCheck, 
HashMap cellsToDelete) throws IOException { RowModel rowModel = new RowModel(row); if (cellsToDelete != null) { - for (Map.Entry entry : cellsToDelete.entrySet()) { - rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), - Bytes.toBytes(entry.getValue()))); + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel + .addCell(new CellModel(Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()))); } } // Add this at the end - rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes - .toBytes(valueToCheck))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(valueToCheck))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); - Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(url, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); return response; } - protected static Response getValueXML(String table, String startRow, - String endRow, String column) throws IOException { + protected static Response getValueXML(String table, String startRow, String endRow, String column) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -521,8 +500,7 @@ protected static Response getValueXML(String table, String row, String column) return getValueXML(path.toString()); } - protected static Response deleteRow(String table, String row) - throws IOException { + protected static Response deleteRow(String table, String row) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -533,8 +511,8 @@ protected static Response deleteRow(String table, String row) return response; } - protected static Response getValueJson(String table, String row, - String column) throws IOException { + protected static Response getValueJson(String table, String row, String column) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -545,8 +523,8 @@ protected static Response getValueJson(String table, String row, return getValueJson(path.toString()); } - protected static void checkValueJSON(String table, String row, String column, - String value) throws IOException { + protected static void checkValueJSON(String table, String row, String column, String value) + throws IOException { Response response = getValueJson(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); @@ -559,13 +537,13 @@ protected static void checkValueJSON(String table, String row, String column, assertEquals(Bytes.toString(cell.getValue()), value); } - protected static void checkIncrementValueJSON(String table, String row, String column, - long value) throws IOException { + protected static void checkIncrementValueJSON(String table, String row, String column, long value) + throws IOException { Response response = getValueJson(table, row, column); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); RowModel rowModel = cellSet.getRows().get(0); CellModel cell = 
rowModel.getCells().get(0); @@ -573,8 +551,8 @@ protected static void checkIncrementValueJSON(String table, String row, String c assertEquals(Bytes.toLong(cell.getValue()), value); } - protected static Response putValueJson(String table, String row, String column, - String value) throws IOException { + protected static Response putValueJson(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -588,19 +566,17 @@ protected static Response putValueJson(String table, String row, String column, protected static Response putValueJson(String url, String table, String row, String column, String value) throws IOException { RowModel rowModel = new RowModel(row); - rowModel.addCell(new CellModel(Bytes.toBytes(column), - Bytes.toBytes(value))); + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value))); CellSetModel cellSetModel = new CellSetModel(); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(url, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(url, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); Thread.yield(); return response; } - protected static Response appendValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response appendValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -610,8 +586,8 @@ protected static Response appendValueXML(String table, String row, String column return putValueXML(path.toString(), table, row, column, value); } - protected static Response appendValuePB(String table, String row, String column, - String value) throws IOException { + protected static Response appendValuePB(String table, String row, String column, String value) + throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -621,8 +597,8 @@ protected static Response appendValuePB(String table, String row, String column, return putValuePB(path.toString(), table, row, column, value); } - protected static Response appendValueJson(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response appendValueJson(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -632,8 +608,8 @@ protected static Response appendValueJson(String table, String row, String colum return putValueJson(path.toString(), table, row, column, value); } - protected static Response incrementValueXML(String table, String row, String column, - String value) throws IOException, JAXBException { + protected static Response incrementValueXML(String table, String row, String column, String value) + throws IOException, JAXBException { StringBuilder path = new StringBuilder(); path.append('/'); path.append(table); @@ -643,8 +619,8 @@ protected static Response incrementValueXML(String table, String row, String col return putValueXML(path.toString(), table, row, column, value); } - protected static Response incrementValuePB(String table, String row, String column, - String value) throws IOException { + protected static Response incrementValuePB(String table, String row, String column, 
String value)
+    throws IOException {
     StringBuilder path = new StringBuilder();
     path.append('/');
     path.append(table);
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java
index f4f9c7572081..34622f754aa9 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -29,7 +29,7 @@
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-@Category({RestTests.class, MediumTests.class})
+@Category({ RestTests.class, MediumTests.class })
 public class TestDeleteRow extends RowResourceBase {
 
   @ClassRule
@@ -91,11 +91,11 @@ public void testDeleteXML() throws IOException, JAXBException {
     response = getValueXML(TABLE, ROW_1, COLUMN_2);
     assertEquals(404, response.getCode());
 
-    //Delete a row in non existent table
+    // Delete a row in non existent table
     response = deleteValue("dummy", ROW_1, COLUMN_1);
     assertEquals(404, response.getCode());
 
-    //Delete non existent column
+    // Delete non existent column
     response = deleteValue(TABLE, ROW_1, "dummy");
     assertEquals(404, response.getCode());
   }
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java
index 42e38fc99a69..c0037ae7b76f 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -36,7 +36,7 @@
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-@Category({RestTests.class, SmallTests.class})
+@Category({ RestTests.class, SmallTests.class })
 public class TestGZIPResponseWrapper {
 
   @ClassRule
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java
index e1dec900d491..8c1715c154dc 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestGetAndPutResource extends RowResourceBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -146,8 +146,8 @@ public void testMultipleCellCheckPutPB() throws IOException { assertEquals(200, response.getCode()); checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2); - HashMap otherCells = new HashMap<>(); - otherCells.put(COLUMN_2,VALUE_3); + HashMap otherCells = new HashMap<>(); + otherCells.put(COLUMN_2, VALUE_3); // On Success update both the cells response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_3, otherCells); @@ -179,8 +179,8 @@ public void testMultipleCellCheckPutXML() throws IOException, JAXBException { assertEquals(200, response.getCode()); checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); - HashMap otherCells = new HashMap<>(); - otherCells.put(COLUMN_2,VALUE_3); + HashMap otherCells = new HashMap<>(); + otherCells.put(COLUMN_2, VALUE_3); // On Success update both the cells response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_3, otherCells); @@ -217,9 +217,9 @@ public void testMultipleCellCheckDeletePB() throws IOException { checkValuePB(TABLE, ROW_1, COLUMN_3, VALUE_3); // Deletes the following columns based on Column1 check - HashMap cellsToDelete = new HashMap<>(); - cellsToDelete.put(COLUMN_2,VALUE_2); // Value does not matter - cellsToDelete.put(COLUMN_3,VALUE_3); // Value does not matter + HashMap cellsToDelete = new HashMap<>(); + cellsToDelete.put(COLUMN_2, VALUE_2); // Value does not matter + cellsToDelete.put(COLUMN_3, VALUE_3); // Value does not matter // On Success update both the cells response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1, cellsToDelete); @@ -265,7 +265,7 @@ public void testSingleCellGetPutBinary() throws IOException { assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type")); assertTrue(Bytes.equals(response.getBody(), body)); boolean foundTimestampHeader = false; - for (Header header: response.getHeaders()) { + for (Header header : response.getHeaders()) { if (header.getName().equals("X-Timestamp")) { foundTimestampHeader = true; break; @@ -280,8 +280,7 @@ public void testSingleCellGetPutBinary() throws IOException { @Test public void testSingleCellGetJSON() throws IOException { final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; - Response response = client.put(path, Constants.MIMETYPE_BINARY, - Bytes.toBytes(VALUE_4)); + Response response = client.put(path, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_4)); assertEquals(200, response.getCode()); Thread.yield(); response = client.get(path, Constants.MIMETYPE_JSON); @@ -296,16 +295,13 @@ public void testLatestCellGetJSON() throws IOException { final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_4); - CellModel cellOne = new CellModel(Bytes.toBytes(COLUMN_1), 1L, - Bytes.toBytes(VALUE_1)); - CellModel cellTwo = new CellModel(Bytes.toBytes(COLUMN_1), 2L, - Bytes.toBytes(VALUE_2)); + CellModel cellOne = new CellModel(Bytes.toBytes(COLUMN_1), 1L, Bytes.toBytes(VALUE_1)); + CellModel cellTwo = new CellModel(Bytes.toBytes(COLUMN_1), 2L, Bytes.toBytes(VALUE_2)); rowModel.addCell(cellOne); rowModel.addCell(cellTwo); cellSetModel.addRow(rowModel); String 
jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(path, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(path, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); assertEquals(200, response.getCode()); Thread.yield(); response = client.get(path, Constants.MIMETYPE_JSON); @@ -315,8 +311,8 @@ public void testLatestCellGetJSON() throws IOException { assertTrue(cellSet.getRows().size() == 1); assertTrue(cellSet.getRows().get(0).getCells().size() == 1); CellModel cell = cellSet.getRows().get(0).getCells().get(0); - assertEquals(VALUE_2 , Bytes.toString(cell.getValue())); - assertEquals(2L , cell.getTimestamp()); + assertEquals(VALUE_2, Bytes.toString(cell.getValue())); + assertEquals(2L, cell.getTimestamp()); response = deleteRow(TABLE, ROW_4); assertEquals(200, response.getCode()); } @@ -332,18 +328,16 @@ public void testURLEncodedKey() throws IOException, JAXBException { path.append('/'); path.append(COLUMN_1); Response response; - response = putValueXML(path.toString(), TABLE, urlKey, COLUMN_1, - VALUE_1); + response = putValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); checkValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1); } @Test public void testNoSuchCF() throws IOException { - final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA+":"; + final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA + ":"; final String badPath = "/" + TABLE + "/" + ROW_1 + "/" + "BAD"; - Response response = client.post(goodPath, Constants.MIMETYPE_BINARY, - Bytes.toBytes(VALUE_1)); + Response response = client.post(goodPath, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1)); assertEquals(200, response.getCode()); assertEquals(200, client.get(goodPath, Constants.MIMETYPE_BINARY).getCode()); assertEquals(404, client.get(badPath, Constants.MIMETYPE_BINARY).getCode()); @@ -352,25 +346,20 @@ public void testNoSuchCF() throws IOException { @Test public void testMultiCellGetPutXML() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); // make sure the fake row was not actually created @@ -391,23 +380,19 @@ public void testMultiCellGetPutXML() throws IOException, JAXBException { @Test public void 
testMultiCellGetPutPB() throws IOException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); - Response response = client.put(path, Constants.MIMETYPE_PROTOBUF, - cellSetModel.createProtobufOutput()); + Response response = + client.put(path, Constants.MIMETYPE_PROTOBUF, cellSetModel.createProtobufOutput()); Thread.yield(); // make sure the fake row was not actually created @@ -438,12 +423,12 @@ public void testStartEndRowGetPutXML() throws IOException, JAXBException { } response = getValueXML(TABLE, rows[0], rows[2], COLUMN_1); assertEquals(200, response.getCode()); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertEquals(2, cellSet.getRows().size()); - for (int i = 0; i < cellSet.getRows().size()-1; i++) { + for (int i = 0; i < cellSet.getRows().size() - 1; i++) { RowModel rowModel = cellSet.getRows().get(i); - for (CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { assertEquals(COLUMN_1, Bytes.toString(cell.getColumn())); assertEquals(values[i], Bytes.toString(cell.getValue())); } @@ -458,16 +443,14 @@ public void testStartEndRowGetPutXML() throws IOException, JAXBException { public void testInvalidCheckParam() throws IOException, JAXBException { CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); final String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "?check=blah"; - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); assertEquals(400, response.getCode()); } @@ -476,40 +459,33 @@ public void testInvalidColumnPut() throws IOException, JAXBException { String dummyColumn = "doesnot:exist"; CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(dummyColumn), - Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(dummyColumn), Bytes.toBytes(VALUE_1))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, 
writer); final String path = "/" + TABLE + "/" + ROW_1 + "/" + dummyColumn; - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); assertEquals(404, response.getCode()); } @Test public void testMultiCellGetJson() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); String jsonString = jsonMapper.writeValueAsString(cellSetModel); - Response response = client.put(path, Constants.MIMETYPE_JSON, - Bytes.toBytes(jsonString)); + Response response = client.put(path, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); Thread.yield(); // make sure the fake row was not actually created @@ -531,8 +507,7 @@ public void testMultiCellGetJson() throws IOException, JAXBException { @Test public void testMetrics() throws IOException { final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; - Response response = client.put(path, Constants.MIMETYPE_BINARY, - Bytes.toBytes(VALUE_4)); + Response response = client.put(path, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_4)); assertEquals(200, response.getCode()); Thread.yield(); response = client.get(path, Constants.MIMETYPE_JSON); @@ -605,25 +580,20 @@ private boolean containsCellModel(List cells, String column, String v @Test public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(path, 
Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); // make sure the fake row was not actually created @@ -639,8 +609,8 @@ public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBExcept response = client.get(query.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertTrue(cellSet.getRows().size() == 2); response = deleteRow(TABLE, ROW_1); @@ -651,25 +621,20 @@ public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBExcept @Test public void testSuffixGlobbingXML() throws IOException, JAXBException { - String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row CellSetModel cellSetModel = new CellSetModel(); RowModel rowModel = new RowModel(ROW_1); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_1))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); cellSetModel.addRow(rowModel); rowModel = new RowModel(ROW_2); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), - Bytes.toBytes(VALUE_3))); - rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), - Bytes.toBytes(VALUE_4))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4))); cellSetModel.addRow(rowModel); StringWriter writer = new StringWriter(); xmlMarshaller.marshal(cellSetModel, writer); - Response response = client.put(path, Constants.MIMETYPE_XML, - Bytes.toBytes(writer.toString())); + Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString())); Thread.yield(); // make sure the fake row was not actually created @@ -687,8 +652,8 @@ public void testSuffixGlobbingXML() throws IOException, JAXBException { response = client.get(query.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); List rows = cellSet.getRows(); assertTrue(rows.size() == 2); for (RowModel row : rows) { @@ -706,7 +671,7 @@ public void testAppendXML() throws IOException, JAXBException { Response response = getValueXML(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = appendValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); @@ -723,7 +688,7 @@ public void testAppendPB() throws IOException, JAXBException { Response response = getValuePB(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = appendValuePB(TABLE, ROW_1, 
COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1); @@ -740,7 +705,7 @@ public void testAppendJSON() throws IOException, JAXBException { Response response = getValueJson(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = appendValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1); assertEquals(200, response.getCode()); putValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1); @@ -757,14 +722,14 @@ public void testIncrementXML() throws IOException, JAXBException { Response response = getValueXML(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append single cell + // append single cell response = incrementValueXML(TABLE, ROW_1, COLUMN_1, VALUE_5); assertEquals(200, response.getCode()); checkIncrementValueXML(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); response = incrementValueXML(TABLE, ROW_1, COLUMN_1, VALUE_6); assertEquals(200, response.getCode()); checkIncrementValueXML(TABLE, ROW_1, COLUMN_1, - Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); response = deleteRow(TABLE, ROW_1); assertEquals(200, response.getCode()); @@ -775,14 +740,14 @@ public void testIncrementPB() throws IOException, JAXBException { Response response = getValuePB(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = incrementValuePB(TABLE, ROW_1, COLUMN_1, VALUE_5); assertEquals(200, response.getCode()); checkIncrementValuePB(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); response = incrementValuePB(TABLE, ROW_1, COLUMN_1, VALUE_6); assertEquals(200, response.getCode()); checkIncrementValuePB(TABLE, ROW_1, COLUMN_1, - Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); response = deleteRow(TABLE, ROW_1); assertEquals(200, response.getCode()); @@ -793,14 +758,14 @@ public void testIncrementJSON() throws IOException, JAXBException { Response response = getValueJson(TABLE, ROW_1, COLUMN_1); assertEquals(404, response.getCode()); - //append cell + // append cell response = incrementValueJson(TABLE, ROW_1, COLUMN_1, VALUE_5); assertEquals(200, response.getCode()); checkIncrementValueJSON(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); response = incrementValueJson(TABLE, ROW_1, COLUMN_1, VALUE_6); assertEquals(200, response.getCode()); checkIncrementValueJSON(TABLE, ROW_1, COLUMN_1, - Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); response = deleteRow(TABLE, ROW_1); assertEquals(200, response.getCode()); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java index 11f14f1b5ff7..5e8ab5add1d1 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestGzipFilter { @ClassRule @@ -64,24 +64,21 @@ public class TestGzipFilter { private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1"); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(TABLE)) { return; } - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); } @@ -151,4 +148,3 @@ void testScannerResultCodes() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java index 8337eace55c2..6cbd1582d64f 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -56,7 +56,7 @@ import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class TestMultiRowResource { @ClassRule @@ -104,10 +104,7 @@ public static void setUpBeforeClass() throws Exception { extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, ""); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); @@ -115,13 +112,11 @@ public static void setUpBeforeClass() throws Exception { if (admin.tableExists(TABLE)) { return; } - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); } @@ -264,8 +259,8 @@ public void testMultiCellGetWithColsInQueryPathJSON() throws IOException { Response response = client.get(path.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); - ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper( - CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); assertEquals(1, cellSet.getRows().size()); assertEquals(ROW_1, Bytes.toString(cellSet.getRows().get(0).getKey())); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java index 488a95a1d7fa..62d2162427f4 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -63,24 +63,23 @@ import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestNamespacesInstanceResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestNamespacesInstanceResource.class); private static String NAMESPACE1 = "TestNamespacesInstanceResource1"; - private static Map NAMESPACE1_PROPS = new HashMap<>(); + private static Map NAMESPACE1_PROPS = new HashMap<>(); private static String NAMESPACE2 = "TestNamespacesInstanceResource2"; - private static Map NAMESPACE2_PROPS = new HashMap<>(); + private static Map NAMESPACE2_PROPS = new HashMap<>(); private static String NAMESPACE3 = "TestNamespacesInstanceResource3"; - private static Map NAMESPACE3_PROPS = new HashMap<>(); + private static Map NAMESPACE3_PROPS = new HashMap<>(); private static String NAMESPACE4 = "TestNamespacesInstanceResource4"; - private static Map NAMESPACE4_PROPS = new HashMap<>(); + private static Map NAMESPACE4_PROPS = new HashMap<>(); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -92,12 +91,11 @@ public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); testNamespacesInstanceModel = new TestNamespacesInstanceModel(); context = JAXBContext.newInstance(NamespacesInstanceModel.class, TableListModel.class); - jsonMapper = new JacksonJaxbJsonProvider() - .locateMapper(NamespacesInstanceModel.class, MediaType.APPLICATION_JSON_TYPE); + jsonMapper = new JacksonJaxbJsonProvider().locateMapper(NamespacesInstanceModel.class, + MediaType.APPLICATION_JSON_TYPE); NAMESPACE1_PROPS.put("key1", "value1"); NAMESPACE2_PROPS.put("key2a", "value2a"); NAMESPACE2_PROPS.put("key2b", "value2b"); @@ -119,12 +117,11 @@ private static byte[] toXML(NamespacesInstanceModel model) throws JAXBException } @SuppressWarnings("unchecked") - private static T fromXML(byte[] content) - throws JAXBException { + private static T fromXML(byte[] content) throws JAXBException { return (T) context.createUnmarshaller().unmarshal(new ByteArrayInputStream(content)); } - private NamespaceDescriptor findNamespace(Admin admin, String namespaceName) throws IOException{ + private NamespaceDescriptor findNamespace(Admin admin, String namespaceName) throws IOException { NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); for (NamespaceDescriptor namespaceDescriptor : nd) { if (namespaceDescriptor.getName().equals(namespaceName)) { @@ -134,19 +131,19 @@ private NamespaceDescriptor findNamespace(Admin admin, String namespaceName) thr return null; } - private void checkNamespaceProperties(NamespaceDescriptor nd, Map testProps){ + private void checkNamespaceProperties(NamespaceDescriptor nd, Map testProps) { 
checkNamespaceProperties(nd.getConfiguration(), testProps); } - private void checkNamespaceProperties(Map namespaceProps, - Map testProps){ + private void checkNamespaceProperties(Map namespaceProps, + Map testProps) { assertTrue(namespaceProps.size() == testProps.size()); - for (String key: testProps.keySet()) { + for (String key : testProps.keySet()) { assertEquals(testProps.get(key), namespaceProps.get(key)); } } - private void checkNamespaceTables(List namespaceTables, List testTables){ + private void checkNamespaceTables(List namespaceTables, List testTables) { assertEquals(namespaceTables.size(), testTables.size()); for (TableModel namespaceTable : namespaceTables) { String tableName = namespaceTable.getName(); @@ -189,10 +186,9 @@ public void testGetNamespaceTablesAndCannotDeleteNamespace() throws IOException, // Create two tables via admin. TableName tn1 = TableName.valueOf(nsName + ":table1"); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tn1); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tn1); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); TableName tn2 = TableName.valueOf(nsName + ":table2"); @@ -336,7 +332,7 @@ public void testNamespaceCreateAndDeleteXMLAndJSON() throws IOException, JAXBExc jsonString = jsonMapper.writeValueAsString(model2); response = client.post(namespacePath2, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); assertEquals(201, response.getCode()); - //check passing null content-type with a payload returns 415 + // check passing null content-type with a payload returns 415 Header[] nullHeaders = null; response = client.post(namespacePath1, nullHeaders, toXML(model1)); assertEquals(415, response.getCode()); @@ -392,23 +388,23 @@ public void testNamespaceCreateAndDeletePBAndNoBody() throws IOException { model4 = testNamespacesInstanceModel.buildTestModel(NAMESPACE4, NAMESPACE4_PROPS); testNamespacesInstanceModel.checkModel(model4, NAMESPACE4, NAMESPACE4_PROPS); - //Defines null headers for use in tests where no body content is provided, so that we set + // Defines null headers for use in tests where no body content is provided, so that we set // no content-type in the request Header[] nullHeaders = null; // Test cannot PUT (alter) non-existent namespace. - response = client.put(namespacePath3, nullHeaders, new byte[]{}); + response = client.put(namespacePath3, nullHeaders, new byte[] {}); assertEquals(403, response.getCode()); - response = client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(403, response.getCode()); // Test cannot create tables when in read only mode. 
conf.set("hbase.rest.readonly", "true"); - response = client.post(namespacePath3, nullHeaders, new byte[]{}); + response = client.post(namespacePath3, nullHeaders, new byte[] {}); assertEquals(403, response.getCode()); - response = client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(403, response.getCode()); NamespaceDescriptor nd3 = findNamespace(admin, NAMESPACE3); NamespaceDescriptor nd4 = findNamespace(admin, NAMESPACE4); @@ -417,14 +413,14 @@ public void testNamespaceCreateAndDeletePBAndNoBody() throws IOException { conf.set("hbase.rest.readonly", "false"); // Create namespace with no body and binary content type. - response = client.post(namespacePath3, nullHeaders, new byte[]{}); + response = client.post(namespacePath3, nullHeaders, new byte[] {}); assertEquals(201, response.getCode()); // Create namespace with protobuf content-type. - response = client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(201, response.getCode()); - //check setting unsupported content-type returns 415 - response = client.post(namespacePath3, Constants.MIMETYPE_BINARY, new byte[]{}); + // check setting unsupported content-type returns 415 + response = client.post(namespacePath3, Constants.MIMETYPE_BINARY, new byte[] {}); assertEquals(415, response.getCode()); // Check that created namespaces correctly. @@ -436,10 +432,10 @@ public void testNamespaceCreateAndDeletePBAndNoBody() throws IOException { checkNamespaceProperties(nd4, NAMESPACE4_PROPS); // Check cannot post tables that already exist. - response = client.post(namespacePath3, nullHeaders, new byte[]{}); + response = client.post(namespacePath3, nullHeaders, new byte[] {}); assertEquals(403, response.getCode()); - response = client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, - model4.createProtobufOutput()); + response = + client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, model4.createProtobufOutput()); assertEquals(403, response.getCode()); // Check cannot post tables when in read only mode. diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java index d8729f6656b5..58a2622803e9 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestNamespacesResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -53,8 +53,7 @@ public class TestNamespacesResource { private static String NAMESPACE2 = "TestNamespacesInstanceResource2"; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -78,7 +77,7 @@ public static void tearDownAfterClass() throws Exception { private static NamespacesModel fromXML(byte[] content) throws JAXBException { return (NamespacesModel) context.createUnmarshaller() - .unmarshal(new ByteArrayInputStream(content)); + .unmarshal(new ByteArrayInputStream(content)); } private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java index 4201c3210fe5..03c55ba19710 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestRESTServerSSL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.rest; import static org.junit.Assert.assertEquals; + import java.io.File; import java.lang.reflect.Method; import java.security.KeyPair; @@ -42,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestRESTServerSSL { @ClassRule @@ -79,8 +80,8 @@ public static void beforeClass() throws Exception { initializeAlgorithmId(); keyDir = initKeystoreDir(); KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - X509Certificate serverCertificate = KeyStoreTestUtil.generateCertificate( - "CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); + X509Certificate serverCertificate = + KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", keyPair, 30, "SHA1withRSA"); generateTrustStore("jks", serverCertificate); generateTrustStore("jceks", serverCertificate); @@ -162,8 +163,6 @@ public void testSslConnectionUsingKeystoreFormatPKCS12() throws Exception { assertEquals(200, response.getCode()); } - - private static File initKeystoreDir() { String dataTestDir = TEST_UTIL.getDataTestDir().toString(); File keystoreDir = new File(dataTestDir, TestRESTServerSSL.class.getSimpleName() + "_keys"); @@ -172,14 +171,14 @@ private static File initKeystoreDir() { } private static void generateKeyStore(String keyStoreType, KeyPair keyPair, - X509Certificate serverCertificate) throws Exception { + X509Certificate serverCertificate) throws Exception { String keyStorePath = getKeystoreFilePath(keyStoreType); KeyStoreTestUtil.createKeyStore(keyStorePath, KEY_STORE_PASSWORD, KEY_STORE_PASSWORD, "serverKS", keyPair.getPrivate(), 
serverCertificate, keyStoreType); } private static void generateTrustStore(String trustStoreType, X509Certificate serverCertificate) - throws Exception { + throws Exception { String trustStorePath = getTruststoreFilePath(trustStoreType); KeyStoreTestUtil.createTrustStore(trustStorePath, TRUST_STORE_PASSWORD, "serverTS", serverCertificate, trustStoreType); @@ -200,7 +199,7 @@ private void startRESTServerWithDefaultKeystoreType() throws Exception { REST_TEST_UTIL.startServletContainer(conf); Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); sslClient = new Client(localCluster, getTruststoreFilePath("jks"), - Optional.of(TRUST_STORE_PASSWORD), Optional.empty()); + Optional.of(TRUST_STORE_PASSWORD), Optional.empty()); } private void startRESTServer(String storeType) throws Exception { @@ -213,7 +212,7 @@ private void startRESTServer(String storeType) throws Exception { REST_TEST_UTIL.startServletContainer(conf); Cluster localCluster = new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()); sslClient = new Client(localCluster, getTruststoreFilePath(storeType), - Optional.of(TRUST_STORE_PASSWORD), Optional.of(storeType)); + Optional.of(TRUST_STORE_PASSWORD), Optional.of(storeType)); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java index 7c0294372f01..410218555422 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestResourceFilter { @ClassRule @@ -40,8 +40,7 @@ public class TestResourceFilter { HBaseClassTestRule.forClass(TestResourceFilter.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; @BeforeClass @@ -49,8 +48,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().set(Constants.FILTER_CLASSES, DummyFilter.class.getName()); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); } @AfterClass diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java index 9a2542e70518..7831b55de7ff 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Marshaller; @@ -68,7 +67,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestScannerResource { @ClassRule @@ -85,8 +84,7 @@ public class TestScannerResource { private static final String COLUMN_2 = CFB + ":2"; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Marshaller marshaller; @@ -99,7 +97,7 @@ static int insertData(Configuration conf, TableName tableName, String column, do throws IOException { Random rng = ThreadLocalRandom.current(); byte[] k = new byte[3]; - byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); + byte[][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); List puts = new ArrayList<>(); for (byte b1 = 'a'; b1 < 'z'; b1++) { for (byte b2 = 'a'; b2 < 'z'; b2++) { @@ -139,8 +137,8 @@ static int countCellSet(CellSetModel model) { private static int fullTableScan(ScannerModel model) throws IOException { model.setBatch(100); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -176,12 +174,8 @@ public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class, + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); @@ -189,25 +183,21 @@ public static void setUpBeforeClass() throws Exception { if (admin.tableExists(TABLE)) { return; } - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); expectedRows1 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_1, 1.0); 
expectedRows2 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_2, 0.5); - tableDescriptorBuilder=TableDescriptorBuilder.newBuilder(TABLE_TO_BE_DISABLED); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE_TO_BE_DISABLED); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); @@ -232,16 +222,14 @@ public void testSimpleScannerXML() throws IOException, JAXBException { // test put operation is forbidden in read-only mode conf.set("hbase.rest.readonly", "true"); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(403, response.getCode()); String scannerURI = response.getLocation(); assertNull(scannerURI); // recall previous put operation with read-only off conf.set("hbase.rest.readonly", "false"); - response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, - body); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -250,8 +238,8 @@ public void testSimpleScannerXML() throws IOException, JAXBException { response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); // confirm batch size conformance assertEquals(BATCH_SIZE, countCellSet(cellSet)); @@ -276,16 +264,16 @@ public void testSimpleScannerPB() throws IOException { // test put operation is forbidden in read-only mode conf.set("hbase.rest.readonly", "true"); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(403, response.getCode()); String scannerURI = response.getLocation(); assertNull(scannerURI); // recall previous put operation with read-only off conf.set("hbase.rest.readonly", "false"); - response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(201, response.getCode()); scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -319,16 +307,16 @@ public void testSimpleScannerBinary() throws IOException { // test put operation is forbidden in read-only mode conf.set("hbase.rest.readonly", "true"); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + 
model.createProtobufOutput()); assertEquals(403, response.getCode()); String scannerURI = response.getLocation(); assertNull(scannerURI); // recall previous put operation with read-only off conf.set("hbase.rest.readonly", "false"); - response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); assertEquals(201, response.getCode()); scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -340,9 +328,8 @@ public void testSimpleScannerBinary() throws IOException { // verify that data was returned assertTrue(response.getBody().length > 0); // verify that the expected X-headers are present - boolean foundRowHeader = false, foundColumnHeader = false, - foundTimestampHeader = false; - for (Header header: response.getHeaders()) { + boolean foundRowHeader = false, foundColumnHeader = false, foundTimestampHeader = false; + for (Header header : response.getHeaders()) { if (header.getName().equals("X-Row")) { foundRowHeader = true; } else if (header.getName().equals("X-Column")) { @@ -383,8 +370,8 @@ public void testTableDoesNotExist() throws IOException, JAXBException { StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + NONEXISTENT_TABLE + - "/scanner", Constants.MIMETYPE_XML, body); + Response response = + client.put("/" + NONEXISTENT_TABLE + "/scanner", Constants.MIMETYPE_XML, body); String scannerURI = response.getLocation(); assertNotNull(scannerURI); response = client.get(scannerURI, Constants.MIMETYPE_XML); @@ -407,4 +394,3 @@ public void testTableScanWithTableDisable() throws IOException { assertEquals(410, response.getCode()); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java index 9f86b5815d41..39d2b8689b71 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -78,7 +78,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestScannersWithFilters { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -88,37 +88,28 @@ public class TestScannersWithFilters { private static final TableName TABLE = TableName.valueOf("TestScannersWithFilters"); - private static final byte[][] ROWS_ONE = { - Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"), - Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") - }; + private static final byte[][] ROWS_ONE = { Bytes.toBytes("testRowOne-0"), + Bytes.toBytes("testRowOne-1"), Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") }; - private static final byte[][] ROWS_TWO = { - Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"), - Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") - }; + private static final byte[][] ROWS_TWO = { Bytes.toBytes("testRowTwo-0"), + Bytes.toBytes("testRowTwo-1"), Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") }; - private static final byte[][] FAMILIES = { - Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") - }; + private static final byte[][] FAMILIES = + { Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") }; - private static final byte[][] QUALIFIERS_ONE = { - Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), - Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") - }; + private static final byte[][] QUALIFIERS_ONE = + { Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), + Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") }; - private static final byte[][] QUALIFIERS_TWO = { - Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), - Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") - }; + private static final byte[][] QUALIFIERS_TWO = + { Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), + Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") }; - private static final byte[][] VALUES = { - Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") - }; + private static final byte[][] VALUES = + { Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") }; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Marshaller marshaller; @@ -130,20 +121,16 @@ public class TestScannersWithFilters { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - context = JAXBContext.newInstance( - CellModel.class, - CellSetModel.class, - RowModel.class, - ScannerModel.class); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, + ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); 
Admin admin = TEST_UTIL.getAdmin(); if (!admin.tableExists(TABLE)) { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[0])) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[1])).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[0])) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[1])).build(); admin.createTable(tableDescriptor); Table table = TEST_UTIL.getConnection().getTable(TABLE); // Insert first half @@ -221,16 +208,14 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - private static void verifyScan(Scan s, long expectedRows, long expectedKeys) - throws Exception { + private static void verifyScan(Scan s, long expectedRows, long expectedKeys) throws Exception { ScannerModel model = ScannerModel.fromScan(s); model.setBatch(Integer.MAX_VALUE); // fetch it all at once StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); LOG.debug(writer.toString()); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -239,16 +224,17 @@ private static void verifyScan(Scan s, long expectedRows, long expectedKeys) response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cells = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cells = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); int rows = cells.getRows().size(); - assertEquals("Scanned too many rows! Only expected " + expectedRows + - " total but scanned " + rows, expectedRows, rows); + assertEquals( + "Scanned too many rows! 
Only expected " + expectedRows + " total but scanned " + rows, + expectedRows, rows); for (RowModel row : cells.getRows()) { int count = row.getCells().size(); - assertEquals("Expected " + expectedKeys + " keys per row but " + - "returned " + count, expectedKeys, count); + assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + count, + expectedKeys, count); } // delete the scanner @@ -256,15 +242,14 @@ private static void verifyScan(Scan s, long expectedRows, long expectedKeys) assertEquals(200, response.getCode()); } - private static void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception { + private static void verifyScanFull(Scan s, KeyValue[] kvs) throws Exception { ScannerModel model = ScannerModel.fromScan(s); model.setBatch(Integer.MAX_VALUE); // fetch it all at once StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); LOG.debug(writer.toString()); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -273,8 +258,8 @@ private static void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception { response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); // delete the scanner response = client.delete(scannerURI); @@ -295,36 +280,30 @@ private static void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception { break; } - assertTrue("Scanned too many keys! Only expected " + kvs.length + - " total but already scanned " + (cells.size() + idx), + assertTrue("Scanned too many keys! 
Only expected " + kvs.length + + " total but already scanned " + (cells.size() + idx), kvs.length >= idx + cells.size()); - for (CellModel cell: cells) { - assertTrue("Row mismatch", - Bytes.equals(rowModel.getKey(), CellUtil.cloneRow(kvs[idx]))); + for (CellModel cell : cells) { + assertTrue("Row mismatch", Bytes.equals(rowModel.getKey(), CellUtil.cloneRow(kvs[idx]))); byte[][] split = CellUtil.parseColumn(cell.getColumn()); - assertTrue("Family mismatch", - Bytes.equals(split[0], CellUtil.cloneFamily(kvs[idx]))); - assertTrue("Qualifier mismatch", - Bytes.equals(split[1], CellUtil.cloneQualifier(kvs[idx]))); - assertTrue("Value mismatch", - Bytes.equals(cell.getValue(), CellUtil.cloneValue(kvs[idx]))); + assertTrue("Family mismatch", Bytes.equals(split[0], CellUtil.cloneFamily(kvs[idx]))); + assertTrue("Qualifier mismatch", Bytes.equals(split[1], CellUtil.cloneQualifier(kvs[idx]))); + assertTrue("Value mismatch", Bytes.equals(cell.getValue(), CellUtil.cloneValue(kvs[idx]))); idx++; } } - assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, - kvs.length, idx); + assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx); } - private static void verifyScanNoEarlyOut(Scan s, long expectedRows, - long expectedKeys) throws Exception { + private static void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys) + throws Exception { ScannerModel model = ScannerModel.fromScan(s); model.setBatch(Integer.MAX_VALUE); // fetch it all at once StringWriter writer = new StringWriter(); marshaller.marshal(model, writer); LOG.debug(writer.toString()); byte[] body = Bytes.toBytes(writer.toString()); - Response response = client.put("/" + TABLE + "/scanner", - Constants.MIMETYPE_XML, body); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); assertEquals(201, response.getCode()); String scannerURI = response.getLocation(); assertNotNull(scannerURI); @@ -333,8 +312,8 @@ private static void verifyScanNoEarlyOut(Scan s, long expectedRows, response = client.get(scannerURI, Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) - unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); // delete the scanner response = client.delete(scannerURI); @@ -354,13 +333,13 @@ private static void verifyScanNoEarlyOut(Scan s, long expectedRows, break; } - assertTrue("Scanned too many rows! Only expected " + expectedRows + - " total but already scanned " + (j+1), expectedRows > j); - assertEquals("Expected " + expectedKeys + " keys per row but " + - "returned " + cells.size(), expectedKeys, cells.size()); + assertTrue("Scanned too many rows! 
Only expected " + expectedRows + + " total but already scanned " + (j + 1), + expectedRows > j); + assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + cells.size(), + expectedKeys, cells.size()); } - assertEquals("Expected " + expectedRows + " rows but scanned " + j + - " rows", expectedRows, j); + assertEquals("Expected " + expectedRows + " rows but scanned " + j + " rows", expectedRows, j); } @Test @@ -376,7 +355,7 @@ public void testNoFilter() throws Exception { // One family s = new Scan(); s.addFamily(FAMILIES[0]); - verifyScan(s, expectedRows, expectedKeys/2); + verifyScan(s, expectedRows, expectedKeys / 2); } @Test @@ -392,50 +371,49 @@ public void testPrefixFilter() throws Exception { @Test public void testPageFilter() throws Exception { // KVs in first 6 rows - KeyValue [] expectedKVs = { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) - }; + KeyValue[] 
expectedKVs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; // Grab all 6 rows long expectedRows = 6; @@ -482,7 +460,7 @@ public void testInclusiveStopFilter() throws Exception { long expectedRows = (numRows / 2) - 1; long expectedKeys = colsPerRow; Scan s = new Scan().withStartRow(Bytes.toBytes("testRowOne-0")) - .withStopRow(Bytes.toBytes("testRowOne-3")); + .withStopRow(Bytes.toBytes("testRowOne-3")); verifyScan(s, expectedRows, expectedKeys); // Now use start row with inclusive stop filter @@ -497,7 +475,7 @@ public void testInclusiveStopFilter() throws Exception { expectedRows = (numRows / 2) - 1; expectedKeys = colsPerRow; s = new Scan().withStartRow(Bytes.toBytes("testRowTwo-0")) - .withStopRow(Bytes.toBytes("testRowTwo-3")); + .withStopRow(Bytes.toBytes("testRowTwo-3")); verifyScan(s, expectedRows, expectedKeys); // Now use start row with inclusive stop filter @@ -544,7 
+522,7 @@ public void testQualifierFilter() throws Exception { f = new QualifierFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + .withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -555,7 +533,7 @@ public void testQualifierFilter() throws Exception { f = new QualifierFilter(CompareOperator.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + .withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -566,96 +544,92 @@ public void testQualifierFilter() throws Exception { f = new QualifierFilter(CompareOperator.GREATER, new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + .withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); // Match keys not equal to. Look across rows and fully validate the keys and ordering // Expect varied numbers of keys, 4 per row in group one, 6 per row in group two - f = new QualifierFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(QUALIFIERS_ONE[2])); + f = new QualifierFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(QUALIFIERS_ONE[2])); s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], 
FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + KeyValue[] kvs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); // Test across rows and groups with a regex. 
Filter out "test*-2" // Expect 4 keys per row across both groups - f = new QualifierFilter(CompareOperator.NOT_EQUAL, - new RegexStringComparator("test.+-2")); + f = new QualifierFilter(CompareOperator.NOT_EQUAL, new RegexStringComparator("test.+-2")); s = new Scan(); s.setFilter(f); - kvs = new KeyValue [] { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + kvs = new KeyValue[] { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], 
VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -664,8 +638,8 @@ public void testRowFilter() throws Exception { // Match a single row, all keys long expectedRows = 1; long expectedKeys = colsPerRow; - Filter f = new RowFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + Filter f = + new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -673,8 +647,7 @@ public void testRowFilter() throws Exception { // Match a two rows, one from each group, using regex expectedRows = 2; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator("testRow.+-2")); + f = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("testRow.+-2")); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -683,8 +656,7 @@ public void testRowFilter() throws Exception { // Expect all keys in one row expectedRows = 1; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = new RowFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -723,8 +695,7 @@ public void testRowFilter() throws Exception { // Expect all keys in all but two rows expectedRows = numRows - 2; expectedKeys = colsPerRow; - f = new RowFilter(CompareOperator.GREATER, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = new RowFilter(CompareOperator.GREATER, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -737,69 +708,66 @@ public void testRowFilter() throws Exception { s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], 
QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + KeyValue[] kvs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); // Test across rows and groups with a regex // Filter out everything that doesn't match "*-2" // Expect all keys in two rows - f = new RowFilter(CompareOperator.EQUAL, - new 
RegexStringComparator(".+-2")); + f = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2")); s = new Scan(); s.setFilter(f); - kvs = new KeyValue [] { - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) - }; + kvs = new KeyValue[] { + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; verifyScanFull(s, kvs); } @@ -808,8 +776,8 @@ public void testValueFilter() throws Exception { // Match group one rows long expectedRows = numRows / 2; long expectedKeys = colsPerRow; - Filter f = new ValueFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + Filter f = + new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testValueOne"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -817,8 +785,7 @@ public void testValueFilter() throws Exception { // Match group two rows expectedRows = numRows / 2; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testValueTwo"))); + f = new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -826,8 +793,7 @@ public void testValueFilter() throws Exception { // Match all values using regex expectedRows = numRows; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.EQUAL, - new RegexStringComparator("testValue((One)|(Two))")); + f = new ValueFilter(CompareOperator.EQUAL, new RegexStringComparator("testValue((One)|(Two))")); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -836,8 +802,7 @@ public void testValueFilter() throws Exception { // Expect group one rows expectedRows = numRows / 2; expectedKeys = colsPerRow; - f = new ValueFilter(CompareOperator.LESS, - 
new BinaryComparator(Bytes.toBytes("testValueTwo"))); + f = new ValueFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -900,29 +865,28 @@ public void testValueFilter() throws Exception { s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + KeyValue[] kvs = { + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -935,29 +899,28 @@ public void testSkipFilter() throws Exception { Scan s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - 
new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + KeyValue[] kvs = { + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -967,30 +930,22 @@ public void testFilterList() throws Exception { // regular expression and substring filters // Use must pass all List filters = new ArrayList<>(3); - filters.add(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2"))); - filters.add(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2"))); - filters.add(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("One"))); + filters.add(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"))); + filters.add(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"))); + filters.add(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("One"))); Filter f = new FilterList(Operator.MUST_PASS_ALL, filters); Scan s = new Scan(); s.addFamily(FAMILIES[0]); s.setFilter(f); - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]) - }; + KeyValue[] kvs = { new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]) }; verifyScanFull(s, kvs); // Test getting everything with a MUST_PASS_ONE filter 
including row, qf, // val, regular expression and substring filters filters.clear(); - filters.add(new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+Two.+"))); - filters.add(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2"))); - filters.add(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("One"))); + filters.add(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+Two.+"))); + filters.add(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"))); + filters.add(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("One"))); f = new FilterList(Operator.MUST_PASS_ONE, filters); s = new Scan(); s.setFilter(f); @@ -1002,14 +957,12 @@ public void testFirstKeyOnlyFilter() throws Exception { Scan s = new Scan(); s.setFilter(new FirstKeyOnlyFilter()); // Expected KVs, the first KV from each of the remaining 6 rows - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) - }; + KeyValue[] kvs = { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) }; verifyScanFull(s, kvs); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java index f7f9def44778..d6df0de266fe 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; import org.apache.hadoop.hbase.rest.client.Client; import org.apache.hadoop.hbase.rest.client.Cluster; import org.apache.hadoop.hbase.rest.client.Response; @@ -70,7 +69,9 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; + +@Category({ RestTests.class, MediumTests.class }) public class TestScannersWithLabels { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -106,8 +107,8 @@ private static int insertData(TableName tableName, String column, double prob) Put put = new Put(Bytes.toBytes("row" + i)); put.setDurability(Durability.SKIP_WAL); put.addColumn(famAndQf[0], famAndQf[1], k); - put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" - + TOPSECRET)); + put.setCellVisibility( + new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET)); puts.add(put); } try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { @@ -132,11 +133,10 @@ private static int countCellSet(CellSetModel model) { @BeforeClass public static void setUpBeforeClass() throws Exception { - SUPERUSER = User.createUserForTesting(conf, "admin", - new String[] { "supergroup" }); + SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); conf = TEST_UTIL.getConfiguration(); - conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, - SimpleScanLabelGenerator.class, ScanLabelGenerator.class); + conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, + ScanLabelGenerator.class); conf.set("hbase.superuser", SUPERUSER.getShortName()); VisibilityTestUtil.enableVisiblityLabels(conf); TEST_UTIL.startMiniCluster(1); @@ -147,20 +147,18 @@ public static void setUpBeforeClass() throws Exception { REST_TEST_UTIL.startServletContainer(conf); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, - ScannerModel.class); + ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(TABLE)) { return; } - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); insertData(TABLE, COLUMN_1, 1.0); @@ -243,8 +241,8 @@ public void 
testSimpleScannerXMLWithLabelsThatReceivesData() throws IOException, // Respond with 204 as there are no cells to be retrieved assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - CellSetModel cellSet = (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response - .getBody())); + CellSetModel cellSet = + (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); assertEquals(5, countCellSet(cellSet)); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java index 14768f9da504..e374312f0992 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class TestSchemaResource { @ClassRule @@ -64,8 +64,7 @@ public class TestSchemaResource { private static String TABLE2 = "TestSchemaResource2"; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -93,12 +92,9 @@ public static void setUpBeforeClass() throws Exception { extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, ""); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); testTableSchemaModel = new TestTableSchemaModel(); - context = JAXBContext.newInstance( - ColumnSchemaModel.class, - TableSchemaModel.class); + context = JAXBContext.newInstance(ColumnSchemaModel.class, TableSchemaModel.class); } @AfterClass @@ -111,7 +107,7 @@ public static void tearDownAfterClass() throws Exception { public void tearDown() throws Exception { Admin admin = TEST_UTIL.getAdmin(); - for (String table : new String[] {TABLE1, TABLE2}) { + for (String table : new String[] { TABLE1, TABLE2 }) { TableName t = TableName.valueOf(table); if (admin.tableExists(t)) { admin.disableTable(t); @@ -128,10 +124,9 @@ private static byte[] toXML(TableSchemaModel model) throws JAXBException { return Bytes.toBytes(writer.toString()); } - private static TableSchemaModel fromXML(byte[] content) - throws JAXBException { + private static TableSchemaModel fromXML(byte[] content) throws JAXBException { return (TableSchemaModel) context.createUnmarshaller() - .unmarshal(new ByteArrayInputStream(content)); + .unmarshal(new ByteArrayInputStream(content)); } @Test @@ -142,7 +137,7 @@ public void testTableCreateAndDeleteXML() throws IOException, JAXBException { Admin admin = TEST_UTIL.getAdmin(); assertFalse("Table " + TABLE1 + " should not exist", - admin.tableExists(TableName.valueOf(TABLE1))); + 
admin.tableExists(TableName.valueOf(TABLE1))); // create the table model = testTableSchemaModel.buildTestModel(TABLE1); @@ -154,8 +149,8 @@ public void testTableCreateAndDeleteXML() throws IOException, JAXBException { } response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model), extraHdr); - assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), - 201, response.getCode()); + assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), 201, + response.getCode()); // recall the same put operation but in read-only mode conf.set("hbase.rest.readonly", "true"); @@ -213,15 +208,15 @@ public void testTableCreateAndDeletePB() throws IOException { response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); assertEquals(400, response.getCode()); } - response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput(), extraHdr); - assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), - 201, response.getCode()); + response = + client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput(), extraHdr); + assertEquals("put failed with csrf " + (csrfEnabled ? "enabled" : "disabled"), 201, + response.getCode()); // recall the same put operation but in read-only mode conf.set("hbase.rest.readonly", "true"); - response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput(), extraHdr); + response = + client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput(), extraHdr); assertNotNull(extraHdr); assertEquals(403, response.getCode()); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java index 60896446e95f..2274cc81b465 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -101,7 +101,7 @@ * Test class for SPNEGO authentication on the HttpServer. Uses Kerby's MiniKDC and Apache * HttpComponents to verify that a simple Servlet is reachable via SPNEGO and unreachable w/o. 
*/ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestSecureRESTServer { @ClassRule @@ -138,8 +138,7 @@ public static void setupServer() throws Exception { /* * Keytabs */ - File keytabDir = new File(target, TestSecureRESTServer.class.getSimpleName() - + "_keytabs"); + File keytabDir = new File(target, TestSecureRESTServer.class.getSimpleName() + "_keytabs"); if (keytabDir.exists()) { FileUtils.deleteDirectory(keytabDir); } @@ -175,15 +174,14 @@ public static void setupServer() throws Exception { conf.set("hbase.master.keytab.file", serviceKeytab.getAbsolutePath()); conf.set("hbase.unsafe.regionserver.hostname", "localhost"); conf.set("hbase.master.hostname", "localhost"); - HBaseKerberosUtils.setSecuredConfiguration(conf, - SERVICE_PRINCIPAL+ "@" + KDC.getRealm(), SPNEGO_SERVICE_PRINCIPAL+ "@" + KDC.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, SERVICE_PRINCIPAL + "@" + KDC.getRealm(), + SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); setHdfsSecuredConfiguration(conf); - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - TokenProvider.class.getName(), AccessController.class.getName()); - conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - AccessController.class.getName()); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName(), + AccessController.class.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName()); conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - AccessController.class.getName()); + AccessController.class.getName()); // Enable EXEC permission checking conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); conf.set("hbase.superuser", "hbase"); @@ -194,18 +192,15 @@ public static void setupServer() throws Exception { UserGroupInformation.setConfiguration(conf); updateKerberosConfiguration(conf, REST_SERVER_PRINCIPAL, SPNEGO_SERVICE_PRINCIPAL, - restServerKeytab); + restServerKeytab); // Start HDFS - TEST_UTIL.startMiniCluster(StartTestingClusterOption.builder() - .numMasters(1) - .numRegionServers(1) - .numZkServers(1) - .build()); + TEST_UTIL.startMiniCluster(StartTestingClusterOption.builder().numMasters(1).numRegionServers(1) + .numZkServers(1).build()); // Start REST - UserGroupInformation restUser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - REST_SERVER_PRINCIPAL, restServerKeytab.getAbsolutePath()); + UserGroupInformation restUser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(REST_SERVER_PRINCIPAL, restServerKeytab.getAbsolutePath()); restUser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { @@ -215,18 +210,18 @@ public Void run() throws Exception { }); baseUrl = new URL("http://localhost:" + REST_TEST.getServletPort()); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); TEST_UTIL.waitTableAvailable(TableName.valueOf("hbase:acl")); // Let the REST server create, read, and write globally - UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + UserGroupInformation superuser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); superuser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection conn = 
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - AccessControlClient.grant( - conn, REST_SERVER_PRINCIPAL, Action.CREATE, Action.READ, Action.WRITE); + AccessControlClient.grant(conn, REST_SERVER_PRINCIPAL, Action.CREATE, Action.READ, + Action.WRITE); } catch (Throwable t) { if (t instanceof Exception) { throw (Exception) t; @@ -268,13 +263,13 @@ public static void stopServer() throws Exception { private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception { // Set principal+keytab configuration for HDFS conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, - SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, serviceKeytab.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, - SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, serviceKeytab.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, - SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); // Enable token access for HDFS blocks conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); // Only use HTTPS (required because we aren't using "secure" ports) @@ -293,8 +288,8 @@ private static void setHdfsSecuredConfiguration(Configuration conf) throws Excep conf.setBoolean("ignore.secure.ports.for.testing", true); } - private static void updateKerberosConfiguration(Configuration conf, - String serverPrincipal, String spnegoPrincipal, File serverKeytab) { + private static void updateKerberosConfiguration(Configuration conf, String serverPrincipal, + String spnegoPrincipal, File serverKeytab) { KerberosName.setRules("DEFAULT"); // Enable Kerberos (pre-req) @@ -312,16 +307,15 @@ private static void updateKerberosConfiguration(Configuration conf, private static void instertData() throws IOException, InterruptedException { // Create a table, write a row to it, grant read perms to the client - UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + UserGroupInformation superuser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); final TableName table = TableName.valueOf("publicTable"); superuser.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { TableDescriptor desc = TableDescriptorBuilder.newBuilder(table) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")).build(); conn.getAdmin().createTable(desc); try (Table t = conn.getTable(table)) { Put p = new Put(Bytes.toBytes("a")); @@ -341,21 +335,22 @@ public Void run() throws Exception { }); } - public void testProxy(String extraArgs, String PRINCIPAL, File keytab, int responseCode) throws Exception{ - UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + public void testProxy(String extraArgs, String PRINCIPAL, File keytab, int responseCode) + throws Exception { + UserGroupInformation superuser = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); final 
TableName table = TableName.valueOf("publicTable"); // Read that row as the client - Pair pair = getClient(); + Pair pair = getClient(); CloseableHttpClient client = pair.getFirst(); HttpClientContext context = pair.getSecond(); - HttpGet get = new HttpGet(new URL("http://localhost:"+ REST_TEST.getServletPort()).toURI() - + "/" + table + "/a" + extraArgs); + HttpGet get = new HttpGet(new URL("http://localhost:" + REST_TEST.getServletPort()).toURI() + + "/" + table + "/a" + extraArgs); get.addHeader("Accept", "application/json"); - UserGroupInformation user = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - PRINCIPAL, keytab.getAbsolutePath()); + UserGroupInformation user = + UserGroupInformation.loginUserFromKeytabAndReturnUGI(PRINCIPAL, keytab.getAbsolutePath()); String jsonResponse = user.doAs(new PrivilegedExceptionAction() { @Override public String run() throws Exception { @@ -367,8 +362,9 @@ public String run() throws Exception { } } }); - if(responseCode == HttpURLConnection.HTTP_OK) { - ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + if (responseCode == HttpURLConnection.HTTP_OK) { + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel model = mapper.readValue(jsonResponse, CellSetModel.class); assertEquals(1, model.getRows().size()); RowModel row = model.getRows().get(0); @@ -386,12 +382,12 @@ public void testPositiveAuthorization() throws Exception { @Test public void testDoAs() throws Exception { - testProxy("?doAs="+CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); + testProxy("?doAs=" + CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); } @Test public void testDoas() throws Exception { - testProxy("?doas="+CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); + testProxy("?doas=" + CLIENT_PRINCIPAL, WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_OK); } @Test @@ -399,47 +395,43 @@ public void testWithoutDoAs() throws Exception { testProxy("", WHEEL_PRINCIPAL, wheelKeytab, HttpURLConnection.HTTP_FORBIDDEN); } - @Test public void testNegativeAuthorization() throws Exception { - Pair pair = getClient(); + Pair pair = getClient(); CloseableHttpClient client = pair.getFirst(); HttpClientContext context = pair.getSecond(); StringEntity entity = new StringEntity( "{\"name\":\"test\", \"ColumnSchema\":[{\"name\":\"f\"}]}", ContentType.APPLICATION_JSON); - HttpPut put = new HttpPut("http://localhost:"+ REST_TEST.getServletPort() + "/test/schema"); + HttpPut put = new HttpPut("http://localhost:" + REST_TEST.getServletPort() + "/test/schema"); put.setEntity(entity); - - UserGroupInformation unprivileged = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath()); + UserGroupInformation unprivileged = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath()); unprivileged.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (CloseableHttpResponse response = client.execute(put, context)) { final int statusCode = response.getStatusLine().getStatusCode(); HttpEntity entity = response.getEntity(); - assertEquals("Got response: "+ EntityUtils.toString(entity), - HttpURLConnection.HTTP_FORBIDDEN, statusCode); + assertEquals("Got response: " + EntityUtils.toString(entity), + HttpURLConnection.HTTP_FORBIDDEN, statusCode); } return 
null; } }); } - private Pair getClient() { + private Pair getClient() { HttpClientConnectionManager pool = new PoolingHttpClientConnectionManager(); HttpHost host = new HttpHost("localhost", REST_TEST.getServletPort()); - Registry authRegistry = - RegistryBuilder.create().register(AuthSchemes.SPNEGO, - new SPNegoSchemeFactory(true, true)).build(); + Registry authRegistry = RegistryBuilder. create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); credentialsProvider.setCredentials(AuthScope.ANY, EmptyCredentials.INSTANCE); AuthCache authCache = new BasicAuthCache(); - CloseableHttpClient client = HttpClients.custom() - .setDefaultAuthSchemeRegistry(authRegistry) + CloseableHttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) .setConnectionManager(pool).build(); HttpClientContext context = HttpClientContext.create(); @@ -454,10 +446,13 @@ private Pair getClient() { private static class EmptyCredentials implements Credentials { public static final EmptyCredentials INSTANCE = new EmptyCredentials(); - @Override public String getPassword() { + @Override + public String getPassword() { return null; } - @Override public Principal getUserPrincipal() { + + @Override + public Principal getUserPrincipal() { return null; } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java index a741801df077..65ade2037e38 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestSecurityHeadersFilter { @ClassRule @@ -42,8 +42,7 @@ public class TestSecurityHeadersFilter { HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; @After @@ -56,56 +55,54 @@ public void tearDown() throws Exception { public void testDefaultValues() throws Exception { TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); String path = "/version/cluster"; Response response = client.get(path); assertThat(response.getCode(), equalTo(200)); assertThat("Header 'X-Content-Type-Options' is missing from Rest response", - response.getHeader("X-Content-Type-Options"), is(not((String)null))); + response.getHeader("X-Content-Type-Options"), is(not((String) null))); assertThat("Header 'X-Content-Type-Options' has invalid default value", - response.getHeader("X-Content-Type-Options"), equalTo("nosniff")); + response.getHeader("X-Content-Type-Options"), equalTo("nosniff")); assertThat("Header 'X-XSS-Protection' is missing from Rest response", - response.getHeader("X-XSS-Protection"), is(not((String)null))); + response.getHeader("X-XSS-Protection"), is(not((String) null))); assertThat("Header 'X-XSS-Protection' has invalid default value", - response.getHeader("X-XSS-Protection"), equalTo("1; mode=block")); + response.getHeader("X-XSS-Protection"), equalTo("1; mode=block")); - assertThat("Header 'Strict-Transport-Security' should be missing from Rest response," + - "but it's present", - response.getHeader("Strict-Transport-Security"), is((String)null)); - assertThat("Header 'Content-Security-Policy' should be missing from Rest response," + - "but it's present", - response.getHeader("Content-Security-Policy"), is((String)null)); + assertThat("Header 'Strict-Transport-Security' should be missing from Rest response," + + "but it's present", + response.getHeader("Strict-Transport-Security"), is((String) null)); + assertThat( + "Header 'Content-Security-Policy' should be missing from Rest response," + "but it's present", + response.getHeader("Content-Security-Policy"), is((String) null)); } @Test public void testHstsAndCspSettings() throws Exception { TEST_UTIL.getConfiguration().set("hbase.http.filter.hsts.value", - "max-age=63072000;includeSubDomains;preload"); + "max-age=63072000;includeSubDomains;preload"); TEST_UTIL.getConfiguration().set("hbase.http.filter.csp.value", - "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); String path = "/version/cluster"; Response response = 
client.get(path); assertThat(response.getCode(), equalTo(200)); assertThat("Header 'Strict-Transport-Security' is missing from Rest response", - response.getHeader("Strict-Transport-Security"), is(not((String)null))); + response.getHeader("Strict-Transport-Security"), is(not((String) null))); assertThat("Header 'Strict-Transport-Security' has invalid value", - response.getHeader("Strict-Transport-Security"), - equalTo("max-age=63072000;includeSubDomains;preload")); + response.getHeader("Strict-Transport-Security"), + equalTo("max-age=63072000;includeSubDomains;preload")); assertThat("Header 'Content-Security-Policy' is missing from Rest response", - response.getHeader("Content-Security-Policy"), is(not((String)null))); + response.getHeader("Content-Security-Policy"), is(not((String) null))); assertThat("Header 'Content-Security-Policy' has invalid value", - response.getHeader("Content-Security-Policy"), - equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); + response.getHeader("Content-Security-Policy"), + equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java index b30a276cd45d..fe45e37f1982 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestStatusResource { @ClassRule @@ -58,8 +58,7 @@ public class TestStatusResource { private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME + ",,1"); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; @@ -73,11 +72,11 @@ private static void validate(StorageClusterStatusModel model) { assertNotNull(model.getDeadNodes()); assertFalse(model.getLiveNodes().isEmpty()); boolean foundMeta = false; - for (StorageClusterStatusModel.Node node: model.getLiveNodes()) { + for (StorageClusterStatusModel.Node node : model.getLiveNodes()) { assertNotNull(node.getName()); assertTrue(node.getStartCode() > 0L); assertTrue(node.getRequests() >= 0); - for (StorageClusterStatusModel.Node.Region region: node.getRegions()) { + for (StorageClusterStatusModel.Node.Region region : node.getRegions()) { if (Bytes.equals(region.getName(), META_REGION_NAME)) { foundMeta = true; } @@ -116,9 +115,8 @@ public void testGetClusterStatusXML() throws IOException, JAXBException { Response response = client.get("/status/cluster", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - StorageClusterStatusModel model = (StorageClusterStatusModel) - context.createUnmarshaller().unmarshal( - new ByteArrayInputStream(response.getBody())); + StorageClusterStatusModel 
model = (StorageClusterStatusModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); validate(model); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java index 36b2d3db6317..20043c9d0614 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestTableResource { @ClassRule @@ -74,8 +74,7 @@ public class TestTableResource { private static List regionMap; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; @@ -83,16 +82,12 @@ public class TestTableResource { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); - context = JAXBContext.newInstance( - TableModel.class, - TableInfoModel.class, - TableListModel.class, - TableRegionModel.class); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(TableModel.class, TableInfoModel.class, TableListModel.class, + TableRegionModel.class); TEST_UTIL.createMultiRegionTable(TABLE, Bytes.toBytes(COLUMN_FAMILY), NUM_REGIONS); byte[] k = new byte[3]; - byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(COLUMN)); + byte[][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(COLUMN)); List puts = new ArrayList<>(); for (byte b1 = 'a'; b1 < 'z'; b1++) { for (byte b2 = 'a'; b2 < 'z'; b2++) { @@ -110,7 +105,7 @@ public static void setUpBeforeClass() throws Exception { Connection connection = TEST_UTIL.getConnection(); - Table table = connection.getTable(TABLE); + Table table = connection.getTable(TABLE); table.put(puts); table.close(); @@ -152,7 +147,7 @@ void checkTableInfo(TableInfoModel model) { TableRegionModel region = regions.next(); boolean found = false; LOG.debug("looking for region " + region.getName()); - for (HRegionLocation e: regionMap) { + for (HRegionLocation e : regionMap) { RegionInfo hri = e.getRegion(); // getRegionNameAsString uses Bytes.toStringBinary which escapes some non-printable // characters @@ -166,8 +161,7 @@ void checkTableInfo(TableInfoModel model) { ServerName serverName = e.getServerName(); InetSocketAddress sa = new InetSocketAddress(serverName.getHostname(), serverName.getPort()); - String location = sa.getHostName() + ":" + - Integer.valueOf(sa.getPort()); + String location = sa.getHostName() + ":" + Integer.valueOf(sa.getPort()); assertEquals(hri.getRegionId(), region.getId()); assertTrue(Bytes.equals(startKey, region.getStartKey())); assertTrue(Bytes.equals(endKey, region.getEndKey())); @@ -191,8 +185,7 @@ public void 
testTableListXML() throws IOException, JAXBException { Response response = client.get("/", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - TableListModel model = (TableListModel) - context.createUnmarshaller() + TableListModel model = (TableListModel) context.createUnmarshaller() .unmarshal(new ByteArrayInputStream(response.getBody())); checkTableList(model); } @@ -229,11 +222,10 @@ public void testTableInfoText() throws IOException { @Test public void testTableInfoXML() throws IOException, JAXBException { - Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - TableInfoModel model = (TableInfoModel) - context.createUnmarshaller() + TableInfoModel model = (TableInfoModel) context.createUnmarshaller() .unmarshal(new ByteArrayInputStream(response.getBody())); checkTableInfo(model); } @@ -271,4 +263,3 @@ public void testTableNotFound() throws IOException { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java index c0e230bcb621..589a0921d383 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -76,7 +76,7 @@ import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestTableScan { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -95,8 +95,7 @@ public class TestTableScan { private static Configuration conf; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -104,17 +103,14 @@ public static void setUpBeforeClass() throws Exception { conf.set(Constants.CUSTOM_FILTERS, "CustomFilter:" + CustomFilter.class.getName()); TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(conf); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); Admin admin = TEST_UTIL.getAdmin(); if (!admin.tableExists(TABLE)) { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLE); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - 
ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); expectedRows1 = TestScannerResource.insertData(conf, TABLE, COLUMN_1, 1.0); @@ -140,8 +136,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=10"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); @@ -151,13 +146,12 @@ public void testSimpleScannerXML() throws IOException, JAXBException { assertEquals(10, count); checkRowsNotNull(model); - //Test with no limit. + // Test with no limit. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); model = (CellSetModel) ush.unmarshal(response.getStream()); @@ -165,7 +159,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { assertEquals(expectedRows1, count); checkRowsNotNull(model); - //Test with start and end row. + // Test with start and end row. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -174,8 +168,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); model = (CellSetModel) ush.unmarshal(response.getStream()); count = TestScannerResource.countCellSet(model); @@ -186,7 +179,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { assertEquals(24, count); checkRowsNotNull(model); - //Test with start row and limit. + // Test with start row and limit. 
builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -195,8 +188,7 @@ public void testSimpleScannerXML() throws IOException, JAXBException { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=15"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); model = (CellSetModel) ush.unmarshal(response.getStream()); @@ -216,24 +208,22 @@ public void testSimpleScannerJson() throws IOException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=2"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class); int count = TestScannerResource.countCellSet(model); assertEquals(2, count); checkRowsNotNull(model); - //Test scanning with no limit. + // Test scanning with no limit. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); model = mapper.readValue(response.getStream(), CellSetModel.class); @@ -241,7 +231,7 @@ public void testSimpleScannerJson() throws IOException { assertEquals(expectedRows2, count); checkRowsNotNull(model); - //Test with start row and end row. + // Test with start row and end row. 
builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -250,8 +240,7 @@ public void testSimpleScannerJson() throws IOException { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); model = mapper.readValue(response.getStream(), CellSetModel.class); RowModel startRow = model.getRows().get(0); @@ -275,12 +264,11 @@ public void testScanUsingListenerUnmarshallerXML() throws Exception { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=10"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - JAXBContext context = JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class, - CellModel.class); + JAXBContext context = + JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class, CellModel.class); Unmarshaller unmarshaller = context.createUnmarshaller(); final ClientSideCellSetModel.Listener listener = new ClientSideCellSetModel.Listener() { @@ -293,19 +281,19 @@ public void handleRowModel(ClientSideCellSetModel helper, RowModel row) { // install the callback on all ClientSideCellSetModel instances unmarshaller.setListener(new Unmarshaller.Listener() { - @Override - public void beforeUnmarshal(Object target, Object parent) { - if (target instanceof ClientSideCellSetModel) { - ((ClientSideCellSetModel) target).setCellSetModelListener(listener); - } + @Override + public void beforeUnmarshal(Object target, Object parent) { + if (target instanceof ClientSideCellSetModel) { + ((ClientSideCellSetModel) target).setCellSetModelListener(listener); } + } - @Override - public void afterUnmarshal(Object target, Object parent) { - if (target instanceof ClientSideCellSetModel) { - ((ClientSideCellSetModel) target).setCellSetModelListener(null); - } + @Override + public void afterUnmarshal(Object target, Object parent) { + if (target instanceof ClientSideCellSetModel) { + ((ClientSideCellSetModel) target).setCellSetModelListener(null); } + } }); // create a new XML parser @@ -321,7 +309,7 @@ public void afterUnmarshal(Object target, Object parent) { @Test public void testStreamingJSON() throws Exception { - //Test with start row and end row. + // Test with start row and end row. 
StringBuilder builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -330,18 +318,17 @@ public void testStreamingJSON() throws Exception { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); int count = 0; - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); JsonFactory jfactory = new JsonFactory(mapper); JsonParser jParser = jfactory.createJsonParser(response.getStream()); boolean found = false; while (jParser.nextToken() != JsonToken.END_OBJECT) { - if(jParser.getCurrentToken() == JsonToken.START_OBJECT && found) { + if (jParser.getCurrentToken() == JsonToken.START_OBJECT && found) { RowModel row = jParser.readValueAs(RowModel.class); assertNotNull(row.getKey()); for (int i = 0; i < row.getCells().size(); i++) { @@ -369,14 +356,13 @@ public void testSimpleScannerProtobuf() throws Exception { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_LIMIT + "=15"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_PROTOBUF); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); int rowCount = readProtobufStream(response.getStream()); assertEquals(15, rowCount); - //Test with start row and end row. + // Test with start row and end row. builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -385,8 +371,7 @@ public void testSimpleScannerProtobuf() throws Exception { builder.append(Constants.SCAN_START_ROW + "=aaa"); builder.append("&"); builder.append(Constants.SCAN_END_ROW + "=aay"); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_PROTOBUF); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_PROTOBUF); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); rowCount = readProtobufStream(response.getStream()); @@ -394,7 +379,7 @@ public void testSimpleScannerProtobuf() throws Exception { } private void checkRowsNotNull(CellSetModel model) { - for (RowModel row: model.getRows()) { + for (RowModel row : model.getRows()) { assertTrue(row.getKey() != null); assertTrue(row.getCells().size() > 0); } @@ -406,7 +391,7 @@ private void checkRowsNotNull(CellSetModel model) { * @return The number of rows in the cell set model. * @throws IOException Signals that an I/O exception has occurred. 
*/ - public int readProtobufStream(InputStream inputStream) throws IOException{ + public int readProtobufStream(InputStream inputStream) throws IOException { DataInputStream stream = new DataInputStream(inputStream); CellSetModel model = null; int rowCount = 0; @@ -441,8 +426,7 @@ public void testScanningUnknownColumnJson() throws IOException { builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=a:test"); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, @@ -464,8 +448,7 @@ public void testSimpleFilter() throws IOException, JAXBException { builder.append(Constants.SCAN_END_ROW + "=aay"); builder.append("&"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("PrefixFilter('aab')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -473,7 +456,7 @@ public void testSimpleFilter() throws IOException, JAXBException { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("aab", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -481,10 +464,9 @@ public void testQualifierAndPrefixFilters() throws IOException, JAXBException { StringBuilder builder = new StringBuilder(); builder.append("/abc*"); builder.append("?"); - builder.append(Constants.SCAN_FILTER + "=" - + URLEncoder.encode("QualifierFilter(=,'binary:1')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + builder.append( + Constants.SCAN_FILTER + "=" + URLEncoder.encode("QualifierFilter(=,'binary:1')", "UTF-8")); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -492,7 +474,7 @@ public void testQualifierAndPrefixFilters() throws IOException, JAXBException { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("abc", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -502,8 +484,7 @@ public void testCompoundFilter() throws IOException, JAXBException { builder.append("?"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("PrefixFilter('abc') AND QualifierFilter(=,'binary:1')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -511,7 +492,7 @@ public void testCompoundFilter() 
throws IOException, JAXBException { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("abc", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -522,8 +503,7 @@ public void testCustomFilter() throws IOException, JAXBException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -531,7 +511,7 @@ public void testCustomFilter() throws IOException, JAXBException { int count = TestScannerResource.countCellSet(model); assertEquals(1, count); assertEquals("abc", - new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); } @Test @@ -542,8 +522,7 @@ public void testNegativeCustomFilter() throws IOException, JAXBException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8")); - Response response = - client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); Unmarshaller ush = ctx.createUnmarshaller(); @@ -572,7 +551,7 @@ public void testReversed() throws IOException, JAXBException { assertEquals(24, count); List rowModels = model.getRows().subList(1, count); - //reversed + // reversed builder = new StringBuilder(); builder.append("/*"); builder.append("?"); @@ -597,9 +576,9 @@ public void testReversed() throws IOException, JAXBException { RowModel reversedRowModel = reversedRowModels.get(i); assertEquals(new String(rowModel.getKey(), StandardCharsets.UTF_8), - new String(reversedRowModel.getKey(), StandardCharsets.UTF_8)); + new String(reversedRowModel.getKey(), StandardCharsets.UTF_8)); assertEquals(new String(rowModel.getCells().get(0).getValue(), StandardCharsets.UTF_8), - new String(reversedRowModel.getCells().get(0).getValue(), StandardCharsets.UTF_8)); + new String(reversedRowModel.getCells().get(0).getValue(), StandardCharsets.UTF_8)); } } @@ -610,12 +589,11 @@ public void testColumnWithEmptyQualifier() throws IOException { builder.append("/*"); builder.append("?"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_EMPTY); - Response response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); CellSetModel model = 
mapper.readValue(response.getStream(), CellSetModel.class); int count = TestScannerResource.countCellSet(model); assertEquals(expectedRows3, count); @@ -631,12 +609,11 @@ public void testColumnWithEmptyQualifier() throws IOException { builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); builder.append("&"); builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_EMPTY); - response = client.get("/" + TABLE + builder.toString(), - Constants.MIMETYPE_JSON); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - mapper = new JacksonJaxbJsonProvider() - .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); model = mapper.readValue(response.getStream(), CellSetModel.class); count = TestScannerResource.countCellSet(model); assertEquals(expectedRows1 + expectedRows3, count); @@ -653,7 +630,7 @@ public CustomFilter(byte[] key) { @Override public boolean filterRowKey(Cell cell) { int cmp = Bytes.compareTo(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), - this.key, 0, this.key.length); + this.key, 0, this.key.length); return cmp != 0; } @@ -676,14 +653,13 @@ public static class ClientSideCellSetModel implements Serializable { * This list is not a real list; instead it will notify a listener whenever JAXB has * unmarshalled the next row. */ - @XmlElement(name="Row") + @XmlElement(name = "Row") private List row; static boolean listenerInvoked = false; /** - * Install a listener for row model on this object. If l is null, the listener - * is removed again. + * Install a listener for row model on this object. If l is null, the listener is removed again. */ public void setCellSetModelListener(final Listener l) { row = (l == null) ? null : new ArrayList() { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java index 782c89cf0bd9..364741cad0a3 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ import org.apache.hbase.thirdparty.com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestVersionResource { @ClassRule @@ -57,8 +57,7 @@ public class TestVersionResource { private static final Logger LOG = LoggerFactory.getLogger(TestVersionResource.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; @@ -66,11 +65,8 @@ public class TestVersionResource { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); - client = new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())); - context = JAXBContext.newInstance( - VersionModel.class, - StorageClusterVersionModel.class); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(VersionModel.class, StorageClusterVersionModel.class); } @AfterClass @@ -123,9 +119,8 @@ public void testGetStargateVersionXML() throws IOException, JAXBException { Response response = client.get("/version", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - VersionModel model = (VersionModel) - context.createUnmarshaller().unmarshal( - new ByteArrayInputStream(response.getBody())); + VersionModel model = (VersionModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); validate(model); LOG.info("success retrieving Stargate version as XML"); } @@ -135,10 +130,9 @@ public void testGetStargateVersionJSON() throws IOException { Response response = client.get("/version", Constants.MIMETYPE_JSON); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); - ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(VersionModel.class, MediaType.APPLICATION_JSON_TYPE); - VersionModel model - = mapper.readValue(response.getBody(), VersionModel.class); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(VersionModel.class, + MediaType.APPLICATION_JSON_TYPE); + VersionModel model = mapper.readValue(response.getBody(), VersionModel.class); validate(model); LOG.info("success retrieving Stargate version as JSON"); } @@ -167,15 +161,12 @@ public void testGetStorageClusterVersionText() throws IOException { } @Test - public void testGetStorageClusterVersionXML() throws IOException, - JAXBException { - Response response = client.get("/version/cluster",Constants.MIMETYPE_XML); + public void testGetStorageClusterVersionXML() throws IOException, JAXBException { + Response response = client.get("/version/cluster", Constants.MIMETYPE_XML); assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); - StorageClusterVersionModel clusterVersionModel = - (StorageClusterVersionModel) - context.createUnmarshaller().unmarshal( - new ByteArrayInputStream(response.getBody())); + StorageClusterVersionModel 
clusterVersionModel = (StorageClusterVersionModel) context + .createUnmarshaller().unmarshal(new ByteArrayInputStream(response.getBody())); assertNotNull(clusterVersionModel); assertNotNull(clusterVersionModel.getVersion()); LOG.info("success retrieving storage cluster version as XML"); @@ -187,12 +178,11 @@ public void testGetStorageClusterVersionJSON() throws IOException { assertEquals(200, response.getCode()); assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); ObjectMapper mapper = new JacksonJaxbJsonProvider() - .locateMapper(StorageClusterVersionModel.class, MediaType.APPLICATION_JSON_TYPE); - StorageClusterVersionModel clusterVersionModel - = mapper.readValue(response.getBody(), StorageClusterVersionModel.class); + .locateMapper(StorageClusterVersionModel.class, MediaType.APPLICATION_JSON_TYPE); + StorageClusterVersionModel clusterVersionModel = + mapper.readValue(response.getBody(), StorageClusterVersionModel.class); assertNotNull(clusterVersionModel); assertNotNull(clusterVersionModel.getVersion()); LOG.info("success retrieving storage cluster version as JSON"); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java index a97f98afbd9e..5033da153412 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.client; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InterruptedIOException; - import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Unmarshaller; import javax.xml.stream.XMLInputFactory; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; - -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; - +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.rest.Constants; import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel; import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel; @@ -41,6 +35,7 @@ import org.apache.hadoop.hbase.rest.model.TableSchemaModel; import org.apache.hadoop.hbase.rest.model.VersionModel; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class RemoteAdmin { @@ -58,7 +53,6 @@ public class RemoteAdmin { /** * Constructor - * * @param client * @param conf */ @@ -70,8 +64,8 @@ static Unmarshaller getUnmarsheller() throws JAXBException { if (versionClusterUnmarshaller == null) { - RemoteAdmin.versionClusterUnmarshaller = JAXBContext.newInstance( - StorageClusterVersionModel.class).createUnmarshaller(); + RemoteAdmin.versionClusterUnmarshaller = + JAXBContext.newInstance(StorageClusterVersionModel.class).createUnmarshaller(); } return RemoteAdmin.versionClusterUnmarshaller; } @@ -101,9 +95,8 @@ public boolean isTableAvailable(String tableName) throws IOException { /** * @return string representing the rest api's version - * @throws IOException - * if the endpoint does not exist, there is a timeout, or some other - * general failure mode + * @throws IOException if the endpoint does not exist, there is a timeout, or some other general + * failure mode */ public VersionModel getRestVersion() throws IOException { @@ -118,26 +111,24 @@ public VersionModel getRestVersion() throws IOException { int code = 0; for (int i = 0; i < maxRetries; i++) { - Response response = client.get(path.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - - VersionModel v = new VersionModel(); - return (VersionModel) v.getObjectFromMessage(response.getBody()); - case 404: - throw new IOException("REST version not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path.toString() - + " returned " + code); + case 200: + + VersionModel v = new VersionModel(); + return (VersionModel) v.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("REST version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " returned " + code); } } throw new IOException("get request to " + path.toString() + " timed out"); @@ -145,40 +136,39 @@ public VersionModel getRestVersion() throws IOException { /** * @return string representing the cluster's version - * @throws IOException if the endpoint does not exist, 
there is a timeout, or some other general failure mode + * @throws IOException if the endpoint does not exist, there is a timeout, or some other general + * failure mode */ public StorageClusterStatusModel getClusterStatus() throws IOException { - StringBuilder path = new StringBuilder(); + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken != null) { + path.append(accessToken); path.append('/'); - if (accessToken !=null) { - path.append(accessToken); - path.append('/'); - } + } path.append("status/cluster"); int code = 0; for (int i = 0; i < maxRetries; i++) { - Response response = client.get(path.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - StorageClusterStatusModel s = new StorageClusterStatusModel(); - return (StorageClusterStatusModel) s.getObjectFromMessage(response - .getBody()); - case 404: - throw new IOException("Cluster version not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path + " returned " + code); + case 200: + StorageClusterStatusModel s = new StorageClusterStatusModel(); + return (StorageClusterStatusModel) s.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("Cluster version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path + " returned " + code); } } throw new IOException("get request to " + path + " timed out"); @@ -186,9 +176,8 @@ public StorageClusterStatusModel getClusterStatus() throws IOException { /** * @return string representing the cluster's version - * @throws IOException - * if the endpoint does not exist, there is a timeout, or some other - * general failure mode + * @throws IOException if the endpoint does not exist, there is a timeout, or some other general + * failure mode */ public StorageClusterVersionModel getClusterVersion() throws IOException { @@ -206,32 +195,30 @@ public StorageClusterVersionModel getClusterVersion() throws IOException { Response response = client.get(path.toString(), Constants.MIMETYPE_XML); code = response.getCode(); switch (code) { - case 200: - try { - - return (StorageClusterVersionModel) getUnmarsheller().unmarshal( - getInputStream(response)); - } catch (JAXBException jaxbe) { - - throw new IOException( - "Issue parsing StorageClusterVersionModel object in XML form: " - + jaxbe.getLocalizedMessage(), jaxbe); - } - case 404: - throw new IOException("Cluster version not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException(path.toString() + " request returned " + code); + case 200: + try { + + return (StorageClusterVersionModel) getUnmarsheller() + .unmarshal(getInputStream(response)); + } catch (JAXBException jaxbe) { + + throw new IOException("Issue parsing StorageClusterVersionModel object in XML form: " + + jaxbe.getLocalizedMessage(), jaxbe); + } + case 404: + throw new IOException("Cluster version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) 
{ + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException(path.toString() + " request returned " + code); } } - throw new IOException("get request to " + path.toString() - + " request timed out"); + throw new IOException("get request to " + path.toString() + " request timed out"); } /** @@ -254,19 +241,19 @@ public boolean isTableAvailable(byte[] tableName) throws IOException { Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - return true; - case 404: - return false; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path.toString() + " returned " + code); + case 200: + return true; + case 404: + return false; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " returned " + code); } } throw new IOException("get request to " + path.toString() + " timed out"); @@ -277,8 +264,7 @@ public boolean isTableAvailable(byte[] tableName) throws IOException { * @param desc table descriptor for table * @throws IOException if a remote or network exception occurs */ - public void createTable(TableDescriptor desc) - throws IOException { + public void createTable(TableDescriptor desc) throws IOException { TableSchemaModel model = new TableSchemaModel(desc); StringBuilder path = new StringBuilder(); path.append('/'); @@ -291,21 +277,21 @@ public void createTable(TableDescriptor desc) path.append("schema"); int code = 0; for (int i = 0; i < maxRetries; i++) { - Response response = client.put(path.toString(), Constants.MIMETYPE_PROTOBUF, - model.createProtobufOutput()); + Response response = + client.put(path.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); code = response.getCode(); switch (code) { - case 201: - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("create request to " + path.toString() + " returned " + code); + case 201: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("create request to " + path.toString() + " returned " + code); } } throw new IOException("create request to " + path.toString() + " timed out"); @@ -325,7 +311,7 @@ public void deleteTable(final String tableName) throws IOException { * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs */ - public void deleteTable(final byte [] tableName) throws IOException { + public void deleteTable(final byte[] tableName) throws IOException { StringBuilder path = new StringBuilder(); path.append('/'); if (accessToken != null) { @@ -340,17 +326,17 @@ public void deleteTable(final byte [] tableName) throws IOException { Response response = client.delete(path.toString()); code = response.getCode(); switch (code) { - case 200: - return; - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw 
(InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("delete request to " + path.toString() + " returned " + code); + case 200: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("delete request to " + path.toString() + " returned " + code); } } throw new IOException("delete request to " + path.toString() + " timed out"); @@ -358,9 +344,8 @@ public void deleteTable(final byte [] tableName) throws IOException { /** * @return string representing the cluster's version - * @throws IOException - * if the endpoint does not exist, there is a timeout, or some other - * general failure mode + * @throws IOException if the endpoint does not exist, there is a timeout, or some other general + * failure mode */ public TableListModel getTableList() throws IOException { @@ -375,34 +360,30 @@ public TableListModel getTableList() throws IOException { for (int i = 0; i < maxRetries; i++) { // Response response = client.get(path.toString(), // Constants.MIMETYPE_XML); - Response response = client.get(path.toString(), - Constants.MIMETYPE_PROTOBUF); + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); code = response.getCode(); switch (code) { - case 200: - TableListModel t = new TableListModel(); - return (TableListModel) t.getObjectFromMessage(response.getBody()); - case 404: - throw new IOException("Table list not found"); - case 509: - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - break; - default: - throw new IOException("get request to " + path.toString() - + " request returned " + code); + case 200: + TableListModel t = new TableListModel(); + return (TableListModel) t.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("Table list not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " request returned " + code); } } - throw new IOException("get request to " + path.toString() - + " request timed out"); + throw new IOException("get request to " + path.toString() + " request timed out"); } /** * Convert the REST server's response to an XML reader. - * * @param response The REST server's response. * @return A reader over the parsed XML document. * @throws IOException If the document fails to parse diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 4ae6d243752b..aedc27f9ae6d 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -195,8 +194,8 @@ protected Result[] buildResultFromModel(final CellSetModel model) { } else { throw new IllegalArgumentException("Invalid familyAndQualifier provided."); } - kvs - .add(new KeyValue(row.getKey(), column, qualifier, cell.getTimestamp(), cell.getValue())); + kvs.add( + new KeyValue(row.getKey(), column, qualifier, cell.getTimestamp(), cell.getValue())); } results.add(Result.create(kvs)); } @@ -209,7 +208,8 @@ protected CellSetModel buildModelFromPut(Put put) { for (List cells : put.getFamilyCellMap().values()) { for (Cell cell : cells) { row.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - ts != HConstants.LATEST_TIMESTAMP ? ts : cell.getTimestamp(), CellUtil.cloneValue(cell))); + ts != HConstants.LATEST_TIMESTAMP ? ts : cell.getTimestamp(), + CellUtil.cloneValue(cell))); } } CellSetModel model = new CellSetModel(); @@ -365,7 +365,7 @@ public void put(Put put) throws IOException { sb.append(toURLEncodedBytes(put.getRow())); for (int i = 0; i < maxRetries; i++) { Response response = - client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { case 200: @@ -420,7 +420,7 @@ public void put(List puts) throws IOException { sb.append("/$multiput"); // can be any nonexistent row for (int i = 0; i < maxRetries; i++) { Response response = - client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { case 200: @@ -521,7 +521,7 @@ public Scanner(Scan scan) throws IOException { sb.append("scanner"); for (int i = 0; i < maxRetries; i++) { Response response = - client.post(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + client.post(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { case 201: @@ -679,7 +679,7 @@ private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier, byte[ for (int i = 0; i < maxRetries; i++) { Response response = - client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { case 200: @@ -715,7 +715,7 @@ private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier, by for (int i = 0; i < maxRetries; i++) { Response response = - client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); int code = response.getCode(); switch (code) { case 200: @@ -873,8 +873,8 @@ private class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder { @Override public CheckAndMutateBuilder qualifier(byte[] qualifier) { - this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" + - " an empty byte array, or just do not call this method if you want a null qualifier"); + this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. 
Consider using" + + " an empty byte array, or just do not call this method if you want a null qualifier"); return this; } @@ -886,7 +886,7 @@ public CheckAndMutateBuilder timeRange(TimeRange timeRange) { @Override public CheckAndMutateBuilder ifNotExists() { throw new UnsupportedOperationException( - "CheckAndMutate for non-equal comparison " + "not implemented"); + "CheckAndMutate for non-equal comparison " + "not implemented"); } @Override @@ -896,7 +896,7 @@ public CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value) return this; } else { throw new UnsupportedOperationException( - "CheckAndMutate for non-equal comparison " + "not implemented"); + "CheckAndMutate for non-equal comparison " + "not implemented"); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java index 8e8ba36c834d..7da38d1679be 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ /** * Tests {@link RemoteAdmin} retries. */ -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestRemoteAdminRetries { @ClassRule @@ -78,7 +78,7 @@ public void setup() throws Exception { } @Test - public void testFailingGetRestVersion() throws Exception { + public void testFailingGetRestVersion() throws Exception { testTimedOutGetCall(new CallExecutor() { @Override public void run() throws Exception { @@ -88,7 +88,7 @@ public void run() throws Exception { } @Test - public void testFailingGetClusterStatus() throws Exception { + public void testFailingGetClusterStatus() throws Exception { testTimedOutGetCall(new CallExecutor() { @Override public void run() throws Exception { @@ -123,7 +123,7 @@ public void testFailingCreateTable() throws Exception { @Override public void run() throws Exception { remoteAdmin - .createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestTable")).build()); + .createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestTable")).build()); } }); verify(client, times(RETRIES)).put(anyString(), anyString(), any()); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java index 55d6d904eedf..aab11eefc00e 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ /** * Test RemoteHTable retries. 
*/ -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestRemoteHTableRetries { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -75,17 +75,14 @@ public void setup() throws Exception { Response response = new Response(509); when(client.get(anyString(), anyString())).thenReturn(response); when(client.delete(anyString())).thenReturn(response); - when(client.put(anyString(), anyString(), any())).thenReturn( - response); - when(client.post(anyString(), anyString(), any())).thenReturn( - response); + when(client.put(anyString(), anyString(), any())).thenReturn(response); + when(client.post(anyString(), anyString(), any())).thenReturn(response); Configuration configuration = TEST_UTIL.getConfiguration(); configuration.setInt("hbase.rest.client.max.retries", RETRIES); configuration.setInt("hbase.rest.client.sleep", SLEEP_TIME); - remoteTable = new RemoteHTable(client, TEST_UTIL.getConfiguration(), - "MyTable"); + remoteTable = new RemoteHTable(client, TEST_UTIL.getConfiguration(), "MyTable"); } @After @@ -156,8 +153,8 @@ public void testCheckAndPut() throws Exception { public void run() throws Exception { Put put = new Put(ROW_1); put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); - remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenPut(put); + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenPut(put); } }); verify(client, times(RETRIES)).put(anyString(), anyString(), any()); @@ -170,9 +167,9 @@ public void testCheckAndDelete() throws Exception { public void run() throws Exception { Put put = new Put(ROW_1); put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); - Delete delete= new Delete(ROW_1); - remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenDelete(delete); + Delete delete = new Delete(ROW_1); + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenDelete(delete); } }); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java index d37c8113fbf8..5466d8ba9762 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, MediumTests.class}) +@Category({ RestTests.class, MediumTests.class }) public class TestRemoteTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -72,7 +72,7 @@ public class TestRemoteTable { private static final String INVALID_URL_CHARS_1 = "|\"\\^{}\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\u0009\u000B\u000C"; - // HColumnDescriptor prevents certain characters in column names. The following + // HColumnDescriptor prevents certain characters in column names. The following // are examples of characters are allowed in column names but are not valid in // URLs. 
private static final String INVALID_URL_CHARS_2 = "|^{}\u0242"; @@ -86,7 +86,7 @@ public class TestRemoteTable { private static final byte[] ROW_1 = Bytes.toBytes("testrow1" + INVALID_URL_CHARS_1); private static final byte[] ROW_2 = Bytes.toBytes("testrow2" + INVALID_URL_CHARS_1); private static final byte[] ROW_3 = Bytes.toBytes("testrow3" + INVALID_URL_CHARS_1); - private static final byte[] ROW_4 = Bytes.toBytes("testrow4"+ INVALID_URL_CHARS_1); + private static final byte[] ROW_4 = Bytes.toBytes("testrow4" + INVALID_URL_CHARS_1); private static final byte[] COLUMN_1 = Bytes.toBytes("a" + INVALID_URL_CHARS_2); private static final byte[] COLUMN_2 = Bytes.toBytes("b" + INVALID_URL_CHARS_2); @@ -102,8 +102,7 @@ public class TestRemoteTable { private static final long TS_1 = TS_2 - ONE_HOUR; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final HBaseRESTTestingUtility REST_TEST_UTIL = - new HBaseRESTTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private RemoteHTable remoteTable; @BeforeClass @@ -113,7 +112,7 @@ public static void setUpBeforeClass() throws Exception { } @Before - public void before() throws Exception { + public void before() throws Exception { Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(TABLE)) { if (admin.isTableEnabled(TABLE)) { @@ -124,10 +123,13 @@ public void before() throws Exception { } TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_1).setMaxVersions(3).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_2).setMaxVersions(3).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_3).setMaxVersions(3).build()) - .build(); + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_1).setMaxVersions(3).build()) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_2).setMaxVersions(3).build()) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_3).setMaxVersions(3).build()) + .build(); admin.createTable(tableDescriptor); try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) { Put put = new Put(ROW_1); @@ -140,8 +142,7 @@ public void before() throws Exception { table.put(put); } remoteTable = new RemoteHTable( - new Client(new Cluster().add("localhost", - REST_TEST_UTIL.getServletPort())), + new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), TEST_UTIL.getConfiguration(), TABLE.toBytes()); } @@ -251,7 +252,7 @@ public void testGet() throws IOException { get.readVersions(2); result = remoteTable.get(get); int count = 0; - for (Cell kv: result.listCells()) { + for (Cell kv : result.listCells()) { if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_1 == kv.getTimestamp()) { assertTrue(CellUtil.matchingValue(kv, VALUE_1)); // @TS_1 count++; @@ -275,7 +276,7 @@ public void testMultiGet() throws Exception { assertEquals(1, results[0].size()); assertEquals(2, results[1].size()); - //Test Versions + // Test Versions gets = new ArrayList<>(2); Get g = new Get(ROW_1); g.readVersions(3); @@ -287,7 +288,7 @@ public void testMultiGet() throws Exception { assertEquals(1, results[0].size()); assertEquals(3, results[1].size()); - //404 + // 404 gets = new ArrayList<>(1); gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE"))); results = remoteTable.get(gets); @@ -345,7 +346,7 @@ public void testPut() throws IOException { assertTrue(Bytes.equals(VALUE_2, 
value)); assertTrue(Bytes.equals(Bytes.toBytes("TestRemoteTable" + VALID_TABLE_NAME_CHARS), - remoteTable.getTableName())); + remoteTable.getTableName())); } @Test @@ -481,7 +482,7 @@ public void testScanner() throws IOException { scanner.close(); - scanner = remoteTable.getScanner(COLUMN_1,QUALIFIER_1); + scanner = remoteTable.getScanner(COLUMN_1, QUALIFIER_1); results = scanner.next(4); assertNotNull(results); assertEquals(4, results.length); @@ -506,18 +507,18 @@ public void testCheckAndDelete() throws IOException { assertEquals(1, remoteTable.exists(Collections.singletonList(get)).length); Delete delete = new Delete(ROW_1); - remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenDelete(delete); + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenDelete(delete); assertFalse(remoteTable.exists(get)); Put put = new Put(ROW_1); put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); remoteTable.put(put); - assertTrue(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_1).thenPut(put)); - assertFalse(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) - .ifEquals(VALUE_2).thenPut(put)); + assertTrue(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_1) + .thenPut(put)); + assertFalse(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1).ifEquals(VALUE_2) + .thenPut(put)); } /** @@ -555,7 +556,7 @@ public void testIteratorScaner() throws IOException { * Test a some methods of class Response. */ @Test - public void testResponse(){ + public void testResponse() { Response response = new Response(200); assertEquals(200, response.getCode()); Header[] headers = new Header[2]; @@ -576,9 +577,8 @@ public void testResponse(){ } /** - * Tests scanner with limitation - * limit the number of rows each scanner scan fetch at life time - * The number of rows returned should be equal to the limit + * Tests scanner with limitation limit the number of rows each scanner scan fetch at life time The + * number of rows returned should be equal to the limit * @throws Exception */ @Test @@ -602,8 +602,8 @@ public void testLimitedScan() throws Exception { table.put(puts); } - remoteTable = - new RemoteHTable(new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), + remoteTable = new RemoteHTable( + new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), TEST_UTIL.getConfiguration(), TABLE.toBytes()); Scan scan = new Scan(); @@ -621,7 +621,6 @@ public void testLimitedScan() throws Exception { /** * Tests keeping a HBase scanner alive for long periods of time. Each call to next() should reset * the ConnectionCache timeout for the scanner's connection. - * * @throws Exception if starting the servlet container or disabling or truncating the table fails */ @Test diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java index 26190f66f472..b59ad9b52a8a 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -66,8 +66,7 @@ public void testParsingClusterVersion() throws Exception { @Test public void testFailOnExternalEntities() throws Exception { - final String externalEntitiesXml = - "" + final String externalEntitiesXml = "" + " ] >" + " &xee;"; Client client = mock(Client.class); @@ -80,9 +79,11 @@ public void testFailOnExternalEntities() throws Exception { admin.getClusterVersion(); fail("Expected getClusterVersion() to throw an exception"); } catch (IOException e) { - assertEquals("Cause of exception ought to be a failure to parse the stream due to our " + - "invalid external entity. Make sure this isn't just a false positive due to " + - "implementation. see HBASE-19020.", UnmarshalException.class, e.getCause().getClass()); + assertEquals( + "Cause of exception ought to be a failure to parse the stream due to our " + + "invalid external entity. Make sure this isn't just a false positive due to " + + "implementation. see HBASE-19020.", + UnmarshalException.class, e.getCause().getClass()); final String exceptionText = StringUtils.stringifyException(e); final String expectedText = "\"xee\""; LOG.debug("exception text: '" + exceptionText + "'", e); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java index b8305d56a180..e05f7626d68d 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestCellModel extends TestModelBase { @ClassRule @@ -45,14 +45,12 @@ public class TestCellModel extends TestModelBase { public TestCellModel() throws Exception { super(CellModel.class); - AS_XML = - "dGVzdHZhbHVl"; - AS_PB = - "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl"; + AS_XML = "dGVzdHZhbHVl"; + AS_PB = "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl"; AS_JSON = - "{\"column\":\"dGVzdGNvbHVtbg==\",\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVl\"}"; + "{\"column\":\"dGVzdGNvbHVtbg==\",\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVl\"}"; } @Override @@ -108,4 +106,3 @@ public void testToString() throws Exception { assertTrue(StringUtils.contains(cellModel.toString(), expectedColumn)); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java index 1d40effb47d0..acd857cbc9d5 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestCellSetModel extends TestModelBase { @ClassRule @@ -50,41 +50,32 @@ public class TestCellSetModel extends TestModelBase { public TestCellSetModel() throws Exception { super(CellSetModel.class); - AS_XML = - "" + - "" + - "" + - "dGVzdHZhbHVlMQ==" + - "" + - "" + - "" + - "dGVzdHZhbHVlMg==" + - "" + - "dGVzdHZhbHVlMw==" + - "" + - ""; - - AS_PB = - "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" + - "MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" + - "Igp0ZXN0dmFsdWUz"; - - AS_XML = - "" + - "" + - "dGVzdHZhbHVlMQ==" + - "" + - "dGVzdHZhbHVlMg==" + - "dGVzdHZhbHVlMw==" + - ""; - - AS_JSON = - "{\"Row\":[{\"key\":\"dGVzdHJvdzE=\"," + - "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\",\"timestamp\":1245219839331," + - "\"$\":\"dGVzdHZhbHVlMQ==\"}]},{\"key\":\"dGVzdHJvdzE=\"," + - "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjI=\",\"timestamp\":1245239813319," + - "\"$\":\"dGVzdHZhbHVlMg==\"},{\"column\":\"dGVzdGNvbHVtbjM=\"," + - "\"timestamp\":1245393318192,\"$\":\"dGVzdHZhbHVlMw==\"}]}]}"; + AS_XML = "" + "" + + "" + + "dGVzdHZhbHVlMQ==" + "" + "" + + "" + + "dGVzdHZhbHVlMg==" + + "" + + "dGVzdHZhbHVlMw==" + "" + ""; + + AS_PB = "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" + + "MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" + + "Igp0ZXN0dmFsdWUz"; + + AS_XML = "" + + "" + + "dGVzdHZhbHVlMQ==" + + "" + + "dGVzdHZhbHVlMg==" + + "dGVzdHZhbHVlMw==" + + ""; + + AS_JSON = "{\"Row\":[{\"key\":\"dGVzdHJvdzE=\"," + + "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\",\"timestamp\":1245219839331," + + "\"$\":\"dGVzdHZhbHVlMQ==\"}]},{\"key\":\"dGVzdHJvdzE=\"," + + "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjI=\",\"timestamp\":1245239813319," + + "\"$\":\"dGVzdHZhbHVlMg==\"},{\"column\":\"dGVzdGNvbHVtbjM=\"," + + "\"timestamp\":1245393318192,\"$\":\"dGVzdHZhbHVlMw==\"}]}]}"; } @Override @@ -147,4 +138,3 @@ public void testFromPB() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java index a52358cbe525..1df9dd12ac7f 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestColumnSchemaModel extends TestModelBase { @ClassRule @@ -45,15 +45,13 @@ public class TestColumnSchemaModel extends TestModelBase { public TestColumnSchemaModel() throws Exception { super(ColumnSchemaModel.class); - AS_XML = - ""; + AS_XML = ""; - AS_JSON = - "{\"name\":\"testcolumn\",\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\"," + - "\"BLOCKCACHE\":\"true\",\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\"," + - "\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}"; + AS_JSON = "{\"name\":\"testcolumn\",\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\"," + + "\"BLOCKCACHE\":\"true\",\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\"," + + "\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}"; } @Override @@ -87,4 +85,3 @@ protected void checkModel(ColumnSchemaModel model) { public void testFromPB() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java index 63124113da51..fac5e8b0c4c0 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,8 +52,7 @@ protected TestModelBase(Class clazz) throws Exception { super(); this.clazz = clazz; context = new JAXBContextResolver().getContext(clazz); - mapper = new JacksonJaxbJsonProvider().locateMapper(clazz, - MediaType.APPLICATION_JSON_TYPE); + mapper = new JacksonJaxbJsonProvider().locateMapper(clazz, MediaType.APPLICATION_JSON_TYPE); } protected abstract T buildTestModel(); @@ -68,19 +67,17 @@ protected String toXML(T model) throws JAXBException { protected String toJSON(T model) throws JAXBException, IOException { StringWriter writer = new StringWriter(); mapper.writeValue(writer, model); -// original marshaller, uncomment this and comment mapper to verify backward compatibility -// ((JSONJAXBContext)context).createJSONMarshaller().marshallToJSON(model, writer); + // original marshaller, uncomment this and comment mapper to verify backward compatibility + // ((JSONJAXBContext)context).createJSONMarshaller().marshallToJSON(model, writer); return writer.toString(); } public T fromJSON(String json) throws JAXBException, IOException { - return (T) - mapper.readValue(json, clazz); + return (T) mapper.readValue(json, clazz); } public T fromXML(String xml) throws JAXBException { - return (T) - context.createUnmarshaller().unmarshal(new StringReader(xml)); + return (T) context.createUnmarshaller().unmarshal(new StringReader(xml)); } @SuppressWarnings("unused") @@ -88,14 +85,12 @@ protected byte[] toPB(ProtobufMessageHandler model) { return model.createProtobufOutput(); } - protected T fromPB(String pb) throws - Exception { - return (T)clazz.getMethod("getObjectFromMessage", byte[].class).invoke( - clazz.getDeclaredConstructor().newInstance(), - Base64.getDecoder().decode(AS_PB)); + protected T fromPB(String pb) throws Exception { + return (T) clazz.getMethod("getObjectFromMessage", byte[].class) + 
.invoke(clazz.getDeclaredConstructor().newInstance(), Base64.getDecoder().decode(AS_PB)); } - protected abstract void checkModel(T model); + protected abstract void checkModel(T model); @Test public void testBuildModel() throws Exception { @@ -124,7 +119,7 @@ public void testToJSON() throws Exception { ObjectNode expObj = mapper.readValue(AS_JSON, ObjectNode.class); ObjectNode actObj = mapper.readValue(toJSON(buildTestModel()), ObjectNode.class); assertEquals(expObj, actObj); - } catch(Exception e) { + } catch (Exception e) { assertEquals(AS_JSON, toJSON(buildTestModel())); } } @@ -134,4 +129,3 @@ public void testFromJSON() throws Exception { checkModel(fromJSON(AS_JSON)); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java index e7a9188b5e35..bfe9005406b5 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,34 +29,33 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestNamespacesInstanceModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestNamespacesInstanceModel.class); - public static final Map NAMESPACE_PROPERTIES = new HashMap<>(); + public static final Map NAMESPACE_PROPERTIES = new HashMap<>(); public static final String NAMESPACE_NAME = "namespaceName"; public TestNamespacesInstanceModel() throws Exception { super(NamespacesInstanceModel.class); - NAMESPACE_PROPERTIES.put("KEY_1","VALUE_1"); - NAMESPACE_PROPERTIES.put("KEY_2","VALUE_2"); - NAMESPACE_PROPERTIES.put("NAME","testNamespace"); + NAMESPACE_PROPERTIES.put("KEY_1", "VALUE_1"); + NAMESPACE_PROPERTIES.put("KEY_2", "VALUE_2"); + NAMESPACE_PROPERTIES.put("NAME", "testNamespace"); - AS_XML = - "" + - "NAMEtestNamespace" + - "KEY_2VALUE_2" + - "KEY_1VALUE_1" + - ""; + AS_XML = "" + + "NAMEtestNamespace" + + "KEY_2VALUE_2" + + "KEY_1VALUE_1" + + ""; AS_PB = "ChUKBE5BTUUSDXRlc3ROYW1lc3BhY2UKEAoFS0VZXzESB1ZBTFVFXzEKEAoFS0VZXzISB1ZBTFVFXzI="; - AS_JSON = "{\"properties\":{\"NAME\":\"testNamespace\"," + - "\"KEY_1\":\"VALUE_1\",\"KEY_2\":\"VALUE_2\"}}"; + AS_JSON = "{\"properties\":{\"NAME\":\"testNamespace\"," + + "\"KEY_1\":\"VALUE_1\",\"KEY_2\":\"VALUE_2\"}}"; } @Override @@ -64,9 +63,9 @@ protected NamespacesInstanceModel buildTestModel() { return buildTestModel(NAMESPACE_NAME, NAMESPACE_PROPERTIES); } - public NamespacesInstanceModel buildTestModel(String namespace, Map properties) { + public NamespacesInstanceModel buildTestModel(String namespace, Map properties) { NamespacesInstanceModel model = new NamespacesInstanceModel(); - for(String key: properties.keySet()){ + for (String key : properties.keySet()) { model.addProperty(key, properties.get(key)); } return model; @@ -78,12 +77,12 @@ protected void checkModel(NamespacesInstanceModel model) { } public void checkModel(NamespacesInstanceModel model, String namespace, - Map properties) { - Map modProperties = model.getProperties(); + Map properties) { + Map 
modProperties = model.getProperties(); assertEquals(properties.size(), modProperties.size()); // Namespace name comes from REST URI, not properties. assertNotSame(namespace, model.getNamespaceName()); - for(String property: properties.keySet()){ + for (String property : properties.keySet()) { assertEquals(properties.get(property), modProperties.get(property)); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java index 5da776ab7350..60666ca8d613 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestNamespacesModel extends TestModelBase { @ClassRule @@ -42,10 +42,9 @@ public class TestNamespacesModel extends TestModelBase { public TestNamespacesModel() throws Exception { super(NamespacesModel.class); - AS_XML = - "" + - "testNamespace1" + - "testNamespace2"; + AS_XML = "" + + "testNamespace1" + + "testNamespace2"; AS_PB = "Cg50ZXN0TmFtZXNwYWNlMQoOdGVzdE5hbWVzcGFjZTI="; @@ -71,7 +70,7 @@ protected void checkModel(NamespacesModel model) { public void checkModel(NamespacesModel model, String... namespaceName) { List namespaces = model.getNamespaces(); assertEquals(namespaceName.length, namespaces.size()); - for(int i = 0; i < namespaceName.length; i++){ + for (int i = 0; i < namespaceName.length; i++) { assertTrue(namespaces.contains(namespaceName[i])); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java index 99f8e3df447e..c8b99932264e 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestRowModel extends TestModelBase { @ClassRule @@ -48,12 +48,11 @@ public class TestRowModel extends TestModelBase { public TestRowModel() throws Exception { super(RowModel.class); AS_XML = - "" + - "dGVzdHZhbHVlMQ=="; + "" + + "dGVzdHZhbHVlMQ=="; - AS_JSON = - "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," + - "\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVlMQ==\"}]}"; + AS_JSON = "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," + + "\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVlMQ==\"}]}"; } @Override @@ -78,7 +77,7 @@ protected void checkModel(RowModel model) { @Override public void testFromPB() throws Exception { - //do nothing row model has no PB + // do nothing row model has no PB } @Test @@ -102,4 +101,3 @@ public void testToString() throws Exception { assertTrue(StringUtils.contains(rowModel.toString(), expectedRowKey)); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java index 4835b7b0fc93..9d5f61344f0d 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.databind.JsonMappingException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.rest.ScannerResultGenerator; import org.apache.hadoop.hbase.testclassification.RestTests; @@ -32,7 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestScannerModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -55,19 +54,18 @@ public TestScannerModel() throws Exception { super(ScannerModel.class); AS_XML = "" - + "" - + "Y29sdW1uMQ== Y29sdW1uMjpmb28=" - + "private public"; + + "" + + "Y29sdW1uMQ== Y29sdW1uMjpmb28=" + + "private public"; AS_JSON = "{\"batch\":100,\"caching\":1000,\"cacheBlocks\":false,\"endRow\":\"enp5eng=\"," - + "\"endTime\":1245393318192,\"maxVersions\":2147483647,\"startRow\":\"YWJyYWNhZGFicmE=\"," - + "\"startTime\":1245219839331,\"column\":[\"Y29sdW1uMQ==\",\"Y29sdW1uMjpmb28=\"]," - +"\"labels\":[\"private\",\"public\"]," - +"\"limit\":10000}"; + + "\"endTime\":1245393318192,\"maxVersions\":2147483647,\"startRow\":\"YWJyYWNhZGFicmE=\"," + + "\"startTime\":1245219839331,\"column\":[\"Y29sdW1uMQ==\",\"Y29sdW1uMjpmb28=\"]," + + "\"labels\":[\"private\",\"public\"]," + "\"limit\":10000}"; AS_PB = "CgthYnJhY2FkYWJyYRIFenp5engaB2NvbHVtbjEaC2NvbHVtbjI6Zm9vIGQo47qL554kMLDi57mfJDj" - +"/////B0joB1IHcHJpdmF0ZVIGcHVibGljWABgkE4="; + + "/////B0joB1IHcHJpdmF0ZVIGcHVibGljWABgkE4="; } @Override diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java index 2611a067437a..4f61d89046bc 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestStorageClusterStatusModel extends TestModelBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -39,49 +39,45 @@ public class TestStorageClusterStatusModel extends TestModelBase" + - "" + - "" + - "" + - "" + - "" + - ""; + AS_XML = "" + + "" + + "" + + "" + + "" + "" + + ""; - AS_PB = - "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" + - "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" + - "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8="; + AS_PB = "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" + + "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" + + "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8="; - - //Using jackson will break json backward compatibilty for this representation - //but the original one was broken as it would only print one Node element - //so the format itself was broken + // Using jackson will break json backward compatibilty for this representation + // but the original one was broken as it would only print one Node element + // so the format itself was broken AS_JSON = - "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," + - "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," + - "\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + - "\"readRequestsCount\":1,\"cpRequestsCount\":1,\"writeRequestsCount\":2," + - "\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1," + - "\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}],\"requests\":0," + - "\"startCode\":1245219839331,\"heapSizeMB\":128,\"maxHeapSizeMB\":1024}," + - "{\"name\":\"test2\",\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\"," + - "\"stores\":1,\"storefiles\":1,\"storefileSizeMB\":0,\"memStoreSizeMB\":0," + - "\"storefileIndexSizeKB\":0,\"readRequestsCount\":1,\"cpRequestsCount\":1," + - "\"writeRequestsCount\":2,\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1," + - "\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}]," + - "\"requests\":0,\"startCode\":1245239331198,\"heapSizeMB\":512," + - "\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}"; + "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," + + "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," + + "\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + + "\"readRequestsCount\":1,\"cpRequestsCount\":1,\"writeRequestsCount\":2," + + "\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1," + + "\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}],\"requests\":0," + + 
"\"startCode\":1245219839331,\"heapSizeMB\":128,\"maxHeapSizeMB\":1024}," + + "{\"name\":\"test2\",\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\"," + + "\"stores\":1,\"storefiles\":1,\"storefileSizeMB\":0,\"memStoreSizeMB\":0," + + "\"storefileIndexSizeKB\":0,\"readRequestsCount\":1,\"cpRequestsCount\":1," + + "\"writeRequestsCount\":2,\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1," + + "\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}]," + + "\"requests\":0,\"startCode\":1245239331198,\"heapSizeMB\":512," + + "\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}"; } @Override @@ -90,11 +86,11 @@ protected StorageClusterStatusModel buildTestModel() { model.setRegions(2); model.setRequests(0); model.setAverageLoad(1.0); - model.addLiveNode("test1", 1245219839331L, 128, 1024) - .addRegion(Bytes.toBytes("hbase:root,,0"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1); - model.addLiveNode("test2", 1245239331198L, 512, 1024) - .addRegion(Bytes.toBytes(TableName.META_TABLE_NAME+",,1246000043724"),1, 1, 0, 0, 0, - 1, 1, 2, 1, 1, 1, 1, 1); + model.addLiveNode("test1", 1245219839331L, 128, 1024).addRegion(Bytes.toBytes("hbase:root,,0"), + 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1); + model.addLiveNode("test2", 1245239331198L, 512, 1024).addRegion( + Bytes.toBytes(TableName.META_TABLE_NAME + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, + 1, 1); return model; } @@ -103,18 +99,15 @@ protected void checkModel(StorageClusterStatusModel model) { assertEquals(2, model.getRegions()); assertEquals(0, model.getRequests()); assertEquals(1.0, model.getAverageLoad(), 0.0); - Iterator nodes = - model.getLiveNodes().iterator(); + Iterator nodes = model.getLiveNodes().iterator(); StorageClusterStatusModel.Node node = nodes.next(); assertEquals("test1", node.getName()); assertEquals(1245219839331L, node.getStartCode()); assertEquals(128, node.getHeapSizeMB()); assertEquals(1024, node.getMaxHeapSizeMB()); - Iterator regions = - node.getRegions().iterator(); + Iterator regions = node.getRegions().iterator(); StorageClusterStatusModel.Node.Region region = regions.next(); - assertTrue(Bytes.toString(region.getName()).equals( - "hbase:root,,0")); + assertTrue(Bytes.toString(region.getName()).equals("hbase:root,,0")); assertEquals(1, region.getStores()); assertEquals(1, region.getStorefiles()); assertEquals(0, region.getStorefileSizeMB()); @@ -135,8 +128,7 @@ protected void checkModel(StorageClusterStatusModel model) { assertEquals(1024, node.getMaxHeapSizeMB()); regions = node.getRegions().iterator(); region = regions.next(); - assertEquals(Bytes.toString(region.getName()), - TableName.META_TABLE_NAME+",,1246000043724"); + assertEquals(Bytes.toString(region.getName()), TableName.META_TABLE_NAME + ",,1246000043724"); assertEquals(1, region.getStores()); assertEquals(1, region.getStorefiles()); assertEquals(0, region.getStorefileSizeMB()); @@ -154,4 +146,3 @@ protected void checkModel(StorageClusterStatusModel model) { assertFalse(nodes.hasNext()); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java index b6101462aa09..19620298d9be 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * 
or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestStorageClusterVersionModel extends TestModelBase { @ClassRule @@ -36,9 +36,8 @@ public class TestStorageClusterVersionModel extends TestModelBase"+ - ""; + AS_XML = "" + + ""; AS_JSON = "{\"Version\": \"0.0.1-testing\"}"; } @@ -57,7 +56,6 @@ protected void checkModel(StorageClusterVersionModel model) { @Override public void testFromPB() throws Exception { - //ignore test no pb + // ignore test no pb } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java index 2ada01c58877..a00608480e63 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableInfoModel extends TestModelBase { @ClassRule @@ -44,22 +44,19 @@ public class TestTableInfoModel extends TestModelBase { public TestTableInfoModel() throws Exception { super(TableInfoModel.class); - AS_XML = - ""; + AS_XML = ""; - AS_PB = - "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" + - "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY="; + AS_PB = "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" + + "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY="; - AS_JSON = - "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," + - "\"location\":\"testhost:9876\",\"" + - "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + - "startKey\":\"YWJyYWNhZGJyYQ==\"}]}"; + AS_JSON = "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," + + "\"location\":\"testhost:9876\",\"" + + "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + + "startKey\":\"YWJyYWNhZGJyYQ==\"}]}"; } @Override @@ -98,4 +95,3 @@ public void testFromPB() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java index eca14978c909..9f640b8475a3 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableListModel extends TestModelBase { @ClassRule @@ -40,14 +40,12 @@ public class TestTableListModel extends TestModelBase { public TestTableListModel() throws Exception { super(TableListModel.class); - AS_XML = - "
      "; + AS_XML = "
      "; AS_PB = "CgZ0YWJsZTEKBnRhYmxlMgoGdGFibGUz"; - AS_JSON = - "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}"; + AS_JSON = "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}"; } @Override @@ -71,4 +69,3 @@ protected void checkModel(TableListModel model) { assertFalse(tables.hasNext()); } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java index 20577cfc536f..4c5767fc128d 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableRegionModel extends TestModelBase { @ClassRule @@ -48,21 +48,19 @@ public TestTableRegionModel() throws Exception { super(TableRegionModel.class); AS_XML = - ""; + ""; - AS_JSON = - "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," + - "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + - "startKey\":\"YWJyYWNhZGJyYQ==\"}"; + AS_JSON = "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," + + "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + + "startKey\":\"YWJyYWNhZGJyYQ==\"}"; } @Override protected TableRegionModel buildTestModel() { - TableRegionModel model = - new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION); + TableRegionModel model = new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION); return model; } @@ -72,9 +70,8 @@ protected void checkModel(TableRegionModel model) { assertTrue(Bytes.equals(model.getEndKey(), END_KEY)); assertEquals(ID, model.getId()); assertEquals(LOCATION, model.getLocation()); - assertEquals(model.getName(), - TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) + - ".ad9860f031282c46ed431d7af8f94aca."); + assertEquals(model.getName(), TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) + + ".ad9860f031282c46ed431d7af8f94aca."); } @Test @@ -82,7 +79,7 @@ public void testGetName() { TableRegionModel model = buildTestModel(); String modelName = model.getName(); RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(TABLE)).setStartKey(START_KEY) - .setEndKey(END_KEY).setRegionId(ID).build(); + .setEndKey(END_KEY).setRegionId(ID).build(); assertEquals(modelName, hri.getRegionNameAsString()); } @@ -96,7 +93,6 @@ public void testSetName() { @Override public void testFromPB() throws Exception { - //no pb ignore + // no pb ignore } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java index 6b50ab700489..63ccc7f78a37 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * 
or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestTableSchemaModel extends TestModelBase { @ClassRule @@ -51,24 +51,21 @@ public TestTableSchemaModel() throws Exception { super(TableSchemaModel.class); testColumnSchemaModel = new TestColumnSchemaModel(); - AS_XML = - "" + - "" + - "" + - ""; - - AS_PB = - "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" + - "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" + - "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" + - "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA"; - - AS_JSON = - "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," + - "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," + - "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," + - "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}"; + AS_XML = "" + + "" + + "" + + ""; + + AS_PB = "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" + + "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" + + "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" + + "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA"; + + AS_JSON = "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," + + "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," + + "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," + + "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}"; } @Override @@ -122,4 +119,3 @@ public void testFromPB() throws Exception { } } - diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java index b35295059cfa..4da8f24db068 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RestTests.class, SmallTests.class}) +@Category({ RestTests.class, SmallTests.class }) public class TestVersionModel extends TestModelBase { @ClassRule @@ -33,28 +33,23 @@ public class TestVersionModel extends TestModelBase { HBaseClassTestRule.forClass(TestVersionModel.class); private static final String REST_VERSION = "0.0.1"; - private static final String OS_VERSION = - "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"; - private static final String JVM_VERSION = - "Sun Microsystems Inc. 1.6.0_13-11.3-b02"; + private static final String OS_VERSION = "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"; + private static final String JVM_VERSION = "Sun Microsystems Inc. 
1.6.0_13-11.3-b02"; private static final String JETTY_VERSION = "6.1.14"; private static final String JERSEY_VERSION = "1.1.0-ea"; public TestVersionModel() throws Exception { super(VersionModel.class); - AS_XML = - ""; + AS_XML = ""; - AS_PB = - "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" + - "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE="; + AS_PB = "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" + + "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE="; - AS_JSON = - "{\"JVM\":\"Sun Microsystems Inc. 1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," + - "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" + - "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}"; + AS_JSON = "{\"JVM\":\"Sun Microsystems Inc. 1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," + + "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" + + "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}"; } @Override @@ -77,4 +72,3 @@ protected void checkModel(VersionModel model) { assertEquals(JERSEY_VERSION, model.getJerseyVersion()); } } - diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index e2f16df0809e..fbb7463cda75 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -36,203 +36,6 @@ true true - - - - - - ${project.build.directory} - - hbase-webapps/** - - - - src/main/resources - - **/** - - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - 2048 - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - - org.apache.hbase - hbase-resource-bundle - ${project.version} - - - - - default - - false - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-jar-plugin - - - - org/apache/jute/** - org/apache/zookeeper/** - **/*.jsp - hbase-site.xml - hdfs-site.xml - log4j.properties - mapred-queues.xml - mapred-site.xml - - - - - - maven-antrun-plugin - - - - generate - generate-sources - - - - - - - - - - - - - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - jspcSource-packageInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-jamon - ${project.build.directory}/generated-sources/java - - - - - - - org.jamon - jamon-maven-plugin - - - generate-sources - - translate - - - src/main/jamon - target/generated-jamon - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.hbase.thirdparty @@ -304,8 +107,8 @@ hbase-balancer - hbase-balancer org.apache.hbase + hbase-balancer test-jar test @@ -372,7 +175,7 @@ org.glassfish.web javax.servlet.jsp - + javax.servlet.jsp javax.servlet.jsp-api @@ -537,6 +340,203 @@ test + + + + + + ${project.build.directory} + + hbase-webapps/** + + + + src/main/resources + + **/** + + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + 
+ src/test/resources + + **/** + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + 2048 + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + + org.apache.hbase + hbase-resource-bundle + ${project.version} + + + + + default + + false + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + org/apache/jute/** + org/apache/zookeeper/** + **/*.jsp + hbase-site.xml + hdfs-site.xml + log4j.properties + mapred-queues.xml + mapred-site.xml + + + + + + maven-antrun-plugin + + + + generate + + run + + generate-sources + + + + + + + + + + + + + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + jspcSource-packageInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-jamon + ${project.build.directory}/generated-sources/java + + + + + + + org.jamon + jamon-maven-plugin + + + + translate + + generate-sources + + src/main/jamon + target/generated-jamon + + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + net.revelc.code + warbucks-maven-plugin + + + @@ -549,10 +549,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -600,15 +600,17 @@ make + + run + compile - run - + - + @@ -626,7 +628,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -705,10 +709,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources @@ -777,10 +755,10 @@ - - false - true - + + false + true + @@ -809,7 +787,7 @@ - + @@ -818,6 +796,31 @@ + + + org.apache.maven.plugins + maven-eclipse-plugin + + + org.jamon.project.jamonnature + + + org.jamon.project.templateBuilder + org.eclipse.jdt.core.javabuilder + org.jamon.project.markerUpdater + + + + .settings/org.jamon.prefs + # now + eclipse.preferences.version=1 + templateSourceDir=src/main/jamon + templateOutputDir=target/generated-jamon + + + + + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java index 1f351c52da29..52bca682c812 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,14 +22,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Implementations of this interface will keep and return to clients - * implementations of classes providing API to execute - * coordinated operations. This interface is client-side, so it does NOT - * include methods to retrieve the particular interface implementations. - * - * For each coarse-grained area of operations there will be a separate - * interface with implementation, providing API for relevant operations - * requiring coordination. 
+ * Implementations of this interface will keep and return to clients implementations of classes + * providing API to execute coordinated operations. This interface is client-side, so it does NOT + * include methods to retrieve the particular interface implementations. For each coarse-grained + * area of operations there will be a separate interface with implementation, providing API for + * relevant operations requiring coordination. */ @InterfaceAudience.Private public interface CoordinatedStateManager { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java index 4b4aef30bbc5..7e656260bdbe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ExecutorStatusChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,8 +31,8 @@ import org.slf4j.LoggerFactory; /** - * The Class ExecutorStatusChore for collect Executor status info periodically - * and report to metrics system + * The Class ExecutorStatusChore for collect Executor status info periodically and report to metrics + * system */ @InterfaceAudience.Private public class ExecutorStatusChore extends ScheduledChore { @@ -52,7 +52,7 @@ public ExecutorStatusChore(int sleepTime, Stoppable stopper, ExecutorService ser @Override protected void chore() { - try{ + try { // thread pool monitor Map statuses = service.getAllExecutorStatuses(); for (Map.Entry statusEntry : statuses.entrySet()) { @@ -71,7 +71,7 @@ protected void chore() { queued.set(queueSize); running.set(runningSize); } - } catch(Throwable e) { + } catch (Throwable e) { LOG.error(e.getMessage(), e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java index b5329b136293..d7395d1cc617 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -92,8 +92,8 @@ */ @InterfaceAudience.Private public abstract class HBaseRpcServicesBase> - implements ClientMetaService.BlockingInterface, AdminService.BlockingInterface, - HBaseRPCErrorHandler, PriorityFunction, ConfigurationObserver { + implements ClientMetaService.BlockingInterface, AdminService.BlockingInterface, + HBaseRPCErrorHandler, PriorityFunction, ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(HBaseRpcServicesBase.class); @@ -120,9 +120,9 @@ protected HBaseRpcServicesBase(S server, String processName) throws IOException final RpcSchedulerFactory rpcSchedulerFactory; try { rpcSchedulerFactory = getRpcSchedulerFactoryClass(conf).asSubclass(RpcSchedulerFactory.class) - .getDeclaredConstructor().newInstance(); + .getDeclaredConstructor().newInstance(); } catch (NoSuchMethodException | InvocationTargetException | InstantiationException - | IllegalAccessException e) { + | IllegalAccessException e) { throw new IllegalArgumentException(e); } String hostname = DNS.getHostname(conf, getDNSServerType()); @@ -136,20 +136,20 @@ protected HBaseRpcServicesBase(S server, String processName) throws IOException priority = createPriority(); // Using Address means we don't get the IP too. Shorten it more even to just the host name // w/o the domain. - final String name = processName + "/" + - Address.fromParts(initialIsa.getHostName(), initialIsa.getPort()).toStringWithoutDomain(); + final String name = processName + "/" + + Address.fromParts(initialIsa.getHostName(), initialIsa.getPort()).toStringWithoutDomain(); server.setName(name); // Set how many times to retry talking to another server over Connection. ConnectionUtils.setServerSideHConnectionRetriesConfig(conf, name, LOG); boolean reservoirEnabled = - conf.getBoolean(ByteBuffAllocator.ALLOCATOR_POOL_ENABLED_KEY, defaultReservoirEnabled()); + conf.getBoolean(ByteBuffAllocator.ALLOCATOR_POOL_ENABLED_KEY, defaultReservoirEnabled()); try { // use final bindAddress for this server. rpcServer = RpcServerFactory.createRpcServer(server, name, getServices(), bindAddress, conf, rpcSchedulerFactory.create(conf, this, server), reservoirEnabled); } catch (BindException be) { - throw new IOException(be.getMessage() + ". To switch ports use the '" + getPortConfigName() + - "' configuration property.", be.getCause() != null ? be.getCause() : be); + throw new IOException(be.getMessage() + ". To switch ports use the '" + getPortConfigName() + + "' configuration property.", be.getCause() != null ? 
be.getCause() : be); } final InetSocketAddress address = rpcServer.getListenerAddress(); if (address == null) { @@ -183,7 +183,7 @@ protected final void internalStart(ZKWatcher zkWatcher) { accessChecker = new NoopAccessChecker(getConfiguration()); } zkPermissionWatcher = - new ZKPermissionWatcher(zkWatcher, accessChecker.getAuthManager(), getConfiguration()); + new ZKPermissionWatcher(zkWatcher, accessChecker.getAuthManager(), getConfiguration()); try { zkPermissionWatcher.start(); } catch (KeeperException e) { @@ -193,7 +193,7 @@ protected final void internalStart(ZKWatcher zkWatcher) { } protected final void requirePermission(String request, Permission.Action perm) - throws IOException { + throws IOException { if (accessChecker != null) { accessChecker.requirePermission(RpcServer.getRequestUser().orElse(null), request, null, perm); } @@ -260,44 +260,44 @@ public void onConfigurationChange(Configuration conf) { @Override public GetClusterIdResponse getClusterId(RpcController controller, GetClusterIdRequest request) - throws ServiceException { + throws ServiceException { return GetClusterIdResponse.newBuilder().setClusterId(server.getClusterId()).build(); } @Override public GetActiveMasterResponse getActiveMaster(RpcController controller, - GetActiveMasterRequest request) throws ServiceException { + GetActiveMasterRequest request) throws ServiceException { GetActiveMasterResponse.Builder builder = GetActiveMasterResponse.newBuilder(); server.getActiveMaster() - .ifPresent(name -> builder.setServerName(ProtobufUtil.toServerName(name))); + .ifPresent(name -> builder.setServerName(ProtobufUtil.toServerName(name))); return builder.build(); } @Override public GetMastersResponse getMasters(RpcController controller, GetMastersRequest request) - throws ServiceException { + throws ServiceException { GetMastersResponse.Builder builder = GetMastersResponse.newBuilder(); server.getActiveMaster() - .ifPresent(activeMaster -> builder.addMasterServers(GetMastersResponseEntry.newBuilder() - .setServerName(ProtobufUtil.toServerName(activeMaster)).setIsActive(true))); + .ifPresent(activeMaster -> builder.addMasterServers(GetMastersResponseEntry.newBuilder() + .setServerName(ProtobufUtil.toServerName(activeMaster)).setIsActive(true))); server.getBackupMasters() - .forEach(backupMaster -> builder.addMasterServers(GetMastersResponseEntry.newBuilder() - .setServerName(ProtobufUtil.toServerName(backupMaster)).setIsActive(false))); + .forEach(backupMaster -> builder.addMasterServers(GetMastersResponseEntry.newBuilder() + .setServerName(ProtobufUtil.toServerName(backupMaster)).setIsActive(false))); return builder.build(); } @Override public GetMetaRegionLocationsResponse getMetaRegionLocations(RpcController controller, - GetMetaRegionLocationsRequest request) throws ServiceException { + GetMetaRegionLocationsRequest request) throws ServiceException { GetMetaRegionLocationsResponse.Builder builder = GetMetaRegionLocationsResponse.newBuilder(); server.getMetaLocations() - .forEach(location -> builder.addMetaLocations(ProtobufUtil.toRegionLocation(location))); + .forEach(location -> builder.addMetaLocations(ProtobufUtil.toRegionLocation(location))); return builder.build(); } @Override public final GetBootstrapNodesResponse getBootstrapNodes(RpcController controller, - GetBootstrapNodesRequest request) throws ServiceException { + GetBootstrapNodesRequest request) throws ServiceException { int maxNodeCount = server.getConfiguration().getInt(CLIENT_BOOTSTRAP_NODE_LIMIT, DEFAULT_CLIENT_BOOTSTRAP_NODE_LIMIT); 
ReservoirSample sample = new ReservoirSample<>(maxNodeCount); @@ -305,13 +305,13 @@ public final GetBootstrapNodesResponse getBootstrapNodes(RpcController controlle GetBootstrapNodesResponse.Builder builder = GetBootstrapNodesResponse.newBuilder(); sample.getSamplingResult().stream().map(ProtobufUtil::toServerName) - .forEach(builder::addServerName); + .forEach(builder::addServerName); return builder.build(); } @Override public UpdateConfigurationResponse updateConfiguration(RpcController controller, - UpdateConfigurationRequest request) throws ServiceException { + UpdateConfigurationRequest request) throws ServiceException { try { requirePermission("updateConfiguration", Permission.Action.ADMIN); this.server.updateConfiguration(); @@ -324,24 +324,23 @@ public UpdateConfigurationResponse updateConfiguration(RpcController controller, @Override @QosPriority(priority = HConstants.ADMIN_QOS) public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controller, - final ClearSlowLogResponseRequest request) throws ServiceException { + final ClearSlowLogResponseRequest request) throws ServiceException { try { requirePermission("clearSlowLogsResponses", Permission.Action.ADMIN); } catch (IOException e) { throw new ServiceException(e); } final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder(); - boolean slowLogsCleaned = Optional.ofNullable(namedQueueRecorder) - .map( - queueRecorder -> queueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG)) - .orElse(false); + boolean slowLogsCleaned = Optional.ofNullable(namedQueueRecorder).map( + queueRecorder -> queueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG)) + .orElse(false); ClearSlowLogResponses clearSlowLogResponses = - ClearSlowLogResponses.newBuilder().setIsCleaned(slowLogsCleaned).build(); + ClearSlowLogResponses.newBuilder().setIsCleaned(slowLogsCleaned).build(); return clearSlowLogResponses; } private List getSlowLogPayloads(SlowLogResponseRequest request, - NamedQueueRecorder namedQueueRecorder) { + NamedQueueRecorder namedQueueRecorder) { if (namedQueueRecorder == null) { return Collections.emptyList(); } @@ -350,34 +349,34 @@ private List getSlowLogPayloads(SlowLogResponseRequest request, namedQueueGetRequest.setNamedQueueEvent(RpcLogDetails.SLOW_LOG_EVENT); namedQueueGetRequest.setSlowLogResponseRequest(request); NamedQueueGetResponse namedQueueGetResponse = - namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); - slowLogPayloads = namedQueueGetResponse != null ? namedQueueGetResponse.getSlowLogPayloads() : - Collections.emptyList(); + namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); + slowLogPayloads = namedQueueGetResponse != null ? 
namedQueueGetResponse.getSlowLogPayloads() + : Collections.emptyList(); return slowLogPayloads; } @Override @QosPriority(priority = HConstants.ADMIN_QOS) public HBaseProtos.LogEntry getLogEntries(RpcController controller, - HBaseProtos.LogRequest request) throws ServiceException { + HBaseProtos.LogRequest request) throws ServiceException { try { final String logClassName = request.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("SlowLogResponseRequest")) { SlowLogResponseRequest slowLogResponseRequest = - (SlowLogResponseRequest) method.invoke(null, request.getLogMessage()); + (SlowLogResponseRequest) method.invoke(null, request.getLogMessage()); final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder(); final List slowLogPayloads = - getSlowLogPayloads(slowLogResponseRequest, namedQueueRecorder); + getSlowLogPayloads(slowLogResponseRequest, namedQueueRecorder); SlowLogResponses slowLogResponses = - SlowLogResponses.newBuilder().addAllSlowLogPayloads(slowLogPayloads).build(); + SlowLogResponses.newBuilder().addAllSlowLogPayloads(slowLogPayloads).build(); return HBaseProtos.LogEntry.newBuilder() - .setLogClassName(slowLogResponses.getClass().getName()) - .setLogMessage(slowLogResponses.toByteString()).build(); + .setLogClassName(slowLogResponses.getClass().getName()) + .setLogMessage(slowLogResponses.toByteString()).build(); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { + | InvocationTargetException e) { LOG.error("Error while retrieving log entries.", e); throw new ServiceException(e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java index c28ea29215a4..787a2082fdc5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -76,7 +76,7 @@ */ @InterfaceAudience.Private public abstract class HBaseServerBase> extends Thread - implements Server, ConfigurationObserver, ConnectionRegistryEndpoint { + implements Server, ConfigurationObserver, ConnectionRegistryEndpoint { private static final Logger LOG = LoggerFactory.getLogger(HBaseServerBase.class); @@ -192,10 +192,10 @@ private static void setupWindows(final Configuration conf, ConfigurationManager protected final synchronized void setupClusterConnection() throws IOException { if (asyncClusterConnection == null) { InetSocketAddress localAddress = - new InetSocketAddress(rpcServices.getSocketAddress().getAddress(), 0); + new InetSocketAddress(rpcServices.getSocketAddress().getAddress(), 0); User user = userProvider.getCurrent(); asyncClusterConnection = - ClusterConnectionFactory.createAsyncClusterConnection(this, conf, localAddress, user); + ClusterConnectionFactory.createAsyncClusterConnection(this, conf, localAddress, user); } } @@ -216,7 +216,7 @@ protected final void initializeFileSystem() throws IOException { // underlying hadoop hdfs accessors will be going against wrong filesystem // (unless all is set to defaults). 
String rootDirUri = - CommonFSUtils.getDirUri(this.conf, new Path(conf.get(HConstants.HBASE_DIR))); + CommonFSUtils.getDirUri(this.conf, new Path(conf.get(HConstants.HBASE_DIR))); if (rootDirUri != null) { CommonFSUtils.setFsDefault(this.conf, rootDirUri); } @@ -224,15 +224,15 @@ protected final void initializeFileSystem() throws IOException { this.dataFs = new HFileSystem(this.conf, useHBaseChecksum); this.dataRootDir = CommonFSUtils.getRootDir(this.conf); this.tableDescriptors = new FSTableDescriptors(this.dataFs, this.dataRootDir, - !canUpdateTableDescriptor(), cacheTableDescriptor()); + !canUpdateTableDescriptor(), cacheTableDescriptor()); } public HBaseServerBase(Configuration conf, String name) - throws ZooKeeperConnectionException, IOException { + throws ZooKeeperConnectionException, IOException { super(name); // thread name this.conf = conf; this.eventLoopGroupConfig = - NettyEventLoopGroupConfig.setup(conf, getClass().getSimpleName() + "-EventLoopGroup"); + NettyEventLoopGroupConfig.setup(conf, getClass().getSimpleName() + "-EventLoopGroup"); this.startcode = EnvironmentEdgeManager.currentTime(); this.userProvider = UserProvider.instantiate(conf); this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000); @@ -241,8 +241,8 @@ public HBaseServerBase(Configuration conf, String name) this.rpcServices = createRpcServices(); useThisHostnameInstead = getUseThisHostnameInstead(conf); InetSocketAddress addr = rpcServices.getSocketAddress(); - String hostName = StringUtils.isBlank(useThisHostnameInstead) ? addr.getHostName() : - this.useThisHostnameInstead; + String hostName = StringUtils.isBlank(useThisHostnameInstead) ? addr.getHostName() + : this.useThisHostnameInstead; serverName = ServerName.valueOf(hostName, addr.getPort(), this.startcode); // login the zookeeper client principal (if using security) ZKAuthentication.loginClient(this.conf, HConstants.ZK_CLIENT_KEYTAB_FILE, @@ -253,7 +253,7 @@ public HBaseServerBase(Configuration conf, String name) // or process owner as default super user. Superusers.initialize(conf); zooKeeper = - new ZKWatcher(conf, getProcessName() + ":" + addr.getPort(), this, canCreateBaseZNode()); + new ZKWatcher(conf, getProcessName() + ":" + addr.getPort(), this, canCreateBaseZNode()); this.configurationManager = new ConfigurationManager(); setupWindows(conf, configurationManager); @@ -285,8 +285,8 @@ public HBaseServerBase(Configuration conf, String name) * Puts up the webui. */ private void putUpWebUI() throws IOException { - int port = - this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT); + int port = this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, + HConstants.DEFAULT_REGIONSERVER_INFOPORT); String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0"); if (this instanceof HMaster) { @@ -299,9 +299,9 @@ private void putUpWebUI() throws IOException { } if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) { - String msg = "Failed to start http info server. Address " + addr + - " does not belong to this host. Correct configuration parameter: " + - "hbase.regionserver.info.bindAddress"; + String msg = "Failed to start http info server. Address " + addr + + " does not belong to this host. 
Correct configuration parameter: " + + "hbase.regionserver.info.bindAddress"; LOG.error(msg); throw new IOException(msg); } @@ -329,7 +329,7 @@ private void putUpWebUI() throws IOException { port = this.infoServer.getPort(); conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port); int masterInfoPort = - conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); + conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); conf.setInt("hbase.master.info.port.orig", masterInfoPort); conf.setInt(HConstants.MASTER_INFO_PORT, port); } @@ -383,8 +383,8 @@ protected final void initializeMemStoreChunkCreator(HeapMemoryManager hMemManage long globalMemStoreSize = pair.getFirst(); boolean offheap = pair.getSecond() == MemoryType.NON_HEAP; // When off heap memstore in use, take full area for chunk pool. - float poolSizePercentage = offheap ? 1.0F : - conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT); + float poolSizePercentage = offheap ? 1.0F + : conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT); float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT); int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT); @@ -556,7 +556,7 @@ public R getRpcServices() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") public MetaRegionLocationCache getMetaRegionLocationCache() { return this.metaRegionLocationCache; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java index 8db0ca272d8a..88fbe9cd6e59 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java @@ -49,7 +49,7 @@ public HealthCheckChore(int sleepTime, Stoppable stopper, Configuration conf) { healthChecker.init(healthCheckScript, scriptTimeout); this.threshold = config.getInt(HConstants.HEALTH_FAILURE_THRESHOLD, HConstants.DEFAULT_HEALTH_FAILURE_THRESHOLD); - this.failureWindow = (long)this.threshold * (long)sleepTime; + this.failureWindow = (long) this.threshold * (long) sleepTime; } @Override @@ -59,13 +59,12 @@ protected void chore() { if (!isHealthy) { boolean needToStop = decideToStop(); if (needToStop) { - this.getStopper().stop("The node reported unhealthy " + threshold - + " number of times consecutively."); + this.getStopper() + .stop("The node reported unhealthy " + threshold + " number of times consecutively."); } // Always log health report. 
- LOG.info("Health status at " + - StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + " : " + - report.getHealthReport()); + LOG.info("Health status at " + StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + + " : " + report.getHealthReport()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java index e47afd58d68b..8e02ea93c6f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java @@ -19,17 +19,15 @@ import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * A utility for executing an external script that checks the health of - * the node. An example script can be found at - * src/main/sh/healthcheck/healthcheck.sh in the - * hbase-examples module. + * A utility for executing an external script that checks the health of the node. An example script + * can be found at src/main/sh/healthcheck/healthcheck.sh in the hbase-examples + * module. */ class HealthChecker { @@ -44,16 +42,11 @@ class HealthChecker { private long scriptTimeout; enum HealthCheckerExitStatus { - SUCCESS, - TIMED_OUT, - FAILED_WITH_EXIT_CODE, - FAILED_WITH_EXCEPTION, - FAILED + SUCCESS, TIMED_OUT, FAILED_WITH_EXIT_CODE, FAILED_WITH_EXCEPTION, FAILED } /** * Initialize. - * * @param location the location of the health script * @param timeout the timeout to be used for the health script */ @@ -64,8 +57,8 @@ public void init(String location, long timeout) { execScript.add(healthCheckScript); this.shexec = new ShellCommandExecutor(execScript.toArray(new String[execScript.size()]), null, null, scriptTimeout); - LOG.info("HealthChecker initialized with script at " + this.healthCheckScript + - ", timeout=" + timeout); + LOG.info("HealthChecker initialized with script at " + this.healthCheckScript + ", timeout=" + + timeout); } public HealthReport checkHealth() { @@ -104,24 +97,24 @@ private boolean hasErrors(String output) { return false; } - private String getHealthReport(HealthCheckerExitStatus status){ + private String getHealthReport(HealthCheckerExitStatus status) { String healthReport = null; switch (status) { - case SUCCESS: - healthReport = "Server is healthy."; - break; - case TIMED_OUT: - healthReport = "Health script timed out"; - break; - case FAILED_WITH_EXCEPTION: - healthReport = exceptionStackTrace; - break; - case FAILED_WITH_EXIT_CODE: - healthReport = "Health script failed with exit code."; - break; - case FAILED: - healthReport = shexec.getOutput(); - break; + case SUCCESS: + healthReport = "Server is healthy."; + break; + case TIMED_OUT: + healthReport = "Health script timed out"; + break; + case FAILED_WITH_EXCEPTION: + healthReport = exceptionStackTrace; + break; + case FAILED_WITH_EXIT_CODE: + healthReport = "Health script failed with exit code."; + break; + case FAILED: + healthReport = shexec.getOutput(); + break; } return healthReport; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java index 83882b0cdcca..8052cdb8c594 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthReport.java @@ -1,4 +1,4 @@ -/** +/* 
* Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ class HealthReport { /** * Gets the status of the region server. - * * @return HealthCheckerExitStatus */ HealthCheckerExitStatus getStatus() { @@ -49,7 +48,6 @@ public String toString() { /** * Gets the health report of the region server. - * * @return String */ String getHealthReport() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java index f8fb4bd9a426..307a201a693f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,11 +41,9 @@ import org.slf4j.LoggerFactory; /** - * Pluggable JMX Agent for HBase(to fix the 2 random TCP ports issue - * of the out-of-the-box JMX Agent): - * 1)connector port can share with the registry port if SSL is OFF - * 2)support password authentication - * 3)support subset of SSL (with default configuration) + * Pluggable JMX Agent for HBase(to fix the 2 random TCP ports issue of the out-of-the-box JMX + * Agent): 1)connector port can share with the registry port if SSL is OFF 2)support password + * authentication 3)support subset of SSL (with default configuration) */ @InterfaceAudience.Private public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { @@ -57,16 +54,15 @@ public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { public static final int defRegionserverRMIRegistryPort = 10102; /** - * workaround for HBASE-11146 - * master and regionserver are in 1 JVM in standalone mode - * only 1 JMX instance is allowed, otherwise there is port conflict even if - * we only load regionserver coprocessor on master + * workaround for HBASE-11146 master and regionserver are in 1 JVM in standalone mode only 1 JMX + * instance is allowed, otherwise there is port conflict even if we only load regionserver + * coprocessor on master */ private static JMXConnectorServer JMX_CS = null; private Registry rmiRegistry = null; - public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, - int rmiConnectorPort) throws IOException { + public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, int rmiConnectorPort) + throws IOException { // Build jmxURL StringBuilder url = new StringBuilder(); url.append("service:jmx:rmi://localhost:"); @@ -79,8 +75,7 @@ public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort, } - public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) - throws IOException { + public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) throws IOException { boolean rmiSSL = false; boolean authenticate = true; String passwordFile = null; @@ -88,8 +83,7 @@ public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) System.setProperty("java.rmi.server.randomIDs", "true"); - String rmiSSLValue = System.getProperty("com.sun.management.jmxremote.ssl", - "false"); + String rmiSSLValue = System.getProperty("com.sun.management.jmxremote.ssl", "false"); rmiSSL = Boolean.parseBoolean(rmiSSLValue); String authenticateValue = @@ -99,8 +93,8 @@ public 
void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) passwordFile = System.getProperty("com.sun.management.jmxremote.password.file"); accessFile = System.getProperty("com.sun.management.jmxremote.access.file"); - LOG.info("rmiSSL:" + rmiSSLValue + ",authenticate:" + authenticateValue - + ",passwordFile:" + passwordFile + ",accessFile:" + accessFile); + LOG.info("rmiSSL:" + rmiSSLValue + ",authenticate:" + authenticateValue + ",passwordFile:" + + passwordFile + ",accessFile:" + accessFile); // Environment map HashMap jmxEnv = new HashMap<>(); @@ -110,8 +104,8 @@ public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) if (rmiSSL) { if (rmiRegistryPort == rmiConnectorPort) { - throw new IOException("SSL is enabled. " + - "rmiConnectorPort cannot share with the rmiRegistryPort!"); + throw new IOException( + "SSL is enabled. " + "rmiConnectorPort cannot share with the rmiRegistryPort!"); } csf = new SslRMIClientSocketFactorySecure(); ssf = new SslRMIServerSocketFactorySecure(); @@ -140,7 +134,7 @@ public void startConnectorServer(int rmiRegistryPort, int rmiConnectorPort) try { // Start the JMXListener with the connection string - synchronized(JMXListener.class) { + synchronized (JMXListener.class) { if (JMX_CS != null) { throw new RuntimeException("Started by another thread?"); } @@ -172,7 +166,6 @@ public void stopConnectorServer() throws IOException { } } - @Override public void start(CoprocessorEnvironment env) throws IOException { int rmiRegistryPort = -1; @@ -189,23 +182,20 @@ public void start(CoprocessorEnvironment env) throws IOException { } else if (env instanceof RegionServerCoprocessorEnvironment) { // running on RegionServer rmiRegistryPort = - conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, - defRegionserverRMIRegistryPort); - rmiConnectorPort = - conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); - LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort - + ",RegionServer rmiConnectorPort:" + rmiConnectorPort); + conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, defRegionserverRMIRegistryPort); + rmiConnectorPort = conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort); + LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort + ",RegionServer rmiConnectorPort:" + + rmiConnectorPort); } else if (env instanceof RegionCoprocessorEnvironment) { LOG.error("JMXListener should not be loaded in Region Environment!"); return; } - synchronized(JMXListener.class) { + synchronized (JMXListener.class) { if (JMX_CS != null) { LOG.info("JMXListener has been started at Registry port " + rmiRegistryPort); - } - else { + } else { startConnectorServer(rmiRegistryPort, rmiConnectorPort); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 329b1d050f6c..3df1208047f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,26 +40,22 @@ import org.slf4j.LoggerFactory; /** - * This class creates a single process HBase cluster. One thread is created for - * a master and one per region server. 
- * - * Call {@link #startup()} to start the cluster running and {@link #shutdown()} - * to close it all down. {@link #join} the cluster is you want to wait on - * shutdown completion. - * - *

      Runs master on port 16000 by default. Because we can't just kill the - * process -- not till HADOOP-1700 gets fixed and even then.... -- we need to - * be able to find the master with a remote client to run shutdown. To use a - * port other than 16000, set the hbase.master to a value of 'local:PORT': - * that is 'local', not 'localhost', and the port number the master should use - * instead of 16000. - * + * This class creates a single process HBase cluster. One thread is created for a master and one per + * region server. Call {@link #startup()} to start the cluster running and {@link #shutdown()} to + * close it all down. {@link #join} the cluster if you want to wait on shutdown completion. + *

      + * Runs master on port 16000 by default. Because we can't just kill the process -- not till + * HADOOP-1700 gets fixed and even then.... -- we need to be able to find the master with a remote + * client to run shutdown. To use a port other than 16000, set the hbase.master to a value of + * 'local:PORT': that is 'local', not 'localhost', and the port number the master should use instead + * of 16000. */ @InterfaceAudience.Private public class LocalHBaseCluster { private static final Logger LOG = LoggerFactory.getLogger(LocalHBaseCluster.class); private final List masterThreads = new CopyOnWriteArrayList<>(); - private final List regionThreads = new CopyOnWriteArrayList<>(); + private final List regionThreads = + new CopyOnWriteArrayList<>(); private final static int DEFAULT_NO = 1; /** local mode */ public static final String LOCAL = "local"; @@ -77,49 +72,44 @@ public class LocalHBaseCluster { * @param conf * @throws IOException */ - public LocalHBaseCluster(final Configuration conf) - throws IOException { + public LocalHBaseCluster(final Configuration conf) throws IOException { this(conf, DEFAULT_NO); } /** * Constructor. - * @param conf Configuration to use. Post construction has the master's - * address. + * @param conf Configuration to use. Post construction has the master's address. * @param noRegionServers Count of regionservers to start. * @throws IOException */ - public LocalHBaseCluster(final Configuration conf, final int noRegionServers) - throws IOException { + public LocalHBaseCluster(final Configuration conf, final int noRegionServers) throws IOException { this(conf, 1, 0, noRegionServers, getMasterImplementation(conf), getRegionServerImplementation(conf)); } /** * Constructor. - * @param conf Configuration to use. Post construction has the active master - * address. + * @param conf Configuration to use. Post construction has the active master address. * @param noMasters Count of masters to start. * @param noRegionServers Count of regionservers to start. * @throws IOException */ - public LocalHBaseCluster(final Configuration conf, final int noMasters, - final int noRegionServers) - throws IOException { + public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers) + throws IOException { this(conf, noMasters, 0, noRegionServers, getMasterImplementation(conf), getRegionServerImplementation(conf)); } @SuppressWarnings("unchecked") - private static Class getRegionServerImplementation(final Configuration conf) { - return (Class)conf.getClass(HConstants.REGION_SERVER_IMPL, - HRegionServer.class); + private static Class + getRegionServerImplementation(final Configuration conf) { + return (Class) conf.getClass(HConstants.REGION_SERVER_IMPL, + HRegionServer.class); } @SuppressWarnings("unchecked") private static Class getMasterImplementation(final Configuration conf) { - return (Class)conf.getClass(HConstants.MASTER_IMPL, - HMaster.class); + return (Class) conf.getClass(HConstants.MASTER_IMPL, HMaster.class); } public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers, @@ -130,8 +120,7 @@ public LocalHBaseCluster(final Configuration conf, final int noMasters, final in /** * Constructor. - * @param conf Configuration to use. Post construction has the master's - * address. + * @param conf Configuration to use. Post construction has the master's address. * @param noMasters Count of masters to start. * @param noRegionServers Count of regionservers to start. 
* @param masterClass @@ -147,34 +136,34 @@ public LocalHBaseCluster(final Configuration conf, final int noMasters, // When active, if a port selection is default then we switch to random if (conf.getBoolean(ASSIGN_RANDOM_PORTS, false)) { - if (conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT) - == HConstants.DEFAULT_MASTER_PORT) { + if (conf.getInt(HConstants.MASTER_PORT, + HConstants.DEFAULT_MASTER_PORT) == HConstants.DEFAULT_MASTER_PORT) { LOG.debug("Setting Master Port to random."); conf.set(HConstants.MASTER_PORT, "0"); } - if (conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT) - == HConstants.DEFAULT_REGIONSERVER_PORT) { + if (conf.getInt(HConstants.REGIONSERVER_PORT, + HConstants.DEFAULT_REGIONSERVER_PORT) == HConstants.DEFAULT_REGIONSERVER_PORT) { LOG.debug("Setting RegionServer Port to random."); conf.set(HConstants.REGIONSERVER_PORT, "0"); } // treat info ports special; expressly don't change '-1' (keep off) // in case we make that the default behavior. - if (conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1 && - conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) - == HConstants.DEFAULT_REGIONSERVER_INFOPORT) { + if (conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1 + && conf.getInt(HConstants.REGIONSERVER_INFO_PORT, + HConstants.DEFAULT_REGIONSERVER_INFOPORT) == HConstants.DEFAULT_REGIONSERVER_INFOPORT) { LOG.debug("Setting RS InfoServer Port to random."); conf.set(HConstants.REGIONSERVER_INFO_PORT, "0"); } - if (conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1 && - conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) - == HConstants.DEFAULT_MASTER_INFOPORT) { + if (conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1 + && conf.getInt(HConstants.MASTER_INFO_PORT, + HConstants.DEFAULT_MASTER_INFOPORT) == HConstants.DEFAULT_MASTER_INFOPORT) { LOG.debug("Setting Master InfoServer Port to random."); conf.set(HConstants.MASTER_INFO_PORT, "0"); } } - this.masterClass = (Class) - conf.getClass(HConstants.MASTER_IMPL, masterClass); + this.masterClass = + (Class) conf.getClass(HConstants.MASTER_IMPL, masterClass); // Start the HMasters. int i; for (i = 0; i < noMasters; i++) { @@ -186,45 +175,41 @@ public LocalHBaseCluster(final Configuration conf, final int noMasters, addMaster(c, i + j); } // Start the HRegionServers. - this.regionServerClass = - (Class)conf.getClass(HConstants.REGION_SERVER_IMPL, - regionServerClass); + this.regionServerClass = (Class) conf + .getClass(HConstants.REGION_SERVER_IMPL, regionServerClass); for (int j = 0; j < noRegionServers; j++) { addRegionServer(new Configuration(conf), j); } } - public JVMClusterUtil.RegionServerThread addRegionServer() - throws IOException { + public JVMClusterUtil.RegionServerThread addRegionServer() throws IOException { return addRegionServer(new Configuration(conf), this.regionThreads.size()); } @SuppressWarnings("unchecked") - public JVMClusterUtil.RegionServerThread addRegionServer( - Configuration config, final int index) - throws IOException { + public JVMClusterUtil.RegionServerThread addRegionServer(Configuration config, final int index) + throws IOException { // Create each regionserver with its own Configuration instance so each has // its Connection instance rather than share (see HBASE_INSTANCES down in // the guts of ConnectionManager). 
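As an aside to this hunk, a hedged sketch (not part of the patch) of how the addRegionServer() overloads reformatted here are typically driven: the thread that comes back is not started automatically, so a caller starts it and waits for the new region server to check in. The helper name is hypothetical.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.util.JVMClusterUtil;

public final class AddRegionServerSketch {
  /** Adds one more region server thread to a LocalHBaseCluster that is already running. */
  static JVMClusterUtil.RegionServerThread grow(LocalHBaseCluster cluster) throws IOException {
    JVMClusterUtil.RegionServerThread rst = cluster.addRegionServer();
    rst.start();               // threads returned by addRegionServer() are not started for you
    rst.waitForServerOnline(); // block until the new region server has come up
    return rst;
  }
}
```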
JVMClusterUtil.RegionServerThread rst = JVMClusterUtil.createRegionServerThread(config, (Class) conf - .getClass(HConstants.REGION_SERVER_IMPL, this.regionServerClass), index); + .getClass(HConstants.REGION_SERVER_IMPL, this.regionServerClass), + index); this.regionThreads.add(rst); return rst; } - public JVMClusterUtil.RegionServerThread addRegionServer( - final Configuration config, final int index, User user) - throws IOException, InterruptedException { - return user.runAs( - new PrivilegedExceptionAction() { - @Override - public JVMClusterUtil.RegionServerThread run() throws Exception { - return addRegionServer(config, index); - } - }); + public JVMClusterUtil.RegionServerThread addRegionServer(final Configuration config, + final int index, User user) throws IOException, InterruptedException { + return user.runAs(new PrivilegedExceptionAction() { + @Override + public JVMClusterUtil.RegionServerThread run() throws Exception { + return addRegionServer(config, index); + } + }); } public JVMClusterUtil.MasterThread addMaster() throws IOException { @@ -237,26 +222,24 @@ public JVMClusterUtil.MasterThread addMaster(Configuration c, final int index) // its Connection instance rather than share (see HBASE_INSTANCES down in // the guts of ConnectionManager. JVMClusterUtil.MasterThread mt = JVMClusterUtil.createMasterThread(c, - (Class) c.getClass(HConstants.MASTER_IMPL, this.masterClass), index); + (Class) c.getClass(HConstants.MASTER_IMPL, this.masterClass), index); this.masterThreads.add(mt); // Refresh the master address config. List masterHostPorts = new ArrayList<>(); - getMasters().forEach(masterThread -> - masterHostPorts.add(masterThread.getMaster().getServerName().getAddress().toString())); + getMasters().forEach(masterThread -> masterHostPorts + .add(masterThread.getMaster().getServerName().getAddress().toString())); conf.set(HConstants.MASTER_ADDRS_KEY, String.join(",", masterHostPorts)); return mt; } - public JVMClusterUtil.MasterThread addMaster( - final Configuration c, final int index, User user) - throws IOException, InterruptedException { - return user.runAs( - new PrivilegedExceptionAction() { - @Override - public JVMClusterUtil.MasterThread run() throws Exception { - return addMaster(c, index); - } - }); + public JVMClusterUtil.MasterThread addMaster(final Configuration c, final int index, User user) + throws IOException, InterruptedException { + return user.runAs(new PrivilegedExceptionAction() { + @Override + public JVMClusterUtil.MasterThread run() throws Exception { + return addMaster(c, index); + } + }); } /** @@ -275,14 +258,13 @@ public List getRegionServers() { } /** - * @return List of running servers (Some servers may have been killed or - * aborted during lifetime of cluster; these servers are not included in this - * list). + * @return List of running servers (Some servers may have been killed or aborted during lifetime + * of cluster; these servers are not included in this list). */ public List getLiveRegionServers() { List liveServers = new ArrayList<>(); List list = getRegionServers(); - for (JVMClusterUtil.RegionServerThread rst: list) { + for (JVMClusterUtil.RegionServerThread rst : list) { if (rst.isAlive()) liveServers.add(rst); else LOG.info("Not alive " + rst.getName()); } @@ -331,15 +313,14 @@ public HMaster getMaster(int serverNumber) { } /** - * Gets the current active master, if available. If no active master, returns - * null. + * Gets the current active master, if available. If no active master, returns null. 
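For readers skimming this file's hunks, a minimal sketch of the lifecycle the class javadoc above describes: start the cluster, wait for an active master, then shut down and join. It is illustrative only and not part of the patch; the constructor and methods are the ones shown in this diff, while the polling loop and counts are assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.LocalHBaseCluster;

public class LocalClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // One master and two region servers, all running as threads inside this JVM.
    LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1, 2);
    cluster.startup();
    try {
      // getActiveMaster() returns null until a master has won election and is not stopped.
      while (cluster.getActiveMaster() == null) {
        Thread.sleep(100);
      }
    } finally {
      cluster.shutdown(); // ask master and region server threads to stop
      cluster.join();     // block until shutdown has completed
    }
  }
}
```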
* @return the HMaster for the active master */ public HMaster getActiveMaster() { for (JVMClusterUtil.MasterThread mt : masterThreads) { // Ensure that the current active master is not stopped. // We don't want to return a stopping master as an active master. - if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) { + if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) { return mt.getMaster(); } } @@ -354,14 +335,13 @@ public List getMasters() { } /** - * @return List of running master servers (Some servers may have been killed - * or aborted during lifetime of cluster; these servers are not included in - * this list). + * @return List of running master servers (Some servers may have been killed or aborted during + * lifetime of cluster; these servers are not included in this list). */ public List getLiveMasters() { List liveServers = new ArrayList<>(); List list = getMasters(); - for (JVMClusterUtil.MasterThread mt: list) { + for (JVMClusterUtil.MasterThread mt : list) { if (mt.isAlive()) { liveServers.add(mt); } @@ -389,7 +369,7 @@ public String waitOnMaster(JVMClusterUtil.MasterThread masterThread) { masterThread.join(); } catch (InterruptedException e) { LOG.error("Interrupted while waiting for {} to finish. Retrying join", - masterThread.getName(), e); + masterThread.getName(), e); Thread.currentThread().interrupt(); } } @@ -398,12 +378,11 @@ public String waitOnMaster(JVMClusterUtil.MasterThread masterThread) { } /** - * Wait for Mini HBase Cluster to shut down. - * Presumes you've already called {@link #shutdown()}. + * Wait for Mini HBase Cluster to shut down. Presumes you've already called {@link #shutdown()}. */ public void join() { if (this.regionThreads != null) { - for(Thread t: this.regionThreads) { + for (Thread t : this.regionThreads) { if (t.isAlive()) { try { Threads.threadDumpingIsAlive(t); @@ -445,8 +424,9 @@ public void shutdown() { * @return True if a 'local' address in hbase.master value. */ public static boolean isLocal(final Configuration c) { - boolean mode = c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); - return(mode == HConstants.CLUSTER_IS_LOCAL); + boolean mode = + c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); + return (mode == HConstants.CLUSTER_IS_LOCAL); } /** @@ -457,9 +437,9 @@ public static void main(String[] args) throws IOException { LocalHBaseCluster cluster = new LocalHBaseCluster(conf); cluster.startup(); try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { - TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.valueOf(cluster.getClass().getName())).build(); + Admin admin = connection.getAdmin()) { + TableDescriptor htd = TableDescriptorBuilder + .newBuilder(TableName.valueOf(cluster.getClass().getName())).build(); admin.createTable(htd); } finally { cluster.shutdown(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java index 2e0f21379c7a..c78f19056f7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaRegionLocationCache.java @@ -41,9 +41,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * A cache of meta region location metadata. Registers a listener on ZK to track changes to the - * meta table znodes. 
Clients are expected to retry if the meta information is stale. This class - * is thread-safe (a single instance of this class can be shared by multiple threads without race + * A cache of meta region location metadata. Registers a listener on ZK to track changes to the meta + * table znodes. Clients are expected to retry if the meta information is stale. This class is + * thread-safe (a single instance of this class can be shared by multiple threads without race * conditions). */ @InterfaceAudience.Private @@ -64,19 +64,16 @@ public class MetaRegionLocationCache extends ZKListener { new RetryCounterFactory(MAX_ZK_META_FETCH_RETRIES, SLEEP_INTERVAL_MS_BETWEEN_RETRIES); /** - * Cached meta region locations indexed by replica ID. - * CopyOnWriteArrayMap ensures synchronization during updates and a consistent snapshot during - * client requests. Even though CopyOnWriteArrayMap copies the data structure for every write, - * that should be OK since the size of the list is often small and mutations are not too often - * and we do not need to block client requests while mutations are in progress. + * Cached meta region locations indexed by replica ID. CopyOnWriteArrayMap ensures synchronization + * during updates and a consistent snapshot during client requests. Even though + * CopyOnWriteArrayMap copies the data structure for every write, that should be OK since the size + * of the list is often small and mutations are not too often and we do not need to block client + * requests while mutations are in progress. */ private final CopyOnWriteArrayMap cachedMetaLocations; private enum ZNodeOpType { - INIT, - CREATED, - CHANGED, - DELETED + INIT, CREATED, CHANGED, DELETED } public MetaRegionLocationCache(ZKWatcher zkWatcher) { @@ -90,9 +87,9 @@ public MetaRegionLocationCache(ZKWatcher zkWatcher) { // in a separate thread in the background to not block master init. ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true).build(); RetryCounterFactory retryFactory = new RetryCounterFactory(Integer.MAX_VALUE, - SLEEP_INTERVAL_MS_BETWEEN_RETRIES, SLEEP_INTERVAL_MS_MAX); + SLEEP_INTERVAL_MS_BETWEEN_RETRIES, SLEEP_INTERVAL_MS_MAX); threadFactory.newThread(() -> loadMetaLocationsFromZk(retryFactory.create(), ZNodeOpType.INIT)) - .start(); + .start(); } /** @@ -132,25 +129,24 @@ private void loadMetaLocationsFromZk(RetryCounter retryCounter, ZNodeOpType opTy // No new meta znodes got added. return; } - for (String znode: znodes) { + for (String znode : znodes) { String path = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, znode); updateMetaLocation(path, opType); } } /** - * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for - * future updates. + * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for future + * updates. * @param replicaId ReplicaID of the region. * @return HRegionLocation for the meta replica. * @throws KeeperException if there is any issue fetching/parsing the serialized data. 
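As an illustration of how this cache is consumed (a sketch under assumptions, not part of the patch): construct it with an existing ZKWatcher and poll getMetaRegionLocations(), retrying while the asynchronous load from ZK has not populated anything yet. The watcher parameter, the element type of the returned list, and the retry loop are assumptions drawn from the javadoc in this hunk.

```java
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaRegionLocationCache;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public final class MetaLocationCacheSketch {
  static void dump(ZKWatcher watcher) throws InterruptedException {
    MetaRegionLocationCache cache = new MetaRegionLocationCache(watcher);
    List<HRegionLocation> locations = cache.getMetaRegionLocations();
    while (locations == null || locations.isEmpty()) {
      Thread.sleep(100); // initial load runs on a background thread; retry, as the javadoc advises
      locations = cache.getMetaRegionLocations();
    }
    for (HRegionLocation loc : locations) {
      System.out.println("meta replica " + loc.getRegion().getReplicaId()
        + " -> " + loc.getServerName());
    }
  }
}
```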
*/ - private HRegionLocation getMetaRegionLocation(int replicaId) - throws KeeperException { + private HRegionLocation getMetaRegionLocation(int replicaId) throws KeeperException { RegionState metaRegionState; try { - byte[] data = ZKUtil.getDataAndWatch(watcher, - watcher.getZNodePaths().getZNodeForReplica(replicaId)); + byte[] data = + ZKUtil.getDataAndWatch(watcher, watcher.getZNodePaths().getZNodeForReplica(replicaId)); metaRegionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId); } catch (DeserializationException e) { throw ZKUtil.convert(e); @@ -201,7 +197,6 @@ private void updateMetaLocation(String path, ZNodeOpType opType) { /** * @return Optional list of HRegionLocations for meta replica(s), null if the cache is empty. - * */ public List getMetaRegionLocations() { ConcurrentNavigableMap snapshot = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 512916f483e0..ae31b4cf70ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -104,7 +104,7 @@ private MetaTableAccessor() { * @param visitor Visitor invoked against each row in regions family. */ public static void fullScanRegions(Connection connection, - final ClientMetaTableAccessor.Visitor visitor) throws IOException { + final ClientMetaTableAccessor.Visitor visitor) throws IOException { scanMeta(connection, null, null, QueryType.REGION, visitor); } @@ -122,7 +122,7 @@ public static List fullScanRegions(Connection connection) throws IOExcep * @param visitor Visitor invoked against each row in tables family. */ public static void fullScanTables(Connection connection, - final ClientMetaTableAccessor.Visitor visitor) throws IOException { + final ClientMetaTableAccessor.Visitor visitor) throws IOException { scanMeta(connection, null, null, QueryType.TABLE, visitor); } @@ -162,7 +162,7 @@ public static Table getMetaHTable(final Connection connection) throws IOExceptio */ @Deprecated public static Pair getRegion(Connection connection, byte[] regionName) - throws IOException { + throws IOException { HRegionLocation location = getRegionLocation(connection, regionName); return location == null ? null : new Pair<>(location.getRegion(), location.getServerName()); } @@ -174,7 +174,7 @@ public static Pair getRegion(Connection connection, byte * @return HRegionLocation for the given region */ public static HRegionLocation getRegionLocation(Connection connection, byte[] regionName) - throws IOException { + throws IOException { byte[] row = regionName; RegionInfo parsedInfo = null; try { @@ -190,8 +190,8 @@ public static HRegionLocation getRegionLocation(Connection connection, byte[] re r = t.get(get); } RegionLocations locations = CatalogFamilyFormat.getRegionLocations(r); - return locations == null ? null : - locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId()); + return locations == null ? null + : locations.getRegionLocation(parsedInfo == null ? 
0 : parsedInfo.getReplicaId()); } /** @@ -201,7 +201,7 @@ public static HRegionLocation getRegionLocation(Connection connection, byte[] re * @return HRegionLocation for the given region */ public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo) - throws IOException { + throws IOException { return CatalogFamilyFormat.getRegionLocation(getCatalogFamilyRow(connection, regionInfo), regionInfo, regionInfo.getReplicaId()); } @@ -210,7 +210,7 @@ public static HRegionLocation getRegionLocation(Connection connection, RegionInf * @return Return the {@link HConstants#CATALOG_FAMILY} row from hbase:meta. */ public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri) - throws IOException { + throws IOException { Get get = new Get(CatalogFamilyFormat.getMetaKeyForRegion(ri)); get.addFamily(HConstants.CATALOG_FAMILY); try (Table t = getMetaHTable(connection)) { @@ -225,7 +225,7 @@ public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri) * @return result of the specified region */ public static Result getRegionResult(Connection connection, byte[] regionName) - throws IOException { + throws IOException { Get get = new Get(regionName); get.addFamily(HConstants.CATALOG_FAMILY); try (Table t = getMetaHTable(connection)) { @@ -261,7 +261,7 @@ public static Result scanByRegionEncodedName(Connection connection, String regio * @return List of all user-space regions. */ public static List getAllRegions(Connection connection, - boolean excludeOfflinedSplitParents) throws IOException { + boolean excludeOfflinedSplitParents) throws IOException { List> result; result = getTableRegionsAndLocations(connection, null, excludeOfflinedSplitParents); @@ -278,7 +278,7 @@ public static List getAllRegions(Connection connection, * @return Ordered list of {@link RegionInfo}. */ public static List getTableRegions(Connection connection, TableName tableName) - throws IOException { + throws IOException { return getTableRegions(connection, tableName, false); } @@ -292,14 +292,14 @@ public static List getTableRegions(Connection connection, TableName * @return Ordered list of {@link RegionInfo}. */ public static List getTableRegions(Connection connection, TableName tableName, - final boolean excludeOfflinedSplitParents) throws IOException { + final boolean excludeOfflinedSplitParents) throws IOException { List> result = - getTableRegionsAndLocations(connection, tableName, excludeOfflinedSplitParents); + getTableRegionsAndLocations(connection, tableName, excludeOfflinedSplitParents); return getListOfRegionInfos(result); } private static List - getListOfRegionInfos(final List> pairs) { + getListOfRegionInfos(final List> pairs) { if (pairs == null || pairs.isEmpty()) { return Collections.emptyList(); } @@ -351,7 +351,7 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { * @return Return list of regioninfos and server. */ public static List> - getTableRegionsAndLocations(Connection connection, TableName tableName) throws IOException { + getTableRegionsAndLocations(Connection connection, TableName tableName) throws IOException { return getTableRegionsAndLocations(connection, tableName, true); } @@ -364,15 +364,15 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { */ // What happens here when 1M regions in hbase:meta? This won't scale? 
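A small sketch of the read-side helpers touched in this hunk: listing a table's regions and walking the regions family with a visitor. An open Connection named connection and a user table called demo are assumptions; the method names come from the signatures in this diff.

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaReadSketch {
  static void listRegions(Connection connection) throws IOException {
    TableName tn = TableName.valueOf("demo");
    // Ordered RegionInfo list for one table, read from hbase:meta.
    List<RegionInfo> regions = MetaTableAccessor.getTableRegions(connection, tn);
    regions.forEach(ri -> System.out.println(ri.getRegionNameAsString()));

    // Visit every row of the regions family; returning true keeps the scan going.
    MetaTableAccessor.fullScanRegions(connection, r -> {
      System.out.println(Bytes.toStringBinary(r.getRow()));
      return true;
    });
  }
}
```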
public static List> getTableRegionsAndLocations( - Connection connection, @Nullable final TableName tableName, - final boolean excludeOfflinedSplitParents) throws IOException { + Connection connection, @Nullable final TableName tableName, + final boolean excludeOfflinedSplitParents) throws IOException { if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) { throw new IOException( - "This method can't be used to locate meta regions;" + " use MetaTableLocator instead"); + "This method can't be used to locate meta regions;" + " use MetaTableLocator instead"); } // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress ClientMetaTableAccessor.CollectRegionLocationsVisitor visitor = - new ClientMetaTableAccessor.CollectRegionLocationsVisitor(excludeOfflinedSplitParents); + new ClientMetaTableAccessor.CollectRegionLocationsVisitor(excludeOfflinedSplitParents); scanMeta(connection, ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION), ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION), QueryType.REGION, @@ -406,19 +406,19 @@ public static void fullScanMetaAndPrint(Connection connection) throws IOExceptio } public static void scanMetaForTableRegions(Connection connection, - ClientMetaTableAccessor.Visitor visitor, TableName tableName) throws IOException { + ClientMetaTableAccessor.Visitor visitor, TableName tableName) throws IOException { scanMeta(connection, tableName, QueryType.REGION, Integer.MAX_VALUE, visitor); } private static void scanMeta(Connection connection, TableName table, QueryType type, int maxRows, - final ClientMetaTableAccessor.Visitor visitor) throws IOException { + final ClientMetaTableAccessor.Visitor visitor) throws IOException { scanMeta(connection, ClientMetaTableAccessor.getTableStartRowForMeta(table, type), ClientMetaTableAccessor.getTableStopRowForMeta(table, type), type, maxRows, visitor); } public static void scanMeta(Connection connection, @Nullable final byte[] startRow, - @Nullable final byte[] stopRow, QueryType type, final ClientMetaTableAccessor.Visitor visitor) - throws IOException { + @Nullable final byte[] stopRow, QueryType type, final ClientMetaTableAccessor.Visitor visitor) + throws IOException { scanMeta(connection, startRow, stopRow, type, Integer.MAX_VALUE, visitor); } @@ -431,15 +431,15 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR * @param rowLimit max number of rows to return */ public static void scanMeta(Connection connection, final ClientMetaTableAccessor.Visitor visitor, - final TableName tableName, final byte[] row, final int rowLimit) throws IOException { + final TableName tableName, final byte[] row, final int rowLimit) throws IOException { byte[] startRow = null; byte[] stopRow = null; if (tableName != null) { startRow = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION); if (row != null) { RegionInfo closestRi = getClosestRegionInfo(connection, tableName, row); - startRow = - RegionInfo.createRegionName(tableName, closestRi.getStartKey(), HConstants.ZEROES, false); + startRow = RegionInfo.createRegionName(tableName, closestRi.getStartKey(), + HConstants.ZEROES, false); } stopRow = ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION); } @@ -456,14 +456,14 @@ public static void scanMeta(Connection connection, final ClientMetaTableAccessor * @param visitor Visitor invoked against each row. 
*/ public static void scanMeta(Connection connection, @Nullable final byte[] startRow, - @Nullable final byte[] stopRow, QueryType type, int maxRows, - final ClientMetaTableAccessor.Visitor visitor) throws IOException { + @Nullable final byte[] stopRow, QueryType type, int maxRows, + final ClientMetaTableAccessor.Visitor visitor) throws IOException { scanMeta(connection, startRow, stopRow, type, null, maxRows, visitor); } public static void scanMeta(Connection connection, @Nullable final byte[] startRow, - @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows, - final ClientMetaTableAccessor.Visitor visitor) throws IOException { + @Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows, + final ClientMetaTableAccessor.Visitor visitor) throws IOException { int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE; Scan scan = getMetaScan(connection.getConfiguration(), rowUpperLimit); @@ -481,9 +481,9 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR } if (LOG.isTraceEnabled()) { - LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) + - " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit + - " with caching=" + scan.getCaching()); + LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) + + " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit + + " with caching=" + scan.getCaching()); } int currentRow = 0; @@ -519,7 +519,7 @@ public static void scanMeta(Connection connection, @Nullable final byte[] startR */ @NonNull private static RegionInfo getClosestRegionInfo(Connection connection, - @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException { + @NonNull final TableName tableName, @NonNull final byte[] row) throws IOException { byte[] searchRow = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setReversed(true); @@ -527,13 +527,13 @@ private static RegionInfo getClosestRegionInfo(Connection connection, try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) { Result result = resultScanner.next(); if (result == null) { - throw new TableNotFoundException("Cannot find row in META " + " for table: " + tableName + - ", row=" + Bytes.toStringBinary(row)); + throw new TableNotFoundException("Cannot find row in META " + " for table: " + tableName + + ", row=" + Bytes.toStringBinary(row)); } RegionInfo regionInfo = CatalogFamilyFormat.getRegionInfo(result); if (regionInfo == null) { - throw new IOException("RegionInfo was null or empty in Meta for " + tableName + ", row=" + - Bytes.toStringBinary(row)); + throw new IOException("RegionInfo was null or empty in Meta for " + tableName + ", row=" + + Bytes.toStringBinary(row)); } return regionInfo; } @@ -617,7 +617,7 @@ public static Map getTableStates(Connection conn) throws * @param tableName table to look for */ public static void updateTableState(Connection conn, TableName tableName, TableState.State actual) - throws IOException { + throws IOException { updateTableState(conn, new TableState(tableName, actual)); } @@ -647,18 +647,18 @@ public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) { * Adds split daughters to the Put */ public static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) - throws IOException { + throws IOException { if (splitA != null) { 
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITA_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitA)) - .build()); + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITA_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put) + .setValue(RegionInfo.toByteArray(splitA)).build()); } if (splitB != null) { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITB_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitB)) - .build()); + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITB_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put) + .setValue(RegionInfo.toByteArray(splitB)).build()); } return put; } @@ -689,7 +689,7 @@ private static void put(Table t, Put p) throws IOException { * @param ps Put to add to hbase:meta */ public static void putsToMetaTable(final Connection connection, final List ps) - throws IOException { + throws IOException { if (ps.isEmpty()) { return; } @@ -710,7 +710,7 @@ public static void putsToMetaTable(final Connection connection, final List * @param d Delete to add to hbase:meta */ private static void deleteFromMetaTable(final Connection connection, final Delete d) - throws IOException { + throws IOException { List dels = new ArrayList<>(1); dels.add(d); deleteFromMetaTable(connection, dels); @@ -722,7 +722,7 @@ private static void deleteFromMetaTable(final Connection connection, final Delet * @param deletes Deletes to add to hbase:meta This list should support #remove. */ private static void deleteFromMetaTable(final Connection connection, final List deletes) - throws IOException { + throws IOException { try (Table t = getMetaHTable(connection)) { debugLogMutations(deletes); t.delete(deletes); @@ -731,9 +731,9 @@ private static void deleteFromMetaTable(final Connection connection, final List< public static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.STATE_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(state.name())) - .build()); + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.STATE_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(state.name())).build()); return put; } @@ -741,7 +741,7 @@ public static Put addRegionStateToPut(Put put, RegionState.State state) throws I * Update state column in hbase:meta. 
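For orientation, a hedged sketch of the write-side helpers reformatted in this hunk; the Connection, RegionInfo, ServerName and sequence number are placeholders, and nothing in the patch suggests calling these outside master-side code.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public final class MetaWriteSketch {
  static void recordOpen(Connection connection, RegionInfo ri, ServerName sn) throws IOException {
    // Persist the region's state column, then its location (server, startcode, open seqnum).
    MetaTableAccessor.updateRegionState(connection, ri, RegionState.State.OPEN);
    MetaTableAccessor.updateRegionLocation(connection, ri, sn, 1L,
      EnvironmentEdgeManager.currentTime());
  }
}
```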
*/ public static void updateRegionState(Connection connection, RegionInfo ri, - RegionState.State state) throws IOException { + RegionState.State state) throws IOException { Put put = new Put(RegionReplicaUtil.getRegionInfoForDefaultReplica(ri).getRegionName()); putsToMetaTable(connection, Collections.singletonList(addRegionStateToPut(put, state))); } @@ -760,7 +760,7 @@ public static void updateRegionState(Connection connection, RegionInfo ri, * @throws IOException if problem connecting or updating meta */ public static void addSplitsToParent(Connection connection, RegionInfo regionInfo, - RegionInfo splitA, RegionInfo splitB) throws IOException { + RegionInfo splitA, RegionInfo splitB) throws IOException { try (Table meta = getMetaHTable(connection)) { Put put = makePutFromRegionInfo(regionInfo, EnvironmentEdgeManager.currentTime()); addDaughtersToPut(put, splitA, splitB); @@ -778,7 +778,7 @@ public static void addSplitsToParent(Connection connection, RegionInfo regionInf * @throws IOException if problem connecting or updating meta */ public static void addRegionsToMeta(Connection connection, List regionInfos, - int regionReplication) throws IOException { + int regionReplication) throws IOException { addRegionsToMeta(connection, regionInfos, regionReplication, EnvironmentEdgeManager.currentTime()); } @@ -792,7 +792,7 @@ public static void addRegionsToMeta(Connection connection, List regi * @throws IOException if problem connecting or updating meta */ public static void addRegionsToMeta(Connection connection, List regionInfos, - int regionReplication, long ts) throws IOException { + int regionReplication, long ts) throws IOException { List puts = new ArrayList<>(); for (RegionInfo regionInfo : regionInfos) { if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) { @@ -860,7 +860,7 @@ public static void deleteTableState(Connection connection, TableName table) thro * @param masterSystemTime wall clock time from master if passed in the open region RPC */ public static void updateRegionLocation(Connection connection, RegionInfo regionInfo, - ServerName sn, long openSeqNum, long masterSystemTime) throws IOException { + ServerName sn, long openSeqNum, long masterSystemTime) throws IOException { updateLocation(connection, regionInfo, sn, openSeqNum, masterSystemTime); } @@ -878,7 +878,7 @@ public static void updateRegionLocation(Connection connection, RegionInfo region * is down on other end. */ private static void updateLocation(Connection connection, RegionInfo regionInfo, ServerName sn, - long openSeqNum, long masterSystemTime) throws IOException { + long openSeqNum, long masterSystemTime) throws IOException { // region replicas are kept in the primary region's row Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), masterSystemTime); addRegionInfo(put, regionInfo); @@ -889,47 +889,46 @@ private static void updateLocation(Connection connection, RegionInfo regionInfo, public static Put addRegionInfo(final Put p, final RegionInfo hri) throws IOException { p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) - .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.REGIONINFO_QUALIFIER) - .setTimestamp(p.getTimestamp()).setType(Type.Put) - // Serialize the Default Replica HRI otherwise scan of hbase:meta - // shows an info:regioninfo value with encoded name and region - // name that differs from that of the hbase;meta row. 
- .setValue(RegionInfo.toByteArray(RegionReplicaUtil.getRegionInfoForDefaultReplica(hri))) - .build()); + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.REGIONINFO_QUALIFIER) + .setTimestamp(p.getTimestamp()).setType(Type.Put) + // Serialize the Default Replica HRI otherwise scan of hbase:meta + // shows an info:regioninfo value with encoded name and region + // name that differs from that of the hbase;meta row. + .setValue(RegionInfo.toByteArray(RegionReplicaUtil.getRegionInfoForDefaultReplica(hri))) + .build()); return p; } public static Put addLocation(Put p, ServerName sn, long openSeqNum, int replicaId) - throws IOException { + throws IOException { CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - return p - .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) + return p.add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) .setQualifier(CatalogFamilyFormat.getServerColumn(replicaId)).setTimestamp(p.getTimestamp()) .setType(Cell.Type.Put).setValue(Bytes.toBytes(sn.getAddress().toString())).build()) - .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(CatalogFamilyFormat.getStartCodeColumn(replicaId)) - .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put) - .setValue(Bytes.toBytes(sn.getStartcode())).build()) - .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(CatalogFamilyFormat.getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()) - .setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)).build()); + .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(CatalogFamilyFormat.getStartCodeColumn(replicaId)) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(sn.getStartcode())).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(CatalogFamilyFormat.getSeqNumColumn(replicaId)) + .setTimestamp(p.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) + .build()); } public static Put addEmptyLocation(Put p, int replicaId) throws IOException { CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); return p - .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(CatalogFamilyFormat.getServerColumn(replicaId)).setTimestamp(p.getTimestamp()) - .setType(Type.Put).build()) - .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(CatalogFamilyFormat.getStartCodeColumn(replicaId)) - .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).build()) - .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(CatalogFamilyFormat.getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put).build()); + .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(CatalogFamilyFormat.getServerColumn(replicaId)) + .setTimestamp(p.getTimestamp()).setType(Type.Put).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(CatalogFamilyFormat.getStartCodeColumn(replicaId)) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(CatalogFamilyFormat.getSeqNumColumn(replicaId)) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).build()); } - private static 
void debugLogMutations(List mutations) throws IOException { if (!METALOG.isDebugEnabled()) { return; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java index e57471a778f7..c72be9ea44ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -27,11 +26,10 @@ */ @InterfaceAudience.Private public interface RegionStateListener { -// TODO: Get rid of this!!!! Ain't there a better way to watch region -// state than introduce a whole new listening mechanism? St.Ack + // TODO: Get rid of this!!!! Ain't there a better way to watch region + // state than introduce a whole new listening mechanism? St.Ack /** * Process region split event. - * * @param hri An instance of RegionInfo * @throws IOException */ @@ -39,7 +37,6 @@ public interface RegionStateListener { /** * Process region split reverted event. - * * @param hri An instance of RegionInfo * @throws IOException Signals that an I/O exception has occurred. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index 31b8226cd4a9..b5f025f44a26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,9 +27,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Defines a curated set of shared functions implemented by HBase servers (Masters - * and RegionServers). For use internally only. Be judicious adding API. Changes cause ripples - * through the code base. + * Defines a curated set of shared functions implemented by HBase servers (Masters and + * RegionServers). For use internally only. Be judicious adding API. Changes cause ripples through + * the code base. */ @InterfaceAudience.Private public interface Server extends Abortable, Stoppable { @@ -44,10 +44,9 @@ public interface Server extends Abortable, Stoppable { ZKWatcher getZooKeeper(); /** - * Returns a reference to the servers' connection. - * - * Important note: this method returns a reference to Connection which is managed - * by Server itself, so callers must NOT attempt to close connection obtained. + * Returns a reference to the servers' connection. Important note: this method returns a reference + * to Connection which is managed by Server itself, so callers must NOT attempt to close + * connection obtained. 
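To make the "do not close" note above concrete, a sketch of how server-side code typically borrows this shared connection; server stands for any Server implementation and the check performed is arbitrary. Short-lived helpers obtained from the connection may be closed, the connection itself may not.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class ServerConnectionSketch {
  static boolean metaTableExists(Server server) throws IOException {
    Connection conn = server.getConnection(); // owned by the Server; callers must not close it
    try (Admin admin = conn.getAdmin()) {     // closing the Admin is fine
      return admin.tableExists(TableName.META_TABLE_NAME);
    }
  }
}
```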
*/ default Connection getConnection() { return getAsyncConnection().toConnection(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java index dfe8780ee20f..de077f155c54 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java @@ -1,6 +1,4 @@ -package org.apache.hadoop.hbase; - -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,23 +15,34 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.hadoop.hbase; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ import java.lang.reflect.Field; import java.util.concurrent.atomic.LongAdder; - import org.apache.yetus.audience.InterfaceAudience; /** - * Counters kept by the distributed WAL split log process. - * Used by master and regionserver packages. + * Counters kept by the distributed WAL split log process. Used by master and regionserver packages. * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @Deprecated @InterfaceAudience.Private public class SplitLogCounters { - private SplitLogCounters() {} + private SplitLogCounters() { + } - //Spnager counters + // Spnager counters public final static LongAdder tot_mgr_log_split_batch_start = new LongAdder(); public final static LongAdder tot_mgr_log_split_batch_success = new LongAdder(); public final static LongAdder tot_mgr_log_split_batch_err = new LongAdder(); @@ -92,7 +101,7 @@ public static void resetCounters() throws Exception { for (Field fld : cl.getDeclaredFields()) { /* Guard against source instrumentation. */ if ((!fld.isSynthetic()) && (LongAdder.class.isAssignableFrom(fld.getType()))) { - ((LongAdder)fld.get(null)).reset(); + ((LongAdder) fld.get(null)).reset(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java index ca07fcb1ee33..6dd989c73cac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,21 +18,22 @@ package org.apache.hadoop.hbase; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * State of a WAL log split during distributed splitting. State is kept up in zookeeper. - * Encapsulates protobuf serialization/deserialization so we don't leak generated pb outside of - * this class. Used by regionserver and master packages. - *
Immutable + * State of a WAL log split during distributed splitting. State is kept up in zookeeper. + * Encapsulates protobuf serialization/deserialization so we don't leak generated pb outside of this + * class. Used by regionserver and master packages. + *
      + * Immutable * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @Deprecated @InterfaceAudience.Private @@ -132,7 +133,7 @@ public String toString() { @Override public boolean equals(Object obj) { if (!(obj instanceof SplitLogTask)) return false; - SplitLogTask other = (SplitLogTask)obj; + SplitLogTask other = (SplitLogTask) obj; return other.state.equals(this.state) && other.originServer.equals(this.originServer); } @@ -149,7 +150,7 @@ public int hashCode() { * @throws DeserializationException * @see #toByteArray() */ - public static SplitLogTask parseFrom(final byte [] data) throws DeserializationException { + public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException { ProtobufUtil.expectPBMagicPrefix(data); try { int prefixLen = ProtobufUtil.lengthOfPBMagic(); @@ -165,13 +166,13 @@ public static SplitLogTask parseFrom(final byte [] data) throws DeserializationE * @return This instance serialized into a byte array * @see #parseFrom(byte[]) */ - public byte [] toByteArray() { - // First create a pb ServerName. Then create a ByteString w/ the TaskState - // bytes in it. Finally create a SplitLogTaskState passing in the two + public byte[] toByteArray() { + // First create a pb ServerName. Then create a ByteString w/ the TaskState + // bytes in it. Finally create a SplitLogTaskState passing in the two // pbs just created. HBaseProtos.ServerName snpb = ProtobufUtil.toServerName(this.originServer); ZooKeeperProtos.SplitLogTask slts = - ZooKeeperProtos.SplitLogTask.newBuilder().setServerName(snpb).setState(this.state).build(); + ZooKeeperProtos.SplitLogTask.newBuilder().setServerName(snpb).setState(this.state).build(); return ProtobufUtil.prependPBMagic(slts.toByteArray()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java index 5dffb73d3ed4..5b089d1f2921 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; @@ -32,8 +39,7 @@ public Socket createSocket(String host, int port) throws IOException { secureProtocols.add(p); } } - socket.setEnabledProtocols(secureProtocols.toArray( - new String[secureProtocols.size()])); + socket.setEnabledProtocols(secureProtocols.toArray(new String[secureProtocols.size()])); return socket; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java index 8a9223675a70..9e4a22cb84b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
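For context on why these two SSL/RMI socket factories exist, a hedged sketch of how such factories are usually handed to a JMX connector so only the filtered protocol list is negotiated. The environment-map keys are the standard javax.management.remote.rmi constants; everything else here is illustrative and not part of the patch.

```java
import java.util.HashMap;
import java.util.Map;
import javax.management.remote.rmi.RMIConnectorServer;
import org.apache.hadoop.hbase.SslRMIClientSocketFactorySecure;
import org.apache.hadoop.hbase.SslRMIServerSocketFactorySecure;

public final class SecureJmxEnvSketch {
  static Map<String, Object> jmxEnvironment() {
    Map<String, Object> env = new HashMap<>();
    // RMI will then create client and server sockets through the hardened factories above.
    env.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE,
      new SslRMIClientSocketFactorySecure());
    env.put(RMIConnectorServer.RMI_SERVER_SOCKET_FACTORY_ATTRIBUTE,
      new SslRMIServerSocketFactorySecure());
    return env;
  }
}
```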
*/ package org.apache.hadoop.hbase; @@ -38,9 +45,8 @@ public ServerSocket createServerSocket(int port) throws IOException { public Socket accept() throws IOException { Socket socket = super.accept(); SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); - SSLSocket sslSocket = - (SSLSocket) sslSocketFactory.createSocket(socket, - socket.getInetAddress().getHostName(), socket.getPort(), true); + SSLSocket sslSocket = (SSLSocket) sslSocketFactory.createSocket(socket, + socket.getInetAddress().getHostName(), socket.getPort(), true); sslSocket.setUseClientMode(false); sslSocket.setNeedClientAuth(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index f7e07045f4c5..895188c46367 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +19,8 @@ import java.io.IOException; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.yetus.audience.InterfaceAudience; /** * Get, remove and modify table descriptors. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java index 6ba719a4acb1..5dec53e27a32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java index b884669fe645..5b5df2a5b85e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.BufferedReader; @@ -33,22 +32,29 @@ import org.slf4j.LoggerFactory; /** - *
Contains a set of methods for the collaboration between the start/stop scripts and the - * servers. It allows to delete immediately the znode when the master or the regions server crashes. - * The region server / master writes a specific file when it starts / becomes main master. When they - * end properly, they delete the file. - * In the script, we check for the existence of these files when the program ends. If they still + * + * Contains a set of methods for the collaboration between the start/stop scripts and the servers. + * It allows to delete immediately the znode when the master or the regions server crashes. The + * region server / master writes a specific file when it starts / becomes main master. When they end + * properly, they delete the file. + * + * + * In the script, we check for the existence of these files when the program ends. If they still * exist we conclude that the server crashed, likely without deleting their znode. To have a faster - * recovery we delete immediately the znode. - * The strategy depends on the server type. For a region server we store the znode path in the - * file, and use it to delete it. for a master, as the znode path constant whatever the server, we - * check its content to make sure that the backup server is not now in charge. + * recovery we delete immediately the znode. + * + * + * The strategy depends on the server type. For a region server we store the znode path in the file, + * and use it to delete it. for a master, as the znode path constant whatever the server, we check + * its content to make sure that the backup server is not now in charge. + *
      */ @InterfaceAudience.Private public final class ZNodeClearer { private static final Logger LOG = LoggerFactory.getLogger(ZNodeClearer.class); - private ZNodeClearer() {} + private ZNodeClearer() { + } /** * Logs the errors without failing on exception. @@ -56,8 +62,8 @@ private ZNodeClearer() {} public static void writeMyEphemeralNodeOnDisk(String fileContent) { String fileName = ZNodeClearer.getMyEphemeralNodeFileName(); if (fileName == null) { - LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared " + - "on crash by start scripts (Longer MTTR!)"); + LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared " + + "on crash by start scripts (Longer MTTR!)"); return; } @@ -65,7 +71,7 @@ public static void writeMyEphemeralNodeOnDisk(String fileContent) { try { fstream = new FileWriter(fileName); } catch (IOException e) { - LOG.warn("Can't write znode file "+fileName, e); + LOG.warn("Can't write znode file " + fileName, e); return; } @@ -82,7 +88,7 @@ public static void writeMyEphemeralNodeOnDisk(String fileContent) { } } } catch (IOException e) { - LOG.warn("Can't write znode file "+fileName, e); + LOG.warn("Can't write znode file " + fileName, e); } } @@ -91,7 +97,7 @@ public static void writeMyEphemeralNodeOnDisk(String fileContent) { */ public static String readMyEphemeralNodeOnDisk() throws IOException { String fileName = getMyEphemeralNodeFileName(); - if (fileName == null){ + if (fileName == null) { throw new FileNotFoundException("No filename; set environment variable HBASE_ZNODE_FILE"); } FileReader znodeFile = new FileReader(fileName); @@ -113,7 +119,7 @@ public static String getMyEphemeralNodeFileName() { } /** - * delete the znode file + * delete the znode file */ public static void deleteMyEphemeralNodeOnDisk() { String fileName = getMyEphemeralNodeFileName(); @@ -124,8 +130,8 @@ public static void deleteMyEphemeralNodeOnDisk() { } /** - * See HBASE-14861. We are extracting master ServerName from rsZnodePath - * example: "/hbase/rs/server.example.com,16020,1448266496481" + * See HBASE-14861. We are extracting master ServerName from rsZnodePath example: + * "/hbase/rs/server.example.com,16020,1448266496481" * @param rsZnodePath from HBASE_ZNODE_FILE * @return String representation of ServerName or null if fails */ @@ -134,7 +140,7 @@ public static String parseMasterServerName(String rsZnodePath) { String masterServerName = null; try { String[] rsZnodeParts = rsZnodePath.split("/"); - masterServerName = rsZnodeParts[rsZnodeParts.length -1]; + masterServerName = rsZnodeParts[rsZnodeParts.length - 1]; } catch (IndexOutOfBoundsException e) { LOG.warn("String " + rsZnodePath + " has wrong format", e); } @@ -142,9 +148,9 @@ public static String parseMasterServerName(String rsZnodePath) { } /** - * Delete the master znode if its content (ServerName string) is the same - * as the one in the znode file. (env: HBASE_ZNODE_FILE). I case of master-rs - * colloaction we extract ServerName string from rsZnode path.(HBASE-14861) + * Delete the master znode if its content (ServerName string) is the same as the one in the znode + * file. (env: HBASE_ZNODE_FILE). I case of master-rs colloaction we extract ServerName string + * from rsZnode path.(HBASE-14861) * @return true on successful deletion, false otherwise. 
*/ public static boolean clear(Configuration conf) { @@ -153,11 +159,16 @@ public static boolean clear(Configuration conf) { ZKWatcher zkw; try { - zkw = new ZKWatcher(tempConf, "clean znode for master", - new Abortable() { - @Override public void abort(String why, Throwable e) {} - @Override public boolean isAborted() { return false; } - }); + zkw = new ZKWatcher(tempConf, "clean znode for master", new Abortable() { + @Override + public void abort(String why, Throwable e) { + } + + @Override + public boolean isAborted() { + return false; + } + }); } catch (IOException e) { LOG.warn("Can't connect to zookeeper to read the master znode", e); return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java index fa081948f3f8..51aeabb7b457 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/FailedArchiveException.java @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.backup; -import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.IOException; import java.util.Collection; +import org.apache.hadoop.fs.Path; +import org.apache.yetus.audience.InterfaceAudience; /** * Exception indicating that some files in the requested set could not be archived. @@ -42,9 +40,6 @@ public Collection getFailedFiles() { @Override public String getMessage() { - return new StringBuilder(super.getMessage()) - .append("; files=") - .append(failedFiles) - .toString(); + return new StringBuilder(super.getMessage()).append("; files=").append(failedFiles).toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index 6400976bf43e..19ac29896625 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,13 +67,12 @@ public class HFileArchiver { /** Number of retries in case of fs operation failure */ private static final int DEFAULT_RETRIES_NUMBER = 3; - private static final Function FUNC_FILE_TO_PATH = - new Function() { - @Override - public Path apply(File file) { - return file == null ? null : file.getPath(); - } - }; + private static final Function FUNC_FILE_TO_PATH = new Function() { + @Override + public Path apply(File file) { + return file == null ? 
null : file.getPath(); + } + }; private static ThreadPoolExecutor archiveExecutor; @@ -159,13 +158,13 @@ public boolean accept(Path file) { // convert the files in the region to a File Stream.of(storeDirs).map(getAsFile).forEachOrdered(toArchive::add); LOG.debug("Archiving " + toArchive); - List failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive, - EnvironmentEdgeManager.currentTime()); + List failedArchive = + resolveAndArchive(fs, regionArchiveDir, toArchive, EnvironmentEdgeManager.currentTime()); if (!failedArchive.isEmpty()) { throw new FailedArchiveException( - "Failed to archive/delete all the files for region:" + regionDir.getName() + " into " + - regionArchiveDir + ". Something is probably awry on the filesystem.", - failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); + "Failed to archive/delete all the files for region:" + regionDir.getName() + " into " + + regionArchiveDir + ". Something is probably awry on the filesystem.", + failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); } // if that was successful, then we delete the region return deleteRegionWithoutArchiving(fs, regionDir); @@ -176,17 +175,16 @@ public boolean accept(Path file) { * @param conf the configuration to use * @param fs {@link FileSystem} from which to remove the region * @param rootDir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) - * @param tableDir {@link Path} to where the table is being stored (for building the archive - * path) + * the archive path) + * @param tableDir {@link Path} to where the table is being stored (for building the archive path) * @param regionDirList {@link Path} to where regions are being stored (for building the archive - * path) + * path) * @throws IOException if the request cannot be completed */ public static void archiveRegions(Configuration conf, FileSystem fs, Path rootDir, Path tableDir, - List regionDirList) throws IOException { + List regionDirList) throws IOException { List> futures = new ArrayList<>(regionDirList.size()); - for (Path regionDir: regionDirList) { + for (Path regionDir : regionDirList) { Future future = getArchiveExecutor(conf).submit(() -> { archiveRegion(fs, rootDir, tableDir, regionDir); return null; @@ -194,7 +192,7 @@ public static void archiveRegions(Configuration conf, FileSystem fs, Path rootDi futures.add(future); } try { - for (Future future: futures) { + for (Future future : futures) { future.get(); } } catch (InterruptedException e) { @@ -207,8 +205,8 @@ public static void archiveRegions(Configuration conf, FileSystem fs, Path rootDi private static synchronized ThreadPoolExecutor getArchiveExecutor(final Configuration conf) { if (archiveExecutor == null) { int maxThreads = conf.getInt("hbase.hfilearchiver.thread.pool.max", 8); - archiveExecutor = Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, - getThreadFactory()); + archiveExecutor = + Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, getThreadFactory()); // Shutdown this ThreadPool in a shutdown hook Runtime.getRuntime().addShutdownHook(new Thread(() -> archiveExecutor.shutdown())); @@ -235,8 +233,8 @@ public Thread newThread(Runnable r) { } /** - * Remove from the specified region the store files of the specified column family, - * either by archiving them or outright deletion + * Remove from the specified region the store files of the specified column family, either by + * archiving them or outright deletion * @param fs the filesystem 
where the store files live * @param conf {@link Configuration} to examine to determine the archive directory * @param parent Parent region hosting the store files @@ -244,15 +242,15 @@ public Thread newThread(Runnable r) { * @param family the family hosting the store files * @throws IOException if the files could not be correctly disposed. */ - public static void archiveFamily(FileSystem fs, Configuration conf, - RegionInfo parent, Path tableDir, byte[] family) throws IOException { + public static void archiveFamily(FileSystem fs, Configuration conf, RegionInfo parent, + Path tableDir, byte[] family) throws IOException { Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family))); archiveFamilyByFamilyDir(fs, conf, parent, familyDir, family); } /** - * Removes from the specified region the store files of the specified column family, - * either by archiving them or outright deletion + * Removes from the specified region the store files of the specified column family, either by + * archiving them or outright deletion * @param fs the filesystem where the store files live * @param conf {@link Configuration} to examine to determine the archive directory * @param parent Parent region hosting the store files @@ -260,12 +258,12 @@ public static void archiveFamily(FileSystem fs, Configuration conf, * @param family the family hosting the store files * @throws IOException if the files could not be correctly disposed. */ - public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, - RegionInfo parent, Path familyDir, byte[] family) throws IOException { + public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, RegionInfo parent, + Path familyDir, byte[] family) throws IOException { FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, familyDir); if (storeFiles == null) { LOG.debug("No files to dispose of in {}, family={}", parent.getRegionNameAsString(), - Bytes.toString(family)); + Bytes.toString(family)); return; } @@ -274,12 +272,13 @@ public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family); // do the actual archive - List failedArchive = resolveAndArchive(fs, storeArchiveDir, toArchive, - EnvironmentEdgeManager.currentTime()); - if (!failedArchive.isEmpty()){ - throw new FailedArchiveException("Failed to archive/delete all the files for region:" - + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family) - + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.", + List failedArchive = + resolveAndArchive(fs, storeArchiveDir, toArchive, EnvironmentEdgeManager.currentTime()); + if (!failedArchive.isEmpty()) { + throw new FailedArchiveException( + "Failed to archive/delete all the files for region:" + + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family) + + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.", failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); } } @@ -295,8 +294,7 @@ public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, * @throws IOException if the files could not be correctly disposed. 
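(Aside: the archiveRegions hunk above keeps the submit-then-join pattern: one archive task per region directory is handed to a shared bounded pool sized by hbase.hfilearchiver.thread.pool.max (default 8), and every Future is joined so failures surface. Below is a rough, HBase-free sketch of that pattern using only java.util.concurrent; the class name, the printed message, and the fixed-size pool are stand-ins, not the patch's actual archiveRegion call or its Threads.getBoundedCachedThreadPool executor.)

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class ParallelArchiveSketch {
  public static void main(String[] args) throws Exception {
    // Stand-ins for the region directories handed to archiveRegions(...).
    List<String> regionDirs = List.of("region-a", "region-b", "region-c");

    // Stand-in for the shared bounded archive executor (default max of 8 threads above).
    ExecutorService archiveExecutor = Executors.newFixedThreadPool(8);
    try {
      // Submit one task per region; keep the Futures so no failure is silently dropped.
      List<Future<Void>> futures = new ArrayList<>(regionDirs.size());
      for (String regionDir : regionDirs) {
        Future<Void> future = archiveExecutor.submit(() -> {
          System.out.println("archiving " + regionDir); // placeholder for archiveRegion(...)
          return null;
        });
        futures.add(future);
      }
      // Join every task; an ExecutionException here surfaces the first archive failure.
      for (Future<Void> future : futures) {
        future.get();
      }
    } finally {
      archiveExecutor.shutdown();
      archiveExecutor.awaitTermination(30, TimeUnit.SECONDS);
    }
  }
}
```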
*/ public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionInfo regionInfo, - Path tableDir, byte[] family, Collection compactedFiles) - throws IOException { + Path tableDir, byte[] family, Collection compactedFiles) throws IOException { Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family); archive(fs, regionInfo, family, compactedFiles, storeArchiveDir); } @@ -304,10 +302,10 @@ public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionIn /** * Archive recovered edits using existing logic for archiving store files. This is currently only * relevant when hbase.region.archive.recovered.edits is true, as recovered edits shouldn't - * be kept after replay. In theory, we could use very same method available for archiving - * store files, but supporting WAL dir and store files on different FileSystems added the need for - * extra validation of the passed FileSystem instance and the path where the archiving edits - * should be placed. + * be kept after replay. In theory, we could use very same method available for archiving store + * files, but supporting WAL dir and store files on different FileSystems added the need for extra + * validation of the passed FileSystem instance and the path where the archiving edits should be + * placed. * @param conf {@link Configuration} to determine the archive directory. * @param fs the filesystem used for storing WAL files. * @param regionInfo {@link RegionInfo} a pseudo region representation for the archiving logic. @@ -316,29 +314,29 @@ public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionIn * @throws IOException if files can't be achived due to some internal error. */ public static void archiveRecoveredEdits(Configuration conf, FileSystem fs, RegionInfo regionInfo, - byte[] family, Collection replayedEdits) - throws IOException { + byte[] family, Collection replayedEdits) throws IOException { String workingDir = conf.get(CommonFSUtils.HBASE_WAL_DIR, conf.get(HConstants.HBASE_DIR)); - //extra sanity checks for the right FS + // extra sanity checks for the right FS Path path = new Path(workingDir); - if(path.isAbsoluteAndSchemeAuthorityNull()){ - //no schema specified on wal dir value, so it's on same FS as StoreFiles + if (path.isAbsoluteAndSchemeAuthorityNull()) { + // no schema specified on wal dir value, so it's on same FS as StoreFiles path = new Path(conf.get(HConstants.HBASE_DIR)); } - if(path.toUri().getScheme()!=null && !path.toUri().getScheme().equals(fs.getScheme())){ - throw new IOException("Wrong file system! Should be " + path.toUri().getScheme() + - ", but got " + fs.getScheme()); + if (path.toUri().getScheme() != null && !path.toUri().getScheme().equals(fs.getScheme())) { + throw new IOException("Wrong file system! 
Should be " + path.toUri().getScheme() + + ", but got " + fs.getScheme()); } path = HFileArchiveUtil.getStoreArchivePathForRootDir(path, regionInfo, family); archive(fs, regionInfo, family, replayedEdits, path); } private static void archive(FileSystem fs, RegionInfo regionInfo, byte[] family, - Collection compactedFiles, Path storeArchiveDir) throws IOException { + Collection compactedFiles, Path storeArchiveDir) throws IOException { // sometimes in testing, we don't have rss, so we need to check for that if (fs == null) { - LOG.warn("Passed filesystem is null, so just deleting files without archiving for {}," + - "family={}", Bytes.toString(regionInfo.getRegionName()), Bytes.toString(family)); + LOG.warn( + "Passed filesystem is null, so just deleting files without archiving for {}," + "family={}", + Bytes.toString(regionInfo.getRegionName()), Bytes.toString(family)); deleteStoreFilesWithoutArchiving(compactedFiles); return; } @@ -350,8 +348,8 @@ private static void archive(FileSystem fs, RegionInfo regionInfo, byte[] family, } // build the archive path - if (regionInfo == null || family == null) throw new IOException( - "Need to have a region and a family to archive from."); + if (regionInfo == null || family == null) + throw new IOException("Need to have a region and a family to archive from."); // make sure we don't archive if we can't and that the archive dir exists if (!fs.mkdirs(storeArchiveDir)) { throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:" @@ -364,16 +362,17 @@ private static void archive(FileSystem fs, RegionInfo regionInfo, byte[] family, // Wrap the storefile into a File StoreToFile getStorePath = new StoreToFile(fs); Collection storeFiles = - compactedFiles.stream().map(getStorePath).collect(Collectors.toList()); + compactedFiles.stream().map(getStorePath).collect(Collectors.toList()); // do the actual archive List failedArchive = - resolveAndArchive(fs, storeArchiveDir, storeFiles, EnvironmentEdgeManager.currentTime()); + resolveAndArchive(fs, storeArchiveDir, storeFiles, EnvironmentEdgeManager.currentTime()); - if (!failedArchive.isEmpty()){ - throw new FailedArchiveException("Failed to archive/delete all the files for region:" - + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family) - + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.", + if (!failedArchive.isEmpty()) { + throw new FailedArchiveException( + "Failed to archive/delete all the files for region:" + + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family) + + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.", failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList())); } } @@ -402,22 +401,20 @@ public static void archiveStoreFile(Configuration conf, FileSystem fs, RegionInf File file = new FileablePath(fs, storeFile); if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) { throw new IOException("Failed to archive/delete the file for region:" - + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) - + " into " + storeArchiveDir + ". Something is probably awry on the filesystem."); + + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) + " into " + + storeArchiveDir + ". Something is probably awry on the filesystem."); } } /** - * Resolve any conflict with an existing archive file via timestamp-append - * renaming of the existing file and then archive the passed in files. 
+ * Resolve any conflict with an existing archive file via timestamp-append renaming of the + * existing file and then archive the passed in files. * @param fs {@link FileSystem} on which to archive the files - * @param baseArchiveDir base archive directory to store the files. If any of - * the files to archive are directories, will append the name of the - * directory to the base archive directory name, creating a parallel - * structure. + * @param baseArchiveDir base archive directory to store the files. If any of the files to archive + * are directories, will append the name of the directory to the base archive directory + * name, creating a parallel structure. * @param toArchive files/directories that need to be archvied - * @param start time the archiving started - used for resolving archive - * conflicts. + * @param start time the archiving started - used for resolving archive conflicts. * @return the list of failed to archive files. * @throws IOException if an unexpected file operation exception occurred */ @@ -514,15 +511,17 @@ private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, long curMtime = curStatus.getModificationTime(); long archiveMtime = archiveStatus.getModificationTime(); if (curLen != archiveLen) { - LOG.error("{} already exists in archive with different size than current {}." - + " archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}", + LOG.error( + "{} already exists in archive with different size than current {}." + + " archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}", archiveFile, currentFile, archiveLen, curLen, archiveMtime, curMtime); - throw new IOException(archiveFile + " already exists in archive with different size" + - " than " + currentFile); + throw new IOException(archiveFile + " already exists in archive with different size" + + " than " + currentFile); } - LOG.error("{} already exists in archive, moving to timestamped backup and overwriting" - + " current {}. archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}", + LOG.error( + "{} already exists in archive, moving to timestamped backup and overwriting" + + " current {}. archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}", archiveFile, currentFile, archiveLen, curLen, archiveMtime, curMtime); // move the archive file to the stamped backup @@ -565,8 +564,9 @@ private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, try { success = currentFile.moveAndClose(archiveFile); } catch (FileNotFoundException fnfe) { - LOG.warn("Failed to archive " + currentFile + - " because it does not exist! Skipping and continuing on.", fnfe); + LOG.warn("Failed to archive " + currentFile + + " because it does not exist! 
Skipping and continuing on.", + fnfe); success = true; } catch (IOException e) { LOG.warn("Failed to archive " + currentFile + " on try #" + i, e); @@ -654,8 +654,7 @@ public File apply(FileStatus input) { } /** - * Convert the {@link HStoreFile} into something we can manage in the archive - * methods + * Convert the {@link HStoreFile} into something we can manage in the archive methods */ private static class StoreToFile extends FileConverter { public StoreToFile(FileSystem fs) { @@ -692,8 +691,8 @@ public File(FileSystem fs) { abstract boolean isFile() throws IOException; /** - * @return if this is a directory, returns all the children in the - * directory, otherwise returns an empty list + * @return if this is a directory, returns all the children in the directory, otherwise returns + * an empty list * @throws IOException */ abstract Collection getChildren() throws IOException; @@ -705,8 +704,7 @@ public File(FileSystem fs) { abstract void close() throws IOException; /** - * @return the name of the file (not the full fs path, just the individual - * file name) + * @return the name of the file (not the full fs path, just the individual file name) */ abstract String getName(); @@ -788,8 +786,7 @@ Path getPath() { } /** - * {@link File} adapter for a {@link HStoreFile} living on a {@link FileSystem} - * . + * {@link File} adapter for a {@link HStoreFile} living on a {@link FileSystem} . */ private static class FileableStoreFile extends File { HStoreFile file; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java index 9978f4a67d80..ab2b00702de3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Connection; @@ -47,10 +46,10 @@ class HFileArchiveManager { public HFileArchiveManager(Connection connection, Configuration conf) throws ZooKeeperConnectionException, IOException { - this.zooKeeper = new ZKWatcher(conf, "hfileArchiveManager-on-" + connection.toString(), - connection); - this.archiveZnode = ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(), - this.zooKeeper); + this.zooKeeper = + new ZKWatcher(conf, "hfileArchiveManager-on-" + connection.toString(), connection); + this.archiveZnode = + ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(), this.zooKeeper); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java index a4daaf011391..80cdc7587a15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +36,7 @@ public class HFileArchiveTableMonitor { private final Set archivedTables = new TreeSet<>(); /** - * Set the tables to be archived. Internally adds each table and attempts to - * register it. + * Set the tables to be archived. Internally adds each table and attempts to register it. *

      * Note: All previous tables will be removed in favor of these tables. * @param tables add each of the tables to be archived. @@ -48,8 +47,7 @@ public synchronized void setArchiveTables(List tables) { } /** - * Add the named table to be those being archived. Attempts to register the - * table + * Add the named table to be those being archived. Attempts to register the table * @param table name of the table to be registered */ public synchronized void addTable(String table) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java index 946f7593d43f..262cd8d54d7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,10 +35,9 @@ * currently being archived. *

      * This only works properly if the - * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner} - * is also enabled (it always should be), since it may take a little time - * for the ZK notification to propagate, in which case we may accidentally - * delete some files. + * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner} is also enabled (it always + * should be), since it may take a little time for the ZK notification to propagate, in which case + * we may accidentally delete some files. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate { @@ -55,7 +54,7 @@ public boolean isFileDeletable(FileStatus fStat) { if (fStat.isDirectory()) { return true; } - + Path file = fStat.getPath(); // check to see if FileStatus[] deleteStatus = CommonFSUtils.listStatus(this.fs, file, null); @@ -72,8 +71,8 @@ public boolean isFileDeletable(FileStatus fStat) { String tableName = table.getName(); boolean ret = !archiveTracker.keepHFiles(tableName); - LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + - tableName); + LOG.debug( + "Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName); return ret; } catch (IOException e) { LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java index 49b0e827758b..cbbfe3c62380 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.zookeeper.ZKListener; @@ -49,8 +48,8 @@ private TableHFileArchiveTracker(ZKWatcher watcher, HFileArchiveTableMonitor mon super(watcher); watcher.registerListener(this); this.monitor = monitor; - this.archiveHFileZNode = ZKTableArchiveClient.getArchiveZNode(watcher.getConfiguration(), - watcher); + this.archiveHFileZNode = + ZKTableArchiveClient.getArchiveZNode(watcher.getConfiguration(), watcher); } /** @@ -84,8 +83,8 @@ public void nodeCreated(String path) { try { addAndReWatchTable(path); } catch (KeeperException e) { - LOG.warn("Couldn't read zookeeper data for table for path:" + path - + ", not preserving a table.", e); + LOG.warn( + "Couldn't read zookeeper data for table for path:" + path + ", not preserving a table.", e); } } @@ -252,8 +251,7 @@ public static TableHFileArchiveTracker create(Configuration conf) * @return ZooKeeper tracker to monitor for this server if this server should archive hfiles for a * given table */ - private static TableHFileArchiveTracker create(ZKWatcher zkw, - HFileArchiveTableMonitor monitor) { + private static TableHFileArchiveTracker create(ZKWatcher zkw, HFileArchiveTableMonitor monitor) { return new TableHFileArchiveTracker(zkw, monitor); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java index 59c7537c84a0..f6b617a15469 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.client.Connection; @@ -138,8 +137,8 @@ public boolean getArchivingEnabled(String table) throws IOException, KeeperExcep * @throws KeeperException if we can't reach zookeeper * @throws IOException if an unexpected network issue occurs */ - private synchronized HFileArchiveManager createHFileArchiveManager() throws KeeperException, - IOException { + private synchronized HFileArchiveManager createHFileArchiveManager() + throws KeeperException, IOException { return new HFileArchiveManager(this.connection, this.getConf()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java index 11c4f4f359cd..ebf5cdc17380 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -85,8 +85,8 @@ CompletableFuture getRegionLocations(TableName tableName, byte[ * @param replicate flags if the bulkload is targeted for replication. */ CompletableFuture bulkLoad(TableName tableName, List> familyPaths, - byte[] row, boolean assignSeqNum, Token userToken, String bulkToken, boolean copyFiles, - List clusterIds, boolean replicate); + byte[] row, boolean assignSeqNum, Token userToken, String bulkToken, boolean copyFiles, + List clusterIds, boolean replicate); /** * Clean up after finishing bulk load, no matter success or not. @@ -97,7 +97,7 @@ CompletableFuture bulkLoad(TableName tableName, List> getLiveRegionServers(MasterAddressTracker masterAddrTracker, - int count); + int count); /** * Get the bootstrap node list of another region server. @@ -108,5 +108,5 @@ CompletableFuture> getLiveRegionServers(MasterAddressTracker ma * Replicate wal edits to a secondary replica. */ CompletableFuture replicate(RegionInfo replica, List entries, int numRetries, - long rpcTimeoutNs, long operationTimeoutNs); + long rpcTimeoutNs, long operationTimeoutNs); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java index 789d6162988f..ca64b2d11c6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -93,51 +93,52 @@ public CompletableFuture getRegionLocations(TableName tableName @Override public CompletableFuture prepareBulkLoad(TableName tableName) { return callerFactory. single().table(tableName).row(HConstants.EMPTY_START_ROW) - .action((controller, loc, stub) -> ConnectionUtils - . call(controller, loc, - stub, tableName, (rn, tn) -> { - RegionSpecifier region = - RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn); - return PrepareBulkLoadRequest.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tn)).setRegion(region).build(); - }, (s, c, req, done) -> s.prepareBulkLoad(c, req, done), - (c, resp) -> resp.getBulkToken())) - .call(); + .action((controller, loc, stub) -> ConnectionUtils + . call(controller, + loc, stub, tableName, (rn, tn) -> { + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn); + return PrepareBulkLoadRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tn)).setRegion(region).build(); + }, (s, c, req, done) -> s.prepareBulkLoad(c, req, done), + (c, resp) -> resp.getBulkToken())) + .call(); } @Override public CompletableFuture bulkLoad(TableName tableName, - List> familyPaths, byte[] row, boolean assignSeqNum, Token userToken, - String bulkToken, boolean copyFiles, List clusterIds, boolean replicate) { + List> familyPaths, byte[] row, boolean assignSeqNum, Token userToken, + String bulkToken, boolean copyFiles, List clusterIds, boolean replicate) { return callerFactory. single().table(tableName).row(row) - .action((controller, loc, stub) -> ConnectionUtils - . 
call(controller, loc, stub, - null, - (rn, nil) -> RequestConverter.buildBulkLoadHFileRequest(familyPaths, rn, assignSeqNum, - userToken, bulkToken, copyFiles, clusterIds, replicate), - (s, c, req, done) -> s.bulkLoadHFile(c, req, done), (c, resp) -> resp.getLoaded())) - .call(); + .action((controller, loc, stub) -> ConnectionUtils + . call(controller, loc, + stub, null, + (rn, nil) -> RequestConverter.buildBulkLoadHFileRequest(familyPaths, rn, assignSeqNum, + userToken, bulkToken, copyFiles, clusterIds, replicate), + (s, c, req, done) -> s.bulkLoadHFile(c, req, done), (c, resp) -> resp.getLoaded())) + .call(); } @Override public CompletableFuture cleanupBulkLoad(TableName tableName, String bulkToken) { return callerFactory. single().table(tableName).row(HConstants.EMPTY_START_ROW) - .action((controller, loc, stub) -> ConnectionUtils - . call(controller, loc, stub, - bulkToken, (rn, bt) -> { - RegionSpecifier region = - RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn); - return CleanupBulkLoadRequest.newBuilder().setRegion(region).setBulkToken(bt).build(); - }, (s, c, req, done) -> s.cleanupBulkLoad(c, req, done), (c, resp) -> null)) - .call(); + .action((controller, loc, stub) -> ConnectionUtils + . call(controller, loc, + stub, bulkToken, (rn, bt) -> { + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, rn); + return CleanupBulkLoadRequest.newBuilder().setRegion(region).setBulkToken(bt) + .build(); + }, (s, c, req, done) -> s.cleanupBulkLoad(c, req, done), (c, resp) -> null)) + .call(); } @Override public CompletableFuture> - getLiveRegionServers(MasterAddressTracker masterAddrTracker, int count) { + getLiveRegionServers(MasterAddressTracker masterAddrTracker, int count) { CompletableFuture> future = new CompletableFuture<>(); - RegionServerStatusService.Interface stub = RegionServerStatusService - .newStub(rpcClient.createRpcChannel(masterAddrTracker.getMasterAddress(), user, rpcTimeout)); + RegionServerStatusService.Interface stub = RegionServerStatusService.newStub( + rpcClient.createRpcChannel(masterAddrTracker.getMasterAddress(), user, rpcTimeout)); HBaseRpcController controller = rpcControllerFactory.newController(); stub.getLiveRegionServers(controller, GetLiveRegionServersRequest.newBuilder().setCount(count).build(), resp -> { @@ -145,7 +146,7 @@ public CompletableFuture cleanupBulkLoad(TableName tableName, String bulkT future.completeExceptionally(controller.getFailed()); } else { future.complete(resp.getServerList().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList())); + .collect(Collectors.toList())); } }); return future; @@ -155,7 +156,7 @@ public CompletableFuture cleanupBulkLoad(TableName tableName, String bulkT public CompletableFuture> getAllBootstrapNodes(ServerName regionServer) { CompletableFuture> future = new CompletableFuture<>(); BootstrapNodeService.Interface stub = - BootstrapNodeService.newStub(rpcClient.createRpcChannel(regionServer, user, rpcTimeout)); + BootstrapNodeService.newStub(rpcClient.createRpcChannel(regionServer, user, rpcTimeout)); HBaseRpcController controller = rpcControllerFactory.newController(); stub.getAllBootstrapNodes(controller, GetAllBootstrapNodesRequest.getDefaultInstance(), resp -> { @@ -163,18 +164,17 @@ public CompletableFuture> getAllBootstrapNodes(ServerName regio future.completeExceptionally(controller.getFailed()); } else { future.complete(resp.getNodeList().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList())); + 
.collect(Collectors.toList())); } }); return future; } @Override - public CompletableFuture replicate(RegionInfo replica, - List entries, int retries, long rpcTimeoutNs, - long operationTimeoutNs) { + public CompletableFuture replicate(RegionInfo replica, List entries, int retries, + long rpcTimeoutNs, long operationTimeoutNs) { return new AsyncRegionReplicationRetryingCaller(RETRY_TIMER, this, - ConnectionUtils.retries2Attempts(retries), rpcTimeoutNs, operationTimeoutNs, replica, entries) - .call(); + ConnectionUtils.retries2Attempts(retries), rpcTimeoutNs, operationTimeoutNs, replica, + entries).call(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java index 726559fc28c0..89617e398a6b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionReplicationRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,19 +50,19 @@ public class AsyncRegionReplicationRetryingCaller extends AsyncRpcRetryingCaller private boolean useReplay; public AsyncRegionReplicationRetryingCaller(HashedWheelTimer retryTimer, - AsyncClusterConnectionImpl conn, int maxAttempts, long rpcTimeoutNs, long operationTimeoutNs, - RegionInfo replica, List entries) { + AsyncClusterConnectionImpl conn, int maxAttempts, long rpcTimeoutNs, long operationTimeoutNs, + RegionInfo replica, List entries) { super(retryTimer, conn, ConnectionUtils.getPriority(replica.getTable()), - conn.connConf.getPauseNs(), conn.connConf.getPauseForCQTBENs(), maxAttempts, - operationTimeoutNs, rpcTimeoutNs, conn.connConf.getStartLogErrorsCnt()); + conn.connConf.getPauseNs(), conn.connConf.getPauseForCQTBENs(), maxAttempts, + operationTimeoutNs, rpcTimeoutNs, conn.connConf.getStartLogErrorsCnt()); this.replica = replica; this.entries = entries.toArray(new Entry[0]); } @Override protected Throwable preProcessError(Throwable error) { - if (error instanceof DoNotRetryIOException && - error.getCause() instanceof UnsupportedOperationException) { + if (error instanceof DoNotRetryIOException + && error.getCause() instanceof UnsupportedOperationException) { // fallback to use replay, and also return the cause to let the upper retry useReplay = true; return error.getCause(); @@ -91,7 +91,7 @@ private void call(HRegionLocation loc) { return; } Pair pair = ReplicationProtobufUtil - .buildReplicateWALEntryRequest(entries, replica.getEncodedNameAsBytes(), null, null, null); + .buildReplicateWALEntryRequest(entries, replica.getEncodedNameAsBytes(), null, null, null); resetCallTimeout(); controller.setCellScanner(pair.getSecond()); if (useReplay) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java index 8ff869fcdb94..a0813b253553 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -128,8 +128,8 @@ public CompletableFuture getStoreFile(GetStoreFileRequest return call((stub, controller, done) -> stub.getStoreFile(controller, request, done)); } - public CompletableFuture getOnlineRegion( - GetOnlineRegionRequest request) { + public CompletableFuture + getOnlineRegion(GetOnlineRegionRequest request) { return call((stub, controller, done) -> stub.getOnlineRegion(controller, request, done)); } @@ -149,8 +149,8 @@ public CompletableFuture flushRegion(FlushRegionRequest req return call((stub, controller, done) -> stub.flushRegion(controller, request, done)); } - public CompletableFuture compactionSwitch( - CompactionSwitchRequest request) { + public CompletableFuture + compactionSwitch(CompactionSwitchRequest request) { return call((stub, controller, done) -> stub.compactionSwitch(controller, request, done)); } @@ -158,8 +158,8 @@ public CompletableFuture compactRegion(CompactRegionReque return call((stub, controller, done) -> stub.compactRegion(controller, request, done)); } - public CompletableFuture replicateWALEntry( - ReplicateWALEntryRequest request, CellScanner cellScanner, int timeout) { + public CompletableFuture + replicateWALEntry(ReplicateWALEntryRequest request, CellScanner cellScanner, int timeout) { return call((stub, controller, done) -> { controller.setCallTimeout(timeout); stub.replicateWALEntry(controller, request, done); @@ -183,13 +183,13 @@ public CompletableFuture stopServer(StopServerRequest reques return call((stub, controller, done) -> stub.stopServer(controller, request, done)); } - public CompletableFuture updateFavoredNodes( - UpdateFavoredNodesRequest request) { + public CompletableFuture + updateFavoredNodes(UpdateFavoredNodesRequest request) { return call((stub, controller, done) -> stub.updateFavoredNodes(controller, request, done)); } - public CompletableFuture updateConfiguration( - UpdateConfigurationRequest request) { + public CompletableFuture + updateConfiguration(UpdateConfigurationRequest request) { return call((stub, controller, done) -> stub.updateConfiguration(controller, request, done)); } @@ -197,23 +197,23 @@ public CompletableFuture getRegionLoad(GetRegionLoadReque return call((stub, controller, done) -> stub.getRegionLoad(controller, request, done)); } - public CompletableFuture clearCompactionQueues( - ClearCompactionQueuesRequest request) { + public CompletableFuture + clearCompactionQueues(ClearCompactionQueuesRequest request) { return call((stub, controller, done) -> stub.clearCompactionQueues(controller, request, done)); } - public CompletableFuture clearRegionBlockCache( - ClearRegionBlockCacheRequest request) { + public CompletableFuture + clearRegionBlockCache(ClearRegionBlockCacheRequest request) { return call((stub, controller, done) -> stub.clearRegionBlockCache(controller, request, done)); } - public CompletableFuture getSpaceQuotaSnapshots( - GetSpaceQuotaSnapshotsRequest request) { + public CompletableFuture + getSpaceQuotaSnapshots(GetSpaceQuotaSnapshotsRequest request) { return call((stub, controller, done) -> stub.getSpaceQuotaSnapshots(controller, request, done)); } - public CompletableFuture executeProcedures( - ExecuteProceduresRequest request) { + public CompletableFuture + executeProcedures(ExecuteProceduresRequest request) { return call((stub, controller, done) -> stub.executeProcedures(controller, request, done)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java index 1feafc18993f..a9f70828c2af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -38,8 +37,8 @@ import org.slf4j.LoggerFactory; /** - * A client scanner for a region opened for read-only on the client side. Assumes region data - * is not changing. + * A client scanner for a region opened for read-only on the client side. Assumes region data is not + * changing. */ @InterfaceAudience.Private public class ClientSideRegionScanner extends AbstractClientScanner { @@ -50,9 +49,8 @@ public class ClientSideRegionScanner extends AbstractClientScanner { RegionScanner scanner; List values; - public ClientSideRegionScanner(Configuration conf, FileSystem fs, - Path rootDir, TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics) - throws IOException { + public ClientSideRegionScanner(Configuration conf, FileSystem fs, Path rootDir, + TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics) throws IOException { // region is immutable, set isolation level scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); @@ -67,7 +65,7 @@ public ClientSideRegionScanner(Configuration conf, FileSystem fs, // IndexOnlyLruBlockCache and set a value to HBASE_CLIENT_SCANNER_BLOCK_CACHE_SIZE_KEY conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU"); conf.setIfUnset(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, - String.valueOf(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT)); + String.valueOf(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT)); // don't allow L2 bucket cache for non RS process to avoid unexpected disk usage. conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY); region.setBlockCache(BlockCacheFactory.createBlockCache(conf)); @@ -95,7 +93,7 @@ public Result next() throws IOException { values.clear(); scanner.nextRaw(values); if (values.isEmpty()) { - //we are done + // we are done return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java index 0c216c6daa38..c8a45f79e0f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,21 +33,21 @@ public final class ClusterConnectionFactory { public static final String HBASE_SERVER_CLUSTER_CONNECTION_IMPL = - "hbase.server.cluster.connection.impl"; + "hbase.server.cluster.connection.impl"; private ClusterConnectionFactory() { } private static AsyncClusterConnection createAsyncClusterConnection(Configuration conf, - ConnectionRegistry registry, SocketAddress localAddress, User user) throws IOException { + ConnectionRegistry registry, SocketAddress localAddress, User user) throws IOException { String clusterId = FutureUtils.get(registry.getClusterId()); Class clazz = - conf.getClass(HBASE_SERVER_CLUSTER_CONNECTION_IMPL, AsyncClusterConnectionImpl.class, - AsyncClusterConnection.class); + conf.getClass(HBASE_SERVER_CLUSTER_CONNECTION_IMPL, AsyncClusterConnectionImpl.class, + AsyncClusterConnection.class); try { return user - .runAs((PrivilegedExceptionAction) () -> ReflectionUtils - .newInstance(clazz, conf, registry, clusterId, localAddress, user)); + .runAs((PrivilegedExceptionAction) () -> ReflectionUtils + .newInstance(clazz, conf, registry, clusterId, localAddress, user)); } catch (Exception e) { throw new IOException(e); } @@ -63,7 +63,7 @@ private static AsyncClusterConnection createAsyncClusterConnection(Configuration * change later if we want a {@link java.util.concurrent.CompletableFuture} here. */ public static AsyncClusterConnection createAsyncClusterConnection(Configuration conf, - SocketAddress localAddress, User user) throws IOException { + SocketAddress localAddress, User user) throws IOException { return createAsyncClusterConnection(conf, ConnectionRegistryFactory.getRegistry(conf), localAddress, user); } @@ -73,8 +73,8 @@ public static AsyncClusterConnection createAsyncClusterConnection(Configuration * {@link ConnectionRegistryEndpoint}. */ public static AsyncClusterConnection createAsyncClusterConnection( - ConnectionRegistryEndpoint endpoint, Configuration conf, SocketAddress localAddress, User user) - throws IOException { + ConnectionRegistryEndpoint endpoint, Configuration conf, SocketAddress localAddress, + User user) throws IOException { ShortCircuitConnectionRegistry registry = new ShortCircuitConnectionRegistry(endpoint); return createAsyncClusterConnection(conf, registry, localAddress, user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java index c8b0a26e7878..a75faf3db75b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java index 0f05b21c05b6..dc8abfd515e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java index a6efc1134a77..1eb4e2d08ea8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java index dd03ab26675d..f5238b84d0e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -42,18 +41,14 @@ * A Scanner which performs a scan over snapshot files. Using this class requires copying the * snapshot to a temporary empty directory, which will copy the snapshot reference files into that * directory. Actual data files are not copied. - * *

      - * This also allows one to run the scan from an - * online or offline hbase cluster. The snapshot files can be exported by using the - * org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, - * to a pure-hdfs cluster, and this scanner can be used to - * run the scan directly over the snapshot files. The snapshot should not be deleted while there - * are open scanners reading from snapshot files. - * + * This also allows one to run the scan from an online or offline hbase cluster. The snapshot files + * can be exported by using the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, to a pure-hdfs + * cluster, and this scanner can be used to run the scan directly over the snapshot files. The + * snapshot should not be deleted while there are open scanners reading from snapshot files. *

      - * An internal RegionScanner is used to execute the {@link Scan} obtained - * from the user for each region in the snapshot. + * An internal RegionScanner is used to execute the {@link Scan} obtained from the user for each + * region in the snapshot. *
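(Aside: the TableSnapshotScanner javadoc above describes the per-region model: the scanner walks the snapshot's regions in order and drives one ClientSideRegionScanner at a time, moving on when the current region is exhausted. Below is a generic, HBase-free sketch of that chained-iterator shape; ChainedRegionIterator and its fields are invented for this illustration only.)

```java
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

/**
 * Illustrative only: chains per-region iterators the way TableSnapshotScanner chains
 * one ClientSideRegionScanner per snapshot region.
 */
public class ChainedRegionIterator<T> implements Iterator<T> {
  private final List<Iterator<T>> regions; // one "scanner" per region, in sorted region order
  private int currentRegion = -1;          // mirrors the currentRegion index in the class above
  private Iterator<T> current;

  public ChainedRegionIterator(List<Iterator<T>> regions) {
    this.regions = regions;
  }

  @Override
  public boolean hasNext() {
    // Advance to the next non-empty region, like next() re-opening a region scanner.
    while ((current == null || !current.hasNext()) && currentRegion + 1 < regions.size()) {
      current = regions.get(++currentRegion);
    }
    return current != null && current.hasNext();
  }

  @Override
  public T next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    return current.next();
  }
}
```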

      * HBase owns all the data and snapshot files on the filesystem. Only the HBase user can read from * snapshot files and data files. HBase also enforces security because all the requests are handled @@ -62,8 +57,8 @@ * permissions to access snapshot and reference files. This means that to run mapreduce over * snapshot files, the job has to be run as the HBase user or the user must have group or other * priviledges in the filesystem (See HBASE-8369). Note that, given other users access to read from - * snapshot/data files will completely circumvent the access control enforced by HBase. - * See org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat. + * snapshot/data files will completely circumvent the access control enforced by HBase. See + * org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat. */ @InterfaceAudience.Private public class TableSnapshotScanner extends AbstractClientScanner { @@ -80,10 +75,11 @@ public class TableSnapshotScanner extends AbstractClientScanner { private TableDescriptor htd; private final boolean snapshotAlreadyRestored; - private ClientSideRegionScanner currentRegionScanner = null; + private ClientSideRegionScanner currentRegionScanner = null; private int currentRegion = -1; private int numOfCompleteRows = 0; + /** * Creates a TableSnapshotScanner. * @param conf the configuration @@ -150,7 +146,7 @@ private void openWithoutRestoringSnapshot() throws IOException { regions = new ArrayList<>(regionManifests.size()); regionManifests.stream().map(r -> ProtobufUtil.toRegionInfo(r.getRegionInfo())) - .filter(this::isValidRegion).sorted().forEach(r -> regions.add(r)); + .filter(this::isValidRegion).sorted().forEach(r -> regions.add(r)); htd = manifest.getTableDescriptor(); } @@ -184,8 +180,8 @@ public Result next() throws IOException { } RegionInfo hri = regions.get(currentRegion); - currentRegionScanner = new ClientSideRegionScanner(conf, fs, - restoreDir, htd, hri, scan, scanMetrics); + currentRegionScanner = + new ClientSideRegionScanner(conf, fs, restoreDir, htd, hri, scan, scanMetrics); if (this.scanMetrics != null) { this.scanMetrics.countOfRegions.incrementAndGet(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java index 8418161048f6..161c44f4ddbb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.RpcCallContext; import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; - /** * Class to help with parsing the version info. */ @@ -40,9 +39,8 @@ public static boolean currentClientHasMinimumVersion(int major, int minor) { return hasMinimumVersion(getCurrentClientVersionInfo(), major, minor); } - public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, - int major, - int minor) { + public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, int major, + int minor) { if (versionInfo != null) { if (versionInfo.hasVersionMajor() && versionInfo.hasVersionMinor()) { int clientMajor = versionInfo.getVersionMajor(); @@ -70,15 +68,15 @@ public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, } /** - * We intend to use the local version for service call shortcut(s), so we use an interface - * compatible with a typical service call, with 2 args, return type, and an exception type. + * We intend to use the local version for service call shortcut(s), so we use an interface + * compatible with a typical service call, with 2 args, return type, and an exception type. */ public interface ServiceCallFunction { R apply(T1 t1, T2 t2) throws E; } - public static R callWithVersion( - ServiceCallFunction f, T1 t1, T2 t2) throws E { + public static R + callWithVersion(ServiceCallFunction f, T1 t1, T2 t2) throws E { // Note: just as RpcServer.CurCall, this will only apply on the current thread. NonCallVersion.set(ProtobufUtil.getVersionInfo()); try { @@ -92,27 +90,22 @@ public static R callWithVersion( * @return the versionInfo extracted from the current RpcCallContext */ public static HBaseProtos.VersionInfo getCurrentClientVersionInfo() { - return RpcServer.getCurrentCall().map( - RpcCallContext::getClientVersionInfo).orElse(NonCallVersion.get()); + return RpcServer.getCurrentCall().map(RpcCallContext::getClientVersionInfo) + .orElse(NonCallVersion.get()); } - /** * @param version - * @return the passed-in version int as a version String - * (e.g. 0x0103004 is 1.3.4) + * @return the passed-in version int as a version String (e.g. 0x0103004 is 1.3.4) */ public static String versionNumberToString(final int version) { - return String.format("%d.%d.%d", - ((version >> 20) & 0xff), - ((version >> 12) & 0xff), - (version & 0xfff)); + return String.format("%d.%d.%d", ((version >> 20) & 0xff), ((version >> 12) & 0xff), + (version & 0xfff)); } /** - * Pack the full number version in a int. by shifting each component by 8bit, - * except the dot release which has 12bit. - * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) + * Pack the full number version in a int. by shifting each component by 8bit, except the dot + * release which has 12bit. Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) * @param versionInfo the VersionInfo object to pack * @return the version number as int. (e.g. 0x0103004 is 1.3.4) */ @@ -130,13 +123,12 @@ public static int getVersionNumber(final HBaseProtos.VersionInfo versionInfo) { return buildVersionNumber(clientMajor, clientMinor, 0); } } - return(0); // no version + return (0); // no version } /** - * Pack the full number version in a int. by shifting each component by 8bit, - * except the dot release which has 12bit. 
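(Aside: the VersionInfoUtil comments being rewrapped here describe the packing scheme: major and minor each occupy 8 bits (shifted left by 20 and 12) and the dot release takes the low 12 bits, so 1.3.4 packs to 0x0103004 and 2.1.0 to 0x0201000. Below is a tiny self-contained check of that arithmetic; the method names are local to this sketch, not the HBase API.)

```java
public class VersionPackingSketch {
  /** Pack major/minor/patch as described above: 8 bits, 8 bits, then 12 bits for the patch. */
  static int pack(int major, int minor, int patch) {
    return (major << 20) | (minor << 12) | patch;
  }

  /** Unpack using the same shifts and masks as versionNumberToString above. */
  static String unpack(int version) {
    return String.format("%d.%d.%d", (version >> 20) & 0xff, (version >> 12) & 0xff, version & 0xfff);
  }

  public static void main(String[] args) {
    System.out.println(Integer.toHexString(pack(1, 3, 4))); // 103004
    System.out.println(Integer.toHexString(pack(2, 1, 0))); // 201000
    System.out.println(unpack(0x0103004));                  // 1.3.4
  }
}
```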
- * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) + * Pack the full number version in a int. by shifting each component by 8bit, except the dot + * release which has 12bit. Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) * @param major version major number * @param minor version minor number * @param patch version patch number @@ -147,8 +139,8 @@ private static int buildVersionNumber(int major, int minor, int patch) { } /** - * Returns the version components - * Examples: "1.4.3" returns [1, 4, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"] + * Returns the version components Examples: "1.4.3" returns [1, 4, 3], "4.5.6-SNAPSHOT" returns + * [4, 5, 6, "SNAPSHOT"] * @return the components of the version string */ private static String[] getVersionComponents(final HBaseProtos.VersionInfo versionInfo) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java index e27574a0f924..1e57c0c4eed1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java @@ -15,57 +15,52 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.locking; import java.io.IOException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; -import org.apache.hadoop.hbase.util.Threads; /** - * Lock for HBase Entity either a Table, a Namespace, or Regions. - * - * These are remote locks which live on master, and need periodic heartbeats to keep them alive. - * (Once we request the lock, internally an heartbeat thread will be started on the client). - * If master does not receive the heartbeat in time, it'll release the lock and make it available - * to other users. - * - *

      Use {@link LockServiceClient} to build instances. Then call {@link #requestLock()}. - * {@link #requestLock} will contact master to queue the lock and start the heartbeat thread - * which will check lock's status periodically and once the lock is acquired, it will send the - * heartbeats to the master. - * - *

      Use {@link #await} or {@link #await(long, TimeUnit)} to wait for the lock to be acquired. - * Always call {@link #unlock()} irrespective of whether lock was acquired or not. If the lock - * was acquired, it'll be released. If it was not acquired, it is possible that master grants the - * lock in future and the heartbeat thread keeps it alive forever by sending heartbeats. - * Calling {@link #unlock()} will stop the heartbeat thread and cancel the lock queued on master. - * - *

      There are 4 ways in which these remote locks may be released/can be lost: - *

• Call {@link #unlock}.
• Lock times out on master: Can happen because of network issues, GC pauses, etc.
- * Worker thread will call the given abortable as soon as it detects such a situation.
+ * Lock for HBase Entity either a Table, a Namespace, or Regions. These are remote locks which live
+ * on master, and need periodic heartbeats to keep them alive. (Once we request the lock, internally
+ * a heartbeat thread will be started on the client). If master does not receive the heartbeat in
+ * time, it'll release the lock and make it available to other users.
+ *

        + * Use {@link LockServiceClient} to build instances. Then call {@link #requestLock()}. + * {@link #requestLock} will contact master to queue the lock and start the heartbeat thread which + * will check lock's status periodically and once the lock is acquired, it will send the heartbeats + * to the master. + *

        + * Use {@link #await} or {@link #await(long, TimeUnit)} to wait for the lock to be acquired. Always + * call {@link #unlock()} irrespective of whether lock was acquired or not. If the lock was + * acquired, it'll be released. If it was not acquired, it is possible that master grants the lock + * in future and the heartbeat thread keeps it alive forever by sending heartbeats. Calling + * {@link #unlock()} will stop the heartbeat thread and cancel the lock queued on master. + *

        + * There are 4 ways in which these remote locks may be released/can be lost: + *

          + *
• Call {@link #unlock}.
• Lock times out on master: Can happen because of network issues, GC pauses, etc. Worker thread
+ * will call the given abortable as soon as it detects such a situation.
• Fail to contact master: If worker thread cannot contact master and thus fails to send
- * heartbeat before the timeout expires, it assumes that lock is lost and calls the
- * abortable.
+ * heartbeat before the timeout expires, it assumes that lock is lost and calls the abortable.
• Worker thread is interrupted.
        - * - * Use example: - * + * Use example: * EntityLock lock = lockServiceClient.*Lock(...., "exampled lock", abortable); * lock.requestLock(); * .... @@ -81,8 +76,7 @@ public class EntityLock { private static final Logger LOG = LoggerFactory.getLogger(EntityLock.class); - public static final String HEARTBEAT_TIME_BUFFER = - "hbase.client.locks.heartbeat.time.buffer.ms"; + public static final String HEARTBEAT_TIME_BUFFER = "hbase.client.locks.heartbeat.time.buffer.ms"; private final AtomicBoolean locked = new AtomicBoolean(false); private final CountDownLatch latch = new CountDownLatch(1); @@ -102,12 +96,12 @@ public class EntityLock { private Long procId = null; /** - * Abortable.abort() is called when the lease of the lock will expire. - * It's up to the user decide if simply abort the process or handle the loss of the lock - * by aborting the operation that was supposed to be under lock. + * Abortable.abort() is called when the lease of the lock will expire. It's up to the user decide + * if simply abort the process or handle the loss of the lock by aborting the operation that was + * supposed to be under lock. */ - EntityLock(Configuration conf, LockService.BlockingInterface stub, - LockRequest request, Abortable abort) { + EntityLock(Configuration conf, LockService.BlockingInterface stub, LockRequest request, + Abortable abort) { this.stub = stub; this.lockRequest = request; this.abort = abort; @@ -158,10 +152,9 @@ public boolean isLocked() { } /** - * Sends rpc to the master to request lock. - * The lock request is queued with other lock requests. - * Call {@link #await()} to wait on lock. - * Always call {@link #unlock()} after calling the below, even after error. + * Sends rpc to the master to request lock. The lock request is queued with other lock requests. + * Call {@link #await()} to wait on lock. Always call {@link #unlock()} after calling the below, + * even after error. */ public void requestLock() throws IOException { if (procId == null) { @@ -179,7 +172,7 @@ public void requestLock() throws IOException { /** * @param timeout in milliseconds. If set to 0, waits indefinitely. * @return true if lock was acquired; and false if waiting time elapsed before lock could be - * acquired. + * acquired. */ public boolean await(long timeout, TimeUnit timeUnit) throws InterruptedException { final boolean result = latch.await(timeout, timeUnit); @@ -188,7 +181,7 @@ public boolean await(long timeout, TimeUnit timeUnit) throws InterruptedExceptio LOG.info("Acquired " + lockRequestStr); } else { LOG.info(String.format("Failed acquire in %s %s of %s", timeout, timeUnit.toString(), - lockRequestStr)); + lockRequestStr)); } return result; } @@ -243,12 +236,13 @@ public void run() { if (!isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) { locked.set(true); latch.countDown(); - } else if (isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.UNLOCKED) { - // Lock timed out. - locked.set(false); - abort.abort("Lock timed out.", null); - return; - } + } else + if (isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.UNLOCKED) { + // Lock timed out. + locked.set(false); + abort.abort("Lock timed out.", null); + return; + } try { // If lock not acquired yet, poll faster so we can notify faster. 
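A minimal usage sketch of the locking flow the EntityLock javadoc in the hunk above describes, relying only on the calls visible in this diff (tableLock, requestLock, await, unlock); it is illustrative only and not part of the patch, and the wrapper class and method names are hypothetical:

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;

public class EntityLockUsageSketch {
  // Request the lock, wait for it, do the guarded work, and always unlock in a finally block,
  // as the javadoc above requires (unlock also cancels a lock that was queued but never granted).
  static void runUnderTableLock(LockServiceClient lockServiceClient, TableName tableName,
      Abortable abortable) throws IOException, InterruptedException {
    EntityLock lock =
      lockServiceClient.tableLock(tableName, true, "example: exclusive maintenance task", abortable);
    lock.requestLock();                               // queued on master; heartbeat thread starts
    try {
      if (lock.await(30000, TimeUnit.MILLISECONDS)) {
        // ... logic that requires mutual exclusion ...
      }
    } finally {
      lock.unlock();                                  // stop heartbeats, release or dequeue the lock
    }
  }
}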
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java index 24f2835af8b2..14e4388c5e73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client.locking; import java.util.List; @@ -35,12 +33,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType; /** - * Helper class to create "master locks" for namespaces, tables and regions. - * DEV-NOTE: At the moment this class is used only by the RS for MOB, - * to prevent other MOB compaction to conflict. - * The RS has already the stub of the LockService, so we have only one constructor that - * takes the LockService stub. If in the future we are going to use this in other places - * we should add a constructor that from conf or connection, creates the stub. + * Helper class to create "master locks" for namespaces, tables and regions. DEV-NOTE: At the moment + * this class is used only by the RS for MOB, to prevent other MOB compaction to conflict. The RS + * has already the stub of the LockService, so we have only one constructor that takes the + * LockService stub. If in the future we are going to use this in other places we should add a + * constructor that from conf or connection, creates the stub. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -57,23 +54,23 @@ public LockServiceClient(final Configuration conf, final LockService.BlockingInt } /** - * Create a new EntityLock object to acquire an exclusive or shared lock on a table. - * Internally, the table namespace will also be locked in shared mode. + * Create a new EntityLock object to acquire an exclusive or shared lock on a table. Internally, + * the table namespace will also be locked in shared mode. */ public EntityLock tableLock(final TableName tableName, final boolean exclusive, final String description, final Abortable abort) { LockRequest lockRequest = buildLockRequest(exclusive ? LockType.EXCLUSIVE : LockType.SHARED, - tableName.getNameAsString(), null, null, description, ng.getNonceGroup(), ng.newNonce()); + tableName.getNameAsString(), null, null, description, ng.getNonceGroup(), ng.newNonce()); return new EntityLock(conf, stub, lockRequest, abort); } /** - * LocCreate a new EntityLock object to acquire exclusive lock on a namespace. - * Clients can not acquire shared locks on namespace. + * LocCreate a new EntityLock object to acquire exclusive lock on a namespace. Clients can not + * acquire shared locks on namespace. 
*/ public EntityLock namespaceLock(String namespace, String description, Abortable abort) { - LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, - namespace, null, null, description, ng.getNonceGroup(), ng.newNonce()); + LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, namespace, null, null, + description, ng.getNonceGroup(), ng.newNonce()); return new EntityLock(conf, stub, lockRequest, abort); } @@ -82,21 +79,19 @@ public EntityLock namespaceLock(String namespace, String description, Abortable * Internally, the table and its namespace will also be locked in shared mode. */ public EntityLock regionLock(List regionInfos, String description, Abortable abort) { - LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, - null, null, regionInfos, description, ng.getNonceGroup(), ng.newNonce()); + LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE, null, null, regionInfos, + description, ng.getNonceGroup(), ng.newNonce()); return new EntityLock(conf, stub, lockRequest, abort); } @InterfaceAudience.Private - public static LockRequest buildLockRequest(final LockType type, - final String namespace, final TableName tableName, final List regionInfos, - final String description, final long nonceGroup, final long nonce) { - final LockRequest.Builder builder = LockRequest.newBuilder() - .setLockType(type) - .setNonceGroup(nonceGroup) - .setNonce(nonce); + public static LockRequest buildLockRequest(final LockType type, final String namespace, + final TableName tableName, final List regionInfos, final String description, + final long nonceGroup, final long nonce) { + final LockRequest.Builder builder = + LockRequest.newBuilder().setLockType(type).setNonceGroup(nonceGroup).setNonce(nonce); if (regionInfos != null) { - for (RegionInfo hri: regionInfos) { + for (RegionInfo hri : regionInfos) { builder.addRegionInfo(ProtobufUtil.toRegionInfo(hri)); } } else if (namespace != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java index ddbbb5fc8bdc..8a849b7ec1c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,22 +20,23 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.ExtendedCellBuilder; import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.ByteBuffInputStream; import org.apache.hadoop.hbase.nio.ByteBuff; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.ExtendedCellBuilder; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.CellProtos; /** - * Codec that just writes out Cell as a protobuf Cell Message. Does not write the mvcc stamp. 
- * Use a different codec if you want that in the stream. + * Codec that just writes out Cell as a protobuf Cell Message. Does not write the mvcc stamp. Use a + * different codec if you want that in the stream. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class MessageCodec implements Codec { @@ -48,26 +49,27 @@ static class MessageEncoder extends BaseEncoder { public void write(Cell cell) throws IOException { checkFlushed(); CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder(); - // This copies bytes from Cell to ByteString. I don't see anyway around the copy. + // This copies bytes from Cell to ByteString. I don't see anyway around the copy. // ByteString is final. builder.setRow(UnsafeByteOperations.unsafeWrap(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength())); + cell.getRowLength())); builder.setFamily(UnsafeByteOperations.unsafeWrap(cell.getFamilyArray(), - cell.getFamilyOffset(), - cell.getFamilyLength())); + cell.getFamilyOffset(), cell.getFamilyLength())); builder.setQualifier(UnsafeByteOperations.unsafeWrap(cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength())); + cell.getQualifierOffset(), cell.getQualifierLength())); builder.setTimestamp(cell.getTimestamp()); builder.setCellType(CellProtos.CellType.valueOf(cell.getTypeByte())); builder.setValue(UnsafeByteOperations.unsafeWrap(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); CellProtos.Cell pbcell = builder.build(); pbcell.writeDelimitedTo(this.out); } } static class MessageDecoder extends BaseDecoder { - private final ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + private final ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + MessageDecoder(final InputStream in) { super(in); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java index e5b6f4a166fb..de2a470bd5ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/BaseConstraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hbase.constraint; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configured; +import org.apache.yetus.audience.InterfaceAudience; /** - * Base class to use when actually implementing a {@link Constraint}. It takes - * care of getting and setting of configuration for the constraint. + * Base class to use when actually implementing a {@link Constraint}. It takes care of getting and + * setting of configuration for the constraint. 
*/ @InterfaceAudience.Private public abstract class BaseConstraint extends Configured implements Constraint { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java index c0c4b6063f99..95d61a8ccbca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.constraint; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.client.Put; +import org.apache.yetus.audience.InterfaceAudience; /** * Apply a {@link Constraint} (in traditional database terminology) to a Table. Any number of diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java index 51641b91ce67..cb7af0f9d3b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,11 @@ /** * Exception that a user defined constraint throws on failure of a - * {@link org.apache.hadoop.hbase.client.Put}. - *

Does NOT attempt the
- * {@link org.apache.hadoop.hbase.client.Put} multiple times,
- * since the constraint should fail every time for
- * the same {@link org.apache.hadoop.hbase.client.Put} (it should be
- * idempotent).
+ * {@link org.apache.hadoop.hbase.client.Put}.
+ *

        + * Does NOT attempt the {@link org.apache.hadoop.hbase.client.Put} multiple times, since the + * constraint should fail every time for the same {@link org.apache.hadoop.hbase.client.Put} + * (it should be idempotent). */ @InterfaceAudience.Private public class ConstraintException extends org.apache.hadoop.hbase.DoNotRetryIOException { @@ -36,12 +35,10 @@ public ConstraintException() { super(); } - public ConstraintException(String msg) - { + public ConstraintException(String msg) { super(msg); } - - + public ConstraintException(String msg, Throwable cause) { super(msg, cause); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java index b0a04c5044ac..67fa4153ddbd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Optional; - import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -32,15 +31,14 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; /*** * Processes multiple {@link Constraint Constraints} on a given table. *

        - * This is an ease of use mechanism - all the functionality here could be - * implemented on any given system by a coprocessor. + * This is an ease of use mechanism - all the functionality here could be implemented on any given + * system by a coprocessor. */ @InterfaceAudience.Private public class ConstraintProcessor implements RegionCoprocessor, RegionObserver { @@ -82,15 +80,15 @@ public void start(CoprocessorEnvironment environment) { } if (LOG.isInfoEnabled()) { - LOG.info("Finished loading " + constraints.size() - + " user Constraints on table: " + desc.getTableName()); + LOG.info("Finished loading " + constraints.size() + " user Constraints on table: " + + desc.getTableName()); } } @Override - public void prePut(ObserverContext e, Put put, - WALEdit edit, Durability durability) throws IOException { + public void prePut(ObserverContext e, Put put, WALEdit edit, + Durability durability) throws IOException { // check the put against the stored constraints for (Constraint c : constraints) { c.check(put); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java index a9438e3f25e7..98c34b411875 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,7 +56,7 @@ private Constraints() { private static final Logger LOG = LoggerFactory.getLogger(Constraints.class); private static final String CONSTRAINT_HTD_KEY_PREFIX = "constraint $"; private static final Pattern CONSTRAINT_HTD_ATTR_KEY_PATTERN = - Pattern.compile(CONSTRAINT_HTD_KEY_PREFIX, Pattern.LITERAL); + Pattern.compile(CONSTRAINT_HTD_KEY_PREFIX, Pattern.LITERAL); // configuration key for if the constraint is enabled private static final String ENABLED_KEY = "_ENABLED"; @@ -105,7 +105,7 @@ public static TableDescriptorBuilder disable(TableDescriptorBuilder builder) thr public static TableDescriptorBuilder remove(TableDescriptorBuilder builder) throws IOException { disable(builder); return builder - .removeValue((k, v) -> CONSTRAINT_HTD_ATTR_KEY_PATTERN.split(k.toString()).length == 2); + .removeValue((k, v) -> CONSTRAINT_HTD_ATTR_KEY_PATTERN.split(k.toString()).length == 2); } /** @@ -127,7 +127,7 @@ public static boolean has(TableDescriptor desc, Class claz * {@code null} otherwise. */ private static Pair getKeyValueForClass(TableDescriptor desc, - Class clazz) { + Class clazz) { // get the serialized version of the constraint String key = serializeConstraintClass(clazz); String value = desc.getValue(key); @@ -143,7 +143,7 @@ private static Pair getKeyValueForClass(TableDescriptor desc, * {@code null} otherwise. */ private static Pair getKeyValueForClass(TableDescriptorBuilder builder, - Class clazz) { + Class clazz) { // get the serialized version of the constraint String key = serializeConstraintClass(clazz); String value = builder.getValue(key); @@ -168,7 +168,7 @@ private static Pair getKeyValueForClass(TableDescriptorBuilder b */ @SafeVarargs public static TableDescriptorBuilder add(TableDescriptorBuilder builder, - Class... constraints) throws IOException { + Class... 
constraints) throws IOException { // make sure constraints are enabled enable(builder); long priority = getNextPriority(builder); @@ -199,7 +199,7 @@ public static TableDescriptorBuilder add(TableDescriptorBuilder builder, */ @SafeVarargs public static TableDescriptorBuilder add(TableDescriptorBuilder builder, - Pair, Configuration>... constraints) throws IOException { + Pair, Configuration>... constraints) throws IOException { enable(builder); long priority = getNextPriority(builder); for (Pair, Configuration> pair : constraints) { @@ -222,7 +222,7 @@ public static TableDescriptorBuilder add(TableDescriptorBuilder builder, * be enforced. */ public static TableDescriptorBuilder add(TableDescriptorBuilder builder, - Class constraint, Configuration conf) throws IOException { + Class constraint, Configuration conf) throws IOException { enable(builder); long priority = getNextPriority(builder); addConstraint(builder, constraint, conf, priority++); @@ -239,7 +239,7 @@ public static TableDescriptorBuilder add(TableDescriptorBuilder builder, * When a constraint is added, it is automatically enabled. */ private static TableDescriptorBuilder addConstraint(TableDescriptorBuilder builder, - Class clazz, Configuration conf, long priority) throws IOException { + Class clazz, Configuration conf, long priority) throws IOException { return writeConstraint(builder, serializeConstraintClass(clazz), configure(conf, true, priority)); } @@ -282,7 +282,7 @@ private static String serializeConstraintClass(Class clazz * Write the given key and associated configuration to the {@link TableDescriptorBuilder}. */ private static TableDescriptorBuilder writeConstraint(TableDescriptorBuilder builder, String key, - Configuration conf) throws IOException { + Configuration conf) throws IOException { // store the key and conf in the descriptor return builder.setValue(key, serializeConfiguration(conf)); } @@ -339,7 +339,7 @@ private static long getNextPriority(TableDescriptorBuilder builder) { } private static TableDescriptorBuilder updateLatestPriority(TableDescriptorBuilder builder, - long priority) { + long priority) { // update the max priority return builder.setValue(COUNTER_KEY, Long.toString(priority)); } @@ -354,14 +354,14 @@ private static TableDescriptorBuilder updateLatestPriority(TableDescriptorBuilde * @throws IllegalArgumentException if the Constraint was not present on this table. 
*/ public static TableDescriptorBuilder setConfiguration(TableDescriptorBuilder builder, - Class clazz, Configuration configuration) - throws IOException, IllegalArgumentException { + Class clazz, Configuration configuration) + throws IOException, IllegalArgumentException { // get the entry for this class Pair e = getKeyValueForClass(builder, clazz); if (e == null) { throw new IllegalArgumentException( - "Constraint: " + clazz.getName() + " is not associated with this table."); + "Constraint: " + clazz.getName() + " is not associated with this table."); } // clone over the configuration elements @@ -384,7 +384,7 @@ public static TableDescriptorBuilder setConfiguration(TableDescriptorBuilder bui * @param clazz {@link Constraint} class to remove */ public static TableDescriptorBuilder remove(TableDescriptorBuilder builder, - Class clazz) { + Class clazz) { String key = serializeConstraintClass(clazz); return builder.removeValue(key); } @@ -397,7 +397,7 @@ public static TableDescriptorBuilder remove(TableDescriptorBuilder builder, * @throws IOException If the constraint cannot be properly deserialized */ public static void enableConstraint(TableDescriptorBuilder builder, - Class clazz) throws IOException { + Class clazz) throws IOException { changeConstraintEnabled(builder, clazz, true); } @@ -409,7 +409,7 @@ public static void enableConstraint(TableDescriptorBuilder builder, * @throws IOException if the constraint cannot be found */ public static void disableConstraint(TableDescriptorBuilder builder, - Class clazz) throws IOException { + Class clazz) throws IOException { changeConstraintEnabled(builder, clazz, false); } @@ -417,12 +417,12 @@ public static void disableConstraint(TableDescriptorBuilder builder, * Change the whether the constraint (if it is already present) is enabled or disabled. */ private static TableDescriptorBuilder changeConstraintEnabled(TableDescriptorBuilder builder, - Class clazz, boolean enabled) throws IOException { + Class clazz, boolean enabled) throws IOException { // get the original constraint Pair entry = getKeyValueForClass(builder, clazz); if (entry == null) { - throw new IllegalArgumentException("Constraint: " + clazz.getName() + - " is not associated with this table. You can't enable it!"); + throw new IllegalArgumentException("Constraint: " + clazz.getName() + + " is not associated with this table. You can't enable it!"); } // create a new configuration from that conf @@ -444,7 +444,7 @@ private static TableDescriptorBuilder changeConstraintEnabled(TableDescriptorBui * @throws IOException If the constraint has improperly stored in the table */ public static boolean enabled(TableDescriptor desc, Class clazz) - throws IOException { + throws IOException { // get the kv Pair entry = getKeyValueForClass(desc, clazz); // its not enabled so just return false. In fact, its not even present! @@ -469,7 +469,7 @@ public static boolean enabled(TableDescriptor desc, Class * @throws IOException if any part of reading/arguments fails */ static List getConstraints(TableDescriptor desc, ClassLoader classloader) - throws IOException { + throws IOException { List constraints = new ArrayList<>(); // loop through all the key, values looking for constraints for (Map.Entry e : desc.getValues().entrySet()) { @@ -501,12 +501,12 @@ static List getConstraints(TableDescriptor desc, ClassLoad try { // add the constraint, now that we expect it to be valid. 
Class clazz = - classloader.loadClass(key).asSubclass(Constraint.class); + classloader.loadClass(key).asSubclass(Constraint.class); Constraint constraint = clazz.getDeclaredConstructor().newInstance(); constraint.setConf(conf); constraints.add(constraint); } catch (InvocationTargetException | NoSuchMethodException | ClassNotFoundException - | InstantiationException | IllegalAccessException e1) { + | InstantiationException | IllegalAccessException e1) { throw new IOException(e1); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java index 0696fc84416e..402483d939a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java @@ -1,19 +1,12 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java index 9508321a625a..736b3ca86666 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coordination; import java.io.IOException; import java.util.Set; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective; @@ -39,7 +36,7 @@ * {@link #checkTaskStillAvailable(String)} Check that task is still there
        * {@link #checkTasks()} check for unassigned tasks and resubmit them * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @InterfaceAudience.Private @Deprecated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java index 5452578a2c26..ac721b4500ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java @@ -1,21 +1,22 @@ - /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hbase.coordination; + import java.util.concurrent.atomic.LongAdder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -28,8 +29,8 @@ /** * Coordinated operations for {@link SplitLogWorker} and - * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important - * methods for SplitLogWorker:
        + * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important methods for + * SplitLogWorker:
        * {@link #isReady()} called from {@link SplitLogWorker#run()} to check whether the coordination is * ready to supply the tasks
        * {@link #taskLoop()} loop for new tasks until the worker is stopped
        @@ -41,7 +42,7 @@ * Important methods for WALSplitterHandler:
        * splitting task has completed. * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager + * distributed WAL splitter, see SplitWALManager */ @Deprecated @InterfaceAudience.Private @@ -55,11 +56,11 @@ public interface SplitLogWorkerCoordination { * @param splitTaskExecutor split executor from SplitLogWorker * @param worker instance of SplitLogWorker */ - void init(RegionServerServices server, Configuration conf, - TaskExecutor splitTaskExecutor, SplitLogWorker worker); + void init(RegionServerServices server, Configuration conf, TaskExecutor splitTaskExecutor, + SplitLogWorker worker); /** - * called when Coordination should stop processing tasks and exit + * called when Coordination should stop processing tasks and exit */ void stopProcessingTasks(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index dee94be9fad3..bde0f47949f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -1,5 +1,5 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one +/* + * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coordination; import static org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective.CHECK; @@ -28,7 +27,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -60,12 +58,11 @@ import org.slf4j.LoggerFactory; /** - * ZooKeeper based implementation of - * {@link SplitLogManagerCoordination} + * ZooKeeper based implementation of {@link SplitLogManagerCoordination} */ @InterfaceAudience.Private -public class ZKSplitLogManagerCoordination extends ZKListener implements - SplitLogManagerCoordination { +public class ZKSplitLogManagerCoordination extends ZKListener + implements SplitLogManagerCoordination { public static final int DEFAULT_TIMEOUT = 120000; public static final int DEFAULT_ZK_RETRIES = 3; @@ -121,8 +118,8 @@ public String prepareTask(String taskname) { public int remainingTasksInCoordination() { int count = 0; try { - List tasks = ZKUtil.listChildrenNoWatch(watcher, - watcher.getZNodePaths().splitLogZNode); + List tasks = + ZKUtil.listChildrenNoWatch(watcher, watcher.getZNodePaths().splitLogZNode); if (tasks != null) { int listSize = tasks.size(); for (int i = 0; i < listSize; i++) { @@ -177,9 +174,9 @@ public boolean resubmitTask(String path, Task task, ResubmitDirective directive) // finished the task. This allows to continue if the worker cannot actually handle it, // for any reason. final long time = EnvironmentEdgeManager.currentTime() - task.last_update; - final boolean alive = - details.getMaster().getServerManager() != null ? 
details.getMaster().getServerManager() - .isServerOnline(task.cur_worker_name) : true; + final boolean alive = details.getMaster().getServerManager() != null + ? details.getMaster().getServerManager().isServerOnline(task.cur_worker_name) + : true; if (alive && time < timeout) { LOG.trace("Skipping the resubmit of " + task.toString() + " because the server " + task.cur_worker_name + " is not marked as dead, we waited for " + time @@ -219,7 +216,6 @@ public boolean resubmitTask(String path, Task task, ResubmitDirective directive) return true; } - @Override public void checkTasks() { rescan(Long.MAX_VALUE); @@ -237,11 +233,9 @@ private void rescan(long retries) { // Since the TimeoutMonitor will keep resubmitting UNASSIGNED tasks // therefore this behavior is safe. SplitLogTask slt = new SplitLogTask.Done(this.details.getServerName()); - this.watcher - .getRecoverableZooKeeper() - .getZooKeeper() - .create(ZKSplitLog.getRescanNode(watcher), slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, - CreateMode.EPHEMERAL_SEQUENTIAL, new CreateRescanAsyncCallback(), Long.valueOf(retries)); + this.watcher.getRecoverableZooKeeper().getZooKeeper().create(ZKSplitLog.getRescanNode(watcher), + slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL, + new CreateRescanAsyncCallback(), Long.valueOf(retries)); } @Override @@ -252,11 +246,8 @@ public void submitTask(String path) { @Override public void checkTaskStillAvailable(String path) { // A negative retry count will lead to ignoring all error processing. - this.watcher - .getRecoverableZooKeeper() - .getZooKeeper() - .getData(path, this.watcher, new GetDataAsyncCallback(), - Long.valueOf(-1) /* retry count */); + this.watcher.getRecoverableZooKeeper().getZooKeeper().getData(path, this.watcher, + new GetDataAsyncCallback(), Long.valueOf(-1) /* retry count */); SplitLogCounters.tot_mgr_get_data_queued.increment(); } @@ -265,8 +256,8 @@ private void deleteNode(String path, Long retries) { // Once a task znode is ready for delete, that is it is in the TASK_DONE // state, then no one should be writing to it anymore. That is no one // will be updating the znode version any more. 
- this.watcher.getRecoverableZooKeeper().getZooKeeper() - .delete(path, -1, new DeleteAsyncCallback(), retries); + this.watcher.getRecoverableZooKeeper().getZooKeeper().delete(path, -1, + new DeleteAsyncCallback(), retries); } private void deleteNodeSuccess(String path) { @@ -339,8 +330,8 @@ private void createNodeFailure(String path) { } private void getDataSetWatch(String path, Long retry_count) { - this.watcher.getRecoverableZooKeeper().getZooKeeper() - .getData(path, this.watcher, new GetDataAsyncCallback(), retry_count); + this.watcher.getRecoverableZooKeeper().getZooKeeper().getData(path, this.watcher, + new GetDataAsyncCallback(), retry_count); SplitLogCounters.tot_mgr_get_data_queued.increment(); } @@ -382,8 +373,8 @@ private void getDataSetWatchSuccess(String path, byte[] data, int version) LOG.info("Task " + path + " entered state=" + slt.toString()); resubmitOrFail(path, CHECK); } else { - LOG.error(HBaseMarkers.FATAL, "logic error - unexpected zk state for path = " - + path + " data = " + slt.toString()); + LOG.error(HBaseMarkers.FATAL, + "logic error - unexpected zk state for path = " + path + " data = " + slt.toString()); setDone(path, FAILURE); } } @@ -466,8 +457,8 @@ private void heartbeat(String path, int new_version, ServerName workerName) { private void lookForOrphans() { List orphans; try { - orphans = ZKUtil.listChildrenNoWatch(this.watcher, - this.watcher.getZNodePaths().splitLogZNode); + orphans = + ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.getZNodePaths().splitLogZNode); if (orphans == null) { LOG.warn("Could not get children of " + this.watcher.getZNodePaths().splitLogZNode); return; @@ -509,8 +500,7 @@ public void nodeDataChanged(String path) { private boolean resubmit(String path, int version) { try { // blocking zk call but this is done from the timeout thread - SplitLogTask slt = - new SplitLogTask.Unassigned(this.details.getServerName()); + SplitLogTask slt = new SplitLogTask.Unassigned(this.details.getServerName()); if (ZKUtil.setData(this.watcher, path, slt.toByteArray(), version) == false) { LOG.debug("Failed to resubmit task " + path + " version changed"); return false; @@ -536,12 +526,11 @@ private boolean resubmit(String path, int version) { return true; } - /** * {@link org.apache.hadoop.hbase.master.SplitLogManager} can use objects implementing this * interface to finish off a partially done task by - * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}. This provides a - * serialization point at the end of the task processing. Must be restartable and idempotent. + * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}. This provides a serialization + * point at the end of the task processing. Must be restartable and idempotent. */ public interface TaskFinisher { /** @@ -639,8 +628,8 @@ public void processResult(int rc, String path, Object ctx, byte[] data, Stat sta + ". Ignoring error. No error handling. 
No retrying."); return; } - LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path - + " remaining retries=" + retry_count); + LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path + " remaining retries=" + + retry_count); if (retry_count == 0) { SplitLogCounters.tot_mgr_get_data_err.increment(); getDataSetWatchFailure(path); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java index 323e5752ace9..dc8d8e299257 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,8 @@ /** * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}. * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter (see SplitWALManager) which doesn't use this zk-based coordinator. + * distributed WAL splitter (see SplitWALManager) which doesn't use this zk-based + * coordinator. */ @Deprecated @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -38,8 +39,8 @@ public class ZkCoordinatedStateManager implements CoordinatedStateManager { public ZkCoordinatedStateManager(Server server) { this.watcher = server.getZooKeeper(); splitLogWorkerCoordination = new ZkSplitLogWorkerCoordination(server.getServerName(), watcher); - splitLogManagerCoordination = new ZKSplitLogManagerCoordination(server.getConfiguration(), - watcher); + splitLogManagerCoordination = + new ZKSplitLogManagerCoordination(server.getConfiguration(), watcher); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java index 07e751716bf2..5652fd2b86cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coordination; import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER; @@ -59,20 +57,18 @@ import org.slf4j.LoggerFactory; /** - * ZooKeeper based implementation of {@link SplitLogWorkerCoordination} - * It listen for changes in ZooKeeper and - * + * ZooKeeper based implementation of {@link SplitLogWorkerCoordination} It listen for changes in + * ZooKeeper and */ @InterfaceAudience.Private -public class ZkSplitLogWorkerCoordination extends ZKListener implements - SplitLogWorkerCoordination { +public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLogWorkerCoordination { private static final Logger LOG = LoggerFactory.getLogger(ZkSplitLogWorkerCoordination.class); private static final int checkInterval = 5000; // 5 seconds private static final int FAILED_TO_OWN_TASK = -1; - private SplitLogWorker worker; + private SplitLogWorker worker; private TaskExecutor splitTaskExecutor; @@ -132,17 +128,16 @@ public void nodeDataChanged(String path) { * Override setter from {@link SplitLogWorkerCoordination} */ @Override - public void init(RegionServerServices server, Configuration conf, - TaskExecutor splitExecutor, SplitLogWorker worker) { + public void init(RegionServerServices server, Configuration conf, TaskExecutor splitExecutor, + SplitLogWorker worker) { this.server = server; this.worker = worker; this.splitTaskExecutor = splitExecutor; maxConcurrentTasks = conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER); - reportPeriod = - conf.getInt("hbase.splitlog.report.period", - conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, - ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3); + reportPeriod = conf.getInt("hbase.splitlog.report.period", + conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, + ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3); } /* Support functions for ZooKeeper async callback */ @@ -165,8 +160,8 @@ void getDataSetWatchFailure(String path) { } public void getDataSetWatchAsync() { - watcher.getRecoverableZooKeeper().getZooKeeper() - .getData(currentTask, watcher, new GetDataAsyncCallback(), null); + watcher.getRecoverableZooKeeper().getZooKeeper().getData(currentTask, watcher, + new GetDataAsyncCallback(), null); SplitLogCounters.tot_wkr_get_data_queued.increment(); } @@ -299,9 +294,8 @@ public boolean progress() { long t = EnvironmentEdgeManager.currentTime(); if ((t - last_report_at) > reportPeriod) { last_report_at = t; - int latestZKVersion = - attemptToOwnTask(false, watcher, server.getServerName(), curTask, - zkVersion.intValue()); + int latestZKVersion = attemptToOwnTask(false, watcher, server.getServerName(), curTask, + zkVersion.intValue()); if (latestZKVersion < 0) { LOG.warn("Failed to heartbeat the task" + curTask); return false; @@ -316,9 +310,8 @@ public boolean progress() { splitTaskDetails.setTaskNode(curTask); splitTaskDetails.setCurTaskZKVersion(zkVersion); - WALSplitterHandler hsh = - new WALSplitterHandler(server, this, splitTaskDetails, reporter, - this.tasksInProgress, splitTaskExecutor); + WALSplitterHandler hsh = new WALSplitterHandler(server, this, splitTaskDetails, reporter, + this.tasksInProgress, splitTaskExecutor); server.getExecutorService().submit(hsh); } @@ -342,8 +335,8 @@ private boolean areSplittersAvailable() { * @param taskZKVersion version of the task in zk * @return non-negative integer value when task can be owned by current region server otherwise -1 */ - protected static int attemptToOwnTask(boolean isFirstTime, 
ZKWatcher zkw, - ServerName server, String task, int taskZKVersion) { + protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, ServerName server, + String task, int taskZKVersion) { int latestZKVersion = FAILED_TO_OWN_TASK; try { SplitLogTask slt = new SplitLogTask.Owned(server); @@ -413,18 +406,18 @@ public void taskLoop() throws InterruptedException { if (this.areSplittersAvailable()) { if (LOG.isTraceEnabled()) { LOG.trace("Current region server " + server.getServerName() - + " is ready to take more tasks, will get task list and try grab tasks again."); + + " is ready to take more tasks, will get task list and try grab tasks again."); } int idx = (i + offset) % paths.size(); // don't call ZKSplitLog.getNodeName() because that will lead to // double encoding of the path name - taskGrabbed |= grabTask(ZNodePaths.joinZNode( - watcher.getZNodePaths().splitLogZNode, paths.get(idx))); + taskGrabbed |= grabTask( + ZNodePaths.joinZNode(watcher.getZNodePaths().splitLogZNode, paths.get(idx))); break; } else { if (LOG.isTraceEnabled()) { LOG.trace("Current region server " + server.getServerName() + " has " - + this.tasksInProgress.get() + " tasks in progress and can't take more."); + + this.tasksInProgress.get() + " tasks in progress and can't take more."); } Thread.sleep(100); } @@ -480,8 +473,9 @@ public boolean isReady() throws InterruptedException { result = ZKUtil.checkExists(watcher, watcher.getZNodePaths().splitLogZNode); } catch (KeeperException e) { // ignore - LOG.warn("Exception when checking for " + watcher.getZNodePaths().splitLogZNode - + " ... retrying", e); + LOG.warn( + "Exception when checking for " + watcher.getZNodePaths().splitLogZNode + " ... retrying", + e); } if (result == -1) { LOG.info(watcher.getZNodePaths().splitLogZNode @@ -506,7 +500,6 @@ public void removeListener() { watcher.unregisterListener(this); } - @Override public void stopProcessingTasks() { this.shouldStop = true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java index 2818dcd675f1..eb05a511af26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,9 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; +import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -27,8 +26,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; - /** * Encapsulation of the environment of each coprocessor */ @@ -51,7 +48,8 @@ public class BaseEnvironment implements CoprocessorEnviro * @param impl the coprocessor instance * @param priority chaining priority */ - public BaseEnvironment(final C impl, final int priority, final int seq, final Configuration conf) { + public BaseEnvironment(final C impl, final int priority, final int seq, + final Configuration conf) { this.impl = impl; this.classLoader = impl.getClass().getClassLoader(); this.priority = priority; @@ -62,8 +60,7 @@ public BaseEnvironment(final C impl, final int priority, final int seq, final Co /** Initialize the environment */ public void startup() throws IOException { - if (state == Coprocessor.State.INSTALLED || - state == Coprocessor.State.STOPPED) { + if (state == Coprocessor.State.INSTALLED || state == Coprocessor.State.STOPPED) { state = Coprocessor.State.STARTING; Thread currentThread = Thread.currentThread(); ClassLoader hostClassLoader = currentThread.getContextClassLoader(); @@ -75,8 +72,8 @@ public void startup() throws IOException { currentThread.setContextClassLoader(hostClassLoader); } } else { - LOG.warn("Not starting coprocessor " + impl.getClass().getName() + - " because not inactive (state=" + state.toString() + ")"); + LOG.warn("Not starting coprocessor " + impl.getClass().getName() + + " because not inactive (state=" + state.toString() + ")"); } } @@ -91,13 +88,13 @@ public void shutdown() { impl.stop(this); state = Coprocessor.State.STOPPED; } catch (IOException ioe) { - LOG.error("Error stopping coprocessor "+impl.getClass().getName(), ioe); + LOG.error("Error stopping coprocessor " + impl.getClass().getName(), ioe); } finally { currentThread.setContextClassLoader(hostClassLoader); } } else { - LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+ - " because not active (state="+state.toString()+")"); + LOG.warn("Not stopping coprocessor " + impl.getClass().getName() + + " because not active (state=" + state.toString() + ")"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java index 094a7d932f3b..48bb2cc40762 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,55 +15,53 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Coprocessors implement this interface to observe and mediate bulk load operations. - *
<br><br>
- *
- * <h3>Exception Handling</h3>
- * For all functions, exception handling is done as follows:
+ * Coprocessors implement this interface to observe and mediate bulk load operations. <br>
+ * <br>
+ * <h3>Exception Handling</h3> For all functions, exception handling is done as follows:
+ * <ul>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
 * <ul>
- *   <li>Exceptions of type {@link IOException} are reported back to client.</li>
- *   <li>For any other kind of exception:
- *     <ul>
- *       <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- * the server aborts.</li>
- *       <li>Otherwise, coprocessor is removed from the server and
- * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- *     </ul>
- *   </li>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
 * </ul>
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface BulkLoadObserver { - /** - * Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. - * It can't bypass the default action, e.g., ctx.bypass() won't have effect. - * If you need to get the region or table name, get it from the - * ctx as follows: code>ctx.getEnvironment().getRegion(). Use - * getRegionInfo to fetch the encodedName and use getDescriptor() to get the tableName. - * @param ctx the environment to interact with the framework and master - */ - default void prePrepareBulkLoad(ObserverContext ctx) - throws IOException {} + /** + * Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. It can't bypass the + * default action, e.g., ctx.bypass() won't have effect. If you need to get the region or table + * name, get it from the ctx as follows: + * code>ctx.getEnvironment().getRegion(). Use getRegionInfo to fetch the encodedName + * and use getDescriptor() to get the tableName. + * @param ctx the environment to interact with the framework and master + */ + default void prePrepareBulkLoad(ObserverContext ctx) + throws IOException { + } - /** - * Called as part of SecureBulkLoadEndpoint.cleanupBulkLoad() RPC call. - * It can't bypass the default action, e.g., ctx.bypass() won't have effect. - * If you need to get the region or table name, get it from the - * ctx as follows: code>ctx.getEnvironment().getRegion(). Use - * getRegionInfo to fetch the encodedName and use getDescriptor() to get the tableName. - * @param ctx the environment to interact with the framework and master - */ - default void preCleanupBulkLoad(ObserverContext ctx) - throws IOException {} + /** + * Called as part of SecureBulkLoadEndpoint.cleanupBulkLoad() RPC call. It can't bypass the + * default action, e.g., ctx.bypass() won't have effect. If you need to get the region or table + * name, get it from the ctx as follows: + * code>ctx.getEnvironment().getRegion(). Use getRegionInfo to fetch the encodedName + * and use getDescriptor() to get the tableName. + * @param ctx the environment to interact with the framework and master + */ + default void preCleanupBulkLoad(ObserverContext ctx) + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 319936d9ebfe..66c59cb4da8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
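To illustrate the BulkLoadObserver hooks touched by this hunk, here is a hypothetical implementation (not part of this patch); the class name and table name are invented, but the interfaces and the way the region is fetched from the context follow the Javadoc above:

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    // Hypothetical coprocessor that rejects bulk loads into one table by failing the pre-hook.
    public class RestrictedBulkLoadObserver implements RegionCoprocessor, BulkLoadObserver {

      @Override
      public Optional<BulkLoadObserver> getBulkLoadObserver() {
        return Optional.of(this);
      }

      @Override
      public void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
        // The table name is taken from the environment's region, as the Javadoc describes;
        // ctx.bypass() has no effect for this hook.
        TableName table = ctx.getEnvironment().getRegion().getRegionInfo().getTable();
        if ("restricted_table".equals(table.getNameAsString())) {
          throw new DoNotRetryIOException("Bulk loads are not allowed into " + table);
        }
      }
    }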
*/ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -31,10 +30,6 @@ import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; @@ -46,33 +41,32 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.CoprocessorClassLoader; import org.apache.hadoop.hbase.util.SortedList; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Strings; /** - * Provides the common setup framework and runtime services for coprocessor - * invocation from HBase services. + * Provides the common setup framework and runtime services for coprocessor invocation from HBase + * services. * @param type of specific coprocessor this host will handle - * @param type of specific coprocessor environment this host requires. - * provides + * @param type of specific coprocessor environment this host requires. provides */ @InterfaceAudience.Private public abstract class CoprocessorHost> { - public static final String REGION_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.region.classes"; + public static final String REGION_COPROCESSOR_CONF_KEY = "hbase.coprocessor.region.classes"; public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = "hbase.coprocessor.regionserver.classes"; public static final String USER_REGION_COPROCESSOR_CONF_KEY = "hbase.coprocessor.user.region.classes"; - public static final String MASTER_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.master.classes"; - public static final String WAL_COPROCESSOR_CONF_KEY = - "hbase.coprocessor.wal.classes"; + public static final String MASTER_COPROCESSOR_CONF_KEY = "hbase.coprocessor.master.classes"; + public static final String WAL_COPROCESSOR_CONF_KEY = "hbase.coprocessor.wal.classes"; public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; public static final boolean DEFAULT_ABORT_ON_ERROR = true; public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; - public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = - "hbase.coprocessor.user.enabled"; + public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.user.enabled"; public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true; public static final String SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR = "hbase.skip.load.duplicate.table.coprocessor"; @@ -94,15 +88,13 @@ public CoprocessorHost(Abortable abortable) { } /** - * Not to be confused with the per-object _coprocessors_ (above), - * coprocessorNames is static and stores the set of all coprocessors ever - * loaded by any thread in this JVM. It is strictly additive: coprocessors are - * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since - * the intention is to preserve a history of all loaded coprocessors for - * diagnosis in case of server crash (HBASE-4014). + * Not to be confused with the per-object _coprocessors_ (above), coprocessorNames is static and + * stores the set of all coprocessors ever loaded by any thread in this JVM. 
It is strictly + * additive: coprocessors are added to coprocessorNames, by checkAndLoadInstance() but are never + * removed, since the intention is to preserve a history of all loaded coprocessors for diagnosis + * in case of server crash (HBASE-4014). */ - private static Set coprocessorNames = - Collections.synchronizedSet(new HashSet()); + private static Set coprocessorNames = Collections.synchronizedSet(new HashSet()); public static Set getLoadedCoprocessors() { synchronized (coprocessorNames) { @@ -111,27 +103,25 @@ public static Set getLoadedCoprocessors() { } /** - * Used to create a parameter to the HServerLoad constructor so that - * HServerLoad can provide information about the coprocessors loaded by this - * regionserver. - * (HBASE-4070: Improve region server metrics to report loaded coprocessors - * to master). + * Used to create a parameter to the HServerLoad constructor so that HServerLoad can provide + * information about the coprocessors loaded by this regionserver. (HBASE-4070: Improve region + * server metrics to report loaded coprocessors to master). */ public Set getCoprocessors() { Set returnValue = new TreeSet<>(); - for (E e: coprocEnvironments) { + for (E e : coprocEnvironments) { returnValue.add(e.getInstance().getClass().getSimpleName()); } return returnValue; } /** - * Load system coprocessors once only. Read the class names from configuration. - * Called by constructor. + * Load system coprocessors once only. Read the class names from configuration. Called by + * constructor. */ protected void loadSystemCoprocessors(Configuration conf, String confKey) { - boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, - DEFAULT_COPROCESSORS_ENABLED); + boolean coprocessorsEnabled = + conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, DEFAULT_COPROCESSORS_ENABLED); if (!coprocessorsEnabled) { return; } @@ -140,8 +130,7 @@ protected void loadSystemCoprocessors(Configuration conf, String confKey) { // load default coprocessors from configure file String[] defaultCPClasses = conf.getStrings(confKey); - if (defaultCPClasses == null || defaultCPClasses.length == 0) - return; + if (defaultCPClasses == null || defaultCPClasses.length == 0) return; int currentSystemPriority = Coprocessor.PRIORITY_SYSTEM; for (String className : defaultCPClasses) { @@ -202,10 +191,9 @@ protected void loadSystemCoprocessors(Configuration conf, String confKey) { * @param conf configuration for coprocessor * @throws java.io.IOException Exception */ - public E load(Path path, String className, int priority, - Configuration conf) throws IOException { + public E load(Path path, String className, int priority, Configuration conf) throws IOException { String[] includedClassPrefixes = null; - if (conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){ + if (conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null) { String prefixes = conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY); includedClassPrefixes = prefixes.split(";"); } @@ -221,11 +209,11 @@ public E load(Path path, String className, int priority, * @param includedClassPrefixes class name prefixes to include * @throws java.io.IOException Exception */ - public E load(Path path, String className, int priority, - Configuration conf, String[] includedClassPrefixes) throws IOException { + public E load(Path path, String className, int priority, Configuration conf, + String[] includedClassPrefixes) throws IOException { Class implClass; - LOG.debug("Loading coprocessor class " + className + " with path " + - path + " and priority " + 
priority); + LOG.debug("Loading coprocessor class " + className + " with path " + path + " and priority " + + priority); boolean skipLoadDuplicateCoprocessor = conf.getBoolean(SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR, DEFAULT_SKIP_LOAD_DUPLICATE_TABLE_COPROCESSOR); @@ -243,19 +231,19 @@ public E load(Path path, String className, int priority, throw new IOException("No jar path specified for " + className); } } else { - cl = CoprocessorClassLoader.getClassLoader( - path, getClass().getClassLoader(), pathPrefix, conf); + cl = CoprocessorClassLoader.getClassLoader(path, getClass().getClassLoader(), pathPrefix, + conf); try { - implClass = ((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes); + implClass = ((CoprocessorClassLoader) cl).loadClass(className, includedClassPrefixes); } catch (ClassNotFoundException e) { throw new IOException("Cannot load external coprocessor class " + className, e); } } - //load custom code for coprocessor + // load custom code for coprocessor Thread currentThread = Thread.currentThread(); ClassLoader hostClassLoader = currentThread.getContextClassLoader(); - try{ + try { // switch temporarily to the thread classloader for custom CP currentThread.setContextClassLoader(cl); E cpInstance = checkAndLoadInstance(implClass, priority, conf); @@ -288,7 +276,7 @@ public E checkAndLoadInstance(Class implClass, int priority, Configuration co LOG.error("Cannot load coprocessor " + implClass.getSimpleName()); return null; } - } catch (InstantiationException|IllegalAccessException e) { + } catch (InstantiationException | IllegalAccessException e) { throw new IOException(e); } // create the environment @@ -307,11 +295,11 @@ public E checkAndLoadInstance(Class implClass, int priority, Configuration co public abstract E createEnvironment(C instance, int priority, int sequence, Configuration conf); /** - * Called when a new Coprocessor class needs to be loaded. Checks if type of the given class - * is what the corresponding host implementation expects. If it is of correct type, returns an - * instance of the coprocessor to be loaded. If not, returns null. - * If an exception occurs when trying to create instance of a coprocessor, it's passed up and - * eventually results into server aborting. + * Called when a new Coprocessor class needs to be loaded. Checks if type of the given class is + * what the corresponding host implementation expects. If it is of correct type, returns an + * instance of the coprocessor to be loaded. If not, returns null. If an exception occurs when + * trying to create instance of a coprocessor, it's passed up and eventually results into server + * aborting. */ public abstract C checkAndGetInstance(Class implClass) throws InstantiationException, IllegalAccessException; @@ -328,9 +316,9 @@ public void shutdown(E e) { * Find coprocessors by full class name or simple name. 
*/ public C findCoprocessor(String className) { - for (E env: coprocEnvironments) { - if (env.getInstance().getClass().getName().equals(className) || - env.getInstance().getClass().getSimpleName().equals(className)) { + for (E env : coprocEnvironments) { + if (env.getInstance().getClass().getName().equals(className) + || env.getInstance().getClass().getSimpleName().equals(className)) { return env.getInstance(); } } @@ -338,7 +326,7 @@ public C findCoprocessor(String className) { } public T findCoprocessor(Class cls) { - for (E env: coprocEnvironments) { + for (E env : coprocEnvironments) { if (cls.isAssignableFrom(env.getInstance().getClass())) { return (T) env.getInstance(); } @@ -354,12 +342,12 @@ public T findCoprocessor(Class cls) { public List findCoprocessors(Class cls) { ArrayList ret = new ArrayList<>(); - for (E env: coprocEnvironments) { + for (E env : coprocEnvironments) { C cp = env.getInstance(); - if(cp != null) { + if (cp != null) { if (cls.isAssignableFrom(cp.getClass())) { - ret.add((T)cp); + ret.add((T) cp); } } } @@ -372,9 +360,9 @@ public List findCoprocessors(Class cls) { * @return the coprocessor, or null if not found */ public E findCoprocessorEnvironment(String className) { - for (E env: coprocEnvironments) { - if (env.getInstance().getClass().getName().equals(className) || - env.getInstance().getClass().getSimpleName().equals(className)) { + for (E env : coprocEnvironments) { + if (env.getInstance().getClass().getName().equals(className) + || env.getInstance().getClass().getSimpleName().equals(className)) { return env; } } @@ -391,8 +379,8 @@ Set getExternalClassLoaders() { final ClassLoader systemClassLoader = this.getClass().getClassLoader(); for (E env : coprocEnvironments) { ClassLoader cl = env.getInstance().getClass().getClassLoader(); - if (cl != systemClassLoader){ - //do not include system classloader + if (cl != systemClassLoader) { + // do not include system classloader externalClassLoaders.add(cl); } } @@ -400,13 +388,11 @@ Set getExternalClassLoaders() { } /** - * Environment priority comparator. - * Coprocessors are chained in sorted order. + * Environment priority comparator. Coprocessors are chained in sorted order. */ static class EnvironmentPriorityComparator implements Comparator { @Override - public int compare(final CoprocessorEnvironment env1, - final CoprocessorEnvironment env2) { + public int compare(final CoprocessorEnvironment env1, final CoprocessorEnvironment env2) { if (env1.getPriority() < env2.getPriority()) { return -1; } else if (env1.getPriority() > env2.getPriority()) { @@ -436,16 +422,13 @@ protected void abortServer(final String coprocessorName, final Throwable e) { } /** - * This is used by coprocessor hooks which are declared to throw IOException - * (or its subtypes). For such hooks, we should handle throwable objects - * depending on the Throwable's type. Those which are instances of - * IOException should be passed on to the client. This is in conformance with - * the HBase idiom regarding IOException: that it represents a circumstance - * that should be passed along to the client for its own handling. For - * example, a coprocessor that implements access controls would throw a - * subclass of IOException, such as AccessDeniedException, in its preGet() - * method to prevent an unauthorized client's performing a Get on a particular - * table. + * This is used by coprocessor hooks which are declared to throw IOException (or its subtypes). 
+ * For such hooks, we should handle throwable objects depending on the Throwable's type. Those + * which are instances of IOException should be passed on to the client. This is in conformance + * with the HBase idiom regarding IOException: that it represents a circumstance that should be + * passed along to the client for its own handling. For example, a coprocessor that implements + * access controls would throw a subclass of IOException, such as AccessDeniedException, in its + * preGet() method to prevent an unauthorized client's performing a Get on a particular table. * @param env Coprocessor Environment * @param e Throwable object thrown by coprocessor. * @exception IOException Exception @@ -456,7 +439,7 @@ protected void abortServer(final String coprocessorName, final Throwable e) { // update all classes' comments. protected void handleCoprocessorThrowable(final E env, final Throwable e) throws IOException { if (e instanceof IOException) { - throw (IOException)e; + throw (IOException) e; } // If we got here, e is not an IOException. A loaded coprocessor has a // fatal bug, and the server (master or regionserver) should remove the @@ -469,24 +452,23 @@ protected void handleCoprocessorThrowable(final E env, final Throwable e) throws abortServer(env, e); } else { // If available, pull a table name out of the environment - if(env instanceof RegionCoprocessorEnvironment) { - String tableName = ((RegionCoprocessorEnvironment)env).getRegionInfo().getTable().getNameAsString(); - LOG.error("Removing coprocessor '" + env.toString() + "' from table '"+ tableName + "'", e); + if (env instanceof RegionCoprocessorEnvironment) { + String tableName = + ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable().getNameAsString(); + LOG.error("Removing coprocessor '" + env.toString() + "' from table '" + tableName + "'", + e); } else { - LOG.error("Removing coprocessor '" + env.toString() + "' from " + - "environment",e); + LOG.error("Removing coprocessor '" + env.toString() + "' from " + "environment", e); } coprocEnvironments.remove(env); try { shutdown(env); } catch (Exception x) { - LOG.error("Uncaught exception when shutting down coprocessor '" - + env.toString() + "'", x); + LOG.error("Uncaught exception when shutting down coprocessor '" + env.toString() + "'", x); } - throw new DoNotRetryIOException("Coprocessor: '" + env.toString() + - "' threw: '" + e + "' and has been removed from the active " + - "coprocessor set.", e); + throw new DoNotRetryIOException("Coprocessor: '" + env.toString() + "' threw: '" + e + + "' and has been removed from the active " + "coprocessor set.", e); } } @@ -494,27 +476,26 @@ protected void handleCoprocessorThrowable(final E env, final Throwable e) throws * Used to limit legacy handling to once per Coprocessor class per classloader. */ private static final Set> legacyWarning = - new ConcurrentSkipListSet<>( - new Comparator>() { - @Override - public int compare(Class c1, Class c2) { - if (c1.equals(c2)) { - return 0; - } - return c1.getName().compareTo(c2.getName()); - } - }); + new ConcurrentSkipListSet<>(new Comparator>() { + @Override + public int compare(Class c1, Class c2) { + if (c1.equals(c2)) { + return 0; + } + return c1.getName().compareTo(c2.getName()); + } + }); /** * Implementations defined function to get an observer of type {@code O} from a coprocessor of - * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each - * observer they can handle. For e.g. 
RegionCoprocessorHost will use 3 getters, one for - * each of RegionObserver, EndpointObserver and BulkLoadObserver. - * These getters are used by {@code ObserverOperation} to get appropriate observer from the - * coprocessor. + * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each observer + * they can handle. For e.g. RegionCoprocessorHost will use 3 getters, one for each of + * RegionObserver, EndpointObserver and BulkLoadObserver. These getters are used by + * {@code ObserverOperation} to get appropriate observer from the coprocessor. */ @FunctionalInterface - public interface ObserverGetter extends Function> {} + public interface ObserverGetter extends Function> { + } private abstract class ObserverOperation extends ObserverContextImpl { ObserverGetter observerGetter; @@ -532,12 +513,14 @@ private abstract class ObserverOperation extends ObserverContextImpl { } ObserverOperation(ObserverGetter observerGetter, User user, boolean bypassable) { - super(user != null? user: RpcServer.getRequestUser().orElse(null), bypassable); + super(user != null ? user : RpcServer.getRequestUser().orElse(null), bypassable); this.observerGetter = observerGetter; } abstract void callObserver() throws IOException; - protected void postEnvCall() {} + + protected void postEnvCall() { + } } // Can't derive ObserverOperation from ObserverOperationWithResult (R = Void) because then all @@ -561,9 +544,8 @@ public ObserverOperationWithoutResult(ObserverGetter observerGetter, User /** * In case of coprocessors which have many kinds of observers (for eg, {@link RegionCoprocessor} - * has BulkLoadObserver, RegionObserver, etc), some implementations may not need all - * observers, in which case they will return null for that observer's getter. - * We simply ignore such cases. + * has BulkLoadObserver, RegionObserver, etc), some implementations may not need all observers, + * in which case they will return null for that observer's getter. We simply ignore such cases. */ @Override void callObserver() throws IOException { @@ -588,8 +570,7 @@ public ObserverOperationWithResult(ObserverGetter observerGetter, R result this(observerGetter, result, null, bypassable); } - public ObserverOperationWithResult(ObserverGetter observerGetter, R result, - User user) { + public ObserverOperationWithResult(ObserverGetter observerGetter, R result, User user) { this(observerGetter, result, user, false); } @@ -623,12 +604,12 @@ protected R execOperationWithResult( final ObserverOperationWithResult observerOperation) throws IOException { boolean bypass = execOperation(observerOperation); R result = observerOperation.getResult(); - return bypass == observerOperation.isBypassable()? result: null; + return bypass == observerOperation.isBypassable() ? result : null; } /** * @return True if we are to bypass (Can only be true if - * ObserverOperation#isBypassable(). + * ObserverOperation#isBypassable(). */ protected boolean execOperation(final ObserverOperation observerOperation) throws IOException { @@ -662,10 +643,10 @@ protected boolean execOperation(final ObserverOperation observerOperation } /** - * Coprocessor classes can be configured in any order, based on that priority is set and - * chained in a sorted order. Should be used preStop*() hooks i.e. when master/regionserver is - * going down. This function first calls coprocessor methods (using ObserverOperation.call()) - * and then shutdowns the environment in postEnvCall().
        + * Coprocessor classes can be configured in any order, based on that priority is set and chained + * in a sorted order. Should be used preStop*() hooks i.e. when master/regionserver is going down. + * This function first calls coprocessor methods (using ObserverOperation.call()) and then + * shutdowns the environment in postEnvCall().
        * Need to execute all coprocessor methods first then postEnvCall(), otherwise some coprocessors * may remain shutdown if any exception occurs during next coprocessor execution which prevent * master/regionserver stop or cluster shutdown. (Refer: diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java index edb24cca35cc..34ee4b114711 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -25,8 +24,8 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Service; /** - * Coprocessor endpoints providing protobuf services should implement this - * interface and return the {@link Service} instance via {@link #getService()}. + * Coprocessor endpoints providing protobuf services should implement this interface and return the + * {@link Service} instance via {@link #getService()}. * @deprecated Since 2.0. Will be removed in 3.0 */ @Deprecated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java index 0eb5e156b7b3..e73523af47aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,20 +17,18 @@ */ package org.apache.hadoop.hbase.coprocessor; -import org.apache.yetus.audience.InterfaceAudience; - import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; +import org.apache.yetus.audience.InterfaceAudience; /** - * Marker annotation that denotes Coprocessors that are core to HBase. - * A Core Coprocessor is a CP that realizes a core HBase feature. Features are sometimes - * implemented first as a Coprocessor to prove viability. The idea is that once proven, they then - * migrate to core. Meantime, HBase Core Coprocessors get this annotation. No other Coprocessors - * can carry this annotation. + * Marker annotation that denotes Coprocessors that are core to HBase. A Core Coprocessor is a CP + * that realizes a core HBase feature. Features are sometimes implemented first as a Coprocessor to + * prove viability. The idea is that once proven, they then migrate to core. Meantime, HBase Core + * Coprocessors get this annotation. No other Coprocessors can carry this annotation. 
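The CoprocessorHost hunks above re-wrap the configuration constants and loadSystemCoprocessors(), which reads system coprocessor class names from keys such as hbase.coprocessor.region.classes. A short hedged sketch of setting those keys programmatically (MyRegionObserver is a made-up class name used only for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

    Configuration conf = HBaseConfiguration.create();
    // System region coprocessors are read from this key by loadSystemCoprocessors().
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MyRegionObserver.class.getName());
    // These flags, defined in the same class, gate loading and control whether a misbehaving
    // coprocessor aborts the server or is removed (see handleCoprocessorThrowable()).
    conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true);
    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);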
*/ // Core Coprocessors are generally naughty making use of HBase internals doing accesses no // Coprocessor should be up to so we mark these special Coprocessors with this annotation and on @@ -42,4 +39,5 @@ @InterfaceAudience.Private @Retention(RetentionPolicy.RUNTIME) // This Annotation is not @Documented because I don't want users figuring out its mechanics. -public @interface CoreCoprocessor {} +public @interface CoreCoprocessor { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java index d07d94202d5a..676837acbdd8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.coprocessor; @@ -27,22 +26,20 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Service; /** - * Coprocessors implement this interface to observe and mediate endpoint invocations - * on a region. - *
<br><br>
- *
- * <h3>Exception Handling</h3>
- * For all functions, exception handling is done as follows:
+ * Coprocessors implement this interface to observe and mediate endpoint invocations on a region.
+ * <br>
+ * <br>
+ * <h3>Exception Handling</h3> For all functions, exception handling is done as follows:
+ * <ul>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
 * <ul>
- *   <li>Exceptions of type {@link IOException} are reported back to client.</li>
- *   <li>For any other kind of exception:
- *     <ul>
- *       <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- * the server aborts.</li>
- *       <li>Otherwise, coprocessor is removed from the server and
- * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- *     </ul>
- *   </li>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
 * </ul>
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -50,15 +47,14 @@ public interface EndpointObserver { /** - * Called before an Endpoint service method is invoked. - * The request message can be altered by returning a new instance. Throwing an - * exception will abort the invocation. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. + * Called before an Endpoint service method is invoked. The request message can be altered by + * returning a new instance. Throwing an exception will abort the invocation. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. * @param ctx the environment provided by the region server * @param service the endpoint service - * @param request Request message expected by given {@code Service}'s method (by the name - * {@code methodName}). + * @param request Request message expected by given {@code Service}'s method (by the name + * {@code methodName}). * @param methodName the invoked service method * @return the possibly modified message */ @@ -68,17 +64,18 @@ default Message preEndpointInvocation(ObserverContext ctx, Service service, String methodName, Message request, Message.Builder responseBuilder) - throws IOException {} + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java index 595e2d7765fb..2682b78fd513 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java @@ -21,11 +21,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Mark a class that it has a MasterServices accessor. - * Temporary hack until core Coprocesssors are integrated. + * Mark a class that it has a MasterServices accessor. Temporary hack until core Coprocesssors are + * integrated. * @see CoreCoprocessor * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 we will not need this - * facility as CoreCoprocessors are integated into core. + * facility as CoreCoprocessors are integated into core. */ @Deprecated @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java index 89a2c7294643..cef03390acb3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java @@ -21,11 +21,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Mark a class that it has a RegionServiceServices accessor. - * Temporary hack until core Coprocesssors are integrated. + * Mark a class that it has a RegionServiceServices accessor. Temporary hack until core + * Coprocesssors are integrated. * @see CoreCoprocessor * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 we will not need this - * facility as CoreCoprocessors are integated into core. + * facility as CoreCoprocessors are integated into core. 
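As a hypothetical illustration of the EndpointObserver hooks reformatted above (the class name and log message are invented; the hook signature follows this hunk):

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hbase.thirdparty.com.google.protobuf.Message;
    import org.apache.hbase.thirdparty.com.google.protobuf.Service;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical observer that logs every endpoint invocation on a region.
    public class AuditingEndpointObserver implements RegionCoprocessor, EndpointObserver {
      private static final Logger LOG = LoggerFactory.getLogger(AuditingEndpointObserver.class);

      @Override
      public Optional<EndpointObserver> getEndpointObserver() {
        return Optional.of(this);
      }

      @Override
      public Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
        Service service, String methodName, Message request) throws IOException {
        LOG.info("Endpoint {}#{} invoked on {}", service.getDescriptorForType().getFullName(),
          methodName, ctx.getEnvironment().getRegionInfo().getRegionNameAsString());
        // Returning the request unchanged; a new instance could be returned to rewrite it.
        return request;
      }
    }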
*/ @Deprecated @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java index d940385ffaee..a288a4dd869d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface MasterCoprocessor extends Coprocessor { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java index cc72871b672b..c83b9da43080 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -39,48 +36,44 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironmentDo not close! This is a shared connection - * with the hosting server. Throws {@link UnsupportedOperationException} if you try to close - * or abort it. 
- * - * For light-weight usage only. Heavy-duty usage will pull down - * the hosting RegionServer responsiveness as well as that of other Coprocessors making use of - * this Connection. Use to create table on start or to do administrative operations. Coprocessors - * should create their own Connections if heavy usage to avoid impinging on hosting Server - * operation. To create a Connection or if a Coprocessor requires a region with a particular - * Configuration, use {@link org.apache.hadoop.hbase.client.ConnectionFactory} or + * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection with + * the hosting server. Throws {@link UnsupportedOperationException} if you try to close or abort + * it. For light-weight usage only. Heavy-duty usage will pull down the hosting RegionServer + * responsiveness as well as that of other Coprocessors making use of this Connection. Use to + * create table on start or to do administrative operations. Coprocessors should create their own + * Connections if heavy usage to avoid impinging on hosting Server operation. To create a + * Connection or if a Coprocessor requires a region with a particular Configuration, use + * {@link org.apache.hadoop.hbase.client.ConnectionFactory} or * {@link #createConnection(Configuration)}}. - * - *
<p>Be aware that operations that make use of this Connection are executed as the RegionServer
+ * <p>
        + * Be aware that operations that make use of this Connection are executed as the RegionServer * User, the hbase super user that started this server process. Exercise caution running - * operations as this User (See {@link #createConnection(Configuration)}} to run as other than - * the RegionServer User). - * - *
<p>Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl
+ * <p>
        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. - * * @see #createConnection(Configuration) * @return The host's Connection to the Cluster. */ Connection getConnection(); /** - * Creates a cluster connection using the passed Configuration. - * - * Creating a Connection is a heavy-weight operation. The resultant Connection's cache of - * region locations will be empty. Therefore you should cache and reuse Connections rather than - * create a Connection on demand. Create on start of your Coprocessor. You will have to cast - * the CoprocessorEnvironment appropriately to get at this API at start time because - * Coprocessor start method is passed a subclass of this CoprocessorEnvironment or fetch - * Connection using a synchronized accessor initializing the Connection on first access. Close - * the returned Connection when done to free resources. Using this API rather - * than {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} + * Creates a cluster connection using the passed Configuration. Creating a Connection is a + * heavy-weight operation. The resultant Connection's cache of region locations will be empty. + * Therefore you should cache and reuse Connections rather than create a Connection on demand. + * Create on start of your Coprocessor. You will have to cast the CoprocessorEnvironment + * appropriately to get at this API at start time because Coprocessor start method is passed a + * subclass of this CoprocessorEnvironment or fetch Connection using a synchronized accessor + * initializing the Connection on first access. Close the returned Connection when done to free + * resources. Using this API rather than + * {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} * returns a Connection that will short-circuit RPC if the target is a local resource. Use * ConnectionFactory if you don't need this ability. - * - *
<p>Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl
+ * <p>
        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. @@ -90,9 +83,10 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironmentSee ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples - * of how metrics can be instantiated and used.
</p>
+ * <p>
+ * See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples of how
+ * metrics can be instantiated and used.
+ * </p>
        * @return A MetricRegistry for the coprocessor class to track and export metrics. */ MetricRegistry getMetricRegistryForMaster(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 877b722ccda3..480b2253258b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -45,31 +44,27 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; - /** * Defines coprocessor hooks for interacting with operations on the - * {@link org.apache.hadoop.hbase.master.HMaster} process. - *
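Going back to the MasterCoprocessorEnvironment connection guidance reflowed in the hunk above (never close the shared getConnection(); create and own a separate Connection for heavy use), a minimal hedged sketch of a coprocessor that follows it (the class name is invented):

    import java.io.IOException;
    import org.apache.hadoop.hbase.CoprocessorEnvironment;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;

    // Hypothetical master coprocessor that creates its own Connection at start() and closes it
    // at stop(); the shared env.getConnection() is never closed by the coprocessor.
    public class ConnectionOwningMasterCoprocessor implements MasterCoprocessor {
      private Connection connection;

      @Override
      public void start(CoprocessorEnvironment env) throws IOException {
        MasterCoprocessorEnvironment masterEnv = (MasterCoprocessorEnvironment) env;
        connection = masterEnv.createConnection(masterEnv.getConfiguration());
      }

      @Override
      public void stop(CoprocessorEnvironment env) throws IOException {
        if (connection != null) {
          connection.close();
        }
      }
    }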
- * <br><br>
- *
+ * {@link org.apache.hadoop.hbase.master.HMaster} process. <br>
+ * <br>
 * Since most implementations will be interested in only a subset of hooks, this class uses
 * 'default' functions to avoid having to add unnecessary overrides. When the functions are
- * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type.
- * It is done in a way that these default definitions act as no-op. So our suggestion to
- * implementation would be to not call these 'default' methods from overrides.
- * <br><br>
- *
- * <h3>Exception Handling</h3>
- * For all functions, exception handling is done as follows:
+ * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. It
+ * is done in a way that these default definitions act as no-op. So our suggestion to implementation
+ * would be to not call these 'default' methods from overrides. <br>
+ * <br>
+ * <h3>Exception Handling</h3> For all functions, exception handling is done as follows:
 * <ul>
- *   <li>Exceptions of type {@link IOException} are reported back to client.</li>
- *   <li>For any other kind of exception:
- *     <ul>
- *       <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then
- * the server aborts.</li>
- *       <li>Otherwise, coprocessor is removed from the server and
- * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
- *     </ul>
- *   </li>
+ * <li>Exceptions of type {@link IOException} are reported back to client.</li>
+ * <li>For any other kind of exception:
+ * <ul>
+ * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
+ * server aborts.</li>
+ * <li>Otherwise, coprocessor is removed from the server and
+ * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
+ * </ul>
+ * </li>
 * </ul>
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -91,143 +86,135 @@ default TableDescriptor preCreateTableRegionsInfos( } /** - * Called before a new table is created by - * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create - * table RPC call. + * Called before a new table is created by {@link org.apache.hadoop.hbase.master.HMaster}. Called + * as part of create table RPC call. * @param ctx the environment to interact with the framework and master * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ default void preCreateTable(final ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException {} + TableDescriptor desc, RegionInfo[] regions) throws IOException { + } /** - * Called after the createTable operation has been requested. Called as part - * of create table RPC call. + * Called after the createTable operation has been requested. Called as part of create table RPC + * call. * @param ctx the environment to interact with the framework and master * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ default void postCreateTable(final ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException {} + TableDescriptor desc, RegionInfo[] regions) throws IOException { + } /** - * Called before a new table is created by - * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create - * table procedure and it is async to the create RPC call. - * + * Called before a new table is created by {@link org.apache.hadoop.hbase.master.HMaster}. Called + * as part of create table procedure and it is async to the create RPC call. * @param ctx the environment to interact with the framework and master * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ - default void preCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException {} + default void preCreateTableAction(final ObserverContext ctx, + final TableDescriptor desc, final RegionInfo[] regions) throws IOException { + } /** - * Called after the createTable operation has been requested. Called as part - * of create table RPC call. Called as part of create table procedure and - * it is async to the create RPC call. - * + * Called after the createTable operation has been requested. Called as part of create table RPC + * call. Called as part of create table procedure and it is async to the create RPC call. * @param ctx the environment to interact with the framework and master * @param desc the TableDescriptor for the table * @param regions the initial regions created for the table */ default void postCompletedCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException {} + final ObserverContext ctx, final TableDescriptor desc, + final RegionInfo[] regions) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * table. Called as part of delete table RPC call. + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of + * delete table RPC call. 
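As a hypothetical example of one of the MasterObserver hooks shown above (preCreateTable); the class and namespace are made up, while the hook signature matches this hunk:

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    // Hypothetical observer that restricts table creation to a single namespace.
    public class NamespaceRestrictingMasterObserver implements MasterCoprocessor, MasterObserver {

      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      @Override
      public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
        TableDescriptor desc, RegionInfo[] regions) throws IOException {
        if (!"allowed_ns".equals(desc.getTableName().getNamespaceAsString())) {
          throw new DoNotRetryIOException("Tables may only be created in the allowed_ns namespace");
        }
      }
    }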
* @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preDeleteTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called after the deleteTable operation has been requested. Called as part - * of delete table RPC call. + * Called after the deleteTable operation has been requested. Called as part of delete table RPC + * call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postDeleteTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * table. Called as part of delete table procedure and - * it is async to the delete RPC call. - * + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of + * delete table procedure and it is async to the delete RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preDeleteTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + default void preDeleteTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * table. Called as part of delete table procedure and it is async to the - * delete RPC call. - * + * Called after {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of + * delete table procedure and it is async to the delete RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedDeleteTableAction( final ObserverContext ctx, final TableName tableName) - throws IOException {} + throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a - * table. Called as part of truncate table RPC call. + * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part + * of truncate table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preTruncateTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called after the truncateTable operation has been requested. Called as part - * of truncate table RPC call. - * The truncate is synchronous, so this method will be called when the - * truncate operation is terminated. + * Called after the truncateTable operation has been requested. Called as part of truncate table + * RPC call. The truncate is synchronous, so this method will be called when the truncate + * operation is terminated. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postTruncateTable(final ObserverContext ctx, - TableName tableName) throws IOException {} + TableName tableName) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a - * table. Called as part of truncate table procedure and it is async - * to the truncate RPC call. 
- * + * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part + * of truncate table procedure and it is async to the truncate RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preTruncateTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException {} + default void preTruncateTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after {@link org.apache.hadoop.hbase.master.HMaster} truncates a - * table. Called as part of truncate table procedure and it is async to the - * truncate RPC call. - * + * Called after {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part + * of truncate table procedure and it is async to the truncate RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedTruncateTableAction( final ObserverContext ctx, final TableName tableName) - throws IOException {} + throws IOException { + } /** - * Called prior to modifying a table's properties. Called as part of modify - * table RPC call. + * Called prior to modifying a table's properties. Called as part of modify table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table @@ -240,8 +227,8 @@ default TableDescriptor preModifyTable(final ObserverContext ctx, final TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) - throws IOException {} + throws IOException { + } /** - * Called prior to modifying a table's store file tracker. Called as part of modify - * table store file tracker RPC call. + * Called prior to modifying a table's store file tracker. Called as part of modify table store + * file tracker RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param dstSFT the store file tracker * @return the store file tracker */ default String preModifyTableStoreFileTracker( - final ObserverContext ctx, final TableName tableName, - String dstSFT) throws IOException { + final ObserverContext ctx, final TableName tableName, + String dstSFT) throws IOException { return dstSFT; } /** - * Called after modifying a table's store file tracker. Called as part of modify - * table store file tracker RPC call. + * Called after modifying a table's store file tracker. Called as part of modify table store file + * tracker RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param dstSFT the store file tracker */ default void postModifyTableStoreFileTracker( - final ObserverContext ctx, final TableName tableName, - String dstSFT) throws IOException {} + final ObserverContext ctx, final TableName tableName, + String dstSFT) throws IOException { + } /** * Called prior to modifying a family's store file tracker. 
Called as part of modify family store @@ -286,176 +275,179 @@ default void postModifyTableStoreFileTracker( * @return the store file tracker */ default String preModifyColumnFamilyStoreFileTracker( - final ObserverContext ctx, final TableName tableName, - final byte[] family, String dstSFT) throws IOException { + final ObserverContext ctx, final TableName tableName, + final byte[] family, String dstSFT) throws IOException { return dstSFT; } /** - * Called after modifying a family store file tracker. Called as part of modify family store - * file tracker RPC call. + * Called after modifying a family store file tracker. Called as part of modify family store file + * tracker RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param family the column family * @param dstSFT the store file tracker */ default void postModifyColumnFamilyStoreFileTracker( - final ObserverContext ctx, final TableName tableName, - final byte[] family, String dstSFT) throws IOException {} + final ObserverContext ctx, final TableName tableName, + final byte[] family, String dstSFT) throws IOException { + } /** - * Called prior to modifying a table's properties. Called as part of modify - * table procedure and it is async to the modify table RPC call. - * + * Called prior to modifying a table's properties. Called as part of modify table procedure and it + * is async to the modify table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table * @param newDescriptor after modify operation, table will have this descriptor */ - default void preModifyTableAction( - final ObserverContext ctx, - final TableName tableName, - final TableDescriptor currentDescriptor, - final TableDescriptor newDescriptor) throws IOException {} + default void preModifyTableAction(final ObserverContext ctx, + final TableName tableName, final TableDescriptor currentDescriptor, + final TableDescriptor newDescriptor) throws IOException { + } /** - * Called after to modifying a table's properties. Called as part of modify - * table procedure and it is async to the modify table RPC call. - * + * Called after to modifying a table's properties. Called as part of modify table procedure and it + * is async to the modify table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param oldDescriptor descriptor of table before modify operation happened * @param currentDescriptor current TableDescriptor of the table */ default void postCompletedModifyTableAction( - final ObserverContext ctx, - final TableName tableName, - final TableDescriptor oldDescriptor, - final TableDescriptor currentDescriptor) throws IOException {} + final ObserverContext ctx, final TableName tableName, + final TableDescriptor oldDescriptor, final TableDescriptor currentDescriptor) + throws IOException { + } /** - * Called prior to enabling a table. Called as part of enable table RPC call. + * Called prior to enabling a table. Called as part of enable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preEnableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called after the enableTable operation has been requested. 
Called as part - * of enable table RPC call. + * Called after the enableTable operation has been requested. Called as part of enable table RPC + * call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postEnableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called prior to enabling a table. Called as part of enable table procedure - * and it is async to the enable table RPC call. - * + * Called prior to enabling a table. Called as part of enable table procedure and it is async to + * the enable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preEnableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + default void preEnableTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after the enableTable operation has been requested. Called as part - * of enable table procedure and it is async to the enable table RPC call. - * + * Called after the enableTable operation has been requested. Called as part of enable table + * procedure and it is async to the enable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedEnableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** - * Called prior to disabling a table. Called as part of disable table RPC - * call. + * Called prior to disabling a table. Called as part of disable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preDisableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called after the disableTable operation has been requested. Called as part - * of disable table RPC call. + * Called after the disableTable operation has been requested. Called as part of disable table RPC + * call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postDisableTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** - * Called prior to disabling a table. Called as part of disable table procedure - * and it is asyn to the disable table RPC call. - * + * Called prior to disabling a table. Called as part of disable table procedure and it is asyn to + * the disable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ - default void preDisableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + default void preDisableTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { + } /** - * Called after the disableTable operation has been requested. Called as part - * of disable table procedure and it is asyn to the disable table RPC call. - * + * Called after the disableTable operation has been requested. 
Called as part of disable table + * procedure and it is asyn to the disable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedDisableTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException {} + final ObserverContext ctx, final TableName tableName) + throws IOException { + } /** * Called before a abortProcedure request has been processed. * @param ctx the environment to interact with the framework and master * @param procId the Id of the procedure */ - default void preAbortProcedure( - ObserverContext ctx, final long procId) throws IOException {} + default void preAbortProcedure(ObserverContext ctx, + final long procId) throws IOException { + } /** * Called after a abortProcedure request has been processed. * @param ctx the environment to interact with the framework and master */ default void postAbortProcedure(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before a getProcedures request has been processed. * @param ctx the environment to interact with the framework and master */ default void preGetProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after a getProcedures request has been processed. * @param ctx the environment to interact with the framework and master */ default void postGetProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before a getLocks request has been processed. * @param ctx the environment to interact with the framework and master * @throws IOException if something went wrong */ - default void preGetLocks(ObserverContext ctx) - throws IOException {} + default void preGetLocks(ObserverContext ctx) throws IOException { + } /** * Called after a getLocks request has been processed. * @param ctx the environment to interact with the framework and master * @throws IOException if something went wrong */ - default void postGetLocks( - ObserverContext ctx) throws IOException {} + default void postGetLocks(ObserverContext ctx) throws IOException { + } /** * Called prior to moving a given region from one region server to another. @@ -465,9 +457,9 @@ default void postGetLocks( * @param destServer the destination ServerName */ default void preMove(final ObserverContext ctx, - final RegionInfo region, final ServerName srcServer, - final ServerName destServer) - throws IOException {} + final RegionInfo region, final ServerName srcServer, final ServerName destServer) + throws IOException { + } /** * Called after the region move has been requested. @@ -477,9 +469,9 @@ default void preMove(final ObserverContext ctx, * @param destServer the destination ServerName */ default void postMove(final ObserverContext ctx, - final RegionInfo region, final ServerName srcServer, - final ServerName destServer) - throws IOException {} + final RegionInfo region, final ServerName srcServer, final ServerName destServer) + throws IOException { + } /** * Called prior to assigning a specific region. @@ -487,7 +479,8 @@ default void postMove(final ObserverContext ctx, * @param regionInfo the regionInfo of the region */ default void preAssign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called after the region assignment has been requested. 
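The move and assignment hooks are commonly used for auditing rather than policy, since throwing from the pre-hook would reject the operation outright. A small sketch under the same assumptions as above (illustrative class name, standard slf4j logging):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Logs every requested region move; purely observational, never blocks the operation. */
public class MoveAuditObserver implements MasterCoprocessor, MasterObserver {
  private static final Logger LOG = LoggerFactory.getLogger(MoveAuditObserver.class);

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preMove(ObserverContext<MasterCoprocessorEnvironment> ctx, RegionInfo region,
      ServerName srcServer, ServerName destServer) throws IOException {
    // Invoked before the move is submitted; throwing an IOException here would reject it.
    LOG.info("Region {} moving from {} to {}", region.getEncodedName(), srcServer, destServer);
  }
}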
@@ -495,7 +488,8 @@ default void preAssign(final ObserverContext ctx, * @param regionInfo the regionInfo of the region */ default void postAssign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called prior to unassigning a given region. @@ -503,7 +497,8 @@ default void postAssign(final ObserverContext ctx, * @param regionInfo */ default void preUnassign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called after the region unassignment has been requested. @@ -511,7 +506,8 @@ default void preUnassign(final ObserverContext ctx * @param regionInfo */ default void postUnassign(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called prior to marking a given region as offline. @@ -519,7 +515,8 @@ default void postUnassign(final ObserverContext ct * @param regionInfo */ default void preRegionOffline(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** * Called after the region has been marked offline. @@ -527,37 +524,40 @@ default void preRegionOffline(final ObserverContext ctx, - final RegionInfo regionInfo) throws IOException {} + final RegionInfo regionInfo) throws IOException { + } /** - * Called prior to requesting rebalancing of the cluster regions, though after - * the initial checks for regions in transition and the balance switch flag. + * Called prior to requesting rebalancing of the cluster regions, though after the initial checks + * for regions in transition and the balance switch flag. * @param ctx the environment to interact with the framework and master * @param request the request used to trigger the balancer */ - default void preBalance(final ObserverContext ctx, BalanceRequest request) - throws IOException {} + default void preBalance(final ObserverContext ctx, + BalanceRequest request) throws IOException { + } /** * Called after the balancing plan has been submitted. * @param ctx the environment to interact with the framework and master * @param request the request used to trigger the balance - * @param plans the RegionPlans which master has executed. RegionPlan serves as hint - * as for the final destination for the underlying region but may not represent the - * final state of assignment + * @param plans the RegionPlans which master has executed. RegionPlan serves as hint as for the + * final destination for the underlying region but may not represent the final state of + * assignment */ - default void postBalance(final ObserverContext ctx, BalanceRequest request, List plans) - throws IOException {} + default void postBalance(final ObserverContext ctx, + BalanceRequest request, List plans) throws IOException { + } /** - * Called prior to setting split / merge switch - * Supports Coprocessor 'bypass'. + * Called prior to setting split / merge switch Supports Coprocessor 'bypass'. 
* @param ctx the coprocessor instance's environment * @param newValue the new value submitted in the call * @param switchType type of switch */ default void preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType switchType) throws IOException {} + final boolean newValue, final MasterSwitchType switchType) throws IOException { + } /** * Called after setting split / merge switch @@ -566,7 +566,8 @@ default void preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final MasterSwitchType switchType) throws IOException {} + final boolean newValue, final MasterSwitchType switchType) throws IOException { + } /** * Called before the split region procedure is called. @@ -574,11 +575,9 @@ default void postSetSplitOrMergeEnabled(final ObserverContext c, - final TableName tableName, - final byte[] splitRow) - throws IOException {} + default void preSplitRegion(final ObserverContext c, + final TableName tableName, final byte[] splitRow) throws IOException { + } /** * Called before the region is split. @@ -586,11 +585,9 @@ default void preSplitRegion( * @param tableName the table where the region belongs to * @param splitRow split point */ - default void preSplitRegionAction( - final ObserverContext c, - final TableName tableName, - final byte[] splitRow) - throws IOException {} + default void preSplitRegionAction(final ObserverContext c, + final TableName tableName, final byte[] splitRow) throws IOException { + } /** * Called after the region is split. @@ -598,10 +595,9 @@ default void preSplitRegionAction( * @param regionInfoA the left daughter region * @param regionInfoB the right daughter region */ - default void postCompletedSplitRegionAction( - final ObserverContext c, - final RegionInfo regionInfoA, - final RegionInfo regionInfoB) throws IOException {} + default void postCompletedSplitRegionAction(final ObserverContext c, + final RegionInfo regionInfoA, final RegionInfo regionInfoB) throws IOException { + } /** * This will be called before update META step as part of split transaction. @@ -610,78 +606,78 @@ default void postCompletedSplitRegionAction( * @param metaEntries */ default void preSplitRegionBeforeMETAAction( - final ObserverContext ctx, - final byte[] splitKey, - final List metaEntries) throws IOException {} - + final ObserverContext ctx, final byte[] splitKey, + final List metaEntries) throws IOException { + } /** * This will be called after update META step as part of split transaction * @param ctx the environment to interact with the framework and master */ default void preSplitRegionAfterMETAAction( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after the roll back of the split region is completed * @param ctx the environment to interact with the framework and master */ default void postRollBackSplitRegionAction( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * Called before the regions merge. * @param ctx the environment to interact with the framework and master */ - default void preMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException {} + default void preMergeRegionsAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge) throws IOException { + } /** * called after the regions merge. 
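The split and merge hooks run before the corresponding procedures are queued, so throwing from a pre-hook rejects the request. A rough sketch that blocks manual splits of a hypothetical table (class and table names are made up for illustration):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

/** Rejects split requests for one table; merges and all other hooks stay default. */
public class NoSplitObserver implements MasterCoprocessor, MasterObserver {

  private static final TableName FROZEN = TableName.valueOf("immutable_layout");

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preSplitRegion(ObserverContext<MasterCoprocessorEnvironment> c,
      TableName tableName, byte[] splitRow) throws IOException {
    // Called before the split procedure is queued; throwing here aborts the request.
    if (FROZEN.equals(tableName)) {
      throw new DoNotRetryIOException("Splits are disabled for " + tableName);
    }
  }
}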
* @param ctx the environment to interact with the framework and master */ default void postCompletedMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - final RegionInfo mergedRegion) throws IOException {} + final ObserverContext ctx, final RegionInfo[] regionsToMerge, + final RegionInfo mergedRegion) throws IOException { + } /** * This will be called before update META step as part of regions merge transaction. * @param ctx the environment to interact with the framework and master * @param metaEntries mutations to execute on hbase:meta atomically with regions merge updates. - * Any puts or deletes to execute on hbase:meta can be added to the mutations. + * Any puts or deletes to execute on hbase:meta can be added to the mutations. */ - default void preMergeRegionsCommitAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - @MetaMutationAnnotation List metaEntries) throws IOException {} + default void preMergeRegionsCommitAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge, @MetaMutationAnnotation List metaEntries) + throws IOException { + } /** * This will be called after META step as part of regions merge transaction. * @param ctx the environment to interact with the framework and master */ - default void postMergeRegionsCommitAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - final RegionInfo mergedRegion) throws IOException {} + default void postMergeRegionsCommitAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge, final RegionInfo mergedRegion) throws IOException { + } /** * This will be called after the roll back of the regions merge. * @param ctx the environment to interact with the framework and master */ default void postRollBackMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException {} + final ObserverContext ctx, final RegionInfo[] regionsToMerge) + throws IOException { + } /** * Called prior to modifying the flag used to enable/disable region balancing. * @param ctx the coprocessor instance's environment */ default void preBalanceSwitch(final ObserverContext ctx, - final boolean newValue) throws IOException {} + final boolean newValue) throws IOException { + } /** * Called after the flag to enable/disable balancing has changed. @@ -690,59 +686,62 @@ default void preBalanceSwitch(final ObserverContext ctx, - final boolean oldValue, final boolean newValue) throws IOException {} + final boolean oldValue, final boolean newValue) throws IOException { + } /** * Called prior to shutting down the full HBase cluster, including this * {@link org.apache.hadoop.hbase.master.HMaster} process. */ default void preShutdown(final ObserverContext ctx) - throws IOException {} - + throws IOException { + } /** - * Called immediately prior to stopping this - * {@link org.apache.hadoop.hbase.master.HMaster} process. + * Called immediately prior to stopping this {@link org.apache.hadoop.hbase.master.HMaster} + * process. */ default void preStopMaster(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** - * Called immediately after an active master instance has completed - * initialization. Will not be called on standby master instances unless - * they take over the active role. + * Called immediately after an active master instance has completed initialization. Will not be + * called on standby master instances unless they take over the active role. 
*/ default void postStartMaster(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Call before the master initialization is set to true. * {@link org.apache.hadoop.hbase.master.HMaster} process. */ default void preMasterInitialization(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** - * Called before a new snapshot is taken. - * Called as part of snapshot RPC call. + * Called before a new snapshot is taken. Called as part of snapshot RPC call. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to snapshot */ default void preSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + throws IOException { + } /** - * Called after the snapshot operation has been requested. - * Called as part of snapshot RPC call. + * Called after the snapshot operation has been requested. Called as part of snapshot RPC call. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to snapshot */ default void postSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + throws IOException { + } /** * Called after the snapshot operation has been completed. @@ -760,7 +759,8 @@ default void postCompletedSnapshotAction(ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** * Called after listSnapshots request has been processed. @@ -768,69 +768,73 @@ default void preListSnapshot(final ObserverContext * @param snapshot the SnapshotDescriptor of the snapshot to list */ default void postListSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** - * Called before a snapshot is cloned. - * Called as part of restoreSnapshot RPC call. + * Called before a snapshot is cloned. Called as part of restoreSnapshot RPC call. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to create */ default void preCloneSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + throws IOException { + } /** - * Called after a snapshot clone operation has been requested. - * Called as part of restoreSnapshot RPC call. + * Called after a snapshot clone operation has been requested. Called as part of restoreSnapshot + * RPC call. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the v of the table to create */ default void postCloneSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + throws IOException { + } /** - * Called before a snapshot is restored. - * Called as part of restoreSnapshot RPC call. + * Called before a snapshot is restored. Called as part of restoreSnapshot RPC call. 
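The snapshot hooks receive both the SnapshotDescription and the table's TableDescriptor, which is usually enough for naming or policy checks. A hedged sketch, with a snapshot-naming rule invented purely for illustration:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

/** Requires snapshot names to carry the table name as a prefix; an illustrative policy only. */
public class SnapshotNamingObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
      SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException {
    // Runs as part of the snapshot RPC, before the snapshot is actually taken.
    String expectedPrefix = tableDescriptor.getTableName().getNameAsString() + "-";
    if (!snapshot.getName().startsWith(expectedPrefix)) {
      throw new DoNotRetryIOException(
        "Snapshot name " + snapshot.getName() + " must start with " + expectedPrefix);
    }
  }
}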
* @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to restore */ default void preRestoreSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + throws IOException { + } /** - * Called after a snapshot restore operation has been requested. - * Called as part of restoreSnapshot RPC call. + * Called after a snapshot restore operation has been requested. Called as part of restoreSnapshot + * RPC call. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to restore */ default void postRestoreSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) - throws IOException {} + throws IOException { + } /** - * Called before a snapshot is deleted. - * Called as part of deleteSnapshot RPC call. + * Called before a snapshot is deleted. Called as part of deleteSnapshot RPC call. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to delete */ default void preDeleteSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** - * Called after the delete snapshot operation has been requested. - * Called as part of deleteSnapshot RPC call. + * Called after the delete snapshot operation has been requested. Called as part of deleteSnapshot + * RPC call. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to delete */ default void postDeleteSnapshot(final ObserverContext ctx, - final SnapshotDescription snapshot) throws IOException {} + final SnapshotDescription snapshot) throws IOException { + } /** * Called before a getTableDescriptors request has been processed. @@ -840,8 +844,9 @@ default void postDeleteSnapshot(final ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException {} + List tableNamesList, List descriptors, String regex) + throws IOException { + } /** * Called after a getTableDescriptors request has been processed. @@ -851,8 +856,9 @@ default void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException {} + List tableNamesList, List descriptors, String regex) + throws IOException { + } /** * Called before a getTableNames request has been processed. @@ -861,7 +867,8 @@ default void postGetTableDescriptors(ObserverContext ctx, - List descriptors, String regex) throws IOException {} + List descriptors, String regex) throws IOException { + } /** * Called after a getTableNames request has been processed. @@ -870,34 +877,35 @@ default void preGetTableNames(ObserverContext ctx, * @param regex regular expression used for filtering the table names */ default void postGetTableNames(ObserverContext ctx, - List descriptors, String regex) throws IOException {} - - + List descriptors, String regex) throws IOException { + } /** - * Called before a new namespace is created by - * {@link org.apache.hadoop.hbase.master.HMaster}. + * Called before a new namespace is created by {@link org.apache.hadoop.hbase.master.HMaster}. 
* @param ctx the environment to interact with the framework and master * @param ns the NamespaceDescriptor for the table */ default void preCreateNamespace(final ObserverContext ctx, - NamespaceDescriptor ns) throws IOException {} + NamespaceDescriptor ns) throws IOException { + } + /** * Called after the createNamespace operation has been requested. * @param ctx the environment to interact with the framework and master * @param ns the NamespaceDescriptor for the table */ default void postCreateNamespace(final ObserverContext ctx, - NamespaceDescriptor ns) throws IOException {} + NamespaceDescriptor ns) throws IOException { + } /** - * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a - * namespace + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a namespace * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace */ default void preDeleteNamespace(final ObserverContext ctx, - String namespace) throws IOException {} + String namespace) throws IOException { + } /** * Called after the deleteNamespace operation has been requested. @@ -905,7 +913,8 @@ default void preDeleteNamespace(final ObserverContext ctx, - String namespace) throws IOException {} + String namespace) throws IOException { + } /** * Called prior to modifying a namespace's properties. @@ -915,7 +924,8 @@ default void postDeleteNamespace(final ObserverContext ctx, NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor) - throws IOException {} + throws IOException { + } /** * Called after the modifyNamespace operation has been requested. @@ -925,7 +935,8 @@ default void preModifyNamespace(final ObserverContext ctx, NamespaceDescriptor oldNsDescriptor, NamespaceDescriptor currentNsDescriptor) - throws IOException {} + throws IOException { + } /** * Called before a getNamespaceDescriptor request has been processed. @@ -933,7 +944,8 @@ default void postModifyNamespace(final ObserverContext ctx, - String namespace) throws IOException {} + String namespace) throws IOException { + } /** * Called after a getNamespaceDescriptor request has been processed. @@ -941,7 +953,8 @@ default void preGetNamespaceDescriptor(ObserverContext ctx, - NamespaceDescriptor ns) throws IOException {} + NamespaceDescriptor ns) throws IOException { + } /** * Called before a listNamespaces request has been processed. @@ -950,7 +963,8 @@ default void postGetNamespaceDescriptor(ObserverContext ctx, - List namespaces) throws IOException {} + List namespaces) throws IOException { + } /** * Called after a listNamespaces request has been processed. @@ -959,7 +973,8 @@ default void preListNamespaces(ObserverContext ctx * @throws IOException if something went wrong */ default void postListNamespaces(ObserverContext ctx, - List namespaces) throws IOException {}; + List namespaces) throws IOException { + }; /** * Called before a listNamespaceDescriptors request has been processed. @@ -967,7 +982,8 @@ default void postListNamespaces(ObserverContext ct * @param descriptors an empty list, can be filled with what to return by coprocessor */ default void preListNamespaceDescriptors(ObserverContext ctx, - List descriptors) throws IOException {} + List descriptors) throws IOException { + } /** * Called after a listNamespaceDescriptors request has been processed. 
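The namespace hooks follow the same pattern. A minimal sketch of a naming policy at namespace creation time (the lowercase rule is only an example, not an HBase requirement):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

/** Enforces lowercase namespace names at creation time; an illustrative policy only. */
public class NamespaceNamingObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
      NamespaceDescriptor ns) throws IOException {
    // Runs before the createNamespace procedure is submitted.
    if (!ns.getName().equals(ns.getName().toLowerCase())) {
      throw new DoNotRetryIOException("Namespace names must be lowercase: " + ns.getName());
    }
  }
}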
@@ -975,8 +991,8 @@ default void preListNamespaceDescriptors(ObserverContext ctx, - List descriptors) throws IOException {} - + List descriptors) throws IOException { + } /** * Called before the table memstore is flushed to disk. @@ -984,7 +1000,8 @@ default void postListNamespaceDescriptors(ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called after the table memstore is flushed to disk. @@ -992,7 +1009,8 @@ default void preTableFlush(final ObserverContext c * @param tableName the name of the table */ default void postTableFlush(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called before the quota for the user is stored. @@ -1001,7 +1019,8 @@ default void postTableFlush(final ObserverContext * @param quotas the current quota for the user */ default void preSetUserQuota(final ObserverContext ctx, - final String userName, final GlobalQuotaSettings quotas) throws IOException {} + final String userName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the user is stored. @@ -1010,7 +1029,8 @@ default void preSetUserQuota(final ObserverContext * @param quotas the resulting quota for the user */ default void postSetUserQuota(final ObserverContext ctx, - final String userName, final GlobalQuotaSettings quotas) throws IOException {} + final String userName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before the quota for the user on the specified table is stored. @@ -1019,9 +1039,10 @@ default void postSetUserQuota(final ObserverContext ctx, final String userName, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + default void preSetUserQuota(final ObserverContext ctx, + final String userName, final TableName tableName, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called after the quota for the user on the specified table is stored. @@ -1030,9 +1051,10 @@ default void preSetUserQuota( * @param tableName the name of the table * @param quotas the resulting quota for the user on the table */ - default void postSetUserQuota( - final ObserverContext ctx, final String userName, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + default void postSetUserQuota(final ObserverContext ctx, + final String userName, final TableName tableName, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called before the quota for the user on the specified namespace is stored. @@ -1041,9 +1063,10 @@ default void postSetUserQuota( * @param namespace the name of the namespace * @param quotas the current quota for the user on the namespace */ - default void preSetUserQuota( - final ObserverContext ctx, final String userName, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + default void preSetUserQuota(final ObserverContext ctx, + final String userName, final String namespace, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called after the quota for the user on the specified namespace is stored. 
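The quota hooks hand the coprocessor a GlobalQuotaSettings view of the quota being stored, which makes them a natural place for auditing. A sketch that only logs user-level quota changes (class name illustrative):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Logs every user quota update; does not alter or reject the request. */
public class QuotaAuditObserver implements MasterCoprocessor, MasterObserver {
  private static final Logger LOG = LoggerFactory.getLogger(QuotaAuditObserver.class);

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx,
      String userName, GlobalQuotaSettings quotas) throws IOException {
    // Called after the quota for the user has been stored.
    LOG.info("Quota for user {} is now {}", userName, quotas);
  }
}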
@@ -1052,9 +1075,10 @@ default void preSetUserQuota( * @param namespace the name of the namespace * @param quotas the resulting quota for the user on the namespace */ - default void postSetUserQuota( - final ObserverContext ctx, final String userName, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + default void postSetUserQuota(final ObserverContext ctx, + final String userName, final String namespace, final GlobalQuotaSettings quotas) + throws IOException { + } /** * Called before the quota for the table is stored. @@ -1063,7 +1087,8 @@ default void postSetUserQuota( * @param quotas the current quota for the table */ default void preSetTableQuota(final ObserverContext ctx, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the table is stored. @@ -1072,7 +1097,8 @@ default void preSetTableQuota(final ObserverContext ctx, - final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {} + final TableName tableName, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before the quota for the namespace is stored. @@ -1081,7 +1107,8 @@ default void postSetTableQuota(final ObserverContext ctx, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + final String namespace, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the namespace is stored. @@ -1090,7 +1117,8 @@ default void preSetNamespaceQuota(final ObserverContext ctx, - final String namespace, final GlobalQuotaSettings quotas) throws IOException {} + final String namespace, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before the quota for the region server is stored. @@ -1099,7 +1127,8 @@ default void postSetNamespaceQuota(final ObserverContext ctx, - final String regionServer, final GlobalQuotaSettings quotas) throws IOException {} + final String regionServer, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called after the quota for the region server is stored. @@ -1108,25 +1137,26 @@ default void preSetRegionServerQuota(final ObserverContext ctx, - final String regionServer, final GlobalQuotaSettings quotas) throws IOException {} + final String regionServer, final GlobalQuotaSettings quotas) throws IOException { + } /** * Called before merge regions request. * @param ctx coprocessor environment * @param regionsToMerge regions to be merged */ - default void preMergeRegions( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException {} + default void preMergeRegions(final ObserverContext ctx, + final RegionInfo[] regionsToMerge) throws IOException { + } /** * called after merge regions request. * @param c coprocessor environment * @param regionsToMerge regions to be merged */ - default void postMergeRegions( - final ObserverContext c, - final RegionInfo[] regionsToMerge) throws IOException {} + default void postMergeRegions(final ObserverContext c, + final RegionInfo[] regionsToMerge) throws IOException { + } /** * Called before servers are moved to target region server group @@ -1135,7 +1165,8 @@ default void postMergeRegions( * @param targetGroup destination group */ default void preMoveServersAndTables(final ObserverContext ctx, - Set
<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {} + Set<Address>
        servers, Set tables, String targetGroup) throws IOException { + } /** * Called after servers are moved to target region server group @@ -1144,7 +1175,8 @@ default void preMoveServersAndTables(final ObserverContext ctx, - Set
<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {} + Set<Address>
        servers, Set tables, String targetGroup) throws IOException { + } /** * Called before servers are moved to target region server group @@ -1153,7 +1185,8 @@ default void postMoveServersAndTables(final ObserverContext ctx, - Set
<Address> servers, String targetGroup) throws IOException {} + Set<Address>
        servers, String targetGroup) throws IOException { + } /** * Called after servers are moved to target region server group @@ -1162,7 +1195,8 @@ default void preMoveServers(final ObserverContext * @param targetGroup name of group */ default void postMoveServers(final ObserverContext ctx, - Set
<Address> servers, String targetGroup) throws IOException {} + Set<Address>
        servers, String targetGroup) throws IOException { + } /** * Called before tables are moved to target region server group @@ -1171,7 +1205,8 @@ default void postMoveServers(final ObserverContext * @param targetGroup name of group */ default void preMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException {} + Set tables, String targetGroup) throws IOException { + } /** * Called after servers are moved to target region server group @@ -1180,23 +1215,26 @@ default void preMoveTables(final ObserverContext c * @param targetGroup name of group */ default void postMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException {} + Set tables, String targetGroup) throws IOException { + } /** * Called before a new region server group is added * @param ctx the environment to interact with the framework and master * @param name group name */ - default void preAddRSGroup(final ObserverContext ctx, - String name) throws IOException {} + default void preAddRSGroup(final ObserverContext ctx, String name) + throws IOException { + } /** * Called after a new region server group is added * @param ctx the environment to interact with the framework and master * @param name group name */ - default void postAddRSGroup(final ObserverContext ctx, - String name) throws IOException {} + default void postAddRSGroup(final ObserverContext ctx, String name) + throws IOException { + } /** * Called before a region server group is removed @@ -1204,7 +1242,8 @@ default void postAddRSGroup(final ObserverContext * @param name group name */ default void preRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException {} + String name) throws IOException { + } /** * Called after a region server group is removed @@ -1212,7 +1251,8 @@ default void preRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException {} + String name) throws IOException { + } /** * Called before a region server group is removed @@ -1239,18 +1279,18 @@ default void postBalanceRSGroup(final ObserverContext ctx, - Set
        servers) throws IOException {} + default void preRemoveServers(final ObserverContext ctx, + Set
        servers) throws IOException { + } /** * Called after servers are removed from rsgroup * @param ctx the environment to interact with the framework and master * @param servers set of servers to remove */ - default void postRemoveServers( - final ObserverContext ctx, - Set
        servers) throws IOException {} + default void postRemoveServers(final ObserverContext ctx, + Set
        servers) throws IOException { + } /** * Called before getting region server group info of the passed groupName. @@ -1258,7 +1298,8 @@ default void postRemoveServers( * @param groupName name of the group to get RSGroupInfo for */ default void preGetRSGroupInfo(final ObserverContext ctx, - final String groupName) throws IOException {} + final String groupName) throws IOException { + } /** * Called after getting region server group info of the passed groupName. @@ -1266,7 +1307,8 @@ default void preGetRSGroupInfo(final ObserverContext ctx, - final String groupName) throws IOException {} + final String groupName) throws IOException { + } /** * Called before getting region server group info of the passed tableName. @@ -1274,7 +1316,8 @@ default void postGetRSGroupInfo(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called after getting region server group info of the passed tableName. @@ -1282,21 +1325,24 @@ default void preGetRSGroupInfoOfTable(final ObserverContext ctx, - final TableName tableName) throws IOException {} + final TableName tableName) throws IOException { + } /** * Called before listing region server group information. * @param ctx the environment to interact with the framework and master */ default void preListRSGroups(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after listing region server group information. * @param ctx the environment to interact with the framework and master */ default void postListRSGroups(final ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before listing all tables in the region server group. @@ -1304,7 +1350,8 @@ default void postListRSGroups(final ObserverContext ctx, - final String groupName) throws IOException {} + final String groupName) throws IOException { + } /** * Called after listing all tables in the region server group. @@ -1312,7 +1359,8 @@ default void preListTablesInRSGroup(final ObserverContext ctx, - final String groupName) throws IOException {} + final String groupName) throws IOException { + } /** * Called before rename rsgroup. @@ -1321,7 +1369,8 @@ default void postListTablesInRSGroup(final ObserverContext ctx, - final String oldName, final String newName) throws IOException {} + final String oldName, final String newName) throws IOException { + } /** * Called after rename rsgroup. @@ -1330,7 +1379,8 @@ default void preRenameRSGroup(final ObserverContext ctx, - final String oldName, final String newName) throws IOException {} + final String oldName, final String newName) throws IOException { + } /** * Called before update rsgroup config. @@ -1339,8 +1389,8 @@ default void postRenameRSGroup(final ObserverContext ctx, - final String groupName, final Map configuration) - throws IOException {} + final String groupName, final Map configuration) throws IOException { + } /** * Called after update rsgroup config. @@ -1349,8 +1399,8 @@ default void preUpdateRSGroupConfig(final ObserverContext ctx, - final String groupName, final Map configuration) - throws IOException {} + final String groupName, final Map configuration) throws IOException { + } /** * Called before getting the configured namespaces and tables in the region server group. 
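The rsgroup hooks identify region servers by Address rather than ServerName. A sketch under the same assumptions as the earlier examples, logging group membership changes after they complete:

import java.io.IOException;
import java.util.Optional;
import java.util.Set;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.net.Address;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Logs region server group membership changes; observational only. */
public class RSGroupAuditObserver implements MasterCoprocessor, MasterObserver {
  private static final Logger LOG = LoggerFactory.getLogger(RSGroupAuditObserver.class);

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
      Set<Address> servers, String targetGroup) throws IOException {
    // Called after the servers have been moved to the target region server group.
    LOG.info("Moved servers {} to group {}", servers, targetGroup);
  }
}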
@@ -1358,8 +1408,9 @@ default void postUpdateRSGroupConfig(final ObserverContext ctx, final String groupName) - throws IOException {} + final ObserverContext ctx, final String groupName) + throws IOException { + } /** * Called after getting the configured namespaces and tables in the region server group. @@ -1367,8 +1418,9 @@ default void preGetConfiguredNamespacesAndTablesInRSGroup( * @param groupName name of the region server group */ default void postGetConfiguredNamespacesAndTablesInRSGroup( - final ObserverContext ctx, final String groupName) - throws IOException {} + final ObserverContext ctx, final String groupName) + throws IOException { + } /** * Called before getting region server group info of the passed server. @@ -1376,7 +1428,8 @@ default void postGetConfiguredNamespacesAndTablesInRSGroup( * @param server server to get RSGroupInfo for */ default void preGetRSGroupInfoOfServer(final ObserverContext ctx, - final Address server) throws IOException {} + final Address server) throws IOException { + } /** * Called after getting region server group info of the passed server. @@ -1384,7 +1437,8 @@ default void preGetRSGroupInfoOfServer(final ObserverContext ctx, - final Address server) throws IOException {} + final Address server) throws IOException { + } /** * Called before add a replication peer @@ -1393,7 +1447,8 @@ default void postGetRSGroupInfoOfServer(final ObserverContext ctx, - String peerId, ReplicationPeerConfig peerConfig) throws IOException {} + String peerId, ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called after add a replication peer @@ -1402,7 +1457,8 @@ default void preAddReplicationPeer(final ObserverContext ctx, - String peerId, ReplicationPeerConfig peerConfig) throws IOException {} + String peerId, ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called before remove a replication peer @@ -1410,7 +1466,8 @@ default void postAddReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** * Called after remove a replication peer @@ -1418,7 +1475,8 @@ default void preRemoveReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** * Called before enable a replication peer @@ -1426,7 +1484,8 @@ default void postRemoveReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** * Called after enable a replication peer @@ -1434,7 +1493,8 @@ default void preEnableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** * Called before disable a replication peer @@ -1442,7 +1502,8 @@ default void postEnableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** * Called after disable a replication peer @@ -1450,7 +1511,8 @@ default void preDisableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** * Called before get the configured ReplicationPeerConfig for the specified peer @@ -1458,15 +1520,17 @@ default void postDisableReplicationPeer(final ObserverContext ctx, - String peerId) throws IOException {} + String peerId) throws IOException { + } /** * Called after get the configured ReplicationPeerConfig for the specified peer * @param ctx * @param peerId a short name that identifies the peer */ - default void 
postGetReplicationPeerConfig( - final ObserverContext ctx, String peerId) throws IOException {} + default void postGetReplicationPeerConfig(final ObserverContext ctx, + String peerId) throws IOException { + } /** * Called before update peerConfig for the specified peer @@ -1475,7 +1539,8 @@ default void postGetReplicationPeerConfig( */ default void preUpdateReplicationPeerConfig( final ObserverContext ctx, String peerId, - ReplicationPeerConfig peerConfig) throws IOException {} + ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called after update peerConfig for the specified peer @@ -1484,7 +1549,8 @@ default void preUpdateReplicationPeerConfig( */ default void postUpdateReplicationPeerConfig( final ObserverContext ctx, String peerId, - ReplicationPeerConfig peerConfig) throws IOException {} + ReplicationPeerConfig peerConfig) throws IOException { + } /** * Called before list replication peers. @@ -1492,7 +1558,8 @@ default void postUpdateReplicationPeerConfig( * @param regex The regular expression to match peer id */ default void preListReplicationPeers(final ObserverContext ctx, - String regex) throws IOException {} + String regex) throws IOException { + } /** * Called after list replication peers. @@ -1500,7 +1567,8 @@ default void preListReplicationPeers(final ObserverContext ctx, - String regex) throws IOException {} + String regex) throws IOException { + } /** * Called before transit current cluster state for the specified synchronous replication peer @@ -1530,89 +1598,102 @@ default void postTransitReplicationPeerSyncReplicationState( * @param ctx the environment to interact with the framework and master */ default void preRequestLock(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String description) throws IOException {} + TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { + } /** * Called after new LockProcedure is queued. * @param ctx the environment to interact with the framework and master */ default void postRequestLock(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String description) throws IOException {} + TableName tableName, RegionInfo[] regionInfos, String description) throws IOException { + } /** * Called before heartbeat to a lock. * @param ctx the environment to interact with the framework and master */ - default void preLockHeartbeat(ObserverContext ctx, - TableName tn, String description) throws IOException {} + default void preLockHeartbeat(ObserverContext ctx, TableName tn, + String description) throws IOException { + } /** * Called after heartbeat to a lock. * @param ctx the environment to interact with the framework and master */ default void postLockHeartbeat(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called before get cluster status. */ default void preGetClusterMetrics(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after get cluster status. */ default void postGetClusterMetrics(ObserverContext ctx, - ClusterMetrics status) throws IOException {} + ClusterMetrics status) throws IOException { + } /** * Called before clear dead region servers. */ default void preClearDeadServers(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after clear dead region servers. 
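The replication peer hooks bracket the peer-management RPCs, so a pre-hook can validate the supplied ReplicationPeerConfig before the procedure runs. A rough sketch; the non-empty cluster key rule is an assumption made here for illustration, since endpoint-only peers may legitimately omit it:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

/** Refuses peer config updates that drop the cluster key; an illustrative policy only. */
public class PeerConfigGuardObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preUpdateReplicationPeerConfig(ObserverContext<MasterCoprocessorEnvironment> ctx,
      String peerId, ReplicationPeerConfig peerConfig) throws IOException {
    // Runs before the update-peer-config procedure is submitted.
    if (peerConfig.getClusterKey() == null || peerConfig.getClusterKey().isEmpty()) {
      throw new DoNotRetryIOException("Peer " + peerId + " must define a cluster key");
    }
  }
}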
*/ default void postClearDeadServers(ObserverContext ctx, - List servers, List notClearedServers) - throws IOException {} + List servers, List notClearedServers) throws IOException { + } /** * Called before decommission region servers. */ default void preDecommissionRegionServers(ObserverContext ctx, - List servers, boolean offload) throws IOException {} + List servers, boolean offload) throws IOException { + } /** * Called after decommission region servers. */ default void postDecommissionRegionServers(ObserverContext ctx, - List servers, boolean offload) throws IOException {} + List servers, boolean offload) throws IOException { + } /** * Called before list decommissioned region servers. */ default void preListDecommissionedRegionServers(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * Called after list decommissioned region servers. */ - default void postListDecommissionedRegionServers(ObserverContext ctx) - throws IOException {} + default void postListDecommissionedRegionServers( + ObserverContext ctx) throws IOException { + } /** * Called before recommission region server. */ default void preRecommissionRegionServer(ObserverContext ctx, - ServerName server, List encodedRegionNames) throws IOException {} + ServerName server, List encodedRegionNames) throws IOException { + } /** * Called after recommission region server. */ default void postRecommissionRegionServer(ObserverContext ctx, - ServerName server, List encodedRegionNames) throws IOException {} + ServerName server, List encodedRegionNames) throws IOException { + } /** * Called before switching rpc throttle enabled state. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java index 4acec8c0956e..29b8ac2f2676 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; @@ -40,6 +38,7 @@ import org.apache.hadoop.hbase.util.LossyCounting; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; /** @@ -65,11 +64,8 @@ enum MetaTableOps { } private ImmutableMap, MetaTableOps> opsNameMap = - ImmutableMap., MetaTableOps>builder() - .put(Put.class, MetaTableOps.PUT) - .put(Get.class, MetaTableOps.GET) - .put(Delete.class, MetaTableOps.DELETE) - .build(); + ImmutableMap., MetaTableOps> builder().put(Put.class, MetaTableOps.PUT) + .put(Get.class, MetaTableOps.GET).put(Delete.class, MetaTableOps.DELETE).build(); class ExampleRegionObserverMeta implements RegionCoprocessor, RegionObserver { @@ -96,7 +92,7 @@ public void preDelete(ObserverContext e, Delete de registerAndMarkMetrics(e, delete); } - private void registerAndMarkMetrics(ObserverContext e, Row row){ + private void registerAndMarkMetrics(ObserverContext e, Row row) { if (!active || !isMetaTableOp(e)) { return; } @@ -122,7 +118,7 @@ private String getTableNameFromOp(Row op) { /** * Get regionId from Ops such as: get, put, delete. - * @param op such as get, put or delete. + * @param op such as get, put or delete. */ private String getRegionIdFromOp(Row op) { final String tableRowKey = Bytes.toString(op.getRow()); @@ -134,8 +130,7 @@ private String getRegionIdFromOp(Row op) { } private boolean isMetaTableOp(ObserverContext e) { - return TableName.META_TABLE_NAME - .equals(e.getEnvironment().getRegionInfo().getTable()); + return TableName.META_TABLE_NAME.equals(e.getEnvironment().getRegionInfo().getTable()); } private void clientMetricRegisterAndMark() { @@ -193,7 +188,7 @@ private void registerAndMarkMeter(String requestMeter) { if (requestMeter.isEmpty()) { return; } - if(!registry.get(requestMeter).isPresent()){ + if (!registry.get(requestMeter).isPresent()) { metrics.add(requestMeter); } registry.meter(requestMeter).mark(); @@ -269,7 +264,7 @@ public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() - .equals(TableName.META_TABLE_NAME)) { + .equals(TableName.META_TABLE_NAME)) { RegionCoprocessorEnvironment regionCoprocessorEnv = (RegionCoprocessorEnvironment) env; registry = regionCoprocessorEnv.getMetricRegistryForRegionServer(); LossyCounting.LossyCountingListener listener = key -> { @@ -287,7 +282,7 @@ public void start(CoprocessorEnvironment env) throws IOException { @Override public void stop(CoprocessorEnvironment env) throws IOException { // since meta region can move around, clear stale metrics when stop. - for(String metric:metrics){ + for (String metric : metrics) { registry.remove(metric); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java index a77a0fe31f0c..0a25f7ce5a7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetricsCoprocessor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.coprocessor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility class for tracking metrics for various types of coprocessors. Each coprocessor instance @@ -36,48 +33,41 @@ public class MetricsCoprocessor { // Master coprocessor metrics private static final String MASTER_COPROC_METRICS_NAME = "Coprocessor.Master"; private static final String MASTER_COPROC_METRICS_CONTEXT = "master"; - private static final String MASTER_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase MasterObservers"; - private static final String MASTER_COPROC_METRICS_JMX_CONTEXT - = "Master,sub=" + MASTER_COPROC_METRICS_NAME; + private static final String MASTER_COPROC_METRICS_DESCRIPTION = + "Metrics about HBase MasterObservers"; + private static final String MASTER_COPROC_METRICS_JMX_CONTEXT = + "Master,sub=" + MASTER_COPROC_METRICS_NAME; // RegionServer coprocessor metrics private static final String RS_COPROC_METRICS_NAME = "Coprocessor.RegionServer"; private static final String RS_COPROC_METRICS_CONTEXT = "regionserver"; - private static final String RS_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase RegionServerObservers"; - private static final String RS_COPROC_METRICS_JMX_CONTEXT = "RegionServer,sub=" - + RS_COPROC_METRICS_NAME; + private static final String RS_COPROC_METRICS_DESCRIPTION = + "Metrics about HBase RegionServerObservers"; + private static final String RS_COPROC_METRICS_JMX_CONTEXT = + "RegionServer,sub=" + RS_COPROC_METRICS_NAME; // Region coprocessor metrics private static final String REGION_COPROC_METRICS_NAME = "Coprocessor.Region"; private static final String REGION_COPROC_METRICS_CONTEXT = "regionserver"; - private static final String REGION_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase RegionObservers"; - private static final String REGION_COPROC_METRICS_JMX_CONTEXT - = "RegionServer,sub=" + REGION_COPROC_METRICS_NAME; + private static final String REGION_COPROC_METRICS_DESCRIPTION = + "Metrics about HBase RegionObservers"; + private static final String REGION_COPROC_METRICS_JMX_CONTEXT = + "RegionServer,sub=" + REGION_COPROC_METRICS_NAME; // WAL coprocessor metrics private static final String WAL_COPROC_METRICS_NAME = "Coprocessor.WAL"; private static final String WAL_COPROC_METRICS_CONTEXT = "regionserver"; - private static final String WAL_COPROC_METRICS_DESCRIPTION - = "Metrics about HBase WALObservers"; - private static final String WAL_COPROC_METRICS_JMX_CONTEXT - = "RegionServer,sub=" + WAL_COPROC_METRICS_NAME; + private static final String WAL_COPROC_METRICS_DESCRIPTION = "Metrics about HBase WALObservers"; + private static final String WAL_COPROC_METRICS_JMX_CONTEXT = + "RegionServer,sub=" + WAL_COPROC_METRICS_NAME; private static String 
suffix(String metricName, String cpName) { - return new StringBuilder(metricName) - .append(".") - .append("CP_") - .append(cpName) - .toString(); + return new StringBuilder(metricName).append(".").append("CP_").append(cpName).toString(); } static MetricRegistryInfo createRegistryInfoForMasterCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(MASTER_COPROC_METRICS_NAME, clazz), - MASTER_COPROC_METRICS_DESCRIPTION, - suffix(MASTER_COPROC_METRICS_JMX_CONTEXT, clazz), + return new MetricRegistryInfo(suffix(MASTER_COPROC_METRICS_NAME, clazz), + MASTER_COPROC_METRICS_DESCRIPTION, suffix(MASTER_COPROC_METRICS_JMX_CONTEXT, clazz), MASTER_COPROC_METRICS_CONTEXT, false); } @@ -86,10 +76,8 @@ public static MetricRegistry createRegistryForMasterCoprocessor(String clazz) { } static MetricRegistryInfo createRegistryInfoForRSCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(RS_COPROC_METRICS_NAME, clazz), - RS_COPROC_METRICS_DESCRIPTION, - suffix(RS_COPROC_METRICS_JMX_CONTEXT, clazz), + return new MetricRegistryInfo(suffix(RS_COPROC_METRICS_NAME, clazz), + RS_COPROC_METRICS_DESCRIPTION, suffix(RS_COPROC_METRICS_JMX_CONTEXT, clazz), RS_COPROC_METRICS_CONTEXT, false); } @@ -98,10 +86,8 @@ public static MetricRegistry createRegistryForRSCoprocessor(String clazz) { } public static MetricRegistryInfo createRegistryInfoForRegionCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(REGION_COPROC_METRICS_NAME, clazz), - REGION_COPROC_METRICS_DESCRIPTION, - suffix(REGION_COPROC_METRICS_JMX_CONTEXT, clazz), + return new MetricRegistryInfo(suffix(REGION_COPROC_METRICS_NAME, clazz), + REGION_COPROC_METRICS_DESCRIPTION, suffix(REGION_COPROC_METRICS_JMX_CONTEXT, clazz), REGION_COPROC_METRICS_CONTEXT, false); } @@ -110,10 +96,8 @@ public static MetricRegistry createRegistryForRegionCoprocessor(String clazz) { } public static MetricRegistryInfo createRegistryInfoForWALCoprocessor(String clazz) { - return new MetricRegistryInfo( - suffix(WAL_COPROC_METRICS_NAME, clazz), - WAL_COPROC_METRICS_DESCRIPTION, - suffix(WAL_COPROC_METRICS_JMX_CONTEXT, clazz), + return new MetricRegistryInfo(suffix(WAL_COPROC_METRICS_NAME, clazz), + WAL_COPROC_METRICS_DESCRIPTION, suffix(WAL_COPROC_METRICS_JMX_CONTEXT, clazz), WAL_COPROC_METRICS_CONTEXT, false); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java index 5262732a45c9..85a8b41b7a86 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java @@ -63,17 +63,12 @@ /** * This class implements atomic multi row transactions using * {@link HRegion#mutateRowsWithLocks(Collection, Collection, long, long)} and Coprocessor - * endpoints. We can also specify some conditions to perform conditional update. - * - * Defines a protocol to perform multi row transactions. - * See {@link MultiRowMutationEndpoint} for the implementation. - *
        - * See - * {@link HRegion#mutateRowsWithLocks(Collection, Collection, long, long)} - * for details and limitations. + * endpoints. We can also specify some conditions to perform conditional update. Defines a protocol + * to perform multi row transactions. See {@link MultiRowMutationEndpoint} for the implementation. *
        - * Example: - * + * See {@link HRegion#mutateRowsWithLocks(Collection, Collection, long, long)} for details and + * limitations.
        + * Example: * Put p = new Put(row1); * Delete d = new Delete(row2); * Increment i = new Increment(row3); @@ -131,8 +126,7 @@ public void mutateRows(RpcController controller, MutateRowsRequest request, for (Mutation m : mutations) { // check whether rows are in range for this region if (!HRegion.rowIsInRange(regionInfo, m.getRow())) { - String msg = "Requested row out of range '" - + Bytes.toStringBinary(m.getRow()) + "'"; + String msg = "Requested row out of range '" + Bytes.toStringBinary(m.getRow()) + "'"; if (rowsToLock.isEmpty()) { // if this is the first row, region might have moved, // allow client to retry @@ -208,8 +202,9 @@ private boolean matches(Region region, ClientProtos.Condition condition) throws comparator = ProtobufUtil.toComparator(condition.getComparator()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = + condition.hasTimeRange() ? ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); Get get = new Get(row); if (family != null) { @@ -251,9 +246,8 @@ private boolean matches(Region region, ClientProtos.Condition condition) throws private void checkFamily(Region region, byte[] family) throws NoSuchColumnFamilyException { if (!region.getTableDescriptor().hasColumnFamily(family)) { - throw new NoSuchColumnFamilyException( - "Column family " + Bytes.toString(family) + " does not exist in region " + this - + " in table " + region.getTableDescriptor()); + throw new NoSuchColumnFamilyException("Column family " + Bytes.toString(family) + + " does not exist in region " + this + " in table " + region.getTableDescriptor()); } } @@ -284,17 +278,17 @@ public Iterable getServices() { /** * Stores a reference to the coprocessor environment provided by the * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this - * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded - * on a table region, so always expects this to be an instance of + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on + * a table region, so always expects this to be an instance of * {@link RegionCoprocessorEnvironment}. 
* @param env the environment provided by the coprocessor host * @throws IOException if the provided environment is not an instance of - * {@code RegionCoprocessorEnvironment} + * {@code RegionCoprocessorEnvironment} */ @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; } else { throw new CoprocessorException("Must be loaded on a table region!"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java index c756926fb213..0a95950a3f90 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java @@ -17,23 +17,22 @@ */ package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - /** * Carries the execution state for a given invocation of an Observer coprocessor - * ({@link RegionObserver}, {@link MasterObserver}, or {@link WALObserver}) - * method. The same ObserverContext instance is passed sequentially to all loaded - * coprocessors for a given Observer method trigger, with the - * CoprocessorEnvironment reference set appropriately for each Coprocessor type: - * e.g. the RegionCoprocessorEnvironment is passed to RegionCoprocessors, and so on. - * @param The {@link CoprocessorEnvironment} subclass applicable to the - * revelant Observer interface. + * ({@link RegionObserver}, {@link MasterObserver}, or {@link WALObserver}) method. The same + * ObserverContext instance is passed sequentially to all loaded coprocessors for a given Observer + * method trigger, with the CoprocessorEnvironment reference set appropriately for each + * Coprocessor type: e.g. the RegionCoprocessorEnvironment is passed to RegionCoprocessors, and so + * on. + * @param The {@link CoprocessorEnvironment} subclass applicable to the revelant Observer + * interface. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -47,29 +46,29 @@ public interface ObserverContext { * Coprocessor invocations, only on a small subset of methods, mostly preXXX calls in * RegionObserver. Check javadoc on the pertinent Coprocessor Observer to see if * bypass is supported. - *

        This behavior of honoring only a subset of methods is new since hbase-2.0.0. - *

        Where bypass is supported what is being bypassed is all of the core code - * implementing the remainder of the operation. In order to understand what - * calling bypass() will skip, a coprocessor implementer should read and - * understand all of the remaining code and its nuances. Although this - * is good practice for coprocessor developers in general, it demands a lot. - * What is skipped is extremely version dependent. The core code will vary, perhaps significantly, - * even between point releases. We do not provide the promise of consistent behavior even between - * point releases for the bypass semantic. To achieve - * that we could not change any code between hook points. Therefore the - * coprocessor implementer becomes an HBase core developer in practice as soon - * as they rely on bypass(). Every release of HBase may break the assumption - * that the replacement for the bypassed code takes care of all necessary - * skipped concerns. Because those concerns can change at any point, such an - * assumption is never safe.

        - *

        As of hbase2, when bypass has been set, we will NOT call any Coprocessors follow the - * bypassing Coprocessor; we cut short the processing and return the bypassing Coprocessors - * response (this used be a separate 'complete' option that has been folded into the - * 'bypass' in hbase2.

        + *

        + * This behavior of honoring only a subset of methods is new since hbase-2.0.0. + *

        + * Where bypass is supported what is being bypassed is all of the core code implementing the + * remainder of the operation. In order to understand what calling bypass() will skip, a + * coprocessor implementer should read and understand all of the remaining code and its nuances. + * Although this is good practice for coprocessor developers in general, it demands a lot. What is + * skipped is extremely version dependent. The core code will vary, perhaps significantly, even + * between point releases. We do not provide the promise of consistent behavior even between point + * releases for the bypass semantic. To achieve that we could not change any code between hook + * points. Therefore the coprocessor implementer becomes an HBase core developer in practice as + * soon as they rely on bypass(). Every release of HBase may break the assumption that the + * replacement for the bypassed code takes care of all necessary skipped concerns. Because those + * concerns can change at any point, such an assumption is never safe. + *

        + *

        + * As of hbase2, when bypass has been set, we will NOT call any Coprocessors following the bypassing + * Coprocessor; we cut short the processing and return the bypassing Coprocessor's response (this + * used to be a separate 'complete' option that has been folded into the 'bypass' in hbase2). + *

        */ void bypass(); - /** * Returns the active user for the coprocessor call. If an explicit {@code User} instance was * provided to the constructor, that will be returned, otherwise if we are in the context of an diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java index 9a23ffaa4a87..a3a4a93005c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java @@ -69,7 +69,7 @@ public void bypass() { /** * @return {@code true}, if {@link ObserverContext#bypass()} was called by one of the loaded - * coprocessors, {@code false} otherwise. + * coprocessors, {@code false} otherwise. */ public boolean shouldBypass() { if (!isBypassable()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java index d7705ef25b7c..1408fef3dd3d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java @@ -34,12 +34,10 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.yetus.audience.InterfaceAudience; - /** * Wraps a Configuration to make it read-only. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java index 16c6d3990402..15d6cba0de26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface RegionCoprocessor extends Coprocessor { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java index 84e6d25e7699..44db13505703 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -58,48 +55,44 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironmentDo not close! This is a shared connection - * with the hosting server. Throws {@link UnsupportedOperationException} if you try to close - * or abort it. - * - * For light-weight usage only. Heavy-duty usage will pull down - * the hosting RegionServer responsiveness as well as that of other Coprocessors making use of - * this Connection. Use to create table on start or to do administrative operations. Coprocessors - * should create their own Connections if heavy usage to avoid impinging on hosting Server - * operation. To create a Connection or if a Coprocessor requires a region with a particular - * Configuration, use {@link org.apache.hadoop.hbase.client.ConnectionFactory} or + * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection with + * the hosting server. Throws {@link UnsupportedOperationException} if you try to close or abort + * it. For light-weight usage only. Heavy-duty usage will pull down the hosting RegionServer + * responsiveness as well as that of other Coprocessors making use of this Connection. Use to + * create table on start or to do administrative operations. Coprocessors should create their own + * Connections if heavy usage to avoid impinging on hosting Server operation. To create a + * Connection or if a Coprocessor requires a region with a particular Configuration, use + * {@link org.apache.hadoop.hbase.client.ConnectionFactory} or * {@link #createConnection(Configuration)}}. - * - *

        Be aware that operations that make use of this Connection are executed as the RegionServer + *

        + * Be aware that operations that make use of this Connection are executed as the RegionServer * User, the hbase super user that started this server process. Exercise caution running - * operations as this User (See {@link #createConnection(Configuration)}} to run as other than - * the RegionServer User). - * - *

        Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + * operations as this User (See {@link #createConnection(Configuration)}} to run as other than the + * RegionServer User). + *

        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. - * * @see #createConnection(Configuration) * @return The host's Connection to the Cluster. */ Connection getConnection(); /** - * Creates a cluster connection using the passed Configuration. - * - * Creating a Connection is a heavy-weight operation. The resultant Connection's cache of - * region locations will be empty. Therefore you should cache and reuse Connections rather than - * create a Connection on demand. Create on start of your Coprocessor. You will have to cast - * the CoprocessorEnvironment appropriately to get at this API at start time because - * Coprocessor start method is passed a subclass of this CoprocessorEnvironment or fetch - * Connection using a synchronized accessor initializing the Connection on first access. Close - * the returned Connection when done to free resources. Using this API rather - * than {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} + * Creates a cluster connection using the passed Configuration. Creating a Connection is a + * heavy-weight operation. The resultant Connection's cache of region locations will be empty. + * Therefore you should cache and reuse Connections rather than create a Connection on demand. + * Create on start of your Coprocessor. You will have to cast the CoprocessorEnvironment + * appropriately to get at this API at start time because Coprocessor start method is passed a + * subclass of this CoprocessorEnvironment or fetch Connection using a synchronized accessor + * initializing the Connection on first access. Close the returned Connection when done to free + * resources. Using this API rather than + * {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} * returns a Connection that will short-circuit RPC if the target is a local resource. Use * ConnectionFactory if you don't need this ability. - * - *

        Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + *

        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. @@ -109,14 +102,15 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironmentSee ExampleRegionObserverWithMetrics class in the hbase-examples modules to see examples of how - * metrics can be instantiated and used.

        + * metrics tracked at this level will be shared by all the coprocessor instances of the same class + * in the same region server process. Note that there will be one region coprocessor environment + * per region in the server, but all of these instances will share the same MetricRegistry. The + * metric instances (like Counter, Timer, etc) will also be shared among all of the region + * coprocessor instances. + *

        + * See ExampleRegionObserverWithMetrics class in the hbase-examples module to see examples of how + * metrics can be instantiated and used. + *

        * @return A MetricRegistry for the coprocessor class to track and export metrics. */ // Note: we are not exposing getMetricRegistryForRegion(). per-region metrics are already costly diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 4f4d79cdbe04..e93c9a52f9c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -7,23 +7,20 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -113,13 +110,15 @@ enum MutationType { * Called before the region is reported as open to the master. * @param c the environment provided by the region server */ - default void preOpen(ObserverContext c) throws IOException {} + default void preOpen(ObserverContext c) throws IOException { + } /** * Called after the region is reported as open to the master. * @param c the environment provided by the region server */ - default void postOpen(ObserverContext c) {} + default void postOpen(ObserverContext c) { + } /** * Called before the memstore is flushed to disk. @@ -127,7 +126,8 @@ default void postOpen(ObserverContext c) {} * @param tracker tracker used to track the life cycle of a flush */ default void preFlush(final ObserverContext c, - FlushLifeCycleTracker tracker) throws IOException {} + FlushLifeCycleTracker tracker) throws IOException { + } /** * Called before we open store scanner for flush. You can use the {@code options} to change max @@ -137,7 +137,8 @@ default void preFlush(final ObserverContext c, * @param options used to change max versions and TTL for the scanner being opened */ default void preFlushScannerOpen(ObserverContext c, Store store, - ScanOptions options,FlushLifeCycleTracker tracker) throws IOException {} + ScanOptions options, FlushLifeCycleTracker tracker) throws IOException { + } /** * Called before a Store's memstore is flushed to disk. @@ -160,7 +161,8 @@ default InternalScanner preFlush(ObserverContext c * @throws IOException if an error occurred on the coprocessor */ default void postFlush(ObserverContext c, - FlushLifeCycleTracker tracker) throws IOException {} + FlushLifeCycleTracker tracker) throws IOException { + } /** * Called after a Store's memstore is flushed to disk. 
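
(Illustrative sketch, not part of this patch: the RegionObserver hooks above all default to empty bodies, so a coprocessor overrides only what it needs. The class and metric names below are invented for the example; the per-RegionServer MetricRegistry is the shared registry described in the RegionCoprocessorEnvironment javadoc earlier in this patch, and ExampleRegionObserverWithMetrics in hbase-examples is the canonical reference.)

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.metrics.Counter;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.wal.WALEdit;

public class PutCountingObserver implements RegionCoprocessor, RegionObserver {
  private Counter putCounter;

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    // Expose this class as the observer so the region server invokes its hooks.
    return Optional.of(this);
  }

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    if (env instanceof RegionCoprocessorEnvironment) {
      // The registry, and therefore this counter, is shared by every instance of this
      // coprocessor class hosted on the same RegionServer, however many regions it is loaded on.
      MetricRegistry registry =
        ((RegionCoprocessorEnvironment) env).getMetricRegistryForRegionServer();
      putCounter = registry.counter("putCount"); // metric name invented for the example
    }
  }

  @Override
  public void postPut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit)
    throws IOException {
    // Only this hook is overridden; every other RegionObserver method keeps its empty default.
    putCounter.increment();
  }
}

Such a class would typically be loaded per table through the table descriptor, or process-wide through hbase.coprocessor.region.classes.
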
@@ -170,7 +172,8 @@ default void postFlush(ObserverContext c, * @param tracker tracker used to track the life cycle of a flush */ default void postFlush(ObserverContext c, Store store, - StoreFile resultFile, FlushLifeCycleTracker tracker) throws IOException {} + StoreFile resultFile, FlushLifeCycleTracker tracker) throws IOException { + } /** * Called before in memory compaction started. @@ -178,7 +181,8 @@ default void postFlush(ObserverContext c, Store st * @param store the store where in memory compaction is being requested */ default void preMemStoreCompaction(ObserverContext c, Store store) - throws IOException {} + throws IOException { + } /** * Called before we open store scanner for in memory compaction. You can use the {@code options} @@ -191,7 +195,8 @@ default void preMemStoreCompaction(ObserverContext */ default void preMemStoreCompactionCompactScannerOpen( ObserverContext c, Store store, ScanOptions options) - throws IOException {} + throws IOException { + } /** * Called before we do in memory compaction. Notice that this method will only be called when you @@ -214,23 +219,25 @@ default InternalScanner preMemStoreCompactionCompact( * @param store the store where in memory compaction is being executed */ default void postMemStoreCompaction(ObserverContext c, Store store) - throws IOException {} + throws IOException { + } /** * Called prior to selecting the {@link StoreFile StoreFiles} to compact from the list of * available candidates. To alter the files used for compaction, you may mutate the passed in list * of candidates. If you remove all the candidates then the compaction will be canceled. - *

        Supports Coprocessor 'bypass' -- 'bypass' is how this method indicates that it changed - * the passed in candidates. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + *

        + * Supports Coprocessor 'bypass' -- 'bypass' is how this method indicates that it changed the + * passed in candidates. If 'bypass' is set, we skip out on calling any subsequent + * chained coprocessors. * @param c the environment provided by the region server * @param store the store where compaction is being requested * @param candidates the store files currently available for compaction * @param tracker tracker used to track the life cycle of a compaction */ default void preCompactSelection(ObserverContext c, Store store, - List candidates, CompactionLifeCycleTracker tracker) - throws IOException {} + List candidates, CompactionLifeCycleTracker tracker) throws IOException { + } /** * Called after the {@link StoreFile}s to compact have been selected from the available @@ -243,11 +250,12 @@ default void preCompactSelection(ObserverContext c */ default void postCompactSelection(ObserverContext c, Store store, List selected, CompactionLifeCycleTracker tracker, - CompactionRequest request) {} + CompactionRequest request) { + } /** - * Called before we open store scanner for compaction. You can use the {@code options} to change max - * versions and TTL for the scanner being opened. + * Called before we open store scanner for compaction. You can use the {@code options} to change + * max versions and TTL for the scanner being opened. * @param c the environment provided by the region server * @param store the store being compacted * @param scanType type of Scan @@ -257,7 +265,8 @@ default void postCompactSelection(ObserverContext */ default void preCompactScannerOpen(ObserverContext c, Store store, ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException {} + CompactionRequest request) throws IOException { + } /** * Called prior to writing the {@link StoreFile}s selected for compaction into a new @@ -292,7 +301,8 @@ default InternalScanner preCompact(ObserverContext */ default void postCompact(ObserverContext c, Store store, StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) - throws IOException {} + throws IOException { + } /** * Called before the region is reported as closed to the master. @@ -300,46 +310,49 @@ default void postCompact(ObserverContext c, Store * @param abortRequested true if the region server is aborting */ default void preClose(ObserverContext c, boolean abortRequested) - throws IOException {} + throws IOException { + } /** * Called after the region is reported as closed to the master. * @param c the environment provided by the region server * @param abortRequested true if the region server is aborting */ - default void postClose(ObserverContext c, boolean abortRequested) {} + default void postClose(ObserverContext c, boolean abortRequested) { + } /** * Called before the client performs a Get *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. * @param c the environment provided by the region server * @param get the Get request - * @param result The result to return to the client if default processing - * is bypassed. Can be modified. Will not be used if default processing - * is not bypassed. + * @param result The result to return to the client if default processing is bypassed. Can be + * modified. Will not be used if default processing is not bypassed. */ default void preGetOp(ObserverContext c, Get get, List result) - throws IOException {} + throws IOException { + } /** * Called after the client performs a Get *

        - * Note: Do not retain references to any Cells in 'result' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'result' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param get the Get request * @param result the result to return to the client, modify as necessary */ default void postGetOp(ObserverContext c, Get get, - List result) throws IOException {} + List result) throws IOException { + } /** * Called before the client tests for existence using a Get. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. * @param c the environment provided by the region server * @param get the Get request * @param exists the result returned by the region server @@ -365,187 +378,194 @@ default boolean postExists(ObserverContext c, Get /** * Called before the client stores a value. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param put The Put object * @param edit The WALEdit object that will be written to the wal * @param durability Persistence guarantee for this Put * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #prePut(ObserverContext, Put, WALEdit)} instead. + * {@link #prePut(ObserverContext, Put, WALEdit)} instead. */ @Deprecated default void prePut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException {} + Durability durability) throws IOException { + } /** * Called before the client stores a value. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param put The Put object * @param edit The WALEdit object that will be written to the wal */ default void prePut(ObserverContext c, Put put, WALEdit edit) - throws IOException { + throws IOException { prePut(c, put, edit, put.getDurability()); } /** * Called after the client stores a value. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param put The Put object * @param edit The WALEdit object for the wal * @param durability Persistence guarantee for this Put * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postPut(ObserverContext, Put, WALEdit)} instead. + * {@link #postPut(ObserverContext, Put, WALEdit)} instead. */ @Deprecated default void postPut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException {} + Durability durability) throws IOException { + } /** * Called after the client stores a value. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param put The Put object * @param edit The WALEdit object for the wal */ default void postPut(ObserverContext c, Put put, WALEdit edit) - throws IOException { + throws IOException { postPut(c, put, edit, put.getDurability()); } /** * Called before the client deletes a value. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param delete The Delete object * @param edit The WALEdit object for the wal * @param durability Persistence guarantee for this Delete * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preDelete(ObserverContext, Delete, WALEdit)} instead. + * {@link #preDelete(ObserverContext, Delete, WALEdit)} instead. */ @Deprecated default void preDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws IOException {} + WALEdit edit, Durability durability) throws IOException { + } /** * Called before the client deletes a value. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param delete The Delete object * @param edit The WALEdit object for the wal */ default void preDelete(ObserverContext c, Delete delete, - WALEdit edit) throws IOException { + WALEdit edit) throws IOException { preDelete(c, delete, edit, delete.getDurability()); } /** * Called before the server updates the timestamp for version delete with latest timestamp. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. * @param c the environment provided by the region server * @param mutation - the parent mutation associated with this delete cell * @param cell - The deleteColumn with latest version cell * @param byteNow - timestamp bytes * @param get - the get formed using the current cell's row. Note that the get does not specify * the family and qualifier - * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced - * with something that doesn't expose IntefaceAudience.Private classes. + * @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced with + * something that doesn't expose IntefaceAudience.Private classes. */ @Deprecated default void prePrepareTimeStampForDeleteVersion(ObserverContext c, - Mutation mutation, Cell cell, byte[] byteNow, Get get) throws IOException {} + Mutation mutation, Cell cell, byte[] byteNow, Get get) throws IOException { + } /** * Called after the client deletes a value. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param delete The Delete object * @param edit The WALEdit object for the wal * @param durability Persistence guarantee for this Delete * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postDelete(ObserverContext, Delete, WALEdit)} instead. + * {@link #postDelete(ObserverContext, Delete, WALEdit)} instead. */ @Deprecated default void postDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws IOException {} + WALEdit edit, Durability durability) throws IOException { + } /** * Called after the client deletes a value. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param delete The Delete object * @param edit The WALEdit object for the wal */ default void postDelete(ObserverContext c, Delete delete, - WALEdit edit) throws IOException { + WALEdit edit) throws IOException { postDelete(c, delete, edit, delete.getDurability()); } /** * This will be called for every batch mutation operation happening at the server. This will be * called after acquiring the locks on the mutating rows and after applying the proper timestamp - * for each Mutation at the server. The batch may contain Put/Delete/Increment/Append. By - * setting OperationStatus of Mutations + * for each Mutation at the server. The batch may contain Put/Delete/Increment/Append. By setting + * OperationStatus of Mutations * ({@link MiniBatchOperationInProgress#setOperationStatus(int, OperationStatus)}), * {@link RegionObserver} can make Region to skip these Mutations. *

        - * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param miniBatchOp batch of Mutations getting applied to region. */ default void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException {} + MiniBatchOperationInProgress miniBatchOp) throws IOException { + } /** - * This will be called after applying a batch of Mutations on a region. The Mutations are added - * to memstore and WAL. The difference of this one with - * {@link #postPut(ObserverContext, Put, WALEdit)} - * and {@link #postDelete(ObserverContext, Delete, WALEdit)} - * and {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} - * and {@link #postAppend(ObserverContext, Append, Result, WALEdit)} is - * this hook will be executed before the mvcc transaction completion. + * This will be called after applying a batch of Mutations on a region. The Mutations are added to + * memstore and WAL. The difference of this one with + * {@link #postPut(ObserverContext, Put, WALEdit)} and + * {@link #postDelete(ObserverContext, Delete, WALEdit)} and + * {@link #postIncrement(ObserverContext, Increment, Result, WALEdit)} and + * {@link #postAppend(ObserverContext, Append, Result, WALEdit)} is this hook will be executed + * before the mvcc transaction completion. *

        - * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param miniBatchOp batch of Mutations applied to region. Coprocessors are discouraged from - * manipulating its state. + * manipulating its state. */ // Coprocessors can do a form of bypass by changing state in miniBatchOp. default void postBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) throws IOException {} + MiniBatchOperationInProgress miniBatchOp) throws IOException { + } /** * This will be called for region operations where read lock is acquired in @@ -554,7 +574,8 @@ default void postBatchMutate(ObserverContext c, * @param operation The operation is about to be taken on the region */ default void postStartRegionOperation(ObserverContext ctx, - Operation operation) throws IOException {} + Operation operation) throws IOException { + } /** * Called after releasing read lock in {@link Region#closeRegionOperation()}. @@ -562,29 +583,31 @@ default void postStartRegionOperation(ObserverContext ctx, - Operation operation) throws IOException {} + Operation operation) throws IOException { + } /** - * Called after the completion of batch put/delete/increment/append and will be called even if - * the batch operation fails. + * Called after the completion of batch put/delete/increment/append and will be called even if the + * batch operation fails. *

        - * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in Mutations beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param ctx * @param miniBatchOp * @param success true if batch operation is successful otherwise false. */ default void postBatchMutateIndispensably(ObserverContext ctx, - MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException {} + MiniBatchOperationInProgress miniBatchOp, boolean success) throws IOException { + } /** * Called before checkAndPut. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param family column family @@ -594,9 +617,9 @@ default void postBatchMutateIndispensably(ObserverContext c, byte[] row, @@ -608,39 +631,39 @@ default boolean preCheckAndPut(ObserverContext c, /** * Called before checkAndPut. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param filter filter * @param put data to put if check succeeds * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPut(ObserverContext c, byte[] row, - Filter filter, Put put, boolean result) throws IOException { + Filter filter, Put put, boolean result) throws IOException { return result; } /** * Called before checkAndPut but after acquiring rowlock. *

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param family column family @@ -650,10 +673,9 @@ default boolean preCheckAndPut(ObserverContext c, * @param put data to put if check succeeds * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPutAfterRowLock(ObserverContext c, @@ -665,37 +687,36 @@ default boolean preCheckAndPutAfterRowLock(ObserverContext - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param filter filter * @param put data to put if check succeeds * @param result the default value of the result * @return the return value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndPutAfterRowLock(ObserverContext c, - byte[] row, Filter filter, Put put, boolean result) throws IOException { + byte[] row, Filter filter, Put put, boolean result) throws IOException { return result; } /** * Called after checkAndPut *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param family column family @@ -705,9 +726,9 @@ default boolean preCheckAndPutAfterRowLock(ObserverContext c, byte[] row, @@ -719,32 +740,32 @@ default boolean postCheckAndPut(ObserverContext c, /** * Called after checkAndPut *

        - * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'put' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param filter filter * @param put data to put if check succeeds * @param result from the checkAndPut * @return the possibly transformed return value to return to client - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndPut(ObserverContext c, byte[] row, - Filter filter, Put put, boolean result) throws IOException { + Filter filter, Put put, boolean result) throws IOException { return result; } /** * Called before checkAndDelete. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param family column family @@ -754,9 +775,9 @@ default boolean postCheckAndPut(ObserverContext c, * @param delete delete to commit if check succeeds * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDelete(ObserverContext c, byte[] row, @@ -768,39 +789,39 @@ default boolean preCheckAndDelete(ObserverContext /** * Called before checkAndDelete. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param filter column family * @param delete delete to commit if check succeeds * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #preCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDelete(ObserverContext c, byte[] row, - Filter filter, Delete delete, boolean result) throws IOException { + Filter filter, Delete delete, boolean result) throws IOException { return result; } /** * Called before checkAndDelete but after acquiring rowock. *

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param family column family @@ -810,10 +831,9 @@ default boolean preCheckAndDelete(ObserverContext * @param delete delete to commit if check succeeds * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDeleteAfterRowLock(ObserverContext c, @@ -825,37 +845,36 @@ default boolean preCheckAndDeleteAfterRowLock(ObserverContext - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param filter filter * @param delete delete to commit if check succeeds * @param result the default value of the result * @return the value to return to client if bypassing default processing - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} - * instead. + * {@link #preCheckAndMutateAfterRowLock(ObserverContext, CheckAndMutate,CheckAndMutateResult)} + * instead. */ @Deprecated default boolean preCheckAndDeleteAfterRowLock(ObserverContext c, - byte[] row, Filter filter, Delete delete, boolean result) throws IOException { + byte[] row, Filter filter, Delete delete, boolean result) throws IOException { return result; } /** * Called after checkAndDelete *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param family column family @@ -865,9 +884,9 @@ default boolean preCheckAndDeleteAfterRowLock(ObserverContext c, byte[] row, @@ -879,32 +898,32 @@ default boolean postCheckAndDelete(ObserverContext /** * Called after checkAndDelete *

        - * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'delete' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param row row to check * @param filter filter * @param delete delete to commit if check succeeds * @param result from the CheckAndDelete * @return the possibly transformed returned value to return to client - * * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} instead. + * {@link #postCheckAndMutate(ObserverContext, CheckAndMutate, CheckAndMutateResult)} + * instead. */ @Deprecated default boolean postCheckAndDelete(ObserverContext c, byte[] row, - Filter filter, Delete delete, boolean result) throws IOException { + Filter filter, Delete delete, boolean result) throws IOException { return result; } /** * Called before checkAndMutate *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in actions beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object * @param result the default value of the result @@ -912,7 +931,7 @@ default boolean postCheckAndDelete(ObserverContext * @throws IOException if an error occurred on the coprocessor */ default CheckAndMutateResult preCheckAndMutate(ObserverContext c, - CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException { + CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException { if (checkAndMutate.getAction() instanceof Put) { boolean success; if (checkAndMutate.hasFilter()) { @@ -944,15 +963,15 @@ default CheckAndMutateResult preCheckAndMutate(ObserverContext - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in actions beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object * @param result the default value of the result @@ -960,18 +979,18 @@ default CheckAndMutateResult preCheckAndMutate(ObserverContext c, CheckAndMutate checkAndMutate, - CheckAndMutateResult result) throws IOException { + ObserverContext c, CheckAndMutate checkAndMutate, + CheckAndMutateResult result) throws IOException { if (checkAndMutate.getAction() instanceof Put) { boolean success; if (checkAndMutate.hasFilter()) { - success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), - checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); - } else { - success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), + success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); + } else { + success = preCheckAndPutAfterRowLock(c, checkAndMutate.getRow(), checkAndMutate.getFamily(), + checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Put) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } else if (checkAndMutate.getAction() instanceof Delete) { @@ -981,9 +1000,9 @@ default CheckAndMutateResult preCheckAndMutateAfterRowLock( checkAndMutate.getFilter(), (Delete) checkAndMutate.getAction(), result.isSuccess()); } else { success = preCheckAndDeleteAfterRowLock(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), - (Delete) checkAndMutate.getAction(), result.isSuccess()); + checkAndMutate.getFamily(), checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Delete) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } @@ -993,8 +1012,8 @@ default CheckAndMutateResult preCheckAndMutateAfterRowLock( /** * Called after checkAndMutate *
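Every deprecated checkAndPut/checkAndDelete hook in this region now funnels into the CheckAndMutate variants, so a new coprocessor only needs the consolidated hook. A hypothetical sketch using only signatures visible in this hunk; the class name and the reserved-row rule are invented.

  import java.io.IOException;
  import java.util.Optional;
  import org.apache.hadoop.hbase.client.CheckAndMutate;
  import org.apache.hadoop.hbase.client.CheckAndMutateResult;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionObserver;
  import org.apache.hadoop.hbase.util.Bytes;

  // Hypothetical observer: reject checkAndMutate against a reserved row, instead of overriding
  // the deprecated preCheckAndPut/preCheckAndDelete hooks.
  public class ReservedRowObserver implements RegionCoprocessor, RegionObserver {
    private static final byte[] RESERVED_ROW = Bytes.toBytes("reserved");

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public CheckAndMutateResult preCheckAndMutate(ObserverContext<RegionCoprocessorEnvironment> c,
      CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException {
      if (Bytes.equals(checkAndMutate.getRow(), RESERVED_ROW)) {
        // IOExceptions thrown here are reported back to the client.
        throw new IOException("checkAndMutate is not allowed against the reserved row");
      }
      return result; // keep the default result so normal processing continues
    }
  }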

        - * Note: Do not retain references to any Cells in actions beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in actions beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param checkAndMutate the CheckAndMutate object * @param result from the checkAndMutate @@ -1002,29 +1021,29 @@ default CheckAndMutateResult preCheckAndMutateAfterRowLock( * @throws IOException if an error occurred on the coprocessor */ default CheckAndMutateResult postCheckAndMutate(ObserverContext c, - CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException { + CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException { if (checkAndMutate.getAction() instanceof Put) { boolean success; if (checkAndMutate.hasFilter()) { - success = postCheckAndPut(c, checkAndMutate.getRow(), - checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); - } else { - success = postCheckAndPut(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), + success = postCheckAndPut(c, checkAndMutate.getRow(), checkAndMutate.getFilter(), (Put) checkAndMutate.getAction(), result.isSuccess()); + } else { + success = postCheckAndPut(c, checkAndMutate.getRow(), checkAndMutate.getFamily(), + checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Put) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } else if (checkAndMutate.getAction() instanceof Delete) { boolean success; if (checkAndMutate.hasFilter()) { - success = postCheckAndDelete(c, checkAndMutate.getRow(), - checkAndMutate.getFilter(), (Delete) checkAndMutate.getAction(), result.isSuccess()); - } else { - success = postCheckAndDelete(c, checkAndMutate.getRow(), - checkAndMutate.getFamily(), checkAndMutate.getQualifier(), - checkAndMutate.getCompareOp(), new BinaryComparator(checkAndMutate.getValue()), + success = postCheckAndDelete(c, checkAndMutate.getRow(), checkAndMutate.getFilter(), (Delete) checkAndMutate.getAction(), result.isSuccess()); + } else { + success = postCheckAndDelete(c, checkAndMutate.getRow(), checkAndMutate.getFamily(), + checkAndMutate.getQualifier(), checkAndMutate.getCompareOp(), + new BinaryComparator(checkAndMutate.getValue()), (Delete) checkAndMutate.getAction(), + result.isSuccess()); } return new CheckAndMutateResult(success, null); } @@ -1034,88 +1053,88 @@ default CheckAndMutateResult postCheckAndMutate(ObserverContext - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param append Append object * @return result to return to the client if bypassing default processing * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preAppend(ObserverContext, Append, WALEdit)} instead. + * {@link #preAppend(ObserverContext, Append, WALEdit)} instead. */ @Deprecated default Result preAppend(ObserverContext c, Append append) - throws IOException { + throws IOException { return null; } /** * Called before Append. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param append Append object * @param edit The WALEdit object that will be written to the wal * @return result to return to the client if bypassing default processing */ default Result preAppend(ObserverContext c, Append append, - WALEdit edit) throws IOException { + WALEdit edit) throws IOException { return preAppend(c, append); } /** * Called before Append but after acquiring rowlock. *
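The WALEdit-aware preAppend overload above supersedes the older single-argument hook. A hypothetical guard, assuming an invented cell-count limit, that rejects oversized Appends before any work is done; it would live inside a RegionObserver implementation such as the earlier sketch.

  // Extra imports assumed: org.apache.hadoop.hbase.DoNotRetryIOException,
  // org.apache.hadoop.hbase.client.Append, org.apache.hadoop.hbase.client.Result,
  // org.apache.hadoop.hbase.wal.WALEdit
  private static final int MAX_CELLS_PER_APPEND = 1000; // invented limit, for illustration only

  @Override
  public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append,
    WALEdit edit) throws IOException {
    if (append.size() > MAX_CELLS_PER_APPEND) {
      throw new DoNotRetryIOException("Append carries too many cells: " + append.size());
    }
    return null; // null means: let default processing run
  }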

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param append Append object * @return result to return to the client if bypassing default processing * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. + * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. */ @Deprecated default Result preAppendAfterRowLock(ObserverContext c, - Append append) throws IOException { + Append append) throws IOException { return null; } /** * Called after Append *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param append Append object * @param result the result returned by increment * @return the result to return to the client * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #postAppend(ObserverContext, Append, Result, WALEdit)} instead. + * {@link #postAppend(ObserverContext, Append, Result, WALEdit)} instead. */ @Deprecated default Result postAppend(ObserverContext c, Append append, - Result result) throws IOException { + Result result) throws IOException { return result; } /** * Called after Append *

        - * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. - * If need a Cell reference for later use, copy the cell and use that. + * Note: Do not retain references to any Cells in 'append' beyond the life of this invocation. If + * need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param append Append object * @param result the result returned by increment @@ -1123,15 +1142,15 @@ default Result postAppend(ObserverContext c, Appen * @return the result to return to the client */ default Result postAppend(ObserverContext c, Append append, - Result result, WALEdit edit) throws IOException { + Result result, WALEdit edit) throws IOException { return postAppend(c, append, result); } /** * Called before Increment. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. @@ -1139,19 +1158,19 @@ default Result postAppend(ObserverContext c, Appen * @param increment increment object * @return result to return to the client if bypassing default processing * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preIncrement(ObserverContext, Increment, WALEdit)} instead. + * {@link #preIncrement(ObserverContext, Increment, WALEdit)} instead. */ @Deprecated default Result preIncrement(ObserverContext c, Increment increment) - throws IOException { + throws IOException { return null; } /** * Called before Increment. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. @@ -1161,32 +1180,31 @@ default Result preIncrement(ObserverContext c, Inc * @return result to return to the client if bypassing default processing */ default Result preIncrement(ObserverContext c, Increment increment, - WALEdit edit) throws IOException { + WALEdit edit) throws IOException { return preIncrement(c, increment); } /** * Called before Increment but after acquiring rowlock. *
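The bypass wording for preIncrement can be made concrete. A hypothetical sketch for a RegionObserver like the earlier one: if the client tagged the Increment with an invented "dry-run" attribute, skip the write and hand back an empty Result, relying on the bypass support this Javadoc describes.

  // Extra imports assumed: org.apache.hadoop.hbase.client.Increment,
  // org.apache.hadoop.hbase.client.Result, org.apache.hadoop.hbase.wal.WALEdit
  @Override
  public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c, Increment increment,
    WALEdit edit) throws IOException {
    if (increment.getAttribute("dry-run") != null) { // "dry-run" is an invented attribute name
      c.bypass();                 // skip default processing and any remaining coprocessors
      return Result.EMPTY_RESULT; // this is what the client will see
    }
    return null; // fall through to normal increment handling
  }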

        - * Note: Caution to be taken for not doing any long time operation in this hook. - * Row will be locked for longer time. Trying to acquire lock on another row, within this, - * can lead to potential deadlock. + * Note: Caution to be taken for not doing any long time operation in this hook. Row will + * be locked for longer time. Trying to acquire lock on another row, within this, can lead to + * potential deadlock. *

        - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        * Note: Do not retain references to any Cells in 'increment' beyond the life of this invocation. * If need a Cell reference for later use, copy the cell and use that. - * * @param c the environment provided by the region server * @param increment increment object * @return result to return to the client if bypassing default processing * @deprecated since 3.0.0 and will be removed in 4.0.0. Use - * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. + * {@link #preBatchMutate(ObserverContext, MiniBatchOperationInProgress)} instead. */ @Deprecated default Result preIncrementAfterRowLock(ObserverContext c, - Increment increment) throws IOException { + Increment increment) throws IOException { return null; } @@ -1200,11 +1218,11 @@ default Result preIncrementAfterRowLock(ObserverContext c, Increment increment, - Result result) throws IOException { + Result result) throws IOException { return result; } @@ -1220,7 +1238,7 @@ default Result postIncrement(ObserverContext c, In * @return the result to return to the client */ default Result postIncrement(ObserverContext c, Increment increment, - Result result, WALEdit edit) throws IOException { + Result result, WALEdit edit) throws IOException { return postIncrement(c, increment, result); } @@ -1254,16 +1272,15 @@ default RegionScanner postScannerOpen(ObserverContext - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. *

        * Note: Do not retain references to any Cells returned by scanner, beyond the life of this * invocation. If need a Cell reference for later use, copy the cell and use that. * @param c the environment provided by the region server * @param s the scanner - * @param result The result to return to the client if default processing - * is bypassed. Can be modified. Will not be returned if default processing - * is not bypassed. + * @param result The result to return to the client if default processing is bypassed. Can be + * modified. Will not be returned if default processing is not bypassed. * @param limit the maximum number of results to return * @param hasNext the 'has more' indication * @return 'has more' indication that should be sent to client @@ -1294,13 +1311,11 @@ default boolean postScannerNext(ObserverContext c, * This will be called by the scan flow when the current scanned row is being filtered out by the * filter. The filter may be filtering out the row via any of the below scenarios *

        - * boolean filterRowKey(byte [] buffer, int offset, int length) returning true
        - * boolean filterRow() returning true
        - * default void filterRow(List<KeyValue> kvs) removing all the kvs from
        - * the passed List
        + * boolean filterRowKey(byte [] buffer, int offset, int length) returning
        + * true
        + * boolean filterRow() returning true
        + * default void filterRow(List<KeyValue> kvs) removing all the kvs from the
        + * passed List
        *
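The scanner hooks in this region give a coprocessor a last look at what a next() call returns. A hypothetical postScannerNext, for a RegionObserver like the earlier sketch, that hides rows with an invented key prefix before they reach the client.

  // Extra imports assumed: java.util.List, org.apache.hadoop.hbase.client.Result,
  // org.apache.hadoop.hbase.regionserver.InternalScanner, org.apache.hadoop.hbase.util.Bytes
  private static final byte[] HIDDEN_PREFIX = Bytes.toBytes("internal:"); // invented prefix

  @Override
  public boolean postScannerNext(ObserverContext<RegionCoprocessorEnvironment> c, InternalScanner s,
    List<Result> result, int limit, boolean hasNext) throws IOException {
    // Results are filtered in place; no Cell references are retained past this invocation.
    result.removeIf(r -> r.getRow() != null && Bytes.startsWith(r.getRow(), HIDDEN_PREFIX));
    return hasNext;
  }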

        * Note: Do not retain references to any Cells returned by scanner, beyond the life of this @@ -1319,13 +1334,14 @@ default boolean postScannerFilterRow(ObserverContext - * Call CoprocessorEnvironment#bypass to skip default actions. - * If 'bypass' is set, we skip out on calling any subsequent chained coprocessors. + * Call CoprocessorEnvironment#bypass to skip default actions. If 'bypass' is set, we skip out on + * calling any subsequent chained coprocessors. * @param c the environment provided by the region server * @param s the scanner */ default void preScannerClose(ObserverContext c, InternalScanner s) - throws IOException {} + throws IOException { + } /** * Called after the client closes a scanner. @@ -1333,7 +1349,8 @@ default void preScannerClose(ObserverContext c, In * @param s the scanner */ default void postScannerClose(ObserverContext ctx, - InternalScanner s) throws IOException {} + InternalScanner s) throws IOException { + } /** * Called before a store opens a new scanner. @@ -1354,19 +1371,21 @@ default void postScannerClose(ObserverContext ctx, * CompactionLifeCycleTracker, CompactionRequest) */ default void preStoreScannerOpen(ObserverContext ctx, Store store, - ScanOptions options) throws IOException {} + ScanOptions options) throws IOException { + } /** - * Called before replaying WALs for this region. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. + * Called before replaying WALs for this region. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. * @param ctx the environment provided by the region server * @param info the RegionInfo for this region * @param edits the file of recovered edits */ // todo: what about these? default void preReplayWALs(ObserverContext ctx, - RegionInfo info, Path edits) throws IOException {} + RegionInfo info, Path edits) throws IOException { + } /** * Called after replaying WALs for this region. @@ -1375,76 +1394,75 @@ default void preReplayWALs(ObserverContext ctx, - RegionInfo info, Path edits) throws IOException {} + RegionInfo info, Path edits) throws IOException { + } /** - * Called before a {@link WALEdit} - * replayed for this region. + * Called before a {@link WALEdit} replayed for this region. * @param ctx the environment provided by the region server */ default void preWALRestore(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** - * Called after a {@link WALEdit} - * replayed for this region. + * Called after a {@link WALEdit} replayed for this region. * @param ctx the environment provided by the region server */ default void postWALRestore(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** - * Called before bulkLoadHFile. Users can create a StoreFile instance to - * access the contents of a HFile. - * + * Called before bulkLoadHFile. Users can create a StoreFile instance to access the contents of a + * HFile. * @param ctx the environment provided by the region server - * @param familyPaths pairs of { CF, HFile path } submitted for bulk load. Adding - * or removing from this list will add or remove HFiles to be bulk loaded. + * @param familyPaths pairs of { CF, HFile path } submitted for bulk load. 
Adding or removing from + * this list will add or remove HFiles to be bulk loaded. */ default void preBulkLoadHFile(ObserverContext ctx, - List> familyPaths) throws IOException {} + List> familyPaths) throws IOException { + } /** * Called before moving bulk loaded hfile to region directory. - * * @param ctx the environment provided by the region server * @param family column family - * @param pairs List of pairs of { HFile location in staging dir, HFile path in region dir } - * Each pair are for the same hfile. + * @param pairs List of pairs of { HFile location in staging dir, HFile path in region dir } Each + * pair are for the same hfile. */ default void preCommitStoreFile(ObserverContext ctx, byte[] family, - List> pairs) throws IOException {} + List> pairs) throws IOException { + } /** * Called after moving bulk loaded hfile to region directory. - * * @param ctx the environment provided by the region server * @param family column family * @param srcPath Path to file before the move * @param dstPath Path to file after the move */ default void postCommitStoreFile(ObserverContext ctx, byte[] family, - Path srcPath, Path dstPath) throws IOException {} + Path srcPath, Path dstPath) throws IOException { + } /** * Called after bulkLoadHFile. - * * @param ctx the environment provided by the region server * @param stagingFamilyPaths pairs of { CF, HFile path } submitted for bulk load - * @param finalPaths Map of CF to List of file paths for the loaded files - * if the Map is not null, the bulkLoad was successful. Otherwise the bulk load failed. - * bulkload is done by the time this hook is called. + * @param finalPaths Map of CF to List of file paths for the loaded files if the Map is not null, + * the bulkLoad was successful. Otherwise the bulk load failed. bulkload is done by the + * time this hook is called. */ default void postBulkLoadHFile(ObserverContext ctx, List> stagingFamilyPaths, Map> finalPaths) - throws IOException { + throws IOException { } /** - * Called before creation of Reader for a store file. - * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no - * effect in this hook. - * + * Called before creation of Reader for a store file. Calling + * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no effect in this + * hook. * @param ctx the environment provided by the region server * @param fs fileystem to read from * @param p path to the file @@ -1453,8 +1471,8 @@ default void postBulkLoadHFile(ObserverContext ctx * @param cacheConf * @param r original reference file. This will be not null only when reading a split file. * @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain - * @return a Reader instance to use instead of the base reader if overriding - * default behavior, null otherwise + * @return a Reader instance to use instead of the base reader if overriding default behavior, + * null otherwise * @deprecated For Phoenix only, StoreFileReader is not a stable interface. */ @Deprecated @@ -1468,7 +1486,6 @@ default StoreFileReader preStoreFileReaderOpen(ObserverContextHBASE-21643 @@ -1516,11 +1533,10 @@ default Cell postMutationBeforeWAL(ObserverContext /** * Called after a list of new cells has been created during an increment operation, but before * they are committed to the WAL or memstore. - * - * @param ctx the environment provided by the region server - * @param mutation the current mutation - * @param cellPairs a list of cell pair. 
The first cell is old cell which may be null. - * And the second cell is the new cell. + * @param ctx the environment provided by the region server + * @param mutation the current mutation + * @param cellPairs a list of cell pair. The first cell is old cell which may be null. And the + * second cell is the new cell. * @return a list of cell pair, possibly changed. */ default List> postIncrementBeforeWAL( @@ -1528,21 +1544,19 @@ default List> postIncrementBeforeWAL( List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs.add(new Pair<>(pair.getFirst(), - postMutationBeforeWAL(ctx, MutationType.INCREMENT, mutation, pair.getFirst(), - pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), postMutationBeforeWAL(ctx, MutationType.INCREMENT, + mutation, pair.getFirst(), pair.getSecond()))); } return resultPairs; } /** - * Called after a list of new cells has been created during an append operation, but before - * they are committed to the WAL or memstore. - * - * @param ctx the environment provided by the region server - * @param mutation the current mutation - * @param cellPairs a list of cell pair. The first cell is old cell which may be null. - * And the second cell is the new cell. + * Called after a list of new cells has been created during an append operation, but before they + * are committed to the WAL or memstore. + * @param ctx the environment provided by the region server + * @param mutation the current mutation + * @param cellPairs a list of cell pair. The first cell is old cell which may be null. And the + * second cell is the new cell. * @return a list of cell pair, possibly changed. */ default List> postAppendBeforeWAL( @@ -1550,17 +1564,15 @@ default List> postAppendBeforeWAL( List> cellPairs) throws IOException { List> resultPairs = new ArrayList<>(cellPairs.size()); for (Pair pair : cellPairs) { - resultPairs.add(new Pair<>(pair.getFirst(), - postMutationBeforeWAL(ctx, MutationType.APPEND, mutation, pair.getFirst(), - pair.getSecond()))); + resultPairs.add(new Pair<>(pair.getFirst(), postMutationBeforeWAL(ctx, MutationType.APPEND, + mutation, pair.getFirst(), pair.getSecond()))); } return resultPairs; } /** - * Called after the ScanQueryMatcher creates ScanDeleteTracker. Implementing - * this hook would help in creating customised DeleteTracker and returning - * the newly created DeleteTracker + * Called after the ScanQueryMatcher creates ScanDeleteTracker. Implementing this hook would help + * in creating customised DeleteTracker and returning the newly created DeleteTracker *
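The cell-pair hooks above hand over (old cell, new cell) pairs before anything reaches the WAL or memstore. A hypothetical audit-style override that copies what it needs and returns the pairs untouched; the logger name is invented and the method would sit in a RegionObserver implementation.

  // Extra imports assumed: java.util.List, org.apache.hadoop.hbase.Cell,
  // org.apache.hadoop.hbase.CellUtil, org.apache.hadoop.hbase.client.Mutation,
  // org.apache.hadoop.hbase.util.Bytes, org.apache.hadoop.hbase.util.Pair,
  // org.slf4j.Logger, org.slf4j.LoggerFactory
  private static final Logger LOG = LoggerFactory.getLogger("IncrementAuditObserver");

  @Override
  public List<Pair<Cell, Cell>> postIncrementBeforeWAL(
    ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation mutation,
    List<Pair<Cell, Cell>> cellPairs) throws IOException {
    for (Pair<Cell, Cell> pair : cellPairs) {
      Cell newCell = pair.getSecond(); // pair.getFirst() is the old cell and may be null
      // Copy the bytes we keep; the Cells themselves must not outlive this call.
      LOG.debug("increment on row {} wrote a {}-byte value",
        Bytes.toStringBinary(mutation.getRow()), CellUtil.cloneValue(newCell).length);
    }
    return cellPairs; // unchanged; a real implementation could substitute rebuilt cells here
  }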

        * Warn: This is used by internal coprocessors. Should not be implemented by user coprocessors * @param ctx the environment provided by the region server @@ -1577,13 +1589,12 @@ default DeleteTracker postInstantiateDeleteTracker( /** * Called just before the WAL Entry is appended to the WAL. Implementing this hook allows - * coprocessors to add extended attributes to the WALKey that then get persisted to the - * WAL, and are available to replication endpoints to use in processing WAL Entries. + * coprocessors to add extended attributes to the WALKey that then get persisted to the WAL, and + * are available to replication endpoints to use in processing WAL Entries. * @param ctx the environment provided by the region server * @param key the WALKey associated with a particular append to a WAL */ default void preWALAppend(ObserverContext ctx, WALKey key, - WALEdit edit) - throws IOException { + WALEdit edit) throws IOException { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java index 66d8113a87a3..60bee538c16a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface RegionServerCoprocessor extends Coprocessor { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java index 4a5d69a17aa4..5b6e096cad7f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -45,48 +43,44 @@ public interface RegionServerCoprocessorEnvironment OnlineRegions getOnlineRegions(); /** - * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection - * with the hosting server. Throws {@link UnsupportedOperationException} if you try to close - * or abort it. - * - * For light-weight usage only. Heavy-duty usage will pull down - * the hosting RegionServer responsiveness as well as that of other Coprocessors making use of - * this Connection. Use to create table on start or to do administrative operations. Coprocessors - * should create their own Connections if heavy usage to avoid impinging on hosting Server - * operation. To create a Connection or if a Coprocessor requires a region with a particular - * Configuration, use {@link org.apache.hadoop.hbase.client.ConnectionFactory} or + * Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection with + * the hosting server. Throws {@link UnsupportedOperationException} if you try to close or abort + * it. For light-weight usage only. Heavy-duty usage will pull down the hosting RegionServer + * responsiveness as well as that of other Coprocessors making use of this Connection. Use to + * create table on start or to do administrative operations. Coprocessors should create their own + * Connections if heavy usage to avoid impinging on hosting Server operation. To create a + * Connection or if a Coprocessor requires a region with a particular Configuration, use + * {@link org.apache.hadoop.hbase.client.ConnectionFactory} or * {@link #createConnection(Configuration)}}. - * - *

        Be aware that operations that make use of this Connection are executed as the RegionServer + *

        + * Be aware that operations that make use of this Connection are executed as the RegionServer * User, the hbase super user that started this server process. Exercise caution running - * operations as this User (See {@link #createConnection(Configuration)}} to run as other than - * the RegionServer User). - * - *

        Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + * operations as this User (See {@link #createConnection(Configuration)}} to run as other than the + * RegionServer User). + *

        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. - * * @see #createConnection(Configuration) * @return The host's Connection to the Cluster. */ Connection getConnection(); /** - * Creates a cluster connection using the passed Configuration. - * - * Creating a Connection is a heavy-weight operation. The resultant Connection's cache of - * region locations will be empty. Therefore you should cache and reuse Connections rather than - * create a Connection on demand. Create on start of your Coprocessor. You will have to cast - * the CoprocessorEnvironment appropriately to get at this API at start time because - * Coprocessor start method is passed a subclass of this CoprocessorEnvironment or fetch - * Connection using a synchronized accessor initializing the Connection on first access. Close - * the returned Connection when done to free resources. Using this API rather - * than {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} + * Creates a cluster connection using the passed Configuration. Creating a Connection is a + * heavy-weight operation. The resultant Connection's cache of region locations will be empty. + * Therefore you should cache and reuse Connections rather than create a Connection on demand. + * Create on start of your Coprocessor. You will have to cast the CoprocessorEnvironment + * appropriately to get at this API at start time because Coprocessor start method is passed a + * subclass of this CoprocessorEnvironment or fetch Connection using a synchronized accessor + * initializing the Connection on first access. Close the returned Connection when done to free + * resources. Using this API rather than + * {@link org.apache.hadoop.hbase.client.ConnectionFactory#createConnection(Configuration)} * returns a Connection that will short-circuit RPC if the target is a local resource. Use * ConnectionFactory if you don't need this ability. - * - *
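The getConnection()/createConnection() guidance above maps to a simple pattern: use the shared connection only for occasional light calls, and own (and close) a dedicated connection for anything heavier. A hypothetical sketch; the class name is invented and error handling is elided.

  import java.io.IOException;
  import org.apache.hadoop.hbase.CoprocessorEnvironment;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;

  // Hypothetical coprocessor that creates its own Connection at start and closes it at stop.
  public class OwnConnectionCoprocessor implements RegionServerCoprocessor {
    private Connection connection;

    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
      if (env instanceof RegionServerCoprocessorEnvironment) {
        RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
        // Heavy-duty traffic goes through our own Connection, not the shared rsEnv.getConnection().
        this.connection = rsEnv.createConnection(rsEnv.getConfiguration());
      }
    }

    @Override
    public void stop(CoprocessorEnvironment env) throws IOException {
      if (connection != null) {
        connection.close(); // we created it, so we are responsible for closing it
      }
    }
  }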

        Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl + *

        + * Be careful RPC'ing from a Coprocessor context. RPC's will fail, stall, retry, and/or crawl * because the remote side is not online, is struggling or it is on the other side of a network * partition. Any use of Connection from inside a Coprocessor must be able to handle all such * hiccups. @@ -96,9 +90,10 @@ public interface RegionServerCoprocessorEnvironment /** * Returns a MetricRegistry that can be used to track metrics at the region server level. - * - *

        See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples - * of how metrics can be instantiated and used.

        + *

        + * See ExampleMasterObserverWithMetrics class in the hbase-examples modules for examples of how + * metrics can be instantiated and used. + *
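The MetricRegistry accessor documented here is the supported way to publish coprocessor metrics. A hypothetical fragment registering a counter at start time, assuming 'rsEnv' is the RegionServerCoprocessorEnvironment from the previous sketch; the metric name is invented.

  // Extra imports assumed: org.apache.hadoop.hbase.metrics.Counter,
  // org.apache.hadoop.hbase.metrics.MetricRegistry
  MetricRegistry registry = rsEnv.getMetricRegistryForRegionServer();
  Counter slowOps = registry.counter("hypotheticalSlowOperations");
  slowOps.increment(); // bump wherever the coprocessor observes a slow operation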

        * @return A MetricRegistry for the coprocessor class to track and export metrics. */ MetricRegistry getMetricRegistryForRegionServer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java index f3ccd9d3638b..2a6fc49983b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.yetus.audience.InterfaceAudience; @@ -27,27 +25,24 @@ /** * Defines coprocessor hooks for interacting with operations on the - * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process. - * - * Since most implementations will be interested in only a subset of hooks, this class uses - * 'default' functions to avoid having to add unnecessary overrides. When the functions are - * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. - * It is done in a way that these default definitions act as no-op. So our suggestion to - * implementation would be to not call these 'default' methods from overrides. - *

        - * - *

        Exception Handling

        - * For all functions, exception handling is done as follows: + * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process. Since most implementations + * will be interested in only a subset of hooks, this class uses 'default' functions to avoid having + * to add unnecessary overrides. When the functions are non-empty, it's simply to satisfy the + * compiler by returning value of expected (non-void) type. It is done in a way that these default + * definitions act as no-op. So our suggestion to implementation would be to not call these + * 'default' methods from overrides.
        + *
        + *

        Exception Handling

        For all functions, exception handling is done as follows: + *
          + *
        • Exceptions of type {@link IOException} are reported back to client.
        • + *
        • For any other kind of exception: *
            - *
          • Exceptions of type {@link IOException} are reported back to client.
          • - *
          • For any other kind of exception: - *
              - *
            • If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then - * the server aborts.
            • - *
            • Otherwise, coprocessor is removed from the server and - * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.
            • - *
            - *
          • + *
          • If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the + * server aborts.
          • + *
          • Otherwise, coprocessor is removed from the server and + * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.
          • + *
          + *
        • *
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -57,24 +52,25 @@ public interface RegionServerObserver { * Called before stopping region server. * @param ctx the environment to interact with the framework and region server. */ - default void preStopRegionServer( - final ObserverContext ctx) throws IOException {} + default void preStopRegionServer(final ObserverContext ctx) + throws IOException { + } /** * This will be called before executing user request to roll a region server WAL. * @param ctx the environment to interact with the framework and region server. */ default void preRollWALWriterRequest( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after executing user request to roll a region server WAL. * @param ctx the environment to interact with the framework and region server. */ default void postRollWALWriterRequest( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after the replication endpoint is instantiated. @@ -91,8 +87,8 @@ default ReplicationEndpoint postCreateReplicationEndPoint( /** * This will be called before executing replication request to shipping log entries. * @param ctx the environment to interact with the framework and region server. - * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal - * usage by AccessController. Do not use these hooks in custom co-processors. + * @deprecated As of release 2.0.0 with out any replacement. This is maintained for internal usage + * by AccessController. Do not use these hooks in custom co-processors. */ @Deprecated default void preReplicateLogEntries(final ObserverContext ctx) @@ -102,8 +98,8 @@ default void preReplicateLogEntries(final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called after clearing compaction queues * @param ctx the environment to interact with the framework and region server. */ default void postClearCompactionQueues( - final ObserverContext ctx) - throws IOException {} + final ObserverContext ctx) throws IOException { + } /** * This will be called before executing procedures * @param ctx the environment to interact with the framework and region server. */ default void preExecuteProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } /** * This will be called after executing procedures * @param ctx the environment to interact with the framework and region server. */ default void postExecuteProcedures(ObserverContext ctx) - throws IOException {} + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java index 1deddf9407b7..f2b98b61e6b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. 
The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; +import java.util.Optional; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import java.util.Optional; - /** * WALCoprocessor don't support loading services using {@link #getServices()}. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java index 71c72a2e7f18..1774481f2103 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.wal.WAL; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -34,9 +32,10 @@ public interface WALCoprocessorEnvironment extends CoprocessorEnvironmentSee ExampleRegionServerObserverWithMetrics class in the hbase-examples modules for examples - * of how metrics can be instantiated and used.

        + *

        + * See ExampleRegionServerObserverWithMetrics class in the hbase-examples modules for examples of + * how metrics can be instantiated and used. + *

        * @return A MetricRegistry for the coprocessor class to track and export metrics. */ MetricRegistry getMetricRegistryForRegionServer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java index b2fa7ca4777e..e07a5be2fa3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.RegionInfo; @@ -30,80 +27,73 @@ import org.apache.yetus.audience.InterfaceStability; /** - * It's provided to have a way for coprocessors to observe, rewrite, - * or skip WALEdits as they are being written to the WAL. - * - * Note that implementers of WALObserver will not see WALEdits that report themselves - * as empty via {@link WALEdit#isEmpty()}. - * - * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} provides - * hooks for adding logic for WALEdits in the region context during reconstruction. - * - * Defines coprocessor hooks for interacting with operations on the - * {@link org.apache.hadoop.hbase.wal.WAL}. - * - * Since most implementations will be interested in only a subset of hooks, this class uses - * 'default' functions to avoid having to add unnecessary overrides. When the functions are - * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. - * It is done in a way that these default definitions act as no-op. So our suggestion to - * implementation would be to not call these 'default' methods from overrides. - *

        - * - *

        Exception Handling

        - * For all functions, exception handling is done as follows: + * It's provided to have a way for coprocessors to observe, rewrite, or skip WALEdits as they are + * being written to the WAL. Note that implementers of WALObserver will not see WALEdits that report + * themselves as empty via {@link WALEdit#isEmpty()}. + * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} provides hooks for adding logic for + * WALEdits in the region context during reconstruction. Defines coprocessor hooks for interacting + * with operations on the {@link org.apache.hadoop.hbase.wal.WAL}. Since most implementations will + * be interested in only a subset of hooks, this class uses 'default' functions to avoid having to + * add unnecessary overrides. When the functions are non-empty, it's simply to satisfy the compiler + * by returning value of expected (non-void) type. It is done in a way that these default + * definitions act as no-op. So our suggestion to implementation would be to not call these + * 'default' methods from overrides.
        + *
        + *

        Exception Handling

        For all functions, exception handling is done as follows: + *
          + *
        • Exceptions of type {@link IOException} are reported back to client.
        • + *
        • For any other kind of exception: *
            - *
          • Exceptions of type {@link IOException} are reported back to client.
          • - *
          • For any other kind of exception: - *
              - *
            • If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then - * the server aborts.
            • - *
            • Otherwise, coprocessor is removed from the server and - * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.
            • - *
            - *
          • + *
          • If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the + * server aborts.
          • + *
          • Otherwise, coprocessor is removed from the server and + * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.
          • + *
          + *
        • *
        */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface WALObserver { /** - * Called before a {@link WALEdit} - * is writen to WAL. - * Do not amend the WALKey. It is InterfaceAudience.Private. Changing the WALKey will cause - * damage. + * Called before a {@link WALEdit} is writen to WAL. Do not amend the WALKey. It is + * InterfaceAudience.Private. Changing the WALKey will cause damage. * @deprecated Since hbase-2.0.0. To be replaced with an alternative that does not expose - * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in hbase-3.0.0. + * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in + * hbase-3.0.0. */ @Deprecated default void preWALWrite(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** - * Called after a {@link WALEdit} - * is writen to WAL. - * Do not amend the WALKey. It is InterfaceAudience.Private. Changing the WALKey will cause - * damage. + * Called after a {@link WALEdit} is writen to WAL. Do not amend the WALKey. It is + * InterfaceAudience.Private. Changing the WALKey will cause damage. * @deprecated Since hbase-2.0.0. To be replaced with an alternative that does not expose - * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in hbase-3.0.0. + * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in + * hbase-3.0.0. */ @Deprecated default void postWALWrite(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {} + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + } /** * Called before rolling the current WAL * @param oldPath the path of the current wal that we are replacing * @param newPath the path of the wal we are going to create */ - default void preWALRoll(ObserverContext ctx, - Path oldPath, Path newPath) throws IOException {} + default void preWALRoll(ObserverContext ctx, Path oldPath, + Path newPath) throws IOException { + } /** * Called after rolling the current WAL * @param oldPath the path of the wal that we replaced * @param newPath the path of the wal we have created and now is the current */ - default void postWALRoll(ObserverContext ctx, - Path oldPath, Path newPath) throws IOException {} + default void postWALRoll(ObserverContext ctx, Path oldPath, + Path newPath) throws IOException { + } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java index 7cba1eaa8751..509d284fbb06 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java @@ -1,209 +1,170 @@ /* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** - -

        Table of Contents

        - - -

        Overview

        -Coprocessors are code that runs in-process on each region server. Regions -contain references to the coprocessor implementation classes associated -with them. Coprocessor classes can be loaded either from local -jars on the region server's classpath or via the HDFS classloader. -

        -Multiple types of coprocessors are provided to provide sufficient flexibility -for potential use cases. Right now there are: -

        -

          -
        • Coprocessor: provides region lifecycle management hooks, e.g., region -open/close/split/flush/compact operations.
        • -
        • RegionObserver: provides hook for monitor table operations from -client side, such as table get/put/scan/delete, etc.
        • -
        • Endpoint: provides on demand triggers for any arbitrary function -executed at a region. One use case is column aggregation at region -server.
        • -
        - -

        Coprocessor

        -A coprocessor is required to -implement Coprocessor interface so that coprocessor framework -can manage it internally. -

        -Another design goal of this interface is to provide simple features for -making coprocessors useful, while exposing no more internal state or -control actions of the region server than necessary and not exposing them -directly. -

        -Over the lifecycle of a region, the methods of this interface are invoked -when the corresponding events happen. The master transitions regions -through the following states: -

        -    -unassigned -> pendingOpen -> open -> pendingClose -7gt; closed. -

        -Coprocessors have opportunity to intercept and handle events in -pendingOpen, open, and pendingClose states. -

        - -

        PendingOpen

        -

        -The region server is opening a region to bring it online. Coprocessors -can piggyback or fail this process. -

        -

          -
        • preOpen, postOpen: Called before and after the region is reported as - online to the master.
        • -
        -

        -

        Open

        -The region is open on the region server and is processing both client -requests (get, put, scan, etc.) and administrative actions (flush, compact, -split, etc.). Coprocessors can piggyback administrative actions via: -

        -

          -
        • preFlush, postFlush: Called before and after the memstore is flushed - into a new store file.
        • -
        • preCompact, postCompact: Called before and after compaction.
        • -
        • preSplit, postSplit: Called after the region is split.
        • -
        -

        -

        PendingClose

        -The region server is closing the region. This can happen as part of normal -operations or may happen when the region server is aborting due to fatal -conditions such as OOME, health check failure, or fatal filesystem -problems. Coprocessors can piggyback this event. If the server is aborting -an indication to this effect will be passed as an argument. -

        -

          -
        • preClose and postClose: Called before and after the region is - reported as closed to the master.
        • -
        -

        - -

        RegionObserver

        -If the coprocessor implements the RegionObserver interface it can -observe and mediate client actions on the region: -

        -

          -
        • preGet, postGet: Called before and after a client makes a Get - request.
        • -
        • preExists, postExists: Called before and after the client tests - for existence using a Get.
        • -
        • prePut and postPut: Called before and after the client stores a value. -
        • -
        • preDelete and postDelete: Called before and after the client - deletes a value.
        • -
        • preScannerOpen postScannerOpen: Called before and after the client - opens a new scanner.
        • -
        • preScannerNext, postScannerNext: Called before and after the client - asks for the next row on a scanner.
        • -
        • preScannerClose, postScannerClose: Called before and after the client - closes a scanner.
        • -
        • preCheckAndPut, postCheckAndPut: Called before and after the client - calls checkAndPut().
        • -
        • preCheckAndDelete, postCheckAndDelete: Called before and after the client - calls checkAndDelete().
        • -
        -

        -Here's an example of what a simple RegionObserver might look like. This -example shows how to implement access control for HBase. This -coprocessor checks user information for a given client request, e.g., -Get/Put/Delete/Scan by injecting code at certain -RegionObserver -preXXX hooks. If the user is not allowed to access the resource, a -CoprocessorException will be thrown. And the client request will be -denied by receiving this exception. -

        -
        -package org.apache.hadoop.hbase.coprocessor;
        -
        -import org.apache.hadoop.hbase.client.Get;
        -
        -// Sample access-control coprocessor. It utilizes RegionObserver
        -// and intercept preXXX() method to check user privilege for the given table
        -// and column family.
        -public class AccessControlCoprocessor extends BaseRegionObserverCoprocessor {
        -  // @Override
        -  public Get preGet(CoprocessorEnvironment e, Get get)
        -      throws CoprocessorException {
        -
        -    // check permissions..
        -    if (access_not_allowed)  {
        -      throw new AccessDeniedException("User is not allowed to access.");
        -    }
        -    return get;
        -  }
        -
        -  // override prePut(), preDelete(), etc.
        -}
        -
        -
        - -

        Endpoint

        -Coprocessor and RegionObserver provide certain hooks -for injecting user code running at each region. The user code will be triggered -by existing HTable and HBaseAdmin operations at -the certain hook points. -

        -Coprocessor Endpoints allow you to define your own dynamic RPC protocol to communicate -between clients and region servers, i.e., you can create a new method, specifying custom -request parameters and return types. RPC methods exposed by coprocessor Endpoints can be -triggered by calling client side dynamic RPC functions -- HTable.coprocessorService(...) + *

        Table of Contents

        + * + *

        Overview

        Coprocessors are code that runs in-process on each + * region server. Regions contain references to the coprocessor implementation classes associated + * with them. Coprocessor classes can be loaded either from local jars on the region server's + * classpath or via the HDFS classloader. + *

        + * Multiple types of coprocessors are provided to provide sufficient flexibility for potential use + * cases. Right now there are: + *

        + *

          + *
        • Coprocessor: provides region lifecycle management hooks, e.g., region + * open/close/split/flush/compact operations.
        • + *
        • RegionObserver: provides hook for monitor table operations from client side, such as table + * get/put/scan/delete, etc.
        • + *
        • Endpoint: provides on demand triggers for any arbitrary function executed at a region. One + * use case is column aggregation at region server.
        • + *
        + *

        Coprocessor

        A coprocessor is required to implement + * Coprocessor interface so that coprocessor framework can manage it internally. + *

        + * Another design goal of this interface is to provide simple features for making coprocessors + * useful, while exposing no more internal state or control actions of the region server than + * necessary and not exposing them directly. + *

        + * Over the lifecycle of a region, the methods of this interface are invoked when the corresponding + * events happen. The master transitions regions through the following states: + *

*     unassigned -> pendingOpen -> open -> pendingClose -> closed. + *

        + * Coprocessors have opportunity to intercept and handle events in pendingOpen, open, and + * pendingClose states. + *

        + *

        PendingOpen

        + *

        + * The region server is opening a region to bring it online. Coprocessors can piggyback or fail this + * process. + *

        + *

          + *
        • preOpen, postOpen: Called before and after the region is reported as online to the + * master.
        • + *
        + *

        + *

        Open

        The region is open on the region server and is processing both client requests + * (get, put, scan, etc.) and administrative actions (flush, compact, split, etc.). Coprocessors can + * piggyback administrative actions via: + *

        + *

          + *
        • preFlush, postFlush: Called before and after the memstore is flushed into a new store + * file.
        • + *
        • preCompact, postCompact: Called before and after compaction.
        • + *
        • preSplit, postSplit: Called after the region is split.
        • + *
        + *

        + *

        PendingClose

        The region server is closing the region. This can happen as part of normal + * operations or may happen when the region server is aborting due to fatal conditions such as OOME, + * health check failure, or fatal filesystem problems. Coprocessors can piggyback this event. If the + * server is aborting an indication to this effect will be passed as an argument. + *

        + *

          + *
        • preClose and postClose: Called before and after the region is reported as closed to the + * master.
        • + *
        + *

        + *

        RegionObserver

        If the coprocessor implements the + * RegionObserver interface it can observe and mediate client actions on the region: + *

        + *

          + *
        • preGet, postGet: Called before and after a client makes a Get request.
        • + *
        • preExists, postExists: Called before and after the client tests for existence using a + * Get.
        • + *
        • prePut and postPut: Called before and after the client stores a value.
        • + *
        • preDelete and postDelete: Called before and after the client deletes a value.
        • + *
        • preScannerOpen postScannerOpen: Called before and after the client opens a new scanner.
        • + *
        • preScannerNext, postScannerNext: Called before and after the client asks for the next row on + * a scanner.
        • + *
        • preScannerClose, postScannerClose: Called before and after the client closes a scanner.
        • + *
        • preCheckAndPut, postCheckAndPut: Called before and after the client calls checkAndPut().
        • + *
        • preCheckAndDelete, postCheckAndDelete: Called before and after the client calls + * checkAndDelete().
        • + *
        + *

        + * Here's an example of what a simple RegionObserver might look like. This example shows how to + * implement access control for HBase. This coprocessor checks user information for a given client + * request, e.g., Get/Put/Delete/Scan by injecting code at certain RegionObserver + * preXXX hooks. If the user is not allowed to access the resource, a CoprocessorException will be + * thrown. And the client request will be denied by receiving this exception. + *

        + * + *
        + * package org.apache.hadoop.hbase.coprocessor;
        + * 
        + * import org.apache.hadoop.hbase.client.Get;
        + * 
        + * // Sample access-control coprocessor. It utilizes RegionObserver
        + * // and intercept preXXX() method to check user privilege for the given table
        + * // and column family.
        + * public class AccessControlCoprocessor extends BaseRegionObserverCoprocessor {
        + *   // @Override
        + *   public Get preGet(CoprocessorEnvironment e, Get get) throws CoprocessorException {
        + * 
        + *     // check permissions..
        + *     if (access_not_allowed) {
        + *       throw new AccessDeniedException("User is not allowed to access.");
        + *     }
        + *     return get;
        + *   }
        + * 
        + *   // override prePut(), preDelete(), etc.
        + * }
        + * 
        + * + *
        + *
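The listing above predates the current coprocessor API (BaseRegionObserverCoprocessor no longer exists). As a rough, non-authoritative equivalent against the RegionCoprocessor/RegionObserver interfaces (the class name and the permission check are placeholders, not code from this patch):

  import java.io.IOException;
  import java.util.List;
  import java.util.Optional;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionObserver;
  import org.apache.hadoop.hbase.security.AccessDeniedException;

  // Hypothetical access-control observer: reject a Get before it reaches the region.
  public class AccessControlObserver implements RegionCoprocessor, RegionObserver {

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
      List<Cell> result) throws IOException {
      if (!isAllowed(c, get)) { // isAllowed() is a placeholder for the real permission check
        throw new AccessDeniedException("User is not allowed to access.");
      }
    }

    private boolean isAllowed(ObserverContext<RegionCoprocessorEnvironment> c, Get get) {
      return true; // placeholder
    }
  }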

        Endpoint

        Coprocessor and + * RegionObserver provide certain hooks for injecting user code running at each region. + * The user code will be triggered by existing HTable and HBaseAdmin + * operations at the certain hook points. + *

        + * Coprocessor Endpoints allow you to define your own dynamic RPC protocol to communicate between + * clients and region servers, i.e., you can create a new method, specifying custom request + * parameters and return types. RPC methods exposed by coprocessor Endpoints can be triggered by + * calling client side dynamic RPC functions -- HTable.coprocessorService(...) . -

        -To implement an Endpoint, you need to: -

          -
        • Define a protocol buffer Service and supporting Message types for the RPC methods. - See the - protocol buffer guide - for more details on defining services.
        • -
        • Generate the Service and Message code using the protoc compiler
        • -
        • Implement the generated Service interface and override get*Service() method in - relevant Coprocessor to return a reference to the Endpoint's protocol buffer Service instance. -
        -

        -For a more detailed discussion of how to implement a coprocessor Endpoint, along with some sample -code, see the {@code org.apache.hadoop.hbase.client.coprocessor} package documentation. -

        - -

        Coprocessor loading

        -A customized coprocessor can be loaded by two different ways, by configuration, -or by TableDescriptor for a newly created table. -

        -(Currently we don't really have an on demand coprocessor loading mechanism for -opened regions.) -

        Load from configuration

        -Whenever a region is opened, it will read coprocessor class names from -hbase.coprocessor.region.classes from Configuration. -Coprocessor framework will automatically load the configured classes as -default coprocessors. The classes must be included in the classpath already. - -

        -

        -
        + * 

        + * To implement an Endpoint, you need to: + *

          + *
        • Define a protocol buffer Service and supporting Message types for the RPC methods. See the + * protocol buffer + * guide for more details on defining services.
        • + *
        • Generate the Service and Message code using the protoc compiler
        • + *
        • Implement the generated Service interface and override get*Service() method in relevant + * Coprocessor to return a reference to the Endpoint's protocol buffer Service instance. + *
        + *

        + * For a more detailed discussion of how to implement a coprocessor Endpoint, along with some sample + * code, see the {@code org.apache.hadoop.hbase.client.coprocessor} package documentation. + *
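Putting the steps above together, a skeleton Endpoint might look like the following sketch. RowCountService, CountRequest and CountResponse stand in for whatever classes protoc would generate from the user's .proto file, so none of these names come from this patch:

  import java.util.Collections;
  import com.google.protobuf.RpcCallback;
  import com.google.protobuf.RpcController;
  import com.google.protobuf.Service;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;

  // Hypothetical endpoint built on a generated protobuf Service.
  public class RowCountEndpoint extends RowCountService implements RegionCoprocessor {

    @Override
    public Iterable<Service> getServices() {
      // Expose the generated Service so the framework can route client RPCs to this coprocessor.
      return Collections.singleton(this);
    }

    @Override
    public void count(RpcController controller, CountRequest request,
      RpcCallback<CountResponse> done) {
      // Scan the region, build a response, then hand it back via the callback.
      done.run(CountResponse.newBuilder().setCount(0L).build()); // placeholder result
    }
  }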

        + *

        Coprocessor loading

        A customized coprocessor can be loaded by two + * different ways, by configuration, or by TableDescriptor for a newly created table. + *

        + * (Currently we don't really have an on demand coprocessor loading mechanism for opened regions.) + *

        Load from configuration

        Whenever a region is opened, it will read coprocessor class + * names from hbase.coprocessor.region.classes from Configuration. + * Coprocessor framework will automatically load the configured classes as default coprocessors. The + * classes must be included in the classpath already. + *

        + *

        + * + *
           <property>
             <name>hbase.coprocessor.region.classes</name>
             <value>org.apache.hadoop.hbase.coprocessor.AccessControlCoprocessor, org.apache.hadoop.hbase.coprocessor.ColumnAggregationProtocol</value>
        @@ -215,63 +176,56 @@ public Get preGet(CoprocessorEnvironment e, Get get)
             qualified class name here.
             </description>
           </property>
        -
        -
        -

        -The first defined coprocessor will be assigned -Coprocessor.Priority.SYSTEM as priority. And each following -coprocessor's priority will be incremented by one. Coprocessors are executed -in order according to the natural ordering of the int. - -

        Load from table attribute

        -Coprocessor classes can also be configured at table attribute. The -attribute key must start with "Coprocessor" and values of the form is -<path>:<class>:<priority>, so that the framework can -recognize and load it. -

        -

        -
        + * 
        + * + *
        + *

        + * The first defined coprocessor will be assigned Coprocessor.Priority.SYSTEM as + * priority. And each following coprocessor's priority will be incremented by one. Coprocessors are + * executed in order according to the natural ordering of the int. + *

        Load from table attribute

        Coprocessor classes can also be configured at table attribute. + * The attribute key must start with "Coprocessor" and values of the form is + * <path>:<class>:<priority>, so that the framework can recognize and load it. + *

        + *

        + * + *
         'COPROCESSOR$1' => 'hdfs://localhost:8020/hbase/coprocessors/test.jar:Test:1000'
         'COPROCESSOR$2' => '/hbase/coprocessors/test2.jar:AnotherTest:1001'
        -
        -
        -

        -<path> must point to a jar, can be on any filesystem supported by the -Hadoop FileSystem object. -

        -<class> is the coprocessor implementation class. A jar can contain -more than one coprocessor implementation, but only one can be specified -at a time in each table attribute. -

        -<priority> is an integer. Coprocessors are executed in order according -to the natural ordering of the int. Coprocessors can optionally abort -actions. So typically one would want to put authoritative CPs (security -policy implementations, perhaps) ahead of observers. -

        -

        -
        -  Path path = new Path(fs.getUri() + Path.SEPARATOR +
        -    "TestClassloading.jar");
        -
        -  // create a table that references the jar
        -  TableDescriptor htd = TableDescriptorBuilder
        -                        .newBuilder(TableName.valueOf(getClass().getTableName()))
        -                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("test"))
        -                        .setValue(Bytes.toBytes("Coprocessor$1", path.toString()+
        -                          ":" + classFullName +
        -                          ":" + Coprocessor.Priority.USER))
        -                        .build();
        -  Admin admin = connection.getAdmin();
        -  admin.createTable(htd);
        -
        -

        Chain of RegionObservers

        -As described above, multiple coprocessors can be loaded at one region at the -same time. In case of RegionObserver, you can have more than one -RegionObservers register to one same hook point, i.e, preGet(), etc. -When a region reach the -hook point, the framework will invoke each registered RegionObserver by the -order of assigned priority. -
        - -*/ + *
        + * + *
        + *

        + * <path> must point to a jar, can be on any filesystem supported by the Hadoop + * FileSystem object. + *

        + * <class> is the coprocessor implementation class. A jar can contain more than one + * coprocessor implementation, but only one can be specified at a time in each table attribute. + *

        + * <priority> is an integer. Coprocessors are executed in order according to the natural + * ordering of the int. Coprocessors can optionally abort actions. So typically one would want to + * put authoritative CPs (security policy implementations, perhaps) ahead of observers. + *

        + *

        + * + *
        + * Path path = new Path(fs.getUri() + Path.SEPARATOR + "TestClassloading.jar");
        + * 
        + * // create a table that references the jar
        + * TableDescriptor htd =
        + *     TableDescriptorBuilder.newBuilder(TableName.valueOf(getClass().getTableName()))
        + *         .setColumnFamily(ColumnFamilyDescriptorBuilder.of("test"))
+ *         .setValue(Bytes.toBytes("Coprocessor$1"),
+ *           Bytes.toBytes(path.toString() + ":" + classFullName + ":" + Coprocessor.Priority.USER))
        + *         .build();
        + * Admin admin = connection.getAdmin();
        + * admin.createTable(htd);
        + * 
        + * + *
        + *

        Chain of RegionObservers

As described above, multiple coprocessors can be loaded at one + * region at the same time. In the case of RegionObserver, you can have more than one RegionObserver + * registered at the same hook point, i.e., preGet(), etc. When a region reaches the hook point, the + * framework will invoke each registered RegionObserver in the order of assigned priority.
        + */ package org.apache.hadoop.hbase.coprocessor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java index 85abc722044a..2d4dfffc4772 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,37 +20,36 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage; - /** * A ForeignException is an exception from another thread or process. *

        - * ForeignExceptions are sent to 'remote' peers to signal an abort in the face of failures. - * When serialized for transmission we encode using Protobufs to ensure version compatibility. + * ForeignExceptions are sent to 'remote' peers to signal an abort in the face of failures. When + * serialized for transmission we encode using Protobufs to ensure version compatibility. *

        - * Foreign exceptions contain a Throwable as its cause. This can be a "regular" exception - * generated locally or a ProxyThrowable that is a representation of the original exception - * created on original 'remote' source. These ProxyThrowables have their their stacks traces and - * messages overridden to reflect the original 'remote' exception. The only way these - * ProxyThrowables are generated are by this class's {@link #deserialize(byte[])} method. + * Foreign exceptions contain a Throwable as its cause. This can be a "regular" exception generated + * locally or a ProxyThrowable that is a representation of the original exception created on + * original 'remote' source. These ProxyThrowables have their their stacks traces and messages + * overridden to reflect the original 'remote' exception. The only way these ProxyThrowables are + * generated are by this class's {@link #deserialize(byte[])} method. */ @InterfaceAudience.Public @SuppressWarnings("serial") public class ForeignException extends IOException { /** - * Name of the throwable's source such as a host or thread name. Must be non-null. + * Name of the throwable's source such as a host or thread name. Must be non-null. */ private final String source; /** - * Create a new ForeignException that can be serialized. It is assumed that this came form a - * local source. + * Create a new ForeignException that can be serialized. It is assumed that this came form a local + * source. * @param source * @param cause */ @@ -62,7 +61,7 @@ public ForeignException(String source, Throwable cause) { } /** - * Create a new ForeignException that can be serialized. It is assumed that this is locally + * Create a new ForeignException that can be serialized. It is assumed that this is locally * generated. * @param source * @param msg @@ -78,11 +77,9 @@ public String getSource() { /** * The cause of a ForeignException can be an exception that was generated on a local in process - * thread, or a thread from a 'remote' separate process. - * - * If the cause is a ProxyThrowable, we know it came from deserialization which usually means - * it came from not only another thread, but also from a remote thread. - * + * thread, or a thread from a 'remote' separate process. If the cause is a ProxyThrowable, we know + * it came from deserialization which usually means it came from not only another thread, but also + * from a remote thread. * @return true if went through deserialization, false if locally generated */ public boolean isRemote() { @@ -91,7 +88,7 @@ public boolean isRemote() { @Override public String toString() { - String className = getCause().getClass().getName() ; + String className = getCause().getClass().getName(); return className + " via " + getSource() + ":" + getLocalizedMessage(); } @@ -100,8 +97,8 @@ public String toString() { * @param trace the stack trace to convert to protobuf message * @return null if the passed stack is null. */ - private static List toStackTraceElementMessages( - StackTraceElement[] trace) { + private static List + toStackTraceElementMessages(StackTraceElement[] trace) { // if there is no stack trace, ignore it and just return the message if (trace == null) return null; // build the stack trace for the message @@ -157,22 +154,20 @@ public static byte[] serialize(String source, Throwable t) { * @param bytes * @return the ForeignExcpetion instance * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown. 
- * @throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException + * @throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException */ - public static ForeignException deserialize(byte[] bytes) - throws IOException { + public static ForeignException deserialize(byte[] bytes) throws IOException { // figure out the data we need to pass ForeignExceptionMessage eem = ForeignExceptionMessage.parseFrom(bytes); GenericExceptionMessage gem = eem.getGenericException(); - StackTraceElement [] trace = ForeignException.toStackTrace(gem.getTraceList()); + StackTraceElement[] trace = ForeignException.toStackTrace(gem.getTraceList()); ProxyThrowable dfe = new ProxyThrowable(gem.getMessage(), trace); ForeignException e = new ForeignException(eem.getSource(), dfe); return e; } /** - * Unwind a serialized array of {@link StackTraceElementMessage}s to a - * {@link StackTraceElement}s. + * Unwind a serialized array of {@link StackTraceElementMessage}s to a {@link StackTraceElement}s. * @param traceList list that was serialized * @return the deserialized list or null if it couldn't be unwound (e.g. wasn't set on * the sender). @@ -184,8 +179,8 @@ private static StackTraceElement[] toStackTrace(List t StackTraceElement[] trace = new StackTraceElement[traceList.size()]; for (int i = 0; i < traceList.size(); i++) { StackTraceElementMessage elem = traceList.get(i); - trace[i] = new StackTraceElement( - elem.getDeclaringClass(), elem.getMethodName(), elem.getFileName(), elem.getLineNumber()); + trace[i] = new StackTraceElement(elem.getDeclaringClass(), elem.getMethodName(), + elem.getFileName(), elem.getLineNumber()); } return trace; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java index b2ed0c267da4..22b208bf1476 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,24 +19,23 @@ import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * The dispatcher acts as the state holding entity for foreign error handling. The first - * exception received by the dispatcher get passed directly to the listeners. Subsequent - * exceptions are dropped. + * The dispatcher acts as the state holding entity for foreign error handling. The first exception + * received by the dispatcher get passed directly to the listeners. Subsequent exceptions are + * dropped. *

        * If there are multiple dispatchers that are all in the same foreign exception monitoring group, * ideally all these monitors are "peers" -- any error on one dispatcher should get propagated to - * all others (via rpc, or some other mechanism). Due to racing error conditions the exact reason - * for failure may be different on different peers, but the fact that they are in error state - * should eventually hold on all. + * all others (via rpc, or some other mechanism). Due to racing error conditions the exact reason + * for failure may be different on different peers, but the fact that they are in error state should + * eventually hold on all. *

        - * This is thread-safe and must be because this is expected to be used to propagate exceptions - * from foreign threads. + * This is thread-safe and must be because this is expected to be used to propagate exceptions from + * foreign threads. */ @InterfaceAudience.Private public class ForeignExceptionDispatcher implements ForeignExceptionListener, ForeignExceptionSnare { @@ -62,7 +61,7 @@ public synchronized void receive(ForeignException e) { // if we already have an exception, then ignore it if (exception != null) return; - LOG.debug(name + " accepting received exception" , e); + LOG.debug(name + " accepting received exception", e); // mark that we got the error if (e != null) { exception = e; @@ -95,19 +94,19 @@ synchronized public ForeignException getException() { /** * Sends an exception to all listeners. - * @param e {@link ForeignException} containing the cause. Can be null. + * @param e {@link ForeignException} containing the cause. Can be null. */ private void dispatch(ForeignException e) { // update all the listeners with the passed error - for (ForeignExceptionListener l: listeners) { + for (ForeignExceptionListener l : listeners) { l.receive(e); } } /** - * Listen for failures to a given process. This method should only be used during - * initialization and not added to after exceptions are accepted. - * @param errorable listener for the errors. may be null. + * Listen for failures to a given process. This method should only be used during initialization + * and not added to after exceptions are accepted. + * @param errorable listener for the errors. may be null. */ public synchronized void addListener(ForeignExceptionListener errorable) { this.listeners.add(errorable); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java index 26de489aa765..d2ff5bcc41ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java index 7bc1ee47713e..742563e26f02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionSnare.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,45 +20,40 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This is an interface for a cooperative exception throwing mechanism. Implementations are - * containers that holds an exception from a separate thread. This can be used to receive - * exceptions from 'foreign' threads or from separate 'foreign' processes. + * This is an interface for a cooperative exception throwing mechanism. Implementations are + * containers that holds an exception from a separate thread. 
This can be used to receive exceptions + * from 'foreign' threads or from separate 'foreign' processes. *

        - * To use, one would pass an implementation of this object to a long running method and - * periodically check by calling {@link #rethrowException()}. If any foreign exceptions have - * been received, the calling thread is then responsible for handling the rethrown exception. + * To use, one would pass an implementation of this object to a long running method and periodically + * check by calling {@link #rethrowException()}. If any foreign exceptions have been received, the + * calling thread is then responsible for handling the rethrown exception. *

* One could use the boolean {@link #hasException()} to determine if there is an exception as well. *
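A minimal sketch of that check-and-rethrow pattern (the surrounding method, isDone() and doNextChunkOfWork() are invented for illustration and are not part of this patch):

  // Hypothetical long-running task that cooperates with a ForeignExceptionSnare.
  void runUntilDoneOrAborted(ForeignExceptionSnare snare) throws ForeignException {
    while (!isDone()) {
      snare.rethrowException(); // surfaces any error reported by a 'foreign' thread or process
      doNextChunkOfWork();      // placeholder for the real unit of work
    }
  }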

        - * NOTE: This is very similar to the InterruptedException/interrupt/interrupted pattern. There, - * the notification state is bound to a Thread. Using this, applications receive Exceptions in - * the snare. The snare is referenced and checked by multiple threads which enables exception - * notification in all the involved threads/processes. + * NOTE: This is very similar to the InterruptedException/interrupt/interrupted pattern. There, the + * notification state is bound to a Thread. Using this, applications receive Exceptions in the + * snare. The snare is referenced and checked by multiple threads which enables exception + * notification in all the involved threads/processes. */ @InterfaceAudience.Private public interface ForeignExceptionSnare { /** - * Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is - * no exception this is a no-op - * - * @throws ForeignException - * all exceptions from remote sources are procedure exceptions + * Rethrow an exception currently held by the {@link ForeignExceptionSnare}. If there is no + * exception this is a no-op + * @throws ForeignException all exceptions from remote sources are procedure exceptions */ void rethrowException() throws ForeignException; /** - * Non-exceptional form of {@link #rethrowException()}. Checks to see if any - * process to which the exception checkers is bound has created an error that - * would cause a failure. - * + * Non-exceptional form of {@link #rethrowException()}. Checks to see if any process to which the + * exception checkers is bound has created an error that would cause a failure. * @return true if there has been an error,false otherwise */ boolean hasException(); /** * Get the value of the captured exception. - * * @return the captured foreign exception or null if no exception captured. */ ForeignException getException(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java index f17dcde6baeb..a353fd88ad93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,12 +36,12 @@ public class TimeoutException extends Exception { * Exception indicating that an operation attempt has timed out * @param start time the operation started (ms since epoch) * @param end time the timeout was triggered (ms since epoch) - * @param expected expected amount of time for the operation to complete (ms) - * (ideally, expected <= end-start) + * @param expected expected amount of time for the operation to complete (ms) (ideally, expected + * <= end-start) */ public TimeoutException(String sourceName, long start, long end, long expected) { - super("Timeout elapsed! Source:" + sourceName + " Start:" + start + ", End:" + end - + ", diff:" + (end - start) + ", max:" + expected + " ms"); + super("Timeout elapsed! 
Source:" + sourceName + " Start:" + start + ", End:" + end + ", diff:" + + (end - start) + ", max:" + expected + " ms"); this.sourceName = sourceName; this.start = start; this.end = end; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java index 36182d677d82..251eb5a07a27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,10 @@ import java.util.Timer; import java.util.TimerTask; - +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * Time a given process/operation and report a failure if the elapsed time exceeds the max allowed @@ -63,8 +62,8 @@ public void run() { TimeoutExceptionInjector.this.complete = true; } long end = EnvironmentEdgeManager.currentTime(); - TimeoutException tee = new TimeoutException( - "Timeout caused Foreign Exception", start, end, maxTime); + TimeoutException tee = + new TimeoutException("Timeout caused Foreign Exception", start, end, maxTime); String source = "timer-" + timer; listener.receive(new ForeignException(source, tee)); } @@ -85,8 +84,8 @@ public void complete() { return; } if (LOG.isDebugEnabled()) { - LOG.debug("Marking timer as complete - no error notifications will be received for " + - "this timer."); + LOG.debug("Marking timer as complete - no error notifications will be received for " + + "this timer."); } this.complete = true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java index 17054a5c409c..0ba4805163b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import io.opentelemetry.context.Scope; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -31,23 +29,19 @@ import org.slf4j.LoggerFactory; /** - * Abstract base class for all HBase event handlers. Subclasses should - * implement the {@link #process()} and {@link #prepare()} methods. Subclasses - * should also do all necessary checks up in their prepare() if possible -- check - * table exists, is disabled, etc. -- so they fail fast rather than later when process - * is running. Do it this way because process be invoked directly but event - * handlers are also - * run in an executor context -- i.e. 
asynchronously -- and in this case, - * exceptions thrown at process time will not be seen by the invoker, not till - * we implement a call-back mechanism so the client can pick them up later. + * Abstract base class for all HBase event handlers. Subclasses should implement the + * {@link #process()} and {@link #prepare()} methods. Subclasses should also do all necessary checks + * up in their prepare() if possible -- check table exists, is disabled, etc. -- so they fail fast + * rather than later when process is running. Do it this way because process be invoked directly but + * event handlers are also run in an executor context -- i.e. asynchronously -- and in this case, + * exceptions thrown at process time will not be seen by the invoker, not till we implement a + * call-back mechanism so the client can pick them up later. *

        - * Event handlers have an {@link EventType}. - * {@link EventType} is a list of ALL handler event types. We need to keep - * a full list in one place -- and as enums is a good shorthand for an - * implemenations -- because event handlers can be passed to executors when - * they are to be run asynchronously. The - * hbase executor, see ExecutorService, has a switch for passing - * event type to executor. + * Event handlers have an {@link EventType}. {@link EventType} is a list of ALL handler event types. + * We need to keep a full list in one place -- and as enums is a good shorthand for an + * implemenations -- because event handlers can be passed to executors when they are to be run + * asynchronously. The hbase executor, see ExecutorService, has a switch for passing event type to + * executor. *
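As a rough sketch of the prepare()/process() split described here (the handler name and the checks are illustrative only; the signatures follow the EventHandler methods shown in this patch):

  import java.io.IOException;
  import org.apache.hadoop.hbase.Server;
  import org.apache.hadoop.hbase.executor.EventHandler;
  import org.apache.hadoop.hbase.executor.EventType;

  // Hypothetical handler: cheap validation in prepare(), the real work in process().
  public class ExampleTableEventHandler extends EventHandler {

    public ExampleTableEventHandler(Server server, EventType eventType) {
      super(server, eventType);
    }

    @Override
    public EventHandler prepare() throws Exception {
      // Fail-fast checks (e.g. the target table exists and is in the expected state) go here so
      // the caller sees errors immediately; expensive work is deferred to process().
      return this;
    }

    @Override
    public void process() throws IOException {
      // Long-running work, typically executed later on an executor thread.
    }
  }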

        * @see ExecutorService */ @@ -80,17 +74,17 @@ public EventHandler(Server server, EventType eventType) { this.eventType = eventType; seqid = seqids.incrementAndGet(); if (server != null) { - this.waitingTimeForEvents = server.getConfiguration(). - getInt("hbase.master.event.waiting.time", 1000); + this.waitingTimeForEvents = + server.getConfiguration().getInt("hbase.master.event.waiting.time", 1000); } } /** - * Event handlers should do all the necessary checks in this method (rather than - * in the constructor, or in process()) so that the caller, which is mostly executed - * in the ipc context can fail fast. Process is executed async from the client ipc, - * so this method gives a quick chance to do some basic checks. - * Should be called after constructing the EventHandler, and before process(). + * Event handlers should do all the necessary checks in this method (rather than in the + * constructor, or in process()) so that the caller, which is mostly executed in the ipc context + * can fail fast. Process is executed async from the client ipc, so this method gives a quick + * chance to do some basic checks. Should be called after constructing the EventHandler, and + * before process(). * @return the instance of this class * @throws Exception when something goes wrong */ @@ -101,7 +95,7 @@ public EventHandler prepare() throws Exception { @Override public void run() { Span span = TraceUtil.getGlobalTracer().spanBuilder(getClass().getSimpleName()) - .setParent(Context.current().with(parent)).startSpan(); + .setParent(Context.current().with(parent)).startSpan(); try (Scope scope = span.makeCurrent()) { process(); } catch (Throwable t) { @@ -112,8 +106,7 @@ public void run() { } /** - * This method is the main processing loop to be implemented by the various - * subclasses. + * This method is the main processing loop to be implemented by the various subclasses. * @throws IOException */ public abstract void process() throws IOException; @@ -127,10 +120,10 @@ public EventType getEventType() { } /** - * Get the priority level for this handler instance. This uses natural - * ordering so lower numbers are higher priority. + * Get the priority level for this handler instance. This uses natural ordering so lower numbers + * are higher priority. *

        - * Lowest priority is Integer.MAX_VALUE. Highest priority is 0. + * Lowest priority is Integer.MAX_VALUE. Highest priority is 0. *

        * Subclasses should override this method to allow prioritizing handlers. *

        @@ -152,15 +145,15 @@ public long getSeqid() { /** * Default prioritized runnable comparator which implements a FIFO ordering. *

        - * Subclasses should not override this. Instead, if they want to implement - * priority beyond FIFO, they should override {@link #getPriority()}. + * Subclasses should not override this. Instead, if they want to implement priority beyond FIFO, + * they should override {@link #getPriority()}. */ @Override public int compareTo(EventHandler o) { if (o == null) { return 1; } - if(getPriority() != o.getPriority()) { + if (getPriority() != o.getPriority()) { return (getPriority() < o.getPriority()) ? -1 : 1; } return (this.seqid < o.seqid) ? -1 : 1; @@ -168,16 +161,13 @@ public int compareTo(EventHandler o) { @Override public String toString() { - return "Event #" + getSeqid() + - " of type " + eventType + - " (" + getInformativeName() + ")"; + return "Event #" + getSeqid() + " of type " + eventType + " (" + getInformativeName() + ")"; } /** - * Event implementations should override thie class to provide an - * informative name about what event they are handling. For example, - * event-specific information such as which region or server is - * being processed should be included if possible. + * Event implementations should override thie class to provide an informative name about what + * event they are handling. For example, event-specific information such as which region or server + * is being processed should be included if possible. */ public String getInformativeName() { return this.getClass().toString(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index 0b608be369a3..e79c9c2bc415 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,113 +33,103 @@ public enum EventType { // Messages originating from RS (NOTE: there is NO direct communication from // RS to Master). These are a result of RS updates into ZK. - // RS_ZK_REGION_CLOSING (1), // It is replaced by M_ZK_REGION_CLOSING(HBASE-4739) + // RS_ZK_REGION_CLOSING (1), // It is replaced by M_ZK_REGION_CLOSING(HBASE-4739) /** * RS_ZK_REGION_CLOSED
        - * * RS has finished closing a region. */ - RS_ZK_REGION_CLOSED (2, ExecutorType.MASTER_CLOSE_REGION), + RS_ZK_REGION_CLOSED(2, ExecutorType.MASTER_CLOSE_REGION), /** * RS_ZK_REGION_OPENING
        - * * RS is in process of opening a region. */ - RS_ZK_REGION_OPENING (3, null), + RS_ZK_REGION_OPENING(3, null), /** * RS_ZK_REGION_OPENED
        - * * RS has finished opening a region. */ - RS_ZK_REGION_OPENED (4, ExecutorType.MASTER_OPEN_REGION), + RS_ZK_REGION_OPENED(4, ExecutorType.MASTER_OPEN_REGION), /** * RS_ZK_REGION_SPLITTING
        - * * RS has started a region split after master says it's ok to move on. */ - RS_ZK_REGION_SPLITTING (5, null), + RS_ZK_REGION_SPLITTING(5, null), /** * RS_ZK_REGION_SPLIT
        - * * RS split has completed and is notifying the master. */ - RS_ZK_REGION_SPLIT (6, ExecutorType.MASTER_SERVER_OPERATIONS), + RS_ZK_REGION_SPLIT(6, ExecutorType.MASTER_SERVER_OPERATIONS), /** * RS_ZK_REGION_FAILED_OPEN
        - * * RS failed to open a region. */ - RS_ZK_REGION_FAILED_OPEN (7, ExecutorType.MASTER_CLOSE_REGION), + RS_ZK_REGION_FAILED_OPEN(7, ExecutorType.MASTER_CLOSE_REGION), /** * RS_ZK_REGION_MERGING
        - * * RS has started merging regions after master says it's ok to move on. */ - RS_ZK_REGION_MERGING (8, null), + RS_ZK_REGION_MERGING(8, null), /** * RS_ZK_REGION_MERGE
        - * * RS region merge has completed and is notifying the master. */ - RS_ZK_REGION_MERGED (9, ExecutorType.MASTER_SERVER_OPERATIONS), + RS_ZK_REGION_MERGED(9, ExecutorType.MASTER_SERVER_OPERATIONS), /** * RS_ZK_REQUEST_REGION_SPLIT
        - * - * RS has requested to split a region. This is to notify master - * and check with master if the region is in a state good to split. + * RS has requested to split a region. This is to notify master and check with master if the + * region is in a state good to split. */ - RS_ZK_REQUEST_REGION_SPLIT (10, null), + RS_ZK_REQUEST_REGION_SPLIT(10, null), /** * RS_ZK_REQUEST_REGION_MERGE
        - * - * RS has requested to merge two regions. This is to notify master - * and check with master if two regions is in states good to merge. + * RS has requested to merge two regions. This is to notify master and check with master if two + * regions is in states good to merge. */ - RS_ZK_REQUEST_REGION_MERGE (11, null), + RS_ZK_REQUEST_REGION_MERGE(11, null), /** * Messages originating from Master to RS.
        * M_RS_OPEN_REGION
        * Master asking RS to open a region. */ - M_RS_OPEN_REGION (20, ExecutorType.RS_OPEN_REGION), + M_RS_OPEN_REGION(20, ExecutorType.RS_OPEN_REGION), /** * Messages originating from Master to RS.
        * M_RS_OPEN_ROOT
        * Master asking RS to open root. */ - M_RS_OPEN_ROOT (21, ExecutorType.RS_OPEN_ROOT), + M_RS_OPEN_ROOT(21, ExecutorType.RS_OPEN_ROOT), /** * Messages originating from Master to RS.
        * M_RS_OPEN_META
        * Master asking RS to open meta. */ - M_RS_OPEN_META (22, ExecutorType.RS_OPEN_META), + M_RS_OPEN_META(22, ExecutorType.RS_OPEN_META), /** * Messages originating from Master to RS.
        * M_RS_CLOSE_REGION
        * Master asking RS to close a region. */ - M_RS_CLOSE_REGION (23, ExecutorType.RS_CLOSE_REGION), + M_RS_CLOSE_REGION(23, ExecutorType.RS_CLOSE_REGION), /** * Messages originating from Master to RS.
        * M_RS_CLOSE_ROOT
        * Master asking RS to close root. */ - M_RS_CLOSE_ROOT (24, ExecutorType.RS_CLOSE_ROOT), + M_RS_CLOSE_ROOT(24, ExecutorType.RS_CLOSE_ROOT), /** * Messages originating from Master to RS.
        * M_RS_CLOSE_META
        * Master asking RS to close meta. */ - M_RS_CLOSE_META (25, ExecutorType.RS_CLOSE_META), + M_RS_CLOSE_META(25, ExecutorType.RS_CLOSE_META), /** * Messages originating from Master to RS.
        * M_RS_OPEN_PRIORITY_REGION
        - * Master asking RS to open a priority region. + * Master asking RS to open a priority region. */ - M_RS_OPEN_PRIORITY_REGION (26, ExecutorType.RS_OPEN_PRIORITY_REGION), + M_RS_OPEN_PRIORITY_REGION(26, ExecutorType.RS_OPEN_PRIORITY_REGION), /** * Messages originating from Master to RS.
        * M_RS_SWITCH_RPC_THROTTLE
        @@ -152,168 +142,154 @@ public enum EventType { * C_M_MERGE_REGION
        * Client asking Master to merge regions. */ - C_M_MERGE_REGION (30, ExecutorType.MASTER_MERGE_OPERATIONS), + C_M_MERGE_REGION(30, ExecutorType.MASTER_MERGE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_DELETE_TABLE
        * Client asking Master to delete a table. */ - C_M_DELETE_TABLE (40, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_DELETE_TABLE(40, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_DISABLE_TABLE
        * Client asking Master to disable a table. */ - C_M_DISABLE_TABLE (41, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_DISABLE_TABLE(41, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_ENABLE_TABLE
        * Client asking Master to enable a table. */ - C_M_ENABLE_TABLE (42, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_ENABLE_TABLE(42, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_MODIFY_TABLE
        * Client asking Master to modify a table. */ - C_M_MODIFY_TABLE (43, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_MODIFY_TABLE(43, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_ADD_FAMILY
        * Client asking Master to add family to table. */ - C_M_ADD_FAMILY (44, null), + C_M_ADD_FAMILY(44, null), /** * Messages originating from Client to Master.
        * C_M_DELETE_FAMILY
        * Client asking Master to delete family of table. */ - C_M_DELETE_FAMILY (45, null), + C_M_DELETE_FAMILY(45, null), /** * Messages originating from Client to Master.
        * C_M_MODIFY_FAMILY
        * Client asking Master to modify family of table. */ - C_M_MODIFY_FAMILY (46, null), + C_M_MODIFY_FAMILY(46, null), /** * Messages originating from Client to Master.
        * C_M_CREATE_TABLE
        * Client asking Master to create a table. */ - C_M_CREATE_TABLE (47, ExecutorType.MASTER_TABLE_OPERATIONS), + C_M_CREATE_TABLE(47, ExecutorType.MASTER_TABLE_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_SNAPSHOT_TABLE
        * Client asking Master to snapshot an offline table. */ - C_M_SNAPSHOT_TABLE (48, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), + C_M_SNAPSHOT_TABLE(48, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), /** * Messages originating from Client to Master.
        * C_M_RESTORE_SNAPSHOT
        * Client asking Master to restore a snapshot. */ - C_M_RESTORE_SNAPSHOT (49, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), + C_M_RESTORE_SNAPSHOT(49, ExecutorType.MASTER_SNAPSHOT_OPERATIONS), // Updates from master to ZK. This is done by the master and there is // nothing to process by either Master or RS /** - * M_ZK_REGION_OFFLINE - * Master adds this region as offline in ZK + * M_ZK_REGION_OFFLINE Master adds this region as offline in ZK */ - M_ZK_REGION_OFFLINE (50, null), + M_ZK_REGION_OFFLINE(50, null), /** - * M_ZK_REGION_CLOSING - * Master adds this region as closing in ZK + * M_ZK_REGION_CLOSING Master adds this region as closing in ZK */ - M_ZK_REGION_CLOSING (51, null), + M_ZK_REGION_CLOSING(51, null), /** - * Master controlled events to be executed on the master - * M_SERVER_SHUTDOWN - * Master is processing shutdown of a RS + * Master controlled events to be executed on the master M_SERVER_SHUTDOWN Master is processing + * shutdown of a RS */ - M_SERVER_SHUTDOWN (70, ExecutorType.MASTER_SERVER_OPERATIONS), + M_SERVER_SHUTDOWN(70, ExecutorType.MASTER_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
        * M_META_SERVER_SHUTDOWN
        * Master is processing shutdown of RS hosting a meta region (-ROOT- or hbase:meta). */ - M_META_SERVER_SHUTDOWN (72, ExecutorType.MASTER_META_SERVER_OPERATIONS), + M_META_SERVER_SHUTDOWN(72, ExecutorType.MASTER_META_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
        - * * M_MASTER_RECOVERY
        * Master is processing recovery of regions found in ZK RIT */ - M_MASTER_RECOVERY (73, ExecutorType.MASTER_SERVER_OPERATIONS), + M_MASTER_RECOVERY(73, ExecutorType.MASTER_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
        - * * M_LOG_REPLAY
        * Master is processing log replay of failed region server */ - M_LOG_REPLAY (74, ExecutorType.M_LOG_REPLAY_OPS), + M_LOG_REPLAY(74, ExecutorType.M_LOG_REPLAY_OPS), /** * RS controlled events to be executed on the RS.
        - * * RS_PARALLEL_SEEK */ - RS_PARALLEL_SEEK (80, ExecutorType.RS_PARALLEL_SEEK), + RS_PARALLEL_SEEK(80, ExecutorType.RS_PARALLEL_SEEK), /** * RS wal recovery work items (splitting wals) to be executed on the RS.
        - * * RS_LOG_REPLAY */ - RS_LOG_REPLAY (81, ExecutorType.RS_LOG_REPLAY_OPS), + RS_LOG_REPLAY(81, ExecutorType.RS_LOG_REPLAY_OPS), /** * RS flush triggering from secondary region replicas to primary region replica.
        - * * RS_REGION_REPLICA_FLUSH */ - RS_REGION_REPLICA_FLUSH (82, ExecutorType.RS_REGION_REPLICA_FLUSH_OPS), + RS_REGION_REPLICA_FLUSH(82, ExecutorType.RS_REGION_REPLICA_FLUSH_OPS), /** * RS compacted files discharger
        - * * RS_COMPACTED_FILES_DISCHARGER */ - RS_COMPACTED_FILES_DISCHARGER (83, ExecutorType.RS_COMPACTED_FILES_DISCHARGER), + RS_COMPACTED_FILES_DISCHARGER(83, ExecutorType.RS_COMPACTED_FILES_DISCHARGER), /** * RS refresh peer.
        - * * RS_REFRESH_PEER */ RS_REFRESH_PEER(84, ExecutorType.RS_REFRESH_PEER), /** * RS replay sync replication wal.
        - * * RS_REPLAY_SYNC_REPLICATION_WAL */ RS_REPLAY_SYNC_REPLICATION_WAL(85, ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL), /** * RS claim replication queue.
        - * * RS_CLAIM_REPLICATION_QUEUE */ RS_CLAIM_REPLICATION_QUEUE(86, ExecutorType.RS_CLAIM_REPLICATION_QUEUE), /** - * RS snapshot regions.
        - * - * RS_SNAPSHOT_REGIONS + * RS snapshot regions.
        + * RS_SNAPSHOT_REGIONS */ RS_SNAPSHOT_REGIONS(87, ExecutorType.RS_SNAPSHOT_OPERATIONS), /** - * RS verify snapshot.
        - * - * RS_VERIFY_SNAPSHOT + * RS verify snapshot.
        + * RS_VERIFY_SNAPSHOT */ RS_VERIFY_SNAPSHOT(88, ExecutorType.RS_SNAPSHOT_OPERATIONS); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java index cc36b957c4cc..fe8ffe645e29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,15 +45,15 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * This is a generic executor service. This component abstracts a - * threadpool, a queue to which {@link EventType}s can be submitted, - * and a Runnable that handles the object that is added to the queue. - * - *

        In order to create a new service, create an instance of this class and - * then do: instance.startExecutorService(executorConfig);. {@link ExecutorConfig} - * wraps the configuration needed by this service. When done call {@link #shutdown()}. - * - *

        In order to use the service created above, call {@link #submit(EventHandler)}. + * This is a generic executor service. This component abstracts a threadpool, a queue to which + * {@link EventType}s can be submitted, and a Runnable that handles the object that is + * added to the queue. + *

        + * In order to create a new service, create an instance of this class and then do: + * instance.startExecutorService(executorConfig);. {@link ExecutorConfig} wraps the + * configuration needed by this service. When done call {@link #shutdown()}. + *

        + * In order to use the service created above, call {@link #submit(EventHandler)}. */ @InterfaceAudience.Private public class ExecutorService { @@ -66,9 +65,9 @@ public class ExecutorService { // Name of the server hosting this executor service. private final String servername; - private final ListeningScheduledExecutorService delayedSubmitTimer = - MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() - .setDaemon(true).setNameFormat("Event-Executor-Delay-Submit-Timer").build())); + private final ListeningScheduledExecutorService delayedSubmitTimer = MoreExecutors + .listeningDecorator(Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() + .setDaemon(true).setNameFormat("Event-Executor-Delay-Submit-Timer").build())); /** * Default constructor. @@ -79,24 +78,22 @@ public ExecutorService(final String servername) { } /** - * Start an executor service with a given name. If there was a service already - * started with the same name, this throws a RuntimeException. + * Start an executor service with a given name. If there was a service already started with the + * same name, this throws a RuntimeException. * @param config Configuration to use for the executor. */ public void startExecutorService(final ExecutorConfig config) { final String name = config.getName(); Executor hbes = this.executorMap.compute(name, (key, value) -> { if (value != null) { - throw new RuntimeException("An executor service with the name " + key + - " is already running!"); + throw new RuntimeException( + "An executor service with the name " + key + " is already running!"); } return new Executor(config); }); - LOG.debug( - "Starting executor service name={}, corePoolSize={}, maxPoolSize={}", - name, hbes.threadPoolExecutor.getCorePoolSize(), - hbes.threadPoolExecutor.getMaximumPoolSize()); + LOG.debug("Starting executor service name={}, corePoolSize={}, maxPoolSize={}", name, + hbes.threadPoolExecutor.getCorePoolSize(), hbes.threadPoolExecutor.getMaximumPoolSize()); } boolean isExecutorServiceRunning(String name) { @@ -105,9 +102,8 @@ boolean isExecutorServiceRunning(String name) { public void shutdown() { this.delayedSubmitTimer.shutdownNow(); - for(Entry entry: this.executorMap.entrySet()) { - List wasRunning = - entry.getValue().threadPoolExecutor.shutdownNow(); + for (Entry entry : this.executorMap.entrySet()) { + List wasRunning = entry.getValue().threadPoolExecutor.shutdownNow(); if (!wasRunning.isEmpty()) { LOG.info(entry.getValue() + " had " + wasRunning + " on shutdown"); } @@ -133,8 +129,8 @@ public ThreadPoolExecutor getExecutorThreadPool(final ExecutorType type) { * {@link ExecutorService#startExecutorService(ExecutorConfig)} */ public ThreadPoolExecutor getExecutorLazily(ExecutorConfig config) { - return executorMap.computeIfAbsent(config.getName(), (executorName) -> - new Executor(config)).getThreadPoolExecutor(); + return executorMap.computeIfAbsent(config.getName(), (executorName) -> new Executor(config)) + .getThreadPoolExecutor(); } public void submit(final EventHandler eh) { @@ -143,8 +139,8 @@ public void submit(final EventHandler eh) { // This happens only when events are submitted after shutdown() was // called, so dropping them should be "ok" since it means we're // shutting down. - LOG.error("Cannot submit [" + eh + "] because the executor is missing." + - " Is this process shutting down?"); + LOG.error("Cannot submit [" + eh + "] because the executor is missing." 
+ + " Is this process shutting down?"); } else { executor.submit(eh); } @@ -206,9 +202,9 @@ public boolean allowCoreThreadTimeout() { } /** - * Allows timing out of core threads. Good to set this for non-critical thread pools for - * release of unused resources. Refer to {@link ThreadPoolExecutor#allowCoreThreadTimeOut} - * for additional details. + * Allows timing out of core threads. Good to set this for non-critical thread pools for release + * of unused resources. Refer to {@link ThreadPoolExecutor#allowCoreThreadTimeOut} for + * additional details. */ public ExecutorConfig setAllowCoreThreadTimeout(boolean allowCoreThreadTimeout) { this.allowCoreThreadTimeout = allowCoreThreadTimeout; @@ -250,8 +246,8 @@ protected Executor(ExecutorConfig config) { // create the thread pool executor this.threadPoolExecutor = new TrackingThreadPoolExecutor( // setting maxPoolSize > corePoolSize has no effect since we use an unbounded task queue. - config.getCorePoolSize(), config.getCorePoolSize(), - config.getKeepAliveTimeMillis(), TimeUnit.MILLISECONDS, q); + config.getCorePoolSize(), config.getCorePoolSize(), config.getKeepAliveTimeMillis(), + TimeUnit.MILLISECONDS, q); this.threadPoolExecutor.allowCoreThreadTimeOut(config.allowCoreThreadTimeout()); // name the threads for this threadpool ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); @@ -286,18 +282,17 @@ public ExecutorStatus getStatus() { LOG.warn("Non-EventHandler " + r + " queued in " + name); continue; } - queuedEvents.add((EventHandler)r); + queuedEvents.add((EventHandler) r); } List running = Lists.newArrayList(); - for (Map.Entry e : - threadPoolExecutor.getRunningTasks().entrySet()) { + for (Map.Entry e : threadPoolExecutor.getRunningTasks().entrySet()) { Runnable r = e.getValue(); if (!(r instanceof EventHandler)) { LOG.warn("Non-EventHandler " + r + " running in " + name); continue; } - running.add(new RunningEventStatus(e.getKey(), (EventHandler)r)); + running.add(new RunningEventStatus(e.getKey(), (EventHandler) r)); } return new ExecutorStatus(this, queuedEvents, running); @@ -305,14 +300,14 @@ public ExecutorStatus getStatus() { } /** - * A subclass of ThreadPoolExecutor that keeps track of the Runnables that - * are executing at any given point in time. + * A subclass of ThreadPoolExecutor that keeps track of the Runnables that are executing at any + * given point in time. */ static class TrackingThreadPoolExecutor extends ThreadPoolExecutor { private ConcurrentMap running = Maps.newConcurrentMap(); - public TrackingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, - long keepAliveTime, TimeUnit unit, BlockingQueue workQueue) { + public TrackingThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, + TimeUnit unit, BlockingQueue workQueue) { super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue); } @@ -330,10 +325,9 @@ protected void beforeExecute(Thread t, Runnable r) { } /** - * @return a map of the threads currently running tasks - * inside this executor. Each key is an active thread, - * and the value is the task that is currently running. - * Note that this is not a stable snapshot of the map. + * @return a map of the threads currently running tasks inside this executor. Each key is an + * active thread, and the value is the task that is currently running. Note that this is + * not a stable snapshot of the map. 
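(Editor's note, not part of the patch: the class Javadoc above describes the lifecycle of this service — construct it, call startExecutorService(config), submit handlers, then shutdown(). A hedged sketch follows; the ExecutorConfig is taken as a parameter because this hunk does not show how it is built, and referring to it as a nested class of ExecutorService is an assumption of the sketch.)

void runOneHandler(ExecutorService.ExecutorConfig config, EventHandler handler) {
  ExecutorService executorService = new ExecutorService("regionserver-1"); // server name label
  executorService.startExecutorService(config); // throws RuntimeException if that name is already running
  executorService.submit(handler);              // queued on the executor registered for it
  executorService.shutdown();                   // stops the delayed-submit timer and all pools
}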
*/ public ConcurrentMap getRunningTasks() { return running; @@ -341,19 +335,16 @@ public ConcurrentMap getRunningTasks() { } /** - * A snapshot of the status of a particular executor. This includes - * the contents of the executor's pending queue, as well as the - * threads and events currently being processed. - * - * This is a consistent snapshot that is immutable once constructed. + * A snapshot of the status of a particular executor. This includes the contents of the executor's + * pending queue, as well as the threads and events currently being processed. This is a + * consistent snapshot that is immutable once constructed. */ public static class ExecutorStatus { final Executor executor; final List queuedEvents; final List running; - ExecutorStatus(Executor executor, - List queuedEvents, + ExecutorStatus(Executor executor, List queuedEvents, List running) { this.executor = executor; this.queuedEvents = queuedEvents; @@ -369,17 +360,14 @@ public List getRunning() { } /** - * Dump a textual representation of the executor's status - * to the given writer. - * + * Dump a textual representation of the executor's status to the given writer. * @param out the stream to write to * @param indent a string prefix for each line, used for indentation */ public void dumpTo(Writer out, String indent) throws IOException { out.write(indent + "Status for executor: " + executor + "\n"); out.write(indent + "=======================================\n"); - out.write(indent + queuedEvents.size() + " events queued, " + - running.size() + " running\n"); + out.write(indent + queuedEvents.size() + " events queued, " + running.size() + " running\n"); if (!queuedEvents.isEmpty()) { out.write(indent + "Queued:\n"); for (EventHandler e : queuedEvents) { @@ -390,11 +378,9 @@ public void dumpTo(Writer out, String indent) throws IOException { if (!running.isEmpty()) { out.write(indent + "Running:\n"); for (RunningEventStatus stat : running) { - out.write(indent + " Running on thread '" + - stat.threadInfo.getThreadName() + - "': " + stat.event + "\n"); - out.write(ThreadMonitoring.formatThreadInfo( - stat.threadInfo, indent + " ")); + out.write(indent + " Running on thread '" + stat.threadInfo.getThreadName() + "': " + + stat.event + "\n"); + out.write(ThreadMonitoring.formatThreadInfo(stat.threadInfo, indent + " ")); out.write("\n"); } } @@ -403,8 +389,7 @@ public void dumpTo(Writer out, String indent) throws IOException { } /** - * The status of a particular event that is in the middle of being - * handled by an executor. + * The status of a particular event that is in the middle of being handled by an executor. */ public static class RunningEventStatus { final ThreadInfo threadInfo; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index cbecb3e8619f..46afd9925ccb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,41 +20,23 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * The following is a list of all executor types, both those that run in the - * master and those that run in the regionserver. 
+ * The following is a list of all executor types, both those that run in the master and those that + * run in the regionserver. */ @InterfaceAudience.Private public enum ExecutorType { // Master executor services - MASTER_CLOSE_REGION (1), - MASTER_OPEN_REGION (2), - MASTER_SERVER_OPERATIONS (3), - MASTER_TABLE_OPERATIONS (4), - MASTER_RS_SHUTDOWN (5), - MASTER_META_SERVER_OPERATIONS (6), - M_LOG_REPLAY_OPS (7), - MASTER_SNAPSHOT_OPERATIONS (8), - MASTER_MERGE_OPERATIONS (9), + MASTER_CLOSE_REGION(1), MASTER_OPEN_REGION(2), MASTER_SERVER_OPERATIONS(3), + MASTER_TABLE_OPERATIONS(4), MASTER_RS_SHUTDOWN(5), MASTER_META_SERVER_OPERATIONS(6), + M_LOG_REPLAY_OPS(7), MASTER_SNAPSHOT_OPERATIONS(8), MASTER_MERGE_OPERATIONS(9), // RegionServer executor services - RS_OPEN_REGION (20), - RS_OPEN_ROOT (21), - RS_OPEN_META (22), - RS_CLOSE_REGION (23), - RS_CLOSE_ROOT (24), - RS_CLOSE_META (25), - RS_PARALLEL_SEEK (26), - RS_LOG_REPLAY_OPS (27), - RS_REGION_REPLICA_FLUSH_OPS (28), - RS_COMPACTED_FILES_DISCHARGER (29), - RS_OPEN_PRIORITY_REGION (30), - RS_REFRESH_PEER(31), - RS_REPLAY_SYNC_REPLICATION_WAL(32), - RS_SWITCH_RPC_THROTTLE(33), - RS_IN_MEMORY_COMPACTION(34), - RS_CLAIM_REPLICATION_QUEUE(35), - RS_SNAPSHOT_OPERATIONS(36); + RS_OPEN_REGION(20), RS_OPEN_ROOT(21), RS_OPEN_META(22), RS_CLOSE_REGION(23), RS_CLOSE_ROOT(24), + RS_CLOSE_META(25), RS_PARALLEL_SEEK(26), RS_LOG_REPLAY_OPS(27), RS_REGION_REPLICA_FLUSH_OPS(28), + RS_COMPACTED_FILES_DISCHARGER(29), RS_OPEN_PRIORITY_REGION(30), RS_REFRESH_PEER(31), + RS_REPLAY_SYNC_REPLICATION_WAL(32), RS_SWITCH_RPC_THROTTLE(33), RS_IN_MEMORY_COMPACTION(34), + RS_CLAIM_REPLICATION_QUEUE(35), RS_SNAPSHOT_OPERATIONS(36); ExecutorType(int value) { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java index b127493fc5c2..1678e649710a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,20 +20,19 @@ import java.io.IOException; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; /** - * This is a Filter wrapper class which is used in the server side. Some filter - * related hooks can be defined in this wrapper. The only way to create a - * FilterWrapper instance is passing a client side Filter instance through - * {@link org.apache.hadoop.hbase.client.Scan#getFilter()}. - * + * This is a Filter wrapper class which is used in the server side. Some filter related hooks can be + * defined in this wrapper. The only way to create a FilterWrapper instance is passing a client side + * Filter instance through {@link org.apache.hadoop.hbase.client.Scan#getFilter()}. 
*/ @InterfaceAudience.Private final public class FilterWrapper extends Filter { @@ -55,8 +52,7 @@ public FilterWrapper(Filter filter) { */ @Override public byte[] toByteArray() throws IOException { - FilterProtos.FilterWrapper.Builder builder = - FilterProtos.FilterWrapper.newBuilder(); + FilterProtos.FilterWrapper.Builder builder = FilterProtos.FilterWrapper.newBuilder(); builder.setFilter(ProtobufUtil.toFilter(this.filter)); return builder.build().toByteArray(); } @@ -67,8 +63,7 @@ public byte[] toByteArray() throws IOException { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ - public static FilterWrapper parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static FilterWrapper parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FilterWrapper proto; try { proto = FilterProtos.FilterWrapper.parseFrom(pbBytes); @@ -129,17 +124,17 @@ public void filterRowCells(List kvs) throws IOException { } public enum FilterRowRetCode { - NOT_CALLED, - INCLUDE, // corresponds to filter.filterRow() returning false - EXCLUDE, // corresponds to filter.filterRow() returning true - INCLUDE_THIS_FAMILY // exclude other families + NOT_CALLED, INCLUDE, // corresponds to filter.filterRow() returning false + EXCLUDE, // corresponds to filter.filterRow() returning true + INCLUDE_THIS_FAMILY // exclude other families } + public FilterRowRetCode filterRowCellsWithRet(List kvs) throws IOException { - //To fix HBASE-6429, - //Filter with filterRow() returning true is incompatible with scan with limit - //1. hasFilterRow() returns true, if either filterRow() or filterRow(kvs) is implemented. - //2. filterRow() is merged with filterRow(kvs), - //so that to make all those row related filtering stuff in the same function. + // To fix HBASE-6429, + // Filter with filterRow() returning true is incompatible with scan with limit + // 1. hasFilterRow() returns true, if either filterRow() or filterRow(kvs) is implemented. + // 2. filterRow() is merged with filterRow(kvs), + // so that to make all those row related filtering stuff in the same function. this.filter.filterRowCells(kvs); if (!kvs.isEmpty()) { if (this.filter.filterRow()) { @@ -158,15 +153,15 @@ public boolean isFamilyEssential(byte[] name) throws IOException { /** * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized - * are equal to the corresponding fields in other. Used for testing. + * @return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof FilterWrapper)) return false; - FilterWrapper other = (FilterWrapper)o; + FilterWrapper other = (FilterWrapper) o; return this.filter.areSerializedFieldsEqual(other.filter); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java index eda59ed2d560..0ed3f874be85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
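(Editor's note, not part of the patch: a sketch of the protobuf round trip using the toByteArray()/parseFrom() pair shown above. PrefixFilter is only an example client-side filter, and the checked IOException and DeserializationException are left to the caller.)

Filter clientFilter = new PrefixFilter(Bytes.toBytes("row-"));
FilterWrapper wrapper = new FilterWrapper(clientFilter);
byte[] pb = wrapper.toByteArray();                    // serialized via FilterProtos.FilterWrapper
FilterWrapper restored = FilterWrapper.parseFrom(pb); // rebuilt on the server side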
See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.fs; import edu.umd.cs.findbugs.annotations.Nullable; @@ -57,27 +54,24 @@ import org.slf4j.LoggerFactory; /** - * An encapsulation for the FileSystem object that hbase uses to access - * data. This class allows the flexibility of using - * separate filesystem objects for reading and writing hfiles and wals. + * An encapsulation for the FileSystem object that hbase uses to access data. This class allows the + * flexibility of using separate filesystem objects for reading and writing hfiles and wals. */ @InterfaceAudience.Private public class HFileSystem extends FilterFileSystem { public static final Logger LOG = LoggerFactory.getLogger(HFileSystem.class); - private final FileSystem noChecksumFs; // read hfile data from storage + private final FileSystem noChecksumFs; // read hfile data from storage private final boolean useHBaseChecksum; private static volatile byte unspecifiedStoragePolicyId = Byte.MIN_VALUE; /** * Create a FileSystem object for HBase regionservers. * @param conf The configuration to be used for the filesystem - * @param useHBaseChecksum if true, then use - * checksum verfication in hbase, otherwise - * delegate checksum verification to the FileSystem. + * @param useHBaseChecksum if true, then use checksum verfication in hbase, otherwise delegate + * checksum verification to the FileSystem. */ - public HFileSystem(Configuration conf, boolean useHBaseChecksum) - throws IOException { + public HFileSystem(Configuration conf, boolean useHBaseChecksum) throws IOException { // Create the default filesystem with checksum verification switched on. // By default, any operation to this FilterFileSystem occurs on @@ -120,9 +114,8 @@ public HFileSystem(Configuration conf, boolean useHBaseChecksum) } /** - * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and - * writefs are both set to be the same specified fs. - * Do not verify hbase-checksums while reading data from filesystem. + * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and writefs are both set to be + * the same specified fs. Do not verify hbase-checksums while reading data from filesystem. * @param fs Set the noChecksumFs and writeFs to this specified filesystem. */ public HFileSystem(FileSystem fs) { @@ -132,11 +125,9 @@ public HFileSystem(FileSystem fs) { } /** - * Returns the filesystem that is specially setup for - * doing reads from storage. This object avoids doing - * checksum verifications for reads. - * @return The FileSystem object that can be used to read data - * from files. + * Returns the filesystem that is specially setup for doing reads from storage. This object avoids + * doing checksum verifications for reads. + * @return The FileSystem object that can be used to read data from files. */ public FileSystem getNoChecksumFs() { return noChecksumFs; @@ -153,9 +144,9 @@ public FileSystem getBackingFs() throws IOException { /** * Set the source path (directory/file) to the specified storage policy. * @param path The source path (directory/file). - * @param policyName The name of the storage policy: 'HOT', 'COLD', etc. - * See see hadoop 2.6+ org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g - * 'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'. + * @param policyName The name of the storage policy: 'HOT', 'COLD', etc. 
See see hadoop 2.6+ + * org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g 'COLD', 'WARM', + * 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'. */ public void setStoragePolicy(Path path, String policyName) { CommonFSUtils.setStoragePolicy(this.fs, path, policyName); @@ -221,8 +212,7 @@ private String getStoragePolicyForOldHDFSVersion(Path path) { /** * Are we verifying checksums in HBase? - * @return True, if hbase is configured to verify checksums, - * otherwise false. + * @return True, if hbase is configured to verify checksums, otherwise false. */ public boolean useHBaseChecksum() { return useHBaseChecksum; @@ -240,10 +230,8 @@ public void close() throws IOException { } /** - * Returns a brand new instance of the FileSystem. It does not use - * the FileSystem.Cache. In newer versions of HDFS, we can directly - * invoke FileSystem.newInstance(Configuration). - * + * Returns a brand new instance of the FileSystem. It does not use the FileSystem.Cache. In newer + * versions of HDFS, we can directly invoke FileSystem.newInstance(Configuration). * @param conf Configuration * @return A new instance of the filesystem */ @@ -271,9 +259,9 @@ private static FileSystem newInstanceFileSystem(Configuration conf) throws IOExc } /** - * Returns an instance of Filesystem wrapped into the class specified in - * hbase.fs.wrapper property, if one is set in the configuration, returns - * unmodified FS instance passed in as an argument otherwise. + * Returns an instance of Filesystem wrapped into the class specified in hbase.fs.wrapper + * property, if one is set in the configuration, returns unmodified FS instance passed in as an + * argument otherwise. * @param base Filesystem instance to wrap * @param conf Configuration * @return wrapped instance of FS, or the same instance if no wrapping configured. @@ -283,7 +271,7 @@ private FileSystem maybeWrapFileSystem(FileSystem base, Configuration conf) { Class clazz = conf.getClass("hbase.fs.wrapper", null); if (clazz != null) { return (FileSystem) clazz.getConstructor(FileSystem.class, Configuration.class) - .newInstance(base, conf); + .newInstance(base, conf); } } catch (Exception e) { LOG.error("Failed to wrap filesystem: " + e); @@ -296,15 +284,14 @@ public static boolean addLocationsOrderInterceptor(Configuration conf) throws IO } /** - * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient - * linked to this FileSystem. See HBASE-6435 for the background. + * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient linked to + * this FileSystem. See HBASE-6435 for the background. *
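(Editor's note, not part of the patch: a sketch of the constructor and helpers shown above; the table path and the 'HOT' policy are illustrative.)

Configuration conf = HBaseConfiguration.create();
HFileSystem hfs = new HFileSystem(conf, true);             // let HBase verify checksums itself
FileSystem readFs = hfs.getNoChecksumFs();                 // stream used for checksum-free reads
hfs.setStoragePolicy(new Path("/hbase/data/default/t1/cf"), "HOT");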

        * There should be no reason, except testing, to create a specific ReorderBlocks. - * * @return true if the interceptor was added, false otherwise. */ static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) { - if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) { // activated by default + if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) { // activated by default LOG.debug("addLocationsOrderInterceptor configured to false"); return false; } @@ -318,17 +305,16 @@ static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlo } if (!(fs instanceof DistributedFileSystem)) { - LOG.debug("The file system is not a DistributedFileSystem. " + - "Skipping on block location reordering"); + LOG.debug("The file system is not a DistributedFileSystem. " + + "Skipping on block location reordering"); return false; } DistributedFileSystem dfs = (DistributedFileSystem) fs; DFSClient dfsc = dfs.getClient(); if (dfsc == null) { - LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location " + - "block reordering interceptor. Continuing, but this is unexpected." - ); + LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location " + + "block reordering interceptor. Continuing, but this is unexpected."); return false; } @@ -341,16 +327,15 @@ static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlo ClientProtocol namenode = (ClientProtocol) nf.get(dfsc); if (namenode == null) { - LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" + - " reordering interceptor. Continuing, but this is unexpected." - ); + LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" + + " reordering interceptor. 
Continuing, but this is unexpected."); return false; } ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf); nf.set(dfsc, cp1); - LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" + - " using class " + lrb.getClass().getName()); + LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" + + " using class " + lrb.getClass().getName()); } catch (NoSuchFieldException e) { LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e); return false; @@ -365,42 +350,40 @@ static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlo private static ClientProtocol createReorderingProxy(final ClientProtocol cp, final ReorderBlocks lrb, final Configuration conf) { return (ClientProtocol) Proxy.newProxyInstance(cp.getClass().getClassLoader(), - new Class[]{ClientProtocol.class, Closeable.class}, new InvocationHandler() { - @Override - public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { - try { - if ((args == null || args.length == 0) && "close".equals(method.getName())) { - RPC.stopProxy(cp); - return null; - } else { - Object res = method.invoke(cp, args); - if (res != null && args != null && args.length == 3 - && "getBlockLocations".equals(method.getName()) - && res instanceof LocatedBlocks - && args[0] instanceof String - && args[0] != null) { - lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]); - } - return res; - } - } catch (InvocationTargetException ite) { - // We will have this for all the exception, checked on not, sent - // by any layer, including the functional exception - Throwable cause = ite.getCause(); - if (cause == null){ - throw new RuntimeException("Proxy invocation failed and getCause is null", ite); + new Class[] { ClientProtocol.class, Closeable.class }, new InvocationHandler() { + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + try { + if ((args == null || args.length == 0) && "close".equals(method.getName())) { + RPC.stopProxy(cp); + return null; + } else { + Object res = method.invoke(cp, args); + if (res != null && args != null && args.length == 3 + && "getBlockLocations".equals(method.getName()) && res instanceof LocatedBlocks + && args[0] instanceof String && args[0] != null) { + lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]); } - if (cause instanceof UndeclaredThrowableException) { - Throwable causeCause = cause.getCause(); - if (causeCause == null) { - throw new RuntimeException("UndeclaredThrowableException had null cause!"); - } - cause = cause.getCause(); + return res; + } + } catch (InvocationTargetException ite) { + // We will have this for all the exception, checked on not, sent + // by any layer, including the functional exception + Throwable cause = ite.getCause(); + if (cause == null) { + throw new RuntimeException("Proxy invocation failed and getCause is null", ite); + } + if (cause instanceof UndeclaredThrowableException) { + Throwable causeCause = cause.getCause(); + if (causeCause == null) { + throw new RuntimeException("UndeclaredThrowableException had null cause!"); } - throw cause; + cause = cause.getCause(); } + throw cause; } - }); + } + }); } /** @@ -408,7 +391,6 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl */ interface ReorderBlocks { /** - * * @param conf - the conf to use * @param lbs - the LocatedBlocks to reorder * @param src - the file name currently read @@ -418,9 
+400,9 @@ interface ReorderBlocks { } /** - * We're putting at lowest priority the wal files blocks that are on the same datanode - * as the original regionserver which created these files. This because we fear that the - * datanode is actually dead, so if we use it it will timeout. + * We're putting at lowest priority the wal files blocks that are on the same datanode as the + * original regionserver which created these files. This because we fear that the datanode is + * actually dead, so if we use it it will timeout. */ static class ReorderWALBlocks implements ReorderBlocks { @Override @@ -436,8 +418,7 @@ public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) // Ok, so it's an WAL String hostName = sn.getHostname(); if (LOG.isTraceEnabled()) { - LOG.trace(src + - " is an WAL file, so reordering blocks, last hostname will be:" + hostName); + LOG.trace(src + " is an WAL file, so reordering blocks, last hostname will be:" + hostName); } // Just check for all blocks @@ -460,10 +441,9 @@ public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) } /** - * Create a new HFileSystem object, similar to FileSystem.get(). - * This returns a filesystem object that avoids checksum - * verification in the filesystem for hfileblock-reads. - * For these blocks, checksum verification is done by HBase. + * Create a new HFileSystem object, similar to FileSystem.get(). This returns a filesystem object + * that avoids checksum verification in the filesystem for hfileblock-reads. For these blocks, + * checksum verification is done by HBase. */ static public FileSystem get(Configuration conf) throws IOException { return new HFileSystem(conf, true); @@ -477,17 +457,13 @@ static public FileSystem getLocalFs(Configuration conf) throws IOException { } /** - * The org.apache.hadoop.fs.FilterFileSystem does not yet support - * createNonRecursive. This is a hadoop bug and when it is fixed in Hadoop, - * this definition will go away. + * The org.apache.hadoop.fs.FilterFileSystem does not yet support createNonRecursive. This is a + * hadoop bug and when it is fixed in Hadoop, this definition will go away. */ @Override @SuppressWarnings("deprecation") - public FSDataOutputStream createNonRecursive(Path f, - boolean overwrite, - int bufferSize, short replication, long blockSize, - Progressable progress) throws IOException { - return fs.createNonRecursive(f, overwrite, bufferSize, replication, - blockSize, progress); + public FSDataOutputStream createNonRecursive(Path f, boolean overwrite, int bufferSize, + short replication, long blockSize, Progressable progress) throws IOException { + return fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, progress); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java index 5bbc525b8459..3a89e9254a29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,9 +34,9 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables; /** - * Wrapper for input stream(s) that takes care of the interaction of FS and HBase checksums, - * as well as closing streams. Initialization is not thread-safe, but normal operation is; - * see method comments. + * Wrapper for input stream(s) that takes care of the interaction of FS and HBase checksums, as well + * as closing streams. Initialization is not thread-safe, but normal operation is; see method + * comments. */ @InterfaceAudience.Private public class FSDataInputStreamWrapper implements Closeable { @@ -50,25 +50,23 @@ public class FSDataInputStreamWrapper implements Closeable { private final boolean dropBehind; private final long readahead; - /** Two stream handles, one with and one without FS-level checksum. - * HDFS checksum setting is on FS level, not single read level, so you have to keep two - * FS objects and two handles open to interleave different reads freely, which is very sad. - * This is what we do: - * 1) First, we need to read the trailer of HFile to determine checksum parameters. - * We always use FS checksum to do that, so ctor opens {@link #stream}. - * 2.1) After that, if HBase checksum is not used, we'd just always use {@link #stream}; - * 2.2) If HBase checksum can be used, we'll open {@link #streamNoFsChecksum}, - * and close {@link #stream}. User MUST call prepareForBlockReader for that to happen; - * if they don't, (2.1) will be the default. - * 3) The users can call {@link #shouldUseHBaseChecksum()}, and pass its result to - * {@link #getStream(boolean)} to get stream (if Java had out/pointer params we could - * return both in one call). This stream is guaranteed to be set. - * 4) The first time HBase checksum fails, one would call {@link #fallbackToFsChecksum(int)}. - * That will take lock, and open {@link #stream}. While this is going on, others will - * continue to use the old stream; if they also want to fall back, they'll also call - * {@link #fallbackToFsChecksum(int)}, and block until {@link #stream} is set. - * 5) After some number of checksumOk() calls, we will go back to using HBase checksum. - * We will have 2 handles; however we presume checksums fail so rarely that we don't care. + /** + * Two stream handles, one with and one without FS-level checksum. HDFS checksum setting is on FS + * level, not single read level, so you have to keep two FS objects and two handles open to + * interleave different reads freely, which is very sad. This is what we do: 1) First, we need to + * read the trailer of HFile to determine checksum parameters. We always use FS checksum to do + * that, so ctor opens {@link #stream}. 2.1) After that, if HBase checksum is not used, we'd just + * always use {@link #stream}; 2.2) If HBase checksum can be used, we'll open + * {@link #streamNoFsChecksum}, and close {@link #stream}. User MUST call prepareForBlockReader + * for that to happen; if they don't, (2.1) will be the default. 3) The users can call + * {@link #shouldUseHBaseChecksum()}, and pass its result to {@link #getStream(boolean)} to get + * stream (if Java had out/pointer params we could return both in one call). This stream is + * guaranteed to be set. 4) The first time HBase checksum fails, one would call + * {@link #fallbackToFsChecksum(int)}. That will take lock, and open {@link #stream}. 
While this + * is going on, others will continue to use the old stream; if they also want to fall back, + * they'll also call {@link #fallbackToFsChecksum(int)}, and block until {@link #stream} is set. + * 5) After some number of checksumOk() calls, we will go back to using HBase checksum. We will + * have 2 handles; however we presume checksums fail so rarely that we don't care. */ private volatile FSDataInputStream stream = null; private volatile FSDataInputStream streamNoFsChecksum = null; @@ -103,12 +101,13 @@ public FSDataInputStreamWrapper(FileSystem fs, Path path) throws IOException { this(fs, path, false, -1L); } - public FSDataInputStreamWrapper(FileSystem fs, Path path, boolean dropBehind, long readahead) throws IOException { + public FSDataInputStreamWrapper(FileSystem fs, Path path, boolean dropBehind, long readahead) + throws IOException { this(fs, null, path, dropBehind, readahead); } - public FSDataInputStreamWrapper(FileSystem fs, FileLink link, - boolean dropBehind, long readahead) throws IOException { + public FSDataInputStreamWrapper(FileSystem fs, FileLink link, boolean dropBehind, long readahead) + throws IOException { this(fs, link, null, dropBehind, readahead); } @@ -147,9 +146,9 @@ private void setStreamOptions(FSDataInputStream in) { } /** - * Prepares the streams for block reader. NOT THREAD SAFE. Must be called once, after any - * reads finish and before any other reads start (what happens in reality is we read the - * tail, then call this based on what's in the tail, then read blocks). + * Prepares the streams for block reader. NOT THREAD SAFE. Must be called once, after any reads + * finish and before any other reads start (what happens in reality is we read the tail, then call + * this based on what's in the tail, then read blocks). * @param forceNoHBaseChecksum Force not using HBase checksum. */ public void prepareForBlockReader(boolean forceNoHBaseChecksum) throws IOException { @@ -196,8 +195,8 @@ public boolean shouldUseHBaseChecksum() { /** * Get the stream to use. Thread-safe. - * @param useHBaseChecksum must be the value that shouldUseHBaseChecksum has returned - * at some point in the past, otherwise the result is undefined. + * @param useHBaseChecksum must be the value that shouldUseHBaseChecksum has returned at some + * point in the past, otherwise the result is undefined. */ public FSDataInputStream getStream(boolean useHBaseChecksum) { return useHBaseChecksum ? this.streamNoFsChecksum : this.stream; @@ -239,20 +238,20 @@ private void updateInputStreamStatistics(FSDataInputStream stream) { // If the underlying file system is HDFS, update read statistics upon close. if (stream instanceof HdfsDataInputStream) { /** - * Because HDFS ReadStatistics is calculated per input stream, it is not - * feasible to update the aggregated number in real time. Instead, the - * metrics are updated when an input stream is closed. + * Because HDFS ReadStatistics is calculated per input stream, it is not feasible to update + * the aggregated number in real time. Instead, the metrics are updated when an input stream + * is closed. */ - HdfsDataInputStream hdfsDataInputStream = (HdfsDataInputStream)stream; + HdfsDataInputStream hdfsDataInputStream = (HdfsDataInputStream) stream; synchronized (readStatistics) { - readStatistics.totalBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalBytesRead(); - readStatistics.totalLocalBytesRead += hdfsDataInputStream.getReadStatistics(). 
- getTotalLocalBytesRead(); - readStatistics.totalShortCircuitBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalShortCircuitBytesRead(); - readStatistics.totalZeroCopyBytesRead += hdfsDataInputStream.getReadStatistics(). - getTotalZeroCopyBytesRead(); + readStatistics.totalBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalBytesRead(); + readStatistics.totalLocalBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalLocalBytesRead(); + readStatistics.totalShortCircuitBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalShortCircuitBytesRead(); + readStatistics.totalZeroCopyBytesRead += + hdfsDataInputStream.getReadStatistics().getTotalZeroCopyBytesRead(); } } } @@ -291,7 +290,6 @@ public void close() { // we do not care about the close exception as it is for reading, no data loss issue. Closeables.closeQuietly(streamNoFsChecksum); - updateInputStreamStatistics(stream); Closeables.closeQuietly(stream); } @@ -331,10 +329,11 @@ public void unbuffer() { if (this.instanceOfCanUnbuffer) { try { this.unbuffer.unbuffer(); - } catch (UnsupportedOperationException e){ + } catch (UnsupportedOperationException e) { if (isLogTraceEnabled) { LOG.trace("Failed to invoke 'unbuffer' method in class " + streamClass - + " . So there may be the stream does not support unbuffering.", e); + + " . So there may be the stream does not support unbuffering.", + e); } } } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java index ea285ed53fad..de4e884d7b1c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import java.io.FileNotFoundException; @@ -43,52 +42,34 @@ /** * The FileLink is a sort of hardlink, that allows access to a file given a set of locations. - * - *

        The Problem: + *

        + * The Problem: *

          - *
        • - * HDFS doesn't have support for hardlinks, and this make impossible to referencing - * the same data blocks using different names. - *
        • - *
        • - * HBase store files in one location (e.g. table/region/family/) and when the file is not - * needed anymore (e.g. compaction, region deletion, ...) moves it to an archive directory. - *
        • + *
• HDFS doesn't have support for hardlinks, and this makes it impossible to reference the same + * data blocks using different names.
        • + *
• HBase stores files in one location (e.g. table/region/family/) and when the file is not needed + * anymore (e.g. compaction, region deletion, ...) it moves the file to an archive directory.
        • *
        - * If we want to create a reference to a file, we need to remember that it can be in its - * original location or in the archive folder. - * The FileLink class tries to abstract this concept and given a set of locations - * it is able to switch between them making this operation transparent for the user. - * {@link HFileLink} is a more concrete implementation of the {@code FileLink}. - * - *

        Back-references: - * To help the {@link org.apache.hadoop.hbase.master.cleaner.CleanerChore} to keep track of - * the links to a particular file, during the {@code FileLink} creation, a new file is placed - * inside a back-reference directory. There's one back-reference directory for each file that - * has links, and in the directory there's one file per link. - * - *

        HFileLink Example + * If we want to create a reference to a file, we need to remember that it can be in its original + * location or in the archive folder. The FileLink class tries to abstract this concept and given a + * set of locations it is able to switch between them making this operation transparent for the + * user. {@link HFileLink} is a more concrete implementation of the {@code FileLink}. + *

        + * Back-references: To help the {@link org.apache.hadoop.hbase.master.cleaner.CleanerChore} + * to keep track of the links to a particular file, during the {@code FileLink} creation, a new file + * is placed inside a back-reference directory. There's one back-reference directory for each file + * that has links, and in the directory there's one file per link. + *

        + * HFileLink Example *

          - *
        • - * /hbase/table/region-x/cf/file-k - * (Original File) - *
        • - *
        • - * /hbase/table-cloned/region-y/cf/file-k.region-x.table - * (HFileLink to the original file) - *
        • - *
        • - * /hbase/table-2nd-cloned/region-z/cf/file-k.region-x.table - * (HFileLink to the original file) - *
        • - *
        • - * /hbase/.archive/table/region-x/.links-file-k/region-y.table-cloned - * (Back-reference to the link in table-cloned) - *
        • - *
        • - * /hbase/.archive/table/region-x/.links-file-k/region-z.table-2nd-cloned - * (Back-reference to the link in table-2nd-cloned) - *
        • + *
        • /hbase/table/region-x/cf/file-k (Original File)
        • + *
        • /hbase/table-cloned/region-y/cf/file-k.region-x.table (HFileLink to the original file)
        • + *
        • /hbase/table-2nd-cloned/region-z/cf/file-k.region-x.table (HFileLink to the original file) + *
        • + *
        • /hbase/.archive/table/region-x/.links-file-k/region-y.table-cloned (Back-reference to the + * link in table-cloned)
        • + *
        • /hbase/.archive/table/region-x/.links-file-k/region-z.table-2nd-cloned (Back-reference to the + * link in table-2nd-cloned)
        • *
        */ @InterfaceAudience.Private @@ -99,8 +80,8 @@ public class FileLink { public static final String BACK_REFERENCES_DIRECTORY_PREFIX = ".links-"; /** - * FileLink InputStream that handles the switch between the original path - * and the alternative locations, when the file is moved. + * FileLink InputStream that handles the switch between the original path and the alternative + * locations, when the file is moved. */ private static class FileLinkInputStream extends InputStream implements Seekable, PositionedReadable, CanSetDropBehind, CanSetReadahead, CanUnbuffer { @@ -112,8 +93,7 @@ private static class FileLinkInputStream extends InputStream private final int bufferSize; private final FileSystem fs; - public FileLinkInputStream(final FileSystem fs, final FileLink fileLink) - throws IOException { + public FileLinkInputStream(final FileSystem fs, final FileLink fileLink) throws IOException { this(fs, fileLink, CommonFSUtils.getDefaultBufferSize(fs)); } @@ -148,7 +128,7 @@ public int read() throws IOException { @Override public int read(byte[] b) throws IOException { - return read(b, 0, b.length); + return read(b, 0, b.length); } @Override @@ -164,7 +144,7 @@ public int read(byte[] b, int off, int len) throws IOException { n = tryOpen().read(b, off, len); } if (n > 0) pos += n; - assert(in.getPos() == pos); + assert (in.getPos() == pos); return n; } @@ -296,18 +276,17 @@ public void unbuffer() { /** * Try to open the file from one of the available locations. - * * @return FSDataInputStream stream of the opened file link * @throws IOException on unexpected error, or file not found. */ private FSDataInputStream tryOpen() throws IOException { IOException exception = null; - for (Path path: fileLink.getLocations()) { + for (Path path : fileLink.getLocations()) { if (path.equals(currentPath)) continue; try { in = fs.open(path, bufferSize); if (pos != 0) in.seek(pos); - assert(in.getPos() == pos) : "Link unable to seek to the right position=" + pos; + assert (in.getPos() == pos) : "Link unable to seek to the right position=" + pos; if (LOG.isTraceEnabled()) { if (currentPath == null) { LOG.debug("link open path=" + path); @@ -316,7 +295,7 @@ private FSDataInputStream tryOpen() throws IOException { } } currentPath = path; - return(in); + return (in); } catch (FileNotFoundException | AccessControlException | RemoteException e) { exception = FileLink.handleAccessLocationException(fileLink, e, exception); } @@ -401,7 +380,6 @@ public Path getAvailablePath(FileSystem fs) throws IOException { /** * Get the FileStatus of the referenced file. - * * @param fs {@link FileSystem} on which to get the file status * @return InputStream for the hfile link. * @throws IOException on unexpected error. @@ -453,9 +431,8 @@ private static IOException handleAccessLocationException(FileLink fileLink, /** * Open the FileLink for read. *

        - * It uses a wrapper of FSDataInputStream that is agnostic to the location - * of the file, even if the file switches between locations. - * + * It uses a wrapper of FSDataInputStream that is agnostic to the location of the file, even if + * the file switches between locations. * @param fs {@link FileSystem} on which to open the FileLink * @return InputStream for reading the file link. * @throws IOException on unexpected error. @@ -467,9 +444,8 @@ public FSDataInputStream open(final FileSystem fs) throws IOException { /** * Open the FileLink for read. *

        - * It uses a wrapper of FSDataInputStream that is agnostic to the location - * of the file, even if the file switches between locations. - * + * It uses a wrapper of FSDataInputStream that is agnostic to the location of the file, even if + * the file switches between locations. * @param fs {@link FileSystem} on which to open the FileLink * @param bufferSize the size of the buffer to be used. * @return InputStream for reading the file link. @@ -480,8 +456,8 @@ public FSDataInputStream open(final FileSystem fs, int bufferSize) throws IOExce } /** - * If the passed FSDataInputStream is backed by a FileLink, returns the underlying - * InputStream for the resolved link target. Otherwise, returns null. + * If the passed FSDataInputStream is backed by a FileLink, returns the underlying InputStream for + * the resolved link target. Otherwise, returns null. */ public static FSDataInputStream getUnderlyingFileLinkInputStream(FSDataInputStream stream) { if (stream.getWrappedStream() instanceof FileLinkInputStream) { @@ -491,13 +467,13 @@ public static FSDataInputStream getUnderlyingFileLinkInputStream(FSDataInputStre } /** - * NOTE: This method must be used only in the constructor! - * It creates a List with the specified locations for the link. + * NOTE: This method must be used only in the constructor! It creates a List with the specified + * locations for the link. */ protected void setLocations(Path originPath, Path... alternativePaths) { assert this.locations == null : "Link locations already set"; - List paths = new ArrayList<>(alternativePaths.length +1); + List paths = new ArrayList<>(alternativePaths.length + 1); if (originPath != null) { paths.add(originPath); } @@ -512,10 +488,9 @@ protected void setLocations(Path originPath, Path... alternativePaths) { /** * Get the directory to store the link back references - * - *

        To simplify the reference count process, during the FileLink creation - * a back-reference is added to the back-reference directory of the specified file. - * + *

        + * To simplify the reference count process, during the FileLink creation a back-reference is added + * to the back-reference directory of the specified file. * @param storeDir Root directory for the link reference folder * @param fileName File Name with links * @return Path for the link back references. @@ -526,7 +501,6 @@ public static Path getBackReferencesDir(final Path storeDir, final String fileNa /** * Get the referenced file name from the reference link directory path. - * * @param dirPath Link references directory path * @return Name of the file referenced */ @@ -566,4 +540,3 @@ public int hashCode() { return Arrays.hashCode(locations); } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index fbed724a207e..14a36cc75a2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import java.io.IOException; @@ -39,59 +38,53 @@ import org.slf4j.LoggerFactory; /** - * HFileLink describes a link to an hfile. - * - * An hfile can be served from a region or from the hfile archive directory (/hbase/.archive) - * HFileLink allows to access the referenced hfile regardless of the location where it is. - * - *

        Searches for hfiles in the following order and locations: + * HFileLink describes a link to an hfile. An hfile can be served from a region or from the hfile + * archive directory (/hbase/.archive) HFileLink allows to access the referenced hfile regardless of + * the location where it is. + *

        + * Searches for hfiles in the following order and locations: *

 - * /hbase/table/region/cf/hfile
 - * /hbase/.archive/table/region/cf/hfile
 + * /hbase/table/region/cf/hfile
 + * /hbase/.archive/table/region/cf/hfile
 + *
        - * - * The link checks first in the original path if it is not present - * it fallbacks to the archived path. + * The link checks first in the original path if it is not present it fallbacks to the archived + * path. */ @InterfaceAudience.Private -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS", - justification="To be fixed but warning suppressed for now") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_DOESNT_OVERRIDE_EQUALS", + justification = "To be fixed but warning suppressed for now") public class HFileLink extends FileLink { private static final Logger LOG = LoggerFactory.getLogger(HFileLink.class); /** - * A non-capture group, for HFileLink, so that this can be embedded. - * The HFileLink describe a link to an hfile in a different table/region - * and the name is in the form: table=region-hfile. + * A non-capture group, for HFileLink, so that this can be embedded. The HFileLink describe a link + * to an hfile in a different table/region and the name is in the form: table=region-hfile. *

        * Table name is ([\p{IsAlphabetic}\p{Digit}][\p{IsAlphabetic}\p{Digit}.-]*), so '=' is an invalid - * character for the table name. - * Region name is ([a-f0-9]+), so '-' is an invalid character for the region name. - * HFile is ([0-9a-f]+(?:_SeqId_[0-9]+_)?) covering the plain hfiles (uuid) - * and the bulk loaded (_SeqId_[0-9]+_) hfiles. - * - *

        Here is an example name: /hbase/test/0123/cf/testtb=4567-abcd where 'testtb' is table name - * and '4567' is region name and 'abcd' is filename. + * character for the table name. Region name is ([a-f0-9]+), so '-' is an invalid character for + * the region name. HFile is ([0-9a-f]+(?:_SeqId_[0-9]+_)?) covering the plain hfiles (uuid) and + * the bulk loaded (_SeqId_[0-9]+_) hfiles. + *

        + * Here is an example name: /hbase/test/0123/cf/testtb=4567-abcd where 'testtb' is table name and + * '4567' is region name and 'abcd' is filename. */ - public static final String LINK_NAME_REGEX = - String.format("(?:(?:%s=)?)%s=%s-%s", - TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, - RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX); + public static final String LINK_NAME_REGEX = String.format("(?:(?:%s=)?)%s=%s-%s", + TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, + RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX); /** Define the HFile Link name parser in the form of: table=region-hfile */ public static final Pattern LINK_NAME_PATTERN = - Pattern.compile(String.format("^(?:(%s)(?:\\=))?(%s)=(%s)-(%s)$", - TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, - RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX)); + Pattern.compile(String.format("^(?:(%s)(?:\\=))?(%s)=(%s)-(%s)$", + TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, + RegionInfoBuilder.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX)); /** - * The pattern should be used for hfile and reference links - * that can be found in /hbase/table/region/family/ + * The pattern should be used for hfile and reference links that can be found in + * /hbase/table/region/family/ */ - private static final Pattern REF_OR_HFILE_LINK_PATTERN = - Pattern.compile(String.format("^(?:(%s)(?:=))?(%s)=(%s)-(.+)$", - TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, - RegionInfoBuilder.ENCODED_REGION_NAME_REGEX)); + private static final Pattern REF_OR_HFILE_LINK_PATTERN = Pattern + .compile(String.format("^(?:(%s)(?:=))?(%s)=(%s)-(.+)$", TableName.VALID_NAMESPACE_REGEX, + TableName.VALID_TABLE_QUALIFIER_REGEX, RegionInfoBuilder.ENCODED_REGION_NAME_REGEX)); private final Path archivePath; private final Path originPath; @@ -102,7 +95,7 @@ public class HFileLink extends FileLink { * Dead simple hfile link constructor */ public HFileLink(final Path originPath, final Path tempPath, final Path mobPath, - final Path archivePath) { + final Path archivePath) { this.tempPath = tempPath; this.originPath = originPath; this.mobPath = mobPath; @@ -110,28 +103,24 @@ public HFileLink(final Path originPath, final Path tempPath, final Path mobPath, setLocations(originPath, tempPath, mobPath, archivePath); } - /** * @param conf {@link Configuration} from which to extract specific archive locations * @param hFileLinkPattern The path ending with a HFileLink pattern. (table=region-hfile) * @throws IOException on unexpected error. */ public static final HFileLink buildFromHFileLinkPattern(Configuration conf, Path hFileLinkPattern) - throws IOException { + throws IOException { return buildFromHFileLinkPattern(CommonFSUtils.getRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), hFileLinkPattern); + HFileArchiveUtil.getArchivePath(conf), hFileLinkPattern); } - - /** * @param rootDir Path to the root directory where hbase files are stored * @param archiveDir Path to the hbase archive directory * @param hFileLinkPattern The path of the HFile Link. 
*/ - public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, - final Path archiveDir, - final Path hFileLinkPattern) { + public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, final Path archiveDir, + final Path hFileLinkPattern) { Path hfilePath = getHFileLinkPatternRelativePath(hFileLinkPattern); Path tempPath = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), hfilePath); Path originPath = new Path(rootDir, hfilePath); @@ -148,8 +137,8 @@ public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, * @param hfile HFile Name * @return the relative Path to open the specified table/region/family/hfile link */ - public static Path createPath(final TableName table, final String region, - final String family, final String hfile) { + public static Path createPath(final TableName table, final String region, final String family, + final String hfile) { if (HFileLink.isHFileLink(hfile)) { return new Path(family, hfile); } @@ -167,8 +156,7 @@ public static Path createPath(final TableName table, final String region, * @throws IOException on unexpected error. */ public static HFileLink build(final Configuration conf, final TableName table, - final String region, final String family, final String hfile) - throws IOException { + final String region, final String family, final String hfile) throws IOException { return HFileLink.buildFromHFileLinkPattern(conf, createPath(table, region, family, hfile)); } @@ -193,7 +181,7 @@ public Path getMobPath() { return this.mobPath; } - /** + /** * @param path Path to check. * @return True if the path is a HFileLink. */ @@ -214,10 +202,8 @@ public static boolean isHFileLink(String fileName) { } /** - * Convert a HFileLink path to a table relative path. - * e.g. the link: /hbase/test/0123/cf/testtb=4567-abcd - * becomes: /hbase/testtb/4567/cf/abcd - * + * Convert a HFileLink path to a table relative path. e.g. the link: + * /hbase/test/0123/cf/testtb=4567-abcd becomes: /hbase/testtb/4567/cf/abcd * @param path HFileLink path * @return Relative table path * @throws IOException on unexpected error. 
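A minimal sketch, for illustration of the table=region-hfile naming convention described above, assuming the parsing helpers that appear in the surrounding hunks (HFileLink.isHFileLink, getReferencedTableName, getReferencedRegionName, getReferencedHFileName). The name testtb=4567-abcd is the example from the javadoc; the expected values in the comments are assumptions, not captured output.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;

public class HFileLinkNameSketch {
  public static void main(String[] args) {
    // Example link name from the javadoc: table 'testtb', region '4567', hfile 'abcd'.
    String linkName = "testtb=4567-abcd";

    // The '=' separator marks a link; a plain hfile name does not match the link pattern.
    System.out.println(HFileLink.isHFileLink(linkName)); // expected: true
    System.out.println(HFileLink.isHFileLink("abcd"));   // expected: false

    // Decompose the link name back into the referenced table, region and hfile.
    TableName table = HFileLink.getReferencedTableName(linkName); // testtb (default namespace)
    String region = HFileLink.getReferencedRegionName(linkName);  // 4567
    String hfile = HFileLink.getReferencedHFileName(linkName);    // abcd
    System.out.println(table + " / " + region + " / " + hfile);
  }
}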
@@ -235,13 +221,11 @@ private static Path getHFileLinkPatternRelativePath(final Path path) { String hfileName = m.group(4); String familyName = path.getParent().getName(); Path tableDir = CommonFSUtils.getTableDir(new Path("./"), tableName); - return new Path(tableDir, new Path(regionName, new Path(familyName, - hfileName))); + return new Path(tableDir, new Path(regionName, new Path(familyName, hfileName))); } /** * Get the HFile name of the referenced link - * * @param fileName HFileLink file name * @return the name of the referenced HFile */ @@ -250,12 +234,11 @@ public static String getReferencedHFileName(final String fileName) { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(m.group(4)); + return (m.group(4)); } /** * Get the Region name of the referenced link - * * @param fileName HFileLink file name * @return the name of the referenced Region */ @@ -264,12 +247,11 @@ public static String getReferencedRegionName(final String fileName) { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(m.group(3)); + return (m.group(3)); } /** * Get the Table name of the referenced link - * * @param fileName HFileLink file name * @return the name of the referenced Table */ @@ -278,44 +260,40 @@ public static TableName getReferencedTableName(final String fileName) { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(TableName.valueOf(m.group(1), m.group(2))); + return (TableName.valueOf(m.group(1), m.group(2))); } /** * Create a new HFileLink name - * * @param hfileRegionInfo - Linked HFile Region Info * @param hfileName - Linked HFile name * @return file name of the HFile Link */ public static String createHFileLinkName(final RegionInfo hfileRegionInfo, final String hfileName) { - return createHFileLinkName(hfileRegionInfo.getTable(), - hfileRegionInfo.getEncodedName(), hfileName); + return createHFileLinkName(hfileRegionInfo.getTable(), hfileRegionInfo.getEncodedName(), + hfileName); } /** * Create a new HFileLink name - * * @param tableName - Linked HFile table name * @param regionName - Linked HFile region name * @param hfileName - Linked HFile name * @return file name of the HFile Link */ - public static String createHFileLinkName(final TableName tableName, - final String regionName, final String hfileName) { + public static String createHFileLinkName(final TableName tableName, final String regionName, + final String hfileName) { String s = String.format("%s=%s-%s", - tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='), - regionName, hfileName); + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='), regionName, hfileName); return s; } /** * Create a new HFileLink - * - *

        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * + *

        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. * @param conf {@link Configuration} to read for the archive directory name * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) @@ -325,17 +303,16 @@ public static String createHFileLinkName(final TableName tableName, * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final RegionInfo hfileRegionInfo, - final String hfileName) throws IOException { + final Path dstFamilyPath, final RegionInfo hfileRegionInfo, final String hfileName) + throws IOException { return create(conf, fs, dstFamilyPath, hfileRegionInfo, hfileName, true); } /** * Create a new HFileLink - * - *

        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * + *

        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. * @param conf {@link Configuration} to read for the archive directory name * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) @@ -346,8 +323,8 @@ public static String create(final Configuration conf, final FileSystem fs, * @throws IOException on file or parent directory creation failure. */ public static String create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final RegionInfo hfileRegionInfo, - final String hfileName, final boolean createBackRef) throws IOException { + final Path dstFamilyPath, final RegionInfo hfileRegionInfo, final String hfileName, + final boolean createBackRef) throws IOException { TableName linkedTable = hfileRegionInfo.getTable(); String linkedRegion = hfileRegionInfo.getEncodedName(); return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName, createBackRef); @@ -355,10 +332,9 @@ public static String create(final Configuration conf, final FileSystem fs, /** * Create a new HFileLink - * - *

        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * + *

        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. * @param conf {@link Configuration} to read for the archive directory name * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) @@ -377,10 +353,9 @@ public static String create(final Configuration conf, final FileSystem fs, /** * Create a new HFileLink. In the event of link creation failure, this method throws an * IOException, so that the calling upper laying can decide on how to proceed with this. - * - *

        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * + *

        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. * @param conf {@link Configuration} to read for the archive directory name * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) @@ -396,8 +371,8 @@ public static String create(final Configuration conf, final FileSystem fs, final String hfileName, final boolean createBackRef) throws IOException { String familyName = dstFamilyPath.getName(); String regionName = dstFamilyPath.getParent().getName(); - String tableName = CommonFSUtils.getTableName(dstFamilyPath.getParent().getParent()) - .getNameAsString(); + String tableName = + CommonFSUtils.getTableName(dstFamilyPath.getParent().getParent()).getNameAsString(); return create(conf, fs, dstFamilyPath, familyName, tableName, regionName, linkedTable, linkedRegion, hfileName, createBackRef); @@ -405,9 +380,9 @@ public static String create(final Configuration conf, final FileSystem fs, /** * Create a new HFileLink - * - *

        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. + *

        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. * @param conf {@link Configuration} to read for the archive directory name * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) @@ -431,8 +406,8 @@ public static String create(final Configuration conf, final FileSystem fs, fs.mkdirs(dstFamilyPath); // Make sure the FileLink reference directory exists - Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, - linkedTable, linkedRegion, familyName); + Path archiveStoreDir = + HFileArchiveUtil.getStoreArchivePath(conf, linkedTable, linkedRegion, familyName); Path backRefPath = null; if (createBackRef) { Path backRefssDir = getBackReferencesDir(archiveStoreDir, hfileName); @@ -455,16 +430,15 @@ public static String create(final Configuration conf, final FileSystem fs, } throw e; } - throw new IOException("File link=" + name + " already exists under " + - dstFamilyPath + " folder."); + throw new IOException( + "File link=" + name + " already exists under " + dstFamilyPath + " folder."); } /** * Create a new HFileLink starting from a hfileLink name - * - *

        It also adds a back-reference to the hfile back-reference directory - * to simplify the reference-count and the cleaning process. - * + *

        + * It also adds a back-reference to the hfile back-reference directory to simplify the + * reference-count and the cleaning process. * @param conf {@link Configuration} to read for the archive directory name * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) @@ -475,28 +449,26 @@ public static String create(final Configuration conf, final FileSystem fs, */ public static String createFromHFileLink(final Configuration conf, final FileSystem fs, final Path dstFamilyPath, final String hfileLinkName, final boolean createBackRef) - throws IOException { + throws IOException { Matcher m = LINK_NAME_PATTERN.matcher(hfileLinkName); if (!m.matches()) { throw new IllegalArgumentException(hfileLinkName + " is not a valid HFileLink name!"); } - return create(conf, fs, dstFamilyPath, TableName.valueOf(m.group(1), m.group(2)), - m.group(3), m.group(4), createBackRef); + return create(conf, fs, dstFamilyPath, TableName.valueOf(m.group(1), m.group(2)), m.group(3), + m.group(4), createBackRef); } /** * Create the back reference name */ - //package-private for testing - static String createBackReferenceName(final String tableNameStr, - final String regionName) { + // package-private for testing + static String createBackReferenceName(final String tableNameStr, final String regionName) { return regionName + "." + tableNameStr.replace(TableName.NAMESPACE_DELIM, '='); } /** * Get the full path of the HFile referenced by the back reference - * * @param rootDir root hbase directory * @param linkRefPath Link Back Reference path * @return full path of the referenced hfile @@ -511,8 +483,8 @@ public static Path getHFileFromBackReference(final Path rootDir, final Path link Path regionPath = familyPath.getParent(); Path tablePath = regionPath.getParent(); - String linkName = createHFileLinkName(CommonFSUtils.getTableName(tablePath), - regionPath.getName(), hfileName); + String linkName = + createHFileLinkName(CommonFSUtils.getTableName(tablePath), regionPath.getName(), hfileName); Path linkTableDir = CommonFSUtils.getTableDir(rootDir, linkTableName); Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName); return new Path(new Path(regionDir, familyPath.getName()), linkName); @@ -521,15 +493,13 @@ public static Path getHFileFromBackReference(final Path rootDir, final Path link public static Pair parseBackReferenceName(String name) { int separatorIndex = name.indexOf('.'); String linkRegionName = name.substring(0, separatorIndex); - String tableSubstr = name.substring(separatorIndex + 1) - .replace('=', TableName.NAMESPACE_DELIM); + String tableSubstr = name.substring(separatorIndex + 1).replace('=', TableName.NAMESPACE_DELIM); TableName linkTableName = TableName.valueOf(tableSubstr); return new Pair<>(linkTableName, linkRegionName); } /** * Get the full path of the HFile referenced by the back reference - * * @param conf {@link Configuration} to read for the archive directory name * @param linkRefPath Link Back Reference path * @return full path of the referenced hfile diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index ab293e36277f..dcae1de68709 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) 
under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.nio.ByteBuffer; import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -40,25 +38,24 @@ import org.slf4j.LoggerFactory; /** - * A facade for a {@link org.apache.hadoop.hbase.io.hfile.HFile.Reader} that serves up - * either the top or bottom half of a HFile where 'bottom' is the first half - * of the file containing the keys that sort lowest and 'top' is the second half - * of the file with keys that sort greater than those of the bottom half. - * The top includes the split files midkey, of the key that follows if it does + * A facade for a {@link org.apache.hadoop.hbase.io.hfile.HFile.Reader} that serves up either the + * top or bottom half of a HFile where 'bottom' is the first half of the file containing the keys + * that sort lowest and 'top' is the second half of the file with keys that sort greater than those + * of the bottom half. The top includes the split files midkey, of the key that follows if it does * not exist in the file. - * - *

        This type works in tandem with the {@link Reference} type. This class - * is used reading while Reference is used writing. - * - *

        This file is not splitable. Calls to {@link #midKey()} return null. + *

        + * This type works in tandem with the {@link Reference} type. This class is used reading while + * Reference is used writing. + *

        + * This file is not splitable. Calls to {@link #midKey()} return null. */ @InterfaceAudience.Private public class HalfStoreFileReader extends StoreFileReader { private static final Logger LOG = LoggerFactory.getLogger(HalfStoreFileReader.class); final boolean top; - // This is the key we split around. Its the first possible entry on a row: + // This is the key we split around. Its the first possible entry on a row: // i.e. empty column and a timestamp of LATEST_TIMESTAMP. - protected final byte [] splitkey; + protected final byte[] splitkey; private final Cell splitCell; @@ -76,12 +73,12 @@ public class HalfStoreFileReader extends StoreFileReader { * @param conf Configuration */ public HalfStoreFileReader(final ReaderContext context, final HFileInfo fileInfo, - final CacheConfig cacheConf, final Reference r, - AtomicInteger refCount, final Configuration conf) throws IOException { + final CacheConfig cacheConf, final Reference r, AtomicInteger refCount, + final Configuration conf) throws IOException { super(context, fileInfo, cacheConf, refCount, conf); // This is not actual midkey for this half-file; its just border - // around which we split top and bottom. Have to look in files to find - // actual last and first keys for bottom and top halves. Half-files don't + // around which we split top and bottom. Have to look in files to find + // actual last and first keys for bottom and top halves. Half-files don't // have an actual midkey themselves. No midkey is how we indicate file is // not splittable. this.splitkey = r.getSplitKey(); @@ -95,8 +92,8 @@ protected boolean isTop() { } @Override - public HFileScanner getScanner(final boolean cacheBlocks, - final boolean pread, final boolean isCompaction) { + public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread, + final boolean isCompaction) { final HFileScanner s = super.getScanner(cacheBlocks, pread, isCompaction); return new HFileScanner() { final HFileScanner delegate = s; @@ -202,8 +199,8 @@ public int seekTo(Cell key) throws IOException { boolean res = delegate.seekBefore(splitCell); if (!res) { throw new IOException( - "Seeking for a key in bottom of file, but key exists in top of file, " + - "failed on seekBefore(midkey)"); + "Seeking for a key in bottom of file, but key exists in top of file, " + + "failed on seekBefore(midkey)"); } return 1; } @@ -243,8 +240,8 @@ public int reseekTo(Cell key) throws IOException { public boolean seekBefore(Cell key) throws IOException { if (top) { Optional fk = getFirstKey(); - if (fk.isPresent() && - PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, fk.get()) <= 0) { + if (fk.isPresent() + && PrivateCellUtil.compareKeyIgnoresMvcc(getComparator(), key, fk.get()) <= 0) { return false; } } else { @@ -281,12 +278,12 @@ public void shipped() throws IOException { } }; } - + @Override public boolean passesKeyRangeFilter(Scan scan) { return true; } - + @Override public Optional getLastKey() { if (top) { @@ -326,7 +323,7 @@ public Optional getFirstKey() { } catch (IOException e) { LOG.warn("Failed seekTo first KV in the file", e); } finally { - if(scanner != null) { + if (scanner != null) { scanner.close(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java index 72da73e1e920..75c79ad0fceb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java @@ -1,4 +1,4 @@ -/** 
+/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; @@ -30,7 +29,7 @@ public class MetricsIO { public MetricsIO(MetricsIOWrapper wrapper) { this(CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) - .createIO(wrapper), wrapper); + .createIO(wrapper), wrapper); } MetricsIO(MetricsIOSource source, MetricsIOWrapper wrapper) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java index 1ce762a0ad2e..687d58334582 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.io.hfile.HFile; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java index 845005f1bbd0..984129c0f2c3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,44 +23,42 @@ import java.io.IOException; import java.io.InputStream; import java.util.Arrays; - import org.apache.commons.io.IOUtils; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * A reference to the top or bottom half of a store file where 'bottom' is the first half - * of the file containing the keys that sort lowest and 'top' is the second half - * of the file with keys that sort greater than those of the bottom half. The file referenced - * lives under a different region. References are made at region split time. - * - *

        References work with a special half store file type. References know how - * to write out the reference format in the file system and are what is juggled - * when references are mixed in with direct store files. The half store file - * type is used reading the referred to file. - * - *

        References to store files located over in some other region look like - * this in the file system - * 1278437856009925445.3323223323: - * i.e. an id followed by hash of the referenced region. - * Note, a region is itself not splittable if it has instances of store file - * references. References are cleaned up by compactions. + * A reference to the top or bottom half of a store file where 'bottom' is the first half of the + * file containing the keys that sort lowest and 'top' is the second half of the file with keys that + * sort greater than those of the bottom half. The file referenced lives under a different region. + * References are made at region split time. + *

        + * References work with a special half store file type. References know how to write out the + * reference format in the file system and are what is juggled when references are mixed in with + * direct store files. The half store file type is used reading the referred to file. + *

        + * References to store files located over in some other region look like this in the file system + * 1278437856009925445.3323223323: i.e. an id followed by hash of the referenced + * region. Note, a region is itself not splittable if it has instances of store file references. + * References are cleaned up by compactions. */ @InterfaceAudience.Private public class Reference { - private byte [] splitkey; + private byte[] splitkey; private Range region; /** - * For split HStoreFiles, it specifies if the file covers the lower half or - * the upper half of the key range + * For split HStoreFiles, it specifies if the file covers the lower half or the upper half of the + * key range */ static enum Range { /** HStoreFile contains upper half of key range */ @@ -74,7 +71,7 @@ static enum Range { * @param splitRow * @return A {@link Reference} that points at top half of a an hfile */ - public static Reference createTopReference(final byte [] splitRow) { + public static Reference createTopReference(final byte[] splitRow) { return new Reference(splitRow, Range.top); } @@ -82,7 +79,7 @@ public static Reference createTopReference(final byte [] splitRow) { * @param splitRow * @return A {@link Reference} that points at the bottom half of a an hfile */ - public static Reference createBottomReference(final byte [] splitRow) { + public static Reference createBottomReference(final byte[] splitRow) { return new Reference(splitRow, Range.bottom); } @@ -91,8 +88,8 @@ public static Reference createBottomReference(final byte [] splitRow) { * @param splitRow This is row we are splitting around. * @param fr */ - Reference(final byte [] splitRow, final Range fr) { - this.splitkey = splitRow == null? null: KeyValueUtil.createFirstOnRow(splitRow).getKey(); + Reference(final byte[] splitRow, final Range fr) { + this.splitkey = splitRow == null ? null : KeyValueUtil.createFirstOnRow(splitRow).getKey(); this.region = fr; } @@ -108,7 +105,6 @@ public Reference() { } /** - * * @return Range */ public Range getFileRegion() { @@ -118,7 +114,7 @@ public Range getFileRegion() { /** * @return splitKey */ - public byte [] getSplitKey() { + public byte[] getSplitKey() { return splitkey; } @@ -135,20 +131,19 @@ public static boolean isTopFileRegion(final Range r) { } /** - * @deprecated Writables are going away. Use the pb serialization methods instead. - * Remove in a release after 0.96 goes out. This is here only to migrate - * old Reference files written with Writables before 0.96. + * @deprecated Writables are going away. Use the pb serialization methods instead. Remove in a + * release after 0.96 goes out. This is here only to migrate old Reference files + * written with Writables before 0.96. */ @Deprecated public void readFields(DataInput in) throws IOException { boolean tmp = in.readBoolean(); // If true, set region to top. - this.region = tmp? Range.top: Range.bottom; + this.region = tmp ? 
Range.top : Range.bottom; this.splitkey = Bytes.readByteArray(in); } - public Path write(final FileSystem fs, final Path p) - throws IOException { + public Path write(final FileSystem fs, final Path p) throws IOException { FSDataOutputStream out = fs.create(p, false); try { out.write(toByteArray()); @@ -165,20 +160,19 @@ public Path write(final FileSystem fs, final Path p) * @return New Reference made from passed p * @throws IOException */ - public static Reference read(final FileSystem fs, final Path p) - throws IOException { + public static Reference read(final FileSystem fs, final Path p) throws IOException { InputStream in = fs.open(p); try { // I need to be able to move back in the stream if this is not a pb serialization so I can // do the Writable decoding instead. - in = in.markSupported()? in: new BufferedInputStream(in); + in = in.markSupported() ? in : new BufferedInputStream(in); int pblen = ProtobufUtil.lengthOfPBMagic(); in.mark(pblen); - byte [] pbuf = new byte[pblen]; - IOUtils.readFully(in, pbuf,0, pblen); + byte[] pbuf = new byte[pblen]; + IOUtils.readFully(in, pbuf, 0, pblen); // WATCHOUT! Return in middle of function!!! if (ProtobufUtil.isPBMagicPrefix(pbuf)) return convert(FSProtos.Reference.parseFrom(in)); - // Else presume Writables. Need to reset the stream since it didn't start w/ pb. + // Else presume Writables. Need to reset the stream since it didn't start w/ pb. // We won't bother rewriting thie Reference as a pb since Reference is transitory. in.reset(); Reference r = new Reference(); @@ -194,8 +188,8 @@ public static Reference read(final FileSystem fs, final Path p) public FSProtos.Reference convert() { FSProtos.Reference.Builder builder = FSProtos.Reference.newBuilder(); - builder.setRange(isTopFileRegion(getFileRegion())? - FSProtos.Reference.Range.TOP: FSProtos.Reference.Range.BOTTOM); + builder.setRange(isTopFileRegion(getFileRegion()) ? FSProtos.Reference.Range.TOP + : FSProtos.Reference.Range.BOTTOM); builder.setSplitkey(UnsafeByteOperations.unsafeWrap(getSplitKey())); return builder.build(); } @@ -203,17 +197,17 @@ public FSProtos.Reference convert() { public static Reference convert(final FSProtos.Reference r) { Reference result = new Reference(); result.splitkey = r.getSplitkey().toByteArray(); - result.region = r.getRange() == FSProtos.Reference.Range.TOP? Range.top: Range.bottom; + result.region = r.getRange() == FSProtos.Reference.Range.TOP ? Range.top : Range.bottom; return result; } /** - * Use this when writing to a stream and you want to use the pb mergeDelimitedFrom - * (w/o the delimiter, pb reads to EOF which may not be what you want). + * Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the + * delimiter, pb reads to EOF which may not be what you want). * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. * @throws IOException */ - byte [] toByteArray() throws IOException { + byte[] toByteArray() throws IOException { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java index c495201a45fd..894d89673cf5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,25 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * WALLink describes a link to a WAL. - * - * An wal can be in /hbase/.logs/<server>/<wal> - * or it can be in /hbase/.oldlogs/<wal> - * - * The link checks first in the original path, - * if it is not present it fallbacks to the archived path. + * WALLink describes a link to a WAL. An wal can be in /hbase/.logs/<server>/<wal> or it + * can be in /hbase/.oldlogs/<wal> The link checks first in the original path, if it is not + * present it fallbacks to the archived path. */ @InterfaceAudience.Private public class WALLink extends FileLink { @@ -43,8 +37,8 @@ public class WALLink extends FileLink { * @param logName WAL file name * @throws IOException on unexpected error. */ - public WALLink(final Configuration conf, - final String serverName, final String logName) throws IOException { + public WALLink(final Configuration conf, final String serverName, final String logName) + throws IOException { this(CommonFSUtils.getWALRootDir(conf), serverName, logName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java index 017c4d14b6e1..92b5e9fa67dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WritableWithSize.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.yetus.audience.InterfaceAudience; @@ -27,11 +25,8 @@ @InterfaceAudience.Private public interface WritableWithSize { /** - * Provide a size hint to the caller. write() should ideally - * not go beyond this if at all possible. - * - * You can return 0 if there is no size hint. - * + * Provide a size hint to the caller. write() should ideally not go beyond this if at all + * possible. You can return 0 if there is no size hint. * @return the size of the writable */ long getWritableSize(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java index cd8932269c5a..187b0536a30b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Snapshot of block cache age in cache. - * This object is preferred because we can control how it is serialized out when JSON'ing. 
+ * Snapshot of block cache age in cache. This object is preferred because we can control how it is + * serialized out when JSON'ing. */ @InterfaceAudience.Private public class AgeSnapshot { @@ -32,7 +32,7 @@ public class AgeSnapshot { AgeSnapshot(final FastLongHistogram ageHistogram) { this.ageHistogram = ageHistogram; - this.quantiles = ageHistogram.getQuantiles(new double[]{0.75, 0.95, 0.98, 0.99, 0.999}); + this.quantiles = ageHistogram.getQuantiles(new double[] { 0.75, 0.95, 0.98, 0.99, 0.999 }); } public double get75thPercentile() { @@ -55,7 +55,6 @@ public double get999thPercentile() { return quantiles[4]; } - public double getMean() { return this.ageHistogram.getMean(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 6f32d623c5ea..a43a4b07268b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +18,11 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; - import org.apache.yetus.audience.InterfaceAudience; /** - * Block cache interface. Anything that implements the {@link Cacheable} - * interface can be put in the cache. + * Block cache interface. Anything that implements the {@link Cacheable} interface can be put in the + * cache. */ @InterfaceAudience.Private public interface BlockCache extends Iterable { @@ -47,20 +45,20 @@ public interface BlockCache extends Iterable { * Fetch block from cache. * @param cacheKey Block to fetch. * @param caching Whether this request has caching enabled (used for stats) - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check locking) + * @param repeat Whether this is a repeat lookup for the same block (used to avoid double counting + * cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not * @return Block or null if block is not in 2 cache. */ Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, - boolean updateCacheMetrics); + boolean updateCacheMetrics); /** * Fetch block from cache. * @param cacheKey Block to fetch. * @param caching Whether this request has caching enabled (used for stats) - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check locking) + * @param repeat Whether this is a repeat lookup for the same block (used to avoid double counting + * cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not * @param blockType BlockType * @return Block or null if block is not in 2 cache. @@ -79,7 +77,6 @@ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repe /** * Evicts all blocks for the given HFile. - * * @return the number of blocks evicted */ int evictBlocksByHfileName(String hfileName); @@ -131,11 +128,11 @@ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repe */ long getBlockCount(); - /** - * Returns the number of data blocks currently cached in the block cache. 
- * @return number of blocks in the cache - */ - long getDataBlockCount(); + /** + * Returns the number of data blocks currently cached in the block cache. + * @return number of blocks in the cache + */ + long getDataBlockCount(); /** * @return Iterator over the blocks in the cache. @@ -146,7 +143,7 @@ default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repe /** * @return The list of sub blockcaches that make up this one; returns null if no sub caches. */ - BlockCache [] getBlockCaches(); + BlockCache[] getBlockCaches(); /** * Check if block type is meta or index block diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java index 12c769ec805a..e44a46d311b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.concurrent.ForkJoinPool; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; @@ -50,13 +49,13 @@ public final class BlockCacheFactory { /** * If the chosen ioengine can persist its state across restarts, the path to the file to persist - * to. This file is NOT the data file. It is a file into which we will serialize the map of - * what is in the data file. For example, if you pass the following argument as + * to. This file is NOT the data file. It is a file into which we will serialize the map of what + * is in the data file. For example, if you pass the following argument as * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"), * file:/tmp/bucketcache.data , then we will write the bucketcache data to the file * /tmp/bucketcache.data but the metadata on where the data is in the supplied file - * is an in-memory map that needs to be persisted across restarts. Where to store this - * in-memory state is what you supply here: e.g. /tmp/bucketcache.map. + * is an in-memory map that needs to be persisted across restarts. Where to store this in-memory + * state is what you supply here: e.g. /tmp/bucketcache.map. */ public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; @@ -103,16 +102,15 @@ public static BlockCache createBlockCache(Configuration conf) { boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); if (useExternal) { BlockCache l2CacheInstance = createExternalBlockcache(conf); - return l2CacheInstance == null ? - l1Cache : - new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance); + return l2CacheInstance == null ? l1Cache + : new InclusiveCombinedBlockCache(l1Cache, l2CacheInstance); } else { // otherwise use the bucket cache. BucketCache bucketCache = createBucketCache(conf); if (!conf.getBoolean("hbase.bucketcache.combinedcache.enabled", true)) { // Non combined mode is off from 2.0 LOG.warn( - "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); + "From HBase 2.0 onwards only combined mode of LRU cache and bucket cache is available"); } return bucketCache == null ? 
l1Cache : new CombinedBlockCache(l1Cache, bucketCache); } @@ -125,8 +123,8 @@ private static FirstLevelBlockCache createFirstLevelCache(final Configuration c) } String policy = c.get(BLOCKCACHE_POLICY_KEY, BLOCKCACHE_POLICY_DEFAULT); int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); - LOG.info("Allocating BlockCache size=" + - StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize)); + LOG.info("Allocating BlockCache size=" + StringUtils.byteDesc(cacheSize) + ", blockSize=" + + StringUtils.byteDesc(blockSize)); if (policy.equalsIgnoreCase("LRU")) { return new LruBlockCache(cacheSize, blockSize, true, c); } else if (policy.equalsIgnoreCase("IndexOnlyLRU")) { @@ -141,13 +139,14 @@ private static FirstLevelBlockCache createFirstLevelCache(final Configuration c) } /** - * Enum of all built in external block caches. - * This is used for config. + * Enum of all built in external block caches. This is used for config. */ private static enum ExternalBlockCaches { memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache"); + // TODO(eclark): Consider more. Redis, etc. Class clazz; + ExternalBlockCaches(String clazzName) { try { clazz = (Class) Class.forName(clazzName); @@ -155,6 +154,7 @@ private static enum ExternalBlockCaches { clazz = null; } } + ExternalBlockCaches(Class clazz) { this.clazz = clazz; } @@ -168,12 +168,11 @@ private static BlockCache createExternalBlockcache(Configuration c) { // Get the class, from the config. s try { - klass = ExternalBlockCaches - .valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; + klass = ExternalBlockCaches.valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; } catch (IllegalArgumentException exception) { try { - klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, Class.forName( - "org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache")); + klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, + Class.forName("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache")); } catch (ClassNotFoundException e) { return null; } @@ -191,7 +190,7 @@ private static BlockCache createExternalBlockcache(Configuration c) { } private static BucketCache createBucketCache(Configuration c) { - // Check for L2. ioengine name must be non-null. + // Check for L2. ioengine name must be non-null. String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null); if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) { return null; @@ -200,20 +199,19 @@ private static BucketCache createBucketCache(Configuration c) { int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE); final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c); if (bucketCacheSize <= 0) { - throw new IllegalStateException("bucketCacheSize <= 0; Check " + - BUCKET_CACHE_SIZE_KEY + " setting and/or server java heap size"); + throw new IllegalStateException("bucketCacheSize <= 0; Check " + BUCKET_CACHE_SIZE_KEY + + " setting and/or server java heap size"); } if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) { LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer " + "respected. 
See comments in http://hbase.apache.org/book.html#_changes_of_note"); } - int writerThreads = c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, - DEFAULT_BUCKET_CACHE_WRITER_THREADS); - int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, - DEFAULT_BUCKET_CACHE_WRITER_QUEUE); + int writerThreads = + c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, DEFAULT_BUCKET_CACHE_WRITER_THREADS); + int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, DEFAULT_BUCKET_CACHE_WRITER_QUEUE); String persistentPath = c.get(BUCKET_CACHE_PERSISTENT_PATH_KEY); String[] configuredBucketSizes = c.getStrings(BUCKET_CACHE_BUCKETS_KEY); - int [] bucketSizes = null; + int[] bucketSizes = null; if (configuredBucketSizes != null) { bucketSizes = new int[configuredBucketSizes.length]; for (int i = 0; i < configuredBucketSizes.length; i++) { @@ -232,15 +230,16 @@ private static BucketCache createBucketCache(Configuration c) { } BucketCache bucketCache = null; try { - int ioErrorsTolerationDuration = c.getInt( - "hbase.bucketcache.ioengine.errors.tolerated.duration", - BucketCache.DEFAULT_ERROR_TOLERATION_DURATION); + int ioErrorsTolerationDuration = + c.getInt("hbase.bucketcache.ioengine.errors.tolerated.duration", + BucketCache.DEFAULT_ERROR_TOLERATION_DURATION); // Bucket cache logs its stats on creation internal to the constructor. - bucketCache = new BucketCache(bucketCacheIOEngineName, - bucketCacheSize, blockSize, bucketSizes, writerThreads, writerQueueLen, persistentPath, - ioErrorsTolerationDuration, c); + bucketCache = + new BucketCache(bucketCacheIOEngineName, bucketCacheSize, blockSize, bucketSizes, + writerThreads, writerQueueLen, persistentPath, ioErrorsTolerationDuration, c); } catch (IOException ioex) { - LOG.error("Can't instantiate bucket cache", ioex); throw new RuntimeException(ioex); + LOG.error("Can't instantiate bucket cache", ioex); + throw new RuntimeException(ioex); } return bucketCache; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java index 4683c3520c1b..d9f905aea3b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; /** * Cache Key for use with implementations of {@link BlockCache} @@ -59,8 +59,7 @@ public boolean equals(Object o) { if (o instanceof BlockCacheKey) { BlockCacheKey k = (BlockCacheKey) o; return offset == k.offset - && (hfileName == null ? k.hfileName == null : hfileName - .equals(k.hfileName)); + && (hfileName == null ? k.hfileName == null : hfileName.equals(k.hfileName)); } else { return false; } @@ -74,13 +73,12 @@ public String toString() { public static final long FIXED_OVERHEAD = ClassSize.estimateBase(BlockCacheKey.class, false); /** - * Strings have two bytes per character due to default Java Unicode encoding - * (hence length times 2). + * Strings have two bytes per character due to default Java Unicode encoding (hence length times + * 2). 
*/ @Override public long heapSize() { - return ClassSize.align(FIXED_OVERHEAD + ClassSize.STRING + - 2 * hfileName.length()); + return ClassSize.align(FIXED_OVERHEAD + ClassSize.STRING + 2 * hfileName.length()); } // can't avoid this unfortunately diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java index c2cf82148bee..b0ced8610d97 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,28 +51,28 @@ public class BlockCacheUtil { * Needed generating JSON. */ private static final Gson GSON = GsonUtil.createGson() - .registerTypeAdapter(FastLongHistogram.class, new TypeAdapter() { - - @Override - public void write(JsonWriter out, FastLongHistogram value) throws IOException { - AgeSnapshot snapshot = new AgeSnapshot(value); - out.beginObject(); - out.name("mean").value(snapshot.getMean()); - out.name("min").value(snapshot.getMin()); - out.name("max").value(snapshot.getMax()); - out.name("75thPercentile").value(snapshot.get75thPercentile()); - out.name("95thPercentile").value(snapshot.get95thPercentile()); - out.name("98thPercentile").value(snapshot.get98thPercentile()); - out.name("99thPercentile").value(snapshot.get99thPercentile()); - out.name("999thPercentile").value(snapshot.get999thPercentile()); - out.endObject(); - } - - @Override - public FastLongHistogram read(JsonReader in) throws IOException { - throw new UnsupportedOperationException(); - } - }).setPrettyPrinting().create(); + .registerTypeAdapter(FastLongHistogram.class, new TypeAdapter() { + + @Override + public void write(JsonWriter out, FastLongHistogram value) throws IOException { + AgeSnapshot snapshot = new AgeSnapshot(value); + out.beginObject(); + out.name("mean").value(snapshot.getMean()); + out.name("min").value(snapshot.getMin()); + out.name("max").value(snapshot.getMax()); + out.name("75thPercentile").value(snapshot.get75thPercentile()); + out.name("95thPercentile").value(snapshot.get95thPercentile()); + out.name("98thPercentile").value(snapshot.get98thPercentile()); + out.name("99thPercentile").value(snapshot.get99thPercentile()); + out.name("999thPercentile").value(snapshot.get999thPercentile()); + out.endObject(); + } + + @Override + public FastLongHistogram read(JsonReader in) throws IOException { + throw new UnsupportedOperationException(); + } + }).setPrettyPrinting().create(); /** * @param cb @@ -83,8 +83,7 @@ public static String toString(final CachedBlock cb, final long now) { } /** - * Little data structure to hold counts for a file. - * Used doing a toJSON. + * Little data structure to hold counts for a file. Used doing a toJSON. */ static class CachedBlockCountsPerFile { private int count = 0; @@ -155,11 +154,9 @@ public static String toJSON(BlockCache bc) throws IOException { * @return The block content of bc as a String minus the filename. 
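A rough worked illustration of the heap-size arithmetic in the comment above: fixed object overhead plus String overhead plus two bytes per character, aligned to 8 bytes. This is a standalone sketch, not HBase code; the two overhead constants are invented placeholders standing in for ClassSize.estimateBase(BlockCacheKey.class, false) and ClassSize.STRING.

// Illustrative sketch only; overhead values are assumptions, not the real ClassSize figures.
final class HeapSizeSketch {
  static final long FIXED_OVERHEAD_BYTES = 32L;   // placeholder for ClassSize.estimateBase(...)
  static final long STRING_OVERHEAD_BYTES = 64L;  // placeholder for ClassSize.STRING

  // Round up to an 8-byte boundary, as ClassSize.align does.
  static long align(long size) {
    return (size + 7) & ~7L;
  }

  // Each char costs two bytes in Java's in-memory UTF-16 representation,
  // hence "length times 2" in the javadoc above.
  static long heapSize(String hfileName) {
    return align(FIXED_OVERHEAD_BYTES + STRING_OVERHEAD_BYTES + 2L * hfileName.length());
  }
}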
*/ public static String toStringMinusFileName(final CachedBlock cb, final long now) { - return "offset=" + cb.getOffset() + - ", size=" + cb.getSize() + - ", age=" + (now - cb.getCachedTime()) + - ", type=" + cb.getBlockType() + - ", priority=" + cb.getBlockPriority(); + return "offset=" + cb.getOffset() + ", size=" + cb.getSize() + ", age=" + + (now - cb.getCachedTime()) + ", type=" + cb.getBlockType() + ", priority=" + + cb.getBlockPriority(); } /** @@ -172,38 +169,37 @@ public static String toStringMinusFileName(final CachedBlock cb, final long now) public static CachedBlocksByFile getLoadedCachedBlocksByFile(final Configuration conf, final BlockCache bc) { CachedBlocksByFile cbsbf = new CachedBlocksByFile(conf); - for (CachedBlock cb: bc) { + for (CachedBlock cb : bc) { if (cbsbf.update(cb)) break; } return cbsbf; } private static int compareCacheBlock(Cacheable left, Cacheable right, - boolean includeNextBlockMetadata) { + boolean includeNextBlockMetadata) { ByteBuffer l = ByteBuffer.allocate(left.getSerializedLength()); left.serialize(l, includeNextBlockMetadata); ByteBuffer r = ByteBuffer.allocate(right.getSerializedLength()); right.serialize(r, includeNextBlockMetadata); - return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), - r.array(), r.arrayOffset(), r.limit()); + return Bytes.compareTo(l.array(), l.arrayOffset(), l.limit(), r.array(), r.arrayOffset(), + r.limit()); } /** * Validate that the existing and newBlock are the same without including the nextBlockMetadata, - * if not, throw an exception. If they are the same without the nextBlockMetadata, - * return the comparison. - * + * if not, throw an exception. If they are the same without the nextBlockMetadata, return the + * comparison. * @param existing block that is existing in the cache. * @param newBlock block that is trying to be cached. * @param cacheKey the cache key of the blocks. * @return comparison of the existing block to the newBlock. */ public static int validateBlockAddition(Cacheable existing, Cacheable newBlock, - BlockCacheKey cacheKey) { + BlockCacheKey cacheKey) { int comparison = compareCacheBlock(existing, newBlock, false); if (comparison != 0) { - throw new RuntimeException("Cached block contents differ, which should not have happened." - + "cacheKey:" + cacheKey); + throw new RuntimeException( + "Cached block contents differ, which should not have happened." + "cacheKey:" + cacheKey); } if ((existing instanceof HFileBlock) && (newBlock instanceof HFileBlock)) { comparison = ((HFileBlock) existing).getNextBlockOnDiskSize() @@ -256,9 +252,9 @@ public static boolean shouldReplaceExistingCacheBlock(BlockCache blockCache, } /** - * Use one of these to keep a running account of cached blocks by file. Throw it away when done. - * This is different than metrics in that it is stats on current state of a cache. - * See getLoadedCachedBlocksByFile + * Use one of these to keep a running account of cached blocks by file. Throw it away when done. + * This is different than metrics in that it is stats on current state of a cache. See + * getLoadedCachedBlocksByFile */ public static class CachedBlocksByFile { private int count; @@ -267,11 +263,9 @@ public static class CachedBlocksByFile { private long dataSize; private final long now = System.nanoTime(); /** - * How many blocks to look at before we give up. - * There could be many millions of blocks. We don't want the - * ui to freeze while we run through 1B blocks... users will - * think hbase dead. UI displays warning in red when stats - * are incomplete. 
+ * How many blocks to look at before we give up. There could be many millions of blocks. We + * don't want the ui to freeze while we run through 1B blocks... users will think hbase dead. UI + * displays warning in red when stats are incomplete. */ private final int max; public static final int DEFAULT_MAX = 1000000; @@ -281,14 +275,14 @@ public static class CachedBlocksByFile { } CachedBlocksByFile(final Configuration c) { - this.max = c == null? DEFAULT_MAX: c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX); + this.max = c == null ? DEFAULT_MAX : c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX); } /** * Map by filename. use concurent utils because we want our Map and contained blocks sorted. */ private transient NavigableMap> cachedBlockByFile = - new ConcurrentSkipListMap<>(); + new ConcurrentSkipListMap<>(); FastLongHistogram hist = new FastLongHistogram(); /** @@ -310,15 +304,15 @@ public boolean update(final CachedBlock cb) { this.dataBlockCount++; this.dataSize += cb.getSize(); } - long age = (this.now - cb.getCachedTime())/NANOS_PER_SECOND; + long age = (this.now - cb.getCachedTime()) / NANOS_PER_SECOND; this.hist.add(age, 1); return false; } /** - * @return True if full; i.e. there are more items in the cache but we only loaded up - * the maximum set in configuration hbase.ui.blockcache.by.file.max - * (Default: DEFAULT_MAX). + * @return True if full; i.e. there are more items in the cache but we only loaded up the + * maximum set in configuration hbase.ui.blockcache.by.file.max (Default: + * DEFAULT_MAX). */ public boolean isFull() { return this.count >= this.max; @@ -360,16 +354,13 @@ public AgeSnapshot getAgeInCacheSnapshot() { @Override public String toString() { AgeSnapshot snapshot = getAgeInCacheSnapshot(); - return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size + - ", dataSize=" + getDataSize() + - ", mean age=" + snapshot.getMean() + - ", min age=" + snapshot.getMin() + - ", max age=" + snapshot.getMax() + - ", 75th percentile age=" + snapshot.get75thPercentile() + - ", 95th percentile age=" + snapshot.get95thPercentile() + - ", 98th percentile age=" + snapshot.get98thPercentile() + - ", 99th percentile age=" + snapshot.get99thPercentile() + - ", 99.9th percentile age=" + snapshot.get99thPercentile(); + return "count=" + count + ", dataBlockCount=" + dataBlockCount + ", size=" + size + + ", dataSize=" + getDataSize() + ", mean age=" + snapshot.getMean() + ", min age=" + + snapshot.getMin() + ", max age=" + snapshot.getMax() + ", 75th percentile age=" + + snapshot.get75thPercentile() + ", 95th percentile age=" + snapshot.get95thPercentile() + + ", 98th percentile age=" + snapshot.get98thPercentile() + ", 99th percentile age=" + + snapshot.get99thPercentile() + ", 99.9th percentile age=" + + snapshot.get99thPercentile(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java index 2d90a85d9fc7..43498b85f205 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -29,10 +26,10 @@ @InterfaceAudience.Private class BlockCachesIterator implements Iterator { int index = 0; - final BlockCache [] bcs; + final BlockCache[] bcs; Iterator current; - BlockCachesIterator(final BlockCache [] blockCaches) { + BlockCachesIterator(final BlockCache[] blockCaches) { this.bcs = blockCaches; this.current = this.bcs[this.index].iterator(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java index b8f83578d2f7..c340254e07c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,8 +28,8 @@ public class BlockWithScanInfo { private final HFileBlock hFileBlock; /** - * The first key in the next block following this one in the HFile. - * If this key is unknown, this is reference-equal with HConstants.NO_NEXT_INDEXED_KEY + * The first key in the next block following this one in the HFile. If this key is unknown, this + * is reference-equal with HConstants.NO_NEXT_INDEXED_KEY */ private final Cell nextIndexedKey; @@ -42,7 +42,7 @@ public HFileBlock getHFileBlock() { return hFileBlock; } - public Cell getNextIndexedKey() { + public Cell getNextIndexedKey() { return nextIndexedKey; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index dcbb71582f44..1d5fbcfacecc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.io.ByteBuffAllocator; @@ -46,14 +45,13 @@ public class CacheConfig { public static final String CACHE_DATA_ON_READ_KEY = "hbase.block.data.cacheonread"; /** - * Configuration key to cache data blocks on write. There are separate - * switches for bloom blocks and non-root index blocks. + * Configuration key to cache data blocks on write. There are separate switches for bloom blocks + * and non-root index blocks. */ public static final String CACHE_BLOCKS_ON_WRITE_KEY = "hbase.rs.cacheblocksonwrite"; /** - * Configuration key to cache leaf and intermediate-level index blocks on - * write. + * Configuration key to cache leaf and intermediate-level index blocks on write. 
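As a usage illustration of the two configuration keys in this hunk, the sketch below flips the data and index cache-on-write switches through a plain Configuration. The key strings come from the CacheConfig constants shown here; the class itself is example code, not part of the patch.

// Sketch: enabling the cache-on-write switches described above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CacheOnWriteExample {
  public static Configuration cacheOnWriteConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.rs.cacheblocksonwrite", true);     // data blocks (CACHE_BLOCKS_ON_WRITE_KEY)
    conf.setBoolean("hfile.block.index.cacheonwrite", true);  // index blocks (CACHE_INDEX_BLOCKS_ON_WRITE_KEY)
    return conf;
  }
}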
*/ public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY = "hfile.block.index.cacheonwrite"; @@ -68,14 +66,14 @@ public class CacheConfig { public static final String CACHE_DATA_BLOCKS_COMPRESSED_KEY = "hbase.block.data.cachecompressed"; /** - * Configuration key to evict all blocks of a given file from the block cache - * when the file is closed. + * Configuration key to evict all blocks of a given file from the block cache when the file is + * closed. */ public static final String EVICT_BLOCKS_ON_CLOSE_KEY = "hbase.rs.evictblocksonclose"; /** - * Configuration key to prefetch all blocks of a given file into the block cache - * when the file is opened. + * Configuration key to prefetch all blocks of a given file into the block cache when the file is + * opened. */ public static final String PREFETCH_BLOCKS_ON_OPEN_KEY = "hbase.rs.prefetchblocksonopen"; @@ -109,10 +107,9 @@ public class CacheConfig { public static final long DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD = Long.MAX_VALUE; /** - * Whether blocks should be cached on read (default is on if there is a - * cache but this can be turned off on a per-family or per-request basis). - * If off we will STILL cache meta blocks; i.e. INDEX and BLOOM types. - * This cannot be disabled. + * Whether blocks should be cached on read (default is on if there is a cache but this can be + * turned off on a per-family or per-request basis). If off we will STILL cache meta blocks; i.e. + * INDEX and BLOOM types. This cannot be disabled. */ private final boolean cacheDataOnRead; @@ -155,8 +152,8 @@ public class CacheConfig { private final ByteBuffAllocator byteBuffAllocator; /** - * Create a cache configuration using the specified configuration object and - * defaults for family level settings. Only use if no column family context. + * Create a cache configuration using the specified configuration object and defaults for family + * level settings. Only use if no column family context. * @param conf hbase configuration */ public CacheConfig(Configuration conf) { @@ -168,15 +165,14 @@ public CacheConfig(Configuration conf, BlockCache blockCache) { } /** - * Create a cache configuration using the specified configuration object and - * family descriptor. + * Create a cache configuration using the specified configuration object and family descriptor. * @param conf hbase configuration * @param family column family configuration */ public CacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache blockCache, ByteBuffAllocator byteBuffAllocator) { - this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) && - (family == null ? true : family.isBlockCacheEnabled()); + this.cacheDataOnRead = conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) + && (family == null ? true : family.isBlockCacheEnabled()); this.inMemory = family == null ? DEFAULT_IN_MEMORY : family.isInMemory(); this.cacheDataCompressed = conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED); @@ -184,19 +180,18 @@ public CacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT); // For the following flags we enable them regardless of per-schema settings // if they are enabled in the global configuration. - this.cacheDataOnWrite = - conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) || - (family == null ? 
false : family.isCacheDataOnWrite()); + this.cacheDataOnWrite = conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE) + || (family == null ? false : family.isCacheDataOnWrite()); this.cacheIndexesOnWrite = - conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) || - (family == null ? false : family.isCacheIndexesOnWrite()); + conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE) + || (family == null ? false : family.isCacheIndexesOnWrite()); this.cacheBloomsOnWrite = - conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) || - (family == null ? false : family.isCacheBloomsOnWrite()); - this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) || - (family == null ? false : family.isEvictBlocksOnClose()); - this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) || - (family == null ? false : family.isPrefetchBlocksOnOpen()); + conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE) + || (family == null ? false : family.isCacheBloomsOnWrite()); + this.evictOnClose = conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) + || (family == null ? false : family.isEvictBlocksOnClose()); + this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) + || (family == null ? false : family.isPrefetchBlocksOnOpen()); this.cacheCompactedDataOnWrite = conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE); this.cacheCompactedDataOnWriteThreshold = getCacheCompactedBlocksOnWriteThreshold(conf); @@ -240,8 +235,8 @@ private CacheConfig() { } /** - * Returns whether the DATA blocks of this HFile should be cached on read or not (we always - * cache the meta blocks, the INDEX and BLOOM blocks). + * Returns whether the DATA blocks of this HFile should be cached on read or not (we always cache + * the meta blocks, the INDEX and BLOOM blocks). * @return true if blocks should be cached on read, false if not */ public boolean shouldCacheDataOnRead() { @@ -253,13 +248,13 @@ public boolean shouldDropBehindCompaction() { } /** - * Should we cache a block of a particular category? We always cache - * important blocks such as index blocks, as long as the block cache is - * available. + * Should we cache a block of a particular category? We always cache important blocks such as + * index blocks, as long as the block cache is available. 
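The read-path decision this javadoc describes (and which shouldCacheBlockOnRead implements just below) can be restated as a small standalone predicate. The enum here is a simplified stand-in for BlockType.BlockCategory; this is a sketch of the rule, not the CacheConfig method.

// Simplified restatement of the read-path caching decision described above.
enum Category { DATA, INDEX, BLOOM, META, UNKNOWN }

final class ReadCachePolicySketch {
  static boolean shouldCacheOnRead(Category c, boolean cacheDataOnRead, boolean prefetchOnOpen) {
    if (c == Category.INDEX || c == Category.BLOOM) {
      return true;                       // index and bloom blocks are always cached
    }
    if (cacheDataOnRead) {
      return true;                       // per-family / per-request data caching switch
    }
    // prefetch pulls everything except META and UNKNOWN into the cache
    return prefetchOnOpen && c != Category.META && c != Category.UNKNOWN;
  }
}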
*/ public boolean shouldCacheBlockOnRead(BlockCategory category) { - return cacheDataOnRead || category == BlockCategory.INDEX || category == BlockCategory.BLOOM || - (prefetchOnOpen && (category != BlockCategory.META && category != BlockCategory.UNKNOWN)); + return cacheDataOnRead || category == BlockCategory.INDEX || category == BlockCategory.BLOOM + || (prefetchOnOpen + && (category != BlockCategory.META && category != BlockCategory.UNKNOWN)); } /** @@ -270,26 +265,23 @@ public boolean isInMemory() { } /** - * @return true if data blocks should be written to the cache when an HFile is - * written, false if not + * @return true if data blocks should be written to the cache when an HFile is written, false if + * not */ public boolean shouldCacheDataOnWrite() { return this.cacheDataOnWrite; } /** - * @param cacheDataOnWrite whether data blocks should be written to the cache - * when an HFile is written + * @param cacheDataOnWrite whether data blocks should be written to the cache when an HFile is + * written */ public void setCacheDataOnWrite(boolean cacheDataOnWrite) { this.cacheDataOnWrite = cacheDataOnWrite; } /** - * Enable cache on write including: - * cacheDataOnWrite - * cacheIndexesOnWrite - * cacheBloomsOnWrite + * Enable cache on write including: cacheDataOnWrite cacheIndexesOnWrite cacheBloomsOnWrite */ public void enableCacheOnWrite() { this.cacheDataOnWrite = true; @@ -298,24 +290,24 @@ public void enableCacheOnWrite() { } /** - * @return true if index blocks should be written to the cache when an HFile - * is written, false if not + * @return true if index blocks should be written to the cache when an HFile is written, false if + * not */ public boolean shouldCacheIndexesOnWrite() { return this.cacheIndexesOnWrite; } /** - * @return true if bloom blocks should be written to the cache when an HFile - * is written, false if not + * @return true if bloom blocks should be written to the cache when an HFile is written, false if + * not */ public boolean shouldCacheBloomsOnWrite() { return this.cacheBloomsOnWrite; } /** - * @return true if blocks should be evicted from the cache when an HFile - * reader is closed, false if not + * @return true if blocks should be evicted from the cache when an HFile reader is closed, false + * if not */ public boolean shouldEvictOnClose() { return this.evictOnClose; @@ -323,8 +315,8 @@ public boolean shouldEvictOnClose() { /** * Only used for testing. - * @param evictOnClose whether blocks should be evicted from the cache when an - * HFile reader is closed + * @param evictOnClose whether blocks should be evicted from the cache when an HFile reader is + * closed */ public void setEvictOnClose(boolean evictOnClose) { this.evictOnClose = evictOnClose; @@ -369,6 +361,7 @@ public boolean shouldCacheCompactedBlocksOnWrite() { public long getCacheCompactedBlocksOnWriteThreshold() { return this.cacheCompactedDataOnWriteThreshold; } + /** * Return true if we may find this type of block in block cache. *

        @@ -390,16 +383,16 @@ public boolean shouldReadBlockFromCache(BlockType blockType) { if (blockType == null) { return true; } - if (blockType.getCategory() == BlockCategory.BLOOM || - blockType.getCategory() == BlockCategory.INDEX) { + if (blockType.getCategory() == BlockCategory.BLOOM + || blockType.getCategory() == BlockCategory.INDEX) { return true; } return false; } /** - * If we make sure the block could not be cached, we will not acquire the lock - * otherwise we will acquire lock + * If we make sure the block could not be cached, we will not acquire the lock otherwise we will + * acquire lock */ public boolean shouldLockOnCacheMiss(BlockType blockType) { if (blockType == null) { @@ -410,7 +403,6 @@ public boolean shouldLockOnCacheMiss(BlockType blockType) { /** * Returns the block cache. - * * @return the block cache, or null if caching is completely disabled */ public Optional getBlockCache() { @@ -426,9 +418,9 @@ public ByteBuffAllocator getByteBuffAllocator() { } private long getCacheCompactedBlocksOnWriteThreshold(Configuration conf) { - long cacheCompactedBlocksOnWriteThreshold = conf - .getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY, - DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD); + long cacheCompactedBlocksOnWriteThreshold = + conf.getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY, + DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD); if (cacheCompactedBlocksOnWriteThreshold < 0) { LOG.warn( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java index 7c5b56364098..a90b61738f12 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,15 +23,15 @@ import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram; import org.apache.yetus.audience.InterfaceAudience; - /** * Class that implements cache metrics. */ @InterfaceAudience.Private public class CacheStats { - /** Sliding window statistics. The number of metric periods to include in - * sliding window hit ratio calculations. + /** + * Sliding window statistics. The number of metric periods to include in sliding window hit ratio + * calculations. */ static final int DEFAULT_WINDOW_PERIODS = 5; @@ -43,10 +42,9 @@ public class CacheStats { private final LongAdder primaryHitCount = new LongAdder(); /** - * The number of getBlock requests that were cache hits, but only from - * requests that were set to use the block cache. This is because all reads - * attempt to read from the block cache even if they will not put new blocks - * into the block cache. See HBASE-2253 for more information. + * The number of getBlock requests that were cache hits, but only from requests that were set to + * use the block cache. This is because all reads attempt to read from the block cache even if + * they will not put new blocks into the block cache. See HBASE-2253 for more information. 
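To make the hit/hitCaching split in the javadoc above concrete: every successful lookup bumps the total counter, while only lookups that were allowed to populate the cache bump the caching counter. A minimal standalone sketch of that LongAdder pattern, not the CacheStats class itself:

// Sketch of the hit vs. hitCaching counter split described above.
import java.util.concurrent.atomic.LongAdder;

final class HitCounterSketch {
  private final LongAdder hitCount = new LongAdder();
  private final LongAdder hitCachingCount = new LongAdder();

  void hit(boolean caching) {
    hitCount.increment();          // every successful getBlock counts here
    if (caching) {
      hitCachingCount.increment(); // only requests that may cache new blocks
    }
  }

  double cachingShareOfHits() {
    long hits = hitCount.sum();
    return hits == 0 ? 0.0 : (double) hitCachingCount.sum() / hits;
  }
}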
*/ private final LongAdder hitCachingCount = new LongAdder(); @@ -56,8 +54,8 @@ public class CacheStats { /** The number of getBlock requests for primary replica that were cache misses */ private final LongAdder primaryMissCount = new LongAdder(); /** - * The number of getBlock requests that were cache misses, but only from - * requests that were set to use the block cache. + * The number of getBlock requests that were cache misses, but only from requests that were set to + * use the block cache. */ private final LongAdder missCachingCount = new LongAdder(); @@ -129,25 +127,22 @@ public CacheStats(final String name) { public CacheStats(final String name, int numPeriodsInWindow) { this.numPeriodsInWindow = numPeriodsInWindow; this.hitCounts = new long[numPeriodsInWindow]; - this.hitCachingCounts = new long[numPeriodsInWindow]; - this.requestCounts = new long[numPeriodsInWindow]; - this.requestCachingCounts = new long[numPeriodsInWindow]; + this.hitCachingCounts = new long[numPeriodsInWindow]; + this.requestCounts = new long[numPeriodsInWindow]; + this.requestCachingCounts = new long[numPeriodsInWindow]; this.ageAtEviction = new FastLongHistogram(); } @Override public String toString() { AgeSnapshot snapshot = getAgeAtEvictionSnapshot(); - return "hitCount=" + getHitCount() + ", hitCachingCount=" + getHitCachingCount() + - ", missCount=" + getMissCount() + ", missCachingCount=" + getMissCachingCount() + - ", evictionCount=" + getEvictionCount() + - ", evictedBlockCount=" + getEvictedCount() + - ", primaryMissCount=" + getPrimaryMissCount() + - ", primaryHitCount=" + getPrimaryHitCount() + - ", evictedAgeMean=" + snapshot.getMean(); + return "hitCount=" + getHitCount() + ", hitCachingCount=" + getHitCachingCount() + + ", missCount=" + getMissCount() + ", missCachingCount=" + getMissCachingCount() + + ", evictionCount=" + getEvictionCount() + ", evictedBlockCount=" + getEvictedCount() + + ", primaryMissCount=" + getPrimaryMissCount() + ", primaryHitCount=" + + getPrimaryHitCount() + ", evictedAgeMean=" + snapshot.getMean(); } - public void miss(boolean caching, boolean primary, BlockType type) { missCount.increment(); if (primary) primaryMissCount.increment(); @@ -199,7 +194,6 @@ public void hit(boolean caching, boolean primary, BlockType type) { if (primary) primaryHitCount.increment(); if (caching) hitCachingCount.increment(); - if (type == null) { return; } @@ -260,7 +254,6 @@ public long failInsert() { return failedInserts.incrementAndGet(); } - // All of the counts of misses and hits. 
public long getDataMissCount() { return dataMissCount.sum(); @@ -443,13 +436,11 @@ public long getFailedInserts() { public void rollMetricsPeriod() { hitCounts[windowIndex] = getHitCount() - lastHitCount; lastHitCount = getHitCount(); - hitCachingCounts[windowIndex] = - getHitCachingCount() - lastHitCachingCount; + hitCachingCounts[windowIndex] = getHitCachingCount() - lastHitCachingCount; lastHitCachingCount = getHitCachingCount(); requestCounts[windowIndex] = getRequestCount() - lastRequestCount; lastRequestCount = getRequestCount(); - requestCachingCounts[windowIndex] = - getRequestCachingCount() - lastRequestCachingCount; + requestCachingCounts[windowIndex] = getRequestCachingCount() - lastRequestCachingCount; lastRequestCachingCount = getRequestCachingCount(); windowIndex = (windowIndex + 1) % numPeriodsInWindow; } @@ -471,14 +462,14 @@ public long getSumRequestCachingCountsPastNPeriods() { } public double getHitRatioPastNPeriods() { - double ratio = ((double)getSumHitCountsPastNPeriods() / - (double)getSumRequestCountsPastNPeriods()); + double ratio = + ((double) getSumHitCountsPastNPeriods() / (double) getSumRequestCountsPastNPeriods()); return Double.isNaN(ratio) ? 0 : ratio; } public double getHitCachingRatioPastNPeriods() { - double ratio = ((double)getSumHitCachingCountsPastNPeriods() / - (double)getSumRequestCachingCountsPastNPeriods()); + double ratio = ((double) getSumHitCachingCountsPastNPeriods() + / (double) getSumRequestCachingCountsPastNPeriods()); return Double.isNaN(ratio) ? 0 : ratio; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java index 96c8e8275630..450792a256de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,30 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.nio.HBaseReferenceCounted; import org.apache.yetus.audience.InterfaceAudience; /** - * Cacheable is an interface that allows for an object to be cached. If using an - * on heap cache, just use heapsize. If using an off heap cache, Cacheable - * provides methods for serialization of the object. - * - * Some objects cannot be moved off heap, those objects will return a - * getSerializedLength() of 0. - * + * Cacheable is an interface that allows for an object to be cached. If using an on heap cache, just + * use heapsize. If using an off heap cache, Cacheable provides methods for serialization of the + * object. Some objects cannot be moved off heap, those objects will return a getSerializedLength() + * of 0. */ @InterfaceAudience.Private public interface Cacheable extends HeapSize, HBaseReferenceCounted { /** - * Returns the length of the ByteBuffer required to serialized the object. If the - * object cannot be serialized, it should return 0. - * + * Returns the length of the ByteBuffer required to serialized the object. If the object cannot be + * serialized, it should return 0. 
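The caller-side pattern for this contract (the same one compareCacheBlock uses earlier in the patch) is: ask for the serialized length, treat 0 as "cannot leave the heap", otherwise allocate a buffer of that size and let the object write itself into it. A minimal sketch, assuming the two-argument serialize(ByteBuffer, boolean) shown in this patch:

// Minimal caller-side sketch of the Cacheable serialization contract described above.
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.hfile.Cacheable;

final class CacheableCopySketch {
  /** Returns a buffer holding the serialized block, or null if it cannot be serialized. */
  static ByteBuffer copyOut(Cacheable block, boolean includeNextBlockMetadata) {
    int len = block.getSerializedLength();
    if (len == 0) {
      return null;                       // object cannot be moved off heap / cached externally
    }
    ByteBuffer dst = ByteBuffer.allocate(len);
    block.serialize(dst, includeNextBlockMetadata);
    return dst;
  }
}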
* @return int length in bytes of the serialized form or 0 if the object cannot be cached. */ int getSerializedLength(); @@ -53,7 +46,6 @@ public interface Cacheable extends HeapSize, HBaseReferenceCounted { /** * Returns CacheableDeserializer instance which reconstructs original object from ByteBuffer. - * * @return CacheableDeserialzer instance. */ CacheableDeserializer getDeserializer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java index e12173daba9e..3c1084fca680 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; - import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java index 3f14f4ffeb2a..0d2a0147bc13 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java @@ -1,20 +1,19 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.hfile; @@ -22,15 +21,13 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; - import org.apache.yetus.audience.InterfaceAudience; /** - * This class is used to manage the identifiers for {@link CacheableDeserializer}. - * All deserializers are registered with this Manager via the - * {@link #registerDeserializer(CacheableDeserializer)}}. On registration, we return an - * int *identifier* for this deserializer. The int identifier is passed to - * {@link #getDeserializer(int)}} to obtain the registered deserializer instance. + * This class is used to manage the identifiers for {@link CacheableDeserializer}. All deserializers + * are registered with this Manager via the {@link #registerDeserializer(CacheableDeserializer)}}. + * On registration, we return an int *identifier* for this deserializer. The int identifier is + * passed to {@link #getDeserializer(int)}} to obtain the registered deserializer instance. */ @InterfaceAudience.Private public class CacheableDeserializerIdManager { @@ -39,9 +36,9 @@ public class CacheableDeserializerIdManager { private static final AtomicInteger identifier = new AtomicInteger(0); /** - * Register the given {@link Cacheable} -- usually an hfileblock instance, these implement - * the Cacheable Interface -- deserializer and generate a unique identifier id for it and return - * this as our result. + * Register the given {@link Cacheable} -- usually an hfileblock instance, these implement the + * Cacheable Interface -- deserializer and generate a unique identifier id for it and return this + * as our result. * @return the identifier of given cacheable deserializer * @see #getDeserializer(int) */ @@ -61,10 +58,10 @@ public static CacheableDeserializer getDeserializer(int id) { } /** - * Snapshot a map of the current identifiers to class names for reconstruction on reading out - * of a file. + * Snapshot a map of the current identifiers to class names for reconstruction on reading out of a + * file. */ - public static Map save() { + public static Map save() { // No synchronization here because weakly consistent view should be good enough // The assumed risk is that we might not see a new serializer that comes in while iterating, // but with a synchronized block, we won't see it anyway diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java index 0fcef862d42c..81823dd15dbd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,9 +22,14 @@ @InterfaceAudience.Private public interface CachedBlock extends Comparable { BlockPriority getBlockPriority(); + BlockType getBlockType(); + long getOffset(); + long getSize(); + long getCachedTime(); + String getFilename(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java index f2f9d58796ac..a807821fe71e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java @@ -19,15 +19,14 @@ import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff; +import org.apache.hadoop.hbase.util.ChecksumType; +import org.apache.hadoop.util.DataChecksum; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.ChecksumType; -import org.apache.hadoop.util.DataChecksum; /** * Utility methods to compute and validate checksums. @@ -39,42 +38,35 @@ public class ChecksumUtil { public static final int CHECKSUM_BUF_SIZE = 256; /** - * This is used by unit tests to make checksum failures throw an - * exception instead of returning null. Returning a null value from - * checksum validation will cause the higher layer to retry that - * read with hdfs-level checksums. Instead, we would like checksum - * failures to cause the entire unit test to fail. + * This is used by unit tests to make checksum failures throw an exception instead of returning + * null. Returning a null value from checksum validation will cause the higher layer to retry that + * read with hdfs-level checksums. Instead, we would like checksum failures to cause the entire + * unit test to fail. */ private static boolean generateExceptions = false; /** - * Generates a checksum for all the data in indata. The checksum is - * written to outdata. + * Generates a checksum for all the data in indata. The checksum is written to outdata. * @param indata input data stream - * @param startOffset starting offset in the indata stream from where to - * compute checkums from - * @param endOffset ending offset in the indata stream upto - * which checksums needs to be computed + * @param startOffset starting offset in the indata stream from where to compute checkums from + * @param endOffset ending offset in the indata stream upto which checksums needs to be computed * @param outdata the output buffer where checksum values are written - * @param outOffset the starting offset in the outdata where the - * checksum values are written + * @param outOffset the starting offset in the outdata where the checksum values are written * @param checksumType type of checksum * @param bytesPerChecksum number of bytes per checksum value */ - static void generateChecksums(byte[] indata, int startOffset, int endOffset, - byte[] outdata, int outOffset, ChecksumType checksumType, - int bytesPerChecksum) throws IOException { + static void generateChecksums(byte[] indata, int startOffset, int endOffset, byte[] outdata, + int outOffset, ChecksumType checksumType, int bytesPerChecksum) throws IOException { if (checksumType == ChecksumType.NULL) { return; // No checksum for this block. 
} - DataChecksum checksum = DataChecksum.newDataChecksum( - checksumType.getDataChecksumType(), bytesPerChecksum); + DataChecksum checksum = + DataChecksum.newDataChecksum(checksumType.getDataChecksumType(), bytesPerChecksum); - checksum.calculateChunkedSums( - ByteBuffer.wrap(indata, startOffset, endOffset - startOffset), - ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset)); + checksum.calculateChunkedSums(ByteBuffer.wrap(indata, startOffset, endOffset - startOffset), + ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset)); } /** @@ -180,18 +172,19 @@ static boolean validateChecksum(ByteBuff buf, String pathName, long offset, int DataChecksum.newDataChecksum(ctype.getDataChecksumType(), bytesPerChecksum); assert dataChecksum != null; int onDiskDataSizeWithHeader = - buf.getInt(HFileBlock.Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX); - LOG.trace("dataLength={}, sizeWithHeader={}, checksumType={}, file={}, " - + "offset={}, headerSize={}, bytesPerChecksum={}", buf.capacity(), onDiskDataSizeWithHeader, - ctype.getName(), pathName, offset, hdrSize, bytesPerChecksum); + buf.getInt(HFileBlock.Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX); + LOG.trace( + "dataLength={}, sizeWithHeader={}, checksumType={}, file={}, " + + "offset={}, headerSize={}, bytesPerChecksum={}", + buf.capacity(), onDiskDataSizeWithHeader, ctype.getName(), pathName, offset, hdrSize, + bytesPerChecksum); ByteBuff data = buf.duplicate().position(0).limit(onDiskDataSizeWithHeader); ByteBuff checksums = buf.duplicate().position(onDiskDataSizeWithHeader).limit(buf.limit()); return verifyChunkedSums(dataChecksum, data, checksums, pathName); } /** - * Returns the number of bytes needed to store the checksums for - * a specified data size + * Returns the number of bytes needed to store the checksums for a specified data size * @param datasize number of bytes of data * @param bytesPerChecksum number of bytes in a checksum chunk * @return The number of bytes needed to store the checksum values @@ -201,14 +194,13 @@ static long numBytes(long datasize, int bytesPerChecksum) { } /** - * Returns the number of checksum chunks needed to store the checksums for - * a specified data size + * Returns the number of checksum chunks needed to store the checksums for a specified data size * @param datasize number of bytes of data * @param bytesPerChecksum number of bytes in a checksum chunk * @return The number of checksum chunks */ static long numChunks(long datasize, int bytesPerChecksum) { - long numChunks = datasize/bytesPerChecksum; + long numChunks = datasize / bytesPerChecksum; if (datasize % bytesPerChecksum != 0) { numChunks++; } @@ -216,13 +208,12 @@ static long numChunks(long datasize, int bytesPerChecksum) { } /** - * Mechanism to throw an exception in case of hbase checksum - * failure. This is used by unit tests only. - * @param value Setting this to true will cause hbase checksum - * verification failures to generate exceptions. + * Mechanism to throw an exception in case of hbase checksum failure. This is used by unit tests + * only. + * @param value Setting this to true will cause hbase checksum verification failures to generate + * exceptions. 
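A quick worked example of the numChunks ceiling division above: a 64 KB block checksummed every 16 KB needs 4 chunks. The 4-bytes-per-checksum figure in the sketch is an assumption (a CRC32-sized sum) used only for illustration; it is not taken from this patch.

// Worked example of the chunk arithmetic shown above; 4 bytes per checksum is assumed.
final class ChecksumMathSketch {
  static long numChunks(long datasize, int bytesPerChecksum) {
    long chunks = datasize / bytesPerChecksum;
    if (datasize % bytesPerChecksum != 0) {
      chunks++;                           // a partial trailing chunk still needs a checksum
    }
    return chunks;
  }

  public static void main(String[] args) {
    long dataSize = 64 * 1024;            // one 64 KB block
    int bytesPerChecksum = 16 * 1024;     // checksum every 16 KB
    long chunks = numChunks(dataSize, bytesPerChecksum);  // -> 4
    long checksumBytes = chunks * 4;                       // -> 16 bytes, assuming 4 bytes/sum
    System.out.println(chunks + " chunks, " + checksumBytes + " checksum bytes");
  }
}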
*/ public static void generateExceptionForChecksumFailureForTest(boolean value) { generateExceptions = value; } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index dc4f697bae94..45a3e3367da2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -1,37 +1,34 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.util.Iterator; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.yetus.audience.InterfaceAudience; /** - * CombinedBlockCache is an abstraction layer that combines - * {@link FirstLevelBlockCache} and {@link BucketCache}. The smaller lruCache is used - * to cache bloom blocks and index blocks. The larger Cache is used to - * cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads - * first from the smaller l1Cache before looking for the block in the l2Cache. Blocks evicted - * from l1Cache are put into the bucket cache. - * Metrics are the combined size and hits and misses of both caches. + * CombinedBlockCache is an abstraction layer that combines {@link FirstLevelBlockCache} and + * {@link BucketCache}. The smaller lruCache is used to cache bloom blocks and index blocks. The + * larger Cache is used to cache data blocks. + * {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads first from the smaller l1Cache + * before looking for the block in the l2Cache. Blocks evicted from l1Cache are put into the bucket + * cache. Metrics are the combined size and hits and misses of both caches. 
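The two-tier read path this javadoc describes reduces to "consult the small first-level cache, fall through to the bucket cache". The sketch below illustrates that shape with a hypothetical Cache interface standing in for FirstLevelBlockCache and BucketCache; it is not the CombinedBlockCache.getBlock implementation.

// Simplified sketch of the L1-then-L2 lookup described above.
interface CacheSketch<K, V> {
  boolean contains(K key);
  V get(K key);
}

final class TieredLookupSketch<K, V> {
  private final CacheSketch<K, V> l1;   // small, on-heap: index and bloom blocks
  private final CacheSketch<K, V> l2;   // large, bucket cache: data blocks

  TieredLookupSketch(CacheSketch<K, V> l1, CacheSketch<K, V> l2) {
    this.l1 = l1;
    this.l2 = l2;
  }

  V get(K key) {
    // Check the small first-level cache first; otherwise ask the bucket cache.
    return l1.contains(key) ? l1.get(key) : l2.get(key);
  }
}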
*/ @InterfaceAudience.Private public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @@ -42,8 +39,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { public CombinedBlockCache(FirstLevelBlockCache l1Cache, BlockCache l2Cache) { this.l1Cache = l1Cache; this.l2Cache = l2Cache; - this.combinedCacheStats = new CombinedCacheStats(l1Cache.getStats(), - l2Cache.getStats()); + this.combinedCacheStats = new CombinedCacheStats(l1Cache.getStats(), l2Cache.getStats()); } @Override @@ -71,8 +67,8 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { } @Override - public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, - boolean repeat, boolean updateCacheMetrics) { + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { // We are not in a position to exactly look at LRU cache or BC as BlockType may not be getting // passed always. boolean existInL1 = l1Cache.containsBlock(cacheKey); @@ -80,9 +76,8 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, // If the block does not exist in L1, the containsBlock should be counted as one miss. l1Cache.getStats().miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); } - return existInL1 ? - l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics): - l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + return existInL1 ? l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics) + : l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @Override @@ -106,8 +101,7 @@ public boolean evictBlock(BlockCacheKey cacheKey) { @Override public int evictBlocksByHfileName(String hfileName) { - return l1Cache.evictBlocksByHfileName(hfileName) - + l2Cache.evictBlocksByHfileName(hfileName); + return l1Cache.evictBlocksByHfileName(hfileName) + l2Cache.evictBlocksByHfileName(hfileName); } @Override @@ -201,8 +195,8 @@ public long getRootIndexMissCount() { @Override public long getIntermediateIndexMissCount() { - return lruCacheStats.getIntermediateIndexMissCount() + - bucketCacheStats.getIntermediateIndexMissCount(); + return lruCacheStats.getIntermediateIndexMissCount() + + bucketCacheStats.getIntermediateIndexMissCount(); } @Override @@ -212,14 +206,14 @@ public long getFileInfoMissCount() { @Override public long getGeneralBloomMetaMissCount() { - return lruCacheStats.getGeneralBloomMetaMissCount() + - bucketCacheStats.getGeneralBloomMetaMissCount(); + return lruCacheStats.getGeneralBloomMetaMissCount() + + bucketCacheStats.getGeneralBloomMetaMissCount(); } @Override public long getDeleteFamilyBloomMissCount() { - return lruCacheStats.getDeleteFamilyBloomMissCount() + - bucketCacheStats.getDeleteFamilyBloomMissCount(); + return lruCacheStats.getDeleteFamilyBloomMissCount() + + bucketCacheStats.getDeleteFamilyBloomMissCount(); } @Override @@ -254,8 +248,8 @@ public long getRootIndexHitCount() { @Override public long getIntermediateIndexHitCount() { - return lruCacheStats.getIntermediateIndexHitCount() + - bucketCacheStats.getIntermediateIndexHitCount(); + return lruCacheStats.getIntermediateIndexHitCount() + + bucketCacheStats.getIntermediateIndexHitCount(); } @Override @@ -265,14 +259,14 @@ public long getFileInfoHitCount() { @Override public long getGeneralBloomMetaHitCount() { - return lruCacheStats.getGeneralBloomMetaHitCount() + - bucketCacheStats.getGeneralBloomMetaHitCount(); + return lruCacheStats.getGeneralBloomMetaHitCount() + + bucketCacheStats.getGeneralBloomMetaHitCount(); 
} @Override public long getDeleteFamilyBloomHitCount() { - return lruCacheStats.getDeleteFamilyBloomHitCount() + - bucketCacheStats.getDeleteFamilyBloomHitCount(); + return lruCacheStats.getDeleteFamilyBloomHitCount() + + bucketCacheStats.getDeleteFamilyBloomHitCount(); } @Override @@ -282,14 +276,12 @@ public long getTrailerHitCount() { @Override public long getRequestCount() { - return lruCacheStats.getRequestCount() - + bucketCacheStats.getRequestCount(); + return lruCacheStats.getRequestCount() + bucketCacheStats.getRequestCount(); } @Override public long getRequestCachingCount() { - return lruCacheStats.getRequestCachingCount() - + bucketCacheStats.getRequestCachingCount(); + return lruCacheStats.getRequestCachingCount() + bucketCacheStats.getRequestCachingCount(); } @Override @@ -304,8 +296,7 @@ public long getPrimaryMissCount() { @Override public long getMissCachingCount() { - return lruCacheStats.getMissCachingCount() - + bucketCacheStats.getMissCachingCount(); + return lruCacheStats.getMissCachingCount() + bucketCacheStats.getMissCachingCount(); } @Override @@ -317,28 +308,25 @@ public long getHitCount() { public long getPrimaryHitCount() { return lruCacheStats.getPrimaryHitCount() + bucketCacheStats.getPrimaryHitCount(); } + @Override public long getHitCachingCount() { - return lruCacheStats.getHitCachingCount() - + bucketCacheStats.getHitCachingCount(); + return lruCacheStats.getHitCachingCount() + bucketCacheStats.getHitCachingCount(); } @Override public long getEvictionCount() { - return lruCacheStats.getEvictionCount() - + bucketCacheStats.getEvictionCount(); + return lruCacheStats.getEvictionCount() + bucketCacheStats.getEvictionCount(); } @Override public long getEvictedCount() { - return lruCacheStats.getEvictedCount() - + bucketCacheStats.getEvictedCount(); + return lruCacheStats.getEvictedCount() + bucketCacheStats.getEvictedCount(); } @Override public long getPrimaryEvictedCount() { - return lruCacheStats.getPrimaryEvictedCount() - + bucketCacheStats.getPrimaryEvictedCount(); + return lruCacheStats.getPrimaryEvictedCount() + bucketCacheStats.getPrimaryEvictedCount(); } @Override @@ -384,7 +372,7 @@ public Iterator iterator() { @Override public BlockCache[] getBlockCaches() { - return new BlockCache [] {this.l1Cache, this.l2Cache}; + return new BlockCache[] { this.l1Cache, this.l2Cache }; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java index 29f29e15a8c0..1f72cb4f8acf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,32 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io.hfile; import java.io.DataInput; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Hash; +import org.apache.yetus.audience.InterfaceAudience; /** - * A Bloom filter implementation built on top of - * {@link org.apache.hadoop.hbase.util.BloomFilterChunk}, encapsulating - * a set of fixed-size Bloom filters written out at the time of - * {@link org.apache.hadoop.hbase.io.hfile.HFile} generation into the data - * block stream, and loaded on demand at query time. This class only provides - * reading capabilities. + * A Bloom filter implementation built on top of + * {@link org.apache.hadoop.hbase.util.BloomFilterChunk}, encapsulating a set of fixed-size Bloom + * filters written out at the time of {@link org.apache.hadoop.hbase.io.hfile.HFile} generation into + * the data block stream, and loaded on demand at query time. This class only provides reading + * capabilities. */ @InterfaceAudience.Private -public class CompoundBloomFilter extends CompoundBloomFilterBase - implements BloomFilter { +public class CompoundBloomFilter extends CompoundBloomFilterBase implements BloomFilter { /** Used to load chunks on demand */ private HFile.Reader reader; @@ -55,14 +50,12 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase private long[] numPositivesPerChunk; /** - * De-serialization for compound Bloom filter metadata. Must be consistent - * with what {@link CompoundBloomFilterWriter} does. - * + * De-serialization for compound Bloom filter metadata. Must be consistent with what + * {@link CompoundBloomFilterWriter} does. * @param meta serialized Bloom filter metadata without any magic blocks * @throws IOException */ - public CompoundBloomFilter(DataInput meta, HFile.Reader reader) - throws IOException { + public CompoundBloomFilter(DataInput meta, HFile.Reader reader) throws IOException { this.reader = reader; totalByteSize = meta.readLong(); @@ -72,8 +65,8 @@ public CompoundBloomFilter(DataInput meta, HFile.Reader reader) totalMaxKeys = meta.readLong(); numChunks = meta.readInt(); byte[] comparatorClassName = Bytes.readByteArray(meta); - // The writer would have return 0 as the vint length for the case of - // Bytes.BYTES_RAWCOMPARATOR. In such cases do not initialize comparator, it can be + // The writer would have return 0 as the vint length for the case of + // Bytes.BYTES_RAWCOMPARATOR. 
In such cases do not initialize comparator, it can be // null if (comparatorClassName.length != 0) { comparator = FixedFileTrailer.createComparator(Bytes.toString(comparatorClassName)); @@ -84,7 +77,7 @@ public CompoundBloomFilter(DataInput meta, HFile.Reader reader) throw new IllegalArgumentException("Invalid hash type: " + hashType); } // We will pass null for ROW block - if(comparator == null) { + if (comparator == null) { index = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); } else { index = new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator, 1); @@ -103,7 +96,7 @@ public boolean contains(byte[] key, int keyOffset, int keyLength, ByteBuff bloom try { ByteBuff bloomBuf = bloomBlock.getBufferReadOnly(); result = BloomFilterUtil.contains(key, keyOffset, keyLength, bloomBuf, - bloomBlock.headerSize(), bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount); + bloomBlock.headerSize(), bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount); } finally { // After the use, should release the block to deallocate byte buffers. bloomBlock.release(); @@ -120,7 +113,7 @@ private HFileBlock getBloomBlock(int block) { try { // We cache the block and use a positional read. bloomBlock = reader.readBlock(index.getRootBlockOffset(block), - index.getRootBlockDataSize(block), true, true, false, true, BlockType.BLOOM_CHUNK, null); + index.getRootBlockDataSize(block), true, true, false, true, BlockType.BLOOM_CHUNK, null); } catch (IOException ex) { // The Bloom filter is broken, turn it off. throw new IllegalArgumentException("Failed to load Bloom block", ex); @@ -198,12 +191,10 @@ public long getNumPositivesForTesting(int chunk) { public String toString() { StringBuilder sb = new StringBuilder(); sb.append(BloomFilterUtil.formatStats(this)); - sb.append(BloomFilterUtil.STATS_RECORD_SEP + - "Number of chunks: " + numChunks); - sb.append(BloomFilterUtil.STATS_RECORD_SEP + - ((comparator != null) ? "Comparator: " - + comparator.getClass().getSimpleName() : "Comparator: " - + Bytes.BYTES_RAWCOMPARATOR.getClass().getSimpleName())); + sb.append(BloomFilterUtil.STATS_RECORD_SEP + "Number of chunks: " + numChunks); + sb.append(BloomFilterUtil.STATS_RECORD_SEP + + ((comparator != null) ? "Comparator: " + comparator.getClass().getSimpleName() + : "Comparator: " + Bytes.BYTES_RAWCOMPARATOR.getClass().getSimpleName())); return sb.toString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java index efc21c641408..199223882724 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,27 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. 
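The CompoundBloomFilter javadoc above describes a Bloom filter split into fixed-size chunks written into the HFile block stream and loaded on demand: contains() locates the right chunk through a root block index, reads that chunk as an HFile block, tests the key with BloomFilterUtil.contains(), and releases the block afterwards. A minimal, self-contained sketch of that chunked lookup, with simplified hashing and an in-memory stand-in for the index (hypothetical names, not the HBase implementation):

import java.util.Arrays;
import java.util.BitSet;
import java.util.List;

// Hypothetical sketch of a chunked Bloom filter read path. Each chunk covers a sorted key
// range; a lookup picks the chunk whose first key is <= the probe key, then tests the
// chunk's bit set with k hash probes.
final class ChunkedBloomReader {

  static final class BloomChunk {
    final byte[] firstKey; // first key covered by this chunk (chunks sorted by firstKey)
    final BitSet bits;     // the chunk's Bloom bits (loaded on demand in the real code)

    BloomChunk(byte[] firstKey, BitSet bits) {
      this.firstKey = firstKey;
      this.bits = bits;
    }
  }

  private final List<BloomChunk> chunks; // acts as the "root index" of this sketch
  private final int hashCount;
  private final int bitsPerChunk;

  ChunkedBloomReader(List<BloomChunk> chunks, int hashCount, int bitsPerChunk) {
    this.chunks = chunks;
    this.hashCount = hashCount;
    this.bitsPerChunk = bitsPerChunk;
  }

  boolean mightContain(byte[] key) {
    int idx = findChunk(key);
    if (idx < 0) {
      return false; // key sorts before the first chunk: definitely absent
    }
    BloomChunk chunk = chunks.get(idx);
    for (int i = 0; i < hashCount; i++) {
      int bit = Math.floorMod(hash(key, i), bitsPerChunk);
      if (!chunk.bits.get(bit)) {
        return false; // definitely absent
      }
    }
    return true; // possibly present (false positives allowed)
  }

  /** Largest chunk whose first key is <= key; -1 if none. */
  private int findChunk(byte[] key) {
    int lo = 0, hi = chunks.size() - 1, found = -1;
    while (lo <= hi) {
      int mid = (lo + hi) >>> 1;
      if (Arrays.compare(chunks.get(mid).firstKey, key) <= 0) {
        found = mid;
        lo = mid + 1;
      } else {
        hi = mid - 1;
      }
    }
    return found;
  }

  /** Simple seeded hash; the real code uses a configurable Hash implementation. */
  private static int hash(byte[] key, int seed) {
    int h = 0x9e3779b9 * (seed + 1);
    for (byte b : key) {
      h = h * 31 + b;
    }
    return h;
  }
}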
*/ - package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.util.BloomFilterBase; - import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.util.BloomFilterBase; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class CompoundBloomFilterBase implements BloomFilterBase { /** - * At read time, the total number of chunks. At write time, the number of - * chunks created so far. The first chunk has an ID of 0, and the current - * chunk has the ID of numChunks - 1. + * At read time, the total number of chunks. At write time, the number of chunks created so far. + * The first chunk has an ID of 0, and the current chunk has the ID of numChunks - 1. */ protected int numChunks; /** - * The Bloom filter version. There used to be a DynamicByteBloomFilter which - * had version 2. + * The Bloom filter version. There used to be a DynamicByteBloomFilter which had version 2. */ public static final int VERSION = 3; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java index 228b54c7ab00..2a8595b30a6e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java @@ -23,33 +23,30 @@ import java.util.ArrayDeque; import java.util.Objects; import java.util.Queue; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.BloomFilterChunk; import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Adds methods required for writing a compound Bloom filter to the data - * section of an {@link org.apache.hadoop.hbase.io.hfile.HFile} to the - * {@link CompoundBloomFilter} class. + * Adds methods required for writing a compound Bloom filter to the data section of an + * {@link org.apache.hadoop.hbase.io.hfile.HFile} to the {@link CompoundBloomFilter} class. 
*/ @InterfaceAudience.Private public class CompoundBloomFilterWriter extends CompoundBloomFilterBase implements BloomFilterWriter, InlineBlockWriter { - private static final Logger LOG = - LoggerFactory.getLogger(CompoundBloomFilterWriter.class); + private static final Logger LOG = LoggerFactory.getLogger(CompoundBloomFilterWriter.class); /** The current chunk being written to */ private BloomFilterChunk chunk; @@ -62,7 +59,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase /** The size of individual Bloom filter chunks to create */ private int chunkByteSize; - /** The prev Cell that was processed */ + /** The prev Cell that was processed */ private Cell prevCell; /** A Bloom filter chunk enqueued for writing */ @@ -86,23 +83,16 @@ private static class ReadyChunk { private BloomType bloomType; /** - * @param chunkByteSizeHint - * each chunk's size in bytes. The real chunk size might be different - * as required by the fold factor. - * @param errorRate - * target false positive rate - * @param hashType - * hash function type to use - * @param maxFold - * maximum degree of folding allowed - * @param bloomType - * the bloom type + * @param chunkByteSizeHint each chunk's size in bytes. The real chunk size might be different as + * required by the fold factor. + * @param errorRate target false positive rate + * @param hashType hash function type to use + * @param maxFold maximum degree of folding allowed + * @param bloomType the bloom type */ - public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, - int hashType, int maxFold, boolean cacheOnWrite, - CellComparator comparator, BloomType bloomType) { - chunkByteSize = BloomFilterUtil.computeFoldableByteSize( - chunkByteSizeHint * 8L, maxFold); + public CompoundBloomFilterWriter(int chunkByteSizeHint, float errorRate, int hashType, + int maxFold, boolean cacheOnWrite, CellComparator comparator, BloomType bloomType) { + chunkByteSize = BloomFilterUtil.computeFoldableByteSize(chunkByteSizeHint * 8L, maxFold); this.errorRate = errorRate; this.hashType = hashType; @@ -120,20 +110,17 @@ public boolean shouldWriteBlock(boolean closing) { /** * Enqueue the current chunk if it is ready to be written out. 
- * - * @param closing true if we are closing the file, so we do not expect new - * keys to show up + * @param closing true if we are closing the file, so we do not expect new keys to show up */ private void enqueueReadyChunk(boolean closing) { - if (chunk == null || - (chunk.getKeyCount() < chunk.getMaxKeys() && !closing)) { + if (chunk == null || (chunk.getKeyCount() < chunk.getMaxKeys() && !closing)) { return; } if (firstKeyInChunk == null) { - throw new NullPointerException("Trying to enqueue a chunk, " + - "but first key is null: closing=" + closing + ", keyCount=" + - chunk.getKeyCount() + ", maxKeys=" + chunk.getMaxKeys()); + throw new NullPointerException( + "Trying to enqueue a chunk, " + "but first key is null: closing=" + closing + + ", keyCount=" + chunk.getKeyCount() + ", maxKeys=" + chunk.getMaxKeys()); } ReadyChunk readyChunk = new ReadyChunk(); @@ -148,10 +135,9 @@ private void enqueueReadyChunk(boolean closing) { chunk.compactBloom(); if (LOG.isTraceEnabled() && prevByteSize != chunk.getByteSize()) { - LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from [" - + prevMaxKeys + " max keys, " + prevByteSize + " bytes] to [" - + chunk.getMaxKeys() + " max keys, " + chunk.getByteSize() - + " bytes]"); + LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from [" + prevMaxKeys + + " max keys, " + prevByteSize + " bytes] to [" + chunk.getMaxKeys() + " max keys, " + + chunk.getByteSize() + " bytes]"); } totalMaxKeys += chunk.getMaxKeys(); @@ -170,14 +156,13 @@ public void append(Cell cell) throws IOException { if (chunk == null) { if (firstKeyInChunk != null) { - throw new IllegalStateException("First key in chunk already set: " - + Bytes.toStringBinary(firstKeyInChunk)); + throw new IllegalStateException( + "First key in chunk already set: " + Bytes.toStringBinary(firstKeyInChunk)); } // This will be done only once per chunk if (bloomType == BloomType.ROWCOL) { - firstKeyInChunk = - PrivateCellUtil - .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell)); + firstKeyInChunk = PrivateCellUtil + .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell)); } else { firstKeyInChunk = CellUtil.copyRow(cell); } @@ -204,8 +189,7 @@ public Cell getPrevCell() { private void allocateNewChunk() { if (prevChunk == null) { // First chunk - chunk = BloomFilterUtil.createBySize(chunkByteSize, errorRate, - hashType, maxFold, bloomType); + chunk = BloomFilterUtil.createBySize(chunkByteSize, errorRate, hashType, maxFold, bloomType); } else { // Use the same parameters as the last chunk, but a new array and // a zero key count. @@ -213,13 +197,13 @@ private void allocateNewChunk() { } if (chunk.getKeyCount() != 0) { - throw new IllegalStateException("keyCount=" + chunk.getKeyCount() - + " > 0"); + throw new IllegalStateException("keyCount=" + chunk.getKeyCount() + " > 0"); } chunk.allocBloom(); ++numChunks; } + @Override public void writeInlineBlock(DataOutput out) throws IOException { // We don't remove the chunk from the queue here, because we might need it @@ -242,7 +226,8 @@ public BlockType getInlineBlockType() { } private class MetaWriter implements Writable { - protected MetaWriter() {} + protected MetaWriter() { + } @Override public void readFields(DataInput in) throws IOException { @@ -250,11 +235,11 @@ public void readFields(DataInput in) throws IOException { } /** - * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity, - * although the two metadata formats do not have to be consistent. 
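The writer-side hunks above show the mirror image: CompoundBloomFilterWriter sizes each chunk with BloomFilterUtil.computeFoldableByteSize(chunkByteSizeHint * 8L, maxFold), records the first key of every chunk, and enqueueReadyChunk() queues the current chunk once it reaches its key capacity (or unconditionally when the file is closing) before a fresh chunk is allocated. A simplified, hedged sketch of that flush-when-full pattern (hypothetical names, not the HBase writer):

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Queue;
import java.util.Set;

// Hypothetical sketch: keys go into the current chunk; once the chunk reaches its key
// capacity it is enqueued for writing and a new chunk is started. On close, the last
// partially filled chunk is enqueued unconditionally.
final class ChunkedBloomWriterSketch {

  static final class Chunk {
    final byte[] firstKey;                      // first key covered by this chunk
    final Set<Integer> bits = new HashSet<>();  // stand-in for the real Bloom bit array
    int keyCount;

    Chunk(byte[] firstKey) {
      this.firstKey = firstKey;
    }
  }

  private final int maxKeysPerChunk;
  private final Queue<Chunk> readyChunks = new ArrayDeque<>();
  private Chunk current;

  ChunkedBloomWriterSketch(int maxKeysPerChunk) {
    this.maxKeysPerChunk = maxKeysPerChunk;
  }

  void append(byte[] key) {
    enqueueReadyChunk(false);          // flush the current chunk first if it is already full
    if (current == null) {
      current = new Chunk(key.clone());
    }
    current.bits.add(Arrays.hashCode(key)); // the real writer sets several hashed bit positions
    current.keyCount++;
  }

  /** Enqueue the current chunk if it is full, or unconditionally when closing. */
  private void enqueueReadyChunk(boolean closing) {
    if (current == null || (current.keyCount < maxKeysPerChunk && !closing)) {
      return;
    }
    readyChunks.add(current);          // the real writer also folds (compacts) the chunk here
    current = null;
  }

  Queue<Chunk> close() {
    enqueueReadyChunk(true);
    return readyChunks;
  }
}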
This - * does have to be consistent with how {@link - * CompoundBloomFilter#CompoundBloomFilter(DataInput, - * org.apache.hadoop.hbase.io.hfile.HFile.Reader)} reads fields. + * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity, although + * the two metadata formats do not have to be consistent. This does have to be consistent with + * how + * {@link CompoundBloomFilter#CompoundBloomFilter(DataInput, org.apache.hadoop.hbase.io.hfile.HFile.Reader)} + * reads fields. */ @Override public void write(DataOutput out) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java index 28516c6bab4e..6a40ab1b15b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CorruptHFileException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.yetus.audience.InterfaceAudience; /** * This exception is thrown when attempts to read an HFile fail due to corruption or truncation diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java index 7f7cc3e41b27..a843dac203c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ExclusiveMemHFileBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,8 +38,8 @@ public class ExclusiveMemHFileBlock extends HFileBlock { ExclusiveMemHFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader, - long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, - HFileContext fileContext, ByteBuffAllocator alloc) { + long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext, + ByteBuffAllocator alloc) { super(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, buf, fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader, fileContext, alloc); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java index a0c34c9fe3ef..34d6c8d926b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FirstLevelBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ public interface FirstLevelBlockCache extends ResizableBlockCache, HeapSize { /** * Whether the cache contains the block with specified cacheKey - * * @param cacheKey cache key for the block * @return true if it contains the block */ @@ -37,7 +36,6 @@ public interface FirstLevelBlockCache extends ResizableBlockCache, HeapSize { /** * Specifies the secondary cache. An entry that is evicted from this cache due to a size * constraint will be inserted into the victim cache. - * * @param victimCache the second level cache * @throws IllegalArgumentException if the victim cache had already been set */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 6a2dcf926a4f..880045aaf326 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +17,6 @@ */ package org.apache.hadoop.hbase.io.hfile; - import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInput; @@ -36,20 +34,20 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos; /** - * The {@link HFile} has a fixed trailer which contains offsets to other - * variable parts of the file. Also includes basic metadata on this file. The - * trailer size is fixed within a given {@link HFile} format version only, but - * we always store the version number as the last four-byte integer of the file. - * The version number itself is split into two portions, a major - * version and a minor version. The last three bytes of a file are the major - * version and a single preceding byte is the minor number. The major version - * determines which readers/writers to use to read/write a hfile while a minor - * version determines smaller changes in hfile format that do not need a new - * reader/writer type. + * The {@link HFile} has a fixed trailer which contains offsets to other variable parts of the file. + * Also includes basic metadata on this file. The trailer size is fixed within a given {@link HFile} + * format version only, but we always store the version number as the last four-byte integer of the + * file. The version number itself is split into two portions, a major version and a minor version. + * The last three bytes of a file are the major version and a single preceding byte is the minor + * number. The major version determines which readers/writers to use to read/write a hfile while a + * minor version determines smaller changes in hfile format that do not need a new reader/writer + * type. */ @InterfaceAudience.Private public class FixedFileTrailer { @@ -61,17 +59,16 @@ public class FixedFileTrailer { private static final int MAX_COMPARATOR_NAME_LENGTH = 128; /** - * Offset to the fileinfo data, a small block of vitals. Necessary in v1 but - * only potentially useful for pretty-printing in v2. + * Offset to the fileinfo data, a small block of vitals. 
Necessary in v1 but only potentially + * useful for pretty-printing in v2. */ private long fileInfoOffset; /** - * In version 1, the offset to the data block index. Starting from version 2, - * the meaning of this field is the offset to the section of the file that - * should be loaded at the time the file is being opened: i.e. on open we load - * the root index, file info, etc. See http://hbase.apache.org/book.html#_hfile_format_2 - * in the reference guide. + * In version 1, the offset to the data block index. Starting from version 2, the meaning of this + * field is the offset to the section of the file that should be loaded at the time the file is + * being opened: i.e. on open we load the root index, file info, etc. See + * http://hbase.apache.org/book.html#_hfile_format_2 in the reference guide. */ private long loadOnOpenDataOffset; @@ -96,8 +93,7 @@ public class FixedFileTrailer { private long totalUncompressedBytes; /** - * The number of key/value pairs in the file. This field was int in version 1, - * but is now long. + * The number of key/value pairs in the file. This field was int in version 1, but is now long. */ private long entryCount; @@ -107,8 +103,7 @@ public class FixedFileTrailer { private Compression.Algorithm compressionCodec = Compression.Algorithm.NONE; /** - * The number of levels in the potentially multi-level data index. Used from - * version 2 onwards. + * The number of levels in the potentially multi-level data index. Used from version 2 onwards. */ private int numDataIndexLevels; @@ -118,8 +113,7 @@ public class FixedFileTrailer { private long firstDataBlockOffset; /** - * It is guaranteed that no key/value data blocks start after this offset in - * the file. + * It is guaranteed that no key/value data blocks start after this offset in the file. */ private long lastDataBlockOffset; @@ -185,9 +179,8 @@ public int getTrailerSize() { } /** - * Write the trailer to a data stream. We support writing version 1 for - * testing and for determining version 1 trailer size. It is also easy to see - * what fields changed in version 2. + * Write the trailer to a data stream. We support writing version 1 for testing and for + * determining version 1 trailer size. It is also easy to see what fields changed in version 2. 
*/ void serialize(DataOutputStream outputStream) throws IOException { HFile.checkFormatVersion(majorVersion); @@ -206,18 +199,14 @@ void serialize(DataOutputStream outputStream) throws IOException { HFileProtos.FileTrailerProto toProtobuf() { HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder() - .setFileInfoOffset(fileInfoOffset) - .setLoadOnOpenDataOffset(loadOnOpenDataOffset) - .setUncompressedDataIndexSize(uncompressedDataIndexSize) - .setTotalUncompressedBytes(totalUncompressedBytes) - .setDataIndexCount(dataIndexCount) - .setMetaIndexCount(metaIndexCount) - .setEntryCount(entryCount) - .setNumDataIndexLevels(numDataIndexLevels) - .setFirstDataBlockOffset(firstDataBlockOffset) - .setLastDataBlockOffset(lastDataBlockOffset) - .setComparatorClassName(getHBase1CompatibleName(comparatorClassName)) - .setCompressionCodec(compressionCodec.ordinal()); + .setFileInfoOffset(fileInfoOffset).setLoadOnOpenDataOffset(loadOnOpenDataOffset) + .setUncompressedDataIndexSize(uncompressedDataIndexSize) + .setTotalUncompressedBytes(totalUncompressedBytes).setDataIndexCount(dataIndexCount) + .setMetaIndexCount(metaIndexCount).setEntryCount(entryCount) + .setNumDataIndexLevels(numDataIndexLevels).setFirstDataBlockOffset(firstDataBlockOffset) + .setLastDataBlockOffset(lastDataBlockOffset) + .setComparatorClassName(getHBase1CompatibleName(comparatorClassName)) + .setCompressionCodec(compressionCodec.ordinal()); if (encryptionKey != null) { builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey)); } @@ -225,9 +214,8 @@ HFileProtos.FileTrailerProto toProtobuf() { } /** - * Write trailer data as protobuf. - * NOTE: we run a translation on the comparator name and will serialize the old hbase-1.x where - * it makes sense. See {@link #getHBase1CompatibleName(String)}. + * Write trailer data as protobuf. NOTE: we run a translation on the comparator name and will + * serialize the old hbase-1.x where it makes sense. See {@link #getHBase1CompatibleName(String)}. */ void serializeAsPB(DataOutputStream output) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); @@ -249,9 +237,8 @@ void serializeAsPB(DataOutputStream output) throws IOException { } /** - * Deserialize the fixed file trailer from the given stream. The version needs - * to already be specified. Make sure this is consistent with - * {@link #serialize(DataOutputStream)}. + * Deserialize the fixed file trailer from the given stream. The version needs to already be + * specified. Make sure this is consistent with {@link #serialize(DataOutputStream)}. 
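The FixedFileTrailer javadoc above insists that deserialize() stay consistent with serialize(): the trailer is a fixed-size record, so fields must be read back in exactly the order and width in which they were written. A tiny self-contained illustration of that contract (TinyTrailer is hypothetical and far smaller than the real trailer):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical trailer-like record: read order and widths must mirror the write side exactly.
final class TinyTrailer {
  long fileInfoOffset;
  long entryCount;
  int numDataIndexLevels;

  void serialize(DataOutput out) throws IOException {
    out.writeLong(fileInfoOffset);
    out.writeLong(entryCount);
    out.writeInt(numDataIndexLevels);
  }

  static TinyTrailer deserialize(DataInput in) throws IOException {
    TinyTrailer t = new TinyTrailer();
    t.fileInfoOffset = in.readLong();   // same order and types as serialize()
    t.entryCount = in.readLong();
    t.numDataIndexLevels = in.readInt();
    return t;
  }

  public static void main(String[] args) throws IOException {
    TinyTrailer original = new TinyTrailer();
    original.fileInfoOffset = 1024;
    original.entryCount = 42;
    original.numDataIndexLevels = 2;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    original.serialize(new DataOutputStream(baos));
    TinyTrailer copy = TinyTrailer.deserialize(
      new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
    System.out.println(copy.entryCount); // 42: the round trip preserves every field
  }
}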
*/ void deserialize(DataInputStream inputStream) throws IOException { HFile.checkFormatVersion(majorVersion); @@ -259,7 +246,7 @@ void deserialize(DataInputStream inputStream) throws IOException { BlockType.TRAILER.readAndCheck(inputStream); if (majorVersion > 2 - || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)) { + || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)) { deserializeFromPB(inputStream); } else { deserializeFromWritable(inputStream); @@ -278,7 +265,7 @@ void deserializeFromPB(DataInputStream inputStream) throws IOException { // read PB and skip padding int start = inputStream.available(); HFileProtos.FileTrailerProto trailerProto = - HFileProtos.FileTrailerProto.PARSER.parseDelimitedFrom(inputStream); + HFileProtos.FileTrailerProto.PARSER.parseDelimitedFrom(inputStream); int size = start - inputStream.available(); inputStream.skip(getTrailerSize() - NOT_PB_SIZE - size); @@ -342,10 +329,10 @@ void deserializeFromWritable(DataInput input) throws IOException { numDataIndexLevels = input.readInt(); firstDataBlockOffset = input.readLong(); lastDataBlockOffset = input.readLong(); - // TODO this is a classname encoded into an HFile's trailer. We are going to need to have + // TODO this is a classname encoded into an HFile's trailer. We are going to need to have // some compat code here. - setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input, - MAX_COMPARATOR_NAME_LENGTH))); + setComparatorClass( + getComparatorClass(Bytes.readStringFixedSize(input, MAX_COMPARATOR_NAME_LENGTH))); } private void append(StringBuilder sb, String s) { @@ -381,19 +368,16 @@ public String toString() { /** * Reads a file trailer from the given file. - * - * @param istream the input stream with the ability to seek. Does not have to - * be buffered, as only one read operation is made. + * @param istream the input stream with the ability to seek. Does not have to be buffered, as only + * one read operation is made. * @param fileSize the file size. Can be obtained using - * {@link org.apache.hadoop.fs.FileSystem#getFileStatus( - *org.apache.hadoop.fs.Path)}. + * {@link org.apache.hadoop.fs.FileSystem#getFileStatus( org.apache.hadoop.fs.Path)}. * @return the fixed file trailer read - * @throws IOException if failed to read from the underlying stream, or the - * trailer is corrupted, or the version of the trailer is - * unsupported + * @throws IOException if failed to read from the underlying stream, or the trailer is corrupted, + * or the version of the trailer is unsupported */ - public static FixedFileTrailer readFromStream(FSDataInputStream istream, - long fileSize) throws IOException { + public static FixedFileTrailer readFromStream(FSDataInputStream istream, long fileSize) + throws IOException { int bufferSize = MAX_TRAILER_SIZE; long seekPoint = fileSize - bufferSize; if (seekPoint < 0) { @@ -405,8 +389,7 @@ public static FixedFileTrailer readFromStream(FSDataInputStream istream, HFileUtil.seekOnMultipleSources(istream, seekPoint); ByteBuffer buf = ByteBuffer.allocate(bufferSize); - istream.readFully(buf.array(), buf.arrayOffset(), - buf.arrayOffset() + buf.limit()); + istream.readFully(buf.array(), buf.arrayOffset(), buf.arrayOffset() + buf.limit()); // Read the version from the last int of the file. 
buf.position(buf.limit() - Bytes.SIZEOF_INT); @@ -422,30 +405,28 @@ public static FixedFileTrailer readFromStream(FSDataInputStream istream, FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion); fft.deserialize(new DataInputStream(new ByteArrayInputStream(buf.array(), - buf.arrayOffset() + bufferSize - trailerSize, trailerSize))); + buf.arrayOffset() + bufferSize - trailerSize, trailerSize))); return fft; } public void expectMajorVersion(int expected) { if (majorVersion != expected) { - throw new IllegalArgumentException("Invalid HFile major version: " - + majorVersion - + " (expected: " + expected + ")"); + throw new IllegalArgumentException( + "Invalid HFile major version: " + majorVersion + " (expected: " + expected + ")"); } } public void expectMinorVersion(int expected) { if (minorVersion != expected) { - throw new IllegalArgumentException("Invalid HFile minor version: " - + minorVersion + " (expected: " + expected + ")"); + throw new IllegalArgumentException( + "Invalid HFile minor version: " + minorVersion + " (expected: " + expected + ")"); } } public void expectAtLeastMajorVersion(int lowerBound) { if (majorVersion < lowerBound) { - throw new IllegalArgumentException("Invalid HFile major version: " - + majorVersion - + " (expected: " + lowerBound + " or higher)."); + throw new IllegalArgumentException("Invalid HFile major version: " + majorVersion + + " (expected: " + lowerBound + " or higher)."); } } @@ -569,21 +550,20 @@ public void setComparatorClass(Class klass) { } /** - * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather - * than the new name; writing the new name will make it so newly-written hfiles are not parseable - * by hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters + * If a 'standard' Comparator, write the old name for the Comparator when we serialize rather than + * the new name; writing the new name will make it so newly-written hfiles are not parseable by + * hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters * reading hbase-2.x produce. *

- * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare - * KeyValues. In hbase-2.x they were renamed making use of the more generic 'Cell' - * nomenclature to indicate that we intend to move away from KeyValues post hbase-2. A naming - * change is not reason enough to make it so hbase-1.x cannot read hbase-2.x files given the - * structure goes unchanged (hfile v3). So, lets write the old names for Comparators into the - * hfile tails in hbase-2. Here is where we do the translation. - * {@link #getComparatorClass(String)} does translation going the other way. - * - * The translation is done on the serialized Protobuf only. - *
+ * The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare KeyValues. In + * hbase-2.x they were renamed making use of the more generic 'Cell' nomenclature to indicate that + * we intend to move away from KeyValues post hbase-2. A naming change is not reason enough to + * make it so hbase-1.x cannot read hbase-2.x files given the structure goes unchanged (hfile v3). + * So, lets write the old names for Comparators into the hfile tails in hbase-2. Here is where we + * do the translation. {@link #getComparatorClass(String)} does translation going the other way. + * + * The translation is done on the serialized Protobuf only. + *

        * @param comparator String class name of the Comparator used in this hfile. * @return What to store in the trailer as our comparator name. * @see #getComparatorClass(String) @@ -603,42 +583,46 @@ private String getHBase1CompatibleName(final String comparator) { @SuppressWarnings("unchecked") private static Class getComparatorClass(String comparatorClassName) - throws IOException { + throws IOException { Class comparatorKlass; // for BC if (comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName()) - || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName()) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))) { + || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName()) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))) { comparatorKlass = CellComparatorImpl.class; } else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()) - || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) - || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) - || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))) { - comparatorKlass = MetaCellComparator.class; - } else if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator") - || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")) { - // When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR, we just return null from here - // Bytes.BYTES_RAWCOMPARATOR is not a CellComparator - comparatorKlass = null; - } else { - // if the name wasn't one of the legacy names, maybe its a legit new kind of comparator. - try { - comparatorKlass = (Class) Class.forName(comparatorClassName); - } catch (ClassNotFoundException e) { - throw new IOException(e); - } - } + || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName()) + || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator")) + || (comparatorClassName + .equals("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator")) + || (comparatorClassName.equals("org.apache.hadoop.hbase.MetaCellComparator"))) { + comparatorKlass = MetaCellComparator.class; + } else + if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator") + || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")) { + // When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR, we just return null from + // here + // Bytes.BYTES_RAWCOMPARATOR is not a CellComparator + comparatorKlass = null; + } else { + // if the name wasn't one of the legacy names, maybe its a legit new kind of comparator. 
+ try { + comparatorKlass = + (Class) Class.forName(comparatorClassName); + } catch (ClassNotFoundException e) { + throw new IOException(e); + } + } return comparatorKlass; } static CellComparator createComparator(String comparatorClassName) throws IOException { if (comparatorClassName.equals(CellComparatorImpl.COMPARATOR.getClass().getName())) { return CellComparatorImpl.COMPARATOR; - } else if (comparatorClassName.equals( - MetaCellComparator.META_COMPARATOR.getClass().getName())) { - return MetaCellComparator.META_COMPARATOR; - } + } else + if (comparatorClassName.equals(MetaCellComparator.META_COMPARATOR.getClass().getName())) { + return MetaCellComparator.META_COMPARATOR; + } try { Class comparatorClass = getComparatorClass(comparatorClassName); if (comparatorClass != null) { @@ -660,8 +644,7 @@ public long getUncompressedDataIndexSize() { return uncompressedDataIndexSize; } - public void setUncompressedDataIndexSize( - long uncompressedDataIndexSize) { + public void setUncompressedDataIndexSize(long uncompressedDataIndexSize) { expectAtLeastMajorVersion(2); this.uncompressedDataIndexSize = uncompressedDataIndexSize; } @@ -678,24 +661,23 @@ public void setEncryptionKey(byte[] keyBytes) { } /** - * Extracts the major version for a 4-byte serialized version data. - * The major version is the 3 least significant bytes + * Extracts the major version for a 4-byte serialized version data. The major version is the 3 + * least significant bytes */ private static int extractMajorVersion(int serializedVersion) { return (serializedVersion & 0x00ffffff); } /** - * Extracts the minor version for a 4-byte serialized version data. - * The major version are the 3 the most significant bytes + * Extracts the minor version for a 4-byte serialized version data. The major version are the 3 + * the most significant bytes */ private static int extractMinorVersion(int serializedVersion) { return (serializedVersion >>> 24); } /** - * Create a 4 byte serialized version number by combining the - * minor and major version numbers. + * Create a 4 byte serialized version number by combining the minor and major version numbers. */ static int materializeVersion(int majorVersion, int minorVersion) { return ((majorVersion & 0x00ffffff) | (minorVersion << 24)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 2e6c19edfca2..1063e18a2c68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,68 +54,66 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * File format for hbase. - * A file of sorted key/value pairs. Both keys and values are byte arrays. + * File format for hbase. A file of sorted key/value pairs. Both keys and values are byte arrays. *
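The FixedFileTrailer hunks above end with the version helpers: the trailer version is a single four-byte integer stored at the very end of the file, with the minor version in the most significant byte and the major version in the low three bytes, which is exactly what extractMajorVersion, extractMinorVersion and materializeVersion implement. A stand-alone check of that packing, using the same masks and shifts shown in the diff:

// Sketch of the 4-byte version packing used by the trailer: minor version in the top byte,
// major version in the low three bytes (mirrors extractMajorVersion / extractMinorVersion /
// materializeVersion above). Class name is hypothetical.
final class VersionPacking {
  static int pack(int major, int minor) {
    return (major & 0x00ffffff) | (minor << 24);
  }

  static int major(int packed) {
    return packed & 0x00ffffff;
  }

  static int minor(int packed) {
    return packed >>> 24; // unsigned shift keeps the minor version non-negative
  }

  public static void main(String[] args) {
    int v = pack(3, 0);
    System.out.println("major=" + major(v) + " minor=" + minor(v)); // major=3 minor=0
  }
}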

        - * The memory footprint of a HFile includes the following (below is taken from the - * TFile documentation - * but applies also to HFile): + * The memory footprint of a HFile includes the following (below is taken from the TFile documentation but applies also + * to HFile): *

          *
        • Some constant overhead of reading or writing a compressed block. *
            - *
          • Each compressed block requires one compression/decompression codec for - * I/O. + *
          • Each compressed block requires one compression/decompression codec for I/O. *
          • Temporary space to buffer the key. *
          • Temporary space to buffer the value. *
          - *
        • HFile index, which is proportional to the total number of Data Blocks. - * The total amount of memory needed to hold the index can be estimated as - * (56+AvgKeySize)*NumBlocks. + *
        • HFile index, which is proportional to the total number of Data Blocks. The total amount of + * memory needed to hold the index can be estimated as (56+AvgKeySize)*NumBlocks. *
        * Suggestions on performance optimization. *
          - *
        • Minimum block size. We recommend a setting of minimum block size between - * 8KB to 1MB for general usage. Larger block size is preferred if files are - * primarily for sequential access. However, it would lead to inefficient random - * access (because there are more data to decompress). Smaller blocks are good - * for random access, but require more memory to hold the block index, and may - * be slower to create (because we must flush the compressor stream at the - * conclusion of each data block, which leads to an FS I/O flush). Further, due - * to the internal caching in Compression codec, the smallest possible block - * size would be around 20KB-30KB. - *
        • The current implementation does not offer true multi-threading for - * reading. The implementation uses FSDataInputStream seek()+read(), which is - * shown to be much faster than positioned-read call in single thread mode. - * However, it also means that if multiple threads attempt to access the same - * HFile (using multiple scanners) simultaneously, the actual I/O is carried out - * sequentially even if they access different DFS blocks (Reexamine! pread seems - * to be 10% faster than seek+read in my testing -- stack). - *
        • Compression codec. Use "none" if the data is not very compressable (by - * compressable, I mean a compression ratio at least 2:1). Generally, use "lzo" - * as the starting point for experimenting. "gz" overs slightly better - * compression ratio over "lzo" but requires 4x CPU to compress and 2x CPU to - * decompress, comparing to "lzo". + *
        • Minimum block size. We recommend a setting of minimum block size between 8KB to 1MB for + * general usage. Larger block size is preferred if files are primarily for sequential access. + * However, it would lead to inefficient random access (because there are more data to decompress). + * Smaller blocks are good for random access, but require more memory to hold the block index, and + * may be slower to create (because we must flush the compressor stream at the conclusion of each + * data block, which leads to an FS I/O flush). Further, due to the internal caching in Compression + * codec, the smallest possible block size would be around 20KB-30KB. + *
        • The current implementation does not offer true multi-threading for reading. The + * implementation uses FSDataInputStream seek()+read(), which is shown to be much faster than + * positioned-read call in single thread mode. However, it also means that if multiple threads + * attempt to access the same HFile (using multiple scanners) simultaneously, the actual I/O is + * carried out sequentially even if they access different DFS blocks (Reexamine! pread seems to be + * 10% faster than seek+read in my testing -- stack). + *
        • Compression codec. Use "none" if the data is not very compressable (by compressable, I mean a + * compression ratio at least 2:1). Generally, use "lzo" as the starting point for experimenting. + * "gz" overs slightly better compression ratio over "lzo" but requires 4x CPU to compress and 2x + * CPU to decompress, comparing to "lzo". *
        - * * For more on the background behind HFile, see HBASE-61. *

- * File is made of data blocks followed by meta data blocks (if any), a fileinfo - * block, data block index, meta data block index, and a fixed size trailer - * which records the offsets at which file changes content type. - * <data blocks><meta blocks><fileinfo>< - * data index><meta index><trailer> - * Each block has a bit of magic at its start. Block are comprised of - * key/values. In data blocks, they are both byte arrays. Metadata blocks are - * a String key and a byte array value. An empty file looks like this: - * <fileinfo><trailer>. That is, there are not data nor meta - * blocks present.
+ * File is made of data blocks followed by meta data blocks (if any), a fileinfo block, data block + * index, meta data block index, and a fixed size trailer which records the offsets at which file + * changes content type. + * + * + * <data blocks><meta blocks><fileinfo>< + * data index><meta index><trailer> + * + * + * Each block has a bit of magic at its start. Block are comprised of key/values. In data blocks, + * they are both byte arrays. Metadata blocks are a String key and a byte array value. An empty file + * looks like this: + * + * + * <fileinfo><trailer> + * + * + * . That is, there are not data nor meta blocks present. *

        - * TODO: Do scanners need to be able to take a start and end row? - * TODO: Should BlockIndex know the name of its file? Should it have a Path - * that points at its file say for the case where an index lives apart from - * an HFile instance? + * TODO: Do scanners need to be able to take a start and end row? TODO: Should BlockIndex know the + * name of its file? Should it have a Path that points at its file say for the case where an index + * lives apart from an HFile instance? */ @InterfaceAudience.Private public final class HFile { @@ -132,12 +129,13 @@ public final class HFile { * Default compression: none. */ public final static Compression.Algorithm DEFAULT_COMPRESSION_ALGORITHM = - Compression.Algorithm.NONE; + Compression.Algorithm.NONE; /** Minimum supported HFile format version */ public static final int MIN_FORMAT_VERSION = 2; - /** Maximum supported HFile format version + /** + * Maximum supported HFile format version */ public static final int MAX_FORMAT_VERSION = 3; @@ -147,17 +145,15 @@ public final class HFile { public static final int MIN_FORMAT_VERSION_WITH_TAGS = 3; /** Default compression name: none. */ - public final static String DEFAULT_COMPRESSION = - DEFAULT_COMPRESSION_ALGORITHM.getName(); + public final static String DEFAULT_COMPRESSION = DEFAULT_COMPRESSION_ALGORITHM.getName(); /** Meta data block name for bloom filter bits. */ public static final String BLOOM_FILTER_DATA_KEY = "BLOOM_FILTER_DATA"; /** - * We assume that HFile path ends with - * ROOT_DIR/TABLE_NAME/REGION_NAME/CF_NAME/HFILE, so it has at least this - * many levels of nesting. This is needed for identifying table and CF name - * from an HFile path. + * We assume that HFile path ends with ROOT_DIR/TABLE_NAME/REGION_NAME/CF_NAME/HFILE, so it has at + * least this many levels of nesting. This is needed for identifying table and CF name from an + * HFile path. */ public final static int MIN_NUM_HFILE_PATH_LEVELS = 5; @@ -178,19 +174,18 @@ public final class HFile { /** * Shutdown constructor. */ - private HFile() {} + private HFile() { + } /** - * Number of checksum verification failures. It also - * clears the counter. + * Number of checksum verification failures. It also clears the counter. */ public static final long getAndResetChecksumFailuresCount() { return CHECKSUM_FAILURES.sumThenReset(); } /** - * Number of checksum verification failures. It also - * clears the counter. + * Number of checksum verification failures. It also clears the counter. */ public static final long getChecksumFailuresCount() { return CHECKSUM_FAILURES.sum(); @@ -211,7 +206,7 @@ public static final void updateWriteLatency(long latencyMillis) { /** API required to write an {@link HFile} */ public interface Writer extends Closeable, CellSink, ShipperListener { /** Max memstore (mvcc) timestamp in FileInfo */ - public static final byte [] MAX_MEMSTORE_TS_KEY = Bytes.toBytes("MAX_MEMSTORE_TS_KEY"); + public static final byte[] MAX_MEMSTORE_TS_KEY = Bytes.toBytes("MAX_MEMSTORE_TS_KEY"); /** Add an element to the file info map. */ void appendFileInfo(byte[] key, byte[] value) throws IOException; @@ -220,29 +215,27 @@ public interface Writer extends Closeable, CellSink, ShipperListener { Path getPath(); /** - * Adds an inline block writer such as a multi-level block index writer or - * a compound Bloom filter writer. + * Adds an inline block writer such as a multi-level block index writer or a compound Bloom + * filter writer. 
*/ void addInlineBlockWriter(InlineBlockWriter bloomWriter); - // The below three methods take Writables. We'd like to undo Writables but undoing the below - // would be pretty painful. Could take a byte [] or a Message but we want to be backward + // The below three methods take Writables. We'd like to undo Writables but undoing the below + // would be pretty painful. Could take a byte [] or a Message but we want to be backward // compatible around hfiles so would need to map between Message and Writable or byte [] and - // current Writable serialization. This would be a bit of work to little gain. Thats my - // thinking at moment. St.Ack 20121129 + // current Writable serialization. This would be a bit of work to little gain. Thats my + // thinking at moment. St.Ack 20121129 void appendMetaBlock(String bloomFilterMetaKey, Writable metaWriter); /** - * Store general Bloom filter in the file. This does not deal with Bloom filter - * internals but is necessary, since Bloom filters are stored differently - * in HFile version 1 and version 2. + * Store general Bloom filter in the file. This does not deal with Bloom filter internals but is + * necessary, since Bloom filters are stored differently in HFile version 1 and version 2. */ void addGeneralBloomFilter(BloomFilterWriter bfw); /** - * Store delete family Bloom filter in the file, which is only supported in - * HFile V2. + * Store delete family Bloom filter in the file, which is only supported in HFile V2. */ void addDeleteFamilyBloomFilter(BloomFilterWriter bfw) throws IOException; @@ -253,8 +246,8 @@ public interface Writer extends Closeable, CellSink, ShipperListener { } /** - * This variety of ways to construct writers is used throughout the code, and - * we want to be able to swap writer implementations. + * This variety of ways to construct writers is used throughout the code, and we want to be able + * to swap writer implementations. */ public static class WriterFactory { protected final Configuration conf; @@ -301,11 +294,9 @@ public WriterFactory withShouldDropCacheBehind(boolean shouldDropBehind) { return this; } - public Writer create() throws IOException { if ((path != null ? 1 : 0) + (ostream != null ? 1 : 0) != 1) { - throw new AssertionError("Please specify exactly one of " + - "filesystem/path or path"); + throw new AssertionError("Please specify exactly one of " + "filesystem/path or path"); } if (path != null) { ostream = HFileWriterImpl.createOutputStream(conf, fs, path, favoredNodes); @@ -330,39 +321,36 @@ public static int getFormatVersion(Configuration conf) { } /** - * Returns the factory to be used to create {@link HFile} writers. - * Disables block cache access for all writers created through the - * returned factory. + * Returns the factory to be used to create {@link HFile} writers. Disables block cache access for + * all writers created through the returned factory. */ - public static final WriterFactory getWriterFactoryNoCache(Configuration - conf) { + public static final WriterFactory getWriterFactoryNoCache(Configuration conf) { return HFile.getWriterFactory(conf, CacheConfig.DISABLED); } /** * Returns the factory to be used to create {@link HFile} writers */ - public static final WriterFactory getWriterFactory(Configuration conf, - CacheConfig cacheConf) { + public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) { int version = getFormatVersion(conf); switch (version) { case 2: - throw new IllegalArgumentException("This should never happen. 
" + - "Did you change hfile.format.version to read v2? This version of the software writes v3" + - " hfiles only (but it can read v2 files without having to update hfile.format.version " + - "in hbase-site.xml)"); + throw new IllegalArgumentException("This should never happen. " + + "Did you change hfile.format.version to read v2? This version of the software writes v3" + + " hfiles only (but it can read v2 files without having to update hfile.format.version " + + "in hbase-site.xml)"); case 3: return new HFile.WriterFactory(conf, cacheConf); default: - throw new IllegalArgumentException("Cannot create writer for HFile " + - "format version " + version); + throw new IllegalArgumentException( + "Cannot create writer for HFile " + "format version " + version); } } /** - * An abstraction used by the block index. - * Implementations will check cache for any asked-for block and return cached block if found. - * Otherwise, after reading from fs, will try and put block into cache before returning. + * An abstraction used by the block index. Implementations will check cache for any asked-for + * block and return cached block if found. Otherwise, after reading from fs, will try and put + * block into cache before returning. */ public interface CachingBlockReader { /** @@ -370,35 +358,33 @@ public interface CachingBlockReader { * @param offset offset to read. * @param onDiskBlockSize size of the block * @param isCompaction is this block being read as part of a compaction - * @param expectedBlockType the block type we are expecting to read with this read operation, - * or null to read whatever block type is available and avoid checking (that might reduce - * caching efficiency of encoded data blocks) + * @param expectedBlockType the block type we are expecting to read with this read operation, or + * null to read whatever block type is available and avoid checking (that might reduce + * caching efficiency of encoded data blocks) * @param expectedDataBlockEncoding the data block encoding the caller is expecting data blocks - * to be in, or null to not perform this check and return the block irrespective of the - * encoding. This check only applies to data blocks and can be set to null when the caller is - * expecting to read a non-data block and has set expectedBlockType accordingly. + * to be in, or null to not perform this check and return the block irrespective of the + * encoding. This check only applies to data blocks and can be set to null when the + * caller is expecting to read a non-data block and has set expectedBlockType + * accordingly. * @return Block wrapped in a ByteBuffer. */ - HFileBlock readBlock(long offset, long onDiskBlockSize, - boolean cacheBlock, final boolean pread, final boolean isCompaction, - final boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) - throws IOException; + HFileBlock readBlock(long offset, long onDiskBlockSize, boolean cacheBlock, final boolean pread, + final boolean isCompaction, final boolean updateCacheMetrics, BlockType expectedBlockType, + DataBlockEncoding expectedDataBlockEncoding) throws IOException; } /** An interface used by clients to open and iterate an {@link HFile}. */ public interface Reader extends Closeable, CachingBlockReader { /** - * Returns this reader's "name". Usually the last component of the path. - * Needs to be constant as the file is being moved to support caching on - * write. + * Returns this reader's "name". Usually the last component of the path. 
Needs to be constant as + * the file is being moved to support caching on write. */ String getName(); CellComparator getComparator(); HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread, - boolean isCompaction); + boolean isCompaction); HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException; @@ -421,23 +407,23 @@ HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread, FixedFileTrailer getTrailer(); void setDataBlockIndexReader(HFileBlockIndex.CellBasedKeyBlockIndexReader reader); + HFileBlockIndex.CellBasedKeyBlockIndexReader getDataBlockIndexReader(); void setMetaBlockIndexReader(HFileBlockIndex.ByteArrayKeyBlockIndexReader reader); + HFileBlockIndex.ByteArrayKeyBlockIndexReader getMetaBlockIndexReader(); HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread); /** - * Retrieves general Bloom filter metadata as appropriate for each - * {@link HFile} version. - * Knows nothing about how that metadata is structured. + * Retrieves general Bloom filter metadata as appropriate for each {@link HFile} version. Knows + * nothing about how that metadata is structured. */ DataInput getGeneralBloomFilterMetadata() throws IOException; /** - * Retrieves delete family Bloom filter metadata as appropriate for each - * {@link HFile} version. + * Retrieves delete family Bloom filter metadata as appropriate for each {@link HFile} version. * Knows nothing about how that metadata is structured. */ DataInput getDeleteBloomFilterMetadata() throws IOException; @@ -471,14 +457,15 @@ HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread, void unbufferStream(); ReaderContext getContext(); + HFileInfo getHFileInfo(); + void setDataBlockEncoder(HFileDataBlockEncoder dataBlockEncoder); } /** - * Method returns the reader given the specified arguments. - * TODO This is a bad abstraction. See HBASE-6635. - * + * Method returns the reader given the specified arguments. TODO This is a bad abstraction. See + * HBASE-6635. * @param context Reader context info * @param fileInfo HFile info * @param cacheConf Cache configuation values, cannot be null. 
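The CachingBlockReader contract spelled out above, check the block cache for the requested block and, on a miss, read it from the filesystem and populate the cache before returning, is a read-through cache. A generic, hedged sketch of that pattern (hypothetical names, not the HBase block cache API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Generic read-through cache sketch: return the cached value if present, otherwise load it
// from the backing store and cache it before returning. Not the HBase block cache.
final class ReadThroughCache<K, V> {

  interface Loader<K, V> {
    V load(K key) throws Exception;
  }

  private final Map<K, V> cache = new ConcurrentHashMap<>();
  private final Loader<K, V> loader;

  ReadThroughCache(Loader<K, V> loader) {
    this.loader = loader;
  }

  V get(K key) throws Exception {
    V cached = cache.get(key);
    if (cached != null) {
      return cached;                  // cache hit
    }
    V loaded = loader.load(key);      // e.g. read the block from the filesystem
    cache.putIfAbsent(key, loaded);   // concurrent callers may load twice; harmless in a sketch
    return loaded;
  }
}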
@@ -486,8 +473,8 @@ HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread, * @return an appropriate instance of HFileReader * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", + justification = "Intentional") public static Reader createReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, Configuration conf) throws IOException { try { @@ -508,8 +495,8 @@ public static Reader createReader(ReaderContext context, HFileInfo fileInfo, } catch (Throwable t) { IOUtils.closeQuietly(context.getInputStreamWrapper(), e -> LOG.warn("failed to close input stream wrapper", e)); - throw new CorruptHFileException("Problem reading HFile Trailer from file " - + context.getFilePath(), t); + throw new CorruptHFileException( + "Problem reading HFile Trailer from file " + context.getFilePath(), t); } finally { context.getInputStreamWrapper().unbuffer(); } @@ -521,8 +508,8 @@ public static Reader createReader(ReaderContext context, HFileInfo fileInfo, * @param path Path to file to read * @param conf Configuration * @return an active Reader instance - * @throws IOException Will throw a CorruptHFileException - * (DoNotRetryIOException subtype) if hfile is corrupt/invalid. + * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile + * is corrupt/invalid. */ public static Reader createReader(FileSystem fs, Path path, Configuration conf) throws IOException { @@ -546,14 +533,10 @@ public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheCon boolean primaryReplicaReader, Configuration conf) throws IOException { Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf"); FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path); - ReaderContext context = new ReaderContextBuilder() - .withFilePath(path) - .withInputStreamWrapper(stream) - .withFileSize(fs.getFileStatus(path).getLen()) - .withFileSystem(stream.getHfs()) - .withPrimaryReplicaReader(primaryReplicaReader) - .withReaderType(ReaderType.PREAD) - .build(); + ReaderContext context = new ReaderContextBuilder().withFilePath(path) + .withInputStreamWrapper(stream).withFileSize(fs.getFileStatus(path).getLen()) + .withFileSystem(stream.getHfs()).withPrimaryReplicaReader(primaryReplicaReader) + .withReaderType(ReaderType.PREAD).build(); HFileInfo fileInfo = new HFileInfo(context, conf); Reader reader = createReader(context, fileInfo, cacheConf, conf); fileInfo.initMetaAndIndex(reader); @@ -593,12 +576,9 @@ public static boolean isHFileFormat(final FileSystem fs, final FileStatus fileSt } /** - * Get names of supported compression algorithms. The names are acceptable by - * HFile.Writer. - * - * @return Array of strings, each represents a supported compression - * algorithm. Currently, the following compression algorithms are - * supported. + * Get names of supported compression algorithms. The names are acceptable by HFile.Writer. + * @return Array of strings, each represents a supported compression algorithm. Currently, the + * following compression algorithms are supported. *

 *         <ul>
 *         <li>"none" - No compression.
        • "gz" - GZIP compression. @@ -616,29 +596,26 @@ public static String[] getSupportedCompressionAlgorithms() { static int longToInt(final long l) { // Expecting the size() of a block not exceeding 4GB. Assuming the // size() will wrap to negative integer if it exceeds 2GB (From tfile). - return (int)(l & 0x00000000ffffffffL); + return (int) (l & 0x00000000ffffffffL); } /** - * Returns all HFiles belonging to the given region directory. Could return an - * empty list. - * - * @param fs The file system reference. - * @param regionDir The region directory to scan. + * Returns all HFiles belonging to the given region directory. Could return an empty list. + * @param fs The file system reference. + * @param regionDir The region directory to scan. * @return The list of files found. * @throws IOException When scanning the files fails. */ - public static List getStoreFiles(FileSystem fs, Path regionDir) - throws IOException { + public static List getStoreFiles(FileSystem fs, Path regionDir) throws IOException { List regionHFiles = new ArrayList<>(); PathFilter dirFilter = new FSUtils.DirFilter(fs); FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter); - for(FileStatus dir : familyDirs) { + for (FileStatus dir : familyDirs) { FileStatus[] files = fs.listStatus(dir.getPath()); for (FileStatus file : files) { - if (!file.isDirectory() && - (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) && - (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR))) { + if (!file.isDirectory() + && (!file.getPath().toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) + && (!file.getPath().toString().contains(HConstants.RECOVERED_EDITS_DIR))) { regionHFiles.add(file.getPath()); } } @@ -647,32 +624,28 @@ public static List getStoreFiles(FileSystem fs, Path regionDir) } /** - * Checks the given {@link HFile} format version, and throws an exception if - * invalid. Note that if the version number comes from an input file and has - * not been verified, the caller needs to re-throw an {@link IOException} to - * indicate that this is not a software error, but corrupted input. - * + * Checks the given {@link HFile} format version, and throws an exception if invalid. Note that if + * the version number comes from an input file and has not been verified, the caller needs to + * re-throw an {@link IOException} to indicate that this is not a software error, but corrupted + * input. 
* @param version an HFile version * @throws IllegalArgumentException if the version is invalid */ - public static void checkFormatVersion(int version) - throws IllegalArgumentException { + public static void checkFormatVersion(int version) throws IllegalArgumentException { if (version < MIN_FORMAT_VERSION || version > MAX_FORMAT_VERSION) { - throw new IllegalArgumentException("Invalid HFile version: " + version - + " (expected to be " + "between " + MIN_FORMAT_VERSION + " and " - + MAX_FORMAT_VERSION + ")"); + throw new IllegalArgumentException("Invalid HFile version: " + version + " (expected to be " + + "between " + MIN_FORMAT_VERSION + " and " + MAX_FORMAT_VERSION + ")"); } } - public static void checkHFileVersion(final Configuration c) { int version = c.getInt(FORMAT_VERSION_KEY, MAX_FORMAT_VERSION); if (version < MAX_FORMAT_VERSION || version > MAX_FORMAT_VERSION) { - throw new IllegalArgumentException("The setting for " + FORMAT_VERSION_KEY + - " (in your hbase-*.xml files) is " + version + " which does not match " + - MAX_FORMAT_VERSION + - "; are you running with a configuration from an older or newer hbase install (an " + - "incompatible hbase-default.xml or hbase-site.xml on your CLASSPATH)?"); + throw new IllegalArgumentException( + "The setting for " + FORMAT_VERSION_KEY + " (in your hbase-*.xml files) is " + version + + " which does not match " + MAX_FORMAT_VERSION + + "; are you running with a configuration from an older or newer hbase install (an " + + "incompatible hbase-default.xml or hbase-site.xml on your CLASSPATH)?"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 7c7fa4ef8c36..6c553e9be95c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.hfile; import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP; + import java.io.DataInputStream; import java.io.DataOutput; import java.io.DataOutputStream; @@ -28,7 +29,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -62,56 +62,51 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Cacheable Blocks of an {@link HFile} version 2 file. - * Version 2 was introduced in hbase-0.92.0. - * - *

- * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
- * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support
- * for Version 1 was removed in hbase-1.3.0.
- *
- * <h1>HFileBlock: Version 2</h1>
- * In version 2, a block is structured as follows:
+ * Cacheable Blocks of an {@link HFile} version 2 file. Version 2 was introduced in hbase-0.92.0.
+ * <p>
+ * Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
+ * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support for
+ * Version 1 was removed in hbase-1.3.0.
+ * <h1>HFileBlock: Version 2</h1>
+ * In version 2, a block is structured as follows:
 * <ul>
 * <li>Header: See Writer#putHeader() for where header is written; header total size is
 * HFILEBLOCK_HEADER_SIZE
 * <ul>
- * <li>0. blockType: Magic record identifying the {@link BlockType} (8 bytes):
- * e.g. DATABLK*
+ * <li>0. blockType: Magic record identifying the {@link BlockType} (8 bytes): e.g.
+ * DATABLK*
 * <li>1. onDiskSizeWithoutHeader: Compressed -- a.k.a 'on disk' -- block size, excluding header,
 * but including tailing checksum bytes (4 bytes)
 * <li>2. uncompressedSizeWithoutHeader: Uncompressed block size, excluding header, and excluding
 * checksum bytes (4 bytes)
- * <li>3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is
- * used to navigate to the previous block without having to go to the block index
+ * <li>3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is used
+ * to navigate to the previous block without having to go to the block index
 * <li>4: For minorVersions >=1, the ordinal describing checksum type (1 byte)
 * <li>5: For minorVersions >=1, the number of data bytes/checksum chunk (4 bytes)
 * <li>6: onDiskDataSizeWithHeader: For minorVersions >=1, the size of data 'on disk', including
 * header, excluding checksums (4 bytes)
 * </ul>
 * </li>
- * <li>Raw/Compressed/Encrypted/Encoded data: The compression
- * algorithm is the same for all the blocks in an {@link HFile}. If compression is NONE, this is
- * just raw, serialized Cells.
- * <li>Tail: For minorVersions >=1, a series of 4 byte checksums, one each for
- * the number of bytes specified by bytesPerChecksum.
+ * <li>Raw/Compressed/Encrypted/Encoded data: The compression algorithm is the same for all
+ * the blocks in an {@link HFile}. If compression is NONE, this is just raw, serialized Cells.
+ * <li>Tail: For minorVersions >=1, a series of 4 byte checksums, one each for the number
+ * of bytes specified by bytesPerChecksum.
 * </ul>
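The list above fully determines the version-2 header layout when HBase checksums are in use: 8 + 4 + 4 + 8 + 1 + 4 + 4 = 33 bytes. Below is a small, self-contained sketch that decodes those fields from a ByteBuffer purely for illustration; the field names mirror the list, and the code is not part of this patch.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

final class V2HeaderDump {
  // Decodes the header fields in the order the list above gives them (minor version >= 1 layout).
  static void dump(ByteBuffer b) {
    byte[] magic = new byte[8];
    b.get(magic);                                    // 0. blockType magic, e.g. DATABLK*
    int onDiskSizeWithoutHeader = b.getInt();        // 1. on-disk size, no header, incl. checksums
    int uncompressedSizeWithoutHeader = b.getInt();  // 2. uncompressed size, no header, no checksums
    long prevBlockOffset = b.getLong();              // 3. offset of previous block of the same type
    byte checksumType = b.get();                     // 4. checksum type ordinal
    int bytesPerChecksum = b.getInt();               // 5. bytes covered by each checksum value
    int onDiskDataSizeWithHeader = b.getInt();       // 6. header + data, excluding checksums
    System.out.printf("%s onDisk=%d uncompressed=%d prev=%d checksum=%d/%d dataWithHeader=%d%n",
      new String(magic, StandardCharsets.US_ASCII), onDiskSizeWithoutHeader,
      uncompressedSizeWithoutHeader, prevBlockOffset, checksumType, bytesPerChecksum,
      onDiskDataSizeWithHeader);
  }
}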
          - * - *

          Caching

          - * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the - * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase' - * checksums and then the offset into the file which is needed when we re-make a cache key - * when we return the block to the cache as 'done'. - * See {@link Cacheable#serialize(ByteBuffer, boolean)} and {@link Cacheable#getDeserializer()}. - * - *

          TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where - * we make a block to cache-on-write, there is an attempt at turning off checksums. This is not the - * only place we get blocks to cache. We also will cache the raw return from an hdfs read. In this - * case, the checksums may be present. If the cache is backed by something that doesn't do ECC, - * say an SSD, we might want to preserve checksums. For now this is open question. - *

          TODO: Over in BucketCache, we save a block allocation by doing a custom serialization. - * Be sure to change it if serialization changes in here. Could we add a method here that takes an - * IOEngine and that then serializes to it rather than expose our internals over in BucketCache? - * IOEngine is in the bucket subpackage. Pull it up? Then this class knows about bucketcache. Ugh. + *

          Caching

          Caches cache whole blocks with trailing checksums if any. We then tag on some + * metadata, the content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase' + * checksums and then the offset into the file which is needed when we re-make a cache key when we + * return the block to the cache as 'done'. See {@link Cacheable#serialize(ByteBuffer, boolean)} and + * {@link Cacheable#getDeserializer()}. + *

          + * TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where we make + * a block to cache-on-write, there is an attempt at turning off checksums. This is not the only + * place we get blocks to cache. We also will cache the raw return from an hdfs read. In this case, + * the checksums may be present. If the cache is backed by something that doesn't do ECC, say an + * SSD, we might want to preserve checksums. For now this is open question. + *

          + * TODO: Over in BucketCache, we save a block allocation by doing a custom serialization. Be sure to + * change it if serialization changes in here. Could we add a method here that takes an IOEngine and + * that then serializes to it rather than expose our internals over in BucketCache? IOEngine is in + * the bucket subpackage. Pull it up? Then this class knows about bucketcache. Ugh. */ @InterfaceAudience.Private public class HFileBlock implements Cacheable { @@ -162,48 +157,47 @@ static class Header { private long prevBlockOffset; /** - * Size on disk of header + data. Excludes checksum. Header field 6, - * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum. + * Size on disk of header + data. Excludes checksum. Header field 6, OR calculated from + * {@link #onDiskSizeWithoutHeader} when using HDFS checksum. * @see Writer#putHeader(byte[], int, int, int, int) */ private int onDiskDataSizeWithHeader; // End of Block Header fields. /** - * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by - * a single ByteBuffer or by many. Make no assumptions. - * - *

          Be careful reading from this buf. Duplicate and work on the duplicate or if - * not, be sure to reset position and limit else trouble down the road. - * - *

          TODO: Make this read-only once made. - * - *

          We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have - * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache. - * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be - * good if could be confined to cache-use only but hard-to-do. + * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by a + * single ByteBuffer or by many. Make no assumptions. + *

          + * Be careful reading from this buf. Duplicate and work on the duplicate or if not, + * be sure to reset position and limit else trouble down the road. + *

          + * TODO: Make this read-only once made. + *

          + * We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have a + * ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache. So, + * we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be good if + * could be confined to cache-use only but hard-to-do. */ private ByteBuff buf; - /** Meta data that holds meta information on the hfileblock. + /** + * Meta data that holds meta information on the hfileblock. */ private HFileContext fileContext; /** - * The offset of this block in the file. Populated by the reader for - * convenience of access. This offset is not part of the block header. + * The offset of this block in the file. Populated by the reader for convenience of access. This + * offset is not part of the block header. */ private long offset = UNSET; /** - * The on-disk size of the next block, including the header and checksums if present. - * UNSET if unknown. - * - * Blocks try to carry the size of the next block to read in this data member. Usually - * we get block sizes from the hfile index but sometimes the index is not available: - * e.g. when we read the indexes themselves (indexes are stored in blocks, we do not - * have an index for the indexes). Saves seeks especially around file open when - * there is a flurry of reading in hfile metadata. + * The on-disk size of the next block, including the header and checksums if present. UNSET if + * unknown. Blocks try to carry the size of the next block to read in this data member. Usually we + * get block sizes from the hfile index but sometimes the index is not available: e.g. when we + * read the indexes themselves (indexes are stored in blocks, we do not have an index for the + * indexes). Saves seeks especially around file open when there is a flurry of reading in hfile + * metadata. */ private int nextBlockOnDiskSize = UNSET; @@ -221,22 +215,21 @@ static class Header { // How to get the estimate correctly? if it is a singleBB? public static final int MULTI_BYTE_BUFFER_HEAP_SIZE = - (int)ClassSize.estimateBase(MultiByteBuff.class, false); + (int) ClassSize.estimateBase(MultiByteBuff.class, false); /** - * Space for metadata on a block that gets stored along with the block when we cache it. - * There are a few bytes stuck on the end of the HFileBlock that we pull in from HDFS. - * 8 bytes are for the offset of this block (long) in the file. Offset is important because is is - * used when we remake the CacheKey when we return block to the cache when done. There is also - * a flag on whether checksumming is being done by hbase or not. See class comment for note on - * uncertain state of checksumming of blocks that come out of cache (should we or should we not?). - * Finally there are 4 bytes to hold the length of the next block which can save a seek on - * occasion if available. - * (This EXTRA info came in with original commit of the bucketcache, HBASE-7404. It was - * formerly known as EXTRA_SERIALIZATION_SPACE). + * Space for metadata on a block that gets stored along with the block when we cache it. There are + * a few bytes stuck on the end of the HFileBlock that we pull in from HDFS. 8 bytes are for the + * offset of this block (long) in the file. Offset is important because is is used when we remake + * the CacheKey when we return block to the cache when done. There is also a flag on whether + * checksumming is being done by hbase or not. 
See class comment for note on uncertain state of + * checksumming of blocks that come out of cache (should we or should we not?). Finally there are + * 4 bytes to hold the length of the next block which can save a seek on occasion if available. + * (This EXTRA info came in with original commit of the bucketcache, HBASE-7404. It was formerly + * known as EXTRA_SERIALIZATION_SPACE). */ public static final int BLOCK_METADATA_SPACE = - Bytes.SIZEOF_BYTE + Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT; + Bytes.SIZEOF_BYTE + Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT; /** * Each checksum value is an integer that can be stored in 4 bytes. @@ -247,9 +240,7 @@ static class Header { new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM]; /** - * Used deserializing blocks from Cache. - * - * + * Used deserializing blocks from Cache. * ++++++++++++++ * + HFileBlock + * ++++++++++++++ @@ -267,8 +258,7 @@ private BlockDeserializer() { } @Override - public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc) - throws IOException { + public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc) throws IOException { // The buf has the file block followed by block metadata. // Set limit to just before the BLOCK_METADATA_SPACE then rewind. buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind(); @@ -296,15 +286,16 @@ public int getDeserializerIdentifier() { } /** - * Creates a new {@link HFile} block from the given fields. This constructor - * is used only while writing blocks and caching, - * and is sitting in a byte buffer and we want to stuff the block into cache. - * - *
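As a concrete reading of the comment above: BLOCK_METADATA_SPACE is 1 + 8 + 4 = 13 bytes. The fragment below only spells out that arithmetic and packs the three fields in the order the comment lists them; fileOffset, usesHBaseChecksum and nextBlockOnDiskSize are hypothetical locals, and the real field order is whatever HFileBlock's serialization actually writes.

// Not the real serialization code; an illustrative packing of the cache metadata described above.
int metadataSpace = Bytes.SIZEOF_BYTE + Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT; // 1 + 8 + 4 = 13
ByteBuffer metadata = ByteBuffer.allocate(metadataSpace);
metadata.putLong(fileOffset);                     // needed to remake the cache key on return
metadata.put((byte) (usesHBaseChecksum ? 1 : 0)); // flag: is hbase doing the checksumming?
metadata.putInt(nextBlockOnDiskSize);             // saves a seek on the next read, if known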

- * <p>TODO: The caller presumes no checksumming
- *
- * <p>TODO: HFile block writer can also off-heap ?
          - * required of this block instance since going into cache; checksum already verified on - * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD? - * + * Creates a new {@link HFile} block from the given fields. This constructor is used only while + * writing blocks and caching, and is sitting in a byte buffer and we want to stuff the block into + * cache. + *

          + * TODO: The caller presumes no checksumming + *

          + * TODO: HFile block writer can also off-heap ? + *

          + * required of this block instance since going into cache; checksum already verified on underlying + * block data pulled in from filesystem. Is that correct? What if cache is SSD? * @param blockType the type of this block, see {@link BlockType} * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader} * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader} @@ -336,10 +327,9 @@ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, } /** - * Creates a block from an existing buffer starting with a header. Rewinds - * and takes ownership of the buffer. By definition of rewind, ignores the - * buffer position, but if you slice the buffer beforehand, it will rewind - * to that point. + * Creates a block from an existing buffer starting with a header. Rewinds and takes ownership of + * the buffer. By definition of rewind, ignores the buffer position, but if you slice the buffer + * beforehand, it will rewind to that point. * @param buf Has header, content, and trailing checksums if present. */ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final long offset, @@ -353,8 +343,8 @@ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX); // This constructor is called when we deserialize a block from cache and when we read a block in // from the fs. fileCache is null when deserialized from cache so need to make up one. - HFileContextBuilder fileContextBuilder = fileContext != null ? - new HFileContextBuilder(fileContext) : new HFileContextBuilder(); + HFileContextBuilder fileContextBuilder = + fileContext != null ? new HFileContextBuilder(fileContext) : new HFileContextBuilder(); fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum); int onDiskDataSizeWithHeader; if (usesHBaseChecksum) { @@ -372,18 +362,13 @@ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final } fileContext = fileContextBuilder.build(); assert usesHBaseChecksum == fileContext.isUseHBaseChecksum(); - return new HFileBlockBuilder() - .withBlockType(blockType) + return new HFileBlockBuilder().withBlockType(blockType) .withOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader) .withUncompressedSizeWithoutHeader(uncompressedSizeWithoutHeader) - .withPrevBlockOffset(prevBlockOffset) - .withOffset(offset) + .withPrevBlockOffset(prevBlockOffset).withOffset(offset) .withOnDiskDataSizeWithHeader(onDiskDataSizeWithHeader) - .withNextBlockOnDiskSize(nextBlockOnDiskSize) - .withHFileContext(fileContext) - .withByteBuffAllocator(allocator) - .withByteBuff(buf.rewind()) - .withShared(!buf.hasArray()) + .withNextBlockOnDiskSize(nextBlockOnDiskSize).withHFileContext(fileContext) + .withByteBuffAllocator(allocator).withByteBuff(buf.rewind()).withShared(!buf.hasArray()) .build(); } @@ -393,15 +378,14 @@ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final * @param verifyChecksum true if checksum verification is in use. * @return Size of the block with header included. 
*/ - private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, - boolean verifyChecksum) { + private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, boolean verifyChecksum) { return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(verifyChecksum); } /** * @return the on-disk size of the next block (including the header size and any checksums if - * present) read by peeking into the next block's header; use as a hint when doing - * a read of the next block when scanning or running over a file. + * present) read by peeking into the next block's header; use as a hint when doing a read + * of the next block when scanning or running over a file. */ int getNextBlockOnDiskSize() { return nextBlockOnDiskSize; @@ -435,8 +419,8 @@ public boolean release() { /** @return get data block encoding id that was used to encode this block */ short getDataBlockEncodingId() { if (blockType != BlockType.ENCODED_DATA) { - throw new IllegalArgumentException("Querying encoder ID of a block " + - "of type other than " + BlockType.ENCODED_DATA + ": " + blockType); + throw new IllegalArgumentException("Querying encoder ID of a block " + "of type other than " + + BlockType.ENCODED_DATA + ": " + blockType); } return buf.getShort(headerSize()); } @@ -463,16 +447,15 @@ int getUncompressedSizeWithoutHeader() { } /** - * @return the offset of the previous block of the same type in the file, or - * -1 if unknown + * @return the offset of the previous block of the same type in the file, or -1 if unknown */ long getPrevBlockOffset() { return prevBlockOffset; } /** - * Rewinds {@code buf} and writes first 4 header fields. {@code buf} position - * is modified as side-effect. + * Rewinds {@code buf} and writes first 4 header fields. {@code buf} position is modified as + * side-effect. */ private void overwriteHeader() { buf.rewind(); @@ -510,10 +493,9 @@ public ByteBuff getBufferWithoutHeader(boolean withChecksum) { * Returns a read-only duplicate of the buffer this block stores internally ready to be read. * Clients must not modify the buffer object though they may set position and limit on the * returned buffer since we pass back a duplicate. This method has to be public because it is used - * in {@link CompoundBloomFilter} to avoid object creation on every Bloom - * filter lookup, but has to be used with caution. Buffer holds header, block content, - * and any follow-on checksums if present. - * + * in {@link CompoundBloomFilter} to avoid object creation on every Bloom filter lookup, but has + * to be used with caution. Buffer holds header, block content, and any follow-on checksums if + * present. 
* @return the buffer of this block for read-only operations */ public ByteBuff getBufferReadOnly() { @@ -527,8 +509,8 @@ public ByteBuffAllocator getByteBuffAllocator() { return this.allocator; } - private void sanityCheckAssertion(long valueFromBuf, long valueFromField, - String fieldName) throws IOException { + private void sanityCheckAssertion(long valueFromBuf, long valueFromField, String fieldName) + throws IOException { if (valueFromBuf != valueFromField) { throw new AssertionError(fieldName + " in the buffer (" + valueFromBuf + ") is different from that in the field (" + valueFromField + ")"); @@ -538,18 +520,17 @@ private void sanityCheckAssertion(long valueFromBuf, long valueFromField, private void sanityCheckAssertion(BlockType valueFromBuf, BlockType valueFromField) throws IOException { if (valueFromBuf != valueFromField) { - throw new IOException("Block type stored in the buffer: " + - valueFromBuf + ", block type field: " + valueFromField); + throw new IOException("Block type stored in the buffer: " + valueFromBuf + + ", block type field: " + valueFromField); } } /** * Checks if the block is internally consistent, i.e. the first - * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a - * valid header consistent with the fields. Assumes a packed block structure. - * This function is primary for testing and debugging, and is not - * thread-safe, because it alters the internal buffer pointer. - * Used by tests only. + * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a valid header consistent + * with the fields. Assumes a packed block structure. This function is primary for testing and + * debugging, and is not thread-safe, because it alters the internal buffer pointer. Used by tests + * only. */ void sanityCheck() throws IOException { // Duplicate so no side-effects @@ -559,13 +540,13 @@ void sanityCheck() throws IOException { sanityCheckAssertion(dup.getInt(), onDiskSizeWithoutHeader, "onDiskSizeWithoutHeader"); sanityCheckAssertion(dup.getInt(), uncompressedSizeWithoutHeader, - "uncompressedSizeWithoutHeader"); + "uncompressedSizeWithoutHeader"); sanityCheckAssertion(dup.getLong(), prevBlockOffset, "prevBlockOffset"); if (this.fileContext.isUseHBaseChecksum()) { sanityCheckAssertion(dup.get(), this.fileContext.getChecksumType().getCode(), "checksumType"); sanityCheckAssertion(dup.getInt(), this.fileContext.getBytesPerChecksum(), - "bytesPerChecksum"); + "bytesPerChecksum"); sanityCheckAssertion(dup.getInt(), onDiskDataSizeWithHeader, "onDiskDataSizeWithHeader"); } @@ -580,50 +561,44 @@ void sanityCheck() throws IOException { int hdrSize = headerSize(); dup.rewind(); if (dup.remaining() != expectedBufLimit && dup.remaining() != expectedBufLimit + hdrSize) { - throw new AssertionError("Invalid buffer capacity: " + dup.remaining() + - ", expected " + expectedBufLimit + " or " + (expectedBufLimit + hdrSize)); + throw new AssertionError("Invalid buffer capacity: " + dup.remaining() + ", expected " + + expectedBufLimit + " or " + (expectedBufLimit + hdrSize)); } } @Override public String toString() { - StringBuilder sb = new StringBuilder() - .append("[") - .append("blockType=").append(blockType) - .append(", fileOffset=").append(offset) - .append(", headerSize=").append(headerSize()) - .append(", onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader) - .append(", uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader) - .append(", prevBlockOffset=").append(prevBlockOffset) - .append(", 
isUseHBaseChecksum=").append(fileContext.isUseHBaseChecksum()); + StringBuilder sb = new StringBuilder().append("[").append("blockType=").append(blockType) + .append(", fileOffset=").append(offset).append(", headerSize=").append(headerSize()) + .append(", onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader) + .append(", uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader) + .append(", prevBlockOffset=").append(prevBlockOffset).append(", isUseHBaseChecksum=") + .append(fileContext.isUseHBaseChecksum()); if (fileContext.isUseHBaseChecksum()) { sb.append(", checksumType=").append(ChecksumType.codeToType(this.buf.get(24))) - .append(", bytesPerChecksum=").append(this.buf.getInt(24 + 1)) - .append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader); + .append(", bytesPerChecksum=").append(this.buf.getInt(24 + 1)) + .append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader); } else { - sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader) - .append("(").append(onDiskSizeWithoutHeader) - .append("+").append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")"); + sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader).append("(") + .append(onDiskSizeWithoutHeader).append("+") + .append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")"); } String dataBegin; if (buf.hasArray()) { dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(), - Math.min(32, buf.limit() - buf.arrayOffset() - headerSize())); + Math.min(32, buf.limit() - buf.arrayOffset() - headerSize())); } else { ByteBuff bufWithoutHeader = getBufferWithoutHeader(); - byte[] dataBeginBytes = new byte[Math.min(32, - bufWithoutHeader.limit() - bufWithoutHeader.position())]; + byte[] dataBeginBytes = + new byte[Math.min(32, bufWithoutHeader.limit() - bufWithoutHeader.position())]; bufWithoutHeader.get(dataBeginBytes); dataBegin = Bytes.toStringBinary(dataBeginBytes); } sb.append(", getOnDiskSizeWithHeader=").append(getOnDiskSizeWithHeader()) - .append(", totalChecksumBytes=").append(totalChecksumBytes()) - .append(", isUnpacked=").append(isUnpacked()) - .append(", buf=[").append(buf).append("]") - .append(", dataBeginsWith=").append(dataBegin) - .append(", fileContext=").append(fileContext) - .append(", nextBlockOnDiskSize=").append(nextBlockOnDiskSize) - .append("]"); + .append(", totalChecksumBytes=").append(totalChecksumBytes()).append(", isUnpacked=") + .append(isUnpacked()).append(", buf=[").append(buf).append("]").append(", dataBeginsWith=") + .append(dataBegin).append(", fileContext=").append(fileContext) + .append(", nextBlockOnDiskSize=").append(nextBlockOnDiskSize).append("]"); return sb.toString(); } @@ -643,8 +618,9 @@ HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException unpacked.allocateBuffer(); // allocates space for the decompressed block boolean succ = false; try { - HFileBlockDecodingContext ctx = blockType == BlockType.ENCODED_DATA - ? reader.getBlockDecodingContext() : reader.getDefaultBlockDecodingContext(); + HFileBlockDecodingContext ctx = + blockType == BlockType.ENCODED_DATA ? reader.getBlockDecodingContext() + : reader.getDefaultBlockDecodingContext(); // Create a duplicated buffer without the header part. ByteBuff dup = this.buf.duplicate(); dup.position(this.headerSize()); @@ -662,9 +638,8 @@ HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException } /** - * Always allocates a new buffer of the correct size. 
Copies header bytes - * from the existing buffer. Does not change header fields. - * Reserve room to keep checksum bytes too. + * Always allocates a new buffer of the correct size. Copies header bytes from the existing + * buffer. Does not change header fields. Reserve room to keep checksum bytes too. */ private void allocateBuffer() { int cksumBytes = totalChecksumBytes(); @@ -740,23 +715,20 @@ public boolean isSharedMem() { } /** - * Unified version 2 {@link HFile} block writer. The intended usage pattern - * is as follows: + * Unified version 2 {@link HFile} block writer. The intended usage pattern is as follows: *
 * <ol>
 * <li>Construct an {@link HFileBlock.Writer}, providing a compression algorithm.
 * <li>Call {@link Writer#startWriting} and get a data stream to write to.
 * <li>Write your data into the stream.
- * <li>Call Writer#writeHeaderAndData(FSDataOutputStream) as many times as you need to.
- * store the serialized block into an external stream.
+ * <li>Call Writer#writeHeaderAndData(FSDataOutputStream) as many times as you need to. store the
+ * serialized block into an external stream.
 * <li>Repeat to write more blocks.
 * </ol>
 * <p>
          */ static class Writer implements ShipperListener { private enum State { - INIT, - WRITING, - BLOCK_READY + INIT, WRITING, BLOCK_READY } /** Writer state. Used to ensure the correct usage protocol. */ @@ -767,42 +739,39 @@ private enum State { private HFileBlockEncodingContext dataBlockEncodingCtx; - /** block encoding context for non-data blocks*/ + /** block encoding context for non-data blocks */ private HFileBlockDefaultEncodingContext defaultBlockEncodingCtx; /** - * The stream we use to accumulate data into a block in an uncompressed format. - * We reset this stream at the end of each block and reuse it. The - * header is written as the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes into this - * stream. + * The stream we use to accumulate data into a block in an uncompressed format. We reset this + * stream at the end of each block and reuse it. The header is written as the first + * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes into this stream. */ private ByteArrayOutputStream baosInMemory; /** - * Current block type. Set in {@link #startWriting(BlockType)}. Could be - * changed in {@link #finishBlock()} from {@link BlockType#DATA} - * to {@link BlockType#ENCODED_DATA}. + * Current block type. Set in {@link #startWriting(BlockType)}. Could be changed in + * {@link #finishBlock()} from {@link BlockType#DATA} to {@link BlockType#ENCODED_DATA}. */ private BlockType blockType; /** - * A stream that we write uncompressed bytes to, which compresses them and - * writes them to {@link #baosInMemory}. + * A stream that we write uncompressed bytes to, which compresses them and writes them to + * {@link #baosInMemory}. */ private DataOutputStream userDataStream; /** - * Bytes to be written to the file system, including the header. Compressed - * if compression is turned on. It also includes the checksum data that - * immediately follows the block data. (header + data + checksums) + * Bytes to be written to the file system, including the header. Compressed if compression is + * turned on. It also includes the checksum data that immediately follows the block data. + * (header + data + checksums) */ private ByteArrayOutputStream onDiskBlockBytesWithHeader; /** - * The size of the checksum data on disk. It is used only if data is - * not compressed. If data is compressed, then the checksums are already - * part of onDiskBytesWithHeader. If data is uncompressed, then this - * variable stores the checksum data for this block. + * The size of the checksum data on disk. It is used only if data is not compressed. If data is + * compressed, then the checksums are already part of onDiskBytesWithHeader. If data is + * uncompressed, then this variable stores the checksum data for this block. */ private byte[] onDiskChecksum = HConstants.EMPTY_BYTE_ARRAY; @@ -813,14 +782,13 @@ private enum State { private long startOffset; /** - * Offset of previous block by block type. Updated when the next block is - * started. + * Offset of previous block by block type. Updated when the next block is started. 
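A hedged sketch of the numbered usage pattern from the Writer javadoc above, using only members visible in this diff (the four-argument constructor, startWriting, write, writeHeaderAndData, release). Writer is package-private, so a fragment like this only compiles inside org.apache.hadoop.hbase.io.hfile; it is an illustration under those assumptions, not code from the patch.

static void writeOneBlock(Configuration conf, HFileContext fileContext, Iterable<Cell> cells,
    FSDataOutputStream out) throws IOException {
  HFileBlock.Writer writer = new HFileBlock.Writer(conf, NoOpDataBlockEncoder.INSTANCE,
      fileContext, ByteBuffAllocator.HEAP);
  writer.startWriting(BlockType.DATA);  // steps 1-2: construct the writer, start a DATA block
  for (Cell cell : cells) {
    writer.write(cell);                 // step 3: write Cells into the block
  }
  writer.writeHeaderAndData(out);       // step 4: header + data + checksums go to the stream
  writer.release();                     // free the writer's internal buffers when finished
}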
*/ private long[] prevOffsetByType; /** The offset of the previous block of the same type */ private long prevOffset; - /** Meta data that holds information about the hfileblock**/ + /** Meta data that holds information about the hfileblock **/ private HFileContext fileContext; private final ByteBuffAllocator allocator; @@ -847,18 +815,18 @@ public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext, ByteBuffAllocator allocator) { if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) { - throw new RuntimeException("Unsupported value of bytesPerChecksum. " + - " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " + - fileContext.getBytesPerChecksum()); + throw new RuntimeException("Unsupported value of bytesPerChecksum. " + " Minimum is " + + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " + + fileContext.getBytesPerChecksum()); } this.allocator = allocator; - this.dataBlockEncoder = dataBlockEncoder != null? - dataBlockEncoder: NoOpDataBlockEncoder.INSTANCE; + this.dataBlockEncoder = + dataBlockEncoder != null ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE; this.dataBlockEncodingCtx = this.dataBlockEncoder.newDataBlockEncodingContext(conf, HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); // TODO: This should be lazily instantiated this.defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(conf, null, - HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); + HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); // TODO: Set BAOS initial size. Use fileContext.getBlocksize() and add for header/checksum baosInMemory = new ByteArrayOutputStream(); prevOffsetByType = new long[BlockType.values().length]; @@ -872,11 +840,9 @@ public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder, /** * Starts writing into the block. The previous block's data is discarded. - * * @return the stream the user can write their data into */ - DataOutputStream startWriting(BlockType newBlockType) - throws IOException { + DataOutputStream startWriting(BlockType newBlockType) throws IOException { if (state == State.BLOCK_READY && startOffset != -1) { // We had a previous block that was written to a stream at a specific // offset. Save that offset as the last offset of a block of that type. @@ -902,18 +868,17 @@ DataOutputStream startWriting(BlockType newBlockType) /** * Writes the Cell to this block */ - void write(Cell cell) throws IOException{ + void write(Cell cell) throws IOException { expectState(State.WRITING); this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, this.userDataStream); } /** - * Transitions the block writer from the "writing" state to the "block - * ready" state. Does nothing if a block is already finished. + * Transitions the block writer from the "writing" state to the "block ready" state. Does + * nothing if a block is already finished. */ void ensureBlockReady() throws IOException { - Preconditions.checkState(state != State.INIT, - "Unexpected state: " + state); + Preconditions.checkState(state != State.INIT, "Unexpected state: " + state); if (state == State.BLOCK_READY) { return; @@ -924,15 +889,14 @@ void ensureBlockReady() throws IOException { } /** - * Finish up writing of the block. - * Flushes the compressing stream (if using compression), fills out the header, - * does any compression/encryption of bytes to flush out to disk, and manages + * Finish up writing of the block. 
Flushes the compressing stream (if using compression), fills + * out the header, does any compression/encryption of bytes to flush out to disk, and manages * the cache on write content, if applicable. Sets block write state to "block ready". */ private void finishBlock() throws IOException { if (blockType == BlockType.DATA) { this.dataBlockEncoder.endBlockEncoding(dataBlockEncodingCtx, userDataStream, - baosInMemory.getBuffer(), blockType); + baosInMemory.getBuffer(), blockType); blockType = dataBlockEncodingCtx.getBlockType(); } userDataStream.flush(); @@ -943,11 +907,11 @@ private void finishBlock() throws IOException { state = State.BLOCK_READY; Bytes compressAndEncryptDat; if (blockType == BlockType.DATA || blockType == BlockType.ENCODED_DATA) { - compressAndEncryptDat = dataBlockEncodingCtx. - compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); + compressAndEncryptDat = dataBlockEncodingCtx.compressAndEncrypt(baosInMemory.getBuffer(), 0, + baosInMemory.size()); } else { - compressAndEncryptDat = defaultBlockEncodingCtx. - compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); + compressAndEncryptDat = defaultBlockEncodingCtx.compressAndEncrypt(baosInMemory.getBuffer(), + 0, baosInMemory.size()); } if (compressAndEncryptDat == null) { compressAndEncryptDat = new Bytes(baosInMemory.getBuffer(), 0, baosInMemory.size()); @@ -957,34 +921,32 @@ private void finishBlock() throws IOException { } onDiskBlockBytesWithHeader.reset(); onDiskBlockBytesWithHeader.write(compressAndEncryptDat.get(), - compressAndEncryptDat.getOffset(), compressAndEncryptDat.getLength()); + compressAndEncryptDat.getOffset(), compressAndEncryptDat.getLength()); // Calculate how many bytes we need for checksum on the tail of the block. - int numBytes = (int) ChecksumUtil.numBytes( - onDiskBlockBytesWithHeader.size(), - fileContext.getBytesPerChecksum()); + int numBytes = (int) ChecksumUtil.numBytes(onDiskBlockBytesWithHeader.size(), + fileContext.getBytesPerChecksum()); // Put the header for the on disk bytes; header currently is unfilled-out - putHeader(onDiskBlockBytesWithHeader, - onDiskBlockBytesWithHeader.size() + numBytes, - baosInMemory.size(), onDiskBlockBytesWithHeader.size()); + putHeader(onDiskBlockBytesWithHeader, onDiskBlockBytesWithHeader.size() + numBytes, + baosInMemory.size(), onDiskBlockBytesWithHeader.size()); if (onDiskChecksum.length != numBytes) { onDiskChecksum = new byte[numBytes]; } - ChecksumUtil.generateChecksums( - onDiskBlockBytesWithHeader.getBuffer(), 0,onDiskBlockBytesWithHeader.size(), - onDiskChecksum, 0, fileContext.getChecksumType(), fileContext.getBytesPerChecksum()); + ChecksumUtil.generateChecksums(onDiskBlockBytesWithHeader.getBuffer(), 0, + onDiskBlockBytesWithHeader.size(), onDiskChecksum, 0, fileContext.getChecksumType(), + fileContext.getBytesPerChecksum()); } /** * Put the header into the given byte array at the given offset. 
* @param onDiskSize size of the block on disk header + data + checksum - * @param uncompressedSize size of the block after decompression (but - * before optional data block decoding) including header - * @param onDiskDataSize size of the block on disk with header - * and data but not including the checksums + * @param uncompressedSize size of the block after decompression (but before optional data block + * decoding) including header + * @param onDiskDataSize size of the block on disk with header and data but not including the + * checksums */ - private void putHeader(byte[] dest, int offset, int onDiskSize, - int uncompressedSize, int onDiskDataSize) { + private void putHeader(byte[] dest, int offset, int onDiskSize, int uncompressedSize, + int onDiskDataSize) { offset = blockType.put(dest, offset); offset = Bytes.putInt(dest, offset, onDiskSize - HConstants.HFILEBLOCK_HEADER_SIZE); offset = Bytes.putInt(dest, offset, uncompressedSize - HConstants.HFILEBLOCK_HEADER_SIZE); @@ -994,8 +956,8 @@ private void putHeader(byte[] dest, int offset, int onDiskSize, Bytes.putInt(dest, offset, onDiskDataSize); } - private void putHeader(ByteBuff buff, int onDiskSize, - int uncompressedSize, int onDiskDataSize) { + private void putHeader(ByteBuff buff, int onDiskSize, int uncompressedSize, + int onDiskDataSize) { buff.rewind(); blockType.write(buff); buff.putInt(onDiskSize - HConstants.HFILEBLOCK_HEADER_SIZE); @@ -1006,36 +968,33 @@ private void putHeader(ByteBuff buff, int onDiskSize, buff.putInt(onDiskDataSize); } - private void putHeader(ByteArrayOutputStream dest, int onDiskSize, - int uncompressedSize, int onDiskDataSize) { - putHeader(dest.getBuffer(),0, onDiskSize, uncompressedSize, onDiskDataSize); + private void putHeader(ByteArrayOutputStream dest, int onDiskSize, int uncompressedSize, + int onDiskDataSize) { + putHeader(dest.getBuffer(), 0, onDiskSize, uncompressedSize, onDiskDataSize); } /** - * Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records - * the offset of this block so that it can be referenced in the next block - * of the same type. + * Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records the offset of this + * block so that it can be referenced in the next block of the same type. */ void writeHeaderAndData(FSDataOutputStream out) throws IOException { long offset = out.getPos(); if (startOffset != UNSET && offset != startOffset) { throw new IOException("A " + blockType + " block written to a " - + "stream twice, first at offset " + startOffset + ", then at " - + offset); + + "stream twice, first at offset " + startOffset + ", then at " + offset); } startOffset = offset; finishBlockAndWriteHeaderAndData(out); } /** - * Writes the header and the compressed data of this block (or uncompressed - * data when not using compression) into the given stream. Can be called in - * the "writing" state or in the "block ready" state. If called in the - * "writing" state, transitions the writer to the "block ready" state. + * Writes the header and the compressed data of this block (or uncompressed data when not using + * compression) into the given stream. Can be called in the "writing" state or in the "block + * ready" state. If called in the "writing" state, transitions the writer to the "block ready" + * state. 
* @param out the output stream to write the */ - protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) - throws IOException { + protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) throws IOException { ensureBlockReady(); long startTime = EnvironmentEdgeManager.currentTime(); out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size()); @@ -1044,25 +1003,21 @@ protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) } /** - * Returns the header or the compressed data (or uncompressed data when not - * using compression) as a byte array. Can be called in the "writing" state - * or in the "block ready" state. If called in the "writing" state, - * transitions the writer to the "block ready" state. This returns - * the header + data + checksums stored on disk. - * + * Returns the header or the compressed data (or uncompressed data when not using compression) + * as a byte array. Can be called in the "writing" state or in the "block ready" state. If + * called in the "writing" state, transitions the writer to the "block ready" state. This + * returns the header + data + checksums stored on disk. * @return header and data as they would be stored on disk in a byte array */ byte[] getHeaderAndDataForTest() throws IOException { ensureBlockReady(); // This is not very optimal, because we are doing an extra copy. // But this method is used only by unit tests. - byte[] output = - new byte[onDiskBlockBytesWithHeader.size() - + onDiskChecksum.length]; + byte[] output = new byte[onDiskBlockBytesWithHeader.size() + onDiskChecksum.length]; System.arraycopy(onDiskBlockBytesWithHeader.getBuffer(), 0, output, 0, - onDiskBlockBytesWithHeader.size()); - System.arraycopy(onDiskChecksum, 0, output, - onDiskBlockBytesWithHeader.size(), onDiskChecksum.length); + onDiskBlockBytesWithHeader.size()); + System.arraycopy(onDiskChecksum, 0, output, onDiskBlockBytesWithHeader.size(), + onDiskChecksum.length); return output; } @@ -1081,25 +1036,21 @@ void release() { } /** - * Returns the on-disk size of the data portion of the block. This is the - * compressed size if compression is enabled. Can only be called in the - * "block ready" state. Header is not compressed, and its size is not - * included in the return value. - * + * Returns the on-disk size of the data portion of the block. This is the compressed size if + * compression is enabled. Can only be called in the "block ready" state. Header is not + * compressed, and its size is not included in the return value. * @return the on-disk size of the block, not including the header. */ int getOnDiskSizeWithoutHeader() { expectState(State.BLOCK_READY); - return onDiskBlockBytesWithHeader.size() + - onDiskChecksum.length - HConstants.HFILEBLOCK_HEADER_SIZE; + return onDiskBlockBytesWithHeader.size() + onDiskChecksum.length + - HConstants.HFILEBLOCK_HEADER_SIZE; } /** - * Returns the on-disk size of the block. Can only be called in the - * "block ready" state. - * - * @return the on-disk size of the block ready to be written, including the - * header size, the data and the checksum data. + * Returns the on-disk size of the block. Can only be called in the "block ready" state. + * @return the on-disk size of the block ready to be written, including the header size, the + * data and the checksum data. 
*/ int getOnDiskSizeWithHeader() { expectState(State.BLOCK_READY); @@ -1122,16 +1073,14 @@ int getUncompressedSizeWithHeader() { return baosInMemory.size(); } - /** @return true if a block is being written */ + /** @return true if a block is being written */ boolean isWriting() { return state == State.WRITING; } /** - * Returns the number of bytes written into the current block so far, or - * zero if not writing the block at the moment. Note that this will return - * zero in the "block ready" state as well. - * + * Returns the number of bytes written into the current block so far, or zero if not writing the + * block at the moment. Note that this will return zero in the "block ready" state as well. * @return the number of bytes written */ public int encodedBlockSizeWritten() { @@ -1139,10 +1088,8 @@ public int encodedBlockSizeWritten() { } /** - * Returns the number of bytes written into the current block so far, or - * zero if not writing the block at the moment. Note that this will return - * zero in the "block ready" state as well. - * + * Returns the number of bytes written into the current block so far, or zero if not writing the + * block at the moment. Note that this will return zero in the "block ready" state as well. * @return the number of bytes written */ int blockSizeWritten() { @@ -1150,22 +1097,20 @@ int blockSizeWritten() { } /** - * Clones the header followed by the uncompressed data, even if using - * compression. This is needed for storing uncompressed blocks in the block - * cache. Can be called in the "writing" state or the "block ready" state. - * Returns only the header and data, does not include checksum data. - * + * Clones the header followed by the uncompressed data, even if using compression. This is + * needed for storing uncompressed blocks in the block cache. Can be called in the "writing" + * state or the "block ready" state. Returns only the header and data, does not include checksum + * data. * @return Returns an uncompressed block ByteBuff for caching on write */ ByteBuff cloneUncompressedBufferWithHeader() { expectState(State.BLOCK_READY); ByteBuff bytebuff = allocator.allocate(baosInMemory.size()); baosInMemory.toByteBuff(bytebuff); - int numBytes = (int) ChecksumUtil.numBytes( - onDiskBlockBytesWithHeader.size(), - fileContext.getBytesPerChecksum()); - putHeader(bytebuff, onDiskBlockBytesWithHeader.size() + numBytes, - baosInMemory.size(), onDiskBlockBytesWithHeader.size()); + int numBytes = (int) ChecksumUtil.numBytes(onDiskBlockBytesWithHeader.size(), + fileContext.getBytesPerChecksum()); + putHeader(bytebuff, onDiskBlockBytesWithHeader.size() + numBytes, baosInMemory.size(), + onDiskBlockBytesWithHeader.size()); bytebuff.rewind(); return bytebuff; } @@ -1186,53 +1131,46 @@ private ByteBuff cloneOnDiskBufferWithHeader() { private void expectState(State expectedState) { if (state != expectedState) { - throw new IllegalStateException("Expected state: " + expectedState + - ", actual state: " + state); + throw new IllegalStateException( + "Expected state: " + expectedState + ", actual state: " + state); } } /** - * Takes the given {@link BlockWritable} instance, creates a new block of - * its appropriate type, writes the writable into this block, and flushes - * the block into the output stream. The writer is instructed not to buffer - * uncompressed bytes for cache-on-write. 
- * + * Takes the given {@link BlockWritable} instance, creates a new block of its appropriate type, + * writes the writable into this block, and flushes the block into the output stream. The writer + * is instructed not to buffer uncompressed bytes for cache-on-write. * @param bw the block-writable object to write as a block * @param out the file system output stream */ - void writeBlock(BlockWritable bw, FSDataOutputStream out) - throws IOException { + void writeBlock(BlockWritable bw, FSDataOutputStream out) throws IOException { bw.writeToBlock(startWriting(bw.getBlockType())); writeHeaderAndData(out); } /** - * Creates a new HFileBlock. Checksums have already been validated, so - * the byte buffer passed into the constructor of this newly created - * block does not have checksum data even though the header minor - * version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a - * 0 value in bytesPerChecksum. This method copies the on-disk or - * uncompressed data to build the HFileBlock which is used only - * while writing blocks and caching. - * - *

          TODO: Should there be an option where a cache can ask that hbase preserve block - * checksums for checking after a block comes out of the cache? Otehrwise, cache is responsible - * for blocks being wholesome (ECC memory or if file-backed, it does checksumming). + * Creates a new HFileBlock. Checksums have already been validated, so the byte buffer passed + * into the constructor of this newly created block does not have checksum data even though the + * header minor version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a 0 value + * in bytesPerChecksum. This method copies the on-disk or uncompressed data to build the + * HFileBlock which is used only while writing blocks and caching. + *

          + * TODO: Should there be an option where a cache can ask that hbase preserve block checksums for + * checking after a block comes out of the cache? Otehrwise, cache is responsible for blocks + * being wholesome (ECC memory or if file-backed, it does checksumming). */ HFileBlock getBlockForCaching(CacheConfig cacheConf) { - HFileContext newContext = new HFileContextBuilder() - .withBlockSize(fileContext.getBlocksize()) - .withBytesPerCheckSum(0) - .withChecksumType(ChecksumType.NULL) // no checksums in cached data - .withCompression(fileContext.getCompression()) - .withDataBlockEncoding(fileContext.getDataBlockEncoding()) - .withHBaseCheckSum(fileContext.isUseHBaseChecksum()) - .withCompressTags(fileContext.isCompressTags()) - .withIncludesMvcc(fileContext.isIncludesMvcc()) - .withIncludesTags(fileContext.isIncludesTags()) - .withColumnFamily(fileContext.getColumnFamily()) - .withTableName(fileContext.getTableName()) - .build(); + HFileContext newContext = new HFileContextBuilder().withBlockSize(fileContext.getBlocksize()) + .withBytesPerCheckSum(0).withChecksumType(ChecksumType.NULL) // no checksums in cached + // data + .withCompression(fileContext.getCompression()) + .withDataBlockEncoding(fileContext.getDataBlockEncoding()) + .withHBaseCheckSum(fileContext.isUseHBaseChecksum()) + .withCompressTags(fileContext.isCompressTags()) + .withIncludesMvcc(fileContext.isIncludesMvcc()) + .withIncludesTags(fileContext.isIncludesTags()) + .withColumnFamily(fileContext.getColumnFamily()).withTableName(fileContext.getTableName()) + .build(); // Build the HFileBlock. HFileBlockBuilder builder = new HFileBlockBuilder(); ByteBuff buff; @@ -1244,16 +1182,11 @@ HFileBlock getBlockForCaching(CacheConfig cacheConf) { return builder.withBlockType(blockType) .withOnDiskSizeWithoutHeader(getOnDiskSizeWithoutHeader()) .withUncompressedSizeWithoutHeader(getUncompressedSizeWithoutHeader()) - .withPrevBlockOffset(prevOffset) - .withByteBuff(buff) - .withFillHeader(FILL_HEADER) - .withOffset(startOffset) - .withNextBlockOnDiskSize(UNSET) + .withPrevBlockOffset(prevOffset).withByteBuff(buff).withFillHeader(FILL_HEADER) + .withOffset(startOffset).withNextBlockOnDiskSize(UNSET) .withOnDiskDataSizeWithHeader(onDiskBlockBytesWithHeader.size() + onDiskChecksum.length) - .withHFileContext(newContext) - .withByteBuffAllocator(cacheConf.getByteBuffAllocator()) - .withShared(!buff.hasArray()) - .build(); + .withHFileContext(newContext).withByteBuffAllocator(cacheConf.getByteBuffAllocator()) + .withShared(!buff.hasArray()).build(); } } @@ -1263,9 +1196,7 @@ interface BlockWritable { BlockType getBlockType(); /** - * Writes the block to the provided stream. Must not write any magic - * records. - * + * Writes the block to the provided stream. Must not write any magic records. * @param out a stream to write uncompressed data into */ void writeToBlock(DataOutput out) throws IOException; @@ -1326,12 +1257,10 @@ HFileBlock readBlockData(long offset, long onDiskSize, boolean pread, boolean up boolean intoHeap) throws IOException; /** - * Creates a block iterator over the given portion of the {@link HFile}. - * The iterator returns blocks starting with offset such that offset <= - * startOffset < endOffset. Returned blocks are always unpacked. - * Used when no hfile index available; e.g. reading in the hfile index - * blocks themselves on file open. - * + * Creates a block iterator over the given portion of the {@link HFile}. The iterator returns + * blocks starting with offset such that offset <= startOffset < endOffset. 
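The getBlockForCaching() body above chains HFileContextBuilder setters. As a minimal standalone sketch of that builder API, using only setters that appear in this diff (the values are arbitrary defaults for illustration, not what the method actually chooses):

HFileContext ctx = new HFileContextBuilder()
    .withBlockSize(64 * 1024)                    // 64 KB data blocks
    .withCompression(Compression.Algorithm.NONE) // no compression in this example
    .withHBaseCheckSum(true)                     // hbase-level checksums on
    .withIncludesMvcc(true)
    .withIncludesTags(false)
    .build();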
Returned + * blocks are always unpacked. Used when no hfile index available; e.g. reading in the hfile + * index blocks themselves on file open. * @param startOffset the offset of the block to start iteration with * @param endOffset the offset to end iteration at (exclusive) * @return an iterator of blocks between the two given offsets @@ -1348,6 +1277,7 @@ HFileBlock readBlockData(long offset, long onDiskSize, boolean pread, boolean up HFileBlockDecodingContext getDefaultBlockDecodingContext(); void setIncludesMemStoreTS(boolean includesMemstoreTS); + void setDataBlockEncoder(HFileDataBlockEncoder encoder, Configuration conf); /** @@ -1358,12 +1288,10 @@ HFileBlock readBlockData(long offset, long onDiskSize, boolean pread, boolean up } /** - * Data-structure to use caching the header of the NEXT block. Only works if next read - * that comes in here is next in sequence in this block. - * - * When we read, we read current block and the next blocks' header. We do this so we have - * the length of the next block to read if the hfile index is not available (rare, at - * hfile open only). + * Data-structure to use caching the header of the NEXT block. Only works if next read that comes + * in here is next in sequence in this block. When we read, we read current block and the next + * blocks' header. We do this so we have the length of the next block to read if the hfile index + * is not available (rare, at hfile open only). */ private static class PrefetchedHeader { long offset = -1; @@ -1380,8 +1308,10 @@ public String toString() { * Reads version 2 HFile blocks from the filesystem. */ static class FSReaderImpl implements FSReader { - /** The file system stream of the underlying {@link HFile} that - * does or doesn't do checksum validations in the filesystem */ + /** + * The file system stream of the underlying {@link HFile} that does or doesn't do checksum + * validations in the filesystem + */ private FSDataInputStreamWrapper streamWrapper; private HFileBlockDecodingContext encodedBlockDecodingCtx; @@ -1390,13 +1320,12 @@ static class FSReaderImpl implements FSReader { private final HFileBlockDefaultDecodingContext defaultDecodingCtx; /** - * Cache of the NEXT header after this. Check it is indeed next blocks header - * before using it. TODO: Review. This overread into next block to fetch - * next blocks header seems unnecessary given we usually get the block size - * from the hfile index. Review! + * Cache of the NEXT header after this. Check it is indeed next blocks header before using it. + * TODO: Review. This overread into next block to fetch next blocks header seems unnecessary + * given we usually get the block size from the hfile index. Review! */ private AtomicReference prefetchedHeader = - new AtomicReference<>(new PrefetchedHeader()); + new AtomicReference<>(new PrefetchedHeader()); /** The size of the file we are reading from, or -1 if unknown. 
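The PrefetchedHeader mechanism described above can be illustrated with a small, self-contained sketch: the reader remembers the over-read header keyed by the offset of the block it belongs to, and a later read may use it only when the offsets match exactly. The class and field names below are stand-ins, not the real reader's API, and the 33-byte header size is only an illustrative constant.

import java.util.concurrent.atomic.AtomicReference;

// Self-contained sketch of the next-header cache: useful only when the next read is
// for exactly the block whose header was over-read on the previous call.
final class NextHeaderCache {
  static final int HEADER_SIZE = 33; // stand-in for hdrSize (v2 header with checksums)

  private static final class Prefetched {
    long offset = -1;                          // offset of the block this header belongs to
    final byte[] header = new byte[HEADER_SIZE];
  }

  private final AtomicReference<Prefetched> ref = new AtomicReference<>(new Prefetched());

  /** Remember the header of the block that starts at {@code nextOffset}. */
  void cache(long nextOffset, byte[] overReadBuffer, int headerStart) {
    Prefetched p = new Prefetched();
    p.offset = nextOffset;
    System.arraycopy(overReadBuffer, headerStart, p.header, 0, HEADER_SIZE);
    ref.set(p);
  }

  /** @return the cached header if it is for {@code offset}, else null (cache miss). */
  byte[] get(long offset) {
    Prefetched p = ref.get();
    return p.offset == offset ? p.header.clone() : null;
  }
}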
*/ private long fileSize; @@ -1415,8 +1344,8 @@ static class FSReaderImpl implements FSReader { private final Lock streamLock = new ReentrantLock(); - FSReaderImpl(ReaderContext readerContext, HFileContext fileContext, - ByteBuffAllocator allocator, Configuration conf) throws IOException { + FSReaderImpl(ReaderContext readerContext, HFileContext fileContext, ByteBuffAllocator allocator, + Configuration conf) throws IOException { this.fileSize = readerContext.getFileSize(); this.hfs = readerContext.getFileSystem(); if (readerContext.getFilePath() != null) { @@ -1554,17 +1483,14 @@ public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean HFileBlock blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread, doVerificationThruHBaseChecksum, updateMetrics, intoHeap); if (blk == null) { - HFile.LOG.warn("HBase checksum verification failed for file " + - pathName + " at offset " + - offset + " filesize " + fileSize + - ". Retrying read with HDFS checksums turned on..."); + HFile.LOG + .warn("HBase checksum verification failed for file " + pathName + " at offset " + offset + + " filesize " + fileSize + ". Retrying read with HDFS checksums turned on..."); if (!doVerificationThruHBaseChecksum) { - String msg = "HBase checksum verification failed for file " + - pathName + " at offset " + - offset + " filesize " + fileSize + - " but this cannot happen because doVerify is " + - doVerificationThruHBaseChecksum; + String msg = "HBase checksum verification failed for file " + pathName + " at offset " + + offset + " filesize " + fileSize + " but this cannot happen because doVerify is " + + doVerificationThruHBaseChecksum; HFile.LOG.warn(msg); throw new IOException(msg); // cannot happen case here } @@ -1581,15 +1507,14 @@ public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread, doVerificationThruHBaseChecksum, updateMetrics, intoHeap); if (blk != null) { - HFile.LOG.warn("HDFS checksum verification succeeded for file " + - pathName + " at offset " + - offset + " filesize " + fileSize); + HFile.LOG.warn("HDFS checksum verification succeeded for file " + pathName + " at offset " + + offset + " filesize " + fileSize); } } if (blk == null && !doVerificationThruHBaseChecksum) { - String msg = "readBlockData failed, possibly due to " + - "checksum verification failed for file " + pathName + - " at offset " + offset + " filesize " + fileSize; + String msg = + "readBlockData failed, possibly due to " + "checksum verification failed for file " + + pathName + " at offset " + offset + " filesize " + fileSize; HFile.LOG.warn(msg); throw new IOException(msg); } @@ -1612,34 +1537,33 @@ private static int checkAndGetSizeAsInt(final long onDiskSizeWithHeaderL, final throws IOException { if ((onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1) || onDiskSizeWithHeaderL >= Integer.MAX_VALUE) { - throw new IOException("Invalid onDisksize=" + onDiskSizeWithHeaderL - + ": expected to be at least " + hdrSize - + " and at most " + Integer.MAX_VALUE + ", or -1"); + throw new IOException( + "Invalid onDisksize=" + onDiskSizeWithHeaderL + ": expected to be at least " + hdrSize + + " and at most " + Integer.MAX_VALUE + ", or -1"); } - return (int)onDiskSizeWithHeaderL; + return (int) onDiskSizeWithHeaderL; } /** - * Verify the passed in onDiskSizeWithHeader aligns with what is in the header else something - * is not right. 
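The retry logic in readBlockData() above follows a simple two-attempt shape: verify through HBase checksums first, and only if that read fails fall back to a read that relies on HDFS checksums, failing hard if both attempts come back empty. A hedged, self-contained sketch of that pattern; ReadAttempt and readWithFallback are illustrative names, not methods from this patch.

import java.io.IOException;

// Sketch of the fallback pattern: attempt.read(true) means "verify via HBase checksums",
// attempt.read(false) means "rely on HDFS checksum verification instead".
final class ChecksumFallbackExample {
  @FunctionalInterface
  interface ReadAttempt<T> {
    T read(boolean verifyThroughHBaseChecksum) throws IOException;
  }

  static <T> T readWithFallback(ReadAttempt<T> attempt, String pathName, long offset)
    throws IOException {
    T block = attempt.read(true);     // first try with HBase-level checksum verification
    if (block != null) {
      return block;
    }
    // HBase checksum verification failed; retry letting HDFS verify its own checksums.
    block = attempt.read(false);
    if (block == null) {
      throw new IOException("readBlockData failed, possibly due to checksum verification "
        + "failed for file " + pathName + " at offset " + offset);
    }
    return block;
  }
}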
+ * Verify the passed in onDiskSizeWithHeader aligns with what is in the header else something is + * not right. */ private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuff headerBuf, - final long offset, boolean verifyChecksum) - throws IOException { + final long offset, boolean verifyChecksum) throws IOException { // Assert size provided aligns with what is in the header int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum); if (passedIn != fromHeader) { - throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader + - ", offset=" + offset + ", fileContext=" + this.fileContext); + throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader + + ", offset=" + offset + ", fileContext=" + this.fileContext); } } /** - * Check atomic reference cache for this block's header. Cache only good if next - * read coming through is next in sequence in the block. We read next block's - * header on the tail of reading the previous block to save a seek. Otherwise, - * we have to do a seek to read the header before we can pull in the block OR - * we have to backup the stream because we over-read (the next block's header). + * Check atomic reference cache for this block's header. Cache only good if next read coming + * through is next in sequence in the block. We read next block's header on the tail of reading + * the previous block to save a seek. Otherwise, we have to do a seek to read the header before + * we can pull in the block OR we have to backup the stream because we over-read (the next + * block's header). * @see PrefetchedHeader * @return The cached block header or null if not found. * @see #cacheNextBlockHeader(long, ByteBuff, int, int) @@ -1654,8 +1578,8 @@ private ByteBuff getCachedHeader(final long offset) { * @see #getCachedHeader(long) * @see PrefetchedHeader */ - private void cacheNextBlockHeader(final long offset, - ByteBuff onDiskBlock, int onDiskSizeWithHeader, int headerLength) { + private void cacheNextBlockHeader(final long offset, ByteBuff onDiskBlock, + int onDiskSizeWithHeader, int headerLength) { PrefetchedHeader ph = new PrefetchedHeader(); ph.offset = offset; onDiskBlock.get(onDiskSizeWithHeader, ph.header, 0, headerLength); @@ -1696,17 +1620,19 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics, boolean intoHeap) throws IOException { if (offset < 0) { - throw new IOException("Invalid offset=" + offset + " trying to read " - + "block (onDiskSize=" + onDiskSizeWithHeaderL + ")"); + throw new IOException("Invalid offset=" + offset + " trying to read " + "block (onDiskSize=" + + onDiskSizeWithHeaderL + ")"); } int onDiskSizeWithHeader = checkAndGetSizeAsInt(onDiskSizeWithHeaderL, hdrSize); // Try and get cached header. Will serve us in rare case where onDiskSizeWithHeaderL is -1 // and will save us having to seek the stream backwards to reread the header we // read the last time through here. 
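The sizing decisions described in the surrounding comments reduce to two cases: a preliminary header-only read when the block size is unknown (-1) and no header is cached, and otherwise a single read of the whole block plus one extra header's worth of bytes so the next block's header can be cached. A small sketch of that arithmetic, with a stand-in header size.

// Illustrative only: mirrors the shape of the sizing logic, not the reader's code.
final class ReadSizingExample {
  static final int HDR_SIZE = 33; // stand-in for hdrSize (v2 header with checksums)

  /** True when a preliminary header-only read is needed to learn the block size. */
  static boolean mustReadHeaderFirst(long onDiskSizeWithHeader, boolean haveCachedHeader) {
    return onDiskSizeWithHeader == -1 && !haveCachedHeader;
  }

  /**
   * Bytes to allocate once the size is known: the block itself plus one extra header,
   * so the NEXT block's header is over-read and can be cached for the following call.
   */
  static int allocationSize(int onDiskSizeWithHeader) {
    return onDiskSizeWithHeader + HDR_SIZE;
  }
}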
ByteBuff headerBuf = getCachedHeader(offset); - LOG.trace("Reading {} at offset={}, pread={}, verifyChecksum={}, cachedHeader={}, " + - "onDiskSizeWithHeader={}", this.fileContext.getHFileName(), offset, pread, - verifyChecksum, headerBuf, onDiskSizeWithHeader); + LOG.trace( + "Reading {} at offset={}, pread={}, verifyChecksum={}, cachedHeader={}, " + + "onDiskSizeWithHeader={}", + this.fileContext.getHFileName(), offset, pread, verifyChecksum, headerBuf, + onDiskSizeWithHeader); // This is NOT same as verifyChecksum. This latter is whether to do hbase // checksums. Can change with circumstances. The below flag is whether the // file has support for checksums (version 2+). @@ -1729,7 +1655,7 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, } onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport); } - int preReadHeaderSize = headerBuf == null? 0 : hdrSize; + int preReadHeaderSize = headerBuf == null ? 0 : hdrSize; // Allocate enough space to fit the next block's header too; saves a seek next time through. // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header; // onDiskSizeWithHeader is header, body, and any checksums if present. preReadHeaderSize @@ -1788,8 +1714,8 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, @Override public void setIncludesMemStoreTS(boolean includesMemstoreTS) { - this.fileContext = new HFileContextBuilder(this.fileContext) - .withIncludesMvcc(includesMemstoreTS).build(); + this.fileContext = + new HFileContextBuilder(this.fileContext).withIncludesMvcc(includesMemstoreTS).build(); } @Override @@ -1808,8 +1734,8 @@ public HFileBlockDecodingContext getDefaultBlockDecodingContext() { } /** - * Generates the checksum for the header as well as the data and then validates it. - * If the block doesn't uses checksum, returns false. + * Generates the checksum for the header as well as the data and then validates it. If the block + * doesn't uses checksum, returns false. * @return True if checksum matches, else false. */ private boolean validateChecksum(long offset, ByteBuff data, int hdrSize) { @@ -1850,12 +1776,10 @@ public String toString() { /** An additional sanity-check in case no compression or encryption is being used. */ void sanityCheckUncompressed() throws IOException { - if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + - totalChecksumBytes()) { - throw new IOException("Using no compression but " - + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", " - + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader - + ", numChecksumbytes=" + totalChecksumBytes()); + if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) { + throw new IOException("Using no compression but " + "onDiskSizeWithoutHeader=" + + onDiskSizeWithoutHeader + ", " + "uncompressedSizeWithoutHeader=" + + uncompressedSizeWithoutHeader + ", numChecksumbytes=" + totalChecksumBytes()); } } @@ -1956,7 +1880,7 @@ public boolean equals(Object comparison) { return false; } if (ByteBuff.compareTo(this.buf, 0, this.buf.limit(), castedComparison.buf, 0, - castedComparison.buf.limit()) != 0) { + castedComparison.buf.limit()) != 0) { return false; } return true; @@ -1983,8 +1907,8 @@ int getOnDiskDataSizeWithHeader() { } /** - * Calculate the number of bytes required to store all the checksums - * for this block. Each checksum value is a 4 byte integer. 
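validateChecksum(), referenced above, recomputes per-chunk checksums over the header-plus-data region and compares them with the stored 4-byte values. The following self-contained sketch shows that shape using CRC32C (one of the supported checksum types); it mirrors the idea only and is not ChecksumUtil's actual API.

import java.nio.ByteBuffer;
import java.util.zip.CRC32C;

// Sketch: headerAndData is the block header plus data, checksums is the trailing
// checksum section holding one 4-byte value per bytesPerChecksum-sized chunk, in order.
final class ChecksumCheckExample {
  static boolean verify(byte[] headerAndData, byte[] checksums, int bytesPerChecksum) {
    ByteBuffer stored = ByteBuffer.wrap(checksums);
    for (int off = 0; off < headerAndData.length; off += bytesPerChecksum) {
      int len = Math.min(bytesPerChecksum, headerAndData.length - off);
      CRC32C crc = new CRC32C();
      crc.update(headerAndData, off, len);
      if ((int) crc.getValue() != stored.getInt()) {
        return false; // corrupt chunk
      }
    }
    return true;
  }
}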
+ * Calculate the number of bytes required to store all the checksums for this block. Each checksum + * value is a 4 byte integer. */ int totalChecksumBytes() { // If the hfile block has minorVersion 0, then there are no checksum @@ -1995,7 +1919,7 @@ int totalChecksumBytes() { return 0; } return (int) ChecksumUtil.numBytes(onDiskDataSizeWithHeader, - this.fileContext.getBytesPerChecksum()); + this.fileContext.getBytesPerChecksum()); } /** @@ -2009,8 +1933,8 @@ public int headerSize() { * Maps a minor version to the size of the header. */ public static int headerSize(boolean usesHBaseChecksum) { - return usesHBaseChecksum? - HConstants.HFILEBLOCK_HEADER_SIZE: HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; + return usesHBaseChecksum ? HConstants.HFILEBLOCK_HEADER_SIZE + : HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; } /** @@ -2025,21 +1949,20 @@ byte[] getDummyHeaderForVersion() { * Return the appropriate DUMMY_HEADER for the minor version */ static private byte[] getDummyHeaderForVersion(boolean usesHBaseChecksum) { - return usesHBaseChecksum? HConstants.HFILEBLOCK_DUMMY_HEADER: DUMMY_HEADER_NO_CHECKSUM; + return usesHBaseChecksum ? HConstants.HFILEBLOCK_DUMMY_HEADER : DUMMY_HEADER_NO_CHECKSUM; } /** - * @return This HFileBlocks fileContext which will a derivative of the - * fileContext for the file from which this block's data was originally read. + * @return This HFileBlocks fileContext which will a derivative of the fileContext for the file + * from which this block's data was originally read. */ public HFileContext getHFileContext() { return this.fileContext; } /** - * Convert the contents of the block header into a human readable string. - * This is mostly helpful for debugging. This assumes that the block - * has minor version > 0. + * Convert the contents of the block header into a human readable string. This is mostly helpful + * for debugging. This assumes that the block has minor version > 0. */ static String toStringHeader(ByteBuff buf) throws IOException { byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)]; @@ -2051,31 +1974,23 @@ static String toStringHeader(ByteBuff buf) throws IOException { byte cksumtype = buf.get(); long bytesPerChecksum = buf.getInt(); long onDiskDataSizeWithHeader = buf.getInt(); - return " Header dump: magic: " + Bytes.toString(magicBuf) + - " blockType " + bt + - " compressedBlockSizeNoHeader " + - compressedBlockSizeNoHeader + - " uncompressedBlockSizeNoHeader " + - uncompressedBlockSizeNoHeader + - " prevBlockOffset " + prevBlockOffset + - " checksumType " + ChecksumType.codeToType(cksumtype) + - " bytesPerChecksum " + bytesPerChecksum + - " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader; + return " Header dump: magic: " + Bytes.toString(magicBuf) + " blockType " + bt + + " compressedBlockSizeNoHeader " + compressedBlockSizeNoHeader + + " uncompressedBlockSizeNoHeader " + uncompressedBlockSizeNoHeader + " prevBlockOffset " + + prevBlockOffset + " checksumType " + ChecksumType.codeToType(cksumtype) + + " bytesPerChecksum " + bytesPerChecksum + " onDiskDataSizeWithHeader " + + onDiskDataSizeWithHeader; } - private static HFileBlockBuilder createBuilder(HFileBlock blk){ - return new HFileBlockBuilder() - .withBlockType(blk.blockType) - .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader) - .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader) - .withPrevBlockOffset(blk.prevBlockOffset) - .withByteBuff(blk.buf.duplicate()) // Duplicate the buffer. 
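The checksum-size arithmetic used by totalChecksumBytes() above is one 4-byte value per bytesPerChecksum-sized chunk of the header-plus-data region, rounded up. A worked example follows; it is a sketch of what ChecksumUtil.numBytes computes, not its implementation.

// Sketch of the checksum-size calculation with a concrete worked example.
final class ChecksumSizeExample {
  static long checksumBytes(long onDiskDataSizeWithHeader, int bytesPerChecksum) {
    long chunks = (onDiskDataSizeWithHeader + bytesPerChecksum - 1) / bytesPerChecksum;
    return chunks * 4L; // Bytes.SIZEOF_INT per chunk
  }

  public static void main(String[] args) {
    // A 64 KiB data block plus a 33-byte header, with 16 KiB checksum chunks:
    // ceil(65569 / 16384) = 5 chunks -> 20 checksum bytes on disk.
    System.out.println(checksumBytes(65536 + 33, 16384)); // prints 20
  }
}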
- .withOffset(blk.offset) - .withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader) - .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize) - .withHFileContext(blk.fileContext) - .withByteBuffAllocator(blk.allocator) - .withShared(blk.isSharedMem()); + private static HFileBlockBuilder createBuilder(HFileBlock blk) { + return new HFileBlockBuilder().withBlockType(blk.blockType) + .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader) + .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader) + .withPrevBlockOffset(blk.prevBlockOffset).withByteBuff(blk.buf.duplicate()) // Duplicate the + // buffer. + .withOffset(blk.offset).withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader) + .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize).withHFileContext(blk.fileContext) + .withByteBuffAllocator(blk.allocator).withShared(blk.isSharedMem()); } static HFileBlock shallowClone(HFileBlock blk) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java index dc37a920f2ff..64f18135c844 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index b38964ebfd73..ccc6eb0558a5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,20 +28,15 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -//import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader; @@ -53,17 +47,16 @@ import org.apache.hadoop.hbase.util.ObjectIntPair; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Provides functionality to write ({@link BlockIndexWriter}) and read - * BlockIndexReader - * single-level and multi-level block indexes. 
- * - * Examples of how to use the block index writer can be found in - * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and - * {@link HFileWriterImpl}. Examples of how to use the reader can be - * found in {@link HFileReaderImpl} and - * org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex. + * Provides functionality to write ({@link BlockIndexWriter}) and read BlockIndexReader single-level + * and multi-level block indexes. Examples of how to use the block index writer can be found in + * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and {@link HFileWriterImpl}. + * Examples of how to use the reader can be found in {@link HFileReaderImpl} and + * org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex. */ @InterfaceAudience.Private public class HFileBlockIndex { @@ -73,8 +66,8 @@ public class HFileBlockIndex { static final int DEFAULT_MAX_CHUNK_SIZE = 128 * 1024; /** - * The maximum size guideline for index blocks (both leaf, intermediate, and - * root). If not specified, DEFAULT_MAX_CHUNK_SIZE is used. + * The maximum size guideline for index blocks (both leaf, intermediate, and root). If not + * specified, DEFAULT_MAX_CHUNK_SIZE is used. */ public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size"; @@ -89,13 +82,12 @@ public class HFileBlockIndex { static final int DEFAULT_MIN_INDEX_NUM_ENTRIES = 16; /** - * The number of bytes stored in each "secondary index" entry in addition to - * key bytes in the non-root index block format. The first long is the file - * offset of the deeper-level block the entry points to, and the int that - * follows is that block's on-disk size without including header. + * The number of bytes stored in each "secondary index" entry in addition to key bytes in the + * non-root index block format. The first long is the file offset of the deeper-level block the + * entry points to, and the int that follows is that block's on-disk size without including + * header. */ - static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Bytes.SIZEOF_INT - + Bytes.SIZEOF_LONG; + static final int SECONDARY_INDEX_ENTRY_OVERHEAD = Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG; /** * Error message when trying to use inline block API in single-level mode. @@ -104,20 +96,18 @@ public class HFileBlockIndex { "Inline blocks are not allowed in the single-level-only mode"; /** - * The size of a meta-data record used for finding the mid-key in a - * multi-level index. Consists of the middle leaf-level index block offset - * (long), its on-disk size without header included (int), and the mid-key - * entry's zero-based index in that leaf index block. + * The size of a meta-data record used for finding the mid-key in a multi-level index. Consists of + * the middle leaf-level index block offset (long), its on-disk size without header included + * (int), and the mid-key entry's zero-based index in that leaf index block. */ - private static final int MID_KEY_METADATA_SIZE = Bytes.SIZEOF_LONG + - 2 * Bytes.SIZEOF_INT; + private static final int MID_KEY_METADATA_SIZE = Bytes.SIZEOF_LONG + 2 * Bytes.SIZEOF_INT; /** - * An implementation of the BlockIndexReader that deals with block keys which are plain - * byte[] like MetaBlock or the Bloom Block for ROW bloom. - * Does not need a comparator. It can work on Bytes.BYTES_RAWCOMPARATOR + * An implementation of the BlockIndexReader that deals with block keys which are plain byte[] + * like MetaBlock or the Bloom Block for ROW bloom. Does not need a comparator. 
It can work on + * Bytes.BYTES_RAWCOMPARATOR */ - static class ByteArrayKeyBlockIndexReader extends BlockIndexReader { + static class ByteArrayKeyBlockIndexReader extends BlockIndexReader { private byte[][] blockKeys; @@ -148,8 +138,7 @@ public boolean isEmpty() { } /** - * @param i - * from 0 to {@link #getRootBlockCount() - 1} + * @param i from 0 to {@link #getRootBlockCount() - 1} */ public byte[] getRootBlockKey(int i) { return blockKeys[i]; @@ -158,8 +147,8 @@ public byte[] getRootBlockKey(int i) { @Override public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, - DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException { // this would not be needed return null; } @@ -209,8 +198,8 @@ public int rootBlockContainingKey(byte[] key, int offset, int length, CellCompar public int rootBlockContainingKey(Cell key) { // Should not be called on this because here it deals only with byte[] throw new UnsupportedOperationException( - "Cannot search for a key that is of Cell type. Only plain byte array keys " + - "can be searched for"); + "Cannot search for a key that is of Cell type. Only plain byte array keys " + + "can be searched for"); } @Override @@ -218,18 +207,17 @@ public String toString() { StringBuilder sb = new StringBuilder(); sb.append("size=" + rootCount).append("\n"); for (int i = 0; i < rootCount; i++) { - sb.append("key=").append(KeyValue.keyToString(blockKeys[i])) - .append("\n offset=").append(blockOffsets[i]) - .append(", dataSize=" + blockDataSizes[i]).append("\n"); + sb.append("key=").append(KeyValue.keyToString(blockKeys[i])).append("\n offset=") + .append(blockOffsets[i]).append(", dataSize=" + blockDataSizes[i]).append("\n"); } return sb.toString(); } } /** - * An implementation of the BlockIndexReader that deals with block keys which are the key - * part of a cell like the Data block index or the ROW_COL bloom blocks - * This needs a comparator to work with the Cells + * An implementation of the BlockIndexReader that deals with block keys which are the key part of + * a cell like the Data block index or the ROW_COL bloom blocks This needs a comparator to work + * with the Cells */ static class CellBasedKeyBlockIndexReader extends BlockIndexReader { @@ -268,8 +256,7 @@ public boolean isEmpty() { } /** - * @param i - * from 0 to {@link #getRootBlockCount() - 1} + * @param i from 0 to {@link #getRootBlockCount() - 1} */ public Cell getRootBlockKey(int i) { return blockKeys[i]; @@ -278,8 +265,8 @@ public Cell getRootBlockKey(int i) { @Override public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, - DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException { + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException { int rootLevelIndex = rootBlockContainingKey(key); if (rootLevelIndex < 0 || rootLevelIndex >= blockOffsets.length) { return null; @@ -356,9 +343,8 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB if (index == -1) { // This has to be changed // For now change this to key value - throw new IOException("The key " - + CellUtil.getCellKeyAsString(key) - + " is before the" + " first key of the non-root index 
block " + block); + throw new IOException("The key " + CellUtil.getCellKeyAsString(key) + " is before the" + + " first key of the non-root index block " + block); } currentOffset = buffer.getLong(); @@ -396,8 +382,7 @@ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentB @Override public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { - if (rootCount == 0) - throw new IOException("HFile empty"); + if (rootCount == 0) throw new IOException("HFile empty"); Cell targetMidKey = this.midKey.get(); if (targetMidKey != null) { @@ -406,23 +391,21 @@ public Cell midkey(CachingBlockReader cachingBlockReader) throws IOException { if (midLeafBlockOffset >= 0) { if (cachingBlockReader == null) { - throw new IOException("Have to read the middle leaf block but " + - "no block reader available"); + throw new IOException( + "Have to read the middle leaf block but " + "no block reader available"); } // Caching, using pread, assuming this is not a compaction. - HFileBlock midLeafBlock = cachingBlockReader.readBlock( - midLeafBlockOffset, midLeafBlockOnDiskSize, true, true, false, true, - BlockType.LEAF_INDEX, null); + HFileBlock midLeafBlock = cachingBlockReader.readBlock(midLeafBlockOffset, + midLeafBlockOnDiskSize, true, true, false, true, BlockType.LEAF_INDEX, null); try { ByteBuff b = midLeafBlock.getBufferWithoutHeader(); int numDataBlocks = b.getIntAfterPosition(0); int keyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 1)); int keyLen = b.getIntAfterPosition(Bytes.SIZEOF_INT * (midKeyEntry + 2)) - keyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; - int keyOffset = - Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset - + SECONDARY_INDEX_ENTRY_OVERHEAD; + int keyOffset = Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset + + SECONDARY_INDEX_ENTRY_OVERHEAD; byte[] bytes = b.toBytes(keyOffset, keyLen); targetMidKey = new KeyValue.KeyOnlyKeyValue(bytes, 0, bytes.length); } finally { @@ -444,7 +427,6 @@ protected void initialize(int numEntries) { /** * Adds a new entry in the root block index. Only used when reading. - * * @param key Last key in the block * @param offset file offset where the block is stored * @param dataSize the uncompressed data size @@ -462,8 +444,8 @@ protected void add(final byte[] key, final long offset, final int dataSize) { public int rootBlockContainingKey(final byte[] key, int offset, int length, CellComparator comp) { // This should always be called with Cell not with a byte[] key - throw new UnsupportedOperationException("Cannot find for a key containing plain byte " + - "array. Only cell based keys can be searched for"); + throw new UnsupportedOperationException("Cannot find for a key containing plain byte " + + "array. Only cell based keys can be searched for"); } @Override @@ -494,8 +476,7 @@ public String toString() { StringBuilder sb = new StringBuilder(); sb.append("size=" + rootCount).append("\n"); for (int i = 0; i < rootCount; i++) { - sb.append("key=").append((blockKeys[i])) - .append("\n offset=").append(blockOffsets[i]) + sb.append("key=").append((blockKeys[i])).append("\n offset=").append(blockOffsets[i]) .append(", dataSize=" + blockDataSizes[i]).append("\n"); } return sb.toString(); @@ -503,14 +484,12 @@ public String toString() { } /** - * The reader will always hold the root level index in the memory. Index - * blocks at all other levels will be cached in the LRU cache in practice, - * although this API does not enforce that. - * - *

          All non-root (leaf and intermediate) index blocks contain what we call a - * "secondary index": an array of offsets to the entries within the block. - * This allows us to do binary search for the entry corresponding to the - * given key without having to deserialize the block. + * The reader will always hold the root level index in the memory. Index blocks at all other + * levels will be cached in the LRU cache in practice, although this API does not enforce that. + *

          + * All non-root (leaf and intermediate) index blocks contain what we call a "secondary index": an + * array of offsets to the entries within the block. This allows us to do binary search for the + * entry corresponding to the given key without having to deserialize the block. */ static abstract class BlockIndexReader implements HeapSize { @@ -524,8 +503,8 @@ static abstract class BlockIndexReader implements HeapSize { protected int midKeyEntry = -1; /** - * The number of levels in the block index tree. One if there is only root - * level, two for root and leaf levels, etc. + * The number of levels in the block index tree. One if there is only root level, two for root + * and leaf levels, etc. */ protected int searchTreeLevel; @@ -535,8 +514,8 @@ static abstract class BlockIndexReader implements HeapSize { public abstract boolean isEmpty(); /** - * Verifies that the block index is non-empty and throws an - * {@link IllegalStateException} otherwise. + * Verifies that the block index is non-empty and throws an {@link IllegalStateException} + * otherwise. */ public void ensureNonEmpty() { if (isEmpty()) { @@ -545,17 +524,16 @@ public void ensureNonEmpty() { } /** - * Return the data block which contains this key. This function will only - * be called when the HFile version is larger than 1. - * + * Return the data block which contains this key. This function will only be called when the + * HFile version is larger than 1. * @param key the key we are looking for * @param currentBlock the current block, to avoid re-reading the same block * @param cacheBlocks * @param pread * @param isCompaction - * @param expectedDataBlockEncoding the data block encoding the caller is - * expecting the data block to be in, or null to not perform this - * check and return the block irrespective of the encoding + * @param expectedDataBlockEncoding the data block encoding the caller is expecting the data + * block to be in, or null to not perform this check and return the block irrespective + * of the encoding * @return reader a basic way to load blocks * @throws IOException */ @@ -572,29 +550,26 @@ public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boole } /** - * Return the BlockWithScanInfo, a data structure which contains the Data HFileBlock with - * other scan info such as the key that starts the next HFileBlock. This function will only - * be called when the HFile version is larger than 1. - * + * Return the BlockWithScanInfo, a data structure which contains the Data HFileBlock with other + * scan info such as the key that starts the next HFileBlock. This function will only be called + * when the HFile version is larger than 1. * @param key the key we are looking for * @param currentBlock the current block, to avoid re-reading the same block - * @param expectedDataBlockEncoding the data block encoding the caller is - * expecting the data block to be in, or null to not perform this - * check and return the block irrespective of the encoding. - * @return the BlockWithScanInfo which contains the DataBlock with other - * scan info such as nextIndexedKey. + * @param expectedDataBlockEncoding the data block encoding the caller is expecting the data + * block to be in, or null to not perform this check and return the block irrespective + * of the encoding. + * @return the BlockWithScanInfo which contains the DataBlock with other scan info such as + * nextIndexedKey. 
* @throws IOException */ public abstract BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, - DataBlockEncoding expectedDataBlockEncoding, - CachingBlockReader cachingBlockReader) throws IOException; + DataBlockEncoding expectedDataBlockEncoding, CachingBlockReader cachingBlockReader) + throws IOException; /** - * An approximation to the {@link HFile}'s mid-key. Operates on block - * boundaries, and does not go inside blocks. In other words, returns the - * first key of the middle block of the file. - * + * An approximation to the {@link HFile}'s mid-key. Operates on block boundaries, and does not + * go inside blocks. In other words, returns the first key of the middle block of the file. * @return the first key of the middle block */ public abstract Cell midkey(CachingBlockReader cachingBlockReader) throws IOException; @@ -608,8 +583,8 @@ public long getRootBlockOffset(int i) { /** * @param i zero-based index of a root-level block - * @return the on-disk size of the root-level block for version 2, or the - * uncompressed size for version 1 + * @return the on-disk size of the root-level block for version 2, or the uncompressed size for + * version 1 */ public int getRootBlockDataSize(int i) { return blockDataSizes[i]; @@ -624,14 +599,10 @@ public int getRootBlockCount() { /** * Finds the root-level index block containing the given key. - * - * @param key - * Key to find - * @param comp - * the comparator to be used - * @return Offset of block containing key (between 0 and the - * number of blocks - 1) or -1 if this file does not contain the - * request. + * @param key Key to find + * @param comp the comparator to be used + * @return Offset of block containing key (between 0 and the number of blocks - 1) + * or -1 if this file does not contain the request. */ // When we want to find the meta index block or bloom block for ROW bloom // type Bytes.BYTES_RAWCOMPARATOR would be enough. For the ROW_COL bloom case we need the @@ -641,12 +612,9 @@ public abstract int rootBlockContainingKey(final byte[] key, int offset, int len /** * Finds the root-level index block containing the given key. - * - * @param key - * Key to find - * @return Offset of block containing key (between 0 and the - * number of blocks - 1) or -1 if this file does not contain the - * request. + * @param key Key to find + * @return Offset of block containing key (between 0 and the number of blocks - 1) + * or -1 if this file does not contain the request. */ // When we want to find the meta index block or bloom block for ROW bloom // type @@ -658,9 +626,7 @@ public int rootBlockContainingKey(final byte[] key, int offset, int length) { /** * Finds the root-level index block containing the given key. - * - * @param key - * Key to find + * @param key Key to find */ public abstract int rootBlockContainingKey(final Cell key); @@ -680,38 +646,32 @@ protected byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) { // The secondary index takes numEntries + 1 ints. 
int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2); // Targetkey's offset relative to the end of secondary index - int targetKeyRelOffset = nonRootIndex.getInt( - Bytes.SIZEOF_INT * (i + 1)); + int targetKeyRelOffset = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 1)); // The offset of the target key in the blockIndex buffer - int targetKeyOffset = entriesOffset // Skip secondary index - + targetKeyRelOffset // Skip all entries until mid - + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size + int targetKeyOffset = entriesOffset // Skip secondary index + + targetKeyRelOffset // Skip all entries until mid + + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size // We subtract the two consecutive secondary index elements, which // gives us the size of the whole (offset, onDiskSize, key) tuple. We // then need to subtract the overhead of offset and onDiskSize. - int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - - targetKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; + int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - targetKeyRelOffset + - SECONDARY_INDEX_ENTRY_OVERHEAD; // TODO check whether we can make BB backed Cell here? So can avoid bytes copy. return nonRootIndex.toBytes(targetKeyOffset, targetKeyLength); } /** - * Performs a binary search over a non-root level index block. Utilizes the - * secondary index, which records the offsets of (offset, onDiskSize, - * firstKey) tuples of all entries. - * - * @param key - * the key we are searching for offsets to individual entries in - * the blockIndex buffer - * @param nonRootIndex - * the non-root index block buffer, starting with the secondary - * index. The position is ignored. - * @return the index i in [0, numEntries - 1] such that keys[i] <= key < - * keys[i + 1], if keys is the array of all keys being searched, or - * -1 otherwise + * Performs a binary search over a non-root level index block. Utilizes the secondary index, + * which records the offsets of (offset, onDiskSize, firstKey) tuples of all entries. + * @param key the key we are searching for offsets to individual entries in the blockIndex + * buffer + * @param nonRootIndex the non-root index block buffer, starting with the secondary index. The + * position is ignored. + * @return the index i in [0, numEntries - 1] such that keys[i] <= key < keys[i + 1], if keys is + * the array of all keys being searched, or -1 otherwise * @throws IOException */ static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex, @@ -738,15 +698,15 @@ static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex, int midKeyRelOffset = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 1)); // The offset of the middle key in the blockIndex buffer - int midKeyOffset = entriesOffset // Skip secondary index - + midKeyRelOffset // Skip all entries until mid - + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size + int midKeyOffset = entriesOffset // Skip secondary index + + midKeyRelOffset // Skip all entries until mid + + SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size // We subtract the two consecutive secondary index elements, which // gives us the size of the whole (offset, onDiskSize, key) tuple. We // then need to subtract the overhead of offset and onDiskSize. 
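The offset arithmetic above is easier to follow against the non-root block layout it assumes: an entry count, a "secondary index" of numEntries + 1 relative offsets, then the entries themselves, each carrying a long file offset and an int on-disk size before the key bytes (the 12-byte SECONDARY_INDEX_ENTRY_OVERHEAD). A self-contained sketch of extracting entry i over a plain ByteBuffer, mirroring getNonRootIndexedKey().

import java.nio.ByteBuffer;

// Sketch only: nonRootBlock is the block body (header already stripped), indexed from 0.
final class NonRootIndexExample {
  static final int ENTRY_OVERHEAD = 8 + 4; // SIZEOF_LONG + SIZEOF_INT

  /** Extract the key of entry {@code i} from a non-root index block body. */
  static byte[] keyAt(ByteBuffer nonRootBlock, int i) {
    int numEntries = nonRootBlock.getInt(0);
    int entriesOffset = 4 * (numEntries + 2);             // skip count + secondary index
    int relOffset = nonRootBlock.getInt(4 * (i + 1));     // entry i, relative to entriesOffset
    int nextRelOffset = nonRootBlock.getInt(4 * (i + 2)); // entry i + 1
    int keyOffset = entriesOffset + relOffset + ENTRY_OVERHEAD;
    int keyLength = nextRelOffset - relOffset - ENTRY_OVERHEAD;
    byte[] key = new byte[keyLength];
    ByteBuffer dup = nonRootBlock.duplicate();
    dup.position(keyOffset);
    dup.get(key);
    return key;
  }
}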
- int midLength = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 2)) - - midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; + int midLength = nonRootIndex.getIntAfterPosition(Bytes.SIZEOF_INT * (mid + 2)) + - midKeyRelOffset - SECONDARY_INDEX_ENTRY_OVERHEAD; // we have to compare in this order, because the comparator order // has special logic when the 'left side' is a special key. @@ -758,13 +718,10 @@ static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex, int cmp = PrivateCellUtil.compareKeyIgnoresMvcc(comparator, key, nonRootIndexkeyOnlyKV); // key lives above the midpoint - if (cmp > 0) - low = mid + 1; // Maintain the invariant that keys[low - 1] < key + if (cmp > 0) low = mid + 1; // Maintain the invariant that keys[low - 1] < key // key lives below the midpoint - else if (cmp < 0) - high = mid - 1; // Maintain the invariant that key < keys[high + 1] - else - return mid; // exact match + else if (cmp < 0) high = mid - 1; // Maintain the invariant that key < keys[high + 1] + else return mid; // exact match } // As per our invariant, keys[low - 1] < key < keys[high + 1], meaning @@ -772,8 +729,8 @@ else if (cmp < 0) // condition, low >= high + 1. Therefore, low = high + 1. if (low != high + 1) { - throw new IllegalStateException("Binary search broken: low=" + low - + " " + "instead of " + (high + 1)); + throw new IllegalStateException( + "Binary search broken: low=" + low + " " + "instead of " + (high + 1)); } // OK, our invariant says that keys[low - 1] < key < keys[low]. We need to @@ -782,30 +739,23 @@ else if (cmp < 0) // Some extra validation on the result. if (i < -1 || i >= numEntries) { - throw new IllegalStateException("Binary search broken: result is " + - i + " but expected to be between -1 and (numEntries - 1) = " + - (numEntries - 1)); + throw new IllegalStateException("Binary search broken: result is " + i + + " but expected to be between -1 and (numEntries - 1) = " + (numEntries - 1)); } return i; } /** - * Search for one key using the secondary index in a non-root block. In case - * of success, positions the provided buffer at the entry of interest, where - * the file offset and the on-disk-size can be read. - * - * @param nonRootBlock - * a non-root block without header. Initial position does not - * matter. - * @param key - * the byte array containing the key - * @return the index position where the given key was found, otherwise - * return -1 in the case the given key is before the first key. - * + * Search for one key using the secondary index in a non-root block. In case of success, + * positions the provided buffer at the entry of interest, where the file offset and the + * on-disk-size can be read. + * @param nonRootBlock a non-root block without header. Initial position does not matter. + * @param key the byte array containing the key + * @return the index position where the given key was found, otherwise return -1 in the case the + * given key is before the first key. */ - static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, - CellComparator comparator) { + static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, CellComparator comparator) { int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator); if (entryIndex != -1) { @@ -816,8 +766,7 @@ static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, // The offset of the entry we are interested in relative to the end of // the secondary index. 
- int entryRelOffset = nonRootBlock - .getIntAfterPosition(Bytes.SIZEOF_INT * (1 + entryIndex)); + int entryRelOffset = nonRootBlock.getIntAfterPosition(Bytes.SIZEOF_INT * (1 + entryIndex)); nonRootBlock.position(entriesOffset + entryRelOffset); } @@ -826,11 +775,9 @@ static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, } /** - * Read in the root-level index from the given input stream. Must match - * what was written into the root level by - * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the - * offset that function returned. - * + * Read in the root-level index from the given input stream. Must match what was written into + * the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset + * that function returned. * @param in the buffered input stream or wrapped byte input stream * @param numEntries the number of root-level index entries * @throws IOException @@ -856,11 +803,9 @@ public void readRootIndex(DataInput in, final int numEntries) throws IOException protected abstract void add(final byte[] key, final long offset, final int dataSize); /** - * Read in the root-level index from the given input stream. Must match - * what was written into the root level by - * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the - * offset that function returned. - * + * Read in the root-level index from the given input stream. Must match what was written into + * the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset + * that function returned. * @param blk the HFile block * @param numEntries the number of root-level index entries * @return the buffered input stream or wrapped byte input stream @@ -874,15 +819,13 @@ public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throw /** * Read the root-level metadata of a multi-level block index. Based on - * {@link #readRootIndex(DataInput, int)}, but also reads metadata - * necessary to compute the mid-key in a multi-level index. - * + * {@link #readRootIndex(DataInput, int)}, but also reads metadata necessary to compute the + * mid-key in a multi-level index. * @param blk the HFile block * @param numEntries the number of root-level index entries * @throws IOException */ - public void readMultiLevelIndexRoot(HFileBlock blk, - final int numEntries) throws IOException { + public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException { DataInputStream in = readRootIndex(blk, numEntries); // after reading the root index the checksum bytes have to // be subtracted to know if the mid key exists. @@ -899,8 +842,8 @@ public void readMultiLevelIndexRoot(HFileBlock blk, @Override public long heapSize() { // The BlockIndexReader does not have the blockKey, comparator and the midkey atomic reference - long heapSize = ClassSize.align(3 * ClassSize.REFERENCE + - 2 * Bytes.SIZEOF_INT + ClassSize.OBJECT); + long heapSize = + ClassSize.align(3 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + ClassSize.OBJECT); // Mid-key metadata. 
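For reference, the root-level serialization consumed by readRootIndex() above is, per entry, a long block offset, an int on-disk data size, and a vint-length-prefixed key (what Bytes.readByteArray() consumes). A hedged sketch of reading one such entry; the RootIndexEntry type is illustrative, not part of this patch.

import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

// Sketch of one root-level index entry as read back by readRootIndex().
final class RootIndexEntry {
  final long offset;
  final int onDiskDataSize;
  final byte[] key;

  RootIndexEntry(long offset, int onDiskDataSize, byte[] key) {
    this.offset = offset;
    this.onDiskDataSize = onDiskDataSize;
    this.key = key;
  }

  static RootIndexEntry read(DataInput in) throws IOException {
    long offset = in.readLong();
    int dataSize = in.readInt();
    byte[] key = new byte[WritableUtils.readVInt(in)]; // vint length prefix, then bytes
    in.readFully(key);
    return new RootIndexEntry(offset, dataSize, key);
  }
}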
heapSize += MID_KEY_METADATA_SIZE; @@ -908,13 +851,11 @@ public long heapSize() { heapSize = calculateHeapSizeForBlockKeys(heapSize); if (blockOffsets != null) { - heapSize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length - * Bytes.SIZEOF_LONG); + heapSize += ClassSize.align(ClassSize.ARRAY + blockOffsets.length * Bytes.SIZEOF_LONG); } if (blockDataSizes != null) { - heapSize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length - * Bytes.SIZEOF_INT); + heapSize += ClassSize.align(ClassSize.ARRAY + blockDataSizes.length * Bytes.SIZEOF_INT); } return ClassSize.align(heapSize); @@ -924,46 +865,38 @@ public long heapSize() { } /** - * Writes the block index into the output stream. Generate the tree from - * bottom up. The leaf level is written to disk as a sequence of inline - * blocks, if it is larger than a certain number of bytes. If the leaf level - * is not large enough, we write all entries to the root level instead. - * - * After all leaf blocks have been written, we end up with an index - * referencing the resulting leaf index blocks. If that index is larger than - * the allowed root index size, the writer will break it up into - * reasonable-size intermediate-level index block chunks write those chunks - * out, and create another index referencing those chunks. This will be - * repeated until the remaining index is small enough to become the root - * index. However, in most practical cases we will only have leaf-level - * blocks and the root index, or just the root index. + * Writes the block index into the output stream. Generate the tree from bottom up. The leaf level + * is written to disk as a sequence of inline blocks, if it is larger than a certain number of + * bytes. If the leaf level is not large enough, we write all entries to the root level instead. + * After all leaf blocks have been written, we end up with an index referencing the resulting leaf + * index blocks. If that index is larger than the allowed root index size, the writer will break + * it up into reasonable-size intermediate-level index block chunks write those chunks out, and + * create another index referencing those chunks. This will be repeated until the remaining index + * is small enough to become the root index. However, in most practical cases we will only have + * leaf-level blocks and the root index, or just the root index. */ public static class BlockIndexWriter implements InlineBlockWriter { /** - * While the index is being written, this represents the current block - * index referencing all leaf blocks, with one exception. If the file is - * being closed and there are not enough blocks to complete even a single - * leaf block, no leaf blocks get written and this contains the entire + * While the index is being written, this represents the current block index referencing all + * leaf blocks, with one exception. If the file is being closed and there are not enough blocks + * to complete even a single leaf block, no leaf blocks get written and this contains the entire * block index. After all levels of the index were written by - * {@link #writeIndexBlocks(FSDataOutputStream)}, this contains the final - * root-level index. + * {@link #writeIndexBlocks(FSDataOutputStream)}, this contains the final root-level index. */ private BlockIndexChunk rootChunk = new BlockIndexChunk(); /** - * Current leaf-level chunk. New entries referencing data blocks get added - * to this chunk until it grows large enough to be written to disk. + * Current leaf-level chunk. 
New entries referencing data blocks get added to this chunk until + * it grows large enough to be written to disk. */ private BlockIndexChunk curInlineChunk = new BlockIndexChunk(); /** - * The number of block index levels. This is one if there is only root - * level (even empty), two if there a leaf level and root level, and is - * higher if there are intermediate levels. This is only final after - * {@link #writeIndexBlocks(FSDataOutputStream)} has been called. The - * initial value accounts for the root level, and will be increased to two - * as soon as we find out there is a leaf-level in - * {@link #blockWritten(long, int, int)}. + * The number of block index levels. This is one if there is only root level (even empty), two + * if there a leaf level and root level, and is higher if there are intermediate levels. This is + * only final after {@link #writeIndexBlocks(FSDataOutputStream)} has been called. The initial + * value accounts for the root level, and will be increased to two as soon as we find out there + * is a leaf-level in {@link #blockWritten(long, int, int)}. */ private int numLevels = 1; @@ -971,9 +904,8 @@ public static class BlockIndexWriter implements InlineBlockWriter { private byte[] firstKey = null; /** - * The total number of leaf-level entries, i.e. entries referenced by - * leaf-level blocks. For the data block index this is equal to the number - * of data blocks. + * The total number of leaf-level entries, i.e. entries referenced by leaf-level blocks. For the + * data block index this is equal to the number of data blocks. */ private long totalNumEntries; @@ -1006,15 +938,14 @@ public BlockIndexWriter() { /** * Creates a multi-level block index writer. - * * @param blockWriter the block writer to use to write index blocks * @param cacheConf used to determine when and how a block should be cached-on-write. */ - public BlockIndexWriter(HFileBlock.Writer blockWriter, - CacheConfig cacheConf, String nameForCaching) { + public BlockIndexWriter(HFileBlock.Writer blockWriter, CacheConfig cacheConf, + String nameForCaching) { if ((cacheConf == null) != (nameForCaching == null)) { - throw new IllegalArgumentException("Block cache and file name for " + - "caching must be both specified or both null"); + throw new IllegalArgumentException( + "Block cache and file name for " + "caching must be both specified or both null"); } this.blockWriter = blockWriter; @@ -1039,34 +970,27 @@ public void setMinIndexNumEntries(int minIndexNumEntries) { } /** - * Writes the root level and intermediate levels of the block index into - * the output stream, generating the tree from bottom up. Assumes that the - * leaf level has been inline-written to the disk if there is enough data - * for more than one leaf block. We iterate by breaking the current level - * of the block index, starting with the index of all leaf-level blocks, - * into chunks small enough to be written to disk, and generate its parent - * level, until we end up with a level small enough to become the root - * level. - * - * If the leaf level is not large enough, there is no inline block index - * anymore, so we only write that level of block index to disk as the root - * level. - * + * Writes the root level and intermediate levels of the block index into the output stream, + * generating the tree from bottom up. Assumes that the leaf level has been inline-written to + * the disk if there is enough data for more than one leaf block. 
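The bottom-up construction described here can be reduced to a toy model: whenever a level holds more entries than fit in one index block, it is split into chunks, each chunk is written as one index block, and the parent level gets one entry per written block, until what remains fits in the root. The self-contained sketch below counts levels by entry count only; the real writer splits by serialized chunk size (maxChunkSize) and respects minIndexNumEntries, so treat this strictly as an illustration.

// Toy model of the multi-level index build, not the real BlockIndexWriter.
final class MultiLevelIndexToy {
  /** @return number of index levels needed (1 = root only), analogous to numLevels. */
  static int buildLevels(int leafEntries, int maxEntriesPerBlock) {
    int levels = 1;
    int current = leafEntries;
    while (current > maxEntriesPerBlock) {
      // Split the current level into ceil(current / maxEntriesPerBlock) index blocks;
      // the parent level holds one entry per block just written.
      current = (current + maxEntriesPerBlock - 1) / maxEntriesPerBlock;
      levels++;
    }
    return levels; // the remaining 'current' entries become the root-level index
  }

  public static void main(String[] args) {
    // e.g. 1,000,000 data blocks with ~128 entries per index block -> 3 levels
    // (leaf, intermediate, root).
    System.out.println(buildLevels(1_000_000, 128)); // prints 3
  }
}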
We iterate by breaking the + * current level of the block index, starting with the index of all leaf-level blocks, into + * chunks small enough to be written to disk, and generate its parent level, until we end up + * with a level small enough to become the root level. If the leaf level is not large enough, + * there is no inline block index anymore, so we only write that level of block index to disk as + * the root level. * @param out FSDataOutputStream * @return position at which we entered the root-level index. * @throws IOException */ public long writeIndexBlocks(FSDataOutputStream out) throws IOException { if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) { - throw new IOException("Trying to write a multi-level block index, " + - "but are " + curInlineChunk.getNumEntries() + " entries in the " + - "last inline chunk."); + throw new IOException("Trying to write a multi-level block index, " + "but are " + + curInlineChunk.getNumEntries() + " entries in the " + "last inline chunk."); } // We need to get mid-key metadata before we create intermediate // indexes and overwrite the root chunk. - byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata() - : null; + byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata() : null; if (curInlineChunk != null) { while (rootChunk.getRootSize() > maxChunkSize @@ -1083,80 +1007,69 @@ public long writeIndexBlocks(FSDataOutputStream out) throws IOException { long rootLevelIndexPos = out.getPos(); { - DataOutput blockStream = - blockWriter.startWriting(BlockType.ROOT_INDEX); + DataOutput blockStream = blockWriter.startWriting(BlockType.ROOT_INDEX); rootChunk.writeRoot(blockStream); - if (midKeyMetadata != null) - blockStream.write(midKeyMetadata); + if (midKeyMetadata != null) blockStream.write(midKeyMetadata); blockWriter.writeHeaderAndData(out); if (cacheConf != null) { cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cache.cacheBlock(new BlockCacheKey(nameForCaching, rootLevelIndexPos, true, - blockForCaching.getBlockType()), blockForCaching); + blockForCaching.getBlockType()), + blockForCaching); }); } } // Add root index block size totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader(); - totalBlockUncompressedSize += - blockWriter.getUncompressedSizeWithoutHeader(); + totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader(); if (LOG.isTraceEnabled()) { - LOG.trace("Wrote a " + numLevels + "-level index with root level at pos " - + rootLevelIndexPos + ", " + rootChunk.getNumEntries() - + " root-level entries, " + totalNumEntries + " total entries, " - + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + - " on-disk size, " - + StringUtils.humanReadableInt(totalBlockUncompressedSize) + - " total uncompressed size."); + LOG.trace( + "Wrote a " + numLevels + "-level index with root level at pos " + rootLevelIndexPos + ", " + + rootChunk.getNumEntries() + " root-level entries, " + totalNumEntries + + " total entries, " + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + + " on-disk size, " + StringUtils.humanReadableInt(totalBlockUncompressedSize) + + " total uncompressed size."); } return rootLevelIndexPos; } /** - * Writes the block index data as a single level only. Does not do any - * block framing. - * - * @param out the buffered output stream to write the index to. Typically a - * stream writing into an {@link HFile} block. 
- * @param description a short description of the index being written. Used - * in a log message. + * Writes the block index data as a single level only. Does not do any block framing. + * @param out the buffered output stream to write the index to. Typically a stream writing into + * an {@link HFile} block. + * @param description a short description of the index being written. Used in a log message. * @throws IOException */ - public void writeSingleLevelIndex(DataOutput out, String description) - throws IOException { + public void writeSingleLevelIndex(DataOutput out, String description) throws IOException { expectNumLevels(1); - if (!singleLevelOnly) - throw new IOException("Single-level mode is turned off"); + if (!singleLevelOnly) throw new IOException("Single-level mode is turned off"); if (rootChunk.getNumEntries() > 0) - throw new IOException("Root-level entries already added in " + - "single-level mode"); + throw new IOException("Root-level entries already added in " + "single-level mode"); rootChunk = curInlineChunk; curInlineChunk = new BlockIndexChunk(); if (LOG.isTraceEnabled()) { - LOG.trace("Wrote a single-level " + description + " index with " - + rootChunk.getNumEntries() + " entries, " + rootChunk.getRootSize() - + " bytes"); + LOG.trace("Wrote a single-level " + description + " index with " + rootChunk.getNumEntries() + + " entries, " + rootChunk.getRootSize() + " bytes"); } rootChunk.writeRoot(out); } /** - * Split the current level of the block index into intermediate index - * blocks of permitted size and write those blocks to disk. Return the next - * level of the block index referencing those intermediate-level blocks. - * + * Split the current level of the block index into intermediate index blocks of permitted size + * and write those blocks to disk. Return the next level of the block index referencing those + * intermediate-level blocks. * @param out - * @param currentLevel the current level of the block index, such as the a - * chunk referencing all leaf-level index blocks - * @return the parent level block index, which becomes the root index after - * a few (usually zero) iterations + * @param currentLevel the current level of the block index, such as a chunk referencing all + * leaf-level index blocks + * @return the parent level block index, which becomes the root index after a few (usually zero) + * iterations * @throws IOException */ private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out, @@ -1168,8 +1081,8 @@ private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out, BlockIndexChunk curChunk = new BlockIndexChunk(); for (int i = 0; i < currentLevel.getNumEntries(); ++i) { - curChunk.add(currentLevel.getBlockKey(i), - currentLevel.getBlockOffset(i), currentLevel.getOnDiskDataSize(i)); + curChunk.add(currentLevel.getBlockKey(i), currentLevel.getBlockOffset(i), + currentLevel.getOnDiskDataSize(i)); // HBASE-16288: We have to have at least minIndexNumEntries(16) items in the index so that // we won't end up with too-many levels for a index with very large rowKeys.
Also, if the @@ -1186,11 +1099,10 @@ private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out, return parent; } - private void writeIntermediateBlock(FSDataOutputStream out, - BlockIndexChunk parent, BlockIndexChunk curChunk) throws IOException { + private void writeIntermediateBlock(FSDataOutputStream out, BlockIndexChunk parent, + BlockIndexChunk curChunk) throws IOException { long beginOffset = out.getPos(); - DataOutputStream dos = blockWriter.startWriting( - BlockType.INTERMEDIATE_INDEX); + DataOutputStream dos = blockWriter.startWriting(BlockType.INTERMEDIATE_INDEX); curChunk.writeNonRoot(dos); byte[] curFirstKey = curChunk.getBlockKey(0); blockWriter.writeHeaderAndData(out); @@ -1199,23 +1111,21 @@ private void writeIntermediateBlock(FSDataOutputStream out, cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cache.cacheBlock( - new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()), - blockForCaching); + new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()), + blockForCaching); }); } // Add intermediate index block size totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader(); - totalBlockUncompressedSize += - blockWriter.getUncompressedSizeWithoutHeader(); + totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader(); // OFFSET is the beginning offset the chunk of block index entries. // SIZE is the total byte size of the chunk of block index entries // + the secondary index size // FIRST_KEY is the first key in the chunk of block index // entries. - parent.add(curFirstKey, beginOffset, - blockWriter.getOnDiskSizeWithHeader()); + parent.add(curFirstKey, beginOffset, blockWriter.getOnDiskSizeWithHeader()); // clear current block index chunk curChunk.clear(); @@ -1238,15 +1148,15 @@ public int getNumLevels() { private void expectNumLevels(int expectedNumLevels) { if (numLevels != expectedNumLevels) { - throw new IllegalStateException("Number of block index levels is " - + numLevels + "but is expected to be " + expectedNumLevels); + throw new IllegalStateException("Number of block index levels is " + numLevels + + "but is expected to be " + expectedNumLevels); } } /** - * Whether there is an inline block ready to be written. In general, we - * write an leaf-level index block as an inline block as soon as its size - * as serialized in the non-root format reaches a certain threshold. + * Whether there is an inline block ready to be written. In general, we write a leaf-level + * index block as an inline block as soon as its size as serialized in the non-root format + * reaches a certain threshold. */ @Override public boolean shouldWriteBlock(boolean closing) { @@ -1255,8 +1165,8 @@ public boolean shouldWriteBlock(boolean closing) { } if (curInlineChunk == null) { - throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " + - "called with closing=true and then called again?"); + throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been " + + "called with closing=true and then called again?"); } if (curInlineChunk.getNumEntries() == 0) { @@ -1271,7 +1181,7 @@ public boolean shouldWriteBlock(boolean closing) { expectNumLevels(1); rootChunk = curInlineChunk; - curInlineChunk = null; // Disallow adding any more index entries. + curInlineChunk = null; // Disallow adding any more index entries.
return false; } @@ -1282,15 +1192,13 @@ public boolean shouldWriteBlock(boolean closing) { } /** - * Write out the current inline index block. Inline blocks are non-root - * blocks, so the non-root index format is used. - * + * Write out the current inline index block. Inline blocks are non-root blocks, so the non-root + * index format is used. * @param out */ @Override public void writeInlineBlock(DataOutput out) throws IOException { - if (singleLevelOnly) - throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); + if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); // Write the inline block index to the output stream in the non-root // index block format. @@ -1305,8 +1213,8 @@ public void writeInlineBlock(DataOutput out) throws IOException { } /** - * Called after an inline block has been written so that we can add an - * entry referring to that block to the parent-level index. + * Called after an inline block has been written so that we can add an entry referring to that + * block to the parent-level index. */ @Override public void blockWritten(long offset, int onDiskSize, int uncompressedSize) { @@ -1314,13 +1222,12 @@ public void blockWritten(long offset, int onDiskSize, int uncompressedSize) { totalBlockOnDiskSize += onDiskSize; totalBlockUncompressedSize += uncompressedSize; - if (singleLevelOnly) - throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); + if (singleLevelOnly) throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED); if (firstKey == null) { - throw new IllegalStateException("Trying to add second-level index " + - "entry with offset=" + offset + " and onDiskSize=" + onDiskSize + - "but the first key was not set in writeInlineBlock"); + throw new IllegalStateException( + "Trying to add second-level index " + "entry with offset=" + offset + " and onDiskSize=" + + onDiskSize + "but the first key was not set in writeInlineBlock"); } if (rootChunk.getNumEntries() == 0) { @@ -1341,14 +1248,12 @@ public BlockType getInlineBlockType() { } /** - * Add one index entry to the current leaf-level block. When the leaf-level - * block gets large enough, it will be flushed to disk as an inline block. - * + * Add one index entry to the current leaf-level block. When the leaf-level block gets large + * enough, it will be flushed to disk as an inline block. * @param firstKey the first key of the data block * @param blockOffset the offset of the data block - * @param blockDataSize the on-disk size of the data block ({@link HFile} - * format version 2), or the uncompressed size of the data block ( - * {@link HFile} format version 1). + * @param blockDataSize the on-disk size of the data block ({@link HFile} format version 2), or + * the uncompressed size of the data block ( {@link HFile} format version 1). 
*/ public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize) { curInlineChunk.add(firstKey, blockOffset, blockDataSize); @@ -1360,16 +1265,15 @@ public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize) { */ public void ensureSingleLevel() throws IOException { if (numLevels > 1) { - throw new IOException ("Wrote a " + numLevels + "-level index with " + - rootChunk.getNumEntries() + " root-level entries, but " + - "this is expected to be a single-level block index."); + throw new IOException("Wrote a " + numLevels + "-level index with " + + rootChunk.getNumEntries() + " root-level entries, but " + + "this is expected to be a single-level block index."); } } /** - * @return true if we are using cache-on-write. This is configured by the - * caller of the constructor by either passing a valid block cache - * or null. + * @return true if we are using cache-on-write. This is configured by the caller of the + * constructor by either passing a valid block cache or null. */ @Override public boolean getCacheOnWrite() { @@ -1377,9 +1281,8 @@ public boolean getCacheOnWrite() { } /** - * The total uncompressed size of the root index block, intermediate-level - * index blocks, and leaf-level index blocks. - * + * The total uncompressed size of the root index block, intermediate-level index blocks, and + * leaf-level index blocks. * @return the total uncompressed size of all index blocks */ public long getTotalUncompressedSize() { @@ -1389,9 +1292,8 @@ public long getTotalUncompressedSize() { } /** - * A single chunk of the block index in the process of writing. The data in - * this chunk can become a leaf-level, intermediate-level, or root index - * block. + * A single chunk of the block index in the process of writing. The data in this chunk can become + * a leaf-level, intermediate-level, or root index block. */ static class BlockIndexChunk { @@ -1405,16 +1307,16 @@ static class BlockIndexChunk { private final List onDiskDataSizes = new ArrayList<>(); /** - * The cumulative number of sub-entries, i.e. entries on deeper-level block - * index entries. numSubEntriesAt[i] is the number of sub-entries in the - * blocks corresponding to this chunk's entries #0 through #i inclusively. + * The cumulative number of sub-entries, i.e. entries on deeper-level block index entries. + * numSubEntriesAt[i] is the number of sub-entries in the blocks corresponding to this chunk's + * entries #0 through #i inclusively. */ private final List numSubEntriesAt = new ArrayList<>(); /** - * The offset of the next entry to be added, relative to the end of the - * "secondary index" in the "non-root" format representation of this index - * chunk. This is the next value to be added to the secondary index. + * The offset of the next entry to be added, relative to the end of the "secondary index" in the + * "non-root" format representation of this index chunk. This is the next value to be added to + * the secondary index. */ private int curTotalNonRootEntrySize = 0; @@ -1424,31 +1326,25 @@ static class BlockIndexChunk { private int curTotalRootSize = 0; /** - * The "secondary index" used for binary search over variable-length - * records in a "non-root" format block. These offsets are relative to the - * end of this secondary index. + * The "secondary index" used for binary search over variable-length records in a "non-root" + * format block. These offsets are relative to the end of this secondary index. 
*/ private final List secondaryIndexOffsetMarks = new ArrayList<>(); /** * Adds a new entry to this block index chunk. - * * @param firstKey the first key in the block pointed to by this entry - * @param blockOffset the offset of the next-level block pointed to by this - * entry - * @param onDiskDataSize the on-disk data of the block pointed to by this - * entry, including header size - * @param curTotalNumSubEntries if this chunk is the root index chunk under - * construction, this specifies the current total number of - * sub-entries in all leaf-level chunks, including the one - * corresponding to the second-level entry being added. + * @param blockOffset the offset of the next-level block pointed to by this entry + * @param onDiskDataSize the on-disk data of the block pointed to by this entry, including + * header size + * @param curTotalNumSubEntries if this chunk is the root index chunk under construction, this + * specifies the current total number of sub-entries in all leaf-level chunks, + * including the one corresponding to the second-level entry being added. */ - void add(byte[] firstKey, long blockOffset, int onDiskDataSize, - long curTotalNumSubEntries) { + void add(byte[] firstKey, long blockOffset, int onDiskDataSize, long curTotalNumSubEntries) { // Record the offset for the secondary index secondaryIndexOffsetMarks.add(curTotalNonRootEntrySize); - curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD - + firstKey.length; + curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD + firstKey.length; curTotalRootSize += Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT + WritableUtils.getVIntSize(firstKey.length) + firstKey.length; @@ -1462,17 +1358,15 @@ void add(byte[] firstKey, long blockOffset, int onDiskDataSize, // Make sure the parallel arrays are in sync. if (numSubEntriesAt.size() != blockKeys.size()) { - throw new IllegalStateException("Only have key/value count " + - "stats for " + numSubEntriesAt.size() + " block index " + - "entries out of " + blockKeys.size()); + throw new IllegalStateException("Only have key/value count " + "stats for " + + numSubEntriesAt.size() + " block index " + "entries out of " + blockKeys.size()); } } } /** - * The same as {@link #add(byte[], long, int, long)} but does not take the - * key/value into account. Used for single-level indexes. - * + * The same as {@link #add(byte[], long, int, long)} but does not take the key/value into + * account. Used for single-level indexes. * @see #add(byte[], long, int, long) */ public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) { @@ -1490,21 +1384,15 @@ public void clear() { } /** - * Finds the entry corresponding to the deeper-level index block containing - * the given deeper-level entry (a "sub-entry"), assuming a global 0-based - * ordering of sub-entries. - * + * Finds the entry corresponding to the deeper-level index block containing the given + * deeper-level entry (a "sub-entry"), assuming a global 0-based ordering of sub-entries. *
          - * Implementation note. We are looking for i such that - * numSubEntriesAt[i - 1] <= k < numSubEntriesAt[i], because a deeper-level - * block #i (0-based) contains sub-entries # numSubEntriesAt[i - 1]'th - * through numSubEntriesAt[i] - 1, assuming a global 0-based ordering of - * sub-entries. i is by definition the insertion point of k in - * numSubEntriesAt. - * + * Implementation note. We are looking for i such that numSubEntriesAt[i - 1] <= k < + * numSubEntriesAt[i], because a deeper-level block #i (0-based) contains sub-entries # + * numSubEntriesAt[i - 1]'th through numSubEntriesAt[i] - 1, assuming a global 0-based ordering + * of sub-entries. i is by definition the insertion point of k in numSubEntriesAt. * @param k sub-entry index, from 0 to the total number sub-entries - 1 - * @return the 0-based index of the entry corresponding to the given - * sub-entry + * @return the 0-based index of the entry corresponding to the given sub-entry */ public int getEntryBySubEntry(long k) { // We define mid-key as the key corresponding to k'th sub-entry @@ -1515,24 +1403,20 @@ public int getEntryBySubEntry(long k) { // Exact match: cumulativeWeight[i] = k. This means chunks #0 through // #i contain exactly k sub-entries, and the sub-entry #k (0-based) // is in the (i + 1)'th chunk. - if (i >= 0) - return i + 1; + if (i >= 0) return i + 1; // Inexact match. Return the insertion point. return -i - 1; } /** - * Used when writing the root block index of a multi-level block index. - * Serializes additional information allowing to efficiently identify the - * mid-key. - * + * Used when writing the root block index of a multi-level block index. Serializes additional + * information allowing to efficiently identify the mid-key. * @return a few serialized fields for finding the mid-key * @throws IOException if could not create metadata for computing mid-key */ public byte[] getMidKeyMetadata() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream( - MID_KEY_METADATA_SIZE); + ByteArrayOutputStream baos = new ByteArrayOutputStream(MID_KEY_METADATA_SIZE); DataOutputStream baosDos = new DataOutputStream(baos); long totalNumSubEntries = numSubEntriesAt.get(blockKeys.size() - 1); if (totalNumSubEntries == 0) { @@ -1544,23 +1428,20 @@ public byte[] getMidKeyMetadata() throws IOException { baosDos.writeLong(blockOffsets.get(midKeyEntry)); baosDos.writeInt(onDiskDataSizes.get(midKeyEntry)); - long numSubEntriesBefore = midKeyEntry > 0 - ? numSubEntriesAt.get(midKeyEntry - 1) : 0; + long numSubEntriesBefore = midKeyEntry > 0 ? 
numSubEntriesAt.get(midKeyEntry - 1) : 0; long subEntryWithinEntry = midKeySubEntry - numSubEntriesBefore; - if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE) - { + if (subEntryWithinEntry < 0 || subEntryWithinEntry > Integer.MAX_VALUE) { throw new IOException("Could not identify mid-key index within the " - + "leaf-level block containing mid-key: out of range (" - + subEntryWithinEntry + ", numSubEntriesBefore=" - + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry + + "leaf-level block containing mid-key: out of range (" + subEntryWithinEntry + + ", numSubEntriesBefore=" + numSubEntriesBefore + ", midKeySubEntry=" + midKeySubEntry + ")"); } baosDos.writeInt((int) subEntryWithinEntry); if (baosDos.size() != MID_KEY_METADATA_SIZE) { - throw new IOException("Could not write mid-key metadata: size=" + - baosDos.size() + ", correct size: " + MID_KEY_METADATA_SIZE); + throw new IOException("Could not write mid-key metadata: size=" + baosDos.size() + + ", correct size: " + MID_KEY_METADATA_SIZE); } // Close just to be good citizens, although this has no effect. @@ -1570,11 +1451,9 @@ public byte[] getMidKeyMetadata() throws IOException { } /** - * Writes the block index chunk in the non-root index block format. This - * format contains the number of entries, an index of integer offsets - * for quick binary search on variable-length records, and tuples of - * block offset, on-disk block size, and the first key for each entry. - * + * Writes the block index chunk in the non-root index block format. This format contains the + * number of entries, an index of integer offsets for quick binary search on variable-length + * records, and tuples of block offset, on-disk block size, and the first key for each entry. * @param out * @throws IOException */ @@ -1583,9 +1462,8 @@ void writeNonRoot(DataOutput out) throws IOException { out.writeInt(blockKeys.size()); if (secondaryIndexOffsetMarks.size() != blockKeys.size()) { - throw new IOException("Corrupted block index chunk writer: " + - blockKeys.size() + " entries but " + - secondaryIndexOffsetMarks.size() + " secondary index items"); + throw new IOException("Corrupted block index chunk writer: " + blockKeys.size() + + " entries but " + secondaryIndexOffsetMarks.size() + " secondary index items"); } // For each entry, write a "secondary index" of relative offsets to the @@ -1607,23 +1485,20 @@ void writeNonRoot(DataOutput out) throws IOException { } /** - * @return the size of this chunk if stored in the non-root index block - * format + * @return the size of this chunk if stored in the non-root index block format */ int getNonRootSize() { - return Bytes.SIZEOF_INT // Number of entries - + Bytes.SIZEOF_INT * (blockKeys.size() + 1) // Secondary index - + curTotalNonRootEntrySize; // All entries + return Bytes.SIZEOF_INT // Number of entries + + Bytes.SIZEOF_INT * (blockKeys.size() + 1) // Secondary index + + curTotalNonRootEntrySize; // All entries } /** - * Writes this chunk into the given output stream in the root block index - * format. This format is similar to the {@link HFile} version 1 block - * index format, except that we store on-disk size of the block instead of - * its uncompressed size. - * - * @param out the data output stream to write the block index to. Typically - * a stream writing into an {@link HFile} block. + * Writes this chunk into the given output stream in the root block index format. 
This format is + * similar to the {@link HFile} version 1 block index format, except that we store on-disk size + * of the block instead of its uncompressed size. + * @param out the data output stream to write the block index to. Typically a stream writing + * into an {@link HFile} block. * @throws IOException */ void writeRoot(DataOutput out) throws IOException { @@ -1661,8 +1536,7 @@ public int getOnDiskDataSize(int i) { } public long getCumulativeNumKV(int i) { - if (i < 0) - return 0; + if (i < 0) return 0; return numSubEntriesAt.get(i); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java index 6a1611de8dc3..2165128cc65c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; @@ -27,9 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Controls what kind of data block encoding is used. If data block encoding is - * not set or the given block is not a data block (encoded or not), methods - * should just return the unmodified block. + * Controls what kind of data block encoding is used. If data block encoding is not set or the given + * block is not a data block (encoded or not), methods should just return the unmodified block. */ @InterfaceAudience.Private public interface HFileDataBlockEncoder { @@ -38,8 +38,8 @@ public interface HFileDataBlockEncoder { /** * Starts encoding for a block of KeyValues. Call - * {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[], BlockType)} - * to finish encoding of a block. + * {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[], BlockType)} to + * finish encoding of a block. 
* @param encodingCtx * @param out * @throws IOException @@ -80,23 +80,20 @@ void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream ou * @param writer writer for a given HFile * @exception IOException on disk problems */ - void saveMetadata(HFile.Writer writer) - throws IOException; + void saveMetadata(HFile.Writer writer) throws IOException; /** @return the data block encoding */ DataBlockEncoding getDataBlockEncoding(); /** - * @return the effective in-cache data block encoding, taking into account - * whether we are doing a compaction. + * @return the effective in-cache data block encoding, taking into account whether we are doing a + * compaction. */ public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction); /** - * Create an encoder specific encoding context object for writing. And the - * encoding context should also perform compression if compressionAlgorithm is - * valid. - * + * Create an encoder specific encoding context object for writing. And the encoding context should + * also perform compression if compressionAlgorithm is valid. * @param conf store configuration * @param headerBytes header bytes * @param fileContext HFile meta data @@ -106,10 +103,8 @@ HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, byte[] HFileContext fileContext); /** - * create a encoder specific decoding context for reading. And the - * decoding context should also do decompression if compressionAlgorithm - * is valid. - * + * create a encoder specific decoding context for reading. And the decoding context should also do + * decompression if compressionAlgorithm is valid. * @param conf store configuration * @param fileContext - HFile meta data * @return a new {@link HFileBlockDecodingContext} object diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java index d2ce77245c9c..61c4867af6ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java @@ -1,24 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
+ * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; @@ -31,8 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Do different kinds of data block encoding according to column family - * options. + * Do different kinds of data block encoding according to column family options. */ @InterfaceAudience.Private public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder { @@ -46,8 +45,7 @@ public HFileDataBlockEncoderImpl(DataBlockEncoding encoding) { this.encoding = encoding != null ? encoding : DataBlockEncoding.NONE; } - public static HFileDataBlockEncoder createFromFileInfo( - HFileInfo fileInfo) throws IOException { + public static HFileDataBlockEncoder createFromFileInfo(HFileInfo fileInfo) throws IOException { DataBlockEncoding encoding = DataBlockEncoding.NONE; byte[] dataBlockEncodingType = fileInfo.get(DATA_BLOCK_ENCODING); if (dataBlockEncodingType != null) { @@ -55,8 +53,8 @@ public static HFileDataBlockEncoder createFromFileInfo( try { encoding = DataBlockEncoding.valueOf(dataBlockEncodingStr); } catch (IllegalArgumentException ex) { - throw new IOException("Invalid data block encoding type in file info: " - + dataBlockEncodingStr, ex); + throw new IOException( + "Invalid data block encoding type in file info: " + dataBlockEncodingStr, ex); } } @@ -102,7 +100,6 @@ public boolean useEncodedScanner() { return encoding != DataBlockEncoding.NONE; } - @Override public String toString() { return getClass().getSimpleName() + "(encoding=" + encoding + ")"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java index 072e5b10628a..f78c7e581ade 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,8 +59,8 @@ * key seen, comparator used writing the file, etc. Clients can add their own attributes via * {@link #append(byte[], byte[], boolean)} and they'll be persisted and available at read time. * Reader creates the HFileInfo on open by reading the tail of the HFile. The parse of the HFile - * trailer also creates a {@link HFileContext}, a read-only data structure that includes bulk of - * the HFileInfo and extras that is safe to pass around when working on HFiles. + * trailer also creates a {@link HFileContext}, a read-only data structure that includes bulk of the + * HFileInfo and extras that is safe to pass around when working on HFiles. 
* @see HFileContext */ @InterfaceAudience.Private @@ -71,13 +70,13 @@ public class HFileInfo implements SortedMap { static final String RESERVED_PREFIX = "hfile."; static final byte[] RESERVED_PREFIX_BYTES = Bytes.toBytes(RESERVED_PREFIX); - static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); - static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN"); - static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN"); - static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS"); - static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED"); - public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN"); - private final SortedMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + static final byte[] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY"); + static final byte[] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN"); + static final byte[] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN"); + static final byte[] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS"); + static final byte[] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED"); + public static final byte[] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN"); + private final SortedMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * We can read files whose major version is v2 IFF their minor version is at least 3. @@ -99,15 +98,15 @@ public class HFileInfo implements SortedMap { private boolean decodeMemstoreTS = false; /** - * Blocks read from the load-on-open section, excluding data root index, meta - * index, and file info. + * Blocks read from the load-on-open section, excluding data root index, meta index, and file + * info. */ private List loadOnOpenBlocks = new ArrayList<>(); /** * The iterator will track all blocks in load-on-open section, since we use the - * {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} to manage the ByteBuffers in block now, - * so we must ensure that deallocate all ByteBuffers in the end. + * {@link org.apache.hadoop.hbase.io.ByteBuffAllocator} to manage the ByteBuffers in block now, so + * we must ensure that deallocate all ByteBuffers in the end. */ private HFileBlock.BlockIterator blockIter; @@ -126,25 +125,22 @@ public HFileInfo(ReaderContext context, Configuration conf) throws IOException { } /** - * Append the given key/value pair to the file info, optionally checking the - * key prefix. - * + * Append the given key/value pair to the file info, optionally checking the key prefix. 
* @param k key to add * @param v value to add - * @param checkPrefix whether to check that the provided key does not start - * with the reserved prefix + * @param checkPrefix whether to check that the provided key does not start with the reserved + * prefix * @return this file info object * @throws IOException if the key or value is invalid * @throws NullPointerException if {@code key} or {@code value} is {@code null} */ - public HFileInfo append(final byte[] k, final byte[] v, - final boolean checkPrefix) throws IOException { + public HFileInfo append(final byte[] k, final byte[] v, final boolean checkPrefix) + throws IOException { Objects.requireNonNull(k, "key cannot be null"); Objects.requireNonNull(v, "value cannot be null"); if (checkPrefix && isReservedFileInfoKey(k)) { - throw new IOException("Keys with a " + HFileInfo.RESERVED_PREFIX - + " are reserved"); + throw new IOException("Keys with a " + HFileInfo.RESERVED_PREFIX + " are reserved"); } put(k, v); return this; @@ -256,13 +252,12 @@ public Collection values() { } /** - * Write out this instance on the passed in out stream. - * We write it as a protobuf. + * Write out this instance on the passed in out stream. We write it as a protobuf. * @see #read(DataInputStream) */ void write(final DataOutputStream out) throws IOException { HFileProtos.FileInfoProto.Builder builder = HFileProtos.FileInfoProto.newBuilder(); - for (Map.Entry e: this.map.entrySet()) { + for (Map.Entry e : this.map.entrySet()) { HBaseProtos.BytesBytesPair.Builder bbpBuilder = HBaseProtos.BytesBytesPair.newBuilder(); bbpBuilder.setFirst(UnsafeByteOperations.unsafeWrap(e.getKey())); bbpBuilder.setSecond(UnsafeByteOperations.unsafeWrap(e.getValue())); @@ -273,14 +268,14 @@ void write(final DataOutputStream out) throws IOException { } /** - * Populate this instance with what we find on the passed in in stream. - * Can deserialize protobuf of old Writables format. + * Populate this instance with what we find on the passed in in stream. Can + * deserialize protobuf of old Writables format. * @see #write(DataOutputStream) */ void read(final DataInputStream in) throws IOException { // This code is tested over in TestHFileReaderV1 where we read an old hfile w/ this new code. int pblen = ProtobufUtil.lengthOfPBMagic(); - byte [] pbuf = new byte[pblen]; + byte[] pbuf = new byte[pblen]; if (in.markSupported()) { in.mark(pblen); } @@ -298,7 +293,7 @@ void read(final DataInputStream in) throws IOException { // We cannot use BufferedInputStream, it consumes more than we read from the underlying IS ByteArrayInputStream bais = new ByteArrayInputStream(pbuf); SequenceInputStream sis = new SequenceInputStream(bais, in); // Concatenate input streams - // TODO: Am I leaking anything here wrapping the passed in stream? We are not calling + // TODO: Am I leaking anything here wrapping the passed in stream? We are not calling // close on the wrapped streams but they should be let go after we leave this context? // I see that we keep a reference to the passed in inputstream but since we no longer // have a reference to this after we leave, we should be ok. @@ -308,10 +303,9 @@ void read(final DataInputStream in) throws IOException { } /** - * Now parse the old Writable format. It was a list of Map entries. Each map entry was a - * key and a value of a byte []. The old map format had a byte before each entry that held - * a code which was short for the key or value type. We know it was a byte [] so in below - * we just read and dump it. + * Now parse the old Writable format. 
It was a list of Map entries. Each map entry was a key and a + * value of a byte []. The old map format had a byte before each entry that held a code which was + * short for the key or value type. We know it was a byte [] so in below we just read and dump it. */ void parseWritable(final DataInputStream in) throws IOException { // First clear the map. @@ -321,11 +315,11 @@ void parseWritable(final DataInputStream in) throws IOException { int entries = in.readInt(); // Then read each key/value pair for (int i = 0; i < entries; i++) { - byte [] key = Bytes.readByteArray(in); + byte[] key = Bytes.readByteArray(in); // We used to read a byte that encoded the class type. // Read and ignore it because it is always byte [] in hfile in.readByte(); - byte [] value = Bytes.readByteArray(in); + byte[] value = Bytes.readByteArray(in); this.map.put(key, value); } } @@ -336,7 +330,7 @@ void parseWritable(final DataInputStream in) throws IOException { */ void parsePB(final HFileProtos.FileInfoProto fip) { this.map.clear(); - for (BytesBytesPair pair: fip.getMapEntryList()) { + for (BytesBytesPair pair : fip.getMapEntryList()) { this.map.put(pair.getFirst().toByteArray(), pair.getSecond().toByteArray()); } } @@ -344,8 +338,8 @@ void parsePB(final HFileProtos.FileInfoProto fip) { public void initTrailerAndContext(ReaderContext context, Configuration conf) throws IOException { try { boolean isHBaseChecksum = context.getInputStreamWrapper().shouldUseHBaseChecksum(); - trailer = FixedFileTrailer.readFromStream(context.getInputStreamWrapper() - .getStream(isHBaseChecksum), context.getFileSize()); + trailer = FixedFileTrailer.readFromStream( + context.getInputStreamWrapper().getStream(isHBaseChecksum), context.getFileSize()); Path path = context.getFilePath(); checkFileVersion(path); this.hfileContext = createHFileContext(path, trailer, conf); @@ -353,8 +347,8 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr } catch (Throwable t) { IOUtils.closeQuietly(context.getInputStreamWrapper(), e -> LOG.warn("failed to close input stream wrapper", e)); - throw new CorruptHFileException("Problem reading HFile Trailer from file " - + context.getFilePath(), t); + throw new CorruptHFileException( + "Problem reading HFile Trailer from file " + context.getFilePath(), t); } } @@ -367,13 +361,13 @@ public void initMetaAndIndex(HFile.Reader reader) throws IOException { HFileBlock.FSReader blockReader = reader.getUncachedBlockReader(); // Initialize an block iterator, and parse load-on-open blocks in the following. blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), - context.getFileSize() - trailer.getTrailerSize()); + context.getFileSize() - trailer.getTrailerSize()); // Data index. We also read statistics about the block index written after // the root level. - this.dataIndexReader = - new HFileBlockIndex.CellBasedKeyBlockIndexReader(trailer.createComparator(), trailer.getNumDataIndexLevels()); - dataIndexReader - .readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); + this.dataIndexReader = new HFileBlockIndex.CellBasedKeyBlockIndexReader( + trailer.createComparator(), trailer.getNumDataIndexLevels()); + dataIndexReader.readMultiLevelIndexRoot( + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); reader.setDataBlockIndexReader(dataIndexReader); // Meta index. 
this.metaIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); @@ -393,17 +387,15 @@ public void initMetaAndIndex(HFile.Reader reader) throws IOException { IOUtils.closeQuietly(context.getInputStreamWrapper(), e -> LOG.warn("failed to close input stream wrapper", e)); throw new CorruptHFileException( - "Problem reading data index and meta index from file " + context.getFilePath(), t); + "Problem reading data index and meta index from file " + context.getFilePath(), t); } } - private HFileContext createHFileContext(Path path, - FixedFileTrailer trailer, Configuration conf) throws IOException { - HFileContextBuilder builder = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withHFileName(path.getName()) - .withCompression(trailer.getCompressionCodec()) - .withCellComparator(FixedFileTrailer.createComparator(trailer.getComparatorClassName())); + private HFileContext createHFileContext(Path path, FixedFileTrailer trailer, Configuration conf) + throws IOException { + HFileContextBuilder builder = new HFileContextBuilder().withHBaseCheckSum(true) + .withHFileName(path.getName()).withCompression(trailer.getCompressionCodec()) + .withCellComparator(FixedFileTrailer.createComparator(trailer.getComparatorClassName())); // Check for any key material available byte[] keyBytes = trailer.getEncryptionKey(); if (keyBytes != null) { @@ -412,8 +404,8 @@ private HFileContext createHFileContext(Path path, // Use the algorithm the key wants Cipher cipher = Encryption.getCipher(conf, key.getAlgorithm()); if (cipher == null) { - throw new IOException("Cipher '" + key.getAlgorithm() + "' is not available" - + ", path=" + path); + throw new IOException( + "Cipher '" + key.getAlgorithm() + "' is not available" + ", path=" + path); } cryptoContext.setCipher(cipher); cryptoContext.setKey(key); @@ -427,8 +419,7 @@ private void loadMetaInfo(HFileBlock.BlockIterator blockIter, HFileContext hfile throws IOException { read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); byte[] creationTimeBytes = get(HFileInfo.CREATE_TIME_TS); - hfileContext.setFileCreateTime(creationTimeBytes == null ? - 0 : Bytes.toLong(creationTimeBytes)); + hfileContext.setFileCreateTime(creationTimeBytes == null ? 0 : Bytes.toLong(creationTimeBytes)); byte[] tmp = get(HFileInfo.MAX_TAGS_LEN); // max tag length is not present in the HFile means tags were not at all written to file. if (tmp != null) { @@ -444,9 +435,9 @@ private void loadMetaInfo(HFileBlock.BlockIterator blockIter, HFileContext hfile } avgKeyLen = Bytes.toInt(get(HFileInfo.AVG_KEY_LEN)); avgValueLen = Bytes.toInt(get(HFileInfo.AVG_VALUE_LEN)); - byte [] keyValueFormatVersion = get(HFileWriterImpl.KEY_VALUE_VERSION); - includesMemstoreTS = keyValueFormatVersion != null && - Bytes.toInt(keyValueFormatVersion) == HFileWriterImpl.KEY_VALUE_VER_WITH_MEMSTORE; + byte[] keyValueFormatVersion = get(HFileWriterImpl.KEY_VALUE_VERSION); + includesMemstoreTS = keyValueFormatVersion != null + && Bytes.toInt(keyValueFormatVersion) == HFileWriterImpl.KEY_VALUE_VER_WITH_MEMSTORE; hfileContext.setIncludesMvcc(includesMemstoreTS); if (includesMemstoreTS) { decodeMemstoreTS = Bytes.toLong(get(HFileWriterImpl.MAX_MEMSTORE_TS_KEY)) > 0; @@ -467,9 +458,9 @@ private void checkFileVersion(Path path) { return; } // We can read v3 or v2 versions of hfile. 
- throw new IllegalArgumentException("Invalid HFile version: major=" + - trailer.getMajorVersion() + ", minor=" + trailer.getMinorVersion() + ": expected at least " + - "major=2 and minor=" + MAX_MINOR_VERSION + ", path=" + path); + throw new IllegalArgumentException("Invalid HFile version: major=" + trailer.getMajorVersion() + + ", minor=" + trailer.getMinorVersion() + ": expected at least " + "major=2 and minor=" + + MAX_MINOR_VERSION + ", path=" + path); } public void close() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 98fe885de516..2ea0b48ecac8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,8 +31,8 @@ public class HFilePreadReader extends HFileReaderImpl { private static final Logger LOG = LoggerFactory.getLogger(HFileReaderImpl.class); - public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, - CacheConfig cacheConf, Configuration conf) throws IOException { + public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, + Configuration conf) throws IOException { super(context, fileInfo, cacheConf, conf); // Prefetch file blocks upon open if requested if (cacheConf.shouldPrefetchOnOpen()) { @@ -74,8 +74,9 @@ public void run() { LOG.trace("Prefetch " + getPathOffsetEndStr(path, offset, end), e); } } catch (NullPointerException e) { - LOG.warn("Stream moved/closed or prefetch cancelled?" + - getPathOffsetEndStr(path, offset, end), e); + LOG.warn( + "Stream moved/closed or prefetch cancelled?" + getPathOffsetEndStr(path, offset, end), + e); } catch (Exception e) { // Other exceptions are interesting LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index c24d8be7c035..005cc00b08e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -139,15 +138,14 @@ public HFilePrettyPrinter(Configuration conf) { } private void init() { - options.addOption("v", "verbose", false, - "Verbose output; emits file and meta data delimiters"); + options.addOption("v", "verbose", false, "Verbose output; emits file and meta data delimiters"); options.addOption("p", "printkv", false, "Print key/value pairs"); options.addOption("e", "printkey", false, "Print keys"); options.addOption("m", "printmeta", false, "Print meta data of file"); options.addOption("b", "printblocks", false, "Print block index meta data"); options.addOption("h", "printblockheaders", false, "Print block headers for each block."); options.addOption("k", "checkrow", false, - "Enable row order check; looks for out-of-order keys"); + "Enable row order check; looks for out-of-order keys"); options.addOption("a", "checkfamily", false, "Enable family check"); options.addOption("w", "seekToRow", true, "Seek to this row and print all the kvs for this row only"); @@ -157,9 +155,9 @@ private void init() { OptionGroup files = new OptionGroup(); files.addOption(new Option("f", "file", true, - "File to scan. Pass full-path; e.g. hdfs://a:9000/hbase/hbase:meta/12/34")); - files.addOption(new Option("r", "region", true, - "Region to scan. Pass region name; e.g. 'hbase:meta,,1'")); + "File to scan. Pass full-path; e.g. hdfs://a:9000/hbase/hbase:meta/12/34")); + files.addOption( + new Option("r", "region", true, "Region to scan. Pass region name; e.g. 'hbase:meta,,1'")); options.addOptionGroup(files); } @@ -168,8 +166,7 @@ public void setPrintStreams(PrintStream out, PrintStream err) { this.err = err; } - public boolean parseOptions(String args[]) throws ParseException, - IOException { + public boolean parseOptions(String args[]) throws ParseException, IOException { if (args.length == 0) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("hfile", options, true); @@ -212,24 +209,19 @@ public boolean parseOptions(String args[]) throws ParseException, Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.valueOf(hri[0])); String enc = RegionInfo.encodeRegionName(rn); Path regionDir = new Path(tableDir, enc); - if (verbose) - out.println("region dir -> " + regionDir); - List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()), - regionDir); - if (verbose) - out.println("Number of region files found -> " - + regionFiles.size()); + if (verbose) out.println("region dir -> " + regionDir); + List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()), regionDir); + if (verbose) out.println("Number of region files found -> " + regionFiles.size()); if (verbose) { int i = 1; for (Path p : regionFiles) { - if (verbose) - out.println("Found file[" + i++ + "] -> " + p); + if (verbose) out.println("Found file[" + i++ + "] -> " + p); } } files.addAll(regionFiles); } - if(checkMobIntegrity) { + if (checkMobIntegrity) { if (verbose) { System.out.println("checkMobIntegrity is enabled"); } @@ -242,8 +234,8 @@ public boolean parseOptions(String args[]) throws ParseException, } /** - * Runs the command-line pretty-printer, and returns the desired command - * exit code (zero for success, non-zero for failure). + * Runs the command-line pretty-printer, and returns the desired command exit code (zero for + * success, non-zero for failure). 
*/ @Override public int run(String[] args) { @@ -352,10 +344,8 @@ public int processFile(Path file, boolean checkRootDir) throws IOException { */ FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, file); long fileSize = fs.getFileStatus(file).getLen(); - FixedFileTrailer trailer = - FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); - long offset = trailer.getFirstDataBlockOffset(), - max = trailer.getLastDataBlockOffset(); + FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); + long offset = trailer.getFirstDataBlockOffset(), max = trailer.getLastDataBlockOffset(); HFileBlock block; while (offset <= max) { block = reader.readBlock(offset, -1, /* cacheBlock */ false, /* pread */ false, @@ -374,8 +364,8 @@ public int processFile(Path file, boolean checkRootDir) throws IOException { return 0; } - private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, - HFileScanner scanner, byte[] row) throws IOException { + private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, HFileScanner scanner, + byte[] row) throws IOException { Cell pCell = null; FileSystem fs = FileSystem.get(getConf()); Set foundMobFiles = new LinkedHashSet<>(FOUND_MOB_FILES_CACHE_CAPACITY); @@ -398,9 +388,8 @@ private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, if (printKey) { out.print("K: " + cell); if (printValue) { - out.print(" V: " - + Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + out.print(" V: " + Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength())); int i = 0; List tags = PrivateCellUtil.getTags(cell); for (Tag tag : tags) { @@ -412,37 +401,35 @@ private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, // check if rows are in order if (checkRow && pCell != null) { if (CellComparator.getInstance().compareRows(pCell, cell) > 0) { - err.println("WARNING, previous row is greater then" - + " current row\n\tfilename -> " + file + "\n\tprevious -> " - + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " + err.println("WARNING, previous row is greater then" + " current row\n\tfilename -> " + + file + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent -> " + CellUtil.getCellKeyAsString(cell)); } } // check if families are consistent if (checkFamily) { - String fam = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength()); + String fam = + Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); if (!file.toString().contains(fam)) { - err.println("WARNING, filename does not match kv family," - + "\n\tfilename -> " + file + "\n\tkeyvalue -> " - + CellUtil.getCellKeyAsString(cell)); + err.println("WARNING, filename does not match kv family," + "\n\tfilename -> " + file + + "\n\tkeyvalue -> " + CellUtil.getCellKeyAsString(cell)); } if (pCell != null && CellComparator.getInstance().compareFamilies(pCell, cell) != 0) { - err.println("WARNING, previous kv has different family" - + " compared to current key\n\tfilename -> " + file - + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) - + "\n\tcurrent -> " + CellUtil.getCellKeyAsString(cell)); + err.println( + "WARNING, previous kv has different family" + " compared to current key\n\tfilename -> " + + file + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell) + + "\n\tcurrent -> " + CellUtil.getCellKeyAsString(cell)); } } // check if mob files are missing. 
if (checkMobIntegrity && MobUtils.isMobReferenceCell(cell)) { Optional tn = MobUtils.getTableName(cell); - if (! tn.isPresent()) { - System.err.println("ERROR, wrong tag format in mob reference cell " - + CellUtil.getCellKeyAsString(cell)); + if (!tn.isPresent()) { + System.err.println( + "ERROR, wrong tag format in mob reference cell " + CellUtil.getCellKeyAsString(cell)); } else if (!MobUtils.hasValidMobRefCellValue(cell)) { - System.err.println("ERROR, wrong value format in mob reference cell " - + CellUtil.getCellKeyAsString(cell)); + System.err.println( + "ERROR, wrong value format in mob reference cell " + CellUtil.getCellKeyAsString(cell)); } else { String mobFileName = MobUtils.getMobFileName(cell); boolean exist = mobFileExists(fs, tn.get(), mobFileName, @@ -450,7 +437,7 @@ private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, if (!exist) { // report error System.err.println("ERROR, the mob file [" + mobFileName - + "] is missing referenced by cell " + CellUtil.getCellKeyAsString(cell)); + + "] is missing referenced by cell " + CellUtil.getCellKeyAsString(cell)); } } } @@ -463,7 +450,7 @@ private void scanKeysValues(Path file, KeyValueStatsCollector fileStats, * Checks whether the referenced mob file exists. */ private boolean mobFileExists(FileSystem fs, TableName tn, String mobFileName, String family, - Set foundMobFiles, Set missingMobFiles) throws IOException { + Set foundMobFiles, Set missingMobFiles) throws IOException { if (foundMobFiles.contains(mobFileName)) { return true; } @@ -515,21 +502,17 @@ private void evictMobFilesIfNecessary(Set mobFileNames, int limit) { } /** - * Format a string of the form "k1=v1, k2=v2, ..." into separate lines - * with a four-space indentation. + * Format a string of the form "k1=v1, k2=v2, ..." into separate lines with a four-space + * indentation. 
*/ private static String asSeparateLines(String keyValueStr) { - return keyValueStr.replaceAll(", ([a-zA-Z]+=)", - ",\n" + FOUR_SPACES + "$1"); + return keyValueStr.replaceAll(", ([a-zA-Z]+=)", ",\n" + FOUR_SPACES + "$1"); } - private void printMeta(HFile.Reader reader, Map fileInfo) - throws IOException { - out.println("Block index size as per heapsize: " - + reader.indexSize()); + private void printMeta(HFile.Reader reader, Map fileInfo) throws IOException { + out.println("Block index size as per heapsize: " + reader.indexSize()); out.println(asSeparateLines(reader.toString())); - out.println("Trailer:\n " - + asSeparateLines(reader.getTrailer().toString())); + out.println("Trailer:\n " + asSeparateLines(reader.getTrailer().toString())); out.println("Fileinfo:"); for (Map.Entry e : fileInfo.entrySet()) { out.print(FOUR_SPACES + Bytes.toString(e.getKey()) + " = "); @@ -547,34 +530,35 @@ private void printMeta(HFile.Reader reader, Map fileInfo) || Bytes.equals(e.getKey(), HFileInfo.AVG_VALUE_LEN) || Bytes.equals(e.getKey(), HFileWriterImpl.KEY_VALUE_VERSION) || Bytes.equals(e.getKey(), HFileInfo.MAX_TAGS_LEN)) { - out.println(Bytes.toInt(e.getValue())); - } else if (Bytes.equals(e.getKey(), HStoreFile.MAJOR_COMPACTION_KEY) - || Bytes.equals(e.getKey(), HFileInfo.TAGS_COMPRESSED) - || Bytes.equals(e.getKey(), HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY)) { - out.println(Bytes.toBoolean(e.getValue())); - } else if (Bytes.equals(e.getKey(), HFileInfo.LASTKEY)) { - out.println(new KeyValue.KeyOnlyKeyValue(e.getValue()).toString()); - } else { - out.println(Bytes.toStringBinary(e.getValue())); - } + out.println(Bytes.toInt(e.getValue())); + } else + if (Bytes.equals(e.getKey(), HStoreFile.MAJOR_COMPACTION_KEY) + || Bytes.equals(e.getKey(), HFileInfo.TAGS_COMPRESSED) + || Bytes.equals(e.getKey(), HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY)) { + out.println(Bytes.toBoolean(e.getValue())); + } else + if (Bytes.equals(e.getKey(), HFileInfo.LASTKEY)) { + out.println(new KeyValue.KeyOnlyKeyValue(e.getValue()).toString()); + } else { + out.println(Bytes.toStringBinary(e.getValue())); + } } try { out.println("Mid-key: " + reader.midKey().map(CellUtil::getCellKeyAsString)); } catch (Exception e) { - out.println ("Unable to retrieve the midkey"); + out.println("Unable to retrieve the midkey"); } // Printing general bloom information DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); BloomFilter bloomFilter = null; - if (bloomMeta != null) - bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); + if (bloomMeta != null) bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); out.println("Bloom filter:"); if (bloomFilter != null) { - out.println(FOUR_SPACES + bloomFilter.toString().replaceAll( - BloomFilterUtil.STATS_RECORD_SEP, "\n" + FOUR_SPACES)); + out.println(FOUR_SPACES + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP, + "\n" + FOUR_SPACES)); } else { out.println(FOUR_SPACES + "Not present"); } @@ -582,14 +566,12 @@ private void printMeta(HFile.Reader reader, Map fileInfo) // Printing delete bloom information bloomMeta = reader.getDeleteBloomFilterMetadata(); bloomFilter = null; - if (bloomMeta != null) - bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); + if (bloomMeta != null) bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); out.println("Delete Family Bloom filter:"); if (bloomFilter != null) { - out.println(FOUR_SPACES - + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP, - "\n" + 
FOUR_SPACES)); + out.println(FOUR_SPACES + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP, + "\n" + FOUR_SPACES)); } else { out.println(FOUR_SPACES + "Not present"); } @@ -598,15 +580,15 @@ private void printMeta(HFile.Reader reader, Map fileInfo) private static class KeyValueStatsCollector { private final MetricRegistry metricsRegistry = new MetricRegistry(); private final ByteArrayOutputStream metricsOutput = new ByteArrayOutputStream(); - private final SimpleReporter simpleReporter = SimpleReporter.forRegistry(metricsRegistry). - outputTo(new PrintStream(metricsOutput)).filter(MetricFilter.ALL).build(); + private final SimpleReporter simpleReporter = SimpleReporter.forRegistry(metricsRegistry) + .outputTo(new PrintStream(metricsOutput)).filter(MetricFilter.ALL).build(); Histogram keyLen = metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Key length")); Histogram valLen = metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Val length")); - Histogram rowSizeBytes = metricsRegistry.histogram( - name(HFilePrettyPrinter.class, "Row size (bytes)")); - Histogram rowSizeCols = metricsRegistry.histogram( - name(HFilePrettyPrinter.class, "Row size (columns)")); + Histogram rowSizeBytes = + metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Row size (bytes)")); + Histogram rowSizeCols = + metricsRegistry.histogram(name(HFilePrettyPrinter.class, "Row size (columns)")); long curRowBytes = 0; long curRowCols = 0; @@ -619,8 +601,7 @@ private static class KeyValueStatsCollector { public void collect(Cell cell) { valLen.update(cell.getValueLength()); - if (prevCell != null && - CellComparator.getInstance().compareRows(prevCell, cell) != 0) { + if (prevCell != null && CellComparator.getInstance().compareRows(prevCell, cell) != 0) { // new row collectRow(); } @@ -652,27 +633,23 @@ public void finish() { @Override public String toString() { - if (prevCell == null) - return "no data available for statistics"; + if (prevCell == null) return "no data available for statistics"; // Dump the metrics to the output stream simpleReporter.stop(); simpleReporter.report(); - return - metricsOutput.toString() + - "Key of biggest row: " + Bytes.toStringBinary(biggestRow); + return metricsOutput.toString() + "Key of biggest row: " + Bytes.toStringBinary(biggestRow); } } /** - * Almost identical to ConsoleReporter, but extending ScheduledReporter, - * as extending ConsoleReporter in this version of dropwizard is now too much trouble. + * Almost identical to ConsoleReporter, but extending ScheduledReporter, as extending + * ConsoleReporter in this version of dropwizard is now too much trouble. */ private static class SimpleReporter extends ScheduledReporter { /** * Returns a new {@link Builder} for {@link ConsoleReporter}. - * * @param registry the registry to report * @return a {@link Builder} instance for a {@link ConsoleReporter} */ @@ -681,9 +658,9 @@ public static Builder forRegistry(MetricRegistry registry) { } /** - * A builder for {@link SimpleReporter} instances. Defaults to using the default locale and - * time zone, writing to {@code System.out}, converting rates to events/second, converting - * durations to milliseconds, and not filtering metrics. + * A builder for {@link SimpleReporter} instances. Defaults to using the default locale and time + * zone, writing to {@code System.out}, converting rates to events/second, converting durations + * to milliseconds, and not filtering metrics. 
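// Editor's sketch (not part of this patch): the KeyValueStatsCollector above wires its
// private SimpleReporter the same way the stock dropwizard-metrics ConsoleReporter builder
// is used below; metric names and sample values are made up.
import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class ReporterSketch {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    Histogram keyLen = registry.histogram(MetricRegistry.name("HFilePrettyPrinter", "Key length"));
    keyLen.update(24);
    keyLen.update(310);

    // Capture the report in a buffer rather than printing straight to System.out,
    // the same trick the stats collector uses with its metricsOutput stream.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    ConsoleReporter reporter = ConsoleReporter.forRegistry(registry)
        .outputTo(new PrintStream(buf))
        .filter(MetricFilter.ALL)
        .build();
    reporter.report(); // one-shot report; no background scheduler needed
    reporter.stop();
    System.out.print(buf);
  }
}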
*/ public static class Builder { private final MetricRegistry registry; @@ -706,7 +683,6 @@ private Builder(MetricRegistry registry) { /** * Write to the given {@link PrintStream}. - * * @param output a {@link PrintStream} instance. * @return {@code this} */ @@ -717,7 +693,6 @@ public Builder outputTo(PrintStream output) { /** * Only report metrics which match the given filter. - * * @param filter a {@link MetricFilter} * @return {@code this} */ @@ -728,16 +703,10 @@ public Builder filter(MetricFilter filter) { /** * Builds a {@link ConsoleReporter} with the given properties. - * * @return a {@link ConsoleReporter} */ public SimpleReporter build() { - return new SimpleReporter(registry, - output, - locale, - timeZone, - rateUnit, - durationUnit, + return new SimpleReporter(registry, output, locale, timeZone, rateUnit, durationUnit, filter); } } @@ -746,29 +715,20 @@ public SimpleReporter build() { private final Locale locale; private final DateFormat dateFormat; - private SimpleReporter(MetricRegistry registry, - PrintStream output, - Locale locale, - TimeZone timeZone, - TimeUnit rateUnit, - TimeUnit durationUnit, - MetricFilter filter) { + private SimpleReporter(MetricRegistry registry, PrintStream output, Locale locale, + TimeZone timeZone, TimeUnit rateUnit, TimeUnit durationUnit, MetricFilter filter) { super(registry, "simple-reporter", filter, rateUnit, durationUnit); this.output = output; this.locale = locale; - this.dateFormat = DateFormat.getDateTimeInstance(DateFormat.SHORT, - DateFormat.MEDIUM, - locale); + this.dateFormat = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.MEDIUM, locale); dateFormat.setTimeZone(timeZone); } @Override - public void report(SortedMap gauges, - SortedMap counters, - SortedMap histograms, - SortedMap meters, - SortedMap timers) { + public void report(SortedMap gauges, SortedMap counters, + SortedMap histograms, SortedMap meters, + SortedMap timers) { // we know we only have histograms if (!histograms.isEmpty()) { for (Map.Entry entry : histograms.entrySet()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index c7a71584327c..fc98663c9d36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -58,10 +58,10 @@ * Implementation that can handle all hfile versions of {@link HFile.Reader}. */ @InterfaceAudience.Private -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") public abstract class HFileReaderImpl implements HFile.Reader, Configurable { // This class is HFileReaderV3 + HFileReaderV2 + AbstractHFileReader all squashed together into - // one file. Ditto for all the HFileReader.ScannerV? implementations. I was running up against + // one file. Ditto for all the HFileReader.ScannerV? implementations. I was running up against // the MaxInlineLevel limit because too many tiers involved reading from an hfile. Was also hard // to navigate the source code when so many classes participating in read. 
private static final Logger LOG = LoggerFactory.getLogger(HFileReaderImpl.class); @@ -77,8 +77,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { private final boolean primaryReplicaReader; /** - * What kind of data block encoding should be used while reading, writing, - * and handling cache. + * What kind of data block encoding should be used while reading, writing, and handling cache. */ protected HFileDataBlockEncoder dataBlockEncoder = NoOpDataBlockEncoder.INSTANCE; @@ -103,10 +102,9 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { protected HFileBlock.FSReader fsBlockReader; /** - * A "sparse lock" implementation allowing to lock on a particular block - * identified by offset. The purpose of this is to avoid two clients loading - * the same block, and have all but one client wait to get the block from the - * cache. + * A "sparse lock" implementation allowing to lock on a particular block identified by offset. The + * purpose of this is to avoid two clients loading the same block, and have all but one client + * wait to get the block from the cache. */ private IdLock offsetLock = new IdLock(); @@ -128,7 +126,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable { * @param cacheConf Cache configuration. * @param conf Configuration */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") public HFileReaderImpl(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf, Configuration conf) throws IOException { this.cacheConf = cacheConf; @@ -140,8 +138,8 @@ public HFileReaderImpl(ReaderContext context, HFileInfo fileInfo, CacheConfig ca this.fileInfo = fileInfo; this.trailer = fileInfo.getTrailer(); this.hfileContext = fileInfo.getHFileContext(); - this.fsBlockReader = new HFileBlock.FSReaderImpl(context, hfileContext, - cacheConf.getByteBuffAllocator(), conf); + this.fsBlockReader = + new HFileBlock.FSReaderImpl(context, hfileContext, cacheConf.getByteBuffAllocator(), conf); this.dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo); fsBlockReader.setDataBlockEncoder(dataBlockEncoder, conf); dataBlockIndexReader = fileInfo.getDataBlockIndexReader(); @@ -166,16 +164,12 @@ private Optional toStringLastKey() { @Override public String toString() { - return "reader=" + path.toString() + - (!isFileInfoLoaded()? "": - ", compression=" + trailer.getCompressionCodec().getName() + - ", cacheConf=" + cacheConf + - ", firstKey=" + toStringFirstKey() + - ", lastKey=" + toStringLastKey()) + - ", avgKeyLen=" + fileInfo.getAvgKeyLen() + - ", avgValueLen=" + fileInfo.getAvgValueLen() + - ", entries=" + trailer.getEntryCount() + - ", length=" + context.getFileSize(); + return "reader=" + path.toString() + + (!isFileInfoLoaded() ? "" + : ", compression=" + trailer.getCompressionCodec().getName() + ", cacheConf=" + + cacheConf + ", firstKey=" + toStringFirstKey() + ", lastKey=" + toStringLastKey()) + + ", avgKeyLen=" + fileInfo.getAvgKeyLen() + ", avgValueLen=" + fileInfo.getAvgValueLen() + + ", entries=" + trailer.getEntryCount() + ", length=" + context.getFileSize(); } @Override @@ -184,9 +178,8 @@ public long length() { } /** - * @return the first key in the file. May be null if file has no entries. Note - * that this is not the first row key, but rather the byte form of the - * first KeyValue. + * @return the first key in the file. 
May be null if file has no entries. Note that this is not + * the first row key, but rather the byte form of the first KeyValue. */ @Override public Optional getFirstKey() { @@ -198,9 +191,8 @@ public Optional getFirstKey() { } /** - * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's - * patch goes in to eliminate {@link KeyValue} here. - * + * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's patch goes in to + * eliminate {@link KeyValue} here. * @return the first row key, or null if the file is empty. */ @Override @@ -210,9 +202,8 @@ public Optional getFirstRowKey() { } /** - * TODO left from {@link HFile} version 1: move this to StoreFile after - * Ryan's patch goes in to eliminate {@link KeyValue} here. - * + * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's patch goes in to + * eliminate {@link KeyValue} here. * @return the last row key, or null if the file is empty. */ @Override @@ -238,14 +229,13 @@ public Compression.Algorithm getCompressionAlgorithm() { } /** - * @return the total heap size of data and meta block indexes in bytes. Does - * not take into account non-root blocks of a multilevel data index. + * @return the total heap size of data and meta block indexes in bytes. Does not take into account + * non-root blocks of a multilevel data index. */ @Override public long indexSize() { return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0) - + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() - : 0); + + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() : 0); } @Override @@ -300,8 +290,8 @@ public boolean isPrimaryReplicaReader() { } /** - * An exception thrown when an operation requiring a scanner to be seeked - * is invoked on a scanner that is not seeked. + * An exception thrown when an operation requiring a scanner to be seeked is invoked on a scanner + * that is not seeked. */ @SuppressWarnings("serial") public static class NotSeekedException extends IllegalStateException { @@ -328,11 +318,10 @@ protected static class HFileScannerImpl implements HFileScanner { final ObjectIntPair pair = new ObjectIntPair<>(); /** - * The next indexed key is to keep track of the indexed key of the next data block. - * If the nextIndexedKey is HConstants.NO_NEXT_INDEXED_KEY, it means that the - * current data block is the last data block. - * - * If the nextIndexedKey is null, it means the nextIndexedKey has not been loaded yet. + * The next indexed key is to keep track of the indexed key of the next data block. If the + * nextIndexedKey is HConstants.NO_NEXT_INDEXED_KEY, it means that the current data block is the + * last data block. If the nextIndexedKey is null, it means the nextIndexedKey has not been + * loaded yet. */ protected Cell nextIndexedKey; // Current block being used. NOTICE: DON't release curBlock separately except in shipped() or @@ -380,7 +369,7 @@ private void returnBlocks(boolean returnAll) { } @Override - public boolean isSeeked(){ + public boolean isSeeked() { return blockBuffer != null; } @@ -423,8 +412,7 @@ public void close() { // Returns the #bytes in HFile for the current cell. Used to skip these many bytes in current // HFile block's buffer so as to position to the next cell. 
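// Editor's sketch (not part of this patch): back-of-the-envelope version of the size
// arithmetic done by getCurCellSerializedSize() just below. All lengths are made-up
// example values.
public class CellSizeSketch {
  public static void main(String[] args) {
    final int KEY_VALUE_LEN_SIZE = 2 * 4; // key-length int + value-length int prefix
    int currKeyLen = 33;                  // row + family + qualifier + timestamp + type
    int currValueLen = 100;
    int currMemstoreTSLen = 1;            // vlong-encoded memstore timestamp, when present
    int currTagsLen = 0;

    int curCellSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen + currMemstoreTSLen;
    boolean includesTags = false;         // only files written with tags carry this section
    if (includesTags) {
      curCellSize += 2 /* Bytes.SIZEOF_SHORT */ + currTagsLen;
    }
    // 142 bytes to skip in the block buffer to land on the next cell.
    System.out.println(curCellSize);
  }
}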
private int getCurCellSerializedSize() { - int curCellSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen - + currMemstoreTSLen; + int curCellSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen + currMemstoreTSLen; if (this.reader.getFileContext().isIncludesTags()) { curCellSize += Bytes.SIZEOF_SHORT + currTagsLen; } @@ -443,8 +431,8 @@ protected void readKeyValueLen() { // But ensure that you read long instead of two ints long ll = blockBuffer.getLongAfterPosition(0); // Read top half as an int of key length and bottom int as value length - this.currKeyLen = (int)(ll >> Integer.SIZE); - this.currValueLen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); + this.currKeyLen = (int) (ll >> Integer.SIZE); + this.currValueLen = (int) (Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); checkKeyValueLen(); this.rowLen = blockBuffer.getShortAfterPosition(Bytes.SIZEOF_LONG); // Move position past the key and value lengths and then beyond the key and value @@ -460,11 +448,10 @@ protected void readKeyValueLen() { private final void checkTagsLen() { if (checkLen(this.currTagsLen)) { - throw new IllegalStateException("Invalid currTagsLen " + this.currTagsLen + - ". Block offset: " + curBlock.getOffset() + ", block length: " + - this.blockBuffer.limit() + - ", position: " + this.blockBuffer.position() + " (without header)." + - " path=" + reader.getPath()); + throw new IllegalStateException( + "Invalid currTagsLen " + this.currTagsLen + ". Block offset: " + curBlock.getOffset() + + ", block length: " + this.blockBuffer.limit() + ", position: " + + this.blockBuffer.position() + " (without header)." + " path=" + reader.getPath()); } } @@ -496,7 +483,7 @@ private void _readMvccVersion(int offsetFromPos) { if (len == 1) { this.currMemstoreTS = firstByte; } else { - int remaining = len -1; + int remaining = len - 1; long i = 0; offsetFromPos++; if (remaining >= Bytes.SIZEOF_INT) { @@ -523,19 +510,15 @@ private void _readMvccVersion(int offsetFromPos) { } /** - * Within a loaded block, seek looking for the last key that is smaller than - * (or equal to?) the key we are interested in. - * A note on the seekBefore: if you have seekBefore = true, AND the first - * key in the block = key, then you'll get thrown exceptions. The caller has - * to check for that case and load the previous block as appropriate. - * @param key - * the key to find - * @param seekBefore - * find the key before the given key in case of exact match. - * @return 0 in case of an exact key match, 1 in case of an inexact match, - * -2 in case of an inexact match and furthermore, the input key - * less than the first key of current block(e.g. using a faked index - * key) + * Within a loaded block, seek looking for the last key that is smaller than (or equal to?) the + * key we are interested in. A note on the seekBefore: if you have seekBefore = true, AND the + * first key in the block = key, then you'll get thrown exceptions. The caller has to check for + * that case and load the previous block as appropriate. + * @param key the key to find + * @param seekBefore find the key before the given key in case of exact match. + * @return 0 in case of an exact key match, 1 in case of an inexact match, -2 in case of an + * inexact match and furthermore, the input key less than the first key of current + * block(e.g. 
using a faked index key) */ protected int blockSeek(Cell key, boolean seekBefore) { int klen, vlen, tlen = 0; @@ -545,14 +528,13 @@ protected int blockSeek(Cell key, boolean seekBefore) { offsetFromPos = 0; // Better to ensure that we use the BB Utils here long ll = blockBuffer.getLongAfterPosition(offsetFromPos); - klen = (int)(ll >> Integer.SIZE); - vlen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); + klen = (int) (ll >> Integer.SIZE); + vlen = (int) (Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll); if (checkKeyLen(klen) || checkLen(vlen)) { - throw new IllegalStateException("Invalid klen " + klen + " or vlen " - + vlen + ". Block offset: " - + curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)." - + " path=" + reader.getPath()); + throw new IllegalStateException( + "Invalid klen " + klen + " or vlen " + vlen + ". Block offset: " + + curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " + + blockBuffer.position() + " (without header)." + " path=" + reader.getPath()); } offsetFromPos += Bytes.SIZEOF_LONG; this.rowLen = blockBuffer.getShortAfterPosition(offsetFromPos); @@ -568,8 +550,7 @@ protected int blockSeek(Cell key, boolean seekBefore) { if (checkLen(tlen)) { throw new IllegalStateException("Invalid tlen " + tlen + ". Block offset: " + curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)." - + " path=" + reader.getPath()); + + blockBuffer.position() + " (without header)." + " path=" + reader.getPath()); } // add the two bytes read for the tags. offsetFromPos += tlen + (Bytes.SIZEOF_SHORT); @@ -584,8 +565,7 @@ protected int blockSeek(Cell key, boolean seekBefore) { throw new IllegalStateException("blockSeek with seekBefore " + "at the first key of the block: key=" + CellUtil.getCellKeyAsString(key) + ", blockOffset=" + curBlock.getOffset() + ", onDiskSize=" - + curBlock.getOnDiskSizeWithHeader() - + ", path=" + reader.getPath()); + + curBlock.getOnDiskSizeWithHeader() + ", path=" + reader.getPath()); } blockBuffer.moveBack(lastKeyValueSize); readKeyValueLen(); @@ -643,8 +623,8 @@ public int reseekTo(Cell key) throws IOException { return compared; } else { // The comparison with no_next_index_key has to be checked - if (this.nextIndexedKey != null && - (this.nextIndexedKey == KeyValueScanner.NO_NEXT_INDEXED_KEY || PrivateCellUtil + if (this.nextIndexedKey != null + && (this.nextIndexedKey == KeyValueScanner.NO_NEXT_INDEXED_KEY || PrivateCellUtil .compareKeyIgnoresMvcc(reader.getComparator(), key, nextIndexedKey) < 0)) { // The reader shall continue to scan the current data block instead // of querying the @@ -652,8 +632,7 @@ public int reseekTo(Cell key) throws IOException { // smaller than // the next indexed key or the current data block is the last data // block. - return loadBlockAndSeekToKey(this.curBlock, nextIndexedKey, false, key, - false); + return loadBlockAndSeekToKey(this.curBlock, nextIndexedKey, false, key, false); } } } @@ -663,22 +642,19 @@ public int reseekTo(Cell key) throws IOException { } /** - * An internal API function. Seek to the given key, optionally rewinding to - * the first key of the block before doing the seek. - * + * An internal API function. Seek to the given key, optionally rewinding to the first key of the + * block before doing the seek. 
* @param key - a cell representing the key that we need to fetch - * @param rewind whether to rewind to the first key of the block before - * doing the seek. If this is false, we are assuming we never go - * back, otherwise the result is undefined. - * @return -1 if the key is earlier than the first key of the file, - * 0 if we are at the given key, 1 if we are past the given key - * -2 if the key is earlier than the first key of the file while - * using a faked index key + * @param rewind whether to rewind to the first key of the block before doing the seek. If this + * is false, we are assuming we never go back, otherwise the result is undefined. + * @return -1 if the key is earlier than the first key of the file, 0 if we are at the given + * key, 1 if we are past the given key -2 if the key is earlier than the first key of + * the file while using a faked index key */ public int seekTo(Cell key, boolean rewind) throws IOException { HFileBlockIndex.BlockIndexReader indexReader = reader.getDataBlockIndexReader(); BlockWithScanInfo blockWithScanInfo = indexReader.loadDataBlockWithScanInfo(key, curBlock, - cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding(), reader); + cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding(), reader); if (blockWithScanInfo == null || blockWithScanInfo.getHFileBlock() == null) { // This happens if the key e.g. falls before the beginning of the file. return -1; @@ -690,8 +666,7 @@ public int seekTo(Cell key, boolean rewind) throws IOException { @Override public boolean seekBefore(Cell key) throws IOException { HFileBlock seekToBlock = reader.getDataBlockIndexReader().seekToDataBlock(key, curBlock, - cacheBlocks, pread, isCompaction, reader.getEffectiveEncodingInCache(isCompaction), - reader); + cacheBlocks, pread, isCompaction, reader.getEffectiveEncodingInCache(isCompaction), reader); if (seekToBlock == null) { return false; } @@ -735,13 +710,12 @@ protected void releaseIfNotCurBlock(HFileBlock block) { } /** - * Scans blocks in the "scanned" section of the {@link HFile} until the next - * data block is found. - * + * Scans blocks in the "scanned" section of the {@link HFile} until the next data block is + * found. * @return the next block, or null if there are no more data blocks */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="Yeah, unnecessary null check; could do w/ clean up") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "Yeah, unnecessary null check; could do w/ clean up") protected HFileBlock readNextDataBlock() throws IOException { long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset(); if (curBlock == null) { @@ -828,8 +802,8 @@ public Cell getKey() { blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, keyPair); ByteBuffer keyBuf = keyPair.getFirst(); if (keyBuf.hasArray()) { - return new KeyValue.KeyOnlyKeyValue(keyBuf.array(), keyBuf.arrayOffset() - + keyPair.getSecond(), currKeyLen); + return new KeyValue.KeyOnlyKeyValue(keyBuf.array(), + keyBuf.arrayOffset() + keyPair.getSecond(), currKeyLen); } else { // Better to do a copy here instead of holding on to this BB so that // we could release the blocks referring to this key. 
This key is specifically used @@ -871,11 +845,10 @@ private void positionThisBlockBuffer() { try { blockBuffer.skip(getCurCellSerializedSize()); } catch (IllegalArgumentException e) { - LOG.error("Current pos = " + blockBuffer.position() - + "; currKeyLen = " + currKeyLen + "; currValLen = " - + currValueLen + "; block limit = " + blockBuffer.limit() - + "; currBlock currBlockOffset = " + this.curBlock.getOffset() - + "; path=" + reader.getPath()); + LOG.error("Current pos = " + blockBuffer.position() + "; currKeyLen = " + currKeyLen + + "; currValLen = " + currValueLen + "; block limit = " + blockBuffer.limit() + + "; currBlock currBlockOffset = " + this.curBlock.getOffset() + "; path=" + + reader.getPath()); throw e; } } @@ -894,7 +867,6 @@ private boolean positionForNextBlock() throws IOException { return isNextBlock(); } - private boolean isNextBlock() throws IOException { // Methods are small so they get inlined because they are 'hot'. HFileBlock nextBlock = readNextDataBlock(); @@ -918,10 +890,8 @@ private final boolean _next() throws IOException { } /** - * Go to the next key/value in the block section. Loads the next block if - * necessary. If successful, {@link #getKey()} and {@link #getValue()} can - * be called. - * + * Go to the next key/value in the block section. Loads the next block if necessary. If + * successful, {@link #getKey()} and {@link #getValue()} can be called. * @return true if successfully navigated to the next key/value */ @Override @@ -935,9 +905,8 @@ public boolean next() throws IOException { /** * Positions this scanner at the start of the file. - * - * @return false if empty file; i.e. a call to next would return false and - * the current key and value are undefined. + * @return false if empty file; i.e. a call to next would return false and the current key and + * value are undefined. */ @Override public boolean seekTo() throws IOException { @@ -959,7 +928,7 @@ public boolean seekTo() throws IOException { return true; } - protected boolean processFirstDataBlock() throws IOException{ + protected boolean processFirstDataBlock() throws IOException { blockBuffer.rewind(); readKeyValueLen(); return true; @@ -970,8 +939,8 @@ protected void readAndUpdateNewBlock(long firstDataBlockOffset) throws IOExcepti isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding()); if (newBlock.getOffset() < 0) { releaseIfNotCurBlock(newBlock); - throw new IOException("Invalid offset=" + newBlock.getOffset() + - ", path=" + reader.getPath()); + throw new IOException( + "Invalid offset=" + newBlock.getOffset() + ", path=" + reader.getPath()); } updateCurrentBlock(newBlock); } @@ -1007,11 +976,11 @@ protected final boolean checkLen(final int v) { */ protected final void checkKeyValueLen() { if (checkKeyLen(this.currKeyLen) || checkLen(this.currValueLen)) { - throw new IllegalStateException("Invalid currKeyLen " + this.currKeyLen - + " or currValueLen " + this.currValueLen + ". Block offset: " - + this.curBlock.getOffset() + ", block length: " - + this.blockBuffer.limit() + ", position: " + this.blockBuffer.position() - + " (without header)." + ", path=" + reader.getPath()); + throw new IllegalStateException( + "Invalid currKeyLen " + this.currKeyLen + " or currValueLen " + this.currValueLen + + ". Block offset: " + this.curBlock.getOffset() + ", block length: " + + this.blockBuffer.limit() + ", position: " + this.blockBuffer.position() + + " (without header)." 
+ ", path=" + reader.getPath()); } } @@ -1048,8 +1017,8 @@ protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { buffer.skip(Bytes.SIZEOF_INT);// Skip value len part ByteBuffer keyBuff = buffer.asSubByteBuffer(klen); if (keyBuff.hasArray()) { - return new KeyValue.KeyOnlyKeyValue(keyBuff.array(), keyBuff.arrayOffset() - + keyBuff.position(), klen); + return new KeyValue.KeyOnlyKeyValue(keyBuff.array(), + keyBuff.arrayOffset() + keyBuff.position(), klen); } else { return new ByteBufferKeyOnlyKeyValue(keyBuff, keyBuff.position(), klen); } @@ -1106,8 +1075,7 @@ public void setConf(Configuration conf) { public static final int PBUF_TRAILER_MINOR_VERSION = 2; /** - * The size of a (key length, value length) tuple that prefixes each entry in - * a data block. + * The size of a (key length, value length) tuple that prefixes each entry in a data block. */ public final static int KEY_VALUE_LEN_SIZE = 2 * Bytes.SIZEOF_INT; @@ -1121,9 +1089,8 @@ private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, bo // Check cache for block. If found return. BlockCache cache = cacheConf.getBlockCache().orElse(null); if (cache != null) { - HFileBlock cachedBlock = - (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, - updateCacheMetrics, expectedBlockType); + HFileBlock cachedBlock = (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock, + updateCacheMetrics, expectedBlockType); if (cachedBlock != null) { if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) { HFileBlock compressedBlock = cachedBlock; @@ -1147,8 +1114,8 @@ private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, bo // Block types other than data blocks always have // DataBlockEncoding.NONE. To avoid false negative cache misses, only // perform this check if cached block is a data block. - if (cachedBlock.getBlockType().isData() && - !actualDataBlockEncoding.equals(expectedDataBlockEncoding)) { + if (cachedBlock.getBlockType().isData() + && !actualDataBlockEncoding.equals(expectedDataBlockEncoding)) { // This mismatch may happen if a Scanner, which is used for say a // compaction, tries to read an encoded block from the block cache. // The reverse might happen when an EncodedScanner tries to read @@ -1159,17 +1126,18 @@ private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, bo // forced here. This will potentially cause a significant number of // cache misses, so update so we should keep track of this as it might // justify the work on a CompoundScanner. - if (!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE) && - !actualDataBlockEncoding.equals(DataBlockEncoding.NONE)) { + if (!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE) + && !actualDataBlockEncoding.equals(DataBlockEncoding.NONE)) { // If the block is encoded but the encoding does not match the // expected encoding it is likely the encoding was changed but the // block was not yet evicted. Evictions on file close happen async // so blocks with the old encoding still linger in cache for some // period of time. This event should be rare as it only happens on // schema definition change. 
- LOG.info("Evicting cached block with key {} because data block encoding mismatch; " + - "expected {}, actual {}, path={}", cacheKey, actualDataBlockEncoding, - expectedDataBlockEncoding, path); + LOG.info( + "Evicting cached block with key {} because data block encoding mismatch; " + + "expected {}, actual {}, path={}", + cacheKey, actualDataBlockEncoding, expectedDataBlockEncoding, path); // This is an error scenario. so here we need to release the block. returnAndEvictBlock(cache, cacheKey, cachedBlock); } @@ -1191,8 +1159,7 @@ private void returnAndEvictBlock(BlockCache cache, BlockCacheKey cacheKey, Cache * @return block wrapped in a ByteBuffer, with header skipped */ @Override - public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) - throws IOException { + public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException { if (trailer.getMetaIndexCount() == 0) { return null; // there are no meta blocks } @@ -1201,8 +1168,7 @@ public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) } byte[] mbname = Bytes.toBytes(metaBlockName); - int block = metaBlockIndexReader.rootBlockContainingKey(mbname, - 0, mbname.length); + int block = metaBlockIndexReader.rootBlockContainingKey(mbname, 0, mbname.length); if (block == -1) { return null; } @@ -1263,20 +1229,17 @@ private boolean shouldUseHeap(BlockType expectedBlockType) { } @Override - public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, - final boolean cacheBlock, boolean pread, final boolean isCompaction, - boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) - throws IOException { + public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final boolean cacheBlock, + boolean pread, final boolean isCompaction, boolean updateCacheMetrics, + BlockType expectedBlockType, DataBlockEncoding expectedDataBlockEncoding) throws IOException { if (dataBlockIndexReader == null) { throw new IOException(path + " block index not loaded"); } long trailerOffset = trailer.getLoadOnOpenDataOffset(); if (dataBlockOffset < 0 || dataBlockOffset >= trailerOffset) { - throw new IOException("Requested block is out of range: " + dataBlockOffset + - ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset() + - ", trailer.getLoadOnOpenDataOffset: " + trailerOffset + - ", path=" + path); + throw new IOException("Requested block is out of range: " + dataBlockOffset + + ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset() + + ", trailer.getLoadOnOpenDataOffset: " + trailerOffset + ", path=" + path); } // For any given block from any given file, synchronize reads for said // block. @@ -1284,8 +1247,8 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, // the other choice is to duplicate work (which the cache would prevent you // from doing). - BlockCacheKey cacheKey = new BlockCacheKey(name, dataBlockOffset, - this.isPrimaryReplicaReader(), expectedBlockType); + BlockCacheKey cacheKey = + new BlockCacheKey(name, dataBlockOffset, this.isPrimaryReplicaReader(), expectedBlockType); boolean useLock = false; IdLock.Entry lockEntry = null; @@ -1375,16 +1338,13 @@ public boolean hasMVCCInfo() { } /** - * Compares the actual type of a block retrieved from cache or disk with its - * expected type and throws an exception in case of a mismatch. Expected - * block type of {@link BlockType#DATA} is considered to match the actual - * block type [@link {@link BlockType#ENCODED_DATA} as well. 
+ * Compares the actual type of a block retrieved from cache or disk with its expected type and + * throws an exception in case of a mismatch. Expected block type of {@link BlockType#DATA} is + * considered to match the actual block type [@link {@link BlockType#ENCODED_DATA} as well. * @param block a block retrieved from cache or disk - * @param expectedBlockType the expected block type, or null to skip the - * check + * @param expectedBlockType the expected block type, or null to skip the check */ - private void validateBlockType(HFileBlock block, - BlockType expectedBlockType) throws IOException { + private void validateBlockType(HFileBlock block, BlockType expectedBlockType) throws IOException { if (expectedBlockType == null) { return; } @@ -1395,25 +1355,24 @@ private void validateBlockType(HFileBlock block, return; } if (actualBlockType != expectedBlockType) { - throw new IOException("Expected block type " + expectedBlockType + ", " + - "but got " + actualBlockType + ": " + block + ", path=" + path); + throw new IOException("Expected block type " + expectedBlockType + ", " + "but got " + + actualBlockType + ": " + block + ", path=" + path); } } /** - * @return Last key as cell in the file. May be null if file has no entries. Note that - * this is not the last row key, but it is the Cell representation of the last - * key + * @return Last key as cell in the file. May be null if file has no entries. Note that this is not + * the last row key, but it is the Cell representation of the last key */ @Override public Optional getLastKey() { - return dataBlockIndexReader.isEmpty() ? Optional.empty() : - Optional.of(fileInfo.getLastKeyCell()); + return dataBlockIndexReader.isEmpty() ? Optional.empty() + : Optional.of(fileInfo.getLastKeyCell()); } /** - * @return Midkey for this file. We work with block boundaries only so - * returned midkey is an approximation only. + * @return Midkey for this file. We work with block boundaries only so returned midkey is an + * approximation only. 
*/ @Override public Optional midKey() throws IOException { @@ -1444,8 +1403,8 @@ protected static class EncodedScanner extends HFileScannerImpl { private final DataBlockEncoder.EncodedSeeker seeker; private final DataBlockEncoder dataBlockEncoder; - public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, - boolean pread, boolean isCompaction, HFileContext meta, Configuration conf) { + public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, boolean pread, + boolean isCompaction, HFileContext meta, Configuration conf) { super(reader, cacheBlocks, pread, isCompaction); DataBlockEncoding encoding = reader.getDataBlockEncoding(); dataBlockEncoder = encoding.getEncoder(); @@ -1454,7 +1413,7 @@ public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, } @Override - public boolean isSeeked(){ + public boolean isSeeked() { return curBlock != null; } @@ -1480,9 +1439,9 @@ protected void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileExcept short dataBlockEncoderId = newBlock.getDataBlockEncodingId(); if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) { String encoderCls = dataBlockEncoder.getClass().getName(); - throw new CorruptHFileException("Encoder " + encoderCls + - " doesn't support data block encoding " + - DataBlockEncoding.getNameFromId(dataBlockEncoderId) + ",path=" + reader.getPath()); + throw new CorruptHFileException("Encoder " + encoderCls + + " doesn't support data block encoding " + + DataBlockEncoding.getNameFromId(dataBlockEncoderId) + ",path=" + reader.getPath()); } updateCurrBlockRef(newBlock); ByteBuff encodedBuffer = getEncodedBuffer(newBlock); @@ -1567,8 +1526,8 @@ protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) { } @Override - protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, - boolean rewind, Cell key, boolean seekBefore) throws IOException { + protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, + Cell key, boolean seekBefore) throws IOException { if (this.curBlock == null || this.curBlock.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); } else if (rewind) { @@ -1585,8 +1544,7 @@ public int compareKey(CellComparator comparator, Cell key) { } /** - * Returns a buffer with the Bloom filter metadata. The caller takes - * ownership of the buffer. + * Returns a buffer with the Bloom filter metadata. The caller takes ownership of the buffer. 
*/ @Override public DataInput getGeneralBloomFilterMetadata() throws IOException { @@ -1598,12 +1556,11 @@ public DataInput getDeleteBloomFilterMetadata() throws IOException { return this.getBloomFilterMetadata(BlockType.DELETE_FAMILY_BLOOM_META); } - private DataInput getBloomFilterMetadata(BlockType blockType) - throws IOException { - if (blockType != BlockType.GENERAL_BLOOM_META && - blockType != BlockType.DELETE_FAMILY_BLOOM_META) { - throw new RuntimeException("Block Type: " + blockType.toString() + - " is not supported, path=" + path) ; + private DataInput getBloomFilterMetadata(BlockType blockType) throws IOException { + if (blockType != BlockType.GENERAL_BLOOM_META + && blockType != BlockType.DELETE_FAMILY_BLOOM_META) { + throw new RuntimeException( + "Block Type: " + blockType.toString() + " is not supported, path=" + path); } for (HFileBlock b : fileInfo.getLoadOnOpenBlocks()) { @@ -1624,8 +1581,8 @@ public HFileContext getFileContext() { } /** - * Returns false if block prefetching was requested for this file and has - * not completed, true otherwise + * Returns false if block prefetching was requested for this file and has not completed, true + * otherwise */ @Override public boolean prefetchComplete() { @@ -1634,15 +1591,14 @@ public boolean prefetchComplete() { /** * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is - * nothing to clean up in a Scanner. Letting go of your references to the - * scanner is sufficient. NOTE: Do not use this overload of getScanner for - * compactions. See {@link #getScanner(Configuration, boolean, boolean, boolean)} - * + * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up + * in a Scanner. Letting go of your references to the scanner is sufficient. NOTE: Do not use this + * overload of getScanner for compactions. See + * {@link #getScanner(Configuration, boolean, boolean, boolean)} * @param conf Store configuration. * @param cacheBlocks True if we should cache blocks read in by this scanner. - * @param pread Use positional read rather than seek+read if true (pread is - * better for random reads, seek+read is better scanning). + * @param pread Use positional read rather than seek+read if true (pread is better for random + * reads, seek+read is better scanning). * @return Scanner on this file. */ @Override @@ -1652,18 +1608,13 @@ public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final bo /** * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is - * nothing to clean up in a Scanner. Letting go of your references to the - * scanner is sufficient. - * @param conf - * Store configuration. - * @param cacheBlocks - * True if we should cache blocks read in by this scanner. - * @param pread - * Use positional read rather than seek+read if true (pread is better - * for random reads, seek+read is better scanning). - * @param isCompaction - * is scanner being used for a compaction? + * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is nothing to clean up + * in a Scanner. Letting go of your references to the scanner is sufficient. + * @param conf Store configuration. + * @param cacheBlocks True if we should cache blocks read in by this scanner. 
+ * @param pread Use positional read rather than seek+read if true (pread is better for random + * reads, seek+read is better scanning). + * @param isCompaction is scanner being used for a compaction? * @return Scanner on this file. */ @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index a2a35fef37af..f9f1d96a072d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,81 +20,68 @@ import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.Shipper; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.regionserver.Shipper; +import org.apache.yetus.audience.InterfaceAudience; /** - * A scanner allows you to position yourself within a HFile and - * scan through it. It allows you to reposition yourself as well. - * - *
<p>A scanner doesn't always have a key/value that it is pointing to - * when it is first created and before - * {@link #seekTo()}/{@link #seekTo(Cell)} are called. - * In this case, {@link #getKey()}/{@link #getValue()} returns null. At most - * other times, a key and value will be available. The general pattern is that - * you position the Scanner using the seekTo variants and then getKey and - * getValue. + * A scanner allows you to position yourself within a HFile and scan through it. It allows you to + * reposition yourself as well. + * <p>
          + * A scanner doesn't always have a key/value that it is pointing to when it is first created and + * before {@link #seekTo()}/{@link #seekTo(Cell)} are called. In this case, + * {@link #getKey()}/{@link #getValue()} returns null. At most other times, a key and value will be + * available. The general pattern is that you position the Scanner using the seekTo variants and + * then getKey and getValue. */ @InterfaceAudience.Private public interface HFileScanner extends Shipper, Closeable { /** - * SeekTo or just before the passed cell. Examine the return - * code to figure whether we found the cell or not. - * Consider the cell stream of all the cells in the file, + * SeekTo or just before the passed cell. Examine the return code to figure whether + * we found the cell or not. Consider the cell stream of all the cells in the file, * c[0] .. c[n], where there are n cells in the file. * @param cell - * @return -1, if cell < c[0], no position; - * 0, such that c[i] = cell and scanner is left in position i; and - * 1, such that c[i] < cell, and scanner is left in position i. - * The scanner will position itself between c[i] and c[i+1] where - * c[i] < cell <= c[i+1]. - * If there is no cell c[i+1] greater than or equal to the input cell, then the - * scanner will position itself at the end of the file and next() will return - * false when it is called. + * @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in + * position i; and 1, such that c[i] < cell, and scanner is left in position i. The + * scanner will position itself between c[i] and c[i+1] where c[i] < cell <= c[i+1]. + * If there is no cell c[i+1] greater than or equal to the input cell, then the scanner + * will position itself at the end of the file and next() will return false when it is + * called. * @throws IOException */ int seekTo(Cell cell) throws IOException; /** - * Reseek to or just before the passed cell. Similar to seekTo - * except that this can be called even if the scanner is not at the beginning - * of a file. - * This can be used to seek only to cells which come after the current position - * of the scanner. - * Consider the cell stream of all the cells in the file, - * c[0] .. c[n], where there are n cellc in the file after - * current position of HFileScanner. - * The scanner will position itself between c[i] and c[i+1] where - * c[i] < cell <= c[i+1]. - * If there is no cell c[i+1] greater than or equal to the input cell, then the - * scanner will position itself at the end of the file and next() will return + * Reseek to or just before the passed cell. Similar to seekTo except that this can + * be called even if the scanner is not at the beginning of a file. This can be used to seek only + * to cells which come after the current position of the scanner. Consider the cell stream of all + * the cells in the file, c[0] .. c[n], where there are n cellc in the file after + * current position of HFileScanner. The scanner will position itself between c[i] and c[i+1] + * where c[i] < cell <= c[i+1]. If there is no cell c[i+1] greater than or equal to the + * input cell, then the scanner will position itself at the end of the file and next() will return * false when it is called. * @param cell Cell to find (should be non-null) - * @return -1, if cell < c[0], no position; - * 0, such that c[i] = cell and scanner is left in position i; and - * 1, such that c[i] < cell, and scanner is left in position i. 
+ * @return -1, if cell < c[0], no position; 0, such that c[i] = cell and scanner is left in + * position i; and 1, such that c[i] < cell, and scanner is left in position i. * @throws IOException */ int reseekTo(Cell cell) throws IOException; /** - * Consider the cell stream of all the cells in the file, - * c[0] .. c[n], where there are n cells in the file. + * Consider the cell stream of all the cells in the file, c[0] .. c[n], where there + * are n cells in the file. * @param cell Cell to find - * @return false if cell <= c[0] or true with scanner in position 'i' such - * that: c[i] < cell. Furthermore: there may be a c[i+1], such that - * c[i] < cell <= c[i+1] but there may also NOT be a c[i+1], and next() will - * return false (EOF). + * @return false if cell <= c[0] or true with scanner in position 'i' such that: c[i] < + * cell. Furthermore: there may be a c[i+1], such that c[i] < cell <= c[i+1] but + * there may also NOT be a c[i+1], and next() will return false (EOF). * @throws IOException */ boolean seekBefore(Cell cell) throws IOException; /** * Positions this scanner at the start of the file. - * @return False if empty file; i.e. a call to next would return false and - * the current key and value are undefined. + * @return False if empty file; i.e. a call to next would return false and the current key and + * value are undefined. * @throws IOException */ boolean seekTo() throws IOException; @@ -108,18 +94,17 @@ public interface HFileScanner extends Shipper, Closeable { boolean next() throws IOException; /** - * Gets the current key in the form of a cell. You must call - * {@link #seekTo(Cell)} before this method. + * Gets the current key in the form of a cell. You must call {@link #seekTo(Cell)} before this + * method. * @return gets the current key as a Cell. */ Cell getKey(); /** - * Gets a buffer view to the current value. You must call - * {@link #seekTo(Cell)} before this method. - * - * @return byte buffer for the value. The limit is set to the value size, and - * the position is 0, the start of the buffer view. + * Gets a buffer view to the current value. You must call {@link #seekTo(Cell)} before this + * method. + * @return byte buffer for the value. The limit is set to the value size, and the position is 0, + * the start of the buffer view. */ ByteBuffer getValue(); @@ -129,8 +114,8 @@ public interface HFileScanner extends Shipper, Closeable { Cell getCell(); /** - * Convenience method to get a copy of the key as a string - interpreting the - * bytes as UTF8. You must call {@link #seekTo(Cell)} before this method. + * Convenience method to get a copy of the key as a string - interpreting the bytes as UTF8. You + * must call {@link #seekTo(Cell)} before this method. * @return key as a string * @deprecated Since hbase-2.0.0 */ @@ -138,8 +123,8 @@ public interface HFileScanner extends Shipper, Closeable { String getKeyString(); /** - * Convenience method to get a copy of the value as a string - interpreting - * the bytes as UTF8. You must call {@link #seekTo(Cell)} before this method. + * Convenience method to get a copy of the value as a string - interpreting the bytes as UTF8. You + * must call {@link #seekTo(Cell)} before this method. * @return value as a string * @deprecated Since hbase-2.0.0 */ @@ -152,9 +137,8 @@ public interface HFileScanner extends Shipper, Closeable { HFile.Reader getReader(); /** - * @return True is scanner has had one of the seek calls invoked; i.e. - * {@link #seekBefore(Cell)} or {@link #seekTo()} or {@link #seekTo(Cell)}. 
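// Editor's usage sketch (not part of this patch): one way a caller drives the seekTo
// contract documented above. The file path and row key are hypothetical; the reader and
// scanner signatures follow those visible elsewhere in this diff.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile"); // hypothetical HFile

    try (HFile.Reader reader = HFile.createReader(fs, path, CacheConfig.DISABLED, true, conf);
        HFileScanner scanner = reader.getScanner(conf, false, true)) {
      // seekTo(cell): -1 = key is before the first cell (scanner not positioned),
      // 0 = exact match, 1 = positioned on the last cell that sorts before the key.
      int where = scanner.seekTo(KeyValueUtil.createFirstOnRow(Bytes.toBytes("row-0042")));
      if (where != -1) {
        do {
          Cell c = scanner.getCell();
          System.out.println(CellUtil.getCellKeyAsString(c));
        } while (scanner.next());
      }
    }
  }
}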
- * Otherwise returns false. + * @return True is scanner has had one of the seek calls invoked; i.e. {@link #seekBefore(Cell)} + * or {@link #seekTo()} or {@link #seekTo(Cell)}. Otherwise returns false. */ boolean isSeeked(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java index 3f72b4adab32..a03cbab9f192 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileStreamReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,8 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of {@link HFile.Reader} to deal with stream read - * do not perform any prefetch operations (HFilePreadReader will do this). + * Implementation of {@link HFile.Reader} to deal with stream read do not perform any prefetch + * operations (HFilePreadReader will do this). */ @InterfaceAudience.Private public class HFileStreamReader extends HFileReaderImpl { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java index ec73f89631db..4d06c97c9aae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +18,21 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; - import org.apache.hadoop.fs.FSDataInputStream; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private class HFileUtil { - /** guards against NullPointer - * utility which tries to seek on the DFSIS and will try an alternative source - * if the FSDataInputStream throws an NPE HBASE-17501 + /** + * guards against NullPointer utility which tries to seek on the DFSIS and will try an alternative + * source if the FSDataInputStream throws an NPE HBASE-17501 * @param istream * @param offset * @throws IOException */ - static public void seekOnMultipleSources(FSDataInputStream istream, long offset) throws IOException { + static public void seekOnMultipleSources(FSDataInputStream istream, long offset) + throws IOException { try { // attempt to seek inside of current blockReader istream.seek(offset); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 4275c368aa97..70eddc8e78cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutput; @@ -64,14 +63,14 @@ public class HFileWriterImpl implements HFile.Writer { private static final long UNSET = -1; - /** if this feature is enabled, preCalculate encoded data size before real encoding happens*/ + /** if this feature is enabled, preCalculate encoded data size before real encoding happens */ public static final String UNIFIED_ENCODED_BLOCKSIZE_RATIO = - "hbase.writer.unified.encoded.blocksize.ratio"; + "hbase.writer.unified.encoded.blocksize.ratio"; - /** Block size limit after encoding, used to unify encoded block Cache entry size*/ + /** Block size limit after encoding, used to unify encoded block Cache entry size */ private final int encodedBlockSizeLimit; - /** The Cell previously appended. Becomes the last cell in the file.*/ + /** The Cell previously appended. Becomes the last cell in the file. */ protected Cell lastCell = null; /** FileSystem stream to write into. */ @@ -102,12 +101,10 @@ public class HFileWriterImpl implements HFile.Writer { protected List metaData = new ArrayList<>(); /** - * First cell in a block. - * This reference should be short-lived since we write hfiles in a burst. + * First cell in a block. This reference should be short-lived since we write hfiles in a burst. */ protected Cell firstCellInBlock = null; - /** May be null if we were passed a stream. */ protected final Path path; @@ -115,14 +112,14 @@ public class HFileWriterImpl implements HFile.Writer { protected final CacheConfig cacheConf; /** - * Name for this object used when logging or in toString. Is either - * the result of a toString on stream or else name of passed file Path. + * Name for this object used when logging or in toString. Is either the result of a toString on + * stream or else name of passed file Path. */ protected final String name; /** - * The data block encoding which will be used. - * {@link NoOpDataBlockEncoder#INSTANCE} if there is no encoding. + * The data block encoding which will be used. {@link NoOpDataBlockEncoder#INSTANCE} if there is + * no encoding. */ protected final HFileDataBlockEncoder blockEncoder; @@ -131,7 +128,7 @@ public class HFileWriterImpl implements HFile.Writer { private int maxTagsLength = 0; /** KeyValue version in FileInfo */ - public static final byte [] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION"); + public static final byte[] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION"); /** Version for KeyValue which includes memstore timestamp */ public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1; @@ -152,8 +149,8 @@ public class HFileWriterImpl implements HFile.Writer { protected long lastDataBlockOffset = UNSET; /** - * The last(stop) Cell of the previous data block. - * This reference should be short-lived since we write hfiles in a burst. + * The last(stop) Cell of the previous data block. This reference should be short-lived since we + * write hfiles in a burst. */ private Cell lastCellOfPreviousBlock = null; @@ -177,35 +174,30 @@ public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path pat closeOutputStream = path != null; this.cacheConf = cacheConf; float encodeBlockSizeRatio = conf.getFloat(UNIFIED_ENCODED_BLOCKSIZE_RATIO, 1f); - this.encodedBlockSizeLimit = (int)(hFileContext.getBlocksize() * encodeBlockSizeRatio); + this.encodedBlockSizeLimit = (int) (hFileContext.getBlocksize() * encodeBlockSizeRatio); finishInit(conf); if (LOG.isTraceEnabled()) { - LOG.trace("Writer" + (path != null ? 
" for " + path : "") + - " initialized with cacheConf: " + cacheConf + - " fileContext: " + fileContext); + LOG.trace("Writer" + (path != null ? " for " + path : "") + " initialized with cacheConf: " + + cacheConf + " fileContext: " + fileContext); } } /** * Add to the file info. All added key/value pairs can be obtained using * {@link HFile.Reader#getHFileInfo()}. - * * @param k Key * @param v Value * @throws IOException in case the key or the value are invalid */ @Override - public void appendFileInfo(final byte[] k, final byte[] v) - throws IOException { + public void appendFileInfo(final byte[] k, final byte[] v) throws IOException { fileInfo.append(k, v, true); } /** - * Sets the file info offset in the trailer, finishes up populating fields in - * the file info, and writes the file info into the given data output. The - * reason the data output is not always {@link #outputStream} is that we store - * file info as a block in version 2. - * + * Sets the file info offset in the trailer, finishes up populating fields in the file info, and + * writes the file info into the given data output. The reason the data output is not always + * {@link #outputStream} is that we store file info as a block in version 2. * @param trailer fixed file trailer * @param out the data output to write the file info to */ @@ -222,9 +214,9 @@ public long getPos() throws IOException { return outputStream.getPos(); } + /** * Checks that the given Cell's key does not violate the key order. - * * @param cell Cell whose key to check. * @return true if the key is duplicate * @throws IOException if the key or the key order is wrong @@ -254,15 +246,15 @@ private String getLexicalErrorMessage(Cell cell) { sb.append(cell); sb.append(", lastCell = "); sb.append(lastCell); - //file context includes HFile path and optionally table and CF of file being written + // file context includes HFile path and optionally table and CF of file being written sb.append("fileContext="); sb.append(hFileContext); return sb.toString(); } /** Checks the given value for validity. */ - protected void checkValue(final byte[] value, final int offset, - final int length) throws IOException { + protected void checkValue(final byte[] value, final int offset, final int length) + throws IOException { if (value == null) { throw new IOException("Value cannot be null"); } @@ -278,8 +270,8 @@ public Path getPath() { @Override public String toString() { - return "writer=" + (path != null ? path.toString() : null) + ", name=" - + name + ", compression=" + hFileContext.getCompression().getName(); + return "writer=" + (path != null ? 
path.toString() : null) + ", name=" + name + ", compression=" + + hFileContext.getCompression().getName(); } public static Compression.Algorithm compressionByName(String algoName) { @@ -290,10 +282,9 @@ public static Compression.Algorithm compressionByName(String algoName) { } /** A helper method to create HFile output streams in constructors */ - protected static FSDataOutputStream createOutputStream(Configuration conf, - FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException { - FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, - HConstants.DATA_FILE_UMASK_KEY); + protected static FSDataOutputStream createOutputStream(Configuration conf, FileSystem fs, + Path path, InetSocketAddress[] favoredNodes) throws IOException { + FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); return FSUtils.create(conf, fs, path, perms, favoredNodes); } @@ -302,17 +293,14 @@ protected void finishInit(final Configuration conf) { if (blockWriter != null) { throw new IllegalStateException("finishInit called twice"); } - blockWriter = new HFileBlock.Writer(conf, blockEncoder, hFileContext, - cacheConf.getByteBuffAllocator()); + blockWriter = + new HFileBlock.Writer(conf, blockEncoder, hFileContext, cacheConf.getByteBuffAllocator()); // Data block index writer boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite(); dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(blockWriter, - cacheIndexesOnWrite ? cacheConf : null, - cacheIndexesOnWrite ? name : null); - dataBlockIndexWriter.setMaxChunkSize( - HFileBlockIndex.getMaxChunkSize(conf)); - dataBlockIndexWriter.setMinIndexNumEntries( - HFileBlockIndex.getMinIndexNumEntries(conf)); + cacheIndexesOnWrite ? cacheConf : null, cacheIndexesOnWrite ? name : null); + dataBlockIndexWriter.setMaxChunkSize(HFileBlockIndex.getMaxChunkSize(conf)); + dataBlockIndexWriter.setMinIndexNumEntries(HFileBlockIndex.getMinIndexNumEntries(conf)); inlineBlockWriters.add(dataBlockIndexWriter); // Meta data block index writer @@ -334,7 +322,7 @@ protected void checkBlockBoundary() throws IOException { } } - /** Clean up the data block that is currently being written.*/ + /** Clean up the data block that is currently being written. */ private void finishBlock() throws IOException { if (!blockWriter.isWriting() || blockWriter.blockSizeWritten() == 0) { return; @@ -348,8 +336,8 @@ private void finishBlock() throws IOException { lastDataBlockOffset = outputStream.getPos(); blockWriter.writeHeaderAndData(outputStream); int onDiskSize = blockWriter.getOnDiskSizeWithHeader(); - Cell indexEntry = - getMidpoint(this.hFileContext.getCellComparator(), lastCellOfPreviousBlock, firstCellInBlock); + Cell indexEntry = getMidpoint(this.hFileContext.getCellComparator(), lastCellOfPreviousBlock, + firstCellInBlock); dataBlockIndexWriter.addEntry(PrivateCellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), lastDataBlockOffset, onDiskSize); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); @@ -359,10 +347,10 @@ private void finishBlock() throws IOException { } /** - * Try to return a Cell that falls between left and - * right but that is shorter; i.e. takes up less space. This - * trick is used building HFile block index. Its an optimization. It does not - * always work. In this case we'll just return the right cell. + * Try to return a Cell that falls between left and right but that is + * shorter; i.e. takes up less space. This trick is used building HFile block index. 
Its an + * optimization. It does not always work. In this case we'll just return the right + * cell. * @return A cell that sorts between left and right. */ public static Cell getMidpoint(final CellComparator comparator, final Cell left, @@ -380,8 +368,8 @@ public static Cell getMidpoint(final CellComparator comparator, final Cell left, return right; } byte[] midRow; - boolean bufferBacked = left instanceof ByteBufferExtendedCell - && right instanceof ByteBufferExtendedCell; + boolean bufferBacked = + left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell; if (bufferBacked) { midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getRowByteBuffer(), ((ByteBufferExtendedCell) left).getRowPosition(), left.getRowLength(), @@ -394,7 +382,7 @@ public static Cell getMidpoint(final CellComparator comparator, final Cell left, if (midRow != null) { return PrivateCellUtil.createFirstOnRow(midRow); } - //Rows are same. Compare on families. + // Rows are same. Compare on families. if (bufferBacked) { midRow = getMinimumMidpointArray(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(), @@ -429,9 +417,8 @@ public static Cell getMidpoint(final CellComparator comparator, final Cell left, /** * Try to get a byte array that falls between left and right as short as possible with * lexicographical order; - * - * @return Return a new array that is between left and right and minimally - * sized else just return null if left == right. + * @return Return a new array that is between left and right and minimally sized else just return + * null if left == right. */ private static byte[] getMinimumMidpointArray(final byte[] leftArray, final int leftOffset, final int leftLength, final byte[] rightArray, final int rightOffset, final int rightLength) { @@ -441,31 +428,31 @@ private static byte[] getMinimumMidpointArray(final byte[] leftArray, final int byte leftByte = leftArray[leftOffset + diffIdx]; byte rightByte = rightArray[rightOffset + diffIdx]; if ((leftByte & 0xff) > (rightByte & 0xff)) { - throw new IllegalArgumentException("Left byte array sorts after right row; left=" + Bytes - .toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + Bytes - .toStringBinary(rightArray, rightOffset, rightLength)); + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + Bytes.toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + + Bytes.toStringBinary(rightArray, rightOffset, rightLength)); } else if (leftByte != rightByte) { break; } } if (diffIdx == minLength) { if (leftLength > rightLength) { - //right is prefix of left - throw new IllegalArgumentException("Left byte array sorts after right row; left=" + Bytes - .toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + Bytes - .toStringBinary(rightArray, rightOffset, rightLength)); + // right is prefix of left + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + Bytes.toStringBinary(leftArray, leftOffset, leftLength) + ", right=" + + Bytes.toStringBinary(rightArray, rightOffset, rightLength)); } else if (leftLength < rightLength) { - //left is prefix of right. + // left is prefix of right. 
byte[] minimumMidpointArray = new byte[minLength + 1]; System.arraycopy(rightArray, rightOffset, minimumMidpointArray, 0, minLength + 1); minimumMidpointArray[minLength] = 0x00; return minimumMidpointArray; } else { - //left == right + // left == right return null; } } - //Note that left[diffIdx] can never be equal to 0xff since left < right + // Note that left[diffIdx] can never be equal to 0xff since left < right byte[] minimumMidpointArray = new byte[diffIdx + 1]; System.arraycopy(leftArray, leftOffset, minimumMidpointArray, 0, diffIdx + 1); minimumMidpointArray[diffIdx] = (byte) (minimumMidpointArray[diffIdx] + 1); @@ -475,9 +462,8 @@ private static byte[] getMinimumMidpointArray(final byte[] leftArray, final int /** * Try to create a new byte array that falls between left and right as short as possible with * lexicographical order. - * - * @return Return a new array that is between left and right and minimally - * sized else just return null if left == right. + * @return Return a new array that is between left and right and minimally sized else just return + * null if left == right. */ private static byte[] getMinimumMidpointArray(ByteBuffer left, int leftOffset, int leftLength, ByteBuffer right, int rightOffset, int rightLength) { @@ -487,34 +473,32 @@ private static byte[] getMinimumMidpointArray(ByteBuffer left, int leftOffset, i int leftByte = ByteBufferUtils.toByte(left, leftOffset + diffIdx); int rightByte = ByteBufferUtils.toByte(right, rightOffset + diffIdx); if ((leftByte & 0xff) > (rightByte & 0xff)) { - throw new IllegalArgumentException( - "Left byte array sorts after right row; left=" + ByteBufferUtils - .toStringBinary(left, leftOffset, leftLength) + ", right=" + ByteBufferUtils - .toStringBinary(right, rightOffset, rightLength)); + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + ByteBufferUtils.toStringBinary(left, leftOffset, leftLength) + ", right=" + + ByteBufferUtils.toStringBinary(right, rightOffset, rightLength)); } else if (leftByte != rightByte) { break; } } if (diffIdx == minLength) { if (leftLength > rightLength) { - //right is prefix of left - throw new IllegalArgumentException( - "Left byte array sorts after right row; left=" + ByteBufferUtils - .toStringBinary(left, leftOffset, leftLength) + ", right=" + ByteBufferUtils - .toStringBinary(right, rightOffset, rightLength)); + // right is prefix of left + throw new IllegalArgumentException("Left byte array sorts after right row; left=" + + ByteBufferUtils.toStringBinary(left, leftOffset, leftLength) + ", right=" + + ByteBufferUtils.toStringBinary(right, rightOffset, rightLength)); } else if (leftLength < rightLength) { - //left is prefix of right. + // left is prefix of right. 
byte[] minimumMidpointArray = new byte[minLength + 1]; - ByteBufferUtils - .copyFromBufferToArray(minimumMidpointArray, right, rightOffset, 0, minLength + 1); + ByteBufferUtils.copyFromBufferToArray(minimumMidpointArray, right, rightOffset, 0, + minLength + 1); minimumMidpointArray[minLength] = 0x00; return minimumMidpointArray; } else { - //left == right + // left == right return null; } } - //Note that left[diffIdx] can never be equal to 0xff since left < right + // Note that left[diffIdx] can never be equal to 0xff since left < right byte[] minimumMidpointArray = new byte[diffIdx + 1]; ByteBufferUtils.copyFromBufferToArray(minimumMidpointArray, left, leftOffset, 0, diffIdx + 1); minimumMidpointArray[diffIdx] = (byte) (minimumMidpointArray[diffIdx] + 1); @@ -527,11 +511,10 @@ private void writeInlineBlocks(boolean closing) throws IOException { while (ibw.shouldWriteBlock(closing)) { long offset = outputStream.getPos(); boolean cacheThisBlock = ibw.getCacheOnWrite(); - ibw.writeInlineBlock(blockWriter.startWriting( - ibw.getInlineBlockType())); + ibw.writeInlineBlock(blockWriter.startWriting(ibw.getInlineBlockType())); blockWriter.writeHeaderAndData(outputStream); ibw.blockWritten(offset, blockWriter.getOnDiskSizeWithHeader(), - blockWriter.getUncompressedSizeWithoutHeader()); + blockWriter.getUncompressedSizeWithoutHeader()); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); if (cacheThisBlock) { @@ -543,15 +526,14 @@ private void writeInlineBlocks(boolean closing) throws IOException { /** * Caches the last written HFile block. - * @param offset the offset of the block we want to cache. Used to determine - * the cache key. + * @param offset the offset of the block we want to cache. Used to determine the cache key. */ private void doCacheOnWrite(long offset) { cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf); try { cache.cacheBlock(new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()), - cacheFormatBlock); + cacheFormatBlock); } finally { // refCnt will auto increase when block add to Cache, see RAMCache#putIfAbsent cacheFormatBlock.release(); @@ -572,15 +554,12 @@ protected void newBlock() throws IOException { } /** - * Add a meta block to the end of the file. Call before close(). Metadata - * blocks are expensive. Fill one with a bunch of serialized data rather than - * do a metadata block per metadata instance. If metadata is small, consider - * adding to file info using {@link #appendFileInfo(byte[], byte[])} - * - * @param metaBlockName - * name of the block - * @param content - * will call readFields to get data later (DO NOT REUSE) + * Add a meta block to the end of the file. Call before close(). Metadata blocks are expensive. + * Fill one with a bunch of serialized data rather than do a metadata block per metadata instance. 
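Returning to the getMidpoint/getMinimumMidpointArray logic reformatted earlier in this hunk, the underlying trick can be restated without the Cell/ByteBuffer plumbing. A simplified sketch of the same idea (first differing byte, prefix handling), not the class's actual helper, and assuming left sorts no later than right as the real code enforces:

import java.util.Arrays;

final class MidpointSketch {
  /**
   * Return the shortest byte[] k with left < k <= right in unsigned lexicographic order,
   * or null when left equals right. Such a shortened key is what the block index can store
   * instead of the full first key of the next block.
   */
  static byte[] minimumMidpoint(byte[] left, byte[] right) {
    int minLength = Math.min(left.length, right.length);
    int diffIdx = 0;
    while (diffIdx < minLength && left[diffIdx] == right[diffIdx]) {
      diffIdx++;
    }
    if (diffIdx == minLength) {
      if (left.length == right.length) {
        return null; // left == right, nothing shorter exists
      }
      byte[] mid = Arrays.copyOf(left, minLength + 1);
      mid[minLength] = 0x00; // left is a proper prefix of right: append the smallest byte
      return mid;
    }
    // left[diffIdx] < right[diffIdx] unsigned, so it cannot be 0xff and the increment cannot overflow
    byte[] mid = Arrays.copyOf(left, diffIdx + 1);
    mid[diffIdx]++;
    return mid;
  }
}

For example, minimumMidpoint("abcdef".getBytes(), "abzzz".getBytes()) yields the bytes of "abd", which sorts after the last key of the previous block and no later than the first key of the next.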
+ * If metadata is small, consider adding to file info using + * {@link #appendFileInfo(byte[], byte[])} + * @param metaBlockName name of the block + * @param content will call readFields to get data later (DO NOT REUSE) */ @Override public void appendMetaBlock(String metaBlockName, Writable content) { @@ -589,8 +568,7 @@ public void appendMetaBlock(String metaBlockName, Writable content) { for (i = 0; i < metaNames.size(); ++i) { // stop when the current key is greater than our own byte[] cur = metaNames.get(i); - if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, - key.length) > 0) { + if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, key.length) > 0) { break; } } @@ -627,7 +605,7 @@ public void close() throws IOException { // Add the new meta block to the meta index. metaBlockIndexWriter.addEntry(metaNames.get(i), offset, - blockWriter.getOnDiskSizeWithHeader()); + blockWriter.getOnDiskSizeWithHeader()); } } @@ -644,8 +622,8 @@ public void close() throws IOException { trailer.setLoadOnOpenOffset(rootIndexOffset); // Meta block index. - metaBlockIndexWriter.writeSingleLevelIndex(blockWriter.startWriting( - BlockType.ROOT_INDEX), "meta"); + metaBlockIndexWriter.writeSingleLevelIndex(blockWriter.startWriting(BlockType.ROOT_INDEX), + "meta"); blockWriter.writeHeaderAndData(outputStream); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); @@ -660,21 +638,19 @@ public void close() throws IOException { totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); // Load-on-open data supplied by higher levels, e.g. Bloom filters. - for (BlockWritable w : additionalLoadOnOpenData){ + for (BlockWritable w : additionalLoadOnOpenData) { blockWriter.writeBlock(w, outputStream); totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader(); } // Now finish off the trailer. trailer.setNumDataIndexLevels(dataBlockIndexWriter.getNumLevels()); - trailer.setUncompressedDataIndexSize( - dataBlockIndexWriter.getTotalUncompressedSize()); + trailer.setUncompressedDataIndexSize(dataBlockIndexWriter.getTotalUncompressedSize()); trailer.setFirstDataBlockOffset(firstDataBlockOffset); trailer.setLastDataBlockOffset(lastDataBlockOffset); trailer.setComparatorClass(this.hFileContext.getCellComparator().getClass()); trailer.setDataIndexCount(dataBlockIndexWriter.getNumRootEntries()); - finishClose(trailer); blockWriter.release(); @@ -695,16 +671,14 @@ public void addDeleteFamilyBloomFilter(final BloomFilterWriter bfw) { this.addBloomFilter(bfw, BlockType.DELETE_FAMILY_BLOOM_META); } - private void addBloomFilter(final BloomFilterWriter bfw, - final BlockType blockType) { + private void addBloomFilter(final BloomFilterWriter bfw, final BlockType blockType) { if (bfw.getKeyCount() <= 0) { return; } - if (blockType != BlockType.GENERAL_BLOOM_META && - blockType != BlockType.DELETE_FAMILY_BLOOM_META) { - throw new RuntimeException("Block Type: " + blockType.toString() + - "is not supported"); + if (blockType != BlockType.GENERAL_BLOOM_META + && blockType != BlockType.DELETE_FAMILY_BLOOM_META) { + throw new RuntimeException("Block Type: " + blockType.toString() + "is not supported"); } additionalLoadOnOpenData.add(new BlockWritable() { @Override @@ -729,11 +703,9 @@ public HFileContext getFileContext() { } /** - * Add key/value to file. Keys must be added in an order that agrees with the - * Comparator passed on construction. - * - * @param cell - * Cell to add. Cannot be empty nor null. + * Add key/value to file. 
Keys must be added in an order that agrees with the Comparator passed on + * construction. + * @param cell Cell to add. Cannot be empty nor null. */ @Override public void append(final Cell cell) throws IOException { @@ -792,27 +764,25 @@ protected void finishFileInfo() throws IOException { if (lastCell != null) { // Make a copy. The copy is stuffed into our fileinfo map. Needs a clean // byte buffer. Won't take a tuple. - byte [] lastKey = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell); + byte[] lastKey = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell); fileInfo.append(HFileInfo.LASTKEY, lastKey, false); } // Average key length. - int avgKeyLen = - entryCount == 0 ? 0 : (int) (totalKeyLength / entryCount); + int avgKeyLen = entryCount == 0 ? 0 : (int) (totalKeyLength / entryCount); fileInfo.append(HFileInfo.AVG_KEY_LEN, Bytes.toBytes(avgKeyLen), false); fileInfo.append(HFileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()), false); // Average value length. - int avgValueLen = - entryCount == 0 ? 0 : (int) (totalValueLength / entryCount); + int avgValueLen = entryCount == 0 ? 0 : (int) (totalValueLength / entryCount); fileInfo.append(HFileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false); if (hFileContext.isIncludesTags()) { // When tags are not being written in this file, MAX_TAGS_LEN is excluded // from the FileInfo fileInfo.append(HFileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false); boolean tagsCompressed = (hFileContext.getDataBlockEncoding() != DataBlockEncoding.NONE) - && hFileContext.isCompressTags(); + && hFileContext.isCompressTags(); fileInfo.append(HFileInfo.TAGS_COMPRESSED, Bytes.toBytes(tagsCompressed), false); } } @@ -831,14 +801,14 @@ protected void finishClose(FixedFileTrailer trailer) throws IOException { if (cryptoContext != Encryption.Context.NONE) { // Wrap the context's key and write it as the encryption metadata, the wrapper includes // all information needed for decryption - trailer.setEncryptionKey(EncryptionUtil.wrapKey(cryptoContext.getConf(), - cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()), + trailer.setEncryptionKey(EncryptionUtil.wrapKey( + cryptoContext.getConf(), cryptoContext.getConf() + .get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), cryptoContext.getKey())); } // Now we can finish the close trailer.setMetaIndexCount(metaNames.size()); - trailer.setTotalUncompressedBytes(totalUncompressedBytes+ trailer.getTrailerSize()); + trailer.setTotalUncompressedBytes(totalUncompressedBytes + trailer.getTrailerSize()); trailer.setEntryCount(entryCount); trailer.setCompressionCodec(hFileContext.getCompression()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java index 8b85c68f9a58..d993ae2f8bcf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java @@ -1,22 +1,20 @@ -/** - * Copyright The Apache Software Foundation +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; import org.apache.yetus.audience.InterfaceAudience; @@ -24,13 +22,13 @@ @InterfaceAudience.Private public class InclusiveCombinedBlockCache extends CombinedBlockCache { public InclusiveCombinedBlockCache(FirstLevelBlockCache l1, BlockCache l2) { - super(l1,l2); + super(l1, l2); l1.setVictimCache(l2); } @Override - public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, - boolean repeat, boolean updateCacheMetrics) { + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { // On all external cache set ups the lru should have the l2 cache set as the victimHandler // Because of that all requests that miss inside of the lru block cache will be // tried in the l2 block cache. @@ -38,11 +36,10 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, } /** - * * @param cacheKey The block's cache key. * @param buf The block contents wrapped in a ByteBuffer. * @param inMemory Whether block should be treated as in-memory. This parameter is only useful for - * the L1 lru cache. + * the L1 lru cache. */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java index 50b195dd8e96..eb9934202e5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/IndexOnlyLruBlockCache.java @@ -21,23 +21,23 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * An on heap block cache implementation extended LruBlockCache and only cache index block. - * This block cache should be only used by - * {@link org.apache.hadoop.hbase.client.ClientSideRegionScanner} that normally considers to be - * used by client resides out of the region server, e.g. a container of a map reduce job. + * An on heap block cache implementation extended LruBlockCache and only cache index block. 
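The victim-cache wiring shown just above for InclusiveCombinedBlockCache (l1.setVictimCache(l2), so anything that misses in the LRU is retried in the L2/external cache) reduces to a small fall-through lookup. A toy stand-in, not the real BlockCache or FirstLevelBlockCache types:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class ToyVictimCache<K, V> {
  private final Map<K, V> blocks = new ConcurrentHashMap<>();
  private ToyVictimCache<K, V> victimCache; // e.g. the external L2 cache

  void setVictimCache(ToyVictimCache<K, V> victim) {
    this.victimCache = victim;
  }

  void cache(K key, V value) {
    blocks.put(key, value);
  }

  /** L1 lookup; on a miss the request falls through to the victim (L2) cache. */
  V get(K key) {
    V hit = blocks.get(key);
    if (hit != null) {
      return hit;
    }
    return victimCache != null ? victimCache.get(key) : null;
  }
}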
This + * block cache should be only used by {@link org.apache.hadoop.hbase.client.ClientSideRegionScanner} + * that normally considers to be used by client resides out of the region server, e.g. a container + * of a map reduce job. **/ @InterfaceAudience.Private public class IndexOnlyLruBlockCache extends LruBlockCache { public IndexOnlyLruBlockCache(long maxSize, long blockSize, boolean evictionThread, - Configuration conf) { + Configuration conf) { super(maxSize, blockSize, evictionThread, conf); } /** * Cache only index block with the specified name and buffer * @param cacheKey block's cache key - * @param buf block buffer + * @param buf block buffer * @param inMemory if block is in-memory */ @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java index 12ae6a50a3c2..48ad08100599 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,41 +19,34 @@ import java.io.DataOutput; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * A way to write "inline" blocks into an {@link HFile}. Inline blocks are - * interspersed with data blocks. For example, Bloom filter chunks and - * leaf-level blocks of a multi-level block index are stored as inline blocks. + * A way to write "inline" blocks into an {@link HFile}. Inline blocks are interspersed with data + * blocks. For example, Bloom filter chunks and leaf-level blocks of a multi-level block index are + * stored as inline blocks. */ @InterfaceAudience.Private public interface InlineBlockWriter { /** * Determines whether there is a new block to be written out. - * - * @param closing - * whether the file is being closed, in which case we need to write - * out all available data and not wait to accumulate another block + * @param closing whether the file is being closed, in which case we need to write out all + * available data and not wait to accumulate another block */ boolean shouldWriteBlock(boolean closing); /** - * Writes the block to the provided stream. Must not write any magic records. - * Called only if {@link #shouldWriteBlock(boolean)} returned true. - * - * @param out - * a stream (usually a compressing stream) to write the block to + * Writes the block to the provided stream. Must not write any magic records. Called only if + * {@link #shouldWriteBlock(boolean)} returned true. + * @param out a stream (usually a compressing stream) to write the block to */ void writeInlineBlock(DataOutput out) throws IOException; /** - * Called after a block has been written, and its offset, raw size, and - * compressed size have been determined. Can be used to add an entry to a - * block index. If this type of inline blocks needs a block index, the inline - * block writer is responsible for maintaining it. - * + * Called after a block has been written, and its offset, raw size, and compressed size have been + * determined. Can be used to add an entry to a block index. If this type of inline blocks needs a + * block index, the inline block writer is responsible for maintaining it. 
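The InlineBlockWriter contract spelled out above (shouldWriteBlock, then writeInlineBlock, then blockWritten, with the writer maintaining its own index if it needs one) is easiest to see as a small accumulator. A toy example following that callback order; it deliberately does not implement the real interface, so no signatures beyond the documented ones are assumed:

import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class ToyInlineChunkWriter {
  private static final int CHUNK_SIZE = 128;
  private final List<byte[]> pending = new ArrayList<>();
  private final List<Long> chunkOffsets = new ArrayList<>(); // our own index, as the javadoc requires

  void add(byte[] entry) {
    pending.add(entry);
  }

  /** On close, flush whatever is buffered; otherwise wait until a full chunk has accumulated. */
  boolean shouldWriteBlock(boolean closing) {
    return closing ? !pending.isEmpty() : pending.size() >= CHUNK_SIZE;
  }

  /** Writes only the payload; magic records, headers and checksums belong to the enclosing block writer. */
  void writeInlineBlock(DataOutput out) throws IOException {
    out.writeInt(pending.size());
    for (byte[] entry : pending) {
      out.writeInt(entry.length);
      out.write(entry);
    }
    pending.clear();
  }

  /** Offset and sizes are only known after the block hit the stream, so the index entry is recorded here. */
  void blockWritten(long offset, int onDiskSize, int uncompressedSize) {
    chunkOffsets.add(offset);
  }
}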
* @param offset the offset of the block in the stream * @param onDiskSize the on-disk size of the block * @param uncompressedSize the uncompressed size of the block diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java index d0526656a3b2..eed3a53acfe4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InvalidHFileException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java index 494a588aadb8..6e5f5e4eaa82 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,14 +49,13 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * This realisation improve performance of classical LRU - * cache up to 3 times via reduce GC job. + * This realisation improve performance of classical LRU cache up to 3 times via reduce GC + * job. *

          * The classical block cache implementation that is memory-aware using {@link HeapSize}, - * memory-bound using an - * LRU eviction algorithm, and concurrent: backed by a {@link ConcurrentHashMap} and with a - * non-blocking eviction thread giving constant-time {@link #cacheBlock} and {@link #getBlock} - * operations. + * memory-bound using an LRU eviction algorithm, and concurrent: backed by a + * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving constant-time + * {@link #cacheBlock} and {@link #getBlock} operations. *

          * Contains three levels of block priority to allow for scan-resistance and in-memory families * {@link org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder#setInMemory(boolean)} (An @@ -91,57 +90,50 @@ *

          * Adaptive LRU cache lets speed up performance while we are reading much more data than can fit * into BlockCache and it is the cause of a high rate of evictions. This in turn leads to heavy - * Garbage Collector works. So a lot of blocks put into BlockCache but never read, but spending - * a lot of CPU resources for cleaning. We could avoid this situation via parameters: + * Garbage Collector works. So a lot of blocks put into BlockCache but never read, but spending a + * lot of CPU resources for cleaning. We could avoid this situation via parameters: *

          - * hbase.lru.cache.heavy.eviction.count.limit - set how many times we have to run the - * eviction process that starts to avoid putting data to BlockCache. By default it is 0 and it - * meats the feature will start at the beginning. But if we have some times short reading the same - * data and some times long-term reading - we can divide it by this parameter. For example we know - * that our short reading used to be about 1 minutes, then we have to set the parameter about 10 - * and it will enable the feature only for long time massive reading (after ~100 seconds). So when - * we use short-reading and want all of them in the cache we will have it (except for eviction of - * course). When we use long-term heavy reading the feature will be enabled after some time and - * bring better performance. + * hbase.lru.cache.heavy.eviction.count.limit - set how many times we have to run the + * eviction process that starts to avoid putting data to BlockCache. By default it is 0 and it meats + * the feature will start at the beginning. But if we have some times short reading the same data + * and some times long-term reading - we can divide it by this parameter. For example we know that + * our short reading used to be about 1 minutes, then we have to set the parameter about 10 and it + * will enable the feature only for long time massive reading (after ~100 seconds). So when we use + * short-reading and want all of them in the cache we will have it (except for eviction of course). + * When we use long-term heavy reading the feature will be enabled after some time and bring better + * performance. *

          * hbase.lru.cache.heavy.eviction.mb.size.limit - set how many bytes in 10 seconds desirable * putting into BlockCache (and evicted from it). The feature will try to reach this value and - * maintain it. Don't try to set it too small because it leads to premature exit from this mode. - * For powerful CPUs (about 20-40 physical cores) it could be about 400-500 MB. Average system - * (~10 cores) 200-300 MB. Some weak systems (2-5 cores) may be good with 50-100 MB. - * How it works: we set the limit and after each ~10 second calculate how many bytes were freed. - * Overhead = Freed Bytes Sum (MB) * 100 / Limit (MB) - 100; - * For example we set the limit = 500 and were evicted 2000 MB. Overhead is: - * 2000 * 100 / 500 - 100 = 300% - * The feature is going to reduce a percent caching data blocks and fit evicted bytes closer to - * 100% (500 MB). Some kind of an auto-scaling. - * If freed bytes less then the limit we have got negative overhead. - * For example if were freed 200 MB: - * 200 * 100 / 500 - 100 = -60% - * The feature will increase the percent of caching blocks. - * That leads to fit evicted bytes closer to 100% (500 MB). - * The current situation we can find out in the log of RegionServer: - * BlockCache evicted (MB): 0, overhead (%): -100, heavy eviction counter: 0, current caching - * DataBlock (%): 100 - means no eviction, 100% blocks is caching - * BlockCache evicted (MB): 2000, overhead (%): 300, heavy eviction counter: 1, current caching - * DataBlock (%): 97 - means eviction begin, reduce of caching blocks by 3%. - * It help to tune your system and find out what value is better set. Don't try to reach 0% - * overhead, it is impossible. Quite good 50-100% overhead, - * it prevents premature exit from this mode. + * maintain it. Don't try to set it too small because it leads to premature exit from this mode. For + * powerful CPUs (about 20-40 physical cores) it could be about 400-500 MB. Average system (~10 + * cores) 200-300 MB. Some weak systems (2-5 cores) may be good with 50-100 MB. How it works: we set + * the limit and after each ~10 second calculate how many bytes were freed. Overhead = Freed Bytes + * Sum (MB) * 100 / Limit (MB) - 100; For example we set the limit = 500 and were evicted 2000 MB. + * Overhead is: 2000 * 100 / 500 - 100 = 300% The feature is going to reduce a percent caching data + * blocks and fit evicted bytes closer to 100% (500 MB). Some kind of an auto-scaling. If freed + * bytes less then the limit we have got negative overhead. For example if were freed 200 MB: 200 * + * 100 / 500 - 100 = -60% The feature will increase the percent of caching blocks. That leads to fit + * evicted bytes closer to 100% (500 MB). The current situation we can find out in the log of + * RegionServer: BlockCache evicted (MB): 0, overhead (%): -100, heavy eviction counter: 0, current + * caching DataBlock (%): 100 - means no eviction, 100% blocks is caching BlockCache evicted (MB): + * 2000, overhead (%): 300, heavy eviction counter: 1, current caching DataBlock (%): 97 - means + * eviction begin, reduce of caching blocks by 3%. It help to tune your system and find out what + * value is better set. Don't try to reach 0% overhead, it is impossible. Quite good 50-100% + * overhead, it prevents premature exit from this mode. *

          * hbase.lru.cache.heavy.eviction.overhead.coefficient - set how fast we want to get the * result. If we know that our reading is heavy for a long time, we don't want to wait and can * increase the coefficient and get good performance sooner. But if we aren't sure we can do it - * slowly and it could prevent premature exit from this mode. So, when the coefficient is higher - * we can get better performance when heavy reading is stable. But when reading is changing we - * can adjust to it and set the coefficient to lower value. - * For example, we set the coefficient = 0.01. It means the overhead (see above) will be - * multiplied by 0.01 and the result is the value of reducing percent caching blocks. For example, - * if the overhead = 300% and the coefficient = 0.01, - * then percent of caching blocks will reduce by 3%. - * Similar logic when overhead has got negative value (overshooting). Maybe it is just short-term - * fluctuation and we will try to stay in this mode. It helps avoid premature exit during - * short-term fluctuation. Backpressure has simple logic: more overshooting - more caching blocks. + * slowly and it could prevent premature exit from this mode. So, when the coefficient is higher we + * can get better performance when heavy reading is stable. But when reading is changing we can + * adjust to it and set the coefficient to lower value. For example, we set the coefficient = 0.01. + * It means the overhead (see above) will be multiplied by 0.01 and the result is the value of + * reducing percent caching blocks. For example, if the overhead = 300% and the coefficient = 0.01, + * then percent of caching blocks will reduce by 3%. Similar logic when overhead has got negative + * value (overshooting). Maybe it is just short-term fluctuation and we will try to stay in this + * mode. It helps avoid premature exit during short-term fluctuation. Backpressure has simple logic: + * more overshooting - more caching blocks. *
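A worked restatement of the arithmetic in the three parameters above, using illustrative names rather than the class's actual fields: overhead is freedMb * 100 / limitMb - 100, and the percentage of data blocks admitted to the cache moves down (positive overhead) or back up (negative overhead) by overhead times the coefficient.

final class AdaptiveCachingMath {
  static int nextCacheDataBlockPercent(long freedMb, long limitMb, float coefficient,
      int currentPercent) {
    // Overhead = Freed Bytes Sum (MB) * 100 / Limit (MB) - 100
    long overhead = freedMb * 100 / limitMb - 100;
    // Positive overhead (evicting too much) shrinks the percent of cached data blocks;
    // negative overhead (overshooting) lets it creep back toward 100.
    int adjusted = Math.round(currentPercent - overhead * coefficient);
    return Math.max(1, Math.min(100, adjusted));
  }

  public static void main(String[] args) {
    // limit 500 MB, 2000 MB evicted: overhead 300%, so 3% fewer data blocks cached (coefficient 0.01)
    System.out.println(nextCacheDataBlockPercent(2000, 500, 0.01f, 100)); // 97
    // limit 500 MB, nothing evicted: overhead -100%, caching percent creeps back up
    System.out.println(nextCacheDataBlockPercent(0, 500, 0.01f, 97)); // 98
  }
}

The real backpressure logic in the class is more involved; this only mirrors the headline formula from the javadoc.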

          * Find more information about improvement: https://issues.apache.org/jira/browse/HBASE-23887 */ @@ -160,29 +152,29 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { * Acceptable size of cache (no evictions if size < acceptable) */ private static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = - "hbase.lru.blockcache.acceptable.factor"; + "hbase.lru.blockcache.acceptable.factor"; /** * Hard capacity limit of cache, will reject any put if size > this * acceptable */ static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = - "hbase.lru.blockcache.hard.capacity.limit.factor"; + "hbase.lru.blockcache.hard.capacity.limit.factor"; private static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.single.percentage"; + "hbase.lru.blockcache.single.percentage"; private static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.multi.percentage"; + "hbase.lru.blockcache.multi.percentage"; private static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = - "hbase.lru.blockcache.memory.percentage"; + "hbase.lru.blockcache.memory.percentage"; /** - * Configuration key to force data-block always (except in-memory are too much) - * cached in memory for in-memory hfile, unlike inMemory, which is a column-family - * configuration, inMemoryForceMode is a cluster-wide configuration + * Configuration key to force data-block always (except in-memory are too much) cached in memory + * for in-memory hfile, unlike inMemory, which is a column-family configuration, inMemoryForceMode + * is a cluster-wide configuration */ private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = - "hbase.lru.rs.inmemoryforcemode"; + "hbase.lru.rs.inmemoryforcemode"; - /* Default Configuration Parameters*/ + /* Default Configuration Parameters */ /* Backing Concurrent Map Configuration */ static final float DEFAULT_LOAD_FACTOR = 0.75f; @@ -206,29 +198,28 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { private static final String LRU_MAX_BLOCK_SIZE = "hbase.lru.max.block.size"; private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; - private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT - = "hbase.lru.cache.heavy.eviction.count.limit"; + private static final String LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = + "hbase.lru.cache.heavy.eviction.count.limit"; // Default value actually equal to disable feature of increasing performance. // Because 2147483647 is about ~680 years (after that it will start to work) // We can set it to 0-10 and get the profit right now. // (see details https://issues.apache.org/jira/browse/HBASE-23887). 
private static final int DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT = Integer.MAX_VALUE; - private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT - = "hbase.lru.cache.heavy.eviction.mb.size.limit"; + private static final String LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = + "hbase.lru.cache.heavy.eviction.mb.size.limit"; private static final long DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT = 500; - private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT - = "hbase.lru.cache.heavy.eviction.overhead.coefficient"; + private static final String LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = + "hbase.lru.cache.heavy.eviction.overhead.coefficient"; private static final float DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT = 0.01f; /** * Defined the cache map as {@link ConcurrentHashMap} here, because in - * {@link LruAdaptiveBlockCache#getBlock}, we need to guarantee the atomicity - * of map#computeIfPresent (key, func). Besides, the func method must execute exactly once only - * when the key is present and under the lock context, otherwise the reference count will be - * messed up. Notice that the - * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. + * {@link LruAdaptiveBlockCache#getBlock}, we need to guarantee the atomicity of + * map#computeIfPresent (key, func). Besides, the func method must execute exactly once only when + * the key is present and under the lock context, otherwise the reference count will be messed up. + * Notice that the {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. */ private transient final ConcurrentHashMap map; @@ -245,8 +236,8 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { /** Statistics thread schedule pool (for heavy debugging, could remove) */ private transient final ScheduledExecutorService scheduleThreadPool = - Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() - .setNameFormat("LruAdaptiveBlockCacheStatsExecutor").setDaemon(true).build()); + Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() + .setNameFormat("LruAdaptiveBlockCacheStatsExecutor").setDaemon(true).build()); /** Current size of cache */ private final AtomicLong size; @@ -298,8 +289,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { /** * Where to send victims (blocks evicted/missing from the cache). This is used only when we use an - * external cache as L2. - * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + * external cache as L2. Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache */ private transient BlockCache victimHandler = null; @@ -316,13 +306,11 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { private final float heavyEvictionOverheadCoefficient; /** - * Default constructor. Specify maximum size and expected average block - * size (approximation is fine). - * - *

          All other factors will be calculated based on defaults specified in - * this class. - * - * @param maxSize maximum size of cache, in bytes + * Default constructor. Specify maximum size and expected average block size (approximation is + * fine). + *

          + * All other factors will be calculated based on defaults specified in this class. + * @param maxSize maximum size of cache, in bytes * @param blockSize approximate size of each block, in bytes */ public LruAdaptiveBlockCache(long maxSize, long blockSize) { @@ -330,45 +318,37 @@ public LruAdaptiveBlockCache(long maxSize, long blockSize) { } /** - * Constructor used for testing. Allows disabling of the eviction thread. + * Constructor used for testing. Allows disabling of the eviction thread. */ public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThread) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, - DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, - DEFAULT_SINGLE_FACTOR, - DEFAULT_MULTI_FACTOR, - DEFAULT_MEMORY_FACTOR, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, - false, - DEFAULT_MAX_BLOCK_SIZE, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, DEFAULT_MIN_FACTOR, + DEFAULT_ACCEPTABLE_FACTOR, DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR, + DEFAULT_MEMORY_FACTOR, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT); } - public LruAdaptiveBlockCache(long maxSize, long blockSize, - boolean evictionThread, Configuration conf) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, - DEFAULT_CONCURRENCY_LEVEL, - conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), - conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), - conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), - conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), - conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), - conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), - conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), - conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), - conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), - conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), - conf.getFloat(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, - DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); + public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThread, + Configuration conf) { + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, + conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), + conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), + conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), + conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), + conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), + conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, 
DEFAULT_IN_MEMORY_FORCE_MODE), + conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), + conf.getInt(LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_COUNT_LIMIT), + conf.getLong(LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_MB_SIZE_LIMIT), + conf.getFloat(LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT, + DEFAULT_LRU_CACHE_HEAVY_EVICTION_OVERHEAD_COEFFICIENT)); } public LruAdaptiveBlockCache(long maxSize, long blockSize, Configuration conf) { @@ -376,38 +356,35 @@ public LruAdaptiveBlockCache(long maxSize, long blockSize, Configuration conf) { } /** - * Configurable constructor. Use this constructor if not using defaults. - * - * @param maxSize maximum size of this cache, in bytes - * @param blockSize expected average size of blocks, in bytes - * @param evictionThread whether to run evictions in a bg thread or not - * @param mapInitialSize initial size of backing ConcurrentHashMap - * @param mapLoadFactor initial load factor of backing ConcurrentHashMap + * Configurable constructor. Use this constructor if not using defaults. + * @param maxSize maximum size of this cache, in bytes + * @param blockSize expected average size of blocks, in bytes + * @param evictionThread whether to run evictions in a bg thread or not + * @param mapInitialSize initial size of backing ConcurrentHashMap + * @param mapLoadFactor initial load factor of backing ConcurrentHashMap * @param mapConcurrencyLevel initial concurrency factor for backing CHM - * @param minFactor percentage of total size that eviction will evict until - * @param acceptableFactor percentage of total size that triggers eviction - * @param singleFactor percentage of total size for single-access blocks - * @param multiFactor percentage of total size for multiple-access blocks - * @param memoryFactor percentage of total size for in-memory blocks - * @param hardLimitFactor hard capacity limit - * @param forceInMemory in-memory hfile's data block has higher priority when evicting - * @param maxBlockSize maximum block size for caching - * @param heavyEvictionCountLimit when starts AdaptiveLRU algoritm work - * @param heavyEvictionMbSizeLimit how many bytes desirable putting into BlockCache - * @param heavyEvictionOverheadCoefficient how aggressive AdaptiveLRU will reduce GC + * @param minFactor percentage of total size that eviction will evict until + * @param acceptableFactor percentage of total size that triggers eviction + * @param singleFactor percentage of total size for single-access blocks + * @param multiFactor percentage of total size for multiple-access blocks + * @param memoryFactor percentage of total size for in-memory blocks + * @param hardLimitFactor hard capacity limit + * @param forceInMemory in-memory hfile's data block has higher priority when evicting + * @param maxBlockSize maximum block size for caching + * @param heavyEvictionCountLimit when starts AdaptiveLRU algoritm work + * @param heavyEvictionMbSizeLimit how many bytes desirable putting into BlockCache + * @param heavyEvictionOverheadCoefficient how aggressive AdaptiveLRU will reduce GC */ public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThread, - int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, - float minFactor, float acceptableFactor, float singleFactor, - float multiFactor, float memoryFactor, float hardLimitFactor, - boolean forceInMemory, long maxBlockSize, - int heavyEvictionCountLimit, long heavyEvictionMbSizeLimit, - float heavyEvictionOverheadCoefficient) { + int 
mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, float minFactor, + float acceptableFactor, float singleFactor, float multiFactor, float memoryFactor, + float hardLimitFactor, boolean forceInMemory, long maxBlockSize, int heavyEvictionCountLimit, + long heavyEvictionMbSizeLimit, float heavyEvictionOverheadCoefficient) { this.maxBlockSize = maxBlockSize; - if(singleFactor + multiFactor + memoryFactor != 1 || - singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { - throw new IllegalArgumentException("Single, multi, and memory factors " + - " should be non-negative and total 1.0"); + if (singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 + || memoryFactor < 0) { + throw new IllegalArgumentException( + "Single, multi, and memory factors " + " should be non-negative and total 1.0"); } if (minFactor >= acceptableFactor) { throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); @@ -447,7 +424,7 @@ public LruAdaptiveBlockCache(long maxSize, long blockSize, boolean evictionThrea heavyEvictionOverheadCoefficient = Math.max(heavyEvictionOverheadCoefficient, 0.001f); this.heavyEvictionOverheadCoefficient = heavyEvictionOverheadCoefficient; - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, STAT_THREAD_PERIOD, TimeUnit.SECONDS); @@ -474,13 +451,13 @@ public int getCacheDataBlockPercent() { } /** - * The block cached in LruAdaptiveBlockCache will always be an heap block: on the one side, - * the heap access will be more faster then off-heap, the small index block or meta block - * cached in CombinedBlockCache will benefit a lot. on other side, the LruAdaptiveBlockCache - * size is always calculated based on the total heap size, if caching an off-heap block in - * LruAdaptiveBlockCache, the heap size will be messed up. Here we will clone the block into an - * heap block if it's an off-heap block, otherwise just use the original block. The key point is - * maintain the refCnt of the block (HBASE-22127):
          + * The block cached in LruAdaptiveBlockCache will always be an heap block: on the one side, the + * heap access will be more faster then off-heap, the small index block or meta block cached in + * CombinedBlockCache will benefit a lot. on other side, the LruAdaptiveBlockCache size is always + * calculated based on the total heap size, if caching an off-heap block in LruAdaptiveBlockCache, + * the heap size will be messed up. Here we will clone the block into an heap block if it's an + * off-heap block, otherwise just use the original block. The key point is maintain the refCnt of + * the block (HBASE-22127):
          * 1. if cache the cloned heap block, its refCnt is an totally new one, it's easy to handle;
          * 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's * reservoir, if both RPC and LruAdaptiveBlockCache release the block, then it can be garbage @@ -507,9 +484,8 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { *

          * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547) * this can happen, for which we compare the buffer contents. - * * @param cacheKey block's cache key - * @param buf block buffer + * @param buf block buffer * @param inMemory if block is in-memory */ @Override @@ -532,18 +508,15 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) // big this can make the logs way too noisy. // So we log 2% if (stats.failInsert() % 50 == 0) { - LOG.warn("Trying to cache too large a block " - + cacheKey.getHfileName() + " @ " - + cacheKey.getOffset() - + " is " + buf.heapSize() - + " which is larger than " + maxBlockSize); + LOG.warn("Trying to cache too large a block " + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + " is " + buf.heapSize() + " which is larger than " + + maxBlockSize); } return; } LruCachedBlock cb = map.get(cacheKey); - if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, - cacheKey, buf)) { + if (cb != null && !BlockCacheUtil.shouldReplaceExistingCacheBlock(this, cacheKey, buf)) { return; } long currentSize = size.get(); @@ -553,9 +526,9 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) stats.failInsert(); if (LOG.isTraceEnabled()) { LOG.trace("LruAdaptiveBlockCache current size " + StringUtils.byteDesc(currentSize) - + " has exceeded acceptable size " + StringUtils.byteDesc(currentAcceptableSize) + "." - + " The hard limit size is " + StringUtils.byteDesc(hardLimitSize) - + ", failed to put cacheKey:" + cacheKey + " into LruAdaptiveBlockCache."); + + " has exceeded acceptable size " + StringUtils.byteDesc(currentAcceptableSize) + "." + + " The hard limit size is " + StringUtils.byteDesc(hardLimitSize) + + ", failed to put cacheKey:" + cacheKey + " into LruAdaptiveBlockCache."); } if (!evictionInProgress) { runEviction(); @@ -581,20 +554,20 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) } /** - * Sanity-checking for parity between actual block cache content and metrics. - * Intended only for use with TRACE level logging and -ea JVM. + * Sanity-checking for parity between actual block cache content and metrics. Intended only for + * use with TRACE level logging and -ea JVM. */ private static void assertCounterSanity(long mapSize, long counterVal) { if (counterVal < 0) { - LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + + ", mapSize=" + mapSize); return; } if (mapSize < Integer.MAX_VALUE) { double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); if (pct_diff > 0.05) { - LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("delta between reported and actual size > 5%. 
counterVal=" + counterVal + + ", mapSize=" + mapSize); } } } @@ -615,9 +588,8 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { } /** - * Helper function that updates the local size counter and also updates any - * per-cf or per-blocktype metrics it can discern from given - * {@link LruCachedBlock} + * Helper function that updates the local size counter and also updates any per-cf or + * per-blocktype metrics it can discern from given {@link LruCachedBlock} */ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { long heapsize = cb.heapSize(); @@ -633,19 +605,16 @@ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { /** * Get the buffer of the block with the specified name. - * - * @param cacheKey block's cache key - * @param caching true if the caller caches blocks on cache misses - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check - * locking) + * @param cacheKey block's cache key + * @param caching true if the caller caches blocks on cache misses + * @param repeat Whether this is a repeat lookup for the same block (used to avoid double counting + * cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not - * * @return buffer of specified cache key, or null if not in cache */ @Override public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, - boolean updateCacheMetrics) { + boolean updateCacheMetrics) { LruCachedBlock cb = map.computeIfPresent(cacheKey, (key, val) -> { // It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside // this block. because if retain outside the map#computeIfPresent, the evictBlock may remove @@ -683,7 +652,6 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repea /** * Whether the cache contains block with specified cacheKey - * * @return true if contains the block */ @Override @@ -698,19 +666,17 @@ public boolean evictBlock(BlockCacheKey cacheKey) { } /** - * Evicts all blocks for a specific HFile. This is an - * expensive operation implemented as a linear-time search through all blocks - * in the cache. Ideally this should be a search in a log-access-time map. - * + * Evicts all blocks for a specific HFile. This is an expensive operation implemented as a + * linear-time search through all blocks in the cache. Ideally this should be a search in a + * log-access-time map. *
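The getBlock() comment above insists that the retain must happen inside map#computeIfPresent; a small sketch of that pattern with simplified types (the real cache works on Cacheable blocks and HBase's RefCnt, which are not shown here):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

final class RefCountedLookup {
  static final class Block {
    final AtomicInteger refCnt = new AtomicInteger(1);
    Block retain() { refCnt.incrementAndGet(); return this; }
    void release() { refCnt.decrementAndGet(); }
  }

  private final ConcurrentHashMap<String, Block> map = new ConcurrentHashMap<>();

  Block get(String key) {
    // The retain happens atomically with the lookup: computeIfPresent holds the entry's lock,
    // so a concurrent evictor cannot release the block between the lookup and the retain.
    return map.computeIfPresent(key, (k, block) -> block.retain());
  }
}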

          * This is used for evict-on-close to remove all blocks of a specific HFile. - * * @return the number of blocks evicted */ @Override public int evictBlocksByHfileName(String hfileName) { int numEvicted = (int) map.keySet().stream().filter(key -> key.getHfileName().equals(hfileName)) - .filter(this::evictBlock).count(); + .filter(this::evictBlock).count(); if (victimHandler != null) { numEvicted += victimHandler.evictBlocksByHfileName(hfileName); } @@ -718,11 +684,9 @@ public int evictBlocksByHfileName(String hfileName) { } /** - * Evict the block, and it will be cached by the victim handler if exists && - * block may be read again later - * - * @param evictedByEvictionProcess true if the given block is evicted by - * EvictionThread + * Evict the block, and it will be cached by the victim handler if exists && block may be + * read again later + * @param evictedByEvictionProcess true if the given block is evicted by EvictionThread * @return the heap size of evicted block */ protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) { @@ -774,11 +738,8 @@ long getOverhead() { } /** - * Eviction method. - * - * Evict items in order of use, allowing delete items - * which haven't been used for the longest amount of time. - * + * Eviction method. Evict items in order of use, allowing delete items which haven't been used for + * the longest amount of time. * @return how many bytes were freed */ long evict() { @@ -796,9 +757,8 @@ long evict() { bytesToFree = currentSize - minSize(); if (LOG.isTraceEnabled()) { - LOG.trace("Block cache LRU eviction started; Attempting to free " + - StringUtils.byteDesc(bytesToFree) + " of total=" + - StringUtils.byteDesc(currentSize)); + LOG.trace("Block cache LRU eviction started; Attempting to free " + + StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize)); } if (bytesToFree <= 0) { @@ -806,12 +766,9 @@ long evict() { } // Instantiate priority buckets - BlockBucket bucketSingle - = new BlockBucket("single", bytesToFree, blockSize, singleSize()); - BlockBucket bucketMulti - = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); - BlockBucket bucketMemory - = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); + BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize()); + BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize()); + BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize()); // Scan entire map putting into appropriate buckets for (LruCachedBlock cachedBlock : map.values()) { @@ -841,13 +798,13 @@ long evict() { bytesFreed = bucketSingle.free(s); bytesFreed += bucketMulti.free(m); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " from single and multi buckets"); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " from single and multi buckets"); } bytesFreed += bucketMemory.free(bytesToFree - bytesFreed); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " total from all three buckets "); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " total from all three buckets "); } } else { // this means no need to evict block in memory bucket, @@ -884,7 +841,7 @@ long evict() { long overflow = bucket.overflow(); if (overflow > 0) { long bucketBytesToFree = - Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets); + Math.min(overflow, (bytesToFree - bytesFreed) / 
remainingBuckets); bytesFreed += bucket.free(bucketBytesToFree); } remainingBuckets--; @@ -894,12 +851,11 @@ long evict() { long single = bucketSingle.totalSize(); long multi = bucketMulti.totalSize(); long memory = bucketMemory.totalSize(); - LOG.trace("Block cache LRU eviction completed; " + - "freed=" + StringUtils.byteDesc(bytesFreed) + ", " + - "total=" + StringUtils.byteDesc(this.size.get()) + ", " + - "single=" + StringUtils.byteDesc(single) + ", " + - "multi=" + StringUtils.byteDesc(multi) + ", " + - "memory=" + StringUtils.byteDesc(memory)); + LOG.trace( + "Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed) + + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single=" + + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", " + + "memory=" + StringUtils.byteDesc(memory)); } } finally { stats.evict(); @@ -911,26 +867,21 @@ long evict() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("blockCount", getBlockCount()) - .add("currentSize", StringUtils.byteDesc(getCurrentSize())) - .add("freeSize", StringUtils.byteDesc(getFreeSize())) - .add("maxSize", StringUtils.byteDesc(getMaxSize())) - .add("heapSize", StringUtils.byteDesc(heapSize())) - .add("minSize", StringUtils.byteDesc(minSize())) - .add("minFactor", minFactor) - .add("multiSize", StringUtils.byteDesc(multiSize())) - .add("multiFactor", multiFactor) - .add("singleSize", StringUtils.byteDesc(singleSize())) - .add("singleFactor", singleFactor) - .toString(); + return MoreObjects.toStringHelper(this).add("blockCount", getBlockCount()) + .add("currentSize", StringUtils.byteDesc(getCurrentSize())) + .add("freeSize", StringUtils.byteDesc(getFreeSize())) + .add("maxSize", StringUtils.byteDesc(getMaxSize())) + .add("heapSize", StringUtils.byteDesc(heapSize())) + .add("minSize", StringUtils.byteDesc(minSize())).add("minFactor", minFactor) + .add("multiSize", StringUtils.byteDesc(multiSize())).add("multiFactor", multiFactor) + .add("singleSize", StringUtils.byteDesc(singleSize())).add("singleFactor", singleFactor) + .toString(); } /** - * Used to group blocks into priority buckets. There will be a BlockBucket - * for each priority (single, multi, memory). Once bucketed, the eviction - * algorithm takes the appropriate number of elements out of each according - * to configuration parameters and their relatives sizes. + * Used to group blocks into priority buckets. There will be a BlockBucket for each priority + * (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate number of + * elements out of each according to configuration parameters and their relatives sizes. */ private class BlockBucket implements Comparable { @@ -987,7 +938,7 @@ public boolean equals(Object that) { if (!(that instanceof BlockBucket)) { return false; } - return compareTo((BlockBucket)that) == 0; + return compareTo((BlockBucket) that) == 0; } @Override @@ -997,17 +948,14 @@ public int hashCode() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name) - .add("totalSize", StringUtils.byteDesc(totalSize)) - .add("bucketSize", StringUtils.byteDesc(bucketSize)) - .toString(); + return MoreObjects.toStringHelper(this).add("name", name) + .add("totalSize", StringUtils.byteDesc(totalSize)) + .add("bucketSize", StringUtils.byteDesc(bucketSize)).toString(); } } /** * Get the maximum size of this cache. 
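The priority-bucket description above can be illustrated with a simplified, self-contained eviction pass; the real evict() frees proportionally from whichever buckets overflow their configured share, whereas this sketch simply drains the buckets in ascending priority until enough bytes are freed.

import java.util.List;
import java.util.PriorityQueue;

final class BucketedEvictionSketch {
  /** Blocks are reduced to their heap sizes and grouped into three priority buckets. */
  static long freeFromBuckets(PriorityQueue<Long> single, PriorityQueue<Long> multi,
      PriorityQueue<Long> memory, long bytesToFree) {
    long freed = 0;
    // Evict the least valuable buckets first: single-access, then multi-access, then in-memory.
    for (PriorityQueue<Long> bucket : List.of(single, multi, memory)) {
      while (freed < bytesToFree && !bucket.isEmpty()) {
        freed += bucket.poll(); // remove one block from this bucket and count its bytes as freed
      }
    }
    return freed;
  }
}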
- * * @return max size in bytes */ @@ -1051,10 +999,9 @@ EvictionThread getEvictionThread() { } /* - * Eviction thread. Sits in waiting state until an eviction is triggered - * when the cache size grows above the acceptable level.

          - * - * Thread is triggered into action by {@link LruAdaptiveBlockCache#runEviction()} + * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows + * above the acceptable level.

          Thread is triggered into action by {@link + * LruAdaptiveBlockCache#runEviction()} */ static class EvictionThread extends Thread { @@ -1079,7 +1026,7 @@ public void run() { while (this.go) { synchronized (this) { try { - this.wait(1000 * 10/*Don't wait for ever*/); + this.wait(1000 * 10/* Don't wait for ever */); } catch (InterruptedException e) { LOG.warn("Interrupted eviction thread ", e); Thread.currentThread().interrupt(); @@ -1089,18 +1036,15 @@ public void run() { if (cache == null) { break; } - freedSumMb += cache.evict()/1024/1024; + freedSumMb += cache.evict() / 1024 / 1024; /* - * Sometimes we are reading more data than can fit into BlockCache - * and it is the cause a high rate of evictions. - * This in turn leads to heavy Garbage Collector works. - * So a lot of blocks put into BlockCache but never read, - * but spending a lot of CPU resources. - * Here we will analyze how many bytes were freed and decide - * decide whether the time has come to reduce amount of caching blocks. - * It help avoid put too many blocks into BlockCache - * when evict() works very active and save CPU for other jobs. - * More delails: https://issues.apache.org/jira/browse/HBASE-23887 + * Sometimes we are reading more data than can fit into BlockCache and it is the cause a + * high rate of evictions. This in turn leads to heavy Garbage Collector works. So a lot of + * blocks put into BlockCache but never read, but spending a lot of CPU resources. Here we + * will analyze how many bytes were freed and decide decide whether the time has come to + * reduce amount of caching blocks. It help avoid put too many blocks into BlockCache when + * evict() works very active and save CPU for other jobs. More delails: + * https://issues.apache.org/jira/browse/HBASE-23887 */ // First of all we have to control how much time @@ -1116,7 +1060,7 @@ public void run() { // We will use this information to decide, // how to change percent of caching blocks. freedDataOverheadPercent = - (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100; + (int) (freedSumMb * 100 / cache.heavyEvictionMbSizeLimit) - 100; if (freedSumMb > cache.heavyEvictionMbSizeLimit) { // Now we are in the situation when we are above the limit // But maybe we are going to ignore it because it will end quite soon @@ -1136,7 +1080,7 @@ public void run() { // But when reading is changing we can adjust to it and set // the coefficient to lower value. int change = - (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); + (int) (freedDataOverheadPercent * cache.heavyEvictionOverheadCoefficient); // But practice shows that 15% of reducing is quite enough. // We are not greedy (it could lead to premature exit). 
change = Math.min(15, change); @@ -1165,11 +1109,10 @@ public void run() { cache.cacheDataBlockPercent = 100; } } - LOG.info("BlockCache evicted (MB): {}, overhead (%): {}, " + - "heavy eviction counter: {}, " + - "current caching DataBlock (%): {}", - freedSumMb, freedDataOverheadPercent, - heavyEvictionCount, cache.cacheDataBlockPercent); + LOG.info( + "BlockCache evicted (MB): {}, overhead (%): {}, " + "heavy eviction counter: {}, " + + "current caching DataBlock (%): {}", + freedSumMb, freedDataOverheadPercent, heavyEvictionCount, cache.cacheDataBlockPercent); freedSumMb = 0; startTime = stopTime; @@ -1177,8 +1120,8 @@ public void run() { } } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="This is what we want") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "This is what we want") public void evict() { synchronized (this) { this.notifyAll(); @@ -1199,7 +1142,7 @@ boolean isEnteringRun() { } /* - * Statistics thread. Periodically prints the cache statistics to the log. + * Statistics thread. Periodically prints the cache statistics to the log. */ static class StatisticsThread extends Thread { @@ -1221,28 +1164,25 @@ public void logStats() { // Log size long totalSize = heapSize(); long freeSize = maxSize - totalSize; - LruAdaptiveBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "max=" + StringUtils.byteDesc(this.maxSize) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? - "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount() + ", " + - "evictedPerRun=" + stats.evictedPerEviction()); + LruAdaptiveBlockCache.LOG + .info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" + + StringUtils.byteDesc(freeSize) + ", " + "max=" + StringUtils.byteDesc(this.maxSize) + + ", " + "blockCount=" + getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + + ", " + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + + (stats.getHitCount() == 0 ? "0" + : (StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ")) + + ", " + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + "cachingHits=" + + stats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (stats.getHitCachingCount() == 0 ? "0," + : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + "evicted=" + stats.getEvictedCount() + + ", " + "evictedPerRun=" + stats.evictedPerEviction()); } /** * Get counter statistics for this cache. - * - *
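The adaptive part of this eviction thread (HBASE-23887) boils down to adjusting the percentage of data blocks admitted to the cache based on how heavily evict() is running. A minimal sketch of that idea follows; the real thread also waits for several consecutive heavy periods before reacting, and the recovery step of +5 per period is an illustrative assumption.

final class AdaptiveCachePercentSketch {
  static int adjust(int cacheDataBlockPercent, long freedMbThisPeriod, long heavyEvictionMbLimit,
      double overheadCoefficient) {
    if (freedMbThisPeriod > heavyEvictionMbLimit) {
      // Evictions are "heavy": the cache is churning more data than it keeps, so admit fewer
      // data blocks. The reduction grows with the overhead but is capped (the real code caps at 15).
      int overheadPercent = (int) (freedMbThisPeriod * 100 / heavyEvictionMbLimit) - 100;
      int change = Math.min(15, (int) (overheadPercent * overheadCoefficient));
      return Math.max(1, cacheDataBlockPercent - change);
    }
    // Evictions calmed down: gradually allow more data blocks back into the cache, up to 100%.
    return Math.min(100, cacheDataBlockPercent + 5);
  }
}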

          Includes: total accesses, hits, misses, evicted blocks, and runs - * of the eviction processes. + *

          + * Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes. */ @Override public CacheStats getStats() { @@ -1250,7 +1190,7 @@ public CacheStats getStats() { } public final static long CACHE_FIXED_OVERHEAD = - ClassSize.estimateBase(LruAdaptiveBlockCache.class, false); + ClassSize.estimateBase(LruAdaptiveBlockCache.class, false); @Override public long heapSize() { @@ -1260,8 +1200,8 @@ public long heapSize() { private static long calculateOverhead(long maxSize, long blockSize, int concurrency) { // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP - + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) - + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); + + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) + + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); } @Override @@ -1339,7 +1279,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { if (obj instanceof CachedBlock) { - CachedBlock cb = (CachedBlock)obj; + CachedBlock cb = (CachedBlock) obj; return compareTo(cb) == 0; } else { return false; @@ -1358,17 +1298,21 @@ public void remove() { // Simple calculators of sizes given factors and maxSize long acceptableSize() { - return (long)Math.floor(this.maxSize * this.acceptableFactor); + return (long) Math.floor(this.maxSize * this.acceptableFactor); } + private long minSize() { - return (long)Math.floor(this.maxSize * this.minFactor); + return (long) Math.floor(this.maxSize * this.minFactor); } + private long singleSize() { - return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.singleFactor * this.minFactor); } + private long multiSize() { - return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.multiFactor * this.minFactor); } + private long memorySize() { return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 3e5ba1d19c56..bd8d948e58d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -114,14 +114,14 @@ public class LruBlockCache implements FirstLevelBlockCache { "hbase.lru.blockcache.memory.percentage"; /** - * Configuration key to force data-block always (except in-memory are too much) - * cached in memory for in-memory hfile, unlike inMemory, which is a column-family - * configuration, inMemoryForceMode is a cluster-wide configuration + * Configuration key to force data-block always (except in-memory are too much) cached in memory + * for in-memory hfile, unlike inMemory, which is a column-family configuration, inMemoryForceMode + * is a cluster-wide configuration */ private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME = "hbase.lru.rs.inmemoryforcemode"; - /* Default Configuration Parameters*/ + /* Default Configuration Parameters */ /* Backing Concurrent Map Configuration */ static final float DEFAULT_LOAD_FACTOR = 0.75f; @@ -167,8 +167,8 @@ public class LruBlockCache implements FirstLevelBlockCache { /** Statistics thread schedule pool (for heavy debugging, could remove) */ private transient final ScheduledExecutorService scheduleThreadPool = - Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() - .setNameFormat("LruBlockCacheStatsExecutor").setDaemon(true).build()); + Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() + .setNameFormat("LruBlockCacheStatsExecutor").setDaemon(true).build()); /** Current size of cache */ private final AtomicLong size; @@ -220,19 +220,16 @@ public class LruBlockCache implements FirstLevelBlockCache { /** * Where to send victims (blocks evicted/missing from the cache). This is used only when we use an - * external cache as L2. - * Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache + * external cache as L2. Note: See org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache */ private transient BlockCache victimHandler = null; /** - * Default constructor. Specify maximum size and expected average block - * size (approximation is fine). - * - *

          All other factors will be calculated based on defaults specified in - * this class. - * - * @param maxSize maximum size of cache, in bytes + * Default constructor. Specify maximum size and expected average block size (approximation is + * fine). + *

          + * All other factors will be calculated based on defaults specified in this class. + * @param maxSize maximum size of cache, in bytes * @param blockSize approximate size of each block, in bytes */ public LruBlockCache(long maxSize, long blockSize) { @@ -240,33 +237,25 @@ public LruBlockCache(long maxSize, long blockSize) { } /** - * Constructor used for testing. Allows disabling of the eviction thread. + * Constructor used for testing. Allows disabling of the eviction thread. */ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, - DEFAULT_MIN_FACTOR, DEFAULT_ACCEPTABLE_FACTOR, - DEFAULT_SINGLE_FACTOR, - DEFAULT_MULTI_FACTOR, - DEFAULT_MEMORY_FACTOR, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, - false, - DEFAULT_MAX_BLOCK_SIZE); + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, DEFAULT_MIN_FACTOR, + DEFAULT_ACCEPTABLE_FACTOR, DEFAULT_SINGLE_FACTOR, DEFAULT_MULTI_FACTOR, + DEFAULT_MEMORY_FACTOR, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR, false, DEFAULT_MAX_BLOCK_SIZE); } public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) { - this(maxSize, blockSize, evictionThread, - (int) Math.ceil(1.2 * maxSize / blockSize), - DEFAULT_LOAD_FACTOR, - DEFAULT_CONCURRENCY_LEVEL, + this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR), conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR), conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR), conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR), conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, - DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), + DEFAULT_HARD_CAPACITY_LIMIT_FACTOR), conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE), conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)); } @@ -276,30 +265,28 @@ public LruBlockCache(long maxSize, long blockSize, Configuration conf) { } /** - * Configurable constructor. Use this constructor if not using defaults. - * - * @param maxSize maximum size of this cache, in bytes - * @param blockSize expected average size of blocks, in bytes - * @param evictionThread whether to run evictions in a bg thread or not - * @param mapInitialSize initial size of backing ConcurrentHashMap - * @param mapLoadFactor initial load factor of backing ConcurrentHashMap + * Configurable constructor. Use this constructor if not using defaults. 
+ * @param maxSize maximum size of this cache, in bytes + * @param blockSize expected average size of blocks, in bytes + * @param evictionThread whether to run evictions in a bg thread or not + * @param mapInitialSize initial size of backing ConcurrentHashMap + * @param mapLoadFactor initial load factor of backing ConcurrentHashMap * @param mapConcurrencyLevel initial concurrency factor for backing CHM - * @param minFactor percentage of total size that eviction will evict until - * @param acceptableFactor percentage of total size that triggers eviction - * @param singleFactor percentage of total size for single-access blocks - * @param multiFactor percentage of total size for multiple-access blocks - * @param memoryFactor percentage of total size for in-memory blocks + * @param minFactor percentage of total size that eviction will evict until + * @param acceptableFactor percentage of total size that triggers eviction + * @param singleFactor percentage of total size for single-access blocks + * @param multiFactor percentage of total size for multiple-access blocks + * @param memoryFactor percentage of total size for in-memory blocks */ - public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, - int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, - float minFactor, float acceptableFactor, float singleFactor, - float multiFactor, float memoryFactor, float hardLimitFactor, + public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, + float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor, + float singleFactor, float multiFactor, float memoryFactor, float hardLimitFactor, boolean forceInMemory, long maxBlockSize) { this.maxBlockSize = maxBlockSize; - if(singleFactor + multiFactor + memoryFactor != 1 || - singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) { - throw new IllegalArgumentException("Single, multi, and memory factors " + - " should be non-negative and total 1.0"); + if (singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0 + || memoryFactor < 0) { + throw new IllegalArgumentException( + "Single, multi, and memory factors " + " should be non-negative and total 1.0"); } if (minFactor >= acceptableFactor) { throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor"); @@ -330,10 +317,10 @@ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, } else { this.evictionThread = null; } - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), STAT_THREAD_PERIOD, - STAT_THREAD_PERIOD, TimeUnit.SECONDS); + STAT_THREAD_PERIOD, TimeUnit.SECONDS); } @Override @@ -385,9 +372,8 @@ private Cacheable asReferencedHeapBlock(Cacheable buf) { *
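As a usage note for the constructors above, a minimal example of the two-argument constructor shown in this patch; the sizes are illustrative only.

import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

public class LruBlockCacheExample {
  public static void main(String[] args) {
    // A 1 GB on-heap cache with an expected average block size of 64 KB. All other factors
    // (min, acceptable, single, multi, memory) fall back to the class defaults; note that the
    // single/multi/memory factors must stay non-negative and sum to 1.0.
    LruBlockCache cache = new LruBlockCache(1024L * 1024 * 1024, 64L * 1024);
    System.out.println(cache); // toString() reports current/free/max sizes and the factors
  }
}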

          * It is assumed this will NOT be called on an already cached block. In rare cases (HBASE-8547) * this can happen, for which we compare the buffer contents. - * * @param cacheKey block's cache key - * @param buf block buffer + * @param buf block buffer * @param inMemory if block is in-memory */ @Override @@ -397,11 +383,9 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) // big this can make the logs way too noisy. // So we log 2% if (stats.failInsert() % 50 == 0) { - LOG.warn("Trying to cache too large a block " - + cacheKey.getHfileName() + " @ " - + cacheKey.getOffset() - + " is " + buf.heapSize() - + " which is larger than " + maxBlockSize); + LOG.warn("Trying to cache too large a block " + cacheKey.getHfileName() + " @ " + + cacheKey.getOffset() + " is " + buf.heapSize() + " which is larger than " + + maxBlockSize); } return; } @@ -417,9 +401,9 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) stats.failInsert(); if (LOG.isTraceEnabled()) { LOG.trace("LruBlockCache current size " + StringUtils.byteDesc(currentSize) - + " has exceeded acceptable size " + StringUtils.byteDesc(currentAcceptableSize) + "." - + " The hard limit size is " + StringUtils.byteDesc(hardLimitSize) - + ", failed to put cacheKey:" + cacheKey + " into LruBlockCache."); + + " has exceeded acceptable size " + StringUtils.byteDesc(currentAcceptableSize) + "." + + " The hard limit size is " + StringUtils.byteDesc(hardLimitSize) + + ", failed to put cacheKey:" + cacheKey + " into LruBlockCache."); } if (!evictionInProgress) { runEviction(); @@ -445,20 +429,20 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) } /** - * Sanity-checking for parity between actual block cache content and metrics. - * Intended only for use with TRACE level logging and -ea JVM. + * Sanity-checking for parity between actual block cache content and metrics. Intended only for + * use with TRACE level logging and -ea JVM. */ private static void assertCounterSanity(long mapSize, long counterVal) { if (counterVal < 0) { - LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + + ", mapSize=" + mapSize); return; } if (mapSize < Integer.MAX_VALUE) { double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); if (pct_diff > 0.05) { - LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + - ", mapSize=" + mapSize); + LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + + ", mapSize=" + mapSize); } } } @@ -479,9 +463,8 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { } /** - * Helper function that updates the local size counter and also updates any - * per-cf or per-blocktype metrics it can discern from given - * {@link LruCachedBlock} + * Helper function that updates the local size counter and also updates any per-cf or + * per-blocktype metrics it can discern from given {@link LruCachedBlock} */ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { long heapsize = cb.heapSize(); @@ -497,14 +480,11 @@ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { /** * Get the buffer of the block with the specified name. 
- * - * @param cacheKey block's cache key - * @param caching true if the caller caches blocks on cache misses - * @param repeat Whether this is a repeat lookup for the same block - * (used to avoid double counting cache misses when doing double-check - * locking) + * @param cacheKey block's cache key + * @param caching true if the caller caches blocks on cache misses + * @param repeat Whether this is a repeat lookup for the same block (used to avoid double counting + * cache misses when doing double-check locking) * @param updateCacheMetrics Whether to update cache metrics or not - * * @return buffer of specified cache key, or null if not in cache */ @Override @@ -547,7 +527,6 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repea /** * Whether the cache contains block with specified cacheKey - * * @return true if contains the block */ @Override @@ -562,13 +541,11 @@ public boolean evictBlock(BlockCacheKey cacheKey) { } /** - * Evicts all blocks for a specific HFile. This is an - * expensive operation implemented as a linear-time search through all blocks - * in the cache. Ideally this should be a search in a log-access-time map. - * + * Evicts all blocks for a specific HFile. This is an expensive operation implemented as a + * linear-time search through all blocks in the cache. Ideally this should be a search in a + * log-access-time map. *

          * This is used for evict-on-close to remove all blocks of a specific HFile. - * * @return the number of blocks evicted */ @Override @@ -588,11 +565,9 @@ public int evictBlocksByHfileName(String hfileName) { } /** - * Evict the block, and it will be cached by the victim handler if exists && - * block may be read again later - * - * @param evictedByEvictionProcess true if the given block is evicted by - * EvictionThread + * Evict the block, and it will be cached by the victim handler if exists && block may be + * read again later + * @param evictedByEvictionProcess true if the given block is evicted by EvictionThread * @return the heap size of evicted block */ protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) { @@ -659,9 +634,8 @@ void evict() { long bytesToFree = currentSize - minSize(); if (LOG.isTraceEnabled()) { - LOG.trace("Block cache LRU eviction started; Attempting to free " + - StringUtils.byteDesc(bytesToFree) + " of total=" + - StringUtils.byteDesc(currentSize)); + LOG.trace("Block cache LRU eviction started; Attempting to free " + + StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize)); } if (bytesToFree <= 0) { @@ -701,13 +675,13 @@ void evict() { bytesFreed = bucketSingle.free(s); bytesFreed += bucketMulti.free(m); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " from single and multi buckets"); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " from single and multi buckets"); } bytesFreed += bucketMemory.free(bytesToFree - bytesFreed); if (LOG.isTraceEnabled()) { - LOG.trace("freed " + StringUtils.byteDesc(bytesFreed) + - " total from all three buckets "); + LOG.trace( + "freed " + StringUtils.byteDesc(bytesFreed) + " total from all three buckets "); } } else { // this means no need to evict block in memory bucket, @@ -754,12 +728,11 @@ void evict() { long single = bucketSingle.totalSize(); long multi = bucketMulti.totalSize(); long memory = bucketMemory.totalSize(); - LOG.trace("Block cache LRU eviction completed; " + - "freed=" + StringUtils.byteDesc(bytesFreed) + ", " + - "total=" + StringUtils.byteDesc(this.size.get()) + ", " + - "single=" + StringUtils.byteDesc(single) + ", " + - "multi=" + StringUtils.byteDesc(multi) + ", " + - "memory=" + StringUtils.byteDesc(memory)); + LOG.trace( + "Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed) + + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single=" + + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", " + + "memory=" + StringUtils.byteDesc(memory)); } } finally { stats.evict(); @@ -770,26 +743,21 @@ void evict() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("blockCount", getBlockCount()) - .add("currentSize", StringUtils.byteDesc(getCurrentSize())) - .add("freeSize", StringUtils.byteDesc(getFreeSize())) - .add("maxSize", StringUtils.byteDesc(getMaxSize())) - .add("heapSize", StringUtils.byteDesc(heapSize())) - .add("minSize", StringUtils.byteDesc(minSize())) - .add("minFactor", minFactor) - .add("multiSize", StringUtils.byteDesc(multiSize())) - .add("multiFactor", multiFactor) - .add("singleSize", StringUtils.byteDesc(singleSize())) - .add("singleFactor", singleFactor) - .toString(); + return MoreObjects.toStringHelper(this).add("blockCount", getBlockCount()) + .add("currentSize", StringUtils.byteDesc(getCurrentSize())) + .add("freeSize", StringUtils.byteDesc(getFreeSize())) 
+ .add("maxSize", StringUtils.byteDesc(getMaxSize())) + .add("heapSize", StringUtils.byteDesc(heapSize())) + .add("minSize", StringUtils.byteDesc(minSize())).add("minFactor", minFactor) + .add("multiSize", StringUtils.byteDesc(multiSize())).add("multiFactor", multiFactor) + .add("singleSize", StringUtils.byteDesc(singleSize())).add("singleFactor", singleFactor) + .toString(); } /** - * Used to group blocks into priority buckets. There will be a BlockBucket - * for each priority (single, multi, memory). Once bucketed, the eviction - * algorithm takes the appropriate number of elements out of each according - * to configuration parameters and their relatives sizes. + * Used to group blocks into priority buckets. There will be a BlockBucket for each priority + * (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate number of + * elements out of each according to configuration parameters and their relatives sizes. */ private class BlockBucket implements Comparable { @@ -846,7 +814,7 @@ public boolean equals(Object that) { if (that == null || !(that instanceof BlockBucket)) { return false; } - return compareTo((BlockBucket)that) == 0; + return compareTo((BlockBucket) that) == 0; } @Override @@ -856,17 +824,14 @@ public int hashCode() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name) - .add("totalSize", StringUtils.byteDesc(totalSize)) - .add("bucketSize", StringUtils.byteDesc(bucketSize)) - .toString(); + return MoreObjects.toStringHelper(this).add("name", name) + .add("totalSize", StringUtils.byteDesc(totalSize)) + .add("bucketSize", StringUtils.byteDesc(bucketSize)).toString(); } } /** * Get the maximum size of this cache. - * * @return max size in bytes */ @@ -910,10 +875,9 @@ EvictionThread getEvictionThread() { } /* - * Eviction thread. Sits in waiting state until an eviction is triggered - * when the cache size grows above the acceptable level.

          - * - * Thread is triggered into action by {@link LruBlockCache#runEviction()} + * Eviction thread. Sits in waiting state until an eviction is triggered when the cache size grows + * above the acceptable level.

          Thread is triggered into action by {@link + * LruBlockCache#runEviction()} */ static class EvictionThread extends Thread { @@ -934,7 +898,7 @@ public void run() { while (this.go) { synchronized (this) { try { - this.wait(1000 * 10/*Don't wait for ever*/); + this.wait(1000 * 10/* Don't wait for ever */); } catch (InterruptedException e) { LOG.warn("Interrupted eviction thread ", e); Thread.currentThread().interrupt(); @@ -949,8 +913,8 @@ public void run() { } } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NN_NAKED_NOTIFY", - justification="This is what we want") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", + justification = "This is what we want") public void evict() { synchronized (this) { this.notifyAll(); @@ -975,7 +939,7 @@ boolean isEnteringRun() { } /* - * Statistics thread. Periodically prints the cache statistics to the log. + * Statistics thread. Periodically prints the cache statistics to the log. */ static class StatisticsThread extends Thread { @@ -997,28 +961,24 @@ public void logStats() { // Log size long totalSize = heapSize(); long freeSize = maxSize - totalSize; - LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "max=" + StringUtils.byteDesc(this.maxSize) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? - "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2)+ ", ")) + ", " + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount() + ", " + - "evictedPerRun=" + stats.evictedPerEviction()); + LruBlockCache.LOG.info("totalSize=" + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" + + StringUtils.byteDesc(freeSize) + ", " + "max=" + StringUtils.byteDesc(this.maxSize) + ", " + + "blockCount=" + getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + + (stats.getHitCount() == 0 ? "0" + : (StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ")) + + ", " + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + "cachingHits=" + + stats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (stats.getHitCachingCount() == 0 ? "0," + : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + "evicted=" + stats.getEvictedCount() + + ", " + "evictedPerRun=" + stats.evictedPerEviction()); } /** * Get counter statistics for this cache. - * - *

          Includes: total accesses, hits, misses, evicted blocks, and runs - * of the eviction processes. + *

          + * Includes: total accesses, hits, misses, evicted blocks, and runs of the eviction processes. */ @Override public CacheStats getStats() { @@ -1036,8 +996,8 @@ public long heapSize() { private static long calculateOverhead(long maxSize, long blockSize, int concurrency) { // FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP - + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) - + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); + + ((long) Math.ceil(maxSize * 1.2 / blockSize) * ClassSize.CONCURRENT_HASHMAP_ENTRY) + + ((long) concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT); } @Override @@ -1115,7 +1075,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { if (obj instanceof CachedBlock) { - CachedBlock cb = (CachedBlock)obj; + CachedBlock cb = (CachedBlock) obj; return compareTo(cb) == 0; } else { return false; @@ -1134,17 +1094,21 @@ public void remove() { // Simple calculators of sizes given factors and maxSize long acceptableSize() { - return (long)Math.floor(this.maxSize * this.acceptableFactor); + return (long) Math.floor(this.maxSize * this.acceptableFactor); } + private long minSize() { - return (long)Math.floor(this.maxSize * this.minFactor); + return (long) Math.floor(this.maxSize * this.minFactor); } + private long singleSize() { - return (long)Math.floor(this.maxSize * this.singleFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.singleFactor * this.minFactor); } + private long multiSize() { - return (long)Math.floor(this.maxSize * this.multiFactor * this.minFactor); + return (long) Math.floor(this.maxSize * this.multiFactor * this.minFactor); } + private long memorySize() { return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor); } @@ -1182,7 +1146,6 @@ public void clearCache() { /** * Used in testing. May be very inefficient. - * * @return the set of cached file names */ SortedSet getCachedFileNamesForTest() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java index 32a277d46266..143d2df800f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,25 +17,24 @@ */ package org.apache.hadoop.hbase.io.hfile; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; /** * Represents an entry in the {@link LruBlockCache}. - * - *

          Makes the block memory-aware with {@link HeapSize} and Comparable - * to sort by access time for the LRU. It also takes care of priority by - * either instantiating as in-memory or handling the transition from single - * to multiple access. + *

          + * Makes the block memory-aware with {@link HeapSize} and Comparable to sort by access time for the + * LRU. It also takes care of priority by either instantiating as in-memory or handling the + * transition from single to multiple access. */ @InterfaceAudience.Private public class LruCachedBlock implements HeapSize, Comparable { - public final static long PER_BLOCK_OVERHEAD = ClassSize.align( - ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (3 * Bytes.SIZEOF_LONG) + - ClassSize.STRING + ClassSize.BYTE_BUFFER); + public final static long PER_BLOCK_OVERHEAD = + ClassSize.align(ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (3 * Bytes.SIZEOF_LONG) + + ClassSize.STRING + ClassSize.BYTE_BUFFER); private final BlockCacheKey cacheKey; private final Cacheable buf; @@ -44,7 +42,7 @@ public class LruCachedBlock implements HeapSize, Comparable { private long size; private BlockPriority priority; /** - * Time this block was cached. Presumes we are created just before we are added to the cache. + * Time this block was cached. Presumes we are created just before we are added to the cache. */ private final long cachedTime = System.nanoTime(); @@ -52,8 +50,7 @@ public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime) { this(cacheKey, buf, accessTime, false); } - public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, - boolean inMemory) { + public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, boolean inMemory) { this.cacheKey = cacheKey; this.buf = buf; this.accessTime = accessTime; @@ -62,9 +59,9 @@ public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, // the base classes. We also include the base class // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with // their buffer lengths. This variable is used elsewhere in unit tests. - this.size = ClassSize.align(cacheKey.heapSize()) - + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD; - if(inMemory) { + this.size = + ClassSize.align(cacheKey.heapSize()) + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD; + if (inMemory) { this.priority = BlockPriority.MEMORY; } else { this.priority = BlockPriority.SINGLE; @@ -74,11 +71,11 @@ public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, /** * Block has been accessed. * @param accessTime Last access; this is actually a incremented sequence number rather than an - * actual time. + * actual time. */ public void access(long accessTime) { this.accessTime = accessTime; - if(this.priority == BlockPriority.SINGLE) { + if (this.priority == BlockPriority.SINGLE) { this.priority = BlockPriority.MULTI; } } @@ -104,7 +101,7 @@ public int compareTo(LruCachedBlock that) { @Override public int hashCode() { - return (int)(accessTime ^ (accessTime >>> 32)); + return (int) (accessTime ^ (accessTime >>> 32)); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java index e68939191d0e..cdd80e6e1e4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,17 +24,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.MinMaxPriorityQueue; /** - * A memory-bound queue that will grow until an element brings - * total size >= maxSize. From then on, only entries that are sorted larger - * than the smallest current entry will be inserted/replaced. - * - *

          Use this when you want to find the largest elements (according to their - * ordering, not their heap size) that consume as close to the specified - * maxSize as possible. Default behavior is to grow just above rather than - * just below specified max. - * - *

          Object used in this queue must implement {@link HeapSize} as well as - * {@link Comparable}. + * A memory-bound queue that will grow until an element brings total size >= maxSize. From then + * on, only entries that are sorted larger than the smallest current entry will be + * inserted/replaced. + *

          + * Use this when you want to find the largest elements (according to their ordering, not their heap + * size) that consume as close to the specified maxSize as possible. Default behavior is to grow + * just above rather than just below specified max. + *

          + * Object used in this queue must implement {@link HeapSize} as well as {@link Comparable}. */ @InterfaceAudience.Private public class LruCachedBlockQueue implements HeapSize { @@ -63,16 +60,16 @@ public LruCachedBlockQueue(long maxSize, long blockSize) { /** * Attempt to add the specified cached block to this queue. - * - *

          If the queue is smaller than the max size, or if the specified element - * is ordered before the smallest element in the queue, the element will be - * added to the queue. Otherwise, there is no side effect of this call. + *

          + * If the queue is smaller than the max size, or if the specified element is ordered before the + * smallest element in the queue, the element will be added to the queue. Otherwise, there is no + * side effect of this call. * @param cb block to try to add to the queue */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", - justification = "head can not be null as heapSize is greater than maxSize," - + " which means we have something in the queue") + value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", + justification = "head can not be null as heapSize is greater than maxSize," + + " which means we have something in the queue") public void add(LruCachedBlock cb) { if (heapSize < maxSize) { queue.add(cb); @@ -93,16 +90,14 @@ public void add(LruCachedBlock cb) { } /** - * @return The next element in this queue, or {@code null} if the queue is - * empty. + * @return The next element in this queue, or {@code null} if the queue is empty. */ public LruCachedBlock poll() { return queue.poll(); } /** - * @return The last element in this queue, or {@code null} if the queue is - * empty. + * @return The last element in this queue, or {@code null} if the queue is empty. */ public LruCachedBlock pollLast() { return queue.pollLast(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java index c519d9fd8095..fcf87e29e6a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java @@ -1,27 +1,26 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
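The add() logic above can be mirrored by a small self-contained analogue; for brevity this toy uses a long value as both the sort key and the heap size, which the real LruCachedBlockQueue keeps separate.

import java.util.PriorityQueue;

final class BoundedQueueSketch {
  private final PriorityQueue<Long> queue = new PriorityQueue<>(); // smallest element at the head
  private final long maxSize;
  private long heapSize;

  BoundedQueueSketch(long maxSize) {
    this.maxSize = maxSize;
  }

  void add(long element) {
    if (heapSize < maxSize) {
      queue.add(element);                 // still below the bound: always admit and grow
      heapSize += element;
    } else if (!queue.isEmpty() && element > queue.peek()) {
      heapSize += element - queue.poll(); // displace the current smallest entry
      queue.add(element);
    }                                     // otherwise: no side effect, exactly like the real add()
  }
}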
*/ package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.EncodingState; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; @@ -29,6 +28,7 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext; import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; import org.apache.hadoop.hbase.io.encoding.NoneEncoder; +import org.apache.yetus.audience.InterfaceAudience; /** * Does not perform any kind of encoding/decoding. @@ -36,8 +36,7 @@ @InterfaceAudience.Private public class NoOpDataBlockEncoder implements HFileDataBlockEncoder { - public static final NoOpDataBlockEncoder INSTANCE = - new NoOpDataBlockEncoder(); + public static final NoOpDataBlockEncoder INSTANCE = new NoOpDataBlockEncoder(); private static class NoneEncodingState extends EncodingState { NoneEncoder encoder = null; @@ -48,10 +47,9 @@ private NoOpDataBlockEncoder() { } @Override - public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, - DataOutputStream out) throws IOException { - NoneEncodingState state = (NoneEncodingState) encodingCtx - .getEncodingState(); + public void encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) + throws IOException { + NoneEncodingState state = (NoneEncodingState) encodingCtx.getEncodingState(); NoneEncoder encoder = state.encoder; int size = encoder.write(cell); state.postCellEncode(size, size); @@ -75,7 +73,7 @@ public DataBlockEncoding getDataBlockEncoding() { public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) { return DataBlockEncoding.NONE; } - + @Override public String toString() { return getClass().getSimpleName(); @@ -94,16 +92,15 @@ public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, } @Override - public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, - DataOutputStream out) throws IOException { + public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOutputStream out) + throws IOException { if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) { throw new IOException(this.getClass().getName() + " only accepts " - + HFileBlockDefaultEncodingContext.class.getName() + " as the " - + "encoding context."); + + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context."); } HFileBlockDefaultEncodingContext encodingCtx = - (HFileBlockDefaultEncodingContext) blkEncodingCtx; + (HFileBlockDefaultEncodingContext) blkEncodingCtx; encodingCtx.prepareEncoding(out); NoneEncoder encoder = new NoneEncoder(out, encodingCtx); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java index 80de44915f2e..cd042759f59c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +42,7 @@ public final class PrefetchExecutor { private static final Logger LOG = LoggerFactory.getLogger(PrefetchExecutor.class); /** Futures for tracking block prefetch activity */ - private static final Map> prefetchFutures = new ConcurrentSkipListMap<>(); + private static final Map> prefetchFutures = new ConcurrentSkipListMap<>(); /** Executor pool shared among all HFiles for block prefetch */ private static final ScheduledExecutorService prefetchExecutorPool; /** Delay before beginning prefetch */ @@ -59,40 +58,32 @@ public final class PrefetchExecutor { prefetchDelayMillis = conf.getInt("hbase.hfile.prefetch.delay", 1000); prefetchDelayVariation = conf.getFloat("hbase.hfile.prefetch.delay.variation", 0.2f); int prefetchThreads = conf.getInt("hbase.hfile.thread.prefetch", 4); - prefetchExecutorPool = new ScheduledThreadPoolExecutor(prefetchThreads, - new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime(); - Thread t = new Thread(r, name); - t.setDaemon(true); - return t; - } + prefetchExecutorPool = new ScheduledThreadPoolExecutor(prefetchThreads, new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime(); + Thread t = new Thread(r, name); + t.setDaemon(true); + return t; + } }); } // TODO: We want HFile, which is where the blockcache lives, to handle // prefetching of file blocks but the Store level is where path convention // knowledge should be contained - private static final Pattern prefetchPathExclude = - Pattern.compile( - "(" + - Path.SEPARATOR_CHAR + - HConstants.HBASE_TEMP_DIRECTORY.replace(".", "\\.") + - Path.SEPARATOR_CHAR + - ")|(" + - Path.SEPARATOR_CHAR + - HConstants.HREGION_COMPACTIONDIR_NAME.replace(".", "\\.") + - Path.SEPARATOR_CHAR + - ")"); + private static final Pattern prefetchPathExclude = Pattern + .compile("(" + Path.SEPARATOR_CHAR + HConstants.HBASE_TEMP_DIRECTORY.replace(".", "\\.") + + Path.SEPARATOR_CHAR + ")|(" + Path.SEPARATOR_CHAR + + HConstants.HREGION_COMPACTIONDIR_NAME.replace(".", "\\.") + Path.SEPARATOR_CHAR + ")"); public static void request(Path path, Runnable runnable) { if (!prefetchPathExclude.matcher(path.toString()).find()) { long delay; if (prefetchDelayMillis > 0) { - delay = (long)((prefetchDelayMillis * (1.0f - (prefetchDelayVariation/2))) + - (prefetchDelayMillis * (prefetchDelayVariation/2) * - ThreadLocalRandom.current().nextFloat())); + delay = (long) ((prefetchDelayMillis * (1.0f - (prefetchDelayVariation / 2))) + + (prefetchDelayMillis * (prefetchDelayVariation / 2) + * ThreadLocalRandom.current().nextFloat())); } else { delay = 0; } @@ -100,8 +91,8 @@ public static void request(Path path, Runnable runnable) { if (LOG.isDebugEnabled()) { LOG.debug("Prefetch requested for " + path + ", delay=" + delay + " ms"); } - prefetchFutures.put(path, prefetchExecutorPool.schedule(runnable, delay, - TimeUnit.MILLISECONDS)); + prefetchFutures.put(path, + prefetchExecutorPool.schedule(runnable, delay, TimeUnit.MILLISECONDS)); } catch (RejectedExecutionException e) { prefetchFutures.remove(path); LOG.warn("Prefetch request rejected for " + path); @@ -136,5 +127,6 @@ public static boolean isCompleted(Path path) { return true; } - private PrefetchExecutor() {} + private PrefetchExecutor() { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java index bd3d63dab0c6..7cd9227fb74b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,9 +29,9 @@ public class ReaderContext { @InterfaceAudience.Private public enum ReaderType { - PREAD, - STREAM + PREAD, STREAM } + private final Path filePath; private final FSDataInputStreamWrapper fsdis; private final long fileSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java index 1f903cfbea64..89872f731c37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +19,7 @@ import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull; + import java.io.IOException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -40,7 +40,8 @@ public class ReaderContextBuilder { private boolean primaryReplicaReader = true; private ReaderType type = ReaderType.PREAD; - public ReaderContextBuilder() {} + public ReaderContextBuilder() { + } public ReaderContextBuilder withFilePath(Path filePath) { this.filePath = filePath; @@ -83,9 +84,7 @@ public ReaderContextBuilder withReaderType(ReaderType type) { public ReaderContextBuilder withFileSystemAndPath(FileSystem fs, Path filePath) throws IOException { - this.withFileSystem(fs) - .withFilePath(filePath) - .withFileSize(fs.getFileStatus(filePath).getLen()) + this.withFileSystem(fs).withFilePath(filePath).withFileSize(fs.getFileStatus(filePath).getLen()) .withInputStreamWrapper(new FSDataInputStreamWrapper(fs, filePath)); return this; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java index 76158b010694..f093073319e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ResizableBlockCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
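// A standalone sketch (not HBase code) of the jittered delay computed in the PrefetchExecutor
// hunk above: with the defaults shown there (hbase.hfile.prefetch.delay=1000 ms,
// hbase.hfile.prefetch.delay.variation=0.2f) the scheduled delay lands in [900, 1000) ms, so
// prefetch reads for many HFiles are spread out rather than fired at the same instant.
import java.util.concurrent.ThreadLocalRandom;

public class PrefetchDelaySketch {
  public static void main(String[] args) {
    int prefetchDelayMillis = 1000;      // "hbase.hfile.prefetch.delay" default from the hunk above
    float prefetchDelayVariation = 0.2f; // "hbase.hfile.prefetch.delay.variation" default
    long delay = (long) ((prefetchDelayMillis * (1.0f - (prefetchDelayVariation / 2)))
      + (prefetchDelayMillis * (prefetchDelayVariation / 2)
        * ThreadLocalRandom.current().nextFloat()));
    System.out.println("prefetch scheduled in " + delay + " ms"); // e.g. 957 ms
  }
}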
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java index 0d2217e1579f..a9ccc699009a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/SharedMemHFileBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java index e5e2e8fb6320..13b04eea900c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,38 +19,36 @@ import static java.util.Objects.requireNonNull; +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.Policy.Eviction; +import com.github.benmanes.caffeine.cache.RemovalCause; +import com.github.benmanes.caffeine.cache.RemovalListener; import java.util.Comparator; import java.util.Iterator; import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; - -import com.github.benmanes.caffeine.cache.Cache; -import com.github.benmanes.caffeine.cache.Caffeine; -import com.github.benmanes.caffeine.cache.Policy.Eviction; -import com.github.benmanes.caffeine.cache.RemovalCause; -import com.github.benmanes.caffeine.cache.RemovalListener; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.util.StringUtils; -import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * A block cache that is memory-aware using {@link HeapSize}, memory bounded using the W-TinyLFU * eviction algorithm, and concurrent. This implementation delegates to a Caffeine cache to provide * O(1) read and write operations. *

- *   <li>W-TinyLFU: http://arxiv.org/pdf/1512.00727.pdf</li>
- *   <li>Caffeine: https://github.com/ben-manes/caffeine</li>
- *   <li>Cache design: http://highscalability.com/blog/2016/1/25/design-of-a-modern-cache.html</li>
+ * <li>W-TinyLFU: http://arxiv.org/pdf/1512.00727.pdf</li>
+ * <li>Caffeine: https://github.com/ben-manes/caffeine</li>
+ * <li>Cache design: http://highscalability.com/blog/2016/1/25/design-of-a-modern-cache.html</li>
 * </ul>
          */ @InterfaceAudience.Private @@ -72,44 +70,39 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache { /** * Creates a block cache. - * * @param maximumSizeInBytes maximum size of this cache, in bytes * @param avgBlockSize expected average size of blocks, in bytes * @param executor the cache's executor * @param conf additional configuration */ - public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize, - Executor executor, Configuration conf) { - this(maximumSizeInBytes, avgBlockSize, - conf.getLong(MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), executor); + public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize, Executor executor, + Configuration conf) { + this(maximumSizeInBytes, avgBlockSize, conf.getLong(MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), + executor); } /** * Creates a block cache. - * * @param maximumSizeInBytes maximum size of this cache, in bytes * @param avgBlockSize expected average size of blocks, in bytes * @param maxBlockSize maximum size of a block, in bytes * @param executor the cache's executor */ - public TinyLfuBlockCache(long maximumSizeInBytes, - long avgBlockSize, long maxBlockSize, Executor executor) { - this.cache = Caffeine.newBuilder() - .executor(executor) - .maximumWeight(maximumSizeInBytes) + public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize, long maxBlockSize, + Executor executor) { + this.cache = Caffeine.newBuilder().executor(executor).maximumWeight(maximumSizeInBytes) .removalListener(new EvictionListener()) - .weigher((BlockCacheKey key, Cacheable value) -> - (int) Math.min(value.heapSize(), Integer.MAX_VALUE)) - .initialCapacity((int) Math.ceil((1.2 * maximumSizeInBytes) / avgBlockSize)) - .build(); + .weigher((BlockCacheKey key, + Cacheable value) -> (int) Math.min(value.heapSize(), Integer.MAX_VALUE)) + .initialCapacity((int) Math.ceil((1.2 * maximumSizeInBytes) / avgBlockSize)).build(); this.maxBlockSize = maxBlockSize; this.policy = cache.policy().eviction().get(); this.stats = new CacheStats(getClass().getSimpleName()); statsThreadPool = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder() .setNameFormat("TinyLfuBlockCacheStatsExecutor").setDaemon(true).build()); - statsThreadPool.scheduleAtFixedRate(this::logStats, - STAT_THREAD_PERIOD_SECONDS, STAT_THREAD_PERIOD_SECONDS, TimeUnit.SECONDS); + statsThreadPool.scheduleAtFixedRate(this::logStats, STAT_THREAD_PERIOD_SECONDS, + STAT_THREAD_PERIOD_SECONDS, TimeUnit.SECONDS); } @Override @@ -156,8 +149,8 @@ public boolean containsBlock(BlockCacheKey cacheKey) { } @Override - public Cacheable getBlock(BlockCacheKey cacheKey, - boolean caching, boolean repeat, boolean updateCacheMetrics) { + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, + boolean updateCacheMetrics) { Cacheable value = cache.asMap().computeIfPresent(cacheKey, (blockCacheKey, cacheable) -> { // It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside // this block. because if retain outside the map#computeIfPresent, the evictBlock may remove @@ -214,9 +207,8 @@ public void cacheBlock(BlockCacheKey key, Cacheable value) { * the block (HBASE-22127):
          * 1. if cache the cloned heap block, its refCnt is an totally new one, it's easy to handle;
          * 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's - * reservoir, if both RPC and TinyLfuBlockCache release the block, then it can be - * garbage collected by JVM, so need a retain here. - * + * reservoir, if both RPC and TinyLfuBlockCache release the block, then it can be garbage + * collected by JVM, so need a retain here. * @param buf the original block * @return an block with an heap memory backend. */ @@ -281,33 +273,24 @@ public Iterator iterator() { } private void logStats() { - LOG.info( - "totalSize=" + StringUtils.byteDesc(heapSize()) + ", " + - "freeSize=" + StringUtils.byteDesc(getFreeSize()) + ", " + - "max=" + StringUtils.byteDesc(size()) + ", " + - "blockCount=" + getBlockCount() + ", " + - "accesses=" + stats.getRequestCount() + ", " + - "hits=" + stats.getHitCount() + ", " + - "hitRatio=" + (stats.getHitCount() == 0 ? - "0," : StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ") + - "cachingAccesses=" + stats.getRequestCachingCount() + ", " + - "cachingHits=" + stats.getHitCachingCount() + ", " + - "cachingHitsRatio=" + (stats.getHitCachingCount() == 0 ? - "0,": (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + - "evictions=" + stats.getEvictionCount() + ", " + - "evicted=" + stats.getEvictedCount()); + LOG.info("totalSize=" + StringUtils.byteDesc(heapSize()) + ", " + "freeSize=" + + StringUtils.byteDesc(getFreeSize()) + ", " + "max=" + StringUtils.byteDesc(size()) + ", " + + "blockCount=" + getBlockCount() + ", " + "accesses=" + stats.getRequestCount() + ", " + + "hits=" + stats.getHitCount() + ", " + "hitRatio=" + + (stats.getHitCount() == 0 ? "0," + : StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ") + + "cachingAccesses=" + stats.getRequestCachingCount() + ", " + "cachingHits=" + + stats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (stats.getHitCachingCount() == 0 ? "0," + : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + stats.getEvictionCount() + ", " + "evicted=" + stats.getEvictedCount()); } @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("blockCount", getBlockCount()) - .add("currentSize", getCurrentSize()) - .add("freeSize", getFreeSize()) - .add("maxSize", size()) - .add("heapSize", heapSize()) - .add("victimCache", (victimCache != null)) - .toString(); + return MoreObjects.toStringHelper(this).add("blockCount", getBlockCount()) + .add("currentSize", getCurrentSize()).add("freeSize", getFreeSize()).add("maxSize", size()) + .add("heapSize", heapSize()).add("victimCache", (victimCache != null)).toString(); } /** A removal listener to asynchronously record evictions and populate the victim cache. */ @@ -335,10 +318,10 @@ public void onRemoval(BlockCacheKey key, Cacheable value, RemovalCause cause) { } /** - * Records an eviction. The number of eviction operations and evicted blocks are identical, as - * an eviction is triggered immediately when the capacity has been exceeded. An eviction is - * performed asynchronously. See the library's documentation for details on write buffers, - * batching, and maintenance behavior. + * Records an eviction. The number of eviction operations and evicted blocks are identical, as an + * eviction is triggered immediately when the capacity has been exceeded. An eviction is performed + * asynchronously. See the library's documentation for details on write buffers, batching, and + * maintenance behavior. 
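// A standalone sketch (not the HBase class itself) of the Caffeine delegation described in the
// TinyLfuBlockCache javadoc above: a cache bounded by total weight, a weigher that charges each
// entry its size, and a removal listener that observes size-based evictions (the role played by
// EvictionListener/recordEviction). String/byte[] stand in for BlockCacheKey/Cacheable.
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.RemovalCause;

public class WeightBoundedCacheSketch {
  public static void main(String[] args) {
    long maximumSizeInBytes = 32 * 1024;
    Cache<String, byte[]> cache = Caffeine.newBuilder()
      .maximumWeight(maximumSizeInBytes)                   // bound by bytes, not entry count
      .weigher((String key, byte[] value) -> value.length) // each entry weighs its payload size
      .removalListener((String key, byte[] value, RemovalCause cause) -> {
        if (cause == RemovalCause.SIZE) {
          System.out.println("evicted " + key);            // roughly what recordEviction() counts
        }
      })
      .build();
    cache.put("block-1", new byte[16 * 1024]);
    cache.put("block-2", new byte[16 * 1024]);
    cache.put("block-3", new byte[16 * 1024]);             // total weight now exceeds the bound
    cache.cleanUp();                                       // run pending maintenance so the demo prints
  }
}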
*/ private void recordEviction() { // FIXME: Currently does not capture the insertion time @@ -347,10 +330,9 @@ private void recordEviction() { } private static final class CachedBlockView implements CachedBlock { - private static final Comparator COMPARATOR = Comparator - .comparing(CachedBlock::getFilename) - .thenComparing(CachedBlock::getOffset) - .thenComparing(CachedBlock::getCachedTime); + private static final Comparator COMPARATOR = + Comparator.comparing(CachedBlock::getFilename).thenComparing(CachedBlock::getOffset) + .thenComparing(CachedBlock::getCachedTime); private final BlockCacheKey key; private final Cacheable value; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java index bbbce76cf8e8..a9e29616882f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile.bucket; import java.util.Arrays; @@ -114,8 +113,8 @@ public long getBaseOffset() { } /** - * Allocate a block in this bucket, return the offset representing the - * position in physical space + * Allocate a block in this bucket, return the offset representing the position in physical + * space * @return the offset in the IOEngine */ public long allocate() { @@ -130,18 +129,16 @@ public long allocate() { public void addAllocation(long offset) throws BucketAllocatorException { offset -= baseOffset; if (offset < 0 || offset % itemAllocationSize != 0) - throw new BucketAllocatorException( - "Attempt to add allocation for bad offset: " + offset + " base=" - + baseOffset + ", bucket size=" + itemAllocationSize); + throw new BucketAllocatorException("Attempt to add allocation for bad offset: " + offset + + " base=" + baseOffset + ", bucket size=" + itemAllocationSize); int idx = (int) (offset / itemAllocationSize); boolean matchFound = false; for (int i = 0; i < freeCount; ++i) { if (matchFound) freeList[i - 1] = freeList[i]; else if (freeList[i] == idx) matchFound = true; } - if (!matchFound) - throw new BucketAllocatorException("Couldn't find match for index " - + idx + " in free list"); + if (!matchFound) throw new BucketAllocatorException( + "Couldn't find match for index " + idx + " in free list"); ++usedCount; --freeCount; } @@ -260,10 +257,8 @@ public synchronized IndexStatistics statistics() { @Override public String toString() { - return MoreObjects.toStringHelper(this.getClass()) - .add("sizeIndex", sizeIndex) - .add("bucketSize", bucketSizes[sizeIndex]) - .toString(); + return MoreObjects.toStringHelper(this.getClass()).add("sizeIndex", sizeIndex) + .add("bucketSize", bucketSizes[sizeIndex]).toString(); } } @@ -272,20 +267,17 @@ public String toString() { // The real block size in hfile maybe a little larger than the size we configured , // so we need add extra 1024 bytes for fit. 
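// A standalone sketch (not the BucketAllocator API) of the "round up to bucket size" rule the
// comment above describes: every configured bucket size carries roughly 1 KB of slack, and a block
// is placed in the smallest bucket size that can hold it.
import java.util.Arrays;

public class BucketRoundUpSketch {
  // A few of the default sizes from the hunk below: (4K+1K), (8K+1K), (16K+1K), (32K+1K).
  private static final int[] BUCKET_SIZES =
    { 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, 32 * 1024 + 1024 };

  /** Returns the smallest configured bucket size that fits blockSize, or -1 if none does. */
  static int roundUp(int blockSize) {
    for (int size : BUCKET_SIZES) {      // sizes are kept sorted, as the allocator sorts its own
      if (blockSize <= size) {
        return size;
      }
    }
    return -1;                           // too big; the real allocator reports this to the caller
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(BUCKET_SIZES));
    System.out.println(roundUp(4 * 1024));  // 5120: a 4 KB block still fits the smallest bucket
    System.out.println(roundUp(64 * 1024)); // -1 with only the four sizes configured here
  }
}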
// TODO Support the view of block size distribution statistics - private static final int DEFAULT_BUCKET_SIZES[] = { 4 * 1024 + 1024, 8 * 1024 + 1024, - 16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024, 48 * 1024 + 1024, - 56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024, - 192 * 1024 + 1024, 256 * 1024 + 1024, 384 * 1024 + 1024, - 512 * 1024 + 1024 }; + private static final int DEFAULT_BUCKET_SIZES[] = + { 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024, + 48 * 1024 + 1024, 56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024, + 192 * 1024 + 1024, 256 * 1024 + 1024, 384 * 1024 + 1024, 512 * 1024 + 1024 }; /** - * Round up the given block size to bucket size, and get the corresponding - * BucketSizeInfo + * Round up the given block size to bucket size, and get the corresponding BucketSizeInfo */ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { for (int i = 0; i < bucketSizes.length; ++i) - if (blockSize <= bucketSizes[i]) - return bucketSizeInfos[i]; + if (blockSize <= bucketSizes[i]) return bucketSizeInfos[i]; return null; } @@ -303,16 +295,15 @@ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { private final long totalSize; private transient long usedSize = 0; - BucketAllocator(long availableSpace, int[] bucketSizes) - throws BucketAllocatorException { + BucketAllocator(long availableSpace, int[] bucketSizes) throws BucketAllocatorException { this.bucketSizes = bucketSizes == null ? DEFAULT_BUCKET_SIZES : bucketSizes; Arrays.sort(this.bucketSizes); this.bigItemSize = Ints.max(this.bucketSizes); this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * (long) bigItemSize; buckets = new Bucket[(int) (availableSpace / bucketCapacity)]; if (buckets.length < this.bucketSizes.length) - throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length + - "); must have room for at least " + this.bucketSizes.length + " buckets"); + throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length + + "); must have room for at least " + this.bucketSizes.length + " buckets"); bucketSizeInfos = new BucketSizeInfo[this.bucketSizes.length]; for (int i = 0; i < this.bucketSizes.length; ++i) { bucketSizeInfos[i] = new BucketSizeInfo(i); @@ -324,18 +315,17 @@ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { } this.totalSize = ((long) buckets.length) * bucketCapacity; if (LOG.isInfoEnabled()) { - LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length + - ", bucket capacity=" + this.bucketCapacity + - "=(" + FEWEST_ITEMS_IN_BUCKET + "*" + this.bigItemSize + ")=" + - "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))"); + LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length + + ", bucket capacity=" + this.bucketCapacity + "=(" + FEWEST_ITEMS_IN_BUCKET + "*" + + this.bigItemSize + ")=" + + "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))"); } } /** * Rebuild the allocator's data structures from a persisted map. 
* @param availableSpace capacity of cache - * @param map A map stores the block key and BucketEntry(block's meta data - * like offset, length) + * @param map A map stores the block key and BucketEntry(block's meta data like offset, length) * @param realCacheSize cached data size statistics for bucket cache * @throws BucketAllocatorException */ @@ -398,12 +388,12 @@ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { } if (sizeNotMatchedCount > 0) { - LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be rebuilt because " + - "there is no matching bucket size for these blocks"); + LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be rebuilt because " + + "there is no matching bucket size for these blocks"); } if (insufficientCapacityCount > 0) { LOG.warn("There are " + insufficientCapacityCount + " blocks which can't be rebuilt - " - + "did you shrink the cache?"); + + "did you shrink the cache?"); } } @@ -438,20 +428,19 @@ public long getTotalSize() { * @throws CacheFullException * @return the offset in the IOEngine */ - public synchronized long allocateBlock(int blockSize) throws CacheFullException, - BucketAllocatorException { + public synchronized long allocateBlock(int blockSize) + throws CacheFullException, BucketAllocatorException { assert blockSize > 0; BucketSizeInfo bsi = roundUpToBucketSizeInfo(blockSize); if (bsi == null) { - throw new BucketAllocatorException("Allocation too big size=" + blockSize + - "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY + - " to accomodate if size seems reasonable and you want it cached."); + throw new BucketAllocatorException("Allocation too big size=" + blockSize + + "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY + + " to accomodate if size seems reasonable and you want it cached."); } long offset = bsi.allocateBlock(); // Ask caller to free up space and try again! 
- if (offset < 0) - throw new CacheFullException(blockSize, bsi.sizeIndex()); + if (offset < 0) throw new CacheFullException(blockSize, bsi.sizeIndex()); usedSize += bucketSizes[bsi.sizeIndex()]; return offset; } @@ -539,7 +528,7 @@ public void setTo(long free, long used, long itemSize) { } } - public Bucket [] getBuckets() { + public Bucket[] getBuckets() { return this.buckets; } @@ -547,11 +536,11 @@ void logStatistics() { IndexStatistics total = new IndexStatistics(); IndexStatistics[] stats = getIndexStatistics(total); LOG.info("Bucket allocator statistics follow:\n"); - LOG.info(" Free bytes=" + total.freeBytes() + "+; used bytes=" - + total.usedBytes() + "; total bytes=" + total.totalBytes()); + LOG.info(" Free bytes=" + total.freeBytes() + "+; used bytes=" + total.usedBytes() + + "; total bytes=" + total.totalBytes()); for (IndexStatistics s : stats) { - LOG.info(" Object size " + s.itemSize() + " used=" + s.usedCount() - + "; free=" + s.freeCount() + "; total=" + s.totalCount()); + LOG.info(" Object size " + s.itemSize() + " used=" + s.usedCount() + "; free=" + + s.freeCount() + "; total=" + s.totalCount()); } } @@ -585,32 +574,26 @@ public int getBucketIndex(long offset) { } /** - * Returns a set of indices of the buckets that are least filled - * excluding the offsets, we also the fully free buckets for the - * BucketSizes where everything is empty and they only have one + * Returns a set of indices of the buckets that are least filled excluding the offsets, we also + * the fully free buckets for the BucketSizes where everything is empty and they only have one * completely free bucket as a reserved - * - * @param excludedBuckets the buckets that need to be excluded due to - * currently being in used - * @param bucketCount max Number of buckets to return + * @param excludedBuckets the buckets that need to be excluded due to currently being in used + * @param bucketCount max Number of buckets to return * @return set of bucket indices which could be used for eviction */ - public Set getLeastFilledBuckets(Set excludedBuckets, - int bucketCount) { - Queue queue = MinMaxPriorityQueue.orderedBy( - new Comparator() { - @Override - public int compare(Integer left, Integer right) { - // We will always get instantiated buckets - return Float.compare( - ((float) buckets[left].usedCount) / buckets[left].itemCount, - ((float) buckets[right].usedCount) / buckets[right].itemCount); - } - }).maximumSize(bucketCount).create(); - - for (int i = 0; i < buckets.length; i ++ ) { + public Set getLeastFilledBuckets(Set excludedBuckets, int bucketCount) { + Queue queue = MinMaxPriorityQueue. 
orderedBy(new Comparator() { + @Override + public int compare(Integer left, Integer right) { + // We will always get instantiated buckets + return Float.compare(((float) buckets[left].usedCount) / buckets[left].itemCount, + ((float) buckets[right].usedCount) / buckets[right].itemCount); + } + }).maximumSize(bucketCount).create(); + + for (int i = 0; i < buckets.length; i++) { if (!excludedBuckets.contains(i) && !buckets[i].isUninstantiated() && - // Avoid the buckets that are the only buckets for a sizeIndex + // Avoid the buckets that are the only buckets for a sizeIndex bucketSizeInfos[buckets[i].sizeIndex()].bucketList.size() != 1) { queue.add(i); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java index 55172cf7fb94..c141edb947dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocatorException.java @@ -1,25 +1,23 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index e05645415fc1..3fe53e984036 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
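// A standalone sketch (made-up fill ratios, not the BucketAllocator itself) of the bounded priority
// queue pattern getLeastFilledBuckets uses above: Guava's MinMaxPriorityQueue with maximumSize(n)
// retains only the n smallest elements under the comparator, so ordering bucket indices by their
// fill ratio leaves the emptiest buckets as eviction candidates.
import com.google.common.collect.MinMaxPriorityQueue;
import java.util.Comparator;
import java.util.Queue;

public class LeastFilledBucketsSketch {
  public static void main(String[] args) {
    float[] fillRatio = { 0.9f, 0.1f, 0.7f, 0.05f, 0.5f }; // usedCount / itemCount per bucket
    Queue<Integer> queue = MinMaxPriorityQueue
      .orderedBy(Comparator.<Integer> comparingDouble(i -> fillRatio[i]))
      .maximumSize(2)                                      // keep only the two least-filled buckets
      .create();
    for (int i = 0; i < fillRatio.length; i++) {
      queue.add(i);                                        // fuller indices are dropped as we go
    }
    System.out.println(queue);                             // contains indices 1 and 3
  }
}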
See the NOTICE file * distributed with this work for additional information @@ -12,7 +10,6 @@ * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -50,7 +47,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; import java.util.function.Function; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; @@ -88,21 +84,20 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos; /** - * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses - * BucketCache#ramCache and BucketCache#backingMap in order to - * determine if a given element is in the cache. The bucket cache can use on-heap or - * off-heap memory {@link ByteBufferIOEngine} or in a file {@link FileIOEngine} to - * store/read the block data. - * - *

Eviction is via a similar algorithm as used in
+ * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses BucketCache#ramCache
+ * and BucketCache#backingMap in order to determine if a given element is in the cache. The bucket
+ * cache can use on-heap or off-heap memory {@link ByteBufferIOEngine} or in a file
+ * {@link FileIOEngine} to store/read the block data.
+ * <p>
+ * Eviction is via a similar algorithm as used in
 * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
- *
- * <p>BucketCache can be used as mainly a block cache (see
- * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
- * a BlockCache to decrease CMS GC and heap fragmentation.
- *
- * <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
- * blocks) to enlarge cache space via a victim cache.
+ * <p>
+ * BucketCache can be used as mainly a block cache (see
+ * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with a BlockCache to
+ * decrease CMS GC and heap fragmentation.
+ * <p>
          + * It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store blocks) to + * enlarge cache space via a victim cache. */ @InterfaceAudience.Private public class BucketCache implements BlockCache, HeapSize { @@ -147,18 +142,17 @@ public class BucketCache implements BlockCache, HeapSize { transient ConcurrentHashMap backingMap; /** - * Flag if the cache is enabled or not... We shut it off if there are IO - * errors for some time, so that Bucket IO exceptions/errors don't bring down - * the HBase server. + * Flag if the cache is enabled or not... We shut it off if there are IO errors for some time, so + * that Bucket IO exceptions/errors don't bring down the HBase server. */ private volatile boolean cacheEnabled; /** - * A list of writer queues. We have a queue per {@link WriterThread} we have running. - * In other words, the work adding blocks to the BucketCache is divided up amongst the - * running WriterThreads. Its done by taking hash of the cache key modulo queue count. - * WriterThread when it runs takes whatever has been recently added and 'drains' the entries - * to the BucketCache. It then updates the ramCache and backingMap accordingly. + * A list of writer queues. We have a queue per {@link WriterThread} we have running. In other + * words, the work adding blocks to the BucketCache is divided up amongst the running + * WriterThreads. Its done by taking hash of the cache key modulo queue count. WriterThread when + * it runs takes whatever has been recently added and 'drains' the entries to the BucketCache. It + * then updates the ramCache and backingMap accordingly. */ transient final ArrayList> writerQueues = new ArrayList<>(); transient final WriterThread[] writerThreads; @@ -178,9 +172,9 @@ public class BucketCache implements BlockCache, HeapSize { private static final int DEFAULT_CACHE_WAIT_TIME = 50; /** - * Used in tests. If this flag is false and the cache speed is very fast, - * bucket cache will skip some blocks when caching. If the flag is true, we - * will wait until blocks are flushed to IOEngine. + * Used in tests. If this flag is false and the cache speed is very fast, bucket cache will skip + * some blocks when caching. If the flag is true, we will wait until blocks are flushed to + * IOEngine. */ boolean wait_when_cache = false; @@ -201,8 +195,8 @@ public class BucketCache implements BlockCache, HeapSize { private volatile long ioErrorStartTime = -1; /** - * A ReentrantReadWriteLock to lock on a particular block identified by offset. - * The purpose of this is to avoid freeing the block which is being read. + * A ReentrantReadWriteLock to lock on a particular block identified by offset. The purpose of + * this is to avoid freeing the block which is being read. *

          */ transient final IdReadWriteLock offsetLock; @@ -217,8 +211,8 @@ public class BucketCache implements BlockCache, HeapSize { /** Statistics thread schedule pool (for heavy debugging, could remove) */ private transient final ScheduledExecutorService scheduleThreadPool = - Executors.newScheduledThreadPool(1, - new ThreadFactoryBuilder().setNameFormat("BucketCacheStatsExecutor").setDaemon(true).build()); + Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() + .setNameFormat("BucketCacheStatsExecutor").setDaemon(true).build()); // Allocate or free space for the block private transient BucketAllocator bucketAllocator; @@ -229,7 +223,10 @@ public class BucketCache implements BlockCache, HeapSize { /** Minimum threshold of cache (when evicting, evict until size < min) */ private float minFactor; - /** Free this floating point factor of extra blocks when evicting. For example free the number of blocks requested * (1 + extraFreeFactor) */ + /** + * Free this floating point factor of extra blocks when evicting. For example free the number of + * blocks requested * (1 + extraFreeFactor) + */ private float extraFreeFactor; /** Single access bucket size */ @@ -242,13 +239,13 @@ public class BucketCache implements BlockCache, HeapSize { private float memoryFactor; private static final String FILE_VERIFY_ALGORITHM = - "hbase.bucketcache.persistent.file.integrity.check.algorithm"; + "hbase.bucketcache.persistent.file.integrity.check.algorithm"; private static final String DEFAULT_FILE_VERIFY_ALGORITHM = "MD5"; /** - * Use {@link java.security.MessageDigest} class's encryption algorithms to check - * persistent file integrity, default algorithm is MD5 - * */ + * Use {@link java.security.MessageDigest} class's encryption algorithms to check persistent file + * integrity, default algorithm is MD5 + */ private String algorithm; /* Tracing failed Bucket Cache allocations. */ @@ -288,9 +285,10 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck sanityCheckConfigs(); - LOG.info("Instantiating BucketCache with acceptableFactor: " + acceptableFactor + ", minFactor: " + minFactor + - ", extraFreeFactor: " + extraFreeFactor + ", singleFactor: " + singleFactor + ", multiFactor: " + multiFactor + - ", memoryFactor: " + memoryFactor + ", useStrongRef: " + useStrongRef); + LOG.info("Instantiating BucketCache with acceptableFactor: " + acceptableFactor + + ", minFactor: " + minFactor + ", extraFreeFactor: " + extraFreeFactor + ", singleFactor: " + + singleFactor + ", multiFactor: " + multiFactor + ", memoryFactor: " + memoryFactor + + ", useStrongRef: " + useStrongRef); this.cacheCapacity = capacity; this.persistencePath = persistencePath; @@ -326,27 +324,35 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck startWriterThreads(); // Run the statistics thread periodically to print the cache statistics log - // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log + // TODO: Add means of turning this off. Bit obnoxious running thread just to make a log // every five minutes. 
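// A standalone sketch of the invariant the sanityCheckConfigs hunk just below enforces: each
// eviction factor must lie in [0.0, 1.0] and the single, multi and memory partitions must add up
// to exactly 1.0 (a 25/50/25 split passes, for example).
import com.google.common.base.Preconditions;

public class FactorCheckSketch {
  static void checkFactors(float single, float multi, float memory) {
    Preconditions.checkArgument(single >= 0 && single <= 1, "single factor must be in [0,1]");
    Preconditions.checkArgument(multi >= 0 && multi <= 1, "multi factor must be in [0,1]");
    Preconditions.checkArgument(memory >= 0 && memory <= 1, "memory factor must be in [0,1]");
    Preconditions.checkArgument((single + multi + memory) == 1,
      "single, multi and memory factors must add up to 1.0");
  }

  public static void main(String[] args) {
    checkFactors(0.25f, 0.50f, 0.25f); // passes
    checkFactors(0.50f, 0.50f, 0.50f); // throws IllegalArgumentException: sums to 1.5
  }
}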
- this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), - statThreadPeriod, statThreadPeriod, TimeUnit.SECONDS); - LOG.info("Started bucket cache; ioengine=" + ioEngineName + - ", capacity=" + StringUtils.byteDesc(capacity) + - ", blockSize=" + StringUtils.byteDesc(blockSize) + ", writerThreadNum=" + - writerThreadNum + ", writerQLen=" + writerQLen + ", persistencePath=" + - persistencePath + ", bucketAllocator=" + this.bucketAllocator.getClass().getName()); + this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), statThreadPeriod, + statThreadPeriod, TimeUnit.SECONDS); + LOG.info("Started bucket cache; ioengine=" + ioEngineName + ", capacity=" + + StringUtils.byteDesc(capacity) + ", blockSize=" + StringUtils.byteDesc(blockSize) + + ", writerThreadNum=" + writerThreadNum + ", writerQLen=" + writerQLen + + ", persistencePath=" + persistencePath + ", bucketAllocator=" + + this.bucketAllocator.getClass().getName()); } private void sanityCheckConfigs() { - Preconditions.checkArgument(acceptableFactor <= 1 && acceptableFactor >= 0, ACCEPT_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(minFactor <= 1 && minFactor >= 0, MIN_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(minFactor <= acceptableFactor, MIN_FACTOR_CONFIG_NAME + " must be <= " + ACCEPT_FACTOR_CONFIG_NAME); - Preconditions.checkArgument(extraFreeFactor >= 0, EXTRA_FREE_FACTOR_CONFIG_NAME + " must be greater than 0.0"); - Preconditions.checkArgument(singleFactor <= 1 && singleFactor >= 0, SINGLE_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(multiFactor <= 1 && multiFactor >= 0, MULTI_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument(memoryFactor <= 1 && memoryFactor >= 0, MEMORY_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); - Preconditions.checkArgument((singleFactor + multiFactor + memoryFactor) == 1, SINGLE_FACTOR_CONFIG_NAME + ", " + - MULTI_FACTOR_CONFIG_NAME + ", and " + MEMORY_FACTOR_CONFIG_NAME + " segments must add up to 1.0"); + Preconditions.checkArgument(acceptableFactor <= 1 && acceptableFactor >= 0, + ACCEPT_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(minFactor <= 1 && minFactor >= 0, + MIN_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(minFactor <= acceptableFactor, + MIN_FACTOR_CONFIG_NAME + " must be <= " + ACCEPT_FACTOR_CONFIG_NAME); + Preconditions.checkArgument(extraFreeFactor >= 0, + EXTRA_FREE_FACTOR_CONFIG_NAME + " must be greater than 0.0"); + Preconditions.checkArgument(singleFactor <= 1 && singleFactor >= 0, + SINGLE_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(multiFactor <= 1 && multiFactor >= 0, + MULTI_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument(memoryFactor <= 1 && memoryFactor >= 0, + MEMORY_FACTOR_CONFIG_NAME + " must be between 0.0 and 1.0"); + Preconditions.checkArgument((singleFactor + multiFactor + memoryFactor) == 1, + SINGLE_FACTOR_CONFIG_NAME + ", " + MULTI_FACTOR_CONFIG_NAME + ", and " + + MEMORY_FACTOR_CONFIG_NAME + " segments must add up to 1.0"); } /** @@ -386,8 +392,8 @@ private IOEngine getIOEngineFromName(String ioEngineName, long capacity, String // In order to make the usage simple, we only need the prefix 'files:' in // document whether one or multiple file(s), but also support 'file:' for // the compatibility - String[] filePaths = 
ioEngineName.substring(ioEngineName.indexOf(":") + 1) - .split(FileIOEngine.FILE_DELIMITER); + String[] filePaths = + ioEngineName.substring(ioEngineName.indexOf(":") + 1).split(FileIOEngine.FILE_DELIMITER); return new FileIOEngine(capacity, persistencePath != null, filePaths); } else if (ioEngineName.startsWith("offheap")) { return new ByteBufferIOEngine(capacity); @@ -696,7 +702,7 @@ protected boolean removeFromRamCache(BlockCacheKey cacheKey) { } /* - * Statistics thread. Periodically output cache statistics to the log. + * Statistics thread. Periodically output cache statistics to the log. */ private static class StatisticsThread extends Thread { private final BucketCache bucketCache; @@ -718,25 +724,22 @@ public void logStats() { long usedSize = bucketAllocator.getUsedSize(); long freeSize = totalSize - usedSize; long cacheSize = getRealCacheSize(); - LOG.info("failedBlockAdditions=" + cacheStats.getFailedInserts() + ", " + - "totalSize=" + StringUtils.byteDesc(totalSize) + ", " + - "freeSize=" + StringUtils.byteDesc(freeSize) + ", " + - "usedSize=" + StringUtils.byteDesc(usedSize) +", " + - "cacheSize=" + StringUtils.byteDesc(cacheSize) +", " + - "accesses=" + cacheStats.getRequestCount() + ", " + - "hits=" + cacheStats.getHitCount() + ", " + - "IOhitsPerSecond=" + cacheStats.getIOHitsPerSecond() + ", " + - "IOTimePerHit=" + String.format("%.2f", cacheStats.getIOTimePerHit())+ ", " + - "hitRatio=" + (cacheStats.getHitCount() == 0 ? "0," : - (StringUtils.formatPercent(cacheStats.getHitRatio(), 2)+ ", ")) + - "cachingAccesses=" + cacheStats.getRequestCachingCount() + ", " + - "cachingHits=" + cacheStats.getHitCachingCount() + ", " + - "cachingHitsRatio=" +(cacheStats.getHitCachingCount() == 0 ? "0," : - (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2)+ ", ")) + - "evictions=" + cacheStats.getEvictionCount() + ", " + - "evicted=" + cacheStats.getEvictedCount() + ", " + - "evictedPerRun=" + cacheStats.evictedPerEviction() + ", " + - "allocationFailCount=" + cacheStats.getAllocationFailCount()); + LOG.info("failedBlockAdditions=" + cacheStats.getFailedInserts() + ", " + "totalSize=" + + StringUtils.byteDesc(totalSize) + ", " + "freeSize=" + StringUtils.byteDesc(freeSize) + + ", " + "usedSize=" + StringUtils.byteDesc(usedSize) + ", " + "cacheSize=" + + StringUtils.byteDesc(cacheSize) + ", " + "accesses=" + cacheStats.getRequestCount() + ", " + + "hits=" + cacheStats.getHitCount() + ", " + "IOhitsPerSecond=" + + cacheStats.getIOHitsPerSecond() + ", " + "IOTimePerHit=" + + String.format("%.2f", cacheStats.getIOTimePerHit()) + ", " + "hitRatio=" + + (cacheStats.getHitCount() == 0 ? "0," + : (StringUtils.formatPercent(cacheStats.getHitRatio(), 2) + ", ")) + + "cachingAccesses=" + cacheStats.getRequestCachingCount() + ", " + "cachingHits=" + + cacheStats.getHitCachingCount() + ", " + "cachingHitsRatio=" + + (cacheStats.getHitCachingCount() == 0 ? 
"0," + : (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2) + ", ")) + + "evictions=" + cacheStats.getEvictionCount() + ", " + "evicted=" + + cacheStats.getEvictedCount() + ", " + "evictedPerRun=" + cacheStats.evictedPerEviction() + + ", " + "allocationFailCount=" + cacheStats.getAllocationFailCount()); cacheStats.reset(); } @@ -769,12 +772,10 @@ private int bucketSizesAboveThresholdCount(float minFactor) { } /** - * This method will find the buckets that are minimally occupied - * and are not reference counted and will free them completely - * without any constraint on the access times of the elements, - * and as a process will completely free at most the number of buckets - * passed, sometimes it might not due to changing refCounts - * + * This method will find the buckets that are minimally occupied and are not reference counted and + * will free them completely without any constraint on the access times of the elements, and as a + * process will completely free at most the number of buckets passed, sometimes it might not due + * to changing refCounts * @param completelyFreeBucketsNeeded number of buckets to free **/ private void freeEntireBuckets(int completelyFreeBucketsNeeded) { @@ -798,9 +799,9 @@ private void freeEntireBuckets(int completelyFreeBucketsNeeded) { } /** - * Free the space if the used size reaches acceptableSize() or one size block - * couldn't be allocated. When freeing the space, we use the LRU algorithm and - * ensure there must be some blocks evicted + * Free the space if the used size reaches acceptableSize() or one size block couldn't be + * allocated. When freeing the space, we use the LRU algorithm and ensure there must be some + * blocks evicted * @param why Why we are being called */ private void freeSpace(final String why) { @@ -812,7 +813,7 @@ private void freeSpace(final String why) { freeInProgress = true; long bytesToFreeWithoutExtra = 0; // Calculate free byte for each bucketSizeinfo - StringBuilder msgBuffer = LOG.isDebugEnabled()? new StringBuilder(): null; + StringBuilder msgBuffer = LOG.isDebugEnabled() ? 
new StringBuilder() : null; BucketAllocator.IndexStatistics[] stats = bucketAllocator.getIndexStatistics(); long[] bytesToFreeForBucket = new long[stats.length]; for (int i = 0; i < stats.length; i++) { @@ -824,7 +825,7 @@ private void freeSpace(final String why) { bytesToFreeWithoutExtra += bytesToFreeForBucket[i]; if (msgBuffer != null) { msgBuffer.append("Free for bucketSize(" + stats[i].itemSize() + ")=" - + StringUtils.byteDesc(bytesToFreeForBucket[i]) + ", "); + + StringUtils.byteDesc(bytesToFreeForBucket[i]) + ", "); } } } @@ -838,21 +839,22 @@ private void freeSpace(final String why) { long currentSize = bucketAllocator.getUsedSize(); long totalSize = bucketAllocator.getTotalSize(); if (LOG.isDebugEnabled() && msgBuffer != null) { - LOG.debug("Free started because \"" + why + "\"; " + msgBuffer.toString() + - " of current used=" + StringUtils.byteDesc(currentSize) + ", actual cacheSize=" + - StringUtils.byteDesc(realCacheSize.sum()) + ", total=" + StringUtils.byteDesc(totalSize)); + LOG.debug("Free started because \"" + why + "\"; " + msgBuffer.toString() + + " of current used=" + StringUtils.byteDesc(currentSize) + ", actual cacheSize=" + + StringUtils.byteDesc(realCacheSize.sum()) + ", total=" + + StringUtils.byteDesc(totalSize)); } - long bytesToFreeWithExtra = (long) Math.floor(bytesToFreeWithoutExtra - * (1 + extraFreeFactor)); + long bytesToFreeWithExtra = + (long) Math.floor(bytesToFreeWithoutExtra * (1 + extraFreeFactor)); // Instantiate priority buckets - BucketEntryGroup bucketSingle = new BucketEntryGroup(bytesToFreeWithExtra, - blockSize, getPartitionSize(singleFactor)); - BucketEntryGroup bucketMulti = new BucketEntryGroup(bytesToFreeWithExtra, - blockSize, getPartitionSize(multiFactor)); - BucketEntryGroup bucketMemory = new BucketEntryGroup(bytesToFreeWithExtra, - blockSize, getPartitionSize(memoryFactor)); + BucketEntryGroup bucketSingle = + new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(singleFactor)); + BucketEntryGroup bucketMulti = + new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(multiFactor)); + BucketEntryGroup bucketMemory = + new BucketEntryGroup(bytesToFreeWithExtra, blockSize, getPartitionSize(memoryFactor)); // Scan entire map putting bucket entry into appropriate bucket entry // group @@ -873,8 +875,8 @@ private void freeSpace(final String why) { } } - PriorityQueue bucketQueue = new PriorityQueue<>(3, - Comparator.comparingLong(BucketEntryGroup::overflow)); + PriorityQueue bucketQueue = + new PriorityQueue<>(3, Comparator.comparingLong(BucketEntryGroup::overflow)); bucketQueue.add(bucketSingle); bucketQueue.add(bucketMulti); @@ -887,8 +889,8 @@ private void freeSpace(final String why) { while ((bucketGroup = bucketQueue.poll()) != null) { long overflow = bucketGroup.overflow(); if (overflow > 0) { - long bucketBytesToFree = Math.min(overflow, - (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets); + long bucketBytesToFree = + Math.min(overflow, (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets); bytesFreed += bucketGroup.free(bucketBytesToFree); } remainingBuckets--; @@ -915,8 +917,7 @@ private void freeSpace(final String why) { // there might be some buckets where the occupancy is very sparse and thus are not // yielding the free for the other bucket sizes, the fix for this to evict some // of the buckets, we do this by evicting the buckets that are least fulled - freeEntireBuckets(DEFAULT_FREE_ENTIRE_BLOCK_FACTOR * - bucketSizesAboveThresholdCount(1.0f)); + 
freeEntireBuckets(DEFAULT_FREE_ENTIRE_BLOCK_FACTOR * bucketSizesAboveThresholdCount(1.0f)); if (LOG.isDebugEnabled()) { long single = bucketSingle.totalSize(); @@ -924,11 +925,9 @@ private void freeSpace(final String why) { long memory = bucketMemory.totalSize(); if (LOG.isDebugEnabled()) { LOG.debug("Bucket cache free space completed; " + "freed=" - + StringUtils.byteDesc(bytesFreed) + ", " + "total=" - + StringUtils.byteDesc(totalSize) + ", " + "single=" - + StringUtils.byteDesc(single) + ", " + "multi=" - + StringUtils.byteDesc(multi) + ", " + "memory=" - + StringUtils.byteDesc(memory)); + + StringUtils.byteDesc(bytesFreed) + ", " + "total=" + StringUtils.byteDesc(totalSize) + + ", " + "single=" + StringUtils.byteDesc(single) + ", " + "multi=" + + StringUtils.byteDesc(multi) + ", " + "memory=" + StringUtils.byteDesc(memory)); } } @@ -1092,7 +1091,8 @@ void doDrain(final List entries, ByteBuffer metaBuff) throws Inte } catch (BucketAllocatorException fle) { long currTs = EnvironmentEdgeManager.currentTime(); cacheStats.allocationFailed(); // Record the warning. - if (allocFailLogPrevTs == 0 || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD) { + if (allocFailLogPrevTs == 0 + || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD) { LOG.warn(getAllocationFailWarningMessage(fle, re)); allocFailLogPrevTs = currTs; } @@ -1182,7 +1182,7 @@ static List getRAMQueueEntries(BlockingQueue q, /** * @see #retrieveFromFile(int[]) */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="OBL_UNSATISFIED_OBLIGATION", + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "OBL_UNSATISFIED_OBLIGATION", justification = "false positive, try-with-resources ensures close is called.") private void persistToFile() throws IOException { assert !cacheEnabled; @@ -1213,11 +1213,11 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { throw new IOException("Incorrect number of bytes read while checking for protobuf magic " + "number. Requested=" + pblen + ", Received= " + read + ", File=" + persistencePath); } - if (! ProtobufMagic.isPBMagicPrefix(pbuf)) { + if (!ProtobufMagic.isPBMagicPrefix(pbuf)) { // In 3.0 we have enough flexibility to dump the old cache data. // TODO: In 2.x line, this might need to be filled in to support reading the old format - throw new IOException("Persistence file does not start with protobuf magic number. " + - persistencePath); + throw new IOException( + "Persistence file does not start with protobuf magic number. " + persistencePath); } parsePB(BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in)); bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize); @@ -1228,6 +1228,7 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { /** * Create an input stream that deletes the file after reading it. Use in try-with-resources to * avoid this pattern where an exception thrown from a finally block may mask earlier exceptions: + * *

 * <pre>
 *   File f = ...
 *   try (FileInputStream fis = new FileInputStream(f)) {
@@ -1236,6 +1237,7 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException {
 *     if (!f.delete()) throw new IOException("failed to delete");
 *   }
 * </pre>
          + * * @param file the file to read and delete * @return a FileInputStream for the given file * @throws IOException if there is a problem creating the stream @@ -1243,10 +1245,12 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException { private FileInputStream deleteFileOnClose(final File file) throws IOException { return new FileInputStream(file) { private File myFile; + private FileInputStream init(File file) { myFile = file; return this; } + @Override public void close() throws IOException { // close() will be called during try-with-resources and it will be @@ -1268,17 +1272,16 @@ public void close() throws IOException { private void verifyCapacityAndClasses(long capacitySize, String ioclass, String mapclass) throws IOException { if (capacitySize != cacheCapacity) { - throw new IOException("Mismatched cache capacity:" - + StringUtils.byteDesc(capacitySize) + ", expected: " - + StringUtils.byteDesc(cacheCapacity)); + throw new IOException("Mismatched cache capacity:" + StringUtils.byteDesc(capacitySize) + + ", expected: " + StringUtils.byteDesc(cacheCapacity)); } if (!ioEngine.getClass().getName().equals(ioclass)) { - throw new IOException("Class name for IO engine mismatch: " + ioclass - + ", expected:" + ioEngine.getClass().getName()); + throw new IOException("Class name for IO engine mismatch: " + ioclass + ", expected:" + + ioEngine.getClass().getName()); } if (!backingMap.getClass().getName().equals(mapclass)) { - throw new IOException("Class name for cache map mismatch: " + mapclass - + ", expected:" + backingMap.getClass().getName()); + throw new IOException("Class name for cache map mismatch: " + mapclass + ", expected:" + + backingMap.getClass().getName()); } } @@ -1296,9 +1299,8 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio } /** - * Check whether we tolerate IO error this time. If the duration of IOEngine - * throwing errors exceeds ioErrorsDurationTimeTolerated, we will disable the - * cache + * Check whether we tolerate IO error this time. If the duration of IOEngine throwing errors + * exceeds ioErrorsDurationTimeTolerated, we will disable the cache */ private void checkIOErrorIsTolerated() { long now = EnvironmentEdgeManager.currentTime(); @@ -1306,8 +1308,8 @@ private void checkIOErrorIsTolerated() { long ioErrorStartTimeTmp = this.ioErrorStartTime; if (ioErrorStartTimeTmp > 0) { if (cacheEnabled && (now - ioErrorStartTimeTmp) > this.ioErrorsTolerationDuration) { - LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration + - "ms, disabling cache, please check your IOEngine"); + LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration + + "ms, disabling cache, please check your IOEngine"); disableCache(); } } else { @@ -1323,7 +1325,8 @@ private void disableCache() { cacheEnabled = false; ioEngine.shutdown(); this.scheduleThreadPool.shutdown(); - for (int i = 0; i < writerThreads.length; ++i) writerThreads[i].interrupt(); + for (int i = 0; i < writerThreads.length; ++i) + writerThreads[i].interrupt(); this.ramCache.clear(); if (!ioEngine.isPersistent() || persistencePath == null) { // If persistent ioengine and a path, we will serialize out the backingMap. 
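// A condensed standalone sketch of the deleteFileOnClose idea in the hunk above: subclass
// FileInputStream so close() also deletes the file, which lets try-with-resources handle
// "read then delete" without a finally block that could mask an earlier exception.
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

public class SelfDeletingStreamSketch {
  static FileInputStream deleteOnClose(final File file) throws IOException {
    return new FileInputStream(file) {
      @Override
      public void close() throws IOException {
        super.close();                                   // release the descriptor first
        if (file.exists() && !file.delete()) {
          throw new IOException("Failed to delete " + file);
        }
      }
    };
  }

  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("persisted-cache", ".bin");
    try (FileInputStream in = deleteOnClose(f)) {
      System.out.println("readable bytes: " + in.available());
    }
    System.out.println("exists after close: " + f.exists()); // false
  }
}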
@@ -1339,8 +1342,8 @@ private void join() throws InterruptedException { @Override public void shutdown() { disableCache(); - LOG.info("Shutdown bucket cache: IO persistent=" + ioEngine.isPersistent() - + "; path to write=" + persistencePath); + LOG.info("Shutdown bucket cache: IO persistent=" + ioEngine.isPersistent() + "; path to write=" + + persistencePath); if (ioEngine.isPersistent() && persistencePath != null) { try { join(); @@ -1405,19 +1408,17 @@ protected String getAlgorithm() { * Evicts all blocks for a specific HFile. *

          * This is used for evict-on-close to remove all blocks of a specific HFile. - * * @return the number of blocks evicted */ @Override public int evictBlocksByHfileName(String hfileName) { - Set keySet = blocksByHFile.subSet( - new BlockCacheKey(hfileName, Long.MIN_VALUE), true, - new BlockCacheKey(hfileName, Long.MAX_VALUE), true); + Set keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), + true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true); int numEvicted = 0; for (BlockCacheKey key : keySet) { if (evictBlock(key)) { - ++numEvicted; + ++numEvicted; } } @@ -1425,10 +1426,9 @@ public int evictBlocksByHfileName(String hfileName) { } /** - * Used to group bucket entries into priority buckets. There will be a - * BucketEntryGroup for each priority (single, multi, memory). Once bucketed, - * the eviction algorithm takes the appropriate number of elements out of each - * according to configuration parameters and their relative sizes. + * Used to group bucket entries into priority buckets. There will be a BucketEntryGroup for each + * priority (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate + * number of elements out of each according to configuration parameters and their relative sizes. */ private class BucketEntryGroup { @@ -1563,8 +1563,7 @@ void stopWriterThreads() throws InterruptedException { @Override public Iterator iterator() { // Don't bother with ramcache since stuff is in here only a little while. - final Iterator> i = - this.backingMap.entrySet().iterator(); + final Iterator> i = this.backingMap.entrySet().iterator(); return new Iterator() { private final long now = System.nanoTime(); @@ -1589,7 +1588,7 @@ public BlockPriority getBlockPriority() { @Override public BlockType getBlockType() { - // Not held by BucketEntry. Could add it if wanted on BucketEntry creation. + // Not held by BucketEntry. Could add it if wanted on BucketEntry creation. return null; } @@ -1621,8 +1620,8 @@ public int compareTo(CachedBlock other) { diff = Long.compare(this.getOffset(), other.getOffset()); if (diff != 0) return diff; if (other.getCachedTime() < 0 || this.getCachedTime() < 0) { - throw new IllegalStateException("" + this.getCachedTime() + ", " + - other.getCachedTime()); + throw new IllegalStateException( + "" + this.getCachedTime() + ", " + other.getCachedTime()); } return Long.compare(other.getCachedTime(), this.getCachedTime()); } @@ -1635,7 +1634,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { if (obj instanceof CachedBlock) { - CachedBlock cb = (CachedBlock)obj; + CachedBlock cb = (CachedBlock) obj; return compareTo(cb) == 0; } else { return false; @@ -1695,11 +1694,11 @@ static class RAMCache { /** * Defined the map as {@link ConcurrentHashMap} explicitly here, because in * {@link RAMCache#get(BlockCacheKey)} and - * {@link RAMCache#putIfAbsent(BlockCacheKey, BucketCache.RAMQueueEntry)} , we need to - * guarantee the atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). - * Besides, the func method can execute exactly once only when the key is present(or absent) - * and under the lock context. Otherwise, the reference count of block will be messed up. - * Notice that the {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. + * {@link RAMCache#putIfAbsent(BlockCacheKey, BucketCache.RAMQueueEntry)} , we need to guarantee + * the atomicity of map#computeIfPresent(key, func) and map#putIfAbsent(key, func). 
Besides, the + * func method can execute exactly once only when the key is present(or absent) and under the + * lock context. Otherwise, the reference count of block will be messed up. Notice that the + * {@link java.util.concurrent.ConcurrentSkipListMap} can not guarantee that. */ final ConcurrentHashMap delegate = new ConcurrentHashMap<>(); @@ -1733,7 +1732,8 @@ public RAMQueueEntry putIfAbsent(BlockCacheKey key, RAMQueueEntry entry) { } public boolean remove(BlockCacheKey key) { - return remove(key, re->{}); + return remove(key, re -> { + }); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java index 4a2b0a13590d..472b30598b75 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java @@ -1,29 +1,27 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** * Class that implements cache metrics for bucket cache. 
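The RAMCache javadoc being reflowed above spells out why the delegate must be a ConcurrentHashMap: the functions passed to computeIfPresent/putIfAbsent have to run exactly once, under the map's per-key lock, or the block reference counts get corrupted. As a rough standalone sketch of that pattern (not the HBase classes themselves; the Entry type and counters below are hypothetical stand-ins):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class RefCountedCacheSketch {
  /** Hypothetical stand-in for RAMQueueEntry: a value with a reference count. */
  static final class Entry {
    final String payload;
    final AtomicInteger refCnt = new AtomicInteger(1); // the creator's own reference
    Entry(String payload) { this.payload = payload; }
    void retain() { refCnt.incrementAndGet(); }
    void release() { refCnt.decrementAndGet(); }
  }

  private final ConcurrentHashMap<String, Entry> delegate = new ConcurrentHashMap<>();

  /** Insert only if absent; the retain() runs exactly once, under the map's per-key lock. */
  Entry putIfAbsent(String key, Entry entry) {
    boolean[] inserted = new boolean[1];
    Entry current = delegate.computeIfAbsent(key, k -> {
      entry.retain();      // executed only when the key is really absent, never double-counted
      inserted[0] = true;
      return entry;
    });
    return inserted[0] ? null : current; // mimic Map#putIfAbsent's return convention
  }

  /** Look up and retain atomically, so a concurrent remove cannot free the entry under us. */
  Entry get(String key) {
    return delegate.computeIfPresent(key, (k, v) -> {
      v.retain();
      return v;
    });
  }

  /** Remove and release exactly once, only if the key is still present. */
  boolean remove(String key) {
    Entry[] removed = new Entry[1];
    delegate.computeIfPresent(key, (k, v) -> {
      removed[0] = v;
      v.release();
      return null; // returning null removes the mapping
    });
    return removed[0] != null;
  }

  public static void main(String[] args) {
    RefCountedCacheSketch cache = new RefCountedCacheSketch();
    cache.putIfAbsent("block-1", new Entry("data"));
    Entry e = cache.get("block-1");
    System.out.println("present=" + (e != null) + ", refCnt=" + e.refCnt.get());
    System.out.println("removed=" + cache.remove("block-1"));
  }
}
```

A plain ConcurrentSkipListMap would not give the same guarantee, which is exactly the point the reflowed javadoc makes.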
@@ -46,9 +44,8 @@ public class BucketCacheStats extends CacheStats { @Override public String toString() { - return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() + - ", ioTimePerHit=" + getIOTimePerHit() + ", allocationFailCount=" + - getAllocationFailCount(); + return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() + ", ioTimePerHit=" + + getIOTimePerHit() + ", allocationFailCount=" + getAllocationFailCount(); } public void ioHit(long time) { @@ -79,7 +76,7 @@ public long getAllocationFailCount() { return allocationFailCount.sum(); } - public void allocationFailed () { + public void allocationFailed() { allocationFailCount.increment(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java index 222cd804112d..725b9109ceed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,7 +10,6 @@ * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -26,7 +23,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; - import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler; import org.apache.hadoop.hbase.io.hfile.BlockPriority; @@ -102,8 +98,7 @@ class BucketEntry implements HBaseReferenceCounted { * becoming 0. NOTICE that {@link ByteBuffAllocator#NONE} could only be used for test. */ BucketEntry(long offset, int length, long accessCounter, boolean inMemory, - Function createRecycler, - ByteBuffAllocator allocator) { + Function createRecycler, ByteBuffAllocator allocator) { if (createRecycler == null) { throw new IllegalArgumentException("createRecycler could not be null!"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java index b2a00f1795e5..6209bd055541 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,7 +10,6 @@ * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and @@ -24,7 +21,6 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; - import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; @@ -32,9 +28,10 @@ import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager; import org.apache.hadoop.hbase.io.hfile.HFileBlock; -import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos; @InterfaceAudience.Private @@ -44,39 +41,32 @@ private BucketProtoUtils() { } static BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache) { - return BucketCacheProtos.BucketCacheEntry.newBuilder() - .setCacheCapacity(cache.getMaxSize()) - .setIoClass(cache.ioEngine.getClass().getName()) - .setMapClass(cache.backingMap.getClass().getName()) - .putAllDeserializers(CacheableDeserializerIdManager.save()) - .setBackingMap(BucketProtoUtils.toPB(cache.backingMap)) - .setChecksum(ByteString.copyFrom(((PersistentIOEngine) cache.ioEngine). - calculateChecksum(cache.getAlgorithm()))).build(); + return BucketCacheProtos.BucketCacheEntry.newBuilder().setCacheCapacity(cache.getMaxSize()) + .setIoClass(cache.ioEngine.getClass().getName()) + .setMapClass(cache.backingMap.getClass().getName()) + .putAllDeserializers(CacheableDeserializerIdManager.save()) + .setBackingMap(BucketProtoUtils.toPB(cache.backingMap)).setChecksum(ByteString.copyFrom( + ((PersistentIOEngine) cache.ioEngine).calculateChecksum(cache.getAlgorithm()))) + .build(); } - private static BucketCacheProtos.BackingMap toPB( - Map backingMap) { + private static BucketCacheProtos.BackingMap toPB(Map backingMap) { BucketCacheProtos.BackingMap.Builder builder = BucketCacheProtos.BackingMap.newBuilder(); for (Map.Entry entry : backingMap.entrySet()) { - builder.addEntry(BucketCacheProtos.BackingMapEntry.newBuilder() - .setKey(toPB(entry.getKey())) - .setValue(toPB(entry.getValue())) - .build()); + builder.addEntry(BucketCacheProtos.BackingMapEntry.newBuilder().setKey(toPB(entry.getKey())) + .setValue(toPB(entry.getValue())).build()); } return builder.build(); } private static BucketCacheProtos.BlockCacheKey toPB(BlockCacheKey key) { - return BucketCacheProtos.BlockCacheKey.newBuilder() - .setHfilename(key.getHfileName()) - .setOffset(key.getOffset()) - .setPrimaryReplicaBlock(key.isPrimary()) - .setBlockType(toPB(key.getBlockType())) - .build(); + return BucketCacheProtos.BlockCacheKey.newBuilder().setHfilename(key.getHfileName()) + .setOffset(key.getOffset()).setPrimaryReplicaBlock(key.isPrimary()) + .setBlockType(toPB(key.getBlockType())).build(); } private static BucketCacheProtos.BlockType toPB(BlockType blockType) { - switch(blockType) { + switch (blockType) { case DATA: return BucketCacheProtos.BlockType.data; case META: @@ -107,13 +97,9 @@ private static BucketCacheProtos.BlockType toPB(BlockType blockType) { } private static BucketCacheProtos.BucketEntry toPB(BucketEntry entry) { - return BucketCacheProtos.BucketEntry.newBuilder() - .setOffset(entry.offset()) - .setLength(entry.getLength()) - .setDeserialiserIndex(entry.deserializerIndex) - .setAccessCounter(entry.getAccessCounter()) - 
.setPriority(toPB(entry.getPriority())) - .build(); + return BucketCacheProtos.BucketEntry.newBuilder().setOffset(entry.offset()) + .setLength(entry.getLength()).setDeserialiserIndex(entry.deserializerIndex) + .setAccessCounter(entry.getAccessCounter()).setPriority(toPB(entry.getPriority())).build(); } private static BucketCacheProtos.BlockPriority toPB(BlockPriority p) { @@ -129,9 +115,8 @@ private static BucketCacheProtos.BlockPriority toPB(BlockPriority p) { } } - static ConcurrentHashMap fromPB( - Map deserializers, BucketCacheProtos.BackingMap backingMap, - Function createRecycler) + static ConcurrentHashMap fromPB(Map deserializers, + BucketCacheProtos.BackingMap backingMap, Function createRecycler) throws IOException { ConcurrentHashMap result = new ConcurrentHashMap<>(); for (BucketCacheProtos.BackingMapEntry entry : backingMap.getEntryList()) { @@ -141,9 +126,7 @@ static ConcurrentHashMap fromPB( BucketCacheProtos.BucketEntry protoValue = entry.getValue(); // TODO:We use ByteBuffAllocator.HEAP here, because we could not get the ByteBuffAllocator // which created by RpcServer elegantly. - BucketEntry value = new BucketEntry( - protoValue.getOffset(), - protoValue.getLength(), + BucketEntry value = new BucketEntry(protoValue.getOffset(), protoValue.getLength(), protoValue.getAccessCounter(), protoValue.getPriority() == BucketCacheProtos.BlockPriority.memory, createRecycler, ByteBuffAllocator.HEAP); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java index b0415e3e50ba..0bec16308039 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java @@ -1,31 +1,29 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.ByteBufferAllocator; import org.apache.hadoop.hbase.util.ByteBufferArray; +import org.apache.yetus.audience.InterfaceAudience; /** * IO engine that stores data in memory using an array of ByteBuffers {@link ByteBufferArray}. @@ -78,13 +76,12 @@ public ByteBufferIOEngine(long capacity) throws IOException { @Override public String toString() { - return "ioengine=" + this.getClass().getSimpleName() + ", capacity=" + - String.format("%,d", this.capacity); + return "ioengine=" + this.getClass().getSimpleName() + ", capacity=" + + String.format("%,d", this.capacity); } /** - * Memory IO engine is always unable to support persistent storage for the - * cache + * Memory IO engine is always unable to support persistent storage for the cache * @return false */ @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java index d2cbdb7b16c6..15c7ee3236cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.java @@ -1,30 +1,27 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
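CacheFullException, whose header is rewritten here, is documented as thrown by BucketAllocator.allocateBlock(int) when the cache cannot satisfy the requested size. A toy illustration of that exception-driven flow (made-up allocator and names, not the real BucketAllocator):

```java
import java.io.IOException;

public class CacheFullSketch {
  /** Toy analogue of CacheFullException: signals that the cache has no room for this request. */
  static class CacheFullException extends IOException {
    CacheFullException(int requestedSize, long freeBytes) {
      super("requested " + requestedSize + " bytes, only " + freeBytes + " free");
    }
  }

  /** Toy allocator with a fixed capacity; it throws instead of over-committing. */
  static class ToyAllocator {
    private final long capacity;
    private long used;
    ToyAllocator(long capacity) { this.capacity = capacity; }

    long allocateBlock(int blockSize) throws CacheFullException {
      if (blockSize <= 0) {
        throw new IllegalArgumentException("blockSize must be positive");
      }
      if (used + blockSize > capacity) {
        throw new CacheFullException(blockSize, capacity - used);
      }
      long offset = used;
      used += blockSize;
      return offset; // offset of the newly allocated block
    }
  }

  public static void main(String[] args) {
    ToyAllocator allocator = new ToyAllocator(1024);
    try {
      System.out.println("first block at offset " + allocator.allocateBlock(800));
      allocator.allocateBlock(400); // does not fit any more
    } catch (CacheFullException e) {
      // A caller could react by evicting entries or skipping the cache for this block.
      System.out.println("cache full: " + e.getMessage());
    }
  }
}
```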
*/ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown by {@link BucketAllocator#allocateBlock(int)} when cache is full for - * the requested size + * Thrown by {@link BucketAllocator#allocateBlock(int)} when cache is full for the requested size */ @InterfaceAudience.Private public class CacheFullException extends IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java index b4e77bd2348e..279c59fa9fb0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java @@ -1,49 +1,44 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; - import java.util.Comparator; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.MinMaxPriorityQueue; /** - * A memory-bound queue that will grow until an element brings total size larger - * than maxSize. From then on, only entries that are sorted larger than the - * smallest current entry will be inserted/replaced. - * + * A memory-bound queue that will grow until an element brings total size larger than maxSize. From + * then on, only entries that are sorted larger than the smallest current entry will be + * inserted/replaced. *
          - * Use this when you want to find the largest elements (according to their - * ordering, not their heap size) that consume as close to the specified maxSize - * as possible. Default behavior is to grow just above rather than just below - * specified max. + * Use this when you want to find the largest elements (according to their ordering, not their heap + * size) that consume as close to the specified maxSize as possible. Default behavior is to grow + * just above rather than just below specified max. */ @InterfaceAudience.Private public class CachedEntryQueue { private static final Comparator> COMPARATOR = - (a, b) -> BucketEntry.COMPARATOR.compare(a.getValue(), b.getValue()); + (a, b) -> BucketEntry.COMPARATOR.compare(a.getValue(), b.getValue()); private MinMaxPriorityQueue> queue; @@ -69,15 +64,15 @@ public CachedEntryQueue(long maxSize, long blockSize) { /** * Attempt to add the specified entry to this queue. *
          - * If the queue is smaller than the max size, or if the specified element is - * ordered after the smallest element in the queue, the element will be added - * to the queue. Otherwise, there is no side effect of this call. + * If the queue is smaller than the max size, or if the specified element is ordered after the + * smallest element in the queue, the element will be added to the queue. Otherwise, there is no + * side effect of this call. * @param entry a bucket entry with key to try to add to the queue */ @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", - justification = "head can not be null as cacheSize is greater than maxSize," - + " which means we have something in the queue") + value = "NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE", + justification = "head can not be null as cacheSize is greater than maxSize," + + " which means we have something in the queue") public void add(Map.Entry entry) { if (cacheSize < maxSize) { queue.add(entry); @@ -98,16 +93,14 @@ public void add(Map.Entry entry) { } /** - * @return The next element in this queue, or {@code null} if the queue is - * empty. + * @return The next element in this queue, or {@code null} if the queue is empty. */ public Map.Entry poll() { return queue.poll(); } /** - * @return The last element in this queue, or {@code null} if the queue is - * empty. + * @return The last element in this queue, or {@code null} if the queue is empty. */ public Map.Entry pollLast() { return queue.pollLast(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java index 3169a66539aa..da5f49596c2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java @@ -1,23 +1,23 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
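The CachedEntryQueue javadoc reflowed above describes a memory-bound queue that keeps the largest entries: it grows until the total size passes maxSize, and from then on a new entry only gets in by displacing the smallest one. A simplified, single-threaded sketch of that behaviour (the real class uses Guava's MinMaxPriorityQueue and HBase's BucketEntry ordering; the Entry record below is hypothetical):

```java
import java.util.PriorityQueue;

public class BoundedLargestQueueSketch {
  /** Hypothetical entry: an ordering value plus the heap size it accounts for. */
  record Entry(long priority, long sizeBytes) {}

  private final PriorityQueue<Entry> queue =
    new PriorityQueue<>((a, b) -> Long.compare(a.priority(), b.priority())); // min-heap: smallest first
  private final long maxSizeBytes;
  private long totalBytes;

  public BoundedLargestQueueSketch(long maxSizeBytes) {
    this.maxSizeBytes = maxSizeBytes;
  }

  /** Grow freely until maxSize is reached; afterwards only keep entries larger than the smallest. */
  public void add(Entry entry) {
    if (totalBytes < maxSizeBytes) {
      queue.add(entry);
      totalBytes += entry.sizeBytes();
      return;
    }
    Entry smallest = queue.peek();
    if (smallest != null && entry.priority() > smallest.priority()) {
      queue.poll();                       // drop the current smallest entry
      totalBytes -= smallest.sizeBytes();
      queue.add(entry);
      totalBytes += entry.sizeBytes();
    }
  }

  public Entry poll() {
    Entry e = queue.poll();
    if (e != null) {
      totalBytes -= e.sizeBytes();
    }
    return e;
  }

  public static void main(String[] args) {
    BoundedLargestQueueSketch q = new BoundedLargestQueueSketch(100);
    q.add(new Entry(1, 60));
    q.add(new Entry(5, 60));  // total is now "just above" max, as the javadoc describes
    q.add(new Entry(0, 10));  // not larger than the current smallest entry -> ignored
    q.add(new Entry(9, 10));  // larger -> displaces the smallest entry (priority 1)
    for (Entry e; (e = q.poll()) != null; ) {
      System.out.println("priority=" + e.priority() + " size=" + e.sizeBytes());
    }
  }
}
```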
*/ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; - import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java index e4a2c0b1aeaa..6bfca77bc8e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java @@ -1,20 +1,19 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; @@ -82,9 +81,8 @@ public FileIOEngine(long capacity, boolean maintainPersistence, String... filePa if (totalSpace < sizePerFile) { // The next setting length will throw exception,logging this message // is just used for the detail reason of exception, - String msg = "Only " + StringUtils.byteDesc(totalSpace) - + " total space under " + filePath + ", not enough for requested " - + StringUtils.byteDesc(sizePerFile); + String msg = "Only " + StringUtils.byteDesc(totalSpace) + " total space under " + filePath + + ", not enough for requested " + StringUtils.byteDesc(sizePerFile); LOG.warn(msg); } File file = new File(filePath); @@ -95,8 +93,8 @@ public FileIOEngine(long capacity, boolean maintainPersistence, String... filePa } fileChannels[i] = rafs[i].getChannel(); channelLocks[i] = new ReentrantLock(); - LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile) - + ", on the path:" + filePath); + LOG.info( + "Allocating cache " + StringUtils.byteDesc(sizePerFile) + ", on the path:" + filePath); } catch (IOException fex) { LOG.error("Failed allocating cache on " + filePath, fex); shutdown(); @@ -107,8 +105,8 @@ public FileIOEngine(long capacity, boolean maintainPersistence, String... 
filePa @Override public String toString() { - return "ioengine=" + this.getClass().getSimpleName() + ", paths=" - + Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity); + return "ioengine=" + this.getClass().getSimpleName() + ", paths=" + Arrays.asList(filePaths) + + ", capacity=" + String.format("%,d", this.capacity); } /** @@ -153,7 +151,7 @@ public Cacheable read(BucketEntry be) throws IOException { } void closeFileChannels() { - for (FileChannel fileChannel: fileChannels) { + for (FileChannel fileChannel : fileChannels) { try { fileChannel.close(); } catch (IOException e) { @@ -218,8 +216,8 @@ public void write(ByteBuff srcBuff, long offset) throws IOException { accessFile(writeAccessor, srcBuff, offset); } - private void accessFile(FileAccessor accessor, ByteBuff buff, - long globalOffset) throws IOException { + private void accessFile(FileAccessor accessor, ByteBuff buff, long globalOffset) + throws IOException { int startFileNum = getFileNum(globalOffset); int remainingAccessDataLen = buff.remaining(); int endFileNum = getFileNum(globalOffset + remainingAccessDataLen - 1); @@ -274,8 +272,7 @@ private int getFileNum(long offset) { } int fileNum = (int) (offset / sizePerFile); if (fileNum >= fileChannels.length) { - throw new RuntimeException("Not expected offset " + offset - + " where capacity=" + capacity); + throw new RuntimeException("Not expected offset " + offset + " where capacity=" + capacity); } return fileNum; } @@ -298,31 +295,31 @@ void refreshFileConnection(int accessFileNum, IOException ioe) throws IOExceptio fileChannel.close(); } LOG.warn("Caught ClosedChannelException accessing BucketCache, reopening file: " - + filePaths[accessFileNum], ioe); + + filePaths[accessFileNum], + ioe); rafs[accessFileNum] = new RandomAccessFile(filePaths[accessFileNum], "rw"); fileChannels[accessFileNum] = rafs[accessFileNum].getChannel(); - } finally{ + } finally { channelLock.unlock(); } } private interface FileAccessor { - int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) - throws IOException; + int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) throws IOException; } private static class FileReadAccessor implements FileAccessor { @Override - public int access(FileChannel fileChannel, ByteBuff buff, - long accessOffset) throws IOException { + public int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) + throws IOException { return buff.read(fileChannel, accessOffset); } } private static class FileWriteAccessor implements FileAccessor { @Override - public int access(FileChannel fileChannel, ByteBuff buff, - long accessOffset) throws IOException { + public int access(FileChannel fileChannel, ByteBuff buff, long accessOffset) + throws IOException { return buff.write(fileChannel, accessOffset); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java index c0cb22d0b074..cdc64b3d6864 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.ByteBufferAllocator; @@ -34,8 +33,7 @@ import org.slf4j.LoggerFactory; /** - * IO engine that stores data to a file on the specified file system using memory mapping - * mechanism + * IO engine that stores data to a file on the specified file system using memory mapping mechanism */ @InterfaceAudience.Private public abstract class FileMmapIOEngine extends PersistentIOEngine { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java index 3ffb57ebcf03..0db5c8b7b418 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java @@ -1,33 +1,30 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import java.io.IOException; import java.nio.ByteBuffer; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.yetus.audience.InterfaceAudience; /** - * A class implementing IOEngine interface supports data services for - * {@link BucketCache}. + * A class implementing IOEngine interface supports data services for {@link BucketCache}. 
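The FileIOEngine hunks above (getFileNum, accessFile) reformat code that stripes one logical cache across several equally sized files: a global offset maps to a file index plus an offset within that file, and an access that crosses a file boundary is split into per-file pieces. A small sketch of just that arithmetic (an assumed simplification; the real engine operates on FileChannels and ByteBuffs):

```java
import java.util.ArrayList;
import java.util.List;

public class StripedOffsetSketch {
  /** One contiguous piece of an access, confined to a single backing file. */
  record Segment(int fileIndex, long offsetInFile, long length) {}

  private final long sizePerFile;
  private final int fileCount;

  StripedOffsetSketch(long capacity, int fileCount) {
    this.fileCount = fileCount;
    this.sizePerFile = capacity / fileCount; // same even split the engine performs at construction
  }

  int fileIndex(long globalOffset) {
    int idx = (int) (globalOffset / sizePerFile);
    if (idx >= fileCount) {
      throw new IllegalArgumentException("offset " + globalOffset + " beyond capacity");
    }
    return idx;
  }

  /** Split [globalOffset, globalOffset + length) into per-file segments, in access order. */
  List<Segment> segments(long globalOffset, long length) {
    List<Segment> out = new ArrayList<>();
    long remaining = length;
    long offset = globalOffset;
    while (remaining > 0) {
      int idx = fileIndex(offset);
      long offsetInFile = offset % sizePerFile;
      long chunk = Math.min(remaining, sizePerFile - offsetInFile); // stop at the file boundary
      out.add(new Segment(idx, offsetInFile, chunk));
      offset += chunk;
      remaining -= chunk;
    }
    return out;
  }

  public static void main(String[] args) {
    // 4 GiB cache striped over 4 files of 1 GiB each; a 64 KiB access straddling file 0 and file 1.
    StripedOffsetSketch engine = new StripedOffsetSketch(4L << 30, 4);
    for (Segment s : engine.segments((1L << 30) - 4096, 64 * 1024)) {
      System.out.println(s);
    }
  }
}
```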
*/ @InterfaceAudience.Private public interface IOEngine { @@ -57,8 +54,7 @@ default boolean usesSharedMemory() { /** * Transfers data from the given byte buffer to IOEngine * @param srcBuffer the given byte buffer from which bytes are to be read - * @param offset The offset in the IO engine where the first byte to be - * written + * @param offset The offset in the IO engine where the first byte to be written * @throws IOException */ void write(ByteBuffer srcBuffer, long offset) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java index 4ee7d0ed1bec..9fb7b6683059 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/PersistentIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.Shell; import org.apache.yetus.audience.InterfaceAudience; @@ -29,13 +28,13 @@ import org.slf4j.LoggerFactory; /** - * A class implementing PersistentIOEngine interface supports file integrity verification - * for {@link BucketCache} which use persistent IOEngine + * A class implementing PersistentIOEngine interface supports file integrity verification for + * {@link BucketCache} which use persistent IOEngine */ @InterfaceAudience.Private public abstract class PersistentIOEngine implements IOEngine { private static final Logger LOG = LoggerFactory.getLogger(PersistentIOEngine.class); - private static final DuFileCommand DU = new DuFileCommand(new String[] {"du", ""}); + private static final DuFileCommand DU = new DuFileCommand(new String[] { "du", "" }); protected final String[] filePaths; public PersistentIOEngine(String... filePaths) { @@ -47,12 +46,12 @@ public PersistentIOEngine(String... filePaths) { * @param algorithm the backingMap persistence path */ protected void verifyFileIntegrity(byte[] persistentChecksum, String algorithm) - throws IOException { + throws IOException { byte[] calculateChecksum = calculateChecksum(algorithm); if (!Bytes.equals(persistentChecksum, calculateChecksum)) { - throw new IOException("Mismatch of checksum! The persistent checksum is " + - Bytes.toString(persistentChecksum) + ", but the calculate checksum is " + - Bytes.toString(calculateChecksum)); + throw new IOException( + "Mismatch of checksum! 
The persistent checksum is " + Bytes.toString(persistentChecksum) + + ", but the calculate checksum is " + Bytes.toString(calculateChecksum)); } } @@ -65,7 +64,7 @@ protected void verifyFileIntegrity(byte[] persistentChecksum, String algorithm) protected byte[] calculateChecksum(String algorithm) { try { StringBuilder sb = new StringBuilder(); - for (String filePath : filePaths){ + for (String filePath : filePaths) { File file = new File(filePath); sb.append(filePath); sb.append(getFileSize(filePath)); @@ -113,4 +112,3 @@ public String[] getExecString() { } } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java index 53690602093a..77c881888fc8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/package-info.java index 43c92bb4c45e..8fd4e15028c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/package-info.java @@ -1,23 +1,16 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
*/ /** * Provides {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}, an implementation of - * {@link org.apache.hadoop.hbase.io.hfile.BlockCache}. - * See {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} for how it works. + * {@link org.apache.hadoop.hbase.io.hfile.BlockCache}. See + * {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} for how it works. */ package org.apache.hadoop.hbase.io.hfile.bucket; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java index d4a279cb996d..d59032178def 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java @@ -1,44 +1,33 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** * Provides implementations of {@link org.apache.hadoop.hbase.io.hfile.HFile} and HFile - * {@link org.apache.hadoop.hbase.io.hfile.BlockCache}. Caches are configured (and instantiated) - * by {@link org.apache.hadoop.hbase.io.hfile.CacheConfig}. See head of the - * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig} class for constants that define - * cache options and configuration keys to use setting cache options. Cache implementations - * include the default, native on-heap {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} and a + * {@link org.apache.hadoop.hbase.io.hfile.BlockCache}. Caches are configured (and instantiated) by + * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig}. See head of the + * {@link org.apache.hadoop.hbase.io.hfile.CacheConfig} class for constants that define cache + * options and configuration keys to use setting cache options. 
Cache implementations include the + * default, native on-heap {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} and a * {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} that has a bunch of deploy formats - * including acting as a L2 for LruBlockCache -- when a block is evicted from LruBlockCache, it - * goes to the BucketCache and when we search a block, we look in both places -- or, the - * most common deploy type, - * using {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}, BucketCache is used as - * a host for data blocks with meta blocks in an instance of LruBlockCache. BucketCache - * can also be onheap, offheap, and file-backed. - * - *
Which BlockCache should I use?
          - * By default LruBlockCache is on. If you would like to cache more, and offheap (offheap - * usually means less GC headache), try enabling * BucketCache. Fetching will always - * be slower when fetching from BucketCache but latencies tend to be less erratic over time - * (roughly because GC is less). See Nick Dimiduk's + * including acting as a L2 for LruBlockCache -- when a block is evicted from LruBlockCache, it goes + * to the BucketCache and when we search a block, we look in both places -- or, the most common + * deploy type, using {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}, BucketCache is + * used as a host for data blocks with meta blocks in an instance of LruBlockCache. BucketCache can + * also be onheap, offheap, and file-backed. + *
Which BlockCache should I use?
          By default LruBlockCache is on. If you would like to + * cache more, and offheap (offheap usually means less GC headache), try enabling * BucketCache. + * Fetching will always be slower when fetching from BucketCache but latencies tend to be less + * erratic over time (roughly because GC is less). See Nick Dimiduk's * BlockCache 101 for some numbers. - * - *
Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}
          - * See the HBase Reference Guide Enable BucketCache. - * + *
Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}
          See the HBase + * Reference Guide Enable + * BucketCache. */ package org.apache.hadoop.hbase.io.hfile; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java index b1f298e3772f..27ceb76598e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,13 @@ import java.lang.management.ManagementFactory; import java.lang.management.MemoryType; import java.lang.management.MemoryUsage; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.regionserver.MemStoreLAB; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.MemStoreLAB; -import org.apache.hadoop.hbase.util.Pair; /** * Util class to calculate memory size for memstore, block cache(L1, L2) of RS. @@ -55,15 +54,15 @@ public class MemorySizeUtil { // a constant to convert a fraction to a percentage private static final int CONVERT_TO_PERCENTAGE = 100; - private static final String JVM_HEAP_EXCEPTION = "Got an exception while attempting to read " + - "information about the JVM heap. Please submit this log information in a bug report and " + - "include your JVM settings, specifically the GC in use and any -XX options. Consider " + - "restarting the service."; + private static final String JVM_HEAP_EXCEPTION = "Got an exception while attempting to read " + + "information about the JVM heap. Please submit this log information in a bug report and " + + "include your JVM settings, specifically the GC in use and any -XX options. Consider " + + "restarting the service."; /** * Return JVM memory statistics while properly handling runtime exceptions from the JVM. - * @return a memory usage object, null if there was a runtime exception. (n.b. you - * could also get -1 values back from the JVM) + * @return a memory usage object, null if there was a runtime exception. (n.b. you could also get + * -1 values back from the JVM) * @see MemoryUsage */ public static MemoryUsage safeGetHeapMemoryUsage() { @@ -86,12 +85,11 @@ public static void checkForClusterFreeHeapMemoryLimit(Configuration conf) { LOG.warn(MEMSTORE_SIZE_OLD_KEY + " is deprecated by " + MEMSTORE_SIZE_KEY); } float globalMemstoreSize = getGlobalMemStoreHeapPercent(conf, false); - int gml = (int)(globalMemstoreSize * CONVERT_TO_PERCENTAGE); + int gml = (int) (globalMemstoreSize * CONVERT_TO_PERCENTAGE); float blockCacheUpperLimit = getBlockCacheHeapPercent(conf); - int bcul = (int)(blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); - if (CONVERT_TO_PERCENTAGE - (gml + bcul) - < (int)(CONVERT_TO_PERCENTAGE * - HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD)) { + int bcul = (int) (blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); + if (CONVERT_TO_PERCENTAGE - (gml + bcul) < (int) (CONVERT_TO_PERCENTAGE + * HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD)) { throw new RuntimeException("Current heap configuration for MemStore and BlockCache exceeds " + "the threshold required for successful cluster operation. 
" + "The combined value cannot exceed 0.8. Please check " @@ -109,8 +107,8 @@ public static void checkForClusterFreeHeapMemoryLimit(Configuration conf) { */ public static float getGlobalMemStoreHeapPercent(final Configuration c, final boolean logInvalid) { - float limit = c.getFloat(MEMSTORE_SIZE_KEY, - c.getFloat(MEMSTORE_SIZE_OLD_KEY, DEFAULT_MEMSTORE_SIZE)); + float limit = + c.getFloat(MEMSTORE_SIZE_KEY, c.getFloat(MEMSTORE_SIZE_OLD_KEY, DEFAULT_MEMSTORE_SIZE)); if (limit > 0.8f || limit <= 0.0f) { if (logInvalid) { LOG.warn("Setting global memstore limit to default of " + DEFAULT_MEMSTORE_SIZE @@ -204,7 +202,7 @@ public static long getOnheapGlobalMemStoreSize(Configuration conf) { public static float getBlockCacheHeapPercent(final Configuration conf) { // L1 block cache is always on heap float l1CachePercent = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, - HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); + HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); return l1CachePercent; } @@ -220,25 +218,25 @@ public static long getOnHeapCacheSize(final Configuration conf) { return -1; } if (cachePercentage > 1.0) { - throw new IllegalArgumentException(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + - " must be between 0.0 and 1.0, and not > 1.0"); + throw new IllegalArgumentException( + HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + " must be between 0.0 and 1.0, and not > 1.0"); } long max = -1L; final MemoryUsage usage = safeGetHeapMemoryUsage(); if (usage != null) { max = usage.getMax(); } - float onHeapCacheFixedSize = (float) conf - .getLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, - HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT) / max; + float onHeapCacheFixedSize = + (float) conf.getLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, + HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT) / max; // Calculate the amount of heap to give the heap. - return (onHeapCacheFixedSize > 0 && onHeapCacheFixedSize < cachePercentage) ? - (long) (max * onHeapCacheFixedSize) : - (long) (max * cachePercentage); + return (onHeapCacheFixedSize > 0 && onHeapCacheFixedSize < cachePercentage) + ? (long) (max * onHeapCacheFixedSize) + : (long) (max * cachePercentage); } /** - * @param conf used to read config for bucket cache size. + * @param conf used to read config for bucket cache size. * @return the number of bytes to use for bucket cache, negative if disabled. */ public static long getBucketCacheSize(final Configuration conf) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java index b5b79670d930..493593be7664 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AdaptiveLifoCoDelCallQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,22 +25,17 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.LongAdder; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** - * Adaptive LIFO blocking queue utilizing CoDel algorithm to prevent queue overloading. 
- * - * Implementing {@link BlockingQueue} interface to be compatible with {@link RpcExecutor}. - * - * Currently uses milliseconds internally, need to look into whether we should use - * nanoseconds for timeInterval and minDelay. - * + * Adaptive LIFO blocking queue utilizing CoDel algorithm to prevent queue overloading. Implementing + * {@link BlockingQueue} interface to be compatible with {@link RpcExecutor}. Currently uses + * milliseconds internally, need to look into whether we should use nanoseconds for timeInterval and + * minDelay. * @see Fail at Scale paper - * - * @see - * CoDel version for generic job queues in Wangle library + * @see CoDel + * version for generic job queues in Wangle library */ @InterfaceAudience.Private public class AdaptiveLifoCoDelCallQueue implements BlockingQueue { @@ -88,29 +83,27 @@ public AdaptiveLifoCoDelCallQueue(int capacity, int targetDelay, int interval, /** * Update tunables. - * * @param newCodelTargetDelay new CoDel target delay * @param newCodelInterval new CoDel interval * @param newLifoThreshold new Adaptive Lifo threshold */ public void updateTunables(int newCodelTargetDelay, int newCodelInterval, - double newLifoThreshold) { + double newLifoThreshold) { this.codelTargetDelay = newCodelTargetDelay; this.codelInterval = newCodelInterval; this.lifoThreshold = newLifoThreshold; } /** - * Behaves as {@link LinkedBlockingQueue#take()}, except it will silently - * skip all calls which it thinks should be dropped. - * + * Behaves as {@link LinkedBlockingQueue#take()}, except it will silently skip all calls which it + * thinks should be dropped. * @return the head of this queue * @throws InterruptedException if interrupted while waiting */ @Override public CallRunner take() throws InterruptedException { CallRunner cr; - while(true) { + while (true) { if (((double) queue.size() / this.maxCapacity) > lifoThreshold) { numLifoModeSwitches.increment(); cr = queue.takeLast(); @@ -130,7 +123,7 @@ public CallRunner take() throws InterruptedException { public CallRunner poll() { CallRunner cr; boolean switched = false; - while(true) { + while (true) { if (((double) queue.size() / this.maxCapacity) > lifoThreshold) { // Only count once per switch. if (!switched) { @@ -156,8 +149,8 @@ public CallRunner poll() { /** * @param callRunner to validate - * @return true if this call needs to be skipped based on call timestamp - * and internal queue state (deemed overloaded). + * @return true if this call needs to be skipped based on call timestamp and internal queue state + * (deemed overloaded). 
*/ private boolean needToDrop(CallRunner callRunner) { long now = EnvironmentEdgeManager.currentTime(); @@ -167,9 +160,7 @@ private boolean needToDrop(CallRunner callRunner) { // Try and determine if we should reset // the delay time and determine overload - if (now > intervalTime && - !resetDelay.get() && - !resetDelay.getAndSet(true)) { + if (now > intervalTime && !resetDelay.get() && !resetDelay.getAndSet(true)) { intervalTime = now + codelInterval; isOverloaded.set(localMinDelay > codelTargetDelay); @@ -209,129 +200,128 @@ public String toString() { @Override public CallRunner poll(long timeout, TimeUnit unit) throws InterruptedException { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } - @Override public CallRunner peek() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean remove(Object o) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean contains(Object o) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public Object[] toArray() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public T[] toArray(T[] a) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public void clear() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public int drainTo(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public int drainTo(Collection c, int maxElements) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public Iterator iterator() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean add(CallRunner callRunner) { - throw new 
UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public CallRunner remove() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public CallRunner element() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean addAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean isEmpty() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean containsAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean removeAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean retainAll(Collection c) { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public int remainingCapacity() { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public void put(CallRunner callRunner) throws InterruptedException { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } @Override public boolean offer(CallRunner callRunner, long timeout, TimeUnit unit) throws InterruptedException { - throw new UnsupportedOperationException("This class doesn't support anything," - + " but take() and offer() methods"); + throw new UnsupportedOperationException( + "This class doesn't support anything," + " but take() and offer() methods"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java index a2d0169010eb..70ac3cc81232 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ // to figure out whether it is a meta region or not. @InterfaceAudience.Private public abstract class AnnotationReadingPriorityFunction> - implements PriorityFunction { + implements PriorityFunction { protected final Map annotatedQos; // We need to mock the regionserver instance for some unit tests (set via diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java index 915b82df4261..1c4cd43ddab3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,12 +20,10 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.GatheringByteChannel; - import org.apache.yetus.audience.InterfaceAudience; /** - * Chain of ByteBuffers. - * Used writing out an array of byte buffers. Writes in chunks. + * Chain of ByteBuffers. Used writing out an array of byte buffers. Writes in chunks. */ @InterfaceAudience.Private class BufferChain { @@ -43,15 +41,15 @@ class BufferChain { } /** - * Expensive. Makes a new buffer to hold a copy of what is in contained ByteBuffers. This - * call drains this instance; it cannot be used subsequent to the call. + * Expensive. Makes a new buffer to hold a copy of what is in contained ByteBuffers. This call + * drains this instance; it cannot be used subsequent to the call. * @return A new byte buffer with the content of all contained ByteBuffers. */ - byte [] getBytes() { + byte[] getBytes() { if (!hasRemaining()) throw new IllegalAccessError(); - byte [] bytes = new byte [this.remaining]; + byte[] bytes = new byte[this.remaining]; int offset = 0; - for (ByteBuffer bb: this.buffers) { + for (ByteBuffer bb : this.buffers) { int length = bb.remaining(); bb.get(bytes, offset, length); offset += length; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java index 19a75eae1101..f0163683a9cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallQueueInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
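Editorial aside, not part of the patch: the BufferChain hunk above notes that getBytes() copies everything remaining in the contained ByteBuffers and drains them in the process. A minimal standalone sketch of that copy-and-drain step, with a hypothetical class name:

import java.nio.ByteBuffer;

// Illustrative sketch: reading each buffer advances its position, so the
// chain cannot be reused after being copied into one array.
public class BufferChainSketch {
  static byte[] drainToBytes(ByteBuffer[] buffers) {
    int total = 0;
    for (ByteBuffer bb : buffers) {
      total += bb.remaining();
    }
    byte[] bytes = new byte[total];
    int offset = 0;
    for (ByteBuffer bb : buffers) {
      int length = bb.remaining();
      bb.get(bytes, offset, length); // consumes the buffer's remaining content
      offset += length;
    }
    return bytes;
  }

  public static void main(String[] args) {
    ByteBuffer[] chain = { ByteBuffer.wrap("hello ".getBytes()), ByteBuffer.wrap("world".getBytes()) };
    System.out.println(new String(drainToBytes(chain))); // prints "hello world"
  }
}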
See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,10 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.yetus.audience.InterfaceAudience; - import java.util.HashMap; import java.util.Map; import java.util.Set; - +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class CallQueueInfo { @@ -45,7 +43,8 @@ public Set getCalledMethodNames(String callQueueName) { public long getCallMethodCount(String callQueueName, String methodName) { long methodCount; - Map methodCountMap = callQueueMethodCountsSummary.getOrDefault(callQueueName, null); + Map methodCountMap = + callQueueMethodCountsSummary.getOrDefault(callQueueName, null); if (null != methodCountMap) { methodCount = methodCountMap.getOrDefault(methodName, 0L); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java index 1f85346908ff..fb9500195646 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java @@ -35,19 +35,18 @@ import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** * The request processing logic, which is usually executed in thread pools provided by an - * {@link RpcScheduler}. Call {@link #run()} to actually execute the contained - * RpcServer.Call + * {@link RpcScheduler}. Call {@link #run()} to actually execute the contained RpcServer.Call */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class CallRunner { - private static final CallDroppedException CALL_DROPPED_EXCEPTION - = new CallDroppedException(); + private static final CallDroppedException CALL_DROPPED_EXCEPTION = new CallDroppedException(); private RpcCall call; private RpcServerInterface rpcServer; @@ -57,7 +56,7 @@ public class CallRunner { /** * On construction, adds the size of this call to the running count of outstanding call sizes. - * Presumption is that we are put on a queue while we wait on an executor to run us. During this + * Presumption is that we are put on a queue while we wait on an executor to run us. During this * time we occupy heap. */ // The constructor is shutdown so only RpcServer in this class can make one of these. @@ -118,8 +117,8 @@ public void run() { try (Scope ignored1 = ipcServerSpan.makeCurrent()) { if (!this.rpcServer.isStarted()) { InetSocketAddress address = rpcServer.getListenerAddress(); - throw new ServerNotRunningYetException("Server " + - (address != null ? address : "(channel closed)") + " is not running yet"); + throw new ServerNotRunningYetException( + "Server " + (address != null ? 
address : "(channel closed)") + " is not running yet"); } // make the call resultPair = this.rpcServer.call(call, this.status); @@ -141,7 +140,7 @@ public void run() { errorThrowable = e; error = StringUtils.stringifyException(e); if (e instanceof Error) { - throw (Error)e; + throw (Error) e; } } finally { RpcServer.CurCall.set(null); @@ -164,7 +163,7 @@ public void run() { } catch (OutOfMemoryError e) { TraceUtil.setError(span, e); if (this.rpcServer.getErrorHandler() != null - && this.rpcServer.getErrorHandler().checkOOME(e)) { + && this.rpcServer.getErrorHandler().checkOOME(e)) { RpcServer.LOG.info("{}: exiting on OutOfMemoryError", Thread.currentThread().getName()); // exception intentionally swallowed } else { @@ -173,9 +172,10 @@ public void run() { } } catch (ClosedChannelException cce) { InetSocketAddress address = rpcServer.getListenerAddress(); - RpcServer.LOG.warn("{}: caught a ClosedChannelException, " + - "this means that the server " + (address != null ? address : "(channel closed)") + - " was processing a request but the client went away. The error message was: {}", + RpcServer.LOG.warn( + "{}: caught a ClosedChannelException, " + "this means that the server " + + (address != null ? address : "(channel closed)") + + " was processing a request but the client went away. The error message was: {}", Thread.currentThread().getName(), cce.getMessage()); TraceUtil.setError(span, cce); } catch (Exception e) { @@ -211,15 +211,16 @@ public void drop() { // Set the response InetSocketAddress address = rpcServer.getListenerAddress(); call.setResponse(null, null, CALL_DROPPED_EXCEPTION, "Call dropped, server " - + (address != null ? address : "(channel closed)") + " is overloaded, please retry."); + + (address != null ? address : "(channel closed)") + " is overloaded, please retry."); TraceUtil.setError(span, CALL_DROPPED_EXCEPTION); call.sendResponseIfReady(); this.rpcServer.getMetrics().exception(CALL_DROPPED_EXCEPTION); } catch (ClosedChannelException cce) { InetSocketAddress address = rpcServer.getListenerAddress(); - RpcServer.LOG.warn("{}: caught a ClosedChannelException, " + - "this means that the server " + (address != null ? address : "(channel closed)") + - " was processing a request but the client went away. The error message was: {}", + RpcServer.LOG.warn( + "{}: caught a ClosedChannelException, " + "this means that the server " + + (address != null ? address : "(channel closed)") + + " was processing a request but the client went away. The error message was: {}", Thread.currentThread().getName(), cce.getMessage()); TraceUtil.setError(span, cce); } catch (Exception e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java index 9ca292751d4f..798c3bed959a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,4 +21,5 @@ @InterfaceAudience.Private @SuppressWarnings("serial") -public class EmptyServiceNameException extends FatalConnectionException {} +public class EmptyServiceNameException extends FatalConnectionException { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java index 9e6a0bb103a1..1353b8c67c90 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java @@ -26,10 +26,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Balanced queue executor with a fastpath. Because this is FIFO, it has no respect for - * ordering so a fast path skipping the queuing of Calls if an Handler is available, is possible. - * Just pass the Call direct to waiting Handler thread. Try to keep the hot Handlers bubbling - * rather than let them go cold and lose context. Idea taken from Apace Kudu (incubating). See + * Balanced queue executor with a fastpath. Because this is FIFO, it has no respect for ordering so + * a fast path skipping the queuing of Calls if an Handler is available, is possible. Just pass the + * Call direct to waiting Handler thread. Try to keep the hot Handlers bubbling rather than let them + * go cold and lose context. Idea taken from Apace Kudu (incubating). See * https://gerrit.cloudera.org/#/c/2938/7/src/kudu/rpc/service_queue.h */ @InterfaceAudience.Private @@ -59,18 +59,18 @@ protected RpcHandler getHandler(final String name, final double handlerFailureTh final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, final Abortable abortable) { return new FastPathRpcHandler(name, handlerFailureThreshhold, handlerCount, q, - activeHandlerCount, failedHandlerCount, abortable, fastPathHandlerStack); + activeHandlerCount, failedHandlerCount, abortable, fastPathHandlerStack); } @Override public boolean dispatch(CallRunner callTask) { - //FastPathHandlers don't check queue limits, so if we're completely shut down - //we have to prevent ourselves from using the handler in the first place - if (currentQueueLimit == 0){ + // FastPathHandlers don't check queue limits, so if we're completely shut down + // we have to prevent ourselves from using the handler in the first place + if (currentQueueLimit == 0) { return false; } FastPathRpcHandler handler = popReadyHandler(); - return handler != null? handler.loadCallRunner(callTask): super.dispatch(callTask); + return handler != null ? handler.loadCallRunner(callTask) : super.dispatch(callTask); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java index b07f44900fbb..e9f828d5c136 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRWQueueRpcExecutor.java @@ -31,7 +31,7 @@ * RPC Executor that extends {@link RWQueueRpcExecutor} with fast-path feature, used in * {@link FastPathBalancedQueueRpcExecutor}. 
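Editorial aside, not part of the patch: the FastPathBalancedQueueRpcExecutor Javadoc above describes the fast path, where a call bypasses the queue entirely if a handler is already idle. A simplified standalone sketch of that dispatch shape; IdleHandler and the Semaphore-based handoff are hypothetical simplifications of the real handler stack.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Semaphore;

// Illustrative sketch: hand the task directly to a parked handler if one is
// available, otherwise fall back to the shared queue.
public class FastPathDispatchSketch {
  static class IdleHandler {
    final Semaphore wakeUp = new Semaphore(0);
    volatile Runnable task;

    boolean loadTask(Runnable t) {
      task = t;
      wakeUp.release(); // wake the parked handler thread
      return true;
    }
  }

  private final Deque<IdleHandler> idleHandlers = new ArrayDeque<>();
  private final BlockingQueue<Runnable> queue = new ArrayBlockingQueue<>(100);

  synchronized boolean dispatch(Runnable task) {
    IdleHandler handler = idleHandlers.poll();
    return handler != null ? handler.loadTask(task) : queue.offer(task);
  }
}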
*/ -@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class FastPathRWQueueRpcExecutor extends RWQueueRpcExecutor { @@ -49,10 +49,10 @@ protected RpcHandler getHandler(final String name, final double handlerFailureTh final int handlerCount, final BlockingQueue q, final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, final Abortable abortable) { - Deque handlerStack = name.contains("read") ? readHandlerStack : - name.contains("write") ? writeHandlerStack : scanHandlerStack; + Deque handlerStack = name.contains("read") ? readHandlerStack + : name.contains("write") ? writeHandlerStack : scanHandlerStack; return new FastPathRpcHandler(name, handlerFailureThreshhold, handlerCount, q, - activeHandlerCount, failedHandlerCount, abortable, handlerStack); + activeHandlerCount, failedHandlerCount, abortable, handlerStack); } @Override @@ -60,9 +60,9 @@ public boolean dispatch(final CallRunner callTask) { RpcCall call = callTask.getRpcCall(); boolean shouldDispatchToWriteQueue = isWriteRequest(call.getHeader(), call.getParam()); boolean shouldDispatchToScanQueue = shouldDispatchToScanQueue(callTask); - FastPathRpcHandler handler = shouldDispatchToWriteQueue ? writeHandlerStack.poll() : - shouldDispatchToScanQueue ? scanHandlerStack.poll() : readHandlerStack.poll(); - return handler != null ? handler.loadCallRunner(callTask) : - dispatchTo(shouldDispatchToWriteQueue, shouldDispatchToScanQueue, callTask); + FastPathRpcHandler handler = shouldDispatchToWriteQueue ? writeHandlerStack.poll() + : shouldDispatchToScanQueue ? scanHandlerStack.poll() : readHandlerStack.poll(); + return handler != null ? handler.loadCallRunner(callTask) + : dispatchTo(shouldDispatchToWriteQueue, shouldDispatchToScanQueue, callTask); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java index 3064c7aa324d..faff994d9639 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathRpcHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
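Editorial aside, not part of the patch: the FastPathRWQueueRpcExecutor hunks above classify a call as write, scan, or read and poll only the matching stack of idle handlers. A bare-bones sketch of that routing step; IdleHandler and RequestKind are hypothetical stand-ins.

import java.util.ArrayDeque;
import java.util.Deque;

// Illustrative sketch: pick the handler stack that matches the request type.
public class RWFastPathRoutingSketch {
  static class IdleHandler {
  }

  enum RequestKind { WRITE, SCAN, READ }

  private final Deque<IdleHandler> writeHandlerStack = new ArrayDeque<>();
  private final Deque<IdleHandler> scanHandlerStack = new ArrayDeque<>();
  private final Deque<IdleHandler> readHandlerStack = new ArrayDeque<>();

  IdleHandler pollIdleHandler(RequestKind kind) {
    return kind == RequestKind.WRITE ? writeHandlerStack.poll()
      : kind == RequestKind.SCAN ? scanHandlerStack.poll()
      : readHandlerStack.poll();
  }
}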
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public class FastPathRpcHandler extends RpcHandler { AtomicInteger failedHandlerCount, final Abortable abortable, final Deque fastPathHandlerStack) { super(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount, failedHandlerCount, - abortable); + abortable); this.fastPathHandlerStack = fastPathHandlerStack; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java index cfd085ebc771..7a7cfb8efffc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java @@ -28,13 +28,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.util.internal.StringUtil; /** - * A very simple {@code }RpcScheduler} that serves incoming requests in order. - * - * This can be used for HMaster, where no prioritization is needed. + * A very simple {@code }RpcScheduler} that serves incoming requests in order. This can be used for + * HMaster, where no prioritization is needed. */ @InterfaceAudience.Private public class FifoRpcScheduler extends RpcScheduler { @@ -47,7 +47,7 @@ public class FifoRpcScheduler extends RpcScheduler { public FifoRpcScheduler(Configuration conf, int handlerCount) { this.handlerCount = handlerCount; this.maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, - handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); } @Override @@ -60,10 +60,10 @@ public void start() { LOG.info("Using {} as user call queue; handlerCount={}; maxQueueLength={}", this.getClass().getSimpleName(), handlerCount, maxQueueLength); this.executor = new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS, - new ArrayBlockingQueue<>(maxQueueLength), - new ThreadFactoryBuilder().setNameFormat("FifoRpcScheduler.handler-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), - new ThreadPoolExecutor.CallerRunsPolicy()); + new ArrayBlockingQueue<>(maxQueueLength), + new ThreadFactoryBuilder().setNameFormat("FifoRpcScheduler.handler-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + new ThreadPoolExecutor.CallerRunsPolicy()); } @Override @@ -103,7 +103,7 @@ protected boolean executeRpcCall(final ThreadPoolExecutor executor, final Atomic return false; } - executor.execute(new FifoCallRunner(task){ + executor.execute(new FifoCallRunner(task) { @Override public void run() { task.setStatus(RpcServer.getStatus()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java index f8ba186fb3d3..3d1a7aa8ade7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCErrorHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
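Editorial aside, not part of the patch: the FifoRpcScheduler hunks above build a fixed-size pool of daemon handler threads backed by a bounded FIFO queue with CallerRunsPolicy. A standalone sketch of that executor shape, using plain java.util.concurrent instead of the thirdparty ThreadFactoryBuilder:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch: a full queue pushes work back onto the submitting thread
// (CallerRunsPolicy) instead of rejecting it outright.
public class FifoSchedulerPoolSketch {
  static ThreadPoolExecutor newHandlerPool(int handlerCount, int maxQueueLength) {
    AtomicInteger counter = new AtomicInteger();
    return new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS,
      new ArrayBlockingQueue<>(maxQueueLength),
      runnable -> {
        Thread t = new Thread(runnable, "fifo-handler-" + counter.getAndIncrement());
        t.setDaemon(true);
        return t;
      },
      new ThreadPoolExecutor.CallerRunsPolicy());
  }
}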
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; @@ -31,5 +29,5 @@ public interface HBaseRPCErrorHandler { * @param e the throwable * @return if the server should be shut down */ - boolean checkOOME(final Throwable e) ; + boolean checkOOME(final Throwable e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java index a52aa7e759a9..e42f31fb44ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java @@ -28,6 +28,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -70,15 +71,15 @@ public void start() { this.getClass().getSimpleName(), handlerCount, maxQueueLength, rsReportHandlerCount, rsRsreportMaxQueueLength); this.executor = new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS, - new ArrayBlockingQueue<>(maxQueueLength), - new ThreadFactoryBuilder().setNameFormat("MasterFifoRpcScheduler.call.handler-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), - new ThreadPoolExecutor.CallerRunsPolicy()); + new ArrayBlockingQueue<>(maxQueueLength), + new ThreadFactoryBuilder().setNameFormat("MasterFifoRpcScheduler.call.handler-pool-%d") + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + new ThreadPoolExecutor.CallerRunsPolicy()); this.rsReportExecutor = new ThreadPoolExecutor(rsReportHandlerCount, rsReportHandlerCount, 60, - TimeUnit.SECONDS, new ArrayBlockingQueue<>(rsRsreportMaxQueueLength), - new ThreadFactoryBuilder().setNameFormat("MasterFifoRpcScheduler.RSReport.handler-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), - new ThreadPoolExecutor.CallerRunsPolicy()); + TimeUnit.SECONDS, new ArrayBlockingQueue<>(rsRsreportMaxQueueLength), + new ThreadFactoryBuilder().setNameFormat("MasterFifoRpcScheduler.RSReport.handler-pool-%d") + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + new ThreadPoolExecutor.CallerRunsPolicy()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java index c9e4270d918c..8629fa191554 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetaRWQueueRpcExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
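Editorial aside, not part of the patch: the MasterFifoRpcScheduler hunks above construct a second, dedicated pool for region server report handling alongside the general call pool. A rough sketch of that split; the routing by method name and the name "RegionServerReport" are assumptions for illustration, not taken from this diff.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Illustrative sketch: give report traffic its own executor so it and the rest
// of the master RPCs cannot starve each other.
public class DedicatedReportPoolSketch {
  private final ExecutorService callExecutor = Executors.newFixedThreadPool(10);
  private final ExecutorService rsReportExecutor = Executors.newFixedThreadPool(2);

  void dispatch(String methodName, Runnable call) {
    if ("RegionServerReport".equals(methodName)) { // assumed method name
      rsReportExecutor.execute(call);
    } else {
      callExecutor.execute(call);
    }
  }
}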
*/ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.conf.Configuration; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java index 2e78ef374414..c34446b2f4cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CallQueueTooBigException; +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.UnknownScannerException; -import org.apache.hadoop.hbase.exceptions.RequestTooBigException; -import org.apache.hadoop.hbase.quotas.QuotaExceededException; -import org.apache.hadoop.hbase.quotas.RpcThrottlingException; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; import org.apache.hadoop.hbase.exceptions.RegionMovedException; +import org.apache.hadoop.hbase.exceptions.RequestTooBigException; import org.apache.hadoop.hbase.exceptions.ScannerResetException; +import org.apache.hadoop.hbase.quotas.QuotaExceededException; +import org.apache.hadoop.hbase.quotas.RpcThrottlingException; +import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,7 +45,7 @@ public class MetricsHBaseServer { public MetricsHBaseServer(String serverName, MetricsHBaseServerWrapper wrapper) { serverWrapper = wrapper; source = CompatibilitySingletonFactory.getInstance(MetricsHBaseServerSourceFactory.class) - .create(serverName, wrapper); + .create(serverName, wrapper); } void authorizationSuccess() { @@ -78,9 +76,13 @@ void receivedBytes(int count) { source.receivedBytes(count); } - void sentResponse(long count) { source.sentResponse(count); } + void sentResponse(long count) { + source.sentResponse(count); + } - void receivedRequest(long count) { source.receivedRequest(count); } + void receivedRequest(long count) { + source.receivedRequest(count); + } void dequeuedCall(int qTime) { source.dequeuedCall(qTime); @@ -98,12 +100,9 @@ public void exception(Throwable throwable) { source.exception(); /** - * Keep some metrics for commonly seen exceptions - * - * Try and put the most common types first. - * Place child types before the parent type that they extend. - * - * If this gets much larger we might have to go to a hashmap + * Keep some metrics for commonly seen exceptions Try and put the most common types first. Place + * child types before the parent type that they extend. 
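Editorial aside, not part of the patch: the MetricsHBaseServer comment above says child exception types must be tested before their parents. A tiny self-contained example of why that ordering matters in an instanceof chain (the types here are standard JDK exceptions chosen for illustration, not the HBase ones):

// Illustrative sketch: the first instanceof match wins, so a subclass listed
// after its parent would never be counted under its own metric.
public class ExceptionMetricOrderSketch {
  static String classify(Throwable t) {
    if (t instanceof java.io.FileNotFoundException) {  // child first
      return "fileNotFound";
    } else if (t instanceof java.io.IOException) {     // parent after child
      return "io";
    }
    return "other";
  }

  public static void main(String[] args) {
    System.out.println(classify(new java.io.FileNotFoundException())); // "fileNotFound"
  }
}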
If this gets much larger we might have + * to go to a hashmap */ if (throwable != null) { if (throwable instanceof OutOfOrderScannerNextException) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java index 7df63586ab8c..0b00bba04fb7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.util.DirectMemoryUtils; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java index 01cf9b59d06a..4d056cc94243 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *


- * http://www.apache.org/licenses/LICENSE-2.0 - *


          + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -33,10 +33,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; - /** * Decoder for extracting frame - * * @since 2.0.0 */ @InterfaceAudience.Private @@ -59,8 +57,7 @@ void setConnection(NettyServerRpcConnection connection) { } @Override - protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) - throws Exception { + protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { if (requestTooBig) { handleTooBigRequest(in); return; @@ -78,10 +75,9 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) if (frameLength > maxFrameLength) { requestTooBig = true; - requestTooBigMessage = - "RPC data length of " + frameLength + " received from " + connection.getHostAddress() - + " is greater than max allowed " + connection.rpcServer.maxRequestSize + ". Set \"" - + SimpleRpcServer.MAX_REQUEST_SIZE + requestTooBigMessage = "RPC data length of " + frameLength + " received from " + + connection.getHostAddress() + " is greater than max allowed " + + connection.rpcServer.maxRequestSize + ". Set \"" + SimpleRpcServer.MAX_REQUEST_SIZE + "\" on server to override this limit (not recommended)"; NettyRpcServer.LOG.warn(requestTooBigMessage); @@ -174,10 +170,8 @@ private RPCProtos.RequestHeader getHeader(ByteBuf in, int headerSize) throws IOE } /** - * Reads variable length 32bit int from buffer - * This method is from ProtobufVarint32FrameDecoder in Netty and modified a little bit - * to pass the cyeckstyle rule. - * + * Reads variable length 32bit int from buffer This method is from ProtobufVarint32FrameDecoder in + * Netty and modified a little bit to pass the cyeckstyle rule. * @return decoded int if buffers readerIndex has been forwarded else nonsense value */ private static int readRawVarint32(ByteBuf buffer) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index a3ee71fc6fb2..d2f0b99e85ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -53,16 +53,16 @@ * An RPC server with Netty4 implementation. * @since 2.0.0 */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.CONFIG }) public class NettyRpcServer extends RpcServer { public static final Logger LOG = LoggerFactory.getLogger(NettyRpcServer.class); /** - * Name of property to change netty rpc server eventloop thread count. Default is 0. - * Tests may set this down from unlimited. + * Name of property to change netty rpc server eventloop thread count. Default is 0. Tests may set + * this down from unlimited. 
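Editorial aside, not part of the patch: the NettyRpcFrameDecoder hunks above mention reading the frame length as a protobuf-style varint32 (adapted from Netty's ProtobufVarint32FrameDecoder). A standalone sketch of that decoding, reading from a byte[] rather than a Netty ByteBuf; the class name is hypothetical.

// Illustrative sketch: seven payload bits per byte, least-significant group first,
// high bit set means another byte follows.
public class Varint32Sketch {
  static int readRawVarint32(byte[] buf, int offset) {
    int result = 0;
    for (int i = 0; i < 5; i++) {              // a 32-bit varint is at most 5 bytes
      byte b = buf[offset + i];
      result |= (b & 0x7F) << (7 * i);
      if ((b & 0x80) == 0) {                   // high bit clear: last byte
        return result;
      }
    }
    throw new IllegalArgumentException("malformed varint32");
  }

  public static void main(String[] args) {
    // 300 encodes as the two bytes 0xAC 0x02
    System.out.println(readRawVarint32(new byte[] { (byte) 0xAC, 0x02 }, 0)); // prints 300
  }
}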
*/ public static final String HBASE_NETTY_EVENTLOOP_RPCSERVER_THREADCOUNT_KEY = - "hbase.netty.eventloop.rpcserver.thread.count"; + "hbase.netty.eventloop.rpcserver.thread.count"; private static final int EVENTLOOP_THREADCOUNT_DEFAULT = 0; private final InetSocketAddress bindAddress; @@ -70,7 +70,7 @@ public class NettyRpcServer extends RpcServer { private final CountDownLatch closed = new CountDownLatch(1); private final Channel serverChannel; private final ChannelGroup allChannels = - new DefaultChannelGroup(GlobalEventExecutor.INSTANCE, true); + new DefaultChannelGroup(GlobalEventExecutor.INSTANCE, true); public NettyRpcServer(Server server, String name, List services, InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler, @@ -84,11 +84,11 @@ public NettyRpcServer(Server server, String name, List} */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java index b294db3aa453..0731f993e01b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,22 +17,24 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.hadoop.hbase.security.User; /** * Function to figure priority of incoming request. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public interface PriorityFunction { /** - * Returns the 'priority type' of the specified request. - * The returned value is mainly used to select the dispatch queue. + * Returns the 'priority type' of the specified request. The returned value is mainly used to + * select the dispatch queue. * @param header * @param param * @param user @@ -41,8 +43,8 @@ public interface PriorityFunction { int getPriority(RequestHeader header, Message param, User user); /** - * Returns the deadline of the specified request. - * The returned value is used to sort the dispatch queue. + * Returns the deadline of the specified request. The returned value is used to sort the dispatch + * queue. * @param header * @param param * @return Deadline of this request. 
0 now, otherwise msec of 'delay' diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java index ca1546cd83ae..dc496de6b737 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java index d1141d093edb..a13f5d858235 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QueueBalancer.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java index a5ed6fe0eae7..984681f98e30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RPCTInfoGetter.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos; /** @@ -30,21 +31,18 @@ */ @InterfaceAudience.Private final class RPCTInfoGetter implements TextMapGetter { - RPCTInfoGetter() { } + RPCTInfoGetter() { + } @Override public Iterable keys(TracingProtos.RPCTInfo carrier) { - return Optional.ofNullable(carrier) - .map(TracingProtos.RPCTInfo::getHeadersMap) - .map(Map::keySet) - .orElse(Collections.emptySet()); + return Optional.ofNullable(carrier).map(TracingProtos.RPCTInfo::getHeadersMap).map(Map::keySet) + .orElse(Collections.emptySet()); } @Override public String get(TracingProtos.RPCTInfo carrier, String key) { - return Optional.ofNullable(carrier) - .map(TracingProtos.RPCTInfo::getHeadersMap) - .map(map -> map.get(key)) - .orElse(null); + return Optional.ofNullable(carrier).map(TracingProtos.RPCTInfo::getHeadersMap) + .map(map -> map.get(key)).orElse(null); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java index 835966847a32..8b8289e567b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
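Editorial aside, not part of the patch: the PriorityFunction hunks above describe mapping a request to a priority (used to pick a dispatch queue) and a deadline (used to order a deadline queue, 0 meaning "now"). The standalone stand-in below mirrors that shape but is not the real HBase interface; the method-name heuristic is purely illustrative.

// Illustrative sketch: a priority function decides which queue a call goes to
// and how it is ordered within a deadline-sorted queue.
public class PriorityFunctionSketch {
  interface SimplePriorityFunction {
    int getPriority(String methodName);
    long getDeadline(String methodName);
  }

  static final SimplePriorityFunction FN = new SimplePriorityFunction() {
    @Override
    public int getPriority(String methodName) {
      // e.g. give report/administrative calls a higher-priority queue
      return methodName.startsWith("Report") ? 200 : 0;
    }

    @Override
    public long getDeadline(String methodName) {
      return 0; // 0 means "now"; a positive value would de-prioritize the call
    }
  };
}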
*/ - package org.apache.hadoop.hbase.ipc; import java.util.Queue; @@ -30,7 +28,9 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; @@ -40,11 +40,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; /** - * RPC Executor that uses different queues for reads and writes. - * With the options to use different queues/executors for gets and scans. - * Each handler has its own queue and there is no stealing. + * RPC Executor that uses different queues for reads and writes. With the options to use different + * queues/executors for gets and scans. Each handler has its own queue and there is no stealing. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class RWQueueRpcExecutor extends RpcExecutor { private static final Logger LOG = LoggerFactory.getLogger(RWQueueRpcExecutor.class); @@ -81,8 +80,8 @@ public RWQueueRpcExecutor(final String name, final int handlerCount, final int m int readQueues = calcNumReaders(this.numCallQueues, callqReadShare); int readHandlers = Math.max(readQueues, calcNumReaders(handlerCount, callqReadShare)); - int scanQueues = Math.max(0, (int)Math.floor(readQueues * callqScanShare)); - int scanHandlers = Math.max(0, (int)Math.floor(readHandlers * callqScanShare)); + int scanQueues = Math.max(0, (int) Math.floor(readQueues * callqScanShare)); + int scanHandlers = Math.max(0, (int) Math.floor(readHandlers * callqScanShare)); if ((readQueues - scanQueues) > 0) { readQueues -= scanQueues; @@ -102,15 +101,17 @@ public RWQueueRpcExecutor(final String name, final int handlerCount, final int m initializeQueues(numScanQueues); this.writeBalancer = getBalancer(name, conf, queues.subList(0, numWriteQueues)); - this.readBalancer = getBalancer(name, conf, queues.subList(numWriteQueues, numWriteQueues + numReadQueues)); - this.scanBalancer = numScanQueues > 0 ? - getBalancer(name, conf, queues.subList(numWriteQueues + numReadQueues, - numWriteQueues + numReadQueues + numScanQueues)) : - null; + this.readBalancer = + getBalancer(name, conf, queues.subList(numWriteQueues, numWriteQueues + numReadQueues)); + this.scanBalancer = numScanQueues > 0 + ? 
getBalancer(name, conf, + queues.subList(numWriteQueues + numReadQueues, + numWriteQueues + numReadQueues + numScanQueues)) + : null; LOG.info(getName() + " writeQueues=" + numWriteQueues + " writeHandlers=" + writeHandlersCount - + " readQueues=" + numReadQueues + " readHandlers=" + readHandlersCount + " scanQueues=" - + numScanQueues + " scanHandlers=" + scanHandlersCount); + + " readQueues=" + numReadQueues + " readHandlers=" + readHandlersCount + " scanQueues=" + + numScanQueues + " scanHandlers=" + scanHandlersCount); } @Override @@ -139,7 +140,7 @@ public boolean dispatch(final CallRunner callTask) { } protected boolean dispatchTo(boolean toWriteQueue, boolean toScanQueue, - final CallRunner callTask) { + final CallRunner callTask) { int queueIndex; if (toWriteQueue) { queueIndex = writeBalancer.getNextQueue(callTask); @@ -176,8 +177,8 @@ public int getReadQueueLength() { @Override public int getScanQueueLength() { int length = 0; - for (int i = numWriteQueues + numReadQueues; - i < (numWriteQueues + numReadQueues + numScanQueues); i++) { + for (int i = + numWriteQueues + numReadQueues; i < (numWriteQueues + numReadQueues + numScanQueues); i++) { length += queues.get(i).size(); } return length; @@ -207,9 +208,9 @@ public int getActiveScanHandlerCount() { protected boolean isWriteRequest(final RequestHeader header, final Message param) { // TODO: Is there a better way to do this? if (param instanceof MultiRequest) { - MultiRequest multi = (MultiRequest)param; + MultiRequest multi = (MultiRequest) param; for (RegionAction regionAction : multi.getRegionActionList()) { - for (Action action: regionAction.getActionList()) { + for (Action action : regionAction.getActionList()) { if (action.hasMutation()) { return true; } @@ -267,16 +268,16 @@ protected float getScanShare(final Configuration conf) { } /* - * Calculate the number of writers based on the "total count" and the read share. - * You'll get at least one writer. + * Calculate the number of writers based on the "total count" and the read share. You'll get at + * least one writer. */ private static int calcNumWriters(final int count, final float readShare) { - return Math.max(1, count - Math.max(1, (int)Math.round(count * readShare))); + return Math.max(1, count - Math.max(1, (int) Math.round(count * readShare))); } /* - * Calculate the number of readers based on the "total count" and the read share. - * You'll get at least one reader. + * Calculate the number of readers based on the "total count" and the read share. You'll get at + * least one reader. */ private static int calcNumReaders(final int count, final float readShare) { return count - calcNumWriters(count, readShare); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java index 528affc48049..4e0042e428fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RandomQueueBalancer.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
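Editorial aside, not part of the patch: the RWQueueRpcExecutor hunks above split the call queues between writers, readers, and scanners using calcNumWriters/calcNumReaders and the scan share. A worked example of that arithmetic with example ratios (0.6 read share, 0.5 scan share, chosen for illustration rather than taken from the defaults):

// Illustrative sketch: 10 queues with readShare 0.6 split into 4 write, 3 read, 3 scan.
public class ReadWriteSplitSketch {
  static int calcNumWriters(int count, float readShare) {
    return Math.max(1, count - Math.max(1, Math.round(count * readShare)));
  }

  static int calcNumReaders(int count, float readShare) {
    return count - calcNumWriters(count, readShare);
  }

  public static void main(String[] args) {
    int queues = 10;
    float readShare = 0.6f, scanShare = 0.5f;
    int writeQueues = calcNumWriters(queues, readShare);        // 4
    int readQueues = calcNumReaders(queues, readShare);         // 6
    int scanQueues = (int) Math.floor(readQueues * scanShare);  // 3
    readQueues -= scanQueues;                                   // 3 remain for gets
    System.out.println("write=" + writeQueues + " read=" + readQueues + " scan=" + scanQueues);
  }
}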
*/ - package org.apache.hadoop.hbase.ipc; import java.util.List; @@ -35,7 +33,8 @@ public class RandomQueueBalancer implements QueueBalancer { private final int queueSize; private final List> queues; - public RandomQueueBalancer(Configuration conf, String executorName, List> queues) { + public RandomQueueBalancer(Configuration conf, String executorName, + List> queues) { this.queueSize = queues.size(); this.queues = queues; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java index 7571ac1539c2..a8d1baa68217 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,25 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; - import java.io.IOException; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** * Interface of all necessary to carry out a RPC method invocation on the server. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public interface RpcCall extends RpcCallContext { @@ -84,15 +82,15 @@ public interface RpcCall extends RpcCallContext { int getPriority(); /** - * Return the deadline of this call. If we can not complete this call in time, - * we can throw a TimeoutIOException and RPCServer will drop it. + * Return the deadline of this call. If we can not complete this call in time, we can throw a + * TimeoutIOException and RPCServer will drop it. * @return The system timestamp of deadline. */ long getDeadline(); /** - * Used to calculate the request call queue size. - * If the total request call size exceeds a limit, the call will be rejected. + * Used to calculate the request call queue size. If the total request call size exceeds a limit, + * the call will be rejected. * @return The raw size of this call. */ long getSize(); @@ -117,8 +115,8 @@ public interface RpcCall extends RpcCallContext { void setResponse(Message param, CellScanner cells, Throwable errorThrowable, String error); /** - * Send the response of this RPC call. 
- * Implementation provides the underlying facility (connection, etc) to send. + * Send the response of this RPC call. Implementation provides the underlying facility + * (connection, etc) to send. * @throws IOException */ void sendResponseIfReady() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index 6a4d3a29a52d..5ba3ebb5cf11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,10 @@ import java.net.InetAddress; import java.util.Optional; - +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; -import org.apache.hadoop.hbase.security.User; /** * Interface of all necessary to carry out a RPC service invocation on the server. This interface @@ -31,19 +31,19 @@ @InterfaceAudience.Private public interface RpcCallContext { /** - * Check if the caller who made this IPC call has disconnected. - * If called from outside the context of IPC, this does nothing. - * @return < 0 if the caller is still connected. The time in ms - * since the disconnection otherwise + * Check if the caller who made this IPC call has disconnected. If called from outside the context + * of IPC, this does nothing. + * @return < 0 if the caller is still connected. The time in ms since the disconnection + * otherwise */ long disconnectSince(); /** * If the client connected and specified a codec to use, then we will use this codec making - * cellblocks to return. If the client did not specify a codec, we assume it does not support - * cellblocks and will return all content protobuf'd (though it makes our serving slower). - * We need to ask this question per call because a server could be hosting both clients that - * support cellblocks while fielding requests from clients that do not. + * cellblocks to return. If the client did not specify a codec, we assume it does not support + * cellblocks and will return all content protobuf'd (though it makes our serving slower). We need + * to ask this question per call because a server could be hosting both clients that support + * cellblocks while fielding requests from clients that do not. * @return True if the client supports cellblocks, else return all content in pb */ boolean isClientCellBlockSupported(); @@ -75,7 +75,6 @@ default Optional getRequestUserName() { /** * Sets a callback which has to be executed at the end of this RPC call. Such a callback is an * optional one for any Rpc call. - * * @param callback */ void setCallBack(RpcCallback callback); @@ -83,18 +82,15 @@ default Optional getRequestUserName() { boolean isRetryImmediatelySupported(); /** - * The size of response cells that have been accumulated so far. - * This along with the corresponding increment call is used to ensure that multi's or - * scans dont get too excessively large + * The size of response cells that have been accumulated so far. 
This along with the corresponding + * increment call is used to ensure that multi's or scans dont get too excessively large */ long getResponseCellSize(); /** - * Add on the given amount to the retained cell size. - * - * This is not thread safe and not synchronized at all. If this is used by more than one thread - * then everything will break. Since this is called for every row synchronization would be too - * onerous. + * Add on the given amount to the retained cell size. This is not thread safe and not synchronized + * at all. If this is used by more than one thread then everything will break. Since this is + * called for every row synchronization would be too onerous. */ void incrementResponseCellSize(long cellSize); @@ -103,5 +99,6 @@ default Optional getRequestUserName() { void incrementResponseBlockSize(long blockSize); long getResponseExceptionSize(); + void incrementResponseExceptionSize(long exceptionSize); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java index f0074b54437c..a8bf2d762255 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallback.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,10 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** * Denotes a callback action that has to be executed at the end of an Rpc Call. - * * @see RpcCallContext#setCallBack(RpcCallback) */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java index 40ed856be427..1f87767a0524 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.util.ArrayList; @@ -43,6 +42,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; @@ -58,11 +58,11 @@ public abstract class RpcExecutor { protected static final int DEFAULT_CALL_QUEUE_SIZE_HARD_LIMIT = 250; public static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY = - "hbase.ipc.server.callqueue.handler.factor"; + "hbase.ipc.server.callqueue.handler.factor"; /** max delay in msec used to bound the de-prioritized requests */ public static final String QUEUE_MAX_CALL_DELAY_CONF_KEY = - "hbase.ipc.server.queue.max.call.delay"; + "hbase.ipc.server.queue.max.call.delay"; /** * The default, 'fifo', has the least friction but is dumb. 
If set to 'deadline', uses a priority @@ -77,26 +77,25 @@ public abstract class RpcExecutor { public static final String CALL_QUEUE_TYPE_CONF_DEFAULT = CALL_QUEUE_TYPE_FIFO_CONF_VALUE; public static final String CALL_QUEUE_QUEUE_BALANCER_CLASS = - "hbase.ipc.server.callqueue.balancer.class"; + "hbase.ipc.server.callqueue.balancer.class"; public static final Class CALL_QUEUE_QUEUE_BALANCER_CLASS_DEFAULT = RandomQueueBalancer.class; - // These 3 are only used by Codel executor public static final String CALL_QUEUE_CODEL_TARGET_DELAY = - "hbase.ipc.server.callqueue.codel.target.delay"; + "hbase.ipc.server.callqueue.codel.target.delay"; public static final String CALL_QUEUE_CODEL_INTERVAL = - "hbase.ipc.server.callqueue.codel.interval"; + "hbase.ipc.server.callqueue.codel.interval"; public static final String CALL_QUEUE_CODEL_LIFO_THRESHOLD = - "hbase.ipc.server.callqueue.codel.lifo.threshold"; + "hbase.ipc.server.callqueue.codel.lifo.threshold"; public static final int CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY = 100; public static final int CALL_QUEUE_CODEL_DEFAULT_INTERVAL = 100; public static final double CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD = 0.8; public static final String PLUGGABLE_CALL_QUEUE_CLASS_NAME = - "hbase.ipc.server.callqueue.pluggable.queue.class.name"; + "hbase.ipc.server.callqueue.pluggable.queue.class.name"; public static final String PLUGGABLE_CALL_QUEUE_WITH_FAST_PATH_ENABLED = - "hbase.ipc.server.callqueue.pluggable.queue.fast.path.enabled"; + "hbase.ipc.server.callqueue.pluggable.queue.fast.path.enabled"; private final LongAdder numGeneralCallsDropped = new LongAdder(); private final LongAdder numLifoModeSwitches = new LongAdder(); @@ -120,8 +119,8 @@ public abstract class RpcExecutor { public RpcExecutor(final String name, final int handlerCount, final int maxQueueLength, final PriorityFunction priority, final Configuration conf, final Abortable abortable) { - this(name, handlerCount, conf.get(CALL_QUEUE_TYPE_CONF_KEY, - CALL_QUEUE_TYPE_CONF_DEFAULT), maxQueueLength, priority, conf, abortable); + this(name, handlerCount, conf.get(CALL_QUEUE_TYPE_CONF_KEY, CALL_QUEUE_TYPE_CONF_DEFAULT), + maxQueueLength, priority, conf, abortable); } public RpcExecutor(final String name, final int handlerCount, final String callQueueType, @@ -132,10 +131,10 @@ public RpcExecutor(final String name, final int handlerCount, final String callQ this.abortable = abortable; float callQueuesHandlersFactor = this.conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.1f); - if (Float.compare(callQueuesHandlersFactor, 1.0f) > 0 || - Float.compare(0.0f, callQueuesHandlersFactor) > 0) { - LOG.warn(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY + - " is *ILLEGAL*, it should be in range [0.0, 1.0]"); + if (Float.compare(callQueuesHandlersFactor, 1.0f) > 0 + || Float.compare(0.0f, callQueuesHandlersFactor) > 0) { + LOG.warn( + CALL_QUEUE_HANDLER_FACTOR_CONF_KEY + " is *ILLEGAL*, it should be in range [0.0, 1.0]"); // For callQueuesHandlersFactor > 1.0, we just set it 1.0f. 
if (Float.compare(callQueuesHandlersFactor, 1.0f) > 0) { LOG.warn("Set " + CALL_QUEUE_HANDLER_FACTOR_CONF_KEY + " 1.0f"); @@ -154,26 +153,26 @@ public RpcExecutor(final String name, final int handlerCount, final String callQ if (isDeadlineQueueType(callQueueType)) { this.name += ".Deadline"; - this.queueInitArgs = new Object[] { maxQueueLength, - new CallPriorityComparator(conf, priority) }; + this.queueInitArgs = + new Object[] { maxQueueLength, new CallPriorityComparator(conf, priority) }; this.queueClass = BoundedPriorityBlockingQueue.class; } else if (isCodelQueueType(callQueueType)) { this.name += ".Codel"; - int codelTargetDelay = conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, - CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); + int codelTargetDelay = + conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); int codelInterval = conf.getInt(CALL_QUEUE_CODEL_INTERVAL, CALL_QUEUE_CODEL_DEFAULT_INTERVAL); - double codelLifoThreshold = conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, - CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); + double codelLifoThreshold = + conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); this.queueInitArgs = new Object[] { maxQueueLength, codelTargetDelay, codelInterval, - codelLifoThreshold, numGeneralCallsDropped, numLifoModeSwitches }; + codelLifoThreshold, numGeneralCallsDropped, numLifoModeSwitches }; this.queueClass = AdaptiveLifoCoDelCallQueue.class; } else if (isPluggableQueueType(callQueueType)) { Optional>> pluggableQueueClass = - getPluggableQueueClass(); + getPluggableQueueClass(); if (!pluggableQueueClass.isPresent()) { - throw new PluggableRpcQueueNotFound("Pluggable call queue failed to load and selected call" - + " queue type required"); + throw new PluggableRpcQueueNotFound( + "Pluggable call queue failed to load and selected call" + " queue type required"); } else { this.queueInitArgs = new Object[] { maxQueueLength, priority, conf }; this.queueClass = pluggableQueueClass.get(); @@ -184,9 +183,10 @@ public RpcExecutor(final String name, final int handlerCount, final String callQ this.queueClass = LinkedBlockingQueue.class; } - LOG.info("Instantiated {} with queueClass={}; " + - "numCallQueues={}, maxQueueLength={}, handlerCount={}", - this.name, this.queueClass, this.numCallQueues, maxQueueLength, this.handlerCount); + LOG.info( + "Instantiated {} with queueClass={}; " + + "numCallQueues={}, maxQueueLength={}, handlerCount={}", + this.name, this.queueClass, this.numCallQueues, maxQueueLength, this.handlerCount); } protected int computeNumCallQueues(final int handlerCount, final float callQueuesHandlersFactor) { @@ -197,35 +197,27 @@ protected int computeNumCallQueues(final int handlerCount, final float callQueue * Return the {@link Descriptors.MethodDescriptor#getName()} from {@code callRunner} or "Unknown". */ private static String getMethodName(final CallRunner callRunner) { - return Optional.ofNullable(callRunner) - .map(CallRunner::getRpcCall) - .map(RpcCall::getMethod) - .map(Descriptors.MethodDescriptor::getName) - .orElse("Unknown"); + return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getMethod) + .map(Descriptors.MethodDescriptor::getName).orElse("Unknown"); } /** * Return the {@link RpcCall#getSize()} from {@code callRunner} or 0L. 
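The reflowed getMethodName helper above and the getRpcCallSize helper that follows are each a single Optional chain: every map step short-circuits to an empty Optional when the CallRunner, its RpcCall, or the method descriptor is missing, and orElse supplies the fallback ("Unknown" / 0L). A self-contained sketch of the same null-safe pattern using plain JDK types (the Request class and its accessors are invented stand-ins for CallRunner/RpcCall):

  import java.util.Optional;

  public final class OptionalChainExample {
    /** Invented stand-in for CallRunner/RpcCall, only here so the sketch compiles on its own. */
    static final class Request {
      private final String methodName; // may be null
      private final long size;

      Request(String methodName, long size) {
        this.methodName = methodName;
        this.size = size;
      }

      String getMethodName() { return methodName; }
      long getSize() { return size; }
    }

    static String methodNameOf(Request request) {
      return Optional.ofNullable(request).map(Request::getMethodName).orElse("Unknown");
    }

    static long sizeOf(Request request) {
      return Optional.ofNullable(request).map(Request::getSize).orElse(0L);
    }

    public static void main(String[] args) {
      System.out.println(methodNameOf(new Request("Get", 42L))); // Get
      System.out.println(methodNameOf(null));                    // Unknown
      System.out.println(sizeOf(null));                          // 0
    }
  }

The chain keeps the queue-summary streams further down (getCallQueueCountsSummary / getCallQueueSizeSummary) free of explicit null checks.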
*/ private static long getRpcCallSize(final CallRunner callRunner) { - return Optional.ofNullable(callRunner) - .map(CallRunner::getRpcCall) - .map(RpcCall::getSize) - .orElse(0L); + return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getSize) + .orElse(0L); } public Map getCallQueueCountsSummary() { - return queues.stream() - .flatMap(Collection::stream) - .map(RpcExecutor::getMethodName) - .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); + return queues.stream().flatMap(Collection::stream).map(RpcExecutor::getMethodName) + .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); } public Map getCallQueueSizeSummary() { - return queues.stream() - .flatMap(Collection::stream) - .map(callRunner -> new Pair<>(getMethodName(callRunner), getRpcCallSize(callRunner))) - .collect(Collectors.groupingBy(Pair::getFirst, Collectors.summingLong(Pair::getSecond))); + return queues.stream().flatMap(Collection::stream) + .map(callRunner -> new Pair<>(getMethodName(callRunner), getRpcCallSize(callRunner))) + .collect(Collectors.groupingBy(Pair::getFirst, Collectors.summingLong(Pair::getSecond))); } protected void initializeQueues(final int numQueues) { @@ -270,7 +262,7 @@ protected RpcHandler getHandler(final String name, final double handlerFailureTh final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount, final Abortable abortable) { return new RpcHandler(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount, - failedHandlerCount, abortable); + failedHandlerCount, abortable); } /** @@ -280,9 +272,9 @@ protected void startHandlers(final String nameSuffix, final int numHandlers, final List> callQueues, final int qindex, final int qsize, final int port, final AtomicInteger activeHandlerCount) { final String threadPrefix = name + Strings.nullToEmpty(nameSuffix); - double handlerFailureThreshhold = conf == null ? 1.0 : conf.getDouble( - HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT); + double handlerFailureThreshhold = conf == null ? 1.0 + : conf.getDouble(HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT, + HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT); for (int i = 0; i < numHandlers; i++) { final int index = qindex + (i % qsize); String name = "RpcServer." 
+ threadPrefix + ".handler=" + handlers.size() + ",queue=" + index @@ -293,7 +285,7 @@ protected void startHandlers(final String nameSuffix, final int numHandlers, handlers.add(handler); } LOG.debug("Started handlerCount={} with threadPrefix={}, numCallQueues={}, port={}", - handlers.size(), threadPrefix, qsize, port); + handlers.size(), threadPrefix, qsize, port); } /** @@ -301,17 +293,14 @@ protected void startHandlers(final String nameSuffix, final int numHandlers, */ private static final QueueBalancer ONE_QUEUE = val -> 0; - public static QueueBalancer getBalancer( - final String executorName, - final Configuration conf, - final List> queues - ) { + public static QueueBalancer getBalancer(final String executorName, final Configuration conf, + final List> queues) { Preconditions.checkArgument(queues.size() > 0, "Queue size is <= 0, must be at least 1"); if (queues.size() == 1) { return ONE_QUEUE; } else { - Class balancerClass = conf.getClass( - CALL_QUEUE_QUEUE_BALANCER_CLASS, CALL_QUEUE_QUEUE_BALANCER_CLASS_DEFAULT); + Class balancerClass = + conf.getClass(CALL_QUEUE_QUEUE_BALANCER_CLASS, CALL_QUEUE_QUEUE_BALANCER_CLASS_DEFAULT); return (QueueBalancer) ReflectionUtils.newInstance(balancerClass, conf, executorName, queues); } } @@ -362,16 +351,16 @@ public static boolean isPluggableQueueType(String callQueueType) { } public static boolean isPluggableQueueWithFastPath(String callQueueType, Configuration conf) { - return isPluggableQueueType(callQueueType) && - conf.getBoolean(PLUGGABLE_CALL_QUEUE_WITH_FAST_PATH_ENABLED, false); + return isPluggableQueueType(callQueueType) + && conf.getBoolean(PLUGGABLE_CALL_QUEUE_WITH_FAST_PATH_ENABLED, false); } private Optional>> getPluggableQueueClass() { String queueClassName = conf.get(PLUGGABLE_CALL_QUEUE_CLASS_NAME); if (queueClassName == null) { - LOG.error("Pluggable queue class config at " + PLUGGABLE_CALL_QUEUE_CLASS_NAME + - " was not found"); + LOG.error( + "Pluggable queue class config at " + PLUGGABLE_CALL_QUEUE_CLASS_NAME + " was not found"); return Optional.empty(); } @@ -381,8 +370,8 @@ private Optional>> getPluggableQueueCl if (BlockingQueue.class.isAssignableFrom(clazz)) { return Optional.of((Class>) clazz); } else { - LOG.error("Pluggable Queue class " + queueClassName + - " does not extend BlockingQueue"); + LOG.error( + "Pluggable Queue class " + queueClassName + " does not extend BlockingQueue"); return Optional.empty(); } } catch (ClassNotFoundException exception) { @@ -418,7 +407,7 @@ public int getActiveScanHandlerCount() { /** Returns the length of the pending queue */ public int getQueueLength() { int length = 0; - for (final BlockingQueue queue: queues) { + for (final BlockingQueue queue : queues) { length += queue.size(); } return length; @@ -459,18 +448,18 @@ public void resizeQueues(Configuration conf) { public void onConfigurationChange(Configuration conf) { // update CoDel Scheduler tunables - int codelTargetDelay = conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, - CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); + int codelTargetDelay = + conf.getInt(CALL_QUEUE_CODEL_TARGET_DELAY, CALL_QUEUE_CODEL_DEFAULT_TARGET_DELAY); int codelInterval = conf.getInt(CALL_QUEUE_CODEL_INTERVAL, CALL_QUEUE_CODEL_DEFAULT_INTERVAL); - double codelLifoThreshold = conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, - CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); + double codelLifoThreshold = + conf.getDouble(CALL_QUEUE_CODEL_LIFO_THRESHOLD, CALL_QUEUE_CODEL_DEFAULT_LIFO_THRESHOLD); for (BlockingQueue queue : queues) { if (queue instanceof 
AdaptiveLifoCoDelCallQueue) { ((AdaptiveLifoCoDelCallQueue) queue).updateTunables(codelTargetDelay, codelInterval, codelLifoThreshold); } else if (queue instanceof ConfigurationObserver) { - ((ConfigurationObserver)queue).onConfigurationChange(conf); + ((ConfigurationObserver) queue).onConfigurationChange(conf); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java index f46dcfcc08eb..688419826a1c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,8 +27,7 @@ import org.slf4j.LoggerFactory; /** - * Thread to handle rpc call. - * Should only be used in {@link RpcExecutor} and its sub-classes. + * Thread to handle rpc call. Should only be used in {@link RpcExecutor} and its sub-classes. */ @InterfaceAudience.Private public class RpcHandler extends Thread { @@ -108,14 +107,14 @@ private void run(CallRunner cr) { if (e instanceof Error) { int failedCount = failedHandlerCount.incrementAndGet(); if (this.handlerFailureThreshhold >= 0 - && failedCount > handlerCount * this.handlerFailureThreshhold) { + && failedCount > handlerCount * this.handlerFailureThreshhold) { String message = "Number of failed RpcServer handler runs exceeded threshhold " - + this.handlerFailureThreshhold + "; reason: " + StringUtils.stringifyException(e); + + this.handlerFailureThreshhold + "; reason: " + StringUtils.stringifyException(e); if (abortable != null) { abortable.abort(message, e); } else { LOG.error("Error but can't abort because abortable is null: " - + StringUtils.stringifyException(e)); + + StringUtils.stringifyException(e)); throw e; } } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java index 7174a409c932..7840228621ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcResponse.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java index 0f935f6a76dc..efac06f753f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcScheduler.java @@ -25,7 +25,7 @@ /** * An interface for RPC request scheduling algorithm. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public abstract class RpcScheduler { @@ -42,9 +42,8 @@ public static abstract class Context { } /** - * Does some quick initialization. Heavy tasks (e.g. starting threads) should be - * done in {@link #start()}. This method is called before {@code start}. - * + * Does some quick initialization. Heavy tasks (e.g. 
starting threads) should be done in + * {@link #start()}. This method is called before {@code start}. * @param context provides methods to retrieve runtime information from */ public abstract void init(Context context); @@ -60,7 +59,6 @@ public static abstract class Context { /** * Dispatches an RPC request asynchronously. An implementation is free to choose to process the * request immediately or delay it for later processing. - * * @param task the request to be dispatched */ public abstract boolean dispatch(CallRunner task); @@ -96,15 +94,15 @@ public static abstract class Context { public abstract int getActiveReplicationRpcHandlerCount(); /** - * If CoDel-based RPC executors are used, retrieves the number of Calls that were dropped - * from general queue because RPC executor is under high load; returns 0 otherwise. + * If CoDel-based RPC executors are used, retrieves the number of Calls that were dropped from + * general queue because RPC executor is under high load; returns 0 otherwise. */ public abstract long getNumGeneralCallsDropped(); /** - * If CoDel-based RPC executors are used, retrieves the number of Calls that were - * picked from the tail of the queue (indicating adaptive LIFO mode, when - * in the period of overloade we serve last requests first); returns 0 otherwise. + * If CoDel-based RPC executors are used, retrieves the number of Calls that were picked from the + * tail of the queue (indicating adaptive LIFO mode, when in the period of overloade we serve last + * requests first); returns 0 otherwise. */ public abstract long getNumLifoModeSwitches(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java index 12da141290f6..3e4159b7d021 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcSchedulerContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetSocketAddress; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 9a7ba922cbce..72571c428c34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; @@ -80,15 +79,13 @@ /** * An RPC server that hosts protobuf described Services. 
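The two CoDel counters documented a little above only move when a CoDel-based executor is in use: getNumGeneralCallsDropped is the number of calls shed from the general queue because the executor is under high load, and getNumLifoModeSwitches is the number of calls served from the tail of the queue while in adaptive LIFO mode; both return 0 otherwise. A small hedged sketch of how a metrics hook might surface them, assuming hbase-server is on the classpath (the CodelPressureReporter class and reportCodelPressure method are invented for the example):

  import org.apache.hadoop.hbase.ipc.RpcScheduler;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public final class CodelPressureReporter {
    private static final Logger LOG = LoggerFactory.getLogger(CodelPressureReporter.class);

    /** Invented helper: log the CoDel load-shedding counters exposed by the scheduler. */
    public static void reportCodelPressure(RpcScheduler scheduler) {
      // Both getters simply return 0 unless a CoDel-based call queue is configured.
      long dropped = scheduler.getNumGeneralCallsDropped();
      long servedFromTail = scheduler.getNumLifoModeSwitches();
      LOG.info("CoDel pressure: droppedGeneralCalls={}, lifoTailServes={}", dropped,
        servedFromTail);
    }
  }

Polled periodically, a steadily climbing pair of counters is a reasonable signal of sustained overload rather than a short burst.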
- * */ @InterfaceAudience.Private -public abstract class RpcServer implements RpcServerInterface, - ConfigurationObserver { +public abstract class RpcServer implements RpcServerInterface, ConfigurationObserver { // LOG is being used in CallRunner and the log level is being changed in tests public static final Logger LOG = LoggerFactory.getLogger(RpcServer.class); - protected static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION - = new CallQueueTooBigException(); + protected static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION = + new CallQueueTooBigException(); private static final String MULTI_GETS = "multi.gets"; private static final String MULTI_MUTATIONS = "multi.mutations"; @@ -104,7 +101,7 @@ public abstract class RpcServer implements RpcServerInterface, * Whether we allow a fallback to SIMPLE auth for insecure clients when security is enabled. */ public static final String FALLBACK_TO_INSECURE_CLIENT_AUTH = - "hbase.ipc.server.fallback-to-simple-auth-allowed"; + "hbase.ipc.server.fallback-to-simple-auth-allowed"; /** * How many calls/handler are allowed in the queue. @@ -115,15 +112,15 @@ public abstract class RpcServer implements RpcServerInterface, protected static final String AUTH_FAILED_FOR = "Auth failed for "; protected static final String AUTH_SUCCESSFUL_FOR = "Auth successful for "; - protected static final Logger AUDITLOG = LoggerFactory.getLogger("SecurityLogger." - + Server.class.getName()); + protected static final Logger AUDITLOG = + LoggerFactory.getLogger("SecurityLogger." + Server.class.getName()); protected SecretManager secretManager; protected final Map saslProps; protected ServiceAuthorizationManager authManager; - /** This is set to Call object before Handler invokes an RPC and ybdie - * after the call returns. + /** + * This is set to Call object before Handler invokes an RPC and ybdie after the call returns. */ protected static final ThreadLocal CurCall = new ThreadLocal<>(); @@ -159,9 +156,9 @@ public abstract class RpcServer implements RpcServerInterface, protected final boolean tcpKeepAlive; // if T then use keepalives /** - * This flag is used to indicate to sub threads when they should go down. When we call - * {@link #start()}, all threads started will consult this flag on whether they should - * keep going. It is set to false when {@link #stop()} is called. + * This flag is used to indicate to sub threads when they should go down. When we call + * {@link #start()}, all threads started will consult this flag on whether they should keep going. + * It is set to false when {@link #stop()} is called. */ volatile boolean running = true; @@ -181,8 +178,8 @@ public abstract class RpcServer implements RpcServerInterface, protected static final String WARN_RESPONSE_SIZE = "hbase.ipc.warn.response.size"; /** - * Minimum allowable timeout (in milliseconds) in rpc request's header. This - * configuration exists to prevent the rpc service regarding this request as timeout immediately. + * Minimum allowable timeout (in milliseconds) in rpc request's header. This configuration exists + * to prevent the rpc service regarding this request as timeout immediately. 
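The floor described here, hbase.ipc.min.client.request.timeout (declared just below with a 20 ms default), keeps a very small client-supplied timeout from making the server consider the call expired the instant a handler picks it up. A minimal sketch of the idea, assuming the floor is applied as a simple max over the client value (the clampTimeout helper is an illustration, not the patch's code):

  import org.apache.hadoop.conf.Configuration;

  public final class MinClientRequestTimeoutExample {
    static final String KEY = "hbase.ipc.min.client.request.timeout";
    static final int DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT = 20; // milliseconds

    /** Invented helper: never let the effective RPC timeout drop below the configured floor. */
    static int clampTimeout(Configuration conf, int clientTimeoutMs) {
      int minTimeoutMs = conf.getInt(KEY, DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT);
      return Math.max(clientTimeoutMs, minTimeoutMs);
    }

    public static void main(String[] args) {
      Configuration conf = new Configuration();
      System.out.println(clampTimeout(conf, 5));   // 20 -- raised to the floor
      System.out.println(clampTimeout(conf, 500)); // 500 -- left as-is
    }
  }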
*/ protected static final String MIN_CLIENT_REQUEST_TIMEOUT = "hbase.ipc.min.client.request.timeout"; protected static final int DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT = 20; @@ -221,7 +218,6 @@ public abstract class RpcServer implements RpcServerInterface, */ private RSRpcServices rsRpcServices; - /** * Use to add online slowlog responses */ @@ -233,22 +229,25 @@ protected interface CallCleanup { } /** - * Datastructure for passing a {@link BlockingService} and its associated class of - * protobuf service interface. For example, a server that fielded what is defined - * in the client protobuf service would pass in an implementation of the client blocking service - * and then its ClientService.BlockingInterface.class. Used checking connection setup. + * Datastructure for passing a {@link BlockingService} and its associated class of protobuf + * service interface. For example, a server that fielded what is defined in the client protobuf + * service would pass in an implementation of the client blocking service and then its + * ClientService.BlockingInterface.class. Used checking connection setup. */ public static class BlockingServiceAndInterface { private final BlockingService service; private final Class serviceInterface; + public BlockingServiceAndInterface(final BlockingService service, final Class serviceInterface) { this.service = service; this.serviceInterface = serviceInterface; } + public Class getServiceInterface() { return this.serviceInterface; } + public BlockingService getBlockingService() { return this.service; } @@ -256,8 +255,8 @@ public BlockingService getBlockingService() { /** * Constructs a server listening on the named port and address. - * @param server hosting instance of {@link Server}. We will do authentications if an - * instance else pass null for no authentication check. + * @param server hosting instance of {@link Server}. We will do authentications if an instance + * else pass null for no authentication check. * @param name Used keying this rpc servers' metrics and for naming the Listener thread. * @param services A list of services. * @param bindAddress Where to listen @@ -266,9 +265,8 @@ public BlockingService getBlockingService() { * @param reservoirEnabled Enable ByteBufferPool or not. */ public RpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { this.bbAllocator = ByteBuffAllocator.create(conf, reservoirEnabled); this.server = server; this.services = services; @@ -276,12 +274,12 @@ public RpcServer(final Server server, final String name, this.conf = conf; // See declaration above for documentation on what this size is. 
this.maxQueueSizeInBytes = - this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE); + this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE); this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME); this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE, DEFAULT_WARN_RESPONSE_SIZE); - this.minClientRequestTimeout = conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT, - DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT); + this.minClientRequestTimeout = + conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT, DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT); this.maxRequestSize = conf.getInt(MAX_REQUEST_SIZE, DEFAULT_MAX_REQUEST_SIZE); this.metrics = new MetricsHBaseServer(name, new MetricsHBaseServerWrapperImpl(this)); @@ -354,10 +352,8 @@ protected AuthenticationTokenSecretManager createSecretManager() { if (!isSecurityEnabled) return null; if (server == null) return null; Configuration conf = server.getConfiguration(); - long keyUpdateInterval = - conf.getLong("hbase.auth.key.update.interval", 24*60*60*1000); - long maxAge = - conf.getLong("hbase.auth.token.max.lifetime", 7*24*60*60*1000); + long keyUpdateInterval = conf.getLong("hbase.auth.key.update.interval", 24 * 60 * 60 * 1000); + long maxAge = conf.getLong("hbase.auth.token.max.lifetime", 7 * 24 * 60 * 60 * 1000); return new AuthenticationTokenSecretManager(conf, server.getZooKeeper(), server.getServerName().toString(), keyUpdateInterval, maxAge); } @@ -372,22 +368,21 @@ public void setSecretManager(SecretManager secretMana } /** - * This is a server side method, which is invoked over RPC. On success - * the return response has protobuf response payload. On failure, the - * exception name and the stack trace are returned in the protobuf response. + * This is a server side method, which is invoked over RPC. On success the return response has + * protobuf response payload. On failure, the exception name and the stack trace are returned in + * the protobuf response. */ @Override - public Pair call(RpcCall call, - MonitoredRPCHandler status) throws IOException { + public Pair call(RpcCall call, MonitoredRPCHandler status) + throws IOException { try { MethodDescriptor md = call.getMethod(); Message param = call.getParam(); - status.setRPC(md.getName(), new Object[]{param}, - call.getReceiveTime()); + status.setRPC(md.getName(), new Object[] { param }, call.getReceiveTime()); // TODO: Review after we add in encoded data blocks. status.setRPCPacket(param); status.resume("Servicing call"); - //get an instance of the method arg type + // get an instance of the method arg type HBaseRpcController controller = new HBaseRpcControllerImpl(call.getCellScanner()); controller.setCallTimeout(call.getTimeout()); Message result = call.getService().callBlockingMethod(md, controller, param); @@ -398,11 +393,9 @@ public Pair call(RpcCall call, int qTime = (int) (startTime - receiveTime); int totalTime = (int) (endTime - receiveTime); if (LOG.isTraceEnabled()) { - LOG.trace(CurCall.get().toString() + - ", response " + TextFormat.shortDebugString(result) + - " queueTime: " + qTime + - " processingTime: " + processingTime + - " totalTime: " + totalTime); + LOG.trace(CurCall.get().toString() + ", response " + TextFormat.shortDebugString(result) + + " queueTime: " + qTime + " processingTime: " + processingTime + " totalTime: " + + totalTime); } // Use the raw request call size for now. 
long requestSize = call.getSize(); @@ -425,24 +418,21 @@ public Pair call(RpcCall call, final String userName = call.getRequestUserName().orElse(StringUtils.EMPTY); // when tagging, we let TooLarge trump TooSmall to keep output simple // note that large responses will often also be slow. - logResponse(param, - md.getName(), md.getName() + "(" + param.getClass().getName() + ")", - tooLarge, tooSlow, - status.getClient(), startTime, processingTime, qTime, - responseSize, userName); + logResponse(param, md.getName(), md.getName() + "(" + param.getClass().getName() + ")", + tooLarge, tooSlow, status.getClient(), startTime, processingTime, qTime, responseSize, + userName); if (this.namedQueueRecorder != null && this.isOnlineLogProviderEnabled) { // send logs to ring buffer owned by slowLogRecorder final String className = - server == null ? StringUtils.EMPTY : server.getClass().getSimpleName(); - this.namedQueueRecorder.addRecord( - new RpcLogDetails(call, param, status.getClient(), responseSize, className, tooSlow, - tooLarge)); + server == null ? StringUtils.EMPTY : server.getClass().getSimpleName(); + this.namedQueueRecorder.addRecord(new RpcLogDetails(call, param, status.getClient(), + responseSize, className, tooSlow, tooLarge)); } } return new Pair<>(result, controller.cellScanner()); } catch (Throwable e) { - // The above callBlockingMethod will always return a SE. Strip the SE wrapper before - // putting it on the wire. Its needed to adhere to the pb Service Interface but we don't + // The above callBlockingMethod will always return a SE. Strip the SE wrapper before + // putting it on the wire. Its needed to adhere to the pb Service Interface but we don't // need to pass it over the wire. if (e instanceof ServiceException) { if (e.getCause() == null) { @@ -456,15 +446,14 @@ public Pair call(RpcCall call, metrics.exception(e); if (e instanceof LinkageError) throw new DoNotRetryIOException(e); - if (e instanceof IOException) throw (IOException)e; + if (e instanceof IOException) throw (IOException) e; LOG.error("Unexpected throwable object ", e); throw new IOException(e.getMessage(), e); } } /** - * Logs an RPC response to the LOG file, producing valid JSON objects for - * client Operations. + * Logs an RPC response to the LOG file, producing valid JSON objects for client Operations. * @param param The parameters received in the call. * @param methodName The name of the method invoked * @param call The string representation of the call @@ -473,16 +462,14 @@ public Pair call(RpcCall call, * @param clientAddress The address of the client who made this call. * @param startTime The time that the call was initiated, in ms. * @param processingTime The duration that the call took to run, in ms. - * @param qTime The duration that the call spent on the queue - * prior to being initiated, in ms. + * @param qTime The duration that the call spent on the queue prior to being initiated, in ms. * @param responseSize The size in bytes of the response buffer. * @param userName UserName of the current RPC Call */ - void logResponse(Message param, String methodName, String call, boolean tooLarge, - boolean tooSlow, String clientAddress, long startTime, int processingTime, int qTime, - long responseSize, String userName) { - final String className = server == null ? 
StringUtils.EMPTY : - server.getClass().getSimpleName(); + void logResponse(Message param, String methodName, String call, boolean tooLarge, boolean tooSlow, + String clientAddress, long startTime, int processingTime, int qTime, long responseSize, + String userName) { + final String className = server == null ? StringUtils.EMPTY : server.getClass().getSimpleName(); // base information that is reported regardless of type of call Map responseInfo = new HashMap<>(); responseInfo.put("starttimems", startTime); @@ -517,9 +504,9 @@ void logResponse(Message param, String methodName, String call, boolean tooLarge int numGets = 0; int numMutations = 0; int numServiceCalls = 0; - ClientProtos.MultiRequest multi = (ClientProtos.MultiRequest)param; + ClientProtos.MultiRequest multi = (ClientProtos.MultiRequest) param; for (ClientProtos.RegionAction regionAction : multi.getRegionActionList()) { - for (ClientProtos.Action action: regionAction.getActionList()) { + for (ClientProtos.Action action : regionAction.getActionList()) { if (action.hasMutation()) { numMutations++; } @@ -535,15 +522,14 @@ void logResponse(Message param, String methodName, String call, boolean tooLarge responseInfo.put(MULTI_MUTATIONS, numMutations); responseInfo.put(MULTI_SERVICE_CALLS, numServiceCalls); } - final String tag = (tooLarge && tooSlow) ? "TooLarge & TooSlow" - : (tooSlow ? "TooSlow" : "TooLarge"); + final String tag = + (tooLarge && tooSlow) ? "TooLarge & TooSlow" : (tooSlow ? "TooSlow" : "TooLarge"); LOG.warn("(response" + tag + "): " + GSON.toJson(responseInfo)); } - /** - * Truncate to number of chars decided by conf hbase.ipc.trace.log.max.length - * if TRACE is on else to 150 chars Refer to Jira HBASE-20826 and HBASE-20942 + * Truncate to number of chars decided by conf hbase.ipc.trace.log.max.length if TRACE is on else + * to 150 chars Refer to Jira HBASE-20826 and HBASE-20942 * @param strParam stringifiedParam to be truncated * @return truncated trace log string */ @@ -601,29 +587,27 @@ public synchronized void authorize(UserGroupInformation user, ConnectionHeader c } /** - * When the read or write buffer size is larger than this limit, i/o will be - * done in chunks of this size. Most RPC requests and responses would be - * be smaller. + * When the read or write buffer size is larger than this limit, i/o will be done in chunks of + * this size. Most RPC requests and responses would be be smaller. */ - protected static final int NIO_BUFFER_LIMIT = 64 * 1024; //should not be more than 64KB. + protected static final int NIO_BUFFER_LIMIT = 64 * 1024; // should not be more than 64KB. /** - * This is a wrapper around {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. - * This is to avoid jdk from creating many direct buffers as the size of - * ByteBuffer increases. There should not be any performance degredation. - * + * This is a wrapper around + * {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}. If the amount of data + * is large, it writes to channel in smaller chunks. This is to avoid jdk from creating many + * direct buffers as the size of ByteBuffer increases. There should not be any performance + * degredation. 
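The javadoc above explains why channelRead (and its channelIO helper, which follows) never hands the JDK one huge ByteBuffer: NIO can end up creating large temporary direct buffers sized to the buffer it is given, so large requests and responses are moved in slices of at most NIO_BUFFER_LIMIT (64 KB) by temporarily lowering the buffer's limit. A standalone sketch of that slicing idea for the write side only (simplified; the real helper also covers the read side and reports to the server's metrics):

  import java.io.ByteArrayOutputStream;
  import java.io.IOException;
  import java.nio.ByteBuffer;
  import java.nio.channels.Channels;
  import java.nio.channels.WritableByteChannel;

  public final class ChunkedChannelWrite {
    static final int NIO_BUFFER_LIMIT = 64 * 1024; // mirrors the constant above

    /** Write buf to the channel in slices of at most NIO_BUFFER_LIMIT bytes. */
    static int chunkedWrite(WritableByteChannel channel, ByteBuffer buf) throws IOException {
      int originalLimit = buf.limit();
      int written = 0;
      try {
        while (buf.position() < originalLimit) {
          // Expose at most one slice to the channel by shrinking the limit.
          buf.limit(Math.min(buf.position() + NIO_BUFFER_LIMIT, originalLimit));
          int n = channel.write(buf);
          if (n <= 0) {
            break; // non-blocking channel not ready; the caller decides how to retry
          }
          written += n;
        }
      } finally {
        buf.limit(originalLimit); // always hand the buffer back with its original limit
      }
      return written;
    }

    public static void main(String[] args) throws IOException {
      ByteArrayOutputStream sink = new ByteArrayOutputStream();
      ByteBuffer buf = ByteBuffer.wrap(new byte[200_000]); // forces several slices
      System.out.println(chunkedWrite(Channels.newChannel(sink), buf) + " bytes written");
    }
  }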
* @param channel writable byte channel to write on * @param buffer buffer to write * @return number of bytes written * @throws java.io.IOException e * @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer) */ - protected int channelRead(ReadableByteChannel channel, - ByteBuffer buffer) throws IOException { + protected int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException { - int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? - channel.read(buffer) : channelIO(channel, null, buffer); + int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer) + : channelIO(channel, null, buffer); if (count > 0) { metrics.receivedBytes(count); } @@ -633,7 +617,6 @@ protected int channelRead(ReadableByteChannel channel, /** * Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}. * Only one of readCh or writeCh should be non-null. - * * @param readCh read channel * @param writeCh write channel * @param buf buffer to read or write into/out of @@ -641,9 +624,8 @@ protected int channelRead(ReadableByteChannel channel, * @throws java.io.IOException e * @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer) */ - private static int channelIO(ReadableByteChannel readCh, - WritableByteChannel writeCh, - ByteBuffer buf) throws IOException { + private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, + ByteBuffer buf) throws IOException { int originalLimit = buf.limit(); int initialRemaining = buf.remaining(); @@ -670,9 +652,8 @@ private static int channelIO(ReadableByteChannel readCh, } /** - * Needed for features such as delayed calls. We need to be able to store the current call - * so that we can complete it later or ask questions of what is supported by the current ongoing - * call. + * Needed for features such as delayed calls. We need to be able to store the current call so that + * we can complete it later or ask questions of what is supported by the current ongoing call. * @return An RpcCallContext backed by the currently ongoing call (gotten from a thread local) */ public static Optional getCurrentCall() { @@ -687,7 +668,7 @@ public static Optional getCurrentCall() { */ public static Optional> getCurrentServerCallWithCellScanner() { return getCurrentCall().filter(c -> c instanceof ServerCall) - .filter(c -> c.getCellScanner() != null).map(c -> (ServerCall) c); + .filter(c -> c.getCellScanner() != null).map(c -> (ServerCall) c); } public static boolean isInRpcCallContext() { @@ -732,8 +713,8 @@ public static Optional getRequestUser() { abstract public int getNumOpenConnections(); /** - * Returns the username for any user associated with the current RPC - * request or not present if no user is set. + * Returns the username for any user associated with the current RPC request or not present if no + * user is set. */ public static Optional getRequestUserName() { return getRequestUser().map(User::getShortName); @@ -766,12 +747,10 @@ protected static BlockingServiceAndInterface getServiceAndInterface( * @param services Available services and their service interfaces. * @return Service interface class for serviceName */ - protected static Class getServiceInterface( - final List services, + protected static Class getServiceInterface(final List services, final String serviceName) { - BlockingServiceAndInterface bsasi = - getServiceAndInterface(services, serviceName); - return bsasi == null? 
null: bsasi.getServiceInterface(); + BlockingServiceAndInterface bsasi = getServiceAndInterface(services, serviceName); + return bsasi == null ? null : bsasi.getServiceInterface(); } /** @@ -779,16 +758,14 @@ protected static Class getServiceInterface( * @param services Available services and their service interfaces. * @return BlockingService that goes with the passed serviceName */ - protected static BlockingService getService( - final List services, + protected static BlockingService getService(final List services, final String serviceName) { - BlockingServiceAndInterface bsasi = - getServiceAndInterface(services, serviceName); - return bsasi == null? null: bsasi.getBlockingService(); + BlockingServiceAndInterface bsasi = getServiceAndInterface(services, serviceName); + return bsasi == null ? null : bsasi.getBlockingService(); } protected static MonitoredRPCHandler getStatus() { - // It is ugly the way we park status up in RpcServer. Let it be for now. TODO. + // It is ugly the way we park status up in RpcServer. Let it be for now. TODO. MonitoredRPCHandler status = RpcServer.MONITORED_RPC.get(); if (status != null) { return status; @@ -799,9 +776,9 @@ protected static MonitoredRPCHandler getStatus() { return status; } - /** Returns the remote side ip address when invoked inside an RPC - * Returns null incase of an error. - * @return InetAddress + /** + * Returns the remote side ip address when invoked inside an RPC Returns null incase of an error. + * @return InetAddress */ public static InetAddress getRemoteIp() { RpcCall call = CurCall.get(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java index 298b47231160..ea30cf8cb9f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,15 +20,15 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; +import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.ServiceDescriptor; -import org.apache.hadoop.hbase.util.ReflectionUtils; @InterfaceAudience.Private public class RpcServerFactory { @@ -50,13 +50,12 @@ public static RpcServer createRpcServer(final Server server, final String name, } public static RpcServer createRpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { - String rpcServerClass = conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY, - NettyRpcServer.class.getName()); + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { + String rpcServerClass = + conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY, NettyRpcServer.class.getName()); StringBuilder servicesList = new StringBuilder(); - for (BlockingServiceAndInterface s: services) { + for (BlockingServiceAndInterface s : services) { ServiceDescriptor sd = s.getBlockingService().getDescriptorForType(); if (sd == null) continue; // Can be null for certain tests like TestTokenAuthentication if (servicesList.length() > 0) servicesList.append(", "); @@ -64,8 +63,8 @@ public static RpcServer createRpcServer(final Server server, final String name, } LOG.info("Creating " + rpcServerClass + " hosting " + servicesList); return ReflectionUtils.instantiateWithCustomCtor(rpcServerClass, - new Class[] { Server.class, String.class, List.class, - InetSocketAddress.class, Configuration.class, RpcScheduler.class, boolean.class }, - new Object[] { server, name, services, bindAddress, conf, scheduler, reservoirEnabled }); + new Class[] { Server.class, String.class, List.class, InetSocketAddress.class, + Configuration.class, RpcScheduler.class, boolean.class }, + new Object[] { server, name, services, bindAddress, conf, scheduler, reservoirEnabled }); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java index ee6e57a2a9f5..786b8cca4660 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.io.IOException; @@ -36,18 +34,21 @@ @InterfaceAudience.Private public interface RpcServerInterface { void start(); + boolean isStarted(); void stop(); + void join() throws InterruptedException; void setSocketSendBufSize(int size); + InetSocketAddress getListenerAddress(); - Pair call(RpcCall call, MonitoredRPCHandler status) - throws IOException; + Pair call(RpcCall call, MonitoredRPCHandler status) throws IOException; void setErrorHandler(HBaseRPCErrorHandler handler); + HBaseRPCErrorHandler getErrorHandler(); /** @@ -56,7 +57,7 @@ Pair call(RpcCall call, MonitoredRPCHandler status) MetricsHBaseServer getMetrics(); /** - * Add/subtract from the current size of all outstanding calls. Called on setup of a call to add + * Add/subtract from the current size of all outstanding calls. Called on setup of a call to add * call total size and then again at end of a call to remove the call size. * @param diff Change (plus or minus) */ @@ -80,7 +81,6 @@ Pair call(RpcCall call, MonitoredRPCHandler status) /** * Set Online SlowLog Provider - * * @param namedQueueRecorder instance of {@link NamedQueueRecorder} */ void setNamedQueueRecorder(final NamedQueueRecorder namedQueueRecorder); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index f98bfc5cbcaf..37d565c69132 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,10 +40,12 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; @@ -52,22 +54,22 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; /** - * Datastructure that holds all necessary to a method invocation and then afterward, carries - * the result. + * Datastructure that holds all necessary to a method invocation and then afterward, carries the + * result. 
*/ @InterfaceAudience.Private public abstract class ServerCall implements RpcCall, RpcResponse { - protected final int id; // the client's call id + protected final int id; // the client's call id protected final BlockingService service; protected final MethodDescriptor md; protected final RequestHeader header; - protected Message param; // the parameter passed + protected Message param; // the parameter passed // Optional cell data passed outside of protobufs. protected final CellScanner cellScanner; - protected final T connection; // connection to client - protected final long receiveTime; // the time received when response is null - // the time served when response is not null + protected final T connection; // connection to client + protected final long receiveTime; // the time received when response is null + // the time served when response is not null protected final int timeout; protected long startTime; protected final long deadline;// the deadline to handle this call, if exceed we can drop it. @@ -81,7 +83,7 @@ public abstract class ServerCall implements RpcCa */ protected BufferChain response; - protected final long size; // size of current call + protected final long size; // size of current call protected boolean isError; protected ByteBufferListOutputStream cellBlockStream = null; protected CallCleanup reqCleanup = null; @@ -124,7 +126,7 @@ public abstract class ServerCall implements RpcCa this.isError = false; this.size = size; if (connection != null) { - this.user = connection.user; + this.user = connection.user; this.retryImmediatelySupported = connection.retryImmediatelySupported; } else { this.user = null; @@ -140,8 +142,7 @@ public abstract class ServerCall implements RpcCa } /** - * Call is done. Execution happened and we returned results to client. It is - * now safe to cleanup. + * Call is done. Execution happened and we returned results to client. It is now safe to cleanup. */ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", justification = "Presume the lock on processing request held by caller is protection enough") @@ -195,9 +196,9 @@ public void releaseByWAL() { @Override public String toString() { - return toShortString() + " param: " + - (this.param != null? ProtobufUtil.getShortTextFormat(this.param): "") + - " connection: " + connection.toString(); + return toShortString() + " param: " + + (this.param != null ? ProtobufUtil.getShortTextFormat(this.param) : "") + " connection: " + + connection.toString(); } @Override @@ -216,17 +217,18 @@ public int getPriority() { */ @Override public String toShortString() { - String serviceName = this.connection.service != null ? - this.connection.service.getDescriptorForType().getName() : "null"; - return "callId: " + this.id + " service: " + serviceName + - " methodName: " + ((this.md != null) ? this.md.getName() : "n/a") + - " size: " + StringUtils.TraditionalBinaryPrefix.long2String(this.size, "", 1) + - " connection: " + connection + " deadline: " + deadline; + String serviceName = + this.connection.service != null ? this.connection.service.getDescriptorForType().getName() + : "null"; + return "callId: " + this.id + " service: " + serviceName + " methodName: " + + ((this.md != null) ? 
this.md.getName() : "n/a") + " size: " + + StringUtils.TraditionalBinaryPrefix.long2String(this.size, "", 1) + " connection: " + + connection + " deadline: " + deadline; } @Override public synchronized void setResponse(Message m, final CellScanner cells, Throwable t, - String errorMsg) { + String errorMsg) { if (this.isError) { return; } @@ -273,8 +275,7 @@ public synchronized void setResponse(Message m, final CellScanner cells, Throwab headerBuilder.setCellBlockMeta(cellBlockBuilder.build()); } Message header = headerBuilder.build(); - ByteBuffer headerBuf = - createHeaderAndMessageBytes(m, header, cellBlockSize, cellBlock); + ByteBuffer headerBuf = createHeaderAndMessageBytes(m, header, cellBlockSize, cellBlock); ByteBuffer[] responseBufs = null; int cellBlockBufferSize = 0; if (cellBlock != null) { @@ -314,10 +315,10 @@ static void setExceptionResponse(Throwable t, String errorMsg, exceptionBuilder.setStackTrace(errorMsg); exceptionBuilder.setDoNotRetry(t instanceof DoNotRetryIOException); if (t instanceof RegionMovedException) { - // Special casing for this exception. This is only one carrying a payload. + // Special casing for this exception. This is only one carrying a payload. // Do this instead of build a generic system for allowing exceptions carry // any kind of payload. - RegionMovedException rme = (RegionMovedException)t; + RegionMovedException rme = (RegionMovedException) t; exceptionBuilder.setHostname(rme.getHostname()); exceptionBuilder.setPort(rme.getPort()); } @@ -325,8 +326,8 @@ static void setExceptionResponse(Throwable t, String errorMsg, headerBuilder.setException(exceptionBuilder.build()); } - static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, - int cellBlockSize, List cellBlock) throws IOException { + static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, int cellBlockSize, + List cellBlock) throws IOException { // Organize the response as a set of bytebuffers rather than collect it all together inside // one big byte array; save on allocations. // for writing the header, we check if there is available space in the buffers @@ -334,10 +335,8 @@ static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, // the last buffer in the cellblock. This applies to the cellblock created from the // pool or even the onheap cellblock buffer in case there is no pool enabled. // Possible reuse would avoid creating a temporary array for storing the header every time. - ByteBuffer possiblePBBuf = - (cellBlockSize > 0) ? cellBlock.get(cellBlock.size() - 1) : null; - int headerSerializedSize = 0, resultSerializedSize = 0, headerVintSize = 0, - resultVintSize = 0; + ByteBuffer possiblePBBuf = (cellBlockSize > 0) ? 
cellBlock.get(cellBlock.size() - 1) : null; + int headerSerializedSize = 0, resultSerializedSize = 0, headerVintSize = 0, resultVintSize = 0; if (header != null) { headerSerializedSize = header.getSerializedSize(); headerVintSize = CodedOutputStream.computeUInt32SizeNoTag(headerSerializedSize); @@ -347,15 +346,13 @@ static ByteBuffer createHeaderAndMessageBytes(Message result, Message header, resultVintSize = CodedOutputStream.computeUInt32SizeNoTag(resultSerializedSize); } // calculate the total size - int totalSize = headerSerializedSize + headerVintSize - + (resultSerializedSize + resultVintSize) + int totalSize = headerSerializedSize + headerVintSize + (resultSerializedSize + resultVintSize) + cellBlockSize; - int totalPBSize = headerSerializedSize + headerVintSize + resultSerializedSize - + resultVintSize + Bytes.SIZEOF_INT; + int totalPBSize = headerSerializedSize + headerVintSize + resultSerializedSize + resultVintSize + + Bytes.SIZEOF_INT; // Only if the last buffer has enough space for header use it. Else allocate // a new buffer. Assume they are all flipped - if (possiblePBBuf != null - && possiblePBBuf.limit() + totalPBSize <= possiblePBBuf.capacity()) { + if (possiblePBBuf != null && possiblePBBuf.limit() + totalPBSize <= possiblePBBuf.capacity()) { // duplicate the buffer. This is where the header is going to be written ByteBuffer pbBuf = possiblePBBuf.duplicate(); // get the current limit @@ -402,10 +399,10 @@ protected BufferChain wrapWithSasl(BufferChain bc) throws IOException { if (!this.connection.useSasl) { return bc; } - // Looks like no way around this; saslserver wants a byte array. I have to make it one. + // Looks like no way around this; saslserver wants a byte array. I have to make it one. // THIS IS A BIG UGLY COPY. - byte [] responseBytes = bc.getBytes(); - byte [] token; + byte[] responseBytes = bc.getBytes(); + byte[] token; // synchronization may be needed since there can be multiple Handler // threads using saslServer or Crypto AES to wrap responses. 
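As the comments above note, wrapWithSasl first flattens the whole response into one byte[] (both the SASL server and the Crypto AES wrapper want a contiguous array), and that shared wrapper can be reached by several handler threads at once, so some form of mutual exclusion may be needed around the wrap call. A simplified sketch of one way to guard it with the plain javax.security.sasl API, under the assumption that locking on the shared SaslServer is an acceptable answer to that comment (the ResponseWrapper class is invented; the patch itself goes through HBaseSaslRpcServer/CryptoAES):

  import javax.security.sasl.SaslException;
  import javax.security.sasl.SaslServer;

  public final class ResponseWrapper {
    private final SaslServer saslServer; // shared across handler threads

    public ResponseWrapper(SaslServer saslServer) {
      this.saslServer = saslServer;
    }

    /** Wrap a fully materialized response; SaslServer implementations need not be thread safe. */
    public byte[] wrap(byte[] response) throws SaslException {
      synchronized (saslServer) {
        return saslServer.wrap(response, 0, response.length);
      }
    }
  }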
if (connection.useCryptoAesWrap) { @@ -419,8 +416,8 @@ protected BufferChain wrapWithSasl(BufferChain bc) throws IOException { } } if (RpcServer.LOG.isTraceEnabled()) { - RpcServer.LOG.trace("Adding saslServer wrapped token of size " + token.length - + " as call response."); + RpcServer.LOG + .trace("Adding saslServer wrapped token of size " + token.length + " as call response."); } ByteBuffer[] responseBufs = new ByteBuffer[2]; @@ -467,6 +464,7 @@ public void incrementResponseBlockSize(long blockSize) { public long getResponseExceptionSize() { return exceptionSize; } + @Override public void incrementResponseExceptionSize(long exSize) { exceptionSize += exSize; @@ -556,8 +554,8 @@ public int getRemotePort() { public synchronized BufferChain getResponse() { if (connection.useWrap) { /* - * wrapping result with SASL as the last step just before sending it out, so - * every message must have the right increasing sequence number + * wrapping result with SASL as the last step just before sending it out, so every message + * must have the right increasing sequence number */ try { return wrapWithSasl(response); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java index 89b1adc2cd89..6e55d76c3c07 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java @@ -89,9 +89,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos.RPCTInfo; /** Reads calls from a connection and queues them for handling. */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="VO_VOLATILE_INCREMENT", - justification="False positive according to http://sourceforge.net/p/findbugs/bugs/1032/") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT", + justification = "False positive according to http://sourceforge.net/p/findbugs/bugs/1032/") @InterfaceAudience.Private abstract class ServerRpcConnection implements Closeable { @@ -172,25 +171,24 @@ public VersionInfo getVersionInfo() { } private String getFatalConnectionString(final int version, final byte authByte) { - return "serverVersion=" + RpcServer.CURRENT_VERSION + - ", clientVersion=" + version + ", authMethod=" + authByte + + return "serverVersion=" + RpcServer.CURRENT_VERSION + ", clientVersion=" + version + + ", authMethod=" + authByte + // The provider may be null if we failed to parse the header of the request - ", authName=" + (provider == null ? "unknown" : provider.getSaslAuthMethod().getName()) + - " from " + toString(); + ", authName=" + (provider == null ? "unknown" : provider.getSaslAuthMethod().getName()) + + " from " + toString(); } /** * Set up cell block codecs * @throws FatalConnectionException */ - private void setupCellBlockCodecs(final ConnectionHeader header) - throws FatalConnectionException { + private void setupCellBlockCodecs(final ConnectionHeader header) throws FatalConnectionException { // TODO: Plug in other supported decoders. 
if (!header.hasCellBlockCodecClass()) return; String className = header.getCellBlockCodecClass(); if (className == null || className.length() == 0) return; try { - this.codec = (Codec)Class.forName(className).getDeclaredConstructor().newInstance(); + this.codec = (Codec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new UnsupportedCellCodecException(className, e); } @@ -198,7 +196,7 @@ private void setupCellBlockCodecs(final ConnectionHeader header) className = header.getCellBlockCompressorClass(); try { this.compressionCodec = - (CompressionCodec)Class.forName(className).getDeclaredConstructor().newInstance(); + (CompressionCodec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new UnsupportedCompressionCodecException(className, e); } @@ -206,42 +204,39 @@ private void setupCellBlockCodecs(final ConnectionHeader header) /** * Set up cipher for rpc encryption with Apache Commons Crypto - * * @throws FatalConnectionException */ private void setupCryptoCipher(final ConnectionHeader header, - RPCProtos.ConnectionHeaderResponse.Builder chrBuilder) - throws FatalConnectionException { + RPCProtos.ConnectionHeaderResponse.Builder chrBuilder) throws FatalConnectionException { // If simple auth, return if (saslServer == null) return; // check if rpc encryption with Crypto AES String qop = saslServer.getNegotiatedQop(); - boolean isEncryption = SaslUtil.QualityOfProtection.PRIVACY - .getSaslQop().equalsIgnoreCase(qop); - boolean isCryptoAesEncryption = isEncryption && this.rpcServer.conf.getBoolean( - "hbase.rpc.crypto.encryption.aes.enabled", false); + boolean isEncryption = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop().equalsIgnoreCase(qop); + boolean isCryptoAesEncryption = isEncryption + && this.rpcServer.conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false); if (!isCryptoAesEncryption) return; if (!header.hasRpcCryptoCipherTransformation()) return; String transformation = header.getRpcCryptoCipherTransformation(); if (transformation == null || transformation.length() == 0) return; - // Negotiates AES based on complete saslServer. - // The Crypto metadata need to be encrypted and send to client. + // Negotiates AES based on complete saslServer. + // The Crypto metadata need to be encrypted and send to client. 
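Once the handshake has negotiated a privacy QOP and hbase.rpc.crypto.encryption.aes.enabled is set, the code that follows generates a fresh AES key and IV for each direction, sized from hbase.rpc.crypto.encryption.aes.cipher.keySizeBits (default 128, which must be a multiple of 8). A standalone sketch of just that generation step (field names mirror the hunk; giving the IVs the same length as the keys is an assumption carried over from the shape of the visible code):

  import java.security.SecureRandom;

  public final class AesKeyIvExample {
    public static void main(String[] args) {
      int cipherKeyBits = 128; // default of hbase.rpc.crypto.encryption.aes.cipher.keySizeBits
      if (cipherKeyBits % 8 != 0) {
        throw new IllegalArgumentException(
          "The AES cipher key size in bits should be a multiple of byte");
      }
      int len = cipherKeyBits / 8;
      SecureRandom secureRandom = new SecureRandom();
      // One key/IV pair per direction; the client simply uses them with in/out reversed.
      byte[] inKey = new byte[len];
      byte[] outKey = new byte[len];
      byte[] inIv = new byte[len];
      byte[] outIv = new byte[len];
      secureRandom.nextBytes(inKey);
      secureRandom.nextBytes(outKey);
      secureRandom.nextBytes(inIv);
      secureRandom.nextBytes(outIv);
      System.out.println("Generated " + len + "-byte AES key and IV for each direction");
    }
  }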
Properties properties = new Properties(); // the property for SecureRandomFactory properties.setProperty(CryptoRandomFactory.CLASSES_KEY, - this.rpcServer.conf.get("hbase.crypto.sasl.encryption.aes.crypto.random", - "org.apache.commons.crypto.random.JavaCryptoRandom")); + this.rpcServer.conf.get("hbase.crypto.sasl.encryption.aes.crypto.random", + "org.apache.commons.crypto.random.JavaCryptoRandom")); // the property for cipher class properties.setProperty(CryptoCipherFactory.CLASSES_KEY, - this.rpcServer.conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", - "org.apache.commons.crypto.cipher.JceCipher")); + this.rpcServer.conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", + "org.apache.commons.crypto.cipher.JceCipher")); - int cipherKeyBits = this.rpcServer.conf.getInt( - "hbase.rpc.crypto.encryption.aes.cipher.keySizeBits", 128); + int cipherKeyBits = + this.rpcServer.conf.getInt("hbase.rpc.crypto.encryption.aes.cipher.keySizeBits", 128); // generate key and iv if (cipherKeyBits % 8 != 0) { - throw new IllegalArgumentException("The AES cipher key size in bits" + - " should be a multiple of byte"); + throw new IllegalArgumentException( + "The AES cipher key size in bits" + " should be a multiple of byte"); } int len = cipherKeyBits / 8; byte[] inKey = new byte[len]; @@ -258,10 +253,9 @@ private void setupCryptoCipher(final ConnectionHeader header, secureRandom.nextBytes(outIv); // create CryptoAES for server - cryptoAES = new CryptoAES(transformation, properties, - inKey, outKey, inIv, outIv); + cryptoAES = new CryptoAES(transformation, properties, inKey, outKey, inIv, outIv); // create SaslCipherMeta and send to client, - // for client, the [inKey, outKey], [inIv, outIv] should be reversed + // for client, the [inKey, outKey], [inIv, outIv] should be reversed RPCProtos.CryptoCipherMeta.Builder ccmBuilder = RPCProtos.CryptoCipherMeta.newBuilder(); ccmBuilder.setTransformation(transformation); ccmBuilder.setInIv(getByteString(outIv)); @@ -297,8 +291,7 @@ private UserGroupInformation createUser(ConnectionHeader head) { } if (effectiveUser != null) { if (realUser != null) { - UserGroupInformation realUserUgi = - UserGroupInformation.createRemoteUser(realUser); + UserGroupInformation realUserUgi = UserGroupInformation.createRemoteUser(realUser); ugi = UserGroupInformation.createProxyUser(effectiveUser, realUserUgi); } else { ugi = UserGroupInformation.createRemoteUser(effectiveUser); @@ -317,13 +310,13 @@ protected final void disposeSasl() { /** * No protobuf encoding of raw sasl messages */ - protected final void doRawSaslReply(SaslStatus status, Writable rv, - String errorClass, String error) throws IOException { + protected final void doRawSaslReply(SaslStatus status, Writable rv, String errorClass, + String error) throws IOException { BufferChain bc; // In my testing, have noticed that sasl messages are usually // in the ballpark of 100-200. That's why the initial capacity is 256. 
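An aside on the setupCryptoCipher() hunks above: they configure Apache Commons Crypto through the two CLASSES_KEY properties and then draw paired key/IV material from a secure random, reversing the in/out pairs when replying to the client. Below is a minimal, self-contained sketch of just that generation step; the 128-bit size and the JavaCryptoRandom class name mirror the defaults shown above, the class name of the sketch is invented, and none of this is the server's actual negotiation code.

import java.io.IOException;
import java.security.GeneralSecurityException;
import java.util.Properties;
import org.apache.commons.crypto.random.CryptoRandom;
import org.apache.commons.crypto.random.CryptoRandomFactory;

public class CryptoAesKeyMaterialSketch {
  public static void main(String[] args) throws GeneralSecurityException, IOException {
    Properties properties = new Properties();
    // Same property the patch sets to choose the secure-random implementation.
    properties.setProperty(CryptoRandomFactory.CLASSES_KEY,
      "org.apache.commons.crypto.random.JavaCryptoRandom");

    int cipherKeyBits = 128; // must be a multiple of 8, as the check above enforces
    int len = cipherKeyBits / 8;
    byte[] inKey = new byte[len];
    byte[] outKey = new byte[len];
    byte[] inIv = new byte[len];
    byte[] outIv = new byte[len];

    // Fill the per-direction key and IV buffers from the configured CryptoRandom.
    try (CryptoRandom secureRandom = CryptoRandomFactory.getCryptoRandom(properties)) {
      secureRandom.nextBytes(inKey);
      secureRandom.nextBytes(outKey);
      secureRandom.nextBytes(inIv);
      secureRandom.nextBytes(outIv);
    }
    System.out.println("Generated " + len + "-byte keys and IVs");
  }
}

The server keeps one (key, IV) pair per direction; the client is handed the same material with in and out swapped, which is why the CryptoCipherMeta builder above fills its fields from the opposite arrays.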
try (ByteBufferOutputStream saslResponse = new ByteBufferOutputStream(256); - DataOutputStream out = new DataOutputStream(saslResponse)) { + DataOutputStream out = new DataOutputStream(saslResponse)) { out.writeInt(status.state); // write status if (status == SaslStatus.SUCCESS) { rv.write(out); @@ -336,8 +329,7 @@ protected final void doRawSaslReply(SaslStatus status, Writable rv, doRespond(() -> bc); } - public void saslReadAndProcess(ByteBuff saslToken) throws IOException, - InterruptedException { + public void saslReadAndProcess(ByteBuff saslToken) throws IOException, InterruptedException { if (saslContextEstablished) { RpcServer.LOG.trace("Read input token of size={} for processing by saslServer.unwrap()", saslToken.limit()); @@ -345,7 +337,7 @@ public void saslReadAndProcess(ByteBuff saslToken) throws IOException, processOneRpc(saslToken); } else { byte[] b = saslToken.hasArray() ? saslToken.array() : saslToken.toBytes(); - byte [] plaintextData; + byte[] plaintextData; if (useCryptoAesWrap) { // unwrap with CryptoAES plaintextData = cryptoAES.unwrap(b, 0, b.length); @@ -360,19 +352,21 @@ public void saslReadAndProcess(ByteBuff saslToken) throws IOException, if (saslServer == null) { try { saslServer = - new HBaseSaslRpcServer(provider, rpcServer.saslProps, rpcServer.secretManager); - } catch (Exception e){ + new HBaseSaslRpcServer(provider, rpcServer.saslProps, rpcServer.secretManager); + } catch (Exception e) { RpcServer.LOG.error("Error when trying to create instance of HBaseSaslRpcServer " - + "with sasl provider: " + provider, e); + + "with sasl provider: " + provider, + e); throw e; } RpcServer.LOG.debug("Created SASL server with mechanism={}", - provider.getSaslAuthMethod().getAuthMethod()); + provider.getSaslAuthMethod().getAuthMethod()); } - RpcServer.LOG.debug("Read input token of size={} for processing by saslServer." + - "evaluateResponse()", saslToken.limit()); - replyToken = saslServer.evaluateResponse(saslToken.hasArray()? - saslToken.array() : saslToken.toBytes()); + RpcServer.LOG.debug( + "Read input token of size={} for processing by saslServer." + "evaluateResponse()", + saslToken.limit()); + replyToken = saslServer + .evaluateResponse(saslToken.hasArray() ? 
saslToken.array() : saslToken.toBytes()); } catch (IOException e) { RpcServer.LOG.debug("Failed to execute SASL handshake", e); IOException sendToClient = e; @@ -389,26 +383,24 @@ public void saslReadAndProcess(ByteBuff saslToken) throws IOException, this.rpcServer.metrics.authenticationFailure(); String clientIP = this.toString(); // attempting user could be null - RpcServer.AUDITLOG - .warn("{}{}: {}", RpcServer.AUTH_FAILED_FOR, clientIP, saslServer.getAttemptingUser()); + RpcServer.AUDITLOG.warn("{}{}: {}", RpcServer.AUTH_FAILED_FOR, clientIP, + saslServer.getAttemptingUser()); throw e; } if (replyToken != null) { if (RpcServer.LOG.isDebugEnabled()) { - RpcServer.LOG.debug("Will send token of size " + replyToken.length - + " from saslServer."); + RpcServer.LOG.debug("Will send token of size " + replyToken.length + " from saslServer."); } - doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null, - null); + doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null, null); } if (saslServer.isComplete()) { String qop = saslServer.getNegotiatedQop(); useWrap = qop != null && !"auth".equalsIgnoreCase(qop); ugi = provider.getAuthorizedUgi(saslServer.getAuthorizationID(), - this.rpcServer.secretManager); + this.rpcServer.secretManager); RpcServer.LOG.debug( - "SASL server context established. Authenticated client: {}. Negotiated QoP is {}", - ugi, qop); + "SASL server context established. Authenticated client: {}. Negotiated QoP is {}", ugi, + qop); this.rpcServer.metrics.authenticationSuccess(); RpcServer.AUDITLOG.info(RpcServer.AUTH_SUCCESSFUL_FOR + ugi); saslContextEstablished = true; @@ -423,8 +415,7 @@ private void processUnwrappedData(byte[] inBuf) throws IOException, InterruptedE int count; if (unwrappedDataLengthBuffer.remaining() > 0) { count = this.rpcServer.channelRead(ch, unwrappedDataLengthBuffer); - if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0) - return; + if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0) return; } if (unwrappedData == null) { @@ -432,8 +423,7 @@ private void processUnwrappedData(byte[] inBuf) throws IOException, InterruptedE int unwrappedDataLength = unwrappedDataLengthBuffer.getInt(); if (unwrappedDataLength == RpcClient.PING_CALL_ID) { - if (RpcServer.LOG.isDebugEnabled()) - RpcServer.LOG.debug("Received ping message"); + if (RpcServer.LOG.isDebugEnabled()) RpcServer.LOG.debug("Received ping message"); unwrappedDataLengthBuffer.clear(); continue; // ping message } @@ -441,8 +431,7 @@ private void processUnwrappedData(byte[] inBuf) throws IOException, InterruptedE } count = this.rpcServer.channelRead(ch, unwrappedData); - if (count <= 0 || unwrappedData.remaining() > 0) - return; + if (count <= 0 || unwrappedData.remaining() > 0) return; if (unwrappedData.remaining() == 0) { unwrappedDataLengthBuffer.clear(); @@ -453,8 +442,7 @@ private void processUnwrappedData(byte[] inBuf) throws IOException, InterruptedE } } - public void processOneRpc(ByteBuff buf) throws IOException, - InterruptedException { + public void processOneRpc(ByteBuff buf) throws IOException, InterruptedException { if (connectionHeaderRead) { processRequest(buf); } else { @@ -463,8 +451,8 @@ public void processOneRpc(ByteBuff buf) throws IOException, if (rpcServer.needAuthorization() && !authorizeConnection()) { // Throw FatalConnectionException wrapping ACE so client does right thing and closes // down the connection instead of trying to read non-existent retun. 
- throw new AccessDeniedException("Connection from " + this + " for service " + - connectionHeader.getServiceName() + " is unauthorized for user: " + ugi); + throw new AccessDeniedException("Connection from " + this + " for service " + + connectionHeader.getServiceName() + " is unauthorized for user: " + ugi); } this.user = this.rpcServer.userProvider.create(this.ugi); } @@ -476,8 +464,7 @@ private boolean authorizeConnection() throws IOException { // real user for the effective user, therefore not required to // authorize real user. doAs is allowed only for simple or kerberos // authentication - if (ugi != null && ugi.getRealUser() != null - && provider.supportsProtocolAuthentication()) { + if (ugi != null && ugi.getRealUser() != null && provider.supportsProtocolAuthentication()) { ProxyUsers.authorize(ugi, this.getHostAddress(), this.rpcServer.conf); } this.rpcServer.authorize(ugi, connectionHeader, getHostInetAddress()); @@ -498,8 +485,8 @@ private void processConnectionHeader(ByteBuff buf) throws IOException { if (buf.hasArray()) { this.connectionHeader = ConnectionHeader.parseFrom(buf.array()); } else { - CodedInputStream cis = UnsafeByteOperations.unsafeWrap( - new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); + CodedInputStream cis = UnsafeByteOperations + .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput(); cis.enableAliasing(true); this.connectionHeader = ConnectionHeader.parseFrom(cis); } @@ -520,29 +507,26 @@ private void processConnectionHeader(ByteBuff buf) throws IOException { } // audit logging for SASL authenticated users happens in saslReadAndProcess() if (authenticatedWithFallback) { - RpcServer.LOG.warn("Allowed fallback to SIMPLE auth for {} connecting from {}", - ugi, getHostAddress()); + RpcServer.LOG.warn("Allowed fallback to SIMPLE auth for {} connecting from {}", ugi, + getHostAddress()); } } else { // user is authenticated ugi.setAuthenticationMethod(provider.getSaslAuthMethod().getAuthMethod()); - //Now we check if this is a proxy user case. If the protocol user is - //different from the 'user', it is a proxy user scenario. However, - //this is not allowed if user authenticated with DIGEST. - if ((protocolUser != null) - && (!protocolUser.getUserName().equals(ugi.getUserName()))) { + // Now we check if this is a proxy user case. If the protocol user is + // different from the 'user', it is a proxy user scenario. However, + // this is not allowed if user authenticated with DIGEST. + if ((protocolUser != null) && (!protocolUser.getUserName().equals(ugi.getUserName()))) { if (!provider.supportsProtocolAuthentication()) { // Not allowed to doAs if token authentication is used throw new AccessDeniedException("Authenticated user (" + ugi - + ") doesn't match what the client claims to be (" - + protocolUser + ")"); + + ") doesn't match what the client claims to be (" + protocolUser + ")"); } else { // Effective user can be different from authenticated user // for simple auth or kerberos auth // The user is the real user. Now we create a proxy user UserGroupInformation realUser = ugi; - ugi = UserGroupInformation.createProxyUser(protocolUser - .getUserName(), realUser); + ugi = UserGroupInformation.createProxyUser(protocolUser.getUserName(), realUser); // Now the user is a proxy user, set Authentication method Proxy. 
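The proxy-user branch above builds the effective identity with Hadoop's UserGroupInformation before the PROXY authentication method is recorded just below. A standalone, hedged illustration of the same pattern follows; the user names are invented and nothing here is tied to the connection-header handling.

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

public class ProxyUserSketch {
  public static void main(String[] args) {
    // The authenticated ("real") user, e.g. the principal that completed SASL.
    UserGroupInformation realUser = UserGroupInformation.createRemoteUser("rpc-service-user");
    // The user the client claims to act on behalf of (the protocol user in the hunk above).
    UserGroupInformation ugi = UserGroupInformation.createProxyUser("end-user", realUser);
    // Record that this identity was established by proxying rather than by direct authentication.
    ugi.setAuthenticationMethod(AuthenticationMethod.PROXY);
    System.out.println(ugi + ", method=" + ugi.getAuthenticationMethod());
  }
}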
ugi.setAuthenticationMethod(AuthenticationMethod.PROXY); } @@ -551,14 +535,13 @@ private void processConnectionHeader(ByteBuff buf) throws IOException { String version; if (this.connectionHeader.hasVersionInfo()) { // see if this connection will support RetryImmediatelyException - this.retryImmediatelySupported = - VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2); + this.retryImmediatelySupported = VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2); version = this.connectionHeader.getVersionInfo().getVersion(); } else { version = "UNKNOWN"; } RpcServer.AUDITLOG.info("Connection from {}:{}, version={}, sasl={}, ugi={}, service={}", - this.hostAddress, this.remotePort, version, this.useSasl, this.ugi, serviceName); + this.hostAddress, this.remotePort, version, this.useSasl, this.ugi, serviceName); } /** @@ -591,14 +574,12 @@ private void responseConnectionHeader(RPCProtos.ConnectionHeaderResponse.Builder protected abstract void doRespond(RpcResponse resp) throws IOException; /** - * @param buf - * Has the request header and the request param and optionally - * encoded data buffer all in this one array. + * @param buf Has the request header and the request param and optionally encoded data buffer all + * in this one array. * @throws IOException * @throws InterruptedException */ - protected void processRequest(ByteBuff buf) throws IOException, - InterruptedException { + protected void processRequest(ByteBuff buf) throws IOException, InterruptedException { long totalRequestSize = buf.limit(); int offset = 0; // Here we read in the header. We avoid having pb @@ -619,7 +600,7 @@ protected void processRequest(ByteBuff buf) throws IOException, RequestHeader header = (RequestHeader) builder.build(); offset += headerSize; Context traceCtx = GlobalOpenTelemetry.getPropagators().getTextMapPropagator() - .extract(Context.current(), header.getTraceInfo(), getter); + .extract(Context.current(), header.getTraceInfo(), getter); // n.b. Management of this Span instance is a little odd. Most exit paths from this try scope // are early-exits due to error cases. There's only one success path, the asynchronous call to @@ -631,20 +612,20 @@ protected void processRequest(ByteBuff buf) throws IOException, try (Scope ignored = span.makeCurrent()) { int id = header.getCallId(); if (RpcServer.LOG.isTraceEnabled()) { - RpcServer.LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) + - " totalRequestSize: " + totalRequestSize + " bytes"); + RpcServer.LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) + + " totalRequestSize: " + totalRequestSize + " bytes"); } // Enforcing the call queue size, this triggers a retry in the client // This is a bit late to be doing this check - we have already read in the // total request. 
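The size check that follows rejects a request once the bytes of queued calls would exceed maxQueueSizeInBytes, and the error message it produces points operators at hbase.ipc.server.max.callqueue.size. A hedged sketch of raising that limit programmatically is below; the 2 GB value and the sketch's class name are purely illustrative, and in practice the property would normally be set in hbase-site.xml rather than in code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CallQueueSizeConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Aggregate bytes of queued calls allowed before CALL_QUEUE_TOO_BIG_EXCEPTION is returned.
    conf.setLong("hbase.ipc.server.max.callqueue.size", 2L * 1024 * 1024 * 1024);
    System.out.println(conf.getLong("hbase.ipc.server.max.callqueue.size", -1));
  }
}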
- if ((totalRequestSize + - this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) { + if ((totalRequestSize + + this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) { final ServerCall callTooBig = createCall(id, this.service, null, null, null, null, totalRequestSize, null, 0, this.callCleanup); this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); callTooBig.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION, - "Call queue is full on " + this.rpcServer.server.getServerName() + - ", is hbase.ipc.server.max.callqueue.size too small?"); + "Call queue is full on " + this.rpcServer.server.getServerName() + + ", is hbase.ipc.server.max.callqueue.size too small?"); TraceUtil.setError(span, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); callTooBig.sendResponseIfReady(); return; @@ -670,8 +651,8 @@ protected void processRequest(ByteBuff buf) throws IOException, } else { // currently header must have request param, so we directly throw // exception here - String msg = "Invalid request header: " + TextFormat.shortDebugString(header) + - ", should have param set in it"; + String msg = "Invalid request header: " + TextFormat.shortDebugString(header) + + ", should have param set in it"; RpcServer.LOG.warn(msg); throw new DoNotRetryIOException(msg); } @@ -684,8 +665,8 @@ protected void processRequest(ByteBuff buf) throws IOException, } } catch (Throwable thrown) { InetSocketAddress address = this.rpcServer.getListenerAddress(); - String msg = (address != null ? address : "(channel closed)") + - " is unable to read call parameter from client " + getHostAddress(); + String msg = (address != null ? address : "(channel closed)") + + " is unable to read call parameter from client " + getHostAddress(); RpcServer.LOG.warn(msg, thrown); this.rpcServer.metrics.exception(thrown); @@ -703,8 +684,8 @@ protected void processRequest(ByteBuff buf) throws IOException, ServerCall readParamsFailedCall = createCall(id, this.service, null, null, null, null, totalRequestSize, null, 0, this.callCleanup); - readParamsFailedCall.setResponse(null, null, responseThrowable, msg + "; " - + responseThrowable.getMessage()); + readParamsFailedCall.setResponse(null, null, responseThrowable, + msg + "; " + responseThrowable.getMessage()); TraceUtil.setError(span, responseThrowable); readParamsFailedCall.sendResponseIfReady(); return; @@ -724,8 +705,8 @@ protected void processRequest(ByteBuff buf) throws IOException, this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize()); this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); call.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION, - "Call queue is full on " + this.rpcServer.server.getServerName() + - ", too many items queued ?"); + "Call queue is full on " + this.rpcServer.server.getServerName() + + ", too many items queued ?"); TraceUtil.setError(span, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION); call.sendResponseIfReady(); } @@ -759,9 +740,9 @@ protected final boolean processPreamble(ByteBuffer preambleBuffer) throws IOExce for (int i = 0; i < RPC_HEADER.length; i++) { if (RPC_HEADER[i] != preambleBuffer.get()) { doBadPreambleHandling( - "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER=" + - Bytes.toStringBinary(preambleBuffer.array(), 0, RPC_HEADER.length) + " from " + - toString()); + "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER=" + + Bytes.toStringBinary(preambleBuffer.array(), 0, RPC_HEADER.length) + " from 
" + + toString()); return false; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java index 641aaefa4e1e..0ead79b9b01d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java @@ -29,11 +29,13 @@ /** * The default scheduler. Configurable. Maintains isolated handler pools for general ('default'), * high-priority ('priority'), and replication ('replication') requests. Default behavior is to - * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc. - * See below article for explanation of options. - * @see Overview on Request Queuing + * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc. See + * below article for explanation of options. + * @see Overview + * on Request Queuing */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObserver { private int port; @@ -60,18 +62,12 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs * @param highPriorityLevel * @param priority Function to extract request priority. */ - public SimpleRpcScheduler( - Configuration conf, - int handlerCount, - int priorityHandlerCount, - int replicationHandlerCount, - int metaTransitionHandler, - PriorityFunction priority, - Abortable server, - int highPriorityLevel) { + public SimpleRpcScheduler(Configuration conf, int handlerCount, int priorityHandlerCount, + int replicationHandlerCount, int metaTransitionHandler, PriorityFunction priority, + Abortable server, int highPriorityLevel) { int maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, - handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); int maxPriorityQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_PRIORITY_MAX_CALLQUEUE_LENGTH, priorityHandlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); int maxReplicationQueueLength = @@ -82,18 +78,17 @@ public SimpleRpcScheduler( this.highPriorityLevel = highPriorityLevel; this.abortable = server; - String callQueueType = conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, - RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); + String callQueueType = + conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); float callqReadShare = conf.getFloat(RWQueueRpcExecutor.CALL_QUEUE_READ_SHARE_CONF_KEY, 0); if (callqReadShare > 0) { // at least 1 read handler and 1 write handler callExecutor = new FastPathRWQueueRpcExecutor("default.FPRWQ", Math.max(2, handlerCount), - maxQueueLength, priority, conf, server); + maxQueueLength, priority, conf, server); } else { - if (RpcExecutor.isFifoQueueType(callQueueType) || - RpcExecutor.isCodelQueueType(callQueueType) || - RpcExecutor.isPluggableQueueWithFastPath(callQueueType, conf)) { + if (RpcExecutor.isFifoQueueType(callQueueType) || RpcExecutor.isCodelQueueType(callQueueType) + || RpcExecutor.isPluggableQueueWithFastPath(callQueueType, conf)) { callExecutor = new FastPathBalancedQueueRpcExecutor("default.FPBQ", handlerCount, maxQueueLength, 
priority, conf, server); } else { @@ -104,19 +99,18 @@ public SimpleRpcScheduler( float metaCallqReadShare = conf.getFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_READ_SHARE_CONF_KEY, - MetaRWQueueRpcExecutor.DEFAULT_META_CALL_QUEUE_READ_SHARE); + MetaRWQueueRpcExecutor.DEFAULT_META_CALL_QUEUE_READ_SHARE); if (metaCallqReadShare > 0) { // different read/write handler for meta, at least 1 read handler and 1 write handler - this.priorityExecutor = - new MetaRWQueueRpcExecutor("priority.RWQ", Math.max(2, priorityHandlerCount), - maxPriorityQueueLength, priority, conf, server); + this.priorityExecutor = new MetaRWQueueRpcExecutor("priority.RWQ", + Math.max(2, priorityHandlerCount), maxPriorityQueueLength, priority, conf, server); } else { // Create 2 queues to help priorityExecutor be more scalable. - this.priorityExecutor = priorityHandlerCount > 0 ? - new FastPathBalancedQueueRpcExecutor("priority.FPBQ", priorityHandlerCount, + this.priorityExecutor = priorityHandlerCount > 0 + ? new FastPathBalancedQueueRpcExecutor("priority.FPBQ", priorityHandlerCount, RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, - abortable) : - null; + abortable) + : null; } this.replicationExecutor = replicationHandlerCount > 0 @@ -124,11 +118,11 @@ public SimpleRpcScheduler( RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxReplicationQueueLength, priority, conf, abortable) : null; - this.metaTransitionExecutor = metaTransitionHandler > 0 ? - new FastPathBalancedQueueRpcExecutor("metaPriority.FPBQ", metaTransitionHandler, + this.metaTransitionExecutor = metaTransitionHandler > 0 + ? new FastPathBalancedQueueRpcExecutor("metaPriority.FPBQ", metaTransitionHandler, RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE, maxPriorityQueueLength, priority, conf, - abortable) : - null; + abortable) + : null; } public SimpleRpcScheduler(Configuration conf, int handlerCount, int priorityHandlerCount, @@ -154,10 +148,10 @@ public void onConfigurationChange(Configuration conf) { metaTransitionExecutor.resizeQueues(conf); } - String callQueueType = conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, - RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); - if (RpcExecutor.isCodelQueueType(callQueueType) || - RpcExecutor.isPluggableQueueType(callQueueType)) { + String callQueueType = + conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT); + if (RpcExecutor.isCodelQueueType(callQueueType) + || RpcExecutor.isPluggableQueueType(callQueueType)) { callExecutor.onConfigurationChange(conf); } } @@ -200,13 +194,13 @@ public void stop() { @Override public boolean dispatch(CallRunner callTask) { RpcCall call = callTask.getRpcCall(); - int level = priority.getPriority(call.getHeader(), call.getParam(), - call.getRequestUser().orElse(null)); + int level = + priority.getPriority(call.getHeader(), call.getParam(), call.getRequestUser().orElse(null)); if (level == HConstants.PRIORITY_UNSET) { level = HConstants.NORMAL_QOS; } - if (metaTransitionExecutor != null && - level == MasterAnnotationReadingPriorityFunction.META_TRANSITION_QOS) { + if (metaTransitionExecutor != null + && level == MasterAnnotationReadingPriorityFunction.META_TRANSITION_QOS) { return metaTransitionExecutor.dispatch(callTask); } else if (priorityExecutor != null && level > highPriorityLevel) { return priorityExecutor.dispatch(callTask); @@ -330,7 +324,7 @@ public CallQueueInfo getCallQueueInfo() { if (null != metaTransitionExecutor) { queueName = "Meta Transition Queue"; callQueueInfo.setCallMethodCount(queueName, - 
metaTransitionExecutor.getCallQueueCountsSummary()); + metaTransitionExecutor.getCallQueueCountsSummary()); callQueueInfo.setCallMethodSize(queueName, metaTransitionExecutor.getCallQueueSizeSummary()); } @@ -338,4 +332,3 @@ public CallQueueInfo getCallQueueInfo() { } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index 20ea1f544182..8a0031229ffa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,47 +54,41 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * The RPC server with native java NIO implementation deriving from Hadoop to - * host protobuf described Services. It's the original one before HBASE-17262, - * and the default RPC server for now. - * - * An RpcServer instance has a Listener that hosts the socket. Listener has fixed number - * of Readers in an ExecutorPool, 10 by default. The Listener does an accept and then - * round robin a Reader is chosen to do the read. The reader is registered on Selector. Read does - * total read off the channel and the parse from which it makes a Call. The call is wrapped in a - * CallRunner and passed to the scheduler to be run. Reader goes back to see if more to be done - * and loops till done. - * - *
<p>
          Scheduler can be variously implemented but default simple scheduler has handlers to which it - * has given the queues into which calls (i.e. CallRunner instances) are inserted. Handlers run - * taking from the queue. They run the CallRunner#run method on each item gotten from queue - * and keep taking while the server is up. - * - * CallRunner#run executes the call. When done, asks the included Call to put itself on new - * queue for Responder to pull from and return result to client. - * + * The RPC server with native java NIO implementation deriving from Hadoop to host protobuf + * described Services. It's the original one before HBASE-17262, and the default RPC server for now. + * An RpcServer instance has a Listener that hosts the socket. Listener has fixed number of Readers + * in an ExecutorPool, 10 by default. The Listener does an accept and then round robin a Reader is + * chosen to do the read. The reader is registered on Selector. Read does total read off the channel + * and the parse from which it makes a Call. The call is wrapped in a CallRunner and passed to the + * scheduler to be run. Reader goes back to see if more to be done and loops till done. + *
<p>
          + * Scheduler can be variously implemented but default simple scheduler has handlers to which it has + * given the queues into which calls (i.e. CallRunner instances) are inserted. Handlers run taking + * from the queue. They run the CallRunner#run method on each item gotten from queue and keep taking + * while the server is up. CallRunner#run executes the call. When done, asks the included Call to + * put itself on new queue for Responder to pull from and return result to client. * @see BlockingRpcClient */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.CONFIG }) public class SimpleRpcServer extends RpcServer { - protected int port; // port we listen on - protected InetSocketAddress address; // inet address we listen on - private int readThreads; // number of read threads + protected int port; // port we listen on + protected InetSocketAddress address; // inet address we listen on + private int readThreads; // number of read threads protected int socketSendBufferSize; - protected final long purgeTimeout; // in milliseconds + protected final long purgeTimeout; // in milliseconds // maintains the set of client connections and handles idle timeouts private ConnectionManager connectionManager; private Listener listener = null; protected SimpleRpcServerResponder responder = null; - /** Listens on the socket. Creates jobs for the handler threads*/ + /** Listens on the socket. Creates jobs for the handler threads */ private class Listener extends Thread { - private ServerSocketChannel acceptChannel = null; //the accept channel - private Selector selector = null; //the selector that we use for the server + private ServerSocketChannel acceptChannel = null; // the accept channel + private Selector selector = null; // the selector that we use for the server private Reader[] readers = null; private int currentReader = 0; private final int readerPendingConnectionQueueLength; @@ -113,19 +107,17 @@ public Listener(final String name) throws IOException { // Bind the server socket to the binding addrees (can be different from the default interface) bind(acceptChannel.socket(), bindAddress, backlogLength); - port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port - address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); + port = acceptChannel.socket().getLocalPort(); // Could be an ephemeral port + address = (InetSocketAddress) acceptChannel.socket().getLocalSocketAddress(); // create a selector; selector = Selector.open(); readers = new Reader[readThreads]; // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it // has an advantage in that it is easy to shutdown the pool. 
- readPool = Executors.newFixedThreadPool(readThreads, - new ThreadFactoryBuilder().setNameFormat( - "Reader=%d,bindAddress=" + bindAddress.getHostName() + - ",port=" + port).setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + readPool = Executors.newFixedThreadPool(readThreads, new ThreadFactoryBuilder() + .setNameFormat("Reader=%d,bindAddress=" + bindAddress.getHostName() + ",port=" + port) + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); for (int i = 0; i < readThreads; ++i) { Reader reader = new Reader(); readers[i] = reader; @@ -139,7 +131,6 @@ public Listener(final String name) throws IOException { this.setDaemon(true); } - private class Reader implements Runnable { final private LinkedBlockingQueue pendingConnections; private final Selector readSelector; @@ -168,7 +159,7 @@ private synchronized void doRunLoop() { // Consume as many connections as currently queued to avoid // unbridled acceptance of connections that starves the select int size = pendingConnections.size(); - for (int i=size; i>0; i--) { + for (int i = size; i > 0; i--) { SimpleServerRpcConnection conn = pendingConnections.take(); conn.channel.register(readSelector, SelectionKey.OP_READ, conn); } @@ -185,7 +176,7 @@ private synchronized void doRunLoop() { key = null; } } catch (InterruptedException e) { - if (running) { // unexpected -- log it + if (running) { // unexpected -- log it LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e); } } catch (CancelledKeyException e) { @@ -197,9 +188,9 @@ private synchronized void doRunLoop() { } /** - * Updating the readSelector while it's being used is not thread-safe, - * so the connection must be queued. The reader will drain the queue - * and update its readSelector before performing the next select + * Updating the readSelector while it's being used is not thread-safe, so the connection must + * be queued. 
The reader will drain the queue and update its readSelector before performing + * the next select */ public void addConnection(SimpleServerRpcConnection conn) throws IOException { pendingConnections.add(conn); @@ -208,9 +199,9 @@ public void addConnection(SimpleServerRpcConnection conn) throws IOException { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC", - justification="selector access is not synchronized; seems fine but concerned changing " + - "it will have per impact") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC", + justification = "selector access is not synchronized; seems fine but concerned changing " + + "it will have per impact") public void run() { LOG.info(getName() + ": starting"); connectionManager.startIdleScan(); @@ -224,8 +215,7 @@ public void run() { iter.remove(); try { if (key.isValid()) { - if (key.isAcceptable()) - doAccept(key); + if (key.isAcceptable()) doAccept(key); } } catch (IOException ignored) { if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored); @@ -266,8 +256,8 @@ public void run() { if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored); } - selector= null; - acceptChannel= null; + selector = null; + acceptChannel = null; // close all connections connectionManager.stopIdleScan(); @@ -277,7 +267,7 @@ public void run() { private void closeCurrentConnection(SelectionKey key, Throwable e) { if (key != null) { - SimpleServerRpcConnection c = (SimpleServerRpcConnection)key.attachment(); + SimpleServerRpcConnection c = (SimpleServerRpcConnection) key.attachment(); if (c != null) { closeConnection(c); key.attach(null); @@ -305,7 +295,7 @@ void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfM } continue; } - key.attach(c); // so closeCurrentConnection can get the object + key.attach(c); // so closeCurrentConnection can get the object reader.addConnection(c); } } @@ -320,13 +310,14 @@ void doRead(SelectionKey key) throws InterruptedException { try { count = c.readAndProcess(); } catch (InterruptedException ieo) { - LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo); + LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", + ieo); throw ieo; } catch (Exception e) { if (LOG.isDebugEnabled()) { LOG.debug("Caught exception while reading:", e); } - count = -1; //so that the (count < 0) block is executed + count = -1; // so that the (count < 0) block is executed } if (count < 0) { closeConnection(c); @@ -361,8 +352,8 @@ Reader getReader() { /** * Constructs a server listening on the named port and address. - * @param server hosting instance of {@link Server}. We will do authentications if an - * instance else pass null for no authentication check. + * @param server hosting instance of {@link Server}. We will do authentications if an instance + * else pass null for no authentication check. * @param name Used keying this rpc servers' metrics and for naming the Listener thread. * @param services A list of services. * @param bindAddress Where to listen @@ -371,9 +362,8 @@ Reader getReader() { * @param reservoirEnabled Enable ByteBufferPool or not. 
*/ public SimpleRpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler, boolean reservoirEnabled) throws IOException { super(server, name, services, bindAddress, conf, scheduler, reservoirEnabled); this.socketSendBufferSize = 0; this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10); @@ -393,8 +383,7 @@ public SimpleRpcServer(final Server server, final String name, } /** - * Subclasses of HBaseServer can override this to provide their own - * Connection implementations. + * Subclasses of HBaseServer can override this to provide their own Connection implementations. */ protected SimpleServerRpcConnection getConnection(SocketChannel channel, long time) { return new SimpleServerRpcConnection(this, channel, time); @@ -404,11 +393,14 @@ protected void closeConnection(SimpleServerRpcConnection connection) { connectionManager.close(connection); } - /** Sets the socket buffer size used for responding to RPCs. + /** + * Sets the socket buffer size used for responding to RPCs. * @param size send size */ @Override - public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } + public void setSocketSendBufSize(int size) { + this.socketSendBufferSize = size; + } /** Starts the service. Must be called before any calls will be handled. */ @Override @@ -461,10 +453,10 @@ public synchronized void join() throws InterruptedException { } /** - * Return the socket (ip+port) on which the RPC server is listening to. May return null if - * the listener channel is closed. + * Return the socket (ip+port) on which the RPC server is listening to. May return null if the + * listener channel is closed. * @return the socket (ip+port) on which the RPC server is listening to, or null if this - * information cannot be determined + * information cannot be determined */ @Override public synchronized InetSocketAddress getListenerAddress() { @@ -475,20 +467,18 @@ public synchronized InetSocketAddress getListenerAddress() { } /** - * This is a wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}. - * If the amount of data is large, it writes to channel in smaller chunks. - * This is to avoid jdk from creating many direct buffers as the size of - * buffer increases. This also minimizes extra copies in NIO layer - * as a result of multiple write operations required to write a large - * buffer. - * + * This is a wrapper around + * {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}. If the amount of data + * is large, it writes to channel in smaller chunks. This is to avoid jdk from creating many + * direct buffers as the size of buffer increases. This also minimizes extra copies in NIO layer + * as a result of multiple write operations required to write a large buffer. 
* @param channel writable byte channel to write to * @param bufferChain Chain of buffers to write * @return number of bytes written * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer) */ protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChain) - throws IOException { + throws IOException { long count = bufferChain.write(channel, NIO_BUFFER_LIMIT); if (count > 0) { this.metrics.sentBytes(count); @@ -497,8 +487,8 @@ protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChai } /** - * A convenience method to bind to a given address and report - * better exceptions if the address is not a valid host. + * A convenience method to bind to a given address and report better exceptions if the address is + * not a valid host. * @param socket the socket to bind * @param address the address to bind to * @param backlog the number of connections allowed in the queue @@ -507,12 +497,12 @@ protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChai * @throws IOException other random errors from bind */ public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) - throws IOException { + throws IOException { try { socket.bind(address, backlog); } catch (BindException e) { BindException bindException = - new BindException("Problem binding to " + address + " : " + e.getMessage()); + new BindException("Problem binding to " + address + " : " + e.getMessage()); bindException.initCause(e); throw bindException; } catch (SocketException e) { @@ -552,14 +542,14 @@ private class ConnectionManager { this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); this.maxIdleToClose = conf.getInt("hbase.ipc.client.kill.max", 10); int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); int maxConnectionQueueSize = handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100); // create a set with concurrency -and- a thread-safe iterator, add 2 // for listener and idle closer threads - this.connections = Collections.newSetFromMap( - new ConcurrentHashMap( - maxConnectionQueueSize, 0.75f, readThreads+2)); + this.connections = + Collections.newSetFromMap(new ConcurrentHashMap( + maxConnectionQueueSize, 0.75f, readThreads + 2)); } private boolean add(SimpleServerRpcConnection connection) { @@ -587,16 +577,15 @@ SimpleServerRpcConnection[] toArray() { } SimpleServerRpcConnection register(SocketChannel channel) { - SimpleServerRpcConnection connection = getConnection(channel, - EnvironmentEdgeManager.currentTime()); + SimpleServerRpcConnection connection = + getConnection(channel, EnvironmentEdgeManager.currentTime()); add(connection); if (LOG.isTraceEnabled()) { - LOG.trace("Connection from " + connection + - "; connections=" + size() + - ", queued calls size (bytes)=" + callQueueSizeInBytes.sum() + - ", general queued calls=" + scheduler.getGeneralQueueLength() + - ", priority queued calls=" + scheduler.getPriorityQueueLength() + - ", meta priority queued calls=" + scheduler.getMetaPriorityQueueLength()); + LOG.trace("Connection from " + connection + "; connections=" + size() + + ", queued calls size (bytes)=" + callQueueSizeInBytes.sum() + + ", general queued calls=" + scheduler.getGeneralQueueLength() + + ", priority queued calls=" + scheduler.getPriorityQueueLength() + + ", meta priority queued calls=" + scheduler.getMetaPriorityQueueLength()); } return connection; 
} @@ -605,9 +594,8 @@ boolean close(SimpleServerRpcConnection connection) { boolean exists = remove(connection); if (exists) { if (LOG.isTraceEnabled()) { - LOG.trace(Thread.currentThread().getName() + - ": disconnecting client " + connection + - ". Number of active connections: "+ size()); + LOG.trace(Thread.currentThread().getName() + ": disconnecting client " + connection + + ". Number of active connections: " + size()); } // only close if actually removed to avoid double-closing due // to possible races @@ -630,10 +618,8 @@ synchronized void closeIdle(boolean scanAll) { break; } // stop if not scanning all and max connections are closed - if (connection.isIdle() && - connection.getLastContact() < minLastContact && - close(connection) && - !scanAll && (++closed == maxIdleToClose)) { + if (connection.isIdle() && connection.getLastContact() < minLastContact && close(connection) + && !scanAll && (++closed == maxIdleToClose)) { break; } } @@ -659,7 +645,7 @@ private void scheduleIdleScanTask() { if (!running) { return; } - TimerTask idleScanTask = new TimerTask(){ + TimerTask idleScanTask = new TimerTask() { @Override public void run() { if (!running) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java index d6d5dd09a85b..34e6472a473d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Iterator; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; @@ -176,8 +175,8 @@ private long purge(long lastPurgeTime) { if (connection == null) { throw new IllegalStateException("Coding error: SelectionKey key without attachment."); } - if (connection.lastSentTime > 0 && - now > connection.lastSentTime + this.simpleRpcServer.purgeTimeout) { + if (connection.lastSentTime > 0 + && now > connection.lastSentTime + this.simpleRpcServer.purgeTimeout) { conWithOldCalls.add(connection); } } @@ -227,8 +226,7 @@ private boolean processResponse(SimpleServerRpcConnection conn, RpcResponse resp BufferChain buf = resp.getResponse(); try { // Send as much data as we can in the non-blocking fashion - long numBytes = - this.simpleRpcServer.channelWrite(conn.channel, buf); + long numBytes = this.simpleRpcServer.channelWrite(conn.channel, buf); if (numBytes < 0) { throw new HBaseIOException("Error writing on the socket " + conn); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java index 311b4c7b1a9c..6a1cbfe6ac7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,14 +19,15 @@ import java.io.IOException; import java.net.InetAddress; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java index 622e67ab781d..724346bb9c4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -181,10 +181,10 @@ public int readAndProcess() throws IOException, InterruptedException { } if (dataLength > this.rpcServer.maxRequestSize) { - String msg = "RPC data length of " + dataLength + " received from " + getHostAddress() + - " is greater than max allowed " + this.rpcServer.maxRequestSize + ". Set \"" + - SimpleRpcServer.MAX_REQUEST_SIZE + - "\" on server to override this limit (not recommended)"; + String msg = "RPC data length of " + dataLength + " received from " + getHostAddress() + + " is greater than max allowed " + this.rpcServer.maxRequestSize + ". Set \"" + + SimpleRpcServer.MAX_REQUEST_SIZE + + "\" on server to override this limit (not recommended)"; SimpleRpcServer.LOG.warn(msg); if (connectionHeaderRead && connectionPreambleRead) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java index 5b4a2c241b44..d848a7ae495d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java index ebe24463ea4a..5dba39f6ddff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +16,7 @@ * limitations under the License. 
*/ package org.apache.hadoop.hbase.master; + import java.io.IOException; import java.io.InterruptedIOException; import java.util.List; @@ -36,22 +36,24 @@ import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * Handles everything on master-side related to master election. Keeps track of - * currently active master and registered backup masters. - * - *
<p>
          Listens and responds to ZooKeeper notifications on the master znodes, - * both nodeCreated and nodeDeleted. - * - *
<p>
          Contains blocking methods which will hold up backup masters, waiting - * for the active master to fail. - * - *
<p>
          This class is instantiated in the HMaster constructor and the method - * #blockUntilBecomingActiveMaster() is called to wait until becoming - * the active master of the cluster. + * Handles everything on master-side related to master election. Keeps track of currently active + * master and registered backup masters. + *
<p>
          + * Listens and responds to ZooKeeper notifications on the master znodes, both + * nodeCreated and nodeDeleted. + *
<p>
          + * Contains blocking methods which will hold up backup masters, waiting for the active master to + * fail. + *
<p>
          + * This class is instantiated in the HMaster constructor and the method + * #blockUntilBecomingActiveMaster() is called to wait until becoming the active master of the + * cluster. */ @InterfaceAudience.Private public class ActiveMasterManager extends ZKListener { @@ -117,7 +119,7 @@ public void nodeDeleted(String path) { // shut down, so that state is now irrelevant. This means that the shutdown // state must be set while we wait on the active master in order // to shutdown this master. See HBASE-8519. - if(path.equals(watcher.getZNodePaths().clusterStateZNode) && !master.isStopped()) { + if (path.equals(watcher.getZNodePaths().clusterStateZNode) && !master.isStopped()) { clusterShutDown.set(true); } handle(path); @@ -177,22 +179,21 @@ public int getBackupMasterInfoPort(final ServerName sn) { } /** - * Handle a change in the master node. Doesn't matter whether this was called - * from a nodeCreated or nodeDeleted event because there are no guarantees - * that the current state of the master node matches the event at the time of - * our next ZK request. - * - *
<p>
          Uses the watchAndCheckExists method which watches the master address node - * regardless of whether it exists or not. If it does exist (there is an - * active master), it returns true. Otherwise it returns false. - * - *
<p>
          A watcher is set which guarantees that this method will get called again if - * there is another change in the master node. + * Handle a change in the master node. Doesn't matter whether this was called from a nodeCreated + * or nodeDeleted event because there are no guarantees that the current state of the master node + * matches the event at the time of our next ZK request. + *
<p>
          + * Uses the watchAndCheckExists method which watches the master address node regardless of whether + * it exists or not. If it does exist (there is an active master), it returns true. Otherwise it + * returns false. + *
<p>
          + * A watcher is set which guarantees that this method will get called again if there is another + * change in the master node. */ private void handleMasterNodeChange() { // Watch the node and check if it exists. try { - synchronized(clusterHasActiveMaster) { + synchronized (clusterHasActiveMaster) { if (ZKUtil.watchAndCheckExists(watcher, watcher.getZNodePaths().masterAddressZNode)) { // A master node exists, there is an active master LOG.trace("A master is now available"); @@ -214,30 +215,24 @@ private void handleMasterNodeChange() { } /** - * Block until becoming the active master. - * - * Method blocks until there is not another active master and our attempt - * to become the new active master is successful. - * - * This also makes sure that we are watching the master znode so will be - * notified if another master dies. + * Block until becoming the active master. Method blocks until there is not another active master + * and our attempt to become the new active master is successful. This also makes sure that we are + * watching the master znode so will be notified if another master dies. * @param checkInterval the interval to check if the master is stopped * @param startupStatus the monitor status to track the progress - * @return True if no issue becoming active master else false if another - * master was running or if some other problem (zookeeper, stop flag has been - * set on this Master) + * @return True if no issue becoming active master else false if another master was running or if + * some other problem (zookeeper, stop flag has been set on this Master) */ - boolean blockUntilBecomingActiveMaster( - int checkInterval, MonitoredTask startupStatus) { - String backupZNode = ZNodePaths.joinZNode( - this.watcher.getZNodePaths().backupMasterAddressesZNode, this.sn.toString()); + boolean blockUntilBecomingActiveMaster(int checkInterval, MonitoredTask startupStatus) { + String backupZNode = ZNodePaths + .joinZNode(this.watcher.getZNodePaths().backupMasterAddressesZNode, this.sn.toString()); while (!(master.isAborted() || master.isStopped())) { startupStatus.setStatus("Trying to register in ZK as active master"); // Try to become the active master, watch if there is another master. // Write out our ServerName as versioned bytes. try { if (MasterAddressTracker.setMasterAddress(this.watcher, - this.watcher.getZNodePaths().masterAddressZNode, this.sn, infoPort)) { + this.watcher.getZNodePaths().masterAddressZNode, this.sn, infoPort)) { // If we were a backup master before, delete our ZNode from the backup // master directory since we are the active now) @@ -265,32 +260,32 @@ boolean blockUntilBecomingActiveMaster( String msg; byte[] bytes = - ZKUtil.getDataAndWatch(this.watcher, this.watcher.getZNodePaths().masterAddressZNode); + ZKUtil.getDataAndWatch(this.watcher, this.watcher.getZNodePaths().masterAddressZNode); if (bytes == null) { - msg = ("A master was detected, but went down before its address " + - "could be read. Attempting to become the next active master"); + msg = ("A master was detected, but went down before its address " + + "could be read. Attempting to become the next active master"); } else { ServerName currentMaster; try { currentMaster = ProtobufUtil.parseServerNameFrom(bytes); } catch (DeserializationException e) { LOG.warn("Failed parse", e); - // Hopefully next time around we won't fail the parse. Dangerous. + // Hopefully next time around we won't fail the parse. Dangerous. 
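The blockUntilBecomingActiveMaster() hunks in this file describe waiting on a shared monitor, re-checking every checkInterval millis, until the current master goes away or the cluster shuts down. A self-contained, hedged sketch of that wait pattern only (field and class names are stand-ins, and the one-second "master dies" thread is pure simulation):

import java.util.concurrent.atomic.AtomicBoolean;

public class ActiveMasterWaitSketch {
  // Stand-ins for the manager's fields; in the real class these are flipped by
  // handleMasterNodeChange() when the master znode appears or disappears.
  private final AtomicBoolean clusterHasActiveMaster = new AtomicBoolean(true);
  private final AtomicBoolean clusterShutDown = new AtomicBoolean(false);

  // Block on the shared monitor until the active master is gone or shutdown begins.
  void waitForChanceToBecomeActive(int checkInterval) throws InterruptedException {
    synchronized (clusterHasActiveMaster) {
      while (clusterHasActiveMaster.get() && !clusterShutDown.get()) {
        clusterHasActiveMaster.wait(checkInterval);
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ActiveMasterWaitSketch sketch = new ActiveMasterWaitSketch();
    // Simulate the current master dying after one second.
    new Thread(() -> {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {
      }
      synchronized (sketch.clusterHasActiveMaster) {
        sketch.clusterHasActiveMaster.set(false);
        sketch.clusterHasActiveMaster.notifyAll();
      }
    }).start();
    sketch.waitForChanceToBecomeActive(200);
    System.out.println("No active master now; would try to register in ZK");
  }
}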
continue; } if (ServerName.isSameAddress(currentMaster, this.sn)) { - msg = ("Current master has this master's address, " + - currentMaster + "; master was restarted? Deleting node."); + msg = ("Current master has this master's address, " + currentMaster + + "; master was restarted? Deleting node."); // Hurry along the expiration of the znode. ZKUtil.deleteNode(this.watcher, this.watcher.getZNodePaths().masterAddressZNode); // We may have failed to delete the znode at the previous step, but - // we delete the file anyway: a second attempt to delete the znode is likely to fail - // again. + // we delete the file anyway: a second attempt to delete the znode is likely to fail + // again. ZNodeClearer.deleteMyEphemeralNodeOnDisk(); } else { - msg = "Another master is the active master, " + currentMaster + - "; waiting to become the next active master"; + msg = "Another master is the active master, " + currentMaster + + "; waiting to become the next active master"; } } LOG.info(msg); @@ -305,13 +300,12 @@ boolean blockUntilBecomingActiveMaster( clusterHasActiveMaster.wait(checkInterval); } catch (InterruptedException e) { // We expect to be interrupted when a master dies, - // will fall out if so + // will fall out if so LOG.debug("Interrupted waiting for master to die", e); } } if (clusterShutDown.get()) { - this.master.stop( - "Cluster went down before this master became active"); + this.master.stop("Cluster went down before this master became active"); } } } @@ -326,10 +320,8 @@ boolean hasActiveMaster() { if (ZKUtil.checkExists(watcher, watcher.getZNodePaths().masterAddressZNode) >= 0) { return true; } - } - catch (KeeperException ke) { - LOG.info("Received an unexpected KeeperException when checking " + - "isActiveMaster : "+ ke); + } catch (KeeperException ke) { + LOG.info("Received an unexpected KeeperException when checking " + "isActiveMaster : " + ke); } return false; } @@ -348,15 +340,14 @@ public void stop() { } catch (IOException e) { LOG.warn("Failed get of master address: " + e.toString()); } - if (activeMaster != null && activeMaster.equals(this.sn)) { + if (activeMaster != null && activeMaster.equals(this.sn)) { ZKUtil.deleteNode(watcher, watcher.getZNodePaths().masterAddressZNode); // We may have failed to delete the znode at the previous step, but - // we delete the file anyway: a second attempt to delete the znode is likely to fail again. + // we delete the file anyway: a second attempt to delete the znode is likely to fail again. ZNodeClearer.deleteMyEphemeralNodeOnDisk(); } } catch (KeeperException e) { - LOG.debug(this.watcher.prefix("Failed delete of our master address node; " + - e.getMessage())); + LOG.debug(this.watcher.prefix("Failed delete of our master address node; " + e.getMessage())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java index 2f75560dae8c..67d8ef80ce69 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import java.io.IOException; @@ -35,11 +34,9 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Caches the cluster ID of the cluster. 
For standby masters, this is used to serve the client - * RPCs that fetch the cluster ID. ClusterID is only created by an active master if one does not - * already exist. Standby masters just read the information from the file system. This class is - * thread-safe. - * + * Caches the cluster ID of the cluster. For standby masters, this is used to serve the client RPCs + * that fetch the cluster ID. ClusterID is only created by an active master if one does not already + * exist. Standby masters just read the information from the file system. This class is thread-safe. * TODO: Make it a singleton without affecting concurrent junit tests. */ @InterfaceAudience.Private @@ -95,8 +92,8 @@ private String getClusterId() { /** * Attempts to fetch the cluster ID from the file system. If no attempt is already in progress, - * synchronously fetches the cluster ID and sets it. If an attempt is already in progress, - * returns right away and the caller is expected to wait for the fetch to finish. + * synchronously fetches the cluster ID and sets it. If an attempt is already in progress, returns + * right away and the caller is expected to wait for the fetch to finish. * @return true if the attempt is done, false if another thread is already fetching it. */ private boolean attemptFetch() { @@ -130,12 +127,11 @@ private void waitForFetchToFinish() throws InterruptedException { } /** - * Fetches the ClusterId from FS if it is not cached locally. Atomically updates the cached - * copy and is thread-safe. Optimized to do a single fetch when there are multiple threads are - * trying get from a clean cache. - * - * @return ClusterId by reading from FileSystem or null in any error case or cluster ID does - * not exist on the file system or if the server initiated a tear down. + * Fetches the ClusterId from FS if it is not cached locally. Atomically updates the cached copy + * and is thread-safe. Optimized to do a single fetch when there are multiple threads are trying + * get from a clean cache. + * @return ClusterId by reading from FileSystem or null in any error case or cluster ID does not + * exist on the file system or if the server initiated a tear down. */ public String getFromCacheOrFetch() { if (server.isStopping() || server.isStopped()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java index 0f7153ba8014..489f795c11bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,20 +19,17 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServiceNotRunningException; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.NonceKey; +import org.apache.yetus.audience.InterfaceAudience; /** - * View and edit the current cluster schema. Use this API making any modification to - * namespaces, tables, etc. - * - *
<h2>Implementation Notes</h2> - * Nonces are for when operation is non-idempotent to ensure once-only semantic, even - * across process failures. + * View and edit the current cluster schema. Use this API making any modification to namespaces, + * tables, etc. + * <h2>Implementation Notes</h2>
          Nonces are for when operation is non-idempotent to ensure once-only + * semantic, even across process failures. */ // ClusterSchema is introduced to encapsulate schema modification. Currently the different aspects // are spread about the code base. This effort is about cleanup, shutting down access, and @@ -65,12 +62,11 @@ public interface ClusterSchema { /** * Default operation timeout in milliseconds. */ - public static final int DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT = - 5 * 60 * 1000; + public static final int DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT = 5 * 60 * 1000; /** - * For internals use only. Do not use! Provisionally part of this Interface. - * Prefer the high-level APIs available elsewhere in this API. + * For internals use only. Do not use! Provisionally part of this Interface. Prefer the high-level + * APIs available elsewhere in this API. * @return Instance of {@link TableNamespaceManager} */ // TODO: Remove from here. Keep internal. This Interface is too high-level to host this accessor. @@ -84,8 +80,8 @@ public interface ClusterSchema { * @return procedure id * @throws IOException if service is not running see {@link ServiceNotRunningException} */ - long createNamespace(NamespaceDescriptor namespaceDescriptor, NonceKey nonceKey, ProcedurePrepareLatch latch) - throws IOException; + long createNamespace(NamespaceDescriptor namespaceDescriptor, NonceKey nonceKey, + ProcedurePrepareLatch latch) throws IOException; /** * Modify an existing Namespace. @@ -94,19 +90,18 @@ long createNamespace(NamespaceDescriptor namespaceDescriptor, NonceKey nonceKey, * @return procedure id * @throws IOException if service is not running see {@link ServiceNotRunningException} */ - long modifyNamespace(NamespaceDescriptor descriptor, NonceKey nonceKey, ProcedurePrepareLatch latch) - throws IOException; + long modifyNamespace(NamespaceDescriptor descriptor, NonceKey nonceKey, + ProcedurePrepareLatch latch) throws IOException; /** - * Delete an existing Namespace. - * Only empty Namespaces (no tables) can be removed. + * Delete an existing Namespace. Only empty Namespaces (no tables) can be removed. * @param nonceKey A unique identifier for this operation from the client or process. * @param latch A latch to block on for precondition validation * @return procedure id * @throws IOException if service is not running see {@link ServiceNotRunningException} */ long deleteNamespace(String name, NonceKey nonceKey, ProcedurePrepareLatch latch) - throws IOException; + throws IOException; /** * Get a Namespace diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java index fadb28ccca9c..4e4e1d1e5a7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,12 @@ package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service; /** * Mixes in ClusterSchema and Service */ @InterfaceAudience.Private -public interface ClusterSchemaService extends ClusterSchema, Service {} +public interface ClusterSchemaService extends ClusterSchema, Service { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java index 2188dc3d324c..13b2f129f933 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -84,26 +84,28 @@ private long submitProcedure(final Procedure procedure, @Override public long createNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey, - final ProcedurePrepareLatch latch) - throws IOException { + final ProcedurePrepareLatch latch) throws IOException { return submitProcedure(new CreateNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, latch), - nonceKey); + this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, + latch), + nonceKey); } @Override public long modifyNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey, final ProcedurePrepareLatch latch) throws IOException { return submitProcedure(new ModifyNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, latch), - nonceKey); + this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, + latch), + nonceKey); } @Override - public long deleteNamespace(String name, final NonceKey nonceKey, final ProcedurePrepareLatch latch) - throws IOException { - return submitProcedure(new DeleteNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), name, latch), + public long deleteNamespace(String name, final NonceKey nonceKey, + final ProcedurePrepareLatch latch) throws IOException { + return submitProcedure( + new DeleteNamespaceProcedure( + this.masterServices.getMasterProcedureExecutor().getEnvironment(), name, latch), nonceKey); } @@ -118,7 +120,7 @@ public NamespaceDescriptor getNamespace(String name) throws IOException { public List getNamespaces() throws IOException { checkIsRunning(); return getTableNamespaceManager().list().stream() - .sorted(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR) - .collect(ImmutableList.toImmutableList()); + .sorted(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR) + .collect(ImmutableList.toImmutableList()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java index dd67c05eae0e..1609eeb371d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java @@ -15,8 +15,6 @@ * See the 
License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.master; import java.io.Closeable; @@ -47,8 +45,11 @@ import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.VersionInfo; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.Channel; @@ -64,27 +65,23 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioDatagramChannel; import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageEncoder; import org.apache.hbase.thirdparty.io.netty.util.internal.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Class to publish the cluster status to the client. This allows them to know immediately - * the dead region servers, hence to cut the connection they have with them, eventually stop - * waiting on the socket. This improves the mean time to recover, and as well allows to increase - * on the client the different timeouts, as the dead servers will be detected separately. + * Class to publish the cluster status to the client. This allows them to know immediately the dead + * region servers, hence to cut the connection they have with them, eventually stop waiting on the + * socket. This improves the mean time to recover, and as well allows to increase on the client the + * different timeouts, as the dead servers will be detected separately. */ @InterfaceAudience.Private public class ClusterStatusPublisher extends ScheduledChore { private static Logger LOG = LoggerFactory.getLogger(ClusterStatusPublisher.class); /** - * The implementation class used to publish the status. Default is null (no publish). - * Use org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher to multicast the + * The implementation class used to publish the status. Default is null (no publish). Use + * org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher to multicast the * status. */ public static final String STATUS_PUBLISHER_CLASS = "hbase.status.publisher.class"; - public static final Class - DEFAULT_STATUS_PUBLISHER_CLASS = + public static final Class DEFAULT_STATUS_PUBLISHER_CLASS = org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher.class; /** @@ -101,8 +98,8 @@ public class ClusterStatusPublisher extends ScheduledChore { private boolean connected = false; /** - * We want to limit the size of the protobuf message sent, do fit into a single packet. - * a reasonable size for ip / ethernet is less than 1Kb. + * We want to limit the size of the protobuf message sent, do fit into a single packet. a + * reasonable size for ip / ethernet is less than 1Kb. 
*/ public final static int MAX_SERVER_PER_MESSAGE = 10; @@ -113,10 +110,9 @@ public class ClusterStatusPublisher extends ScheduledChore { public final static int NB_SEND = 5; public ClusterStatusPublisher(HMaster master, Configuration conf, - Class publisherClass) - throws IOException { - super("ClusterStatusPublisher for=" + master.getName(), master, conf.getInt( - STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD)); + Class publisherClass) throws IOException { + super("ClusterStatusPublisher for=" + master.getName(), master, + conf.getInt(STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD)); this.master = master; this.messagePeriod = conf.getInt(STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD); try { @@ -162,13 +158,10 @@ protected void chore() { // We're reusing an existing protobuf message, but we don't send everything. // This could be extended in the future, for example if we want to send stuff like the - // hbase:meta server name. - publisher.publish(ClusterMetricsBuilder.newBuilder() - .setHBaseVersion(VersionInfo.getVersion()) - .setClusterId(master.getMasterFileSystem().getClusterId().toString()) - .setMasterName(master.getServerName()) - .setDeadServerNames(sns) - .build()); + // hbase:meta server name. + publisher.publish(ClusterMetricsBuilder.newBuilder().setHBaseVersion(VersionInfo.getVersion()) + .setClusterId(master.getMasterFileSystem().getClusterId().toString()) + .setMasterName(master.getServerName()).setDeadServerNames(sns).build()); } @Override @@ -183,8 +176,8 @@ private synchronized boolean isConnected() { /** * Create the dead server to send. A dead server is sent NB_SEND times. We send at max - * MAX_SERVER_PER_MESSAGE at a time. if there are too many dead servers, we send the newly - * dead first. + * MAX_SERVER_PER_MESSAGE at a time. if there are too many dead servers, we send the newly dead + * first. 
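Not part of this patch — a minimal, self-contained sketch of the publication limits the two javadoc blocks above describe: one status message carries at most MAX_SERVER_PER_MESSAGE (10) dead servers so the protobuf stays within a single small UDP packet, and a given dead server is re-announced on up to NB_SEND (5) successive chore runs, newest deaths first. The class and method names below are illustrative, not HBase APIs.

import java.util.ArrayList;
import java.util.List;

final class DeadServerBatch {
  // Mirrors ClusterStatusPublisher.MAX_SERVER_PER_MESSAGE quoted above.
  static final int MAX_SERVER_PER_MESSAGE = 10;

  /** Cap a newest-first dead server list so one published message stays packet-sized. */
  static <T> List<T> capForOneMessage(List<T> newestFirstDeadServers) {
    int end = Math.min(MAX_SERVER_PER_MESSAGE, newestFirstDeadServers.size());
    return new ArrayList<>(newestFirstDeadServers.subList(0, end));
  }
}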
*/ protected List generateDeadServersListToSend() { // We're getting the message sent since last time, and add them to the list @@ -221,8 +214,8 @@ public int compare(Map.Entry o1, Map.Entry> getDeadServers(long since) { if (master.getServerManager() == null) { @@ -232,7 +225,6 @@ protected List> getDeadServers(long since) { return master.getServerManager().getDeadServers().copyDeadServersSince(since); } - public interface Publisher extends Closeable { void connect(Configuration conf) throws IOException; @@ -247,8 +239,9 @@ public interface Publisher extends Closeable { public static class MulticastPublisher implements Publisher { private DatagramChannel channel; private final EventLoopGroup group = new NioEventLoopGroup(1, - new ThreadFactoryBuilder().setNameFormat("hbase-master-clusterStatusPublisher-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadFactoryBuilder().setNameFormat("hbase-master-clusterStatusPublisher-pool-%d") + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER) + .build()); public MulticastPublisher() { } @@ -261,9 +254,9 @@ public String toString() { @Override public void connect(Configuration conf) throws IOException { String mcAddress = conf.get(HConstants.STATUS_MULTICAST_ADDRESS, - HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); - int port = conf.getInt(HConstants.STATUS_MULTICAST_PORT, - HConstants.DEFAULT_STATUS_MULTICAST_PORT); + HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); + int port = + conf.getInt(HConstants.STATUS_MULTICAST_PORT, HConstants.DEFAULT_STATUS_MULTICAST_PORT); String bindAddress = conf.get(HConstants.STATUS_MULTICAST_PUBLISHER_BIND_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_PUBLISHER_BIND_ADDRESS); String niName = conf.get(HConstants.STATUS_MULTICAST_NI_NAME); @@ -299,9 +292,9 @@ public void connect(Configuration conf) throws IOException { } Bootstrap b = new Bootstrap(); b.group(group) - .channelFactory(new HBaseDatagramChannelFactory(NioDatagramChannel.class, family)) - .option(ChannelOption.SO_REUSEADDR, true) - .handler(new ClusterMetricsEncoder(isa)); + .channelFactory( + new HBaseDatagramChannelFactory(NioDatagramChannel.class, family)) + .option(ChannelOption.SO_REUSEADDR, true).handler(new ClusterMetricsEncoder(isa)); try { LOG.debug("Channel bindAddress={}, networkInterface={}, INA={}", bindAddress, ni, ina); channel = (DatagramChannel) b.bind(bindAddress, 0).sync().channel(); @@ -357,7 +350,7 @@ private ClusterMetricsEncoder(InetSocketAddress isa) { @Override protected void encode(ChannelHandlerContext channelHandlerContext, - ClusterMetrics clusterStatus, List objects) { + ClusterMetrics clusterStatus, List objects) { objects.add(new DatagramPacket(Unpooled.wrappedBuffer( ClusterMetricsBuilder.toClusterStatus(clusterStatus).toByteArray()), isa)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java index 0471fabe3489..6832d0db6849 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java @@ -34,13 +34,12 @@ import org.slf4j.LoggerFactory; /** - * Class to hold dead servers list and utility querying dead server list. - * Servers are added when they expire or when we find them in filesystem on startup. 
- * When a server crash procedure is queued, it will populate the processing list and - * then remove the server from processing list when done. Servers are removed from - * dead server list when a new instance is started over the old on same hostname and - * port or when new Master comes online tidying up after all initialization. Processing - * list and deadserver list are not tied together (you don't have to be in deadservers + * Class to hold dead servers list and utility querying dead server list. Servers are added when + * they expire or when we find them in filesystem on startup. When a server crash procedure is + * queued, it will populate the processing list and then remove the server from processing list when + * done. Servers are removed from dead server list when a new instance is started over the old on + * same hostname and port or when new Master comes online tidying up after all initialization. + * Processing list and deadserver list are not tied together (you don't have to be in deadservers * list to be processing and vice versa). */ @InterfaceAudience.Private @@ -48,11 +47,10 @@ public class DeadServer { private static final Logger LOG = LoggerFactory.getLogger(DeadServer.class); /** - * Set of known dead servers. On znode expiration, servers are added here. - * This is needed in case of a network partitioning where the server's lease - * expires, but the server is still running. After the network is healed, - * and it's server logs are recovered, it will be told to call server startup - * because by then, its regions have probably been reassigned. + * Set of known dead servers. On znode expiration, servers are added here. This is needed in case + * of a network partitioning where the server's lease expires, but the server is still running. + * After the network is healed, and it's server logs are recovered, it will be told to call server + * startup because by then, its regions have probably been reassigned. */ private final Map deadServers = new HashMap<>(); @@ -86,12 +84,11 @@ synchronized boolean isEmpty() { } /** - * Handles restart of a server. The new server instance has a different start code. - * The new start code should be greater than the old one. We don't check that here. - * Removes the old server from deadserver list. - * + * Handles restart of a server. The new server instance has a different start code. The new start + * code should be greater than the old one. We don't check that here. Removes the old server from + * deadserver list. * @param newServerName Servername as either host:port or - * host,port,startcode. + * host,port,startcode. * @return true if this server was dead before and coming back alive again */ synchronized boolean cleanPreviousInstance(final ServerName newServerName) { @@ -114,9 +111,8 @@ synchronized void cleanAllPreviousInstances(final ServerName newServerName) { /** * @param newServerName Server to match port and hostname against. * @param deadServerIterator Iterator primed so can call 'next' on it. - * @return True if newServerName and current primed - * iterator ServerName have same host and port and we removed old server - * from iterator and from processing list. + * @return True if newServerName and current primed iterator ServerName have same + * host and port and we removed old server from iterator and from processing list. 
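Not part of this patch — an illustrative sketch of the "restarted server" rule the DeadServer javadoc above describes: when an instance re-registers on the same host and port with a new start code, the stale dead-list entry is dropped. The map and helper class are stand-ins for the real DeadServer internals; ServerName.isSameAddress is the same comparison the surrounding code uses.

import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.hbase.ServerName;

final class DeadListCleanup {
  /** Returns true if a dead-list entry was superseded by the newly started instance. */
  static boolean cleanPreviousInstance(Map<ServerName, Long> deadServers, ServerName newServer) {
    boolean removed = false;
    for (Iterator<ServerName> it = deadServers.keySet().iterator(); it.hasNext();) {
      // Same host:port but an older start code means the old entry is obsolete.
      if (ServerName.isSameAddress(it.next(), newServer)) {
        it.remove();
        removed = true;
      }
    }
    return removed;
  }
}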
*/ private boolean cleanOldServerName(ServerName newServerName, Iterator deadServerIterator) { @@ -151,10 +147,10 @@ public synchronized String toString() { * @return a sorted array list, by death time, lowest values first. */ synchronized List> copyDeadServersSince(long ts) { - List> res = new ArrayList<>(size()); + List> res = new ArrayList<>(size()); - for (Map.Entry entry:deadServers.entrySet()){ - if (entry.getValue() >= ts){ + for (Map.Entry entry : deadServers.entrySet()) { + if (entry.getValue() >= ts) { res.add(new Pair<>(entry.getKey(), entry.getValue())); } } @@ -162,13 +158,13 @@ synchronized List> copyDeadServersSince(long ts) { Collections.sort(res, (o1, o2) -> o1.getSecond().compareTo(o2.getSecond())); return res; } - + /** * Get the time when a server died * @param deadServerName the dead server name - * @return the date when the server died + * @return the date when the server died */ - public synchronized Date getTimeOfDeath(final ServerName deadServerName){ + public synchronized Date getTimeOfDeath(final ServerName deadServerName) { Long time = deadServers.get(deadServerName); return time == null ? null : new Date(time); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java index 14c4a3ec85f6..dc843838e90e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,33 +21,30 @@ import java.util.List; import java.util.NavigableSet; import java.util.TreeSet; - +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.ServerName; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Tracks the list of draining region servers via ZK. - * - *
<p>This class is responsible for watching for changes to the draining - * servers list. It handles adds/deletes in the draining RS list and - * watches each node. - * - * <p>If an RS gets deleted from draining list, we call + * <p> + * This class is responsible for watching for changes to the draining servers list. It handles + * adds/deletes in the draining RS list and watches each node. + * <p> + * If an RS gets deleted from draining list, we call * {@link ServerManager#removeServerFromDrainList(ServerName)} - * - * <p>If an RS gets added to the draining list, we add a watcher to it and call + * <p> + * If an RS gets added to the draining list, we add a watcher to it and call * {@link ServerManager#addServerToDrainList(ServerName)} - * - * <p>This class is deprecated in 2.0 because decommission/draining API goes through - * master in 2.0. Can remove this class in 3.0. - * + * <p>
          + * This class is deprecated in 2.0 because decommission/draining API goes through master in 2.0. Can + * remove this class in 3.0. */ @InterfaceAudience.Private public class DrainingServerTracker extends ZKListener { @@ -57,8 +54,8 @@ public class DrainingServerTracker extends ZKListener { private final NavigableSet drainingServers = new TreeSet<>(); private Abortable abortable; - public DrainingServerTracker(ZKWatcher watcher, - Abortable abortable, ServerManager serverManager) { + public DrainingServerTracker(ZKWatcher watcher, Abortable abortable, + ServerManager serverManager) { super(watcher); this.abortable = abortable; this.serverManager = serverManager; @@ -66,9 +63,8 @@ public DrainingServerTracker(ZKWatcher watcher, /** * Starts the tracking of draining RegionServers. - * - *
<p>All Draining RSs will be tracked after this method is called. - * + * <p>
          + * All Draining RSs will be tracked after this method is called. * @throws KeeperException */ public void start() throws KeeperException, IOException { @@ -77,32 +73,31 @@ public void start() throws KeeperException, IOException { serverManager.registerListener(new ServerListener() { @Override public void serverAdded(ServerName sn) { - if (drainingServers.contains(sn)){ + if (drainingServers.contains(sn)) { serverManager.addServerToDrainList(sn); } } }); List servers = - ZKUtil.listChildrenAndWatchThem(watcher, watcher.getZNodePaths().drainingZNode); + ZKUtil.listChildrenAndWatchThem(watcher, watcher.getZNodePaths().drainingZNode); add(servers); } private void add(final List servers) throws IOException { - synchronized(this.drainingServers) { + synchronized (this.drainingServers) { this.drainingServers.clear(); - for (String n: servers) { + for (String n : servers) { final ServerName sn = ServerName.valueOf(ZKUtil.getNodeName(n)); this.drainingServers.add(sn); this.serverManager.addServerToDrainList(sn); - LOG.info("Draining RS node created, adding to list [" + - sn + "]"); + LOG.info("Draining RS node created, adding to list [" + sn + "]"); } } } private void remove(final ServerName sn) { - synchronized(this.drainingServers) { + synchronized (this.drainingServers) { this.drainingServers.remove(sn); this.serverManager.removeServerFromDrainList(sn); } @@ -110,20 +105,19 @@ private void remove(final ServerName sn) { @Override public void nodeDeleted(final String path) { - if(path.startsWith(watcher.getZNodePaths().drainingZNode)) { + if (path.startsWith(watcher.getZNodePaths().drainingZNode)) { final ServerName sn = ServerName.valueOf(ZKUtil.getNodeName(path)); - LOG.info("Draining RS node deleted, removing from list [" + - sn + "]"); + LOG.info("Draining RS node deleted, removing from list [" + sn + "]"); remove(sn); } } @Override public void nodeChildrenChanged(final String path) { - if(path.equals(watcher.getZNodePaths().drainingZNode)) { + if (path.equals(watcher.getZNodePaths().drainingZNode)) { try { final List newNodes = - ZKUtil.listChildrenAndWatchThem(watcher, watcher.getZNodePaths().drainingZNode); + ZKUtil.listChildrenAndWatchThem(watcher, watcher.getZNodePaths().drainingZNode); add(newNodes); } catch (KeeperException e) { abortable.abort("Unexpected zk exception getting RS nodes", e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 01e4e5cd961e..b5a9a06a9581 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -262,6 +262,7 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext; import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; @@ -310,7 +311,7 @@ public class HMaster extends HBaseServerBase implements Maste private ClusterSchemaService clusterSchemaService; public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = - "hbase.master.wait.on.service.seconds"; + "hbase.master.wait.on.service.seconds"; public static final int 
DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60; public static final String HBASE_MASTER_CLEANER_INTERVAL = "hbase.master.cleaner.interval"; @@ -456,8 +457,8 @@ public class HMaster extends HBaseServerBase implements Maste * *
<p>
          * Remaining steps of initialization occur in - * {@link #finishActiveMasterInitialization(MonitoredTask)} after the master becomes the - * active one. + * {@link #finishActiveMasterInitialization(MonitoredTask)} after the master becomes the active + * one. */ public HMaster(final Configuration conf) throws IOException { super(conf, "Master"); @@ -472,7 +473,7 @@ public HMaster(final Configuration conf) throws IOException { maintenanceMode = false; } this.rsFatals = new MemoryBoundedLogMessageBuffer( - conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024)); + conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024)); LOG.info("hbase.rootdir={}, hbase.cluster.distributed={}", CommonFSUtils.getRootDir(this.conf), this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false)); @@ -482,7 +483,7 @@ public HMaster(final Configuration conf) throws IOException { decorateMasterConfiguration(this.conf); - // Hack! Maps DFSClient => Master for logs. HDFS made this + // Hack! Maps DFSClient => Master for logs. HDFS made this // config param for task trackers, but we can piggyback off of it. if (this.conf.get("mapreduce.task.attempt.id") == null) { this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString()); @@ -495,22 +496,22 @@ public HMaster(final Configuration conf) throws IOException { this.maxBalancingTime = getMaxBalancingTime(); this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT, - HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT); + HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT); // Do we publish the status? - boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED, - HConstants.STATUS_PUBLISHED_DEFAULT); + boolean shouldPublish = + conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT); Class publisherClass = conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS, - ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS, - ClusterStatusPublisher.Publisher.class); + ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS, + ClusterStatusPublisher.Publisher.class); if (shouldPublish) { if (publisherClass == null) { - LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + - ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS + - " is not set - not publishing status"); + LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + + ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS + + " is not set - not publishing status"); } else { clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass); LOG.debug("Created {}", this.clusterStatusPublisherChore); @@ -583,9 +584,9 @@ public void run() { // If on way out, then we are no longer active master. 
this.clusterSchemaService.stopAsync(); try { - this.clusterSchemaService.awaitTerminated( - getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, - DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); + this.clusterSchemaService + .awaitTerminated(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, + DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); } catch (TimeoutException te) { LOG.warn("Failed shutdown of clusterSchemaService", te); } @@ -599,8 +600,8 @@ private int putUpJettyServer() throws IOException { if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) { return -1; } - final int infoPort = conf.getInt("hbase.master.info.port.orig", - HConstants.DEFAULT_MASTER_INFOPORT); + final int infoPort = + conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT); // -1 is for disabling info server, so no redirecting if (infoPort < 0 || infoServer == null) { return -1; @@ -611,10 +612,9 @@ private int putUpJettyServer() throws IOException { } final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0"); if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) { - String msg = - "Failed to start redirecting jetty server. Address " + addr - + " does not belong to this host. Correct configuration parameter: " - + "hbase.master.info.bindAddress"; + String msg = "Failed to start redirecting jetty server. Address " + addr + + " does not belong to this host. Correct configuration parameter: " + + "hbase.master.info.bindAddress"; LOG.error(msg); throw new IOException(msg); } @@ -634,7 +634,8 @@ private int putUpJettyServer() throws IOException { StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead; final MasterRedirectServlet redirect = new MasterRedirectServlet(infoServer, redirectHostname); - final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS); + final WebAppContext context = + new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS); context.addServlet(new ServletHolder(redirect), "/*"); context.setServer(masterJettyServer); @@ -720,7 +721,7 @@ public MetricsMaster getMasterMetrics() { * should have already been initialized along with {@link ServerManager}. */ private void initializeZKBasedSystemTrackers() - throws IOException, KeeperException, ReplicationException { + throws IOException, KeeperException, ReplicationException { if (maintenanceMode) { // in maintenance mode, always use MaintenanceLoadBalancer. conf.unset(LoadBalancer.HBASE_RSGROUP_LOADBALANCER_CLASS); @@ -732,7 +733,7 @@ private void initializeZKBasedSystemTrackers() this.loadBalancerTracker.start(); this.regionNormalizerManager = - RegionNormalizerFactory.createNormalizerManager(conf, zooKeeper, this); + RegionNormalizerFactory.createNormalizerManager(conf, zooKeeper, this); this.configurationManager.registerObserver(regionNormalizerManager); this.regionNormalizerManager.start(); @@ -777,15 +778,14 @@ private void initializeZKBasedSystemTrackers() ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId()); } - // Set the cluster as up. If new RSs, they'll be waiting on this before + // Set the cluster as up. If new RSs, they'll be waiting on this before // going ahead with their startup. 
boolean wasUp = this.clusterStatusTracker.isClusterUp(); if (!wasUp) this.clusterStatusTracker.setClusterUp(); - LOG.info("Active/primary master=" + this.serverName + - ", sessionid=0x" + - Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) + - ", setting cluster-up flag (Was=" + wasUp + ")"); + LOG.info("Active/primary master=" + this.serverName + ", sessionid=0x" + + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) + + ", setting cluster-up flag (Was=" + wasUp + ")"); // create/initialize the snapshot manager and other procedure managers this.snapshotManager = new SnapshotManager(); @@ -799,14 +799,14 @@ private void initializeZKBasedSystemTrackers() // Will be overriden in test to inject customized AssignmentManager @InterfaceAudience.Private protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManager(master, masterRegion); } private void tryMigrateMetaLocationsFromZooKeeper() throws IOException, KeeperException { // try migrate data from zookeeper try (ResultScanner scanner = - masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) { + masterRegion.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) { if (scanner.next() != null) { // notice that all replicas for a region are in the same row, so the migration can be // done with in a one row put, which means if we have data in catalog family then we can @@ -831,9 +831,9 @@ private void tryMigrateMetaLocationsFromZooKeeper() throws IOException, KeeperEx MetaTableAccessor.addLocation(put, state.getServerName(), HConstants.NO_SEQNUM, replicaId); } put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(RegionStateStore.getStateColumn(replicaId)).setTimestamp(put.getTimestamp()) - .setType(Cell.Type.Put).setValue(Bytes.toBytes(state.getState().name())).build()); + .setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(RegionStateStore.getStateColumn(replicaId)).setTimestamp(put.getTimestamp()) + .setType(Cell.Type.Put).setValue(Bytes.toBytes(state.getState().name())).build()); } if (!put.isEmpty()) { LOG.info(info.toString()); @@ -881,8 +881,8 @@ private void tryMigrateMetaLocationsFromZooKeeper() throws IOException, KeeperEx * Notice that now we will not schedule a special procedure to make meta online(unless the first * time where meta has not been created yet), we will rely on SCP to bring meta online. */ - private void finishActiveMasterInitialization(MonitoredTask status) throws IOException, - InterruptedException, KeeperException, ReplicationException { + private void finishActiveMasterInitialization(MonitoredTask status) + throws IOException, InterruptedException, KeeperException, ReplicationException { /* * We are active master now... go initialize components we need to run. 
*/ @@ -918,7 +918,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc Pair result = null; try { result = HBaseFsck.checkAndMarkRunningHbck(this.conf, - HBaseFsck.createLockRetryCounterFactory(this.conf).create()); + HBaseFsck.createLockRetryCounterFactory(this.conf).create()); } finally { if (result != null) { Closeables.close(result.getSecond(), true); @@ -940,14 +940,11 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc this.splitWALManager = new SplitWALManager(this); } - - tryMigrateMetaLocationsFromZooKeeper(); createProcedureExecutor(); - Map, List>> procsByType = - procedureExecutor.getActiveProceduresNoCopy().stream() - .collect(Collectors.groupingBy(p -> p.getClass())); + Map, List>> procsByType = procedureExecutor + .getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass())); // Create Assignment Manager this.assignmentManager = createAssignmentManager(this, masterRegion); @@ -956,9 +953,9 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc // completed, it could still be in the procedure list. This is a bit strange but is another // story, need to verify the implementation for ProcedureExecutor and ProcedureStore. List ritList = - procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream() - .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p) - .collect(Collectors.toList()); + procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()) + .stream().filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p) + .collect(Collectors.toList()); this.assignmentManager.setupRIT(ritList); // Start RegionServerTracker with listing of servers found with exiting SCPs -- these should @@ -968,7 +965,8 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc // TODO: Generate the splitting and live Set in one pass instead of two as we currently do. this.regionServerTracker.upgrade( procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream() - .map(p -> (ServerCrashProcedure) p).map(p -> p.getServerName()).collect(Collectors.toSet()), + .map(p -> (ServerCrashProcedure) p).map(p -> p.getServerName()) + .collect(Collectors.toSet()), Sets.union(rsListStorage.getAll(), walManager.getLiveServersFromWALDir()), walManager.getSplittingServersFromWALDir()); // This manager must be accessed AFTER hbase:meta is confirmed on line.. @@ -980,8 +978,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc try { this.serverManager.loadLastFlushedSequenceIds(); } catch (IOException e) { - LOG.info("Failed to load last flushed sequence id of regions" - + " from file system", e); + LOG.info("Failed to load last flushed sequence id of regions" + " from file system", e); } // Set ourselves as active Master now our claim has succeeded up in zk. 
this.activeMaster = true; @@ -999,7 +996,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc } else { // start an in process region server for carrying system regions maintenanceRegionServer = - JVMClusterUtil.createRegionServerThread(getConfiguration(), HRegionServer.class, 0); + JVMClusterUtil.createRegionServerThread(getConfiguration(), HRegionServer.class, 0); maintenanceRegionServer.start(); } @@ -1009,7 +1006,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc // Print out state of hbase:meta on startup; helps debugging. if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) { Optional optProc = procedureExecutor.getProcedures().stream() - .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny(); + .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny(); initMetaProc = optProc.orElseGet(() -> { // schedule an init meta procedure if meta has not been deployed yet InitMetaProcedure temp = new InitMetaProcedure(); @@ -1057,12 +1054,11 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc return; } - TableDescriptor metaDescriptor = - tableDescriptors.get(TableName.META_TABLE_NAME); + TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME); final ColumnFamilyDescriptor tableFamilyDesc = - metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY); + metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY); final ColumnFamilyDescriptor replBarrierFamilyDesc = - metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY); + metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY); this.assignmentManager.joinCluster(); // The below depends on hbase:meta being online. @@ -1074,31 +1070,33 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc // first. if (conf.get(HConstants.META_REPLICAS_NUM) != null) { int replicasNumInConf = - conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); + conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME); if (metaDesc.getRegionReplication() != replicasNumInConf) { // it is possible that we already have some replicas before upgrading, so we must set the // region replication number in meta TableDescriptor directly first, without creating a // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas. 
int existingReplicasCount = - assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); + assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); if (existingReplicasCount > metaDesc.getRegionReplication()) { - LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + - " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + LOG.info( + "Update replica count of hbase:meta from {}(in TableDescriptor)" + + " to {}(existing ZNodes)", + metaDesc.getRegionReplication(), existingReplicasCount); metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) - .setRegionReplication(existingReplicasCount).build(); + .setRegionReplication(existingReplicasCount).build(); tableDescriptors.update(metaDesc); } // check again, and issue a ModifyTableProcedure if needed if (metaDesc.getRegionReplication() != replicasNumInConf) { LOG.info( - "The {} config is {} while the replica count in TableDescriptor is {}" + - " for hbase:meta, altering...", + "The {} config is {} while the replica count in TableDescriptor is {}" + + " for hbase:meta, altering...", HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); procedureExecutor.submitProcedure(new ModifyTableProcedure( - procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) - .setRegionReplication(replicasNumInConf).build(), - null, metaDesc, false)); + procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) + .setRegionReplication(replicasNumInConf).build(), + null, metaDesc, false)); } } } @@ -1139,7 +1137,8 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc if (e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException && tableFamilyDesc == null && replBarrierFamilyDesc == null) { LOG.info("ClusterSchema service could not be initialized. This is " - + "expected during HBase 1 to 2 upgrade", e); + + "expected during HBase 1 to 2 upgrade", + e); } else { throw e; } @@ -1155,7 +1154,7 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc status.markComplete("Initialization successful"); LOG.info(String.format("Master has completed initialization %.3fsec", - (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f)); + (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f)); this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime(); configurationManager.registerObserver(this.balancer); configurationManager.registerObserver(this.hfileCleanerPool); @@ -1237,29 +1236,26 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc zombieDetector.interrupt(); /* - * After master has started up, lets do balancer post startup initialization. Since this runs - * in activeMasterManager thread, it should be fine. + * After master has started up, lets do balancer post startup initialization. Since this runs in + * activeMasterManager thread, it should be fine. 
*/ long start = EnvironmentEdgeManager.currentTime(); this.balancer.postMasterStartupInitialize(); if (LOG.isDebugEnabled()) { - LOG.debug("Balancer post startup initialization complete, took " + ( - (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds"); + LOG.debug("Balancer post startup initialization complete, took " + + ((EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds"); } this.rollingUpgradeChore = new RollingUpgradeChore(this); getChoreService().scheduleChore(rollingUpgradeChore); } - private void createMissingCFsInMetaDuringUpgrade( - TableDescriptor metaDescriptor) throws IOException { - TableDescriptor newMetaDesc = - TableDescriptorBuilder.newBuilder(metaDescriptor) - .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf)) - .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()) - .build(); - long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, - 0, 0, false); + private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor) + throws IOException { + TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor) + .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf)) + .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build(); + long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false); int tries = 30; while (!(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning() && tries > 0) { @@ -1276,9 +1272,8 @@ && getMasterProcedureExecutor().isRunning() && tries > 0) { } else { Procedure result = getMasterProcedureExecutor().getResult(pid); if (result != null && result.isFailed()) { - throw new IOException( - "Failed to add table and rep_barrier CFs to meta. " - + MasterProcedureUtil.unwrapRemoteIOException(result)); + throw new IOException("Failed to add table and rep_barrier CFs to meta. " + + MasterProcedureUtil.unwrapRemoteIOException(result)); } } } @@ -1286,7 +1281,7 @@ && getMasterProcedureExecutor().isRunning() && tries > 0) { /** * Check hbase:meta is up and ready for reading. For use during Master startup only. * @return True if meta is UP and online and startup can progress. Otherwise, meta is not online - * and we will hold here until operator intervention. + * and we will hold here until operator intervention. */ @InterfaceAudience.Private public boolean waitForMetaOnline() { @@ -1294,8 +1289,8 @@ public boolean waitForMetaOnline() { } /** - * @return True if region is online and scannable else false if an error or shutdown (Otherwise - * we just block in here holding up all forward-progess). + * @return True if region is online and scannable else false if an error or shutdown (Otherwise we + * just block in here holding up all forward-progess). */ private boolean isRegionOnline(RegionInfo ri) { RetryCounter rc = null; @@ -1307,14 +1302,15 @@ private boolean isRegionOnline(RegionInfo ri) { } } // Region is not OPEN. - Optional> optProc = this.procedureExecutor.getProcedures(). - stream().filter(p -> p instanceof ServerCrashProcedure).findAny(); + Optional> optProc = this.procedureExecutor.getProcedures() + .stream().filter(p -> p instanceof ServerCrashProcedure).findAny(); // TODO: Add a page to refguide on how to do repair. Have this log message point to it. // Page will talk about loss of edits, how to schedule at least the meta WAL recovery, and // then how to assign including how to break region lock if one held. 
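Not part of this patch — a generic sketch of the "holding pattern" this hunk describes: isRegionOnline() logs the warning below and then keeps re-checking the region, settling to roughly one check per minute (the RetryCounterFactory just after this point), until the region comes online or an operator intervenes. The helper below is illustrative only and does not use HBase's RetryCounter API.

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class HoldingPattern {
  /** Poll until the condition holds, doubling the sleep up to a one-minute cap. */
  static void waitUntil(BooleanSupplier condition) throws InterruptedException {
    long sleepMs = 1_000;            // first re-check after one second
    final long maxSleepMs = 60_000;  // then settle to roughly once a minute
    while (!condition.getAsBoolean()) {
      TimeUnit.MILLISECONDS.sleep(sleepMs);
      sleepMs = Math.min(sleepMs * 2, maxSleepMs);
    }
  }
}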
- LOG.warn("{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot " + - "progress, in holding-pattern until region onlined.", - ri.getRegionNameAsString(), rs, optProc.isPresent()); + LOG.warn( + "{} is NOT online; state={}; ServerCrashProcedures={}. Master startup cannot " + + "progress, in holding-pattern until region onlined.", + ri.getRegionNameAsString(), rs, optProc.isPresent()); // Check once-a-minute. if (rc == null) { rc = new RetryCounterFactory(Integer.MAX_VALUE, 1000, 60_000).create(); @@ -1334,14 +1330,14 @@ private boolean isRegionOnline(RegionInfo ri) { */ private boolean waitForNamespaceOnline() throws IOException { TableState nsTableState = - MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME); + MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME); if (nsTableState == null || nsTableState.isDisabled()) { // this means we have already migrated the data and disabled or deleted the namespace table, // or this is a new deploy which does not have a namespace table from the beginning. return true; } List ris = - this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME); + this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME); if (ris.isEmpty()) { // maybe this will not happen any more, but anyway, no harm to add a check here... return true; @@ -1362,9 +1358,8 @@ private boolean waitForNamespaceOnline() throws IOException { @InterfaceAudience.Private public void updateConfigurationForQuotasObserver(Configuration conf) { // We're configured to not delete quotas on table deletion, so we don't need to add the obs. - if (!conf.getBoolean( - MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE, - MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)) { + if (!conf.getBoolean(MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE, + MasterQuotasObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)) { return; } String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); @@ -1393,8 +1388,8 @@ private void initMobCleaner() { *
</p>
          */ @InterfaceAudience.Private - protected ServerManager createServerManager(MasterServices master, - RegionServerList storage) throws IOException { + protected ServerManager createServerManager(MasterServices master, RegionServerList storage) + throws IOException { // We put this out here in a method so can do a Mockito.spy and stub it out // w/ a mocked up ServerManager. setupClusterConnection(); @@ -1412,9 +1407,9 @@ protected void initClusterSchemaService() throws IOException, InterruptedExcepti this.clusterSchemaService = new ClusterSchemaServiceImpl(this); this.clusterSchemaService.startAsync(); try { - this.clusterSchemaService.awaitRunning(getConfiguration().getInt( - HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, - DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); + this.clusterSchemaService + .awaitRunning(getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS, + DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS); } catch (TimeoutException toe) { throw new IOException("Timedout starting ClusterSchemaService", toe); } @@ -1476,53 +1471,56 @@ public TableStateManager getTableStateManager() { } /* - * Start up all services. If any of these threads gets an unhandled exception - * then they just die with a logged message. This should be fine because - * in general, we do not expect the master to get such unhandled exceptions - * as OOMEs; it should be lightly loaded. See what HRegionServer does if - * need to install an unexpected exception handler. + * Start up all services. If any of these threads gets an unhandled exception then they just die + * with a logged message. This should be fine because in general, we do not expect the master to + * get such unhandled exceptions as OOMEs; it should be lightly loaded. See what HRegionServer + * does if need to install an unexpected exception handler. 
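Not part of this patch — a small generic example of the safeguard the startServiceThreads() comment above mentions: installing an uncaught-exception handler so a service thread that dies logs the reason instead of disappearing silently (the MulticastPublisher thread factory in this diff does the same via Threads.LOGGING_EXCEPTION_HANDLER). Names below are illustrative.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class LoggingServiceThread {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingServiceThread.class);

  static Thread start(Runnable work, String name) {
    Thread t = new Thread(work, name);
    t.setDaemon(true);
    // Route unhandled failures into the service log instead of losing them on stderr.
    t.setUncaughtExceptionHandler(
      (thread, err) -> LOG.error("Service thread {} died unexpectedly", thread.getName(), err));
    t.start();
    return t;
  }
}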
*/ private void startServiceThreads() throws IOException { // Start the executor service pools - final int masterOpenRegionPoolSize = conf.getInt( - HConstants.MASTER_OPEN_REGION_THREADS, HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize)); - final int masterCloseRegionPoolSize = conf.getInt( - HConstants.MASTER_CLOSE_REGION_THREADS, HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_CLOSE_REGION).setCorePoolSize(masterCloseRegionPoolSize)); + final int masterOpenRegionPoolSize = conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS, + HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_OPEN_REGION) + .setCorePoolSize(masterOpenRegionPoolSize)); + final int masterCloseRegionPoolSize = conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS, + HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_CLOSE_REGION) + .setCorePoolSize(masterCloseRegionPoolSize)); final int masterServerOpThreads = conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS, - HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_SERVER_OPERATIONS).setCorePoolSize(masterServerOpThreads)); - final int masterServerMetaOpsThreads = conf.getInt( - HConstants.MASTER_META_SERVER_OPERATIONS_THREADS, - HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_META_SERVER_OPERATIONS).setCorePoolSize(masterServerMetaOpsThreads)); - final int masterLogReplayThreads = conf.getInt( - HConstants.MASTER_LOG_REPLAY_OPS_THREADS, HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads)); - final int masterSnapshotThreads = conf.getInt( - SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_SNAPSHOT_OPERATIONS).setCorePoolSize(masterSnapshotThreads) - .setAllowCoreThreadTimeout(true)); + HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS) + .setCorePoolSize(masterServerOpThreads)); + final int masterServerMetaOpsThreads = + conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS, + HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_META_SERVER_OPERATIONS) + .setCorePoolSize(masterServerMetaOpsThreads)); + final int masterLogReplayThreads = conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS, + HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT); + executorService.startExecutorService(executorService.new ExecutorConfig() + 
.setExecutorType(ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads)); + final int masterSnapshotThreads = conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, + SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS) + .setCorePoolSize(masterSnapshotThreads).setAllowCoreThreadTimeout(true)); final int masterMergeDispatchThreads = conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS, - HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_MERGE_OPERATIONS).setCorePoolSize(masterMergeDispatchThreads) - .setAllowCoreThreadTimeout(true)); + HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT); + executorService.startExecutorService( + executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_MERGE_OPERATIONS) + .setCorePoolSize(masterMergeDispatchThreads).setAllowCoreThreadTimeout(true)); // We depend on there being only one instance of this executor running // at a time. To do concurrency, would need fencing of enable/disable of // tables. // Any time changing this maxThreads to > 1, pls see the comment at // AccessController#postCompletedCreateTableAction - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1)); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1)); startProcedureExecutor(); // Create log cleaner thread pool @@ -1531,10 +1529,10 @@ private void startServiceThreads() throws IOException { params.put(MASTER, this); // Start log cleaner thread int cleanerInterval = - conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL); - this.logCleaner = new LogCleaner(cleanerInterval, this, conf, - getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), - logCleanerPool, params); + conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL); + this.logCleaner = + new LogCleaner(cleanerInterval, this, conf, getMasterWalManager().getFileSystem(), + getMasterWalManager().getOldLogDir(), logCleanerPool, params); getChoreService().scheduleChore(logCleaner); // start the hfile archive cleaner thread @@ -1542,31 +1540,30 @@ private void startServiceThreads() throws IOException { // Create archive cleaner thread pool hfileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf); this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, - getMasterFileSystem().getFileSystem(), archiveDir, hfileCleanerPool, params); + getMasterFileSystem().getFileSystem(), archiveDir, hfileCleanerPool, params); getChoreService().scheduleChore(hfileCleaner); // Regions Reopen based on very high storeFileRefCount is considered enabled // only if hbase.regions.recovery.store.file.ref.count has value > 0 - final int maxStoreFileRefCount = conf.getInt( - HConstants.STORE_FILE_REF_COUNT_THRESHOLD, + final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD, HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD); if (maxStoreFileRefCount > 0) { this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this); getChoreService().scheduleChore(this.regionsRecoveryChore); } else { - LOG.info("Reopening regions with very high storeFileRefCount is disabled. 
" + - "Provide threshold value > 0 for {} to enable it.", + LOG.info( + "Reopening regions with very high storeFileRefCount is disabled. " + + "Provide threshold value > 0 for {} to enable it.", HConstants.STORE_FILE_REF_COUNT_THRESHOLD); } this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this); - replicationBarrierCleaner = new ReplicationBarrierCleaner(conf, this, getConnection(), - replicationPeerManager); + replicationBarrierCleaner = + new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager); getChoreService().scheduleChore(replicationBarrierCleaner); - final boolean isSnapshotChoreEnabled = this.snapshotCleanupTracker - .isSnapshotCleanupEnabled(); + final boolean isSnapshotChoreEnabled = this.snapshotCleanupTracker.isSnapshotCleanupEnabled(); this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager()); if (isSnapshotChoreEnabled) { getChoreService().scheduleChore(this.snapshotCleanerChore); @@ -1646,8 +1643,8 @@ protected void stopServiceThreads() { private void createProcedureExecutor() throws IOException { MasterProcedureEnv procEnv = new MasterProcedureEnv(this); - procedureStore = - new RegionProcedureStore(this, masterRegion, new MasterProcedureEnv.FsUtilsLeaseRecovery(this)); + procedureStore = new RegionProcedureStore(this, masterRegion, + new MasterProcedureEnv.FsUtilsLeaseRecovery(this)); procedureStore.registerListener(new ProcedureStoreListener() { @Override @@ -1663,8 +1660,8 @@ public void abortProcess() { final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, Math.max( (cpus > 0 ? cpus / 4 : 0), MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS)); final boolean abortOnCorruption = - conf.getBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION, - MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION); + conf.getBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION, + MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION); procedureStore.start(numThreads); // Just initialize it but do not start the workers, we will start the workers later by calling // startProcedureExecutor. See the javadoc for finishActiveMasterInitialization for more @@ -1682,7 +1679,6 @@ protected void startProcedureExecutor() throws IOException { /** * Turn on/off Snapshot Cleanup Chore - * * @param on indicates whether Snapshot Cleanup Chore is to be run */ void switchSnapshotCleanup(final boolean on, final boolean synchronous) { @@ -1708,7 +1704,6 @@ private void switchSnapshotCleanup(final boolean on) { } } - private void stopProcedureExecutor() { if (procedureExecutor != null) { configurationManager.deregisterObserver(procedureExecutor.getEnvironment()); @@ -1747,8 +1742,8 @@ protected void stopChores() { /** * @return Get remote side's InetAddress */ - InetAddress getRemoteInetAddress(final int port, - final long serverStartCode) throws UnknownHostException { + InetAddress getRemoteInetAddress(final int port, final long serverStartCode) + throws UnknownHostException { // Do it out here in its own little method so can fake an address when // mocking up in tests. 
InetAddress ia = RpcServer.getRemoteIp(); @@ -1769,9 +1764,9 @@ InetAddress getRemoteInetAddress(final int port, */ private int getMaxBalancingTime() { // if max balancing time isn't set, defaulting it to period time - int maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, - getConfiguration() - .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD)); + int maxBalancingTime = + getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, getConfiguration() + .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD)); return maxBalancingTime; } @@ -1784,8 +1779,8 @@ private int getMaxRegionsInTransition() { } /** - * It first sleep to the next balance plan start time. Meanwhile, throttling by the max - * number regions in transition to protect availability. + * It first sleep to the next balance plan start time. Meanwhile, throttling by the max number + * regions in transition to protect availability. * @param nextBalanceStartTime The next balance plan start time * @param maxRegionsInTransition max number of regions in transition * @param cutoffTime when to exit balancer @@ -1806,10 +1801,10 @@ private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransi } // Throttling by max number regions in transition - while (!interrupted - && maxRegionsInTransition > 0 - && this.assignmentManager.getRegionStates().getRegionsInTransitionCount() - >= maxRegionsInTransition && EnvironmentEdgeManager.currentTime() <= cutoffTime) { + while (!interrupted && maxRegionsInTransition > 0 + && this.assignmentManager.getRegionStates() + .getRegionsInTransitionCount() >= maxRegionsInTransition + && EnvironmentEdgeManager.currentTime() <= cutoffTime) { try { // sleep if the number of regions in transition exceeds the limit Thread.sleep(100); @@ -1826,20 +1821,19 @@ public BalanceResponse balance() throws IOException { } /** - * Trigger a normal balance, see {@link HMaster#balance()} . If the balance is not executed - * this time, the metrics related to the balance will be updated. - * - * When balance is running, related metrics will be updated at the same time. But if some - * checking logic failed and cause the balancer exit early, we lost the chance to update - * balancer metrics. This will lead to user missing the latest balancer info. - * */ - public BalanceResponse balanceOrUpdateMetrics() throws IOException{ + * Trigger a normal balance, see {@link HMaster#balance()} . If the balance is not executed this + * time, the metrics related to the balance will be updated. When balance is running, related + * metrics will be updated at the same time. But if some checking logic failed and cause the + * balancer exit early, we lost the chance to update balancer metrics. This will lead to user + * missing the latest balancer info. 
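The two settings read in getMaxBalancingTime() above are easy to mix up; a small illustrative sketch of how they relate (the constant names are the ones used above, the values are made up):

  Configuration conf = HBaseConfiguration.create();
  // The balancer chore wakes up once per period.
  conf.setInt(HConstants.HBASE_BALANCER_PERIOD, 300_000);
  // Each run may keep moving regions for at most this long; if it is left unset,
  // getMaxBalancingTime() falls back to the period, so a run may use the whole interval.
  conf.setInt(HConstants.HBASE_BALANCER_MAX_BALANCING, 60_000);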
+ */ + public BalanceResponse balanceOrUpdateMetrics() throws IOException { synchronized (this.balancer) { BalanceResponse response = balance(); if (!response.isBalancerRan()) { Map>> assignments = - this.assignmentManager.getRegionStates().getAssignmentsForBalancer(this.tableStateManager, - this.serverManager.getOnlineServersList()); + this.assignmentManager.getRegionStates().getAssignmentsForBalancer( + this.tableStateManager, this.serverManager.getOnlineServersList()); for (Map> serverMap : assignments.values()) { serverMap.keySet().removeAll(this.serverManager.getDrainingServersList()); } @@ -1878,7 +1872,7 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { BalanceResponse.Builder responseBuilder = BalanceResponse.newBuilder(); if (loadBalancerTracker == null - || !(loadBalancerTracker.isBalancerOn() || request.isDryRun())) { + || !(loadBalancerTracker.isBalancerOn() || request.isDryRun())) { return responseBuilder.build(); } @@ -1887,7 +1881,7 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { } synchronized (this.balancer) { - // Only allow one balance run at at time. + // Only allow one balance run at at time. if (this.assignmentManager.hasRegionsInTransition()) { List regionsInTransition = assignmentManager.getRegionsInTransition(); // if hbase:meta region is in transition, result of assignment cannot be recorded @@ -1902,15 +1896,15 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { } if (!request.isIgnoreRegionsInTransition() || metaInTransition) { - LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition + - ") because " + regionsInTransition.size() + " region(s) in transition: " + toPrint - + (truncated? "(truncated list)": "")); + LOG.info("Not running balancer (ignoreRIT=false" + ", metaRIT=" + metaInTransition + + ") because " + regionsInTransition.size() + " region(s) in transition: " + toPrint + + (truncated ? "(truncated list)" : "")); return responseBuilder.build(); } } if (this.serverManager.areDeadServersInProgress()) { - LOG.info("Not running balancer because processing dead regionserver(s): " + - this.serverManager.getDeadServers()); + LOG.info("Not running balancer because processing dead regionserver(s): " + + this.serverManager.getDeadServers()); return responseBuilder.build(); } @@ -1927,13 +1921,13 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { } Map>> assignments = - this.assignmentManager.getRegionStates() - .getAssignmentsForBalancer(tableStateManager, this.serverManager.getOnlineServersList()); + this.assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager, + this.serverManager.getOnlineServersList()); for (Map> serverMap : assignments.values()) { serverMap.keySet().removeAll(this.serverManager.getDrainingServersList()); } - //Give the balancer the current cluster state. + // Give the balancer the current cluster state. this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor()); List plans = this.balancer.balanceCluster(assignments); @@ -1947,9 +1941,8 @@ public BalanceResponse balance(BalanceRequest request) throws IOException { // For dry run we don't actually want to execute the moves, but we do want // to execute the coprocessor below - List sucRPs = request.isDryRun() - ? Collections.emptyList() - : executeRegionPlansWithThrottling(plans); + List sucRPs = + request.isDryRun() ? 
Collections.emptyList() : executeRegionPlansWithThrottling(plans); if (this.cpHost != null) { try { @@ -1978,24 +1971,24 @@ public List executeRegionPlansWithThrottling(List plans) int maxRegionsInTransition = getMaxRegionsInTransition(); long balanceStartTime = EnvironmentEdgeManager.currentTime(); long cutoffTime = balanceStartTime + this.maxBalancingTime; - int rpCount = 0; // number of RegionPlans balanced so far + int rpCount = 0; // number of RegionPlans balanced so far if (plans != null && !plans.isEmpty()) { int balanceInterval = this.maxBalancingTime / plans.size(); - LOG.info("Balancer plans size is " + plans.size() + ", the balance interval is " - + balanceInterval + " ms, and the max number regions in transition is " - + maxRegionsInTransition); + LOG.info( + "Balancer plans size is " + plans.size() + ", the balance interval is " + balanceInterval + + " ms, and the max number regions in transition is " + maxRegionsInTransition); - for (RegionPlan plan: plans) { + for (RegionPlan plan : plans) { LOG.info("balance " + plan); - //TODO: bulk assign + // TODO: bulk assign try { this.assignmentManager.balance(plan); } catch (HBaseIOException hioe) { - //should ignore failed plans here, avoiding the whole balance plans be aborted - //later calls of balance() can fetch up the failed and skipped plans + // should ignore failed plans here, avoiding the whole balance plans be aborted + // later calls of balance() can fetch up the failed and skipped plans LOG.warn("Failed balance plan {}, skipping...", plan, hioe); } - //rpCount records balance plans processed, does not care if a plan succeeds + // rpCount records balance plans processed, does not care if a plan succeeds rpCount++; successRegionPlans.add(plan); @@ -2006,17 +1999,17 @@ public List executeRegionPlansWithThrottling(List plans) // if performing next balance exceeds cutoff time, exit the loop if (this.maxBalancingTime > 0 && rpCount < plans.size() - && EnvironmentEdgeManager.currentTime() > cutoffTime) { + && EnvironmentEdgeManager.currentTime() > cutoffTime) { // TODO: After balance, there should not be a cutoff time (keeping it as // a security net for now) - LOG.debug("No more balancing till next balance run; maxBalanceTime=" - + this.maxBalancingTime); + LOG.debug( + "No more balancing till next balance run; maxBalanceTime=" + this.maxBalancingTime); break; } } } LOG.debug("Balancer is going into sleep until next period in {}ms", getConfiguration() - .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD)); + .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD)); return successRegionPlans; } @@ -2026,10 +2019,8 @@ public RegionNormalizerManager getRegionNormalizerManager() { } @Override - public boolean normalizeRegions( - final NormalizeTableFilterParams ntfp, - final boolean isHighPriority - ) throws IOException { + public boolean normalizeRegions(final NormalizeTableFilterParams ntfp, + final boolean isHighPriority) throws IOException { if (regionNormalizerManager == null || !regionNormalizerManager.isNormalizerOn()) { LOG.debug("Region normalization is disabled, don't run region normalizer."); return false; @@ -2042,14 +2033,12 @@ public boolean normalizeRegions( } final Set matchingTables = getTableDescriptors(new LinkedList<>(), - ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) - .stream() - .map(TableDescriptor::getTableName) - .collect(Collectors.toSet()); + ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false).stream() + 
.map(TableDescriptor::getTableName).collect(Collectors.toSet()); final Set allEnabledTables = - tableStateManager.getTablesInStates(TableState.State.ENABLED); + tableStateManager.getTablesInStates(TableState.State.ENABLED); final List targetTables = - new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables)); + new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables)); Collections.shuffle(targetTables); return regionNormalizerManager.normalizeRegions(targetTables, isHighPriority); } @@ -2059,14 +2048,13 @@ public boolean normalizeRegions( */ @Override public String getClientIdAuditPrefix() { - return "Client=" + RpcServer.getRequestUserName().orElse(null) - + "/" + RpcServer.getRemoteAddress().orElse(null); + return "Client=" + RpcServer.getRequestUserName().orElse(null) + "/" + + RpcServer.getRemoteAddress().orElse(null); } /** - * Switch for the background CatalogJanitor thread. - * Used for testing. The thread will continue to run. It will just be a noop - * if disabled. + * Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to + * run. It will just be a noop if disabled. * @param b If false, the catalog janitor won't do anything. */ public void setCatalogJanitorEnabled(final boolean b) { @@ -2074,22 +2062,19 @@ public void setCatalogJanitorEnabled(final boolean b) { } @Override - public long mergeRegions( - final RegionInfo[] regionsToMerge, - final boolean forcible, - final long ng, + public long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible, final long ng, final long nonce) throws IOException { checkInitialized(); if (!isSplitOrMergeEnabled(MasterSwitchType.MERGE)) { String regionsStr = Arrays.deepToString(regionsToMerge); LOG.warn("Merge switch is off! skip merge of " + regionsStr); - throw new DoNotRetryIOException("Merge of " + regionsStr + - " failed because merge switch is off"); + throw new DoNotRetryIOException( + "Merge of " + regionsStr + " failed because merge switch is off"); } final String mergeRegionsStr = Arrays.stream(regionsToMerge).map(RegionInfo::getEncodedName) - .collect(Collectors.joining(", ")); + .collect(Collectors.joining(", ")); return MasterProcedureUtil.submitProcedure(new NonceProcedureRunnable(this, ng, nonce) { @Override protected void run() throws IOException { @@ -2109,38 +2094,38 @@ protected String getDescription() { } @Override - public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, - final long nonceGroup, final long nonce) - throws IOException { + public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup, + final long nonce) throws IOException { checkInitialized(); if (!isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) { LOG.warn("Split switch is off! 
skip split of " + regionInfo); - throw new DoNotRetryIOException("Split region " + regionInfo.getRegionNameAsString() + - " failed due to split switch off"); + throw new DoNotRetryIOException( + "Split region " + regionInfo.getRegionNameAsString() + " failed due to split switch off"); } - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); - LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); + LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); - // Execute the operation asynchronously - submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow)); - } + // Execute the operation asynchronously + submitProcedure(getAssignmentManager().createSplitProcedure(regionInfo, splitRow)); + } - @Override - protected String getDescription() { - return "SplitTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "SplitTableProcedure"; + } + }); } private void warmUpRegion(ServerName server, RegionInfo region) { FutureUtils.addListener(asyncClusterConnection.getRegionServerAdmin(server) - .warmupRegion(RequestConverter.buildWarmupRegionRequest(region)), (r, e) -> { + .warmupRegion(RequestConverter.buildWarmupRegionRequest(region)), + (r, e) -> { if (e != null) { LOG.warn("Failed to warm up region {} on server {}", region, server, e); } @@ -2152,8 +2137,8 @@ private void warmUpRegion(ServerName server, RegionInfo region) { // a success/failure result. @InterfaceAudience.Private public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException { - RegionState regionState = assignmentManager.getRegionStates(). - getRegionState(Bytes.toString(encodedRegionName)); + RegionState regionState = + assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName)); RegionInfo hri; if (regionState != null) { @@ -2163,17 +2148,17 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I } ServerName dest; - List exclude = hri.getTable().isSystemTable() ? assignmentManager.getExcludedServersForSystemTable() - : new ArrayList<>(1); - if (destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))) { - LOG.info( - Bytes.toString(encodedRegionName) + " can not move to " + Bytes.toString(destServerName) - + " because the server is in exclude list"); + List exclude = + hri.getTable().isSystemTable() ? 
assignmentManager.getExcludedServersForSystemTable() + : new ArrayList<>(1); + if (destServerName != null + && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))) { + LOG.info(Bytes.toString(encodedRegionName) + " can not move to " + + Bytes.toString(destServerName) + " because the server is in exclude list"); destServerName = null; } if (destServerName == null || destServerName.length == 0) { - LOG.info("Passed destination servername is null/empty so " + - "choosing a server at random"); + LOG.info("Passed destination servername is null/empty so " + "choosing a server at random"); exclude.add(regionState.getServerName()); final List destServers = this.serverManager.createDestinationServersList(exclude); dest = balancer.randomAssignment(hri, destServers); @@ -2192,22 +2177,22 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I if (dest.equals(serverName)) { // To avoid unnecessary region moving later by balancer. Don't put user // regions on master. - LOG.debug("Skipping move of region " + hri.getRegionNameAsString() + - " to avoid unnecessary region moving later by load balancer," + - " because it should not be on master"); + LOG.debug("Skipping move of region " + hri.getRegionNameAsString() + + " to avoid unnecessary region moving later by load balancer," + + " because it should not be on master"); return; } } if (dest.equals(regionState.getServerName())) { LOG.debug("Skipping move of region " + hri.getRegionNameAsString() - + " because region already assigned to the same server " + dest + "."); + + " because region already assigned to the same server " + dest + "."); return; } // Now we can do the move RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest); - assert rp.getDestination() != null: rp.toString() + " " + dest; + assert rp.getDestination() != null : rp.toString() + " " + dest; try { checkInitialized(); @@ -2216,13 +2201,13 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I } TransitRegionStateProcedure proc = - this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination()); + this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination()); if (conf.getBoolean(WARMUP_BEFORE_MOVE, DEFAULT_WARMUP_BEFORE_MOVE)) { // Warmup the region on the destination before initiating the move. // A region server could reject the close request because it either does not // have the specified region or the region is being split. 
- LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on " + - rp.getDestination()); + LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on " + + rp.getDestination()); warmUpRegion(rp.getDestination(), hri); } LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer"); @@ -2239,7 +2224,7 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I } } catch (IOException ioe) { if (ioe instanceof HBaseIOException) { - throw (HBaseIOException)ioe; + throw (HBaseIOException) ioe; } throw new HBaseIOException(ioe); } @@ -2260,31 +2245,31 @@ public long createTable(final TableDescriptor tableDescriptor, final byte[][] sp TableDescriptorChecker.sanityCheck(conf, desc); return MasterProcedureUtil - .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preCreateTable(desc, newRegions); - - LOG.info(getClientIdAuditPrefix() + " create " + desc); - - // TODO: We can handle/merge duplicate requests, and differentiate the case of - // TableExistsException by saying if the schema is the same or not. - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. - ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure( - new CreateTableProcedure(procedureExecutor.getEnvironment(), desc, newRegions, latch)); - latch.await(); - - getMaster().getMasterCoprocessorHost().postCreateTable(desc, newRegions); - } + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preCreateTable(desc, newRegions); - @Override - protected String getDescription() { - return "CreateTableProcedure"; - } - }); + LOG.info(getClientIdAuditPrefix() + " create " + desc); + + // TODO: We can handle/merge duplicate requests, and differentiate the case of + // TableExistsException by saying if the schema is the same or not. + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953. + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure(new CreateTableProcedure(procedureExecutor.getEnvironment(), desc, + newRegions, latch)); + latch.await(); + + getMaster().getMasterCoprocessorHost().postCreateTable(desc, newRegions); + } + + @Override + protected String getDescription() { + return "CreateTableProcedure"; + } + }); } @Override @@ -2296,14 +2281,14 @@ public long createSystemTable(final TableDescriptor tableDescriptor) throws IOEx TableName tableName = tableDescriptor.getTableName(); if (!(tableName.isSystemTable())) { throw new IllegalArgumentException( - "Only system table creation can use this createSystemTable API"); + "Only system table creation can use this createSystemTable API"); } RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, null); LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor); - // This special create table is called locally to master. Therefore, no RPC means no need + // This special create table is called locally to master. Therefore, no RPC means no need // to use nonce to detect duplicated RPC call. 
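The createTable(...) entry point above receives a TableDescriptor plus optional split keys from the client; a hedged sketch of what such a request looks like from the Admin API (table and family names are made up):

  TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
    .build();
  // Pre-split into two regions; the master runs TableDescriptorChecker.sanityCheck(conf, desc)
  // and then submits a CreateTableProcedure, blocking only on the "prepare" step (HBASE-19953).
  admin.createTable(desc, new byte[][] { Bytes.toBytes("m") });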
long procId = this.procedureExecutor.submitProcedure( new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions)); @@ -2312,18 +2297,15 @@ public long createSystemTable(final TableDescriptor tableDescriptor) throws IOEx } private void startActiveMasterManager(int infoPort) throws KeeperException { - String backupZNode = ZNodePaths.joinZNode( - zooKeeper.getZNodePaths().backupMasterAddressesZNode, serverName.toString()); + String backupZNode = ZNodePaths.joinZNode(zooKeeper.getZNodePaths().backupMasterAddressesZNode, + serverName.toString()); /* - * Add a ZNode for ourselves in the backup master directory since we - * may not become the active master. If so, we want the actual active - * master to know we are backup masters, so that it won't assign - * regions to us if so configured. - * - * If we become the active master later, ActiveMasterManager will delete - * this node explicitly. If we crash before then, ZooKeeper will delete - * this node for us since it is ephemeral. - */ + * Add a ZNode for ourselves in the backup master directory since we may not become the active + * master. If so, we want the actual active master to know we are backup masters, so that it + * won't assign regions to us if so configured. If we become the active master later, + * ActiveMasterManager will delete this node explicitly. If we crash before then, ZooKeeper will + * delete this node for us since it is ephemeral. + */ LOG.info("Adding backup master ZNode " + backupZNode); if (!MasterAddressTracker.setMasterAddress(zooKeeper, backupZNode, serverName, infoPort)) { LOG.warn("Failed create of " + backupZNode + " by " + serverName); @@ -2350,12 +2332,13 @@ private void startActiveMasterManager(int infoPort) throws KeeperException { status.setStatus("Failed to become active: " + t.getMessage()); LOG.error(HBaseMarkers.FATAL, "Failed to become active master", t); // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility - if (t instanceof NoClassDefFoundError && t.getMessage(). - contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")) { + if (t instanceof NoClassDefFoundError && t.getMessage() + .contains("org/apache/hadoop/hdfs/protocol/HdfsConstants$SafeModeAction")) { // improved error message for this special case - abort("HBase is having a problem with its Hadoop jars. You may need to recompile " + - "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion() + - " or change your hadoop jars to start properly", t); + abort("HBase is having a problem with its Hadoop jars. You may need to recompile " + + "HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion() + + " or change your hadoop jars to start properly", + t); } else { abort("Unhandled exception. 
Starting shutdown.", t); } @@ -2369,67 +2352,62 @@ private static boolean isCatalogTable(final TableName tableName) { } @Override - public long deleteTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException { + public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) + throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); - LOG.info(getClientIdAuditPrefix() + " delete " + tableName); + LOG.info(getClientIdAuditPrefix() + " delete " + tableName); - // TODO: We can handle/merge duplicate request - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. - ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure(new DeleteTableProcedure(procedureExecutor.getEnvironment(), - tableName, latch)); - latch.await(); + // TODO: We can handle/merge duplicate request + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953. + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure( + new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch)); + latch.await(); - getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); - } + getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); + } - @Override - protected String getDescription() { - return "DeleteTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "DeleteTableProcedure"; + } + }); } @Override - public long truncateTable( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) throws IOException { + public long truncateTable(final TableName tableName, final boolean preserveSplits, + final long nonceGroup, final long nonce) throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preTruncateTable(tableName); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preTruncateTable(tableName); - LOG.info(getClientIdAuditPrefix() + " truncate " + tableName); - ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); - submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), - tableName, preserveSplits, latch)); - latch.await(); + LOG.info(getClientIdAuditPrefix() + " truncate " + tableName); + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); + submitProcedure(new TruncateTableProcedure(procedureExecutor.getEnvironment(), + tableName, preserveSplits, latch)); + latch.await(); - 
getMaster().getMasterCoprocessorHost().postTruncateTable(tableName); - } + getMaster().getMasterCoprocessorHost().postTruncateTable(tableName); + } - @Override - protected String getDescription() { - return "TruncateTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "TruncateTableProcedure"; + } + }); } @Override @@ -2480,31 +2458,30 @@ public TableDescriptor get() throws IOException { }, nonceGroup, nonce, true); } - @Override public long modifyColumnStoreFileTracker(TableName tableName, byte[] family, String dstSFT, - long nonceGroup, long nonce) throws IOException { + long nonceGroup, long nonce) throws IOException { checkInitialized(); return MasterProcedureUtil - .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - - @Override - protected void run() throws IOException { - String sft = getMaster().getMasterCoprocessorHost() - .preModifyColumnFamilyStoreFileTracker(tableName, family, dstSFT); - LOG.info("{} modify column {} store file tracker of table {} to {}", - getClientIdAuditPrefix(), Bytes.toStringBinary(family), tableName, sft); - submitProcedure(new ModifyColumnFamilyStoreFileTrackerProcedure( - procedureExecutor.getEnvironment(), tableName, family, sft)); - getMaster().getMasterCoprocessorHost().postModifyColumnFamilyStoreFileTracker(tableName, - family, dstSFT); - } + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected String getDescription() { - return "ModifyColumnFamilyStoreFileTrackerProcedure"; - } - }); + @Override + protected void run() throws IOException { + String sft = getMaster().getMasterCoprocessorHost() + .preModifyColumnFamilyStoreFileTracker(tableName, family, dstSFT); + LOG.info("{} modify column {} store file tracker of table {} to {}", + getClientIdAuditPrefix(), Bytes.toStringBinary(family), tableName, sft); + submitProcedure(new ModifyColumnFamilyStoreFileTrackerProcedure( + procedureExecutor.getEnvironment(), tableName, family, sft)); + getMaster().getMasterCoprocessorHost().postModifyColumnFamilyStoreFileTracker(tableName, + family, dstSFT); + } + + @Override + protected String getDescription() { + return "ModifyColumnFamilyStoreFileTrackerProcedure"; + } + }); } @Override @@ -2537,54 +2514,56 @@ public long enableTable(final TableName tableName, final long nonceGroup, final throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preEnableTable(tableName); - - // Normally, it would make sense for this authorization check to exist inside - // AccessController, but because the authorization check is done based on internal state - // (rather than explicit permissions) we'll do the check here instead of in the - // coprocessor. 
- MasterQuotaManager quotaManager = getMasterQuotaManager(); - if (quotaManager != null) { - if (quotaManager.isQuotaInitialized()) { - SpaceQuotaSnapshot currSnapshotOfTable = - QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName); - if (currSnapshotOfTable != null) { - SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus(); - if (quotaStatus.isInViolation() - && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)) { - throw new AccessDeniedException("Enabling the table '" + tableName - + "' is disallowed due to a violated space quota."); + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preEnableTable(tableName); + + // Normally, it would make sense for this authorization check to exist inside + // AccessController, but because the authorization check is done based on internal state + // (rather than explicit permissions) we'll do the check here instead of in the + // coprocessor. + MasterQuotaManager quotaManager = getMasterQuotaManager(); + if (quotaManager != null) { + if (quotaManager.isQuotaInitialized()) { + SpaceQuotaSnapshot currSnapshotOfTable = + QuotaTableUtil.getCurrentSnapshotFromQuotaTable(getConnection(), tableName); + if (currSnapshotOfTable != null) { + SpaceQuotaStatus quotaStatus = currSnapshotOfTable.getQuotaStatus(); + if (quotaStatus.isInViolation() + && SpaceViolationPolicy.DISABLE == quotaStatus.getPolicy().orElse(null)) { + throw new AccessDeniedException("Enabling the table '" + tableName + + "' is disallowed due to a violated space quota."); + } + } + } else if (LOG.isTraceEnabled()) { + LOG.trace( + "Unable to check for space quotas as the MasterQuotaManager is not enabled"); } } - } else if (LOG.isTraceEnabled()) { - LOG.trace("Unable to check for space quotas as the MasterQuotaManager is not enabled"); - } - } - LOG.info(getClientIdAuditPrefix() + " enable " + tableName); + LOG.info(getClientIdAuditPrefix() + " enable " + tableName); - // Execute the operation asynchronously - client will check the progress of the operation - // In case the request is from a <1.1 client before returning, - // we want to make sure that the table is prepared to be - // enabled (the table is locked and the table state is set). - // Note: if the procedure throws exception, we will catch it and rethrow. - final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(); - submitProcedure(new EnableTableProcedure(procedureExecutor.getEnvironment(), - tableName, prepareLatch)); - prepareLatch.await(); + // Execute the operation asynchronously - client will check the progress of the + // operation + // In case the request is from a <1.1 client before returning, + // we want to make sure that the table is prepared to be + // enabled (the table is locked and the table state is set). + // Note: if the procedure throws exception, we will catch it and rethrow. 
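The quota check above refuses to re-enable a table whose space quota was violated under the DISABLE policy. For illustration, such a quota is typically set up from the client roughly as below (assuming the standard quota settings API; the table name and limit are hypothetical):

  try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) {
    // Once the table exceeds 10 GB the DISABLE policy disables it, and enableTable() above
    // is rejected with AccessDeniedException until the violation clears.
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(TableName.valueOf("t1"),
      10L * 1024 * 1024 * 1024, SpaceViolationPolicy.DISABLE));
  }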
+ final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(); + submitProcedure(new EnableTableProcedure(procedureExecutor.getEnvironment(), tableName, + prepareLatch)); + prepareLatch.await(); - getMaster().getMasterCoprocessorHost().postEnableTable(tableName); - } + getMaster().getMasterCoprocessorHost().postEnableTable(tableName); + } - @Override - protected String getDescription() { - return "EnableTableProcedure"; - } - }); + @Override + protected String getDescription() { + return "EnableTableProcedure"; + } + }); } @Override @@ -2592,35 +2571,36 @@ public long disableTable(final TableName tableName, final long nonceGroup, final throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preDisableTable(tableName); - - LOG.info(getClientIdAuditPrefix() + " disable " + tableName); - - // Execute the operation asynchronously - client will check the progress of the operation - // In case the request is from a <1.1 client before returning, - // we want to make sure that the table is prepared to be - // enabled (the table is locked and the table state is set). - // Note: if the procedure throws exception, we will catch it and rethrow. - // - // We need to wait for the procedure to potentially fail due to "prepare" sanity - // checks. This will block only the beginning of the procedure. See HBASE-19953. - final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch(); - submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), - tableName, false, prepareLatch)); - prepareLatch.await(); - - getMaster().getMasterCoprocessorHost().postDisableTable(tableName); - } + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + getMaster().getMasterCoprocessorHost().preDisableTable(tableName); - @Override - protected String getDescription() { - return "DisableTableProcedure"; - } - }); + LOG.info(getClientIdAuditPrefix() + " disable " + tableName); + + // Execute the operation asynchronously - client will check the progress of the + // operation + // In case the request is from a <1.1 client before returning, + // we want to make sure that the table is prepared to be + // enabled (the table is locked and the table state is set). + // Note: if the procedure throws exception, we will catch it and rethrow. + // + // We need to wait for the procedure to potentially fail due to "prepare" sanity + // checks. This will block only the beginning of the procedure. See HBASE-19953. 
+ final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createBlockingLatch(); + submitProcedure(new DisableTableProcedure(procedureExecutor.getEnvironment(), tableName, + false, prepareLatch)); + prepareLatch.await(); + + getMaster().getMasterCoprocessorHost().postDisableTable(tableName); + } + + @Override + protected String getDescription() { + return "DisableTableProcedure"; + } + }); } private long modifyTable(final TableName tableName, @@ -2635,7 +2615,7 @@ protected void run() throws IOException { .preModifyTable(tableName, oldDescriptor, newDescriptorGetter.get()); TableDescriptorChecker.sanityCheck(conf, newDescriptor); LOG.info("{} modify table {} from {} to {}", getClientIdAuditPrefix(), tableName, - oldDescriptor, newDescriptor); + oldDescriptor, newDescriptor); // Execute the operation synchronously - wait for the operation completes before // continuing. @@ -2674,31 +2654,31 @@ public TableDescriptor get() throws IOException { @Override public long modifyTableStoreFileTracker(TableName tableName, String dstSFT, long nonceGroup, - long nonce) throws IOException { + long nonce) throws IOException { checkInitialized(); return MasterProcedureUtil - .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - - @Override - protected void run() throws IOException { - String sft = getMaster().getMasterCoprocessorHost() - .preModifyTableStoreFileTracker(tableName, dstSFT); - LOG.info("{} modify table store file tracker of table {} to {}", getClientIdAuditPrefix(), - tableName, sft); - submitProcedure(new ModifyTableStoreFileTrackerProcedure( - procedureExecutor.getEnvironment(), tableName, sft)); - getMaster().getMasterCoprocessorHost().postModifyTableStoreFileTracker(tableName, sft); - } + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected String getDescription() { - return "ModifyTableStoreFileTrackerProcedure"; - } - }); + @Override + protected void run() throws IOException { + String sft = getMaster().getMasterCoprocessorHost() + .preModifyTableStoreFileTracker(tableName, dstSFT); + LOG.info("{} modify table store file tracker of table {} to {}", + getClientIdAuditPrefix(), tableName, sft); + submitProcedure(new ModifyTableStoreFileTrackerProcedure( + procedureExecutor.getEnvironment(), tableName, sft)); + getMaster().getMasterCoprocessorHost().postModifyTableStoreFileTracker(tableName, sft); + } + + @Override + protected String getDescription() { + return "ModifyTableStoreFileTrackerProcedure"; + } + }); } public long restoreSnapshot(final SnapshotDescription snapshotDesc, final long nonceGroup, - final long nonce, final boolean restoreAcl, final String customSFT) throws IOException { + final long nonce, final boolean restoreAcl, final String customSFT) throws IOException { checkInitialized(); getSnapshotManager().checkSnapshotSupport(); @@ -2706,24 +2686,23 @@ public long restoreSnapshot(final SnapshotDescription snapshotDesc, final long n final TableName dstTable = TableName.valueOf(snapshotDesc.getTable()); getClusterSchema().getNamespace(dstTable.getNamespaceAsString()); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { - @Override - protected void run() throws IOException { - setProcId( - getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), restoreAcl, - customSFT)); - } + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, 
nonceGroup, nonce) { + @Override + protected void run() throws IOException { + setProcId(getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), + restoreAcl, customSFT)); + } - @Override - protected String getDescription() { - return "RestoreSnapshotProcedure"; - } - }); + @Override + protected String getDescription() { + return "RestoreSnapshotProcedure"; + } + }); } private void checkTableExists(final TableName tableName) - throws IOException, TableNotFoundException { + throws IOException, TableNotFoundException { if (!tableDescriptors.exists(tableName)) { throw new TableNotFoundException(tableName); } @@ -2761,21 +2740,27 @@ public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet

      * =-.}) If reference, then the regex has more than just one * group. Group 1, hfile/hfilelink pattern, is this file's id. Group 2 '(.+)' is the reference's * parent region name. @@ -253,9 +250,8 @@ public void setRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) { } /** - * @return the Reference object associated to this StoreFileInfo. - * null if the StoreFile is not a - * reference. + * @return the Reference object associated to this StoreFileInfo. null if the StoreFile is not a + * reference. */ public Reference getReference() { return this.reference; @@ -458,8 +454,7 @@ public static boolean isMobFile(final Path path) { } /** - * Checks if the file is a MOB reference file, - * created by snapshot + * Checks if the file is a MOB reference file, created by snapshot * @param path path to a file * @return true, if - yes, false otherwise */ @@ -477,7 +472,6 @@ public static boolean isMobRefFile(final Path path) { return m.matches() && m.groupCount() > 1; } - /** * @param path Path to check. * @return True if the path has format of a HStoreFile reference. @@ -633,8 +627,7 @@ public boolean equals(Object that) { if (initialPath == null && o.initialPath != null) { return false; } - if (initialPath != o.initialPath && initialPath != null - && !initialPath.equals(o.initialPath)) { + if (initialPath != o.initialPath && initialPath != null && !initialPath.equals(o.initialPath)) { return false; } if (reference != null && o.reference == null) { @@ -643,8 +636,7 @@ public boolean equals(Object that) { if (reference == null && o.reference != null) { return false; } - if (reference != o.reference && reference != null - && !reference.equals(o.reference)) { + if (reference != o.reference && reference != null && !reference.equals(o.reference)) { return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java index a40b209c6ebb..3e6d71ad74c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +24,6 @@ import java.util.Iterator; import java.util.List; import java.util.Optional; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.yetus.audience.InterfaceAudience; @@ -51,7 +49,7 @@ public interface StoreFileManager { * @param storeFiles The files to load. */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void loadFiles(List storeFiles); /** @@ -59,7 +57,7 @@ public interface StoreFileManager { * @param sfs New store files. */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void insertNewFiles(Collection sfs); /** @@ -68,7 +66,7 @@ public interface StoreFileManager { * @param results The resulting files for the compaction. 
*/ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void addCompactionResults(Collection compactedFiles, Collection results); /** @@ -76,7 +74,7 @@ public interface StoreFileManager { * @param compactedFiles the list of compacted files */ @RestrictedApi(explanation = "Should only be called in StoreEngine", link = "", - allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") + allowedOnPath = ".*(/org/apache/hadoop/hbase/regionserver/StoreEngine.java|/src/test/.*)") void removeCompactedFiles(Collection compactedFiles); /** @@ -86,24 +84,23 @@ public interface StoreFileManager { ImmutableCollection clearFiles(); /** - * Clears all the compacted files and returns them. This method is expected to be - * accessed single threaded. + * Clears all the compacted files and returns them. This method is expected to be accessed single + * threaded. * @return The files compacted previously. */ Collection clearCompactedFiles(); /** - * Gets the snapshot of the store files currently in use. Can be used for things like metrics - * and checks; should not assume anything about relations between store files in the list. + * Gets the snapshot of the store files currently in use. Can be used for things like metrics and + * checks; should not assume anything about relations between store files in the list. * @return The list of StoreFiles. */ Collection getStorefiles(); /** - * List of compacted files inside this store that needs to be excluded in reads - * because further new reads will be using only the newly created files out of compaction. - * These compacted files will be deleted/cleared once all the existing readers on these - * compacted files are done. + * List of compacted files inside this store that needs to be excluded in reads because further + * new reads will be using only the newly created files out of compaction. These compacted files + * will be deleted/cleared once all the existing readers on these compacted files are done. * @return the list of compacted files */ Collection getCompactedfiles(); @@ -132,18 +129,18 @@ Collection getFilesForScan(byte[] startRow, boolean includeStartRow, /** * Gets initial, full list of candidate store files to check for row-key-before. * @param targetKey The key that is the basis of the search. - * @return The files that may have the key less than or equal to targetKey, in reverse - * order of new-ness, and preference for target key. + * @return The files that may have the key less than or equal to targetKey, in reverse order of + * new-ness, and preference for target key. */ Iterator getCandidateFilesForRowKeyBefore(KeyValue targetKey); /** * Updates the candidate list for finding row key before. Based on the list of candidates - * remaining to check from getCandidateFilesForRowKeyBefore, targetKey and current candidate, - * may trim and reorder the list to remove the files where a better candidate cannot be found. - * @param candidateFiles The candidate files not yet checked for better candidates - return - * value from {@link #getCandidateFilesForRowKeyBefore(KeyValue)}, - * with some files already removed. + * remaining to check from getCandidateFilesForRowKeyBefore, targetKey and current candidate, may + * trim and reorder the list to remove the files where a better candidate cannot be found. 
+ * @param candidateFiles The candidate files not yet checked for better candidates - return value + * from {@link #getCandidateFilesForRowKeyBefore(KeyValue)}, with some files already + * removed. * @param targetKey The key to search for. * @param candidate The current best candidate found. * @return The list to replace candidateFiles. @@ -151,7 +148,6 @@ Collection getFilesForScan(byte[] startRow, boolean includeStartRow, Iterator updateCandidateFilesForRowKeyBefore(Iterator candidateFiles, KeyValue targetKey, Cell candidate); - /** * Gets the split point for the split of this set of store files (approx. middle). * @return The mid-point if possible. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index 32ee47e21f1c..1b8099c5820b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,14 +28,13 @@ import java.util.Optional; import java.util.SortedSet; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockType; @@ -141,13 +140,13 @@ public CellComparator getComparator() { */ public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) { - return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), - !isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); + return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction, + reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn); } /** - * Return the ref count associated with the reader whenever a scanner associated with the - * reader is opened. + * Return the ref count associated with the reader whenever a scanner associated with the reader + * is opened. */ int getRefCount() { return refCount.get(); @@ -178,9 +177,9 @@ void readCompleted() { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends - * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner - * class/interface which is the preferred way to scan a store with higher level concepts. - * + * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner + * class/interface which is the preferred way to scan a store with higher level + * concepts. * @param cacheBlocks should we cache the blocks? 
* @param pread use pread (for concurrent small readers) * @return the underlying HFileScanner @@ -193,21 +192,17 @@ public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { /** * @deprecated since 2.0.0 and will be removed in 3.0.0. Do not write further code which depends - * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner - * class/interface which is the preferred way to scan a store with higher level concepts. - * - * @param cacheBlocks - * should we cache the blocks? - * @param pread - * use pread (for concurrent small readers) - * @param isCompaction - * is scanner being used for compaction? + * on this call. Instead use getStoreFileScanner() which uses the StoreFileScanner + * class/interface which is the preferred way to scan a store with higher level + * concepts. + * @param cacheBlocks should we cache the blocks? + * @param pread use pread (for concurrent small readers) + * @param isCompaction is scanner being used for compaction? * @return the underlying HFileScanner * @see HBASE-15296 */ @Deprecated - public HFileScanner getScanner(boolean cacheBlocks, boolean pread, - boolean isCompaction) { + public HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction) { return reader.getScanner(conf, cacheBlocks, pread, isCompaction); } @@ -216,33 +211,30 @@ public void close(boolean evictOnClose) throws IOException { } /** - * Check if this storeFile may contain keys within the TimeRange that - * have not expired (i.e. not older than oldestUnexpiredTS). + * Check if this storeFile may contain keys within the TimeRange that have not expired (i.e. not + * older than oldestUnexpiredTS). * @param tr the timeRange to restrict - * @param oldestUnexpiredTS the oldest timestamp that is not expired, as - * determined by the column family's TTL + * @param oldestUnexpiredTS the oldest timestamp that is not expired, as determined by the column + * family's TTL * @return false if queried keys definitely don't exist in this StoreFile */ boolean passesTimerangeFilter(TimeRange tr, long oldestUnexpiredTS) { - return this.timeRange == null? true: - this.timeRange.includesTimeRange(tr) && this.timeRange.getMax() >= oldestUnexpiredTS; + return this.timeRange == null ? true + : this.timeRange.includesTimeRange(tr) && this.timeRange.getMax() >= oldestUnexpiredTS; } /** - * Checks whether the given scan passes the Bloom filter (if present). Only - * checks Bloom filters for single-row or single-row-column scans. Bloom - * filter checking for multi-gets is implemented as part of the store - * scanner system (see {@link StoreFileScanner#seek(Cell)} and uses - * the lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)} - * and {@link #passesGeneralRowColBloomFilter(Cell)}. - * - * @param scan the scan specification. Used to determine the row, and to - * check whether this is a single-row ("get") scan. - * @param columns the set of columns. Only used for row-column Bloom - * filters. - * @return true if the scan with the given column set passes the Bloom - * filter, or if the Bloom filter is not applicable for the scan. - * False if the Bloom filter is applicable and the scan fails it. + * Checks whether the given scan passes the Bloom filter (if present). Only checks Bloom filters + * for single-row or single-row-column scans. 
Bloom filter checking for multi-gets is implemented + * as part of the store scanner system (see {@link StoreFileScanner#seek(Cell)} and uses the + * lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)} and + * {@link #passesGeneralRowColBloomFilter(Cell)}. + * @param scan the scan specification. Used to determine the row, and to check whether this is a + * single-row ("get") scan. + * @param columns the set of columns. Only used for row-column Bloom filters. + * @return true if the scan with the given column set passes the Bloom filter, or if the Bloom + * filter is not applicable for the scan. False if the Bloom filter is applicable and the + * scan fails it. */ boolean passesBloomFilter(Scan scan, final SortedSet columns) { byte[] row = scan.getStartRow(); @@ -274,8 +266,7 @@ boolean passesBloomFilter(Scan scan, final SortedSet columns) { } } - public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, - int rowLen) { + public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, int rowLen) { // Cache Bloom filter as a local variable in case it is set to null by // another thread on an IO error. BloomFilter bloomFilter = this.deleteFamilyBloomFilter; @@ -295,8 +286,7 @@ public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, } return bloomFilter.contains(row, rowOffset, rowLen, null); } catch (IllegalArgumentException e) { - LOG.error("Bad Delete Family bloom filter data -- proceeding without", - e); + LOG.error("Bad Delete Family bloom filter data -- proceeding without", e); setDeleteFamilyBloomFilterFaulty(); } @@ -304,9 +294,8 @@ public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, } /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. - * + * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a + * multi-column query. * @return True if passes */ private boolean passesGeneralRowBloomFilter(byte[] row, int rowOffset, int rowLen) { @@ -318,19 +307,16 @@ private boolean passesGeneralRowBloomFilter(byte[] row, int rowOffset, int rowLe // Used in ROW bloom byte[] key = null; if (rowOffset != 0 || rowLen != row.length) { - throw new AssertionError( - "For row-only Bloom filters the row must occupy the whole array"); + throw new AssertionError("For row-only Bloom filters the row must occupy the whole array"); } key = row; return checkGeneralBloomFilter(key, null, bloomFilter); } /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. - * - * @param cell - * the cell to check if present in BloomFilter + * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a + * multi-column query. + * @param cell the cell to check if present in BloomFilter * @return True if passes */ public boolean passesGeneralRowColBloomFilter(Cell cell) { @@ -350,9 +336,8 @@ public boolean passesGeneralRowColBloomFilter(Cell cell) { } /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. - * + * A method for checking Bloom filters. Called directly from StoreFileScanner in case of a + * multi-column query. * @return True if passes */ private boolean passesGeneralRowPrefixBloomFilter(Scan scan) { @@ -369,7 +354,7 @@ private boolean passesGeneralRowPrefixBloomFilter(Scan scan) { // For non-get scans // Find out the common prefix of startRow and stopRow. 
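Aside (illustrative only, not part of this patch): the ROWPREFIX_FIXED_LENGTH branch being reformatted here only consults the Bloom filter when the scan's start and stop rows share at least prefixLength leading bytes. A self-contained sketch of that decision, with hypothetical names and a plain byte-wise comparison standing in for Bytes.findCommonPrefix:

import java.nio.charset.StandardCharsets;

public final class RowPrefixBloomSketch {

  /** Number of leading bytes the two rows have in common. */
  static int commonPrefixLength(byte[] a, byte[] b) {
    int limit = Math.min(a.length, b.length);
    int i = 0;
    while (i < limit && a[i] == b[i]) {
      i++;
    }
    return i;
  }

  /**
   * True if a fixed-length row-prefix Bloom filter is meaningful for this scan range,
   * i.e. the whole range lies under one prefix of the configured length.
   */
  static boolean prefixBloomApplicable(byte[] startRow, byte[] stopRow, int prefixLength) {
    return commonPrefixLength(startRow, stopRow) >= prefixLength;
  }

  public static void main(String[] args) {
    byte[] start = "user123-aaa".getBytes(StandardCharsets.UTF_8);
    byte[] stop = "user123-zzz".getBytes(StandardCharsets.UTF_8);
    System.out.println(prefixBloomApplicable(start, stop, 7));  // true: "user123" is shared
    System.out.println(prefixBloomApplicable(start, stop, 12)); // false: range spans many prefixes
  }
}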
int commonLength = Bytes.findCommonPrefix(scan.getStartRow(), scan.getStopRow(), - scan.getStartRow().length, scan.getStopRow().length, 0, 0); + scan.getStartRow().length, scan.getStopRow().length, 0, 0); // startRow and stopRow don't have the common prefix. // Or the common prefix length is less than prefixLength if (commonLength <= 0 || commonLength < prefixLength) { @@ -406,7 +391,7 @@ private boolean checkGeneralBloomFilter(byte[] key, Cell kvKey, BloomFilter bloo // a sufficient condition to return false. boolean keyIsAfterLast = (lastBloomKey != null); // hbase:meta does not have blooms. So we need not have special interpretation - // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom + // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom if (keyIsAfterLast) { if (bloomFilterType == BloomType.ROWCOL) { keyIsAfterLast = (CellComparator.getInstance().compare(kvKey, lastBloomKeyOnlyKV)) > 0; @@ -422,25 +407,22 @@ private boolean checkGeneralBloomFilter(byte[] key, Cell kvKey, BloomFilter bloo // required looking only for a row bloom. Cell rowBloomKey = PrivateCellUtil.createFirstOnRow(kvKey); // hbase:meta does not have blooms. So we need not have special interpretation - // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom + // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom if (keyIsAfterLast && (CellComparator.getInstance().compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0) { exists = false; } else { - exists = - bloomFilter.contains(kvKey, bloom, BloomType.ROWCOL) || - bloomFilter.contains(rowBloomKey, bloom, BloomType.ROWCOL); + exists = bloomFilter.contains(kvKey, bloom, BloomType.ROWCOL) + || bloomFilter.contains(rowBloomKey, bloom, BloomType.ROWCOL); } } else { - exists = !keyIsAfterLast - && bloomFilter.contains(key, 0, key.length, bloom); + exists = !keyIsAfterLast && bloomFilter.contains(key, 0, key.length, bloom); } return exists; } } catch (IOException e) { - LOG.error("Error reading bloom filter data -- proceeding without", - e); + LOG.error("Error reading bloom filter data -- proceeding without", e); setGeneralBloomFilterFaulty(); } catch (IllegalArgumentException e) { LOG.error("Bad bloom filter data -- proceeding without", e); @@ -466,23 +448,23 @@ public boolean passesKeyRangeFilter(Scan scan) { // the file is empty return false; } - if (Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) && - Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { + if (Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) + && Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { return true; } byte[] smallestScanRow = scan.isReversed() ? scan.getStopRow() : scan.getStartRow(); byte[] largestScanRow = scan.isReversed() ? scan.getStartRow() : scan.getStopRow(); - boolean nonOverLapping = (getComparator() - .compareRows(firstKeyKV.get(), largestScanRow, 0, largestScanRow.length) > 0 && - !Bytes.equals(scan.isReversed() ? scan.getStartRow() : scan.getStopRow(), - HConstants.EMPTY_END_ROW)) || - getComparator().compareRows(lastKeyKV.get(), smallestScanRow, 0, - smallestScanRow.length) < 0; + boolean nonOverLapping = + (getComparator().compareRows(firstKeyKV.get(), largestScanRow, 0, largestScanRow.length) > 0 + && !Bytes.equals(scan.isReversed() ? 
scan.getStartRow() : scan.getStopRow(), + HConstants.EMPTY_END_ROW)) + || getComparator().compareRows(lastKeyKV.get(), smallestScanRow, 0, + smallestScanRow.length) < 0; return !nonOverLapping; } public Map loadFileInfo() throws IOException { - Map fi = reader.getHFileInfo(); + Map fi = reader.getHFileInfo(); byte[] b = fi.get(BLOOM_FILTER_TYPE_KEY); if (b != null) { @@ -490,12 +472,12 @@ public Map loadFileInfo() throws IOException { } byte[] p = fi.get(BLOOM_FILTER_PARAM_KEY); - if (bloomFilterType == BloomType.ROWPREFIX_FIXED_LENGTH) { + if (bloomFilterType == BloomType.ROWPREFIX_FIXED_LENGTH) { prefixLength = Bytes.toInt(p); } lastBloomKey = fi.get(LAST_BLOOM_KEY); - if(bloomFilterType == BloomType.ROWCOL) { + if (bloomFilterType == BloomType.ROWCOL) { lastBloomKeyOnlyKV = new KeyValue.KeyOnlyKeyValue(lastBloomKey, 0, lastBloomKey.length); } byte[] cnt = fi.get(DELETE_FAMILY_COUNT); @@ -514,48 +496,41 @@ public void loadBloomfilter() { public void loadBloomfilter(BlockType blockType) { try { if (blockType == BlockType.GENERAL_BLOOM_META) { - if (this.generalBloomFilter != null) - return; // Bloom has been loaded + if (this.generalBloomFilter != null) return; // Bloom has been loaded DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); if (bloomMeta != null) { // sanity check for NONE Bloom filter if (bloomFilterType == BloomType.NONE) { - throw new IOException( - "valid bloom filter type not found in FileInfo"); + throw new IOException("valid bloom filter type not found in FileInfo"); } else { - generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, - reader); + generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); if (LOG.isTraceEnabled()) { LOG.trace("Loaded " + bloomFilterType.toString() + " " - + generalBloomFilter.getClass().getSimpleName() - + " metadata for " + reader.getName()); + + generalBloomFilter.getClass().getSimpleName() + " metadata for " + + reader.getName()); } } } } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { - if (this.deleteFamilyBloomFilter != null) - return; // Bloom has been loaded + if (this.deleteFamilyBloomFilter != null) return; // Bloom has been loaded DataInput bloomMeta = reader.getDeleteBloomFilterMetadata(); if (bloomMeta != null) { - deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta( - bloomMeta, reader); - LOG.info("Loaded Delete Family Bloom (" - + deleteFamilyBloomFilter.getClass().getSimpleName() - + ") metadata for " + reader.getName()); + deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader); + LOG.info( + "Loaded Delete Family Bloom (" + deleteFamilyBloomFilter.getClass().getSimpleName() + + ") metadata for " + reader.getName()); } } else { - throw new RuntimeException("Block Type: " + blockType.toString() - + "is not supported for Bloom filter"); + throw new RuntimeException( + "Block Type: " + blockType.toString() + "is not supported for Bloom filter"); } } catch (IOException e) { - LOG.error("Error reading bloom filter meta for " + blockType - + " -- proceeding without", e); + LOG.error("Error reading bloom filter meta for " + blockType + " -- proceeding without", e); setBloomFilterFaulty(blockType); } catch (IllegalArgumentException e) { - LOG.error("Bad bloom filter meta " + blockType - + " -- proceeding without", e); + LOG.error("Bad bloom filter meta " + blockType + " -- proceeding without", e); setBloomFilterFaulty(blockType); } } @@ -569,15 +544,12 @@ private void setBloomFilterFaulty(BlockType blockType) { } /** - * The number of Bloom filter 
entries in this store file, or an estimate - * thereof, if the Bloom filter is not loaded. This always returns an upper - * bound of the number of Bloom filter entries. - * + * The number of Bloom filter entries in this store file, or an estimate thereof, if the Bloom + * filter is not loaded. This always returns an upper bound of the number of Bloom filter entries. * @return an estimate of the number of Bloom filter entries in this file */ public long getFilterEntries() { - return generalBloomFilter != null ? generalBloomFilter.getKeyCount() - : reader.getEntries(); + return generalBloomFilter != null ? generalBloomFilter.getKeyCount() : reader.getEntries(); } public void setGeneralBloomFilterFaulty() { @@ -653,8 +625,7 @@ long getUncompressedDataIndexSize() { } public long getTotalBloomSize() { - if (generalBloomFilter == null) - return 0; + if (generalBloomFilter == null) return 0; return generalBloomFilter.getByteSize(); } @@ -676,7 +647,7 @@ void disableBloomFilterForTesting() { } public long getMaxTimestamp() { - return timeRange == null ? TimeRange.INITIAL_MAX_TIMESTAMP: timeRange.getMax(); + return timeRange == null ? TimeRange.INITIAL_MAX_TIMESTAMP : timeRange.getMax(); } boolean isSkipResetSeqId() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 6e70c5b68de9..3ae1b2f169ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.FileNotFoundException; @@ -28,23 +26,21 @@ import java.util.Optional; import java.util.PriorityQueue; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * KeyValueScanner adaptor over the Reader. It also provides hooks into - * bloom filter things. + * KeyValueScanner adaptor over the Reader. It also provides hooks into bloom filter things. 
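Aside (simplified sketch, not part of this patch): the dense nonOverLapping expression reformatted in passesKeyRangeFilter above is an interval-overlap test between the scan's row range and the file's [first row, last row]. A standalone approximation using unsigned lexicographic comparison; the real code goes through CellComparator and also handles reversed scans and empty stop rows:

import java.util.Arrays;

final class KeyRangeOverlapSketch {

  /**
   * The scan range and the file range miss each other only if the file starts after the scan
   * ends (and the scan actually has an end), or the file ends before the scan starts.
   */
  static boolean scanOverlapsFile(byte[] fileFirstRow, byte[] fileLastRow,
      byte[] smallestScanRow, byte[] largestScanRow, boolean scanIsOpenEnded) {
    boolean fileStartsAfterScan =
        !scanIsOpenEnded && Arrays.compareUnsigned(fileFirstRow, largestScanRow) > 0;
    boolean fileEndsBeforeScan = Arrays.compareUnsigned(fileLastRow, smallestScanRow) < 0;
    return !(fileStartsAfterScan || fileEndsBeforeScan);
  }
}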
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX) @InterfaceStability.Evolving @@ -135,7 +131,7 @@ public static List getScannersForStoreFiles(Collection readPt)) { + while (enforceMVCC && cur != null && (cur.getSequenceId() > readPt)) { boolean hasNext = hfs.next(); setCurrentCell(hfs.getCell()); if (hasNext && this.stopSkippingKVsIfNextRow @@ -304,23 +297,21 @@ public void close() { } /** - * * @param s * @param k * @return false if not found or if k is after the end. * @throws IOException */ - public static boolean seekAtOrAfter(HFileScanner s, Cell k) - throws IOException { + public static boolean seekAtOrAfter(HFileScanner s, Cell k) throws IOException { int result = s.seekTo(k); - if(result < 0) { + if (result < 0) { if (result == HConstants.INDEX_KEY_MAGIC) { // using faked key return true; } // Passed KV is smaller than first KV in file, work from start of file return s.seekTo(); - } else if(result > 0) { + } else if (result > 0) { // Passed KV is larger than current KV in file, if there is a next // it is the "after", if not then this scanner is done. return s.next(); @@ -329,9 +320,8 @@ public static boolean seekAtOrAfter(HFileScanner s, Cell k) return true; } - static boolean reseekAtOrAfter(HFileScanner s, Cell k) - throws IOException { - //This function is similar to seekAtOrAfter function + static boolean reseekAtOrAfter(HFileScanner s, Cell k) throws IOException { + // This function is similar to seekAtOrAfter function int result = s.reseekTo(k); if (result <= 0) { if (result == HConstants.INDEX_KEY_MAGIC) { @@ -342,7 +332,7 @@ static boolean reseekAtOrAfter(HFileScanner s, Cell k) // than first KV in file, and it is the first time we seek on this file. // So we also need to work from the start of file. if (!s.isSeeked()) { - return s.seekTo(); + return s.seekTo(); } return true; } @@ -360,22 +350,19 @@ public long getScannerOrder() { } /** - * Pretend we have done a seek but don't do it yet, if possible. The hope is - * that we find requested columns in more recent files and won't have to seek - * in older files. Creates a fake key/value with the given row/column and the - * highest (most recent) possible timestamp we might get from this file. When - * users of such "lazy scanner" need to know the next KV precisely (e.g. when - * this scanner is at the top of the heap), they run {@link #enforceSeek()}. + * Pretend we have done a seek but don't do it yet, if possible. The hope is that we find + * requested columns in more recent files and won't have to seek in older files. Creates a fake + * key/value with the given row/column and the highest (most recent) possible timestamp we might + * get from this file. When users of such "lazy scanner" need to know the next KV precisely (e.g. + * when this scanner is at the top of the heap), they run {@link #enforceSeek()}. *

      - * Note that this function does guarantee that the current KV of this scanner - * will be advanced to at least the given KV. Because of this, it does have - * to do a real seek in cases when the seek timestamp is older than the - * highest timestamp of the file, e.g. when we are trying to seek to the next - * row/column and use OLDEST_TIMESTAMP in the seek key. + * Note that this function does guarantee that the current KV of this scanner will be advanced to + * at least the given KV. Because of this, it does have to do a real seek in cases when the seek + * timestamp is older than the highest timestamp of the file, e.g. when we are trying to seek to + * the next row/column and use OLDEST_TIMESTAMP in the seek key. */ @Override - public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) - throws IOException { + public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException { if (kv.getFamilyLength() == 0) { useBloom = false; } @@ -386,13 +373,12 @@ public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) if (reader.getBloomFilterType() == BloomType.ROWCOL) { haveToSeek = reader.passesGeneralRowColBloomFilter(kv); } else if (canOptimizeForNonNullColumn - && ((PrivateCellUtil.isDeleteFamily(kv) - || PrivateCellUtil.isDeleteFamilyVersion(kv)))) { - // if there is no such delete family kv in the store file, - // then no need to seek. - haveToSeek = reader.passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(), - kv.getRowLength()); - } + && ((PrivateCellUtil.isDeleteFamily(kv) || PrivateCellUtil.isDeleteFamilyVersion(kv)))) { + // if there is no such delete family kv in the store file, + // then no need to seek. + haveToSeek = reader.passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(), + kv.getRowLength()); + } } delayedReseek = forward; @@ -450,8 +436,7 @@ public boolean realSeekDone() { @Override public void enforceSeek() throws IOException { - if (realSeekDone) - return; + if (realSeekDone) return; if (delayedReseek) { reseek(delayedSeekKV); @@ -487,8 +472,9 @@ public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) if (timeRange == null) { timeRange = scan.getTimeRange(); } - return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS) && reader - .passesKeyRangeFilter(scan) && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf)); + return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS) + && reader.passesKeyRangeFilter(scan) + && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf)); } @Override @@ -521,8 +507,7 @@ public boolean seekToPreviousRow(Cell originalKey) throws IOException { } finally { this.stopSkippingKVsIfNextRow = false; } - if (!resultOfSkipKVs - || getComparator().compareRows(cur, firstKeyOfPreviousRow) > 0) { + if (!resultOfSkipKVs || getComparator().compareRows(cur, firstKeyOfPreviousRow) > 0) { keepSeeking = true; key = firstKeyOfPreviousRow; continue; @@ -537,8 +522,7 @@ public boolean seekToPreviousRow(Cell originalKey) throws IOException { } catch (FileNotFoundException e) { throw e; } catch (IOException ioe) { - throw new IOException("Could not seekToPreviousRow " + this + " to key " - + originalKey, ioe); + throw new IOException("Could not seekToPreviousRow " + this + " to key " + originalKey, ioe); } } @@ -559,8 +543,7 @@ public boolean seekToLastRow() throws IOException { @Override public boolean backwardSeek(Cell key) throws IOException { seek(key); - if (cur == null - || getComparator().compareRows(cur, key) > 0) { + if 
(cur == null || getComparator().compareRows(cur, key) > 0) { return seekToPreviousRow(key); } return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java index da67c9432928..92d114390848 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,8 +72,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * A StoreFile writer. Use this to read/write HBase Store Files. It is package - * local because it is an implementation detail of the HBase regionserver. + * A StoreFile writer. Use this to read/write HBase Store Files. It is package local because it is + * an implementation detail of the HBase regionserver. */ @InterfaceAudience.Private public class StoreFileWriter implements CellSink, ShipperListener { @@ -94,59 +94,54 @@ public class StoreFileWriter implements CellSink, ShipperListener { /** * Creates an HFile.Writer that also write helpful meta data. - * - * @param fs file system to write to - * @param path file name to create - * @param conf user configuration - * @param bloomType bloom filter setting - * @param maxKeys the expected maximum number of keys to be added. Was used - * for Bloom filter size in {@link HFile} format version 1. - * @param favoredNodes an array of favored nodes or possibly null - * @param fileContext The HFile context - * @param shouldDropCacheBehind Drop pages written to page cache after writing the store file. + * @param fs file system to write to + * @param path file name to create + * @param conf user configuration + * @param bloomType bloom filter setting + * @param maxKeys the expected maximum number of keys to be added. Was used for Bloom filter size + * in {@link HFile} format version 1. + * @param favoredNodes an array of favored nodes or possibly null + * @param fileContext The HFile context + * @param shouldDropCacheBehind Drop pages written to page cache after writing the store file. 
* @param compactedFilesSupplier Returns the {@link HStore} compacted files which not archived * @throws IOException problem writing to FS */ private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf, BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext, boolean shouldDropCacheBehind, Supplier> compactedFilesSupplier) - throws IOException { + throws IOException { this.compactedFilesSupplier = compactedFilesSupplier; this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC); // TODO : Change all writers to be specifically created for compaction context - writer = HFile.getWriterFactory(conf, cacheConf) - .withPath(fs, path) - .withFavoredNodes(favoredNodes) - .withFileContext(fileContext) - .withShouldDropCacheBehind(shouldDropCacheBehind) - .create(); + writer = + HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withFavoredNodes(favoredNodes) + .withFileContext(fileContext).withShouldDropCacheBehind(shouldDropCacheBehind).create(); - generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite( - conf, cacheConf, bloomType, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, + bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); if (generalBloomFilterWriter != null) { this.bloomType = bloomType; this.bloomParam = BloomFilterUtil.getBloomFilterParam(bloomType, conf); if (LOG.isTraceEnabled()) { LOG.trace("Bloom filter type for " + path + ": " + this.bloomType + ", param: " - + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH? - Bytes.toInt(bloomParam):Bytes.toStringBinary(bloomParam)) + + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH ? 
Bytes.toInt(bloomParam) + : Bytes.toStringBinary(bloomParam)) + ", " + generalBloomFilterWriter.getClass().getSimpleName()); } // init bloom context switch (bloomType) { case ROW: bloomContext = - new RowBloomContext(generalBloomFilterWriter, fileContext.getCellComparator()); + new RowBloomContext(generalBloomFilterWriter, fileContext.getCellComparator()); break; case ROWCOL: bloomContext = - new RowColBloomContext(generalBloomFilterWriter, fileContext.getCellComparator()); + new RowColBloomContext(generalBloomFilterWriter, fileContext.getCellComparator()); break; case ROWPREFIX_FIXED_LENGTH: bloomContext = new RowPrefixFixedLengthBloomContext(generalBloomFilterWriter, - fileContext.getCellComparator(), Bytes.toInt(bloomParam)); + fileContext.getCellComparator(), Bytes.toInt(bloomParam)); break; default: throw new IOException( @@ -160,26 +155,25 @@ private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, Cach // initialize delete family Bloom filter when there is NO RowCol Bloom // filter if (this.bloomType != BloomType.ROWCOL) { - this.deleteFamilyBloomFilterWriter = BloomFilterFactory - .createDeleteBloomAtWrite(conf, cacheConf, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + this.deleteFamilyBloomFilterWriter = BloomFilterFactory.createDeleteBloomAtWrite(conf, + cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); deleteFamilyBloomContext = - new RowBloomContext(deleteFamilyBloomFilterWriter, fileContext.getCellComparator()); + new RowBloomContext(deleteFamilyBloomFilterWriter, fileContext.getCellComparator()); } else { deleteFamilyBloomFilterWriter = null; } if (deleteFamilyBloomFilterWriter != null && LOG.isTraceEnabled()) { - LOG.trace("Delete Family Bloom filter type for " + path + ": " + - deleteFamilyBloomFilterWriter.getClass().getSimpleName()); + LOG.trace("Delete Family Bloom filter type for " + path + ": " + + deleteFamilyBloomFilterWriter.getClass().getSimpleName()); } } public long getPos() throws IOException { return ((HFileWriterImpl) writer).getPos(); } + /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. * @param maxSequenceId Maximum sequence id. * @param majorCompaction True if this file is product of a major compaction * @throws IOException problem writing to FS @@ -190,8 +184,7 @@ public void appendMetadata(final long maxSequenceId, final boolean majorCompacti } /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. * @param maxSequenceId Maximum sequence id. * @param majorCompaction True if this file is product of a major compaction * @param storeFiles The compacted store files to generate this new file @@ -212,16 +205,14 @@ public void appendMetadata(final long maxSequenceId, final boolean majorCompacti * recursively. If file A, B, C compacted to new file D, and file D compacted to new file E, will * write A, B, C, D to file E's compacted files. So if file E compacted to new file F, will add E * to F's compacted files first, then add E's compacted files: A, B, C, D to it. And no need to - * add D's compacted file, as D's compacted files has been in E's compacted files, too. - * See HBASE-20724 for more details. - * + * add D's compacted file, as D's compacted files has been in E's compacted files, too. 
See + * HBASE-20724 for more details. * @param storeFiles The compacted store files to generate this new file * @return bytes of CompactionEventTracker */ private byte[] toCompactionEventTrackerBytes(Collection storeFiles) { - Set notArchivedCompactedStoreFiles = - this.compactedFilesSupplier.get().stream().map(sf -> sf.getPath().getName()) - .collect(Collectors.toSet()); + Set notArchivedCompactedStoreFiles = this.compactedFilesSupplier.get().stream() + .map(sf -> sf.getPath().getName()).collect(Collectors.toSet()); Set compactedStoreFiles = new HashSet<>(); for (HStoreFile storeFile : storeFiles) { compactedStoreFiles.add(storeFile.getFileInfo().getPath().getName()); @@ -235,8 +226,7 @@ private byte[] toCompactionEventTrackerBytes(Collection storeFiles) } /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. + * Writes meta data. Call before {@link #close()} since its written as meta data to this file. * @param maxSequenceId Maximum sequence id. * @param majorCompaction True if this file is product of a major compaction * @param mobCellsCount The number of mob cells. @@ -270,10 +260,8 @@ public void appendTrackedTimestampsToMetadata() throws IOException { } /** - * Record the earlest Put timestamp. - * - * If the timeRangeTracker is not set, - * update TimeRangeTracker to include the timestamp of this key + * Record the earlest Put timestamp. If the timeRangeTracker is not set, update TimeRangeTracker + * to include the timestamp of this key */ public void trackTimestamps(final Cell cell) { if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { @@ -286,19 +274,15 @@ private void appendGeneralBloomfilter(final Cell cell) throws IOException { if (this.generalBloomFilterWriter != null) { /* * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.png - * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + Timestamp - * - * 3 Types of Filtering: - * 1. Row = Row - * 2. RowCol = Row + Qualifier - * 3. RowPrefixFixedLength = Fixed Length Row Prefix + * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + Timestamp 3 Types of + * Filtering: 1. Row = Row 2. RowCol = Row + Qualifier 3. RowPrefixFixedLength = Fixed Length + * Row Prefix */ bloomContext.writeBloom(cell); } } - private void appendDeleteFamilyBloomFilter(final Cell cell) - throws IOException { + private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException { if (!PrivateCellUtil.isDeleteFamily(cell) && !PrivateCellUtil.isDeleteFamilyVersion(cell)) { return; } @@ -341,7 +325,6 @@ public boolean hasGeneralBloom() { /** * For unit testing only. - * * @return the Bloom filter used by this writer. */ BloomFilterWriter getGeneralBloomWriter() { @@ -395,9 +378,9 @@ public void close() throws IOException { // Log final Bloom filter statistics. This needs to be done after close() // because compound Bloom filters might be finalized as part of closing. if (LOG.isTraceEnabled()) { - LOG.trace((hasGeneralBloom ? "" : "NO ") + "General Bloom and " + - (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " + - getPath()); + LOG.trace( + (hasGeneralBloom ? "" : "NO ") + "General Bloom and " + (hasDeleteFamilyBloom ? "" : "NO ") + + "DeleteFamily" + " was added to HFile " + getPath()); } } @@ -406,7 +389,8 @@ public void appendFileInfo(byte[] key, byte[] value) throws IOException { writer.appendFileInfo(key, value); } - /** For use in testing. + /** + * For use in testing. 
*/ HFile.Writer getHFileWriter() { return writer; @@ -423,8 +407,8 @@ static Path getUniqueFile(final FileSystem fs, final Path dir) throws IOExceptio return new Path(dir, dash.matcher(UUID.randomUUID().toString()).replaceAll("")); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", - justification="Will not overflow") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", + justification = "Will not overflow") public static class Builder { private final Configuration conf; private final CacheConfig cacheConf; @@ -440,8 +424,7 @@ public static class Builder { private Supplier> compactedFilesSupplier = () -> Collections.emptySet(); private String fileStoragePolicy; - public Builder(Configuration conf, CacheConfig cacheConf, - FileSystem fs) { + public Builder(Configuration conf, CacheConfig cacheConf, FileSystem fs) { this.conf = conf; this.cacheConf = cacheConf; this.fs = fs; @@ -458,9 +441,8 @@ public Builder(Configuration conf, FileSystem fs) { /** * Use either this method or {@link #withFilePath}, but not both. - * @param dir Path to column family directory. The directory is created if - * does not exist. The file is given a unique name within this - * directory. + * @param dir Path to column family directory. The directory is created if does not exist. The + * file is given a unique name within this directory. * @return this (for chained invocation) */ public Builder withOutputDir(Path dir) { @@ -514,8 +496,8 @@ public Builder withShouldDropCacheBehind(boolean shouldDropCacheBehind) { return this; } - public Builder withCompactedFilesSupplier( - Supplier> compactedFilesSupplier) { + public Builder + withCompactedFilesSupplier(Supplier> compactedFilesSupplier) { this.compactedFilesSupplier = compactedFilesSupplier; return this; } @@ -526,14 +508,12 @@ public Builder withFileStoragePolicy(String fileStoragePolicy) { } /** - * Create a store file writer. Client is responsible for closing file when - * done. If metadata, add BEFORE closing using - * {@link StoreFileWriter#appendMetadata}. + * Create a store file writer. Client is responsible for closing file when done. If metadata, + * add BEFORE closing using {@link StoreFileWriter#appendMetadata}. */ public StoreFileWriter build() throws IOException { if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) { - throw new IllegalArgumentException("Either specify parent directory " + - "or file path"); + throw new IllegalArgumentException("Either specify parent directory " + "or file path"); } if (dir == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java index e53fdc0de2a6..57ca84fbdf4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; @@ -25,47 +24,37 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A package protected interface for a store flushing. - * A store flush context carries the state required to prepare/flush/commit the store's cache. + * A package protected interface for a store flushing. 
A store flush context carries the state + * required to prepare/flush/commit the store's cache. */ @InterfaceAudience.Private interface StoreFlushContext { /** - * Prepare for a store flush (create snapshot) - * Requires pausing writes. - * A very short operation. + * Prepare for a store flush (create snapshot) Requires pausing writes. A very short operation. * @return The size of snapshot to flush */ MemStoreSize prepare(); /** - * Flush the cache (create the new store file) - * - * A length operation which doesn't require locking out any function - * of the store. - * + * Flush the cache (create the new store file) A length operation which doesn't require locking + * out any function of the store. * @throws IOException in case the flush fails */ void flushCache(MonitoredTask status) throws IOException; /** - * Commit the flush - add the store file to the store and clear the - * memstore snapshot. - * - * Requires pausing scans. - * - * A very short operation - * + * Commit the flush - add the store file to the store and clear the memstore snapshot. Requires + * pausing scans. A very short operation * @return whether compaction is required * @throws IOException */ boolean commit(MonitoredTask status) throws IOException; /** - * Similar to commit, but called in secondary region replicas for replaying the - * flush cache from primary region. Adds the new files to the store, and drops the - * snapshot depending on dropMemstoreSnapshot argument. + * Similar to commit, but called in secondary region replicas for replaying the flush cache from + * primary region. Adds the new files to the store, and drops the snapshot depending on + * dropMemstoreSnapshot argument. * @param fileNames names of the flushed files * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot * @throws IOException diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index 58031288f751..823d98e7ac58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.io.InterruptedIOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -59,8 +57,8 @@ public abstract List flushSnapshot(MemStoreSnapshot snapshot, long cacheFl MonitoredTask status, ThroughputController throughputController, FlushLifeCycleTracker tracker) throws IOException; - protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, - MonitoredTask status) throws IOException { + protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, MonitoredTask status) + throws IOException { // Write out the log sequence number that corresponds to this output // hfile. Also write current time in metadata as minFlushTime. // The hfile is current up to and including cacheFlushSeqNum. 
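Aside (illustrative only, not part of this patch): the prepare/flushCache/commit contract documented on StoreFlushContext above, written out as a hypothetical caller. The method signatures are the ones shown in this diff; the locking mentioned in the comments is only what the Javadoc implies, so this is a sketch rather than the actual flush path:

static boolean flushOneStore(StoreFlushContext ctx, MonitoredTask status) throws IOException {
  MemStoreSize snapshotSize = ctx.prepare(); // very short: snapshot the memstore while writes pause
  // ... the caller would account snapshotSize against the region's memstore here ...
  ctx.flushCache(status);                    // long-running: writes the new store file, no store-wide lock
  return ctx.commit(status);                 // very short: swaps the file in and drops the snapshot
}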
@@ -71,12 +69,12 @@ protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, } protected final StoreFileWriter createWriter(MemStoreSnapshot snapshot, boolean alwaysIncludesTag) - throws IOException { + throws IOException { return store.getStoreEngine() - .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(snapshot.getCellsCount()) - .compression(store.getColumnFamilyDescriptor().getCompressionType()).isCompaction(false) - .includeMVCCReadpoint(true).includesTag(alwaysIncludesTag || snapshot.isTagsPresent()) - .shouldDropBehind(false)); + .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(snapshot.getCellsCount()) + .compression(store.getColumnFamilyDescriptor().getCompressionType()).isCompaction(false) + .includeMVCCReadpoint(true).includesTag(alwaysIncludesTag || snapshot.isTagsPresent()) + .shouldDropBehind(false)); } /** @@ -84,7 +82,7 @@ protected final StoreFileWriter createWriter(MemStoreSnapshot snapshot, boolean * @return The scanner; null if coprocessor is canceling the flush. */ protected final InternalScanner createScanner(List snapshotScanners, - FlushLifeCycleTracker tracker) throws IOException { + FlushLifeCycleTracker tracker) throws IOException { ScanInfo scanInfo; if (store.getCoprocessorHost() != null) { scanInfo = store.getCoprocessorHost().preFlushScannerOpen(store, tracker); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 6cc5e4010d3b..56d86fe42af8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -94,15 +93,15 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner long mixedReads; // 1) Collects all the KVHeap that are eagerly getting closed during the - // course of a scan + // course of a scan // 2) Collects the unused memstore scanners. If we close the memstore scanners - // before sending data to client, the chunk may be reclaimed by other - // updates and the data will be corrupt. + // before sending data to client, the chunk may be reclaimed by other + // updates and the data will be corrupt. private final List scannersForDelayedClose = new ArrayList<>(); /** - * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not - * KVs skipped via seeking to next row/column. TODO: estimate them? + * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not KVs skipped via + * seeking to next row/column. TODO: estimate them? */ private long kvsScanned = 0; private Cell prevCell = null; @@ -134,9 +133,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner /** * If the read type is Scan.ReadType.DEFAULT, we will start with pread, and if the kvs we scanned * reaches this limit, we will reopen the scanner with stream. The default value is 4 times of - * block size for this store. - * If configured with a value <0, for all scans with ReadType DEFAULT, we will open scanner with - * stream mode itself. + * block size for this store. If configured with a value <0, for all scans with ReadType DEFAULT, + * we will open scanner with stream mode itself. 
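Aside (hypothetical helper, not part of this patch): the DEFAULT read-type behaviour described in the Javadoc above — start with pread and re-open with stream once enough bytes have been scanned — reduced to the bare decision. The real check lives in StoreScanner.trySwitchToStreamRead() further down in this file:

static boolean shouldSwitchToStreamRead(long bytesRead, long preadMaxBytes) {
  if (preadMaxBytes < 0) {
    // Configured negative: DEFAULT scans open their scanners in stream mode from the start.
    return true;
  }
  // Otherwise switch only after the pread budget (default: 4 * block size) is used up.
  return bytesRead >= preadMaxBytes;
}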
*/ public static final String STORESCANNER_PREAD_MAX_BYTES = "hbase.storescanner.pread.max.bytes"; @@ -162,8 +160,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner private boolean topChanged = false; /** An internal constructor. */ - private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, - int numColumns, long readPt, boolean cacheBlocks, ScanType scanType) { + private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, int numColumns, long readPt, + boolean cacheBlocks, ScanType scanType) { this.readPt = readPt; this.store = store; this.cacheBlocks = cacheBlocks; @@ -179,8 +177,8 @@ private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, // the seek operation. However, we also look the row-column Bloom filter // for multi-row (non-"get") scans because this is not done in // StoreFile.passesBloomFilter(Scan, SortedSet). - this.useRowColBloom = numColumns > 1 || (!get && numColumns == 1) - && (store == null || store.getColumnFamilyDescriptor().getBloomFilterType() == BloomType.ROWCOL); + this.useRowColBloom = numColumns > 1 || (!get && numColumns == 1) && (store == null + || store.getColumnFamilyDescriptor().getBloomFilterType() == BloomType.ROWCOL); this.maxRowSize = scanInfo.getTableMaxRowSize(); this.preadMaxBytes = scanInfo.getPreadMaxBytes(); if (get) { @@ -223,9 +221,8 @@ private void addCurrentScanners(List scanners) { } /** - * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we - * are not in a compaction. - * + * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we are not in a + * compaction. * @param store who we scan * @param scan the spec * @param columns which columns we are scanning @@ -233,8 +230,8 @@ private void addCurrentScanners(List scanners) { */ public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet columns, long readPt) throws IOException { - this(store, scan, scanInfo, columns != null ? columns.size() : 0, readPt, - scan.getCacheBlocks(), ScanType.USER_SCAN); + this(store, scan, scanInfo, columns != null ? columns.size() : 0, readPt, scan.getCacheBlocks(), + ScanType.USER_SCAN); if (columns != null && scan.isRaw()) { throw new DoNotRetryIOException("Cannot specify any column for a raw scan"); } @@ -376,8 +373,8 @@ public StoreScanner(ScanInfo scanInfo, ScanType scanType, StoreScanner(ScanInfo scanInfo, int maxVersions, ScanType scanType, List scanners) throws IOException { // 0 is passed as readpoint because the test bypasses Store - this(null, maxVersions > 0 ? new Scan().readVersions(maxVersions) - : SCAN_FOR_COMPACTION, scanInfo, 0, 0L, false, scanType); + this(null, maxVersions > 0 ? 
new Scan().readVersions(maxVersions) : SCAN_FOR_COMPACTION, + scanInfo, 0, 0L, false, scanType); this.matcher = CompactionScanQueryMatcher.create(scanInfo, scanType, Long.MAX_VALUE, PrivateConstants.OLDEST_TIMESTAMP, oldestUnexpiredTS, now, null, null, null); seekAllScanner(scanInfo, scanners); @@ -386,6 +383,7 @@ public StoreScanner(ScanInfo scanInfo, ScanType scanType, boolean isScanUsePread() { return this.scanUsePread; } + /** * Seek the specified scanners with the given key * @param scanners @@ -394,9 +392,8 @@ boolean isScanUsePread() { * @param isParallelSeek true if using parallel seek * @throws IOException */ - protected void seekScanners(List scanners, - Cell seekKey, boolean isLazy, boolean isParallelSeek) - throws IOException { + protected void seekScanners(List scanners, Cell seekKey, + boolean isLazy, boolean isParallelSeek) throws IOException { // Seek all scanners to the start of the Row (or if the exact matching row // key does not exist, then to the start of the next matching Row). // Always check bloom filter to optimize the top row seek for delete @@ -410,8 +407,8 @@ protected void seekScanners(List scanners, long totalScannersSoughtBytes = 0; for (KeyValueScanner scanner : scanners) { if (matcher.isUserScan() && totalScannersSoughtBytes >= maxRowSize) { - throw new RowTooBigException("Max row size allowed: " + maxRowSize - + ", but row is bigger than that"); + throw new RowTooBigException( + "Max row size allowed: " + maxRowSize + ", but row is bigger than that"); } scanner.seek(seekKey); Cell c = scanner.peek(); @@ -425,8 +422,8 @@ protected void seekScanners(List scanners, } } - protected void resetKVHeap(List scanners, - CellComparator comparator) throws IOException { + protected void resetKVHeap(List scanners, CellComparator comparator) + throws IOException { // Combine all seeked scanners with a heap heap = newKVHeap(scanners, comparator); } @@ -643,8 +640,8 @@ public boolean next(List outResult, ScannerContext scannerContext) throws totalBytesRead += cellSize; /** - * Increment the metric if all the cells are from memstore. - * If not we will account it for mixed reads + * Increment the metric if all the cells are from memstore. If not we will account it + * for mixed reads */ onlyFromMemstore = onlyFromMemstore && heap.isLatestCellFromMemstore(); // Update the progress of the scanner context @@ -741,7 +738,7 @@ public boolean next(List outResult, ScannerContext scannerContext) throws if (nextKV != null) { int difference = comparator.compare(nextKV, cell); if (((!scan.isReversed() && difference > 0) - || (scan.isReversed() && difference < 0))) { + || (scan.isReversed() && difference < 0))) { seekAsDirection(nextKV); NextState stateAfterSeekByHint = needToReturn(outResult); if (stateAfterSeekByHint != null) { @@ -793,15 +790,13 @@ private void updateMetricsStore(boolean memstoreRead) { } /** - * If the top cell won't be flushed into disk, the new top cell may be - * changed after #reopenAfterFlush. Because the older top cell only exist - * in the memstore scanner but the memstore scanner is replaced by hfile - * scanner after #reopenAfterFlush. If the row of top cell is changed, - * we should return the current cells. Otherwise, we may return - * the cells across different rows. + * If the top cell won't be flushed into disk, the new top cell may be changed after + * #reopenAfterFlush. Because the older top cell only exist in the memstore scanner but the + * memstore scanner is replaced by hfile scanner after #reopenAfterFlush. 
If the row of top cell + * is changed, we should return the current cells. Otherwise, we may return the cells across + * different rows. * @param outResult the cells which are visible for user scan - * @return null is the top cell doesn't change. Otherwise, the NextState - * to return + * @return null is the top cell doesn't change. Otherwise, the NextState to return */ private NextState needToReturn(List outResult) { if (!outResult.isEmpty() && topChanged) { @@ -829,30 +824,31 @@ private void seekOrSkipToNextColumn(Cell cell) throws IOException { /** * See if we should actually SEEK or rather just SKIP to the next Cell (see HBASE-13109). - * ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row, - * or seek to an arbitrary seek key. This method decides whether a seek is the most efficient - * _actual_ way to get us to the requested cell (SEEKs are more expensive than SKIP, SKIP, - * SKIP inside the current, loaded block). - * It does this by looking at the next indexed key of the current HFile. This key - * is then compared with the _SEEK_ key, where a SEEK key is an artificial 'last possible key - * on the row' (only in here, we avoid actually creating a SEEK key; in the compare we work with - * the current Cell but compare as though it were a seek key; see down in - * matcher.compareKeyForNextRow, etc). If the compare gets us onto the - * next block we *_SEEK, otherwise we just SKIP to the next requested cell. - * - *

      Other notes: + * ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row, or seek to an + * arbitrary seek key. This method decides whether a seek is the most efficient _actual_ way to + * get us to the requested cell (SEEKs are more expensive than SKIP, SKIP, SKIP inside the + * current, loaded block). It does this by looking at the next indexed key of the current HFile. + * This key is then compared with the _SEEK_ key, where a SEEK key is an artificial 'last possible + * key on the row' (only in here, we avoid actually creating a SEEK key; in the compare we work + * with the current Cell but compare as though it were a seek key; see down in + * matcher.compareKeyForNextRow, etc). If the compare gets us onto the next block we *_SEEK, + * otherwise we just SKIP to the next requested cell. + *

      + * Other notes: *

        *
      • Rows can straddle block boundaries
      • *
      • Versions of columns can straddle block boundaries (i.e. column C1 at T1 might be in a * different block than column C1 at T2)
      • - *
      • We want to SKIP if the chance is high that we'll find the desired Cell after a - * few SKIPs...
      • - *
      • We want to SEEK when the chance is high that we'll be able to seek - * past many Cells, especially if we know we need to go to the next block.
      • + *
      • We want to SKIP if the chance is high that we'll find the desired Cell after a few + * SKIPs...
      • + *
      • We want to SEEK when the chance is high that we'll be able to seek past many Cells, + * especially if we know we need to go to the next block.
      • *
      - *

      A good proxy (best effort) to determine whether SKIP is better than SEEK is whether - * we'll likely end up seeking to the next block (or past the next block) to get our next column. + *

      + * A good proxy (best effort) to determine whether SKIP is better than SEEK is whether we'll + * likely end up seeking to the next block (or past the next block) to get our next column. * Example: + * *

          * |    BLOCK 1              |     BLOCK 2                   |
          * |  r1/c1, r1/c2, r1/c3    |    r1/c4, r1/c5, r2/c1        |
      @@ -867,6 +863,7 @@ private void seekOrSkipToNextColumn(Cell cell) throws IOException {
          *                                            |              |
          *                                    Next Index Key        SEEK_NEXT_COL
          * 
      + * * Now imagine we want columns c1 and c3 (see first diagram above), the 'Next Index Key' of r1/c4 * is > r1/c3 so we should seek to get to the c1 on the next row, r2. In second case, say we only * want one version of c1, after we have it, a SEEK_COL will be issued to get to c2. Looking at @@ -883,9 +880,9 @@ protected boolean trySkipToNextRow(Cell cell) throws IOException { Cell previousIndexedKey = null; do { Cell nextIndexedKey = getNextIndexedKey(); - if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && - (nextIndexedKey == previousIndexedKey || - matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) { + if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY + && (nextIndexedKey == previousIndexedKey + || matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) { this.heap.next(); ++kvsScanned; previousIndexedKey = nextIndexedKey; @@ -908,9 +905,9 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException { Cell previousIndexedKey = null; do { Cell nextIndexedKey = getNextIndexedKey(); - if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && - (nextIndexedKey == previousIndexedKey || - matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) { + if (nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY + && (nextIndexedKey == previousIndexedKey + || matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) { this.heap.next(); ++kvsScanned; previousIndexedKey = nextIndexedKey; @@ -1022,7 +1019,7 @@ protected final boolean reopenAfterFlush() throws IOException { // Seek the new scanners to the last key seekScanners(scanners, lastTop, false, parallelSeekEnabled); // remove the older memstore scanner - for (int i = currentScanners.size() - 1; i >=0; i--) { + for (int i = currentScanners.size() - 1; i >= 0; i--) { if (!currentScanners.get(i).isFileScanner()) { scannersForDelayedClose.add(currentScanners.remove(i)); } else { @@ -1036,8 +1033,8 @@ protected final boolean reopenAfterFlush() throws IOException { resetKVHeap(this.currentScanners, store.getComparator()); resetQueryMatcher(lastTop); if (heap.peek() == null || store.getComparator().compareRows(lastTop, this.heap.peek()) != 0) { - LOG.info("Storescanner.peek() is changed where before = " + lastTop.toString() + - ",and after = " + heap.peek()); + LOG.info("Storescanner.peek() is changed where before = " + lastTop.toString() + + ",and after = " + heap.peek()); topChanged = true; } else { topChanged = false; @@ -1067,11 +1064,11 @@ private void resetQueryMatcher(Cell lastTopKey) { * @param comparator * @throws IOException */ - protected void checkScanOrder(Cell prevKV, Cell kv, - CellComparator comparator) throws IOException { + protected void checkScanOrder(Cell prevKV, Cell kv, CellComparator comparator) + throws IOException { // Check that the heap gives us KVs in an increasing order. 
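Aside (simplified sketch, not part of this patch): the SKIP-versus-SEEK heuristic described in the reflowed Javadoc above. trySkipToNextRow/trySkipToNextColumn keep issuing cheap in-block SKIPs as long as the current block's next indexed key is already at or beyond the position a SEEK hint would land on, because in that case the wanted cell cannot be in a later block:

static boolean preferSkipOverSeek(byte[] nextIndexedKey, byte[] seekHintKey,
    java.util.Comparator<byte[]> keyComparator) {
  if (nextIndexedKey == null) {
    return false; // no index information for the current block: fall back to a real seek
  }
  // If the next block starts at or after the hint, the hinted position is still inside this block.
  return keyComparator.compare(nextIndexedKey, seekHintKey) >= 0;
}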
- assert prevKV == null || comparator == null || comparator.compare(prevKV, kv) <= 0 : "Key " - + prevKV + " followed by a smaller key " + kv + " in cf " + store; + assert prevKV == null || comparator == null || comparator.compare(prevKV, kv) <= 0 + : "Key " + prevKV + " followed by a smaller key " + kv + " in cf " + store; } protected boolean seekToNextRow(Cell c) throws IOException { @@ -1084,8 +1081,7 @@ protected boolean seekToNextRow(Cell c) throws IOException { * @return true if scanner has values left, false if end of scanner * @throws IOException */ - protected boolean seekAsDirection(Cell kv) - throws IOException { + protected boolean seekAsDirection(Cell kv) throws IOException { return reseek(kv); } @@ -1101,12 +1097,12 @@ public boolean reseek(Cell kv) throws IOException { } void trySwitchToStreamRead() { - if (readType != Scan.ReadType.DEFAULT || !scanUsePread || closing || - heap.peek() == null || bytesRead < preadMaxBytes) { + if (readType != Scan.ReadType.DEFAULT || !scanUsePread || closing || heap.peek() == null + || bytesRead < preadMaxBytes) { return; } LOG.debug("Switch to stream read (scanned={} bytes) of {}", bytesRead, - this.store.getColumnFamilyName()); + this.store.getColumnFamilyName()); scanUsePread = false; Cell lastTop = heap.peek(); List memstoreScanners = new ArrayList<>(); @@ -1125,9 +1121,9 @@ void trySwitchToStreamRead() { try { // We must have a store instance here so no null check // recreate the scanners on the current file scanners - fileScanners = store.recreateScanners(scannersToClose, cacheBlocks, false, false, - matcher, scan.getStartRow(), scan.includeStartRow(), scan.getStopRow(), - scan.includeStopRow(), readPt, false); + fileScanners = store.recreateScanners(scannersToClose, cacheBlocks, false, false, matcher, + scan.getStartRow(), scan.includeStartRow(), scan.getStopRow(), scan.includeStopRow(), + readPt, false); if (fileScanners == null) { return; } @@ -1169,23 +1165,21 @@ protected final boolean checkFlushed() { return false; } - /** * Seek storefiles in parallel to optimize IO latency as much as possible * @param scanners the list {@link KeyValueScanner}s to be read from * @param kv the KeyValue on which the operation is being requested * @throws IOException */ - private void parallelSeek(final List - scanners, final Cell kv) throws IOException { + private void parallelSeek(final List scanners, final Cell kv) + throws IOException { if (scanners.isEmpty()) return; int storeFileScannerCount = scanners.size(); CountDownLatch latch = new CountDownLatch(storeFileScannerCount); List handlers = new ArrayList<>(storeFileScannerCount); for (KeyValueScanner scanner : scanners) { if (scanner instanceof StoreFileScanner) { - ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, - this.readPt, latch); + ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, this.readPt, latch); executor.submit(seekHandler); handlers.add(seekHandler); } else { @@ -1197,7 +1191,7 @@ private void parallelSeek(final List try { latch.await(); } catch (InterruptedException ie) { - throw (InterruptedIOException)new InterruptedIOException().initCause(ie); + throw (InterruptedIOException) new InterruptedIOException().initCause(ie); } for (ParallelSeekHandler handler : handlers) { @@ -1214,8 +1208,7 @@ private void parallelSeek(final List List getAllScannersForTesting() { List allScanners = new ArrayList<>(); KeyValueScanner current = heap.getCurrentForTesting(); - if (current != null) - allScanners.add(current); + if (current != null) 
allScanners.add(current); for (KeyValueScanner scanner : heap.getHeap()) allScanners.add(scanner); return allScanners; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 10a9330f8326..03a3950d48a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -130,8 +129,8 @@ static Optional getFileSplitPoint(HStoreFile file, CellComparator compar Cell firstKey = reader.getFirstKey().get(); Cell lastKey = reader.getLastKey().get(); // if the midkey is the same as the first or last keys, we cannot (ever) split this region. - if (comparator.compareRows(midKey, firstKey) == 0 || - comparator.compareRows(midKey, lastKey) == 0) { + if (comparator.compareRows(midKey, firstKey) == 0 + || comparator.compareRows(midKey, lastKey) == 0) { if (LOG.isDebugEnabled()) { LOG.debug("cannot split {} because midkey is the same as first or last row", file); } @@ -166,8 +165,7 @@ public static ChecksumType getChecksumType(Configuration conf) { * @return The bytesPerChecksum that is set in the configuration */ public static int getBytesPerChecksum(Configuration conf) { - return conf.getInt(HConstants.BYTES_PER_CHECKSUM, - HFile.DEFAULT_BYTES_PER_CHECKSUM); + return conf.getInt(HConstants.BYTES_PER_CHECKSUM, HFile.DEFAULT_BYTES_PER_CHECKSUM); } public static Configuration createStoreConfiguration(Configuration conf, TableDescriptor td, @@ -184,14 +182,14 @@ public static List toStoreFileInfo(Collection storefi public static long getTotalUncompressedBytes(List files) { return files.stream() - .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::getTotalUncompressedBytes)) - .sum(); + .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::getTotalUncompressedBytes)) + .sum(); } public static long getStorefilesSize(Collection files, - Predicate predicate) { + Predicate predicate) { return files.stream().filter(predicate) - .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::length)).sum(); + .mapToLong(file -> getStorefileFieldSize(file, StoreFileReader::length)).sum(); } public static long getStorefileFieldSize(HStoreFile file, ToLongFunction f) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java index 18f7e185eede..adc4b1cf2c66 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,31 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; - import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A chore for refreshing the store files for secondary regions hosted in the region server. - * - * This chore should run periodically with a shorter interval than HFile TTL - * ("hbase.master.hfilecleaner.ttl", default 5 minutes). - * It ensures that if we cannot refresh files longer than that amount, the region - * will stop serving read requests because the referenced files might have been deleted (by the - * primary region). + * A chore for refreshing the store files for secondary regions hosted in the region server. This + * chore should run periodically with a shorter interval than HFile TTL + * ("hbase.master.hfilecleaner.ttl", default 5 minutes). It ensures that if we cannot refresh files + * longer than that amount, the region will stop serving read requests because the referenced files + * might have been deleted (by the primary region). */ @InterfaceAudience.Private public class StorefileRefresherChore extends ScheduledChore { @@ -49,22 +45,22 @@ public class StorefileRefresherChore extends ScheduledChore { /** * The period (in milliseconds) for refreshing the store files for the secondary regions. */ - public static final String REGIONSERVER_STOREFILE_REFRESH_PERIOD - = "hbase.regionserver.storefile.refresh.period"; - static final int DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD = 0; //disabled by default + public static final String REGIONSERVER_STOREFILE_REFRESH_PERIOD = + "hbase.regionserver.storefile.refresh.period"; + static final int DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD = 0; // disabled by default /** - * Whether all storefiles should be refreshed, as opposed to just hbase:meta's - * Meta region doesn't have WAL replication for replicas enabled yet + * Whether all storefiles should be refreshed, as opposed to just hbase:meta's Meta region doesn't + * have WAL replication for replicas enabled yet */ - public static final String REGIONSERVER_META_STOREFILE_REFRESH_PERIOD - = "hbase.regionserver.meta.storefile.refresh.period"; + public static final String REGIONSERVER_META_STOREFILE_REFRESH_PERIOD = + "hbase.regionserver.meta.storefile.refresh.period"; private HRegionServer regionServer; private long hfileTtl; private int period; private boolean onlyMetaRefresh = true; - //ts of last time regions store files are refreshed + // ts of last time regions store files are refreshed private Map lastRefreshTimes; // encodedName -> long public StorefileRefresherChore(int period, boolean onlyMetaRefresh, HRegionServer regionServer, @@ -72,12 +68,12 @@ public StorefileRefresherChore(int period, boolean onlyMetaRefresh, HRegionServe super("StorefileRefresherChore", stoppable, period); this.period = period; this.regionServer = regionServer; - this.hfileTtl = this.regionServer.getConfiguration().getLong( - TimeToLiveHFileCleaner.TTL_CONF_KEY, TimeToLiveHFileCleaner.DEFAULT_TTL); + this.hfileTtl = this.regionServer.getConfiguration() + 
.getLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, TimeToLiveHFileCleaner.DEFAULT_TTL); this.onlyMetaRefresh = onlyMetaRefresh; if (period > hfileTtl / 2) { - throw new RuntimeException(REGIONSERVER_STOREFILE_REFRESH_PERIOD + - " should be set smaller than half of " + TimeToLiveHFileCleaner.TTL_CONF_KEY); + throw new RuntimeException(REGIONSERVER_STOREFILE_REFRESH_PERIOD + + " should be set smaller than half of " + TimeToLiveHFileCleaner.TTL_CONF_KEY); } lastRefreshTimes = new HashMap<>(); } @@ -106,16 +102,17 @@ protected void chore() { } } catch (IOException ex) { LOG.warn("Exception while trying to refresh store files for region:" + r.getRegionInfo() - + ", exception:" + StringUtils.stringifyException(ex)); + + ", exception:" + StringUtils.stringifyException(ex)); - // Store files have a TTL in the archive directory. If we fail to refresh for that long, we stop serving reads + // Store files have a TTL in the archive directory. If we fail to refresh for that long, we + // stop serving reads if (isRegionStale(encodedName, time)) { - ((HRegion)r).setReadsEnabled(false); // stop serving reads + ((HRegion) r).setReadsEnabled(false); // stop serving reads } continue; } lastRefreshTimes.put(encodedName, time); - ((HRegion)r).setReadsEnabled(true); // restart serving reads + ((HRegion) r).setReadsEnabled(true); // restart serving reads } // remove closed regions diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index a4e943ac8b04..88098a0b5386 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,15 +23,14 @@ import java.util.Collection; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; /** * Base class for cell sink that separates the provided cells into multiple files for stripe @@ -111,10 +110,9 @@ protected void sanityCheckLeft(byte[] left, Cell cell) throws IOException { protected void sanityCheckRight(byte[] right, Cell cell) throws IOException { if (!Arrays.equals(StripeStoreFileManager.OPEN_KEY, right) && comparator.compareRows(cell, right, 0, right.length) >= 0) { - String error = - "The last row is higher or equal than the right boundary of [" + Bytes.toString(right) - + "]: [" - + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; + String error = "The last row is higher or equal than the right boundary of [" + + Bytes.toString(right) + "]: [" + + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]"; LOG.error(error); throw new IOException(error); } @@ -149,9 +147,8 @@ public BoundaryMultiWriter(CellComparator comparator, List targetBoundar // must match some target boundaries, let's find them. 
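Editor's note on the StorefileRefresherChore hunks above: the constructor rejects a refresh period larger than half the HFile TTL, and a secondary region whose files could not be refreshed within that TTL stops serving reads. A hypothetical, simplified sketch of that relationship follows; the method names and units here are assumptions for illustration, not the chore's API.

```java
/**
 * Sketch (assumed names) of the period/TTL constraint and the staleness check
 * described in the StorefileRefresherChore javadoc above.
 */
public class RefreshStalenessSketch {
  static void validatePeriod(long periodMs, long hfileTtlMs) {
    if (periodMs > hfileTtlMs / 2) {
      // Mirrors the RuntimeException thrown in the chore's constructor.
      throw new IllegalArgumentException(
          "refresh period should be set smaller than half of the hfile TTL");
    }
  }

  static boolean isStale(long lastRefreshMs, long nowMs, long hfileTtlMs) {
    // If we could not refresh for longer than the TTL, the primary may already
    // have removed the archived files we still reference: stop serving reads.
    return nowMs - lastRefreshMs > hfileTtlMs;
  }

  public static void main(String[] args) {
    long ttlMs = 5 * 60 * 1000L;                       // default TTL: 5 minutes
    validatePeriod(60_000L, ttlMs);                    // ok: 1 min < 2.5 min
    System.out.println(isStale(0L, ttlMs + 1, ttlMs)); // true -> disable reads
  }
}
```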
assert (majorRangeFrom == null) == (majorRangeTo == null); if (majorRangeFrom != null) { - majorRangeFromIndex = - Arrays.equals(majorRangeFrom, StripeStoreFileManager.OPEN_KEY) ? 0 : Collections - .binarySearch(boundaries, majorRangeFrom, Bytes.BYTES_COMPARATOR); + majorRangeFromIndex = Arrays.equals(majorRangeFrom, StripeStoreFileManager.OPEN_KEY) ? 0 + : Collections.binarySearch(boundaries, majorRangeFrom, Bytes.BYTES_COMPARATOR); majorRangeToIndex = Arrays.equals(majorRangeTo, StripeStoreFileManager.OPEN_KEY) ? boundaries.size() : Collections.binarySearch(boundaries, majorRangeTo, Bytes.BYTES_COMPARATOR); @@ -176,8 +173,8 @@ public void append(Cell cell) throws IOException { } private boolean isCellAfterCurrentWriter(Cell cell) { - return !Arrays.equals(currentWriterEndKey, StripeStoreFileManager.OPEN_KEY) - && (comparator.compareRows(cell, currentWriterEndKey, 0, currentWriterEndKey.length) >= 0); + return !Arrays.equals(currentWriterEndKey, StripeStoreFileManager.OPEN_KEY) && (comparator + .compareRows(cell, currentWriterEndKey, 0, currentWriterEndKey.length) >= 0); } @Override @@ -223,9 +220,8 @@ private void createEmptyWriter() throws IOException { boolean needEmptyFile = isInMajorRange || isLastWriter; existingWriters.add(needEmptyFile ? writerFactory.createWriter() : null); hasAnyWriter |= needEmptyFile; - currentWriterEndKey = - (existingWriters.size() + 1 == boundaries.size()) ? null : boundaries.get(existingWriters - .size() + 1); + currentWriterEndKey = (existingWriters.size() + 1 == boundaries.size()) ? null + : boundaries.get(existingWriters.size() + 1); } private void checkCanCreateWriter() throws IOException { @@ -246,9 +242,8 @@ private void stopUsingCurrentWriter() { cellsInCurrentWriter = 0; } currentWriter = null; - currentWriterEndKey = - (existingWriters.size() + 1 == boundaries.size()) ? null : boundaries.get(existingWriters - .size() + 1); + currentWriterEndKey = (existingWriters.size() + 1 == boundaries.size()) ? null + : boundaries.get(existingWriters.size() + 1); } } @@ -297,18 +292,17 @@ public void append(Cell cell) throws IOException { // First append ever, do a sanity check. sanityCheckLeft(left, cell); doCreateWriter = true; - } else if (lastRowInCurrentWriter != null - && !PrivateCellUtil.matchingRows(cell, lastRowInCurrentWriter, 0, - lastRowInCurrentWriter.length)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Stopping to use a writer after [" + Bytes.toString(lastRowInCurrentWriter) - + "] row; wrote out " + cellsInCurrentWriter + " kvs"); + } else if (lastRowInCurrentWriter != null && !PrivateCellUtil.matchingRows(cell, + lastRowInCurrentWriter, 0, lastRowInCurrentWriter.length)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Stopping to use a writer after [" + Bytes.toString(lastRowInCurrentWriter) + + "] row; wrote out " + cellsInCurrentWriter + " kvs"); + } + lastRowInCurrentWriter = null; + cellsInCurrentWriter = 0; + cellsSeenInPrevious += cellsSeen; + doCreateWriter = true; } - lastRowInCurrentWriter = null; - cellsInCurrentWriter = 0; - cellsSeenInPrevious += cellsSeen; - doCreateWriter = true; - } if (doCreateWriter) { // make a copy byte[] boundary = existingWriters.isEmpty() ? 
left : CellUtil.cloneRow(cell); @@ -325,9 +319,8 @@ public void append(Cell cell) throws IOException { ++cellsInCurrentWriter; cellsSeen = cellsInCurrentWriter; if (this.sourceScanner != null) { - cellsSeen = - Math.max(cellsSeen, this.sourceScanner.getEstimatedNumberOfKvsScanned() - - cellsSeenInPrevious); + cellsSeen = Math.max(cellsSeen, + this.sourceScanner.getEstimatedNumberOfKvsScanned() - cellsSeenInPrevious); } // If we are not already waiting for opportunity to close, start waiting if we can @@ -346,11 +339,10 @@ public void append(Cell cell) throws IOException { @Override protected void preCommitWritersInternal() throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("Stopping with " - + cellsInCurrentWriter - + " kvs in last writer" - + ((this.sourceScanner == null) ? "" : ("; observed estimated " - + this.sourceScanner.getEstimatedNumberOfKvsScanned() + " KVs total"))); + LOG.debug("Stopping with " + cellsInCurrentWriter + " kvs in last writer" + + ((this.sourceScanner == null) ? "" + : ("; observed estimated " + this.sourceScanner.getEstimatedNumberOfKvsScanned() + + " KVs total"))); } if (lastCell != null) { sanityCheckRight(right, lastCell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java index 61deb0b93ce3..43a8d61cf150 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,16 +17,15 @@ */ package org.apache.hadoop.hbase.regionserver; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; /** - * Configuration class for stripe store and compactions. - * See {@link StripeStoreFileManager} for general documentation. - * See getters for the description of each setting. + * Configuration class for stripe store and compactions. See {@link StripeStoreFileManager} for + * general documentation. See getters for the description of each setting. */ @InterfaceAudience.Private public class StripeStoreConfig { @@ -38,34 +36,43 @@ public class StripeStoreConfig { /** The minimum number of files to compact within a stripe; same as for regular compaction. */ public static final String MIN_FILES_KEY = "hbase.store.stripe.compaction.minFiles"; - /** The minimum number of files to compact when compacting L0; same as minFiles for regular + /** + * The minimum number of files to compact when compacting L0; same as minFiles for regular * compaction. Given that L0 causes unnecessary overwriting of the data, should be higher than - * regular minFiles. */ + * regular minFiles. + */ public static final String MIN_FILES_L0_KEY = "hbase.store.stripe.compaction.minFilesL0"; - /** The size the stripe should achieve to be considered for splitting into multiple stripes. - Stripe will be split when it can be fully compacted, and it is above this size. 
*/ + /** + * The size the stripe should achieve to be considered for splitting into multiple stripes. Stripe + * will be split when it can be fully compacted, and it is above this size. + */ public static final String SIZE_TO_SPLIT_KEY = "hbase.store.stripe.sizeToSplit"; - /** The target count of new stripes to produce when splitting a stripe. A floating point - number, default is 2. Values less than 1 will be converted to 1/x. Non-whole numbers will - produce unbalanced splits, which may be good for some cases. In this case the "smaller" of - the new stripes will always be the rightmost one. If the stripe is bigger than sizeToSplit - when splitting, this will be adjusted by a whole increment. */ + /** + * The target count of new stripes to produce when splitting a stripe. A floating point number, + * default is 2. Values less than 1 will be converted to 1/x. Non-whole numbers will produce + * unbalanced splits, which may be good for some cases. In this case the "smaller" of the new + * stripes will always be the rightmost one. If the stripe is bigger than sizeToSplit when + * splitting, this will be adjusted by a whole increment. + */ public static final String SPLIT_PARTS_KEY = "hbase.store.stripe.splitPartCount"; - /** The initial stripe count to create. If the row distribution is roughly the same over time, - it's good to set this to a count of stripes that is expected to be achieved in most regions, - to get this count from the outset and prevent unnecessary splitting. */ + /** + * The initial stripe count to create. If the row distribution is roughly the same over time, it's + * good to set this to a count of stripes that is expected to be achieved in most regions, to get + * this count from the outset and prevent unnecessary splitting. + */ public static final String INITIAL_STRIPE_COUNT_KEY = "hbase.store.stripe.initialStripeCount"; /** Whether to flush memstore to L0 files, or directly to stripes. */ public static final String FLUSH_TO_L0_KEY = "hbase.store.stripe.compaction.flushToL0"; - /** When splitting region, the maximum size imbalance to allow in an attempt to split at a - stripe boundary, so that no files go to both regions. Most users won't need to change that. */ + /** + * When splitting region, the maximum size imbalance to allow in an attempt to split at a stripe + * boundary, so that no files go to both regions. Most users won't need to change that. + */ public static final String MAX_REGION_SPLIT_IMBALANCE_KEY = "hbase.store.stripe.region.split.max.imbalance"; - private final float maxRegionSplitImbalance; private final int level0CompactMinFiles; private final int stripeCompactMinFiles; @@ -78,6 +85,7 @@ public class StripeStoreConfig { private final long splitPartSize; // derived from sizeToSplitAt and splitPartCount private static final double EPSILON = 0.001; // good enough for this, not a real epsilon. 
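Editor's note: the splitPartCount description above ("values less than 1 will be converted to 1/x") is implemented by a small float-normalization helper in this class. A self-contained sketch of that behavior, with hypothetical names:

```java
/**
 * Hypothetical sketch of normalizing a "split part count" style setting: non-positive
 * values fall back to the default, and values on the wrong side of 1 are replaced by
 * their reciprocal, so 0.5 and 2 both mean "split into two parts".
 */
public class SplitPartCountSketch {
  private static final double EPSILON = 0.001; // "good enough", as in the config class

  static float normalize(float value, float defaultValue, boolean expectMoreThanOne) {
    if (value < EPSILON) {
      return defaultValue;              // 0 or negative: ignore, use default
    }
    if ((value > 1f) != expectMoreThanOne) {
      return 1f / value;                // e.g. 0.5 becomes 2 when we expect > 1
    }
    return value;
  }

  public static void main(String[] args) {
    System.out.println(normalize(2f, 2f, true));    // 2.0
    System.out.println(normalize(0.5f, 2f, true));  // 2.0 (reciprocal)
    System.out.println(normalize(-1f, 2f, true));   // 2.0 (default)
  }
}
```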
+ public StripeStoreConfig(Configuration config, StoreConfigInformation sci) { this.level0CompactMinFiles = config.getInt(MIN_FILES_L0_KEY, 4); this.flushIntoL0 = config.getBoolean(FLUSH_TO_L0_KEY, false); @@ -85,7 +93,7 @@ public StripeStoreConfig(Configuration config, StoreConfigInformation sci) { int minFiles = config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, -1); this.stripeCompactMinFiles = config.getInt(MIN_FILES_KEY, Math.max(minMinFiles, minFiles)); this.stripeCompactMaxFiles = config.getInt(MAX_FILES_KEY, - config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10)); + config.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 10)); this.maxRegionSplitImbalance = getFloat(config, MAX_REGION_SPLIT_IMBALANCE_KEY, 1.5f, true); float splitPartCount = getFloat(config, SPLIT_PARTS_KEY, 2f, true); @@ -100,7 +108,7 @@ public StripeStoreConfig(Configuration config, StoreConfigInformation sci) { if (flushSize == 0) { flushSize = 128 * 1024 * 1024; } - long defaultSplitSize = (long)(flushSize * getLevel0MinFiles() * 4 * splitPartCount); + long defaultSplitSize = (long) (flushSize * getLevel0MinFiles() * 4 * splitPartCount); this.sizeToSplitAt = config.getLong(SIZE_TO_SPLIT_KEY, defaultSplitSize); int initialCount = config.getInt(INITIAL_STRIPE_COUNT_KEY, 1); if (initialCount == 0) { @@ -108,15 +116,15 @@ public StripeStoreConfig(Configuration config, StoreConfigInformation sci) { initialCount = 1; } this.initialCount = initialCount; - this.splitPartSize = (long)(this.sizeToSplitAt / this.splitPartCount); + this.splitPartSize = (long) (this.sizeToSplitAt / this.splitPartCount); } - private static float getFloat( - Configuration config, String key, float defaultValue, boolean moreThanOne) { + private static float getFloat(Configuration config, String key, float defaultValue, + boolean moreThanOne) { float value = config.getFloat(key, defaultValue); if (value < EPSILON) { - LOG.warn(String.format( - "%s is set to 0 or negative; using default value of %f", key, defaultValue)); + LOG.warn( + String.format("%s is set to 0 or negative; using default value of %f", key, defaultValue)); value = defaultValue; } else if ((value > 1f) != moreThanOne) { value = 1f / value; @@ -157,8 +165,8 @@ public float getSplitCount() { } /** - * @return the desired size of the target stripe when splitting, in bytes. - * Derived from {@link #getSplitSize()} and {@link #getSplitCount()}. + * @return the desired size of the target stripe when splitting, in bytes. Derived from + * {@link #getSplitSize()} and {@link #getSplitCount()}. */ public long getSplitPartSize() { return splitPartSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java index bfb3f649ff27..a4464766a755 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,8 +40,8 @@ * The storage engine that implements the stripe-based store/compaction scheme. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class StripeStoreEngine extends StoreEngine { +public class StripeStoreEngine extends + StoreEngine { private static final Logger LOG = LoggerFactory.getLogger(StripeStoreEngine.class); private StripeStoreConfig config; @@ -56,13 +56,13 @@ public CompactionContext createCompaction() { } @Override - protected void createComponents( - Configuration conf, HStore store, CellComparator comparator) throws IOException { + protected void createComponents(Configuration conf, HStore store, CellComparator comparator) + throws IOException { this.config = new StripeStoreConfig(conf, store); this.compactionPolicy = new StripeCompactionPolicy(conf, store, config); this.storeFileManager = new StripeStoreFileManager(comparator, conf, this.config); - this.storeFlusher = new StripeStoreFlusher( - conf, store, this.compactionPolicy, this.storeFileManager); + this.storeFlusher = + new StripeStoreFlusher(conf, store, this.compactionPolicy, this.storeFileManager); this.compactor = new StripeCompactor(conf, store); } @@ -80,10 +80,10 @@ public List preSelect(List filesCompacting) { @Override public boolean select(List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, boolean forceMajor) throws IOException { - this.stripeRequest = compactionPolicy.selectCompaction( - storeFileManager, filesCompacting, mayUseOffPeak); - this.request = (this.stripeRequest == null) - ? new CompactionRequestImpl(new ArrayList<>()) : this.stripeRequest.getRequest(); + this.stripeRequest = + compactionPolicy.selectCompaction(storeFileManager, filesCompacting, mayUseOffPeak); + this.request = (this.stripeRequest == null) ? new CompactionRequestImpl(new ArrayList<>()) + : this.stripeRequest.getRequest(); return this.stripeRequest != null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java index 1c3ac683dcd8..68458bab2547 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,6 @@ import java.util.Map; import java.util.Optional; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; @@ -44,29 +42,27 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** - * Stripe implementation of {@link StoreFileManager}. - * Not thread safe - relies on external locking (in HStore). Collections that this class - * returns are immutable or unique to the call, so they should be safe. - * Stripe store splits the key space of the region into non-overlapping stripes, as well as - * some recent files that have all the keys (level 0). Each stripe contains a set of files. - * When L0 is compacted, it's split into the files corresponding to existing stripe boundaries, - * that can thus be added to stripes. 
- * When scan or get happens, it only has to read the files from the corresponding stripes. - * See {@link StripeCompactionPolicy} on how the stripes are determined; this class doesn't care. - * - * This class should work together with {@link StripeCompactionPolicy} and - * {@link org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor}. - * With regard to how they work, we make at least the following (reasonable) assumptions: - * - Compaction produces one file per new stripe (if any); that is easy to change. - * - Compaction has one contiguous set of stripes both in and out, except if L0 is involved. + * Stripe implementation of {@link StoreFileManager}. Not thread safe - relies on external locking + * (in HStore). Collections that this class returns are immutable or unique to the call, so they + * should be safe. Stripe store splits the key space of the region into non-overlapping stripes, as + * well as some recent files that have all the keys (level 0). Each stripe contains a set of files. + * When L0 is compacted, it's split into the files corresponding to existing stripe boundaries, that + * can thus be added to stripes. When scan or get happens, it only has to read the files from the + * corresponding stripes. See {@link StripeCompactionPolicy} on how the stripes are determined; this + * class doesn't care. This class should work together with {@link StripeCompactionPolicy} and + * {@link org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor}. With regard to how they + * work, we make at least the following (reasonable) assumptions: - Compaction produces one file per + * new stripe (if any); that is easy to change. - Compaction has one contiguous set of stripes both + * in and out, except if L0 is involved. */ @InterfaceAudience.Private public class StripeStoreFileManager - implements StoreFileManager, StripeCompactionPolicy.StripeInformationProvider { + implements StoreFileManager, StripeCompactionPolicy.StripeInformationProvider { private static final Logger LOG = LoggerFactory.getLogger(StripeStoreFileManager.class); /** @@ -84,8 +80,8 @@ public class StripeStoreFileManager final static byte[] INVALID_KEY = null; /** - * The state class. Used solely to replace results atomically during - * compactions and avoid complicated error handling. + * The state class. Used solely to replace results atomically during compactions and avoid + * complicated error handling. */ private static class State { /** @@ -96,9 +92,9 @@ private static class State { public byte[][] stripeEndRows = new byte[0][]; /** - * Files by stripe. Each element of the list corresponds to stripeEndRow element with the - * same index, except the last one. Inside each list, the files are in reverse order by - * seqNum. Note that the length of this is one higher than that of stripeEndKeys. + * Files by stripe. Each element of the list corresponds to stripeEndRow element with the same + * index, except the last one. Inside each list, the files are in reverse order by seqNum. Note + * that the length of this is one higher than that of stripeEndKeys. */ public ArrayList> stripeFiles = new ArrayList<>(); /** Level 0. The files are in reverse order by seqNum. 
*/ @@ -108,14 +104,17 @@ private static class State { public ImmutableList allFilesCached = ImmutableList.of(); private ImmutableList allCompactedFilesCached = ImmutableList.of(); } + private State state = null; /** Cached file metadata (or overrides as the case may be) */ private HashMap fileStarts = new HashMap<>(); private HashMap fileEnds = new HashMap<>(); - /** Normally invalid key is null, but in the map null is the result for "no key"; so use - * the following constant value in these maps instead. Note that this is a constant and - * we use it to compare by reference when we read from the map. */ + /** + * Normally invalid key is null, but in the map null is the result for "no key"; so use the + * following constant value in these maps instead. Note that this is a constant and we use it to + * compare by reference when we read from the map. + */ private static final byte[] INVALID_KEY_IN_MAP = new byte[0]; private final CellComparator cellComparator; @@ -123,12 +122,12 @@ private static class State { private final int blockingFileCount; - public StripeStoreFileManager( - CellComparator kvComparator, Configuration conf, StripeStoreConfig config) { + public StripeStoreFileManager(CellComparator kvComparator, Configuration conf, + StripeStoreConfig config) { this.cellComparator = kvComparator; this.config = config; - this.blockingFileCount = conf.getInt( - HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT); + this.blockingFileCount = + conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT); } @Override @@ -179,8 +178,10 @@ public int getStorefileCount() { return state.allFilesCached.size(); } - /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} - * for details on this methods. */ + /** + * See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} for details on this + * methods. + */ @Override public Iterator getCandidateFilesForRowKeyBefore(final KeyValue targetKey) { KeyBeforeConcatenatedLists result = new KeyBeforeConcatenatedLists(); @@ -195,14 +196,16 @@ public Iterator getCandidateFilesForRowKeyBefore(final KeyValue targ return result.iterator(); } - /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and - * {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)} - * for details on this methods. */ + /** + * See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and + * {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)} for + * details on this methods. + */ @Override public Iterator updateCandidateFilesForRowKeyBefore( Iterator candidateFiles, final KeyValue targetKey, final Cell candidate) { KeyBeforeConcatenatedLists.Iterator original = - (KeyBeforeConcatenatedLists.Iterator)candidateFiles; + (KeyBeforeConcatenatedLists.Iterator) candidateFiles; assert original != null; ArrayList> components = original.getComponents(); for (int firstIrrelevant = 0; firstIrrelevant < components.size(); ++firstIrrelevant) { @@ -211,8 +214,7 @@ public Iterator updateCandidateFilesForRowKeyBefore( // Entries are ordered as such: L0, then stripes in reverse order. We never remove // level 0; we remove the stripe, and all subsequent ones, as soon as we find the // first one that cannot possibly have better candidates. 
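Editor's note: to make the State layout described above concrete (N stripe end rows delimiting N+1 stripes, plus level 0 files that cover the whole key space), here is a simplified, hypothetical model of mapping a row to its stripe. The real manager works on byte[] rows with a CellComparator and uses binary search; Strings and a linear scan keep this sketch self-contained.

```java
import java.util.List;

/**
 * Simplified model of stripe lookup: a get/scan only has to consult level 0
 * plus the one stripe whose [previous end row, end row) range covers the row.
 */
public class StripeLookupSketch {
  // endRows.get(i) is the end row of stripe i; the last stripe is open-ended.
  static int findStripeForRow(List<String> endRows, String row) {
    for (int i = 0; i < endRows.size(); i++) {
      if (row.compareTo(endRows.get(i)) < 0) {
        return i;
      }
    }
    return endRows.size(); // past the last boundary: the open-ended last stripe
  }

  public static void main(String[] args) {
    List<String> endRows = List.of("g", "p"); // 2 boundaries -> 3 stripes
    System.out.println(findStripeForRow(endRows, "c")); // 0
    System.out.println(findStripeForRow(endRows, "k")); // 1
    System.out.println(findStripeForRow(endRows, "z")); // 2
  }
}
```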
- if (!isInvalid(endKey) && !isOpen(endKey) - && (nonOpenRowCompare(targetKey, endKey) >= 0)) { + if (!isInvalid(endKey) && !isOpen(endKey) && (nonOpenRowCompare(targetKey, endKey) >= 0)) { original.removeComponents(firstIrrelevant); break; } @@ -221,10 +223,9 @@ public Iterator updateCandidateFilesForRowKeyBefore( } /** - * Override of getSplitPoint that determines the split point as the boundary between two - * stripes, unless it causes significant imbalance between split sides' sizes. In that - * case, the split boundary will be chosen from the middle of one of the stripes to - * minimize imbalance. + * Override of getSplitPoint that determines the split point as the boundary between two stripes, + * unless it causes significant imbalance between split sides' sizes. In that case, the split + * boundary will be chosen from the middle of one of the stripes to minimize imbalance. * @return The split point, or null if no split is possible. */ @Override @@ -250,13 +251,14 @@ public Optional getSplitPoint() throws IOException { } } if (leftSize == 0 || rightSize == 0) { - String errMsg = String.format("Cannot split on a boundary - left index %d size %d, " - + "right index %d size %d", leftIndex, leftSize, rightIndex, rightSize); + String errMsg = String.format( + "Cannot split on a boundary - left index %d size %d, " + "right index %d size %d", + leftIndex, leftSize, rightIndex, rightSize); debugDumpState(errMsg); LOG.warn(errMsg); return getSplitPointFromAllFiles(); } - double ratio = (double)rightSize / leftSize; + double ratio = (double) rightSize / leftSize; if (ratio < 1) { ratio = 1 / ratio; } @@ -269,8 +271,7 @@ public Optional getSplitPoint() throws IOException { // moment, and it's not extremely important. // See if we can achieve better ratio if we split the bigger side in half. boolean isRightLarger = rightSize >= leftSize; - double newRatio = isRightLarger - ? getMidStripeSplitRatio(leftSize, rightSize, lastRightSize) + double newRatio = isRightLarger ? getMidStripeSplitRatio(leftSize, rightSize, lastRightSize) : getMidStripeSplitRatio(rightSize, leftSize, lastLeftSize); if (newRatio < 1) { newRatio = 1 / newRatio; @@ -278,8 +279,8 @@ public Optional getSplitPoint() throws IOException { if (newRatio >= ratio) { return Optional.of(state.stripeEndRows[leftIndex]); } - LOG.debug("Splitting the stripe - ratio w/o split " + ratio + ", ratio with split " - + newRatio + " configured ratio " + config.getMaxSplitImbalance()); + LOG.debug("Splitting the stripe - ratio w/o split " + ratio + ", ratio with split " + newRatio + + " configured ratio " + config.getMaxSplitImbalance()); // OK, we may get better ratio, get it. return StoreUtils.getSplitPoint(state.stripeFiles.get(isRightLarger ? rightIndex : leftIndex), cellComparator); @@ -293,7 +294,7 @@ private Optional getSplitPointFromAllFiles() throws IOException { } private double getMidStripeSplitRatio(long smallerSize, long largerSize, long lastLargerSize) { - return (double)(largerSize - lastLargerSize / 2f) / (smallerSize + lastLargerSize / 2f); + return (double) (largerSize - lastLargerSize / 2f) / (smallerSize + lastLargerSize / 2f); } @Override @@ -321,10 +322,10 @@ public Collection getFilesForScan(byte[] startRow, boolean includeSt @Override public void addCompactionResults(Collection compactedFiles, - Collection results) { + Collection results) { // See class comment for the assumptions we make here. 
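Editor's note: the getSplitPoint/getMidStripeSplitRatio hunks above encode a small piece of arithmetic: prefer splitting at a stripe boundary, and only consider splitting the bigger side's edge stripe in half when the boundary split would be too imbalanced (the configured maximum defaults to 1.5). A hedged, self-contained illustration of that arithmetic on made-up sizes:

```java
/**
 * Sketch of the split-imbalance check; names and the example sizes are hypothetical,
 * but midStripeSplitRatio mirrors the formula in the hunk above.
 */
public class SplitImbalanceSketch {
  // Ratio of larger to smaller side when splitting exactly at the stripe boundary.
  static double boundaryRatio(long leftSize, long rightSize) {
    double r = (double) rightSize / leftSize;
    return r < 1 ? 1 / r : r;
  }

  // Ratio if half of the larger side's edge stripe moves across the split point.
  static double midStripeSplitRatio(long smallerSize, long largerSize, long lastLargerSize) {
    return (largerSize - lastLargerSize / 2.0) / (smallerSize + lastLargerSize / 2.0);
  }

  public static void main(String[] args) {
    long leftSize = 100, rightSize = 300, lastRightStripe = 200;
    double atBoundary = boundaryRatio(leftSize, rightSize);                          // 3.0
    double withMidSplit = midStripeSplitRatio(leftSize, rightSize, lastRightStripe); // 1.0
    System.out.println(atBoundary + " vs " + withMidSplit);
    // 3.0 exceeds the default max imbalance of 1.5 while 1.0 does not, so the
    // mid-stripe split point would win for this (made-up) layout.
  }
}
```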
- LOG.debug("Attempting to merge compaction results: " + compactedFiles.size() + - " files replaced by " + results.size()); + LOG.debug("Attempting to merge compaction results: " + compactedFiles.size() + + " files replaced by " + results.size()); // In order to be able to fail in the middle of the operation, we'll operate on lazy // copies and apply the result at the end. CompactionOrFlushMergeCopy cmc = new CompactionOrFlushMergeCopy(false); @@ -367,7 +368,7 @@ public int getStoreCompactionPriority() { // many files we have, so do an approximate mapping to normal priority range; L0 counts // for all stripes. int l0 = state.level0Files.size(), sc = state.stripeFiles.size(); - int priority = (int)Math.ceil(((double)(this.blockingFileCount - fc + l0) / sc) - l0); + int priority = (int) Math.ceil(((double) (this.blockingFileCount - fc + l0) / sc) - l0); return (priority <= HStore.PRIORITY_USER) ? (HStore.PRIORITY_USER + 1) : priority; } @@ -385,10 +386,9 @@ private long getStripeFilesSize(int stripeIndex) { } /** - * Loads initial store files that were picked up from some physical location pertaining to - * this store (presumably). Unlike adding files after compaction, assumes empty initial - * sets, and is forgiving with regard to stripe constraints - at worst, many/all files will - * go to level 0. + * Loads initial store files that were picked up from some physical location pertaining to this + * store (presumably). Unlike adding files after compaction, assumes empty initial sets, and is + * forgiving with regard to stripe constraints - at worst, many/all files will go to level 0. * @param storeFiles Store files to add. */ private void loadUnclassifiedStoreFiles(List storeFiles) { @@ -403,10 +403,9 @@ private void loadUnclassifiedStoreFiles(List storeFiles) { if (isInvalid(startRow) || isInvalid(endRow)) { insertFileIntoStripe(level0Files, sf); // No metadata - goes to L0. ensureLevel0Metadata(sf); - } else if (!isOpen(startRow) && !isOpen(endRow) && - nonOpenRowCompare(startRow, endRow) >= 0) { + } else if (!isOpen(startRow) && !isOpen(endRow) && nonOpenRowCompare(startRow, endRow) >= 0) { LOG.error("Unexpected metadata - start row [" + Bytes.toString(startRow) + "], end row [" - + Bytes.toString(endRow) + "] in file [" + sf.getPath() + "], pushing to L0"); + + Bytes.toString(endRow) + "] in file [" + sf.getPath() + "], pushing to L0"); insertFileIntoStripe(level0Files, sf); // Bad metadata - goes to L0 also. ensureLevel0Metadata(sf); } else { @@ -517,23 +516,16 @@ private void debugDumpState(String string) { if (!LOG.isDebugEnabled()) return; StringBuilder sb = new StringBuilder(); sb.append("\n" + string + "; current stripe state is as such:"); - sb.append("\n level 0 with ") - .append(state.level0Files.size()) - .append( - " files: " - + TraditionalBinaryPrefix.long2String( - StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";"); + sb.append("\n level 0 with ").append(state.level0Files.size()) + .append(" files: " + TraditionalBinaryPrefix + .long2String(StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";"); for (int i = 0; i < state.stripeFiles.size(); ++i) { - String endRow = (i == state.stripeEndRows.length) - ? "(end)" : "[" + Bytes.toString(state.stripeEndRows[i]) + "]"; - sb.append("\n stripe ending in ") - .append(endRow) - .append(" with ") + String endRow = (i == state.stripeEndRows.length) ? 
"(end)" + : "[" + Bytes.toString(state.stripeEndRows[i]) + "]"; + sb.append("\n stripe ending in ").append(endRow).append(" with ") .append(state.stripeFiles.get(i).size()) - .append( - " files: " - + TraditionalBinaryPrefix.long2String( - StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";"); + .append(" files: " + TraditionalBinaryPrefix.long2String( + StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";"); } sb.append("\n").append(state.stripeFiles.size()).append(" stripes total."); sb.append("\n").append(getStorefileCount()).append(" files total."); @@ -606,25 +598,23 @@ private final int findStripeForRow(byte[] row, boolean isStart) { @Override public final byte[] getStartRow(int stripeIndex) { - return (stripeIndex == 0 ? OPEN_KEY : state.stripeEndRows[stripeIndex - 1]); + return (stripeIndex == 0 ? OPEN_KEY : state.stripeEndRows[stripeIndex - 1]); } @Override public final byte[] getEndRow(int stripeIndex) { - return (stripeIndex == state.stripeEndRows.length - ? OPEN_KEY : state.stripeEndRows[stripeIndex]); + return (stripeIndex == state.stripeEndRows.length ? OPEN_KEY + : state.stripeEndRows[stripeIndex]); } - private byte[] startOf(HStoreFile sf) { byte[] result = fileStarts.get(sf); // result and INVALID_KEY_IN_MAP are compared _only_ by reference on purpose here as the latter // serves only as a marker and is not to be confused with other empty byte arrays. // See Javadoc of INVALID_KEY_IN_MAP for more information - return (result == null) - ? sf.getMetadataValue(STRIPE_START_KEY) - : result == INVALID_KEY_IN_MAP ? INVALID_KEY : result; + return (result == null) ? sf.getMetadataValue(STRIPE_START_KEY) + : result == INVALID_KEY_IN_MAP ? INVALID_KEY : result; } private byte[] endOf(HStoreFile sf) { @@ -633,9 +623,8 @@ private byte[] endOf(HStoreFile sf) { // result and INVALID_KEY_IN_MAP are compared _only_ by reference on purpose here as the latter // serves only as a marker and is not to be confused with other empty byte arrays. // See Javadoc of INVALID_KEY_IN_MAP for more information - return (result == null) - ? sf.getMetadataValue(STRIPE_END_KEY) - : result == INVALID_KEY_IN_MAP ? INVALID_KEY : result; + return (result == null) ? sf.getMetadataValue(STRIPE_END_KEY) + : result == INVALID_KEY_IN_MAP ? INVALID_KEY : result; } /** @@ -646,7 +635,7 @@ private byte[] endOf(HStoreFile sf) { private static void insertFileIntoStripe(ArrayList stripe, HStoreFile sf) { // The only operation for which sorting of the files matters is KeyBefore. Therefore, // we will store the file in reverse order by seqNum from the outset. - for (int insertBefore = 0; ; ++insertBefore) { + for (int insertBefore = 0;; ++insertBefore) { if (insertBefore == stripe.size() || (StoreFileComparators.SEQ_ID.compare(sf, stripe.get(insertBefore)) >= 0)) { stripe.add(insertBefore, sf); @@ -656,13 +645,12 @@ private static void insertFileIntoStripe(ArrayList stripe, HStoreFil } /** - * An extension of ConcatenatedLists that has several peculiar properties. - * First, one can cut the tail of the logical list by removing last several sub-lists. - * Second, items can be removed thru iterator. - * Third, if the sub-lists are immutable, they are replaced with mutable copies when needed. - * On average KeyBefore operation will contain half the stripes as potential candidates, - * but will quickly cut down on them as it finds something in the more likely ones; thus, - * the above allow us to avoid unnecessary copying of a bunch of lists. 
+ * An extension of ConcatenatedLists that has several peculiar properties. First, one can cut the + * tail of the logical list by removing last several sub-lists. Second, items can be removed thru + * iterator. Third, if the sub-lists are immutable, they are replaced with mutable copies when + * needed. On average KeyBefore operation will contain half the stripes as potential candidates, + * but will quickly cut down on them as it finds something in the more likely ones; thus, the + * above allow us to avoid unnecessary copying of a bunch of lists. */ private static class KeyBeforeConcatenatedLists extends ConcatenatedLists { @Override @@ -706,9 +694,9 @@ public void remove() { } /** - * Non-static helper class for merging compaction or flush results. - * Since we want to merge them atomically (more or less), it operates on lazy copies, - * then creates a new state object and puts it in place. + * Non-static helper class for merging compaction or flush results. Since we want to merge them + * atomically (more or less), it operates on lazy copies, then creates a new state object and puts + * it in place. */ private class CompactionOrFlushMergeCopy { private ArrayList> stripeFiles = null; @@ -728,7 +716,7 @@ public CompactionOrFlushMergeCopy(boolean isFlush) { } private void mergeResults(Collection compactedFiles, - Collection results) { + Collection results) { assert this.compactedFiles == null && this.results == null; this.compactedFiles = compactedFiles; this.results = results; @@ -765,8 +753,9 @@ private State createNewState(boolean delCompactedFiles) { : this.stripeEndRows.toArray(new byte[this.stripeEndRows.size()][]); newState.stripeFiles = new ArrayList<>(this.stripeFiles.size()); for (List newStripe : this.stripeFiles) { - newState.stripeFiles.add(newStripe instanceof ImmutableList - ? (ImmutableList)newStripe : ImmutableList.copyOf(newStripe)); + newState.stripeFiles + .add(newStripe instanceof ImmutableList ? (ImmutableList) newStripe + : ImmutableList.copyOf(newStripe)); } List newAllFiles = new ArrayList<>(oldState.allFilesCached); @@ -813,7 +802,7 @@ private final ArrayList getStripeCopy(int index) { result = new ArrayList<>(stripeCopy); this.stripeFiles.set(index, result); } else { - result = (ArrayList)stripeCopy; + result = (ArrayList) stripeCopy; } return result; } @@ -861,8 +850,9 @@ private TreeMap processResults() { HStoreFile oldSf = newStripes.put(endRow, sf); if (oldSf != null) { throw new IllegalStateException( - "Compactor has produced multiple files for the stripe ending in [" + - Bytes.toString(endRow) + "], found " + sf.getPath() + " and " + oldSf.getPath()); + "Compactor has produced multiple files for the stripe ending in [" + + Bytes.toString(endRow) + "], found " + sf.getPath() + " and " + + oldSf.getPath()); } } return newStripes; @@ -881,8 +871,8 @@ private void removeCompactedFiles() { int stripeIndex = findStripeIndexByEndRow(oldEndRow); if (stripeIndex < 0) { throw new IllegalStateException( - "An allegedly compacted file [" + oldFile + "] does not belong" + - " to a known stripe (end row - [" + Bytes.toString(oldEndRow) + "])"); + "An allegedly compacted file [" + oldFile + "] does not belong" + + " to a known stripe (end row - [" + Bytes.toString(oldEndRow) + "])"); } source = getStripeCopy(stripeIndex); } @@ -893,14 +883,15 @@ private void removeCompactedFiles() { } /** - * See {@link #addCompactionResults(Collection, Collection)} - updates the stripe list with - * new candidate stripes/removes old stripes; produces new set of stripe end rows. 
- * @param newStripes New stripes - files by end row. + * See {@link #addCompactionResults(Collection, Collection)} - updates the stripe list with new + * candidate stripes/removes old stripes; produces new set of stripe end rows. + * @param newStripes New stripes - files by end row. */ private void processNewCandidateStripes(TreeMap newStripes) { // Validate that the removed and added aggregate ranges still make for a full key space. boolean hasStripes = !this.stripeFiles.isEmpty(); - this.stripeEndRows = new ArrayList<>(Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows)); + this.stripeEndRows = + new ArrayList<>(Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows)); int removeFrom = 0; byte[] firstStartRow = startOf(newStripes.firstEntry().getValue()); byte[] lastEndRow = newStripes.lastKey(); @@ -980,8 +971,8 @@ private void processNewCandidateStripes(TreeMap newStripes) assert !isOpen(previousEndRow); byte[] startRow = startOf(newStripe.getValue()); if (!rowEquals(previousEndRow, startRow)) { - throw new IllegalStateException("The new stripes produced by " + - (isFlush ? "flush" : "compaction") + " are not contiguous"); + throw new IllegalStateException("The new stripes produced by " + + (isFlush ? "flush" : "compaction") + " are not contiguous"); } } // Add the new stripe. @@ -1073,9 +1064,8 @@ public double getCompactionPressure() { double max = 0.0; for (ImmutableList stripeFile : stateLocal.stripeFiles) { int stripeFileCount = stripeFile.size(); - double normCount = - (double) (stripeFileCount + delta - config.getStripeCompactMinFiles()) - / (blockingFilePerStripe - config.getStripeCompactMinFiles()); + double normCount = (double) (stripeFileCount + delta - config.getStripeCompactMinFiles()) + / (blockingFilePerStripe - config.getStripeCompactMinFiles()); if (normCount >= 1.0) { // This could happen if stripe is not split evenly. Do not return values that larger than // 1.0 because we have not reached the blocking file count actually. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java index f8183b7645a5..29bc867a9de3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparator; @@ -35,8 +33,8 @@ import org.slf4j.LoggerFactory; /** - * Stripe implementation of StoreFlusher. Flushes files either into L0 file w/o metadata, or - * into separate striped files, avoiding L0. + * Stripe implementation of StoreFlusher. Flushes files either into L0 file w/o metadata, or into + * separate striped files, avoiding L0. 
*/ @InterfaceAudience.Private public class StripeStoreFlusher extends StoreFlusher { @@ -45,8 +43,8 @@ public class StripeStoreFlusher extends StoreFlusher { private final StripeCompactionPolicy policy; private final StripeCompactionPolicy.StripeInformationProvider stripes; - public StripeStoreFlusher(Configuration conf, HStore store, - StripeCompactionPolicy policy, StripeStoreFileManager stripes) { + public StripeStoreFlusher(Configuration conf, HStore store, StripeCompactionPolicy policy, + StripeStoreFileManager stripes) { super(conf, store); this.policy = policy; this.stripes = stripes; @@ -63,15 +61,15 @@ public List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum InternalScanner scanner = createScanner(snapshot.getScanners(), tracker); // Let policy select flush method. - StripeFlushRequest req = this.policy.selectFlush(store.getComparator(), this.stripes, - cellsCount); + StripeFlushRequest req = + this.policy.selectFlush(store.getComparator(), this.stripes, cellsCount); boolean success = false; StripeMultiFileWriter mw = null; try { mw = req.createWriter(); // Writer according to the policy. StripeMultiFileWriter.WriterFactory factory = createWriterFactory(snapshot); - StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner)scanner : null; + StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner) scanner : null; mw.init(storeScanner, factory); synchronized (flushLock) { @@ -149,8 +147,8 @@ public static class SizeStripeFlushRequest extends StripeFlushRequest { /** * @param targetCount The maximum number of stripes to flush into. - * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than - * total number of kvs, all the overflow data goes into the last stripe. + * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than total + * number of kvs, all the overflow data goes into the last stripe. */ public SizeStripeFlushRequest(CellComparator comparator, int targetCount, long targetKvs) { super(comparator); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java index e8eaf452d01a..3ff3dfcab555 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ThreadSafeMemStoreSizing.java @@ -19,12 +19,11 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; - import org.apache.yetus.audience.InterfaceAudience; /** - * Accounting of current heap and data sizes. - * Thread-safe. Many threads can do updates against this single instance. + * Accounting of current heap and data sizes. Thread-safe. Many threads can do updates against this + * single instance. * @see NonThreadSafeMemStoreSizing * @see MemStoreSize */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index fdf9db273a69..c7df77ed3877 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,14 +35,13 @@ /** * Stores minimum and maximum timestamp values, it is [minimumTimestamp, maximumTimestamp] in - * interval notation. - * Use this class at write-time ONLY. Too much synchronization to use at read time - * Use {@link TimeRange} at read time instead of this. See toTimeRange() to make TimeRange to use. - * MemStores use this class to track minimum and maximum timestamps. The TimeRangeTracker made by - * the MemStore is passed to the StoreFile for it to write out as part a flush in the the file + * interval notation. Use this class at write-time ONLY. Too much synchronization to use at read + * time Use {@link TimeRange} at read time instead of this. See toTimeRange() to make TimeRange to + * use. MemStores use this class to track minimum and maximum timestamps. The TimeRangeTracker made + * by the MemStore is passed to the StoreFile for it to write out as part a flush in the the file * metadata. If no memstore involved -- i.e. a compaction -- then the StoreFile will calculate its - * own TimeRangeTracker as it appends. The StoreFile serialized TimeRangeTracker is used - * at read time via an instance of {@link TimeRange} to test if Cells fit the StoreFile TimeRange. + * own TimeRangeTracker as it appends. The StoreFile serialized TimeRangeTracker is used at read + * time via an instance of {@link TimeRange} to test if Cells fit the StoreFile TimeRange. */ @InterfaceAudience.Private public abstract class TimeRangeTracker { @@ -92,13 +90,17 @@ public static TimeRangeTracker create(Type type, long minimumTimestamp, long max } protected abstract void setMax(long ts); + protected abstract void setMin(long ts); + protected abstract boolean compareAndSetMin(long expect, long update); + protected abstract boolean compareAndSetMax(long expect, long update); + /** - * Update the current TimestampRange to include the timestamp from cell. - * If the Key is of type DeleteColumn or DeleteFamily, it includes the - * entire time range from 0 to timestamp of the key. + * Update the current TimestampRange to include the timestamp from cell. If the Key + * is of type DeleteColumn or DeleteFamily, it includes the entire time range from 0 to timestamp + * of the key. * @param cell the Cell to include */ public void includeTimestamp(final Cell cell) { @@ -112,8 +114,8 @@ public void includeTimestamp(final Cell cell) { * If required, update the current TimestampRange to include timestamp * @param timestamp the timestamp value to include */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MT_CORRECTNESS", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MT_CORRECTNESS", + justification = "Intentional") void includeTimestamp(final long timestamp) { long initialMinTimestamp = getMin(); if (timestamp < initialMinTimestamp) { @@ -128,14 +130,14 @@ void includeTimestamp(final long timestamp) { } // When it reaches here, there are two possibilities: - // 1). timestamp >= curMinTimestamp, someone already sets the minimumTimestamp. In this case, - // it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP to see - // if it needs to update minimumTimestamp. Someone may already set both - // minimumTimestamp/minimumTimestamp to the same value(curMinTimestamp), - // need to check if maximumTimestamp needs to be updated. - // 2). timestamp < curMinTimestamp, it sets the minimumTimestamp successfully. 
- // In this case,it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP - // to see if it needs to set maximumTimestamp. + // 1). timestamp >= curMinTimestamp, someone already sets the minimumTimestamp. In this case, + // it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP to see + // if it needs to update minimumTimestamp. Someone may already set both + // minimumTimestamp/minimumTimestamp to the same value(curMinTimestamp), + // need to check if maximumTimestamp needs to be updated. + // 2). timestamp < curMinTimestamp, it sets the minimumTimestamp successfully. + // In this case,it still needs to check if initialMinTimestamp == INITIAL_MIN_TIMESTAMP + // to see if it needs to set maximumTimestamp. if (initialMinTimestamp != INITIAL_MIN_TIMESTAMP) { // Someone already sets minimumTimestamp and timestamp is less than minimumTimestamp. // In this case, no need to set maximumTimestamp as it will be set to at least @@ -185,7 +187,7 @@ public String toString() { /** * @param data the serialization data. It can't be null! * @return An instance of NonSyncTimeRangeTracker filled w/ the content of serialized - * NonSyncTimeRangeTracker in timeRangeTrackerBytes. + * NonSyncTimeRangeTracker in timeRangeTrackerBytes. * @throws IOException */ public static TimeRangeTracker parseFrom(final byte[] data) throws IOException { @@ -207,11 +209,11 @@ public static TimeRangeTracker parseFrom(final byte[] data, Type type) throws IO } /** - * This method used to serialize TimeRangeTracker (TRT) by protobuf while this breaks the - * forward compatibility on HFile.(See HBASE-21008) In previous hbase version ( < 2.0.0 ) we use - * DataOutput to serialize TRT, these old versions don't have capability to deserialize TRT - * which is serialized by protobuf. So we need to revert the change of serializing - * TimeRangeTracker back to DataOutput. For more information, please check HBASE-21012. + * This method used to serialize TimeRangeTracker (TRT) by protobuf while this breaks the forward + * compatibility on HFile.(See HBASE-21008) In previous hbase version ( < 2.0.0 ) we use + * DataOutput to serialize TRT, these old versions don't have capability to deserialize TRT which + * is serialized by protobuf. So we need to revert the change of serializing TimeRangeTracker back + * to DataOutput. For more information, please check HBASE-21012. * @param tracker TimeRangeTracker needed to be serialized. * @return byte array filled with serialized TimeRangeTracker. * @throws IOException if something goes wrong in writeLong. @@ -242,7 +244,7 @@ TimeRange toTimeRange() { return TimeRange.between(min, max); } - //In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. + // In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. public static class NonSyncTimeRangeTracker extends TimeRangeTracker { private long minimumTimestamp = INITIAL_MIN_TIMESTAMP; private long maximumTimestamp = INITIAL_MAX_TIMESTAMP; @@ -299,7 +301,7 @@ public long getMax() { } } - //In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. + // In order to estimate the heap size, this inner class need to be accessible to TestHeapSize. 
public static class SyncTimeRangeTracker extends TimeRangeTracker { private final AtomicLong minimumTimestamp = new AtomicLong(INITIAL_MIN_TIMESTAMP); private final AtomicLong maximumTimestamp = new AtomicLong(INITIAL_MAX_TIMESTAMP); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java index d5be356f93f9..2d80fad37a1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,13 +22,10 @@ /** * A list of segment managers coupled with the version of the memstore (version at the time it was - * created). - * This structure helps to guarantee that the compaction pipeline updates after the compaction is - * updated in a consistent (atomic) way. - * Specifically, swapping some of the elements in a compaction pipeline with a new compacted - * element is permitted only if the pipeline version is the same as the version attached to the - * elements. - * + * created). This structure helps to guarantee that the compaction pipeline updates after the + * compaction is updated in a consistent (atomic) way. Specifically, swapping some of the elements + * in a compaction pipeline with a new compacted element is permitted only if the pipeline version + * is the same as the version attached to the elements. */ @InterfaceAudience.Private public class VersionedSegmentsList { @@ -70,9 +66,9 @@ public int getNumOfSegments() { for (ImmutableSegment s : storeSegments) { double segmentUniques = s.getNumUniqueKeys(); - if(segmentUniques != CellSet.UNKNOWN_NUM_UNIQUES) { + if (segmentUniques != CellSet.UNKNOWN_NUM_UNIQUES) { segmentCells = s.getCellsCount(); - if(segmentCells > maxCells) { + if (segmentCells > maxCells) { maxCells = segmentCells; est = segmentUniques / segmentCells; } @@ -80,7 +76,7 @@ public int getNumOfSegments() { // else ignore this segment specifically since if the unique number is unknown counting // cells can be expensive } - if(maxCells == 0) { + if (maxCells == 0) { return 1.0; } return est; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java index 19b7a98627e6..2f5d405beff3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -56,7 +55,7 @@ public StoreFileWriter createWriter() throws IOException { @Override public StoreFileWriter createWriterWithStoragePolicy(String fileStoragePolicy) - throws IOException { + throws IOException { return AbstractMultiOutputCompactor.this.createWriter(fd, shouldDropBehind, fileStoragePolicy, major); } @@ -79,7 +78,7 @@ protected void abortWriter() throws IOException { e); } } - //this step signals that the target file is no longer writen and can be cleaned up + // this step signals that the target file is no longer writen and can be cleaned up writer = null; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CloseChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CloseChecker.java index ea711c037729..cc26068190eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CloseChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CloseChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -42,7 +44,6 @@ public CloseChecker(Configuration conf, long currentTime) { /** * Check periodically to see if a system stop is requested every written bytes reach size limit. - * * @return if true, system stop. */ public boolean isSizeLimit(Store store, long bytesWritten) { @@ -61,7 +62,6 @@ public boolean isSizeLimit(Store store, long bytesWritten) { /** * Check periodically to see if a system stop is requested every time. - * * @return if true, system stop. */ public boolean isTimeLimit(Store store, long now) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java index 75966b9e7467..8ad508f199ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,31 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.compactions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; /** *
      - * Compaction configuration for a particular instance of HStore. - * Takes into account both global settings and ones set on the column family/store. - * Control knobs for default compaction algorithm: + * Compaction configuration for a particular instance of HStore. Takes into account both global + * settings and ones set on the column family/store. Control knobs for default compaction algorithm: *
      *
      - * maxCompactSize - upper bound on file size to be included in minor compactions - * minCompactSize - lower bound below which compaction is selected without ratio test - * minFilesToCompact - lower bound on number of files in any minor compaction - * maxFilesToCompact - upper bound on number of files in any minor compaction - * compactionRatio - Ratio used for compaction - * minLocalityToForceCompact - Locality threshold for a store file to major compact (HBASE-11195) + * maxCompactSize - upper bound on file size to be included in minor compactions minCompactSize - + * lower bound below which compaction is selected without ratio test minFilesToCompact - lower bound + * on number of files in any minor compaction maxFilesToCompact - upper bound on number of files in + * any minor compaction compactionRatio - Ratio used for compaction minLocalityToForceCompact - + * Locality threshold for a store file to major compact (HBASE-11195) *
      * Set parameter as "hbase.hstore.compaction.<attribute>" */ @@ -52,15 +48,15 @@ public class CompactionConfiguration { public static final String HBASE_HSTORE_COMPACTION_RATIO_KEY = "hbase.hstore.compaction.ratio"; public static final String HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY = - "hbase.hstore.compaction.ratio.offpeak"; + "hbase.hstore.compaction.ratio.offpeak"; public static final String HBASE_HSTORE_COMPACTION_MIN_KEY_OLD = - "hbase.hstore.compactionThreshold"; + "hbase.hstore.compactionThreshold"; public static final String HBASE_HSTORE_COMPACTION_MIN_KEY = "hbase.hstore.compaction.min"; public static final String HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY = - "hbase.hstore.compaction.min.size"; + "hbase.hstore.compaction.min.size"; public static final String HBASE_HSTORE_COMPACTION_MAX_KEY = "hbase.hstore.compaction.max"; public static final String HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY = - "hbase.hstore.compaction.max.size"; + "hbase.hstore.compaction.max.size"; public static final String HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY = "hbase.hstore.compaction.max.size.offpeak"; public static final String HBASE_HSTORE_OFFPEAK_END_HOUR = "hbase.offpeak.end.hour"; @@ -75,36 +71,36 @@ public class CompactionConfiguration { * The epoch time length for the windows we no longer compact */ public static final String DATE_TIERED_MAX_AGE_MILLIS_KEY = - "hbase.hstore.compaction.date.tiered.max.storefile.age.millis"; + "hbase.hstore.compaction.date.tiered.max.storefile.age.millis"; public static final String DATE_TIERED_INCOMING_WINDOW_MIN_KEY = - "hbase.hstore.compaction.date.tiered.incoming.window.min"; + "hbase.hstore.compaction.date.tiered.incoming.window.min"; public static final String COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY = - "hbase.hstore.compaction.date.tiered.window.policy.class"; + "hbase.hstore.compaction.date.tiered.window.policy.class"; public static final String DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY = - "hbase.hstore.compaction.date.tiered.single.output.for.minor.compaction"; + "hbase.hstore.compaction.date.tiered.single.output.for.minor.compaction"; - private static final Class - DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS = ExploringCompactionPolicy.class; + private static final Class DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS = + ExploringCompactionPolicy.class; public static final String DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY = - "hbase.hstore.compaction.date.tiered.window.factory.class"; + "hbase.hstore.compaction.date.tiered.window.factory.class"; - private static final Class - DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS = ExponentialCompactionWindowFactory.class; + private static final Class DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS = + ExponentialCompactionWindowFactory.class; public static final String DATE_TIERED_STORAGE_POLICY_ENABLE_KEY = - "hbase.hstore.compaction.date.tiered.storage.policy.enable"; + "hbase.hstore.compaction.date.tiered.storage.policy.enable"; public static final String DATE_TIERED_HOT_WINDOW_AGE_MILLIS_KEY = - "hbase.hstore.compaction.date.tiered.hot.window.age.millis"; + "hbase.hstore.compaction.date.tiered.hot.window.age.millis"; public static final String DATE_TIERED_HOT_WINDOW_STORAGE_POLICY_KEY = - "hbase.hstore.compaction.date.tiered.hot.window.storage.policy"; + "hbase.hstore.compaction.date.tiered.hot.window.storage.policy"; public static final String DATE_TIERED_WARM_WINDOW_AGE_MILLIS_KEY = - "hbase.hstore.compaction.date.tiered.warm.window.age.millis"; + 
"hbase.hstore.compaction.date.tiered.warm.window.age.millis"; public static final String DATE_TIERED_WARM_WINDOW_STORAGE_POLICY_KEY = - "hbase.hstore.compaction.date.tiered.warm.window.storage.policy"; + "hbase.hstore.compaction.date.tiered.warm.window.storage.policy"; /** Windows older than warm age belong to COLD_WINDOW **/ public static final String DATE_TIERED_COLD_WINDOW_STORAGE_POLICY_KEY = - "hbase.hstore.compaction.date.tiered.cold.window.storage.policy"; + "hbase.hstore.compaction.date.tiered.cold.window.storage.policy"; Configuration conf; StoreConfigInformation storeConfigInfo; @@ -139,34 +135,34 @@ public class CompactionConfiguration { this.storeConfigInfo = storeConfigInfo; maxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, Long.MAX_VALUE); - offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, - maxCompactSize); - minCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, - storeConfigInfo.getMemStoreFlushSize()); + offPeakMaxCompactSize = + conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, maxCompactSize); + minCompactSize = + conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, storeConfigInfo.getMemStoreFlushSize()); minFilesToCompact = Math.max(2, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY, - conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY_OLD, 3))); + conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY_OLD, 3))); maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10); compactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F); offPeakCompactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F); throttlePoint = conf.getLong("hbase.regionserver.thread.compaction.throttle", - 2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize()); + 2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize()); majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, - HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD); + HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD); majorCompactionJitter = conf.getFloat(HConstants.MAJOR_COMPACTION_JITTER, - HConstants.DEFAULT_MAJOR_COMPACTION_JITTER); + HConstants.DEFAULT_MAJOR_COMPACTION_JITTER); minLocalityToForceCompact = conf.getFloat(HBASE_HSTORE_MIN_LOCALITY_TO_SKIP_MAJOR_COMPACT, 0f); dateTieredMaxStoreFileAgeMillis = conf.getLong(DATE_TIERED_MAX_AGE_MILLIS_KEY, Long.MAX_VALUE); dateTieredIncomingWindowMin = conf.getInt(DATE_TIERED_INCOMING_WINDOW_MIN_KEY, 6); - compactionPolicyForDateTieredWindow = conf.get( - COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY, - DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS.getName()); - dateTieredSingleOutputForMinorCompaction = conf - .getBoolean(DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, true); - this.dateTieredCompactionWindowFactory = conf.get( - DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY, - DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS.getName()); + compactionPolicyForDateTieredWindow = + conf.get(COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY, + DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS.getName()); + dateTieredSingleOutputForMinorCompaction = + conf.getBoolean(DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, true); + this.dateTieredCompactionWindowFactory = + conf.get(DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY, + DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS.getName()); // for Heterogeneous Storage dateTieredStoragePolicyEnable = conf.getBoolean(DATE_TIERED_STORAGE_POLICY_ENABLE_KEY, false); hotWindowAgeMillis = 
conf.getLong(DATE_TIERED_HOT_WINDOW_AGE_MILLIS_KEY, 86400000L); @@ -181,32 +177,20 @@ public class CompactionConfiguration { public String toString() { return String.format( "size [minCompactSize:%s, maxCompactSize:%s, offPeakMaxCompactSize:%s);" - + " files [minFilesToCompact:%d, maxFilesToCompact:%d);" - + " ratio %f; off-peak ratio %f; throttle point %d;" - + " major period %d, major jitter %f, min locality to compact %f;" - + " tiered compaction: max_age %d, incoming window min %d," - + " compaction policy for tiered window %s, single output for minor %b," - + " compaction window factory %s," - + " region %s columnFamilyName %s", - StringUtils.byteDesc(minCompactSize), - StringUtils.byteDesc(maxCompactSize), - StringUtils.byteDesc(offPeakMaxCompactSize), - minFilesToCompact, - maxFilesToCompact, - compactionRatio, - offPeakCompactionRatio, - throttlePoint, - majorCompactionPeriod, - majorCompactionJitter, - minLocalityToForceCompact, - dateTieredMaxStoreFileAgeMillis, - dateTieredIncomingWindowMin, - compactionPolicyForDateTieredWindow, - dateTieredSingleOutputForMinorCompaction, - dateTieredCompactionWindowFactory, + + " files [minFilesToCompact:%d, maxFilesToCompact:%d);" + + " ratio %f; off-peak ratio %f; throttle point %d;" + + " major period %d, major jitter %f, min locality to compact %f;" + + " tiered compaction: max_age %d, incoming window min %d," + + " compaction policy for tiered window %s, single output for minor %b," + + " compaction window factory %s," + " region %s columnFamilyName %s", + StringUtils.byteDesc(minCompactSize), StringUtils.byteDesc(maxCompactSize), + StringUtils.byteDesc(offPeakMaxCompactSize), minFilesToCompact, maxFilesToCompact, + compactionRatio, offPeakCompactionRatio, throttlePoint, majorCompactionPeriod, + majorCompactionJitter, minLocalityToForceCompact, dateTieredMaxStoreFileAgeMillis, + dateTieredIncomingWindowMin, compactionPolicyForDateTieredWindow, + dateTieredSingleOutputForMinorCompaction, dateTieredCompactionWindowFactory, RegionInfo.prettyPrint(storeConfigInfo.getRegionInfo().getEncodedName()), - storeConfigInfo.getColumnFamilyName() - ); + storeConfigInfo.getColumnFamilyName()); } /** @@ -267,16 +251,16 @@ public long getThrottlePoint() { } /** - * @return Major compaction period from compaction. - * Major compactions are selected periodically according to this parameter plus jitter + * @return Major compaction period from compaction. Major compactions are selected periodically + * according to this parameter plus jitter */ public long getMajorCompactionPeriod() { return majorCompactionPeriod; } /** - * @return Major the jitter fraction, the fraction within which the major compaction - * period is randomly chosen from the majorCompactionPeriod in each store. + * @return Major the jitter fraction, the fraction within which the major compaction period is + * randomly chosen from the majorCompactionPeriod in each store. */ public float getMajorCompactionJitter() { return majorCompactionJitter; @@ -284,8 +268,8 @@ public float getMajorCompactionJitter() { /** * @return Block locality ratio, the ratio at which we will include old regions with a single - * store file for major compaction. Used to improve block locality for regions that - * haven't had writes in a while but are still being read. + * store file for major compaction. Used to improve block locality for regions that + * haven't had writes in a while but are still being read. 
*/ public float getMinLocalityToForceCompact() { return minLocalityToForceCompact; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java index 9aa383c4e66f..672a68c83a57 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,27 +19,24 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; - /** - * This class holds all "physical" details necessary to run a compaction, - * and abstracts away the details specific to a particular compaction. - * It also has compaction request with all the logical details. - * Hence, this class is basically the compaction. + * This class holds all "physical" details necessary to run a compaction, and abstracts away the + * details specific to a particular compaction. It also has compaction request with all the logical + * details. Hence, this class is basically the compaction. */ @InterfaceAudience.Private public abstract class CompactionContext { protected CompactionRequestImpl request = null; /** - * Called before coprocessor preCompactSelection and should filter the candidates - * for coprocessor; i.e. exclude the files that definitely cannot be compacted at this time. + * Called before coprocessor preCompactSelection and should filter the candidates for coprocessor; + * i.e. exclude the files that definitely cannot be compacted at this time. * @param filesCompacting files currently compacting * @return the list of files that can theoretically be compacted. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java index dfff2f980fbb..5feaf15b631f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionLifeCycleTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java index 755b9d39cb2e..56d3c913ece0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; import org.apache.yetus.audience.InterfaceAudience; /** - * A compaction policy determines how to select files for compaction, - * how to compact them, and how to generate the compacted files. + * A compaction policy determines how to select files for compaction, how to compact them, and how + * to generate the compacted files. */ @InterfaceAudience.Private public abstract class CompactionPolicy { @@ -55,8 +52,8 @@ public abstract boolean shouldPerformMajorCompaction(Collection file public abstract boolean throttleCompaction(long compactionSize); /** - * Inform the policy that some configuration has been change, - * so cached value should be updated it any. + * Inform the policy that some configuration has been change, so cached value should be updated it + * any. */ public void setConf(Configuration conf) { this.comConf = new CompactionConfiguration(conf, this.storeConfigInfo); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java index 942cc4f3fd6b..7c95ee883582 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import org.apache.yetus.audience.InterfaceAudience; @@ -24,13 +22,10 @@ import org.slf4j.LoggerFactory; /** - * This class holds information relevant for tracking the progress of a - * compaction. - * - *
      The metrics tracked allow one to calculate the percent completion of the - * compaction based on the number of Key/Value pairs already compacted vs. - * total amount scheduled to be compacted. - * + * This class holds information relevant for tracking the progress of a compaction. + *
      + * The metrics tracked allow one to calculate the percent completion of the compaction based on the + * number of Key/Value pairs already compacted vs. total amount scheduled to be compacted. */ @InterfaceAudience.Private public class CompactionProgress { @@ -43,18 +38,20 @@ public class CompactionProgress { /** the total size of data processed by the currently running compaction, in bytes */ public long totalCompactedSize = 0; - /** Constructor + /** + * Constructor * @param totalCompactingKVs the total Key/Value pairs to be compacted */ public CompactionProgress(long totalCompactingKVs) { this.totalCompactingKVs = totalCompactingKVs; } - /** getter for calculated percent complete + /** + * getter for calculated percent complete * @return float */ public float getProgressPct() { - return (float)currentCompactedKVs / getTotalCompactingKVs(); + return (float) currentCompactedKVs / getTotalCompactingKVs(); } /** @@ -65,8 +62,8 @@ public void cancel() { } /** - * Marks the compaction as complete by setting total to current KV count; - * Total KV count is an estimate, so there might be a discrepancy otherwise. + * Marks the compaction as complete by setting total to current KV count; Total KV count is an + * estimate, so there might be a discrepancy otherwise. */ public void complete() { this.totalCompactingKVs = this.currentCompactedKVs; @@ -77,8 +74,8 @@ public void complete() { */ public long getTotalCompactingKVs() { if (totalCompactingKVs < currentCompactedKVs) { - LOG.debug("totalCompactingKVs={} less than currentCompactedKVs={}", - totalCompactingKVs, currentCompactedKVs); + LOG.debug("totalCompactingKVs={} less than currentCompactedKVs={}", totalCompactingKVs, + currentCompactedKVs); return currentCompactedKVs; } return totalCompactingKVs; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 73f36837f9ec..723cefb73d78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +17,11 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import java.util.Collection; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.yetus.audience.InterfaceAudience; -import java.util.Collection; - /** * Coprocessors use this interface to get details about compaction. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java index 899219d70b24..1c256eed2740 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequestImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.Collection; import java.util.Collections; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; @@ -39,7 +37,11 @@ public class CompactionRequestImpl implements CompactionRequest { // was this compaction promoted to an off-peak private boolean isOffPeak = false; - private enum DisplayCompactionType { MINOR, ALL_FILES, MAJOR } + + private enum DisplayCompactionType { + MINOR, ALL_FILES, MAJOR + } + private DisplayCompactionType isMajor = DisplayCompactionType.MINOR; private int priority = NO_PRIORITY; private Collection filesToCompact; @@ -229,11 +231,11 @@ public String toString() { .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1)) .collect(Collectors.joining(", ")); - return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" + - this.getFiles().size() + ", fileSize=" + - TraditionalBinaryPrefix.long2String(totalSize, "", 1) + - ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" + - selectionTime; + return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" + + this.getFiles().size() + ", fileSize=" + + TraditionalBinaryPrefix.long2String(totalSize, "", 1) + + ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" + + selectionTime; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java index 31a7ca7ea4ed..04b7cf2398a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequester.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,13 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import edu.umd.cs.findbugs.annotations.Nullable; import java.io.IOException; - import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; -import edu.umd.cs.findbugs.annotations.Nullable; - /** * Request a compaction. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java index ad0cfb4cb396..d71a9c0593ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindow.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java index bd5c85c5770c..9689464f1ba8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionWindowFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 93f0555b7f4d..028889cc2c75 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -88,7 +88,7 @@ public abstract class Compactor { protected int keepSeqIdPeriod; // Configs that drive whether we drop page cache behind compactions - protected static final String MAJOR_COMPACTION_DROP_CACHE = + protected static final String MAJOR_COMPACTION_DROP_CACHE = "hbase.regionserver.majorcompaction.pagecache.drop"; protected static final String MINOR_COMPACTION_DROP_CACHE = "hbase.regionserver.minorcompaction.pagecache.drop"; @@ -101,24 +101,25 @@ public abstract class Compactor { // make it volatile. protected volatile T writer = null; - //TODO: depending on Store is not good but, realistically, all compactors currently do. + // TODO: depending on Store is not good but, realistically, all compactors currently do. Compactor(Configuration conf, HStore store) { this.conf = conf; this.store = store; this.compactionKVMax = - this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); - this.majorCompactionCompression = (store.getColumnFamilyDescriptor() == null) ? - Compression.Algorithm.NONE : store.getColumnFamilyDescriptor().getMajorCompactionCompressionType(); - this.minorCompactionCompression = (store.getColumnFamilyDescriptor() == null) ? - Compression.Algorithm.NONE : store.getColumnFamilyDescriptor().getMinorCompactionCompressionType(); - this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD, - HConstants.MIN_KEEP_SEQID_PERIOD), HConstants.MIN_KEEP_SEQID_PERIOD); + this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); + this.majorCompactionCompression = + (store.getColumnFamilyDescriptor() == null) ? Compression.Algorithm.NONE + : store.getColumnFamilyDescriptor().getMajorCompactionCompressionType(); + this.minorCompactionCompression = + (store.getColumnFamilyDescriptor() == null) ? 
Compression.Algorithm.NONE + : store.getColumnFamilyDescriptor().getMinorCompactionCompressionType(); + this.keepSeqIdPeriod = + Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD, HConstants.MIN_KEEP_SEQID_PERIOD), + HConstants.MIN_KEEP_SEQID_PERIOD); this.dropCacheMajor = conf.getBoolean(MAJOR_COMPACTION_DROP_CACHE, true); this.dropCacheMinor = conf.getBoolean(MINOR_COMPACTION_DROP_CACHE, true); } - - protected interface CellSinkFactory { S createWriter(InternalScanner scanner, FileDetails fd, boolean shouldDropBehind, boolean major) throws IOException; @@ -140,7 +141,7 @@ protected static class FileDetails { public long maxSeqId = 0; /** Latest memstore read point found in any of the involved files */ public long maxMVCCReadpoint = 0; - /** Max tags length**/ + /** Max tags length **/ public int maxTagsLength = 0; /** Min SeqId to keep during a major compaction **/ public long minSeqIdToKeep = 0; @@ -155,17 +156,17 @@ protected static class FileDetails { * @parma major If major compaction * @return The result. */ - private FileDetails getFileDetails( - Collection filesToCompact, boolean allFiles, boolean major) throws IOException { + private FileDetails getFileDetails(Collection filesToCompact, boolean allFiles, + boolean major) throws IOException { FileDetails fd = new FileDetails(); - long oldestHFileTimestampToKeepMVCC = EnvironmentEdgeManager.currentTime() - - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod); + long oldestHFileTimestampToKeepMVCC = + EnvironmentEdgeManager.currentTime() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod); for (HStoreFile file : filesToCompact) { - if(allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) { + if (allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) { // when isAllFiles is true, all files are compacted so we can calculate the smallest // MVCC value to keep - if(fd.minSeqIdToKeep < file.getMaxMemStoreTS()) { + if (fd.minSeqIdToKeep < file.getMaxMemStoreTS()) { fd.minSeqIdToKeep = file.getMaxMemStoreTS(); } } @@ -192,8 +193,7 @@ private FileDetails getFileDetails( // SeqId number. if (r.isBulkLoaded()) { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID()); - } - else { + } else { tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY); if (tmp != null) { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp)); @@ -218,17 +218,16 @@ private FileDetails getFileDetails( } } tmp = fileInfo.get(TIMERANGE_KEY); - fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP: TimeRangeTracker.parseFrom(tmp).getMax(); - LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, " - + "encoding={}, compression={}, seqNum={}{}", - (file.getPath() == null? null: file.getPath().getName()), - keyCount, - r.getBloomFilterType().toString(), - TraditionalBinaryPrefix.long2String(r.length(), "", 1), - r.getHFileReader().getDataBlockEncoding(), - major ? majorCompactionCompression : minorCompactionCompression, - seqNum, - (allFiles? ", earliestPutTs=" + earliestPutTs: "")); + fd.latestPutTs = + tmp == null ? HConstants.LATEST_TIMESTAMP : TimeRangeTracker.parseFrom(tmp).getMax(); + LOG.debug( + "Compacting {}, keycount={}, bloomtype={}, size={}, " + + "encoding={}, compression={}, seqNum={}{}", + (file.getPath() == null ? null : file.getPath().getName()), keyCount, + r.getBloomFilterType().toString(), TraditionalBinaryPrefix.long2String(r.length(), "", 1), + r.getHFileReader().getDataBlockEncoding(), + major ? 
majorCompactionCompression : minorCompactionCompression, seqNum, + (allFiles ? ", earliestPutTs=" + earliestPutTs : "")); } return fd; } @@ -252,8 +251,8 @@ protected interface InternalScannerFactory { ScanType getScanType(CompactionRequestImpl request); - InternalScanner createScanner(ScanInfo scanInfo, List scanners, ScanType scanType, - FileDetails fd, long smallestReadPoint) throws IOException; + InternalScanner createScanner(ScanInfo scanInfo, List scanners, + ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException; } protected final InternalScannerFactory defaultScannerFactory = new InternalScannerFactory() { @@ -272,12 +271,12 @@ public InternalScanner createScanner(ScanInfo scanInfo, List s }; protected final CreateStoreFileWriterParams createParams(FileDetails fd, boolean shouldDropBehind, - boolean major) { + boolean major) { return CreateStoreFileWriterParams.create().maxKeyCount(fd.maxKeyCount) - .compression(major ? majorCompactionCompression : minorCompactionCompression) - .isCompaction(true).includeMVCCReadpoint(fd.maxMVCCReadpoint > 0) - .includesTag(fd.maxTagsLength > 0).shouldDropBehind(shouldDropBehind) - .totalCompactedFilesSize(fd.totalCompactedFilesSize); + .compression(major ? majorCompactionCompression : minorCompactionCompression) + .isCompaction(true).includeMVCCReadpoint(fd.maxMVCCReadpoint > 0) + .includesTag(fd.maxTagsLength > 0).shouldDropBehind(shouldDropBehind) + .totalCompactedFilesSize(fd.totalCompactedFilesSize); } /** @@ -287,16 +286,16 @@ protected final CreateStoreFileWriterParams createParams(FileDetails fd, boolean * @throws IOException if creation failed */ protected final StoreFileWriter createWriter(FileDetails fd, boolean shouldDropBehind, - boolean major) throws IOException { + boolean major) throws IOException { // When all MVCC readpoints are 0, don't write them. // See HBASE-8166, HBASE-12600, and HBASE-13389. 
return store.getStoreEngine().createWriter(createParams(fd, shouldDropBehind, major)); } protected final StoreFileWriter createWriter(FileDetails fd, boolean shouldDropBehind, - String fileStoragePolicy, boolean major) throws IOException { - return store.getStoreEngine() - .createWriter(createParams(fd, shouldDropBehind, major).fileStoragePolicy(fileStoragePolicy)); + String fileStoragePolicy, boolean major) throws IOException { + return store.getStoreEngine().createWriter( + createParams(fd, shouldDropBehind, major).fileStoragePolicy(fileStoragePolicy)); } private ScanInfo preCompactScannerOpen(CompactionRequestImpl request, ScanType scanType, @@ -343,7 +342,7 @@ protected final List compact(final CompactionRequestImpl request, InternalScanner scanner = null; boolean finished = false; List scanners = - createFileScanners(request.getFiles(), smallestReadPoint, dropCache); + createFileScanners(request.getFiles(), smallestReadPoint, dropCache); try { /* Include deletes, unless we are doing a major compaction */ ScanType scanType = scannerFactory.getScanType(request); @@ -356,14 +355,13 @@ protected final List compact(final CompactionRequestImpl request, smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint); cleanSeqId = true; } - if (writer != null){ + if (writer != null) { LOG.warn("Writer exists when it should not: " + getCompactionTargets().stream() - .map(n -> n.toString()) - .collect(Collectors.joining(", ", "{ ", " }"))); + .map(n -> n.toString()).collect(Collectors.joining(", ", "{ ", " }"))); } writer = sinkFactory.createWriter(scanner, fd, dropCache, request.isMajor()); - finished = performCompaction(fd, scanner, smallestReadPoint, cleanSeqId, - throughputController, request.isAllFiles(), request.getFiles().size()); + finished = performCompaction(fd, scanner, smallestReadPoint, cleanSeqId, throughputController, + request.isAllFiles(), request.getFiles().size()); if (!finished) { throw new InterruptedIOException("Aborting compaction of store " + store + " in region " + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted."); @@ -390,8 +388,8 @@ protected final List compact(final CompactionRequestImpl request, return commitWriter(fd, request); } - protected abstract List commitWriter(FileDetails fd, - CompactionRequestImpl request) throws IOException; + protected abstract List commitWriter(FileDetails fd, CompactionRequestImpl request) + throws IOException; protected abstract void abortWriter() throws IOException; @@ -425,12 +423,12 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, long now = 0; boolean hasMore; ScannerContext scannerContext = - ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); throughputController.start(compactionName); KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? 
(KeyValueScanner) scanner : null; long shippedCallSizeLimit = - (long) numofFilesToCompact * this.store.getColumnFamilyDescriptor().getBlocksize(); + (long) numofFilesToCompact * this.store.getColumnFamilyDescriptor().getBlocksize(); try { do { hasMore = scanner.next(cells, scannerContext); @@ -508,7 +506,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, } catch (InterruptedException e) { progress.cancel(); throw new InterruptedIOException( - "Interrupted while control throughput of compacting " + compactionName); + "Interrupted while control throughput of compacting " + compactionName); } finally { // Clone last cell in the final because writer will append last cell when committing. If // don't clone here and once the scanner get closed, then the memory of last cell will be @@ -559,13 +557,13 @@ public List getCompactionTargets() { return Arrays.asList(((StoreFileWriter) writer).getPath()); } return ((AbstractMultiFileWriter) writer).writers().stream().map(sfw -> sfw.getPath()) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } /** * Reset the Writer when the new storefiles were successfully added */ - public void resetWriter(){ + public void resetWriter() { writer = null; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java index ebbaa4560472..8c2a65395fbf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CurrentHourProvider.java @@ -68,7 +68,7 @@ public static int getCurrentHour() { } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") static void advanceTick() { tick = nextTick(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java index f60e97db4836..aa9c8610d009 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -84,9 +83,9 @@ public DateTieredCompactionPolicy(Configuration conf, StoreConfigInformation sto + comConf.getCompactionPolicyForDateTieredWindow() + "'", e); } try { - windowFactory = ReflectionUtils.instantiateWithCustomCtor( - comConf.getDateTieredCompactionWindowFactory(), - new Class[] { CompactionConfiguration.class }, new Object[] { comConf }); + windowFactory = + ReflectionUtils.instantiateWithCustomCtor(comConf.getDateTieredCompactionWindowFactory(), + new Class[] { CompactionConfiguration.class }, new Object[] { comConf }); } catch (Exception e) { throw new IOException("Unable to load configured window factory '" + comConf.getDateTieredCompactionWindowFactory() + "'", e); @@ -125,8 +124,8 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac long now = EnvironmentEdgeManager.currentTime(); if (lowTimestamp <= 0L || lowTimestamp >= (now - mcTime)) { if (LOG.isDebugEnabled()) { - LOG.debug("lowTimestamp: " + lowTimestamp + " lowTimestamp: " + lowTimestamp + " now: " + - now + " mcTime: " + mcTime); + LOG.debug("lowTimestamp: " + lowTimestamp + " lowTimestamp: " + lowTimestamp + " now: " + + now + " mcTime: " + mcTime); } return false; } @@ -136,18 +135,17 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac List boundaries = getCompactBoundariesForMajor(filesToCompact, now); boolean[] filesInWindow = new boolean[boundaries.size()]; - for (HStoreFile file: filesToCompact) { + for (HStoreFile file : filesToCompact) { OptionalLong minTimestamp = file.getMinimumTimestamp(); long oldest = minTimestamp.isPresent() ? now - minTimestamp.getAsLong() : Long.MIN_VALUE; if (cfTTL != Long.MAX_VALUE && oldest >= cfTTL) { - LOG.debug("Major compaction triggered on store " + this - + "; for TTL maintenance"); + LOG.debug("Major compaction triggered on store " + this + "; for TTL maintenance"); return true; } if (!file.isMajorCompactionResult() || file.isBulkLoadResult()) { LOG.debug("Major compaction triggered on store " + this - + ", because there are new files and time since last major compaction " - + (now - lowTimestamp) + "ms"); + + ", because there are new files and time since last major compaction " + + (now - lowTimestamp) + "ms"); return true; } @@ -159,12 +157,12 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac lowerWindowIndex = (lowerWindowIndex < 0) ? Math.abs(lowerWindowIndex + 2) : lowerWindowIndex; upperWindowIndex = (upperWindowIndex < 0) ? 
Math.abs(upperWindowIndex + 2) : upperWindowIndex; if (lowerWindowIndex != upperWindowIndex) { - LOG.debug("Major compaction triggered on store " + this + "; because file " - + file.getPath() + " has data with timestamps cross window boundaries"); + LOG.debug("Major compaction triggered on store " + this + "; because file " + file.getPath() + + " has data with timestamps cross window boundaries"); return true; } else if (filesInWindow[upperWindowIndex]) { - LOG.debug("Major compaction triggered on store " + this + - "; because there are more than one file in some windows"); + LOG.debug("Major compaction triggered on store " + this + + "; because there are more than one file in some windows"); return true; } else { filesInWindow[upperWindowIndex] = true; @@ -176,21 +174,21 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) { LOG.debug("Major compaction triggered on store " + this - + "; to make hdfs blocks local, current blockLocalityIndex is " - + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")"); + + "; to make hdfs blocks local, current blockLocalityIndex is " + blockLocalityIndex + + " (min " + comConf.getMinLocalityToForceCompact() + ")"); return true; } - LOG.debug("Skipping major compaction of " + this + - ", because the files are already major compacted"); + LOG.debug( + "Skipping major compaction of " + this + ", because the files are already major compacted"); return false; } @Override protected CompactionRequestImpl createCompactionRequest(ArrayList candidateSelection, - boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { + boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { CompactionRequestImpl result = tryingMajor ? selectMajorCompaction(candidateSelection) - : selectMinorCompaction(candidateSelection, mayUseOffPeak, mayBeStuck); + : selectMinorCompaction(candidateSelection, mayUseOffPeak, mayBeStuck); if (LOG.isDebugEnabled()) { LOG.debug("Generated compaction request: " + result); } @@ -201,8 +199,7 @@ public CompactionRequestImpl selectMajorCompaction(ArrayList candida long now = EnvironmentEdgeManager.currentTime(); List boundaries = getCompactBoundariesForMajor(candidateSelection, now); Map boundariesPolicies = getBoundariesStoragePolicyForMajor(boundaries, now); - return new DateTieredCompactionRequest(candidateSelection, - boundaries, boundariesPolicies); + return new DateTieredCompactionRequest(candidateSelection, boundaries, boundariesPolicies); } /** @@ -277,18 +274,18 @@ private DateTieredCompactionRequest generateCompactionRequest(ArrayList storeFileSelection = mayBeStuck ? storeFiles - : compactionPolicyPerWindow.applyCompactionPolicy(storeFiles, mayUseOffPeak, false); + : compactionPolicyPerWindow.applyCompactionPolicy(storeFiles, mayUseOffPeak, false); if (storeFileSelection != null && !storeFileSelection.isEmpty()) { // If there is any file in the window excluded from compaction, // only one file will be output from compaction. 
- boolean singleOutput = storeFiles.size() != storeFileSelection.size() || - comConf.useDateTieredSingleOutputForMinorCompaction(); + boolean singleOutput = storeFiles.size() != storeFileSelection.size() + || comConf.useDateTieredSingleOutputForMinorCompaction(); List boundaries = getCompactionBoundariesForMinor(window, singleOutput); // we want to generate policy to boundaries for minor compaction Map boundaryPolicyMap = - getBoundariesStoragePolicyForMinor(singleOutput, window, now); - DateTieredCompactionRequest result = new DateTieredCompactionRequest(storeFileSelection, - boundaries, boundaryPolicyMap); + getBoundariesStoragePolicyForMinor(singleOutput, window, now); + DateTieredCompactionRequest result = + new DateTieredCompactionRequest(storeFileSelection, boundaries, boundaryPolicyMap); return result; } return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java index ddf9a0ce2eff..5aafb6beb942 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,8 +24,8 @@ import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.yetus.audience.InterfaceAudience; -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS", - justification="It is intended to use the same equal method as superclass") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_DOESNT_OVERRIDE_EQUALS", + justification = "It is intended to use the same equal method as superclass") @InterfaceAudience.Private public class DateTieredCompactionRequest extends CompactionRequestImpl { private List boundaries; @@ -50,6 +50,6 @@ public Map getBoundariesPolicies() { @Override public String toString() { return super.toString() + " boundaries=" + Arrays.toString(boundaries.toArray()) - + " boundariesPolicies="+boundariesPolicies.toString(); + + " boundariesPolicies=" + boundariesPolicies.toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java index 43e037c5e702..ee365b432c72 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.List; import java.util.Map; import java.util.OptionalLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.regionserver.DateTieredMultiFileWriter; @@ -51,13 +50,13 @@ private boolean needEmptyFile(CompactionRequestImpl request) { // maxSeqId if we haven't written out anything. 
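The date-tiered request and compactor shown here only come into play once a column family's store engine has been switched over. A minimal setup sketch, assuming the usual per-family configuration route; the table and family names are illustrative, and the window keys mirror the ExponentialCompactionWindowFactory defaults that appear later in this diff:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DateTieredSetupExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptorBuilder cf =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("ts"));
      // Switch this family's store engine to date-tiered compaction.
      cf.setConfiguration("hbase.hstore.engine.class",
        "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine");
      // Window sizing; these mirror the defaults read by ExponentialCompactionWindowFactory
      // (6h base window, 4 windows per tier) and are shown only for illustration.
      cf.setConfiguration("hbase.hstore.compaction.date.tiered.base.window.millis",
        String.valueOf(6L * 3600 * 1000));
      cf.setConfiguration("hbase.hstore.compaction.date.tiered.windows.per.tier", "4");
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("metrics"))
        .setColumnFamily(cf.build()).build());
    }
  }
}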
OptionalLong maxSeqId = StoreUtils.getMaxSequenceIdInList(request.getFiles()); OptionalLong storeMaxSeqId = store.getMaxSequenceId(); - return maxSeqId.isPresent() && storeMaxSeqId.isPresent() && - maxSeqId.getAsLong() == storeMaxSeqId.getAsLong(); + return maxSeqId.isPresent() && storeMaxSeqId.isPresent() + && maxSeqId.getAsLong() == storeMaxSeqId.getAsLong(); } public List compact(final CompactionRequestImpl request, final List lowerBoundaries, - final Map lowerBoundariesPolicies, - ThroughputController throughputController, User user) throws IOException { + final Map lowerBoundariesPolicies, ThroughputController throughputController, + User user) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Executing compaction with " + lowerBoundaries.size() + "windows, lower boundaries: " + lowerBoundaries); @@ -70,8 +69,7 @@ public List compact(final CompactionRequestImpl request, final List public DateTieredMultiFileWriter createWriter(InternalScanner scanner, FileDetails fd, boolean shouldDropBehind, boolean major) throws IOException { DateTieredMultiFileWriter writer = new DateTieredMultiFileWriter(lowerBoundaries, - lowerBoundariesPolicies, - needEmptyFile(request)); + lowerBoundariesPolicies, needEmptyFile(request)); initMultiWriter(writer, scanner, fd, shouldDropBehind, major); return writer; } @@ -79,10 +77,10 @@ public DateTieredMultiFileWriter createWriter(InternalScanner scanner, FileDetai } @Override - protected List commitWriter(FileDetails fd, - CompactionRequestImpl request) throws IOException { + protected List commitWriter(FileDetails fd, CompactionRequestImpl request) + throws IOException { List pathList = - writer.commitWriters(fd.maxSeqId, request.isAllFiles(), request.getFiles()); + writer.commitWriters(fd.maxSeqId, request.isAllFiles(), request.getFiles()); return pathList; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java index 03e3a1b5f394..725d72c111dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,14 +45,14 @@ public DefaultCompactor(Configuration conf, HStore store) { } private final CellSinkFactory writerFactory = - new CellSinkFactory() { - @Override - public StoreFileWriter createWriter(InternalScanner scanner, - org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd, - boolean shouldDropBehind, boolean major) throws IOException { - return DefaultCompactor.this.createWriter(fd, shouldDropBehind, major); - } - }; + new CellSinkFactory() { + @Override + public StoreFileWriter createWriter(InternalScanner scanner, + org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd, + boolean shouldDropBehind, boolean major) throws IOException { + return DefaultCompactor.this.createWriter(fd, shouldDropBehind, major); + } + }; /** * Do a minor/major compaction on an explicit set of storefiles from a Store. 
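The compact entry points touched here are normally reached through the client Admin API rather than invoked directly; a short usage sketch, with an illustrative table and family name:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionTrigger {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("metrics");
      // Queue a minor compaction for one family; the selection policy decides what actually runs.
      admin.compact(table, Bytes.toBytes("ts"));
      // Queue a major compaction of the whole table (the forceMajor path in the policies above).
      admin.majorCompact(table);
    }
  }
}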
@@ -63,8 +63,8 @@ public List compact(final CompactionRequestImpl request, } @Override - protected List commitWriter(FileDetails fd, - CompactionRequestImpl request) throws IOException { + protected List commitWriter(FileDetails fd, CompactionRequestImpl request) + throws IOException { List newFiles = Lists.newArrayList(writer.getPath()); writer.appendMetadata(fd.maxSeqId, request.isAllFiles(), request.getFiles()); writer.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java index 76bf1d7ac47d..3a5c10e15624 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; @@ -30,10 +28,8 @@ import org.slf4j.LoggerFactory; /** - * Class to pick which files if any to compact together. - * - * This class will search all possibilities for different and if it gets stuck it will choose - * the smallest set of files to compact. + * Class to pick which files if any to compact together. This class will search all possibilities + * for different and if it gets stuck it will choose the smallest set of files to compact. */ @InterfaceAudience.Private public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { @@ -45,7 +41,7 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { * @param storeConfigInfo An object to provide info about the store. */ public ExploringCompactionPolicy(final Configuration conf, - final StoreConfigInformation storeConfigInfo) { + final StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); } @@ -58,8 +54,8 @@ protected final ArrayList applyCompactionPolicy(ArrayList applyCompactionPolicy(List candidates, boolean mightBeStuck, boolean mayUseOffPeak, int minFiles, int maxFiles) { - final double currentRatio = mayUseOffPeak - ? comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio(); + final double currentRatio = + mayUseOffPeak ? comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio(); // Start off choosing nothing. List bestSelection = new ArrayList<>(0); @@ -71,8 +67,7 @@ public List applyCompactionPolicy(List candidates, boole // Consider every starting place. for (int start = 0; start < candidates.size(); start++) { // Consider every different sub list permutation in between start and end with min files. - for (int currentEnd = start + minFiles - 1; - currentEnd < candidates.size(); currentEnd++) { + for (int currentEnd = start + minFiles - 1; currentEnd < candidates.size(); currentEnd++) { List potentialMatchFiles = candidates.subList(start, currentEnd + 1); // Sanity checks @@ -87,7 +82,7 @@ public List applyCompactionPolicy(List candidates, boole // have to be read if this set of files is compacted. long size = getTotalStoreSize(potentialMatchFiles); - // Store the smallest set of files. 
This stored set of files will be used + // Store the smallest set of files. This stored set of files will be used // if it looks like the algorithm is stuck. if (mightBeStuck && size < smallestSize) { smallest = potentialMatchFiles; @@ -113,22 +108,23 @@ public List applyCompactionPolicy(List candidates, boole } } if (bestSelection.isEmpty() && mightBeStuck) { - LOG.debug("Exploring compaction algorithm has selected " + smallest.size() - + " files of size "+ smallestSize + " because the store might be stuck"); + LOG.debug("Exploring compaction algorithm has selected " + smallest.size() + " files of size " + + smallestSize + " because the store might be stuck"); return new ArrayList<>(smallest); } - LOG.debug("Exploring compaction algorithm has selected {} files of size {} starting at " + - "candidate #{} after considering {} permutations with {} in ratio", bestSelection.size(), - bestSize, bestStart, opts, optsInRatio); + LOG.debug( + "Exploring compaction algorithm has selected {} files of size {} starting at " + + "candidate #{} after considering {} permutations with {} in ratio", + bestSelection.size(), bestSize, bestStart, opts, optsInRatio); return new ArrayList<>(bestSelection); } /** - * Select at least one file in the candidates list to compact, through choosing files - * from the head to the index that the accumulation length larger the max compaction size. - * This method is a supplementary of the selectSimpleCompaction() method, aims to make sure - * at least one file can be selected to compact, for compactions like L0 files, which need to - * compact all files and as soon as possible. + * Select at least one file in the candidates list to compact, through choosing files from the + * head to the index that the accumulation length larger the max compaction size. This method is a + * supplementary of the selectSimpleCompaction() method, aims to make sure at least one file can + * be selected to compact, for compactions like L0 files, which need to compact all files and as + * soon as possible. */ public List selectCompactFiles(final List candidates, int maxFiles, boolean isOffpeak) { @@ -150,12 +146,12 @@ private boolean isBetterSelection(List bestSelection, long bestSize, // (might want to tweak that in future). Also, given the current order of looking at // permutations, prefer earlier files and smaller selection if the difference is small. final double REPLACE_IF_BETTER_BY = 1.05; - double thresholdQuality = ((double)bestSelection.size() / bestSize) * REPLACE_IF_BETTER_BY; - return thresholdQuality < ((double)selection.size() / size); + double thresholdQuality = ((double) bestSelection.size() / bestSize) * REPLACE_IF_BETTER_BY; + return thresholdQuality < ((double) selection.size() / size); } - // Keep if this gets rid of more files. Or the same number of files for less io. + // Keep if this gets rid of more files. Or the same number of files for less io. return selection.size() > bestSelection.size() - || (selection.size() == bestSelection.size() && size < bestSize); + || (selection.size() == bestSelection.size() && size < bestSize); } /** @@ -168,9 +164,8 @@ private long getTotalStoreSize(List potentialMatchFiles) { } /** - * Check that all files satisfy the constraint - * FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) ) * Ratio. - * + * Check that all files satisfy the constraint FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) + * ) * Ratio. * @param files List of store files to consider as a compaction candidate. * @param currentRatio The ratio to use. 
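The constraint restated in the javadoc above, FileSize(i) <= (Sum(0,N,FileSize(_)) - FileSize(i)) * Ratio, is easier to see as a standalone check. This is only an illustration of the rule, not the HBase method itself:

import java.util.List;

public class RatioCheck {
  /** Returns true when every file is no larger than ratio times the sum of the other files. */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // 100 > (30 + 20) * 1.2, so this selection is rejected.
    System.out.println(filesInRatio(List.of(100L, 30L, 20L), 1.2)); // false
    System.out.println(filesInRatio(List.of(40L, 30L, 20L), 1.2)); // true
  }
}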
* @return a boolean if these files satisfy the ratio constraints. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java index 2ec010807ce1..e21d2f3d5837 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExponentialCompactionWindowFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,11 +34,11 @@ public class ExponentialCompactionWindowFactory extends CompactionWindowFactory LoggerFactory.getLogger(ExponentialCompactionWindowFactory.class); public static final String BASE_WINDOW_MILLIS_KEY = - "hbase.hstore.compaction.date.tiered.base.window.millis"; + "hbase.hstore.compaction.date.tiered.base.window.millis"; public static final String WINDOWS_PER_TIER_KEY = - "hbase.hstore.compaction.date.tiered.windows.per.tier"; + "hbase.hstore.compaction.date.tiered.windows.per.tier"; public static final String MAX_TIER_AGE_MILLIS_KEY = - "hbase.hstore.compaction.date.tiered.max.tier.age.millis"; + "hbase.hstore.compaction.date.tiered.max.tier.age.millis"; private final class Window extends CompactionWindow { @@ -117,7 +117,7 @@ private long getMaxTierAgeCutoff(long now) { return LongMath.checkedSubtract(now, maxTierAgeMillis); } catch (ArithmeticException ae) { LOG.warn("Value for " + MAX_TIER_AGE_MILLIS_KEY + ": " + maxTierAgeMillis - + ". Will always promote to next tier."); + + ". Will always promote to next tier."); return Long.MIN_VALUE; } } @@ -126,8 +126,8 @@ public ExponentialCompactionWindowFactory(CompactionConfiguration comConf) { Configuration conf = comConf.conf; baseWindowMillis = conf.getLong(BASE_WINDOW_MILLIS_KEY, 3600000 * 6); windowsPerTier = conf.getInt(WINDOWS_PER_TIER_KEY, 4); - maxTierAgeMillis = conf.getLong(MAX_TIER_AGE_MILLIS_KEY, - comConf.getDateTieredMaxStoreFileAgeMillis()); + maxTierAgeMillis = + conf.getLong(MAX_TIER_AGE_MILLIS_KEY, comConf.getDateTieredMaxStoreFileAgeMillis()); LOG.info(toString()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java index 344b90d5f85b..4d14e4677011 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; @@ -33,22 +31,18 @@ import org.slf4j.LoggerFactory; /** - * - * FIFO compaction policy selects only files which have all cells expired. - * The column family MUST have non-default TTL. 
One of the use cases for this - * policy is when we need to store raw data which will be post-processed later - * and discarded completely after quite short period of time. Raw time-series vs. - * time-based roll up aggregates and compacted time-series. We collect raw time-series - * and store them into CF with FIFO compaction policy, periodically we run task - * which creates roll up aggregates and compacts time-series, the original raw data - * can be discarded after that. - * + * FIFO compaction policy selects only files which have all cells expired. The column family MUST + * have non-default TTL. One of the use cases for this policy is when we need to store raw data + * which will be post-processed later and discarded completely after quite short period of time. Raw + * time-series vs. time-based roll up aggregates and compacted time-series. We collect raw + * time-series and store them into CF with FIFO compaction policy, periodically we run task which + * creates roll up aggregates and compacts time-series, the original raw data can be discarded after + * that. */ @InterfaceAudience.Private public class FIFOCompactionPolicy extends ExploringCompactionPolicy { - - private static final Logger LOG = LoggerFactory.getLogger(FIFOCompactionPolicy.class); + private static final Logger LOG = LoggerFactory.getLogger(FIFOCompactionPolicy.class); public FIFOCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); @@ -58,13 +52,13 @@ public FIFOCompactionPolicy(Configuration conf, StoreConfigInformation storeConf public CompactionRequestImpl selectCompaction(Collection candidateFiles, List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, boolean forceMajor) throws IOException { - if(forceMajor){ + if (forceMajor) { LOG.warn("Major compaction is not supported for FIFO compaction policy. 
Ignore the flag."); } boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles); - if(isAfterSplit){ + if (isAfterSplit) { LOG.info("Split detected, delegate selection to the parent policy."); - return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, + return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, mayUseOffPeak, forceMajor); } @@ -76,9 +70,9 @@ public CompactionRequestImpl selectCompaction(Collection candidateFi @Override public boolean shouldPerformMajorCompaction(Collection filesToCompact) - throws IOException { + throws IOException { boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact); - if(isAfterSplit){ + if (isAfterSplit) { LOG.info("Split detected, delegate to the parent policy."); return super.shouldPerformMajorCompaction(filesToCompact); } @@ -89,7 +83,7 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac public boolean needsCompaction(Collection storeFiles, List filesCompacting) { boolean isAfterSplit = StoreUtils.hasReferences(storeFiles); - if(isAfterSplit){ + if (isAfterSplit) { LOG.info("Split detected, delegate to the parent policy."); return super.needsCompaction(storeFiles, filesCompacting); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ForbidMajorCompactionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ForbidMajorCompactionChecker.java index eecc78057120..ad1fae310111 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ForbidMajorCompactionChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ForbidMajorCompactionChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
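As the reflowed FIFOCompactionPolicy javadoc stresses, the policy is only valid for a family with a non-default TTL. A hedged setup sketch; the table name, family name and TTL are illustrative, and the per-family policy key is the one documented for DefaultStoreEngine overrides:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FifoFamilyExample {
  static TableDescriptor rawEventsTable() {
    ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("raw"))
      // FIFO compaction requires a non-default TTL; one day here, purely illustrative.
      .setTimeToLive(24 * 60 * 60);
    // Per-family compaction policy override; assumed to be the documented DefaultStoreEngine key.
    cf.setConfiguration("hbase.hstore.defaultengine.compactionpolicy.class",
      "org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy");
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("raw_events"))
      .setColumnFamily(cf.build()).build();
  }
}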
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.util.function.BiPredicate; - import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java index b920de2b57d9..212beb5177f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/OffPeakHours.java @@ -17,18 +17,25 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; @InterfaceAudience.Private public abstract class OffPeakHours { private static final Logger LOG = LoggerFactory.getLogger(OffPeakHours.class); public static final OffPeakHours DISABLED = new OffPeakHours() { - @Override public boolean isOffPeakHour() { return false; } - @Override public boolean isOffPeakHour(int targetHour) { return false; } + @Override + public boolean isOffPeakHour() { + return false; + } + + @Override + public boolean isOffPeakHour(int targetHour) { + return false; + } }; public static OffPeakHours getInstance(Configuration conf) { @@ -46,11 +53,10 @@ public static OffPeakHours getInstance(int startHour, int endHour) { return DISABLED; } - if (! isValidHour(startHour) || ! isValidHour(endHour)) { + if (!isValidHour(startHour) || !isValidHour(endHour)) { if (LOG.isWarnEnabled()) { - LOG.warn("Ignoring invalid start/end hour for peak hour : start = " + - startHour + " end = " + endHour + - ". Valid numbers are [0-23]"); + LOG.warn("Ignoring invalid start/end hour for peak hour : start = " + startHour + " end = " + + endHour + ". Valid numbers are [0-23]"); } return DISABLED; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java index 425df1bb10ba..b58620d2e982 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.Collection; import java.util.List; import java.util.OptionalLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.HStore; @@ -37,16 +35,15 @@ import org.slf4j.LoggerFactory; /** - * The default algorithm for selecting files for compaction. - * Combines the compaction configuration and the provisional file selection that - * it's given to produce the list of suitable candidates for compaction. + * The default algorithm for selecting files for compaction. 
Combines the compaction configuration + * and the provisional file selection that it's given to produce the list of suitable candidates for + * compaction. */ @InterfaceAudience.Private public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { private static final Logger LOG = LoggerFactory.getLogger(RatioBasedCompactionPolicy.class); - public RatioBasedCompactionPolicy(Configuration conf, - StoreConfigInformation storeConfigInfo) { + public RatioBasedCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) { super(conf, storeConfigInfo); } @@ -56,7 +53,7 @@ public RatioBasedCompactionPolicy(Configuration conf, */ @Override public boolean shouldPerformMajorCompaction(Collection filesToCompact) - throws IOException { + throws IOException { boolean result = false; long mcTime = getNextMajorCompactTime(filesToCompact); if (filesToCompact == null || filesToCompact.isEmpty() || mcTime == 0) { @@ -68,14 +65,14 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) { String regionInfo; if (this.storeConfigInfo != null && this.storeConfigInfo instanceof HStore) { - regionInfo = ((HStore)this.storeConfigInfo).getRegionInfo().getRegionNameAsString(); + regionInfo = ((HStore) this.storeConfigInfo).getRegionInfo().getRegionNameAsString(); } else { regionInfo = this.toString(); } // Major compaction time has elapsed. long cfTTL = HConstants.FOREVER; if (this.storeConfigInfo != null) { - cfTTL = this.storeConfigInfo.getStoreFileTtl(); + cfTTL = this.storeConfigInfo.getStoreFileTtl(); } if (filesToCompact.size() == 1) { // Single file @@ -83,29 +80,28 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac OptionalLong minTimestamp = sf.getMinimumTimestamp(); long oldest = minTimestamp.isPresent() ? 
now - minTimestamp.getAsLong() : Long.MIN_VALUE; if (sf.isMajorCompactionResult() && (cfTTL == Long.MAX_VALUE || oldest < cfTTL)) { - float blockLocalityIndex = - sf.getHDFSBlockDistribution().getBlockLocalityIndex( - DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); + float blockLocalityIndex = sf.getHDFSBlockDistribution() + .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER)); if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) { LOG.debug("Major compaction triggered on only store " + regionInfo - + "; to make hdfs blocks local, current blockLocalityIndex is " - + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")"); + + "; to make hdfs blocks local, current blockLocalityIndex is " + blockLocalityIndex + + " (min " + comConf.getMinLocalityToForceCompact() + ")"); result = true; } else { LOG.debug("Skipping major compaction of " + regionInfo - + " because one (major) compacted file only, oldestTime " + oldest - + "ms is < TTL=" + cfTTL + " and blockLocalityIndex is " + blockLocalityIndex - + " (min " + comConf.getMinLocalityToForceCompact() + ")"); + + " because one (major) compacted file only, oldestTime " + oldest + "ms is < TTL=" + + cfTTL + " and blockLocalityIndex is " + blockLocalityIndex + " (min " + + comConf.getMinLocalityToForceCompact() + ")"); } } else if (cfTTL != HConstants.FOREVER && oldest > cfTTL) { LOG.debug("Major compaction triggered on store " + regionInfo - + ", because keyvalues outdated; time since last major compaction " - + (now - lowTimestamp) + "ms"); + + ", because keyvalues outdated; time since last major compaction " + + (now - lowTimestamp) + "ms"); result = true; } } else { LOG.debug("Major compaction triggered on store " + regionInfo - + "; time since last major compaction " + (now - lowTimestamp) + "ms"); + + "; time since last major compaction " + (now - lowTimestamp) + "ms"); result = true; } } @@ -113,50 +109,35 @@ public boolean shouldPerformMajorCompaction(Collection filesToCompac } @Override - protected CompactionRequestImpl createCompactionRequest(ArrayList - candidateSelection, boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) - throws IOException { + protected CompactionRequestImpl createCompactionRequest(ArrayList candidateSelection, + boolean tryingMajor, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { if (!tryingMajor) { filterBulk(candidateSelection); candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck); - candidateSelection = checkMinFilesCriteria(candidateSelection, - comConf.getMinFilesToCompact()); + candidateSelection = + checkMinFilesCriteria(candidateSelection, comConf.getMinFilesToCompact()); } return new CompactionRequestImpl(candidateSelection); } /** - * -- Default minor compaction selection algorithm: - * choose CompactSelection from candidates -- - * First exclude bulk-load files if indicated in configuration. - * Start at the oldest file and stop when you find the first file that - * meets compaction criteria: - * (1) a recently-flushed, small file (i.e. <= minCompactSize) - * OR - * (2) within the compactRatio of sum(newer_files) - * Given normal skew, any newer files will also meet this criteria - *
      - * Additional Note: - * If fileSizes.size() >> maxFilesToCompact, we will recurse on - * compact(). Consider the oldest files first to avoid a - * situation where we always compact [end-threshold,end). Then, the - * last file becomes an aggregate of the previous compactions. - * - * normal skew: - * - * older ----> newer (increasing seqID) - * _ - * | | _ - * | | | | _ - * --|-|- |-|- |-|---_-------_------- minCompactSize - * | | | | | | | | _ | | - * | | | | | | | | | | | | - * | | | | | | | | | | | | - * @param candidates pre-filtrate - * @return filtered subset - */ + * -- Default minor compaction selection algorithm: choose CompactSelection from candidates -- + * First exclude bulk-load files if indicated in configuration. Start at the oldest file and stop + * when you find the first file that meets compaction criteria: (1) a recently-flushed, small file + * (i.e. <= minCompactSize) OR (2) within the compactRatio of sum(newer_files) Given normal skew, + * any newer files will also meet this criteria + *
      + * Additional Note: If fileSizes.size() >> maxFilesToCompact, we will recurse on compact(). + * Consider the oldest files first to avoid a situation where we always compact + * [end-threshold,end). Then, the last file becomes an aggregate of the previous compactions. + * normal skew: older ----> newer (increasing seqID) _ | | _ | | | | _ --|-|- |-|- + * |-|---_-------_------- minCompactSize | | | | | | | | _ | | | | | | | | | | | | | | | | | | | | + * | | | | | | + * @param candidates pre-filtrate + * @return filtered subset + */ protected ArrayList applyCompactionPolicy(ArrayList candidates, - boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { + boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { if (candidates.isEmpty()) { return candidates; } @@ -178,20 +159,17 @@ protected ArrayList applyCompactionPolicy(ArrayList cand fileSizes[i] = file.getReader().length(); // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo int tooFar = i + comConf.getMaxFilesToCompact() - 1; - sumSize[i] = fileSizes[i] - + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0) - - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0); + sumSize[i] = fileSizes[i] + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0) + - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0); } - - while (countOfFiles - start >= comConf.getMinFilesToCompact() && - fileSizes[start] > Math.max(comConf.getMinCompactSize(), - (long) (sumSize[start + 1] * ratio))) { + while (countOfFiles - start >= comConf.getMinFilesToCompact() && fileSizes[start] > Math + .max(comConf.getMinCompactSize(), (long) (sumSize[start + 1] * ratio))) { ++start; } if (start < countOfFiles) { LOG.info("Default compaction algorithm has selected " + (countOfFiles - start) - + " files from " + countOfFiles + " candidates"); + + " files from " + countOfFiles + " candidates"); } else if (mayBeStuck) { // We may be stuck. Compact the latest files if we can. int filesToLeave = candidates.size() - comConf.getMinFilesToCompact(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java index db469c420ca0..8baf564ca560 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
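The selection loop above is driven by a handful of Configuration knobs (compaction ratio, off-peak ratio, min/max files per compaction, off-peak hours). A sketch of server-side tuning using the commonly documented keys; the values are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Ratio used by applyCompactionPolicy(); a higher off-peak ratio admits larger files.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    // Bounds on how many files a single minor compaction may take.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Off-peak window (hours of the day) consulted through OffPeakHours above.
    conf.setInt("hbase.offpeak.start.hour", 0);
    conf.setInt("hbase.offpeak.end.hour", 6);
    return conf;
  }
}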
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver.compactions; @@ -23,6 +30,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -47,8 +55,8 @@ public List preSelectCompactionForCoprocessor(Collection /** * @param candidateFiles candidate files, ordered from oldest to newest by seqId. We rely on - * DefaultStoreFileManager to sort the files by seqId to guarantee contiguous compaction based - * on seqId for data consistency. + * DefaultStoreFileManager to sort the files by seqId to guarantee contiguous compaction + * based on seqId for data consistency. * @return subset copy of candidate list that meets compaction criteria */ public CompactionRequestImpl selectCompaction(Collection candidateFiles, @@ -60,13 +68,14 @@ public CompactionRequestImpl selectCompaction(Collection candidateFi // able to compact more if stuck and compacting, because ratio policy excludes some // non-compacting files from consideration during compaction (see getCurrentEligibleFiles). int futureFiles = filesCompacting.isEmpty() ? 0 : 1; - boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles) - >= storeConfigInfo.getBlockingFileCount(); + boolean mayBeStuck = + (candidateFiles.size() - filesCompacting.size() + futureFiles) >= storeConfigInfo + .getBlockingFileCount(); candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting); - LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " + - filesCompacting.size() + " compacting, " + candidateSelection.size() + - " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking"); + LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " + + filesCompacting.size() + " compacting, " + candidateSelection.size() + " eligible, " + + storeConfigInfo.getBlockingFileCount() + " blocking"); // If we can't have all files, we cannot do major anyway boolean isAllFiles = candidateFiles.size() == candidateSelection.size(); @@ -79,7 +88,7 @@ public CompactionRequestImpl selectCompaction(Collection candidateFi // or if we do not have too many files to compact and this was requested as a major compaction boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction) || (((forceMajor && isAllFiles) || shouldPerformMajorCompaction(candidateSelection)) - && (candidateSelection.size() < comConf.getMaxFilesToCompact())); + && (candidateSelection.size() < comConf.getMaxFilesToCompact())); // Or, if there are any references among the candidates. 
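The mayBeStuck heuristic in selectCompaction above compares outstanding store files against the blocking-file threshold; a tiny standalone illustration of that arithmetic, assuming the usual hbase.hstore.blockingStoreFiles default of 16:

public class StuckCheck {
  static boolean mayBeStuck(int candidateFiles, int filesCompacting, int blockingFileCount) {
    // One "future" file is assumed when a compaction is already running, as in the policy above.
    int futureFiles = filesCompacting == 0 ? 0 : 1;
    return candidateFiles - filesCompacting + futureFiles >= blockingFileCount;
  }

  public static void main(String[] args) {
    // 18 candidates, 2 already compacting, blocking threshold 16: 18 - 2 + 1 = 17, so stuck.
    System.out.println(mayBeStuck(18, 2, 16)); // true
    System.out.println(mayBeStuck(10, 0, 16)); // false
  }
}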
boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection); @@ -122,8 +131,8 @@ public long getNextMajorCompactTime(Collection filesToCompact) { } /** - * Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER}, - * that is, +/- 3.5 days (7 days * 0.5). + * Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER}, that + * is, +/- 3.5 days (7 days * 0.5). */ double jitterPct = comConf.getMajorCompactionJitter(); if (jitterPct <= 0) { @@ -172,19 +181,18 @@ protected ArrayList getCurrentEligibleFiles(ArrayList ca /** * @param candidates pre-filtrate - * @return filtered subset exclude all files above maxCompactSize - * Also save all references. We MUST compact them + * @return filtered subset exclude all files above maxCompactSize Also save all references. We + * MUST compact them */ protected ArrayList skipLargeFiles(ArrayList candidates, - boolean mayUseOffpeak) { + boolean mayUseOffpeak) { int pos = 0; while (pos < candidates.size() && !candidates.get(pos).isReference() - && (candidates.get(pos).getReader().length() > comConf.getMaxCompactSize(mayUseOffpeak))) { + && (candidates.get(pos).getReader().length() > comConf.getMaxCompactSize(mayUseOffpeak))) { ++pos; } if (pos > 0) { - LOG.debug("Some files are too large. Excluding " + pos - + " files from compaction candidates"); + LOG.debug("Some files are too large. Excluding " + pos + " files from compaction candidates"); candidates.subList(0, pos).clear(); } return candidates; @@ -200,16 +208,16 @@ protected void filterBulk(ArrayList candidates) { /** * @param candidates pre-filtrate */ - protected void removeExcessFiles(ArrayList candidates, - boolean isUserCompaction, boolean isMajorCompaction) { + protected void removeExcessFiles(ArrayList candidates, boolean isUserCompaction, + boolean isMajorCompaction) { int excess = candidates.size() - comConf.getMaxFilesToCompact(); if (excess > 0) { if (isMajorCompaction && isUserCompaction) { LOG.debug("Warning, compacting more than " + comConf.getMaxFilesToCompact() + " files because of a user-requested major compaction"); } else { - LOG.debug("Too many admissible files. Excluding " + excess - + " files from compaction candidates"); + LOG.debug( + "Too many admissible files. Excluding " + excess + " files from compaction candidates"); candidates.subList(comConf.getMaxFilesToCompact(), candidates.size()).clear(); } } @@ -223,8 +231,8 @@ protected ArrayList checkMinFilesCriteria(ArrayList cand int minFiles) { if (candidates.size() < minFiles) { if (LOG.isDebugEnabled()) { - LOG.debug("Not compacting files because we only have " + candidates.size() + - " files ready for compaction. Need " + minFiles + " to initiate."); + LOG.debug("Not compacting files because we only have " + candidates.size() + + " files ready for compaction. Need " + minFiles + " to initiate."); } candidates.clear(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java index 19c5b24a4f66..91c4c091609b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
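The jitter javadoc cleaned up above describes how the next major compaction time is spread around the configured period; a small sketch of that computation using the commonly documented keys and their defaults (7 days, 0.5 jitter):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MajorCompactionWindow {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long period = conf.getLong("hbase.hregion.majorcompaction", 7L * 24 * 3600 * 1000);
    float jitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5F);
    // With the defaults, the next major compaction lands 7 days +/- 3.5 days after the last one.
    long spread = (long) (period * jitter);
    System.out.println("next major compaction due between " + (period - spread) + "ms and "
      + (period + spread) + "ms after the last one");
  }
}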
See the NOTICE file * distributed with this work for additional information @@ -19,11 +18,11 @@ package org.apache.hadoop.hbase.regionserver.compactions; import static org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.OPEN_KEY; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparator; @@ -41,6 +40,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** @@ -54,8 +54,8 @@ public class StripeCompactionPolicy extends CompactionPolicy { private StripeStoreConfig config; - public StripeCompactionPolicy( - Configuration conf, StoreConfigInformation storeConfigInfo, StripeStoreConfig config) { + public StripeCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo, + StripeStoreConfig config) { super(conf, storeConfigInfo); this.config = config; stripePolicy = new ExploringCompactionPolicy(conf, storeConfigInfo); @@ -71,16 +71,16 @@ public List preSelectFilesForCoprocessor(StripeInformationProvider s return candidateFiles; } - public StripeCompactionRequest createEmptyRequest( - StripeInformationProvider si, CompactionRequestImpl request) { + public StripeCompactionRequest createEmptyRequest(StripeInformationProvider si, + CompactionRequestImpl request) { // Treat as L0-ish compaction with fixed set of files, and hope for the best. if (si.getStripeCount() > 0) { return new BoundaryStripeCompactionRequest(request, si.getStripeBoundaries()); } - Pair targetKvsAndCount = estimateTargetKvs( - request.getFiles(), this.config.getInitialCount()); - return new SplitStripeCompactionRequest( - request, OPEN_KEY, OPEN_KEY, targetKvsAndCount.getSecond(), targetKvsAndCount.getFirst()); + Pair targetKvsAndCount = + estimateTargetKvs(request.getFiles(), this.config.getInitialCount()); + return new SplitStripeCompactionRequest(request, OPEN_KEY, OPEN_KEY, + targetKvsAndCount.getSecond(), targetKvsAndCount.getFirst()); } public StripeStoreFlusher.StripeFlushRequest selectFlush(CellComparator comparator, @@ -102,7 +102,7 @@ public StripeStoreFlusher.StripeFlushRequest selectFlush(CellComparator comparat public StripeCompactionRequest selectCompaction(StripeInformationProvider si, List filesCompacting, boolean isOffpeak) throws IOException { // TODO: first cut - no parallel compactions. To have more fine grained control we - // probably need structure more sophisticated than a list. + // probably need structure more sophisticated than a list. 
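The stripe policy whose selection logic follows is, like the date-tiered case, enabled by switching the family's store engine; a hedged sketch using the keys documented for StripeStoreConfig (treat the exact key names and values as assumptions):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class StripeFamilyExample {
  static ColumnFamilyDescriptorBuilder stripedFamily() {
    ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("d"));
    cf.setConfiguration("hbase.hstore.engine.class",
      "org.apache.hadoop.hbase.regionserver.StripeStoreEngine");
    // Start with a single stripe and let size-based splitting take over; illustrative values.
    cf.setConfiguration("hbase.store.stripe.initialStripeCount", "1");
    cf.setConfiguration("hbase.store.stripe.sizeToSplit", String.valueOf(2L * 1024 * 1024 * 1024));
    cf.setConfiguration("hbase.store.stripe.splitPartCount", "2");
    return cf;
  }
}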
if (!filesCompacting.isEmpty()) { LOG.debug("Not selecting compaction: " + filesCompacting.size() + " files compacting"); return null; @@ -118,8 +118,8 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, if (StoreUtils.hasReferences(allFiles)) { LOG.debug("There are references in the store; compacting all files"); long targetKvs = estimateTargetKvs(allFiles, config.getInitialCount()).getFirst(); - SplitStripeCompactionRequest request = new SplitStripeCompactionRequest( - allFiles, OPEN_KEY, OPEN_KEY, targetKvs); + SplitStripeCompactionRequest request = + new SplitStripeCompactionRequest(allFiles, OPEN_KEY, OPEN_KEY, targetKvs); request.setMajorRangeFull(); request.getRequest().setAfterSplit(true); return request; @@ -142,8 +142,8 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, if (shouldCompactL0) { if (!canDropDeletesNoL0) { // If we need to compact L0, see if we can add something to it, and drop deletes. - StripeCompactionRequest result = selectSingleStripeCompaction( - si, !shouldSelectL0Files(si), canDropDeletesNoL0, isOffpeak); + StripeCompactionRequest result = selectSingleStripeCompaction(si, !shouldSelectL0Files(si), + canDropDeletesNoL0, isOffpeak); if (result != null) { return result; } @@ -165,15 +165,14 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, public boolean needsCompactions(StripeInformationProvider si, List filesCompacting) { // Approximation on whether we need compaction. - return filesCompacting.isEmpty() - && (StoreUtils.hasReferences(si.getStorefiles()) - || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles()) - || needsSingleStripeCompaction(si) || hasExpiredStripes(si) || allL0FilesExpired(si)); + return filesCompacting.isEmpty() && (StoreUtils.hasReferences(si.getStorefiles()) + || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles()) + || needsSingleStripeCompaction(si) || hasExpiredStripes(si) || allL0FilesExpired(si)); } @Override public boolean shouldPerformMajorCompaction(Collection filesToCompact) - throws IOException { + throws IOException { return false; // there's never a major compaction! } @@ -206,14 +205,14 @@ protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformation // If we want to compact L0 to drop deletes, we only want whole-stripe compactions. // So, pass includeL0 as 2nd parameter to indicate that. 
List selection = selectSimpleCompaction(stripes.get(i), - !canDropDeletesWithoutL0 && includeL0, isOffpeak, false); + !canDropDeletesWithoutL0 && includeL0, isOffpeak, false); if (selection.isEmpty()) continue; long size = 0; for (HStoreFile sf : selection) { size += sf.getReader().length(); } - if (bqSelection == null || selection.size() > bqSelection.size() || - (selection.size() == bqSelection.size() && size < bqTotalSize)) { + if (bqSelection == null || selection.size() > bqSelection.size() + || (selection.size() == bqSelection.size() && size < bqTotalSize)) { bqSelection = selection; bqIndex = i; bqTotalSize = size; @@ -238,13 +237,12 @@ protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformation Pair kvsAndCount = estimateTargetKvs(filesToCompact, config.getSplitCount()); targetKvs = kvsAndCount.getFirst(); targetCount = kvsAndCount.getSecond(); - splitString = "; the stripe will be split into at most " - + targetCount + " stripes with " + targetKvs + " target KVs"; + splitString = "; the stripe will be split into at most " + targetCount + " stripes with " + + targetKvs + " target KVs"; } - LOG.debug("Found compaction in a stripe with end key [" - + Bytes.toString(si.getEndRow(bqIndex)) + "], with " - + filesToCompact.size() + " files of total size " + bqTotalSize + splitString); + LOG.debug("Found compaction in a stripe with end key [" + Bytes.toString(si.getEndRow(bqIndex)) + + "], with " + filesToCompact.size() + " files of total size " + bqTotalSize + splitString); // See if we can drop deletes. StripeCompactionRequest req; @@ -257,8 +255,8 @@ protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformation sfs.addSublist(l0Files); req = new BoundaryStripeCompactionRequest(sfs, si.getStripeBoundaries()); } else { - req = new SplitStripeCompactionRequest( - filesToCompact, si.getStartRow(bqIndex), si.getEndRow(bqIndex), targetCount, targetKvs); + req = new SplitStripeCompactionRequest(filesToCompact, si.getStartRow(bqIndex), + si.getEndRow(bqIndex), targetCount, targetKvs); } if (hasAllFiles && (canDropDeletesWithoutL0 || includeL0)) { req.setMajorRange(si.getStartRow(bqIndex), si.getEndRow(bqIndex)); @@ -273,13 +271,13 @@ protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformation * @param allFilesOnly Whether a compaction of all-or-none files is needed. * @return The resulting selection. */ - private List selectSimpleCompaction( - List sfs, boolean allFilesOnly, boolean isOffpeak, boolean forceCompact) { - int minFilesLocal = Math.max( - allFilesOnly ? sfs.size() : 0, this.config.getStripeCompactMinFiles()); + private List selectSimpleCompaction(List sfs, boolean allFilesOnly, + boolean isOffpeak, boolean forceCompact) { + int minFilesLocal = + Math.max(allFilesOnly ? 
sfs.size() : 0, this.config.getStripeCompactMinFiles()); int maxFilesLocal = Math.max(this.config.getStripeCompactMaxFiles(), minFilesLocal); - List selected = stripePolicy.applyCompactionPolicy(sfs, false, - isOffpeak, minFilesLocal, maxFilesLocal); + List selected = + stripePolicy.applyCompactionPolicy(sfs, false, isOffpeak, minFilesLocal, maxFilesLocal); if (forceCompact && (selected == null || selected.isEmpty()) && !sfs.isEmpty()) { return stripePolicy.selectCompactFiles(sfs, maxFilesLocal, isOffpeak); } @@ -287,8 +285,8 @@ private List selectSimpleCompaction( } private boolean shouldSelectL0Files(StripeInformationProvider si) { - return si.getLevel0Files().size() > this.config.getStripeCompactMaxFiles() || - getTotalFileSize(si.getLevel0Files()) > comConf.getMaxCompactSize(); + return si.getLevel0Files().size() > this.config.getStripeCompactMaxFiles() + || getTotalFileSize(si.getLevel0Files()) > comConf.getMaxCompactSize(); } private StripeCompactionRequest selectL0OnlyCompaction(StripeInformationProvider si) { @@ -303,8 +301,8 @@ private StripeCompactionRequest selectL0OnlyCompaction(StripeInformationProvider Pair estimate = estimateTargetKvs(selectedFiles, config.getInitialCount()); long targetKvs = estimate.getFirst(); int targetCount = estimate.getSecond(); - request = - new SplitStripeCompactionRequest(selectedFiles, OPEN_KEY, OPEN_KEY, targetCount, targetKvs); + request = new SplitStripeCompactionRequest(selectedFiles, OPEN_KEY, OPEN_KEY, targetCount, + targetKvs); if (selectedFiles.size() == l0Files.size()) { ((SplitStripeCompactionRequest) request).setMajorRangeFull(); // L0 only, can drop deletes. } @@ -317,8 +315,8 @@ private StripeCompactionRequest selectL0OnlyCompaction(StripeInformationProvider return request; } - private StripeCompactionRequest selectExpiredMergeCompaction( - StripeInformationProvider si, boolean canDropDeletesNoL0) { + private StripeCompactionRequest selectExpiredMergeCompaction(StripeInformationProvider si, + boolean canDropDeletesNoL0) { long cfTtl = this.storeConfigInfo.getStoreFileTtl(); if (cfTtl == Long.MAX_VALUE) { return null; // minversion might be set, cannot delete old files @@ -396,7 +394,7 @@ private boolean allFilesExpired(final List storeFiles) { for (HStoreFile storeFile : storeFiles) { // Check store file is not empty and has not expired if (storeFile.getReader().getMaxTimestamp() >= timestampCutoff - && storeFile.getReader().getEntries() != 0) { + && storeFile.getReader().getEntries() != 0) { return false; } } @@ -435,8 +433,8 @@ private Pair estimateTargetKvs(Collection files, doub ratio = newRatio; splitCount += 1.0; } - long kvCount = (long)(getTotalKvCount(files) / splitCount); - return new Pair<>(kvCount, (int)Math.ceil(splitCount)); + long kvCount = (long) (getTotalKvCount(files) / splitCount); + return new Pair<>(kvCount, (int) Math.ceil(splitCount)); } /** Stripe compaction request wrapper. */ @@ -444,13 +442,14 @@ public abstract static class StripeCompactionRequest { protected CompactionRequestImpl request; protected byte[] majorRangeFromRow = null, majorRangeToRow = null; - public List execute(StripeCompactor compactor, - ThroughputController throughputController) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController) + throws IOException { return execute(compactor, throughputController, null); } + /** - * Executes the request against compactor (essentially, just calls correct overload of - * compact method), to simulate more dynamic dispatch. 
+ * Executes the request against compactor (essentially, just calls correct overload of compact + * method), to simulate more dynamic dispatch. * @param compactor Compactor. * @return result of compact(...) */ @@ -462,8 +461,8 @@ public StripeCompactionRequest(CompactionRequestImpl request) { } /** - * Sets compaction "major range". Major range is the key range for which all - * the files are included, so they can be treated like major-compacted files. + * Sets compaction "major range". Major range is the key range for which all the files are + * included, so they can be treated like major-compacted files. * @param startRow Left boundary, inclusive. * @param endRow Right boundary, exclusive. */ @@ -484,8 +483,8 @@ public void setRequest(CompactionRequestImpl request) { } /** - * Request for stripe compactor that will cause it to split the source files into several - * separate files at the provided boundaries. + * Request for stripe compactor that will cause it to split the source files into several separate + * files at the provided boundaries. */ private static class BoundaryStripeCompactionRequest extends StripeCompactionRequest { private final List targetBoundaries; @@ -506,18 +505,18 @@ public BoundaryStripeCompactionRequest(Collection files, } @Override - public List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController, + User user) throws IOException { return compactor.compact(this.request, this.targetBoundaries, this.majorRangeFromRow, this.majorRangeToRow, throughputController, user); } } /** - * Request for stripe compactor that will cause it to split the source files into several - * separate files into based on key-value count, as well as file count limit. - * Most of the files will be roughly the same size. The last file may be smaller or larger - * depending on the interplay of the amount of data and maximum number of files allowed. + * Request for stripe compactor that will cause it to split the source files into several separate + * files into based on key-value count, as well as file count limit. Most of the files will be + * roughly the same size. The last file may be smaller or larger depending on the interplay of the + * amount of data and maximum number of files allowed. */ private static class SplitStripeCompactionRequest extends StripeCompactionRequest { private final byte[] startRow, endRow; @@ -529,11 +528,11 @@ private static class SplitStripeCompactionRequest extends StripeCompactionReques * @param startRow Left boundary of the range to compact, inclusive. * @param endRow Right boundary of the range to compact, exclusive. * @param targetCount The maximum number of stripe to compact into. - * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than - * total number of kvs, all the overflow data goes into the last stripe. + * @param targetKvs The KV count of each segment. If targetKvs*targetCount is less than total + * number of kvs, all the overflow data goes into the last stripe. 
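The overflow rule in the SplitStripeCompactionRequest javadoc above (when targetKvs * targetCount is below the total KV count, the remainder lands in the last stripe) is easiest to see with concrete numbers; a small illustrative computation:

public class StripeSizingExample {
  public static void main(String[] args) {
    long totalKvs = 10_000_000L;
    int targetCount = 3;
    long targetKvs = 3_000_000L;
    // The first targetCount - 1 stripes get targetKvs each; the last stripe absorbs the overflow.
    long lastStripe = totalKvs - (long) (targetCount - 1) * targetKvs;
    // Prints 4000000: the 1M overflow beyond targetKvs * targetCount lands in the last stripe.
    System.out.println("last stripe gets " + lastStripe + " KVs");
  }
}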
*/ - public SplitStripeCompactionRequest(CompactionRequestImpl request, - byte[] startRow, byte[] endRow, int targetCount, long targetKvs) { + public SplitStripeCompactionRequest(CompactionRequestImpl request, byte[] startRow, + byte[] endRow, int targetCount, long targetKvs) { super(request); this.startRow = startRow; this.endRow = endRow; @@ -541,25 +540,27 @@ public SplitStripeCompactionRequest(CompactionRequestImpl request, this.targetKvs = targetKvs; } - public SplitStripeCompactionRequest( - Collection files, byte[] startRow, byte[] endRow, long targetKvs) { + public SplitStripeCompactionRequest(Collection files, byte[] startRow, + byte[] endRow, long targetKvs) { this(files, startRow, endRow, Integer.MAX_VALUE, targetKvs); } - public SplitStripeCompactionRequest(Collection files, - byte[] startRow, byte[] endRow, int targetCount, long targetKvs) { + public SplitStripeCompactionRequest(Collection files, byte[] startRow, + byte[] endRow, int targetCount, long targetKvs) { this(new CompactionRequestImpl(files), startRow, endRow, targetCount, targetKvs); } @Override - public List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController, + User user) throws IOException { return compactor.compact(this.request, this.targetCount, this.targetKvs, this.startRow, this.endRow, this.majorRangeFromRow, this.majorRangeToRow, throughputController, user); } - /** Set major range of the compaction to the entire compaction range. - * See {@link #setMajorRange(byte[], byte[])}. */ + /** + * Set major range of the compaction to the entire compaction range. See + * {@link #setMajorRange(byte[], byte[])}. + */ public void setMajorRangeFull() { setMajorRange(this.startRow, this.endRow); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java index 060a11b41fe6..41e98d212579 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.regionserver.HStore; @@ -102,9 +101,10 @@ public StripeMultiFileWriter createWriter(InternalScanner scanner, FileDetails f }, throughputController, user); } - public List compact(CompactionRequestImpl request, final int targetCount, final long targetSize, - final byte[] left, final byte[] right, byte[] majorRangeFromRow, byte[] majorRangeToRow, - ThroughputController throughputController, User user) throws IOException { + public List compact(CompactionRequestImpl request, final int targetCount, + final long targetSize, final byte[] left, final byte[] right, byte[] majorRangeFromRow, + byte[] majorRangeToRow, ThroughputController throughputController, User user) + throws IOException { if (LOG.isDebugEnabled()) { LOG.debug( "Executing compaction with " + targetSize + " target file size, no more than " + targetCount @@ -125,8 +125,8 @@ public StripeMultiFileWriter createWriter(InternalScanner scanner, FileDetails f } @Override - protected List commitWriter(FileDetails fd, - CompactionRequestImpl request) throws IOException { + protected List commitWriter(FileDetails fd, CompactionRequestImpl request) + throws IOException { List newFiles = writer.commitWriters(fd.maxSeqId, request.isMajor(), request.getFiles()); assert !newFiles.isEmpty() : "Should have produced an empty file to preserve metadata."; return newFiles; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java index 101c9c3d9f6b..fcd5dac2ee3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/AssignRegionHandler.java @@ -80,9 +80,9 @@ private void cleanUpAndReportFailure(IOException error) throws IOException { HRegionServer rs = getServer(); rs.getRegionsInTransitionInRS().remove(regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); if (!rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.FAILED_OPEN, - HConstants.NO_SEQNUM, openProcId, masterSystemTime, regionInfo))) { + HConstants.NO_SEQNUM, openProcId, masterSystemTime, regionInfo))) { throw new IOException( - "Failed to report failed open to master: " + regionInfo.getRegionNameAsString()); + "Failed to report failed open to master: " + regionInfo.getRegionNameAsString()); } } @@ -106,16 +106,16 @@ public void process() throws IOException { if (previous != null) { if (previous) { // The region is opening and this maybe a retry on the rpc call, it is safe to ignore it. - LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + - " - ignoring this new request for this region.", regionName); + LOG.info("Receiving OPEN for {} which we are already trying to OPEN" + + " - ignoring this new request for this region.", + regionName); } else { // The region is closing. This is possible as we will update the region state to CLOSED when // calling reportRegionStateTransition, so the HMaster will think the region is offline, // before we actually close the region, as reportRegionStateTransition is part of the // closing process. 
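The comment in the hunk above explains why an OPEN can arrive while the region is still being closed: the master already sees the region as offline because reportRegionStateTransition runs before the close finishes, so the handler re-submits itself after a growing backoff rather than blocking an executor thread. A standalone sketch of that delayed-resubmit pattern (class and method names are illustrative, not the HBase handler API):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch of re-submitting the same piece of work after an exponentially growing,
// capped backoff instead of sleeping on the executor thread.
public class DelayedResubmitSketch implements Runnable {
  private static final long MAX_BACKOFF_MS = 30_000;
  private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
  private long backoffMs = 100; // initial sleep interval

  @Override
  public void run() {
    if (stillClosing()) {
      long backoff = nextBackoff();
      System.out.println("Region still closing, try again after " + backoff + "ms");
      executor.schedule(this, backoff, TimeUnit.MILLISECONDS); // delayed re-submit
      return;
    }
    System.out.println("Region no longer closing, proceeding with the open");
    executor.shutdown();
  }

  private long nextBackoff() {
    long current = backoffMs;
    backoffMs = Math.min(backoffMs * 2, MAX_BACKOFF_MS); // exponential growth, capped
    return current;
  }

  private boolean stillClosing() {
    return Math.random() < 0.5; // stand-in for the real regions-in-transition check
  }

  public static void main(String[] args) {
    new DelayedResubmitSketch().run();
  }
}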
long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.info( - "Receiving OPEN for {} which we are trying to close, try again after {}ms", + LOG.info("Receiving OPEN for {} which we are trying to close, try again after {}ms", regionName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } @@ -125,7 +125,7 @@ public void process() throws IOException { HRegion region; try { TableDescriptor htd = - tableDesc != null ? tableDesc : rs.getTableDescriptors().get(regionInfo.getTable()); + tableDesc != null ? tableDesc : rs.getTableDescriptors().get(regionInfo.getTable()); if (htd == null) { throw new IOException("Missing table descriptor for " + regionName); } @@ -170,13 +170,13 @@ public static AssignRegionHandler create(HRegionServer server, RegionInfo region EventType eventType; if (regionInfo.isMetaRegion()) { eventType = EventType.M_RS_OPEN_META; - } else if (regionInfo.getTable().isSystemTable() || - (tableDesc != null && tableDesc.getPriority() >= HConstants.ADMIN_QOS)) { - eventType = EventType.M_RS_OPEN_PRIORITY_REGION; - } else { - eventType = EventType.M_RS_OPEN_REGION; - } + } else if (regionInfo.getTable().isSystemTable() + || (tableDesc != null && tableDesc.getPriority() >= HConstants.ADMIN_QOS)) { + eventType = EventType.M_RS_OPEN_PRIORITY_REGION; + } else { + eventType = EventType.M_RS_OPEN_REGION; + } return new AssignRegionHandler(server, regionInfo, openProcId, tableDesc, masterSystemTime, - eventType); + eventType); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java index 38097bafd6e0..6a408d8fe32c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,10 +30,8 @@ public class CloseMetaHandler extends CloseRegionHandler { // Called when regionserver determines its to go down; not master orchestrated - public CloseMetaHandler(final Server server, - final RegionServerServices rsServices, - final RegionInfo regionInfo, - final boolean abort) { + public CloseMetaHandler(final Server server, final RegionServerServices rsServices, + final RegionInfo regionInfo, final boolean abort) { super(server, rsServices, regionInfo, abort, EventType.M_RS_CLOSE_META, null); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java index f9f0e91cc461..cdbd30a29ac5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,25 +31,28 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; /** * Handles closing of a region on a region server. *
<p>
* In normal operation, we use {@link UnassignRegionHandler} closing Regions but when shutting down - * the region server and closing out Regions, we use this handler instead; it does not expect to - * be able to communicate the close back to the Master. - * <p>Expects that the close *has* been registered in the hosting RegionServer before - * submitting this Handler; i.e. rss.getRegionsInTransitionInRS().putIfAbsent( - * this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE); has been called first. - * In here when done, we do the deregister.<p>
+ * the region server and closing out Regions, we use this handler instead; it does not expect to be + * able to communicate the close back to the Master. + * <p> + * Expects that the close *has* been registered in the hosting RegionServer before submitting this + * Handler; i.e. rss.getRegionsInTransitionInRS().putIfAbsent( + * this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE); has been called first. In here + * when done, we do the deregister. + * <p>
      * @see UnassignRegionHandler */ @InterfaceAudience.Private public class CloseRegionHandler extends EventHandler { - // NOTE on priorities shutting down. There are none for close. There are some - // for open. I think that is right. On shutdown, we want the meta to close - // after the user regions have closed. What + // NOTE on priorities shutting down. There are none for close. There are some + // for open. I think that is right. On shutdown, we want the meta to close + // after the user regions have closed. What // about the case where master tells us to shutdown a catalog region and we // have a running queue of user regions to close? private static final Logger LOG = LoggerFactory.getLogger(CloseRegionHandler.class); @@ -58,7 +60,7 @@ public class CloseRegionHandler extends EventHandler { private final RegionServerServices rsServices; private final RegionInfo regionInfo; - // If true, the hosting server is aborting. Region close process is different + // If true, the hosting server is aborting. Region close process is different // when we are aborting. private final boolean abort; private ServerName destination; @@ -67,17 +69,13 @@ public class CloseRegionHandler extends EventHandler { * This method used internally by the RegionServer to close out regions. * @param abort If the regionserver is aborting. */ - public CloseRegionHandler(final Server server, - final RegionServerServices rsServices, - final RegionInfo regionInfo, final boolean abort, - ServerName destination) { - this(server, rsServices, regionInfo, abort, - EventType.M_RS_CLOSE_REGION, destination); + public CloseRegionHandler(final Server server, final RegionServerServices rsServices, + final RegionInfo regionInfo, final boolean abort, ServerName destination) { + this(server, rsServices, regionInfo, abort, EventType.M_RS_CLOSE_REGION, destination); } - protected CloseRegionHandler(final Server server, - final RegionServerServices rsServices, RegionInfo regionInfo, - boolean abort, EventType eventType, ServerName destination) { + protected CloseRegionHandler(final Server server, final RegionServerServices rsServices, + RegionInfo regionInfo, boolean abort, EventType eventType, ServerName destination) { super(server, eventType); this.server = server; this.rsServices = rsServices; @@ -95,7 +93,7 @@ public void process() throws IOException { String name = regionInfo.getEncodedName(); LOG.trace("Processing close of {}", name); // Check that this region is being served here - HRegion region = (HRegion)rsServices.getRegion(name); + HRegion region = (HRegion) rsServices.getRegion(name); try { if (region == null) { LOG.warn("Received CLOSE for region {} but currently not serving - ignoring", name); @@ -113,9 +111,9 @@ public void process() throws IOException { this.rsServices.removeRegion(region, destination); rsServices.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED, - HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo)); + HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo)); - // Done! Region is closed on this RS + // Done! 
Region is closed on this RS LOG.debug("Closed {}", region.getRegionInfo().getRegionNameAsString()); } finally { // Clear any reference in getServer().getRegionsInTransitionInRS() on success or failure, @@ -125,8 +123,9 @@ public void process() throws IOException { } } - @Override protected void handleException(Throwable t) { - server.abort("Unrecoverable exception while closing " + - this.regionInfo.getRegionNameAsString(), t); + @Override + protected void handleException(Throwable t) { + server.abort("Unrecoverable exception while closing " + this.regionInfo.getRegionNameAsString(), + t); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java index 02ed0ef71c59..994067895321 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HandlerUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,8 +34,8 @@ private HandlerUtil() { */ public static RetryCounter getRetryCounter() { return new RetryCounterFactory( - new RetryCounter.RetryConfig().setBackoffPolicy(new RetryCounter.ExponentialBackoffPolicy()) - .setSleepInterval(100).setMaxSleepTime(30000).setMaxAttempts(Integer.MAX_VALUE) - .setTimeUnit(TimeUnit.MILLISECONDS).setJitter(0.01f)).create(); + new RetryCounter.RetryConfig().setBackoffPolicy(new RetryCounter.ExponentialBackoffPolicy()) + .setSleepInterval(100).setMaxSleepTime(30000).setMaxAttempts(Integer.MAX_VALUE) + .setTimeUnit(TimeUnit.MILLISECONDS).setJitter(0.01f)).create(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java index ca5f9e179a9a..c158e8969885 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,9 +31,8 @@ */ @InterfaceAudience.Private public class OpenMetaHandler extends OpenRegionHandler { - public OpenMetaHandler(final Server server, - final RegionServerServices rsServices, RegionInfo regionInfo, - final TableDescriptor htd, long masterSystemTime) { + public OpenMetaHandler(final Server server, final RegionServerServices rsServices, + RegionInfo regionInfo, final TableDescriptor htd, long masterSystemTime) { super(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_META); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java index 1861a2bba332..e886d28ebe1e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenPriorityRegionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.handler; import org.apache.hadoop.hbase.Server; @@ -33,8 +32,8 @@ @InterfaceAudience.Private public class OpenPriorityRegionHandler extends OpenRegionHandler { public OpenPriorityRegionHandler(Server server, RegionServerServices rsServices, - RegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) { + RegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) { super(server, rsServices, regionInfo, htd, masterSystemTime, - EventType.M_RS_OPEN_PRIORITY_REGION); + EventType.M_RS_OPEN_PRIORITY_REGION); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java index 3ae38864ba1e..2100782a8307 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.client.RegionInfo; @@ -36,7 +34,9 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; + /** * Handles opening of a region on a region server. *
<p>
      @@ -55,15 +55,14 @@ public class OpenRegionHandler extends EventHandler { private final TableDescriptor htd; private final long masterSystemTime; - public OpenRegionHandler(final Server server, - final RegionServerServices rsServices, RegionInfo regionInfo, - TableDescriptor htd, long masterSystemTime) { + public OpenRegionHandler(final Server server, final RegionServerServices rsServices, + RegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) { this(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_REGION); } - protected OpenRegionHandler(final Server server, - final RegionServerServices rsServices, final RegionInfo regionInfo, - final TableDescriptor htd, long masterSystemTime, EventType eventType) { + protected OpenRegionHandler(final Server server, final RegionServerServices rsServices, + final RegionInfo regionInfo, final TableDescriptor htd, long masterSystemTime, + EventType eventType) { super(server, eventType); this.rsServices = rsServices; this.regionInfo = regionInfo; @@ -93,28 +92,28 @@ public void process() throws IOException { // Check that this region is not already online if (this.rsServices.getRegion(encodedName) != null) { - LOG.error("Region " + encodedName + - " was already online when we started processing the opening. " + - "Marking this new attempt as failed"); + LOG.error( + "Region " + encodedName + " was already online when we started processing the opening. " + + "Marking this new attempt as failed"); return; } // Check that we're still supposed to open the region. - // If fails, just return. Someone stole the region from under us. - if (!isRegionStillOpening()){ + // If fails, just return. Someone stole the region from under us. + if (!isRegionStillOpening()) { LOG.error("Region " + encodedName + " opening cancelled"); return; } - // Open region. After a successful open, failures in subsequent + // Open region. After a successful open, failures in subsequent // processing needs to do a close as part of cleanup. region = openRegion(); if (region == null) { return; } - if (!updateMeta(region, masterSystemTime) || this.server.isStopped() || - this.rsServices.isStopping()) { + if (!updateMeta(region, masterSystemTime) || this.server.isStopped() + || this.rsServices.isStopping()) { return; } @@ -126,20 +125,20 @@ public void process() throws IOException { this.rsServices.addRegion(region); openSuccessful = true; - // Done! Successful region open + // Done! Successful region open LOG.debug("Opened " + regionName + " on " + this.server.getServerName()); } finally { // Do all clean up here if (!openSuccessful) { doCleanUpOnFailedOpen(region); } - final Boolean current = this.rsServices.getRegionsInTransitionInRS(). - remove(this.regionInfo.getEncodedNameAsBytes()); + final Boolean current = this.rsServices.getRegionsInTransitionInRS() + .remove(this.regionInfo.getEncodedNameAsBytes()); // Let's check if we have met a race condition on open cancellation.... // A better solution would be to not have any race condition. // this.rsServices.getRegionsInTransitionInRS().remove( - // this.regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); + // this.regionInfo.getEncodedNameAsBytes(), Boolean.TRUE); // would help. if (openSuccessful) { if (current == null) { // Should NEVER happen, but let's be paranoid. 
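Several handlers in this hunk coordinate through the server's regions-in-transition map: Boolean.TRUE marks a region that is opening, Boolean.FALSE one that is closing, and the entry is removed when the transition ends, which is also how a cancelled or stolen open is detected. A minimal sketch of that bookkeeping (assumed to sit on a ConcurrentMap; not the HBase implementation):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Illustrative regions-in-transition bookkeeping: TRUE = opening, FALSE = closing,
// absent = not in transition on this server.
public class RegionsInTransitionSketch {
  private final ConcurrentMap<String, Boolean> regionsInTransition = new ConcurrentHashMap<>();

  /** Registers an open attempt; returns false if the region is already transitioning. */
  boolean markOpening(String encodedName) {
    return regionsInTransition.putIfAbsent(encodedName, Boolean.TRUE) == null;
  }

  /** The check an open handler performs before each step: are we still supposed to open? */
  boolean isStillOpening(String encodedName) {
    return Boolean.TRUE.equals(regionsInTransition.get(encodedName));
  }

  /** De-registers the transition once the open (or its failure cleanup) has finished. */
  Boolean clear(String encodedName) {
    return regionsInTransition.remove(encodedName);
  }

  public static void main(String[] args) {
    RegionsInTransitionSketch rit = new RegionsInTransitionSketch();
    System.out.println(rit.markOpening("abc123"));    // true: open registered
    System.out.println(rit.isStillOpening("abc123")); // true: still opening
    System.out.println(rit.clear("abc123"));          // TRUE: we were the ones opening
  }
}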
@@ -163,32 +162,31 @@ private void doCleanUpOnFailedOpen(HRegion region) throws IOException { } } finally { rsServices.reportRegionStateTransition(new RegionStateTransitionContext( - TransitionCode.FAILED_OPEN, HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo)); + TransitionCode.FAILED_OPEN, HConstants.NO_SEQNUM, Procedure.NO_PROC_ID, -1, regionInfo)); } } /** - * Update ZK or META. This can take a while if for example the - * hbase:meta is not available -- if server hosting hbase:meta crashed and we are - * waiting on it to come back -- so run in a thread and keep updating znode - * state meantime so master doesn't timeout our region-in-transition. + * Update ZK or META. This can take a while if for example the hbase:meta is not available -- if + * server hosting hbase:meta crashed and we are waiting on it to come back -- so run in a thread + * and keep updating znode state meantime so master doesn't timeout our region-in-transition. * Caller must cleanup region if this fails. */ private boolean updateMeta(final HRegion r, long masterSystemTime) { if (this.server.isStopped() || this.rsServices.isStopping()) { return false; } - // Object we do wait/notify on. Make it boolean. If set, we're done. + // Object we do wait/notify on. Make it boolean. If set, we're done. // Else, wait. final AtomicBoolean signaller = new AtomicBoolean(false); - PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(r, - this.server, this.rsServices, signaller, masterSystemTime); + PostOpenDeployTasksThread t = + new PostOpenDeployTasksThread(r, this.server, this.rsServices, signaller, masterSystemTime); t.start(); // Post open deploy task: - // meta => update meta location in ZK - // other region => update meta - while (!signaller.get() && t.isAlive() && !this.server.isStopped() && - !this.rsServices.isStopping() && isRegionStillOpening()) { + // meta => update meta location in ZK + // other region => update meta + while (!signaller.get() && t.isAlive() && !this.server.isStopped() + && !this.rsServices.isStopping() && isRegionStillOpening()) { synchronized (signaller) { try { // Wait for 10 seconds, so that server shutdown @@ -199,8 +197,8 @@ private boolean updateMeta(final HRegion r, long masterSystemTime) { } } } - // Is thread still alive? We may have left above loop because server is - // stopping or we timed out the edit. Is so, interrupt it. + // Is thread still alive? We may have left above loop because server is + // stopping or we timed out the edit. Is so, interrupt it. if (t.isAlive()) { if (!signaller.get()) { // Thread still running; interrupt @@ -210,20 +208,19 @@ private boolean updateMeta(final HRegion r, long masterSystemTime) { try { t.join(); } catch (InterruptedException ie) { - LOG.warn("Interrupted joining " + - r.getRegionInfo().getRegionNameAsString(), ie); + LOG.warn("Interrupted joining " + r.getRegionInfo().getRegionNameAsString(), ie); Thread.currentThread().interrupt(); } } - // Was there an exception opening the region? This should trigger on - // InterruptedException too. If so, we failed. + // Was there an exception opening the region? This should trigger on + // InterruptedException too. If so, we failed. return (!Thread.interrupted() && t.getException() == null); } /** - * Thread to run region post open tasks. Call {@link #getException()} after the thread finishes - * to check for exceptions running + * Thread to run region post open tasks. 
Call {@link #getException()} after the thread finishes to + * check for exceptions running * {@link RegionServerServices#postOpenDeployTasks(PostOpenDeployContext)} */ static class PostOpenDeployTasksThread extends Thread { @@ -251,8 +248,8 @@ public void run() { this.services.postOpenDeployTasks( new PostOpenDeployContext(region, Procedure.NO_PROC_ID, masterSystemTime)); } catch (Throwable e) { - String msg = "Exception running postOpenDeployTasks; region=" + - this.region.getRegionInfo().getEncodedName(); + String msg = "Exception running postOpenDeployTasks; region=" + + this.region.getRegionInfo().getEncodedName(); this.exception = e; if (e instanceof IOException && isRegionStillOpening(region.getRegionInfo(), services)) { server.abort(msg, e); @@ -281,28 +278,25 @@ Throwable getException() { private HRegion openRegion() { HRegion region = null; try { - // Instantiate the region. This also periodically tickles OPENING + // Instantiate the region. This also periodically tickles OPENING // state so master doesn't timeout this region in transition. - region = HRegion.openHRegion(this.regionInfo, this.htd, - this.rsServices.getWAL(this.regionInfo), - this.server.getConfiguration(), - this.rsServices, - new CancelableProgressable() { - @Override - public boolean progress() { - if (!isRegionStillOpening()) { - LOG.warn("Open region aborted since it isn't opening any more"); - return false; - } - return true; - } - }); + region = + HRegion.openHRegion(this.regionInfo, this.htd, this.rsServices.getWAL(this.regionInfo), + this.server.getConfiguration(), this.rsServices, new CancelableProgressable() { + @Override + public boolean progress() { + if (!isRegionStillOpening()) { + LOG.warn("Open region aborted since it isn't opening any more"); + return false; + } + return true; + } + }); } catch (Throwable t) { // We failed open. Our caller will see the 'null' return value // and transition the node back to FAILED_OPEN. If that fails, // we rely on the Timeout Monitor in the master to reassign. - LOG.error( - "Failed open of region=" + this.regionInfo.getRegionNameAsString(), t); + LOG.error("Failed open of region=" + this.regionInfo.getRegionNameAsString(), t); } return region; } @@ -314,8 +308,8 @@ private void cleanupFailedOpen(final HRegion region) throws IOException { } } - private static boolean isRegionStillOpening( - RegionInfo regionInfo, RegionServerServices rsServices) { + private static boolean isRegionStillOpening(RegionInfo regionInfo, + RegionServerServices rsServices) { byte[] encodedName = regionInfo.getEncodedNameAsBytes(); Boolean action = rsServices.getRegionsInTransitionInRS().get(encodedName); return Boolean.TRUE.equals(action); // true means opening for RIT diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java index ed1b2c760f9c..f7b7edebda85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/ParallelSeekHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,13 @@ import java.io.IOException; import java.util.concurrent.CountDownLatch; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Handler to seek storefiles in parallel. @@ -41,8 +39,8 @@ public class ParallelSeekHandler extends EventHandler { private CountDownLatch latch; private Throwable err = null; - public ParallelSeekHandler(KeyValueScanner scanner,Cell keyValue, - long readPoint, CountDownLatch latch) { + public ParallelSeekHandler(KeyValueScanner scanner, Cell keyValue, long readPoint, + CountDownLatch latch) { super(null, EventType.RS_PARALLEL_SEEK); this.scanner = scanner; this.keyValue = keyValue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java index 829d0bf01578..9feed9022ac7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.handler; import java.io.IOException; @@ -41,12 +40,12 @@ /** * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to WAL in * secondary region replicas. This means that a secondary region replica can serve some edits from - * it's memstore that are still not flushed from primary. We do not want to allow secondary - * region's seqId to go back in time, when this secondary region is opened elsewhere after a - * crash or region move. We will trigger a flush cache in the primary region replica and wait - * for observing a complete flush cycle before marking the region readsEnabled. This handler does - * the flushing of the primary region replica and ensures that regular region opening is not - * blocked while the secondary replica is blocked on flush. + * it's memstore that are still not flushed from primary. We do not want to allow secondary region's + * seqId to go back in time, when this secondary region is opened elsewhere after a crash or region + * move. We will trigger a flush cache in the primary region replica and wait for observing a + * complete flush cycle before marking the region readsEnabled. This handler does the flushing of + * the primary region replica and ensures that regular region opening is not blocked while the + * secondary replica is blocked on flush. 
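The class javadoc above describes the handshake RegionReplicaFlushHandler implements: keep asking the primary replica to flush, retrying with a pause, and only enable reads on the secondary once the primary has started a flush or written an empty-memstore flush marker. A simplified sketch of that retry loop (illustrative; a fixed pause stands in for the real RetryCounter backoff):

import java.io.IOException;
import java.util.concurrent.TimeUnit;

// Sketch of retrying a flush request against the primary until it succeeds or the
// attempt budget is exhausted.
public class ReplicaFlushRetrySketch {
  interface Primary {
    /** Returns true if a flush was started or a flush WAL marker was written. */
    boolean requestFlush() throws IOException;
  }

  static void waitForPrimaryFlush(Primary primary, int maxAttempts, long pauseMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      if (primary.requestFlush()) {
        System.out.println("Flush triggered on primary; enabling reads on the secondary");
        return;
      }
      System.out.println("Primary did not flush, retry=" + attempt);
      TimeUnit.MILLISECONDS.sleep(pauseMs); // fixed pause here; the real code uses a RetryCounter
    }
    throw new IOException("Cannot cause primary to flush after " + maxAttempts + " retries");
  }

  public static void main(String[] args) throws Exception {
    // Pretend the primary only accepts the flush request on the third attempt.
    int[] calls = { 0 };
    waitForPrimaryFlush(() -> ++calls[0] >= 3, 5, 10);
  }
}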
*/ @InterfaceAudience.Private public class RegionReplicaFlushHandler extends EventHandler { @@ -97,24 +96,26 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { HConstants.DEFAULT_HBASE_CLIENT_PAUSE); int maxAttempts = getRetriesCount(connection.getConfiguration()); - RetryCounter counter = new RetryCounterFactory(maxAttempts, (int)pause).create(); + RetryCounter counter = new RetryCounterFactory(maxAttempts, (int) pause).create(); if (LOG.isDebugEnabled()) { - LOG.debug("RPC'ing to primary " + ServerRegionReplicaUtil. - getRegionInfoForDefaultReplica(region.getRegionInfo()).getRegionNameAsString() + - " from " + region.getRegionInfo().getRegionNameAsString() + " to trigger FLUSH"); + LOG.debug("RPC'ing to primary " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + .getRegionNameAsString() + + " from " + region.getRegionInfo().getRegionNameAsString() + " to trigger FLUSH"); } - while (!region.isClosing() && !region.isClosed() - && !server.isAborted() && !server.isStopped()) { + while (!region.isClosing() && !region.isClosed() && !server.isAborted() + && !server.isStopped()) { // TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we // do not have to wait for the whole flush here, just initiate it. FlushRegionResponse response; try { response = FutureUtils.get(connection.flush(ServerRegionReplicaUtil - .getRegionInfoForDefaultReplica(region.getRegionInfo()).getRegionName(), true)); + .getRegionInfoForDefaultReplica(region.getRegionInfo()).getRegionName(), + true)); } catch (IOException e) { if (e instanceof TableNotFoundException || FutureUtils - .get(connection.getAdmin().isTableDisabled(region.getRegionInfo().getTable()))) { + .get(connection.getAdmin().isTableDisabled(region.getRegionInfo().getTable()))) { return; } if (!counter.shouldRetry()) { @@ -127,7 +128,7 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { // frequent... LOG.debug("Failed to trigger a flush of primary region replica {} of region {}, retry={}", ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) - .getRegionNameAsString(), + .getRegionNameAsString(), region.getRegionInfo().getRegionNameAsString(), counter.getAttemptTimes(), e); try { counter.sleepUntilNextRetry(); @@ -141,11 +142,11 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { // then we have to wait for seeing the flush entry. 
All reads will be rejected until we see // a complete flush cycle or replay a region open event if (LOG.isDebugEnabled()) { - LOG.debug("Triggered flush of primary region replica " + - ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) - .getRegionNameAsString() + - " for " + region.getRegionInfo().getEncodedName() + - "; now waiting and blocking reads until completes a full flush cycle"); + LOG.debug("Triggered flush of primary region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + .getRegionNameAsString() + + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until completes a full flush cycle"); } region.setReadsEnabled(true); break; @@ -153,10 +154,11 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { if (response.hasWroteFlushWalMarker()) { if (response.getWroteFlushWalMarker()) { if (LOG.isDebugEnabled()) { - LOG.debug("Triggered empty flush marker (memstore empty) on primary region replica " + - ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()). - getRegionNameAsString() + " for " + region.getRegionInfo().getEncodedName() + - "; now waiting and blocking reads until observing a flush marker"); + LOG.debug("Triggered empty flush marker (memstore empty) on primary region replica " + + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) + .getRegionNameAsString() + + " for " + region.getRegionInfo().getEncodedName() + + "; now waiting and blocking reads until observing a flush marker"); } region.setReadsEnabled(true); break; @@ -164,24 +166,23 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException { // somehow we were not able to get the primary to write the flush request. It may be // closing or already flushing. Retry flush again after some sleep. if (!counter.shouldRetry()) { - throw new IOException("Cannot cause primary to flush or drop a wal marker after " + - counter.getAttemptTimes() + " retries. Failing opening of this region replica " + - region.getRegionInfo().getRegionNameAsString()); + throw new IOException("Cannot cause primary to flush or drop a wal marker after " + + counter.getAttemptTimes() + " retries. Failing opening of this region replica " + + region.getRegionInfo().getRegionNameAsString()); } else { LOG.warn( - "Cannot cause primary replica {} to flush or drop a wal marker " + - "for region replica {}, retry={}", + "Cannot cause primary replica {} to flush or drop a wal marker " + + "for region replica {}, retry={}", ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()) - .getRegionNameAsString(), + .getRegionNameAsString(), region.getRegionInfo().getRegionNameAsString(), counter.getAttemptTimes()); } } } else { // nothing to do. Are we dealing with an old server? - LOG.warn( - "Was not able to trigger a flush from primary region due to old server version? " + - "Continuing to open the secondary region replica: " + - region.getRegionInfo().getRegionNameAsString()); + LOG.warn("Was not able to trigger a flush from primary region due to old server version? 
" + + "Continuing to open the secondary region replica: " + + region.getRegionInfo().getRegionNameAsString()); break; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java index 2ac55ec48f62..dca6766f6ada 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java @@ -85,12 +85,14 @@ public void process() throws IOException { // reportRegionStateTransition, so the HMaster will think the region is online, before we // actually open the region, as reportRegionStateTransition is part of the opening process. long backoff = retryCounter.getBackoffTimeAndIncrementAttempts(); - LOG.warn("Received CLOSE for {} which we are already " + - "trying to OPEN; try again after {}ms", encodedName, backoff); + LOG.warn( + "Received CLOSE for {} which we are already " + "trying to OPEN; try again after {}ms", + encodedName, backoff); rs.getExecutorService().delayedSubmit(this, backoff, TimeUnit.MILLISECONDS); } else { - LOG.info("Received CLOSE for {} which we are already trying to CLOSE," + - " but not completed yet", encodedName); + LOG.info( + "Received CLOSE for {} which we are already trying to CLOSE," + " but not completed yet", + encodedName); } return; } @@ -120,9 +122,8 @@ public void process() throws IOException { } rs.removeRegion(region, destination); - if (!rs.reportRegionStateTransition( - new RegionStateTransitionContext(TransitionCode.CLOSED, HConstants.NO_SEQNUM, closeProcId, - -1, region.getRegionInfo()))) { + if (!rs.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED, + HConstants.NO_SEQNUM, closeProcId, -1, region.getRegionInfo()))) { throw new IOException("Failed to report close to master: " + regionName); } // Cache the close region procedure id after report region transition succeed. @@ -146,9 +147,9 @@ public static UnassignRegionHandler create(HRegionServer server, String encodedN // if we put the handler into a wrong executor. Region region = server.getRegion(encodedName); EventType eventType = - region != null && region.getRegionInfo().isMetaRegion() ? EventType.M_RS_CLOSE_META - : EventType.M_RS_CLOSE_REGION; + region != null && region.getRegionInfo().isMetaRegion() ? EventType.M_RS_CLOSE_META + : EventType.M_RS_CLOSE_REGION; return new UnassignRegionHandler(server, encodedName, closeProcId, abort, destination, - eventType); + eventType); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java index ffdade15372d..a45d68116cc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,10 +19,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; @@ -35,13 +30,16 @@ import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Handles log splitting a wal - * Used by the zk-based distributed log splitting. Created by ZKSplitLogWorkerCoordination. + * Handles log splitting a wal Used by the zk-based distributed log splitting. Created by + * ZKSplitLogWorkerCoordination. * @deprecated since 2.4.0 and in 3.0.0, to be removed in 4.0.0, replaced by procedure-based - * distributed WAL splitter, see SplitWALManager - */ + * distributed WAL splitter, see SplitWALManager + */ @Deprecated @InterfaceAudience.Private public class WALSplitterHandler extends EventHandler { @@ -53,7 +51,6 @@ public class WALSplitterHandler extends EventHandler { private final SplitLogWorkerCoordination.SplitTaskDetails splitTaskDetails; private final SplitLogWorkerCoordination coordination; - public WALSplitterHandler(final Server server, SplitLogWorkerCoordination coordination, SplitLogWorkerCoordination.SplitTaskDetails splitDetails, CancelableProgressable reporter, AtomicInteger inProgressTasks, TaskExecutor splitTaskExecutor) { @@ -74,31 +71,31 @@ public void process() throws IOException { try { status = this.splitTaskExecutor.exec(splitTaskDetails.getWALFile(), reporter); switch (status) { - case DONE: - coordination.endTask(new SplitLogTask.Done(this.serverName), - SplitLogCounters.tot_wkr_task_done, splitTaskDetails); - break; - case PREEMPTED: - SplitLogCounters.tot_wkr_preempt_task.increment(); - LOG.warn("task execution preempted " + splitTaskDetails.getWALFile()); - break; - case ERR: - if (server != null && !server.isStopped()) { - coordination.endTask(new SplitLogTask.Err(this.serverName), - SplitLogCounters.tot_wkr_task_err, splitTaskDetails); + case DONE: + coordination.endTask(new SplitLogTask.Done(this.serverName), + SplitLogCounters.tot_wkr_task_done, splitTaskDetails); + break; + case PREEMPTED: + SplitLogCounters.tot_wkr_preempt_task.increment(); + LOG.warn("task execution preempted " + splitTaskDetails.getWALFile()); + break; + case ERR: + if (server != null && !server.isStopped()) { + coordination.endTask(new SplitLogTask.Err(this.serverName), + SplitLogCounters.tot_wkr_task_err, splitTaskDetails); + break; + } + // if the RS is exiting then there is probably a tons of stuff + // that can go wrong. Resign instead of signaling error. + // $FALL-THROUGH$ + case RESIGNED: + if (server != null && server.isStopped()) { + LOG.info("task execution interrupted because worker is exiting " + + splitTaskDetails.toString()); + } + coordination.endTask(new SplitLogTask.Resigned(this.serverName), + SplitLogCounters.tot_wkr_task_resigned, splitTaskDetails); break; - } - // if the RS is exiting then there is probably a tons of stuff - // that can go wrong. Resign instead of signaling error. 
- //$FALL-THROUGH$ - case RESIGNED: - if (server != null && server.isStopped()) { - LOG.info("task execution interrupted because worker is exiting " - + splitTaskDetails.toString()); - } - coordination.endTask(new SplitLogTask.Resigned(this.serverName), - SplitLogCounters.tot_wkr_task_resigned, splitTaskDetails); - break; } } finally { LOG.info("Worker " + serverName + " done with task " + splitTaskDetails.toString() + " in " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java index cc48d9ef18a4..76b3b0139af9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSDumpServlet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,14 +38,12 @@ @InterfaceAudience.Private public class RSDumpServlet extends StateDumpServlet { private static final long serialVersionUID = 1L; - private static final String LINE = - "==========================================================="; + private static final String LINE = "==========================================================="; @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws IOException { - HRegionServer hrs = (HRegionServer)getServletContext().getAttribute( - HRegionServer.REGIONSERVER); + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { + HRegionServer hrs = + (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); assert hrs != null : "No RS in context!"; response.setContentType("text/plain"); @@ -60,8 +57,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) OutputStream os = response.getOutputStream(); try (PrintWriter out = new PrintWriter(os)) { - out.println("RegionServer status for " + hrs.getServerName() - + " as of " + new Date()); + out.println("RegionServer status for " + hrs.getServerName() + " as of " + new Date()); out.println("\n\nVersion Info:"); out.println(LINE); @@ -128,29 +124,28 @@ public static void dumpQueue(HRegionServer hrs, PrintWriter out) { } } - public static void dumpCallQueues(HRegionServer hrs, PrintWriter out) { CallQueueInfo callQueueInfo = hrs.getRpcServer().getScheduler().getCallQueueInfo(); - for(String queueName: callQueueInfo.getCallQueueNames()) { + for (String queueName : callQueueInfo.getCallQueueNames()) { out.println("\nQueue Name: " + queueName); long totalCallCount = 0L, totalCallSize = 0L; - for (String methodName: callQueueInfo.getCalledMethodNames(queueName)) { + for (String methodName : callQueueInfo.getCalledMethodNames(queueName)) { long thisMethodCount, thisMethodSize; thisMethodCount = callQueueInfo.getCallMethodCount(queueName, methodName); thisMethodSize = callQueueInfo.getCallMethodSize(queueName, methodName); - out.println("Method in call: "+methodName); - out.println("Total call count for method: "+thisMethodCount); - out.println("Total call size for method (bytes): "+thisMethodSize); + out.println("Method in call: " + methodName); + out.println("Total call count for method: " + thisMethodCount); + out.println("Total call size for method (bytes): " + thisMethodSize); totalCallCount += thisMethodCount; totalCallSize += 
thisMethodSize; } - out.println("Total call count for queue: "+totalCallCount); - out.println("Total call size for queue (bytes): "+totalCallSize); + out.println("Total call count for queue: " + totalCallCount); + out.println("Total call size for queue (bytes): " + totalCallSize); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java index f2d8d48865ce..4582014345ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/http/RSStatusServlet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,15 +18,13 @@ package org.apache.hadoop.hbase.regionserver.http; import java.io.IOException; - import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.yetus.audience.InterfaceAudience; - import org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class RSStatusServlet extends HttpServlet { @@ -35,8 +32,9 @@ public class RSStatusServlet extends HttpServlet { @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) - throws ServletException, IOException { - HRegionServer hrs = (HRegionServer)getServletContext().getAttribute(HRegionServer.REGIONSERVER); + throws ServletException, IOException { + HRegionServer hrs = + (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER); assert hrs != null : "No RS in context!"; resp.setContentType("text/html"); @@ -48,14 +46,10 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) } RSStatusTmpl tmpl = new RSStatusTmpl(); - if (req.getParameter("format") != null) - tmpl.setFormat(req.getParameter("format")); - if (req.getParameter("filter") != null) - tmpl.setFilter(req.getParameter("filter")); - if (req.getParameter("bcn") != null) - tmpl.setBcn(req.getParameter("bcn")); - if (req.getParameter("bcv") != null) - tmpl.setBcv(req.getParameter("bcv")); + if (req.getParameter("format") != null) tmpl.setFormat(req.getParameter("format")); + if (req.getParameter("filter") != null) tmpl.setFilter(req.getParameter("filter")); + if (req.getParameter("bcn") != null) tmpl.setBcn(req.getParameter("bcn")); + if (req.getParameter("bcv") != null) tmpl.setBcv(req.getParameter("bcv")); tmpl.render(resp.getWriter(), hrs); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java index 35726ab0f2e4..5f022e0409f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnCount.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java index bd6cb20d8293..0254e546c79a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ColumnTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,34 +18,32 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; +import org.apache.yetus.audience.InterfaceAudience; /** - * Implementing classes of this interface will be used for the tracking - * and enforcement of columns and numbers of versions and timeToLive during - * the course of a Get or Scan operation. + * Implementing classes of this interface will be used for the tracking and enforcement of columns + * and numbers of versions and timeToLive during the course of a Get or Scan operation. *
<p>
* Currently there are two different types of Store/Family-level queries. - * <ul><li>{@link ExplicitColumnTracker} is used when the query specifies - * one or more column qualifiers to return in the family.</li> - * <li>{@link ScanWildcardColumnTracker} is used when no columns are - * explicitly specified.</li> + * <ul> + * <li>{@link ExplicitColumnTracker} is used when the query specifies one or more column qualifiers + * to return in the family.</li> + * <li>{@link ScanWildcardColumnTracker} is used when no columns are explicitly specified.</li> * </ul> * <p>
* This class is utilized by {@link ScanQueryMatcher} mainly through two methods: - * <ul><li>{@link #checkColumn} is called when a Put satisfies all other - * conditions of the query.</li> - * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher - * believes that the current column should be skipped (by timestamp, filter etc.)</li> + * <ul> + * <li>{@link #checkColumn} is called when a Put satisfies all other conditions of the query.</li> + * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher believes that the current + * column should be skipped (by timestamp, filter etc.)</li> * </ul> * <p>
* These two methods returns a - * {@link org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode} - * to define what action should be taken. + * {@link org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode} to define + * what action should be taken. * <p>
          * This class is NOT thread-safe as queries are never multi-threaded */ @@ -56,10 +53,9 @@ public interface ColumnTracker extends ShipperListener { /** * Checks if the column is present in the list of requested columns by returning the match code * instance. It does not check against the number of versions for the columns asked for. To do the - * version check, one has to call {@link #checkVersions(Cell, long, byte, boolean)} - * method based on the return type (INCLUDE) of this method. The values that can be returned by - * this method are {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and - * {@link MatchCode#SEEK_NEXT_ROW}. + * version check, one has to call {@link #checkVersions(Cell, long, byte, boolean)} method based + * on the return type (INCLUDE) of this method. The values that can be returned by this method are + * {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and {@link MatchCode#SEEK_NEXT_ROW}. * @param cell a cell with the column to match against * @param type The type of the Cell * @return The match code instance. @@ -71,12 +67,11 @@ public interface ColumnTracker extends ShipperListener { /** * Keeps track of the number of versions for the columns asked for. It assumes that the user has * already checked if the cell needs to be included by calling the - * {@link #checkColumn(Cell, byte)} method. The enum values returned by this method - * are {@link MatchCode#SKIP}, {@link MatchCode#INCLUDE}, - * {@link MatchCode#INCLUDE_AND_SEEK_NEXT_COL} and {@link MatchCode#INCLUDE_AND_SEEK_NEXT_ROW}. - * Implementations which include all the columns could just return {@link MatchCode#INCLUDE} in - * the {@link #checkColumn(Cell, byte)} method and perform all the operations in this - * checkVersions method. + * {@link #checkColumn(Cell, byte)} method. The enum values returned by this method are + * {@link MatchCode#SKIP}, {@link MatchCode#INCLUDE}, {@link MatchCode#INCLUDE_AND_SEEK_NEXT_COL} + * and {@link MatchCode#INCLUDE_AND_SEEK_NEXT_ROW}. Implementations which include all the columns + * could just return {@link MatchCode#INCLUDE} in the {@link #checkColumn(Cell, byte)} method and + * perform all the operations in this checkVersions method. * @param cell a cell with the column to match against * @param timestamp The timestamp of the cell. * @param type the type of the key value (Put/Delete) @@ -88,26 +83,23 @@ public interface ColumnTracker extends ShipperListener { */ ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, boolean ignoreCount) throws IOException; + /** * Resets the Matcher */ void reset(); /** - * * @return true when done. */ boolean done(); /** - * Used by matcher and scan/get to get a hint of the next column - * to seek to after checkColumn() returns SKIP. Returns the next interesting - * column we want, or NULL there is none (wildcard scanner). - * - * Implementations aren't required to return anything useful unless the most recent - * call was to checkColumn() and the return code was SKIP. This is pretty implementation - * detail-y, but optimizations are like that. - * + * Used by matcher and scan/get to get a hint of the next column to seek to after checkColumn() + * returns SKIP. Returns the next interesting column we want, or NULL there is none (wildcard + * scanner). Implementations aren't required to return anything useful unless the most recent call + * was to checkColumn() and the return code was SKIP. This is pretty implementation detail-y, but + * optimizations are like that. 
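The ColumnTracker javadoc in this hunk describes a two-phase contract: checkColumn() first decides whether the qualifier is wanted at all, and only when it answers INCLUDE does the caller go on to checkVersions() to enforce the per-column version limit. A small self-contained sketch of that flow (hypothetical classes, not the HBase interface):

// Two-phase column matching: checkColumn() picks the qualifier, checkVersions()
// enforces the version limit and asks the scanner to seek past exhausted columns.
public class ColumnMatchSketch {
  enum MatchCode { INCLUDE, SKIP, SEEK_NEXT_COL, SEEK_NEXT_ROW }

  /** A wildcard-style tracker: every qualifier is wanted, versions limited per column. */
  static class WildcardTracker {
    private final int maxVersions;
    private String currentQualifier;
    private int versionsSeen;

    WildcardTracker(int maxVersions) {
      this.maxVersions = maxVersions;
    }

    MatchCode checkColumn(String qualifier) {
      if (!qualifier.equals(currentQualifier)) {
        currentQualifier = qualifier;
        versionsSeen = 0;
      }
      return MatchCode.INCLUDE; // wildcard: every qualifier is a candidate
    }

    MatchCode checkVersions(String qualifier) {
      versionsSeen++;
      return versionsSeen <= maxVersions ? MatchCode.INCLUDE : MatchCode.SEEK_NEXT_COL;
    }
  }

  public static void main(String[] args) {
    WildcardTracker tracker = new WildcardTracker(2);
    for (String q : new String[] { "a", "a", "a", "b" }) {
      MatchCode code = tracker.checkColumn(q);
      if (code == MatchCode.INCLUDE) {
        code = tracker.checkVersions(q);
      }
      System.out.println(q + " -> " + code); // a, a -> INCLUDE; third a -> SEEK_NEXT_COL; b -> INCLUDE
    }
  }
}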
* @return null, or a ColumnCount that we should seek to */ ColumnCount getColumnHint(); @@ -119,9 +111,8 @@ ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, MatchCode getNextRowOrNextColumn(Cell cell); /** - * Give the tracker a chance to declare it's done based on only the timestamp - * to allow an early out. - * + * Give the tracker a chance to declare it's done based on only the timestamp to allow an early + * out. * @param timestamp * @return true to early out based on timestamp. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java index f9fb6029db31..0f7e39fdc88a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,15 +20,14 @@ import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for compaction. @@ -108,8 +107,8 @@ public static CompactionScanQueryMatcher create(ScanInfo scanInfo, ScanType scan long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now, byte[] dropDeletesFromRow, byte[] dropDeletesToRow, RegionCoprocessorHost regionCoprocessorHost) throws IOException { - Pair trackers = getTrackers(regionCoprocessorHost, null, - scanInfo,oldestUnexpiredTS, null); + Pair trackers = + getTrackers(regionCoprocessorHost, null, scanInfo, oldestUnexpiredTS, null); DeleteTracker deleteTracker = trackers.getFirst(); ColumnTracker columnTracker = trackers.getSecond(); if (dropDeletesFromRow == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java index be9c51eca857..e70b7bbb6371 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver.querymatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.regionserver.ShipperListener; +import org.apache.yetus.audience.InterfaceAudience; /** * This interface is used for the tracking and enforcement of Deletes during the course of a Get or @@ -80,9 +80,9 @@ enum DeleteResult { FAMILY_VERSION_DELETED, // The Cell is deleted by a delete family version. COLUMN_DELETED, // The Cell is deleted by a delete column. VERSION_DELETED, // The Cell is deleted by a version delete. - NOT_DELETED, - VERSION_MASKED // The Cell is masked by max number of versions which is considered as - // deleted in strong semantics of versions(See MvccTracker) + NOT_DELETED, VERSION_MASKED // The Cell is masked by max number of versions which is considered + // as + // deleted in strong semantics of versions(See MvccTracker) } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java index c9899d510416..b397b369668d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +19,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * A query matcher for compaction which can drop delete markers. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java index c0f13c0ac554..c26db09c55f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
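The DeleteResult values being reflowed above are what a query matcher branches on after feeding delete markers into the tracker. A rough sketch of that consumer side, assuming an already-built DeleteTracker (for example the ScanDeleteTracker changed later in this patch) and treating VERSION_MASKED like a delete, as the enum comment describes:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult;

final class DeleteTrackerUsageSketch {
  // Illustrative helper: would this put cell still be visible given the markers seen so far?
  static boolean isVisible(DeleteTracker deletes, Cell putCell) {
    DeleteResult result = deletes.isDeleted(putCell);
    switch (result) {
      case NOT_DELETED:
        return true;   // no marker covers this cell
      case VERSION_MASKED:
        return false;  // hidden by max-versions accounting, treated as deleted
      default:
        return false;  // one of the *_DELETED results from an actual delete marker
    }
  }
}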
See the NOTICE file * distributed with this work for additional information @@ -19,13 +19,12 @@ import java.io.IOException; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; +import org.apache.yetus.audience.InterfaceAudience; /** * This class is used for the tracking and enforcement of columns and numbers of versions during the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java index a486bec4377e..1a53b8bcb667 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,16 +18,15 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * A compaction query matcher that always return INCLUDE and drops nothing. */ @InterfaceAudience.Private -public class IncludeAllCompactionQueryMatcher extends MinorCompactionScanQueryMatcher{ +public class IncludeAllCompactionQueryMatcher extends MinorCompactionScanQueryMatcher { public IncludeAllCompactionQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes, ColumnTracker columns, long readPointToUse, long oldestUnexpiredTS, long now) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java index 2f02d77d0f5b..98b65e0df99d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for major compaction. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java index b3815dae1e73..ab7158885f88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/MinorCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for minor compaction. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java index 3c9a541d0b38..404437b9105d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
          + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -26,18 +26,17 @@ import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue.Type; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; +import org.apache.yetus.audience.InterfaceAudience; /** - * A tracker both implementing ColumnTracker and DeleteTracker, used for mvcc-sensitive scanning. - * We should make sure in one QueryMatcher the ColumnTracker and DeleteTracker is the same instance. + * A tracker both implementing ColumnTracker and DeleteTracker, used for mvcc-sensitive scanning. We + * should make sure in one QueryMatcher the ColumnTracker and DeleteTracker is the same instance. */ @InterfaceAudience.Private public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { @@ -71,13 +70,12 @@ public class NewVersionBehaviorTracker implements ColumnTracker, DeleteTracker { /** * Note maxVersion and minVersion must set according to cf's conf, not user's scan parameter. - * - * @param columns columns specified user in query - * @param comparartor the cell comparator - * @param minVersion The minimum number of versions to keep(used when TTL is set). - * @param maxVersion The maximum number of versions in CF's conf + * @param columns columns specified user in query + * @param comparartor the cell comparator + * @param minVersion The minimum number of versions to keep(used when TTL is set). + * @param maxVersion The maximum number of versions in CF's conf * @param resultMaxVersions maximum versions to return per column, which may be different from - * maxVersion + * maxVersion * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL */ public NewVersionBehaviorTracker(NavigableSet columns, CellComparator comparartor, @@ -103,8 +101,8 @@ public void beforeShipped() throws IOException { } /** - * A data structure which contains infos we need that happens before this node's mvcc and - * after the previous node's mvcc. A node means there is a version deletion at the mvcc and ts. + * A data structure which contains infos we need that happens before this node's mvcc and after + * the previous node's mvcc. A node means there is a version deletion at the mvcc and ts. */ protected class DeleteVersionsNode { public long ts; @@ -158,11 +156,10 @@ protected DeleteVersionsNode getDeepCopy() { } /** - * Reset the map if it is different with the last Cell. - * Save the cq array/offset/length for next Cell. - * - * @return If this put has duplicate ts with last cell, return the mvcc of last cell. - * Else return MAX_VALUE. + * Reset the map if it is different with the last Cell. Save the cq array/offset/length for next + * Cell. + * @return If this put has duplicate ts with last cell, return the mvcc of last cell. Else return + * MAX_VALUE. 
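Because NewVersionBehaviorTracker implements both ColumnTracker and DeleteTracker, the requirement stated above (one and the same instance serving both roles inside a query matcher) can be sketched roughly as below. The argument order is assumed to follow the @param list above, and the factory method itself is hypothetical:

import java.util.NavigableSet;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.regionserver.querymatcher.ColumnTracker;
import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
import org.apache.hadoop.hbase.regionserver.querymatcher.NewVersionBehaviorTracker;
import org.apache.hadoop.hbase.util.Pair;

final class NewVersionBehaviorTrackerSketch {
  // Illustrative factory: both tracker roles are served by the single instance returned.
  static Pair<DeleteTracker, ColumnTracker> build(NavigableSet<byte[]> columns, int minVersions,
    int maxVersions, int resultMaxVersions, long oldestUnexpiredTS) {
    NewVersionBehaviorTracker tracker = new NewVersionBehaviorTracker(columns,
      CellComparator.getInstance(), minVersions, maxVersions, resultMaxVersions,
      oldestUnexpiredTS);
    // Same object handed out for both roles, as the class javadoc above requires.
    return new Pair<>(tracker, tracker);
  }
}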
*/ protected long prepare(Cell cell) { boolean matchCq = @@ -196,25 +193,25 @@ public void add(Cell cell) { prepare(cell); byte type = cell.getTypeByte(); switch (Type.codeToType(type)) { - // By the order of seen. We put null cq at first. - case DeleteFamily: // Delete all versions of all columns of the specified family - delFamMap.put(cell.getSequenceId(), + // By the order of seen. We put null cq at first. + case DeleteFamily: // Delete all versions of all columns of the specified family + delFamMap.put(cell.getSequenceId(), new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId())); - break; - case DeleteFamilyVersion: // Delete all columns of the specified family and specified version - delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - - // These two kinds of markers are mix with Puts. - case DeleteColumn: // Delete all versions of the specified column - delColMap.put(cell.getSequenceId(), + break; + case DeleteFamilyVersion: // Delete all columns of the specified family and specified version + delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + + // These two kinds of markers are mix with Puts. + case DeleteColumn: // Delete all versions of the specified column + delColMap.put(cell.getSequenceId(), new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId())); - break; - case Delete: // Delete the specified version of the specified column. - delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - default: - throw new AssertionError("Unknown delete marker type for " + cell); + break; + case Delete: // Delete the specified version of the specified column. + delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + default: + throw new AssertionError("Unknown delete marker type for " + cell); } } @@ -238,9 +235,8 @@ public DeleteResult isDeleted(Cell cell) { deleteMvcc = tail.first(); } } - SortedMap> subMap = - node.mvccCountingMap - .subMap(cell.getSequenceId(), true, Math.min(duplicateMvcc, deleteMvcc), true); + SortedMap> subMap = node.mvccCountingMap.subMap(cell.getSequenceId(), + true, Math.min(duplicateMvcc, deleteMvcc), true); for (Map.Entry> seg : subMap.entrySet()) { if (seg.getValue().size() >= maxVersions) { return DeleteResult.VERSION_MASKED; @@ -272,17 +268,17 @@ public void update() { // ignore } - //ColumnTracker + // ColumnTracker @Override public MatchCode checkColumn(Cell cell, byte type) throws IOException { if (columns == null) { - return MatchCode.INCLUDE; + return MatchCode.INCLUDE; } while (!done()) { - int c = CellUtil.compareQualifiers(cell, - columns[columnIndex], 0, columns[columnIndex].length); + int c = + CellUtil.compareQualifiers(cell, columns[columnIndex], 0, columns[columnIndex].length); if (c < 0) { return MatchCode.SEEK_NEXT_COL; } @@ -299,8 +295,8 @@ public MatchCode checkColumn(Cell cell, byte type) throws IOException { } @Override - public MatchCode checkVersions(Cell cell, long timestamp, byte type, - boolean ignoreCount) throws IOException { + public MatchCode checkVersions(Cell cell, long timestamp, byte type, boolean ignoreCount) + throws IOException { assert !PrivateCellUtil.isDelete(type); // We drop old version in #isDeleted, so here we won't SKIP because of versioning. But we should // consider TTL. 
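The switch reformatted above distinguishes the four delete marker types by scope. A small, self-contained reminder of what each one covers; the helper is hypothetical, and the meanings are taken from the comments in add() above:

import org.apache.hadoop.hbase.KeyValue.Type;

final class DeleteMarkerScopeSketch {
  // Illustrative: the scope each delete marker type has when it is recorded by add().
  static String scopeOf(byte typeByte) {
    switch (Type.codeToType(typeByte)) {
      case DeleteFamily:        return "all versions of all columns of the family";
      case DeleteFamilyVersion: return "all columns of the family at one specific version";
      case DeleteColumn:        return "all versions of the specified column";
      case Delete:              return "the specified version of the specified column";
      default:                  return "not a delete marker";
    }
  }
}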
@@ -344,7 +340,7 @@ public void reset() { resetInternal(); } - protected void resetInternal(){ + protected void resetInternal() { delFamMap.put(Long.MAX_VALUE, new DeleteVersionsNode()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java index c755ff5c4556..4134ed213636 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,13 +18,12 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for normal user scan. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java index ed9ba58c9901..5a86ea265c6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for raw scan. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java index 26da698f4774..6ccefb3406d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; import java.util.SortedSet; import java.util.TreeSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * This class is responsible for the tracking and enforcement of Deletes during the course of a Scan @@ -133,8 +131,8 @@ public DeleteResult isDeleted(Cell cell) { deleteCell = null; } else { throw new IllegalStateException("isDelete failed: deleteBuffer=" - + Bytes.toStringBinary(deleteCell.getQualifierArray(), - deleteCell.getQualifierOffset(), deleteCell.getQualifierLength()) + + Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(), + deleteCell.getQualifierLength()) + ", qualifier=" + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java index 5833d10877e0..a22b9603bcc4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Iterator; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; @@ -226,7 +225,6 @@ protected final MatchCode checkDeleted(DeleteTracker deletes, Cell cell) { } } - /** * Determines if the caller should do one of several things: *

            @@ -301,7 +299,8 @@ public Cell getKeyForNextColumn(Cell cell) { if (nextKey != cell) { return nextKey; } - // The cell is at the end of row/family/qualifier, so it is impossible to find any DeleteFamily cells. + // The cell is at the end of row/family/qualifier, so it is impossible to find any + // DeleteFamily cells. // Let us seek to next column. } ColumnCount nextColumn = columns.getColumnHint(); @@ -319,8 +318,8 @@ public Cell getKeyForNextColumn(Cell cell) { * @return result of the compare between the indexed key and the key portion of the passed cell */ public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { - return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, 0, - 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, + null, 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } /** @@ -331,8 +330,8 @@ public int compareKeyForNextRow(Cell nextIndexed, Cell currentCell) { public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { ColumnCount nextColumn = columns.getColumnHint(); if (nextColumn == null) { - return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, - 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, + null, 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } else { return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(), @@ -406,7 +405,7 @@ protected static Pair getTrackers(RegionCoprocesso oldestUnexpiredTS, scanInfo.getComparator()); } else { columnTracker = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), - maxVersionToCheck, oldestUnexpiredTS); + maxVersionToCheck, oldestUnexpiredTS); } return new Pair<>(deleteTracker, columnTracker); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java index f2ad1e6b87c9..53027c43d35b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
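The getTrackers() hunk above shows the explicit-columns branch; together with the wildcard tracker whose constructor appears just below, the selection reduces to which kind of ColumnTracker matches the scan shape. A condensed sketch of that choice, assuming the same inputs the real method has in scope, and omitting the coprocessor-supplied and new-version-behavior paths:

import java.io.IOException;
import java.util.NavigableSet;
import org.apache.hadoop.hbase.regionserver.ScanInfo;
import org.apache.hadoop.hbase.regionserver.querymatcher.ColumnTracker;
import org.apache.hadoop.hbase.regionserver.querymatcher.ExplicitColumnTracker;
import org.apache.hadoop.hbase.regionserver.querymatcher.ScanWildcardColumnTracker;

final class ColumnTrackerSelectionSketch {
  static ColumnTracker chooseColumnTracker(NavigableSet<byte[]> columns, ScanInfo scanInfo,
    int maxVersionToCheck, long oldestUnexpiredTS) throws IOException {
    if (columns == null || columns.isEmpty()) {
      // No explicit qualifiers on the Get/Scan: track whatever columns show up.
      return new ScanWildcardColumnTracker(scanInfo.getMinVersions(), maxVersionToCheck,
        oldestUnexpiredTS, scanInfo.getComparator());
    }
    // Explicit qualifiers: track exactly the requested columns and their version counts.
    return new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersionToCheck,
      oldestUnexpiredTS);
  }
}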
*/ - package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Keeps track of the columns for a scan if they are not explicitly specified @@ -50,6 +48,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { private long oldestStamp; private final CellComparator comparator; + /** * Return maxVersions of every row. * @param minVersion Minimum number of versions to keep @@ -57,8 +56,8 @@ public class ScanWildcardColumnTracker implements ColumnTracker { * @param oldestUnexpiredTS oldest timestamp that has not expired according to the TTL. * @param comparator used to compare the qualifier of cell */ - public ScanWildcardColumnTracker(int minVersion, int maxVersion, - long oldestUnexpiredTS, CellComparator comparator) { + public ScanWildcardColumnTracker(int minVersion, int maxVersion, long oldestUnexpiredTS, + CellComparator comparator) { this.maxVersions = maxVersion; this.minVersions = minVersion; this.oldestStamp = oldestUnexpiredTS; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java index 763735e10786..3533018f0431 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,10 @@ package org.apache.hadoop.hbase.regionserver.querymatcher; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for stripe compaction if range drop deletes is used. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java index cc994466b332..0157670f6124 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,12 +19,10 @@ import java.io.IOException; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.Filter.ReturnCode; @@ -32,6 +30,7 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * Query matcher for user scan. @@ -74,9 +73,8 @@ protected UserScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker colum this.hasNullColumn = hasNullColumn; this.filter = scan.getFilter(); if (this.filter != null) { - this.versionsAfterFilter = - scan.isRaw() ? scan.getMaxVersions() : Math.min(scan.getMaxVersions(), - scanInfo.getMaxVersions()); + this.versionsAfterFilter = scan.isRaw() ? scan.getMaxVersions() + : Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions()); } else { this.versionsAfterFilter = 0; } @@ -152,8 +150,8 @@ protected final MatchCode matchColumn(Cell cell, long timestamp, byte typeByte) break; } - return filter == null ? matchCode : mergeFilterResponse(cell, matchCode, - filter.filterCell(cell)); + return filter == null ? matchCode + : mergeFilterResponse(cell, matchCode, filter.filterCell(cell)); } /** @@ -280,8 +278,8 @@ public static UserScanQueryMatcher create(Scan scan, ScanInfo scanInfo, RegionCoprocessorHost regionCoprocessorHost) throws IOException { boolean hasNullColumn = !(columns != null && columns.size() != 0 && columns.first().length != 0); - Pair trackers = getTrackers(regionCoprocessorHost, columns, - scanInfo, oldestUnexpiredTS, scan); + Pair trackers = + getTrackers(regionCoprocessorHost, columns, scanInfo, oldestUnexpiredTS, scan); DeleteTracker deleteTracker = trackers.getFirst(); ColumnTracker columnTracker = trackers.getSecond(); if (scan.isRaw()) { @@ -289,7 +287,7 @@ public static UserScanQueryMatcher create(Scan scan, ScanInfo scanInfo, oldestUnexpiredTS, now); } else { return NormalUserScanQueryMatcher.create(scan, scanInfo, columnTracker, deleteTracker, - hasNullColumn, oldestUnexpiredTS, now); + hasNullColumn, oldestUnexpiredTS, now); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationBufferManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationBufferManager.java index bda3cb42b33b..119992d92afa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationBufferManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationBufferManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
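The versionsAfterFilter expression reformatted above caps how many versions a filtered, non-raw user scan may return. A tiny worked example with made-up numbers:

final class VersionsAfterFilterExample {
  public static void main(String[] args) {
    int scanMaxVersions = 5;  // versions the client asked for on the Scan
    int cfMaxVersions = 3;    // versions the column family is configured to keep
    boolean raw = false;      // a raw scan would skip the cap entirely
    int versionsAfterFilter =
      raw ? scanMaxVersions : Math.min(scanMaxVersions, cfMaxVersions);
    System.out.println(versionsAfterFilter); // prints 3
  }
}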
See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,7 @@ public class RegionReplicationBufferManager { public static final long MAX_PENDING_SIZE_DEFAULT = 100L * 1024 * 1024; public static final String SOFT_LIMIT_PERCENTAGE = - "hbase.region.read-replica.sink.max-pending-size.soft-limit-percentage"; + "hbase.region.read-replica.sink.max-pending-size.soft-limit-percentage"; public static final float SOFT_LIMIT_PERCENTAGE_DEFAULT = 0.8f; @@ -76,11 +76,12 @@ public RegionReplicationBufferManager(RegionServerServices rsServices) { Configuration conf = rsServices.getConfiguration(); this.maxPendingSize = conf.getLong(MAX_PENDING_SIZE, MAX_PENDING_SIZE_DEFAULT); this.softMaxPendingSize = - (long) (conf.getFloat(SOFT_LIMIT_PERCENTAGE, SOFT_LIMIT_PERCENTAGE_DEFAULT) * maxPendingSize); + (long) (conf.getFloat(SOFT_LIMIT_PERCENTAGE, SOFT_LIMIT_PERCENTAGE_DEFAULT) + * maxPendingSize); this.executor = new ThreadPoolExecutor( - 1, 1, 1, TimeUnit.SECONDS, new SynchronousQueue<>(), new ThreadFactoryBuilder() - .setDaemon(true).setNameFormat("Region-Replication-Flusher-%d").build(), - (r, e) -> LOG.debug("A flush task is ongoing, drop the new scheduled one")); + 1, 1, 1, TimeUnit.SECONDS, new SynchronousQueue<>(), new ThreadFactoryBuilder() + .setDaemon(true).setNameFormat("Region-Replication-Flusher-%d").build(), + (r, e) -> LOG.debug("A flush task is ongoing, drop the new scheduled one")); executor.allowCoreThreadTimeOut(true); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationFlushRequester.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationFlushRequester.java index 34313241d1f6..7cce70efe094 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationFlushRequester.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationFlushRequester.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
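The buffer manager's flusher above is a deliberately tiny pool: one thread, a SynchronousQueue so nothing queues up, and a rejection handler that drops a new flush request while one is already running. The same shape in isolation, using plain JDK classes only; the debug log call is replaced with a println so the snippet stands alone:

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

final class SingleFlusherExecutorSketch {
  static ThreadPoolExecutor create() {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 1, 1, TimeUnit.SECONDS,
      new SynchronousQueue<>(),
      (r, e) -> System.out.println("A flush task is ongoing, drop the new scheduled one"));
    // Let the single core thread die when idle, as the original code does.
    executor.allowCoreThreadTimeOut(true);
    return executor;
  }
}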
See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ class RegionReplicationFlushRequester { * The minimum interval between two flush requests */ public static final String MIN_INTERVAL_SECS = - "hbase.region.read-replica.sink.flush.min-interval.secs"; + "hbase.region.read-replica.sink.flush.min-interval.secs"; public static final int MIN_INTERVAL_SECS_DEFAULT = 30; @@ -79,10 +79,12 @@ private static HashedWheelTimer getTimer() { if (timer != null) { return timer; } - timer = new HashedWheelTimer( - new ThreadFactoryBuilder().setNameFormat("RegionReplicationFlushRequester-Timer-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), - 500, TimeUnit.MILLISECONDS); + timer = + new HashedWheelTimer( + new ThreadFactoryBuilder() + .setNameFormat("RegionReplicationFlushRequester-Timer-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + 500, TimeUnit.MILLISECONDS); TIMER = timer; } return timer; @@ -122,7 +124,7 @@ synchronized void requestFlush(long sequenceId) { HashedWheelTimer timer = getTimer(); pendingFlushRequestSequenceId = sequenceId; pendingFlushRequest = - timer.newTimeout(this::flush, minIntervalSecs - elapsedSecs, TimeUnit.SECONDS); + timer.newTimeout(this::flush, minIntervalSecs - elapsedSecs, TimeUnit.SECONDS); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationSink.java index cd5d30707d9e..520397d485a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/regionreplication/RegionReplicationSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,7 +73,7 @@ public class RegionReplicationSink { public static final long RPC_TIMEOUT_MS_DEFAULT = 1000; public static final String OPERATION_TIMEOUT_MS = - "hbase.region.read-replica.sink.operation.timeout.ms"; + "hbase.region.read-replica.sink.operation.timeout.ms"; public static final long OPERATION_TIMEOUT_MS_DEFAULT = 5000; @@ -81,12 +81,12 @@ public class RegionReplicationSink { // refreshStoreFiles call at remote side so it will likely to spend more time. And also a meta // edit is more important for fixing inconsistent state so it worth to wait for more time. 
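The constants reindented above and just below are ordinary configuration keys, so the region replication sink can be tuned like any other HBase setting. A sketch using only key strings that appear in this patch; the values are arbitrary examples rather than recommendations (the defaults shown in the patch are 0.8, 30 and 5000 respectively):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class RegionReplicationSinkTuningSketch {
  static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the pending-size budget at which an early flush is requested.
    conf.setFloat("hbase.region.read-replica.sink.max-pending-size.soft-limit-percentage", 0.9f);
    // Minimum number of seconds between two flush requests from the flush requester.
    conf.setInt("hbase.region.read-replica.sink.flush.min-interval.secs", 10);
    // Operation timeout, in milliseconds, for replicating a normal edit to secondaries.
    conf.setLong("hbase.region.read-replica.sink.operation.timeout.ms", 10000);
    return conf;
  }
}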
public static final String META_EDIT_RPC_TIMEOUT_MS = - "hbase.region.read-replica.sink.meta-edit.rpc.timeout.ms"; + "hbase.region.read-replica.sink.meta-edit.rpc.timeout.ms"; public static final long META_EDIT_RPC_TIMEOUT_MS_DEFAULT = 15000; public static final String META_EDIT_OPERATION_TIMEOUT_MS = - "hbase.region.read-replica.sink.meta-edit.operation.timeout.ms"; + "hbase.region.read-replica.sink.meta-edit.operation.timeout.ms"; public static final long META_EDIT_OPERATION_TIMEOUT_MS_DEFAULT = 60000; @@ -179,7 +179,8 @@ void replicated() { private boolean stopped; public RegionReplicationSink(Configuration conf, RegionInfo primary, TableDescriptor td, - RegionReplicationBufferManager manager, Runnable flushRequester, AsyncClusterConnection conn) { + RegionReplicationBufferManager manager, Runnable flushRequester, + AsyncClusterConnection conn) { Preconditions.checkArgument(RegionReplicaUtil.isDefaultReplica(primary), "%s is not primary", primary); this.regionReplication = td.getRegionReplication(); @@ -192,11 +193,11 @@ public RegionReplicationSink(Configuration conf, RegionInfo primary, TableDescri this.conn = conn; this.retries = conf.getInt(RETRIES_NUMBER, RETRIES_NUMBER_DEFAULT); this.rpcTimeoutNs = - TimeUnit.MILLISECONDS.toNanos(conf.getLong(RPC_TIMEOUT_MS, RPC_TIMEOUT_MS_DEFAULT)); + TimeUnit.MILLISECONDS.toNanos(conf.getLong(RPC_TIMEOUT_MS, RPC_TIMEOUT_MS_DEFAULT)); this.operationTimeoutNs = TimeUnit.MILLISECONDS - .toNanos(conf.getLong(OPERATION_TIMEOUT_MS, OPERATION_TIMEOUT_MS_DEFAULT)); + .toNanos(conf.getLong(OPERATION_TIMEOUT_MS, OPERATION_TIMEOUT_MS_DEFAULT)); this.metaEditRpcTimeoutNs = TimeUnit.MILLISECONDS - .toNanos(conf.getLong(META_EDIT_RPC_TIMEOUT_MS, META_EDIT_RPC_TIMEOUT_MS_DEFAULT)); + .toNanos(conf.getLong(META_EDIT_RPC_TIMEOUT_MS, META_EDIT_RPC_TIMEOUT_MS_DEFAULT)); this.metaEditOperationTimeoutNs = TimeUnit.MILLISECONDS.toNanos( conf.getLong(META_EDIT_OPERATION_TIMEOUT_MS, META_EDIT_OPERATION_TIMEOUT_MS_DEFAULT)); this.batchSizeCapacity = conf.getLong(BATCH_SIZE_CAPACITY, BATCH_SIZE_CAPACITY_DEFAULT); @@ -204,8 +205,7 @@ public RegionReplicationSink(Configuration conf, RegionInfo primary, TableDescri this.failedReplicas = new IntHashSet(regionReplication - 1); } - void onComplete(List sent, - Map> replica2Error) { + void onComplete(List sent, Map> replica2Error) { long maxSequenceId = Long.MIN_VALUE; long toReleaseSize = 0; for (SinkEntry entry : sent) { @@ -285,7 +285,7 @@ private void send() { } sending = true; List walEntries = - toSend.stream().map(e -> new WAL.Entry(e.key, e.edit)).collect(Collectors.toList()); + toSend.stream().map(e -> new WAL.Entry(e.key, e.edit)).collect(Collectors.toList()); AtomicInteger remaining = new AtomicInteger(toSendReplicaCount); Map> replica2Error = new HashMap<>(); for (int replicaId = 1; replicaId < regionReplication; replicaId++) { @@ -316,8 +316,8 @@ private boolean isStartFlushAllStores(FlushDescriptor flushDesc) { return false; } Set storesFlushed = - flushDesc.getStoreFlushesList().stream().map(sfd -> sfd.getFamilyName().toByteArray()) - .collect(Collectors.toCollection(() -> new TreeSet<>(Bytes.BYTES_COMPARATOR))); + flushDesc.getStoreFlushesList().stream().map(sfd -> sfd.getFamilyName().toByteArray()) + .collect(Collectors.toCollection(() -> new TreeSet<>(Bytes.BYTES_COMPARATOR))); if (storesFlushed.size() != tableDesc.getColumnFamilyCount()) { return false; } @@ -380,8 +380,8 @@ public void add(WALKeyImpl key, WALEdit edit, ServerCall rpcCall) { long clearedSize = clearAllEntries(); if (LOG.isDebugEnabled()) { 
LOG.debug( - "Got a flush all request with sequence id {}, clear {} pending" + - " entries with size {}, clear failed replicas {}", + "Got a flush all request with sequence id {}, clear {} pending" + + " entries with size {}, clear failed replicas {}", flushSequenceNumber, clearedCount, StringUtils.TraditionalBinaryPrefix.long2String(clearedSize, "", 1), failedReplicas); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java index a20a001e27c6..2222faa2a208 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,6 @@ import java.io.IOException; import java.util.List; import java.util.concurrent.Callable; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; @@ -34,15 +29,20 @@ import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** - * This online snapshot implementation uses the distributed procedure framework to force a - * store flush and then records the hfiles. Its enter stage does nothing. Its leave stage then - * flushes the memstore, builds the region server's snapshot manifest from its hfiles list, and - * copies .regioninfos into the snapshot working directory. At the master side, there is an atomic - * rename of the working dir into the proper snapshot directory. + * This online snapshot implementation uses the distributed procedure framework to force a store + * flush and then records the hfiles. Its enter stage does nothing. Its leave stage then flushes the + * memstore, builds the region server's snapshot manifest from its hfiles list, and copies + * .regioninfos into the snapshot working directory. At the master side, there is an atomic rename + * of the working dir into the proper snapshot directory. 
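The flush-then-record flow described above runs inside a per-region operation guard so the region cannot be closed while its files are being captured; RegionSnapshotTask.call() below takes the guard with startRegionOperation(Operation.SNAPSHOT). A minimal sketch of that pattern, assuming closeRegionOperation(Operation) as the matching release call and leaving the snapshot work as a comment:

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region.Operation;

final class SnapshotRegionGuardSketch {
  static void withSnapshotGuard(HRegion region) throws IOException {
    region.startRegionOperation(Operation.SNAPSHOT); // takes the region read lock
    try {
      // flush (unless skipFlush) and add the region's store files to the snapshot manifest
    } finally {
      region.closeRegionOperation(Operation.SNAPSHOT); // always release the guard
    }
  }
}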
*/ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -57,9 +57,8 @@ public class FlushSnapshotSubprocedure extends Subprocedure { // the maximum number of attempts we flush final static int MAX_RETRIES = 3; - public FlushSnapshotSubprocedure(ProcedureMember member, - ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, - List regions, SnapshotDescription snapshot, + public FlushSnapshotSubprocedure(ProcedureMember member, ForeignExceptionDispatcher errorListener, + long wakeFrequency, long timeout, List regions, SnapshotDescription snapshot, SnapshotSubprocedurePool taskManager) { super(member, snapshot.getName(), errorListener, wakeFrequency, timeout); this.snapshot = snapshot; @@ -72,7 +71,7 @@ public FlushSnapshotSubprocedure(ProcedureMember member, } /** - * Callable for adding files to snapshot manifest working dir. Ready for multithreading. + * Callable for adding files to snapshot manifest working dir. Ready for multithreading. */ public static class RegionSnapshotTask implements Callable { private HRegion region; @@ -80,8 +79,8 @@ public static class RegionSnapshotTask implements Callable { private ForeignExceptionDispatcher monitor; private SnapshotDescription snapshotDesc; - public RegionSnapshotTask(HRegion region, SnapshotDescription snapshotDesc, - boolean skipFlush, ForeignExceptionDispatcher monitor) { + public RegionSnapshotTask(HRegion region, SnapshotDescription snapshotDesc, boolean skipFlush, + ForeignExceptionDispatcher monitor) { this.region = region; this.skipFlush = skipFlush; this.monitor = monitor; @@ -91,21 +90,21 @@ public RegionSnapshotTask(HRegion region, SnapshotDescription snapshotDesc, @Override public Void call() throws Exception { // Taking the region read lock prevents the individual region from being closed while a - // snapshot is in progress. This is helpful but not sufficient for preventing races with - // snapshots that involve multiple regions and regionservers. It is still possible to have + // snapshot is in progress. This is helpful but not sufficient for preventing races with + // snapshots that involve multiple regions and regionservers. It is still possible to have // an interleaving such that globally regions are missing, so we still need the verification // step. LOG.debug("Starting snapshot operation on " + region); region.startRegionOperation(Operation.SNAPSHOT); try { if (skipFlush) { - /* - * This is to take an online-snapshot without force a coordinated flush to prevent pause - * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure - * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be - * turned on/off based on the flush type. - * To minimized the code change, class name is not changed. - */ + /* + * This is to take an online-snapshot without force a coordinated flush to prevent pause + * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure + * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be + * turned on/off based on the flush type. To minimized the code change, class name is not + * changed. + */ LOG.debug("take snapshot without flush memstore first"); } else { LOG.debug("Flush Snapshotting region " + region.toString() + " started..."); @@ -155,9 +154,9 @@ private void flushSnapshot() throws ForeignException { // assert that the taskManager is empty. 
if (taskManager.hasTasks()) { - throw new IllegalStateException("Attempting to take snapshot " - + ClientSnapshotDescriptionUtils.toString(snapshot) - + " but we currently have outstanding tasks"); + throw new IllegalStateException( + "Attempting to take snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + + " but we currently have outstanding tasks"); } // Add all hfiles already existing in region. @@ -200,7 +199,8 @@ public byte[] insideBarrier() throws ForeignException { @Override public void cleanup(Exception e) { LOG.info("Aborting all online FLUSH snapshot subprocedure task threads for '" - + snapshot.getName() + "' due to error", e); + + snapshot.getName() + "' due to error", + e); try { taskManager.cancelTasks(); } catch (InterruptedException e1) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index a01d118718d0..ac76c3644f87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,17 +28,11 @@ import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; @@ -53,20 +47,26 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; + /** * This manager class handles the work dealing with snapshots for a {@link HRegionServer}. *

            - * This provides the mechanism necessary to kick off a online snapshot specific - * {@link Subprocedure} that is responsible for the regions being served by this region server. - * If any failures occur with the subprocedure, the RegionSeverSnapshotManager's subprocedure - * handler, {@link ProcedureMember}, notifies the master's ProcedureCoordinator to abort all - * others. + * This provides the mechanism necessary to kick off a online snapshot specific {@link Subprocedure} + * that is responsible for the regions being served by this region server. If any failures occur + * with the subprocedure, the RegionSeverSnapshotManager's subprocedure handler, + * {@link ProcedureMember}, notifies the master's ProcedureCoordinator to abort all others. *

            * On startup, requires {@link #start()} to be called. *

            @@ -78,7 +78,8 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { private static final Logger LOG = LoggerFactory.getLogger(RegionServerSnapshotManager.class); /** Maximum number of snapshot region tasks that can run concurrently */ - private static final String CONCURENT_SNAPSHOT_TASKS_KEY = "hbase.snapshot.region.concurrentTasks"; + private static final String CONCURENT_SNAPSHOT_TASKS_KEY = + "hbase.snapshot.region.concurrentTasks"; private static final int DEFAULT_CONCURRENT_SNAPSHOT_TASKS = 3; /** Conf key for number of request threads to start snapshots on regionservers */ @@ -91,8 +92,9 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** Keep threads alive in request pool for max of 300 seconds */ public static final long SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 5 * 60000; - /** Conf key for millis between checks to see if snapshot completed or if there are errors*/ - public static final String SNAPSHOT_REQUEST_WAKE_MILLIS_KEY = "hbase.snapshot.region.wakefrequency"; + /** Conf key for millis between checks to see if snapshot completed or if there are errors */ + public static final String SNAPSHOT_REQUEST_WAKE_MILLIS_KEY = + "hbase.snapshot.region.wakefrequency"; /** Default amount of time to check for errors while regions finish snapshotting */ private static final long SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT = 500; @@ -107,14 +109,15 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { * @param memberRpc use specified memberRpc instance * @param procMember use specified ProcedureMember */ - RegionServerSnapshotManager(Configuration conf, HRegionServer parent, + RegionServerSnapshotManager(Configuration conf, HRegionServer parent, ProcedureMemberRpcs memberRpc, ProcedureMember procMember) { this.rss = parent; this.memberRpcs = memberRpc; this.member = procMember; } - public RegionServerSnapshotManager() {} + public RegionServerSnapshotManager() { + } /** * Start accepting snapshot requests. @@ -144,11 +147,9 @@ public void stop(boolean force) throws IOException { /** * If in a running state, creates the specified subprocedure for handling an online snapshot. - * - * Because this gets the local list of regions to snapshot and not the set the master had, - * there is a possibility of a race where regions may be missed. This detected by the master in - * the snapshot verification step. - * + * Because this gets the local list of regions to snapshot and not the set the master had, there + * is a possibility of a race where regions may be missed. This detected by the master in the + * snapshot verification step. * @param snapshot * @return Subprocedure to submit to the ProcedureMember. */ @@ -156,8 +157,8 @@ public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { // don't run a snapshot if the parent is stop(ping) if (rss.isStopping() || rss.isStopped()) { - throw new IllegalStateException("Can't start snapshot on RS: " + rss.getServerName() - + ", because stopping/stopped!"); + throw new IllegalStateException( + "Can't start snapshot on RS: " + rss.getServerName() + ", because stopping/stopped!"); } // check to see if this server is hosting any regions for the snapshots @@ -170,7 +171,7 @@ public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { + "something has gone awry with the online regions.", e1); } - // We need to run the subprocedure even if we have no relevant regions. 
The coordinator + // We need to run the subprocedure even if we have no relevant regions. The coordinator // expects participation in the procedure and without sending message the snapshot attempt // will hang and fail. @@ -178,53 +179,49 @@ public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { + snapshot.getTable() + " type " + snapshot.getType()); ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(snapshot.getName()); Configuration conf = rss.getConfiguration(); - long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, - SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); - long wakeMillis = conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY, - SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT); + long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); + long wakeMillis = + conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY, SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT); switch (snapshot.getType()) { - case FLUSH: - SnapshotSubprocedurePool taskManager = - new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); - return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, - timeoutMillis, involvedRegions, snapshot, taskManager); - case SKIPFLUSH: + case FLUSH: + SnapshotSubprocedurePool taskManager = + new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); + return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, + involvedRegions, snapshot, taskManager); + case SKIPFLUSH: /* - * This is to take an online-snapshot without force a coordinated flush to prevent pause - * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure + * This is to take an online-snapshot without force a coordinated flush to prevent pause The + * snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be - * turned on/off based on the flush type. - * To minimized the code change, class name is not changed. + * turned on/off based on the flush type. To minimized the code change, class name is not + * changed. */ SnapshotSubprocedurePool taskManager2 = new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); - return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, - timeoutMillis, involvedRegions, snapshot, taskManager2); + return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, + involvedRegions, snapshot, taskManager2); - default: - throw new UnsupportedOperationException("Unrecognized snapshot type:" + snapshot.getType()); + default: + throw new UnsupportedOperationException("Unrecognized snapshot type:" + snapshot.getType()); } } /** - * Determine if the snapshot should be handled on this server - * - * NOTE: This is racy -- the master expects a list of regionservers. - * This means if a region moves somewhere between the calls we'll miss some regions. - * For example, a region move during a snapshot could result in a region to be skipped or done - * twice. This is manageable because the {@link MasterSnapshotVerifier} will double check the - * region lists after the online portion of the snapshot completes and will explicitly fail the - * snapshot. - * + * Determine if the snapshot should be handled on this server NOTE: This is racy -- the master + * expects a list of regionservers. This means if a region moves somewhere between the calls we'll + * miss some regions. 
For example, a region move during a snapshot could result in a region to be + * skipped or done twice. This is manageable because the {@link MasterSnapshotVerifier} will + * double check the region lists after the online portion of the snapshot completes and will + * explicitly fail the snapshot. * @param snapshot * @return the list of online regions. Empty list is returned if no regions are responsible for * the given snapshot. * @throws IOException */ private List getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException { - List onlineRegions = (List) rss - .getRegions(TableName.valueOf(snapshot.getTable())); + List onlineRegions = + (List) rss.getRegions(TableName.valueOf(snapshot.getTable())); Iterator iterator = onlineRegions.iterator(); // remove the non-default regions while (iterator.hasNext()) { @@ -256,16 +253,13 @@ public Subprocedure buildSubprocedure(String name, byte[] data) { /** * We use the SnapshotSubprocedurePool, a class specific thread pool instead of - * {@link org.apache.hadoop.hbase.executor.ExecutorService}. - * - * It uses a {@link java.util.concurrent.ExecutorCompletionService} which provides queuing of - * completed tasks which lets us efficiently cancel pending tasks upon the earliest operation - * failures. - * + * {@link org.apache.hadoop.hbase.executor.ExecutorService}. It uses a + * {@link java.util.concurrent.ExecutorCompletionService} which provides queuing of completed + * tasks which lets us efficiently cancel pending tasks upon the earliest operation failures. * HBase's ExecutorService (different from {@link java.util.concurrent.ExecutorService}) isn't - * really built for coordinated tasks where multiple threads as part of one larger task. In - * RS's the HBase Executor services are only used for open and close and not other threadpooled - * operations such as compactions and replication sinks. + * really built for coordinated tasks where multiple threads as part of one larger task. In RS's + * the HBase Executor services are only used for open and close and not other threadpooled + * operations such as compactions and replication sinks. */ static class SnapshotSubprocedurePool { private final Abortable abortable; @@ -278,14 +272,14 @@ static class SnapshotSubprocedurePool { SnapshotSubprocedurePool(String name, Configuration conf, Abortable abortable) { this.abortable = abortable; // configure the executor service - long keepAlive = conf.getLong( - RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY, + long keepAlive = conf.getLong(RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY, RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); int threads = conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS); this.name = name; executor = Threads.getBoundedCachedThreadPool(threads, keepAlive, TimeUnit.MILLISECONDS, new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-snapshot-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER) + .build()); taskPool = new ExecutorCompletionService<>(executor); } @@ -294,10 +288,9 @@ boolean hasTasks() { } /** - * Submit a task to the pool. - * - * NOTE: all must be submitted before you can safely {@link #waitForOutstandingTasks()}. This - * version does not support issuing tasks from multiple concurrent table snapshots requests. + * Submit a task to the pool. 
NOTE: all must be submitted before you can safely + * {@link #waitForOutstandingTasks()}. This version does not support issuing tasks from multiple + * concurrent table snapshots requests. */ void submitTask(final Callable task) { Future f = this.taskPool.submit(task); @@ -307,7 +300,6 @@ void submitTask(final Callable task) { /** * Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}. * This *must* be called after all tasks are submitted via submitTask. - * * @return true on success, false otherwise * @throws InterruptedException * @throws SnapshotCreationException if the snapshot failed while we were waiting @@ -324,9 +316,9 @@ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException if (!futures.remove(f)) { LOG.warn("unexpected future" + f); } - LOG.debug("Completed " + (i+1) + "/" + sz + " local region snapshots."); + LOG.debug("Completed " + (i + 1) + "/" + sz + " local region snapshots."); } - LOG.debug("Completed " + sz + " local region snapshots."); + LOG.debug("Completed " + sz + " local region snapshots."); return true; } catch (InterruptedException e) { LOG.warn("Got InterruptedException in SnapshotSubprocedurePool", e); @@ -339,7 +331,7 @@ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException Throwable cause = e.getCause(); if (cause instanceof ForeignException) { LOG.warn("Rethrowing ForeignException from SnapshotSubprocedurePool", e); - throw (ForeignException)e.getCause(); + throw (ForeignException) e.getCause(); } else if (cause instanceof DroppedSnapshotException) { // we have to abort the region server according to contract of flush abortable.abort("Received DroppedSnapshotException, aborting", cause); @@ -359,22 +351,23 @@ boolean waitForOutstandingTasks() throws ForeignException, InterruptedException void cancelTasks() throws InterruptedException { Collection> tasks = futures; LOG.debug("cancelling " + tasks.size() + " tasks for snapshot " + name); - for (Future f: tasks) { - // TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there + for (Future f : tasks) { + // TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there // are places in the HBase code where row/region locks are taken and not released in a - // finally block. Thus we cancel without interrupting. Cancellations will be slower to + // finally block. Thus we cancel without interrupting. Cancellations will be slower to // complete but we won't suffer from unreleased locks due to poor code discipline. f.cancel(false); } // evict remaining tasks and futures from taskPool. futures.clear(); - while (taskPool.poll() != null) {} + while (taskPool.poll() != null) { + } stop(); } /** - * Abruptly shutdown the thread pool. Call when exiting a region server. + * Abruptly shutdown the thread pool. Call when exiting a region server. 
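The Javadoc above leans on ExecutorCompletionService to explain why the pool can fail fast: completed tasks come back in completion order, so the first failure surfaces as early as possible and the remaining work can be cancelled. A minimal, self-contained sketch of that pattern, outside any HBase class and with made-up task bodies, might look like this:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionServiceSketch {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService executor = Executors.newFixedThreadPool(4);
    ExecutorCompletionService<Void> taskPool = new ExecutorCompletionService<>(executor);
    List<Future<Void>> futures = new ArrayList<>();

    // Submit everything up front, mirroring submitTask(): all tasks must be queued
    // before anyone starts waiting on them.
    for (int i = 0; i < 4; i++) {
      final int id = i;
      Callable<Void> task = () -> {
        if (id == 2) {
          throw new IllegalStateException("region snapshot " + id + " failed");
        }
        Thread.sleep(100L * id);
        return null;
      };
      futures.add(taskPool.submit(task));
    }

    // Mirror waitForOutstandingTasks(): take() hands back futures in completion order,
    // so the earliest failure is seen before slower tasks finish.
    try {
      for (int i = 0; i < futures.size(); i++) {
        Future<Void> f = taskPool.take();
        f.get();
      }
    } catch (ExecutionException e) {
      // Mirror cancelTasks(): cancel without interrupting, then drain the queue.
      for (Future<Void> f : futures) {
        f.cancel(false);
      }
      while (taskPool.poll() != null) {
      }
      System.out.println("cancelled remaining work after: " + e.getCause());
    } finally {
      executor.shutdownNow();
    }
  }
}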
*/ void stop() { if (this.stopped) return; @@ -393,8 +386,8 @@ void stop() { public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; ZKWatcher zkw = rss.getZooKeeper(); - this.memberRpcs = new ZKProcedureMemberRpcs(zkw, - SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION); + this.memberRpcs = + new ZKProcedureMemberRpcs(zkw, SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION); // read in the snapshot request configuration properties Configuration conf = rss.getConfiguration(); @@ -402,8 +395,8 @@ public void initialize(RegionServerServices rss) throws KeeperException { int opThreads = conf.getInt(SNAPSHOT_REQUEST_THREADS_KEY, SNAPSHOT_REQUEST_THREADS_DEFAULT); // create the actual snapshot procedure member - ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(), - opThreads, keepAlive); + ThreadPoolExecutor pool = + ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); this.member = new ProcedureMember(memberRpcs, pool, new SnapshotSubprocedureBuilder()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java index 99fd3d43572e..49056e5a9704 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DefaultStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.Collections; import java.util.List; import org.apache.hadoop.conf.Configuration; - import org.apache.hadoop.hbase.regionserver.StoreContext; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -50,14 +49,14 @@ protected void doAddNewStoreFiles(Collection newFiles) throws IOE @Override protected void doAddCompactionResults(Collection compactedFiles, - Collection newFiles) throws IOException { + Collection newFiles) throws IOException { // NOOP } @Override protected List doLoadStoreFiles(boolean readOnly) throws IOException { List files = - ctx.getRegionFileSystem().getStoreFiles(ctx.getFamily().getNameAsString()); + ctx.getRegionFileSystem().getStoreFiles(ctx.getFamily().getNameAsString()); return files != null ? files : Collections.emptyList(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java index 91e1bdc7dc67..4773c23eb7bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/FileBasedStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -56,9 +56,9 @@ class FileBasedStoreFileTracker extends StoreFileTrackerBase { public FileBasedStoreFileTracker(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) { super(conf, isPrimaryReplica, ctx); - //CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table - //descriptors with the SFT impl specific configs. By the time this happens, the table has no - //regions nor stores yet, so it can't create a proper StoreContext. + // CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table + // descriptors with the SFT impl specific configs. By the time this happens, the table has no + // regions nor stores yet, so it can't create a proper StoreContext. if (ctx != null) { backedFile = new StoreFileListFile(ctx); } else { @@ -101,7 +101,7 @@ public boolean requireWritingToTmpDirFirst() { private StoreFileEntry toStoreFileEntry(StoreFileInfo info) { return StoreFileEntry.newBuilder().setName(info.getPath().getName()).setSize(info.getSize()) - .build(); + .build(); } @Override @@ -123,9 +123,9 @@ protected void doAddNewStoreFiles(Collection newFiles) throws IOE @Override protected void doAddCompactionResults(Collection compactedFiles, - Collection newFiles) throws IOException { + Collection newFiles) throws IOException { Set compactedFileNames = - compactedFiles.stream().map(info -> info.getPath().getName()).collect(Collectors.toSet()); + compactedFiles.stream().map(info -> info.getPath().getName()).collect(Collectors.toSet()); synchronized (storefiles) { StoreFileList.Builder builder = StoreFileList.newBuilder(); storefiles.forEach((name, info) -> { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java index 5a88f99588b1..026ce2bd4d21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/InitializeStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,8 @@ @InterfaceAudience.Private public class InitializeStoreFileTrackerProcedure extends ModifyTableDescriptorProcedure { - public InitializeStoreFileTrackerProcedure(){} + public InitializeStoreFileTrackerProcedure() { + } public InitializeStoreFileTrackerProcedure(MasterProcedureEnv env, TableName tableName) { super(env, tableName); @@ -44,8 +45,8 @@ protected Optional modify(MasterProcedureEnv env, TableDescript // no tracker impl means it is a table created in previous version, the tracker impl can only // be default. 
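The FileBasedStoreFileTracker hunk just above persists the authoritative list of store files and, on compaction, swaps the compacted inputs for the new outputs before rewriting that list. A small sketch of that bookkeeping, assuming plain file-name strings and an in-memory map in place of StoreFileInfo and the protobuf-backed list file, could be:

import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

// Illustrative only: models the bookkeeping a file-list based tracker performs,
// using strings instead of StoreFileInfo and an in-memory map instead of the
// protobuf-backed store file list file.
class TrackedStoreFiles {
  private final Map<String, Long> storefiles = new LinkedHashMap<>(); // name -> size

  synchronized void add(Collection<String> newFiles) {
    for (String f : newFiles) {
      storefiles.put(f, 0L);
    }
    persist();
  }

  synchronized void replaceAfterCompaction(Collection<String> compactedFiles,
      Collection<String> newFiles) {
    Set<String> compacted = new HashSet<>(compactedFiles);
    // Drop the inputs of the compaction and record its outputs, then rewrite the
    // whole list so the persisted record always reflects the post-compaction set.
    storefiles.keySet().removeIf(compacted::contains);
    for (String f : newFiles) {
      storefiles.put(f, 0L);
    }
    persist();
  }

  private void persist() {
    System.out.println("persisting store file list: " + storefiles.keySet());
  }
}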
TableDescriptor td = - TableDescriptorBuilder.newBuilder(current).setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.DEFAULT.name()).build(); + TableDescriptorBuilder.newBuilder(current).setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()).build(); return Optional.of(td); } return Optional.empty(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java index f483d3386729..9662e7d5a284 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrationStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,7 +77,7 @@ protected void doAddNewStoreFiles(Collection newFiles) throws IOE @Override protected void doAddCompactionResults(Collection compactedFiles, - Collection newFiles) throws IOException { + Collection newFiles) throws IOException { src.doAddCompactionResults(compactedFiles, newFiles); dst.doAddCompactionResults(compactedFiles, newFiles); } @@ -85,7 +85,7 @@ protected void doAddCompactionResults(Collection compactedFiles, @Override protected void doSetStoreFiles(Collection files) throws IOException { throw new UnsupportedOperationException( - "Should not call this method on " + getClass().getSimpleName()); + "Should not call this method on " + getClass().getSimpleName()); } static Class getSrcTrackerClass(Configuration conf) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java index 1ecfee26e252..da5674f03e52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyColumnFamilyStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public ModifyColumnFamilyStoreFileTrackerProcedure() { } public ModifyColumnFamilyStoreFileTrackerProcedure(MasterProcedureEnv env, TableName tableName, - byte[] family, String dstSFT) throws HBaseIOException { + byte[] family, String dstSFT) throws HBaseIOException { super(env, tableName, dstSFT); this.family = family; } @@ -54,7 +54,7 @@ public ModifyColumnFamilyStoreFileTrackerProcedure(MasterProcedureEnv env, Table protected void preCheck(TableDescriptor current) throws IOException { if (!current.hasColumnFamily(family)) { throw new NoSuchColumnFamilyException( - Bytes.toStringBinary(family) + " does not exist for table " + current.getTableName()); + Bytes.toStringBinary(family) + " does not exist for table " + current.getTableName()); } } @@ -66,18 +66,18 @@ protected Configuration createConf(Configuration conf, TableDescriptor current) @Override protected TableDescriptor createRestoreTableDescriptor(TableDescriptor current, - String restoreSFT) { + String restoreSFT) { ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)) - .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, restoreSFT).build(); + ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)) + .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, restoreSFT).build(); return TableDescriptorBuilder.newBuilder(current).modifyColumnFamily(cfd).build(); } @Override protected TableDescriptor createMigrationTableDescriptor(Configuration conf, - TableDescriptor current) { + TableDescriptor current) { ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)); + ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)); migrate(conf, builder::setConfiguration); return TableDescriptorBuilder.newBuilder(current).modifyColumnFamily(builder.build()).build(); } @@ -85,7 +85,7 @@ protected TableDescriptor createMigrationTableDescriptor(Configuration conf, @Override protected TableDescriptor createFinishTableDescriptor(TableDescriptor current) { ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)); + ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)); finish(builder::setConfiguration, builder::removeConfiguration); return TableDescriptorBuilder.newBuilder(current).modifyColumnFamily(builder.build()).build(); } @@ -94,14 +94,14 @@ protected TableDescriptor createFinishTableDescriptor(TableDescriptor current) { protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { super.serializeStateData(serializer); serializer.serialize(ModifyColumnFamilyStoreFileTrackerStateData.newBuilder() - .setFamily(ByteString.copyFrom(family)).build()); + .setFamily(ByteString.copyFrom(family)).build()); } @Override protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { super.deserializeStateData(serializer); ModifyColumnFamilyStoreFileTrackerStateData data = - serializer.deserialize(ModifyColumnFamilyStoreFileTrackerStateData.class); + serializer.deserialize(ModifyColumnFamilyStoreFileTrackerStateData.class); this.family = data.getFamily().toByteArray(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java index a7d8e703acc3..35ce5370055e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ */ @InterfaceAudience.Private public abstract class ModifyStoreFileTrackerProcedure - extends AbstractStateMachineTableProcedure { + extends AbstractStateMachineTableProcedure { private static final Logger LOG = LoggerFactory.getLogger(ModifyStoreFileTrackerProcedure.class); @@ -62,7 +62,7 @@ protected ModifyStoreFileTrackerProcedure() { } protected ModifyStoreFileTrackerProcedure(MasterProcedureEnv env, TableName tableName, - String dstSFT) throws HBaseIOException { + String dstSFT) throws HBaseIOException { super(env); checkDstSFT(dstSFT); this.tableName = tableName; @@ -72,7 +72,7 @@ protected ModifyStoreFileTrackerProcedure(MasterProcedureEnv env, TableName tabl private void checkDstSFT(String dstSFT) throws DoNotRetryIOException { if (MigrationStoreFileTracker.class - .isAssignableFrom(StoreFileTrackerFactory.getTrackerClass(dstSFT))) { + .isAssignableFrom(StoreFileTrackerFactory.getTrackerClass(dstSFT))) { throw new DoNotRetryIOException("Do not need to transfer to " + dstSFT); } } @@ -112,7 +112,7 @@ private StoreFileTrackerState checkState(Configuration conf, String dstSFT) { return StoreFileTrackerState.NEED_START_MIGRATION; } Class currentDstSFT = StoreFileTrackerFactory - .getStoreFileTrackerClassForMigration(conf, MigrationStoreFileTracker.DST_IMPL); + .getStoreFileTrackerClassForMigration(conf, MigrationStoreFileTracker.DST_IMPL); if (currentDstSFT.equals(dstSFTClass)) { return StoreFileTrackerState.NEED_FINISH_MIGRATION; } else { @@ -122,7 +122,7 @@ private StoreFileTrackerState checkState(Configuration conf, String dstSFT) { private final String getRestoreSFT(Configuration conf) { Class currentDstSFT = StoreFileTrackerFactory - .getStoreFileTrackerClassForMigration(conf, MigrationStoreFileTracker.DST_IMPL); + .getStoreFileTrackerClassForMigration(conf, MigrationStoreFileTracker.DST_IMPL); return StoreFileTrackerFactory.getStoreFileTrackerName(currentDstSFT); } @@ -131,7 +131,7 @@ private final String getRestoreSFT(Configuration conf) { protected abstract Configuration createConf(Configuration conf, TableDescriptor current); protected abstract TableDescriptor createRestoreTableDescriptor(TableDescriptor current, - String restoreSFT); + String restoreSFT); private Flow preCheckAndTryRestoreSFT(MasterProcedureEnv env) throws IOException { // Checks whether the table exists @@ -166,7 +166,7 @@ private Flow preCheckAndTryRestoreSFT(MasterProcedureEnv env) throws IOException } protected abstract TableDescriptor createMigrationTableDescriptor(Configuration conf, - TableDescriptor current); + TableDescriptor current); protected final void migrate(Configuration conf, BiConsumer setValue) { setValue.accept(StoreFileTrackerFactory.TRACKER_IMPL, @@ -199,7 +199,7 @@ private void finish(MasterProcedureEnv env) throws IOException { @Override protected Flow executeFromState(MasterProcedureEnv env, ModifyStoreFileTrackerState state) - throws ProcedureSuspendedException, 
ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { try { switch (state) { case MODIFY_STORE_FILE_TRACKER_FINISH_PREVIOUS_MIGRATION: @@ -226,7 +226,7 @@ protected Flow executeFromState(MasterProcedureEnv env, ModifyStoreFileTrackerSt @Override protected void rollbackState(MasterProcedureEnv env, ModifyStoreFileTrackerState state) - throws IOException, InterruptedException { + throws IOException, InterruptedException { if (isRollbackSupported(state)) { return; } @@ -257,14 +257,14 @@ protected boolean isRollbackSupported(ModifyStoreFileTrackerState state) { protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { super.serializeStateData(serializer); serializer.serialize(ModifyStoreFileTrackerStateData.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)).setDstSft(dstSFT).build()); + .setTableName(ProtobufUtil.toProtoTableName(tableName)).setDstSft(dstSFT).build()); } @Override protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { super.deserializeStateData(serializer); ModifyStoreFileTrackerStateData data = - serializer.deserialize(ModifyStoreFileTrackerStateData.class); + serializer.deserialize(ModifyStoreFileTrackerStateData.class); this.tableName = ProtobufUtil.toTableName(data.getTableName()); this.dstSFT = data.getDstSft(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java index 096f38fa36eb..41c8fb7d22c7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/ModifyTableStoreFileTrackerProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ public ModifyTableStoreFileTrackerProcedure() { } public ModifyTableStoreFileTrackerProcedure(MasterProcedureEnv env, TableName tableName, - String dstSFT) throws HBaseIOException { + String dstSFT) throws HBaseIOException { super(env, tableName, dstSFT); } @@ -48,14 +48,14 @@ protected Configuration createConf(Configuration conf, TableDescriptor current) @Override protected TableDescriptor createRestoreTableDescriptor(TableDescriptor current, - String restoreSFT) { + String restoreSFT) { return TableDescriptorBuilder.newBuilder(current) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, restoreSFT).build(); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, restoreSFT).build(); } @Override protected TableDescriptor createMigrationTableDescriptor(Configuration conf, - TableDescriptor current) { + TableDescriptor current) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(current); migrate(conf, builder::setValue); return builder.build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java index 5ed35c7beae1..ec9dca6430cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileListFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -101,8 +101,8 @@ private StoreFileList load(Path path) throws IOException { try (FSDataInputStream in = fs.open(path)) { int length = in.readInt(); if (length <= 0 || length > MAX_FILE_SIZE) { - throw new IOException("Invalid file length " + length + - ", either less than 0 or greater then max allowed size " + MAX_FILE_SIZE); + throw new IOException("Invalid file length " + length + + ", either less than 0 or greater then max allowed size " + MAX_FILE_SIZE); } data = new byte[length]; in.readFully(data); @@ -113,7 +113,7 @@ private StoreFileList load(Path path) throws IOException { int calculatedChecksum = (int) crc32.getValue(); if (expectedChecksum != calculatedChecksum) { throw new IOException( - "Checksum mismatch, expected " + expectedChecksum + ", actual " + calculatedChecksum); + "Checksum mismatch, expected " + expectedChecksum + ", actual " + calculatedChecksum); } return StoreFileList.parseFrom(data); } @@ -165,11 +165,11 @@ private void initializeTrackFiles(long seqId) { } private void cleanUpTrackFiles(long loadedSeqId, - NavigableMap> seqId2TrackFiles) { + NavigableMap> seqId2TrackFiles) { LOG.info("Cleanup track file with sequence id < {}", loadedSeqId); FileSystem fs = ctx.getRegionFileSystem().getFileSystem(); NavigableMap> toDelete = - loadedSeqId >= 0 ? seqId2TrackFiles.tailMap(loadedSeqId, false) : seqId2TrackFiles; + loadedSeqId >= 0 ? 
seqId2TrackFiles.tailMap(loadedSeqId, false) : seqId2TrackFiles; toDelete.values().stream().flatMap(l -> l.stream()).forEach(file -> { ForkJoinPool.commonPool().execute(() -> { LOG.info("Deleting track file {}", file); @@ -191,8 +191,8 @@ StoreFileList load(boolean readOnly) throws IOException { // should not have more than 2 files, if not, it means that the track files are broken, just // throw exception out and fail the region open. if (files.size() > 2) { - throw new DoNotRetryIOException("Should only have at most 2 track files for sequence id " + - entry.getKey() + ", but got " + files.size() + " files: " + files); + throw new DoNotRetryIOException("Should only have at most 2 track files for sequence id " + + entry.getKey() + ", but got " + files.size() + " files: " + files); } boolean loaded = false; for (int i = 0; i < files.size(); i++) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java index aabbe8d87494..ba2cede57423 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ public interface StoreFileTracker { * Add new store files and remove compacted store files after compaction. */ void replace(Collection compactedFiles, Collection newFiles) - throws IOException; + throws IOException; /** * Set the store files. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java index 1bf354f00a0f..c920aa305171 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -80,7 +80,7 @@ public final void add(Collection newFiles) throws IOException { @Override public final void replace(Collection compactedFiles, - Collection newFiles) throws IOException { + Collection newFiles) throws IOException { if (isPrimaryReplica) { doAddCompactionResults(compactedFiles, newFiles); } @@ -104,19 +104,22 @@ protected final String getTrackerName() { } private HFileContext createFileContext(Compression.Algorithm compression, - boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context encryptionContext) { + boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context encryptionContext) { if (compression == null) { compression = HFile.DEFAULT_COMPRESSION_ALGORITHM; } ColumnFamilyDescriptor family = ctx.getFamily(); HFileContext hFileContext = new HFileContextBuilder().withIncludesMvcc(includeMVCCReadpoint) - .withIncludesTags(includesTag).withCompression(compression) - .withCompressTags(family.isCompressTags()).withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) - .withBlockSize(family.getBlocksize()).withHBaseCheckSum(true) - .withDataBlockEncoding(family.getDataBlockEncoding()).withEncryptionContext(encryptionContext) - .withCreateTime(EnvironmentEdgeManager.currentTime()).withColumnFamily(family.getName()) - .withTableName(ctx.getTableName().getName()).withCellComparator(ctx.getComparator()).build(); + .withIncludesTags(includesTag).withCompression(compression) + .withCompressTags(family.isCompressTags()) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) + .withBlockSize(family.getBlocksize()).withHBaseCheckSum(true) + .withDataBlockEncoding(family.getDataBlockEncoding()) + .withEncryptionContext(encryptionContext) + .withCreateTime(EnvironmentEdgeManager.currentTime()).withColumnFamily(family.getName()) + .withTableName(ctx.getTableName().getName()).withCellComparator(ctx.getComparator()) + .build(); return hFileContext; } @@ -136,12 +139,13 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th // if data blocks are to be cached on write // during compaction, we should forcefully // cache index and bloom blocks as well - if (cacheCompactedBlocksOnWrite && - totalCompactedFilesSize <= cacheConf.getCacheCompactedBlocksOnWriteThreshold()) { + if (cacheCompactedBlocksOnWrite + && totalCompactedFilesSize <= cacheConf.getCacheCompactedBlocksOnWriteThreshold()) { writerCacheConf.enableCacheOnWrite(); if (!cacheOnWriteLogged) { - LOG.info("For {} , cacheCompactedBlocksOnWrite is true, hence enabled " + - "cacheOnWrite for Data blocks, Index blocks and Bloom filter blocks", this); + LOG.info("For {} , cacheCompactedBlocksOnWrite is true, hence enabled " + + "cacheOnWrite for Data blocks, Index blocks and Bloom filter blocks", + this); cacheOnWriteLogged = true; } } else { @@ -149,8 +153,8 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th if (totalCompactedFilesSize > cacheConf.getCacheCompactedBlocksOnWriteThreshold()) { // checking condition once again for logging LOG.debug( - "For {}, setting cacheCompactedBlocksOnWrite as false as total size of compacted " + - "files - {}, is greater than cacheCompactedBlocksOnWriteThreshold - {}", + "For {}, setting cacheCompactedBlocksOnWrite as false as total size of compacted " + + "files - {}, is greater than cacheCompactedBlocksOnWriteThreshold - {}", 
this, totalCompactedFilesSize, cacheConf.getCacheCompactedBlocksOnWriteThreshold()); } } @@ -159,8 +163,9 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th if (shouldCacheDataOnWrite) { writerCacheConf.enableCacheOnWrite(); if (!cacheOnWriteLogged) { - LOG.info("For {} , cacheDataOnWrite is true, hence enabled cacheOnWrite for " + - "Index blocks and Bloom filter blocks", this); + LOG.info("For {} , cacheDataOnWrite is true, hence enabled cacheOnWrite for " + + "Index blocks and Bloom filter blocks", + this); cacheOnWriteLogged = true; } } @@ -171,17 +176,17 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th Path outputDir; if (requireWritingToTmpDirFirst()) { outputDir = - new Path(ctx.getRegionFileSystem().getTempDir(), ctx.getFamily().getNameAsString()); + new Path(ctx.getRegionFileSystem().getTempDir(), ctx.getFamily().getNameAsString()); } else { outputDir = ctx.getFamilyStoreDirectoryPath(); } - StoreFileWriter.Builder builder = - new StoreFileWriter.Builder(conf, writerCacheConf, ctx.getRegionFileSystem().getFileSystem()) - .withOutputDir(outputDir).withBloomType(ctx.getBloomFilterType()) - .withMaxKeyCount(params.maxKeyCount()).withFavoredNodes(ctx.getFavoredNodes()) - .withFileContext(hFileContext).withShouldDropCacheBehind(params.shouldDropBehind()) - .withCompactedFilesSupplier(ctx.getCompactedFilesSupplier()) - .withFileStoragePolicy(params.fileStoragePolicy()); + StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, + ctx.getRegionFileSystem().getFileSystem()).withOutputDir(outputDir) + .withBloomType(ctx.getBloomFilterType()).withMaxKeyCount(params.maxKeyCount()) + .withFavoredNodes(ctx.getFavoredNodes()).withFileContext(hFileContext) + .withShouldDropCacheBehind(params.shouldDropBehind()) + .withCompactedFilesSupplier(ctx.getCompactedFilesSupplier()) + .withFileStoragePolicy(params.fileStoragePolicy()); return builder.build(); } @@ -196,7 +201,7 @@ public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) th protected abstract void doAddNewStoreFiles(Collection newFiles) throws IOException; protected abstract void doAddCompactionResults(Collection compactedFiles, - Collection newFiles) throws IOException; + Collection newFiles) throws IOException; protected abstract void doSetStoreFiles(Collection files) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java index 85c5ee24f3b2..515a186e33d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -114,7 +116,7 @@ public static Class getTrackerClass(String trackerNa } public static StoreFileTracker create(Configuration conf, boolean isPrimaryReplica, - StoreContext ctx) { + StoreContext ctx) { Class tracker = getTrackerClass(conf); LOG.info("instantiating StoreFileTracker impl {}", tracker.getName()); return ReflectionUtils.newInstance(tracker, conf, isPrimaryReplica, ctx); @@ -125,25 +127,25 @@ public static StoreFileTracker create(Configuration conf, boolean isPrimaryRepli * StoreContext at master side. */ public static StoreFileTracker create(Configuration conf, TableDescriptor td, - ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs) { + ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs) { StoreContext ctx = - StoreContext.getBuilder().withColumnFamilyDescriptor(cfd).withRegionFileSystem(regionFs) - .withFamilyStoreDirectoryPath(regionFs.getStoreDir(cfd.getNameAsString())).build(); + StoreContext.getBuilder().withColumnFamilyDescriptor(cfd).withRegionFileSystem(regionFs) + .withFamilyStoreDirectoryPath(regionFs.getStoreDir(cfd.getNameAsString())).build(); return StoreFileTrackerFactory.create(mergeConfigurations(conf, td, cfd), true, ctx); } private static Configuration mergeConfigurations(Configuration global, TableDescriptor table, - ColumnFamilyDescriptor family) { + ColumnFamilyDescriptor family) { return StoreUtils.createStoreConfiguration(global, table, family); } static Class - getStoreFileTrackerClassForMigration(Configuration conf, String configName) { + getStoreFileTrackerClassForMigration(Configuration conf, String configName) { String trackerName = - Preconditions.checkNotNull(conf.get(configName), "config %s is not set", configName); + Preconditions.checkNotNull(conf.get(configName), "config %s is not set", configName); try { return Trackers.valueOf(trackerName.toUpperCase()).clazz - .asSubclass(StoreFileTrackerBase.class); + .asSubclass(StoreFileTrackerBase.class); } catch (IllegalArgumentException e) { // Fall back to them specifying a class name try { @@ -159,13 +161,13 @@ private static Configuration mergeConfigurations(Configuration global, TableDesc * {@link MigrationStoreFileTracker}. */ static StoreFileTrackerBase createForMigration(Configuration conf, String configName, - boolean isPrimaryReplica, StoreContext ctx) { + boolean isPrimaryReplica, StoreContext ctx) { Class tracker = - getStoreFileTrackerClassForMigration(conf, configName); + getStoreFileTrackerClassForMigration(conf, configName); // prevent nest of MigrationStoreFileTracker, it will cause infinite recursion. 
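getStoreFileTrackerClassForMigration above resolves a configuration value either as one of the well-known tracker names or, failing that, as a fully qualified class name. The same lookup pattern in isolation, with stand-in Tracker types rather than the HBase ones, might be sketched as:

import java.util.Locale;

// Standalone sketch of the lookup pattern: a short, well-known name maps through an
// enum, anything else is treated as a fully qualified class name. Tracker and the
// enum entries here are placeholders, not the HBase types.
final class TrackerResolver {
  interface Tracker {
  }

  static class DefaultTracker implements Tracker {
  }

  static class FileTracker implements Tracker {
  }

  enum Trackers {
    DEFAULT(DefaultTracker.class),
    FILE(FileTracker.class);

    final Class<? extends Tracker> clazz;

    Trackers(Class<? extends Tracker> clazz) {
      this.clazz = clazz;
    }
  }

  static Class<? extends Tracker> resolve(String trackerName) throws ClassNotFoundException {
    try {
      // First try the short enum name, e.g. "file" or "DEFAULT".
      return Trackers.valueOf(trackerName.toUpperCase(Locale.ROOT)).clazz;
    } catch (IllegalArgumentException e) {
      // Fall back to interpreting the value as a class name.
      return Class.forName(trackerName).asSubclass(Tracker.class);
    }
  }
}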
if (MigrationStoreFileTracker.class.isAssignableFrom(tracker)) { throw new IllegalArgumentException("Should not specify " + configName + " as " - + Trackers.MIGRATION + " because it can not be nested"); + + Trackers.MIGRATION + " because it can not be nested"); } LOG.info("instantiating StoreFileTracker impl {} as {}", tracker.getName(), configName); return ReflectionUtils.newInstance(tracker, conf, isPrimaryReplica, ctx); @@ -173,12 +175,11 @@ static StoreFileTrackerBase createForMigration(Configuration conf, String config public static TableDescriptor updateWithTrackerConfigs(Configuration conf, TableDescriptor descriptor) { - //CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table - //descriptors with the SFT impl specific configs. By the time this happens, the table has no - //regions nor stores yet, so it can't create a proper StoreContext. + // CreateTableProcedure needs to instantiate the configured SFT impl, in order to update table + // descriptors with the SFT impl specific configs. By the time this happens, the table has no + // regions nor stores yet, so it can't create a proper StoreContext. if (StringUtils.isEmpty(descriptor.getValue(TRACKER_IMPL))) { - StoreFileTracker tracker = - StoreFileTrackerFactory.create(conf, true, null); + StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, null); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(descriptor); return tracker.updateWithTrackerConfigs(builder).build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java index 38040bc4f006..f431408e0672 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerValidationUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,13 +36,13 @@ private StoreFileTrackerValidationUtils() { // should not use MigrationStoreFileTracker for new family private static void checkForNewFamily(Configuration conf, TableDescriptor table, - ColumnFamilyDescriptor family) throws IOException { + ColumnFamilyDescriptor family) throws IOException { Configuration mergedConf = StoreUtils.createStoreConfiguration(conf, table, family); Class tracker = StoreFileTrackerFactory.getTrackerClass(mergedConf); if (MigrationStoreFileTracker.class.isAssignableFrom(tracker)) { throw new DoNotRetryIOException( - "Should not use " + Trackers.MIGRATION + " as store file tracker for new family " + - family.getNameAsString() + " of table " + table.getTableName()); + "Should not use " + Trackers.MIGRATION + " as store file tracker for new family " + + family.getNameAsString() + " of table " + table.getTableName()); } } @@ -54,7 +54,7 @@ private static void checkForNewFamily(Configuration conf, TableDescriptor table, * {@code CreateTableProcedure}. 
*/ public static void checkForCreateTable(Configuration conf, TableDescriptor table) - throws IOException { + throws IOException { for (ColumnFamilyDescriptor family : table.getColumnFamilies()) { checkForNewFamily(conf, table, family); } @@ -95,7 +95,7 @@ public static void checkForCreateTable(Configuration conf, TableDescriptor table * {@code ModifyTableProcedure}. */ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTable, - TableDescriptor newTable, boolean isTableDisabled) throws IOException { + TableDescriptor newTable, boolean isTableDisabled) throws IOException { for (ColumnFamilyDescriptor newFamily : newTable.getColumnFamilies()) { ColumnFamilyDescriptor oldFamily = oldTable.getColumnFamily(newFamily.getName()); if (oldFamily == null) { @@ -106,32 +106,32 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa Configuration newConf = StoreUtils.createStoreConfiguration(conf, newTable, newFamily); Class oldTracker = - StoreFileTrackerFactory.getTrackerClass(oldConf); + StoreFileTrackerFactory.getTrackerClass(oldConf); Class newTracker = - StoreFileTrackerFactory.getTrackerClass(newConf); + StoreFileTrackerFactory.getTrackerClass(newConf); if (MigrationStoreFileTracker.class.isAssignableFrom(oldTracker)) { Class oldSrcTracker = - MigrationStoreFileTracker.getSrcTrackerClass(oldConf); + MigrationStoreFileTracker.getSrcTrackerClass(oldConf); Class oldDstTracker = - MigrationStoreFileTracker.getDstTrackerClass(oldConf); + MigrationStoreFileTracker.getDstTrackerClass(oldConf); if (oldTracker.equals(newTracker)) { // confirm that we have the same src tracker and dst tracker Class newSrcTracker = - MigrationStoreFileTracker.getSrcTrackerClass(newConf); + MigrationStoreFileTracker.getSrcTrackerClass(newConf); if (!oldSrcTracker.equals(newSrcTracker)) { - throw new DoNotRetryIOException("The src tracker has been changed from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldSrcTracker) + " to " + - StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("The src tracker has been changed from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldSrcTracker) + " to " + + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } Class newDstTracker = - MigrationStoreFileTracker.getDstTrackerClass(newConf); + MigrationStoreFileTracker.getDstTrackerClass(newConf); if (!oldDstTracker.equals(newDstTracker)) { - throw new DoNotRetryIOException("The dst tracker has been changed from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " to " + - StoreFileTrackerFactory.getStoreFileTrackerName(newDstTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("The dst tracker has been changed from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " to " + + StoreFileTrackerFactory.getStoreFileTrackerName(newDstTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } } else { // do not allow changing from MIGRATION to its dst SFT implementation while the table is @@ -140,16 +140,16 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa // details. 
if (isTableDisabled) { throw new TableNotEnabledException( - "Should not change store file tracker implementation from " + - StoreFileTrackerFactory.Trackers.MIGRATION.name() + " while table " + - newTable.getTableName() + " is disabled"); + "Should not change store file tracker implementation from " + + StoreFileTrackerFactory.Trackers.MIGRATION.name() + " while table " + + newTable.getTableName() + " is disabled"); } // we can only change to the dst tracker if (!newTracker.equals(oldDstTracker)) { - throw new DoNotRetryIOException("Should migrate tracker to " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " but got " + - StoreFileTrackerFactory.getStoreFileTrackerName(newTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("Should migrate tracker to " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " but got " + + StoreFileTrackerFactory.getStoreFileTrackerName(newTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } } } else { @@ -158,30 +158,30 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa // tracker if (!MigrationStoreFileTracker.class.isAssignableFrom(newTracker)) { throw new DoNotRetryIOException( - "Should change to " + Trackers.MIGRATION + " first when migrating from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + "Should change to " + Trackers.MIGRATION + " first when migrating from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } // here we do not check whether the table is disabled, as after changing to MIGRATION, we // still rely on the src SFT implementation to actually load the store files, so there // will be no data loss problem. 
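The branches above are easier to follow when condensed: a store file tracker can only be changed by stepping onto MIGRATION (whose src must be the tracker currently in use and must differ from its dst) and then stepping off it onto that dst. A reading-aid sketch with plain strings for tracker names, leaving out the unchanged-configuration case and the disabled-table rule, might be:

// Reading aid only: the real checks work on tracker classes derived from the merged
// store configuration, compare src/dst when both sides are MIGRATION, and also
// reject leaving MIGRATION while the table is disabled.
final class SftTransitionRules {
  static void check(String oldTracker, String newTracker, String migrationSrc,
      String migrationDst) {
    if (!"MIGRATION".equals(oldTracker) && !"MIGRATION".equals(newTracker)) {
      throw new IllegalArgumentException("change to MIGRATION first, not " + newTracker);
    }
    if (!"MIGRATION".equals(oldTracker)) {
      // Stepping onto MIGRATION: its src must be the tracker currently in use,
      // and src and dst must differ.
      if (!migrationSrc.equals(oldTracker)) {
        throw new IllegalArgumentException("src tracker must stay " + oldTracker);
      }
      if (migrationSrc.equals(migrationDst)) {
        throw new IllegalArgumentException("src and dst tracker must differ");
      }
    } else if (!"MIGRATION".equals(newTracker) && !newTracker.equals(migrationDst)) {
      // Stepping off MIGRATION: the only legal target is the configured dst tracker.
      throw new IllegalArgumentException("must finish migration by switching to " + migrationDst);
    }
  }
}

Under those stand-in names, check("DEFAULT", "MIGRATION", "DEFAULT", "FILE") followed by check("MIGRATION", "FILE", "DEFAULT", "FILE") passes, while a direct check("DEFAULT", "FILE", "DEFAULT", "FILE") is rejected.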
Class newSrcTracker = - MigrationStoreFileTracker.getSrcTrackerClass(newConf); + MigrationStoreFileTracker.getSrcTrackerClass(newConf); if (!oldTracker.equals(newSrcTracker)) { - throw new DoNotRetryIOException("Should use src tracker " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " first but got " + - StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + - " when migrating from " + - StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("Should use src tracker " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " first but got " + + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + + " when migrating from " + + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } Class newDstTracker = - MigrationStoreFileTracker.getDstTrackerClass(newConf); + MigrationStoreFileTracker.getDstTrackerClass(newConf); // the src and dst tracker should not be the same if (newSrcTracker.equals(newDstTracker)) { - throw new DoNotRetryIOException("The src tracker and dst tracker are both " + - StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + - newFamily.getNameAsString() + " of table " + newTable.getTableName()); + throw new DoNotRetryIOException("The src tracker and dst tracker are both " + + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family " + + newFamily.getNameAsString() + " of table " + newTable.getTableName()); } } } @@ -197,7 +197,7 @@ public static void checkForModifyTable(Configuration conf, TableDescriptor oldTa * @throws RestoreSnapshotException if restore would break the current SFT setup */ public static void validatePreRestoreSnapshot(TableDescriptor currentTableDesc, - TableDescriptor snapshotTableDesc, Configuration baseConf) throws RestoreSnapshotException { + TableDescriptor snapshotTableDesc, Configuration baseConf) throws RestoreSnapshotException { for (ColumnFamilyDescriptor cfDesc : currentTableDesc.getColumnFamilies()) { ColumnFamilyDescriptor snapCFDesc = snapshotTableDesc.getColumnFamily(cfDesc.getName()); @@ -205,20 +205,20 @@ public static void validatePreRestoreSnapshot(TableDescriptor currentTableDesc, // not matter if (snapCFDesc != null) { Configuration currentCompositeConf = - StoreUtils.createStoreConfiguration(baseConf, currentTableDesc, cfDesc); + StoreUtils.createStoreConfiguration(baseConf, currentTableDesc, cfDesc); Configuration snapCompositeConf = - StoreUtils.createStoreConfiguration(baseConf, snapshotTableDesc, snapCFDesc); + StoreUtils.createStoreConfiguration(baseConf, snapshotTableDesc, snapCFDesc); Class currentSFT = - StoreFileTrackerFactory.getTrackerClass(currentCompositeConf); + StoreFileTrackerFactory.getTrackerClass(currentCompositeConf); Class snapSFT = - StoreFileTrackerFactory.getTrackerClass(snapCompositeConf); + StoreFileTrackerFactory.getTrackerClass(snapCompositeConf); // restoration is not possible if there is an SFT mismatch if (currentSFT != snapSFT) { throw new RestoreSnapshotException( - "Restoring Snapshot is not possible because " + " the config for column family " + - cfDesc.getNameAsString() + " has incompatible configuration. 
Current SFT: " + - currentSFT + " SFT from snapshot: " + snapSFT); + "Restoring Snapshot is not possible because " + " the config for column family " + + cfDesc.getNameAsString() + " has incompatible configuration. Current SFT: " + + currentSFT + " SFT from snapshot: " + snapSFT); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java index 45e7267ed265..84fb814c02d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/CompactionThroughputControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.util.ReflectionUtils; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public final class CompactionThroughputControllerFactory { @@ -36,35 +36,34 @@ public final class CompactionThroughputControllerFactory { private CompactionThroughputControllerFactory() { } - private static final Class - DEFAULT_THROUGHPUT_CONTROLLER_CLASS = PressureAwareCompactionThroughputController.class; + private static final Class DEFAULT_THROUGHPUT_CONTROLLER_CLASS = + PressureAwareCompactionThroughputController.class; // for backward compatibility and may not be supported in the future private static final String DEPRECATED_NAME_OF_PRESSURE_AWARE_THROUGHPUT_CONTROLLER_CLASS = - "org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController"; + "org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController"; private static final String DEPRECATED_NAME_OF_NO_LIMIT_THROUGHPUT_CONTROLLER_CLASS = - "org.apache.hadoop.hbase.regionserver.compactions.NoLimitThroughputController"; + "org.apache.hadoop.hbase.regionserver.compactions.NoLimitThroughputController"; - public static ThroughputController create(RegionServerServices server, - Configuration conf) { + public static ThroughputController create(RegionServerServices server, Configuration conf) { Class clazz = getThroughputControllerClass(conf); ThroughputController controller = ReflectionUtils.newInstance(clazz, conf); controller.setup(server); return controller; } - public static Class getThroughputControllerClass( - Configuration conf) { + public static Class + getThroughputControllerClass(Configuration conf) { String className = conf.get(HBASE_THROUGHPUT_CONTROLLER_KEY, DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName()); className = resolveDeprecatedClassName(className); try { return Class.forName(className).asSubclass(ThroughputController.class); } catch (Exception e) { - LOG.warn( - "Unable to load configured throughput controller '" + className - + "', load default throughput controller " - + DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName() + " instead", e); + 
LOG.warn("Unable to load configured throughput controller '" + className + + "', load default throughput controller " + DEFAULT_THROUGHPUT_CONTROLLER_CLASS.getName() + + " instead", + e); return DEFAULT_THROUGHPUT_CONTROLLER_CLASS; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java index fc75c5835831..12e50ec486b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/FlushThroughputControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,11 +19,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.util.ReflectionUtils; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public final class FlushThroughputControllerFactory { @@ -33,32 +33,30 @@ public final class FlushThroughputControllerFactory { public static final String HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY = "hbase.regionserver.flush.throughput.controller"; - private static final Class - DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS = NoLimitThroughputController.class; + private static final Class DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS = + NoLimitThroughputController.class; private FlushThroughputControllerFactory() { } - public static ThroughputController create(RegionServerServices server, - Configuration conf) { + public static ThroughputController create(RegionServerServices server, Configuration conf) { Class clazz = getThroughputControllerClass(conf); ThroughputController controller = ReflectionUtils.newInstance(clazz, conf); controller.setup(server); return controller; } - public static Class getThroughputControllerClass( - Configuration conf) { - String className = - conf.get(HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY, - DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName()); + public static Class + getThroughputControllerClass(Configuration conf) { + String className = conf.get(HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY, + DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName()); try { return Class.forName(className).asSubclass(ThroughputController.class); } catch (Exception e) { - LOG.warn( - "Unable to load configured flush throughput controller '" + className - + "', load default throughput controller " - + DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName() + " instead", e); + LOG.warn("Unable to load configured flush throughput controller '" + className + + "', load default throughput controller " + + DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS.getName() + " instead", + e); return DEFAULT_FLUSH_THROUGHPUT_CONTROLLER_CLASS; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java index 
4b1b26108523..31a424d5e996 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/NoLimitThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.regionserver.throttle; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class NoLimitThroughputController implements ThroughputController { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java index 1c3952ed0491..fa346b1b25e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; /** * A throughput controller which uses the follow schema to limit throughput @@ -42,8 +42,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class PressureAwareCompactionThroughputController extends PressureAwareThroughputController { - private final static Logger LOG = LoggerFactory - .getLogger(PressureAwareCompactionThroughputController.class); + private final static Logger LOG = + LoggerFactory.getLogger(PressureAwareCompactionThroughputController.class); public static final String HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND = "hbase.hstore.compaction.throughput.higher.bound"; @@ -69,20 +69,20 @@ public class PressureAwareCompactionThroughputController extends PressureAwareTh // check compaction throughput every this size private static final String HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL = - "hbase.hstore.compaction.throughput.control.check.interval"; + "hbase.hstore.compaction.throughput.control.check.interval"; private long maxThroughputOffpeak; @Override public void setup(final RegionServerServices server) { - server.getChoreService().scheduleChore( - new ScheduledChore("CompactionThroughputTuner", this, tuningPeriod) { - - @Override - protected void chore() { - tune(server.getCompactionPressure()); - } - }); + server.getChoreService() + .scheduleChore(new 
ScheduledChore("CompactionThroughputTuner", this, tuningPeriod) { + + @Override + protected void chore() { + tune(server.getCompactionPressure()); + } + }); } private void tune(double compactionPressure) { @@ -95,9 +95,8 @@ private void tune(double compactionPressure) { } else { // compactionPressure is between 0.0 and 1.0, we use a simple linear formula to // calculate the throughput limitation. - maxThroughputToSet = - maxThroughputLowerBound + (maxThroughputUpperBound - maxThroughputLowerBound) - * compactionPressure; + maxThroughputToSet = maxThroughputLowerBound + + (maxThroughputUpperBound - maxThroughputLowerBound) * compactionPressure; } if (LOG.isDebugEnabled()) { if (Math.abs(maxThroughputToSet - getMaxThroughput()) < .0000001) { @@ -117,23 +116,18 @@ public void setConf(Configuration conf) { if (conf == null) { return; } - this.maxThroughputUpperBound = - conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, - DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND); - this.maxThroughputLowerBound = - conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, - DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND); - this.maxThroughputOffpeak = - conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK, - DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK); + this.maxThroughputUpperBound = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, + DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND); + this.maxThroughputLowerBound = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, + DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND); + this.maxThroughputOffpeak = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK, + DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK); this.offPeakHours = OffPeakHours.getInstance(conf); - this.controlPerSize = - conf.getLong(HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL, - this.maxThroughputLowerBound); + this.controlPerSize = conf.getLong(HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL, + this.maxThroughputLowerBound); this.setMaxThroughput(this.maxThroughputLowerBound); - this.tuningPeriod = - getConf().getInt(HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD, - DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD); + this.tuningPeriod = getConf().getInt(HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD, + DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD); LOG.info("Compaction throughput configurations, higher bound: " + throughputDesc(maxThroughputUpperBound) + ", lower bound " + throughputDesc(maxThroughputLowerBound) + ", off peak: " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java index 51e7b42bf9d6..3807f41567b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareFlushThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; /** * A throughput controller which uses the follow schema to limit throughput @@ -32,8 +32,8 @@ *

 * <li>If flush pressure is greater than or equal to 1.0, no limitation.</li>
 * <li>In normal case, the max throughput is tuned between
 * {@value #HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND} and
- * {@value #HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND}, using the formula "lower +
- * (upper - lower) * flushPressure", where flushPressure is in range [0.0, 1.0)</li>
+ * {@value #HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND}, using the formula "lower + (upper -
+ * lower) * flushPressure", where flushPressure is in range [0.0, 1.0)</li>
 * </ul>
          * @see org.apache.hadoop.hbase.regionserver.HRegionServer#getFlushPressure() */ @@ -87,9 +87,8 @@ private void tune(double flushPressure) { } else { // flushPressure is between 0.0 and 1.0, we use a simple linear formula to // calculate the throughput limitation. - maxThroughputToSet = - maxThroughputLowerBound + (maxThroughputUpperBound - maxThroughputLowerBound) - * flushPressure; + maxThroughputToSet = maxThroughputLowerBound + + (maxThroughputUpperBound - maxThroughputLowerBound) * flushPressure; } if (LOG.isDebugEnabled()) { LOG.debug("flushPressure is " + flushPressure + ", tune flush throughput to " @@ -104,20 +103,16 @@ public void setConf(Configuration conf) { if (conf == null) { return; } - this.maxThroughputUpperBound = - conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, - DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND); - this.maxThroughputLowerBound = - conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, - DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND); + this.maxThroughputUpperBound = conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, + DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND); + this.maxThroughputLowerBound = conf.getLong(HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, + DEFAULT_HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND); this.offPeakHours = OffPeakHours.getInstance(conf); - this.controlPerSize = - conf.getLong(HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL, - DEFAULT_HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL); + this.controlPerSize = conf.getLong(HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL, + DEFAULT_HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL); this.setMaxThroughput(this.maxThroughputLowerBound); - this.tuningPeriod = - getConf().getInt(HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD, - DEFAULT_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD); + this.tuningPeriod = getConf().getInt(HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD, + DEFAULT_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD); LOG.info("Flush throughput configurations, upper bound: " + throughputDesc(maxThroughputUpperBound) + ", lower bound " + throughputDesc(maxThroughputLowerBound) + ", tuning period: " + tuningPeriod + " ms"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java index 306df0b9d5a5..de8ae6a2ba9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,20 +19,19 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Stoppable; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public abstract class PressureAwareThroughputController extends Configured implements - ThroughputController, Stoppable { +public abstract class PressureAwareThroughputController extends Configured + implements ThroughputController, Stoppable { private static final Logger LOG = LoggerFactory.getLogger(PressureAwareThroughputController.class); @@ -77,7 +76,8 @@ private static final class ActiveOperation { private volatile double maxThroughput; private volatile double maxThroughputPerOperation; - protected final ConcurrentMap activeOperations = new ConcurrentHashMap<>(); + protected final ConcurrentMap activeOperations = + new ConcurrentHashMap<>(); @Override public abstract void setup(final RegionServerServices server); @@ -142,9 +142,8 @@ public void finish(String opName) { ActiveOperation operation = activeOperations.remove(opName); maxThroughputPerOperation = getMaxThroughput() / activeOperations.size(); long elapsedTime = EnvironmentEdgeManager.currentTime() - operation.startTime; - LOG.info(opName + " average throughput is " - + throughputDesc(operation.totalSize, elapsedTime) + ", slept " - + operation.numberOfSleeps + " time(s) and total slept time is " + LOG.info(opName + " average throughput is " + throughputDesc(operation.totalSize, elapsedTime) + + ", slept " + operation.numberOfSleeps + " time(s) and total slept time is " + operation.totalSleepTime + " ms. " + activeOperations.size() + " active operations remaining, total limit is " + throughputDesc(getMaxThroughput())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java index 1bf1a9b52b39..cb0ef5f45069 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java @@ -48,8 +48,8 @@ * 2. parallelPutToStoreThreadLimit: The amount of concurrency allowed to write puts to a Store at * the same time. *

          - * 3. parallelPreparePutToStoreThreadLimit: The amount of concurrency allowed to - * prepare writing puts to a Store at the same time. + * 3. parallelPreparePutToStoreThreadLimit: The amount of concurrency allowed to prepare writing + * puts to a Store at the same time. *

          * Notice that our writing pipeline includes three key process: MVCC acquire, writing MemStore, and * WAL. Only limit the concurrency of writing puts to Store(parallelPutToStoreThreadLimit) is not @@ -92,10 +92,10 @@ public void init(Configuration conf) { this.parallelPutToStoreThreadLimit = conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT, DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT); this.parallelPreparePutToStoreThreadLimit = conf.getInt(PARALLEL_PREPARE_PUT_STORE_MULTIPLIER, - DEFAULT_PARALLEL_PREPARE_PUT_STORE_MULTIPLIER) * parallelPutToStoreThreadLimit; + DEFAULT_PARALLEL_PREPARE_PUT_STORE_MULTIPLIER) * parallelPutToStoreThreadLimit; this.parallelPutToStoreThreadLimitCheckMinColumnCount = conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_COUNT, - DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_NUM); + DEFAULT_PARALLEL_PUT_STORE_THREADS_LIMIT_MIN_COLUMN_NUM); if (!isEnable()) { logDisabledMessageOnce(); @@ -103,15 +103,15 @@ public void init(Configuration conf) { } /** - * {@link #init(Configuration)} is called for every Store that opens on a RegionServer. - * Here we make a lightweight attempt to log this message once per RegionServer, rather than - * per-Store. The goal is just to draw attention to this feature if debugging overload due to - * heavy writes. + * {@link #init(Configuration)} is called for every Store that opens on a RegionServer. Here we + * make a lightweight attempt to log this message once per RegionServer, rather than per-Store. + * The goal is just to draw attention to this feature if debugging overload due to heavy writes. */ private static void logDisabledMessageOnce() { if (!loggedDisableMessage) { - LOG.info("StoreHotnessProtector is disabled. Set {} > 0 to enable, " - + "which may help mitigate load under heavy write pressure.", + LOG.info( + "StoreHotnessProtector is disabled. Set {} > 0 to enable, " + + "which may help mitigate load under heavy write pressure.", PARALLEL_PUT_STORE_THREADS_LIMIT); loggedDisableMessage = true; } @@ -140,18 +140,16 @@ public void start(Map> familyMaps) throws RegionTooBusyExcept if (e.getValue().size() > this.parallelPutToStoreThreadLimitCheckMinColumnCount) { - //we need to try to add #preparePutCount at first because preparePutToStoreMap will be - //cleared when changing the configuration. + // we need to try to add #preparePutCount at first because preparePutToStoreMap will be + // cleared when changing the configuration. int preparePutCount = preparePutToStoreMap - .computeIfAbsent(e.getKey(), key -> new AtomicInteger()) - .incrementAndGet(); + .computeIfAbsent(e.getKey(), key -> new AtomicInteger()).incrementAndGet(); boolean storeAboveThread = - store.getCurrentParallelPutCount() > this.parallelPutToStoreThreadLimit; + store.getCurrentParallelPutCount() > this.parallelPutToStoreThreadLimit; boolean storeAbovePrePut = preparePutCount > this.parallelPreparePutToStoreThreadLimit; if (storeAboveThread || storeAbovePrePut) { - tooBusyStore = (tooBusyStore == null ? - store.getColumnFamilyName() : - tooBusyStore + "," + store.getColumnFamilyName()); + tooBusyStore = (tooBusyStore == null ? 
store.getColumnFamilyName() + : tooBusyStore + "," + store.getColumnFamilyName()); } aboveParallelThreadLimit |= storeAboveThread; aboveParallelPrePutLimit |= storeAbovePrePut; @@ -164,14 +162,16 @@ public void start(Map> familyMaps) throws RegionTooBusyExcept } if (aboveParallelThreadLimit || aboveParallelPrePutLimit) { - String msg = - "StoreTooBusy," + this.region.getRegionInfo().getRegionNameAsString() + ":" + tooBusyStore - + " Above " - + (aboveParallelThreadLimit ? "parallelPutToStoreThreadLimit(" - + this.parallelPutToStoreThreadLimit + ")" : "") - + (aboveParallelThreadLimit && aboveParallelPrePutLimit ? " or " : "") - + (aboveParallelPrePutLimit ? "parallelPreparePutToStoreThreadLimit(" - + this.parallelPreparePutToStoreThreadLimit + ")" : ""); + String msg = "StoreTooBusy," + this.region.getRegionInfo().getRegionNameAsString() + ":" + + tooBusyStore + " Above " + + (aboveParallelThreadLimit + ? "parallelPutToStoreThreadLimit(" + this.parallelPutToStoreThreadLimit + ")" + : "") + + (aboveParallelThreadLimit && aboveParallelPrePutLimit ? " or " : "") + + (aboveParallelPrePutLimit + ? "parallelPreparePutToStoreThreadLimit(" + this.parallelPreparePutToStoreThreadLimit + + ")" + : ""); LOG.trace(msg); throw new RegionTooBusyException(msg); } @@ -203,9 +203,8 @@ public String toString() { return "StoreHotnessProtector, parallelPutToStoreThreadLimit=" + this.parallelPutToStoreThreadLimit + " ; minColumnNum=" + this.parallelPutToStoreThreadLimitCheckMinColumnCount + " ; preparePutThreadLimit=" - + this.parallelPreparePutToStoreThreadLimit + " ; hotProtect now " + (this.isEnable() ? - "enable" : - "disable"); + + this.parallelPreparePutToStoreThreadLimit + " ; hotProtect now " + + (this.isEnable() ? "enable" : "disable"); } public boolean isEnable() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java index ad65c59436ef..22c34d584f8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
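The StoreHotnessProtector hunks above derive the prepare-put limit as a configured multiplier times parallelPutToStoreThreadLimit and reject a write once either the store's current parallel put count or its prepare-put counter crosses its limit. A simplified sketch of that check, not part of the patch; StoreState and the sample numbers are illustrative, while the real code works against HStore and a per-family preparePutToStoreMap:

```java
import java.util.concurrent.atomic.AtomicInteger;

// Simplified sketch of the two limits checked in StoreHotnessProtector.start() above.
class HotnessCheckSketch {

  static final class StoreState {
    final AtomicInteger preparePutCount = new AtomicInteger();
    volatile int currentParallelPutCount;
  }

  private final int parallelPutToStoreThreadLimit;
  private final int parallelPreparePutToStoreThreadLimit;

  HotnessCheckSketch(int putLimit, int preparePutMultiplier) {
    // Mirrors init(Configuration): the prepare-put limit is multiplier * put limit.
    this.parallelPutToStoreThreadLimit = putLimit;
    this.parallelPreparePutToStoreThreadLimit = preparePutMultiplier * putLimit;
  }

  /** Returns true when another put should be rejected as "store too busy". */
  boolean tooBusy(StoreState store) {
    int preparePutCount = store.preparePutCount.incrementAndGet();
    boolean aboveParallelThreadLimit =
      store.currentParallelPutCount > parallelPutToStoreThreadLimit;
    boolean aboveParallelPrePutLimit = preparePutCount > parallelPreparePutToStoreThreadLimit;
    return aboveParallelThreadLimit || aboveParallelPrePutLimit;
  }

  public static void main(String[] args) {
    HotnessCheckSketch sketch = new HotnessCheckSketch(10, 2); // limits: 10 puts, 20 prepare-puts
    StoreState store = new StoreState();
    store.currentParallelPutCount = 11; // above the put limit
    System.out.println(sketch.tooBusy(store)); // true
  }
}
```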
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.throttle; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.yetus.audience.InterfaceAudience; @@ -49,8 +48,8 @@ public static String getNameForThrottling(HStore store, String opName) { break; } } - return store.getRegionInfo().getEncodedName() + NAME_DELIMITER + - store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName + - NAME_DELIMITER + counter; + return store.getRegionInfo().getEncodedName() + NAME_DELIMITER + + store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName + + NAME_DELIMITER + counter; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java index 707d02d5f92b..284aa6814dd8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +19,12 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Stoppable; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.yetus.audience.InterfaceAudience; /** - * A utility that constrains the total throughput of one or more simultaneous flows by - * sleeping when necessary. + * A utility that constrains the total throughput of one or more simultaneous flows by sleeping when + * necessary. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public interface ThroughputController extends Stoppable { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index 5416e3a2d669..bafb6887f935 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -131,15 +131,15 @@ public abstract class AbstractFSWAL implements WAL { /** Don't log blocking regions more frequently than this. 
*/ private static final long SURVIVED_TOO_LONG_LOG_INTERVAL_NS = TimeUnit.MINUTES.toNanos(5); - protected static final String SLOW_SYNC_TIME_MS ="hbase.regionserver.wal.slowsync.ms"; + protected static final String SLOW_SYNC_TIME_MS = "hbase.regionserver.wal.slowsync.ms"; protected static final int DEFAULT_SLOW_SYNC_TIME_MS = 100; // in ms protected static final String ROLL_ON_SYNC_TIME_MS = "hbase.regionserver.wal.roll.on.sync.ms"; protected static final int DEFAULT_ROLL_ON_SYNC_TIME_MS = 10000; // in ms protected static final String SLOW_SYNC_ROLL_THRESHOLD = - "hbase.regionserver.wal.slowsync.roll.threshold"; + "hbase.regionserver.wal.slowsync.roll.threshold"; protected static final int DEFAULT_SLOW_SYNC_ROLL_THRESHOLD = 100; // 100 slow sync warnings protected static final String SLOW_SYNC_ROLL_INTERVAL_MS = - "hbase.regionserver.wal.slowsync.roll.interval.ms"; + "hbase.regionserver.wal.slowsync.roll.interval.ms"; protected static final int DEFAULT_SLOW_SYNC_ROLL_INTERVAL_MS = 60 * 1000; // in ms, 1 minute protected static final String WAL_SYNC_TIMEOUT_MS = "hbase.regionserver.wal.sync.timeout"; @@ -150,10 +150,9 @@ public abstract class AbstractFSWAL implements WAL { public static final String MAX_LOGS = "hbase.regionserver.maxlogs"; public static final String RING_BUFFER_SLOT_COUNT = - "hbase.regionserver.wal.disruptor.event.count"; + "hbase.regionserver.wal.disruptor.event.count"; - public static final String WAL_SHUTDOWN_WAIT_TIMEOUT_MS = - "hbase.wal.shutdown.wait.timeout.ms"; + public static final String WAL_SHUTDOWN_WAIT_TIMEOUT_MS = "hbase.wal.shutdown.wait.timeout.ms"; public static final int DEFAULT_WAL_SHUTDOWN_WAIT_TIMEOUT_MS = 15 * 1000; /** @@ -296,13 +295,14 @@ public abstract class AbstractFSWAL implements WAL { * an IllegalArgumentException if used to compare paths from different wals. */ final Comparator LOG_NAME_COMPARATOR = - (o1, o2) -> Long.compare(getFileNumFromFileName(o1), getFileNumFromFileName(o2)); + (o1, o2) -> Long.compare(getFileNumFromFileName(o1), getFileNumFromFileName(o2)); private static final class WalProps { /** * Map the encoded region name to the highest sequence id. - *

          Contains all the regions it has an entry for. + *

          + * Contains all the regions it has an entry for. */ public final Map encodedName2HighestSequenceId; @@ -329,7 +329,7 @@ public WalProps(Map encodedName2HighestSequenceId, long logSize) { * (contained in the log file name). */ protected ConcurrentNavigableMap walFile2Props = - new ConcurrentSkipListMap<>(LOG_NAME_COMPARATOR); + new ConcurrentSkipListMap<>(LOG_NAME_COMPARATOR); /** * A cache of sync futures reused by threads. @@ -350,9 +350,9 @@ public WalProps(Map encodedName2HighestSequenceId, long logSize) { // Run in caller if we get reject execution exception, to avoid aborting region server when we get // reject execution exception. Usually this should not happen but let's make it more robust. private final ExecutorService logArchiveExecutor = - new ThreadPoolExecutor(1, 1, 1L, TimeUnit.MINUTES, new LinkedBlockingQueue(), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("WAL-Archive-%d").build(), - new ThreadPoolExecutor.CallerRunsPolicy()); + new ThreadPoolExecutor(1, 1, 1L, TimeUnit.MINUTES, new LinkedBlockingQueue(), + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("WAL-Archive-%d").build(), + new ThreadPoolExecutor.CallerRunsPolicy()); private final int archiveRetries; @@ -389,8 +389,7 @@ protected final int getPreallocatedEventCount() { // be stuck and make no progress if the buffer is filled with appends only and there is no // sync. If no sync, then the handlers will be outstanding just waiting on sync completion // before they return. - int preallocatedEventCount = - this.conf.getInt(RING_BUFFER_SLOT_COUNT, 1024 * 16); + int preallocatedEventCount = this.conf.getInt(RING_BUFFER_SLOT_COUNT, 1024 * 16); checkArgument(preallocatedEventCount >= 0, RING_BUFFER_SLOT_COUNT + " must > 0"); int floor = Integer.highestOneBit(preallocatedEventCount); if (floor == preallocatedEventCount) { @@ -413,8 +412,7 @@ protected AbstractFSWAL(final FileSystem fs, final Path rootDir, final String lo protected AbstractFSWAL(final FileSystem fs, final Abortable abortable, final Path rootDir, final String logDir, final String archiveDir, final Configuration conf, final List listeners, final boolean failIfWALExists, final String prefix, - final String suffix) - throws FailedLogCloseException, IOException { + final String suffix) throws FailedLogCloseException, IOException { this.fs = fs; this.walDir = new Path(rootDir, logDir); this.walArchiveDir = new Path(rootDir, archiveDir); @@ -433,11 +431,11 @@ protected AbstractFSWAL(final FileSystem fs, final Abortable abortable, final Pa // If prefix is null||empty then just name it wal this.walFilePrefix = - prefix == null || prefix.isEmpty() ? "wal" : URLEncoder.encode(prefix, "UTF8"); + prefix == null || prefix.isEmpty() ? "wal" : URLEncoder.encode(prefix, "UTF8"); // we only correctly differentiate suffices when numeric ones start with '.' 
if (suffix != null && !(suffix.isEmpty()) && !(suffix.startsWith(WAL_FILE_NAME_DELIMITER))) { - throw new IllegalArgumentException("WAL suffix must start with '" + WAL_FILE_NAME_DELIMITER + - "' but instead was '" + suffix + "'"); + throw new IllegalArgumentException("WAL suffix must start with '" + WAL_FILE_NAME_DELIMITER + + "' but instead was '" + suffix + "'"); } // Now that it exists, set the storage policy for the entire directory of wal files related to // this FSHLog instance @@ -494,28 +492,28 @@ public boolean accept(final Path fileName) { this.logrollsize = (long) (this.blocksize * multiplier); this.maxLogs = conf.getInt(MAX_LOGS, Math.max(32, calculateMaxLogFiles(conf, logrollsize))); - LOG.info("WAL configuration: blocksize=" + StringUtils.byteDesc(blocksize) + ", rollsize=" + - StringUtils.byteDesc(this.logrollsize) + ", prefix=" + this.walFilePrefix + ", suffix=" + - walFileSuffix + ", logDir=" + this.walDir + ", archiveDir=" + this.walArchiveDir + - ", maxLogs=" + this.maxLogs); - this.slowSyncNs = TimeUnit.MILLISECONDS.toNanos(conf.getInt(SLOW_SYNC_TIME_MS, - DEFAULT_SLOW_SYNC_TIME_MS)); - this.rollOnSyncNs = TimeUnit.MILLISECONDS.toNanos(conf.getInt(ROLL_ON_SYNC_TIME_MS, - DEFAULT_ROLL_ON_SYNC_TIME_MS)); - this.slowSyncRollThreshold = conf.getInt(SLOW_SYNC_ROLL_THRESHOLD, - DEFAULT_SLOW_SYNC_ROLL_THRESHOLD); - this.slowSyncCheckInterval = conf.getInt(SLOW_SYNC_ROLL_INTERVAL_MS, - DEFAULT_SLOW_SYNC_ROLL_INTERVAL_MS); - this.walSyncTimeoutNs = TimeUnit.MILLISECONDS.toNanos(conf.getLong(WAL_SYNC_TIMEOUT_MS, - DEFAULT_WAL_SYNC_TIMEOUT_MS)); + LOG.info("WAL configuration: blocksize=" + StringUtils.byteDesc(blocksize) + ", rollsize=" + + StringUtils.byteDesc(this.logrollsize) + ", prefix=" + this.walFilePrefix + ", suffix=" + + walFileSuffix + ", logDir=" + this.walDir + ", archiveDir=" + this.walArchiveDir + + ", maxLogs=" + this.maxLogs); + this.slowSyncNs = + TimeUnit.MILLISECONDS.toNanos(conf.getInt(SLOW_SYNC_TIME_MS, DEFAULT_SLOW_SYNC_TIME_MS)); + this.rollOnSyncNs = TimeUnit.MILLISECONDS + .toNanos(conf.getInt(ROLL_ON_SYNC_TIME_MS, DEFAULT_ROLL_ON_SYNC_TIME_MS)); + this.slowSyncRollThreshold = + conf.getInt(SLOW_SYNC_ROLL_THRESHOLD, DEFAULT_SLOW_SYNC_ROLL_THRESHOLD); + this.slowSyncCheckInterval = + conf.getInt(SLOW_SYNC_ROLL_INTERVAL_MS, DEFAULT_SLOW_SYNC_ROLL_INTERVAL_MS); + this.walSyncTimeoutNs = TimeUnit.MILLISECONDS + .toNanos(conf.getLong(WAL_SYNC_TIMEOUT_MS, DEFAULT_WAL_SYNC_TIMEOUT_MS)); this.syncFutureCache = new SyncFutureCache(conf); this.implClassName = getClass().getSimpleName(); - this.walTooOldNs = TimeUnit.SECONDS.toNanos(conf.getInt( - SURVIVED_TOO_LONG_SEC_KEY, SURVIVED_TOO_LONG_SEC_DEFAULT)); + this.walTooOldNs = TimeUnit.SECONDS + .toNanos(conf.getInt(SURVIVED_TOO_LONG_SEC_KEY, SURVIVED_TOO_LONG_SEC_DEFAULT)); this.useHsync = conf.getBoolean(HRegion.WAL_HSYNC_CONF_KEY, HRegion.DEFAULT_WAL_HSYNC); archiveRetries = this.conf.getInt("hbase.regionserver.walroll.archive.retries", 0); - this.walShutdownTimeout = conf.getLong(WAL_SHUTDOWN_WAIT_TIMEOUT_MS, - DEFAULT_WAL_SHUTDOWN_WAIT_TIMEOUT_MS); + this.walShutdownTimeout = + conf.getLong(WAL_SHUTDOWN_WAIT_TIMEOUT_MS, DEFAULT_WAL_SHUTDOWN_WAIT_TIMEOUT_MS); } /** @@ -607,6 +605,7 @@ public final void sync(long txid, boolean forceSync) throws IOException { protected abstract void doSync(boolean forceSync) throws IOException; protected abstract void doSync(long txid, boolean forceSync) throws IOException; + /** * This is a convenience method that computes a new filename with a given file-number. 
* @param filenum to use @@ -694,9 +693,9 @@ public int getNumLogFiles() { } /** - * If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, - * check the first (oldest) WAL, and return those regions which should be flushed so that - * it can be let-go/'archived'. + * If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, check the + * first (oldest) WAL, and return those regions which should be flushed so that it can be + * let-go/'archived'. * @return stores of regions (encodedRegionNames) to flush in order to archive oldest WAL file. */ Map> findRegionsToForceFlush() throws IOException { @@ -704,8 +703,8 @@ Map> findRegionsToForceFlush() throws IOException { int logCount = getNumRolledLogFiles(); if (logCount > this.maxLogs && logCount > 0) { Map.Entry firstWALEntry = this.walFile2Props.firstEntry(); - regions = - this.sequenceIdAccounting.findLower(firstWALEntry.getValue().encodedName2HighestSequenceId); + regions = this.sequenceIdAccounting + .findLower(firstWALEntry.getValue().encodedName2HighestSequenceId); } if (regions != null) { List listForPrint = new ArrayList<>(); @@ -719,9 +718,9 @@ Map> findRegionsToForceFlush() throws IOException { } listForPrint.add(Bytes.toStringBinary(r.getKey()) + "[" + families.toString() + "]"); } - LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs + - "; forcing (partial) flush of " + regions.size() + " region(s): " + - StringUtils.join(",", listForPrint)); + LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs + + "; forcing (partial) flush of " + regions.size() + " region(s): " + + StringUtils.join(",", listForPrint)); } return regions; } @@ -757,7 +756,7 @@ private void cleanOldLogs() throws IOException { } } else if (regionsBlockingThisWal != null) { StringBuilder sb = new StringBuilder(log.toString()).append(" has not been archived for ") - .append(TimeUnit.NANOSECONDS.toSeconds(ageNs)).append(" seconds; blocked by: "); + .append(TimeUnit.NANOSECONDS.toSeconds(ageNs)).append(" seconds; blocked by: "); boolean isFirst = true; for (byte[] region : regionsBlockingThisWal) { if (!isFirst) { @@ -800,8 +799,7 @@ protected void archive(final Pair log) { break; } } else { - LOG.error("Log archiving failed for the log {} - attempt {}", log.getFirst(), retry, - e); + LOG.error("Log archiving failed for the log {} - attempt {}", log.getFirst(), retry, e); } retry++; } @@ -930,8 +928,8 @@ private Map> rollWriterInternal(boolean force) throws IOExc newPath = replaceWriter(oldPath, newPath, nextWriter); tellListenersAboutPostLogRoll(oldPath, newPath); if (LOG.isDebugEnabled()) { - LOG.debug("Create new " + implClassName + " writer with pipeline: " + - Arrays.toString(getPipeline())); + LOG.debug("Create new " + implClassName + " writer with pipeline: " + + Arrays.toString(getPipeline())); } // We got a new writer, so reset the slow sync count lastTimeCheckSlowSync = EnvironmentEdgeManager.currentTime(); @@ -945,8 +943,8 @@ private Map> rollWriterInternal(boolean force) throws IOExc // If the underlying FileSystem can't do what we ask, treat as IO failure so // we'll abort. throw new IOException( - "Underlying FileSystem can't meet stream requirements. See RS log " + "for details.", - exception); + "Underlying FileSystem can't meet stream requirements. 
See RS log " + "for details.", + exception); } return regionsToFlush; } finally { @@ -1020,9 +1018,9 @@ public Void call() throws Exception { throw new InterruptedIOException("Interrupted when waiting for shutdown WAL"); } catch (TimeoutException e) { throw new TimeoutIOException("We have waited " + walShutdownTimeout + "ms, but" - + " the shutdown of WAL doesn't complete! Please check the status of underlying " - + "filesystem or increase the wait time by the config \"" - + WAL_SHUTDOWN_WAIT_TIMEOUT_MS + "\"", e); + + " the shutdown of WAL doesn't complete! Please check the status of underlying " + + "filesystem or increase the wait time by the config \"" + WAL_SHUTDOWN_WAIT_TIMEOUT_MS + + "\"", e); } catch (ExecutionException e) { if (e.getCause() instanceof IOException) { throw (IOException) e.getCause(); @@ -1174,8 +1172,7 @@ private long postAppend(final Entry e, final long elapsedTime) throws IOExceptio protected final void postSync(long timeInNanos, int handlerSyncs) { if (timeInNanos > this.slowSyncNs) { String msg = new StringBuilder().append("Slow sync cost: ") - .append(TimeUnit.NANOSECONDS.toMillis(timeInNanos)) - .append(" ms, current pipeline: ") + .append(TimeUnit.NANOSECONDS.toMillis(timeInNanos)).append(" ms, current pipeline: ") .append(Arrays.toString(getPipeline())).toString(); LOG.info(msg); if (timeInNanos > this.rollOnSyncNs) { @@ -1183,10 +1180,10 @@ protected final void postSync(long timeInNanos, int handlerSyncs) { // Elsewhere in checkSlowSync, called from checkLogRoll, we will look at cumulative // effects. Here we have a single data point that indicates we should take immediate // action, so do so. - LOG.warn("Requesting log roll because we exceeded slow sync threshold; time=" + - TimeUnit.NANOSECONDS.toMillis(timeInNanos) + " ms, threshold=" + - TimeUnit.NANOSECONDS.toMillis(rollOnSyncNs) + " ms, current pipeline: " + - Arrays.toString(getPipeline())); + LOG.warn("Requesting log roll because we exceeded slow sync threshold; time=" + + TimeUnit.NANOSECONDS.toMillis(timeInNanos) + " ms, threshold=" + + TimeUnit.NANOSECONDS.toMillis(rollOnSyncNs) + " ms, current pipeline: " + + Arrays.toString(getPipeline())); requestLogRoll(SLOW_SYNC); } slowSyncCount.incrementAndGet(); // it's fine to unconditionally increment this @@ -1199,10 +1196,11 @@ protected final void postSync(long timeInNanos, int handlerSyncs) { } protected final long stampSequenceIdAndPublishToRingBuffer(RegionInfo hri, WALKeyImpl key, - WALEdit edits, boolean inMemstore, RingBuffer ringBuffer) throws IOException { + WALEdit edits, boolean inMemstore, RingBuffer ringBuffer) + throws IOException { if (this.closed) { throw new IOException( - "Cannot append; log is closed, regionName = " + hri.getRegionNameAsString()); + "Cannot append; log is closed, regionName = " + hri.getRegionNameAsString()); } MutableLong txidHolder = new MutableLong(); MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(() -> { @@ -1289,12 +1287,12 @@ public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws * @param key Modified by this call; we add to it this edits region edit/sequence id. * @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit * sequence id that is after all currently appended edits. - * @param inMemstore Always true except for case where we are writing a region event meta - * marker edit, for example, a compaction completion record into the WAL or noting a - * Region Open event. 
In these cases the entry is just so we can finish an unfinished - * compaction after a crash when the new Server reads the WAL on recovery, etc. These - * transition event 'Markers' do not go via the memstore. When memstore is false, - * we presume a Marker event edit. + * @param inMemstore Always true except for case where we are writing a region event meta marker + * edit, for example, a compaction completion record into the WAL or noting a Region Open + * event. In these cases the entry is just so we can finish an unfinished compaction + * after a crash when the new Server reads the WAL on recovery, etc. These transition + * event 'Markers' do not go via the memstore. When memstore is false, we presume a + * Marker event edit. * @return Returns a 'transaction id' and key will have the region edit/sequence id * in it. */ @@ -1321,8 +1319,7 @@ protected abstract void doReplaceWriter(Path oldPath, Path newPath, W nextWriter protected abstract boolean doCheckLogLowReplication(); /** - * @return true if we exceeded the slow sync roll threshold over the last check - * interval + * @return true if we exceeded the slow sync roll threshold over the last check interval */ protected boolean doCheckSlowSync() { boolean result = false; @@ -1336,16 +1333,15 @@ protected boolean doCheckSlowSync() { // interval from then until the one more that pushed us over. If so, we // should do nothing and let the count reset. if (LOG.isDebugEnabled()) { - LOG.debug("checkSlowSync triggered but we decided to ignore it; " + - "count=" + slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + - ", elapsedTime=" + elapsedTime + " ms, slowSyncCheckInterval=" + - slowSyncCheckInterval + " ms"); + LOG.debug("checkSlowSync triggered but we decided to ignore it; " + "count=" + + slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + ", elapsedTime=" + + elapsedTime + " ms, slowSyncCheckInterval=" + slowSyncCheckInterval + " ms"); } // Fall through to count reset below } else { - LOG.warn("Requesting log roll because we exceeded slow sync threshold; count=" + - slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + - ", current pipeline: " + Arrays.toString(getPipeline())); + LOG.warn("Requesting log roll because we exceeded slow sync threshold; count=" + + slowSyncCount.get() + ", threshold=" + slowSyncRollThreshold + + ", current pipeline: " + Arrays.toString(getPipeline())); result = true; } } @@ -1406,8 +1402,8 @@ private static void usage() { System.err.println("Usage: AbstractFSWAL "); System.err.println("Arguments:"); System.err.println(" --dump Dump textual representation of passed one or more files"); - System.err.println(" For example: " + - "AbstractFSWAL --dump hdfs://example.com:9000/hbase/WALs/MACHINE/LOGFILE"); + System.err.println(" For example: " + + "AbstractFSWAL --dump hdfs://example.com:9000/hbase/WALs/MACHINE/LOGFILE"); System.err.println(" --split Split the passed directory of WAL logs"); System.err.println( " For example: AbstractFSWAL --split hdfs://example.com:9000/hbase/WALs/DIR"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java index a56a31a5a632..66fab4b77a93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to 
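The postSync() and doCheckSlowSync() hunks above implement two roll triggers: a single sync slower than ROLL_ON_SYNC_TIME_MS requests a roll immediately, and SLOW_SYNC_ROLL_THRESHOLD slow syncs accumulated within the check interval request one on the next check. A reduced sketch using the default values shown in the patch; the class is illustrative and omits the check-interval bookkeeping the real doCheckSlowSync() performs:

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Reduced sketch of the slow-sync roll triggers in AbstractFSWAL above.
class SlowSyncRollSketch {

  private final long slowSyncNs = TimeUnit.MILLISECONDS.toNanos(100);      // SLOW_SYNC_TIME_MS default
  private final long rollOnSyncNs = TimeUnit.MILLISECONDS.toNanos(10_000); // ROLL_ON_SYNC_TIME_MS default
  private final int slowSyncRollThreshold = 100;                           // SLOW_SYNC_ROLL_THRESHOLD default
  private final AtomicInteger slowSyncCount = new AtomicInteger();

  /** Returns true when this single sync is slow enough to request an immediate roll. */
  boolean onSyncCompleted(long syncTimeNs) {
    if (syncTimeNs > slowSyncNs) {
      slowSyncCount.incrementAndGet();
      return syncTimeNs > rollOnSyncNs;
    }
    return false;
  }

  /** Periodic check: roll once enough slow syncs have accumulated, then reset the count. */
  boolean shouldRollForAccumulatedSlowSyncs() {
    boolean roll = slowSyncCount.get() >= slowSyncRollThreshold;
    slowSyncCount.set(0);
    return roll;
  }

  public static void main(String[] args) {
    SlowSyncRollSketch sketch = new SlowSyncRollSketch();
    // A 15 second sync is over the 10 second roll-on-sync bound, so it rolls immediately.
    System.out.println(sketch.onSyncCompleted(TimeUnit.SECONDS.toNanos(15)));
  }
}
```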
the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,6 +46,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; @@ -79,8 +80,7 @@ private WALHeader buildWALHeader0(Configuration conf, WALHeader.Builder builder) builder.setWriterClsName(getWriterClassName()); } if (!builder.hasCellCodecClsName()) { - builder.setCellCodecClsName( - WALCellCodec.getWALCellCodecClass(conf).getName()); + builder.setCellCodecClsName(WALCellCodec.getWALCellCodecClass(conf).getName()); } return builder.build(); } @@ -111,10 +111,9 @@ protected final WALHeader buildSecureWALHeader(Configuration conf, WALHeader.Bui // Generate a random encryption key for this WAL Key key = cipher.getRandomKey(); builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(EncryptionUtil.wrapKey(conf, - conf.get(HConstants.CRYPTO_WAL_KEY_NAME_CONF_KEY, - conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName())), - key))); + conf.get(HConstants.CRYPTO_WAL_KEY_NAME_CONF_KEY, + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName())), + key))); // Set up the encryptor Encryptor encryptor = cipher.getEncryptor(); @@ -141,21 +140,22 @@ private boolean initializeCompressionContext(Configuration conf, Path path) thro if (doCompress) { try { final boolean useTagCompression = - conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true); + conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true); final boolean useValueCompression = - conf.getBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, false); + conf.getBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, false); final Compression.Algorithm valueCompressionType = - useValueCompression ? CompressionContext.getValueCompressionAlgorithm(conf) : - Compression.Algorithm.NONE; + useValueCompression ? 
CompressionContext.getValueCompressionAlgorithm(conf) + : Compression.Algorithm.NONE; if (LOG.isTraceEnabled()) { - LOG.trace("Initializing compression context for {}: isRecoveredEdits={}" + - ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", path, - CommonFSUtils.isRecoveredEdits(path), useTagCompression, useValueCompression, + LOG.trace( + "Initializing compression context for {}: isRecoveredEdits={}" + + ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", + path, CommonFSUtils.isRecoveredEdits(path), useTagCompression, useValueCompression, valueCompressionType); } this.compressionContext = - new CompressionContext(LRUDictionary.class, CommonFSUtils.isRecoveredEdits(path), - useTagCompression, useValueCompression, valueCompressionType); + new CompressionContext(LRUDictionary.class, CommonFSUtils.isRecoveredEdits(path), + useTagCompression, useValueCompression, valueCompressionType); } catch (Exception e) { throw new IOException("Failed to initiate CompressionContext", e); } @@ -164,8 +164,8 @@ private boolean initializeCompressionContext(Configuration conf, Path path) thro } public void init(FileSystem fs, Path path, Configuration conf, boolean overwritable, - long blocksize, StreamSlowMonitor monitor) throws IOException, - StreamLacksCapabilityException { + long blocksize, StreamSlowMonitor monitor) + throws IOException, StreamLacksCapabilityException { try { this.conf = conf; boolean doCompress = initializeCompressionContext(conf, path); @@ -177,12 +177,11 @@ public void init(FileSystem fs, Path path, Configuration conf, boolean overwrita initOutput(fs, path, overwritable, bufferSize, replication, blocksize, monitor); boolean doTagCompress = - doCompress && conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true); + doCompress && conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true); boolean doValueCompress = - doCompress && conf.getBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, false); - WALHeader.Builder headerBuilder = - WALHeader.newBuilder().setHasCompression(doCompress).setHasTagCompression(doTagCompress) - .setHasValueCompression(doValueCompress); + doCompress && conf.getBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, false); + WALHeader.Builder headerBuilder = WALHeader.newBuilder().setHasCompression(doCompress) + .setHasTagCompression(doTagCompress).setHasValueCompression(doValueCompress); if (doValueCompress) { headerBuilder.setValueCompressionAlgorithm( CompressionContext.getValueCompressionAlgorithm(conf).ordinal()); @@ -197,7 +196,8 @@ public void init(FileSystem fs, Path path, Configuration conf, boolean overwrita if (LOG.isTraceEnabled()) { LOG.trace("Initialized protobuf WAL={}, compression={}, tagCompression={}" - + ", valueCompression={}", path, doCompress, doTagCompress, doValueCompress); + + ", valueCompression={}", + path, doCompress, doTagCompress, doValueCompress); } } catch (Exception e) { LOG.warn("Init output failed, path={}", path, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index 2602c089216b..b0ba29d842f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor; - /** * An asynchronous implementation of FSWAL. *

          @@ -131,18 +130,18 @@ public class AsyncFSWAL extends AbstractFSWAL { private static final Logger LOG = LoggerFactory.getLogger(AsyncFSWAL.class); - private static final Comparator SEQ_COMPARATOR = Comparator.comparingLong( - SyncFuture::getTxid).thenComparingInt(System::identityHashCode); + private static final Comparator SEQ_COMPARATOR = + Comparator.comparingLong(SyncFuture::getTxid).thenComparingInt(System::identityHashCode); public static final String WAL_BATCH_SIZE = "hbase.wal.batch.size"; public static final long DEFAULT_WAL_BATCH_SIZE = 64L * 1024; public static final String ASYNC_WAL_USE_SHARED_EVENT_LOOP = - "hbase.wal.async.use-shared-event-loop"; + "hbase.wal.async.use-shared-event-loop"; public static final boolean DEFAULT_ASYNC_WAL_USE_SHARED_EVENT_LOOP = false; public static final String ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS = - "hbase.wal.async.wait.on.shutdown.seconds"; + "hbase.wal.async.wait.on.shutdown.seconds"; public static final int DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS = 5; private final EventLoopGroup eventLoopGroup; @@ -231,29 +230,30 @@ public AsyncFSWAL(FileSystem fs, Abortable abortable, Path rootDir, String logDi Queue queue = (Queue) field.get(consumeExecutor); hasConsumerTask = () -> queue.peek() == consumer; } catch (Exception e) { - LOG.warn("Can not get task queue of " + consumeExecutor + - ", this is not necessary, just give up", e); + LOG.warn("Can not get task queue of " + consumeExecutor + + ", this is not necessary, just give up", + e); hasConsumerTask = () -> false; } } else { hasConsumerTask = () -> false; } } else { - ThreadPoolExecutor threadPool = - new ThreadPoolExecutor(1, 1, 0L, - TimeUnit.MILLISECONDS, new LinkedBlockingQueue(), - new ThreadFactoryBuilder().setNameFormat("AsyncFSWAL-%d-"+ rootDir.toString() + - "-prefix:" + (prefix == null ? "default" : prefix).replace("%", "%%")) - .setDaemon(true).build()); + ThreadPoolExecutor threadPool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(), + new ThreadFactoryBuilder() + .setNameFormat("AsyncFSWAL-%d-" + rootDir.toString() + "-prefix:" + + (prefix == null ? "default" : prefix).replace("%", "%%")) + .setDaemon(true).build()); hasConsumerTask = () -> threadPool.getQueue().peek() == consumer; this.consumeExecutor = threadPool; } this.hasConsumerTask = hasConsumerTask; int preallocatedEventCount = - conf.getInt("hbase.regionserver.wal.disruptor.event.count", 1024 * 16); + conf.getInt("hbase.regionserver.wal.disruptor.event.count", 1024 * 16); waitingConsumePayloads = - RingBuffer.createMultiProducer(RingBufferTruck::new, preallocatedEventCount); + RingBuffer.createMultiProducer(RingBufferTruck::new, preallocatedEventCount); waitingConsumePayloadsGatingSequence = new Sequence(Sequencer.INITIAL_CURSOR_VALUE); waitingConsumePayloads.addGatingSequences(waitingConsumePayloadsGatingSequence); @@ -344,7 +344,7 @@ private void syncFailed(long epochWhenSync, Throwable error) { } private void syncCompleted(long epochWhenSync, AsyncWriter writer, long processedTxid, - long startTimeNs) { + long startTimeNs) { // Please see the last several comments on HBASE-22761, it is possible that we get a // syncCompleted which acks a previous sync request after we received a syncFailed on the same // writer. 
So here we will also check on the epoch and state, if the epoch has already been @@ -394,8 +394,8 @@ private void syncCompleted(long epochWhenSync, AsyncWriter writer, long processe // If we haven't already requested a roll, check if we have exceeded logrollsize if (!isLogRollRequested() && writer.getLength() > logrollsize) { if (LOG.isDebugEnabled()) { - LOG.debug("Requesting log roll because of file size threshold; length=" + - writer.getLength() + ", logrollsize=" + logrollsize); + LOG.debug("Requesting log roll because of file size threshold; length=" + writer.getLength() + + ", logrollsize=" + logrollsize); } requestLogRoll(SIZE); } @@ -404,9 +404,8 @@ private void syncCompleted(long epochWhenSync, AsyncWriter writer, long processe // find all the sync futures between these two txids to see if we need to issue a hsync, if no // sync futures then just use the default one. private boolean isHsync(long beginTxid, long endTxid) { - SortedSet futures = - syncFutures.subSet(new SyncFuture().reset(beginTxid, false), - new SyncFuture().reset(endTxid + 1, false)); + SortedSet futures = syncFutures.subSet(new SyncFuture().reset(beginTxid, false), + new SyncFuture().reset(endTxid + 1, false)); if (futures.isEmpty()) { return useHsync; } @@ -422,7 +421,7 @@ private void sync(AsyncWriter writer) { fileLengthAtLastSync = writer.getLength(); long currentHighestProcessedAppendTxid = highestProcessedAppendTxid; boolean shouldUseHsync = - isHsync(highestProcessedAppendTxidAtLastSync, currentHighestProcessedAppendTxid); + isHsync(highestProcessedAppendTxidAtLastSync, currentHighestProcessedAppendTxid); highestProcessedAppendTxidAtLastSync = currentHighestProcessedAppendTxid; final long startTimeNs = System.nanoTime(); final long epoch = (long) epochAndState >>> 2L; @@ -512,8 +511,8 @@ private void appendAndSync() { if (appended) { // This is possible, when we fail to sync, we will add the unackedAppends back to // toWriteAppends, so here we may get an entry which is already in the unackedAppends. - if (addedToUnackedAppends || unackedAppends.isEmpty() || - getLastTxid(unackedAppends) < entry.getTxid()) { + if (addedToUnackedAppends || unackedAppends.isEmpty() + || getLastTxid(unackedAppends) < entry.getTxid()) { unackedAppends.addLast(entry); addedToUnackedAppends = true; } @@ -525,8 +524,8 @@ private void appendAndSync() { // There could be other ways to fix, such as changing the logic in the consume method, but // it will break the assumption and then (may) lead to a big refactoring. So here let's use // this way to fix first, can optimize later. - if (writer.getLength() - fileLengthAtLastSync >= batchSize && - (addedToUnackedAppends || entry.getTxid() >= getLastTxid(unackedAppends))) { + if (writer.getLength() - fileLengthAtLastSync >= batchSize + && (addedToUnackedAppends || entry.getTxid() >= getLastTxid(unackedAppends))) { break; } } @@ -625,8 +624,8 @@ private void consume() { consumeLock.unlock(); } long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; - for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor <= cursorBound; - nextCursor++) { + for (long cursorBound = + waitingConsumePayloads.getCursor(); nextCursor <= cursorBound; nextCursor++) { if (!waitingConsumePayloads.isPublished(nextCursor)) { break; } @@ -662,8 +661,8 @@ private void consume() { // 3. we set consumerScheduled to false and also give up scheduling consumer task. 
if (waitingConsumePayloadsGatingSequence.get() == waitingConsumePayloads.getCursor()) { // we will give up consuming so if there are some unsynced data we need to issue a sync. - if (writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() && - syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync) { + if (writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() + && syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync) { // no new data in the ringbuffer and we have at least one sync request sync(writer); } @@ -696,12 +695,12 @@ protected boolean markerEditOnly() { @Override protected long append(RegionInfo hri, WALKeyImpl key, WALEdit edits, boolean inMemstore) - throws IOException { + throws IOException { if (markerEditOnly() && !edits.isMetaEdit()) { throw new IOException("WAL is closing, only marker edit is allowed"); } long txid = - stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, waitingConsumePayloads); + stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, waitingConsumePayloads); if (shouldScheduleConsumer()) { consumeExecutor.execute(consumer); } @@ -830,11 +829,11 @@ protected void doShutdown() throws IOException { closeExecutor.shutdown(); try { if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) { - LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" + - " the close of async writer doesn't complete." + - "Please check the status of underlying filesystem" + - " or increase the wait time by the config \"" + ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS + - "\""); + LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" + + " the close of async writer doesn't complete." + + "Please check the status of underlying filesystem" + + " or increase the wait time by the config \"" + ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS + + "\""); } } catch (InterruptedException e) { LOG.error("The wait for close of async writer is interrupted"); @@ -843,8 +842,8 @@ protected void doShutdown() throws IOException { IOException error = new IOException("WAL has been closed"); long nextCursor = waitingConsumePayloadsGatingSequence.get() + 1; // drain all the pending sync requests - for (long cursorBound = waitingConsumePayloads.getCursor(); nextCursor <= cursorBound; - nextCursor++) { + for (long cursorBound = + waitingConsumePayloads.getCursor(); nextCursor <= cursorBound; nextCursor++) { if (!waitingConsumePayloads.isPublished(nextCursor)) { break; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java index fbd3882d4f73..f7e83db993d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
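The appendAndSync()/consume() changes above keep the existing batching rule: once the writer has grown by at least hbase.wal.batch.size (64 KB by default) since the last sync, a sync is issued. A small sketch of that arithmetic, not part of the patch; only the key name and default come from the diff:

```java
// Small sketch of the batch-size arithmetic kept by the AsyncFSWAL changes above.
class WalBatchSketch {

  static final long DEFAULT_WAL_BATCH_SIZE = 64L * 1024; // "hbase.wal.batch.size"

  private long fileLengthAtLastSync;

  /** True once at least one batch worth of appends has accumulated since the last sync. */
  boolean shouldSync(long currentWriterLength, long batchSize) {
    return currentWriterLength - fileLengthAtLastSync >= batchSize;
  }

  void onSynced(long currentWriterLength) {
    fileLengthAtLastSync = currentWriterLength;
  }

  public static void main(String[] args) {
    WalBatchSketch sketch = new WalBatchSketch();
    // 70000 bytes written since the last sync exceeds the 65536-byte default batch size.
    System.out.println(sketch.shouldSync(70_000, DEFAULT_WAL_BATCH_SIZE)); // true
  }
}
```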
See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; @@ -69,8 +70,7 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter */ private volatile long finalSyncedLength = -1; - private static final class OutputStreamWrapper extends OutputStream - implements ByteBufferWriter { + private static final class OutputStreamWrapper extends OutputStream implements ByteBufferWriter { private final AsyncFSOutput out; @@ -129,7 +129,7 @@ public AsyncProtobufLogWriter(EventLoopGroup eventLoopGroup, /* * @return class name which is recognized by hbase-1.x to avoid ProtobufLogReader throwing error: - * IOException: Got unknown writer class: AsyncProtobufLogWriter + * IOException: Got unknown writer class: AsyncProtobufLogWriter */ @Override protected String getWriterClassName() { @@ -140,8 +140,7 @@ protected String getWriterClassName() { public void append(Entry entry) { int buffered = output.buffered(); try { - entry.getKey(). - getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build() + entry.getKey().getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build() .writeDelimitedTo(asyncOutputWrapper); } catch (IOException e) { throw new AssertionError("should not happen", e); @@ -174,9 +173,8 @@ public synchronized void close() throws IOException { output.recoverAndClose(null); } /** - * We have to call {@link AsyncFSOutput#getSyncedLength()} - * after {@link AsyncFSOutput#close()} to get the final length - * synced to underlying filesystem because {@link AsyncFSOutput#close()} + * We have to call {@link AsyncFSOutput#getSyncedLength()} after {@link AsyncFSOutput#close()} + * to get the final length synced to underlying filesystem because {@link AsyncFSOutput#close()} * may also flush some data to underlying filesystem. 
*/ this.finalSyncedLength = this.output.getSyncedLength(); @@ -189,10 +187,10 @@ public AsyncFSOutput getOutput() { @Override protected void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) throws IOException, - StreamLacksCapabilityException { + short replication, long blockSize, StreamSlowMonitor monitor) + throws IOException, StreamLacksCapabilityException { this.output = AsyncFSOutputHelper.createOutput(fs, path, overwritable, false, replication, - blockSize, eventLoopGroup, channelClass, monitor); + blockSize, eventLoopGroup, channelClass, monitor); this.asyncOutputWrapper = new OutputStreamWrapper(output); } @@ -206,7 +204,7 @@ protected void closeOutput() { } } } - + private long writeWALMetadata(Consumer> action) throws IOException { CompletableFuture future = new CompletableFuture<>(); action.accept(future); @@ -270,16 +268,16 @@ protected OutputStream getOutputStreamForCellEncoder() { @Override public long getSyncedLength() { - /** - * The statement "this.output = null;" in {@link AsyncProtobufLogWriter#close} - * is a sync point, if output is null, then finalSyncedLength must set, - * so we can return finalSyncedLength, else we return output.getSyncedLength - */ + /** + * The statement "this.output = null;" in {@link AsyncProtobufLogWriter#close} is a sync point, + * if output is null, then finalSyncedLength must set, so we can return finalSyncedLength, else + * we return output.getSyncedLength + */ AsyncFSOutput outputToUse = this.output; - if(outputToUse == null) { - long finalSyncedLengthToUse = this.finalSyncedLength; - assert finalSyncedLengthToUse >= 0; - return finalSyncedLengthToUse; + if (outputToUse == null) { + long finalSyncedLengthToUse = this.finalSyncedLength; + assert finalSyncedLengthToUse >= 0; + return finalSyncedLengthToUse; } return outputToUse.getSyncedLength(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java index 850359187ae5..c359acf0ead7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,8 +68,8 @@ public void close() throws IOException { } } if (error != null) { - throw new IOException("Failed to close at least one writer, please see the warn log above. " + - "The cause is the first exception occurred", error); + throw new IOException("Failed to close at least one writer, please see the warn log above. " + + "The cause is the first exception occurred", error); } } @@ -96,6 +96,6 @@ public CompletableFuture sync(boolean forceSync) { public static CombinedAsyncWriter create(AsyncWriter writer, AsyncWriter... writers) { return new CombinedAsyncWriter( - ImmutableList. builder().add(writer).add(writers).build()); + ImmutableList. 
builder().add(writer).add(writers).build()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java index bfb7f9a85a5b..66b274e49325 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.ByteArrayOutputStream; @@ -41,30 +40,30 @@ /** * Context that holds the various dictionaries for compression in WAL. *
          - * CompressionContexts are not expected to be shared among threads. Multithreaded use may - * produce unexpected results. + * CompressionContexts are not expected to be shared among threads. Multithreaded use may produce + * unexpected results. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) public class CompressionContext { private static final Logger LOG = LoggerFactory.getLogger(CompressionContext.class); public static final String ENABLE_WAL_TAGS_COMPRESSION = - "hbase.regionserver.wal.tags.enablecompression"; + "hbase.regionserver.wal.tags.enablecompression"; public static final String ENABLE_WAL_VALUE_COMPRESSION = - "hbase.regionserver.wal.value.enablecompression"; + "hbase.regionserver.wal.value.enablecompression"; public static final String WAL_VALUE_COMPRESSION_TYPE = - "hbase.regionserver.wal.value.compression.type"; + "hbase.regionserver.wal.value.compression.type"; public enum DictionaryIndex { REGION, TABLE, FAMILY, QUALIFIER, ROW } /** - * Encapsulates the compression algorithm and its streams that we will use for value - * compression in this WAL. + * Encapsulates the compression algorithm and its streams that we will use for value compression + * in this WAL. */ static class ValueCompressor { @@ -86,16 +85,14 @@ public Compression.Algorithm getAlgorithm() { return algorithm; } - public byte[] compress(byte[] valueArray, int valueOffset, int valueLength) - throws IOException { + public byte[] compress(byte[] valueArray, int valueOffset, int valueLength) throws IOException { if (compressedOut == null) { // Create the output streams here the first time around. lowerOut = new ByteArrayOutputStream(); if (compressor == null) { compressor = algorithm.getCompressor(); } - compressedOut = algorithm.createCompressionStream(lowerOut, compressor, - IO_BUFFER_SIZE); + compressedOut = algorithm.createCompressionStream(lowerOut, compressor, IO_BUFFER_SIZE); } else { lowerOut.reset(); } @@ -117,8 +114,7 @@ public int decompress(InputStream in, int inLength, byte[] outArray, int outOffs if (decompressor == null) { decompressor = algorithm.getDecompressor(); } - compressedIn = algorithm.createDecompressionStream(lowerIn, decompressor, - IO_BUFFER_SIZE); + compressedIn = algorithm.createDecompressionStream(lowerIn, decompressor, IO_BUFFER_SIZE); } else { lowerIn.setDelegate(in, inLength); } @@ -178,18 +174,16 @@ public void clear() { TagCompressionContext tagCompressionContext = null; ValueCompressor valueCompressor = null; - public CompressionContext(Class dictType, - boolean recoveredEdits, boolean hasTagCompression, boolean hasValueCompression, - Compression.Algorithm valueCompressionType) - throws SecurityException, NoSuchMethodException, InstantiationException, - IllegalAccessException, InvocationTargetException, IOException { - Constructor dictConstructor = - dictType.getConstructor(); + public CompressionContext(Class dictType, boolean recoveredEdits, + boolean hasTagCompression, boolean hasValueCompression, + Compression.Algorithm valueCompressionType) throws SecurityException, NoSuchMethodException, + InstantiationException, IllegalAccessException, InvocationTargetException, IOException { + Constructor dictConstructor = dictType.getConstructor(); for (DictionaryIndex dictionaryIndex : DictionaryIndex.values()) { Dictionary newDictionary = dictConstructor.newInstance(); dictionaries.put(dictionaryIndex, 
newDictionary); } - if(recoveredEdits) { + if (recoveredEdits) { getDictionary(DictionaryIndex.REGION).init(1); getDictionary(DictionaryIndex.TABLE).init(1); } else { @@ -210,9 +204,8 @@ public CompressionContext(Class dictType, } public CompressionContext(Class dictType, boolean recoveredEdits, - boolean hasTagCompression) - throws SecurityException, NoSuchMethodException, InstantiationException, - IllegalAccessException, InvocationTargetException, IOException { + boolean hasTagCompression) throws SecurityException, NoSuchMethodException, + InstantiationException, IllegalAccessException, InvocationTargetException, IOException { this(dictType, recoveredEdits, hasTagCompression, false, null); } @@ -233,7 +226,7 @@ public ValueCompressor getValueCompressor() { } void clear() { - for(Dictionary dictionary : dictionaries.values()){ + for (Dictionary dictionary : dictionaries.values()) { dictionary.clear(); } if (tagCompressionContext != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java index 13f5d6ef35bd..9212ab15d203 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -13,15 +13,13 @@ * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and - * limitations under the License + * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -29,17 +27,17 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.util.Dictionary; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.WritableUtils; - -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; - import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; +import org.apache.hadoop.io.WritableUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * A set of static functions for running our custom WAL compression/decompression. - * Also contains a command line tool to compress and uncompress WALs. + * A set of static functions for running our custom WAL compression/decompression. Also contains a + * command line tool to compress and uncompress WALs. 
*/ @InterfaceAudience.Private public class Compressor { @@ -65,8 +63,7 @@ private static void printHelp() { return; } - private static void transformFile(Path input, Path output) - throws IOException { + private static void transformFile(Path input, Path output) throws IOException { Configuration conf = HBaseConfiguration.create(); FileSystem inFS = input.getFileSystem(conf); @@ -80,12 +77,13 @@ private static void transformFile(Path input, Path output) System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName()); return; } - boolean compress = ((ReaderBase)in).hasCompression(); + boolean compress = ((ReaderBase) in).hasCompression(); conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress); out = WALFactory.createWALWriter(outFS, output, conf); WAL.Entry e = null; - while ((e = in.next()) != null) out.append(e); + while ((e = in.next()) != null) + out.append(e); } finally { in.close(); if (out != null) { @@ -97,14 +95,12 @@ private static void transformFile(Path input, Path output) /** * Reads the next compressed entry and returns it as a byte array - * * @param in the DataInput to read from * @param dict the dictionary we use for our read. * @return the uncompressed array. */ @Deprecated - static byte[] readCompressed(DataInput in, Dictionary dict) - throws IOException { + static byte[] readCompressed(DataInput in, Dictionary dict) throws IOException { byte status = in.readByte(); if (status == Dictionary.NOT_IN_DICTIONARY) { @@ -121,27 +117,23 @@ static byte[] readCompressed(DataInput in, Dictionary dict) short dictIdx = toShort(status, in.readByte()); byte[] entry = dict.getEntry(dictIdx); if (entry == null) { - throw new IOException("Missing dictionary entry for index " - + dictIdx); + throw new IOException("Missing dictionary entry for index " + dictIdx); } return entry; } } /** - * Reads a compressed entry into an array. - * The output into the array ends up length-prefixed. - * + * Reads a compressed entry into an array. The output into the array ends up length-prefixed. * @param to the array to write into * @param offset array offset to start writing to * @param in the DataInput to read from * @param dict the dictionary to use for compression - * * @return the length of the uncompressed data */ @Deprecated - static int uncompressIntoArray(byte[] to, int offset, DataInput in, - Dictionary dict) throws IOException { + static int uncompressIntoArray(byte[] to, int offset, DataInput in, Dictionary dict) + throws IOException { byte status = in.readByte(); if (status == Dictionary.NOT_IN_DICTIONARY) { @@ -162,8 +154,7 @@ static int uncompressIntoArray(byte[] to, int offset, DataInput in, throw new IOException("Unable to uncompress the log entry", ex); } if (entry == null) { - throw new IOException("Missing dictionary entry for index " - + dictIdx); + throw new IOException("Missing dictionary entry for index " + dictIdx); } // now we write the uncompressed value. Bytes.putBytes(to, offset, entry, 0, entry.length); @@ -173,14 +164,12 @@ static int uncompressIntoArray(byte[] to, int offset, DataInput in, /** * Compresses and writes an array to a DataOutput - * * @param data the array to write. 
* @param out the DataOutput to write into * @param dict the dictionary to use for compression */ @Deprecated - static void writeCompressed(byte[] data, int offset, int length, - DataOutput out, Dictionary dict) + static void writeCompressed(byte[] data, int offset, int length, DataOutput out, Dictionary dict) throws IOException { short dictIdx = Dictionary.NOT_IN_DICTIONARY; if (dict != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java index c38515e08178..5825ba3217f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DamagedWALException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when a failed append or sync on a WAL. - * Thrown when WAL can no longer be used. Roll the WAL. + * Thrown when a failed append or sync on a WAL. Thrown when WAL can no longer be used. Roll the + * WAL. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java index 1279c2f31e83..b6b5ee3f69e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public DualAsyncFSWAL(FileSystem fs, FileSystem remoteFs, Path rootDir, Path rem boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, Class channelClass) throws FailedLogCloseException, IOException { super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, - eventLoopGroup, channelClass); + eventLoopGroup, channelClass); this.remoteFs = remoteFs; this.remoteWALDir = remoteWALDir; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 54dfdcde740f..eabff65b820b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -107,8 +107,10 @@ public class FSHLog extends AbstractFSWAL { // We use ring buffer sequence as txid of FSWALEntry and SyncFuture. 
private static final Logger LOG = LoggerFactory.getLogger(FSHLog.class); - private static final String TOLERABLE_LOW_REPLICATION = "hbase.regionserver.hlog.tolerable.lowreplication"; - private static final String LOW_REPLICATION_ROLL_LIMIT = "hbase.regionserver.hlog.lowreplication.rolllimit"; + private static final String TOLERABLE_LOW_REPLICATION = + "hbase.regionserver.hlog.tolerable.lowreplication"; + private static final String LOW_REPLICATION_ROLL_LIMIT = + "hbase.regionserver.hlog.lowreplication.rolllimit"; private static final int DEFAULT_LOW_REPLICATION_ROLL_LIMIT = 5; private static final String ROLL_ERRORS_TOLERATED = "hbase.regionserver.logroll.errors.tolerated"; private static final int DEFAULT_ROLL_ERRORS_TOLERATED = 2; @@ -117,7 +119,8 @@ public class FSHLog extends AbstractFSWAL { private static final String MAX_BATCH_COUNT = "hbase.regionserver.wal.sync.batch.count"; private static final int DEFAULT_MAX_BATCH_COUNT = 200; - private static final String FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = "hbase.wal.fshlog.wait.on.shutdown.seconds"; + private static final String FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = + "hbase.wal.fshlog.wait.on.shutdown.seconds"; private static final int DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS = 5; /** @@ -245,19 +248,20 @@ public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir suffix); this.minTolerableReplication = conf.getInt(TOLERABLE_LOW_REPLICATION, CommonFSUtils.getDefaultReplication(fs, this.walDir)); - this.lowReplicationRollLimit = conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); + this.lowReplicationRollLimit = + conf.getInt(LOW_REPLICATION_ROLL_LIMIT, DEFAULT_LOW_REPLICATION_ROLL_LIMIT); this.closeErrorsTolerated = conf.getInt(ROLL_ERRORS_TOLERATED, DEFAULT_ROLL_ERRORS_TOLERATED); - this.waitOnShutdownInSeconds = conf.getInt(FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS, - DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); + this.waitOnShutdownInSeconds = + conf.getInt(FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS, DEFAULT_FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); // This is the 'writer' -- a single threaded executor. This single thread 'consumes' what is // put on the ring buffer. String hostingThreadName = Thread.currentThread().getName(); // Using BlockingWaitStrategy. Stuff that is going on here takes so long it makes no sense // spinning as other strategies do. this.disruptor = new Disruptor<>(RingBufferTruck::new, getPreallocatedEventCount(), - new ThreadFactoryBuilder().setNameFormat(hostingThreadName + ".append-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), - ProducerType.MULTI, new BlockingWaitStrategy()); + new ThreadFactoryBuilder().setNameFormat(hostingThreadName + ".append-pool-%d") + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + ProducerType.MULTI, new BlockingWaitStrategy()); // Advance the ring buffer sequence so that it starts from 1 instead of 0, // because SyncFuture.NOT_DONE = 0. this.disruptor.getRingBuffer().next(); @@ -273,9 +277,9 @@ public FSHLog(final FileSystem fs, final Abortable abortable, final Path rootDir /** * Currently, we need to expose the writer's OutputStream to tests so that they can manipulate the - * default behavior (such as setting the maxRecoveryErrorCount value). This is - * done using reflection on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 support is - * removed. + * default behavior (such as setting the maxRecoveryErrorCount value). 
This is done using + * reflection on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 + * support is removed. * @return null if underlying stream is not ready. */ OutputStream getOutputStream() { @@ -448,7 +452,8 @@ private void closeWriter(Writer writer, Path path, boolean syncCloseCall) throws throw ioe; } LOG.warn("Riding over failed WAL close of " + path - + "; THIS FILE WAS NOT CLOSED BUT ALL EDITS SYNCED SO SHOULD BE OK", ioe); + + "; THIS FILE WAS NOT CLOSED BUT ALL EDITS SYNCED SO SHOULD BE OK", + ioe); } finally { inflightWALClosures.remove(path.getName()); } @@ -481,10 +486,11 @@ protected void doShutdown() throws IOException { closeExecutor.shutdown(); try { if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) { - LOG.error("We have waited {} seconds but the close of writer(s) doesn't complete." - + "Please check the status of underlying filesystem" - + " or increase the wait time by the config \"{}\"", this.waitOnShutdownInSeconds, - FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); + LOG.error( + "We have waited {} seconds but the close of writer(s) doesn't complete." + + "Please check the status of underlying filesystem" + + " or increase the wait time by the config \"{}\"", + this.waitOnShutdownInSeconds, FSHLOG_WAIT_ON_SHUTDOWN_IN_SECONDS); } } catch (InterruptedException e) { LOG.error("The wait for termination of FSHLog writer(s) is interrupted"); @@ -494,7 +500,7 @@ protected void doShutdown() throws IOException { @Override protected long append(final RegionInfo hri, final WALKeyImpl key, final WALEdit edits, - final boolean inMemstore) throws IOException { + final boolean inMemstore) throws IOException { return stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, disruptor.getRingBuffer()); } @@ -603,8 +609,7 @@ private long updateHighestSyncedSequence(long sequence) { boolean areSyncFuturesReleased() { // check whether there is no sync futures offered, and no in-flight sync futures that is being // processed. - return syncFutures.size() <= 0 - && takeSyncFuture == null; + return syncFutures.size() <= 0 && takeSyncFuture == null; } @Override @@ -614,8 +619,8 @@ public void run() { int syncCount = 0; try { - // Make a local copy of takeSyncFuture after we get it. We've been running into NPEs - // 2020-03-22 16:54:32,180 WARN [sync.1] wal.FSHLog$SyncRunner(589): UNEXPECTED + // Make a local copy of takeSyncFuture after we get it. 
We've been running into NPEs + // 2020-03-22 16:54:32,180 WARN [sync.1] wal.FSHLog$SyncRunner(589): UNEXPECTED // java.lang.NullPointerException // at org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:582) // at java.lang.Thread.run(Thread.java:748) @@ -694,14 +699,14 @@ private boolean checkLogRoll() { } try { if (doCheckLogLowReplication()) { - LOG.warn("Requesting log roll because of low replication, current pipeline: " + - Arrays.toString(getPipeline())); + LOG.warn("Requesting log roll because of low replication, current pipeline: " + + Arrays.toString(getPipeline())); requestLogRoll(LOW_REPLICATION); return true; } else if (writer != null && writer.getLength() > logrollsize) { if (LOG.isDebugEnabled()) { - LOG.debug("Requesting log roll because of file size threshold; length=" + - writer.getLength() + ", logrollsize=" + logrollsize); + LOG.debug("Requesting log roll because of file size threshold; length=" + + writer.getLength() + ", logrollsize=" + logrollsize); } requestLogRoll(SIZE); return true; @@ -830,8 +835,8 @@ boolean isLowReplicationRollEnabled() { return lowReplicationRollEnabled; } - public static final long FIXED_OVERHEAD = ClassSize - .align(ClassSize.OBJECT + (5 * ClassSize.REFERENCE) + (2 * ClassSize.ATOMIC_INTEGER) + public static final long FIXED_OVERHEAD = + ClassSize.align(ClassSize.OBJECT + (5 * ClassSize.REFERENCE) + (2 * ClassSize.ATOMIC_INTEGER) + (3 * Bytes.SIZEOF_INT) + (4 * Bytes.SIZEOF_LONG)); /** @@ -850,13 +855,13 @@ boolean isLowReplicationRollEnabled() { * To start up the drama, Thread A creates an instance of this class each time it would do this * zigzag dance and passes it to Thread B (these classes use Latches so it is one shot only). * Thread B notices the new instance (via reading a volatile reference or how ever) and it starts - * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it cannot proceed - * until the Thread B 'safe point' is attained. Thread A will be held inside in - * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread B frees - * Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the 'safe - * point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it blocks - * here until Thread A calls {@link #releaseSafePoint()}). Thread A proceeds to do what it needs - * to do while Thread B is paused. When finished, it lets Thread B lose by calling + * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it + * cannot proceed until the Thread B 'safe point' is attained. Thread A will be held inside in + * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread + * B frees Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the + * 'safe point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it + * blocks here until Thread A calls {@link #releaseSafePoint()}). Thread A proceeds to do what it + * needs to do while Thread B is paused. When finished, it lets Thread B lose by calling * {@link #releaseSafePoint()} and away go both Threads again. */ static class SafePointZigZagLatch { @@ -883,8 +888,8 @@ private void checkIfSyncFailed(SyncFuture syncFuture) throws FailedSyncBeforeLog * exception, then something is up w/ our syncing. 
* @return The passed syncFuture */ - SyncFuture waitSafePoint(SyncFuture syncFuture) throws InterruptedException, - FailedSyncBeforeLogCloseException { + SyncFuture waitSafePoint(SyncFuture syncFuture) + throws InterruptedException, FailedSyncBeforeLogCloseException { while (!this.safePointAttainedLatch.await(1, TimeUnit.MILLISECONDS)) { checkIfSyncFailed(syncFuture); } @@ -1015,8 +1020,8 @@ private boolean isOutstandingSyncs() { private boolean isOutstandingSyncsFromRunners() { // Look at SyncFutures in the SyncRunners - for (SyncRunner syncRunner: syncRunners) { - if(syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { + for (SyncRunner syncRunner : syncRunners) { + if (syncRunner.isAlive() && !syncRunner.areSyncFuturesReleased()) { return true; } } @@ -1054,11 +1059,13 @@ public void onEvent(final RingBufferTruck truck, final long sequence, boolean en // Failed append. Record the exception. this.exception = e; // invoking cleanupOutstandingSyncsOnException when append failed with exception, - // it will cleanup existing sync requests recorded in syncFutures but not offered to SyncRunner yet, - // so there won't be any sync future left over if no further truck published to disruptor. + // it will cleanup existing sync requests recorded in syncFutures but not offered to + // SyncRunner yet, + // so there won't be any sync future left over if no further truck published to + // disruptor. cleanupOutstandingSyncsOnException(sequence, - this.exception instanceof DamagedWALException ? this.exception - : new DamagedWALException("On sync", this.exception)); + this.exception instanceof DamagedWALException ? this.exception + : new DamagedWALException("On sync", this.exception)); // Return to keep processing events coming off the ringbuffer return; } finally { @@ -1081,15 +1088,15 @@ public void onEvent(final RingBufferTruck truck, final long sequence, boolean en return; } // syncRunnerIndex is bound to the range [0, Integer.MAX_INT - 1] as follows: - // * The maximum value possible for syncRunners.length is Integer.MAX_INT - // * syncRunnerIndex starts at 0 and is incremented only here - // * after the increment, the value is bounded by the '%' operator to - // [0, syncRunners.length), presuming the value was positive prior to - // the '%' operator. - // * after being bound to [0, Integer.MAX_INT - 1], the new value is stored in - // syncRunnerIndex ensuring that it can't grow without bound and overflow. - // * note that the value after the increment must be positive, because the most it - // could have been prior was Integer.MAX_INT - 1 and we only increment by 1. + // * The maximum value possible for syncRunners.length is Integer.MAX_INT + // * syncRunnerIndex starts at 0 and is incremented only here + // * after the increment, the value is bounded by the '%' operator to + // [0, syncRunners.length), presuming the value was positive prior to + // the '%' operator. + // * after being bound to [0, Integer.MAX_INT - 1], the new value is stored in + // syncRunnerIndex ensuring that it can't grow without bound and overflow. + // * note that the value after the increment must be positive, because the most it + // could have been prior was Integer.MAX_INT - 1 and we only increment by 1. 
this.syncRunnerIndex = (this.syncRunnerIndex + 1) % this.syncRunners.length; try { // Below expects that the offer 'transfers' responsibility for the outstanding syncs to @@ -1104,8 +1111,9 @@ public void onEvent(final RingBufferTruck truck, final long sequence, boolean en } // We may have picked up an exception above trying to offer sync if (this.exception != null) { - cleanupOutstandingSyncsOnException(sequence, this.exception instanceof DamagedWALException - ? this.exception : new DamagedWALException("On sync", this.exception)); + cleanupOutstandingSyncsOnException(sequence, + this.exception instanceof DamagedWALException ? this.exception + : new DamagedWALException("On sync", this.exception)); } attainSafePoint(sequence); // It is critical that we offer the futures back to the cache for reuse here after the @@ -1165,8 +1173,8 @@ void append(final FSWALEntry entry) throws Exception { try { FSHLog.this.appendEntry(writer, entry); } catch (Exception e) { - String msg = "Append sequenceId=" + entry.getKey().getSequenceId() - + ", requesting roll of WAL"; + String msg = + "Append sequenceId=" + entry.getKey().getSequenceId() + ", requesting roll of WAL"; LOG.warn(msg, e); requestLogRoll(ERROR); throw new DamagedWALException(msg, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java index ca51ec0c5684..a7c758309104 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java @@ -37,12 +37,12 @@ import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** - * A WAL Entry for {@link AbstractFSWAL} implementation. Immutable. - * A subclass of {@link Entry} that carries extra info across the ring buffer such as - * region sequenceid (we want to use this later, just before we write the WAL to ensure region - * edits maintain order). The extra info added here is not 'serialized' as part of the WALEdit - * hence marked 'transient' to underline this fact. It also adds mechanism so we can wait on - * the assign of the region sequence id. See #stampRegionSequenceId(). + * A WAL Entry for {@link AbstractFSWAL} implementation. Immutable. A subclass of {@link Entry} that + * carries extra info across the ring buffer such as region sequenceid (we want to use this later, + * just before we write the WAL to ensure region edits maintain order). The extra info added here is + * not 'serialized' as part of the WALEdit hence marked 'transient' to underline this fact. It also + * adds mechanism so we can wait on the assign of the region sequence id. See + * #stampRegionSequenceId(). */ @InterfaceAudience.Private class FSWALEntry extends Entry { @@ -51,9 +51,9 @@ class FSWALEntry extends Entry { private final transient long txid; /** - * If false, means this is a meta edit written by the hbase system itself. It was not in - * memstore. HBase uses these edit types to note in the log operational transitions such - * as compactions, flushes, or region open/closes. + * If false, means this is a meta edit written by the hbase system itself. It was not in memstore. + * HBase uses these edit types to note in the log operational transitions such as compactions, + * flushes, or region open/closes. 
*/ private final transient boolean inMemstore; @@ -67,11 +67,11 @@ class FSWALEntry extends Entry { private final transient ServerCall rpcCall; /** - * @param inMemstore If true, then this is a data edit, one that came from client. If false, it - * is a meta edit made by the hbase system itself and is for the WAL only. + * @param inMemstore If true, then this is a data edit, one that came from client. If false, it is + * a meta edit made by the hbase system itself and is for the WAL only. */ FSWALEntry(final long txid, final WALKeyImpl key, final WALEdit edit, final RegionInfo regionInfo, - final boolean inMemstore, ServerCall rpcCall) { + final boolean inMemstore, ServerCall rpcCall) { super(key, edit); this.inMemstore = inMemstore; this.closeRegion = !inMemstore && edit.isRegionCloseMarker(); @@ -95,7 +95,7 @@ static Set collectFamilies(List cells) { return Collections.emptySet(); } else { Set set = new TreeSet<>(Bytes.BYTES_COMPARATOR); - for (Cell cell: cells) { + for (Cell cell : cells) { if (!WALEdit.isMetaEditFamily(cell)) { set.add(CellUtil.cloneFamily(cell)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java index 671147208f1b..8e3a941ef897 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.TableName; @@ -32,8 +29,8 @@ import org.slf4j.LoggerFactory; /** - * Class used to push numbers about the WAL into the metrics subsystem. This will take a - * single function call and turn it into multiple manipulations of the hadoop metrics system. + * Class used to push numbers about the WAL into the metrics subsystem. This will take a single + * function call and turn it into multiple manipulations of the hadoop metrics system. 
*/ @InterfaceAudience.Private public class MetricsWAL implements WALActionsListener { @@ -51,7 +48,7 @@ public MetricsWAL() { @Override public void postSync(final long timeInNanos, final int handlerSyncs) { - source.incrementSyncTime(timeInNanos/1000000L); + source.incrementSyncTime(timeInNanos / 1000000L); } @Override @@ -66,9 +63,7 @@ public void postAppend(final long size, final long time, final WALKey logkey, if (time > 1000) { source.incrementSlowAppendCount(); LOG.warn(String.format("%s took %d ms appending an edit to wal; len~=%s", - Thread.currentThread().getName(), - time, - StringUtils.humanReadableInt(size))); + Thread.currentThread().getName(), time, StringUtils.humanReadableInt(size))); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java index 8aba943d0fba..e939cb145a77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.EOFException; @@ -25,23 +23,16 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader.Builder; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,6 +40,12 @@ import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader.Builder; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey; +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALTrailer; + /** * A Protobuf based WAL has the following structure: *
@@ -56,12 +53,12 @@ * <TrailerSize> <PB_WAL_COMPLETE_MAGIC>
          * The Reader reads meta information (WAL Compression state, WALTrailer, etc) in - * ProtobufLogReader#initReader(FSDataInputStream). A WALTrailer is an extensible structure - * which is appended at the end of the WAL. This is empty for now; it can contain some meta - * information such as Region level stats, etc in future. + * ProtobufLogReader#initReader(FSDataInputStream). A WALTrailer is an extensible structure which is + * appended at the end of the WAL. This is empty for now; it can contain some meta information such + * as Region level stats, etc in future. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, - HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, + HBaseInterfaceAudience.CONFIG }) public class ProtobufLogReader extends ReaderBase { private static final Logger LOG = LoggerFactory.getLogger(ProtobufLogReader.class); // public for WALFactory until we move everything to o.a.h.h.wal @@ -97,7 +94,7 @@ public class ProtobufLogReader extends ReaderBase { writerClsNames.add(ProtobufLogWriter.class.getSimpleName()); writerClsNames.add(AsyncProtobufLogWriter.class.getSimpleName()); } - + // cell codec classname private String codecClsName = null; @@ -105,12 +102,13 @@ public class ProtobufLogReader extends ReaderBase { public long trailerSize() { if (trailerPresent) { // sizeof PB_WAL_COMPLETE_MAGIC + sizof trailerSize + trailer - final long calculatedSize = (long) PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT - + trailer.getSerializedSize(); + final long calculatedSize = + (long) PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT + trailer.getSerializedSize(); final long expectedSize = fileLength - walEditsStopOffset; if (expectedSize != calculatedSize) { LOG.warn("After parsing the trailer, we expect the total footer to be {} bytes, but we " - + "calculate it as being {}", expectedSize, calculatedSize); + + "calculate it as being {}", + expectedSize, calculatedSize); } return expectedSize; } else { @@ -119,23 +117,24 @@ public long trailerSize() { } enum WALHdrResult { - EOF, // stream is at EOF when method starts - SUCCESS, - UNKNOWN_WRITER_CLS // name of writer class isn't recognized + EOF, // stream is at EOF when method starts + SUCCESS, UNKNOWN_WRITER_CLS // name of writer class isn't recognized } - + // context for WALHdr carrying information such as Cell Codec classname static class WALHdrContext { WALHdrResult result; String cellCodecClsName; - + WALHdrContext(WALHdrResult result, String cellCodecClsName) { this.result = result; this.cellCodecClsName = cellCodecClsName; } + WALHdrResult getResult() { return result; } + String getCellCodecClsName() { return cellCodecClsName; } @@ -182,31 +181,28 @@ protected String initReader(FSDataInputStream stream) throws IOException { public List getWriterClsNames() { return writerClsNames; } - + /* * Returns the cell codec classname */ public String getCodecClsName() { - return codecClsName; + return codecClsName; } - protected WALHdrContext readHeader(Builder builder, FSDataInputStream stream) - throws IOException { - boolean res = builder.mergeDelimitedFrom(stream); - if (!res) return new WALHdrContext(WALHdrResult.EOF, null); - if (builder.hasWriterClsName() && - !getWriterClsNames().contains(builder.getWriterClsName())) { - return new WALHdrContext(WALHdrResult.UNKNOWN_WRITER_CLS, null); - } - String clsName = null; - if (builder.hasCellCodecClsName()) { - clsName = 
builder.getCellCodecClsName(); - } - return new WALHdrContext(WALHdrResult.SUCCESS, clsName); + protected WALHdrContext readHeader(Builder builder, FSDataInputStream stream) throws IOException { + boolean res = builder.mergeDelimitedFrom(stream); + if (!res) return new WALHdrContext(WALHdrResult.EOF, null); + if (builder.hasWriterClsName() && !getWriterClsNames().contains(builder.getWriterClsName())) { + return new WALHdrContext(WALHdrResult.UNKNOWN_WRITER_CLS, null); + } + String clsName = null; + if (builder.hasCellCodecClsName()) { + clsName = builder.getCellCodecClsName(); + } + return new WALHdrContext(WALHdrResult.SUCCESS, clsName); } - private String initInternal(FSDataInputStream stream, boolean isFirst) - throws IOException { + private String initInternal(FSDataInputStream stream, boolean isFirst) throws IOException { close(); if (!isFirst) { // Re-compute the file length. @@ -234,12 +230,11 @@ private String initInternal(FSDataInputStream stream, boolean isFirst) WALProtos.WALHeader header = builder.build(); this.hasCompression = header.hasHasCompression() && header.getHasCompression(); this.hasTagCompression = header.hasHasTagCompression() && header.getHasTagCompression(); - this.hasValueCompression = header.hasHasValueCompression() && - header.getHasValueCompression(); + this.hasValueCompression = header.hasHasValueCompression() && header.getHasValueCompression(); if (header.hasValueCompressionAlgorithm()) { try { this.valueCompressionType = - Compression.Algorithm.values()[header.getValueCompressionAlgorithm()]; + Compression.Algorithm.values()[header.getValueCompressionAlgorithm()]; } catch (ArrayIndexOutOfBoundsException e) { throw new IOException("Invalid compression type", e); } @@ -252,13 +247,13 @@ private String initInternal(FSDataInputStream stream, boolean isFirst) this.seekOnFs(currentPosition); if (LOG.isTraceEnabled()) { LOG.trace("After reading the trailer: walEditsStopOffset: " + this.walEditsStopOffset - + ", fileLength: " + this.fileLength + ", " + "trailerPresent: " + - (trailerPresent ? "true, size: " + trailer.getSerializedSize() : "false") + - ", currentPosition: " + currentPosition); + + ", fileLength: " + this.fileLength + ", " + "trailerPresent: " + + (trailerPresent ? "true, size: " + trailer.getSerializedSize() : "false") + + ", currentPosition: " + currentPosition); } - + codecClsName = hdrCtxt.getCellCodecClsName(); - + return hdrCtxt.getCellCodecClsName(); } @@ -298,7 +293,7 @@ private boolean setTrailerIfPresent() { } else if (trailerSize > this.trailerWarnSize) { // continue reading after warning the user. LOG.warn("Please investigate WALTrailer usage. Trailer size > maximum configured size : " - + trailerSize + " > " + this.trailerWarnSize); + + trailerSize + " > " + this.trailerWarnSize); } // seek to the position where trailer starts. long positionOfTrailer = trailerSizeOffset - trailerSize; @@ -324,7 +319,7 @@ protected WALCellCodec getCodec(Configuration conf, String cellCodecClsName, protected void initAfterCompression() throws IOException { initAfterCompression(null); } - + @Override protected void initAfterCompression(String cellCodecClsName) throws IOException { WALCellCodec codec = getCodec(this.conf, cellCodecClsName, this.compressionContext); @@ -375,28 +370,25 @@ protected boolean readNext(Entry entry) throws IOException { throw new EOFException(); } size = CodedInputStream.readRawVarint32(firstByte, this.inputStream); - // available may be < 0 on local fs for instance. If so, can't depend on it. 
+ // available may be < 0 on local fs for instance. If so, can't depend on it. available = this.inputStream.available(); if (available > 0 && available < size) { - throw new EOFException( - "Available stream not enough for edit, " + "inputStream.available()= " - + this.inputStream.available() + ", " + "entry size= " + size + " at offset = " - + this.inputStream.getPos()); + throw new EOFException("Available stream not enough for edit, " + + "inputStream.available()= " + this.inputStream.available() + ", " + "entry size= " + + size + " at offset = " + this.inputStream.getPos()); } ProtobufUtil.mergeFrom(builder, ByteStreams.limit(this.inputStream, size), (int) size); } catch (InvalidProtocolBufferException ipbe) { resetPosition = true; - throw (EOFException) new EOFException( - "Invalid PB, EOF? Ignoring; originalPosition=" + originalPosition + ", currentPosition=" - + this.inputStream.getPos() + ", messageSize=" + size + ", currentAvailable=" - + available).initCause(ipbe); + throw (EOFException) new EOFException("Invalid PB, EOF? Ignoring; originalPosition=" + + originalPosition + ", currentPosition=" + this.inputStream.getPos() + ", messageSize=" + + size + ", currentAvailable=" + available).initCause(ipbe); } if (!builder.isInitialized()) { // TODO: not clear if we should try to recover from corrupt PB that looks semi-legit. - // If we can get the KV count, we could, theoretically, try to get next record. - throw new EOFException( - "Partial PB while reading WAL, " + "probably an unexpected EOF, ignoring. current offset=" - + this.inputStream.getPos()); + // If we can get the KV count, we could, theoretically, try to get next record. + throw new EOFException("Partial PB while reading WAL, " + + "probably an unexpected EOF, ignoring. current offset=" + this.inputStream.getPos()); } WALKey walKey = builder.build(); entry.getKey().readFieldsFromPb(walKey, this.byteStringUncompressor); @@ -421,31 +413,33 @@ protected boolean readNext(Entry entry) throws IOException { } catch (Throwable t) { LOG.trace("Error getting pos for error message - ignoring", t); } - String message = - " while reading " + expectedCells + " WAL KVs; started reading at " + posBefore - + " and read up to " + posAfterStr; + String message = " while reading " + expectedCells + " WAL KVs; started reading at " + + posBefore + " and read up to " + posAfterStr; IOException realEofEx = extractHiddenEof(ex); - throw (EOFException) new EOFException("EOF " + message). - initCause(realEofEx != null ? realEofEx : ex); + throw (EOFException) new EOFException("EOF " + message) + .initCause(realEofEx != null ? realEofEx : ex); } if (trailerPresent && this.inputStream.getPos() > this.walEditsStopOffset) { LOG.error( "Read WALTrailer while reading WALEdits. wal: " + this.path + ", inputStream.getPos(): " - + this.inputStream.getPos() + ", walEditsStopOffset: " + this.walEditsStopOffset); + + this.inputStream.getPos() + ", walEditsStopOffset: " + this.walEditsStopOffset); throw new EOFException("Read WALTrailer while reading WALEdits"); } } catch (EOFException eof) { // If originalPosition is < 0, it is rubbish and we cannot use it (probably local fs) if (originalPosition < 0) { - LOG.debug("Encountered a malformed edit, but can't seek back to last good position " - + "because originalPosition is negative. last offset={}", this.inputStream.getPos(), eof); + LOG.debug( + "Encountered a malformed edit, but can't seek back to last good position " + + "because originalPosition is negative. 
last offset={}", + this.inputStream.getPos(), eof); throw eof; } // If stuck at the same place and we got an exception, lets go back at the beginning. if (inputStream.getPos() == originalPosition) { if (resetPosition) { LOG.debug("Encountered a malformed edit, seeking to the beginning of the WAL since " - + "current position and original position match at {}", originalPosition); + + "current position and original position match at {}", + originalPosition); seekOnFs(0); } else { LOG.debug("EOF at position {}", originalPosition); @@ -454,7 +448,8 @@ protected boolean readNext(Entry entry) throws IOException { // Else restore our position to original location in hope that next time through we will // read successfully. LOG.debug("Encountered a malformed edit, seeking back to last good position in file, " - + "from {} to {}", inputStream.getPos(), originalPosition, eof); + + "from {} to {}", + inputStream.getPos(), originalPosition, eof); seekOnFs(originalPosition); } return false; @@ -467,13 +462,13 @@ private IOException extractHiddenEof(Exception ex) { // for EOF, not EOFException; and scanner further hides it inside RuntimeException. IOException ioEx = null; if (ex instanceof EOFException) { - return (EOFException)ex; + return (EOFException) ex; } else if (ex instanceof IOException) { - ioEx = (IOException)ex; - } else if (ex instanceof RuntimeException - && ex.getCause() != null && ex.getCause() instanceof IOException) { - ioEx = (IOException)ex.getCause(); - } + ioEx = (IOException) ex; + } else if (ex instanceof RuntimeException && ex.getCause() != null + && ex.getCause() instanceof IOException) { + ioEx = (IOException) ex.getCause(); + } if ((ioEx != null) && (ioEx.getMessage() != null)) { if (ioEx.getMessage().contains("EOF")) return ioEx; return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java index dd586b3e0a96..5c0351e1c8a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,7 @@ * Writer for protobuf-based WAL. */ @InterfaceAudience.Private -public class ProtobufLogWriter extends AbstractProtobufLogWriter - implements FSHLogProvider.Writer { +public class ProtobufLogWriter extends AbstractProtobufLogWriter implements FSHLogProvider.Writer { private static final Logger LOG = LoggerFactory.getLogger(ProtobufLogWriter.class); @@ -55,8 +54,8 @@ public class ProtobufLogWriter extends AbstractProtobufLogWriter @Override public void append(Entry entry) throws IOException { - entry.getKey().getBuilder(compressor). - setFollowingKvCount(entry.getEdit().size()).build().writeDelimitedTo(output); + entry.getKey().getBuilder(compressor).setFollowingKvCount(entry.getEdit().size()).build() + .writeDelimitedTo(output); for (Cell cell : entry.getEdit().getCells()) { // cellEncoder must assume little about the stream, since we write PB and cells in turn. 
cellEncoder.write(cell); @@ -106,17 +105,13 @@ public FSDataOutputStream getStream() { @Override protected void initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize, - short replication, long blockSize, StreamSlowMonitor monitor) throws IOException, - StreamLacksCapabilityException { - FSDataOutputStreamBuilder builder = fs - .createFile(path) - .overwrite(overwritable) - .bufferSize(bufferSize) - .replication(replication) - .blockSize(blockSize); + short replication, long blockSize, StreamSlowMonitor monitor) + throws IOException, StreamLacksCapabilityException { + FSDataOutputStreamBuilder builder = fs.createFile(path).overwrite(overwritable) + .bufferSize(bufferSize).replication(replication).blockSize(blockSize); if (builder instanceof DistributedFileSystem.HdfsDataOutputStreamBuilder) { - this.output = ((DistributedFileSystem.HdfsDataOutputStreamBuilder) builder) - .replicate().build(); + this.output = + ((DistributedFileSystem.HdfsDataOutputStreamBuilder) builder).replicate().build(); } else { this.output = builder.build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java index 90a1653a5140..1f21e2ec6be7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; @@ -35,7 +33,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) public abstract class ReaderBase implements AbstractFSWALProvider.Reader { private static final Logger LOG = LoggerFactory.getLogger(ReaderBase.class); protected Configuration conf; @@ -44,7 +42,7 @@ public abstract class ReaderBase implements AbstractFSWALProvider.Reader { protected long edit = 0; protected long fileLength; /** - * Compression context to use reading. Can be null if no compression. + * Compression context to use reading. Can be null if no compression. 
*/ protected CompressionContext compressionContext = null; protected boolean emptyCompressionContext = true; @@ -70,14 +68,15 @@ public void init(FileSystem fs, Path path, Configuration conf, FSDataInputStream try { if (compressionContext == null) { if (LOG.isDebugEnabled()) { - LOG.debug("Initializing compression context for {}: isRecoveredEdits={}" + - ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", path, - CommonFSUtils.isRecoveredEdits(path), hasTagCompression(), hasValueCompression(), - getValueCompressionAlgorithm()); + LOG.debug( + "Initializing compression context for {}: isRecoveredEdits={}" + + ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", + path, CommonFSUtils.isRecoveredEdits(path), hasTagCompression(), + hasValueCompression(), getValueCompressionAlgorithm()); } - compressionContext = new CompressionContext(LRUDictionary.class, - CommonFSUtils.isRecoveredEdits(path), hasTagCompression(), - hasValueCompression(), getValueCompressionAlgorithm()); + compressionContext = + new CompressionContext(LRUDictionary.class, CommonFSUtils.isRecoveredEdits(path), + hasTagCompression(), hasValueCompression(), getValueCompressionAlgorithm()); } else { compressionContext.clear(); } @@ -109,8 +108,7 @@ public Entry next(Entry reuse) throws IOException { // It is old ROOT table edit, ignore it LOG.info("Got an old ROOT edit, ignoring "); return next(e); - } - else throw iae; + } else throw iae; } edit++; if (compressionContext != null && emptyCompressionContext) { @@ -133,8 +131,8 @@ public void seek(long pos) throws IOException { } /** - * Initializes the log reader with a particular stream (may be null). - * Reader assumes ownership of the stream if not null and may use it. Called once. + * Initializes the log reader with a particular stream (may be null). Reader assumes ownership of + * the stream if not null and may use it. Called once. * @return the class name of cell Codec, null if such information is not available */ protected abstract String initReader(FSDataInputStream stream) throws IOException; @@ -149,6 +147,7 @@ public void seek(long pos) throws IOException { * @param cellCodecClsName class name of cell Codec */ protected abstract void initAfterCompression(String cellCodecClsName) throws IOException; + /** * @return Whether compression is enabled for this log. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java index dfef429455cb..46ba6b5730c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java index e2d294ac1f23..0843548b3113 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureAsyncProtobufLogWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.crypto.Encryptor; @@ -26,6 +25,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -40,12 +40,13 @@ public SecureAsyncProtobufLogWriter(EventLoopGroup eventLoopGroup, /* * @return class name which is recognized by hbase-1.x to avoid ProtobufLogReader throwing error: - * IOException: Got unknown writer class: SecureAsyncProtobufLogWriter + * IOException: Got unknown writer class: SecureAsyncProtobufLogWriter */ @Override protected String getWriterClassName() { return "SecureProtobufLogWriter"; } + @Override protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder builder) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java index e43d140826c0..cc21a6fe2875 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; @@ -23,20 +22,20 @@ import java.security.KeyException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Decryptor; import org.apache.hadoop.hbase.io.crypto.Encryption; -import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; import org.apache.hadoop.hbase.security.EncryptionUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.EncryptionTest; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class SecureProtobufLogReader extends ProtobufLogReader { @@ -89,8 +88,8 @@ protected WALHdrContext readHeader(WALHeader.Builder builder, FSDataInputStream } } if (key == null) { - String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()); + String masterKeyName = + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()); try { // Then, try the cluster master key key = EncryptionUtil.unwrapWALKey(conf, masterKeyName, keyBytes); @@ -100,8 +99,7 @@ protected WALHdrContext readHeader(WALHeader.Builder builder, FSDataInputStream if (LOG.isDebugEnabled()) { LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'"); } - String alternateKeyName = - conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); + String alternateKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); if (alternateKeyName != null) { try { key = EncryptionUtil.unwrapWALKey(conf, alternateKeyName, keyBytes); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java index eb8c591a15e2..e6ad5f85e0db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.crypto.Encryptor; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALHeader; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java index 6d2bd61a0234..4201dd07533d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java @@ -22,14 +22,12 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.commons.io.IOUtils; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.io.ByteBufferWriterOutputStream; import org.apache.hadoop.hbase.io.crypto.Decryptor; @@ -37,6 +35,7 @@ import org.apache.hadoop.hbase.io.crypto.Encryptor; import org.apache.hadoop.hbase.io.util.StreamUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A WALCellCodec that encrypts the WALedits. @@ -91,8 +90,7 @@ protected Cell parseCell() throws IOException { // encoder supports that just read the remainder in directly if (ivLength != this.iv.length) { - throw new IOException("Incorrect IV length: expected=" + iv.length + " have=" + - ivLength); + throw new IOException("Incorrect IV length: expected=" + iv.length + " have=" + ivLength); } IOUtils.readFully(in, this.iv); @@ -124,12 +122,12 @@ protected Cell parseCell() throws IOException { // Row int elemLen = StreamUtils.readRawVarint32(cin); - pos = Bytes.putShort(backingArray, pos, (short)elemLen); + pos = Bytes.putShort(backingArray, pos, (short) elemLen); IOUtils.readFully(cin, backingArray, pos, elemLen); pos += elemLen; // Family elemLen = StreamUtils.readRawVarint32(cin); - pos = Bytes.putByte(backingArray, pos, (byte)elemLen); + pos = Bytes.putByte(backingArray, pos, (byte) elemLen); IOUtils.readFully(cin, backingArray, pos, elemLen); pos += elemLen; // Qualifier diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 6be95391819b..3232c07b02f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,41 +55,42 @@ class SequenceIdAccounting { /** * This lock ties all operations on {@link SequenceIdAccounting#flushingSequenceIds} and - * {@link #lowestUnflushedSequenceIds} Maps. {@link #lowestUnflushedSequenceIds} has the - * lowest outstanding sequence ids EXCEPT when flushing. When we flush, the current - * lowest set for the region/column family are moved (atomically because of this lock) to + * {@link #lowestUnflushedSequenceIds} Maps. {@link #lowestUnflushedSequenceIds} has the lowest + * outstanding sequence ids EXCEPT when flushing. When we flush, the current lowest set for the + * region/column family are moved (atomically because of this lock) to * {@link #flushingSequenceIds}. - * - *

<p>The two Maps are tied by this locking object EXCEPT when we go to update the lowest - * entry; see {@link #lowestUnflushedSequenceIds}. In here is a putIfAbsent call on - * {@link #lowestUnflushedSequenceIds}. In this latter case, we will add this lowest - * sequence id if we find that there is no entry for the current column family. There will be no - * entry only if we just came up OR we have moved aside current set of lowest sequence ids - * because the current set are being flushed (by putting them into {@link #flushingSequenceIds}). - * This is how we pick up the next 'lowest' sequence id per region per column family to be used - * figuring what is in the next flush. + * <p> + * The two Maps are tied by this locking object EXCEPT when we go to update the lowest entry; see + * {@link #lowestUnflushedSequenceIds}. In here is a putIfAbsent call on + * {@link #lowestUnflushedSequenceIds}. In this latter case, we will add this lowest sequence id + * if we find that there is no entry for the current column family. There will be no entry only if + * we just came up OR we have moved aside current set of lowest sequence ids because the current + * set are being flushed (by putting them into {@link #flushingSequenceIds}). This is how we pick + * up the next 'lowest' sequence id per region per column family to be used figuring what is in + * the next flush. */ private final Object tieLock = new Object(); /** - * Map of encoded region names and family names to their OLDEST -- i.e. their first, - * the longest-lived, their 'earliest', the 'lowest' -- sequence id. - * - * <p>When we flush, the current lowest sequence ids get cleared and added to - * {@link #flushingSequenceIds}. The next append that comes in, is then added - * here to {@link #lowestUnflushedSequenceIds} as the next lowest sequenceid. - * - * <p>If flush fails, currently server is aborted so no need to restore previous sequence ids. - * <p>Needs to be concurrent Maps because we use putIfAbsent updating oldest. + * Map of encoded region names and family names to their OLDEST -- i.e. their first, the + * longest-lived, their 'earliest', the 'lowest' -- sequence id. + * <p> + * When we flush, the current lowest sequence ids get cleared and added to + * {@link #flushingSequenceIds}. The next append that comes in, is then added here to + * {@link #lowestUnflushedSequenceIds} as the next lowest sequenceid. + * <p> + * If flush fails, currently server is aborted so no need to restore previous sequence ids. + * <p>
          + * Needs to be concurrent Maps because we use putIfAbsent updating oldest. */ - private final ConcurrentMap> - lowestUnflushedSequenceIds = new ConcurrentHashMap<>(); + private final ConcurrentMap> lowestUnflushedSequenceIds = + new ConcurrentHashMap<>(); /** * Map of encoded region names and family names to their lowest or OLDEST sequence/edit id * currently being flushed out to hfiles. Entries are moved here from - * {@link #lowestUnflushedSequenceIds} while the lock {@link #tieLock} is held - * (so movement between the Maps is atomic). + * {@link #lowestUnflushedSequenceIds} while the lock {@link #tieLock} is held (so movement + * between the Maps is atomic). */ private final Map> flushingSequenceIds = new HashMap<>(); @@ -108,8 +109,8 @@ class SequenceIdAccounting { /** * Returns the lowest unflushed sequence id for the region. - * @return Lowest outstanding unflushed sequenceid for encodedRegionName. Will - * return {@link HConstants#NO_SEQNUM} when none. + * @return Lowest outstanding unflushed sequenceid for encodedRegionName. Will return + * {@link HConstants#NO_SEQNUM} when none. */ long getLowestSequenceId(final byte[] encodedRegionName) { synchronized (this.tieLock) { @@ -150,7 +151,7 @@ long getLowestSequenceId(final byte[] encodedRegionName, final byte[] familyName /** * Reset the accounting of highest sequenceid by regionname. * @return Return the previous accounting Map of regions to the last sequence id written into - * each. + * each. */ Map resetHighest() { Map old = this.highestSequenceIds; @@ -160,8 +161,8 @@ Map resetHighest() { /** * We've been passed a new sequenceid for the region. Set it as highest seen for this region and - * if we are to record oldest, or lowest sequenceids, save it as oldest seen if nothing - * currently older. + * if we are to record oldest, or lowest sequenceids, save it as oldest seen if nothing currently + * older. * @param encodedRegionName * @param families * @param sequenceid @@ -197,7 +198,7 @@ void onRegionClose(byte[] encodedRegionName) { LOG.warn("Still have flushing records when closing {}, {}", Bytes.toString(encodedRegionName), flushing.entrySet().stream().map(e -> e.getKey().toString() + "->" + e.getValue()) - .collect(Collectors.joining(",", "{", "}"))); + .collect(Collectors.joining(",", "{", "}"))); } } this.highestSequenceIds.remove(encodedRegionName); @@ -250,8 +251,8 @@ ConcurrentMap getOrCreateLowestSequenceIds(byte[] enco */ private static long getLowestSequenceId(Map sequenceids) { long lowest = HConstants.NO_SEQNUM; - for (Map.Entry entry : sequenceids.entrySet()){ - if (entry.getKey().toString().equals("METAFAMILY")){ + for (Map.Entry entry : sequenceids.entrySet()) { + if (entry.getKey().toString().equals("METAFAMILY")) { continue; } Long sid = entry.getValue(); @@ -284,18 +285,18 @@ private > Map flattenToLowestSequenceId(Map /** * @param encodedRegionName Region to flush. * @param families Families to flush. May be a subset of all families in the region. - * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if - * we are flushing a subset of all families but there are no edits in those families not - * being flushed; in other words, this is effectively same as a flush of all of the region - * though we were passed a subset of regions. Otherwise, it returns the sequence id of the - * oldest/lowest outstanding edit. 
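The javadoc above describes SequenceIdAccounting's two-map scheme: on flush start, the lowest unflushed sequence ids move, atomically under tieLock, into a flushing map, and the next append re-establishes the new lowest via putIfAbsent. A minimal, self-contained sketch of that idea follows (illustrative names only, keyed by a plain String family for brevity; this is not the actual SequenceIdAccounting code).

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Simplified sketch of the two-map accounting described above; not the real SequenceIdAccounting.
class SequenceIdAccountingSketch {
  private final Object tieLock = new Object();
  // Lowest outstanding (unflushed) sequence id per family, maintained with putIfAbsent on append.
  private final ConcurrentMap<String, Long> lowestUnflushed = new ConcurrentHashMap<>();
  // Entries moved here (atomically, under tieLock) while a flush is in progress.
  private final Map<String, Long> flushing = new HashMap<>();

  void onAppend(String family, long sequenceId) {
    // Only the first append after startup, or after a flush moved the old entry aside,
    // establishes the new lowest id for the family.
    lowestUnflushed.putIfAbsent(family, sequenceId);
  }

  Long startCacheFlush(String family) {
    synchronized (tieLock) {
      Long lowest = lowestUnflushed.remove(family);
      if (lowest != null) {
        flushing.put(family, lowest);
      }
      return lowest; // null means nothing outstanding for this family
    }
  }

  void completeCacheFlush(String family) {
    synchronized (tieLock) {
      flushing.remove(family);
    }
  }

  void abortCacheFlush(String family) {
    synchronized (tieLock) {
      // Restore the moved-aside lowest id so it is accounted for again.
      Long old = flushing.remove(family);
      if (old != null) {
        lowestUnflushed.putIfAbsent(family, old);
      }
    }
  }
}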
+ * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if we are + * flushing a subset of all families but there are no edits in those families not being + * flushed; in other words, this is effectively same as a flush of all of the region + * though we were passed a subset of regions. Otherwise, it returns the sequence id of the + * oldest/lowest outstanding edit. */ Long startCacheFlush(final byte[] encodedRegionName, final Set families) { - Map familytoSeq = new HashMap<>(); - for (byte[] familyName : families){ - familytoSeq.put(familyName,HConstants.NO_SEQNUM); + Map familytoSeq = new HashMap<>(); + for (byte[] familyName : families) { + familytoSeq.put(familyName, HConstants.NO_SEQNUM); } - return startCacheFlush(encodedRegionName,familytoSeq); + return startCacheFlush(encodedRegionName, familytoSeq); } Long startCacheFlush(final byte[] encodedRegionName, final Map familyToSeq) { @@ -311,7 +312,7 @@ Long startCacheFlush(final byte[] encodedRegionName, final Map fam for (Map.Entry entry : familyToSeq.entrySet()) { ImmutableByteArray familyNameWrapper = ImmutableByteArray.wrap((byte[]) entry.getKey()); Long seqId = null; - if(entry.getValue() == HConstants.NO_SEQNUM) { + if (entry.getValue() == HConstants.NO_SEQNUM) { seqId = m.remove(familyNameWrapper); } else { seqId = m.replace(familyNameWrapper, entry.getValue()); @@ -325,8 +326,8 @@ Long startCacheFlush(final byte[] encodedRegionName, final Map fam } if (oldSequenceIds != null && !oldSequenceIds.isEmpty()) { if (this.flushingSequenceIds.put(encodedRegionName, oldSequenceIds) != null) { - LOG.warn("Flushing Map not cleaned up for " + Bytes.toString(encodedRegionName) + - ", sequenceid=" + oldSequenceIds); + LOG.warn("Flushing Map not cleaned up for " + Bytes.toString(encodedRegionName) + + ", sequenceid=" + oldSequenceIds); } } if (m.isEmpty()) { @@ -398,7 +399,7 @@ void abortCacheFlush(final byte[] encodedRegionName) { flushing = this.flushingSequenceIds.remove(encodedRegionName); if (flushing != null) { Map unflushed = getOrCreateLowestSequenceIds(encodedRegionName); - for (Map.Entry e: flushing.entrySet()) { + for (Map.Entry e : flushing.entrySet()) { // Set into unflushed the 'old' oldest sequenceid and if any value in flushed with this // value, it will now be in tmpMap. tmpMap.put(e.getKey(), unflushed.put(e.getKey(), e.getValue())); @@ -412,9 +413,9 @@ void abortCacheFlush(final byte[] encodedRegionName) { for (Map.Entry e : flushing.entrySet()) { Long currentId = tmpMap.get(e.getKey()); if (currentId != null && currentId.longValue() < e.getValue().longValue()) { - String errorStr = Bytes.toString(encodedRegionName) + " family " - + e.getKey().toString() + " acquired edits out of order current memstore seq=" - + currentId + ", previous oldest unflushed id=" + e.getValue(); + String errorStr = Bytes.toString(encodedRegionName) + " family " + e.getKey().toString() + + " acquired edits out of order current memstore seq=" + currentId + + ", previous oldest unflushed id=" + e.getValue(); LOG.error(errorStr); Runtime.getRuntime().halt(1); } @@ -465,9 +466,8 @@ boolean areAllLower(Map sequenceids, Collection keysBlocki /** * Iterates over the given Map and compares sequence ids with corresponding entries in - * {@link #lowestUnflushedSequenceIds}. If a region in - * {@link #lowestUnflushedSequenceIds} has a sequence id less than that passed in - * sequenceids then return it. + * {@link #lowestUnflushedSequenceIds}. 
If a region in {@link #lowestUnflushedSequenceIds} has a + * sequence id less than that passed in sequenceids then return it. * @param sequenceids Sequenceids keyed by encoded region name. * @return stores of regions found in this instance with sequence ids less than those passed in. */ @@ -486,7 +486,7 @@ Map> findLower(Map sequenceids) { toFlush = new TreeMap(Bytes.BYTES_COMPARATOR); } toFlush.computeIfAbsent(e.getKey(), k -> new ArrayList<>()) - .add(Bytes.toBytes(me.getKey().toString())); + .add(Bytes.toBytes(me.getKey().toString())); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java index 862e91826b5f..75c8bb92699a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java @@ -93,7 +93,6 @@ class SyncFuture { /** * Call this method to clear old usage and get it ready for new deploy. - * * @param txid the new transaction id * @return this */ @@ -114,8 +113,8 @@ SyncFuture reset(long txid, boolean forceSync) { @Override public String toString() { - return "done=" + isDone() + ", txid=" + this.txid + " threadID=" + t.getId() + - " threadName=" + t.getName(); + return "done=" + isDone() + ", txid=" + this.txid + " threadID=" + t.getId() + " threadName=" + + t.getName(); } long getTxid() { @@ -164,15 +163,14 @@ boolean done(final long txid, final Throwable t) { } } - long get(long timeoutNs) throws InterruptedException, - ExecutionException, TimeoutIOException { + long get(long timeoutNs) throws InterruptedException, ExecutionException, TimeoutIOException { doneLock.lock(); try { while (doneTxid == NOT_DONE) { if (!doneCondition.await(timeoutNs, TimeUnit.NANOSECONDS)) { - throw new TimeoutIOException("Failed to get sync result after " - + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + " ms for txid=" + this.txid - + ", WAL system stuck?"); + throw new TimeoutIOException( + "Failed to get sync result after " + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + + " ms for txid=" + this.txid + ", WAL system stuck?"); } } if (this.throwable != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java index de3188f08976..74fbefdae475 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFutureCache.java @@ -21,22 +21,18 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.cache.Cache; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; /** - * A cache of {@link SyncFuture}s. This class supports two methods - * {@link SyncFutureCache#getIfPresentOrNew()} and {@link SyncFutureCache#offer()}. - * - * Usage pattern: - * SyncFuture sf = syncFutureCache.getIfPresentOrNew(); - * sf.reset(...); - * // Use the sync future - * finally: syncFutureCache.offer(sf); - * - * Offering the sync future back to the cache makes it eligible for reuse within the same thread - * context. 
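The SyncFutureCache javadoc above spells out a get/reset/offer usage pattern; the fragment below restates it with a try/finally purely as an illustration. SyncFuture is a package-private class and SyncFutureCache is InterfaceAudience.Private, so this reads as if written inside the same package, and the surrounding method is hypothetical.

// Sketch of the documented usage pattern; not something external code would compile against.
long sync(SyncFutureCache cache, long txid, boolean forceSync) {
  SyncFuture sf = cache.getIfPresentOrNew();
  try {
    sf.reset(txid, forceSync);   // clear old state, bind the future to the new transaction id
    // ... hand the future to the WAL sync machinery and wait for it to complete ...
    return sf.getTxid();
  } finally {
    cache.offer(sf);             // make the future eligible for reuse by this thread
  }
}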
Cache keyed by the accessing thread instance and automatically invalidated if it remains - * unused for {@link SyncFutureCache#SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS} minutes. + * A cache of {@link SyncFuture}s. This class supports two methods + * {@link SyncFutureCache#getIfPresentOrNew()} and {@link SyncFutureCache#offer()}. Usage pattern: + * SyncFuture sf = syncFutureCache.getIfPresentOrNew(); sf.reset(...); // Use the sync future + * finally: syncFutureCache.offer(sf); Offering the sync future back to the cache makes it eligible + * for reuse within the same thread context. Cache keyed by the accessing thread instance and + * automatically invalidated if it remains unused for + * {@link SyncFutureCache#SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS} minutes. */ @InterfaceAudience.Private public final class SyncFutureCache { @@ -47,7 +43,7 @@ public final class SyncFutureCache { public SyncFutureCache(final Configuration conf) { final int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); syncFutureCache = CacheBuilder.newBuilder().initialCapacity(handlerCount) .expireAfterWrite(SYNC_FUTURE_INVALIDATION_TIMEOUT_MINS, TimeUnit.MINUTES).build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java index c109a1b4bdd0..7ea3b4a2ccc4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,8 +25,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Get notification of WAL events. The invocations are inline - * so make sure your implementation is fast else you'll slow hbase. + * Get notification of WAL events. The invocations are inline so make sure your implementation is + * fast else you'll slow hbase. */ @InterfaceAudience.Private public interface WALActionsListener { @@ -45,53 +44,60 @@ static enum RollRequestReason { }; /** - * The WAL is going to be rolled. The oldPath can be null if this is - * the first log file from the regionserver. + * The WAL is going to be rolled. The oldPath can be null if this is the first log file from the + * regionserver. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void preLogRoll(Path oldPath, Path newPath) throws IOException {} + default void preLogRoll(Path oldPath, Path newPath) throws IOException { + } /** - * The WAL has been rolled. The oldPath can be null if this is - * the first log file from the regionserver. + * The WAL has been rolled. The oldPath can be null if this is the first log file from the + * regionserver. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void postLogRoll(Path oldPath, Path newPath) throws IOException {} + default void postLogRoll(Path oldPath, Path newPath) throws IOException { + } /** * The WAL is going to be archived. 
* @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void preLogArchive(Path oldPath, Path newPath) throws IOException {} + default void preLogArchive(Path oldPath, Path newPath) throws IOException { + } /** * The WAL has been archived. * @param oldPath the path to the old wal * @param newPath the path to the new wal */ - default void postLogArchive(Path oldPath, Path newPath) throws IOException {} + default void postLogArchive(Path oldPath, Path newPath) throws IOException { + } /** * A request was made that the WAL be rolled. */ - default void logRollRequested(RollRequestReason reason) {} + default void logRollRequested(RollRequestReason reason) { + } /** * The WAL is about to close. */ - default void logCloseRequested() {} + default void logCloseRequested() { + } /** - * Called before each write. - */ - default void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) {} + * Called before each write. + */ + default void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { + } /** - * For notification post append to the writer. Used by metrics system at least. - * TODO: Combine this with above. + * For notification post append to the writer. Used by metrics system at least. TODO: Combine this + * with above. * @param entryLen approx length of cells in this append. * @param elapsedTimeMillis elapsed time in milliseconds. * @param logKey A WAL key @@ -99,13 +105,14 @@ default void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit lo * @throws IOException if any network or I/O error occurred */ default void postAppend(final long entryLen, final long elapsedTimeMillis, final WALKey logKey, - final WALEdit logEdit) throws IOException {} + final WALEdit logEdit) throws IOException { + } /** - * For notification post writer sync. Used by metrics system at least. + * For notification post writer sync. Used by metrics system at least. * @param timeInNanos How long the filesystem sync took in nanoseconds. - * @param handlerSyncs How many sync handler calls were released by this call to filesystem - * sync. + * @param handlerSyncs How many sync handler calls were released by this call to filesystem sync. 
*/ - default void postSync(final long timeInNanos, final int handlerSyncs) {} + default void postSync(final long timeInNanos, final int handlerSyncs) { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java index 31eccc7a18af..bf3a8fc4c94c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java @@ -21,14 +21,12 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.codec.BaseDecoder; import org.apache.hadoop.hbase.codec.BaseEncoder; import org.apache.hadoop.hbase.codec.Codec; @@ -43,20 +41,19 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.io.IOUtils; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; - /** - * Compression in this class is lifted off Compressor/KeyValueCompression. - * This is a pure coincidence... they are independent and don't have to be compatible. - * - * This codec is used at server side for writing cells to WAL as well as for sending edits - * as part of the distributed splitting process. + * Compression in this class is lifted off Compressor/KeyValueCompression. This is a pure + * coincidence... they are independent and don't have to be compatible. This codec is used at server + * side for writing cells to WAL as well as for sending edits as part of the distributed splitting + * process. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, - HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, + HBaseInterfaceAudience.CONFIG }) public class WALCellCodec implements Codec { /** Configuration key for the class to use when encoding cells in the WAL */ public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec"; @@ -87,9 +84,8 @@ public static Class getWALCellCodecClass(Configuration conf) { /** * Create and setup a {@link WALCellCodec} from the {@code cellCodecClsName} and - * CompressionContext, if {@code cellCodecClsName} is specified. - * Otherwise Cell Codec classname is read from {@link Configuration}. - * Fully prepares the codec for use. + * CompressionContext, if {@code cellCodecClsName} is specified. Otherwise Cell Codec classname is + * read from {@link Configuration}. Fully prepares the codec for use. * @param conf {@link Configuration} to read for the user-specified codec. If none is specified, * uses a {@link WALCellCodec}. 
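As the WALCellCodec javadoc above notes, the codec class is read from configuration when no explicit class name is supplied. A hedged sketch of selecting a codec through that key, assuming SecureWALCellCodec as the example value and treating a null CompressionContext as "no WAL compression":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.wal.SecureWALCellCodec;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;

class WalCodecConfigSketch {
  static WALCellCodec createConfiguredCodec() {
    Configuration conf = HBaseConfiguration.create();
    // The codec class is taken from "hbase.regionserver.wal.codec" when none is named explicitly.
    conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, SecureWALCellCodec.class.getName());
    // Passing a null CompressionContext stands in for "no WAL compression" (assumption for brevity).
    return WALCellCodec.create(conf, null);
  }
}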
* @param cellCodecClsName name of codec @@ -103,26 +99,26 @@ public static WALCellCodec create(Configuration conf, String cellCodecClsName, if (cellCodecClsName == null) { cellCodecClsName = getWALCellCodecClass(conf).getName(); } - return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[] - { Configuration.class, CompressionContext.class }, new Object[] { conf, compression }); + return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, + new Class[] { Configuration.class, CompressionContext.class }, + new Object[] { conf, compression }); } /** - * Create and setup a {@link WALCellCodec} from the - * CompressionContext. - * Cell Codec classname is read from {@link Configuration}. - * Fully prepares the codec for use. + * Create and setup a {@link WALCellCodec} from the CompressionContext. Cell Codec classname is + * read from {@link Configuration}. Fully prepares the codec for use. * @param conf {@link Configuration} to read for the user-specified codec. If none is specified, * uses a {@link WALCellCodec}. * @param compression compression the codec should use * @return a {@link WALCellCodec} ready for use. * @throws UnsupportedOperationException if the codec cannot be instantiated */ - public static WALCellCodec create(Configuration conf, - CompressionContext compression) throws UnsupportedOperationException { + public static WALCellCodec create(Configuration conf, CompressionContext compression) + throws UnsupportedOperationException { String cellCodecClsName = getWALCellCodecClass(conf).getName(); - return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[] - { Configuration.class, CompressionContext.class }, new Object[] { conf, compression }); + return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, + new Class[] { Configuration.class, CompressionContext.class }, + new Object[] { conf, compression }); } public interface ByteStringCompressor { @@ -152,6 +148,7 @@ static class BaosAndCompressor extends ByteArrayOutputStream implements ByteStri public BaosAndCompressor(CompressionContext compressionContext) { this.compressionContext = compressionContext; } + public ByteString toByteString() { // We need this copy to create the ByteString as the byte[] 'buf' is not immutable. We reuse // them. @@ -200,7 +197,7 @@ public byte[] uncompress(ByteString data, Enum dictIndex) { private static byte[] uncompressByteString(ByteString bs, Dictionary dict) throws IOException { InputStream in = bs.newInput(); - byte status = (byte)in.read(); + byte status = (byte) in.read(); if (status == Dictionary.NOT_IN_DICTIONARY) { byte[] arr = new byte[StreamUtils.readRawVarint32(in)]; int bytesRead = in.read(arr); @@ -211,7 +208,7 @@ private static byte[] uncompressByteString(ByteString bs, Dictionary dict) throw return arr; } else { // Status here is the higher-order byte of index of the dictionary entry. 
- short dictIdx = StreamUtils.toShort(status, (byte)in.read()); + short dictIdx = StreamUtils.toShort(status, (byte) in.read()); byte[] entry = dict.getEntry(dictIdx); if (entry == null) { throw new IOException("Missing dictionary entry for index " + dictIdx); @@ -224,6 +221,7 @@ static class CompressedKvEncoder extends BaseEncoder { private final CompressionContext compression; private final boolean hasValueCompression; private final boolean hasTagCompression; + public CompressedKvEncoder(OutputStream out, CompressionContext compression) { super(out); this.compression = compression; @@ -278,6 +276,7 @@ static class CompressedKvDecoder extends BaseDecoder { private final CompressionContext compression; private final boolean hasValueCompression; private final boolean hasTagCompression; + public CompressedKvDecoder(InputStream in, CompressionContext compression) { super(in); this.compression = compression; @@ -291,7 +290,7 @@ protected Cell parseCell() throws IOException { int vlength = StreamUtils.readRawVarint32(in); int tagsLength = StreamUtils.readRawVarint32(in); int length = 0; - if(tagsLength == 0) { + if (tagsLength == 0) { length = KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE + keylength + vlength; } else { length = KeyValue.KEYVALUE_WITH_TAGS_INFRASTRUCTURE_SIZE + keylength + vlength + tagsLength; @@ -306,14 +305,14 @@ protected Cell parseCell() throws IOException { int elemLen = readIntoArray(backingArray, pos + Bytes.SIZEOF_SHORT, compression.getDictionary(CompressionContext.DictionaryIndex.ROW)); checkLength(elemLen, Short.MAX_VALUE); - pos = Bytes.putShort(backingArray, pos, (short)elemLen); + pos = Bytes.putShort(backingArray, pos, (short) elemLen); pos += elemLen; // family elemLen = readIntoArray(backingArray, pos + Bytes.SIZEOF_BYTE, compression.getDictionary(CompressionContext.DictionaryIndex.FAMILY)); checkLength(elemLen, Byte.MAX_VALUE); - pos = Bytes.putByte(backingArray, pos, (byte)elemLen); + pos = Bytes.putByte(backingArray, pos, (byte) elemLen); pos += elemLen; // qualifier @@ -329,7 +328,7 @@ protected Cell parseCell() throws IOException { if (tagsLength > 0) { typeValLen = typeValLen - tagsLength - KeyValue.TAGS_LENGTH_SIZE; } - pos = Bytes.putByte(backingArray, pos, (byte)in.read()); + pos = Bytes.putByte(backingArray, pos, (byte) in.read()); int valLen = typeValLen - 1; if (hasValueCompression) { readCompressedValue(in, backingArray, pos, valLen); @@ -351,7 +350,7 @@ protected Cell parseCell() throws IOException { } private int readIntoArray(byte[] to, int offset, Dictionary dict) throws IOException { - byte status = (byte)in.read(); + byte status = (byte) in.read(); if (status == Dictionary.NOT_IN_DICTIONARY) { // status byte indicating that data to be read is not in dictionary. // if this isn't in the dictionary, we need to add to the dictionary. @@ -361,7 +360,7 @@ private int readIntoArray(byte[] to, int offset, Dictionary dict) throws IOExcep return length; } else { // the status byte also acts as the higher order byte of the dictionary entry. 
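The decoder logic above relies on a small wire convention: a status byte either marks a literal that follows (and is added to the dictionary) or forms, together with the next byte, a two-byte dictionary index. The following is a self-contained sketch of that convention; the sentinel value, the single-byte length, and the List-backed dictionary are simplifications, not the real Dictionary or StreamUtils types.

import java.io.IOException;
import java.io.InputStream;
import java.util.List;

// Sketch of the status-byte convention used by the dictionary decoder above.
class DictionaryReadSketch {
  static final byte NOT_IN_DICTIONARY = -1;

  static byte[] readElement(InputStream in, List<byte[]> dict) throws IOException {
    int status = in.read();
    if ((byte) status == NOT_IN_DICTIONARY) {
      int length = in.read();                  // simplified: single-byte length instead of a varint
      byte[] data = new byte[length];
      if (in.read(data) != length) {
        throw new IOException("Short read of literal entry");
      }
      dict.add(data);                          // later occurrences can refer to this index
      return data;
    }
    // The status byte is the high-order byte of the dictionary index; the next byte is the low one.
    int index = ((status & 0xFF) << 8) | (in.read() & 0xFF);
    if (index >= dict.size()) {
      throw new IOException("Missing dictionary entry for index " + index);
    }
    return dict.get(index);
  }
}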
- short dictIdx = StreamUtils.toShort(status, (byte)in.read()); + short dictIdx = StreamUtils.toShort(status, (byte) in.read()); byte[] entry = dict.getEntry(dictIdx); if (entry == null) { throw new IOException("Missing dictionary entry for index " + dictIdx); @@ -381,8 +380,8 @@ private static void checkLength(int len, int max) throws IOException { private void readCompressedValue(InputStream in, byte[] outArray, int outOffset, int expectedLength) throws IOException { int compressedLen = StreamUtils.readRawVarint32(in); - int read = compression.getValueCompressor().decompress(in, compressedLen, outArray, - outOffset, expectedLength); + int read = compression.getValueCompressor().decompress(in, compressedLen, outArray, outOffset, + expectedLength); if (read != expectedLength) { throw new IOException("ValueCompressor state error: short read"); } @@ -394,6 +393,7 @@ public static class EnsureKvEncoder extends BaseEncoder { public EnsureKvEncoder(OutputStream out) { super(out); } + @Override public void write(Cell cell) throws IOException { checkFlushed(); @@ -405,8 +405,8 @@ public void write(Cell cell) throws IOException { @Override public Decoder getDecoder(InputStream is) { - return (compression == null) - ? new KeyValueCodecWithTags.KeyValueDecoder(is) : new CompressedKvDecoder(is, compression); + return (compression == null) ? new KeyValueCodecWithTags.KeyValueDecoder(is) + : new CompressedKvDecoder(is, compression); } @Override @@ -416,8 +416,7 @@ public Decoder getDecoder(ByteBuff buf) { @Override public Encoder getEncoder(OutputStream os) { - os = (os instanceof ByteBufferWriter) ? os - : new ByteBufferWriterOutputStream(os); + os = (os instanceof ByteBufferWriter) ? os : new ByteBufferWriterOutputStream(os); if (compression == null) { return new EnsureKvEncoder(os); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALClosedException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALClosedException.java index ac6aad0a3815..616edd79e8f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALClosedException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALClosedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.hbase.regionserver.LogRoller; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java index 40d6d0fc948a..c8fc4d25d23d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java @@ -1,6 +1,4 @@ - /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -9,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; import java.lang.reflect.InvocationTargetException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.RegionInfo; @@ -41,19 +37,18 @@ import org.slf4j.LoggerFactory; /** - * Implements the coprocessor environment and runtime support for coprocessors - * loaded within a {@link WAL}. + * Implements the coprocessor environment and runtime support for coprocessors loaded within a + * {@link WAL}. */ @InterfaceAudience.Private -public class WALCoprocessorHost - extends CoprocessorHost { +public class WALCoprocessorHost extends CoprocessorHost { private static final Logger LOG = LoggerFactory.getLogger(WALCoprocessorHost.class); /** * Encapsulation of the environment of each coprocessor */ static class WALEnvironment extends BaseEnvironment - implements WALCoprocessorEnvironment { + implements WALCoprocessorEnvironment { private final WAL wal; @@ -76,8 +71,8 @@ private WALEnvironment(final WALCoprocessor impl, final int priority, final int final Configuration conf, final WAL wal) { super(impl, priority, seq, conf); this.wal = wal; - this.metricRegistry = MetricsCoprocessor.createRegistryForWALCoprocessor( - impl.getClass().getName()); + this.metricRegistry = + MetricsCoprocessor.createRegistryForWALCoprocessor(impl.getClass().getName()); } @Override @@ -119,8 +114,8 @@ public WALEnvironment createEnvironment(final WALCoprocessor instance, final int } @Override - public WALCoprocessor checkAndGetInstance(Class implClass) throws IllegalAccessException, - InstantiationException { + public WALCoprocessor checkAndGetInstance(Class implClass) + throws IllegalAccessException, InstantiationException { if (WALCoprocessor.class.isAssignableFrom(implClass)) { try { return implClass.asSubclass(WALCoprocessor.class).getDeclaredConstructor().newInstance(); @@ -137,8 +132,7 @@ public WALCoprocessor checkAndGetInstance(Class implClass) throws IllegalAcce private ObserverGetter walObserverGetter = WALCoprocessor::getWALObserver; - abstract class WALObserverOperation extends - ObserverOperationWithoutResult { + abstract class WALObserverOperation extends ObserverOperationWithoutResult { public WALObserverOperation() { super(walObserverGetter); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java index 2076dd4fb35b..1028d827db41 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; @@ -49,9 +48,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor; /** - * Helper methods to ease Region Server integration with the Write Ahead Log (WAL). 
- * Note that methods in this class specifically should not require access to anything - * other than the API found in {@link WAL}. For internal use only. + * Helper methods to ease Region Server integration with the Write Ahead Log (WAL). Note that + * methods in this class specifically should not require access to anything other than the API found + * in {@link WAL}. For internal use only. */ @InterfaceAudience.Private public class WALUtil { @@ -73,10 +72,10 @@ private WALUtil() { * @param mvcc Used by WAL to get sequence Id for the waledit. */ public static WALKeyImpl writeCompactionMarker(WAL wal, - NavigableMap replicationScope, RegionInfo hri, final CompactionDescriptor c, - MultiVersionConcurrencyControl mvcc, RegionReplicationSink sink) throws IOException { + NavigableMap replicationScope, RegionInfo hri, final CompactionDescriptor c, + MultiVersionConcurrencyControl mvcc, RegionReplicationSink sink) throws IOException { WALKeyImpl walKey = - writeMarker(wal, replicationScope, hri, WALEdit.createCompaction(hri, c), mvcc, null, sink); + writeMarker(wal, replicationScope, hri, WALEdit.createCompaction(hri, c), mvcc, null, sink); if (LOG.isTraceEnabled()) { LOG.trace("Appended compaction marker " + TextFormat.shortDebugString(c)); } @@ -89,8 +88,8 @@ public static WALKeyImpl writeCompactionMarker(WAL wal, * This write is for internal use only. Not for external client consumption. */ public static WALKeyImpl writeFlushMarker(WAL wal, NavigableMap replicationScope, - RegionInfo hri, final FlushDescriptor f, boolean sync, MultiVersionConcurrencyControl mvcc, - RegionReplicationSink sink) throws IOException { + RegionInfo hri, final FlushDescriptor f, boolean sync, MultiVersionConcurrencyControl mvcc, + RegionReplicationSink sink) throws IOException { WALKeyImpl walKey = doFullMarkerAppendTransaction(wal, replicationScope, hri, WALEdit.createFlushWALEdit(hri, f), mvcc, null, sync, sink); if (LOG.isTraceEnabled()) { @@ -104,8 +103,8 @@ public static WALKeyImpl writeFlushMarker(WAL wal, NavigableMap * only. Not for external client consumption. */ public static WALKeyImpl writeRegionEventMarker(WAL wal, - NavigableMap replicationScope, RegionInfo hri, RegionEventDescriptor r, - MultiVersionConcurrencyControl mvcc, RegionReplicationSink sink) throws IOException { + NavigableMap replicationScope, RegionInfo hri, RegionEventDescriptor r, + MultiVersionConcurrencyControl mvcc, RegionReplicationSink sink) throws IOException { WALKeyImpl walKey = writeMarker(wal, replicationScope, hri, WALEdit.createRegionEventWALEdit(hri, r), mvcc, null, sink); if (LOG.isTraceEnabled()) { @@ -115,8 +114,8 @@ public static WALKeyImpl writeRegionEventMarker(WAL wal, } /** - * Write a log marker that a bulk load has succeeded and is about to be committed. - * This write is for internal use only. Not for external client consumption. + * Write a log marker that a bulk load has succeeded and is about to be committed. This write is + * for internal use only. Not for external client consumption. * @param wal The log to write into. * @param replicationScope The replication scope of the families in the HRegion * @param hri A description of the region in the table that we are bulk loading into. @@ -125,9 +124,9 @@ public static WALKeyImpl writeRegionEventMarker(WAL wal, * @throws IOException We will throw an IOException if we can not append to the HLog. 
*/ public static WALKeyImpl writeBulkLoadMarkerAndSync(final WAL wal, - final NavigableMap replicationScope, final RegionInfo hri, - final WALProtos.BulkLoadDescriptor desc, final MultiVersionConcurrencyControl mvcc, - final RegionReplicationSink sink) throws IOException { + final NavigableMap replicationScope, final RegionInfo hri, + final WALProtos.BulkLoadDescriptor desc, final MultiVersionConcurrencyControl mvcc, + final RegionReplicationSink sink) throws IOException { WALKeyImpl walKey = writeMarker(wal, replicationScope, hri, WALEdit.createBulkLoadEvent(hri, desc), mvcc, null, sink); if (LOG.isTraceEnabled()) { @@ -137,9 +136,10 @@ public static WALKeyImpl writeBulkLoadMarkerAndSync(final WAL wal, } private static WALKeyImpl writeMarker(final WAL wal, - final NavigableMap replicationScope, final RegionInfo hri, final WALEdit edit, - final MultiVersionConcurrencyControl mvcc, final Map extendedAttributes, - final RegionReplicationSink sink) throws IOException { + final NavigableMap replicationScope, final RegionInfo hri, + final WALEdit edit, final MultiVersionConcurrencyControl mvcc, + final Map extendedAttributes, final RegionReplicationSink sink) + throws IOException { // If sync == true in below, then timeout is not used; safe to pass UNSPECIFIED_TIMEOUT return doFullMarkerAppendTransaction(wal, replicationScope, hri, edit, mvcc, extendedAttributes, true, sink); @@ -154,12 +154,13 @@ private static WALKeyImpl writeMarker(final WAL wal, * @return WALKeyImpl that was added to the WAL. */ private static WALKeyImpl doFullMarkerAppendTransaction(final WAL wal, - final NavigableMap replicationScope, final RegionInfo hri, final WALEdit edit, - final MultiVersionConcurrencyControl mvcc, final Map extendedAttributes, - final boolean sync, final RegionReplicationSink sink) throws IOException { + final NavigableMap replicationScope, final RegionInfo hri, + final WALEdit edit, final MultiVersionConcurrencyControl mvcc, + final Map extendedAttributes, final boolean sync, + final RegionReplicationSink sink) throws IOException { // TODO: Pass in current time to use? WALKeyImpl walKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), hri.getTable(), - EnvironmentEdgeManager.currentTime(), mvcc, replicationScope, extendedAttributes); + EnvironmentEdgeManager.currentTime(), mvcc, replicationScope, extendedAttributes); long trx = MultiVersionConcurrencyControl.NONE; try { trx = wal.appendMarker(hri, walKey, edit); @@ -195,8 +196,8 @@ public static long getWALBlockSize(Configuration conf, FileSystem fs, Path dir) /** * Public because of FSHLog. Should be package-private - * @param isRecoverEdits the created writer is for recovered edits or WAL. - * For recovered edits, it is true and for WAL it is false. + * @param isRecoverEdits the created writer is for recovered edits or WAL. For recovered edits, it + * is true and for WAL it is false. 
*/ public static long getWALBlockSize(Configuration conf, FileSystem fs, Path dir, boolean isRecoverEdits) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java index 56576a6cf3e1..993669b81348 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.io.IOException; import java.util.ArrayList; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService; @@ -35,33 +34,34 @@ // LimitedPrivate. See HBASE-15982. @InterfaceAudience.Private public abstract class BaseReplicationEndpoint extends AbstractService - implements ReplicationEndpoint { + implements ReplicationEndpoint { private static final Logger LOG = LoggerFactory.getLogger(BaseReplicationEndpoint.class); - public static final String REPLICATION_WALENTRYFILTER_CONFIG_KEY - = "hbase.replication.source.custom.walentryfilters"; + public static final String REPLICATION_WALENTRYFILTER_CONFIG_KEY = + "hbase.replication.source.custom.walentryfilters"; protected Context ctx; @Override public void init(Context context) throws IOException { this.ctx = context; - if (this.ctx != null){ + if (this.ctx != null) { ReplicationPeer peer = this.ctx.getReplicationPeer(); - if (peer != null){ + if (peer != null) { peer.registerPeerConfigListener(this); } else { - LOG.warn("Not tracking replication peer config changes for Peer Id " + this.ctx.getPeerId() + - " because there's no such peer"); + LOG.warn("Not tracking replication peer config changes for Peer Id " + this.ctx.getPeerId() + + " because there's no such peer"); } } } @Override /** - * No-op implementation for subclasses to override if they wish to execute logic if their config changes + * No-op implementation for subclasses to override if they wish to execute logic if their config + * changes */ - public void peerConfigUpdated(ReplicationPeerConfig rpc){ + public void peerConfigUpdated(ReplicationPeerConfig rpc) { } @@ -78,7 +78,8 @@ public WALEntryFilter getWALEntryfilter() { filters.add(tableCfFilter); } if (ctx != null && ctx.getPeerConfig() != null) { - String filterNameCSV = ctx.getPeerConfig().getConfiguration().get(REPLICATION_WALENTRYFILTER_CONFIG_KEY); + String filterNameCSV = + ctx.getPeerConfig().getConfiguration().get(REPLICATION_WALENTRYFILTER_CONFIG_KEY); if (filterNameCSV != null && !filterNameCSV.isEmpty()) { String[] filterNames = filterNameCSV.split(","); for (String filterName : filterNames) { @@ -94,14 +95,18 @@ public WALEntryFilter getWALEntryfilter() { return filters.isEmpty() ? null : new ChainWALEntryFilter(filters); } - /** Returns a WALEntryFilter for checking the scope. 
Subclasses can - * return null if they don't want this filter */ + /** + * Returns a WALEntryFilter for checking the scope. Subclasses can return null if they don't want + * this filter + */ protected WALEntryFilter getScopeWALEntryFilter() { return new ScopeWALEntryFilter(); } - /** Returns a WALEntryFilter for checking replication per table and CF. Subclasses can - * return null if they don't want this filter */ + /** + * Returns a WALEntryFilter for checking replication per table and CF. Subclasses can return null + * if they don't want this filter + */ protected WALEntryFilter getNamespaceTableCfWALEntryFilter() { return new NamespaceTableCfWALEntryFilter(ctx.getReplicationPeer()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java index 6814640dfe50..275e2108a2f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,9 @@ public class BulkLoadCellFilter { private static final Logger LOG = LoggerFactory.getLogger(BulkLoadCellFilter.class); - private final ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + private final ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + /** * Filters the bulk load cell using the supplied predicate. * @param cell The WAL cell to filter. @@ -75,19 +77,13 @@ public Cell filterCell(Cell cell, Predicate famPredicate) { } else if (copiedStoresList.isEmpty()) { return null; } - BulkLoadDescriptor.Builder newDesc = - BulkLoadDescriptor.newBuilder().setTableName(bld.getTableName()) - .setEncodedRegionName(bld.getEncodedRegionName()) - .setBulkloadSeqNum(bld.getBulkloadSeqNum()); + BulkLoadDescriptor.Builder newDesc = BulkLoadDescriptor.newBuilder() + .setTableName(bld.getTableName()).setEncodedRegionName(bld.getEncodedRegionName()) + .setBulkloadSeqNum(bld.getBulkloadSeqNum()); newDesc.addAllStores(copiedStoresList); BulkLoadDescriptor newBulkLoadDescriptor = newDesc.build(); - return cellBuilder.clear() - .setRow(CellUtil.cloneRow(cell)) - .setFamily(WALEdit.METAFAMILY) - .setQualifier(WALEdit.BULK_LOAD) - .setTimestamp(cell.getTimestamp()) - .setType(cell.getTypeByte()) - .setValue(newBulkLoadDescriptor.toByteArray()) - .build(); + return cellBuilder.clear().setRow(CellUtil.cloneRow(cell)).setFamily(WALEdit.METAFAMILY) + .setQualifier(WALEdit.BULK_LOAD).setTimestamp(cell.getTimestamp()) + .setType(cell.getTypeByte()).setValue(newBulkLoadDescriptor.toByteArray()).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java index 492364780718..894c0794dcd0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEmptyEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
          + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -48,10 +48,9 @@ public WAL.Entry filter(WAL.Entry entry) { } /** - * To allow the empty entries to get filtered, we want to set this optional flag to decide - * if we want to filter the entries which have no cells or all cells got filtered - * though {@link WALCellFilter}. - * + * To allow the empty entries to get filtered, we want to set this optional flag to decide if we + * want to filter the entries which have no cells or all cells got filtered though + * {@link WALCellFilter}. * @param filterEmptyEntry flag */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java index ae3c74ad4753..09f5b9083644 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.util.ArrayList; @@ -28,8 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A {@link WALEntryFilter} which contains multiple filters and applies them - * in chain order + * A {@link WALEntryFilter} which contains multiple filters and applies them in chain order */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public class ChainWALEntryFilter implements WALEntryFilter { @@ -37,7 +35,7 @@ public class ChainWALEntryFilter implements WALEntryFilter { private final WALEntryFilter[] filters; private WALCellFilter[] cellFilters; - public ChainWALEntryFilter(WALEntryFilter...filters) { + public ChainWALEntryFilter(WALEntryFilter... filters) { this.filters = filters; initCellFilters(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java index 5f92bbf3a65a..57d66b60577f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,18 +18,16 @@ package org.apache.hadoop.hbase.replication; import java.util.UUID; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; +import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKeyImpl; -import org.apache.hadoop.hbase.wal.WAL.Entry; - +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * Filters out entries with our peerClusterId (i.e. already replicated) - * and marks all other entries with our clusterID + * Filters out entries with our peerClusterId (i.e. already replicated) and marks all other entries + * with our clusterID */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) @InterfaceStability.Evolving @@ -44,18 +41,20 @@ public class ClusterMarkingEntryFilter implements WALEntryFilter { * @param peerClusterId of the other cluster * @param replicationEndpoint ReplicationEndpoint which will handle the actual replication */ - public ClusterMarkingEntryFilter(UUID clusterId, UUID peerClusterId, ReplicationEndpoint replicationEndpoint) { + public ClusterMarkingEntryFilter(UUID clusterId, UUID peerClusterId, + ReplicationEndpoint replicationEndpoint) { this.clusterId = clusterId; this.peerClusterId = peerClusterId; this.replicationEndpoint = replicationEndpoint; } + @Override public Entry filter(Entry entry) { // don't replicate if the log entries have already been consumed by the cluster if (replicationEndpoint.canReplicateToSameCluster() || !entry.getKey().getClusterIds().contains(peerClusterId)) { WALEdit edit = entry.getEdit(); - WALKeyImpl logKey = (WALKeyImpl)entry.getKey(); + WALKeyImpl logKey = (WALKeyImpl) entry.getKey(); if (edit != null && !edit.isEmpty()) { // Mark that the current cluster has the change diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 86786856f214..c360e45a64a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication; import java.io.IOException; @@ -26,15 +25,15 @@ import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; import org.apache.hadoop.hbase.client.ClusterConnectionFactory; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.zookeeper.ZKListener; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -48,12 +47,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * A {@link BaseReplicationEndpoint} for replication endpoints whose - * target cluster is an HBase cluster. + * A {@link BaseReplicationEndpoint} for replication endpoints whose target cluster is an HBase + * cluster. */ @InterfaceAudience.Private public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint - implements Abortable { + implements Abortable { private static final Logger LOG = LoggerFactory.getLogger(HBaseReplicationEndpoint.class); @@ -65,15 +64,14 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private AsyncClusterConnection conn; /** - * Default maximum number of times a replication sink can be reported as bad before - * it will no longer be provided as a sink for replication without the pool of - * replication sinks being refreshed. + * Default maximum number of times a replication sink can be reported as bad before it will no + * longer be provided as a sink for replication without the pool of replication sinks being + * refreshed. */ public static final int DEFAULT_BAD_SINK_THRESHOLD = 3; /** - * Default ratio of the total number of peer cluster region servers to consider - * replicating to. + * Default ratio of the total number of peer cluster region servers to consider replicating to. */ public static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f; @@ -94,18 +92,17 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint * as protected for possible overridings. 
*/ protected AsyncClusterConnection createConnection(Configuration conf) throws IOException { - return ClusterConnectionFactory.createAsyncClusterConnection(conf, - null, User.getCurrent()); + return ClusterConnectionFactory.createAsyncClusterConnection(conf, null, User.getCurrent()); } @Override public void init(Context context) throws IOException { super.init(context); this.conf = HBaseConfiguration.create(ctx.getConfiguration()); - this.ratio = - ctx.getConfiguration().getFloat("replication.source.ratio", DEFAULT_REPLICATION_SOURCE_RATIO); + this.ratio = ctx.getConfiguration().getFloat("replication.source.ratio", + DEFAULT_REPLICATION_SOURCE_RATIO); this.badSinkThreshold = - ctx.getConfiguration().getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD); + ctx.getConfiguration().getInt("replication.bad.sink.threshold", DEFAULT_BAD_SINK_THRESHOLD); this.badReportCounts = Maps.newHashMap(); } @@ -195,8 +192,8 @@ private void reloadZkWatcher() throws IOException { if (zkw != null) { zkw.close(); } - zkw = new ZKWatcher(ctx.getConfiguration(), - "connection to cluster: " + ctx.getPeerId(), this); + zkw = + new ZKWatcher(ctx.getConfiguration(), "connection to cluster: " + ctx.getPeerId(), this); zkw.registerListener(new PeerRegionServerListener(this)); } } @@ -213,7 +210,8 @@ private void connectPeerCluster() throws IOException { @Override public void abort(String why, Throwable e) { LOG.error("The HBaseReplicationEndpoint corresponding to peer " + ctx.getPeerId() - + " was aborted for the following reason(s):" + why, e); + + " was aborted for the following reason(s):" + why, + e); } @Override @@ -224,7 +222,6 @@ public boolean isAborted() { /** * Get the list of all the region servers from the specified peer - * * @return list of region server addresses or an empty list if the slave is unavailable */ protected List fetchSlavesAddresses() { @@ -277,16 +274,14 @@ protected synchronized SinkPeer getReplicationSink() throws IOException { throw new IOException("No replication sinks are available"); } ServerName serverName = - sinkServers.get(ThreadLocalRandom.current().nextInt(sinkServers.size())); + sinkServers.get(ThreadLocalRandom.current().nextInt(sinkServers.size())); return new SinkPeer(serverName, conn.getRegionServerAdmin(serverName)); } /** - * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it - * failed). If a single SinkPeer is reported as bad more than - * replication.bad.sink.threshold times, it will be removed + * Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it failed). If a single + * SinkPeer is reported as bad more than replication.bad.sink.threshold times, it will be removed * from the pool of potential replication targets. - * * @param sinkPeer The SinkPeer that had a failed replication attempt on it */ protected synchronized void reportBadSink(SinkPeer sinkPeer) { @@ -302,9 +297,7 @@ protected synchronized void reportBadSink(SinkPeer sinkPeer) { /** * Report that a {@code SinkPeer} successfully replicated a chunk of data. 
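The Javadoc reworked in the hunks above describes HBaseReplicationEndpoint's bad-sink bookkeeping: a sink reported bad more than replication.bad.sink.threshold times (3 by default) is dropped from the candidate pool, and a successful shipment clears its count. A minimal standalone sketch of that scheme, assuming invented names (SinkPool and its methods are illustration only, not HBase APIs):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;

// Hypothetical, simplified sketch of the bad-sink bookkeeping described above.
public class SinkPool {
  private final int badSinkThreshold; // e.g. replication.bad.sink.threshold, default 3
  private final List<String> sinkServers = new ArrayList<>();
  private final Map<String, Integer> badReportCounts = new HashMap<>();

  public SinkPool(List<String> servers, int badSinkThreshold) {
    this.sinkServers.addAll(servers);
    this.badSinkThreshold = badSinkThreshold;
  }

  /** Pick a random sink, mirroring the random pick in getReplicationSink(). */
  public synchronized String getSink() {
    if (sinkServers.isEmpty()) {
      throw new IllegalStateException("No replication sinks are available");
    }
    return sinkServers.get(ThreadLocalRandom.current().nextInt(sinkServers.size()));
  }

  /** A failed attempt: drop the sink once it has been reported bad more than the threshold. */
  public synchronized void reportBadSink(String server) {
    int count = badReportCounts.merge(server, 1, Integer::sum);
    if (count > badSinkThreshold) {
      sinkServers.remove(server);
      badReportCounts.remove(server);
    }
  }

  /** A successful shipment clears the failure count for that sink. */
  public synchronized void reportSinkSuccess(String server) {
    badReportCounts.remove(server);
  }
}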
- * - * @param sinkPeer - * The SinkPeer that had a failed replication attempt on it + * @param sinkPeer The SinkPeer that had a failed replication attempt on it */ protected synchronized void reportSinkSuccess(SinkPeer sinkPeer) { badReportCounts.remove(sinkPeer.getServerName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java index 4fe04cd6ee5a..82ac9ebd1f32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import org.apache.hadoop.hbase.Cell; @@ -27,7 +26,6 @@ /** * Filter a WAL Entry by the peer config according to the table and family which it belongs to. - * * @see ReplicationPeerConfig#needToReplicate(TableName, byte[]) */ @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java index 81be5a3e3a00..e48a2284f578 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -68,9 +68,9 @@ private ReplicationBarrierFamilyFormat() { public static void addReplicationBarrier(Put put, long openSeqNum) throws IOException { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) - .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(HConstants.SEQNUM_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) - .build()); + .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(HConstants.SEQNUM_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) + .build()); } private static void writeRegionName(ByteArrayOutputStream out, byte[] regionName) { @@ -118,12 +118,12 @@ private static List parseParentsBytes(byte[] bytes) { public static void addReplicationParent(Put put, List parents) throws IOException { byte[] value = getParentsBytes(parents); put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) - .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(REPLICATION_PARENT_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(value).build()); + .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(REPLICATION_PARENT_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(value).build()); } public static Put makePutForReplicationBarrier(RegionInfo regionInfo, long openSeqNum, long ts) - throws IOException { + throws IOException { Put put = new Put(regionInfo.getRegionName(), ts); addReplicationBarrier(put, openSeqNum); return put; @@ -154,10 +154,10 @@ public List getParentRegionNames() { @Override public String toString() { - return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" + - state + ", parentRegionNames=" + - parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) + - "]"; + return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" + state + + ", parentRegionNames=" + + parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) + + "]"; } } @@ -167,33 +167,34 @@ private static long getReplicationBarrier(Cell c) { public static long[] getReplicationBarriers(Result result) { return result.getColumnCells(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER) - .stream().mapToLong(ReplicationBarrierFamilyFormat::getReplicationBarrier).sorted().distinct() - .toArray(); + .stream().mapToLong(ReplicationBarrierFamilyFormat::getReplicationBarrier).sorted() + .distinct().toArray(); } private static ReplicationBarrierResult getReplicationBarrierResult(Result result) { long[] barriers = getReplicationBarriers(result); byte[] stateBytes = result.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER); RegionState.State state = - stateBytes != null ? RegionState.State.valueOf(Bytes.toString(stateBytes)) : null; + stateBytes != null ? RegionState.State.valueOf(Bytes.toString(stateBytes)) : null; byte[] parentRegionsBytes = - result.getValue(HConstants.REPLICATION_BARRIER_FAMILY, REPLICATION_PARENT_QUALIFIER); + result.getValue(HConstants.REPLICATION_BARRIER_FAMILY, REPLICATION_PARENT_QUALIFIER); List parentRegionNames = - parentRegionsBytes != null ? parseParentsBytes(parentRegionsBytes) : Collections.emptyList(); + parentRegionsBytes != null ? 
parseParentsBytes(parentRegionsBytes) + : Collections.emptyList(); return new ReplicationBarrierResult(barriers, state, parentRegionNames); } public static ReplicationBarrierResult getReplicationBarrierResult(Connection conn, - TableName tableName, byte[] row, byte[] encodedRegionName) throws IOException { + TableName tableName, byte[] row, byte[] encodedRegionName) throws IOException { byte[] metaStartKey = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); byte[] metaStopKey = - RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); + RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); Scan scan = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey) - .addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER) - .addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true) - .setCaching(10); + .addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER) + .addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true) + .setCaching(10); try (Table table = conn.getTable(TableName.META_TABLE_NAME); - ResultScanner scanner = table.getScanner(scan)) { + ResultScanner scanner = table.getScanner(scan)) { for (Result result;;) { result = scanner.next(); if (result == null) { @@ -213,24 +214,24 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co } public static long[] getReplicationBarriers(Connection conn, byte[] regionName) - throws IOException { + throws IOException { try (Table table = conn.getTable(TableName.META_TABLE_NAME)) { Result result = table.get(new Get(regionName) - .addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER) - .readAllVersions()); + .addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER) + .readAllVersions()); return getReplicationBarriers(result); } } public static List> getTableEncodedRegionNameAndLastBarrier(Connection conn, - TableName tableName) throws IOException { + TableName tableName) throws IOException { List> list = new ArrayList<>(); MetaTableAccessor.scanMeta(conn, ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REPLICATION), ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REPLICATION), QueryType.REPLICATION, r -> { byte[] value = - r.getValue(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER); + r.getValue(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER); if (value == null) { return true; } @@ -243,7 +244,7 @@ public static List> getTableEncodedRegionNameAndLastBarrier(C } public static List getTableEncodedRegionNamesForSerialReplication(Connection conn, - TableName tableName) throws IOException { + TableName tableName) throws IOException { List list = new ArrayList<>(); MetaTableAccessor.scanMeta(conn, ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REPLICATION), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java index 3fec8131d090..f77b56bfb8ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.io.IOException; @@ -23,29 +22,27 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - -import org.apache.hadoop.hbase.Abortable; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; +import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.yetus.audience.InterfaceAudience; /** - * ReplicationEndpoint is a plugin which implements replication - * to other HBase clusters, or other systems. ReplicationEndpoint implementation - * can be specified at the peer creation time by specifying it - * in the {@link ReplicationPeerConfig}. A ReplicationEndpoint is run in a thread - * in each region server in the same process. + * ReplicationEndpoint is a plugin which implements replication to other HBase clusters, or other + * systems. ReplicationEndpoint implementation can be specified at the peer creation time by + * specifying it in the {@link ReplicationPeerConfig}. A ReplicationEndpoint is run in a thread in + * each region server in the same process. *

          - * ReplicationEndpoint is closely tied to ReplicationSource in a producer-consumer - * relation. ReplicationSource is an HBase-private class which tails the logs and manages - * the queue of logs plus management and persistence of all the state for replication. - * ReplicationEndpoint on the other hand is responsible for doing the actual shipping - * and persisting of the WAL entries in the other cluster. + * ReplicationEndpoint is closely tied to ReplicationSource in a producer-consumer relation. + * ReplicationSource is an HBase-private class which tails the logs and manages the queue of logs + * plus management and persistence of all the state for replication. ReplicationEndpoint on the + * other hand is responsible for doing the actual shipping and persisting of the WAL entries in the + * other cluster. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface ReplicationEndpoint extends ReplicationPeerConfigListener { @@ -134,14 +131,16 @@ public Abortable getAbortable() { */ void init(Context context) throws IOException; - /** Whether or not, the replication endpoint can replicate to it's source cluster with the same - * UUID */ + /** + * Whether or not, the replication endpoint can replicate to it's source cluster with the same + * UUID + */ boolean canReplicateToSameCluster(); /** - * Returns a UUID of the provided peer id. Every HBase cluster instance has a persisted - * associated UUID. If the replication is not performed to an actual HBase cluster (but - * some other system), the UUID returned has to uniquely identify the connected target system. + * Returns a UUID of the provided peer id. Every HBase cluster instance has a persisted associated + * UUID. If the replication is not performed to an actual HBase cluster (but some other system), + * the UUID returned has to uniquely identify the connected target system. * @return a UUID or null if the peer cluster does not exist or is not connected. */ UUID getPeerUUID(); @@ -162,6 +161,7 @@ static class ReplicateContext { int size; String walGroupId; int timeout; + @InterfaceAudience.Private public ReplicateContext() { } @@ -170,42 +170,46 @@ public ReplicateContext setEntries(List entries) { this.entries = entries; return this; } + public ReplicateContext setSize(int size) { this.size = size; return this; } + public ReplicateContext setWalGroupId(String walGroupId) { this.walGroupId = walGroupId; return this; } + public List getEntries() { return entries; } + public int getSize() { return size; } - public String getWalGroupId(){ + + public String getWalGroupId() { return walGroupId; } + public void setTimeout(int timeout) { this.timeout = timeout; } + public int getTimeout() { return this.timeout; } } /** - * Replicate the given set of entries (in the context) to the other cluster. - * Can block until all the given entries are replicated. Upon this method is returned, - * all entries that were passed in the context are assumed to be persisted in the - * target cluster. - * @param replicateContext a context where WAL entries and other - * parameters can be obtained. + * Replicate the given set of entries (in the context) to the other cluster. Can block until all + * the given entries are replicated. Upon this method is returned, all entries that were passed in + * the context are assumed to be persisted in the target cluster. + * @param replicateContext a context where WAL entries and other parameters can be obtained. 
*/ boolean replicate(ReplicateContext replicateContext); - // The below methods are inspired by Guava Service. See // https://github.com/google/guava/wiki/ServiceExplained for overview of Guava Service. // Below we implement a subset only with different names on some methods so we can implement @@ -231,23 +235,21 @@ public int getTimeout() { /** * Waits for the {@link ReplicationEndpoint} to be up and running. - * * @throws IllegalStateException if the service reaches a state from which it is not possible to - * enter the (internal) running state. e.g. if the state is terminated when this method is - * called then this will throw an IllegalStateException. + * enter the (internal) running state. e.g. if the state is terminated when this method + * is called then this will throw an IllegalStateException. */ void awaitRunning(); /** - * Waits for the {@link ReplicationEndpoint} to to be up and running for no more - * than the given time. - * + * Waits for the {@link ReplicationEndpoint} to to be up and running for no more than the given + * time. * @param timeout the maximum time to wait * @param unit the time unit of the timeout argument * @throws TimeoutException if the service has not reached the given state within the deadline * @throws IllegalStateException if the service reaches a state from which it is not possible to - * enter the (internal) running state. e.g. if the state is terminated when this method is - * called then this will throw an IllegalStateException. + * enter the (internal) running state. e.g. if the state is terminated when this method + * is called then this will throw an IllegalStateException. */ void awaitRunning(long timeout, TimeUnit unit) throws TimeoutException; @@ -260,15 +262,13 @@ public int getTimeout() { /** * Waits for the {@link ReplicationEndpoint} to reach the terminated (internal) state. - * * @throws IllegalStateException if the service FAILED. */ void awaitTerminated(); /** - * Waits for the {@link ReplicationEndpoint} to reach a terminal state for no - * more than the given time. - * + * Waits for the {@link ReplicationEndpoint} to reach a terminal state for no more than the given + * time. * @param timeout the maximum time to wait * @param unit the time unit of the timeout argument * @throws TimeoutException if the service has not reached the given state within the deadline @@ -278,7 +278,6 @@ public int getTimeout() { /** * Returns the {@link Throwable} that caused this service to fail. - * * @throws IllegalStateException if this service's state isn't FAILED. 
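The lifecycle methods above are modeled on Guava's Service: start() kicks off asynchronous startup, awaitRunning() blocks until the endpoint is usable or times out, and stop()/awaitTerminated() mirror that on shutdown. A minimal sketch of that contract using latches (LifecycleSketch is a hypothetical stand-in, not the HBase implementation):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Hypothetical stand-in for the Guava-Service-like lifecycle contract, not the HBase code.
public class LifecycleSketch {
  private final CountDownLatch running = new CountDownLatch(1);
  private final CountDownLatch terminated = new CountDownLatch(1);

  /** Trigger (possibly asynchronous) startup; here we flip straight to "running". */
  public void start() {
    running.countDown();
  }

  /** Block until the service is running, or fail if it does not get there in time. */
  public void awaitRunning(long timeout, TimeUnit unit) throws TimeoutException {
    try {
      if (!running.await(timeout, unit)) {
        throw new TimeoutException("Did not reach the running state within the deadline");
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IllegalStateException("Interrupted while waiting to become running", e);
    }
  }

  /** Trigger shutdown; a real endpoint would close its connections first. */
  public void stop() {
    terminated.countDown();
  }

  /** Block until the service has reached its terminal state. */
  public void awaitTerminated() {
    try {
      terminated.await();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IllegalStateException("Interrupted while waiting for termination", e);
    }
  }

  public static void main(String[] args) throws Exception {
    LifecycleSketch s = new LifecycleSketch();
    s.start();
    s.awaitRunning(1, TimeUnit.SECONDS);
    s.stop();
    s.awaitTerminated();
  }
}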
*/ Throwable failureCause(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java index edd567914dc7..957694c314a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationSinkServiceImpl.java @@ -21,7 +21,6 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -57,28 +56,26 @@ public class ReplicationSinkServiceImpl implements ReplicationSinkService { @Override public void replicateLogEntries(List entries, CellScanner cells, - String replicationClusterId, String sourceBaseNamespaceDirPath, - String sourceHFileArchiveDirPath) throws IOException { + String replicationClusterId, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath) throws IOException { this.replicationSink.replicateEntries(entries, cells, replicationClusterId, sourceBaseNamespaceDirPath, sourceHFileArchiveDirPath); } @Override public void initialize(Server server, FileSystem fs, Path logdir, Path oldLogDir, - WALFactory walFactory) throws IOException { + WALFactory walFactory) throws IOException { this.server = server; this.conf = server.getConfiguration(); - this.statsPeriodInSecond = - this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); this.replicationLoad = new ReplicationLoad(); } @Override public void startReplicationService() throws IOException { this.replicationSink = new ReplicationSink(this.conf); - this.server.getChoreService().scheduleChore( - new ReplicationStatisticsChore("ReplicationSinkStatistics", server, - (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); + this.server.getChoreService().scheduleChore(new ReplicationStatisticsChore( + "ReplicationSinkStatistics", server, (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java index f8722eb3da44..6dc41bcc014a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ private boolean hasGlobalScope(NavigableMap scopes, byte[] fami Integer scope = scopes.get(family); return scope != null && scope.intValue() == HConstants.REPLICATION_SCOPE_GLOBAL; } + @Override public Cell filterCell(Entry entry, Cell cell) { NavigableMap scopes = entry.getKey().getReplicationScopes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java index 3cda94a1c028..d71260cce5c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/SystemTableWALEntryFilter.java @@ -15,8 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; + import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; @@ -27,6 +27,6 @@ public class SystemTableWALEntryFilter implements WALEntryFilter { @Override public Entry filter(Entry entry) { - return entry.getKey().getTableName().isSystemTable()? null: entry; + return entry.getKey().getTableName().isSystemTable() ? null : entry; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java index 088827f4d2e3..f700ecfc8282 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/VerifyWALEntriesReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ private void checkCell(Cell cell) { @Override public boolean replicate(ReplicateContext replicateContext) { replicateContext.entries.stream().map(WAL.Entry::getEdit).flatMap(e -> e.getCells().stream()) - .forEach(this::checkCell); + .forEach(this::checkCell); return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java index 46b2f6cb4ddf..9ac3741601c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALCellFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +19,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.yetus.audience.InterfaceAudience; /** * A filter for WAL entry cells before being sent over to replication. @@ -29,12 +29,12 @@ public interface WALCellFilter { /** - * Applies the filter, possibly returning a different Cell instance. - * If null is returned, the cell will be skipped. + * Applies the filter, possibly returning a different Cell instance. 
If null is returned, the cell + * will be skipped. * @param entry Entry which contains the cell * @param cell Cell to filter - * @return a (possibly modified) Cell to use. Returning null will cause the cell - * to be skipped for replication. + * @return a (possibly modified) Cell to use. Returning null will cause the cell to be skipped for + * replication. */ public Cell filterCell(Entry entry, Cell cell); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java index 23c1c60f2db1..8aa60f74ebba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java @@ -16,20 +16,22 @@ * limitations under the License. */ package org.apache.hadoop.hbase.replication; + import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; /** - * A Filter for WAL entries before being sent over to replication. Multiple - * filters might be chained together using {@link ChainWALEntryFilter}. - * Applied on the replication source side. - *

          There is also a filter that can be installed on the sink end of a replication stream. - * See {@link org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter}. Certain - * use-cases may need such a facility but better to filter here on the source side rather - * than later, after the edit arrives at the sink.

          + * A Filter for WAL entries before being sent over to replication. Multiple filters might be chained + * together using {@link ChainWALEntryFilter}. Applied on the replication source side. + *

          + * There is also a filter that can be installed on the sink end of a replication stream. See + * {@link org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter}. Certain use-cases + * may need such a facility but better to filter here on the source side rather than later, after + * the edit arrives at the sink. + *

          * @see org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter for filtering - * replication on the sink-side. + * replication on the sink-side. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface WALEntryFilter { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java index f06b29ccdeff..2f40bc413d99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -90,11 +90,9 @@ public boolean apply(FileStatus file) { @Override public void setConf(Configuration config) { // If either replication or replication of bulk load hfiles is disabled, keep all members null - if (!(config.getBoolean( - HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + if (!(config.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT))) { - LOG.warn(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY - + " is not enabled. Better to remove " + LOG.warn(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY + " is not enabled. Better to remove " + ReplicationHFileCleaner.class + " from " + HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS + " configuration."); return; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index a7821f1894a1..f5632f39fd53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,17 +31,17 @@ import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Predicate; import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; /** - * Implementation of a log cleaner that checks if a log is still scheduled for - * replication before deleting it when its TTL is over. + * Implementation of a log cleaner that checks if a log is still scheduled for replication before + * deleting it when its TTL is over. 
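The filter interfaces touched in the hunks above share one convention: a filter may return a (possibly different) entry or cell, and returning null means the item is skipped for replication; ChainWALEntryFilter applies its filters in order and stops at the first null. A standalone sketch of that convention, assuming invented stand-in types (plain Strings instead of WAL.Entry):

import java.util.Arrays;
import java.util.List;

// Standalone sketch with invented types; the real interfaces are WALEntryFilter,
// WALCellFilter and ChainWALEntryFilter from the hunks above.
public class FilterChainSketch {
  interface EntryFilter {
    /** Return a (possibly modified) entry, or null to skip it for replication. */
    String filter(String entry);
  }

  /** Drops anything that targets a "system" table, keeps everything else. */
  static class SystemTableFilter implements EntryFilter {
    @Override
    public String filter(String entry) {
      return entry.startsWith("hbase:") ? null : entry;
    }
  }

  /** Applies each filter in order and stops at the first null. */
  static String applyChain(List<EntryFilter> filters, String entry) {
    for (EntryFilter f : filters) {
      entry = f.filter(entry);
      if (entry == null) {
        return null; // filtered out, do not replicate
      }
    }
    return entry;
  }

  public static void main(String[] args) {
    List<EntryFilter> chain = Arrays.asList(new SystemTableFilter());
    System.out.println(applyChain(chain, "hbase:meta,row1")); // null -> skipped
    System.out.println(applyChain(chain, "usertable,row1"));  // kept
  }
}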
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ReplicationLogCleaner extends BaseLogCleanerDelegate { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java index ddae7311225b..7fb53274317e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ClaimReplicationQueueCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ protected void doCall() throws Exception { @Override protected void initParameter(byte[] parameter) throws InvalidProtocolBufferException { ClaimReplicationQueueRemoteParameter param = - ClaimReplicationQueueRemoteParameter.parseFrom(parameter); + ClaimReplicationQueueRemoteParameter.parseFrom(parameter); crashedServer = ProtobufUtil.toServerName(param.getCrashedServer()); queue = param.getQueue(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java index f7040d6fc811..1318af59029c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java @@ -1,12 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.replication.regionserver; @@ -15,7 +22,6 @@ import java.net.URL; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; @@ -26,8 +32,8 @@ import org.slf4j.LoggerFactory; /** - * This will load all the xml configuration files for the source cluster replication ID from - * user configured replication configuration directory. + * This will load all the xml configuration files for the source cluster replication ID from user + * configured replication configuration directory. */ @InterfaceAudience.Private public class DefaultSourceFSConfigurationProvider implements SourceFSConfigurationProvider { @@ -62,8 +68,7 @@ public Configuration getConf(Configuration sinkConf, String replicationClusterId File confDir = new File(replicationConfDir, replicationClusterId); LOG.info("Loading source cluster " + replicationClusterId - + " file system configurations from xml " - + "files under directory " + confDir); + + " file system configurations from xml " + "files under directory " + confDir); String[] listofConfFiles = FileUtil.list(confDir); for (String confFile : listofConfFiles) { if (new File(confDir, confFile).isFile() && confFile.endsWith(XML)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index dc4ee347e20a..5423c2730c2c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,10 +60,9 @@ /** * Provides information about the existing states of replication, replication peers and queues. - * * Usage: hbase org.apache.hadoop.hbase.replication.regionserver.DumpReplicationQueues [args] - * Arguments: --distributed Polls each RS to dump information about the queue - * --hdfs Reports HDFS usage by the replication queues (note: can be overestimated). + * Arguments: --distributed Polls each RS to dump information about the queue --hdfs Reports HDFS + * usage by the replication queues (note: can be overestimated). 
*/ @InterfaceAudience.Private public class DumpReplicationQueues extends Configured implements Tool { @@ -96,7 +95,7 @@ public DumpOptions(DumpOptions that) { this.distributed = that.distributed; } - boolean isHdfs () { + boolean isHdfs() { return hdfs; } @@ -104,7 +103,7 @@ boolean isDistributed() { return distributed; } - void setHdfs (boolean hdfs) { + void setHdfs(boolean hdfs) { this.hdfs = hdfs; } @@ -136,7 +135,7 @@ static DumpOptions parseOpts(Queue args) { printUsageAndExit("ERROR: Unrecognized option/command: " + cmd, -1); } // check that --distributed is present when --hdfs is in the arguments - if (!opts.isDistributed() && opts.isHdfs()) { + if (!opts.isDistributed() && opts.isHdfs()) { printUsageAndExit("ERROR: --hdfs option can only be used with --distributed: " + cmd, -1); } } @@ -145,7 +144,6 @@ static DumpOptions parseOpts(Queue args) { /** * Main - * * @param args * @throws Exception */ @@ -206,9 +204,9 @@ private int dumpReplicationQueues(DumpOptions opts) throws Exception { Connection connection = ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); - ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + - EnvironmentEdgeManager.currentTime(), - new WarnOnlyAbortable(), true); + ZKWatcher zkw = + new ZKWatcher(conf, "DumpReplicationQueues" + EnvironmentEdgeManager.currentTime(), + new WarnOnlyAbortable(), true); try { // Our zk watcher @@ -216,7 +214,7 @@ private int dumpReplicationQueues(DumpOptions opts) throws Exception { List replicatedTableCFs = admin.listReplicatedTableCFs(); if (replicatedTableCFs.isEmpty()) { LOG.info("No tables with a configured replication peer were found."); - return(0); + return (0); } else { LOG.info("Replicated Tables: " + replicatedTableCFs); } @@ -232,8 +230,8 @@ private int dumpReplicationQueues(DumpOptions opts) throws Exception { if (opts.isDistributed()) { LOG.info("Found [--distributed], will poll each RegionServer."); - Set peerIds = peers.stream().map((peer) -> peer.getPeerId()) - .collect(Collectors.toSet()); + Set peerIds = + peers.stream().map((peer) -> peer.getPeerId()).collect(Collectors.toSet()); System.out.println(dumpQueues(zkw, peerIds, opts.isHdfs())); System.out.println(dumpReplicationSummary()); } else { @@ -268,7 +266,8 @@ public String dumpReplicationSummary() { if (!peersQueueSize.isEmpty()) { sb.append("Dumping all peers's number of WALs in replication queue\n"); for (Map.Entry entry : peersQueueSize.asMap().entrySet()) { - sb.append(" PeerId: " + entry.getKey() + " , sizeOfLogQueue: " + entry.getValue() + "\n"); + sb.append( + " PeerId: " + entry.getKey() + " , sizeOfLogQueue: " + entry.getValue() + "\n"); } } sb.append(" Total size of WALs on HDFS: " + StringUtils.humanSize(totalSizeOfWALs) + "\n"); @@ -304,7 +303,7 @@ public String dumpQueues(ZKWatcher zkw, Set peerIds, boolean hdfs) throw queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); Set liveRegionServers = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode) - .stream().map(ServerName::parseServerName).collect(Collectors.toSet()); + .stream().map(ServerName::parseServerName).collect(Collectors.toSet()); // Loops each peer on each RS and dumps the queues List regionservers = queueStorage.getListOfReplicators(); @@ -354,8 +353,8 @@ private String formatQueue(ServerName regionserver, ReplicationQueueStorage queu for (String wal : wals) { long position = queueStorage.getWALPosition(regionserver, queueInfo.getPeerId(), wal); - sb.append(" Replication position for " + wal + ": " + 
(position > 0 ? position : "0" - + " (not started or nothing to replicate)") + "\n"); + sb.append(" Replication position for " + wal + ": " + + (position > 0 ? position : "0" + " (not started or nothing to replicate)") + "\n"); } if (hdfs) { @@ -367,7 +366,7 @@ private String formatQueue(ServerName regionserver, ReplicationQueueStorage queu } /** - * return total size in bytes from a list of WALs + * return total size in bytes from a list of WALs */ private long getTotalWALSize(FileSystem fs, List wals, ServerName server) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 6dd60d14db0d..4e86903af37e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; @@ -69,14 +68,13 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} - * implementation for replicating to another HBase cluster. - * For the slave cluster it selects a random number of peers - * using a replication ratio. For example, if replication ration = 0.1 - * and slave cluster has 100 region servers, 10 will be selected. + * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} implementation for replicating + * to another HBase cluster. For the slave cluster it selects a random number of peers using a + * replication ratio. For example, if replication ration = 0.1 and slave cluster has 100 region + * servers, 10 will be selected. *

          - * A stream is considered down when we cannot contact a region server on the - * peer cluster for more than 55 seconds by default. + * A stream is considered down when we cannot contact a region server on the peer cluster for more + * than 55 seconds by default. *

          */ @InterfaceAudience.Private @@ -103,7 +101,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private long maxTerminationWait; // Size limit for replication RPCs, in bytes private int replicationRpcLimit; - //Metrics for this source + // Metrics for this source private MetricsSource metrics; private boolean peersSelected = false; private String replicationClusterId = ""; @@ -116,7 +114,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi private boolean dropOnDeletedTables; private boolean dropOnDeletedColumnFamilies; private boolean isSerial = false; - //Initialising as 0 to guarantee at least one logging message + // Initialising as 0 to guarantee at least one logging message private long lastSinkFetchTime = 0; @Override @@ -124,37 +122,33 @@ public void init(Context context) throws IOException { super.init(context); decorateConf(); this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); - this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", - maxRetriesMultiplier); + this.socketTimeoutMultiplier = + this.conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier); // A Replicator job is bound by the RPC timeout. We will wait this long for all Replicator // tasks to terminate when doStop() is called. long maxTerminationWaitMultiplier = this.conf.getLong( - "replication.source.maxterminationmultiplier", - DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER); - this.maxTerminationWait = maxTerminationWaitMultiplier * - this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - this.sleepForRetries = - this.conf.getLong("replication.source.sleepforretries", 1000); + "replication.source.maxterminationmultiplier", DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER); + this.maxTerminationWait = maxTerminationWaitMultiplier + * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); // per sink thread pool this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT); this.exec = Threads.getBoundedCachedThreadPool(maxThreads, 60, TimeUnit.SECONDS, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SinkThread-%d").build()); + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SinkThread-%d").build()); this.abortable = ctx.getAbortable(); // Set the size limit for replication RPCs to 95% of the max request size. // We could do with less slop if we have an accurate estimate of encoded size. Being // conservative for now. 
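The class Javadoc above explains ratio-based sink selection: with replication.source.ratio = 0.1 and 100 region servers on the peer cluster, about 10 are chosen as sinks. A rough sketch of that selection, assuming a hypothetical helper (this is not the endpoint's actual chooseSinks code):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Rough sketch: shuffle the peer's region servers and keep about ratio * n of them as sinks.
public class SinkSelectionSketch {
  static <T> List<T> chooseSinks(List<T> peerServers, float ratio) {
    List<T> shuffled = new ArrayList<>(peerServers);
    Collections.shuffle(shuffled);
    int numSinks = (int) (shuffled.size() * ratio);
    return shuffled.subList(0, numSinks);
  }

  public static void main(String[] args) {
    List<Integer> servers = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
      servers.add(i);
    }
    // With ratio 0.1 and 100 region servers on the peer, 10 sinks are selected.
    System.out.println(chooseSinks(servers, 0.1f).size());
  }
}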
- this.replicationRpcLimit = (int)(0.95 * conf.getLong(RpcServer.MAX_REQUEST_SIZE, - RpcServer.DEFAULT_MAX_REQUEST_SIZE)); - this.dropOnDeletedTables = - this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false); - this.dropOnDeletedColumnFamilies = this.conf - .getBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, false); - - this.replicationBulkLoadDataEnabled = - conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, - HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); + this.replicationRpcLimit = + (int) (0.95 * conf.getLong(RpcServer.MAX_REQUEST_SIZE, RpcServer.DEFAULT_MAX_REQUEST_SIZE)); + this.dropOnDeletedTables = this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false); + this.dropOnDeletedColumnFamilies = + this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, false); + + this.replicationBulkLoadDataEnabled = conf.getBoolean( + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); if (this.replicationBulkLoadDataEnabled) { replicationClusterId = this.conf.get(HConstants.REPLICATION_CLUSTER_ID); } @@ -195,8 +189,8 @@ private void connectToPeers() { private boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { - LOG.trace("{} {}, sleeping {} times {}", - logPeerId(), msg, sleepForRetries, sleepMultiplier); + LOG.trace("{} {}, sleeping {} times {}", logPeerId(), msg, sleepForRetries, + sleepMultiplier); } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { @@ -299,7 +293,7 @@ List> filterNotExistTableEdits(final List> oldEntryList) List> entryList = new ArrayList<>(); Map existMap = new HashMap<>(); try (Connection localConn = ConnectionFactory.createConnection(ctx.getLocalConfiguration()); - Admin localAdmin = localConn.getAdmin()) { + Admin localAdmin = localConn.getAdmin()) { for (List oldEntries : oldEntryList) { List entries = new ArrayList<>(); for (Entry e : oldEntries) { @@ -324,7 +318,8 @@ List> filterNotExistTableEdits(final List> oldEntryList) // and add a table filter there; but that would break the encapsulation, // so we're doing the filtering here. LOG.warn("Missing table detected at sink, local table also does not exist, " - + "filtering edits for table '{}'", tableName); + + "filtering edits for table '{}'", + tableName); } } if (!entries.isEmpty()) { @@ -342,7 +337,7 @@ List> filterNotExistColumnFamilyEdits(final List> oldEnt List> entryList = new ArrayList<>(); Map> existColumnFamilyMap = new HashMap<>(); try (Connection localConn = ConnectionFactory.createConnection(ctx.getLocalConfiguration()); - Admin localAdmin = localConn.getAdmin()) { + Admin localAdmin = localConn.getAdmin()) { for (List oldEntries : oldEntryList) { List entries = new ArrayList<>(); for (Entry e : oldEntries) { @@ -384,8 +379,9 @@ List> filterNotExistColumnFamilyEdits(final List> oldEnt // and add a table filter there; but that would break the encapsulation, // so we're doing the filtering here. 
LOG.warn( - "Missing column family detected at sink, local column family also does not exist," - + " filtering edits for table '{}',column family '{}'", tableName, missingCFs); + "Missing column family detected at sink, local column family also does not exist," + + " filtering edits for table '{}',column family '{}'", + tableName, missingCFs); } } if (!entries.isEmpty()) { @@ -433,8 +429,8 @@ private long parallelReplicate(CompletionService pool, ReplicateContext } catch (InterruptedException ie) { iox = new IOException(ie); } catch (ExecutionException ee) { - iox = ee.getCause() instanceof IOException? - (IOException)ee.getCause(): new IOException(ee.getCause()); + iox = ee.getCause() instanceof IOException ? (IOException) ee.getCause() + : new IOException(ee.getCause()); } } if (iox != null) { @@ -459,12 +455,12 @@ public boolean replicate(ReplicateContext replicateContext) { int numSinks = getNumSinks(); if (numSinks == 0) { - if ((EnvironmentEdgeManager.currentTime() - lastSinkFetchTime) >= - (maxRetriesMultiplier*1000)) { - LOG.warn( - "No replication sinks found, returning without replicating. " + if ((EnvironmentEdgeManager.currentTime() - lastSinkFetchTime) >= (maxRetriesMultiplier + * 1000)) { + LOG.warn("No replication sinks found, returning without replicating. " + "The source should retry with the same set of edits. Not logging this again for " - + "the next {} seconds.", maxRetriesMultiplier); + + "the next {} seconds.", + maxRetriesMultiplier); lastSinkFetchTime = EnvironmentEdgeManager.currentTime(); } sleepForRetries("No sinks available at peer", sleepMultiplier); @@ -496,13 +492,13 @@ public boolean replicate(ReplicateContext replicateContext) { } else if (dropOnDeletedColumnFamilies && isNoSuchColumnFamilyException(ioe)) { batches = filterNotExistColumnFamilyEdits(batches); if (batches.isEmpty()) { - LOG.warn("After filter not exist column family's edits, 0 edits to replicate, " + - "just return"); + LOG.warn("After filter not exist column family's edits, 0 edits to replicate, " + + "just return"); return true; } } else { LOG.warn("{} Peer encountered RemoteException, rechecking all sinks: ", logPeerId(), - ioe); + ioe); chooseSinks(); } } else { @@ -510,9 +506,10 @@ public boolean replicate(ReplicateContext replicateContext) { // This exception means we waited for more than 60s and nothing // happened, the cluster is alive and calling it right away // even for a test just makes things worse. - sleepForRetries("Encountered a SocketTimeoutException. Since the " + - "call to the remote cluster timed out, which is usually " + - "caused by a machine failure or a massive slowdown", + sleepForRetries( + "Encountered a SocketTimeoutException. Since the " + + "call to the remote cluster timed out, which is usually " + + "caused by a machine failure or a massive slowdown", this.socketTimeoutMultiplier); } else if (ioe instanceof ConnectException || ioe instanceof UnknownHostException) { LOG.warn("{} Peer is unavailable, rechecking all sinks: ", logPeerId(), ioe); @@ -544,9 +541,9 @@ protected void doStop() { } // Abort if the tasks did not terminate in time if (!exec.isTerminated()) { - String errMsg = "HBaseInterClusterReplicationEndpoint termination failed. The " + - "ThreadPoolExecutor failed to finish all tasks within " + maxTerminationWait + "ms. " + - "Aborting to prevent Replication from deadlocking. See HBASE-16081."; + String errMsg = "HBaseInterClusterReplicationEndpoint termination failed. 
The " + + "ThreadPoolExecutor failed to finish all tasks within " + maxTerminationWait + "ms. " + + "Aborting to prevent Replication from deadlocking. See HBASE-16081."; abortable.abort(errMsg, new IOException(errMsg)); } notifyStopped(); @@ -612,7 +609,7 @@ protected Callable createReplicator(List entries, int batchIndex : () -> replicateEntries(entries, batchIndex, timeout); } - private String logPeerId(){ + private String logPeerId() { return "[Source for peer " + this.ctx.getPeerId() + "]:"; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java index 209537137d7b..aac3cef66ea5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java @@ -95,10 +95,10 @@ public class HFileReplicator implements Closeable { private int copiesPerThread; private List sourceClusterIds; - public HFileReplicator(Configuration sourceClusterConf, - String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath, - Map>>> tableQueueMap, Configuration conf, - AsyncClusterConnection connection, List sourceClusterIds) throws IOException { + public HFileReplicator(Configuration sourceClusterConf, String sourceBaseNamespaceDirPath, + String sourceHFileArchiveDirPath, Map>>> tableQueueMap, + Configuration conf, AsyncClusterConnection connection, List sourceClusterIds) + throws IOException { this.sourceClusterConf = sourceClusterConf; this.sourceBaseNamespaceDirPath = sourceBaseNamespaceDirPath; this.sourceHFileArchiveDirPath = sourceHFileArchiveDirPath; @@ -110,17 +110,14 @@ public HFileReplicator(Configuration sourceClusterConf, userProvider = UserProvider.instantiate(conf); fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); this.hbaseStagingDir = - new Path(CommonFSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME); - this.maxCopyThreads = - this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY, - REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT); + new Path(CommonFSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME); + this.maxCopyThreads = this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY, + REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT); this.exec = Threads.getBoundedCachedThreadPool(maxCopyThreads, 60, TimeUnit.SECONDS, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("HFileReplicationCopier-%1$d-" + this.sourceBaseNamespaceDirPath). 
- build()); - this.copiesPerThread = - conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY, - REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT); + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("HFileReplicationCopier-%1$d-" + this.sourceBaseNamespaceDirPath).build()); + this.copiesPerThread = conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY, + REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT); sinkFs = FileSystem.get(conf); } @@ -167,7 +164,7 @@ private void doBulkLoad(Configuration conf, TableName tableName, Path stagingDir BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf); // Set the staging directory which will be used by BulkLoadHFilesTool for loading the data loader.setBulkToken(stagingDir.toString()); - //updating list of cluster ids where this bulkload event has already been processed + // updating list of cluster ids where this bulkload event has already been processed loader.setClusterIds(sourceClusterIds); for (int count = 0; !queue.isEmpty(); count++) { if (count != 0) { @@ -230,8 +227,7 @@ private Map copyHFilesToStagingDir() throws IOException { String tableName = tableEntry.getKey(); // Create staging directory for each table - Path stagingDir = - createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName)); + Path stagingDir = createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName)); familyHFilePathsPairsList = tableEntry.getValue(); familyHFilePathsPairsListSize = familyHFilePathsPairsList.size(); @@ -253,9 +249,8 @@ private Map copyHFilesToStagingDir() throws IOException { int currentCopied = 0; // Copy the hfiles parallely while (totalNoOfHFiles > currentCopied + this.copiesPerThread) { - c = - new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, - currentCopied + this.copiesPerThread)); + c = new Copier(sourceFs, familyStagingDir, + hfilePaths.subList(currentCopied, currentCopied + this.copiesPerThread)); future = exec.submit(c); futures.add(future); currentCopied += this.copiesPerThread; @@ -263,9 +258,8 @@ private Map copyHFilesToStagingDir() throws IOException { int remaining = totalNoOfHFiles - currentCopied; if (remaining > 0) { - c = - new Copier(sourceFs, familyStagingDir, hfilePaths.subList(currentCopied, - currentCopied + remaining)); + c = new Copier(sourceFs, familyStagingDir, + hfilePaths.subList(currentCopied, currentCopied + remaining)); future = exec.submit(c); futures.add(future); } @@ -274,10 +268,9 @@ private Map copyHFilesToStagingDir() throws IOException { try { f.get(); } catch (InterruptedException e) { - InterruptedIOException iioe = - new InterruptedIOException( - "Failed to copy HFiles to local file system. This will be retried again " - + "by the source cluster."); + InterruptedIOException iioe = new InterruptedIOException( + "Failed to copy HFiles to local file system. 
This will be retried again " + + "by the source cluster."); iioe.initCause(e); throw iioe; } catch (ExecutionException e) { @@ -295,7 +288,7 @@ private Map copyHFilesToStagingDir() throws IOException { if (sourceFs != null) { sourceFs.close(); } - if(exec != null) { + if (exec != null) { exec.shutdown(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java index dede79d138cc..f7f55fdf75b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** * This class is for maintaining the various replication statistics for a sink and publishing them @@ -40,7 +39,6 @@ public MetricsSink() { /** * Set the age of the last applied operation - * * @param timestamp The timestamp of the last operation applied. * @return the age that was set */ @@ -55,8 +53,8 @@ public long setAgeOfLastAppliedOp(long timestamp) { } /** - * Refreshing the age makes sure the value returned is the actual one and - * not the one set a replication time + * Refreshing the age makes sure the value returned is the actual one and not the one set a + * replication time * @return refreshed age */ public long refreshAgeOfLastAppliedOp() { @@ -65,7 +63,6 @@ public long refreshAgeOfLastAppliedOp() { /** * Convience method to change metrics when a batch of operations are applied. - * * @param batchSize */ public void applyBatch(long batchSize) { @@ -75,7 +72,6 @@ public void applyBatch(long batchSize) { /** * Convience method to change metrics when a batch of operations are applied. - * * @param batchSize total number of mutations that are applied/replicated * @param hfileSize total number of hfiles that are applied/replicated */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index 3ab08065ca78..ac5e135fdfaa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import java.util.HashMap; @@ -53,16 +52,14 @@ public class MetricsSource implements BaseSource { /** * Constructor used to register the metrics - * * @param id Name of the source this class is monitoring */ public MetricsSource(String id) { this.id = id; - singleSourceSource = - CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class) - .getSource(id); + singleSourceSource = CompatibilitySingletonFactory + .getInstance(MetricsReplicationSourceFactory.class).getSource(id); globalSourceSource = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); + .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); singleSourceSourceByTable = new HashMap<>(); } @@ -73,8 +70,8 @@ public MetricsSource(String id) { * @param globalSourceSource Class to monitor global-scoped metrics */ public MetricsSource(String id, MetricsReplicationSourceSource singleSourceSource, - MetricsReplicationGlobalSourceSource globalSourceSource, - Map singleSourceSourceByTable) { + MetricsReplicationGlobalSourceSource globalSourceSource, + Map singleSourceSourceByTable) { this.id = id; this.singleSourceSource = singleSourceSource; this.globalSourceSource = globalSourceSource; @@ -96,7 +93,6 @@ public void setAgeOfLastShippedOp(long timestamp, String walGroup) { /** * Update the table level replication metrics per table - * * @param walEntries List of pairs of WAL entry and it's size */ public void updateTableLevelMetrics(List> walEntries) { @@ -109,9 +105,8 @@ public void updateTableLevelMetrics(List> walEntries) { // get the replication metrics source for table at the run time MetricsReplicationTableSource tableSource = this.getSingleSourceSourceByTable() - .computeIfAbsent(tableName, - t -> CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class) - .getTableSource(t)); + .computeIfAbsent(tableName, t -> CompatibilitySingletonFactory + .getInstance(MetricsReplicationSourceFactory.class).getTableSource(t)); tableSource.setLastShippedAge(age); tableSource.incrShippedBytes(entrySize); } @@ -124,10 +119,10 @@ public void updateTableLevelMetrics(List> walEntries) { */ public void setAgeOfLastShippedOpByTable(long timestamp, String tableName) { long age = EnvironmentEdgeManager.currentTime() - timestamp; - this.getSingleSourceSourceByTable().computeIfAbsent( - tableName, t -> CompatibilitySingletonFactory + this.getSingleSourceSourceByTable() + .computeIfAbsent(tableName, t -> CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getTableSource(t)) - .setLastShippedAge(age); + .setLastShippedAge(age); } /** @@ -186,7 +181,6 @@ public void decrSourceInitializing() { /** * Add on the the number of log edits read - * * @param delta the number of log edits read. */ private void incrLogEditsRead(long delta) { @@ -201,7 +195,6 @@ public void incrLogEditsRead() { /** * Add on the number of log edits filtered - * * @param delta the number filtered. */ public void incrLogEditsFiltered(long delta) { @@ -216,7 +209,6 @@ public void incrLogEditsFiltered() { /** * Convience method to apply changes to metrics do to shipping a batch of logs. - * * @param batchSize the size of the batch that was shipped to sinks. */ public void shipBatch(long batchSize, int sizeInBytes) { @@ -234,7 +226,7 @@ public void shipBatch(long batchSize, int sizeInBytes) { * Gets the number of edits not eligible for replication this source queue logs so far. 
* @return logEditsFiltered non-replicable edits filtered from this queue logs. */ - public long getEditsFiltered(){ + public long getEditsFiltered() { return this.singleSourceSource.getEditsFiltered(); } @@ -242,7 +234,7 @@ public long getEditsFiltered(){ * Gets the number of edits eligible for replication read from this source queue logs so far. * @return replicableEdits total number of replicable edits read from this queue logs. */ - public long getReplicableEdits(){ + public long getReplicableEdits() { return this.singleSourceSource.getWALEditsRead() - this.singleSourceSource.getEditsFiltered(); } @@ -256,7 +248,6 @@ public long getOpsShipped() { /** * Convience method to apply changes to metrics do to shipping a batch of logs. - * * @param batchSize the size of the batch that was shipped to sinks. * @param hfiles total number of hfiles shipped to sinks. */ @@ -300,7 +291,6 @@ public int getSizeOfLogQueue() { return singleSourceSource.getSizeOfLogQueue(); } - /** * Get the value of uncleanlyClosedWAL counter * @return uncleanlyClosedWAL @@ -332,9 +322,9 @@ public long getTimeStampNextToReplicate() { } /** - * TimeStamp of next edit targeted for replication. Used for calculating lag, - * as if this timestamp is greater than timestamp of last shipped, it means there's - * at least one edit pending replication. + * TimeStamp of next edit targeted for replication. Used for calculating lag, as if this timestamp + * is greater than timestamp of last shipped, it means there's at least one edit pending + * replication. * @param timeStampNextToReplicate timestamp of next edit in the queue that should be replicated. */ public void setTimeStampNextToReplicate(long timeStampNextToReplicate) { @@ -342,9 +332,9 @@ public void setTimeStampNextToReplicate(long timeStampNextToReplicate) { } public long getReplicationDelay() { - if(getTimestampOfLastShippedOp()>=timeStampNextToReplicate){ + if (getTimestampOfLastShippedOp() >= timeStampNextToReplicate) { return 0; - }else{ + } else { return EnvironmentEdgeManager.currentTime() - timeStampNextToReplicate; } } @@ -420,8 +410,8 @@ public void incrFailedRecoveryQueue() { } /* - Sets the age of oldest log file just for source. - */ + * Sets the age of oldest log file just for source. + */ public void setOldestWalAge(long age) { singleSourceSource.setOldestWalAge(age); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java index efafd09bedce..f0bc869e34ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerActionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,8 +28,10 @@ @InterfaceAudience.Private public interface PeerActionListener { - static final PeerActionListener DUMMY = new PeerActionListener() {}; + static final PeerActionListener DUMMY = new PeerActionListener() { + }; default void peerSyncReplicationStateChange(String peerId, SyncReplicationState from, - SyncReplicationState to, int stage) {} + SyncReplicationState to, int stage) { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java index 2fe3110d7972..dca3a496eb7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,5 +43,5 @@ void transitSyncReplicationPeerState(String peerId, int stage, HRegionServer rs) throws ReplicationException, IOException; void claimReplicationQueue(ServerName crashedServer, String queue) - throws ReplicationException, IOException; + throws ReplicationException, IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java index a50d74a448b3..c36fda8f6970 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -128,9 +128,9 @@ public void updatePeerConfig(String peerId) throws ReplicationException, IOExcep // disable it first and then enable it. 
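The PeerActionListener hunk above is whitespace-only, but the pattern it reformats (a shared no-op DUMMY instance made possible by a default method) is easy to miss when read as a diff. Below is a rough, self-contained sketch of that shape; the type and method names are hypothetical stand-ins, not the HBase interfaces.

// Hypothetical stand-in for the no-op listener pattern reformatted above.
interface StateChangeListener {
  // A single shared instance for callers that do not care about state changes.
  StateChangeListener DUMMY = new StateChangeListener() {
  };

  // The default body means the anonymous DUMMY implementation overrides nothing.
  default void onStateChange(String peerId, String from, String to, int stage) {
  }
}

class StateChangeListenerDemo {
  public static void main(String[] args) {
    StateChangeListener listener = StateChangeListener.DUMMY;
    listener.onStateChange("peer-1", "ACTIVE", "STANDBY", 0); // no-op by design
  }
}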
PeerState newState = peers.refreshPeerState(peerId); // RS need to start work with the new replication config change - if (!ReplicationUtils.isNamespacesAndTableCFsEqual(oldConfig, newConfig) || - oldConfig.isSerial() != newConfig.isSerial() || - (oldState.equals(PeerState.ENABLED) && newState.equals(PeerState.DISABLED))) { + if (!ReplicationUtils.isNamespacesAndTableCFsEqual(oldConfig, newConfig) + || oldConfig.isSerial() != newConfig.isSerial() + || (oldState.equals(PeerState.ENABLED) && newState.equals(PeerState.DISABLED))) { replicationSourceManager.refreshSources(peerId); } success = true; @@ -160,8 +160,9 @@ public void transitSyncReplicationPeerState(String peerId, int stage, HRegionSer SyncReplicationState newSyncReplicationState = peer.getNewSyncReplicationState(); if (stage == 0) { if (newSyncReplicationState != SyncReplicationState.NONE) { - LOG.warn("The new sync replication state for peer {} has already been set to {}, " + - "this should be a retry, give up", peerId, newSyncReplicationState); + LOG.warn("The new sync replication state for peer {} has already been set to {}, " + + "this should be a retry, give up", + peerId, newSyncReplicationState); return; } // refresh the peer state first, as when we transit to STANDBY, we may need to disable the @@ -186,8 +187,8 @@ public void transitSyncReplicationPeerState(String peerId, int stage, HRegionSer } else { if (newSyncReplicationState == SyncReplicationState.NONE) { LOG.warn( - "The new sync replication state for peer {} has already been clear, and the " + - "current state is {}, this should be a retry, give up", + "The new sync replication state for peer {} has already been clear, and the " + + "current state is {}, this should be a retry, give up", peerId, newSyncReplicationState); return; } @@ -210,7 +211,7 @@ public void transitSyncReplicationPeerState(String peerId, int stage, HRegionSer // reset the interrupted flag Thread.currentThread().interrupt(); throw (IOException) new InterruptedIOException( - "Interrupted while waiting for wal roll finish").initCause(e); + "Interrupted while waiting for wal roll finish").initCause(e); } } SyncReplicationState oldState = peer.getSyncReplicationState(); @@ -225,7 +226,7 @@ public void transitSyncReplicationPeerState(String peerId, int stage, HRegionSer @Override public void claimReplicationQueue(ServerName crashedServer, String queue) - throws ReplicationException, IOException { + throws ReplicationException, IOException { replicationSourceManager.claimQueue(crashedServer, queue); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index 526c3e3ec16d..098c9ce8314f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -36,8 +36,8 @@ import org.slf4j.LoggerFactory; /** - * Class that handles the recovered source of a replication stream, which is transfered from - * another dead region server. This will be closed when all logs are pushed to peer cluster. + * Class that handles the recovered source of a replication stream, which is transfered from another + * dead region server. This will be closed when all logs are pushed to peer cluster. 
*/ @InterfaceAudience.Private public class RecoveredReplicationSource extends ReplicationSource { @@ -65,7 +65,7 @@ public void locateRecoveredPaths(String walGroupId) throws IOException { boolean hasPathChanged = false; PriorityBlockingQueue queue = logQueue.getQueue(walGroupId); PriorityBlockingQueue newPaths = new PriorityBlockingQueue(queueSizePerGroup, - new AbstractFSWALProvider.WALStartTimeComparator()); + new AbstractFSWALProvider.WALStartTimeComparator()); pathsLoop: for (Path path : queue) { if (fs.exists(path)) { // still in same location, don't need to do anything newPaths.add(path); @@ -86,9 +86,8 @@ public void locateRecoveredPaths(String walGroupId) throws IOException { LOG.info("NB dead servers : " + deadRegionServers.size()); final Path walDir = CommonFSUtils.getWALRootDir(conf); for (ServerName curDeadServerName : deadRegionServers) { - final Path deadRsDirectory = - new Path(walDir, AbstractFSWALProvider.getWALDirectoryName(curDeadServerName - .getServerName())); + final Path deadRsDirectory = new Path(walDir, + AbstractFSWALProvider.getWALDirectoryName(curDeadServerName.getServerName())); Path[] locs = new Path[] { new Path(deadRsDirectory, path.getName()), new Path( deadRsDirectory.suffix(AbstractFSWALProvider.SPLITTING_EXT), path.getName()) }; for (Path possibleLogLocation : locs) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java index a9c1fa4a423f..9872a600a77c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,7 @@ import org.slf4j.LoggerFactory; /** - * Used by a {@link RecoveredReplicationSource}. + * Used by a {@link RecoveredReplicationSource}. */ @InterfaceAudience.Private public class RecoveredReplicationSourceShipper extends ReplicationSourceShipper { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java index 0c07b1125b9e..094a61dcdd1f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java index 9ad0af2286e4..517301a8eb99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectReplicationRequestStateChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.util.function.BiPredicate; - import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectRequestsFromClientStateChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectRequestsFromClientStateChecker.java index 8e68f0fe3ed0..5dbaea3575d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectRequestsFromClientStateChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RejectRequestsFromClientStateChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ public class RejectRequestsFromClientStateChecker implements BiPredicate { private static final RejectRequestsFromClientStateChecker INST = - new RejectRequestsFromClientStateChecker(); + new RejectRequestsFromClientStateChecker(); @Override public boolean test(SyncReplicationState state, SyncReplicationState newState) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java index fa4167b16789..65289b6262c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -85,7 +85,7 @@ protected void doCall() throws Exception { @Override protected void initParameter(byte[] parameter) throws InvalidProtocolBufferException { ReplaySyncReplicationWALParameter param = - ReplaySyncReplicationWALParameter.parseFrom(parameter); + ReplaySyncReplicationWALParameter.parseFrom(parameter); this.peerId = param.getPeerId(); param.getWalList().forEach(this.wals::add); this.batchSize = rs.getConfiguration().getLong(REPLAY_SYNC_REPLICATION_WAL_BATCH_SIZE, @@ -104,9 +104,9 @@ private void replayWAL(String wal) throws IOException { Pair pair = ReplicationProtobufUtil .buildReplicateWALEntryRequest(entries.toArray(new Entry[entries.size()])); ReplicateWALEntryRequest request = pair.getFirst(); - rs.getReplicationSinkService().replicateLogEntries(request.getEntryList(), - pair.getSecond(), request.getReplicationClusterId(), - request.getSourceBaseNamespaceDirPath(), request.getSourceHFileArchiveDirPath()); + rs.getReplicationSinkService().replicateLogEntries(request.getEntryList(), pair.getSecond(), + request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(), + request.getSourceHFileArchiveDirPath()); // Read next entries. entries = readWALEntries(reader); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 4cf2b495fa1a..ce97b41917c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -53,8 +53,7 @@ */ @InterfaceAudience.Private public class Replication implements ReplicationSourceService { - private static final Logger LOG = - LoggerFactory.getLogger(Replication.class); + private static final Logger LOG = LoggerFactory.getLogger(Replication.class); private boolean isReplicationForBulkLoadDataEnabled; private ReplicationSourceManager replicationManager; private ReplicationQueueStorage queueStorage; @@ -82,13 +81,13 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir this.server = server; this.conf = this.server.getConfiguration(); this.isReplicationForBulkLoadDataEnabled = - ReplicationUtils.isReplicationForBulkLoadDataEnabled(this.conf); + ReplicationUtils.isReplicationForBulkLoadDataEnabled(this.conf); if (this.isReplicationForBulkLoadDataEnabled) { if (conf.get(HConstants.REPLICATION_CLUSTER_ID) == null || conf.get(HConstants.REPLICATION_CLUSTER_ID).isEmpty()) { - throw new IllegalArgumentException(HConstants.REPLICATION_CLUSTER_ID - + " cannot be null/empty when " + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY - + " is set to true."); + throw new IllegalArgumentException( + HConstants.REPLICATION_CLUSTER_ID + " cannot be null/empty when " + + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY + " is set to true."); } } @@ -111,15 +110,15 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir this.globalMetricsSource = CompatibilitySingletonFactory .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource(); this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, conf, - this.server, fs, logDir, oldLogDir, clusterId, walFactory, mapping, globalMetricsSource); + this.server, fs, logDir, oldLogDir, clusterId, walFactory, mapping, globalMetricsSource); 
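Behaviorally, the Replication.initialize() hunk above still refuses to start bulk-load replication unless a replication cluster id is configured. A minimal sketch of that guard using a plain Hadoop Configuration follows; the literal key strings are assumed to correspond to the HConstants fields referenced in the patch.

import org.apache.hadoop.conf.Configuration;

// Sketch of the guard shown above; the keys are written out literally under the stated assumption.
public class BulkLoadReplicationConfigCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean("hbase.replication.bulkload.enabled", true);
    // conf.set("hbase.replication.cluster.id", "source-cluster"); // required once bulk load replication is on

    boolean bulkLoadEnabled = conf.getBoolean("hbase.replication.bulkload.enabled", false);
    String clusterId = conf.get("hbase.replication.cluster.id");
    if (bulkLoadEnabled && (clusterId == null || clusterId.isEmpty())) {
      throw new IllegalArgumentException("hbase.replication.cluster.id cannot be null/empty when "
        + "hbase.replication.bulkload.enabled is set to true.");
    }
  }
}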
this.syncReplicationPeerInfoProvider = new SyncReplicationPeerInfoProviderImpl(replicationPeers, mapping); PeerActionListener peerActionListener = PeerActionListener.DUMMY; // Get the user-space WAL provider - WALProvider walProvider = walFactory != null? walFactory.getWALProvider(): null; + WALProvider walProvider = walFactory != null ? walFactory.getWALProvider() : null; if (walProvider != null) { walProvider - .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager)); + .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager)); if (walProvider instanceof SyncReplicationWALProvider) { SyncReplicationWALProvider syncWALProvider = (SyncReplicationWALProvider) walProvider; peerActionListener = syncWALProvider; @@ -136,12 +135,11 @@ public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir p.getSyncReplicationState(), p.getNewSyncReplicationState(), 0)); } } - this.statsPeriodInSecond = - this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); + this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); this.replicationLoad = new ReplicationLoad(); this.peerProcedureHandler = - new PeerProcedureHandlerImpl(replicationManager, peerActionListener); + new PeerProcedureHandlerImpl(replicationManager, peerActionListener); } @Override @@ -158,15 +156,14 @@ public void stopReplicationService() { } /** - * If replication is enabled and this cluster is a master, - * it starts + * If replication is enabled and this cluster is a master, it starts */ @Override public void startReplicationService() throws IOException { this.replicationManager.init(); - this.server.getChoreService().scheduleChore( - new ReplicationStatisticsChore("ReplicationSourceStatistics", server, - (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); + this.server.getChoreService() + .scheduleChore(new ReplicationStatisticsChore("ReplicationSourceStatistics", server, + (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond))); LOG.info("{} started", this.server.toString()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java index 6fb21dcfbcc0..f3a426799ff2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java @@ -7,23 +7,20 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.replication.regionserver; +import java.util.ArrayList; import java.util.Date; import java.util.List; -import java.util.ArrayList; - import org.apache.hadoop.hbase.util.Strings; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; @@ -57,7 +54,7 @@ public void buildReplicationLoad(final List sources, if (sinkMetrics != null) { // build the SinkLoad ClusterStatusProtos.ReplicationLoadSink.Builder rLoadSinkBuild = - ClusterStatusProtos.ReplicationLoadSink.newBuilder(); + ClusterStatusProtos.ReplicationLoadSink.newBuilder(); rLoadSinkBuild.setAgeOfLastAppliedOp(sinkMetrics.getAgeOfLastAppliedOp()); rLoadSinkBuild.setTimeStampsOfLastAppliedOp(sinkMetrics.getTimestampOfLastAppliedOp()); rLoadSinkBuild.setTimestampStarted(sinkMetrics.getStartTimestamp()); @@ -90,12 +87,12 @@ public void buildReplicationLoad(final List sources, rLoadSourceBuild.setTimeStampOfNextToReplicate(timeStampOfNextToReplicate); rLoadSourceBuild.setEditsRead(editsRead); rLoadSourceBuild.setOPsShipped(oPsShipped); - if (source instanceof ReplicationSource){ - ReplicationSource replSource = (ReplicationSource)source; + if (source instanceof ReplicationSource) { + ReplicationSource replSource = (ReplicationSource) source; rLoadSourceBuild.setRecovered(replSource.getReplicationQueueInfo().isQueueRecovered()); rLoadSourceBuild.setQueueId(replSource.getReplicationQueueInfo().getQueueId()); rLoadSourceBuild.setRunning(replSource.isWorkerRunning()); - rLoadSourceBuild.setEditsSinceRestart(timeStampOfNextToReplicate>0); + rLoadSourceBuild.setEditsSinceRestart(timeStampOfNextToReplicate > 0); } this.replicationLoadSourceEntries.add(rLoadSourceBuild.build()); @@ -109,15 +106,13 @@ public void buildReplicationLoad(final List sources, public String sourceToString() { StringBuilder sb = new StringBuilder(); - for (ClusterStatusProtos.ReplicationLoadSource rls : - this.replicationLoadSourceEntries) { + for (ClusterStatusProtos.ReplicationLoadSource rls : this.replicationLoadSourceEntries) { sb = Strings.appendKeyValue(sb, "\n PeerID", rls.getPeerID()); sb = Strings.appendKeyValue(sb, "AgeOfLastShippedOp", rls.getAgeOfLastShippedOp()); sb = Strings.appendKeyValue(sb, "SizeOfLogQueue", rls.getSizeOfLogQueue()); - sb = - Strings.appendKeyValue(sb, "TimestampsOfLastShippedOp", - (new Date(rls.getTimeStampOfLastShippedOp()).toString())); + sb = Strings.appendKeyValue(sb, "TimestampsOfLastShippedOp", + (new Date(rls.getTimeStampOfLastShippedOp()).toString())); sb = Strings.appendKeyValue(sb, "Replication Lag", rls.getReplicationLag()); } @@ -132,12 +127,10 @@ public String sinkToString() { if (this.replicationLoadSink == null) return null; StringBuilder sb = new StringBuilder(); - sb = - Strings.appendKeyValue(sb, "AgeOfLastAppliedOp", - this.replicationLoadSink.getAgeOfLastAppliedOp()); - sb = - Strings.appendKeyValue(sb, "TimestampsOfLastAppliedOp", - (new Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString())); + sb = Strings.appendKeyValue(sb, "AgeOfLastAppliedOp", + this.replicationLoadSink.getAgeOfLastAppliedOp()); + sb = Strings.appendKeyValue(sb, "TimestampsOfLastAppliedOp", + (new Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString())); return sb.toString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java index 
b7e437f46241..db9205aba2d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; import java.util.List; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; @@ -53,14 +50,14 @@ public Optional getRegionObserver() { } @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="NPE should never happen; if it does it is a bigger issue") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "NPE should never happen; if it does it is a bigger issue") public void preCommitStoreFile(final ObserverContext ctx, final byte[] family, final List> pairs) throws IOException { RegionCoprocessorEnvironment env = ctx.getEnvironment(); Configuration c = env.getConfiguration(); - if (pairs == null || pairs.isEmpty() || - !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + if (pairs == null || pairs.isEmpty() + || !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) { LOG.debug("Skipping recording bulk load entries in preCommitStoreFile for bulkloaded " + "data replication."); @@ -69,8 +66,8 @@ public void preCommitStoreFile(final ObserverContext - * This class is responsible for replicating the edits coming - * from another cluster. - *
- * This replication process is currently waiting for the edits to be applied - * before the method can return. This means that the replication of edits - * is synchronized (after reading from WALs in ReplicationSource) and that a - * single region server cannot receive edits from two sources at the same time - *
+ * This class is responsible for replicating the edits coming from another cluster. + *
+ *
+ * This replication process is currently waiting for the edits to be applied before the method can + * return. This means that the replication of edits is synchronized (after reading from WALs in + * ReplicationSource) and that a single region server cannot receive edits from two sources at the + * same time + *
+ *
* This class uses the native HBase client in order to replicate entries. *
          - * * TODO make this class more like ReplicationSource wrt log handling */ @InterfaceAudience.Private @@ -104,11 +106,10 @@ public class ReplicationSink { * @param conf conf object * @throws IOException thrown when HDFS goes bad or bad file name */ - public ReplicationSink(Configuration conf) - throws IOException { + public ReplicationSink(Configuration conf) throws IOException { this.conf = HBaseConfiguration.create(conf); - rowSizeWarnThreshold = conf.getInt( - HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); + rowSizeWarnThreshold = + conf.getInt(HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); decorateConf(); this.metrics = new MetricsSink(); this.walEntrySinkFilter = setupWALEntrySinkFilter(); @@ -129,8 +130,8 @@ private WALEntrySinkFilter setupWALEntrySinkFilter() throws IOException { this.conf.getClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY, null); WALEntrySinkFilter filter = null; try { - filter = walEntryFilterClass == null? null: - (WALEntrySinkFilter)walEntryFilterClass.getDeclaredConstructor().newInstance(); + filter = walEntryFilterClass == null ? null + : (WALEntrySinkFilter) walEntryFilterClass.getDeclaredConstructor().newInstance(); } catch (Exception e) { LOG.warn("Failed to instantiate " + walEntryFilterClass); } @@ -141,14 +142,14 @@ private WALEntrySinkFilter setupWALEntrySinkFilter() throws IOException { } /** - * decorate the Configuration object to make replication more receptive to delays: - * lessen the timeout and numTries. + * decorate the Configuration object to make replication more receptive to delays: lessen the + * timeout and numTries. */ private void decorateConf() { this.conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - this.conf.getInt("replication.sink.client.retries.number", 4)); + this.conf.getInt("replication.sink.client.retries.number", 4)); this.conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - this.conf.getInt("replication.sink.client.ops.timeout", 10000)); + this.conf.getInt("replication.sink.client.ops.timeout", 10000)); String replicationCodec = this.conf.get(HConstants.REPLICATION_CODEC_CONF_KEY); if (StringUtils.isNotEmpty(replicationCodec)) { this.conf.set(HConstants.RPC_CODEC_CONF_KEY, replicationCodec); @@ -211,24 +212,23 @@ public void replicateEntries(List entries, final CellScanner cells, // Handle bulk load hfiles replication if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) { BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell); - if(bld.getReplicate()) { + if (bld.getReplicate()) { if (bulkLoadsPerClusters == null) { bulkLoadsPerClusters = new HashMap<>(); } // Map of table name Vs list of pair of family and list of // hfile paths from its namespace - Map>>> bulkLoadHFileMap = - bulkLoadsPerClusters.computeIfAbsent(bld.getClusterIdsList(), k -> new HashMap<>()); + Map>>> bulkLoadHFileMap = bulkLoadsPerClusters + .computeIfAbsent(bld.getClusterIdsList(), k -> new HashMap<>()); buildBulkLoadHFileMap(bulkLoadHFileMap, table, bld); } } else { // Handle wal replication if (isNewRowOrType(previousCell, cell)) { // Create new mutation - mutation = - CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()); + mutation = CellUtil.isDelete(cell) + ? 
new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); List clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size()); for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) { clusterIds.add(toUUID(clusterId)); @@ -258,9 +258,9 @@ public void replicateEntries(List entries, final CellScanner cells, LOG.debug("Finished replicating mutations."); } - if(bulkLoadsPerClusters != null) { - for (Entry, Map>>>> entry : - bulkLoadsPerClusters.entrySet()) { + if (bulkLoadsPerClusters != null) { + for (Entry, Map>>>> entry : bulkLoadsPerClusters + .entrySet()) { Map>>> bulkLoadHFileMap = entry.getValue(); if (bulkLoadHFileMap != null && !bulkLoadHFileMap.isEmpty()) { LOG.debug("Replicating {} bulk loaded data", entry.getKey().toString()); @@ -304,7 +304,7 @@ private void buildBulkLoadHFileMap( List>> familyHFilePathsList = bulkLoadHFileMap.get(tableName); if (familyHFilePathsList != null) { boolean foundFamily = false; - for (Pair> familyHFilePathsPair : familyHFilePathsList) { + for (Pair> familyHFilePathsPair : familyHFilePathsList) { if (Bytes.equals(familyHFilePathsPair.getFirst(), family)) { // Found family already present, just add the path to the existing list familyHFilePathsPair.getSecond().add(pathToHfileFromNS); @@ -354,8 +354,8 @@ private String getHFilePath(TableName table, BulkLoadDescriptor bld, String stor * @return True if we have crossed over onto a new row or type */ private boolean isNewRowOrType(final Cell previousCell, final Cell cell) { - return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() || - !CellUtil.matchingRows(previousCell, cell); + return previousCell == null || previousCell.getTypeByte() != cell.getTypeByte() + || !CellUtil.matchingRows(previousCell, cell); } private java.util.UUID toUUID(final HBaseProtos.UUID uuid) { @@ -363,12 +363,11 @@ private java.util.UUID toUUID(final HBaseProtos.UUID uuid) { } /** - * Simple helper to a map from key to (a list of) values - * TODO: Make a general utility method + * Simple helper to a map from key to (a list of) values TODO: Make a general utility method * @return the list of values corresponding to key1 and key2 */ - private List addToHashMultiMap(Map>> map, K1 key1, - K2 key2, V value) { + private List addToHashMultiMap(Map>> map, K1 key1, K2 key2, + V value) { Map> innerMap = map.computeIfAbsent(key1, k -> new HashMap<>()); List values = innerMap.computeIfAbsent(key2, k -> new ArrayList<>()); values.add(value); @@ -393,7 +392,6 @@ public void stopReplicationSinkServices() { } } - /** * Do the changes and handle the pool * @param tableName table to insert into @@ -446,14 +444,14 @@ private AsyncClusterConnection getConnection() throws IOException { /** * Get a string representation of this sink's metrics - * @return string with the total replicated edits count and the date - * of the last edit that was applied + * @return string with the total replicated edits count and the date of the last edit that was + * applied */ public String getStats() { long total = this.totalReplicatedEdits.get(); return total == 0 ? 
"" - : "Sink: " + "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + - ", total replicated edits: " + total; + : "Sink: " + "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + + ", total replicated edits: " + total; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 236e109946e0..321f525bd2d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -70,14 +70,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Class that handles the source of a replication stream. - * Currently does not handle more than 1 slave cluster. - * For each slave cluster it selects a random number of peers - * using a replication ratio. For example, if replication ration = 0.1 - * and slave cluster has 100 region servers, 10 will be selected. + * Class that handles the source of a replication stream. Currently does not handle more than 1 + * slave cluster. For each slave cluster it selects a random number of peers using a replication + * ratio. For example, if replication ration = 0.1 and slave cluster has 100 region servers, 10 will + * be selected. *
- * A stream is considered down when we cannot contact a region server on the - * peer cluster for more than 55 seconds by default. + * A stream is considered down when we cannot contact a region server on the peer cluster for more + * than 55 seconds by default. *
          */ @InterfaceAudience.Private @@ -116,12 +115,12 @@ public class ReplicationSource implements ReplicationSourceInterface { private volatile ReplicationEndpoint replicationEndpoint; private boolean abortOnError; - //This is needed for the startup loop to identify when there's already - //an initialization happening (but not finished yet), - //so that it doesn't try submit another initialize thread. - //NOTE: this should only be set to false at the end of initialize method, prior to return. + // This is needed for the startup loop to identify when there's already + // an initialization happening (but not finished yet), + // so that it doesn't try submit another initialize thread. + // NOTE: this should only be set to false at the end of initialize method, prior to return. private AtomicBoolean startupOngoing = new AtomicBoolean(false); - //Flag that signalizes uncaught error happening while starting up the source + // Flag that signalizes uncaught error happening while starting up the source // and a retry should be attempted private AtomicBoolean retryStartup = new AtomicBoolean(false); @@ -141,24 +140,24 @@ public class ReplicationSource implements ReplicationSourceInterface { private AtomicLong totalBufferUsed; public static final String WAIT_ON_ENDPOINT_SECONDS = - "hbase.replication.wait.on.endpoint.seconds"; + "hbase.replication.wait.on.endpoint.seconds"; public static final int DEFAULT_WAIT_ON_ENDPOINT_SECONDS = 30; private int waitOnEndpointSeconds = -1; private Thread initThread; /** - * WALs to replicate. - * Predicate that returns 'true' for WALs to replicate and false for WALs to skip. + * WALs to replicate. Predicate that returns 'true' for WALs to replicate and false for WALs to + * skip. */ private final Predicate filterInWALs; /** - * Base WALEntry filters for this class. Unmodifiable. Set on construction. - * Filters *out* edits we do not want replicated, passed on to replication endpoints. - * This is the basic set. Down in #initializeWALEntryFilter this set is added to the end of - * the WALEntry filter chain. These are put after those that we pick up from the configured - * endpoints and other machinations to create the final {@link #walEntryFilter}. + * Base WALEntry filters for this class. Unmodifiable. Set on construction. Filters *out* edits we + * do not want replicated, passed on to replication endpoints. This is the basic set. Down in + * #initializeWALEntryFilter this set is added to the end of the WALEntry filter chain. These are + * put after those that we pick up from the configured endpoints and other machinations to create + * the final {@link #walEntryFilter}. * @see WALEntryFilter */ private final List baseFilterOutWALEntries; @@ -166,14 +165,14 @@ public class ReplicationSource implements ReplicationSourceInterface { ReplicationSource() { // Default, filters *in* all WALs but meta WALs & filters *out* all WALEntries of System Tables. this(p -> !AbstractFSWALProvider.isMetaFile(p), - Lists.newArrayList(new SystemTableWALEntryFilter())); + Lists.newArrayList(new SystemTableWALEntryFilter())); } /** - * @param replicateWAL Pass a filter to run against WAL Path; filter *in* WALs to Replicate; - * i.e. return 'true' if you want to replicate the content of the WAL. + * @param replicateWAL Pass a filter to run against WAL Path; filter *in* WALs to Replicate; i.e. + * return 'true' if you want to replicate the content of the WAL. 
* @param baseFilterOutWALEntries Base set of filters you want applied always; filters *out* - * WALEntries so they never make it out of this ReplicationSource. + * WALEntries so they never make it out of this ReplicationSource. */ ReplicationSource(Predicate replicateWAL, List baseFilterOutWALEntries) { this.filterInWALs = replicateWAL; @@ -198,12 +197,16 @@ public void init(Configuration conf, FileSystem fs, ReplicationSourceManager man this.server = server; this.conf = HBaseConfiguration.create(conf); this.waitOnEndpointSeconds = - this.conf.getInt(WAIT_ON_ENDPOINT_SECONDS, DEFAULT_WAIT_ON_ENDPOINT_SECONDS); + this.conf.getInt(WAIT_ON_ENDPOINT_SECONDS, DEFAULT_WAIT_ON_ENDPOINT_SECONDS); decorateConf(); - this.sleepForRetries = - this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second - this.maxRetriesMultiplier = - this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per + this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); // 1 + // second + this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 + // minutes + // @ + // 1 + // sec + // per this.queueSizePerGroup = this.conf.getInt("hbase.regionserver.maxlogs", 32); this.logQueue = new ReplicationSourceLogQueue(conf, metrics, this); this.queueStorage = queueStorage; @@ -223,8 +226,7 @@ public void init(Configuration conf, FileSystem fs, ReplicationSourceManager man this.totalBufferUsed = manager.getTotalBufferUsed(); this.walFileLengthProvider = walFileLengthProvider; - this.abortOnError = this.conf.getBoolean("replication.source.regionserver.abort", - true); + this.abortOnError = this.conf.getBoolean("replication.source.regionserver.abort", true); LOG.info("queueId={}, ReplicationSource: {}, currentBandwidth={}", queueId, replicationPeer.getId(), this.currentBandwidth); @@ -294,16 +296,14 @@ private ReplicationEndpoint createReplicationEndpoint() } else { try { replicationEndpoint = Class.forName(replicationEndpointImpl) - .asSubclass(ReplicationEndpoint.class) - .getDeclaredConstructor() - .newInstance(); + .asSubclass(ReplicationEndpoint.class).getDeclaredConstructor().newInstance(); } catch (NoSuchMethodException | InvocationTargetException e) { throw new IllegalArgumentException(e); } } if (rsServerHost != null) { ReplicationEndpoint newReplicationEndPoint = - rsServerHost.postCreateReplicationEndPoint(replicationEndpoint); + rsServerHost.postCreateReplicationEndPoint(replicationEndpoint); if (newReplicationEndPoint != null) { // Override the newly created endpoint from the hook with configured end point replicationEndpoint = newReplicationEndPoint; @@ -318,9 +318,9 @@ private void initAndStartReplicationEndpoint(ReplicationEndpoint replicationEndp if (server instanceof HRegionServer) { tableDescriptors = ((HRegionServer) server).getTableDescriptors(); } - replicationEndpoint - .init(new ReplicationEndpoint.Context(server, conf, replicationPeer.getConfiguration(), fs, - replicationPeer.getId(), clusterId, replicationPeer, metrics, tableDescriptors, server)); + replicationEndpoint.init( + new ReplicationEndpoint.Context(server, conf, replicationPeer.getConfiguration(), fs, + replicationPeer.getId(), clusterId, replicationPeer, metrics, tableDescriptors, server)); replicationEndpoint.start(); replicationEndpoint.awaitRunning(waitOnEndpointSeconds, TimeUnit.SECONDS); } @@ -347,11 +347,11 @@ private void tryStartNewShipper(String walGroupId) { ReplicationSourceWALReader walReader = 
createNewWALReader(walGroupId, worker.getStartPosition()); Threads.setDaemonThreadRunning( - walReader, Thread.currentThread().getName() - + ".replicationSource.wal-reader." + walGroupId + "," + queueId, - (t,e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); + walReader, Thread.currentThread().getName() + ".replicationSource.wal-reader." + + walGroupId + "," + queueId, + (t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); worker.setWALReader(walReader); - worker.startup((t,e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); + worker.startup((t, e) -> this.uncaughtException(t, e, this.manager, this.getPeerId())); return worker; } }); @@ -380,13 +380,9 @@ public Map getWalGroupStatus() { LOG.warn("{} No replication ongoing, waiting for new log", logPeerId()); } ReplicationStatus.ReplicationStatusBuilder statusBuilder = ReplicationStatus.newBuilder(); - statusBuilder.withPeerId(this.getPeerId()) - .withQueueSize(queueSize) - .withWalGroup(walGroupId) - .withCurrentPath(currentPath) - .withCurrentPosition(shipper.getCurrentPosition()) - .withFileSize(fileSize) - .withAgeOfLastShippedOp(ageOfLastShippedOp) + statusBuilder.withPeerId(this.getPeerId()).withQueueSize(queueSize).withWalGroup(walGroupId) + .withCurrentPath(currentPath).withCurrentPosition(shipper.getCurrentPosition()) + .withFileSize(fileSize).withAgeOfLastShippedOp(ageOfLastShippedOp) .withReplicationDelay(replicationDelay); sourceReplicationStatus.put(this.getPeerId() + "=>" + walGroupId, statusBuilder.build()); } @@ -414,10 +410,10 @@ protected ReplicationSourceShipper createNewShipper(String walGroupId) { private ReplicationSourceWALReader createNewWALReader(String walGroupId, long startPosition) { return replicationPeer.getPeerConfig().isSerial() - ? new SerialReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, - this, walGroupId) - : new ReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, - this, walGroupId); + ? 
new SerialReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, + this, walGroupId) + : new ReplicationSourceWALReader(fs, conf, logQueue, startPosition, walEntryFilter, this, + walGroupId); } /** @@ -428,15 +424,14 @@ WALEntryFilter getWalEntryFilter() { return walEntryFilter; } - protected final void uncaughtException(Thread t, Throwable e, - ReplicationSourceManager manager, String peerId) { + protected final void uncaughtException(Thread t, Throwable e, ReplicationSourceManager manager, + String peerId) { OOMEChecker.exitIfOOME(e, getClass().getSimpleName()); - LOG.error("Unexpected exception in {} currentPath={}", - t.getName(), getCurrentPath(), e); - if(abortOnError){ + LOG.error("Unexpected exception in {} currentPath={}", t.getName(), getCurrentPath(), e); + if (abortOnError) { server.abort("Unexpected exception in " + t.getName(), e); } - if(manager != null){ + if (manager != null) { while (true) { try { LOG.info("Refreshing replication sources now due to previous error on thread: {}", @@ -445,8 +440,7 @@ protected final void uncaughtException(Thread t, Throwable e, break; } catch (IOException e1) { LOG.error("Replication sources refresh failed.", e1); - sleepForRetries("Sleeping before try refreshing sources again", - maxRetriesMultiplier); + sleepForRetries("Sleeping before try refreshing sources again", maxRetriesMultiplier); } } } @@ -503,12 +497,12 @@ private long getCurrentBandwidth() { protected boolean sleepForRetries(String msg, int sleepMultiplier) { try { if (LOG.isTraceEnabled()) { - LOG.trace("{} {}, sleeping {} times {}", - logPeerId(), msg, sleepForRetries, sleepMultiplier); + LOG.trace("{} {}, sleeping {} times {}", logPeerId(), msg, sleepForRetries, + sleepMultiplier); } Thread.sleep(this.sleepForRetries * sleepMultiplier); } catch (InterruptedException e) { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("{} Interrupted while sleeping between retries", logPeerId()); } Thread.currentThread().interrupt(); @@ -559,7 +553,7 @@ private void initialize() { for (;;) { peerClusterId = replicationEndpoint.getPeerUUID(); if (this.isSourceActive() && peerClusterId == null) { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("{} Could not connect to Peer ZK. 
Sleeping for {} millis", logPeerId(), (this.sleepForRetries * sleepMultiplier)); } @@ -571,17 +565,16 @@ private void initialize() { } } - if(!this.isSourceActive()) { + if (!this.isSourceActive()) { retryStartup.set(!this.abortOnError); setSourceStartupStatus(false); throw new IllegalStateException("Source should be active."); } - LOG.info("{} queueId={} (queues={}) is replicating from cluster={} to cluster={}", - logPeerId(), this.replicationQueueInfo.getQueueId(), logQueue.getNumQueues(), clusterId, - peerClusterId); + LOG.info("{} queueId={} (queues={}) is replicating from cluster={} to cluster={}", logPeerId(), + this.replicationQueueInfo.getQueueId(), logQueue.getNumQueues(), clusterId, peerClusterId); initializeWALEntryFilter(peerClusterId); // Start workers - for (String walGroupId: logQueue.getQueues().keySet()) { + for (String walGroupId : logQueue.getQueues().keySet()) { tryStartNewShipper(walGroupId); } setSourceStartupStatus(false); @@ -605,22 +598,21 @@ public ReplicationSourceInterface startup() { setSourceStartupStatus(true); initThread = new Thread(this::initialize); Threads.setDaemonThreadRunning(initThread, - Thread.currentThread().getName() + ".replicationSource," + this.queueId, - (t,e) -> { - //if first initialization attempt failed, and abortOnError is false, we will - //keep looping in this thread until initialize eventually succeeds, - //while the server main startup one can go on with its work. + Thread.currentThread().getName() + ".replicationSource," + this.queueId, (t, e) -> { + // if first initialization attempt failed, and abortOnError is false, we will + // keep looping in this thread until initialize eventually succeeds, + // while the server main startup one can go on with its work. sourceRunning = false; uncaughtException(t, e, null, null); retryStartup.set(!this.abortOnError); do { - if(retryStartup.get()) { + if (retryStartup.get()) { this.sourceRunning = true; setSourceStartupStatus(true); retryStartup.set(false); try { initialize(); - } catch(Throwable error){ + } catch (Throwable error) { setSourceStartupStatus(false); uncaughtException(t, error, null, null); retryStartup.set(!this.abortOnError); @@ -646,13 +638,12 @@ public void terminate(String reason, Exception cause, boolean clearMetrics) { terminate(reason, cause, clearMetrics, true); } - public void terminate(String reason, Exception cause, boolean clearMetrics, - boolean join) { + public void terminate(String reason, Exception cause, boolean clearMetrics, boolean join) { if (cause == null) { LOG.info("{} Closing source {} because: {}", logPeerId(), this.queueId, reason); } else { - LOG.error(String.format("%s Closing source %s because an error occurred: %s", - logPeerId(), this.queueId, reason), cause); + LOG.error(String.format("%s Closing source %s because an error occurred: %s", logPeerId(), + this.queueId, reason), cause); } this.sourceRunning = false; if (initThread != null && Thread.currentThread() != initThread) { @@ -666,7 +657,7 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, for (ReplicationSourceShipper worker : workers) { worker.stopWorker(); - if(worker.entryReader != null) { + if (worker.entryReader != null) { worker.entryReader.setReaderRunning(false); } } @@ -694,8 +685,8 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, } } if (!server.isAborted() && !server.isStopped()) { - //If server is running and worker is already stopped but there was still entries batched, - //we need to clear buffer used for non processed 
entries + // If server is running and worker is already stopped but there was still entries batched, + // we need to clear buffer used for non processed entries worker.clearWALEntryBatch(); } } @@ -711,7 +702,8 @@ public void terminate(String reason, Exception cause, boolean clearMetrics, TimeUnit.MILLISECONDS); } catch (TimeoutException te) { LOG.warn("{} Got exception while waiting for endpoint to shutdown " - + "for replication source : {}", logPeerId(), this.queueId, te); + + "for replication source : {}", + logPeerId(), this.queueId, te); } } } @@ -748,9 +740,9 @@ public ReplicationQueueInfo getReplicationQueueInfo() { return replicationQueueInfo; } - public boolean isWorkerRunning(){ - for(ReplicationSourceShipper worker : this.workerThreads.values()){ - if(worker.isActive()){ + public boolean isWorkerRunning() { + for (ReplicationSourceShipper worker : this.workerThreads.values()) { + if (worker.isActive()) { return worker.isActive(); } } @@ -784,7 +776,7 @@ public MetricsSource getSourceMetrics() { } @Override - //offsets totalBufferUsed by deducting shipped batchSize. + // offsets totalBufferUsed by deducting shipped batchSize. public void postShipEdits(List entries, int batchSize) { if (throttler.isEnabled()) { throttler.addPushSize(batchSize); @@ -823,7 +815,7 @@ void removeWorker(ReplicationSourceShipper worker) { workerThreads.remove(worker.walGroupId, worker); } - public String logPeerId(){ + public String logPeerId() { return "peerId=" + this.getPeerId() + ","; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java index b05590279e9b..2943c9c5c31d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceFactory.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,14 +24,15 @@ import org.slf4j.LoggerFactory; /** - * Constructs a {@link ReplicationSourceInterface} - * Note, not used to create specialized ReplicationSources + * Constructs a {@link ReplicationSourceInterface} Note, not used to create specialized + * ReplicationSources */ @InterfaceAudience.Private public final class ReplicationSourceFactory { private static final Logger LOG = LoggerFactory.getLogger(ReplicationSourceFactory.class); - private ReplicationSourceFactory() {} + private ReplicationSourceFactory() { + } static ReplicationSourceInterface create(Configuration conf, String queueId) { ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(queueId); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index 27e4b79c141b..0187d6ded58e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -105,7 +104,6 @@ void addHFileRefs(TableName tableName, byte[] family, List> pai /** * Get the queue id that the source is replicating to - * * @return queue id */ String getQueueId(); @@ -125,8 +123,7 @@ default String getPeerId() { ReplicationPeer getPeer(); /** - * Get a string representation of the current statistics - * for this source + * Get a string representation of the current statistics for this source * @return printable stats */ String getStats(); @@ -144,6 +141,7 @@ default boolean isPeerEnabled() { default boolean isSyncReplication() { return getPeer().getPeerConfig().isSyncReplication(); } + /** * @return active or not */ @@ -210,10 +208,10 @@ default boolean isRecovered() { ReplicationQueueStorage getReplicationQueueStorage(); /** - * Log the current position to storage. Also clean old logs from the replication queue. - * Use to bypass the default call to - * {@link ReplicationSourceManager#logPositionAndCleanOldLogs(ReplicationSourceInterface, - * WALEntryBatch)} whem implementation does not need to persist state to backing storage. + * Log the current position to storage. Also clean old logs from the replication queue. Use to + * bypass the default call to + * {@link ReplicationSourceManager#logPositionAndCleanOldLogs(ReplicationSourceInterface, WALEntryBatch)} + * whem implementation does not need to persist state to backing storage. * @param entryBatch the wal entry batch we just shipped * @return The instance of queueStorage used by this ReplicationSource. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java index 4d89edef5fdc..0f606a397854 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceLogQueue.java @@ -31,8 +31,8 @@ import org.slf4j.LoggerFactory; /* - Class that does enqueueing/dequeuing of wal at one place so that we can update the metrics - just at one place. + * Class that does enqueueing/dequeuing of wal at one place so that we can update the metrics just + * at one place. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -69,7 +69,7 @@ public boolean enqueueLog(Path wal, String walGroupId) { PriorityBlockingQueue queue = queues.get(walGroupId); if (queue == null) { queue = new PriorityBlockingQueue<>(queueSizePerGroup, - new AbstractFSWALProvider.WALStartTimeComparator()); + new AbstractFSWALProvider.WALStartTimeComparator()); // make sure that we do not use an empty queue when setting up a ReplicationSource, otherwise // the shipper may quit immediately queue.put(wal); @@ -85,9 +85,9 @@ public boolean enqueueLog(Path wal, String walGroupId) { // This will wal a warning for each new wal that gets created above the warn threshold int queueSize = queue.size(); if (queueSize > this.logQueueWarnThreshold) { - LOG.warn("{} WAL group {} queue size: {} exceeds value of " + - "replication.source.log.queue.warn {}", source.logPeerId(), walGroupId, queueSize, - logQueueWarnThreshold); + LOG.warn( + "{} WAL group {} queue size: {} exceeds value of " + "replication.source.log.queue.warn {}", + source.logPeerId(), walGroupId, queueSize, logQueueWarnThreshold); } return exists; } @@ -116,9 +116,8 @@ public Map> getQueues() { } /** - * Return queue for the given walGroupId - * Please don't add or remove elements from the returned queue. - * Use @enqueueLog and @remove methods respectively. + * Return queue for the given walGroupId Please don't add or remove elements from the returned + * queue. Use @enqueueLog and @remove methods respectively. * @param walGroupId walGroupId */ public PriorityBlockingQueue getQueue(String walGroupId) { @@ -156,7 +155,7 @@ public void clear(String walGroupId) { } /* - Returns the age of oldest wal. + * Returns the age of oldest wal. */ long getOldestWalAge() { long now = EnvironmentEdgeManager.currentTime(); @@ -171,8 +170,8 @@ long getOldestWalAge() { } /* - Get the oldest wal timestamp from all the queues. - */ + * Get the oldest wal timestamp from all the queues. + */ private long getOldestWalTimestamp() { long oldestWalTimestamp = Long.MAX_VALUE; for (Map.Entry> entry : queues.entrySet()) { @@ -180,8 +179,8 @@ private long getOldestWalTimestamp() { Path path = queue.peek(); // Can path ever be null ? if (path != null) { - oldestWalTimestamp = Math.min(oldestWalTimestamp, - AbstractFSWALProvider.WALStartTimeComparator.getTS(path)); + oldestWalTimestamp = + Math.min(oldestWalTimestamp, AbstractFSWALProvider.WALStartTimeComparator.getTS(path)); } } return oldestWalTimestamp; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index faa654dcf282..808abd362123 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -125,10 +125,9 @@ public class ReplicationSourceManager { private final List oldsources; /** - * Storage for queues that need persistance; e.g. Replication state so can be recovered - * after a crash. queueStorage upkeep is spread about this class and passed - * to ReplicationSource instances for these to do updates themselves. Not all ReplicationSource - * instances keep state. + * Storage for queues that need persistance; e.g. Replication state so can be recovered after a + * crash. 
queueStorage upkeep is spread about this class and passed to ReplicationSource instances + * for these to do updates themselves. Not all ReplicationSource instances keep state. */ private final ReplicationQueueStorage queueStorage; @@ -187,9 +186,8 @@ public class ReplicationSourceManager { * @param oldLogDir the directory where old logs are archived */ public ReplicationSourceManager(ReplicationQueueStorage queueStorage, - ReplicationPeers replicationPeers, Configuration conf, - Server server, FileSystem fs, Path logDir, Path oldLogDir, UUID clusterId, - WALFactory walFactory, + ReplicationPeers replicationPeers, Configuration conf, Server server, FileSystem fs, + Path logDir, Path oldLogDir, UUID clusterId, WALFactory walFactory, SyncReplicationPeerMappingManager syncReplicationPeerMappingManager, MetricsReplicationGlobalSourceSource globalMetrics) throws IOException { this.sources = new ConcurrentHashMap<>(); @@ -213,8 +211,8 @@ public ReplicationSourceManager(ReplicationQueueStorage queueStorage, int nbWorkers = conf.getInt("replication.executor.workers", 1); // use a short 100ms sleep since this could be done inline with a RS startup // even if we fail, other region servers can take care of it - this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers, 100, - TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); + this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers, 100, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>()); ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); tfb.setNameFormat("ReplicationExecutor-%d"); tfb.setDaemon(true); @@ -224,9 +222,9 @@ public ReplicationSourceManager(ReplicationQueueStorage queueStorage, HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); this.sleepForRetries = this.conf.getLong("replication.source.sync.sleepforretries", 1000); this.maxRetriesMultiplier = - this.conf.getInt("replication.source.sync.maxretriesmultiplier", 60); + this.conf.getInt("replication.source.sync.maxretriesmultiplier", 60); this.totalBufferLimit = conf.getLong(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY, - HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT); + HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT); this.globalMetrics = globalMetrics; } @@ -327,9 +325,9 @@ private ReplicationSourceInterface createSource(String queueId, ReplicationPeer // Init the just created replication source. Pass the default walProvider's wal file length // provider. Presumption is we replicate user-space Tables only. For hbase:meta region replica // replication, see #createCatalogReplicationSource(). - WALFileLengthProvider walFileLengthProvider = - this.walFactory.getWALProvider() != null? - this.walFactory.getWALProvider().getWALFileLengthProvider() : p -> OptionalLong.empty(); + WALFileLengthProvider walFileLengthProvider = this.walFactory.getWALProvider() != null + ? 
this.walFactory.getWALProvider().getWALFileLengthProvider() + : p -> OptionalLong.empty(); src.init(conf, fs, this, queueStorage, replicationPeer, server, queueId, clusterId, walFileLengthProvider, new MetricsSource(queueId)); return src; @@ -345,7 +343,7 @@ private ReplicationSourceInterface createSource(String queueId, ReplicationPeer void addSource(String peerId) throws IOException { ReplicationPeer peer = replicationPeers.getPeer(peerId); if (ReplicationUtils.LEGACY_REGION_REPLICATION_ENDPOINT_NAME - .equals(peer.getPeerConfig().getReplicationEndpointImpl())) { + .equals(peer.getPeerConfig().getReplicationEndpointImpl())) { // we do not use this endpoint for region replication any more, see HBASE-26233 LOG.info("Legacy region replication peer found, skip adding: {}", peer.getPeerConfig()); return; @@ -394,8 +392,8 @@ void addSource(String peerId) throws IOException { * @param peerId the id of the sync replication peer */ public void drainSources(String peerId) throws IOException, ReplicationException { - String terminateMessage = "Sync replication peer " + peerId + - " is transiting to STANDBY. Will close the previous replication source and open a new one"; + String terminateMessage = "Sync replication peer " + peerId + + " is transiting to STANDBY. Will close the previous replication source and open a new one"; ReplicationPeer peer = replicationPeers.getPeer(peerId); assert peer.getPeerConfig().isSyncReplication(); ReplicationSourceInterface src = createSource(peerId, peer); @@ -458,8 +456,8 @@ public void drainSources(String peerId) throws IOException, ReplicationException * @param peerId the id of the replication peer */ public void refreshSources(String peerId) throws IOException { - String terminateMessage = "Peer " + peerId + - " state or config changed. Will close the previous replication source and open a new one"; + String terminateMessage = "Peer " + peerId + + " state or config changed. Will close the previous replication source and open a new one"; ReplicationPeer peer = replicationPeers.getPeer(peerId); ReplicationSourceInterface src = createSource(peerId, peer); // synchronized on latestPaths to avoid missing the new log @@ -564,15 +562,15 @@ private void interruptOrAbortWhenFail(ReplicationQueueOperation op) { op.exec(); } catch (ReplicationException e) { if (e.getCause() != null && e.getCause() instanceof KeeperException.SystemErrorException - && e.getCause().getCause() != null && e.getCause() - .getCause() instanceof InterruptedException) { + && e.getCause().getCause() != null + && e.getCause().getCause() instanceof InterruptedException) { // ReplicationRuntimeException(a RuntimeException) is thrown out here. The reason is // that thread is interrupted deep down in the stack, it should pass the following // processing logic and propagate to the most top layer which can handle this exception // properly. In this specific case, the top layer is ReplicationSourceShipper#run(). throw new ReplicationRuntimeException( - "Thread is interrupted, the replication source may be terminated", - e.getCause().getCause()); + "Thread is interrupted, the replication source may be terminated", + e.getCause().getCause()); } server.abort("Failed to operate on replication queue", e); } @@ -695,8 +693,8 @@ private void cleanOldLogs(NavigableSet wals, ReplicationSourceInterface // special format, and also, the peer id in its name should match the peer id for the // replication source. 
List remoteWals = wals.stream().filter(w -> SyncReplicationWALProvider - .getSyncReplicationPeerIdFromWALName(w).map(peerId::equals).orElse(false)) - .collect(Collectors.toList()); + .getSyncReplicationPeerIdFromWALName(w).map(peerId::equals).orElse(false)) + .collect(Collectors.toList()); LOG.debug("Removing {} logs from remote dir {} in the list: {}", remoteWals.size(), remoteWALDir, remoteWals); if (!remoteWals.isEmpty()) { @@ -744,7 +742,7 @@ public void preLogRoll(Path newLog) throws IOException { synchronized (this.walsById) { // Update walsById map for (Map.Entry>> entry : this.walsById - .entrySet()) { + .entrySet()) { String peerId = entry.getKey(); Map> walsByPrefix = entry.getValue(); boolean existingPrefix = false; @@ -780,8 +778,8 @@ public void postLogRoll(Path newLog) throws IOException { // This only updates the sources we own, not the recovered ones for (ReplicationSourceInterface source : this.sources.values()) { source.enqueueLog(newLog); - LOG.trace("Enqueued {} to source {} while performing postLogRoll operation.", - newLog, source.getQueueId()); + LOG.trace("Enqueued {} to source {} while performing postLogRoll operation.", newLog, + source.getQueueId()); } } @@ -789,8 +787,8 @@ void claimQueue(ServerName deadRS, String queue) { // Wait a bit before transferring the queues, we may be shutting down. // This sleep may not be enough in some cases. try { - Thread.sleep(sleepBeforeFailover + - (long) (ThreadLocalRandom.current().nextFloat() * sleepBeforeFailover)); + Thread.sleep(sleepBeforeFailover + + (long) (ThreadLocalRandom.current().nextFloat() * sleepBeforeFailover)); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting before transferring a queue."); Thread.currentThread().interrupt(); @@ -818,10 +816,9 @@ void claimQueue(ServerName deadRS, String queue) { claimedQueue = queueStorage.claimQueue(deadRS, queue, server.getServerName()); } catch (ReplicationException e) { LOG.error( - "ReplicationException: cannot claim dead region ({})'s " + - "replication queue. Znode : ({})" + - " Possible solution: check if znode size exceeds jute.maxBuffer value. " + - " If so, increase it for both client and server side.", + "ReplicationException: cannot claim dead region ({})'s " + "replication queue. Znode : ({})" + + " Possible solution: check if znode size exceeds jute.maxBuffer value. " + + " If so, increase it for both client and server side.", deadRS, queueStorage.getRsNode(deadRS), e); server.abort("Failed to claim queue from dead regionserver.", e); return; @@ -837,8 +834,8 @@ void claimQueue(ServerName deadRS, String queue) { abortWhenFail(() -> queueStorage.removeQueue(server.getServerName(), queueId)); return; } - if (server instanceof ReplicationSyncUp.DummyServer && - peer.getPeerState().equals(PeerState.DISABLED)) { + if (server instanceof ReplicationSyncUp.DummyServer + && peer.getPeerState().equals(PeerState.DISABLED)) { LOG.warn( "Peer {} is disabled. ReplicationSyncUp tool will skip " + "replicating data to this peer.", peerId); @@ -867,10 +864,10 @@ void claimQueue(ServerName deadRS, String queue) { // replicated back. 
if (peer.getPeerConfig().isSyncReplication()) { Pair stateAndNewState = - peer.getSyncReplicationStateAndNewState(); - if ((stateAndNewState.getFirst().equals(SyncReplicationState.STANDBY) && - stateAndNewState.getSecond().equals(SyncReplicationState.NONE)) || - stateAndNewState.getSecond().equals(SyncReplicationState.STANDBY)) { + peer.getSyncReplicationStateAndNewState(); + if ((stateAndNewState.getFirst().equals(SyncReplicationState.STANDBY) + && stateAndNewState.getSecond().equals(SyncReplicationState.NONE)) + || stateAndNewState.getSecond().equals(SyncReplicationState.STANDBY)) { src.terminate("Sync replication peer is in STANDBY state"); deleteQueue(queueId); return; @@ -980,8 +977,8 @@ public AtomicLong getTotalBufferUsed() { } /** - * Returns the maximum size in bytes of edits held in memory which are pending replication - * across all sources inside this RegionServer. + * Returns the maximum size in bytes of edits held in memory which are pending replication across + * all sources inside this RegionServer. */ public long getTotalBufferLimit() { return totalBufferLimit; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java index 9754c495417f..ba8344fdfcf1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,10 @@ import static org.apache.hadoop.hbase.replication.ReplicationUtils.getAdaptiveTimeout; import static org.apache.hadoop.hbase.replication.ReplicationUtils.sleepForRetries; + import java.io.IOException; import java.util.List; import java.util.concurrent.atomic.LongAccumulator; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -36,6 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor; @@ -49,9 +50,7 @@ public class ReplicationSourceShipper extends Thread { // Hold the state of a replication worker thread public enum WorkerState { - RUNNING, - STOPPED, - FINISHED, // The worker is done processing a queue + RUNNING, STOPPED, FINISHED, // The worker is done processing a queue } private final Configuration conf; @@ -82,14 +81,18 @@ public ReplicationSourceShipper(Configuration conf, String walGroupId, this.walGroupId = walGroupId; this.logQueue = logQueue; this.source = source; - this.sleepForRetries = - this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second - this.maxRetriesMultiplier = - this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per + this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); // 1 + // second + this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 + // minutes + // @ + // 1 + // sec + // per this.getEntriesTimeout = 
this.conf.getInt("replication.source.getEntries.timeout", DEFAULT_TIMEOUT); // 20 seconds this.shipEditsTimeout = this.conf.getInt(HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT, - HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT); + HConstants.REPLICATION_SOURCE_SHIPEDITS_TIMEOUT_DFAULT); } @Override @@ -107,8 +110,8 @@ public final void run() { } try { WALEntryBatch entryBatch = entryReader.poll(getEntriesTimeout); - LOG.debug("Shipper from source {} got entry batch from reader: {}", - source.getQueueId(), entryBatch); + LOG.debug("Shipper from source {} got entry batch from reader: {}", source.getQueueId(), + entryBatch); if (entryBatch == null) { continue; } @@ -149,15 +152,15 @@ protected void postFinish() { } /** - * get batchEntry size excludes bulk load file sizes. - * Uses ReplicationSourceWALReader's static method. + * get batchEntry size excludes bulk load file sizes. Uses ReplicationSourceWALReader's static + * method. */ private int getBatchEntrySizeExcludeBulkLoad(WALEntryBatch entryBatch) { int totalSize = 0; - for(Entry entry : entryBatch.getWalEntries()) { + for (Entry entry : entryBatch.getWalEntries()) { totalSize += ReplicationSourceWALReader.getEntrySizeExcludeBulkLoad(entry); } - return totalSize; + return totalSize; } /** @@ -172,8 +175,8 @@ private void shipEdits(WALEntryBatch entryBatch) { } int currentSize = (int) entryBatch.getHeapSize(); int sizeExcludeBulkLoad = getBatchEntrySizeExcludeBulkLoad(entryBatch); - source.getSourceMetrics().setTimeStampNextToReplicate(entries.get(entries.size() - 1) - .getKey().getWriteTime()); + source.getSourceMetrics() + .setTimeStampNextToReplicate(entries.get(entries.size() - 1).getKey().getWriteTime()); while (isActive()) { try { try { @@ -211,10 +214,10 @@ private void shipEdits(WALEntryBatch entryBatch) { // Log and clean up WAL logs updateLogPosition(entryBatch); - //offsets totalBufferUsed by deducting shipped batchSize (excludes bulk load size) - //this sizeExcludeBulkLoad has to use same calculation that when calling - //acquireBufferQuota() in ReplicationSourceWALReader because they maintain - //same variable: totalBufferUsed + // offsets totalBufferUsed by deducting shipped batchSize (excludes bulk load size) + // this sizeExcludeBulkLoad has to use same calculation that when calling + // acquireBufferQuota() in ReplicationSourceWALReader because they maintain + // same variable: totalBufferUsed source.postShipEdits(entries, sizeExcludeBulkLoad); // FIXME check relationship between wal group and overall source.getSourceMetrics().shipBatch(entryBatch.getNbOperations(), currentSize, @@ -224,8 +227,8 @@ private void shipEdits(WALEntryBatch entryBatch) { source.getSourceMetrics().updateTableLevelMetrics(entryBatch.getWalEntriesWithSize()); if (LOG.isTraceEnabled()) { - LOG.debug("Replicated {} entries or {} operations in {} ms", - entries.size(), entryBatch.getNbOperations(), (endTimeNs - startTimeNs) / 1000000); + LOG.debug("Replicated {} entries or {} operations in {} ms", entries.size(), + entryBatch.getNbOperations(), (endTimeNs - startTimeNs) / 1000000); } break; } catch (Exception ex) { @@ -269,8 +272,8 @@ private boolean updateLogPosition(WALEntryBatch batch) { // record on zk, so let's call it. The last wal position maybe zero if end of file is true and // there is no entry in the batch. It is OK because that the queue storage will ignore the zero // position and the file will be removed soon in cleanOldLogs. 
- if (batch.isEndOfFile() || !batch.getLastWalPath().equals(currentPath) || - batch.getLastWalPosition() != currentPosition) { + if (batch.isEndOfFile() || !batch.getLastWalPath().equals(currentPath) + || batch.getLastWalPosition() != currentPosition) { source.logPositionAndCleanOldLogs(batch); updated = true; } @@ -327,28 +330,26 @@ public boolean isFinished() { } /** - * Attempts to properly update ReplicationSourceManager.totalBufferUser, - * in case there were unprocessed entries batched by the reader to the shipper, - * but the shipper didn't manage to ship those because the replication source is being terminated. - * In that case, it iterates through the batched entries and decrease the pending - * entries size from ReplicationSourceManager.totalBufferUser + * Attempts to properly update ReplicationSourceManager.totalBufferUser, in case + * there were unprocessed entries batched by the reader to the shipper, but the shipper didn't + * manage to ship those because the replication source is being terminated. In that case, it + * iterates through the batched entries and decrease the pending entries size from + * ReplicationSourceManager.totalBufferUser *
          - * NOTES - * 1) This method should only be called upon replication source termination. - * It blocks waiting for both shipper and reader threads termination, - * to make sure no race conditions - * when updating ReplicationSourceManager.totalBufferUser. - * - * 2) It does not attempt to terminate reader and shipper threads. Those must - * have been triggered interruption/termination prior to calling this method. + * NOTES 1) This method should only be called upon replication source termination. It + * blocks waiting for both shipper and reader threads termination, to make sure no race conditions + * when updating ReplicationSourceManager.totalBufferUser. 2) It does not + * attempt to terminate reader and shipper threads. Those must have been triggered + * interruption/termination prior to calling this method. */ void clearWALEntryBatch() { long timeout = EnvironmentEdgeManager.currentTime() + this.shipEditsTimeout; - while(this.isAlive() || this.entryReader.isAlive()){ + while (this.isAlive() || this.entryReader.isAlive()) { try { if (EnvironmentEdgeManager.currentTime() >= timeout) { - LOG.warn("Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper " - + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}", + LOG.warn( + "Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper " + + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}", this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive()); return; } else { @@ -357,11 +358,12 @@ void clearWALEntryBatch() { } } catch (InterruptedException e) { LOG.warn("{} Interrupted while waiting {} to stop on clearWALEntryBatch. " - + "Not cleaning buffer usage: {}", this.source.getPeerId(), this.getName(), e); + + "Not cleaning buffer usage: {}", + this.source.getPeerId(), this.getName(), e); return; } } - LongAccumulator totalToDecrement = new LongAccumulator((a,b) -> a + b, 0); + LongAccumulator totalToDecrement = new LongAccumulator((a, b) -> a + b, 0); entryReader.entryBatchQueue.forEach(w -> { entryReader.entryBatchQueue.remove(w); w.getWalEntries().forEach(e -> { @@ -369,12 +371,12 @@ void clearWALEntryBatch() { totalToDecrement.accumulate(entrySizeExcludeBulkLoad); }); }); - if( LOG.isTraceEnabled()) { + if (LOG.isTraceEnabled()) { LOG.trace("Decrementing totalBufferUsed by {}B while stopping Replication WAL Readers.", totalToDecrement.longValue()); } - long newBufferUsed = source.getSourceManager().getTotalBufferUsed() - .addAndGet(-totalToDecrement.longValue()); + long newBufferUsed = + source.getSourceManager().getTotalBufferUsed().addAndGet(-totalToDecrement.longValue()); source.getSourceManager().getGlobalMetrics().setWALReaderEditsBufferBytes(newBufferUsed); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java index f3311eedbe06..c0778188c14b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -70,8 +70,8 @@ static void scopeWALEdits(WALKey logKey, WALEdit logEdit, Configuration conf) { return; } // For replay, or if all the cells are markers, do not need to store replication scope. - if (logEdit.isReplay() || - logEdit.getCells().stream().allMatch(c -> WALEdit.isMetaEditFamily(c))) { + if (logEdit.isReplay() + || logEdit.getCells().stream().allMatch(c -> WALEdit.isMetaEditFamily(c))) { ((WALKeyImpl) logKey).clearReplicationScope(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index 11090448c7c6..6c800c9b68ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,6 +41,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor; @@ -72,7 +72,7 @@ class ReplicationSourceWALReader extends Thread { private final int maxRetriesMultiplier; private final boolean eofAutoRecovery; - //Indicates whether this particular worker is running + // Indicates whether this particular worker is running private boolean isReaderRunning = true; private AtomicLong totalBufferUsed; @@ -106,18 +106,21 @@ public ReplicationSourceWALReader(FileSystem fs, Configuration conf, int batchCount = conf.getInt("replication.source.nb.batches", 1); this.totalBufferUsed = source.getSourceManager().getTotalBufferUsed(); this.totalBufferQuota = source.getSourceManager().getTotalBufferLimit(); - this.sleepForRetries = - this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second - this.maxRetriesMultiplier = - this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per + this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); // 1 + // second + this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 + // minutes + // @ + // 1 + // sec + // per this.eofAutoRecovery = conf.getBoolean("replication.source.eof.autorecovery", false); this.entryBatchQueue = new LinkedBlockingQueue<>(batchCount); this.walGroupId = walGroupId; - LOG.info("peerClusterZnode=" + source.getQueueId() - + ", ReplicationSourceWALReaderThread : " + source.getPeerId() - + " inited, replicationBatchSizeCapacity=" + replicationBatchSizeCapacity - + ", replicationBatchCountCapacity=" + replicationBatchCountCapacity - + ", replicationBatchQueueCapacity=" + batchCount); + LOG.info("peerClusterZnode=" + source.getQueueId() + ", ReplicationSourceWALReaderThread : " + + source.getPeerId() + " inited, replicationBatchSizeCapacity=" + + replicationBatchSizeCapacity + ", replicationBatchCountCapacity=" + + replicationBatchCountCapacity + ", replicationBatchQueueCapacity=" + batchCount); } @Override @@ -126,9 +129,8 @@ public void run() { while (isReaderRunning()) { // we 
only loop back here if something fatal happened to our stream WALEntryBatch batch = null; try (WALEntryStream entryStream = - new WALEntryStream(logQueue, conf, currentPosition, - source.getWALFileLengthProvider(), source.getServerWALsBelongTo(), - source.getSourceMetrics(), walGroupId)) { + new WALEntryStream(logQueue, conf, currentPosition, source.getWALFileLengthProvider(), + source.getServerWALsBelongTo(), source.getSourceMetrics(), walGroupId)) { while (isReaderRunning()) { // loop here to keep reusing stream while we can batch = null; if (!source.isPeerEnabled()) { @@ -179,7 +181,7 @@ protected final boolean addEntryToBatch(WALEntryBatch batch, Entry entry) { return false; } LOG.debug("updating TimeStampOfLastAttempted to {}, from entry {}, for source queue: {}", - entry.getKey().getWriteTime(), entry.getKey(), this.source.getQueueId()); + entry.getKey().getWriteTime(), entry.getKey(), this.source.getQueueId()); long entrySize = getEntrySizeIncludeBulkLoad(entry); long entrySizeExcludeBulkLoad = getEntrySizeExcludeBulkLoad(entry); batch.addEntry(entry, entrySize); @@ -187,8 +189,8 @@ protected final boolean addEntryToBatch(WALEntryBatch batch, Entry entry) { boolean totalBufferTooLarge = acquireBufferQuota(entrySizeExcludeBulkLoad); // Stop if too many entries or too big - return totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity || - batch.getNbEntries() >= replicationBatchCountCapacity; + return totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity + || batch.getNbEntries() >= replicationBatchCountCapacity; } protected static final boolean switched(WALEntryStream entryStream, Path path) { @@ -200,7 +202,7 @@ protected static final boolean switched(WALEntryStream entryStream, Path path) { // This is required in case there is any exception in while reading entries // we do not want to loss the existing entries in the batch protected void readWALEntries(WALEntryStream entryStream, WALEntryBatch batch) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Path currentPath = entryStream.getCurrentPath(); for (;;) { Entry entry = entryStream.next(); @@ -238,7 +240,7 @@ private void handleEmptyWALEntryBatch() throws InterruptedException { } private WALEntryBatch tryAdvanceStreamAndCreateWALBatch(WALEntryStream entryStream) - throws IOException { + throws IOException { Path currentPath = entryStream.getCurrentPath(); if (!entryStream.hasNext()) { // check whether we have switched a file @@ -257,24 +259,23 @@ private WALEntryBatch tryAdvanceStreamAndCreateWALBatch(WALEntryStream entryStre } /** - * This is to handle the EOFException from the WAL entry stream. EOFException should - * be handled carefully because there are chances of data loss because of never replicating - * the data. Thus we should always try to ship existing batch of entries here. - * If there was only one log in the queue before EOF, we ship the empty batch here - * and since reader is still active, in the next iteration of reader we will - * stop the reader. + * This is to handle the EOFException from the WAL entry stream. EOFException should be handled + * carefully because there are chances of data loss because of never replicating the data. Thus we + * should always try to ship existing batch of entries here. If there was only one log in the + * queue before EOF, we ship the empty batch here and since reader is still active, in the next + * iteration of reader we will stop the reader. *
          - * If there was more than one log in the queue before EOF, we ship the existing batch - * and reset the wal patch and position to the log with EOF, so shipper can remove - * logs from replication queue + * If there was more than one log in the queue before EOF, we ship the existing batch and reset + * the wal patch and position to the log with EOF, so shipper can remove logs from replication + * queue * @return true only the IOE can be handled */ private boolean handleEofException(Exception e, WALEntryBatch batch) { PriorityBlockingQueue queue = logQueue.getQueue(walGroupId); // Dump the log even if logQueue size is 1 if the source is from recovered Source // since we don't add current log to recovered source queue so it is safe to remove. - if ((e instanceof EOFException || e.getCause() instanceof EOFException) && - (source.isRecovered() || queue.size() > 1) && this.eofAutoRecovery) { + if ((e instanceof EOFException || e.getCause() instanceof EOFException) + && (source.isRecovered() || queue.size() > 1) && this.eofAutoRecovery) { Path path = queue.peek(); try { if (!fs.exists(path)) { @@ -325,12 +326,12 @@ public Path getCurrentPath() { return logQueue.getQueue(walGroupId).peek(); } - //returns false if we've already exceeded the global quota + // returns false if we've already exceeded the global quota private boolean checkQuota() { // try not to go over total quota if (totalBufferUsed.get() > totalBufferQuota) { LOG.warn("peer={}, can't read more edits from WAL as buffer usage {}B exceeds limit {}B", - this.source.getPeerId(), totalBufferUsed.get(), totalBufferQuota); + this.source.getPeerId(), totalBufferUsed.get(), totalBufferQuota); Threads.sleep(sleepForRetries); return false; } @@ -366,7 +367,7 @@ public WALEntryBatch poll(long timeout) throws InterruptedException { private long getEntrySizeIncludeBulkLoad(Entry entry) { WALEdit edit = entry.getEdit(); - return getEntrySizeExcludeBulkLoad(entry) + sizeOfStoreFilesIncludeBulkLoad(edit); + return getEntrySizeExcludeBulkLoad(entry) + sizeOfStoreFilesIncludeBulkLoad(edit); } public static long getEntrySizeExcludeBulkLoad(Entry entry) { @@ -375,7 +376,6 @@ public static long getEntrySizeExcludeBulkLoad(Entry entry) { return edit.heapSize() + key.estimatedSerializedSizeOf(); } - private void updateBatchStats(WALEntryBatch batch, Entry entry, long entrySize) { WALEdit edit = entry.getEdit(); batch.incrementHeapSize(entrySize); @@ -409,7 +409,8 @@ private Pair countDistinctRowKeysAndHFiles(WALEdit edit) { } } catch (IOException e) { LOG.error("Failed to deserialize bulk load entry from wal edit. " - + "Then its hfiles count will not be added into metric.", e); + + "Then its hfiles count will not be added into metric.", + e); } } @@ -446,7 +447,8 @@ private int sizeOfStoreFilesIncludeBulkLoad(WALEdit edit) { } catch (IOException e) { LOG.error("Failed to deserialize bulk load entry from wal edit. 
" + "Size of HFiles part of cell will not be considered in replication " - + "request size calculation.", e); + + "request size calculation.", + e); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java index 10d6cd59d4ae..2161cc35ed99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 0e938ecf2026..77788d41bc40 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -71,14 +71,14 @@ public static void main(String[] args) throws Exception { private Set getLiveRegionServers(ZKWatcher zkw) throws KeeperException { List rsZNodes = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode); - return rsZNodes == null ? Collections.emptySet() : - rsZNodes.stream().map(ServerName::parseServerName).collect(Collectors.toSet()); + return rsZNodes == null ? Collections.emptySet() + : rsZNodes.stream().map(ServerName::parseServerName).collect(Collectors.toSet()); } // When using this tool, usually the source cluster is unhealthy, so we should try to claim the // replication queues for the dead region servers first and then replicate the data out. private void claimReplicationQueues(ZKWatcher zkw, ReplicationSourceManager mgr) - throws ReplicationException, KeeperException { + throws ReplicationException, KeeperException { List replicators = mgr.getQueueStorage().getListOfReplicators(); Set liveRegionServers = getLiveRegionServers(zkw); for (ServerName sn : replicators) { @@ -106,8 +106,7 @@ public boolean isAborted() { }; Configuration conf = getConf(); try (ZKWatcher zkw = new ZKWatcher(conf, - "syncupReplication" + EnvironmentEdgeManager.currentTime(), - abortable, true)) { + "syncupReplication" + EnvironmentEdgeManager.currentTime(), abortable, true)) { Path walRootDir = CommonFSUtils.getWALRootDir(conf); FileSystem fs = CommonFSUtils.getWALFileSystem(conf); Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java index 7f73030699e8..3e4bb77b23fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hbase.replication.regionserver; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; /** - * Per-peer per-node throttling controller for replication: enabled if - * bandwidth > 0, a cycle = 100ms, by throttling we guarantee data pushed - * to peer within each cycle won't exceed 'bandwidth' bytes + * Per-peer per-node throttling controller for replication: enabled if bandwidth > 0, a cycle = + * 100ms, by throttling we guarantee data pushed to peer within each cycle won't exceed 'bandwidth' + * bytes */ @InterfaceAudience.Private public class ReplicationThrottler { @@ -33,8 +33,7 @@ public class ReplicationThrottler { private long cycleStartTick; /** - * ReplicationThrottler constructor - * If bandwidth less than 1, throttling is disabled + * ReplicationThrottler constructor If bandwidth less than 1, throttling is disabled * @param bandwidth per cycle(100ms) */ public ReplicationThrottler(final double bandwidth) { @@ -55,9 +54,8 @@ public boolean isEnabled() { } /** - * Get how long the caller should sleep according to the current size and - * current cycle's total push size and start tick, return the sleep interval - * for throttling control. + * Get how long the caller should sleep according to the current size and current cycle's total + * push size and start tick, return the sleep interval for throttling control. * @param size is the size of edits to be pushed * @return sleep interval for throttling control */ @@ -69,11 +67,11 @@ public long getNextSleepInterval(final int size) { long sleepTicks = 0; long now = EnvironmentEdgeManager.currentTime(); // 1. if cyclePushSize exceeds bandwidth, we need to sleep some - // following cycles to amortize, this case can occur when a single push - // exceeds the bandwidth - if ((double)this.cyclePushSize > bandwidth) { - double cycles = Math.ceil((double)this.cyclePushSize / bandwidth); - long shouldTillTo = this.cycleStartTick + (long)(cycles * 100); + // following cycles to amortize, this case can occur when a single push + // exceeds the bandwidth + if ((double) this.cyclePushSize > bandwidth) { + double cycles = Math.ceil((double) this.cyclePushSize / bandwidth); + long shouldTillTo = this.cycleStartTick + (long) (cycles * 100); if (shouldTillTo > now) { sleepTicks = shouldTillTo - now; } else { @@ -82,16 +80,15 @@ public long getNextSleepInterval(final int size) { } this.cyclePushSize = 0; } else { - long nextCycleTick = this.cycleStartTick + 100; //a cycle is 100ms + long nextCycleTick = this.cycleStartTick + 100; // a cycle is 100ms if (now >= nextCycleTick) { // 2. switch to next cycle if the current cycle has passed this.cycleStartTick = now; this.cyclePushSize = 0; - } else if (this.cyclePushSize > 0 && - (double)(this.cyclePushSize + size) >= bandwidth) { + } else if (this.cyclePushSize > 0 && (double) (this.cyclePushSize + size) >= bandwidth) { // 3. delay the push to next cycle if exceeds throttling bandwidth. 
- // enforcing cyclePushSize > 0 to avoid the unnecessary sleep for case - // where a cycle's first push size(currentSize) > bandwidth + // enforcing cyclePushSize > 0 to avoid the unnecessary sleep for case + // where a cycle's first push size(currentSize) > bandwidth sleepTicks = nextCycleTick - now; this.cyclePushSize = 0; } @@ -101,8 +98,7 @@ public long getNextSleepInterval(final int size) { /** * Add current size to the current cycle's total push size - * @param size is the current size added to the current cycle's - * total push size + * @param size is the current size added to the current cycle's total push size */ public void addPushSize(final int size) { if (this.enabled) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java index fdc1e5414d00..2fd474372551 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,6 +37,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.cache.Cache; import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader; @@ -50,12 +51,11 @@ *
          * We record all the open sequence number for a region in a special family in meta, which is called * 'rep_barrier', so there will be a sequence of open sequence number (b1, b2, b3, ...). We call - * [bn, bn+1) a range, and it is obvious that a region will always be on the same RS within a - * range. + * [bn, bn+1) a range, and it is obvious that a region will always be on the same RS within a range. *
          * When split and merge, we will also record the parent for the generated region(s) in the special - * family in meta. And also, we will write an extra 'open sequence number' for the parent - * region(s), which is the max sequence id of the region plus one. + * family in meta. And also, we will write an extra 'open sequence number' for the parent region(s), + * which is the max sequence id of the region plus one. *
          *
          *
          @@ -113,7 +113,7 @@ class SerialReplicationChecker { private static final Logger LOG = LoggerFactory.getLogger(SerialReplicationChecker.class); public static final String REPLICATION_SERIALLY_WAITING_KEY = - "hbase.serial.replication.waiting.ms"; + "hbase.serial.replication.waiting.ms"; public static final long REPLICATION_SERIALLY_WAITING_DEFAULT = 10000; private final String peerId; @@ -125,24 +125,24 @@ class SerialReplicationChecker { private final long waitTimeMs; private final LoadingCache pushed = CacheBuilder.newBuilder() - .expireAfterAccess(1, TimeUnit.DAYS).build(new CacheLoader() { + .expireAfterAccess(1, TimeUnit.DAYS).build(new CacheLoader() { - @Override - public MutableLong load(String key) throws Exception { - return new MutableLong(HConstants.NO_SEQNUM); - } - }); + @Override + public MutableLong load(String key) throws Exception { + return new MutableLong(HConstants.NO_SEQNUM); + } + }); // Use guava cache to set ttl for each key private final Cache canPushUnder = - CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS).build(); + CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS).build(); public SerialReplicationChecker(Configuration conf, ReplicationSource source) { this.peerId = source.getPeerId(); this.storage = source.getReplicationQueueStorage(); this.conn = source.getServer().getConnection(); this.waitTimeMs = - conf.getLong(REPLICATION_SERIALLY_WAITING_KEY, REPLICATION_SERIALLY_WAITING_DEFAULT); + conf.getLong(REPLICATION_SERIALLY_WAITING_KEY, REPLICATION_SERIALLY_WAITING_DEFAULT); } private boolean isRangeFinished(long endBarrier, String encodedRegionName) throws IOException { @@ -151,7 +151,7 @@ private boolean isRangeFinished(long endBarrier, String encodedRegionName) throw pushedSeqId = storage.getLastSequenceId(encodedRegionName, peerId); } catch (ReplicationException e) { throw new IOException( - "Failed to get pushed sequence id for " + encodedRegionName + ", peer " + peerId, e); + "Failed to get pushed sequence id for " + encodedRegionName + ", peer " + peerId, e); } // endBarrier is the open sequence number. When opening a region, the open sequence number will // be set to the old max sequence id plus one, so here we need to minus one. @@ -170,8 +170,8 @@ private boolean isParentFinished(byte[] regionName) throws IOException { // if a region is in OPENING state and we are in the last range, it is not safe to say we can push // even if the previous range is finished. 
private boolean isLastRangeAndOpening(ReplicationBarrierResult barrierResult, int index) { - return index == barrierResult.getBarriers().length && - barrierResult.getState() == RegionState.State.OPENING; + return index == barrierResult.getBarriers().length + && barrierResult.getState() == RegionState.State.OPENING; } private void recordCanPush(String encodedNameAsString, long seqId, long[] barriers, int index) { @@ -185,8 +185,8 @@ private boolean canPush(Entry entry, byte[] row) throws IOException { String encodedNameAsString = Bytes.toString(entry.getKey().getEncodedRegionName()); long seqId = entry.getKey().getSequenceId(); ReplicationBarrierResult barrierResult = - ReplicationBarrierFamilyFormat.getReplicationBarrierResult(conn, - entry.getKey().getTableName(), row, entry.getKey().getEncodedRegionName()); + ReplicationBarrierFamilyFormat.getReplicationBarrierResult(conn, + entry.getKey().getTableName(), row, entry.getKey().getEncodedRegionName()); LOG.debug("Replication barrier for {}: {}", entry, barrierResult); long[] barriers = barrierResult.getBarriers(); int index = Arrays.binarySearch(barriers, seqId); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java index 1de4c998546e..c545e34dcac9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SerialReplicationSourceWALReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ public SerialReplicationSourceWALReader(FileSystem fs, Configuration conf, @Override protected void readWALEntries(WALEntryStream entryStream, WALEntryBatch batch) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Path currentPath = entryStream.getCurrentPath(); long positionBefore = entryStream.getPosition(); for (;;) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java index b578587193dd..f8581ba4275c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.java @@ -1,17 +1,23 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java index c78fe40b028f..d09c821b9edc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SwitchRpcThrottleRemoteCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java index cfe525ac5d3c..e102e0460eeb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.util.Optional; import java.util.function.BiPredicate; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.util.Pair; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java index 170441b45c1f..b9f9a6b9eab3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.util.Optional; import java.util.function.BiPredicate; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.ReplicationPeerImpl; import org.apache.hadoop.hbase.replication.ReplicationPeers; @@ -55,10 +54,10 @@ public Optional> getPeerIdAndRemoteWALDir(TableName table) } Pair states = peer.getSyncReplicationStateAndNewState(); - if ((states.getFirst() == SyncReplicationState.ACTIVE && - states.getSecond() == SyncReplicationState.NONE) || - (states.getFirst() == SyncReplicationState.DOWNGRADE_ACTIVE && - states.getSecond() == SyncReplicationState.ACTIVE)) { + if ((states.getFirst() == SyncReplicationState.ACTIVE + && states.getSecond() == SyncReplicationState.NONE) + || (states.getFirst() == SyncReplicationState.DOWNGRADE_ACTIVE + && states.getSecond() == SyncReplicationState.ACTIVE)) { return Optional.of(Pair.newPair(peerId, peer.getPeerConfig().getRemoteWALDir())); } else { return Optional.empty(); @@ -77,7 +76,7 @@ public boolean checkState(TableName table, return false; } Pair states = - peer.getSyncReplicationStateAndNewState(); + peer.getSyncReplicationStateAndNewState(); return checker.test(states.getFirst(), states.getSecond()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java index 5d19f7224463..9a50ef433cdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerMappingManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java index 8301dff26d61..9f2ada0b3584 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,6 @@ class WALEntryBatch { this.lastWalPath = lastWalPath; } - static WALEntryBatch endOfFile(Path lastWalPath) { WALEntryBatch batch = new WALEntryBatch(0, lastWalPath); batch.setLastWalPosition(-1L); @@ -174,9 +173,9 @@ public void setLastSeqId(String region, long sequenceId) { @Override public String toString() { - return "WALEntryBatch [walEntries=" + walEntriesWithSize + ", lastWalPath=" + lastWalPath + - ", lastWalPosition=" + lastWalPosition + ", nbRowKeys=" + nbRowKeys + ", nbHFiles=" + - nbHFiles + ", heapSize=" + heapSize + ", lastSeqIds=" + lastSeqIds + ", endOfFile=" + - endOfFile + "]"; + return "WALEntryBatch [walEntries=" + walEntriesWithSize + ", lastWalPath=" + lastWalPath + + ", lastWalPosition=" + lastWalPosition + ", nbRowKeys=" + nbRowKeys + ", nbHFiles=" + + nbHFiles + ", heapSize=" + heapSize + ", lastSeqIds=" + lastSeqIds + ", endOfFile=" + + endOfFile + "]"; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java index f93f8b058b27..861f2d720077 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryFilterRetryableException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This exception should be thrown from any wal filter when the filter is expected - * to recover from the failures and it wants the replication to backup till it fails. - * There is special handling in replication wal reader to catch this exception and - * retry. + * This exception should be thrown from any wal filter when the filter is expected to recover from + * the failures and it wants the replication to backup till it fails. There is special handling in + * replication wal reader to catch this exception and retry. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public class WALEntryFilterRetryableException extends RuntimeException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java index 6f6ae1f8bd0f..dcaaae61de62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,15 +26,16 @@ /** * Implementations are installed on a Replication Sink called from inside * ReplicationSink#replicateEntries to filter replicated WALEntries based off WALEntry attributes. - * Currently only table name and replication write time are exposed (WALEntry is a private, - * internal class so we cannot pass it here). 
To install, set - * hbase.replication.sink.walentryfilter to the name of the implementing - * class. Implementing class must have a no-param Constructor. - *

          This filter is of limited use. It is better to filter on the replication source rather than - * here after the edits have been shipped on the replication sink. That said, applications such - * as the hbase-indexer want to filter out any edits that were made before replication was enabled. + * Currently only table name and replication write time are exposed (WALEntry is a private, internal + * class so we cannot pass it here). To install, set + * hbase.replication.sink.walentryfilter to the name of the implementing class. + * Implementing class must have a no-param Constructor. + *

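The reflowed javadoc spells out the wiring for a sink-side filter: name the implementing class in configuration, give it a no-param constructor, and it will be handed an AsyncConnection through init(). A minimal sketch of the registration step; com.example.MyIndexerFilter is a hypothetical class that would implement WALEntrySinkFilter:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter;

    public class SinkFilterRegistrationSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL_ENTRY_FILTER_KEY is "hbase.replication.sink.walentrysinkfilter"; the named class
        // must implement WALEntrySinkFilter and expose a no-param constructor.
        conf.set(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY, "com.example.MyIndexerFilter");
      }
    }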
          + * This filter is of limited use. It is better to filter on the replication source rather than here + * after the edits have been shipped on the replication sink. That said, applications such as the + * hbase-indexer want to filter out any edits that were made before replication was enabled. * @see org.apache.hadoop.hbase.replication.WALEntryFilter for filtering on the replication - * source-side. + * source-side. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) @InterfaceStability.Evolving @@ -45,8 +46,7 @@ public interface WALEntrySinkFilter { public static final String WAL_ENTRY_FILTER_KEY = "hbase.replication.sink.walentrysinkfilter"; /** - * Called after Construction. - * Use passed Connection to keep any context the filter might need. + * Called after Construction. Use passed Connection to keep any context the filter might need. */ void init(AsyncConnection conn); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java index 488355c3a2b2..06934c1bc421 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -82,9 +81,9 @@ class WALEntryStream implements Closeable { * @param metrics the replication metrics * @throws IOException throw IO exception from stream */ - public WALEntryStream(ReplicationSourceLogQueue logQueue, Configuration conf, - long startPosition, WALFileLengthProvider walFileLengthProvider, ServerName serverName, - MetricsSource metrics, String walGroupId) throws IOException { + public WALEntryStream(ReplicationSourceLogQueue logQueue, Configuration conf, long startPosition, + WALFileLengthProvider walFileLengthProvider, ServerName serverName, MetricsSource metrics, + String walGroupId) throws IOException { this.logQueue = logQueue; this.fs = CommonFSUtils.getWALFileSystem(conf); this.conf = conf; @@ -109,7 +108,7 @@ public boolean hasNext() throws IOException { * Returns the next WAL entry in this stream but does not advance. */ public Entry peek() throws IOException { - return hasNext() ? currentEntry: null; + return hasNext() ? currentEntry : null; } /** @@ -224,15 +223,15 @@ private boolean checkAllBytesParsed() throws IOException { if (currentPositionOfReader < stat.getLen()) { final long skippedBytes = stat.getLen() - currentPositionOfReader; // See the commits in HBASE-25924/HBASE-25932 for context. - LOG.warn("Reached the end of WAL {}. It was not closed cleanly," + - " so we did not parse {} bytes of data.", currentPath, skippedBytes); + LOG.warn("Reached the end of WAL {}. It was not closed cleanly," + + " so we did not parse {} bytes of data.", + currentPath, skippedBytes); metrics.incrUncleanlyClosedWALs(); metrics.incrBytesSkippedInUncleanlyClosedWALs(skippedBytes); } } else if (currentPositionOfReader + trailerSize < stat.getLen()) { - LOG.warn( - "Processing end of WAL {} at position {}, which is too far away from" + - " reported file length {}. Restarting WAL reading (see HBASE-15983 for details). {}", + LOG.warn("Processing end of WAL {} at position {}, which is too far away from" + + " reported file length {}. 
Restarting WAL reading (see HBASE-15983 for details). {}", currentPath, currentPositionOfReader, stat.getLen(), getCurrentPathStat()); setPosition(0); resetReader(); @@ -242,8 +241,8 @@ private boolean checkAllBytesParsed() throws IOException { } } if (LOG.isTraceEnabled()) { - LOG.trace("Reached the end of " + this.currentPath + " and length of the file is " + - (stat == null ? "N/A" : stat.getLen())); + LOG.trace("Reached the end of " + this.currentPath + " and length of the file is " + + (stat == null ? "N/A" : stat.getLen())); } metrics.incrCompletedWAL(); return true; @@ -268,8 +267,8 @@ private boolean readNextEntryAndRecordReaderPosition() throws IOException { // See HBASE-14004, for AsyncFSWAL which uses fan-out, it is possible that we read uncommitted // data, so we need to make sure that we do not read beyond the committed file length. if (LOG.isDebugEnabled()) { - LOG.debug("The provider tells us the valid length for " + currentPath + " is " + - fileLength.getAsLong() + ", but we have advanced to " + readerPos); + LOG.debug("The provider tells us the valid length for " + currentPath + " is " + + fileLength.getAsLong() + ", but we have advanced to " + readerPos); } resetReader(); return true; @@ -341,12 +340,12 @@ private void openReader(Path path) throws IOException { } } catch (FileNotFoundException fnfe) { handleFileNotFound(path, fnfe); - } catch (RemoteException re) { + } catch (RemoteException re) { IOException ioe = re.unwrapRemoteException(FileNotFoundException.class); if (!(ioe instanceof FileNotFoundException)) { throw ioe; } - handleFileNotFound(path, (FileNotFoundException)ioe); + handleFileNotFound(path, (FileNotFoundException) ioe); } catch (LeaseNotRecoveredException lnre) { // HBASE-15019 the WAL was not closed due to some hiccup. LOG.warn("Try to recover the WAL lease " + path, lnre); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java index c60faa9e5db8..b0550cc37cf9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALFileLengthProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,16 +18,15 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.util.OptionalLong; - import org.apache.hadoop.fs.Path; import org.apache.yetus.audience.InterfaceAudience; /** * Used by replication to prevent replicating unacked log entries. See - * https://issues.apache.org/jira/browse/HBASE-14004 for more details. - * WALFileLengthProvider exists because we do not want to reference WALFactory and WALProvider - * directly in the replication code so in the future it will be easier to decouple them. - * Each walProvider will have its own implementation. + * https://issues.apache.org/jira/browse/HBASE-14004 for more details. WALFileLengthProvider exists + * because we do not want to reference WALFactory and WALProvider directly in the replication code + * so in the future it will be easier to decouple them. Each walProvider will have its own + * implementation. 
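WALFileLengthProvider, whose javadoc is rewrapped above, is a @FunctionalInterface the WAL provider hands to replication so the reader never advances past the length that has actually been acked (HBASE-14004). A minimal sketch, assuming the single abstract method maps a WAL Path to an OptionalLong that is empty when the file is not currently being written:

    import java.util.OptionalLong;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;

    public class WalLengthProviderSketch {
      public static void main(String[] args) {
        // A provider that never limits the reader: every WAL is reported as not being written.
        WALFileLengthProvider neverLimit = path -> OptionalLong.empty();
        // Method name taken from the interface in this package; verify against this branch.
        System.out.println(
          neverLimit.getLogFileSizeIfBeingWritten(new Path("/hbase/oldWALs/example-wal")));
      }
    }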
*/ @InterfaceAudience.Private @FunctionalInterface diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java index 8ed250538c60..5244eb27c43d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/DisabledRSGroupInfoManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,7 +67,7 @@ public void moveServers(Set

<Address> servers, String targetGroupName) throws IOE private SortedSet<Address>
getOnlineServers() { SortedSet<Address>
onlineServers = new TreeSet<>
          (); serverManager.getOnlineServers().keySet().stream().map(ServerName::getAddress) - .forEach(onlineServers::add); + .forEach(onlineServers::add); return onlineServers; } @@ -110,7 +110,8 @@ public RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException { } @Override - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { throw new DoNotRetryIOException("RSGroup is disabled"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java index 3c03abc95949..2e1f2b30fff0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/MigrateRSGroupProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public MigrateRSGroupProcedure(MasterProcedureEnv env, TableName tableName) { @Override protected Optional modify(MasterProcedureEnv env, TableDescriptor current) - throws IOException { + throws IOException { if (current.getRegionServerGroup().isPresent()) { // usually this means user has set the rs group using the new code which will set the group // directly on table descriptor, skip. @@ -54,13 +54,14 @@ protected Optional modify(MasterProcedureEnv env, TableDescript return Optional.empty(); } RSGroupInfo group = - env.getMasterServices().getRSGroupInfoManager().getRSGroupForTable(current.getTableName()); + env.getMasterServices().getRSGroupInfoManager().getRSGroupForTable(current.getTableName()); if (group == null) { - LOG.debug("RSGroup for table {} is empty when migrating, usually this should not happen" + - " unless we have removed the RSGroup, ignore...", current.getTableName()); + LOG.debug("RSGroup for table {} is empty when migrating, usually this should not happen" + + " unless we have removed the RSGroup, ignore...", + current.getTableName()); return Optional.empty(); } - return Optional - .of(TableDescriptorBuilder.newBuilder(current).setRegionServerGroup(group.getName()).build()); + return Optional.of( + TableDescriptorBuilder.newBuilder(current).setRegionServerGroup(group.getName()).build()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java index 4c291547110a..164a6be1cc11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -88,7 +88,7 @@ public RSGroupInfo getRSGroupInfo(String groupName) throws IOException { */ public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException { GetRSGroupInfoOfTableRequest request = GetRSGroupInfoOfTableRequest.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); + .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); try { GetRSGroupInfoOfTableResponse resp = stub.getRSGroupInfoOfTable(null, request); if (resp.hasRSGroupInfo()) { @@ -107,10 +107,10 @@ public void moveServers(Set
          servers, String targetGroup) throws IOExcep Set hostPorts = Sets.newHashSet(); for (Address el : servers) { hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) - .setPort(el.getPort()).build()); + .setPort(el.getPort()).build()); } - MoveServersRequest request = - MoveServersRequest.newBuilder().setTargetGroup(targetGroup).addAllServers(hostPorts).build(); + MoveServersRequest request = MoveServersRequest.newBuilder().setTargetGroup(targetGroup) + .addAllServers(hostPorts).build(); try { stub.moveServers(null, request); } catch (ServiceException e) { @@ -165,7 +165,8 @@ public void removeRSGroup(String name) throws IOException { * Balance regions in the given RegionServer group. * @return BalanceResponse details about the balancer run */ - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { try { BalanceRSGroupRequest req = ProtobufUtil.createBalanceRSGroupRequest(groupName, request); return ProtobufUtil.toBalanceResponse(stub.balanceRSGroup(null, req)); @@ -179,8 +180,9 @@ public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) */ public List listRSGroups() throws IOException { try { - List resp = stub - .listRSGroupInfos(null, ListRSGroupInfosRequest.getDefaultInstance()).getRSGroupInfoList(); + List resp = + stub.listRSGroupInfos(null, ListRSGroupInfosRequest.getDefaultInstance()) + .getRSGroupInfoList(); List result = new ArrayList<>(resp.size()); for (RSGroupProtos.RSGroupInfo entry : resp) { result.add(ProtobufUtil.toGroupInfo(entry)); @@ -197,8 +199,8 @@ public List listRSGroups() throws IOException { */ public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException { GetRSGroupInfoOfServerRequest request = - GetRSGroupInfoOfServerRequest.newBuilder().setServer(HBaseProtos.ServerName.newBuilder() - .setHostName(hostPort.getHostname()).setPort(hostPort.getPort()).build()).build(); + GetRSGroupInfoOfServerRequest.newBuilder().setServer(HBaseProtos.ServerName.newBuilder() + .setHostName(hostPort.getHostname()).setPort(hostPort.getPort()).build()).build(); try { GetRSGroupInfoOfServerResponse resp = stub.getRSGroupInfoOfServer(null, request); if (resp.hasRSGroupInfo()) { @@ -218,12 +220,12 @@ public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException { * @throws IOException if moving the server and tables fail */ public void moveServersAndTables(Set
          servers, Set tables, String targetGroup) - throws IOException { + throws IOException { MoveServersAndTablesRequest.Builder builder = - MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup); + MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup); for (Address el : servers) { builder.addServers(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) - .setPort(el.getPort()).build()); + .setPort(el.getPort()).build()); } for (TableName tableName : tables) { builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); @@ -249,10 +251,10 @@ public void removeServers(Set
          servers) throws IOException { Set hostPorts = Sets.newHashSet(); for (Address el : servers) { hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) - .setPort(el.getPort()).build()); + .setPort(el.getPort()).build()); } RemoveServersRequest request = - RemoveServersRequest.newBuilder().addAllServers(hostPorts).build(); + RemoveServersRequest.newBuilder().addAllServers(hostPorts).build(); try { stub.removeServers(null, request); } catch (ServiceException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 574d6e79edb4..291a342405a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java index b8b2a4f3206f..3b949e1c3a82 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServiceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,9 +39,11 @@ import org.apache.hadoop.hbase.procedure2.Procedure; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos; @@ -74,9 +76,8 @@ * Implementation of RSGroupAdminService defined in RSGroupAdmin.proto. This class calls * {@link RSGroupInfoManagerImpl} for actual work, converts result to protocol buffer response, * handles exceptions if any occurred and then calls the {@code RpcCallback} with the response. - * - * @deprecated Keep it here only for compatibility with {@link RSGroupAdminClient}, - * using {@link org.apache.hadoop.hbase.master.MasterRpcServices} instead. + * @deprecated Keep it here only for compatibility with {@link RSGroupAdminClient}, using + * {@link org.apache.hadoop.hbase.master.MasterRpcServices} instead. 
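Per the @deprecated note above, this protobuf service is kept only so old RSGroupAdminClient callers keep working; new code is expected to go through the master's normal RPC surface, i.e. the rsgroup methods on the public Admin interface. A hedged sketch of that replacement path; the exact method names (addRSGroup, moveServersToRSGroup, setRSGroup) are assumed from the rsgroup-in-core API and should be checked against the Admin javadoc of this branch, and the host, port, table and group names are made up:

    import java.util.Collections;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.net.Address;

    public class RSGroupAdminSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            Admin admin = conn.getAdmin()) {
          admin.addRSGroup("app_group");
          admin.moveServersToRSGroup(
            Collections.singleton(Address.fromParts("rs1.example.com", 16020)), "app_group");
          admin.setRSGroup(Collections.singleton(TableName.valueOf("t1")), "app_group");
        }
      }
    }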
*/ @Deprecated class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService { @@ -90,7 +91,7 @@ class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService { RSGroupAdminServiceImpl() { } - void initialize(MasterServices masterServices){ + void initialize(MasterServices masterServices) { this.master = masterServices; this.rsGroupInfoManager = masterServices.getRSGroupInfoManager(); } @@ -136,7 +137,7 @@ public void getRSGroupInfoOfTable(RpcController controller, GetRSGroupInfoOfTabl master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName); } Optional optGroup = - RSGroupUtil.getRSGroupInfo(master, rsGroupInfoManager, tableName); + RSGroupUtil.getRSGroupInfo(master, rsGroupInfoManager, tableName); if (optGroup.isPresent()) { builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(fillTables(optGroup.get()))); } else { @@ -163,8 +164,8 @@ public void moveServers(RpcController controller, MoveServersRequest request, for (HBaseProtos.ServerName el : request.getServersList()) { hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); } - LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " + - request.getTargetGroup()); + LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " + + request.getTargetGroup()); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup()); @@ -208,8 +209,8 @@ public void moveTables(RpcController controller, MoveTablesRequest request, for (HBaseProtos.TableName tableName : request.getTableNameList()) { tables.add(ProtobufUtil.toTableName(tableName)); } - LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables + " to rsgroup " + - request.getTargetGroup()); + LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables + " to rsgroup " + + request.getTargetGroup()); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup()); @@ -266,24 +267,24 @@ public void removeRSGroup(RpcController controller, RemoveRSGroupRequest request public void balanceRSGroup(RpcController controller, BalanceRSGroupRequest request, RpcCallback done) { BalanceRequest balanceRequest = ProtobufUtil.toBalanceRequest(request); - BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder() - .setBalanceRan(false); + BalanceRSGroupResponse.Builder builder = + BalanceRSGroupResponse.newBuilder().setBalanceRan(false); LOG.info( master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName()); try { if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost() - .preBalanceRSGroup(request.getRSGroupName(), balanceRequest); + master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName(), + balanceRequest); } BalanceResponse response = - rsGroupInfoManager.balanceRSGroup(request.getRSGroupName(), balanceRequest); + rsGroupInfoManager.balanceRSGroup(request.getRSGroupName(), balanceRequest); ProtobufUtil.populateBalanceRSGroupResponse(builder, response); if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost() - .postBalanceRSGroup(request.getRSGroupName(), balanceRequest, response); + master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), + balanceRequest, response); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -362,8 +363,8 @@ public void 
moveServersAndTables(RpcController controller, MoveServersAndTablesR for (HBaseProtos.TableName tableName : request.getTableNameList()) { tables.add(ProtobufUtil.toTableName(tableName)); } - LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " and tables " + - tables + " to rsgroup" + request.getTargetGroup()); + LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " and tables " + + tables + " to rsgroup" + request.getTargetGroup()); try { if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables, @@ -410,8 +411,8 @@ public void renameRSGroup(RpcController controller, RenameRSGroupRequest request RpcCallback done) { String oldRSGroup = request.getOldRsgroupName(); String newRSGroup = request.getNewRsgroupName(); - LOG.info("{} rename rsgroup from {} to {}", - master.getClientIdAuditPrefix(), oldRSGroup, newRSGroup); + LOG.info("{} rename rsgroup from {} to {}", master.getClientIdAuditPrefix(), oldRSGroup, + newRSGroup); RenameRSGroupResponse.Builder builder = RenameRSGroupResponse.newBuilder(); try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 07b32053cbd1..b332bd767559 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseIOException; @@ -80,11 +79,9 @@ public class RSGroupBasedLoadBalancer implements LoadBalancer { private volatile LoadBalancer internalBalancer; /** - * Set this key to {@code true} to allow region fallback. - * Fallback to the default rsgroup first, then fallback to any group if no online servers in - * default rsgroup. - * Please keep balancer switch on at the same time, which is relied on to correct misplaced - * regions + * Set this key to {@code true} to allow region fallback. Fallback to the default rsgroup first, + * then fallback to any group if no online servers in default rsgroup. Please keep balancer switch + * on at the same time, which is relied on to correct misplaced regions */ public static final String FALLBACK_GROUP_ENABLE_KEY = "hbase.rsgroup.fallback.enable"; @@ -94,7 +91,8 @@ public class RSGroupBasedLoadBalancer implements LoadBalancer { * Used by reflection in {@link org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory}. 
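The javadoc merged above documents hbase.rsgroup.fallback.enable: when a table's group has no online servers its regions fall back to the default group first and then to any group, and the normal balancer switch has to stay on so the misplaced regions are moved back later. A small sketch of enabling it programmatically (the same key can be set in hbase-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;

    public class FallbackConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // FALLBACK_GROUP_ENABLE_KEY is "hbase.rsgroup.fallback.enable"; it defaults to false.
        conf.setBoolean(RSGroupBasedLoadBalancer.FALLBACK_GROUP_ENABLE_KEY, true);
        // Keep the balancer switch on as well, so regions placed outside their group while
        // it had no live servers are corrected once the group recovers.
      }
    }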
*/ @InterfaceAudience.Private - public RSGroupBasedLoadBalancer() {} + public RSGroupBasedLoadBalancer() { + } // must be called after calling initialize @Override @@ -104,8 +102,8 @@ public synchronized void updateClusterMetrics(ClusterMetrics sm) { } @Override - public synchronized void updateBalancerLoadInfo(Map>> - loadOfAllTable){ + public synchronized void + updateBalancerLoadInfo(Map>> loadOfAllTable) { internalBalancer.updateBalancerLoadInfo(loadOfAllTable); } @@ -125,8 +123,8 @@ public synchronized List balanceCluster( } // Calculate correct assignments and a list of RegionPlan for mis-placed regions - Pair>>, List> - correctedStateAndRegionPlans = correctAssignments(loadOfAllTable); + Pair>>, List> correctedStateAndRegionPlans = + correctAssignments(loadOfAllTable); Map>> correctedLoadOfAllTable = correctedStateAndRegionPlans.getFirst(); List regionPlans = correctedStateAndRegionPlans.getSecond(); @@ -206,8 +204,8 @@ public Map> retainAssignment(Map servers) throws IOException { + public ServerName randomAssignment(RegionInfo region, List servers) + throws IOException { List, List>> pairs = generateGroupAssignments(Lists.newArrayList(region), servers); List filteredServers = pairs.iterator().next().getSecond(); @@ -246,22 +244,23 @@ private List, List>> generateGroupAssignments( if (LOG.isDebugEnabled()) { LOG.debug("Falling back {} regions to servers outside their RSGroup. Regions: {}", fallbackRegions.size(), fallbackRegions.stream() - .map(RegionInfo::getRegionNameAsString).collect(Collectors.toSet())); + .map(RegionInfo::getRegionNameAsString).collect(Collectors.toSet())); } candidates = getFallBackCandidates(servers); } - candidates = (candidates == null || candidates.isEmpty()) ? - Lists.newArrayList(BOGUS_SERVER_NAME) : candidates; + candidates = + (candidates == null || candidates.isEmpty()) ? Lists.newArrayList(BOGUS_SERVER_NAME) + : candidates; result.add(Pair.newPair(fallbackRegions, candidates)); } return result; - } catch(IOException e) { + } catch (IOException e) { throw new HBaseIOException("Failed to generate group assignments", e); } } private List filterOfflineServers(RSGroupInfo RSGroupInfo, - List onlineServers) { + List onlineServers) { if (RSGroupInfo != null) { return filterServers(RSGroupInfo.getServers(), onlineServers); } else { @@ -399,8 +398,8 @@ public void regionOffline(RegionInfo regionInfo) { public synchronized void onConfigurationChange(Configuration conf) { boolean newFallbackEnabled = conf.getBoolean(FALLBACK_GROUP_ENABLE_KEY, false); if (fallbackEnabled != newFallbackEnabled) { - LOG.info("Changing the value of {} from {} to {}", FALLBACK_GROUP_ENABLE_KEY, - fallbackEnabled, newFallbackEnabled); + LOG.info("Changing the value of {} from {} to {}", FALLBACK_GROUP_ENABLE_KEY, fallbackEnabled, + newFallbackEnabled); fallbackEnabled = newFallbackEnabled; } provider.onConfigurationChange(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java index 9d73a5279886..5b0e87a9ff58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 6ec3746c01b2..8aa3b8505569 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -110,8 +110,8 @@ * persistence store for the group information. It also makes use of zookeeper to store group * information needed for bootstrapping during offline mode. *

          Concurrency

          RSGroup state is kept locally in Maps. There is a rsgroup name to cached - * RSGroupInfo Map at {@link RSGroupInfoHolder#groupName2Group}. - * These Maps are persisted to the hbase:rsgroup table (and cached in zk) on each modification. + * RSGroupInfo Map at {@link RSGroupInfoHolder#groupName2Group}. These Maps are persisted to the + * hbase:rsgroup table (and cached in zk) on each modification. *

          * Mutations on state are synchronized but reads can continue without having to wait on an instance * monitor, mutations do wholesale replace of the Maps on update -- Copy-On-Write; the local Maps of @@ -132,8 +132,8 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { static final TableName RSGROUP_TABLE_NAME = TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); - static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = "should keep at least " + - "one server in 'default' RSGroup."; + static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = + "should keep at least " + "one server in 'default' RSGroup."; /** Define the config key of retries threshold when movements failed */ static final String FAILED_MOVE_MAX_RETRY = "hbase.rsgroup.move.max.retry"; @@ -155,12 +155,12 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager { private static final TableDescriptor RSGROUP_TABLE_DESC; static { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(RSGROUP_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(META_FAMILY_BYTES)) - .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(META_FAMILY_BYTES)) + .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()); try { builder.setCoprocessor( CoprocessorDescriptorBuilder.newBuilder(MultiRowMutationEndpoint.class.getName()) - .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); + .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); } catch (IOException ex) { throw new Error(ex); } @@ -184,7 +184,7 @@ private static final class RSGroupInfoHolder { group2Name2GroupBuilder.put(groupName, rsGroupInfo); if (!groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { rsGroupInfo.getTables() - .forEach(tableName -> tableName2GroupBuilder.put(tableName, rsGroupInfo)); + .forEach(tableName -> tableName2GroupBuilder.put(tableName, rsGroupInfo)); } }); this.groupName2Group = group2Name2GroupBuilder.build(); @@ -205,7 +205,7 @@ private static final class RSGroupInfoHolder { static class RSGroupMappingScript { static final String RS_GROUP_MAPPING_SCRIPT = "hbase.rsgroup.table.mapping.script"; static final String RS_GROUP_MAPPING_SCRIPT_TIMEOUT = - "hbase.rsgroup.table.mapping.script.timeout"; + "hbase.rsgroup.table.mapping.script.timeout"; private Shell.ShellCommandExecutor rsgroupMappingScript; RSGroupMappingScript(Configuration conf) { @@ -214,9 +214,8 @@ static class RSGroupMappingScript { return; } - rsgroupMappingScript = new Shell.ShellCommandExecutor( - new String[] { script, "", "" }, null, null, - conf.getLong(RS_GROUP_MAPPING_SCRIPT_TIMEOUT, 5000) // 5 seconds + rsgroupMappingScript = new Shell.ShellCommandExecutor(new String[] { script, "", "" }, null, + null, conf.getLong(RS_GROUP_MAPPING_SCRIPT_TIMEOUT, 5000) // 5 seconds ); } @@ -231,14 +230,14 @@ String getRSGroup(String namespace, String tablename) { rsgroupMappingScript.execute(); } catch (IOException e) { // This exception may happen, like process doesn't have permission to run this script. 
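The RSGroupMappingScript hunks above show how the optional table-to-group mapping script is wired up: the executable named by hbase.rsgroup.table.mapping.script is run with the namespace and table name as arguments, its trimmed stdout is used as the group name, and any failure falls back to the default group; the timeout key defaults to 5000 ms. A sketch of enabling it, with a hypothetical script path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MappingScriptConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The script is invoked as: <script> <namespace> <table>; trimmed stdout names the group.
        conf.set("hbase.rsgroup.table.mapping.script", "/usr/local/bin/rsgroup-mapping.sh");
        // Execution timeout in milliseconds; the code above defaults this to 5000.
        conf.setLong("hbase.rsgroup.table.mapping.script.timeout", 10000L);
      }
    }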
- LOG.error("{}, placing {} back to default rsgroup", - e.getMessage(), + LOG.error("{}, placing {} back to default rsgroup", e.getMessage(), TableName.valueOf(namespace, tablename)); return RSGroupInfo.DEFAULT_GROUP; } return rsgroupMappingScript.getOutput().trim(); } } + private RSGroupMappingScript script; private RSGroupInfoManagerImpl(MasterServices masterServices) { @@ -255,7 +254,7 @@ private synchronized void updateDefaultServers() { RSGroupInfo oldDefaultGroupInfo = getRSGroup(RSGroupInfo.DEFAULT_GROUP); assert oldDefaultGroupInfo != null; RSGroupInfo newDefaultGroupInfo = - new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers()); + new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers()); newDefaultGroupInfo.addAllTables(oldDefaultGroupInfo.getTables()); newGroupMap.put(RSGroupInfo.DEFAULT_GROUP, newDefaultGroupInfo); // do not need to persist, as we do not persist default group. @@ -297,8 +296,8 @@ public void start() { public synchronized void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException { checkGroupName(rsGroupInfo.getName()); Map rsGroupMap = holder.groupName2Group; - if (rsGroupMap.get(rsGroupInfo.getName()) != null || - rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + if (rsGroupMap.get(rsGroupInfo.getName()) != null + || rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { throw new ConstraintException("Group already exists: " + rsGroupInfo.getName()); } Map newGroupMap = Maps.newHashMap(rsGroupMap); @@ -320,7 +319,7 @@ private RSGroupInfo getRSGroupInfo(final String groupName) throws ConstraintExce */ private Set
<Address>
          getOnlineServers() { return masterServices.getServerManager().getOnlineServers().keySet().stream() - .map(ServerName::getAddress).collect(Collectors.toSet()); + .map(ServerName::getAddress).collect(Collectors.toSet()); } public synchronized Set
<Address> moveServers(Set<Address>
servers, String srcGroup, @@ -332,7 +331,7 @@ public synchronized Set<Address>
moveServers(Set<Address>
servers, String srcGro // it. If not 'default' group, add server to 'dst' rsgroup EVEN IF IT IS NOT online (could be a // rsgroup of dead servers that are to come back later). Set<Address>
          onlineServers = - dst.getName().equals(RSGroupInfo.DEFAULT_GROUP) ? getOnlineServers() : null; + dst.getName().equals(RSGroupInfo.DEFAULT_GROUP) ? getOnlineServers() : null; for (Address el : servers) { src.removeServer(el); if (onlineServers != null) { @@ -373,15 +372,15 @@ public synchronized void removeRSGroup(String groupName) throws IOException { RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); int serverCount = rsGroupInfo.getServers().size(); if (serverCount > 0) { - throw new ConstraintException("RSGroup " + groupName + " has " + serverCount + - " servers; you must remove these servers from the RSGroup before" + - " the RSGroup can be removed."); + throw new ConstraintException("RSGroup " + groupName + " has " + serverCount + + " servers; you must remove these servers from the RSGroup before" + + " the RSGroup can be removed."); } for (TableDescriptor td : masterServices.getTableDescriptors().getAll().values()) { if (td.getRegionServerGroup().map(groupName::equals).orElse(false)) { - throw new ConstraintException("RSGroup " + groupName + " is already referenced by " + - td.getTableName() + "; you must remove all the tables from the RSGroup before " + - "the RSGroup can be removed."); + throw new ConstraintException("RSGroup " + groupName + " is already referenced by " + + td.getTableName() + "; you must remove all the tables from the RSGroup before " + + "the RSGroup can be removed."); } } for (NamespaceDescriptor ns : masterServices.getClusterSchema().getNamespaces()) { @@ -394,7 +393,7 @@ public synchronized void removeRSGroup(String groupName) throws IOException { Map rsGroupMap = holder.groupName2Group; if (!rsGroupMap.containsKey(groupName) || groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { throw new ConstraintException( - "Group " + groupName + " does not exist or is a reserved " + "group"); + "Group " + groupName + " does not exist or is a reserved " + "group"); } Map newGroupMap = Maps.newHashMap(rsGroupMap); newGroupMap.remove(groupName); @@ -478,9 +477,8 @@ private List retrieveGroupListFromZookeeper() throws IOException { if (data != null && data.length > 0) { ProtobufUtil.expectPBMagicPrefix(data); ByteArrayInputStream bis = - new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length); - RSGroupInfoList - .add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); + new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length); + RSGroupInfoList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); } } LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size()); @@ -493,8 +491,7 @@ private List retrieveGroupListFromZookeeper() throws IOException { private void migrate(Collection groupList) { TableDescriptors tds = masterServices.getTableDescriptors(); - ProcedureExecutor procExec = - masterServices.getMasterProcedureExecutor(); + ProcedureExecutor procExec = masterServices.getMasterProcedureExecutor(); for (RSGroupInfo groupInfo : groupList) { if (groupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { continue; @@ -528,7 +525,7 @@ private void migrate(Collection groupList) { // master first and then region server, so after all the region servers has been reopened, // the new TableDescriptor will be loaded. 
MigrateRSGroupProcedure proc = - new MigrateRSGroupProcedure(procExec.getEnvironment(), tableName); + new MigrateRSGroupProcedure(procExec.getEnvironment(), tableName); procExec.submitProcedure(proc); procs.add(proc); } @@ -547,7 +544,7 @@ private void migrate(Collection groupList) { RSGroupInfo currentInfo = rsGroupMap.get(groupInfo.getName()); if (currentInfo != null) { RSGroupInfo newInfo = - new RSGroupInfo(currentInfo.getName(), currentInfo.getServers(), failedTables); + new RSGroupInfo(currentInfo.getName(), currentInfo.getServers(), failedTables); Map newGroupMap = new HashMap<>(rsGroupMap); newGroupMap.put(groupInfo.getName(), newInfo); try { @@ -659,8 +656,9 @@ private synchronized void flushConfig(Map newGroupMap) thro Map oldGroupMap = Maps.newHashMap(holder.groupName2Group); RSGroupInfo oldDefaultGroup = oldGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); - if (!oldGroupMap.equals(newGroupMap) /* compare both tables and servers in other groups */ || - !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables()) + if (!oldGroupMap.equals(newGroupMap) + /* compare both tables and servers in other groups */ || !oldDefaultGroup.getTables() + .equals(newDefaultGroup.getTables()) /* compare tables in default group */) { throw new IOException("Only servers in default group can be updated during offline mode"); } @@ -815,9 +813,9 @@ private boolean waitForGroupTableOnline() { private void createRSGroupTable() throws IOException { OptionalLong optProcId = masterServices.getProcedures().stream() - .filter(p -> p instanceof CreateTableProcedure).map(p -> (CreateTableProcedure) p) - .filter(p -> p.getTableName().equals(RSGROUP_TABLE_NAME)).mapToLong(Procedure::getProcId) - .findFirst(); + .filter(p -> p instanceof CreateTableProcedure).map(p -> (CreateTableProcedure) p) + .filter(p -> p.getTableName().equals(RSGROUP_TABLE_NAME)).mapToLong(Procedure::getProcId) + .findFirst(); long procId; if (optProcId.isPresent()) { procId = optProcId.getAsLong(); @@ -827,8 +825,8 @@ private void createRSGroupTable() throws IOException { } // wait for region to be online int tries = 600; - while (!(masterServices.getMasterProcedureExecutor().isFinished(procId)) && - masterServices.getMasterProcedureExecutor().isRunning() && tries > 0) { + while (!(masterServices.getMasterProcedureExecutor().isFinished(procId)) + && masterServices.getMasterProcedureExecutor().isRunning() && tries > 0) { try { Thread.sleep(100); } catch (InterruptedException e) { @@ -841,8 +839,8 @@ private void createRSGroupTable() throws IOException { } else { Procedure result = masterServices.getMasterProcedureExecutor().getResult(procId); if (result != null && result.isFailed()) { - throw new IOException("Failed to create group table. " + - MasterProcedureUtil.unwrapRemoteIOException(result)); + throw new IOException("Failed to create group table. " + + MasterProcedureUtil.unwrapRemoteIOException(result)); } } } @@ -890,7 +888,6 @@ public RSGroupInfo getRSGroupForTable(TableName tableName) throws IOException { return holder.tableName2Group.get(tableName); } - /** * Check if the set of servers are belong to dead servers list or online servers list. * @param servers servers to remove @@ -907,19 +904,18 @@ private void checkForDeadOrOnlineServers(Set
<Address> servers) throws IOExceptio } Set<Address>
          deadServers = new HashSet<>(); - for(ServerName server: masterServices.getServerManager().getDeadServers().copyServerNames()) { + for (ServerName server : masterServices.getServerManager().getDeadServers().copyServerNames()) { deadServers.add(server.getAddress()); } - for (Address address: servers) { + for (Address address : servers) { if (onlineServers.contains(address)) { throw new DoNotRetryIOException( "Server " + address + " is an online server, not allowed to remove."); } if (deadServers.contains(address)) { - throw new DoNotRetryIOException( - "Server " + address + " is on the dead servers list," - + " Maybe it will come back again, not allowed to remove."); + throw new DoNotRetryIOException("Server " + address + " is on the dead servers list," + + " Maybe it will come back again, not allowed to remove."); } } } @@ -928,13 +924,13 @@ private void checkOnlineServersOnly(Set
<Address> servers) throws IOException { // This uglyness is because we only have Address, not ServerName. // Online servers are keyed by ServerName. Set<Address>
          onlineServers = new HashSet<>(); - for(ServerName server: masterServices.getServerManager().getOnlineServers().keySet()) { + for (ServerName server : masterServices.getServerManager().getOnlineServers().keySet()) { onlineServers.add(server.getAddress()); } - for (Address address: servers) { + for (Address address : servers) { if (!onlineServers.contains(address)) { - throw new DoNotRetryIOException("Server " + address + - " is not an online server in 'default' RSGroup."); + throw new DoNotRetryIOException( + "Server " + address + " is not an online server in 'default' RSGroup."); } } } @@ -944,8 +940,8 @@ private void checkOnlineServersOnly(Set
          servers) throws IOException { */ private List getRegions(final Address server) { LinkedList regions = new LinkedList<>(); - for (Map.Entry el : - masterServices.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + for (Map.Entry el : masterServices.getAssignmentManager() + .getRegionStates().getRegionAssignments().entrySet()) { if (el.getValue() == null) { continue; } @@ -955,8 +951,8 @@ private List getRegions(final Address server) { } } for (RegionStateNode state : masterServices.getAssignmentManager().getRegionsInTransition()) { - if (state.getRegionLocation() != null && - state.getRegionLocation().getAddress().equals(server)) { + if (state.getRegionLocation() != null + && state.getRegionLocation().getAddress().equals(server)) { addRegion(regions, state.getRegionInfo()); } } @@ -977,31 +973,30 @@ private void addRegion(final LinkedList regions, RegionInfo hri) { /** * Move every region from servers which are currently located on these servers, but should not be * located there. - * @param movedServers the servers that are moved to new group + * @param movedServers the servers that are moved to new group * @param srcGrpServers all servers in the source group, excluding the movedServers * @param targetGroupName the target group * @param sourceGroupName the source group * @throws IOException if moving the server and tables fail */ private void moveServerRegionsFromGroup(Set
<Address> movedServers, Set<Address>
          srcGrpServers, - String targetGroupName, String sourceGroupName) throws IOException { + String targetGroupName, String sourceGroupName) throws IOException { moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroupName, sourceGroupName, - rs -> getRegions(rs), info -> { + rs -> getRegions(rs), info -> { try { String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable()) - .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP); + .map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP); return groupName.equals(targetGroupName); } catch (IOException e) { - LOG.warn("Failed to test group for region {} and target group {}", info, - targetGroupName); + LOG.warn("Failed to test group for region {} and target group {}", info, targetGroupName); return false; } }); } private void moveRegionsBetweenGroups(Set regionsOwners, Set
          newRegionsOwners, - String targetGroupName, String sourceGroupName, Function> getRegionsInfo, - Function validation) throws IOException { + String targetGroupName, String sourceGroupName, Function> getRegionsInfo, + Function validation) throws IOException { // Get server names corresponding to given Addresses List movedServerNames = new ArrayList<>(regionsOwners.size()); List srcGrpServerNames = new ArrayList<>(newRegionsOwners.size()); @@ -1030,7 +1025,7 @@ private void moveRegionsBetweenGroups(Set regionsOwners, Set
          new region.getShortNameToLog(), targetGroupName); // Move region back to source RSGroup servers ServerName dest = - masterServices.getLoadBalancer().randomAssignment(region, srcGrpServerNames); + masterServices.getLoadBalancer().randomAssignment(region, srcGrpServerNames); if (dest == null) { failedRegions.add(region.getRegionNameAsString()); continue; @@ -1062,37 +1057,37 @@ private void moveRegionsBetweenGroups(Set regionsOwners, Set
          new retry++; } } while (!failedRegions.isEmpty() && retry <= masterServices.getConfiguration() - .getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE)); + .getInt(FAILED_MOVE_MAX_RETRY, DEFAULT_MAX_RETRY_VALUE)); - //has up to max retry time or there are no more regions to move + // has up to max retry time or there are no more regions to move if (!failedRegions.isEmpty()) { // print failed moved regions, for later process conveniently - String msg = String - .format("move regions for group %s failed, failed regions: %s", sourceGroupName, - failedRegions); + String msg = String.format("move regions for group %s failed, failed regions: %s", + sourceGroupName, failedRegions); LOG.error(msg); throw new DoNotRetryIOException( - msg + ", just record the last failed region's cause, more details in server log", toThrow); + msg + ", just record the last failed region's cause, more details in server log", + toThrow); } } /** - * Wait for all the region move to complete. Keep waiting for other region movement - * completion even if some region movement fails. + * Wait for all the region move to complete. Keep waiting for other region movement completion + * even if some region movement fails. */ private void waitForRegionMovement(List>> regionMoveFutures, - Set failedRegions, String sourceGroupName, int retryCount) { + Set failedRegions, String sourceGroupName, int retryCount) { LOG.info("Moving {} region(s) to group {}, current retry={}", regionMoveFutures.size(), sourceGroupName, retryCount); for (Pair> pair : regionMoveFutures) { try { pair.getSecond().get(); - if (masterServices.getAssignmentManager().getRegionStates(). - getRegionState(pair.getFirst()).isFailedOpen()) { + if (masterServices.getAssignmentManager().getRegionStates().getRegionState(pair.getFirst()) + .isFailedOpen()) { failedRegions.add(pair.getFirst().getRegionNameAsString()); } } catch (InterruptedException e) { - //Dont return form there lets wait for other regions to complete movement. + // Dont return form there lets wait for other regions to complete movement. failedRegions.add(pair.getFirst().getRegionNameAsString()); LOG.warn("Sleep interrupted", e); } catch (Exception e) { @@ -1108,8 +1103,7 @@ private boolean isTableInGroup(TableName tableName, String groupName, if (tablesInGroupCache.contains(tableName)) { return true; } - if (RSGroupUtil.getRSGroupInfo(masterServices, this, tableName) - .map(RSGroupInfo::getName) + if (RSGroupUtil.getRSGroupInfo(masterServices, this, tableName).map(RSGroupInfo::getName) .orElse(RSGroupInfo.DEFAULT_GROUP).equals(groupName)) { tablesInGroupCache.add(tableName); return true; @@ -1121,8 +1115,8 @@ private Map rsGroupGetRegionsInTransition(String groupName) throws IOException { Map rit = Maps.newTreeMap(); Set tablesInGroupCache = new HashSet<>(); - for (RegionStateNode regionNode : - masterServices.getAssignmentManager().getRegionsInTransition()) { + for (RegionStateNode regionNode : masterServices.getAssignmentManager() + .getRegionsInTransition()) { TableName tn = regionNode.getTable(); if (isTableInGroup(tn, groupName, tablesInGroupCache)) { rit.put(regionNode.getRegionInfo().getEncodedName(), regionNode.toRegionState()); @@ -1139,24 +1133,24 @@ private Map rsGroupGetRegionsInTransition(String groupName) * @return A clone of current assignments for this group. 
*/ Map>> getRSGroupAssignmentsByTable( - TableStateManager tableStateManager, String groupName) throws IOException { + TableStateManager tableStateManager, String groupName) throws IOException { Map>> result = Maps.newHashMap(); Set tablesInGroupCache = new HashSet<>(); for (Map.Entry entry : masterServices.getAssignmentManager() - .getRegionStates().getRegionAssignments().entrySet()) { + .getRegionStates().getRegionAssignments().entrySet()) { RegionInfo region = entry.getKey(); TableName tn = region.getTable(); ServerName server = entry.getValue(); if (isTableInGroup(tn, groupName, tablesInGroupCache)) { - if (tableStateManager - .isTableState(tn, TableState.State.DISABLED, TableState.State.DISABLING)) { + if (tableStateManager.isTableState(tn, TableState.State.DISABLED, + TableState.State.DISABLING)) { continue; } if (region.isSplitParent()) { continue; } result.computeIfAbsent(tn, k -> new HashMap<>()) - .computeIfAbsent(server, k -> new ArrayList<>()).add(region); + .computeIfAbsent(server, k -> new ArrayList<>()).add(region); } } RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); @@ -1171,7 +1165,8 @@ Map>> getRSGroupAssignmentsByTable( } @Override - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { ServerManager serverManager = masterServices.getServerManager(); LoadBalancer balancer = masterServices.getLoadBalancer(); getRSGroupInfo(groupName); @@ -1188,15 +1183,15 @@ public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) Map groupRIT = rsGroupGetRegionsInTransition(groupName); if (groupRIT.size() > 0 && !request.isIgnoreRegionsInTransition()) { LOG.debug("Not running balancer because {} region(s) in transition: {}", groupRIT.size(), - StringUtils.abbreviate(masterServices.getAssignmentManager().getRegionStates() - .getRegionsInTransition().toString(), - 256)); + StringUtils.abbreviate(masterServices.getAssignmentManager().getRegionStates() + .getRegionsInTransition().toString(), + 256)); return responseBuilder.build(); } if (serverManager.areDeadServersInProgress()) { LOG.debug("Not running balancer because processing dead regionserver(s): {}", - serverManager.getDeadServers()); + serverManager.getDeadServers()); return responseBuilder.build(); } @@ -1229,8 +1224,8 @@ private void moveTablesAndWait(Set tables, String targetGroup) throws } TableDescriptor newTd = TableDescriptorBuilder.newBuilder(oldTd).setRegionServerGroup(targetGroup).build(); - procIds.add(masterServices.modifyTable(tableName, newTd, HConstants.NO_NONCE, - HConstants.NO_NONCE)); + procIds.add( + masterServices.modifyTable(tableName, newTd, HConstants.NO_NONCE, HConstants.NO_NONCE)); } for (long procId : procIds) { Procedure proc = masterServices.getMasterProcedureExecutor().getProcedure(procId); @@ -1238,7 +1233,7 @@ private void moveTablesAndWait(Set tables, String targetGroup) throws continue; } ProcedureSyncWait.waitForProcedureToCompleteIOE(masterServices.getMasterProcedureExecutor(), - proc, Long.MAX_VALUE); + proc, Long.MAX_VALUE); } LOG.info("Move tables done: moved {} tables to {}", tables.size(), targetGroup); if (LOG.isDebugEnabled()) { @@ -1273,8 +1268,8 @@ public void moveServers(Set
          servers, String targetGroupName) throws IOE RSGroupInfo srcGrp = getRSGroupOfServer(firstServer); if (srcGrp == null) { // Be careful. This exception message is tested for in TestRSGroupAdmin2... - throw new ConstraintException("Server " + firstServer - + " is either offline or it does not exist."); + throw new ConstraintException( + "Server " + firstServer + " is either offline or it does not exist."); } // Only move online servers (when moving from 'default') or servers from other @@ -1286,11 +1281,11 @@ public void moveServers(Set
          servers, String targetGroupName) throws IOE checkOnlineServersOnly(servers); } // Ensure all servers are of same rsgroup. - for (Address server: servers) { + for (Address server : servers) { String tmpGroup = getRSGroupOfServer(server).getName(); if (!tmpGroup.equals(srcGrp.getName())) { - throw new ConstraintException("Move server request should only come from one source " + - "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); + throw new ConstraintException("Move server request should only come from one source " + + "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup); } } if (srcGrp.getServers().size() <= servers.size()) { @@ -1299,16 +1294,16 @@ public void moveServers(Set
          servers, String targetGroupName) throws IOE Optional optGroupName = td.getRegionServerGroup(); if (optGroupName.isPresent() && optGroupName.get().equals(srcGrp.getName())) { throw new ConstraintException( - "Cannot leave a RSGroup " + srcGrp.getName() + " that contains tables('" + - td.getTableName() + "' at least) without servers to host them."); + "Cannot leave a RSGroup " + srcGrp.getName() + " that contains tables('" + + td.getTableName() + "' at least) without servers to host them."); } } } // MovedServers may be < passed in 'servers'. - Set
<Address> movedServers = moveServers(servers, srcGrp.getName(), - targetGroupName); - moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroupName, srcGrp.getName()); + Set<Address>
          movedServers = moveServers(servers, srcGrp.getName(), targetGroupName); + moveServerRegionsFromGroup(movedServers, srcGrp.getServers(), targetGroupName, + srcGrp.getName()); LOG.info("Move servers done: moved {} servers from {} to {}", movedServers.size(), srcGrp.getName(), targetGroupName); if (LOG.isDebugEnabled()) { @@ -1329,7 +1324,7 @@ public synchronized void renameRSGroup(String oldName, String newName) throws IO throw new ConstraintException(RSGroupInfo.DEFAULT_GROUP + " can't be rename"); } checkGroupName(newName); - //getRSGroupInfo validates old RSGroup existence. + // getRSGroupInfo validates old RSGroup existence. RSGroupInfo oldRSG = getRSGroupInfo(oldName); Map rsGroupMap = holder.groupName2Group; if (rsGroupMap.containsKey(newName)) { @@ -1341,12 +1336,9 @@ public synchronized void renameRSGroup(String oldName, String newName) throws IO RSGroupInfo newRSG = new RSGroupInfo(newName, oldRSG.getServers()); newGroupMap.put(newName, newRSG); flushConfig(newGroupMap); - Set updateTables = - masterServices.getTableDescriptors().getAll().values() - .stream() - .filter(t -> oldName.equals(t.getRegionServerGroup().orElse(null))) - .map(TableDescriptor::getTableName) - .collect(Collectors.toSet()); + Set updateTables = masterServices.getTableDescriptors().getAll().values().stream() + .filter(t -> oldName.equals(t.getRegionServerGroup().orElse(null))) + .map(TableDescriptor::getTableName).collect(Collectors.toSet()); setRSGroup(updateTables, newName); LOG.info("Rename RSGroup done: {} => {}", oldName, newName); } @@ -1357,8 +1349,8 @@ public synchronized void updateRSGroupConfig(String groupName, Map rsGroupInfo.removeConfiguration(k)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java index aec38ee49052..d4288ac28a79 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupMajorCompactionTTL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rsgroup; import java.util.Arrays; @@ -54,9 +53,10 @@ public class RSGroupMajorCompactionTTL extends MajorCompactorTTL { } public int compactTTLRegionsOnGroup(Configuration conf, String rsgroup, int concurrency, - long sleep, int numServers, int numRegions, boolean dryRun, boolean skipWait) throws Exception { + long sleep, int numServers, int numRegions, boolean dryRun, boolean skipWait) + throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { if (admin.getRSGroup(rsgroup) == null) { LOG.error("Invalid rsgroup specified: " + rsgroup); throw new IllegalArgumentException("Invalid rsgroup specified: " + rsgroup); @@ -77,7 +77,7 @@ protected Options getOptions() { Options options = getCommonOptions(); options.addOption(Option.builder("rsgroup").required().desc("Tables of rsgroup to be compacted") - .hasArg().build()); + .hasArg().build()); return options; } @@ -91,8 +91,8 @@ public int run(String[] args) throws Exception { try { commandLine = cmdLineParser.parse(options, args); } catch (ParseException parseException) { - System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + - " due to: " + parseException); + System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + + " due to: " + parseException); printUsage(options); return -1; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java index 08c545327a3e..742152bd1d12 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupUtil.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.rsgroup; @@ -48,13 +55,13 @@ public static void enableRSGroup(Configuration conf) { } public static List listTablesInRSGroup(MasterServices master, String groupName) - throws IOException { + throws IOException { List tables = new ArrayList<>(); boolean isDefaultGroup = RSGroupInfo.DEFAULT_GROUP.equals(groupName); for (TableDescriptor td : master.getTableDescriptors().getAll().values()) { // no config means in default group if (RSGroupUtil.getRSGroupInfo(master, master.getRSGroupInfoManager(), td.getTableName()) - .map(g -> g.getName().equals(groupName)).orElse(isDefaultGroup)) { + .map(g -> g.getName().equals(groupName)).orElse(isDefaultGroup)) { tables.add(td.getTableName()); } } @@ -98,8 +105,8 @@ public static Optional getRSGroupInfo(MasterServices master, ClusterSchema clusterSchema = master.getClusterSchema(); if (clusterSchema == null) { if (TableName.isMetaTableName(tableName)) { - LOG.info("Can not get the namespace rs group config for meta table, since the" + - " meta table is not online yet, will use default group to assign meta first"); + LOG.info("Can not get the namespace rs group config for meta table, since the" + + " meta table is not online yet, will use default group to assign meta first"); } else { LOG.warn("ClusterSchema is null, can only use default rsgroup, should not happen?"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java index 8fbe6ac418dd..d50dccc0ff47 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java @@ -32,22 +32,20 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos; /** - * Implementation of secure Hadoop policy provider for mapping - * protocol interfaces to hbase-policy.xml entries. + * Implementation of secure Hadoop policy provider for mapping protocol interfaces to + * hbase-policy.xml entries. 
*/ @InterfaceAudience.Private public class HBasePolicyProvider extends PolicyProvider { protected final static Service[] services = { - new Service("security.client.protocol.acl", ClientService.BlockingInterface.class), - new Service("security.client.protocol.acl", AdminService.BlockingInterface.class), - new Service("security.client.protocol.acl", - MasterProtos.HbckService.BlockingInterface.class), - new Service("security.client.protocol.acl", - RegistryProtos.ClientMetaService.BlockingInterface.class), - new Service("security.admin.protocol.acl", MasterService.BlockingInterface.class), - new Service("security.masterregion.protocol.acl", - RegionServerStatusService.BlockingInterface.class) - }; + new Service("security.client.protocol.acl", ClientService.BlockingInterface.class), + new Service("security.client.protocol.acl", AdminService.BlockingInterface.class), + new Service("security.client.protocol.acl", MasterProtos.HbckService.BlockingInterface.class), + new Service("security.client.protocol.acl", + RegistryProtos.ClientMetaService.BlockingInterface.class), + new Service("security.admin.protocol.acl", MasterService.BlockingInterface.class), + new Service("security.masterregion.protocol.acl", + RegionServerStatusService.BlockingInterface.class) }; @Override public Service[] getServices() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java index 3074fcea1dc3..0bdb5d12a878 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java @@ -43,7 +43,7 @@ public class HBaseSaslRpcServer { public HBaseSaslRpcServer(SaslServerAuthenticationProvider provider, Map saslProps, SecretManager secretManager) - throws IOException { + throws IOException { serverWithProvider = provider.createServer(secretManager, saslProps); saslServer = serverWithProvider.getServer(); } @@ -62,9 +62,7 @@ public void dispose() { } public String getAttemptingUser() { - return serverWithProvider.getAttemptingUser() - .map(Object::toString) - .orElse("Unknown"); + return serverWithProvider.getAttemptingUser().map(Object::toString).orElse("Unknown"); } public byte[] wrap(byte[] buf, int off, int len) throws SaslException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java index 2bf351b63259..5f9433a3f141 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java index be968e530916..0288fdd9e839 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.security.access; @@ -71,7 +70,6 @@ public static boolean isAuthorizationSupported(Configuration conf) { /** * Constructor with existing configuration - * * @param conf Existing configuration to use */ public AccessChecker(final Configuration conf) { @@ -85,27 +83,26 @@ public AuthManager getAuthManager() { /** * Authorizes that the current user has any of the given permissions to access the table. - * * @param user Active user to which authorization checks should be applied * @param request Request type. - * @param tableName Table requested + * @param tableName Table requested * @param permissions Actions being requested * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ - public void requireAccess(User user, String request, TableName tableName, - Action... permissions) throws IOException { + public void requireAccess(User user, String request, TableName tableName, Action... permissions) + throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.accessUserTable(user, tableName, permission)) { - result = AuthResult.allow(request, "Table permission granted", - user, permission, tableName, null, null); + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + null, null); break; } else { // rest of the world - result = AuthResult.deny(request, "Insufficient permissions", - user, permission, tableName, null, null); + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + null, null); } } logResult(result); @@ -129,19 +126,16 @@ public void requirePermission(User user, String request, String filterUser, Acti } /** - * Checks that the user has the given global permission. The generated - * audit log message will contain context information for the operation - * being authorized, based on the given parameters. - * + * Checks that the user has the given global permission. 
The generated audit log message will + * contain context information for the operation being authorized, based on the given parameters. * @param user Active user to which authorization checks should be applied * @param request Request type - * @param perm Action being requested + * @param perm Action being requested * @param tableName Affected table name. * @param familyMap Affected column families. * @param filterUser User name to be filtered from permission as requested */ - public void requireGlobalPermission(User user, String request, - Action perm, TableName tableName, + public void requireGlobalPermission(User user, String request, Action perm, TableName tableName, Map> familyMap, String filterUser) throws IOException { AuthResult result; if (authManager.authorizeUserGlobal(user, perm)) { @@ -160,17 +154,15 @@ public void requireGlobalPermission(User user, String request, } /** - * Checks that the user has the given global permission. The generated - * audit log message will contain context information for the operation - * being authorized, based on the given parameters. - * + * Checks that the user has the given global permission. The generated audit log message will + * contain context information for the operation being authorized, based on the given parameters. * @param user Active user to which authorization checks should be applied * @param request Request type - * @param perm Action being requested + * @param perm Action being requested * @param namespace The given namespace */ - public void requireGlobalPermission(User user, String request, Action perm, - String namespace) throws IOException { + public void requireGlobalPermission(User user, String request, Action perm, String namespace) + throws IOException { AuthResult authResult; if (authManager.authorizeUserGlobal(user, perm)) { authResult = AuthResult.allow(request, "Global check allowed", user, perm, null); @@ -217,12 +209,11 @@ public void requireNamespacePermission(User user, String request, String namespa /** * Checks that the user has the given global or namespace permission. - * * @param user Active user to which authorization checks should be applied * @param request Request type - * @param namespace The given namespace + * @param namespace The given namespace * @param tableName Table requested - * @param familyMap Column family map requested + * @param familyMap Column family map requested * @param permissions Actions being requested */ public void requireNamespacePermission(User user, String request, String namespace, @@ -249,13 +240,12 @@ public void requireNamespacePermission(User user, String request, String namespa } /** - * Authorizes that the current user has any of the given permissions for the - * given table, column family and column qualifier. - * + * Authorizes that the current user has any of the given permissions for the given table, column + * family and column qualifier. 
* @param user Active user to which authorization checks should be applied * @param request Request type * @param tableName Table requested - * @param family Column family requested + * @param family Column family requested * @param qualifier Column qualifier requested * @param filterUser User name to be filtered from permission as requested * @param permissions Actions being requested @@ -268,13 +258,13 @@ public void requirePermission(User user, String request, TableName tableName, by for (Action permission : permissions) { if (authManager.authorizeUserTable(user, tableName, family, qualifier, permission)) { - result = AuthResult.allow(request, "Table permission granted", - user, permission, tableName, family, qualifier); + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + family, qualifier); break; } else { // rest of the world - result = AuthResult.deny(request, "Insufficient permissions", - user, permission, tableName, family, qualifier); + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + family, qualifier); } } result.getParams().addExtraParam("filterUser", filterUser); @@ -285,32 +275,30 @@ public void requirePermission(User user, String request, TableName tableName, by } /** - * Authorizes that the current user has any of the given permissions for the - * given table, column family and column qualifier. - * + * Authorizes that the current user has any of the given permissions for the given table, column + * family and column qualifier. * @param user Active user to which authorization checks should be applied * @param request Request type * @param tableName Table requested - * @param family Column family param + * @param family Column family param * @param qualifier Column qualifier param - * @throws IOException if obtaining the current user fails + * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ - public void requireTablePermission(User user, String request, - TableName tableName,byte[] family, byte[] qualifier, - Action... permissions) throws IOException { + public void requireTablePermission(User user, String request, TableName tableName, byte[] family, + byte[] qualifier, Action... 
permissions) throws IOException { AuthResult result = null; for (Action permission : permissions) { if (authManager.authorizeUserTable(user, tableName, permission)) { - result = AuthResult.allow(request, "Table permission granted", - user, permission, tableName, null, null); + result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, + null, null); result.getParams().setFamily(family).setQualifier(qualifier); break; } else { // rest of the world - result = AuthResult.deny(request, "Insufficient permissions", - user, permission, tableName, family, qualifier); + result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, + family, qualifier); result.getParams().setFamily(family).setQualifier(qualifier); } } @@ -338,28 +326,23 @@ public void performOnSuperuser(String request, User caller, String userToBeCheck } for (String name : userGroups) { if (Superusers.isSuperUser(name)) { - AuthResult result = AuthResult.deny( - request, - "Granting or revoking superusers's or supergroups's permissions is not allowed", - caller, - Action.ADMIN, - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); + AuthResult result = AuthResult.deny(request, + "Granting or revoking superusers's or supergroups's permissions is not allowed", caller, + Action.ADMIN, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); logResult(result); throw new AccessDeniedException(result.getReason()); } } } - public void checkLockPermissions(User user, String namespace, - TableName tableName, RegionInfo[] regionInfos, String reason) - throws IOException { + public void checkLockPermissions(User user, String namespace, TableName tableName, + RegionInfo[] regionInfos, String reason) throws IOException { if (namespace != null && !namespace.isEmpty()) { requireNamespacePermission(user, reason, namespace, null, Action.ADMIN, Action.CREATE); } else if (tableName != null || (regionInfos != null && regionInfos.length > 0)) { // So, either a table or regions op. If latter, check perms ons table. - TableName tn = tableName != null? tableName: regionInfos[0].getTable(); - requireTablePermission(user, reason, tn, null, null, - Action.ADMIN, Action.CREATE); + TableName tn = tableName != null ? tableName : regionInfos[0].getTable(); + requireTablePermission(user, reason, tn, null, null, Action.ADMIN, Action.CREATE); } else { throw new DoNotRetryIOException("Invalid lock level when requesting permissions."); } @@ -370,13 +353,12 @@ public static void logResult(AuthResult result) { User user = result.getUser(); UserGroupInformation ugi = user != null ? user.getUGI() : null; AUDITLOG.trace( - "Access {} for user {}; reason: {}; remote address: {}; request: {}; context: {};" + - "auth method: {}", + "Access {} for user {}; reason: {}; remote address: {}; request: {}; context: {};" + + "auth method: {}", (result.isAllowed() ? "allowed" : "denied"), - (user != null ? user.getShortName() : "UNKNOWN"), - result.getReason(), RpcServer.getRemoteAddress().map(InetAddress::toString).orElse(""), - result.getRequest(), result.toContextString(), - ugi != null ? ugi.getAuthenticationMethod() : "UNKNOWN"); + (user != null ? user.getShortName() : "UNKNOWN"), result.getReason(), + RpcServer.getRemoteAddress().map(InetAddress::toString).orElse(""), result.getRequest(), + result.toContextString(), ugi != null ? 
ugi.getAuthenticationMethod() : "UNKNOWN"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java index 79233df751eb..41c5e8e7a85e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; import java.util.Map; import java.util.Objects; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -32,20 +29,19 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; +import org.apache.yetus.audience.InterfaceAudience; /** * NOTE: for internal use only by AccessController implementation - * *

- * TODO: There is room for further performance optimization here.
- * Calling AuthManager.authorize() per KeyValue imposes a fair amount of
- * overhead. A more optimized solution might look at the qualifiers where
- * permissions are actually granted and explicitly limit the scan to those.
+ * TODO: There is room for further performance optimization here. Calling AuthManager.authorize()
+ * per KeyValue imposes a fair amount of overhead. A more optimized solution might look at the
+ * qualifiers where permissions are actually granted and explicitly limit the scan to those.
 * </p>
 * <p>
- * We should aim to use this _only_ when access to the requested column families
- * is not granted at the column family levels. If table or column family
- * access succeeds, then there is no need to impose the overhead of this filter.
+ * We should aim to use this _only_ when access to the requested column families is not granted at
+ * the column family levels. If table or column family access succeeds, then there is no need to
+ * impose the overhead of this filter.
 * </p>
          */ @InterfaceAudience.Private @@ -75,8 +71,8 @@ public static enum Strategy { AccessControlFilter() { } - AccessControlFilter(AuthManager mgr, User ugi, TableName tableName, - Strategy strategy, Map cfVsMaxVersions) { + AccessControlFilter(AuthManager mgr, User ugi, TableName tableName, Strategy strategy, + Map cfVsMaxVersions) { authManager = mgr; table = tableName; user = ugi; @@ -98,20 +94,17 @@ public ReturnCode filterCell(final Cell cell) { if (isSystemTable) { return ReturnCode.INCLUDE; } - if (prevFam.getBytes() == null - || !(PrivateCellUtil.matchingFamily(cell, prevFam.getBytes(), prevFam.getOffset(), - prevFam.getLength()))) { + if (prevFam.getBytes() == null || !(PrivateCellUtil.matchingFamily(cell, prevFam.getBytes(), + prevFam.getOffset(), prevFam.getLength()))) { prevFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); // Similar to VisibilityLabelFilter familyMaxVersions = cfVsMaxVersions.get(prevFam); // Family is changed. Just unset curQualifier. prevQual.unset(); } - if (prevQual.getBytes() == null - || !(PrivateCellUtil.matchingQualifier(cell, prevQual.getBytes(), prevQual.getOffset(), - prevQual.getLength()))) { - prevQual.set(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + if (prevQual.getBytes() == null || !(PrivateCellUtil.matchingQualifier(cell, + prevQual.getBytes(), prevQual.getOffset(), prevQual.getLength()))) { + prevQual.set(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); currentVersions = 0; } currentVersions++; @@ -128,15 +121,15 @@ public ReturnCode filterCell(final Cell cell) { return ReturnCode.INCLUDE; } } - break; + break; // Cell permissions can override table or CF permissions case CHECK_CELL_DEFAULT: { - if (authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ) || - authManager.authorizeCell(user, table, cell, Permission.Action.READ)) { + if (authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ) + || authManager.authorizeCell(user, table, cell, Permission.Action.READ)) { return ReturnCode.INCLUDE; } } - break; + break; default: throw new RuntimeException("Unhandled strategy " + strategy); } @@ -156,10 +149,10 @@ public void reset() throws IOException { * @return The filter serialized using pb */ @Override - public byte [] toByteArray() { + public byte[] toByteArray() { // no implementation, server-side use only throw new UnsupportedOperationException( - "Serialization not supported. Intended for server-side use only."); + "Serialization not supported. Intended for server-side use only."); } /** @@ -168,11 +161,11 @@ public void reset() throws IOException { * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray() */ - public static AccessControlFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public static AccessControlFilter parseFrom(final byte[] pbBytes) + throws DeserializationException { // no implementation, server-side use only throw new UnsupportedOperationException( - "Serialization not supported. Intended for server-side use only."); + "Serialization not supported. 
Intended for server-side use only."); } @Override @@ -180,15 +173,13 @@ public boolean equals(Object obj) { if (!(obj instanceof AccessControlFilter)) { return false; } - if (this == obj){ + if (this == obj) { return true; } - AccessControlFilter f=(AccessControlFilter)obj; - return this.authManager.equals(f.authManager) && - this.table.equals(f.table) && - this.user.equals(f.user) && - this.strategy.equals(f.strategy) && - this.cfVsMaxVersions.equals(f.cfVsMaxVersions); + AccessControlFilter f = (AccessControlFilter) obj; + return this.authManager.equals(f.authManager) && this.table.equals(f.table) + && this.user.equals(f.user) && this.strategy.equals(f.strategy) + && this.cfVsMaxVersions.equals(f.cfVsMaxVersions); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 1594e1306b09..198881713194 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.security.access; @@ -146,49 +145,41 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasPermissionResponse; /** - * Provides basic authorization checks for data access and administrative - * operations. - * + * Provides basic authorization checks for data access and administrative operations. *

- * {@code AccessController} performs authorization checks for HBase operations
- * based on:
+ * {@code AccessController} performs authorization checks for HBase operations based on:
 * </p>
 * <ul>
- * <li>the identity of the user performing the operation</li>
- * <li>the scope over which the operation is performed, in increasing
- * specificity: global, table, column family, or qualifier</li>
- * <li>the type of action being performed (as mapped to
- * {@link Permission.Action} values)</li>
+ * <li>the identity of the user performing the operation</li>
+ * <li>the scope over which the operation is performed, in increasing specificity: global, table,
+ * column family, or qualifier</li>
+ * <li>the type of action being performed (as mapped to {@link Permission.Action} values)</li>
 * </ul>
 * <p>
- * If the authorization check fails, an {@link AccessDeniedException}
- * will be thrown for the operation.
+ * If the authorization check fails, an {@link AccessDeniedException} will be thrown for the
+ * operation.
 * </p>
- *
 * <p>
- * To perform authorization checks, {@code AccessController} relies on the
- * RpcServerEngine being loaded to provide
- * the user identities for remote requests.
+ * To perform authorization checks, {@code AccessController} relies on the RpcServerEngine being
+ * loaded to provide the user identities for remote requests.
 * </p>
- *
 * <p>
- * The access control lists used for authorization can be manipulated via the
- * exposed {@link AccessControlService} Interface implementation, and the associated
- * {@code grant}, {@code revoke}, and {@code user_permission} HBase shell
- * commands.
+ * The access control lists used for authorization can be manipulated via the exposed
+ * {@link AccessControlService} Interface implementation, and the associated {@code grant},
+ * {@code revoke}, and {@code user_permission} HBase shell commands.
 * </p>
          */ @CoreCoprocessor @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class AccessController implements MasterCoprocessor, RegionCoprocessor, - RegionServerCoprocessor, AccessControlService.Interface, - MasterObserver, RegionObserver, RegionServerObserver, EndpointObserver, BulkLoadObserver { + RegionServerCoprocessor, AccessControlService.Interface, MasterObserver, RegionObserver, + RegionServerObserver, EndpointObserver, BulkLoadObserver { // TODO: encapsulate observer functions into separate class/sub-class. private static final Logger LOG = LoggerFactory.getLogger(AccessController.class); private static final Logger AUDITLOG = - LoggerFactory.getLogger("SecurityLogger."+AccessController.class.getName()); + LoggerFactory.getLogger("SecurityLogger." + AccessController.class.getName()); private static final String CHECK_COVERING_PERM = "check_covering_perm"; private static final String TAG_CHECK_PASSED = "tag_check_passed"; private static final byte[] TRUE = Bytes.toBytes(true); @@ -199,21 +190,23 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** flags if we are running on a region of the _acl_ table */ private boolean aclRegion = false; - /** defined only for Endpoint implementation, so it can have way to - access region services */ + /** + * defined only for Endpoint implementation, so it can have way to access region services + */ private RegionCoprocessorEnvironment regionEnv; /** Mapping of scanner instances to the user who created them */ - private Map scannerOwners = - new MapMaker().weakKeys().makeMap(); + private Map scannerOwners = new MapMaker().weakKeys().makeMap(); private Map> tableAcls; /** Provider for mapping principal names to Users */ private UserProvider userProvider; - /** if we are active, usually false, only true if "hbase.security.authorization" - has been set to true in site configuration */ + /** + * if we are active, usually false, only true if "hbase.security.authorization" has been set to + * true in site configuration + */ private boolean authorizationEnabled; /** if we are able to support cell ACLs */ @@ -222,8 +215,10 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, /** if we should check EXEC permissions */ private boolean shouldCheckExecPermission; - /** if we should terminate access checks early as soon as table or CF grants - allow access; pre-0.98 compatible behavior */ + /** + * if we should terminate access checks early as soon as table or CF grants allow access; pre-0.98 + * compatible behavior + */ private boolean compatibleEarlyTermination; /** if we have been successfully initialized */ @@ -233,8 +228,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, private volatile boolean aclTabAvailable = false; public static boolean isCellAuthorizationSupported(Configuration conf) { - return AccessChecker.isAuthorizationSupported(conf) && - (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS); + return AccessChecker.isAuthorizationSupported(conf) + && (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS); } public Region getRegion() { @@ -251,8 +246,7 @@ private void initialize(RegionCoprocessorEnvironment e) throws IOException { Map> tables = PermissionStorage.loadAll(region); // For each table, write out the table's permissions to the respective // znode for that table. 
- for (Map.Entry> t: - tables.entrySet()) { + for (Map.Entry> t : tables.entrySet()) { byte[] entry = t.getKey(); ListMultimap perms = t.getValue(); byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf); @@ -262,27 +256,25 @@ private void initialize(RegionCoprocessorEnvironment e) throws IOException { } /** - * Writes all table ACLs for the tables in the given Map up into ZooKeeper - * znodes. This is called to synchronize ACL changes following {@code _acl_} - * table updates. + * Writes all table ACLs for the tables in the given Map up into ZooKeeper znodes. This is called + * to synchronize ACL changes following {@code _acl_} table updates. */ - private void updateACL(RegionCoprocessorEnvironment e, - final Map> familyMap) { + private void updateACL(RegionCoprocessorEnvironment e, final Map> familyMap) { Set entries = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR); for (Map.Entry> f : familyMap.entrySet()) { List cells = f.getValue(); - for (Cell cell: cells) { + for (Cell cell : cells) { if (CellUtil.matchingFamily(cell, PermissionStorage.ACL_LIST_FAMILY)) { entries.add(CellUtil.cloneRow(cell)); } } } Configuration conf = regionEnv.getConfiguration(); - byte [] currentEntry = null; + byte[] currentEntry = null; // TODO: Here we are already on the ACL region. (And it is single // region) We can even just get the region from the env and do get // directly. The short circuit connection would avoid the RPC overhead - // so no socket communication, req write/read .. But we have the PB + // so no socket communication, req write/read .. But we have the PB // to and fro conversion overhead. get req is converted to PB req // and results are converted to PB results 1st and then to POJOs // again. We could have avoided such at least in ACL table context.. @@ -294,27 +286,27 @@ private void updateACL(RegionCoprocessorEnvironment e, byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf); zkPermissionWatcher.writeToZookeeper(entry, serialized); } - } catch(IOException ex) { - LOG.error("Failed updating permissions mirror for '" + - (currentEntry == null? "null": Bytes.toString(currentEntry)) + "'", ex); + } catch (IOException ex) { + LOG.error("Failed updating permissions mirror for '" + + (currentEntry == null ? "null" : Bytes.toString(currentEntry)) + "'", + ex); } } /** - * Check the current user for authorization to perform a specific action - * against the given set of row data. + * Check the current user for authorization to perform a specific action against the given set of + * row data. * @param opType the operation type * @param user the user * @param e the coprocessor environment - * @param families the map of column families to qualifiers present in - * the request + * @param families the map of column families to qualifiers present in the request * @param actions the desired actions * @return an authorization result */ private AuthResult permissionGranted(OpType opType, User user, RegionCoprocessorEnvironment e, - Map> families, Action... actions) { + Map> families, Action... 
actions) { AuthResult result = null; - for (Action action: actions) { + for (Action action : actions) { result = accessChecker.permissionGranted(opType.toString(), user, action, e.getRegion().getRegionInfo().getTable(), families); if (!result.isAllowed()) { @@ -329,67 +321,60 @@ public void requireAccess(ObserverContext ctx, String request, TableName tabl accessChecker.requireAccess(getActiveUser(ctx), request, tableName, permissions); } - public void requirePermission(ObserverContext ctx, String request, - Action perm) throws IOException { + public void requirePermission(ObserverContext ctx, String request, Action perm) + throws IOException { accessChecker.requirePermission(getActiveUser(ctx), request, null, perm); } - public void requireGlobalPermission(ObserverContext ctx, String request, - Action perm, TableName tableName, - Map> familyMap) throws IOException { + public void requireGlobalPermission(ObserverContext ctx, String request, Action perm, + TableName tableName, Map> familyMap) throws IOException { accessChecker.requireGlobalPermission(getActiveUser(ctx), request, perm, tableName, familyMap, null); } - public void requireGlobalPermission(ObserverContext ctx, String request, - Action perm, String namespace) throws IOException { - accessChecker.requireGlobalPermission(getActiveUser(ctx), - request, perm, namespace); + public void requireGlobalPermission(ObserverContext ctx, String request, Action perm, + String namespace) throws IOException { + accessChecker.requireGlobalPermission(getActiveUser(ctx), request, perm, namespace); } public void requireNamespacePermission(ObserverContext ctx, String request, String namespace, Action... permissions) throws IOException { - accessChecker.requireNamespacePermission(getActiveUser(ctx), - request, namespace, null, permissions); + accessChecker.requireNamespacePermission(getActiveUser(ctx), request, namespace, null, + permissions); } public void requireNamespacePermission(ObserverContext ctx, String request, String namespace, TableName tableName, Map> familyMap, Action... permissions) throws IOException { - accessChecker.requireNamespacePermission(getActiveUser(ctx), - request, namespace, tableName, familyMap, - permissions); + accessChecker.requireNamespacePermission(getActiveUser(ctx), request, namespace, tableName, + familyMap, permissions); } public void requirePermission(ObserverContext ctx, String request, TableName tableName, byte[] family, byte[] qualifier, Action... permissions) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), request, - tableName, family, qualifier, null, permissions); + accessChecker.requirePermission(getActiveUser(ctx), request, tableName, family, qualifier, null, + permissions); } - public void requireTablePermission(ObserverContext ctx, String request, - TableName tableName,byte[] family, byte[] qualifier, - Action... permissions) throws IOException { - accessChecker.requireTablePermission(getActiveUser(ctx), - request, tableName, family, qualifier, permissions); + public void requireTablePermission(ObserverContext ctx, String request, TableName tableName, + byte[] family, byte[] qualifier, Action... 
permissions) throws IOException { + accessChecker.requireTablePermission(getActiveUser(ctx), request, tableName, family, qualifier, + permissions); } - public void checkLockPermissions(ObserverContext ctx, String namespace, - TableName tableName, RegionInfo[] regionInfos, String reason) - throws IOException { - accessChecker.checkLockPermissions(getActiveUser(ctx), - namespace, tableName, regionInfos, reason); + public void checkLockPermissions(ObserverContext ctx, String namespace, TableName tableName, + RegionInfo[] regionInfos, String reason) throws IOException { + accessChecker.checkLockPermissions(getActiveUser(ctx), namespace, tableName, regionInfos, + reason); } /** - * Returns true if the current user is allowed the given action - * over at least one of the column qualifiers in the given column families. + * Returns true if the current user is allowed the given action over at least one of + * the column qualifiers in the given column families. */ - private boolean hasFamilyQualifierPermission(User user, - Action perm, - RegionCoprocessorEnvironment env, - Map> familyMap) - throws IOException { + private boolean hasFamilyQualifierPermission(User user, Action perm, + RegionCoprocessorEnvironment env, Map> familyMap) + throws IOException { RegionInfo hri = env.getRegion().getRegionInfo(); TableName tableName = hri.getTable(); @@ -399,12 +384,11 @@ private boolean hasFamilyQualifierPermission(User user, if (familyMap != null && familyMap.size() > 0) { // at least one family must be allowed - for (Map.Entry> family : - familyMap.entrySet()) { + for (Map.Entry> family : familyMap.entrySet()) { if (family.getValue() != null && !family.getValue().isEmpty()) { for (byte[] qualifier : family.getValue()) { - if (getAuthManager().authorizeUserTable(user, tableName, - family.getKey(), qualifier, perm)) { + if (getAuthManager().authorizeUserTable(user, tableName, family.getKey(), qualifier, + perm)) { return true; } } @@ -422,14 +406,8 @@ private boolean hasFamilyQualifierPermission(User user, } private enum OpType { - GET("get"), - EXISTS("exists"), - SCAN("scan"), - PUT("put"), - DELETE("delete"), - CHECK_AND_PUT("checkAndPut"), - CHECK_AND_DELETE("checkAndDelete"), - APPEND("append"), + GET("get"), EXISTS("exists"), SCAN("scan"), PUT("put"), DELETE("delete"), + CHECK_AND_PUT("checkAndPut"), CHECK_AND_DELETE("checkAndDelete"), APPEND("append"), INCREMENT("increment"); private String type; @@ -465,35 +443,34 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce // consider only one such passing cell. In case of Delete we have to consider all the cell // versions under this passing version. When Delete Mutation contains columns which are a // version delete just consider only one version for those column cells. 
- boolean considerCellTs = (request == OpType.PUT || request == OpType.DELETE); + boolean considerCellTs = (request == OpType.PUT || request == OpType.DELETE); if (considerCellTs) { get.readAllVersions(); } else { get.readVersions(1); } boolean diffCellTsFromOpTs = false; - for (Map.Entry> entry: familyMap.entrySet()) { + for (Map.Entry> entry : familyMap.entrySet()) { byte[] col = entry.getKey(); // TODO: HBASE-7114 could possibly unify the collection type in family // maps so we would not need to do this if (entry.getValue() instanceof Set) { - Set set = (Set)entry.getValue(); + Set set = (Set) entry.getValue(); if (set == null || set.isEmpty()) { get.addFamily(col); } else { - for (byte[] qual: set) { + for (byte[] qual : set) { get.addColumn(col, qual); } } } else if (entry.getValue() instanceof List) { - List list = (List)entry.getValue(); + List list = (List) entry.getValue(); if (list == null || list.isEmpty()) { get.addFamily(col); } else { // In case of family delete, a Cell will be added into the list with Qualifier as null. for (Cell cell : list) { - if (cell.getQualifierLength() == 0 - && (cell.getTypeByte() == Type.DeleteFamily.getCode() + if (cell.getQualifierLength() == 0 && (cell.getTypeByte() == Type.DeleteFamily.getCode() || cell.getTypeByte() == Type.DeleteFamilyVersion.getCode())) { get.addFamily(col); } else { @@ -509,8 +486,8 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce } else if (entry.getValue() == null) { get.addFamily(col); } else { - throw new RuntimeException("Unhandled collection type " + - entry.getValue().getClass().getName()); + throw new RuntimeException( + "Unhandled collection type " + entry.getValue().getClass().getName()); } } // We want to avoid looking into the future. So, if the cells of the @@ -557,7 +534,7 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce cells.clear(); // scan with limit as 1 to hold down memory use on wide rows more = scanner.next(cells, scannerContext); - for (Cell cell: cells) { + for (Cell cell : cells) { if (LOG.isTraceEnabled()) { LOG.trace("Found cell " + cell); } @@ -595,7 +572,7 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce continue; } foundColumn = true; - for (Action action: actions) { + for (Action action : actions) { // Are there permissions for this user for the cell? 
if (!getAuthManager().authorizeCell(user, getTableName(e), cell, action)) { // We can stop if the cell ACL denies access @@ -621,9 +598,9 @@ private boolean checkCoveringPermission(User user, OpType request, RegionCoproce private static void addCellPermissions(final byte[] perms, Map> familyMap) { // Iterate over the entries in the familyMap, replacing the cells therein // with new cells including the ACL data - for (Map.Entry> e: familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { List newCells = Lists.newArrayList(); - for (Cell cell: e.getValue()) { + for (Cell cell : e.getValue()) { // Prepend the supplied perms in a new ACL tag to an update list of tags for the cell List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(PermissionStorage.ACL_TAG_TYPE, perms)); @@ -757,40 +734,37 @@ public Optional getRegionServerObserver() { @Override public Iterable getServices() { - return Collections.singleton( - AccessControlProtos.AccessControlService.newReflectiveService(this)); + return Collections + .singleton(AccessControlProtos.AccessControlService.newReflectiveService(this)); } /*********************************** Observer implementations ***********************************/ @Override - public void preCreateTable(ObserverContext c, - TableDescriptor desc, RegionInfo[] regions) throws IOException { + public void preCreateTable(ObserverContext c, TableDescriptor desc, + RegionInfo[] regions) throws IOException { Set families = desc.getColumnFamilyNames(); Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (byte[] family: families) { + for (byte[] family : families) { familyMap.put(family, null); } - requireNamespacePermission(c, "createTable", - desc.getTableName().getNamespaceAsString(), desc.getTableName(), familyMap, Action.ADMIN, - Action.CREATE); + requireNamespacePermission(c, "createTable", desc.getTableName().getNamespaceAsString(), + desc.getTableName(), familyMap, Action.ADMIN, Action.CREATE); } @Override - public void postCompletedCreateTableAction( - final ObserverContext c, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException { + public void postCompletedCreateTableAction(final ObserverContext c, + final TableDescriptor desc, final RegionInfo[] regions) throws IOException { // When AC is used, it should be configured as the 1st CP. // In Master, the table operations like create, are handled by a Thread pool but the max size // for this pool is 1. So if multiple CPs create tables on startup, these creations will happen // sequentially only. // Related code in HMaster#startServiceThreads // {code} - // // We depend on there being only one instance of this executor running - // // at a time. To do concurrency, would need fencing of enable/disable of - // // tables. - // this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1); + // // We depend on there being only one instance of this executor running + // // at a time. To do concurrency, would need fencing of enable/disable of + // // tables. 
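addCellPermissions, rewrapped above, copies a mutation's ACL attribute into an ACL_TAG_TYPE tag on every cell it writes. On the client side that attribute is set through Mutation#setACL; a usage sketch, with the table, row, column and user names invented for the example:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

// Grants user "bob" READ access to just the cells written by this Put; the AccessController's
// addCellPermissions later materializes the attribute as a per-cell ACL tag.
static void putWithCellAcl(Connection connection) throws IOException {
  try (Table table = connection.getTable(TableName.valueOf("demo_table"))) {
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
    put.setACL("bob", Permission.newBuilder().withActions(Permission.Action.READ).build());
    table.put(put);
  }
}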
+ // this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1); // {code} // In future if we change this pool to have more threads, then there is a chance for thread, // creating acl table, getting delayed and by that time another table creation got over and @@ -826,8 +800,7 @@ public Void run() throws Exception { @Override public void preDeleteTable(ObserverContext c, TableName tableName) throws IOException { - requirePermission(c, "deleteTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + requirePermission(c, "deleteTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override @@ -850,8 +823,7 @@ public Void run() throws Exception { @Override public void preTruncateTable(ObserverContext c, final TableName tableName) throws IOException { - requirePermission(c, "truncateTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + requirePermission(c, "truncateTable", tableName, null, null, Action.ADMIN, Action.CREATE); final Configuration conf = c.getEnvironment().getConfiguration(); User.runAsLoginUser(new PrivilegedExceptionAction() { @@ -900,7 +872,7 @@ public TableDescriptor preModifyTable(ObserverContext c, - TableName tableName, String dstSFT) throws IOException { + TableName tableName, String dstSFT) throws IOException { requirePermission(c, "modifyTableStoreFileTracker", tableName, null, null, Action.ADMIN, Action.CREATE); return dstSFT; @@ -908,8 +880,8 @@ public String preModifyTableStoreFileTracker(ObserverContext c, TableName tableName, byte[] family, - String dstSFT) throws IOException { + ObserverContext c, TableName tableName, byte[] family, + String dstSFT) throws IOException { requirePermission(c, "modifyColumnFamilyStoreFileTracker", tableName, family, null, Action.ADMIN, Action.CREATE); return dstSFT; @@ -917,7 +889,7 @@ public String preModifyColumnFamilyStoreFileTracker( @Override public void postModifyTable(ObserverContext c, TableName tableName, - TableDescriptor oldDesc, TableDescriptor currentDesc) throws IOException { + TableDescriptor oldDesc, TableDescriptor currentDesc) throws IOException { final Configuration conf = c.getEnvironment().getConfiguration(); // default the table owner to current user, if not specified. 
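The master hooks in this area (preDeleteTable, preTruncateTable, preModifyTable and friends) all follow the same veto shape: check before the operation runs and throw to abort it. A self-contained sketch of a custom observer using that shape; the class name, the table-name rule and the message are invented and are not part of this patch.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.security.AccessDeniedException;

public class DenyDemoTableDeletes implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName) throws IOException {
    // Throwing from a pre-hook aborts the master operation, the same way the
    // requirePermission(...) calls above do when an ACL check fails.
    if (tableName.getQualifierAsString().startsWith("demo_")) {
      throw new AccessDeniedException("demo_* tables may not be deleted");
    }
  }
}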
final String owner = getActiveUser(c).getShortName(); @@ -938,8 +910,7 @@ public Void run() throws Exception { @Override public void preEnableTable(ObserverContext c, TableName tableName) throws IOException { - requirePermission(c, "enableTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + requirePermission(c, "enableTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override @@ -953,8 +924,7 @@ public void preDisableTable(ObserverContext c, Tab throw new AccessDeniedException("Not allowed to disable " + PermissionStorage.ACL_TABLE_NAME + " table with AccessController installed"); } - requirePermission(c, "disableTable", - tableName, null, null, Action.ADMIN, Action.CREATE); + requirePermission(c, "disableTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override @@ -976,8 +946,7 @@ public void preGetProcedures(ObserverContext ctx) } @Override - public void preGetLocks(ObserverContext ctx) - throws IOException { + public void preGetLocks(ObserverContext ctx) throws IOException { User user = getActiveUser(ctx); accessChecker.requirePermission(user, "getLocks", null, Action.ADMIN); } @@ -985,36 +954,31 @@ public void preGetLocks(ObserverContext ctx) @Override public void preMove(ObserverContext c, RegionInfo region, ServerName srcServer, ServerName destServer) throws IOException { - requirePermission(c, "move", - region.getTable(), null, null, Action.ADMIN); + requirePermission(c, "move", region.getTable(), null, null, Action.ADMIN); } @Override public void preAssign(ObserverContext c, RegionInfo regionInfo) throws IOException { - requirePermission(c, "assign", - regionInfo.getTable(), null, null, Action.ADMIN); + requirePermission(c, "assign", regionInfo.getTable(), null, null, Action.ADMIN); } @Override public void preUnassign(ObserverContext c, RegionInfo regionInfo) throws IOException { - requirePermission(c, "unassign", - regionInfo.getTable(), null, null, Action.ADMIN); + requirePermission(c, "unassign", regionInfo.getTable(), null, null, Action.ADMIN); } @Override public void preRegionOffline(ObserverContext c, RegionInfo regionInfo) throws IOException { - requirePermission(c, "regionOffline", - regionInfo.getTable(), null, null, Action.ADMIN); + requirePermission(c, "regionOffline", regionInfo.getTable(), null, null, Action.ADMIN); } @Override public void preSetSplitOrMergeEnabled(final ObserverContext ctx, final boolean newValue, final MasterSwitchType switchType) throws IOException { - requirePermission(ctx, "setSplitOrMergeEnabled", - Action.ADMIN); + requirePermission(ctx, "setSplitOrMergeEnabled", Action.ADMIN); } @Override @@ -1024,20 +988,18 @@ public void preBalance(ObserverContext c, BalanceR } @Override - public void preBalanceSwitch(ObserverContext c, - boolean newValue) throws IOException { + public void preBalanceSwitch(ObserverContext c, boolean newValue) + throws IOException { requirePermission(c, "balanceSwitch", Action.ADMIN); } @Override - public void preShutdown(ObserverContext c) - throws IOException { + public void preShutdown(ObserverContext c) throws IOException { requirePermission(c, "shutdown", Action.ADMIN); } @Override - public void preStopMaster(ObserverContext c) - throws IOException { + public void preStopMaster(ObserverContext c) throws IOException { requirePermission(c, "stopMaster", Action.ADMIN); } @@ -1052,23 +1014,19 @@ public void postStartMaster(ObserverContext ctx) } } } + /** * Create the ACL table * @throws IOException */ private static void createACLTable(Admin admin) throws IOException { /** Table 
descriptor for ACL table */ - ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(PermissionStorage.ACL_LIST_FAMILY). - setMaxVersions(1). - setInMemory(true). - setBlockCacheEnabled(true). - setBlocksize(8 * 1024). - setBloomFilterType(BloomType.NONE). - setScope(HConstants.REPLICATION_SCOPE_LOCAL).build(); - TableDescriptor td = - TableDescriptorBuilder.newBuilder(PermissionStorage.ACL_TABLE_NAME). - setColumnFamily(cfd).build(); + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder + .newBuilder(PermissionStorage.ACL_LIST_FAMILY).setMaxVersions(1).setInMemory(true) + .setBlockCacheEnabled(true).setBlocksize(8 * 1024).setBloomFilterType(BloomType.NONE) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(PermissionStorage.ACL_TABLE_NAME) + .setColumnFamily(cfd).build(); admin.createTable(td); } @@ -1077,8 +1035,8 @@ public void preSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { // Move this ACL check to SnapshotManager#checkPermissions as part of AC deprecation. - requirePermission(ctx, "snapshot " + snapshot.getName(), - hTableDescriptor.getTableName(), null, null, Permission.Action.ADMIN); + requirePermission(ctx, "snapshot " + snapshot.getName(), hTableDescriptor.getTableName(), null, + null, Permission.Action.ADMIN); } @Override @@ -1088,7 +1046,7 @@ public void preListSnapshot(ObserverContext ctx, if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) { // list it, if user is the owner of snapshot AuthResult result = AuthResult.allow("listSnapshot " + snapshot.getName(), - "Snapshot owner check allowed", user, null, null, null); + "Snapshot owner check allowed", user, null, null, null); AccessChecker.logResult(result); } else { accessChecker.requirePermission(user, "listSnapshot " + snapshot.getName(), null, @@ -1101,9 +1059,8 @@ public void preCloneSnapshot(final ObserverContext final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { User user = getActiveUser(ctx); - if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user) - && hTableDescriptor.getTableName().getNameAsString() - .equals(snapshot.getTableNameAsString())) { + if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user) && hTableDescriptor.getTableName() + .getNameAsString().equals(snapshot.getTableNameAsString())) { // Snapshot owner is allowed to create a table with the same name as the snapshot he took AuthResult result = AuthResult.allow("cloneSnapshot " + snapshot.getName(), "Snapshot owner check allowed", user, null, hTableDescriptor.getTableName(), null); @@ -1135,7 +1092,7 @@ public void preDeleteSnapshot(final ObserverContext ctx, NamespaceDescriptor ns) throws IOException { - requireGlobalPermission(ctx, "createNamespace", - Action.ADMIN, ns.getName()); + requireGlobalPermission(ctx, "createNamespace", Action.ADMIN, ns.getName()); } @Override - public void preDeleteNamespace(ObserverContext ctx, String namespace) - throws IOException { - requireGlobalPermission(ctx, "deleteNamespace", - Action.ADMIN, namespace); + public void preDeleteNamespace(ObserverContext ctx, + String namespace) throws IOException { + requireGlobalPermission(ctx, "deleteNamespace", Action.ADMIN, namespace); } @Override @@ -1185,7 +1140,7 @@ public void preModifyNamespace(ObserverContext ctx @Override public void preGetNamespaceDescriptor(ObserverContext ctx, - String namespace) throws IOException { + String 
namespace) throws IOException { requireNamespacePermission(ctx, "getNamespaceDescriptor", namespace, Action.ADMIN); } @@ -1218,17 +1173,13 @@ public void preTableFlush(final ObserverContext ct final TableName tableName) throws IOException { // Move this ACL check to MasterFlushTableProcedureManager#checkPermissions as part of AC // deprecation. - requirePermission(ctx, "flushTable", tableName, - null, null, Action.ADMIN, Action.CREATE); + requirePermission(ctx, "flushTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override - public void preSplitRegion( - final ObserverContext ctx, - final TableName tableName, - final byte[] splitRow) throws IOException { - requirePermission(ctx, "split", tableName, - null, null, Action.ADMIN); + public void preSplitRegion(final ObserverContext ctx, + final TableName tableName, final byte[] splitRow) throws IOException { + requirePermission(ctx, "split", tableName, null, null, Action.ADMIN); } @Override @@ -1246,8 +1197,7 @@ public void preDecommissionRegionServers(ObserverContext ctx) throws IOException { - requirePermission(ctx, "listDecommissionedRegionServers", - Action.ADMIN); + requirePermission(ctx, "listDecommissionedRegionServers", Action.ADMIN); } @Override @@ -1259,8 +1209,7 @@ public void preRecommissionRegionServer(ObserverContext c) - throws IOException { + public void preOpen(ObserverContext c) throws IOException { RegionCoprocessorEnvironment env = c.getEnvironment(); final Region region = env.getRegion(); if (region == null) { @@ -1300,16 +1249,16 @@ public void postOpen(ObserverContext c) { @Override public void preFlush(ObserverContext c, FlushLifeCycleTracker tracker) throws IOException { - requirePermission(c, "flush", getTableName(c.getEnvironment()), - null, null, Action.ADMIN, Action.CREATE); + requirePermission(c, "flush", getTableName(c.getEnvironment()), null, null, Action.ADMIN, + Action.CREATE); } @Override public InternalScanner preCompact(ObserverContext c, Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, CompactionRequest request) throws IOException { - requirePermission(c, "compact", getTableName(c.getEnvironment()), - null, null, Action.ADMIN, Action.CREATE); + requirePermission(c, "compact", getTableName(c.getEnvironment()), null, null, Action.ADMIN, + Action.CREATE); return scanner; } @@ -1322,17 +1271,17 @@ private void internalPreRead(final ObserverContext } User user = getActiveUser(c); RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = null; + Map> families = null; switch (opType) { - case GET: - case EXISTS: - families = ((Get)query).getFamilyMap(); - break; - case SCAN: - families = ((Scan)query).getFamilyMap(); - break; - default: - throw new RuntimeException("Unhandled operation " + opType); + case GET: + case EXISTS: + families = ((Get) query).getFamilyMap(); + break; + case SCAN: + families = ((Scan) query).getFamilyMap(); + break; + default: + throw new RuntimeException("Unhandled operation " + opType); } AuthResult authResult = permissionGranted(opType, user, env, families, Action.READ); Region region = getRegion(env); @@ -1357,20 +1306,19 @@ private void internalPreRead(final ObserverContext // Only wrap the filter if we are enforcing authorizations if (authorizationEnabled) { Filter ourFilter = new AccessControlFilter(getAuthManager(), user, table, - AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, - cfVsMaxVersions); + AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, cfVsMaxVersions); // wrap any existing filter if 
(filter != null) { ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, - Lists.newArrayList(ourFilter, filter)); + Lists.newArrayList(ourFilter, filter)); } switch (opType) { case GET: case EXISTS: - ((Get)query).setFilter(ourFilter); + ((Get) query).setFilter(ourFilter); break; case SCAN: - ((Scan)query).setFilter(ourFilter); + ((Scan) query).setFilter(ourFilter); break; default: throw new RuntimeException("Unhandled operation " + opType); @@ -1387,19 +1335,19 @@ private void internalPreRead(final ObserverContext // Only wrap the filter if we are enforcing authorizations if (authorizationEnabled) { Filter ourFilter = new AccessControlFilter(getAuthManager(), user, table, - AccessControlFilter.Strategy.CHECK_CELL_DEFAULT, cfVsMaxVersions); + AccessControlFilter.Strategy.CHECK_CELL_DEFAULT, cfVsMaxVersions); // wrap any existing filter if (filter != null) { ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, - Lists.newArrayList(ourFilter, filter)); + Lists.newArrayList(ourFilter, filter)); } switch (opType) { case GET: case EXISTS: - ((Get)query).setFilter(ourFilter); + ((Get) query).setFilter(ourFilter); break; case SCAN: - ((Scan)query).setFilter(ourFilter); + ((Scan) query).setFilter(ourFilter); break; default: throw new RuntimeException("Unhandled operation " + opType); @@ -1411,28 +1359,26 @@ private void internalPreRead(final ObserverContext AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { throw new AccessDeniedException("Insufficient permissions for user '" - + (user != null ? user.getShortName() : "null") - + "' (table=" + table + ", action=READ)"); + + (user != null ? user.getShortName() : "null") + "' (table=" + table + ", action=READ)"); } } @Override - public void preGetOp(final ObserverContext c, - final Get get, final List result) throws IOException { + public void preGetOp(final ObserverContext c, final Get get, + final List result) throws IOException { internalPreRead(c, get, OpType.GET); } @Override - public boolean preExists(final ObserverContext c, - final Get get, final boolean exists) throws IOException { + public boolean preExists(final ObserverContext c, final Get get, + final boolean exists) throws IOException { internalPreRead(c, get, OpType.EXISTS); return exists; } @Override - public void prePut(final ObserverContext c, - final Put put, final WALEdit edit, final Durability durability) - throws IOException { + public void prePut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, put); @@ -1443,9 +1389,8 @@ public void prePut(final ObserverContext c, // change the ACL of any previous Put. This allows simple evolution of // security policy over time without requiring expensive updates. 
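internalPreRead, rewrapped in the hunks above, prepends an AccessControlFilter and ANDs it with whatever filter the Get or Scan already carries. AccessControlFilter is internal to the module, so this usage sketch shows the same wrapping pattern with an ordinary PrefixFilter standing in; the prefix value is invented.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Put "our" filter first and require both to pass, preserving any caller-supplied filter.
static void andWithExistingFilter(Scan scan) {
  Filter ourFilter = new PrefixFilter(Bytes.toBytes("row-prefix"));
  Filter existing = scan.getFilter();
  if (existing != null) {
    ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, ourFilter, existing);
  }
  scan.setFilter(ourFilter);
}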
RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = put.getFamilyCellMap(); - AuthResult authResult = permissionGranted(OpType.PUT, - user, env, families, Action.WRITE); + Map> families = put.getFamilyCellMap(); + AuthResult authResult = permissionGranted(OpType.PUT, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { @@ -1467,17 +1412,16 @@ public void prePut(final ObserverContext c, } @Override - public void postPut(final ObserverContext c, - final Put put, final WALEdit edit, final Durability durability) { + public void postPut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) { if (aclRegion) { updateACL(c.getEnvironment(), put.getFamilyCellMap()); } } @Override - public void preDelete(final ObserverContext c, - final Delete delete, final WALEdit edit, final Durability durability) - throws IOException { + public void preDelete(final ObserverContext c, final Delete delete, + final WALEdit edit, final Durability durability) throws IOException { // An ACL on a delete is useless, we shouldn't allow it if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) { throw new DoNotRetryIOException("ACL on delete has no effect: " + delete.toString()); @@ -1488,17 +1432,15 @@ public void preDelete(final ObserverContext c, // overwrite any of the visible versions ('visible' defined as not covered // by a tombstone already) then we have to disallow this operation. RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = delete.getFamilyCellMap(); + Map> families = delete.getFamilyCellMap(); User user = getActiveUser(c); - AuthResult authResult = permissionGranted(OpType.DELETE, - user, env, families, Action.WRITE); + AuthResult authResult = permissionGranted(OpType.DELETE, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { delete.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } } @@ -1536,16 +1478,16 @@ public void preBatchMutate(ObserverContext c, AuthResult authResult = null; if (checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(), m.getFamilyCellMap(), timestamp, Action.WRITE)) { - authResult = AuthResult.allow(opType.toString(), "Covering cell set", - user, Action.WRITE, table, m.getFamilyCellMap()); + authResult = AuthResult.allow(opType.toString(), "Covering cell set", user, + Action.WRITE, table, m.getFamilyCellMap()); } else { - authResult = AuthResult.deny(opType.toString(), "Covering cell set", - user, Action.WRITE, table, m.getFamilyCellMap()); + authResult = AuthResult.deny(opType.toString(), "Covering cell set", user, Action.WRITE, + table, m.getFamilyCellMap()); } AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " - + authResult.toContextString()); + throw new AccessDeniedException( + "Insufficient permissions " + authResult.toContextString()); } } } @@ -1553,9 +1495,8 @@ public void preBatchMutate(ObserverContext c, } @Override - public void postDelete(final ObserverContext c, - final Delete delete, final WALEdit edit, final Durability 
durability) - throws IOException { + public void postDelete(final ObserverContext c, final Delete delete, + final WALEdit edit, final Durability durability) throws IOException { if (aclRegion) { updateACL(c.getEnvironment(), delete.getFamilyCellMap()); } @@ -1563,25 +1504,23 @@ public void postDelete(final ObserverContext c, @Override public boolean preCheckAndPut(final ObserverContext c, - final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOperator op, - final ByteArrayComparable comparator, final Put put, - final boolean result) throws IOException { + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final ByteArrayComparable comparator, final Put put, final boolean result) + throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, put); // Require READ and WRITE permissions on the table, CF, and KV to update RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = makeFamilyMap(family, qualifier); - AuthResult authResult = permissionGranted(OpType.CHECK_AND_PUT, - user, env, families, Action.READ, Action.WRITE); + Map> families = makeFamilyMap(family, qualifier); + AuthResult authResult = + permissionGranted(OpType.CHECK_AND_PUT, user, env, families, Action.READ, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { put.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1598,9 +1537,9 @@ public boolean preCheckAndPut(final ObserverContext c, - final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator opp, final ByteArrayComparable comparator, final Put put, - final boolean result) throws IOException { + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator opp, + final ByteArrayComparable comparator, final Put put, final boolean result) + throws IOException { if (put.getAttribute(CHECK_COVERING_PERM) != null) { // We had failure with table, cf and q perm checks and now giving a chance for cell // perm check @@ -1609,12 +1548,12 @@ public boolean preCheckAndPutAfterRowLock(final ObserverContext c, - final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOperator op, - final ByteArrayComparable comparator, final Delete delete, - final boolean result) throws IOException { + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final ByteArrayComparable comparator, final Delete delete, final boolean result) + throws IOException { // An ACL on a delete is useless, we shouldn't allow it if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) { - throw new DoNotRetryIOException("ACL on checkAndDelete has no effect: " + - delete.toString()); + throw new DoNotRetryIOException("ACL on checkAndDelete has no effect: " + delete.toString()); } // Require READ and WRITE permissions on the table, CF, and the KV covered // by the delete RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = makeFamilyMap(family, qualifier); + Map> families = makeFamilyMap(family, qualifier); User user = getActiveUser(c); - AuthResult authResult = permissionGranted( - OpType.CHECK_AND_DELETE, user, env, families, Action.READ, Action.WRITE); + 
AuthResult authResult = + permissionGranted(OpType.CHECK_AND_DELETE, user, env, families, Action.READ, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { delete.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } return result; @@ -1656,10 +1592,9 @@ public boolean preCheckAndDelete(final ObserverContext c, final byte[] row, - final byte[] family, final byte[] qualifier, final CompareOperator op, - final ByteArrayComparable comparator, final Delete delete, final boolean result) - throws IOException { + final ObserverContext c, final byte[] row, final byte[] family, + final byte[] qualifier, final CompareOperator op, final ByteArrayComparable comparator, + final Delete delete, final boolean result) throws IOException { if (delete.getAttribute(CHECK_COVERING_PERM) != null) { // We had failure with table, cf and q perm checks and now giving a chance for cell // perm check @@ -1667,13 +1602,13 @@ public boolean preCheckAndDeleteAfterRowLock( Map> families = makeFamilyMap(family, qualifier); AuthResult authResult = null; User user = getActiveUser(c); - if (checkCoveringPermission(user, OpType.CHECK_AND_DELETE, c.getEnvironment(), - row, families, HConstants.LATEST_TIMESTAMP, Action.READ)) { - authResult = AuthResult.allow(OpType.CHECK_AND_DELETE.toString(), - "Covering cell set", user, Action.READ, table, families); + if (checkCoveringPermission(user, OpType.CHECK_AND_DELETE, c.getEnvironment(), row, families, + HConstants.LATEST_TIMESTAMP, Action.READ)) { + authResult = AuthResult.allow(OpType.CHECK_AND_DELETE.toString(), "Covering cell set", user, + Action.READ, table, families); } else { - authResult = AuthResult.deny(OpType.CHECK_AND_DELETE.toString(), - "Covering cell set", user, Action.READ, table, families); + authResult = AuthResult.deny(OpType.CHECK_AND_DELETE.toString(), "Covering cell set", user, + Action.READ, table, families); } AccessChecker.logResult(authResult); if (authorizationEnabled && !authResult.isAllowed()) { @@ -1691,16 +1626,14 @@ public Result preAppend(ObserverContext c, Append // Require WRITE permission to the table, CF, and the KV to be appended RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = append.getFamilyCellMap(); - AuthResult authResult = permissionGranted(OpType.APPEND, user, - env, families, Action.WRITE); + Map> families = append.getFamilyCellMap(); + AuthResult authResult = permissionGranted(OpType.APPEND, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { append.setAttribute(CHECK_COVERING_PERM, TRUE); - } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + } else if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1718,24 +1651,21 @@ public Result preAppend(ObserverContext c, Append @Override public Result preIncrement(final ObserverContext c, - final Increment increment) - throws IOException { + final Increment increment) throws IOException { User user = getActiveUser(c); checkForReservedTagPresence(user, increment); // Require WRITE permission to the table, 
CF, and the KV to be replaced by // the incremented value RegionCoprocessorEnvironment env = c.getEnvironment(); - Map> families = increment.getFamilyCellMap(); - AuthResult authResult = permissionGranted(OpType.INCREMENT, - user, env, families, Action.WRITE); + Map> families = increment.getFamilyCellMap(); + AuthResult authResult = permissionGranted(OpType.INCREMENT, user, env, families, Action.WRITE); AccessChecker.logResult(authResult); if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { increment.setAttribute(CHECK_COVERING_PERM, TRUE); } else if (authorizationEnabled) { - throw new AccessDeniedException("Insufficient permissions " + - authResult.toContextString()); + throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1760,8 +1690,9 @@ public List> postIncrementBeforeWAL( if (!cellFeaturesEnabled || mutation.getACL() == null) { return cellPairs; } - return cellPairs.stream().map(pair -> new Pair<>(pair.getFirst(), - createNewCellWithTags(mutation, pair.getFirst(), pair.getSecond()))) + return cellPairs.stream() + .map(pair -> new Pair<>(pair.getFirst(), + createNewCellWithTags(mutation, pair.getFirst(), pair.getSecond()))) .collect(Collectors.toList()); } @@ -1774,8 +1705,9 @@ public List> postAppendBeforeWAL( if (!cellFeaturesEnabled || mutation.getACL() == null) { return cellPairs; } - return cellPairs.stream().map(pair -> new Pair<>(pair.getFirst(), - createNewCellWithTags(mutation, pair.getFirst(), pair.getSecond()))) + return cellPairs.stream() + .map(pair -> new Pair<>(pair.getFirst(), + createNewCellWithTags(mutation, pair.getFirst(), pair.getSecond()))) .collect(Collectors.toList()); } @@ -1792,7 +1724,7 @@ private Cell createNewCellWithTags(Mutation mutation, Cell oldCell, Cell newCell // Not an ACL tag, just carry it through if (LOG.isTraceEnabled()) { LOG.trace("Carrying forward tag from " + newCell + ": type " + tag.getType() - + " length " + tag.getValueLength()); + + " length " + tag.getValueLength()); } tags.add(tag); } @@ -1824,8 +1756,8 @@ public RegionScanner postScannerOpen(final ObserverContext c, - final InternalScanner s, final List result, - final int limit, final boolean hasNext) throws IOException { + final InternalScanner s, final List result, final int limit, final boolean hasNext) + throws IOException { requireScannerOwner(s); return hasNext; } @@ -1844,9 +1776,8 @@ public void postScannerClose(final ObserverContext } /** - * Verify, when servicing an RPC, that the caller is the scanner owner. - * If so, we assume that access control is correctly enforced based on - * the checks performed in preScannerOpen() + * Verify, when servicing an RPC, that the caller is the scanner owner. 
If so, we assume that + * access control is correctly enforced based on the checks performed in preScannerOpen() */ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { if (!RpcServer.isInRpcCallContext()) { @@ -1855,21 +1786,19 @@ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException String requestUserName = RpcServer.getRequestUserName().orElse(null); String owner = scannerOwners.get(s); if (authorizationEnabled && owner != null && !owner.equals(requestUserName)) { - throw new AccessDeniedException("User '"+ requestUserName +"' is not the scanner owner!"); + throw new AccessDeniedException("User '" + requestUserName + "' is not the scanner owner!"); } } /** - * Verifies user has CREATE or ADMIN privileges on - * the Column Families involved in the bulkLoadHFile - * request. Specific Column Write privileges are presently - * ignored. + * Verifies user has CREATE or ADMIN privileges on the Column Families involved in the + * bulkLoadHFile request. Specific Column Write privileges are presently ignored. */ @Override public void preBulkLoadHFile(ObserverContext ctx, List> familyPaths) throws IOException { User user = getActiveUser(ctx); - for(Pair el : familyPaths) { + for (Pair el : familyPaths) { accessChecker.requirePermission(user, "preBulkLoadHFile", ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), el.getFirst(), null, null, Action.ADMIN, Action.CREATE); @@ -1877,31 +1806,29 @@ public void preBulkLoadHFile(ObserverContext ctx, } /** - * Authorization check for - * SecureBulkLoadProtocol.prepareBulkLoad() + * Authorization check for SecureBulkLoadProtocol.prepareBulkLoad() * @param ctx the context * @throws IOException */ @Override public void prePrepareBulkLoad(ObserverContext ctx) - throws IOException { + throws IOException { requireAccess(ctx, "prePrepareBulkLoad", - ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, - Action.CREATE); + ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, + Action.CREATE); } /** - * Authorization security check for - * SecureBulkLoadProtocol.cleanupBulkLoad() + * Authorization security check for SecureBulkLoadProtocol.cleanupBulkLoad() * @param ctx the context * @throws IOException */ @Override public void preCleanupBulkLoad(ObserverContext ctx) - throws IOException { + throws IOException { requireAccess(ctx, "preCleanupBulkLoad", - ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, - Action.CREATE); + ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), Action.ADMIN, + Action.CREATE); } /* ---- EndpointObserver implementation ---- */ @@ -1913,9 +1840,8 @@ public Message preEndpointInvocation(ObserverContext ctx, Service service, String methodName, Message request, Message.Builder responseBuilder) - throws IOException { } + throws IOException { + } /* ---- Protobuf AccessControlService implementation ---- */ /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use - * {@link Admin#grant(UserPermission, boolean)} instead. + * {@link Admin#grant(UserPermission, boolean)} instead. 
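For reference, a client-side sketch of the replacement API named in the @deprecated note above; the user, table and family names are invented.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.Bytes;

// Grant and later revoke READ on demo_table:f for user "bob" through Admin instead of the
// deprecated AccessControlService endpoint.
static void grantAndRevoke(Admin admin) throws IOException {
  UserPermission readOnCf = new UserPermission("bob",
    Permission.newBuilder(TableName.valueOf("demo_table"))
      .withFamily(Bytes.toBytes("f"))
      .withActions(Permission.Action.READ)
      .build());
  admin.grant(readOnCf, /* mergeExistingPermissions= */ true);
  admin.revoke(readOnCf);
}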
* @see Admin#grant(UserPermission, boolean) * @see HBASE-21739 */ @Deprecated @Override - public void grant(RpcController controller, - AccessControlProtos.GrantRequest request, + public void grant(RpcController controller, AccessControlProtos.GrantRequest request, RpcCallback done) { final UserPermission perm = AccessControlUtil.toUserPermission(request.getUserPermission()); AccessControlProtos.GrantResponse response = null; @@ -1948,8 +1874,8 @@ public void grant(RpcController controller, } User caller = RpcServer.getRequestUser().orElse(null); if (LOG.isDebugEnabled()) { - LOG.debug("Received request from {} to grant access permission {}", - caller.getName(), perm.toString()); + LOG.debug("Received request from {} to grant access permission {}", caller.getName(), + perm.toString()); } preGrantOrRevoke(caller, "grant", perm); @@ -1962,8 +1888,8 @@ public void grant(RpcController controller, AUDITLOG.trace("Granted permission " + perm.toString()); } } else { - throw new CoprocessorException(AccessController.class, "This method " - + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); + throw new CoprocessorException(AccessController.class, + "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } response = AccessControlProtos.GrantResponse.getDefaultInstance(); } catch (IOException ioe) { @@ -1975,7 +1901,7 @@ public void grant(RpcController controller, /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link Admin#revoke(UserPermission)} - * instead. + * instead. * @see Admin#revoke(UserPermission) * @see HBASE-21739 */ @@ -2005,8 +1931,8 @@ public void revoke(RpcController controller, AccessControlProtos.RevokeRequest r AUDITLOG.trace("Revoked permission " + perm.toString()); } } else { - throw new CoprocessorException(AccessController.class, "This method " - + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); + throw new CoprocessorException(AccessController.class, + "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } response = AccessControlProtos.RevokeResponse.getDefaultInstance(); } catch (IOException ioe) { @@ -2018,7 +1944,7 @@ public void revoke(RpcController controller, AccessControlProtos.RevokeRequest r /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use - * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. + * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead. * @see Admin#getUserPermissions(GetUserPermissionsRequest) * @see HBASE-21911 */ @@ -2060,8 +1986,8 @@ public void getUserPermissions(RpcController controller, regionEnv.getConnection().getAdmin().getUserPermissions(getUserPermissionsRequest); response = AccessControlUtil.buildGetUserPermissionsResponse(perms); } else { - throw new CoprocessorException(AccessController.class, "This method " - + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); + throw new CoprocessorException(AccessController.class, + "This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table."); } } catch (IOException ioe) { // pass exception back up @@ -2072,7 +1998,7 @@ public void getUserPermissions(RpcController controller, /** * @deprecated since 2.2.0 and will be removed 4.0.0. Use {@link Admin#hasUserPermissions(List)} - * instead. + * instead. 
* @see Admin#hasUserPermissions(List) * @see HBASE-22117 */ @@ -2146,20 +2072,19 @@ private void checkSystemOrSuperUser(User activeUser) throws IOException { return; } if (!Superusers.isSuperUser(activeUser)) { - throw new AccessDeniedException("User '" + (activeUser != null ? - activeUser.getShortName() : "null") + "' is not system or super user."); + throw new AccessDeniedException( + "User '" + (activeUser != null ? activeUser.getShortName() : "null") + + "' is not system or super user."); } } @Override - public void preStopRegionServer( - ObserverContext ctx) + public void preStopRegionServer(ObserverContext ctx) throws IOException { requirePermission(ctx, "preStopRegionServer", Action.ADMIN); } - private Map> makeFamilyMap(byte[] family, - byte[] qualifier) { + private Map> makeFamilyMap(byte[] family, byte[] qualifier) { if (family == null) { return null; } @@ -2171,8 +2096,8 @@ public void preStopRegionServer( @Override public void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException { + List tableNamesList, List descriptors, String regex) + throws IOException { // We are delegating the authorization check to postGetTableDescriptors as we don't have // any concrete set of table names when a regex is present or the full list is requested. if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) { @@ -2193,8 +2118,8 @@ public void preGetTableDescriptors(ObserverContext @Override public void postGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException { + List tableNamesList, List descriptors, String regex) + throws IOException { // Skipping as checks in this case are already done by preGetTableDescriptors. 
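The two @deprecated endpoints above point callers at Admin#getUserPermissions and Admin#hasUserPermissions; a client-side usage sketch of both, with the table name and user invented.

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

static void inspectPermissions(Admin admin) throws IOException {
  // All permissions currently stored for demo_table.
  List<UserPermission> perms = admin.getUserPermissions(
    GetUserPermissionsRequest.newBuilder(TableName.valueOf("demo_table")).build());
  perms.forEach(p -> System.out.println(p));
  // Does user "bob" hold READ on that table?
  Permission read = Permission.newBuilder(TableName.valueOf("demo_table"))
    .withActions(Permission.Action.READ).build();
  List<Boolean> results = admin.hasUserPermissions("bob", Collections.singletonList(read));
  System.out.println("bob can read demo_table: " + results.get(0));
}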
if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) { return; @@ -2206,8 +2131,8 @@ public void postGetTableDescriptors(ObserverContext ctx, @Override public void preMergeRegions(final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException { - requirePermission(ctx, "mergeRegions", regionsToMerge[0].getTable(), null, null, - Action.ADMIN); + final RegionInfo[] regionsToMerge) throws IOException { + requirePermission(ctx, "mergeRegions", regionsToMerge[0].getTable(), null, null, Action.ADMIN); } @Override @@ -2244,7 +2168,8 @@ public void preRollWALWriterRequest(ObserverContext ctx) - throws IOException { } + throws IOException { + } @Override public void preSetUserQuota(final ObserverContext ctx, @@ -2255,14 +2180,14 @@ public void preSetUserQuota(final ObserverContext @Override public void preSetUserQuota(final ObserverContext ctx, final String userName, final TableName tableName, final GlobalQuotaSettings quotas) - throws IOException { + throws IOException { requirePermission(ctx, "setUserTableQuota", tableName, null, null, Action.ADMIN); } @Override public void preSetUserQuota(final ObserverContext ctx, final String userName, final String namespace, final GlobalQuotaSettings quotas) - throws IOException { + throws IOException { requirePermission(ctx, "setUserNamespaceQuota", Action.ADMIN); } @@ -2297,8 +2222,8 @@ public void preReplicateLogEntries(ObserverContext ctx) - throws IOException { + public void preClearCompactionQueues(ObserverContext ctx) + throws IOException { requirePermission(ctx, "preClearCompactionQueues", Permission.Action.ADMIN); } @@ -2392,9 +2317,9 @@ public void preSwitchExceedThrottleQuota(ObserverContext ctx) throws IOException { // for non-rpc handling, fallback to system user @@ -2407,7 +2332,7 @@ private User getActiveUser(ObserverContext ctx) throws IOException { /** * @deprecated since 2.2.0 and will be removed in 4.0.0. Use - * {@link Admin#hasUserPermissions(String, List)} instead. + * {@link Admin#hasUserPermissions(String, List)} instead. * @see Admin#hasUserPermissions(String, List) * @see HBASE-22117 */ @@ -2538,85 +2463,84 @@ private void preHasUserPermissions(User caller, String userName, List ctx, Set
          servers, Set tables, String targetGroup) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "moveServersAndTables", - null, Permission.Action.ADMIN); + accessChecker.requirePermission(getActiveUser(ctx), "moveServersAndTables", null, + Permission.Action.ADMIN); } @Override public void preMoveServers(final ObserverContext ctx, Set
          servers, String targetGroup) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "moveServers", - null, Permission.Action.ADMIN); + accessChecker.requirePermission(getActiveUser(ctx), "moveServers", null, + Permission.Action.ADMIN); } @Override public void preMoveTables(ObserverContext ctx, Set tables, String targetGroup) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "moveTables", - null, Permission.Action.ADMIN); + accessChecker.requirePermission(getActiveUser(ctx), "moveTables", null, + Permission.Action.ADMIN); } @Override - public void preAddRSGroup(ObserverContext ctx, - String name) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "addRSGroup", - null, Permission.Action.ADMIN); + public void preAddRSGroup(ObserverContext ctx, String name) + throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "addRSGroup", null, + Permission.Action.ADMIN); } @Override - public void preRemoveRSGroup(ObserverContext ctx, - String name) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "removeRSGroup", - null, Permission.Action.ADMIN); + public void preRemoveRSGroup(ObserverContext ctx, String name) + throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "removeRSGroup", null, + Permission.Action.ADMIN); } @Override - public void preBalanceRSGroup(ObserverContext ctx, - String groupName, BalanceRequest request) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "balanceRSGroup", - null, Permission.Action.ADMIN); + public void preBalanceRSGroup(ObserverContext ctx, String groupName, + BalanceRequest request) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "balanceRSGroup", null, + Permission.Action.ADMIN); } @Override - public void preRemoveServers( - ObserverContext ctx, + public void preRemoveServers(ObserverContext ctx, Set
          servers) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "removeServers", - null, Permission.Action.ADMIN); + accessChecker.requirePermission(getActiveUser(ctx), "removeServers", null, + Permission.Action.ADMIN); } @Override - public void preGetRSGroupInfo(ObserverContext ctx, - String groupName) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfo", - null, Permission.Action.ADMIN); + public void preGetRSGroupInfo(ObserverContext ctx, String groupName) + throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfo", null, + Permission.Action.ADMIN); } @Override public void preGetRSGroupInfoOfTable(ObserverContext ctx, TableName tableName) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfoOfTable", - null, Permission.Action.ADMIN); - //todo: should add check for table existence + accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfoOfTable", null, + Permission.Action.ADMIN); + // todo: should add check for table existence } @Override public void preListRSGroups(ObserverContext ctx) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "listRSGroups", - null, Permission.Action.ADMIN); + accessChecker.requirePermission(getActiveUser(ctx), "listRSGroups", null, + Permission.Action.ADMIN); } @Override public void preListTablesInRSGroup(ObserverContext ctx, - String groupName) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "listTablesInRSGroup", - null, Permission.Action.ADMIN); + String groupName) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "listTablesInRSGroup", null, + Permission.Action.ADMIN); } @Override public void preGetConfiguredNamespacesAndTablesInRSGroup( - ObserverContext ctx, String groupName) throws IOException { + ObserverContext ctx, String groupName) throws IOException { accessChecker.requirePermission(getActiveUser(ctx), "getConfiguredNamespacesAndTablesInRSGroup", null, Permission.Action.ADMIN); } @@ -2624,21 +2548,21 @@ public void preGetConfiguredNamespacesAndTablesInRSGroup( @Override public void preGetRSGroupInfoOfServer(ObserverContext ctx, Address server) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfoOfServer", - null, Permission.Action.ADMIN); + accessChecker.requirePermission(getActiveUser(ctx), "getRSGroupInfoOfServer", null, + Permission.Action.ADMIN); } @Override public void preRenameRSGroup(ObserverContext ctx, String oldName, String newName) throws IOException { - accessChecker.requirePermission(getActiveUser(ctx), "renameRSGroup", - null, Permission.Action.ADMIN); + accessChecker.requirePermission(getActiveUser(ctx), "renameRSGroup", null, + Permission.Action.ADMIN); } @Override public void preUpdateRSGroupConfig(final ObserverContext ctx, - final String groupName, final Map configuration) throws IOException { - accessChecker - .requirePermission(getActiveUser(ctx), "updateRSGroupConfig", null, Permission.Action.ADMIN); + final String groupName, final Map configuration) throws IOException { + accessChecker.requirePermission(getActiveUser(ctx), "updateRSGroupConfig", null, + Permission.Action.ADMIN); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java index 3ced725e0ad7..0411fbf91f26 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; @@ -26,7 +25,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.Cell; @@ -44,17 +42,15 @@ /** * Performs authorization checks for a given user's assigned permissions. *

          - * There're following scopes: Global, Namespace, Table, Family, - * Qualifier, Cell. - * Generally speaking, higher scopes can overrides lower scopes, - * except for Cell permission can be granted even a user has not permission on specified table, - * which means the user can get/scan only those granted cells parts. + * There are the following scopes: Global, Namespace, Table, Family, + * Qualifier, Cell. Generally speaking, higher scopes override lower scopes, except that a Cell + * permission can be granted even when the user has no permission on the specified table, which + * means the user can get/scan only those granted cells. *

          - * e.g, if user A has global permission R(ead), he can - * read table T without checking table scope permission, so authorization checks alway starts from - * Global scope. + * e.g., if user A has global permission R(ead), he can read table T without checking table + * scope permission, so authorization checks always start from Global scope. *

          - * For each scope, not only user but also groups he belongs to will be checked. + * For each scope, not only the user but also the groups the user belongs to are checked. *

          */ @InterfaceAudience.Private @@ -91,20 +87,21 @@ void clear() { } } } + PermissionCache NS_NO_PERMISSION = new PermissionCache<>(); PermissionCache TBL_NO_PERMISSION = new PermissionCache<>(); /** - * Cache for global permission excluding superuser and supergroup. - * Since every user/group can only have one global permission, no need to use PermissionCache. + * Cache for global permission excluding superuser and supergroup. Since every user/group can only + * have one global permission, no need to use PermissionCache. */ private Map globalCache = new ConcurrentHashMap<>(); /** Cache for namespace permission. */ private ConcurrentHashMap> namespaceCache = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); /** Cache for table permission. */ private ConcurrentHashMap> tableCache = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private static final Logger LOG = LoggerFactory.getLogger(AuthManager.class); @@ -188,7 +185,7 @@ private void updateGlobalCache(ListMultimap globalPerms) { */ private void updateTableCache(TableName table, ListMultimap tablePerms) { PermissionCache cacheToUpdate = - tableCache.getOrDefault(table, new PermissionCache<>()); + tableCache.getOrDefault(table, new PermissionCache<>()); clearCache(cacheToUpdate); updateCache(tablePerms, cacheToUpdate); tableCache.put(table, cacheToUpdate); @@ -200,10 +197,9 @@ private void updateTableCache(TableName table, ListMultimap * @param namespace updated namespace * @param nsPerms new namespace permissions */ - private void updateNamespaceCache(String namespace, - ListMultimap nsPerms) { + private void updateNamespaceCache(String namespace, ListMultimap nsPerms) { PermissionCache cacheToUpdate = - namespaceCache.getOrDefault(namespace, new PermissionCache<>()); + namespaceCache.getOrDefault(namespace, new PermissionCache<>()); clearCache(cacheToUpdate); updateCache(nsPerms, cacheToUpdate); namespaceCache.put(namespace, cacheToUpdate); @@ -266,8 +262,8 @@ public boolean authorizeUserNamespace(User user, String namespace, Permission.Ac if (authorizeUserGlobal(user, action)) { return true; } - PermissionCache nsPermissions = namespaceCache.getOrDefault(namespace, - NS_NO_PERMISSION); + PermissionCache nsPermissions = + namespaceCache.getOrDefault(namespace, NS_NO_PERMISSION); if (authorizeNamespace(nsPermissions.get(user.getShortName()), namespace, action)) { return true; } @@ -279,8 +275,8 @@ public boolean authorizeUserNamespace(User user, String namespace, Permission.Ac return false; } - private boolean authorizeNamespace(Set permissions, - String namespace, Permission.Action action) { + private boolean authorizeNamespace(Set permissions, String namespace, + Permission.Action action) { if (permissions == null) { return false; } @@ -293,8 +289,8 @@ private boolean authorizeNamespace(Set permissions, } /** - * Checks if the user has access to the full table or at least a family/qualifier - * for the specified action. + * Checks if the user has access to the full table or at least a family/qualifier for the + * specified action. 
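The class Javadoc rewrapped above describes the scope hierarchy (Global, Namespace, Table, Family, Qualifier, plus per-cell ACLs) that these caches back. The same idea seen from the grant side, with invented names; a user holding the global permission never needs the narrower ones, because checks start at Global scope and work downwards.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

// Three grants of the same action at successively narrower scopes.
static Permission[] sameActionAtDifferentScopes() {
  Permission global = Permission.newBuilder()
    .withActions(Permission.Action.READ).build();
  Permission namespace = Permission.newBuilder("demo_ns")
    .withActions(Permission.Action.READ).build();
  Permission column = Permission.newBuilder(TableName.valueOf("demo_ns", "demo_table"))
    .withFamily(Bytes.toBytes("f"))
    .withQualifier(Bytes.toBytes("q"))
    .withActions(Permission.Action.READ).build();
  return new Permission[] { global, namespace, column };
}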
* @param user user name * @param table table name * @param action action in one of [Read, Write, Create, Exec, Admin] @@ -310,8 +306,8 @@ public boolean accessUserTable(User user, TableName table, Permission.Action act if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) { return true; } - PermissionCache tblPermissions = tableCache.getOrDefault(table, - TBL_NO_PERMISSION); + PermissionCache tblPermissions = + tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (hasAccessTable(tblPermissions.get(user.getShortName()), action)) { return true; } @@ -368,8 +364,8 @@ public boolean authorizeUserTable(User user, TableName table, byte[] family, * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ - public boolean authorizeUserTable(User user, TableName table, byte[] family, - byte[] qualifier, Permission.Action action) { + public boolean authorizeUserTable(User user, TableName table, byte[] family, byte[] qualifier, + Permission.Action action) { if (user == null) { return false; } @@ -379,22 +375,22 @@ public boolean authorizeUserTable(User user, TableName table, byte[] family, if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) { return true; } - PermissionCache tblPermissions = tableCache.getOrDefault(table, - TBL_NO_PERMISSION); + PermissionCache tblPermissions = + tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (authorizeTable(tblPermissions.get(user.getShortName()), table, family, qualifier, action)) { return true; } for (String group : user.getGroupNames()) { - if (authorizeTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), - table, family, qualifier, action)) { + if (authorizeTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), table, family, qualifier, + action)) { return true; } } return false; } - private boolean authorizeTable(Set permissions, - TableName table, byte[] family, byte[] qualifier, Permission.Action action) { + private boolean authorizeTable(Set permissions, TableName table, byte[] family, + byte[] qualifier, Permission.Action action) { if (permissions == null) { return false; } @@ -407,32 +403,32 @@ private boolean authorizeTable(Set permissions, } /** - * Check if user has given action privilige in table:family scope. - * This method is for backward compatibility. + * Check if user has given action privilige in table:family scope. This method is for backward + * compatibility. 
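A condensed sketch of the evaluation order that accessUserTable and authorizeUserTable keep while being rewrapped here: Global first, then Namespace, then Table/Family/Qualifier. authorizeUserTable already performs this cascade internally, so the explicit calls below only make the order visible; the helper name is invented.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AuthManager;
import org.apache.hadoop.hbase.security.access.Permission;

static boolean allowedOnTable(AuthManager authManager, User user, TableName table, byte[] family,
    byte[] qualifier, Permission.Action action) {
  if (user == null) {
    return false;
  }
  if (authManager.authorizeUserGlobal(user, action)) {
    return true; // a global grant covers every namespace and table
  }
  if (authManager.authorizeUserNamespace(user, table.getNamespaceAsString(), action)) {
    return true; // a namespace grant covers all tables in that namespace
  }
  // Finally fall through to table/family/qualifier scope (group entries are consulted there too).
  return authManager.authorizeUserTable(user, table, family, qualifier, action);
}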
* @param user user name * @param table table name * @param family family names * @param action one of action in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ - public boolean authorizeUserFamily(User user, TableName table, - byte[] family, Permission.Action action) { - PermissionCache tblPermissions = tableCache.getOrDefault(table, - TBL_NO_PERMISSION); + public boolean authorizeUserFamily(User user, TableName table, byte[] family, + Permission.Action action) { + PermissionCache tblPermissions = + tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (authorizeFamily(tblPermissions.get(user.getShortName()), table, family, action)) { return true; } for (String group : user.getGroupNames()) { - if (authorizeFamily(tblPermissions.get(AuthUtil.toGroupEntry(group)), - table, family, action)) { + if (authorizeFamily(tblPermissions.get(AuthUtil.toGroupEntry(group)), table, family, + action)) { return true; } } return false; } - private boolean authorizeFamily(Set permissions, - TableName table, byte[] family, Permission.Action action) { + private boolean authorizeFamily(Set permissions, TableName table, byte[] family, + Permission.Action action) { if (permissions == null) { return false; } @@ -456,11 +452,11 @@ public boolean authorizeCell(User user, TableName table, Cell cell, Permission.A try { List perms = PermissionStorage.getCellPermissionsForUser(user, cell); if (LOG.isTraceEnabled()) { - LOG.trace("Perms for user {} in table {} in cell {}: {}", - user.getShortName(), table, cell, (perms != null ? perms : "")); + LOG.trace("Perms for user {} in table {} in cell {}: {}", user.getShortName(), table, cell, + (perms != null ? perms : "")); } if (perms != null) { - for (Permission p: perms) { + for (Permission p : perms) { if (p.implies(action)) { return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java index 64a8c4cfeae9..274122382567 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,25 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; /** - * Represents the result of an authorization check for logging and error - * reporting. + * Represents the result of an authorization check for logging and error reporting. 
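Before the class body below, a small sketch of how the callers earlier in this diff consume AuthResult: build an allow or deny result, hand it to AccessChecker.logResult for the audit trail, and raise AccessDeniedException when it is not allowed. The request and reason strings are invented.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.AuthResult;
import org.apache.hadoop.hbase.security.access.Permission;

// 'granted' stands in for whatever permission check the caller performed beforehand.
static void auditAndEnforce(User user, TableName table, boolean granted)
    throws AccessDeniedException {
  AuthResult result = granted
    ? AuthResult.allow("get", "Table permission granted", user, Permission.Action.READ, table,
      null, null)
    : AuthResult.deny("get", "No READ permission on table", user, Permission.Action.READ, table,
      null, null);
  AccessChecker.logResult(result);
  if (!result.isAllowed()) {
    throw new AccessDeniedException("Insufficient permissions " + result.toContextString());
  }
}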
*/ @InterfaceAudience.Private public class AuthResult { @@ -67,8 +64,7 @@ public AuthResult(boolean allowed, String request, String reason, User user, } public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, TableName table, - Map> families) { + Permission.Action action, TableName table, Map> families) { this.allowed = allowed; this.request = request; this.reason = reason; @@ -83,7 +79,7 @@ public AuthResult(boolean allowed, String request, String reason, User user, } public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, String namespace) { + Permission.Action action, String namespace) { this.allowed = allowed; this.request = request; this.reason = reason; @@ -129,7 +125,9 @@ public String getRequest() { return request; } - public Params getParams() { return this.params;} + public Params getParams() { + return this.params; + } public void setAllowed(boolean allowed) { this.allowed = allowed; @@ -150,11 +148,11 @@ private static String toFamiliesString(Map> fami for (Object o : entry.getValue()) { String qualifier; if (o instanceof byte[]) { - qualifier = Bytes.toString((byte[])o); + qualifier = Bytes.toString((byte[]) o); } else if (o instanceof Cell) { Cell c = (Cell) o; qualifier = Bytes.toString(c.getQualifierArray(), c.getQualifierOffset(), - c.getQualifierLength()); + c.getQualifierLength()); } else { // Shouldn't really reach this? qualifier = o.toString(); @@ -185,27 +183,17 @@ private static String toFamiliesString(Map> fami public String toContextString() { StringBuilder sb = new StringBuilder(); String familiesString = toFamiliesString(families, family, qualifier); - sb.append("(user=") - .append(user != null ? user.getName() : "UNKNOWN") - .append(", "); - sb.append("scope=") - .append(namespace != null ? namespace : - table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()) - .append(", "); - if(namespace == null && familiesString.length() > 0) { - sb.append("family=") - .append(familiesString) - .append(", "); + sb.append("(user=").append(user != null ? user.getName() : "UNKNOWN").append(", "); + sb.append("scope=").append(namespace != null ? namespace + : table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()).append(", "); + if (namespace == null && familiesString.length() > 0) { + sb.append("family=").append(familiesString).append(", "); } String paramsString = params.toString(); - if(paramsString.length() > 0) { - sb.append("params=[") - .append(paramsString) - .append("],"); + if (paramsString.length() > 0) { + sb.append("params=[").append(paramsString).append("],"); } - sb.append("action=") - .append(action != null ? action.toString() : "") - .append(")"); + sb.append("action=").append(action != null ? 
action.toString() : "").append(")"); return sb.toString(); } @@ -214,35 +202,33 @@ public String toString() { return "AuthResult" + toContextString(); } - public static AuthResult allow(String request, String reason, User user, - Permission.Action action, String namespace) { + public static AuthResult allow(String request, String reason, User user, Permission.Action action, + String namespace) { return new AuthResult(true, request, reason, user, action, namespace); } - public static AuthResult allow(String request, String reason, User user, - Permission.Action action, TableName table, byte[] family, byte[] qualifier) { + public static AuthResult allow(String request, String reason, User user, Permission.Action action, + TableName table, byte[] family, byte[] qualifier) { return new AuthResult(true, request, reason, user, action, table, family, qualifier); } - public static AuthResult allow(String request, String reason, User user, - Permission.Action action, TableName table, - Map> families) { + public static AuthResult allow(String request, String reason, User user, Permission.Action action, + TableName table, Map> families) { return new AuthResult(true, request, reason, user, action, table, families); } - public static AuthResult deny(String request, String reason, User user, - Permission.Action action, String namespace) { + public static AuthResult deny(String request, String reason, User user, Permission.Action action, + String namespace) { return new AuthResult(false, request, reason, user, action, namespace); } - public static AuthResult deny(String request, String reason, User user, - Permission.Action action, TableName table, byte[] family, byte[] qualifier) { + public static AuthResult deny(String request, String reason, User user, Permission.Action action, + TableName table, byte[] family, byte[] qualifier) { return new AuthResult(false, request, reason, user, action, table, family, qualifier); } - public static AuthResult deny(String request, String reason, User user, - Permission.Action action, TableName table, - Map> families) { + public static AuthResult deny(String request, String reason, User user, Permission.Action action, + TableName table, Map> families) { return new AuthResult(false, request, reason, user, action, table, families); } @@ -292,12 +278,10 @@ public Params setQualifier(byte[] qualifier) { @Override public String toString() { String familiesString = toFamiliesString(families, family, qualifier); - String[] params = new String[] { - namespace != null ? "namespace=" + namespace : null, + String[] params = new String[] { namespace != null ? "namespace=" + namespace : null, tableName != null ? "table=" + tableName.getNameWithNamespaceInclAsString() : null, familiesString.length() > 0 ? "family=" + familiesString : null, - extraParams.isEmpty() ? null : concatenateExtraParams() - }; + extraParams.isEmpty() ? 
null : concatenateExtraParams() }; return Joiner.on(",").skipNulls().join(params); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java index 1e83e966102f..5152f801a02c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.IOException; @@ -45,8 +44,8 @@ public class CoprocessorWhitelistMasterObserver implements MasterCoprocessor, Ma public static final String CP_COPROCESSOR_WHITELIST_PATHS_KEY = "hbase.coprocessor.region.whitelist.paths"; - private static final Logger LOG = LoggerFactory - .getLogger(CoprocessorWhitelistMasterObserver.class); + private static final Logger LOG = + LoggerFactory.getLogger(CoprocessorWhitelistMasterObserver.class); @Override public Optional getMasterObserver() { @@ -62,28 +61,25 @@ public TableDescriptor preModifyTable(ObserverContext ctx, - TableDescriptor htd, RegionInfo[] regions) throws IOException { + public void preCreateTable(ObserverContext ctx, TableDescriptor htd, + RegionInfo[] regions) throws IOException { verifyCoprocessors(ctx, htd); } /** * Validates a single whitelist path against the coprocessor path - * @param coprocPath the path to the coprocessor including scheme - * @param wlPath can be: - * 1) a "*" to wildcard all coprocessor paths - * 2) a specific filesystem (e.g. hdfs://my-cluster/) - * 3) a wildcard path to be evaluated by - * {@link FilenameUtils#wildcardMatch(String, String)} - * path can specify scheme or not (e.g. - * "file:///usr/hbase/coprocessors" or for all - * filesystems "/usr/hbase/coprocessors") - * @return if the path was found under the wlPath + * @param coprocPath the path to the coprocessor including scheme + * @param wlPath can be: 1) a "*" to wildcard all coprocessor paths 2) a specific filesystem (e.g. + * hdfs://my-cluster/) 3) a wildcard path to be evaluated by + * {@link FilenameUtils#wildcardMatch(String, String)} path can specify scheme or not + * (e.g. 
"file:///usr/hbase/coprocessors" or for all filesystems + * "/usr/hbase/coprocessors") + * @return if the path was found under the wlPath */ private static boolean validatePath(Path coprocPath, Path wlPath) { // verify if all are allowed if (wlPath.toString().equals("*")) { - return(true); + return (true); } // verify we are on the same filesystem if wlPath has a scheme @@ -113,50 +109,46 @@ private static boolean validatePath(Path coprocPath, Path wlPath) { coprocPathHost = ""; } if (!wlPathScheme.equals(coprocPathScheme) || !wlPathHost.equals(coprocPathHost)) { - return(false); + return (false); } } // allow any on this file-system (file systems were verified to be the same above) if (wlPath.isRoot()) { - return(true); + return (true); } // allow "loose" matches stripping scheme - if (FilenameUtils.wildcardMatch( - Path.getPathWithoutSchemeAndAuthority(coprocPath).toString(), - Path.getPathWithoutSchemeAndAuthority(wlPath).toString())) { - return(true); + if (FilenameUtils.wildcardMatch(Path.getPathWithoutSchemeAndAuthority(coprocPath).toString(), + Path.getPathWithoutSchemeAndAuthority(wlPath).toString())) { + return (true); } - return(false); + return (false); } /** - * Perform the validation checks for a coprocessor to determine if the path - * is white listed or not. - * @throws IOException if path is not included in whitelist or a failure - * occurs in processing - * @param ctx as passed in from the coprocessor - * @param htd as passed in from the coprocessor + * Perform the validation checks for a coprocessor to determine if the path is white listed or + * not. + * @throws IOException if path is not included in whitelist or a failure occurs in processing + * @param ctx as passed in from the coprocessor + * @param htd as passed in from the coprocessor */ private static void verifyCoprocessors(ObserverContext ctx, TableDescriptor htd) throws IOException { - Collection paths = - ctx.getEnvironment().getConfiguration().getStringCollection( - CP_COPROCESSOR_WHITELIST_PATHS_KEY); + Collection paths = ctx.getEnvironment().getConfiguration() + .getStringCollection(CP_COPROCESSOR_WHITELIST_PATHS_KEY); for (CoprocessorDescriptor cp : htd.getCoprocessorDescriptors()) { if (cp.getJarPath().isPresent()) { if (paths.stream().noneMatch(p -> { Path wlPath = new Path(p); if (validatePath(new Path(cp.getJarPath().get()), wlPath)) { - LOG.debug(String.format("Coprocessor %s found in directory %s", - cp.getClassName(), p)); + LOG.debug(String.format("Coprocessor %s found in directory %s", cp.getClassName(), p)); return true; } return false; })) { - throw new IOException(String.format("Loading %s DENIED in %s", - cp.getClassName(), CP_COPROCESSOR_WHITELIST_PATHS_KEY)); + throw new IOException(String.format("Loading %s DENIED in %s", cp.getClassName(), + CP_COPROCESSOR_WHITELIST_PATHS_KEY)); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java index 95927c0b164a..7a093f8d7645 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/NoopAccessChecker.java @@ -19,7 +19,6 @@ import java.util.Collection; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -28,8 +27,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * 
NoopAccessChecker is returned when hbase.security.authorization is not enabled. - * Always allow authorization if any user require any permission. + * NoopAccessChecker is returned when hbase.security.authorization is not enabled. Always allow + * authorization if any user require any permission. */ @InterfaceAudience.Private public final class NoopAccessChecker extends AccessChecker { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java index 268bc36fc45b..d98e52f6aa81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/PermissionStorage.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.ByteArrayInputStream; @@ -78,14 +77,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; /** - * Maintains lists of permission grants to users and groups to allow for - * authorization checks by {@link AccessController}. - * + * Maintains lists of permission grants to users and groups to allow for authorization checks by + * {@link AccessController}. *

          - * Access control lists are stored in an "internal" metadata table named - * {@code _acl_}. Each table's permission grants are stored as a separate row, - * keyed by the table name. KeyValues for permissions assignments are stored - * in one of the formats: + * Access control lists are stored in an "internal" metadata table named {@code _acl_}. Each table's + * permission grants are stored as a separate row, keyed by the table name. KeyValues for + * permissions assignments are stored in one of the formats: + * *

            * Key                      Desc
            * --------                 --------
          @@ -116,8 +114,8 @@ public final class PermissionStorage {
             public static final char NAMESPACE_PREFIX = '@';
           
             /**
          -   * Delimiter to separate user, column family, and qualifier in
          -   * _acl_ table info: column keys */
          +   * Delimiter to separate user, column family, and qualifier in _acl_ table info: column keys
          +   */
             public static final char ACL_KEY_DELIMITER = ',';
           
             private static final Logger LOG = LoggerFactory.getLogger(PermissionStorage.class);
          @@ -147,7 +145,7 @@ public static void addUserPermission(Configuration conf, UserPermission userPerm
               }
           
               Set actionSet = new TreeSet();
          -    if(mergeExistingPermissions){
          +    if (mergeExistingPermissions) {
                 List perms = getUserPermissions(conf, rowKey, null, null, null, false);
                 UserPermission currentPerm = null;
                 for (UserPermission perm : perms) {
          @@ -157,7 +155,7 @@ public static void addUserPermission(Configuration conf, UserPermission userPerm
                   }
                 }
           
          -      if (currentPerm != null && currentPerm.getPermission().getActions() != null){
          +      if (currentPerm != null && currentPerm.getPermission().getActions() != null) {
                   actionSet.addAll(Arrays.asList(currentPerm.getPermission().getActions()));
                 }
               }
          @@ -171,14 +169,9 @@ public static void addUserPermission(Configuration conf, UserPermission userPerm
               for (Permission.Action action : actionSet) {
                 value[index++] = action.code();
               }
          -    p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
          -        .setRow(p.getRow())
          -        .setFamily(ACL_LIST_FAMILY)
          -        .setQualifier(key)
          -        .setTimestamp(p.getTimestamp())
          -        .setType(Type.Put)
          -        .setValue(value)
          -        .build());
          +    p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow())
          +        .setFamily(ACL_LIST_FAMILY).setQualifier(key).setTimestamp(p.getTimestamp())
          +        .setType(Type.Put).setValue(value).build());
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " "
                     + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
          @@ -191,19 +184,17 @@ public static void addUserPermission(Configuration conf, UserPermission userPerm
             }
           
             static void addUserPermission(Configuration conf, UserPermission userPerm, Table t)
          -          throws IOException{
          +      throws IOException {
               addUserPermission(conf, userPerm, t, false);
             }
           
             /**
          -   * Removes a previously granted permission from the stored access control
          -   * lists.  The {@link TablePermission} being removed must exactly match what
          -   * is stored -- no wildcard matching is attempted.  Ie, if user "bob" has
          -   * been granted "READ" access to the "data" table, but only to column family
          -   * plus qualifier "info:colA", then trying to call this method with only
          -   * user "bob" and the table name "data" (but without specifying the
          -   * column qualifier "info:colA") will have no effect.
          -   *
          +   * Removes a previously granted permission from the stored access control lists. The
          +   * {@link TablePermission} being removed must exactly match what is stored -- no wildcard matching
           +   * is attempted. That is, if user "bob" has been granted "READ" access to the "data" table, but only to
          +   * column family plus qualifier "info:colA", then trying to call this method with only user "bob"
          +   * and the table name "data" (but without specifying the column qualifier "info:colA") will have
          +   * no effect.
              * @param conf the configuration
              * @param userPerm the details of the permission to be revoked
              * @param t acl table
          @@ -211,14 +202,13 @@ static void addUserPermission(Configuration conf, UserPermission userPerm, Table
              */
             public static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t)
                 throws IOException {
          -    if (null == userPerm.getPermission().getActions() ||
          -        userPerm.getPermission().getActions().length == 0) {
          +    if (null == userPerm.getPermission().getActions()
          +        || userPerm.getPermission().getActions().length == 0) {
                 removePermissionRecord(conf, userPerm, t);
               } else {
                 // Get all the global user permissions from the acl table
          -      List permsList =
          -        getUserPermissions(conf, userPermissionRowKey(userPerm.getPermission()),
          -          null, null, null, false);
          +      List permsList = getUserPermissions(conf,
          +        userPermissionRowKey(userPerm.getPermission()), null, null, null, false);
                 List remainingActions = new ArrayList<>();
                 List dropActions = Arrays.asList(userPerm.getPermission().getActions());
                 for (UserPermission perm : permsList) {
          @@ -241,7 +231,7 @@ public static void removeUserPermission(Configuration conf, UserPermission userP
                 }
               }
               if (LOG.isDebugEnabled()) {
          -      LOG.debug("Removed permission "+ userPerm.toString());
          +      LOG.debug("Removed permission " + userPerm.toString());
               }
             }
           
          @@ -260,12 +250,12 @@ private static void removePermissionRecord(Configuration conf, UserPermission us
              * Remove specified table from the _acl_ table.
              */
             static void removeTablePermissions(Configuration conf, TableName tableName, Table t)
          -      throws IOException{
          +      throws IOException {
               Delete d = new Delete(tableName.getName());
               d.addFamily(ACL_LIST_FAMILY);
           
               if (LOG.isDebugEnabled()) {
          -      LOG.debug("Removing permissions of removed table "+ tableName);
          +      LOG.debug("Removing permissions of removed table " + tableName);
               }
               try {
                 t.delete(d);
          @@ -278,11 +268,11 @@ static void removeTablePermissions(Configuration conf, TableName tableName, Tabl
              * Remove specified namespace from the acl table.
              */
             static void removeNamespacePermissions(Configuration conf, String namespace, Table t)
          -      throws IOException{
          +      throws IOException {
               Delete d = new Delete(Bytes.toBytes(toNamespaceEntry(namespace)));
               d.addFamily(ACL_LIST_FAMILY);
               if (LOG.isDebugEnabled()) {
          -      LOG.debug("Removing permissions of removed namespace "+ namespace);
          +      LOG.debug("Removing permissions of removed namespace " + namespace);
               }
           
               try {
          @@ -298,10 +288,9 @@ static private void removeTablePermissions(TableName tableName, byte[] column, T
               scan.addFamily(ACL_LIST_FAMILY);
           
               String columnName = Bytes.toString(column);
          -    scan.setFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(
          -        String.format("(%s%s%s)|(%s%s)$",
          -            ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER,
          -            ACL_KEY_DELIMITER, columnName))));
          +    scan.setFilter(new QualifierFilter(CompareOperator.EQUAL,
          +        new RegexStringComparator(String.format("(%s%s%s)|(%s%s)$", ACL_KEY_DELIMITER, columnName,
          +          ACL_KEY_DELIMITER, ACL_KEY_DELIMITER, columnName))));
           
               Set qualifierSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
               ResultScanner scanner = null;
          @@ -336,8 +325,8 @@ static private void removeTablePermissions(TableName tableName, byte[] column, T
             static void removeTablePermissions(Configuration conf, TableName tableName, byte[] column,
                 Table t) throws IOException {
               if (LOG.isDebugEnabled()) {
          -      LOG.debug("Removing permissions of removed column " + Bytes.toString(column) +
          -          " from table "+ tableName);
          +      LOG.debug("Removing permissions of removed column " + Bytes.toString(column) + " from table "
          +          + tableName);
               }
               removeTablePermissions(tableName, column, t, true);
             }
          @@ -358,10 +347,7 @@ static byte[] userPermissionRowKey(Permission permission) {
             }
           
             /**
          -   * Build qualifier key from user permission:
          -   *  username
          -   *  username,family
          -   *  username,family,qualifier
           +   * Build qualifier key from user permission: one of username; username,family; or username,family,qualifier
              */
             static byte[] userPermissionKey(UserPermission permission) {
               byte[] key = Bytes.toBytes(permission.getUser());
          @@ -374,9 +360,9 @@ static byte[] userPermissionKey(UserPermission permission) {
               }
           
               if (family != null && family.length > 0) {
          -      key = Bytes.add(key, Bytes.add(new byte[]{ACL_KEY_DELIMITER}, family));
          +      key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, family));
                 if (qualifier != null && qualifier.length > 0) {
          -        key = Bytes.add(key, Bytes.add(new byte[]{ACL_KEY_DELIMITER}, qualifier));
          +        key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, qualifier));
                 }
               }
           
          @@ -384,8 +370,7 @@ static byte[] userPermissionKey(UserPermission permission) {
             }
           
             /**
          -   * Returns {@code true} if the given region is part of the {@code _acl_}
          -   * metadata table.
          +   * Returns {@code true} if the given region is part of the {@code _acl_} metadata table.
              */
             static boolean isAclRegion(Region region) {
               return ACL_TABLE_NAME.equals(region.getTableDescriptor().getTableName());
          @@ -399,9 +384,7 @@ static boolean isAclTable(TableDescriptor desc) {
             }
           
             /**
          -   * Loads all of the permission grants stored in a region of the {@code _acl_}
          -   * table.
          -   *
          +   * Loads all of the permission grants stored in a region of the {@code _acl_} table.
              * @param aclRegion the acl region
              * @return a map of the permissions for this table.
              * @throws IOException if an error occurs
          @@ -409,11 +392,11 @@ static boolean isAclTable(TableDescriptor desc) {
             static Map> loadAll(Region aclRegion)
                 throws IOException {
               if (!isAclRegion(aclRegion)) {
          -      throw new IOException("Can only load permissions from "+ACL_TABLE_NAME);
          +      throw new IOException("Can only load permissions from " + ACL_TABLE_NAME);
               }
           
               Map> allPerms =
          -      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
          +        new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
           
               // do a full scan of _acl_ table
           
          @@ -459,13 +442,13 @@ static Map> loadAll(Region aclRegio
             }
           
             /**
          -   * Load all permissions from the region server holding {@code _acl_},
          -   * primarily intended for testing purposes.
          +   * Load all permissions from the region server holding {@code _acl_}, primarily intended for
          +   * testing purposes.
              */
          -  static Map> loadAll(
          -      Configuration conf) throws IOException {
          +  static Map> loadAll(Configuration conf)
          +      throws IOException {
               Map> allPerms =
          -      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
          +        new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
           
               // do a full scan of _acl_, filtering on only first table region rows
           
          @@ -624,10 +607,8 @@ private static Pair parsePermissionRecord(byte[] entryName,
               byte[] key = CellUtil.cloneQualifier(kv);
               byte[] value = CellUtil.cloneValue(kv);
               if (LOG.isDebugEnabled()) {
          -      LOG.debug("Read acl: entry[" +
          -        Bytes.toStringBinary(entryName) + "], kv [" +
          -        Bytes.toStringBinary(key) + ": " +
          -        Bytes.toStringBinary(value)+"]");
          +      LOG.debug("Read acl: entry[" + Bytes.toStringBinary(entryName) + "], kv ["
          +          + Bytes.toStringBinary(key) + ": " + Bytes.toStringBinary(value) + "]");
               }
           
               // check for a column family appended to the key
          @@ -670,13 +651,13 @@ private static Pair parsePermissionRecord(byte[] entryName,
               int idx = username.indexOf(ACL_KEY_DELIMITER);
               byte[] permFamily = null;
               byte[] permQualifier = null;
          -    if (idx > 0 && idx < username.length()-1) {
          -      String remainder = username.substring(idx+1);
          +    if (idx > 0 && idx < username.length() - 1) {
          +      String remainder = username.substring(idx + 1);
                 username = username.substring(0, idx);
                 idx = remainder.indexOf(ACL_KEY_DELIMITER);
          -      if (idx > 0 && idx < remainder.length()-1) {
          +      if (idx > 0 && idx < remainder.length() - 1) {
                   permFamily = Bytes.toBytes(remainder.substring(0, idx));
          -        permQualifier = Bytes.toBytes(remainder.substring(idx+1));
          +        permQualifier = Bytes.toBytes(remainder.substring(idx + 1));
                 } else {
                   permFamily = Bytes.toBytes(remainder);
                 }
          @@ -761,8 +742,8 @@ public static byte[] writePermissionsAsBytes(ListMultimap readWritableUserPermission(DataInput in,
          -      Configuration conf) throws IOException, ClassNotFoundException {
          +  private static List readWritableUserPermission(DataInput in, Configuration conf)
          +      throws IOException, ClassNotFoundException {
               assert WritableUtils.readVInt(in) == LIST_CODE;
               int length = in.readInt();
               List list = new ArrayList<>(length);
          @@ -784,7 +765,7 @@ public static ListMultimap readUserPermission(byte[] dat
                 int pblen = ProtobufUtil.lengthOfPBMagic();
                 try {
                   AccessControlProtos.UsersAndPermissions.Builder builder =
          -          AccessControlProtos.UsersAndPermissions.newBuilder();
          +            AccessControlProtos.UsersAndPermissions.newBuilder();
                   ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
                   return AccessControlUtil.toUserPermission(builder.build());
                 } catch (IOException e) {
          @@ -811,13 +792,13 @@ public static ListMultimap readUserPermission(byte[] dat
               }
             }
           
          -  public static ListMultimap readPermissions(byte[] data,
          -      Configuration conf) throws DeserializationException {
          +  public static ListMultimap readPermissions(byte[] data, Configuration conf)
          +      throws DeserializationException {
               if (ProtobufUtil.isPBMagicPrefix(data)) {
                 int pblen = ProtobufUtil.lengthOfPBMagic();
                 try {
                   AccessControlProtos.UsersAndPermissions.Builder builder =
          -          AccessControlProtos.UsersAndPermissions.newBuilder();
          +            AccessControlProtos.UsersAndPermissions.newBuilder();
                   ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
                   return AccessControlUtil.toPermission(builder.build());
                 } catch (IOException e) {
          @@ -850,7 +831,7 @@ public static boolean isNamespaceEntry(String entryName) {
             }
           
             public static boolean isNamespaceEntry(byte[] entryName) {
          -    return entryName != null && entryName.length !=0 && entryName[0] == NAMESPACE_PREFIX;
          +    return entryName != null && entryName.length != 0 && entryName[0] == NAMESPACE_PREFIX;
             }
           
             public static boolean isTableEntry(byte[] entryName) {
          @@ -869,16 +850,16 @@ public static String fromNamespaceEntry(String namespace) {
             }
           
             public static byte[] toNamespaceEntry(byte[] namespace) {
          -    byte[] ret = new byte[namespace.length+1];
          +    byte[] ret = new byte[namespace.length + 1];
               ret[0] = NAMESPACE_PREFIX;
               System.arraycopy(namespace, 0, ret, 1, namespace.length);
               return ret;
             }
           
             public static byte[] fromNamespaceEntry(byte[] namespace) {
          -    if(namespace[0] != NAMESPACE_PREFIX) {
          -      throw new IllegalArgumentException("Argument is not a valid namespace entry: " +
          -          Bytes.toString(namespace));
          +    if (namespace[0] != NAMESPACE_PREFIX) {
          +      throw new IllegalArgumentException(
          +          "Argument is not a valid namespace entry: " + Bytes.toString(namespace));
               }
               return Arrays.copyOfRange(namespace, 1, namespace.length);
             }
          @@ -905,7 +886,7 @@ public static List getCellPermissionsForUser(User user, Cell cell)
                   } else {
                     ProtobufUtil.mergeFrom(builder, Tag.cloneValue(tag));
                   }
          -        ListMultimap kvPerms =
          +        ListMultimap kvPerms =
                       AccessControlUtil.toUsersAndPermissions(builder.build());
                   // Are there permissions for this user?
                   List userPerms = kvPerms.get(user.getShortName());
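
The PermissionStorage Javadoc above describes how grants are keyed in the _acl_ table: one row per table (namespace rows get a '@' prefix), with a column qualifier of the form username, username,family, or username,family,qualifier joined by ACL_KEY_DELIMITER (','). Below is a minimal, self-contained sketch of that key format in plain Java; it is illustrative only (the class and method names are made up here) and is not the actual PermissionStorage.userPermissionKey implementation.

// Illustrative only: mirrors the "username[,family[,qualifier]]" key format described
// in the PermissionStorage Javadoc. Not the real implementation.
public final class AclKeySketch {
  private static final char ACL_KEY_DELIMITER = ',';   // same delimiter as PermissionStorage
  private static final char NAMESPACE_PREFIX = '@';    // namespace rows are prefixed with '@'

  // Build the column qualifier for a user permission entry.
  static String userPermissionKey(String user, String family, String qualifier) {
    StringBuilder key = new StringBuilder(user);
    if (family != null && !family.isEmpty()) {
      key.append(ACL_KEY_DELIMITER).append(family);
      if (qualifier != null && !qualifier.isEmpty()) {
        key.append(ACL_KEY_DELIMITER).append(qualifier);
      }
    }
    return key.toString();
  }

  // Row key for a namespace-scoped grant: '@' + namespace name.
  static String namespaceRowKey(String namespace) {
    return NAMESPACE_PREFIX + namespace;
  }

  public static void main(String[] args) {
    System.out.println(userPermissionKey("bob", null, null));        // bob
    System.out.println(userPermissionKey("bob", "info", null));      // bob,info
    System.out.println(userPermissionKey("bob", "info", "colA"));    // bob,info,colA
    System.out.println(namespaceRowKey("default"));                  // @default
  }
}

This also makes the exact-match caveat of removeUserPermission concrete: a grant stored under "bob,info,colA" is not touched by a revoke built from just "bob" and the table name.
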
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java
          index 72da07cee5ea..563db605745d 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclCleaner.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -19,7 +19,6 @@
           
           import java.io.IOException;
           import java.util.Map;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.fs.FileStatus;
           import org.apache.hadoop.fs.Path;
          @@ -86,8 +85,8 @@ private boolean isEmptyArchiveDirDeletable(Path dir) {
                   return false;
                 } else if (isArchiveTableDir(dir)
                     && tableExists(TableName.valueOf(dir.getParent().getName(), dir.getName()))) {
          -        return false;
          -      }
          +            return false;
          +          }
                 return true;
               } catch (IOException e) {
                 LOG.warn("Check if empty dir {} is deletable error", dir, e);
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java
          index acb6940697a8..1a0fa0624974 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.security.access;
           
           import java.io.IOException;
          @@ -268,24 +267,25 @@ public void postModifyTable(ObserverContext ctx,
                     tableUsers, tableName);
                 } else if (needHandleTableHdfsAcl(oldDescriptor, "modifyTable " + tableName)
                     && !hdfsAclHelper.isAclSyncToHdfsEnabled(currentDescriptor)) {
          -        // 1. Remove empty table directories
          -        List tableRootPaths = hdfsAclHelper.getTableRootPaths(tableName, false);
          -        for (Path path : tableRootPaths) {
          -          hdfsAclHelper.deleteEmptyDir(path);
          -        }
          -        // 2. Remove all table HDFS acls
          -        Set tableUsers = hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false);
          -        Set users = hdfsAclHelper
          -            .getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true);
          -        users.addAll(tableUsers);
          -        hdfsAclHelper.removeTableAcl(tableName, users);
          -        // 3. Remove namespace access HDFS acls for users who only own permission for this table
          -        hdfsAclHelper.removeNamespaceAccessAcl(tableName,
          -          filterUsersToRemoveNsAccessAcl(aclTable, tableName, tableUsers), "modify");
          -        // 4. Record table user acl is not synced to HDFS
          -        SnapshotScannerHDFSAclStorage.deleteUserTableHdfsAcl(ctx.getEnvironment().getConnection(),
          -          tableUsers, tableName);
          -      }
          +            // 1. Remove empty table directories
          +            List tableRootPaths = hdfsAclHelper.getTableRootPaths(tableName, false);
          +            for (Path path : tableRootPaths) {
          +              hdfsAclHelper.deleteEmptyDir(path);
          +            }
          +            // 2. Remove all table HDFS acls
          +            Set tableUsers =
          +                hdfsAclHelper.getUsersWithTableReadAction(tableName, false, false);
          +            Set users = hdfsAclHelper
          +                .getUsersWithNamespaceReadAction(tableName.getNamespaceAsString(), true);
          +            users.addAll(tableUsers);
          +            hdfsAclHelper.removeTableAcl(tableName, users);
          +            // 3. Remove namespace access HDFS acls for users who only own permission for this table
          +            hdfsAclHelper.removeNamespaceAccessAcl(tableName,
          +              filterUsersToRemoveNsAccessAcl(aclTable, tableName, tableUsers), "modify");
          +            // 4. Record table user acl is not synced to HDFS
          +            SnapshotScannerHDFSAclStorage.deleteUserTableHdfsAcl(
          +              ctx.getEnvironment().getConnection(), tableUsers, tableName);
          +          }
               }
             }
           
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
          index 53d9970e09df..2f04eb97efc9 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
          @@ -7,7 +7,7 @@
            * "License"); you may not use this file except in compliance
            * with the License.  You may obtain a copy of the License at
            *
          - * http://www.apache.org/licenses/LICENSE-2.0
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
            * Unless required by applicable law or agreed to in writing, software
            * distributed under the License is distributed on an "AS IS" BASIS,
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.security.access;
           
           import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
          @@ -38,7 +37,6 @@
           import java.util.concurrent.ExecutorService;
           import java.util.concurrent.Executors;
           import java.util.stream.Collectors;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.fs.FileStatus;
           import org.apache.hadoop.fs.FileSystem;
          @@ -475,8 +473,7 @@ List getNamespaceRootPaths(String namespace) {
             List getTableRootPaths(TableName tableName, boolean includeSnapshotPath)
                 throws IOException {
               List paths = Lists.newArrayList(pathHelper.getDataTableDir(tableName),
          -      pathHelper.getMobTableDir(tableName),
          -      pathHelper.getArchiveTableDir(tableName));
          +      pathHelper.getMobTableDir(tableName), pathHelper.getArchiveTableDir(tableName));
               if (includeSnapshotPath) {
                 paths.addAll(getTableSnapshotPaths(tableName));
               }
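
getTableRootPaths above gathers the per-table HDFS directories whose ACLs the helper manages: the table's data, MOB, and archive directories, plus snapshot directories when requested. A rough sketch of assembling those paths with org.apache.hadoop.fs.Path follows; the directory layout shown (root/data/ns/table and friends) and the class/method names are assumptions for illustration, not taken from this diff.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.Path;

// Illustrative only: assembling the per-table root directories referenced by
// getTableRootPaths, assuming the conventional layout under the HBase root dir
// (<root>/data/<ns>/<table>, <root>/mobdir/data/<ns>/<table>, <root>/archive/data/<ns>/<table>).
public final class TableRootPathsSketch {

  static List<Path> tableRootPaths(Path hbaseRootDir, String namespace, String table) {
    List<Path> paths = new ArrayList<>();
    paths.add(new Path(hbaseRootDir, "data/" + namespace + "/" + table));
    paths.add(new Path(hbaseRootDir, "mobdir/data/" + namespace + "/" + table));
    paths.add(new Path(hbaseRootDir, "archive/data/" + namespace + "/" + table));
    return paths;
  }

  public static void main(String[] args) {
    for (Path p : tableRootPaths(new Path("/hbase"), "default", "mytable")) {
      System.out.println(p);   // e.g. /hbase/data/default/mytable
    }
  }
}
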
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
          index 1102dac12a53..64c87d873c1c 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
          @@ -15,9 +15,18 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.security.access;
           
          +import java.io.Closeable;
          +import java.io.IOException;
          +import java.util.List;
          +import java.util.concurrent.Callable;
          +import java.util.concurrent.CountDownLatch;
          +import java.util.concurrent.ExecutionException;
          +import java.util.concurrent.ExecutorService;
          +import java.util.concurrent.Executors;
          +import java.util.concurrent.Future;
          +import java.util.concurrent.RejectedExecutionException;
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.TableName;
           import org.apache.hadoop.hbase.util.Bytes;
          @@ -26,31 +35,20 @@
           import org.apache.hadoop.hbase.zookeeper.ZKUtil;
           import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
           import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
          -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
           import org.apache.yetus.audience.InterfaceAudience;
           import org.apache.zookeeper.KeeperException;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
          -import java.io.Closeable;
          -import java.io.IOException;
          -import java.util.List;
          -import java.util.concurrent.Callable;
          -import java.util.concurrent.CountDownLatch;
          -import java.util.concurrent.ExecutionException;
          -import java.util.concurrent.ExecutorService;
          -import java.util.concurrent.Executors;
          -import java.util.concurrent.Future;
          -import java.util.concurrent.RejectedExecutionException;
          +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
           
           /**
          - * Handles synchronization of access control list entries and updates
          - * throughout all nodes in the cluster.  The {@link AccessController} instance
          - * on the {@code _acl_} table regions, creates a znode for each table as
          - * {@code /hbase/acl/tablename}, with the znode data containing a serialized
          - * list of the permissions granted for the table.  The {@code AccessController}
          - * instances on all other cluster hosts watch the znodes for updates, which
          - * trigger updates in the {@link AuthManager} permission cache.
          + * Handles synchronization of access control list entries and updates throughout all nodes in the
           + * cluster. The {@link AccessController} instance on the {@code _acl_} table regions creates a
          + * znode for each table as {@code /hbase/acl/tablename}, with the znode data containing a serialized
          + * list of the permissions granted for the table. The {@code AccessController} instances on all
          + * other cluster hosts watch the znodes for updates, which trigger updates in the
          + * {@link AuthManager} permission cache.
            */
           @InterfaceAudience.Private
           public class ZKPermissionWatcher extends ZKListener implements Closeable {
          @@ -63,15 +61,14 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable {
             private final ExecutorService executor;
             private Future childrenChangedFuture;
           
          -  public ZKPermissionWatcher(ZKWatcher watcher,
          -      AuthManager authManager, Configuration conf) {
          +  public ZKPermissionWatcher(ZKWatcher watcher, AuthManager authManager, Configuration conf) {
               super(watcher);
               this.authManager = authManager;
               String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE);
               this.aclZNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, aclZnodeParent);
               executor = Executors.newSingleThreadExecutor(
                 new ThreadFactoryBuilder().setNameFormat("zk-permission-watcher-pool-%d").setDaemon(true)
          -        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
          +          .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
             }
           
             public void start() throws KeeperException {
          @@ -92,7 +89,7 @@ public Void call() throws KeeperException {
                     }).get();
                   } catch (ExecutionException ex) {
                     if (ex.getCause() instanceof KeeperException) {
          -            throw (KeeperException)ex.getCause();
          +            throw (KeeperException) ex.getCause();
                     } else {
                       throw new RuntimeException(ex.getCause());
                     }
          @@ -182,7 +179,6 @@ public void run() {
               }
             }
           
          -
             @Override
             public void nodeChildrenChanged(final String path) {
               waitUntilStarted();
          @@ -194,14 +190,14 @@ public void nodeChildrenChanged(final String path) {
                     // task may have finished between our check and attempted cancel, this is fine.
                     if (!childrenChangedFuture.isDone()) {
                       LOG.warn("Could not cancel processing node children changed event, "
          -              + "please file a JIRA and attach logs if possible.");
          +                + "please file a JIRA and attach logs if possible.");
                     }
                   }
                 }
                 childrenChangedFuture = asyncProcessNodeUpdate(() -> {
                   try {
                     final List nodeList =
          -            ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode, false);
          +              ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode, false);
                     refreshNodes(nodeList);
                   } catch (KeeperException ke) {
                     String msg = "ZooKeeper error while reading node children data for path " + path;
          @@ -239,8 +235,7 @@ private void refreshNodes(List nodes) {
                 try {
                   refreshAuthManager(entry, n.getData());
                 } catch (IOException ioe) {
          -        LOG.error("Failed parsing permissions for table '" + entry +
          -            "' from zk", ioe);
          +        LOG.error("Failed parsing permissions for table '" + entry + "' from zk", ioe);
                 }
               }
             }
          @@ -248,7 +243,7 @@ private void refreshNodes(List nodes) {
             private void refreshAuthManager(String entry, byte[] nodeData) throws IOException {
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Updating permissions cache from {} with data {}", entry,
          -          Bytes.toStringBinary(nodeData));
          +        Bytes.toStringBinary(nodeData));
               }
               if (PermissionStorage.isNamespaceEntry(entry)) {
                 authManager.refreshNamespaceCacheFromWritable(PermissionStorage.fromNamespaceEntry(entry),
          @@ -272,9 +267,8 @@ public void writeToZookeeper(byte[] entry, byte[] permsData) {
                 ZKUtil.createWithParents(watcher, zkNode);
                 ZKUtil.updateExistingNodeData(watcher, zkNode, permsData, -1);
               } catch (KeeperException e) {
          -      LOG.error("Failed updating permissions for entry '" +
          -          entryName + "'", e);
          -      watcher.abort("Failed writing node "+zkNode+" to zookeeper", e);
          +      LOG.error("Failed updating permissions for entry '" + entryName + "'", e);
          +      watcher.abort("Failed writing node " + zkNode + " to zookeeper", e);
               }
             }
           
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java
          index e6dc3574726e..add8e6840d0e 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/AttemptingUserProvidingSaslServer.java
          @@ -19,17 +19,15 @@
           
           import java.util.Optional;
           import java.util.function.Supplier;
          -
           import javax.security.sasl.SaslServer;
          -
           import org.apache.hadoop.hbase.HBaseInterfaceAudience;
           import org.apache.hadoop.security.UserGroupInformation;
           import org.apache.yetus.audience.InterfaceAudience;
           import org.apache.yetus.audience.InterfaceStability;
           
           /**
          - * Wrapper around a SaslServer which provides the last user attempting to authenticate via SASL,
          - * if the server/mechanism allow figuring that out.
          + * Wrapper around a SaslServer which provides the last user attempting to authenticate via SASL, if
          + * the server/mechanism allow figuring that out.
            */
           @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION)
           @InterfaceStability.Evolving
          @@ -37,8 +35,8 @@ public class AttemptingUserProvidingSaslServer {
             private final Supplier producer;
             private final SaslServer saslServer;
           
          -  public AttemptingUserProvidingSaslServer(
          -      SaslServer saslServer, Supplier producer) {
          +  public AttemptingUserProvidingSaslServer(SaslServer saslServer,
          +      Supplier producer) {
               this.saslServer = saslServer;
               this.producer = producer;
             }
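
AttemptingUserProvidingSaslServer pairs a SaslServer with a Supplier that reports the user last seen attempting authentication; DigestSaslServerAuthenticationProvider feeds such a Supplier from an AtomicReference updated inside its callback handler. A minimal sketch of that pattern follows, using String in place of UserGroupInformation so it stays self-contained; the class and method names are hypothetical.

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Illustrative only: the AtomicReference-backed Supplier pattern used to expose the
// last user that attempted SASL authentication. Uses String instead of the real
// UserGroupInformation type to stay self-contained.
public final class AttemptingUserSketch {
  private final AtomicReference<String> attemptingUser = new AtomicReference<>(null);

  // Called from the SASL callback handler once the client's identity is known.
  void onClientName(String name) {
    attemptingUser.set(name);
  }

  // The Supplier handed to the wrapper; callers read it lazily, e.g. after a failed handshake.
  Supplier<String> attemptingUserSupplier() {
    return attemptingUser::get;
  }

  public static void main(String[] args) {
    AttemptingUserSketch sketch = new AttemptingUserSketch();
    Supplier<String> who = sketch.attemptingUserSupplier();
    System.out.println(who.get());   // null (no attempt yet)
    sketch.onClientName("bob");
    System.out.println(who.get());   // bob
  }
}
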
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java
          index b3236d653764..32fb4af457d6 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslServerAuthenticationProvider.java
          @@ -20,7 +20,6 @@
           import java.io.IOException;
           import java.util.Map;
           import java.util.concurrent.atomic.AtomicReference;
          -
           import javax.security.auth.callback.Callback;
           import javax.security.auth.callback.CallbackHandler;
           import javax.security.auth.callback.NameCallback;
          @@ -30,7 +29,6 @@
           import javax.security.sasl.RealmCallback;
           import javax.security.sasl.Sasl;
           import javax.security.sasl.SaslServer;
          -
           import org.apache.hadoop.hbase.security.AccessDeniedException;
           import org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
           import org.apache.hadoop.hbase.security.SaslUtil;
          @@ -45,15 +43,15 @@
           @InterfaceAudience.Private
           public class DigestSaslServerAuthenticationProvider extends DigestSaslAuthenticationProvider
               implements SaslServerAuthenticationProvider {
          -  private static final Logger LOG = LoggerFactory.getLogger(
          -      DigestSaslServerAuthenticationProvider.class);
          +  private static final Logger LOG =
          +      LoggerFactory.getLogger(DigestSaslServerAuthenticationProvider.class);
           
             private AtomicReference attemptingUser = new AtomicReference<>(null);
           
             @Override
          -  public AttemptingUserProvidingSaslServer createServer(
          -      SecretManager secretManager,
          -      Map saslProps) throws IOException {
          +  public AttemptingUserProvidingSaslServer
          +      createServer(SecretManager secretManager, Map saslProps)
          +          throws IOException {
               if (secretManager == null) {
                 throw new AccessDeniedException("Server is not configured to do DIGEST authentication.");
               }
          @@ -99,13 +97,13 @@ public void handle(Callback[] callbacks) throws InvalidToken, UnsupportedCallbac
                   }
                 }
                 if (pc != null) {
          -        TokenIdentifier tokenIdentifier = HBaseSaslRpcServer.getIdentifier(
          -            nc.getDefaultName(), secretManager);
          +        TokenIdentifier tokenIdentifier =
          +            HBaseSaslRpcServer.getIdentifier(nc.getDefaultName(), secretManager);
                   attemptingUser.set(tokenIdentifier.getUser());
                   char[] password = getPassword(tokenIdentifier);
                   if (LOG.isTraceEnabled()) {
                     LOG.trace("SASL server DIGEST-MD5 callback: setting password for client: {}",
          -              tokenIdentifier.getUser());
          +            tokenIdentifier.getUser());
                   }
                   pc.setPassword(password);
                 }
          @@ -123,8 +121,8 @@ public void handle(Callback[] callbacks) throws InvalidToken, UnsupportedCallbac
                   if (authenticatedUserId.equals(userRequestedToExecuteAs)) {
                     ac.setAuthorized(true);
                     if (LOG.isTraceEnabled()) {
          -            String username = HBaseSaslRpcServer.getIdentifier(
          -                userRequestedToExecuteAs, secretManager).getUser().getUserName();
          +            String username = HBaseSaslRpcServer
          +                .getIdentifier(userRequestedToExecuteAs, secretManager).getUser().getUserName();
                       LOG.trace(
                         "SASL server DIGEST-MD5 callback: setting " + "canonicalized client ID: " + username);
                     }
          @@ -148,8 +146,7 @@ public UserGroupInformation getAuthorizedUgi(String authzId,
               TokenIdentifier tokenId = HBaseSaslRpcServer.getIdentifier(authzId, secretManager);
               authorizedUgi = tokenId.getUser();
               if (authorizedUgi == null) {
          -      throw new AccessDeniedException(
          -          "Can't retrieve username from tokenIdentifier.");
          +      throw new AccessDeniedException("Can't retrieve username from tokenIdentifier.");
               }
               authorizedUgi.addTokenIdentifier(tokenId);
               authorizedUgi.setAuthenticationMethod(getSaslAuthMethod().getAuthMethod());
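
The DIGEST-MD5 callback handler above authorizes a request only when the authenticated identity matches the identity the client asked to act as. A small sketch of that rule against the standard javax.security.sasl.AuthorizeCallback follows; the enclosing class name is hypothetical and this is not the provider's full handler.

import javax.security.sasl.AuthorizeCallback;

// Illustrative only: the authorization rule applied in the DIGEST-MD5 callback handler,
// i.e. allow the request only when authenticated id == requested authorization id.
public final class AuthorizeCheckSketch {

  static void handle(AuthorizeCallback ac) {
    String authenticatedUserId = ac.getAuthenticationID();
    String userRequestedToExecuteAs = ac.getAuthorizationID();
    if (authenticatedUserId.equals(userRequestedToExecuteAs)) {
      ac.setAuthorized(true);
      ac.setAuthorizedID(userRequestedToExecuteAs);
    } else {
      ac.setAuthorized(false);
    }
  }

  public static void main(String[] args) {
    AuthorizeCallback same = new AuthorizeCallback("bob", "bob");
    AuthorizeCallback different = new AuthorizeCallback("bob", "alice");
    handle(same);
    handle(different);
    System.out.println(same.isAuthorized());      // true
    System.out.println(different.isAuthorized()); // false
  }
}
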
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java
          index 8a542c69c0dc..99a6d80d9a4d 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslServerAuthenticationProvider.java
          @@ -1,5 +1,5 @@
           /*
          - *  Licensed to the Apache Software Foundation (ASF) under one
          + * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
            * regarding copyright ownership.  The ASF licenses this file
          @@ -20,14 +20,12 @@
           import java.io.IOException;
           import java.security.PrivilegedExceptionAction;
           import java.util.Map;
          -
           import javax.security.auth.callback.Callback;
           import javax.security.auth.callback.CallbackHandler;
           import javax.security.auth.callback.UnsupportedCallbackException;
           import javax.security.sasl.AuthorizeCallback;
           import javax.security.sasl.Sasl;
           import javax.security.sasl.SaslException;
          -
           import org.apache.hadoop.hbase.security.AccessDeniedException;
           import org.apache.hadoop.hbase.security.SaslUtil;
           import org.apache.hadoop.security.UserGroupInformation;
          @@ -40,13 +38,13 @@
           @InterfaceAudience.Private
           public class GssSaslServerAuthenticationProvider extends GssSaslAuthenticationProvider
               implements SaslServerAuthenticationProvider {
          -  private static final Logger LOG = LoggerFactory.getLogger(
          -      GssSaslServerAuthenticationProvider.class);
          +  private static final Logger LOG =
          +      LoggerFactory.getLogger(GssSaslServerAuthenticationProvider.class);
           
             @Override
          -  public AttemptingUserProvidingSaslServer createServer(
          -      SecretManager secretManager,
          -      Map saslProps) throws IOException {
          +  public AttemptingUserProvidingSaslServer
          +      createServer(SecretManager secretManager, Map saslProps)
          +          throws IOException {
               UserGroupInformation current = UserGroupInformation.getCurrentUser();
               String fullName = current.getUserName();
               LOG.debug("Server's Kerberos principal name is {}", fullName);
          @@ -59,9 +57,10 @@ public AttemptingUserProvidingSaslServer createServer(
                 return current.doAs(new PrivilegedExceptionAction() {
                   @Override
                   public AttemptingUserProvidingSaslServer run() throws SaslException {
          -          return new AttemptingUserProvidingSaslServer(Sasl.createSaslServer(
          -              getSaslAuthMethod().getSaslMechanism(), names[0], names[1], saslProps,
          -              new SaslGssCallbackHandler()), () -> null);
          +          return new AttemptingUserProvidingSaslServer(
          +              Sasl.createSaslServer(getSaslAuthMethod().getSaslMechanism(), names[0], names[1],
          +                saslProps, new SaslGssCallbackHandler()),
          +              () -> null);
                   }
                 });
               } catch (InterruptedException e) {
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java
          index 3487cfcd586e..13b638130925 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProvider.java
          @@ -19,7 +19,6 @@
           
           import java.io.IOException;
           import java.util.Map;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.HBaseInterfaceAudience;
           import org.apache.hadoop.security.UserGroupInformation;
          @@ -29,8 +28,8 @@
           import org.apache.yetus.audience.InterfaceStability;
           
           /**
          - * Encapsulates the server-side logic to authenticate a client over SASL. Tied one-to-one to
          - * a single client authentication implementation.
          + * Encapsulates the server-side logic to authenticate a client over SASL. Tied one-to-one to a
          + * single client authentication implementation.
            */
           @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION)
           @InterfaceStability.Evolving
          @@ -39,7 +38,8 @@ public interface SaslServerAuthenticationProvider extends SaslAuthenticationProv
             /**
              * Allows implementations to initialize themselves, prior to creating a server.
              */
          -  default void init(Configuration conf) throws IOException {}
          +  default void init(Configuration conf) throws IOException {
          +  }
           
             /**
              * Creates the SaslServer to accept incoming SASL authentication requests.
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java
          index 829498dfd9fe..16cc6be30d52 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SaslServerAuthenticationProviders.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -24,7 +24,6 @@
           import java.util.ServiceLoader;
           import java.util.concurrent.atomic.AtomicReference;
           import java.util.stream.Collectors;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.yetus.audience.InterfaceAudience;
           import org.slf4j.Logger;
          @@ -32,8 +31,8 @@
           
           @InterfaceAudience.Private
           public final class SaslServerAuthenticationProviders {
          -  private static final Logger LOG = LoggerFactory.getLogger(
          -      SaslClientAuthenticationProviders.class);
          +  private static final Logger LOG =
          +      LoggerFactory.getLogger(SaslClientAuthenticationProviders.class);
           
             public static final String EXTRA_PROVIDERS_KEY = "hbase.server.sasl.provider.extras";
             private static final AtomicReference holder =
          @@ -87,13 +86,13 @@ public static void reset() {
              * already exist in the map.
              */
             static void addProviderIfNotExists(SaslServerAuthenticationProvider provider,
          -      HashMap providers) {
          +      HashMap providers) {
               final byte newProviderAuthCode = provider.getSaslAuthMethod().getCode();
          -    final SaslServerAuthenticationProvider alreadyRegisteredProvider = providers.get(
          -        newProviderAuthCode);
          +    final SaslServerAuthenticationProvider alreadyRegisteredProvider =
          +        providers.get(newProviderAuthCode);
               if (alreadyRegisteredProvider != null) {
                 throw new RuntimeException("Trying to load SaslServerAuthenticationProvider "
          -          + provider.getClass() + ", but "+ alreadyRegisteredProvider.getClass()
          +          + provider.getClass() + ", but " + alreadyRegisteredProvider.getClass()
                     + " is already registered with the same auth code");
               }
               providers.put(newProviderAuthCode, provider);
          @@ -103,7 +102,7 @@ static void addProviderIfNotExists(SaslServerAuthenticationProvider provider,
              * Adds any providers defined in the configuration.
              */
             static void addExtraProviders(Configuration conf,
          -      HashMap providers) {
          +      HashMap providers) {
               for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) {
                 Class clz;
                 try {
          @@ -115,7 +114,8 @@ static void addExtraProviders(Configuration conf,
           
                 if (!SaslServerAuthenticationProvider.class.isAssignableFrom(clz)) {
                   LOG.warn("Server authentication class {} is not an instance of "
          -            + "SaslServerAuthenticationProvider", clz);
          +            + "SaslServerAuthenticationProvider",
          +          clz);
                   continue;
                 }
           
          @@ -137,7 +137,7 @@ static void addExtraProviders(Configuration conf,
             static SaslServerAuthenticationProviders createProviders(Configuration conf) {
               ServiceLoader loader =
                   ServiceLoader.load(SaslServerAuthenticationProvider.class);
          -    HashMap providers = new HashMap<>();
          +    HashMap providers = new HashMap<>();
               for (SaslServerAuthenticationProvider provider : loader) {
                 addProviderIfNotExists(provider, providers);
               }
          @@ -146,8 +146,7 @@ static SaslServerAuthenticationProviders createProviders(Configuration conf) {
           
               if (LOG.isTraceEnabled()) {
                 String loadedProviders = providers.values().stream()
          -          .map((provider) -> provider.getClass().getName())
          -          .collect(Collectors.joining(", "));
          +          .map((provider) -> provider.getClass().getName()).collect(Collectors.joining(", "));
                 if (loadedProviders.isEmpty()) {
                   loadedProviders = "None!";
                 }
          @@ -155,14 +154,13 @@ static SaslServerAuthenticationProviders createProviders(Configuration conf) {
               }
           
               // Initialize the providers once, before we get into the RPC path.
          -    providers.forEach((b,provider) -> {
          +    providers.forEach((b, provider) -> {
                 try {
                   // Give them a copy, just to make sure there is no funny-business going on.
                   provider.init(new Configuration(conf));
                 } catch (IOException e) {
                   LOG.error("Failed to initialize {}", provider.getClass(), e);
          -        throw new RuntimeException(
          -            "Failed to initialize " + provider.getClass().getName(), e);
          +        throw new RuntimeException("Failed to initialize " + provider.getClass().getName(), e);
                 }
               });
           
          @@ -181,10 +179,8 @@ public SaslServerAuthenticationProvider selectProvider(byte authByte) {
              * Extracts the SIMPLE authentication provider.
              */
             public SaslServerAuthenticationProvider getSimpleProvider() {
          -    Optional opt = providers.values()
          -        .stream()
          -        .filter((p) -> p instanceof SimpleSaslServerAuthenticationProvider)
          -        .findFirst();
          +    Optional opt = providers.values().stream()
          +        .filter((p) -> p instanceof SimpleSaslServerAuthenticationProvider).findFirst();
               if (!opt.isPresent()) {
                 throw new RuntimeException("SIMPLE authentication provider not available when it should be");
               }
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java
          index ed7bf4ce9e76..4006a6e8d790 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslServerAuthenticationProvider.java
          @@ -19,7 +19,6 @@
           
           import java.io.IOException;
           import java.util.Map;
          -
           import org.apache.hadoop.security.UserGroupInformation;
           import org.apache.hadoop.security.token.SecretManager;
           import org.apache.hadoop.security.token.TokenIdentifier;
          @@ -30,9 +29,9 @@ public class SimpleSaslServerAuthenticationProvider extends SimpleSaslAuthentica
               implements SaslServerAuthenticationProvider {
           
             @Override
          -  public AttemptingUserProvidingSaslServer createServer(
          -      SecretManager secretManager,
          -      Map saslProps) throws IOException {
          +  public AttemptingUserProvidingSaslServer
          +      createServer(SecretManager secretManager, Map saslProps)
          +          throws IOException {
               throw new RuntimeException("HBase SIMPLE authentication doesn't use SASL");
             }
           
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java
          index 9e124a54111c..0b516f69899e 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationKey.java
          @@ -15,25 +15,22 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.security.token;
           
          -import javax.crypto.SecretKey;
          -
           import java.io.DataInput;
           import java.io.DataOutput;
           import java.io.IOException;
           import java.time.Instant;
           import java.util.Arrays;
          -
          -import org.apache.yetus.audience.InterfaceAudience;
          +import javax.crypto.SecretKey;
           import org.apache.hadoop.hbase.util.Bytes;
           import org.apache.hadoop.io.Writable;
           import org.apache.hadoop.io.WritableUtils;
          +import org.apache.yetus.audience.InterfaceAudience;
           
           /**
          - * Represents a secret key used for signing and verifying authentication tokens
          - * by {@link AuthenticationTokenSecretManager}.
          + * Represents a secret key used for signing and verifying authentication tokens by
          + * {@link AuthenticationTokenSecretManager}.
            */
           @InterfaceAudience.Private
           public class AuthenticationKey implements Writable {
          @@ -80,22 +77,18 @@ public boolean equals(Object obj) {
               if (obj == null || !(obj instanceof AuthenticationKey)) {
                 return false;
               }
          -    AuthenticationKey other = (AuthenticationKey)obj;
          -    return id == other.getKeyId() &&
          -        expirationDate == other.getExpiration() &&
          -        (secret == null ? other.getKey() == null :
          -            other.getKey() != null &&
          -                Bytes.equals(secret.getEncoded(), other.getKey().getEncoded()));       
          +    AuthenticationKey other = (AuthenticationKey) obj;
          +    return id == other.getKeyId() && expirationDate == other.getExpiration() && (secret == null
          +        ? other.getKey() == null
          +        : other.getKey() != null && Bytes.equals(secret.getEncoded(), other.getKey().getEncoded()));
             }
           
             @Override
             public String toString() {
               StringBuilder buf = new StringBuilder();
          -    buf.append("AuthenticationKey[")
          -       .append("id=").append(id)
          -       .append(", expiration=").append(Instant.ofEpochMilli(this.expirationDate))
          -      .append(", obj=").append(super.toString())
          -      .append("]");
          +    buf.append("AuthenticationKey[").append("id=").append(id).append(", expiration=")
          +        .append(Instant.ofEpochMilli(this.expirationDate)).append(", obj=").append(super.toString())
          +        .append("]");
               return buf.toString();
             }
           
          diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
          index 641288c03836..94412abf7ba0 100644
          --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
          +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
          @@ -15,53 +15,46 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.security.token;
           
          -import javax.crypto.SecretKey;
           import java.io.IOException;
           import java.util.Iterator;
           import java.util.Map;
           import java.util.concurrent.ConcurrentHashMap;
           import java.util.concurrent.atomic.AtomicLong;
          -
          -import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
          -import org.apache.yetus.audience.InterfaceAudience;
          +import javax.crypto.SecretKey;
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.Stoppable;
           import org.apache.hadoop.hbase.util.Bytes;
           import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
           import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
           import org.apache.hadoop.hbase.zookeeper.ZKLeaderManager;
          +import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
           import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
           import org.apache.hadoop.io.Text;
           import org.apache.hadoop.security.token.SecretManager;
           import org.apache.hadoop.security.token.Token;
          +import org.apache.yetus.audience.InterfaceAudience;
           import org.apache.zookeeper.KeeperException;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
           /**
          - * Manages an internal list of secret keys used to sign new authentication
          - * tokens as they are generated, and to valid existing tokens used for
          - * authentication.
          - *
          + * Manages an internal list of secret keys used to sign new authentication tokens as they are
          + * generated, and to valid existing tokens used for authentication.
  * <p>
- * A single instance of {@code AuthenticationTokenSecretManager} will be
- * running as the "leader" in a given HBase cluster. The leader is responsible
- * for periodically generating new secret keys, which are then distributed to
- * followers via ZooKeeper, and for expiring previously used secret keys that
- * are no longer needed (as any tokens using them have expired).
+ * A single instance of {@code AuthenticationTokenSecretManager} will be running as the "leader" in
+ * a given HBase cluster. The leader is responsible for periodically generating new secret keys,
+ * which are then distributed to followers via ZooKeeper, and for expiring previously used secret
+ * keys that are no longer needed (as any tokens using them have expired).
  * </p>
  */
 @InterfaceAudience.Private
-public class AuthenticationTokenSecretManager
-    extends SecretManager {
+public class AuthenticationTokenSecretManager extends SecretManager {
   static final String NAME_PREFIX = "SecretManager-";
-  private static final Logger LOG = LoggerFactory.getLogger(
-      AuthenticationTokenSecretManager.class);
+  private static final Logger LOG = LoggerFactory.getLogger(AuthenticationTokenSecretManager.class);
 
   private long lastKeyUpdate;
   private long keyUpdateInterval;
@@ -70,7 +63,7 @@ public class AuthenticationTokenSecretManager
   private LeaderElector leaderElector;
   private ZKClusterId clusterId;
 
-  private Map allKeys = new ConcurrentHashMap<>();
+  private Map allKeys = new ConcurrentHashMap<>();
   private AuthenticationKey currentKey;
   private int idSeq;
@@ -81,21 +74,23 @@ public class AuthenticationTokenSecretManager
    * Create a new secret manager instance for generating keys.
    * @param conf Configuration to use
    * @param zk Connection to zookeeper for handling leader elections
-   * @param keyUpdateInterval Time (in milliseconds) between rolling a new master key for token signing
-   * @param tokenMaxLifetime Maximum age (in milliseconds) before a token expires and is no longer valid
+   * @param keyUpdateInterval Time (in milliseconds) between rolling a new master key for token
+   *          signing
+   * @param tokenMaxLifetime Maximum age (in milliseconds) before a token expires and is no longer
+   *          valid
    */
-  /* TODO: Restrict access to this constructor to make rogues instances more difficult.
-   * For the moment this class is instantiated from
-   * org.apache.hadoop.hbase.ipc.SecureServer so public access is needed.
+  /*
+   * TODO: Restrict access to this constructor to make rogues instances more difficult. For the
+   * moment this class is instantiated from org.apache.hadoop.hbase.ipc.SecureServer so public
+   * access is needed.
   */
-  public AuthenticationTokenSecretManager(Configuration conf,
-      ZKWatcher zk, String serverName,
-      long keyUpdateInterval, long tokenMaxLifetime) {
+  public AuthenticationTokenSecretManager(Configuration conf, ZKWatcher zk, String serverName,
+    long keyUpdateInterval, long tokenMaxLifetime) {
     this.zkWatcher = new ZKSecretWatcher(conf, zk, this);
     this.keyUpdateInterval = keyUpdateInterval;
     this.tokenMaxLifetime = tokenMaxLifetime;
     this.leaderElector = new LeaderElector(zk, serverName);
-    this.name = NAME_PREFIX+serverName;
+    this.name = NAME_PREFIX + serverName;
     this.clusterId = new ZKClusterId(zk, zk);
   }
@@ -130,31 +125,29 @@ protected synchronized byte[] createPassword(AuthenticationTokenIdentifier ident
     identifier.setIssueDate(now);
     identifier.setExpirationDate(now + tokenMaxLifetime);
     identifier.setSequenceNumber(tokenSeq.getAndIncrement());
-    return createPassword(identifier.getBytes(),
-        secretKey.getKey());
+    return createPassword(identifier.getBytes(), secretKey.getKey());
   }
 
   @Override
-  public byte[] retrievePassword(AuthenticationTokenIdentifier identifier)
-      throws InvalidToken {
+  public byte[] retrievePassword(AuthenticationTokenIdentifier identifier) throws InvalidToken {
     long now = EnvironmentEdgeManager.currentTime();
     if (identifier.getExpirationDate() < now) {
       throw new InvalidToken("Token has expired");
     }
     AuthenticationKey masterKey = allKeys.get(identifier.getKeyId());
-    if(masterKey == null) {
-      if(zkWatcher.getWatcher().isAborted()) {
+    if (masterKey == null) {
+      if (zkWatcher.getWatcher().isAborted()) {
         LOG.error("ZKWatcher is abort");
-        throw new InvalidToken("Token keys could not be sync from zookeeper"
-            + " because of ZKWatcher abort");
+        throw new InvalidToken(
+          "Token keys could not be sync from zookeeper" + " because of ZKWatcher abort");
       }
       synchronized (this) {
         if (!leaderElector.isAlive() || leaderElector.isStopped()) {
-          LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":"
-              + leaderElector.getId() + "] is stopped or not alive");
+          LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":" + leaderElector.getId()
+            + "] is stopped or not alive");
           leaderElector.start();
-          LOG.info("Thread leaderElector [" + leaderElector.getName() + ":"
-              + leaderElector.getId() + "] is started");
+          LOG.info("Thread leaderElector [" + leaderElector.getName() + ":" + leaderElector.getId()
+            + "] is started");
         }
       }
       zkWatcher.refreshKeys();
@@ -164,12 +157,10 @@ public byte[] retrievePassword(AuthenticationTokenIdentifier identifier)
       masterKey = allKeys.get(identifier.getKeyId());
     }
     if (masterKey == null) {
-      throw new InvalidToken("Unknown master key for token (id="+
-          identifier.getKeyId()+")");
+      throw new InvalidToken("Unknown master key for token (id=" + identifier.getKeyId() + ")");
     }
     // regenerate the password
-    return createPassword(identifier.getBytes(),
-        masterKey.getKey());
+    return createPassword(identifier.getBytes(), masterKey.getKey());
   }
 
   @Override
@@ -178,8 +169,7 @@ public AuthenticationTokenIdentifier createIdentifier() {
   }
 
   public Token generateToken(String username) {
-    AuthenticationTokenIdentifier ident =
-        new AuthenticationTokenIdentifier(username);
+    AuthenticationTokenIdentifier ident = new AuthenticationTokenIdentifier(username);
     Token token = new Token<>(ident, this);
     if (clusterId.hasId()) {
       token.setService(new Text(clusterId.getId()));
@@ -259,8 +249,10 @@ synchronized void rollCurrentKey() {
     long now = EnvironmentEdgeManager.currentTime();
     AuthenticationKey prev = currentKey;
-    AuthenticationKey newKey = new AuthenticationKey(++idSeq,
-        Long.MAX_VALUE, // don't allow to expire until it's replaced by a new key
+    AuthenticationKey newKey = new AuthenticationKey(++idSeq, Long.MAX_VALUE, // don't allow to
+                                                                              // expire until it's
+                                                                              // replaced by a new
+                                                                              // key
       generateSecret());
     allKeys.put(newKey.getKeyId(), newKey);
     currentKey = newKey;
@@ -293,8 +285,8 @@ public LeaderElector(ZKWatcher watcher, String serverName) {
       setDaemon(true);
       setName("ZKSecretWatcher-leaderElector");
       zkLeader = new ZKLeaderManager(watcher,
-          ZNodePaths.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"),
-          Bytes.toBytes(serverName), this);
+        ZNodePaths.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"), Bytes.toBytes(serverName),
+        this);
     }
 
     public boolean isMaster() {
@@ -318,7 +310,7 @@ public void stop(String reason) {
         zkLeader.stepDownAsLeader();
       }
       isMaster = false;
-      LOG.info("Stopping leader election, because: "+reason);
+      LOG.info("Stopping leader election, because: " + reason);
       interrupt();
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java
index 9a58006343e6..17cacd16f32f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/FsDelegationToken.java
@@ -35,9 +35,8 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * Helper class to obtain a filesystem delegation token.
- * Mainly used by Map-Reduce jobs that requires to read/write data to
- * a remote file-system (e.g. BulkLoad, ExportSnapshot).
+ * Helper class to obtain a filesystem delegation token. Mainly used by Map-Reduce jobs that
+ * requires to read/write data to a remote file-system (e.g. BulkLoad, ExportSnapshot).
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -60,15 +59,13 @@ public FsDelegationToken(final UserProvider userProvider, final String renewer)
   }
 
   /**
-   * Acquire the delegation token for the specified filesystem.
-   * Before requesting a new delegation token, tries to find one already available.
-   * Currently supports checking existing delegation tokens for swebhdfs, webhdfs and hdfs.
-   *
+   * Acquire the delegation token for the specified filesystem. Before requesting a new delegation
+   * token, tries to find one already available. Currently supports checking existing delegation
+   * tokens for swebhdfs, webhdfs and hdfs.
    * @param fs the filesystem that requires the delegation token
   * @throws IOException on fs.getDelegationToken() failure
    */
-  public void acquireDelegationToken(final FileSystem fs)
-      throws IOException {
+  public void acquireDelegationToken(final FileSystem fs) throws IOException {
     String tokenKind;
     String scheme = fs.getUri().getScheme();
     if (SWEBHDFS_SCHEME.equalsIgnoreCase(scheme)) {
@@ -87,9 +84,8 @@ public void acquireDelegationToken(final FileSystem fs)
   }
 
   /**
-   * Acquire the delegation token for the specified filesystem and token kind.
-   * Before requesting a new delegation token, tries to find one already available.
-   *
+   * Acquire the delegation token for the specified filesystem and token kind. Before requesting a
+   * new delegation token, tries to find one already available.
   * @param tokenKind non-null token kind to get delegation token from the {@link UserProvider}
   * @param fs the filesystem that requires the delegation token
   * @throws IOException on fs.getDelegationToken() failure
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
index 2946344e73d8..5263e63545a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.security.token;
 
-
 import java.io.IOException;
 import java.util.Collections;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -46,33 +45,33 @@
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AuthenticationProtos;
 
 /**
- * Provides a service for obtaining authentication tokens via the
- * {@link AuthenticationProtos} AuthenticationService coprocessor service.
+ * Provides a service for obtaining authentication tokens via the {@link AuthenticationProtos}
+ * AuthenticationService coprocessor service.
  */
 @CoreCoprocessor
 @InterfaceAudience.Private
-public class TokenProvider implements AuthenticationProtos.AuthenticationService.Interface,
-    RegionCoprocessor {
+public class TokenProvider
+  implements AuthenticationProtos.AuthenticationService.Interface, RegionCoprocessor {
 
   private static final Logger LOG = LoggerFactory.getLogger(TokenProvider.class);
 
   private AuthenticationTokenSecretManager secretManager;
 
-
   @Override
   public void start(CoprocessorEnvironment env) {
     // if running at region
     if (env instanceof RegionCoprocessorEnvironment) {
-      RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment)env;
-      /* Getting the RpcServer from a RegionCE is wrong. There cannot be an expectation that Region
-      is hosted inside a RegionServer. If you need RpcServer, then pass in a RegionServerCE.
-      TODO: FIX.
+      RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) env;
+      /*
+       * Getting the RpcServer from a RegionCE is wrong. There cannot be an expectation that Region
+       * is hosted inside a RegionServer. If you need RpcServer, then pass in a RegionServerCE.
+       * TODO: FIX.
       */
-      RegionServerServices rss = ((HasRegionServerServices)regionEnv).getRegionServerServices();
+      RegionServerServices rss = ((HasRegionServerServices) regionEnv).getRegionServerServices();
       RpcServerInterface server = rss.getRpcServer();
-      SecretManager mgr = ((RpcServer)server).getSecretManager();
+      SecretManager mgr = ((RpcServer) server).getSecretManager();
       if (mgr instanceof AuthenticationTokenSecretManager) {
-        secretManager = (AuthenticationTokenSecretManager)mgr;
+        secretManager = (AuthenticationTokenSecretManager) mgr;
       }
     }
   }
@@ -102,28 +101,27 @@ private boolean isAllowedDelegationTokenOp(UserGroupInformation ugi) throws IOEx
 
   @Override
   public Iterable getServices() {
-    return Collections.singleton(
-        AuthenticationProtos.AuthenticationService.newReflectiveService(this));
+    return Collections
+      .singleton(AuthenticationProtos.AuthenticationService.newReflectiveService(this));
   }
 
   @Override
   public void getAuthenticationToken(RpcController controller,
-      AuthenticationProtos.GetAuthenticationTokenRequest request,
-      RpcCallback done) {
+    AuthenticationProtos.GetAuthenticationTokenRequest request,
+    RpcCallback done) {
     AuthenticationProtos.GetAuthenticationTokenResponse.Builder response =
       AuthenticationProtos.GetAuthenticationTokenResponse.newBuilder();
     try {
       if (secretManager == null) {
-        throw new IOException(
-            "No secret manager configured for token authentication");
+        throw new IOException("No secret manager configured for token authentication");
       }
       User currentUser = RpcServer.getRequestUser()
           .orElseThrow(() -> new AccessDeniedException("No authenticated user for request!"));
       UserGroupInformation ugi = currentUser.getUGI();
       if (!isAllowedDelegationTokenOp(ugi)) {
-        LOG.warn("Token generation denied for user=" + currentUser.getName() + ", authMethod=" +
-          ugi.getAuthenticationMethod());
+        LOG.warn("Token generation denied for user=" + currentUser.getName() + ", authMethod="
+          + ugi.getAuthenticationMethod());
         throw new AccessDeniedException(
           "Token generation only allowed for Kerberos authenticated clients");
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
index 56805ea4dbe3..30d649600440 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
@@ -43,15 +43,15 @@ public class TokenUtil {
   // This class is referenced indirectly by User out in common; instances are created by reflection
   private static final Logger LOG = LoggerFactory.getLogger(TokenUtil.class);
 
-  /**
-   * See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.AsyncConnection)}.
-   * @deprecated External users should not use this method. Please post on
-   *   the HBase dev mailing list if you need this method. Internal
-   *   HBase code should use {@link ClientTokenUtil} instead.
-   */
+  /**
+   * See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.AsyncConnection)}.
+   * @deprecated External users should not use this method. Please post on the HBase dev mailing
+   *             list if you need this method. Internal HBase code should use
+   *             {@link ClientTokenUtil} instead.
+   */
   @Deprecated
-  public static CompletableFuture> obtainToken(
-      AsyncConnection conn) {
+  public static CompletableFuture>
+    obtainToken(AsyncConnection conn) {
     return ClientTokenUtil.obtainToken(conn);
   }
@@ -70,9 +70,9 @@ public static Token obtainToken(Configuration con
   /**
    * See {@link ClientTokenUtil#obtainToken(org.apache.hadoop.hbase.client.Connection)}.
-   * @deprecated External users should not use this method. Please post on
-   *   the HBase dev mailing list if you need this method. Internal
-   *   HBase code should use {@link ClientTokenUtil} instead.
+   * @deprecated External users should not use this method. Please post on the HBase dev mailing
+   *             list if you need this method. Internal HBase code should use
+   *             {@link ClientTokenUtil} instead.
    */
   @Deprecated
   public static Token obtainToken(Connection conn)
@@ -80,12 +80,11 @@ public static Token obtainToken(Connection conn)
     return ClientTokenUtil.obtainToken(conn);
   }
-
   /**
    * See {@link ClientTokenUtil#toToken(org.apache.hadoop.security.token.Token)}.
-   * @deprecated External users should not use this method. Please post on
-   *   the HBase dev mailing list if you need this method. Internal
-   *   HBase code should use {@link ClientTokenUtil} instead.
+   * @deprecated External users should not use this method. Please post on the HBase dev mailing
+   *             list if you need this method. Internal HBase code should use
+   *             {@link ClientTokenUtil} instead.
    */
   @Deprecated
   public static AuthenticationProtos.Token toToken(Token token) {
@@ -93,56 +92,52 @@ public static AuthenticationProtos.Token toToken(Token
-  public static Token obtainToken(
-      final Connection conn, User user) throws IOException, InterruptedException {
+  public static Token obtainToken(final Connection conn, User user)
+    throws IOException, InterruptedException {
     return ClientTokenUtil.obtainToken(conn, user);
   }
 
   /**
-   * See {@link ClientTokenUtil#obtainAndCacheToken(org.apache.hadoop.hbase.client.Connection,
-   * org.apache.hadoop.hbase.security.User)}.
+   * See
+   * {@link ClientTokenUtil#obtainAndCacheToken(org.apache.hadoop.hbase.client.Connection, org.apache.hadoop.hbase.security.User)}.
    */
-  public static void obtainAndCacheToken(final Connection conn,
-      User user)
+  public static void obtainAndCacheToken(final Connection conn, User user)
     throws IOException, InterruptedException {
     ClientTokenUtil.obtainAndCacheToken(conn, user);
   }
 
   /**
    * See {@link ClientTokenUtil#toToken(org.apache.hadoop.security.token.Token)}.
-   * @deprecated External users should not use this method. Please post on
-   *   the HBase dev mailing list if you need this method. Internal
-   *   HBase code should use {@link ClientTokenUtil} instead.
+   * @deprecated External users should not use this method. Please post on the HBase dev mailing
+   *             list if you need this method. Internal HBase code should use
+   *             {@link ClientTokenUtil} instead.
    */
   @Deprecated
   public static Token toToken(AuthenticationProtos.Token proto) {
     return ClientTokenUtil.toToken(proto);
   }
 
-  private static Text getClusterId(Token token)
-      throws IOException {
-    return token.getService() != null
-        ? token.getService() : new Text("default");
+  private static Text getClusterId(Token token) throws IOException {
+    return token.getService() != null ? token.getService() : new Text("default");
   }
 
   /**
-   * Obtain an authentication token on behalf of the given user and add it to
-   * the credentials for the given map reduce job.
+   * Obtain an authentication token on behalf of the given user and add it to the credentials for
+   * the given map reduce job.
   * @param conn The HBase cluster connection
   * @param user The user for whom to obtain the token
   * @param job The job instance in which the token should be stored
   * @throws IOException If making a remote call to the authentication service fails
   * @throws InterruptedException If executing as the given user is interrupted
   */
-  public static void obtainTokenForJob(final Connection conn,
-      User user, Job job)
+  public static void obtainTokenForJob(final Connection conn, User user, Job job)
     throws IOException, InterruptedException {
     try {
       Token token = ClientTokenUtil.obtainToken(conn, user);
@@ -152,8 +147,8 @@ public static void obtainTokenForJob(final Connection conn,
       }
       Text clusterId = getClusterId(token);
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Obtained token " + token.getKind().toString() + " for user " +
-          user.getName() + " on cluster " + clusterId.toString());
+        LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName()
+          + " on cluster " + clusterId.toString());
       }
       job.getCredentials().addToken(clusterId, token);
     } catch (IOException ioe) {
@@ -169,8 +164,8 @@ public static void obtainTokenForJob(final Connection conn,
   }
 
   /**
-   * Obtain an authentication token on behalf of the given user and add it to
-   * the credentials for the given map reduce job.
+   * Obtain an authentication token on behalf of the given user and add it to the credentials for
+   * the given map reduce job.
   * @param conn The HBase cluster connection
   * @param user The user for whom to obtain the token
   * @param job The job configuration in which the token should be stored
@@ -187,8 +182,8 @@ public static void obtainTokenForJob(final Connection conn, final JobConf job, U
       }
       Text clusterId = getClusterId(token);
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Obtained token " + token.getKind().toString() + " for user " +
-          user.getName() + " on cluster " + clusterId.toString());
+        LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName()
+          + " on cluster " + clusterId.toString());
       }
       job.getCredentials().addToken(clusterId, token);
     } catch (IOException ioe) {
@@ -199,14 +194,13 @@ public static void obtainTokenForJob(final Connection conn, final JobConf job, U
       throw re;
     } catch (Exception e) {
       throw new UndeclaredThrowableException(e,
-          "Unexpected exception obtaining token for user "+user.getName());
+        "Unexpected exception obtaining token for user " + user.getName());
     }
   }
 
   /**
-   * Checks for an authentication token for the given user, obtaining a new token if necessary,
-   * and adds it to the credentials for the given map reduce job.
-   *
+   * Checks for an authentication token for the given user, obtaining a new token if necessary, and
+   * adds it to the credentials for the given map reduce job.
   * @param conn The HBase cluster connection
   * @param user The user for whom to obtain the token
   * @param job The job configuration in which the token should be stored
@@ -224,9 +218,8 @@ public static void addTokenForJob(final Connection conn, final JobConf job, User
   }
 
   /**
-   * Checks for an authentication token for the given user, obtaining a new token if necessary,
-   * and adds it to the credentials for the given map reduce job.
-   *
+   * Checks for an authentication token for the given user, obtaining a new token if necessary, and
+   * adds it to the credentials for the given map reduce job.
   * @param conn The HBase cluster connection
   * @param user The user for whom to obtain the token
   * @param job The job instance in which the token should be stored
@@ -243,9 +236,8 @@ public static void addTokenForJob(final Connection conn, User user, Job job)
   }
 
   /**
-   * Checks if an authentication tokens exists for the connected cluster,
-   * obtaining one if needed and adding it to the user's credentials.
-   *
+   * Checks if an authentication tokens exists for the connected cluster, obtaining one if needed
+   * and adding it to the user's credentials.
   * @param conn The HBase cluster connection
   * @param user The user for whom to obtain the token
   * @throws IOException If making a remote call to the authentication service fails
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java
index 2398ba4031ed..2d074cc3b597 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java
@@ -15,7 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-
 package org.apache.hadoop.hbase.security.token;
 
 import java.io.IOException;
@@ -24,8 +23,8 @@
 import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZKListener;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
@@ -45,8 +44,7 @@ public class ZKSecretWatcher extends ZKListener {
   private String baseKeyZNode;
   private String keysParentZNode;
 
-  public ZKSecretWatcher(Configuration conf,
-      ZKWatcher watcher,
+  public ZKSecretWatcher(Configuration conf, ZKWatcher watcher,
       AuthenticationTokenSecretManager secretManager) {
     super(watcher);
     this.secretManager = secretManager;
@@ -76,7 +74,7 @@ public void nodeCreated(String path) {
         refreshNodes(nodes);
       } catch (KeeperException ke) {
         LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke);
-        watcher.abort("Error reading new key znode "+path, ke);
+        watcher.abort("Error reading new key znode " + path, ke);
       }
     }
   }
@@ -90,7 +88,7 @@ public void nodeDeleted(String path) {
         secretManager.removeKey(id);
         LOG.info("Node deleted id={}", id);
       } catch (NumberFormatException nfe) {
-        LOG.error("Invalid znode name for key ID '"+keyId+"'", nfe);
+        LOG.error("Invalid znode name for key ID '" + keyId + "'", nfe);
       }
     }
   }
@@ -101,19 +99,19 @@ public void nodeDataChanged(String path) {
       try {
         byte[] data = ZKUtil.getDataAndWatch(watcher, path);
         if (data == null || data.length == 0) {
-          LOG.debug("Ignoring empty node "+path);
+          LOG.debug("Ignoring empty node " + path);
           return;
         }
-        AuthenticationKey key = (AuthenticationKey)Writables.getWritable(data,
-            new AuthenticationKey());
+        AuthenticationKey key =
+          (AuthenticationKey) Writables.getWritable(data, new AuthenticationKey());
         secretManager.addKey(key);
       } catch (KeeperException ke) {
         LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke);
-        watcher.abort("Error reading updated key znode "+path, ke);
+        watcher.abort("Error reading updated key znode " + path, ke);
       } catch (IOException ioe) {
         LOG.error(HBaseMarkers.FATAL, "Error reading key writables", ioe);
-        watcher.abort("Error reading key writables from znode "+path, ioe);
+        watcher.abort("Error reading key writables from znode " + path, ioe);
       }
     }
   }
@@ -144,16 +142,16 @@ private void refreshNodes(List nodes) {
       try {
         byte[] data = n.getData();
         if (data == null || data.length == 0) {
-          LOG.debug("Ignoring empty node "+path);
+          LOG.debug("Ignoring empty node " + path);
           continue;
         }
-        AuthenticationKey key = (AuthenticationKey)Writables.getWritable(
-            data, new AuthenticationKey());
+        AuthenticationKey key =
+          (AuthenticationKey) Writables.getWritable(data, new AuthenticationKey());
         secretManager.addKey(key);
       } catch (IOException ioe) {
-        LOG.error(HBaseMarkers.FATAL, "Failed reading new secret key for id '" +
-          keyId + "' from zk", ioe);
-        watcher.abort("Error deserializing key from znode "+path, ioe);
+        LOG.error(HBaseMarkers.FATAL,
+          "Failed reading new secret key for id '" + keyId + "' from zk", ioe);
+        watcher.abort("Error deserializing key from znode " + path, ioe);
       }
     }
   }
@@ -167,12 +165,12 @@ public void removeKeyFromZK(AuthenticationKey key) {
     try {
       ZKUtil.deleteNode(watcher, keyZNode);
     } catch (KeeperException.NoNodeException nne) {
-      LOG.error("Non-existent znode "+keyZNode+" for key "+key.getKeyId(), nne);
+      LOG.error("Non-existent znode " + keyZNode + " for key " + key.getKeyId(), nne);
     } catch (KeeperException ke) {
-      LOG.error(HBaseMarkers.FATAL, "Failed removing znode "+keyZNode+" for key "+
-          key.getKeyId(), ke);
-      watcher.abort("Unhandled zookeeper error removing znode "+keyZNode+
-          " for key "+key.getKeyId(), ke);
+      LOG.error(HBaseMarkers.FATAL,
+        "Failed removing znode " + keyZNode + " for key " + key.getKeyId(), ke);
+      watcher.abort(
+        "Unhandled zookeeper error removing znode " + keyZNode + " for key " + key.getKeyId(), ke);
     }
   }
@@ -183,13 +181,12 @@ public void addKeyToZK(AuthenticationKey key) {
       // TODO: is there any point in retrying beyond what ZK client does?
       ZKUtil.createSetData(watcher, keyZNode, keyData);
     } catch (KeeperException ke) {
-      LOG.error(HBaseMarkers.FATAL, "Unable to synchronize master key "+key.getKeyId()+
-          " to znode "+keyZNode, ke);
-      watcher.abort("Unable to synchronize secret key "+
-          key.getKeyId()+" in zookeeper", ke);
+      LOG.error(HBaseMarkers.FATAL,
+        "Unable to synchronize master key " + key.getKeyId() + " to znode " + keyZNode, ke);
+      watcher.abort("Unable to synchronize secret key " + key.getKeyId() + " in zookeeper", ke);
     } catch (IOException ioe) {
       // this can only happen from an error serializing the key
-      watcher.abort("Failed serializing key "+key.getKeyId(), ioe);
+      watcher.abort("Failed serializing key " + key.getKeyId(), ioe);
     }
   }
@@ -204,13 +201,12 @@ public void updateKeyInZK(AuthenticationKey key) {
         ZKUtil.createSetData(watcher, keyZNode, keyData);
       }
     } catch (KeeperException ke) {
-      LOG.error(HBaseMarkers.FATAL, "Unable to update master key "+key.getKeyId()+
-          " in znode "+keyZNode);
-      watcher.abort("Unable to synchronize secret key "+
-          key.getKeyId()+" in zookeeper", ke);
+      LOG.error(HBaseMarkers.FATAL,
+        "Unable to update master key " + key.getKeyId() + " in znode " + keyZNode);
+      watcher.abort("Unable to synchronize secret key " + key.getKeyId() + " in zookeeper", ke);
     } catch (IOException ioe) {
       // this can only happen from an error serializing the key
-      watcher.abort("Failed serializing key "+key.getKeyId(), ioe);
+      watcher.abort("Failed serializing key " + key.getKeyId(), ioe);
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
index 519502e5aea8..0231d035336a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
@@ -39,7 +39,6 @@
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.Pattern;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.AuthUtil;
@@ -118,15 +117,15 @@ public Configuration getConf() {
 
   @Override
   public void init(RegionCoprocessorEnvironment e) throws IOException {
-    /* So, presumption that the RegionCE has a ZK Connection is too much. Why would a RCE have
-     * a ZK instance? This is cheating presuming we have access to the RS ZKW. TODO: Fix.
-     *
-     * And what is going on here? This ain't even a Coprocessor? And its being passed a CP Env?
+    /*
+     * So, presumption that the RegionCE has a ZK Connection is too much. Why would a RCE have a ZK
+     * instance? This is cheating presuming we have access to the RS ZKW. TODO: Fix. And what is
+     * going on here? This ain't even a Coprocessor? And its being passed a CP Env?
      */
     // This is a CoreCoprocessor. On creation, we should have gotten an environment that
     // implements HasRegionServerServices so we can get at RSS. FIX!!!! Integrate this CP as
     // native service.
-    ZKWatcher zk = ((HasRegionServerServices)e).getRegionServerServices().getZooKeeper();
+    ZKWatcher zk = ((HasRegionServerServices) e).getRegionServerServices().getZooKeeper();
     try {
       labelsCache = VisibilityLabelsCache.createAndGet(zk, this.conf);
     } catch (IOException ioe) {
@@ -182,20 +181,20 @@ protected List> getExistingLabelsWithAuths() throws IOException {
     return existingLabels;
   }
 
-  protected Pair, Map>> extractLabelsAndAuths(
-      List> labelDetails) {
+  protected Pair, Map>>
+    extractLabelsAndAuths(List> labelDetails) {
     Map labels = new HashMap<>();
     Map> userAuths = new HashMap<>();
     for (List cells : labelDetails) {
       for (Cell cell : cells) {
         if (CellUtil.matchingQualifier(cell, LABEL_QUALIFIER)) {
           labels.put(
-              Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()),
-              PrivateCellUtil.getRowAsInt(cell));
+            Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()),
+            PrivateCellUtil.getRowAsInt(cell));
         } else {
           // These are user cells who has authorization for this label
           String user = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(),
-              cell.getQualifierLength());
+            cell.getQualifierLength());
           List auths = userAuths.get(user);
           if (auths == null) {
             auths = new ArrayList<>();
@@ -213,14 +212,10 @@ protected void addSystemLabel(Region region, Map labels,
     if (!labels.containsKey(SYSTEM_LABEL)) {
       byte[] row = Bytes.toBytes(SYSTEM_LABEL_ORDINAL);
       Put p = new Put(row);
-      p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-          .setRow(row)
-          .setFamily(LABELS_TABLE_FAMILY)
-          .setQualifier(LABEL_QUALIFIER)
-          .setTimestamp(p.getTimestamp())
-          .setType(Type.Put)
-          .setValue(Bytes.toBytes(SYSTEM_LABEL))
-          .build());
+      p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row)
+        .setFamily(LABELS_TABLE_FAMILY).setQualifier(LABEL_QUALIFIER)
+        .setTimestamp(p.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(SYSTEM_LABEL))
+        .build());
       region.put(p);
       labels.put(SYSTEM_LABEL, SYSTEM_LABEL_ORDINAL);
     }
@@ -241,15 +236,9 @@ public OperationStatus[] addLabels(List labels) throws IOException {
       } else {
         byte[] row = Bytes.toBytes(ordinalCounter.get());
         Put p = new Put(row);
-        p.add(builder.clear()
-            .setRow(row)
-            .setFamily(LABELS_TABLE_FAMILY)
-            .setQualifier(LABEL_QUALIFIER)
-            .setTimestamp(p.getTimestamp())
-            .setType(Type.Put)
-            .setValue(label)
-            .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS)))
-            .build());
+        p.add(builder.clear().setRow(row).setFamily(LABELS_TABLE_FAMILY)
+          .setQualifier(LABEL_QUALIFIER).setTimestamp(p.getTimestamp()).setType(Type.Put)
+          .setValue(label).setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))).build());
         if (LOG.isDebugEnabled()) {
           LOG.debug("Adding the label " + labelStr);
         }
@@ -281,15 +270,9 @@ public OperationStatus[] setAuths(byte[] user, List authLabels) throws I
       } else {
         byte[] row = Bytes.toBytes(labelOrdinal);
         Put p = new Put(row);
-        p.add(builder.clear()
-            .setRow(row)
-            .setFamily(LABELS_TABLE_FAMILY)
-            .setQualifier(user)
-            .setTimestamp(p.getTimestamp())
-            .setType(Cell.Type.Put)
-            .setValue(DUMMY_VALUE)
-            .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS)))
-            .build());
+        p.add(builder.clear().setRow(row).setFamily(LABELS_TABLE_FAMILY).setQualifier(user)
+          .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(DUMMY_VALUE)
+          .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS))).build());
         puts.add(p);
       }
       i++;
@@ -307,9 +290,8 @@ public OperationStatus[] clearAuths(byte[] user, List authLabels) throws
     List currentAuths;
     if (AuthUtil.isGroupPrincipal(Bytes.toString(user))) {
       String group = AuthUtil.getGroupName(Bytes.toString(user));
-      currentAuths = this.getGroupAuths(new String[]{group}, true);
-    }
-    else {
+      currentAuths = this.getGroupAuths(new String[] { group }, true);
+    } else {
       currentAuths = this.getUserAuths(user, true);
     }
     List deletes = new ArrayList<>(authLabels.size());
@@ -324,9 +306,9 @@ public OperationStatus[] clearAuths(byte[] user, List authLabels) throws
         deletes.add(d);
       } else {
         // This label is not set for the user.
-        finalOpStatus[i] = new OperationStatus(OperationStatusCode.FAILURE,
-            new InvalidLabelException("Label '" + authLabelStr + "' is not set for the user "
-                + Bytes.toString(user)));
+        finalOpStatus[i] =
+          new OperationStatus(OperationStatusCode.FAILURE, new InvalidLabelException(
+            "Label '" + authLabelStr + "' is not set for the user " + Bytes.toString(user)));
       }
       i++;
     }
@@ -346,8 +328,8 @@ public OperationStatus[] clearAuths(byte[] user, List authLabels) throws
    */
   private boolean mutateLabelsRegion(List mutations, OperationStatus[] finalOpStatus)
       throws IOException {
-    OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations
-        .toArray(new Mutation[mutations.size()]));
+    OperationStatus[] opStatus =
+      this.labelsRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]));
     int i = 0;
     boolean updateZk = false;
     for (OperationStatus status : opStatus) {
@@ -364,8 +346,7 @@ private boolean mutateLabelsRegion(List mutations, OperationStatus[] f
   }
 
   @Override
-  public List getUserAuths(byte[] user, boolean systemCall)
-      throws IOException {
+  public List getUserAuths(byte[] user, boolean systemCall) throws IOException {
     assert (labelsRegion != null || systemCall);
     if (systemCall || labelsRegion == null) {
       return this.labelsCache.getUserAuths(Bytes.toString(user));
@@ -375,7 +356,7 @@ public List getUserAuths(byte[] user, boolean systemCall)
       s.addColumn(LABELS_TABLE_FAMILY, user);
     }
     Filter filter = VisibilityUtils.createVisibilityLabelFilter(this.labelsRegion,
-        new Authorizations(SYSTEM_LABEL));
+      new Authorizations(SYSTEM_LABEL));
     s.setFilter(filter);
     ArrayList auths = new ArrayList<>();
     RegionScanner scanner = this.labelsRegion.getScanner(s);
@@ -399,8 +380,7 @@ public List getUserAuths(byte[] user, boolean systemCall)
   }
 
   @Override
-  public List getGroupAuths(String[] groups, boolean systemCall)
-      throws IOException {
+  public List getGroupAuths(String[] groups, boolean systemCall) throws IOException {
     assert (labelsRegion != null || systemCall);
     if (systemCall || labelsRegion == null) {
       return this.labelsCache.getGroupAuths(groups);
@@ -412,7 +392,7 @@ public List getGroupAuths(String[] groups, boolean systemCall)
       }
     }
     Filter filter = VisibilityUtils.createVisibilityLabelFilter(this.labelsRegion,
-        new Authorizations(SYSTEM_LABEL));
+      new Authorizations(SYSTEM_LABEL));
     s.setFilter(filter);
     Set auths = new HashSet<>();
     RegionScanner scanner = this.labelsRegion.getScanner(s);
@@ -465,7 +445,7 @@ public List createVisibilityExpTags(String visExpression, boolean withSeria
       auths.addAll(this.labelsCache.getGroupAuthsAsOrdinals(user.getGroupNames()));
     }
     return VisibilityUtils.createVisibilityExpTags(visExpression, withSerializationFormat,
-        checkAuths, auths, labelsCache);
+      checkAuths, auths, labelsCache);
   }
 
   protected void updateZk(boolean labelAddition) throws IOException {
@@ -617,20 +597,20 @@ public boolean matchVisibility(List putVisTags, Byte putTagsFormat, List
-  private static boolean matchUnSortedVisibilityTags(List putVisTags,
-      List deleteVisTags) throws IOException {
+  private static boolean
+    matchUnSortedVisibilityTags(List putVisTags, List deleteVisTags)
+      throws IOException {
     return compareTagsOrdinals(sortTagsBasedOnOrdinal(putVisTags),
-        sortTagsBasedOnOrdinal(deleteVisTags));
+      sortTagsBasedOnOrdinal(deleteVisTags));
   }
 
   /**
   * @param putVisTags Visibility tags in Put Mutation
   * @param deleteVisTags Visibility tags in Delete Mutation
-   * @return true when all the visibility tags in Put matches with visibility tags in Delete.
-   *         This is used when both the set of tags are sorted based on the label ordinal.
+   * @return true when all the visibility tags in Put matches with visibility tags in Delete. This
+   *         is used when both the set of tags are sorted based on the label ordinal.
    */
   private static boolean matchOrdinalSortedVisibilityTags(List putVisTags,
       List deleteVisTags) {
@@ -699,27 +679,24 @@ private static boolean compareTagsOrdinals(List> putVisTags,
   @Override
   public byte[] encodeVisibilityForReplication(final List tags,
       final Byte serializationFormat) throws IOException {
-    if (tags.size() > 0
-        && (serializationFormat == null ||
-            serializationFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT)) {
+    if (tags.size() > 0 && (serializationFormat == null
+      || serializationFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT)) {
       return createModifiedVisExpression(tags);
     }
     return null;
   }
 
   /**
-   * @param tags
-   *          - all the visibility tags associated with the current Cell
+   * @param tags - all the visibility tags associated with the current Cell
   * @return - the modified visibility expression as byte[]
    */
-  private byte[] createModifiedVisExpression(final List tags)
-      throws IOException {
+  private byte[] createModifiedVisExpression(final List tags) throws IOException {
     StringBuilder visibilityString = new StringBuilder();
     for (Tag tag : tags) {
       if (tag.getType() == TagType.VISIBILITY_TAG_TYPE) {
         if (visibilityString.length() != 0) {
-          visibilityString.append(VisibilityConstants.CLOSED_PARAN).append(
-              VisibilityConstants.OR_OPERATOR);
+          visibilityString.append(VisibilityConstants.CLOSED_PARAN)
+            .append(VisibilityConstants.OR_OPERATOR);
         }
         int offset = tag.getValueOffset();
         int endOffset = offset + tag.getValueLength();
@@ -741,11 +718,11 @@ private byte[] createModifiedVisExpression(final List tags)
         } else {
           String label = this.labelsCache.getLabel(currLabelOrdinal);
           if (expressionStart) {
-            visibilityString.append(VisibilityConstants.OPEN_PARAN).append(
-                CellVisibility.quote(label));
+            visibilityString.append(VisibilityConstants.OPEN_PARAN)
+              .append(CellVisibility.quote(label));
           } else {
-            visibilityString.append(VisibilityConstants.AND_OPERATOR).append(
-                CellVisibility.quote(label));
+            visibilityString.append(VisibilityConstants.AND_OPERATOR)
+              .append(CellVisibility.quote(label));
           }
         }
         expressionStart = false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java
index 77bc2057cdc8..2037244ca847 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
@@ -21,17 +21,15 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.security.User;
 
 /**
- * This is an implementation for ScanLabelGenerator.
- * It will extract labels from passed in authorizations and cross check
- * against the set of predefined authorization labels for given user.
+ * This is an implementation for ScanLabelGenerator. It will extract labels from passed in
+ * authorizations and cross check against the set of predefined authorization labels for given user.
 * The labels for which the user is not authorized will be dropped.
 */
 @InterfaceAudience.Private
@@ -86,7 +84,7 @@ private List dropLabelsNotInUserAuths(List labels, List
       sb.append("Dropping invalid authorizations requested by user ");
       sb.append(userName);
       sb.append(": [ ");
-      for (String label: droppedLabels) {
+      for (String label : droppedLabels) {
         sb.append(label);
         sb.append(' ');
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java
index e2bc16b5f02c..3be8ac1de976 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java
@@ -1,4 +1,4 @@
-/**
+/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
@@ -21,18 +21,16 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.security.User;
 
 /**
- * This ScanLabelGenerator enforces a set of predefined authorizations for a
- * given user, the set defined by the admin using the VisibilityClient admin
- * interface or the set_auths shell command. Any authorizations requested with
- * Scan#authorizations will be ignored.
+ * This ScanLabelGenerator enforces a set of predefined authorizations for a given user, the set
+ * defined by the admin using the VisibilityClient admin interface or the set_auths shell command.
+ * Any authorizations requested with Scan#authorizations will be ignored.
*/ @InterfaceAudience.Private public class EnforcingScanLabelGenerator implements ScanLabelGenerator { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java index 11842a2bd807..a88c2195f57b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionExpander.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,11 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.Operator; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class ExpressionExpander { @@ -117,10 +116,10 @@ private NonLeafExpressionNode expandNonLeaf(NonLeafExpressionNode newNode, Opera newNode = leftChildNLE; } else if (leftChildNLE.getOperator() == Operator.AND && rightChildNLE.getOperator() == Operator.OR) { - // (a & b) | (c | d) - rightChildNLE.addChildExp(leftChildNLE); - newNode = rightChildNLE; - } + // (a & b) | (c | d) + rightChildNLE.addChildExp(leftChildNLE); + newNode = rightChildNLE; + } // (a & b) | (c & d) // This case no need to do any thing } else { @@ -136,25 +135,25 @@ private NonLeafExpressionNode expandNonLeaf(NonLeafExpressionNode newNode, Opera } } else if (leftChildNLE.getOperator() == Operator.AND && rightChildNLE.getOperator() == Operator.OR) { - // (a & b) & (c | d) => (a & b & c) | (a & b & d) - newNode = new NonLeafExpressionNode(Operator.OR); - for (ExpressionNode exp : rightChildNLE.getChildExps()) { - NonLeafExpressionNode leftChildNLEClone = leftChildNLE.deepClone(); - leftChildNLEClone.addChildExp(exp); - newNode.addChildExp(leftChildNLEClone); - } - } else { - // (a | b) & (c | d) => (a & c) | (a & d) | (b & c) | (b & d) - newNode = new NonLeafExpressionNode(Operator.OR); - for (ExpressionNode leftExp : leftChildNLE.getChildExps()) { - for (ExpressionNode rightExp : rightChildNLE.getChildExps()) { - NonLeafExpressionNode newChild = new NonLeafExpressionNode(Operator.AND); - newChild.addChildExp(leftExp.deepClone()); - newChild.addChildExp(rightExp.deepClone()); - newNode.addChildExp(newChild); + // (a & b) & (c | d) => (a & b & c) | (a & b & d) + newNode = new NonLeafExpressionNode(Operator.OR); + for (ExpressionNode exp : rightChildNLE.getChildExps()) { + NonLeafExpressionNode leftChildNLEClone = leftChildNLE.deepClone(); + leftChildNLEClone.addChildExp(exp); + newNode.addChildExp(leftChildNLEClone); + } + } else { + // (a | b) & (c | d) => (a & c) | (a & d) | (b & c) | (b & d) + newNode = new NonLeafExpressionNode(Operator.OR); + for (ExpressionNode leftExp : leftChildNLE.getChildExps()) { + for (ExpressionNode rightExp : rightChildNLE.getChildExps()) { + NonLeafExpressionNode newChild = new NonLeafExpressionNode(Operator.AND); + newChild.addChildExp(leftExp.deepClone()); + newChild.addChildExp(rightExp.deepClone()); + 
newNode.addChildExp(newChild); + } + } } - } - } } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java index 313e8801e3e1..4c41dda9a05e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,12 @@ import java.util.ArrayList; import java.util.List; import java.util.Stack; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.Operator; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class ExpressionParser { @@ -39,6 +38,7 @@ public class ExpressionParser { private static final char NOT = '!'; private static final char SPACE = ' '; private static final char DOUBLE_QUOTES = '"'; + public ExpressionNode parse(String expS) throws ParseException { expS = expS.trim(); Stack expStack = new Stack<>(); @@ -66,28 +66,28 @@ public ExpressionNode parse(String expS) throws ParseException { break; case DOUBLE_QUOTES: int labelOffset = ++index; - // We have to rewrite the expression within double quotes as incase of expressions + // We have to rewrite the expression within double quotes as incase of expressions // with escape characters we may have to avoid them as the original expression did // not have them List list = new ArrayList<>(); while (index < endPos && !endDoubleQuotesFound(exp[index])) { if (exp[index] == '\\') { index++; - if (exp[index] != '\\' && exp[index] != '"') - throw new ParseException("invalid escaping with quotes " + expS + " at column : " - + index); + if (exp[index] != '\\' && exp[index] != '"') throw new ParseException( + "invalid escaping with quotes " + expS + " at column : " + index); } list.add(exp[index]); index++; } - // The expression has come to the end. still no double quotes found - if(index == endPos) { + // The expression has come to the end. still no double quotes found + if (index == endPos) { throw new ParseException("No terminating quotes " + expS + " at column : " + index); } // This could be costly. but do we have any alternative? // If we don't do this way then we may have to handle while checking the authorizations. // Better to do it here. 
- byte[] array = org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.toArray(list); + byte[] array = + org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.toArray(list); String leafExp = Bytes.toString(array).trim(); if (leafExp.isEmpty()) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); @@ -99,8 +99,8 @@ public ExpressionNode parse(String expS) throws ParseException { labelOffset = index; do { if (!VisibilityLabelsValidator.isValidAuthChar(exp[index])) { - throw new ParseException("Error parsing expression " - + expS + " at column : " + index); + throw new ParseException( + "Error parsing expression " + expS + " at column : " + index); } index++; } while (index < endPos && !isEndOfLabel(exp[index])); @@ -137,7 +137,7 @@ public ExpressionNode parse(String expS) throws ParseException { } private int skipSpaces(byte[] exp, int index) { - while (index < exp.length -1 && exp[index+1] == SPACE) { + while (index < exp.length - 1 && exp[index + 1] == SPACE) { index++; } return index; @@ -293,21 +293,21 @@ private void processNOTOp(Stack expStack, String expS, int index private static boolean endDoubleQuotesFound(byte b) { return (b == DOUBLE_QUOTES); } + private static boolean isEndOfLabel(byte b) { - return (b == OPEN_PARAN || b == CLOSE_PARAN || b == OR || b == AND || - b == NOT || b == SPACE); + return (b == OPEN_PARAN || b == CLOSE_PARAN || b == OR || b == AND || b == NOT || b == SPACE); } private static Operator getOperator(byte op) { switch (op) { - case AND: - return Operator.AND; - case OR: - return Operator.OR; - case NOT: - return Operator.NOT; - default: - return null; + case AND: + return Operator.AND; + case OR: + return Operator.OR; + case NOT: + return Operator.NOT; + default: + return null; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java index 1c77a4d008de..492513b0a631 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,22 +21,18 @@ import java.util.HashSet; import java.util.List; import java.util.Set; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.security.User; /** - * If the passed in authorization is null, then this ScanLabelGenerator - * feeds the set of predefined authorization labels for the given user. That is - * the set defined by the admin using the VisibilityClient admin interface - * or the set_auths shell command. - * Otherwise the passed in authorization labels are returned with no change. - * - * Note: This SLG should not be used alone because it does not check - * the passed in authorization labels against what the user is authorized for. + * If the passed in authorization is null, then this ScanLabelGenerator feeds the set of predefined + * authorization labels for the given user. 
That is the set defined by the admin using the + * VisibilityClient admin interface or the set_auths shell command. Otherwise the passed in + * authorization labels are returned with no change. Note: This SLG should not be used alone because + * it does not check the passed in authorization labels against what the user is authorized for. */ @InterfaceAudience.Private public class FeedUserAuthScanLabelGenerator implements ScanLabelGenerator { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java index b6c11b806510..594e27b9f5fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ParseException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java index fbbf8f5a08e1..53ab28d2f39a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,15 +18,13 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** - * This would be the interface which would be used add labels to the RPC context - * and this would be stored against the UGI. - * + * This would be the interface which would be used add labels to the RPC context and this would be + * stored against the UGI. */ @InterfaceAudience.Public public interface ScanLabelGenerator extends Configurable { @@ -35,7 +33,7 @@ public interface ScanLabelGenerator extends Configurable { * Helps to get a list of lables associated with an UGI * @param user * @param authorizations - * @return The labels + * @return The labels */ public List getLabels(User user, Authorizations authorizations); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java index 840ee32da4e0..bbd49d3e371c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/SimpleScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,9 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** * This is a simple implementation for ScanLabelGenerator. It will just extract labels passed via diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 66dd862b663e..c9e87cc85399 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.visibility; import static org.apache.hadoop.hbase.HConstants.OperationStatusCode.SANITY_CHECK_FAILURE; @@ -123,10 +122,9 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocessor, VisibilityLabelsService.Interface, MasterObserver, RegionObserver { - private static final Logger LOG = LoggerFactory.getLogger(VisibilityController.class); - private static final Logger AUDITLOG = LoggerFactory.getLogger("SecurityLogger." - + VisibilityController.class.getName()); + private static final Logger AUDITLOG = + LoggerFactory.getLogger("SecurityLogger." + VisibilityController.class.getName()); // flags if we are running on a region of the 'labels' table private boolean labelsRegion = false; // Flag denoting whether AcessController is available or not. @@ -135,13 +133,14 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso private volatile boolean initialized = false; private boolean checkAuths = false; /** Mapping of scanner instances to the user who created them */ - private Map scannerOwners = - new MapMaker().weakKeys().makeMap(); + private Map scannerOwners = new MapMaker().weakKeys().makeMap(); private VisibilityLabelService visibilityLabelService; - /** if we are active, usually false, only true if "hbase.security.authorization" - has been set to true in site configuration */ + /** + * if we are active, usually false, only true if "hbase.security.authorization" has been set to + * true in site configuration + */ boolean authorizationEnabled; // Add to this list if there are any reserved tag types @@ -167,14 +166,14 @@ public void start(CoprocessorEnvironment env) throws IOException { if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) { throw new RuntimeException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS - + " is required to persist visibility labels. Consider setting " + HFile.FORMAT_VERSION_KEY - + " accordingly."); + + " is required to persist visibility labels. 
Consider setting " + + HFile.FORMAT_VERSION_KEY + " accordingly."); } // Do not create for master CPs if (!(env instanceof MasterCoprocessorEnvironment)) { - visibilityLabelService = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(this.conf); + visibilityLabelService = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService(this.conf); } } @@ -196,15 +195,15 @@ public Optional getMasterObserver() { @Override public Iterable getServices() { - return Collections.singleton( - VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this)); + return Collections + .singleton(VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this)); } /********************************* Master related hooks **********************************/ @Override public void postStartMaster(ObserverContext ctx) - throws IOException { + throws IOException { // Need to create the new system table for labels here try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) { if (!admin.tableExists(LABELS_TABLE_NAME)) { @@ -212,10 +211,11 @@ public void postStartMaster(ObserverContext ctx) // Let the "labels" table having only one region always. We are not expecting too many // labels in the system. TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(LABELS_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(LABELS_TABLE_FAMILY) - .setBloomFilterType(BloomType.NONE).setBlockCacheEnabled(false).build()) - .setValue(TableDescriptorBuilder.SPLIT_POLICY, DisabledRegionSplitPolicy.class.getName()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(LABELS_TABLE_FAMILY) + .setBloomFilterType(BloomType.NONE).setBlockCacheEnabled(false).build()) + .setValue(TableDescriptorBuilder.SPLIT_POLICY, + DisabledRegionSplitPolicy.class.getName()) + .build(); admin.createTable(tableDescriptor); } @@ -235,8 +235,8 @@ public TableDescriptor preModifyTable(ObserverContext ctx, TableName tableName) - throws IOException { + public void preDisableTable(ObserverContext ctx, + TableName tableName) throws IOException { if (!authorizationEnabled) { return; } @@ -253,8 +253,8 @@ public void postOpen(ObserverContext e) { if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) { this.labelsRegion = true; synchronized (this) { - this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors() - .contains(AccessController.class.getName()); + this.accessControllerAvailable = + CoprocessorHost.getLoadedCoprocessors().contains(AccessController.class.getName()); } initVisibilityLabelService(e.getEnvironment()); } else { @@ -294,7 +294,7 @@ public void preBatchMutate(ObserverContext c, cellVisibility = m.getCellVisibility(); } catch (DeserializationException de) { miniBatchOp.setOperationStatus(i, - new OperationStatus(SANITY_CHECK_FAILURE, de.getMessage())); + new OperationStatus(SANITY_CHECK_FAILURE, de.getMessage())); continue; } boolean sanityFailure = false; @@ -306,7 +306,7 @@ public void preBatchMutate(ObserverContext c, // Don't disallow reserved tags if authorization is disabled if (authorizationEnabled) { miniBatchOp.setOperationStatus(i, new OperationStatus(SANITY_CHECK_FAILURE, - "Mutation contains cell with reserved type tag")); + "Mutation contains cell with reserved type tag")); sanityFailure = true; } break; @@ -328,11 +328,11 @@ public void preBatchMutate(ObserverContext c, // Don't check user auths for labels with Mutations when the user is super user boolean authCheck = 
authorizationEnabled && checkAuths && !(isSystemOrSuperUser()); try { - visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, true, - authCheck); + visibilityTags = + this.visibilityLabelService.createVisibilityExpTags(labelsExp, true, authCheck); } catch (InvalidLabelException e) { miniBatchOp.setOperationStatus(i, - new OperationStatus(SANITY_CHECK_FAILURE, e.getMessage())); + new OperationStatus(SANITY_CHECK_FAILURE, e.getMessage())); } if (visibilityTags != null) { labelCache.put(labelsExp, visibilityTags); @@ -369,9 +369,8 @@ public void preBatchMutate(ObserverContext c, } @Override - public void prePrepareTimeStampForDeleteVersion( - ObserverContext ctx, Mutation delete, Cell cell, - byte[] byteNow, Get get) throws IOException { + public void prePrepareTimeStampForDeleteVersion(ObserverContext ctx, + Mutation delete, Cell cell, byte[] byteNow, Get get) throws IOException { // Nothing to do if we are not filtering by visibility if (!authorizationEnabled) { return; @@ -389,8 +388,8 @@ public void prePrepareTimeStampForDeleteVersion( if (cellVisibility != null) { String labelsExp = cellVisibility.getExpression(); try { - visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, false, - false); + visibilityTags = + this.visibilityLabelService.createVisibilityExpTags(labelsExp, false, false); } catch (InvalidLabelException e) { throw new IOException("Invalid cell visibility specified " + labelsExp, e); } @@ -409,8 +408,8 @@ public void prePrepareTimeStampForDeleteVersion( return; } if (result.size() > get.getMaxVersions()) { - throw new RuntimeException("Unexpected size: " + result.size() + - ". Results more than the max versions obtained."); + throw new RuntimeException( + "Unexpected size: " + result.size() + ". Results more than the max versions obtained."); } Cell getCell = result.get(get.getMaxVersions() - 1); PrivateCellUtil.setTimestamp(cell, getCell.getTimestamp()); @@ -424,14 +423,13 @@ public void prePrepareTimeStampForDeleteVersion( } /** - * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This - * tag type is reserved and should not be explicitly set by user. - * + * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This tag type is + * reserved and should not be explicitly set by user. * @param cell The cell under consideration * @param pair An optional pair of type {@code } which would be reused if already - * set and new one will be created if NULL is passed + * set and new one will be created if NULL is passed * @return If the boolean is false then it indicates that the cell has a RESERVERD_VIS_TAG and - * with boolean as true, not null tag indicates that a string modified tag was found. + * with boolean as true, not null tag indicates that a string modified tag was found. 
*/ private Pair checkForReservedVisibilityTagPresence(Cell cell, Pair pair) throws IOException { @@ -509,8 +507,8 @@ public void preScannerOpen(ObserverContext e, Scan } } - Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(region, - authorizations); + Filter visibilityLabelFilter = + VisibilityUtils.createVisibilityLabelFilter(region, authorizations); if (visibilityLabelFilter != null) { Filter filter = scan.getFilter(); if (filter != null) { @@ -578,8 +576,7 @@ public void postScannerClose(final ObserverContext * access control is correctly enforced based on the checks performed in preScannerOpen() */ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { - if (!RpcServer.isInRpcCallContext()) - return; + if (!RpcServer.isInRpcCallContext()) return; String requestUName = RpcServer.getRequestUserName().orElse(null); String owner = scannerOwners.get(s); if (authorizationEnabled && owner != null && !owner.equals(requestUName)) { @@ -588,8 +585,8 @@ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException } @Override - public void preGetOp(ObserverContext e, Get get, - List results) throws IOException { + public void preGetOp(ObserverContext e, Get get, List results) + throws IOException { if (!initialized) { throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized"); } @@ -613,8 +610,8 @@ public void preGetOp(ObserverContext e, Get get, return; } } - Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(e.getEnvironment() - .getRegion(), authorizations); + Filter visibilityLabelFilter = + VisibilityUtils.createVisibilityLabelFilter(e.getEnvironment().getRegion(), authorizations); if (visibilityLabelFilter != null) { Filter filter = get.getFilter(); if (filter != null) { @@ -668,7 +665,7 @@ private Cell createNewCellWithTags(Mutation mutation, Cell newCell) throws IOExc // Don't check user auths for labels with Mutations when the user is super user boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser()); tags.addAll(this.visibilityLabelService.createVisibilityExpTags(cellVisibility.getExpression(), - true, authCheck)); + true, authCheck)); // Carry forward all other tags Iterator tagsItr = PrivateCellUtil.tagsIterator(newCell); while (tagsItr.hasNext()) { @@ -682,7 +679,9 @@ private Cell createNewCellWithTags(Mutation mutation, Cell newCell) throws IOExc return PrivateCellUtil.createCell(newCell, tags); } - /****************************** VisibilityEndpoint service related methods ******************************/ + /****************************** + * VisibilityEndpoint service related methods + ******************************/ @Override public synchronized void addLabels(RpcController controller, VisibilityLabelsRequest request, RpcCallback done) { @@ -716,8 +715,8 @@ public synchronized void addLabels(RpcController controller, VisibilityLabelsReq } if (status.getOperationStatusCode() != SUCCESS) { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); - failureResultBuilder.setException(buildException(new DoNotRetryIOException( - status.getExceptionMsg()))); + failureResultBuilder.setException( + buildException(new DoNotRetryIOException(status.getExceptionMsg()))); response.setResult(i, failureResultBuilder.build()); } i++; @@ -773,8 +772,8 @@ public synchronized void setAuths(RpcController controller, SetAuthsRequest requ response.addResult(successResult); } else { RegionActionResult.Builder 
failureResultBuilder = RegionActionResult.newBuilder(); - failureResultBuilder.setException(buildException(new DoNotRetryIOException( - status.getExceptionMsg()))); + failureResultBuilder + .setException(buildException(new DoNotRetryIOException(status.getExceptionMsg()))); response.addResult(failureResultBuilder.build()); } } @@ -810,12 +809,12 @@ private void logResult(boolean isAllowed, String request, String reason, byte[] LOG.warn("Failed to get active system user."); LOG.debug("Details on failure to get active system user.", e); } - AUDITLOG.trace("Access " + (isAllowed ? "allowed" : "denied") + " for user " + - (requestingUser != null ? requestingUser.getShortName() : "UNKNOWN") + "; reason: " + - reason + "; remote address: " + - RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("") + "; request: " + - request + "; user: " + (user != null ? Bytes.toShort(user) : "null") + "; labels: " + - labelAuthsStr + "; regex: " + regex); + AUDITLOG.trace("Access " + (isAllowed ? "allowed" : "denied") + " for user " + + (requestingUser != null ? requestingUser.getShortName() : "UNKNOWN") + "; reason: " + + reason + "; remote address: " + + RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("") + "; request: " + + request + "; user: " + (user != null ? Bytes.toShort(user) : "null") + "; labels: " + + labelAuthsStr + "; regex: " + regex); } } @@ -833,15 +832,14 @@ public synchronized void getAuths(RpcController controller, GetAuthsRequest requ // AccessController CP methods. if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User requestingUser = VisibilityUtils.getActiveUser(); - throw new AccessDeniedException("User '" - + (requestingUser != null ? requestingUser.getShortName() : "null") - + "' is not authorized to perform this action."); + throw new AccessDeniedException( + "User '" + (requestingUser != null ? 
requestingUser.getShortName() : "null") + + "' is not authorized to perform this action."); } if (AuthUtil.isGroupPrincipal(Bytes.toString(user))) { String group = AuthUtil.getGroupName(Bytes.toString(user)); - labels = this.visibilityLabelService.getGroupAuths(new String[]{group}, false); - } - else { + labels = this.visibilityLabelService.getGroupAuths(new String[] { group }, false); + } else { labels = this.visibilityLabelService.getUserAuths(user, false); } logResult(true, "getAuths", "Get authorizations for user allowed", user, null, null); @@ -867,8 +865,8 @@ public synchronized void clearAuths(RpcController controller, SetAuthsRequest re VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder(); List auths = request.getAuthList(); if (!initialized) { - setExceptionResults(auths.size(), new CoprocessorException( - "VisibilityController not yet initialized"), response); + setExceptionResults(auths.size(), + new CoprocessorException("VisibilityController not yet initialized"), response); } else { byte[] requestUser = request.getUser().toByteArray(); List labelAuths = new ArrayList<>(auths.size()); @@ -897,8 +895,8 @@ public synchronized void clearAuths(RpcController controller, SetAuthsRequest re response.addResult(successResult); } else { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); - failureResultBuilder.setException(buildException(new DoNotRetryIOException( - status.getExceptionMsg()))); + failureResultBuilder + .setException(buildException(new DoNotRetryIOException(status.getExceptionMsg()))); response.addResult(failureResultBuilder.build()); } } @@ -928,9 +926,9 @@ public synchronized void listLabels(RpcController controller, ListLabelsRequest // AccessController CP methods. if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User requestingUser = VisibilityUtils.getActiveUser(); - throw new AccessDeniedException("User '" - + (requestingUser != null ? requestingUser.getShortName() : "null") - + "' is not authorized to perform this action."); + throw new AccessDeniedException( + "User '" + (requestingUser != null ? requestingUser.getShortName() : "null") + + "' is not authorized to perform this action."); } labels = this.visibilityLabelService.listLabels(regex); logResult(true, "listLabels", "Listing labels allowed", null, null, regex); @@ -959,8 +957,8 @@ private void checkCallingUserAuth() throws IOException { throw new IOException("Unable to retrieve calling user"); } if (!(this.visibilityLabelService.havingSystemAuth(user))) { - throw new AccessDeniedException("User '" + user.getShortName() - + "' is not authorized to perform this action."); + throw new AccessDeniedException( + "User '" + user.getShortName() + "' is not authorized to perform this action."); } } } @@ -989,10 +987,9 @@ public ReturnCode filterCell(final Cell cell) throws IOException { // Early out if there are no tags in the cell return ReturnCode.INCLUDE; } - boolean matchFound = VisibilityLabelServiceManager - .getInstance().getVisibilityLabelService() - .matchVisibility(putVisTags, putCellVisTagsFormat, deleteCellVisTags, - deleteCellVisTagsFormat); + boolean matchFound = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService().matchVisibility( + putVisTags, putCellVisTagsFormat, deleteCellVisTags, deleteCellVisTagsFormat); return matchFound ? 
ReturnCode.INCLUDE : ReturnCode.SKIP; } @@ -1001,12 +998,12 @@ public boolean equals(Object obj) { if (!(obj instanceof DeleteVersionVisibilityExpressionFilter)) { return false; } - if (this == obj){ + if (this == obj) { return true; } - DeleteVersionVisibilityExpressionFilter f = (DeleteVersionVisibilityExpressionFilter)obj; - return this.deleteCellVisTags.equals(f.deleteCellVisTags) && - this.deleteCellVisTagsFormat.equals(f.deleteCellVisTagsFormat); + DeleteVersionVisibilityExpressionFilter f = (DeleteVersionVisibilityExpressionFilter) obj; + return this.deleteCellVisTags.equals(f.deleteCellVisTags) + && this.deleteCellVisTagsFormat.equals(f.deleteCellVisTagsFormat); } @Override @@ -1023,8 +1020,7 @@ public int hashCode() { private static NameBytesPair buildException(final Throwable t) { NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder(); parameterBuilder.setName(t.getClass().getName()); - parameterBuilder.setValue( - ByteString.copyFromUtf8(StringUtils.stringifyException(t))); + parameterBuilder.setValue(ByteString.copyFromUtf8(StringUtils.stringifyException(t))); return parameterBuilder.build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java index 64058b715058..8f67afd3395b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityExpEvaluator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +18,8 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.yetus.audience.InterfaceAudience; /** * During the read (ie. get/Scan) the VisibilityController calls this interface for each of the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java index 4c3f1414b864..4eb91edada79 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,13 +20,12 @@ import java.io.IOException; import java.util.Map; import java.util.Objects; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; +import org.apache.yetus.audience.InterfaceAudience; /** * This Filter checks the visibility expression with each KV against visibility labels associated @@ -58,9 +57,8 @@ public boolean filterRowKey(Cell cell) throws IOException { @Override public ReturnCode filterCell(final Cell cell) throws IOException { - if (curFamily.getBytes() == null - || !(PrivateCellUtil.matchingFamily(cell, curFamily.getBytes(), curFamily.getOffset(), - curFamily.getLength()))) { + if (curFamily.getBytes() == null || !(PrivateCellUtil.matchingFamily(cell, curFamily.getBytes(), + curFamily.getOffset(), curFamily.getLength()))) { curFamily.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); // For this family, all the columns can have max of curFamilyMaxVersions versions. No need to // consider the older versions for visibility label check. @@ -72,7 +70,7 @@ public ReturnCode filterCell(final Cell cell) throws IOException { if (curQualifier.getBytes() == null || !(PrivateCellUtil.matchingQualifier(cell, curQualifier.getBytes(), curQualifier.getOffset(), curQualifier.getLength()))) { curQualifier.set(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()); + cell.getQualifierLength()); curQualMetVersions = 0; } curQualMetVersions++; @@ -96,12 +94,12 @@ public boolean equals(Object obj) { if (!(obj instanceof VisibilityLabelFilter)) { return false; } - if(this == obj){ + if (this == obj) { return true; } - VisibilityLabelFilter f = (VisibilityLabelFilter)obj; - return this.expEvaluator.equals(f.expEvaluator) && - this.cfVsMaxVersions.equals(f.cfVsMaxVersions); + VisibilityLabelFilter f = (VisibilityLabelFilter) obj; + return this.expEvaluator.equals(f.expEvaluator) + && this.cfVsMaxVersions.equals(f.cfVsMaxVersions); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java index b1e4d8909c69..9e58cff23bc9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelOrdinalProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java index 55ba344670fd..3192027be22d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,14 +19,13 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.regionserver.OperationStatus; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** * The interface which deals with visibility labels and user auths admin service as well as the cell @@ -38,74 +37,63 @@ public interface VisibilityLabelService extends Configurable { /** * System calls this after opening of regions. Gives a chance for the VisibilityLabelService to so * any initialization logic. - * @param e - * the region coprocessor env + * @param e the region coprocessor env */ void init(RegionCoprocessorEnvironment e) throws IOException; /** * Adds the set of labels into the system. - * @param labels - * Labels to add to the system. + * @param labels Labels to add to the system. * @return OperationStatus for each of the label addition */ OperationStatus[] addLabels(List labels) throws IOException; /** * Sets given labels globally authorized for the user. - * @param user - * The authorizing user - * @param authLabels - * Labels which are getting authorized for the user + * @param user The authorizing user + * @param authLabels Labels which are getting authorized for the user * @return OperationStatus for each of the label auth addition */ OperationStatus[] setAuths(byte[] user, List authLabels) throws IOException; /** * Removes given labels from user's globally authorized list of labels. - * @param user - * The user whose authorization to be removed - * @param authLabels - * Labels which are getting removed from authorization set + * @param user The user whose authorization to be removed + * @param authLabels Labels which are getting removed from authorization set * @return OperationStatus for each of the label auth removal */ OperationStatus[] clearAuths(byte[] user, List authLabels) throws IOException; /** * Retrieve the visibility labels for the user. - * @param user - * Name of the user whose authorization to be retrieved - * @param systemCall - * Whether a system or user originated call. + * @param user Name of the user whose authorization to be retrieved + * @param systemCall Whether a system or user originated call. * @return Visibility labels authorized for the given user. */ List getUserAuths(byte[] user, boolean systemCall) throws IOException; /** * Retrieve the visibility labels for the groups. - * @param groups - * Name of the groups whose authorization to be retrieved - * @param systemCall - * Whether a system or user originated call. + * @param groups Name of the groups whose authorization to be retrieved + * @param systemCall Whether a system or user originated call. * @return Visibility labels authorized for the given group. */ List getGroupAuths(String[] groups, boolean systemCall) throws IOException; /** * Retrieve the list of visibility labels defined in the system. - * @param regex The regular expression to filter which labels are returned. + * @param regex The regular expression to filter which labels are returned. * @return List of visibility labels */ List listLabels(String regex) throws IOException; /** - * Creates tags corresponding to given visibility expression. - *
          - * Note: This will be concurrently called from multiple threads and implementation should - * take care of thread safety. + * Creates tags corresponding to given visibility expression.
          + * Note: This will be concurrently called from multiple threads and implementation should take + * care of thread safety. * @param visExpression The Expression for which corresponding Tags to be created. - * @param withSerializationFormat specifies whether a tag, denoting the serialization version - * of the tags, to be added in the list. When this is true make sure to add the + * @param withSerializationFormat specifies whether a tag, denoting the serialization version of + * the tags, to be added in the list. When this is true make sure to add the * serialization format Tag also. The format tag value should be byte type. * @param checkAuths denotes whether to check individual labels in visExpression against user's * global auth label. @@ -119,8 +107,7 @@ List createVisibilityExpTags(String visExpression, boolean withSerializatio * Creates VisibilityExpEvaluator corresponding to given Authorizations.
          * Note: This will be concurrently called from multiple threads and implementation should take * care of thread safety. - * @param authorizations - * Authorizations for the read request + * @param authorizations Authorizations for the read request * @return The VisibilityExpEvaluator corresponding to the given set of authorization labels. */ VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) @@ -130,8 +117,7 @@ VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) * System checks for user auth during admin operations. (ie. Label add, set/clear auth). The * operation is allowed only for users having system auth. Also during read, if the requesting * user has system auth, he can view all the data irrespective of its labels. - * @param user - * User for whom system auth check to be done. + * @param user User for whom system auth check to be done. * @return true if the given user is having system/super auth */ boolean havingSystemAuth(User user) throws IOException; @@ -142,16 +128,15 @@ VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) * of visibility tags in Put and Delete.
          * Note: This will be concurrently called from multiple threads and implementation should take * care of thread safety. - * @param putVisTags - * The visibility tags present in the Put mutation - * @param putVisTagFormat - * The serialization format for the Put visibility tags. A null value for - * this format means the tags are written with unsorted label ordinals - * @param deleteVisTags - * - The visibility tags in the delete mutation (the specified Cell Visibility) - * @param deleteVisTagFormat - * The serialization format for the Delete visibility tags. A null value for - * this format means the tags are written with unsorted label ordinals + * @param putVisTags The visibility tags present in the Put mutation + * @param putVisTagFormat The serialization format for the Put visibility tags. A + * null value for this format means the tags are written with unsorted label + * ordinals + * @param deleteVisTags - The visibility tags in the delete mutation (the specified Cell + * Visibility) + * @param deleteVisTagFormat The serialization format for the Delete visibility tags. A + * null value for this format means the tags are written with unsorted label + * ordinals * @return true if matching tags are found * @see VisibilityConstants#SORTED_ORDINAL_SERIALIZATION_FORMAT */ @@ -159,23 +144,17 @@ boolean matchVisibility(List putVisTags, Byte putVisTagFormat, List de Byte deleteVisTagFormat) throws IOException; /** - * Provides a way to modify the visibility tags of type {@link TagType} - * .VISIBILITY_TAG_TYPE, that are part of the cell created from the WALEdits - * that are prepared for replication while calling - * {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} - * .replicate(). - * {@link org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint} - * calls this API to provide an opportunity to modify the visibility tags - * before replicating. - * - * @param visTags - * the visibility tags associated with the cell - * @param serializationFormat - * the serialization format associated with the tag + * Provides a way to modify the visibility tags of type {@link TagType} .VISIBILITY_TAG_TYPE, that + * are part of the cell created from the WALEdits that are prepared for replication while calling + * {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} .replicate(). + * {@link org.apache.hadoop.hbase.security.visibility.VisibilityReplicationEndpoint} calls this + * API to provide an opportunity to modify the visibility tags before replicating. 
+ * @param visTags the visibility tags associated with the cell + * @param serializationFormat the serialization format associated with the tag * @return the modified visibility expression in the form of byte[] * @throws IOException */ - byte[] encodeVisibilityForReplication(final List visTags, - final Byte serializationFormat) throws IOException; + byte[] encodeVisibilityForReplication(final List visTags, final Byte serializationFormat) + throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java index 74531b92ce78..47cdcde675f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelServiceManager.java @@ -18,12 +18,11 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.ReflectionUtils; /** * Manages singleton instance of {@link VisibilityLabelService} @@ -56,7 +55,7 @@ public static VisibilityLabelServiceManager getInstance() { */ public VisibilityLabelService getVisibilityLabelService(Configuration conf) throws IOException { String vlsClassName = conf.get(VISIBILITY_LABEL_SERVICE_CLASS, - DefaultVisibilityLabelServiceImpl.class.getCanonicalName()).trim(); + DefaultVisibilityLabelServiceImpl.class.getCanonicalName()).trim(); if (this.visibilityLabelService != null) { checkForClusterLevelSingleConf(vlsClassName); return this.visibilityLabelService; @@ -68,8 +67,8 @@ public VisibilityLabelService getVisibilityLabelService(Configuration conf) thro } this.vlsClazzName = vlsClassName; try { - this.visibilityLabelService = (VisibilityLabelService) ReflectionUtils.newInstance( - Class.forName(vlsClassName), conf); + this.visibilityLabelService = + (VisibilityLabelService) ReflectionUtils.newInstance(Class.forName(vlsClassName), conf); } catch (ClassNotFoundException e) { throw new IOException(e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java index 6eed5b66c761..64f2378424ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java @@ -97,8 +97,7 @@ public synchronized static VisibilityLabelsCache createAndGet(ZKWatcher watcher, /** * @return Singleton instance of VisibilityLabelsCache - * @throws IllegalStateException - * when this is called before calling + * @throws IllegalStateException when this is called before calling * {@link #createAndGet(ZKWatcher, Configuration)} */ public static VisibilityLabelsCache get() { @@ -239,7 +238,6 @@ public List getGroupAuths(String[] groups) { /** * Returns the list of ordinals of labels associated with the user - * * @param user Not null value. 
* @return the list of ordinals */ @@ -255,7 +253,6 @@ public Set getUserAuthsAsOrdinals(String user) { /** * Returns the list of ordinals of labels associated with the groups - * * @param groups * @return the list of ordinals */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java index b25b7e21c011..6d51166931c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,8 +38,8 @@ import org.slf4j.LoggerFactory; /** - * Similar to MvccSensitiveTracker but tracks the visibility expression also before - * deciding if a Cell can be considered deleted + * Similar to MvccSensitiveTracker but tracks the visibility expression also before deciding if a + * Cell can be considered deleted */ @InterfaceAudience.Private public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTracker { @@ -122,37 +121,35 @@ public void add(Cell cell) { prepare(cell); byte type = cell.getTypeByte(); switch (KeyValue.Type.codeToType(type)) { - // By the order of seen. We put null cq at first. - case DeleteFamily: // Delete all versions of all columns of the specified family - delFamMap.put(cell.getSequenceId(), - new VisibilityDeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId(), - new TagInfo(cell))); - break; - case DeleteFamilyVersion: // Delete all columns of the specified family and specified version - delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - - // These two kinds of markers are mix with Puts. - case DeleteColumn: // Delete all versions of the specified column - delColMap.put(cell.getSequenceId(), - new VisibilityDeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId(), - new TagInfo(cell))); - break; - case Delete: // Delete the specified version of the specified column. - delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); - break; - default: - throw new AssertionError("Unknown delete marker type for " + cell); + // By the order of seen. We put null cq at first. + case DeleteFamily: // Delete all versions of all columns of the specified family + delFamMap.put(cell.getSequenceId(), new VisibilityDeleteVersionsNode(cell.getTimestamp(), + cell.getSequenceId(), new TagInfo(cell))); + break; + case DeleteFamilyVersion: // Delete all columns of the specified family and specified version + delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + + // These two kinds of markers are mix with Puts. + case DeleteColumn: // Delete all versions of the specified column + delColMap.put(cell.getSequenceId(), new VisibilityDeleteVersionsNode(cell.getTimestamp(), + cell.getSequenceId(), new TagInfo(cell))); + break; + case Delete: // Delete the specified version of the specified column. 
+ delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell); + break; + default: + throw new AssertionError("Unknown delete marker type for " + cell); } } private boolean tagMatched(Cell put, TagInfo delInfo) throws IOException { List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(put, putVisTags); - return putVisTags.isEmpty() == delInfo.tags.isEmpty() && ( - (putVisTags.isEmpty() && delInfo.tags.isEmpty()) || VisibilityLabelServiceManager - .getInstance().getVisibilityLabelService() - .matchVisibility(putVisTags, putCellVisTagsFormat, delInfo.tags, delInfo.format)); + return putVisTags.isEmpty() == delInfo.tags.isEmpty() + && ((putVisTags.isEmpty() && delInfo.tags.isEmpty()) + || VisibilityLabelServiceManager.getInstance().getVisibilityLabelService() + .matchVisibility(putVisTags, putCellVisTagsFormat, delInfo.tags, delInfo.format)); } @Override @@ -174,8 +171,8 @@ public DeleteResult isDeleted(Cell cell) { } } } - SortedMap> subMap = node.mvccCountingMap - .subMap(cell.getSequenceId(), true, Math.min(duplicateMvcc, deleteMvcc), true); + SortedMap> subMap = node.mvccCountingMap.subMap(cell.getSequenceId(), + true, Math.min(duplicateMvcc, deleteMvcc), true); for (Map.Entry> seg : subMap.entrySet()) { if (seg.getValue().size() >= maxVersions) { return DeleteResult.VERSION_MASKED; @@ -202,6 +199,6 @@ public DeleteResult isDeleted(Cell cell) { @Override protected void resetInternal() { delFamMap.put(Long.MAX_VALUE, - new VisibilityDeleteVersionsNode(Long.MIN_VALUE, Long.MAX_VALUE, new TagInfo())); + new VisibilityDeleteVersionsNode(Long.MIN_VALUE, Long.MAX_VALUE, new TagInfo())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java index e39d6016463d..e04b56599e3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; @@ -31,10 +29,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A RegionServerObserver impl that provides the custom - * VisibilityReplicationEndpoint. This class should be configured as the - * 'hbase.coprocessor.regionserver.classes' for the visibility tags to be - * replicated as string. The value for the configuration should be + * A RegionServerObserver impl that provides the custom VisibilityReplicationEndpoint. This class + * should be configured as the 'hbase.coprocessor.regionserver.classes' for the visibility tags to + * be replicated as string. 
The value for the configuration should be * 'org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication'. */ @InterfaceAudience.Private @@ -45,15 +42,16 @@ public class VisibilityReplication implements RegionServerCoprocessor, RegionSer @Override public void start(CoprocessorEnvironment env) throws IOException { this.conf = env.getConfiguration(); - visibilityLabelService = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(this.conf); + visibilityLabelService = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService(this.conf); } @Override public void stop(CoprocessorEnvironment env) throws IOException { } - @Override public Optional getRegionServerObserver() { + @Override + public Optional getRegionServerObserver() { return Optional.of(this); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java index cd495ce442a3..1c05c5dc7bdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; - import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -58,7 +57,7 @@ public void init(Context context) throws IOException { } @Override - public void peerConfigUpdated(ReplicationPeerConfig rpc){ + public void peerConfigUpdated(ReplicationPeerConfig rpc) { delegator.peerConfigUpdated(rpc); } @@ -80,8 +79,8 @@ public boolean replicate(ReplicateContext replicateContext) { if (cell.getTagsLength() > 0) { visTags.clear(); nonVisTags.clear(); - Byte serializationFormat = VisibilityUtils.extractAndPartitionTags(cell, visTags, - nonVisTags); + Byte serializationFormat = + VisibilityUtils.extractAndPartitionTags(cell, visTags, nonVisTags); if (!visTags.isEmpty()) { try { byte[] modifiedVisExpression = visibilityLabelsService @@ -92,10 +91,10 @@ public boolean replicate(ReplicateContext replicateContext) { } } catch (Exception ioe) { LOG.error( - "Exception while reading the visibility labels from the cell. The replication " - + "would happen as per the existing format and not as " + - "string type for the cell " - + cell + ".", ioe); + "Exception while reading the visibility labels from the cell. 
The replication " + + "would happen as per the existing format and not as " + + "string type for the cell " + cell + ".", + ioe); // just return the old entries as it is without applying the string type change newEdit.add(cell); continue; @@ -140,7 +139,9 @@ public boolean isRunning() { } @Override - public boolean isStarting() {return this.delegator.isStarting();} + public boolean isStarting() { + return this.delegator.isStarting(); + } @Override public void start() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java index 6b9ac7449a4b..d92a43f3e3f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,24 +21,23 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.regionserver.querymatcher.ScanDeleteTracker; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.regionserver.querymatcher.ScanDeleteTracker; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Triple; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Similar to ScanDeletTracker but tracks the visibility expression also before - * deciding if a Cell can be considered deleted + * Similar to ScanDeletTracker but tracks the visibility expression also before deciding if a Cell + * can be considered deleted */ @InterfaceAudience.Private public class VisibilityScanDeleteTracker extends ScanDeleteTracker { @@ -50,10 +48,10 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { * This tag is used for the DELETE cell which has no visibility label. */ private static final List EMPTY_TAG = Collections.EMPTY_LIST; - // Its better to track the visibility tags in delete based on each type. Create individual - // data structures for tracking each of them. This would ensure that there is no tracking based + // Its better to track the visibility tags in delete based on each type. Create individual + // data structures for tracking each of them. This would ensure that there is no tracking based // on time and also would handle all cases where deletefamily or deletecolumns is specified with - // Latest_timestamp. In such cases the ts in the delete marker and the masking + // Latest_timestamp. In such cases the ts in the delete marker and the masking // put will not be same. 
So going with individual data structures for different delete // type would solve this problem and also ensure that the combination of different type // of deletes with diff ts would also work fine @@ -73,7 +71,7 @@ public VisibilityScanDeleteTracker(CellComparator comparator) { @Override public void add(Cell delCell) { - //Cannot call super.add because need to find if the delete needs to be considered + // Cannot call super.add because need to find if the delete needs to be considered long timestamp = delCell.getTimestamp(); byte type = delCell.getTypeByte(); if (type == KeyValue.Type.DeleteFamily.getCode()) { @@ -124,23 +122,27 @@ private boolean extractDeleteCellVisTags(Cell delCell, Type type) { } deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visibilityTagsDeleteFamily.add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamily + .add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); hasVisTag = true; } else { - visibilityTagsDeleteFamily.add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamily + .add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); } break; case DeleteFamilyVersion: - if(visibilityTagsDeleteFamilyVersion == null) { + if (visibilityTagsDeleteFamilyVersion == null) { visibilityTagsDeleteFamilyVersion = new ArrayList<>(); } delTags = new ArrayList<>(); deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visibilityTagsDeleteFamilyVersion.add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamilyVersion + .add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); hasVisTag = true; } else { - visibilityTagsDeleteFamilyVersion.add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamilyVersion + .add(new Triple<>(EMPTY_TAG, deleteCellVisTagsFormat, delCell.getTimestamp())); } break; case DeleteColumn: @@ -311,13 +313,14 @@ public DeleteResult isDeleted(Cell cell) { visibilityTagsDeleteColumns = null; visiblityTagsDeleteColumnVersion = null; } else { - throw new IllegalStateException("isDeleted failed: deleteBuffer=" - + Bytes.toStringBinary(deleteCell.getQualifierArray(), + throw new IllegalStateException( + "isDeleted failed: deleteBuffer=" + + Bytes.toStringBinary(deleteCell.getQualifierArray(), deleteCell.getQualifierOffset(), deleteCell.getQualifierLength()) - + ", qualifier=" - + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength()) - + ", timestamp=" + timestamp + ", comparison result: " + ret); + + ", qualifier=" + + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()) + + ", timestamp=" + timestamp + ", comparison result: " + ret); } } } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index de0c28746459..e536fb7e6e04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor 
license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -77,9 +77,9 @@ public class VisibilityUtils { public static final String VISIBILITY_LABEL_GENERATOR_CLASS = "hbase.regionserver.scan.visibility.label.generator.class"; public static final String SYSTEM_LABEL = "system"; - public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG = new ArrayBackedTag( - TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE, - VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL); + public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG = + new ArrayBackedTag(TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE, + VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL); private static final String COMMA = ","; private static final ExpressionParser EXP_PARSER = new ExpressionParser(); @@ -122,7 +122,6 @@ public static byte[] getUserAuthsDataToWriteToZooKeeper(Map readLabelsFromZKData(byte[] data) * @return User auth details * @throws DeserializationException */ - public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) + public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); @@ -168,8 +167,8 @@ public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) * @return Stack of ScanLabelGenerator instances. ScanLabelGenerator classes can be specified in * Configuration as comma separated list using key * "hbase.regionserver.scan.visibility.label.generator.class" - * @throws IllegalArgumentException - * when any of the specified ScanLabelGenerator class can not be loaded. + * @throws IllegalArgumentException when any of the specified ScanLabelGenerator class can not be + * loaded. */ public static List getScanLabelGenerators(Configuration conf) { // There can be n SLG specified as comma separated in conf @@ -194,9 +193,9 @@ public static List getScanLabelGenerators(Configuration conf // 2. DefinedSetFilterScanLabelGenerator // This stacking will achieve the following default behavior: // 1. If there is no Auths in the scan, we will obtain the global defined set for the user - // from the labels table. + // from the labels table. // 2. If there is Auths in the scan, we will examine the passed in Auths and filter out the - // labels that the user is not entitled to. Then use the resulting label set. + // labels that the user is not entitled to. Then use the resulting label set. 
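For context, the default stacking described in the comments above can be overridden through the same configuration key; a minimal sketch (not from this patch), where com.example.MySLG stands in for a hypothetical custom generator:

Configuration conf = HBaseConfiguration.create();
// Ordered, comma-separated list consumed by VisibilityUtils.getScanLabelGenerators(conf).
conf.setStrings(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS,
    FeedUserAuthScanLabelGenerator.class.getName(),
    "com.example.MySLG"); // hypothetical ScanLabelGenerator implementation
List<ScanLabelGenerator> slgs = VisibilityUtils.getScanLabelGenerators(conf);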
if (slgs.isEmpty()) { slgs.add(ReflectionUtils.newInstance(FeedUserAuthScanLabelGenerator.class, conf)); slgs.add(ReflectionUtils.newInstance(DefinedSetFilterScanLabelGenerator.class, conf)); @@ -226,18 +225,15 @@ public static Byte extractVisibilityTags(Cell cell, List tags) { /** * Extracts and partitions the visibility tags and nonVisibility Tags - * - * @param cell - the cell for which we would extract and partition the - * visibility and non visibility tags - * @param visTags - * - all the visibilty tags of type TagType.VISIBILITY_TAG_TYPE would - * be added to this list + * @param cell - the cell for which we would extract and partition the visibility and non + * visibility tags + * @param visTags - all the visibilty tags of type TagType.VISIBILITY_TAG_TYPE would be added to + * this list * @param nonVisTags - all the non visibility tags would be added to this list - * @return - the serailization format of the tag. Can be null if no tags are found or - * if there is no visibility tag found + * @return - the serailization format of the tag. Can be null if no tags are found or if there is + * no visibility tag found */ - public static Byte extractAndPartitionTags(Cell cell, List visTags, - List nonVisTags) { + public static Byte extractAndPartitionTags(Cell cell, List visTags, List nonVisTags) { Byte serializationFormat = null; Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { @@ -271,10 +267,10 @@ public static Filter createVisibilityLabelFilter(Region region, Authorizations a for (ColumnFamilyDescriptor hcd : region.getTableDescriptor().getColumnFamilies()) { cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions()); } - VisibilityLabelService vls = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(); - Filter visibilityLabelFilter = new VisibilityLabelFilter( - vls.getVisibilityExpEvaluator(authorizations), cfVsMaxVersions); + VisibilityLabelService vls = + VisibilityLabelServiceManager.getInstance().getVisibilityLabelService(); + Filter visibilityLabelFilter = + new VisibilityLabelFilter(vls.getVisibilityExpEvaluator(authorizations), cfVsMaxVersions); return visibilityLabelFilter; } @@ -355,8 +351,8 @@ private static void getLabelOrdinals(ExpressionNode node, List labelOrd checkAuths(auths, labelOrdinal, identifier, checkAuths); } else { // This is a NOT node. - LeafExpressionNode lNode = (LeafExpressionNode) ((NonLeafExpressionNode) node) - .getChildExps().get(0); + LeafExpressionNode lNode = + (LeafExpressionNode) ((NonLeafExpressionNode) node).getChildExps().get(0); identifier = lNode.getIdentifier(); labelOrdinal = ordinalProvider.getLabelOrdinal(identifier); checkAuths(auths, labelOrdinal, identifier, checkAuths); @@ -377,12 +373,9 @@ private static void getLabelOrdinals(ExpressionNode node, List labelOrd /** * This will sort the passed labels in ascending oder and then will write one after the other to * the passed stream. - * @param labelOrdinals - * Unsorted label ordinals - * @param dos - * Stream where to write the labels. - * @throws IOException - * When IOE during writes to Stream. + * @param labelOrdinals Unsorted label ordinals + * @param dos Stream where to write the labels. + * @throws IOException When IOE during writes to Stream. 
*/ private static void writeLabelOrdinalsToStream(List labelOrdinals, DataOutputStream dos) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java index bcb3b8ba4fbe..9d776cbb92f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,13 +18,12 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - -import org.apache.hadoop.hbase.zookeeper.ZKListener; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,15 +47,15 @@ public class ZKVisibilityLabelWatcher extends ZKListener { private String userAuthsZnode; public ZKVisibilityLabelWatcher(ZKWatcher watcher, VisibilityLabelsCache labelsCache, - Configuration conf) { + Configuration conf) { super(watcher); this.labelsCache = labelsCache; String labelZnodeParent = conf.get(VISIBILITY_LABEL_ZK_PATH, DEFAULT_VISIBILITY_LABEL_NODE); - String userAuthsZnodeParent = conf.get(VISIBILITY_USER_AUTHS_ZK_PATH, - DEFAULT_VISIBILITY_USER_AUTHS_NODE); + String userAuthsZnodeParent = + conf.get(VISIBILITY_USER_AUTHS_ZK_PATH, DEFAULT_VISIBILITY_USER_AUTHS_NODE); this.labelZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, labelZnodeParent); - this.userAuthsZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, - userAuthsZnodeParent); + this.userAuthsZnode = + ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, userAuthsZnodeParent); } public void start() throws KeeperException { @@ -133,7 +132,6 @@ public void nodeChildrenChanged(String path) { /** * Write a labels mirror or user auths mirror into zookeeper - * * @param data * @param labelsOrUserAuths true for writing labels and false for user auths. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java index fcc66a8b5ea0..4a3cbd358b66 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/ExpressionNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java index fd479b40594f..4151ecff4506 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java index 83610fadc8f1..94bb99faa036 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java index 2281453c2dc0..4b0cd98d14b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java index 342aa87feda2..d71bf819c8ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/server/trace/IpcServerSpanBuilder.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.server.trace; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_METHOD; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SERVICE; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RPC_SYSTEM; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanBuilder; @@ -34,12 +34,14 @@ import org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.RpcSystem; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; /** * Construct {@link Span} instances originating from the server side of an IPC. - * - * @see Semantic conventions for RPC spans + * @see Semantic + * conventions for RPC spans */ @InterfaceAudience.Private public class IpcServerSpanBuilder implements Supplier { @@ -48,13 +50,11 @@ public class IpcServerSpanBuilder implements Supplier { private final Map, Object> attributes = new HashMap<>(); public IpcServerSpanBuilder(final RpcCall rpcCall) { - final String packageAndService = Optional.ofNullable(rpcCall.getService()) - .map(BlockingService::getDescriptorForType) - .map(IpcClientSpanBuilder::getRpcPackageAndService) - .orElse(""); - final String method = Optional.ofNullable(rpcCall.getMethod()) - .map(IpcClientSpanBuilder::getRpcName) - .orElse(""); + final String packageAndService = + Optional.ofNullable(rpcCall.getService()).map(BlockingService::getDescriptorForType) + .map(IpcClientSpanBuilder::getRpcPackageAndService).orElse(""); + final String method = + Optional.ofNullable(rpcCall.getMethod()).map(IpcClientSpanBuilder::getRpcName).orElse(""); setName(IpcClientSpanBuilder.buildSpanName(packageAndService, method)); addAttribute(RPC_SYSTEM, RpcSystem.HBASE_RPC.name()); addAttribute(RPC_SERVICE, packageAndService); @@ -78,9 +78,8 @@ public IpcServerSpanBuilder addAttribute(final AttributeKey key, T value) @SuppressWarnings("unchecked") public Span build() { - final SpanBuilder builder = TraceUtil.getGlobalTracer() - .spanBuilder(name) - .setSpanKind(SpanKind.SERVER); + final SpanBuilder builder = + TraceUtil.getGlobalTracer().spanBuilder(name).setSpanKind(SpanKind.SERVER); attributes.forEach((k, v) -> builder.setAttribute((AttributeKey) k, v)); return builder.startSpan(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java index f8e54c9c459c..36c10ea3b662 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.util.Arrays; import java.util.Locale; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; @@ -37,43 +35,42 @@ */ @InterfaceAudience.Private public class CreateSnapshot extends AbstractHBaseTool { - private SnapshotType snapshotType = SnapshotType.FLUSH; - private TableName tableName = null; - private String snapshotName = null; + private SnapshotType snapshotType = SnapshotType.FLUSH; + private TableName tableName = null; + private String snapshotName = null; - public static void main(String[] args) { - new CreateSnapshot().doStaticMain(args); - } + public static void main(String[] args) { + new CreateSnapshot().doStaticMain(args); + } - @Override - protected void addOptions() { - this.addRequiredOptWithArg("t", "table", "The name of the table"); - this.addRequiredOptWithArg("n", "name", "The name of the created snapshot"); - this.addOptWithArg("s", "snapshot_type", - "Snapshot Type. FLUSH is default. Posible values are " - + Arrays.toString(SnapshotType.values())); - } + @Override + protected void addOptions() { + this.addRequiredOptWithArg("t", "table", "The name of the table"); + this.addRequiredOptWithArg("n", "name", "The name of the created snapshot"); + this.addOptWithArg("s", "snapshot_type", "Snapshot Type. FLUSH is default. Posible values are " + + Arrays.toString(SnapshotType.values())); + } - @Override - protected void processOptions(CommandLine cmd) { - this.tableName = TableName.valueOf(cmd.getOptionValue('t')); - this.snapshotName = cmd.getOptionValue('n'); - String snapshotTypeName = cmd.getOptionValue('s'); - if (snapshotTypeName != null) { - snapshotTypeName = snapshotTypeName.toUpperCase(Locale.ROOT); - this.snapshotType = SnapshotType.valueOf(snapshotTypeName); - } + @Override + protected void processOptions(CommandLine cmd) { + this.tableName = TableName.valueOf(cmd.getOptionValue('t')); + this.snapshotName = cmd.getOptionValue('n'); + String snapshotTypeName = cmd.getOptionValue('s'); + if (snapshotTypeName != null) { + snapshotTypeName = snapshotTypeName.toUpperCase(Locale.ROOT); + this.snapshotType = SnapshotType.valueOf(snapshotTypeName); } + } - @Override - protected int doWork() throws Exception { - try (Connection connection = ConnectionFactory.createConnection(getConf()); - Admin admin = connection.getAdmin()) { - admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); - } catch (Exception e) { - System.err.println("failed to take the snapshot: " + e.getMessage()); - return -1; - } - return 0; + @Override + protected int doWork() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(getConf()); + Admin admin = connection.getAdmin()) { + admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType)); + } catch (Exception e) { + System.err.println("failed to take the snapshot: " + e.getMessage()); + return -1; } + return 0; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 17406445fc3a..fbb849e298a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; @@ -73,50 +72,52 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** * Helper to Restore/Clone a Snapshot - * - *

- * <p>The helper assumes that a table is already created, and by calling restore()
- * the content present in the snapshot will be restored as the new content of the table.
- *
- * <p>Clone from Snapshot: If the target table is empty, the restore operation
- * is just a "clone operation", where the only operations are:
+ * <p>
+ * The helper assumes that a table is already created, and by calling restore() the content present
+ * in the snapshot will be restored as the new content of the table.
+ * <p>
+ * Clone from Snapshot: If the target table is empty, the restore operation is just a "clone
+ * operation", where the only operations are:
  * <ul>
- *  <li>for each region in the snapshot create a new region
- *  (note that the region will have a different name, since the encoding contains the table name)
- *  <li>for each file in the region create a new HFileLink to point to the original file.
- *  <li>restore the logs, if any
+ * <li>for each region in the snapshot create a new region (note that the region will have a
+ * different name, since the encoding contains the table name)
+ * <li>for each file in the region create a new HFileLink to point to the original file.
+ * <li>restore the logs, if any
  * </ul>
- *
- * <p>Restore from Snapshot:
+ * <p>
+ * Restore from Snapshot:
+ * <ul>
+ * <li>for each region in the table verify which are available in the snapshot and which are not
+ * <ul>
+ * <li>if the region is not present in the snapshot, remove it.
+ * <li>if the region is present in the snapshot
  * <ul>
- *  <li>for each region in the table verify which are available in the snapshot and which are not
- *    <ul>
- *    <li>if the region is not present in the snapshot, remove it.
- *    <li>if the region is present in the snapshot
- *      <ul>
- *      <li>for each file in the table region verify which are available in the snapshot
- *        <ul>
- *          <li>if the hfile is not present in the snapshot, remove it
- *          <li>if the hfile is present, keep it (nothing to do)
- *        </ul>
- *      <li>for each file in the snapshot region but not in the table
- *        <ul>
- *          <li>create a new HFileLink that point to the original file
- *        </ul>
- *      </ul>
- *    </ul>
- *  <li>for each region in the snapshot not present in the current table state
- *    <ul>
- *    <li>create a new region and for each file in the region create a new HFileLink
- *      (This is the same as the clone operation)
- *    </ul>
- *  <li>restore the logs, if any
+ * <li>for each file in the table region verify which are available in the snapshot
+ * <ul>
+ * <li>if the hfile is not present in the snapshot, remove it
+ * <li>if the hfile is present, keep it (nothing to do)
+ * </ul>
+ * <li>for each file in the snapshot region but not in the table
+ * <ul>
+ * <li>create a new HFileLink that point to the original file
+ * </ul>
+ * </ul>
+ * </ul>
+ * <li>for each region in the snapshot not present in the current table state
+ * <ul>
+ * <li>create a new region and for each file in the region create a new HFileLink (This is the same
+ * as the clone operation)
+ * </ul>
+ * <li>restore the logs, if any
  * </ul>
          */ @InterfaceAudience.Private @@ -125,7 +126,7 @@ public class RestoreSnapshotHelper { private final Map regionsMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - private final Map > parentsMap = new HashMap<>(); + private final Map> parentsMap = new HashMap<>(); private final ForeignExceptionDispatcher monitor; private final MonitoredTask status; @@ -142,20 +143,16 @@ public class RestoreSnapshotHelper { private final FileSystem fs; private final boolean createBackRefs; - public RestoreSnapshotHelper(final Configuration conf, - final FileSystem fs, - final SnapshotManifest manifest, - final TableDescriptor tableDescriptor, - final Path rootDir, - final ForeignExceptionDispatcher monitor, - final MonitoredTask status) { + public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs, + final SnapshotManifest manifest, final TableDescriptor tableDescriptor, final Path rootDir, + final ForeignExceptionDispatcher monitor, final MonitoredTask status) { this(conf, fs, manifest, tableDescriptor, rootDir, monitor, status, true); } public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs, - final SnapshotManifest manifest, final TableDescriptor tableDescriptor, final Path rootDir, - final ForeignExceptionDispatcher monitor, final MonitoredTask status, - final boolean createBackRefs) { + final SnapshotManifest manifest, final TableDescriptor tableDescriptor, final Path rootDir, + final ForeignExceptionDispatcher monitor, final MonitoredTask status, + final boolean createBackRefs) { this.fs = fs; this.conf = conf; this.snapshotManifest = manifest; @@ -200,7 +197,7 @@ private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) thr List tableRegions = getTableRegions(); RegionInfo mobRegion = - MobUtils.getMobRegionInfo(snapshotManifest.getTableDescriptor().getTableName()); + MobUtils.getMobRegionInfo(snapshotManifest.getTableDescriptor().getTableName()); if (tableRegions != null) { // restore the mob region in case if (regionNames.contains(mobRegion.getEncodedName())) { @@ -226,13 +223,13 @@ private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) thr // NOTE: we rely upon the region name as: "table name, start key, end key" if (tableRegions != null) { monitor.rethrowException(); - for (RegionInfo regionInfo: tableRegions) { + for (RegionInfo regionInfo : tableRegions) { String regionName = regionInfo.getEncodedName(); if (regionNames.contains(regionName)) { LOG.info("region to restore: " + regionName); regionNames.remove(regionName); - metaChanges.addRegionToRestore(ProtobufUtil.toRegionInfo(regionManifests.get(regionName) - .getRegionInfo())); + metaChanges.addRegionToRestore( + ProtobufUtil.toRegionInfo(regionManifests.get(regionName).getRegionInfo())); } else { LOG.info("region to remove: " + regionName); metaChanges.addRegionToRemove(regionInfo); @@ -244,10 +241,10 @@ private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) thr List regionsToAdd = new ArrayList<>(regionNames.size()); if (regionNames.size() > 0) { monitor.rethrowException(); - for (String regionName: regionNames) { + for (String regionName : regionNames) { LOG.info("region to add: " + regionName); - regionsToAdd.add(ProtobufUtil.toRegionInfo(regionManifests.get(regionName) - .getRegionInfo())); + regionsToAdd + .add(ProtobufUtil.toRegionInfo(regionManifests.get(regionName).getRegionInfo())); } } @@ -281,14 +278,14 @@ private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) thr * Describe the set of operations needed 
to update hbase:meta after restore. */ public static class RestoreMetaChanges { - private final Map > parentsMap; + private final Map> parentsMap; private final TableDescriptor htd; private List regionsToRestore = null; private List regionsToRemove = null; private List regionsToAdd = null; - public RestoreMetaChanges(TableDescriptor htd, Map > parentsMap) { + public RestoreMetaChanges(TableDescriptor htd, Map> parentsMap) { this.parentsMap = parentsMap; this.htd = htd; } @@ -313,9 +310,8 @@ public boolean hasRegionsToAdd() { } /** - * Returns the list of new regions added during the on-disk restore. - * The caller is responsible to add the regions to META. - * e.g MetaTableAccessor.addRegionsToMeta(...) + * Returns the list of new regions added during the on-disk restore. The caller is responsible + * to add the regions to META. e.g MetaTableAccessor.addRegionsToMeta(...) * @return the list of regions to add to META */ public List getRegionsToAdd() { @@ -330,8 +326,8 @@ public boolean hasRegionsToRestore() { } /** - * Returns the list of 'restored regions' during the on-disk restore. - * The caller is responsible to add the regions to hbase:meta if not present. + * Returns the list of 'restored regions' during the on-disk restore. The caller is responsible + * to add the regions to hbase:meta if not present. * @return the list of regions restored */ public List getRegionsToRestore() { @@ -346,9 +342,8 @@ public boolean hasRegionsToRemove() { } /** - * Returns the list of regions removed during the on-disk restore. - * The caller is responsible to remove the regions from META. - * e.g. MetaTableAccessor.deleteRegions(...) + * Returns the list of regions removed during the on-disk restore. The caller is responsible to + * remove the regions from META. e.g. MetaTableAccessor.deleteRegions(...) * @return the list of regions to remove from META */ public List getRegionsToRemove() { @@ -377,14 +372,14 @@ void addRegionToRestore(final RegionInfo hri) { regionsToRestore.add(hri); } - public void updateMetaParentRegions(Connection connection, - final List regionInfos) throws IOException { + public void updateMetaParentRegions(Connection connection, final List regionInfos) + throws IOException { if (regionInfos == null || parentsMap.isEmpty()) return; // Extract region names and offlined regions Map regionsByName = new HashMap<>(regionInfos.size()); List parentRegions = new LinkedList<>(); - for (RegionInfo regionInfo: regionInfos) { + for (RegionInfo regionInfo : regionInfos) { if (regionInfo.isSplitParent()) { parentRegions.add(regionInfo); } else { @@ -393,7 +388,7 @@ public void updateMetaParentRegions(Connection connection, } // Update Offline parents - for (RegionInfo regionInfo: parentRegions) { + for (RegionInfo regionInfo : parentRegions) { Pair daughters = parentsMap.get(regionInfo.getEncodedName()); if (daughters == null) { // The snapshot contains an unreferenced region. @@ -409,8 +404,7 @@ public void updateMetaParentRegions(Connection connection, LOG.debug("Update splits parent " + regionInfo.getEncodedName() + " -> " + daughters); MetaTableAccessor.addSplitsToParent(connection, regionInfo, - regionsByName.get(daughters.getFirst()), - regionsByName.get(daughters.getSecond())); + regionsByName.get(daughters.getFirst()), regionsByName.get(daughters.getSecond())); } } } @@ -433,8 +427,8 @@ public void editRegion(final RegionInfo hri) throws IOException { * Restore specified regions by restoring content to the snapshot state. 
*/ private void restoreHdfsRegions(final ThreadPoolExecutor exec, - final Map regionManifests, - final List regions) throws IOException { + final Map regionManifests, final List regions) + throws IOException { if (regions == null || regions.isEmpty()) return; ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() { @Override @@ -448,8 +442,8 @@ public void editRegion(final RegionInfo hri) throws IOException { * Restore specified mob regions by restoring content to the snapshot state. */ private void restoreHdfsMobRegions(final ThreadPoolExecutor exec, - final Map regionManifests, - final List regions) throws IOException { + final Map regionManifests, final List regions) + throws IOException { if (regions == null || regions.isEmpty()) return; ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() { @Override @@ -459,11 +453,11 @@ public void editRegion(final RegionInfo hri) throws IOException { }); } - private Map> getRegionHFileReferences( - final SnapshotRegionManifest manifest) { + private Map> + getRegionHFileReferences(final SnapshotRegionManifest manifest) { Map> familyMap = - new HashMap<>(manifest.getFamilyFilesCount()); - for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + new HashMap<>(manifest.getFamilyFilesCount()); + for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { familyMap.put(familyFiles.getFamilyName().toStringUtf8(), new ArrayList<>(familyFiles.getStoreFilesList())); } @@ -471,8 +465,8 @@ private Map> getRegionHFileRefere } /** - * Restore region by removing files not in the snapshot - * and adding the missing ones from the snapshot. + * Restore region by removing files not in the snapshot and adding the missing ones from the + * snapshot. */ private void restoreRegion(final RegionInfo regionInfo, final SnapshotRegionManifest regionManifest) throws IOException { @@ -480,8 +474,8 @@ private void restoreRegion(final RegionInfo regionInfo, } /** - * Restore mob region by removing files not in the snapshot - * and adding the missing ones from the snapshot. + * Restore mob region by removing files not in the snapshot and adding the missing ones from the + * snapshot. */ private void restoreMobRegion(final RegionInfo regionInfo, final SnapshotRegionManifest regionManifest) throws IOException { @@ -493,24 +487,24 @@ private void restoreMobRegion(final RegionInfo regionInfo, } /** - * Restore region by removing files not in the snapshot - * and adding the missing ones from the snapshot. + * Restore region by removing files not in the snapshot and adding the missing ones from the + * snapshot. */ private void restoreRegion(final RegionInfo regionInfo, final SnapshotRegionManifest regionManifest, Path regionDir) throws IOException { Map> snapshotFiles = - getRegionHFileReferences(regionManifest); + getRegionHFileReferences(regionManifest); String tableName = tableDesc.getTableName().getNameAsString(); final String snapshotName = snapshotDesc.getName(); Path regionPath = new Path(tableDir, regionInfo.getEncodedName()); - HRegionFileSystem regionFS = (fs.exists(regionPath)) ? - HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, false) : - HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo); + HRegionFileSystem regionFS = (fs.exists(regionPath)) + ? 
HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, false) + : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo); // Restore families present in the table - for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { + for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) { byte[] family = Bytes.toBytes(familyDir.getName()); Set familyFiles = getTableRegionFamilyFiles(familyDir); @@ -519,13 +513,13 @@ private void restoreRegion(final RegionInfo regionInfo, List filesToTrack = new ArrayList<>(); if (snapshotFamilyFiles != null) { List hfilesToAdd = new ArrayList<>(); - for (SnapshotRegionManifest.StoreFile storeFile: snapshotFamilyFiles) { + for (SnapshotRegionManifest.StoreFile storeFile : snapshotFamilyFiles) { if (familyFiles.contains(storeFile.getName())) { // HFile already present familyFiles.remove(storeFile.getName()); - //no need to restore already present files, but we need to add those to tracker - filesToTrack.add(new StoreFileInfo(conf, fs, - new Path(familyDir, storeFile.getName()), true)); + // no need to restore already present files, but we need to add those to tracker + filesToTrack + .add(new StoreFileInfo(conf, fs, new Path(familyDir, storeFile.getName()), true)); } else { // HFile missing hfilesToAdd.add(storeFile); @@ -533,57 +527,55 @@ private void restoreRegion(final RegionInfo regionInfo, } // Remove hfiles not present in the snapshot - for (String hfileName: familyFiles) { + for (String hfileName : familyFiles) { Path hfile = new Path(familyDir, hfileName); if (!fs.getFileStatus(hfile).isDirectory()) { - LOG.trace("Removing HFile=" + hfileName + " not present in snapshot=" + - snapshotName + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); + LOG.trace("Removing HFile=" + hfileName + " not present in snapshot=" + snapshotName + + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile); } } // Restore Missing files - for (SnapshotRegionManifest.StoreFile storeFile: hfilesToAdd) { - LOG.debug("Restoring missing HFileLink " + storeFile.getName() + - " of snapshot=" + snapshotName+ - " to region=" + regionInfo.getEncodedName() + " table=" + tableName); + for (SnapshotRegionManifest.StoreFile storeFile : hfilesToAdd) { + LOG.debug("Restoring missing HFileLink " + storeFile.getName() + " of snapshot=" + + snapshotName + " to region=" + regionInfo.getEncodedName() + " table=" + tableName); String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs); // mark the reference file to be added to tracker - filesToTrack.add(new StoreFileInfo(conf, fs, - new Path(familyDir, fileName), true)); + filesToTrack.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true)); } } else { // Family doesn't exists in the snapshot - LOG.trace("Removing family=" + Bytes.toString(family) + " in snapshot=" + snapshotName + - " from region=" + regionInfo.getEncodedName() + " table=" + tableName); + LOG.trace("Removing family=" + Bytes.toString(family) + " in snapshot=" + snapshotName + + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); HFileArchiver.archiveFamilyByFamilyDir(fs, conf, regionInfo, familyDir, family); fs.delete(familyDir, true); } - StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, - StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). 
- withRegionFileSystem(regionFS).build()); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); - //simply reset list of tracked files with the matching files - //and the extra one present in the snapshot + // simply reset list of tracked files with the matching files + // and the extra one present in the snapshot tracker.set(filesToTrack); } // Add families not present in the table - for (Map.Entry> familyEntry: - snapshotFiles.entrySet()) { + for (Map.Entry> familyEntry : snapshotFiles + .entrySet()) { Path familyDir = new Path(regionDir, familyEntry.getKey()); - StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, true, - StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). - withRegionFileSystem(regionFS).build()); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); List files = new ArrayList<>(); if (!fs.mkdirs(familyDir)) { throw new IOException("Unable to create familyDir=" + familyDir); } - for (SnapshotRegionManifest.StoreFile storeFile: familyEntry.getValue()) { + for (SnapshotRegionManifest.StoreFile storeFile : familyEntry.getValue()) { LOG.trace("Adding HFileLink (Not present in the table) " + storeFile.getName() - + " of snapshot " + snapshotName + " to table=" + tableName); + + " of snapshot " + snapshotName + " to table=" + tableName); String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs); files.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true)); } @@ -610,12 +602,12 @@ private Set getTableRegionFamilyFiles(final Path familyDir) throws IOExc } /** - * Clone specified regions. For each region create a new region - * and create a HFileLink for each hfile. + * Clone specified regions. For each region create a new region and create a HFileLink for each + * hfile. 
*/ private RegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec, - final Map regionManifests, - final List regions) throws IOException { + final Map regionManifests, final List regions) + throws IOException { if (regions == null || regions.isEmpty()) return null; final Map snapshotRegions = new HashMap<>(regions.size()); @@ -632,16 +624,16 @@ private RegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec, String snapshotRegionName = snapshotRegionInfo.getEncodedName(); String clonedRegionName = clonedRegionsInfo[i].getEncodedName(); regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName)); - LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName + - " in snapshot " + snapshotName); + LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName + " in snapshot " + + snapshotName); // Add mapping between cloned region name and snapshot region info snapshotRegions.put(clonedRegionName, snapshotRegionInfo); } // create the regions on disk - ModifyRegionUtils.createRegions(exec, conf, rootDir, - tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() { + ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDesc, clonedRegionsInfo, + new ModifyRegionUtils.RegionFillTask() { @Override public void fillRegion(final HRegion region) throws IOException { RegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName()); @@ -653,42 +645,37 @@ public void fillRegion(final HRegion region) throws IOException { } /** - * Clone the mob region. For the region create a new region - * and create a HFileLink for each hfile. + * Clone the mob region. For the region create a new region and create a HFileLink for each hfile. */ private void cloneHdfsMobRegion(final Map regionManifests, final RegionInfo region) throws IOException { // clone region info (change embedded tableName with the new one) Path clonedRegionPath = MobUtils.getMobRegionPath(rootDir, tableDesc.getTableName()); - cloneRegion(MobUtils.getMobRegionInfo(tableDesc.getTableName()), - clonedRegionPath, region, regionManifests.get(region.getEncodedName())); + cloneRegion(MobUtils.getMobRegionInfo(tableDesc.getTableName()), clonedRegionPath, region, + regionManifests.get(region.getEncodedName())); } /** - * Clone region directory content from the snapshot info. - * - * Each region is encoded with the table name, so the cloned region will have - * a different region name. - * - * Instead of copying the hfiles a HFileLink is created. - * + * Clone region directory content from the snapshot info. Each region is encoded with the table + * name, so the cloned region will have a different region name. Instead of copying the hfiles a + * HFileLink is created. 
* @param regionDir {@link Path} cloned dir * @param snapshotRegionInfo */ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) - throws IOException { + throws IOException { final String tableName = tableDesc.getTableName().getNameAsString(); final String snapshotName = snapshotDesc.getName(); - for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8()); List clonedFiles = new ArrayList<>(); - for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) { - LOG.info("Adding HFileLink " + storeFile.getName() +" from cloned region " - + "in snapshot " + snapshotName + " to table=" + tableName); + for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) { + LOG.info("Adding HFileLink " + storeFile.getName() + " from cloned region " + "in snapshot " + + snapshotName + " to table=" + tableName); if (MobUtils.isMobRegionInfo(newRegionInfo)) { - String mobFileName = HFileLink.createHFileLinkName(snapshotRegionInfo, - storeFile.getName()); + String mobFileName = + HFileLink.createHFileLinkName(snapshotRegionInfo, storeFile.getName()); Path mobPath = new Path(familyDir, mobFileName); if (fs.exists(mobPath)) { fs.delete(mobPath, true); @@ -699,18 +686,18 @@ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, clonedFiles.add(new StoreFileInfo(conf, fs, new Path(familyDir, file), true)); } } - //we don't need to track files under mobdir + // we don't need to track files under mobdir if (!MobUtils.isMobRegionInfo(newRegionInfo)) { Path regionPath = new Path(tableDir, newRegionInfo.getEncodedName()); - HRegionFileSystem regionFS = (fs.exists(regionPath)) ? - HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) : - HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo); + HRegionFileSystem regionFS = (fs.exists(regionPath)) + ? HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, newRegionInfo, false) + : HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo); Configuration sftConf = StoreUtils.createStoreConfiguration(conf, tableDesc, tableDesc.getColumnFamily(familyFiles.getFamilyName().toByteArray())); - StoreFileTracker tracker = StoreFileTrackerFactory.create(sftConf, true, - StoreContext.getBuilder().withFamilyStoreDirectoryPath(familyDir). - withRegionFileSystem(regionFS).build()); + StoreFileTracker tracker = + StoreFileTrackerFactory.create(sftConf, true, StoreContext.getBuilder() + .withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build()); tracker.set(clonedFiles); } } @@ -718,31 +705,26 @@ private void cloneRegion(final RegionInfo newRegionInfo, final Path regionDir, } /** - * Clone region directory content from the snapshot info. - * - * Each region is encoded with the table name, so the cloned region will have - * a different region name. - * - * Instead of copying the hfiles a HFileLink is created. - * + * Clone region directory content from the snapshot info. Each region is encoded with the table + * name, so the cloned region will have a different region name. Instead of copying the hfiles a + * HFileLink is created. 
* @param region {@link HRegion} cloned * @param snapshotRegionInfo */ private void cloneRegion(final HRegion region, final RegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) throws IOException { - cloneRegion(region.getRegionInfo(), - new Path(tableDir, region.getRegionInfo().getEncodedName()), - snapshotRegionInfo, - manifest); + cloneRegion(region.getRegionInfo(), new Path(tableDir, region.getRegionInfo().getEncodedName()), + snapshotRegionInfo, manifest); } /** * Create a new {@link HFileLink} to reference the store file. - *

- * <p>The store file in the snapshot can be a simple hfile, an HFileLink or a reference.
+ * <p>
+ * The store file in the snapshot can be a simple hfile, an HFileLink or a reference.
  * <ul>
- *   <li>hfile: abc -> table=region-abc
- *   <li>reference: abc.1234 -> table=region-abc.1234
- *   <li>hfilelink: table=region-hfile -> table=region-hfile
+ * <li>hfile: abc -> table=region-abc
+ * <li>reference: abc.1234 -> table=region-abc.1234
+ * <li>hfilelink: table=region-hfile -> table=region-hfile
  * </ul>
          * @param familyDir destination directory for the store file * @param regionInfo destination region info for the table @@ -751,7 +733,7 @@ private void cloneRegion(final HRegion region, final RegionInfo snapshotRegionIn */ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInfo, final SnapshotRegionManifest.StoreFile storeFile, final boolean createBackRef) - throws IOException { + throws IOException { String hfileName = storeFile.getName(); if (HFileLink.isHFileLink(hfileName)) { return HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName, createBackRef); @@ -764,7 +746,10 @@ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInf /** * Create a new {@link Reference} as copy of the source one. - *
<p><blockquote><pre>
+   * <p>
+   * <blockquote>
+   *
+   * <pre>
              * The source table looks like:
              *    1234/abc      (original file)
              *    5678/abc.1234 (reference file)
          @@ -775,7 +760,9 @@ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInf
              *
              * NOTE that the region name in the clone changes (md5 of regioninfo)
              * and the reference should reflect that change.
-   * </pre></blockquote>
+   * </pre>
+   *
+   * </blockquote>
          * @param familyDir destination directory for the store file * @param regionInfo destination region info for the table * @param storeFile reference file name @@ -786,9 +773,10 @@ private String restoreReferenceFile(final Path familyDir, final RegionInfo regio // Extract the referred information (hfile name and parent region) Path refPath = - StoreFileInfo.getReferredToFile(new Path(new Path(new Path(new Path(snapshotTable - .getNamespaceAsString(), snapshotTable.getQualifierAsString()), regionInfo - .getEncodedName()), familyDir.getName()), hfileName)); + StoreFileInfo.getReferredToFile(new Path(new Path( + new Path(new Path(snapshotTable.getNamespaceAsString(), + snapshotTable.getQualifierAsString()), regionInfo.getEncodedName()), + familyDir.getName()), hfileName)); String snapshotRegionName = refPath.getParent().getParent().getName(); String fileName = refPath.getName(); @@ -802,7 +790,7 @@ private String restoreReferenceFile(final Path familyDir, final RegionInfo regio if (!HFileLink.isHFileLink(fileName)) { refLink = HFileLink.createHFileLinkName(snapshotTable, snapshotRegionName, fileName); linkPath = new Path(familyDir, - HFileLink.createHFileLinkName(snapshotTable, regionInfo.getEncodedName(), hfileName)); + HFileLink.createHFileLinkName(snapshotTable, regionInfo.getEncodedName(), hfileName)); } Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName); @@ -816,15 +804,15 @@ private String restoreReferenceFile(final Path familyDir, final RegionInfo regio if (linkPath != null) { in = HFileLink.buildFromHFileLinkPattern(conf, linkPath).open(fs); } else { - linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(), - regionInfo.getEncodedName()), familyDir.getName()), hfileName); + linkPath = new Path(new Path( + HRegion.getRegionDir(snapshotManifest.getSnapshotDir(), regionInfo.getEncodedName()), + familyDir.getName()), hfileName); in = fs.open(linkPath); } OutputStream out = fs.create(outPath); IOUtils.copyBytes(in, out, conf); } - // Add the daughter region to the map String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes())); if (regionName == null) { @@ -846,10 +834,8 @@ private String restoreReferenceFile(final Path familyDir, final RegionInfo regio } /** - * Create a new {@link RegionInfo} from the snapshot region info. - * Keep the same startKey, endKey, regionId and split information but change - * the table name. - * + * Create a new {@link RegionInfo} from the snapshot region info. Keep the same startKey, endKey, + * regionId and split information but change the table name. * @param snapshotRegionInfo Info for region to clone. 
* @return the new HRegion instance */ @@ -858,12 +844,9 @@ public RegionInfo cloneRegionInfo(final RegionInfo snapshotRegionInfo) { } public static RegionInfo cloneRegionInfo(TableName tableName, RegionInfo snapshotRegionInfo) { - return RegionInfoBuilder.newBuilder(tableName) - .setStartKey(snapshotRegionInfo.getStartKey()) - .setEndKey(snapshotRegionInfo.getEndKey()) - .setSplit(snapshotRegionInfo.isSplit()) - .setRegionId(snapshotRegionInfo.getRegionId()) - .setOffline(snapshotRegionInfo.isOffline()) + return RegionInfoBuilder.newBuilder(tableName).setStartKey(snapshotRegionInfo.getStartKey()) + .setEndKey(snapshotRegionInfo.getEndKey()).setSplit(snapshotRegionInfo.isSplit()) + .setRegionId(snapshotRegionInfo.getRegionId()).setOffline(snapshotRegionInfo.isOffline()) .build(); } @@ -873,7 +856,7 @@ public static RegionInfo cloneRegionInfo(TableName tableName, RegionInfo snapsho private List getTableRegions() throws IOException { LOG.debug("get table regions: " + tableDir); FileStatus[] regionDirs = - CommonFSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); + CommonFSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regionDirs == null) { return null; } @@ -883,8 +866,8 @@ private List getTableRegions() throws IOException { RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath()); regions.add(hri); } - LOG.debug("found " + regions.size() + " regions for table=" + - tableDesc.getTableName().getNameAsString()); + LOG.debug("found " + regions.size() + " regions for table=" + + tableDesc.getTableName().getNameAsString()); return regions; } @@ -901,26 +884,26 @@ public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, File Path rootDir, Path restoreDir, String snapshotName) throws IOException { // ensure that restore dir is not under root dir if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) { - throw new IllegalArgumentException("Filesystems for restore directory and HBase root " + - "directory should be the same"); + throw new IllegalArgumentException( + "Filesystems for restore directory and HBase root " + "directory should be the same"); } - if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath() +"/")) { - throw new IllegalArgumentException("Restore directory cannot be a sub directory of HBase " + - "root directory. RootDir: " + rootDir + ", restoreDir: " + restoreDir); + if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath() + "/")) { + throw new IllegalArgumentException("Restore directory cannot be a sub directory of HBase " + + "root directory. RootDir: " + rootDir + ", restoreDir: " + restoreDir); } Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); - MonitoredTask status = TaskMonitor.get().createStatus( - "Restoring snapshot '" + snapshotName + "' to directory " + restoreDir); + MonitoredTask status = TaskMonitor.get() + .createStatus("Restoring snapshot '" + snapshotName + "' to directory " + restoreDir); ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(); // we send createBackRefs=false so that restored hfiles do not create back reference links // in the base hbase root dir. 
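As a usage sketch for the copySnapshotForScanner API touched above (the restore directory and snapshot name below are hypothetical), a client that wants a read-only view of a snapshot can materialize it into a restore directory and scan it from there:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class CopySnapshotForScannerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    // Must live on the same filesystem as the HBase root dir, but not underneath it.
    Path restoreDir = new Path("/tmp/restore-for-scan");
    // Links (not copies) of the snapshot files are laid out under restoreDir; back
    // references are not created, so the real root dir stays untouched.
    RestoreSnapshotHelper.RestoreMetaChanges changes =
      RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, "mySnapshot");
    System.out.println("Snapshot linked under " + restoreDir);
  }
}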
- RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, - manifest, manifest.getTableDescriptor(), restoreDir, monitor, status, false); + RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, manifest, + manifest.getTableDescriptor(), restoreDir, monitor, status, false); RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize. if (LOG.isDebugEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index d126ec5a7526..9947a8e26103 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -84,9 +84,8 @@ *
          * * Utility methods in this class are useful for getting the correct locations for different parts of - * the snapshot, as well as moving completed snapshots into place (see - * {@link #completeSnapshot}, and writing the - * {@link SnapshotDescription} to the working snapshot directory. + * the snapshot, as well as moving completed snapshots into place (see {@link #completeSnapshot}, + * and writing the {@link SnapshotDescription} to the working snapshot directory. */ @InterfaceAudience.Private public final class SnapshotDescriptionUtils { @@ -121,8 +120,7 @@ public CompletedSnaphotDirectoriesFilter(FileSystem fs) { public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp"; /** - * The configuration property that determines the filepath of the snapshot - * base working directory + * The configuration property that determines the filepath of the snapshot base working directory */ public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir"; @@ -133,10 +131,11 @@ public CompletedSnaphotDirectoriesFilter(FileSystem fs) { // Default value if no ttl is specified for Snapshot private static final long NO_SNAPSHOT_TTL_SPECIFIED = 0; - public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = "hbase.snapshot.master.timeout.millis"; + public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = + "hbase.snapshot.master.timeout.millis"; /** By default, wait 300 seconds for a snapshot to complete */ - public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5 ; + public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5; public static final String SNAPSHOT_CORRUPTED_FILE = "_CORRUPTED"; @@ -154,12 +153,12 @@ public static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.T long defaultMaxWaitTime) { String confKey; switch (type) { - case DISABLED: - default: - confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS; + case DISABLED: + default: + confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS; } return Math.max(conf.getLong(confKey, defaultMaxWaitTime), - conf.getLong(MASTER_SNAPSHOT_TIMEOUT_MILLIS, defaultMaxWaitTime)); + conf.getLong(MASTER_SNAPSHOT_TIMEOUT_MILLIS, defaultMaxWaitTime)); } /** @@ -179,7 +178,8 @@ public static Path getSnapshotRootDir(final Path rootDir) { * @param rootDir hbase root directory * @return the final directory for the completed snapshot */ - public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir) { + public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, + final Path rootDir) { return getCompletedSnapshotDir(snapshot.getName(), rootDir); } @@ -202,8 +202,8 @@ public static Path getCompletedSnapshotDir(final String snapshotName, final Path * @return Path to the snapshot tmp directory, relative to the passed root directory */ public static Path getWorkingSnapshotDir(final Path rootDir, final Configuration conf) { - return new Path(conf.get(SNAPSHOT_WORKING_DIR, - getDefaultWorkingSnapshotDir(rootDir).toString())); + return new Path( + conf.get(SNAPSHOT_WORKING_DIR, getDefaultWorkingSnapshotDir(rootDir).toString())); } /** @@ -238,6 +238,7 @@ public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir public static Path getCorruptedFlagFileForSnapshot(final Path workingDir) { return new Path(workingDir, SNAPSHOT_CORRUPTED_FILE); } + /** * Get the directory within the given filepath to store the snapshot instance * @param snapshotsDir directory to store snapshot directory within @@ -260,8 +261,8 @@ public static final Path getSnapshotsDir(Path rootDir) { * 
Determines if the given workingDir is a subdirectory of the given "root directory" * @param workingDir a directory to check * @param rootDir root directory of the HBase installation - * @return true if the given workingDir is a subdirectory of the given root directory, - * false otherwise + * @return true if the given workingDir is a subdirectory of the given root directory, false + * otherwise */ public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir) { return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR); @@ -272,11 +273,11 @@ public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir * @param workingDir a directory to check * @param conf configuration for the HBase cluster * @return true if the given workingDir is a subdirectory of the default working directory for - * snapshots, false otherwise + * snapshots, false otherwise * @throws IOException if we can't get the root dir */ public static boolean isWithinDefaultWorkingDir(final Path workingDir, Configuration conf) - throws IOException { + throws IOException { Path defaultWorkingDir = getDefaultWorkingSnapshotDir(CommonFSUtils.getRootDir(conf)); return workingDir.equals(defaultWorkingDir) || isSubDirectoryOf(workingDir, defaultWorkingDir); } @@ -321,13 +322,13 @@ public static SnapshotDescription validate(SnapshotDescription snapshot, Configu long ttl = snapshot.getTtl(); // set default ttl(sec) if it is not set already or the value is out of the range - if (ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED || - ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { - final long defaultSnapshotTtl = conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, - HConstants.DEFAULT_SNAPSHOT_TTL); + if (ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED + || ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { + final long defaultSnapshotTtl = + conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, HConstants.DEFAULT_SNAPSHOT_TTL); if (LOG.isDebugEnabled()) { LOG.debug("Snapshot current TTL value: {} resetting it to default value: {}", ttl, - defaultSnapshotTtl); + defaultSnapshotTtl); } ttl = defaultSnapshotTtl; } @@ -365,10 +366,10 @@ public static SnapshotDescription validate(SnapshotDescription snapshot, Configu */ public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs) throws IOException { - FsPermission perms = CommonFSUtils.getFilePermissions(fs, fs.getConf(), - HConstants.DATA_FILE_UMASK_KEY); + FsPermission perms = + CommonFSUtils.getFilePermissions(fs, fs.getConf(), HConstants.DATA_FILE_UMASK_KEY); Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE); - try (FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true)){ + try (FSDataOutputStream out = CommonFSUtils.create(fs, snapshotInfo, perms, true)) { snapshot.writeTo(out); } catch (IOException e) { // if we get an exception, try to remove the snapshot info @@ -390,7 +391,7 @@ public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingD public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir) throws CorruptedSnapshotException { Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE); - try (FSDataInputStream in = fs.open(snapshotInfo)){ + try (FSDataInputStream in = fs.open(snapshotInfo)) { return SnapshotDescription.parseFrom(in); } catch (IOException e) { throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + 
snapshotInfo, e); @@ -398,38 +399,34 @@ public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotD } /** - * Commits the snapshot process by moving the working snapshot - * to the finalized filepath - * + * Commits the snapshot process by moving the working snapshot to the finalized filepath * @param snapshotDir The file path of the completed snapshots - * @param workingDir The file path of the in progress snapshots + * @param workingDir The file path of the in progress snapshots * @param fs The file system of the completed snapshots * @param workingDirFs The file system of the in progress snapshots * @param conf Configuration - * * @throws SnapshotCreationException if the snapshot could not be moved * @throws IOException the filesystem could not be reached */ public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs, - FileSystem workingDirFs, final Configuration conf) - throws SnapshotCreationException, IOException { - LOG.debug("Sentinel is done, just moving the snapshot from " + workingDir + " to " - + snapshotDir); + FileSystem workingDirFs, final Configuration conf) + throws SnapshotCreationException, IOException { + LOG.debug( + "Sentinel is done, just moving the snapshot from " + workingDir + " to " + snapshotDir); // If the working and completed snapshot directory are on the same file system, attempt // to rename the working snapshot directory to the completed location. If that fails, // or the file systems differ, attempt to copy the directory over, throwing an exception // if this fails URI workingURI = workingDirFs.getUri(); URI rootURI = fs.getUri(); - if ((!workingURI.getScheme().equals(rootURI.getScheme()) || - workingURI.getAuthority() == null || - !workingURI.getAuthority().equals(rootURI.getAuthority()) || - workingURI.getUserInfo() == null || - !workingURI.getUserInfo().equals(rootURI.getUserInfo()) || - !fs.rename(workingDir, snapshotDir)) && !FileUtil.copy(workingDirFs, workingDir, fs, - snapshotDir, true, true, conf)) { + if ((!workingURI.getScheme().equals(rootURI.getScheme()) || workingURI.getAuthority() == null + || !workingURI.getAuthority().equals(rootURI.getAuthority()) + || workingURI.getUserInfo() == null + || !workingURI.getUserInfo().equals(rootURI.getUserInfo()) + || !fs.rename(workingDir, snapshotDir)) + && !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf)) { throw new SnapshotCreationException("Failed to copy working directory(" + workingDir - + ") to completed directory(" + snapshotDir + ")."); + + ") to completed directory(" + snapshotDir + ")."); } } @@ -437,8 +434,8 @@ public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSyste * Check if the user is this table snapshot's owner * @param snapshot the table snapshot description * @param user the user - * @return true if the user is the owner of the snapshot, - * false otherwise or the snapshot owner field is not present. + * @return true if the user is the owner of the snapshot, false otherwise or the snapshot owner + * field is not present. 
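The completeSnapshot() rewrite above keeps the same decision: rename when the working and completed directories share a filesystem, otherwise fall back to a copy. A simplified sketch of that decision follows; it is not the exact predicate (the original also insists on non-null authority and user info), and all parameters are assumed to be supplied by the caller.

import java.io.IOException;
import java.net.URI;
import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public final class CommitSnapshotSketch {
  // Try a cheap rename when both dirs are on the same filesystem; otherwise fall back
  // to a recursive copy, and fail if neither approach lands the snapshot in place.
  static void commit(FileSystem workingDirFs, FileSystem fs, Path workingDir, Path snapshotDir,
      Configuration conf) throws IOException {
    URI workingURI = workingDirFs.getUri();
    URI rootURI = fs.getUri();
    boolean sameFs = Objects.equals(workingURI.getScheme(), rootURI.getScheme())
      && Objects.equals(workingURI.getAuthority(), rootURI.getAuthority())
      && Objects.equals(workingURI.getUserInfo(), rootURI.getUserInfo());
    boolean renamed = sameFs && fs.rename(workingDir, snapshotDir);
    if (!renamed && !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf)) {
      throw new IOException("Failed to move snapshot from " + workingDir + " to " + snapshotDir);
    }
  }
}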
*/ public static boolean isSnapshotOwner(org.apache.hadoop.hbase.client.SnapshotDescription snapshot, User user) { @@ -447,7 +444,8 @@ public static boolean isSnapshotOwner(org.apache.hadoop.hbase.client.SnapshotDes } public static boolean isSecurityAvailable(Configuration conf) throws IOException { - try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) { + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { return admin.tableExists(PermissionStorage.ACL_TABLE_NAME); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index c3e0c103a0e8..75704347bbf2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.FileNotFoundException; @@ -57,10 +56,10 @@ /** * Tool for dumping snapshot information. *
<ol>
- * <li> Table Descriptor
- * <li> Snapshot creation time, type, format version, ...
- * <li> List of hfiles and wals
- * <li> Stats about hfiles and logs sizes, percentage of shared with the source table, ...
+ * <li>Table Descriptor
+ * <li>Snapshot creation time, type, format version, ...
+ * <li>List of hfiles and wals
+ * <li>Stats about hfiles and logs sizes, percentage of shared with the source table, ...
  * </ol>
          */ @InterfaceAudience.Public @@ -69,24 +68,24 @@ public final class SnapshotInfo extends AbstractHBaseTool { static final class Options { static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to examine."); - static final Option REMOTE_DIR = new Option(null, "remote-dir", true, - "Root directory that contains the snapshots."); - static final Option LIST_SNAPSHOTS = new Option(null, "list-snapshots", false, - "List all the available snapshots and exit."); + static final Option REMOTE_DIR = + new Option(null, "remote-dir", true, "Root directory that contains the snapshots."); + static final Option LIST_SNAPSHOTS = + new Option(null, "list-snapshots", false, "List all the available snapshots and exit."); static final Option FILES = new Option(null, "files", false, "Files and logs list."); static final Option STATS = new Option(null, "stats", false, "Files and logs stats."); - static final Option SCHEMA = new Option(null, "schema", false, - "Describe the snapshotted table."); - static final Option SIZE_IN_BYTES = new Option(null, "size-in-bytes", false, - "Print the size of the files in bytes."); + static final Option SCHEMA = + new Option(null, "schema", false, "Describe the snapshotted table."); + static final Option SIZE_IN_BYTES = + new Option(null, "size-in-bytes", false, "Print the size of the files in bytes."); } /** * Statistics about the snapshot *
<ol>
-   * <li> How many store files and logs are in the archive
-   * <li> How many store files and logs are shared with the table
-   * <li> Total store files and logs size and shared amount
+   * <li>How many store files and logs are in the archive
+   * <li>How many store files and logs are shared with the table
+   * <li>Total store files and logs size and shared amount
    * </ol>
          */ public static class SnapshotStats { @@ -149,8 +148,7 @@ String getStateToString() { private final FileSystem fs; SnapshotStats(final Configuration conf, final FileSystem fs, - final SnapshotDescription snapshot) - { + final SnapshotDescription snapshot) { this.snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); this.snapshotTable = snapshot.getTableName(); this.conf = conf; @@ -165,7 +163,6 @@ String getStateToString() { this.fs = fs; } - /** @return the snapshot descriptor */ public SnapshotDescription getSnapshotDescription() { return ProtobufUtil.createSnapshotDesc(this.snapshot); @@ -173,9 +170,7 @@ public SnapshotDescription getSnapshotDescription() { /** @return true if the snapshot is corrupted */ public boolean isSnapshotCorrupted() { - return hfilesMissing.get() > 0 || - logsMissing.get() > 0 || - hfilesCorrupted.get() > 0; + return hfilesMissing.get() > 0 || logsMissing.get() > 0 || hfilesCorrupted.get() > 0; } /** @return the number of available store files */ @@ -189,7 +184,9 @@ public int getArchivedStoreFilesCount() { } /** @return the number of available store files in the mob dir */ - public int getMobStoreFilesCount() { return hfilesMobCount.get(); } + public int getMobStoreFilesCount() { + return hfilesMobCount.get(); + } /** @return the number of available log files */ public int getLogsCount() { @@ -226,15 +223,16 @@ public long getArchivedStoreFileSize() { return hfilesArchiveSize.get(); } - /** @return the total size of the store files in the mob store*/ - public long getMobStoreFilesSize() { return hfilesMobSize.get(); } + /** @return the total size of the store files in the mob store */ + public long getMobStoreFilesSize() { + return hfilesMobSize.get(); + } - /** @return the total size of the store files in the archive which is not shared - * with other snapshots and tables - * - * This is only calculated when - * {@link #getSnapshotStats(Configuration, SnapshotProtos.SnapshotDescription, Map)} - * is called with a non-null Map + /** + * @return the total size of the store files in the archive which is not shared with other + * snapshots and tables This is only calculated when + * {@link #getSnapshotStats(Configuration, SnapshotProtos.SnapshotDescription, Map)} is + * called with a non-null Map */ public long getNonSharedArchivedStoreFilesSize() { return nonSharedHfilesArchiveSize.get(); @@ -255,11 +253,11 @@ public long getLogsSize() { return logSize.get(); } - /** Check if for a give file in archive, if there are other snapshots/tables still - * reference it. + /** + * Check if for a give file in archive, if there are other snapshots/tables still reference it. * @param filePath file path in archive - * @param snapshotFilesMap a map for store files in snapshots about how many snapshots refer - * to it. + * @param snapshotFilesMap a map for store files in snapshots about how many snapshots refer to + * it. 
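For the statistics themselves, a small sketch of how a caller might obtain and read a SnapshotStats instance through the public helpers shown in this file; the cluster configuration and completed snapshots are assumed to exist already.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotInfo;

public class SnapshotStatsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Iterate the completed snapshots and report the basic hfile accounting.
    for (SnapshotDescription desc : SnapshotInfo.getSnapshotList(conf)) {
      SnapshotInfo.SnapshotStats stats = SnapshotInfo.getSnapshotStats(conf, desc);
      System.out.println(desc.getName() + ": " + stats.getStoreFilesCount() + " hfiles ("
        + stats.getArchivedStoreFilesCount() + " archived), corrupted="
        + stats.isSnapshotCorrupted());
    }
  }
}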
* @return true or false */ private boolean isArchivedFileStillReferenced(final Path filePath, @@ -293,10 +291,10 @@ private boolean isArchivedFileStillReferenced(final Path filePath, * @return the store file information */ FileInfo addStoreFile(final RegionInfo region, final String family, - final SnapshotRegionManifest.StoreFile storeFile, - final Map filesMap) throws IOException { - HFileLink link = HFileLink.build(conf, snapshotTable, region.getEncodedName(), - family, storeFile.getName()); + final SnapshotRegionManifest.StoreFile storeFile, final Map filesMap) + throws IOException { + HFileLink link = HFileLink.build(conf, snapshotTable, region.getEncodedName(), family, + storeFile.getName()); boolean isCorrupted = false; boolean inArchive = false; long size = -1; @@ -309,8 +307,8 @@ FileInfo addStoreFile(final RegionInfo region, final String family, // If store file is not shared with other snapshots and tables, // increase nonSharedHfilesArchiveSize - if ((filesMap != null) && - !isArchivedFileStillReferenced(link.getArchivePath(), filesMap)) { + if ((filesMap != null) + && !isArchivedFileStillReferenced(link.getArchivePath(), filesMap)) { nonSharedHfilesArchiveSize.addAndGet(size); } } else if (fs.exists(link.getMobPath())) { @@ -376,11 +374,10 @@ public int doWork() throws IOException, InterruptedException { if (listSnapshots) { SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); System.out.printf("%-20s | %-20s | %-20s | %s%n", "SNAPSHOT", "CREATION TIME", "TTL IN SEC", - "TABLE NAME"); - for (SnapshotDescription desc: getSnapshotList(conf)) { + "TABLE NAME"); + for (SnapshotDescription desc : getSnapshotList(conf)) { System.out.printf("%-20s | %20s | %20s | %s%n", desc.getName(), - df.format(new Date(desc.getCreationTime())), desc.getTtl(), - desc.getTableNameAsString()); + df.format(new Date(desc.getCreationTime())), desc.getTtl(), desc.getTableNameAsString()); } return 0; } @@ -449,8 +446,8 @@ private void printSchema() { } /** - * Collect the hfiles and logs statistics of the snapshot and - * dump the file list if requested and the collected information. + * Collect the hfiles and logs statistics of the snapshot and dump the file list if requested and + * the collected information. */ private void printFiles(final boolean showFiles, final boolean showStats) throws IOException { if (showFiles) { @@ -459,13 +456,13 @@ private void printFiles(final boolean showFiles, final boolean showStats) throws } // Collect information about hfiles and logs in the snapshot - final SnapshotProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription(); + final SnapshotProtos.SnapshotDescription snapshotDesc = + snapshotManifest.getSnapshotDescription(); final String table = snapshotDesc.getTable(); final SnapshotDescription desc = ProtobufUtil.createSnapshotDesc(snapshotDesc); final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, desc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(getConf(), fs, snapshotManifest, - "SnapshotInfo", - new SnapshotReferenceUtil.SnapshotVisitor() { + "SnapshotInfo", new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { @@ -475,12 +472,12 @@ public void storeFile(final RegionInfo regionInfo, final String family, if (showFiles) { String state = info.getStateToString(); System.out.printf("%8s %s/%s/%s/%s %s%n", - (info.isMissing() ? 
"-" : fileSizeToString(info.getSize())), - table, regionInfo.getEncodedName(), family, storeFile.getName(), + (info.isMissing() ? "-" : fileSizeToString(info.getSize())), table, + regionInfo.getEncodedName(), family, storeFile.getName(), state == null ? "" : "(" + state + ")"); } } - }); + }); // Dump the stats System.out.println(); @@ -494,18 +491,15 @@ public void storeFile(final RegionInfo regionInfo, final String family, } if (showStats) { - System.out.printf("%d HFiles (%d in archive, %d in mob storage), total size %s " + - "(%.2f%% %s shared with the source table, %.2f%% %s in mob dir)%n", + System.out.printf( + "%d HFiles (%d in archive, %d in mob storage), total size %s " + + "(%.2f%% %s shared with the source table, %.2f%% %s in mob dir)%n", stats.getStoreFilesCount(), stats.getArchivedStoreFilesCount(), - stats.getMobStoreFilesCount(), - fileSizeToString(stats.getStoreFilesSize()), - stats.getSharedStoreFilePercentage(), - fileSizeToString(stats.getSharedStoreFilesSize()), - stats.getMobStoreFilePercentage(), - fileSizeToString(stats.getMobStoreFilesSize()) - ); - System.out.printf("%d Logs, total size %s%n", - stats.getLogsCount(), fileSizeToString(stats.getLogsSize())); + stats.getMobStoreFilesCount(), fileSizeToString(stats.getStoreFilesSize()), + stats.getSharedStoreFilePercentage(), fileSizeToString(stats.getSharedStoreFilesSize()), + stats.getMobStoreFilePercentage(), fileSizeToString(stats.getMobStoreFilesSize())); + System.out.printf("%d Logs, total size %s%n", stats.getLogsCount(), + fileSizeToString(stats.getLogsSize())); System.out.println(); } } @@ -529,8 +523,8 @@ protected void addOptions() { protected void processOptions(CommandLine cmd) { snapshotName = cmd.getOptionValue(Options.SNAPSHOT.getLongOpt()); showFiles = cmd.hasOption(Options.FILES.getLongOpt()); - showStats = cmd.hasOption(Options.FILES.getLongOpt()) - || cmd.hasOption(Options.STATS.getLongOpt()); + showStats = + cmd.hasOption(Options.FILES.getLongOpt()) || cmd.hasOption(Options.STATS.getLongOpt()); showSchema = cmd.hasOption(Options.SCHEMA.getLongOpt()); listSnapshots = cmd.hasOption(Options.LIST_SNAPSHOTS.getLongOpt()); printSizeInBytes = cmd.hasOption(Options.SIZE_IN_BYTES.getLongOpt()); @@ -555,34 +549,35 @@ protected void printUsage() { public static SnapshotStats getSnapshotStats(final Configuration conf, final SnapshotDescription snapshot) throws IOException { SnapshotProtos.SnapshotDescription snapshotDesc = - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); return getSnapshotStats(conf, snapshotDesc, null); } /** * Returns the snapshot stats * @param conf the {@link Configuration} to use - * @param snapshotDesc HBaseProtos.SnapshotDescription to get stats from + * @param snapshotDesc HBaseProtos.SnapshotDescription to get stats from * @param filesMap {@link Map} store files map for all snapshots, it may be null * @return the snapshot stats */ public static SnapshotStats getSnapshotStats(final Configuration conf, - final SnapshotProtos.SnapshotDescription snapshotDesc, - final Map filesMap) throws IOException { + final SnapshotProtos.SnapshotDescription snapshotDesc, final Map filesMap) + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = FileSystem.get(rootDir.toUri(), conf); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); final SnapshotStats stats = new 
SnapshotStats(conf, fs, snapshotDesc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, - "SnapshotsStatsAggregation", new SnapshotReferenceUtil.SnapshotVisitor() { - @Override - public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { - if (!storeFile.hasReference()) { - stats.addStoreFile(regionInfo, family, storeFile, filesMap); - } - }}); + "SnapshotsStatsAggregation", new SnapshotReferenceUtil.SnapshotVisitor() { + @Override + public void storeFile(final RegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + if (!storeFile.hasReference()) { + stats.addStoreFile(regionInfo, family, storeFile, filesMap); + } + } + }); return stats; } @@ -597,9 +592,9 @@ public static List getSnapshotList(final Configuration conf FileSystem fs = FileSystem.get(rootDir.toUri(), conf); Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir); FileStatus[] snapshots = fs.listStatus(snapshotDir, - new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); + new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); List snapshotLists = new ArrayList<>(snapshots.length); - for (FileStatus snapshotDirStat: snapshots) { + for (FileStatus snapshotDirStat : snapshots) { SnapshotProtos.SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()); snapshotLists.add(ProtobufUtil.createSnapshotDesc(snapshotDesc)); @@ -619,9 +614,8 @@ public static List getSnapshotList(final Configuration conf */ private static void getSnapshotFilesMap(final Configuration conf, final SnapshotDescription snapshot, final ExecutorService exec, - final ConcurrentHashMap filesMap, - final AtomicLong uniqueHFilesArchiveSize, final AtomicLong uniqueHFilesSize, - final AtomicLong uniqueHFilesMobSize) throws IOException { + final ConcurrentHashMap filesMap, final AtomicLong uniqueHFilesArchiveSize, + final AtomicLong uniqueHFilesSize, final AtomicLong uniqueHFilesMobSize) throws IOException { SnapshotProtos.SnapshotDescription snapshotDesc = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); Path rootDir = CommonFSUtils.getRootDir(conf); @@ -630,44 +624,45 @@ private static void getSnapshotFilesMap(final Configuration conf, Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, exec, - new SnapshotReferenceUtil.SnapshotVisitor() { - @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { - if (!storeFile.hasReference()) { - HFileLink link = HFileLink.build(conf, snapshot.getTableName(), - regionInfo.getEncodedName(), family, storeFile.getName()); - long size; - Integer count; - Path p; - AtomicLong al; - int c = 0; - - if (fs.exists(link.getArchivePath())) { - p = link.getArchivePath(); - al = uniqueHFilesArchiveSize; - size = fs.getFileStatus(p).getLen(); - } else if (fs.exists(link.getMobPath())) { - p = link.getMobPath(); - al = uniqueHFilesMobSize; - size = fs.getFileStatus(p).getLen(); - } else { - p = link.getOriginPath(); - al = uniqueHFilesSize; - size = link.getFileStatus(fs).getLen(); - } - - // If it has been counted, do not double count - count = filesMap.get(p); - if (count != null) { - 
c = count.intValue(); - } else { - al.addAndGet(size); - } - - filesMap.put(p, ++c); + new SnapshotReferenceUtil.SnapshotVisitor() { + @Override + public void storeFile(final RegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + if (!storeFile.hasReference()) { + HFileLink link = HFileLink.build(conf, snapshot.getTableName(), + regionInfo.getEncodedName(), family, storeFile.getName()); + long size; + Integer count; + Path p; + AtomicLong al; + int c = 0; + + if (fs.exists(link.getArchivePath())) { + p = link.getArchivePath(); + al = uniqueHFilesArchiveSize; + size = fs.getFileStatus(p).getLen(); + } else if (fs.exists(link.getMobPath())) { + p = link.getMobPath(); + al = uniqueHFilesMobSize; + size = fs.getFileStatus(p).getLen(); + } else { + p = link.getOriginPath(); + al = uniqueHFilesSize; + size = link.getFileStatus(fs).getLen(); } + + // If it has been counted, do not double count + count = filesMap.get(p); + if (count != null) { + c = count.intValue(); + } else { + al.addAndGet(size); + } + + filesMap.put(p, ++c); } - }); + } + }); } /** @@ -683,7 +678,6 @@ public static Map getSnapshotsFilesMap(final Configuration conf, AtomicLong uniqueHFilesMobSize) throws IOException { List snapshotList = getSnapshotList(conf); - if (snapshotList.isEmpty()) { return Collections.emptyMap(); } @@ -695,7 +689,7 @@ public static Map getSnapshotsFilesMap(final Configuration conf, try { for (final SnapshotDescription snapshot : snapshotList) { getSnapshotFilesMap(conf, snapshot, exec, fileMap, uniqueHFilesArchiveSize, - uniqueHFilesSize, uniqueHFilesMobSize); + uniqueHFilesSize, uniqueHFilesMobSize); } } finally { exec.shutdown(); @@ -704,7 +698,6 @@ public static Map getSnapshotsFilesMap(final Configuration conf, return fileMap; } - public static void main(String[] args) { new SnapshotInfo().doStaticMain(args); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index f154aa92cd6e..cc05c4b7fba1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.FileNotFoundException; @@ -66,11 +65,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * Utility class to help read/write the Snapshot Manifest. - * - * The snapshot format is transparent for the users of this class, - * once the snapshot is written, it will never be modified. - * On open() the snapshot will be loaded to the current in-memory format. + * Utility class to help read/write the Snapshot Manifest. The snapshot format is transparent for + * the users of this class, once the snapshot is written, it will never be modified. On open() the + * snapshot will be loaded to the current in-memory format. 
*/ @InterfaceAudience.Private public final class SnapshotManifest { @@ -93,18 +90,17 @@ public final class SnapshotManifest { private final MonitoredTask statusTask; /** - * * @param conf configuration file for HBase setup * @param rootFs root filesystem containing HFiles * @param workingDir file path of where the manifest should be located * @param desc description of snapshot being taken * @param monitor monitor of foreign exceptions - * @throws IOException if the working directory file system cannot be - * determined from the config file + * @throws IOException if the working directory file system cannot be determined from the config + * file */ - private SnapshotManifest(final Configuration conf, final FileSystem rootFs, - final Path workingDir, final SnapshotDescription desc, - final ForeignExceptionSnare monitor, final MonitoredTask statusTask) throws IOException { + private SnapshotManifest(final Configuration conf, final FileSystem rootFs, final Path workingDir, + final SnapshotDescription desc, final ForeignExceptionSnare monitor, + final MonitoredTask statusTask) throws IOException { this.monitor = monitor; this.desc = desc; this.workingDir = workingDir; @@ -116,21 +112,15 @@ private SnapshotManifest(final Configuration conf, final FileSystem rootFs, } /** - * Return a SnapshotManifest instance, used for writing a snapshot. - * - * There are two usage pattern: - * - The Master will create a manifest, add the descriptor, offline regions - * and consolidate the snapshot by writing all the pending stuff on-disk. - * manifest = SnapshotManifest.create(...) - * manifest.addRegion(tableDir, hri) - * manifest.consolidate() - * - The RegionServer will create a single region manifest - * manifest = SnapshotManifest.create(...) - * manifest.addRegion(region) + * Return a SnapshotManifest instance, used for writing a snapshot. There are two usage pattern: - + * The Master will create a manifest, add the descriptor, offline regions and consolidate the + * snapshot by writing all the pending stuff on-disk. manifest = SnapshotManifest.create(...) + * manifest.addRegion(tableDir, hri) manifest.consolidate() - The RegionServer will create a + * single region manifest manifest = SnapshotManifest.create(...) manifest.addRegion(region) */ public static SnapshotManifest create(final Configuration conf, final FileSystem fs, - final Path workingDir, final SnapshotDescription desc, - final ForeignExceptionSnare monitor) throws IOException { + final Path workingDir, final SnapshotDescription desc, final ForeignExceptionSnare monitor) + throws IOException { return create(conf, fs, workingDir, desc, monitor, null); } @@ -144,12 +134,10 @@ public static SnapshotManifest create(final Configuration conf, final FileSystem /** * Return a SnapshotManifest instance with the information already loaded in-memory. - * SnapshotManifest manifest = SnapshotManifest.open(...) - * TableDescriptor htd = manifest.getDescriptor() - * for (SnapshotRegionManifest regionManifest: manifest.getRegionManifests()) - * hri = regionManifest.getRegionInfo() - * for (regionManifest.getFamilyFiles()) - * ... + * SnapshotManifest manifest = SnapshotManifest.open(...) TableDescriptor htd = + * manifest.getDescriptor() for (SnapshotRegionManifest regionManifest: + * manifest.getRegionManifests()) hri = regionManifest.getRegionInfo() for + * (regionManifest.getFamilyFiles()) ... 
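The read-side usage pattern spelled out in the reflowed javadoc above looks roughly like the following sketch; the snapshot name is hypothetical, and the snapshot directory and descriptor are resolved the same way copySnapshotForScanner does earlier in this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class ReadSnapshotManifestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("mySnapshot", rootDir);
    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    // open() loads whatever on-disk format was written into the in-memory representation.
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
    TableDescriptor htd = manifest.getTableDescriptor();
    System.out.println("Snapshot of table " + htd.getTableName());
    for (SnapshotRegionManifest regionManifest : manifest.getRegionManifests()) {
      System.out.println("  region with " + regionManifest.getFamilyFilesList().size()
        + " families");
    }
  }
}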
*/ public static SnapshotManifest open(final Configuration conf, final FileSystem fs, final Path workingDir, final SnapshotDescription desc) throws IOException { @@ -158,7 +146,6 @@ public static SnapshotManifest open(final Configuration conf, final FileSystem f return manifest; } - /** * Add the table descriptor to the snapshot manifest */ @@ -168,13 +155,15 @@ public void addTableDescriptor(final TableDescriptor htd) throws IOException { interface RegionVisitor { TRegion regionOpen(final RegionInfo regionInfo) throws IOException; + void regionClose(final TRegion region) throws IOException; TFamily familyOpen(final TRegion region, final byte[] familyName) throws IOException; + void familyClose(final TRegion region, final TFamily family) throws IOException; void storeFile(final TRegion region, final TFamily family, final StoreFileInfo storeFile) - throws IOException; + throws IOException; } private RegionVisitor createRegionVisitor(final SnapshotDescription desc) throws IOException { @@ -184,8 +173,8 @@ private RegionVisitor createRegionVisitor(final SnapshotDescription desc) throws case SnapshotManifestV2.DESCRIPTOR_VERSION: return new SnapshotManifestV2.ManifestBuilder(conf, rootFs, workingDir); default: - throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), - ProtobufUtil.createSnapshotDesc(desc)); + throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), + ProtobufUtil.createSnapshotDesc(desc)); } } @@ -256,13 +245,13 @@ protected void addRegion(final HRegion region, RegionVisitor visitor) throws IOE for (HStore store : region.getStores()) { // 2.1. build the snapshot reference for the store - Object familyData = visitor.familyOpen(regionData, - store.getColumnFamilyDescriptor().getName()); + Object familyData = + visitor.familyOpen(regionData, store.getColumnFamilyDescriptor().getName()); monitor.rethrowException(); List storeFiles = new ArrayList<>(store.getStorefiles()); if (LOG.isDebugEnabled()) { - LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); + LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); } // 2.2. iterate through all the store's files and create "references". @@ -271,8 +260,8 @@ protected void addRegion(final HRegion region, RegionVisitor visitor) throws IOE monitor.rethrowException(); // create "reference" to this store file. - LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath() + - " for snapshot=" + snapshotName); + LOG.debug("Adding reference for file (" + (i + 1) + "/" + sz + "): " + storeFile.getPath() + + " for snapshot=" + snapshotName); visitor.storeFile(regionData, familyData, storeFile.getFileInfo()); } visitor.familyClose(regionData, familyData); @@ -281,8 +270,8 @@ protected void addRegion(final HRegion region, RegionVisitor visitor) throws IOE } /** - * Creates a 'manifest' for the specified region, by reading directly from the disk. - * This is used by the "offline snapshot" when the table is disabled. + * Creates a 'manifest' for the specified region, by reading directly from the disk. This is used + * by the "offline snapshot" when the table is disabled. 
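On the write side, the Master-style pattern described in the class comment (create the manifest, add offline regions read from disk, then consolidate) might look like this sketch; the working directory, descriptor, table descriptor and region list are assumed to come from the surrounding snapshot procedure.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;

public final class WriteSnapshotManifestSketch {
  // Sketch of the master-side write pattern: create the manifest in the working dir,
  // record the table schema, add each offline region read from disk, then consolidate
  // everything into the single on-disk manifest.
  static void writeManifest(Configuration conf, FileSystem rootFs, Path workingDir, Path tableDir,
      SnapshotDescription desc, TableDescriptor htd, List<RegionInfo> regions) throws IOException {
    ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher();
    SnapshotManifest manifest = SnapshotManifest.create(conf, rootFs, workingDir, desc, monitor);
    manifest.addTableDescriptor(htd);
    for (RegionInfo regionInfo : regions) {
      manifest.addRegion(tableDir, regionInfo);
    }
    manifest.consolidate();
  }
}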
*/ public void addRegion(final Path tableDir, final RegionInfo regionInfo) throws IOException { // Get the ManifestBuilder/RegionVisitor @@ -293,7 +282,7 @@ public void addRegion(final Path tableDir, final RegionInfo regionInfo) throws I } protected void addRegion(Path tableDir, RegionInfo regionInfo, RegionVisitor visitor) - throws IOException { + throws IOException { boolean isMobRegion = MobUtils.isMobRegionInfo(regionInfo); try { Path baseDir = tableDir; @@ -302,7 +291,7 @@ protected void addRegion(Path tableDir, RegionInfo regionInfo, RegionVisitor vis baseDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), regionInfo.getTable()); } HRegionFileSystem regionFs = - HRegionFileSystem.openRegionFromFileSystem(conf, rootFs, baseDir, regionInfo, true); + HRegionFileSystem.openRegionFromFileSystem(conf, rootFs, baseDir, regionInfo, true); monitor.rethrowException(); // 1. dump region meta info into the snapshot directory @@ -362,11 +351,11 @@ private void addReferenceFiles(RegionVisitor visitor, Object regionData, Object int i = 0; int sz = storeFiles.size(); - for (StoreFileInfo storeFile: storeFiles) { + for (StoreFileInfo storeFile : storeFiles) { monitor.rethrowException(); - LOG.debug(String.format("Adding reference for %s (%d/%d): %s", - fileType, ++i, sz, storeFile.getPath())); + LOG.debug(String.format("Adding reference for %s (%d/%d): %s", fileType, ++i, sz, + storeFile.getPath())); // create "reference" to this store file. visitor.storeFile(regionData, familyData, storeFile); @@ -374,11 +363,9 @@ private void addReferenceFiles(RegionVisitor visitor, Object regionData, Object } /** - * Load the information in the SnapshotManifest. Called by SnapshotManifest.open() - * - * If the format is v2 and there is no data-manifest, means that we are loading an - * in-progress snapshot. Since we support rolling-upgrades, we loook for v1 and v2 - * regions format. + * Load the information in the SnapshotManifest. Called by SnapshotManifest.open() If the format + * is v2 and there is no data-manifest, means that we are loading an in-progress snapshot. Since + * we support rolling-upgrades, we loook for v1 and v2 regions format. 
*/ private void load() throws IOException { switch (getSnapshotFormat(desc)) { @@ -387,7 +374,7 @@ private void load() throws IOException { ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { this.regionManifests = - SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc); + SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc); } finally { tpool.shutdown(); } @@ -404,13 +391,13 @@ private void load() throws IOException { List v1Regions, v2Regions; ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { - v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, - workingDir, desc); - v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs, - workingDir, desc, manifestSizeLimit); + v1Regions = + SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc); + v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs, workingDir, + desc, manifestSizeLimit); } catch (InvalidProtocolBufferException e) { - throw new CorruptedSnapshotException("unable to parse region manifest " + - e.getMessage(), e); + throw new CorruptedSnapshotException( + "unable to parse region manifest " + e.getMessage(), e); } finally { tpool.shutdown(); } @@ -427,8 +414,8 @@ private void load() throws IOException { break; } default: - throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), - ProtobufUtil.createSnapshotDesc(desc)); + throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), + ProtobufUtil.createSnapshotDesc(desc)); } } @@ -467,14 +454,14 @@ private void setStatusMsg(String msg) { } /** - * Get all the Region Manifest from the snapshot. - * This is an helper to get a map with the region encoded name + * Get all the Region Manifest from the snapshot. This is an helper to get a map with the region + * encoded name */ public Map getRegionManifestsMap() { if (regionManifests == null || regionManifests.isEmpty()) return null; HashMap regionsMap = new HashMap<>(regionManifests.size()); - for (SnapshotRegionManifest manifest: regionManifests) { + for (SnapshotRegionManifest manifest : regionManifests) { String regionName = getRegionNameFromManifest(manifest); regionsMap.put(regionName, manifest); } @@ -486,7 +473,7 @@ public void consolidate() throws IOException { LOG.info("Using old Snapshot Format"); // write a copy of descriptor to the snapshot directory FSTableDescriptors.createTableDescriptorForTableDirectory(workingDirFs, workingDir, htd, - false); + false); } else { LOG.debug("Convert to Single Snapshot Manifest for {}", this.desc.getName()); convertToV2SingleManifest(); @@ -494,8 +481,8 @@ public void consolidate() throws IOException { } /* - * In case of rolling-upgrade, we try to read all the formats and build - * the snapshot with the latest format. + * In case of rolling-upgrade, we try to read all the formats and build the snapshot with the + * latest format. 
*/ private void convertToV2SingleManifest() throws IOException { // Try to load v1 and v2 regions @@ -503,10 +490,10 @@ private void convertToV2SingleManifest() throws IOException { ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); setStatusMsg("Loading Region manifests for " + this.desc.getName()); try { - v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs, - workingDir, desc); - v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs, - workingDir, desc, manifestSizeLimit); + v1Regions = + SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs, workingDir, desc); + v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs, workingDir, + desc, manifestSizeLimit); SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder(); dataManifestBuilder.setTableSchema(ProtobufUtil.toTableSchema(htd)); @@ -535,7 +522,7 @@ private void convertToV2SingleManifest() throws IOException { int totalDeletes = 0; ExecutorCompletionService completionService = new ExecutorCompletionService<>(tpool); if (v1Regions != null) { - for (SnapshotRegionManifest regionManifest: v1Regions) { + for (SnapshotRegionManifest regionManifest : v1Regions) { ++totalDeletes; completionService.submit(() -> { SnapshotManifestV1.deleteRegionManifest(workingDirFs, workingDir, regionManifest); @@ -544,7 +531,7 @@ private void convertToV2SingleManifest() throws IOException { } } if (v2Regions != null) { - for (SnapshotRegionManifest regionManifest: v2Regions) { + for (SnapshotRegionManifest regionManifest : v2Regions) { ++totalDeletes; completionService.submit(() -> { SnapshotManifestV2.deleteRegionManifest(workingDirFs, workingDir, regionManifest); @@ -570,9 +557,9 @@ private void convertToV2SingleManifest() throws IOException { /* * Write the SnapshotDataManifest file */ - private void writeDataManifest(final SnapshotDataManifest manifest) - throws IOException { - try (FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) { + private void writeDataManifest(final SnapshotDataManifest manifest) throws IOException { + try ( + FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME))) { manifest.writeTo(stream); } } @@ -600,7 +587,7 @@ public static ThreadPoolExecutor createExecutor(final Configuration conf, final int maxThreads = conf.getInt("hbase.snapshot.thread.pool.max", 8); return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactoryBuilder().setNameFormat(name + "-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); } /** @@ -608,9 +595,9 @@ public static ThreadPoolExecutor createExecutor(final Configuration conf, final */ static String getRegionNameFromManifest(final SnapshotRegionManifest manifest) { byte[] regionName = RegionInfo.createRegionName( - ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()), - manifest.getRegionInfo().getStartKey().toByteArray(), - manifest.getRegionInfo().getRegionId(), true); + ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()), + manifest.getRegionInfo().getStartKey().toByteArray(), manifest.getRegionInfo().getRegionId(), + true); return RegionInfo.encodeRegionName(regionName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java index b1eca35febf2..d7fcf8299da2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.IOException; @@ -48,13 +47,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. - * - * Snapshot v1 layout format - * - Each region in the table is represented by a directory with the .hregioninfo file - * /snapshotName/regionName/.hregioninfo - * - Each file present in the table is represented by an empty file - * /snapshotName/regionName/familyName/fileName + * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. Snapshot v1 layout format - Each region in the + * table is represented by a directory with the .hregioninfo file + * /snapshotName/regionName/.hregioninfo - Each file present in the table is represented by an empty + * file /snapshotName/regionName/familyName/fileName */ @InterfaceAudience.Private public final class SnapshotManifestV1 { @@ -65,8 +61,7 @@ public final class SnapshotManifestV1 { private SnapshotManifestV1() { } - static class ManifestBuilder implements SnapshotManifest.RegionVisitor< - HRegionFileSystem, Path> { + static class ManifestBuilder implements SnapshotManifest.RegionVisitor { private final Configuration conf; private final Path snapshotDir; private final FileSystem rootFs; @@ -82,8 +77,8 @@ public ManifestBuilder(final Configuration conf, final FileSystem rootFs, @Override public HRegionFileSystem regionOpen(final RegionInfo regionInfo) throws IOException { - HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf, - workingDirFs, snapshotDir, regionInfo); + HRegionFileSystem snapshotRegionFs = + HRegionFileSystem.createRegionOnFileSystem(conf, workingDirFs, snapshotDir, regionInfo); return snapshotRegionFs; } @@ -110,8 +105,8 @@ public void storeFile(final HRegionFileSystem region, final Path familyDir, // write the Reference object to the snapshot storeFile.getReference().write(workingDirFs, referenceFile); } else { - // create "reference" to this store file. It is intentionally an empty file -- all - // necessary information is captured by its fs location and filename. This allows us to + // create "reference" to this store file. It is intentionally an empty file -- all + // necessary information is captured by its fs location and filename. This allows us to // only figure out what needs to be done via a single nn operation (instead of having to // open and read the files as well). 
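The comment above is the heart of the v1 format: a store file is snapshotted by creating an empty marker whose path already says everything (snapshot, region, family, file name). A stripped-down sketch of that idea, assuming the working-directory filesystem and snapshot directory are already set up by the caller:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class V1ReferenceFileSketch {
  // For a plain store file the v1 layout only needs an empty file at
  // <snapshotDir>/<regionName>/<familyName>/<hfileName>; its location and name carry
  // all the information, so writing it is a single namenode operation.
  static boolean addStoreFileMarker(FileSystem workingDirFs, Path snapshotDir, String regionName,
      String familyName, String hfileName) throws IOException {
    Path referenceFile =
      new Path(new Path(new Path(snapshotDir, regionName), familyName), hfileName);
    return workingDirFs.createNewFile(referenceFile);
  }
}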
success = workingDirFs.createNewFile(referenceFile); @@ -123,18 +118,18 @@ public void storeFile(final HRegionFileSystem region, final Path familyDir, } static List loadRegionManifests(final Configuration conf, - final Executor executor,final FileSystem fs, final Path snapshotDir, + final Executor executor, final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException { FileStatus[] regions = - CommonFSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs)); + CommonFSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { LOG.debug("No regions under directory:" + snapshotDir); return null; } final ExecutorCompletionService completionService = - new ExecutorCompletionService<>(executor); - for (final FileStatus region: regions) { + new ExecutorCompletionService<>(executor); + for (final FileStatus region : regions) { completionService.submit(new Callable() { @Override public SnapshotRegionManifest call() throws IOException { @@ -163,10 +158,10 @@ static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir, fs.delete(new Path(snapshotDir, regionName), true); } - static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, - final FileSystem fs, final Path tableDir, final RegionInfo regionInfo) throws IOException { - HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, - tableDir, regionInfo, true); + static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, final FileSystem fs, + final Path tableDir, final RegionInfo regionInfo) throws IOException { + HRegionFileSystem regionFs = + HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, true); SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder(); // 1. dump region meta info into the snapshot directory @@ -183,7 +178,7 @@ static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, // files/batch, far more than the number of store files under a single column family. Collection familyNames = regionFs.getFamilies(); if (familyNames != null) { - for (String familyName: familyNames) { + for (String familyName : familyNames) { Collection storeFiles = regionFs.getStoreFiles(familyName, false); if (storeFiles == null) { LOG.debug("No files under family: " + familyName); @@ -192,21 +187,21 @@ static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf, // 2.1. build the snapshot reference for the store SnapshotRegionManifest.FamilyFiles.Builder family = - SnapshotRegionManifest.FamilyFiles.newBuilder(); + SnapshotRegionManifest.FamilyFiles.newBuilder(); family.setFamilyName(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(familyName))); if (LOG.isDebugEnabled()) { - LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); + LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); } // 2.2. iterate through all the store's files and create "references". int i = 0; int sz = storeFiles.size(); - for (StoreFileInfo storeFile: storeFiles) { + for (StoreFileInfo storeFile : storeFiles) { // create "reference" to this store file. 
- LOG.debug("Adding reference for file ("+ (++i) +"/" + sz + "): " + storeFile.getPath()); + LOG.debug("Adding reference for file (" + (++i) + "/" + sz + "): " + storeFile.getPath()); SnapshotRegionManifest.StoreFile.Builder sfManifest = - SnapshotRegionManifest.StoreFile.newBuilder(); + SnapshotRegionManifest.StoreFile.newBuilder(); sfManifest.setName(storeFile.getPath().getName()); family.addStoreFiles(sfManifest.build()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java index ae914f69b5cc..efac80c244d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.IOException; @@ -49,12 +48,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. - * - * Snapshot v2 layout format - * - Single Manifest file containing all the information of regions - * - In the online-snapshot case each region will write a "region manifest" - * /snapshotName/manifest.regionName + * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. Snapshot v2 layout format - Single Manifest + * file containing all the information of regions - In the online-snapshot case each region will + * write a "region manifest" /snapshotName/manifest.regionName */ @InterfaceAudience.Private public final class SnapshotManifestV2 { @@ -64,10 +60,11 @@ public final class SnapshotManifestV2 { public static final String SNAPSHOT_MANIFEST_PREFIX = "region-manifest."; - private SnapshotManifestV2() {} + private SnapshotManifestV2() { + } - static class ManifestBuilder implements SnapshotManifest.RegionVisitor< - SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> { + static class ManifestBuilder implements + SnapshotManifest.RegionVisitor { private final Configuration conf; private final Path snapshotDir; private final FileSystem rootFs; @@ -93,8 +90,8 @@ public void regionClose(final SnapshotRegionManifest.Builder region) throws IOEx FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf); if (workingDirFs.exists(snapshotDir)) { SnapshotRegionManifest manifest = region.build(); - try (FSDataOutputStream stream = workingDirFs.create( - getRegionManifestPath(snapshotDir, manifest))) { + try (FSDataOutputStream stream = + workingDirFs.create(getRegionManifestPath(snapshotDir, manifest))) { manifest.writeTo(stream); } } else { @@ -103,8 +100,8 @@ public void regionClose(final SnapshotRegionManifest.Builder region) throws IOEx } @Override - public SnapshotRegionManifest.FamilyFiles.Builder familyOpen( - final SnapshotRegionManifest.Builder region, final byte[] familyName) { + public SnapshotRegionManifest.FamilyFiles.Builder + familyOpen(final SnapshotRegionManifest.Builder region, final byte[] familyName) { SnapshotRegionManifest.FamilyFiles.Builder family = SnapshotRegionManifest.FamilyFiles.newBuilder(); family.setFamilyName(UnsafeByteOperations.unsafeWrap(familyName)); @@ 
-122,7 +119,7 @@ public void storeFile(final SnapshotRegionManifest.Builder region, final SnapshotRegionManifest.FamilyFiles.Builder family, final StoreFileInfo storeFile) throws IOException { SnapshotRegionManifest.StoreFile.Builder sfManifest = - SnapshotRegionManifest.StoreFile.newBuilder(); + SnapshotRegionManifest.StoreFile.newBuilder(); sfManifest.setName(storeFile.getPath().getName()); if (storeFile.isReference()) { sfManifest.setReference(storeFile.getReference().convert()); @@ -149,8 +146,8 @@ public boolean accept(Path path) { if (manifestFiles == null || manifestFiles.length == 0) return null; final ExecutorCompletionService completionService = - new ExecutorCompletionService<>(executor); - for (final FileStatus st: manifestFiles) { + new ExecutorCompletionService<>(executor); + for (final FileStatus st : manifestFiles) { completionService.submit(new Callable() { @Override public SnapshotRegionManifest call() throws IOException { @@ -173,8 +170,8 @@ public SnapshotRegionManifest call() throws IOException { } catch (ExecutionException e) { Throwable t = e.getCause(); - if(t instanceof InvalidProtocolBufferException) { - throw (InvalidProtocolBufferException)t; + if (t instanceof InvalidProtocolBufferException) { + throw (InvalidProtocolBufferException) t; } else { throw new IOException("ExecutionException", e.getCause()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java index 2e8c0dfdff96..421eb2567235 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.FileNotFoundException; @@ -28,7 +27,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -42,6 +40,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; @@ -55,7 +54,7 @@ public final class SnapshotReferenceUtil { public interface StoreFileVisitor { void storeFile(final RegionInfo regionInfo, final String familyName, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException; + final SnapshotRegionManifest.StoreFile storeFile) throws IOException; } public interface SnapshotVisitor extends StoreFileVisitor { @@ -67,7 +66,6 @@ private SnapshotReferenceUtil() { /** * Iterate over the snapshot store files - * * @param conf The current {@link Configuration} instance. 
* @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory @@ -75,15 +73,13 @@ private SnapshotReferenceUtil() { * @throws IOException if an error occurred while scanning the directory */ public static void visitReferencedFiles(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotVisitor visitor) - throws IOException { + final Path snapshotDir, final SnapshotVisitor visitor) throws IOException { SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); visitReferencedFiles(conf, fs, snapshotDir, desc, visitor); } /** * Iterate over the snapshot store files, restored.edits and logs - * * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory @@ -97,9 +93,8 @@ public static void visitReferencedFiles(final Configuration conf, final FileSyst visitTableStoreFiles(conf, fs, snapshotDir, desc, visitor); } - /**© - * Iterate over the snapshot store files - * + /** + * © Iterate over the snapshot store files * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory @@ -117,14 +112,13 @@ static void visitTableStoreFiles(final Configuration conf, final FileSystem fs, return; } - for (SnapshotRegionManifest regionManifest: regionManifests) { + for (SnapshotRegionManifest regionManifest : regionManifests) { visitRegionStoreFiles(regionManifest, visitor); } } /** * Iterate over the snapshot store files in the specified region - * * @param manifest snapshot manifest to inspect * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory @@ -132,9 +126,9 @@ static void visitTableStoreFiles(final Configuration conf, final FileSystem fs, public static void visitRegionStoreFiles(final SnapshotRegionManifest manifest, final StoreFileVisitor visitor) throws IOException { RegionInfo regionInfo = ProtobufUtil.toRegionInfo(manifest.getRegionInfo()); - for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + for (SnapshotRegionManifest.FamilyFiles familyFiles : manifest.getFamilyFilesList()) { String familyName = familyFiles.getFamilyName().toStringUtf8(); - for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) { + for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) { visitor.storeFile(regionInfo, familyName, storeFile); } } @@ -142,7 +136,6 @@ public static void visitRegionStoreFiles(final SnapshotRegionManifest manifest, /** * Verify the validity of the snapshot - * * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify @@ -158,7 +151,6 @@ public static void verifySnapshot(final Configuration conf, final FileSystem fs, /** * Verify the validity of the snapshot - * * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} * @param manifest snapshot manifest to inspect @@ -179,12 +171,11 @@ public void storeFile(final RegionInfo regionInfo, final String family, } /** - * Verify the validity of the snapshot. - * + * Verify the validity of the snapshot. 
* @param visitor user-specified store file visitor */ public static void verifySnapshot(final Configuration conf, final FileSystem fs, - final SnapshotManifest manifest, final StoreFileVisitor visitor) throws IOException { + final SnapshotManifest manifest, final StoreFileVisitor visitor) throws IOException { concurrentVisitReferencedFiles(conf, fs, manifest, "VerifySnapshot", visitor); } @@ -224,7 +215,8 @@ public static void concurrentVisitReferencedFiles(final Configuration conf, fina for (final SnapshotRegionManifest regionManifest : regionManifests) { completionService.submit(new Callable() { - @Override public Void call() throws IOException { + @Override + public Void call() throws IOException { visitRegionStoreFiles(regionManifest, visitor); return null; } @@ -248,7 +240,6 @@ public static void concurrentVisitReferencedFiles(final Configuration conf, fina /** * Verify the validity of the snapshot store file - * * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify @@ -291,8 +282,8 @@ public static void verifyStoreFile(final Configuration conf, final FileSystem fs } else if (HFileLink.isHFileLink(fileName)) { linkPath = new Path(family, fileName); } else { - linkPath = new Path(family, HFileLink.createHFileLinkName( - table, regionInfo.getEncodedName(), fileName)); + linkPath = new Path(family, + HFileLink.createHFileLinkName(table, regionInfo.getEncodedName(), fileName)); } // check if the linked file exists (in the archive, or in the table dir) @@ -300,7 +291,7 @@ public static void verifyStoreFile(final Configuration conf, final FileSystem fs if (MobUtils.isMobRegionInfo(regionInfo)) { // for mob region link = HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), linkPath); + HFileArchiveUtil.getArchivePath(conf), linkPath); } else { // not mob region link = HFileLink.buildFromHFileLinkPattern(conf, linkPath); @@ -308,25 +299,21 @@ public static void verifyStoreFile(final Configuration conf, final FileSystem fs try { FileStatus fstat = link.getFileStatus(fs); if (storeFile.hasFileSize() && storeFile.getFileSize() != fstat.getLen()) { - String msg = "hfile: " + fileName + " size does not match with the expected one. " + - " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize(); + String msg = "hfile: " + fileName + " size does not match with the expected one. " + + " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize(); LOG.error(msg); - throw new CorruptedSnapshotException(msg, - ProtobufUtil.createSnapshotDesc(snapshot)); + throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot)); } } catch (FileNotFoundException e) { - String msg = "Can't find hfile: " + fileName + " in the real (" + - link.getOriginPath() + ") or archive (" + link.getArchivePath() - + ") directory for the primary table."; + String msg = "Can't find hfile: " + fileName + " in the real (" + link.getOriginPath() + + ") or archive (" + link.getArchivePath() + ") directory for the primary table."; LOG.error(msg); - throw new CorruptedSnapshotException(msg, - ProtobufUtil.createSnapshotDesc(snapshot)); + throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot)); } } /** * Returns the store file names in the snapshot. - * * @param conf The current {@link Configuration} instance. 
* @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory @@ -341,7 +328,6 @@ public static Set getHFileNames(final Configuration conf, final FileSyst /** * Returns the store file names in the snapshot. - * * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory @@ -350,13 +336,12 @@ public static Set getHFileNames(final Configuration conf, final FileSyst * @return the names of hfiles in the specified snaphot */ private static Set getHFileNames(final Configuration conf, final FileSystem fs, - final Path snapshotDir, final SnapshotDescription snapshotDesc) - throws IOException { + final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { final Set names = new HashSet<>(); visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new StoreFileVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, - final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { String hfile = storeFile.getName(); if (HFileLink.isHFileLink(hfile)) { names.add(HFileLink.getReferencedHFileName(hfile)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java index a5d4f1e56d36..375e2676b548 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public interface BulkLoadHFiles { static final String RETRY_ON_IO_EXCEPTION = "hbase.bulkload.retries.retryOnIOException"; static final String MAX_FILES_PER_REGION_PER_FAMILY = - "hbase.mapreduce.bulkload.max.hfiles.perRegion.perFamily"; + "hbase.mapreduce.bulkload.max.hfiles.perRegion.perFamily"; static final String ASSIGN_SEQ_IDS = "hbase.mapreduce.bulkload.assign.sequenceNumbers"; static final String CREATE_TABLE_CONF_KEY = "create.table"; static final String IGNORE_UNMATCHED_CF_CONF_KEY = "ignore.unmatched.families"; @@ -85,13 +85,12 @@ Map bulkLoad(TableName tableName, Map(), - new ThreadFactoryBuilder().setNameFormat("BulkLoadHFilesTool-%1$d").setDaemon(true).build()); + new LinkedBlockingQueue<>(), new ThreadFactoryBuilder() + .setNameFormat("BulkLoadHFilesTool-%1$d").setDaemon(true).build()); pool.allowCoreThreadTimeOut(true); return pool; } @@ -197,14 +197,14 @@ private static boolean shouldCopyHFileMetaKey(byte[] key) { private static void validateFamiliesInHFiles(TableDescriptor tableDesc, Deque queue, boolean silence) throws IOException { Set familyNames = Arrays.stream(tableDesc.getColumnFamilies()) - .map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet()); + .map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet()); List unmatchedFamilies = queue.stream().map(item -> Bytes.toString(item.getFamily())) - .filter(fn -> !familyNames.contains(fn)).distinct().collect(Collectors.toList()); + .filter(fn -> !familyNames.contains(fn)).distinct().collect(Collectors.toList()); if (unmatchedFamilies.size() > 0) { String msg = - "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " + - 
unmatchedFamilies + "; valid family names of table " + tableDesc.getTableName() + - " are: " + familyNames; + "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: " + + unmatchedFamilies + "; valid family names of table " + tableDesc.getTableName() + + " are: " + familyNames; LOG.error(msg); if (!silence) { throw new IOException(msg); @@ -306,8 +306,8 @@ public void bulkHFile(final byte[] family, final FileStatus hfile) { long length = hfile.getLen(); if (length > conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE)) { - LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length + - " bytes can be problematic as it may lead to oversplitting."); + LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length + + " bytes can be problematic as it may lead to oversplitting."); } ret.add(new LoadQueueItem(family, hfile.getPath())); } @@ -492,17 +492,17 @@ protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, return families2Queue; } - private boolean checkHFilesCountPerRegionPerFamily( - final Multimap regionGroups) { + private boolean + checkHFilesCountPerRegionPerFamily(final Multimap regionGroups) { for (Map.Entry> e : regionGroups.asMap().entrySet()) { Map filesMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (LoadQueueItem lqi : e.getValue()) { MutableInt count = filesMap.computeIfAbsent(lqi.getFamily(), k -> new MutableInt()); count.increment(); if (count.intValue() > maxFilesPerRegionPerFamily) { - LOG.error("Trying to load more than " + maxFilesPerRegionPerFamily + - " hfiles to family " + Bytes.toStringBinary(lqi.getFamily()) + - " of region with start key " + Bytes.toStringBinary(e.getKey())); + LOG.error("Trying to load more than " + maxFilesPerRegionPerFamily + " hfiles to family " + + Bytes.toStringBinary(lqi.getFamily()) + " of region with start key " + + Bytes.toStringBinary(e.getKey())); return false; } } @@ -527,7 +527,7 @@ private Pair, Set> groupOrSplitPhase final Multimap regionGroups = Multimaps.synchronizedMultimap(rgs); Set missingHFiles = new HashSet<>(); Pair, Set> pair = - new Pair<>(regionGroups, missingHFiles); + new Pair<>(regionGroups, missingHFiles); // drain LQIs and figure out bulk load groups Set, String>>> splittingFutures = new HashSet<>(); @@ -535,7 +535,7 @@ private Pair, Set> groupOrSplitPhase final LoadQueueItem item = queue.remove(); final Callable, String>> call = - () -> groupOrSplit(conn, tableName, regionGroups, item, startEndKeys); + () -> groupOrSplit(conn, tableName, regionGroups, item, startEndKeys); splittingFutures.add(pool.submit(call)); } // get all the results. 
All grouping and splitting must finish before @@ -643,14 +643,15 @@ private void checkRegionIndexValid(int idx, List> startEndK + " can't be found in hbase:meta.Please use hbck tool to fix it first."); } else if ((idx == startEndKeys.size() - 1) && !Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY)) { - throw new IOException("The last region info for table " + tableName + throw new IOException("The last region info for table " + tableName + " can't be found in hbase:meta.Please use hbck tool to fix it first."); - } else if (idx + 1 < startEndKeys.size() && !(Bytes.compareTo(startEndKeys.get(idx).getSecond(), + } else + if (idx + 1 < startEndKeys.size() && !(Bytes.compareTo(startEndKeys.get(idx).getSecond(), startEndKeys.get(idx + 1).getFirst()) == 0)) { - throw new IOException("The endkey of one region for table " + tableName + throw new IOException("The endkey of one region for table " + tableName + " is not equal to the startkey of the next region in hbase:meta." + "Please use hbck tool to fix it first."); - } + } } /** @@ -676,8 +677,8 @@ CacheConfig.DISABLED, true, getConf())) { return new Pair<>(null, hfilePath.getName()); } - LOG.info("Trying to load hfile=" + hfilePath + " first=" + first.map(Bytes::toStringBinary) + - " last=" + last.map(Bytes::toStringBinary)); + LOG.info("Trying to load hfile=" + hfilePath + " first=" + first.map(Bytes::toStringBinary) + + " last=" + last.map(Bytes::toStringBinary)); if (!first.isPresent() || !last.isPresent()) { assert !first.isPresent() && !last.isPresent(); // TODO what if this is due to a bad HFile? @@ -737,11 +738,10 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, HalfStoreFileReader halfReader = null; StoreFileWriter halfWriter = null; try { - ReaderContext context = new ReaderContextBuilder() - .withFileSystemAndPath(fs, inFile).build(); + ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, inFile).build(); HFileInfo hfile = new HFileInfo(context, conf); - halfReader = new HalfStoreFileReader(context, hfile, cacheConf, reference, - new AtomicInteger(0), conf); + halfReader = + new HalfStoreFileReader(context, hfile, cacheConf, reference, new AtomicInteger(0), conf); hfile.initMetaAndIndex(halfReader.getHFileReader()); Map fileInfo = halfReader.loadFileInfo(); @@ -749,12 +749,12 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Algorithm compression = familyDescriptor.getCompressionType(); BloomType bloomFilterType = familyDescriptor.getBloomFilterType(); HFileContext hFileContext = new HFileContextBuilder().withCompression(compression) - .withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize) - .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true) - .build(); + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize) + .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true) + .build(); halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile) - .withBloomType(bloomFilterType).withFileContext(hFileContext).build(); + .withBloomType(bloomFilterType).withFileContext(hFileContext).build(); HFileScanner scanner = halfReader.getScanner(false, false, false); scanner.seekTo(); do { @@ -839,7 +839,7 @@ private void createTable(TableName tableName, Path hfofDir, AsyncAdmin admin) 
th @Override public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) { ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(familyName); + ColumnFamilyDescriptorBuilder.newBuilder(familyName); familyBuilders.add(builder); return builder; } @@ -849,17 +849,17 @@ public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileSta throws IOException { Path hfile = hfileStatus.getPath(); try (HFile.Reader reader = - HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) { + HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) { if (builder.getCompressionType() != reader.getFileContext().getCompression()) { builder.setCompressionType(reader.getFileContext().getCompression()); - LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + - " for family " + builder.getNameAsString()); + LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + + " for family " + builder.getNameAsString()); } byte[] first = reader.getFirstRowKey().get(); byte[] last = reader.getLastRowKey().get(); - LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + - Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); + LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); // To eventually infer start key-end key boundaries Integer value = map.getOrDefault(first, 0); @@ -874,7 +874,7 @@ public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileSta byte[][] keys = inferBoundaries(map); TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName); familyBuilders.stream().map(ColumnFamilyDescriptorBuilder::build) - .forEachOrdered(tdBuilder::setColumnFamily); + .forEachOrdered(tdBuilder::setColumnFamily); FutureUtils.get(admin.createTable(tdBuilder.build(), keys)); LOG.info("Table " + tableName + " is available!!"); @@ -894,17 +894,17 @@ private Map performBulkLoad(AsyncClusterConnection co while (!queue.isEmpty()) { // need to reload split keys each iteration. final List> startEndKeys = - FutureUtils.get(conn.getRegionLocator(tableName).getStartEndKeys()); + FutureUtils.get(conn.getRegionLocator(tableName).getStartEndKeys()); if (count != 0) { - LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " + - queue.size() + " files remaining to group or split"); + LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with " + + queue.size() + " files remaining to group or split"); } int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10); maxRetries = Math.max(maxRetries, startEndKeys.size() + 1); if (maxRetries != 0 && count >= maxRetries) { throw new IOException( - "Retry attempted " + count + " times without completing, bailing out"); + "Retry attempted " + count + " times without completing, bailing out"); } count++; @@ -914,8 +914,8 @@ private Map performBulkLoad(AsyncClusterConnection co if (!checkHFilesCountPerRegionPerFamily(regionGroups)) { // Error is logged inside checkHFilesCountPerRegionPerFamily. 
- throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily + - " hfiles to one family of one region"); + throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily + + " hfiles to one family of one region"); } bulkLoadPhase(conn, tableName, queue, regionGroups, copyFile, item2RegionMap); @@ -988,8 +988,7 @@ private Map doBulkLoad(AsyncClusterConnection conn, * @param copyFile always copy hfiles if true */ private Map doBulkLoad(AsyncClusterConnection conn, - TableName tableName, Path hfofDir, boolean silence, boolean copyFile) - throws IOException { + TableName tableName, Path hfofDir, boolean silence, boolean copyFile) throws IOException { tableExists(conn, tableName); /* @@ -998,10 +997,10 @@ private Map doBulkLoad(AsyncClusterConnection conn, */ boolean validateHFile = getConf().getBoolean(VALIDATE_HFILES, true); if (!validateHFile) { - LOG.warn("You are skipping HFiles validation, it might cause some data loss if files " + - "are not correct. If you fail to read data from your table after using this " + - "option, consider removing the files and bulkload again without this option. " + - "See HBASE-13985"); + LOG.warn("You are skipping HFiles validation, it might cause some data loss if files " + + "are not correct. If you fail to read data from your table after using this " + + "option, consider removing the files and bulkload again without this option. " + + "See HBASE-13985"); } // LQI queue does not need to be threadsafe -- all operations on this queue // happen in this thread @@ -1012,8 +1011,8 @@ private Map doBulkLoad(AsyncClusterConnection conn, if (queue.isEmpty()) { LOG.warn( - "Bulk load operation did not find any files to load in directory {}. " + - "Does it contain files in subdirectories that correspond to column family names?", + "Bulk load operation did not find any files to load in directory {}. " + + "Does it contain files in subdirectories that correspond to column family names?", (hfofDir != null ? hfofDir.toUri().toString() : "")); return Collections.emptyMap(); } @@ -1027,17 +1026,16 @@ private Map doBulkLoad(AsyncClusterConnection conn, @Override public Map bulkLoad(TableName tableName, Map> family2Files) throws IOException { - try (AsyncClusterConnection conn = ClusterConnectionFactory. 
- createAsyncClusterConnection(getConf(), null, userProvider.getCurrent())) { + try (AsyncClusterConnection conn = ClusterConnectionFactory + .createAsyncClusterConnection(getConf(), null, userProvider.getCurrent())) { return doBulkLoad(conn, tableName, family2Files, isSilence(), isAlwaysCopyFiles()); } } @Override - public Map bulkLoad(TableName tableName, Path dir) - throws IOException { + public Map bulkLoad(TableName tableName, Path dir) throws IOException { try (AsyncClusterConnection conn = ClusterConnectionFactory - .createAsyncClusterConnection(getConf(), null, userProvider.getCurrent())) { + .createAsyncClusterConnection(getConf(), null, userProvider.getCurrent())) { AsyncAdmin admin = conn.getAdmin(); if (!FutureUtils.get(admin.tableExists(tableName))) { if (isCreateTable()) { @@ -1077,14 +1075,12 @@ private void usage() { System.err.println("Usage: " + "bin/hbase completebulkload [OPTIONS] " + " \n" + "Loads directory of hfiles -- a region dir or product of HFileOutputFormat -- " - + "into an hbase table.\n" - + "OPTIONS (for other -D options, see source code):\n" - + " -D" + CREATE_TABLE_CONF_KEY + "=no whether to create table; when 'no', target " - + "table must exist.\n" - + " -D" + IGNORE_UNMATCHED_CF_CONF_KEY + "=yes to ignore unmatched column families.\n" + + "into an hbase table.\n" + "OPTIONS (for other -D options, see source code):\n" + " -D" + + CREATE_TABLE_CONF_KEY + "=no whether to create table; when 'no', target " + + "table must exist.\n" + " -D" + IGNORE_UNMATCHED_CF_CONF_KEY + + "=yes to ignore unmatched column families.\n" + " -loadTable for when directory of files to load has a depth of 3; target table must " - + "exist;\n" - + " must be last of the options on command line.\n" + + "exist;\n" + " must be last of the options on command line.\n" + "See http://hbase.apache.org/book.html#arch.bulk.load.complete.strays for " + "documentation.\n"); } @@ -1126,12 +1122,12 @@ public static void main(String[] args) throws Exception { } @Override - public void disableReplication(){ + public void disableReplication() { this.replicate = false; } @Override - public boolean isReplicationDisabled(){ + public boolean isReplicationDisabled() { return !this.replicate; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 89ad398c0fba..fd1251c631af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,10 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool; - import java.util.Map; import java.util.concurrent.ExecutorService; import org.apache.hadoop.conf.Configuration; @@ -39,7 +36,6 @@ static Canary create(Configuration conf, ExecutorService executor, CanaryTool.Si /** * Run Canary in Region mode. - * * @param targets -- list of monitor tables. * @return the exit code of the Canary tool. */ @@ -47,7 +43,6 @@ static Canary create(Configuration conf, ExecutorService executor, CanaryTool.Si /** * Runs Canary in Region server mode. - * * @param targets -- list of monitor tables. * @return the exit code of the Canary tool. 
*/ @@ -55,7 +50,6 @@ static Canary create(Configuration conf, ExecutorService executor, CanaryTool.Si /** * Runs Canary in Zookeeper mode. - * * @return the exit code of the Canary tool. */ public int checkZooKeeper() throws Exception; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java index ce214a7a2973..e657d1e25379 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryStatusServlet.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,18 +25,16 @@ import org.apache.hadoop.hbase.tmpl.tool.CanaryStatusTmpl; import org.apache.yetus.audience.InterfaceAudience; - @InterfaceAudience.Private public class CanaryStatusServlet extends HttpServlet { @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) - throws ServletException, IOException { + throws ServletException, IOException { CanaryTool.RegionStdOutSink sink = - (CanaryTool.RegionStdOutSink) getServletContext().getAttribute( - "sink"); + (CanaryTool.RegionStdOutSink) getServletContext().getAttribute("sink"); if (sink == null) { throw new ServletException( - "RegionStdOutSink is null! The CanaryTool's InfoServer is not initialized correctly"); + "RegionStdOutSink is null! The CanaryTool's InfoServer is not initialized correctly"); } resp.setContentType("text/html"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index 38f1ce31c18e..3850a91155f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool; import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT; import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT; import static org.apache.hadoop.hbase.util.Addressing.inetSocketAddress2String; + import java.io.Closeable; import java.io.IOException; import java.net.BindException; @@ -99,24 +99,18 @@ import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * HBase Canary Tool for "canary monitoring" of a running HBase cluster. - * - * There are three modes: + * HBase Canary Tool for "canary monitoring" of a running HBase cluster. There are three modes: *
<ol> * <li>region mode (Default): For each region, try to get one row per column family outputting - * information on failure (ERROR) or else the latency. - * </li> - * - * <li>regionserver mode: For each regionserver try to get one row from one table selected - * randomly outputting information on failure (ERROR) or else the latency. - * </li> - * + * information on failure (ERROR) or else the latency.</li> + * <li>regionserver mode: For each regionserver try to get one row from one table selected randomly + * outputting information on failure (ERROR) or else the latency.</li> * <li>zookeeper mode: for each zookeeper instance, selects a znode outputting information on - * failure (ERROR) or else the latency. - * </li> + * failure (ERROR) or else the latency.</li> * </ol>
          */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @@ -179,16 +173,27 @@ public int checkZooKeeper() throws Exception { */ public interface Sink { long getReadFailureCount(); + long incReadFailureCount(); - Map getReadFailures(); + + Map getReadFailures(); + void updateReadFailures(String regionName, String serverName); + long getWriteFailureCount(); + long incWriteFailureCount(); - Map getWriteFailures(); + + Map getWriteFailures(); + void updateWriteFailures(String regionName, String serverName); + long getReadSuccessCount(); + long incReadSuccessCount(); + long getWriteSuccessCount(); + long incWriteSuccessCount(); } @@ -196,10 +201,8 @@ public interface Sink { * Simple implementation of canary sink that allows plotting to a file or standard output. */ public static class StdOutSink implements Sink { - private AtomicLong readFailureCount = new AtomicLong(0), - writeFailureCount = new AtomicLong(0), - readSuccessCount = new AtomicLong(0), - writeSuccessCount = new AtomicLong(0); + private AtomicLong readFailureCount = new AtomicLong(0), writeFailureCount = new AtomicLong(0), + readSuccessCount = new AtomicLong(0), writeSuccessCount = new AtomicLong(0); private Map readFailures = new ConcurrentHashMap<>(); private Map writeFailures = new ConcurrentHashMap<>(); @@ -293,15 +296,14 @@ public void publishReadTiming(String znode, String server, long msTime) { } /** - * By Region, for 'region' mode. + * By Region, for 'region' mode. */ public static class RegionStdOutSink extends StdOutSink { private Map perTableReadLatency = new HashMap<>(); private LongAdder writeLatency = new LongAdder(); private final ConcurrentMap> regionMap = - new ConcurrentHashMap<>(); - private ConcurrentMap perServerFailuresCount = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); + private ConcurrentMap perServerFailuresCount = new ConcurrentHashMap<>(); private ConcurrentMap perTableFailuresCount = new ConcurrentHashMap<>(); public ConcurrentMap getPerServerFailuresCount() { @@ -337,8 +339,8 @@ private void incFailuresCountDetails(ServerName serverName, RegionInfo region) { public void publishReadFailure(ServerName serverName, RegionInfo region, Exception e) { incReadFailureCount(); incFailuresCountDetails(serverName, region); - LOG.error("Read from {} on serverName={} failed", - region.getRegionNameAsString(), serverName, e); + LOG.error("Read from {} on serverName={} failed", region.getRegionNameAsString(), serverName, + e); } public void publishReadFailure(ServerName serverName, RegionInfo region, @@ -346,8 +348,7 @@ public void publishReadFailure(ServerName serverName, RegionInfo region, incReadFailureCount(); incFailuresCountDetails(serverName, region); LOG.error("Read from {} on serverName={}, columnFamily={} failed", - region.getRegionNameAsString(), serverName, - column.getNameAsString(), e); + region.getRegionNameAsString(), serverName, column.getNameAsString(), e); } public void publishReadTiming(ServerName serverName, RegionInfo region, @@ -360,7 +361,7 @@ public void publishReadTiming(ServerName serverName, RegionInfo region, // Note that read success count will be equal to total column family read successes. 
incReadSuccessCount(); LOG.info("Read from {} on {} {} in {}ms", region.getRegionNameAsString(), serverName, - column.getNameAsString(), msTime); + column.getNameAsString(), msTime); } public void publishWriteFailure(ServerName serverName, RegionInfo region, Exception e) { @@ -374,7 +375,7 @@ public void publishWriteFailure(ServerName serverName, RegionInfo region, incWriteFailureCount(); incFailuresCountDetails(serverName, region); LOG.error("Write to {} on {} {} failed", region.getRegionNameAsString(), serverName, - column.getNameAsString(), e); + column.getNameAsString(), e); } public void publishWriteTiming(ServerName serverName, RegionInfo region, @@ -386,8 +387,8 @@ public void publishWriteTiming(ServerName serverName, RegionInfo region, rtrs.add(rtr); // Note that write success count will be equal to total column family write successes. incWriteSuccessCount(); - LOG.info("Write to {} on {} {} in {}ms", - region.getRegionNameAsString(), serverName, column.getNameAsString(), msTime); + LOG.info("Write to {} on {} {} in {}ms", region.getRegionNameAsString(), serverName, + column.getNameAsString(), msTime); } public Map getReadLatencyMap() { @@ -436,7 +437,8 @@ public ZookeeperTask(Connection connection, String host, String znode, int timeo this.sink = sink; } - @Override public Void call() throws Exception { + @Override + public Void call() throws Exception { ZooKeeper zooKeeper = null; try { zooKeeper = new ZooKeeper(host, timeout, EmptyWatcher.instance); @@ -462,9 +464,10 @@ public ZookeeperTask(Connection connection, String host, String znode, int timeo * output latency or failure. */ static class RegionTask implements Callable { - public enum TaskType{ + public enum TaskType { READ, WRITE } + private Connection connection; private RegionInfo region; private RegionStdOutSink sink; @@ -600,17 +603,17 @@ private Void write() { tableDesc = table.getDescriptor(); byte[] rowToCheck = region.getStartKey(); if (rowToCheck.length == 0) { - rowToCheck = new byte[]{0x0}; + rowToCheck = new byte[] { 0x0 }; } - int writeValueSize = connection.getConfiguration() - .getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10); + int writeValueSize = + connection.getConfiguration().getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10); for (ColumnFamilyDescriptor column : tableDesc.getColumnFamilies()) { Put put = new Put(rowToCheck); byte[] value = new byte[writeValueSize]; Bytes.random(value); put.addColumn(column.getName(), HConstants.EMPTY_BYTE_ARRAY, value); - LOG.debug("Writing to {} {} {} {}", - tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(), + LOG.debug("Writing to {} {} {} {}", tableDesc.getTableName(), + region.getRegionNameAsString(), column.getNameAsString(), Bytes.toStringBinary(rowToCheck)); try { long startTime = EnvironmentEdgeManager.currentTime(); @@ -632,8 +635,8 @@ private Void write() { } /** - * Run a single RegionServer Task and then exit. - * Get one row from a region on the regionserver and output latency or the failure. + * Run a single RegionServer Task and then exit. Get one row from a region on the regionserver and + * output latency or the failure. */ static class RegionServerTask implements Callable { private Connection connection; @@ -666,9 +669,8 @@ public Void call() { table = connection.getTable(tableName); startKey = region.getStartKey(); // Can't do a get on empty start row so do a Scan of first element if any instead. 
- LOG.debug("Reading from {} {} {} {}", - serverName, region.getTable(), region.getRegionNameAsString(), - Bytes.toStringBinary(startKey)); + LOG.debug("Reading from {} {} {} {}", serverName, region.getTable(), + region.getRegionNameAsString(), Bytes.toStringBinary(startKey)); if (startKey.length > 0) { get = new Get(startKey); get.setCacheBlocks(false); @@ -733,8 +735,8 @@ public Void call() { private static final Logger LOG = LoggerFactory.getLogger(Canary.class); - public static final TableName DEFAULT_WRITE_TABLE_NAME = TableName.valueOf( - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "canary"); + public static final TableName DEFAULT_WRITE_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "canary"); private static final String CANARY_TABLE_FAMILY_NAME = "Test"; @@ -753,32 +755,31 @@ public Void call() { private boolean zookeeperMode = false; /** - * This is a Map of table to timeout. The timeout is for reading all regions in the table; i.e. - * we aggregate time to fetch each region and it needs to be less than this value else we - * log an ERROR. + * This is a Map of table to timeout. The timeout is for reading all regions in the table; i.e. we + * aggregate time to fetch each region and it needs to be less than this value else we log an + * ERROR. */ private HashMap configuredReadTableTimeouts = new HashMap<>(); - public static final String HBASE_CANARY_REGIONSERVER_ALL_REGIONS - = "hbase.canary.regionserver_all_regions"; + public static final String HBASE_CANARY_REGIONSERVER_ALL_REGIONS = + "hbase.canary.regionserver_all_regions"; - public static final String HBASE_CANARY_REGION_WRITE_SNIFFING - = "hbase.canary.region.write.sniffing"; - public static final String HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT - = "hbase.canary.region.write.table.timeout"; - public static final String HBASE_CANARY_REGION_WRITE_TABLE_NAME - = "hbase.canary.region.write.table.name"; - public static final String HBASE_CANARY_REGION_READ_TABLE_TIMEOUT - = "hbase.canary.region.read.table.timeout"; + public static final String HBASE_CANARY_REGION_WRITE_SNIFFING = + "hbase.canary.region.write.sniffing"; + public static final String HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT = + "hbase.canary.region.write.table.timeout"; + public static final String HBASE_CANARY_REGION_WRITE_TABLE_NAME = + "hbase.canary.region.write.table.name"; + public static final String HBASE_CANARY_REGION_READ_TABLE_TIMEOUT = + "hbase.canary.region.read.table.timeout"; - public static final String HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES - = "hbase.canary.zookeeper.permitted.failures"; + public static final String HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES = + "hbase.canary.zookeeper.permitted.failures"; public static final String HBASE_CANARY_USE_REGEX = "hbase.canary.use.regex"; public static final String HBASE_CANARY_TIMEOUT = "hbase.canary.timeout"; public static final String HBASE_CANARY_FAIL_ON_ERROR = "hbase.canary.fail.on.error"; - private ExecutorService executor; // threads to retrieve data from regionservers public CanaryTool() { @@ -853,15 +854,15 @@ private int parseArgs(String[] args) { } } else if (cmd.equals("-zookeeper")) { this.zookeeperMode = true; - } else if(cmd.equals("-regionserver")) { + } else if (cmd.equals("-regionserver")) { this.regionServerMode = true; - } else if(cmd.equals("-allRegions")) { + } else if (cmd.equals("-allRegions")) { conf.setBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, true); regionServerAllRegions = true; - } else if(cmd.equals("-writeSniffing")) { + } else if 
(cmd.equals("-writeSniffing")) { writeSniffing = true; conf.setBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, true); - } else if(cmd.equals("-treatFailureAsError") || cmd.equals("-failureAsError")) { + } else if (cmd.equals("-treatFailureAsError") || cmd.equals("-failureAsError")) { conf.setBoolean(HBASE_CANARY_FAIL_ON_ERROR, true); } else if (cmd.equals("-e")) { conf.setBoolean(HBASE_CANARY_USE_REGEX, true); @@ -880,7 +881,7 @@ private int parseArgs(String[] args) { printUsageAndExit(); } conf.setLong(HBASE_CANARY_TIMEOUT, timeout); - } else if(cmd.equals("-writeTableTimeout")) { + } else if (cmd.equals("-writeTableTimeout")) { i++; if (i == args.length) { @@ -906,8 +907,7 @@ private int parseArgs(String[] args) { } else if (cmd.equals("-f")) { i++; if (i == args.length) { - System.err - .println("-f needs a boolean value argument (true|false)."); + System.err.println("-f needs a boolean value argument (true|false)."); printUsageAndExit(); } @@ -915,8 +915,8 @@ private int parseArgs(String[] args) { } else if (cmd.equals("-readTableTimeouts")) { i++; if (i == args.length) { - System.err.println("-readTableTimeouts needs a comma-separated list of read " + - "millisecond timeouts per table (without spaces)."); + System.err.println("-readTableTimeouts needs a comma-separated list of read " + + "millisecond timeouts per table (without spaces)."); printUsageAndExit(); } readTableTimeoutsStr = args[i]; @@ -951,8 +951,7 @@ private int parseArgs(String[] args) { } if (this.zookeeperMode) { if (this.regionServerMode || regionServerAllRegions || writeSniffing) { - System.err.println("-zookeeper is exclusive and cannot be combined with " - + "other modes."); + System.err.println("-zookeeper is exclusive and cannot be combined with " + "other modes."); printUsageAndExit(); } } @@ -978,7 +977,7 @@ public int run(String[] args) throws Exception { System.arraycopy(args, index, monitorTargets, 0, length); } if (interval > 0) { - //Only show the web page in daemon mode + // Only show the web page in daemon mode putUpWebUI(); } if (zookeeperMode) { @@ -1033,8 +1032,7 @@ private int runMonitor(String[] monitorTargets) throws Exception { currentTimeLength = EnvironmentEdgeManager.currentTime() - startTime; if (currentTimeLength > timeout) { LOG.error("The monitor is running too long (" + currentTimeLength - + ") after timeout limit:" + timeout - + " will be killed itself !!"); + + ") after timeout limit:" + timeout + " will be killed itself !!"); if (monitor.initialized) { return TIMEOUT_ERROR_EXIT_CODE; } else { @@ -1064,12 +1062,12 @@ private int runMonitor(String[] monitorTargets) throws Exception { } @Override - public Map getReadFailures() { + public Map getReadFailures() { return sink.getReadFailures(); } @Override - public Map getWriteFailures() { + public Map getWriteFailures() { return sink.getWriteFailures(); } @@ -1078,38 +1076,38 @@ private void printUsageAndExit() { "Usage: canary [OPTIONS] [ [ [ interval between checks in seconds"); - System.err.println(" -e consider table/regionserver argument as regular " + - "expression"); + System.err.println( + " -e consider table/regionserver argument as regular " + "expression"); System.err.println(" -f exit on first error; default=true"); System.err.println(" -failureAsError treat read/write failure as error"); System.err.println(" -t timeout for canary-test run; default=600000ms"); System.err.println(" -writeSniffing enable write sniffing"); System.err.println(" -writeTable the table used for write sniffing; default=hbase:canary"); System.err.println(" 
-writeTableTimeout timeout for writeTable; default=600000ms"); - System.err.println(" -readTableTimeouts =," + - "=,..."); - System.err.println(" comma-separated list of table read timeouts " + - "(no spaces);"); + System.err.println( + " -readTableTimeouts =," + "=,..."); + System.err + .println(" comma-separated list of table read timeouts " + "(no spaces);"); System.err.println(" logs 'ERROR' if takes longer. default=600000ms"); System.err.println(" -permittedZookeeperFailures Ignore first N failures attempting to "); System.err.println(" connect to individual zookeeper nodes in ensemble"); System.err.println(""); System.err.println(" -D= to assign or override configuration params"); - System.err.println(" -Dhbase.canary.read.raw.enabled= Set to enable/disable " + - "raw scan; default=false"); - System.err.println(" -Dhbase.canary.info.port=PORT_NUMBER Set for a Canary UI; " + - "default=-1 (None)"); + System.err.println(" -Dhbase.canary.read.raw.enabled= Set to enable/disable " + + "raw scan; default=false"); + System.err.println( + " -Dhbase.canary.info.port=PORT_NUMBER Set for a Canary UI; " + "default=-1 (None)"); System.err.println(""); - System.err.println("Canary runs in one of three modes: region (default), regionserver, or " + - "zookeeper."); + System.err.println( + "Canary runs in one of three modes: region (default), regionserver, or " + "zookeeper."); System.err.println("To sniff/probe all regions, pass no arguments."); System.err.println("To sniff/probe all regions of a table, pass tablename."); System.err.println("To sniff/probe regionservers, pass -regionserver, etc."); @@ -1119,14 +1117,14 @@ private void printUsageAndExit() { Sink getSink(Configuration configuration, Class clazz) { // In test context, this.sink might be set. Use it if non-null. For testing. - return this.sink != null? this.sink: - (Sink)ReflectionUtils.newInstance(configuration.getClass("hbase.canary.sink.class", - clazz, Sink.class)); + return this.sink != null ? this.sink + : (Sink) ReflectionUtils + .newInstance(configuration.getClass("hbase.canary.sink.class", clazz, Sink.class)); } /** - * Canary region mode-specific data structure which stores information about each region - * to be scanned + * Canary region mode-specific data structure which stores information about each region to be + * scanned */ public static class RegionTaskResult { private RegionInfo region; @@ -1226,45 +1224,36 @@ public void setWriteSuccess() { } /** - * A Factory method for {@link Monitor}. - * Makes a RegionServerMonitor, or a ZooKeeperMonitor, or a RegionMonitor. + * A Factory method for {@link Monitor}. Makes a RegionServerMonitor, or a ZooKeeperMonitor, or a + * RegionMonitor. 
* @return a Monitor instance */ private Monitor newMonitor(final Connection connection, String[] monitorTargets) { Monitor monitor; boolean useRegExp = conf.getBoolean(HBASE_CANARY_USE_REGEX, false); - boolean regionServerAllRegions - = conf.getBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, false); - boolean failOnError - = conf.getBoolean(HBASE_CANARY_FAIL_ON_ERROR, true); - int permittedFailures - = conf.getInt(HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES, 0); - boolean writeSniffing - = conf.getBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, false); - String writeTableName = conf.get(HBASE_CANARY_REGION_WRITE_TABLE_NAME, - DEFAULT_WRITE_TABLE_NAME.getNameAsString()); - long configuredWriteTableTimeout - = conf.getLong(HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT, DEFAULT_TIMEOUT); + boolean regionServerAllRegions = conf.getBoolean(HBASE_CANARY_REGIONSERVER_ALL_REGIONS, false); + boolean failOnError = conf.getBoolean(HBASE_CANARY_FAIL_ON_ERROR, true); + int permittedFailures = conf.getInt(HBASE_CANARY_ZOOKEEPER_PERMITTED_FAILURES, 0); + boolean writeSniffing = conf.getBoolean(HBASE_CANARY_REGION_WRITE_SNIFFING, false); + String writeTableName = + conf.get(HBASE_CANARY_REGION_WRITE_TABLE_NAME, DEFAULT_WRITE_TABLE_NAME.getNameAsString()); + long configuredWriteTableTimeout = + conf.getLong(HBASE_CANARY_REGION_WRITE_TABLE_TIMEOUT, DEFAULT_TIMEOUT); if (this.regionServerMode) { - monitor = - new RegionServerMonitor(connection, monitorTargets, useRegExp, - getSink(connection.getConfiguration(), RegionServerStdOutSink.class), - this.executor, regionServerAllRegions, - failOnError, permittedFailures); + monitor = new RegionServerMonitor(connection, monitorTargets, useRegExp, + getSink(connection.getConfiguration(), RegionServerStdOutSink.class), this.executor, + regionServerAllRegions, failOnError, permittedFailures); } else if (this.zookeeperMode) { - monitor = - new ZookeeperMonitor(connection, monitorTargets, useRegExp, - getSink(connection.getConfiguration(), ZookeeperStdOutSink.class), - this.executor, failOnError, permittedFailures); + monitor = new ZookeeperMonitor(connection, monitorTargets, useRegExp, + getSink(connection.getConfiguration(), ZookeeperStdOutSink.class), this.executor, + failOnError, permittedFailures); } else { - monitor = - new RegionMonitor(connection, monitorTargets, useRegExp, - getSink(connection.getConfiguration(), RegionStdOutSink.class), - this.executor, writeSniffing, - TableName.valueOf(writeTableName), failOnError, configuredReadTableTimeouts, - configuredWriteTableTimeout, permittedFailures); + monitor = new RegionMonitor(connection, monitorTargets, useRegExp, + getSink(connection.getConfiguration(), RegionStdOutSink.class), this.executor, + writeSniffing, TableName.valueOf(writeTableName), failOnError, + configuredReadTableTimeouts, configuredWriteTableTimeout, permittedFailures); } return monitor; } @@ -1274,19 +1263,20 @@ private void populateReadTableTimeoutsMap(String configuredReadTableTimeoutsStr) for (String tT : tableTimeouts) { String[] nameTimeout = tT.split("="); if (nameTimeout.length < 2) { - throw new IllegalArgumentException("Each -readTableTimeouts argument must be of the form " + - "= (without spaces)."); + throw new IllegalArgumentException("Each -readTableTimeouts argument must be of the form " + + "= (without spaces)."); } long timeoutVal; try { timeoutVal = Long.parseLong(nameTimeout[1]); } catch (NumberFormatException e) { - throw new IllegalArgumentException("-readTableTimeouts read timeout for each table" + - " must be a numeric value 
argument."); + throw new IllegalArgumentException("-readTableTimeouts read timeout for each table" + + " must be a numeric value argument."); } configuredReadTableTimeouts.put(nameTimeout[0], timeoutVal); } } + /** * A Monitor super-class can be extended by users */ @@ -1294,8 +1284,8 @@ public static abstract class Monitor implements Runnable, Closeable { protected Connection connection; protected Admin admin; /** - * 'Target' dependent on 'mode'. Could be Tables or RegionServers or ZNodes. - * Passed on the command-line as arguments. + * 'Target' dependent on 'mode'. Could be Tables or RegionServers or ZNodes. Passed on the + * command-line as arguments. */ protected String[] targets; protected boolean useRegExp; @@ -1390,8 +1380,8 @@ private static class RegionMonitor extends Monitor { private boolean readAllCF; /** - * This is a timeout per table. If read of each region in the table aggregated takes longer - * than what is configured here, we log an ERROR rather than just an INFO. + * This is a timeout per table. If read of each region in the table aggregated takes longer than + * what is configured here, we log an ERROR rather than just an INFO. */ private HashMap configuredReadTableTimeouts; @@ -1400,8 +1390,7 @@ private static class RegionMonitor extends Monitor { public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink, ExecutorService executor, boolean writeSniffing, TableName writeTableName, boolean treatFailureAsError, HashMap configuredReadTableTimeouts, - long configuredWriteTableTimeout, - long allowedFailures) { + long configuredWriteTableTimeout, long allowedFailures) { super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, allowedFailures); Configuration conf = connection.getConfiguration(); @@ -1413,9 +1402,8 @@ public RegionMonitor(Connection connection, String[] monitorTargets, boolean use conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY, 1.0f); this.regionsUpperLimit = conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_UPPERLIMIT_KEY, 1.5f); - this.checkPeriod = - conf.getInt(HConstants.HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY, - DEFAULT_WRITE_TABLE_CHECK_PERIOD); + this.checkPeriod = conf.getInt(HConstants.HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY, + DEFAULT_WRITE_TABLE_CHECK_PERIOD); this.rawScanEnabled = conf.getBoolean(HConstants.HBASE_CANARY_READ_RAW_SCAN_KEY, false); this.configuredReadTableTimeouts = new HashMap<>(configuredReadTableTimeouts); this.configuredWriteTableTimeout = configuredWriteTableTimeout; @@ -1440,10 +1428,10 @@ public void run() { String[] tables = generateMonitorTables(this.targets); // Check to see that each table name passed in the -readTableTimeouts argument is also // passed as a monitor target. - if (!new HashSet<>(Arrays.asList(tables)). - containsAll(this.configuredReadTableTimeouts.keySet())) { - LOG.error("-readTableTimeouts can only specify read timeouts for monitor targets " + - "passed via command line."); + if (!new HashSet<>(Arrays.asList(tables)) + .containsAll(this.configuredReadTableTimeouts.keySet())) { + LOG.error("-readTableTimeouts can only specify read timeouts for monitor targets " + + "passed via command line."); this.errorCode = USAGE_EXIT_CODE; return; } @@ -1488,11 +1476,12 @@ public void run() { Long actual = actualReadTableLatency.get(tableName).longValue(); Long configured = entry.getValue(); if (actual > configured) { - LOG.error("Read operation for {} took {}ms exceeded the configured read timeout." 
+ - "(Configured read timeout {}ms.", tableName, actual, configured); + LOG.error("Read operation for {} took {}ms exceeded the configured read timeout." + + "(Configured read timeout {}ms.", + tableName, actual, configured); } else { LOG.info("Read operation for {} took {}ms (Configured read timeout {}ms.", - tableName, actual, configured); + tableName, actual, configured); } } else { LOG.error("Read operation for {} failed!", tableName); @@ -1502,12 +1491,12 @@ public void run() { String writeTableStringName = this.writeTableName.getNameAsString(); long actualWriteLatency = regionSink.getWriteLatency().longValue(); LOG.info("Write operation for {} took {}ms. Configured write timeout {}ms.", - writeTableStringName, actualWriteLatency, this.configuredWriteTableTimeout); + writeTableStringName, actualWriteLatency, this.configuredWriteTableTimeout); // Check that the writeTable write operation latency does not exceed the configured // timeout. if (actualWriteLatency > this.configuredWriteTableTimeout) { LOG.error("Write operation for {} exceeded the configured write timeout.", - writeTableStringName); + writeTableStringName); } } } catch (Exception e) { @@ -1571,9 +1560,9 @@ private List> sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception { LOG.debug("Reading list of tables"); List> taskFutures = new LinkedList<>(); - for (TableDescriptor td: admin.listTableDescriptors()) { - if (admin.tableExists(td.getTableName()) && admin.isTableEnabled(td.getTableName()) && - (!td.getTableName().equals(writeTableName))) { + for (TableDescriptor td : admin.listTableDescriptors()) { + if (admin.tableExists(td.getTableName()) && admin.isTableEnabled(td.getTableName()) + && (!td.getTableName().equals(writeTableName))) { LongAdder readLatency = regionSink.initializeAndGetReadLatencyForTable(td.getTableName().getNameAsString()); taskFutures.addAll(CanaryTool.sniff(admin, sink, td, executor, taskType, @@ -1623,16 +1612,16 @@ private void checkWriteTableDistribution() throws IOException { } private void createWriteTable(int numberOfServers) throws IOException { - int numberOfRegions = (int)(numberOfServers * regionsLowerLimit); - LOG.info("Number of live regionservers {}, pre-splitting the canary table into {} regions " + - "(current lower limit of regions per server is {} and you can change it with config {}).", - numberOfServers, numberOfRegions, regionsLowerLimit, - HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY); - ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(CANARY_TABLE_FAMILY_NAME)).setMaxVersions(1) - .setTimeToLive(writeDataTTL).build(); - TableDescriptor desc = TableDescriptorBuilder.newBuilder(writeTableName) - .setColumnFamily(family).build(); + int numberOfRegions = (int) (numberOfServers * regionsLowerLimit); + LOG.info("Number of live regionservers {}, pre-splitting the canary table into {} regions " + + "(current lower limit of regions per server is {} and you can change it with config {}).", + numberOfServers, numberOfRegions, regionsLowerLimit, + HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY); + ColumnFamilyDescriptor family = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CANARY_TABLE_FAMILY_NAME)) + .setMaxVersions(1).setTimeToLive(writeDataTTL).build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(writeTableName).setColumnFamily(family).build(); byte[][] splits = new RegionSplitter.HexStringSplit().split(numberOfRegions); admin.createTable(desc, splits); } @@ -1665,15 
+1654,15 @@ private static List> sniff(final Admin admin, final Sink sink, try (Table table = admin.getConnection().getTable(tableDesc.getTableName())) { List tasks = new ArrayList<>(); try (RegionLocator regionLocator = - admin.getConnection().getRegionLocator(tableDesc.getTableName())) { - for (HRegionLocation location: regionLocator.getAllRegionLocations()) { + admin.getConnection().getRegionLocator(tableDesc.getTableName())) { + for (HRegionLocation location : regionLocator.getAllRegionLocations()) { if (location == null) { LOG.warn("Null location"); continue; } ServerName rs = location.getServerName(); RegionInfo region = location.getRegion(); - tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink)sink, + tasks.add(new RegionTask(admin.getConnection(), region, rs, (RegionStdOutSink) sink, taskType, rawScanEnabled, rwLatency, readAllCF)); Map> regionMap = ((RegionStdOutSink) sink).getRegionMap(); regionMap.put(region.getRegionNameAsString(), new ArrayList()); @@ -1685,22 +1674,20 @@ private static List> sniff(final Admin admin, final Sink sink, } } - // monitor for zookeeper mode + // monitor for zookeeper mode private static class ZookeeperMonitor extends Monitor { private List hosts; private final String znode; private final int timeout; protected ZookeeperMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, - Sink sink, ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { - super(connection, monitorTargets, useRegExp, - sink, executor, treatFailureAsError, allowedFailures); + Sink sink, ExecutorService executor, boolean treatFailureAsError, long allowedFailures) { + super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, + allowedFailures); Configuration configuration = connection.getConfiguration(); - znode = - configuration.get(ZOOKEEPER_ZNODE_PARENT, - DEFAULT_ZOOKEEPER_ZNODE_PARENT); - timeout = configuration - .getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); + znode = configuration.get(ZOOKEEPER_ZNODE_PARENT, DEFAULT_ZOOKEEPER_ZNODE_PARENT); + timeout = configuration.getInt(HConstants.ZK_SESSION_TIMEOUT, + HConstants.DEFAULT_ZK_SESSION_TIMEOUT); ConnectStringParser parser = new ConnectStringParser(ZKConfig.getZKQuorumServersString(configuration)); hosts = Lists.newArrayList(); @@ -1715,7 +1702,8 @@ protected ZookeeperMonitor(Connection connection, String[] monitorTargets, boole } } - @Override public void run() { + @Override + public void run() { List tasks = Lists.newArrayList(); ZookeeperStdOutSink zkSink = null; try { @@ -1753,7 +1741,6 @@ private ZookeeperStdOutSink getSink() { } } - /** * A monitor for regionserver mode */ @@ -1761,8 +1748,8 @@ private static class RegionServerMonitor extends Monitor { private boolean allRegions; public RegionServerMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, - Sink sink, ExecutorService executor, boolean allRegions, - boolean treatFailureAsError, long allowedFailures) { + Sink sink, ExecutorService executor, boolean allRegions, boolean treatFailureAsError, + long allowedFailures) { super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, allowedFailures); this.allRegions = allRegions; @@ -1817,8 +1804,8 @@ private boolean checkNoTableNames() { } if (foundTableNames.size() > 0) { - System.err.println("Cannot pass a tablename when using the -regionserver " + - "option, tablenames:" + foundTableNames.toString()); + System.err.println("Cannot pass a tablename 
when using the -regionserver " + + "option, tablenames:" + foundTableNames.toString()); this.errorCode = USAGE_EXIT_CODE; } return foundTableNames.isEmpty(); @@ -1836,21 +1823,15 @@ private void monitorRegionServers(Map> rsAndRMap, LOG.error("Regionserver not serving any regions - {}", serverName); } else if (this.allRegions) { for (RegionInfo region : entry.getValue()) { - tasks.add(new RegionServerTask(this.connection, - serverName, - region, - regionServerSink, + tasks.add(new RegionServerTask(this.connection, serverName, region, regionServerSink, successes)); } } else { // random select a region if flag not set - RegionInfo region = entry.getValue() - .get(ThreadLocalRandom.current().nextInt(entry.getValue().size())); - tasks.add(new RegionServerTask(this.connection, - serverName, - region, - regionServerSink, - successes)); + RegionInfo region = + entry.getValue().get(ThreadLocalRandom.current().nextInt(entry.getValue().size())); + tasks.add( + new RegionServerTask(this.connection, serverName, region, regionServerSink, successes)); } } try { @@ -1866,7 +1847,7 @@ private void monitorRegionServers(Map> rsAndRMap, for (Map.Entry> entry : rsAndRMap.entrySet()) { String serverName = entry.getKey(); LOG.info("Successfully read {} regions out of {} on regionserver {}", - successMap.get(serverName), entry.getValue().size(), serverName); + successMap.get(serverName), entry.getValue().size(), serverName); } } } catch (InterruptedException e) { @@ -1887,9 +1868,9 @@ private Map> getAllRegionServerByName() { LOG.debug("Reading list of tables and locations"); List tableDescs = this.admin.listTableDescriptors(); List regions = null; - for (TableDescriptor tableDesc: tableDescs) { + for (TableDescriptor tableDesc : tableDescs) { try (RegionLocator regionLocator = - this.admin.getConnection().getRegionLocator(tableDesc.getTableName())) { + this.admin.getConnection().getRegionLocator(tableDesc.getTableName())) { for (HRegionLocation location : regionLocator.getAllRegionLocations()) { if (location == null) { LOG.warn("Null location"); @@ -1910,7 +1891,7 @@ private Map> getAllRegionServerByName() { } // get any live regionservers not serving any regions - for (ServerName rs: this.admin.getRegionServers()) { + for (ServerName rs : this.admin.getRegionServers()) { String rsName = rs.getHostname(); if (!rsAndRMap.containsKey(rsName)) { rsAndRMap.put(rsName, Collections. emptyList()); @@ -1923,8 +1904,8 @@ private Map> getAllRegionServerByName() { return rsAndRMap; } - private Map> doFilterRegionServerByName( - Map> fullRsAndRMap) { + private Map> + doFilterRegionServerByName(Map> fullRsAndRMap) { Map> filteredRsAndRMap = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java index c909725a616a..50c9a3f1f438 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/DataBlockEncodingValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -44,7 +42,6 @@ public class DataBlockEncodingValidator extends AbstractHBaseTool { /** * Check DataBlockEncodings of column families are compatible. - * * @return number of column families with incompatible DataBlockEncoding * @throws IOException if a remote or network exception occurs */ @@ -68,7 +65,7 @@ private int validateDBE() throws IOException { } catch (IllegalArgumentException e) { incompatibilities++; LOG.warn("Incompatible DataBlockEncoding for table: {}, cf: {}, encoding: {}", - td.getTableName().getNameAsString(), cfd.getNameAsString(), encoding); + td.getTableName().getNameAsString(), cfd.getNameAsString(), encoding); } } } @@ -77,7 +74,8 @@ private int validateDBE() throws IOException { if (incompatibilities > 0) { LOG.warn("There are {} column families with incompatible Data Block Encodings. Do not " + "upgrade until these encodings are converted to a supported one. " - + "Check https://s.apache.org/prefixtree for instructions.", incompatibilities); + + "Check https://s.apache.org/prefixtree for instructions.", + incompatibilities); } else { LOG.info("The used Data Block Encodings are compatible with HBase 2.0."); } @@ -87,8 +85,8 @@ private int validateDBE() throws IOException { @Override protected void printUsage() { - String header = "hbase " + PreUpgradeValidator.TOOL_NAME + " " + - PreUpgradeValidator.VALIDATE_DBE_NAME; + String header = + "hbase " + PreUpgradeValidator.TOOL_NAME + " " + PreUpgradeValidator.VALIDATE_DBE_NAME; printUsage(header, null, ""); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java index 2f648975724e..c598827ccbec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,11 +33,11 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @@ -48,7 +47,6 @@ public class HFileContentValidator extends AbstractHBaseTool { /** * Check HFile contents are readable by HBase 2. 
- * * @param conf used configuration * @return number of HFiles corrupted HBase * @throws IOException if a remote or network exception occurs @@ -110,7 +108,7 @@ private ExecutorService createThreadPool(Configuration conf) { int numThreads = conf.getInt("hfilevalidator.numthreads", availableProcessors); return Executors.newFixedThreadPool(numThreads, new ThreadFactoryBuilder().setNameFormat("hfile-validator-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java index 818004c272ea..dcfb3878c502 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.tool; import java.util.Arrays; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -44,8 +42,7 @@ */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class PreUpgradeValidator implements Tool { - private static final Logger LOG = LoggerFactory - .getLogger(PreUpgradeValidator.class); + private static final Logger LOG = LoggerFactory.getLogger(PreUpgradeValidator.class); public static final String TOOL_NAME = "pre-upgrade"; public static final String VALIDATE_CP_NAME = "validate-cp"; @@ -68,11 +65,10 @@ private void printUsage() { System.out.println("usage: hbase " + TOOL_NAME + " command ..."); System.out.println("Available commands:"); System.out.printf(" %-15s Validate co-processors are compatible with HBase%n", - VALIDATE_CP_NAME); + VALIDATE_CP_NAME); System.out.printf(" %-15s Validate DataBlockEncodings are compatible with HBase%n", - VALIDATE_DBE_NAME); - System.out.printf(" %-15s Validate HFile contents are readable%n", - VALIDATE_HFILE); + VALIDATE_DBE_NAME); + System.out.printf(" %-15s Validate HFile contents are readable%n", VALIDATE_HFILE); System.out.println("For further information, please use command -h"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java index 9311200ac939..c6bbf7d1fa02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,29 +34,23 @@ /** *
          - * This coprocessor 'shallows' all the writes. It allows to test a pure - write workload, going through all the communication layers. - The reads will work as well, but they as we never write, they will always always - return an empty structure. The WAL is also skipped. - Obviously, the region will never be split automatically. It's up to the user - to split and move it. + * This coprocessor 'shallows' all the writes. It allows testing a pure write workload, going + through all the communication layers. The reads will work as well, but as we never write, + they will always return an empty structure. The WAL is also skipped. Obviously, the region + will never be split automatically. It's up to the user to split and move it. *
          *
          - * For a table created like this: - * create 'usertable', {NAME => 'f1', VERSIONS => 1} + * For a table created like this: create 'usertable', {NAME => 'f1', VERSIONS => 1} *
          *
          - * You can then add the coprocessor with this command: - * alter 'usertable', 'coprocessor' => '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|' + * You can then add the coprocessor with this command: alter 'usertable', 'coprocessor' => + * '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|' *
          *
          - * And then - * put 'usertable', 'f1', 'f1', 'f1' + * And then put 'usertable', 'f1', 'f1', 'f1' *
          *
          - * scan 'usertable' - * Will return: - * 0 row(s) in 0.0050 seconds + * scan 'usertable' Will return: 0 row(s) in 0.0050 seconds *
          * TODO: It needs tests */ @@ -80,15 +73,14 @@ public void preOpen(ObserverContext e) throws IOEx @Override public void preBatchMutate(final ObserverContext c, - final MiniBatchOperationInProgress miniBatchOp) - throws IOException { + final MiniBatchOperationInProgress miniBatchOp) throws IOException { if (ops.incrementAndGet() % 20000 == 0) { LOG.info("Wrote " + ops.get() + " times in region " + regionName); } for (int i = 0; i < miniBatchOp.size(); i++) { miniBatchOp.setOperationStatus(i, - new OperationStatus(HConstants.OperationStatusCode.SUCCESS)); + new OperationStatus(HConstants.OperationStatusCode.SUCCESS)); } c.bypass(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java index 0f5d829de6b9..92f419e543af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/Branch1CoprocessorMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import org.apache.yetus.audience.InterfaceAudience; @@ -32,1106 +31,721 @@ public Branch1CoprocessorMethods() { private void addMethods() { /* BulkLoadObserver */ - addMethod("prePrepareBulkLoad", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest"); + addMethod("prePrepareBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest"); - addMethod("preCleanupBulkLoad", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest"); + addMethod("preCleanupBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest"); /* EndpointObserver */ - addMethod("postEndpointInvocation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "com.google.protobuf.Service", - "java.lang.String", - "com.google.protobuf.Message", - "com.google.protobuf.Message.Builder"); + addMethod("postEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message", + "com.google.protobuf.Message.Builder"); - addMethod("preEndpointInvocation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "com.google.protobuf.Service", - "java.lang.String", - "com.google.protobuf.Message"); + addMethod("preEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message"); /* MasterObserver */ - addMethod("preCreateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("postCreateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - 
"org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("preDeleteTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postDeleteTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preDeleteTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preMove", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.ServerName", - "org.apache.hadoop.hbase.ServerName"); - - addMethod("preCreateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("postCreateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HTableDescriptor", - "org.apache.hadoop.hbase.HRegionInfo[]"); - - addMethod("postMove", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.ServerName", - "org.apache.hadoop.hbase.ServerName"); - - addMethod("postDeleteTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preTruncateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postTruncateTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preTruncateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postTruncateTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preModifyTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postModifyTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preModifyTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postModifyTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preAddColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postAddColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preAddColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postAddColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preModifyColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postModifyColumn", - 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preModifyColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("postModifyColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.HColumnDescriptor"); - - addMethod("preDeleteColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("postDeleteColumn", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("preDeleteColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("postDeleteColumnHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "byte[]"); - - addMethod("preEnableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postEnableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preEnableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postEnableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preDisableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postDisableTable", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preDisableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postDisableTableHandler", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preAbortProcedure", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.procedure2.ProcedureExecutor", - "long"); - - addMethod("postAbortProcedure", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preListProcedures", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postListProcedures", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preAssign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("postAssign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("preUnassign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "boolean"); - - addMethod("postUnassign", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "boolean"); - - addMethod("preRegionOffline", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("postRegionOffline", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("preBalance", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postBalance", - 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preSetSplitOrMergeEnabled", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean", - "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); - - addMethod("postSetSplitOrMergeEnabled", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean", - "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); - - addMethod("preBalanceSwitch", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean"); - - addMethod("postBalanceSwitch", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean", - "boolean"); - - addMethod("preShutdown", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preStopMaster", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postStartMaster", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preMasterInitialization", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preListSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("postListSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("preCloneSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postCloneSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preRestoreSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("postRestoreSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", - "org.apache.hadoop.hbase.HTableDescriptor"); - - addMethod("preDeleteSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("postDeleteSnapshot", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); - - addMethod("preGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List"); - - addMethod("preGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List", - "java.lang.String"); - - addMethod("postGetTableDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List", - "java.lang.String"); - - addMethod("postGetTableDescriptors", - 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preGetTableNames", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.lang.String"); - - addMethod("postGetTableNames", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.lang.String"); - - addMethod("preCreateNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("postCreateNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("preDeleteNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postDeleteNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("preModifyNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("postModifyNamespace", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("preGetNamespaceDescriptor", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postGetNamespaceDescriptor", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.NamespaceDescriptor"); - - addMethod("preListNamespaceDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("postListNamespaceDescriptors", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preTableFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("postTableFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName"); - - addMethod("preSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetUserQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preSetTableQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetTableQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.TableName", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - 
addMethod("preSetNamespaceQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("postSetNamespaceQuota", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); - - addMethod("preDispatchMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("postDispatchMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.HRegionInfo"); - - addMethod("preGetClusterStatus", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postGetClusterStatus", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.ClusterStatus"); - - addMethod("preClearDeadServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postClearDeadServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "java.util.List"); - - addMethod("preMoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("postMoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("preMoveTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("postMoveTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.lang.String"); - - addMethod("preMoveServersAndTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.util.Set", - "java.lang.String"); - - addMethod("postMoveServersAndTables", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set", - "java.util.Set", - "java.lang.String"); - - addMethod("preAddRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postAddRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("preRemoveRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postRemoveRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("preRemoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set"); - - addMethod("postRemoveServers", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.Set"); - - addMethod("preBalanceRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String"); - - addMethod("postBalanceRSGroup", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.lang.String", - "boolean"); + addMethod("preCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("postCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("preDeleteTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postDeleteTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + 
addMethod("preDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", + "org.apache.hadoop.hbase.ServerName"); + + addMethod("preCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("postCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); + + addMethod("postMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", + "org.apache.hadoop.hbase.ServerName"); + + addMethod("postDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preTruncateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postTruncateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preModifyTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postModifyTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preModifyColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("postModifyColumnHandler", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); + + addMethod("preDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("postDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("preDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("postDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", "byte[]"); + + addMethod("preEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.procedure2.ProcedureExecutor", "long"); + + addMethod("postAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("postAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("preUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "boolean"); + + addMethod("postUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "boolean"); + + addMethod("preRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("postRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("preBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); + + addMethod("postSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); + + 
addMethod("preBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); + + addMethod("postBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean", + "boolean"); + + addMethod("preShutdown", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preStopMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postStartMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preMasterInitialization", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("postListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("preCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("postRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", + "org.apache.hadoop.hbase.HTableDescriptor"); + + addMethod("preDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("postDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); + + addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List"); + + addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List", "java.lang.String"); + + addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List", "java.lang.String"); + + addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.lang.String"); + + addMethod("postGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.lang.String"); + + addMethod("preCreateNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("postCreateNamespace", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("preDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("preModifyNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("postModifyNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("preGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.NamespaceDescriptor"); + + addMethod("preListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("postListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("postTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName"); + + addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "java.lang.String", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "java.lang.String", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetTableQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetTableQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.TableName", + "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("postSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); + + addMethod("preDispatchMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("postDispatchMerge", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.HRegionInfo"); + + addMethod("preGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.ClusterStatus"); + + addMethod("preClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "java.util.List"); + + addMethod("preMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("postMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("preMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("postMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.lang.String"); + + addMethod("preMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.util.Set", "java.lang.String"); + + addMethod("postMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set", "java.util.Set", "java.lang.String"); + + addMethod("preAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("preRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("preRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set"); + + addMethod("postRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.Set"); + + addMethod("preBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String"); + + addMethod("postBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.lang.String", "boolean"); /* RegionObserver */ - addMethod("preOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postLogReplay", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preFlushScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.KeyValueScanner", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "long"); - - addMethod("preFlushScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.KeyValueScanner", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("preFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("preFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - 
"org.apache.hadoop.hbase.regionserver.StoreFile"); - - addMethod("postFlush", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List"); - - addMethod("preCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("postCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "com.google.common.collect.ImmutableList"); - - addMethod("postCompactSelection", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "com.google.common.collect.ImmutableList", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("preCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.ScanType"); - - addMethod("preCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.ScanType", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("preClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean"); - - addMethod("preCompactScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.ScanType", - "long", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("preCompactScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.ScanType", - "long", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest", - "long"); - - addMethod("preCompactScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "java.util.List", - "org.apache.hadoop.hbase.regionserver.ScanType", - "long", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("postCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.StoreFile"); - - addMethod("postCompact", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.regionserver.StoreFile", - "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); - - addMethod("preSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]"); - - addMethod("preSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); - - addMethod("preSplitBeforePONR", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - 
"byte[]", - "java.util.List"); - - addMethod("preSplitAfterPONR", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("preRollBackSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postRollBackSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postCompleteSplit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - - addMethod("postClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "boolean"); - - addMethod("preGetClosestRowBefore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.client.Result"); - - addMethod("postGetClosestRowBefore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.client.Result"); - - addMethod("preGetOp", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "java.util.List"); - - addMethod("postGetOp", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "java.util.List"); - - addMethod("preExists", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "boolean"); - - addMethod("postExists", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Get", - "boolean"); - - addMethod("prePut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Put", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); - - addMethod("postPut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Put", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); - - addMethod("preDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Delete", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); + addMethod("preOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postLogReplay", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.KeyValueScanner", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "long"); + + addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.KeyValueScanner", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.StoreFile"); + + addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List"); + + 
addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList"); + + addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("preCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.ScanType"); + + addMethod("preCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.ScanType", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("preClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); + + addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.ScanType", "long", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.ScanType", "long", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest", "long"); + + addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", + "org.apache.hadoop.hbase.regionserver.ScanType", "long", + "org.apache.hadoop.hbase.regionserver.InternalScanner", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("postCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.StoreFile"); + + addMethod("postCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", + "org.apache.hadoop.hbase.regionserver.StoreFile", + "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); + + addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]"); + + addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); + + addMethod("preSplitBeforePONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "java.util.List"); + + addMethod("preSplitAfterPONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("preRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postCompleteSplit", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext"); + + addMethod("postClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); + + addMethod("preGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result"); + + addMethod("postGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result"); + + addMethod("preGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "java.util.List"); + + addMethod("postGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "java.util.List"); + + addMethod("preExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "boolean"); + + addMethod("postExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Get", "boolean"); + + addMethod("prePut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); + + addMethod("postPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); + + addMethod("preDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); addMethod("prePrepareTimeStampForDeleteVersion", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Mutation", - "org.apache.hadoop.hbase.Cell", - "byte[]", - "org.apache.hadoop.hbase.client.Get"); - - addMethod("postDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Delete", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit", - "org.apache.hadoop.hbase.client.Durability"); - - addMethod("preBatchMutate", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); - - addMethod("postBatchMutate", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); - - addMethod("postStartRegionOperation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region.Operation"); - - addMethod("postCloseRegionOperation", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region.Operation"); - - addMethod("postBatchMutateIndispensably", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress", - "boolean"); - - addMethod("preCheckAndPut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Put", - "boolean"); - - addMethod("preCheckAndPutAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - 
"org.apache.hadoop.hbase.client.Put", - "boolean"); - - addMethod("postCheckAndPut", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Put", - "boolean"); - - addMethod("preCheckAndDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Delete", - "boolean"); + "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", "byte[]", + "org.apache.hadoop.hbase.client.Get"); + + addMethod("postDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", + "org.apache.hadoop.hbase.client.Durability"); + + addMethod("preBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); + + addMethod("postBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); + + addMethod("postStartRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region.Operation"); + + addMethod("postCloseRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region.Operation"); + + addMethod("postBatchMutateIndispensably", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress", "boolean"); + + addMethod("preCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", + "boolean"); + + addMethod("preCheckAndPutAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", + "boolean"); + + addMethod("postCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", + "boolean"); + + addMethod("preCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", + "boolean"); addMethod("preCheckAndDeleteAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - "org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Delete", - "boolean"); - - addMethod("postCheckAndDelete", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", - 
"org.apache.hadoop.hbase.filter.ByteArrayComparable", - "org.apache.hadoop.hbase.client.Delete", - "boolean"); - - addMethod("preIncrementColumnValue", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "long", - "boolean"); - - addMethod("postIncrementColumnValue", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "byte[]", - "byte[]", - "long", - "boolean", - "long"); - - addMethod("preAppend", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Append"); - - addMethod("preAppendAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Append"); - - addMethod("postAppend", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Append", - "org.apache.hadoop.hbase.client.Result"); - - addMethod("preIncrement", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Increment"); - - addMethod("preIncrementAfterRowLock", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Increment"); - - addMethod("postIncrement", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Increment", - "org.apache.hadoop.hbase.client.Result"); - - addMethod("preScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Scan", - "org.apache.hadoop.hbase.regionserver.RegionScanner"); - - addMethod("preStoreScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Store", - "org.apache.hadoop.hbase.client.Scan", - "java.util.NavigableSet", - "org.apache.hadoop.hbase.regionserver.KeyValueScanner"); - - addMethod("postScannerOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.client.Scan", - "org.apache.hadoop.hbase.regionserver.RegionScanner"); - - addMethod("preScannerNext", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "java.util.List", - "int", - "boolean"); - - addMethod("postScannerNext", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "java.util.List", - "int", - "boolean"); - - addMethod("postScannerFilterRow", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner", - "byte[]", - "int", - "short", - "boolean"); - - addMethod("preScannerClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("postScannerClose", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.InternalScanner"); - - addMethod("preWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("preWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("postWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - 
"org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("postWALRestore", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("preBulkLoadHFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List"); - - addMethod("preCommitStoreFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "java.util.List"); - - addMethod("postCommitStoreFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "byte[]", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.fs.Path"); - - addMethod("postBulkLoadHFile", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "boolean"); - - addMethod("preStoreFileReaderOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.FileSystem", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", - "long", - "org.apache.hadoop.hbase.io.hfile.CacheConfig", - "org.apache.hadoop.hbase.io.Reference", - "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); - - addMethod("postStoreFileReaderOpen", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.FileSystem", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", - "long", - "org.apache.hadoop.hbase.io.hfile.CacheConfig", - "org.apache.hadoop.hbase.io.Reference", - "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); - - addMethod("postMutationBeforeWAL", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType", - "org.apache.hadoop.hbase.client.Mutation", - "org.apache.hadoop.hbase.Cell", - "org.apache.hadoop.hbase.Cell"); - - addMethod("postInstantiateDeleteTracker", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.DeleteTracker"); + "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", + "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", + "boolean"); + + addMethod("postCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", + "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", + "boolean"); + + addMethod("preIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "byte[]", "long", "boolean"); + + addMethod("postIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "byte[]", "byte[]", "long", "boolean", "long"); + + addMethod("preAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Append"); + + addMethod("preAppendAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Append"); + + addMethod("postAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Append", "org.apache.hadoop.hbase.client.Result"); + + addMethod("preIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Increment"); + + addMethod("preIncrementAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + 
"org.apache.hadoop.hbase.client.Increment"); + + addMethod("postIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Increment", "org.apache.hadoop.hbase.client.Result"); + + addMethod("preScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Scan", "org.apache.hadoop.hbase.regionserver.RegionScanner"); + + addMethod("preStoreScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.client.Scan", + "java.util.NavigableSet", "org.apache.hadoop.hbase.regionserver.KeyValueScanner"); + + addMethod("postScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.client.Scan", "org.apache.hadoop.hbase.regionserver.RegionScanner"); + + addMethod("preScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean"); + + addMethod("postScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean"); + + addMethod("postScannerFilterRow", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner", "byte[]", "int", "short", "boolean"); + + addMethod("preScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("postScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.InternalScanner"); + + addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("preBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List"); + + addMethod("preCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", + "java.util.List"); + + addMethod("postCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "byte[]", "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); + + addMethod("postBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "boolean"); + + addMethod("preStoreFileReaderOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", + "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", + "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", + "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); + + addMethod("postStoreFileReaderOpen", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", + "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", + "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", + "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); + + addMethod("postMutationBeforeWAL", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType", + "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", + "org.apache.hadoop.hbase.Cell"); + + addMethod("postInstantiateDeleteTracker", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.DeleteTracker"); /* RegionServerObserver */ - addMethod("preMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("preMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preStopRegionServer", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preStopRegionServer", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", + "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preMergeCommit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region", - "java.util.List"); + addMethod("preMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", + "java.util.List"); - addMethod("postMergeCommit", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", + "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preRollBackMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("preRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("postRollBackMerge", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.regionserver.Region", - "org.apache.hadoop.hbase.regionserver.Region"); + addMethod("postRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); - addMethod("preRollWALWriterRequest", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("preRollWALWriterRequest", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext"); - addMethod("postRollWALWriterRequest", - "org.apache.hadoop.hbase.coprocessor.ObserverContext"); + addMethod("postRollWALWriterRequest", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postCreateReplicationEndPoint", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.replication.ReplicationEndpoint"); + "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.replication.ReplicationEndpoint"); - addMethod("preReplicateLogEntries", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "org.apache.hadoop.hbase.CellScanner"); + addMethod("preReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "org.apache.hadoop.hbase.CellScanner"); - addMethod("postReplicateLogEntries", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "java.util.List", - "org.apache.hadoop.hbase.CellScanner"); + addMethod("postReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "java.util.List", "org.apache.hadoop.hbase.CellScanner"); /* WALObserver */ - addMethod("preWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("preWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("postWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.regionserver.wal.HLogKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("postWALWrite", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.hbase.HRegionInfo", - "org.apache.hadoop.hbase.wal.WALKey", - "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); - - addMethod("preWALRoll", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.fs.Path"); - - addMethod("postWALRoll", - "org.apache.hadoop.hbase.coprocessor.ObserverContext", - "org.apache.hadoop.fs.Path", - "org.apache.hadoop.fs.Path"); + addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", + "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); + + addMethod("preWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); + + addMethod("postWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", + "org.apache.hadoop.fs.Path", 
"org.apache.hadoop.fs.Path"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java index 60e384171352..36b98f5bcfb4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethod.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.util.ArrayList; import java.util.List; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -36,7 +33,7 @@ public CoprocessorMethod(String name) { parameters = new ArrayList<>(); } - public CoprocessorMethod withParameters(String ... parameters) { + public CoprocessorMethod withParameters(String... parameters) { for (String parameter : parameters) { this.parameters.add(parameter); } @@ -44,7 +41,7 @@ public CoprocessorMethod withParameters(String ... parameters) { return this; } - public CoprocessorMethod withParameters(Class ... parameters) { + public CoprocessorMethod withParameters(Class... parameters) { for (Class parameter : parameters) { this.parameters.add(parameter.getCanonicalName()); } @@ -60,10 +57,9 @@ public boolean equals(Object obj) { return false; } - CoprocessorMethod other = (CoprocessorMethod)obj; + CoprocessorMethod other = (CoprocessorMethod) obj; - return Objects.equals(name, other.name) && - Objects.equals(parameters, other.parameters); + return Objects.equals(name, other.name) && Objects.equals(parameters, other.parameters); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java index 2e0c801b8aad..16837c1863d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.lang.reflect.Method; import java.util.HashSet; import java.util.Set; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -32,35 +30,35 @@ public CoprocessorMethods() { methods = new HashSet<>(); } - public void addMethod(String name, String ... parameters) { + public void addMethod(String name, String... parameters) { CoprocessorMethod cpMethod = new CoprocessorMethod(name).withParameters(parameters); methods.add(cpMethod); } - public void addMethod(String name, Class ... parameters) { + public void addMethod(String name, Class... 
parameters) { CoprocessorMethod cpMethod = new CoprocessorMethod(name).withParameters(parameters); methods.add(cpMethod); } public void addMethod(Method method) { - CoprocessorMethod cpMethod = new CoprocessorMethod(method.getName()) - .withParameters(method.getParameterTypes()); + CoprocessorMethod cpMethod = + new CoprocessorMethod(method.getName()).withParameters(method.getParameterTypes()); methods.add(cpMethod); } - public boolean hasMethod(String name, String ... parameters) { + public boolean hasMethod(String name, String... parameters) { CoprocessorMethod method = new CoprocessorMethod(name).withParameters(parameters); return methods.contains(method); } - public boolean hasMethod(String name, Class ... parameters) { + public boolean hasMethod(String name, Class... parameters) { CoprocessorMethod method = new CoprocessorMethod(name).withParameters(parameters); return methods.contains(method); } public boolean hasMethod(Method method) { - CoprocessorMethod cpMethod = new CoprocessorMethod(method.getName()) - .withParameters(method.getParameterTypes()); + CoprocessorMethod cpMethod = + new CoprocessorMethod(method.getName()).withParameters(method.getParameterTypes()); return methods.contains(cpMethod); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java index 766224e5d381..5661d81a82da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import java.io.IOException; @@ -56,8 +54,7 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class CoprocessorValidator extends AbstractHBaseTool { - private static final Logger LOG = LoggerFactory - .getLogger(CoprocessorValidator.class); + private static final Logger LOG = LoggerFactory.getLogger(CoprocessorValidator.class); private CoprocessorMethods branch1; private CoprocessorMethods current; @@ -79,11 +76,10 @@ public CoprocessorValidator() { } /** - * This classloader implementation calls {@link #resolveClass(Class)} - * method for every loaded class. It means that some extra validation will - * take place - * according to JLS. + * This classloader implementation calls {@link #resolveClass(Class)} method for every loaded + * class. It means that some extra validation will take place + * according to + * JLS. 
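
[Editor's note] The CoprocessorMethods class above collects these signatures into a set, and the validate() method further down in this diff reports a warning when a loaded coprocessor class declares a method that exists in the branch-1 set but not in the current one. The following is a self-contained sketch of that comparison under assumed names (RemovedHookCheckDemo, MyOldCoprocessor, and the sample hook are all invented for illustration):

import java.lang.reflect.Method;
import java.util.HashSet;
import java.util.Set;

public class RemovedHookCheckDemo {

  // Pretend the old (branch-1) API declared a hook preTableFlush(String)...
  static final Set<String> BRANCH1 = new HashSet<>();
  // ...and the current API no longer does.
  static final Set<String> CURRENT = new HashSet<>();

  static {
    BRANCH1.add("preTableFlush(java.lang.String)");
  }

  // Build a lookup key from a reflected method: name + canonical parameter type names.
  static String key(Method m) {
    StringBuilder sb = new StringBuilder(m.getName()).append('(');
    Class<?>[] types = m.getParameterTypes();
    for (int i = 0; i < types.length; i++) {
      if (i > 0) {
        sb.append(',');
      }
      sb.append(types[i].getCanonicalName());
    }
    return sb.append(')').toString();
  }

  // A user coprocessor that still overrides the removed hook.
  public static class MyOldCoprocessor {
    public void preTableFlush(String table) {
    }
  }

  public static void main(String[] args) {
    for (Method m : MyOldCoprocessor.class.getDeclaredMethods()) {
      String k = key(m);
      // Same test as validate(): present in the old API set but absent from the current one.
      if (BRANCH1.contains(k) && !CURRENT.contains(k)) {
        System.out.println("WARNING: " + k + " was removed from the new coprocessor API");
      }
    }
  }
}
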
*/ private static final class ResolverUrlClassLoader extends URLClassLoader { private ResolverUrlClassLoader(URL[] urls, ClassLoader parent) { @@ -135,19 +131,19 @@ private void validate(ClassLoader classLoader, String className, LOG.trace("Validating method '{}'.", method); if (branch1.hasMethod(method) && !current.hasMethod(method)) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.WARNING, "method '" + method + - "' was removed from new coprocessor API, so it won't be called by HBase"); + CoprocessorViolation violation = + new CoprocessorViolation(className, Severity.WARNING, "method '" + method + + "' was removed from new coprocessor API, so it won't be called by HBase"); violations.add(violation); } } } catch (ClassNotFoundException e) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.ERROR, "no such class", e); + CoprocessorViolation violation = + new CoprocessorViolation(className, Severity.ERROR, "no such class", e); violations.add(violation); } catch (RuntimeException | Error e) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.ERROR, "could not validate class", e); + CoprocessorViolation violation = + new CoprocessorViolation(className, Severity.ERROR, "could not validate class", e); violations.add(violation); } } @@ -165,8 +161,8 @@ public void validateClasses(ClassLoader classLoader, String[] classNames, } @InterfaceAudience.Private - protected void validateTables(ClassLoader classLoader, Admin admin, - Pattern pattern, List violations) throws IOException { + protected void validateTables(ClassLoader classLoader, Admin admin, Pattern pattern, + List violations) throws IOException { List tableDescriptors = admin.listTableDescriptors(pattern); for (TableDescriptor tableDescriptor : tableDescriptors) { @@ -184,8 +180,7 @@ protected void validateTables(ClassLoader classLoader, Admin admin, try (ResolverUrlClassLoader cpClassLoader = createClassLoader(classLoader, path)) { validate(cpClassLoader, className, violations); } catch (IOException e) { - CoprocessorViolation violation = new CoprocessorViolation( - className, Severity.ERROR, + CoprocessorViolation violation = new CoprocessorViolation(className, Severity.ERROR, "could not validate jar file '" + path + "'", e); violations.add(violation); } @@ -206,9 +201,8 @@ private void validateTables(ClassLoader classLoader, Pattern pattern, @Override protected void printUsage() { - String header = "hbase " + PreUpgradeValidator.TOOL_NAME + " " + - PreUpgradeValidator.VALIDATE_CP_NAME + - " [-jar ...] [-class ... | -table ... | -config]"; + String header = "hbase " + PreUpgradeValidator.TOOL_NAME + " " + + PreUpgradeValidator.VALIDATE_CP_NAME + " [-jar ...] [-class ... | -table ... 
| -config]"; printUsage(header, "Options:", ""); } @@ -249,9 +243,8 @@ private List buildClasspath(List jars) throws IOException { Path jarPath = Paths.get(jar); if (Files.isDirectory(jarPath)) { try (Stream stream = Files.list(jarPath)) { - List files = stream - .filter((path) -> Files.isRegularFile(path)) - .collect(Collectors.toList()); + List files = + stream.filter((path) -> Files.isRegularFile(path)).collect(Collectors.toList()); for (Path file : files) { URL url = file.toUri().toURL(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java index d00398ecc270..f4b0da2fa890 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorViolation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import org.apache.yetus.audience.InterfaceAudience; @@ -38,8 +36,7 @@ public CoprocessorViolation(String className, Severity severity, String message) this(className, severity, message, null); } - public CoprocessorViolation(String className, Severity severity, String message, - Throwable t) { + public CoprocessorViolation(String className, Severity severity, String message, Throwable t) { this.className = className; this.severity = severity; this.message = message; @@ -64,11 +61,7 @@ public Throwable getThrowable() { @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("className", className) - .add("severity", severity) - .add("message", message) - .add("throwable", throwable) - .toString(); + return MoreObjects.toStringHelper(this).add("className", className).add("severity", severity) + .add("message", message).add("throwable", throwable).toString(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java index 265cf5158ee4..8dca18aa7eda 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/coprocessor/CurrentCoprocessorMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.tool.coprocessor; import java.lang.reflect.Method; - import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver; import org.apache.hadoop.hbase.coprocessor.EndpointObserver; import org.apache.hadoop.hbase.coprocessor.MasterObserver; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java index 6825e426c7dc..94159f7ec21b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractFileStatusFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,33 +17,28 @@ */ package org.apache.hadoop.hbase.util; +import edu.umd.cs.findbugs.annotations.CheckForNull; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; - -import edu.umd.cs.findbugs.annotations.CheckForNull; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; /** - * Typical base class for file status filter. Works more efficiently when - * filtering file statuses, otherwise implementation will need to lookup filestatus - * for the path which will be expensive. + * Typical base class for file status filter. Works more efficiently when filtering file statuses, + * otherwise implementation will need to lookup filestatus for the path which will be expensive. */ @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class AbstractFileStatusFilter implements PathFilter, FileStatusFilter { /** - * Filters out a path. Can be given an optional directory hint to avoid - * filestatus lookup. - * - * @param p A filesystem path - * @param isDir An optional boolean indicating whether the path is a directory or not - * @return true if the path is accepted, false if the path is filtered out + * Filters out a path. Can be given an optional directory hint to avoid filestatus lookup. + * @param p A filesystem path + * @param isDir An optional boolean indicating whether the path is a directory or not + * @return true if the path is accepted, false if the path is filtered out */ protected abstract boolean accept(Path p, @CheckForNull Boolean isDir); @@ -61,7 +56,8 @@ protected boolean isFile(FileSystem fs, @CheckForNull Boolean isDir, Path p) thr return !isDirectory(fs, isDir, p); } - protected boolean isDirectory(FileSystem fs, @CheckForNull Boolean isDir, Path p) throws IOException { + protected boolean isDirectory(FileSystem fs, @CheckForNull Boolean isDir, Path p) + throws IOException { return isDir != null ? 
isDir : fs.isDirectory(p); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java index 31394e8a97b5..bd3b04452927 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,15 +18,13 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.yetus.audience.InterfaceAudience; /** - * The bloom context that is used by the StorefileWriter to add the bloom details - * per cell + * The bloom context that is used by the StorefileWriter to add the bloom details per cell */ @InterfaceAudience.Private public abstract class BloomContext { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java index 0d99d30da454..a0524e724e31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,59 +17,40 @@ */ package org.apache.hadoop.hbase.util; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.yetus.audience.InterfaceAudience; /** - * * Implements a Bloom filter, as defined by Bloom in 1970. *

          - * The Bloom filter is a data structure that was introduced in 1970 and that has - * been adopted by the networking research community in the past decade thanks - * to the bandwidth efficiencies that it offers for the transmission of set - * membership information between networked hosts. A sender encodes the - * information into a bit vector, the Bloom filter, that is more compact than a - * conventional representation. Computation and space costs for construction are - * linear in the number of elements. The receiver uses the filter to test - * whether various elements are members of the set. Though the filter will - * occasionally return a false positive, it will never return a false negative. - * When creating the filter, the sender can choose its desired point in a - * trade-off between the false positive rate and the size. - * + * The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by the + * networking research community in the past decade thanks to the bandwidth efficiencies that it + * offers for the transmission of set membership information between networked hosts. A sender + * encodes the information into a bit vector, the Bloom filter, that is more compact than a + * conventional representation. Computation and space costs for construction are linear in the + * number of elements. The receiver uses the filter to test whether various elements are members of + * the set. Though the filter will occasionally return a false positive, it will never return a + * false negative. When creating the filter, the sender can choose its desired point in a trade-off + * between the false positive rate and the size. *

          - * Originally inspired by European Commission - * One-Lab Project 034819. - * - * Bloom filters are very sensitive to the number of elements inserted into - * them. For HBase, the number of entries depends on the size of the data stored - * in the column. Currently the default region size is 256MB, so entry count ~= - * 256MB / (average value size for column). Despite this rule of thumb, there is - * no efficient way to calculate the entry count after compactions. Therefore, - * it is often easier to use a dynamic bloom filter that will add extra space - * instead of allowing the error rate to grow. - * - * ( http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/BloomFilterSurvey - * .pdf ) - * - * m denotes the number of bits in the Bloom filter (bitSize) n denotes the - * number of elements inserted into the Bloom filter (maxKeys) k represents the - * number of hash functions used (nbHash) e represents the desired false - * positive rate for the bloom (err) - * - * If we fix the error rate (e) and know the number of entries, then the optimal - * bloom size m = -(n * ln(err) / (ln(2)^2) ~= n * ln(err) / ln(0.6185) - * + * Originally inspired by European Commission One-Lab Project + * 034819. Bloom filters are very sensitive to the number of elements inserted into them. For + * HBase, the number of entries depends on the size of the data stored in the column. Currently the + * default region size is 256MB, so entry count ~= 256MB / (average value size for column). Despite + * this rule of thumb, there is no efficient way to calculate the entry count after compactions. + * Therefore, it is often easier to use a dynamic bloom filter that will add extra space instead of + * allowing the error rate to grow. ( + * http://www.eecs.harvard.edu/~michaelm/NEWWORK/postscripts/BloomFilterSurvey .pdf ) m denotes the + * number of bits in the Bloom filter (bitSize) n denotes the number of elements inserted into the + * Bloom filter (maxKeys) k represents the number of hash functions used (nbHash) e represents the + * desired false positive rate for the bloom (err) If we fix the error rate (e) and know the number + * of entries, then the optimal bloom size m = -(n * ln(err) / (ln(2)^2) ~= n * ln(err) / ln(0.6185) * The probability of false positives is minimized when k = m/n ln(2). - * * @see BloomFilter The general behavior of a filter - * - * @see - * Space/Time Trade-Offs in Hash Coding with Allowable Errors - * + * @see Space/Time + * Trade-Offs in Hash Coding with Allowable Errors * @see BloomFilterWriter for the ability to add elements to a Bloom filter */ @InterfaceAudience.Private @@ -79,8 +59,7 @@ public interface BloomFilter extends BloomFilterBase { /** * Check if the specified key is contained in the bloom filter. * @param keyCell the key to check for the existence of - * @param bloom bloom filter data to search. This can be null if auto-loading - * is supported. + * @param bloom bloom filter data to search. This can be null if auto-loading is supported. * @param type The type of Bloom ROW/ ROW_COL * @return true if matched by bloom, false if not */ @@ -91,15 +70,14 @@ public interface BloomFilter extends BloomFilterBase { * @param buf data to check for existence of * @param offset offset into the data * @param length length of the data - * @param bloom bloom filter data to search. This can be null if auto-loading - * is supported. + * @param bloom bloom filter data to search. This can be null if auto-loading is supported. 
* @return true if matched by bloom, false if not */ boolean contains(byte[] buf, int offset, int length, ByteBuff bloom); /** - * @return true if this Bloom filter can automatically load its data - * and thus allows a null byte buffer to be passed to contains() + * @return true if this Bloom filter can automatically load its data and thus allows a null byte + * buffer to be passed to contains() */ boolean supportsAutoLoading(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java index 131552560e59..142a36c35f8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,8 +31,7 @@ public interface BloomFilterBase { long getKeyCount(); /** - * @return The max number of keys that can be inserted - * to maintain the desired error rate + * @return The max number of keys that can be inserted to maintain the desired error rate */ long getMaxKeys(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java index 06cf699e34fe..70e46899c4aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.yetus.audience.InterfaceAudience; /** * The basic building block for the {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter} @@ -56,8 +53,7 @@ public class BloomFilterChunk implements BloomFilterBase { * @param meta stored bloom meta data * @throws IllegalArgumentException meta data is invalid */ - public BloomFilterChunk(DataInput meta) - throws IOException, IllegalArgumentException { + public BloomFilterChunk(DataInput meta) throws IOException, IllegalArgumentException { this.byteSize = meta.readInt(); this.hashCount = meta.readInt(); this.hashType = meta.readInt(); @@ -72,12 +68,10 @@ public BloomFilterChunk(DataInput meta) } /** - * Computes the error rate for this Bloom filter, taking into account the - * actual number of hash functions and keys inserted. The return value of - * this function changes as a Bloom filter is being populated. Used for - * reporting the actual error rate of compound Bloom filters when writing - * them out. - * + * Computes the error rate for this Bloom filter, taking into account the actual number of hash + * functions and keys inserted. The return value of this function changes as a Bloom filter is + * being populated. 
Used for reporting the actual error rate of compound Bloom filters when + * writing them out. * @return error rate for this particular Bloom filter */ public double actualErrorRate() { @@ -93,21 +87,17 @@ public BloomFilterChunk(int hashType, BloomType bloomType) { /** * Determines & initializes bloom filter meta data from user config. Call * {@link #allocBloom()} to allocate bloom filter data. - * - * @param maxKeys Maximum expected number of keys that will be stored in this - * bloom - * @param errorRate Desired false positive error rate. Lower rate = more - * storage required + * @param maxKeys Maximum expected number of keys that will be stored in this bloom + * @param errorRate Desired false positive error rate. Lower rate = more storage required * @param hashType Type of hash function to use - * @param foldFactor When finished adding entries, you may be able to 'fold' - * this bloom to save space. Tradeoff potentially excess bytes in - * bloom for ability to fold if keyCount is exponentially greater - * than maxKeys. + * @param foldFactor When finished adding entries, you may be able to 'fold' this bloom to save + * space. Tradeoff potentially excess bytes in bloom for ability to fold if keyCount is + * exponentially greater than maxKeys. * @throws IllegalArgumentException */ // Used only in testcases - public BloomFilterChunk(int maxKeys, double errorRate, int hashType, - int foldFactor) throws IllegalArgumentException { + public BloomFilterChunk(int maxKeys, double errorRate, int hashType, int foldFactor) + throws IllegalArgumentException { this(hashType, BloomType.ROW); long bitSize = BloomFilterUtil.computeBitSize(maxKeys, errorRate); @@ -121,9 +111,8 @@ public BloomFilterChunk(int maxKeys, double errorRate, int hashType, } /** - * Creates another similar Bloom filter. Does not copy the actual bits, and - * sets the new filter's key count to zero. - * + * Creates another similar Bloom filter. Does not copy the actual bits, and sets the new filter's + * key count to zero. 
* @return a Bloom filter with the same configuration as this */ public BloomFilterChunk createAnother() { @@ -138,16 +127,16 @@ public void allocBloom() { if (this.bloom != null) { throw new IllegalArgumentException("can only create bloom once."); } - this.bloom = ByteBuffer.allocate((int)this.byteSize); + this.bloom = ByteBuffer.allocate((int) this.byteSize); assert this.bloom.hasArray(); } void sanityCheck() throws IllegalArgumentException { - if(0 >= this.byteSize || this.byteSize > Integer.MAX_VALUE) { + if (0 >= this.byteSize || this.byteSize > Integer.MAX_VALUE) { throw new IllegalArgumentException("Invalid byteSize: " + this.byteSize); } - if(this.hashCount <= 0) { + if (this.hashCount <= 0) { throw new IllegalArgumentException("Hash function count must be > 0"); } @@ -160,15 +149,14 @@ void sanityCheck() throws IllegalArgumentException { } } - void bloomCheck(ByteBuffer bloom) throws IllegalArgumentException { + void bloomCheck(ByteBuffer bloom) throws IllegalArgumentException { if (this.byteSize != bloom.limit()) { - throw new IllegalArgumentException( - "Configured bloom length should match actual length"); + throw new IllegalArgumentException("Configured bloom length should match actual length"); } } // Used only by tests - void add(byte [] buf, int offset, int len) { + void add(byte[] buf, int offset, int len) { /* * For faster hashing, use combinatorial generation * http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf @@ -208,17 +196,16 @@ private void setHashLoc(int hash1, int hash2) { ++this.keyCount; } - //--------------------------------------------------------------------------- + // --------------------------------------------------------------------------- /** Private helpers */ /** * Set the bit at the specified index to 1. - * * @param pos index of bit */ void set(long pos) { - int bytePos = (int)(pos / 8); - int bitPos = (int)(pos % 8); + int bytePos = (int) (pos / 8); + int bitPos = (int) (pos % 8); byte curByte = bloom.get(bytePos); curByte |= BloomFilterUtil.bitvals[bitPos]; bloom.put(bytePos, curByte); @@ -226,13 +213,12 @@ void set(long pos) { /** * Check if bit at specified index is 1. - * * @param pos index of bit * @return true if bit at specified index is 1, false if 0. */ static boolean get(int pos, ByteBuffer bloomBuf, int bloomOffset) { - int bytePos = pos >> 3; //pos / 8 - int bitPos = pos & 0x7; //pos % 8 + int bytePos = pos >> 3; // pos / 8 + int bitPos = pos & 0x7; // pos % 8 // TODO access this via Util API which can do Unsafe access if possible(?) byte curByte = bloomBuf.get(bloomOffset + bytePos); curByte &= BloomFilterUtil.bitvals[bitPos]; @@ -262,11 +248,11 @@ public void compactBloom() { // see if the actual size is exponentially smaller than expected. 
if (this.keyCount > 0 && this.bloom.hasArray()) { int pieces = 1; - int newByteSize = (int)this.byteSize; + int newByteSize = (int) this.byteSize; int newMaxKeys = this.maxKeys; // while exponentially smaller & folding is lossless - while ((newByteSize & 1) == 0 && newMaxKeys > (this.keyCount<<1)) { + while ((newByteSize & 1) == 0 && newMaxKeys > (this.keyCount << 1)) { pieces <<= 1; newByteSize >>= 1; newMaxKeys >>= 1; @@ -278,8 +264,8 @@ public void compactBloom() { int start = this.bloom.arrayOffset(); int end = start + newByteSize; int off = end; - for(int p = 1; p < pieces; ++p) { - for(int pos = start; pos < end; ++pos) { + for (int p = 1; p < pieces; ++p) { + for (int pos = start; pos < end; ++pos) { array[pos] |= array[off++]; } } @@ -298,8 +284,7 @@ public void compactBloom() { * @param out OutputStream to place bloom * @throws IOException Error writing bloom array */ - public void writeBloom(final DataOutput out) - throws IOException { + public void writeBloom(final DataOutput out) throws IOException { if (!this.bloom.hasArray()) { throw new IOException("Only writes ByteBuffer with underlying array."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java index 506aa210f914..a95faa23dd8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.DataInput; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -39,64 +38,53 @@ @InterfaceAudience.Private public final class BloomFilterFactory { - private static final Logger LOG = - LoggerFactory.getLogger(BloomFilterFactory.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(BloomFilterFactory.class.getName()); /** This class should not be instantiated. */ - private BloomFilterFactory() {} + private BloomFilterFactory() { + } /** - * Specifies the target error rate to use when selecting the number of keys - * per Bloom filter. + * Specifies the target error rate to use when selecting the number of keys per Bloom filter. */ - public static final String IO_STOREFILE_BLOOM_ERROR_RATE = - "io.storefile.bloom.error.rate"; + public static final String IO_STOREFILE_BLOOM_ERROR_RATE = "io.storefile.bloom.error.rate"; /** - * Maximum folding factor allowed. The Bloom filter will be shrunk by - * the factor of up to 2 ** this times if we oversize it initially. + * Maximum folding factor allowed. The Bloom filter will be shrunk by the factor of up to 2 ** + * this times if we oversize it initially. */ - public static final String IO_STOREFILE_BLOOM_MAX_FOLD = - "io.storefile.bloom.max.fold"; + public static final String IO_STOREFILE_BLOOM_MAX_FOLD = "io.storefile.bloom.max.fold"; /** - * For default (single-block) Bloom filters this specifies the maximum number - * of keys. + * For default (single-block) Bloom filters this specifies the maximum number of keys. 
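Two mechanics visible in the BloomFilterChunk hunks above are the bit addressing (byte index pos >> 3, bit index pos & 0x7 against the bitvals table) and compactBloom's folding, which ORs the upper half of the bit array onto the lower half while the filter is heavily oversized. The following is a hedged, self-contained sketch of both ideas, not the HBase implementation.

// Sketch of bit addressing and folding as described in BloomFilterChunk; not HBase API.
public final class BloomBitsSketch {

  // Set bit 'pos' in the backing array: byte index pos / 8, bit index pos % 8.
  static void setBit(byte[] bloom, int pos) {
    bloom[pos >> 3] |= (byte) (1 << (pos & 0x7));
  }

  static boolean getBit(byte[] bloom, int pos) {
    return (bloom[pos >> 3] & (1 << (pos & 0x7))) != 0;
  }

  // Fold once: OR the upper half onto the lower half and drop the upper half.
  // Every bit that was set stays set, so membership checks remain correct,
  // at the cost of a higher false positive rate.
  static byte[] foldOnce(byte[] bloom) {
    int half = bloom.length / 2;
    byte[] folded = new byte[half];
    for (int i = 0; i < half; i++) {
      folded[i] = (byte) (bloom[i] | bloom[i + half]);
    }
    return folded;
  }

  public static void main(String[] args) {
    byte[] bloom = new byte[16];          // 128 bits
    setBit(bloom, 3);                     // lower half
    setBit(bloom, 67);                    // upper half
    byte[] folded = foldOnce(bloom);      // 64 bits remain
    // After folding, positions wrap modulo the new bit length: 67 % 64 == 3.
    System.out.println(getBit(folded, 3) + " " + getBit(folded, 67 % (folded.length * 8)));
  }
}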
*/ - public static final String IO_STOREFILE_BLOOM_MAX_KEYS = - "io.storefile.bloom.max.keys"; + public static final String IO_STOREFILE_BLOOM_MAX_KEYS = "io.storefile.bloom.max.keys"; /** Master switch to enable Bloom filters */ - public static final String IO_STOREFILE_BLOOM_ENABLED = - "io.storefile.bloom.enabled"; + public static final String IO_STOREFILE_BLOOM_ENABLED = "io.storefile.bloom.enabled"; /** Master switch to enable Delete Family Bloom filters */ public static final String IO_STOREFILE_DELETEFAMILY_BLOOM_ENABLED = "io.storefile.delete.family.bloom.enabled"; /** - * Target Bloom block size. Bloom filter blocks of approximately this size - * are interleaved with data blocks. + * Target Bloom block size. Bloom filter blocks of approximately this size are interleaved with + * data blocks. */ - public static final String IO_STOREFILE_BLOOM_BLOCK_SIZE = - "io.storefile.bloom.block.size"; + public static final String IO_STOREFILE_BLOOM_BLOCK_SIZE = "io.storefile.bloom.block.size"; /** Maximum number of times a Bloom filter can be "folded" if oversized */ private static final int MAX_ALLOWED_FOLD_FACTOR = 7; /** - * Instantiates the correct Bloom filter class based on the version provided - * in the meta block data. - * - * @param meta the byte array holding the Bloom filter's metadata, including - * version information - * @param reader the {@link HFile} reader to use to lazily load Bloom filter - * blocks + * Instantiates the correct Bloom filter class based on the version provided in the meta block + * data. + * @param meta the byte array holding the Bloom filter's metadata, including version information + * @param reader the {@link HFile} reader to use to lazily load Bloom filter blocks * @return an instance of the correct type of Bloom filter * @throws IllegalArgumentException */ - public static BloomFilter - createFromMeta(DataInput meta, HFile.Reader reader) + public static BloomFilter createFromMeta(DataInput meta, HFile.Reader reader) throws IllegalArgumentException, IOException { int version = meta.readInt(); switch (version) { @@ -104,15 +92,12 @@ private BloomFilterFactory() {} return new CompoundBloomFilter(meta, reader); default: - throw new IllegalArgumentException( - "Bad bloom filter format version " + version - ); + throw new IllegalArgumentException("Bad bloom filter format version " + version); } } /** - * @return true if general Bloom (Row or RowCol) filters are enabled in the - * given configuration + * @return true if general Bloom (Row or RowCol) filters are enabled in the given configuration */ public static boolean isGeneralBloomEnabled(Configuration conf) { return conf.getBoolean(IO_STOREFILE_BLOOM_ENABLED, true); @@ -145,8 +130,8 @@ public static int getBloomBlockSize(Configuration conf) { } /** - * @return max key for the Bloom filter from the configuration - */ + * @return max key for the Bloom filter from the configuration + */ public static int getMaxKeys(Configuration conf) { return conf.getInt(IO_STOREFILE_BLOOM_MAX_KEYS, 128 * 1000 * 1000); } @@ -154,22 +139,19 @@ public static int getMaxKeys(Configuration conf) { /** * Creates a new general (Row or RowCol) Bloom filter at the time of * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. - * * @param conf * @param cacheConf * @param bloomType - * @param maxKeys an estimate of the number of keys we expect to insert. - * Irrelevant if compound Bloom filters are enabled. + * @param maxKeys an estimate of the number of keys we expect to insert. 
Irrelevant if compound + * Bloom filters are enabled. * @param writer the HFile writer - * @return the new Bloom filter, or null in case Bloom filters are disabled - * or when failed to create one. + * @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to + * create one. */ public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf, - CacheConfig cacheConf, BloomType bloomType, int maxKeys, - HFile.Writer writer) { + CacheConfig cacheConf, BloomType bloomType, int maxKeys, HFile.Writer writer) { if (!isGeneralBloomEnabled(conf)) { - LOG.trace("Bloom filters are disabled by configuration for " - + writer.getPath() + LOG.trace("Bloom filters are disabled by configuration for " + writer.getPath() + (conf == null ? " (configuration is null)" : "")); return null; } else if (bloomType == BloomType.NONE) { @@ -187,8 +169,7 @@ public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf, err = (float) (1 - Math.sqrt(1 - err)); } - int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, - MAX_ALLOWED_FOLD_FACTOR); + int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, MAX_ALLOWED_FOLD_FACTOR); // Do we support compound bloom filters? // In case of compound Bloom filters we ignore the maxKeys hint. @@ -204,17 +185,16 @@ public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf, * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing. * @param conf * @param cacheConf - * @param maxKeys an estimate of the number of keys we expect to insert. - * Irrelevant if compound Bloom filters are enabled. + * @param maxKeys an estimate of the number of keys we expect to insert. Irrelevant if compound + * Bloom filters are enabled. * @param writer the HFile writer - * @return the new Bloom filter, or null in case Bloom filters are disabled - * or when failed to create one. + * @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to + * create one. */ public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf, CacheConfig cacheConf, int maxKeys, HFile.Writer writer) { if (!isDeleteFamilyBloomEnabled(conf)) { - LOG.info("Delete Bloom filters are disabled by configuration for " - + writer.getPath() + LOG.info("Delete Bloom filters are disabled by configuration for " + writer.getPath() + (conf == null ? " (configuration is null)" : "")); return null; } @@ -223,9 +203,9 @@ public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf, int maxFold = getMaxFold(conf); // In case of compound Bloom filters we ignore the maxKeys hint. 
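For the ROWCOL adjustment that appears as context in the createGeneralBloomAtWrite hunk above (err = 1 - sqrt(1 - err)), a quick numeric check: if a lookup amounts to two independent probes each with false positive rate p, the combined rate is 1 - (1 - p)^2, and solving that for p at a combined target err yields the expression in the code. The "two probes" reading is an inference from the formula, not something this diff states.

// Numeric check of err' = 1 - sqrt(1 - err): two probes at rate err' combine back to err.
public final class RowColErrRateCheck {
  public static void main(String[] args) {
    double target = 0.01;
    double perProbe = 1 - Math.sqrt(1 - target);      // ~0.005013
    double combined = 1 - Math.pow(1 - perProbe, 2);  // recovers ~0.01
    System.out.printf("perProbe=%.6f combined=%.6f%n", perProbe, combined);
  }
}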
- CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf), - err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(), - null, BloomType.ROW); + CompoundBloomFilterWriter bloomWriter = + new CompoundBloomFilterWriter(getBloomBlockSize(conf), err, Hash.getHashType(conf), maxFold, + cacheConf.shouldCacheBloomsOnWrite(), null, BloomType.ROW); writer.addInlineBlockWriter(bloomWriter); return bloomWriter; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java index c7afb0e5f915..3b66876af562 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterUtil.java @@ -21,7 +21,6 @@ import java.text.NumberFormat; import java.util.Random; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -38,30 +37,21 @@ public final class BloomFilterUtil { /** Record separator for the Bloom filter statistics human-readable string */ public static final String STATS_RECORD_SEP = "; "; /** - * Used in computing the optimal Bloom filter size. This approximately equals - * 0.480453. + * Used in computing the optimal Bloom filter size. This approximately equals 0.480453. */ public static final double LOG2_SQUARED = Math.log(2) * Math.log(2); - + /** - * A random number generator to use for "fake lookups" when testing to - * estimate the ideal false positive rate. + * A random number generator to use for "fake lookups" when testing to estimate the ideal false + * positive rate. */ private static Random randomGeneratorForTest; public static final String PREFIX_LENGTH_KEY = "RowPrefixBloomFilter.prefix_length"; - + /** Bit-value lookup array to prevent doing the same work over and over */ - public static final byte [] bitvals = { - (byte) 0x01, - (byte) 0x02, - (byte) 0x04, - (byte) 0x08, - (byte) 0x10, - (byte) 0x20, - (byte) 0x40, - (byte) 0x80 - }; + public static final byte[] bitvals = { (byte) 0x01, (byte) 0x02, (byte) 0x04, (byte) 0x08, + (byte) 0x10, (byte) 0x20, (byte) 0x40, (byte) 0x80 }; /** * Private constructor to keep this class from being instantiated. @@ -72,10 +62,9 @@ private BloomFilterUtil() { /** * @param maxKeys * @param errorRate - * @return the number of bits for a Bloom filter than can hold the given - * number of keys and provide the given error rate, assuming that the - * optimal number of hash functions is used and it does not have to - * be an integer. + * @return the number of bits for a Bloom filter than can hold the given number of keys and + * provide the given error rate, assuming that the optimal number of hash functions is + * used and it does not have to be an integer. */ public static long computeBitSize(long maxKeys, double errorRate) { return (long) Math.ceil(maxKeys * (-Math.log(errorRate) / LOG2_SQUARED)); @@ -86,7 +75,7 @@ public static long computeBitSize(long maxKeys, double errorRate) { * simulate uniformity of accesses better in a test environment. Should not be set in a real * environment where correctness matters! *

          - * This gets used in {@link #contains(ByteBuff, int, int, Hash, int, HashKey)} + * This gets used in {@link #contains(ByteBuff, int, int, Hash, int, HashKey)} * @param random The random number source to use, or null to compute actual hashes */ public static void setRandomGeneratorForTest(Random random) { @@ -94,11 +83,9 @@ public static void setRandomGeneratorForTest(Random random) { } /** - * The maximum number of keys we can put into a Bloom filter of a certain - * size to maintain the given error rate, assuming the number of hash - * functions is chosen optimally and does not even have to be an integer - * (hence the "ideal" in the function name). - * + * The maximum number of keys we can put into a Bloom filter of a certain size to maintain the + * given error rate, assuming the number of hash functions is chosen optimally and does not even + * have to be an integer (hence the "ideal" in the function name). * @param bitSize * @param errorRate * @return maximum number of keys that can be inserted into the Bloom filter @@ -111,44 +98,36 @@ public static long idealMaxKeys(long bitSize, double errorRate) { } /** - * The maximum number of keys we can put into a Bloom filter of a certain - * size to get the given error rate, with the given number of hash functions. - * + * The maximum number of keys we can put into a Bloom filter of a certain size to get the given + * error rate, with the given number of hash functions. * @param bitSize * @param errorRate * @param hashCount - * @return the maximum number of keys that can be inserted in a Bloom filter - * to maintain the target error rate, if the number of hash functions - * is provided. + * @return the maximum number of keys that can be inserted in a Bloom filter to maintain the + * target error rate, if the number of hash functions is provided. */ - public static long computeMaxKeys(long bitSize, double errorRate, - int hashCount) { - return (long) (-bitSize * 1.0 / hashCount * - Math.log(1 - Math.exp(Math.log(errorRate) / hashCount))); + public static long computeMaxKeys(long bitSize, double errorRate, int hashCount) { + return (long) (-bitSize * 1.0 / hashCount + * Math.log(1 - Math.exp(Math.log(errorRate) / hashCount))); } /** - * Computes the actual error rate for the given number of elements, number - * of bits, and number of hash functions. Taken directly from the - * Wikipedia Bloom filter article. - * + * Computes the actual error rate for the given number of elements, number of bits, and number of + * hash functions. Taken directly from the + * Wikipedia + * Bloom filter article. * @param maxKeys * @param bitSize * @param functionCount * @return the actual error rate */ - public static double actualErrorRate(long maxKeys, long bitSize, - int functionCount) { - return Math.exp(Math.log(1 - Math.exp(-functionCount * maxKeys * 1.0 - / bitSize)) * functionCount); + public static double actualErrorRate(long maxKeys, long bitSize, int functionCount) { + return Math + .exp(Math.log(1 - Math.exp(-functionCount * maxKeys * 1.0 / bitSize)) * functionCount); } /** - * Increases the given byte size of a Bloom filter until it can be folded by - * the given factor. - * + * Increases the given byte size of a Bloom filter until it can be folded by the given factor. 
* @param bitSize * @param foldFactor * @return Foldable byte size @@ -162,8 +141,8 @@ public static int computeFoldableByteSize(long bitSize, int foldFactor) { byteSizeLong <<= foldFactor; } if (byteSizeLong > Integer.MAX_VALUE) { - throw new IllegalArgumentException("byteSize=" + byteSizeLong + " too " - + "large for bitSize=" + bitSize + ", foldFactor=" + foldFactor); + throw new IllegalArgumentException("byteSize=" + byteSizeLong + " too " + "large for bitSize=" + + bitSize + ", foldFactor=" + foldFactor); } return (int) byteSizeLong; } @@ -171,25 +150,24 @@ public static int computeFoldableByteSize(long bitSize, int foldFactor) { public static int optimalFunctionCount(int maxKeys, long bitSize) { long i = bitSize / maxKeys; double result = Math.ceil(Math.log(2) * i); - if (result > Integer.MAX_VALUE){ + if (result > Integer.MAX_VALUE) { throw new IllegalArgumentException("result too large for integer value."); } - return (int)result; + return (int) result; } - + /** * Creates a Bloom filter chunk of the given size. - * - * @param byteSizeHint the desired number of bytes for the Bloom filter bit - * array. Will be increased so that folding is possible. + * @param byteSizeHint the desired number of bytes for the Bloom filter bit array. Will be + * increased so that folding is possible. * @param errorRate target false positive rate of the Bloom filter * @param hashType Bloom filter hash function type * @param foldFactor * @param bloomType * @return the new Bloom filter of the desired size */ - public static BloomFilterChunk createBySize(int byteSizeHint, - double errorRate, int hashType, int foldFactor, BloomType bloomType) { + public static BloomFilterChunk createBySize(int byteSizeHint, double errorRate, int hashType, + int foldFactor, BloomType bloomType) { BloomFilterChunk bbf = new BloomFilterChunk(hashType, bloomType); bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8L, foldFactor); @@ -205,9 +183,8 @@ public static BloomFilterChunk createBySize(int byteSizeHint, return bbf; } - public static boolean contains(byte[] buf, int offset, int length, - ByteBuff bloomBuf, int bloomOffset, int bloomSize, Hash hash, - int hashCount) { + public static boolean contains(byte[] buf, int offset, int length, ByteBuff bloomBuf, + int bloomOffset, int bloomSize, Hash hash, int hashCount) { HashKey hashKey = new ByteArrayHashKey(buf, offset, length); return contains(bloomBuf, bloomOffset, bloomSize, hash, hashCount, hashKey); } @@ -242,20 +219,19 @@ private static boolean contains(ByteBuff bloomBuf, int bloomOffset, int bloo public static boolean contains(Cell cell, ByteBuff bloomBuf, int bloomOffset, int bloomSize, Hash hash, int hashCount, BloomType type) { - HashKey hashKey = type == BloomType.ROWCOL ? new RowColBloomHashKey(cell) - : new RowBloomHashKey(cell); + HashKey hashKey = + type == BloomType.ROWCOL ? new RowColBloomHashKey(cell) : new RowBloomHashKey(cell); return contains(bloomBuf, bloomOffset, bloomSize, hash, hashCount, hashKey); } /** * Check if bit at specified index is 1. - * * @param pos index of bit * @return true if bit at specified index is 1, false if 0. 
*/ - static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) { - int bytePos = pos >> 3; //pos / 8 - int bitPos = pos & 0x7; //pos % 8 + static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) { + int bytePos = pos >> 3; // pos / 8 + int bitPos = pos & 0x7; // pos % 8 byte curByte = bloomBuf.get(bloomOffset + bytePos); curByte &= bitvals[bitPos]; return (curByte != 0); @@ -263,10 +239,9 @@ static boolean checkBit(int pos, ByteBuff bloomBuf, int bloomOffset) { /** * A human-readable string with statistics for the given Bloom filter. - * * @param bloomFilter the Bloom filter to output statistics for; - * @return a string consisting of "<key>: <value>" parts - * separated by {@link #STATS_RECORD_SEP}. + * @return a string consisting of "<key>: <value>" parts separated by + * {@link #STATS_RECORD_SEP}. */ public static String formatStats(BloomFilterBase bloomFilter) { StringBuilder sb = new StringBuilder(); @@ -307,8 +282,8 @@ public static byte[] getBloomFilterParam(BloomType bloomFilterType, Configuratio throw new IllegalArgumentException(message); } } catch (NumberFormatException nfe) { - message = "Number format exception when parsing " + PREFIX_LENGTH_KEY + " for BloomType " + - bloomFilterType.toString() + ":" + prefixLengthString; + message = "Number format exception when parsing " + PREFIX_LENGTH_KEY + " for BloomType " + + bloomFilterType.toString() + ":" + prefixLengthString; throw new IllegalArgumentException(message, nfe); } bloomParam = Bytes.toBytes(prefixLength); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java index a68897801580..ec8390697aec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,35 +15,33 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.CellSink; import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; /** - * Specifies methods needed to add elements to a Bloom filter and serialize the - * resulting Bloom filter as a sequence of bytes. + * Specifies methods needed to add elements to a Bloom filter and serialize the resulting Bloom + * filter as a sequence of bytes. */ @InterfaceAudience.Private public interface BloomFilterWriter extends BloomFilterBase, CellSink, ShipperListener { /** Compact the Bloom filter before writing metadata & data to disk. */ void compactBloom(); + /** * Get a writable interface into bloom filter meta data. - * * @return a writable instance that can be later written to a stream */ Writable getMetaWriter(); /** - * Get a writable interface into bloom filter data (the actual Bloom bits). - * Not used for compound Bloom filters. - * + * Get a writable interface into bloom filter data (the actual Bloom bits). Not used for compound + * Bloom filters. 
* @return a writable instance that can be later written to a stream */ Writable getDataWriter(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java index efff41e11c86..92345d225f93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java @@ -15,36 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.ReentrantLock; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.TimeUnit; +import java.util.AbstractQueue; import java.util.Collection; import java.util.Comparator; import java.util.Iterator; import java.util.Objects; -import java.util.AbstractQueue; - +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; - /** - * A generic bounded blocking Priority-Queue. - * - * The elements of the priority queue are ordered according to the Comparator - * provided at queue construction time. - * - * If multiple elements have the same priority this queue orders them in - * FIFO (first-in-first-out) manner. - * The head of this queue is the least element with respect to the specified - * ordering. If multiple elements are tied for least value, the head is the - * first one inserted. - * The queue retrieval operations poll, remove, peek, and element access the - * element at the head of the queue. + * A generic bounded blocking Priority-Queue. The elements of the priority queue are ordered + * according to the Comparator provided at queue construction time. If multiple elements have the + * same priority this queue orders them in FIFO (first-in-first-out) manner. The head of this queue + * is the least element with respect to the specified ordering. If multiple elements are tied for + * least value, the head is the first one inserted. The queue retrieval operations poll, remove, + * peek, and element access the element at the head of the queue. */ @InterfaceAudience.Private @InterfaceStability.Stable @@ -58,7 +49,7 @@ private static class PriorityQueue { @SuppressWarnings("unchecked") public PriorityQueue(int capacity, Comparator comparator) { - this.objects = (E[])new Object[capacity]; + this.objects = (E[]) new Object[capacity]; this.comparator = comparator; } @@ -133,7 +124,6 @@ private int upperBound(int start, int end, E key) { } } - // Lock used for all operations private final ReentrantLock lock = new ReentrantLock(); @@ -146,13 +136,12 @@ private int upperBound(int start, int end, E key) { private final PriorityQueue queue; /** - * Creates a PriorityQueue with the specified capacity that orders its - * elements according to the specified comparator. + * Creates a PriorityQueue with the specified capacity that orders its elements according to the + * specified comparator. 
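A small usage sketch for the bounded blocking priority queue whose hunks surround this point: elements are returned in comparator order, ties keep FIFO order, and put blocks when the queue is at capacity. The element type and comparator below are illustrative only.

// Illustrative use of org.apache.hadoop.hbase.util.BoundedPriorityBlockingQueue.
import java.util.Comparator;
import org.apache.hadoop.hbase.util.BoundedPriorityBlockingQueue;

public final class BoundedQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    // Capacity 4; smaller integers come out first.
    BoundedPriorityBlockingQueue<Integer> q =
      new BoundedPriorityBlockingQueue<>(4, Comparator.<Integer>naturalOrder());
    q.put(5);
    q.put(1);
    q.put(3);
    System.out.println(q.poll());  // 1
    System.out.println(q.poll());  // 3
    System.out.println(q.poll());  // 5
  }
}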
* @param capacity the capacity of this queue * @param comparator the comparator that will be used to order this priority queue */ - public BoundedPriorityBlockingQueue(int capacity, - Comparator comparator) { + public BoundedPriorityBlockingQueue(int capacity, Comparator comparator) { this.queue = new PriorityQueue<>(capacity, comparator); } @@ -190,16 +179,14 @@ public void put(E e) throws InterruptedException { } @Override - public boolean offer(E e, long timeout, TimeUnit unit) - throws InterruptedException { + public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException { Objects.requireNonNull(e); long nanos = unit.toNanos(timeout); lock.lockInterruptibly(); try { while (queue.remainingCapacity() == 0) { - if (nanos <= 0) - return false; + if (nanos <= 0) return false; nanos = notFull.awaitNanos(nanos); } this.queue.add(e); @@ -242,8 +229,7 @@ public E poll() { } @Override - public E poll(long timeout, TimeUnit unit) - throws InterruptedException { + public E poll(long timeout, TimeUnit unit) throws InterruptedException { long nanos = unit.toNanos(timeout); lock.lockInterruptibly(); E result = null; @@ -323,10 +309,8 @@ public int drainTo(Collection c) { @Override public int drainTo(Collection c, int maxElements) { Objects.requireNonNull(c); - if (c == this) - throw new IllegalArgumentException(); - if (maxElements <= 0) - return 0; + if (c == this) throw new IllegalArgumentException(); + if (maxElements <= 0) return 0; lock.lock(); try { int n = Math.min(queue.size(), maxElements); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java index d69a8c7483c7..5465c24540a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,11 +22,10 @@ import java.util.Iterator; import java.util.List; import java.util.SortedSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility scanner that wraps a sortable collection and serves as a KeyValueScanner. @@ -43,8 +41,7 @@ public CollectionBackedScanner(SortedSet set) { this(set, CellComparator.getInstance()); } - public CollectionBackedScanner(SortedSet set, - CellComparator comparator) { + public CollectionBackedScanner(SortedSet set, CellComparator comparator) { this.comparator = comparator; data = set; init(); @@ -54,16 +51,14 @@ public CollectionBackedScanner(List list) { this(list, CellComparator.getInstance()); } - public CollectionBackedScanner(List list, - CellComparator comparator) { + public CollectionBackedScanner(List list, CellComparator comparator) { Collections.sort(list, comparator); this.comparator = comparator; data = list; init(); } - public CollectionBackedScanner(CellComparator comparator, - Cell... array) { + public CollectionBackedScanner(CellComparator comparator, Cell... 
array) { this.comparator = comparator; List tmp = new ArrayList<>(array.length); @@ -75,7 +70,7 @@ public CollectionBackedScanner(CellComparator comparator, private void init() { iter = data.iterator(); - if(iter.hasNext()){ + if (iter.hasNext()) { current = iter.next(); } } @@ -88,7 +83,7 @@ public Cell peek() { @Override public Cell next() { Cell oldCurrent = current; - if(iter.hasNext()){ + if (iter.hasNext()) { current = iter.next(); } else { current = null; @@ -105,10 +100,10 @@ public boolean seek(Cell seekCell) { @Override public boolean reseek(Cell seekCell) { - while(iter.hasNext()){ + while (iter.hasNext()) { Cell next = iter.next(); int ret = comparator.compare(next, seekCell); - if(ret >= 0){ + if (ret >= 0) { current = next; return true; } @@ -116,7 +111,6 @@ public boolean reseek(Cell seekCell) { return false; } - @Override public void close() { // do nothing diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index f0549c3d633c..fb792daa24b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,36 +19,35 @@ import java.io.IOException; import java.util.Locale; - import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hbase.CellBuilderType; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.io.compress.Compressor; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Compression validation test. Checks compression is working. Be sure to run - * on every node in your cluster. + * Compression validation test. Checks compression is working. Be sure to run on every node in your + * cluster. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -77,23 +75,22 @@ public static boolean testCompression(String codec) { } } - private final static Boolean[] compressionTestResults - = new Boolean[Compression.Algorithm.values().length]; + private final static Boolean[] compressionTestResults = + new Boolean[Compression.Algorithm.values().length]; static { - for (int i = 0 ; i < compressionTestResults.length ; ++i) { + for (int i = 0; i < compressionTestResults.length; ++i) { compressionTestResults[i] = null; } } - public static void testCompression(Compression.Algorithm algo) - throws IOException { + public static void testCompression(Compression.Algorithm algo) throws IOException { if (compressionTestResults[algo.ordinal()] != null) { if (compressionTestResults[algo.ordinal()]) { - return ; // already passed test, dont do it again. + return; // already passed test, dont do it again. } else { // failed. - throw new DoNotRetryIOException("Compression algorithm '" + algo.getName() + "'" + - " previously failed test."); + throw new DoNotRetryIOException( + "Compression algorithm '" + algo.getName() + "'" + " previously failed test."); } } @@ -111,34 +108,24 @@ public static void testCompression(Compression.Algorithm algo) public static void usage() { - System.err.println( - "Usage: CompressionTest " + - StringUtils.join( Compression.Algorithm.values(), "|").toLowerCase(Locale.ROOT) + - "\n" + - "For example:\n" + - " hbase " + CompressionTest.class + " file:///tmp/testfile gz\n"); + System.err.println("Usage: CompressionTest " + + StringUtils.join(Compression.Algorithm.values(), "|").toLowerCase(Locale.ROOT) + "\n" + + "For example:\n" + " hbase " + CompressionTest.class + " file:///tmp/testfile gz\n"); System.exit(1); } - public static void doSmokeTest(FileSystem fs, Path path, String codec) - throws Exception { + public static void doSmokeTest(FileSystem fs, Path path, String codec) throws Exception { Configuration conf = HBaseConfiguration.create(); - HFileContext context = new HFileContextBuilder() - .withCompression(HFileWriterImpl.compressionByName(codec)).build(); - HFile.Writer writer = HFile.getWriterFactoryNoCache(conf) - .withPath(fs, path) - .withFileContext(context) - .create(); + HFileContext context = + new HFileContextBuilder().withCompression(HFileWriterImpl.compressionByName(codec)).build(); + HFile.Writer writer = + HFile.getWriterFactoryNoCache(conf).withPath(fs, path).withFileContext(context).create(); // Write any-old Cell... 
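The CompressionTest changes above keep the tool's command-line entry point (its usage() text shows, for example, hbase org.apache.hadoop.hbase.util.CompressionTest file:///tmp/testfile gz). It can also be probed in code via the static testCompression(String) helper visible in the hunk header; the snippet below is an illustrative sketch, with the codec names chosen only as examples.

import org.apache.hadoop.hbase.util.CompressionTest;

public final class CompressionProbe {
  public static void main(String[] args) {
    // testCompression(String) returns true if the named codec loads and passes a round trip.
    for (String codec : new String[] { "gz", "snappy", "lz4" }) {
      System.out.println(codec + " usable here: " + CompressionTest.testCompression(codec));
    }
  }
}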
- final byte [] rowKey = Bytes.toBytes("compressiontestkey"); - Cell c = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(rowKey) - .setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(Bytes.toBytes("compressiontestval")) - .build(); + final byte[] rowKey = Bytes.toBytes("compressiontestkey"); + Cell c = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowKey) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(Bytes.toBytes("compressiontestval")).build(); writer.append(c); writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval")); writer.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java index cdc926fa709f..b9bacf1d9d33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConfigurationUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,18 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.util.StringUtils; - import java.util.AbstractMap; import java.util.Collection; import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** * Utilities for storing more complex collection types in @@ -34,7 +33,7 @@ */ @InterfaceAudience.Public public final class ConfigurationUtil { - // TODO: hopefully this is a good delimiter; it's not in the base64 alphabet, + // TODO: hopefully this is a good delimiter; it's not in the base64 alphabet, // nor is it valid for paths public static final char KVP_DELIMITER = '^'; @@ -44,11 +43,10 @@ private ConfigurationUtil() { } /** - * Store a collection of Map.Entry's in conf, with each entry separated by ',' - * and key values delimited by {@link #KVP_DELIMITER} - * - * @param conf configuration to store the collection in - * @param key overall key to store keyValues under + * Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values + * delimited by {@link #KVP_DELIMITER} + * @param conf configuration to store the collection in + * @param key overall key to store keyValues under * @param keyValues kvps to be stored under key in conf */ public static void setKeyValues(Configuration conf, String key, @@ -57,11 +55,10 @@ public static void setKeyValues(Configuration conf, String key, } /** - * Store a collection of Map.Entry's in conf, with each entry 
separated by ',' - * and key values delimited by delimiter. - * - * @param conf configuration to store the collection in - * @param key overall key to store keyValues under + * Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values + * delimited by delimiter. + * @param conf configuration to store the collection in + * @param key overall key to store keyValues under * @param keyValues kvps to be stored under key in conf * @param delimiter character used to separate each kvp */ @@ -78,9 +75,8 @@ public static void setKeyValues(Configuration conf, String key, /** * Retrieve a list of key value pairs from configuration, stored under the provided key - * * @param conf configuration to retrieve kvps from - * @param key key under which the key values are stored + * @param key key under which the key values are stored * @return the list of kvps stored under key in conf, or null if the key isn't present. * @see #setKeyValues(Configuration, String, Collection, char) */ @@ -90,9 +86,8 @@ public static List> getKeyValues(Configuration conf, S /** * Retrieve a list of key value pairs from configuration, stored under the provided key - * - * @param conf configuration to retrieve kvps from - * @param key key under which the key values are stored + * @param conf configuration to retrieve kvps from + * @param key key under which the key values are stored * @param delimiter character used to separate each kvp * @return the list of kvps stored under key in conf, or null if the key isn't present. * @see #setKeyValues(Configuration, String, Collection, char) @@ -111,9 +106,8 @@ public static List> getKeyValues(Configuration conf, S String[] splitKvp = StringUtils.split(kvp, delimiter); if (splitKvp.length != 2) { - throw new IllegalArgumentException( - "Expected key value pair for configuration key '" + key + "'" + " to be of form '" - + delimiter + "; was " + kvp + " instead"); + throw new IllegalArgumentException("Expected key value pair for configuration key '" + key + + "'" + " to be of form '" + delimiter + "; was " + kvp + " instead"); } rtn.add(new AbstractMap.SimpleImmutableEntry<>(splitKvp[0], splitKvp[1])); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java index 4559d783729c..1e7e1f5b5e2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
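A hedged round-trip sketch for the ConfigurationUtil helpers in the hunks above: entries are stored under a single configuration key as key/value pairs delimited by KVP_DELIMITER ('^') and joined with ','; getKeyValues reads them back. The key name and entries below are illustrative.

import java.util.AbstractMap;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.ConfigurationUtil;

public final class ConfigurationUtilSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Stored roughly as: owner^alice,ttl^86400 under the single key below.
    ConfigurationUtil.setKeyValues(conf, "example.props", Arrays.asList(
      new AbstractMap.SimpleImmutableEntry<String, String>("owner", "alice"),
      new AbstractMap.SimpleImmutableEntry<String, String>("ttl", "86400")));
    List<Map.Entry<String, String>> back = ConfigurationUtil.getKeyValues(conf, "example.props");
    back.forEach(e -> System.out.println(e.getKey() + " -> " + e.getValue()));
  }
}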
See the NOTICE file * distributed with this work for additional information @@ -22,15 +21,11 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -39,12 +34,14 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A utility to store user specific HConnections in memory. - * There is a chore to clean up connections idle for too long. - * This class is used by REST server and Thrift server to - * support authentication and impersonation. + * A utility to store user specific HConnections in memory. There is a chore to clean up connections + * idle for too long. This class is used by REST server and Thrift server to support authentication + * and impersonation. */ @InterfaceAudience.Private public class ConnectionCache { @@ -58,27 +55,33 @@ public class ConnectionCache { private final Configuration conf; private final ChoreService choreService; - private final ThreadLocal effectiveUserNames = - new ThreadLocal() { + private final ThreadLocal effectiveUserNames = new ThreadLocal() { @Override protected String initialValue() { return realUserName; } }; - public ConnectionCache(final Configuration conf, - final UserProvider userProvider, + public ConnectionCache(final Configuration conf, final UserProvider userProvider, final int cleanInterval, final int maxIdleTime) throws IOException { Stoppable stoppable = new Stoppable() { private volatile boolean isStopped = false; - @Override public void stop(String why) { isStopped = true;} - @Override public boolean isStopped() {return isStopped;} + + @Override + public void stop(String why) { + isStopped = true; + } + + @Override + public boolean isStopped() { + return isStopped; + } }; this.choreService = new ChoreService("ConnectionCache"); ScheduledChore cleaner = new ScheduledChore("ConnectionCleaner", stoppable, cleanInterval) { @Override protected void chore() { - for (Map.Entry entry: connections.entrySet()) { + for (Map.Entry entry : connections.entrySet()) { ConnectionInfo connInfo = entry.getValue(); if (connInfo.timedOut(maxIdleTime)) { if (connInfo.admin != null) { @@ -127,8 +130,7 @@ public void shutdown() { } /** - * Caller doesn't close the admin afterwards. - * We need to manage it and close it properly. + * Caller doesn't close the admin afterwards. We need to manage it and close it properly. */ public Admin getAdmin() throws IOException { ConnectionInfo connInfo = getCurrentConnection(); @@ -161,8 +163,7 @@ public RegionLocator getRegionLocator(byte[] tableName) throws IOException { } /** - * Get the cached connection for the current user. - * If none or timed out, create a new one. + * Get the cached connection for the current user. If none or timed out, create a new one. 
*/ ConnectionInfo getCurrentConnection() throws IOException { String userName = getEffectiveUser(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CoprocessorConfigurationUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CoprocessorConfigurationUtil.java index 6c0415462507..9b284006e331 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CoprocessorConfigurationUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CoprocessorConfigurationUtil.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -33,7 +33,7 @@ private CoprocessorConfigurationUtil() { } public static boolean checkConfigurationChange(Configuration oldConfig, Configuration newConfig, - String... configurationKey) { + String... configurationKey) { Preconditions.checkArgument(configurationKey != null, "Configuration Key(s) must be provided"); boolean isConfigurationChange = false; for (String key : configurationKey) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java index aba421d8078d..0298f3da0271 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.lang.management.ManagementFactory; @@ -26,22 +24,20 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Locale; - import javax.management.JMException; import javax.management.MBeanServer; import javax.management.MalformedObjectNameException; import javax.management.ObjectName; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocatorMetric; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufAllocatorMetricProvider; import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator; - /** * Utilities for interacting with and monitoring DirectByteBuffer allocations. */ @@ -82,17 +78,16 @@ public class DirectMemoryUtils { } /** - * @return the setting of -XX:MaxDirectMemorySize as a long. Returns 0 if - * -XX:MaxDirectMemorySize is not set. + * @return the setting of -XX:MaxDirectMemorySize as a long. Returns 0 if -XX:MaxDirectMemorySize + * is not set. */ public static long getDirectMemorySize() { RuntimeMXBean runtimemxBean = ManagementFactory.getRuntimeMXBean(); List arguments = runtimemxBean.getInputArguments(); - long multiplier = 1; //for the byte case. + long multiplier = 1; // for the byte case. 
for (String s : arguments) { if (s.contains("-XX:MaxDirectMemorySize=")) { - String memSize = s.toLowerCase(Locale.ROOT) - .replace("-xx:maxdirectmemorysize=", "").trim(); + String memSize = s.toLowerCase(Locale.ROOT).replace("-xx:maxdirectmemorysize=", "").trim(); if (memSize.contains("k")) { multiplier = 1024; @@ -133,29 +128,24 @@ public static long getDirectMemoryUsage() { */ public static long getNettyDirectMemoryUsage() { - ByteBufAllocatorMetric metric = ((ByteBufAllocatorMetricProvider) - PooledByteBufAllocator.DEFAULT).metric(); + ByteBufAllocatorMetric metric = + ((ByteBufAllocatorMetricProvider) PooledByteBufAllocator.DEFAULT).metric(); return metric.usedDirectMemory(); } /** - * DirectByteBuffers are garbage collected by using a phantom reference and a - * reference queue. Every once a while, the JVM checks the reference queue and - * cleans the DirectByteBuffers. However, as this doesn't happen - * immediately after discarding all references to a DirectByteBuffer, it's - * easy to OutOfMemoryError yourself using DirectByteBuffers. This function + * DirectByteBuffers are garbage collected by using a phantom reference and a reference queue. + * Every once a while, the JVM checks the reference queue and cleans the DirectByteBuffers. + * However, as this doesn't happen immediately after discarding all references to a + * DirectByteBuffer, it's easy to OutOfMemoryError yourself using DirectByteBuffers. This function * explicitly calls the Cleaner method of a DirectByteBuffer. - * - * @param toBeDestroyed - * The DirectByteBuffer that will be "cleaned". Utilizes reflection. - * + * @param toBeDestroyed The DirectByteBuffer that will be "cleaned". Utilizes reflection. */ public static void destroyDirectByteBuffer(ByteBuffer toBeDestroyed) - throws IllegalArgumentException, IllegalAccessException, - InvocationTargetException, SecurityException, NoSuchMethodException { + throws IllegalArgumentException, IllegalAccessException, InvocationTargetException, + SecurityException, NoSuchMethodException { - Preconditions.checkArgument(toBeDestroyed.isDirect(), - "toBeDestroyed isn't direct!"); + Preconditions.checkArgument(toBeDestroyed.isDirect(), "toBeDestroyed isn't direct!"); Method cleanerMethod = toBeDestroyed.getClass().getMethod("cleaner"); cleanerMethod.setAccessible(true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java index cee3b56d6f6f..d84f2387aa95 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
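The getDirectMemorySize hunk above scans the JVM input arguments for -XX:MaxDirectMemorySize= and applies a k/m/g multiplier to the numeric part. The standalone sketch below mirrors that parsing on a single argument string; the helper name is illustrative and not HBase API.

import java.util.Locale;

public final class MaxDirectMemoryParseSketch {
  // Parses values like "-XX:MaxDirectMemorySize=512m" into a byte count.
  static long parse(String jvmArg) {
    String memSize = jvmArg.toLowerCase(Locale.ROOT).replace("-xx:maxdirectmemorysize=", "").trim();
    long multiplier = 1;  // plain bytes when no suffix
    if (memSize.endsWith("k")) {
      multiplier = 1024L;
    } else if (memSize.endsWith("m")) {
      multiplier = 1024L * 1024;
    } else if (memSize.endsWith("g")) {
      multiplier = 1024L * 1024 * 1024;
    }
    if (multiplier != 1) {
      memSize = memSize.substring(0, memSize.length() - 1);
    }
    return Long.parseLong(memSize) * multiplier;
  }

  public static void main(String[] args) {
    System.out.println(parse("-XX:MaxDirectMemorySize=512m"));  // 536870912
    System.out.println(parse("-XX:MaxDirectMemorySize=2g"));    // 2147483648
  }
}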
See the NOTICE file * distributed with this work for additional information @@ -23,17 +22,16 @@ import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyStoreKeyProvider; import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class EncryptionTest { @@ -47,15 +45,13 @@ private EncryptionTest() { } /** - * Check that the configured key provider can be loaded and initialized, or - * throw an exception. - * + * Check that the configured key provider can be loaded and initialized, or throw an exception. * @param conf * @throws IOException */ public static void testKeyProvider(final Configuration conf) throws IOException { - String providerClassName = conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, - KeyStoreKeyProvider.class.getName()); + String providerClassName = + conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName()); Boolean result = keyProviderResults.get(providerClassName); if (result == null) { try { @@ -63,8 +59,8 @@ public static void testKeyProvider(final Configuration conf) throws IOException keyProviderResults.put(providerClassName, true); } catch (Exception e) { // most likely a RuntimeException keyProviderResults.put(providerClassName, false); - throw new IOException("Key provider " + providerClassName + " failed test: " + - e.getMessage(), e); + throw new IOException( + "Key provider " + providerClassName + " failed test: " + e.getMessage(), e); } } else if (!result) { throw new IOException("Key provider " + providerClassName + " previously failed test"); @@ -72,15 +68,13 @@ public static void testKeyProvider(final Configuration conf) throws IOException } /** - * Check that the configured cipher provider can be loaded and initialized, or - * throw an exception. - * + * Check that the configured cipher provider can be loaded and initialized, or throw an exception. 
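The testKeyProvider hunk above follows a "load the configured provider class once, then cache pass/fail per class name" pattern: a successful instantiation is remembered as true, a failure is remembered as false and rethrown as an IOException, and a previously recorded failure short-circuits. A minimal sketch of that pattern follows; the class and method names are hypothetical, and the real code instantiates HBase key/cipher providers rather than an arbitrary class.

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ProviderCheckSketch {
  // Remembers, per provider class name, whether instantiation succeeded.
  private static final Map<String, Boolean> results = new ConcurrentHashMap<>();

  // Loads and instantiates the named provider exactly once; later calls reuse the cached outcome.
  static void checkProvider(String providerClassName) throws IOException {
    Boolean result = results.get(providerClassName);
    if (result == null) {
      try {
        Class.forName(providerClassName).getDeclaredConstructor().newInstance();
        results.put(providerClassName, true);
      } catch (Exception e) { // most likely a reflection or runtime failure
        results.put(providerClassName, false);
        throw new IOException(
          "Provider " + providerClassName + " failed test: " + e.getMessage(), e);
      }
    } else if (!result) {
      throw new IOException("Provider " + providerClassName + " previously failed test");
    }
  }

  public static void main(String[] args) throws IOException {
    checkProvider("java.util.ArrayList"); // succeeds and the result is cached
  }
}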
* @param conf * @throws IOException */ public static void testCipherProvider(final Configuration conf) throws IOException { - String providerClassName = conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, - DefaultCipherProvider.class.getName()); + String providerClassName = + conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, DefaultCipherProvider.class.getName()); Boolean result = cipherProviderResults.get(providerClassName); if (result == null) { try { @@ -88,8 +82,8 @@ public static void testCipherProvider(final Configuration conf) throws IOExcepti cipherProviderResults.put(providerClassName, true); } catch (Exception e) { // most likely a RuntimeException cipherProviderResults.put(providerClassName, false); - throw new IOException("Cipher provider " + providerClassName + " failed test: " + - e.getMessage(), e); + throw new IOException( + "Cipher provider " + providerClassName + " failed test: " + e.getMessage(), e); } } else if (!result) { throw new IOException("Cipher provider " + providerClassName + " previously failed test"); @@ -97,23 +91,22 @@ public static void testCipherProvider(final Configuration conf) throws IOExcepti } /** - * Check that the specified cipher can be loaded and initialized, or throw - * an exception. Verifies key and cipher provider configuration as a - * prerequisite for cipher verification. Also verifies if encryption is enabled globally. - * + * Check that the specified cipher can be loaded and initialized, or throw an exception. Verifies + * key and cipher provider configuration as a prerequisite for cipher verification. Also verifies + * if encryption is enabled globally. * @param conf HBase configuration * @param cipher chiper algorith to use for the column family * @param key encryption key * @throws IOException in case of encryption configuration error */ - public static void testEncryption(final Configuration conf, final String cipher, - byte[] key) throws IOException { + public static void testEncryption(final Configuration conf, final String cipher, byte[] key) + throws IOException { if (cipher == null) { return; } - if(!Encryption.isEncryptionEnabled(conf)) { - String message = String.format("Cipher %s failed test: encryption is disabled on the cluster", - cipher); + if (!Encryption.isEncryptionEnabled(conf)) { + String message = + String.format("Cipher %s failed test: encryption is disabled on the cluster", cipher); throw new IOException(message); } testKeyProvider(conf); @@ -129,8 +122,7 @@ public static void testEncryption(final Configuration conf, final String cipher, } else { // This will be a wrapped key from schema context.setKey(EncryptionUtil.unwrapKey(conf, - conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"), - key)); + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"), key)); } byte[] iv = null; if (context.getCipher().getIvLength() > 0) { @@ -143,8 +135,8 @@ public static void testEncryption(final Configuration conf, final String cipher, Encryption.encrypt(out, new ByteArrayInputStream(plaintext), context, iv); byte[] ciphertext = out.toByteArray(); out.reset(); - Encryption.decrypt(out, new ByteArrayInputStream(ciphertext), plaintext.length, - context, iv); + Encryption.decrypt(out, new ByteArrayInputStream(ciphertext), plaintext.length, context, + iv); byte[] test = out.toByteArray(); if (!Bytes.equals(plaintext, test)) { throw new IOException("Did not pass encrypt/decrypt test"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java index 04a33846871b..1bb5a3f1fe50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,22 +20,19 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; - import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Thread that walks over the filesystem, and computes the mappings - * Region -> BestHost and Region -> {@code Map} - * + * Thread that walks over the filesystem, and computes the mappings Region -> BestHost and Region -> + * {@code Map} */ @InterfaceAudience.Private class FSRegionScanner implements Runnable { @@ -52,17 +48,16 @@ class FSRegionScanner implements Runnable { /** * Maps each region to the RS with highest locality for that region. */ - private final Map regionToBestLocalityRSMapping; + private final Map regionToBestLocalityRSMapping; /** - * Maps region encoded names to maps of hostnames to fractional locality of - * that region on that host. + * Maps region encoded names to maps of hostnames to fractional locality of that region on that + * host. 
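The FSRegionScanner javadoc above describes two mappings: region to the host with the highest locality, and region to a per-host fractional locality. The fraction is simply the number of the region's HDFS blocks hosted on a given server divided by the region's total block count. A small self-contained sketch of that arithmetic, with illustrative names and made-up hosts:

import java.util.HashMap;
import java.util.Map;

public class LocalitySketch {
  // blockHosts: one String[] of hosts per HDFS block of the region's store files.
  // Returns host -> fraction of the region's blocks stored on that host.
  static Map<String, Float> localityByHost(String[][] blockHosts) {
    Map<String, Integer> blockCount = new HashMap<>();
    int totalBlocks = 0;
    for (String[] hosts : blockHosts) {
      totalBlocks++;
      for (String host : hosts) {
        blockCount.merge(host, 1, Integer::sum);
      }
    }
    Map<String, Float> locality = new HashMap<>();
    for (Map.Entry<String, Integer> e : blockCount.entrySet()) {
      locality.put(e.getKey(), (float) e.getValue() / totalBlocks);
    }
    return locality;
  }

  public static void main(String[] args) {
    String[][] blocks = { { "rs1", "rs2" }, { "rs1", "rs3" }, { "rs1", "rs2" } };
    // rs1 holds all three blocks -> locality 1.0, so it would be the "best" host for the region.
    System.out.println(localityByHost(blocks));
  }
}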
*/ private Map> regionDegreeLocalityMapping; - FSRegionScanner(FileSystem fs, Path regionPath, - Map regionToBestLocalityRSMapping, - Map> regionDegreeLocalityMapping) { + FSRegionScanner(FileSystem fs, Path regionPath, Map regionToBestLocalityRSMapping, + Map> regionDegreeLocalityMapping) { this.fs = fs; this.regionPath = regionPath; this.regionToBestLocalityRSMapping = regionToBestLocalityRSMapping; @@ -75,7 +70,7 @@ public void run() { // empty the map for each region Map blockCountMap = new HashMap<>(); - //get table name + // get table name String tableName = regionPath.getParent().getName(); int totalBlkCount = 0; @@ -98,15 +93,14 @@ public void run() { } for (FileStatus storeFile : storeFileLists) { - BlockLocation[] blkLocations = - fs.getFileBlockLocations(storeFile, 0, storeFile.getLen()); + BlockLocation[] blkLocations = fs.getFileBlockLocations(storeFile, 0, storeFile.getLen()); if (null == blkLocations) { continue; } totalBlkCount += blkLocations.length; - for(BlockLocation blk: blkLocations) { - for (String host: blk.getHosts()) { + for (BlockLocation blk : blkLocations) { + for (String host : blk.getHosts()) { AtomicInteger count = blockCountMap.get(host); if (count == null) { count = new AtomicInteger(0); @@ -137,11 +131,11 @@ public void run() { } if (hostToRun.endsWith(".")) { - hostToRun = hostToRun.substring(0, hostToRun.length()-1); + hostToRun = hostToRun.substring(0, hostToRun.length() - 1); } String name = tableName + ":" + regionPath.getName(); synchronized (regionToBestLocalityRSMapping) { - regionToBestLocalityRSMapping.put(name, hostToRun); + regionToBestLocalityRSMapping.put(name, hostToRun); } } @@ -153,7 +147,7 @@ public void run() { host = host.substring(0, host.length() - 1); } // Locality is fraction of blocks local to this host. - float locality = ((float)entry.getValue().get()) / totalBlkCount; + float locality = ((float) entry.getValue().get()) / totalBlkCount; hostLocalityMap.put(host, locality); } // Put the locality map into the result map, keyed by the encoded name diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 7ac09a97adfb..8bd1f05e9dc8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -57,22 +57,19 @@ import org.apache.hbase.thirdparty.com.google.common.primitives.Ints; /** - * Implementation of {@link TableDescriptors} that reads descriptors from the - * passed filesystem. It expects descriptors to be in a file in the - * {@link #TABLEINFO_DIR} subdir of the table's directory in FS. Can be read-only - * -- i.e. does not modify the filesystem or can be read and write. - * - *

          Also has utility for keeping up the table descriptors tableinfo file. - * The table schema file is kept in the {@link #TABLEINFO_DIR} subdir - * of the table directory in the filesystem. - * It has a {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the - * edit sequenceid: e.g. .tableinfo.0000000003. This sequenceid - * is always increasing. It starts at zero. The table schema file with the - * highest sequenceid has the most recent schema edit. Usually there is one file - * only, the most recent but there may be short periods where there are more - * than one file. Old files are eventually cleaned. Presumption is that there - * will not be lots of concurrent clients making table schema edits. If so, - * the below needs a bit of a reworking and perhaps some supporting api in hdfs. + * Implementation of {@link TableDescriptors} that reads descriptors from the passed filesystem. It + * expects descriptors to be in a file in the {@link #TABLEINFO_DIR} subdir of the table's directory + * in FS. Can be read-only -- i.e. does not modify the filesystem or can be read and write. + *

          + * Also has utility for keeping up the table descriptors tableinfo file. The table schema file is + * kept in the {@link #TABLEINFO_DIR} subdir of the table directory in the filesystem. It has a + * {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the edit sequenceid: e.g. + * .tableinfo.0000000003. This sequenceid is always increasing. It starts at zero. The + * table schema file with the highest sequenceid has the most recent schema edit. Usually there is + * one file only, the most recent but there may be short periods where there are more than one file. + * Old files are eventually cleaned. Presumption is that there will not be lots of concurrent + * clients making table schema edits. If so, the below needs a bit of a reworking and perhaps some + * supporting api in hdfs. */ @InterfaceAudience.Private public class FSTableDescriptors implements TableDescriptors { @@ -92,7 +89,7 @@ public class FSTableDescriptors implements TableDescriptors { static final String TABLEINFO_FILE_PREFIX = ".tableinfo"; public static final String TABLEINFO_DIR = ".tabledesc"; - // This cache does not age out the old stuff. Thinking is that the amount + // This cache does not age out the old stuff. Thinking is that the amount // of data we keep up in here is so small, no need to do occasional purge. // TODO. private final Map cache = new ConcurrentHashMap<>(); @@ -123,7 +120,7 @@ public static void tryUpdateMetaTableDescriptor(Configuration conf) throws IOExc } public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration conf, - FileSystem fs, Path rootdir) throws IOException { + FileSystem fs, Path rootdir) throws IOException { // see if we already have meta descriptor on fs. Write one if not. Optional> opt = getTableDescriptorFromFs(fs, CommonFSUtils.getTableDir(rootdir, TableName.META_TABLE_NAME), false); @@ -143,63 +140,53 @@ public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration c return td; } - public static ColumnFamilyDescriptor getTableFamilyDescForMeta( - final Configuration conf) { - return ColumnFamilyDescriptorBuilder - .newBuilder(HConstants.TABLE_FAMILY) - .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, - HConstants.DEFAULT_HBASE_META_VERSIONS)) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL) - .build(); + public static ColumnFamilyDescriptor getTableFamilyDescForMeta(final Configuration conf) { + return ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY) + .setMaxVersions( + conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) + .setInMemory(true).setBlocksize(8 * 1024).setScope(HConstants.REPLICATION_SCOPE_LOCAL) + .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) + .setBloomFilterType(BloomType.ROWCOL).build(); } public static ColumnFamilyDescriptor getReplBarrierFamilyDescForMeta() { - return ColumnFamilyDescriptorBuilder - .newBuilder(HConstants.REPLICATION_BARRIER_FAMILY) - .setMaxVersions(HConstants.ALL_VERSIONS) - .setInMemory(true) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL) - .build(); + return ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY) + 
.setMaxVersions(HConstants.ALL_VERSIONS).setInMemory(true) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) + .setBloomFilterType(BloomType.ROWCOL).build(); } private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) - throws IOException { + throws IOException { // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now // the META table data goes to File mode BC only. Test how that affect the system. If too much, // we have to rethink about adding back the setCacheDataInL1 for META table CFs. - return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) - .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, - HConstants.DEFAULT_HBASE_META_VERSIONS)) - .setInMemory(true) - .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, - HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setBloomFilterType(BloomType.ROWCOL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .build()) - .setColumnFamily(getTableFamilyDescForMeta(conf)) - .setColumnFamily(getReplBarrierFamilyDescForMeta()) - .setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(HConstants.NAMESPACE_FAMILY) - .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, - HConstants.DEFAULT_HBASE_META_VERSIONS)) - .setInMemory(true) - .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, - HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) - .setBloomFilterType(BloomType.ROWCOL) - .build()) - .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder( - MultiRowMutationEndpoint.class.getName()) - .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); + return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) + .setMaxVersions( + conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) + .setInMemory(true) + .setBlocksize( + conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.ROWCOL) + .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) + .build()) + .setColumnFamily(getTableFamilyDescForMeta(conf)) + .setColumnFamily(getReplBarrierFamilyDescForMeta()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.NAMESPACE_FAMILY) + .setMaxVersions( + conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) + .setInMemory(true) + .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, + HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + .setDataBlockEncoding( + org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1) + .setBloomFilterType(BloomType.ROWCOL).build()) + .setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder(MultiRowMutationEndpoint.class.getName()) + .setPriority(Coprocessor.PRIORITY_SYSTEM).build()); } protected boolean isUsecache() { @@ -232,7 +219,7 @@ public TableDescriptor get(TableName tableName) { TableDescriptor tdmt = null; try { tdmt = 
getTableDescriptorFromFs(fs, getTableDir(tableName), fsreadonly).map(Pair::getSecond) - .orElse(null); + .orElse(null); } catch (IOException ioe) { LOG.debug("Exception during readTableDecriptor. Current table name = " + tableName, ioe); } @@ -251,7 +238,7 @@ public TableDescriptor get(TableName tableName) { public Map getAll() throws IOException { Map tds = new TreeMap<>(); if (fsvisited) { - for (Map.Entry entry: this.cache.entrySet()) { + for (Map.Entry entry : this.cache.entrySet()) { tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue()); } } else { @@ -271,14 +258,14 @@ public Map getAll() throws IOException { } /** - * Find descriptors by namespace. - * @see #get(org.apache.hadoop.hbase.TableName) - */ + * Find descriptors by namespace. + * @see #get(org.apache.hadoop.hbase.TableName) + */ @Override public Map getByNamespace(String name) throws IOException { Map htds = new TreeMap<>(); List tableDirs = - FSUtils.getLocalTableDirs(fs, CommonFSUtils.getNamespaceDir(rootdir, name)); + FSUtils.getLocalTableDirs(fs, CommonFSUtils.getNamespaceDir(rootdir, name)); for (Path d : tableDirs) { TableDescriptor htd = get(CommonFSUtils.getTableName(d)); if (htd == null) { @@ -308,7 +295,7 @@ public void update(TableDescriptor td, boolean cacheOnly) throws IOException { } @RestrictedApi(explanation = "Should only be called in tests or self", link = "", - allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") + allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") Path updateTableDescriptor(TableDescriptor td) throws IOException { TableName tableName = td.getTableName(); Path tableDir = getTableDir(tableName); @@ -322,9 +309,8 @@ Path updateTableDescriptor(TableDescriptor td) throws IOException { } /** - * Removes the table descriptor from the local cache and returns it. - * If not in read only mode, it also deletes the entire table directory(!) - * from the FileSystem. + * Removes the table descriptor from the local cache and returns it. If not in read only mode, it + * also deletes the entire table directory(!) from the FileSystem. */ @Override public TableDescriptor remove(final TableName tablename) throws IOException { @@ -352,12 +338,12 @@ public static boolean isTableDir(FileSystem fs, Path tableDir) throws IOExceptio * Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in reverse order. */ static final Comparator TABLEINFO_FILESTATUS_COMPARATOR = - new Comparator() { - @Override - public int compare(FileStatus left, FileStatus right) { - return right.getPath().getName().compareTo(left.getPath().getName()); - } - }; + new Comparator() { + @Override + public int compare(FileStatus left, FileStatus right) { + return right.getPath().getName().compareTo(left.getPath().getName()); + } + }; /** * Return the table directory in HDFS @@ -371,7 +357,8 @@ private Path getTableDir(TableName tableName) { public boolean accept(Path p) { // Accept any file that starts with TABLEINFO_NAME return p.getName().startsWith(TABLEINFO_FILE_PREFIX); - }}; + } + }; /** * Width of the sequenceid that is a suffix on a tableinfo file. @@ -384,10 +371,10 @@ public boolean accept(Path p) { * negative). 
*/ private static String formatTableInfoSequenceId(final int number) { - byte [] b = new byte[WIDTH_OF_SEQUENCE_ID]; + byte[] b = new byte[WIDTH_OF_SEQUENCE_ID]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return Bytes.toString(b); @@ -410,7 +397,7 @@ static final class SequenceIdAndFileLength { * @param p Path to a .tableinfo file. */ @RestrictedApi(explanation = "Should only be called in tests or self", link = "", - allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") + allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") static SequenceIdAndFileLength getTableInfoSequenceIdAndFileLength(Path p) { String name = p.getName(); if (!name.startsWith(TABLEINFO_FILE_PREFIX)) { @@ -436,19 +423,18 @@ static SequenceIdAndFileLength getTableInfoSequenceIdAndFileLength(Path p) { * Returns Name of tableinfo file. */ @RestrictedApi(explanation = "Should only be called in tests or self", link = "", - allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") + allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java") static String getTableInfoFileName(int sequenceId, byte[] content) { - return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceId) + "." + - content.length; + return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceId) + "." + + content.length; } /** - * Returns the latest table descriptor for the given table directly from the file system - * if it exists, bypassing the local cache. - * Returns null if it's not found. + * Returns the latest table descriptor for the given table directly from the file system if it + * exists, bypassing the local cache. Returns null if it's not found. */ - public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, - Path hbaseRootDir, TableName tableName) throws IOException { + public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path hbaseRootDir, + TableName tableName) throws IOException { Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableName); return getTableDescriptorFromFs(fs, tableDir); } @@ -458,7 +444,7 @@ public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, * the file system if it exists. */ public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir) - throws IOException { + throws IOException { return getTableDescriptorFromFs(fs, tableDir, true).map(Pair::getSecond).orElse(null); } @@ -470,7 +456,7 @@ private static void deleteMalformedFile(FileSystem fs, Path file) throws IOExcep } private static Optional> getTableDescriptorFromFs(FileSystem fs, - Path tableDir, boolean readonly) throws IOException { + Path tableDir, boolean readonly) throws IOException { Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR); FileStatus[] descFiles = CommonFSUtils.listStatus(fs, tableInfoDir, TABLEINFO_PATHFILTER); if (descFiles == null || descFiles.length < 1) { @@ -521,7 +507,7 @@ private static Optional> getTableDescriptorFro } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") public static void deleteTableDescriptors(FileSystem fs, Path tableDir) throws IOException { Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR); deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE); @@ -532,7 +518,7 @@ public static void deleteTableDescriptors(FileSystem fs, Path tableDir) throws I * is at most the given max sequenceId. 
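The hunks above describe the tableinfo naming scheme: a ".tableinfo" prefix, a zero-padded edit sequence id, and the serialized descriptor length, with the highest sequence id holding the most recent schema. A minimal sketch of that naming follows, assuming the 10-digit width shown by the ".tableinfo.0000000003" example in the class javadoc; the class and method names are illustrative, not the patched code itself.

public class TableInfoNameSketch {
  // Width of the zero-padded sequence id, matching the ".tableinfo.0000000003" example above.
  static final int WIDTH_OF_SEQUENCE_ID = 10;

  static String formatSequenceId(int number) {
    // Zero-pad the (absolute) sequence id to a fixed width so file names sort lexicographically.
    return String.format("%0" + WIDTH_OF_SEQUENCE_ID + "d", Math.abs(number));
  }

  static String tableInfoFileName(int sequenceId, byte[] content) {
    // Prefix, padded edit sequence id, then the serialized descriptor length.
    return ".tableinfo." + formatSequenceId(sequenceId) + "." + content.length;
  }

  public static void main(String[] args) {
    // Prints ".tableinfo.0000000003.42"
    System.out.println(tableInfoFileName(3, new byte[42]));
  }
}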
*/ private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId) - throws IOException { + throws IOException { FileStatus[] status = CommonFSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER); for (FileStatus file : status) { Path path = file.getPath(); @@ -556,7 +542,7 @@ private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxS * @return Descriptor file or null if we failed write. */ private static Path writeTableDescriptor(final FileSystem fs, final TableDescriptor td, - final Path tableDir, final FileStatus currentDescriptorFile) throws IOException { + final Path tableDir, final FileStatus currentDescriptorFile) throws IOException { // Here we will write to the final directory directly to avoid renaming as on OSS renaming is // not atomic and has performance issue. The reason why we could do this is that, in the below // code we will not overwrite existing files, we will write a new file instead. And when @@ -566,15 +552,15 @@ private static Path writeTableDescriptor(final FileSystem fs, final TableDescrip // In proc v2 we have table lock so typically, there will be no concurrent writes. Keep the // retry logic here since we may still want to write the table descriptor from for example, // HBCK2? - int currentSequenceId = currentDescriptorFile == null ? 0 : - getTableInfoSequenceIdAndFileLength(currentDescriptorFile.getPath()).sequenceId; + int currentSequenceId = currentDescriptorFile == null ? 0 + : getTableInfoSequenceIdAndFileLength(currentDescriptorFile.getPath()).sequenceId; // Put arbitrary upperbound on how often we retry int maxAttempts = 10; int maxSequenceId = currentSequenceId + maxAttempts; byte[] bytes = TableDescriptorBuilder.toByteArray(td); for (int newSequenceId = - currentSequenceId + 1; newSequenceId <= maxSequenceId; newSequenceId++) { + currentSequenceId + 1; newSequenceId <= maxSequenceId; newSequenceId++) { String fileName = getTableInfoFileName(newSequenceId, bytes); Path filePath = new Path(tableInfoDir, fileName); try (FSDataOutputStream out = fs.create(filePath, false)) { @@ -593,8 +579,7 @@ private static Path writeTableDescriptor(final FileSystem fs, final TableDescrip } /** - * Create new TableDescriptor in HDFS. Happens when we are creating table. - * Used by tests. + * Create new TableDescriptor in HDFS. Happens when we are creating table. Used by tests. * @return True if we successfully created file. */ public boolean createTableDescriptor(TableDescriptor htd) throws IOException { @@ -602,21 +587,19 @@ public boolean createTableDescriptor(TableDescriptor htd) throws IOException { } /** - * Create new TableDescriptor in HDFS. Happens when we are creating table. If - * forceCreation is true then even if previous table descriptor is present it - * will be overwritten - * + * Create new TableDescriptor in HDFS. Happens when we are creating table. If forceCreation is + * true then even if previous table descriptor is present it will be overwritten * @return True if we successfully created file. */ public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation) - throws IOException { + throws IOException { Path tableDir = getTableDir(htd.getTableName()); return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation); } /** - * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create - * a new table during cluster start or in Clone and Create Table Procedures. 
Checks readOnly flag + * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create a + * new table during cluster start or in Clone and Create Table Procedures. Checks readOnly flag * passed on construction. * @param tableDir table directory under which we should write the file * @param htd description of the table to write @@ -635,8 +618,8 @@ public boolean createTableDescriptorForTableDirectory(Path tableDir, TableDescri } /** - * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create - * a new table snapshoting. Does not enforce read-only. That is for caller to determine. + * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create a + * new table snapshoting. Does not enforce read-only. That is for caller to determine. * @param fs Filesystem to use. * @param tableDir table directory under which we should write the file * @param htd description of the table to write @@ -647,7 +630,7 @@ public boolean createTableDescriptorForTableDirectory(Path tableDir, TableDescri * @throws IOException if a filesystem error occurs */ public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path tableDir, - TableDescriptor htd, boolean forceCreation) throws IOException { + TableDescriptor htd, boolean forceCreation) throws IOException { Optional> opt = getTableDescriptorFromFs(fs, tableDir, false); if (opt.isPresent()) { LOG.debug("Current path={}", opt.get().getFirst()); @@ -661,4 +644,3 @@ public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path return writeTableDescriptor(fs, htd, tableDir, opt.map(Pair::getFirst).orElse(null)) != null; } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 461170de6a15..17af8d34f418 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -126,14 +125,14 @@ public static boolean isDistributedFileSystem(final FileSystem fs) throws IOExce // If passed an instance of HFileSystem, it fails instanceof DistributedFileSystem. // Check its backing fs for dfs-ness. if (fs instanceof HFileSystem) { - fileSystem = ((HFileSystem)fs).getBackingFs(); + fileSystem = ((HFileSystem) fs).getBackingFs(); } return fileSystem instanceof DistributedFileSystem; } /** * Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the - * '/a/b/c' part. If you passed in 'hdfs://a/b/c and b/c, it would return true. Does not consider + * '/a/b/c' part. If you passed in 'hdfs://a/b/c and b/c, it would return true. Does not consider * schema; i.e. if schemas different but path or subpath matches, the two will equate. * @param pathToSearch Path we will be trying to match. * @param pathTail @@ -160,10 +159,10 @@ public static boolean isMatchingTail(final Path pathToSearch, final Path pathTai if (toSearchName == null || toSearchName.isEmpty()) { break; } - // Move up a parent on each path for next go around. Path doesn't let us go off the end. + // Move up a parent on each path for next go around. Path doesn't let us go off the end. 
tailPath = tailPath.getParent(); toSearch = toSearch.getParent(); - } while(tailName.equals(toSearchName)); + } while (tailName.equals(toSearchName)); return result; } @@ -173,14 +172,14 @@ public static boolean isMatchingTail(final Path pathToSearch, final Path pathTai * @throws IOException */ public static boolean deleteRegionDir(final Configuration conf, final RegionInfo hri) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); return CommonFSUtils.deleteDirectory(fs, new Path(CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri.getEncodedName())); } - /** + /** * Create the specified file on the filesystem. By default, this will: *

            *
          1. overwrite the file if it exists
          2. @@ -200,7 +199,7 @@ public static boolean deleteRegionDir(final Configuration conf, final RegionInfo * @throws IOException if the file cannot be created */ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path, - FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException { + FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException { if (fs instanceof HFileSystem) { FileSystem backingFs = ((HFileSystem) fs).getBackingFs(); if (backingFs instanceof DistributedFileSystem) { @@ -210,11 +209,12 @@ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path String.valueOf(ColumnFamilyDescriptorBuilder.DEFAULT_DFS_REPLICATION))); try { return (FSDataOutputStream) (DistributedFileSystem.class - .getDeclaredMethod("create", Path.class, FsPermission.class, boolean.class, int.class, - short.class, long.class, Progressable.class, InetSocketAddress[].class) - .invoke(backingFs, path, perm, true, CommonFSUtils.getDefaultBufferSize(backingFs), - replication > 0 ? replication : CommonFSUtils.getDefaultReplication(backingFs, path), - CommonFSUtils.getDefaultBlockSize(backingFs, path), null, favoredNodes)); + .getDeclaredMethod("create", Path.class, FsPermission.class, boolean.class, int.class, + short.class, long.class, Progressable.class, InetSocketAddress[].class) + .invoke(backingFs, path, perm, true, CommonFSUtils.getDefaultBufferSize(backingFs), + replication > 0 ? replication + : CommonFSUtils.getDefaultReplication(backingFs, path), + CommonFSUtils.getDefaultBlockSize(backingFs, path), null, favoredNodes)); } catch (InvocationTargetException ite) { // Function was properly called, but threw it's own exception. throw new IOException(ite.getCause()); @@ -231,12 +231,10 @@ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path /** * Checks to see if the specified file system is available - * * @param fs filesystem * @throws IOException e */ - public static void checkFileSystemAvailable(final FileSystem fs) - throws IOException { + public static void checkFileSystemAvailable(final FileSystem fs) throws IOException { if (!(fs instanceof DistributedFileSystem)) { return; } @@ -247,8 +245,7 @@ public static void checkFileSystemAvailable(final FileSystem fs) return; } } catch (IOException e) { - exception = e instanceof RemoteException ? - ((RemoteException)e).unwrapRemoteException() : e; + exception = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e; } try { fs.close(); @@ -260,7 +257,6 @@ public static void checkFileSystemAvailable(final FileSystem fs) /** * Inquire the Active NameNode's safe mode status. - * * @param dfs A DistributedFileSystem object representing the underlying HDFS. 
* @return whether we're in safe mode * @throws IOException @@ -274,12 +270,11 @@ private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOExceptio * @param conf * @throws IOException */ - public static void checkDfsSafeMode(final Configuration conf) - throws IOException { + public static void checkDfsSafeMode(final Configuration conf) throws IOException { boolean isInSafeMode = false; FileSystem fs = FileSystem.get(conf); if (fs instanceof DistributedFileSystem) { - DistributedFileSystem dfs = (DistributedFileSystem)fs; + DistributedFileSystem dfs = (DistributedFileSystem) fs; isInSafeMode = isInSafeMode(dfs); } if (isInSafeMode) { @@ -289,7 +284,6 @@ public static void checkDfsSafeMode(final Configuration conf) /** * Verifies current version of file system - * * @param fs filesystem object * @param rootdir root hbase directory * @return null if no version file exists, version string otherwise @@ -297,7 +291,7 @@ public static void checkDfsSafeMode(final Configuration conf) * @throws DeserializationException if the version data cannot be translated into a version */ public static String getVersion(FileSystem fs, Path rootdir) - throws IOException, DeserializationException { + throws IOException, DeserializationException { final Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); FileStatus[] status = null; try { @@ -311,7 +305,7 @@ public static String getVersion(FileSystem fs, Path rootdir) return null; } String version = null; - byte [] content = new byte [(int)status[0].getLen()]; + byte[] content = new byte[(int) status[0].getLen()]; FSDataInputStream s = fs.open(versionFile); try { IOUtils.readFully(s, content, 0, content.length); @@ -337,12 +331,11 @@ public static String getVersion(FileSystem fs, Path rootdir) * @return The version found in the file as a String * @throws DeserializationException if the version data cannot be translated into a version */ - static String parseVersionFrom(final byte [] bytes) - throws DeserializationException { + static String parseVersionFrom(final byte[] bytes) throws DeserializationException { ProtobufUtil.expectPBMagicPrefix(bytes); int pblen = ProtobufUtil.lengthOfPBMagic(); FSProtos.HBaseVersionFileContent.Builder builder = - FSProtos.HBaseVersionFileContent.newBuilder(); + FSProtos.HBaseVersionFileContent.newBuilder(); try { ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); return builder.getVersion(); @@ -355,17 +348,17 @@ static String parseVersionFrom(final byte [] bytes) /** * Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file. * @param version Version to persist - * @return Serialized protobuf with version content and a bit of pb magic for a prefix. + * @return Serialized protobuf with version content and a bit of pb magic for a + * prefix. 
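The parseVersionFrom/toVersionByteArray hunks above describe the hbase.version layout as "a bit of pb magic for a prefix" followed by the serialized version content. As a rough, self-contained illustration of that prefix-then-payload layout: the magic bytes, the payload encoding, and all names below are stand-ins; the real code uses ProtobufUtil and an HBaseVersionFileContent protobuf rather than a UTF-8 string.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class MagicPrefixSketch {
  // Stand-in magic; the actual prefix bytes come from HBase's ProtobufUtil.
  static final byte[] MAGIC = "PBUF".getBytes(StandardCharsets.UTF_8);

  static byte[] encode(String version) {
    byte[] payload = version.getBytes(StandardCharsets.UTF_8); // real code serializes a protobuf here
    byte[] out = new byte[MAGIC.length + payload.length];
    System.arraycopy(MAGIC, 0, out, 0, MAGIC.length);
    System.arraycopy(payload, 0, out, MAGIC.length, payload.length);
    return out;
  }

  static String decode(byte[] bytes) {
    // Expect the magic prefix, then parse whatever follows it.
    byte[] prefix = Arrays.copyOfRange(bytes, 0, MAGIC.length);
    if (!Arrays.equals(prefix, MAGIC)) {
      throw new IllegalArgumentException("missing magic prefix");
    }
    return new String(bytes, MAGIC.length, bytes.length - MAGIC.length, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    System.out.println(decode(encode("8"))); // round-trips the version string
  }
}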
*/ - static byte [] toVersionByteArray(final String version) { + static byte[] toVersionByteArray(final String version) { FSProtos.HBaseVersionFileContent.Builder builder = - FSProtos.HBaseVersionFileContent.newBuilder(); + FSProtos.HBaseVersionFileContent.newBuilder(); return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray()); } /** * Verifies current version of file system - * * @param fs file system * @param rootdir root directory of HBase installation * @param message if true, issues a message on System.out @@ -373,25 +366,22 @@ static String parseVersionFrom(final byte [] bytes) * @throws DeserializationException if the contents of the version file cannot be parsed */ public static void checkVersion(FileSystem fs, Path rootdir, boolean message) - throws IOException, DeserializationException { + throws IOException, DeserializationException { checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS); } /** * Verifies current version of file system - * * @param fs file system * @param rootdir root directory of HBase installation * @param message if true, issues a message on System.out * @param wait wait interval * @param retries number of times to retry - * * @throws IOException if the version file cannot be opened * @throws DeserializationException if the contents of the version file cannot be parsed */ - public static void checkVersion(FileSystem fs, Path rootdir, - boolean message, int wait, int retries) - throws IOException, DeserializationException { + public static void checkVersion(FileSystem fs, Path rootdir, boolean message, int wait, + int retries) throws IOException, DeserializationException { String version = getVersion(fs, rootdir); String msg; if (version == null) { @@ -401,17 +391,17 @@ public static void checkVersion(FileSystem fs, Path rootdir, setVersion(fs, rootdir, wait, retries); return; } else { - msg = "hbase.version file is missing. Is your hbase.rootdir valid? " + - "You can restore hbase.version file by running 'HBCK2 filesystem -fix'. " + - "See https://github.com/apache/hbase-operator-tools/tree/master/hbase-hbck2"; + msg = "hbase.version file is missing. Is your hbase.rootdir valid? " + + "You can restore hbase.version file by running 'HBCK2 filesystem -fix'. " + + "See https://github.com/apache/hbase-operator-tools/tree/master/hbase-hbck2"; } } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) { return; } else { - msg = "HBase file layout needs to be upgraded. Current filesystem version is " + version + - " but software requires version " + HConstants.FILE_SYSTEM_VERSION + - ". Consult http://hbase.apache.org/book.html for further information about " + - "upgrading HBase."; + msg = "HBase file layout needs to be upgraded. Current filesystem version is " + version + + " but software requires version " + HConstants.FILE_SYSTEM_VERSION + + ". 
Consult http://hbase.apache.org/book.html for further information about " + + "upgrading HBase."; } // version is deprecated require migration @@ -424,20 +414,17 @@ public static void checkVersion(FileSystem fs, Path rootdir, /** * Sets version of file system - * * @param fs filesystem object * @param rootdir hbase root * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir) - throws IOException { + public static void setVersion(FileSystem fs, Path rootdir) throws IOException { setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS); } /** * Sets version of file system - * * @param fs filesystem object * @param rootdir hbase root * @param wait time to wait for retry @@ -445,14 +432,12 @@ public static void setVersion(FileSystem fs, Path rootdir) * @throws IOException e */ public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries) - throws IOException { + throws IOException { setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries); } - /** * Sets version of file system - * * @param fs filesystem object * @param rootdir hbase root directory * @param version version to set @@ -460,11 +445,11 @@ public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries * @param retries number of times to retry before throwing an IOException * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir, String version, - int wait, int retries) throws IOException { + public static void setVersion(FileSystem fs, Path rootdir, String version, int wait, int retries) + throws IOException { Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); - Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR + - HConstants.VERSION_FILE_NAME); + Path tempVersionFile = new Path(rootdir, + HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR + HConstants.VERSION_FILE_NAME); while (true) { try { // Write the version to a temporary file @@ -486,7 +471,8 @@ public static void setVersion(FileSystem fs, Path rootdir, String version, // Attempt to close the stream on the way out if it is still open. 
try { if (s != null) s.close(); - } catch (IOException ignore) { } + } catch (IOException ignore) { + } } LOG.info("Created version file at " + rootdir.toString() + " with version=" + version); return; @@ -499,7 +485,7 @@ public static void setVersion(FileSystem fs, Path rootdir, String version, Thread.sleep(wait); } } catch (InterruptedException ie) { - throw (InterruptedIOException)new InterruptedIOException().initCause(ie); + throw (InterruptedIOException) new InterruptedIOException().initCause(ie); } retries--; } else { @@ -517,8 +503,8 @@ public static void setVersion(FileSystem fs, Path rootdir, String version, * @return true if the file exists, otherwise false * @throws IOException if checking the FileSystem fails */ - public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, - long wait) throws IOException { + public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, long wait) + throws IOException { while (true) { try { Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME); @@ -546,20 +532,19 @@ public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, * @return the unique cluster identifier * @throws IOException if reading the cluster ID file fails */ - public static ClusterId getClusterId(FileSystem fs, Path rootdir) - throws IOException { + public static ClusterId getClusterId(FileSystem fs, Path rootdir) throws IOException { Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME); ClusterId clusterId = null; - FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null; + FileStatus status = fs.exists(idPath) ? fs.getFileStatus(idPath) : null; if (status != null) { int len = Ints.checkedCast(status.getLen()); - byte [] content = new byte[len]; + byte[] content = new byte[len]; FSDataInputStream in = fs.open(idPath); try { in.readFully(content); } catch (EOFException eof) { LOG.warn("Cluster ID file {} is empty", idPath); - } finally{ + } finally { in.close(); } try { @@ -593,9 +578,8 @@ public static ClusterId getClusterId(FileSystem fs, Path rootdir) * @throws IOException */ private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p, - final ClusterId cid) - throws IOException { - // Rewrite the file as pb. Move aside the old one first, write new + final ClusterId cid) throws IOException { + // Rewrite the file as pb. Move aside the old one first, write new // then delete the moved-aside file. Path movedAsideName = new Path(p + "." + EnvironmentEdgeManager.currentTime()); if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p); @@ -610,7 +594,6 @@ private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final P * Writes a new unique identifier for this cluster to the "hbase.id" file in the HBase root * directory. If any operations on the ID file fails, and {@code wait} is a positive value, the * method will retry to produce the ID file until the thread is forcibly interrupted. 
- * * @param fs the root directory FileSystem * @param rootdir the path to the HBase root directory * @param clusterId the unique identifier to store @@ -677,12 +660,10 @@ public static void setClusterId(final FileSystem fs, final Path rootdir, * @param wait Sleep between retries * @throws IOException e */ - public static void waitOnSafeMode(final Configuration conf, - final long wait) - throws IOException { + public static void waitOnSafeMode(final Configuration conf, final long wait) throws IOException { FileSystem fs = FileSystem.get(conf); if (!(fs instanceof DistributedFileSystem)) return; - DistributedFileSystem dfs = (DistributedFileSystem)fs; + DistributedFileSystem dfs = (DistributedFileSystem) fs; // Make sure dfs is not in safe mode while (isInSafeMode(dfs)) { LOG.info("Waiting for dfs to exit safe mode..."); @@ -707,17 +688,16 @@ public static boolean metaRegionExists(FileSystem fs, Path rootDir) throws IOExc } /** - * Compute HDFS block distribution of a given HdfsDataInputStream. All HdfsDataInputStreams - * are backed by a series of LocatedBlocks, which are fetched periodically from the namenode. - * This method retrieves those blocks from the input stream and uses them to calculate - * HDFSBlockDistribution. - * - * The underlying method in DFSInputStream does attempt to use locally cached blocks, but - * may hit the namenode if the cache is determined to be incomplete. The method also involves - * making copies of all LocatedBlocks rather than return the underlying blocks themselves. + * Compute HDFS block distribution of a given HdfsDataInputStream. All HdfsDataInputStreams are + * backed by a series of LocatedBlocks, which are fetched periodically from the namenode. This + * method retrieves those blocks from the input stream and uses them to calculate + * HDFSBlockDistribution. The underlying method in DFSInputStream does attempt to use locally + * cached blocks, but may hit the namenode if the cache is determined to be incomplete. The method + * also involves making copies of all LocatedBlocks rather than return the underlying blocks + * themselves. 
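The computeHDFSBlocksDistribution javadoc reworked above builds a block distribution from a stream's LocatedBlocks. Conceptually, each block credits every host that stores it with the block's length, and the per-host totals describe where the data lives. A tiny stand-in sketch of that accumulation, with hypothetical host names and a made-up class in place of HDFSBlocksDistribution:

import java.util.HashMap;
import java.util.Map;

public class BlocksDistributionSketch {
  // host -> total bytes of blocks hosted there; a stand-in for HDFSBlocksDistribution.
  private final Map<String, Long> weightByHost = new HashMap<>();

  // Every host of a block is credited with that block's length.
  void add(String[] hosts, long blockLength) {
    for (String host : hosts) {
      weightByHost.merge(host, blockLength, Long::sum);
    }
  }

  Map<String, Long> weights() {
    return weightByHost;
  }

  public static void main(String[] args) {
    BlocksDistributionSketch d = new BlocksDistributionSketch();
    d.add(new String[] { "rs1", "rs2" }, 128L * 1024 * 1024);
    d.add(new String[] { "rs1" }, 64L * 1024 * 1024);
    System.out.println(d.weights()); // rs1 carries more weight than rs2
  }
}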
*/ - static public HDFSBlocksDistribution computeHDFSBlocksDistribution( - HdfsDataInputStream inputStream) throws IOException { + static public HDFSBlocksDistribution + computeHDFSBlocksDistribution(HdfsDataInputStream inputStream) throws IOException { List blocks = inputStream.getAllBlocks(); HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution(); for (LocatedBlock block : blocks) { @@ -746,12 +726,10 @@ private static String[] getHostsForLocations(LocatedBlock block) { * @param length length of the portion * @return The HDFS blocks distribution */ - static public HDFSBlocksDistribution computeHDFSBlocksDistribution( - final FileSystem fs, FileStatus status, long start, long length) - throws IOException { + static public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs, + FileStatus status, long start, long length) throws IOException { HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution(); - BlockLocation [] blockLocations = - fs.getFileBlockLocations(status, start, length); + BlockLocation[] blockLocations = fs.getFileBlockLocations(status, start, length); addToHDFSBlocksDistribution(blocksDistribution, blockLocations); return blocksDistribution; } @@ -761,9 +739,8 @@ static public HDFSBlocksDistribution computeHDFSBlocksDistribution( * @param blocksDistribution the hdfs blocks distribution * @param blockLocations an array containing block location */ - static public void addToHDFSBlocksDistribution( - HDFSBlocksDistribution blocksDistribution, BlockLocation[] blockLocations) - throws IOException { + static public void addToHDFSBlocksDistribution(HDFSBlocksDistribution blocksDistribution, + BlockLocation[] blockLocations) throws IOException { for (BlockLocation bl : blockLocations) { String[] hosts = bl.getHosts(); long len = bl.getLength(); @@ -774,31 +751,26 @@ static public void addToHDFSBlocksDistribution( // TODO move this method OUT of FSUtils. No dependencies to HMaster /** - * Returns the total overall fragmentation percentage. Includes hbase:meta and - * -ROOT- as well. - * - * @param master The master defining the HBase root and file system + * Returns the total overall fragmentation percentage. Includes hbase:meta and -ROOT- as well. + * @param master The master defining the HBase root and file system * @return A map for each table and its percentage (never null) * @throws IOException When scanning the directory fails */ - public static int getTotalTableFragmentation(final HMaster master) - throws IOException { + public static int getTotalTableFragmentation(final HMaster master) throws IOException { Map map = getTableFragmentation(master); - return map.isEmpty() ? -1 : map.get("-TOTAL-"); + return map.isEmpty() ? -1 : map.get("-TOTAL-"); } /** - * Runs through the HBase rootdir and checks how many stores for each table - * have more than one file in them. Checks -ROOT- and hbase:meta too. The total - * percentage across all tables is stored under the special key "-TOTAL-". - * - * @param master The master defining the HBase root and file system. + * Runs through the HBase rootdir and checks how many stores for each table have more than one + * file in them. Checks -ROOT- and hbase:meta too. The total percentage across all tables is + * stored under the special key "-TOTAL-". + * @param master The master defining the HBase root and file system. * @return A map for each table and its percentage (never null). - * * @throws IOException When scanning the directory fails. 
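The getTableFragmentation javadoc above defines fragmentation as how many stores per table hold more than one file, reported as a percentage per table plus an overall figure under the special "-TOTAL-" key. A tiny sketch of that arithmetic with hypothetical counts (the real code derives the counts by walking region and column-family directories):

public class FragmentationSketch {
  // cfFrag: column families with more than one store file; cfCount: all column families examined.
  static int fragmentationPercent(int cfFrag, int cfCount) {
    // 0 when there are no families at all, otherwise a rounded percentage.
    return cfCount == 0 ? 0 : Math.round((float) cfFrag / cfCount * 100);
  }

  public static void main(String[] args) {
    // e.g. 3 of 12 families have more than one file -> 25
    System.out.println(fragmentationPercent(3, 12));
    // The "-TOTAL-" entry uses the same formula over the cluster-wide counts, e.g. 1 of 8 -> 13.
    System.out.println(fragmentationPercent(1, 8));
  }
}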
*/ public static Map getTableFragmentation(final HMaster master) - throws IOException { + throws IOException { Path path = CommonFSUtils.getRootDir(master.getConfiguration()); // since HMaster.getFileSystem() is package private FileSystem fs = path.getFileSystem(master.getConfiguration()); @@ -806,18 +778,16 @@ public static Map getTableFragmentation(final HMaster master) } /** - * Runs through the HBase rootdir and checks how many stores for each table - * have more than one file in them. Checks -ROOT- and hbase:meta too. The total - * percentage across all tables is stored under the special key "-TOTAL-". - * - * @param fs The file system to use - * @param hbaseRootDir The root directory to scan + * Runs through the HBase rootdir and checks how many stores for each table have more than one + * file in them. Checks -ROOT- and hbase:meta too. The total percentage across all tables is + * stored under the special key "-TOTAL-". + * @param fs The file system to use + * @param hbaseRootDir The root directory to scan * @return A map for each table and its percentage (never null) * @throws IOException When scanning the directory fails */ - public static Map getTableFragmentation( - final FileSystem fs, final Path hbaseRootDir) - throws IOException { + public static Map getTableFragmentation(final FileSystem fs, + final Path hbaseRootDir) throws IOException { Map frags = new HashMap<>(); int cfCountTotal = 0; int cfFragTotal = 0; @@ -846,11 +816,11 @@ public static Map getTableFragmentation( } // compute percentage per table and store in result list frags.put(CommonFSUtils.getTableName(d).getNameAsString(), - cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100)); + cfCount == 0 ? 0 : Math.round((float) cfFrag / cfCount * 100)); } // set overall percentage for all tables frags.put("-TOTAL-", - cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100)); + cfCountTotal == 0 ? 0 : Math.round((float) cfFragTotal / cfCountTotal * 100)); return frags; } @@ -900,8 +870,7 @@ public static class BlackListDirFilter extends AbstractFileStatusFilter { @SuppressWarnings("unchecked") public BlackListDirFilter(final FileSystem fs, final List directoryNameBlackList) { this.fs = fs; - blacklist = - (List) (directoryNameBlackList == null ? Collections.emptyList() + blacklist = (List) (directoryNameBlackList == null ? Collections.emptyList() : directoryNameBlackList); } @@ -915,7 +884,8 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { return isDirectory(fs, isDir, p); } catch (IOException e) { LOG.warn("An error occurred while verifying if [{}] is a valid directory." - + " Returning 'not valid' and continuing.", p, e); + + " Returning 'not valid' and continuing.", + p, e); return false; } } @@ -946,8 +916,7 @@ public UserTableDirFilter(FileSystem fs) { @Override protected boolean isValidName(final String name) { - if (!super.isValidName(name)) - return false; + if (!super.isValidName(name)) return false; try { TableName.isLegalTableQualifierName(Bytes.toBytes(name)); @@ -974,8 +943,8 @@ public static List getTableDirs(final FileSystem fs, final Path rootdir) /** * @param fs * @param rootdir - * @return All the table directories under rootdir. Ignore non table hbase folders such as - * .logs, .oldlogs, .corrupt folders. + * @return All the table directories under rootdir. Ignore non table hbase folders + * such as .logs, .oldlogs, .corrupt folders. 
* @throws IOException */ public static List getLocalTableDirs(final FileSystem fs, final Path rootdir) @@ -983,7 +952,7 @@ public static List getLocalTableDirs(final FileSystem fs, final Path rootd // presumes any directory under hbase.rootdir is a table FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs)); List tabledirs = new ArrayList<>(dirs.length); - for (FileStatus dir: dirs) { + for (FileStatus dir : dirs) { tabledirs.add(dir.getPath()); } return tabledirs; @@ -1025,14 +994,15 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { * @return List of paths to valid region directories in table dir. * @throws IOException */ - public static List getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException { + public static List getRegionDirs(final FileSystem fs, final Path tableDir) + throws IOException { // assumes we are in a table dir. List rds = listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (rds == null) { return Collections.emptyList(); } List regionDirs = new ArrayList<>(rds.size()); - for (FileStatus rdfs: rds) { + for (FileStatus rdfs : rds) { Path rdPath = rdfs.getPath(); regionDirs.add(rdPath); } @@ -1045,7 +1015,7 @@ public static Path getRegionDirFromRootDir(Path rootDir, RegionInfo region) { public static Path getRegionDirFromTableDir(Path tableDir, RegionInfo region) { return getRegionDirFromTableDir(tableDir, - ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName()); + ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName()); } public static Path getRegionDirFromTableDir(Path tableDir, String encodedRegionName) { @@ -1053,8 +1023,8 @@ public static Path getRegionDirFromTableDir(Path tableDir, String encodedRegionN } /** - * Filter for all dirs that are legal column family names. This is generally used for colfam - * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>. + * Filter for all dirs that are legal column family names. This is generally used for colfam dirs + * <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>. */ public static class FamilyDirFilter extends AbstractFileStatusFilter { final FileSystem fs; @@ -1085,7 +1055,6 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { /** * Given a particular region dir, return all the familydirs inside it - * * @param fs A file system for the Path * @param regionDir Path to a specific region directory * @return List of paths to valid family directories in region dir. @@ -1111,7 +1080,7 @@ private static List getFilePaths(final FileSystem fs, final Path dir, final PathFilter pathFilter) throws IOException { FileStatus[] fds = fs.listStatus(dir, pathFilter); List files = new ArrayList<>(fds.length); - for (FileStatus fdfs: fds) { + for (FileStatus fdfs : fds) { Path fdPath = fdfs.getPath(); files.add(fdPath); } @@ -1142,11 +1111,11 @@ public ReferenceAndLinkFileFilter(FileSystem fs) { public boolean accept(Path rd) { try { // only files can be references. - return !fs.getFileStatus(rd).isDirectory() && (StoreFileInfo.isReference(rd) || - HFileLink.isHFileLink(rd)); + return !fs.getFileStatus(rd).isDirectory() + && (StoreFileInfo.isReference(rd) || HFileLink.isHFileLink(rd)); } catch (IOException ioe) { // Maybe the file was moved or the fs was disconnected. 
- LOG.warn("Skipping file " + rd +" due to IOException", ioe); + LOG.warn("Skipping file " + rd + " due to IOException", ioe); return false; } } @@ -1179,8 +1148,8 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { } /** - * Filter for HFileLinks (StoreFiles and HFiles not included). - * the filter itself does not consider if a link is file or not. + * Filter for HFileLinks (StoreFiles and HFiles not included). the filter itself does not consider + * if a link is file or not. */ public static class HFileLinkFilter implements PathFilter { @@ -1216,8 +1185,7 @@ protected boolean accept(Path p, @CheckForNull Boolean isDir) { } /** - * Called every so-often by storefile map builder getTableStoreFilePathMap to - * report progress. + * Called every so-often by storefile map builder getTableStoreFilePathMap to report progress. */ interface ProgressReporter { /** @@ -1227,47 +1195,43 @@ interface ProgressReporter { } /** - * Runs through the HBase rootdir/tablename and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
+ * Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile
+ * names to the full Path. <br>
* Example...<br>
- * Key = 3944417774205889744<br>
+ * Key = 3944417774205889744 <br>
            * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param map map to add values. If null, this method will create and populate one to return - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * @param map map to add values. If null, this method will create and populate one to return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @param tableName name of the table to scan. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. * @throws InterruptedException */ public static Map getTableStoreFilePathMap(Map map, - final FileSystem fs, final Path hbaseRootDir, TableName tableName) - throws IOException, InterruptedException { + final FileSystem fs, final Path hbaseRootDir, TableName tableName) + throws IOException, InterruptedException { return getTableStoreFilePathMap(map, fs, hbaseRootDir, tableName, null, null, - (ProgressReporter)null); + (ProgressReporter) null); } /** - * Runs through the HBase rootdir/tablename and creates a reverse lookup map for - * table StoreFile names to the full Path. Note that because this method can be called - * on a 'live' HBase system that we will skip files that no longer exist by the time - * we traverse them and similarly the user of the result needs to consider that some - * entries in this map may not exist by the time this call completes. - *
+ * Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile
+ * names to the full Path. Note that because this method can be called on a 'live' HBase system
+ * that we will skip files that no longer exist by the time we traverse them and similarly the
+ * user of the result needs to consider that some entries in this map may not exist by the time
+ * this call completes. <br>
* Example...<br>
- * Key = 3944417774205889744<br>
+ * Key = 3944417774205889744 <br>
            * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param resultMap map to add values. If null, this method will create and populate one to return - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * @param resultMap map to add values. If null, this method will create and populate one to return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @param tableName name of the table to scan. * @param sfFilter optional path filter to apply to store files * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. * @deprecated Since 2.3.0. For removal in hbase4. Use ProgressReporter override instead. @@ -1278,35 +1242,32 @@ public static Map getTableStoreFilePathMap(Map resul ExecutorService executor, final HbckErrorReporter progressReporter) throws IOException, InterruptedException { return getTableStoreFilePathMap(resultMap, fs, hbaseRootDir, tableName, sfFilter, executor, - new ProgressReporter() { - @Override - public void progress(FileStatus status) { - // status is not used in this implementation. - progressReporter.progress(); - } - }); + new ProgressReporter() { + @Override + public void progress(FileStatus status) { + // status is not used in this implementation. + progressReporter.progress(); + } + }); } /** - * Runs through the HBase rootdir/tablename and creates a reverse lookup map for - * table StoreFile names to the full Path. Note that because this method can be called - * on a 'live' HBase system that we will skip files that no longer exist by the time - * we traverse them and similarly the user of the result needs to consider that some - * entries in this map may not exist by the time this call completes. - *
+ * Runs through the HBase rootdir/tablename and creates a reverse lookup map for table StoreFile
+ * names to the full Path. Note that because this method can be called on a 'live' HBase system
+ * that we will skip files that no longer exist by the time we traverse them and similarly the
+ * user of the result needs to consider that some entries in this map may not exist by the time
+ * this call completes. <br>
* Example...<br>
- * Key = 3944417774205889744<br>
+ * Key = 3944417774205889744 <br>
            * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param resultMap map to add values. If null, this method will create and populate one - * to return - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * @param resultMap map to add values. If null, this method will create and populate one to return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @param tableName name of the table to scan. * @param sfFilter optional path filter to apply to store files * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. * @throws InterruptedException the thread is interrupted, either before or during the activity. @@ -1314,20 +1275,21 @@ public void progress(FileStatus status) { public static Map getTableStoreFilePathMap(Map resultMap, final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter, ExecutorService executor, final ProgressReporter progressReporter) - throws IOException, InterruptedException { + throws IOException, InterruptedException { final Map finalResultMap = resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap; // only include the directory paths to tables Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableName); - // Inside a table, there are compaction.dir directories to skip. Otherwise, all else + // Inside a table, there are compaction.dir directories to skip. Otherwise, all else // should be regions. final FamilyDirFilter familyFilter = new FamilyDirFilter(fs); final Vector exceptions = new Vector<>(); try { - List regionDirs = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); + List regionDirs = + FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (regionDirs == null) { return finalResultMap; } @@ -1348,8 +1310,9 @@ public static Map getTableStoreFilePathMap(Map resul @Override public void run() { try { - HashMap regionStoreFileMap = new HashMap<>(); - List familyDirs = FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter); + HashMap regionStoreFileMap = new HashMap<>(); + List familyDirs = + FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter); if (familyDirs == null) { if (!fs.exists(dd)) { LOG.warn("Skipping region because it no longer exists: " + dd); @@ -1375,7 +1338,7 @@ public void run() { } Path sf = sfStatus.getPath(); if (sfFilter == null || sfFilter.accept(sf)) { - regionStoreFileMap.put( sf.getName(), sf); + regionStoreFileMap.put(sf.getName(), sf); } } } @@ -1429,7 +1392,7 @@ public void run() { public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) { int result = 0; try { - for (Path familyDir:getFamilyDirs(fs, p)){ + for (Path familyDir : getFamilyDirs(fs, p)) { result += getReferenceFilePaths(fs, familyDir).size(); } } catch (IOException e) { @@ -1439,80 +1402,70 @@ public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) } /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
+ * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to
+ * the full Path. <br>
* Example...<br>
- * Key = 3944417774205889744<br>
+ * Key = 3944417774205889744 <br>
            * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. */ public static Map getTableStoreFilePathMap(final FileSystem fs, - final Path hbaseRootDir) - throws IOException, InterruptedException { - return getTableStoreFilePathMap(fs, hbaseRootDir, null, null, (ProgressReporter)null); + final Path hbaseRootDir) throws IOException, InterruptedException { + return getTableStoreFilePathMap(fs, hbaseRootDir, null, null, (ProgressReporter) null); } /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
+ * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to
+ * the full Path. <br>
* Example...<br>
- * Key = 3944417774205889744<br>
+ * Key = 3944417774205889744 <br>
            * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @param sfFilter optional path filter to apply to store files * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. - * @deprecated Since 2.3.0. Will be removed in hbase4. Used {@link - * #getTableStoreFilePathMap(FileSystem, Path, PathFilter, ExecutorService, ProgressReporter)} + * @deprecated Since 2.3.0. Will be removed in hbase4. Used + * {@link #getTableStoreFilePathMap(FileSystem, Path, PathFilter, ExecutorService, ProgressReporter)} */ @Deprecated public static Map getTableStoreFilePathMap(final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor, - HbckErrorReporter progressReporter) - throws IOException, InterruptedException { - return getTableStoreFilePathMap(fs, hbaseRootDir, sfFilter, executor, - new ProgressReporter() { - @Override - public void progress(FileStatus status) { - // status is not used in this implementation. - progressReporter.progress(); - } - }); + HbckErrorReporter progressReporter) throws IOException, InterruptedException { + return getTableStoreFilePathMap(fs, hbaseRootDir, sfFilter, executor, new ProgressReporter() { + @Override + public void progress(FileStatus status) { + // status is not used in this implementation. + progressReporter.progress(); + } + }); } /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
+ * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names to
+ * the full Path. <br>
* Example...<br>
- * Key = 3944417774205889744<br>
+ * Key = 3944417774205889744 <br>
            * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @param sfFilter optional path filter to apply to store files * @param executor optional executor service to parallelize this operation * @param progressReporter Instance or null; gets called every time we move to new region of - * family dir and for each store file. + * family dir and for each store file. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. * @throws InterruptedException */ - public static Map getTableStoreFilePathMap( - final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter, - ExecutorService executor, ProgressReporter progressReporter) - throws IOException, InterruptedException { + public static Map getTableStoreFilePathMap(final FileSystem fs, + final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor, + ProgressReporter progressReporter) throws IOException, InterruptedException { ConcurrentHashMap map = new ConcurrentHashMap<>(1024, 0.75f, 32); // if this method looks similar to 'getTableFragmentation' that is because @@ -1528,23 +1481,20 @@ public static Map getTableStoreFilePathMap( /** * Filters FileStatuses in an array and returns a list - * - * @param input An array of FileStatuses - * @param filter A required filter to filter the array - * @return A list of FileStatuses + * @param input An array of FileStatuses + * @param filter A required filter to filter the array + * @return A list of FileStatuses */ - public static List filterFileStatuses(FileStatus[] input, - FileStatusFilter filter) { + public static List filterFileStatuses(FileStatus[] input, FileStatusFilter filter) { if (input == null) return null; return filterFileStatuses(Iterators.forArray(input), filter); } /** * Filters FileStatuses in an iterator and returns a list - * - * @param input An iterator of FileStatuses - * @param filter A required filter to filter the array - * @return A list of FileStatuses + * @param input An iterator of FileStatuses + * @param filter A required filter to filter the array + * @return A list of FileStatuses */ public static List filterFileStatuses(Iterator input, FileStatusFilter filter) { @@ -1560,19 +1510,17 @@ public static List filterFileStatuses(Iterator input, } /** - * Calls fs.listStatus() and treats FileNotFoundException as non-fatal - * This accommodates differences between hadoop versions, where hadoop 1 - * does not throw a FileNotFoundException, and return an empty FileStatus[] - * while Hadoop 2 will throw FileNotFoundException. - * + * Calls fs.listStatus() and treats FileNotFoundException as non-fatal This accommodates + * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException, and + * return an empty FileStatus[] while Hadoop 2 will throw FileNotFoundException. 
* @param fs file system * @param dir directory * @param filter file status filter * @return null if dir is empty or doesn't exist, otherwise FileStatus list */ - public static List listStatusWithStatusFilter(final FileSystem fs, - final Path dir, final FileStatusFilter filter) throws IOException { - FileStatus [] status = null; + public static List listStatusWithStatusFilter(final FileSystem fs, final Path dir, + final FileStatusFilter filter) throws IOException { + FileStatus[] status = null; try { status = fs.listStatus(dir); } catch (FileNotFoundException fnfe) { @@ -1580,7 +1528,7 @@ public static List listStatusWithStatusFilter(final FileSystem fs, return null; } - if (ArrayUtils.getLength(status) == 0) { + if (ArrayUtils.getLength(status) == 0) { return null; } @@ -1597,71 +1545,51 @@ public static List listStatusWithStatusFilter(final FileSystem fs, } /** - * This function is to scan the root path of the file system to get the - * degree of locality for each region on each of the servers having at least - * one block of that region. - * This is used by the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer} - * - * @param conf - * the configuration to use - * @return the mapping from region encoded name to a map of server names to - * locality fraction - * @throws IOException - * in case of file system errors or interrupts + * This function is to scan the root path of the file system to get the degree of locality for + * each region on each of the servers having at least one block of that region. This is used by + * the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer} + * @param conf the configuration to use + * @return the mapping from region encoded name to a map of server names to locality fraction + * @throws IOException in case of file system errors or interrupts */ - public static Map> getRegionDegreeLocalityMappingFromFS( - final Configuration conf) throws IOException { - return getRegionDegreeLocalityMappingFromFS( - conf, null, - conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE)); + public static Map> + getRegionDegreeLocalityMappingFromFS(final Configuration conf) throws IOException { + return getRegionDegreeLocalityMappingFromFS(conf, null, + conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE)); } /** - * This function is to scan the root path of the file system to get the - * degree of locality for each region on each of the servers having at least - * one block of that region. - * - * @param conf - * the configuration to use - * @param desiredTable - * the table you wish to scan locality for - * @param threadPoolSize - * the thread pool size to use - * @return the mapping from region encoded name to a map of server names to - * locality fraction - * @throws IOException - * in case of file system errors or interrupts + * This function is to scan the root path of the file system to get the degree of locality for + * each region on each of the servers having at least one block of that region. 
+ * @param conf the configuration to use + * @param desiredTable the table you wish to scan locality for + * @param threadPoolSize the thread pool size to use + * @return the mapping from region encoded name to a map of server names to locality fraction + * @throws IOException in case of file system errors or interrupts */ public static Map> getRegionDegreeLocalityMappingFromFS( - final Configuration conf, final String desiredTable, int threadPoolSize) - throws IOException { + final Configuration conf, final String desiredTable, int threadPoolSize) throws IOException { Map> regionDegreeLocalityMapping = new ConcurrentHashMap<>(); getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, regionDegreeLocalityMapping); return regionDegreeLocalityMapping; } /** - * This function is to scan the root path of the file system to get either the - * mapping between the region name and its best locality region server or the - * degree of locality of each region on each of the servers having at least - * one block of that region. The output map parameters are both optional. - * - * @param conf - * the configuration to use - * @param desiredTable - * the table you wish to scan locality for - * @param threadPoolSize - * the thread pool size to use - * @param regionDegreeLocalityMapping - * the map into which to put the locality degree mapping or null, - * must be a thread-safe implementation - * @throws IOException - * in case of file system errors or interrupts + * This function is to scan the root path of the file system to get either the mapping between the + * region name and its best locality region server or the degree of locality of each region on + * each of the servers having at least one block of that region. The output map parameters are + * both optional. + * @param conf the configuration to use + * @param desiredTable the table you wish to scan locality for + * @param threadPoolSize the thread pool size to use + * @param regionDegreeLocalityMapping the map into which to put the locality degree mapping or + * null, must be a thread-safe implementation + * @throws IOException in case of file system errors or interrupts */ private static void getRegionLocalityMappingFromFS(final Configuration conf, - final String desiredTable, int threadPoolSize, - final Map> regionDegreeLocalityMapping) throws IOException { + final String desiredTable, int threadPoolSize, + final Map> regionDegreeLocalityMapping) throws IOException { final FileSystem fs = FileSystem.get(conf); final Path rootPath = CommonFSUtils.getRootDir(conf); final long startTime = EnvironmentEdgeManager.currentTime(); @@ -1669,10 +1597,10 @@ private static void getRegionLocalityMappingFromFS(final Configuration conf, // The table files are in ${hbase.rootdir}/data//
      /* if (null == desiredTable) { queryPath = - new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/"); + new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/"); } else { queryPath = new Path( - CommonFSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/"); + CommonFSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/"); } // reject all paths that are not appropriate @@ -1718,7 +1646,7 @@ public boolean accept(Path path) { // run in multiple threads final ExecutorService tpe = Executors.newFixedThreadPool(threadPoolSize, new ThreadFactoryBuilder().setNameFormat("FSRegionQuery-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); try { // ignore all file status items that are not of interest for (FileStatus regionStatus : statusList) { @@ -1738,8 +1666,7 @@ public boolean accept(Path path) { try { // here we wait until TPE terminates, which is either naturally or by // exceptions in the execution of the threads - while (!tpe.awaitTermination(threadWakeFrequency, - TimeUnit.MILLISECONDS)) { + while (!tpe.awaitTermination(threadWakeFrequency, TimeUnit.MILLISECONDS)) { // printing out rough estimate, so as to not introduce // AtomicInteger LOG.info("Locality checking is underway: { Scanned Regions : " @@ -1757,20 +1684,23 @@ public boolean accept(Path path) { } /** - * Do our short circuit read setup. - * Checks buffer size to use and whether to do checksumming in hbase or hdfs. + * Do our short circuit read setup. Checks buffer size to use and whether to do checksumming in + * hbase or hdfs. * @param conf */ public static void setupShortCircuitRead(final Configuration conf) { // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property. boolean shortCircuitSkipChecksum = - conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false); + conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false); boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true); if (shortCircuitSkipChecksum) { - LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " + - "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " + - "it, see https://issues.apache.org/jira/browse/HBASE-6868." : "")); - assert !shortCircuitSkipChecksum; //this will fail if assertions are on + LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " + + "be set to true." + + (useHBaseChecksum + ? " HBase checksum doesn't require " + + "it, see https://issues.apache.org/jira/browse/HBASE-6868." + : "")); + assert !shortCircuitSkipChecksum; // this will fail if assertions are on } checkShortCircuitReadBufferSize(conf); } @@ -1787,7 +1717,7 @@ public static void checkShortCircuitReadBufferSize(final Configuration conf) { int size = conf.getInt(dfsKey, notSet); // If a size is set, return -- we will use it. if (size != notSet) return; - // But short circuit buffer size is normally not set. Put in place the hbase wanted size. + // But short circuit buffer size is normally not set. Put in place the hbase wanted size. int hbaseSize = conf.getInt("hbase." 
+ dfsKey, defaultSize); conf.setIfUnset(dfsKey, Integer.toString(hbaseSize)); } @@ -1806,25 +1736,25 @@ public static DFSHedgedReadMetrics getDFSHedgedReadMetrics(final Configuration c // to the DFS FS instance and make the method getHedgedReadMetrics accessible, then invoke it // to get the singleton instance of DFSHedgedReadMetrics shared by DFSClients. final String name = "getHedgedReadMetrics"; - DFSClient dfsclient = ((DistributedFileSystem)FileSystem.get(c)).getClient(); + DFSClient dfsclient = ((DistributedFileSystem) FileSystem.get(c)).getClient(); Method m; try { m = dfsclient.getClass().getDeclaredMethod(name); } catch (NoSuchMethodException e) { - LOG.warn("Failed find method " + name + " in dfsclient; no hedged read metrics: " + - e.getMessage()); + LOG.warn( + "Failed find method " + name + " in dfsclient; no hedged read metrics: " + e.getMessage()); return null; } catch (SecurityException e) { - LOG.warn("Failed find method " + name + " in dfsclient; no hedged read metrics: " + - e.getMessage()); + LOG.warn( + "Failed find method " + name + " in dfsclient; no hedged read metrics: " + e.getMessage()); return null; } m.setAccessible(true); try { - return (DFSHedgedReadMetrics)m.invoke(dfsclient); + return (DFSHedgedReadMetrics) m.invoke(dfsclient); } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { - LOG.warn("Failed invoking method " + name + " on dfsclient; no hedged read metrics: " + - e.getMessage()); + LOG.warn("Failed invoking method " + name + " on dfsclient; no hedged read metrics: " + + e.getMessage()); return null; } } @@ -1875,14 +1805,14 @@ private static List copyFiles(FileSystem srcFS, Path src, FileSystem dstFS * @return A set containing all namenode addresses of fs */ private static Set getNNAddresses(DistributedFileSystem fs, - Configuration conf) { + Configuration conf) { Set addresses = new HashSet<>(); String serviceName = fs.getCanonicalServiceName(); if (serviceName.startsWith("ha-hdfs")) { try { Map> addressMap = - DFSUtil.getNNServiceRpcAddressesForCluster(conf); + DFSUtil.getNNServiceRpcAddressesForCluster(conf); String nameService = serviceName.substring(serviceName.indexOf(":") + 1); if (addressMap.containsKey(nameService)) { Map nnMap = addressMap.get(nameService); @@ -1926,7 +1856,7 @@ public static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSyste } if (srcServiceName.startsWith("ha-hdfs") && desServiceName.startsWith("ha-hdfs")) { Collection internalNameServices = - conf.getTrimmedStringCollection("dfs.internal.nameservices"); + conf.getTrimmedStringCollection("dfs.internal.nameservices"); if (!internalNameServices.isEmpty()) { if (internalNameServices.contains(srcServiceName.split(":")[1])) { return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java index c3858aeccf0c..a6dfa57130be 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +36,7 @@ public final class FSVisitor { public interface StoreFileVisitor { void storeFile(final String region, final String family, final String hfileName) - throws IOException; + throws IOException; } private FSVisitor() { @@ -46,7 +45,6 @@ private FSVisitor() { /** * Iterate over the table store files - * * @param fs {@link FileSystem} * @param tableDir {@link Path} to the table directory * @param visitor callback object to get the store files @@ -54,7 +52,8 @@ private FSVisitor() { */ public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, final StoreFileVisitor visitor) throws IOException { - List regions = FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs)); + List regions = + FSUtils.listStatusWithStatusFilter(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { if (LOG.isTraceEnabled()) { LOG.trace("No regions under directory:" + tableDir); @@ -62,14 +61,13 @@ public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir return; } - for (FileStatus region: regions) { + for (FileStatus region : regions) { visitRegionStoreFiles(fs, region.getPath(), visitor); } } /** * Iterate over the region store files - * * @param fs {@link FileSystem} * @param regionDir {@link Path} to the region directory * @param visitor callback object to get the store files @@ -77,7 +75,8 @@ public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir */ public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir, final StoreFileVisitor visitor) throws IOException { - List families = FSUtils.listStatusWithStatusFilter(fs, regionDir, new FSUtils.FamilyDirFilter(fs)); + List families = + FSUtils.listStatusWithStatusFilter(fs, regionDir, new FSUtils.FamilyDirFilter(fs)); if (families == null) { if (LOG.isTraceEnabled()) { LOG.trace("No families under region directory:" + regionDir); @@ -86,7 +85,7 @@ public static void visitRegionStoreFiles(final FileSystem fs, final Path regionD } PathFilter fileFilter = new FSUtils.FileFilter(fs); - for (FileStatus family: families) { + for (FileStatus family : families) { Path familyDir = family.getPath(); String familyName = familyDir.getName(); @@ -99,7 +98,7 @@ public static void visitRegionStoreFiles(final FileSystem fs, final Path regionD continue; } - for (FileStatus hfile: storeFiles) { + for (FileStatus hfile : storeFiles) { Path hfilePath = hfile.getPath(); visitor.storeFile(regionDir.getName(), familyName, hfilePath.getName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java index e757fca8e5b2..e57d0c1814b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FileStatusFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,20 +17,17 @@ */ package org.apache.hadoop.hbase.util; +import org.apache.hadoop.fs.FileStatus; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.fs.FileStatus; @InterfaceAudience.Private @InterfaceStability.Evolving public interface FileStatusFilter { /** - * Tests whether or not the specified filestatus should be - * included in a filestatus list. - * - * @param f The filestatus to be tested - * @return true if and only if the filestatus - * should be included + * Tests whether or not the specified filestatus should be included in a filestatus list. + * @param f The filestatus to be tested + * @return true if and only if the filestatus should be included */ boolean accept(FileStatus f); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java index 2d4de3b4d52d..552ac6c9f870 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,15 +26,16 @@ public final class GetJavaProperty { public static void main(String args[]) { if (args.length == 0) { - for (Object prop: System.getProperties().keySet()) { - System.out.println(prop + "=" + System.getProperty((String)prop, "")); + for (Object prop : System.getProperties().keySet()) { + System.out.println(prop + "=" + System.getProperty((String) prop, "")); } } else { - for (String prop: args) { + for (String prop : args) { System.out.println(System.getProperty(prop, "")); } } } - private GetJavaProperty() {} + private GetJavaProperty() { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java index 44dd9776d3e3..91cdff76b3e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseConfTool.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.yetus.audience.InterfaceAudience; /** - * Tool that prints out a configuration. - * Pass the configuration key on the command-line. + * Tool that prints out a configuration. Pass the configuration key on the command-line. 
*/ @InterfaceAudience.Private public class HBaseConfTool { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 1aacd2d1ac29..cd4e1e19fc53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -134,67 +134,59 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.apache.zookeeper.KeeperException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** - * HBaseFsck (hbck) is a tool for checking and repairing region consistency and - * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not - * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'. - * Even though it can 'read' state, given how so much has changed in how hbase1 and hbase2 operate, - * it will often misread. See hbck2 (HBASE-19121) for a hbck tool for hbase2. This class is - * deprecated. - * + * HBaseFsck (hbck) is a tool for checking and repairing region consistency and table integrity + * problems in a corrupted HBase. This tool was written for hbase-1.x. It does not work with + * hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'. Even + * though it can 'read' state, given how so much has changed in how hbase1 and hbase2 operate, it + * will often misread. See hbck2 (HBASE-19121) for a hbck tool for hbase2. This class is deprecated. *

      - * Region consistency checks verify that hbase:meta, region deployment on region - * servers and the state of data in HDFS (.regioninfo files) all are in - * accordance. + * Region consistency checks verify that hbase:meta, region deployment on region servers and the + * state of data in HDFS (.regioninfo files) all are in accordance. *

      - * Table integrity checks verify that all possible row keys resolve to exactly - * one region of a table. This means there are no individual degenerate - * or backwards regions; no holes between regions; and that there are no - * overlapping regions. + * Table integrity checks verify that all possible row keys resolve to exactly one region of a + * table. This means there are no individual degenerate or backwards regions; no holes between + * regions; and that there are no overlapping regions. *

      * The general repair strategy works in two phases: *

        - *
      1. Repair Table Integrity on HDFS. (merge or fabricate regions) - *
      2. Repair Region Consistency with hbase:meta and assignments + *
      3. Repair Table Integrity on HDFS. (merge or fabricate regions) + *
      4. Repair Region Consistency with hbase:meta and assignments *
      *

      - * For table integrity repairs, the tables' region directories are scanned - * for .regioninfo files. Each table's integrity is then verified. If there - * are any orphan regions (regions with no .regioninfo files) or holes, new - * regions are fabricated. Backwards regions are sidelined as well as empty - * degenerate (endkey==startkey) regions. If there are any overlapping regions, - * a new region is created and all data is merged into the new region. + * For table integrity repairs, the tables' region directories are scanned for .regioninfo files. + * Each table's integrity is then verified. If there are any orphan regions (regions with no + * .regioninfo files) or holes, new regions are fabricated. Backwards regions are sidelined as well + * as empty degenerate (endkey==startkey) regions. If there are any overlapping regions, a new + * region is created and all data is merged into the new region. *

      - * Table integrity repairs deal solely with HDFS and could potentially be done - * offline -- the hbase region servers or master do not need to be running. - * This phase can eventually be used to completely reconstruct the hbase:meta table in - * an offline fashion. + * Table integrity repairs deal solely with HDFS and could potentially be done offline -- the hbase + * region servers or master do not need to be running. This phase can eventually be used to + * completely reconstruct the hbase:meta table in an offline fashion. *

      - * Region consistency requires three conditions -- 1) valid .regioninfo file - * present in an HDFS region dir, 2) valid row with .regioninfo data in META, - * and 3) a region is deployed only at the regionserver that was assigned to - * with proper state in the master. + * Region consistency requires three conditions -- 1) valid .regioninfo file present in an HDFS + * region dir, 2) valid row with .regioninfo data in META, and 3) a region is deployed only at the + * regionserver that was assigned to with proper state in the master. *

      - * Region consistency repairs require hbase to be online so that hbck can - * contact the HBase master and region servers. The hbck#connect() method must - * first be called successfully. Much of the region consistency information - * is transient and less risky to repair. + * Region consistency repairs require hbase to be online so that hbck can contact the HBase master + * and region servers. The hbck#connect() method must first be called successfully. Much of the + * region consistency information is transient and less risky to repair. *

      - * If hbck is run from the command line, there are a handful of arguments that - * can be used to limit the kinds of repairs hbck will do. See the code in - * {@link #printUsageAndExit()} for more details. + * If hbck is run from the command line, there are a handful of arguments that can be used to limit + * the kinds of repairs hbck will do. See the code in {@link #printUsageAndExit()} for more details. * @deprecated For removal in hbase-4.0.0. Use HBCK2 instead. */ @Deprecated @@ -209,8 +201,8 @@ public class HBaseFsck extends Configured implements Closeable { private static final int DEFAULT_MAX_MERGE = 5; /** - * Here is where hbase-1.x used to default the lock for hbck1. - * It puts in place a lock when it goes to write/make changes. + * Here is where hbase-1.x used to default the lock for hbck1. It puts in place a lock when it + * goes to write/make changes. */ @InterfaceAudience.Private public static final String HBCK_LOCK_FILE = "hbase-hbck.lock"; @@ -248,9 +240,9 @@ public class HBaseFsck extends Configured implements Closeable { // Unsupported options in HBase 2.0+ private static final Set unsupportedOptionsInV2 = Sets.newHashSet("-fix", - "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans", - "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents", - "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge"); + "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans", + "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents", + "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge"); /*********** * Options @@ -298,23 +290,20 @@ public class HBaseFsck extends Configured implements Closeable { int fixes = 0; /** - * This map contains the state of all hbck items. It maps from encoded region - * name to HbckRegionInfo structure. The information contained in HbckRegionInfo is used - * to detect and correct consistency (hdfs/meta/deployment) problems. + * This map contains the state of all hbck items. It maps from encoded region name to + * HbckRegionInfo structure. The information contained in HbckRegionInfo is used to detect and + * correct consistency (hdfs/meta/deployment) problems. */ private TreeMap regionInfoMap = new TreeMap<>(); // Empty regioninfo qualifiers in hbase:meta private Set emptyRegionInfoQualifiers = new HashSet<>(); /** - * This map from Tablename -> TableInfo contains the structures necessary to - * detect table consistency problems (holes, dupes, overlaps). It is sorted - * to prevent dupes. - * - * If tablesIncluded is empty, this map contains all tables. - * Otherwise, it contains only meta tables and tables in tablesIncluded, - * unless checkMetaOnly is specified, in which case, it contains only - * the meta table + * This map from Tablename -> TableInfo contains the structures necessary to detect table + * consistency problems (holes, dupes, overlaps). It is sorted to prevent dupes. If tablesIncluded + * is empty, this map contains all tables. 
Otherwise, it contains only meta tables and tables in + * tablesIncluded, unless checkMetaOnly is specified, in which case, it contains only the meta + * table */ private SortedMap tablesInfo = new ConcurrentSkipListMap<>(); @@ -336,7 +325,6 @@ public class HBaseFsck extends Configured implements Closeable { /** * Constructor - * * @param conf Configuration object * @throws MasterNotRunningException if the master is not running * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper @@ -348,19 +336,15 @@ public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException private static ExecutorService createThreadPool(Configuration conf) { int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS); return new ScheduledThreadPoolExecutor(numThreads, - new ThreadFactoryBuilder().setNameFormat("hbasefsck-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadFactoryBuilder().setNameFormat("hbasefsck-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); } /** * Constructor - * - * @param conf - * Configuration object - * @throws MasterNotRunningException - * if the master is not running - * @throws ZooKeeperConnectionException - * if unable to connect to ZooKeeper + * @param conf Configuration object + * @throws MasterNotRunningException if the master is not running + * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper */ public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException, ZooKeeperConnectionException, IOException, ClassNotFoundException { @@ -379,9 +363,9 @@ public static RetryCounterFactory createLockRetryCounterFactory(Configuration co return new RetryCounterFactory( conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS), conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval", - DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL), + DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL), conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime", - DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME)); + DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME)); } /** @@ -391,9 +375,9 @@ private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration return new RetryCounterFactory( conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS), conf.getInt("hbase.hbck.createznode.attempt.sleep.interval", - DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL), + DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL), conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime", - DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME)); + DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME)); } /** @@ -426,20 +410,20 @@ public FSDataOutputStream call() throws IOException { try { FileSystem fs = CommonFSUtils.getCurrentFileSystem(this.conf); FsPermission defaultPerms = - CommonFSUtils.getFilePermissions(fs, this.conf, HConstants.DATA_FILE_UMASK_KEY); + CommonFSUtils.getFilePermissions(fs, this.conf, HConstants.DATA_FILE_UMASK_KEY); Path tmpDir = getTmpDir(conf); this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE); fs.mkdirs(tmpDir); final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms); out.writeBytes(InetAddress.getLocalHost().toString()); // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file. - out.writeBytes(" Written by an hbase-2.x Master to block an " + - "attempt by an hbase-1.x HBCK tool making modification to state. 
" + - "See 'HBCK must match HBase server version' in the hbase refguide."); + out.writeBytes(" Written by an hbase-2.x Master to block an " + + "attempt by an hbase-1.x HBCK tool making modification to state. " + + "See 'HBCK must match HBase server version' in the hbase refguide."); out.flush(); return out; - } catch(RemoteException e) { - if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){ + } catch (RemoteException e) { + if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) { return null; } else { throw e; @@ -448,25 +432,21 @@ public FSDataOutputStream call() throws IOException { } private FSDataOutputStream createFileWithRetries(final FileSystem fs, - final Path hbckLockFilePath, final FsPermission defaultPerms) - throws IOException { + final Path hbckLockFilePath, final FsPermission defaultPerms) throws IOException { IOException exception = null; do { try { return CommonFSUtils.create(fs, hbckLockFilePath, defaultPerms, false); } catch (IOException ioe) { - LOG.info("Failed to create lock file " + hbckLockFilePath.getName() - + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of " - + retryCounter.getMaxAttempts()); - LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(), - ioe); + LOG.info("Failed to create lock file " + hbckLockFilePath.getName() + ", try=" + + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); + LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(), ioe); try { exception = ioe; retryCounter.sleepUntilNextRetry(); } catch (InterruptedException ie) { throw (InterruptedIOException) new InterruptedIOException( - "Can't create lock file " + hbckLockFilePath.getName()) - .initCause(ie); + "Can't create lock file " + hbckLockFilePath.getName()).initCause(ie); } } } while (retryCounter.shouldRetry()); @@ -477,7 +457,6 @@ private FSDataOutputStream createFileWithRetries(final FileSystem fs, /** * This method maintains a lock using a file. If the creation fails we return null - * * @return FSDataOutputStream object corresponding to the newly opened lock file * @throws IOException if IO failure occurs */ @@ -487,8 +466,8 @@ public static Pair checkAndMarkRunningHbck(Configurati ExecutorService executor = Executors.newFixedThreadPool(1); FutureTask futureTask = new FutureTask<>(callable); executor.execute(futureTask); - final int timeoutInSeconds = conf.getInt( - "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT); + final int timeoutInSeconds = + conf.getInt("hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT); FSDataOutputStream stream = null; try { stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS); @@ -518,15 +497,13 @@ private void unlockHbck() { return; } catch (IOException ioe) { LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try=" - + (retryCounter.getAttemptTimes() + 1) + " of " - + retryCounter.getMaxAttempts()); + + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe); try { retryCounter.sleepUntilNextRetry(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); - LOG.warn("Interrupted while deleting lock file" + - HBCK_LOCK_PATH); + LOG.warn("Interrupted while deleting lock file" + HBCK_LOCK_PATH); return; } } @@ -535,8 +512,7 @@ private void unlockHbck() { } /** - * To repair region consistency, one must call connect() in order to repair - * online state. 
+ * To repair region consistency, one must call connect() in order to repair online state. */ public void connect() throws IOException { @@ -548,9 +524,9 @@ public void connect() throws IOException { this.hbckOutFd = pair.getSecond(); if (hbckOutFd == null) { setRetCode(-1); - LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " + - "[If you are sure no other instance is running, delete the lock file " + - HBCK_LOCK_PATH + " and rerun the tool]"); + LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " + + "[If you are sure no other instance is running, delete the lock file " + + HBCK_LOCK_PATH + " and rerun the tool]"); throw new IOException("Duplicate hbck - Abort"); } @@ -558,7 +534,6 @@ public void connect() throws IOException { hbckLockCleanup.set(true); } - // Add a shutdown hook to this thread, in case user tries to // kill the hbck with a ctrl-c, we want to cleanup the lock so that // it is available for further calls @@ -576,9 +551,8 @@ public void run() { connection = ConnectionFactory.createConnection(getConf()); admin = connection.getAdmin(); meta = connection.getTable(TableName.META_TABLE_NAME); - status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, - Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS, - Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION)); + status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS, + Option.MASTER, Option.BACKUP_MASTERS, Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION)); } /** @@ -589,7 +563,7 @@ private void loadDeployedRegions() throws IOException, InterruptedException { Collection regionServers = status.getLiveServerMetrics().keySet(); errors.print("Number of live region servers: " + regionServers.size()); if (details) { - for (ServerName rsinfo: regionServers) { + for (ServerName rsinfo : regionServers) { errors.print(" " + rsinfo.getServerName()); } } @@ -598,7 +572,7 @@ private void loadDeployedRegions() throws IOException, InterruptedException { Collection deadRegionServers = status.getDeadServerNames(); errors.print("Number of dead region servers: " + deadRegionServers.size()); if (details) { - for (ServerName name: deadRegionServers) { + for (ServerName name : deadRegionServers) { errors.print(" " + name); } } @@ -610,7 +584,7 @@ private void loadDeployedRegions() throws IOException, InterruptedException { Collection backupMasters = status.getBackupMasterNames(); errors.print("Number of backup masters: " + backupMasters.size()); if (details) { - for (ServerName name: backupMasters) { + for (ServerName name : backupMasters) { errors.print(" " + name); } } @@ -622,7 +596,7 @@ private void loadDeployedRegions() throws IOException, InterruptedException { List rits = status.getRegionStatesInTransition(); errors.print("Number of regions in transition: " + rits.size()); if (details) { - for (RegionState state: rits) { + for (RegionState state : rits) { errors.print(" " + state.toDescriptiveString()); } } @@ -647,9 +621,8 @@ private void clearState() { } /** - * This repair method analyzes hbase data in hdfs and repairs it to satisfy - * the table integrity rules. HBase doesn't need to be online for this - * operation to work. + * This repair method analyzes hbase data in hdfs and repairs it to satisfy the table integrity + * rules. HBase doesn't need to be online for this operation to work. */ public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException { // Initial pass to fix orphans. 
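The hunks above rework hbck1's exclusive-lock handshake: it creates a single marker file (HBCK_LOCK_FILE) under the cluster tmp directory with create-if-absent semantics, holds the returned stream open for the life of the run, and treats a failed create as a sign that another hbck instance is active. A minimal, self-contained sketch of that pattern follows; the class name, lock-file contents, and simplified error handling are illustrative assumptions, not the HBaseFsck code.

import java.io.IOException;
import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;

/** Illustrative sketch of an exclusive lock-file handshake; not the HBaseFsck implementation. */
public final class HbckStyleLockSketch {

  /**
   * Tries to create lockPath without overwriting. Returns the open stream on success so the
   * caller can hold it until unlock; returns null when another process already holds the lock.
   */
  public static FSDataOutputStream tryLock(Configuration conf, Path lockPath) throws IOException {
    FileSystem fs = lockPath.getFileSystem(conf);
    try {
      // overwrite=false makes the create fail if the lock file already exists.
      FSDataOutputStream out = fs.create(lockPath, false);
      out.writeBytes("locked by " + InetAddress.getLocalHost());
      out.flush();
      return out;
    } catch (FileAlreadyExistsException | RemoteException e) {
      // Simplification: the real tool inspects the wrapped exception class and retries with a
      // RetryCounter before concluding that a concurrent hbck holds the lock.
      return null;
    }
  }

  /** Best-effort unlock; the real tool retries the delete in a loop. */
  public static void unlock(Configuration conf, Path lockPath) throws IOException {
    lockPath.getFileSystem(conf).delete(lockPath, false);
  }

  private HbckStyleLockSketch() {
  }
}

The patched code additionally wraps the create and delete in a RetryCounter and registers a shutdown hook so that a Ctrl-C still releases the lock.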
@@ -680,15 +653,12 @@ public void offlineHdfsIntegrityRepair() throws IOException, InterruptedExceptio } /** - * This repair method requires the cluster to be online since it contacts - * region servers and the masters. It makes each region's state in HDFS, in - * hbase:meta, and deployments consistent. - * - * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable - * error. If 0, we have a clean hbase. + * This repair method requires the cluster to be online since it contacts region servers and the + * masters. It makes each region's state in HDFS, in hbase:meta, and deployments consistent. + * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable error. If + * 0, we have a clean hbase. */ - public int onlineConsistencyRepair() throws IOException, KeeperException, - InterruptedException { + public int onlineConsistencyRepair() throws IOException, KeeperException, InterruptedException { // get regions according to what is online on each RegionServer loadDeployedRegions(); @@ -749,14 +719,12 @@ public int onlineConsistencyRepair() throws IOException, KeeperException, /** * This method maintains an ephemeral znode. If the creation fails we return false or throw * exception - * * @return true if creating znode succeeds; false otherwise * @throws IOException if IO failure occurs */ private boolean setMasterInMaintenanceMode() throws IOException { RetryCounter retryCounter = createZNodeRetryCounterFactory.create(); - hbckEphemeralNodePath = ZNodePaths.joinZNode( - zkw.getZNodePaths().masterMaintZNode, + hbckEphemeralNodePath = ZNodePaths.joinZNode(zkw.getZNodePaths().masterMaintZNode, "hbck-" + Long.toString(EnvironmentEdgeManager.currentTime())); do { try { @@ -766,19 +734,19 @@ private boolean setMasterInMaintenanceMode() throws IOException { } } catch (KeeperException e) { if (retryCounter.getAttemptTimes() >= retryCounter.getMaxAttempts()) { - throw new IOException("Can't create znode " + hbckEphemeralNodePath, e); + throw new IOException("Can't create znode " + hbckEphemeralNodePath, e); } // fall through and retry } - LOG.warn("Fail to create znode " + hbckEphemeralNodePath + ", try=" + - (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); + LOG.warn("Fail to create znode " + hbckEphemeralNodePath + ", try=" + + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts()); try { retryCounter.sleepUntilNextRetry(); } catch (InterruptedException ie) { throw (InterruptedIOException) new InterruptedIOException( - "Can't create znode " + hbckEphemeralNodePath).initCause(ie); + "Can't create znode " + hbckEphemeralNodePath).initCause(ie); } } while (retryCounter.shouldRetry()); return hbckZodeCreated; @@ -814,12 +782,12 @@ public int onlineHbck() offlineReferenceFileRepair(); offlineHLinkFileRepair(); // If Master runs maintenance tasks (such as balancer, catalog janitor, etc) during online - // hbck, it is likely that hbck would be misled and report transient errors. Therefore, it + // hbck, it is likely that hbck would be misled and report transient errors. Therefore, it // is better to set Master into maintenance mode during online hbck. // if (!setMasterInMaintenanceMode()) { LOG.warn("HBCK is running while master is not in maintenance mode, you might see transient " - + "error. Please run HBCK multiple times to reduce the chance of transient error."); + + "error. 
Please run HBCK multiple times to reduce the chance of transient error."); } onlineConsistencyRepair(); @@ -844,8 +812,7 @@ public int onlineHbck() } public static byte[] keyOnly(byte[] b) { - if (b == null) - return b; + if (b == null) return b; int rowlength = Bytes.toShort(b, 0); byte[] result = new byte[rowlength]; System.arraycopy(b, Bytes.SIZEOF_SHORT, result, 0, rowlength); @@ -871,18 +838,19 @@ public void close() throws IOException { } private static class RegionBoundariesInformation { - public byte [] regionName; - public byte [] metaFirstKey; - public byte [] metaLastKey; - public byte [] storesFirstKey; - public byte [] storesLastKey; + public byte[] regionName; + public byte[] metaFirstKey; + public byte[] metaLastKey; + public byte[] storesFirstKey; + public byte[] storesLastKey; + @Override - public String toString () { - return "regionName=" + Bytes.toStringBinary(regionName) + - "\nmetaFirstKey=" + Bytes.toStringBinary(metaFirstKey) + - "\nmetaLastKey=" + Bytes.toStringBinary(metaLastKey) + - "\nstoresFirstKey=" + Bytes.toStringBinary(storesFirstKey) + - "\nstoresLastKey=" + Bytes.toStringBinary(storesLastKey); + public String toString() { + return "regionName=" + Bytes.toStringBinary(regionName) + "\nmetaFirstKey=" + + Bytes.toStringBinary(metaFirstKey) + "\nmetaLastKey=" + + Bytes.toStringBinary(metaLastKey) + "\nstoresFirstKey=" + + Bytes.toStringBinary(storesFirstKey) + "\nstoresLastKey=" + + Bytes.toStringBinary(storesLastKey); } } @@ -915,13 +883,13 @@ public void checkRegionBoundaries() { CacheConfig.DISABLED, true, getConf()); if ((reader.getFirstKey() != null) && ((storeFirstKey == null) || (comparator.compare(storeFirstKey, - ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey()) > 0))) { - storeFirstKey = ((KeyValue.KeyOnlyKeyValue)reader.getFirstKey().get()).getKey(); + ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey()) > 0))) { + storeFirstKey = ((KeyValue.KeyOnlyKeyValue) reader.getFirstKey().get()).getKey(); } if ((reader.getLastKey() != null) && ((storeLastKey == null) || (comparator.compare(storeLastKey, - ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey())) < 0)) { - storeLastKey = ((KeyValue.KeyOnlyKeyValue)reader.getLastKey().get()).getKey(); + ((KeyValue.KeyOnlyKeyValue) reader.getLastKey().get()).getKey())) < 0)) { + storeLastKey = ((KeyValue.KeyOnlyKeyValue) reader.getLastKey().get()).getKey(); } reader.close(); } @@ -945,16 +913,14 @@ public void checkRegionBoundaries() { // Checking start key. if ((currentRegionBoundariesInformation.storesFirstKey != null) && (currentRegionBoundariesInformation.metaFirstKey != null)) { - valid = valid - && comparator.compare(currentRegionBoundariesInformation.storesFirstKey, - currentRegionBoundariesInformation.metaFirstKey) >= 0; + valid = valid && comparator.compare(currentRegionBoundariesInformation.storesFirstKey, + currentRegionBoundariesInformation.metaFirstKey) >= 0; } // Checking stop key. 
if ((currentRegionBoundariesInformation.storesLastKey != null) && (currentRegionBoundariesInformation.metaLastKey != null)) { - valid = valid - && comparator.compare(currentRegionBoundariesInformation.storesLastKey, - currentRegionBoundariesInformation.metaLastKey) < 0; + valid = valid && comparator.compare(currentRegionBoundariesInformation.storesLastKey, + currentRegionBoundariesInformation.metaLastKey) < 0; } if (!valid) { errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries", @@ -979,13 +945,11 @@ private void adoptHdfsOrphans(Collection orphanHdfsDirs) throws } /** - * Orphaned regions are regions without a .regioninfo file in them. We "adopt" - * these orphans by creating a new region, and moving the column families, - * recovered edits, WALs, into the new region dir. We determine the region - * startkey and endkeys by looking at all of the hfiles inside the column - * families to identify the min and max keys. The resulting region will - * likely violate table integrity but will be dealt with by merging - * overlapping regions. + * Orphaned regions are regions without a .regioninfo file in them. We "adopt" these orphans by + * creating a new region, and moving the column families, recovered edits, WALs, into the new + * region dir. We determine the region startkey and endkeys by looking at all of the hfiles inside + * the column families to identify the min and max keys. The resulting region will likely violate + * table integrity but will be dealt with by merging overlapping regions. */ @SuppressWarnings("deprecation") private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { @@ -993,9 +957,9 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { FileSystem fs = p.getFileSystem(getConf()); FileStatus[] dirs = fs.listStatus(p); if (dirs == null) { - LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " + - p + ". This dir could probably be deleted."); - return ; + LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " + p + + ". This dir could probably be deleted."); + return; } TableName tableName = hi.getTableName(); @@ -1004,9 +968,9 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { TableDescriptor template = tableInfo.getTableDescriptor(); // find min and max key values - Pair orphanRegionRange = null; + Pair orphanRegionRange = null; for (FileStatus cf : dirs) { - String cfName= cf.getPath().getName(); + String cfName = cf.getPath().getName(); // TODO Figure out what the special dirs are if (cfName.startsWith(".") || cfName.equals(HConstants.SPLIT_LOGDIR_NAME)) continue; @@ -1043,7 +1007,7 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { if (Bytes.compareTo(orphanRegionRange.getFirst(), start) > 0) { orphanRegionRange.setFirst(start); } - if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0 ) { + if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0) { orphanRegionRange.setSecond(end); } } @@ -1055,14 +1019,13 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { sidelineRegionDir(fs, hi); return; } - LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " + - Bytes.toString(orphanRegionRange.getSecond()) + ")"); + LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " + + Bytes.toString(orphanRegionRange.getSecond()) + ")"); // create new region on hdfs. move data into place. 
RegionInfo regionInfo = RegionInfoBuilder.newBuilder(template.getTableName()) .setStartKey(orphanRegionRange.getFirst()) - .setEndKey(Bytes.add(orphanRegionRange.getSecond(), new byte[1])) - .build(); + .setEndKey(Bytes.add(orphanRegionRange.getSecond(), new byte[1])).build(); LOG.info("Creating new region : " + regionInfo); HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), regionInfo, template); Path target = region.getRegionFileSystem().getRegionDir(); @@ -1073,11 +1036,9 @@ private void adoptHdfsOrphan(HbckRegionInfo hi) throws IOException { } /** - * This method determines if there are table integrity errors in HDFS. If - * there are errors and the appropriate "fix" options are enabled, the method - * will first correct orphan regions making them into legit regiondirs, and - * then reload to merge potentially overlapping regions. - * + * This method determines if there are table integrity errors in HDFS. If there are errors and the + * appropriate "fix" options are enabled, the method will first correct orphan regions making them + * into legit regiondirs, and then reload to merge potentially overlapping regions. * @return number of table integrity errors found */ private int restoreHdfsIntegrity() throws IOException, InterruptedException { @@ -1121,12 +1082,12 @@ private int restoreHdfsIntegrity() throws IOException, InterruptedException { } /** - * Scan all the store file names to find any lingering reference files, - * which refer to some none-exiting files. If "fix" option is enabled, - * any lingering reference file will be sidelined if found. + * Scan all the store file names to find any lingering reference files, which refer to some + * none-exiting files. If "fix" option is enabled, any lingering reference file will be sidelined + * if found. *
<p>
      - * Lingering reference file prevents a region from opening. It has to - * be fixed before a cluster can start properly. + * Lingering reference file prevents a region from opening. It has to be fixed before a cluster + * can start properly. */ private void offlineReferenceFileRepair() throws IOException, InterruptedException { clearState(); @@ -1138,9 +1099,9 @@ private void offlineReferenceFileRepair() throws IOException, InterruptedExcepti new FSUtils.ReferenceFileFilter(fs), executor, errors); errors.print(""); LOG.info("Validating mapping using HDFS state"); - for (Path path: allFiles.values()) { + for (Path path : allFiles.values()) { Path referredToFile = StoreFileInfo.getReferredToFile(path); - if (fs.exists(referredToFile)) continue; // good, expected + if (fs.exists(referredToFile)) continue; // good, expected // Found a lingering reference file errors.reportError(ERROR_CODE.LINGERING_REFERENCE_HFILE, @@ -1163,8 +1124,7 @@ private void offlineReferenceFileRepair() throws IOException, InterruptedExcepti Path rootDir = getSidelineDir(); Path dst = new Path(rootDir, pathStr.substring(index + 1)); fs.mkdirs(dst.getParent()); - LOG.info("Trying to sideline reference file " - + path + " to " + dst); + LOG.info("Trying to sideline reference file " + path + " to " + dst); setShouldRerun(); success = fs.rename(path, dst); @@ -1178,17 +1138,17 @@ private void offlineReferenceFileRepair() throws IOException, InterruptedExcepti } /** - * Scan all the store file names to find any lingering HFileLink files, - * which refer to some none-exiting files. If "fix" option is enabled, - * any lingering HFileLink file will be sidelined if found. + * Scan all the store file names to find any lingering HFileLink files, which refer to some + * none-exiting files. If "fix" option is enabled, any lingering HFileLink file will be sidelined + * if found. */ private void offlineHLinkFileRepair() throws IOException, InterruptedException { Configuration conf = getConf(); Path hbaseRoot = CommonFSUtils.getRootDir(conf); FileSystem fs = hbaseRoot.getFileSystem(conf); LOG.info("Computing mapping of all link files"); - Map allFiles = FSUtils - .getTableStoreFilePathMap(fs, hbaseRoot, new FSUtils.HFileLinkFilter(), executor, errors); + Map allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot, + new FSUtils.HFileLinkFilter(), executor, errors); errors.print(""); LOG.info("Validating mapping using HDFS state"); @@ -1206,7 +1166,8 @@ private void offlineHLinkFileRepair() throws IOException, InterruptedException { // An HFileLink path should be like // ${hbase.rootdir}/data/namespace/table_name/region_id/family_name/linkedtable=linkedregionname-linkedhfilename - // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same folder structure. + // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same + // folder structure. boolean success = sidelineFile(fs, hbaseRoot, path); if (!success) { @@ -1215,12 +1176,13 @@ private void offlineHLinkFileRepair() throws IOException, InterruptedException { // An HFileLink backreference path should be like // ${hbase.rootdir}/archive/data/namespace/table_name/region_id/family_name/.links-linkedhfilename - // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same folder structure. 
- Path backRefPath = FileLink.getBackReferencesDir(HFileArchiveUtil - .getStoreArchivePath(conf, HFileLink.getReferencedTableName(path.getName().toString()), - HFileLink.getReferencedRegionName(path.getName().toString()), - path.getParent().getName()), - HFileLink.getReferencedHFileName(path.getName().toString())); + // sidelineing will happen in the ${hbase.rootdir}/${sidelinedir} directory with the same + // folder structure. + Path backRefPath = FileLink.getBackReferencesDir( + HFileArchiveUtil.getStoreArchivePath(conf, + HFileLink.getReferencedTableName(path.getName().toString()), + HFileLink.getReferencedRegionName(path.getName().toString()), path.getParent().getName()), + HFileLink.getReferencedHFileName(path.getName().toString())); success = sidelineFile(fs, hbaseRoot, backRefPath); if (!success) { @@ -1248,10 +1210,10 @@ private boolean sidelineFile(FileSystem fs, Path hbaseRoot, Path path) throws IO * TODO -- need to add tests for this. */ private void reportEmptyMetaCells() { - errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: " + - emptyRegionInfoQualifiers.size()); + errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: " + + emptyRegionInfoQualifiers.size()); if (details) { - for (Result r: emptyRegionInfoQualifiers) { + for (Result r : emptyRegionInfoQualifiers) { errors.print(" " + r); } } @@ -1269,10 +1231,9 @@ private void reportTablesInFlux() { errors.detail("Number of Tables in flux: " + numSkipped.get()); } for (TableDescriptor td : allTables) { - errors.detail(" Table: " + td.getTableName() + "\t" + - (td.isReadOnly() ? "ro" : "rw") + "\t" + - (td.isMetaRegion() ? "META" : " ") + "\t" + - " families: " + td.getColumnFamilyCount()); + errors.detail(" Table: " + td.getTableName() + "\t" + (td.isReadOnly() ? "ro" : "rw") + + "\t" + (td.isMetaRegion() ? "META" : " ") + "\t" + " families: " + + td.getColumnFamilyCount()); } } } @@ -1302,28 +1263,27 @@ private SortedMap loadHdfsRegionInfos() // Submit and wait for completion hbiFutures = executor.invokeAll(hbis); - for(int i=0; i f = hbiFutures.get(i); try { f.get(); - } catch(ExecutionException e) { - LOG.warn("Failed to read .regioninfo file for region " + - work.hbi.getRegionNameAsString(), e.getCause()); + } catch (ExecutionException e) { + LOG.warn("Failed to read .regioninfo file for region " + work.hbi.getRegionNameAsString(), + e.getCause()); } } Path hbaseRoot = CommonFSUtils.getRootDir(getConf()); FileSystem fs = hbaseRoot.getFileSystem(getConf()); // serialized table info gathering. - for (HbckRegionInfo hbi: hbckRegionInfos) { + for (HbckRegionInfo hbi : hbckRegionInfos) { if (hbi.getHdfsHRI() == null) { // was an orphan continue; } - // get table name from hdfs, populate various HBaseFsck tables. TableName tableName = hbi.getTableName(); if (tableName == null) { @@ -1344,9 +1304,9 @@ private SortedMap loadHdfsRegionInfos() } catch (IOException ioe) { if (!orphanTableDirs.containsKey(tableName)) { LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe); - //should only report once for each table + // should only report once for each table errors.reportError(ERROR_CODE.NO_TABLEINFO_FILE, - "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName); + "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName); Set columns = new HashSet<>(); orphanTableDirs.put(tableName, getColumnFamilyList(columns, hbi)); } @@ -1386,12 +1346,13 @@ private Set getColumnFamilyList(Set columns, HbckRegionInfo hbi) * To fabricate a .tableinfo file with following contents
      * 1. the correct tablename
      * 2. the correct colfamily list
      - * 3. the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}
      + * 3. the default properties for both {@link TableDescriptor} and + * {@link ColumnFamilyDescriptor}
      * @throws IOException */ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName, Set columns) throws IOException { - if (columns ==null || columns.isEmpty()) return false; + if (columns == null || columns.isEmpty()) return false; TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (String columnfamimly : columns) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly)); @@ -1421,7 +1382,8 @@ public void fixEmptyMetaCells() throws IOException { * 2. else create a default .tableinfo file with following items
      *  2.1 the correct tablename
      *  2.2 the correct colfamily list
      - *  2.3 the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}
      + *  2.3 the default properties for both {@link TableDescriptor} and + * {@link ColumnFamilyDescriptor}
      * @throws IOException */ public void fixOrphanTables() throws IOException { @@ -1430,14 +1392,12 @@ public void fixOrphanTables() throws IOException { List tmpList = new ArrayList<>(orphanTableDirs.keySet().size()); tmpList.addAll(orphanTableDirs.keySet()); TableDescriptor[] htds = getTableDescriptors(tmpList); - Iterator>> iter = - orphanTableDirs.entrySet().iterator(); + Iterator>> iter = orphanTableDirs.entrySet().iterator(); int j = 0; int numFailedCase = 0; FSTableDescriptors fstd = new FSTableDescriptors(getConf()); while (iter.hasNext()) { - Entry> entry = - iter.next(); + Entry> entry = iter.next(); TableName tableName = entry.getKey(); LOG.info("Trying to fix orphan table error: " + tableName); if (j < htds.length) { @@ -1451,10 +1411,12 @@ public void fixOrphanTables() throws IOException { } else { if (fabricateTableInfo(fstd, tableName, entry.getValue())) { LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file"); - LOG.warn("Strongly recommend to modify the TableDescriptor if necessary for: " + tableName); + LOG.warn( + "Strongly recommend to modify the TableDescriptor if necessary for: " + tableName); iter.remove(); } else { - LOG.error("Unable to create default .tableinfo for " + tableName + " while missing column family information"); + LOG.error("Unable to create default .tableinfo for " + tableName + + " while missing column family information"); numFailedCase++; } } @@ -1465,14 +1427,14 @@ public void fixOrphanTables() throws IOException { // all orphanTableDirs are luckily recovered // re-run doFsck after recovering the .tableinfo file setShouldRerun(); - LOG.warn("Strongly recommend to re-run manually hfsck after all orphanTableDirs being fixed"); + LOG.warn( + "Strongly recommend to re-run manually hfsck after all orphanTableDirs being fixed"); } else if (numFailedCase > 0) { - LOG.error("Failed to fix " + numFailedCase - + " OrphanTables with default .tableinfo files"); + LOG.error("Failed to fix " + numFailedCase + " OrphanTables with default .tableinfo files"); } } - //cleanup the list + // cleanup the list orphanTableDirs.clear(); } @@ -1482,11 +1444,11 @@ public void fixOrphanTables() throws IOException { */ private void logParallelMerge() { if (getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) { - LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" + - " false to run serially."); + LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" + + " false to run serially."); } else { - LOG.info("Handling overlap merges serially. set hbasefsck.overlap.merge.parallel to" + - " true to run in parallel."); + LOG.info("Handling overlap merges serially. 
set hbasefsck.overlap.merge.parallel to" + + " true to run in parallel."); } } @@ -1497,8 +1459,7 @@ private SortedMap checkHdfsIntegrity(boolean fixHoles, for (HbckTableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler; if (fixHoles || fixOverlaps) { - handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(), - fixHoles, fixOverlaps); + handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(), fixHoles, fixOverlaps); } else { handler = tInfo.new IntegrityFixSuggester(tInfo, errors); } @@ -1514,8 +1475,7 @@ Path getSidelineDir() throws IOException { if (sidelineDir == null) { Path hbaseDir = CommonFSUtils.getRootDir(getConf()); Path hbckDir = new Path(hbaseDir, HConstants.HBCK_SIDELINEDIR_NAME); - sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-" - + startMillis); + sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-" + startMillis); } return sidelineDir; } @@ -1529,14 +1489,12 @@ Path sidelineRegionDir(FileSystem fs, HbckRegionInfo hi) throws IOException { /** * Sideline a region dir (instead of deleting it) - * * @param parentDir if specified, the region will be sidelined to folder like - * {@literal .../parentDir/

      /}. The purpose is to group together - * similar regions sidelined, for example, those regions should be bulk loaded back later - * on. If NULL, it is ignored. + * {@literal .../parentDir/
      /}. The purpose is to group together + * similar regions sidelined, for example, those regions should be bulk loaded back later + * on. If NULL, it is ignored. */ - Path sidelineRegionDir(FileSystem fs, - String parentDir, HbckRegionInfo hi) throws IOException { + Path sidelineRegionDir(FileSystem fs, String parentDir, HbckRegionInfo hi) throws IOException { TableName tableName = hi.getTableName(); Path regionDir = hi.getHdfsRegionDir(); @@ -1549,22 +1507,22 @@ Path sidelineRegionDir(FileSystem fs, if (parentDir != null) { rootDir = new Path(rootDir, parentDir); } - Path sidelineTableDir= CommonFSUtils.getTableDir(rootDir, tableName); + Path sidelineTableDir = CommonFSUtils.getTableDir(rootDir, tableName); Path sidelineRegionDir = new Path(sidelineTableDir, regionDir.getName()); fs.mkdirs(sidelineRegionDir); boolean success = false; - FileStatus[] cfs = fs.listStatus(regionDir); + FileStatus[] cfs = fs.listStatus(regionDir); if (cfs == null) { LOG.info("Region dir is empty: " + regionDir); } else { for (FileStatus cf : cfs) { Path src = cf.getPath(); - Path dst = new Path(sidelineRegionDir, src.getName()); + Path dst = new Path(sidelineRegionDir, src.getName()); if (fs.isFile(src)) { // simple file success = fs.rename(src, dst); if (!success) { - String msg = "Unable to rename file " + src + " to " + dst; + String msg = "Unable to rename file " + src + " to " + dst; LOG.error(msg); throw new IOException(msg); } @@ -1577,14 +1535,14 @@ Path sidelineRegionDir(FileSystem fs, LOG.info("Sidelining files from " + src + " into containing region " + dst); // FileSystem.rename is inconsistent with directories -- if the // dst (foo/a) exists and is a dir, and the src (foo/b) is a dir, - // it moves the src into the dst dir resulting in (foo/a/b). If + // it moves the src into the dst dir resulting in (foo/a/b). If // the dst does not exist, and the src a dir, src becomes dst. (foo/b) FileStatus[] hfiles = fs.listStatus(src); if (hfiles != null && hfiles.length > 0) { for (FileStatus hfile : hfiles) { success = fs.rename(hfile.getPath(), dst); if (!success) { - String msg = "Unable to rename file " + src + " to " + dst; + String msg = "Unable to rename file " + src + " to " + dst; LOG.error(msg); throw new IOException(msg); } @@ -1610,14 +1568,13 @@ Path sidelineRegionDir(FileSystem fs, * @throws ZooKeeperConnectionException * @throws IOException */ - private void loadTableStates() - throws IOException { + private void loadTableStates() throws IOException { tableStates = MetaTableAccessor.getTableStates(connection); // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in // meantime. 
this.tableStates.put(TableName.META_TABLE_NAME, - new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED)); + new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED)); } /** @@ -1625,14 +1582,12 @@ private void loadTableStates() * @param tableName table to check status of */ boolean isTableDisabled(TableName tableName) { - return tableStates.containsKey(tableName) - && tableStates.get(tableName) + return tableStates.containsKey(tableName) && tableStates.get(tableName) .inStates(TableState.State.DISABLED, TableState.State.DISABLING); } /** - * Scan HDFS for all regions, recording their information into - * regionInfoMap + * Scan HDFS for all regions, recording their information into regionInfoMap */ public void loadHdfsRegionDirs() throws IOException, InterruptedException { Path rootDir = CommonFSUtils.getRootDir(getConf()); @@ -1646,44 +1601,41 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException { List paths = FSUtils.getTableDirs(fs, rootDir); for (Path path : paths) { TableName tableName = CommonFSUtils.getTableName(path); - if ((!checkMetaOnly && - isTableIncluded(tableName)) || - tableName.equals(TableName.META_TABLE_NAME)) { - tableDirs.add(fs.getFileStatus(path)); - } + if ((!checkMetaOnly && isTableIncluded(tableName)) + || tableName.equals(TableName.META_TABLE_NAME)) { + tableDirs.add(fs.getFileStatus(path)); + } } // verify that version file exists if (!foundVersionFile) { errors.reportError(ERROR_CODE.NO_VERSION_FILE, - "Version file does not exist in root dir " + rootDir); + "Version file does not exist in root dir " + rootDir); if (shouldFixVersionFile()) { - LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME - + " file."); + LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME + " file."); setShouldRerun(); - FSUtils.setVersion(fs, rootDir, getConf().getInt( - HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt( - HConstants.VERSION_FILE_WRITE_ATTEMPTS, + FSUtils.setVersion(fs, rootDir, + getConf().getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), + getConf().getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS)); } } // Avoid multithreading at table-level because already multithreaded internally at - // region-level. Additionally multithreading at table-level can lead to deadlock - // if there are many tables in the cluster. Since there are a limited # of threads + // region-level. Additionally multithreading at table-level can lead to deadlock + // if there are many tables in the cluster. Since there are a limited # of threads // in the executor's thread pool and if we multithread at the table-level by putting // WorkItemHdfsDir callables into the executor, then we will have some threads in the // executor tied up solely in waiting for the tables' region-level calls to complete. // If there are enough tables then there will be no actual threads in the pool left // for the region-level callables to be serviced. 
for (FileStatus tableDir : tableDirs) { - LOG.debug("Loading region dirs from " +tableDir.getPath()); + LOG.debug("Loading region dirs from " + tableDir.getPath()); WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir); try { item.call(); } catch (ExecutionException e) { - LOG.warn("Could not completely load table dir " + - tableDir.getPath(), e.getCause()); + LOG.warn("Could not completely load table dir " + tableDir.getPath(), e.getCause()); } } errors.print(""); @@ -1750,26 +1702,25 @@ public boolean isAborted() { * @throws IOException if a remote or network exception occurs */ void processRegionServers(Collection regionServerList) - throws IOException, InterruptedException { + throws IOException, InterruptedException { List workItems = new ArrayList<>(regionServerList.size()); List> workFutures; // loop to contact each region server in parallel - for (ServerName rsinfo: regionServerList) { + for (ServerName rsinfo : regionServerList) { workItems.add(new WorkItemRegion(this, rsinfo, errors, connection)); } workFutures = executor.invokeAll(workItems); - for(int i=0; i f = workFutures.get(i); try { f.get(); - } catch(ExecutionException e) { - LOG.warn("Could not process regionserver {}", item.rsinfo.getAddress(), - e.getCause()); + } catch (ExecutionException e) { + LOG.warn("Could not process regionserver {}", item.rsinfo.getAddress(), e.getCause()); } } } @@ -1777,13 +1728,12 @@ void processRegionServers(Collection regionServerList) /** * Check consistency of all regions that have been found in previous phases. */ - private void checkAndFixConsistency() - throws IOException, KeeperException, InterruptedException { + private void checkAndFixConsistency() throws IOException, KeeperException, InterruptedException { // Divide the checks in two phases. One for default/primary replicas and another // for the non-primary ones. Keeps code cleaner this way. List workItems = new ArrayList<>(regionInfoMap.size()); - for (java.util.Map.Entry e: regionInfoMap.entrySet()) { + for (java.util.Map.Entry e : regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { workItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); } @@ -1791,11 +1741,11 @@ private void checkAndFixConsistency() checkRegionConsistencyConcurrently(workItems); boolean prevHdfsCheck = shouldCheckHdfs(); - setCheckHdfs(false); //replicas don't have any hdfs data + setCheckHdfs(false); // replicas don't have any hdfs data // Run a pass over the replicas and fix any assignment issues that exist on the currently // deployed/undeployed replicas. List replicaWorkItems = new ArrayList<>(regionInfoMap.size()); - for (java.util.Map.Entry e: regionInfoMap.entrySet()) { + for (java.util.Map.Entry e : regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { replicaWorkItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); } @@ -1807,11 +1757,11 @@ private void checkAndFixConsistency() // not get accurate state of the hbase if continuing. The config here allows users to tune // the tolerance of number of skipped region. // TODO: evaluate the consequence to continue the hbck operation without config. 
- int terminateThreshold = getConf().getInt("hbase.hbck.skipped.regions.limit", 0); + int terminateThreshold = getConf().getInt("hbase.hbck.skipped.regions.limit", 0); int numOfSkippedRegions = skippedRegions.size(); if (numOfSkippedRegions > 0 && numOfSkippedRegions > terminateThreshold) { throw new IOException(numOfSkippedRegions - + " region(s) could not be checked or repaired. See logs for detail."); + + " region(s) could not be checked or repaired. See logs for detail."); } if (shouldCheckHdfs()) { @@ -1822,25 +1772,25 @@ private void checkAndFixConsistency() /** * Check consistency of all regions using multiple threads concurrently. */ - private void checkRegionConsistencyConcurrently( - final List workItems) - throws IOException, KeeperException, InterruptedException { + private void + checkRegionConsistencyConcurrently(final List workItems) + throws IOException, KeeperException, InterruptedException { if (workItems.isEmpty()) { - return; // nothing to check + return; // nothing to check } List> workFutures = executor.invokeAll(workItems); - for(Future f: workFutures) { + for (Future f : workFutures) { try { f.get(); - } catch(ExecutionException e1) { - LOG.warn("Could not check region consistency " , e1.getCause()); + } catch (ExecutionException e1) { + LOG.warn("Could not check region consistency ", e1.getCause()); if (e1.getCause() instanceof IOException) { - throw (IOException)e1.getCause(); + throw (IOException) e1.getCause(); } else if (e1.getCause() instanceof KeeperException) { - throw (KeeperException)e1.getCause(); + throw (KeeperException) e1.getCause(); } else if (e1.getCause() instanceof InterruptedException) { - throw (InterruptedException)e1.getCause(); + throw (InterruptedException) e1.getCause(); } else { throw new IOException(e1.getCause()); } @@ -1864,8 +1814,9 @@ public synchronized Void call() throws Exception { } catch (Exception e) { // If the region is non-META region, skip this region and send warning/error message; if // the region is META region, we should not continue. - LOG.warn("Unable to complete check or repair the region '" + hbi.getRegionNameAsString() - + "'.", e); + LOG.warn( + "Unable to complete check or repair the region '" + hbi.getRegionNameAsString() + "'.", + e); if (hbi.getHdfsHRI().isMetaRegion()) { throw e; } @@ -1886,9 +1837,7 @@ private void addSkippedRegion(final HbckRegionInfo hbi) { } /** - * Check and fix table states, assumes full info available: - * - tableInfos - * - empty tables loaded + * Check and fix table states, assumes full info available: - tableInfos - empty tables loaded */ private void checkAndFixTableStates() throws IOException { // first check dangling states @@ -1896,21 +1845,19 @@ private void checkAndFixTableStates() throws IOException { TableName tableName = entry.getKey(); TableState tableState = entry.getValue(); HbckTableInfo tableInfo = tablesInfo.get(tableName); - if (isTableIncluded(tableName) - && !tableName.isSystemTable() - && tableInfo == null) { + if (isTableIncluded(tableName) && !tableName.isSystemTable() && tableInfo == null) { if (fixMeta) { MetaTableAccessor.deleteTableState(connection, tableName); TableState state = MetaTableAccessor.getTableState(connection, tableName); if (state != null) { errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE, - tableName + " unable to delete dangling table state " + tableState); + tableName + " unable to delete dangling table state " + tableState); } } else if (!checkMetaOnly) { // dangling table state in meta if checkMetaOnly is false. 
If checkMetaOnly is // true, tableInfo will be null as tablesInfo are not polulated for all tables from hdfs errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE, - tableName + " has dangling table state " + tableState); + tableName + " has dangling table state " + tableState); } } } @@ -1922,11 +1869,10 @@ private void checkAndFixTableStates() throws IOException { TableState newState = MetaTableAccessor.getTableState(connection, tableName); if (newState == null) { errors.reportError(ERROR_CODE.NO_TABLE_STATE, - "Unable to change state for table " + tableName + " in meta "); + "Unable to change state for table " + tableName + " in meta "); } } else { - errors.reportError(ERROR_CODE.NO_TABLE_STATE, - tableName + " has no state in meta "); + errors.reportError(ERROR_CODE.NO_TABLE_STATE, tableName + " has no state in meta "); } } } @@ -1947,9 +1893,9 @@ private void preCheckPermission() throws IOException { fs.access(file.getPath(), FsAction.WRITE); } catch (AccessControlException ace) { LOG.warn("Got AccessDeniedException when preCheckPermission ", ace); - errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName() - + " does not have write perms to " + file.getPath() - + ". Please rerun hbck as hdfs user " + file.getOwner()); + errors.reportError(ERROR_CODE.WRONG_USAGE, + "Current user " + ugi.getUserName() + " does not have write perms to " + file.getPath() + + ". Please rerun hbck as hdfs user " + file.getOwner()); throw ace; } } @@ -1968,7 +1914,7 @@ private void deleteMetaRegion(HbckRegionInfo hi) throws IOException { private void deleteMetaRegion(byte[] metaKey) throws IOException { Delete d = new Delete(metaKey); meta.delete(d); - LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" ); + LOG.info("Deleted " + Bytes.toString(metaKey) + " from META"); } /** @@ -1982,29 +1928,28 @@ private void resetSplitParent(HbckRegionInfo hi) throws IOException { mutations.add(d); RegionInfo hri = RegionInfoBuilder.newBuilder(hi.getMetaEntry().getRegionInfo()) - .setOffline(false).setSplit(false).build(); + .setOffline(false).setSplit(false).build(); Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime()); mutations.add(p); meta.mutateRow(mutations); - LOG.info("Reset split parent " + hi.getMetaEntry().getRegionInfo().getRegionNameAsString() + - " in META"); + LOG.info("Reset split parent " + hi.getMetaEntry().getRegionInfo().getRegionNameAsString() + + " in META"); } /** - * This backwards-compatibility wrapper for permanently offlining a region - * that should not be alive. If the region server does not support the - * "offline" method, it will use the closest unassign method instead. This - * will basically work until one attempts to disable or delete the affected - * table. The problem has to do with in-memory only master state, so - * restarting the HMaster or failing over to another should fix this. + * This backwards-compatibility wrapper for permanently offlining a region that should not be + * alive. If the region server does not support the "offline" method, it will use the closest + * unassign method instead. This will basically work until one attempts to disable or delete the + * affected table. The problem has to do with in-memory only master state, so restarting the + * HMaster or failing over to another should fix this. 
*/ void offline(byte[] regionName) throws IOException { String regionString = Bytes.toStringBinary(regionName); if (!rsSupportsOffline) { LOG.warn( - "Using unassign region " + regionString + " instead of using offline method, you should" + - " restart HMaster after these repairs"); + "Using unassign region " + regionString + " instead of using offline method, you should" + + " restart HMaster after these repairs"); admin.unassign(regionName, true); return; } @@ -2014,12 +1959,12 @@ void offline(byte[] regionName) throws IOException { LOG.info("Offlining region " + regionString); admin.offline(regionName); } catch (IOException ioe) { - String notFoundMsg = "java.lang.NoSuchMethodException: " + - "org.apache.hadoop.hbase.master.HMaster.offline([B)"; + String notFoundMsg = "java.lang.NoSuchMethodException: " + + "org.apache.hadoop.hbase.master.HMaster.offline([B)"; if (ioe.getMessage().contains(notFoundMsg)) { - LOG.warn("Using unassign region " + regionString + - " instead of using offline method, you should" + - " restart HMaster after these repairs"); + LOG.warn( + "Using unassign region " + regionString + " instead of using offline method, you should" + + " restart HMaster after these repairs"); rsSupportsOffline = false; // in the future just use unassign admin.unassign(regionName, true); return; @@ -2029,16 +1974,13 @@ void offline(byte[] regionName) throws IOException { } /** - * Attempts to undeploy a region from a region server based in information in - * META. Any operations that modify the file system should make sure that - * its corresponding region is not deployed to prevent data races. - * - * A separate call is required to update the master in-memory region state - * kept in the AssignementManager. Because disable uses this state instead of - * that found in META, we can't seem to cleanly disable/delete tables that - * have been hbck fixed. When used on a version of HBase that does not have - * the offline ipc call exposed on the master (<0.90.5, <0.92.0) a master - * restart or failover may be required. + * Attempts to undeploy a region from a region server based in information in META. Any operations + * that modify the file system should make sure that its corresponding region is not deployed to + * prevent data races. A separate call is required to update the master in-memory region state + * kept in the AssignementManager. Because disable uses this state instead of that found in META, + * we can't seem to cleanly disable/delete tables that have been hbck fixed. When used on a + * version of HBase that does not have the offline ipc call exposed on the master (<0.90.5, + * <0.92.0) a master restart or failover may be required. 
*/ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException { if (hi.getMetaEntry() == null && hi.getHdfsEntry() == null) { @@ -2062,25 +2004,22 @@ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException { Result r = meta.get(get); RegionLocations rl = CatalogFamilyFormat.getRegionLocations(r); if (rl == null) { - LOG.warn("Unable to close region " + hi.getRegionNameAsString() + - " since meta does not have handle to reach it"); + LOG.warn("Unable to close region " + hi.getRegionNameAsString() + + " since meta does not have handle to reach it"); return; } for (HRegionLocation h : rl.getRegionLocations()) { ServerName serverName = h.getServerName(); if (serverName == null) { - errors.reportError("Unable to close region " - + hi.getRegionNameAsString() + " because meta does not " - + "have handle to reach it."); + errors.reportError("Unable to close region " + hi.getRegionNameAsString() + + " because meta does not " + "have handle to reach it."); continue; } RegionInfo hri = h.getRegion(); if (hri == null) { LOG.warn("Unable to close region " + hi.getRegionNameAsString() - + " because hbase:meta had invalid or missing " - + HConstants.CATALOG_FAMILY_STR + ":" - + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) - + " qualifier value."); + + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":" + + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value."); continue; } // close the region -- close files and remove assignment @@ -2097,13 +2036,13 @@ private void undeployRegions(HbckRegionInfo hi) throws IOException, InterruptedE int numReplicas = admin.getDescriptor(hi.getTableName()).getRegionReplication(); for (int i = 1; i < numReplicas; i++) { if (hi.getPrimaryHRIForDeployedReplica() == null) continue; - RegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( - hi.getPrimaryHRIForDeployedReplica(), i); + RegionInfo hri = + RegionReplicaUtil.getRegionInfoForReplica(hi.getPrimaryHRIForDeployedReplica(), i); HbckRegionInfo h = regionInfoMap.get(hri.getEncodedName()); if (h != null) { undeployRegionsForHbi(h); - //set skip checks; we undeployed it, and we don't want to evaluate this anymore - //in consistency checks + // set skip checks; we undeployed it, and we don't want to evaluate this anymore + // in consistency checks h.setSkipChecks(true); } } @@ -2111,20 +2050,21 @@ private void undeployRegions(HbckRegionInfo hi) throws IOException, InterruptedE private void undeployRegionsForHbi(HbckRegionInfo hi) throws IOException, InterruptedException { for (HbckRegionInfo.OnlineEntry rse : hi.getOnlineEntries()) { - LOG.debug("Undeploy region " + rse.getRegionInfo() + " from " + rse.getServerName()); + LOG.debug("Undeploy region " + rse.getRegionInfo() + " from " + rse.getServerName()); try { - HBaseFsckRepair - .closeRegionSilentlyAndWait(connection, rse.getServerName(), rse.getRegionInfo()); + HBaseFsckRepair.closeRegionSilentlyAndWait(connection, rse.getServerName(), + rse.getRegionInfo()); offline(rse.getRegionInfo().getRegionName()); } catch (IOException ioe) { LOG.warn("Got exception when attempting to offline region " - + Bytes.toString(rse.getRegionInfo().getRegionName()), ioe); + + Bytes.toString(rse.getRegionInfo().getRegionName()), + ioe); } } } - private void tryAssignmentRepair(HbckRegionInfo hbi, String msg) throws IOException, - KeeperException, InterruptedException { + private void tryAssignmentRepair(HbckRegionInfo hbi, String msg) + throws IOException, KeeperException, InterruptedException { // 
If we are trying to fix the errors if (shouldFixAssignments()) { errors.print(msg); @@ -2145,8 +2085,8 @@ private void tryAssignmentRepair(HbckRegionInfo hbi, String msg) throws IOExcept HbckRegionInfo h = regionInfoMap.get(hri.getEncodedName()); if (h != null) { undeployRegions(h); - //set skip checks; we undeploy & deploy it; we don't want to evaluate this hbi anymore - //in consistency checks + // set skip checks; we undeploy & deploy it; we don't want to evaluate this hbi anymore + // in consistency checks h.setSkipChecks(true); } HBaseFsckRepair.fixUnassigned(admin, hri); @@ -2170,14 +2110,14 @@ private void checkRegionConsistency(final String key, final HbckRegionInfo hbi) boolean hasMetaAssignment = inMeta && hbi.getMetaEntry().regionServer != null; boolean isDeployed = !hbi.getDeployedOn().isEmpty(); boolean isMultiplyDeployed = hbi.getDeployedOn().size() > 1; - boolean deploymentMatchesMeta = hasMetaAssignment && isDeployed && !isMultiplyDeployed && - hbi.getMetaEntry().regionServer.equals(hbi.getDeployedOn().get(0)); - boolean splitParent = inMeta && hbi.getMetaEntry().getRegionInfo().isSplit() && - hbi.getMetaEntry().getRegionInfo().isOffline(); + boolean deploymentMatchesMeta = hasMetaAssignment && isDeployed && !isMultiplyDeployed + && hbi.getMetaEntry().regionServer.equals(hbi.getDeployedOn().get(0)); + boolean splitParent = inMeta && hbi.getMetaEntry().getRegionInfo().isSplit() + && hbi.getMetaEntry().getRegionInfo().isOffline(); boolean shouldBeDeployed = - inMeta && !isTableDisabled(hbi.getMetaEntry().getRegionInfo().getTable()); - boolean recentlyModified = inHdfs && - hbi.getModTime() + timelag > EnvironmentEdgeManager.currentTime(); + inMeta && !isTableDisabled(hbi.getMetaEntry().getRegionInfo().getTable()); + boolean recentlyModified = + inHdfs && hbi.getModTime() + timelag > EnvironmentEdgeManager.currentTime(); // ========== First the healthy cases ============= if (hbi.containsOnlyHdfsEdits()) { @@ -2186,8 +2126,8 @@ private void checkRegionConsistency(final String key, final HbckRegionInfo hbi) if (inMeta && inHdfs && isDeployed && deploymentMatchesMeta && shouldBeDeployed) { return; } else if (inMeta && inHdfs && !shouldBeDeployed && !isDeployed) { - LOG.info("Region " + descriptiveName + " is in META, and in a disabled " + - "tabled that is not deployed"); + LOG.info("Region " + descriptiveName + " is in META, and in a disabled " + + "tabled that is not deployed"); return; } else if (recentlyModified) { LOG.warn("Region " + descriptiveName + " was recently modified -- skipping"); @@ -2198,9 +2138,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { // We shouldn't have record of this region at all then! 
assert false : "Entry for region with no data"; } else if (!inMeta && !inHdfs && isDeployed) { - errors.reportError(ERROR_CODE.NOT_IN_META_HDFS, "Region " - + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but " + - "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + errors.reportError(ERROR_CODE.NOT_IN_META_HDFS, + "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but " + + "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); if (shouldFixAssignments()) { undeployRegions(hbi); } @@ -2214,15 +2154,13 @@ else if (!inMeta && !inHdfs && !isDeployed) { + " got merge recently, its file(s) will be cleaned by CatalogJanitor later"); return; } - errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " - + descriptiveName + " on HDFS, but not listed in hbase:meta " + - "or deployed on any region server"); + errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " + descriptiveName + + " on HDFS, but not listed in hbase:meta " + "or deployed on any region server"); // restore region consistency of an adopted orphan if (shouldFixMeta()) { if (!hbi.isHdfsRegioninfoPresent()) { LOG.error("Region " + hbi.getHdfsHRI() + " could have been repaired" - + " in table integrity repair phase if -fixHdfsOrphans was" + - " used."); + + " in table integrity repair phase if -fixHdfsOrphans was" + " used."); return; } @@ -2231,10 +2169,10 @@ else if (!inMeta && !inHdfs && !isDeployed) { for (RegionInfo region : tableInfo.getRegionsFromMeta(this.regionInfoMap)) { if (Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0 - && (region.getEndKey().length == 0 || Bytes.compareTo(region.getEndKey(), - hri.getEndKey()) >= 0) + && (region.getEndKey().length == 0 + || Bytes.compareTo(region.getEndKey(), hri.getEndKey()) >= 0) && Bytes.compareTo(region.getStartKey(), hri.getEndKey()) <= 0) { - if(region.isSplit() || region.isOffline()) continue; + if (region.isSplit() || region.isOffline()) continue; Path regionDir = hbi.getHdfsRegionDir(); FileSystem fs = regionDir.getFileSystem(getConf()); List familyDirs = FSUtils.getFamilyDirs(fs, regionDir); @@ -2247,8 +2185,8 @@ else if (!inMeta && !inHdfs && !isDeployed) { LOG.warn(hri + " start and stop keys are in the range of " + region + ". The region might not be cleaned up from hdfs when region " + region + " split failed. 
Hence deleting from hdfs."); - HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs, - regionDir.getParent(), hri); + HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs, regionDir.getParent(), + hri); return; } } @@ -2258,8 +2196,8 @@ else if (!inMeta && !inHdfs && !isDeployed) { LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI()); int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), - admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet(), numReplicas); + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), + numReplicas); tryAssignmentRepair(hbi, "Trying to reassign region..."); } @@ -2286,12 +2224,12 @@ else if (!inMeta && !inHdfs && !isDeployed) { LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI()); int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), - admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet(), numReplicas); + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), + numReplicas); tryAssignmentRepair(hbi, "Trying to fix unassigned region..."); } - // ========== Cases where the region is in hbase:meta ============= + // ========== Cases where the region is in hbase:meta ============= } else if (inMeta && inHdfs && !isDeployed && splitParent) { // check whether this is an actual error, or just transient state where parent // is not cleaned @@ -2315,27 +2253,26 @@ else if (!inMeta && !inHdfs && !isDeployed) { return; } - errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region " - + descriptiveName + " is a split parent in META, in HDFS, " - + "and not deployed on any region server. This could be transient, " - + "consider to run the catalog janitor first!"); + errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, + "Region " + descriptiveName + " is a split parent in META, in HDFS, " + + "and not deployed on any region server. This could be transient, " + + "consider to run the catalog janitor first!"); if (shouldFixSplitParents()) { setShouldRerun(); resetSplitParent(hbi); } } else if (inMeta && !inHdfs && !isDeployed) { - errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region " - + descriptiveName + " found in META, but not in HDFS " - + "or deployed on any region server."); + errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region " + descriptiveName + + " found in META, but not in HDFS " + "or deployed on any region server."); if (shouldFixMeta()) { deleteMetaRegion(hbi); } } else if (inMeta && !inHdfs && isDeployed) { - errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName - + " found in META, but not in HDFS, " + - "and deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); - // We treat HDFS as ground truth. Any information in meta is transient - // and equivalent data can be regenerated. So, lets unassign and remove + errors.reportError(ERROR_CODE.NOT_IN_HDFS, + "Region " + descriptiveName + " found in META, but not in HDFS, " + "and deployed on " + + Joiner.on(", ").join(hbi.getDeployedOn())); + // We treat HDFS as ground truth. Any information in meta is transient + // and equivalent data can be regenerated. So, lets unassign and remove // these problems from META. 
if (shouldFixAssignments()) { errors.print("Trying to fix unassigned region..."); @@ -2346,13 +2283,13 @@ else if (!inMeta && !inHdfs && !isDeployed) { deleteMetaRegion(hbi); } } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) { - errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName - + " not deployed on any region server."); + errors.reportError(ERROR_CODE.NOT_DEPLOYED, + "Region " + descriptiveName + " not deployed on any region server."); tryAssignmentRepair(hbi, "Trying to fix unassigned region..."); } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) { errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, - "Region " + descriptiveName + " should not be deployed according " + - "to META, but is deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + "Region " + descriptiveName + " should not be deployed according " + + "to META, but is deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); if (shouldFixAssignments()) { errors.print("Trying to close the region " + descriptiveName); setShouldRerun(); @@ -2361,9 +2298,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { } } else if (inMeta && inHdfs && isMultiplyDeployed) { errors.reportError(ERROR_CODE.MULTI_DEPLOYED, - "Region " + descriptiveName + " is listed in hbase:meta on region server " + - hbi.getMetaEntry().regionServer + " but is multiply assigned to region servers " + - Joiner.on(", ").join(hbi.getDeployedOn())); + "Region " + descriptiveName + " is listed in hbase:meta on region server " + + hbi.getMetaEntry().regionServer + " but is multiply assigned to region servers " + + Joiner.on(", ").join(hbi.getDeployedOn())); // If we are trying to fix the errors if (shouldFixAssignments()) { errors.print("Trying to fix assignment error..."); @@ -2372,10 +2309,10 @@ else if (!inMeta && !inHdfs && !isDeployed) { hbi.getDeployedOn()); } } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) { - errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region " - + descriptiveName + " listed in hbase:meta on region server " + - hbi.getMetaEntry().regionServer + " but found on region server " + - hbi.getDeployedOn().get(0)); + errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, + "Region " + descriptiveName + " listed in hbase:meta on region server " + + hbi.getMetaEntry().regionServer + " but found on region server " + + hbi.getDeployedOn().get(0)); // If we are trying to fix the errors if (shouldFixAssignments()) { errors.print("Trying to fix assignment error..."); @@ -2385,21 +2322,17 @@ else if (!inMeta && !inHdfs && !isDeployed) { HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI()); } } else { - errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName + - " is in an unforeseen state:" + - " inMeta=" + inMeta + - " inHdfs=" + inHdfs + - " isDeployed=" + isDeployed + - " isMultiplyDeployed=" + isMultiplyDeployed + - " deploymentMatchesMeta=" + deploymentMatchesMeta + - " shouldBeDeployed=" + shouldBeDeployed); + errors.reportError(ERROR_CODE.UNKNOWN, + "Region " + descriptiveName + " is in an unforeseen state:" + " inMeta=" + inMeta + + " inHdfs=" + inHdfs + " isDeployed=" + isDeployed + " isMultiplyDeployed=" + + isMultiplyDeployed + " deploymentMatchesMeta=" + deploymentMatchesMeta + + " shouldBeDeployed=" + shouldBeDeployed); } } /** - * Checks tables integrity. Goes over all regions and scans the tables. - * Collects all the pieces for each table and checks if there are missing, - * repeated or overlapping ones. + * Checks tables integrity. 
Goes over all regions and scans the tables. Collects all the pieces + * for each table and checks if there are missing, repeated or overlapping ones. * @throws IOException */ SortedMap checkIntegrity() throws IOException { @@ -2433,8 +2366,8 @@ SortedMap checkIntegrity() throws IOException { // Missing regionDir or over-deployment is checked elsewhere. Include // these cases in modTInfo, so we can evaluate those regions as part of // the region chain in META - //if (hbi.foundRegionDir == null) continue; - //if (hbi.deployedOn.size() != 1) continue; + // if (hbi.foundRegionDir == null) continue; + // if (hbi.deployedOn.size() != 1) continue; if (hbi.getDeployedOn().isEmpty()) { continue; } @@ -2468,8 +2401,9 @@ SortedMap checkIntegrity() throws IOException { return tablesInfo; } - /** Loads table info's for tables that may not have been included, since there are no - * regions reported for the table, but table dir is there in hdfs + /** + * Loads table info's for tables that may not have been included, since there are no regions + * reported for the table, but table dir is there in hdfs */ private void loadTableInfosForTablesWithNoRegion() throws IOException { Map allTables = new FSTableDescriptors(getConf()).getAll(); @@ -2503,7 +2437,7 @@ public int mergeRegionDirs(Path targetRegionDir, HbckRegionInfo contained) throw try { dirs = fs.listStatus(contained.getHdfsRegionDir()); } catch (FileNotFoundException fnfe) { - // region we are attempting to merge in is not present! Since this is a merge, there is + // region we are attempting to merge in is not present! Since this is a merge, there is // no harm skipping this region if it does not exist. if (!fs.exists(contained.getHdfsRegionDir())) { LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir() @@ -2526,7 +2460,7 @@ public int mergeRegionDirs(Path targetRegionDir, HbckRegionInfo contained) throw for (FileStatus cf : dirs) { Path src = cf.getPath(); - Path dst = new Path(targetRegionDir, src.getName()); + Path dst = new Path(targetRegionDir, src.getName()); if (src.getName().equals(HRegionFileSystem.REGION_INFO_FILE)) { // do not copy the old .regioninfo file. @@ -2541,7 +2475,7 @@ public int mergeRegionDirs(Path targetRegionDir, HbckRegionInfo contained) throw LOG.info("[" + thread + "] Moving files from " + src + " into containing region " + dst); // FileSystem.rename is inconsistent with directories -- if the // dst (foo/a) exists and is a dir, and the src (foo/b) is a dir, - // it moves the src into the dst dir resulting in (foo/a/b). If + // it moves the src into the dst dir resulting in (foo/a/b). If // the dst does not exist, and the src a dir, src becomes dst. (foo/b) for (FileStatus hfile : fs.listStatus(src)) { boolean success = fs.rename(hfile.getPath(), dst); @@ -2555,14 +2489,13 @@ public int mergeRegionDirs(Path targetRegionDir, HbckRegionInfo contained) throw // if all success. 
sidelineRegionDir(fs, contained); - LOG.info("[" + thread + "] Sidelined region dir "+ contained.getHdfsRegionDir() + " into " + - getSidelineDir()); + LOG.info("[" + thread + "] Sidelined region dir " + contained.getHdfsRegionDir() + " into " + + getSidelineDir()); debugLsr(contained.getHdfsRegionDir()); return fileMoves; } - static class WorkItemOverlapMerge implements Callable { private TableIntegrityErrorHandler handler; Collection overlapgroup; @@ -2581,10 +2514,9 @@ public Void call() throws Exception { } /** - * Return a list of user-space table names whose metadata have not been - * modified in the last few milliseconds specified by timelag - * if any of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, STARTCODE_QUALIFIER, - * SPLITA_QUALIFIER, SPLITB_QUALIFIER have not changed in the last + * Return a list of user-space table names whose metadata have not been modified in the last few + * milliseconds specified by timelag if any of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, + * STARTCODE_QUALIFIER, SPLITA_QUALIFIER, SPLITB_QUALIFIER have not changed in the last * milliseconds specified by timelag, then the table is a candidate to be returned. * @return tables that have not been modified recently * @throws IOException if an error is encountered @@ -2598,8 +2530,8 @@ TableDescriptor[] getTables(AtomicInteger numSkipped) { // if the start key is zero, then we have found the first region of a table. // pick only those tables that were not modified in the last few milliseconds. - if (info != null && info.getRegionInfo().getStartKey().length == 0 && - !info.getRegionInfo().isMetaRegion()) { + if (info != null && info.getRegionInfo().getStartKey().length == 0 + && !info.getRegionInfo().isMetaRegion()) { if (info.modTime + timelag < now) { tableNames.add(info.getRegionInfo().getTable()); } else { @@ -2611,7 +2543,7 @@ TableDescriptor[] getTables(AtomicInteger numSkipped) { } TableDescriptor[] getTableDescriptors(List tableNames) { - LOG.info("getTableDescriptors == tableNames => " + tableNames); + LOG.info("getTableDescriptors == tableNames => " + tableNames); try (Connection conn = ConnectionFactory.createConnection(getConf()); Admin admin = conn.getAdmin()) { List tds = admin.listTableDescriptors(tableNames); @@ -2623,9 +2555,8 @@ TableDescriptor[] getTableDescriptors(List tableNames) { } /** - * Gets the entry in regionInfo corresponding to the the given encoded - * region name. If the region has not been seen yet, a new entry is added - * and returned. + * Gets the entry in regionInfo corresponding to the the given encoded region name. If the region + * has not been seen yet, a new entry is added and returned. */ private synchronized HbckRegionInfo getOrCreateInfo(String name) { HbckRegionInfo hbi = regionInfoMap.get(name); @@ -2647,14 +2578,13 @@ private void checkAndFixReplication() throws ReplicationException { } /** - * Check values in regionInfo for hbase:meta - * Check if zero or more than one regions with hbase:meta are found. - * If there are inconsistencies (i.e. zero or more than one regions - * pretend to be holding the hbase:meta) try to fix that and report an error. - * @throws IOException from HBaseFsckRepair functions - * @throws KeeperException - * @throws InterruptedException - */ + * Check values in regionInfo for hbase:meta Check if zero or more than one regions with + * hbase:meta are found. If there are inconsistencies (i.e. zero or more than one regions pretend + * to be holding the hbase:meta) try to fix that and report an error. 
+ * @throws IOException from HBaseFsckRepair functions + * @throws KeeperException + * @throws InterruptedException + */ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedException { Map metaRegions = new HashMap<>(); for (HbckRegionInfo value : regionInfoMap.values()) { @@ -2662,8 +2592,7 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept metaRegions.put(value.getReplicaId(), value); } } - int metaReplication = admin.getDescriptor(TableName.META_TABLE_NAME) - .getRegionReplication(); + int metaReplication = admin.getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); boolean noProblem = true; // There will be always entries in regionInfoMap corresponding to hbase:meta & its replicas // Check the deployed servers. It should be exactly one server for each replica. @@ -2678,12 +2607,11 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept if (servers.isEmpty()) { assignMetaReplica(i); } else if (servers.size() > 1) { - errors - .reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId " + - metaHbckRegionInfo.getReplicaId() + " is found on more than one region."); + errors.reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId " + + metaHbckRegionInfo.getReplicaId() + " is found on more than one region."); if (shouldFixAssignments()) { - errors.print("Trying to fix a problem with hbase:meta, replicaId " + - metaHbckRegionInfo.getReplicaId() + ".."); + errors.print("Trying to fix a problem with hbase:meta, replicaId " + + metaHbckRegionInfo.getReplicaId() + ".."); setShouldRerun(); // try fix it (treat is a dupe assignment) HBaseFsckRepair.fixMultiAssignment(connection, @@ -2696,11 +2624,11 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept for (Map.Entry entry : metaRegions.entrySet()) { noProblem = false; errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, - "hbase:meta replicas are deployed in excess. Configured " + metaReplication + - ", deployed " + metaRegions.size()); + "hbase:meta replicas are deployed in excess. 
Configured " + metaReplication + ", deployed " + + metaRegions.size()); if (shouldFixAssignments()) { - errors.print("Trying to undeploy excess replica, replicaId: " + entry.getKey() + - " of hbase:meta.."); + errors.print( + "Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of hbase:meta.."); setShouldRerun(); unassignMetaReplica(entry.getValue()); } @@ -2711,7 +2639,7 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept } private void unassignMetaReplica(HbckRegionInfo hi) - throws IOException, InterruptedException, KeeperException { + throws IOException, InterruptedException, KeeperException { undeployRegions(hi); ZKUtil.deleteNode(zkw, zkw.getZNodePaths().getZNodeForReplica(hi.getMetaEntry().getRegionInfo().getReplicaId())); @@ -2719,14 +2647,14 @@ private void unassignMetaReplica(HbckRegionInfo hi) private void assignMetaReplica(int replicaId) throws IOException, KeeperException, InterruptedException { - errors.reportError(ERROR_CODE.NO_META_REGION, "hbase:meta, replicaId " + - replicaId +" is not found on any region."); + errors.reportError(ERROR_CODE.NO_META_REGION, + "hbase:meta, replicaId " + replicaId + " is not found on any region."); if (shouldFixAssignments()) { errors.print("Trying to fix a problem with hbase:meta.."); setShouldRerun(); // try to fix it (treat it as unassigned region) - RegionInfo h = RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); + RegionInfo h = RegionReplicaUtil + .getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); HBaseFsckRepair.fixUnassigned(admin, h); HBaseFsckRepair.waitUntilAssigned(admin, h); } @@ -2753,7 +2681,7 @@ public boolean visit(Result result) throws IOException { try { // record the latest modification of this META record - long ts = Collections.max(result.listCells(), comp).getTimestamp(); + long ts = Collections.max(result.listCells(), comp).getTimestamp(); RegionLocations rl = CatalogFamilyFormat.getRegionLocations(result); if (rl == null) { emptyRegionInfoQualifiers.add(result); @@ -2762,16 +2690,15 @@ public boolean visit(Result result) throws IOException { return true; } ServerName sn = null; - if (rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID) == null || - rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion() == null) { + if (rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID) == null + || rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion() == null) { emptyRegionInfoQualifiers.add(result); errors.reportError(ERROR_CODE.EMPTY_META_CELL, "Empty REGIONINFO_QUALIFIER found in hbase:meta"); return true; } RegionInfo hri = rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion(); - if (!(isTableIncluded(hri.getTable()) - || hri.isMetaRegion())) { + if (!(isTableIncluded(hri.getTable()) || hri.isMetaRegion())) { return true; } PairOfSameType daughters = MetaTableAccessor.getDaughterRegions(result); @@ -2838,16 +2765,16 @@ private void printTableSummary(SortedMap tablesInfo) { int numOfSkippedRegions; errors.print("Summary:"); for (HbckTableInfo tInfo : tablesInfo.values()) { - numOfSkippedRegions = (skippedRegions.containsKey(tInfo.getName())) ? - skippedRegions.get(tInfo.getName()).size() : 0; + numOfSkippedRegions = + (skippedRegions.containsKey(tInfo.getName())) ? 
skippedRegions.get(tInfo.getName()).size() + : 0; if (errors.tableHasErrors(tInfo)) { errors.print("Table " + tInfo.getName() + " is inconsistent."); - } else if (numOfSkippedRegions > 0){ - errors.print("Table " + tInfo.getName() + " is okay (with " - + numOfSkippedRegions + " skipped regions)."); - } - else { + } else if (numOfSkippedRegions > 0) { + errors.print("Table " + tInfo.getName() + " is okay (with " + numOfSkippedRegions + + " skipped regions)."); + } else { errors.print("Table " + tInfo.getName() + " is okay."); } errors.print(" Number of regions: " + tInfo.getNumRegions()); @@ -2855,7 +2782,7 @@ private void printTableSummary(SortedMap tablesInfo) { Set skippedRegionStrings = skippedRegions.get(tInfo.getName()); System.out.println(" Number of skipped regions: " + numOfSkippedRegions); System.out.println(" List of skipped regions:"); - for(String sr : skippedRegionStrings) { + for (String sr : skippedRegionStrings) { System.out.println(" " + sr); } } @@ -2870,9 +2797,8 @@ private void printTableSummary(SortedMap tablesInfo) { static HbckErrorReporter getErrorReporter(final Configuration conf) throws ClassNotFoundException { - Class reporter = - conf.getClass("hbasefsck.errorreporter", PrintingErrorReporter.class, - HbckErrorReporter.class); + Class reporter = conf.getClass("hbasefsck.errorreporter", + PrintingErrorReporter.class, HbckErrorReporter.class); return ReflectionUtils.newInstance(reporter, conf); } @@ -2918,7 +2844,7 @@ public synchronized void reportError(ERROR_CODE errorCode, String message, @Override public synchronized void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, - HbckRegionInfo info) { + HbckRegionInfo info) { errorTables.add(table); String reference = "(region " + info.getRegionNameAsString() + ")"; reportError(errorCode, reference + " " + message); @@ -2926,10 +2852,10 @@ public synchronized void reportError(ERROR_CODE errorCode, String message, HbckT @Override public synchronized void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, - HbckRegionInfo info1, HbckRegionInfo info2) { + HbckRegionInfo info1, HbckRegionInfo info2) { errorTables.add(table); - String reference = "(regions " + info1.getRegionNameAsString() - + " and " + info2.getRegionNameAsString() + ")"; + String reference = "(regions " + info1.getRegionNameAsString() + " and " + + info2.getRegionNameAsString() + ")"; reportError(errorCode, reference + " " + message); } @@ -2939,13 +2865,13 @@ public synchronized void reportError(String message) { } /** - * Report error information, but do not increment the error count. Intended for cases - * where the actual error would have been reported previously. + * Report error information, but do not increment the error count. Intended for cases where the + * actual error would have been reported previously. * @param message */ @Override public synchronized void report(String message) { - if (! 
summary) { + if (!summary) { System.out.println("ERROR: " + message); } showProgress = 0; @@ -2953,8 +2879,7 @@ public synchronized void report(String message) { @Override public synchronized int summarize() { - System.out.println(Integer.toString(errorCount) + - " inconsistencies detected."); + System.out.println(Integer.toString(errorCount) + " inconsistencies detected."); if (errorCount == 0) { System.out.println("Status: OK"); return 0; @@ -3034,10 +2959,10 @@ public synchronized Void call() throws IOException { errors.detail( "RegionServer: " + rsinfo.getServerName() + " number of regions: " + regions.size()); for (RegionInfo rinfo : regions) { - errors.detail(" " + rinfo.getRegionNameAsString() + " id: " + rinfo.getRegionId() + - " encoded_name: " + rinfo.getEncodedName() + " start: " + - Bytes.toStringBinary(rinfo.getStartKey()) + " end: " + - Bytes.toStringBinary(rinfo.getEndKey())); + errors.detail(" " + rinfo.getRegionNameAsString() + " id: " + rinfo.getRegionId() + + " encoded_name: " + rinfo.getEncodedName() + " start: " + + Bytes.toStringBinary(rinfo.getStartKey()) + " end: " + + Bytes.toStringBinary(rinfo.getEndKey())); } } @@ -3057,8 +2982,7 @@ public synchronized Void call() throws IOException { private List filterRegions(List regions) { List ret = Lists.newArrayList(); for (RegionInfo hri : regions) { - if (hri.isMetaRegion() || (!hbck.checkMetaOnly - && hbck.isTableIncluded(hri.getTable()))) { + if (hri.isMetaRegion() || (!hbck.checkMetaOnly && hbck.isTableIncluded(hri.getTable()))) { ret.add(hri); } } @@ -3067,8 +2991,7 @@ private List filterRegions(List regions) { } /** - * Contact hdfs and get all information about specified table directory into - * regioninfo list. + * Contact hdfs and get all information about specified table directory into regioninfo list. */ class WorkItemHdfsDir implements Callable { private FileStatus tableDir; @@ -3105,9 +3028,10 @@ public synchronized Void call() throws InterruptedException, ExecutionException @Override public void run() { try { - LOG.debug("Loading region info from hdfs:"+ regionDir.getPath()); + LOG.debug("Loading region info from hdfs:" + regionDir.getPath()); - Path regioninfoFile = new Path(regionDir.getPath(), HRegionFileSystem.REGION_INFO_FILE); + Path regioninfoFile = + new Path(regionDir.getPath(), HRegionFileSystem.REGION_INFO_FILE); boolean regioninfoFileExists = fs.exists(regioninfoFile); if (!regioninfoFileExists) { @@ -3124,8 +3048,8 @@ public void run() { HbckRegionInfo.HdfsEntry he = new HbckRegionInfo.HdfsEntry(); synchronized (hbi) { if (hbi.getHdfsRegionDir() != null) { - errors.print("Directory " + encodedName + " duplicate??" + - hbi.getHdfsRegionDir()); + errors.print( + "Directory " + encodedName + " duplicate??" + hbi.getHdfsRegionDir()); } he.regionDir = regionDir.getPath(); @@ -3174,11 +3098,11 @@ public void run() { } finally { if (!exceptions.isEmpty()) { errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "Table Directory: " - + tableDir.getPath().getName() - + " Unable to fetch all HDFS region information. "); + + tableDir.getPath().getName() + " Unable to fetch all HDFS region information. 
"); // Just throw the first exception as an indication something bad happened // Don't need to propagate all the exceptions, we already logged them all anyway - throw new ExecutionException("First exception in WorkItemHdfsDir", exceptions.firstElement()); + throw new ExecutionException("First exception in WorkItemHdfsDir", + exceptions.firstElement()); } } return null; @@ -3186,8 +3110,7 @@ public void run() { } /** - * Contact hdfs and get all information about specified table directory into - * regioninfo list. + * Contact hdfs and get all information about specified table directory into regioninfo list. */ static class WorkItemHdfsRegionInfo implements Callable { private HbckRegionInfo hbi; @@ -3209,8 +3132,7 @@ public synchronized Void call() throws IOException { hbi.loadHdfsRegioninfo(hbck.getConf()); } catch (IOException ioe) { String msg = "Orphan region in HDFS: Unable to load .regioninfo from table " - + hbi.getTableName() + " in hdfs dir " - + hbi.getHdfsRegionDir() + + hbi.getTableName() + " in hdfs dir " + hbi.getHdfsRegionDir() + "! It may be an invalid format or version file. Treating as " + "an orphaned regiondir."; errors.reportError(ERROR_CODE.ORPHAN_HDFS_REGION, msg); @@ -3229,8 +3151,8 @@ public synchronized Void call() throws IOException { } /** - * Display the full report from fsck. This displays all live and dead region - * servers, and all known regions. + * Display the full report from fsck. This displays all live and dead region servers, and all + * known regions. */ public static void setDisplayFullReport() { details = true; @@ -3255,16 +3177,14 @@ public boolean isExclusive() { } /** - * Set summary mode. - * Print only summary of the tables and status (OK or INCONSISTENT) + * Set summary mode. Print only summary of the tables and status (OK or INCONSISTENT) */ static void setSummary() { summary = true; } /** - * Set hbase:meta check mode. - * Print only info about hbase:meta table deployment/state + * Set hbase:meta check mode. Print only info about hbase:meta table deployment/state */ void setCheckMetaOnly() { checkMetaOnly = true; @@ -3290,9 +3210,8 @@ public void setCleanReplicationBarrier(boolean shouldClean) { } /** - * Check if we should rerun fsck again. This checks if we've tried to - * fix something and we should rerun fsck tool again. - * Display the full report from fsck. This displays all live and dead + * Check if we should rerun fsck again. This checks if we've tried to fix something and we should + * rerun fsck tool again. Display the full report from fsck. This displays all live and dead * region servers, and all known regions. */ void setShouldRerun() { @@ -3304,8 +3223,8 @@ public boolean shouldRerun() { } /** - * Fix inconsistencies found by fsck. This should try to fix errors (if any) - * found by fsck utility. + * Fix inconsistencies found by fsck. This should try to fix errors (if any) found by fsck + * utility. */ public void setFixAssignments(boolean shouldFix) { fixAssignments = shouldFix; @@ -3459,8 +3378,7 @@ public int getMaxOverlapsToSideline() { } /** - * Only check/fix tables specified by the list, - * Empty list means all tables are included. + * Only check/fix tables specified by the list, Empty list means all tables are included. 
*/ boolean isTableIncluded(TableName table) { return (tablesIncluded.isEmpty()) || tablesIncluded.contains(table); @@ -3475,8 +3393,8 @@ Set getIncludedTables() { } /** - * We are interested in only those tables that have not changed their state in - * hbase:meta during the last few seconds specified by hbase.admin.fsck.timelag + * We are interested in only those tables that have not changed their state in hbase:meta during + * the last few seconds specified by hbase.admin.fsck.timelag * @param seconds - the time in seconds */ public void setTimeLag(long seconds) { @@ -3484,14 +3402,14 @@ public void setTimeLag(long seconds) { } /** - * * @param sidelineDir - HDFS path to sideline data */ public void setSidelineDir(String sidelineDir) { this.sidelineDir = new Path(sidelineDir); } - protected HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException { + protected HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) + throws IOException { return new HFileCorruptionChecker(getConf(), executor, sidelineCorruptHFiles); } @@ -3526,28 +3444,31 @@ protected HBaseFsck printUsageAndExit() { out.println(" where [opts] are:"); out.println(" -help Display help options (this)"); out.println(" -details Display full report of all regions."); - out.println(" -timelag Process only regions that " + - " have not experienced any metadata updates in the last " + - " seconds."); - out.println(" -sleepBeforeRerun Sleep this many seconds" + - " before checking if the fix worked if run with -fix"); + out.println(" -timelag Process only regions that " + + " have not experienced any metadata updates in the last " + " seconds."); + out.println(" -sleepBeforeRerun Sleep this many seconds" + + " before checking if the fix worked if run with -fix"); out.println(" -summary Print only summary of the tables and status."); out.println(" -metaonly Only check the state of the hbase:meta table."); out.println(" -sidelineDir HDFS path to backup existing meta."); - out.println(" -boundaries Verify that regions boundaries are the same between META and store files."); + out.println( + " -boundaries Verify that regions boundaries are the same between META and store files."); out.println(" -exclusive Abort if another hbck is exclusive or fixing."); out.println(""); out.println(" Datafile Repair options: (expert features, use with caution!)"); - out.println(" -checkCorruptHFiles Check all Hfiles by opening them to make sure they are valid"); - out.println(" -sidelineCorruptHFiles Quarantine corrupted HFiles. implies -checkCorruptHFiles"); + out.println( + " -checkCorruptHFiles Check all Hfiles by opening them to make sure they are valid"); + out.println( + " -sidelineCorruptHFiles Quarantine corrupted HFiles. 
implies -checkCorruptHFiles"); out.println(""); out.println(" Replication options"); out.println(" -fixReplication Deletes replication queues for removed peers"); out.println(""); - out.println(" Metadata Repair options supported as of version 2.0: (expert features, use with caution!)"); + out.println( + " Metadata Repair options supported as of version 2.0: (expert features, use with caution!)"); out.println(" -fixVersionFile Try to fix missing hbase.version file in hdfs."); out.println(" -fixReferenceFiles Try to offline lingering reference store files"); out.println(" -fixHFileLinks Try to offline lingering HFileLinks"); @@ -3559,32 +3480,41 @@ protected HBaseFsck printUsageAndExit() { out.println("NOTE: Following options are NOT supported as of HBase version 2.0+."); out.println(""); out.println(" UNSUPPORTED Metadata Repair options: (expert features, use with caution!)"); - out.println(" -fix Try to fix region assignments. This is for backwards compatibility"); + out.println( + " -fix Try to fix region assignments. This is for backwards compatibility"); out.println(" -fixAssignments Try to fix region assignments. Replaces the old -fix"); - out.println(" -fixMeta Try to fix meta problems. This assumes HDFS region info is good."); + out.println( + " -fixMeta Try to fix meta problems. This assumes HDFS region info is good."); out.println(" -fixHdfsHoles Try to fix region holes in hdfs."); out.println(" -fixHdfsOrphans Try to fix region dirs with no .regioninfo file in hdfs"); - out.println(" -fixTableOrphans Try to fix table dirs with no .tableinfo file in hdfs (online mode only)"); + out.println( + " -fixTableOrphans Try to fix table dirs with no .tableinfo file in hdfs (online mode only)"); out.println(" -fixHdfsOverlaps Try to fix region overlaps in hdfs."); - out.println(" -maxMerge When fixing region overlaps, allow at most regions to merge. (n=" + DEFAULT_MAX_MERGE +" by default)"); - out.println(" -sidelineBigOverlaps When fixing region overlaps, allow to sideline big overlaps"); - out.println(" -maxOverlapsToSideline When fixing region overlaps, allow at most regions to sideline per group. (n=" + DEFAULT_OVERLAPS_TO_SIDELINE +" by default)"); + out.println( + " -maxMerge When fixing region overlaps, allow at most regions to merge. (n=" + + DEFAULT_MAX_MERGE + " by default)"); + out.println( + " -sidelineBigOverlaps When fixing region overlaps, allow to sideline big overlaps"); + out.println( + " -maxOverlapsToSideline When fixing region overlaps, allow at most regions to sideline per group. 
(n=" + + DEFAULT_OVERLAPS_TO_SIDELINE + " by default)"); out.println(" -fixSplitParents Try to force offline split parents to be online."); - out.println(" -removeParents Try to offline and sideline lingering parents and keep daughter regions."); + out.println( + " -removeParents Try to offline and sideline lingering parents and keep daughter regions."); out.println(" -fixEmptyMetaCells Try to fix hbase:meta entries not referencing any region" + " (empty REGIONINFO_QUALIFIER rows)"); out.println(""); out.println(" UNSUPPORTED Metadata Repair shortcuts"); - out.println(" -repair Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " + - "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles" + - "-fixHFileLinks"); + out.println(" -repair Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " + + "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles" + + "-fixHFileLinks"); out.println(" -repairHoles Shortcut for -fixAssignments -fixMeta -fixHdfsHoles"); out.println(""); out.println(" Replication options"); out.println(" -fixReplication Deletes replication queues for removed peers"); - out.println(" -cleanReplicationBarrier [tableName] clean the replication barriers " + - "of a specified table, tableName is required"); + out.println(" -cleanReplicationBarrier [tableName] clean the replication barriers " + + "of a specified table, tableName is required"); out.flush(); errors.reportError(ERROR_CODE.WRONG_USAGE, sw.toString()); @@ -3594,7 +3524,6 @@ protected HBaseFsck printUsageAndExit() { /** * Main program - * * @param args * @throws Exception */ @@ -3612,7 +3541,10 @@ public static void main(String[] args) throws Exception { * This is a Tool wrapper that gathers -Dxxx=yyy configuration settings from the command line. 
*/ static class HBaseFsckTool extends Configured implements Tool { - HBaseFsckTool(Configuration conf) { super(conf); } + HBaseFsckTool(Configuration conf) { + super(conf); + } + @Override public int run(String[] args) throws Exception { HBaseFsck hbck = new HBaseFsck(getConf()); @@ -3652,8 +3584,7 @@ public HBaseFsck exec(ExecutorService exec, String[] args) } } else if (cmd.equals("-sleepBeforeRerun")) { if (i == args.length - 1) { - errors.reportError(ERROR_CODE.WRONG_USAGE, - "HBaseFsck: -sleepBeforeRerun needs a value."); + errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -sleepBeforeRerun needs a value."); return printUsageAndExit(); } try { @@ -3746,16 +3677,14 @@ public HBaseFsck exec(ExecutorService exec, String[] args) } } else if (cmd.equals("-maxMerge")) { if (i == args.length - 1) { - errors.reportError(ERROR_CODE.WRONG_USAGE, - "-maxMerge needs a numeric value argument."); + errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxMerge needs a numeric value argument."); return printUsageAndExit(); } try { int maxMerge = Integer.parseInt(args[++i]); setMaxMerge(maxMerge); } catch (NumberFormatException e) { - errors.reportError(ERROR_CODE.WRONG_USAGE, - "-maxMerge needs a numeric value argument."); + errors.reportError(ERROR_CODE.WRONG_USAGE, "-maxMerge needs a numeric value argument."); return printUsageAndExit(); } } else if (cmd.equals("-summary")) { @@ -3768,7 +3697,7 @@ public HBaseFsck exec(ExecutorService exec, String[] args) setFixReplication(true); } else if (cmd.equals("-cleanReplicationBarrier")) { setCleanReplicationBarrier(true); - if(args[++i].startsWith("-")){ + if (args[++i].startsWith("-")) { printUsageAndExit(); } setCleanReplicationBarrierTable(args[i]); @@ -3859,7 +3788,7 @@ private boolean isOptionsSupported(String[] args) { for (String arg : args) { if (unsupportedOptionsInV2.contains(arg)) { errors.reportError(ERROR_CODE.UNSUPPORTED_OPTION, - "option '" + arg + "' is not " + "supported!"); + "option '" + arg + "' is not " + "supported!"); result = false; break; } @@ -3919,8 +3848,8 @@ public void cleanReplicationBarrier() throws IOException { List peerDescriptions = admin.listReplicationPeers(); if (peerDescriptions != null && peerDescriptions.size() > 0) { List peers = peerDescriptions.stream() - .filter(peerConfig -> peerConfig.getPeerConfig() - .needToReplicate(cleanReplicationBarrierTable)) + .filter( + peerConfig -> peerConfig.getPeerConfig().needToReplicate(cleanReplicationBarrierTable)) .map(peerConfig -> peerConfig.getPeerId()).collect(Collectors.toList()); try { List batch = new ArrayList<>(); @@ -3957,16 +3886,15 @@ void debugLsr(Path p) throws IOException { /** * ls -r for debugging purposes */ - public static void debugLsr(Configuration conf, - Path p) throws IOException { + public static void debugLsr(Configuration conf, Path p) throws IOException { debugLsr(conf, p, new PrintingErrorReporter()); } /** * ls -r for debugging purposes */ - public static void debugLsr(Configuration conf, - Path p, HbckErrorReporter errors) throws IOException { + public static void debugLsr(Configuration conf, Path p, HbckErrorReporter errors) + throws IOException { if (!LOG.isDebugEnabled() || p == null) { return; } @@ -3983,7 +3911,7 @@ public static void debugLsr(Configuration conf, } if (fs.getFileStatus(p).isDirectory()) { - FileStatus[] fss= fs.listStatus(p); + FileStatus[] fss = fs.listStatus(p); for (FileStatus status : fss) { debugLsr(conf, status.getPath(), errors); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index c6db715d1a0f..3edd29e8184b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ClusterMetrics.Option; @@ -51,27 +49,23 @@ import org.slf4j.LoggerFactory; /** - * This class contains helper methods that repair parts of hbase's filesystem - * contents. + * This class contains helper methods that repair parts of hbase's filesystem contents. */ @InterfaceAudience.Private public class HBaseFsckRepair { private static final Logger LOG = LoggerFactory.getLogger(HBaseFsckRepair.class); /** - * Fix multiple assignment by doing silent closes on each RS hosting the region - * and then force ZK unassigned node to OFFLINE to trigger assignment by - * master. - * + * Fix multiple assignment by doing silent closes on each RS hosting the region and then force ZK + * unassigned node to OFFLINE to trigger assignment by master. * @param connection HBase connection to the cluster * @param region Region to undeploy * @param servers list of Servers to undeploy from */ public static void fixMultiAssignment(Connection connection, RegionInfo region, - List servers) - throws IOException, KeeperException, InterruptedException { + List servers) throws IOException, KeeperException, InterruptedException { // Close region on the servers silently - for(ServerName server : servers) { + for (ServerName server : servers) { closeRegionSilentlyAndWait(connection, server, region); } @@ -80,12 +74,9 @@ public static void fixMultiAssignment(Connection connection, RegionInfo region, } /** - * Fix unassigned by creating/transition the unassigned ZK node for this - * region to OFFLINE state with a special flag to tell the master that this is - * a forced operation by HBCK. - * - * This assumes that info is in META. - * + * Fix unassigned by creating/transition the unassigned ZK node for this region to OFFLINE state + * with a special flag to tell the master that this is a forced operation by HBCK. This assumes + * that info is in META. * @param admin * @param region * @throws IOException @@ -98,32 +89,30 @@ public static void fixUnassigned(Admin admin, RegionInfo region) } /** - * In 0.90, this forces an HRI offline by setting the RegionTransitionData - * in ZK to have HBCK_CODE_NAME as the server. This is a special case in - * the AssignmentManager that attempts an assign call by the master. - * - * This doesn't seem to work properly in the updated version of 0.92+'s hbck - * so we use assign to force the region into transition. This has the - * side-effect of requiring a RegionInfo that considers regionId (timestamp) - * in comparators that is addressed by HBASE-5563. + * In 0.90, this forces an HRI offline by setting the RegionTransitionData in ZK to have + * HBCK_CODE_NAME as the server. This is a special case in the AssignmentManager that attempts an + * assign call by the master. 
This doesn't seem to work properly in the updated version of 0.92+'s + * hbck so we use assign to force the region into transition. This has the side-effect of + * requiring a RegionInfo that considers regionId (timestamp) in comparators that is addressed by + * HBASE-5563. */ private static void forceOfflineInZK(Admin admin, final RegionInfo region) - throws ZooKeeperConnectionException, KeeperException, IOException, InterruptedException { + throws ZooKeeperConnectionException, KeeperException, IOException, InterruptedException { admin.assign(region.getRegionName()); } /* * Should we check all assignments or just not in RIT? */ - public static void waitUntilAssigned(Admin admin, - RegionInfo region) throws IOException, InterruptedException { + public static void waitUntilAssigned(Admin admin, RegionInfo region) + throws IOException, InterruptedException { long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000); long expiration = timeout + EnvironmentEdgeManager.currentTime(); while (EnvironmentEdgeManager.currentTime() < expiration) { try { boolean inTransition = false; for (RegionState rs : admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION)) - .getRegionStatesInTransition()) { + .getRegionStatesInTransition()) { if (RegionInfo.COMPARATOR.compare(rs.getRegion(), region) == 0) { inTransition = true; break; @@ -134,16 +123,14 @@ public static void waitUntilAssigned(Admin admin, return; } // still in rit - LOG.info("Region still in transition, waiting for " - + "it to become assigned: " + region); + LOG.info("Region still in transition, waiting for " + "it to become assigned: " + region); } catch (IOException e) { - LOG.warn("Exception when waiting for region to become assigned," - + " retrying", e); + LOG.warn("Exception when waiting for region to become assigned," + " retrying", e); } Thread.sleep(1000); } - throw new IOException("Region " + region + " failed to move out of " + - "transition within timeout " + timeout + "ms"); + throw new IOException("Region " + region + " failed to move out of " + + "transition within timeout " + timeout + "ms"); } /** @@ -155,7 +142,7 @@ public static void closeRegionSilentlyAndWait(Connection connection, ServerName long timeout = connection.getConfiguration().getLong("hbase.hbck.close.timeout", 120000); // this is a bit ugly but it is only used in the old hbck and tests, so I think it is fine. 
try (AsyncClusterConnection asyncConn = ClusterConnectionFactory - .createAsyncClusterConnection(connection.getConfiguration(), null, User.getCurrent())) { + .createAsyncClusterConnection(connection.getConfiguration(), null, User.getCurrent())) { ServerManager.closeRegionSilentlyAndWait(asyncConn, server, region, timeout); } } @@ -163,8 +150,8 @@ public static void closeRegionSilentlyAndWait(Connection connection, ServerName /** * Puts the specified RegionInfo into META with replica related columns */ - public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, - RegionInfo hri, Collection servers, int numReplicas) throws IOException { + public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInfo hri, + Collection servers, int numReplicas) throws IOException { Connection conn = ConnectionFactory.createConnection(conf); Table meta = conn.getTable(TableName.META_TABLE_NAME); Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime()); @@ -188,8 +175,8 @@ public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, /** * Creates, flushes, and closes a new region. */ - public static HRegion createHDFSRegionDir(Configuration conf, - RegionInfo hri, TableDescriptor htd) throws IOException { + public static HRegion createHDFSRegionDir(Configuration conf, RegionInfo hri, TableDescriptor htd) + throws IOException { // Create HRegion Path root = CommonFSUtils.getRootDir(conf); HRegion region = HRegion.createHRegion(hri, root, conf, htd, null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java index f54864492f35..2026259d62bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ private HFileArchiveUtil() { * not be archived */ public static Path getStoreArchivePath(final Configuration conf, final TableName tableName, - final String regionName, final String familyName) throws IOException { + final String regionName, final String familyName) throws IOException { Path tableArchiveDir = getTableArchivePath(conf, tableName); return HRegionFileSystem.getStoreHomedir(tableArchiveDir, regionName, Bytes.toBytes(familyName)); @@ -62,7 +62,7 @@ public static Path getStoreArchivePath(final Configuration conf, final TableName * not be archived */ public static Path getStoreArchivePath(Configuration conf, RegionInfo region, Path tabledir, - byte[] family) throws IOException { + byte[] family) throws IOException { return getStoreArchivePath(conf, region, family); } @@ -75,7 +75,7 @@ public static Path getStoreArchivePath(Configuration conf, RegionInfo region, Pa * not be archived */ public static Path getStoreArchivePath(Configuration conf, RegionInfo region, byte[] family) - throws IOException { + throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); Path tableArchiveDir = getTableArchivePath(rootDir, region.getTable()); return HRegionFileSystem.getStoreHomedir(tableArchiveDir, region, family); @@ -98,7 +98,7 @@ public static Path getStoreArchivePathForRootDir(Path rootDir, RegionInfo region } public static Path getStoreArchivePathForArchivePath(Path archivePath, RegionInfo region, - byte[] family) { + byte[] family) { Path tableArchiveDir = CommonFSUtils.getTableDir(archivePath, region.getTable()); return HRegionFileSystem.getStoreHomedir(tableArchiveDir, region, family); } @@ -128,7 +128,7 @@ public static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path r * should not be archived */ public static Path getRegionArchiveDir(Path rootDir, TableName tableName, - String encodedRegionName) { + String encodedRegionName) { // get the archive directory for a table Path archiveDir = getTableArchivePath(rootDir, tableName); return HRegion.getRegionDir(archiveDir, encodedRegionName); @@ -158,7 +158,7 @@ public static Path getTableArchivePath(final Path rootdir, final TableName table * @return {@link Path} to the archive directory for the table */ public static Path getTableArchivePath(final Configuration conf, final TableName tableName) - throws IOException { + throws IOException { return CommonFSUtils.getTableDir(getArchivePath(conf), tableName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java index 774871b38263..c1ac06cada1d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HashedBytes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +18,12 @@ package org.apache.hadoop.hbase.util; import java.util.Arrays; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * This class encapsulates a byte array and overrides hashCode and equals so - * that it's identity is based on the data rather than the array instance. 
+ * This class encapsulates a byte array and overrides hashCode and equals so that it's identity is + * based on the data rather than the array instance. */ @InterfaceAudience.Private @InterfaceStability.Stable @@ -50,10 +48,8 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null || getClass() != obj.getClass()) - return false; + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; HashedBytes other = (HashedBytes) obj; return (hashCode == other.hashCode) && Arrays.equals(bytes, other.bytes); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java index 52012dfa2354..8906ee9e20bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckErrorReporter.java @@ -18,13 +18,12 @@ package org.apache.hadoop.hbase.util; import java.util.ArrayList; - import org.apache.yetus.audience.InterfaceAudience; /** * Used by {@link HBaseFsck} reporting system. * @deprecated Since 2.3.0. To be removed in hbase4. Use HBCK2 instead. Remove when - * {@link HBaseFsck} is removed. + * {@link HBaseFsck} is removed. */ @Deprecated @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java index c2bfa7bae145..e348fae3098a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckRegionInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,8 +37,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Maintain information about a particular region. It gathers information - * from three places -- HDFS, META, and region servers. + * Maintain information about a particular region. It gathers information from three places -- HDFS, + * META, and region servers. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -59,11 +59,11 @@ public HbckRegionInfo(MetaEntry metaEntry) { } public synchronized int getReplicaId() { - return metaEntry != null? metaEntry.hri.getReplicaId(): deployedReplicaId; + return metaEntry != null ? metaEntry.hri.getReplicaId() : deployedReplicaId; } public synchronized void addServer(RegionInfo regionInfo, ServerName serverName) { - OnlineEntry rse = new OnlineEntry(regionInfo, serverName) ; + OnlineEntry rse = new OnlineEntry(regionInfo, serverName); this.deployedEntries.add(rse); this.deployedOn.add(serverName); // save the replicaId that we see deployed in the cluster @@ -76,7 +76,7 @@ public synchronized void addServer(RegionInfo regionInfo, ServerName serverName) public synchronized String toString() { StringBuilder sb = new StringBuilder(); sb.append("{ meta => "); - sb.append((metaEntry != null)? metaEntry.hri.getRegionNameAsString() : "null"); + sb.append((metaEntry != null) ? 
metaEntry.hri.getRegionNameAsString() : "null"); sb.append(", hdfs => " + getHdfsRegionDir()); sb.append(", deployed => " + Joiner.on(", ").join(deployedEntries)); sb.append(", replicaId => " + getReplicaId()); @@ -133,8 +133,8 @@ public List getDeployedOn() { } /** - * Read the .regioninfo file from the file system. If there is no - * .regioninfo, add it to the orphan hdfs region list. + * Read the .regioninfo file from the file system. If there is no .regioninfo, add it to the + * orphan hdfs region list. */ public void loadHdfsRegioninfo(Configuration conf) throws IOException { Path regionDir = getHdfsRegionDir(); @@ -264,16 +264,16 @@ public boolean isMerged() { */ public static class MetaEntry { RegionInfo hri; - ServerName regionServer; // server hosting this region - long modTime; // timestamp of most recent modification metadata - RegionInfo splitA, splitB; //split daughters + ServerName regionServer; // server hosting this region + long modTime; // timestamp of most recent modification metadata + RegionInfo splitA, splitB; // split daughters public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime) { this(rinfo, regionServer, modTime, null, null); } - public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime, - RegionInfo splitA, RegionInfo splitB) { + public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime, RegionInfo splitA, + RegionInfo splitB) { this.hri = rinfo; this.regionServer = regionServer; this.modTime = modTime; @@ -371,8 +371,8 @@ public int compare(HbckRegionInfo l, HbckRegionInfo r) { return tableCompare; } - int startComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare( - l.getStartKey(), r.getStartKey()); + int startComparison = + RegionSplitCalculator.BYTES_COMPARATOR.compare(l.getStartKey(), r.getStartKey()); if (startComparison != 0) { return startComparison; } @@ -382,8 +382,7 @@ public int compare(HbckRegionInfo l, HbckRegionInfo r) { endKey = (endKey.length == 0) ? null : endKey; byte[] endKey2 = l.getEndKey(); endKey2 = (endKey2.length == 0) ? null : endKey2; - int endComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare( - endKey2, endKey); + int endComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare(endKey2, endKey); if (endComparison != 0) { return endComparison; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java index 75699d888900..5b7d12773033 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HbckTableInfo.java @@ -30,7 +30,6 @@ import java.util.TreeSet; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -79,7 +78,7 @@ public class HbckTableInfo { final RegionSplitCalculator sc = new RegionSplitCalculator<>(HbckRegionInfo.COMPARATOR); - // Histogram of different TableDescriptors found. Ideally there is only one! + // Histogram of different TableDescriptors found. Ideally there is only one! final Set htds = new HashSet<>(); // key = start split, values = set of splits in problem group @@ -98,14 +97,14 @@ public class HbckTableInfo { } /** - * @return descriptor common to all regions. null if are none or multiple! + * @return descriptor common to all regions. null if are none or multiple! 
*/ TableDescriptor getTableDescriptor() { if (htds.size() == 1) { - return (TableDescriptor)htds.toArray()[0]; + return (TableDescriptor) htds.toArray()[0]; } else { - LOG.error("None/Multiple table descriptors found for table '" - + tableName + "' regions: " + htds); + LOG.error( + "None/Multiple table descriptors found for table '" + tableName + "' regions: " + htds); } return null; } @@ -122,10 +121,11 @@ public void addRegionInfo(HbckRegionInfo hir) { // if not the absolute end key, check for cycle if (Bytes.compareTo(hir.getStartKey(), hir.getEndKey()) > 0) { - hbck.getErrors().reportError(HbckErrorReporter.ERROR_CODE.REGION_CYCLE, String.format( + hbck.getErrors().reportError(HbckErrorReporter.ERROR_CODE.REGION_CYCLE, + String.format( "The endkey for this region comes before the " + "startkey, startkey=%s, endkey=%s", - Bytes.toStringBinary(hir.getStartKey()), Bytes.toStringBinary(hir.getEndKey())), this, - hir); + Bytes.toStringBinary(hir.getStartKey()), Bytes.toStringBinary(hir.getEndKey())), + this, hir); backwards.add(hir); return; } @@ -149,8 +149,8 @@ public int getNumRegions() { return sc.getStarts().size() + backwards.size(); } - public synchronized ImmutableList getRegionsFromMeta( - TreeMap regionInfoMap) { + public synchronized ImmutableList + getRegionsFromMeta(TreeMap regionInfoMap) { // lazy loaded, synchronized to ensure a single load if (regionsFromMeta == null) { List regions = new ArrayList<>(); @@ -177,23 +177,25 @@ class IntegrityFixSuggester extends TableIntegrityErrorHandlerImpl { @Override public void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException { - errors.reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY, - "First region should start with an empty key. You need to " - + " create a new region and regioninfo in HDFS to plug the hole.", - getTableInfo(), hi); + errors + .reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY, + "First region should start with an empty key. You need to " + + " create a new region and regioninfo in HDFS to plug the hole.", + getTableInfo(), hi); } @Override public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY, - "Last region should end with an empty key. You need to " - + "create a new region and regioninfo in HDFS to plug the hole.", getTableInfo()); + "Last region should end with an empty key. 
You need to " + + "create a new region and regioninfo in HDFS to plug the hole.", + getTableInfo()); } @Override - public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException{ + public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.DEGENERATE_REGION, - "Region has the same start and end key.", getTableInfo(), hi); + "Region has the same start and end key.", getTableInfo(), hi); } @Override @@ -201,55 +203,47 @@ public void handleDuplicateStartKeys(HbckRegionInfo r1, HbckRegionInfo r2) throw byte[] key = r1.getStartKey(); // dup start key errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_STARTKEYS, - "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), - r1); + "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), + r1); errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_STARTKEYS, - "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), - r2); + "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), + r2); } @Override - public void handleSplit(HbckRegionInfo r1, HbckRegionInfo r2) throws IOException{ + public void handleSplit(HbckRegionInfo r1, HbckRegionInfo r2) throws IOException { byte[] key = r1.getStartKey(); // dup start key errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_ENDKEYS, - "Multiple regions have the same regionID: " - + Bytes.toStringBinary(key), getTableInfo(), r1); + "Multiple regions have the same regionID: " + Bytes.toStringBinary(key), getTableInfo(), + r1); errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_ENDKEYS, - "Multiple regions have the same regionID: " - + Bytes.toStringBinary(key), getTableInfo(), r2); + "Multiple regions have the same regionID: " + Bytes.toStringBinary(key), getTableInfo(), + r2); } @Override public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.OVERLAP_IN_REGION_CHAIN, - "There is an overlap in the region chain.", getTableInfo(), hi1, hi2); + "There is an overlap in the region chain.", getTableInfo(), hi1, hi2); } @Override public void handleHoleInRegionChain(byte[] holeStart, byte[] holeStop) throws IOException { - errors.reportError( - HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, - "There is a hole in the region chain between " - + Bytes.toStringBinary(holeStart) + " and " - + Bytes.toStringBinary(holeStop) - + ". You need to create a new .regioninfo and region " - + "dir in hdfs to plug the hole."); + errors.reportError(HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, + "There is a hole in the region chain between " + Bytes.toStringBinary(holeStart) + " and " + + Bytes.toStringBinary(holeStop) + ". You need to create a new .regioninfo and region " + + "dir in hdfs to plug the hole."); } } /** - * This handler fixes integrity errors from hdfs information. There are - * basically three classes of integrity problems 1) holes, 2) overlaps, and - * 3) invalid regions. - * - * This class overrides methods that fix holes and the overlap group case. - * Individual cases of particular overlaps are handled by the general - * overlap group merge repair case. - * - * If hbase is online, this forces regions offline before doing merge - * operations. + * This handler fixes integrity errors from hdfs information. There are basically three classes of + * integrity problems 1) holes, 2) overlaps, and 3) invalid regions. 
This class overrides methods + * that fix holes and the overlap group case. Individual cases of particular overlaps are handled + * by the general overlap group merge repair case. If hbase is online, this forces regions offline + * before doing merge operations. */ class HDFSIntegrityFixer extends IntegrityFixSuggester { Configuration conf; @@ -265,84 +259,74 @@ class HDFSIntegrityFixer extends IntegrityFixSuggester { } /** - * This is a special case hole -- when the first region of a table is - * missing from META, HBase doesn't acknowledge the existance of the - * table. + * This is a special case hole -- when the first region of a table is missing from META, HBase + * doesn't acknowledge the existance of the table. */ @Override public void handleRegionStartKeyNotEmpty(HbckRegionInfo next) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY, - "First region should start with an empty key. Creating a new " + - "region and regioninfo in HDFS to plug the hole.", - getTableInfo(), next); + "First region should start with an empty key. Creating a new " + + "region and regioninfo in HDFS to plug the hole.", + getTableInfo(), next); TableDescriptor htd = getTableInfo().getTableDescriptor(); // from special EMPTY_START_ROW to next region's startKey RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(HConstants.EMPTY_START_ROW) - .setEndKey(next.getStartKey()) - .build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(next.getStartKey()).build(); // TODO test HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); - LOG.info("Table region start key was not empty. Created new empty region: " - + newRegion + " " +region); + LOG.info("Table region start key was not empty. Created new empty region: " + newRegion + " " + + region); hbck.fixes++; } @Override public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY, - "Last region should end with an empty key. Creating a new " - + "region and regioninfo in HDFS to plug the hole.", getTableInfo()); + "Last region should end with an empty key. Creating a new " + + "region and regioninfo in HDFS to plug the hole.", + getTableInfo()); TableDescriptor htd = getTableInfo().getTableDescriptor(); // from curEndKey to EMPTY_START_ROW - RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(curEndKey) - .setEndKey(HConstants.EMPTY_START_ROW) - .build(); + RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(curEndKey) + .setEndKey(HConstants.EMPTY_START_ROW).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); - LOG.info("Table region end key was not empty. Created new empty region: " + newRegion - + " " + region); + LOG.info("Table region end key was not empty. Created new empty region: " + newRegion + " " + + region); hbck.fixes++; } /** - * There is a hole in the hdfs regions that violates the table integrity - * rules. Create a new empty region that patches the hole. + * There is a hole in the hdfs regions that violates the table integrity rules. Create a new + * empty region that patches the hole. 
*/ @Override public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, - "There is a hole in the region chain between " + Bytes.toStringBinary(holeStartKey) + - " and " + Bytes.toStringBinary(holeStopKey) + - ". Creating a new regioninfo and region " + "dir in hdfs to plug the hole."); + "There is a hole in the region chain between " + Bytes.toStringBinary(holeStartKey) + + " and " + Bytes.toStringBinary(holeStopKey) + + ". Creating a new regioninfo and region " + "dir in hdfs to plug the hole."); TableDescriptor htd = getTableInfo().getTableDescriptor(); - RegionInfo newRegion = - RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(holeStartKey) - .setEndKey(holeStopKey).build(); + RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) + .setStartKey(holeStartKey).setEndKey(holeStopKey).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); LOG.info("Plugged hole by creating new empty region: " + newRegion + " " + region); hbck.fixes++; } /** - * This takes set of overlapping regions and merges them into a single - * region. This covers cases like degenerate regions, shared start key, - * general overlaps, duplicate ranges, and partial overlapping regions. - * - * Cases: - * - Clean regions that overlap - * - Only .oldlogs regions (can't find start/stop range, or figure out) - * - * This is basically threadsafe, except for the fixer increment in mergeOverlaps. + * This takes set of overlapping regions and merges them into a single region. This covers cases + * like degenerate regions, shared start key, general overlaps, duplicate ranges, and partial + * overlapping regions. Cases: - Clean regions that overlap - Only .oldlogs regions (can't find + * start/stop range, or figure out) This is basically threadsafe, except for the fixer increment + * in mergeOverlaps. 
*/ @Override - public void handleOverlapGroup(Collection overlap) - throws IOException { + public void handleOverlapGroup(Collection overlap) throws IOException { Preconditions.checkNotNull(overlap); - Preconditions.checkArgument(overlap.size() >0); + Preconditions.checkArgument(overlap.size() > 0); if (!this.fixOverlaps) { LOG.warn("Not attempting to repair overlaps."); @@ -350,9 +334,9 @@ public void handleOverlapGroup(Collection overlap) } if (overlap.size() > hbck.getMaxMerge()) { - LOG.warn("Overlap group has " + overlap.size() + " overlapping " + - "regions which is greater than " + hbck.getMaxMerge() + - ", the max number of regions to merge"); + LOG.warn( + "Overlap group has " + overlap.size() + " overlapping " + "regions which is greater than " + + hbck.getMaxMerge() + ", the max number of regions to merge"); if (hbck.shouldSidelineBigOverlaps()) { // we only sideline big overlapped groups that exceeds the max number of regions to merge sidelineBigOverlaps(overlap); @@ -385,12 +369,12 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce if (range == null) { range = new Pair(hi.getStartKey(), hi.getEndKey()); } else { - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getStartKey(), range.getFirst()) < 0) { + if (RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getStartKey(), + range.getFirst()) < 0) { range.setFirst(hi.getStartKey()); } - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getEndKey(), range.getSecond()) > 0) { + if (RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getEndKey(), + range.getSecond()) > 0) { range.setSecond(hi.getEndKey()); } } @@ -429,8 +413,8 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce } // daughters must share the same regionID and we should have a parent too - if (daughterA.getHdfsHRI().getRegionId() != daughterB.getHdfsHRI().getRegionId() || - parent == null) { + if (daughterA.getHdfsHRI().getRegionId() != daughterB.getHdfsHRI().getRegionId() + || parent == null) { return; } @@ -453,7 +437,8 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce hbck.offline(parent.getRegionName()); } catch (IOException ioe) { LOG.warn("Unable to offline parent region: " + parent.getRegionNameAsString() - + ". Just continuing with regular merge... ", ioe); + + ". Just continuing with regular merge... ", + ioe); return; } @@ -461,14 +446,14 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce HBaseFsckRepair.removeParentInMeta(conf, parent.getHdfsHRI()); } catch (IOException ioe) { LOG.warn("Unable to remove parent region in META: " + parent.getRegionNameAsString() - + ". Just continuing with regular merge... ", ioe); + + ". Just continuing with regular merge... 
", + ioe); return; } hbck.sidelineRegionDir(fs, parent); - LOG.info( - "[" + thread + "] Sidelined parent region dir " + parent.getHdfsRegionDir() + " into " + - hbck.getSidelineDir()); + LOG.info("[" + thread + "] Sidelined parent region dir " + parent.getHdfsRegionDir() + + " into " + hbck.getSidelineDir()); hbck.debugLsr(parent.getHdfsRegionDir()); // Make sure we don't have the parents and daughters around @@ -480,39 +465,38 @@ void removeParentsAndFixSplits(Collection overlap) throws IOExce } - void mergeOverlaps(Collection overlap) - throws IOException { + void mergeOverlaps(Collection overlap) throws IOException { String thread = Thread.currentThread().getName(); - LOG.info("== [" + thread + "] Merging regions into one region: " - + Joiner.on(",").join(overlap)); + LOG.info( + "== [" + thread + "] Merging regions into one region: " + Joiner.on(",").join(overlap)); // get the min / max range and close all concerned regions Pair range = null; for (HbckRegionInfo hi : overlap) { if (range == null) { range = new Pair<>(hi.getStartKey(), hi.getEndKey()); } else { - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getStartKey(), range.getFirst()) < 0) { + if (RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getStartKey(), + range.getFirst()) < 0) { range.setFirst(hi.getStartKey()); } - if (RegionSplitCalculator.BYTES_COMPARATOR - .compare(hi.getEndKey(), range.getSecond()) > 0) { + if (RegionSplitCalculator.BYTES_COMPARATOR.compare(hi.getEndKey(), + range.getSecond()) > 0) { range.setSecond(hi.getEndKey()); } } // need to close files so delete can happen. - LOG.debug("[" + thread + "] Closing region before moving data around: " + hi); + LOG.debug("[" + thread + "] Closing region before moving data around: " + hi); LOG.debug("[" + thread + "] Contained region dir before close"); hbck.debugLsr(hi.getHdfsRegionDir()); try { LOG.info("[" + thread + "] Closing region: " + hi); hbck.closeRegion(hi); } catch (IOException ioe) { - LOG.warn("[" + thread + "] Was unable to close region " + hi - + ". Just continuing... ", ioe); + LOG.warn("[" + thread + "] Was unable to close region " + hi + ". Just continuing... ", + ioe); } catch (InterruptedException e) { - LOG.warn("[" + thread + "] Was unable to close region " + hi - + ". Just continuing... ", e); + LOG.warn("[" + thread + "] Was unable to close region " + hi + ". Just continuing... ", + e); } try { @@ -520,7 +504,8 @@ void mergeOverlaps(Collection overlap) hbck.offline(hi.getRegionName()); } catch (IOException ioe) { LOG.warn("[" + thread + "] Unable to offline region from master: " + hi - + ". Just continuing... ", ioe); + + ". Just continuing... ", + ioe); } } @@ -528,19 +513,17 @@ void mergeOverlaps(Collection overlap) TableDescriptor htd = getTableInfo().getTableDescriptor(); // from start key to end Key RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(range.getFirst()) - .setEndKey(range.getSecond()) - .build(); + .setStartKey(range.getFirst()).setEndKey(range.getSecond()).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); - LOG.info("[" + thread + "] Created new empty container region: " + - newRegion + " to contain regions: " + Joiner.on(",").join(overlap)); + LOG.info("[" + thread + "] Created new empty container region: " + newRegion + + " to contain regions: " + Joiner.on(",").join(overlap)); hbck.debugLsr(region.getRegionFileSystem().getRegionDir()); // all target regions are closed, should be able to safely cleanup. 
- boolean didFix= false; + boolean didFix = false; Path target = region.getRegionFileSystem().getRegionDir(); for (HbckRegionInfo contained : overlap) { - LOG.info("[" + thread + "] Merging " + contained + " into " + target); + LOG.info("[" + thread + "] Merging " + contained + " into " + target); int merges = hbck.mergeRegionDirs(target, contained); if (merges > 0) { didFix = true; @@ -552,9 +535,8 @@ void mergeOverlaps(Collection overlap) } /** - * Sideline some regions in a big overlap group so that it - * will have fewer regions, and it is easier to merge them later on. - * + * Sideline some regions in a big overlap group so that it will have fewer regions, and it is + * easier to merge them later on. * @param bigOverlap the overlapped group with regions more than maxMerge */ void sidelineBigOverlaps(Collection bigOverlap) throws IOException { @@ -565,24 +547,24 @@ void sidelineBigOverlaps(Collection bigOverlap) throws IOExcepti List regionsToSideline = RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline); FileSystem fs = FileSystem.get(conf); - for (HbckRegionInfo regionToSideline: regionsToSideline) { + for (HbckRegionInfo regionToSideline : regionsToSideline) { try { LOG.info("Closing region: " + regionToSideline); hbck.closeRegion(regionToSideline); } catch (IOException ioe) { - LOG.warn("Was unable to close region " + regionToSideline - + ". Just continuing... ", ioe); + LOG.warn("Was unable to close region " + regionToSideline + ". Just continuing... ", + ioe); } catch (InterruptedException e) { - LOG.warn("Was unable to close region " + regionToSideline - + ". Just continuing... ", e); + LOG.warn("Was unable to close region " + regionToSideline + ". Just continuing... ", e); } try { LOG.info("Offlining region: " + regionToSideline); hbck.offline(regionToSideline.getRegionName()); } catch (IOException ioe) { - LOG.warn("Unable to offline region from master: " + regionToSideline - + ". Just continuing... ", ioe); + LOG.warn( + "Unable to offline region from master: " + regionToSideline + ". Just continuing... ", + ioe); } LOG.info("Before sideline big overlapped region: " + regionToSideline.toString()); @@ -590,8 +572,7 @@ void sidelineBigOverlaps(Collection bigOverlap) throws IOExcepti if (sidelineRegionDir != null) { sidelinedRegions.put(sidelineRegionDir, regionToSideline); LOG.info("After sidelined big overlapped region: " - + regionToSideline.getRegionNameAsString() - + " to " + sidelineRegionDir.toString()); + + regionToSideline.getRegionNameAsString() + " to " + sidelineRegionDir.toString()); hbck.fixes++; } } @@ -599,8 +580,8 @@ void sidelineBigOverlaps(Collection bigOverlap) throws IOExcepti } /** - * Check the region chain (from META) of this table. We are looking for - * holes, overlaps, and cycles. + * Check the region chain (from META) of this table. We are looking for holes, overlaps, and + * cycles. * @return false if there are errors */ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOException { @@ -635,7 +616,7 @@ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOExc // special endkey case converts '' to null byte[] endKey = rng.getEndKey(); endKey = (endKey.length == 0) ? 
null : endKey; - if (Bytes.equals(rng.getStartKey(),endKey)) { + if (Bytes.equals(rng.getStartKey(), endKey)) { handler.handleDegenerateRegion(rng); } } @@ -658,7 +639,7 @@ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOExc // record errors ArrayList subRange = new ArrayList<>(ranges); - // this dumb and n^2 but this shouldn't happen often + // this dumb and n^2 but this shouldn't happen often for (HbckRegionInfo r1 : ranges) { if (r1.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { continue; @@ -669,16 +650,16 @@ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOExc continue; } // general case of same start key - if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey())==0) { - handler.handleDuplicateStartKeys(r1,r2); - } else if (Bytes.compareTo(r1.getEndKey(), r2.getStartKey())==0 && - r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId()) { - LOG.info("this is a split, log to splits"); - handler.handleSplit(r1, r2); - } else { - // overlap - handler.handleOverlapInRegionChain(r1, r2); - } + if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey()) == 0) { + handler.handleDuplicateStartKeys(r1, r2); + } else if (Bytes.compareTo(r1.getEndKey(), r2.getStartKey()) == 0 + && r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId()) { + LOG.info("this is a split, log to splits"); + handler.handleSplit(r1, r2); + } else { + // overlap + handler.handleOverlapInRegionChain(r1, r2); + } } } @@ -718,20 +699,17 @@ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOExc if (HBaseFsck.shouldDisplayFullReport()) { // do full region split map dump - hbck.getErrors().print("---- Table '" + this.tableName - + "': region split map"); + hbck.getErrors().print("---- Table '" + this.tableName + "': region split map"); dump(splits, regions); - hbck.getErrors().print("---- Table '" + this.tableName - + "': overlap groups"); + hbck.getErrors().print("---- Table '" + this.tableName + "': overlap groups"); dumpOverlapProblems(overlapGroups); - hbck.getErrors().print("There are " + overlapGroups.keySet().size() - + " overlap groups with " + overlapGroups.size() - + " overlapping regions"); + hbck.getErrors().print("There are " + overlapGroups.keySet().size() + " overlap groups with " + + overlapGroups.size() + " overlapping regions"); } if (!sidelinedRegions.isEmpty()) { LOG.warn("Sidelined big overlapped regions, please bulk load them!"); - hbck.getErrors().print("---- Table '" + this.tableName - + "': sidelined big overlapped regions"); + hbck.getErrors() + .print("---- Table '" + this.tableName + "': sidelined big overlapped regions"); dumpSidelinedRegions(sidelinedRegions); } return hbck.getErrors().getErrorList().size() == originalErrorsCount; @@ -739,7 +717,7 @@ public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOExc private boolean handleOverlapsParallel(TableIntegrityErrorHandler handler, byte[] prevKey) throws IOException { - // we parallelize overlap handler for the case we have lots of groups to fix. We can + // we parallelize overlap handler for the case we have lots of groups to fix. We can // safely assume each group is independent. 
List merges = new ArrayList<>(overlapGroups.size()); List> rets; @@ -753,12 +731,12 @@ private boolean handleOverlapsParallel(TableIntegrityErrorHandler handler, byte[ LOG.error("Overlap merges were interrupted", e); return false; } - for(int i=0; i f = rets.get(i); try { f.get(); - } catch(ExecutionException e) { + } catch (ExecutionException e) { LOG.warn("Failed to merge overlap group" + work, e.getCause()); } catch (InterruptedException e) { LOG.error("Waiting for overlap merges was interrupted", e); @@ -778,8 +756,7 @@ private void dump(SortedSet splits, Multimap reg sb.setLength(0); // clear out existing buffer, if any. sb.append(Bytes.toStringBinary(k) + ":\t"); for (HbckRegionInfo r : regions.get(k)) { - sb.append("[ "+ r.toString() + ", " - + Bytes.toStringBinary(r.getEndKey())+ "]\t"); + sb.append("[ " + r.toString() + ", " + Bytes.toStringBinary(r.getEndKey()) + "]\t"); } hbck.getErrors().print(sb.toString()); } @@ -791,8 +768,8 @@ private void dumpOverlapProblems(Multimap regions) { for (byte[] k : regions.keySet()) { hbck.getErrors().print(Bytes.toStringBinary(k) + ":"); for (HbckRegionInfo r : regions.get(k)) { - hbck.getErrors().print("[ " + r.toString() + ", " - + Bytes.toStringBinary(r.getEndKey()) + "]"); + hbck.getErrors() + .print("[ " + r.toString() + ", " + Bytes.toStringBinary(r.getEndKey()) + "]"); } hbck.getErrors().print("----"); } @@ -803,8 +780,8 @@ private void dumpSidelinedRegions(Map regions) { TableName tableName = entry.getValue().getTableName(); Path path = entry.getKey(); hbck.getErrors().print("This sidelined region dir should be bulk loaded: " + path.toString()); - hbck.getErrors().print("Bulk load command looks like: " + BulkLoadHFilesTool.NAME + " " + - path.toUri().getPath() + " " + tableName); + hbck.getErrors().print("Bulk load command looks like: " + BulkLoadHFilesTool.NAME + " " + + path.toUri().getPath() + " " + tableName); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java index 10cc4e98d39a..b1c66baf7395 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.util; import java.util.concurrent.locks.ReentrantReadWriteLock; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java index 179b7d4a732e..c7febcf0549b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockStrongRef.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReentrantReadWriteLock; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java index 5492a8537d22..7b17e8bbaf8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLockWithObjectPool.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,10 @@ import java.lang.ref.Reference; import java.util.concurrent.locks.ReentrantReadWriteLock; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class IdReadWriteLockWithObjectPool extends IdReadWriteLock{ +public class IdReadWriteLockWithObjectPool extends IdReadWriteLock { // The number of lock we want to easily support. It's not a maximum. private static final int NB_CONCURRENT_LOCKS = 1000; /** @@ -89,7 +87,7 @@ int purgeAndGetEntryPoolSize() { return lockPool.size(); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DM_GC", justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DM_GC", justification = "Intentional") private void gc() { System.gc(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 1e2ac3ebb973..653a9c7e3e25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,21 @@ */ package org.apache.hadoop.hbase.util; -import java.io.InterruptedIOException; import java.io.IOException; +import java.io.InterruptedIOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.regionserver.HRegionServer; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** @@ -59,21 +58,19 @@ public HRegionServer getRegionServer() { } /** - * Block until the region server has come online, indicating it is ready - * to be used. + * Block until the region server has come online, indicating it is ready to be used. */ public void waitForServerOnline() { // The server is marked online after the init method completes inside of - // the HRS#run method. 
HRS#init can fail for whatever region. In those - // cases, we'll jump out of the run without setting online flag. Check + // the HRS#run method. HRS#init can fail for whatever region. In those + // cases, we'll jump out of the run without setting online flag. Check // stopRequested so we don't wait here a flag that will never be flipped. regionServer.waitForServerOnline(); } } /** - * Creates a {@link RegionServerThread}. - * Call 'start' on the returned thread to make it run. + * Creates a {@link RegionServerThread}. Call 'start' on the returned thread to make it run. * @param c Configuration to use. * @param hrsc Class to create. * @param index Used distinguishing the object returned. @@ -89,16 +86,14 @@ public static JVMClusterUtil.RegionServerThread createRegionServerThread(final C server = ctor.newInstance(c); } catch (InvocationTargetException ite) { Throwable target = ite.getTargetException(); - throw new RuntimeException("Failed construction of RegionServer: " + - hrsc.toString() + ((target.getCause() != null)? - target.getCause().getMessage(): ""), target); + throw new RuntimeException("Failed construction of RegionServer: " + hrsc.toString() + + ((target.getCause() != null) ? target.getCause().getMessage() : ""), target); } catch (Exception e) { throw new IOException(e); } return new JVMClusterUtil.RegionServerThread(server, index); } - /** * Datastructure to hold Master Thread and Master instance */ @@ -117,8 +112,7 @@ public HMaster getMaster() { } /** - * Creates a {@link MasterThread}. - * Call 'start' on the returned thread to make it run. + * Creates a {@link MasterThread}. Call 'start' on the returned thread to make it run. * @param c Configuration to use. * @param hmc Class to create. * @param index Used distinguishing the object returned. @@ -132,9 +126,8 @@ public static JVMClusterUtil.MasterThread createMasterThread(final Configuration server = hmc.getConstructor(Configuration.class).newInstance(c); } catch (InvocationTargetException ite) { Throwable target = ite.getTargetException(); - throw new RuntimeException("Failed construction of Master: " + - hmc.toString() + ((target.getCause() != null)? - target.getCause().getMessage(): ""), target); + throw new RuntimeException("Failed construction of Master: " + hmc.toString() + + ((target.getCause() != null) ? target.getCause().getMessage() : ""), target); } catch (Exception e) { throw new IOException(e); } @@ -142,12 +135,12 @@ public static JVMClusterUtil.MasterThread createMasterThread(final Configuration // just add the current master host port since we do not know other master addresses up front // in mini cluster tests. c.set(HConstants.MASTER_ADDRS_KEY, - Preconditions.checkNotNull(server.getServerName().getAddress()).toString()); + Preconditions.checkNotNull(server.getServerName().getAddress()).toString()); return new JVMClusterUtil.MasterThread(server, index); } - private static JVMClusterUtil.MasterThread findActiveMaster( - List masters) { + private static JVMClusterUtil.MasterThread + findActiveMaster(List masters) { for (JVMClusterUtil.MasterThread t : masters) { if (t.master.isActiveMaster()) { return t; @@ -158,8 +151,7 @@ private static JVMClusterUtil.MasterThread findActiveMaster( } /** - * Start the cluster. Waits until there is a primary master initialized - * and returns its address. + * Start the cluster. Waits until there is a primary master initialized and returns its address. * @param masters * @param regionservers * @return Address to use contacting primary master. 
@@ -181,28 +173,33 @@ public static String startup(final List masters, } // Wait for an active master - // having an active master before starting the region threads allows - // then to succeed on their connection to master - final int startTimeout = configuration != null ? Integer.parseInt( - configuration.get("hbase.master.start.timeout.localHBaseCluster", "30000")) : 30000; + // having an active master before starting the region threads allows + // then to succeed on their connection to master + final int startTimeout = + configuration != null + ? Integer.parseInt( + configuration.get("hbase.master.start.timeout.localHBaseCluster", "30000")) + : 30000; waitForEvent(startTimeout, "active", () -> findActiveMaster(masters) != null); if (regionservers != null) { - for (JVMClusterUtil.RegionServerThread t: regionservers) { + for (JVMClusterUtil.RegionServerThread t : regionservers) { t.start(); } } // Wait for an active master to be initialized (implies being master) - // with this, when we return the cluster is complete - final int initTimeout = configuration != null ? Integer.parseInt( - configuration.get("hbase.master.init.timeout.localHBaseCluster", "200000")) : 200000; + // with this, when we return the cluster is complete + final int initTimeout = + configuration != null + ? Integer.parseInt( + configuration.get("hbase.master.init.timeout.localHBaseCluster", "200000")) + : 200000; waitForEvent(initTimeout, "initialized", () -> { - JVMClusterUtil.MasterThread t = findActiveMaster(masters); - // master thread should never be null at this point, but let's keep the check anyway - return t != null && t.master.isInitialized(); - } - ); + JVMClusterUtil.MasterThread t = findActiveMaster(masters); + // master thread should never be null at this point, but let's keep the check anyway + return t != null && t.master.isInitialized(); + }); return findActiveMaster(masters).master.getServerName().toString(); } @@ -210,10 +207,10 @@ public static String startup(final List masters, /** * Utility method to wait some time for an event to occur, and then return control to the caller. * @param millis How long to wait, in milliseconds. - * @param action The action that we are waiting for. Will be used in log message if the event - * does not occur. + * @param action The action that we are waiting for. Will be used in log message if the event does + * not occur. * @param check A Supplier that will be checked periodically to produce an updated true/false - * result indicating if the expected event has happened or not. + * result indicating if the expected event has happened or not. * @throws InterruptedIOException If we are interrupted while waiting for the event. * @throws RuntimeException If we reach the specified timeout while waiting for the event. 
*/ @@ -235,7 +232,7 @@ private static void waitForEvent(long millis, String action, Supplier c try { Thread.sleep(100); } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } } @@ -260,15 +257,15 @@ public static void shutdown(final List masters, } catch (IOException e) { LOG.error("Exception occurred while stopping master", e); } - LOG.info("Stopped backup Master {} is stopped: {}", - t.master.hashCode(), t.master.isStopped()); + LOG.info("Stopped backup Master {} is stopped: {}", t.master.hashCode(), + t.master.isStopped()); } else { if (activeMaster != null) { LOG.warn("Found more than 1 active master, hash {}", activeMaster.master.hashCode()); } activeMaster = t; - LOG.debug("Found active master hash={}, stopped={}", - t.master.hashCode(), t.master.isStopped()); + LOG.debug("Found active master hash={}, stopped={}", t.master.hashCode(), + t.master.isStopped()); } } } @@ -294,8 +291,9 @@ public static void shutdown(final List masters, try { t.join(maxTime - now); } catch (InterruptedException e) { - LOG.info("Got InterruptedException on shutdown - " + - "not waiting anymore on region server ends", e); + LOG.info("Got InterruptedException on shutdown - " + + "not waiting anymore on region server ends", + e); wasInterrupted = true; // someone wants us to speed up. } } @@ -318,8 +316,8 @@ public static void shutdown(final List masters, if (!atLeastOneLiveServer) break; for (RegionServerThread t : regionservers) { if (t.isAlive()) { - LOG.warn("RegionServerThreads taking too long to stop, interrupting; thread dump " + - "if > 3 attempts: i=" + i); + LOG.warn("RegionServerThreads taking too long to stop, interrupting; thread dump " + + "if > 3 attempts: i=" + i); if (i > 3) { Threads.printThreadInfo(System.out, "Thread dump " + t.getName()); } @@ -337,20 +335,19 @@ public static void shutdown(final List masters, // tests. // this.master.join(): Threads.threadDumpingIsAlive(t.master); - } catch(InterruptedException e) { - LOG.info("Got InterruptedException on shutdown - " + - "not waiting anymore on master ends", e); + } catch (InterruptedException e) { + LOG.info( + "Got InterruptedException on shutdown - " + "not waiting anymore on master ends", e); wasInterrupted = true; } } } } - LOG.info("Shutdown of " + - ((masters != null) ? masters.size() : "0") + " master(s) and " + - ((regionservers != null) ? regionservers.size() : "0") + - " regionserver(s) " + (wasInterrupted ? "interrupted" : "complete")); + LOG.info("Shutdown of " + ((masters != null) ? masters.size() : "0") + " master(s) and " + + ((regionservers != null) ? regionservers.size() : "0") + " regionserver(s) " + + (wasInterrupted ? 
"interrupted" : "complete")); - if (wasInterrupted){ + if (wasInterrupted) { Thread.currentThread().interrupt(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java index 9c00771ee4fe..d6c24ebee2a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmPauseMonitor.java @@ -23,12 +23,11 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; - +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.metrics.JvmPauseMonitorSource; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.metrics.JvmPauseMonitorSource; -import org.apache.hadoop.conf.Configuration; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @@ -38,16 +37,13 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * Class which sets up a simple thread which runs in a loop sleeping - * for a short interval of time. If the sleep takes significantly longer - * than its target time, it implies that the JVM or host machine has - * paused processing, which may cause other problems. If such a pause is - * detected, the thread logs a message. - * The original JvmPauseMonitor is: - * ${hadoop-common-project}/hadoop-common/src/main/java/org/apache/hadoop/util/ - * JvmPauseMonitor.java - * r1503806 | cmccabe | 2013-07-17 01:48:24 +0800 (Wed, 17 Jul 2013) | 1 line - * HADOOP-9618. thread which detects GC pauses(Todd Lipcon) + * Class which sets up a simple thread which runs in a loop sleeping for a short interval of time. + * If the sleep takes significantly longer than its target time, it implies that the JVM or host + * machine has paused processing, which may cause other problems. If such a pause is detected, the + * thread logs a message. The original JvmPauseMonitor is: + * ${hadoop-common-project}/hadoop-common/src/main/java/org/apache/hadoop/util/ JvmPauseMonitor.java + * r1503806 | cmccabe | 2013-07-17 01:48:24 +0800 (Wed, 17 Jul 2013) | 1 line HADOOP-9618. 
thread + * which detects GC pauses(Todd Lipcon) */ @InterfaceAudience.Private public class JvmPauseMonitor { @@ -55,17 +51,15 @@ public class JvmPauseMonitor { /** The target sleep time */ private static final long SLEEP_INTERVAL_MS = 500; - + /** log WARN if we detect a pause longer than this threshold */ private final long warnThresholdMs; - public static final String WARN_THRESHOLD_KEY = - "jvm.pause.warn-threshold.ms"; + public static final String WARN_THRESHOLD_KEY = "jvm.pause.warn-threshold.ms"; private static final long WARN_THRESHOLD_DEFAULT = 10000; - + /** log INFO if we detect a pause longer than this threshold */ private final long infoThresholdMs; - public static final String INFO_THRESHOLD_KEY = - "jvm.pause.info-threshold.ms"; + public static final String INFO_THRESHOLD_KEY = "jvm.pause.info-threshold.ms"; private static final long INFO_THRESHOLD_DEFAULT = 1000; private Thread monitorThread; @@ -81,7 +75,7 @@ public JvmPauseMonitor(Configuration conf, JvmPauseMonitorSource metricsSource) this.infoThresholdMs = conf.getLong(INFO_THRESHOLD_KEY, INFO_THRESHOLD_DEFAULT); this.metricsSource = metricsSource; } - + public void start() { Preconditions.checkState(monitorThread == null, "Already started"); monitorThread = new Thread(new Monitor(), "JvmPauseMonitor"); @@ -98,7 +92,7 @@ public void stop() { Thread.currentThread().interrupt(); } } - + private String formatMessage(long extraSleepTime, List gcDiffs) { String ret = "Detected pause in JVM or host machine (eg GC): " + "pause of approximately " + extraSleepTime + "ms\n"; @@ -109,7 +103,7 @@ private String formatMessage(long extraSleepTime, List gcDiffs) { } return ret; } - + private Map getGcTimes() { Map map = Maps.newHashMap(); List gcBeans = ManagementFactory.getGarbageCollectorMXBeans(); @@ -160,8 +154,8 @@ public void run() { Map gcTimesAfterSleep = getGcTimes(); if (extraSleepTime > infoThresholdMs) { - Set gcBeanNames = Sets.intersection(gcTimesAfterSleep.keySet(), - gcTimesBeforeSleep.keySet()); + Set gcBeanNames = + Sets.intersection(gcTimesAfterSleep.keySet(), gcTimesBeforeSleep.keySet()); List gcDiffs = Lists.newArrayList(); for (String name : gcBeanNames) { GcTimes diff = gcTimesAfterSleep.get(name).subtract(gcTimesBeforeSleep.get(name)); @@ -207,13 +201,11 @@ public void setMetricsSource(JvmPauseMonitorSource metricsSource) { } /** - * Simple 'main' to facilitate manual testing of the pause monitor. - * - * This main function just leaks memory into a list. Running this class - * with a 1GB heap will very quickly go into "GC hell" and result in - * log messages about the GC pauses. + * Simple 'main' to facilitate manual testing of the pause monitor. This main function just leaks + * memory into a list. Running this class with a 1GB heap will very quickly go into "GC hell" and + * result in log messages about the GC pauses. */ - public static void main(String []args) throws Exception { + public static void main(String[] args) throws Exception { new JvmPauseMonitor(new Configuration()).start(); List list = Lists.newArrayList(); int i = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java index 65c952e4be73..236503f3365f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.util.HashSet; import java.util.Set; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -47,8 +45,8 @@ public static boolean isBadJvmVersion() { * Return the current JVM version information. */ public static String getVersion() { - return System.getProperty("java.vm.vendor", "UNKNOWN_VM_VENDOR") + ' ' + - System.getProperty("java.version", "UNKNOWN_JAVA_VERSION") + '-' + - System.getProperty("java.vm.version", "UNKNOWN_VM_VERSION"); + return System.getProperty("java.vm.vendor", "UNKNOWN_VM_VENDOR") + ' ' + + System.getProperty("java.version", "UNKNOWN_JAVA_VERSION") + '-' + + System.getProperty("java.vm.version", "UNKNOWN_VM_VERSION"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java index 29e7836a7481..b579b609f324 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyRange.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java index e6075d2754bf..0857364fc06c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,8 +21,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when the lease was expected to be recovered, - * but the file can't be opened. + * Thrown when the lease was expected to be recovered, but the file can't be opened. */ @InterfaceAudience.Public public class LeaseNotRecoveredException extends HBaseIOException { @@ -36,10 +34,10 @@ public LeaseNotRecoveredException(String message) { } public LeaseNotRecoveredException(String message, Throwable cause) { - super(message, cause); + super(message, cause); } public LeaseNotRecoveredException(Throwable cause) { - super(cause); + super(cause); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java index 9ade12d578c4..7d2483c66639 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LossyCounting.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util; import java.util.Map; @@ -35,14 +33,10 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * LossyCounting utility, bounded data structure that maintains approximate high frequency - * elements in data stream. - * - * Bucket size is 1 / error rate. (Error rate is 0.02 by default) - * Lemma If element does not appear in set, then is frequency is less than e * N - * (N is total element counts until now.) - * Based on paper: - * http://www.vldb.org/conf/2002/S10P03.pdf + * LossyCounting utility, bounded data structure that maintains approximate high frequency elements + * in data stream. Bucket size is 1 / error rate. (Error rate is 0.02 by default) Lemma If element + * does not appear in set, then is frequency is less than e * N (N is total element counts until + * now.) Based on paper: http://www.vldb.org/conf/2002/S10P03.pdf */ @InterfaceAudience.Private public class LossyCounting { @@ -88,22 +82,22 @@ public LossyCounting(String name, Configuration conf, LossyCountingListener l } private void addByOne(T key) { - //If entry exists, we update the entry by incrementing its frequency by one. Otherwise, - //we create a new entry starting with currentTerm so that it will not be pruned immediately + // If entry exists, we update the entry by incrementing its frequency by one. Otherwise, + // we create a new entry starting with currentTerm so that it will not be pruned immediately data.put(key, data.getOrDefault(key, currentTerm != 0 ? currentTerm - 1 : 0) + 1); - //update totalDataCount and term + // update totalDataCount and term totalDataCount++; calculateCurrentTerm(); } public void add(T key) { addByOne(key); - if(totalDataCount % bucketSize == 0) { - //sweep the entries at bucket boundaries - //run Sweep + if (totalDataCount % bucketSize == 0) { + // sweep the entries at bucket boundaries + // run Sweep Future future = fut.get(); - if (future != null && !future.isDone()){ + if (future != null && !future.isDone()) { return; } future = executor.submit(new SweepRunnable()); @@ -111,13 +105,12 @@ public void add(T key) { } } - /** * sweep low frequency data */ public void sweep() { - for(Map.Entry entry : data.entrySet()) { - if(entry.getValue() < currentTerm) { + for (Map.Entry entry : data.entrySet()) { + if (entry.getValue() < currentTerm) { T metric = entry.getKey(); data.remove(metric); if (listener != null) { @@ -134,7 +127,7 @@ private void calculateCurrentTerm() { this.currentTerm = (int) Math.ceil(1.0 * totalDataCount / (double) bucketSize); } - public long getBucketSize(){ + public long getBucketSize() { return bucketSize; } @@ -146,7 +139,7 @@ public boolean contains(T key) { return data.containsKey(key); } - public Set getElements(){ + public Set getElements() { return data.keySet(); } @@ -155,7 +148,8 @@ public long getCurrentTerm() { } class SweepRunnable implements Runnable { - @Override public void run() { + @Override + public void run() { if (LOG.isTraceEnabled()) { LOG.trace("Starting sweep of lossyCounting-" + name); } @@ -171,4 +165,3 @@ public Future getSweepFuture() { return fut.get(); } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java index e5081273d472..29cc1063b972 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java 
@@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * An environment edge that uses a manually set value. This is useful for testing events that are supposed to - * happen in the same millisecond. + * An environment edge that uses a manually set value. This is useful for testing events that are + * supposed to happen in the same millisecond. */ @InterfaceAudience.Private public class ManualEnvironmentEdge implements EnvironmentEdge { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index b1517c76c9a6..a4ac3d0ce060 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.IOException; @@ -30,7 +28,6 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; @@ -38,11 +35,12 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * Utility methods for interacting with the regions. */ @@ -66,14 +64,8 @@ public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor, long regionId = EnvironmentEdgeManager.currentTime(); RegionInfo[] hRegionInfos = null; if (splitKeys == null || splitKeys.length == 0) { - hRegionInfos = new RegionInfo[]{ - RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setStartKey(null) - .setEndKey(null) - .setSplit(false) - .setRegionId(regionId) - .build() - }; + hRegionInfos = new RegionInfo[] { RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) + .setStartKey(null).setEndKey(null).setSplit(false).setRegionId(regionId).build() }; } else { int numRegions = splitKeys.length + 1; hRegionInfos = new RegionInfo[numRegions]; @@ -81,13 +73,8 @@ public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor, byte[] endKey = null; for (int i = 0; i < numRegions; i++) { endKey = (i == splitKeys.length) ? 
null : splitKeys[i]; - hRegionInfos[i] = - RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setStartKey(startKey) - .setEndKey(endKey) - .setSplit(false) - .setRegionId(regionId) - .build(); + hRegionInfos[i] = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) + .setStartKey(startKey).setEndKey(endKey).setSplit(false).setRegionId(regionId).build(); startKey = endKey; } } @@ -95,9 +82,8 @@ public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor, } /** - * Create new set of regions on the specified file-system. - * NOTE: that you should add the regions to hbase:meta after this operation. - * + * Create new set of regions on the specified file-system. NOTE: that you should add the regions + * to hbase:meta after this operation. * @param conf {@link Configuration} * @param rootDir Root directory for HBase instance * @param tableDescriptor description of the table @@ -111,7 +97,7 @@ public static List createRegions(final Configuration conf, final Pat if (newRegions == null) return null; int regionNumber = newRegions.length; ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf, - "RegionOpenAndInit-" + tableDescriptor.getTableName(), regionNumber); + "RegionOpenAndInit-" + tableDescriptor.getTableName(), regionNumber); try { return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task); } finally { @@ -120,9 +106,8 @@ public static List createRegions(final Configuration conf, final Pat } /** - * Create new set of regions on the specified file-system. - * NOTE: that you should add the regions to hbase:meta after this operation. - * + * Create new set of regions on the specified file-system. NOTE: that you should add the regions + * to hbase:meta after this operation. * @param exec Thread Pool Executor * @param conf {@link Configuration} * @param rootDir Root directory for HBase instance @@ -132,9 +117,8 @@ public static List createRegions(final Configuration conf, final Pat * @throws IOException */ public static List createRegions(final ThreadPoolExecutor exec, - final Configuration conf, final Path rootDir, - final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, - final RegionFillTask task) throws IOException { + final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor, + final RegionInfo[] newRegions, final RegionFillTask task) throws IOException { if (newRegions == null) return null; int regionNumber = newRegions.length; CompletionService completionService = new ExecutorCompletionService<>(exec); @@ -171,8 +155,8 @@ public RegionInfo call() throws IOException { * @throws IOException */ public static RegionInfo createRegion(final Configuration conf, final Path rootDir, - final TableDescriptor tableDescriptor, final RegionInfo newRegion, - final RegionFillTask task) throws IOException { + final TableDescriptor tableDescriptor, final RegionInfo newRegion, final RegionFillTask task) + throws IOException { // 1. Create HRegion // The WAL subsystem will use the default rootDir rather than the passed in rootDir // unless I pass along via the conf. @@ -193,7 +177,6 @@ public static RegionInfo createRegion(final Configuration conf, final Path rootD /** * Execute the task on the specified set of regions. 
- * * @param exec Thread Pool Executor * @param regions {@link RegionInfo} that describes the regions to edit * @param task {@link RegionFillTask} custom code to edit the region @@ -202,7 +185,7 @@ public static RegionInfo createRegion(final Configuration conf, final Path rootD public static void editRegions(final ThreadPoolExecutor exec, final Collection regions, final RegionEditTask task) throws IOException { final ExecutorCompletionService completionService = new ExecutorCompletionService<>(exec); - for (final RegionInfo hri: regions) { + for (final RegionInfo hri : regions) { completionService.submit(new Callable() { @Override public Void call() throws IOException { @@ -213,7 +196,7 @@ public Void call() throws IOException { } try { - for (RegionInfo hri: regions) { + for (RegionInfo hri : regions) { completionService.take().get(); } } catch (InterruptedException e) { @@ -228,12 +211,12 @@ public Void call() throws IOException { * "hbase.hregion.open.and.init.threads.max" property. */ static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration conf, - final String threadNamePrefix, int regionNumber) { + final String threadNamePrefix, int regionNumber) { int maxThreads = - Math.min(regionNumber, conf.getInt("hbase.hregion.open.and.init.threads.max", 16)); + Math.min(regionNumber, conf.getInt("hbase.hregion.open.and.init.threads.max", 16)); ThreadPoolExecutor regionOpenAndInitThreadPool = Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactoryBuilder().setNameFormat(threadNamePrefix + "-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); return regionOpenAndInitThreadPool; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java index bde7fea1c366..64fc9418197f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithAck.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,9 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Callable; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; @@ -32,10 +33,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.Callable; - /** * Move Regions and make sure that they are up on the target server.If a region movement fails we * exit as failure @@ -53,7 +50,7 @@ class MoveWithAck implements Callable { private final Admin admin; MoveWithAck(Connection conn, RegionInfo regionInfo, ServerName sourceServer, - ServerName targetServer, List movedRegions) throws IOException { + ServerName targetServer, List movedRegions) throws IOException { this.conn = conn; this.region = regionInfo; this.targetServer = targetServer; @@ -66,10 +63,10 @@ class MoveWithAck implements Callable { public Boolean call() throws IOException, InterruptedException { boolean moved = false; int count = 0; - int retries = admin.getConfiguration() - .getInt(RegionMover.MOVE_RETRIES_MAX_KEY, RegionMover.DEFAULT_MOVE_RETRIES_MAX); - int maxWaitInSeconds = admin.getConfiguration() - .getInt(RegionMover.MOVE_WAIT_MAX_KEY, RegionMover.DEFAULT_MOVE_WAIT_MAX); + int retries = admin.getConfiguration().getInt(RegionMover.MOVE_RETRIES_MAX_KEY, + RegionMover.DEFAULT_MOVE_RETRIES_MAX); + int maxWaitInSeconds = admin.getConfiguration().getInt(RegionMover.MOVE_WAIT_MAX_KEY, + RegionMover.DEFAULT_MOVE_WAIT_MAX); long startTime = EnvironmentEdgeManager.currentTime(); boolean sameServer = true; // Assert we can scan the region in its current location @@ -114,10 +111,10 @@ private static String getTimeDiffInSec(long startTime) { */ private void isSuccessfulScan(RegionInfo region) throws IOException { Scan scan = new Scan().withStartRow(region.getStartKey()).setRaw(true).setOneRowLimit() - .setMaxResultSize(1L).setCaching(1).setFilter(new FirstKeyOnlyFilter()) - .setCacheBlocks(false); + .setMaxResultSize(1L).setCaching(1).setFilter(new FirstKeyOnlyFilter()) + .setCacheBlocks(false); try (Table table = conn.getTable(region.getTable()); - ResultScanner scanner = table.getScanner(scan)) { + ResultScanner scanner = table.getScanner(scan)) { scanner.next(); } catch (IOException e) { LOG.error("Could not scan region: {}", region.getEncodedName(), e); @@ -129,8 +126,7 @@ private void isSuccessfulScan(RegionInfo region) throws IOException { * Returns true if passed region is still on serverName when we look at hbase:meta. 
* @return true if region is hosted on serverName otherwise false */ - private boolean isSameServer(RegionInfo region, ServerName serverName) - throws IOException { + private boolean isSameServer(RegionInfo region, ServerName serverName) throws IOException { ServerName serverForRegion = getServerNameForRegion(region, admin, conn); return serverForRegion != null && serverForRegion.equals(serverName); } @@ -145,9 +141,8 @@ static ServerName getServerNameForRegion(RegionInfo region, Admin admin, Connect if (!admin.isTableEnabled(region.getTable())) { return null; } - HRegionLocation loc = - conn.getRegionLocator(region.getTable()).getRegionLocation(region.getStartKey(), - region.getReplicaId(),true); + HRegionLocation loc = conn.getRegionLocator(region.getTable()) + .getRegionLocation(region.getStartKey(), region.getReplicaId(), true); if (loc != null) { return loc.getServerName(); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java index 0ddb99ac4180..82c8ae49de50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MoveWithoutAck.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,9 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; +import java.util.List; +import java.util.concurrent.Callable; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; @@ -26,12 +26,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; -import java.util.concurrent.Callable; - /** - * Move Regions without Acknowledging.Usefule in case of RS shutdown as we might want to shut the - * RS down anyways and not abort on a stuck region. Improves movement performance + * Move Regions without Acknowledging.Usefule in case of RS shutdown as we might want to shut the RS + * down anyways and not abort on a stuck region. Improves movement performance */ @InterfaceAudience.Private class MoveWithoutAck implements Callable { @@ -45,7 +42,7 @@ class MoveWithoutAck implements Callable { private final Admin admin; MoveWithoutAck(Admin admin, RegionInfo regionInfo, ServerName sourceServer, - ServerName targetServer, List movedRegions) { + ServerName targetServer, List movedRegions) { this.admin = admin; this.region = regionInfo; this.targetServer = targetServer; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java index fa4c18442ac8..f1c7ca82cb08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,18 +20,15 @@ import java.util.Arrays; import java.util.Deque; import java.util.LinkedList; - import org.apache.yetus.audience.InterfaceAudience; /** - * Computes the optimal (minimal cost) assignment of jobs to workers (or other - * analogous) concepts given a cost matrix of each pair of job and worker, using - * the algorithm by James Munkres in "Algorithms for the Assignment and - * Transportation Problems", with additional optimizations as described by Jin - * Kue Wong in "A New Implementation of an Algorithm for the Optimal Assignment - * Problem: An Improved Version of Munkres' Algorithm". The algorithm runs in - * O(n^3) time and need O(n^2) auxiliary space where n is the number of jobs or - * workers, whichever is greater. + * Computes the optimal (minimal cost) assignment of jobs to workers (or other analogous) concepts + * given a cost matrix of each pair of job and worker, using the algorithm by James Munkres in + * "Algorithms for the Assignment and Transportation Problems", with additional optimizations as + * described by Jin Kue Wong in "A New Implementation of an Algorithm for the Optimal Assignment + * Problem: An Improved Version of Munkres' Algorithm". The algorithm runs in O(n^3) time and need + * O(n^2) auxiliary space where n is the number of jobs or workers, whichever is greater. */ @InterfaceAudience.Private public class MunkresAssignment { @@ -88,10 +84,10 @@ public class MunkresAssignment { private float[] colAdjust; /** - * Construct a new problem instance with the specified cost matrix. The cost - * matrix must be rectangular, though not necessarily square. If one dimension - * is greater than the other, some elements in the greater dimension will not - * be assigned. The input cost matrix will not be modified. + * Construct a new problem instance with the specified cost matrix. The cost matrix must be + * rectangular, though not necessarily square. If one dimension is greater than the other, some + * elements in the greater dimension will not be assigned. The input cost matrix will not be + * modified. * @param costMatrix */ public MunkresAssignment(float[][] costMatrix) { @@ -146,11 +142,10 @@ public MunkresAssignment(float[][] costMatrix) { } /** - * Get the optimal assignments. The returned array will have the same number - * of elements as the number of elements as the number of rows in the input - * cost matrix. Each element will indicate which column should be assigned to - * that row or -1 if no column should be assigned, i.e. if result[i] = j then - * row i should be assigned to column j. Subsequent invocations of this method + * Get the optimal assignments. The returned array will have the same number of elements as the + * number of elements as the number of rows in the input cost matrix. Each element will indicate + * which column should be assigned to that row or -1 if no column should be assigned, i.e. if + * result[i] = j then row i should be assigned to column j. Subsequent invocations of this method * will simply return the same object without additional computation. * @return an array with the optimal assignments */ @@ -174,8 +169,7 @@ public int[] solve() { // Extract the assignments from the mask matrix. 
if (transposed) { assignments = new int[cols]; - outer: - for (int c = 0; c < cols; c++) { + outer: for (int c = 0; c < cols; c++) { for (int r = 0; r < rows; r++) { if (mask[r][c] == STAR) { assignments[c] = r; @@ -187,8 +181,7 @@ public int[] solve() { } } else { assignments = new int[rows]; - outer: - for (int r = 0; r < rows; r++) { + outer: for (int r = 0; r < rows; r++) { for (int c = 0; c < cols; c++) { if (mask[r][c] == STAR) { assignments[r] = c; @@ -215,9 +208,8 @@ public int[] solve() { } /** - * Corresponds to the "preliminaries" step of the original algorithm. - * Guarantees that the matrix is an equivalent non-negative matrix with at - * least one zero in each row. + * Corresponds to the "preliminaries" step of the original algorithm. Guarantees that the matrix + * is an equivalent non-negative matrix with at least one zero in each row. */ private void preliminaries() { for (int r = 0; r < rows; r++) { @@ -250,8 +242,8 @@ private void preliminaries() { } /** - * Test whether the algorithm is done, i.e. we have the optimal assignment. - * This occurs when there is exactly one starred zero in each row. + * Test whether the algorithm is done, i.e. we have the optimal assignment. This occurs when there + * is exactly one starred zero in each row. * @return true if the algorithm is done */ private boolean testIsDone() { @@ -431,8 +423,8 @@ private void stepThree() { } /** - * Find a zero cost assignment which is not covered. If there are no zero cost - * assignments which are uncovered, then null will be returned. + * Find a zero cost assignment which is not covered. If there are no zero cost assignments which + * are uncovered, then null will be returned. * @return pair of row and column indices of an uncovered zero or null */ private Pair findUncoveredZero() { @@ -445,8 +437,8 @@ private Pair findUncoveredZero() { } /** - * A specified row has become covered, and a specified column has become - * uncovered. The least value per row may need to be updated. + * A specified row has become covered, and a specified column has become uncovered. The least + * value per row may need to be updated. * @param row the index of the row which was just covered * @param col the index of the column which was just uncovered */ @@ -467,8 +459,8 @@ private void updateMin(int row, int col) { } /** - * Find a starred zero in a specified row. If there are no starred zeroes in - * the specified row, then null will be returned. + * Find a starred zero in a specified row. If there are no starred zeroes in the specified row, + * then null will be returned. * @param r the index of the row to be searched * @return pair of row and column indices of starred zero or null */ @@ -482,8 +474,8 @@ private Pair starInRow(int r) { } /** - * Find a starred zero in the specified column. If there are no starred zeroes - * in the specified row, then null will be returned. + * Find a starred zero in the specified column. If there are no starred zeroes in the specified + * row, then null will be returned. * @param c the index of the column to be searched * @return pair of row and column indices of starred zero or null */ @@ -497,8 +489,8 @@ private Pair starInCol(int c) { } /** - * Find a primed zero in the specified row. If there are no primed zeroes in - * the specified row, then null will be returned. + * Find a primed zero in the specified row. If there are no primed zeroes in the specified row, + * then null will be returned. 
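// A minimal sketch of how the MunkresAssignment API documented above might be driven:
// the constructor takes a rectangular float[][] cost matrix and solve() returns, for each
// row, the assigned column index, or -1 when a row gets no column. The 3x3 matrix is a
// made-up example; everything else follows the signatures shown in this file.
import org.apache.hadoop.hbase.util.MunkresAssignment;

public class MunkresAssignmentSketch {
  public static void main(String[] args) {
    // cost[i][j] = cost of assigning job i (row) to worker j (column)
    float[][] cost = {
      { 4.0f, 1.0f, 3.0f },
      { 2.0f, 0.0f, 5.0f },
      { 3.0f, 2.0f, 2.0f }
    };
    int[] assignments = new MunkresAssignment(cost).solve();
    for (int row = 0; row < assignments.length; row++) {
      // assignments[row] is the column chosen for this row, or -1 if unassigned
      System.out.println("row " + row + " -> column " + assignments[row]);
    }
  }
}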
* @param r the index of the row to be searched * @return pair of row and column indices of primed zero or null */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java index 346f3df51834..a02085e2cd19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hbase.util; +import java.util.concurrent.ThreadFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper; +import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.ServerChannel; @@ -27,11 +33,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioServerSocketChannel; import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel; import org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultThreadFactory; -import java.util.concurrent.ThreadFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper; -import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper; -import org.apache.yetus.audience.InterfaceAudience; /** * Event loop group related config. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java index 9fdf7ea74b33..764a1f105803 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,9 +33,9 @@ private OOMEChecker() { public static boolean exitIfOOME(Throwable e, String service) { boolean stop = false; try { - if (e instanceof OutOfMemoryError || - (e.getCause() != null && e.getCause() instanceof OutOfMemoryError) || - (e.getMessage() != null && e.getMessage().contains("java.lang.OutOfMemoryError"))) { + if (e instanceof OutOfMemoryError + || (e.getCause() != null && e.getCause() instanceof OutOfMemoryError) + || (e.getMessage() != null && e.getMessage().contains("java.lang.OutOfMemoryError"))) { stop = true; LOG.error(HBaseMarkers.FATAL, "Run out of memory; {} will abort itself immediately", service, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 286caf8ed3b0..2a8ace0ae79a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.BufferedInputStream; @@ -171,8 +169,8 @@ private static Configuration createConf() { } /** - * @param hostname Hostname to unload regions from or load regions to. Can be either hostname - * or hostname:port. + * @param hostname Hostname to unload regions from or load regions to. Can be either hostname or + * hostname:port. * @param conf Configuration object */ public RegionMoverBuilder(String hostname, Configuration conf) { @@ -184,7 +182,7 @@ public RegionMoverBuilder(String hostname, Configuration conf) { this.port = conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT); } this.filename = defaultDir + File.separator + System.getProperty("user.name") + this.hostname - + ":" + Integer.toString(this.port); + + ":" + Integer.toString(this.port); this.conf = conf; } @@ -257,9 +255,7 @@ public RegionMoverBuilder timeout(int timeout) { } /** - * Set specific rackManager implementation. - * This setter method is for testing purpose only. - * + * Set specific rackManager implementation. This setter method is for testing purpose only. * @param rackManager rackManager impl * @return RegionMoverBuilder object */ @@ -333,13 +329,11 @@ private Optional getMetaRegionInfoIfToBeMoved(List regio return regionsToMove.stream().filter(RegionInfo::isMetaRegion).findFirst(); } - private void loadRegions(List regionsToMove) - throws Exception { + private void loadRegions(List regionsToMove) throws Exception { ServerName server = getTargetServer(); List movedRegions = Collections.synchronizedList(new ArrayList<>()); - LOG.info( - "Moving " + regionsToMove.size() + " regions to " + server + " using " + this.maxthreads - + " threads.Ack mode:" + this.ack); + LOG.info("Moving " + regionsToMove.size() + " regions to " + server + " using " + + this.maxthreads + " threads.Ack mode:" + this.ack); final ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads); List> taskList = new ArrayList<>(); @@ -349,30 +343,30 @@ private void loadRegions(List regionsToMove) ServerName currentServer = MoveWithAck.getServerNameForRegion(region, admin, conn); if (currentServer == null) { LOG.warn( - "Could not get server for Region:" + region.getRegionNameAsString() + " moving on"); + "Could not get server for Region:" + region.getRegionNameAsString() + " moving on"); counter++; continue; } else if (server.equals(currentServer)) { LOG.info( - "Region " + region.getRegionNameAsString() + " is already on target server=" + server); + "Region " + region.getRegionNameAsString() + " is already on target server=" + server); counter++; continue; } if (ack) { Future task = moveRegionsPool - .submit(new MoveWithAck(conn, region, currentServer, server, movedRegions)); + .submit(new MoveWithAck(conn, region, currentServer, server, movedRegions)); taskList.add(task); } else { Future task = moveRegionsPool - .submit(new MoveWithoutAck(admin, region, currentServer, server, movedRegions)); + .submit(new MoveWithoutAck(admin, region, currentServer, server, movedRegions)); taskList.add(task); } counter++; } moveRegionsPool.shutdown(); - long timeoutInSeconds = regionsToMove.size() * admin.getConfiguration() - .getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); + long timeoutInSeconds = regionsToMove.size() + * 
admin.getConfiguration().getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); waitMoveTasksToFinish(moveRegionsPool, taskList, timeoutInSeconds); } @@ -382,7 +376,6 @@ private void loadRegions(List regionsToMove) * server,hence it is best effort.We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions * to hostnames provided in {@link #designatedFile} - * * @return true if unloading succeeded, false otherwise */ public boolean unload() throws InterruptedException, ExecutionException, TimeoutException { @@ -394,10 +387,9 @@ public boolean unload() throws InterruptedException, ExecutionException, Timeout * noAck mode we do not make sure that region is successfully online on the target region * server,hence it is best effort.We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions - * to hostnames provided in {@link #designatedFile}. - * While unloading regions, destination RegionServers are selected from different rack i.e - * regions should not move to any RegionServers that belong to same rack as source RegionServer. - * + * to hostnames provided in {@link #designatedFile}. While unloading regions, destination + * RegionServers are selected from different rack i.e regions should not move to any RegionServers + * that belong to same rack as source RegionServer. * @return true if unloading succeeded, false otherwise */ public boolean unloadFromRack() @@ -405,8 +397,8 @@ public boolean unloadFromRack() return unloadRegions(true); } - private boolean unloadRegions(boolean unloadFromRack) throws InterruptedException, - ExecutionException, TimeoutException { + private boolean unloadRegions(boolean unloadFromRack) + throws InterruptedException, ExecutionException, TimeoutException { deleteFile(this.filename); ExecutorService unloadPool = Executors.newFixedThreadPool(1); Future unloadTask = unloadPool.submit(() -> { @@ -421,7 +413,7 @@ private boolean unloadRegions(boolean unloadFromRack) throws InterruptedExceptio ServerName server = stripServer(regionServers, hostname, port); if (server == null) { LOG.info("Could not find server '{}:{}' in the set of region servers. giving up.", - hostname, port); + hostname, port); LOG.debug("List of region servers: {}", regionServers); return false; } @@ -452,8 +444,9 @@ private boolean unloadRegions(boolean unloadFromRack) throws InterruptedExceptio Set decommissionedRS = new HashSet<>(admin.listDecommissionedRegionServers()); if (CollectionUtils.isNotEmpty(decommissionedRS)) { regionServers.removeIf(decommissionedRS::contains); - LOG.debug("Excluded RegionServers from unloading regions to because they " + - "are marked as decommissioned. Servers: {}", decommissionedRS); + LOG.debug("Excluded RegionServers from unloading regions to because they " + + "are marked as decommissioned. 
Servers: {}", + decommissionedRS); } stripMaster(regionServers); @@ -517,27 +510,25 @@ private void unloadRegions(ServerName server, List regionServers, } private void submitRegionMovesWhileUnloading(ServerName server, List regionServers, - List movedRegions, List regionsToMove) throws Exception { + List movedRegions, List regionsToMove) throws Exception { final ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads); List> taskList = new ArrayList<>(); int serverIndex = 0; for (RegionInfo regionToMove : regionsToMove) { if (ack) { - Future task = moveRegionsPool.submit( - new MoveWithAck(conn, regionToMove, server, regionServers.get(serverIndex), - movedRegions)); + Future task = moveRegionsPool.submit(new MoveWithAck(conn, regionToMove, server, + regionServers.get(serverIndex), movedRegions)); taskList.add(task); } else { - Future task = moveRegionsPool.submit( - new MoveWithoutAck(admin, regionToMove, server, regionServers.get(serverIndex), - movedRegions)); + Future task = moveRegionsPool.submit(new MoveWithoutAck(admin, regionToMove, + server, regionServers.get(serverIndex), movedRegions)); taskList.add(task); } serverIndex = (serverIndex + 1) % regionServers.size(); } moveRegionsPool.shutdown(); - long timeoutInSeconds = regionsToMove.size() * admin.getConfiguration() - .getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); + long timeoutInSeconds = regionsToMove.size() + * admin.getConfiguration().getLong(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); waitMoveTasksToFinish(moveRegionsPool, taskList, timeoutInSeconds); } @@ -546,9 +537,8 @@ private boolean waitTaskToFinish(ExecutorService pool, Future task, Str pool.shutdown(); try { if (!pool.awaitTermination((long) this.timeout, TimeUnit.SECONDS)) { - LOG.warn( - "Timed out before finishing the " + operation + " operation. Timeout: " + this.timeout - + "sec"); + LOG.warn("Timed out before finishing the " + operation + " operation. Timeout: " + + this.timeout + "sec"); pool.shutdownNow(); } } catch (InterruptedException e) { @@ -596,7 +586,8 @@ private void waitMoveTasksToFinish(ExecutorService moveRegionsPool, } } catch (CancellationException e) { LOG.error("Thread for moving region cancelled. 
Timeout for cancellation:" + timeoutInSeconds - + "secs", e); + + "secs", + e); throw e; } } @@ -609,10 +600,10 @@ private boolean ignoreRegionMoveFailure(ExecutionException e) { ignoreFailure = true; } else if (e.getCause() instanceof DoNotRetryRegionException && e.getCause().getMessage() != null && e.getCause().getMessage() - .contains(AssignmentManager.UNEXPECTED_STATE_REGION + "state=SPLIT,")) { - // region is recently split - ignoreFailure = true; - } + .contains(AssignmentManager.UNEXPECTED_STATE_REGION + "state=SPLIT,")) { + // region is recently split + ignoreFailure = true; + } return ignoreFailure; } @@ -650,8 +641,8 @@ private List readRegionsFromFile(String filename) throws IOException if (!f.exists()) { return regions; } - try (DataInputStream dis = new DataInputStream( - new BufferedInputStream(new FileInputStream(f)))) { + try (DataInputStream dis = + new DataInputStream(new BufferedInputStream(new FileInputStream(f)))) { int numRegions = dis.readInt(); int index = 0; while (index < numRegions) { @@ -670,16 +661,16 @@ private List readRegionsFromFile(String filename) throws IOException * lines */ private void writeFile(String filename, List movedRegions) throws IOException { - try (DataOutputStream dos = new DataOutputStream( - new BufferedOutputStream(new FileOutputStream(filename)))) { + try (DataOutputStream dos = + new DataOutputStream(new BufferedOutputStream(new FileOutputStream(filename)))) { dos.writeInt(movedRegions.size()); for (RegionInfo region : movedRegions) { Bytes.writeByteArray(dos, RegionInfo.toByteArray(region)); } } catch (IOException e) { - LOG.error( - "ERROR: Was Not able to write regions moved to output file but moved " + movedRegions - .size() + " regions", e); + LOG.error("ERROR: Was Not able to write regions moved to output file but moved " + + movedRegions.size() + " regions", + e); throw e; } } @@ -700,8 +691,8 @@ private List readServersFromFile(String filename) throws IOException { if (filename != null) { try { Files.readAllLines(Paths.get(filename)).stream().map(String::trim) - .filter(((Predicate) String::isEmpty).negate()).map(String::toLowerCase) - .forEach(servers::add); + .filter(((Predicate) String::isEmpty).negate()).map(String::toLowerCase) + .forEach(servers::add); } catch (IOException e) { LOG.error("Exception while reading servers from file,", e); throw e; @@ -711,15 +702,14 @@ private List readServersFromFile(String filename) throws IOException { } /** - * Designates or excludes the servername whose hostname and port portion matches the list given - * in the file. - * Example:
      + * Designates or excludes the servername whose hostname and port portion matches the list given in + * the file. Example:
      * If you want to designated RSs, suppose designatedFile has RS1, regionServers has RS1, RS2 and - * RS3. When we call includeExcludeRegionServers(designatedFile, regionServers, true), RS2 and - * RS3 are removed from regionServers list so that regions can move to only RS1. - * If you want to exclude RSs, suppose excludeFile has RS1, regionServers has RS1, RS2 and RS3. - * When we call includeExcludeRegionServers(excludeFile, servers, false), RS1 is removed from - * regionServers list so that regions can move to only RS2 and RS3. + * RS3. When we call includeExcludeRegionServers(designatedFile, regionServers, true), RS2 and RS3 + * are removed from regionServers list so that regions can move to only RS1. If you want to + * exclude RSs, suppose excludeFile has RS1, regionServers has RS1, RS2 and RS3. When we call + * includeExcludeRegionServers(excludeFile, servers, false), RS1 is removed from regionServers + * list so that regions can move to only RS2 and RS3. */ private void includeExcludeRegionServers(String fileName, List regionServers, boolean isInclude) throws IOException { @@ -732,8 +722,8 @@ private void includeExcludeRegionServers(String fileName, List regio Iterator i = regionServers.iterator(); while (i.hasNext()) { String rs = i.next().getServerName(); - String rsPort = rs.split(ServerName.SERVERNAME_SEPARATOR)[0].toLowerCase() + ":" + rs - .split(ServerName.SERVERNAME_SEPARATOR)[1]; + String rsPort = rs.split(ServerName.SERVERNAME_SEPARATOR)[0].toLowerCase() + ":" + + rs.split(ServerName.SERVERNAME_SEPARATOR)[1]; if (isInclude != servers.contains(rsPort)) { i.remove(); } @@ -757,8 +747,8 @@ private void stripMaster(List regionServers) throws IOException { private ServerName stripServer(List regionServers, String hostname, int port) { for (Iterator iter = regionServers.iterator(); iter.hasNext();) { ServerName server = iter.next(); - if (server.getAddress().getHostName().equalsIgnoreCase(hostname) && - server.getAddress().getPort() == port) { + if (server.getAddress().getHostName().equalsIgnoreCase(hostname) + && server.getAddress().getPort() == port) { iter.remove(); return server; } @@ -771,20 +761,20 @@ protected void addOptions() { this.addRequiredOptWithArg("r", "regionserverhost", "region server |"); this.addRequiredOptWithArg("o", "operation", "Expected: load/unload/unload_from_rack"); this.addOptWithArg("m", "maxthreads", - "Define the maximum number of threads to use to unload and reload the regions"); + "Define the maximum number of threads to use to unload and reload the regions"); this.addOptWithArg("x", "excludefile", - "File with per line to exclude as unload targets; default excludes only " - + "target host; useful for rack decommisioning."); - this.addOptWithArg("d","designatedfile","File with per line as unload targets;" - + "default is all online hosts"); + "File with per line to exclude as unload targets; default excludes only " + + "target host; useful for rack decommisioning."); + this.addOptWithArg("d", "designatedfile", + "File with per line as unload targets;" + "default is all online hosts"); this.addOptWithArg("f", "filename", - "File to save regions list into unloading, or read from loading; " - + "default /tmp/"); + "File to save regions list into unloading, or read from loading; " + + "default /tmp/"); this.addOptNoArg("n", "noack", - "Turn on No-Ack mode(default: false) which won't check if region is online on target " - + "RegionServer, hence best effort. 
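// A rough sketch of driving RegionMover programmatically rather than through the CLI
// flags listed in addOptions() above. RegionMoverBuilder(hostname, conf), timeout(int)
// and unload() appear in this file; build() and the server name "rs1.example.com:16020"
// are assumptions made purely for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.RegionMover;

public class RegionMoverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    RegionMover mover =
      new RegionMover.RegionMoverBuilder("rs1.example.com:16020", conf)
        .timeout(300)   // overall timeout in seconds, as documented above
        .build();       // build() assumed to return the configured RegionMover
    // Drain every region off the given server (e.g. ahead of a restart); moved regions
    // are recorded in the default file under the temp directory so they can be loaded back.
    boolean unloaded = mover.unload();
    System.out.println("unload succeeded: " + unloaded);
  }
}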
This is more performant in unloading and loading " - + "but might lead to region being unavailable for some time till master reassigns it " - + "in case the move failed"); + "Turn on No-Ack mode(default: false) which won't check if region is online on target " + + "RegionServer, hence best effort. This is more performant in unloading and loading " + + "but might lead to region being unavailable for some time till master reassigns it " + + "in case the move failed"); this.addOptWithArg("t", "timeout", "timeout in seconds after which the tool will exit " + "irrespective of whether it finished or not;default Integer.MAX_VALUE"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java index 0f75b0e9bd5b..3515631d7d3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,30 +24,23 @@ import java.util.Map.Entry; import java.util.TreeMap; import java.util.TreeSet; - +import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap; /** - * This is a generic region split calculator. It requires Ranges that provide - * start, end, and a comparator. It works in two phases -- the first adds ranges - * and rejects backwards ranges. Then one calls calcRegions to generate the - * multimap that has a start split key as a key and possibly multiple Ranges as - * members. - * - * To traverse, one normally would get the split set, and iterate through the - * calcRegions. Normal regions would have only one entry, holes would have zero, - * and any overlaps would have multiple entries. - * - * The interface is a bit cumbersome currently but is exposed this way so that - * clients can choose how to iterate through the region splits. - * + * This is a generic region split calculator. It requires Ranges that provide start, end, and a + * comparator. It works in two phases -- the first adds ranges and rejects backwards ranges. Then + * one calls calcRegions to generate the multimap that has a start split key as a key and possibly + * multiple Ranges as members. To traverse, one normally would get the split set, and iterate + * through the calcRegions. Normal regions would have only one entry, holes would have zero, and any + * overlaps would have multiple entries. The interface is a bit cumbersome currently but is exposed + * this way so that clients can choose how to iterate through the region splits. * @param */ @InterfaceAudience.Private @@ -57,17 +49,14 @@ public class RegionSplitCalculator { private final Comparator rangeCmp; /** - * This contains a sorted set of all the possible split points - * - * Invariant: once populated this has 0 entries if empty or at most n+1 values - * where n == number of added ranges. 
+ * This contains a sorted set of all the possible split points Invariant: once populated this has + * 0 entries if empty or at most n+1 values where n == number of added ranges. */ private final TreeSet splits = new TreeSet<>(BYTES_COMPARATOR); /** - * This is a map from start key to regions with the same start key. - * - * Invariant: This always have n values in total + * This is a map from start key to regions with the same start key. Invariant: This always have n + * values in total */ private final Multimap starts = ArrayListMultimap.create(); @@ -83,19 +72,15 @@ public RegionSplitCalculator(Comparator cmp) { public final static Comparator BYTES_COMPARATOR = new ByteArrayComparator() { @Override public int compare(byte[] l, byte[] r) { - if (l == null && r == null) - return 0; - if (l == null) - return 1; - if (r == null) - return -1; + if (l == null && r == null) return 0; + if (l == null) return 1; + if (r == null) return -1; return super.compare(l, r); } }; /** * SPECIAL CASE wrapper for empty end key - * * @return ENDKEY if end key is empty, else normal endkey. */ private static byte[] specialEndKey(R range) { @@ -108,7 +93,6 @@ private static byte[] specialEndKey(R range) { /** * Adds an edge to the split calculator - * * @return true if is included, false if backwards/invalid */ public boolean add(R range) { @@ -118,8 +102,8 @@ public boolean add(R range) { // No need to use Arrays.equals because ENDKEY is null if (end != ENDKEY && Bytes.compareTo(start, end) > 0) { // don't allow backwards edges - LOG.debug("attempted to add backwards edge: " + Bytes.toString(start) - + " " + Bytes.toString(end)); + LOG.debug( + "attempted to add backwards edge: " + Bytes.toString(start) + " " + Bytes.toString(end)); return false; } @@ -130,16 +114,13 @@ public boolean add(R range) { } /** - * Generates a coverage multimap from split key to Regions that start with the - * split key. - * + * Generates a coverage multimap from split key to Regions that start with the split key. * @return coverage multimap */ public Multimap calcCoverage() { // This needs to be sorted to force the use of the comparator on the values, // otherwise byte array comparison isn't used - Multimap regions = TreeMultimap.create(BYTES_COMPARATOR, - rangeCmp); + Multimap regions = TreeMultimap.create(BYTES_COMPARATOR, rangeCmp); // march through all splits from the start points for (Entry> start : starts.asMap().entrySet()) { @@ -147,8 +128,7 @@ public Multimap calcCoverage() { for (R r : start.getValue()) { regions.put(key, r); - for (byte[] coveredSplit : splits.subSet(r.getStartKey(), - specialEndKey(r))) { + for (byte[] coveredSplit : splits.subSet(r.getStartKey(), specialEndKey(r))) { regions.put(coveredSplit, r); } } @@ -165,31 +145,28 @@ public Multimap getStarts() { } /** - * Find specified number of top ranges in a big overlap group. - * It could return less if there are not that many top ranges. - * Once these top ranges are excluded, the big overlap group will - * be broken into ranges with no overlapping, or smaller overlapped - * groups, and most likely some holes. - * + * Find specified number of top ranges in a big overlap group. It could return less if there are + * not that many top ranges. Once these top ranges are excluded, the big overlap group will be + * broken into ranges with no overlapping, or smaller overlapped groups, and most likely some + * holes. 
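// A minimal sketch of the two-phase usage described above: add() ranges first, then
// calcCoverage() to get the multimap from split point to the ranges covering it. Only
// add(), calcCoverage() and the Comparator-taking constructor appear in this file; the
// KeyRange bound and the SimpleRange helper below are assumptions for illustration.
import java.util.Comparator;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.KeyRange;
import org.apache.hadoop.hbase.util.RegionSplitCalculator;
import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;

public class RegionSplitCalculatorSketch {
  /** Hypothetical range type; anything exposing start and end keys works. */
  static final class SimpleRange implements KeyRange {
    private final byte[] start;
    private final byte[] end;
    SimpleRange(String start, String end) {
      this.start = Bytes.toBytes(start);
      this.end = Bytes.toBytes(end);
    }
    @Override
    public byte[] getStartKey() {
      return start;
    }
    @Override
    public byte[] getEndKey() {
      return end;
    }
  }

  public static void main(String[] args) {
    Comparator<SimpleRange> cmp = (a, b) -> Bytes.compareTo(a.getStartKey(), b.getStartKey());
    RegionSplitCalculator<SimpleRange> calc = new RegionSplitCalculator<>(cmp);
    calc.add(new SimpleRange("a", "c"));
    calc.add(new SimpleRange("b", "e")); // overlaps the first range
    calc.add(new SimpleRange("e", "g")); // contiguous, no overlap
    // One key per split point; an overlap shows up as multiple values under one key.
    Multimap<byte[], SimpleRange> coverage = calc.calcCoverage();
    System.out.println("split points: " + coverage.keySet().size());
  }
}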
* @param bigOverlap a list of ranges that overlap with each other * @param count the max number of ranges to find * @return a list of ranges that overlap with most others */ - public static List - findBigRanges(Collection bigOverlap, int count) { + public static List findBigRanges(Collection bigOverlap, int count) { List bigRanges = new ArrayList<>(); // The key is the count of overlaps, // The value is a list of ranges that have that many overlaps TreeMap> overlapRangeMap = new TreeMap<>(); - for (R r: bigOverlap) { + for (R r : bigOverlap) { // Calculates the # of overlaps for each region // and populates rangeOverlapMap byte[] startKey = r.getStartKey(); byte[] endKey = specialEndKey(r); int overlappedRegions = 0; - for (R rr: bigOverlap) { + for (R rr : bigOverlap) { byte[] start = rr.getStartKey(); byte[] end = specialEndKey(rr); @@ -213,7 +190,7 @@ public Multimap getStarts() { } } int toBeAdded = count; - for (Integer key: overlapRangeMap.descendingKeySet()) { + for (Integer key : overlapRangeMap.descendingKeySet()) { List chunk = overlapRangeMap.get(key); int chunkSize = chunk.size(); if (chunkSize <= toBeAdded) { @@ -225,7 +202,7 @@ public Multimap getStarts() { // chained, for example: [a, c), [b, e), [d, g), [f h)... // In such a case, sideline the middle chunk will break // the group efficiently. - int start = (chunkSize - toBeAdded)/2; + int start = (chunkSize - toBeAdded) / 2; int end = start + toBeAdded; for (int i = start; i < end; i++) { bigRanges.add(chunk.get(i)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 11bbd210a79d..84984aa4cc42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,10 +66,9 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; /** - * The {@link RegionSplitter} class provides several utilities to help in the - * administration lifecycle for developers who choose to manually split regions - * instead of having HBase handle that automatically. The most useful utilities - * are: + * The {@link RegionSplitter} class provides several utilities to help in the administration + * lifecycle for developers who choose to manually split regions instead of having HBase handle that + * automatically. The most useful utilities are: *

      *

        *
      • Create a table with a specified number of pre-split regions @@ -81,13 +79,13 @@ *

        * Question: How do I turn off automatic splitting?
        * Answer: Automatic splitting is determined by the configuration value - * HConstants.HREGION_MAX_FILESIZE. It is not recommended that you set this - * to Long.MAX_VALUE in case you forget about manual splits. A suggested setting - * is 100GB, which would result in > 1hr major compactions if reached. + * HConstants.HREGION_MAX_FILESIZE. It is not recommended that you set this to Long.MAX_VALUE + * in case you forget about manual splits. A suggested setting is 100GB, which would result in > + * 1hr major compactions if reached. *

        * Question: Why did the original authors decide to manually split?
        - * Answer: Specific workload characteristics of our use case allowed us - * to benefit from a manual split system. + * Answer: Specific workload characteristics of our use case allowed us to benefit from a + * manual split system. *

        *

          *
        • Data (~1k) that would grow instead of being replaced @@ -96,90 +94,77 @@ *
        *

        * Question: Why is manual splitting good for this workload?
        - * Answer: Although automated splitting is not a bad option, there are - * benefits to manual splitting. + * Answer: Although automated splitting is not a bad option, there are benefits to manual + * splitting. *

        *

          - *
        • With growing amounts of data, splits will continually be needed. Since - * you always know exactly what regions you have, long-term debugging and - * profiling is much easier with manual splits. It is hard to trace the logs to - * understand region level problems if it keeps splitting and getting renamed. - *
        • Data offlining bugs + unknown number of split regions == oh crap! If an - * WAL or StoreFile was mistakenly unprocessed by HBase due to a weird bug and - * you notice it a day or so later, you can be assured that the regions - * specified in these files are the same as the current regions and you have - * less headaches trying to restore/replay your data. - *
        • You can finely tune your compaction algorithm. With roughly uniform data - * growth, it's easy to cause split / compaction storms as the regions all - * roughly hit the same data size at the same time. With manual splits, you can - * let staggered, time-based major compactions spread out your network IO load. + *
        • With growing amounts of data, splits will continually be needed. Since you always know + * exactly what regions you have, long-term debugging and profiling is much easier with manual + * splits. It is hard to trace the logs to understand region level problems if it keeps splitting + * and getting renamed. + *
        • Data offlining bugs + unknown number of split regions == oh crap! If a WAL or StoreFile was + * mistakenly unprocessed by HBase due to a weird bug and you notice it a day or so later, you can + * be assured that the regions specified in these files are the same as the current regions and you + * have fewer headaches trying to restore/replay your data. + *
        • You can finely tune your compaction algorithm. With roughly uniform data growth, it's easy to + * cause split / compaction storms as the regions all roughly hit the same data size at the same + * time. With manual splits, you can let staggered, time-based major compactions spread out your + * network IO load. *
        *

        * Question: What's the optimal number of pre-split regions to create?
        * Answer: Mileage will vary depending upon your application. *

        - * The short answer for our application is that we started with 10 pre-split - * regions / server and watched our data growth over time. It's better to err on - * the side of too little regions and rolling split later. + * The short answer for our application is that we started with 10 pre-split regions / server and + * watched our data growth over time. It's better to err on the side of too few regions and + * rolling split later. *

        - * The more complicated answer is that this depends upon the largest storefile - * in your region. With a growing data size, this will get larger over time. You - * want the largest region to be just big enough that the - * {@link org.apache.hadoop.hbase.regionserver.HStore} compact - * selection algorithm only compacts it due to a timed major. If you don't, your - * cluster can be prone to compaction storms as the algorithm decides to run - * major compactions on a large series of regions all at once. Note that - * compaction storms are due to the uniform data growth, not the manual split + * The more complicated answer is that this depends upon the largest storefile in your region. With + * a growing data size, this will get larger over time. You want the largest region to be just big + * enough that the {@link org.apache.hadoop.hbase.regionserver.HStore} compact selection algorithm + * only compacts it due to a timed major. If you don't, your cluster can be prone to compaction + * storms as the algorithm decides to run major compactions on a large series of regions all at + * once. Note that compaction storms are due to the uniform data growth, not the manual split * decision. *

        - * If you pre-split your regions too thin, you can increase the major compaction - * interval by configuring HConstants.MAJOR_COMPACTION_PERIOD. If your data size - * grows too large, use this script to perform a network IO safe rolling split - * of all regions. + * If you pre-split your regions too thin, you can increase the major compaction interval by + * configuring HConstants.MAJOR_COMPACTION_PERIOD. If your data size grows too large, use this + * script to perform a network IO safe rolling split of all regions. */ @InterfaceAudience.Private public class RegionSplitter { private static final Logger LOG = LoggerFactory.getLogger(RegionSplitter.class); /** - * A generic interface for the RegionSplitter code to use for all it's - * functionality. Note that the original authors of this code use - * {@link HexStringSplit} to partition their table and set it as default, but - * provided this for your custom algorithm. To use, create a new derived class + * A generic interface for the RegionSplitter code to use for all it's functionality. Note that + * the original authors of this code use {@link HexStringSplit} to partition their table and set + * it as default, but provided this for your custom algorithm. To use, create a new derived class * from this interface and call {@link RegionSplitter#createPresplitTable} or - * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the - * argument splitClassName giving the name of your class. + * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the argument + * splitClassName giving the name of your class. */ public interface SplitAlgorithm { /** * Split a pre-existing region into 2 regions. - * - * @param start - * first row (inclusive) - * @param end - * last row (exclusive) + * @param start first row (inclusive) + * @param end last row (exclusive) * @return the split row to use */ byte[] split(byte[] start, byte[] end); /** * Split an entire table. - * - * @param numRegions - * number of regions to split the table into - * - * @throws RuntimeException - * user input is validated at this time. may throw a runtime - * exception in response to a parse failure - * @return array of split keys for the initial regions of the table. The - * length of the returned array should be numRegions-1. + * @param numRegions number of regions to split the table into + * @throws RuntimeException user input is validated at this time. may throw a runtime exception + * in response to a parse failure + * @return array of split keys for the initial regions of the table. The length of the returned + * array should be numRegions-1. */ byte[][] split(int numRegions); /** - * Some MapReduce jobs may want to run multiple mappers per region, - * this is intended for such usecase. - * + * Some MapReduce jobs may want to run multiple mappers per region, this is intended for such + * usecase. * @param start first row (inclusive) * @param end last row (exclusive) * @param numSplits number of splits to generate @@ -188,54 +173,44 @@ public interface SplitAlgorithm { byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive); /** - * In HBase, the first row is represented by an empty byte array. This might - * cause problems with your split algorithm or row printing. All your APIs - * will be passed firstRow() instead of empty array. - * + * In HBase, the first row is represented by an empty byte array. This might cause problems with + * your split algorithm or row printing. 
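// A small sketch of the SplitAlgorithm contract described above, using the built-in
// HexStringSplit referenced elsewhere in this class. Constructing it directly with a
// no-arg constructor is an assumption here (newSplitAlgoInstance further down creates it
// the same way, via reflection).
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;

public class SplitAlgorithmSketch {
  public static void main(String[] args) {
    SplitAlgorithm algo = new RegionSplitter.HexStringSplit();
    // split(numRegions) returns numRegions - 1 boundary keys, per the javadoc above.
    byte[][] boundaries = algo.split(4);
    for (byte[] boundary : boundaries) {
      System.out.println(algo.rowToStr(boundary));
    }
    // split(start, end) picks a single split point inside one existing region.
    byte[] mid = algo.split(algo.firstRow(), algo.lastRow());
    System.out.println("midpoint: " + algo.rowToStr(mid));
  }
}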
All your APIs will be passed firstRow() instead of + * empty array. * @return your representation of your first row */ byte[] firstRow(); /** - * In HBase, the last row is represented by an empty byte array. This might - * cause problems with your split algorithm or row printing. All your APIs - * will be passed firstRow() instead of empty array. - * + * In HBase, the last row is represented by an empty byte array. This might cause problems with + * your split algorithm or row printing. All your APIs will be passed firstRow() instead of + * empty array. * @return your representation of your last row */ byte[] lastRow(); /** - * In HBase, the last row is represented by an empty byte array. Set this - * value to help the split code understand how to evenly divide the first - * region. - * - * @param userInput - * raw user input (may throw RuntimeException on parse failure) + * In HBase, the last row is represented by an empty byte array. Set this value to help the + * split code understand how to evenly divide the first region. + * @param userInput raw user input (may throw RuntimeException on parse failure) */ void setFirstRow(String userInput); /** - * In HBase, the last row is represented by an empty byte array. Set this - * value to help the split code understand how to evenly divide the last - * region. Note that this last row is inclusive for all rows sharing the - * same prefix. - * - * @param userInput - * raw user input (may throw RuntimeException on parse failure) + * In HBase, the last row is represented by an empty byte array. Set this value to help the + * split code understand how to evenly divide the last region. Note that this last row is + * inclusive for all rows sharing the same prefix. + * @param userInput raw user input (may throw RuntimeException on parse failure) */ void setLastRow(String userInput); /** - * @param input - * user or file input for row + * @param input user or file input for row * @return byte array representation of this row for HBase */ byte[] strToRow(String input); /** - * @param row - * byte array representing a row in HBase + * @param row byte array representing a row in HBase * @return String to use for debug & file printing */ String rowToStr(byte[] row); @@ -262,46 +237,35 @@ public interface SplitAlgorithm { * The main function for the RegionSplitter application. Common uses: *

        *

          - *
        • create a table named 'myTable' with 60 pre-split regions containing 2 - * column families 'test' & 'rs', assuming the keys are hex-encoded ASCII: + *
        • create a table named 'myTable' with 60 pre-split regions containing 2 column families + * 'test' & 'rs', assuming the keys are hex-encoded ASCII: *
            - *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs - * myTable HexStringSplit + *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs myTable + * HexStringSplit *
          - *
        • create a table named 'myTable' with 50 pre-split regions, - * assuming the keys are decimal-encoded ASCII: + *
        • create a table named 'myTable' with 50 pre-split regions, assuming the keys are + * decimal-encoded ASCII: *
            - *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50 - * myTable DecimalStringSplit + *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50 myTable DecimalStringSplit *
          - *
        • perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2 - * outstanding splits at a time, assuming keys are uniformly distributed - * bytes: + *
        • perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2 outstanding splits at + * a time, assuming keys are uniformly distributed bytes: *
            - *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable - * UniformSplit + *
          • bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable UniformSplit *
          *
        - * - * There are three SplitAlgorithms built into RegionSplitter, HexStringSplit, - * DecimalStringSplit, and UniformSplit. These are different strategies for - * choosing region boundaries. See their source code for details. - * - * @param args - * Usage: RegionSplitter <TABLE> <SPLITALGORITHM> - * <-c <# regions> -f <family:family:...> | -r - * [-o <# outstanding splits>]> - * [-D <conf.param=value>] - * @throws IOException - * HBase IO problem - * @throws InterruptedException - * user requested exit - * @throws ParseException - * problem parsing user input + * There are three SplitAlgorithms built into RegionSplitter, HexStringSplit, DecimalStringSplit, + * and UniformSplit. These are different strategies for choosing region boundaries. See their + * source code for details. + * @param args Usage: RegionSplitter <TABLE> <SPLITALGORITHM> <-c <# regions> + * -f <family:family:...> | -r [-o <# outstanding splits>]> [-D + * <conf.param=value>] + * @throws IOException HBase IO problem + * @throws InterruptedException user requested exit + * @throws ParseException problem parsing user input */ @SuppressWarnings("static-access") - public static void main(String[] args) throws IOException, - InterruptedException, ParseException { + public static void main(String[] args) throws IOException, InterruptedException, ParseException { Configuration conf = HBaseConfiguration.create(); // parse user input @@ -309,25 +273,19 @@ public static void main(String[] args) throws IOException, opt.addOption(OptionBuilder.withArgName("property=value").hasArg() .withDescription("Override HBase Configuration Settings").create("D")); opt.addOption(OptionBuilder.withArgName("region count").hasArg() - .withDescription( - "Create a new table with a pre-split number of regions") - .create("c")); + .withDescription("Create a new table with a pre-split number of regions").create("c")); opt.addOption(OptionBuilder.withArgName("family:family:...").hasArg() - .withDescription( - "Column Families to create with new table. Required with -c") + .withDescription("Column Families to create with new table. Required with -c") .create("f")); opt.addOption("h", false, "Print this usage help"); opt.addOption("r", false, "Perform a rolling split of an existing region"); - opt.addOption(OptionBuilder.withArgName("count").hasArg().withDescription( - "Max outstanding splits that have unfinished major compactions") + opt.addOption(OptionBuilder.withArgName("count").hasArg() + .withDescription("Max outstanding splits that have unfinished major compactions") .create("o")); - opt.addOption(null, "firstrow", true, - "First Row in Table for Split Algorithm"); - opt.addOption(null, "lastrow", true, - "Last Row in Table for Split Algorithm"); - opt.addOption(null, "risky", false, - "Skip verification steps to complete quickly. " - + "STRONGLY DISCOURAGED for production systems. "); + opt.addOption(null, "firstrow", true, "First Row in Table for Split Algorithm"); + opt.addOption(null, "lastrow", true, "Last Row in Table for Split Algorithm"); + opt.addOption(null, "risky", false, "Skip verification steps to complete quickly. " + + "STRONGLY DISCOURAGED for production systems. "); CommandLine cmd = new GnuParser().parse(opt, args); if (cmd.hasOption("D")) { @@ -351,13 +309,14 @@ public static void main(String[] args) throws IOException, boolean oneOperOnly = createTable ^ rollingSplit; if (2 != cmd.getArgList().size() || !oneOperOnly || cmd.hasOption("h")) { - new HelpFormatter().printHelp("bin/hbase regionsplitter
      \n"+ - "SPLITALGORITHM is the java class name of a class implementing " + - "SplitAlgorithm, or one of the special strings HexStringSplit or " + - "DecimalStringSplit or UniformSplit, which are built-in split algorithms. " + - "HexStringSplit treats keys as hexadecimal ASCII, and " + - "DecimalStringSplit treats keys as decimal ASCII, and " + - "UniformSplit treats keys as arbitrary bytes.", opt); + new HelpFormatter().printHelp("bin/hbase regionsplitter
      \n" + + "SPLITALGORITHM is the java class name of a class implementing " + + "SplitAlgorithm, or one of the special strings HexStringSplit or " + + "DecimalStringSplit or UniformSplit, which are built-in split algorithms. " + + "HexStringSplit treats keys as hexadecimal ASCII, and " + + "DecimalStringSplit treats keys as decimal ASCII, and " + + "UniformSplit treats keys as arbitrary bytes.", + opt); return; } TableName tableName = TableName.valueOf(cmd.getArgs()[0]); @@ -385,13 +344,12 @@ public static void main(String[] args) throws IOException, } static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo, - String[] columnFamilies, Configuration conf) - throws IOException, InterruptedException { + String[] columnFamilies, Configuration conf) throws IOException, InterruptedException { final int splitCount = conf.getInt("split.count", 0); Preconditions.checkArgument(splitCount > 1, "Split count must be > 1"); Preconditions.checkArgument(columnFamilies.length > 0, - "Must specify at least one column family. "); + "Must specify at least one column family. "); LOG.debug("Creating table " + tableName + " with " + columnFamilies.length + " column families. Presplitting to " + splitCount + " regions"); @@ -435,14 +393,14 @@ static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo, private static int getRegionServerCount(final Connection connection) throws IOException { try (Admin admin = connection.getAdmin()) { Collection servers = admin.getRegionServers(); - return servers == null || servers.isEmpty()? 0: servers.size(); + return servers == null || servers.isEmpty() ? 0 : servers.size(); } } - private static byte [] readFile(final FileSystem fs, final Path path) throws IOException { + private static byte[] readFile(final FileSystem fs, final Path path) throws IOException { FSDataInputStream tmpIn = fs.open(path); try { - byte [] rawData = new byte[tmpIn.available()]; + byte[] rawData = new byte[tmpIn.available()]; tmpIn.readFully(rawData); return rawData; } finally { @@ -451,7 +409,7 @@ private static int getRegionServerCount(final Connection connection) throws IOEx } static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { final int minOS = conf.getInt("split.outstanding", 2); try (Connection connection = ConnectionFactory.createConnection(conf)) { // Max outstanding splits. default == 50% of servers @@ -475,9 +433,8 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // requests to the same RS can stall the outstanding split queue. // To fix, group the regions into an RS pool and round-robin through it LOG.debug("Bucketing regions by regionserver..."); - TreeMap>> daughterRegions = - Maps.newTreeMap(); - // Get a regionLocator. Need it in below. + TreeMap>> daughterRegions = Maps.newTreeMap(); + // Get a regionLocator. Need it in below. 
try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { for (Pair dr : tmpRegionSet) { ServerName rsLocation = regionLocator.getRegionLocation(dr.getSecond()).getServerName(); @@ -505,7 +462,7 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // Get ServerName to region count mapping final TreeMap rsSizes = Maps.newTreeMap(); List hrls = regionLocator.getAllRegionLocations(); - for (HRegionLocation hrl: hrls) { + for (HRegionLocation hrl : hrls) { ServerName sn = hrl.getServerName(); if (rsSizes.containsKey(sn)) { rsSizes.put(sn, rsSizes.get(sn) + 1); @@ -516,8 +473,8 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // Round-robin through the ServerName list. Choose the lightest-loaded servers // first to keep the master from load-balancing regions as we split. - for (Map.Entry>> daughterRegion : - daughterRegions.entrySet()) { + for (Map.Entry>> daughterRegion : daughterRegions + .entrySet()) { Pair dr = null; ServerName rsLoc = daughterRegion.getKey(); LinkedList> regionList = daughterRegion.getValue(); @@ -534,8 +491,8 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // if this region moved locations ServerName newRs = regionLoc.getServerName(); if (newRs.compareTo(rsLoc) != 0) { - LOG.debug("Region with " + splitAlgo.rowToStr(split) - + " moved to " + newRs + ". Relocating..."); + LOG.debug("Region with " + splitAlgo.rowToStr(split) + " moved to " + newRs + + ". Relocating..."); // relocate it, don't use it right now if (!daughterRegions.containsKey(newRs)) { LinkedList> entry = Lists.newLinkedList(); @@ -550,15 +507,15 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur byte[] sk = regionLoc.getRegion().getStartKey(); if (sk.length != 0) { if (Bytes.equals(split, sk)) { - LOG.debug("Region already split on " - + splitAlgo.rowToStr(split) + ". Skipping this region..."); + LOG.debug("Region already split on " + splitAlgo.rowToStr(split) + + ". Skipping this region..."); ++splitCount; dr = null; continue; } byte[] start = dr.getFirst(); - Preconditions.checkArgument(Bytes.equals(start, sk), splitAlgo - .rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); + Preconditions.checkArgument(Bytes.equals(start, sk), + splitAlgo.rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); } // passed all checks! found a good region @@ -567,8 +524,7 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur if (regionList.isEmpty()) { daughterRegions.remove(rsLoc); } - if (dr == null) - continue; + if (dr == null) continue; // we have a good region, time to split! byte[] split = dr.getSecond(); @@ -600,15 +556,14 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur // mark each finished region as successfully split. for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " " + + splitAlgo.rowToStr(region.getSecond()) + "\n"); splitCount++; if (splitCount % 10 == 0) { - long tDiff = (EnvironmentEdgeManager.currentTime() - startTime) - / splitCount; - LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount - + ". 
Avg Time / Split = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + long tDiff = (EnvironmentEdgeManager.currentTime() - startTime) / splitCount; + LOG.debug( + "STATUS UPDATE: " + splitCount + " / " + origCount + ". Avg Time / Split = " + + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); } } } @@ -616,15 +571,15 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur if (conf.getBoolean("split.verify", true)) { while (!outstanding.isEmpty()) { LOG.debug("Finally Wait for outstanding splits " + outstanding.size()); - LinkedList> finished = splitScan(outstanding, - connection, tableName, splitAlgo); + LinkedList> finished = + splitScan(outstanding, connection, tableName, splitAlgo); if (finished.isEmpty()) { Thread.sleep(30 * 1000); } else { outstanding.removeAll(finished); for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " " + + splitAlgo.rowToStr(region.getSecond()) + "\n"); splitCount++; } LOG.debug("Finally " + finished.size() + " outstanding splits finished"); @@ -634,8 +589,7 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur LOG.debug("All regions have been successfully split!"); } finally { long tDiff = EnvironmentEdgeManager.currentTime() - startTime; - LOG.debug("TOTAL TIME = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + LOG.debug("TOTAL TIME = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); LOG.debug("Splits = " + splitCount); if (0 < splitCount) { LOG.debug("Avg Time / Split = " @@ -651,16 +605,15 @@ static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configur } /** - * @throws IOException if the specified SplitAlgorithm class couldn't be - * instantiated + * @throws IOException if the specified SplitAlgorithm class couldn't be instantiated */ - public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, - String splitClassName) throws IOException { + public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, String splitClassName) + throws IOException { Class splitClass; // For split algorithms builtin to RegionSplitter, the user can specify // their simple class name instead of a fully qualified class name. 
- if(splitClassName.equals(HexStringSplit.class.getSimpleName())) { + if (splitClassName.equals(HexStringSplit.class.getSimpleName())) { splitClass = HexStringSplit.class; } else if (splitClassName.equals(DecimalStringSplit.class.getSimpleName())) { splitClass = DecimalStringSplit.class; @@ -672,12 +625,11 @@ public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, } catch (ClassNotFoundException e) { throw new IOException("Couldn't load split class " + splitClassName, e); } - if(splitClass == null) { + if (splitClass == null) { throw new IOException("Failed loading split class " + splitClassName); } - if(!SplitAlgorithm.class.isAssignableFrom(splitClass)) { - throw new IOException( - "Specified split class doesn't implement SplitAlgorithm"); + if (!SplitAlgorithm.class.isAssignableFrom(splitClass)) { + throw new IOException("Specified split class doesn't implement SplitAlgorithm"); } } try { @@ -687,11 +639,8 @@ public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, } } - static LinkedList> splitScan( - LinkedList> regionList, - final Connection connection, - final TableName tableName, - SplitAlgorithm splitAlgo) + static LinkedList> splitScan(LinkedList> regionList, + final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo) throws IOException, InterruptedException { LinkedList> finished = Lists.newLinkedList(); LinkedList> logicalSplitting = Lists.newLinkedList(); @@ -699,7 +648,7 @@ static LinkedList> splitScan( // Get table info Pair tableDirAndSplitFile = - getTableDirAndSplitFile(connection.getConfiguration(), tableName); + getTableDirAndSplitFile(connection.getConfiguration(), tableName); Path tableDir = tableDirAndSplitFile.getFirst(); FileSystem fs = tableDir.getFileSystem(connection.getConfiguration()); // Clear the cache to forcibly refresh region information @@ -736,11 +685,10 @@ static LinkedList> splitScan( check.add(regionLocator.getRegionLocation(split).getRegion()); for (RegionInfo hri : check.toArray(new RegionInfo[check.size()])) { byte[] sk = hri.getStartKey(); - if (sk.length == 0) - sk = splitAlgo.firstRow(); + if (sk.length == 0) sk = splitAlgo.firstRow(); - HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( - connection.getConfiguration(), fs, tableDir, hri, true); + HRegionFileSystem regionFs = HRegionFileSystem + .openRegionFromFileSystem(connection.getConfiguration(), fs, tableDir, hri, true); // Check every Column Family for that region -- check does not have references. 
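This hunk also touches the verification pass that waits for daughter regions to stop carrying reference files before counting a split as done. A toy rendering of that wait loop, with a fake reference check standing in for the real store-file inspection, could look like the following (all names are invented for the sketch):

    import java.util.HashMap;
    import java.util.LinkedList;
    import java.util.Map;

    final class SplitVerifyLoopDemo {
      public static void main(String[] args) throws InterruptedException {
        // Pretend each daughter region still needs this many scans before its
        // reference files disappear; the real check walks the column families.
        Map<String, Integer> remainingScans = new HashMap<>();
        remainingScans.put("daughterA", 1);
        remainingScans.put("daughterB", 3);
        LinkedList<String> outstanding = new LinkedList<>(remainingScans.keySet());

        while (!outstanding.isEmpty()) {
          // A region is finished once no references remain.
          outstanding.removeIf(r -> remainingScans.merge(r, -1, Integer::sum) <= 0);
          if (!outstanding.isEmpty()) {
            Thread.sleep(10); // the real tool sleeps 30 seconds between scans
          }
        }
        System.out.println("all outstanding splits finished");
      }
    }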
boolean refFound = false; @@ -767,9 +715,8 @@ static LinkedList> splitScan( } } - LOG.debug("Split Scan: " + finished.size() + " finished / " - + logicalSplitting.size() + " split wait / " - + physicalSplitting.size() + " reference wait"); + LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size() + + " split wait / " + physicalSplitting.size() + " reference wait"); return finished; } @@ -782,7 +729,7 @@ static LinkedList> splitScan( * @throws IOException if a remote or network exception occurs */ private static Pair getTableDirAndSplitFile(final Configuration conf, - final TableName tableName) throws IOException { + final TableName tableName) throws IOException { Path hbDir = CommonFSUtils.getRootDir(conf); Path tableDir = CommonFSUtils.getTableDir(hbDir, tableName); Path splitFile = new Path(tableDir, "_balancedSplit"); @@ -790,10 +737,9 @@ private static Pair getTableDirAndSplitFile(final Configuration conf } static LinkedList> getSplits(final Connection connection, - TableName tableName, SplitAlgorithm splitAlgo) - throws IOException { + TableName tableName, SplitAlgorithm splitAlgo) throws IOException { Pair tableDirAndSplitFile = - getTableDirAndSplitFile(connection.getConfiguration(), tableName); + getTableDirAndSplitFile(connection.getConfiguration(), tableName); Path tableDir = tableDirAndSplitFile.getFirst(); Path splitFile = tableDirAndSplitFile.getSecond(); @@ -814,13 +760,11 @@ static LinkedList> getSplits(final Connection connection, tmp = regionLocator.getStartEndKeys(); } Preconditions.checkArgument(tmp.getFirst().length == tmp.getSecond().length, - "Start and End rows should be equivalent"); + "Start and End rows should be equivalent"); for (int i = 0; i < tmp.getFirst().length; ++i) { byte[] start = tmp.getFirst()[i], end = tmp.getSecond()[i]; - if (start.length == 0) - start = splitAlgo.firstRow(); - if (end.length == 0) - end = splitAlgo.lastRow(); + if (start.length == 0) start = splitAlgo.firstRow(); + if (end.length == 0) end = splitAlgo.lastRow(); rows.add(Pair.newPair(start, end)); } LOG.debug("Table " + tableName + " has " + rows.size() + " regions that will be split."); @@ -835,10 +779,9 @@ static LinkedList> getSplits(final Connection connection, String startStr = splitAlgo.rowToStr(r.getFirst()); String splitStr = splitAlgo.rowToStr(splitPoint); daughterRegions.add(Pair.newPair(startStr, splitStr)); - LOG.debug("Will Split [" + startStr + " , " - + splitAlgo.rowToStr(r.getSecond()) + ") at " + splitStr); - tmpOut.writeChars("+ " + startStr + splitAlgo.separator() + splitStr - + "\n"); + LOG.debug("Will Split [" + startStr + " , " + splitAlgo.rowToStr(r.getSecond()) + ") at " + + splitStr); + tmpOut.writeChars("+ " + startStr + splitAlgo.separator() + splitStr + "\n"); } tmpOut.close(); fs.rename(tmpFile, splitFile); @@ -866,10 +809,8 @@ static LinkedList> getSplits(final Connection connection, daughterRegions.add(r); } else { LOG.debug("Removing: " + r); - Preconditions.checkArgument(cmd[0].equals("-"), - "Unknown option: " + cmd[0]); - Preconditions.checkState(daughterRegions.contains(r), - "Missing row: " + r); + Preconditions.checkArgument(cmd[0].equals("-"), "Unknown option: " + cmd[0]); + Preconditions.checkState(daughterRegions.contains(r), "Missing row: " + r); daughterRegions.remove(r); } } @@ -877,22 +818,18 @@ static LinkedList> getSplits(final Connection connection, } LinkedList> ret = Lists.newLinkedList(); for (Pair r : daughterRegions) { - ret.add(Pair.newPair(splitAlgo.strToRow(r.getFirst()), splitAlgo - 
.strToRow(r.getSecond()))); + ret.add(Pair.newPair(splitAlgo.strToRow(r.getFirst()), splitAlgo.strToRow(r.getSecond()))); } return ret; } /** - * HexStringSplit is a well-known {@link SplitAlgorithm} for choosing region - * boundaries. The format of a HexStringSplit region boundary is the ASCII - * representation of an MD5 checksum, or any other uniformly distributed - * hexadecimal value. Row are hex-encoded long values in the range - * "00000000" => "FFFFFFFF" and are left-padded with zeros to keep the - * same order lexicographically as if they were binary. - * - * Since this split algorithm uses hex strings as keys, it is easy to read & - * write in the shell but takes up more space and may be non-intuitive. + * HexStringSplit is a well-known {@link SplitAlgorithm} for choosing region boundaries. The + * format of a HexStringSplit region boundary is the ASCII representation of an MD5 checksum, or + * any other uniformly distributed hexadecimal value. Row are hex-encoded long values in the range + * "00000000" => "FFFFFFFF" and are left-padded with zeros to keep the same order + * lexicographically as if they were binary. Since this split algorithm uses hex strings as keys, + * it is easy to read & write in the shell but takes up more space and may be non-intuitive. */ public static class HexStringSplit extends NumberStringSplit { final static String DEFAULT_MIN_HEX = "00000000"; @@ -906,11 +843,10 @@ public HexStringSplit() { } /** - * The format of a DecimalStringSplit region boundary is the ASCII representation of - * reversed sequential number, or any other uniformly distributed decimal value. - * Row are decimal-encoded long values in the range - * "00000000" => "99999999" and are left-padded with zeros to keep the - * same order lexicographically as if they were binary. + * The format of a DecimalStringSplit region boundary is the ASCII representation of reversed + * sequential number, or any other uniformly distributed decimal value. Row are decimal-encoded + * long values in the range "00000000" => "99999999" and are left-padded with zeros to + * keep the same order lexicographically as if they were binary. */ public static class DecimalStringSplit extends NumberStringSplit { final static String DEFAULT_MIN_DEC = "00000000"; @@ -952,20 +888,18 @@ public byte[] split(byte[] start, byte[] end) { @Override public byte[][] split(int n) { Preconditions.checkArgument(lastRowInt.compareTo(firstRowInt) > 0, - "last row (%s) is configured less than first row (%s)", lastRow, - firstRow); + "last row (%s) is configured less than first row (%s)", lastRow, firstRow); // +1 to range because the last row is inclusive BigInteger range = lastRowInt.subtract(firstRowInt).add(BigInteger.ONE); Preconditions.checkState(range.compareTo(BigInteger.valueOf(n)) >= 0, - "split granularity (%s) is greater than the range (%s)", n, range); + "split granularity (%s) is greater than the range (%s)", n, range); BigInteger[] splits = new BigInteger[n - 1]; BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(n)); for (int i = 1; i < n; i++) { // NOTE: this means the last region gets all the slop. 
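The HexStringSplit and DecimalStringSplit javadoc above describes region boundaries as zero-padded, uniformly spaced values over a fixed numeric range. As a rough, stand-alone illustration of how such evenly spaced split points can be derived with BigInteger (an approximation of the split(int) logic, not the HBase class itself):

    import java.math.BigInteger;

    final class HexSplitDemo {
      // Compute n-1 evenly spaced boundaries between "00000000" and "FFFFFFFF",
      // left-padded to 8 hex characters so lexicographic order matches numeric order.
      static String[] splits(int n) {
        BigInteger first = new BigInteger("00000000", 16);
        BigInteger last = new BigInteger("FFFFFFFF", 16);
        BigInteger range = last.subtract(first).add(BigInteger.ONE); // last row is inclusive
        BigInteger step = range.divide(BigInteger.valueOf(n));
        String[] out = new String[n - 1];
        for (int i = 1; i < n; i++) {
          out[i - 1] = String.format("%08X", first.add(step.multiply(BigInteger.valueOf(i))));
        }
        return out;
      }

      public static void main(String[] args) {
        for (String boundary : splits(4)) {
          System.out.println(boundary); // 40000000, 80000000, C0000000
        }
      }
    }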
// This is not a big deal if we're assuming n << MAXHEX - splits[i - 1] = firstRowInt.add(sizeOfEachSplit.multiply(BigInteger - .valueOf(i))); + splits[i - 1] = firstRowInt.add(sizeOfEachSplit.multiply(BigInteger.valueOf(i))); } return convertToBytes(splits); } @@ -976,20 +910,18 @@ public byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive BigInteger e = convertToBigInteger(end); Preconditions.checkArgument(e.compareTo(s) > 0, - "last row (%s) is configured less than first row (%s)", rowToStr(end), - end); + "last row (%s) is configured less than first row (%s)", rowToStr(end), end); // +1 to range because the last row is inclusive BigInteger range = e.subtract(s).add(BigInteger.ONE); Preconditions.checkState(range.compareTo(BigInteger.valueOf(numSplits)) >= 0, - "split granularity (%s) is greater than the range (%s)", numSplits, range); + "split granularity (%s) is greater than the range (%s)", numSplits, range); BigInteger[] splits = new BigInteger[numSplits - 1]; BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(numSplits)); for (int i = 1; i < numSplits; i++) { // NOTE: this means the last region gets all the slop. // This is not a big deal if we're assuming n << MAXHEX - splits[i - 1] = s.add(sizeOfEachSplit.multiply(BigInteger - .valueOf(i))); + splits[i - 1] = s.add(sizeOfEachSplit.multiply(BigInteger.valueOf(i))); } if (inclusive) { @@ -1054,7 +986,6 @@ public void setLastRow(byte[] userInput) { /** * Divide 2 numbers in half (for split algorithm) - * * @param a number #1 * @param b number #2 * @return the midpoint of the 2 numbers @@ -1065,7 +996,6 @@ public BigInteger split2(BigInteger a, BigInteger b) { /** * Returns an array of bytes corresponding to an array of BigIntegers - * * @param bigIntegers numbers to convert * @return bytes corresponding to the bigIntegers */ @@ -1079,7 +1009,6 @@ public byte[][] convertToBytes(BigInteger[] bigIntegers) { /** * Returns the bytes corresponding to the BigInteger - * * @param bigInteger number to convert * @param pad padding length * @return byte corresponding to input BigInteger @@ -1092,7 +1021,6 @@ public byte[] convertToByte(BigInteger bigInteger, int pad) { /** * Returns the bytes corresponding to the BigInteger - * * @param bigInteger number to convert * @return corresponding bytes */ @@ -1102,35 +1030,32 @@ public byte[] convertToByte(BigInteger bigInteger) { /** * Returns the BigInteger represented by the byte array - * * @param row byte array representing row * @return the corresponding BigInteger */ public BigInteger convertToBigInteger(byte[] row) { - return (row.length > 0) ? new BigInteger(Bytes.toString(row), radix) - : BigInteger.ZERO; + return (row.length > 0) ? new BigInteger(Bytes.toString(row), radix) : BigInteger.ZERO; } @Override public String toString() { - return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) - + "," + rowToStr(lastRow()) + "]"; + return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) + "," + + rowToStr(lastRow()) + "]"; } } /** - * A SplitAlgorithm that divides the space of possible keys evenly. Useful - * when the keys are approximately uniform random bytes (e.g. hashes). Rows - * are raw byte values in the range 00 => FF and are right-padded with - * zeros to keep the same memcmp() order. This is the natural algorithm to use - * for a byte[] environment and saves space, but is not necessarily the + * A SplitAlgorithm that divides the space of possible keys evenly. Useful when the keys are + * approximately uniform random bytes (e.g. 
hashes). Rows are raw byte values in the range 00 + * => FF and are right-padded with zeros to keep the same memcmp() order. This is the + * natural algorithm to use for a byte[] environment and saves space, but is not necessarily the * easiest for readability. */ public static class UniformSplit implements SplitAlgorithm { static final byte xFF = (byte) 0xFF; byte[] firstRowBytes = ArrayUtils.EMPTY_BYTE_ARRAY; - byte[] lastRowBytes = - new byte[] {xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF}; + byte[] lastRowBytes = new byte[] { xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF }; + @Override public byte[] split(byte[] start, byte[] end) { return Bytes.split(start, end, 1)[1]; @@ -1138,20 +1063,17 @@ public byte[] split(byte[] start, byte[] end) { @Override public byte[][] split(int numRegions) { - Preconditions.checkArgument( - Bytes.compareTo(lastRowBytes, firstRowBytes) > 0, - "last row (%s) is configured less than first row (%s)", - Bytes.toStringBinary(lastRowBytes), - Bytes.toStringBinary(firstRowBytes)); - - byte[][] splits = Bytes.split(firstRowBytes, lastRowBytes, true, - numRegions - 1); + Preconditions.checkArgument(Bytes.compareTo(lastRowBytes, firstRowBytes) > 0, + "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(lastRowBytes), + Bytes.toStringBinary(firstRowBytes)); + + byte[][] splits = Bytes.split(firstRowBytes, lastRowBytes, true, numRegions - 1); Preconditions.checkState(splits != null, - "Could not split region with given user input: " + this); + "Could not split region with given user input: " + this); // remove endpoints, which are included in the splits list - return splits == null? null: Arrays.copyOfRange(splits, 1, splits.length - 1); + return splits == null ? null : Arrays.copyOfRange(splits, 1, splits.length - 1); } @Override @@ -1162,16 +1084,13 @@ public byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive if (Arrays.equals(end, HConstants.EMPTY_BYTE_ARRAY)) { end = lastRowBytes; } - Preconditions.checkArgument( - Bytes.compareTo(end, start) > 0, - "last row (%s) is configured less than first row (%s)", - Bytes.toStringBinary(end), - Bytes.toStringBinary(start)); - - byte[][] splits = Bytes.split(start, end, true, - numSplits - 1); + Preconditions.checkArgument(Bytes.compareTo(end, start) > 0, + "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(end), + Bytes.toStringBinary(start)); + + byte[][] splits = Bytes.split(start, end, true, numSplits - 1); Preconditions.checkState(splits != null, - "Could not calculate input splits with given user input: " + this); + "Could not calculate input splits with given user input: " + this); if (inclusive) { return splits; } else { @@ -1200,7 +1119,6 @@ public void setLastRow(String userInput) { lastRowBytes = Bytes.toBytesBinary(userInput); } - @Override public void setFirstRow(byte[] userInput) { firstRowBytes = userInput; @@ -1228,8 +1146,8 @@ public String separator() { @Override public String toString() { - return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) - + "," + rowToStr(lastRow()) + "]"; + return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) + "," + + rowToStr(lastRow()) + "]"; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java index fb2a95417427..f7c916aa2c16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import org.apache.yetus.audience.InterfaceAudience; /** - * This class maintains mean and variation for any sequence of input provided to it. - * It is initialized with number of rolling periods which basically means the number of past - * inputs whose data will be considered to maintain mean and variation. - * It will use O(N) memory to maintain these statistics, where N is number of look up periods it - * was initialized with. - * If zero is passed during initialization then it will maintain mean and variance from the - * start. It will use O(1) memory only. But note that since it will maintain mean / variance - * from the start the statistics may behave like constants and may ignore short trends. - * All operations are O(1) except the initialization which is O(N). + * This class maintains mean and variation for any sequence of input provided to it. It is + * initialized with number of rolling periods which basically means the number of past inputs whose + * data will be considered to maintain mean and variation. It will use O(N) memory to maintain these + * statistics, where N is number of look up periods it was initialized with. If zero is passed + * during initialization then it will maintain mean and variance from the start. It will use O(1) + * memory only. But note that since it will maintain mean / variance from the start the statistics + * may behave like constants and may ignore short trends. All operations are O(1) except the + * initialization which is O(N). */ @InterfaceAudience.Private public class RollingStatCalculator { @@ -41,7 +38,7 @@ public class RollingStatCalculator { private int rollingPeriod; private int currentIndexPosition; // to be used only if we have non-zero rolling period - private long [] dataValues; + private long[] dataValues; /** * Creates a RollingStatCalculator with given number of rolling periods. 
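RollingStatCalculator, whose javadoc is reflowed above, keeps a running sum and sum of squares over a circular window so that mean and deviation can be read in O(1). A compact stand-alone version of that bookkeeping, assuming a positive rolling period (class and field names are illustrative):

    final class RollingStats {
      private final long[] window; // last `period` samples, overwritten in a circle
      private int next, count;
      private double sum, sqrSum;

      RollingStats(int period) {
        this.window = new long[period]; // this sketch requires period > 0
      }

      void insert(long v) {
        if (count >= window.length) { // evict the oldest sample first
          long old = window[next];
          sum -= old;
          sqrSum -= (double) old * old;
          count--;
        }
        window[next] = v;
        next = (next + 1) % window.length;
        sum += v;
        sqrSum += (double) v * v;
        count++;
      }

      double mean() {
        return sum / count;
      }

      double deviation() {
        double variance = (sqrSum - (sum * sum) / count) / count;
        return Math.sqrt(variance);
      }
    }

The variance here uses the same sum-of-squares identity as the patched getDeviation().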
@@ -63,14 +60,13 @@ public RollingStatCalculator(int rollingPeriod) { public void insertDataValue(long data) { // if current number of data points already equals rolling period and rolling period is // non-zero then remove one data and update the statistics - if(numberOfDataValues >= rollingPeriod && rollingPeriod > 0) { + if (numberOfDataValues >= rollingPeriod && rollingPeriod > 0) { this.removeData(dataValues[currentIndexPosition]); } numberOfDataValues++; - currentSum = currentSum + (double)data; - currentSqrSum = currentSqrSum + ((double)data * data); - if (rollingPeriod >0) - { + currentSum = currentSum + (double) data; + currentSqrSum = currentSqrSum + ((double) data * data); + if (rollingPeriod > 0) { dataValues[currentIndexPosition] = data; currentIndexPosition = (currentIndexPosition + 1) % rollingPeriod; } @@ -81,8 +77,8 @@ public void insertDataValue(long data) { * @param data */ private void removeData(long data) { - currentSum = currentSum - (double)data; - currentSqrSum = currentSqrSum - ((double)data * data); + currentSum = currentSum - (double) data; + currentSqrSum = currentSqrSum - ((double) data * data); numberOfDataValues--; } @@ -90,15 +86,15 @@ private void removeData(long data) { * @return mean of the data values that are in the current list of data values */ public double getMean() { - return this.currentSum / (double)numberOfDataValues; + return this.currentSum / (double) numberOfDataValues; } /** * @return deviation of the data values that are in the current list of data values */ public double getDeviation() { - double variance = (currentSqrSum - (currentSum*currentSum)/(double)(numberOfDataValues))/ - numberOfDataValues; + double variance = (currentSqrSum - (currentSum * currentSum) / (double) (numberOfDataValues)) + / numberOfDataValues; return Math.sqrt(variance); } @@ -106,9 +102,9 @@ public double getDeviation() { * @param size * @return an array of given size initialized with zeros */ - private long [] fillWithZeros(int size) { - long [] zeros = new long [size]; - for (int i=0; iIf enabled, you can also exclude environment variables containing - * certain substrings by setting {@code "hbase.envvars.logging.skipwords"} - * to comma separated list of such substrings. + * Logs information about the currently running JVM process including the environment variables. + * Logging of env vars can be disabled by setting {@code "hbase.envvars.logging.disabled"} to + * {@code "true"}. + *
      + * If enabled, you can also exclude environment variables containing certain substrings by setting + * {@code "hbase.envvars.logging.skipwords"} to comma separated list of such substrings. */ public static void logProcessInfo(Configuration conf) { logHBaseConfigs(conf); @@ -124,16 +119,14 @@ public static void logProcessInfo(Configuration conf) { } } - nextEnv: - for (Entry entry : System.getenv().entrySet()) { + nextEnv: for (Entry entry : System.getenv().entrySet()) { String key = entry.getKey().toLowerCase(Locale.ROOT); String value = entry.getValue().toLowerCase(Locale.ROOT); // exclude variables which may contain skip words - for(String skipWord : skipWords) { - if (key.contains(skipWord) || value.contains(skipWord)) - continue nextEnv; + for (String skipWord : skipWords) { + if (key.contains(skipWord) || value.contains(skipWord)) continue nextEnv; } - LOG.info("env:"+entry); + LOG.info("env:" + entry); } } @@ -142,10 +135,9 @@ public static void logProcessInfo(Configuration conf) { } /** - * Parse and run the given command line. This will exit the JVM with - * the exit code returned from run(). - * If return code is 0, wait for atmost 30 seconds for all non-daemon threads to quit, - * otherwise exit the jvm + * Parse and run the given command line. This will exit the JVM with the exit code returned from + * run(). If return code is 0, wait for atmost 30 seconds for all non-daemon threads + * to quit, otherwise exit the jvm */ public void doMain(String args[]) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index 1844be641a07..213339f8f049 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.io.IOException; @@ -39,15 +38,15 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { /** - * Whether asynchronous WAL replication to the secondary region replicas is enabled or not. - * If this is enabled, a replication peer named "region_replica_replication" will be created - * which will tail the logs and replicate the mutatations to region replicas for tables that - * have region replication > 1. If this is enabled once, disabling this replication also - * requires disabling the replication peer using shell or {@link Admin} java class. - * Replication to secondary region replicas works over standard inter-cluster replication.· + * Whether asynchronous WAL replication to the secondary region replicas is enabled or not. If + * this is enabled, a replication peer named "region_replica_replication" will be created which + * will tail the logs and replicate the mutatations to region replicas for tables that have region + * replication > 1. If this is enabled once, disabling this replication also requires disabling + * the replication peer using shell or {@link Admin} java class. 
Replication to secondary region + * replicas works over standard inter-cluster replication.· */ - public static final String REGION_REPLICA_REPLICATION_CONF_KEY - = "hbase.region.replica.replication.enabled"; + public static final String REGION_REPLICA_REPLICATION_CONF_KEY = + "hbase.region.replica.replication.enabled"; private static final boolean DEFAULT_REGION_REPLICA_REPLICATION = false; /** @@ -59,19 +58,18 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { /** * Same as for {@link #REGION_REPLICA_REPLICATION_CONF_KEY} but for catalog replication. */ - public static final String REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY - = "hbase.region.replica.replication.catalog.enabled"; + public static final String REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY = + "hbase.region.replica.replication.catalog.enabled"; private static final boolean DEFAULT_REGION_REPLICA_REPLICATION_CATALOG = false; - /** * Enables or disables refreshing store files of secondary region replicas when the memory is * above the global memstore lower limit. Refreshing the store files means that we will do a file * list of the primary regions store files, and pick up new files. Also depending on the store * files, we can drop some memstore contents which will free up memory. */ - public static final String REGION_REPLICA_STORE_FILE_REFRESH - = "hbase.region.replica.storefile.refresh"; + public static final String REGION_REPLICA_STORE_FILE_REFRESH = + "hbase.region.replica.storefile.refresh"; private static final boolean DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH = true; /** @@ -79,8 +77,8 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { * region. Default value assumes that for doing the file refresh, the biggest secondary should be * 4 times bigger than the biggest primary. */ - public static final String REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER - = "hbase.region.replica.storefile.refresh.memstore.multiplier"; + public static final String REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER = + "hbase.region.replica.storefile.refresh.memstore.multiplier"; private static final double DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER = 4; /** @@ -100,15 +98,13 @@ public static RegionInfo getRegionInfoForFs(RegionInfo regionInfo) { * @return whether the replica is read only */ public static boolean isReadOnly(HRegion region) { - return region.getTableDescriptor().isReadOnly() - || !isDefaultReplica(region.getRegionInfo()); + return region.getTableDescriptor().isReadOnly() || !isDefaultReplica(region.getRegionInfo()); } /** - * Returns whether to replay the recovered edits to flush the results. - * Currently secondary region replicas do not replay the edits, since it would - * cause flushes which might affect the primary region. Primary regions even opened - * in read only mode should replay the edits. + * Returns whether to replay the recovered edits to flush the results. Currently secondary region + * replicas do not replay the edits, since it would cause flushes which might affect the primary + * region. Primary regions even opened in read only mode should replay the edits. * @param region the HRegion object * @return whether recovered edits should be replayed. */ @@ -117,10 +113,10 @@ public static boolean shouldReplayRecoveredEdits(HRegion region) { } /** - * Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the - * files of the primary region, so an HFileLink is used to construct the StoreFileInfo. 
This - * way ensures that the secondary will be able to continue reading the store files even if - * they are moved to archive after compaction + * Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the files of the + * primary region, so an HFileLink is used to construct the StoreFileInfo. This way ensures that + * the secondary will be able to continue reading the store files even if they are moved to + * archive after compaction */ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path) @@ -133,9 +129,8 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, // else create a store file link. The link file does not exists on filesystem though. if (HFileLink.isHFileLink(path) || StoreFileInfo.isHFile(path)) { - HFileLink link = HFileLink - .build(conf, regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(), familyName, - path.getName()); + HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(), + regionInfoForFs.getEncodedName(), familyName, path.getName()); return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link); } else if (StoreFileInfo.isReference(path)) { Reference reference = Reference.read(fs, path); @@ -146,9 +141,8 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference, link); } else { // Reference - HFileLink link = HFileLink - .build(conf, regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(), familyName, - path.getName()); + HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(), + regionInfoForFs.getEncodedName(), familyName, path.getName()); return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference); } } else { @@ -158,11 +152,11 @@ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, /** * @return True if Region Read Replica is enabled for tn (whether hbase:meta or - * user-space tables). + * user-space tables). */ public static boolean isRegionReplicaReplicationEnabled(Configuration conf, TableName tn) { - return isMetaRegionReplicaReplicationEnabled(conf, tn) || - isRegionReplicaReplicationEnabled(conf); + return isMetaRegionReplicaReplicationEnabled(conf, tn) + || isRegionReplicaReplicationEnabled(conf); } /** @@ -176,9 +170,8 @@ private static boolean isRegionReplicaReplicationEnabled(Configuration conf) { * @return True if hbase:meta Region Read Replica is enabled. */ public static boolean isMetaRegionReplicaReplicationEnabled(Configuration conf, TableName tn) { - return TableName.isMetaTableName(tn) && - conf.getBoolean(REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, - DEFAULT_REGION_REPLICA_REPLICATION_CATALOG); + return TableName.isMetaTableName(tn) && conf.getBoolean( + REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, DEFAULT_REGION_REPLICA_REPLICATION_CATALOG); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java index 17da6812fe68..2be277d8d96c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,7 @@ /** * This class provides ShutdownHookManager shims for HBase to interact with the Hadoop 1.0.x and the - * Hadoop 2.0+ series. - * - * NOTE: No testing done against 0.22.x, or 0.21.x. + * Hadoop 2.0+ series. NOTE: No testing done against 0.22.x, or 0.21.x. */ @InterfaceAudience.Private abstract public class ShutdownHookManager { @@ -75,10 +73,8 @@ private static class ShutdownHookManagerV2 extends ShutdownHookManager { public void addShutdownHook(Thread shutdownHookThread, int priority) { try { Methods.call(shutdownHookManagerClass, - Methods.call(shutdownHookManagerClass, null, "get", null, null), - "addShutdownHook", - new Class[] { Runnable.class, int.class }, - new Object[] { shutdownHookThread, priority }); + Methods.call(shutdownHookManagerClass, null, "get", null, null), "addShutdownHook", + new Class[] { Runnable.class, int.class }, new Object[] { shutdownHookThread, priority }); } catch (Exception ex) { throw new RuntimeException("we could not use ShutdownHookManager.addShutdownHook", ex); } @@ -87,12 +83,9 @@ public void addShutdownHook(Thread shutdownHookThread, int priority) { @Override public boolean removeShutdownHook(Runnable shutdownHook) { try { - return (Boolean) - Methods.call(shutdownHookManagerClass, - Methods.call(shutdownHookManagerClass, null, "get", null, null), - "removeShutdownHook", - new Class[] { Runnable.class }, - new Object[] { shutdownHook }); + return (Boolean) Methods.call(shutdownHookManagerClass, + Methods.call(shutdownHookManagerClass, null, "get", null, null), "removeShutdownHook", + new Class[] { Runnable.class }, new Object[] { shutdownHook }); } catch (Exception ex) { throw new RuntimeException("we could not use ShutdownHookManager", ex); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java index f896e550a169..005888982a6c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.util.ArrayList; @@ -29,35 +28,31 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Simple sorted list implementation that uses {@link java.util.ArrayList} as - * the underlying collection so we can support RandomAccess. All mutations - * create a new copy of the ArrayList instance, so can be - * expensive. This class is only intended for use on small, very rarely - * written collections that expect highly concurrent reads. + * Simple sorted list implementation that uses {@link java.util.ArrayList} as the underlying + * collection so we can support RandomAccess. All mutations create a new copy of the + * ArrayList instance, so can be expensive. This class is only intended for use on + * small, very rarely written collections that expect highly concurrent reads. *
      - * Read operations are performed on a reference to the internal list at the - * time of invocation, so will not see any mutations to the collection during - * their operation. Iterating over list elements manually using the - * RandomAccess pattern involves multiple operations. For this to be safe get - * a reference to the internal list first using get(). + * Read operations are performed on a reference to the internal list at the time of invocation, so + * will not see any mutations to the collection during their operation. Iterating over list elements + * manually using the RandomAccess pattern involves multiple operations. For this to be safe get a + * reference to the internal list first using get(). *
      - * If constructed with a {@link java.util.Comparator}, the list will be sorted - * using the comparator. Adding or changing an element using an index will - * trigger a resort. + * If constructed with a {@link java.util.Comparator}, the list will be sorted using the comparator. + * Adding or changing an element using an index will trigger a resort. *
      * Iterators are read-only. They cannot be used to remove elements. */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UG_SYNC_SET_UNSYNC_GET", - justification="TODO: synchronization in here needs review!!!") +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "UG_SYNC_SET_UNSYNC_GET", + justification = "TODO: synchronization in here needs review!!!") @InterfaceAudience.Private public class SortedList implements List, RandomAccess { private volatile List list; private final Comparator comparator; /** - * Constructs an empty list with the default initial capacity that will be - * sorted using the given comparator. - * + * Constructs an empty list with the default initial capacity that will be sorted using the given + * comparator. * @param comparator the comparator */ public SortedList(Comparator comparator) { @@ -66,10 +61,8 @@ public SortedList(Comparator comparator) { } /** - * Constructs a list containing the elements of the given collection, in the - * order returned by the collection's iterator, that will be sorted with the - * given comparator. - * + * Constructs a list containing the elements of the given collection, in the order returned by the + * collection's iterator, that will be sorted with the given comparator. * @param c the collection * @param comparator the comparator */ @@ -79,10 +72,9 @@ public SortedList(Collection c, Comparator comparator) { } /** - * Returns a reference to the unmodifiable list currently backing the SortedList. - * Changes to the SortedList will not be reflected in this list. Use this - * method to get a reference for iterating over using the RandomAccess - * pattern. + * Returns a reference to the unmodifiable list currently backing the SortedList. Changes to the + * SortedList will not be reflected in this list. Use this method to get a reference for iterating + * over using the RandomAccess pattern. */ public List get() { // FindBugs: UG_SYNC_SET_UNSYNC_GET complaint. Fix!! return list; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java index efd3da3a88d0..7b018afcc1d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/StealJobQueue.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,8 +17,6 @@ */ package org.apache.hadoop.hbase.util; -import org.apache.yetus.audience.InterfaceAudience; - import java.util.Comparator; import java.util.concurrent.BlockingQueue; import java.util.concurrent.PriorityBlockingQueue; @@ -27,17 +24,16 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.yetus.audience.InterfaceAudience; /** - * This queue allows a ThreadPoolExecutor to steal jobs from another ThreadPoolExecutor. - * This queue also acts as the factory for creating the PriorityBlockingQueue to be used in the - * steal-from ThreadPoolExecutor. The behavior of this queue is the same as a normal - * PriorityBlockingQueue except the take/poll(long,TimeUnit) methods would also check whether there - * are jobs in the steal-from queue if this q ueue is empty. 
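The SortedList javadoc reflowed above describes a copy-on-write ArrayList kept sorted by a comparator, with readers taking a stable snapshot through get(). A tiny stand-alone class showing the same idea (CowSortedList is an invented name, not the HBase class):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    final class CowSortedList<E> {
      private volatile List<E> list = Collections.emptyList();
      private final Comparator<? super E> comparator;

      CowSortedList(Comparator<? super E> comparator) {
        this.comparator = comparator;
      }

      synchronized void add(E e) {
        List<E> copy = new ArrayList<>(list); // every mutation copies the backing list
        copy.add(e);
        copy.sort(comparator);
        list = Collections.unmodifiableList(copy);
      }

      List<E> get() {
        return list; // stable snapshot; later mutations are not visible through it
      }
    }

A reader grabs List<E> snap = sorted.get() once and indexes into it, which is the RandomAccess pattern the comment recommends.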
- * - * Note the workers in ThreadPoolExecutor must be pre-started so that they can steal job from the - * other queue, otherwise the worker will only be started after there are jobs submitted to main - * queue. + * This queue allows a ThreadPoolExecutor to steal jobs from another ThreadPoolExecutor. This queue + * also acts as the factory for creating the PriorityBlockingQueue to be used in the steal-from + * ThreadPoolExecutor. The behavior of this queue is the same as a normal PriorityBlockingQueue + * except the take/poll(long,TimeUnit) methods would also check whether there are jobs in the + * steal-from queue if this q ueue is empty. Note the workers in ThreadPoolExecutor must be + * pre-started so that they can steal job from the other queue, otherwise the worker will only be + * started after there are jobs submitted to main queue. */ @InterfaceAudience.Private public class StealJobQueue extends PriorityBlockingQueue { @@ -92,7 +88,6 @@ public boolean offer(T t) { } } - @Override public T take() throws InterruptedException { lock.lockInterruptibly(); @@ -124,8 +119,7 @@ public T poll(long timeout, TimeUnit unit) throws InterruptedException { retVal = stealFromQueue.poll(); } if (retVal == null) { - if (nanos <= 0) - return null; + if (nanos <= 0) return null; nanos = notEmpty.awaitNanos(nanos); } else { return retVal; @@ -136,4 +130,3 @@ public T poll(long timeout, TimeUnit unit) throws InterruptedException { } } } - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java index 5a28187b8245..a7c573cae7fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -50,11 +49,11 @@ public final class TableDescriptorChecker { public static final String TABLE_SANITY_CHECKS = "hbase.table.sanity.checks"; public static final boolean DEFAULT_TABLE_SANITY_CHECKS = true; - //should we check the compression codec type at master side, default true, HBASE-6370 + // should we check the compression codec type at master side, default true, HBASE-6370 public static final String MASTER_CHECK_COMPRESSION = "hbase.master.check.compression"; public static final boolean DEFAULT_MASTER_CHECK_COMPRESSION = true; - //should we check encryption settings at master side, default true + // should we check encryption settings at master side, default true public static final String MASTER_CHECK_ENCRYPTION = "hbase.master.check.encryption"; public static final boolean DEFAULT_MASTER_CHECK_ENCRYPTION = true; @@ -62,14 +61,12 @@ private TableDescriptorChecker() { } /** - * Checks whether the table conforms to some sane limits, and configured - * values (compression, etc) work. Throws an exception if something is wrong. + * Checks whether the table conforms to some sane limits, and configured values (compression, etc) + * work. Throws an exception if something is wrong. 
*/ public static void sanityCheck(final Configuration c, final TableDescriptor td) throws IOException { - CompoundConfiguration conf = new CompoundConfiguration() - .add(c) - .addBytesMap(td.getValues()); + CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues()); // Setting this to true logs the warning instead of throwing exception boolean logWarn = false; @@ -85,14 +82,13 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit // if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in // hbase-site.xml, use maxFileSizeLowerLimit instead to skip this check - long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null ? - conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) : - Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); + long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null + ? conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) + : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) { - String message = - "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + - maxFileSize + ") is too small, which might cause over splitting into unmanageable " + - "number of regions."; + String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + + maxFileSize + ") is too small, which might cause over splitting into unmanageable " + + "number of regions."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -100,13 +96,13 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit // if not set MEMSTORE_FLUSHSIZE in TableDescriptor, and not set HREGION_MEMSTORE_FLUSH_SIZE in // hbase-site.xml, use flushSizeLowerLimit instead to skip this check - long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null ? - conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) : - Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); + long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null + ? 
conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) + : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) { - String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + - "\"hbase.hregion.memstore.flush.size\" (" + flushSize + - ") is too small, which might cause" + " very frequent flushing."; + String message = + "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (" + + flushSize + ") is too small, which might cause" + " very frequent flushing."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -167,8 +163,8 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check blockSize if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) { - String message = "Block size for column family " + hcd.getNameAsString() + - " must be between 1K and 16MB."; + String message = "Block size for column family " + hcd.getNameAsString() + + " must be between 1K and 16MB."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -181,11 +177,11 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // max versions already being checked // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor - // does not throw IllegalArgumentException + // does not throw IllegalArgumentException // check minVersions <= maxVerions if (hcd.getMinVersions() > hcd.getMaxVersions()) { - String message = "Min versions for column family " + hcd.getNameAsString() + - " must be less than the Max versions."; + String message = "Min versions for column family " + hcd.getNameAsString() + + " must be less than the Max versions."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -197,8 +193,8 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td) // check data replication factor, it can be 0(default value) when user has not explicitly // set the value, in this case we use default replication factor set in the file system. 
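The sanity checks in this hunk follow one pattern: take the table-level value if present, otherwise the site-wide one, and warn or fail when it falls below a floor, depending on whether hbase.table.sanity.checks has been relaxed. A simplified stand-alone sketch of that pattern (it throws IllegalStateException where the real code throws DoNotRetryIOException, and the demo numbers are arbitrary):

    final class FlushSizeCheckDemo {
      static void check(Long tableLevel, long siteWide, long floor, boolean logWarnOnly) {
        long effective = tableLevel != null ? tableLevel : siteWide;
        if (effective < floor) {
          String msg = "MEMSTORE_FLUSHSIZE (" + effective
              + ") is too small, which might cause very frequent flushing.";
          if (!logWarnOnly) {
            throw new IllegalStateException(msg);
          }
          System.err.println("WARN: " + msg);
        }
      }

      public static void main(String[] args) {
        check(null, 128L * 1024 * 1024, 1024 * 1024L, true);  // fine: 128M is above the 1M floor
        check(4096L, 128L * 1024 * 1024, 1024 * 1024L, true); // logs a warning: 4K is below 1M
      }
    }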
if (hcd.getDFSReplication() < 0) { - String message = "HFile Replication for column family " + hcd.getNameAsString() + - " must be greater than zero."; + String message = "HFile Replication for column family " + hcd.getNameAsString() + + " must be greater than zero."; warnOrThrowExceptionForFailure(logWarn, message, null); } @@ -215,9 +211,8 @@ private static void checkReplicationScope(final ColumnFamilyDescriptor cfd) thro // check replication scope WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(cfd.getScope()); if (scop == null) { - String message = - "Replication scope for column family " + cfd.getNameAsString() + " is " + cfd.getScope() + - " which is invalid."; + String message = "Replication scope for column family " + cfd.getNameAsString() + " is " + + cfd.getScope() + " which is invalid."; LOG.error(message); throw new DoNotRetryIOException(message); @@ -231,7 +226,7 @@ private static void checkCompactionPolicy(Configuration conf, TableDescriptor td String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY); if (className == null) { className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, - ExploringCompactionPolicy.class.getName()); + ExploringCompactionPolicy.class.getName()); } int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT; @@ -272,9 +267,9 @@ private static void checkCompactionPolicy(Configuration conf, TableDescriptor td blockingFileCount = Integer.parseInt(sv); } if (blockingFileCount < 1000) { - message = - "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount + - " is below recommended minimum of 1000 for column family " + hcd.getNameAsString(); + message = "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + + blockingFileCount + " is below recommended minimum of 1000 for column family " + + hcd.getNameAsString(); throw new IOException(message); } } @@ -315,8 +310,8 @@ public static void checkClassLoading(final Configuration conf, final TableDescri private static void warnOrThrowExceptionForFailure(boolean logWarn, String message, Exception cause) throws IOException { if (!logWarn) { - throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS + - " to false at conf or table descriptor if you want to bypass sanity checks", cause); + throw new DoNotRetryIOException(message + " Set " + TABLE_SANITY_CHECKS + + " to false at conf or table descriptor if you want to bypass sanity checks", cause); } LOG.warn(message); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java index 3070fb37277d..4c5b4058c9c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,8 @@ public final class YammerHistogramUtils { // not for public consumption - private YammerHistogramUtils() {} + private YammerHistogramUtils() { + } /** * Used formatting doubles so only two places after decimal point. @@ -38,13 +38,12 @@ private YammerHistogramUtils() {} private static DecimalFormat DOUBLE_FORMAT = new DecimalFormat("#0.00"); /** - * Create a new {@link com.codahale.metrics.Histogram} instance. 
These constructors are - * not public in 2.2.0, so we use reflection to find them. + * Create a new {@link com.codahale.metrics.Histogram} instance. These constructors are not public + * in 2.2.0, so we use reflection to find them. */ public static Histogram newHistogram(Reservoir sample) { try { - Constructor ctor = - Histogram.class.getDeclaredConstructor(Reservoir.class); + Constructor ctor = Histogram.class.getDeclaredConstructor(Reservoir.class); ctor.setAccessible(true); return (Histogram) ctor.newInstance(sample); } catch (Exception e) { @@ -55,44 +54,41 @@ public static Histogram newHistogram(Reservoir sample) { /** @return an abbreviated summary of {@code hist}. */ public static String getShortHistogramReport(final Histogram hist) { Snapshot sn = hist.getSnapshot(); - return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + - ", min=" + DOUBLE_FORMAT.format(sn.getMin()) + - ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + - ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + - ", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) + - ", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile()); + return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + ", min=" + + DOUBLE_FORMAT.format(sn.getMin()) + ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + + ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + ", 95th=" + + DOUBLE_FORMAT.format(sn.get95thPercentile()) + ", 99th=" + + DOUBLE_FORMAT.format(sn.get99thPercentile()); } /** @return a summary of {@code hist}. */ public static String getHistogramReport(final Histogram hist) { Snapshot sn = hist.getSnapshot(); - return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + - ", min=" + DOUBLE_FORMAT.format(sn.getMin()) + - ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + - ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + - ", 50th=" + DOUBLE_FORMAT.format(sn.getMedian()) + - ", 75th=" + DOUBLE_FORMAT.format(sn.get75thPercentile()) + - ", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) + - ", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile()) + - ", 99.9th=" + DOUBLE_FORMAT.format(sn.get999thPercentile()) + - ", 99.99th=" + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + - ", 99.999th=" + DOUBLE_FORMAT.format(sn.getValue(0.99999)); + return "mean=" + DOUBLE_FORMAT.format(sn.getMean()) + ", min=" + + DOUBLE_FORMAT.format(sn.getMin()) + ", max=" + DOUBLE_FORMAT.format(sn.getMax()) + + ", stdDev=" + DOUBLE_FORMAT.format(sn.getStdDev()) + ", 50th=" + + DOUBLE_FORMAT.format(sn.getMedian()) + ", 75th=" + + DOUBLE_FORMAT.format(sn.get75thPercentile()) + ", 95th=" + + DOUBLE_FORMAT.format(sn.get95thPercentile()) + ", 99th=" + + DOUBLE_FORMAT.format(sn.get99thPercentile()) + ", 99.9th=" + + DOUBLE_FORMAT.format(sn.get999thPercentile()) + ", 99.99th=" + + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + ", 99.999th=" + + DOUBLE_FORMAT.format(sn.getValue(0.99999)); } /** @return pretty summary of {@code hist}. 
*/ public static String getPrettyHistogramReport(final Histogram h) { Snapshot sn = h.getSnapshot(); - return - "Mean = " + DOUBLE_FORMAT.format(sn.getMean()) + "\n" + - "Min = " + DOUBLE_FORMAT.format(sn.getMin()) + "\n" + - "Max = " + DOUBLE_FORMAT.format(sn.getMax()) + "\n" + - "StdDev = " + DOUBLE_FORMAT.format(sn.getStdDev()) + "\n" + - "50th = " + DOUBLE_FORMAT.format(sn.getMedian()) + "\n" + - "75th = " + DOUBLE_FORMAT.format(sn.get75thPercentile()) + "\n" + - "95th = " + DOUBLE_FORMAT.format(sn.get95thPercentile()) + "\n" + - "99th = " + DOUBLE_FORMAT.format(sn.get99thPercentile()) + "\n" + - "99.9th = " + DOUBLE_FORMAT.format(sn.get999thPercentile()) + "\n" + - "99.99th = " + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + "\n" + - "99.999th = " + DOUBLE_FORMAT.format(sn.getValue(0.99999)); + return "Mean = " + DOUBLE_FORMAT.format(sn.getMean()) + "\n" + "Min = " + + DOUBLE_FORMAT.format(sn.getMin()) + "\n" + "Max = " + + DOUBLE_FORMAT.format(sn.getMax()) + "\n" + "StdDev = " + + DOUBLE_FORMAT.format(sn.getStdDev()) + "\n" + "50th = " + + DOUBLE_FORMAT.format(sn.getMedian()) + "\n" + "75th = " + + DOUBLE_FORMAT.format(sn.get75thPercentile()) + "\n" + "95th = " + + DOUBLE_FORMAT.format(sn.get95thPercentile()) + "\n" + "99th = " + + DOUBLE_FORMAT.format(sn.get99thPercentile()) + "\n" + "99.9th = " + + DOUBLE_FORMAT.format(sn.get999thPercentile()) + "\n" + "99.99th = " + + DOUBLE_FORMAT.format(sn.getValue(0.9999)) + "\n" + "99.999th = " + + DOUBLE_FORMAT.format(sn.getValue(0.99999)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java index 78ef55ca2c5b..2c508ac4b93b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -35,6 +32,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; + /** * Utlity method to migrate zookeeper data across HBase versions. * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. @@ -45,15 +45,13 @@ public class ZKDataMigrator { private static final Logger LOG = LoggerFactory.getLogger(ZKDataMigrator.class); // Shutdown constructor. - private ZKDataMigrator() {} + private ZKDataMigrator() { + } /** - * Method for table states migration. - * Used when upgrading from pre-2.0 to 2.0 - * Reading state from zk, applying them to internal state - * and delete. - * Used by master to clean migration from zk based states to - * table descriptor based states. + * Method for table states migration. 
Used when upgrading from pre-2.0 to 2.0 Reading state from + * zk, applying them to internal state and delete. Used by master to clean migration from zk based + * states to table descriptor based states. * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. */ @Deprecated @@ -61,27 +59,26 @@ public static Map queryForTableStates(ZKWatcher zkw throws KeeperException, InterruptedException { Map rv = new HashMap<>(); List children = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().tableZNode); - if (children == null) - return rv; - for (String child: children) { + if (children == null) return rv; + for (String child : children) { TableName tableName = TableName.valueOf(child); ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName); TableState.State newState = TableState.State.ENABLED; if (state != null) { switch (state) { - case ENABLED: - newState = TableState.State.ENABLED; - break; - case DISABLED: - newState = TableState.State.DISABLED; - break; - case DISABLING: - newState = TableState.State.DISABLING; - break; - case ENABLING: - newState = TableState.State.ENABLING; - break; - default: + case ENABLED: + newState = TableState.State.ENABLED; + break; + case DISABLED: + newState = TableState.State.DISABLED; + break; + case DISABLING: + newState = TableState.State.DISABLING; + break; + case ENABLING: + newState = TableState.State.ENABLING; + break; + default: } } rv.put(tableName, newState); @@ -94,18 +91,17 @@ public static Map queryForTableStates(ZKWatcher zkw * @param zkw ZKWatcher instance to use * @param tableName table we're checking * @return Null or - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State} - * found in znode. + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State} + * found in znode. * @throws KeeperException * @deprecated Since 2.0.0. To be removed in hbase-3.0.0. */ @Deprecated - private static ZooKeeperProtos.DeprecatedTableState.State getTableState( - final ZKWatcher zkw, final TableName tableName) - throws KeeperException, InterruptedException { - String znode = ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, - tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); + private static ZooKeeperProtos.DeprecatedTableState.State getTableState(final ZKWatcher zkw, + final TableName tableName) throws KeeperException, InterruptedException { + String znode = + ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, tableName.getNameAsString()); + byte[] data = ZKUtil.getData(zkw, znode); if (data == null || data.length <= 0) return null; try { ProtobufUtil.expectPBMagicPrefix(data); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java index 9be182d245f7..5b0534a585d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/ClusterCompactionQueues.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -127,8 +127,8 @@ Optional getLargestQueueFromServersNotCompacting() { try { return compactionQueues.entrySet().stream() .filter(entry -> !compactingServers.contains(entry.getKey())) - .max(Map.Entry.comparingByValue( - (o1, o2) -> Integer.compare(o1.size(), o2.size()))).map(Map.Entry::getKey); + .max(Map.Entry.comparingByValue((o1, o2) -> Integer.compare(o1.size(), o2.size()))) + .map(Map.Entry::getKey); } finally { lock.readLock().unlock(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java index 2112b97c741f..ab1cf9741be3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,16 +52,14 @@ class MajorCompactionRequest { this.region = region; } - MajorCompactionRequest(Connection connection, RegionInfo region, - Set stores) { + MajorCompactionRequest(Connection connection, RegionInfo region, Set stores) { this(connection, region); this.stores = stores; } static Optional newRequest(Connection connection, RegionInfo info, Set stores, long timestamp) throws IOException { - MajorCompactionRequest request = - new MajorCompactionRequest(connection, info, stores); + MajorCompactionRequest request = new MajorCompactionRequest(connection, info, stores); return request.createRequest(connection, stores, timestamp); } @@ -77,8 +75,8 @@ void setStores(Set stores) { this.stores = stores; } - Optional createRequest(Connection connection, - Set stores, long timestamp) throws IOException { + Optional createRequest(Connection connection, Set stores, + long timestamp) throws IOException { Set familiesToCompact = getStoresRequiringCompaction(stores, timestamp); MajorCompactionRequest request = null; if (!familiesToCompact.isEmpty()) { @@ -104,8 +102,9 @@ boolean shouldCFBeCompacted(HRegionFileSystem fileSystem, String family, long ts // do we have any store files? 
Collection storeFiles = fileSystem.getStoreFiles(family); if (storeFiles == null) { - LOG.info("Excluding store: " + family + " for compaction for region: " + fileSystem - .getRegionInfo().getEncodedName(), " has no store files"); + LOG.info("Excluding store: " + family + " for compaction for region: " + + fileSystem.getRegionInfo().getEncodedName(), + " has no store files"); return false; } // check for reference files @@ -117,8 +116,8 @@ boolean shouldCFBeCompacted(HRegionFileSystem fileSystem, String family, long ts // check store file timestamps boolean includeStore = this.shouldIncludeStore(fileSystem, family, storeFiles, ts); if (!includeStore) { - LOG.info("Excluding store: " + family + " for compaction for region: " + fileSystem - .getRegionInfo().getEncodedName() + " already compacted"); + LOG.info("Excluding store: " + family + " for compaction for region: " + + fileSystem.getRegionInfo().getEncodedName() + " already compacted"); } return includeStore; } @@ -129,8 +128,7 @@ protected boolean shouldIncludeStore(HRegionFileSystem fileSystem, String family for (StoreFileInfo storeFile : storeFiles) { if (storeFile.getModificationTime() < ts) { LOG.info("Including store: " + family + " with: " + storeFiles.size() - + " files for compaction for region: " - + fileSystem.getRegionInfo().getEncodedName()); + + " files for compaction for region: " + fileSystem.getRegionInfo().getEncodedName()); return true; } } @@ -144,8 +142,8 @@ protected boolean familyHasReferenceFile(HRegionFileSystem fileSystem, String fa for (Path referenceFile : referenceFiles) { FileStatus status = fileSystem.getFileSystem().getFileLinkStatus(referenceFile); if (status.getModificationTime() < ts) { - LOG.info("Including store: " + family + " for compaction for region: " + fileSystem - .getRegionInfo().getEncodedName() + " (reference store files)"); + LOG.info("Including store: " + family + " for compaction for region: " + + fileSystem.getRegionInfo().getEncodedName() + " (reference store files)"); return true; } } @@ -153,17 +151,16 @@ protected boolean familyHasReferenceFile(HRegionFileSystem fileSystem, String fa } - List getReferenceFilePaths(FileSystem fileSystem, Path familyDir) - throws IOException { + List getReferenceFilePaths(FileSystem fileSystem, Path familyDir) throws IOException { return FSUtils.getReferenceFilePaths(fileSystem, familyDir); } HRegionFileSystem getFileSystem() throws IOException { try (Admin admin = connection.getAdmin()) { return HRegionFileSystem.openRegionFromFileSystem(admin.getConfiguration(), - CommonFSUtils.getCurrentFileSystem(admin.getConfiguration()), - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(admin.getConfiguration()), - region.getTable()), region, true); + CommonFSUtils.getCurrentFileSystem(admin.getConfiguration()), CommonFSUtils.getTableDir( + CommonFSUtils.getRootDir(admin.getConfiguration()), region.getTable()), + region, true); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java index a12fa71080ce..37dd7f8fe8b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util.compaction; import java.io.IOException; @@ -94,9 +93,8 @@ protected boolean shouldIncludeStore(HRegionFileSystem fileSystem, String family // Lets only compact when all files are older than TTL if (storeFile.getModificationTime() >= ts) { LOG.info("There is atleast one file in store: " + family + " file: " + storeFile.getPath() - + " with timestamp " + storeFile.getModificationTime() - + " for region: " + fileSystem.getRegionInfo().getEncodedName() - + " older than TTL: " + ts); + + " with timestamp " + storeFile.getModificationTime() + " for region: " + + fileSystem.getRegionInfo().getEncodedName() + " older than TTL: " + ts); return false; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java index b8c8626d8189..71a8d8d1b99f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util.compaction; import java.io.IOException; @@ -122,8 +123,8 @@ public void compactAllRegions() throws Exception { clusterCompactionQueues.releaseCompaction(serverName); } else { LOG.info("Firing off compaction request for server: " + serverName + ", " + request - + " total queue size left: " + clusterCompactionQueues - .getCompactionRequestsLeftToFinish()); + + " total queue size left: " + + clusterCompactionQueues.getCompactionRequestsLeftToFinish()); futures.add(executor.submit(new Compact(serverName, request))); } } else { @@ -143,11 +144,10 @@ public void shutdown() throws Exception { executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS); if (!ERRORS.isEmpty()) { - StringBuilder builder = - new StringBuilder().append("Major compaction failed, there were: ").append(ERRORS.size()) - .append(" regions / stores that failed compacting\n") - .append("Failed compaction requests\n").append("--------------------------\n") - .append(Joiner.on("\n").join(ERRORS)); + StringBuilder builder = new StringBuilder().append("Major compaction failed, there were: ") + .append(ERRORS.size()).append(" regions / stores that failed compacting\n") + .append("Failed compaction requests\n").append("--------------------------\n") + .append(Joiner.on("\n").join(ERRORS)); LOG.error(builder.toString()); } if (connection != null) { @@ -164,21 +164,21 @@ void initializeWorkQueues() throws IOException { LOG.info("No family specified, will execute for all families"); } LOG.info( - "Initializing compaction queues for table: " + tableName + " with cf: " + storesToCompact); + "Initializing compaction queues for table: " + tableName + " with cf: " + storesToCompact); Map> snRegionMap = getServerRegionsMap(); /* - * If numservers is specified, stop inspecting regions beyond the numservers, it will serve - * to throttle and won't end up scanning all the regions in the event there are not many - * regions to compact based on the criteria. + * If numservers is specified, stop inspecting regions beyond the numservers, it will serve to + * throttle and won't end up scanning all the regions in the event there are not many regions to + * compact based on the criteria. */ for (ServerName sn : getServersToCompact(snRegionMap.keySet())) { List regions = snRegionMap.get(sn); LOG.debug("Table: " + tableName + " Server: " + sn + " No of regions: " + regions.size()); /* - * If the tool is run periodically, then we could shuffle the regions and provide - * some random order to select regions. Helps if numregions is specified. + * If the tool is run periodically, then we could shuffle the regions and provide some random + * order to select regions. Helps if numregions is specified. 
*/ Collections.shuffle(regions); int regionsToCompact = numRegions; @@ -206,7 +206,7 @@ protected Optional getMajorCompactionRequest(RegionInfo } private Collection getServersToCompact(Set snSet) { - if(numServers < 0 || snSet.size() <= numServers) { + if (numServers < 0 || snSet.size() <= numServers) { return snSet; } else { @@ -253,7 +253,8 @@ class Compact implements Runnable { this.request = request; } - @Override public void run() { + @Override + public void run() { try { compactAndWait(request); } catch (NotServingRegionException e) { @@ -290,8 +291,8 @@ void compactAndWait(MajorCompactionRequest request) throws Exception { if (!skipWait) { while (isCompacting(request)) { Thread.sleep(sleepForMs); - LOG.debug("Waiting for compaction to complete for region: " + request.getRegion() - .getEncodedName()); + LOG.debug("Waiting for compaction to complete for region: " + + request.getRegion().getEncodedName()); } } } finally { @@ -312,9 +313,8 @@ void compactAndWait(MajorCompactionRequest request) throws Exception { .equals(serverName); if (regionHasNotMoved) { LOG.error( - "Not all store files were compacted, this may be due to the regionserver not " - + "being aware of all store files. Will not reattempt compacting, " - + request); + "Not all store files were compacted, this may be due to the regionserver not " + + "being aware of all store files. Will not reattempt compacting, " + request); ERRORS.add(request); } else { request.setStores(storesRequiringCompaction); @@ -332,16 +332,15 @@ void compactAndWait(MajorCompactionRequest request) throws Exception { private void compactRegionOnServer(MajorCompactionRequest request, Admin admin, String store) throws IOException { - admin.majorCompactRegion(request.getRegion().getEncodedNameAsBytes(), - Bytes.toBytes(store)); + admin.majorCompactRegion(request.getRegion().getEncodedNameAsBytes(), Bytes.toBytes(store)); } } private boolean isCompacting(MajorCompactionRequest request) throws Exception { CompactionState compactionState = connection.getAdmin() .getCompactionStateForRegion(request.getRegion().getEncodedNameAsBytes()); - return compactionState.equals(CompactionState.MAJOR) || compactionState - .equals(CompactionState.MAJOR_AND_MINOR); + return compactionState.equals(CompactionState.MAJOR) + || compactionState.equals(CompactionState.MAJOR_AND_MINOR); } private void addNewRegions() { @@ -370,104 +369,48 @@ protected Options getCommonOptions() { Options options = new Options(); options.addOption( - Option.builder("servers") - .required() - .desc("Concurrent servers compacting") - .hasArg() - .build() - ); - options.addOption( - Option.builder("minModTime"). 
- desc("Compact if store files have modification time < minModTime") - .hasArg() - .build() - ); - options.addOption( - Option.builder("zk") - .optionalArg(true) - .desc("zk quorum") - .hasArg() - .build() - ); - options.addOption( - Option.builder("rootDir") - .optionalArg(true) - .desc("hbase.rootDir") - .hasArg() - .build() - ); - options.addOption( - Option.builder("sleep") - .desc("Time to sleepForMs (ms) for checking compaction status per region and available " - + "work queues: default 30s") - .hasArg() - .build() - ); - options.addOption( - Option.builder("retries") - .desc("Max # of retries for a compaction request," + " defaults to 3") - .hasArg() - .build() - ); + Option.builder("servers").required().desc("Concurrent servers compacting").hasArg().build()); + options.addOption(Option.builder("minModTime") + .desc("Compact if store files have modification time < minModTime").hasArg().build()); + options.addOption(Option.builder("zk").optionalArg(true).desc("zk quorum").hasArg().build()); options.addOption( - Option.builder("dryRun") - .desc("Dry run, will just output a list of regions that require compaction based on " + Option.builder("rootDir").optionalArg(true).desc("hbase.rootDir").hasArg().build()); + options.addOption(Option.builder("sleep") + .desc("Time to sleepForMs (ms) for checking compaction status per region and available " + + "work queues: default 30s") + .hasArg().build()); + options.addOption(Option.builder("retries") + .desc("Max # of retries for a compaction request," + " defaults to 3").hasArg().build()); + options.addOption(Option.builder("dryRun") + .desc("Dry run, will just output a list of regions that require compaction based on " + "parameters passed") - .hasArg(false) - .build() - ); + .hasArg(false).build()); - options.addOption( - Option.builder("skipWait") - .desc("Skip waiting after triggering compaction.") - .hasArg(false) - .build() - ); + options.addOption(Option.builder("skipWait").desc("Skip waiting after triggering compaction.") + .hasArg(false).build()); - options.addOption( - Option.builder("numservers") - .optionalArg(true) - .desc("Number of servers to compact in this run, defaults to all") - .hasArg() - .build() - ); + options.addOption(Option.builder("numservers").optionalArg(true) + .desc("Number of servers to compact in this run, defaults to all").hasArg().build()); - options.addOption( - Option.builder("numregions") - .optionalArg(true) - .desc("Number of regions to compact per server, defaults to all") - .hasArg() - .build() - ); + options.addOption(Option.builder("numregions").optionalArg(true) + .desc("Number of regions to compact per server, defaults to all").hasArg().build()); return options; } @Override public int run(String[] args) throws Exception { Options options = getCommonOptions(); - options.addOption( - Option.builder("table") - .required() - .desc("table name") - .hasArg() - .build() - ); - options.addOption( - Option.builder("cf") - .optionalArg(true) - .desc("column families: comma separated eg: a,b,c") - .hasArg() - .build() - ); + options.addOption(Option.builder("table").required().desc("table name").hasArg().build()); + options.addOption(Option.builder("cf").optionalArg(true) + .desc("column families: comma separated eg: a,b,c").hasArg().build()); final CommandLineParser cmdLineParser = new DefaultParser(); CommandLine commandLine = null; try { commandLine = cmdLineParser.parse(options, args); } catch (ParseException parseException) { - System.out.println( - "ERROR: Unable to parse command-line arguments " + 
Arrays.toString(args) + " due to: " - + parseException); + System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + + " due to: " + parseException); printUsage(options); return -1; } @@ -485,9 +428,8 @@ public int run(String[] args) throws Exception { Configuration configuration = getConf(); int concurrency = Integer.parseInt(commandLine.getOptionValue("servers")); - long minModTime = Long.parseLong( - commandLine.getOptionValue("minModTime", - String.valueOf(EnvironmentEdgeManager.currentTime()))); + long minModTime = Long.parseLong(commandLine.getOptionValue("minModTime", + String.valueOf(EnvironmentEdgeManager.currentTime()))); String quorum = commandLine.getOptionValue("zk", configuration.get(HConstants.ZOOKEEPER_QUORUM)); String rootDir = commandLine.getOptionValue("rootDir", configuration.get(HConstants.HBASE_DIR)); @@ -499,9 +441,8 @@ public int run(String[] args) throws Exception { configuration.set(HConstants.HBASE_DIR, rootDir); configuration.set(HConstants.ZOOKEEPER_QUORUM, quorum); - MajorCompactor compactor = - new MajorCompactor(configuration, TableName.valueOf(tableName), families, concurrency, - minModTime, sleep); + MajorCompactor compactor = new MajorCompactor(configuration, TableName.valueOf(tableName), + families, concurrency, minModTime, sleep); compactor.setNumServers(numServers); compactor.setNumRegions(numRegions); compactor.setSkipWait(commandLine.hasOption("skipWait")); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java index c6ea5af7e138..87714e5aa5d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactorTTL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
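As an illustrative aside (not part of the commit): once run() above has parsed the CLI options, it boils down to roughly the flow below. The table name, column families and tuning values are invented; the sketch is placed in the tool's own package so the package-private queue and compaction helpers that run() uses are reachable.

package org.apache.hadoop.hbase.util.compaction;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class MajorCompactorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Set<String> families = new HashSet<>(Arrays.asList("a", "b")); // as if "-cf a,b"
    MajorCompactor compactor = new MajorCompactor(conf, TableName.valueOf("usertable"),
      families,
      5,                                    // as if "-servers 5"
      EnvironmentEdgeManager.currentTime(), // default used for "-minModTime"
      30000L);                              // as if "-sleep 30000"
    compactor.setSkipWait(false);
    compactor.initializeWorkQueues(); // build per-server queues of compaction requests
    compactor.compactAllRegions();    // submit requests and wait for them to drain
    compactor.shutdown();             // stop the executor and report failed requests
  }
}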
*/ - package org.apache.hadoop.hbase.util.compaction; import java.io.IOException; @@ -53,7 +52,7 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class MajorCompactorTTL extends MajorCompactor { - private static final Logger LOG = LoggerFactory.getLogger(MajorCompactorTTL .class); + private static final Logger LOG = LoggerFactory.getLogger(MajorCompactorTTL.class); private TableDescriptor htd; @@ -82,12 +81,11 @@ protected Optional getMajorCompactionRequest(RegionInfo @Override protected Set getStoresRequiringCompaction(MajorCompactionRequest request) throws IOException { - return ((MajorCompactionTTLRequest)request).getStoresRequiringCompaction(htd).keySet(); + return ((MajorCompactionTTLRequest) request).getStoresRequiringCompaction(htd).keySet(); } - public int compactRegionsTTLOnTable(Configuration conf, String table, int concurrency, - long sleep, int numServers, int numRegions, boolean dryRun, boolean skipWait) - throws Exception { + public int compactRegionsTTLOnTable(Configuration conf, String table, int concurrency, long sleep, + int numServers, int numRegions, boolean dryRun, boolean skipWait) throws Exception { Connection conn = ConnectionFactory.createConnection(conf); TableName tableName = TableName.valueOf(table); @@ -124,13 +122,7 @@ private boolean doesAnyColFamilyHaveTTL(TableDescriptor htd) { private Options getOptions() { Options options = getCommonOptions(); - options.addOption( - Option.builder("table") - .required() - .desc("table name") - .hasArg() - .build() - ); + options.addOption(Option.builder("table").required().desc("table name").hasArg().build()); return options; } @@ -144,9 +136,8 @@ public int run(String[] args) throws Exception { try { commandLine = cmdLineParser.parse(options, args); } catch (ParseException parseException) { - System.out.println( - "ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + " due to: " - + parseException); + System.out.println("ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + + " due to: " + parseException); printUsage(options); return -1; } @@ -165,7 +156,7 @@ public int run(String[] args) throws Exception { boolean skipWait = commandLine.hasOption("skipWait"); return compactRegionsTTLOnTable(HBaseConfiguration.create(), table, concurrency, sleep, - numServers, numRegions, dryRun, skipWait); + numServers, numRegions, dryRun, skipWait); } public static void main(String[] args) throws Exception { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java index 6a6c530c3b64..040559c1d49b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,14 +50,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * This class marches through all of the region's hfiles and verifies that - * they are all valid files. One just needs to instantiate the class, use - * checkTables(List<Path>) and then retrieve the corrupted hfiles (and - * quarantined files if in quarantining mode) - * - * The implementation currently parallelizes at the regionDir level. 
+ * This class marches through all of the region's hfiles and verifies that they are all valid files. + * One just needs to instantiate the class, use checkTables(List<Path>) and then retrieve the + * corrupted hfiles (and quarantined files if in quarantining mode) The implementation currently + * parallelizes at the regionDir level. */ @InterfaceAudience.Private public class HFileCorruptionChecker { @@ -79,8 +76,8 @@ public class HFileCorruptionChecker { final AtomicInteger hfilesChecked = new AtomicInteger(); final AtomicInteger mobFilesChecked = new AtomicInteger(); - public HFileCorruptionChecker(Configuration conf, ExecutorService executor, - boolean quarantine) throws IOException { + public HFileCorruptionChecker(Configuration conf, ExecutorService executor, boolean quarantine) + throws IOException { this.conf = conf; this.fs = FileSystem.get(conf); this.cacheConf = CacheConfig.DISABLED; @@ -90,11 +87,8 @@ public HFileCorruptionChecker(Configuration conf, ExecutorService executor, /** * Checks a path to see if it is a valid hfile. - * - * @param p - * full Path to an HFile - * @throws IOException - * This is a connectivity related exception + * @param p full Path to an HFile + * @throws IOException This is a connectivity related exception */ protected void checkHFile(Path p) throws IOException { HFile.Reader r = null; @@ -107,7 +101,7 @@ protected void checkHFile(Path p) throws IOException { Path dest = createQuarantinePath(p); LOG.warn("Quarantining corrupt HFile " + p + " into " + dest); boolean success = fs.mkdirs(dest.getParent()); - success = success ? fs.rename(p, dest): false; + success = success ? fs.rename(p, dest) : false; if (!success) { failures.add(p); } else { @@ -127,12 +121,9 @@ protected void checkHFile(Path p) throws IOException { } /** - * Given a path, generates a new path to where we move a corrupted hfile (bad - * trailer, no trailer). - * - * @param hFile - * Path to a corrupt hfile (assumes that it is HBASE_DIR/ table - * /region/cf/file) + * Given a path, generates a new path to where we move a corrupted hfile (bad trailer, no + * trailer). + * @param hFile Path to a corrupt hfile (assumes that it is HBASE_DIR/ table /region/cf/file) * @return path to where corrupted files are stored. This should be * HBASE_DIR/.corrupt/table/region/cf/file. */ @@ -156,9 +147,7 @@ Path createQuarantinePath(Path hFile) throws IOException { /** * Check all files in a column family dir. - * - * @param cfDir - * column family directory + * @param cfDir column family directory * @throws IOException */ protected void checkColFamDir(Path cfDir) throws IOException { @@ -167,8 +156,8 @@ protected void checkColFamDir(Path cfDir) throws IOException { statuses = fs.listStatus(cfDir); // use same filter as scanner. } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Colfam Directory " + cfDir + - " does not exist. Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Colfam Directory " + cfDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(cfDir); return; } @@ -176,8 +165,8 @@ protected void checkColFamDir(Path cfDir) throws IOException { List hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs)); // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (hfs.isEmpty() && !fs.exists(cfDir)) { - LOG.warn("Colfam Directory " + cfDir + - " does not exist. Likely due to concurrent split/compaction. 
Skipping."); + LOG.warn("Colfam Directory " + cfDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(cfDir); return; } @@ -192,9 +181,7 @@ protected void checkColFamDir(Path cfDir) throws IOException { /** * Check all files in a mob column family dir. - * - * @param cfDir - * mob column family directory + * @param cfDir mob column family directory * @throws IOException */ protected void checkMobColFamDir(Path cfDir) throws IOException { @@ -203,8 +190,8 @@ protected void checkMobColFamDir(Path cfDir) throws IOException { statuses = fs.listStatus(cfDir); // use same filter as scanner. } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Mob colfam Directory " + cfDir + - " does not exist. Likely the table is deleted. Skipping."); + LOG.warn("Mob colfam Directory " + cfDir + + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(cfDir); return; } @@ -212,8 +199,8 @@ protected void checkMobColFamDir(Path cfDir) throws IOException { List hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs)); // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (hfs.isEmpty() && !fs.exists(cfDir)) { - LOG.warn("Mob colfam Directory " + cfDir + - " does not exist. Likely the table is deleted. Skipping."); + LOG.warn("Mob colfam Directory " + cfDir + + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(cfDir); return; } @@ -228,11 +215,8 @@ protected void checkMobColFamDir(Path cfDir) throws IOException { /** * Checks a path to see if it is a valid mob file. - * - * @param p - * full Path to a mob file. - * @throws IOException - * This is a connectivity related exception + * @param p full Path to a mob file. + * @throws IOException This is a connectivity related exception */ protected void checkMobFile(Path p) throws IOException { HFile.Reader r = null; @@ -245,7 +229,7 @@ protected void checkMobFile(Path p) throws IOException { Path dest = createQuarantinePath(p); LOG.warn("Quarantining corrupt mob file " + p + " into " + dest); boolean success = fs.mkdirs(dest.getParent()); - success = success ? fs.rename(p, dest): false; + success = success ? fs.rename(p, dest) : false; if (!success) { failureMobFiles.add(p); } else { @@ -278,16 +262,16 @@ private void checkMobRegionDir(Path regionDir) throws IOException { hfs = fs.listStatus(regionDir, new FamilyDirFilter(fs)); } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Mob directory " + regionDir - + " does not exist. Likely the table is deleted. Skipping."); + LOG.warn( + "Mob directory " + regionDir + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(regionDir); return; } // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (hfs.length == 0 && !fs.exists(regionDir)) { - LOG.warn("Mob directory " + regionDir - + " does not exist. Likely the table is deleted. Skipping."); + LOG.warn( + "Mob directory " + regionDir + " does not exist. Likely the table is deleted. Skipping."); missedMobFiles.add(regionDir); return; } @@ -302,9 +286,7 @@ private void checkMobRegionDir(Path regionDir) throws IOException { /** * Check all column families in a region dir. 
- * - * @param regionDir - * region directory + * @param regionDir region directory * @throws IOException */ protected void checkRegionDir(Path regionDir) throws IOException { @@ -313,8 +295,8 @@ protected void checkRegionDir(Path regionDir) throws IOException { statuses = fs.listStatus(regionDir); } catch (FileNotFoundException fnfe) { // Hadoop 0.23+ listStatus semantics throws an exception if the path does not exist. - LOG.warn("Region Directory " + regionDir + - " does not exist. Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Region Directory " + regionDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(regionDir); return; } @@ -322,8 +304,8 @@ protected void checkRegionDir(Path regionDir) throws IOException { List cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs)); // Hadoop 1.0 listStatus does not throw an exception if the path does not exist. if (cfs.isEmpty() && !fs.exists(regionDir)) { - LOG.warn("Region Directory " + regionDir + - " does not exist. Likely due to concurrent split/compaction. Skipping."); + LOG.warn("Region Directory " + regionDir + + " does not exist. Likely due to concurrent split/compaction. Skipping."); missing.add(regionDir); return; } @@ -338,23 +320,23 @@ protected void checkRegionDir(Path regionDir) throws IOException { /** * Check all the regiondirs in the specified tableDir - * - * @param tableDir - * path to a table + * @param tableDir path to a table * @throws IOException */ void checkTableDir(Path tableDir) throws IOException { - List rds = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); + List rds = + FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (rds == null) { if (!fs.exists(tableDir)) { - LOG.warn("Table Directory " + tableDir + - " does not exist. Likely due to concurrent delete. Skipping."); + LOG.warn("Table Directory " + tableDir + + " does not exist. Likely due to concurrent delete. Skipping."); missing.add(tableDir); } return; } - LOG.info("Checking Table Directory {}. Number of entries (including mob) = {}", tableDir, rds.size() + 1); + LOG.info("Checking Table Directory {}. Number of entries (including mob) = {}", tableDir, + rds.size() + 1); // Parallelize check at the region dir level List rdcs = new ArrayList<>(rds.size() + 1); @@ -382,8 +364,8 @@ void checkTableDir(Path tableDir) throws IOException { try { f.get(); } catch (ExecutionException e) { - LOG.warn("Failed to quarantine an HFile in regiondir " - + rdcs.get(i).regionDir, e.getCause()); + LOG.warn("Failed to quarantine an HFile in regiondir " + rdcs.get(i).regionDir, + e.getCause()); // rethrow IOExceptions if (e.getCause() instanceof IOException) { throw (IOException) e.getCause(); @@ -407,8 +389,8 @@ void checkTableDir(Path tableDir) throws IOException { } /** - * An individual work item for parallelized regiondir processing. This is - * intentionally an inner class so it can use the shared error sets and fs. + * An individual work item for parallelized regiondir processing. This is intentionally an inner + * class so it can use the shared error sets and fs. */ private class RegionDirChecker implements Callable { final Path regionDir; @@ -425,8 +407,8 @@ public Void call() throws IOException { } /** - * An individual work item for parallelized mob dir processing. This is - * intentionally an inner class so it can use the shared error sets and fs. + * An individual work item for parallelized mob dir processing. 
This is intentionally an inner + * class so it can use the shared error sets and fs. */ private class MobRegionDirChecker extends RegionDirChecker { @@ -490,8 +472,8 @@ public Collection getQuarantined() { } /** - * @return the set of paths that were missing. Likely due to deletion/moves from - * compaction or flushes. + * @return the set of paths that were missing. Likely due to deletion/moves from compaction or + * flushes. */ public Collection getMissing() { return new HashSet<>(missing); @@ -526,8 +508,8 @@ public Collection getQuarantinedMobFiles() { } /** - * @return the set of paths that were missing. Likely due to table deletion or - * deletion/moves from compaction. + * @return the set of paths that were missing. Likely due to table deletion or deletion/moves from + * compaction. */ public Collection getMissedMobFiles() { return new HashSet<>(missedMobFiles); @@ -556,8 +538,7 @@ public void report(HbckErrorReporter out) { } String initialState = (corrupted.isEmpty()) ? "OK" : "CORRUPTED"; - String fixedState = (corrupted.size() == quarantined.size()) ? "OK" - : "CORRUPTED"; + String fixedState = (corrupted.size() == quarantined.size()) ? "OK" : "CORRUPTED"; // print mob-related report out.print("Checked " + mobFilesChecked.get() + " Mob files for corruption"); @@ -577,8 +558,8 @@ public void report(HbckErrorReporter out) { out.print(" " + mq); } String initialMobState = (corruptedMobFiles.isEmpty()) ? "OK" : "CORRUPTED"; - String fixedMobState = (corruptedMobFiles.size() == quarantinedMobFiles.size()) ? "OK" - : "CORRUPTED"; + String fixedMobState = + (corruptedMobFiles.size() == quarantinedMobFiles.size()) ? "OK" : "CORRUPTED"; if (inQuarantineMode) { out.print("Summary: " + initialState + " => " + fixedState); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java index 7203fd103bb4..3bf048586ee8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
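As an illustrative aside (not part of the commit): the usage pattern the HFileCorruptionChecker class comment above describes, i.e. instantiate, check the table directories, then read back the result sets. The table name is invented, and getCorrupted() is assumed to be the accessor for the corrupt-file set (only the other getters are visible in this hunk).

import java.util.Collections;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;

public class HFileCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ExecutorService pool = Executors.newFixedThreadPool(4);
    try {
      HFileCorruptionChecker checker =
        new HFileCorruptionChecker(conf, pool, false /* do not quarantine */);
      Path tableDir = CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf),
        TableName.valueOf("usertable"));
      checker.checkTables(Collections.singletonList(tableDir));
      System.out.println("corrupted = " + checker.getCorrupted()); // assumed accessor name
      System.out.println("missing   = " + checker.getMissing());
    } finally {
      pool.shutdown();
    }
  }
}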
See the NOTICE file * distributed with this work for additional information @@ -73,8 +73,8 @@ private Map> getUnDeletedQueues() throws ReplicationExc if (!peerIds.contains(queueInfo.getPeerId())) { undeletedQueues.computeIfAbsent(replicator, key -> new ArrayList<>()).add(queueId); LOG.debug( - "Undeleted replication queue for removed peer found: " + - "[removedPeerId={}, replicator={}, queueId={}]", + "Undeleted replication queue for removed peer found: " + + "[removedPeerId={}, replicator={}, queueId={}]", queueInfo.getPeerId(), replicator, queueId); } } @@ -84,7 +84,7 @@ private Map> getUnDeletedQueues() throws ReplicationExc private Set getUndeletedHFileRefsPeers() throws ReplicationException { Set undeletedHFileRefsPeerIds = - new HashSet<>(queueStorage.getAllPeersFromHFileRefsQueue()); + new HashSet<>(queueStorage.getAllPeersFromHFileRefsQueue()); Set peerIds = new HashSet<>(peerStorage.listPeerIds()); undeletedHFileRefsPeerIds.removeAll(peerIds); if (LOG.isDebugEnabled()) { @@ -100,15 +100,16 @@ public void checkUnDeletedQueues() throws ReplicationException { undeletedQueueIds.forEach((replicator, queueIds) -> { queueIds.forEach(queueId -> { ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId); - String msg = "Undeleted replication queue for removed peer found: " + - String.format("[removedPeerId=%s, replicator=%s, queueId=%s]", queueInfo.getPeerId(), - replicator, queueId); + String msg = "Undeleted replication queue for removed peer found: " + + String.format("[removedPeerId=%s, replicator=%s, queueId=%s]", queueInfo.getPeerId(), + replicator, queueId); errorReporter.reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg); }); }); undeletedHFileRefsPeerIds = getUndeletedHFileRefsPeers(); - undeletedHFileRefsPeerIds.stream().map( - peerId -> "Undeleted replication hfile-refs queue for removed peer " + peerId + " found") + undeletedHFileRefsPeerIds.stream() + .map( + peerId -> "Undeleted replication hfile-refs queue for removed peer " + peerId + " found") .forEach(msg -> errorReporter .reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java index e4b4a814e2d0..a52dce262ef8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util.hbck; import java.io.IOException; @@ -25,10 +24,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This interface provides callbacks for handling particular table integrity - * invariant violations. This could probably be boiled down to handling holes - * and handling overlaps but currently preserves the older more specific error - * condition codes. + * This interface provides callbacks for handling particular table integrity invariant violations. 
+ * This could probably be boiled down to handling holes and handling overlaps but currently + * preserves the older more specific error condition codes. */ @InterfaceAudience.Private public interface TableIntegrityErrorHandler { @@ -41,66 +39,56 @@ public interface TableIntegrityErrorHandler { void setTableInfo(HbckTableInfo ti); /** - * Callback for handling case where a Table has a first region that does not - * have an empty start key. - * - * @param hi An HbckRegionInfo of the second region in a table. This should have - * a non-empty startkey, and can be used to fabricate a first region that - * has an empty start key. + * Callback for handling case where a Table has a first region that does not have an empty start + * key. + * @param hi An HbckRegionInfo of the second region in a table. This should have a non-empty + * startkey, and can be used to fabricate a first region that has an empty start key. */ void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException; /** - * Callback for handling case where a Table has a last region that does not - * have an empty end key. - * - * @param curEndKey The end key of the current last region. There should be a new region - * with start key as this and an empty end key. + * Callback for handling case where a Table has a last region that does not have an empty end key. + * @param curEndKey The end key of the current last region. There should be a new region with + * start key as this and an empty end key. */ void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException; /** * Callback for handling a region that has the same start and end key. - * * @param hi An HbckRegionInfo for a degenerate key. */ void handleDegenerateRegion(HbckRegionInfo hi) throws IOException; /** - * Callback for handling two regions that have the same start key. This is - * a specific case of a region overlap. + * Callback for handling two regions that have the same start key. This is a specific case of a + * region overlap. * @param hi1 one of the overlapping HbckRegionInfo * @param hi2 the other overlapping HbckRegionInfo */ void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException; /** - * Callback for handling two regions that have the same regionID - * a specific case of a split + * Callback for handling two regions that have the same regionID a specific case of a split * @param hi1 one of the overlapping HbckRegionInfo * @param hi2 the other overlapping HbckRegionInfo */ void handleSplit(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException; /** - * Callback for handling two reigons that overlap in some arbitrary way. - * This is a specific case of region overlap, and called for each possible - * pair. If two regions have the same start key, the handleDuplicateStartKeys - * method is called. + * Callback for handling two reigons that overlap in some arbitrary way. This is a specific case + * of region overlap, and called for each possible pair. If two regions have the same start key, + * the handleDuplicateStartKeys method is called. * @param hi1 one of the overlapping HbckRegionInfo * @param hi2 the other overlapping HbckRegionInfo */ - void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException; + void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException; /** * Callback for handling a region hole between two keys. 
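As an illustrative aside (not part of the commit): a handler that keeps the no-op defaults of the TableIntegrityErrorHandlerImpl base class appearing later in this diff and reacts only to holes in the region chain. It assumes the base class already implements every other callback, so a single override is enough.

import java.io.IOException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;

public class HoleLoggingHandler extends TableIntegrityErrorHandlerImpl {
  @Override
  public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) throws IOException {
    // Just record the hole; a real handler would fabricate the missing region.
    System.out.println("Hole in region chain: [" + Bytes.toStringBinary(holeStart) + ", "
      + Bytes.toStringBinary(holeEnd) + ")");
  }
}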
* @param holeStartKey key at the beginning of the region hole * @param holeEndKey key at the end of the region hole - */ - void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeEndKey) - throws IOException; + void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeEndKey) throws IOException; /** * Callback for handling an group of regions that overlap. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java index f39c623aa460..82400dfedd72 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,12 +24,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Simple implementation of TableIntegrityErrorHandler. Can be used as a base - * class. + * Simple implementation of TableIntegrityErrorHandler. Can be used as a base class. */ @InterfaceAudience.Private -abstract public class TableIntegrityErrorHandlerImpl implements - TableIntegrityErrorHandler { +abstract public class TableIntegrityErrorHandlerImpl implements TableIntegrityErrorHandler { HbckTableInfo ti; /** @@ -73,8 +71,7 @@ public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException { * {@inheritDoc} */ @Override - public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) - throws IOException { + public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException { } /** @@ -89,16 +86,14 @@ public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) * {@inheritDoc} */ @Override - public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) - throws IOException { + public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) throws IOException { } /** * {@inheritDoc} */ @Override - public void handleOverlapGroup(Collection overlap) - throws IOException { + public void handleOverlapGroup(Collection overlap) throws IOException { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 75605e604c82..b6dfd7297e31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -254,7 +254,7 @@ static void requestLogRoll(final WAL wal) { * description. */ private static final Pattern WAL_FILE_NAME_PATTERN = - Pattern.compile("(.+)\\.(\\d+)(\\.[0-9A-Za-z]+)?"); + Pattern.compile("(.+)\\.(\\d+)(\\.[0-9A-Za-z]+)?"); /** * Define for when no timestamp found. @@ -296,16 +296,16 @@ public static boolean validateWALFilename(String filename) { * with as part of their name, usually the suffix. Sometimes there will be an extra suffix as when * it is a WAL for the meta table. For example, WALs might look like this * 10.20.20.171%3A60020.1277499063250 where 1277499063250 is the - * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a - * synchronous replication WAL which adds a '.syncrep' suffix. Check for these. File also may have - * no timestamp on it. 
For example the recovered.edits files are WALs but are named in ascending - * order. Here is an example: 0000000000000016310. Allow for this. + * timestamp. Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication + * WAL which adds a '.syncrep' suffix. Check for these. File also may have no timestamp on it. For + * example the recovered.edits files are WALs but are named in ascending order. Here is an + * example: 0000000000000016310. Allow for this. * @param name Name of the WAL file. * @return Timestamp or {@link #NO_TIMESTAMP}. */ public static long getTimestamp(String name) { Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(name); - return matcher.matches() ? Long.parseLong(matcher.group(2)): NO_TIMESTAMP; + return matcher.matches() ? Long.parseLong(matcher.group(2)) : NO_TIMESTAMP; } /** @@ -439,8 +439,8 @@ public static boolean isMetaFile(String p) { } /** - * Comparator used to compare WAL files together based on their start time. - * Just compares start times and nothing else. + * Comparator used to compare WAL files together based on their start time. Just compares start + * times and nothing else. */ public static class WALStartTimeComparator implements Comparator { @Override @@ -449,10 +449,9 @@ public int compare(Path o1, Path o2) { } /** - * Split a path to get the start time - * For example: 10.20.20.171%3A60020.1277499063250 - * Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL - * which adds a '.syncrep' suffix. Check. + * Split a path to get the start time For example: 10.20.20.171%3A60020.1277499063250 Could also + * be a meta WAL which adds a '.meta' suffix or a synchronous replication WAL which adds a + * '.syncrep' suffix. Check. * @param p path to split * @return start time */ @@ -461,8 +460,6 @@ public static long getTS(Path p) { } } - - public static boolean isArchivedLogFile(Path p) { String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; return p.toString().contains(oldLog); @@ -492,8 +489,7 @@ public static Path findArchivedLog(Path path, Configuration conf) throws IOExcep ServerName serverName = getServerNameFromWALDirectoryName(path); // Try finding the log in separate old log dir - oldLogDir = - new Path(walRootDir, new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME) + oldLogDir = new Path(walRootDir, new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME) .append(Path.SEPARATOR).append(serverName.getServerName()).toString()); archivedLogLocation = new Path(oldLogDir, path.getName()); if (fs.exists(archivedLogLocation)) { @@ -586,6 +582,7 @@ private static String getWALNameGroupFromWALName(String name, int group) { throw new IllegalArgumentException(name + " is not a valid wal file name"); } } + /** * Get prefix of the log from its name, assuming WAL name in format of * log_prefix.filenumber.log_suffix diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java index 0da082a4caf9..4fe6750288f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractRecoveredEditsOutputSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
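As an illustrative aside (not part of the commit): the file-name convention that the getTimestamp() javadoc above describes, exercised on the example names it quotes. The optional third regex group absorbs suffixes such as '.meta', and names without a separator dot (recovered.edits style) simply do not match, so no timestamp is recognized.

import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

public class WalTimestampSketch {
  public static void main(String[] args) {
    // plain WAL name: prefix '.' timestamp -> 1277499063250
    System.out.println(AbstractFSWALProvider.getTimestamp("10.20.20.171%3A60020.1277499063250"));
    // meta WAL name: same timestamp, '.meta' suffix matched by the optional group
    System.out.println(
      AbstractFSWALProvider.getTimestamp("10.20.20.171%3A60020.1277499063250.meta"));
    // recovered.edits style name: no separator dot, so the pattern does not match
    System.out.println(AbstractFSWALProvider.getTimestamp("0000000000000016310"));
  }
}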
See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -62,9 +61,9 @@ protected RecoveredEditsWriter createRecoveredEditsWriter(TableName tableName, b walSplitter.getFileBeingSplit().getPath().getName(), walSplitter.getTmpDirName(), walSplitter.conf); if (walSplitter.walFS.exists(regionEditsPath)) { - LOG.warn("Found old edits file. It could be the " + - "result of a previous failed split attempt. Deleting " + regionEditsPath + ", length=" + - walSplitter.walFS.getFileStatus(regionEditsPath).getLen()); + LOG.warn("Found old edits file. It could be the " + + "result of a previous failed split attempt. Deleting " + regionEditsPath + ", length=" + + walSplitter.walFS.getFileStatus(regionEditsPath).getLen()); if (!walSplitter.walFS.delete(regionEditsPath, false)) { LOG.warn("Failed delete of old {}", regionEditsPath); } @@ -88,8 +87,8 @@ protected Path closeRecoveredEditsWriter(RecoveredEditsWriter editsWriter, return null; } final String msg = "Closed recovered edits writer path=" + editsWriter.path + " (wrote " - + editsWriter.editsWritten + " edits, skipped " + editsWriter.editsSkipped + " edits in " + ( - editsWriter.nanosSpent / 1000 / 1000) + " ms)"; + + editsWriter.editsWritten + " edits, skipped " + editsWriter.editsSkipped + " edits in " + + (editsWriter.nanosSpent / 1000 / 1000) + " ms)"; LOG.info(msg); updateStatusWithMsg(msg); if (editsWriter.editsWritten == 0) { @@ -116,7 +115,7 @@ protected Path closeRecoveredEditsWriter(RecoveredEditsWriter editsWriter, if (walSplitter.walFS.exists(editsWriter.path)) { if (!walSplitter.walFS.rename(editsWriter.path, dst)) { final String errorMsg = - "Failed renaming recovered edits " + editsWriter.path + " to " + dst; + "Failed renaming recovered edits " + editsWriter.path + " to " + dst; updateStatusWithMsg(errorMsg); throw new IOException(errorMsg); } @@ -125,8 +124,7 @@ protected Path closeRecoveredEditsWriter(RecoveredEditsWriter editsWriter, updateStatusWithMsg(renameEditMsg); } } catch (IOException ioe) { - final String errorMsg = "Could not rename recovered edits " + editsWriter.path - + " to " + dst; + final String errorMsg = "Could not rename recovered edits " + editsWriter.path + " to " + dst; LOG.error(errorMsg, ioe); updateStatusWithMsg(errorMsg); thrown.add(ioe); @@ -161,7 +159,7 @@ void updateRegionMaximumEditLogSeqNum(WAL.Entry entry) { // delete the one with fewer wal entries private void deleteOneWithFewerEntries(RecoveredEditsWriter editsWriter, Path dst) - throws IOException { + throws IOException { long dstMinLogSeqNum = -1L; try (WAL.Reader reader = walSplitter.getWalFactory().createReader(walSplitter.walFS, dst)) { WAL.Entry entry = reader.next(); @@ -173,17 +171,17 @@ private void deleteOneWithFewerEntries(RecoveredEditsWriter editsWriter, Path ds e); } if (editsWriter.minLogSeqNum < dstMinLogSeqNum) { - LOG.warn("Found existing old edits file. It could be the result of a previous failed" + - " split attempt or we have duplicated wal entries. Deleting " + dst + ", length=" + - walSplitter.walFS.getFileStatus(dst).getLen()); + LOG.warn("Found existing old edits file. It could be the result of a previous failed" + + " split attempt or we have duplicated wal entries. 
Deleting " + dst + ", length=" + + walSplitter.walFS.getFileStatus(dst).getLen()); if (!walSplitter.walFS.delete(dst, false)) { LOG.warn("Failed deleting of old {}", dst); throw new IOException("Failed deleting of old " + dst); } } else { LOG.warn( - "Found existing old edits file and we have less entries. Deleting " + editsWriter.path + - ", length=" + walSplitter.walFS.getFileStatus(editsWriter.path).getLen()); + "Found existing old edits file and we have less entries. Deleting " + editsWriter.path + + ", length=" + walSplitter.walFS.getFileStatus(editsWriter.path).getLen()); if (!walSplitter.walFS.delete(editsWriter.path, false)) { LOG.warn("Failed deleting of {}", editsWriter.path); throw new IOException("Failed deleting of " + editsWriter.path); @@ -209,7 +207,7 @@ final class RecoveredEditsWriter { final long minLogSeqNum; RecoveredEditsWriter(byte[] encodedRegionName, Path path, WALProvider.Writer writer, - long minLogSeqNum) { + long minLogSeqNum) { this.encodedRegionName = encodedRegionName; this.path = path; this.writer = writer; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java index 672b41e26057..32eb76334b98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,14 +48,13 @@ * NOTE: This class extends Thread rather than Chore because the sleep time can be interrupted when * there is something to do, rather than the Chore sleep time which is invariant. *

      - * The {@link #scheduleFlush(String, List)} is abstract here, - * as sometimes we hold a region without a region server but we still want to roll its WAL. + * The {@link #scheduleFlush(String, List)} is abstract here, as sometimes we hold a region without + * a region server but we still want to roll its WAL. *

      * TODO: change to a pool of threads */ @InterfaceAudience.Private -public abstract class AbstractWALRoller extends Thread - implements Closeable { +public abstract class AbstractWALRoller extends Thread implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(AbstractWALRoller.class); protected static final String WAL_ROLL_PERIOD_KEY = "hbase.regionserver.logroll.period"; @@ -67,9 +66,8 @@ public abstract class AbstractWALRoller extends Thread public static final long DEFAULT_WAL_ROLL_WAIT_TIMEOUT = 30000; /** - * Configure for the max count of log rolling retry. - * The real retry count is also limited by the timeout of log rolling - * via {@link #WAL_ROLL_WAIT_TIMEOUT} + * Configure for the max count of log rolling retry. The real retry count is also limited by the + * timeout of log rolling via {@link #WAL_ROLL_WAIT_TIMEOUT} */ protected static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries"; @@ -130,7 +128,7 @@ protected AbstractWALRoller(String name, Configuration conf, T abortable) { this.rollPeriod = conf.getLong(WAL_ROLL_PERIOD_KEY, 3600000); this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000); this.checkLowReplicationInterval = - conf.getLong("hbase.regionserver.hlog.check.lowreplication.interval", 30 * 1000); + conf.getLong("hbase.regionserver.hlog.check.lowreplication.interval", 30 * 1000); this.rollWaitTimeout = conf.getLong(WAL_ROLL_WAIT_TIMEOUT, DEFAULT_WAL_ROLL_WAIT_TIMEOUT); // retry rolling does not have to be the default behavior, so the default value is 0 here this.maxRollRetry = conf.getInt(WAL_ROLL_RETRIES, 0); @@ -190,15 +188,15 @@ public void run() { } } try { - for (Iterator> iter = wals.entrySet().iterator(); - iter.hasNext();) { + for (Iterator> iter = wals.entrySet().iterator(); iter + .hasNext();) { Entry entry = iter.next(); WAL wal = entry.getKey(); RollController controller = entry.getValue(); if (controller.isRollRequested()) { // WAL roll requested, fall through LOG.debug("WAL {} roll requested", wal); - } else if (controller.needsPeriodicRoll(now)){ + } else if (controller.needsPeriodicRoll(now)) { // Time for periodic roll, fall through LOG.debug("WAL {} roll period {} ms elapsed", wal, this.rollPeriod); } else { @@ -223,7 +221,8 @@ public void run() { if (waitingTime < rollWaitTimeout && nAttempts < maxRollRetry) { nAttempts++; LOG.warn("Retry to roll log, nAttempts={}, waiting time={}ms, sleeping 1s to retry," - + " last exception", nAttempts, waitingTime, ioe); + + " last exception", + nAttempts, waitingTime, ioe); sleep(1000); } else { LOG.error("Roll wal failed and waiting timeout, will not retry", ioe); @@ -271,8 +270,7 @@ private boolean isWaiting() { public boolean walRollFinished() { // TODO add a status field of roll in RollController return wals.values().stream() - .noneMatch(rc -> rc.needsRoll(EnvironmentEdgeManager.currentTime())) - && isWaiting(); + .noneMatch(rc -> rc.needsRoll(EnvironmentEdgeManager.currentTime())) && isWaiting(); } /** @@ -291,8 +289,8 @@ public void close() { } /** - * Independently control the roll of each wal. When use multiwal, - * can avoid all wal roll together. see HBASE-24665 for detail + * Independently control the roll of each wal. When use multiwal, can avoid all wal roll together. 
+ * see HBASE-24665 for detail */ protected class RollController { private final WAL wal; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java index 06729e2356a2..c45be9d443a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,7 +56,7 @@ public interface AsyncWriter extends WALProvider.AsyncWriter { /** * @throws IOException if something goes wrong initializing an output stream * @throws StreamLacksCapabilityException if the given FileSystem can't provide streams that - * meet the needs of the given Writer implementation. + * meet the needs of the given Writer implementation. */ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long blocksize, StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; @@ -69,16 +69,16 @@ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long @Override protected AsyncFSWAL createWAL() throws IOException { return new AsyncFSWAL(CommonFSUtils.getWALFileSystem(conf), this.abortable, - CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), - getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, - META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null, eventLoopGroup, - channelClass, factory.getExcludeDatanodeManager().getStreamSlowMonitor(providerId)); + CommonFSUtils.getWALRootDir(conf), getWALDirectoryName(factory.factoryId), + getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix, + META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null, eventLoopGroup, + channelClass, factory.getExcludeDatanodeManager().getStreamSlowMonitor(providerId)); } @Override protected void doInit(Configuration conf) throws IOException { Pair> eventLoopGroupAndChannelClass = - NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf); + NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf); eventLoopGroup = eventLoopGroupAndChannelClass.getFirst(); channelClass = eventLoopGroupAndChannelClass.getSecond(); } @@ -87,10 +87,10 @@ protected void doInit(Configuration conf) throws IOException { * Public because of AsyncFSWAL. Should be package-private */ public static AsyncWriter createAsyncWriter(Configuration conf, FileSystem fs, Path path, - boolean overwritable, EventLoopGroup eventLoopGroup, - Class channelClass) throws IOException { + boolean overwritable, EventLoopGroup eventLoopGroup, Class channelClass) + throws IOException { return createAsyncWriter(conf, fs, path, overwritable, WALUtil.getWALBlockSize(conf, fs, path), - eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, path.getName())); + eventLoopGroup, channelClass, StreamSlowMonitor.create(conf, path.getName())); } /** @@ -100,8 +100,8 @@ public static AsyncWriter createAsyncWriter(Configuration conf, FileSystem fs, P boolean overwritable, long blocksize, EventLoopGroup eventLoopGroup, Class channelClass, StreamSlowMonitor monitor) throws IOException { // Configuration already does caching for the Class lookup. 
- Class logWriterClass = conf.getClass( - WRITER_IMPL, AsyncProtobufLogWriter.class, AsyncWriter.class); + Class logWriterClass = + conf.getClass(WRITER_IMPL, AsyncProtobufLogWriter.class, AsyncWriter.class); try { AsyncWriter writer = logWriterClass.getConstructor(EventLoopGroup.class, Class.class) .newInstance(eventLoopGroup, channelClass); @@ -109,11 +109,11 @@ public static AsyncWriter createAsyncWriter(Configuration conf, FileSystem fs, P return writer; } catch (Exception e) { if (e instanceof CommonFSUtils.StreamLacksCapabilityException) { - LOG.error("The RegionServer async write ahead log provider " + - "relies on the ability to call " + e.getMessage() + " for proper operation during " + - "component failures, but the current FileSystem does not support doing so. Please " + - "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + - "it points to a FileSystem mount that has suitable capabilities for output streams."); + LOG.error("The RegionServer async write ahead log provider " + + "relies on the ability to call " + e.getMessage() + " for proper operation during " + + "component failures, but the current FileSystem does not support doing so. Please " + + "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + + "it points to a FileSystem mount that has suitable capabilities for output streams."); } else { LOG.debug("Error instantiating log writer.", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java index ed3c8b7f3e2a..7db007e1d2eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedEntryBuffers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,10 @@ /** * Used for {@link BoundedRecoveredEditsOutputSink}. The core part of limiting opening writers is it - * doesn't return chunk only if the heap size is over maxHeapUsage. Thus it doesn't need to create - * a writer for each region during splitting. The returned {@link EntryBuffers.RegionEntryBuffer} - * will be write to recovered edits file and close the writer immediately. - * See {@link BoundedRecoveredEditsOutputSink#append(EntryBuffers.RegionEntryBuffer)} for more - * details. + * doesn't return chunk only if the heap size is over maxHeapUsage. Thus it doesn't need to create a + * writer for each region during splitting. The returned {@link EntryBuffers.RegionEntryBuffer} will + * be write to recovered edits file and close the writer immediately. See + * {@link BoundedRecoveredEditsOutputSink#append(EntryBuffers.RegionEntryBuffer)} for more details. */ @InterfaceAudience.Private public class BoundedEntryBuffers extends EntryBuffers { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java index bafcee339e7d..cf531354e440 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,18 +21,17 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.RegionGroupingProvider.RegionGroupingStrategy; +import org.apache.yetus.audience.InterfaceAudience; /** * A WAL grouping strategy that limits the number of wal groups to * "hbase.wal.regiongrouping.numgroups". */ @InterfaceAudience.Private -public class BoundedGroupingStrategy implements RegionGroupingStrategy{ +public class BoundedGroupingStrategy implements RegionGroupingStrategy { static final String NUM_REGION_GROUPS = "hbase.wal.regiongrouping.numgroups"; static final int DEFAULT_NUM_REGION_GROUPS = 2; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java index e2aa478075c3..b728bbf82758 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredEditsOutputSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.MultipleIOException; @@ -36,14 +35,13 @@ import org.slf4j.LoggerFactory; /** - * Class that manages the output streams from the log splitting process. - * Every region may have many recovered edits file. But the opening writers is bounded. - * Bounded means the output streams will be no more than the size of threadpool. + * Class that manages the output streams from the log splitting process. Every region may have many + * recovered edits file. But the opening writers is bounded. Bounded means the output streams will + * be no more than the size of threadpool. */ @InterfaceAudience.Private class BoundedRecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { - private static final Logger LOG = - LoggerFactory.getLogger(BoundedRecoveredEditsOutputSink.class); + private static final Logger LOG = LoggerFactory.getLogger(BoundedRecoveredEditsOutputSink.class); // Since the splitting process may create multiple output files, we need a map // to track the output count of each region. @@ -57,17 +55,15 @@ public BoundedRecoveredEditsOutputSink(WALSplitter walSplitter, } @Override - public void append(EntryBuffers.RegionEntryBuffer buffer) - throws IOException { + public void append(EntryBuffers.RegionEntryBuffer buffer) throws IOException { List entries = buffer.entryBuffer; if (entries.isEmpty()) { LOG.warn("got an empty buffer, skipping"); return; } // The key point is create a new writer, write edits then close writer. 
- RecoveredEditsWriter writer = - createRecoveredEditsWriter(buffer.tableName, buffer.encodedRegionName, - entries.get(0).getKey().getSequenceId()); + RecoveredEditsWriter writer = createRecoveredEditsWriter(buffer.tableName, + buffer.encodedRegionName, entries.get(0).getKey().getSequenceId()); if (writer != null) { openingWritersNum.incrementAndGet(); writer.writeRegionEntries(entries); @@ -96,7 +92,6 @@ public List close() throws IOException { /** * Write out the remaining RegionEntryBuffers and close the writers. - * * @return true when there is no error. */ private boolean writeRemainingEntryBuffers() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index 50bc5fe62fb8..71bd9c331cf3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.wal; import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; + import java.io.IOException; import java.io.InterruptedIOException; import java.util.HashMap; @@ -49,10 +50,10 @@ import org.slf4j.LoggerFactory; /** - * A WALSplitter sink that outputs {@link org.apache.hadoop.hbase.io.hfile.HFile}s. - * Runs with a bounded number of HFile writers at any one time rather than let the count run up. + * A WALSplitter sink that outputs {@link org.apache.hadoop.hbase.io.hfile.HFile}s. Runs with a + * bounded number of HFile writers at any one time rather than let the count run up. * @see BoundedRecoveredEditsOutputSink for a sink implementation that writes intermediate - * recovered.edits files. + * recovered.edits files. */ @InterfaceAudience.Private public class BoundedRecoveredHFilesOutputSink extends OutputSink { @@ -67,7 +68,7 @@ public class BoundedRecoveredHFilesOutputSink extends OutputSink { private final AtomicInteger openingWritersNum = new AtomicInteger(0); public BoundedRecoveredHFilesOutputSink(WALSplitter walSplitter, - WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { + WALSplitter.PipelineController controller, EntryBuffers entryBuffers, int numWriters) { super(controller, entryBuffers, numWriters); this.walSplitter = walSplitter; } @@ -136,7 +137,6 @@ public List close() throws IOException { /** * Write out the remaining RegionEntryBuffers and close the writers. - * * @return true when there is no error. */ private boolean writeRemainingEntryBuffers() throws IOException { @@ -188,8 +188,8 @@ boolean keepRegionEvent(Entry entry) { } /** - * @return Returns a base HFile without compressions or encodings; good enough for recovery - * given hfile has metadata on how it was written. + * @return Returns a base HFile without compressions or encodings; good enough for recovery given + * hfile has metadata on how it was written. */ private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String regionName, long seqId, String familyName, boolean isMetaTable) throws IOException { @@ -198,11 +198,12 @@ private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String r StoreFileWriter.Builder writerBuilder = new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS) .withOutputDir(outputDir); - HFileContext hFileContext = new HFileContextBuilder(). 
- withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)). - withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)). - withCellComparator(isMetaTable? - MetaCellComparator.META_COMPARATOR: CellComparatorImpl.COMPARATOR).build(); + HFileContext hFileContext = + new HFileContextBuilder().withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)) + .withCellComparator( + isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR) + .build(); return writerBuilder.withFileContext(hFileContext).build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 6e5a0538296c..887a39fa33ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; @@ -44,10 +43,8 @@ // imports for things that haven't moved from regionserver.wal yet. /** - * No-op implementation of {@link WALProvider} used when the WAL is disabled. - * - * Should only be used when severe data loss is acceptable. - * + * No-op implementation of {@link WALProvider} used when the WAL is disabled. Should only be used + * when severe data loss is acceptable. 
*/ @InterfaceAudience.Private class DisabledWALProvider implements WALProvider { @@ -101,7 +98,7 @@ public DisabledWAL(final Path path, final Configuration conf, this.coprocessorHost = new WALCoprocessorHost(this, conf); this.path = path; if (null != listeners) { - for(WALActionsListener listener : listeners) { + for (WALActionsListener listener : listeners) { registerWALActionsListener(listener); } } @@ -148,7 +145,7 @@ public Map> rollWriter(boolean force) { @Override public void shutdown() { - if(closed.compareAndSet(false, true)) { + if (closed.compareAndSet(false, true)) { if (!this.listeners.isEmpty()) { for (WALActionsListener listener : this.listeners) { listener.logCloseRequested(); @@ -168,8 +165,7 @@ public long appendData(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IO } @Override - public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) - throws IOException { + public long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IOException { return append(info, key, edits, false); } @@ -197,8 +193,10 @@ private long append(RegionInfo info, WALKeyImpl key, WALEdit edits, boolean inMe } @Override - public void updateStore(byte[] encodedRegionName, byte[] familyName, - Long sequenceid, boolean onlyIfGreater) { return; } + public void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, + boolean onlyIfGreater) { + return; + } @Override public void sync() { @@ -215,8 +213,8 @@ public void sync(long txid) { } @Override - public Long startCacheFlush(final byte[] encodedRegionName, Map - flushedFamilyNamesToSeq) { + public Long startCacheFlush(final byte[] encodedRegionName, + Map flushedFamilyNamesToSeq) { return startCacheFlush(encodedRegionName, flushedFamilyNamesToSeq.keySet()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java index 0ca1219bd26f..ec191e2c3ce3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/EntryBuffers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.Bytes; @@ -132,10 +131,9 @@ synchronized boolean isRegionCurrentlyWriting(byte[] region) { } /** - * A buffer of some number of edits for a given region. - * This accumulates edits and also provides a memory optimization in order to - * share a single byte array instance for the table and region name. - * Also tracks memory usage of the accumulated edits. + * A buffer of some number of edits for a given region. This accumulates edits and also provides a + * memory optimization in order to share a single byte array instance for the table and region + * name. Also tracks memory usage of the accumulated edits. 
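As a rough, self-contained sketch of the buffering idea described here (accumulate edits per region while keeping a cheap running estimate of heap usage), with illustrative names only, not the HBase classes:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy model of per-region edit buffering with heap accounting, in the spirit of
// EntryBuffers.RegionEntryBuffer; sizes and names are illustrative only.
public class RegionBufferSketch {
  static final class Buffer {
    final List<String> edits = new ArrayList<>();
    long heapBytes; // rough running total, like heapInBuffer

    long append(String edit) {
      edits.add(edit);
      long incr = 2L * edit.length() + 16; // stand-in for edit.heapSize() + reference overhead
      heapBytes += incr;
      return incr;
    }
  }

  public static void main(String[] args) {
    Map<String, Buffer> buffers = new HashMap<>();
    long totalHeap = 0;
    totalHeap += buffers.computeIfAbsent("region-a", r -> new Buffer()).append("put row1");
    totalHeap += buffers.computeIfAbsent("region-b", r -> new Buffer()).append("delete row2");
    // A bounded implementation would stop handing out chunks once totalHeap exceeds a
    // configured maxHeapUsage, which is the role BoundedEntryBuffers plays above.
    System.out.println("approximate buffered heap = " + totalHeap + " bytes");
  }
}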
*/ static class RegionEntryBuffer implements HeapSize { private long heapInBuffer = 0; @@ -153,8 +151,8 @@ long appendEntry(WAL.Entry entry) { internify(entry); entryBuffer.add(entry); // TODO linkedlist entry - long incrHeap = entry.getEdit().heapSize() + - ClassSize.align(2 * ClassSize.REFERENCE); // WALKey pointers + long incrHeap = entry.getEdit().heapSize() + ClassSize.align(2 * ClassSize.REFERENCE); // WALKey + // pointers heapInBuffer += incrHeap; return incrHeap; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java index f5c39c0edf27..2ab9091e88de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +46,7 @@ public interface Writer extends WALProvider.Writer { /** * @throws IOException if something goes wrong initializing an output stream * @throws StreamLacksCapabilityException if the given FileSystem can't provide streams that - * meet the needs of the given Writer implementation. + * meet the needs of the given Writer implementation. */ void init(FileSystem fs, Path path, Configuration c, boolean overwritable, long blocksize, StreamSlowMonitor monitor) throws IOException, CommonFSUtils.StreamLacksCapabilityException; @@ -71,22 +70,21 @@ public static Writer createWriter(final Configuration conf, final FileSystem fs, final boolean overwritable, long blocksize) throws IOException { // Configuration already does caching for the Class lookup. Class logWriterClass = - conf.getClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class, - Writer.class); + conf.getClass("hbase.regionserver.hlog.writer.impl", ProtobufLogWriter.class, Writer.class); Writer writer = null; try { writer = logWriterClass.getDeclaredConstructor().newInstance(); FileSystem rootFs = FileSystem.get(path.toUri(), conf); writer.init(rootFs, path, conf, overwritable, blocksize, - StreamSlowMonitor.create(conf, path.getName())); + StreamSlowMonitor.create(conf, path.getName())); return writer; - } catch (Exception e) { + } catch (Exception e) { if (e instanceof CommonFSUtils.StreamLacksCapabilityException) { - LOG.error("The RegionServer write ahead log provider for FileSystem implementations " + - "relies on the ability to call " + e.getMessage() + " for proper operation during " + - "component failures, but the current FileSystem does not support doing so. Please " + - "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + - "it points to a FileSystem mount that has suitable capabilities for output streams."); + LOG.error("The RegionServer write ahead log provider for FileSystem implementations " + + "relies on the ability to call " + e.getMessage() + " for proper operation during " + + "component failures, but the current FileSystem does not support doing so. 
Please " + + "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure " + + "it points to a FileSystem mount that has suitable capabilities for output streams."); } else { LOG.debug("Error instantiating log writer.", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java index 3022a25fdb11..c718fb961725 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.RegionGroupingProvider.RegionGroupingStrategy; +import org.apache.yetus.audience.InterfaceAudience; /** - * A WAL grouping strategy based on namespace. - * Notice: the wal-group mapping might change if we support dynamic namespace updating later, - * and special attention needed if we support feature like group-based replication. + * A WAL grouping strategy based on namespace. Notice: the wal-group mapping might change if we + * support dynamic namespace updating later, and special attention needed if we support feature like + * group-based replication. */ @InterfaceAudience.Private public class NamespaceGroupingStrategy implements RegionGroupingStrategy { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java index 7f33eda9e652..ad1a20984ebc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,5 +68,6 @@ static Pair> getEventLoopConfig(Configu return EVENT_LOOP_CONFIG_MAP.get(name); } - private NettyAsyncFSWALConfigHelper() {} + private NettyAsyncFSWALConfigHelper() { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java index d995b979ea3b..9ddb28ded2ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,17 +27,16 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * The following class is an abstraction class to provide a common interface to support different @@ -78,7 +77,7 @@ public OutputSink(WALSplitter.PipelineController controller, EntryBuffers entryB this.entryBuffers = entryBuffers; this.closeThreadPool = Threads.getBoundedCachedThreadPool(numThreads, 30L, TimeUnit.SECONDS, new ThreadFactoryBuilder().setNameFormat("split-log-closeStream-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); this.closeCompletionService = new ExecutorCompletionService<>(closeThreadPool); } @@ -103,7 +102,6 @@ void startWriterThreads() throws IOException { /** * Wait for writer threads to dump all info to the sink - * * @return true when there is no error */ boolean finishWriterThreads() throws IOException { @@ -191,7 +189,7 @@ public static class WriterThread extends Thread { } @Override - public void run() { + public void run() { try { doRun(); } catch (Throwable t) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java index 645af60efcb4..eaa668ac3d6e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RecoveredEditsOutputSink.java @@ -34,15 +34,15 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Class that manages the output streams from the log splitting process. - * Every region only has one recovered edits file PER split WAL (if we split - * multiple WALs during a log-splitting session, on open, a Region may - * have multiple recovered.edits files to replay -- one per split WAL). - * @see BoundedRecoveredEditsOutputSink which is like this class but imposes upper bound on - * the number of writers active at one time (makes for better throughput). + * Class that manages the output streams from the log splitting process. Every region only has one + * recovered edits file PER split WAL (if we split multiple WALs during a log-splitting session, on + * open, a Region may have multiple recovered.edits files to replay -- one per split WAL). + * @see BoundedRecoveredEditsOutputSink which is like this class but imposes upper bound on the + * number of writers active at one time (makes for better throughput). 
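The contrast drawn above can be sketched in a few lines: the bounded sink opens a writer just for the chunk it is handed, writes it, and closes it right away, so the number of open writers follows the thread-pool size rather than the number of regions. Names below are illustrative, not the HBase classes:

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Toy illustration of the "bounded" pattern: open a writer per buffered chunk,
// write it, and close immediately so open writers stay at or below the pool size.
public class BoundedSinkSketch {
  static final AtomicInteger openWriters = new AtomicInteger();

  static void appendBounded(String region, List<String> entries) {
    openWriters.incrementAndGet(); // mirrors openingWritersNum in the bounded sinks above
    try (SimpleWriter w = new SimpleWriter(region)) {
      entries.forEach(w::write);
    } finally {
      openWriters.decrementAndGet();
    }
  }

  static final class SimpleWriter implements AutoCloseable {
    private final String region;
    SimpleWriter(String region) { this.region = region; }
    void write(String entry) { System.out.println(region + " <- " + entry); }
    @Override public void close() { System.out.println("closed writer for " + region); }
  }

  public static void main(String[] args) {
    appendBounded("region-a", List.of("edit-1", "edit-2"));
    System.out.println("open writers now: " + openWriters.get());
  }
}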
*/ @InterfaceAudience.Private class RecoveredEditsOutputSink extends AbstractRecoveredEditsOutputSink { @@ -55,16 +55,14 @@ public RecoveredEditsOutputSink(WALSplitter walSplitter, } @Override - public void append(EntryBuffers.RegionEntryBuffer buffer) - throws IOException { + public void append(EntryBuffers.RegionEntryBuffer buffer) throws IOException { List entries = buffer.entryBuffer; if (entries.isEmpty()) { LOG.warn("got an empty buffer, skipping"); return; } - RecoveredEditsWriter writer = - getRecoveredEditsWriter(buffer.tableName, buffer.encodedRegionName, - entries.get(0).getKey().getSequenceId()); + RecoveredEditsWriter writer = getRecoveredEditsWriter(buffer.tableName, + buffer.encodedRegionName, entries.get(0).getKey().getSequenceId()); if (writer != null) { writer.writeRegionEntries(entries); } @@ -103,7 +101,6 @@ public List close() throws IOException { /** * Close all of the output streams. - * * @return true when there is no error. */ private boolean closeWriters() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 20d043b6ae26..800e49bd299a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,13 +27,11 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.Lock; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; -// imports for classes still in regionserver.wal import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.KeyLocker; @@ -43,18 +40,15 @@ import org.slf4j.LoggerFactory; /** - * A WAL Provider that returns a WAL per group of regions. - * - * This provider follows the decorator pattern and mainly holds the logic for WAL grouping. - * WAL creation/roll/close is delegated to {@link #DELEGATE_PROVIDER} - * - * Region grouping is handled via {@link RegionGroupingStrategy} and can be configured via the - * property "hbase.wal.regiongrouping.strategy". Current strategy choices are + * A WAL Provider that returns a WAL per group of regions. This provider follows the decorator + * pattern and mainly holds the logic for WAL grouping. WAL creation/roll/close is delegated to + * {@link #DELEGATE_PROVIDER} Region grouping is handled via {@link RegionGroupingStrategy} and can + * be configured via the property "hbase.wal.regiongrouping.strategy". Current strategy choices are *

 * <ul>
- *   <li>defaultStrategy : Whatever strategy this version of HBase picks. currently
- *   "bounded".</li>
- *   <li>identity : each region belongs to its own group.</li>
- *   <li>bounded : bounded number of groups and region evenly assigned to each group.</li>
+ * <li>defaultStrategy : Whatever strategy this version of HBase picks. currently
+ * "bounded".</li>
+ * <li>identity : each region belongs to its own group.</li>
+ * <li>bounded : bounded number of groups and region evenly assigned to each group.</li>
 * </ul>
      * Optionally, a FQCN to a custom implementation may be given. */ @@ -72,6 +66,7 @@ public static interface RegionGroupingStrategy { * Given an identifier and a namespace, pick a group. */ String group(final byte[] identifier, byte[] namespace); + void init(Configuration config, String providerId); } @@ -79,20 +74,19 @@ public static interface RegionGroupingStrategy { * Maps between configuration names for strategies and implementation classes. */ static enum Strategies { - defaultStrategy(BoundedGroupingStrategy.class), - identity(IdentityGroupingStrategy.class), - bounded(BoundedGroupingStrategy.class), - namespace(NamespaceGroupingStrategy.class); + defaultStrategy(BoundedGroupingStrategy.class), identity(IdentityGroupingStrategy.class), + bounded(BoundedGroupingStrategy.class), namespace(NamespaceGroupingStrategy.class); final Class clazz; + Strategies(Class clazz) { this.clazz = clazz; } } /** - * instantiate a strategy from a config property. - * requires conf to have already been set (as well as anything the provider might need to read). + * instantiate a strategy from a config property. requires conf to have already been set (as well + * as anything the provider might need to read). */ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, final String defaultValue) throws IOException { @@ -111,8 +105,8 @@ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, result.init(conf, providerId); return result; } catch (Exception e) { - LOG.error("couldn't set up region grouping strategy, check config key " + - REGION_GROUPING_STRATEGY); + LOG.error( + "couldn't set up region grouping strategy, check config key " + REGION_GROUPING_STRATEGY); LOG.debug("Exception details for failure to load region grouping strategy.", e); throw new IOException("couldn't set up region grouping strategy", e); } @@ -123,8 +117,8 @@ RegionGroupingStrategy getStrategy(final Configuration conf, final String key, /** delegate provider for WAL creation/roll/close, but not support multiwal */ public static final String DELEGATE_PROVIDER = "hbase.wal.regiongrouping.delegate.provider"; - public static final String DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider - .name(); + public static final String DEFAULT_DELEGATE_PROVIDER = + WALFactory.Providers.defaultProvider.name(); private static final String META_WAL_GROUP_NAME = "meta"; @@ -228,7 +222,7 @@ public WAL getWAL(RegionInfo region) throws IOException { public void shutdown() throws IOException { // save the last exception and rethrow IOException failure = null; - for (WALProvider provider: cached.values()) { + for (WALProvider provider : cached.values()) { try { provider.shutdown(); } catch (IOException e) { @@ -266,7 +260,9 @@ public void close() throws IOException { static class IdentityGroupingStrategy implements RegionGroupingStrategy { @Override - public void init(Configuration config, String providerId) {} + public void init(Configuration config, String providerId) { + } + @Override public String group(final byte[] identifier, final byte[] namespace) { return Bytes.toString(identifier); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java index f57ec31c531a..b703badfd809 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -76,7 +76,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen private final WALProvider provider; private SyncReplicationPeerInfoProvider peerInfoProvider = - new DefaultSyncReplicationPeerInfoProvider(); + new DefaultSyncReplicationPeerInfoProvider(); private WALFactory factory; @@ -94,7 +94,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen // the peer yet. When getting WAL from this map the caller should know that it should not use // DualAsyncFSWAL any more. private final ConcurrentMap> peerId2WAL = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private final KeyLocker createLock = new KeyLocker<>(); @@ -116,7 +116,7 @@ public void init(WALFactory factory, Configuration conf, String providerId, Abor this.conf = conf; this.factory = factory; Pair> eventLoopGroupAndChannelClass = - NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf); + NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf); eventLoopGroup = eventLoopGroupAndChannelClass.getFirst(); channelClass = eventLoopGroupAndChannelClass.getSecond(); } @@ -131,7 +131,7 @@ private String getLogPrefix(String peerId) { private DualAsyncFSWAL createWAL(String peerId, String remoteWALDir) throws IOException { Class clazz = - conf.getClass(DUAL_WAL_IMPL, DualAsyncFSWAL.class, DualAsyncFSWAL.class); + conf.getClass(DUAL_WAL_IMPL, DualAsyncFSWAL.class, DualAsyncFSWAL.class); try { Constructor constructor = null; for (Constructor c : clazz.getDeclaredConstructors()) { @@ -144,8 +144,7 @@ private DualAsyncFSWAL createWAL(String peerId, String remoteWALDir) throws IOEx throw new IllegalArgumentException("No valid constructor provided for class " + clazz); } constructor.setAccessible(true); - return (DualAsyncFSWAL) constructor.newInstance( - CommonFSUtils.getWALFileSystem(conf), + return (DualAsyncFSWAL) constructor.newInstance(CommonFSUtils.getWALFileSystem(conf), ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir), CommonFSUtils.getWALRootDir(conf), ReplicationUtils.getPeerRemoteWALDir(remoteWALDir, peerId), @@ -263,7 +262,7 @@ public long getNumLogFiles() { @Override public long getLogFileSize() { return peerId2WAL.values().stream().filter(Optional::isPresent).map(Optional::get) - .mapToLong(DualAsyncFSWAL::getLogFileSize).sum() + provider.getLogFileSize(); + .mapToLong(DualAsyncFSWAL::getLogFileSize).sum() + provider.getLogFileSize(); } private void safeClose(WAL wal) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 2a434a73b672..13d4a1e7e269 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -33,10 +33,9 @@ /** * A Write Ahead Log (WAL) provides service for reading, writing waledits. This interface provides - * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc). - * - * Note that some internals, such as log rolling and performance evaluation tools, will use - * WAL.equals to determine if they have already seen a given WAL. + * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc). 
Note that some + * internals, such as log rolling and performance evaluation tools, will use WAL.equals to determine + * if they have already seen a given WAL. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -54,43 +53,37 @@ public interface WAL extends Closeable, WALFileLengthProvider { /** * Roll the log writer. That is, start writing log messages to a new file. - * *

      - * The implementation is synchronized in order to make sure there's one rollWriter - * running at any given time. - * - * @return If lots of logs, flush the stores of returned regions so next time through we - * can clean logs. Returns null if nothing to flush. Names are actual - * region names as returned by {@link RegionInfo#getEncodedName()} + * The implementation is synchronized in order to make sure there's one rollWriter running at any + * given time. + * @return If lots of logs, flush the stores of returned regions so next time through we can clean + * logs. Returns null if nothing to flush. Names are actual region names as returned by + * {@link RegionInfo#getEncodedName()} */ Map> rollWriter() throws FailedLogCloseException, IOException; /** * Roll the log writer. That is, start writing log messages to a new file. - * *

      - * The implementation is synchronized in order to make sure there's one rollWriter - * running at any given time. - * - * @param force - * If true, force creation of a new writer even if no entries have - * been written to the current writer - * @return If lots of logs, flush the stores of returned regions so next time through we - * can clean logs. Returns null if nothing to flush. Names are actual - * region names as returned by {@link RegionInfo#getEncodedName()} + * The implementation is synchronized in order to make sure there's one rollWriter running at any + * given time. + * @param force If true, force creation of a new writer even if no entries have been written to + * the current writer + * @return If lots of logs, flush the stores of returned regions so next time through we can clean + * logs. Returns null if nothing to flush. Names are actual region names as returned by + * {@link RegionInfo#getEncodedName()} */ Map> rollWriter(boolean force) throws IOException; /** - * Stop accepting new writes. If we have unsynced writes still in buffer, sync them. - * Extant edits are left in place in backing storage to be replayed later. + * Stop accepting new writes. If we have unsynced writes still in buffer, sync them. Extant edits + * are left in place in backing storage to be replayed later. */ void shutdown() throws IOException; /** - * Caller no longer needs any edits from this WAL. Implementers are free to reclaim - * underlying resources after this call; i.e. filesystem based WALs can archive or - * delete files. + * Caller no longer needs any edits from this WAL. Implementers are free to reclaim underlying + * resources after this call; i.e. filesystem based WALs can archive or delete files. */ @Override void close() throws IOException; @@ -113,11 +106,11 @@ public interface WAL extends Closeable, WALFileLengthProvider { long appendData(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IOException; /** - * Append an operational 'meta' event marker edit to the WAL. A marker meta edit could - * be a FlushDescriptor, a compaction marker, or a region event marker; e.g. region open - * or region close. The difference between a 'marker' append and a 'data' append as in - * {@link #appendData(RegionInfo, WALKeyImpl, WALEdit)}is that a marker will not have - * transitioned through the memstore. + * Append an operational 'meta' event marker edit to the WAL. A marker meta edit could be a + * FlushDescriptor, a compaction marker, or a region event marker; e.g. region open or region + * close. The difference between a 'marker' append and a 'data' append as in + * {@link #appendData(RegionInfo, WALKeyImpl, WALEdit)}is that a marker will not have transitioned + * through the memstore. *

      * The WAL is not flushed/sync'd after this transaction completes BUT on return this edit must * have its region edit/sequence id assigned else it messes up our unification of mvcc and @@ -133,9 +126,8 @@ public interface WAL extends Closeable, WALFileLengthProvider { long appendMarker(RegionInfo info, WALKeyImpl key, WALEdit edits) throws IOException; /** - * updates the seuence number of a specific store. - * depending on the flag: replaces current seq number if the given seq id is bigger, - * or even if it is lower than existing one + * updates the seuence number of a specific store. depending on the flag: replaces current seq + * number if the given seq id is bigger, or even if it is lower than existing one */ void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, boolean onlyIfGreater); @@ -169,19 +161,19 @@ default void sync(long txid, boolean forceSync) throws IOException { } /** - * WAL keeps track of the sequence numbers that are as yet not flushed im memstores - * in order to be able to do accounting to figure which WALs can be let go. This method tells WAL - * that some region is about to flush. The flush can be the whole region or for a column family - * of the region only. - * - *

      Currently, it is expected that the update lock is held for the region; i.e. no - * concurrent appends while we set up cache flush. + * WAL keeps track of the sequence numbers that are as yet not flushed im memstores in order to be + * able to do accounting to figure which WALs can be let go. This method tells WAL that some + * region is about to flush. The flush can be the whole region or for a column family of the + * region only. + *

      + * Currently, it is expected that the update lock is held for the region; i.e. no concurrent + * appends while we set up cache flush. * @param families Families to flush. May be a subset of all families in the region. - * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if - * we are flushing a subset of all families but there are no edits in those families not - * being flushed; in other words, this is effectively same as a flush of all of the region - * though we were passed a subset of regions. Otherwise, it returns the sequence id of the - * oldest/lowest outstanding edit. + * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if we are + * flushing a subset of all families but there are no edits in those families not being + * flushed; in other words, this is effectively same as a flush of all of the region + * though we were passed a subset of regions. Otherwise, it returns the sequence id of the + * oldest/lowest outstanding edit. * @see #completeCacheFlush(byte[], long) * @see #abortCacheFlush(byte[]) */ @@ -200,9 +192,9 @@ default void sync(long txid, boolean forceSync) throws IOException { void completeCacheFlush(final byte[] encodedRegionName, long maxFlushedSeqId); /** - * Abort a cache flush. Call if the flush fails. Note that the only recovery - * for an aborted flush currently is a restart of the regionserver so the - * snapshot content dropped by the failure gets restored to the memstore. + * Abort a cache flush. Call if the flush fails. Note that the only recovery for an aborted flush + * currently is a restart of the regionserver so the snapshot content dropped by the failure gets + * restored to the memstore. * @param encodedRegionName Encoded region name. */ void abortCacheFlush(byte[] encodedRegionName); @@ -217,7 +209,7 @@ default void sync(long txid, boolean forceSync) throws IOException { * @param encodedRegionName The region to get the number for. * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent. * @deprecated Since version 1.2.0. Removing because not used and exposes subtle internal - * workings. Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])} + * workings. Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])} */ @Deprecated long getEarliestMemStoreSeqNum(byte[] encodedRegionName); @@ -231,23 +223,25 @@ default void sync(long txid, boolean forceSync) throws IOException { long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName); /** - * Human readable identifying information about the state of this WAL. - * Implementors are encouraged to include information appropriate for debugging. - * Consumers are advised not to rely on the details of the returned String; it does - * not have a defined structure. + * Human readable identifying information about the state of this WAL. Implementors are encouraged + * to include information appropriate for debugging. Consumers are advised not to rely on the + * details of the returned String; it does not have a defined structure. */ @Override String toString(); /** - * When outside clients need to consume persisted WALs, they rely on a provided - * Reader. + * When outside clients need to consume persisted WALs, they rely on a provided Reader. 
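A compilable sketch of the flush-accounting handshake spelled out above (startCacheFlush, then completeCacheFlush on success or abortCacheFlush on failure). The doFlush placeholder and the null-return handling are assumptions for illustration, not taken from this patch; verify them against your HBase version:

import java.io.IOException;
import java.util.Set;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.wal.WAL;

// Sketch of the flush-accounting contract described above; assumes the caller
// already holds the region's update lock, as the javadoc requires.
public final class FlushAccountingSketch {
  private FlushAccountingSketch() {
  }

  public static void flushWithWalAccounting(WAL wal, byte[] encodedRegionName,
      Set<byte[]> families) throws IOException {
    // Returns the oldest unflushed sequence id, or HConstants.NO_SEQNUM as described above;
    // implementations may return null to veto the flush (e.g. the WAL is closing).
    Long oldestUnflushed = wal.startCacheFlush(encodedRegionName, families);
    if (oldestUnflushed == null) {
      return;
    }
    try {
      long maxFlushedSeqId = doFlush(families); // placeholder for the actual memstore flush
      wal.completeCacheFlush(encodedRegionName, maxFlushedSeqId);
    } catch (IOException e) {
      // On failure the dropped snapshot is only recoverable by restarting the regionserver,
      // per the abortCacheFlush javadoc.
      wal.abortCacheFlush(encodedRegionName);
      throw e;
    }
  }

  private static long doFlush(Set<byte[]> families) throws IOException {
    return HConstants.NO_SEQNUM; // placeholder
  }
}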
*/ interface Reader extends Closeable { Entry next() throws IOException; + Entry next(Entry reuse) throws IOException; + void seek(long pos) throws IOException; + long getPosition() throws IOException; + void reset() throws IOException; } @@ -264,7 +258,6 @@ public Entry() { /** * Constructor for both params - * * @param edit log's edit * @param key log's key */ @@ -275,7 +268,6 @@ public Entry(WALKeyImpl key, WALEdit edit) { /** * Gets the edit - * * @return edit */ public WALEdit getEdit() { @@ -284,7 +276,6 @@ public WALEdit getEdit() { /** * Gets the key - * * @return key */ public WALKeyImpl getKey() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java index 61f36fab74af..e5f2e82f4a1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java @@ -45,30 +45,32 @@ * Used in HBase's transaction log (WAL) to represent a collection of edits (Cell/KeyValue objects) * that came in as a single transaction. All the edits for a given transaction are written out as a * single record, in PB format, followed (optionally) by Cells written via the WALCellEncoder. - *

- * <p>This class is LimitedPrivate for CPs to read-only. The {@link #add} methods are
- * classified as private methods, not for use by CPs.</p>
- *
- * <p>
      A particular WALEdit 'type' is the 'meta' type used to mark key operational - * events in the WAL such as compaction, flush, or region open. These meta types do not traverse - * hbase memstores. They are edits made by the hbase system rather than edit data submitted by - * clients. They only show in the WAL. These 'Meta' types have not been formally specified - * (or made into an explicit class type). They evolved organically. HBASE-8457 suggests codifying - * a WALEdit 'type' by adding a type field to WALEdit that gets serialized into the WAL. TODO. - * Would have to work on the consumption-side. Reading WALs on replay we seem to consume - * a Cell-at-a-time rather than by WALEdit. We are already in the below going out of our - * way to figure particular types -- e.g. if a compaction, replay, or close meta Marker -- during - * normal processing so would make sense to do this. Current system is an awkward marking of Cell - * columnfamily as {@link #METAFAMILY} and then setting qualifier based off meta edit type. For - * replay-time where we read Cell-at-a-time, there are utility methods below for figuring - * meta type. See also - * {@link #createBulkLoadEvent(RegionInfo, WALProtos.BulkLoadDescriptor)}, etc., for where we - * create meta WALEdit instances.

- *
- * <p>
      WALEdit will accumulate a Set of all column family names referenced by the Cells - * {@link #add(Cell)}'d. This is an optimization. Usually when loading a WALEdit, we have the - * column family name to-hand.. just shove it into the WALEdit if available. Doing this, we can - * save on a parse of each Cell to figure column family down the line when we go to add the - * WALEdit to the WAL file. See the hand-off in FSWALEntry Constructor. + *

+ * This class is LimitedPrivate for CPs to read-only. The {@link #add} methods are classified as
+ * private methods, not for use by CPs.
+ * </p>
+ * <p>
      + * A particular WALEdit 'type' is the 'meta' type used to mark key operational events in the WAL + * such as compaction, flush, or region open. These meta types do not traverse hbase memstores. They + * are edits made by the hbase system rather than edit data submitted by clients. They only show in + * the WAL. These 'Meta' types have not been formally specified (or made into an explicit class + * type). They evolved organically. HBASE-8457 suggests codifying a WALEdit 'type' by adding a type + * field to WALEdit that gets serialized into the WAL. TODO. Would have to work on the + * consumption-side. Reading WALs on replay we seem to consume a Cell-at-a-time rather than by + * WALEdit. We are already in the below going out of our way to figure particular types -- e.g. if a + * compaction, replay, or close meta Marker -- during normal processing so would make sense to do + * this. Current system is an awkward marking of Cell columnfamily as {@link #METAFAMILY} and then + * setting qualifier based off meta edit type. For replay-time where we read Cell-at-a-time, there + * are utility methods below for figuring meta type. See also + * {@link #createBulkLoadEvent(RegionInfo, WALProtos.BulkLoadDescriptor)}, etc., for where we create + * meta WALEdit instances. + *

+ * </p>
+ * <p>
      + * WALEdit will accumulate a Set of all column family names referenced by the Cells + * {@link #add(Cell)}'d. This is an optimization. Usually when loading a WALEdit, we have the column + * family name to-hand.. just shove it into the WALEdit if available. Doing this, we can save on a + * parse of each Cell to figure column family down the line when we go to add the WALEdit to the WAL + * file. See the hand-off in FSWALEntry Constructor. * @see WALKey */ // TODO: Do not expose this class to Coprocessors. It has set methods. A CP might meddle. @@ -78,17 +80,17 @@ public class WALEdit implements HeapSize { // Below defines are for writing WALEdit 'meta' Cells.. // TODO: Get rid of this system of special 'meta' Cells. See HBASE-8457. It suggests // adding a type to WALEdit itself for use denoting meta Edits and their types. - public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY"); + public static final byte[] METAFAMILY = Bytes.toBytes("METAFAMILY"); /** * @deprecated Since 2.3.0. Not used. */ @Deprecated - public static final byte [] METAROW = Bytes.toBytes("METAROW"); + public static final byte[] METAROW = Bytes.toBytes("METAROW"); /** * @deprecated Since 2.3.0. Make it protected, internal-use only. Use - * {@link #isCompactionMarker(Cell)} + * {@link #isCompactionMarker(Cell)} */ @Deprecated @InterfaceAudience.Private @@ -99,37 +101,36 @@ public class WALEdit implements HeapSize { */ @Deprecated @InterfaceAudience.Private - public static final byte [] FLUSH = Bytes.toBytes("HBASE::FLUSH"); + public static final byte[] FLUSH = Bytes.toBytes("HBASE::FLUSH"); /** - * Qualifier for region event meta 'Marker' WALEdits start with the - * {@link #REGION_EVENT_PREFIX} prefix ('HBASE::REGION_EVENT::'). After the prefix, - * we note the type of the event which we get from the RegionEventDescriptor protobuf - * instance type (A RegionEventDescriptor protobuf instance is written as the meta Marker - * Cell value). Adding a type suffix means we do not have to deserialize the protobuf to - * figure out what type of event this is.. .just read the qualifier suffix. For example, - * a close region event descriptor will have a qualifier of HBASE::REGION_EVENT::REGION_CLOSE. - * See WAL.proto and the EventType in RegionEventDescriptor protos for all possible - * event types. + * Qualifier for region event meta 'Marker' WALEdits start with the {@link #REGION_EVENT_PREFIX} + * prefix ('HBASE::REGION_EVENT::'). After the prefix, we note the type of the event which we get + * from the RegionEventDescriptor protobuf instance type (A RegionEventDescriptor protobuf + * instance is written as the meta Marker Cell value). Adding a type suffix means we do not have + * to deserialize the protobuf to figure out what type of event this is.. .just read the qualifier + * suffix. For example, a close region event descriptor will have a qualifier of + * HBASE::REGION_EVENT::REGION_CLOSE. See WAL.proto and the EventType in RegionEventDescriptor + * protos for all possible event types. */ private static final String REGION_EVENT_STR = "HBASE::REGION_EVENT"; private static final String REGION_EVENT_PREFIX_STR = REGION_EVENT_STR + "::"; - private static final byte [] REGION_EVENT_PREFIX = Bytes.toBytes(REGION_EVENT_PREFIX_STR); + private static final byte[] REGION_EVENT_PREFIX = Bytes.toBytes(REGION_EVENT_PREFIX_STR); /** * @deprecated Since 2.3.0. Remove. Not for external use. Not used. 
*/ @Deprecated - public static final byte [] REGION_EVENT = Bytes.toBytes(REGION_EVENT_STR); + public static final byte[] REGION_EVENT = Bytes.toBytes(REGION_EVENT_STR); /** * We use this define figuring if we are carrying a close event. */ - private static final byte [] REGION_EVENT_CLOSE = + private static final byte[] REGION_EVENT_CLOSE = createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType.REGION_CLOSE); @InterfaceAudience.Private - public static final byte [] BULK_LOAD = Bytes.toBytes("HBASE::BULK_LOAD"); + public static final byte[] BULK_LOAD = Bytes.toBytes("HBASE::BULK_LOAD"); private final transient boolean replay; @@ -137,10 +138,10 @@ public class WALEdit implements HeapSize { /** * All the Cell families in cells. Updated by {@link #add(Cell)} and - * {@link #add(Map)}. This Set is passed to the FSWALEntry so it does not have - * to recalculate the Set of families in a transaction; makes for a bunch of CPU savings. + * {@link #add(Map)}. This Set is passed to the FSWALEntry so it does not have to recalculate the + * Set of families in a transaction; makes for a bunch of CPU savings. */ - private Set families = null; + private Set families = null; public WALEdit() { this(1, false); @@ -148,7 +149,7 @@ public WALEdit() { /** * @deprecated since 2.0.1 and will be removed in 4.0.0. Use {@link #WALEdit(int, boolean)} - * instead. + * instead. * @see #WALEdit(int, boolean) * @see HBASE-20781 */ @@ -159,7 +160,7 @@ public WALEdit(boolean replay) { /** * @deprecated since 2.0.1 and will be removed in 4.0.0. Use {@link #WALEdit(int, boolean)} - * instead. + * instead. * @see #WALEdit(int, boolean) * @see HBASE-20781 */ @@ -187,7 +188,7 @@ private Set getOrCreateFamilies() { * For use by FSWALEntry ONLY. An optimization. * @return All families in {@link #getCells()}; may be null. */ - public Set getFamilies() { + public Set getFamilies() { return this.families; } @@ -196,7 +197,7 @@ private Set getOrCreateFamilies() { * @deprecated Since 2.3.0. Do not expose. Make protected. */ @Deprecated - public static boolean isMetaEditFamily(final byte [] f) { + public static boolean isMetaEditFamily(final byte[] f) { return Bytes.equals(METAFAMILY, f); } @@ -208,8 +209,8 @@ public static boolean isMetaEditFamily(Cell cell) { } /** - * @return True if this is a meta edit; has one edit only and its columnfamily - * is {@link #METAFAMILY}. + * @return True if this is a meta edit; has one edit only and its columnfamily is + * {@link #METAFAMILY}. */ public boolean isMetaEdit() { return this.families != null && this.families.size() == 1 && this.families.contains(METAFAMILY); @@ -224,7 +225,7 @@ public boolean isReplay() { } @InterfaceAudience.Private - public WALEdit add(Cell cell, byte [] family) { + public WALEdit add(Cell cell, byte[] family) { getOrCreateFamilies().add(family); return addCell(cell); } @@ -249,10 +250,8 @@ public ArrayList getCells() { } /** - * This is not thread safe. - * This will change the WALEdit and shouldn't be used unless you are sure that nothing - * else depends on the contents being immutable. - * + * This is not thread safe. This will change the WALEdit and shouldn't be used unless you are sure + * that nothing else depends on the contents being immutable. * @param cells the list of cells that this WALEdit now contains. 
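Reading a WAL back, a consumer can use the marker helpers of this class to tell meta edits from ordinary data cells. A minimal sketch, assuming the descriptors live in the shaded WALProtos generated classes (verify the import paths against your HBase version):

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.wal.WALEdit;

// Replay-side sketch: classify a cell from a WALEdit using the helpers in this class.
public final class MetaMarkerSketch {
  private MetaMarkerSketch() {
  }

  public static String describe(Cell cell) throws IOException {
    if (!WALEdit.isMetaEditFamily(cell)) {
      return "ordinary data cell";
    }
    CompactionDescriptor compaction = WALEdit.getCompaction(cell);
    if (compaction != null) {
      return "compaction marker for store " + compaction.getFamilyName().toStringUtf8();
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(cell);
    if (flush != null) {
      return "flush marker, action=" + flush.getAction();
    }
    return "other meta marker (region event, bulk load, ...)";
  }
}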
*/ @InterfaceAudience.Private @@ -288,7 +287,7 @@ public long heapSize() { public long estimatedSerializedSizeOf() { long ret = 0; - for (Cell cell: cells) { + for (Cell cell : cells) { ret += PrivateCellUtil.estimatedSerializedSizeOf(cell); } return ret; @@ -309,21 +308,22 @@ public String toString() { public static WALEdit createFlushWALEdit(RegionInfo hri, FlushDescriptor f) { KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, FLUSH, - EnvironmentEdgeManager.currentTime(), f.toByteArray()); + EnvironmentEdgeManager.currentTime(), f.toByteArray()); return new WALEdit().add(kv, METAFAMILY); } public static FlushDescriptor getFlushDescriptor(Cell cell) throws IOException { - return CellUtil.matchingColumn(cell, METAFAMILY, FLUSH)? - FlushDescriptor.parseFrom(CellUtil.cloneValue(cell)): null; + return CellUtil.matchingColumn(cell, METAFAMILY, FLUSH) + ? FlushDescriptor.parseFrom(CellUtil.cloneValue(cell)) + : null; } /** * @return A meta Marker WALEdit that has a single Cell whose value is the passed in - * regionEventDesc serialized and whose row is this region, - * columnfamily is {@link #METAFAMILY} and qualifier is - * {@link #REGION_EVENT_PREFIX} + {@link RegionEventDescriptor#getEventType()}; - * for example HBASE::REGION_EVENT::REGION_CLOSE. + * regionEventDesc serialized and whose row is this region, columnfamily is + * {@link #METAFAMILY} and qualifier is {@link #REGION_EVENT_PREFIX} + + * {@link RegionEventDescriptor#getEventType()}; for example + * HBASE::REGION_EVENT::REGION_CLOSE. */ public static WALEdit createRegionEventWALEdit(RegionInfo hri, RegionEventDescriptor regionEventDesc) { @@ -331,7 +331,7 @@ public static WALEdit createRegionEventWALEdit(RegionInfo hri, } @InterfaceAudience.Private - public static WALEdit createRegionEventWALEdit(byte [] rowForRegion, + public static WALEdit createRegionEventWALEdit(byte[] rowForRegion, RegionEventDescriptor regionEventDesc) { KeyValue kv = new KeyValue(rowForRegion, METAFAMILY, createRegionEventDescriptorQualifier(regionEventDesc.getEventType()), @@ -340,11 +340,11 @@ public static WALEdit createRegionEventWALEdit(byte [] rowForRegion, } /** - * @return Cell qualifier for the passed in RegionEventDescriptor Type; e.g. we'll - * return something like a byte array with HBASE::REGION_EVENT::REGION_OPEN in it. + * @return Cell qualifier for the passed in RegionEventDescriptor Type; e.g. we'll return + * something like a byte array with HBASE::REGION_EVENT::REGION_OPEN in it. */ @InterfaceAudience.Private - public static byte [] createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType t) { + public static byte[] createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType t) { return Bytes.toBytes(REGION_EVENT_PREFIX_STR + t.toString()); } @@ -353,28 +353,28 @@ public static WALEdit createRegionEventWALEdit(byte [] rowForRegion, * @return True if this is a Marker Edit and it is a RegionClose type. */ public boolean isRegionCloseMarker() { - return isMetaEdit() && PrivateCellUtil.matchingQualifier(this.cells.get(0), - REGION_EVENT_CLOSE, 0, REGION_EVENT_CLOSE.length); + return isMetaEdit() && PrivateCellUtil.matchingQualifier(this.cells.get(0), REGION_EVENT_CLOSE, + 0, REGION_EVENT_CLOSE.length); } /** - * @return Returns a RegionEventDescriptor made by deserializing the content of the - * passed in cell, IFF the cell is a RegionEventDescriptor - * type WALEdit. 
+ * @return Returns a RegionEventDescriptor made by deserializing the content of the passed in + * cell, IFF the cell is a RegionEventDescriptor type WALEdit. */ public static RegionEventDescriptor getRegionEventDescriptor(Cell cell) throws IOException { - return CellUtil.matchingColumnFamilyAndQualifierPrefix(cell, METAFAMILY, REGION_EVENT_PREFIX)? - RegionEventDescriptor.parseFrom(CellUtil.cloneValue(cell)): null; + return CellUtil.matchingColumnFamilyAndQualifierPrefix(cell, METAFAMILY, REGION_EVENT_PREFIX) + ? RegionEventDescriptor.parseFrom(CellUtil.cloneValue(cell)) + : null; } /** * @return A Marker WALEdit that has c serialized as its value */ public static WALEdit createCompaction(final RegionInfo hri, final CompactionDescriptor c) { - byte [] pbbytes = c.toByteArray(); + byte[] pbbytes = c.toByteArray(); KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, COMPACTION, - EnvironmentEdgeManager.currentTime(), pbbytes); - return new WALEdit().add(kv, METAFAMILY); //replication scope null so this won't be replicated + EnvironmentEdgeManager.currentTime(), pbbytes); + return new WALEdit().add(kv, METAFAMILY); // replication scope null so this won't be replicated } public static byte[] getRowForRegion(RegionInfo hri) { @@ -382,7 +382,7 @@ public static byte[] getRowForRegion(RegionInfo hri) { if (startKey.length == 0) { // empty row key is not allowed in mutations because it is both the start key and the end key // we return the smallest byte[] that is bigger (in lex comparison) than byte[0]. - return new byte[] {0}; + return new byte[] { 0 }; } return startKey; } @@ -393,12 +393,11 @@ public static byte[] getRowForRegion(RegionInfo hri) { * @return deserialized CompactionDescriptor or null. */ public static CompactionDescriptor getCompaction(Cell kv) throws IOException { - return isCompactionMarker(kv)? CompactionDescriptor.parseFrom(CellUtil.cloneValue(kv)): null; + return isCompactionMarker(kv) ? CompactionDescriptor.parseFrom(CellUtil.cloneValue(kv)) : null; } /** * Returns true if the given cell is a serialized {@link CompactionDescriptor} - * * @see #getCompaction(Cell) */ public static boolean isCompactionMarker(Cell cell) { @@ -407,8 +406,7 @@ public static boolean isCompactionMarker(Cell cell) { /** * Create a bulk loader WALEdit - * - * @param hri The RegionInfo for the region in which we are bulk loading + * @param hri The RegionInfo for the region in which we are bulk loading * @param bulkLoadDescriptor The descriptor for the Bulk Loader * @return The WALEdit for the BulkLoad */ @@ -425,21 +423,20 @@ public static WALEdit createBulkLoadEvent(RegionInfo hri, * @return deserialized BulkLoadDescriptor or null. */ public static WALProtos.BulkLoadDescriptor getBulkLoadDescriptor(Cell cell) throws IOException { - return CellUtil.matchingColumn(cell, METAFAMILY, BULK_LOAD)? - WALProtos.BulkLoadDescriptor.parseFrom(CellUtil.cloneValue(cell)): null; + return CellUtil.matchingColumn(cell, METAFAMILY, BULK_LOAD) + ? WALProtos.BulkLoadDescriptor.parseFrom(CellUtil.cloneValue(cell)) + : null; } /** - * Append the given map of family->edits to a WALEdit data structure. - * This does not write to the WAL itself. - * Note that as an optimization, we will stamp the Set of column families into the WALEdit - * to save on our having to calculate column families subsequently down in the actual WAL + * Append the given map of family->edits to a WALEdit data structure. This does not write to the + * WAL itself. 
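As an aside on the family-Set optimization the WALEdit javadoc above describes, here is a minimal sketch of a caller handing the family it already holds to the edit. Note that WALEdit#add(Cell, byte[]) is marked @InterfaceAudience.Private, so this mirrors internal usage; the helper class and argument names are illustrative and not part of this patch.

```java
// Sketch only: feed the family Set described above instead of re-parsing the Cell later.
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.wal.WALEdit;

public class WALEditFamilyHint {
  public static WALEdit buildEdit(byte[] row, byte[] cf, byte[] qualifier, byte[] value) {
    KeyValue kv = new KeyValue(row, cf, qualifier, System.currentTimeMillis(), value);
    WALEdit edit = new WALEdit();
    // Passing the family we already have to hand avoids parsing the Cell for its family
    // later, at the FSWALEntry hand-off mentioned in the class comment.
    edit.add(kv, cf);
    return edit;
  }
}
```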
Note that as an optimization, we will stamp the Set of column families into the + * WALEdit to save on our having to calculate column families subsequently down in the actual WAL * writing. - * * @param familyMap map of family->edits */ public void add(Map> familyMap) { - for (Map.Entry> e: familyMap.entrySet()) { + for (Map.Entry> e : familyMap.entrySet()) { // 'foreach' loop NOT used. See HBASE-12023 "...creates too many iterator objects." int listSize = e.getValue().size(); // Add all Cells first and then at end, add the family rather than call {@link #add(Cell)} @@ -451,7 +448,7 @@ public void add(Map> familyMap) { } } - private void addFamily(byte [] family) { + private void addFamily(byte[] family) { getOrCreateFamilies().add(family); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index cff3154626be..c40e4e4c5e6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -39,23 +39,19 @@ import org.slf4j.LoggerFactory; /** - * Entry point for users of the Write Ahead Log. - * Acts as the shim between internal use and the particular WALProvider we use to handle wal - * requests. - * - * Configure which provider gets used with the configuration setting "hbase.wal.provider". Available - * implementations: + * Entry point for users of the Write Ahead Log. Acts as the shim between internal use and the + * particular WALProvider we use to handle wal requests. Configure which provider gets used with the + * configuration setting "hbase.wal.provider". Available implementations: *
 * <ul>
- *   <li>defaultProvider : whatever provider is standard for the hbase version. Currently
- *       "asyncfs"</li>
- *   <li>asyncfs : a provider that will run on top of an implementation of the Hadoop
- *       FileSystem interface via an asynchronous client.</li>
- *   <li>filesystem : a provider that will run on top of an implementation of the Hadoop
- *       FileSystem interface via HDFS's synchronous DFSClient.</li>
- *   <li>multiwal : a provider that will use multiple "filesystem" wal instances per region
- *       server.</li>
+ * <li>defaultProvider : whatever provider is standard for the hbase version. Currently
+ * "asyncfs"</li>
+ * <li>asyncfs : a provider that will run on top of an implementation of the Hadoop
+ * FileSystem interface via an asynchronous client.</li>
+ * <li>filesystem : a provider that will run on top of an implementation of the Hadoop
+ * FileSystem interface via HDFS's synchronous DFSClient.</li>
+ * <li>multiwal : a provider that will use multiple "filesystem" wal instances per region
+ * server.</li>
 * </ul>
      - * * Alternatively, you may provide a custom implementation of {@link WALProvider} by class name. */ @InterfaceAudience.Private @@ -67,12 +63,11 @@ public class WALFactory { * Maps between configuration names for providers and implementation classes. */ enum Providers { - defaultProvider(AsyncFSWALProvider.class), - filesystem(FSHLogProvider.class), - multiwal(RegionGroupingProvider.class), - asyncfs(AsyncFSWALProvider.class); + defaultProvider(AsyncFSWALProvider.class), filesystem(FSHLogProvider.class), + multiwal(RegionGroupingProvider.class), asyncfs(AsyncFSWALProvider.class); final Class clazz; + Providers(Class clazz) { this.clazz = clazz; } @@ -220,9 +215,8 @@ public WALFactory(Configuration conf, String factoryId, Abortable abortable, } /** - * Shutdown all WALs and clean up any underlying storage. - * Use only when you will not need to replay and edits that have gone to any wals from this - * factory. + * Shutdown all WALs and clean up any underlying storage. Use only when you will not need to + * replay and edits that have gone to any wals from this factory. */ public void close() throws IOException { final WALProvider metaProvider = this.metaProvider.get(); @@ -237,9 +231,9 @@ public void close() throws IOException { } /** - * Tell the underlying WAL providers to shut down, but do not clean up underlying storage. - * If you are not ending cleanly and will need to replay edits from this factory's wals, - * use this method if you can as it will try to leave things as tidy as possible. + * Tell the underlying WAL providers to shut down, but do not clean up underlying storage. If you + * are not ending cleanly and will need to replay edits from this factory's wals, use this method + * if you can as it will try to leave things as tidy as possible. */ public void shutdown() throws IOException { IOException exception = null; @@ -247,7 +241,7 @@ public void shutdown() throws IOException { if (null != metaProvider) { try { metaProvider.shutdown(); - } catch(IOException ioe) { + } catch (IOException ioe) { exception = ioe; } } @@ -279,8 +273,8 @@ public WALProvider getMetaProvider() throws IOException { } catch (Throwable t) { // the WAL provider should be an enum. Proceed } - } - if (clz == null){ + } + if (clz == null) { clz = getProviderClass(META_WAL_PROVIDER, conf.get(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); } provider = createProvider(clz); @@ -300,8 +294,8 @@ public WALProvider getMetaProvider() throws IOException { */ public WAL getWAL(RegionInfo region) throws IOException { // Use different WAL for hbase:meta. Instantiates the meta WALProvider if not already up. - if (region != null && region.isMetaRegion() && - region.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { + if (region != null && region.isMetaRegion() + && region.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { return getMetaProvider().getWAL(region); } else { return provider.getWAL(region); @@ -309,17 +303,17 @@ public WAL getWAL(RegionInfo region) throws IOException { } public Reader createReader(final FileSystem fs, final Path path) throws IOException { - return createReader(fs, path, (CancelableProgressable)null); + return createReader(fs, path, (CancelableProgressable) null); } /** - * Create a reader for the WAL. If you are reading from a file that's being written to and need - * to reopen it multiple times, use {@link WAL.Reader#reset()} instead of this method - * then just seek back to the last known good position. - * @return A WAL reader. Close when done with it. + * Create a reader for the WAL. 
If you are reading from a file that's being written to and need to + * reopen it multiple times, use {@link WAL.Reader#reset()} instead of this method then just seek + * back to the last known good position. + * @return A WAL reader. Close when done with it. */ - public Reader createReader(final FileSystem fs, final Path path, - CancelableProgressable reporter) throws IOException { + public Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter) + throws IOException { return createReader(fs, path, reporter, true); } @@ -355,10 +349,9 @@ public Reader createReader(final FileSystem fs, final Path path, CancelableProgr // Only inspect the Exception to consider retry when it's an IOException if (e instanceof IOException) { String msg = e.getMessage(); - if (msg != null - && (msg.contains("Cannot obtain block length") - || msg.contains("Could not obtain the last block") || msg - .matches("Blocklist for [^ ]* has changed.*"))) { + if (msg != null && (msg.contains("Cannot obtain block length") + || msg.contains("Could not obtain the last block") + || msg.matches("Blocklist for [^ ]* has changed.*"))) { if (++nbAttempt == 1) { LOG.warn("Lease should have recovered. This is not expected. Will retry", e); } @@ -367,7 +360,8 @@ public Reader createReader(final FileSystem fs, final Path path, CancelableProgr } if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) { LOG.error("Can't open after " + nbAttempt + " attempts and " - + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path); + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + + path); } else { try { Thread.sleep(nbAttempt < 3 ? 500 : 1000); @@ -394,8 +388,7 @@ public Reader createReader(final FileSystem fs, final Path path, CancelableProgr } /** - * Create a writer for the WAL. - * Uses defaults. + * Create a writer for the WAL. Uses defaults. *
      * Should be package-private. public only for tests and * {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} @@ -406,8 +399,7 @@ public Writer createWALWriter(final FileSystem fs, final Path path) throws IOExc } /** - * Should be package-private, visible for recovery testing. - * Uses defaults. + * Should be package-private, visible for recovery testing. Uses defaults. * @return an overwritable writer for recovered edits. caller should close. */ public Writer createRecoveredEditsWriter(final FileSystem fs, final Path path) @@ -421,7 +413,7 @@ public Writer createRecoveredEditsWriter(final FileSystem fs, final Path path) // For now, first Configuration object wins. Practically this just impacts the reader/writer class private static final AtomicReference singleton = new AtomicReference<>(); private static final String SINGLETON_ID = WALFactory.class.getName(); - + // Public only for FSHLog public static WALFactory getInstance(Configuration configuration) { WALFactory factory = singleton.get(); @@ -443,8 +435,8 @@ public static WALFactory getInstance(Configuration configuration) { } /** - * Create a reader for the given path, accept custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. + * Create a reader for the given path, accept custom reader classes from conf. If you already have + * a WALFactory, you should favor the instance method. * @return a WAL Reader, caller must close. */ public static Reader createReader(final FileSystem fs, final Path path, @@ -453,8 +445,8 @@ public static Reader createReader(final FileSystem fs, final Path path, } /** - * Create a reader for the given path, accept custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. + * Create a reader for the given path, accept custom reader classes from conf. If you already have + * a WALFactory, you should favor the instance method. * @return a WAL Reader, caller must close. */ static Reader createReader(final FileSystem fs, final Path path, @@ -463,9 +455,9 @@ static Reader createReader(final FileSystem fs, final Path path, } /** - * Create a reader for the given path, ignore custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. - * only public pending move of {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} + * Create a reader for the given path, ignore custom reader classes from conf. If you already have + * a WALFactory, you should favor the instance method. only public pending move of + * {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} * @return a WAL Reader, caller must close. */ public static Reader createReaderIgnoreCustomClass(final FileSystem fs, final Path path, @@ -474,24 +466,20 @@ public static Reader createReaderIgnoreCustomClass(final FileSystem fs, final Pa } /** - * If you already have a WALFactory, you should favor the instance method. - * Uses defaults. + * If you already have a WALFactory, you should favor the instance method. Uses defaults. * @return a Writer that will overwrite files. Caller must close. */ static Writer createRecoveredEditsWriter(final FileSystem fs, final Path path, - final Configuration configuration) - throws IOException { + final Configuration configuration) throws IOException { return FSHLogProvider.createWriter(configuration, fs, path, true); } /** - * If you already have a WALFactory, you should favor the instance method. - * Uses defaults. 
+ * If you already have a WALFactory, you should favor the instance method. Uses defaults. * @return a writer that won't overwrite files. Caller must close. */ public static Writer createWALWriter(final FileSystem fs, final Path path, - final Configuration configuration) - throws IOException { + final Configuration configuration) throws IOException { return FSHLogProvider.createWriter(configuration, fs, path, false); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java index fdbacbda2779..5d8d53f5207f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java @@ -17,26 +17,24 @@ */ package org.apache.hadoop.hbase.wal; -import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.regionserver.SequenceId; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.yetus.audience.InterfaceAudience; - import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; - +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.regionserver.SequenceId; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Key for WAL Entry. */ -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.REPLICATION, - HBaseInterfaceAudience.COPROC}) +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.REPLICATION, + HBaseInterfaceAudience.COPROC }) public interface WALKey extends SequenceId, Comparable { /** * Unmodifiable empty list of UUIDs. @@ -92,26 +90,25 @@ default long getNonce() { */ void addExtendedAttribute(String attributeKey, byte[] attributeValue); - /** - * Return a named String value injected into the WALKey during processing, such as by a - * coprocessor - * @param attributeKey The key of a key / value pair - */ - default byte[] getExtendedAttribute(String attributeKey){ + /** + * Return a named String value injected into the WALKey during processing, such as by a + * coprocessor + * @param attributeKey The key of a key / value pair + */ + default byte[] getExtendedAttribute(String attributeKey) { return null; } - /** - * Returns a map of all extended attributes injected into this WAL key. - */ + /** + * Returns a map of all extended attributes injected into this WAL key. + */ default Map getExtendedAttributes() { return new HashMap<>(); } + /** - * Produces a string map for this key. Useful for programmatic use and - * manipulation of the data stored in an WALKeyImpl, for example, printing - * as JSON. - * + * Produces a string map for this key. Useful for programmatic use and manipulation of the data + * stored in an WALKeyImpl, for example, printing as JSON. 
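For the toStringMap() use case just described, a minimal sketch of scanning one WAL file with the static WALFactory.createReader and printing each key's string map; it assumes args[0] is a readable WAL path and elides error handling, and is not part of this patch.

```java
// Sketch only: dump WALKey string maps from a single WAL file.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class PrintWalKeys {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path walPath = new Path(args[0]);
    FileSystem fs = walPath.getFileSystem(conf);
    try (WAL.Reader reader = WALFactory.createReader(fs, walPath, conf)) {
      WAL.Entry entry;
      while ((entry = reader.next()) != null) {
        // toStringMap() carries table, region, sequence id and any extended attributes.
        System.out.println(entry.getKey().toStringMap());
      }
    }
  }
}
```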
* @return a Map containing data from this key */ default Map toStringMap() { @@ -120,8 +117,8 @@ default Map toStringMap() { stringMap.put("region", Bytes.toStringBinary(getEncodedRegionName())); stringMap.put("sequence", getSequenceId()); Map extendedAttributes = getExtendedAttributes(); - if (extendedAttributes != null){ - for (Map.Entry entry : extendedAttributes.entrySet()){ + if (extendedAttributes != null) { + for (Map.Entry entry : extendedAttributes.entrySet()) { stringMap.put(entry.getKey(), Bytes.toStringBinary(entry.getValue())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java index 4c3fc4edc787..2b2779264c0c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,19 +44,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.ScopeType; /** - * Default implementation of Key for an Entry in the WAL. - * For internal use only though Replication needs to have access. - * - * The log intermingles edits to many tables and rows, so each log entry - * identifies the appropriate table and row. Within a table and row, they're - * also sorted. - * - *
      Some Transactional edits (START, COMMIT, ABORT) will not have an associated row. - * + * Default implementation of Key for an Entry in the WAL. For internal use only though Replication + * needs to have access. The log intermingles edits to many tables and rows, so each log entry + * identifies the appropriate table and row. Within a table and row, they're also sorted. + *
      + * Some Transactional edits (START, COMMIT, ABORT) will not have an associated row. */ // TODO: Key and WALEdit are never used separately, or in one-to-many relation, for practical -// purposes. They need to be merged into WALEntry. -@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.REPLICATION}) +// purposes. They need to be merged into WALEntry. +@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.REPLICATION }) public class WALKeyImpl implements WALKey { public static final WALKeyImpl EMPTY_WALKEYIMPL = new WALKeyImpl(); @@ -65,11 +61,10 @@ public MultiVersionConcurrencyControl getMvcc() { } /** - * Use it to complete mvcc transaction. This WALKeyImpl was part of - * (the transaction is started when you call append; see the comment on FSHLog#append). To - * complete call + * Use it to complete mvcc transaction. This WALKeyImpl was part of (the transaction is started + * when you call append; see the comment on FSHLog#append). To complete call + * {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} or * {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} - * or {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)} * @return A WriteEntry gotten from local WAL subsystem. * @see #setWriteEntry(MultiVersionConcurrencyControl.WriteEntry) */ @@ -84,7 +79,7 @@ public void setWriteEntry(MultiVersionConcurrencyControl.WriteEntry writeEntry) this.sequenceId = writeEntry.getWriteNumber(); } - private byte [] encodedRegionName; + private byte[] encodedRegionName; private TableName tablename; @@ -119,13 +114,13 @@ public void setWriteEntry(MultiVersionConcurrencyControl.WriteEntry writeEntry) private Map extendedAttributes; public WALKeyImpl() { - init(null, null, 0L, HConstants.LATEST_TIMESTAMP, - new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, null, null); + init(null, null, 0L, HConstants.LATEST_TIMESTAMP, new ArrayList<>(), HConstants.NO_NONCE, + HConstants.NO_NONCE, null, null, null); } public WALKeyImpl(final NavigableMap replicationScope) { - init(null, null, 0L, HConstants.LATEST_TIMESTAMP, - new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, replicationScope, null); + init(null, null, 0L, HConstants.LATEST_TIMESTAMP, new ArrayList<>(), HConstants.NO_NONCE, + HConstants.NO_NONCE, null, replicationScope, null); } @InterfaceAudience.Private @@ -148,233 +143,176 @@ public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, lon // TODO: Fix being able to pass in sequenceid. public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now) { - init(encodedRegionName, - tablename, - NO_SEQUENCE_ID, - now, - EMPTY_UUIDS, - HConstants.NO_NONCE, - HConstants.NO_NONCE, - null, null, null); + init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, + HConstants.NO_NONCE, null, null, null); } // TODO: Fix being able to pass in sequenceid. 
public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, final NavigableMap replicationScope) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, - HConstants.NO_NONCE, null, replicationScope, null); + HConstants.NO_NONCE, null, replicationScope, null); } public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, - HConstants.NO_NONCE, mvcc, replicationScope, null); + HConstants.NO_NONCE, mvcc, replicationScope, null); } public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, - MultiVersionConcurrencyControl mvcc, - final NavigableMap replicationScope, - Map extendedAttributes) { + MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope, + Map extendedAttributes) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, - HConstants.NO_NONCE, mvcc, replicationScope, extendedAttributes); + HConstants.NO_NONCE, mvcc, replicationScope, extendedAttributes); } - public WALKeyImpl(final byte[] encodedRegionName, - final TableName tablename, - final long now, - MultiVersionConcurrencyControl mvcc) { - init(encodedRegionName, - tablename, - NO_SEQUENCE_ID, - now, - EMPTY_UUIDS, - HConstants.NO_NONCE, - HConstants.NO_NONCE, - mvcc, null, null); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + MultiVersionConcurrencyControl mvcc) { + init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, EMPTY_UUIDS, HConstants.NO_NONCE, + HConstants.NO_NONCE, mvcc, null, null); } /** - * Copy constructor that takes in an existing WALKeyImpl plus some extended attributes. - * Intended for coprocessors to add annotations to a system-generated WALKey - * for persistence to the WAL. + * Copy constructor that takes in an existing WALKeyImpl plus some extended attributes. Intended + * for coprocessors to add annotations to a system-generated WALKey for persistence to the WAL. * @param key Key to be copied into this new key * @param extendedAttributes Extra attributes to copy into the new key */ - public WALKeyImpl(WALKeyImpl key, - Map extendedAttributes){ - init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), - key.getWriteTime(), key.getClusterIds(), key.getNonceGroup(), key.getNonce(), - key.getMvcc(), key.getReplicationScopes(), extendedAttributes); + public WALKeyImpl(WALKeyImpl key, Map extendedAttributes) { + init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), key.getWriteTime(), + key.getClusterIds(), key.getNonceGroup(), key.getNonce(), key.getMvcc(), + key.getReplicationScopes(), extendedAttributes); } /** - * Copy constructor that takes in an existing WALKey, the extra WALKeyImpl fields that the - * parent interface is missing, plus some extended attributes. Intended - * for coprocessors to add annotations to a system-generated WALKey for - * persistence to the WAL. + * Copy constructor that takes in an existing WALKey, the extra WALKeyImpl fields that the parent + * interface is missing, plus some extended attributes. Intended for coprocessors to add + * annotations to a system-generated WALKey for persistence to the WAL. 
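A minimal sketch of the coprocessor-style annotation this copy constructor is aimed at; the attribute key and value are illustrative only and not defined by HBase or by this patch.

```java
// Sketch only: copy an existing key and layer on extended attributes.
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALKeyImpl;

public class AnnotatedWalKey {
  public static WALKeyImpl annotate(WALKeyImpl original) {
    Map<String, byte[]> extra =
      Collections.singletonMap("origin-note", Bytes.toBytes("loaded-by-example-job"));
    // Region, table, sequence id, write time, cluster ids, nonces, mvcc and replication
    // scopes come from the original key; the extra attributes are added on top.
    return new WALKeyImpl(original, extra);
  }
}
```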
*/ - public WALKeyImpl(WALKey key, - List clusterIds, - MultiVersionConcurrencyControl mvcc, - final NavigableMap replicationScopes, - Map extendedAttributes){ - init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), - key.getWriteTime(), clusterIds, key.getNonceGroup(), key.getNonce(), - mvcc, replicationScopes, extendedAttributes); + public WALKeyImpl(WALKey key, List clusterIds, MultiVersionConcurrencyControl mvcc, + final NavigableMap replicationScopes, + Map extendedAttributes) { + init(key.getEncodedRegionName(), key.getTableName(), key.getSequenceId(), key.getWriteTime(), + clusterIds, key.getNonceGroup(), key.getNonce(), mvcc, replicationScopes, extendedAttributes); } + /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - *
      Used by log splitting and snapshots. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. + *
      + * Used by log splitting and snapshots. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). - * @param tablename - name of table - * @param logSeqNum - log sequence number - * @param now Time at which this edit was written. - * @param clusterIds the clusters that have consumed the change(used in Replication) - * @param nonceGroup the nonceGroup - * @param nonce the nonce - * @param mvcc the mvcc associate the WALKeyImpl - * @param replicationScope the non-default replication scope - * associated with the region's column families + * HRegionInfo#getEncodedNameAsBytes(). + * @param tablename - name of table + * @param logSeqNum - log sequence number + * @param now Time at which this edit was written. + * @param clusterIds the clusters that have consumed the change(used in Replication) + * @param nonceGroup the nonceGroup + * @param nonce the nonce + * @param mvcc the mvcc associate the WALKeyImpl + * @param replicationScope the non-default replication scope associated with the region's column + * families */ // TODO: Fix being able to pass in sequenceid. public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, final long now, List clusterIds, long nonceGroup, long nonce, MultiVersionConcurrencyControl mvcc, final NavigableMap replicationScope) { init(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce, mvcc, - replicationScope, null); + replicationScope, null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - *
      Used by log splitting and snapshots. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. + *
      + * Used by log splitting and snapshots. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). - * @param tablename - name of table - * @param logSeqNum - log sequence number - * @param now Time at which this edit was written. - * @param clusterIds the clusters that have consumed the change(used in Replication) + * HRegionInfo#getEncodedNameAsBytes(). + * @param tablename - name of table + * @param logSeqNum - log sequence number + * @param now Time at which this edit was written. + * @param clusterIds the clusters that have consumed the change(used in Replication) */ // TODO: Fix being able to pass in sequenceid. - public WALKeyImpl(final byte[] encodedRegionName, - final TableName tablename, - long logSeqNum, - final long now, - List clusterIds, - long nonceGroup, - long nonce, - MultiVersionConcurrencyControl mvcc) { - init(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, - nonce, mvcc, null, null); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + final long now, List clusterIds, long nonceGroup, long nonce, + MultiVersionConcurrencyControl mvcc) { + init(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce, mvcc, null, + null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). - * @param tablename the tablename - * @param now Time at which this edit was written. - * @param clusterIds the clusters that have consumed the change(used in Replication) + * HRegionInfo#getEncodedNameAsBytes(). + * @param tablename the tablename + * @param now Time at which this edit was written. + * @param clusterIds the clusters that have consumed the change(used in Replication) * @param nonceGroup * @param nonce * @param mvcc mvcc control used to generate sequence numbers and control read/write points */ - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - final long now, List clusterIds, long nonceGroup, - final long nonce, final MultiVersionConcurrencyControl mvcc) { + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + List clusterIds, long nonceGroup, final long nonce, + final MultiVersionConcurrencyControl mvcc) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, clusterIds, nonceGroup, nonce, mvcc, - null, null); + null, null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). + * HRegionInfo#getEncodedNameAsBytes(). * @param tablename - * @param now Time at which this edit was written. - * @param clusterIds the clusters that have consumed the change(used in Replication) - * @param nonceGroup the nonceGroup - * @param nonce the nonce + * @param now Time at which this edit was written. 
+ * @param clusterIds the clusters that have consumed the change(used in Replication) + * @param nonceGroup the nonceGroup + * @param nonce the nonce * @param mvcc mvcc control used to generate sequence numbers and control read/write points - * @param replicationScope the non-default replication scope of the column families + * @param replicationScope the non-default replication scope of the column families */ - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - final long now, List clusterIds, long nonceGroup, - final long nonce, final MultiVersionConcurrencyControl mvcc, - NavigableMap replicationScope) { + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + List clusterIds, long nonceGroup, final long nonce, + final MultiVersionConcurrencyControl mvcc, NavigableMap replicationScope) { init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, clusterIds, nonceGroup, nonce, mvcc, - replicationScope, null); + replicationScope, null); } /** - * Create the log key for writing to somewhere. - * We maintain the tablename mainly for debugging purposes. - * A regionName is always a sub-table object. - * + * Create the log key for writing to somewhere. We maintain the tablename mainly for debugging + * purposes. A regionName is always a sub-table object. * @param encodedRegionName Encoded name of the region as returned by - * HRegionInfo#getEncodedNameAsBytes(). + * HRegionInfo#getEncodedNameAsBytes(). * @param tablename * @param logSeqNum * @param nonceGroup * @param nonce */ // TODO: Fix being able to pass in sequenceid. - public WALKeyImpl(final byte[] encodedRegionName, - final TableName tablename, - long logSeqNum, - long nonceGroup, - long nonce, - final MultiVersionConcurrencyControl mvcc) { - init(encodedRegionName, - tablename, - logSeqNum, - EnvironmentEdgeManager.currentTime(), - EMPTY_UUIDS, - nonceGroup, - nonce, - mvcc, null, null); - } - - public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, - final long now, List clusterIds, long nonceGroup, - final long nonce, final MultiVersionConcurrencyControl mvcc, - NavigableMap replicationScope, - Map extendedAttributes){ - init(encodedRegionName, - tablename, - NO_SEQUENCE_ID, - now, - clusterIds, - nonceGroup, - nonce, - mvcc, replicationScope, extendedAttributes); + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + long nonceGroup, long nonce, final MultiVersionConcurrencyControl mvcc) { + init(encodedRegionName, tablename, logSeqNum, EnvironmentEdgeManager.currentTime(), EMPTY_UUIDS, + nonceGroup, nonce, mvcc, null, null); + } + + public WALKeyImpl(final byte[] encodedRegionName, final TableName tablename, final long now, + List clusterIds, long nonceGroup, final long nonce, + final MultiVersionConcurrencyControl mvcc, NavigableMap replicationScope, + Map extendedAttributes) { + init(encodedRegionName, tablename, NO_SEQUENCE_ID, now, clusterIds, nonceGroup, nonce, mvcc, + replicationScope, extendedAttributes); } @InterfaceAudience.Private - protected void init(final byte[] encodedRegionName, - final TableName tablename, - long logSeqNum, - final long now, - List clusterIds, - long nonceGroup, - long nonce, - MultiVersionConcurrencyControl mvcc, - NavigableMap replicationScope, - Map extendedAttributes) { + protected void init(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, + final long now, List clusterIds, long nonceGroup, long nonce, + MultiVersionConcurrencyControl mvcc, 
NavigableMap replicationScope, + Map extendedAttributes) { this.sequenceId = logSeqNum; this.writeTime = now; this.clusterIds = clusterIds; @@ -398,7 +336,7 @@ protected void setSequenceId(long sequenceId) { /** @return encoded region name */ @Override - public byte [] getEncodedRegionName() { + public byte[] getEncodedRegionName() { return encodedRegionName; } @@ -489,27 +427,27 @@ public List getClusterIds() { * returns DEFAULT_CLUSTER_ID (cases where replication is not enabled) */ @Override - public UUID getOriginatingClusterId(){ - return clusterIds.isEmpty()? HConstants.DEFAULT_CLUSTER_ID: clusterIds.get(0); + public UUID getOriginatingClusterId() { + return clusterIds.isEmpty() ? HConstants.DEFAULT_CLUSTER_ID : clusterIds.get(0); } @Override - public void addExtendedAttribute(String attributeKey, byte[] attributeValue){ - if (extendedAttributes == null){ + public void addExtendedAttribute(String attributeKey, byte[] attributeValue) { + if (extendedAttributes == null) { extendedAttributes = new HashMap(); } extendedAttributes.put(attributeKey, attributeValue); } @Override - public byte[] getExtendedAttribute(String attributeKey){ + public byte[] getExtendedAttribute(String attributeKey) { return extendedAttributes != null ? extendedAttributes.get(attributeKey) : null; } @Override - public Map getExtendedAttributes(){ - return extendedAttributes != null ? new HashMap(extendedAttributes) : - new HashMap(); + public Map getExtendedAttributes() { + return extendedAttributes != null ? new HashMap(extendedAttributes) + : new HashMap(); } @Override @@ -525,7 +463,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) { return false; } - return compareTo((WALKey)obj) == 0; + return compareTo((WALKey) obj) == 0; } @Override @@ -544,7 +482,7 @@ public int compareTo(WALKey o) { long otherSid = o.getSequenceId(); if (sid < otherSid) { result = -1; - } else if (sid > otherSid) { + } else if (sid > otherSid) { result = 1; } if (result == 0) { @@ -560,10 +498,9 @@ public int compareTo(WALKey o) { } /** - * Drop this instance's tablename byte array and instead - * hold a reference to the provided tablename. This is not - * meant to be a general purpose setter - it's only used - * to collapse references to conserve memory. + * Drop this instance's tablename byte array and instead hold a reference to the provided + * tablename. This is not meant to be a general purpose setter - it's only used to collapse + * references to conserve memory. */ void internTableName(TableName tablename) { // We should not use this as a setter - only to swap @@ -573,12 +510,11 @@ void internTableName(TableName tablename) { } /** - * Drop this instance's region name byte array and instead - * hold a reference to the provided region name. This is not - * meant to be a general purpose setter - it's only used - * to collapse references to conserve memory. + * Drop this instance's region name byte array and instead hold a reference to the provided region + * name. This is not meant to be a general purpose setter - it's only used to collapse references + * to conserve memory. */ - void internEncodedRegionName(byte []encodedRegionName) { + void internEncodedRegionName(byte[] encodedRegionName) { // We should not use this as a setter - only to swap // in a new reference to the same table name. 
assert Bytes.equals(this.encodedRegionName, encodedRegionName); @@ -617,11 +553,11 @@ public WALProtos.WALKey.Builder getBuilder(WALCellCodec.ByteStringCompressor com .setScopeType(ScopeType.forNumber(e.getValue()))); } } - if (extendedAttributes != null){ - for (Map.Entry e : extendedAttributes.entrySet()){ - WALProtos.Attribute attr = WALProtos.Attribute.newBuilder(). - setKey(e.getKey()).setValue(compressor.compress(e.getValue(), - CompressionContext.DictionaryIndex.TABLE)).build(); + if (extendedAttributes != null) { + for (Map.Entry e : extendedAttributes.entrySet()) { + WALProtos.Attribute attr = WALProtos.Attribute.newBuilder().setKey(e.getKey()) + .setValue(compressor.compress(e.getValue(), CompressionContext.DictionaryIndex.TABLE)) + .build(); builder.addExtendedAttributes(attr); } } @@ -659,9 +595,9 @@ public void readFieldsFromPb(WALProtos.WALKey walKey, if (walKey.hasOrigSequenceNumber()) { this.origLogSeqNum = walKey.getOrigSequenceNumber(); } - if (walKey.getExtendedAttributesCount() > 0){ + if (walKey.getExtendedAttributesCount() > 0) { this.extendedAttributes = new HashMap<>(walKey.getExtendedAttributesCount()); - for (WALProtos.Attribute attr : walKey.getExtendedAttributesList()){ + for (WALProtos.Attribute attr : walKey.getExtendedAttributesList()) { byte[] value = uncompressor.uncompress(attr.getValue(), CompressionContext.DictionaryIndex.TABLE); extendedAttributes.put(attr.getKey(), value); @@ -683,7 +619,7 @@ public long estimatedSerializedSizeOf() { size += Bytes.SIZEOF_LONG; // nonce } if (replicationScope != null) { - for (Map.Entry scope: replicationScope.entrySet()) { + for (Map.Entry scope : replicationScope.entrySet()) { size += scope.getKey().length; size += Bytes.SIZEOF_INT; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index 07bcb1067ffc..6e3c291dec80 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,6 +46,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Strings; import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; @@ -56,17 +57,10 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.PosixParser; /** - * WALPrettyPrinter prints the contents of a given WAL with a variety of - * options affecting formatting and extent of content. - * - * It targets two usage cases: pretty printing for ease of debugging directly by - * humans, and JSON output for consumption by monitoring and/or maintenance - * scripts. - * - * It can filter by row, region, or sequence id. - * - * It can also toggle output of values. - * + * WALPrettyPrinter prints the contents of a given WAL with a variety of options affecting + * formatting and extent of content. It targets two usage cases: pretty printing for ease of + * debugging directly by humans, and JSON output for consumption by monitoring and/or maintenance + * scripts. It can filter by row, region, or sequence id. It can also toggle output of values. 
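A minimal sketch of driving the WALPrettyPrinter just described programmatically, using the same filters the CLI flags expose; the region hash, row prefix, and input path are illustrative values, not part of this patch.

```java
// Sketch only: pretty-print one WAL file with region and row-prefix filters.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALPrettyPrinter;

public class DumpWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    WALPrettyPrinter printer = new WALPrettyPrinter();
    printer.setRegionFilter("9192caead6a5a20acb4454ffbc79fa14"); // like -r/--region
    printer.setRowPrefixFilter("user");                          // like -f/--rowPrefix
    printer.processFile(conf, new Path(args[0]));                // path to a WAL file
  }
}
```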
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -99,7 +93,7 @@ public class WALPrettyPrinter { private PrintStream out; // for JSON encoding private static final Gson GSON = GsonUtil.createGson().create(); - //allows for jumping straight to a given portion of the file + // allows for jumping straight to a given portion of the file private long position; /** @@ -149,10 +143,8 @@ public void disableJSON() { /** * sets the region by which output will be filtered - * - * @param sequence - * when nonnegative, serves as a filter; only log entries with this - * sequence id will be printed + * @param sequence when nonnegative, serves as a filter; only log entries with this sequence id + * will be printed */ public void setSequenceFilter(long sequence) { this.sequence = sequence; @@ -165,12 +157,11 @@ public void setSequenceFilter(long sequence) { public void setTableFilter(String tablesWithDelimiter) { Collections.addAll(tableSet, tablesWithDelimiter.split(",")); } + /** * sets the region by which output will be filtered - * - * @param region - * when not null, serves as a filter; only log entries from this - * region will be printed + * @param region when not null, serves as a filter; only log entries from this region will be + * printed */ public void setRegionFilter(String region) { this.region = region; @@ -178,10 +169,7 @@ public void setRegionFilter(String region) { /** * sets the row key by which output will be filtered - * - * @param row - * when not null, serves as a filter; only log entries from this row - * will be printed + * @param row when not null, serves as a filter; only log entries from this row will be printed */ public void setRowFilter(String row) { this.row = row; @@ -189,10 +177,8 @@ public void setRowFilter(String row) { /** * sets the rowPrefix key prefix by which output will be filtered - * - * @param rowPrefix - * when not null, serves as a filter; only log entries with rows - * having this prefix will be printed + * @param rowPrefix when not null, serves as a filter; only log entries with rows having this + * prefix will be printed */ public void setRowPrefixFilter(String rowPrefix) { this.rowPrefix = rowPrefix; @@ -207,16 +193,15 @@ public void setOutputOnlyRowKey() { /** * sets the position to start seeking the WAL file - * @param position - * initial position to start seeking the given WAL file + * @param position initial position to start seeking the given WAL file */ public void setPosition(long position) { this.position = position; } /** - * enables output as a single, persistent list. at present, only relevant in - * the case of JSON output. + * enables output as a single, persistent list. at present, only relevant in the case of JSON + * output. */ public void beginPersistentOutput() { if (persistentOutput) { @@ -230,8 +215,7 @@ public void beginPersistentOutput() { } /** - * ends output of a single, persistent list. at present, only relevant in the - * case of JSON output. + * ends output of a single, persistent list. at present, only relevant in the case of JSON output. 
*/ public void endPersistentOutput() { if (!persistentOutput) { @@ -244,19 +228,13 @@ public void endPersistentOutput() { } /** - * reads a log file and outputs its contents, one transaction at a time, as - * specified by the currently configured options - * - * @param conf - * the HBase configuration relevant to this log file - * @param p - * the path of the log file to be read - * @throws IOException - * may be unable to access the configured filesystem or requested - * file. + * reads a log file and outputs its contents, one transaction at a time, as specified by the + * currently configured options + * @param conf the HBase configuration relevant to this log file + * @param p the path of the log file to be read + * @throws IOException may be unable to access the configured filesystem or requested file. */ - public void processFile(final Configuration conf, final Path p) - throws IOException { + public void processFile(final Configuration conf, final Path p) throws IOException { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { throw new FileNotFoundException(p.toString()); @@ -304,8 +282,7 @@ public void processFile(final Configuration conf, final Path p) Map txn = key.toStringMap(); long writeTime = key.getWriteTime(); // check output filters - if (!tableSet.isEmpty() && - !tableSet.contains(txn.get("table").toString())) { + if (!tableSet.isEmpty() && !tableSet.contains(txn.get("table").toString())) { continue; } if (sequence >= 0 && ((Long) txn.get("sequence")) != sequence) { @@ -319,7 +296,7 @@ public void processFile(final Configuration conf, final Path p) for (Cell cell : edit.getCells()) { // add atomic operation to txn Map op = - new HashMap<>(toStringMap(cell, outputOnlyRowKey, rowPrefix, row, outputValues)); + new HashMap<>(toStringMap(cell, outputOnlyRowKey, rowPrefix, row, outputValues)); if (op.isEmpty()) { continue; } @@ -341,8 +318,8 @@ public void processFile(final Configuration conf, final Path p) } else { // Pretty output, complete with indentation by atomic action if (!outputOnlyRowKey) { - out.println(String.format(outputTmpl, - txn.get("sequence"), txn.get("table"), txn.get("region"), new Date(writeTime))); + out.println(String.format(outputTmpl, txn.get("sequence"), txn.get("table"), + txn.get("region"), new Date(writeTime))); } for (int i = 0; i < actions.size(); i++) { Map op = actions.get(i); @@ -362,8 +339,8 @@ public void processFile(final Configuration conf, final Path p) } } - public static void printCell(PrintStream out, Map op, - boolean outputValues, boolean outputOnlyRowKey) { + public static void printCell(PrintStream out, Map op, boolean outputValues, + boolean outputOnlyRowKey) { String rowDetails = "row=" + op.get("row"); if (outputOnlyRowKey) { out.println(rowDetails); @@ -382,16 +359,16 @@ public static void printCell(PrintStream out, Map op, out.println("cell total size sum: " + op.get("total_size_sum")); } - public static Map toStringMap(Cell cell, - boolean printRowKeyOnly, String rowPrefix, String row, boolean outputValues) { + public static Map toStringMap(Cell cell, boolean printRowKeyOnly, + String rowPrefix, String row, boolean outputValues) { Map stringMap = new HashMap<>(); - String rowKey = Bytes.toStringBinary(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength()); + String rowKey = + Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); // Row and row prefix are mutually options so both cannot be true at the // same time. 
We can include checks in the same condition // Check if any of the filters are satisfied by the row, if not return empty map - if ((!Strings.isNullOrEmpty(rowPrefix) && !rowKey.startsWith(rowPrefix)) || - (!Strings.isNullOrEmpty(row) && !rowKey.equals(row))) { + if ((!Strings.isNullOrEmpty(rowPrefix) && !rowKey.startsWith(rowPrefix)) + || (!Strings.isNullOrEmpty(row) && !rowKey.equals(row))) { return stringMap; } @@ -400,11 +377,10 @@ public static Map toStringMap(Cell cell, return stringMap; } stringMap.put("type", cell.getType()); - stringMap.put("family", Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(), - cell.getFamilyLength())); - stringMap.put("qualifier", - Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength())); + stringMap.put("family", + Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())); + stringMap.put("qualifier", Bytes.toStringBinary(cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength())); stringMap.put("timestamp", cell.getTimestamp()); stringMap.put("vlen", cell.getValueLength()); stringMap.put("total_size_sum", cell.heapSize()); @@ -413,8 +389,7 @@ public static Map toStringMap(Cell cell, Iterator tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { Tag tag = tagsIterator.next(); - tagsString - .add((tag.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(tag))); + tagsString.add((tag.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(tag))); } stringMap.put("tag", tagsString); } @@ -433,13 +408,10 @@ public static void main(String[] args) throws IOException { } /** - * Pass one or more log file names and formatting options and it will dump out - * a text version of the contents on stdout. - * - * @param args - * Command line arguments - * @throws IOException - * Thrown upon file system errors etc. + * Pass one or more log file names and formatting options and it will dump out a text version of + * the contents on stdout. + * @param args Command line arguments + * @throws IOException Thrown upon file system errors etc. */ public static void run(String[] args) throws IOException { // create options @@ -450,11 +422,9 @@ public static void run(String[] args) throws IOException { options.addOption("t", "tables", true, "Table names (comma separated) to filter by; eg: test1,test2,test3 "); options.addOption("r", "region", true, - "Region to filter by. Pass encoded region name; e.g. '9192caead6a5a20acb4454ffbc79fa14'"); - options.addOption("s", "sequence", true, - "Sequence to filter by. Pass sequence number."); - options.addOption("k", "outputOnlyRowKey", false, - "Print only row keys"); + "Region to filter by. Pass encoded region name; e.g. '9192caead6a5a20acb4454ffbc79fa14'"); + options.addOption("s", "sequence", true, "Sequence to filter by. Pass sequence number."); + options.addOption("k", "outputOnlyRowKey", false, "Print only row keys"); options.addOption("w", "row", true, "Row to filter by. 
Pass row name."); options.addOption("f", "rowPrefix", true, "Row prefix to filter by."); options.addOption("g", "goto", true, "Position to seek to in the file"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java index 01c1d11ead70..5f90941fcc16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.List; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.client.RegionInfo; @@ -78,18 +76,17 @@ void init(WALFactory factory, Configuration conf, String providerId, Abortable s interface WriterBase extends Closeable { long getLength(); + /** - * NOTE: We add this method for {@link WALFileLengthProvider} used for replication, - * considering the case if we use {@link AsyncFSWAL},we write to 3 DNs concurrently, - * according to the visibility guarantee of HDFS, the data will be available immediately - * when arriving at DN since all the DNs will be considered as the last one in pipeline. - * This means replication may read uncommitted data and replicate it to the remote cluster - * and cause data inconsistency. - * The method {@link WriterBase#getLength} may return length which just in hdfs client - * buffer and not successfully synced to HDFS, so we use this method to return the length - * successfully synced to HDFS and replication thread could only read writing WAL file - * limited by this length. - * see also HBASE-14004 and this document for more details: + * NOTE: We add this method for {@link WALFileLengthProvider} used for replication, considering + * the case if we use {@link AsyncFSWAL},we write to 3 DNs concurrently, according to the + * visibility guarantee of HDFS, the data will be available immediately when arriving at DN + * since all the DNs will be considered as the last one in pipeline. This means replication may + * read uncommitted data and replicate it to the remote cluster and cause data inconsistency. + * The method {@link WriterBase#getLength} may return length which just in hdfs client buffer + * and not successfully synced to HDFS, so we use this method to return the length successfully + * synced to HDFS and replication thread could only read writing WAL file limited by this + * length. see also HBASE-14004 and this document for more details: * https://docs.google.com/document/d/11AyWtGhItQs6vsLRIx32PwTxmBY3libXwGXI25obVEY/edit# * @return byteSize successfully synced to underlying filesystem. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java index 94747ae026d2..150273d5f4b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java @@ -132,8 +132,8 @@ private static void mkdir(FileSystem fs, Path dir) throws IOException { } /** - * Move WAL. Used to move processed WALs to archive or bad WALs to corrupt WAL dir. 
- * WAL may have already been moved; makes allowance. + * Move WAL. Used to move processed WALs to archive or bad WALs to corrupt WAL dir. WAL may have + * already been moved; makes allowance. */ public static void moveWAL(FileSystem fs, Path p, Path targetDir) throws IOException { if (fs.exists(p)) { @@ -149,8 +149,8 @@ public static void moveWAL(FileSystem fs, Path p, Path targetDir) throws IOExcep * Path to a file under RECOVERED_EDITS_DIR directory of the region found in logEntry * named for the sequenceid in the passed logEntry: e.g. * /hbase/some_table/2323432434/recovered.edits/2332. This method also ensures existence of - * RECOVERED_EDITS_DIR under the region creating it if necessary. - * And also set storage policy for RECOVERED_EDITS_DIR if WAL_STORAGE_POLICY is configured. + * RECOVERED_EDITS_DIR under the region creating it if necessary. And also set storage policy for + * RECOVERED_EDITS_DIR if WAL_STORAGE_POLICY is configured. * @param tableName the table name * @param encodedRegionName the encoded region name * @param seqId the sequence id which used to generate file name @@ -187,7 +187,7 @@ static Path getRegionSplitEditsPath(TableName tableName, byte[] encodedRegionNam LOG.warn("mkdir failed on {}", dir); } else { String storagePolicy = - conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); + conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY); CommonFSUtils.setStoragePolicy(walFS, dir, storagePolicy); } // Append fileBeingSplit to prevent name conflict since we may have duplicate wal entries now. @@ -240,10 +240,10 @@ public static boolean hasRecoveredEdits(final Configuration conf, final RegionIn // Only default replica region can reach here, so we can use regioninfo // directly without converting it to default replica's regioninfo. Path regionWALDir = - CommonFSUtils.getWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName()); + CommonFSUtils.getWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName()); Path regionDir = FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(conf), regionInfo); - Path wrongRegionWALDir = - CommonFSUtils.getWrongWALRegionDir(conf, regionInfo.getTable(), regionInfo.getEncodedName()); + Path wrongRegionWALDir = CommonFSUtils.getWrongWALRegionDir(conf, regionInfo.getTable(), + regionInfo.getEncodedName()); FileSystem walFs = CommonFSUtils.getWALFileSystem(conf); FileSystem rootFs = CommonFSUtils.getRootDirFileSystem(conf); NavigableSet files = getSplitEditFilesSorted(walFs, regionWALDir); @@ -269,17 +269,17 @@ public static boolean hasRecoveredEdits(final Configuration conf, final RegionIn */ @Deprecated public static long getMaxRegionSequenceId(Configuration conf, RegionInfo region, - IOExceptionSupplier rootFsSupplier, IOExceptionSupplier walFsSupplier) - throws IOException { + IOExceptionSupplier rootFsSupplier, IOExceptionSupplier walFsSupplier) + throws IOException { FileSystem rootFs = rootFsSupplier.get(); FileSystem walFs = walFsSupplier.get(); Path regionWALDir = - CommonFSUtils.getWALRegionDir(conf, region.getTable(), region.getEncodedName()); + CommonFSUtils.getWALRegionDir(conf, region.getTable(), region.getEncodedName()); // This is the old place where we store max sequence id file Path regionDir = FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(conf), region); // This is for HBASE-20734, where we use a wrong directory, see HBASE-22617 for more details. 
Path wrongRegionWALDir = - CommonFSUtils.getWrongWALRegionDir(conf, region.getTable(), region.getEncodedName()); + CommonFSUtils.getWrongWALRegionDir(conf, region.getTable(), region.getEncodedName()); long maxSeqId = getMaxRegionSequenceId(walFs, regionWALDir); maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(rootFs, regionDir)); maxSeqId = Math.max(maxSeqId, getMaxRegionSequenceId(walFs, wrongRegionWALDir)); @@ -437,9 +437,12 @@ public MutationReplay(ClientProtos.MutationProto.MutationType type, Mutation mut } private final ClientProtos.MutationProto.MutationType type; - @SuppressWarnings("checkstyle:VisibilityModifier") public final Mutation mutation; - @SuppressWarnings("checkstyle:VisibilityModifier") public final long nonceGroup; - @SuppressWarnings("checkstyle:VisibilityModifier") public final long nonce; + @SuppressWarnings("checkstyle:VisibilityModifier") + public final Mutation mutation; + @SuppressWarnings("checkstyle:VisibilityModifier") + public final long nonceGroup; + @SuppressWarnings("checkstyle:VisibilityModifier") + public final long nonce; @Override public int compareTo(final MutationReplay d) { @@ -577,7 +580,7 @@ static Path tryCreateRecoveredHFilesDir(FileSystem rootFS, Configuration conf, } /** - * @param regionDir This regions directory in the filesystem + * @param regionDir This regions directory in the filesystem * @param familyName The column family name * @return The directory that holds recovered hfiles for the region's column family */ @@ -585,8 +588,8 @@ private static Path getRecoveredHFilesDir(final Path regionDir, String familyNam return new Path(new Path(regionDir, familyName), HConstants.RECOVERED_HFILES_DIR); } - public static FileStatus[] getRecoveredHFiles(final FileSystem rootFS, - final Path regionDir, String familyName) throws IOException { + public static FileStatus[] getRecoveredHFiles(final FileSystem rootFS, final Path regionDir, + String familyName) throws IOException { Path dir = getRecoveredHFilesDir(regionDir, familyName); return CommonFSUtils.listStatus(rootFS, dir); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index ed684868cdd1..65a5a7269db7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -63,10 +63,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId; /** - * Split RegionServer WAL files. Splits the WAL into new files, - * one per region, to be picked up on Region reopen. Deletes the split WAL when finished. - * Create an instance and call {@link #splitWAL(FileStatus, CancelableProgressable)} per file or - * use static helper methods. + * Split RegionServer WAL files. Splits the WAL into new files, one per region, to be picked up on + * Region reopen. Deletes the split WAL when finished. Create an instance and call + * {@link #splitWAL(FileStatus, CancelableProgressable)} per file or use static helper methods. */ @InterfaceAudience.Private public class WALSplitter { @@ -92,8 +91,8 @@ public class WALSplitter { private EntryBuffers entryBuffers; /** - * Coordinator for split log. Used by the zk-based log splitter. - * Not used by the procedure v2-based log splitter. + * Coordinator for split log. Used by the zk-based log splitter. Not used by the procedure + * v2-based log splitter. 
*/ private SplitLogWorkerCoordination splitLogWorkerCoordination; @@ -120,16 +119,16 @@ public class WALSplitter { public static final boolean DEFAULT_WAL_SPLIT_TO_HFILE = false; /** - * True if we are to run with bounded amount of writers rather than let the count blossom. - * Default is 'false'. Does not apply if you have set 'hbase.wal.split.to.hfile' as that - * is always bounded. Only applies when you are doing recovery to 'recovered.edits' - * files (the old default). Bounded writing tends to have higher throughput. + * True if we are to run with bounded amount of writers rather than let the count blossom. Default + * is 'false'. Does not apply if you have set 'hbase.wal.split.to.hfile' as that is always + * bounded. Only applies when you are doing recovery to 'recovered.edits' files (the old default). + * Bounded writing tends to have higher throughput. */ public final static String SPLIT_WRITER_CREATION_BOUNDED = "hbase.split.writer.creation.bounded"; public final static String SPLIT_WAL_BUFFER_SIZE = "hbase.regionserver.hlog.splitlog.buffersize"; public final static String SPLIT_WAL_WRITER_THREADS = - "hbase.regionserver.hlog.splitlog.writer.threads"; + "hbase.regionserver.hlog.splitlog.writer.threads"; private final int numWriterThreads; private final long bufferSize; @@ -137,17 +136,17 @@ public class WALSplitter { private final boolean hfile; private final boolean skipErrors; - WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, - FileSystem walFS, Path rootDir, FileSystem rootFS) { + WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, FileSystem walFS, + Path rootDir, FileSystem rootFS) { this(factory, conf, walRootDir, walFS, rootDir, rootFS, null, null, null); } - WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, - FileSystem walFS, Path rootDir, FileSystem rootFS, LastSequenceId idChecker, + WALSplitter(final WALFactory factory, Configuration conf, Path walRootDir, FileSystem walFS, + Path rootDir, FileSystem rootFS, LastSequenceId idChecker, SplitLogWorkerCoordination splitLogWorkerCoordination, RegionServerServices rsServices) { this.conf = HBaseConfiguration.create(conf); String codecClassName = - conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName()); + conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName()); this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName); this.walRootDir = walRootDir; this.walFS = walFS; @@ -157,8 +156,8 @@ public class WALSplitter { this.splitLogWorkerCoordination = splitLogWorkerCoordination; this.rsServices = rsServices; this.walFactory = factory; - this.tmpDirName = - conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY); + this.tmpDirName = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, + HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY); // if we limit the number of writers opened for sinking recovered edits this.splitWriterCreationBounded = conf.getBoolean(SPLIT_WRITER_CREATION_BOUNDED, false); this.bufferSize = this.conf.getLong(SPLIT_WAL_BUFFER_SIZE, 128 * 1024 * 1024); @@ -184,10 +183,8 @@ Map> getRegionMaxSeqIdInStores() { } /** - * Splits a WAL file. - * Used by old {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} and tests. - * Not used by new procedure-based WAL splitter. - * + * Splits a WAL file. Used by old {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} and + * tests. Not used by new procedure-based WAL splitter. 
* @return false if it is interrupted by the progress-able. */ public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem walFS, @@ -197,7 +194,7 @@ public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem w Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem rootFS = rootDir.getFileSystem(conf); WALSplitter splitter = new WALSplitter(factory, conf, walDir, walFS, rootDir, rootFS, idChecker, - splitLogWorkerCoordination, rsServices); + splitLogWorkerCoordination, rsServices); // splitWAL returns a data structure with whether split is finished and if the file is corrupt. // We don't need to propagate corruption flag here because it is propagated by the // SplitLogWorkerCoordination. @@ -205,9 +202,8 @@ public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem w } /** - * Split a folder of WAL files. Delete the directory when done. - * Used by tools and unit tests. It should be package private. - * It is public only because TestWALObserver is in a different package, + * Split a folder of WAL files. Delete the directory when done. Used by tools and unit tests. It + * should be package private. It is public only because TestWALObserver is in a different package, * which uses this method to do log splitting. * @return List of output files created by the split. */ @@ -217,14 +213,14 @@ public static List split(Path walRootDir, Path walsDir, Path archiveDir, F FileSystem rootFS = rootDir.getFileSystem(conf); WALSplitter splitter = new WALSplitter(factory, conf, walRootDir, walFS, rootDir, rootFS); final List wals = - SplitLogManager.getFileList(conf, Collections.singletonList(walsDir), null); + SplitLogManager.getFileList(conf, Collections.singletonList(walsDir), null); List splits = new ArrayList<>(); if (!wals.isEmpty()) { - for (FileStatus wal: wals) { + for (FileStatus wal : wals) { SplitWALResult splitWALResult = splitter.splitWAL(wal, null); if (splitWALResult.isFinished()) { WALSplitUtil.archive(wal.getPath(), splitWALResult.isCorrupt(), archiveDir, walFS, conf); - //splitter.outputSink.splits is mark as final, do not need null check + // splitter.outputSink.splits is mark as final, do not need null check splits.addAll(splitter.outputSink.splits); } } @@ -236,9 +232,9 @@ public static List split(Path walRootDir, Path walsDir, Path archiveDir, F } /** - * Data structure returned as result by #splitWAL(FileStatus, CancelableProgressable). - * Test {@link #isFinished()} to see if we are done with the WAL and {@link #isCorrupt()} for if - * the WAL is corrupt. + * Data structure returned as result by #splitWAL(FileStatus, CancelableProgressable). Test + * {@link #isFinished()} to see if we are done with the WAL and {@link #isCorrupt()} for if the + * WAL is corrupt. 
*/ static final class SplitWALResult { private final boolean finished; @@ -265,16 +261,16 @@ private void createOutputSinkAndEntryBuffers() { PipelineController controller = new PipelineController(); if (this.hfile) { this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize); - this.outputSink = new BoundedRecoveredHFilesOutputSink(this, controller, - this.entryBuffers, this.numWriterThreads); + this.outputSink = new BoundedRecoveredHFilesOutputSink(this, controller, this.entryBuffers, + this.numWriterThreads); } else if (this.splitWriterCreationBounded) { this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize); - this.outputSink = new BoundedRecoveredEditsOutputSink(this, controller, - this.entryBuffers, this.numWriterThreads); + this.outputSink = new BoundedRecoveredEditsOutputSink(this, controller, this.entryBuffers, + this.numWriterThreads); } else { this.entryBuffers = new EntryBuffers(controller, this.bufferSize); - this.outputSink = new RecoveredEditsOutputSink(this, controller, - this.entryBuffers, this.numWriterThreads); + this.outputSink = + new RecoveredEditsOutputSink(this, controller, this.entryBuffers, this.numWriterThreads); } } @@ -292,7 +288,7 @@ SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) thr int editsCount = 0; int editsSkipped = 0; MonitoredTask status = - TaskMonitor.get().createStatus("Splitting " + wal + " to temporary staging area."); + TaskMonitor.get().createStatus("Splitting " + wal + " to temporary staging area."); status.enableStatusJournal(true); Reader walReader = null; this.fileBeingSplit = walStatus; @@ -328,7 +324,7 @@ SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) thr Long lastFlushedSequenceId = lastFlushedSequenceIds.get(encodedRegionNameAsStr); if (lastFlushedSequenceId == null) { if (!(isRegionDirPresentUnderRoot(entry.getKey().getTableName(), - encodedRegionNameAsStr))) { + encodedRegionNameAsStr))) { // The region directory itself is not present in the FS. This indicates that // the region/table is already removed. We can just skip all the edits for this // region. 
Setting lastFlushedSequenceId as Long.MAX_VALUE so that all edits @@ -342,7 +338,7 @@ SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) thr Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) { maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(), - storeSeqId.getSequenceId()); + storeSeqId.getSequenceId()); } regionMaxSeqIdInStores.put(encodedRegionNameAsStr, maxSeqIdInStores); lastFlushedSequenceId = ids.getLastFlushedSequenceId(); @@ -419,10 +415,10 @@ SplitWALResult splitWAL(FileStatus walStatus, CancelableProgressable cancel) thr } finally { long processCost = EnvironmentEdgeManager.currentTime() - startTS; // See if length got updated post lease recovery - String msg = "Processed " + editsCount + " edits across " + - outputSink.getNumberOfRecoveredRegions() + " Regions in " + processCost + - " ms; skipped=" + editsSkipped + "; WAL=" + wal + ", size=" + lengthStr + - ", length=" + length + ", corrupted=" + corrupt + ", cancelled=" + cancelled; + String msg = "Processed " + editsCount + " edits across " + + outputSink.getNumberOfRecoveredRegions() + " Regions in " + processCost + + " ms; skipped=" + editsSkipped + "; WAL=" + wal + ", size=" + lengthStr + ", length=" + + length + ", corrupted=" + corrupt + ", cancelled=" + cancelled; LOG.info(msg); status.markComplete(msg); if (LOG.isDebugEnabled()) { @@ -441,8 +437,8 @@ private boolean isRegionDirPresentUnderRoot(TableName tn, String region) throws * Create a new {@link Reader} for reading logs to split. * @return Returns null if file has length zero or file can't be found. */ - protected Reader getReader(FileStatus walStatus, boolean skipErrors, CancelableProgressable cancel) - throws IOException, CorruptedLogFileException { + protected Reader getReader(FileStatus walStatus, boolean skipErrors, + CancelableProgressable cancel) throws IOException, CorruptedLogFileException { Path path = walStatus.getPath(); long length = walStatus.getLen(); Reader in; @@ -479,8 +475,8 @@ protected Reader getReader(FileStatus walStatus, boolean skipErrors, CancelableP if (!skipErrors || e instanceof InterruptedIOException) { throw e; // Don't mark the file corrupted if interrupted, or not skipErrors } - throw new CorruptedLogFileException("skipErrors=true; could not open " + path + - ", skipping", e); + throw new CorruptedLogFileException("skipErrors=true; could not open " + path + ", skipping", + e); } return in; } @@ -505,7 +501,7 @@ private Entry getNextLogLine(Reader in, Path path, boolean skipErrors) throw e; } throw new CorruptedLogFileException("skipErrors=true Ignoring exception" - + " while parsing wal " + path + ". Marking as corrupted", e); + + " while parsing wal " + path + ". Marking as corrupted", e); } } @@ -577,7 +573,6 @@ static class CorruptedLogFileException extends Exception { /** * CorruptedLogFileException with cause - * * @param message the message for this exception * @param cause the cause for this exception */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java index 0f68e11f0f87..d9dc373e6677 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java index ae940dc24a17..632ebd31d178 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,11 +47,12 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** @@ -92,9 +93,9 @@ private ExecutorService createThreadPool() { maxThreads * HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); ThreadPoolExecutor tpe = - new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, - new ThreadFactoryBuilder().setNameFormat(toString() + "-shared-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, + new ThreadFactoryBuilder().setNameFormat(toString() + "-shared-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); tpe.allowCoreThreadTimeOut(true); return tpe; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java index b0ea6f4879f1..60728a9fcaca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java @@ -33,35 +33,36 @@ import org.slf4j.LoggerFactory; /** - * A {@link TestRule} that clears all user namespaces and tables - * {@link ExternalResource#before() before} the test executes. Can be used in either the - * {@link Rule} or {@link ClassRule} positions. Lazily realizes the provided - * {@link AsyncConnection} so as to avoid initialization races with other {@link Rule Rules}. - * Does not {@link AsyncConnection#close() close()} provided connection instance when - * finished. + * A {@link TestRule} that clears all user namespaces and tables {@link ExternalResource#before() + * before} the test executes. Can be used in either the {@link Rule} or {@link ClassRule} positions. + * Lazily realizes the provided {@link AsyncConnection} so as to avoid initialization races with + * other {@link Rule Rules}. Does not {@link AsyncConnection#close() close()} provided + * connection instance when finished. *

      * Use in combination with {@link MiniClusterRule} and {@link ConnectionRule}, for example: * - *
      <pre>{@code
      + * <pre>
      + * {
      + *   @code
        *   public class TestMyClass {
      - *     @ClassRule
      + *     @ClassRule
        *     public static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build();
        *
        *     private final ConnectionRule connectionRule =
      - *       new ConnectionRule(miniClusterRule::createConnection);
      + *         new ConnectionRule(miniClusterRule::createConnection);
        *     private final ClearUserNamespacesAndTablesRule clearUserNamespacesAndTablesRule =
      - *       new ClearUserNamespacesAndTablesRule(connectionRule::getConnection);
      + *         new ClearUserNamespacesAndTablesRule(connectionRule::getConnection);
        *
      - *     @Rule
      - *     public TestRule rule = RuleChain
      - *       .outerRule(connectionRule)
      - *       .around(clearUserNamespacesAndTablesRule);
      + *     @Rule
      + *     public TestRule rule =
      + *         RuleChain.outerRule(connectionRule).around(clearUserNamespacesAndTablesRule);
        *   }
      - * }</pre>
      + * } + * </pre>
      */ public class ClearUserNamespacesAndTablesRule extends ExternalResource { private static final Logger logger = - LoggerFactory.getLogger(ClearUserNamespacesAndTablesRule.class); + LoggerFactory.getLogger(ClearUserNamespacesAndTablesRule.class); private final Supplier connectionSupplier; private AsyncAdmin admin; @@ -83,85 +84,68 @@ private CompletableFuture clearTablesAndNamespaces() { } private CompletableFuture deleteUserTables() { - return listTableNames() - .thenApply(tableNames -> tableNames.stream() + return listTableNames().thenApply(tableNames -> tableNames.stream() .map(tableName -> disableIfEnabled(tableName).thenCompose(_void -> deleteTable(tableName))) - .toArray(CompletableFuture[]::new)) - .thenCompose(CompletableFuture::allOf); + .toArray(CompletableFuture[]::new)).thenCompose(CompletableFuture::allOf); } private CompletableFuture> listTableNames() { - return CompletableFuture - .runAsync(() -> logger.trace("listing tables")) - .thenCompose(_void -> admin.listTableNames(false)) - .thenApply(tableNames -> { - if (logger.isTraceEnabled()) { - final StringJoiner joiner = new StringJoiner(", ", "[", "]"); - tableNames.stream().map(TableName::getNameAsString).forEach(joiner::add); - logger.trace("found existing tables {}", joiner.toString()); - } - return tableNames; - }); + return CompletableFuture.runAsync(() -> logger.trace("listing tables")) + .thenCompose(_void -> admin.listTableNames(false)).thenApply(tableNames -> { + if (logger.isTraceEnabled()) { + final StringJoiner joiner = new StringJoiner(", ", "[", "]"); + tableNames.stream().map(TableName::getNameAsString).forEach(joiner::add); + logger.trace("found existing tables {}", joiner.toString()); + } + return tableNames; + }); } private CompletableFuture isTableEnabled(final TableName tableName) { - return admin.isTableEnabled(tableName) - .thenApply(isEnabled -> { - logger.trace("table {} is enabled.", tableName); - return isEnabled; - }); + return admin.isTableEnabled(tableName).thenApply(isEnabled -> { + logger.trace("table {} is enabled.", tableName); + return isEnabled; + }); } private CompletableFuture disableIfEnabled(final TableName tableName) { - return isTableEnabled(tableName) - .thenCompose(isEnabled -> isEnabled - ? disableTable(tableName) - : CompletableFuture.completedFuture(null)); + return isTableEnabled(tableName).thenCompose( + isEnabled -> isEnabled ? 
disableTable(tableName) : CompletableFuture.completedFuture(null)); } private CompletableFuture disableTable(final TableName tableName) { - return CompletableFuture - .runAsync(() -> logger.trace("disabling enabled table {}", tableName)) - .thenCompose(_void -> admin.disableTable(tableName)); + return CompletableFuture.runAsync(() -> logger.trace("disabling enabled table {}", tableName)) + .thenCompose(_void -> admin.disableTable(tableName)); } private CompletableFuture deleteTable(final TableName tableName) { - return CompletableFuture - .runAsync(() -> logger.trace("deleting disabled table {}", tableName)) - .thenCompose(_void -> admin.deleteTable(tableName)); + return CompletableFuture.runAsync(() -> logger.trace("deleting disabled table {}", tableName)) + .thenCompose(_void -> admin.deleteTable(tableName)); } private CompletableFuture> listUserNamespaces() { - return CompletableFuture - .runAsync(() -> logger.trace("listing namespaces")) - .thenCompose(_void -> admin.listNamespaceDescriptors()) - .thenApply(namespaceDescriptors -> { - final StringJoiner joiner = new StringJoiner(", ", "[", "]"); - final List names = namespaceDescriptors.stream() - .map(NamespaceDescriptor::getName) - .peek(joiner::add) - .collect(Collectors.toList()); - logger.trace("found existing namespaces {}", joiner); - return names; - }) - .thenApply(namespaces -> namespaces.stream() - .filter(namespace -> !Objects.equals( - namespace, NamespaceDescriptor.SYSTEM_NAMESPACE.getName())) - .filter(namespace -> !Objects.equals( - namespace, NamespaceDescriptor.DEFAULT_NAMESPACE.getName())) - .collect(Collectors.toList())); + return CompletableFuture.runAsync(() -> logger.trace("listing namespaces")) + .thenCompose(_void -> admin.listNamespaceDescriptors()).thenApply(namespaceDescriptors -> { + final StringJoiner joiner = new StringJoiner(", ", "[", "]"); + final List names = namespaceDescriptors.stream().map(NamespaceDescriptor::getName) + .peek(joiner::add).collect(Collectors.toList()); + logger.trace("found existing namespaces {}", joiner); + return names; + }) + .thenApply(namespaces -> namespaces.stream().filter( + namespace -> !Objects.equals(namespace, NamespaceDescriptor.SYSTEM_NAMESPACE.getName())) + .filter(namespace -> !Objects.equals(namespace, + NamespaceDescriptor.DEFAULT_NAMESPACE.getName())) + .collect(Collectors.toList())); } private CompletableFuture deleteNamespace(final String namespace) { - return CompletableFuture - .runAsync(() -> logger.trace("deleting namespace {}", namespace)) - .thenCompose(_void -> admin.deleteNamespace(namespace)); + return CompletableFuture.runAsync(() -> logger.trace("deleting namespace {}", namespace)) + .thenCompose(_void -> admin.deleteNamespace(namespace)); } private CompletableFuture deleteUserNamespaces() { - return listUserNamespaces() - .thenCompose(namespaces -> CompletableFuture.allOf(namespaces.stream() - .map(this::deleteNamespace) - .toArray(CompletableFuture[]::new))); + return listUserNamespaces().thenCompose(namespaces -> CompletableFuture + .allOf(namespaces.stream().map(this::deleteNamespace).toArray(CompletableFuture[]::new))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java index 77bd1c531c68..0c566dcfa8b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java @@ -32,18 +32,20 @@ *

      * Use in combination with {@link MiniClusterRule}, for example: * - *
      <pre>{@code
      + * <pre>
      + * {
      + *   @code
        *   public class TestMyClass {
        *     private static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build();
        *     private static final ConnectionRule connectionRule =
      - *       ConnectionRule.createAsyncConnectionRule(miniClusterRule::createConnection);
      + *         ConnectionRule.createAsyncConnectionRule(miniClusterRule::createConnection);
        *
      - *     @ClassRule
      - *     public static final TestRule rule = RuleChain
      - *       .outerRule(miniClusterRule)
      - *       .around(connectionRule);
      + *     @ClassRule
      + *     public static final TestRule rule =
      + *         RuleChain.outerRule(miniClusterRule).around(connectionRule);
        *   }
      - * }</pre>
      + * } + * </pre>
      */ public final class ConnectionRule extends ExternalResource { @@ -53,29 +55,22 @@ public final class ConnectionRule extends ExternalResource { private Connection connection; private AsyncConnection asyncConnection; - public static ConnectionRule createConnectionRule( - final Supplier connectionSupplier - ) { + public static ConnectionRule createConnectionRule(final Supplier connectionSupplier) { return new ConnectionRule(connectionSupplier, null); } public static ConnectionRule createAsyncConnectionRule( - final Supplier> asyncConnectionSupplier - ) { + final Supplier> asyncConnectionSupplier) { return new ConnectionRule(null, asyncConnectionSupplier); } - public static ConnectionRule createConnectionRule( - final Supplier connectionSupplier, - final Supplier> asyncConnectionSupplier - ) { + public static ConnectionRule createConnectionRule(final Supplier connectionSupplier, + final Supplier> asyncConnectionSupplier) { return new ConnectionRule(connectionSupplier, asyncConnectionSupplier); } - private ConnectionRule( - final Supplier connectionSupplier, - final Supplier> asyncConnectionSupplier - ) { + private ConnectionRule(final Supplier connectionSupplier, + final Supplier> asyncConnectionSupplier) { this.connectionSupplier = connectionSupplier; this.asyncConnectionSupplier = asyncConnectionSupplier; } @@ -83,7 +78,7 @@ private ConnectionRule( public Connection getConnection() { if (connection == null) { throw new IllegalStateException( - "ConnectionRule not initialized with a synchronous connection."); + "ConnectionRule not initialized with a synchronous connection."); } return connection; } @@ -91,7 +86,7 @@ public Connection getConnection() { public AsyncConnection getAsyncConnection() { if (asyncConnection == null) { throw new IllegalStateException( - "ConnectionRule not initialized with an asynchronous connection."); + "ConnectionRule not initialized with an asynchronous connection."); } return asyncConnection; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java index 0584be85e72b..db45cb7a53b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -125,7 +125,7 @@ public ClusterMetrics getInitialClusterMetrics() throws IOException { * @throws IOException if something goes wrong or timeout occurs */ public void waitForRegionServerToStart(String hostname, int port, long timeout) - throws IOException { + throws IOException { long start = EnvironmentEdgeManager.currentTime(); while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { for (ServerName server : getClusterMetrics().getLiveServerMetrics().keySet()) { @@ -136,7 +136,7 @@ public void waitForRegionServerToStart(String hostname, int port, long timeout) Threads.sleep(100); } throw new IOException( - "did timeout " + timeout + "ms waiting for region server to start: " + hostname); + "did timeout " + timeout + "ms waiting for region server to start: " + hostname); } /** @@ -144,7 +144,7 @@ public void waitForRegionServerToStart(String hostname, int port, long timeout) * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForRegionServerToStop(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** * Suspend the region server @@ -219,14 +219,14 @@ public abstract void waitForRegionServerToStop(ServerName serverName, long timeo * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForDataNodeToStart(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** * Wait for the specified datanode to stop the thread / process. * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForDataNodeToStop(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** * Starts a new namenode on the given hostname or if this is a mini/local cluster, silently logs @@ -253,14 +253,14 @@ public abstract void waitForDataNodeToStop(ServerName serverName, long timeout) * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForNameNodeToStart(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** * Wait for the specified namenode to stop * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForNameNodeToStop(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** * Starts a new master on the given hostname or if this is a mini/local cluster, starts a master @@ -350,7 +350,7 @@ public ServerName getServerHoldingMeta() throws IOException { * @return ServerName that hosts the region or null */ public abstract ServerName getServerHoldingRegion(final TableName tn, byte[] regionName) - throws IOException; + throws IOException; /** * @return whether we are interacting with a distributed cluster as opposed to an in-process diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index 4b4ce9e03a1d..e39c99974334 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -151,7 +151,9 @@ import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.ZooKeeper.States; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -292,7 +294,7 @@ public static List 
memStoreTSTagsAndOffheapCombination() { } public static final Collection BLOOM_AND_COMPRESSION_COMBINATIONS = - bloomAndCompressionCombinations(); + bloomAndCompressionCombinations(); /** *

      @@ -412,13 +414,13 @@ private void createSubDirAndSystemProperty(String propertyName, Path parent, Str if (sysValue != null) { // There is already a value set. So we do nothing but hope // that there will be no conflicts - LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue + - " so I do NOT create it in " + parent); + LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue + + " so I do NOT create it in " + parent); String confValue = conf.get(propertyName); if (confValue != null && !confValue.endsWith(sysValue)) { - LOG.warn(propertyName + " property value differs in configuration and system: " + - "Configuration=" + confValue + " while System=" + sysValue + - " Erasing configuration value by system value."); + LOG.warn(propertyName + " property value differs in configuration and system: " + + "Configuration=" + confValue + " while System=" + sysValue + + " Erasing configuration value by system value."); } conf.set(propertyName, sysValue); } else { @@ -572,7 +574,7 @@ private void setFs() throws IOException { } public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, String[] hosts) - throws Exception { + throws Exception { createDirsAndSetProperties(); EditLogFileOutputStream.setShouldSkipFsyncForTesting(true); @@ -580,8 +582,8 @@ public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, Str Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR"); Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(), "ERROR"); - this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true, - true, null, racks, hosts, null); + this.dfsCluster = + new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null); // Set this just-started cluster as our filesystem. setFs(); @@ -604,8 +606,8 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR"); Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(), "ERROR"); - dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, - null, null, null); + dfsCluster = + new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null); return dfsCluster; } @@ -742,7 +744,7 @@ public void shutdownMiniDFSCluster() throws IOException { */ public SingleProcessHBaseCluster startMiniCluster(int numSlaves) throws Exception { StartTestingClusterOption option = StartTestingClusterOption.builder() - .numRegionServers(numSlaves).numDataNodes(numSlaves).build(); + .numRegionServers(numSlaves).numDataNodes(numSlaves).build(); return startMiniCluster(option); } @@ -763,7 +765,7 @@ public SingleProcessHBaseCluster startMiniCluster() throws Exception { * @see #shutdownMiniDFSCluster() */ public SingleProcessHBaseCluster startMiniCluster(StartTestingClusterOption option) - throws Exception { + throws Exception { LOG.info("Starting up minicluster with option: {}", option); // If we already put up a cluster, fail. @@ -801,7 +803,7 @@ public SingleProcessHBaseCluster startMiniCluster(StartTestingClusterOption opti * @see #shutdownMiniHBaseCluster() */ public SingleProcessHBaseCluster startMiniHBaseCluster(StartTestingClusterOption option) - throws IOException, InterruptedException { + throws IOException, InterruptedException { // Now do the mini hbase cluster. Set the hbase.rootdir in config. 
createRootDir(option.isCreateRootDir()); if (option.isCreateWALDir()) { @@ -825,13 +827,13 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(StartTestingClusterOption Configuration c = new Configuration(this.conf); this.hbaseCluster = new SingleProcessHBaseCluster(c, option.getNumMasters(), - option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(), - option.getMasterClass(), option.getRsClass()); + option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(), + option.getMasterClass(), option.getRsClass()); // Populate the master address configuration from mini cluster configuration. conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c)); // Don't leave here till we've done a successful scan of the hbase:meta try (Table t = getConnection().getTable(TableName.META_TABLE_NAME); - ResultScanner s = t.getScanner(new Scan())) { + ResultScanner s = t.getScanner(new Scan())) { for (;;) { if (s.next() == null) { break; @@ -852,7 +854,7 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(StartTestingClusterOption * @see #shutdownMiniHBaseCluster() */ public SingleProcessHBaseCluster startMiniHBaseCluster() - throws IOException, InterruptedException { + throws IOException, InterruptedException { return startMiniHBaseCluster(StartTestingClusterOption.builder().build()); } @@ -871,9 +873,9 @@ public SingleProcessHBaseCluster startMiniHBaseCluster() */ @Deprecated public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers) - throws IOException, InterruptedException { + throws IOException, InterruptedException { StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(numMasters) - .numRegionServers(numRegionServers).build(); + .numRegionServers(numRegionServers).build(); return startMiniHBaseCluster(option); } @@ -893,9 +895,9 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRe */ @Deprecated public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers, - List rsPorts) throws IOException, InterruptedException { + List rsPorts) throws IOException, InterruptedException { StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(numMasters) - .numRegionServers(numRegionServers).rsPorts(rsPorts).build(); + .numRegionServers(numRegionServers).rsPorts(rsPorts).build(); return startMiniHBaseCluster(option); } @@ -919,12 +921,12 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRe */ @Deprecated public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers, - List rsPorts, Class masterClass, - Class rsClass, - boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException { + List rsPorts, Class masterClass, + Class rsClass, + boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException { StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(numMasters) - .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts) - .createRootDir(createRootDir).createWALDir(createWALDir).build(); + .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass) + .rsPorts(rsPorts).createRootDir(createRootDir).createWALDir(createWALDir).build(); return startMiniHBaseCluster(option); } @@ -938,19 +940,19 @@ public void restartHBaseCluster(int servers) throws IOException, InterruptedExce } public void 
restartHBaseCluster(int servers, List ports) - throws IOException, InterruptedException { + throws IOException, InterruptedException { StartTestingClusterOption option = - StartTestingClusterOption.builder().numRegionServers(servers).rsPorts(ports).build(); + StartTestingClusterOption.builder().numRegionServers(servers).rsPorts(ports).build(); restartHBaseCluster(option); invalidateConnection(); } public void restartHBaseCluster(StartTestingClusterOption option) - throws IOException, InterruptedException { + throws IOException, InterruptedException { closeConnection(); this.hbaseCluster = new SingleProcessHBaseCluster(this.conf, option.getNumMasters(), - option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(), - option.getMasterClass(), option.getRsClass()); + option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(), + option.getMasterClass(), option.getRsClass()); // Don't leave here till we've done a successful scan of the hbase:meta Connection conn = ConnectionFactory.createConnection(this.conf); Table t = conn.getTable(TableName.META_TABLE_NAME); @@ -974,7 +976,7 @@ public SingleProcessHBaseCluster getMiniHBaseCluster() { return (SingleProcessHBaseCluster) this.hbaseCluster; } throw new RuntimeException( - hbaseCluster + " not an instance of " + SingleProcessHBaseCluster.class.getName()); + hbaseCluster + " not an instance of " + SingleProcessHBaseCluster.class.getName()); } /** @@ -1191,7 +1193,7 @@ public Table createTable(TableName tableName, byte[] family) throws IOException * @throws IOException */ public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions) - throws IOException { + throws IOException { if (numRegions < 3) throw new IOException("Must create at least 3 regions"); byte[] startKey = Bytes.toBytes("aaaaa"); byte[] endKey = Bytes.toBytes("zzzzz"); @@ -1231,7 +1233,7 @@ public Table createMultiRegionTable(TableName tableName, byte[][] families) thro * @throws IOException */ public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families) - throws IOException { + throws IOException { return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE, replicaCount); } @@ -1244,7 +1246,7 @@ public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[ * @throws IOException */ public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys) - throws IOException { + throws IOException { return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration())); } @@ -1258,13 +1260,13 @@ public Table createTable(TableName tableName, byte[][] families, byte[][] splitK * @throws IOException throws IOException */ public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys, - int replicaCount) throws IOException { + int replicaCount) throws IOException { return createTable(tableName, families, splitKeys, replicaCount, new Configuration(getConfiguration())); } public Table createTable(TableName tableName, byte[][] families, int numVersions, byte[] startKey, - byte[] endKey, int numRegions) throws IOException { + byte[] endKey, int numRegions) throws IOException { TableDescriptor desc = createTableDescriptor(tableName, families, numVersions); getAdmin().createTable(desc, startKey, endKey, numRegions); @@ -1280,7 +1282,7 @@ public Table createTable(TableName tableName, byte[][] families, int numVersions * @return A Table instance for the created table. 
*/ public Table createTable(TableDescriptor htd, byte[][] families, Configuration c) - throws IOException { + throws IOException { return createTable(htd, families, null, c); } @@ -1294,7 +1296,7 @@ public Table createTable(TableDescriptor htd, byte[][] families, Configuration c * @throws IOException if getAdmin or createTable fails */ public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys, - Configuration c) throws IOException { + Configuration c) throws IOException { // Disable blooms (they are on by default as of 0.95) but we disable them here because // tests have hard coded counts of what to expect in block cache, etc., and blooms being // on is interfering. @@ -1314,11 +1316,11 @@ public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitK */ public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys, - BloomType type, int blockSize, Configuration c) throws IOException { + BloomType type, int blockSize, Configuration c) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd); for (byte[] family : families) { ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family) - .setBloomFilterType(type).setBlocksize(blockSize); + .setBloomFilterType(type).setBlocksize(blockSize); if (isNewVersionBehaviorEnabled()) { cfdb.setNewVersionBehavior(true); } @@ -1372,9 +1374,9 @@ public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOExcep * @return A Table instance for the created table. */ public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys, - int replicaCount, final Configuration c) throws IOException { + int replicaCount, final Configuration c) throws IOException { TableDescriptor htd = - TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(replicaCount).build(); + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(replicaCount).build(); return createTable(htd, families, splitKeys, c); } @@ -1391,7 +1393,7 @@ public Table createTable(TableName tableName, byte[] family, int numVersions) th * @return A Table instance for the created table. */ public Table createTable(TableName tableName, byte[][] families, int numVersions) - throws IOException { + throws IOException { return createTable(tableName, families, numVersions, (byte[][]) null); } @@ -1400,11 +1402,11 @@ public Table createTable(TableName tableName, byte[][] families, int numVersions * @return A Table instance for the created table. */ public Table createTable(TableName tableName, byte[][] families, int numVersions, - byte[][] splitKeys) throws IOException { + byte[][] splitKeys) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] family : families) { ColumnFamilyDescriptorBuilder cfBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions); + ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions); if (isNewVersionBehaviorEnabled()) { cfBuilder.setNewVersionBehavior(true); } @@ -1426,7 +1428,7 @@ public Table createTable(TableName tableName, byte[][] families, int numVersions * @return A Table instance for the created table. 
*/ public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions) - throws IOException { + throws IOException { return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE); } @@ -1435,11 +1437,11 @@ public Table createMultiRegionTable(TableName tableName, byte[][] families, int * @return A Table instance for the created table. */ public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize) - throws IOException { + throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] family : families) { ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family) - .setMaxVersions(numVersions).setBlocksize(blockSize); + .setMaxVersions(numVersions).setBlocksize(blockSize); if (isNewVersionBehaviorEnabled()) { cfBuilder.setNewVersionBehavior(true); } @@ -1453,11 +1455,11 @@ public Table createTable(TableName tableName, byte[][] families, int numVersions } public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize, - String cpName) throws IOException { + String cpName) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] family : families) { ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family) - .setMaxVersions(numVersions).setBlocksize(blockSize); + .setMaxVersions(numVersions).setBlocksize(blockSize); if (isNewVersionBehaviorEnabled()) { cfBuilder.setNewVersionBehavior(true); } @@ -1478,12 +1480,12 @@ public Table createTable(TableName tableName, byte[][] families, int numVersions * @return A Table instance for the created table. */ public Table createTable(TableName tableName, byte[][] families, int[] numVersions) - throws IOException { + throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); int i = 0; for (byte[] family : families) { ColumnFamilyDescriptorBuilder cfBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions[i]); + ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions[i]); if (isNewVersionBehaviorEnabled()) { cfBuilder.setNewVersionBehavior(true); } @@ -1502,7 +1504,7 @@ public Table createTable(TableName tableName, byte[][] families, int[] numVersio * @return A Table instance for the created table. */ public Table createTable(TableName tableName, byte[] family, byte[][] splitRows) - throws IOException { + throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family); if (isNewVersionBehaviorEnabled()) { @@ -1528,9 +1530,9 @@ public Table createMultiRegionTable(TableName tableName, byte[] family) throws I * Set the number of Region replicas. 
*/ public static void setReplicas(Admin admin, TableName table, int replicaCount) - throws IOException, InterruptedException { + throws IOException, InterruptedException { TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table)) - .setRegionReplication(replicaCount).build(); + .setRegionReplication(replicaCount).build(); admin.modifyTable(desc); } @@ -1580,12 +1582,12 @@ public TableDescriptorBuilder createModifyableTableDescriptor(final String name) } public TableDescriptor createTableDescriptor(final TableName name, final int minVersions, - final int versions, final int ttl, KeepDeletedCells keepDeleted) { + final int versions, final int ttl, KeepDeletedCells keepDeleted) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name); for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) { ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(cfName) - .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted) - .setBlockCacheEnabled(false).setTimeToLive(ttl); + .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted) + .setBlockCacheEnabled(false).setTimeToLive(ttl); if (isNewVersionBehaviorEnabled()) { cfBuilder.setNewVersionBehavior(true); } @@ -1595,12 +1597,12 @@ public TableDescriptor createTableDescriptor(final TableName name, final int min } public TableDescriptorBuilder createModifyableTableDescriptor(final TableName name, - final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) { + final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name); for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) { ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(cfName) - .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted) - .setBlockCacheEnabled(false).setTimeToLive(ttl); + .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted) + .setBlockCacheEnabled(false).setTimeToLive(ttl); if (isNewVersionBehaviorEnabled()) { cfBuilder.setNewVersionBehavior(true); } @@ -1624,11 +1626,11 @@ public TableDescriptor createTableDescriptor(final TableName tableName, byte[] f } public TableDescriptor createTableDescriptor(final TableName tableName, byte[][] families, - int maxVersions) { + int maxVersions) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] family : families) { ColumnFamilyDescriptorBuilder cfBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(maxVersions); + ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(maxVersions); if (isNewVersionBehaviorEnabled()) { cfBuilder.setNewVersionBehavior(true); } @@ -1645,9 +1647,9 @@ public TableDescriptor createTableDescriptor(final TableName tableName, byte[][] * @return a region that writes to local dir for testing */ public HRegion createLocalHRegion(TableDescriptor desc, byte[] startKey, byte[] endKey) - throws IOException { + throws IOException { RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName()).setStartKey(startKey) - .setEndKey(endKey).build(); + .setEndKey(endKey).build(); return createLocalHRegion(hri, desc); } @@ -1669,7 +1671,7 @@ public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws * @throws IOException */ public HRegion createLocalHRegion(RegionInfo info, 
Configuration conf, TableDescriptor desc, - WAL wal) throws IOException { + WAL wal) throws IOException { return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal); } @@ -1684,15 +1686,15 @@ public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDesc * @throws IOException */ public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families) - throws IOException { + Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families) + throws IOException { return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly, durability, wal, null, families); } public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey, - byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, - boolean[] compactedMemStore, byte[]... families) throws IOException { + byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, + boolean[] compactedMemStore, byte[]... families) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); builder.setReadOnly(isReadOnly); int i = 0; @@ -1711,7 +1713,7 @@ public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] s } builder.setDurability(durability); RegionInfo info = - RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build(); + RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build(); return createLocalHRegion(info, conf, builder.build(), wal); } @@ -1746,7 +1748,7 @@ public Table deleteTableData(TableName tableName) throws IOException { * @return HTable for the new table */ public Table truncateTable(final TableName tableName, final boolean preserveRegions) - throws IOException { + throws IOException { Admin admin = getAdmin(); if (!admin.isTableDisabled(tableName)) { admin.disableTable(tableName); @@ -1820,7 +1822,7 @@ public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOExc * @throws IOException */ public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) - throws IOException { + throws IOException { List puts = new ArrayList<>(); for (byte[] row : HBaseTestingUtil.ROWS) { Put put = new Put(row); @@ -1874,14 +1876,14 @@ public void validate() { for (byte b3 = 'a'; b3 <= 'z'; b3++) { int count = seenRows[i(b1)][i(b2)][i(b3)]; int expectedCount = 0; - if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0 && - Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) { + if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0 + && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) { expectedCount = 1; } if (count != expectedCount) { String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8); - throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " + - "instead of " + expectedCount); + throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " + + "instead of " + expectedCount); } } } @@ -1942,7 +1944,7 @@ public int loadRegion(final HRegion r, final byte[] f, final boolean flush) thro } public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow) - throws IOException { + throws IOException { for (int i = startRow; i < endRow; i++) { byte[] data = Bytes.toBytes(String.valueOf(i)); Put put = new Put(data); @@ 
-1952,7 +1954,7 @@ public void loadNumericRows(final Table t, final byte[] f, int startRow, int end } public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows) - throws IOException { + throws IOException { for (int i = 0; i < totalRows; i++) { byte[] row = new byte[rowSize]; Bytes.random(row); @@ -1963,7 +1965,7 @@ public void loadRandomRows(final Table t, final byte[] f, int rowSize, int total } public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow, - int replicaId) throws IOException { + int replicaId) throws IOException { for (int i = startRow; i < endRow; i++) { String failMsg = "Failed verification of row :" + i; byte[] data = Bytes.toBytes(String.valueOf(i)); @@ -1980,22 +1982,22 @@ public void verifyNumericRows(Table table, final byte[] f, int startRow, int end } public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow) - throws IOException { + throws IOException { verifyNumericRows((HRegion) region, f, startRow, endRow); } public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow) - throws IOException { + throws IOException { verifyNumericRows(region, f, startRow, endRow, true); } public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow, - final boolean present) throws IOException { + final boolean present) throws IOException { verifyNumericRows((HRegion) region, f, startRow, endRow, present); } public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow, - final boolean present) throws IOException { + final boolean present) throws IOException { for (int i = startRow; i < endRow; i++) { String failMsg = "Failed verification of row :" + i; byte[] data = Bytes.toBytes(String.valueOf(i)); @@ -2014,7 +2016,7 @@ public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int } public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow) - throws IOException { + throws IOException { for (int i = startRow; i < endRow; i++) { byte[] data = Bytes.toBytes(String.valueOf(i)); Delete delete = new Delete(data); @@ -2111,20 +2113,20 @@ public String checksumRows(final Table table) throws Exception { } public static final byte[][] KEYS = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), - Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"), - Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), - Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), - Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy") }; + Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), + Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"), + Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), + Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), + Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), + Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy") }; public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = { Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), - 
Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"), - Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), - Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), - Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz") }; + Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), + Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"), + Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), + Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), + Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), + Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz") }; /** * Create rows in hbase:meta for regions of the specified table with the specified start keys. The @@ -2132,7 +2134,7 @@ public String checksumRows(final Table table) throws Exception { * @return list of region info for regions added to meta */ public List createMultiRegionsInMeta(final Configuration conf, - final TableDescriptor htd, byte[][] startKeys) throws IOException { + final TableDescriptor htd, byte[][] startKeys) throws IOException { try (Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); List newRegions = new ArrayList<>(startKeys.length); @@ -2142,7 +2144,7 @@ public List createMultiRegionsInMeta(final Configuration conf, for (int i = 0; i < startKeys.length; i++) { int j = (i + 1) % startKeys.length; RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKeys[i]) - .setEndKey(startKeys[j]).build(); + .setEndKey(startKeys[j]).build(); MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1); newRegions.add(hri); } @@ -2154,7 +2156,7 @@ public List createMultiRegionsInMeta(final Configuration conf, * Create an unmanaged WAL. Be sure to close it when you're through. */ public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri) - throws IOException { + throws IOException { // The WAL subsystem will use the default rootDir rather than the passed in rootDir // unless I pass along via the conf. Configuration confForWAL = new Configuration(conf); @@ -2167,7 +2169,7 @@ public static WAL createWal(final Configuration conf, final Path rootDir, final * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources. */ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir, - final Configuration conf, final TableDescriptor htd) throws IOException { + final Configuration conf, final TableDescriptor htd) throws IOException { return createRegionAndWAL(info, rootDir, conf, htd, true); } @@ -2176,7 +2178,8 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources. 
*/ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir, - final Configuration conf, final TableDescriptor htd, BlockCache blockCache) throws IOException { + final Configuration conf, final TableDescriptor htd, BlockCache blockCache) + throws IOException { HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false); region.setBlockCache(blockCache); region.initialize(); @@ -2188,8 +2191,8 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources. */ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir, - final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache) - throws IOException { + final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache) + throws IOException { HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false); region.setMobFileCache(mobFileCache); region.initialize(); @@ -2201,7 +2204,7 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources. */ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir, - final Configuration conf, final TableDescriptor htd, boolean initialize) throws IOException { + final Configuration conf, final TableDescriptor htd, boolean initialize) throws IOException { ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); WAL wal = createWal(conf, rootDir, info); @@ -2228,7 +2231,7 @@ public HRegionServer getOtherRegionServer(HRegionServer rs) { * @return region server that holds it, null if the row doesn't exist */ public HRegionServer getRSForFirstRegionInTable(TableName tableName) - throws IOException, InterruptedException { + throws IOException, InterruptedException { List regions = getAdmin().getRegions(tableName); if (regions == null || regions.isEmpty()) { return null; @@ -2236,8 +2239,8 @@ public HRegionServer getRSForFirstRegionInTable(TableName tableName) LOG.debug("Found " + regions.size() + " regions for table " + tableName); byte[] firstRegionName = - regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst() - .orElseThrow(() -> new IOException("online regions not found in table " + tableName)); + regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst() + .orElseThrow(() -> new IOException("online regions not found in table " + tableName)); LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName)); long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE, @@ -2330,9 +2333,9 @@ private void startMiniMapReduceCluster(final int servers) throws IOException { } // Allow the user to override FS URI for this map-reduce cluster to use. - mrCluster = - new MiniMRCluster(servers, FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), - 1, null, null, new JobConf(this.conf)); + mrCluster = new MiniMRCluster(servers, + FS_URI != null ? 
FS_URI : FileSystem.get(conf).getUri().toString(), 1, null, null, + new JobConf(this.conf)); JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster); if (jobConf == null) { jobConf = mrCluster.createJobConf(); @@ -2397,7 +2400,7 @@ public RegionServerServices createMockRegionServerService() throws IOException { * TestTokenAuthentication */ public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) - throws IOException { + throws IOException { final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher()); rss.setFileSystem(getTestFileSystem()); rss.setRpcServer(rpc); @@ -2489,13 +2492,13 @@ public void process(WatchedEvent watchedEvent) { // Making it expire ZooKeeper newZK = - new ZooKeeper(quorumServers, 1000, EmptyWatcher.instance, sessionID, password); + new ZooKeeper(quorumServers, 1000, EmptyWatcher.instance, sessionID, password); // ensure that we have connection to the server before closing down, otherwise // the close session event will be eaten out before we start CONNECTING state long start = EnvironmentEdgeManager.currentTime(); - while (newZK.getState() != States.CONNECTED && - EnvironmentEdgeManager.currentTime() - start < 1000) { + while (newZK.getState() != States.CONNECTED + && EnvironmentEdgeManager.currentTime() - start < 1000) { Thread.sleep(1); } newZK.close(); @@ -2724,7 +2727,7 @@ public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, * @throws IOException if the FileSystem could not be set from the passed dfs cluster */ public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown) - throws IllegalStateException, IOException { + throws IllegalStateException, IOException { if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) { throw new IllegalStateException("DFSCluster is already running! Shut it down first."); } @@ -2746,7 +2749,7 @@ public void waitTableAvailable(TableName table) throws InterruptedException, IOE } public void waitTableAvailable(TableName table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitFor(timeoutMillis, predicateTableAvailable(table)); } @@ -2756,30 +2759,30 @@ public void waitTableAvailable(TableName table, long timeoutMillis) * @param timeoutMillis Timeout. 
*/ public void waitTableAvailable(byte[] table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table))); } public String explainTableAvailability(TableName tableName) throws IOException { StringBuilder msg = - new StringBuilder(explainTableState(tableName, TableState.State.ENABLED)).append(", "); + new StringBuilder(explainTableState(tableName, TableState.State.ENABLED)).append(", "); if (getHBaseCluster().getMaster().isAlive()) { Map assignments = getHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getRegionAssignments(); + .getRegionStates().getRegionAssignments(); final List> metaLocations = - MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName); + MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName); for (Pair metaLocation : metaLocations) { RegionInfo hri = metaLocation.getFirst(); ServerName sn = metaLocation.getSecond(); if (!assignments.containsKey(hri)) { msg.append(", region ").append(hri) - .append(" not assigned, but found in meta, it expected to be on ").append(sn); + .append(" not assigned, but found in meta, it expected to be on ").append(sn); } else if (sn == null) { msg.append(", region ").append(hri).append(" assigned, but has no server in meta"); } else if (!sn.equals(assignments.get(hri))) { msg.append(", region ").append(hri) - .append(" assigned, but has different servers in meta and AM ( ").append(sn) - .append(" <> ").append(assignments.get(hri)); + .append(" assigned, but has different servers in meta and AM ( ").append(sn) + .append(" <> ").append(assignments.get(hri)); } } } @@ -2787,11 +2790,11 @@ public String explainTableAvailability(TableName tableName) throws IOException { } public String explainTableState(final TableName table, TableState.State state) - throws IOException { + throws IOException { TableState tableState = MetaTableAccessor.getTableState(getConnection(), table); if (tableState == null) { - return "TableState in META: No table state in META for table " + table + - " last state in meta (including deleted is " + findLastTableState(table) + ")"; + return "TableState in META: No table state in META for table " + table + + " last state in meta (including deleted is " + findLastTableState(table) + ")"; } else if (!tableState.inStates(state)) { return "TableState in META: Not " + state + " state, but " + tableState; } else { @@ -2840,7 +2843,7 @@ public void waitTableEnabled(TableName table) throws InterruptedException, IOExc * @param timeoutMillis Time to wait on it being marked enabled. */ public void waitTableEnabled(byte[] table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitTableEnabled(TableName.valueOf(table), timeoutMillis); } @@ -2858,7 +2861,7 @@ public void waitTableDisabled(byte[] table) throws InterruptedException, IOExcep } public void waitTableDisabled(TableName table, long millisTimeout) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitFor(millisTimeout, predicateTableDisabled(table)); } @@ -2868,7 +2871,7 @@ public void waitTableDisabled(TableName table, long millisTimeout) * @param timeoutMillis Time to wait on it being marked disabled. 
*/ public void waitTableDisabled(byte[] table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitTableDisabled(TableName.valueOf(table), timeoutMillis); } @@ -2922,7 +2925,7 @@ public boolean ensureSomeNonStoppedRegionServersAvailable(final int num) throws * @return A new configuration instance with a different user set into it. */ public static User getDifferentUser(final Configuration c, final String differentiatingSuffix) - throws IOException { + throws IOException { FileSystem currentfs = FileSystem.get(c); if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) { return User.getCurrent(); @@ -2935,12 +2938,12 @@ public static User getDifferentUser(final Configuration c, final String differen } public static NavigableSet getAllOnlineRegions(SingleProcessHBaseCluster cluster) - throws IOException { + throws IOException { NavigableSet online = new TreeSet<>(); for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { try { for (RegionInfo region : ProtobufUtil - .getOnlineRegions(rst.getRegionServer().getRSRpcServices())) { + .getOnlineRegions(rst.getRegionServer().getRSRpcServices())) { online.add(region.getRegionNameAsString()); } } catch (RegionServerStoppedException e) { @@ -2971,7 +2974,7 @@ public static void setMaxRecoveryErrorCount(final OutputStream stream, final int if (className.equals("DFSOutputStream")) { if (clazz.isInstance(stream)) { Field maxRecoveryErrorCountField = - stream.getClass().getDeclaredField("maxRecoveryErrorCount"); + stream.getClass().getDeclaredField("maxRecoveryErrorCount"); maxRecoveryErrorCountField.setAccessible(true); maxRecoveryErrorCountField.setInt(stream, max); break; @@ -2989,7 +2992,7 @@ public static void setMaxRecoveryErrorCount(final OutputStream stream, final int * @return true if the region is assigned false otherwise. */ public boolean assignRegion(final RegionInfo regionInfo) - throws IOException, InterruptedException { + throws IOException, InterruptedException { final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager(); am.assign(regionInfo); return AssignmentTestingUtil.waitForAssignment(am, regionInfo); @@ -3001,13 +3004,13 @@ public boolean assignRegion(final RegionInfo regionInfo) * @param destServer destination server of the region */ public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer) - throws InterruptedException, IOException { + throws InterruptedException, IOException { HMaster master = getMiniHBaseCluster().getMaster(); // TODO: Here we start the move. The move can take a while. getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer); while (true) { ServerName serverName = - master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(destRegion); + master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(destRegion); if (serverName != null && serverName.equals(destServer)) { assertRegionOnServer(destRegion, serverName, 2000); break; @@ -3042,11 +3045,11 @@ public void waitUntilAllSystemRegionsAssigned() throws IOException { * @param timeout timeout, in milliseconds */ public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout) - throws IOException { + throws IOException { if (!TableName.isMetaTableName(tableName)) { try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { - LOG.debug("Waiting until all regions of table " + tableName + " get assigned. 
Timeout = " + - timeout + "ms"); + LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " + + timeout + "ms"); waitFor(timeout, 200, true, new ExplainingPredicate() { @Override public String explainFailure() throws IOException { @@ -3068,17 +3071,17 @@ public boolean evaluate() throws IOException { // (for fault tolerance testing). tableFound = true; byte[] server = - r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); + r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); if (server == null) { return false; } else { byte[] startCode = - r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); + r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); ServerName serverName = - ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," + - Bytes.toLong(startCode)); - if (!getHBaseClusterInterface().isDistributedCluster() && - getHBaseCluster().isKilledRS(serverName)) { + ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," + + Bytes.toLong(startCode)); + if (!getHBaseClusterInterface().isDistributedCluster() + && getHBaseCluster().isKilledRS(serverName)) { return false; } } @@ -3164,7 +3167,7 @@ public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numR * querying itself, and relies on StoreScanner. */ public static List getFromStoreFile(HStore store, byte[] row, NavigableSet columns) - throws IOException { + throws IOException { Get get = new Get(row); Map> s = get.getFamilyMap(); s.put(store.getColumnFamilyDescriptor().getName(), columns); @@ -3173,14 +3176,14 @@ public static List getFromStoreFile(HStore store, byte[] row, NavigableSet } public static void assertKVListsEqual(String additionalMsg, final List expected, - final List actual) { + final List actual) { final int eLen = expected.size(); final int aLen = actual.size(); final int minLen = Math.min(eLen, aLen); int i = 0; - while (i < minLen && - CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0) { + while (i < minLen + && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0) { i++; } @@ -3192,9 +3195,9 @@ public static void assertKVListsEqual(String additionalMsg, final List String safeGetAsStr(List lst, int i) { } public String getClusterKey() { - return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + - conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":" + - conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + + ":" + + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); } /** * Creates a random table with the given parameters */ public Table createRandomTable(TableName tableName, final Collection families, - final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions, - final int numRowsPerFlush) throws IOException, InterruptedException { - LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " + - numFlushes + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions=" + - maxVersions + "\n"); + final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions, + final int numRowsPerFlush) throws IOException, InterruptedException { + LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " + + numFlushes + " storefiles per region, " + 
numRowsPerFlush + + " rows per flush, maxVersions=" + maxVersions + "\n"); final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L); final int numCF = families.size(); @@ -3261,8 +3264,8 @@ public Table createRandomTable(TableName tableName, final Collection fam final byte[] qual = Bytes.toBytes("col" + iCol); if (rand.nextBoolean()) { final byte[] value = - Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" + - iCol + "_ts_" + ts + "_random_" + rand.nextLong()); + Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" + + iCol + "_ts_" + ts + "_random_" + rand.nextLong()); put.addColumn(cf, qual, ts, value); } else if (rand.nextDouble() < 0.8) { del.addColumn(cf, qual, ts); @@ -3329,8 +3332,8 @@ public static void waitForHostPort(String host, int port) throws IOException { * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, - byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding) - throws IOException { + byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding) + throws IOException { return createPreSplitLoadTestTable(conf, tableName, columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, Durability.USE_DEFAULT); } @@ -3341,13 +3344,13 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableName tabl * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, - byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding, - int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { + byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding, + int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); builder.setDurability(durability); builder.setRegionReplication(regionReplication); ColumnFamilyDescriptorBuilder cfBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(columnFamily); + ColumnFamilyDescriptorBuilder.newBuilder(columnFamily); cfBuilder.setDataBlockEncoding(dataBlockEncoding); cfBuilder.setCompressionType(compression); return createPreSplitLoadTestTable(conf, builder.build(), cfBuilder.build(), @@ -3360,15 +3363,15 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableName tabl * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, - byte[][] columnFamilies, Algorithm compression, DataBlockEncoding dataBlockEncoding, - int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { + byte[][] columnFamilies, Algorithm compression, DataBlockEncoding dataBlockEncoding, + int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); builder.setDurability(durability); builder.setRegionReplication(regionReplication); ColumnFamilyDescriptor[] hcds = new ColumnFamilyDescriptor[columnFamilies.length]; for (int i = 0; i < columnFamilies.length; i++) { ColumnFamilyDescriptorBuilder cfBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(columnFamilies[i]); + ColumnFamilyDescriptorBuilder.newBuilder(columnFamilies[i]); 
cfBuilder.setDataBlockEncoding(dataBlockEncoding); cfBuilder.setCompressionType(compression); hcds[i] = cfBuilder.build(); @@ -3382,7 +3385,7 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableName tabl * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, - ColumnFamilyDescriptor hcd) throws IOException { + ColumnFamilyDescriptor hcd) throws IOException { return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER); } @@ -3392,7 +3395,7 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableDescripto * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, - ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException { + ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException { return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] { hcd }, numRegionsPerServer); } @@ -3403,7 +3406,7 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableDescripto * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, - ColumnFamilyDescriptor[] hcds, int numRegionsPerServer) throws IOException { + ColumnFamilyDescriptor[] hcds, int numRegionsPerServer) throws IOException { return createPreSplitLoadTestTable(conf, desc, hcds, new RegionSplitter.HexStringSplit(), numRegionsPerServer); } @@ -3414,8 +3417,8 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableDescripto * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor td, - ColumnFamilyDescriptor[] cds, SplitAlgorithm splitter, int numRegionsPerServer) - throws IOException { + ColumnFamilyDescriptor[] cds, SplitAlgorithm splitter, int numRegionsPerServer) + throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td); for (ColumnFamilyDescriptor cd : cds) { if (!td.hasColumnFamily(cd.getName())) { @@ -3437,9 +3440,9 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableDescripto } totalNumberOfRegions = numberOfServers * numRegionsPerServer; - LOG.info( - "Number of live regionservers: " + numberOfServers + ", " + "pre-splitting table into " + - totalNumberOfRegions + " regions " + "(regions per server: " + numRegionsPerServer + ")"); + LOG.info("Number of live regionservers: " + numberOfServers + ", " + + "pre-splitting table into " + totalNumberOfRegions + " regions " + + "(regions per server: " + numRegionsPerServer + ")"); byte[][] splits = splitter.split(totalNumberOfRegions); @@ -3467,7 +3470,7 @@ public static int getMetaRSPort(Connection connection) throws IOException { * yet, after the assignment znode is deleted and the new assignment is recorded in master. */ public void assertRegionOnServer(final RegionInfo hri, final ServerName server, - final long timeout) throws IOException, InterruptedException { + final long timeout) throws IOException, InterruptedException { long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout; while (true) { List regions = getAdmin().getRegions(server); @@ -3483,13 +3486,13 @@ public void assertRegionOnServer(final RegionInfo hri, final ServerName server, * Check to make sure the region is open on the specified region server, but not on any other one. 
*/ public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName server, - final long timeout) throws IOException, InterruptedException { + final long timeout) throws IOException, InterruptedException { long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout; while (true) { List regions = getAdmin().getRegions(server); if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) { List rsThreads = - getHBaseCluster().getLiveRegionServerThreads(); + getHBaseCluster().getLiveRegionServerThreads(); for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) { HRegionServer rs = rsThread.getRegionServer(); if (server.equals(rs.getServerName())) { @@ -3512,15 +3515,15 @@ public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName serv public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException { TableDescriptor td = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build(); RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build(); return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td); } public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd, - BlockCache blockCache) throws IOException { + BlockCache blockCache) throws IOException { TableDescriptor td = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build(); RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build(); return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache); } @@ -3537,7 +3540,7 @@ public ExplainingPredicate predicateNoRegionsInTransition() { @Override public String explainFailure() throws IOException { final RegionStates regionStates = - getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); + getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); return "found in transition: " + regionStates.getRegionsInTransition().toString(); } @@ -3603,10 +3606,10 @@ public boolean evaluate() throws IOException { try (Table table = getConnection().getTable(tableName)) { TableDescriptor htd = table.getDescriptor(); for (HRegionLocation loc : getConnection().getRegionLocator(tableName) - .getAllRegionLocations()) { + .getAllRegionLocations()) { Scan scan = new Scan().withStartRow(loc.getRegion().getStartKey()) - .withStopRow(loc.getRegion().getEndKey()).setOneRowLimit() - .setMaxResultsPerColumnFamily(1).setCacheBlocks(false); + .withStopRow(loc.getRegion().getEndKey()).setOneRowLimit() + .setMaxResultsPerColumnFamily(1).setCacheBlocks(false); for (byte[] family : htd.getColumnFamilyNames()) { scan.addFamily(family); } @@ -3689,7 +3692,7 @@ public static List generateColumnDescriptors(final Strin for (BloomType bloomType : BloomType.values()) { String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId); ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name)); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name)); columnFamilyDescriptorBuilder.setCompressionType(compressionType); columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType); columnFamilyDescriptorBuilder.setBloomFilterType(bloomType); @@ -3790,7 +3793,7 @@ public int 
getNumHFiles(final TableName tableName, final byte[] family) { } public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName, - final byte[] family) { + final byte[] family) { int numHFiles = 0; for (Region region : rs.getRegions(tableName)) { numHFiles += region.getStore(family).getStorefilesCount(); @@ -3804,7 +3807,7 @@ public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescr Collection rtdFamilies = Arrays.asList(rtd.getColumnFamilies()); assertEquals(ltdFamilies.size(), rtdFamilies.size()); for (Iterator it = ltdFamilies.iterator(), - it2 = rtdFamilies.iterator(); it.hasNext();) { + it2 = rtdFamilies.iterator(); it.hasNext();) { assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next())); } } @@ -3814,7 +3817,7 @@ public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescr * invocations. */ public static void await(final long sleepMillis, final BooleanSupplier condition) - throws InterruptedException { + throws InterruptedException { try { while (!condition.getAsBoolean()) { Thread.sleep(sleepMillis); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index b3fb634a1de1..224f0f31f2bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,29 +19,28 @@ import java.io.IOException; import java.util.concurrent.ThreadLocalRandom; - import org.apache.commons.math3.random.RandomData; import org.apache.commons.math3.random.RandomDataImpl; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.crypto.CryptoCipherProvider; import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.io.crypto.aes.AES; -import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class runs performance benchmarks for {@link HFile}. 
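(Illustrative aside, not part of this patch: a minimal sketch of how the package-private format()/createCell() helpers reformatted in the hunks below fit together; the literal values here are made up for the example, and the call is assumed to live in the same test package.)

    // Sketch only -- same-package usage of HFilePerformanceEvaluation's key/cell helpers.
    byte[] row = HFilePerformanceEvaluation.format(42);             // zero-padded row key "0000000042"
    Cell cell = HFilePerformanceEvaluation.createCell(42, Bytes.toBytes("some-value"));
    // Per the javadoc on these helpers, every such Cell uses an empty family/qualifier and
    // HConstants.LATEST_TIMESTAMP, so the cells differ only in their row component.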
@@ -53,19 +51,19 @@ public class HFilePerformanceEvaluation { private static final int ROW_COUNT = 1000000; private static final int RFILE_BLOCKSIZE = 8 * 1024; private static StringBuilder testSummary = new StringBuilder(); - + // Disable verbose INFO logging from org.apache.hadoop.io.compress.CodecPool static { - System.setProperty("org.apache.commons.logging.Log", + System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.SimpleLog"); - System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.hadoop.io.compress.CodecPool", - "WARN"); + System.setProperty( + "org.apache.commons.logging.simplelog.log.org.apache.hadoop.io.compress.CodecPool", "WARN"); } - + private static final Logger LOG = - LoggerFactory.getLogger(HFilePerformanceEvaluation.class.getName()); + LoggerFactory.getLogger(HFilePerformanceEvaluation.class.getName()); - static byte [] format(final int i) { + static byte[] format(final int i) { String v = Integer.toString(i); return Bytes.toBytes("0000000000".substring(v.length()) + v); } @@ -80,52 +78,44 @@ static Cell createCell(final int i) { } /** - * HFile is Cell-based. It used to be byte arrays. Doing this test, pass Cells. All Cells + * HFile is Cell-based. It used to be byte arrays. Doing this test, pass Cells. All Cells * intentionally have same coordinates in all fields but row. * @param i Integer to format as a row Key. * @param value Value to use * @return Created Cell. */ - static Cell createCell(final int i, final byte [] value) { + static Cell createCell(final int i, final byte[] value) { return createCell(format(i), value); } - static Cell createCell(final byte [] keyRow) { - return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(keyRow) - .setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build(); + static Cell createCell(final byte[] keyRow) { + return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(keyRow) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(HConstants.EMPTY_BYTE_ARRAY).build(); } - static Cell createCell(final byte [] keyRow, final byte [] value) { - return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(keyRow) - .setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(value) - .build(); + static Cell createCell(final byte[] keyRow, final byte[] value) { + return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(keyRow) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(value).build(); } /** - * Add any supported codec or cipher to test the HFile read/write performance. - * Specify "none" to disable codec or cipher or both. + * Add any supported codec or cipher to test the HFile read/write performance. Specify "none" to + * disable codec or cipher or both. 
* @throws Exception */ private void runBenchmarks() throws Exception { final Configuration conf = new Configuration(); final FileSystem fs = FileSystem.get(conf); final Path mf = fs.makeQualified(new Path("performanceevaluation.mapfile")); - + // codec=none cipher=none runWriteBenchmark(conf, fs, mf, "none", "none"); runReadBenchmark(conf, fs, mf, "none", "none"); - + // codec=gz cipher=none runWriteBenchmark(conf, fs, mf, "gz", "none"); runReadBenchmark(conf, fs, mf, "gz", "none"); @@ -195,13 +185,13 @@ private void runWriteBenchmark(Configuration conf, FileSystem fs, Path mf, Strin fs.delete(mf, true); } - runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT, codec, cipher), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT, codec, cipher), ROW_COUNT, + codec, getCipherName(conf, cipher)); } /** - * Run all the read benchmarks for the test HFile + * Run all the read benchmarks for the test HFile * @param conf * @param fs * @param mf @@ -214,72 +204,70 @@ private void runReadBenchmark(final Configuration conf, final FileSystem fs, fin @Override public void run() { try { - runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("UniformRandomSmallScan failed " + e.getMessage()); e.printStackTrace(); } } }); - + PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("UniformRandomReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } }); - + PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("GaussianRandomReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } }); - + PerformanceEvaluationCommons.concurrentReads(new Runnable() { @Override public void run() { try { - runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT), - ROW_COUNT, codec, getCipherName(conf, cipher)); + runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT), ROW_COUNT, codec, + getCipherName(conf, cipher)); } catch (Exception e) { testSummary.append("SequentialReadBenchmark failed " + e.getMessage()); e.printStackTrace(); } } - }); + }); } - - protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount, - String codec, String cipher) throws Exception { - LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + - codec + "] " + "cipher[" + cipher + "] for " + rowCount + " rows."); - + + protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount, String codec, + String cipher) throws Exception { + LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + codec + "] " + + "cipher[" + cipher + "] for " + rowCount + " rows."); + long elapsedTime = benchmark.run(); - 
- LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + - codec + "] " + "cipher[" + cipher + "] for " + rowCount + " rows took " + - elapsedTime + "ms."); - + + LOG.info("Running " + benchmark.getClass().getSimpleName() + " with codec[" + codec + "] " + + "cipher[" + cipher + "] for " + rowCount + " rows took " + elapsedTime + "ms."); + // Store results to print summary at the end testSummary.append("Running ").append(benchmark.getClass().getSimpleName()) - .append(" with codec[").append(codec).append("] cipher[").append(cipher) - .append("] for ").append(rowCount).append(" rows took ").append(elapsedTime) - .append("ms.").append("\n"); + .append(" with codec[").append(codec).append("] cipher[").append(cipher).append("] for ") + .append(rowCount).append(" rows took ").append(elapsedTime).append("ms.").append("\n"); } static abstract class RowOrientedBenchmark { @@ -291,8 +279,8 @@ static abstract class RowOrientedBenchmark { protected String codec = "none"; protected String cipher = "none"; - public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows, String codec, String cipher) { + public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows, + String codec, String cipher) { this.conf = conf; this.fs = fs; this.mf = mf; @@ -301,8 +289,7 @@ public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, this.cipher = cipher; } - public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows) { + public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { this.conf = conf; this.fs = fs; this.mf = mf; @@ -352,8 +339,8 @@ static class SequentialWriteBenchmark extends RowOrientedBenchmark { protected HFile.Writer writer; private byte[] bytes = new byte[ROW_LENGTH]; - public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows, String codec, String cipher) { + public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows, + String codec, String cipher) { super(conf, fs, mf, totalRows, codec, cipher); } @@ -361,27 +348,23 @@ public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, void setUp() throws Exception { HFileContextBuilder builder = new HFileContextBuilder() - .withCompression(HFileWriterImpl.compressionByName(codec)) - .withBlockSize(RFILE_BLOCKSIZE); - + .withCompression(HFileWriterImpl.compressionByName(codec)).withBlockSize(RFILE_BLOCKSIZE); + if (cipher == "aes") { byte[] cipherKey = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(cipherKey); builder.withEncryptionContext(Encryption.newContext(conf) - .setCipher(Encryption.getCipher(conf, cipher)) - .setKey(cipherKey)); + .setCipher(Encryption.getCipher(conf, cipher)).setKey(cipherKey)); } else if (!"none".equals(cipher)) { throw new IOException("Cipher " + cipher + " not supported."); } - + HFileContext hFileContext = builder.build(); - writer = HFile.getWriterFactoryNoCache(conf) - .withPath(fs, mf) - .withFileContext(hFileContext) + writer = HFile.getWriterFactoryNoCache(conf).withPath(fs, mf).withFileContext(hFileContext) .create(); } - + @Override void doRow(int i) throws Exception { writer.append(createCell(i, generateValue())); @@ -408,8 +391,7 @@ static abstract class ReadBenchmark extends RowOrientedBenchmark { protected HFile.Reader reader; - public ReadBenchmark(Configuration conf, FileSystem fs, Path mf, - int totalRows) { + public ReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { 
super(conf, fs, mf, totalRows); } @@ -428,8 +410,7 @@ void tearDown() throws Exception { static class SequentialReadBenchmark extends ReadBenchmark { private HFileScanner scanner; - public SequentialReadBenchmark(Configuration conf, FileSystem fs, - Path mf, int totalRows) { + public SequentialReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @@ -459,15 +440,14 @@ protected int getReportingPeriod() { static class UniformRandomReadBenchmark extends ReadBenchmark { - public UniformRandomReadBenchmark(Configuration conf, FileSystem fs, - Path mf, int totalRows) { + public UniformRandomReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @Override void doRow(int i) throws Exception { HFileScanner scanner = this.reader.getScanner(conf, false, true); - byte [] b = getRandomRow(); + byte[] b = getRandomRow(); if (scanner.seekTo(createCell(b)) < 0) { LOG.info("Not able to seekTo " + new String(b)); return; @@ -478,22 +458,21 @@ void doRow(int i) throws Exception { PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, c.getValueLength()); } - private byte [] getRandomRow() { + private byte[] getRandomRow() { return format(ThreadLocalRandom.current().nextInt(totalRows)); } } static class UniformRandomSmallScan extends ReadBenchmark { - public UniformRandomSmallScan(Configuration conf, FileSystem fs, - Path mf, int totalRows) { - super(conf, fs, mf, totalRows/10); + public UniformRandomSmallScan(Configuration conf, FileSystem fs, Path mf, int totalRows) { + super(conf, fs, mf, totalRows / 10); } @Override void doRow(int i) throws Exception { HFileScanner scanner = this.reader.getScanner(conf, false, false); - byte [] b = getRandomRow(); + byte[] b = getRandomRow(); // System.out.println("Random row: " + new String(b)); Cell c = createCell(b); if (scanner.seekTo(c) != 0) { @@ -503,7 +482,7 @@ void doRow(int i) throws Exception { // TODO: HFileScanner doesn't do Cells yet. Temporary fix. 
c = scanner.getCell(); // System.out.println("Found row: " + - // new String(c.getRowArray(), c.getRowOffset(), c.getRowLength())); + // new String(c.getRowArray(), c.getRowOffset(), c.getRowLength())); PerformanceEvaluationCommons.assertKey(b, c); for (int ii = 0; ii < 30; ii++) { if (!scanner.next()) { @@ -515,7 +494,7 @@ void doRow(int i) throws Exception { } } - private byte [] getRandomRow() { + private byte[] getRandomRow() { return format(ThreadLocalRandom.current().nextInt(totalRows)); } } @@ -524,8 +503,7 @@ static class GaussianRandomReadBenchmark extends ReadBenchmark { private RandomData randomData = new RandomDataImpl(); - public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs, - Path mf, int totalRows) { + public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs, Path mf, int totalRows) { super(conf, fs, mf, totalRows); } @@ -544,11 +522,10 @@ void doRow(int i) throws Exception { } } - private byte [] getGaussianRandomRowBytes() { - int r = (int) randomData.nextGaussian((double)totalRows / 2.0, - (double)totalRows / 10.0); + private byte[] getGaussianRandomRowBytes() { + int r = (int) randomData.nextGaussian((double) totalRows / 2.0, (double) totalRows / 10.0); // make sure r falls into [0,totalRows) - return format(Math.min(totalRows, Math.max(r,0))); + return format(Math.min(totalRows, Math.max(r, 0))); } } @@ -565,7 +542,7 @@ private String getCipherName(Configuration conf, String cipherName) { if (cipherName.equals("aes")) { String provider = conf.get(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY); if (provider == null || provider.equals("") - || provider.equals(DefaultCipherProvider.class.getName())) { + || provider.equals(DefaultCipherProvider.class.getName())) { return "aes-default"; } else if (provider.equals(CryptoCipherProvider.class.getName())) { return "aes-commons"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java index c490c836c634..60a862721dce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java @@ -1,26 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
+ * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.Set; -import java.util.Collections; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; @@ -45,7 +46,7 @@ private HTestConst() { public static final byte[] DEFAULT_CF_BYTES = Bytes.toBytes(DEFAULT_CF_STR); public static final Set DEFAULT_CF_STR_SET = - Collections.unmodifiableSet(new HashSet<>(Arrays.asList(new String[] { DEFAULT_CF_STR }))); + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(new String[] { DEFAULT_CF_STR }))); public static final String DEFAULT_ROW_STR = "MyTestRow"; public static final byte[] DEFAULT_ROW_BYTES = Bytes.toBytes(DEFAULT_ROW_STR); @@ -79,7 +80,7 @@ public static byte[][] makeNAscii(byte[] base, int n) { * @return count of what we added. */ public static long addContent(final Region r, final byte[] columnFamily, final byte[] column) - throws IOException { + throws IOException { byte[] startKey = r.getRegionInfo().getStartKey(); byte[] endKey = r.getRegionInfo().getEndKey(); byte[] startKeyBytes = startKey; @@ -113,12 +114,12 @@ public static long addContent(Table updater, String family, String column) throw * @return count of what we added. */ public static long addContent(Table updater, String columnFamily, byte[] startKeyBytes, - byte[] endKey) throws IOException { + byte[] endKey) throws IOException { return addContent(updater, columnFamily, null, startKeyBytes, endKey, -1); } public static long addContent(Table updater, String family, String column, byte[] startKeyBytes, - byte[] endKey) throws IOException { + byte[] endKey) throws IOException { return addContent(updater, family, column, startKeyBytes, endKey, -1); } @@ -128,7 +129,7 @@ public static long addContent(Table updater, String family, String column, byte[ * @return count of what we added. */ public static long addContent(Table updater, String columnFamily, String column, - byte[] startKeyBytes, byte[] endKey, long ts) throws IOException { + byte[] startKeyBytes, byte[] endKey, long ts) throws IOException { long count = 0; // Add rows of three characters. The first character starts with the // 'a' character and runs up to 'z'. Per first character, we run the diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java index 68935ffe5c8c..be344148aaac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; @@ -32,20 +31,19 @@ public class MetaMockingUtil { /** - * Returns a Result object constructed from the given region information simulating - * a catalog table result. 
+ * Returns a Result object constructed from the given region information simulating a catalog + * table result. * @param region the HRegionInfo object or null * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. * @throws IOException */ - public static Result getMetaTableRowResult(final RegionInfo region) - throws IOException { + public static Result getMetaTableRowResult(final RegionInfo region) throws IOException { return getMetaTableRowResult(region, null, null, null); } /** - * Returns a Result object constructed from the given region information simulating - * a catalog table result. + * Returns a Result object constructed from the given region information simulating a catalog + * table result. * @param region the HRegionInfo object or null * @param sn to use making startcode and server hostname:port in meta or null * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. @@ -57,12 +55,12 @@ public static Result getMetaTableRowResult(final RegionInfo region, final Server } /** - * Returns a Result object constructed from the given region information simulating - * a catalog table result. + * Returns a Result object constructed from the given region information simulating a catalog + * table result. * @param region the RegionInfo object or null * @param sn to use making startcode and server hostname:port in meta or null * @param splita daughter region or null - * @param splitb daughter region or null + * @param splitb daughter region or null * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. * @throws IOException */ @@ -70,36 +68,28 @@ public static Result getMetaTableRowResult(RegionInfo region, final ServerName s RegionInfo splita, RegionInfo splitb) throws IOException { List kvs = new ArrayList<>(); if (region != null) { - kvs.add(new KeyValue( - region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - RegionInfo.toByteArray(region))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER, RegionInfo.toByteArray(region))); } if (sn != null) { - kvs.add(new KeyValue(region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(sn.getAddress().toString()))); - kvs.add(new KeyValue(region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, - Bytes.toBytes(sn.getStartcode()))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER, Bytes.toBytes(sn.getAddress().toString()))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode()))); } if (splita != null) { - kvs.add(new KeyValue( - region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, - RegionInfo.toByteArray(splita))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splita))); } if (splitb != null) { - kvs.add(new KeyValue( - region.getRegionName(), - HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, - RegionInfo.toByteArray(splitb))); + kvs.add(new KeyValue(region.getRegionName(), HConstants.CATALOG_FAMILY, + HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitb))); } - //important: sort the kvs so that binary search work + // important: sort the kvs so that binary search work Collections.sort(kvs, MetaCellComparator.META_COMPARATOR); return Result.create(kvs); 
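(Illustrative aside, not part of this patch: a minimal sketch of how a test might call the MetaMockingUtil.getMetaTableRowResult(...) overloads whose signatures appear in the hunks above; the table name, keys, hostname, and startcode are hypothetical values chosen for the example.)

    // Sketch only (hypothetical helper, not in this patch): builds a fake hbase:meta row
    // for one region, the way a unit test in this package might use MetaMockingUtil.
    static Result exampleMetaRow() throws IOException {
      RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("testtable"))
          .setStartKey(Bytes.toBytes("aaa")).setEndKey(Bytes.toBytes("zzz")).build();
      ServerName sn = ServerName.valueOf("rs1.example.org", 16020, 12345L);
      // The returned Result fakes a Get on hbase:meta: CATALOG_FAMILY cells for
      // REGIONINFO_QUALIFIER, SERVER_QUALIFIER and STARTCODE_QUALIFIER, sorted with
      // MetaCellComparator.META_COMPARATOR as shown in the diff above.
      return MetaMockingUtil.getMetaTableRowResult(region, sn);
    }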
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java index f13258f93736..0e6d278ca28b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java @@ -46,7 +46,7 @@ * * @Rule * public final ConnectionRule connectionRule = - * ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection); + * ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection); * } * } * @@ -83,8 +83,8 @@ public Builder setConfiguration(Supplier supplier) { } public MiniClusterRule build() { - return new MiniClusterRule(conf, miniClusterOption != null ? miniClusterOption : - StartTestingClusterOption.builder().build()); + return new MiniClusterRule(conf, miniClusterOption != null ? miniClusterOption + : StartTestingClusterOption.builder().build()); } } @@ -94,7 +94,7 @@ public MiniClusterRule build() { private SingleProcessHBaseCluster miniCluster; private MiniClusterRule(final Configuration conf, - final StartTestingClusterOption miniClusterOptions) { + final StartTestingClusterOption miniClusterOptions) { this.testingUtility = new HBaseTestingUtil(conf); this.miniClusterOptions = miniClusterOptions; } @@ -111,8 +111,8 @@ public HBaseTestingUtil getTestingUtility() { } /** - * Create a {@link Connection} to the managed {@link SingleProcessHBaseCluster}. It's up to - * the caller to {@link Connection#close() close()} the connection when finished. + * Create a {@link Connection} to the managed {@link SingleProcessHBaseCluster}. It's up to the + * caller to {@link Connection#close() close()} the connection when finished. */ public Connection createConnection() { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index ebe6edd73c49..11af772236df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -68,13 +68,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Basic mock region server services. Should only be instantiated by HBaseTestingUtility.b + * Basic mock region server services. Should only be instantiated by HBaseTestingUtility.b */ public class MockRegionServerServices implements RegionServerServices { protected static final Logger LOG = LoggerFactory.getLogger(MockRegionServerServices.class); private final Map regions = new HashMap<>(); private final ConcurrentSkipListMap rit = - new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); + new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); private HFileSystem hfs = null; private final Configuration conf; private ZKWatcher zkw = null; @@ -94,7 +94,7 @@ public MockRegionServerServices(ZKWatcher zkw, ServerName serverName) { this.conf = (zkw == null ? 
new Configuration() : zkw.getConfiguration()); } - public MockRegionServerServices(){ + public MockRegionServerServices() { this(null, null); } @@ -226,7 +226,7 @@ public HFileSystem getFileSystem() { } public void setFileSystem(FileSystem hfs) { - this.hfs = (HFileSystem)hfs; + this.hfs = (HFileSystem) hfs; } @Override @@ -335,8 +335,8 @@ public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) { } @Override - public boolean reportFileArchivalForQuotas( - TableName tableName, Collection> archivedFiles) { + public boolean reportFileArchivalForQuotas(TableName tableName, + Collection> archivedFiles) { return true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java index 5268d3d7b380..6b967671925d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; @@ -32,8 +30,7 @@ public abstract class MultithreadedTestUtil { - private static final Logger LOG = - LoggerFactory.getLogger(MultithreadedTestUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(MultithreadedTestUtil.class); public static class TestContext { private final Configuration conf; @@ -50,7 +47,7 @@ protected Configuration getConf() { return conf; } - public synchronized boolean shouldRun() { + public synchronized boolean shouldRun() { return !stopped && err == null; } @@ -75,6 +72,7 @@ public void waitFor(long millis) throws Exception { } } } + private synchronized void checkException() throws Exception { if (err != null) { throw new RuntimeException("Deferred", err); @@ -109,8 +107,7 @@ public void stop() throws Exception { } /** - * A thread that can be added to a test context, and properly - * passes exceptions through. + * A thread that can be added to a test context, and properly passes exceptions through. */ public static abstract class TestThread extends Thread { protected final TestContext ctx; @@ -157,13 +154,16 @@ public final void doWork() throws Exception { } public abstract void doAnAction() throws Exception; - public void workDone() throws IOException {} + + public void workDone() throws IOException { + } } /** - * Verify that no assertions have failed inside a future. - * Used for unit tests that spawn threads. E.g., + * Verify that no assertions have failed inside a future. Used for unit tests that spawn threads. + * E.g., *
      + * *
          *   List<Future<Void>> results = Lists.newArrayList();
          *   Future<Void> f = executor.submit(new Callable<Void> {
      @@ -174,14 +174,14 @@ public void workDone() throws IOException {}
          *   results.add(f);
          *   assertOnFutures(results);
          *
      + * * @param threadResults A list of futures - * @throws InterruptedException If interrupted when waiting for a result - * from one of the futures - * @throws ExecutionException If an exception other than AssertionError - * occurs inside any of the futures + * @throws InterruptedException If interrupted when waiting for a result from one of the futures + * @throws ExecutionException If an exception other than AssertionError occurs inside any of the + * futures */ public static void assertOnFutures(List> threadResults) - throws InterruptedException, ExecutionException { + throws InterruptedException, ExecutionException { for (Future threadResult : threadResults) { try { threadResult.get(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java index 97d326aa6324..090ad667bead 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,18 +20,16 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Code shared by PE tests. */ public class PerformanceEvaluationCommons { private static final Logger LOG = - LoggerFactory.getLogger(PerformanceEvaluationCommons.class.getName()); + LoggerFactory.getLogger(PerformanceEvaluationCommons.class.getName()); public static void assertValueSize(final int expectedSize, final int got) { if (got != expectedSize) { @@ -40,28 +37,27 @@ public static void assertValueSize(final int expectedSize, final int got) { } } - public static void assertKey(final byte [] expected, final ByteBuffer got) { - byte [] b = new byte[got.limit()]; + public static void assertKey(final byte[] expected, final ByteBuffer got) { + byte[] b = new byte[got.limit()]; got.get(b, 0, got.limit()); assertKey(expected, b); } - public static void assertKey(final byte [] expected, final Cell c) { + public static void assertKey(final byte[] expected, final Cell c) { assertKey(expected, c.getRowArray(), c.getRowOffset(), c.getRowLength()); } - public static void assertKey(final byte [] expected, final byte [] got) { + public static void assertKey(final byte[] expected, final byte[] got) { assertKey(expected, got, 0, got.length); } - public static void assertKey(final byte [] expected, final byte [] gotArray, + public static void assertKey(final byte[] expected, final byte[] gotArray, final int gotArrayOffset, final int gotArrayLength) { - if (!org.apache.hadoop.hbase.util.Bytes.equals(expected, 0, expected.length, - gotArray, gotArrayOffset, gotArrayLength)) { - throw new AssertionError("Expected " + - org.apache.hadoop.hbase.util.Bytes.toString(expected) + - " but got " + - org.apache.hadoop.hbase.util.Bytes.toString(gotArray, gotArrayOffset, gotArrayLength)); + if (!org.apache.hadoop.hbase.util.Bytes.equals(expected, 0, expected.length, gotArray, + gotArrayOffset, gotArrayLength)) { + throw new AssertionError("Expected " + org.apache.hadoop.hbase.util.Bytes.toString(expected) + + " but got " + + org.apache.hadoop.hbase.util.Bytes.toString(gotArray, gotArrayOffset, 
gotArrayLength)); } } @@ -72,10 +68,10 @@ public static void concurrentReads(final Runnable r) { for (int i = 0; i < count; i++) { threads.add(new Thread(r, "concurrentRead-" + i)); } - for (Thread t: threads) { + for (Thread t : threads) { t.start(); } - for (Thread t: threads) { + for (Thread t : threads) { try { t.join(); } catch (InterruptedException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java index b4ba729b1d52..289da05160cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ProcedureTestUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,8 +58,8 @@ public static void waitUntilProcedureWaitingTimeout(HBaseTestingUtil util, JsonParser parser = new JsonParser(); util.waitFor(timeout, () -> getProcedure(util, clazz, parser) - .filter(o -> ProcedureState.WAITING_TIMEOUT.name().equals(o.get("state").getAsString())) - .isPresent()); + .filter(o -> ProcedureState.WAITING_TIMEOUT.name().equals(o.get("state").getAsString())) + .isPresent()); } public static void waitUntilProcedureTimeoutIncrease(HBaseTestingUtil util, @@ -69,7 +69,7 @@ public static void waitUntilProcedureTimeoutIncrease(HBaseTestingUtil util, int timeoutIncrements = 0; for (;;) { long timeout = getProcedure(util, clazz, parser).filter(o -> o.has("timeout")) - .map(o -> o.get("timeout").getAsLong()).orElse(-1L); + .map(o -> o.get("timeout").getAsLong()).orElse(-1L); if (timeout > oldTimeout) { LOG.info("Timeout incremented, was {}, now is {}, increments={}", timeout, oldTimeout, timeoutIncrements); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/RegionReplicationLagEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/RegionReplicationLagEvaluation.java index da4101dbb853..a4f90891f4e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/RegionReplicationLagEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/RegionReplicationLagEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,6 +42,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; @@ -69,14 +70,14 @@ public class RegionReplicationLagEvaluation extends Configured implements Tool { public static final int ROW_LENGTH = 16; private static final Options OPTIONS = new Options().addOption("t", "table", true, "Table name") - .addOption("rlen", "rlength", true, "The length of row key") - .addOption("vlen", "vlength", true, "The length of value") - .addRequiredOption("r", "rows", true, "Number of rows to test"); + .addOption("rlen", "rlength", true, "The length of row key") + .addOption("vlen", "vlength", true, "The length of value") + .addRequiredOption("r", "rows", true, "Number of rows to test"); private FastLongHistogram histogram = new FastLongHistogram(); @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") FastLongHistogram getHistogram() { return histogram; } @@ -105,8 +106,8 @@ public int run(String[] args) throws Exception { private void createTable(Admin admin, TableName tableName) throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)).setRegionReplication(2) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)).setRegionReplication(2) + .build(); admin.createTable(td); } @@ -158,7 +159,7 @@ private void exec(TableName tableName, int rlen, int vlen, int rows) throws IOEx public static void main(String[] args) throws Exception { int res = - ToolRunner.run(HBaseConfiguration.create(), new RegionReplicationLagEvaluation(), args); + ToolRunner.run(HBaseConfiguration.create(), new RegionReplicationLagEvaluation(), args); System.exit(res); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java index af30b58f463d..664fcee9a745 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,7 +55,7 @@ @InterfaceStability.Evolving public class SingleProcessHBaseCluster extends HBaseClusterInterface { private static final Logger LOG = - LoggerFactory.getLogger(SingleProcessHBaseCluster.class.getName()); + LoggerFactory.getLogger(SingleProcessHBaseCluster.class.getName()); public LocalHBaseCluster hbaseCluster; private static int index; @@ -66,7 +65,7 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface { * @param numRegionServers initial number of region servers to start. 
*/ public SingleProcessHBaseCluster(Configuration conf, int numRegionServers) - throws IOException, InterruptedException { + throws IOException, InterruptedException { this(conf, 1, numRegionServers); } @@ -77,7 +76,7 @@ public SingleProcessHBaseCluster(Configuration conf, int numRegionServers) * @param numRegionServers initial number of region servers to start. */ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numRegionServers) - throws IOException, InterruptedException { + throws IOException, InterruptedException { this(conf, numMasters, numRegionServers, null, null); } @@ -88,9 +87,9 @@ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numRegi * @param numRegionServers initial number of region servers to start. */ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numRegionServers, - Class masterClass, - Class regionserverClass) - throws IOException, InterruptedException { + Class masterClass, + Class regionserverClass) + throws IOException, InterruptedException { this(conf, numMasters, 0, numRegionServers, null, masterClass, regionserverClass); } @@ -101,9 +100,9 @@ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numRegi * each cluster start. */ public SingleProcessHBaseCluster(Configuration conf, int numMasters, int numAlwaysStandByMasters, - int numRegionServers, List rsPorts, Class masterClass, - Class regionserverClass) - throws IOException, InterruptedException { + int numRegionServers, List rsPorts, Class masterClass, + Class regionserverClass) + throws IOException, InterruptedException { super(conf); // Hadoop 2 @@ -134,14 +133,14 @@ public static class MiniHBaseClusterRegionServer extends HRegionServer { static Set killedServers = new HashSet<>(); public MiniHBaseClusterRegionServer(Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { super(conf); this.user = User.getCurrent(); } @Override protected void handleReportForDutyResponse(final RegionServerStartupResponse c) - throws IOException { + throws IOException { super.handleReportForDutyResponse(c); // Run this thread to shutdown our filesystem on way out. this.shutdownThread = new SingleFileSystemShutdownThread(getFileSystem()); @@ -220,9 +219,9 @@ public void run() { } private void init(final int nMasterNodes, final int numAlwaysStandByMasters, - final int nRegionNodes, List rsPorts, Class masterClass, - Class regionserverClass) - throws IOException, InterruptedException { + final int nRegionNodes, List rsPorts, Class masterClass, + Class regionserverClass) + throws IOException, InterruptedException { try { if (masterClass == null) { masterClass = HMaster.class; @@ -233,7 +232,7 @@ private void init(final int nMasterNodes, final int numAlwaysStandByMasters, // start up a LocalHBaseCluster hbaseCluster = new LocalHBaseCluster(conf, nMasterNodes, numAlwaysStandByMasters, 0, - masterClass, regionserverClass); + masterClass, regionserverClass); // manually add the regionservers as other users for (int i = 0; i < nRegionNodes; i++) { @@ -406,12 +405,12 @@ public JVMClusterUtil.RegionServerThread startRegionServer() throws IOException } private JVMClusterUtil.RegionServerThread startRegionServer(Configuration configuration) - throws IOException { + throws IOException { User rsUser = HBaseTestingUtil.getDifferentUser(configuration, ".hfs." 
+ index++); JVMClusterUtil.RegionServerThread t = null; try { - t = - hbaseCluster.addRegionServer(configuration, hbaseCluster.getRegionServers().size(), rsUser); + t = hbaseCluster.addRegionServer(configuration, hbaseCluster.getRegionServers().size(), + rsUser); t.start(); t.waitForServerOnline(); } catch (InterruptedException ie) { @@ -427,7 +426,7 @@ private JVMClusterUtil.RegionServerThread startRegionServer(Configuration config * @return New RegionServerThread */ public JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout) - throws IOException { + throws IOException { JVMClusterUtil.RegionServerThread t = startRegionServer(); ServerName rsServerName = t.getRegionServer().getServerName(); @@ -476,7 +475,7 @@ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber) { * @return the region server that was stopped */ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber, - final boolean shutdownFS) { + final boolean shutdownFS) { JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber); LOG.info("Stopping " + server.toString()); server.getRegionServer().stop("Stopping rs " + serverNumber); @@ -762,7 +761,7 @@ public HRegionServer getRegionServer(int serverNumber) { public HRegionServer getRegionServer(ServerName serverName) { return hbaseCluster.getRegionServers().stream().map(t -> t.getRegionServer()) - .filter(r -> r.getServerName().equals(serverName)).findFirst().orElse(null); + .filter(r -> r.getServerName().equals(serverName)).findFirst().orElse(null); } public List getRegions(byte[] tableName) { @@ -813,7 +812,7 @@ public int getServerWith(byte[] regionName) { @Override public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName) - throws IOException { + throws IOException { int index = getServerWith(regionName); if (index < 0) { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java index 30c54244dfc9..5a030b78c01f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/StartTestingClusterOption.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -110,10 +109,10 @@ public final class StartTestingClusterOption { * Private constructor. Use {@link Builder#build()}. 
*/ private StartTestingClusterOption(int numMasters, int numAlwaysStandByMasters, - Class masterClass, int numRegionServers, List rsPorts, - Class rsClass, - int numDataNodes, String[] dataNodeHosts, int numZkServers, boolean createRootDir, - boolean createWALDir) { + Class masterClass, int numRegionServers, List rsPorts, + Class rsClass, + int numDataNodes, String[] dataNodeHosts, int numZkServers, boolean createRootDir, + boolean createWALDir) { this.numMasters = numMasters; this.numAlwaysStandByMasters = numAlwaysStandByMasters; this.masterClass = masterClass; @@ -173,11 +172,11 @@ public boolean isCreateWALDir() { @Override public String toString() { - return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass + - ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts) + - ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts=" + - Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir=" + - createRootDir + ", createWALDir=" + createWALDir + '}'; + return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass + + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts) + + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts=" + + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir=" + + createRootDir + ", createWALDir=" + createWALDir + '}'; } /** @@ -214,8 +213,8 @@ public StartTestingClusterOption build() { numDataNodes = dataNodeHosts.length; } return new StartTestingClusterOption(numMasters, numAlwaysStandByMasters, masterClass, - numRegionServers, rsPorts, rsClass, numDataNodes, dataNodeHosts, numZkServers, - createRootDir, createWALDir); + numRegionServers, rsPorts, rsClass, numDataNodes, dataNodeHosts, numZkServers, + createRootDir, createWALDir); } public Builder numMasters(int numMasters) { @@ -244,7 +243,7 @@ public Builder rsPorts(List rsPorts) { } public Builder - rsClass(Class rsClass) { + rsClass(Class rsClass) { this.rsClass = rsClass; return this; } @@ -265,8 +264,7 @@ public Builder numZkServers(int numZkServers) { } public Builder numWorkers(int numWorkers) { - return numDataNodes(numWorkers) - .numRegionServers(numWorkers); + return numDataNodes(numWorkers).numRegionServers(numWorkers); } public Builder createRootDir(boolean createRootDir) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java index ff770815dd2f..3d82292d307b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithAdaptivePolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java index 02c6a98a7a41..2d91c685bd37 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithBasicPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java index 5f2e245a8349..7524b297248a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithEagerPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java index 4b2bcd04733b..5ef1a85022ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuaranteesWithNoInMemCompaction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java index 8dd82fe0e0c9..61149d87bd88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCachedClusterId.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; @@ -44,6 +45,7 @@ public class TestCachedClusterId { private static class GetClusterIdThread extends TestThread { CachedClusterId cachedClusterId; + public GetClusterIdThread(TestContext ctx, CachedClusterId clusterId) { super(ctx); cachedClusterId = clusterId; @@ -76,8 +78,8 @@ public void testClusterIdMatch() { @Test public void testMultiThreadedGetClusterId() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); - CachedClusterId cachedClusterId = new CachedClusterId(TEST_UTIL.getHBaseCluster().getMaster(), - conf); + CachedClusterId cachedClusterId = + new CachedClusterId(TEST_UTIL.getHBaseCluster().getMaster(), conf); TestContext context = new TestContext(conf); int numThreads = 16; for (int i = 0; i < numThreads; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java index 7cf9b6c012ec..9b1ecc97c64e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertTrue; + import java.util.List; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -28,7 +29,7 @@ /** * Checks tests are categorized. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestCheckTestClasses { @ClassRule @@ -48,7 +49,7 @@ public void checkClasses() throws Exception { badClasses.add(c); } } - assertTrue("There are " + badClasses.size() + " test classes without category: " - + badClasses, badClasses.isEmpty()); + assertTrue("There are " + badClasses.size() + " test classes without category: " + badClasses, + badClasses.isEmpty()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java index 4e38eba6417c..627bf95d24c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.Waiter.Predicate; @@ -88,9 +87,9 @@ public static class MyRegionServer public MyRegionServer(Configuration conf) throws IOException, InterruptedException { super(conf); } + @Override - public void tryRegionServerReport(long reportStartTime, long reportEndTime) - throws IOException { + public void tryRegionServerReport(long reportStartTime, long reportEndTime) throws IOException { super.tryRegionServerReport(reportStartTime, reportEndTime); } } @@ -100,9 +99,9 @@ public static void setUpBeforeClass() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyObserver.class.getName()); UTIL = new HBaseTestingUtil(conf); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .rsClass(TestClientClusterMetrics.MyRegionServer.class) - .numMasters(MASTERS).numRegionServers(SLAVES).numDataNodes(SLAVES).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().rsClass(TestClientClusterMetrics.MyRegionServer.class) + .numMasters(MASTERS).numRegionServers(SLAVES).numDataNodes(SLAVES).build(); UTIL.startMiniCluster(option); CLUSTER = UTIL.getHBaseCluster(); CLUSTER.waitForActiveAndReadyMaster(); @@ -125,11 +124,11 @@ public void testDefaults() throws Exception { Assert.assertEquals(origin.getClusterId(), defaults.getClusterId()); Assert.assertEquals(origin.getAverageLoad(), defaults.getAverageLoad(), 0); Assert.assertEquals(origin.getBackupMasterNames().size(), - defaults.getBackupMasterNames().size()); + defaults.getBackupMasterNames().size()); Assert.assertEquals(origin.getDeadServerNames().size(), defaults.getDeadServerNames().size()); Assert.assertEquals(origin.getRegionCount(), defaults.getRegionCount()); Assert.assertEquals(origin.getLiveServerMetrics().size(), - defaults.getLiveServerMetrics().size()); + defaults.getLiveServerMetrics().size()); Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort()); Assert.assertEquals(origin.getServersName().size(), defaults.getServersName().size()); Assert.assertEquals(ADMIN.getRegionServers().size(), defaults.getServersName().size()); @@ -137,13 +136,12 @@ public void testDefaults() throws Exception { @Test public void testAsyncClient() throws Exception { - try (AsyncConnection asyncConnect = ConnectionFactory.createAsyncConnection( - UTIL.getConfiguration()).get()) { + try (AsyncConnection asyncConnect = + ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) { AsyncAdmin asyncAdmin = asyncConnect.getAdmin(); - CompletableFuture originFuture = - asyncAdmin.getClusterMetrics(); + CompletableFuture originFuture = asyncAdmin.getClusterMetrics(); CompletableFuture defaultsFuture = - asyncAdmin.getClusterMetrics(EnumSet.allOf(Option.class)); + asyncAdmin.getClusterMetrics(EnumSet.allOf(Option.class)); ClusterMetrics origin = originFuture.get(); ClusterMetrics defaults = defaultsFuture.get(); Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion()); @@ -160,8 +158,8 @@ public void testAsyncClient() throws Exception { Assert.assertEquals(origin.getMasterInfoPort(), defaults.getMasterInfoPort()); Assert.assertEquals(origin.getServersName().size(), 
defaults.getServersName().size()); origin.getTableRegionStatesCount().forEach(((tableName, regionStatesCount) -> { - RegionStatesCount defaultRegionStatesCount = defaults.getTableRegionStatesCount() - .get(tableName); + RegionStatesCount defaultRegionStatesCount = + defaults.getTableRegionStatesCount().get(tableName); Assert.assertEquals(defaultRegionStatesCount, regionStatesCount); })); } @@ -194,7 +192,7 @@ public boolean evaluate() throws Exception { ClusterMetrics metrics = ADMIN.getClusterMetrics(options); Assert.assertNotNull(metrics); // exclude a dead region server - Assert.assertEquals(SLAVES -1, numRs); + Assert.assertEquals(SLAVES - 1, numRs); // live servers = nums of regionservers // By default, HMaster don't carry any regions so it won't report its load. // Hence, it won't be in the server list. @@ -211,31 +209,27 @@ public boolean evaluate() throws Exception { @Test public void testRegionStatesCount() throws Exception { Table table = UTIL.createTable(TABLE_NAME, CF); - table.put(new Put(Bytes.toBytes("k1")) - .addColumn(CF, Bytes.toBytes("q1"), Bytes.toBytes("v1"))); - table.put(new Put(Bytes.toBytes("k2")) - .addColumn(CF, Bytes.toBytes("q2"), Bytes.toBytes("v2"))); - table.put(new Put(Bytes.toBytes("k3")) - .addColumn(CF, Bytes.toBytes("q3"), Bytes.toBytes("v3"))); + table.put(new Put(Bytes.toBytes("k1")).addColumn(CF, Bytes.toBytes("q1"), Bytes.toBytes("v1"))); + table.put(new Put(Bytes.toBytes("k2")).addColumn(CF, Bytes.toBytes("q2"), Bytes.toBytes("v2"))); + table.put(new Put(Bytes.toBytes("k3")).addColumn(CF, Bytes.toBytes("q3"), Bytes.toBytes("v3"))); ClusterMetrics metrics = ADMIN.getClusterMetrics(); Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getRegionsInTransition(), 0); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getOpenRegions(), 1); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getTotalRegions(), 1); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getClosedRegions(), 0); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getSplitRegions(), 0); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getRegionsInTransition(), 0); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getOpenRegions(), 1); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getTotalRegions(), 1); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(), + 0); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getClosedRegions(), 0); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getSplitRegions(), 0); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0); + Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1); + Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getTotalRegions(), 1); UTIL.deleteTable(TABLE_NAME); } @@ -245,27 +239,24 @@ public 
void testRegionStatesWithSplit() throws Exception { int startRowNum = 20; int rowCount = 80; Table table = UTIL.createTable(TABLE_NAME, CF); - table.put(new Put(Bytes.toBytes("k1")) - .addColumn(CF, Bytes.toBytes("q1"), Bytes.toBytes("v1"))); - table.put(new Put(Bytes.toBytes("k2")) - .addColumn(CF, Bytes.toBytes("q2"), Bytes.toBytes("v2"))); + table.put(new Put(Bytes.toBytes("k1")).addColumn(CF, Bytes.toBytes("q1"), Bytes.toBytes("v1"))); + table.put(new Put(Bytes.toBytes("k2")).addColumn(CF, Bytes.toBytes("q2"), Bytes.toBytes("v2"))); insertData(TABLE_NAME, startRowNum, rowCount); ClusterMetrics metrics = ADMIN.getClusterMetrics(); Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getRegionsInTransition(), 0); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getOpenRegions(), 1); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getTotalRegions(), 1); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getRegionsInTransition(), 0); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getOpenRegions(), 1); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getTotalRegions(), 1); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(), + 0); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0); + Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1); + Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getTotalRegions(), 1); int splitRowNum = startRowNum + rowCount / 2; byte[] splitKey = Bytes.toBytes("" + splitRowNum); @@ -275,27 +266,25 @@ public void testRegionStatesWithSplit() throws Exception { metrics = ADMIN.getClusterMetrics(); Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getRegionsInTransition(), 0); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getOpenRegions(), 1); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME) - .getTotalRegions(), 1); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getRegionsInTransition(), 0); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getOpenRegions(), 2); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getTotalRegions(), 3); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getSplitRegions(), 1); - Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME) - .getClosedRegions(), 0); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(), + 0); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1); + Assert.assertEquals( + metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1); + Assert.assertEquals( + 
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0); + Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 2); + Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getTotalRegions(), 3); + Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getSplitRegions(), 1); + Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getClosedRegions(), 0); UTIL.deleteTable(TABLE_NAME); } - @Test public void testMasterAndBackupMastersStatus() throws Exception { + @Test + public void testMasterAndBackupMastersStatus() throws Exception { // get all the master threads List masterThreads = CLUSTER.getMasterThreads(); int numActive = 0; @@ -323,9 +312,9 @@ public void testRegionStatesWithSplit() throws Exception { @Test public void testUserMetrics() throws Exception { Configuration conf = UTIL.getConfiguration(); - // If metrics for users is not enabled, this test doesn't make sense. + // If metrics for users is not enabled, this test doesn't make sense. if (!conf.getBoolean(MetricsUserAggregateFactory.METRIC_USER_ENABLED_CONF, - MetricsUserAggregateFactory.DEFAULT_METRIC_USER_ENABLED_CONF)) { + MetricsUserAggregateFactory.DEFAULT_METRIC_USER_ENABLED_CONF)) { return; } User userFoo = User.createUserForTesting(conf, "FOO_USER_METRIC_TEST", new String[0]); @@ -335,7 +324,8 @@ public void testUserMetrics() throws Exception { waitForUsersMetrics(0); long writeMetaMetricBeforeNextuser = getMetaMetrics().getWriteRequestCount(); userFoo.runAs(new PrivilegedAction() { - @Override public Void run() { + @Override + public Void run() { try { doPut(); } catch (IOException e) { @@ -349,7 +339,8 @@ public void testUserMetrics() throws Exception { getMetaMetrics().getWriteRequestCount() - writeMetaMetricBeforeNextuser; long readMetaMetricBeforeNextuser = getMetaMetrics().getReadRequestCount(); userBar.runAs(new PrivilegedAction() { - @Override public Void run() { + @Override + public Void run() { try { doGet(); } catch (IOException e) { @@ -363,7 +354,8 @@ public void testUserMetrics() throws Exception { getMetaMetrics().getReadRequestCount() - readMetaMetricBeforeNextuser; long filteredMetaReqeust = getMetaMetrics().getFilteredReadRequestCount(); userTest.runAs(new PrivilegedAction() { - @Override public Void run() { + @Override + public Void run() { try { Table table = createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME); for (Result result : table.getScanner(new Scan().setFilter(new FilterAllFilter()))) { @@ -378,30 +370,29 @@ public void testUserMetrics() throws Exception { waitForUsersMetrics(3); long filteredMetaReqeustForTestUser = getMetaMetrics().getFilteredReadRequestCount() - filteredMetaReqeust; - Map userMap = - ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().values() - .iterator().next().getUserMetrics(); + Map userMap = ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) + .getLiveServerMetrics().values().iterator().next().getUserMetrics(); for (byte[] user : userMap.keySet()) { switch (Bytes.toString(user)) { case "FOO_USER_METRIC_TEST": Assert.assertEquals(1, - userMap.get(user).getWriteRequestCount() - writeMetaMetricForUserFoo); + userMap.get(user).getWriteRequestCount() - writeMetaMetricForUserFoo); break; case "BAR_USER_METRIC_TEST": - Assert - .assertEquals(1, userMap.get(user).getReadRequestCount() - readMetaMetricForUserBar); + Assert.assertEquals(1, + userMap.get(user).getReadRequestCount() - readMetaMetricForUserBar); 
Assert.assertEquals(0, userMap.get(user).getWriteRequestCount()); break; case "TEST_USER_METRIC_TEST": Assert.assertEquals(1, - userMap.get(user).getFilteredReadRequests() - filteredMetaReqeustForTestUser); + userMap.get(user).getFilteredReadRequests() - filteredMetaReqeustForTestUser); Assert.assertEquals(0, userMap.get(user).getWriteRequestCount()); break; default: - //current user + // current user Assert.assertEquals(UserProvider.instantiate(conf).getCurrent().getName(), - Bytes.toString(user)); - //Read/write count because of Meta operations + Bytes.toString(user)); + // Read/write count because of Meta operations Assert.assertTrue(userMap.get(user).getReadRequestCount() > 1); break; } @@ -418,14 +409,14 @@ public void testServerTasks() throws Exception { // Of course, first we must trigger regionserver reports. final long now = EnvironmentEdgeManager.currentTime(); final long last = now - 1000; // fake a period, or someone might div by zero - for (RegionServerThread rs: CLUSTER.getRegionServerThreads()) { - ((MyRegionServer)rs.getRegionServer()).tryRegionServerReport(last, now); + for (RegionServerThread rs : CLUSTER.getRegionServerThreads()) { + ((MyRegionServer) rs.getRegionServer()).tryRegionServerReport(last, now); } // Get status now ClusterMetrics clusterMetrics = ADMIN.getClusterMetrics(EnumSet.of(Option.TASKS)); // The test task will be in the master metrics list boolean found = false; - for (ServerTask task: clusterMetrics.getMasterTasks()) { + for (ServerTask task : clusterMetrics.getMasterTasks()) { if (testTaskName.equals(task.getDescription())) { // Found it found = true; @@ -435,9 +426,9 @@ public void testServerTasks() throws Exception { Assert.assertTrue("Expected task not found in master task list", found); // Get the tasks information (carried in server metrics) found = false; - for (ServerMetrics serverMetrics: clusterMetrics.getLiveServerMetrics().values()) { + for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) { if (serverMetrics.getTasks() != null) { - for (ServerTask task: serverMetrics.getTasks()) { + for (ServerTask task : serverMetrics.getTasks()) { if (testTaskName.equals(task.getDescription())) { // Found it found = true; @@ -465,15 +456,15 @@ private RegionMetrics getMetaMetrics() throws IOException { } private void waitForUsersMetrics(int noOfUsers) throws Exception { - //Sleep for metrics to get updated on master + // Sleep for metrics to get updated on master Thread.sleep(5000); Waiter.waitFor(CLUSTER.getConfiguration(), 10 * 1000, 100, new Predicate() { - @Override public boolean evaluate() throws Exception { - Map metrics = - ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().values() - .iterator().next().getUserMetrics(); + @Override + public boolean evaluate() throws Exception { + Map metrics = ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) + .getLiveServerMetrics().values().iterator().next().getUserMetrics(); Assert.assertNotNull(metrics); - //including current user + noOfUsers + // including current user + noOfUsers return metrics.keySet().size() > noOfUsers; } }); @@ -498,9 +489,8 @@ private Connection createConnection(Configuration conf) throws IOException { @Test public void testOtherStatusInfos() throws Exception { - EnumSet
      + * Tests five cases of scans and timestamps. */ @Test public void testScanMultipleVersions() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); final byte[][] rows = new byte[][] { Bytes.toBytes("row_0200"), Bytes.toBytes("row_0800") }; - final byte [][] splitRows = new byte[][] {Bytes.toBytes("row_0500")}; - final long [] timestamp = new long[] {100L, 1000L}; + final byte[][] splitRows = new byte[][] { Bytes.toBytes("row_0500") }; + final long[] timestamp = new long[] { 100L, 1000L }; this.admin.createTable(tableDescriptor, splitRows); Table table = UTIL.getConnection().getTable(tableName); // Assert we got the region layout wanted. - Pair keys = UTIL.getConnection() - .getRegionLocator(tableName).getStartEndKeys(); + Pair keys = + UTIL.getConnection().getRegionLocator(tableName).getStartEndKeys(); assertEquals(2, keys.getFirst().length); byte[][] startKeys = keys.getFirst(); byte[][] endKeys = keys.getSecond(); @@ -245,7 +237,8 @@ public void testScanMultipleVersions() throws Exception { get.setTimestamp(timestamp[j]); Result result = table.get(get); int cellCount = 0; - for(@SuppressWarnings("unused")Cell kv : result.listCells()) { + for (@SuppressWarnings("unused") + Cell kv : result.listCells()) { cellCount++; } assertTrue(cellCount == 1); @@ -341,4 +334,3 @@ public void testScanMultipleVersions() throws Exception { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java index b63054f544be..fcaf26fa2c22 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestNamespace { @ClassRule @@ -79,7 +79,7 @@ public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE); admin = TEST_UTIL.getAdmin(); cluster = TEST_UTIL.getHBaseCluster(); - master = ((SingleProcessHBaseCluster)cluster).getMaster(); + master = ((SingleProcessHBaseCluster) cluster).getMaster(); LOG.info("Done initializing cluster"); } @@ -103,7 +103,7 @@ public void beforeMethod() throws IOException { @Test public void verifyReservedNS() throws IOException { - //verify existence of reserved namespaces + // verify existence of reserved namespaces NamespaceDescriptor ns = admin.getNamespaceDescriptor(NamespaceDescriptor.DEFAULT_NAMESPACE.getName()); assertNotNull(ns); @@ -116,7 +116,7 @@ public void verifyReservedNS() throws IOException { assertEquals(2, admin.listNamespaces().length); assertEquals(2, admin.listNamespaceDescriptors().length); - //verify existence of system tables + // verify existence of system tables Set systemTables = Sets.newHashSet(TableName.META_TABLE_NAME); List descs = admin.listTableDescriptorsByNamespace( Bytes.toBytes(NamespaceDescriptor.SYSTEM_NAMESPACE.getName())); @@ -124,10 +124,10 @@ public void verifyReservedNS() throws IOException { for (TableDescriptor desc : descs) { assertTrue(systemTables.contains(desc.getTableName())); } - //verify system tables aren't listed + // verify system tables aren't listed assertEquals(0, admin.listTableDescriptors().size()); - //Try creating default and system namespaces. + // Try creating default and system namespaces. 
boolean exceptionCaught = false; try { admin.createNamespace(NamespaceDescriptor.DEFAULT_NAMESPACE); @@ -176,11 +176,11 @@ public void createRemoveTest() throws Exception { String nsName = prefix + "_" + name.getMethodName(); LOG.info(name.getMethodName()); - //create namespace and verify + // create namespace and verify admin.createNamespace(NamespaceDescriptor.create(nsName).build()); assertEquals(3, admin.listNamespaces().length); assertEquals(3, admin.listNamespaceDescriptors().length); - //remove namespace and verify + // remove namespace and verify admin.deleteNamespace(nsName); assertEquals(2, admin.listNamespaces().length); assertEquals(2, admin.listNamespaceDescriptors().length); @@ -193,16 +193,14 @@ public void createDoubleTest() throws IOException, InterruptedException { final TableName tableName = TableName.valueOf(name.getMethodName()); final TableName tableNameFoo = TableName.valueOf(nsName + ":" + name.getMethodName()); - //create namespace and verify + // create namespace and verify admin.createNamespace(NamespaceDescriptor.create(nsName).build()); TEST_UTIL.createTable(tableName, Bytes.toBytes(nsName)); - TEST_UTIL.createTable(tableNameFoo,Bytes.toBytes(nsName)); + TEST_UTIL.createTable(tableNameFoo, Bytes.toBytes(nsName)); assertEquals(2, admin.listTableDescriptors().size()); - assertNotNull(admin - .getDescriptor(tableName)); - assertNotNull(admin - .getDescriptor(tableNameFoo)); - //remove namespace and verify + assertNotNull(admin.getDescriptor(tableName)); + assertNotNull(admin.getDescriptor(tableNameFoo)); + // remove namespace and verify admin.disableTable(tableName); admin.deleteTable(tableName); assertEquals(1, admin.listTableDescriptors().size()); @@ -214,9 +212,9 @@ public void createTableTest() throws IOException, InterruptedException { LOG.info(name.getMethodName()); TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(nsName + ":" + name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(nsName + ":" + name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("my_cf")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("my_cf")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); try { @@ -224,18 +222,17 @@ public void createTableTest() throws IOException, InterruptedException { fail("Expected no namespace exists exception"); } catch (NamespaceNotFoundException ex) { } - //create table and in new namespace + // create table and in new namespace admin.createNamespace(NamespaceDescriptor.create(nsName).build()); admin.createTable(tableDescriptor); TEST_UTIL.waitTableAvailable(tableDescriptor.getTableName().getName(), 10000); FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); assertTrue(fs.exists( - new Path(master.getMasterFileSystem().getRootDir(), - new Path(HConstants.BASE_NAMESPACE_DIR, - new Path(nsName, tableDescriptor.getTableName().getQualifierAsString()))))); + new Path(master.getMasterFileSystem().getRootDir(), new Path(HConstants.BASE_NAMESPACE_DIR, + new Path(nsName, tableDescriptor.getTableName().getQualifierAsString()))))); assertEquals(1, admin.listTableDescriptors().size()); - //verify non-empty namespace can't be removed + // verify non-empty namespace can't be removed try { admin.deleteNamespace(nsName); fail("Expected non-empty namespace constraint exception"); @@ -243,17 +240,17 @@ public 
void createTableTest() throws IOException, InterruptedException { LOG.info("Caught expected exception: " + ex); } - //sanity check try to write and read from table + // sanity check try to write and read from table Table table = TEST_UTIL.getConnection().getTable(tableDescriptor.getTableName()); Put p = new Put(Bytes.toBytes("row1")); p.addColumn(Bytes.toBytes("my_cf"), Bytes.toBytes("my_col"), Bytes.toBytes("value1")); table.put(p); - //flush and read from disk to make sure directory changes are working + // flush and read from disk to make sure directory changes are working admin.flush(tableDescriptor.getTableName()); Get g = new Get(Bytes.toBytes("row1")); assertTrue(table.exists(g)); - //normal case of removing namespace + // normal case of removing namespace TEST_UTIL.deleteTable(tableDescriptor.getTableName()); admin.deleteNamespace(nsName); } @@ -261,9 +258,9 @@ public void createTableTest() throws IOException, InterruptedException { @Test public void createTableInDefaultNamespace() throws Exception { TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); admin.createTable(tableDescriptor); @@ -275,10 +272,9 @@ public void createTableInDefaultNamespace() throws Exception { @Test public void createTableInSystemNamespace() throws Exception { final TableName tableName = TableName.valueOf("hbase:" + name.getMethodName()); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); admin.createTable(tableDescriptor); @@ -307,9 +303,9 @@ public Void call() throws Exception { @Override public Void call() throws Exception { TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder - .newBuilder(TableName.valueOf("non_existing_namespace", name.getMethodName())); + .newBuilder(TableName.valueOf("non_existing_namespace", name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("family1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("family1")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); return null; @@ -355,13 +351,13 @@ public Void call() throws Exception { // get table descriptors for existing namespace TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(prefix + "ns1", name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(prefix + "ns1", name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("family1")).build(); + 
ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("family1")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); List htds = - admin.listTableDescriptorsByNamespace(Bytes.toBytes(prefix + "ns1")); + admin.listTableDescriptorsByNamespace(Bytes.toBytes(prefix + "ns1")); assertNotNull("Should have not returned null", htds); assertEquals("Should have returned non-empty array", 1, htds.size()); @@ -390,10 +386,11 @@ public Void call() throws Exception { } - private static void runWithExpectedException(Callable callable, Class exceptionClass) { + private static void runWithExpectedException(Callable callable, + Class exceptionClass) { try { callable.call(); - } catch(Exception ex) { + } catch (Exception ex) { Assert.assertEquals(exceptionClass, ex.getClass()); return; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNodeHealthCheckChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNodeHealthCheckChore.java index 60a0880d3fa1..5023b53d76e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNodeHealthCheckChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNodeHealthCheckChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestNodeHealthCheckChore { @ClassRule @@ -99,14 +99,14 @@ public void healthCheckerTest(String script, HealthCheckerExitStatus expectedSta } @Test - public void testRSHealthChore() throws Exception{ + public void testRSHealthChore() throws Exception { Stoppable stop = new StoppableImplementation(); Configuration conf = getConfForNodeHealthScript(); String errorScript = "echo ERROR" + eol + " echo \"Server not healthy\""; createScript(errorScript, true); HealthCheckChore rsChore = new HealthCheckChore(100, stop, conf); try { - //Default threshold is three. + // Default threshold is three. rsChore.chore(); rsChore.chore(); assertFalse("Stoppable must not be stopped.", stop.isStopped()); @@ -117,8 +117,7 @@ public void testRSHealthChore() throws Exception{ } } - private void createScript(String scriptStr, boolean setExecutable) - throws Exception { + private void createScript(String scriptStr, boolean setExecutable) throws Exception { if (!this.healthScriptFile.exists()) { if (!healthScriptFile.createNewFile()) { throw new IOException("Failed create of " + this.healthScriptFile); @@ -143,8 +142,8 @@ private Configuration getConfForNodeHealthScript() throws IOException { throw new IOException("Failed mkdirs " + tempDir); } } - String scriptName = "HealthScript" + UTIL.getRandomUUID().toString() - + (Shell.WINDOWS ? ".cmd" : ".sh"); + String scriptName = + "HealthScript" + UTIL.getRandomUUID().toString() + (Shell.WINDOWS ? 
".cmd" : ".sh"); healthScriptFile = new File(tempDir.getAbsolutePath(), scriptName); conf.set(HConstants.HEALTH_SCRIPT_LOC, healthScriptFile.getAbsolutePath()); conf.setLong(HConstants.HEALTH_FAILURE_THRESHOLD, 3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java index 776fd2ab2acf..f08768206ff8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -283,7 +283,7 @@ public void testOrderingOfCellsInPartialResults(final Scan basePartialScan) thro partialResult = partialScanner.next(); assertTrue("Partial Result is null. iteration: " + iterationCount, partialResult != null); assertTrue("Partial cells are null. iteration: " + iterationCount, - partialResult.rawCells() != null); + partialResult.rawCells() != null); for (Cell c : partialResult.rawCells()) { aggregatePartialCells.add(c); @@ -291,7 +291,7 @@ public void testOrderingOfCellsInPartialResults(final Scan basePartialScan) thro } while (partialResult.mayHaveMoreCellsInRow()); assertTrue("Number of cells differs. iteration: " + iterationCount, - oneShotResult.rawCells().length == aggregatePartialCells.size()); + oneShotResult.rawCells().length == aggregatePartialCells.size()); final Cell[] oneShotCells = oneShotResult.rawCells(); for (int cell = 0; cell < oneShotCells.length; cell++) { Cell oneShotCell = oneShotCells[cell]; @@ -300,7 +300,7 @@ public void testOrderingOfCellsInPartialResults(final Scan basePartialScan) thro assertTrue("One shot cell was null", oneShotCell != null); assertTrue("Partial cell was null", partialCell != null); assertTrue("Cell differs. oneShotCell:" + oneShotCell + " partialCell:" + partialCell, - oneShotCell.equals(partialCell)); + oneShotCell.equals(partialCell)); } oneShotResult = oneShotScanner.next(); @@ -356,10 +356,9 @@ public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan, int expecte // 1. Returned result is the final result needed to form the complete result for that row // 2. It is the first result we have seen for that row and thus may have been fetched as // the last group of cells that fit inside the maxResultSize - assertTrue( - "Result's cell count differed from expected number. result: " + result, - result.rawCells().length == expectedNumberOfCells || !result.mayHaveMoreCellsInRow() - || !Bytes.equals(prevRow, result.getRow())); + assertTrue("Result's cell count differed from expected number. 
result: " + result, + result.rawCells().length == expectedNumberOfCells || !result.mayHaveMoreCellsInRow() + || !Bytes.equals(prevRow, result.getRow())); prevRow = result.getRow(); } @@ -436,9 +435,8 @@ public void testPartialResultsAndBatch(final int batch, final int cellsPerPartia assertTrue(result.rawCells() != null); if (result.mayHaveMoreCellsInRow()) { - final String error = - "Cells:" + result.rawCells().length + " Batch size:" + batch - + " cellsPerPartialResult:" + cellsPerPartialResult + " rep:" + repCount; + final String error = "Cells:" + result.rawCells().length + " Batch size:" + batch + + " cellsPerPartialResult:" + cellsPerPartialResult + " rep:" + repCount; assertTrue(error, result.rawCells().length == batch); } else { assertTrue(result.rawCells().length <= batch); @@ -665,7 +663,7 @@ static void verifyResult(Result result, List expKvList, String msg) { Cell kvExp = expKvList.get(i++); assertTrue("Not equal. get kv: " + kv.toString() + " exp kv: " + kvExp.toString(), - kvExp.equals(kv)); + kvExp.equals(kv)); } assertEquals(expKvList.size(), result.size()); @@ -726,14 +724,14 @@ public void testReadPointAndPartialResults() throws Exception { scannerCount += countCellsFromScanner(scanner); int expectedCount = numRows * numFamilies * numQualifiers; assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount, - scannerCount == expectedCount); + scannerCount == expectedCount); // Minus 2 for the two cells that were deleted scanner = tmpTable.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true)); scannerCount = countCellsFromScanner(scanner); expectedCount = numRows * numFamilies * numQualifiers - 2; assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount, - scannerCount == expectedCount); + scannerCount == expectedCount); scanner = tmpTable.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true)); scannerCount = scanner.next().rawCells().length; @@ -750,14 +748,14 @@ public void testReadPointAndPartialResults() throws Exception { scannerCount += countCellsFromScanner(scanner); expectedCount = numRows * numFamilies * numQualifiers - 2; assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount, - scannerCount == expectedCount); + scannerCount == expectedCount); // Now the scanner should see the cells that were added by puts scanner = tmpTable.getScanner(new Scan().setMaxResultSize(1).setAllowPartialResults(true)); scannerCount = countCellsFromScanner(scanner); expectedCount = numRows * numFamilies * numQualifiers; assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount, - scannerCount == expectedCount); + scannerCount == expectedCount); TEST_UTIL.deleteTable(tableName); } @@ -794,10 +792,9 @@ public void testPartialResultsWithColumnFilter() throws Exception { testPartialResultsWithColumnFilter(new ColumnRangeFilter(Bytes.toBytes("testQualifer1"), true, Bytes.toBytes("testQualifier7"), true)); - //Throw an Exception to the old version client to remind them not to use this filter anymore + // Throw an Exception to the old version client to remind them not to use this filter anymore assertThrows("Stop using", DoNotRetryIOException.class, - () -> testPartialResultsWithColumnFilter( - new FirstKeyValueMatchingQualifiersFilter())); + () -> testPartialResultsWithColumnFilter(new FirstKeyValueMatchingQualifiersFilter())); } public void testPartialResultsWithColumnFilter(Filter filter) throws Exception { @@ -816,10 +813,9 @@ public void 
testPartialResultsWithColumnFilter(Filter filter) throws Exception { } } - private void moveRegion(Table table, int index) throws IOException{ - List> regions = MetaTableAccessor - .getTableRegionsAndLocations(TEST_UTIL.getConnection(), - table.getName()); + private void moveRegion(Table table, int index) throws IOException { + List> regions = + MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), table.getName()); assertEquals(1, regions.size()); RegionInfo regionInfo = regions.get(0).getFirst(); ServerName name = TEST_UTIL.getHBaseCluster().getRegionServer(index).getServerName(); @@ -828,17 +824,17 @@ private void moveRegion(Table table, int index) throws IOException{ private void assertCell(Cell cell, byte[] row, byte[] cf, byte[] cq) { assertArrayEquals(row, - Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); assertArrayEquals(cf, - Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())); + Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())); assertArrayEquals(cq, - Bytes.copy(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); + Bytes.copy(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); } @Test public void testPartialResultWhenRegionMove() throws IOException { - Table table = createTestTable(TableName.valueOf(name.getMethodName()), - ROWS, FAMILIES, QUALIFIERS, VALUE); + Table table = + createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, QUALIFIERS, VALUE); moveRegion(table, 1); @@ -875,8 +871,8 @@ public void testPartialResultWhenRegionMove() throws IOException { @Test public void testReversedPartialResultWhenRegionMove() throws IOException { - Table table = createTestTable(TableName.valueOf(name.getMethodName()), - ROWS, FAMILIES, QUALIFIERS, VALUE); + Table table = + createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, QUALIFIERS, VALUE); moveRegion(table, 1); @@ -885,13 +881,13 @@ public void testReversedPartialResultWhenRegionMove() throws IOException { scan.setAllowPartialResults(true); scan.setReversed(true); ResultScanner scanner = table.getScanner(scan); - for (int i = 0; i < NUM_FAMILIES * NUM_QUALIFIERS-1; i++) { + for (int i = 0; i < NUM_FAMILIES * NUM_QUALIFIERS - 1; i++) { scanner.next(); } Result result1 = scanner.next(); assertEquals(1, result1.rawCells().length); Cell c1 = result1.rawCells()[0]; - assertCell(c1, ROWS[NUM_ROWS-1], FAMILIES[NUM_FAMILIES - 1], QUALIFIERS[NUM_QUALIFIERS - 1]); + assertCell(c1, ROWS[NUM_ROWS - 1], FAMILIES[NUM_FAMILIES - 1], QUALIFIERS[NUM_QUALIFIERS - 1]); assertFalse(result1.mayHaveMoreCellsInRow()); moveRegion(table, 2); @@ -899,7 +895,7 @@ public void testReversedPartialResultWhenRegionMove() throws IOException { Result result2 = scanner.next(); assertEquals(1, result2.rawCells().length); Cell c2 = result2.rawCells()[0]; - assertCell(c2, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[0]); + assertCell(c2, ROWS[NUM_ROWS - 2], FAMILIES[0], QUALIFIERS[0]); assertTrue(result2.mayHaveMoreCellsInRow()); moveRegion(table, 3); @@ -907,15 +903,15 @@ public void testReversedPartialResultWhenRegionMove() throws IOException { Result result3 = scanner.next(); assertEquals(1, result3.rawCells().length); Cell c3 = result3.rawCells()[0]; - assertCell(c3, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[1]); + assertCell(c3, ROWS[NUM_ROWS - 2], FAMILIES[0], QUALIFIERS[1]); 
assertTrue(result3.mayHaveMoreCellsInRow()); } @Test public void testCompleteResultWhenRegionMove() throws IOException { - Table table = createTestTable(TableName.valueOf(name.getMethodName()), - ROWS, FAMILIES, QUALIFIERS, VALUE); + Table table = + createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, QUALIFIERS, VALUE); moveRegion(table, 1); @@ -950,8 +946,8 @@ public void testCompleteResultWhenRegionMove() throws IOException { @Test public void testReversedCompleteResultWhenRegionMove() throws IOException { - Table table = createTestTable(TableName.valueOf(name.getMethodName()), - ROWS, FAMILIES, QUALIFIERS, VALUE); + Table table = + createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, QUALIFIERS, VALUE); moveRegion(table, 1); @@ -962,25 +958,25 @@ public void testReversedCompleteResultWhenRegionMove() throws IOException { ResultScanner scanner = table.getScanner(scan); Result result1 = scanner.next(); - assertEquals(NUM_FAMILIES*NUM_QUALIFIERS, result1.rawCells().length); + assertEquals(NUM_FAMILIES * NUM_QUALIFIERS, result1.rawCells().length); Cell c1 = result1.rawCells()[0]; - assertCell(c1, ROWS[NUM_ROWS-1], FAMILIES[0], QUALIFIERS[0]); + assertCell(c1, ROWS[NUM_ROWS - 1], FAMILIES[0], QUALIFIERS[0]); assertFalse(result1.mayHaveMoreCellsInRow()); moveRegion(table, 2); Result result2 = scanner.next(); - assertEquals(NUM_FAMILIES*NUM_QUALIFIERS, result2.rawCells().length); + assertEquals(NUM_FAMILIES * NUM_QUALIFIERS, result2.rawCells().length); Cell c2 = result2.rawCells()[0]; - assertCell(c2, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[0]); + assertCell(c2, ROWS[NUM_ROWS - 2], FAMILIES[0], QUALIFIERS[0]); assertFalse(result2.mayHaveMoreCellsInRow()); moveRegion(table, 3); Result result3 = scanner.next(); - assertEquals(NUM_FAMILIES*NUM_QUALIFIERS, result3.rawCells().length); + assertEquals(NUM_FAMILIES * NUM_QUALIFIERS, result3.rawCells().length); Cell c3 = result3.rawCells()[0]; - assertCell(c3, ROWS[NUM_ROWS-3], FAMILIES[0], QUALIFIERS[0]); + assertCell(c3, ROWS[NUM_ROWS - 3], FAMILIES[0], QUALIFIERS[0]); assertFalse(result3.mayHaveMoreCellsInRow()); } @@ -990,8 +986,8 @@ public void testBatchingResultWhenRegionMove() throws IOException { // If user setBatch(5) and rpc returns 3+5+5+5+3 cells, // we should return 5+5+5+5+1 to user. 
// setBatch doesn't mean setAllowPartialResult(true) - Table table = createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, - QUALIFIERS, VALUE); + Table table = + createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, QUALIFIERS, VALUE); Put put = new Put(ROWS[1]); put.addColumn(FAMILIES[0], QUALIFIERS[1], new byte[VALUE_SIZE * 10]); @@ -1014,9 +1010,9 @@ public void testBatchingResultWhenRegionMove() throws IOException { Result result1 = scanner.next(); assertEquals(5, result1.rawCells().length); assertCell(result1.rawCells()[0], ROWS[0], FAMILIES[NUM_FAMILIES - 1], - QUALIFIERS[NUM_QUALIFIERS - 5]); + QUALIFIERS[NUM_QUALIFIERS - 5]); assertCell(result1.rawCells()[4], ROWS[0], FAMILIES[NUM_FAMILIES - 1], - QUALIFIERS[NUM_QUALIFIERS - 1]); + QUALIFIERS[NUM_QUALIFIERS - 1]); assertFalse(result1.mayHaveMoreCellsInRow()); moveRegion(table, 2); @@ -1044,7 +1040,6 @@ public void testBatchingResultWhenRegionMove() throws IOException { assertEquals(4, result.rawCells().length); assertFalse(result.mayHaveMoreCellsInRow()); - for (int i = 2; i < NUM_ROWS; i++) { for (int j = 0; j < NUM_FAMILIES; j++) { for (int k = 0; k < NUM_QUALIFIERS; k += 5) { @@ -1064,8 +1059,8 @@ public void testBatchingResultWhenRegionMove() throws IOException { @Test public void testDontThrowUnknowScannerExceptionToClient() throws Exception { - Table table = createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, - QUALIFIERS, VALUE); + Table table = + createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, QUALIFIERS, VALUE); Scan scan = new Scan(); scan.setCaching(1); ResultScanner scanner = table.getScanner(scan); @@ -1081,8 +1076,8 @@ public void testDontThrowUnknowScannerExceptionToClient() throws Exception { @Test public void testMayHaveMoreCellsInRowReturnsTrueAndSetBatch() throws IOException { - Table table = createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, - QUALIFIERS, VALUE); + Table table = + createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, QUALIFIERS, VALUE); Scan scan = new Scan(); scan.setBatch(1); scan.setFilter(new FirstKeyOnlyFilter()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java index bb683e41b42c..4a0a2c0549e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -87,7 +87,6 @@ public static void afterClass() throws Exception { UTIL.shutdownMiniCluster(); } - @Test public void testRegionMetrics() throws Exception { @@ -95,8 +94,7 @@ public void testRegionMetrics() throws Exception { for (ServerName serverName : admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) .getLiveServerMetrics().keySet()) { List regions = admin.getRegions(serverName); - Collection regionMetricsList = - admin.getRegionMetrics(serverName); + Collection regionMetricsList = admin.getRegionMetrics(serverName); checkRegionsAndRegionMetrics(regions, regionMetricsList); } @@ -121,36 +119,33 @@ public void testRegionMetrics() throws Exception { // Check RegionMetrics matches the RegionMetrics from ClusterMetrics for (Map.Entry entry : admin - .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().entrySet()) { + .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().entrySet()) { ServerName serverName = entry.getKey(); ServerMetrics serverMetrics = entry.getValue(); List regionMetrics = admin.getRegionMetrics(serverName); - LOG.debug("serverName=" + serverName + ", getRegionLoads=" + - serverMetrics.getRegionMetrics().keySet().stream().map(r -> Bytes.toString(r)). - collect(Collectors.toList())); - LOG.debug("serverName=" + serverName + ", regionLoads=" + - regionMetrics.stream().map(r -> Bytes.toString(r.getRegionName())). - collect(Collectors.toList())); + LOG.debug("serverName=" + serverName + ", getRegionLoads=" + serverMetrics.getRegionMetrics() + .keySet().stream().map(r -> Bytes.toString(r)).collect(Collectors.toList())); + LOG.debug("serverName=" + serverName + ", regionLoads=" + regionMetrics.stream() + .map(r -> Bytes.toString(r.getRegionName())).collect(Collectors.toList())); assertEquals(serverMetrics.getRegionMetrics().size(), regionMetrics.size()); checkMetricsValue(regionMetrics, serverMetrics); } } private void checkMetricsValue(List regionMetrics, ServerMetrics serverMetrics) - throws InvocationTargetException, IllegalAccessException { + throws InvocationTargetException, IllegalAccessException { for (RegionMetrics fromRM : regionMetrics) { RegionMetrics fromSM = serverMetrics.getRegionMetrics().get(fromRM.getRegionName()); Class clazz = RegionMetrics.class; for (Method method : clazz.getMethods()) { // check numeric values only - if (method.getReturnType().equals(Size.class) - || method.getReturnType().equals(int.class) - || method.getReturnType().equals(long.class) - || method.getReturnType().equals(float.class)) { + if (method.getReturnType().equals(Size.class) || method.getReturnType().equals(int.class) + || method.getReturnType().equals(long.class) + || method.getReturnType().equals(float.class)) { Object valueRm = method.invoke(fromRM); Object valueSM = method.invoke(fromSM); assertEquals("Return values of method " + method.getName() + " are different", - valueRm.toString(), valueSM.toString()); + valueRm.toString(), valueSM.toString()); } } } @@ -160,16 +155,16 @@ private void checkRegionsAndRegionMetrics(Collection regions, Collection regionMetrics) { assertEquals("No of regions and regionMetrics doesn't match", regions.size(), - regionMetrics.size()); + regionMetrics.size()); Map regionMetricsMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); for (RegionMetrics r : regionMetrics) { regionMetricsMap.put(r.getRegionName(), r); } for (RegionInfo info : regions) { - assertTrue("Region not in RegionMetricsMap region:" - + 
info.getRegionNameAsString() + " regionMap: " - + regionMetricsMap, regionMetricsMap.containsKey(info.getRegionName())); + assertTrue("Region not in RegionMetricsMap region:" + info.getRegionNameAsString() + + " regionMap: " + regionMetricsMap, + regionMetricsMap.containsKey(info.getRegionName())); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java index 4f23bc05c72a..88c11cd66a0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,11 +56,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * Test whether region re-balancing works. (HBASE-71) - * The test only works for cluster wide balancing, not per table wide. - * Increase the margin a little to make StochasticLoadBalancer result acceptable. + * Test whether region re-balancing works. (HBASE-71) The test only works for cluster wide + * balancing, not per table wide. Increase the margin a little to make StochasticLoadBalancer result + * acceptable. */ -@Category({FlakeyTests.class, LargeTests.class}) +@Category({ FlakeyTests.class, LargeTests.class }) @RunWith(value = Parameterized.class) public class TestRegionRebalancing { @@ -100,36 +100,34 @@ public void before() throws Exception { UTIL.startMiniCluster(1); this.tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf("test")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)).build(); } /** - * For HBASE-71. Try a few different configurations of starting and stopping - * region servers to see if the assignment or regions is pretty balanced. + * For HBASE-71. Try a few different configurations of starting and stopping region servers to see + * if the assignment or regions is pretty balanced. 
* @throws IOException * @throws InterruptedException */ @Test - public void testRebalanceOnRegionServerNumberChange() - throws IOException, InterruptedException { - try(Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); + public void testRebalanceOnRegionServerNumberChange() throws IOException, InterruptedException { + try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); Admin admin = connection.getAdmin()) { - admin.createTable(this.tableDescriptor, Arrays.copyOfRange(HBaseTestingUtil.KEYS, - 1, HBaseTestingUtil.KEYS.length)); + admin.createTable(this.tableDescriptor, + Arrays.copyOfRange(HBaseTestingUtil.KEYS, 1, HBaseTestingUtil.KEYS.length)); this.regionLocator = connection.getRegionLocator(this.tableDescriptor.getTableName()); MetaTableAccessor.fullScanMetaAndPrint(admin.getConnection()); - assertEquals("Test table should have right number of regions", - HBaseTestingUtil.KEYS.length, + assertEquals("Test table should have right number of regions", HBaseTestingUtil.KEYS.length, this.regionLocator.getStartKeys().length); // verify that the region assignments are balanced to start out assertRegionsAreBalanced(); // add a region server - total of 2 - LOG.info("Started second server=" + - UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); + LOG.info("Started second server=" + + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); UTIL.getHBaseCluster().getMaster().balance(); assertRegionsAreBalanced(); @@ -141,8 +139,8 @@ public void testRebalanceOnRegionServerNumberChange() // if we add a server, then the balance() call should return true // add a region server - total of 3 - LOG.info("Started third server=" + - UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); + LOG.info("Started third server=" + + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); waitForAllRegionsAssigned(); response = UTIL.getHBaseCluster().getMaster().balance(); @@ -159,10 +157,10 @@ public void testRebalanceOnRegionServerNumberChange() assertRegionsAreBalanced(); // start two more region servers - total of 4 - LOG.info("Readding third server=" + - UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); - LOG.info("Added fourth server=" + - UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); + LOG.info("Readding third server=" + + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); + LOG.info("Added fourth server=" + + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); waitOnCrashProcessing(); waitForAllRegionsAssigned(); @@ -172,7 +170,7 @@ public void testRebalanceOnRegionServerNumberChange() assertEquals(response.getMovesCalculated(), response.getMovesExecuted()); assertRegionsAreBalanced(); - for (int i = 0; i < 6; i++){ + for (int i = 0; i < 6; i++) { LOG.info("Adding " + (i + 5) + "th region server"); UTIL.getHBaseCluster().startRegionServer(); } @@ -199,15 +197,14 @@ private void waitOnCrashProcessing() throws IOException { } /** - * Determine if regions are balanced. Figure out the total, divide by the - * number of online servers, then test if each server is +/- 1 of average - * rounded up. + * Determine if regions are balanced. Figure out the total, divide by the number of online + * servers, then test if each server is +/- 1 of average rounded up. 
*/ private void assertRegionsAreBalanced() throws IOException { - // TODO: Fix this test. Old balancer used to run with 'slop'. New + // TODO: Fix this test. Old balancer used to run with 'slop'. New // balancer does not. boolean success = false; - float slop = (float)UTIL.getConfiguration().getFloat("hbase.regions.slop", 0.1f); + float slop = (float) UTIL.getConfiguration().getFloat("hbase.regions.slop", 0.1f); if (slop <= 0) slop = 1; for (int i = 0; i < 5; i++) { @@ -217,32 +214,29 @@ private void assertRegionsAreBalanced() throws IOException { long regionCount = UTIL.getMiniHBaseCluster().countServedRegions(); List servers = getOnlineRegionServers(); - double avg = (double)regionCount / (double)servers.size(); - int avgLoadPlusSlop = (int)Math.ceil(avg * (1 + slop)); - int avgLoadMinusSlop = (int)Math.floor(avg * (1 - slop)) - 1; + double avg = (double) regionCount / (double) servers.size(); + int avgLoadPlusSlop = (int) Math.ceil(avg * (1 + slop)); + int avgLoadMinusSlop = (int) Math.floor(avg * (1 - slop)) - 1; // Increase the margin a little to accommodate StochasticLoadBalancer if (this.balancerName.contains("StochasticLoadBalancer")) { avgLoadPlusSlop++; avgLoadMinusSlop--; } LOG.debug("There are " + servers.size() + " servers and " + regionCount - + " regions. Load Average: " + avg + " low border: " + avgLoadMinusSlop - + ", up border: " + avgLoadPlusSlop + "; attempt: " + i); + + " regions. Load Average: " + avg + " low border: " + avgLoadMinusSlop + ", up border: " + + avgLoadPlusSlop + "; attempt: " + i); for (HRegionServer server : servers) { - int serverLoad = - ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size(); + int serverLoad = ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size(); LOG.debug(server.getServerName() + " Avg: " + avg + " actual: " + serverLoad); - if (!(avg > 2.0 && serverLoad <= avgLoadPlusSlop - && serverLoad >= avgLoadMinusSlop)) { - for (RegionInfo hri : - ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) { + if (!(avg > 2.0 && serverLoad <= avgLoadPlusSlop && serverLoad >= avgLoadMinusSlop)) { + for (RegionInfo hri : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) { if (hri.isMetaRegion()) serverLoad--; // LOG.debug(hri.getRegionNameAsString()); } if (!(serverLoad <= avgLoadPlusSlop && serverLoad >= avgLoadMinusSlop)) { - LOG.debug(server.getServerName() + " Isn't balanced!!! Avg: " + avg + - " actual: " + serverLoad + " slop: " + slop); + LOG.debug(server.getServerName() + " Isn't balanced!!! Avg: " + avg + " actual: " + + serverLoad + " slop: " + slop); success = false; break; } @@ -254,7 +248,8 @@ private void assertRegionsAreBalanced() throws IOException { // chance to catch up. then, go back to the retry loop. 
try { Thread.sleep(10000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + } UTIL.getHBaseCluster().getMaster().balance(); continue; @@ -270,8 +265,7 @@ private void assertRegionsAreBalanced() throws IOException { private List getOnlineRegionServers() { List list = new ArrayList<>(); - for (JVMClusterUtil.RegionServerThread rst : - UTIL.getHBaseCluster().getRegionServerThreads()) { + for (JVMClusterUtil.RegionServerThread rst : UTIL.getHBaseCluster().getRegionServerThreads()) { if (rst.getRegionServer().isOnline()) { list.add(rst.getRegionServer()); } @@ -285,14 +279,14 @@ private List getOnlineRegionServers() { private void waitForAllRegionsAssigned() throws IOException { int totalRegions = HBaseTestingUtil.KEYS.length; try { - Thread.sleep(200); + Thread.sleep(200); } catch (InterruptedException e) { throw new InterruptedIOException(); } while (UTIL.getMiniHBaseCluster().countServedRegions() < totalRegions) { - // while (!cluster.getMaster().allRegionsAssigned()) { - LOG.debug("Waiting for there to be "+ totalRegions +" regions, but there are " - + UTIL.getMiniHBaseCluster().countServedRegions() + " right now."); + // while (!cluster.getMaster().allRegionsAssigned()) { + LOG.debug("Waiting for there to be " + totalRegions + " regions, but there are " + + UTIL.getMiniHBaseCluster().countServedRegions() + " right now."); try { Thread.sleep(200); } catch (InterruptedException e) { @@ -303,4 +297,3 @@ private void waitForAllRegionsAssigned() throws IOException { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionReplicationLagEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionReplicationLagEvaluation.java index f4b7970b8b4b..648f70a97aaa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionReplicationLagEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionReplicationLagEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestRegionReplicationLagEvaluation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionReplicationLagEvaluation.class); + HBaseClassTestRule.forClass(TestRegionReplicationLagEvaluation.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java index 07348488ac7c..3f7845172880 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ public class TestSequenceIdMonotonicallyIncreasing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSequenceIdMonotonicallyIncreasing.class); + HBaseClassTestRule.forClass(TestSequenceIdMonotonicallyIncreasing.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -94,7 +94,7 @@ private long getMaxSeqId(HRegionServer rs, RegionInfo region) throws IOException Path walFile = ((AbstractFSWAL) rs.getWAL(null)).getCurrentFileName(); long maxSeqId = -1L; try (WAL.Reader reader = - WALFactory.createReader(UTIL.getTestFileSystem(), walFile, UTIL.getConfiguration())) { + WALFactory.createReader(UTIL.getTestFileSystem(), walFile, UTIL.getConfiguration())) { for (;;) { WAL.Entry entry = reader.next(); if (entry == null) { @@ -142,15 +142,15 @@ public void testMerge() HRegion regionA = regions.get(0); HRegion regionB = regions.get(1); HRegionServer rsA = - cluster.getRegionServer(cluster.getServerWith(regionA.getRegionInfo().getRegionName())); + cluster.getRegionServer(cluster.getServerWith(regionA.getRegionInfo().getRegionName())); HRegionServer rsB = - cluster.getRegionServer(cluster.getServerWith(regionB.getRegionInfo().getRegionName())); + cluster.getRegionServer(cluster.getServerWith(regionB.getRegionInfo().getRegionName())); UTIL.getAdmin().mergeRegionsAsync(regionA.getRegionInfo().getRegionName(), regionB.getRegionInfo().getRegionName(), false).get(1, TimeUnit.MINUTES); long maxSeqIdA = getMaxSeqId(rsA, regionA.getRegionInfo()); long maxSeqIdB = getMaxSeqId(rsB, regionB.getRegionInfo()); HRegionLocation loc = - UTIL.getConnection().getRegionLocator(NAME).getRegionLocation(Bytes.toBytes(0), true); + UTIL.getConnection().getRegionLocator(NAME).getRegionLocation(Bytes.toBytes(0), true); assertEquals(Math.max(maxSeqIdA, maxSeqIdB) + 1, loc.getSeqNum()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index 90d84bee5877..576b80fb468e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -64,7 +64,7 @@ public class TestSerialization { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSerialization.class); + HBaseClassTestRule.forClass(TestSerialization.class); @Test public void testKeyValue() throws Exception { @@ -92,10 +92,10 @@ public void testKeyValue() throws Exception { public void testCreateKeyValueInvalidNegativeLength() { KeyValue kv_0 = new KeyValue(Bytes.toBytes("myRow"), Bytes.toBytes("myCF"), // 51 bytes - Bytes.toBytes("myQualifier"), 12345L, Bytes.toBytes("my12345")); + Bytes.toBytes("myQualifier"), 12345L, Bytes.toBytes("my12345")); KeyValue kv_1 = new KeyValue(Bytes.toBytes("myRow"), Bytes.toBytes("myCF"), // 49 bytes - Bytes.toBytes("myQualifier"), 12345L, Bytes.toBytes("my123")); + Bytes.toBytes("myQualifier"), 12345L, Bytes.toBytes("my123")); ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); @@ -133,7 +133,7 @@ public void testCreateKeyValueInvalidNegativeLength() { @Test public void testCompareFilter() throws Exception { Filter f = - new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); byte[] bytes = f.toByteArray(); Filter ff = RowFilter.parseFrom(bytes); assertNotNull(ff); @@ -187,11 +187,11 @@ public void testRegionInfos() throws Exception { private RegionInfo createRandomRegion(final String name) { TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name)); String[] families = new String[] { "info", "anchor" }; for (int i = 0; i < families.length; i++) { ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(families[i])).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(families[i])).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); } TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); @@ -308,12 +308,12 @@ protected TableDescriptor createTableDescriptor(final String name) { protected TableDescriptor createTableDescriptor(final String name, final int versions) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)); builder - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).setMaxVersions(versions) - .setBlockCacheEnabled(false).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2).setMaxVersions(versions) - .setBlockCacheEnabled(false).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam3).setMaxVersions(versions) - .setBlockCacheEnabled(false).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).setMaxVersions(versions) + .setBlockCacheEnabled(false).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2).setMaxVersions(versions) + .setBlockCacheEnabled(false).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam3).setMaxVersions(versions) + .setBlockCacheEnabled(false).build()); return builder.build(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java index 4016a68130aa..450f321e7b3b 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,33 +40,36 @@ public class TestServerMetrics { @Test public void testRegionLoadAggregation() { - ServerMetrics metrics = ServerMetricsBuilder.toServerMetrics( - ServerName.valueOf("localhost,1,1"), createServerLoadProto()); + ServerMetrics metrics = ServerMetricsBuilder + .toServerMetrics(ServerName.valueOf("localhost,1,1"), createServerLoadProto()); assertEquals(13, - metrics.getRegionMetrics().values().stream().mapToInt(v -> v.getStoreCount()).sum()); + metrics.getRegionMetrics().values().stream().mapToInt(v -> v.getStoreCount()).sum()); assertEquals(114, - metrics.getRegionMetrics().values().stream().mapToInt(v -> v.getStoreFileCount()).sum()); + metrics.getRegionMetrics().values().stream().mapToInt(v -> v.getStoreFileCount()).sum()); assertEquals(129, metrics.getRegionMetrics().values().stream() - .mapToDouble(v -> v.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)).sum(), 0); + .mapToDouble(v -> v.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)).sum(), + 0); assertEquals(504, metrics.getRegionMetrics().values().stream() - .mapToDouble(v -> v.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE)).sum(), 0); + .mapToDouble(v -> v.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE)).sum(), + 0); assertEquals(820, metrics.getRegionMetrics().values().stream() - .mapToDouble(v -> v.getStoreFileSize().get(Size.Unit.MEGABYTE)).sum(), 0); + .mapToDouble(v -> v.getStoreFileSize().get(Size.Unit.MEGABYTE)).sum(), + 0); assertEquals(82, metrics.getRegionMetrics().values().stream() - .mapToDouble(v -> v.getStoreFileIndexSize().get(Size.Unit.KILOBYTE)).sum(), 0); + .mapToDouble(v -> v.getStoreFileIndexSize().get(Size.Unit.KILOBYTE)).sum(), + 0); assertEquals(((long) Integer.MAX_VALUE) * 2, - metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getReadRequestCount()).sum()); + metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getReadRequestCount()).sum()); assertEquals(100, - metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getCpRequestCount()).sum()); - assertEquals(300, - metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getFilteredReadRequestCount()) - .sum()); + metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getCpRequestCount()).sum()); + assertEquals(300, metrics.getRegionMetrics().values().stream() + .mapToLong(v -> v.getFilteredReadRequestCount()).sum()); } @Test public void testToString() { - ServerMetrics metrics = ServerMetricsBuilder.toServerMetrics( - ServerName.valueOf("localhost,1,1"), createServerLoadProto()); + ServerMetrics metrics = ServerMetricsBuilder + .toServerMetrics(ServerName.valueOf("localhost,1,1"), createServerLoadProto()); String slToString = metrics.toString(); assertTrue(slToString.contains("numberOfStores=13")); assertTrue(slToString.contains("numberOfStorefiles=114")); @@ -79,14 +82,13 @@ public void testToString() { @Test public void testRegionLoadWrapAroundAggregation() { - ServerMetrics metrics = ServerMetricsBuilder.toServerMetrics( - ServerName.valueOf("localhost,1,1"), createServerLoadProto()); + ServerMetrics metrics = ServerMetricsBuilder + .toServerMetrics(ServerName.valueOf("localhost,1,1"), createServerLoadProto()); 
long totalCount = ((long) Integer.MAX_VALUE) * 2; assertEquals(totalCount, - metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getReadRequestCount()).sum()); + metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getReadRequestCount()).sum()); assertEquals(totalCount, - metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getWriteRequestCount()) - .sum()); + metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getWriteRequestCount()).sum()); } private ClusterStatusProtos.ServerLoad createServerLoadProto() { @@ -97,23 +99,19 @@ private ClusterStatusProtos.ServerLoad createServerLoadProto() { .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME) .setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build(); - ClusterStatusProtos.RegionLoad rlOne = - ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10) - .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520) - .setFilteredReadRequestsCount(100).setStorefileIndexSizeKB(42).setRootIndexSizeKB(201) - .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE) - .build(); - ClusterStatusProtos.RegionLoad rlTwo = - ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3) - .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300) - .setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40).setRootIndexSizeKB(303) - .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE) - .setCpRequestsCount(100) - .build(); + ClusterStatusProtos.RegionLoad rlOne = ClusterStatusProtos.RegionLoad.newBuilder() + .setRegionSpecifier(rSpecOne).setStores(10).setStorefiles(101) + .setStoreUncompressedSizeMB(106).setStorefileSizeMB(520).setFilteredReadRequestsCount(100) + .setStorefileIndexSizeKB(42).setRootIndexSizeKB(201).setReadRequestsCount(Integer.MAX_VALUE) + .setWriteRequestsCount(Integer.MAX_VALUE).build(); + ClusterStatusProtos.RegionLoad rlTwo = ClusterStatusProtos.RegionLoad.newBuilder() + .setRegionSpecifier(rSpecTwo).setStores(3).setStorefiles(13).setStoreUncompressedSizeMB(23) + .setStorefileSizeMB(300).setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40) + .setRootIndexSizeKB(303).setReadRequestsCount(Integer.MAX_VALUE) + .setWriteRequestsCount(Integer.MAX_VALUE).setCpRequestsCount(100).build(); - ClusterStatusProtos.ServerLoad sl = - ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne). - addRegionLoads(rlTwo).build(); + ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder() + .addRegionLoads(rlOne).addRegionLoads(rlTwo).build(); return sl; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java index 3eaa2ffd7bd4..6beab83794e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -51,7 +52,7 @@ @Category(LargeTests.class) public class TestServerSideScanMetricsFromClientSide { private static final Logger LOG = - LoggerFactory.getLogger(TestServerSideScanMetricsFromClientSide.class); + LoggerFactory.getLogger(TestServerSideScanMetricsFromClientSide.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -289,8 +290,8 @@ private void testRowsFilteredMetric(Scan baseScan) throws Exception { testRowsFilteredMetric(baseScan, filter, 0); // No matching column value should exist in any row. Filter all rows - filter = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS[0], - CompareOperator.NOT_EQUAL, VALUE); + filter = + new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS[0], CompareOperator.NOT_EQUAL, VALUE); testRowsFilteredMetric(baseScan, filter, ROWS.length); List filters = new ArrayList<>(); @@ -307,7 +308,7 @@ private void testRowsFilteredMetric(Scan baseScan) throws Exception { for (int family = 0; family < FAMILIES.length; family++) { for (int qualifier = 0; qualifier < QUALIFIERS.length; qualifier++) { filters.add(new SingleColumnValueExcludeFilter(FAMILIES[family], QUALIFIERS[qualifier], - CompareOperator.EQUAL, VALUE)); + CompareOperator.EQUAL, VALUE)); } } filter = new FilterList(Operator.MUST_PASS_ONE, filters); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSize.java index ecb317f53815..2103b720dcc2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSize.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,12 +27,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestSize { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSize.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSize.class); @Test public void testConversion() { @@ -71,16 +70,11 @@ public void testCompare() { @Test public void testEqual() { - assertEquals(new Size(1024D, Size.Unit.TERABYTE), - new Size(1D, Size.Unit.PETABYTE)); - assertEquals(new Size(1024D, Size.Unit.GIGABYTE), - new Size(1D, Size.Unit.TERABYTE)); - assertEquals(new Size(1024D, Size.Unit.MEGABYTE), - new Size(1D, Size.Unit.GIGABYTE)); - assertEquals(new Size(1024D, Size.Unit.KILOBYTE), - new Size(1D, Size.Unit.MEGABYTE)); - assertEquals(new Size(1024D, Size.Unit.BYTE), - new Size(1D, Size.Unit.KILOBYTE)); + assertEquals(new Size(1024D, Size.Unit.TERABYTE), new Size(1D, Size.Unit.PETABYTE)); + assertEquals(new Size(1024D, Size.Unit.GIGABYTE), new Size(1D, Size.Unit.TERABYTE)); + assertEquals(new Size(1024D, Size.Unit.MEGABYTE), new Size(1D, Size.Unit.GIGABYTE)); + assertEquals(new Size(1024D, Size.Unit.KILOBYTE), new Size(1D, Size.Unit.MEGABYTE)); + assertEquals(new Size(1024D, Size.Unit.BYTE), new Size(1D, Size.Unit.KILOBYTE)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitMerge.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitMerge.java index ce700353ee5b..9c87e10fc0bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitMerge.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitMerge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestSplitMerge { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitMerge.class); + HBaseClassTestRule.forClass(TestSplitMerge.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -65,7 +65,7 @@ public void test() throws Exception { TableName tableName = TableName.valueOf("SplitMerge"); byte[] family = Bytes.toBytes("CF"); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); UTIL.getAdmin().createTable(td, new byte[][] { Bytes.toBytes(1) }); UTIL.waitTableAvailable(tableName); UTIL.getAdmin().split(tableName, Bytes.toBytes(2)); @@ -94,32 +94,32 @@ public String explainFailure() throws Exception { assertNotNull(regionA); assertNotNull(regionB); UTIL.getAdmin().mergeRegionsAsync(regionA.getRegionName(), regionB.getRegionName(), false) - .get(30, TimeUnit.SECONDS); + .get(30, TimeUnit.SECONDS); assertEquals(2, UTIL.getAdmin().getRegions(tableName).size()); ServerName expected = UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName(); assertEquals(expected, UTIL.getConnection().getRegionLocator(tableName) - .getRegionLocation(Bytes.toBytes(1), true).getServerName()); + .getRegionLocation(Bytes.toBytes(1), true).getServerName()); try (AsyncConnection asyncConn = - ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) { + ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) { assertEquals(expected, asyncConn.getRegionLocator(tableName) - .getRegionLocation(Bytes.toBytes(1), true).get().getServerName()); + .getRegionLocation(Bytes.toBytes(1), true).get().getServerName()); } } @Test public void testMergeRegionOrder() throws Exception { - int regionCount= 20; + int regionCount = 20; TableName tableName = TableName.valueOf("MergeRegionOrder"); byte[] family = Bytes.toBytes("CF"); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); - byte[][] splitKeys = new byte[regionCount-1][]; + byte[][] splitKeys = new byte[regionCount - 1][]; - for (int c = 0; c < regionCount-1; c++) { - splitKeys[c] = Bytes.toBytes(c+1 * 1000); + for (int c = 0; c < regionCount - 1; c++) { + splitKeys[c] = Bytes.toBytes(c + 1 * 1000); } UTIL.getAdmin().createTable(td, splitKeys); @@ -142,7 +142,7 @@ public void testMergeRegionOrder() throws Exception { RegionInfo mergedRegion = mergedRegions.get(0); List mergeParentRegions = UTIL.getMiniHBaseCluster().getMaster() - .getAssignmentManager().getRegionStateStore().getMergeRegions(mergedRegion); + .getAssignmentManager().getRegionStateStore().getMergeRegions(mergedRegion); assertEquals(mergeParentRegions.size(), regionCount); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java index 92f4a8dc1013..86b33dd16d62 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTagRewriteCell.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,28 +36,25 @@ public class TestTagRewriteCell { @Test public void testHeapSize() { Cell originalCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(Bytes.toBytes("row")) - .setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(Bytes.toBytes("value")) - .build(); + .setRow(Bytes.toBytes("row")).setFamily(HConstants.EMPTY_BYTE_ARRAY) + .setQualifier(HConstants.EMPTY_BYTE_ARRAY).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(Bytes.toBytes("value")).build(); final int fakeTagArrayLength = 10; Cell trCell = PrivateCellUtil.createCell(originalCell, new byte[fakeTagArrayLength]); // Get the heapSize before the internal tags array in trCell are nuked - long trCellHeapSize = ((HeapSize)trCell).heapSize(); + long trCellHeapSize = ((HeapSize) trCell).heapSize(); // Make another TagRewriteCell with the original TagRewriteCell // This happens on systems with more than one RegionObserver/Coproc loaded (such as // VisibilityController and AccessController) Cell trCell2 = PrivateCellUtil.createCell(trCell, new byte[fakeTagArrayLength]); - assertTrue("TagRewriteCell containing a TagRewriteCell's heapsize should be " + - "larger than a single TagRewriteCell's heapsize", - trCellHeapSize < ((HeapSize)trCell2).heapSize()); + assertTrue( + "TagRewriteCell containing a TagRewriteCell's heapsize should be " + + "larger than a single TagRewriteCell's heapsize", + trCellHeapSize < ((HeapSize) trCell2).heapSize()); assertTrue("TagRewriteCell should have had nulled out tags array", - ((HeapSize)trCell).heapSize() < trCellHeapSize); + ((HeapSize) trCell).heapSize() < trCellHeapSize); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index 1acf0a9d71c4..f74c5468ac4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import edu.umd.cs.findbugs.annotations.NonNull; import java.util.List; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -59,7 +58,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestZooKeeper { @ClassRule @@ -94,8 +93,8 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(2).numRegionServers(2).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().numMasters(2).numRegionServers(2).build(); TEST_UTIL.startMiniHBaseCluster(option); } @@ -131,17 +130,15 @@ public void testMasterSessionExpired() throws Exception { } /** - * Master recovery when the znode already exists. 
Internally, this - * test differs from {@link #testMasterSessionExpired} because here - * the master znode will exist in ZK. + * Master recovery when the znode already exists. Internally, this test differs from + * {@link #testMasterSessionExpired} because here the master znode will exist in ZK. */ @Test public void testMasterZKSessionRecoveryFailure() throws Exception { LOG.info("Starting " + name.getMethodName()); SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); HMaster m = cluster.getMaster(); - m.abort("Test recovery from zk session expired", - new KeeperException.SessionExpiredException()); + m.abort("Test recovery from zk session expired", new KeeperException.SessionExpiredException()); assertTrue(m.isStopped()); // Master doesn't recover any more testSanity(name.getMethodName()); } @@ -195,7 +192,7 @@ public void testRegionAssignmentAfterMasterRecoveryDueToZKExpiry() throws Except MockLoadBalancer.retainAssignCalled = false; final int expectedNumOfListeners = countPermanentListeners(zkw); m.abort("Test recovery from zk session expired", - new KeeperException.SessionExpiredException()); + new KeeperException.SessionExpiredException()); assertTrue(m.isStopped()); // Master doesn't recover any more // The recovered master should not call retainAssignment, as it is not a // clean startup. @@ -209,8 +206,8 @@ public void testRegionAssignmentAfterMasterRecoveryDueToZKExpiry() throws Except } /** - * Count listeners in zkw excluding listeners, that belongs to workers or other - * temporary processes. + * Count listeners in zkw excluding listeners, that belongs to workers or other temporary + * processes. */ private int countPermanentListeners(ZKWatcher watcher) { return countListeners(watcher, ZkSplitLogWorkerCoordination.class); @@ -248,7 +245,7 @@ public void testLogSplittingAfterMasterRecoveryDueToZKExpiry() throws Exception byte[] family = Bytes.toBytes("col"); try (Admin admin = TEST_UTIL.getAdmin()) { byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("1"), Bytes.toBytes("2"), - Bytes.toBytes("3"), Bytes.toBytes("4"), Bytes.toBytes("5") }; + Bytes.toBytes("3"), Bytes.toBytes("4"), Bytes.toBytes("5") }; TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); admin.createTable(htd, SPLIT_KEYS); @@ -284,12 +281,11 @@ static class MockLoadBalancer extends SimpleLoadBalancer { @Override @NonNull - public Map> retainAssignment( - Map regions, List servers) throws HBaseIOException { + public Map> retainAssignment(Map regions, + List servers) throws HBaseIOException { retainAssignCalled = true; return super.retainAssignment(regions, servers); } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java index da732ac3a0e8..b1c54de231ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,36 +15,34 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Assert; /** - * Tests user specifiable time stamps putting, getting and scanning. Also - * tests same in presence of deletes. Test cores are written so can be - * run against an HRegion and against an HTable: i.e. both local and remote. + * Tests user specifiable time stamps putting, getting and scanning. Also tests same in presence of + * deletes. Test cores are written so can be run against an HRegion and against an HTable: i.e. both + * local and remote. */ public class TimestampTestBase { private static final long T0 = 10L; private static final long T1 = 100L; private static final long T2 = 200L; - public static final byte [] FAMILY_NAME = Bytes.toBytes("colfamily11"); - private static final byte [] QUALIFIER_NAME = Bytes.toBytes("contents"); + public static final byte[] FAMILY_NAME = Bytes.toBytes("colfamily11"); + private static final byte[] QUALIFIER_NAME = Bytes.toBytes("contents"); - private static final byte [] ROW = Bytes.toBytes("row"); + private static final byte[] ROW = Bytes.toBytes("row"); interface FlushCache { void flushcache() throws IOException; @@ -57,33 +55,32 @@ interface FlushCache { * @param flusher * @throws IOException */ - public static void doTestDelete(final Table table, FlushCache flusher) - throws IOException { + public static void doTestDelete(final Table table, FlushCache flusher) throws IOException { // Add values at various timestamps (Values are timestampes as bytes). put(table, T0); put(table, T1); put(table, T2); put(table); // Verify that returned versions match passed timestamps. - assertVersions(table, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1}); + assertVersions(table, new long[] { HConstants.LATEST_TIMESTAMP, T2, T1 }); // If I delete w/o specifying a timestamp, this means I'm deleting the latest. delete(table); // Verify that I get back T2 through T1 -- that the latest version has been deleted. - assertVersions(table, new long [] {T2, T1, T0}); + assertVersions(table, new long[] { T2, T1, T0 }); // Flush everything out to disk and then retry flusher.flushcache(); - assertVersions(table, new long [] {T2, T1, T0}); + assertVersions(table, new long[] { T2, T1, T0 }); // Now add, back a latest so I can test remove other than the latest. put(table); - assertVersions(table, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1}); + assertVersions(table, new long[] { HConstants.LATEST_TIMESTAMP, T2, T1 }); delete(table, T2); - assertVersions(table, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0}); + assertVersions(table, new long[] { HConstants.LATEST_TIMESTAMP, T1, T0 }); // Flush everything out to disk and then retry flusher.flushcache(); - assertVersions(table, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0}); + assertVersions(table, new long[] { HConstants.LATEST_TIMESTAMP, T1, T0 }); // Now try deleting all from T2 back inclusive (We first need to add T2 // back into the mix and to make things a little interesting, delete and then readd T1. 
@@ -95,7 +92,7 @@ public static void doTestDelete(final Table table, FlushCache flusher) delete.addColumns(FAMILY_NAME, QUALIFIER_NAME, T2); table.delete(delete); - // Should only be current value in set. Assert this is so + // Should only be current value in set. Assert this is so assertOnlyLatest(table, HConstants.LATEST_TIMESTAMP); // Flush everything out to disk and then redo above tests @@ -104,7 +101,7 @@ public static void doTestDelete(final Table table, FlushCache flusher) } private static void assertOnlyLatest(final Table incommon, final long currentTime) - throws IOException { + throws IOException { Get get = null; get = new Get(ROW); get.addColumn(FAMILY_NAME, QUALIFIER_NAME); @@ -116,21 +113,20 @@ private static void assertOnlyLatest(final Table incommon, final long currentTim } /* - * Assert that returned versions match passed in timestamps and that results - * are returned in the right order. Assert that values when converted to - * longs match the corresponding passed timestamp. + * Assert that returned versions match passed in timestamps and that results are returned in the + * right order. Assert that values when converted to longs match the corresponding passed + * timestamp. * @param r * @param tss * @throws IOException */ - public static void assertVersions(final Table incommon, final long [] tss) - throws IOException { + public static void assertVersions(final Table incommon, final long[] tss) throws IOException { // Assert that 'latest' is what we expect. Get get = null; get = new Get(ROW); get.addColumn(FAMILY_NAME, QUALIFIER_NAME); Result r = incommon.get(get); - byte [] bytes = r.getValue(FAMILY_NAME, QUALIFIER_NAME); + byte[] bytes = r.getValue(FAMILY_NAME, QUALIFIER_NAME); long t = Bytes.toLong(bytes); Assert.assertEquals(tss[0], t); @@ -140,9 +136,9 @@ public static void assertVersions(final Table incommon, final long [] tss) get.addColumn(FAMILY_NAME, QUALIFIER_NAME); get.readVersions(tss.length); Result result = incommon.get(get); - Cell [] kvs = result.rawCells(); + Cell[] kvs = result.rawCells(); Assert.assertEquals(kvs.length, tss.length); - for(int i=0;ivalue = -// new TreeMap(Bytes.BYTES_COMPARATOR); -// while (scanner.next(key, value)) { -// assertTrue(key.getTimestamp() <= ts); -// // Content matches the key or HConstants.LATEST_TIMESTAMP. -// // (Key does not match content if we 'put' with LATEST_TIMESTAMP). -// long l = Bytes.toLong(value.get(COLUMN).getValue()); -// assertTrue(key.getTimestamp() == l || -// HConstants.LATEST_TIMESTAMP == l); -// count++; -// value.clear(); -// } + // HStoreKey key = new HStoreKey(); + // TreeMapvalue = + // new TreeMap(Bytes.BYTES_COMPARATOR); + // while (scanner.next(key, value)) { + // assertTrue(key.getTimestamp() <= ts); + // // Content matches the key or HConstants.LATEST_TIMESTAMP. + // // (Key does not match content if we 'put' with LATEST_TIMESTAMP). 
+ // long l = Bytes.toLong(value.get(COLUMN).getValue()); + // assertTrue(key.getTimestamp() == l || + // HConstants.LATEST_TIMESTAMP == l); + // count++; + // value.clear(); + // } } finally { scanner.close(); } return count; } - public static void put(final Table loader, final long ts) - throws IOException { + public static void put(final Table loader, final long ts) throws IOException { put(loader, Bytes.toBytes(ts), ts); } - public static void put(final Table loader) - throws IOException { + public static void put(final Table loader) throws IOException { long ts = HConstants.LATEST_TIMESTAMP; put(loader, Bytes.toBytes(ts), ts); } @@ -245,9 +236,7 @@ public static void put(final Table loader) * @param ts * @throws IOException */ - public static void put(final Table loader, final byte [] bytes, - final long ts) - throws IOException { + public static void put(final Table loader, final byte[] bytes, final long ts) throws IOException { Put put = new Put(ROW, ts); put.setDurability(Durability.SKIP_WAL); put.addColumn(FAMILY_NAME, QUALIFIER_NAME, bytes); @@ -258,21 +247,17 @@ public static void delete(final Table loader) throws IOException { delete(loader, null); } - public static void delete(final Table loader, final byte [] column) - throws IOException { + public static void delete(final Table loader, final byte[] column) throws IOException { delete(loader, column, HConstants.LATEST_TIMESTAMP); } - public static void delete(final Table loader, final long ts) - throws IOException { + public static void delete(final Table loader, final long ts) throws IOException { delete(loader, null, ts); } - public static void delete(final Table loader, final byte [] column, - final long ts) - throws IOException { - Delete delete = ts == HConstants.LATEST_TIMESTAMP? - new Delete(ROW): new Delete(ROW, ts); + public static void delete(final Table loader, final byte[] column, final long ts) + throws IOException { + Delete delete = ts == HConstants.LATEST_TIMESTAMP ? new Delete(ROW) : new Delete(ROW, ts); delete.addColumn(FAMILY_NAME, QUALIFIER_NAME, ts); loader.delete(delete); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java index fc57c43f30b6..336b013f9aa2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -87,7 +87,7 @@ * Test that the {@link HFileArchiver} correctly removes all the parts of a region when cleaning up * a region */ -@Category({LargeTests.class, MiscTests.class}) +@Category({ LargeTests.class, MiscTests.class }) public class TestHFileArchiving { @ClassRule @@ -145,15 +145,13 @@ public static void cleanupTest() throws Exception { public void testArchiveStoreFilesDifferentFileSystemsWallWithSchemaPlainRoot() throws Exception { String walDir = "mockFS://mockFSAuthority:9876/mockDir/wals/"; String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/"; - testArchiveStoreFilesDifferentFileSystems(walDir, baseDir, - HFileArchiver::archiveStoreFiles); + testArchiveStoreFilesDifferentFileSystems(walDir, baseDir, HFileArchiver::archiveStoreFiles); } @Test public void testArchiveStoreFilesDifferentFileSystemsWallNullPlainRoot() throws Exception { String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/"; - testArchiveStoreFilesDifferentFileSystems(null, baseDir, - HFileArchiver::archiveStoreFiles); + testArchiveStoreFilesDifferentFileSystems(null, baseDir, HFileArchiver::archiveStoreFiles); } @Test @@ -185,31 +183,31 @@ public void testArchiveStoreFilesDifferentFileSystemsArchiveFileMismatch() throw } private void testArchiveStoreFilesDifferentFileSystems(String walDir, String expectedBase, - ArchivingFunction> archivingFunction) throws IOException { + ArchivingFunction> archivingFunction) + throws IOException { testArchiveStoreFilesDifferentFileSystems(walDir, expectedBase, false, true, false, archivingFunction); } private void testArchiveStoreFilesDifferentFileSystems(String walDir, String expectedBase, - boolean archiveFileExists, boolean sourceFileExists, boolean archiveFileDifferentLength, - ArchivingFunction> archivingFunction) throws IOException { + boolean archiveFileExists, boolean sourceFileExists, boolean archiveFileDifferentLength, + ArchivingFunction> archivingFunction) + throws IOException { FileSystem mockedFileSystem = mock(FileSystem.class); Configuration conf = new Configuration(UTIL.getConfiguration()); - if(walDir != null) { + if (walDir != null) { conf.set(CommonFSUtils.HBASE_WAL_DIR, walDir); } when(mockedFileSystem.getScheme()).thenReturn("mockFS"); when(mockedFileSystem.mkdirs(any())).thenReturn(true); - HashMap existsTracker = new HashMap<>(); + HashMap existsTracker = new HashMap<>(); Path filePath = new Path("/mockDir/wals/mockFile"); - String expectedDir = expectedBase + - "archive/data/default/mockTable/mocked-region-encoded-name/testfamily/mockFile"; + String expectedDir = expectedBase + + "archive/data/default/mockTable/mocked-region-encoded-name/testfamily/mockFile"; existsTracker.put(new Path(expectedDir), archiveFileExists); existsTracker.put(filePath, sourceFileExists); - when(mockedFileSystem.exists(any())).thenAnswer(invocation -> - existsTracker.getOrDefault((Path)invocation.getArgument(0), true)); + when(mockedFileSystem.exists(any())).thenAnswer( + invocation -> existsTracker.getOrDefault((Path) invocation.getArgument(0), true)); FileStatus mockedStatus = mock(FileStatus.class); when(mockedStatus.getLen()).thenReturn(12L).thenReturn(archiveFileDifferentLength ? 
34L : 12L); when(mockedFileSystem.getFileStatus(any())).thenReturn(mockedStatus); @@ -223,7 +221,7 @@ private void testArchiveStoreFilesDifferentFileSystems(String walDir, String exp List list = new ArrayList<>(); list.add(mockedFile); when(mockedFile.getPath()).thenReturn(filePath); - when(mockedFileSystem.rename(any(),any())).thenReturn(true); + when(mockedFileSystem.rename(any(), any())).thenReturn(true); archivingFunction.apply(conf, mockedFileSystem, mockedRegion, tableDir, family, list); if (sourceFileExists) { @@ -254,7 +252,7 @@ private void testArchiveStoreFilesDifferentFileSystems(String walDir, String exp @FunctionalInterface private interface ArchivingFunction { void apply(Configuration config, FS fs, Region region, Dir dir, Family family, Files files) - throws IOException; + throws IOException; } @Test @@ -272,9 +270,8 @@ private void testArchiveRecoveredEditsWalDirNullOrSame(String walDir) throws Exc try { String baseDir = "mockFS://mockFSAuthority:9876/hbase/"; UTIL.getConfiguration().set(HConstants.HBASE_DIR, baseDir); - testArchiveStoreFilesDifferentFileSystems(walDir, baseDir, - (conf, fs, region, dir, family, list) -> HFileArchiver - .archiveRecoveredEdits(conf, fs, region, family, list)); + testArchiveStoreFilesDifferentFileSystems(walDir, baseDir, (conf, fs, region, dir, family, + list) -> HFileArchiver.archiveRecoveredEdits(conf, fs, region, family, list)); } finally { UTIL.getConfiguration().set(HConstants.HBASE_DIR, originalRootDir); } @@ -283,20 +280,18 @@ private void testArchiveRecoveredEditsWalDirNullOrSame(String walDir) throws Exc @Test(expected = IOException.class) public void testArchiveRecoveredEditsWrongFS() throws Exception { String baseDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()).toString() + "/"; - //Internally, testArchiveStoreFilesDifferentFileSystems will pass a "mockedFS" + // Internally, testArchiveStoreFilesDifferentFileSystems will pass a "mockedFS" // to HFileArchiver.archiveRecoveredEdits, but since wal-dir is supposedly on same FS // as root dir it would lead to conflicting FSes and an IOException is expected. - testArchiveStoreFilesDifferentFileSystems("/wal-dir", baseDir, - (conf, fs, region, dir, family, list) -> HFileArchiver - .archiveRecoveredEdits(conf, fs, region, family, list)); + testArchiveStoreFilesDifferentFileSystems("/wal-dir", baseDir, (conf, fs, region, dir, family, + list) -> HFileArchiver.archiveRecoveredEdits(conf, fs, region, family, list)); } @Test public void testArchiveRecoveredEditsWalDirDifferentFS() throws Exception { String walDir = "mockFS://mockFSAuthority:9876/mockDir/wals/"; - testArchiveStoreFilesDifferentFileSystems(walDir, walDir, - (conf, fs, region, dir, family, list) -> - HFileArchiver.archiveRecoveredEdits(conf, fs, region, family, list)); + testArchiveStoreFilesDifferentFileSystems(walDir, walDir, (conf, fs, region, dir, family, + list) -> HFileArchiver.archiveRecoveredEdits(conf, fs, region, family, list)); } @Test @@ -355,8 +350,8 @@ public boolean accept(Path p) { /** * Test that the region directory is removed when we archive a region without store files, but * still has hidden files. - * @throws IOException throws an IOException if there's problem creating a table - * or if there's an issue with accessing FileSystem. + * @throws IOException throws an IOException if there's problem creating a table or if there's an + * issue with accessing FileSystem. 
*/ @Test public void testDeleteRegionWithNoStoreFiles() throws IOException { @@ -407,9 +402,8 @@ public boolean accept(Path file) { } private List initTableForArchivingRegions(TableName tableName) throws IOException { - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d") - }; + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d") }; UTIL.createTable(tableName, TEST_FAM, splitKeys); @@ -440,20 +434,20 @@ public void testArchiveRegions() throws Exception { Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); Path tableDir = CommonFSUtils.getTableDir(rootDir, regions.get(0).getRegionInfo().getTable()); List regionDirList = regions.stream() - .map(region -> FSUtils.getRegionDirFromTableDir(tableDir, region.getRegionInfo())) - .collect(Collectors.toList()); + .map(region -> FSUtils.getRegionDirFromTableDir(tableDir, region.getRegionInfo())) + .collect(Collectors.toList()); HFileArchiver.archiveRegions(UTIL.getConfiguration(), fs, rootDir, tableDir, regionDirList); // check for the existence of the archive directory and some files in it for (HRegion region : regions) { - Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), - region); + Path archiveDir = + HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region); assertTrue(fs.exists(archiveDir)); // check to make sure the store directory was copied - FileStatus[] stores = fs.listStatus(archiveDir, - p -> !p.getName().contains(HConstants.RECOVERED_EDITS_DIR)); + FileStatus[] stores = + fs.listStatus(archiveDir, p -> !p.getName().contains(HConstants.RECOVERED_EDITS_DIR)); assertTrue(stores.length == 1); // make sure we archived the store files @@ -462,14 +456,14 @@ public void testArchiveRegions() throws Exception { } // then ensure the region's directories aren't present - for (Path regionDir: regionDirList) { + for (Path regionDir : regionDirList) { assertFalse(fs.exists(regionDir)); } UTIL.deleteTable(tableName); } - @Test(expected=IOException.class) + @Test(expected = IOException.class) public void testArchiveRegionsWhenPermissionDenied() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); List regions = initTableForArchivingRegions(tableName); @@ -478,18 +472,17 @@ public void testArchiveRegionsWhenPermissionDenied() throws Exception { Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); Path tableDir = CommonFSUtils.getTableDir(rootDir, regions.get(0).getRegionInfo().getTable()); List regionDirList = regions.stream() - .map(region -> FSUtils.getRegionDirFromTableDir(tableDir, region.getRegionInfo())) - .collect(Collectors.toList()); + .map(region -> FSUtils.getRegionDirFromTableDir(tableDir, region.getRegionInfo())) + .collect(Collectors.toList()); // To create a permission denied error, we do archive regions as a non-current user - UserGroupInformation - ugi = UserGroupInformation.createUserForTesting("foo1234", new String[]{"group1"}); + UserGroupInformation ugi = + UserGroupInformation.createUserForTesting("foo1234", new String[] { "group1" }); try { ugi.doAs((PrivilegedExceptionAction) () -> { FileSystem fs = UTIL.getTestFileSystem(); - HFileArchiver.archiveRegions(UTIL.getConfiguration(), fs, rootDir, tableDir, - regionDirList); + HFileArchiver.archiveRegions(UTIL.getConfiguration(), fs, rootDir, tableDir, regionDirList); return null; }); } catch (IOException e) { @@ -534,7 +527,7 @@ public void testArchiveOnTableDelete() 
throws Exception { clearArchiveDirectory(); // then get the current store files - byte[][]columns = region.getTableDescriptor().getColumnFamilyNames().toArray(new byte[0][]); + byte[][] columns = region.getTableDescriptor().getColumnFamilyNames().toArray(new byte[0][]); List storeFiles = region.getStoreFileList(columns); // then delete the table so the hfiles get archived @@ -545,7 +538,7 @@ public void testArchiveOnTableDelete() throws Exception { } private void assertArchiveFiles(FileSystem fs, List storeFiles, long timeout) - throws IOException { + throws IOException { long end = EnvironmentEdgeManager.currentTime() + timeout; Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration()); List archivedFiles = new ArrayList<>(); @@ -576,7 +569,6 @@ private void assertArchiveFiles(FileSystem fs, List storeFiles, long tim archivedFiles.containsAll(storeFiles)); } - /** * Test that the store files are archived when a column family is removed. * @throws java.io.IOException if there's a problem creating a table. @@ -585,7 +577,7 @@ private void assertArchiveFiles(FileSystem fs, List storeFiles, long tim @Test public void testArchiveOnTableFamilyDelete() throws IOException, InterruptedException { final TableName tableName = TableName.valueOf(name.getMethodName()); - UTIL.createTable(tableName, new byte[][] {TEST_FAM, Bytes.toBytes("fam2")}); + UTIL.createTable(tableName, new byte[][] { TEST_FAM, Bytes.toBytes("fam2") }); List servingRegions = UTIL.getHBaseCluster().getRegions(tableName); // make sure we only have 1 region serving this table @@ -616,7 +608,7 @@ public void testArchiveOnTableFamilyDelete() throws IOException, InterruptedExce clearArchiveDirectory(); // then get the current store files - byte[][]columns = region.getTableDescriptor().getColumnFamilyNames().toArray(new byte[0][]); + byte[][] columns = region.getTableDescriptor().getColumnFamilyNames().toArray(new byte[0][]); List storeFiles = region.getStoreFileList(columns); // then delete the table so the hfiles get archived @@ -640,8 +632,9 @@ public void testCleaningRace() throws Exception { FileSystem fs = UTIL.getTestFileSystem(); Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); - Path regionDir = new Path(CommonFSUtils.getTableDir(new Path("./"), - TableName.valueOf(name.getMethodName())), "abcdef"); + Path regionDir = + new Path(CommonFSUtils.getTableDir(new Path("./"), TableName.valueOf(name.getMethodName())), + "abcdef"); Path familyDir = new Path(regionDir, "cf"); Path sourceRegionDir = new Path(rootDir, regionDir); @@ -657,7 +650,7 @@ public void testCleaningRace() throws Exception { // Keep creating/archiving new files while the cleaner is running in the other thread long startTime = EnvironmentEdgeManager.currentTime(); for (long fid = 0; (EnvironmentEdgeManager.currentTime() - startTime) < TEST_TIME; ++fid) { - Path file = new Path(familyDir, String.valueOf(fid)); + Path file = new Path(familyDir, String.valueOf(fid)); Path sourceFile = new Path(rootDir, file); Path archiveFile = new Path(archiveDir, file); @@ -665,8 +658,7 @@ public void testCleaningRace() throws Exception { try { // Try to archive the file - HFileArchiver.archiveRegion(fs, rootDir, - sourceRegionDir.getParent(), sourceRegionDir); + HFileArchiver.archiveRegion(fs, rootDir, sourceRegionDir.getParent(), sourceRegionDir); // The archiver succeded, the file is no longer in the original location // but it's in the archive location. 
@@ -703,8 +695,9 @@ public void testArchiveRegionTableAndRegionDirsNull() throws IOException { @Test public void testArchiveRegionWithTableDirNull() throws IOException { - Path regionDir = new Path(CommonFSUtils.getTableDir(new Path("./"), - TableName.valueOf(name.getMethodName())), "xyzabc"); + Path regionDir = + new Path(CommonFSUtils.getTableDir(new Path("./"), TableName.valueOf(name.getMethodName())), + "xyzabc"); Path familyDir = new Path(regionDir, "rd"); Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace"); Path file = new Path(familyDir, "1"); @@ -720,8 +713,9 @@ public void testArchiveRegionWithTableDirNull() throws IOException { @Test public void testArchiveRegionWithRegionDirNull() throws IOException { - Path regionDir = new Path(CommonFSUtils.getTableDir(new Path("./"), - TableName.valueOf(name.getMethodName())), "elgn4nf"); + Path regionDir = + new Path(CommonFSUtils.getTableDir(new Path("./"), TableName.valueOf(name.getMethodName())), + "elgn4nf"); Path familyDir = new Path(regionDir, "rdar"); Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace"); Path file = new Path(familyDir, "2"); @@ -731,15 +725,15 @@ public void testArchiveRegionWithRegionDirNull() throws IOException { Path sourceRegionDir = new Path(rootDir, regionDir); fileSystem.mkdirs(sourceRegionDir); // Try to archive the file but with null regionDir, can't delete sourceFile - assertFalse(HFileArchiver.archiveRegion(fileSystem, rootDir, sourceRegionDir.getParent(), - null)); + assertFalse( + HFileArchiver.archiveRegion(fileSystem, rootDir, sourceRegionDir.getParent(), null)); assertTrue(fileSystem.exists(sourceRegionDir)); fileSystem.delete(sourceRegionDir, true); } // Avoid passing a null master to CleanerChore, see HBASE-21175 private HFileCleaner getHFileCleaner(Stoppable stoppable, Configuration conf, FileSystem fs, - Path archiveDir) throws IOException { + Path archiveDir) throws IOException { Map params = new HashMap<>(); params.put(HMaster.MASTER, UTIL.getMiniHBaseCluster().getMaster()); HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir, POOL); @@ -747,8 +741,8 @@ private HFileCleaner getHFileCleaner(Stoppable stoppable, Configuration conf, Fi } private void clearArchiveDirectory() throws IOException { - UTIL.getTestFileSystem().delete( - new Path(UTIL.getDefaultRootDirPath(), HConstants.HFILE_ARCHIVE_DIRECTORY), true); + UTIL.getTestFileSystem() + .delete(new Path(UTIL.getDefaultRootDirPath(), HConstants.HFILE_ARCHIVE_DIRECTORY), true); } /** @@ -758,7 +752,7 @@ private void clearArchiveDirectory() throws IOException { * @return a list of all files in the directory and sub-directories * @throws java.io.IOException throws IOException in case FS is unavailable */ - private List getAllFileNames(final FileSystem fs, Path archiveDir) throws IOException { + private List getAllFileNames(final FileSystem fs, Path archiveDir) throws IOException { FileStatus[] files = CommonFSUtils.listStatus(fs, archiveDir, new PathFilter() { @Override public boolean accept(Path p) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index 92b6b78e14cb..5c6b80c7e78c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed 
to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -74,7 +74,7 @@ * Spin up a small cluster and check that the hfiles of region are properly long-term archived as * specified via the {@link ZKTableArchiveClient}. */ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestZooKeeperTableArchiveClient { @ClassRule @@ -119,7 +119,7 @@ public static void setupCluster() throws Exception { String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher); ZKUtil.createWithParents(watcher, archivingZNode); rss = mock(RegionServerServices.class); - POOL= DirScanPool.getHFileCleanerScanPool(UTIL.getConfiguration()); + POOL = DirScanPool.getHFileCleanerScanPool(UTIL.getConfiguration()); } private static void setupConf(Configuration conf) { @@ -133,7 +133,7 @@ public void tearDown() throws Exception { FileSystem fs = UTIL.getTestFileSystem(); // cleanup each of the files/directories registered for (Path file : toCleanup) { - // remove the table and archive directories + // remove the table and archive directories CommonFSUtils.delete(fs, file, true); } } catch (IOException e) { @@ -164,8 +164,7 @@ public void testArchivingEnableDisable() throws Exception { // 1. turn on hfile backups LOG.debug("----Starting archiving"); archivingClient.enableHFileBackupAsync(TABLE_NAME); - assertTrue("Archving didn't get turned on", archivingClient - .getArchivingEnabled(TABLE_NAME)); + assertTrue("Archving didn't get turned on", archivingClient.getArchivingEnabled(TABLE_NAME)); // 2. Turn off archiving and make sure its off archivingClient.disableHFileBackup(); @@ -173,8 +172,7 @@ public void testArchivingEnableDisable() throws Exception { // 3. Check enable/disable on a single table archivingClient.enableHFileBackupAsync(TABLE_NAME); - assertTrue("Archving didn't get turned on", archivingClient - .getArchivingEnabled(TABLE_NAME)); + assertTrue("Archving didn't get turned on", archivingClient.getArchivingEnabled(TABLE_NAME)); // 4. 
Turn off archiving and make sure its off archivingClient.disableHFileBackup(TABLE_NAME); @@ -267,12 +265,12 @@ public void testMultipleTables() throws Exception { regions = new ArrayList<>(); regions.add(otherRegion); Mockito.doReturn(regions).when(rss).getRegions(); - final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop, - rss, false); + final CompactedHFilesDischarger compactionCleaner1 = + new CompactedHFilesDischarger(100, stop, rss, false); loadFlushAndCompact(otherRegion, TEST_FAM); compactionCleaner1.chore(); // get the current hfiles in the archive directory - // Should be archived + // Should be archived List files = getAllFiles(fs, archiveDir); if (files == null) { CommonFSUtils.logFileSystemState(fs, archiveDir, LOG); @@ -308,7 +306,7 @@ public void testMultipleTables() throws Exception { // know the cleaner ran, so now check all the files again to make sure they are still there List archivedFiles = getAllFiles(fs, archiveDir); int archivedForPrimary = 0; - for(Path file: archivedFiles) { + for (Path file : archivedFiles) { String tableName = file.getParent().getParent().getParent().getName(); // ensure we don't have files from the non-archived table assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable)); @@ -318,15 +316,14 @@ public void testMultipleTables() throws Exception { } assertEquals("Not all archived files for the primary table were retained.", - initialCountForPrimary, archivedForPrimary); + initialCountForPrimary, archivedForPrimary); // but we still have the archive directory assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir)); } - private void createArchiveDirectory() throws IOException { - //create the archive and test directory + // create the archive and test directory FileSystem fs = UTIL.getTestFileSystem(); Path archiveDir = getArchiveDir(); fs.mkdirs(archiveDir); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java index 6134a54bba54..9f4fd7dd0976 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableRegionReplicasRead.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -89,7 +89,7 @@ public static List params() { protected static volatile boolean FAIL_PRIMARY_GET = false; protected static ConcurrentMap REPLICA_ID_TO_COUNT = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); public static final class FailPrimaryGetCP implements RegionObserver, RegionCoprocessor { @@ -105,7 +105,7 @@ private void recordAndTryFail(ObserverContext c) return; } REPLICA_ID_TO_COUNT.computeIfAbsent(region.getReplicaId(), k -> new AtomicInteger()) - .incrementAndGet(); + .incrementAndGet(); if (region.getReplicaId() == RegionReplicaUtil.DEFAULT_REPLICA_ID && FAIL_PRIMARY_GET) { throw new IOException("Inject error"); } @@ -137,9 +137,11 @@ private static boolean allReplicasHaveRow(byte[] row) throws IOException { protected static void startClusterAndCreateTable() throws Exception { TEST_UTIL.startMiniCluster(3); - TEST_UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setRegionReplication(REPLICA_COUNT) - .setCoprocessor(FailPrimaryGetCP.class.getName()).build()); + TEST_UTIL.getAdmin() + .createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setRegionReplication(REPLICA_COUNT).setCoprocessor(FailPrimaryGetCP.class.getName()) + .build()); TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME); ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); } @@ -159,8 +161,8 @@ public static void tearDownAfterClass() throws Exception { protected static int getSecondaryGetCount() { return REPLICA_ID_TO_COUNT.entrySet().stream() - .filter(e -> e.getKey().intValue() != RegionReplicaUtil.DEFAULT_REPLICA_ID) - .mapToInt(e -> e.getValue().get()).sum(); + .filter(e -> e.getKey().intValue() != RegionReplicaUtil.DEFAULT_REPLICA_ID) + .mapToInt(e -> e.getValue().get()).sum(); } protected static int getPrimaryGetCount() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java index ba160f1d7c3f..737c2dcaf5e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,6 +26,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; import static org.junit.Assert.fail; + import io.opentelemetry.sdk.trace.data.SpanData; import java.io.IOException; import java.io.UncheckedIOException; @@ -65,13 +66,10 @@ public abstract class AbstractTestAsyncTableScan { protected static final OpenTelemetryClassRule otelClassRule = OpenTelemetryClassRule.create(); protected static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder() - .setMiniClusterOption(StartTestingClusterOption.builder() - .numWorkers(3) - .build()) - .build(); + .setMiniClusterOption(StartTestingClusterOption.builder().numWorkers(3).build()).build(); protected static final ConnectionRule connectionRule = - ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection); + ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection); private static final class Setup extends ExternalResource { @Override @@ -86,20 +84,18 @@ protected void before() throws Throwable { testingUtil.createTable(TABLE_NAME, FAMILY, splitKeys); testingUtil.waitTableAvailable(TABLE_NAME); conn.getTable(TABLE_NAME) - .putAll(IntStream.range(0, COUNT) - .mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i))) - .addColumn(FAMILY, CQ1, Bytes.toBytes(i)) - .addColumn(FAMILY, CQ2, Bytes.toBytes(i * i))) - .collect(Collectors.toList())) - .get(); + .putAll(IntStream.range(0, COUNT) + .mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i))) + .addColumn(FAMILY, CQ1, Bytes.toBytes(i)) + .addColumn(FAMILY, CQ2, Bytes.toBytes(i * i))) + .collect(Collectors.toList())) + .get(); } } @ClassRule public static final TestRule classRule = RuleChain.outerRule(otelClassRule) - .around(miniClusterRule) - .around(connectionRule) - .around(new Setup()); + .around(miniClusterRule).around(connectionRule).around(new Setup()); @Rule public final OpenTelemetryTestRule otelTestRule = new OpenTelemetryTestRule(otelClassRule); @@ -198,22 +194,19 @@ protected final List convertFromBatchResult(List results) { protected static void waitForSpan(final Matcher parentSpanMatcher) { final Configuration conf = miniClusterRule.getTestingUtility().getConfiguration(); Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>( - "Span for test failed to complete.", otelClassRule::getSpans, hasItem(parentSpanMatcher))); + "Span for test failed to complete.", otelClassRule::getSpans, hasItem(parentSpanMatcher))); } @Test public void testScanAll() throws Exception { List results = doScan(createScan(), -1); // make sure all scanners are closed at RS side - miniClusterRule.getTestingUtility() - .getHBaseCluster() - .getRegionServerThreads() - .stream() - .map(JVMClusterUtil.RegionServerThread::getRegionServer) - .forEach(rs -> assertEquals( - "The scanner count of " + rs.getServerName() + " is " + - rs.getRSRpcServices().getScannersCount(), - 0, rs.getRSRpcServices().getScannersCount())); + miniClusterRule.getTestingUtility().getHBaseCluster().getRegionServerThreads().stream() + .map(JVMClusterUtil.RegionServerThread::getRegionServer) + .forEach(rs -> assertEquals( + "The scanner count of " + rs.getServerName() + " is " + + rs.getRSRpcServices().getScannersCount(), + 0, rs.getRSRpcServices().getScannersCount())); assertEquals(COUNT, results.size()); IntStream.range(0, COUNT).forEach(i -> { Result result = results.get(i); @@ -230,8 +223,8 @@ private void 
assertResultEquals(Result result, int i) { @Test public void testReversedScanAll() throws Exception { - List results = TraceUtil.trace( - () -> doScan(createScan().setReversed(true), -1), testName.getMethodName()); + List results = + TraceUtil.trace(() -> doScan(createScan().setReversed(true), -1), testName.getMethodName()); assertEquals(COUNT, results.size()); IntStream.range(0, COUNT).forEach(i -> assertResultEquals(results.get(i), COUNT - i - 1)); assertTraceContinuity(); @@ -240,8 +233,8 @@ public void testReversedScanAll() throws Exception { @Test public void testScanNoStopKey() throws Exception { int start = 345; - List results = TraceUtil.trace(() -> - doScan(createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))), -1), + List results = TraceUtil.trace( + () -> doScan(createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))), -1), testName.getMethodName()); assertEquals(COUNT - start, results.size()); IntStream.range(0, COUNT - start).forEach(i -> assertResultEquals(results.get(i), start + i)); @@ -251,9 +244,8 @@ public void testScanNoStopKey() throws Exception { @Test public void testReverseScanNoStopKey() throws Exception { int start = 765; - final Scan scan = createScan() - .withStartRow(Bytes.toBytes(String.format("%03d", start))) - .setReversed(true); + final Scan scan = + createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))).setReversed(true); List results = TraceUtil.trace(() -> doScan(scan, -1), testName.getMethodName()); assertEquals(start + 1, results.size()); IntStream.range(0, start + 1).forEach(i -> assertResultEquals(results.get(i), start - i)); @@ -262,9 +254,10 @@ public void testReverseScanNoStopKey() throws Exception { @Test public void testScanWrongColumnFamily() { - final Exception e = assertThrows(Exception.class, () -> TraceUtil.trace( - () -> doScan(createScan().addFamily(Bytes.toBytes("WrongColumnFamily")), -1), - testName.getMethodName())); + final Exception e = assertThrows(Exception.class, + () -> TraceUtil.trace( + () -> doScan(createScan().addFamily(Bytes.toBytes("WrongColumnFamily")), -1), + testName.getMethodName())); // hamcrest generic enforcement for `anyOf` is a pain; skip it // but -- don't we always unwrap ExecutionExceptions -- bug? 
if (e instanceof NoSuchColumnFamilyException) { @@ -272,8 +265,7 @@ public void testScanWrongColumnFamily() { assertThat(ex, isA(NoSuchColumnFamilyException.class)); } else if (e instanceof ExecutionException) { final ExecutionException ex = (ExecutionException) e; - assertThat(ex, allOf( - isA(ExecutionException.class), + assertThat(ex, allOf(isA(ExecutionException.class), hasProperty("cause", isA(NoSuchColumnFamilyException.class)))); } else { fail("Found unexpected Exception " + e); @@ -282,15 +274,15 @@ public void testScanWrongColumnFamily() { } private void testScan(int start, boolean startInclusive, int stop, boolean stopInclusive, - int limit) throws Exception { + int limit) throws Exception { testScan(start, startInclusive, stop, stopInclusive, limit, -1); } private void testScan(int start, boolean startInclusive, int stop, boolean stopInclusive, int limit, int closeAfter) throws Exception { Scan scan = - createScan().withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive) - .withStopRow(Bytes.toBytes(String.format("%03d", stop)), stopInclusive); + createScan().withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive) + .withStopRow(Bytes.toBytes(String.format("%03d", stop)), stopInclusive); if (limit > 0) { scan.setLimit(limit); } @@ -310,9 +302,9 @@ private void testScan(int start, boolean startInclusive, int stop, boolean stopI private void testReversedScan(int start, boolean startInclusive, int stop, boolean stopInclusive, int limit) throws Exception { - Scan scan = - createScan().withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive) - .withStopRow(Bytes.toBytes(String.format("%03d", stop)), stopInclusive).setReversed(true); + Scan scan = createScan() + .withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive) + .withStopRow(Bytes.toBytes(String.format("%03d", stop)), stopInclusive).setReversed(true); if (limit > 0) { scan.setLimit(limit); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java index d1f0e1aa1be6..455d1db26709 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -60,8 +60,8 @@ public void setUp() throws IOException { @Test public void testOperationTimeout() throws IOException { TableBuilder builder = - TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(Integer.MAX_VALUE) - .setReadRpcTimeout(Integer.MAX_VALUE).setWriteRpcTimeout(Integer.MAX_VALUE); + TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(Integer.MAX_VALUE) + .setReadRpcTimeout(Integer.MAX_VALUE).setWriteRpcTimeout(Integer.MAX_VALUE); // Check that it works if the timeout is big enough SleepAndFailFirstTime.ct.set(0); try (Table table = builder.setOperationTimeout(120 * 1000).build()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java index aedb8148dd1b..ca26d46ea8be 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCIRpcTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,9 +40,9 @@ public abstract class AbstractTestCIRpcTimeout extends AbstractTestCITimeout { @Before public void setUp() throws IOException { tableName = TableName.valueOf(name.getMethodName()); - TableDescriptor htd = - TableDescriptorBuilder.newBuilder(tableName).setCoprocessor(SleepCoprocessor.class.getName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM_NAM)).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .setCoprocessor(SleepCoprocessor.class.getName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM_NAM)).build(); TEST_UTIL.getAdmin().createTable(htd); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java index 5923236c854f..3656030812ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestCITimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java index f14faf7568c9..93a654a1cd60 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,8 +50,8 @@ protected static void startClusterAndCreateTable() throws Exception { UTIL.startMiniCluster(3); HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, REGION_REPLICATION); TableDescriptor td = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(REGION_REPLICATION) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(REGION_REPLICATION) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); SPLIT_KEYS = new byte[9][]; for (int i = 0; i < 9; i++) { SPLIT_KEYS[i] = Bytes.toBytes(Integer.toString(i + 1)); @@ -59,7 +59,7 @@ protected static void startClusterAndCreateTable() throws Exception { UTIL.getAdmin().createTable(td, SPLIT_KEYS); UTIL.waitTableAvailable(TABLE_NAME); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration())) { + ConnectionRegistryFactory.getRegistry(UTIL.getConfiguration())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } UTIL.getAdmin().balancerSwitch(false, true); @@ -114,10 +114,10 @@ private void assertRegionLocation(HRegionLocation loc, int index, int replicaId) private ServerName findRegionLocation(TableName tableName, byte[] startKey, int replicaId) { return UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer()) - .filter(rs -> rs.getRegions(tableName).stream().map(Region::getRegionInfo) - .anyMatch(r -> r.containsRow(startKey) && r.getReplicaId() == replicaId)) - .findFirst().get().getServerName(); + .map(t -> t.getRegionServer()) + .filter(rs -> rs.getRegions(tableName).stream().map(Region::getRegionInfo) + .anyMatch(r -> r.containsRow(startKey) && r.getReplicaId() == replicaId)) + .findFirst().get().getServerName(); } @Test @@ -170,7 +170,7 @@ private void assertMetaRegionLocation(HRegionLocation loc, int replicaId) { assertArrayEquals(HConstants.EMPTY_END_ROW, region.getEndKey()); assertEquals(replicaId, region.getReplicaId()); ServerName expected = - findRegionLocation(TableName.META_TABLE_NAME, region.getStartKey(), replicaId); + findRegionLocation(TableName.META_TABLE_NAME, region.getStartKey(), replicaId); assertEquals(expected, loc.getServerName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestResultScannerCursor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestResultScannerCursor.java index 3df7a7b53c10..84bf0acfdcb3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestResultScannerCursor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestResultScannerCursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; - import org.junit.Test; public abstract class AbstractTestResultScannerCursor extends AbstractTestScanCursor { @@ -37,8 +36,7 @@ public void testHeartbeatWithSparseFilter() throws Exception { while ((r = scanner.next()) != null) { if (num < (NUM_ROWS - 1) * NUM_FAMILIES * NUM_QUALIFIERS) { assertTrue(r.isCursor()); - assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], - r.getCursor().getRow()); + assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], r.getCursor().getRow()); } else { assertFalse(r.isCursor()); assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], r.getRow()); @@ -76,8 +74,7 @@ public void testSizeLimit() throws IOException { while ((r = scanner.next()) != null) { if (num % (NUM_FAMILIES * NUM_QUALIFIERS) != (NUM_FAMILIES * NUM_QUALIFIERS) - 1) { assertTrue(r.isCursor()); - assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], - r.getCursor().getRow()); + assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], r.getCursor().getRow()); } else { assertFalse(r.isCursor()); assertArrayEquals(ROWS[num / NUM_FAMILIES / NUM_QUALIFIERS], r.getRow()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestScanCursor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestScanCursor.java index e8d51a55743f..cc0c5d74ab3a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestScanCursor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestScanCursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestUpdateConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestUpdateConfiguration.java index ff84bfc83926..04884d366651 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestUpdateConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestUpdateConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,21 +26,20 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; /** - * Base class to test Configuration Update logic. It wraps up things needed to - * test configuration change and provides utility methods for test cluster setup, - * updating/restoring configuration file. + * Base class to test Configuration Update logic. It wraps up things needed to test configuration + * change and provides utility methods for test cluster setup, updating/restoring configuration + * file. 
*/ public abstract class AbstractTestUpdateConfiguration { private static final String SERVER_CONFIG = "hbase-site.xml"; - private static final String OVERRIDE_SERVER_CONFIG = "override-hbase-site.xml"; - private static final String BACKUP_SERVER_CONFIG = "backup-hbase-site.xml"; + private static final String OVERRIDE_SERVER_CONFIG = "override-hbase-site.xml"; + private static final String BACKUP_SERVER_CONFIG = "backup-hbase-site.xml"; private static Path configFileUnderTestDataDir; private static Path overrideConfigFileUnderTestDataDir; private static Path backupConfigFileUnderTestDataDir; - protected static void setUpConfigurationFiles(final HBaseTestingUtil testUtil) - throws Exception { + protected static void setUpConfigurationFiles(final HBaseTestingUtil testUtil) throws Exception { // Before this change, the test will update hbase-site.xml under target/test-classes and // trigger a config reload. Since target/test-classes/hbase-site.xml is being used by // other testing cases at the same time, this update will break other testing cases so it will @@ -63,8 +62,7 @@ protected static void setUpConfigurationFiles(final HBaseTestingUtil testUtil) // Copy override config file overrider-hbase-site.xml from target/test-class to // target/test-data/UUID directory. - Path overrideConfigFile = Paths.get("target", "test-classes", - OVERRIDE_SERVER_CONFIG); + Path overrideConfigFile = Paths.get("target", "test-classes", OVERRIDE_SERVER_CONFIG); overrideConfigFileUnderTestDataDir = Paths.get(absoluteDataPath, OVERRIDE_SERVER_CONFIG); Files.copy(overrideConfigFile, overrideConfigFileUnderTestDataDir); @@ -81,8 +79,8 @@ protected static void addResourceToRegionServerConfiguration(final HBaseTestingU // Exposing a new method in HBaseConfiguration causes confusion. Instead, the new hbase-site.xml // under test-data directory is added to RegionServer's configuration as a workaround. for (RegionServerThread rsThread : testUtil.getMiniHBaseCluster().getRegionServerThreads()) { - rsThread.getRegionServer().getConfiguration().addResource( - testUtil.getDataTestDir(SERVER_CONFIG)); + rsThread.getRegionServer().getConfiguration() + .addResource(testUtil.getDataTestDir(SERVER_CONFIG)); } } @@ -90,27 +88,25 @@ protected static void addResourceToRegionServerConfiguration(final HBaseTestingU * Replace the hbase-site.xml file under this test's data directory with the content of the * override-hbase-site.xml file. Stashes the current existing file so that it can be restored * using {@link #restoreHBaseSiteXML()}. - * * @throws IOException if an I/O error occurs */ protected void replaceHBaseSiteXML() throws IOException { // make a backup of hbase-site.xml - Files.copy(configFileUnderTestDataDir, - backupConfigFileUnderTestDataDir, StandardCopyOption.REPLACE_EXISTING); + Files.copy(configFileUnderTestDataDir, backupConfigFileUnderTestDataDir, + StandardCopyOption.REPLACE_EXISTING); // update hbase-site.xml by overwriting it - Files.copy(overrideConfigFileUnderTestDataDir, - configFileUnderTestDataDir, StandardCopyOption.REPLACE_EXISTING); + Files.copy(overrideConfigFileUnderTestDataDir, configFileUnderTestDataDir, + StandardCopyOption.REPLACE_EXISTING); } /** * Restores the hbase-site.xml file that was stashed by a previous call to * {@link #replaceHBaseSiteXML()}. 
- * * @throws IOException if an I/O error occurs */ protected void restoreHBaseSiteXML() throws IOException { // restore hbase-site.xml - Files.copy(backupConfigFileUnderTestDataDir, - configFileUnderTestDataDir, StandardCopyOption.REPLACE_EXISTING); + Files.copy(backupConfigFileUnderTestDataDir, configFileUnderTestDataDir, + StandardCopyOption.REPLACE_EXISTING); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/BufferingScanResultConsumer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/BufferingScanResultConsumer.java index 4606ebc0790c..d3ef0758f459 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/BufferingScanResultConsumer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/BufferingScanResultConsumer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,13 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hbase.thirdparty.com.google.common.base.Throwables; - import java.io.IOException; import java.util.ArrayDeque; import java.util.Queue; - import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hbase.thirdparty.com.google.common.base.Throwables; + /** * A scan result consumer which buffers all the data in memory and you can call the {@link #take()} * method below to get the result one by one. Should only be used by tests, do not write production diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ClientPushbackTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ClientPushbackTestBase.java index c818d6f80d97..353f747113bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ClientPushbackTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ClientPushbackTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -129,8 +129,8 @@ public void testClientTracksServerPushback() throws Exception { // check that the load reported produces a nonzero delay long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats); assertNotEquals("Reported load does not produce a backoff", 0, backoffTime); - LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ " + - server + " is " + backoffTime); + LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ " + + server + " is " + backoffTime); CountDownLatch latch = new CountDownLatch(1); AtomicLong endTime = new AtomicLong(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java index 0340bdc57cfc..0b9a6117a55c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,26 +54,26 @@ public void testCloneSnapshotAfterSplittingRegion() throws IOException, Interrup // Clone the snapshot to another table TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName2, clonedTableName); SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName); verifyRowCount(TEST_UTIL, clonedTableName, snapshot1Rows); RegionStates regionStates = - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); + TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); // The region count of the cloned table should be the same as the one of the original table int openRegionCountOfOriginalTable = - regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.OPEN).size(); + regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.OPEN).size(); int openRegionCountOfClonedTable = - regionStates.getRegionByStateOfTable(clonedTableName).get(RegionState.State.OPEN).size(); + regionStates.getRegionByStateOfTable(clonedTableName).get(RegionState.State.OPEN).size(); assertEquals(openRegionCountOfOriginalTable, openRegionCountOfClonedTable); int splitRegionCountOfOriginalTable = - regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.SPLIT).size(); + regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.SPLIT).size(); int splitRegionCountOfClonedTable = - regionStates.getRegionByStateOfTable(clonedTableName).get(RegionState.State.SPLIT).size(); + regionStates.getRegionByStateOfTable(clonedTableName).get(RegionState.State.SPLIT).size(); assertEquals(splitRegionCountOfOriginalTable, splitRegionCountOfClonedTable); TEST_UTIL.deleteTable(clonedTableName); @@ -84,7 +84,7 @@ public void testCloneSnapshotAfterSplittingRegion() throws IOException, Interrup @Test public void testCloneSnapshotBeforeSplittingRegionAndDroppingTable() - throws IOException, InterruptedException { + throws IOException, InterruptedException { // Turn off the CatalogJanitor admin.catalogJanitorSwitch(false); @@ -94,7 +94,7 @@ public void testCloneSnapshotBeforeSplittingRegionAndDroppingTable() // Clone the snapshot to another table TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName2, clonedTableName); SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java index 1d7e67c6f285..24f2101fbeff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ public class CloneSnapshotFromClientCloneLinksAfterDeleteTestBase public void testCloneLinksAfterDelete() throws IOException, InterruptedException { // Clone a table from the first snapshot final TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "1-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "1-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName0, clonedTableName); verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows); @@ -42,7 +42,7 @@ public void testCloneLinksAfterDelete() throws IOException, InterruptedException // Clone the snapshot of the cloned table final TableName clonedTableName2 = - TableName.valueOf(getValidMethodName() + "2-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "2-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName2, clonedTableName2); verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows); admin.disableTable(clonedTableName2); @@ -70,7 +70,7 @@ public void testCloneLinksAfterDelete() throws IOException, InterruptedException // Clone a new table from cloned final TableName clonedTableName3 = - TableName.valueOf(getValidMethodName() + "3-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "3-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName2, clonedTableName3); verifyRowCount(TEST_UTIL, clonedTableName3, snapshot0Rows); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java index d660fff51d01..5936fc4b7986 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class CloneSnapshotFromClientErrorTestBase extends CloneSnapshotFromClien public void testCloneNonExistentSnapshot() throws IOException, InterruptedException { String snapshotName = "random-snapshot-" + EnvironmentEdgeManager.currentTime(); final TableName tableName = - TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName, tableName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java index cf25d663e84f..e455d5c9903b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ public class CloneSnapshotFromClientNormalTestBase extends CloneSnapshotFromClie @Test public void testCloneSnapshot() throws IOException, InterruptedException { TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows); testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows); testCloneSnapshot(clonedTableName, emptySnapshot, 0); @@ -53,8 +53,8 @@ private void verifyReplicasCameOnline(TableName tableName) throws IOException { public void testCloneSnapshotCrossNamespace() throws IOException, InterruptedException { String nsName = getValidMethodName() + "_ns_" + EnvironmentEdgeManager.currentTime(); admin.createNamespace(NamespaceDescriptor.create(nsName).build()); - final TableName clonedTableName = - TableName.valueOf(nsName, getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + final TableName clonedTableName = TableName.valueOf(nsName, + getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows); testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows); testCloneSnapshot(clonedTableName, emptySnapshot, 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java index fa44c5c35ec4..2867bf21ce17 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ColumnCountOnRowFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ColumnCountOnRowFilter.java index 45355f099c7f..9147f54d20e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ColumnCountOnRowFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ColumnCountOnRowFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,12 +19,11 @@ import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public final class ColumnCountOnRowFilter extends FilterBase { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java index ef3511ca6fb9..9838a21a7714 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -138,8 +138,8 @@ public CompletableFuture prepareBulkLoad(TableName tableName) { @Override public CompletableFuture bulkLoad(TableName tableName, - List> familyPaths, byte[] row, boolean assignSeqNum, Token userToken, - String bulkToken, boolean copyFiles, List clusterIds, boolean replicate) { + List> familyPaths, byte[] row, boolean assignSeqNum, Token userToken, + String bulkToken, boolean copyFiles, List clusterIds, boolean replicate) { return null; } @@ -155,7 +155,7 @@ public Connection toConnection() { @Override public CompletableFuture> - getLiveRegionServers(MasterAddressTracker masterAddrTracker, int count) { + getLiveRegionServers(MasterAddressTracker masterAddrTracker, int count) { return null; } @@ -165,9 +165,8 @@ public CompletableFuture> getAllBootstrapNodes(ServerName regio } @Override - public CompletableFuture replicate(RegionInfo replica, - List entries, int numRetries, long rpcTimeoutNs, - long operationTimeoutNs) { + public CompletableFuture replicate(RegionInfo replica, List entries, int numRetries, + long rpcTimeoutNs, long operationTimeoutNs) { return null; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java index d3390bf60f81..46377237b99f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -118,8 +118,8 @@ public CompletableFuture checkAndMutate(CheckAndMutate che } @Override - public List> checkAndMutate( - List checkAndMutates) { + public List> + checkAndMutate(List checkAndMutates) { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java index 21fa57a56615..55b95218e3f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java @@ -58,35 +58,33 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Base for TestFromClientSide* classes. - * Has common defines and utility used by all. + * Base for TestFromClientSide* classes. Has common defines and utility used by all. */ -@Category({LargeTests.class, ClientTests.class}) -@SuppressWarnings ("deprecation") +@Category({ LargeTests.class, ClientTests.class }) +@SuppressWarnings("deprecation") @RunWith(Parameterized.class) class FromClientSideBase { private static final Logger LOG = LoggerFactory.getLogger(FromClientSideBase.class); static HBaseTestingUtil TEST_UTIL; - static byte [] ROW = Bytes.toBytes("testRow"); - static byte [] FAMILY = Bytes.toBytes("testFamily"); + static byte[] ROW = Bytes.toBytes("testRow"); + static byte[] FAMILY = Bytes.toBytes("testFamily"); static final byte[] INVALID_FAMILY = Bytes.toBytes("invalidTestFamily"); - static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - static byte [] VALUE = Bytes.toBytes("testValue"); + static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + static byte[] VALUE = Bytes.toBytes("testValue"); static int SLAVES = 1; // To keep the child classes happy. - FromClientSideBase() {} + FromClientSideBase() { + } /** * JUnit does not provide an easy way to run a hook after each parameterized run. Without that * there is no easy way to restart the test cluster after each parameterized run. Annotation * BeforeParam does not work either because it runs before parameterization and hence does not - * have access to the test parameters (which is weird). - * - * This *hack* checks if the current instance of test cluster configuration has the passed - * parameterized configs. In such a case, we can just reuse the cluster for test and do not need - * to initialize from scratch. While this is a hack, it saves a ton of time for the full - * test and de-flakes it. + * have access to the test parameters (which is weird). This *hack* checks if the current instance + * of test cluster configuration has the passed parameterized configs. In such a case, we can just + * reuse the cluster for test and do not need to initialize from scratch. While this is a hack, it + * saves a ton of time for the full test and de-flakes it. */ protected static boolean isSameParameterizedCluster(Class registryImpl, int numHedgedReqs) { if (TEST_UTIL == null) { @@ -101,7 +99,7 @@ protected static boolean isSameParameterizedCluster(Class registryImpl, int n } protected static final void initialize(Class registryImpl, int numHedgedReqs, Class... cps) - throws Exception { + throws Exception { // initialize() is called for every unit test, however we only want to reset the cluster state // at the end of every parameterized run. 
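The cluster-reuse "hack" described in the FromClientSideBase javadoc above is easiest to see from the calling side. The sketch below is hypothetical and assumes it lives in the same org.apache.hadoop.hbase.client package as the base class; the class name and parameter values are placeholders, and the base class already carries the @RunWith(Parameterized.class) annotation.

import java.util.Arrays;
import java.util.Collection;
import org.junit.runners.Parameterized;

public class TestFromClientSideSketch extends FromClientSideBase {
  @Parameterized.Parameters
  public static Collection<Object[]> parameters() {
    // each entry: registry implementation, hedged request fanout
    return Arrays.asList(new Object[][] { { MasterRegistry.class, 1 }, { MasterRegistry.class, 2 } });
  }

  public TestFromClientSideSketch(Class<?> registry, int numHedgedReqs) throws Exception {
    // initialize() restarts the mini cluster only when these parameters differ from the
    // ones the running cluster was started with; otherwise the cluster is reused.
    initialize(registry, numHedgedReqs);
  }
}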
if (isSameParameterizedCluster(registryImpl, numHedgedReqs)) { @@ -124,7 +122,7 @@ protected static final void initialize(Class registryImpl, int numHedgedReqs, Arrays.stream(cps).map(Class::getName).toArray(String[]::new)); conf.setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, true); // enable for below tests conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, registryImpl, - ConnectionRegistry.class); + ConnectionRegistry.class); Preconditions.checkArgument(numHedgedReqs > 0); conf.setInt(MasterRegistry.MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, numHedgedReqs); StartTestingClusterOption.Builder builder = StartTestingClusterOption.builder(); @@ -139,8 +137,7 @@ protected static void afterClass() throws Exception { } } - protected void deleteColumns(Table ht, String value, String keyPrefix) - throws IOException { + protected void deleteColumns(Table ht, String value, String keyPrefix) throws IOException { ResultScanner scanner = buildScanner(keyPrefix, value, ht); Iterator it = scanner.iterator(); int count = 0; @@ -154,8 +151,7 @@ protected void deleteColumns(Table ht, String value, String keyPrefix) assertEquals("Did not perform correct number of deletes", 3, count); } - protected int getNumberOfRows(String keyPrefix, String value, Table ht) - throws Exception { + protected int getNumberOfRows(String keyPrefix, String value, Table ht) throws Exception { ResultScanner resultScanner = buildScanner(keyPrefix, value, ht); Iterator scanner = resultScanner.iterator(); int numberOfResults = 0; @@ -163,8 +159,7 @@ protected int getNumberOfRows(String keyPrefix, String value, Table ht) Result result = scanner.next(); System.out.println("Got back key: " + Bytes.toString(result.getRow())); for (Cell kv : result.rawCells()) { - System.out.println("kv=" + kv.toString() + ", " - + Bytes.toString(CellUtil.cloneValue(kv))); + System.out.println("kv=" + kv.toString() + ", " + Bytes.toString(CellUtil.cloneValue(kv))); } numberOfResults++; } @@ -172,13 +167,12 @@ protected int getNumberOfRows(String keyPrefix, String value, Table ht) } protected ResultScanner buildScanner(String keyPrefix, String value, Table ht) - throws IOException { + throws IOException { // OurFilterList allFilters = new OurFilterList(); FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */); allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix))); - SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes - .toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOperator.EQUAL, Bytes - .toBytes(value)); + SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"), + Bytes.toBytes("qual2"), CompareOperator.EQUAL, Bytes.toBytes(value)); filter.setFilterIfMissing(true); allFilters.addFilter(filter); @@ -197,23 +191,17 @@ protected ResultScanner buildScanner(String keyPrefix, String value, Table ht) return ht.getScanner(scan); } - protected void putRows(Table ht, int numRows, String value, String key) - throws IOException { + protected void putRows(Table ht, int numRows, String value, String key) throws IOException { for (int i = 0; i < numRows; i++) { String row = key + "_" + HBaseCommonTestingUtil.getRandomUUID().toString(); - System.out.println(String.format("Saving row: %s, with value %s", row, - value)); + System.out.println(String.format("Saving row: %s, with value %s", row, value)); Put put = new Put(Bytes.toBytes(row)); put.setDurability(Durability.SKIP_WAL); - put.addColumn(Bytes.toBytes("trans-blob"), null, Bytes - .toBytes("value for 
blob")); + put.addColumn(Bytes.toBytes("trans-blob"), null, Bytes.toBytes("value for blob")); put.addColumn(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement")); - put.addColumn(Bytes.toBytes("trans-date"), null, Bytes - .toBytes("20090921010101999")); - put.addColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), Bytes - .toBytes(value)); - put.addColumn(Bytes.toBytes("trans-group"), null, Bytes - .toBytes("adhocTransactionGroupId")); + put.addColumn(Bytes.toBytes("trans-date"), null, Bytes.toBytes("20090921010101999")); + put.addColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), Bytes.toBytes(value)); + put.addColumn(Bytes.toBytes("trans-group"), null, Bytes.toBytes("adhocTransactionGroupId")); ht.put(put); } } @@ -226,7 +214,7 @@ protected void assertRowCount(final Table t, final int expected) throws IOExcept * @param key * @return Scan with RowFilter that does LESS than passed key. */ - protected Scan createScanWithRowFilter(final byte [] key) { + protected Scan createScanWithRowFilter(final byte[] key) { return createScanWithRowFilter(key, null, CompareOperator.LESS); } @@ -236,13 +224,13 @@ protected Scan createScanWithRowFilter(final byte [] key) { * @param startRow * @return Scan with RowFilter that does CompareOp op on passed key. */ - protected Scan createScanWithRowFilter(final byte [] key, - final byte [] startRow, CompareOperator op) { + protected Scan createScanWithRowFilter(final byte[] key, final byte[] startRow, + CompareOperator op) { // Make sure key is of some substance... non-null and > than first key. - assertTrue(key != null && key.length > 0 && - Bytes.BYTES_COMPARATOR.compare(key, new byte [] {'a', 'a', 'a'}) >= 0); + assertTrue(key != null && key.length > 0 + && Bytes.BYTES_COMPARATOR.compare(key, new byte[] { 'a', 'a', 'a' }) >= 0); LOG.info("Key=" + Bytes.toString(key)); - Scan s = startRow == null? new Scan(): new Scan().withStartRow(startRow); + Scan s = startRow == null ? new Scan() : new Scan().withStartRow(startRow); Filter f = new RowFilter(op, new BinaryComparator(key)); f = new WhileMatchFilter(f); s.setFilter(f); @@ -266,8 +254,8 @@ protected List splitTable(final Table t) throws IOException { } /* - * Wait on table split. May return because we waited long enough on the split - * and it didn't happen. Caller should check. + * Wait on table split. May return because we waited long enough on the split and it didn't + * happen. Caller should check. * @param t * @return Map of table regions; caller needs to check table actually split. 
*/ @@ -297,48 +285,48 @@ protected Result getSingleScanResult(Table ht, Scan scan) throws IOException { return result; } - byte [][] makeNAscii(byte [] base, int n) { - if(n > 256) { + byte[][] makeNAscii(byte[] base, int n) { + if (n > 256) { return makeNBig(base, n); } - byte [][] ret = new byte[n][]; - for(int i=0;i 256) { return makeNBig(base, n); } - byte [][] ret = new byte[n][]; - for(int i=0;i> 8); - ret[i] = Bytes.add(base, new byte[]{(byte)byteB,(byte)byteA}); + ret[i] = Bytes.add(base, new byte[] { (byte) byteB, (byte) byteA }); } return ret; } - protected long [] makeStamps(int n) { - long [] stamps = new long[n]; + protected long[] makeStamps(int n) { + long[] stamps = new long[n]; for (int i = 0; i < n; i++) { - stamps[i] = i+1L; + stamps[i] = i + 1L; } return stamps; } - protected static boolean equals(byte [] left, byte [] right) { + protected static boolean equals(byte[] left, byte[] right) { if (left == null && right == null) { return true; } @@ -351,226 +339,240 @@ protected static boolean equals(byte [] left, byte [] right) { return Bytes.equals(left, right); } - protected void assertKey(Cell key, byte [] row, byte [] family, byte [] qualifier, - byte [] value) { - assertTrue("Expected row [" + Bytes.toString(row) + "] " + - "Got row [" + Bytes.toString(CellUtil.cloneRow(key)) +"]", + protected void assertKey(Cell key, byte[] row, byte[] family, byte[] qualifier, byte[] value) { + assertTrue("Expected row [" + Bytes.toString(row) + "] " + "Got row [" + + Bytes.toString(CellUtil.cloneRow(key)) + "]", equals(row, CellUtil.cloneRow(key))); - assertTrue("Expected family [" + Bytes.toString(family) + "] " + - "Got family [" + Bytes.toString(CellUtil.cloneFamily(key)) + "]", + assertTrue( + "Expected family [" + Bytes.toString(family) + "] " + "Got family [" + + Bytes.toString(CellUtil.cloneFamily(key)) + "]", equals(family, CellUtil.cloneFamily(key))); - assertTrue("Expected qualifier [" + Bytes.toString(qualifier) + "] " + - "Got qualifier [" + Bytes.toString(CellUtil.cloneQualifier(key)) + "]", + assertTrue( + "Expected qualifier [" + Bytes.toString(qualifier) + "] " + "Got qualifier [" + + Bytes.toString(CellUtil.cloneQualifier(key)) + "]", equals(qualifier, CellUtil.cloneQualifier(key))); - assertTrue("Expected value [" + Bytes.toString(value) + "] " + - "Got value [" + Bytes.toString(CellUtil.cloneValue(key)) + "]", + assertTrue( + "Expected value [" + Bytes.toString(value) + "] " + "Got value [" + + Bytes.toString(CellUtil.cloneValue(key)) + "]", equals(value, CellUtil.cloneValue(key))); } - static void assertIncrementKey(Cell key, byte [] row, byte [] family, - byte [] qualifier, long value) { - assertTrue("Expected row [" + Bytes.toString(row) + "] " + - "Got row [" + Bytes.toString(CellUtil.cloneRow(key)) +"]", + static void assertIncrementKey(Cell key, byte[] row, byte[] family, byte[] qualifier, + long value) { + assertTrue("Expected row [" + Bytes.toString(row) + "] " + "Got row [" + + Bytes.toString(CellUtil.cloneRow(key)) + "]", equals(row, CellUtil.cloneRow(key))); - assertTrue("Expected family [" + Bytes.toString(family) + "] " + - "Got family [" + Bytes.toString(CellUtil.cloneFamily(key)) + "]", + assertTrue( + "Expected family [" + Bytes.toString(family) + "] " + "Got family [" + + Bytes.toString(CellUtil.cloneFamily(key)) + "]", equals(family, CellUtil.cloneFamily(key))); - assertTrue("Expected qualifier [" + Bytes.toString(qualifier) + "] " + - "Got qualifier [" + Bytes.toString(CellUtil.cloneQualifier(key)) + "]", + assertTrue( + "Expected qualifier [" + 
Bytes.toString(qualifier) + "] " + "Got qualifier [" + + Bytes.toString(CellUtil.cloneQualifier(key)) + "]", equals(qualifier, CellUtil.cloneQualifier(key))); - assertEquals( - "Expected value [" + value + "] " + "Got value [" + Bytes.toLong(CellUtil.cloneValue(key)) - + "]", Bytes.toLong(CellUtil.cloneValue(key)), value); + assertEquals("Expected value [" + value + "] " + "Got value [" + + Bytes.toLong(CellUtil.cloneValue(key)) + "]", + Bytes.toLong(CellUtil.cloneValue(key)), value); } protected void assertNumKeys(Result result, int n) throws Exception { assertEquals("Expected " + n + " keys but got " + result.size(), result.size(), n); } - protected void assertNResult(Result result, byte [] row, - byte [][] families, byte [][] qualifiers, byte [][] values, int [][] idxs) { - assertTrue("Expected row [" + Bytes.toString(row) + "] " + - "Got row [" + Bytes.toString(result.getRow()) +"]", + protected void assertNResult(Result result, byte[] row, byte[][] families, byte[][] qualifiers, + byte[][] values, int[][] idxs) { + assertTrue("Expected row [" + Bytes.toString(row) + "] " + "Got row [" + + Bytes.toString(result.getRow()) + "]", equals(row, result.getRow())); assertEquals("Expected " + idxs.length + " keys but result contains " + result.size(), result.size(), idxs.length); - Cell [] keys = result.rawCells(); + Cell[] keys = result.rawCells(); - for(int i=0;iconf - * configuration instance. Minimally the mock will return <code>conf</conf> when - * {@link Connection#getConfiguration()} is invoked. Be sure to shutdown the - * connection when done by calling {@link Connection#close()} else it will stick around; this is - * probably not what you want. + * Get a Mocked {@link Connection} that goes with the passed conf configuration + * instance. Minimally the mock will return <code>conf</conf> when + * {@link Connection#getConfiguration()} is invoked. Be sure to shutdown the connection when done + * by calling {@link Connection#close()} else it will stick around; this is probably not what you + * want. * @param conf configuration * @return ConnectionImplementation object for conf */ - public static Connection getMockedConnection(final Configuration conf) - throws IOException { + public static Connection getMockedConnection(final Configuration conf) throws IOException { Connection connection = Mockito.mock(Connection.class); Mockito.when(connection.getConfiguration()).thenReturn(conf); - // Some test cases need Mock of getTable and getScanner + // Some test cases need Mock of getTable and getScanner Table t = Mockito.mock(Table.class); Mockito.when(connection.getTable(Mockito.any())).thenReturn(t); ResultScanner rs = Mockito.mock(ResultScanner.class); - Mockito.when(t.getScanner((Scan)Mockito.any())).thenReturn(rs); + Mockito.when(t.getScanner((Scan) Mockito.any())).thenReturn(rs); return connection; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java index 14de74233b58..b6a09f8b0aa1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -56,21 +56,21 @@ public class MetaWithReplicasTestBase { protected static void startCluster() throws Exception { TEST_UTIL.getConfiguration().setInt("zookeeper.session.timeout", 30000); TEST_UTIL.getConfiguration() - .setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); + .setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); StartTestingClusterOption option = StartTestingClusterOption.builder() - .numAlwaysStandByMasters(1).numMasters(1).numRegionServers(REGIONSERVERS_COUNT).build(); + .numAlwaysStandByMasters(1).numMasters(1).numRegionServers(REGIONSERVERS_COUNT).build(); TEST_UTIL.startMiniCluster(option); Admin admin = TEST_UTIL.getAdmin(); HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); Set sns = new HashSet(); ServerName hbaseMetaServerName = am.getRegionStates() - .getRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO).getRegionLocation(); + .getRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO).getRegionLocation(); LOG.info("HBASE:META DEPLOY: on " + hbaseMetaServerName); sns.add(hbaseMetaServerName); for (int replicaId = 1; replicaId < 3; replicaId++) { RegionInfo h = RegionReplicaUtil - .getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); + .getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId); AssignmentTestingUtil.waitForAssignment(am, h); ServerName sn = am.getRegionStates().getRegionServerOfRegion(h); assertNotNull(sn); @@ -90,28 +90,28 @@ protected static void startCluster() throws Exception { } assertNotEquals(metaServerIndex, newServerIndex); ServerName destinationServerName = - TEST_UTIL.getHBaseCluster().getRegionServer(newServerIndex).getServerName(); + TEST_UTIL.getHBaseCluster().getRegionServer(newServerIndex).getServerName(); ServerName metaServerName = - TEST_UTIL.getHBaseCluster().getRegionServer(metaServerIndex).getServerName(); + TEST_UTIL.getHBaseCluster().getRegionServer(metaServerIndex).getServerName(); assertNotEquals(destinationServerName, metaServerName); TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), destinationServerName); } // Disable the balancer LoadBalancerTracker l = - new LoadBalancerTracker(TEST_UTIL.getZooKeeperWatcher(), new Abortable() { - AtomicBoolean aborted = new AtomicBoolean(false); + new LoadBalancerTracker(TEST_UTIL.getZooKeeperWatcher(), new Abortable() { + AtomicBoolean aborted = new AtomicBoolean(false); - @Override - public boolean isAborted() { - return aborted.get(); - } + @Override + public boolean isAborted() { + return aborted.get(); + } - @Override - public void abort(String why, Throwable e) { - aborted.set(true); - } - }); + @Override + public void abort(String why, Throwable e) { + aborted.set(true); + } + }); l.setBalancerOn(false); LOG.debug("All meta replicas assigned"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java index abb0c1103446..e8da619eef63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor 
license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,11 +43,11 @@ private RegionReplicaTestHelper() { } // waits for all replicas to have region location - static void waitUntilAllMetaReplicasAreReady(HBaseTestingUtil util, - ConnectionRegistry registry) throws IOException { + static void waitUntilAllMetaReplicasAreReady(HBaseTestingUtil util, ConnectionRegistry registry) + throws IOException { Configuration conf = util.getConfiguration(); int regionReplicaCount = - util.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); + util.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); Waiter.waitFor(conf, conf.getLong("hbase.client.sync.wait.timeout.msec", 60000), 200, true, new ExplainingPredicate() { @Override @@ -84,23 +84,22 @@ public boolean evaluate() { static Optional getRSCarryingReplica(HBaseTestingUtil util, TableName tableName, int replicaId) { return util.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer()) - .filter(rs -> rs.getRegions(tableName).stream() - .anyMatch(r -> r.getRegionInfo().getReplicaId() == replicaId)) - .findAny().map(rs -> rs.getServerName()); + .filter(rs -> rs.getRegions(tableName).stream() + .anyMatch(r -> r.getRegionInfo().getReplicaId() == replicaId)) + .findAny().map(rs -> rs.getServerName()); } /** * Return the new location. */ - static ServerName moveRegion(HBaseTestingUtil util, HRegionLocation currentLoc) - throws Exception { + static ServerName moveRegion(HBaseTestingUtil util, HRegionLocation currentLoc) throws Exception { ServerName serverName = currentLoc.getServerName(); RegionInfo regionInfo = currentLoc.getRegion(); TableName tableName = regionInfo.getTable(); int replicaId = regionInfo.getReplicaId(); ServerName newServerName = util.getHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer().getServerName()).filter(sn -> !sn.equals(serverName)).findAny() - .get(); + .map(t -> t.getRegionServer().getServerName()).filter(sn -> !sn.equals(serverName)) + .findAny().get(); util.getAdmin().move(regionInfo.getEncodedNameAsBytes(), newServerName); util.waitFor(30000, new ExplainingPredicate() { @@ -128,7 +127,7 @@ RegionLocations getRegionLocations(TableName tableName, int replicaId, boolean r static void testLocator(HBaseTestingUtil util, TableName tableName, Locator locator) throws Exception { RegionLocations locs = - locator.getRegionLocations(tableName, RegionReplicaUtil.DEFAULT_REPLICA_ID, false); + locator.getRegionLocations(tableName, RegionReplicaUtil.DEFAULT_REPLICA_ID, false); assertEquals(3, locs.size()); for (int i = 0; i < 3; i++) { HRegionLocation loc = locs.getRegionLocation(i); @@ -140,14 +139,15 @@ static void testLocator(HBaseTestingUtil util, TableName tableName, Locator loca // The cached location should not be changed assertEquals(locs.getDefaultRegionLocation().getServerName(), locator.getRegionLocations(tableName, RegionReplicaUtil.DEFAULT_REPLICA_ID, false) - .getDefaultRegionLocation().getServerName()); + .getDefaultRegionLocation().getServerName()); // should get the new location when reload = true // when meta replica LoadBalance mode is enabled, it may delay a bit. 
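The retry loops in RegionReplicaTestHelper above all follow the same waitFor(...) + ExplainingPredicate pattern. Below is a minimal, generic sketch of that pattern, assuming a 30 second timeout and a caller-supplied condition; the helper name and failure message are placeholders, not part of this patch.

import java.util.function.BooleanSupplier;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;

static void waitForCondition(HBaseTestingUtil util, BooleanSupplier condition) throws Exception {
  util.waitFor(30000, new ExplainingPredicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return condition.getAsBoolean(); // polled until true or the timeout elapses
    }

    @Override
    public String explainFailure() throws Exception {
      return "condition not met before timeout"; // reported when the wait gives up
    }
  });
}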
util.waitFor(3000, new ExplainingPredicate() { @Override public boolean evaluate() throws Exception { - ServerName sn = locator.getRegionLocations(tableName, RegionReplicaUtil.DEFAULT_REPLICA_ID, - true).getDefaultRegionLocation().getServerName(); + ServerName sn = + locator.getRegionLocations(tableName, RegionReplicaUtil.DEFAULT_REPLICA_ID, true) + .getDefaultRegionLocation().getServerName(); return newServerName.equals(sn); } @@ -159,11 +159,11 @@ public String explainFailure() throws Exception { assertEquals(newServerName, locator.getRegionLocations(tableName, RegionReplicaUtil.DEFAULT_REPLICA_ID, true) - .getDefaultRegionLocation().getServerName()); + .getDefaultRegionLocation().getServerName()); // the cached location should be replaced assertEquals(newServerName, locator.getRegionLocations(tableName, RegionReplicaUtil.DEFAULT_REPLICA_ID, false) - .getDefaultRegionLocation().getServerName()); + .getDefaultRegionLocation().getServerName()); ServerName newServerName1 = moveRegion(util, locs.getRegionLocation(1)); ServerName newServerName2 = moveRegion(util, locs.getRegionLocation(2)); @@ -185,14 +185,13 @@ public String explainFailure() throws Exception { locator.getRegionLocations(tableName, 2, false).getRegionLocation(2).getServerName()); } - public static void assertReplicaDistributed(HBaseTestingUtil util, Table t) - throws IOException { + public static void assertReplicaDistributed(HBaseTestingUtil util, Table t) throws IOException { if (t.getDescriptor().getRegionReplication() <= 1) { return; } List regionInfos = new ArrayList<>(); for (JVMClusterUtil.RegionServerThread rs : util.getMiniHBaseCluster() - .getRegionServerThreads()) { + .getRegionServerThreads()) { regionInfos.clear(); for (Region r : rs.getRegionServer().getRegions(t.getName())) { if (contains(regionInfos, r.getRegionInfo())) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientAfterSplittingRegionsTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientAfterSplittingRegionsTestBase.java index 59f0fd41087c..1b4202904c58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientAfterSplittingRegionsTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientAfterSplittingRegionsTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientAfterTruncateTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientAfterTruncateTestBase.java index 11fd6f196c1f..052ee3828bfa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientAfterTruncateTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientAfterTruncateTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java index 101ba9cc02da..15292d21e074 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ public class RestoreSnapshotFromClientCloneTestBase extends RestoreSnapshotFromC @Test public void testCloneSnapshotOfCloned() throws IOException, InterruptedException { TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName0, clonedTableName); verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows); SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientGetCompactionStateTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientGetCompactionStateTestBase.java index 03c908fd0680..250057d5d7d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientGetCompactionStateTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientGetCompactionStateTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSchemaChangeTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSchemaChangeTestBase.java index c60bd1047998..18634e38b25a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSchemaChangeTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSchemaChangeTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java index 627de2a2ee13..5926b9111c86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ public void testRestoreSnapshot() throws IOException { public void testCorruptedSnapshot() throws IOException, InterruptedException { SnapshotTestingUtils.corruptSnapshot(TEST_UTIL, snapshotName0); TableName cloneName = - TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); try { admin.cloneSnapshot(snapshotName0, cloneName); fail("Expected CorruptedSnapshotException, got succeeded cloneSnapshot()"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java index f1ad4627cc95..a549ba26e662 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ScanPerNextResultScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ScanPerNextResultScanner.java index c8665e912667..68b05a6d1806 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ScanPerNextResultScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/ScanPerNextResultScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SimpleScanResultConsumerImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SimpleScanResultConsumerImpl.java index 98941fece196..a7d7f7661ff0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SimpleScanResultConsumerImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SimpleScanResultConsumerImpl.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; + import org.apache.hbase.thirdparty.com.google.common.base.Throwables; class SimpleScanResultConsumerImpl implements SimpleScanResultConsumer { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SleepAtFirstRpcCall.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SleepAtFirstRpcCall.java index 2ba54bfd2a3a..61a53d677e58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SleepAtFirstRpcCall.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SleepAtFirstRpcCall.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java index f0e47d1d74de..5328b7c18a48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -74,7 +74,7 @@ public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addFamily(TEST_FAMILY); try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table t = conn.getTable(tableName)) { + Table t = conn.getTable(tableName)) { t.get(g); } return null; @@ -93,7 +93,7 @@ public Object run() throws Exception { Put p = new Put(TEST_ROW); p.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(0)); try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table t = conn.getTable(tableName)) { + Table t = conn.getTable(tableName)) { t.put(p); } return null; @@ -113,7 +113,7 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME); MasterCoprocessorHost cpHost = - TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]); @@ -128,8 +128,9 @@ public static void setupBeforeClass() throws Exception { @Before public void setUp() throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()).build(); + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()) + .build(); createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitTableEnabled(TEST_TABLE); @@ -159,7 +160,8 @@ public static void tearDownAfterClass() throws Exception { private void verifyRows(TableName tableName) throws IOException { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table t = conn.getTable(tableName); ResultScanner scanner = t.getScanner(new Scan())) { + Table t = conn.getTable(tableName); + ResultScanner scanner = t.getScanner(new Scan())) { Result result; int rowCount = 0; while ((result = scanner.next()) != null) { @@ -235,16 +237,17 @@ public void testRestoreSnapshot() throws Exception { verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE); } - final class AccessSnapshotAction implements AccessTestAction { private String snapshotName; + private AccessSnapshotAction(String snapshotName) { this.snapshotName = snapshotName; } + @Override public Object run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { admin.snapshot(this.snapshotName, TEST_TABLE); } return null; @@ -257,13 +260,13 @@ public void testDeleteSnapshot() 
throws Exception { verifyAllowed(new AccessSnapshotAction(testSnapshotName), USER_OWNER); verifyDenied(new AccessSnapshotAction(HBaseCommonTestingUtil.getRandomUUID().toString()), USER_RO, USER_RW, USER_NONE); - List snapshotDescriptions = TEST_UTIL.getAdmin().listSnapshots( - Pattern.compile(testSnapshotName)); + List snapshotDescriptions = + TEST_UTIL.getAdmin().listSnapshots(Pattern.compile(testSnapshotName)); Assert.assertEquals(1, snapshotDescriptions.size()); Assert.assertEquals(USER_OWNER.getShortName(), snapshotDescriptions.get(0).getOwner()); AccessTestAction deleteSnapshotAction = () -> { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { admin.deleteSnapshot(testSnapshotName); } return null; @@ -271,8 +274,8 @@ public void testDeleteSnapshot() throws Exception { verifyDenied(deleteSnapshotAction, USER_RO, USER_RW, USER_NONE); verifyAllowed(deleteSnapshotAction, USER_OWNER); - List snapshotsAfterDelete = TEST_UTIL.getAdmin().listSnapshots( - Pattern.compile(testSnapshotName)); + List snapshotsAfterDelete = + TEST_UTIL.getAdmin().listSnapshots(Pattern.compile(testSnapshotName)); Assert.assertEquals(0, snapshotsAfterDelete.size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index ba843fe43460..f6d910595240 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,14 +59,12 @@ public class TestAdmin extends TestAdminBase { private static final Logger LOG = LoggerFactory.getLogger(TestAdmin.class); @Test - public void testListTableDescriptors() throws IOException{ - TableDescriptor metaTableDescriptor = TEST_UTIL.getAdmin(). - getDescriptor(TableName.META_TABLE_NAME); - List tableDescriptors = TEST_UTIL.getAdmin(). - listTableDescriptors(true); + public void testListTableDescriptors() throws IOException { + TableDescriptor metaTableDescriptor = + TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME); + List tableDescriptors = TEST_UTIL.getAdmin().listTableDescriptors(true); assertTrue(tableDescriptors.contains(metaTableDescriptor)); - tableDescriptors = TEST_UTIL.getAdmin(). 
- listTableDescriptors(false); + tableDescriptors = TEST_UTIL.getAdmin().listTableDescriptors(false); assertFalse(tableDescriptors.contains(metaTableDescriptor)); } @@ -79,7 +77,7 @@ public void testCreateTable() throws IOException { tables = ADMIN.listTableDescriptors(); assertEquals(numTables + 1, tables.size()); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster() - .getTableStateManager().isTableState(tableName, TableState.State.ENABLED)); + .getTableStateManager().isTableState(tableName, TableState.State.ENABLED)); assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName)); } @@ -175,8 +173,8 @@ public void testCreateTableWithRegions() throws IOException, InterruptedExceptio TableName table = TableName.valueOf(name.getMethodName()); ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY); byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, - new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, - new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 }, }; + new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, + new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 }, }; int expectedRegions = splitKeys.length + 1; ADMIN.createTable(TableDescriptorBuilder.newBuilder(table).setColumnFamily(cfd).build(), @@ -311,7 +309,7 @@ public void testCreateTableWithRegions() throws IOException, InterruptedExceptio // Try an invalid case where there are duplicate split keys splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, - new byte[] { 3, 3, 3 }, new byte[] { 2, 2, 2 } }; + new byte[] { 3, 3, 3 }, new byte[] { 2, 2, 2 } }; TableName table4 = TableName.valueOf(table.getNameAsString() + "_4"); try { @@ -330,7 +328,7 @@ public void testCreateTableWithOnlyEmptyStartRow() throws IOException { byte[][] splitKeys = new byte[1][]; splitKeys[0] = HConstants.EMPTY_BYTE_ARRAY; TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("col")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("col")).build(); try { ADMIN.createTable(desc, splitKeys); fail("Test case should fail as empty split key is passed."); @@ -346,7 +344,7 @@ public void testCreateTableWithEmptyRowInTheSplitKeys() throws IOException { splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY; splitKeys[2] = Bytes.toBytes("region2"); TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("col")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("col")).build(); try { ADMIN.createTable(desc, splitKeys); fail("Test case should fail as empty split key is passed."); @@ -407,10 +405,10 @@ private void testCloneTableSchema(final TableName tableName, final TableName new // Create the table TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_1).setBlocksize(BLOCK_SIZE) - .setBlockCacheEnabled(BLOCK_CACHE).setTimeToLive(TTL).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_1).setBlocksize(BLOCK_SIZE) + .setBlockCacheEnabled(BLOCK_CACHE).setTimeToLive(TTL).build()) + .build(); ADMIN.createTable(tableDesc, splitKeys); 
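Most of the reformatted call chains in TestAdmin follow the same builder pattern for creating a pre-split table. For reference, a self-contained sketch of that pattern, with a hypothetical table name, column family and split keys:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

static void createPreSplitTable(Admin admin) throws java.io.IOException {
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("sketchTable"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
      .setBlocksize(64 * 1024).build())
    .build();
  // Three split keys -> four regions; empty byte arrays are rejected as split keys.
  byte[][] splitKeys = { Bytes.toBytes("b"), Bytes.toBytes("m"), Bytes.toBytes("t") };
  admin.createTable(desc, splitKeys);
}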
assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(tableName).size()); @@ -426,9 +424,11 @@ private void testCloneTableSchema(final TableName tableName, final TableName new assertEquals(BLOCK_CACHE, newTableDesc.getColumnFamily(FAMILY_1).isBlockCacheEnabled()); assertEquals(TTL, newTableDesc.getColumnFamily(FAMILY_1).getTimeToLive()); // HBASE-26246 introduced persist of store file tracker into table descriptor - tableDesc = TableDescriptorBuilder.newBuilder(tableDesc).setValue(TRACKER_IMPL, - StoreFileTrackerFactory.getStoreFileTrackerName(TEST_UTIL.getConfiguration())). - build(); + tableDesc = + TableDescriptorBuilder.newBuilder(tableDesc) + .setValue(TRACKER_IMPL, + StoreFileTrackerFactory.getStoreFileTrackerName(TEST_UTIL.getConfiguration())) + .build(); TEST_UTIL.verifyTableDescriptorIgnoreTableName(tableDesc, newTableDesc); if (preserveSplits) { @@ -473,14 +473,14 @@ public void testCloneTableSchemaWithExistentDestinationTable() throws Exception public void testModifyTableOnTableWithRegionReplicas() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))) - .setRegionReplication(5).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))) + .setRegionReplication(5).build(); ADMIN.createTable(desc); int maxFileSize = 10000000; TableDescriptor newDesc = - TableDescriptorBuilder.newBuilder(desc).setMaxFileSize(maxFileSize).build(); + TableDescriptorBuilder.newBuilder(desc).setMaxFileSize(maxFileSize).build(); ADMIN.modifyTable(newDesc); TableDescriptor newTableDesc = ADMIN.getDescriptor(tableName); @@ -511,7 +511,7 @@ public void testOnlineChangeTableSchema() throws IOException, InterruptedExcepti newFlushSize = TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE / 2; } copy = TableDescriptorBuilder.newBuilder(copy).setMemStoreFlushSize(newFlushSize) - .setValue(key, key).build(); + .setValue(key, key).build(); ADMIN.modifyTable(copy); TableDescriptor modifiedHtd = ADMIN.getDescriptor(tableName); assertNotEquals(htd, modifiedHtd); @@ -536,7 +536,7 @@ public void testOnlineChangeTableSchema() throws IOException, InterruptedExcepti assertFalse(ADMIN.isTableDisabled(tableName)); String xtracolName = "xtracol"; ColumnFamilyDescriptor xtracol = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(xtracolName)).setValue(xtracolName, xtracolName).build(); + .newBuilder(Bytes.toBytes(xtracolName)).setValue(xtracolName, xtracolName).build(); ADMIN.addColumnFamily(tableName, xtracol); modifiedHtd = ADMIN.getDescriptor(tableName); hcd = modifiedHtd.getColumnFamily(xtracol.getName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index b48841660166..0e98f73374f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -104,7 +104,7 @@ public void testSplitFlushCompactUnknownTable() throws InterruptedException { public void testCompactATableWithSuperLongTableName() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); try { ADMIN.createTable(htd); assertThrows(IllegalArgumentException.class, @@ -122,7 +122,7 @@ public void testCompactATableWithSuperLongTableName() throws Exception { public void testCompactionTimestamps() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); ADMIN.createTable(htd); Table table = TEST_UTIL.getConnection().getTable(htd.getTableName()); long ts = ADMIN.getLastMajorCompactionTimestamp(tableName); @@ -362,13 +362,14 @@ public void run() { // check if splitKey is based on the largest column family // in terms of it store size int deltaForLargestFamily = Math.abs(rowCount / 2 - splitKey); - LOG.debug("SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily + - ", r=" + regions.get(0).getRegion()); + LOG.debug("SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily + + ", r=" + regions.get(0).getRegion()); for (int index = 0; index < familyNames.length; index++) { int delta = Math.abs(rowCounts[index] / 2 - splitKey); if (delta < deltaForLargestFamily) { - assertTrue("Delta " + delta + " for family " + index + " should be at least " + - "deltaForLargestFamily " + deltaForLargestFamily, false); + assertTrue("Delta " + delta + " for family " + index + " should be at least " + + "deltaForLargestFamily " + deltaForLargestFamily, + false); } } } @@ -386,7 +387,7 @@ public void testSplitAndMergeWithReplicaTable() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); byte[] cf = Bytes.toBytes("f"); TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); byte[][] splitRows = new byte[2][]; splitRows[0] = new byte[] { (byte) '4' }; splitRows[1] = new byte[] { (byte) '7' }; @@ -412,13 +413,13 @@ public void testSplitAndMergeWithReplicaTable() throws Exception { ht.put(puts); ht.close(); List> regions = - MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName); + MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName); boolean gotException = false; // the element at index 1 would be a replica (since the metareader gives us ordered // regions). Try splitting that region via the split API . 
Should fail try { - FutureUtils.get( - TEST_UTIL.getAdmin().splitRegionAsync(regions.get(1).getFirst().getRegionName())); + FutureUtils + .get(TEST_UTIL.getAdmin().splitRegionAsync(regions.get(1).getFirst().getRegionName())); } catch (IllegalArgumentException ex) { gotException = true; } @@ -439,7 +440,7 @@ public void testSplitAndMergeWithReplicaTable() throws Exception { // testing Sync split operation try { FutureUtils.get(TEST_UTIL.getAdmin() - .splitRegionAsync(regions.get(1).getFirst().getRegionName(), new byte[] { (byte) '1' })); + .splitRegionAsync(regions.get(1).getFirst().getRegionName(), new byte[] { (byte) '1' })); } catch (IllegalArgumentException ex) { gotException = true; } @@ -448,10 +449,9 @@ public void testSplitAndMergeWithReplicaTable() throws Exception { gotException = false; // Try merging a replica with another. Should fail. try { - FutureUtils.get(TEST_UTIL.getAdmin().mergeRegionsAsync( - regions.get(1).getFirst().getEncodedNameAsBytes(), - regions.get(2).getFirst().getEncodedNameAsBytes(), - true)); + FutureUtils.get( + TEST_UTIL.getAdmin().mergeRegionsAsync(regions.get(1).getFirst().getEncodedNameAsBytes(), + regions.get(2).getFirst().getEncodedNameAsBytes(), true)); } catch (IllegalArgumentException m) { gotException = true; } @@ -493,10 +493,10 @@ public void testHFileReplication() throws Exception { String fn1 = "rep1"; String fn = "defaultRep"; TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fn)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(fn1)) - .setDFSReplication((short) 1).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fn)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(fn1)) + .setDFSReplication((short) 1).build()) + .build(); Table table = TEST_UTIL.createTable(htd, null); TEST_UTIL.waitTableAvailable(tableName); Put p = new Put(Bytes.toBytes("defaultRep_rk")); @@ -541,7 +541,7 @@ public void testHFileReplication() throws Exception { public void testMergeRegions() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d")).build(); byte[][] splitRows = new byte[2][]; splitRows[0] = new byte[] { (byte) '3' }; splitRows[1] = new byte[] { (byte) '6' }; @@ -562,8 +562,8 @@ public void testMergeRegions() throws Exception { regionB = tableRegions.get(1); regionC = tableRegions.get(2); // TODO convert this to version that is synchronous (See HBASE-16668) - ADMIN.mergeRegionsAsync(regionA.getRegionName(), regionB.getRegionName(), - false).get(60, TimeUnit.SECONDS); + ADMIN.mergeRegionsAsync(regionA.getRegionName(), regionB.getRegionName(), false).get(60, + TimeUnit.SECONDS); tableRegions = ADMIN.getRegions(tableName); @@ -587,8 +587,7 @@ public void testMergeRegions() throws Exception { // TODO convert this to version that is synchronous (See HBASE-16668) ADMIN.mergeRegionsAsync(regionC.getEncodedNameAsBytes(), - mergedChildRegion.getEncodedNameAsBytes(), false) - .get(60, TimeUnit.SECONDS); + mergedChildRegion.getEncodedNameAsBytes(), false).get(60, TimeUnit.SECONDS); assertEquals(1, ADMIN.getRegions(tableName).size()); } finally { @@ -602,7 +601,7 @@ public void testMergeRegionsInvalidRegionCount() throws IOException, InterruptedException, ExecutionException { TableName tableName = 
TableName.valueOf(name.getMethodName()); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d")).build(); byte[][] splitRows = new byte[2][]; splitRows[0] = new byte[] { (byte) '3' }; splitRows[1] = new byte[] { (byte) '6' }; @@ -620,8 +619,8 @@ public void testMergeRegionsInvalidRegionCount() } // 1 try { - FutureUtils.get(ADMIN - .mergeRegionsAsync(new byte[][] { tableRegions.get(0).getEncodedNameAsBytes() }, false)); + FutureUtils.get(ADMIN.mergeRegionsAsync( + new byte[][] { tableRegions.get(0).getEncodedNameAsBytes() }, false)); fail(); } catch (IllegalArgumentException e) { // expected @@ -636,8 +635,8 @@ public void testMergeRegionsInvalidRegionCount() public void testSplitShouldNotHappenIfSplitIsDisabledForTable() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); + .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); Table table = TEST_UTIL.createTable(htd, null); for (int i = 0; i < 10; i++) { Put p = new Put(Bytes.toBytes("row" + i)); @@ -655,7 +654,7 @@ public void testSplitShouldNotHappenIfSplitIsDisabledForTable() throws Exception } // Split should not happen. List allRegions = - MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName, true); + MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName, true); assertEquals(1, allRegions.size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index d7916343278f..b17864ef37eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -69,16 +69,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; /** - * Class to test HBaseAdmin. - * Spins up the minicluster once at test start and then takes it down afterward. - * Add any testing of HBaseAdmin functionality here. + * Class to test HBaseAdmin. Spins up the minicluster once at test start and then takes it down + * afterward. Add any testing of HBaseAdmin functionality here. 
*/ -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestAdmin2 extends TestAdminBase { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAdmin2.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin2.class); private static final Logger LOG = LoggerFactory.getLogger(TestAdmin2.class); @@ -90,16 +88,16 @@ public void testCreateBadTables() throws IOException { } catch (TableExistsException e) { msg = e.toString(); } - assertTrue("Unexcepted exception message " + msg, msg != null && - msg.startsWith(TableExistsException.class.getName()) && - msg.contains(TableName.META_TABLE_NAME.getNameAsString())); + assertTrue("Unexcepted exception message " + msg, + msg != null && msg.startsWith(TableExistsException.class.getName()) + && msg.contains(TableName.META_TABLE_NAME.getNameAsString())); // Now try and do concurrent creation with a bunch of threads. TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); int count = 10; - Thread [] threads = new Thread [count]; + Thread[] threads = new Thread[count]; final AtomicInteger successes = new AtomicInteger(0); final AtomicInteger failures = new AtomicInteger(0); final Admin localAdmin = ADMIN; @@ -122,7 +120,7 @@ public void run() { threads[i].start(); } for (int i = 0; i < count; i++) { - while(threads[i].isAlive()) { + while (threads[i].isAlive()) { try { Thread.sleep(100); } catch (InterruptedException e) { @@ -130,7 +128,7 @@ public void run() { } } } - // All threads are now dead. Count up how many tables were created and + // All threads are now dead. Count up how many tables were created and // how many failed w/ appropriate exception. assertEquals(1, successes.get()); assertEquals(count - 1, failures.get()); @@ -143,10 +141,10 @@ public void run() { public void testTableNameClash() throws Exception { final String name = this.name.getMethodName(); TableDescriptor tableDescriptor1 = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name + "SOMEUPPERCASE")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name + "SOMEUPPERCASE")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); TableDescriptor tableDescriptor2 = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); ADMIN.createTable(tableDescriptor1); ADMIN.createTable(tableDescriptor2); // Before fix, below would fail throwing a NoServerForRegionException. 
@@ -154,25 +152,24 @@ public void testTableNameClash() throws Exception { } /*** - * HMaster.createTable used to be kind of synchronous call - * Thus creating of table with lots of regions can cause RPC timeout - * After the fix to make createTable truly async, RPC timeout shouldn't be an - * issue anymore + * HMaster.createTable used to be kind of synchronous call Thus creating of table with lots of + * regions can cause RPC timeout After the fix to make createTable truly async, RPC timeout + * shouldn't be an issue anymore */ @Test public void testCreateTableRPCTimeOut() throws Exception { final String name = this.name.getMethodName(); - int oldTimeout = TEST_UTIL.getConfiguration(). - getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + int oldTimeout = TEST_UTIL.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.DEFAULT_HBASE_RPC_TIMEOUT); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); try { int expectedRegions = 100; // Use 80 bit numbers to make sure we aren't limited - byte [] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; - byte [] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; + byte[] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; + byte[] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; Admin hbaseadmin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); hbaseadmin.createTable(tableDescriptor, startKey, endKey, expectedRegions); } finally { TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, oldTimeout); @@ -196,8 +193,8 @@ public void testReadOnlyTable() throws Exception { } /** - * Test that user table names can contain '-' and '.' so long as they do not - * start with same. HBASE-771 + * Test that user table names can contain '-' and '.' so long as they do not start with same. 
+ * HBASE-771 */ @Test public void testTableNames() throws IOException { @@ -211,15 +208,15 @@ public void testTableNames() throws IOException { try { TableName.valueOf(legalName); } catch (IllegalArgumentException e) { - fail("Legal user table name: '" + Bytes.toString(legalName) + - "' caused IllegalArgumentException: " + e.getMessage()); + fail("Legal user table name: '" + Bytes.toString(legalName) + + "' caused IllegalArgumentException: " + e.getMessage()); } } /** * For HADOOP-2579 */ - @Test (expected=TableExistsException.class) + @Test(expected = TableExistsException.class) public void testTableExistsExceptionWithATable() throws IOException { final TableName name = TableName.valueOf(this.name.getMethodName()); TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY).close(); @@ -229,7 +226,7 @@ public void testTableExistsExceptionWithATable() throws IOException { /** * Can't disable a table if the table isn't in enabled state */ - @Test (expected=TableNotEnabledException.class) + @Test(expected = TableNotEnabledException.class) public void testTableNotEnabledExceptionWithATable() throws IOException { final TableName name = TableName.valueOf(this.name.getMethodName()); TEST_UTIL.createTable(name, HConstants.CATALOG_FAMILY).close(); @@ -273,17 +270,14 @@ public void testShouldUnassignTheRegion() throws Exception { ADMIN.unassign(regionInfo.getRegionName(), true); } } - boolean isInList = ProtobufUtil.getOnlineRegions( - rs.getRSRpcServices()).contains(info); + boolean isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); long timeout = EnvironmentEdgeManager.currentTime() + 10000; while ((EnvironmentEdgeManager.currentTime() < timeout) && (isInList)) { Thread.sleep(100); - isInList = ProtobufUtil.getOnlineRegions( - rs.getRSRpcServices()).contains(info); + isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); } - assertFalse("The region should not be present in online regions list.", - isInList); + assertFalse("The region should not be present in online regions list.", isInList); } @Test @@ -299,15 +293,14 @@ public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception { if (!regionInfo.isMetaRegion()) { if (regionInfo.getRegionNameAsString().contains(name)) { info = regionInfo; - assertThrows(UnknownRegionException.class, - () -> ADMIN.unassign(Bytes.toBytes( - "test,,1358563771069.acc1ad1b7962564fc3a43e5907e8db33."), true)); + assertThrows(UnknownRegionException.class, () -> ADMIN.unassign( + Bytes.toBytes("test,,1358563771069.acc1ad1b7962564fc3a43e5907e8db33."), true)); } } } onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); assertTrue("The region should be present in online regions list.", - onlineRegions.contains(info)); + onlineRegions.contains(info)); } @Test @@ -327,26 +320,22 @@ public void testCloseRegionThatFetchesTheHRIFromMeta() throws Exception { } } - boolean isInList = ProtobufUtil.getOnlineRegions( - rs.getRSRpcServices()).contains(info); + boolean isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); long timeout = EnvironmentEdgeManager.currentTime() + 10000; while ((EnvironmentEdgeManager.currentTime() < timeout) && (isInList)) { Thread.sleep(100); - isInList = ProtobufUtil.getOnlineRegions( - rs.getRSRpcServices()).contains(info); + isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info); } - assertFalse("The region should not be present in online regions list.", - isInList); + assertFalse("The region should not be present in online 
regions list.", isInList); } private Admin createTable(TableName tableName) throws IOException { Admin admin = TEST_UTIL.getAdmin(); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("value")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("value")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); @@ -358,10 +347,9 @@ private void createTableWithDefaultConf(byte[] TABLENAME) throws IOException { } private void createTableWithDefaultConf(TableName TABLENAME) throws IOException { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TABLENAME); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLENAME); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("value")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("value")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); ADMIN.createTable(tableDescriptorBuilder.build()); @@ -377,20 +365,19 @@ public void testGetTableRegions() throws IOException { int expectedRegions = 10; // Use 80 bit numbers to make sure we aren't limited - byte [] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; - byte [] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; - + byte[] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; + byte[] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); ADMIN.createTable(tableDescriptor, startKey, endKey, expectedRegions); List RegionInfos = ADMIN.getRegions(tableName); - assertEquals("Tried to create " + expectedRegions + " regions " + - "but only found " + RegionInfos.size(), - expectedRegions, RegionInfos.size()); - } + assertEquals( + "Tried to create " + expectedRegions + " regions " + "but only found " + RegionInfos.size(), + expectedRegions, RegionInfos.size()); + } @Test public void testMoveToPreviouslyAssignedRS() throws IOException, InterruptedException { @@ -426,38 +413,32 @@ public void testWALRollWriting() throws Exception { } ADMIN.rollWALWriter(regionServer.getServerName()); int count = AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)); - LOG.info("after flushing all regions and rolling logs there are " + - count + " log files"); + LOG.info("after flushing all regions and rolling logs there are " + count + " log files"); assertTrue(("actual count: " + count), count <= 2); } private void setUpforLogRolling() { // Force a region split after every 768KB - TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, - 768L * 1024L); + TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 768L * 1024L); // We roll the log after every 32 writes TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32); - TEST_UTIL.getConfiguration().setInt( - "hbase.regionserver.logroll.errors.tolerated", 2); + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.logroll.errors.tolerated", 2); TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 10 * 
1000); // For less frequently updated regions flush after every 2 flushes - TEST_UTIL.getConfiguration().setInt( - "hbase.hregion.memstore.optionalflushcount", 2); + TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.optionalflushcount", 2); // We flush the cache after every 8192 bytes - TEST_UTIL.getConfiguration().setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, - 8192); + TEST_UTIL.getConfiguration().setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192); // Increase the amount of time between client retries TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 10 * 1000); // Reduce thread wake frequency so that other threads can get // a chance to run. - TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY, - 2 * 1000); + TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000); /**** configuration for testLogRollOnDatanodeDeath ****/ // lower the namenode & datanode heartbeat so the namenode @@ -467,20 +448,18 @@ private void setUpforLogRolling() { // the namenode might still try to choose the recently-dead datanode // for a pipeline, so try to a new pipeline multiple times TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30); - TEST_UTIL.getConfiguration().setInt( - "hbase.regionserver.hlog.tolerable.lowreplication", 2); - TEST_UTIL.getConfiguration().setInt( - "hbase.regionserver.hlog.lowreplication.rolllimit", 3); + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2); + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3); } private HRegionServer startAndWriteData(TableName tableName, byte[] value) - throws IOException, InterruptedException { + throws IOException, InterruptedException { // When the hbase:meta table can be opened, the region servers are running TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); // Create the test table and open it TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); ADMIN.createTable(tableDescriptor); Table table = TEST_UTIL.getConnection().getTable(tableName); @@ -513,8 +492,8 @@ public void testDisableCatalogTable() throws Exception { // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table // actually getting disabled by the disableTable() call. 
TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes(name.getMethodName()))) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf1"))).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes(name.getMethodName()))) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf1"))).build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); } @@ -604,9 +583,8 @@ public void testDecommissionRegionServers() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TEST_UTIL.createMultiRegionTable(tableName, Bytes.toBytes("f"), 6); - ArrayList clusterRegionServers = - new ArrayList<>(ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet()); + ArrayList clusterRegionServers = new ArrayList<>( + ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet()); assertEquals(3, clusterRegionServers.size()); @@ -681,9 +659,9 @@ public void testGetRegionInfo() throws Exception { Assert.assertEquals(expectedStoreFilesSize, store.getSize()); for (int i = 0; i < 10; i++) { RegionInfo ri = ProtobufUtil - .toRegionInfo(TEST_UTIL.getAsyncConnection().getRegionServerAdmin(rs.getServerName()) - .getRegionInfo(RequestConverter.buildGetRegionInfoRequest(regionName)).get() - .getRegionInfo()); + .toRegionInfo(TEST_UTIL.getAsyncConnection().getRegionServerAdmin(rs.getServerName()) + .getRegionInfo(RequestConverter.buildGetRegionInfoRequest(regionName)).get() + .getRegionInfo()); Assert.assertEquals(region.getRegionInfo(), ri); @@ -710,11 +688,14 @@ public void testGetRegionInfo() throws Exception { /** * Do get of RegionInfo from Master using encoded region name. */ - private void testGetWithRegionName(ServerName sn, RegionInfo inputRI, - byte [] regionName) throws IOException { - RegionInfo ri = ProtobufUtil.toRegionInfo(FutureUtils.get( - TEST_UTIL.getAsyncConnection().getRegionServerAdmin(sn).getRegionInfo( - ProtobufUtil.getGetRegionInfoRequest(regionName))).getRegionInfo()); + private void testGetWithRegionName(ServerName sn, RegionInfo inputRI, byte[] regionName) + throws IOException { + RegionInfo ri = + ProtobufUtil + .toRegionInfo(FutureUtils + .get(TEST_UTIL.getAsyncConnection().getRegionServerAdmin(sn) + .getRegionInfo(ProtobufUtil.getGetRegionInfoRequest(regionName))) + .getRegionInfo()); assertEquals(inputRI, ri); } @@ -740,8 +721,7 @@ public boolean evaluate() throws Exception { // do some table modification TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName)) - .setMaxFileSize(11111111) - .build(); + .setMaxFileSize(11111111).build(); ADMIN.modifyTable(tableDesc); assertEquals(11111111, ADMIN.getDescriptor(tableName).getMaxFileSize()); } @@ -772,8 +752,7 @@ public boolean evaluate() throws Exception { // do some table modification TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName)) - .setMaxFileSize(11111111) - .build(); + .setMaxFileSize(11111111).build(); ADMIN.modifyTable(tableDesc); assertEquals(11111111, ADMIN.getDescriptor(tableName).getMaxFileSize()); } @@ -821,12 +800,12 @@ public void testSlowLogResponses() throws Exception { } Assert.assertEquals(countFailedClearSlowResponse, 0); - List onlineLogRecords = ADMIN.getLogEntries(new HashSet<>(serverNames), - "SLOW_LOG", ServerType.REGION_SERVER, 100, null); + List onlineLogRecords = ADMIN.getLogEntries(new HashSet<>(serverNames), "SLOW_LOG", + ServerType.REGION_SERVER, 100, 
null); // after cleanup of slowlog responses, total count of slowlog payloads should be 0 Assert.assertEquals(onlineLogRecords.size(), 0); List balancerDecisionRecords = - ADMIN.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER, 100, null); + ADMIN.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER, 100, null); Assert.assertEquals(balancerDecisionRecords.size(), 0); } @@ -859,8 +838,7 @@ private static void waitForServerCommissioned(ServerName excludeServer, try { List decomServers = TEST_UTIL.getAdmin().listDecommissionedRegionServers(); if (anyServerDecommissioned) { - return decomServers.size() == 1 - && decomServers.get(0).equals(excludeServer); + return decomServers.size() == 1 && decomServers.get(0).equals(excludeServer); } else { return decomServers.size() == 0; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java index 9657fda882d3..d4505c6675c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -75,7 +75,7 @@ public void testDisableAndEnableTable() throws IOException { ADMIN.disableTable(ht.getName()); assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster().getMaster() - .getTableStateManager().isTableState(ht.getName(), TableState.State.DISABLED)); + .getTableStateManager().isTableState(ht.getName(), TableState.State.DISABLED)); assertEquals(TableState.State.DISABLED, getStateFromMeta(table)); // Test that table is disabled @@ -102,7 +102,7 @@ public void testDisableAndEnableTable() throws IOException { assertTrue(ok); ADMIN.enableTable(table); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster() - .getTableStateManager().isTableState(ht.getName(), TableState.State.ENABLED)); + .getTableStateManager().isTableState(ht.getName(), TableState.State.ENABLED)); assertEquals(TableState.State.ENABLED, getStateFromMeta(table)); // Test that table is enabled @@ -184,11 +184,11 @@ public void testDisableAndEnableTables() throws IOException { public void testEnableTableRetainAssignment() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, - new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, - new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } }; + new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, + new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } }; int expectedRegions = splitKeys.length + 1; TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); ADMIN.createTable(desc, splitKeys); try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) { @@ -237,16 +237,18 @@ public void testEnableDisableAddColumnDeleteColumn() throws Exception { @Test public void testGetTableDescriptor() throws IOException { TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - 
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam2")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam3")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam2")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam3")).build(); ADMIN.createTable(htd); Table table = TEST_UTIL.getConnection().getTable(htd.getTableName()); TableDescriptor confirmedHtd = table.getDescriptor(); - //HBASE-26246 introduced persist of store file tracker into table descriptor - htd = TableDescriptorBuilder.newBuilder(htd).setValue(TRACKER_IMPL, - StoreFileTrackerFactory.getStoreFileTrackerName(TEST_UTIL.getConfiguration())). - build(); + // HBASE-26246 introduced persist of store file tracker into table descriptor + htd = + TableDescriptorBuilder.newBuilder(htd) + .setValue(TRACKER_IMPL, + StoreFileTrackerFactory.getStoreFileTrackerName(TEST_UTIL.getConfiguration())) + .build(); assertEquals(0, TableDescriptor.COMPARATOR.compare(htd, confirmedHtd)); MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection()); table.close(); @@ -262,12 +264,12 @@ public void testReadOnlyTableModify() throws IOException, InterruptedException { // Make table read only TableDescriptor htd = - TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName)).setReadOnly(true).build(); + TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName)).setReadOnly(true).build(); ADMIN.modifyTable(htd); // try to modify the read only table now htd = TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName)) - .setCompactionEnabled(false).build(); + .setCompactionEnabled(false).build(); ADMIN.modifyTable(htd); // Delete the table ADMIN.disableTable(tableName); @@ -361,7 +363,7 @@ public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException { exception = null; try { TableDescriptor htd = TableDescriptorBuilder.newBuilder(nonexistentTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); ADMIN.modifyTable(htd); } catch (IOException e) { exception = e; @@ -371,9 +373,9 @@ public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException { // Now make it so at least the table exists and then do tests against a // nonexistent column family -- see if we get right exceptions. 
final TableName tableName = - TableName.valueOf(name.getMethodName() + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(name.getMethodName() + EnvironmentEdgeManager.currentTime()); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build(); ADMIN.createTable(htd); try { exception = null; @@ -404,7 +406,7 @@ public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException { private static final String DST_IMPL = "hbase.store.file-tracker.migration.dst.impl"; private void verifyModifyTableResult(TableName tableName, byte[] family, byte[] qual, byte[] row, - byte[] value, String sft) throws IOException { + byte[] value, String sft) throws IOException { TableDescriptor td = ADMIN.getDescriptor(tableName); assertEquals(sft, td.getValue(StoreFileTrackerFactory.TRACKER_IMPL)); // no migration related configs @@ -437,33 +439,27 @@ public void testModifyTableStoreFileTracker() throws IOException { // change to MIGRATION, and then to FILE ADMIN.modifyTable(TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName)) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(SRC_IMPL, - StoreFileTrackerFactory.Trackers.FILE.name()) - .setValue(DST_IMPL, - StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .build()); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setValue(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()); ADMIN.modifyTableStoreFileTracker(tableName, StoreFileTrackerFactory.Trackers.FILE.name()); verifyModifyTableResult(tableName, family, qual, row, value, StoreFileTrackerFactory.Trackers.FILE.name()); // change to MIGRATION, and then to DEFAULT ADMIN.modifyTable(TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName)) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(SRC_IMPL, - StoreFileTrackerFactory.Trackers.FILE.name()) - .setValue(DST_IMPL, - StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .build()); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setValue(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()); ADMIN.modifyTableStoreFileTracker(tableName, StoreFileTrackerFactory.Trackers.DEFAULT.name()); verifyModifyTableResult(tableName, family, qual, row, value, StoreFileTrackerFactory.Trackers.DEFAULT.name()); } private void verifyModifyColumnFamilyResult(TableName tableName, byte[] family, byte[] qual, - byte[] row, byte[] value, String sft) throws IOException { + byte[] row, byte[] value, String sft) throws IOException { TableDescriptor td = ADMIN.getDescriptor(tableName); ColumnFamilyDescriptor cfd = td.getColumnFamily(family); assertEquals(sft, cfd.getConfigurationValue(StoreFileTrackerFactory.TRACKER_IMPL)); @@ -501,13 +497,15 @@ public void testModifyColumnFamilyStoreFileTracker() throws IOException { // change to MIGRATION, and then to FILE TableDescriptor current = ADMIN.getDescriptor(tableName); - ADMIN.modifyTable(TableDescriptorBuilder.newBuilder(current) - .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)) - 
.setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setConfiguration(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .setConfiguration(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()) - .build()); + ADMIN.modifyTable( + TableDescriptorBuilder.newBuilder(current) + .modifyColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(current.getColumnFamily(family)) + .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setConfiguration(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setConfiguration(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()) + .build()); ADMIN.modifyColumnFamilyStoreFileTracker(tableName, family, StoreFileTrackerFactory.Trackers.FILE.name()); verifyModifyColumnFamilyResult(tableName, family, qual, row, value, @@ -515,13 +513,15 @@ public void testModifyColumnFamilyStoreFileTracker() throws IOException { // change to MIGRATION, and then to DEFAULT current = ADMIN.getDescriptor(tableName); - ADMIN.modifyTable(TableDescriptorBuilder.newBuilder(current) - .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)) - .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setConfiguration(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .setConfiguration(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()) - .build()); + ADMIN.modifyTable( + TableDescriptorBuilder.newBuilder(current) + .modifyColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(current.getColumnFamily(family)) + .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setConfiguration(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setConfiguration(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()) + .build()); ADMIN.modifyColumnFamilyStoreFileTracker(tableName, family, StoreFileTrackerFactory.Trackers.DEFAULT.name()); verifyModifyColumnFamilyResult(tableName, family, qual, row, value, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java index 4a21b62348a1..b0e8b73d8a82 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,8 +39,7 @@ @Category({ MediumTests.class, ClientTests.class }) public class TestAdmin4 extends TestAdminBase { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAdmin4.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin4.class); // For HBASE-24208 @Test @@ -48,8 +47,7 @@ public void testDecommissionAndStopRegionServers() throws Exception { List decommissionedRegionServers = ADMIN.listDecommissionedRegionServers(); assertTrue(decommissionedRegionServers.isEmpty()); - ArrayList clusterRegionServers = - new ArrayList<>(ADMIN.getRegionServers(true)); + ArrayList clusterRegionServers = new ArrayList<>(ADMIN.getRegionServers(true)); List serversToDecommission = new ArrayList(); serversToDecommission.add(clusterRegionServers.get(0)); @@ -60,7 +58,7 @@ public void testDecommissionAndStopRegionServers() throws Exception { // Stop decommissioned region server and verify it is removed from draining znode ServerName serverName = serversToDecommission.get(0); - ADMIN.stopRegionServer(serverName.getHostname()+":"+serverName.getPort()); + ADMIN.stopRegionServer(serverName.getHostname() + ":" + serverName.getPort()); assertNotEquals("RS not removed from decommissioned list", -1, TEST_UTIL.waitFor(10000, () -> ADMIN.listDecommissionedRegionServers().isEmpty())); ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdminBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdminBase.java index 605efcd958aa..7b61d1ebfc32 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdminBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdminBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java index 14123b31f5b4..5663246ba486 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAllowPartialScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java index ac1bdb603892..88977ce3e36f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.java @@ -51,7 +51,7 @@ public class TestAlwaysSetScannerId { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAlwaysSetScannerId.class); + HBaseClassTestRule.forClass(TestAlwaysSetScannerId.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -78,8 +78,8 @@ public static void setUp() throws Exception { } HRI = table.getRegionLocator().getAllRegionLocations().get(0).getRegion(); } - CONN = - (AsyncConnectionImpl) ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); + CONN = (AsyncConnectionImpl) ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()) + .get(); STUB = CONN.getRegionServerStub(UTIL.getHBaseCluster().getRegionServer(0).getServerName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAppendFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAppendFromClientSide.java index 366990a2b959..1d5ce07a14a3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAppendFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAppendFromClientSide.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; + import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellBuilderType; @@ -48,11 +49,12 @@ public class TestAppendFromClientSide { HBaseClassTestRule.forClass(TestAppendFromClientSide.class); protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static byte [] ROW = Bytes.toBytes("testRow"); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte[] ROW = Bytes.toBytes("testRow"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); @Rule public TestName name = new TestName(); + @BeforeClass public static void beforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); @@ -70,14 +72,9 @@ public void testAppendWithCustomTimestamp() throws IOException { Table table = TEST_UTIL.createTable(TABLENAME, FAMILY); long timestamp = 999; Append append = new Append(ROW); - append.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(ROW) - .setFamily(FAMILY) - .setQualifier(QUALIFIER) - .setTimestamp(timestamp) - .setType(KeyValue.Type.Put.getCode()) - .setValue(Bytes.toBytes(100L)) - .build()); + append.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ROW) + .setFamily(FAMILY).setQualifier(QUALIFIER).setTimestamp(timestamp) + .setType(KeyValue.Type.Put.getCode()).setValue(Bytes.toBytes(100L)).build()); Result r = table.append(append); assertEquals(1, r.size()); assertEquals(timestamp, r.rawCells()[0].getTimestamp()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAccessControlAdminApi.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAccessControlAdminApi.java index 9182e6fb9fb2..c612ea377d5f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAccessControlAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAccessControlAdminApi.java @@ -1,14 +1,20 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; @@ -34,6 +40,7 @@ import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @RunWith(Parameterized.class) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java index a185501f6a40..b14264ba0509 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -85,8 +85,8 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 120000); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); TEST_UTIL.getConfiguration().setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0); - StartTestingClusterOption option = StartTestingClusterOption.builder().numRegionServers(2). 
- numMasters(2).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().numRegionServers(2).numMasters(2).build(); TEST_UTIL.startMiniCluster(option); ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); } @@ -107,18 +107,18 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { admin.listTableNames(Pattern.compile(tableName.getNameAsString() + ".*"), false) - .whenCompleteAsync((tables, err) -> { - if (tables != null) { - tables.forEach(table -> { - try { - admin.disableTable(table).join(); - } catch (Exception e) { - LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); - } - admin.deleteTable(table).join(); - }); - } - }, ForkJoinPool.commonPool()).join(); + .whenCompleteAsync((tables, err) -> { + if (tables != null) { + tables.forEach(table -> { + try { + admin.disableTable(table).join(); + } catch (Exception e) { + LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); + } + admin.deleteTable(table).join(); + }); + } + }, ForkJoinPool.commonPool()).join(); if (!admin.isBalancerEnabled().join()) { admin.balancerSwitch(true, true); } @@ -151,12 +151,12 @@ protected void createTableWithDefaultConf(TableName tableName, byte[][] splitKey protected void createTableWithDefaultConf(TableName tableName, int regionReplication, byte[][] splitKeys, byte[]... families) throws IOException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication); + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication); for (byte[] family : families) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } CompletableFuture future = splitKeys == null ? admin.createTable(builder.build()) - : admin.createTable(builder.build(), splitKeys); + : admin.createTable(builder.build(), splitKeys); future.join(); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java index f084eec45bb1..6bbbaabb8bf4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -182,11 +182,11 @@ public static class TestRpcTimeoutCoprocessor implements MasterCoprocessor, Mast public TestRpcTimeoutCoprocessor() { } - @Override public Optional getMasterObserver() { return Optional.of(this); } + @Override public void preGetNamespaceDescriptor(ObserverContext ctx, String namespace) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminMasterSwitch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminMasterSwitch.java index ce91e54d4cc2..7c93d64a6d1a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminMasterSwitch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminMasterSwitch.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,18 +41,18 @@ public class TestAsyncAdminMasterSwitch extends TestAsyncAdminBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncAdminMasterSwitch.class); + HBaseClassTestRule.forClass(TestAsyncAdminMasterSwitch.class); @Test public void testSwitch() throws IOException, InterruptedException { assertEquals(TEST_UTIL.getHBaseCluster().getRegionServerThreads().size(), admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.SERVERS_NAME)).join() - .getServersName().size()); + .getServersName().size()); TEST_UTIL.getMiniHBaseCluster().stopMaster(0).join(); assertTrue(TEST_UTIL.getMiniHBaseCluster().waitForActiveAndReadyMaster(30000)); // make sure that we could still call master assertEquals(TEST_UTIL.getHBaseCluster().getRegionServerThreads().size(), admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.SERVERS_NAME)).join() - .getServersName().size()); + .getServersName().size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminModifyStoreFileTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminModifyStoreFileTracker.java index c8821b68cf57..1d1b9a4e890e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminModifyStoreFileTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminModifyStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,14 +46,14 @@ public class TestAsyncAdminModifyStoreFileTracker extends TestAsyncAdminBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncAdminModifyStoreFileTracker.class); + HBaseClassTestRule.forClass(TestAsyncAdminModifyStoreFileTracker.class); private static final String SRC_IMPL = "hbase.store.file-tracker.migration.src.impl"; private static final String DST_IMPL = "hbase.store.file-tracker.migration.dst.impl"; private void verifyModifyTableResult(TableName tableName, byte[] family, byte[] qual, byte[] row, - byte[] value, String sft) throws IOException { + byte[] value, String sft) throws IOException { TableDescriptor td = admin.getDescriptor(tableName).join(); assertEquals(sft, td.getValue(StoreFileTrackerFactory.TRACKER_IMPL)); // no migration related configs @@ -75,41 +75,41 @@ public void testModifyTableStoreFileTracker() throws IOException { } // change to FILE admin.modifyTableStoreFileTracker(tableName, StoreFileTrackerFactory.Trackers.FILE.name()) - .join(); + .join(); verifyModifyTableResult(tableName, family, qual, row, value, StoreFileTrackerFactory.Trackers.FILE.name()); // change to FILE again, should have no effect admin.modifyTableStoreFileTracker(tableName, StoreFileTrackerFactory.Trackers.FILE.name()) - .join(); + .join(); verifyModifyTableResult(tableName, family, qual, row, value, StoreFileTrackerFactory.Trackers.FILE.name()); // change to MIGRATION, and then to FILE admin.modifyTable(TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName).join()) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .setValue(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()).join(); + 
.setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setValue(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()).join(); admin.modifyTableStoreFileTracker(tableName, StoreFileTrackerFactory.Trackers.FILE.name()) - .join(); + .join(); verifyModifyTableResult(tableName, family, qual, row, value, StoreFileTrackerFactory.Trackers.FILE.name()); // change to MIGRATION, and then to DEFAULT admin.modifyTable(TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName).join()) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .setValue(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()).join(); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setValue(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()).join(); admin.modifyTableStoreFileTracker(tableName, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .join(); + .join(); verifyModifyTableResult(tableName, family, qual, row, value, StoreFileTrackerFactory.Trackers.DEFAULT.name()); } private void verifyModifyColumnFamilyResult(TableName tableName, byte[] family, byte[] qual, - byte[] row, byte[] value, String sft) throws IOException { + byte[] row, byte[] value, String sft) throws IOException { TableDescriptor td = admin.getDescriptor(tableName).join(); ColumnFamilyDescriptor cfd = td.getColumnFamily(family); assertEquals(sft, cfd.getConfigurationValue(StoreFileTrackerFactory.TRACKER_IMPL)); @@ -146,13 +146,16 @@ public void testModifyColumnFamilyStoreFileTracker() throws IOException { // change to MIGRATION, and then to FILE TableDescriptor current = admin.getDescriptor(tableName).join(); - admin.modifyTable(TableDescriptorBuilder.newBuilder(current) - .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)) - .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setConfiguration(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .setConfiguration(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()) - .build()).join(); + admin.modifyTable( + TableDescriptorBuilder.newBuilder(current) + .modifyColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(current.getColumnFamily(family)) + .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setConfiguration(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setConfiguration(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()) + .build()) + .join(); admin.modifyColumnFamilyStoreFileTracker(tableName, family, StoreFileTrackerFactory.Trackers.FILE.name()).join(); verifyModifyColumnFamilyResult(tableName, family, qual, row, value, @@ -160,13 +163,16 @@ public void testModifyColumnFamilyStoreFileTracker() throws IOException { // change to MIGRATION, and then to DEFAULT current = admin.getDescriptor(tableName).join(); - admin.modifyTable(TableDescriptorBuilder.newBuilder(current) - .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(family)) - .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - 
.setConfiguration(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .setConfiguration(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()) - .build()).join(); + admin.modifyTable( + TableDescriptorBuilder.newBuilder(current) + .modifyColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(current.getColumnFamily(family)) + .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setConfiguration(SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setConfiguration(DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()).build()) + .build()) + .join(); admin.modifyColumnFamilyStoreFileTracker(tableName, family, StoreFileTrackerFactory.Trackers.DEFAULT.name()).join(); verifyModifyColumnFamilyResult(tableName, family, qual, row, value, @@ -187,8 +193,9 @@ public void testModifyStoreFileTrackerError() throws IOException { () -> FutureUtils.get(admin.modifyColumnFamilyStoreFileTracker(tableName, Bytes.toBytes("not_exists"), StoreFileTrackerFactory.Trackers.FILE.name()))); // to migration - assertThrows(DoNotRetryIOException.class, () -> FutureUtils.get(admin - .modifyTableStoreFileTracker(tableName, StoreFileTrackerFactory.Trackers.MIGRATION.name()))); + assertThrows(DoNotRetryIOException.class, + () -> FutureUtils.get(admin.modifyTableStoreFileTracker(tableName, + StoreFileTrackerFactory.Trackers.MIGRATION.name()))); // disabled admin.disableTable(tableName).join(); assertThrows(TableNotEnabledException.class, () -> FutureUtils.get( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java index 81dafae9f975..7824a5c01669 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,14 +48,14 @@ public class TestAsyncAdminWithRegionReplicas extends TestAsyncAdminBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncAdminWithRegionReplicas.class); + HBaseClassTestRule.forClass(TestAsyncAdminWithRegionReplicas.class); @BeforeClass public static void setUpBeforeClass() throws Exception { TestAsyncAdminBase.setUpBeforeClass(); HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); try (ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration())) { + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); } } @@ -87,7 +87,7 @@ public void testSplitNonDefaultReplica() throws InterruptedException, ExecutionException, IOException { createTableWithDefaultConf(tableName, 3); List locs = - ASYNC_CONN.getRegionLocator(tableName).getAllRegionLocations().get(); + ASYNC_CONN.getRegionLocator(tableName).getAllRegionLocations().get(); try { admin.splitRegion(locs.get(1).getRegion().getRegionName()).get(); } catch (ExecutionException e) { @@ -106,23 +106,21 @@ public void testMergeNonDefaultReplicas() byte[][] splitRows = new byte[][] { Bytes.toBytes(0) }; createTableWithDefaultConf(tableName, 3, splitRows); List locs = - ASYNC_CONN.getRegionLocator(tableName).getAllRegionLocations().get(); + ASYNC_CONN.getRegionLocator(tableName).getAllRegionLocations().get(); assertEquals(6, locs.size()); Map> replicaId2RegionInfo = locs.stream() - .map(HRegionLocation::getRegion).collect(Collectors.groupingBy(RegionInfo::getReplicaId)); + .map(HRegionLocation::getRegion).collect(Collectors.groupingBy(RegionInfo::getReplicaId)); List replicaOnes = replicaId2RegionInfo.get(1); try { - admin - .mergeRegions(replicaOnes.get(0).getRegionName(), replicaOnes.get(1).getRegionName(), false) - .get(); + admin.mergeRegions(replicaOnes.get(0).getRegionName(), replicaOnes.get(1).getRegionName(), + false).get(); } catch (ExecutionException e) { assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } List replicaTwos = replicaId2RegionInfo.get(2); try { - admin - .mergeRegions(replicaTwos.get(0).getRegionName(), replicaTwos.get(1).getRegionName(), false) - .get(); + admin.mergeRegions(replicaTwos.get(0).getRegionName(), replicaTwos.get(1).getRegionName(), + false).get(); } catch (ExecutionException e) { assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } @@ -132,7 +130,7 @@ public void testMergeNonDefaultReplicas() public void testCloneTableSchema() throws IOException, InterruptedException, ExecutionException { createTableWithDefaultConf(tableName, 3); admin.cloneTableSchema(tableName, TableName.valueOf(tableName.getNameAsString() + "_new"), true) - .get(); + .get(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java index 9a2c5505945a..64d3cd3c6e7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncBufferMutator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,6 +46,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; @@ -54,7 +55,7 @@ public class TestAsyncBufferMutator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncBufferMutator.class); + HBaseClassTestRule.forClass(TestAsyncBufferMutator.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -100,10 +101,10 @@ public void testWithSingleRegionTable() throws InterruptedException { private void test(TableName tableName) throws InterruptedException { List> futures = new ArrayList<>(); try (AsyncBufferedMutator mutator = - CONN.getBufferedMutatorBuilder(tableName).setWriteBufferSize(16 * 1024).build()) { + CONN.getBufferedMutatorBuilder(tableName).setWriteBufferSize(16 * 1024).build()) { List> fs = mutator.mutate(IntStream.range(0, COUNT / 2) - .mapToObj(i -> new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE)) - .collect(Collectors.toList())); + .mapToObj(i -> new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE)) + .collect(Collectors.toList())); // exceeded the write buffer size, a flush will be called directly fs.forEach(f -> f.join()); IntStream.range(COUNT / 2, COUNT).forEach(i -> { @@ -119,9 +120,9 @@ private void test(TableName tableName) throws InterruptedException { futures.forEach(f -> f.join()); AsyncTable table = CONN.getTable(tableName); IntStream.range(0, COUNT).mapToObj(i -> new Get(Bytes.toBytes(i))).map(g -> table.get(g).join()) - .forEach(r -> { - assertArrayEquals(VALUE, r.getValue(CF, CQ)); - }); + .forEach(r -> { + assertArrayEquals(VALUE, r.getValue(CF, CQ)); + }); } @Test @@ -150,7 +151,7 @@ public void testClosedMutate() throws InterruptedException { @Test public void testNoPeriodicFlush() throws InterruptedException, ExecutionException { try (AsyncBufferedMutator mutator = - CONN.getBufferedMutatorBuilder(TABLE_NAME).disableWriteBufferPeriodicFlush().build()) { + CONN.getBufferedMutatorBuilder(TABLE_NAME).disableWriteBufferPeriodicFlush().build()) { Put put = new Put(Bytes.toBytes(0)).addColumn(CF, CQ, VALUE); CompletableFuture future = mutator.mutate(put); Thread.sleep(2000); @@ -166,7 +167,7 @@ public void testNoPeriodicFlush() throws InterruptedException, ExecutionExceptio @Test public void testPeriodicFlush() throws InterruptedException, ExecutionException { AsyncBufferedMutator mutator = CONN.getBufferedMutatorBuilder(TABLE_NAME) - .setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS).build(); + .setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS).build(); Put put = new Put(Bytes.toBytes(0)).addColumn(CF, CQ, VALUE); CompletableFuture future = mutator.mutate(put); future.get(); @@ -179,8 +180,8 @@ public void testPeriodicFlush() throws InterruptedException, ExecutionException public void testCancelPeriodicFlush() throws InterruptedException, ExecutionException { Put put = new Put(Bytes.toBytes(0)).addColumn(CF, CQ, VALUE); try (AsyncBufferedMutatorImpl mutator = (AsyncBufferedMutatorImpl) CONN - .getBufferedMutatorBuilder(TABLE_NAME).setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS) - .setWriteBufferSize(10 * put.heapSize()).build()) { + .getBufferedMutatorBuilder(TABLE_NAME).setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS) + .setWriteBufferSize(10 * put.heapSize()).build()) { List> futures = new ArrayList<>(); 
futures.add(mutator.mutate(put)); Timeout task = mutator.periodicFlushTask; @@ -205,10 +206,10 @@ public void testCancelPeriodicFlush() throws InterruptedException, ExecutionExce public void testCancelPeriodicFlushByManuallyFlush() throws InterruptedException, ExecutionException { try (AsyncBufferedMutatorImpl mutator = - (AsyncBufferedMutatorImpl) CONN.getBufferedMutatorBuilder(TABLE_NAME) - .setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS).build()) { + (AsyncBufferedMutatorImpl) CONN.getBufferedMutatorBuilder(TABLE_NAME) + .setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS).build()) { CompletableFuture future = - mutator.mutate(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, VALUE)); + mutator.mutate(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, VALUE)); Timeout task = mutator.periodicFlushTask; // we should have scheduled a periodic flush task assertNotNull(task); @@ -225,8 +226,8 @@ public void testCancelPeriodicFlushByClose() throws InterruptedException, Execut CompletableFuture future; Timeout task; try (AsyncBufferedMutatorImpl mutator = - (AsyncBufferedMutatorImpl) CONN.getBufferedMutatorBuilder(TABLE_NAME) - .setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS).build()) { + (AsyncBufferedMutatorImpl) CONN.getBufferedMutatorBuilder(TABLE_NAME) + .setWriteBufferPeriodicFlush(1, TimeUnit.SECONDS).build()) { future = mutator.mutate(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, VALUE)); task = mutator.periodicFlushTask; // we should have scheduled a periodic flush task @@ -259,8 +260,8 @@ public void testRaceBetweenNormalFlushAndPeriodicFlush() throws InterruptedException, ExecutionException { Put put = new Put(Bytes.toBytes(0)).addColumn(CF, CQ, VALUE); try (AsyncBufferMutatorForTest mutator = - new AsyncBufferMutatorForTest(AsyncConnectionImpl.RETRY_TIMER, CONN.getTable(TABLE_NAME), - 10 * put.heapSize(), TimeUnit.MILLISECONDS.toNanos(200), 1024 * 1024)) { + new AsyncBufferMutatorForTest(AsyncConnectionImpl.RETRY_TIMER, CONN.getTable(TABLE_NAME), + 10 * put.heapSize(), TimeUnit.MILLISECONDS.toNanos(200), 1024 * 1024)) { CompletableFuture future = mutator.mutate(put); Timeout task = mutator.periodicFlushTask; // we should have scheduled a periodic flush task diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForCallQueueTooBig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForCallQueueTooBig.java index ba871066eb3c..79764abcea1b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForCallQueueTooBig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPauseForCallQueueTooBig.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -52,6 +53,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; @@ -60,7 +62,7 @@ public class TestAsyncClientPauseForCallQueueTooBig { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncClientPauseForCallQueueTooBig.class); + HBaseClassTestRule.forClass(TestAsyncClientPauseForCallQueueTooBig.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -84,7 +86,7 @@ 
public CQTBERpcScheduler(Configuration conf, int handlerCount, int priorityHandl int replicationHandlerCount, int metaTransitionHandler, PriorityFunction priority, Abortable server, int highPriorityLevel) { super(conf, handlerCount, priorityHandlerCount, replicationHandlerCount, - metaTransitionHandler, priority, server, highPriorityLevel); + metaTransitionHandler, priority, server, highPriorityLevel); } @Override @@ -108,13 +110,13 @@ public RpcScheduler create(Configuration conf, PriorityFunction priority, Aborta int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); return new CQTBERpcScheduler(conf, handlerCount, - conf.getInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT), - conf.getInt(HConstants.REGION_SERVER_REPLICATION_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT), - conf.getInt(HConstants.MASTER_META_TRANSITION_HANDLER_COUNT, - HConstants.MASTER__META_TRANSITION_HANDLER_COUNT_DEFAULT), - priority, server, HConstants.QOS_THRESHOLD); + conf.getInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, + HConstants.DEFAULT_REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT), + conf.getInt(HConstants.REGION_SERVER_REPLICATION_HANDLER_COUNT, + HConstants.DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT), + conf.getInt(HConstants.MASTER_META_TRANSITION_HANDLER_COUNT, + HConstants.MASTER__META_TRANSITION_HANDLER_COUNT_DEFAULT), + priority, server, HConstants.QOS_THRESHOLD); } } @@ -177,7 +179,7 @@ public void testBatch() throws Exception { try (AsyncBufferedMutator mutator = CONN.getBufferedMutator(TABLE_NAME)) { for (int i = 100; i < 110; i++) { futures.add(mutator - .mutate(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)))); + .mutate(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)))); } } return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get(); @@ -189,7 +191,7 @@ public void testScan() throws Exception { // we will hit CallQueueTooBigException two times so the sleep time should be twice assertTime(() -> { try ( - ResultScanner scanner = CONN.getTable(TABLE_NAME).getScanner(new Scan().setCaching(80))) { + ResultScanner scanner = CONN.getTable(TABLE_NAME).getScanner(new Scan().setCaching(80))) { for (int i = 0; i < 100; i++) { Result result = scanner.next(); assertArrayEquals(Bytes.toBytes(i), result.getValue(FAMILY, QUALIFIER)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPushback.java index fd746d5b73cd..ee4d85890123 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPushback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClientPushback.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestAsyncClientPushback extends ClientPushbackTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncClientPushback.class); + HBaseClassTestRule.forClass(TestAsyncClientPushback.class); private AsyncConnectionImpl conn; @@ -47,8 +47,8 @@ public class TestAsyncClientPushback extends ClientPushbackTestBase { @Before public void setUp() throws Exception { - conn = - (AsyncConnectionImpl) ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); + conn = (AsyncConnectionImpl) ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()) + .get(); mutator = conn.getBufferedMutator(tableName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java index 1fd0b0de26e8..3e69ee215668 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -79,8 +79,8 @@ public static void setUpBeforeClass() throws Exception { @Test public void testGetMasterInfoPort() throws Exception { - assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getInfoServer().getPort(), (int) admin - .getMasterInfoPort().get()); + assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getInfoServer().getPort(), + (int) admin.getMasterInfoPort().get()); } @Test @@ -153,38 +153,32 @@ public void testRollWALWALWriter() throws Exception { } admin.rollWALWriter(regionServer.getServerName()).join(); int count = AbstractFSWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)); - LOG.info("after flushing all regions and rolling logs there are " + - count + " log files"); + LOG.info("after flushing all regions and rolling logs there are " + count + " log files"); assertTrue(("actual count: " + count), count <= 2); } private void setUpforLogRolling() { // Force a region split after every 768KB - TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, - 768L * 1024L); + TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 768L * 1024L); // We roll the log after every 32 writes TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32); - TEST_UTIL.getConfiguration().setInt( - "hbase.regionserver.logroll.errors.tolerated", 2); + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.logroll.errors.tolerated", 2); TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 10 * 1000); // For less frequently updated regions flush after every 2 flushes - TEST_UTIL.getConfiguration().setInt( - "hbase.hregion.memstore.optionalflushcount", 2); + TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.optionalflushcount", 2); // We flush the cache after every 8192 bytes - TEST_UTIL.getConfiguration().setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, - 8192); + TEST_UTIL.getConfiguration().setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192); // Increase the amount of time between client retries TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 10 * 1000); // Reduce thread wake frequency so that other threads can get // a chance to run. 
- TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY, - 2 * 1000); + TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000); /**** configuration for testLogRollOnDatanodeDeath ****/ // lower the namenode & datanode heartbeat so the namenode @@ -194,10 +188,8 @@ private void setUpforLogRolling() { // the namenode might still try to choose the recently-dead datanode // for a pipeline, so try to a new pipeline multiple times TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30); - TEST_UTIL.getConfiguration().setInt( - "hbase.regionserver.hlog.tolerable.lowreplication", 2); - TEST_UTIL.getConfiguration().setInt( - "hbase.regionserver.hlog.lowreplication.rolllimit", 3); + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2); + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3); } private HRegionServer startAndWriteData(TableName tableName, byte[] value) throws Exception { @@ -224,13 +216,13 @@ private HRegionServer startAndWriteData(TableName tableName, byte[] value) throw public void testGetRegionLoads() throws Exception { // Turn off the balancer admin.balancerSwitch(false).join(); - TableName[] tables = - new TableName[] { TableName.valueOf(tableName.getNameAsString() + "1"), - TableName.valueOf(tableName.getNameAsString() + "2"), - TableName.valueOf(tableName.getNameAsString() + "3") }; + TableName[] tables = new TableName[] { TableName.valueOf(tableName.getNameAsString() + "1"), + TableName.valueOf(tableName.getNameAsString() + "2"), + TableName.valueOf(tableName.getNameAsString() + "3") }; createAndLoadTable(tables); // Sleep to wait region server report - Thread.sleep(TEST_UTIL.getConfiguration().getInt("hbase.regionserver.msginterval", 3 * 1000) * 2); + Thread + .sleep(TEST_UTIL.getConfiguration().getInt("hbase.regionserver.msginterval", 3 * 1000) * 2); // Check if regions match with the regionLoad from the server Collection servers = admin.getRegionServers().get(); for (ServerName serverName : servers) { @@ -251,8 +243,8 @@ public void testGetRegionLoads() throws Exception { // Check RegionLoad matches the regionLoad from ClusterStatus ClusterMetrics clusterStatus = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).get(); assertEquals(servers.size(), clusterStatus.getLiveServerMetrics().size()); - for (Map.Entry entry : - clusterStatus.getLiveServerMetrics().entrySet()) { + for (Map.Entry entry : clusterStatus.getLiveServerMetrics() + .entrySet()) { ServerName sn = entry.getKey(); ServerMetrics sm = entry.getValue(); compareRegionLoads(sm.getRegionMetrics().values(), admin.getRegionMetrics(sn).get()); @@ -264,7 +256,7 @@ public void testGetRegionLoads() throws Exception { } @Test - public void testGetRegionServers() throws Exception{ + public void testGetRegionServers() throws Exception { List serverNames = new ArrayList<>(admin.getRegionServers(true).get()); assertEquals(2, serverNames.size()); @@ -312,7 +304,8 @@ private void checkRegionsAndRegionLoads(Collection regions, } for (RegionInfo info : regions) { assertTrue("Region not in regionLoadMap region:" + info.getRegionNameAsString() - + " regionMap: " + regionLoadMap, regionLoadMap.containsKey(info.getRegionName())); + + " regionMap: " + regionLoadMap, + regionLoadMap.containsKey(info.getRegionName())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java index e52a2562a456..8ac7b907e957 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java index 45400114ba62..0e346be7b894 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,9 +52,8 @@ public void testAsyncDecommissionRegionServers() throws Exception { TEST_UTIL.createMultiRegionTable(tableName, FAMILY, 4); - ArrayList clusterRegionServers = - new ArrayList<>(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).get() - .getLiveServerMetrics().keySet()); + ArrayList clusterRegionServers = new ArrayList<>(admin + .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).get().getLiveServerMetrics().keySet()); assertEquals(TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size(), clusterRegionServers.size()); @@ -75,8 +74,8 @@ public void testAsyncDecommissionRegionServers() throws Exception { ServerName remainingServer = clusterRegionServers.get(0); // Decommission - admin.decommissionRegionServers(new ArrayList(serversToDecommssion.keySet()), - true).get(); + admin.decommissionRegionServers(new ArrayList(serversToDecommssion.keySet()), true) + .get(); assertEquals(1, admin.listDecommissionedRegionServers().get().size()); // Verify the regions have been off the decommissioned servers, all on the remaining server. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java index 480d797d5950..e0ebf3d921bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java @@ -40,7 +40,7 @@ public class TestAsyncMetaRegionLocator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncMetaRegionLocator.class); + HBaseClassTestRule.forClass(TestAsyncMetaRegionLocator.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNamespaceAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNamespaceAdminApi.java index f6bb3c6f3401..3e90852adc9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNamespaceAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNamespaceAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -118,11 +118,10 @@ public Void call() throws Exception { runWithExpectedException(new Callable() { @Override public Void call() throws Exception { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf("non_existing_namespace", - "table1")); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder + .newBuilder(TableName.valueOf("non_existing_namespace", "table1")); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("family1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("family1")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()).join(); return null; @@ -172,7 +171,8 @@ public Void call() throws Exception { admin.deleteNamespace(prefix + "ns1").join(); } - private static void runWithExpectedException(Callable callable, Class exceptionClass) { + private static void runWithExpectedException(Callable callable, + Class exceptionClass) { try { callable.call(); } catch (Exception ex) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index 040bc627ab9f..dc635d6fae12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,7 +70,7 @@ public class TestAsyncNonMetaRegionLocator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncNonMetaRegionLocator.class); + HBaseClassTestRule.forClass(TestAsyncNonMetaRegionLocator.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -102,7 +102,7 @@ public static void setUp() throws Exception { // Enable hbase:meta replication. HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, NUM_OF_META_REPLICA); TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster() - .getRegions(TableName.META_TABLE_NAME).size() >= NUM_OF_META_REPLICA); + .getRegions(TableName.META_TABLE_NAME).size() >= NUM_OF_META_REPLICA); SPLIT_KEYS = new byte[8][]; for (int i = 111; i < 999; i += 111) { @@ -121,9 +121,9 @@ public void setUpBeforeTest() throws InterruptedException, ExecutionException, I // Enable meta replica LoadBalance mode for this connection. 
c.set(RegionLocator.LOCATOR_META_REPLICAS_MODE, metaReplicaMode.toString()); ConnectionRegistry registry = - ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); - conn = - new AsyncConnectionImpl(c, registry, registry.getClusterId().get(), null, User.getCurrent()); + ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); + conn = new AsyncConnectionImpl(c, registry, registry.getClusterId().get(), null, + User.getCurrent()); locator = new AsyncNonMetaRegionLocator(conn); } @@ -141,8 +141,8 @@ public void tearDownAfterTest() throws IOException { @Parameterized.Parameters public static Collection parameters() { - return Arrays - .asList(new Object[][] { { CatalogReplicaMode.NONE }, { CatalogReplicaMode.LOAD_BALANCE } }); + return Arrays.asList( + new Object[][] { { CatalogReplicaMode.NONE }, { CatalogReplicaMode.LOAD_BALANCE } }); } private void createSingleRegionTable() throws IOException, InterruptedException { @@ -151,10 +151,9 @@ private void createSingleRegionTable() throws IOException, InterruptedException } private CompletableFuture getDefaultRegionLocation(TableName tableName, - byte[] row, RegionLocateType locateType, boolean reload) { - return locator - .getRegionLocations(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID, locateType, reload) - .thenApply(RegionLocations::getDefaultRegionLocation); + byte[] row, RegionLocateType locateType, boolean reload) { + return locator.getRegionLocations(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID, + locateType, reload).thenApply(RegionLocations::getDefaultRegionLocation); } @Test @@ -182,7 +181,7 @@ public void testDisableTable() throws IOException, InterruptedException { } private void assertLocEquals(byte[] startKey, byte[] endKey, ServerName serverName, - HRegionLocation loc) { + HRegionLocation loc) { RegionInfo info = loc.getRegion(); assertEquals(TABLE_NAME, info.getTable()); assertArrayEquals(startKey, info.getStartKey()); @@ -227,12 +226,12 @@ private static byte[][] getEndKeys() { private ServerName[] getLocations(byte[][] startKeys) { ServerName[] serverNames = new ServerName[startKeys.length]; TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer()) - .forEach(rs -> { - rs.getRegions(TABLE_NAME).forEach(r -> { - serverNames[Arrays.binarySearch(startKeys, r.getRegionInfo().getStartKey(), - Bytes::compareTo)] = rs.getServerName(); + .forEach(rs -> { + rs.getRegions(TABLE_NAME).forEach(r -> { + serverNames[Arrays.binarySearch(startKeys, r.getRegionInfo().getStartKey(), + Bytes::compareTo)] = rs.getServerName(); + }); }); - }); return serverNames; } @@ -246,7 +245,7 @@ public void testMultiRegionTable() throws IOException, InterruptedException { assertLocEquals(startKeys[i], i == startKeys.length - 1 ? 
EMPTY_END_ROW : startKeys[i + 1], serverNames[i], getDefaultRegionLocation(TABLE_NAME, startKeys[i], RegionLocateType.CURRENT, false) - .get()); + .get()); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } @@ -281,15 +280,16 @@ public void testRegionMove() throws IOException, InterruptedException, Execution createSingleRegionTable(); ServerName serverName = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName(); HRegionLocation loc = - getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT, false).get(); + getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT, false) + .get(); assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName, loc); ServerName newServerName = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer().getServerName()).filter(sn -> !sn.equals(serverName)).findAny() - .get(); + .map(t -> t.getRegionServer().getServerName()).filter(sn -> !sn.equals(serverName)) + .findAny().get(); TEST_UTIL.getAdmin().move(Bytes.toBytes(loc.getRegion().getEncodedName()), newServerName); while (!TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName() - .equals(newServerName)) { + .equals(newServerName)) { Thread.sleep(100); } // Should be same as it is in cache @@ -313,17 +313,17 @@ public void testLocateAfter() throws IOException, InterruptedException, Executio TEST_UTIL.createTable(TABLE_NAME, FAMILY, new byte[][] { splitKey }); TEST_UTIL.waitTableAvailable(TABLE_NAME); HRegionLocation currentLoc = - getDefaultRegionLocation(TABLE_NAME, row, RegionLocateType.CURRENT, false).get(); + getDefaultRegionLocation(TABLE_NAME, row, RegionLocateType.CURRENT, false).get(); ServerName currentServerName = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName(); assertLocEquals(EMPTY_START_ROW, splitKey, currentServerName, currentLoc); HRegionLocation afterLoc = - getDefaultRegionLocation(TABLE_NAME, row, RegionLocateType.AFTER, false).get(); + getDefaultRegionLocation(TABLE_NAME, row, RegionLocateType.AFTER, false).get(); ServerName afterServerName = - TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer()) - .filter(rs -> rs.getRegions(TABLE_NAME).stream() - .anyMatch(r -> Bytes.equals(splitKey, r.getRegionInfo().getStartKey()))) - .findAny().get().getServerName(); + TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer()) + .filter(rs -> rs.getRegions(TABLE_NAME).stream() + .anyMatch(r -> Bytes.equals(splitKey, r.getRegionInfo().getStartKey()))) + .findAny().get().getServerName(); assertLocEquals(splitKey, EMPTY_END_ROW, afterServerName, afterLoc); assertSame(afterLoc, @@ -339,8 +339,8 @@ public void testConcurrentLocate() throws IOException, InterruptedException, Exe ServerName[] serverNames = getLocations(startKeys); for (int i = 0; i < 100; i++) { locator.clearCache(TABLE_NAME); - List> futures = - IntStream.range(0, 1000).mapToObj(n -> String.format("%03d", n)).map(s -> Bytes.toBytes(s)) + List> futures = IntStream.range(0, 1000) + .mapToObj(n -> String.format("%03d", n)).map(s -> Bytes.toBytes(s)) .map(r -> getDefaultRegionLocation(TABLE_NAME, r, RegionLocateType.CURRENT, false)) .collect(toList()); for (int j = 0; j < 1000; j++) { @@ -359,8 +359,8 @@ public void testReload() throws Exception { getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, locateType, false).get()); } ServerName newServerName = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream() - 
.map(t -> t.getRegionServer().getServerName()).filter(sn -> !sn.equals(serverName)).findAny() - .get(); + .map(t -> t.getRegionServer().getServerName()).filter(sn -> !sn.equals(serverName)) + .findAny().get(); Admin admin = TEST_UTIL.getAdmin(); RegionInfo region = admin.getRegions(TABLE_NAME).stream().findAny().get(); admin.move(region.getEncodedNameAsBytes(), newServerName); @@ -389,8 +389,8 @@ public String explainFailure() throws Exception { @Override public boolean evaluate() throws Exception { HRegionLocation loc = - getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT, true) - .get(); + getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT, true) + .get(); return newServerName.equals(loc.getServerName()); } @@ -410,11 +410,11 @@ public String explainFailure() throws Exception { // Testcase for HBASE-20822 @Test public void testLocateBeforeLastRegion() - throws IOException, InterruptedException, ExecutionException { + throws IOException, InterruptedException, ExecutionException { createMultiRegionTable(); getDefaultRegionLocation(TABLE_NAME, SPLIT_KEYS[0], RegionLocateType.CURRENT, false).join(); HRegionLocation loc = - getDefaultRegionLocation(TABLE_NAME, EMPTY_END_ROW, RegionLocateType.BEFORE, false).get(); + getDefaultRegionLocation(TABLE_NAME, EMPTY_END_ROW, RegionLocateType.BEFORE, false).get(); // should locate to the last region assertArrayEquals(loc.getRegion().getEndKey(), EMPTY_END_ROW); } @@ -422,19 +422,19 @@ public void testLocateBeforeLastRegion() @Test public void testRegionReplicas() throws Exception { TEST_UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setRegionReplication(3).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setRegionReplication(3).build()); TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME); testLocator(TEST_UTIL, TABLE_NAME, new Locator() { @Override public void updateCachedLocationOnError(HRegionLocation loc, Throwable error) - throws Exception { + throws Exception { locator.updateCachedLocationOnError(loc, error); } @Override public RegionLocations getRegionLocations(TableName tableName, int replicaId, boolean reload) - throws Exception { + throws Exception { return locator.getRegionLocations(tableName, EMPTY_START_ROW, replicaId, RegionLocateType.CURRENT, reload).get(); } @@ -446,7 +446,8 @@ public RegionLocations getRegionLocations(TableName tableName, int replicaId, bo public void testLocateBeforeInOnlyRegion() throws IOException, InterruptedException { createSingleRegionTable(); HRegionLocation loc = - getDefaultRegionLocation(TABLE_NAME, Bytes.toBytes(1), RegionLocateType.BEFORE, false).join(); + getDefaultRegionLocation(TABLE_NAME, Bytes.toBytes(1), RegionLocateType.BEFORE, false) + .join(); // should locate to the only region assertArrayEquals(loc.getRegion().getStartKey(), EMPTY_START_ROW); assertArrayEquals(loc.getRegion().getEndKey(), EMPTY_END_ROW); @@ -456,8 +457,9 @@ public void testLocateBeforeInOnlyRegion() throws IOException, InterruptedExcept public void testConcurrentUpdateCachedLocationOnError() throws Exception { createSingleRegionTable(); HRegionLocation loc = - getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT, false).get(); + getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT, false) + .get(); IntStream.range(0, 100).parallel() - .forEach(i -> locator.updateCachedLocationOnError(loc, new 
NotServingRegionException())); + .forEach(i -> locator.updateCachedLocationOnError(loc, new NotServingRegionException())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java index 690a3848a4f6..d168475e093d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncNonMetaRegionLocatorConcurrenyLimit.class); + HBaseClassTestRule.forClass(TestAsyncNonMetaRegionLocatorConcurrenyLimit.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -127,10 +127,10 @@ public static void setUp() throws Exception { ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, - registry.getClusterId().get(), null, User.getCurrent()); + registry.getClusterId().get(), null, User.getCurrent()); LOCATOR = new AsyncNonMetaRegionLocator(CONN); SPLIT_KEYS = IntStream.range(1, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i))) - .toArray(byte[][]::new); + .toArray(byte[][]::new); TEST_UTIL.createTable(TABLE_NAME, FAMILY, SPLIT_KEYS); TEST_UTIL.waitTableAvailable(TABLE_NAME); } @@ -162,10 +162,10 @@ private void assertLocs(List> futures) @Test public void test() throws InterruptedException, ExecutionException { List> futures = - IntStream.range(0, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i))) - .map(r -> LOCATOR.getRegionLocations(TABLE_NAME, r, RegionReplicaUtil.DEFAULT_REPLICA_ID, - RegionLocateType.CURRENT, false)) - .collect(toList()); + IntStream.range(0, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i))) + .map(r -> LOCATOR.getRegionLocations(TABLE_NAME, r, + RegionReplicaUtil.DEFAULT_REPLICA_ID, RegionLocateType.CURRENT, false)) + .collect(toList()); assertLocs(futures); assertTrue("max allowed is " + MAX_ALLOWED + " but actual is " + MAX_CONCURRENCY.get(), MAX_CONCURRENCY.get() <= MAX_ALLOWED); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java index 37b2b88c43e6..203911d4e1c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncQuotaAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncQuotaAdminApi.java index fbf38a0effa9..608f2348515b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncQuotaAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncQuotaAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -81,14 +81,14 @@ public void testThrottleType() throws Exception { int countGlobalBypass = 0; for (QuotaSettings settings : admin.getQuota(null).get()) { switch (settings.getQuotaType()) { - case THROTTLE: - countThrottle++; - break; - case GLOBAL_BYPASS: - countGlobalBypass++; - break; - default: - fail("unexpected settings type: " + settings.getQuotaType()); + case THROTTLE: + countThrottle++; + break; + case GLOBAL_BYPASS: + countGlobalBypass++; + break; + default: + fail("unexpected settings type: " + settings.getQuotaType()); } } assertEquals(2, countThrottle); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index 6ff2d22db98a..118eadf4aab5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ /** * Class to test asynchronous region admin operations. * @see TestAsyncRegionAdminApi2 This test and it used to be joined it was taking longer than our - * ten minute timeout so they were split. + * ten minute timeout so they were split. 
*/ @RunWith(Parameterized.class) @Category({ LargeTests.class, ClientTests.class }) @@ -105,9 +105,8 @@ public void testAssignRegionAndUnassignRegion() throws Exception { RegionInfo createTableAndGetOneRegion(final TableName tableName) throws IOException, InterruptedException, ExecutionException { - TableDescriptor desc = - TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5).get(); // wait till the table is assigned @@ -133,10 +132,11 @@ public void testGetRegionByStateOfTable() throws Exception { RegionStates regionStates = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); - assertTrue(regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.OPEN) - .stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)); + assertTrue(regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.OPEN).stream() + .anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)); assertFalse(regionStates.getRegionByStateOfTable(TableName.valueOf("I_am_the_phantom")) - .get(RegionState.State.OPEN).stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)); + .get(RegionState.State.OPEN).stream() + .anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)); } @Test @@ -183,15 +183,15 @@ public void testGetOnlineRegions() throws Exception { createTableAndGetOneRegion(tableName); AtomicInteger regionServerCount = new AtomicInteger(0); TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().stream() - .map(rsThread -> rsThread.getRegionServer()).forEach(rs -> { - ServerName serverName = rs.getServerName(); - try { - assertEquals(admin.getRegions(serverName).get().size(), rs.getRegions().size()); - } catch (Exception e) { - fail("admin.getOnlineRegions() method throws a exception: " + e.getMessage()); - } - regionServerCount.incrementAndGet(); - }); + .map(rsThread -> rsThread.getRegionServer()).forEach(rs -> { + ServerName serverName = rs.getServerName(); + try { + assertEquals(admin.getRegions(serverName).get().size(), rs.getRegions().size()); + } catch (Exception e) { + fail("admin.getOnlineRegions() method throws a exception: " + e.getMessage()); + } + regionServerCount.incrementAndGet(); + }); assertEquals(TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size(), regionServerCount.get()); } @@ -199,13 +199,11 @@ public void testGetOnlineRegions() throws Exception { @Test public void testFlushTableAndRegion() throws Exception { RegionInfo hri = createTableAndGetOneRegion(tableName); - ServerName serverName = - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .getRegionServerOfRegion(hri); - HRegionServer regionServer = - TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().stream() - .map(rsThread -> rsThread.getRegionServer()) - .filter(rs -> rs.getServerName().equals(serverName)).findFirst().get(); + ServerName serverName = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getRegionServerOfRegion(hri); + HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().stream() + .map(rsThread -> rsThread.getRegionServer()) + .filter(rs -> rs.getServerName().equals(serverName)).findFirst().get(); // write a put into the specific region ASYNC_CONN.getTable(tableName) @@ 
-253,12 +251,11 @@ private void waitUntilMobCompactionFinished(TableName tableName) @Test public void testCompactMob() throws Exception { - ColumnFamilyDescriptor columnDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("mob")) - .setMobEnabled(true).setMobThreshold(0).build(); + ColumnFamilyDescriptor columnDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes("mob")).setMobEnabled(true).setMobThreshold(0).build(); - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(columnDescriptor).build(); + TableDescriptor tableDescriptor = + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(columnDescriptor).build(); admin.createTable(tableDescriptor).get(); @@ -279,9 +276,8 @@ public void testCompactRegionServer() throws Exception { createTableWithDefaultConf(tableName, null, families); loadData(tableName, families, 3000, 8); - List rsList = - TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().stream() - .map(rsThread -> rsThread.getRegionServer()).collect(Collectors.toList()); + List rsList = TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().stream() + .map(rsThread -> rsThread.getRegionServer()).collect(Collectors.toList()); List regions = new ArrayList<>(); rsList.forEach(rs -> regions.addAll(rs.getRegions(tableName))); assertEquals(1, regions.size()); @@ -307,38 +303,33 @@ public void testCompactRegionServer() throws Exception { public void testCompactionSwitchStates() throws Exception { // Create a table with regions byte[] family = Bytes.toBytes("family"); - byte[][] families = {family, Bytes.add(family, Bytes.toBytes("2")), - Bytes.add(family, Bytes.toBytes("3"))}; + byte[][] families = + { family, Bytes.add(family, Bytes.toBytes("2")), Bytes.add(family, Bytes.toBytes("3")) }; createTableWithDefaultConf(tableName, null, families); loadData(tableName, families, 3000, 8); List regions = new ArrayList<>(); - TEST_UTIL - .getHBaseCluster() - .getLiveRegionServerThreads() + TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads() .forEach(rsThread -> regions.addAll(rsThread.getRegionServer().getRegions(tableName))); CompletableFuture> listCompletableFuture = admin.compactionSwitch(true, new ArrayList<>()); Map pairs = listCompletableFuture.get(); for (Map.Entry p : pairs.entrySet()) { - assertEquals("Default compaction state, expected=enabled actual=disabled", - true, p.getValue()); + assertEquals("Default compaction state, expected=enabled actual=disabled", true, + p.getValue()); } CompletableFuture> listCompletableFuture1 = admin.compactionSwitch(false, new ArrayList<>()); Map pairs1 = listCompletableFuture1.get(); for (Map.Entry p : pairs1.entrySet()) { - assertEquals("Last compaction state, expected=enabled actual=disabled", - true, p.getValue()); + assertEquals("Last compaction state, expected=enabled actual=disabled", true, p.getValue()); } CompletableFuture> listCompletableFuture2 = admin.compactionSwitch(true, new ArrayList<>()); Map pairs2 = listCompletableFuture2.get(); for (Map.Entry p : pairs2.entrySet()) { - assertEquals("Last compaction state, expected=disabled actual=enabled", - false, p.getValue()); + assertEquals("Last compaction state, expected=disabled actual=enabled", false, p.getValue()); } - ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0) - .getServerName(); + ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); List serverNameList = new ArrayList(); serverNameList.add(serverName.getServerName()); 
CompletableFuture> listCompletableFuture3 = @@ -346,16 +337,14 @@ public void testCompactionSwitchStates() throws Exception { Map pairs3 = listCompletableFuture3.get(); assertEquals(pairs3.entrySet().size(), 1); for (Map.Entry p : pairs3.entrySet()) { - assertEquals("Last compaction state, expected=enabled actual=disabled", - true, p.getValue()); + assertEquals("Last compaction state, expected=enabled actual=disabled", true, p.getValue()); } CompletableFuture> listCompletableFuture4 = admin.compactionSwitch(true, serverNameList); Map pairs4 = listCompletableFuture4.get(); assertEquals(pairs4.entrySet().size(), 1); for (Map.Entry p : pairs4.entrySet()) { - assertEquals("Last compaction state, expected=disabled actual=enabled", - false, p.getValue()); + assertEquals("Last compaction state, expected=disabled actual=enabled", false, p.getValue()); } } @@ -403,9 +392,7 @@ private void compactionTest(final TableName tableName, final int flushes, } List regions = new ArrayList<>(); - TEST_UTIL - .getHBaseCluster() - .getLiveRegionServerThreads() + TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads() .forEach(rsThread -> regions.addAll(rsThread.getRegionServer().getRegions(tableName))); assertEquals(1, regions.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java index c9d47dc65323..e0e6bdcbd99a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,8 +51,8 @@ /** * Class to test asynchronous region admin operations. - * @see TestAsyncRegionAdminApi This test and it used to be joined it was taking longer than our - * ten minute timeout so they were split. + * @see TestAsyncRegionAdminApi This test and it used to be joined it was taking longer than our ten + * minute timeout so they were split. 
*/ @RunWith(Parameterized.class) @Category({ LargeTests.class, ClientTests.class }) @@ -79,7 +79,7 @@ public void testGetRegionLocation() throws Exception { @Test public void testSplitSwitch() throws Exception { createTableWithDefaultConf(tableName); - byte[][] families = {FAMILY}; + byte[][] families = { FAMILY }; final int rows = 10000; TestAsyncRegionAdminApi.loadData(tableName, families, rows); @@ -93,7 +93,7 @@ public void testSplitSwitch() throws Exception { try { admin.split(tableName, Bytes.toBytes(rows / 2)).join(); } catch (Exception e) { - //Expected + // Expected } int count = admin.getRegions(tableName).get().size(); assertTrue(originalCount == count); @@ -111,7 +111,7 @@ public void testSplitSwitch() throws Exception { // It was ignored in TestSplitOrMergeStatus, too public void testMergeSwitch() throws Exception { createTableWithDefaultConf(tableName); - byte[][] families = {FAMILY}; + byte[][] families = { FAMILY }; TestAsyncRegionAdminApi.loadData(tableName, families, 1000); AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); @@ -126,7 +126,7 @@ public void testMergeSwitch() throws Exception { Threads.sleep(100); } assertTrue("originalCount=" + originalCount + ", postSplitCount=" + postSplitCount, - originalCount != postSplitCount); + originalCount != postSplitCount); // Merge switch is off so merge should NOT succeed. assertTrue(admin.mergeSwitch(false).get()); @@ -156,12 +156,12 @@ private void initSplitMergeSwitch() throws Exception { @Test public void testMergeRegions() throws Exception { - byte[][] splitRows = new byte[][]{Bytes.toBytes("3"), Bytes.toBytes("6")}; + byte[][] splitRows = new byte[][] { Bytes.toBytes("3"), Bytes.toBytes("6") }; createTableWithDefaultConf(tableName, splitRows); AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); - List regionLocations = ClientMetaTableAccessor - .getTableHRegionLocations(metaTable, tableName).get(); + List regionLocations = + ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); RegionInfo regionA; RegionInfo regionB; RegionInfo regionC; @@ -174,8 +174,7 @@ public void testMergeRegions() throws Exception { regionC = regionLocations.get(2).getRegion(); admin.mergeRegions(regionA.getRegionName(), regionB.getRegionName(), false).get(); - regionLocations = ClientMetaTableAccessor - .getTableHRegionLocations(metaTable, tableName).get(); + regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); assertEquals(2, regionLocations.size()); for (HRegionLocation rl : regionLocations) { @@ -195,11 +194,9 @@ public void testMergeRegions() throws Exception { Thread.sleep(200); } // merge with encoded name - admin.mergeRegions(regionC.getRegionName(), mergedChildRegion.getRegionName(), - false).get(); + admin.mergeRegions(regionC.getRegionName(), mergedChildRegion.getRegionName(), false).get(); - regionLocations = ClientMetaTableAccessor - .getTableHRegionLocations(metaTable, tableName).get(); + regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); assertEquals(1, regionLocations.size()); } @@ -219,7 +216,8 @@ public void testMergeRegionsInvalidRegionCount() throws Exception { // 1 try { admin.mergeRegions(regions.stream().limit(1).map(RegionInfo::getEncodedNameAsBytes) - .collect(Collectors.toList()), false).get(); + .collect(Collectors.toList()), + false).get(); fail(); } catch (ExecutionException e) { // expected @@ -233,18 +231,18 @@ public void testSplitTable() throws Exception { 
splitTest(TableName.valueOf("testSplitTable"), 3000, false, null); splitTest(TableName.valueOf("testSplitTableWithSplitPoint"), 3000, false, Bytes.toBytes("3")); splitTest(TableName.valueOf("testSplitTableRegion"), 3000, true, null); - splitTest(TableName.valueOf("testSplitTableRegionWithSplitPoint2"), 3000, true, Bytes.toBytes("3")); + splitTest(TableName.valueOf("testSplitTableRegionWithSplitPoint2"), 3000, true, + Bytes.toBytes("3")); } - private void - splitTest(TableName tableName, int rowCount, boolean isSplitRegion, byte[] splitPoint) - throws Exception { + private void splitTest(TableName tableName, int rowCount, boolean isSplitRegion, + byte[] splitPoint) throws Exception { // create table createTableWithDefaultConf(tableName); AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); - List regionLocations = ClientMetaTableAccessor - .getTableHRegionLocations(metaTable, tableName).get(); + List regionLocations = + ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); assertEquals(1, regionLocations.size()); AsyncTable table = ASYNC_CONN.getTable(tableName); @@ -273,8 +271,8 @@ public void testSplitTable() throws Exception { int count = 0; for (int i = 0; i < 45; i++) { try { - regionLocations = ClientMetaTableAccessor - .getTableHRegionLocations(metaTable, tableName).get(); + regionLocations = + ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); count = regionLocations.size(); if (count >= 2) { break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java index 0a0885e2b7c9..e3b16de67272 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -62,7 +62,7 @@ public class TestAsyncRegionLocator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncRegionLocator.class); + HBaseClassTestRule.forClass(TestAsyncRegionLocator.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -102,7 +102,7 @@ public static void setUp() throws Exception { ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, - registry.getClusterId().get(), null, User.getCurrent()); + registry.getClusterId().get(), null, User.getCurrent()); LOCATOR = CONN.getLocator(); } @@ -146,9 +146,10 @@ public void testNoCompletionException() { SLEEP_MS = 0; AtomicReference errorHolder = new AtomicReference<>(); try { - LOCATOR.getRegionLocation(TableName.valueOf("NotExist"), EMPTY_START_ROW, - RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)) - .whenComplete((r, e) -> errorHolder.set(e)).join(); + LOCATOR + .getRegionLocation(TableName.valueOf("NotExist"), EMPTY_START_ROW, + RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)) + .whenComplete((r, e) -> errorHolder.set(e)).join(); fail(); } catch (CompletionException e) { // join will return a CompletionException, which is OK diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java index f7a28f9c998c..b6f667ab2920 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,7 +69,7 @@ public class TestAsyncReplicationAdminApi extends TestAsyncAdminBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncReplicationAdminApi.class); + HBaseClassTestRule.forClass(TestAsyncReplicationAdminApi.class); private final String ID_ONE = "1"; private static String KEY_ONE; @@ -99,7 +99,7 @@ public void clearPeerAndQueues() throws IOException, ReplicationException { } catch (Exception e) { } ReplicationQueueStorage queueStorage = ReplicationStorageFactory - .getReplicationQueueStorage(TEST_UTIL.getZooKeeperWatcher(), TEST_UTIL.getConfiguration()); + .getReplicationQueueStorage(TEST_UTIL.getZooKeeperWatcher(), TEST_UTIL.getConfiguration()); for (ServerName serverName : queueStorage.getListOfReplicators()) { for (String queue : queueStorage.getAllQueues(serverName)) { queueStorage.removeQueue(serverName, queue); @@ -141,11 +141,8 @@ public void testAddRemovePeer() throws Exception { @Test public void testPeerConfig() throws Exception { - ReplicationPeerConfig config = ReplicationPeerConfig.newBuilder() - .setClusterKey(KEY_ONE) - .putConfiguration("key1", "value1") - .putConfiguration("key2", "value2") - .build(); + ReplicationPeerConfig config = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE) + .putConfiguration("key1", "value1").putConfiguration("key2", "value2").build(); admin.addReplicationPeer(ID_ONE, config).join(); List peers = admin.listReplicationPeers().get(); @@ -176,7 +173,7 @@ public void testEnableDisablePeer() throws Exception { @Test public void testAppendPeerTableCFs() throws Exception { ReplicationPeerConfigBuilder rpcBuilder = - ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); final TableName tableName1 = TableName.valueOf(tableName.getNameAsString() + "t1"); final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "t2"); final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "t3"); @@ -195,7 +192,7 @@ public void testAppendPeerTableCFs() throws Exception { tableCFs.put(tableName1, null); admin.appendReplicationPeerTableCFs(ID_ONE, tableCFs).join(); Map> result = - admin.getReplicationPeerConfig(ID_ONE).get().getTableCFsMap(); + admin.getReplicationPeerConfig(ID_ONE).get().getTableCFsMap(); assertEquals(1, result.size()); assertEquals(true, result.containsKey(tableName1)); assertNull(result.get(tableName1)); @@ -280,7 +277,7 @@ public void testAppendPeerTableCFs() throws Exception { @Test public void testRemovePeerTableCFs() throws Exception { ReplicationPeerConfigBuilder rpcBuilder = - ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); final TableName tableName1 = TableName.valueOf(tableName.getNameAsString() + "t1"); final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "t2"); final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "t3"); @@ -309,13 +306,13 @@ public void testRemovePeerTableCFs() throws Exception { tableCFs.clear(); tableCFs.put(tableName3, null); admin.removeReplicationPeerTableCFs(ID_ONE, tableCFs).join(); - fail("Test case should fail as removing table-cfs from a peer whose" + - " table-cfs didn't contain t3"); + fail("Test case should fail as removing table-cfs from a peer whose" + + " table-cfs didn't contain t3"); } catch (CompletionException e) { 
assertTrue(e.getCause() instanceof ReplicationException); } Map> result = - admin.getReplicationPeerConfig(ID_ONE).get().getTableCFsMap(); + admin.getReplicationPeerConfig(ID_ONE).get().getTableCFsMap(); assertEquals(2, result.size()); assertTrue("Should contain t1", result.containsKey(tableName1)); assertTrue("Should contain t2", result.containsKey(tableName2)); @@ -369,7 +366,7 @@ public void testSetPeerNamespaces() throws Exception { String ns2 = "ns2"; ReplicationPeerConfigBuilder rpcBuilder = - ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); rpcBuilder.setReplicateAllUserTables(false); admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); @@ -405,7 +402,7 @@ public void testNamespacesAndTableCfsConfigConflict() throws Exception { final TableName tableName2 = TableName.valueOf(ns2 + ":" + tableName.getNameAsString() + "2"); ReplicationPeerConfigBuilder rpcBuilder = - ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); rpcBuilder.setReplicateAllUserTables(false); admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join(); @@ -446,9 +443,10 @@ public void testNamespacesAndTableCfsConfigConflict() throws Exception { @Test public void testPeerBandwidth() throws Exception { ReplicationPeerConfigBuilder rpcBuilder = - ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE); - admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join();; + admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join(); + ; assertEquals(0, admin.getReplicationPeerConfig(ID_ONE).get().getBandwidth()); rpcBuilder.setBandwidth(2097152); @@ -480,8 +478,10 @@ public void testClusterKeyWithTrailingSpace() throws Exception { @Test public void testInvalidReplicationEndpoint() throws InterruptedException { try { - admin.addReplicationPeer(ID_ONE, - ReplicationPeerConfig.newBuilder().setReplicationEndpointImpl("whatever").build()).get(); + admin + .addReplicationPeer(ID_ONE, + ReplicationPeerConfig.newBuilder().setReplicationEndpointImpl("whatever").build()) + .get(); fail(); } catch (ExecutionException e) { assertThat(e.getCause(), instanceOf(DoNotRetryIOException.class)); @@ -492,18 +492,16 @@ public void testInvalidReplicationEndpoint() throws InterruptedException { @Test public void testSetReplicationEndpoint() throws InterruptedException, ExecutionException { // make sure that we do not need to set cluster key when we use customized ReplicationEndpoint - admin - .addReplicationPeer(ID_ONE, - ReplicationPeerConfig.newBuilder() + admin.addReplicationPeer(ID_ONE, + ReplicationPeerConfig.newBuilder() .setReplicationEndpointImpl(VerifyWALEntriesReplicationEndpoint.class.getName()).build()) - .get(); + .get(); // but we still need to check cluster key if we specify the default ReplicationEndpoint try { - admin - .addReplicationPeer(ID_TWO, ReplicationPeerConfig.newBuilder() + admin.addReplicationPeer(ID_TWO, ReplicationPeerConfig.newBuilder() .setReplicationEndpointImpl(HBaseInterClusterReplicationEndpoint.class.getName()).build()) - .get(); + .get(); fail(); } catch (ExecutionException e) { assertThat(e.getCause(), instanceOf(DoNotRetryIOException.class)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java index 6337bd0d0139..68880d450dbe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ * Class to test asynchronous replication admin operations when more than 1 cluster */ @RunWith(Parameterized.class) -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestAsyncReplicationAdminApiWithClusters extends TestAsyncAdminBase { @ClassRule @@ -83,12 +83,11 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL2 = new HBaseTestingUtil(conf2); TEST_UTIL2.startMiniCluster(); - connection = - ConnectionFactory.createAsyncConnection(TEST_UTIL2.getConfiguration()).get(); + connection = ConnectionFactory.createAsyncConnection(TEST_UTIL2.getConfiguration()).get(); admin2 = connection.getAdmin(); - ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(TEST_UTIL2.getClusterKey()).build(); + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL2.getClusterKey()).build(); ASYNC_CONN.getAdmin().addReplicationPeer(ID_SECOND, rpc).join(); } @@ -158,8 +157,8 @@ public void testEnableReplicationWhenTableDescriptorIsNotSameInClusters() throws createTableWithDefaultConf(admin2, tableName); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName).get()); - builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("newFamily")) - .build()); + builder.setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("newFamily")).build()); admin2.disableTable(tableName).join(); admin2.modifyTable(builder.build()).join(); admin2.enableTable(tableName).join(); @@ -227,23 +226,22 @@ public void testEnableReplicationForExplicitSetTableCfs() throws Exception { // Only create table in source cluster createTableWithDefaultConf(tableName); createTableWithDefaultConf(tableName2); - assertFalse("Table should not exists in the peer cluster", - admin2.tableExists(tableName).get()); + assertFalse("Table should not exists in the peer cluster", admin2.tableExists(tableName).get()); assertFalse("Table should not exists in the peer cluster", admin2.tableExists(tableName2).get()); Map> tableCfs = new HashMap<>(); tableCfs.put(tableName, null); - ReplicationPeerConfigBuilder rpcBuilder = ReplicationPeerConfig - .newBuilder(admin.getReplicationPeerConfig(ID_SECOND).get()) - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs); + ReplicationPeerConfigBuilder rpcBuilder = + ReplicationPeerConfig.newBuilder(admin.getReplicationPeerConfig(ID_SECOND).get()) + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs); try { // Only add tableName to replication peer config admin.updateReplicationPeerConfig(ID_SECOND, rpcBuilder.build()).join(); admin.enableTableReplication(tableName2).join(); assertFalse("Table should not be created if user has set table cfs explicitly for the " - + "peer and this is not part of that collection", admin2.tableExists(tableName2).get()); + + "peer and this is not part of that collection", + 
admin2.tableExists(tableName2).get()); // Add tableName2 to replication peer config, too tableCfs.put(tableName2, null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java index 9da38ae4959f..80af0ef93ee9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestAsyncSingleRequestRpcRetryingCaller { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncSingleRequestRpcRetryingCaller.class); + HBaseClassTestRule.forClass(TestAsyncSingleRequestRpcRetryingCaller.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -75,7 +75,7 @@ public static void setUpBeforeClass() throws Exception { ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, - registry.getClusterId().get(), null, User.getCurrent()); + registry.getClusterId().get(), null, User.getCurrent()); } @AfterClass @@ -92,7 +92,7 @@ public void testRegionMove() throws InterruptedException, ExecutionException, IO TEST_UTIL.getAdmin().move(loc.getRegion().getEncodedNameAsBytes(), TEST_UTIL.getHBaseCluster().getRegionServer(1 - index).getServerName()); AsyncTable table = CONN.getTableBuilder(TABLE_NAME).setRetryPause(100, TimeUnit.MILLISECONDS) - .setMaxRetries(30).build(); + .setMaxRetries(30).build(); table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)).get(); // move back @@ -111,8 +111,8 @@ private CompletableFuture failedFuture() { public void testMaxRetries() throws IOException, InterruptedException { try { CONN.callerFactory.single().table(TABLE_NAME).row(ROW).operationTimeout(1, TimeUnit.DAYS) - .maxAttempts(3).pause(10, TimeUnit.MILLISECONDS) - .action((controller, loc, stub) -> failedFuture()).call().get(); + .maxAttempts(3).pause(10, TimeUnit.MILLISECONDS) + .action((controller, loc, stub) -> failedFuture()).call().get(); fail(); } catch (ExecutionException e) { assertThat(e.getCause(), instanceOf(RetriesExhaustedException.class)); @@ -124,8 +124,8 @@ public void testOperationTimeout() throws IOException, InterruptedException { long startNs = System.nanoTime(); try { CONN.callerFactory.single().table(TABLE_NAME).row(ROW).operationTimeout(1, TimeUnit.SECONDS) - .pause(100, TimeUnit.MILLISECONDS).maxAttempts(Integer.MAX_VALUE) - .action((controller, loc, stub) -> failedFuture()).call().get(); + .pause(100, TimeUnit.MILLISECONDS).maxAttempts(Integer.MAX_VALUE) + .action((controller, loc, stub) -> failedFuture()).call().get(); fail(); } catch (ExecutionException e) { e.printStackTrace(); @@ -142,30 +142,30 @@ public void testLocateError() throws IOException, InterruptedException, Executio AtomicInteger count = new AtomicInteger(0); HRegionLocation loc = CONN.getRegionLocator(TABLE_NAME).getRegionLocation(ROW).get(); AsyncRegionLocator mockedLocator = - new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER) { - @Override - CompletableFuture 
getRegionLocation(TableName tableName, byte[] row, - int replicaId, RegionLocateType locateType, long timeoutNs) { - if (tableName.equals(TABLE_NAME)) { - CompletableFuture future = new CompletableFuture<>(); - if (count.getAndIncrement() == 0) { - errorTriggered.set(true); - future.completeExceptionally(new RuntimeException("Inject error!")); + new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER) { + @Override + CompletableFuture getRegionLocation(TableName tableName, byte[] row, + int replicaId, RegionLocateType locateType, long timeoutNs) { + if (tableName.equals(TABLE_NAME)) { + CompletableFuture future = new CompletableFuture<>(); + if (count.getAndIncrement() == 0) { + errorTriggered.set(true); + future.completeExceptionally(new RuntimeException("Inject error!")); + } else { + future.complete(loc); + } + return future; } else { - future.complete(loc); + return super.getRegionLocation(tableName, row, replicaId, locateType, timeoutNs); } - return future; - } else { - return super.getRegionLocation(tableName, row, replicaId, locateType, timeoutNs); } - } - @Override - void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) { - } - }; + @Override + void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) { + } + }; try (AsyncConnectionImpl mockedConn = new AsyncConnectionImpl(CONN.getConfiguration(), - CONN.registry, CONN.registry.getClusterId().get(), null, User.getCurrent()) { + CONN.registry, CONN.registry.getClusterId().get(), null, User.getCurrent()) { @Override AsyncRegionLocator getLocator() { @@ -173,7 +173,7 @@ AsyncRegionLocator getLocator() { } }) { AsyncTable table = mockedConn.getTableBuilder(TABLE_NAME) - .setRetryPause(100, TimeUnit.MILLISECONDS).setMaxRetries(5).build(); + .setRetryPause(100, TimeUnit.MILLISECONDS).setMaxRetries(5).build(); table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)).get(); assertTrue(errorTriggered.get()); errorTriggered.set(false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java index db72dd2a233e..47d48c37135b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -180,12 +180,15 @@ public void testListSnapshots() throws Exception { assertEquals(3, admin.listSnapshots(Pattern.compile("snapshotName(\\d+)")).get().size()); assertEquals(2, admin.listSnapshots(Pattern.compile("snapshotName[1|3]")).get().size()); assertEquals(3, admin.listSnapshots(Pattern.compile("snapshot(.*)")).get().size()); - assertEquals(3, admin.listTableSnapshots(Pattern.compile("testListSnapshots"), - Pattern.compile("s(.*)")).get().size()); - assertEquals(0, admin.listTableSnapshots(Pattern.compile("fakeTableName"), - Pattern.compile("snap(.*)")).get().size()); - assertEquals(2, admin.listTableSnapshots(Pattern.compile("test(.*)"), - Pattern.compile("snap(.*)[1|3]")).get().size()); + assertEquals(3, + admin.listTableSnapshots(Pattern.compile("testListSnapshots"), Pattern.compile("s(.*)")).get() + .size()); + assertEquals(0, + admin.listTableSnapshots(Pattern.compile("fakeTableName"), Pattern.compile("snap(.*)")).get() + .size()); + assertEquals(2, + admin.listTableSnapshots(Pattern.compile("test(.*)"), Pattern.compile("snap(.*)[1|3]")).get() + .size()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java index d37f3c369dad..d17cb8d16325 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -79,7 +79,7 @@ public class TestAsyncTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTable.class); + HBaseClassTestRule.forClass(TestAsyncTable.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -168,19 +168,19 @@ public void testSimpleMultiple() throws Exception { CountDownLatch putLatch = new CountDownLatch(count); IntStream.range(0, count).forEach( i -> table.put(new Put(concat(row, i)).addColumn(FAMILY, QUALIFIER, concat(VALUE, i))) - .thenAccept(x -> putLatch.countDown())); + .thenAccept(x -> putLatch.countDown())); putLatch.await(); BlockingQueue existsResp = new ArrayBlockingQueue<>(count); IntStream.range(0, count) - .forEach(i -> table.exists(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER)) - .thenAccept(x -> existsResp.add(x))); + .forEach(i -> table.exists(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER)) + .thenAccept(x -> existsResp.add(x))); for (int i = 0; i < count; i++) { assertTrue(existsResp.take()); } BlockingQueue> getResp = new ArrayBlockingQueue<>(count); IntStream.range(0, count) - .forEach(i -> table.get(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER)) - .thenAccept(x -> getResp.add(Pair.newPair(i, x)))); + .forEach(i -> table.get(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER)) + .thenAccept(x -> getResp.add(Pair.newPair(i, x)))); for (int i = 0; i < count; i++) { Pair pair = getResp.take(); assertArrayEquals(concat(VALUE, pair.getFirst()), @@ -191,14 +191,14 @@ public void testSimpleMultiple() throws Exception { i -> table.delete(new Delete(concat(row, i))).thenAccept(x -> deleteLatch.countDown())); deleteLatch.await(); IntStream.range(0, count) - .forEach(i -> table.exists(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER)) - 
.thenAccept(x -> existsResp.add(x))); + .forEach(i -> table.exists(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER)) + .thenAccept(x -> existsResp.add(x))); for (int i = 0; i < count; i++) { assertFalse(existsResp.take()); } IntStream.range(0, count) - .forEach(i -> table.get(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER)) - .thenAccept(x -> getResp.add(Pair.newPair(i, x)))); + .forEach(i -> table.get(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER)) + .thenAccept(x -> getResp.add(Pair.newPair(i, x)))); for (int i = 0; i < count; i++) { Pair pair = getResp.take(); assertTrue(pair.getSecond().isEmpty()); @@ -213,10 +213,10 @@ public void testIncrement() throws InterruptedException, ExecutionException { CountDownLatch latch = new CountDownLatch(count); AtomicLong sum = new AtomicLong(0L); IntStream.range(0, count) - .forEach(i -> table.incrementColumnValue(row, FAMILY, QUALIFIER, 1).thenAccept(x -> { - sum.addAndGet(x); - latch.countDown(); - })); + .forEach(i -> table.incrementColumnValue(row, FAMILY, QUALIFIER, 1).thenAccept(x -> { + sum.addAndGet(x); + latch.countDown(); + })); latch.await(); assertEquals(count, Bytes.toLong( table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get().getValue(FAMILY, QUALIFIER))); @@ -232,19 +232,19 @@ public void testAppend() throws InterruptedException, ExecutionException { char suffix = ':'; AtomicLong suffixCount = new AtomicLong(0L); IntStream.range(0, count) - .forEachOrdered(i -> table - .append(new Append(row).addColumn(FAMILY, QUALIFIER, Bytes.toBytes("" + i + suffix))) - .thenAccept(r -> { - suffixCount.addAndGet( - Bytes.toString(r.getValue(FAMILY, QUALIFIER)).chars().filter(x -> x == suffix).count()); - latch.countDown(); - })); + .forEachOrdered(i -> table + .append(new Append(row).addColumn(FAMILY, QUALIFIER, Bytes.toBytes("" + i + suffix))) + .thenAccept(r -> { + suffixCount.addAndGet(Bytes.toString(r.getValue(FAMILY, QUALIFIER)).chars() + .filter(x -> x == suffix).count()); + latch.countDown(); + })); latch.await(); assertEquals((1 + count) * count / 2, suffixCount.get()); String value = Bytes.toString( table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get().getValue(FAMILY, QUALIFIER)); int[] actual = Arrays.asList(value.split("" + suffix)).stream().mapToInt(Integer::parseInt) - .sorted().toArray(); + .sorted().toArray(); assertArrayEquals(IntStream.range(0, count).toArray(), actual); } @@ -289,14 +289,14 @@ public void testCheckAndPutForOldApi() throws InterruptedException, ExecutionExc int count = 10; CountDownLatch latch = new CountDownLatch(count); IntStream.range(0, count) - .forEach(i -> table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).ifNotExists() - .thenPut(new Put(row).addColumn(FAMILY, QUALIFIER, concat(VALUE, i))).thenAccept(x -> { - if (x) { - successCount.incrementAndGet(); - successIndex.set(i); - } - latch.countDown(); - })); + .forEach(i -> table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).ifNotExists() + .thenPut(new Put(row).addColumn(FAMILY, QUALIFIER, concat(VALUE, i))).thenAccept(x -> { + if (x) { + successCount.incrementAndGet(); + successIndex.set(i); + } + latch.countDown(); + })); latch.await(); assertEquals(1, successCount.get()); String actual = Bytes.toString(table.get(new Get(row)).get().getValue(FAMILY, QUALIFIER)); @@ -312,24 +312,24 @@ public void testCheckAndDeleteForOldApi() throws InterruptedException, Execution CountDownLatch putLatch = new CountDownLatch(count + 1); table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).thenRun(() -> putLatch.countDown()); 
IntStream.range(0, count) - .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE)) - .thenRun(() -> putLatch.countDown())); + .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE)) + .thenRun(() -> putLatch.countDown())); putLatch.await(); AtomicInteger successCount = new AtomicInteger(0); AtomicInteger successIndex = new AtomicInteger(-1); CountDownLatch deleteLatch = new CountDownLatch(count); IntStream.range(0, count) - .forEach(i -> table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE) - .thenDelete( - new Delete(row).addColumn(FAMILY, QUALIFIER).addColumn(FAMILY, concat(QUALIFIER, i))) - .thenAccept(x -> { - if (x) { - successCount.incrementAndGet(); - successIndex.set(i); - } - deleteLatch.countDown(); - })); + .forEach(i -> table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE) + .thenDelete( + new Delete(row).addColumn(FAMILY, QUALIFIER).addColumn(FAMILY, concat(QUALIFIER, i))) + .thenAccept(x -> { + if (x) { + successCount.incrementAndGet(); + successIndex.set(i); + } + deleteLatch.countDown(); + })); deleteLatch.await(); assertEquals(1, successCount.get()); Result result = table.get(new Get(row)).get(); @@ -351,8 +351,8 @@ public void testCheckAndMutateForOldApi() throws InterruptedException, Execution CountDownLatch putLatch = new CountDownLatch(count + 1); table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).thenRun(() -> putLatch.countDown()); IntStream.range(0, count) - .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE)) - .thenRun(() -> putLatch.countDown())); + .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE)) + .thenRun(() -> putLatch.countDown())); putLatch.await(); AtomicInteger successCount = new AtomicInteger(0); @@ -363,18 +363,18 @@ public void testCheckAndMutateForOldApi() throws InterruptedException, Execution try { mutation.add((Mutation) new Delete(row).addColumn(FAMILY, QUALIFIER)); mutation - .add((Mutation) new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), concat(VALUE, i))); + .add((Mutation) new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), concat(VALUE, i))); } catch (IOException e) { throw new UncheckedIOException(e); } table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenMutate(mutation) - .thenAccept(x -> { - if (x) { - successCount.incrementAndGet(); - successIndex.set(i); - } - mutateLatch.countDown(); - }); + .thenAccept(x -> { + if (x) { + successCount.incrementAndGet(); + successIndex.set(i); + } + mutateLatch.countDown(); + }); }); mutateLatch.await(); assertEquals(1, successCount.get()); @@ -397,35 +397,35 @@ public void testCheckAndMutateWithTimeRangeForOldApi() throws Exception { put.addColumn(FAMILY, QUALIFIER, ts, VALUE); boolean ok = - table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put).get(); + table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put).get(); assertTrue(ok); ok = table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts + 10000)) - .ifEquals(VALUE).thenPut(put).get(); + .ifEquals(VALUE).thenPut(put).get(); assertFalse(ok); ok = table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) - .ifEquals(VALUE).thenPut(put).get(); + .ifEquals(VALUE).thenPut(put).get(); assertTrue(ok); RowMutations rm = new RowMutations(row).add((Mutation) put); ok = table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts + 10000)) - 
.ifEquals(VALUE).thenMutate(rm).get(); + .ifEquals(VALUE).thenMutate(rm).get(); assertFalse(ok); ok = table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) - .ifEquals(VALUE).thenMutate(rm).get(); + .ifEquals(VALUE).thenMutate(rm).get(); assertTrue(ok); Delete delete = new Delete(row).addColumn(FAMILY, QUALIFIER); ok = table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts + 10000)) - .ifEquals(VALUE).thenDelete(delete).get(); + .ifEquals(VALUE).thenDelete(delete).get(); assertFalse(ok); ok = table.checkAndMutate(row, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) - .ifEquals(VALUE).thenDelete(delete).get(); + .ifEquals(VALUE).thenDelete(delete).get(); assertTrue(ok); } @@ -442,41 +442,45 @@ public void testCheckAndMutateWithSingleFilterForOldApi() throws Throwable { table.put(put).get(); // Put with success - boolean ok = table.checkAndMutate(row, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .get(); + boolean ok = table + .checkAndMutate(row, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))).get(); assertTrue(ok); Result result = table.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("D"))).get(); assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); // Put with failure - ok = table.checkAndMutate(row, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("b"))) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))) - .get(); + ok = table + .checkAndMutate(row, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("b"))) + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))).get(); assertFalse(ok); assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("E"))).get()); // Delete with success - ok = table.checkAndMutate(row, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .thenDelete(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D"))) - .get(); + ok = table + .checkAndMutate(row, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .thenDelete(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D"))).get(); assertTrue(ok); assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("D"))).get()); // Mutate with success - ok = table.checkAndMutate(row, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), - CompareOperator.EQUAL, Bytes.toBytes("b"))) - .thenMutate(new RowMutations(row) - .add((Mutation) new Put(row) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))) - .get(); + ok = table + .checkAndMutate(row, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b"))) + .thenMutate(new RowMutations(row) + .add((Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) + .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))) + .get(); assertTrue(ok); result = table.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("D"))).get(); @@ -498,57 +502,57 @@ public void 
testCheckAndMutateWithMultipleFiltersForOldApi() throws Throwable { table.put(put).get(); // Put with success - boolean ok = table.checkAndMutate(row, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")) - )) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .get(); + boolean ok = table + .checkAndMutate(row, + new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))).get(); assertTrue(ok); Result result = table.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("D"))).get(); assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); // Put with failure - ok = table.checkAndMutate(row, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("c")) - )) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))) - .get(); + ok = table + .checkAndMutate(row, + new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))).get(); assertFalse(ok); assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("E"))).get()); // Delete with success - ok = table.checkAndMutate(row, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")) - )) - .thenDelete(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D"))) - .get(); + ok = table + .checkAndMutate(row, + new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .thenDelete(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D"))).get(); assertTrue(ok); assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("D"))).get()); // Mutate with success - ok = table.checkAndMutate(row, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")) - )) - .thenMutate(new RowMutations(row) - .add((Mutation) new Put(row) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))) - .get(); + ok = table + .checkAndMutate(row, + new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .thenMutate(new RowMutations(row) + .add((Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) + .add((Mutation) 
new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))) + .get(); assertTrue(ok); result = table.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("D"))).get(); @@ -566,26 +570,26 @@ public void testCheckAndMutateWithTimestampFilterForOldApi() throws Throwable { table.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a"))).get(); // Put with success - boolean ok = table.checkAndMutate(row, new FilterList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), - new TimestampsFilter(Collections.singletonList(100L)) - )) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))) - .get(); + boolean ok = + table + .checkAndMutate(row, + new FilterList(new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), + new QualifierFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("A"))), + new TimestampsFilter(Collections.singletonList(100L)))) + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))).get(); assertTrue(ok); Result result = table.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("B"))).get(); assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); // Put with failure - ok = table.checkAndMutate(row, new FilterList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), - new TimestampsFilter(Collections.singletonList(101L)) - )) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))) - .get(); + ok = table + .checkAndMutate(row, + new FilterList(new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), + new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), + new TimestampsFilter(Collections.singletonList(101L)))) + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))).get(); assertFalse(ok); assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("C"))).get()); @@ -597,26 +601,27 @@ public void testCheckAndMutateWithFilterAndTimeRangeForOldApi() throws Throwable AsyncTable table = getTable.get(); // Put with specifying the timestamp - table.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a"))) - .get(); + table.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a"))).get(); // Put with success - boolean ok = table.checkAndMutate(row, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 101)) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))) - .get(); + boolean ok = table + .checkAndMutate(row, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 101)) + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))).get(); assertTrue(ok); Result result = table.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("B"))).get(); assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); // Put with failure - ok = table.checkAndMutate(row, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 100)) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))) - 
.get(); + ok = table + .checkAndMutate(row, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 100)) + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))).get(); assertFalse(ok); assertFalse(table.exists(new Get(row).addColumn(FAMILY, Bytes.toBytes("C"))).get()); @@ -626,7 +631,7 @@ public void testCheckAndMutateWithFilterAndTimeRangeForOldApi() throws Throwable @Deprecated public void testCheckAndMutateWithoutConditionForOldApi() { getTable.get().checkAndMutate(row, FAMILY) - .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + .thenPut(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); } // Tests for new CheckAndMutate API @@ -641,17 +646,17 @@ public void testCheckAndPut() throws InterruptedException, ExecutionException { CountDownLatch latch = new CountDownLatch(count); IntStream.range(0, count) - .forEach(i -> table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, QUALIFIER) - .build(new Put(row).addColumn(FAMILY, QUALIFIER, concat(VALUE, i)))) - .thenAccept(x -> { - if (x.isSuccess()) { - successCount.incrementAndGet(); - successIndex.set(i); - } - assertNull(x.getResult()); - latch.countDown(); - })); + .forEach(i -> table + .checkAndMutate(CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, QUALIFIER) + .build(new Put(row).addColumn(FAMILY, QUALIFIER, concat(VALUE, i)))) + .thenAccept(x -> { + if (x.isSuccess()) { + successCount.incrementAndGet(); + successIndex.set(i); + } + assertNull(x.getResult()); + latch.countDown(); + })); latch.await(); assertEquals(1, successCount.get()); String actual = Bytes.toString(table.get(new Get(row)).get().getValue(FAMILY, QUALIFIER)); @@ -666,8 +671,8 @@ public void testCheckAndDelete() throws InterruptedException, ExecutionException CountDownLatch putLatch = new CountDownLatch(count + 1); table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).thenRun(() -> putLatch.countDown()); IntStream.range(0, count) - .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE)) - .thenRun(() -> putLatch.countDown())); + .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE)) + .thenRun(() -> putLatch.countDown())); putLatch.await(); AtomicInteger successCount = new AtomicInteger(0); @@ -675,18 +680,17 @@ public void testCheckAndDelete() throws InterruptedException, ExecutionException CountDownLatch deleteLatch = new CountDownLatch(count); IntStream.range(0, count) - .forEach(i -> table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, QUALIFIER, VALUE) - .build( - new Delete(row).addColumn(FAMILY, QUALIFIER).addColumn(FAMILY, concat(QUALIFIER, i)))) - .thenAccept(x -> { - if (x.isSuccess()) { - successCount.incrementAndGet(); - successIndex.set(i); - } - assertNull(x.getResult()); - deleteLatch.countDown(); - })); + .forEach(i -> table + .checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(FAMILY, QUALIFIER, VALUE).build( + new Delete(row).addColumn(FAMILY, QUALIFIER).addColumn(FAMILY, concat(QUALIFIER, i)))) + .thenAccept(x -> { + if (x.isSuccess()) { + successCount.incrementAndGet(); + successIndex.set(i); + } + assertNull(x.getResult()); + deleteLatch.countDown(); + })); deleteLatch.await(); assertEquals(1, successCount.get()); Result result = table.get(new Get(row)).get(); @@ -707,8 +711,8 @@ public void testCheckAndMutate() throws InterruptedException, ExecutionException CountDownLatch 
putLatch = new CountDownLatch(count + 1); table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).thenRun(() -> putLatch.countDown()); IntStream.range(0, count) - .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE)) - .thenRun(() -> putLatch.countDown())); + .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE)) + .thenRun(() -> putLatch.countDown())); putLatch.await(); AtomicInteger successCount = new AtomicInteger(0); @@ -719,22 +723,22 @@ public void testCheckAndMutate() throws InterruptedException, ExecutionException try { mutation.add((Mutation) new Delete(row).addColumn(FAMILY, QUALIFIER)); mutation - .add((Mutation) new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), concat(VALUE, i))); + .add((Mutation) new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), concat(VALUE, i))); } catch (IOException e) { throw new UncheckedIOException(e); } - table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, QUALIFIER, VALUE) - .build(mutation)) - .thenAccept(x -> { - if (x.isSuccess()) { - successCount.incrementAndGet(); - successIndex.set(i); - } - assertNull(x.getResult()); - mutateLatch.countDown(); - }); + table + .checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, QUALIFIER, VALUE).build(mutation)) + .thenAccept(x -> { + if (x.isSuccess()) { + successCount.incrementAndGet(); + successIndex.set(i); + } + assertNull(x.getResult()); + mutateLatch.countDown(); + }); }); mutateLatch.await(); assertEquals(1, successCount.get()); @@ -755,55 +759,43 @@ public void testCheckAndMutateWithTimeRange() throws Exception { Put put = new Put(row); put.addColumn(FAMILY, QUALIFIER, ts, VALUE); - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, QUALIFIER) - .build(put)).get(); + CheckAndMutateResult result = table + .checkAndMutate(CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, QUALIFIER).build(put)) + .get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, QUALIFIER, VALUE) - .timeRange(TimeRange.at(ts + 10000)) - .build(put)).get(); + result = table.checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(FAMILY, QUALIFIER, VALUE) + .timeRange(TimeRange.at(ts + 10000)).build(put)).get(); assertFalse(result.isSuccess()); assertNull(result.getResult()); - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, QUALIFIER, VALUE) - .timeRange(TimeRange.at(ts)) - .build(put)).get(); + result = table.checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(FAMILY, QUALIFIER, VALUE) + .timeRange(TimeRange.at(ts)).build(put)).get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); RowMutations rm = new RowMutations(row).add((Mutation) put); - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, QUALIFIER, VALUE) - .timeRange(TimeRange.at(ts + 10000)) - .build(rm)).get(); + result = table.checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(FAMILY, QUALIFIER, VALUE) + .timeRange(TimeRange.at(ts + 10000)).build(rm)).get(); assertFalse(result.isSuccess()); assertNull(result.getResult()); - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, QUALIFIER, VALUE) - .timeRange(TimeRange.at(ts)) - .build(rm)).get(); + result = table.checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(FAMILY, QUALIFIER, VALUE) + .timeRange(TimeRange.at(ts)).build(rm)).get(); 
assertTrue(result.isSuccess()); assertNull(result.getResult()); Delete delete = new Delete(row).addColumn(FAMILY, QUALIFIER); - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, QUALIFIER, VALUE) - .timeRange(TimeRange.at(ts + 10000)) - .build(delete)).get(); + result = table.checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(FAMILY, QUALIFIER, VALUE) + .timeRange(TimeRange.at(ts + 10000)).build(delete)).get(); assertFalse(result.isSuccess()); assertNull(result.getResult()); - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, QUALIFIER, VALUE) - .timeRange(TimeRange.at(ts)) - .build(delete)).get(); + result = table.checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(FAMILY, QUALIFIER, VALUE) + .timeRange(TimeRange.at(ts)).build(delete)).get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); } @@ -820,10 +812,13 @@ public void testCheckAndMutateWithSingleFilter() throws Throwable { table.put(put).get(); // Put with success - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))).get(); + CheckAndMutateResult result = + table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), + CompareOperator.EQUAL, Bytes.toBytes("a"))) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))) + .get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -831,10 +826,13 @@ public void testCheckAndMutateWithSingleFilter() throws Throwable { assertEquals("d", Bytes.toString(r.getValue(FAMILY, Bytes.toBytes("D")))); // Put with failure - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("b"))) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))).get(); + result = + table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), + CompareOperator.EQUAL, Bytes.toBytes("b"))) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))) + .get(); assertFalse(result.isSuccess()); assertNull(result.getResult()); @@ -842,9 +840,9 @@ public void testCheckAndMutateWithSingleFilter() throws Throwable { // Delete with success result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .build(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D")))).get(); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .build(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D")))).get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -852,12 +850,12 @@ public void testCheckAndMutateWithSingleFilter() throws Throwable { // Mutate with success result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), - CompareOperator.EQUAL, Bytes.toBytes("b"))) - .build(new RowMutations(row) - .add((Mutation) new Put(row) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(row).addColumns(FAMILY, 
Bytes.toBytes("A"))))).get(); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b"))) + .build(new RowMutations(row) + .add((Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) + .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A"))))) + .get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -879,13 +877,16 @@ public void testCheckAndMutateWithMultipleFilters() throws Throwable { table.put(put).get(); // Put with success - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))).get(); + CheckAndMutateResult result = + table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))) + .get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -893,13 +894,16 @@ public void testCheckAndMutateWithMultipleFilters() throws Throwable { assertEquals("d", Bytes.toString(r.getValue(FAMILY, Bytes.toBytes("D")))); // Put with failure - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("c")))) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))).get(); + result = + table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))) + .get(); assertFalse(result.isSuccess()); assertNull(result.getResult()); @@ -907,12 +911,12 @@ public void testCheckAndMutateWithMultipleFilters() throws Throwable { // Delete with success result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D")))).get(); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D")))).get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -920,15 +924,15 @@ public void testCheckAndMutateWithMultipleFilters() throws Throwable { // Mutate with success result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( 
- new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new RowMutations(row) - .add((Mutation) new Put(row) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A"))))).get(); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new RowMutations(row) + .add((Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) + .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A"))))) + .get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -946,12 +950,16 @@ public void testCheckAndMutateWithTimestampFilter() throws Throwable { table.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a"))).get(); // Put with success - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), - new TimestampsFilter(Collections.singletonList(100L)))) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))).get(); + CheckAndMutateResult result = + table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new FilterList( + new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), + new QualifierFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("A"))), + new TimestampsFilter(Collections.singletonList(100L)))) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))) + .get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -959,12 +967,16 @@ public void testCheckAndMutateWithTimestampFilter() throws Throwable { assertEquals("b", Bytes.toString(r.getValue(FAMILY, Bytes.toBytes("B")))); // Put with failure - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), - new TimestampsFilter(Collections.singletonList(101L)))) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))).get(); + result = + table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new FilterList( + new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), + new QualifierFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("A"))), + new TimestampsFilter(Collections.singletonList(101L)))) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))) + .get(); assertFalse(result.isSuccess()); assertNull(result.getResult()); @@ -976,15 +988,17 @@ public void testCheckAndMutateWithFilterAndTimeRange() throws Throwable { AsyncTable table = getTable.get(); // Put with specifying the timestamp - table.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a"))) - .get(); + table.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a"))).get(); // Put with success - CheckAndMutateResult result = 
table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 101)) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))).get(); + CheckAndMutateResult result = + table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), + CompareOperator.EQUAL, Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 101)) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))) + .get(); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -992,12 +1006,14 @@ public void testCheckAndMutateWithFilterAndTimeRange() throws Throwable { assertEquals("b", Bytes.toString(r.getValue(FAMILY, Bytes.toBytes("B")))); // Put with failure - result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 100)) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))) - .get(); + result = + table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), + CompareOperator.EQUAL, Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 100)) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))) + .get(); assertFalse(result.isSuccess()); assertNull(result.getResult()); @@ -1011,9 +1027,10 @@ public void testCheckAndIncrement() throws Throwable { table.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))).get(); // CheckAndIncrement with correct value - CheckAndMutateResult res = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1))).get(); + CheckAndMutateResult res = table.checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1))) + .get(); assertTrue(res.isSuccess()); assertEquals(1, Bytes.toLong(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -1021,9 +1038,10 @@ public void testCheckAndIncrement() throws Throwable { assertEquals(1, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("B")))); // CheckAndIncrement with wrong value - res = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) - .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1))).get(); + res = table.checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) + .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1))) + .get(); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -1034,12 +1052,12 @@ public void testCheckAndIncrement() throws Throwable { // CheckAndIncrement with a filter and correct value res = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("c")))) - .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 2))).get(); + .ifMatches(new FilterList( + new 
SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 2))).get(); assertTrue(res.isSuccess()); assertEquals(3, Bytes.toLong(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -1048,12 +1066,12 @@ public void testCheckAndIncrement() throws Throwable { // CheckAndIncrement with a filter and correct value res = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("b")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("d")))) - .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 2))).get(); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("b")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("d")))) + .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 2))).get(); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -1068,9 +1086,11 @@ public void testCheckAndAppend() throws Throwable { table.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))).get(); // CheckAndAppend with correct value - CheckAndMutateResult res = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))).get(); + CheckAndMutateResult res = table + .checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))) + .get(); assertTrue(res.isSuccess()); assertEquals("b", Bytes.toString(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -1078,9 +1098,11 @@ public void testCheckAndAppend() throws Throwable { assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); // CheckAndAppend with correct value - res = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) - .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))).get(); + res = table + .checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) + .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))) + .get(); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -1090,13 +1112,15 @@ public void testCheckAndAppend() throws Throwable { table.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))); // CheckAndAppend with a filter and correct value - res = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("c")))) - .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))).get(); + res = table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new 
SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))) + .get(); assertTrue(res.isSuccess()); assertEquals("bbb", Bytes.toString(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -1104,13 +1128,15 @@ public void testCheckAndAppend() throws Throwable { assertEquals("bbb", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); // CheckAndAppend with a filter and wrong value - res = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("b")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("d")))) - .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))).get(); + res = table + .checkAndMutate(CheckAndMutate.newBuilder(row) + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("b")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("d")))) + .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))) + .get(); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -1129,20 +1155,15 @@ public void testCheckAndRowMutations() throws Throwable { AsyncTable table = getTable.get(); // Initial values - table.putAll(Arrays.asList( - new Put(row).addColumn(FAMILY, q2, Bytes.toBytes("toBeDeleted")), + table.putAll(Arrays.asList(new Put(row).addColumn(FAMILY, q2, Bytes.toBytes("toBeDeleted")), new Put(row).addColumn(FAMILY, q3, Bytes.toBytes(5L)), new Put(row).addColumn(FAMILY, q4, Bytes.toBytes("a")))).get(); // Do CheckAndRowMutations - CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, q1) - .build(new RowMutations(row).add(Arrays.asList( - new Put(row).addColumn(FAMILY, q1, Bytes.toBytes(v1)), - new Delete(row).addColumns(FAMILY, q2), - new Increment(row).addColumn(FAMILY, q3, 1), - new Append(row).addColumn(FAMILY, q4, Bytes.toBytes("b")))) - ); + CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, q1).build( + new RowMutations(row).add(Arrays.asList(new Put(row).addColumn(FAMILY, q1, Bytes.toBytes(v1)), + new Delete(row).addColumns(FAMILY, q2), new Increment(row).addColumn(FAMILY, q3, 1), + new Append(row).addColumn(FAMILY, q4, Bytes.toBytes("b"))))); CheckAndMutateResult result = table.checkAndMutate(checkAndMutate).get(); assertTrue(result.isSuccess()); @@ -1157,14 +1178,11 @@ public void testCheckAndRowMutations() throws Throwable { assertEquals("ab", Bytes.toString(r.getValue(FAMILY, q4))); // Do CheckAndRowMutations again - checkAndMutate = CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, q1) - .build(new RowMutations(row).add(Arrays.asList( - new Delete(row).addColumns(FAMILY, q1), - new Put(row).addColumn(FAMILY, q2, Bytes.toBytes(v1)), - new Increment(row).addColumn(FAMILY, q3, 1), - new Append(row).addColumn(FAMILY, q4, Bytes.toBytes("b")))) - ); + checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, q1) + .build(new RowMutations(row).add(Arrays.asList(new Delete(row).addColumns(FAMILY, q1), + new Put(row).addColumn(FAMILY, q2, Bytes.toBytes(v1)), + new Increment(row).addColumn(FAMILY, q3, 1), + new Append(row).addColumn(FAMILY, q4, Bytes.toBytes("b"))))); result = table.checkAndMutate(checkAndMutate).get(); 
assertFalse(result.isSuccess()); @@ -1187,23 +1205,23 @@ public void testCheckAndMutateBatch() throws Throwable { byte[] row3 = Bytes.toBytes(Bytes.toString(row) + "3"); byte[] row4 = Bytes.toBytes(Bytes.toString(row) + "4"); - table.putAll(Arrays.asList( - new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), - new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), - new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), - new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))).get(); + table.putAll( + Arrays.asList(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), + new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), + new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), + new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))).get(); // Test for Put - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e"))); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e"))); - CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("a")) - .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); + CheckAndMutate checkAndMutate2 = + CheckAndMutate.newBuilder(row2).ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("a")) + .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); List results = - table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); + table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); assertTrue(results.get(0).isSuccess()); assertNull(results.get(0).getResult()); @@ -1218,12 +1236,10 @@ public void testCheckAndMutateBatch() throws Throwable { // Test for Delete checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e")) - .build(new Delete(row)); + .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e")).build(new Delete(row)); checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("a")) - .build(new Delete(row2)); + .ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("a")).build(new Delete(row2)); results = table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); @@ -1239,18 +1255,16 @@ public void testCheckAndMutateBatch() throws Throwable { // Test for RowMutations checkAndMutate1 = CheckAndMutate.newBuilder(row3) - .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) - .build(new RowMutations(row3) - .add((Mutation) new Put(row3) - .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) - .add((Mutation) new Delete(row3).addColumns(FAMILY, Bytes.toBytes("C")))); + .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) + .build(new RowMutations(row3) + .add((Mutation) new Put(row3).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) + .add((Mutation) new Delete(row3).addColumns(FAMILY, Bytes.toBytes("C")))); checkAndMutate2 = CheckAndMutate.newBuilder(row4) - .ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("f")) - .build(new RowMutations(row4) - .add((Mutation) new Put(row4) - .addColumn(FAMILY, Bytes.toBytes("F"), 
Bytes.toBytes("f"))) - .add((Mutation) new Delete(row4).addColumns(FAMILY, Bytes.toBytes("D")))); + .ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("f")) + .build(new RowMutations(row4) + .add((Mutation) new Put(row4).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) + .add((Mutation) new Delete(row4).addColumns(FAMILY, Bytes.toBytes("D")))); results = table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); @@ -1275,23 +1289,23 @@ public void testCheckAndMutateBatch2() throws Throwable { byte[] row3 = Bytes.toBytes(Bytes.toString(row) + "3"); byte[] row4 = Bytes.toBytes(Bytes.toString(row) + "4"); - table.putAll(Arrays.asList( - new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), - new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), - new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), 100, Bytes.toBytes("c")), - new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), 100, Bytes.toBytes("d")))).get(); + table.putAll( + Arrays.asList(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), + new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), + new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), 100, Bytes.toBytes("c")), + new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), 100, Bytes.toBytes("d")))).get(); // Test for ifNotExists() - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, Bytes.toBytes("B")) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e"))); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, Bytes.toBytes("B")) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e"))); - CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifNotExists(FAMILY, Bytes.toBytes("B")) - .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); + CheckAndMutate checkAndMutate2 = + CheckAndMutate.newBuilder(row2).ifNotExists(FAMILY, Bytes.toBytes("B")) + .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); List results = - table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); + table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); assertTrue(results.get(0).isSuccess()); assertNull(results.get(0).getResult()); @@ -1306,12 +1320,12 @@ public void testCheckAndMutateBatch2() throws Throwable { // Test for ifMatches() checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifMatches(FAMILY, Bytes.toBytes("A"), CompareOperator.NOT_EQUAL, Bytes.toBytes("a")) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))); + .ifMatches(FAMILY, Bytes.toBytes("A"), CompareOperator.NOT_EQUAL, Bytes.toBytes("a")) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))); checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifMatches(FAMILY, Bytes.toBytes("B"), CompareOperator.GREATER, Bytes.toBytes("b")) - .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); + .ifMatches(FAMILY, Bytes.toBytes("B"), CompareOperator.GREATER, Bytes.toBytes("b")) + .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); results = table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); @@ -1327,15 +1341,15 @@ public void testCheckAndMutateBatch2() throws Throwable { assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); // Test for timeRange() - 
checkAndMutate1 = CheckAndMutate.newBuilder(row3) - .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) - .timeRange(TimeRange.between(0, 101)) - .build(new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("e"))); + checkAndMutate1 = + CheckAndMutate.newBuilder(row3).ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) + .timeRange(TimeRange.between(0, 101)) + .build(new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("e"))); - checkAndMutate2 = CheckAndMutate.newBuilder(row4) - .ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) - .timeRange(TimeRange.between(0, 100)) - .build(new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("f"))); + checkAndMutate2 = + CheckAndMutate.newBuilder(row4).ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) + .timeRange(TimeRange.between(0, 100)) + .build(new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("f"))); results = table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); @@ -1357,34 +1371,33 @@ public void testCheckAndMutateBatchWithFilter() throws Throwable { byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); table.putAll(Arrays.asList( - new Put(row) - .addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) - .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), - new Put(row2) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) - .addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")) - .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")))).get(); + new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) + .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), + new Put(row2).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) + .addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")) + .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")))) + .get(); // Test for Put CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("g"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("g"))); CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("h"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("h"))); List results = - table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); + 
table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); assertTrue(results.get(0).isSuccess()); assertNull(results.get(0).getResult()); @@ -1399,20 +1412,20 @@ public void testCheckAndMutateBatchWithFilter() throws Throwable { // Test for Delete checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Delete(row).addColumns(FAMILY, Bytes.toBytes("C"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Delete(row).addColumns(FAMILY, Bytes.toBytes("C"))); checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Delete(row2).addColumn(FAMILY, Bytes.toBytes("F"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Delete(row2).addColumn(FAMILY, Bytes.toBytes("F"))); results = table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); @@ -1427,27 +1440,28 @@ public void testCheckAndMutateBatchWithFilter() throws Throwable { assertEquals("f", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("F")))); // Test for RowMutations - checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new RowMutations(row) - .add((Mutation) new Put(row) - .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))) - .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))); - - checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new RowMutations(row2) - .add((Mutation) new Put(row2) - .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("g"))) - .add((Mutation) new Delete(row2).addColumns(FAMILY, Bytes.toBytes("D")))); + checkAndMutate1 = + CheckAndMutate.newBuilder(row) + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new RowMutations(row) + .add( + (Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))) + .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))); + + checkAndMutate2 = + CheckAndMutate.newBuilder(row2) + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new 
SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new RowMutations(row2).add( + (Mutation) new Put(row2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("g"))) + .add((Mutation) new Delete(row2).addColumns(FAMILY, Bytes.toBytes("D")))); results = table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); @@ -1472,32 +1486,33 @@ public void testCheckAndMutateBatchWithFilterAndTimeRange() throws Throwable { table.putAll(Arrays.asList( new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a")) - .addColumn(FAMILY, Bytes.toBytes("B"), 100, Bytes.toBytes("b")) - .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), + .addColumn(FAMILY, Bytes.toBytes("B"), 100, Bytes.toBytes("b")) + .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), new Put(row2).addColumn(FAMILY, Bytes.toBytes("D"), 100, Bytes.toBytes("d")) - .addColumn(FAMILY, Bytes.toBytes("E"), 100, Bytes.toBytes("e")) - .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")))).get(); + .addColumn(FAMILY, Bytes.toBytes("E"), 100, Bytes.toBytes("e")) + .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")))) + .get(); CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .timeRange(TimeRange.between(0, 101)) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("g"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .timeRange(TimeRange.between(0, 101)) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("g"))); CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, - Bytes.toBytes("d")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, - Bytes.toBytes("e")))) - .timeRange(TimeRange.between(0, 100)) - .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("h"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, + Bytes.toBytes("d")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, + Bytes.toBytes("e")))) + .timeRange(TimeRange.between(0, 100)) + .build(new Put(row2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("h"))); List results = - table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); + table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); assertTrue(results.get(0).isSuccess()); assertNull(results.get(0).getResult()); @@ -1517,27 +1532,26 @@ public void testCheckAndIncrementBatch() throws Throwable { byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); table.putAll(Arrays.asList( - new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes(0L)), - new Put(row2).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes(0L)))).get(); + new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), 
Bytes.toBytes("a")).addColumn(FAMILY, + Bytes.toBytes("B"), Bytes.toBytes(0L)), + new Put(row2).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")).addColumn(FAMILY, + Bytes.toBytes("D"), Bytes.toBytes(0L)))).get(); // CheckAndIncrement with correct value - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1)); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1)); // CheckAndIncrement with wrong value - CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("d")) - .build(new Increment(row2).addColumn(FAMILY, Bytes.toBytes("D"), 1)); + CheckAndMutate checkAndMutate2 = + CheckAndMutate.newBuilder(row2).ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("d")) + .build(new Increment(row2).addColumn(FAMILY, Bytes.toBytes("D"), 1)); List results = - table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); + table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); assertTrue(results.get(0).isSuccess()); - assertEquals(1, Bytes.toLong(results.get(0).getResult() - .getValue(FAMILY, Bytes.toBytes("B")))); + assertEquals(1, Bytes.toLong(results.get(0).getResult().getValue(FAMILY, Bytes.toBytes("B")))); assertFalse(results.get(1).isSuccess()); assertNull(results.get(1).getResult()); @@ -1554,27 +1568,27 @@ public void testCheckAndAppendBatch() throws Throwable { byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); table.putAll(Arrays.asList( - new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), - new Put(row2).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))).get(); + new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")).addColumn(FAMILY, + Bytes.toBytes("B"), Bytes.toBytes("b")), + new Put(row2).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")).addColumn(FAMILY, + Bytes.toBytes("D"), Bytes.toBytes("d")))).get(); // CheckAndAppend with correct value - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); // CheckAndAppend with wrong value - CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("d")) - .build(new Append(row2).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + CheckAndMutate checkAndMutate2 = + CheckAndMutate.newBuilder(row2).ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("d")) + .build(new Append(row2).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); List results = - table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); + table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); assertTrue(results.get(0).isSuccess()); - assertEquals("bb", Bytes.toString(results.get(0).getResult() - .getValue(FAMILY, Bytes.toBytes("B")))); + 
assertEquals("bb", + Bytes.toString(results.get(0).getResult().getValue(FAMILY, Bytes.toBytes("B")))); assertFalse(results.get(1).isSuccess()); assertNull(results.get(1).getResult()); @@ -1592,41 +1606,38 @@ public void testCheckAndRowMutationsBatch() throws Throwable { table.putAll(Arrays.asList( new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) - .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes(1L)) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), + .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes(1L)) + .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), new Put(row2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")) - .addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes(1L)) - .addColumn(FAMILY, Bytes.toBytes("H"), Bytes.toBytes("h"))) - ).get(); + .addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes(1L)) + .addColumn(FAMILY, Bytes.toBytes("H"), Bytes.toBytes("h")))) + .get(); // CheckAndIncrement with correct value - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) - .build(new RowMutations(row).add(Arrays.asList( - new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), - new Delete(row).addColumns(FAMILY, Bytes.toBytes("B")), - new Increment(row).addColumn(FAMILY, Bytes.toBytes("C"), 1L), - new Append(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) - ))); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) + .build(new RowMutations(row).add( + Arrays.asList(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), + new Delete(row).addColumns(FAMILY, Bytes.toBytes("B")), + new Increment(row).addColumn(FAMILY, Bytes.toBytes("C"), 1L), + new Append(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))))); // CheckAndIncrement with wrong value - CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifEquals(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("a")) - .build(new RowMutations(row2).add(Arrays.asList( - new Put(row2).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), - new Delete(row2).addColumns(FAMILY, Bytes.toBytes("F")), - new Increment(row2).addColumn(FAMILY, Bytes.toBytes("G"), 1L), - new Append(row2).addColumn(FAMILY, Bytes.toBytes("H"), Bytes.toBytes("h")) - ))); + CheckAndMutate checkAndMutate2 = + CheckAndMutate.newBuilder(row2).ifEquals(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("a")) + .build(new RowMutations(row2).add( + Arrays.asList(new Put(row2).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), + new Delete(row2).addColumns(FAMILY, Bytes.toBytes("F")), + new Increment(row2).addColumn(FAMILY, Bytes.toBytes("G"), 1L), + new Append(row2).addColumn(FAMILY, Bytes.toBytes("H"), Bytes.toBytes("h"))))); List results = - table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); + table.checkAndMutateAll(Arrays.asList(checkAndMutate1, checkAndMutate2)).get(); assertTrue(results.get(0).isSuccess()); - assertEquals(2, Bytes.toLong(results.get(0).getResult() - .getValue(FAMILY, Bytes.toBytes("C")))); - assertEquals("dd", Bytes.toString(results.get(0).getResult() - .getValue(FAMILY, Bytes.toBytes("D")))); + assertEquals(2, Bytes.toLong(results.get(0).getResult().getValue(FAMILY, Bytes.toBytes("C")))); + assertEquals("dd", + Bytes.toString(results.get(0).getResult().getValue(FAMILY, Bytes.toBytes("D")))); assertFalse(results.get(1).isSuccess()); 
assertNull(results.get(1).getResult()); @@ -1667,8 +1678,8 @@ public void testInvalidPut() { } try { - getTable.get() - .put(new Put(Bytes.toBytes(0)).addColumn(FAMILY, QUALIFIER, new byte[MAX_KEY_VALUE_SIZE])); + getTable.get().put( + new Put(Bytes.toBytes(0)).addColumn(FAMILY, QUALIFIER, new byte[MAX_KEY_VALUE_SIZE])); fail("Should fail since the put exceeds the max key value size"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("KeyValue size too large")); @@ -1686,9 +1697,8 @@ public void testInvalidPutInRowMutations() throws IOException { } try { - getTable.get() - .mutateRow(new RowMutations(row).add(new Put(row) - .addColumn(FAMILY, QUALIFIER, new byte[MAX_KEY_VALUE_SIZE]))); + getTable.get().mutateRow(new RowMutations(row) + .add(new Put(row).addColumn(FAMILY, QUALIFIER, new byte[MAX_KEY_VALUE_SIZE]))); fail("Should fail since the put exceeds the max key value size"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("KeyValue size too large")); @@ -1699,19 +1709,17 @@ public void testInvalidPutInRowMutations() throws IOException { public void testInvalidPutInRowMutationsInCheckAndMutate() throws IOException { final byte[] row = Bytes.toBytes(0); try { - getTable.get().checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, QUALIFIER) - .build(new RowMutations(row).add(new Put(row)))); + getTable.get().checkAndMutate(CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, QUALIFIER) + .build(new RowMutations(row).add(new Put(row)))); fail("Should fail since the put does not contain any cells"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("No columns to insert")); } try { - getTable.get().checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, QUALIFIER) - .build(new RowMutations(row).add(new Put(row) - .addColumn(FAMILY, QUALIFIER, new byte[MAX_KEY_VALUE_SIZE])))); + getTable.get().checkAndMutate( + CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, QUALIFIER).build(new RowMutations(row) + .add(new Put(row).addColumn(FAMILY, QUALIFIER, new byte[MAX_KEY_VALUE_SIZE])))); fail("Should fail since the put exceeds the max key value size"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("KeyValue size too large")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java index 147630b53a38..6a3d28238a3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,8 +47,8 @@ /** * Class to test asynchronous table admin operations. - * @see TestAsyncTableAdminApi2 This test and it used to be joined it was taking longer than our - * ten minute timeout so they were split. + * @see TestAsyncTableAdminApi2 This test and it used to be joined it was taking longer than our ten + * minute timeout so they were split. * @see TestAsyncTableAdminApi3 Another split out from this class so each runs under ten minutes. 
*/ @RunWith(Parameterized.class) @@ -83,8 +83,8 @@ public void testCreateTableNumberOfRegions() throws Exception { AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); createTableWithDefaultConf(tableName); - List regionLocations = ClientMetaTableAccessor - .getTableHRegionLocations(metaTable, tableName).get(); + List regionLocations = + ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); assertEquals("Table should have only 1 region", 1, regionLocations.size()); final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "_2"); @@ -120,8 +120,8 @@ public void testCreateTableNumberOfRegions() throws Exception { @Test public void testCreateTableWithRegions() throws Exception { byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, - new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, - new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 }, }; + new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, + new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 }, }; int expectedRegions = splitKeys.length + 1; createTableWithDefaultConf(tableName, splitKeys); @@ -129,8 +129,8 @@ public void testCreateTableWithRegions() throws Exception { assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable); AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); - List regions = ClientMetaTableAccessor - .getTableHRegionLocations(metaTable, tableName).get(); + List regions = + ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); Iterator hris = regions.iterator(); assertEquals( @@ -233,8 +233,7 @@ public void testCreateTableWithRegions() throws Exception { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)); admin.createTable(builder.build(), startKey, endKey, expectedRegions).join(); - regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3) - .get(); + regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3).get(); assertEquals( "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size()); @@ -242,7 +241,7 @@ public void testCreateTableWithRegions() throws Exception { // Try an invalid case where there are duplicate split keys splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, - new byte[] { 3, 3, 3 }, new byte[] { 2, 2, 2 } }; + new byte[] { 3, 3, 3 }, new byte[] { 2, 2, 2 } }; final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "_4"); try { createTableWithDefaultConf(tableName4, splitKeys); @@ -339,8 +338,8 @@ public void testCloneTableSchemaPreservingSplits() throws Exception { testCloneTableSchema(tableName, newTableName, true); } - private void testCloneTableSchema(final TableName tableName, - final TableName newTableName, boolean preserveSplits) throws Exception { + private void testCloneTableSchema(final TableName tableName, final TableName newTableName, + boolean preserveSplits) throws Exception { byte[][] splitKeys = new byte[2][]; splitKeys[0] = Bytes.toBytes(4); splitKeys[1] = Bytes.toBytes(8); @@ -351,20 +350,16 @@ private void testCloneTableSchema(final TableName tableName, boolean BLOCK_CACHE = false; // Create the table - TableDescriptor tableDesc = TableDescriptorBuilder - .newBuilder(tableName) + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) 
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)) - .setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(FAMILY_1) - .setBlocksize(BLOCK_SIZE) - .setBlockCacheEnabled(BLOCK_CACHE) - .setTimeToLive(TTL) - .build()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_1).setBlocksize(BLOCK_SIZE) + .setBlockCacheEnabled(BLOCK_CACHE).setTimeToLive(TTL).build()) + .build(); admin.createTable(tableDesc, splitKeys).join(); assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(tableName).size()); assertTrue("Table should be created with splitKyes + 1 rows in META", - admin.isTableAvailable(tableName).get()); + admin.isTableAvailable(tableName).get()); // Clone & Verify admin.cloneTableSchema(tableName, newTableName, preserveSplits).join(); @@ -374,16 +369,18 @@ private void testCloneTableSchema(final TableName tableName, assertEquals(BLOCK_SIZE, newTableDesc.getColumnFamily(FAMILY_1).getBlocksize()); assertEquals(BLOCK_CACHE, newTableDesc.getColumnFamily(FAMILY_1).isBlockCacheEnabled()); assertEquals(TTL, newTableDesc.getColumnFamily(FAMILY_1).getTimeToLive()); - //HBASE-26246 introduced persist of store file tracker into table descriptor - tableDesc = TableDescriptorBuilder.newBuilder(tableDesc).setValue(TRACKER_IMPL, - StoreFileTrackerFactory.getStoreFileTrackerName(TEST_UTIL.getConfiguration())). - build(); + // HBASE-26246 introduced persist of store file tracker into table descriptor + tableDesc = + TableDescriptorBuilder.newBuilder(tableDesc) + .setValue(TRACKER_IMPL, + StoreFileTrackerFactory.getStoreFileTrackerName(TEST_UTIL.getConfiguration())) + .build(); TEST_UTIL.verifyTableDescriptorIgnoreTableName(tableDesc, newTableDesc); if (preserveSplits) { assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size()); assertTrue("New table should be created with splitKyes + 1 rows in META", - admin.isTableAvailable(newTableName).get()); + admin.isTableAvailable(newTableName).get()); } else { assertEquals(1, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java index 5f9f8f5e9bbd..f71808d15c18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,8 +43,8 @@ /** * Class to test asynchronous table admin operations - * @see TestAsyncTableAdminApi This test and it used to be joined it was taking longer than our - * ten minute timeout so they were split. + * @see TestAsyncTableAdminApi This test and it used to be joined it was taking longer than our ten + * minute timeout so they were split. 
*/ @RunWith(Parameterized.class) @Category({ LargeTests.class, ClientTests.class }) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java index 67377b07c180..c0107157a7d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,8 +48,8 @@ /** * Class to test asynchronous table admin operations. - * @see TestAsyncTableAdminApi2 This test and it used to be joined it was taking longer than our - * ten minute timeout so they were split. + * @see TestAsyncTableAdminApi2 This test and it used to be joined it was taking longer than our ten + * minute timeout so they were split. */ @RunWith(Parameterized.class) @Category({ LargeTests.class, ClientTests.class }) @@ -124,7 +124,7 @@ public void testListTables() throws Exception { assertEquals(tables.length + 1, size); for (int i = 0, j = 0; i < tables.length && j < size; i++, j++) { assertTrue("tableName should be equal in order", - tableDescs.get(j).getTableName().equals(tables[i])); + tableDescs.get(j).getTableName().equals(tables[i])); } assertTrue(tableDescs.get(size - 1).getTableName().equals(TableName.META_TABLE_NAME)); @@ -149,10 +149,12 @@ public void testGetTableDescriptor() throws Exception { TableDescriptor desc = builder.build(); admin.createTable(desc).join(); TableDescriptor confirmedHtd = admin.getDescriptor(tableName).get(); - //HBASE-26246 introduced persist of store file tracker into table descriptor - desc = TableDescriptorBuilder.newBuilder(desc).setValue(TRACKER_IMPL, - StoreFileTrackerFactory.getStoreFileTrackerName(TEST_UTIL.getConfiguration())). - build(); + // HBASE-26246 introduced persist of store file tracker into table descriptor + desc = + TableDescriptorBuilder.newBuilder(desc) + .setValue(TRACKER_IMPL, + StoreFileTrackerFactory.getStoreFileTrackerName(TEST_UTIL.getConfiguration())) + .build(); assertEquals(0, TableDescriptor.COMPARATOR.compare(desc, confirmedHtd)); } @@ -281,14 +283,14 @@ public void testDisableAndEnableTables() throws Exception { @Test public void testEnableTableRetainAssignment() throws Exception { byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, - new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, - new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } }; + new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, + new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } }; int expectedRegions = splitKeys.length + 1; createTableWithDefaultConf(tableName, splitKeys); AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); - List regions = ClientMetaTableAccessor - .getTableHRegionLocations(metaTable, tableName).get(); + List regions = + ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); assertEquals( "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size()); @@ -298,8 +300,8 @@ public void testEnableTableRetainAssignment() throws Exception { // Enable table, use retain assignment to assign regions. 
admin.enableTable(tableName).join(); - List regions2 = ClientMetaTableAccessor - .getTableHRegionLocations(metaTable, tableName).get(); + List regions2 = + ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); // Check the assignment. assertEquals(regions.size(), regions2.size()); assertTrue(regions2.containsAll(regions)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java index e6544b2d1cbc..4076fe233077 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -168,16 +168,16 @@ public void test() Admin admin = TEST_UTIL.getAdmin(); admin.flush(TABLE_NAME); List> splitFutures = - TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).stream().map(r -> { - byte[] startKey = r.getRegionInfo().getStartKey(); - int number = startKey.length == 0 ? 55 : Integer.parseInt(Bytes.toString(startKey)); - byte[] splitPoint = Bytes.toBytes(String.format("%03d", number + 55)); - try { - return admin.splitRegionAsync(r.getRegionInfo().getRegionName(), splitPoint); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }).collect(Collectors.toList()); + TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).stream().map(r -> { + byte[] startKey = r.getRegionInfo().getStartKey(); + int number = startKey.length == 0 ? 55 : Integer.parseInt(Bytes.toString(startKey)); + byte[] splitPoint = Bytes.toBytes(String.format("%03d", number + 55)); + try { + return admin.splitRegionAsync(r.getRegionInfo().getRegionName(), splitPoint); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }).collect(Collectors.toList()); for (Future future : splitFutures) { future.get(30, TimeUnit.SECONDS); } @@ -348,10 +348,9 @@ public void testInvalidPutInRowMutations() throws IOException { } try { - table.batch( - Arrays.asList(new RowMutations(row).add(new Put(row) - .addColumn(FAMILY, CQ, new byte[MAX_KEY_VALUE_SIZE])), - new Delete(row))); + table.batch(Arrays.asList( + new RowMutations(row).add(new Put(row).addColumn(FAMILY, CQ, new byte[MAX_KEY_VALUE_SIZE])), + new Delete(row))); fail("Should fail since the put exceeds the max key value size"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("KeyValue size too large")); @@ -365,20 +364,18 @@ public void testInvalidPutInRowMutationsInCheckAndMutate() throws IOException { AsyncTable table = tableGetter.apply(TABLE_NAME); try { table.batch(Arrays.asList(new Delete(row), CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, CQ) - .build(new RowMutations(row).add(new Put(row))))); + .ifNotExists(FAMILY, CQ).build(new RowMutations(row).add(new Put(row))))); fail("Should fail since the put does not contain any cells"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("No columns to insert")); } try { - table.batch( - Arrays.asList(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, CQ) - .build(new RowMutations(row).add(new Put(row) - .addColumn(FAMILY, CQ, new byte[MAX_KEY_VALUE_SIZE]))), - new Delete(row))); + table.batch(Arrays.asList( + CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, CQ) + .build(new 
RowMutations(row) + .add(new Put(row).addColumn(FAMILY, CQ, new byte[MAX_KEY_VALUE_SIZE]))), + new Delete(row))); fail("Should fail since the put exceeds the max key value size"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("KeyValue size too large")); @@ -397,38 +394,38 @@ public void testWithCheckAndMutate() throws Exception { byte[] row6 = Bytes.toBytes("row6"); byte[] row7 = Bytes.toBytes("row7"); - table.putAll(Arrays.asList( - new Put(row1).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), - new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), - new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), - new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), - new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), - new Put(row6).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)), - new Put(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")))).get(); - - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row1) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new RowMutations(row1) - .add(new Put(row1).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("g"))) - .add(new Delete(row1).addColumns(FAMILY, Bytes.toBytes("A"))) - .add(new Increment(row1).addColumn(FAMILY, Bytes.toBytes("C"), 3L)) - .add(new Append(row1).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); + table.putAll( + Arrays.asList(new Put(row1).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), + new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), + new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), + new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), + new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), + new Put(row6).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)), + new Put(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")))).get(); + + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(row1).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new RowMutations(row1) + .add(new Put(row1).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("g"))) + .add(new Delete(row1).addColumns(FAMILY, Bytes.toBytes("A"))) + .add(new Increment(row1).addColumn(FAMILY, Bytes.toBytes("C"), 3L)) + .add(new Append(row1).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); Get get = new Get(row2).addColumn(FAMILY, Bytes.toBytes("B")); - RowMutations mutations = new RowMutations(row3) - .add(new Delete(row3).addColumns(FAMILY, Bytes.toBytes("C"))) - .add(new Put(row3).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) - .add(new Increment(row3).addColumn(FAMILY, Bytes.toBytes("A"), 5L)) - .add(new Append(row3).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); - CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row4) - .ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("a")) - .build(new Put(row4).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("h"))); + RowMutations mutations = + new RowMutations(row3).add(new Delete(row3).addColumns(FAMILY, Bytes.toBytes("C"))) + .add(new Put(row3).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) + .add(new Increment(row3).addColumn(FAMILY, Bytes.toBytes("A"), 5L)) + .add(new Append(row3).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); + CheckAndMutate checkAndMutate2 = + CheckAndMutate.newBuilder(row4).ifEquals(FAMILY, 
Bytes.toBytes("D"), Bytes.toBytes("a")) + .build(new Put(row4).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("h"))); Put put = new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("f")); - CheckAndMutate checkAndMutate3 = CheckAndMutate.newBuilder(row6) - .ifEquals(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)) - .build(new Increment(row6).addColumn(FAMILY, Bytes.toBytes("F"), 1)); - CheckAndMutate checkAndMutate4 = CheckAndMutate.newBuilder(row7) - .ifEquals(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")) - .build(new Append(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g"))); + CheckAndMutate checkAndMutate3 = + CheckAndMutate.newBuilder(row6).ifEquals(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)) + .build(new Increment(row6).addColumn(FAMILY, Bytes.toBytes("F"), 1)); + CheckAndMutate checkAndMutate4 = + CheckAndMutate.newBuilder(row7).ifEquals(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")) + .build(new Append(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g"))); List actions = Arrays.asList(checkAndMutate1, get, mutations, checkAndMutate2, put, checkAndMutate3, checkAndMutate4); @@ -457,13 +454,13 @@ public void testWithCheckAndMutate() throws Exception { checkAndMutateResult = (CheckAndMutateResult) results.get(5); assertTrue(checkAndMutateResult.isSuccess()); - assertEquals(11, Bytes.toLong(checkAndMutateResult.getResult() - .getValue(FAMILY, Bytes.toBytes("F")))); + assertEquals(11, + Bytes.toLong(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("F")))); checkAndMutateResult = (CheckAndMutateResult) results.get(6); assertTrue(checkAndMutateResult.isSuccess()); - assertEquals("gg", Bytes.toString(checkAndMutateResult.getResult() - .getValue(FAMILY, Bytes.toBytes("G")))); + assertEquals("gg", + Bytes.toString(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("G")))); result = table.get(new Get(row1)).get(); assertEquals("g", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatchRetryImmediately.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatchRetryImmediately.java index 9f22eba623a5..b5c91b26a1fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatchRetryImmediately.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatchRetryImmediately.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public class TestAsyncTableBatchRetryImmediately { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableBatchRetryImmediately.class); + HBaseClassTestRule.forClass(TestAsyncTableBatchRetryImmediately.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -93,7 +93,7 @@ public void test() { // if we do not deal with RetryImmediatelyException, we will timeout here since we need to retry // hundreds times. 
List gets = IntStream.range(0, COUNT).mapToObj(i -> new Get(Bytes.toBytes(i))) - .collect(Collectors.toList()); + .collect(Collectors.toList()); List results = table.getAll(gets).join(); for (int i = 0; i < COUNT; i++) { byte[] value = results.get(i).getValue(FAMILY, QUAL); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java index 71e8e3bcdbb9..71a12274de81 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ public class TestAsyncTableGetMultiThreaded { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableGetMultiThreaded.class); + HBaseClassTestRule.forClass(TestAsyncTableGetMultiThreaded.class); private static final Logger LOG = LoggerFactory.getLogger(TestAsyncTableGetMultiThreaded.class); @@ -137,7 +137,7 @@ public void test() throws Exception { AtomicBoolean stop = new AtomicBoolean(false); ExecutorService executor = Executors.newFixedThreadPool(numThreads, new ThreadFactoryBuilder().setNameFormat("TestAsyncGet-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); List> futures = new ArrayList<>(); IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> { run(stop); @@ -183,8 +183,8 @@ public String explainFailure() throws Exception { LOG.warn("Failed to query"); } if (!retrier.shouldRetry()) { - throw new IOException("Can not finish compaction in time after attempt " + - retrier.getAttemptTimes() + " times"); + throw new IOException("Can not finish compaction in time after attempt " + + retrier.getAttemptTimes() + " times"); } retrier.sleepUntilNextRetry(); } @@ -209,7 +209,7 @@ public String explainFailure() throws Exception { Thread.sleep(5000); } List balancerDecisionRecords = - admin.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER, 2, null); + admin.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER, 2, null); Assert.assertEquals(balancerDecisionRecords.size(), 2); LOG.info("====== Read test finished, shutdown thread pool ======"); stop.set(true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java index 9325f4df62da..f74420167e7f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,8 +28,8 @@ @Ignore // Can't move hbase:meta off master server in AMv2. TODO. 
@Category({ LargeTests.class, ClientTests.class }) -public class TestAsyncTableGetMultiThreadedWithBasicCompaction extends - TestAsyncTableGetMultiThreaded { +public class TestAsyncTableGetMultiThreadedWithBasicCompaction + extends TestAsyncTableGetMultiThreaded { @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java index 992808b73ef6..0334706074f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,8 +28,8 @@ @Ignore // Can't move hbase:meta off master server in AMv2. TODO. @Category({ LargeTests.class, ClientTests.class }) -public class TestAsyncTableGetMultiThreadedWithEagerCompaction extends - TestAsyncTableGetMultiThreaded { +public class TestAsyncTableGetMultiThreadedWithEagerCompaction + extends TestAsyncTableGetMultiThreaded { @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java index 245d75554840..9473c66d5ddd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocatePrefetch.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestAsyncTableLocatePrefetch { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableLocatePrefetch.class); + HBaseClassTestRule.forClass(TestAsyncTableLocatePrefetch.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java index ad52e6356998..3f0bec26f756 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableLocateRegionForDeletedTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestAsyncTableLocateRegionForDeletedTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableLocateRegionForDeletedTable.class); + HBaseClassTestRule.forClass(TestAsyncTableLocateRegionForDeletedTable.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java index a384bf194d91..47ba964e5a9c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableNoncedRetry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,7 +56,7 @@ public class TestAsyncTableNoncedRetry { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableNoncedRetry.class); + HBaseClassTestRule.forClass(TestAsyncTableNoncedRetry.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -97,7 +97,7 @@ public Optional getRegionObserver() { @Override public void postBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) { + MiniBatchOperationInProgress miniBatchOp) { // We sleep when the last of the miniBatchOperation is executed if (CALLED.getAndIncrement() == miniBatchOperationCount - 1) { Threads.sleepWithoutInterrupt(SLEEP_TIME); @@ -109,9 +109,9 @@ public void postBatchMutate(ObserverContext c, public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); TEST_UTIL.getAdmin() - .createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(SleepOnceCP.class.getName()).build()); + .createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(SleepOnceCP.class.getName()).build()); TEST_UTIL.waitTableAvailable(TABLE_NAME); ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); } @@ -132,7 +132,7 @@ public void setUp() throws IOException, InterruptedException { public void testAppend() throws InterruptedException, ExecutionException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; Result result = table.append(new Append(row).addColumn(FAMILY, QUALIFIER, VALUE)).get(); @@ -143,15 +143,15 @@ public void testAppend() throws InterruptedException, ExecutionException { } @Test - public void testAppendWhenReturnResultsEqualsFalse() throws InterruptedException, - ExecutionException { + public void testAppendWhenReturnResultsEqualsFalse() + throws InterruptedException, ExecutionException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - Result result = table.append(new 
Append(row).addColumn(FAMILY, QUALIFIER, VALUE) - .setReturnResults(false)).get(); + Result result = table + .append(new Append(row).addColumn(FAMILY, QUALIFIER, VALUE).setReturnResults(false)).get(); // make sure we called twice and the result is still correct assertEquals(2, CALLED.get()); @@ -162,7 +162,7 @@ public void testAppendWhenReturnResultsEqualsFalse() throws InterruptedException public void testIncrement() throws InterruptedException, ExecutionException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; long result = table.incrementColumnValue(row, FAMILY, QUALIFIER, 1L).get(); @@ -173,15 +173,16 @@ public void testIncrement() throws InterruptedException, ExecutionException { } @Test - public void testIncrementWhenReturnResultsEqualsFalse() throws InterruptedException, - ExecutionException { + public void testIncrementWhenReturnResultsEqualsFalse() + throws InterruptedException, ExecutionException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - Result result = table.increment(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L) - .setReturnResults(false)).get(); + Result result = + table.increment(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L).setReturnResults(false)) + .get(); // make sure we called twice and the result is still correct assertEquals(2, CALLED.get()); @@ -190,15 +191,16 @@ public void testIncrementWhenReturnResultsEqualsFalse() throws InterruptedExcept @Test public void testIncrementInRowMutations() - throws InterruptedException, ExecutionException, IOException { + throws InterruptedException, ExecutionException, IOException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setWriteRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setWriteRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - Result result = table.mutateRow(new RowMutations(row) - .add(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L)) - .add(new Delete(row).addColumn(FAMILY, QUALIFIER2))).get(); + Result result = table + .mutateRow(new RowMutations(row).add(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L)) + .add(new Delete(row).addColumn(FAMILY, QUALIFIER2))) + .get(); // make sure we called twice and the result is still correct assertEquals(2, CALLED.get()); @@ -207,15 +209,16 @@ public void testIncrementInRowMutations() @Test public void testAppendInRowMutations() - throws InterruptedException, ExecutionException, IOException { + throws InterruptedException, ExecutionException, IOException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setWriteRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setWriteRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - Result result = table.mutateRow(new RowMutations(row) - .add(new Append(row).addColumn(FAMILY, QUALIFIER, VALUE)) - .add(new Delete(row).addColumn(FAMILY, QUALIFIER2))).get(); + Result result = table + .mutateRow(new RowMutations(row).add(new Append(row).addColumn(FAMILY, QUALIFIER, VALUE)) + .add(new Delete(row).addColumn(FAMILY, QUALIFIER2))) + .get(); // make sure we called 
twice and the result is still correct assertEquals(2, CALLED.get()); @@ -224,15 +227,16 @@ public void testAppendInRowMutations() @Test public void testIncrementAndAppendInRowMutations() - throws InterruptedException, ExecutionException, IOException { + throws InterruptedException, ExecutionException, IOException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setWriteRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setWriteRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - Result result = table.mutateRow(new RowMutations(row) - .add(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L)) - .add(new Append(row).addColumn(FAMILY, QUALIFIER2, VALUE))).get(); + Result result = table + .mutateRow(new RowMutations(row).add(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L)) + .add(new Append(row).addColumn(FAMILY, QUALIFIER2, VALUE))) + .get(); // make sure we called twice and the result is still correct assertEquals(2, CALLED.get()); @@ -244,12 +248,12 @@ public void testIncrementAndAppendInRowMutations() public void testIncrementInCheckAndMutate() throws InterruptedException, ExecutionException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, QUALIFIER2) - .build(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L))).get(); + CheckAndMutateResult result = + table.checkAndMutate(CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, QUALIFIER2) + .build(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L))).get(); // make sure we called twice and the result is still correct assertEquals(2, CALLED.get()); @@ -261,12 +265,12 @@ public void testIncrementInCheckAndMutate() throws InterruptedException, Executi public void testAppendInCheckAndMutate() throws InterruptedException, ExecutionException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, QUALIFIER2) - .build(new Append(row).addColumn(FAMILY, QUALIFIER, VALUE))).get(); + CheckAndMutateResult result = + table.checkAndMutate(CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, QUALIFIER2) + .build(new Append(row).addColumn(FAMILY, QUALIFIER, VALUE))).get(); // make sure we called twice and the result is still correct assertEquals(2, CALLED.get()); @@ -275,17 +279,18 @@ public void testAppendInCheckAndMutate() throws InterruptedException, ExecutionE } @Test - public void testIncrementInRowMutationsInCheckAndMutate() throws InterruptedException, - ExecutionException, IOException { + public void testIncrementInRowMutationsInCheckAndMutate() + throws InterruptedException, ExecutionException, IOException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, QUALIFIER3) - .build(new 
RowMutations(row).add(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L)) - .add(new Delete(row).addColumn(FAMILY, QUALIFIER2)))).get(); + CheckAndMutateResult result = + table.checkAndMutate(CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, QUALIFIER3) + .build(new RowMutations(row).add(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L)) + .add(new Delete(row).addColumn(FAMILY, QUALIFIER2)))) + .get(); // make sure we called twice and the result is still correct assertEquals(2, CALLED.get()); @@ -294,17 +299,18 @@ public void testIncrementInRowMutationsInCheckAndMutate() throws InterruptedExce } @Test - public void testAppendInRowMutationsInCheckAndMutate() throws InterruptedException, - ExecutionException, IOException { + public void testAppendInRowMutationsInCheckAndMutate() + throws InterruptedException, ExecutionException, IOException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, QUALIFIER3) - .build(new RowMutations(row).add(new Append(row).addColumn(FAMILY, QUALIFIER, VALUE)) - .add(new Delete(row).addColumn(FAMILY, QUALIFIER2)))).get(); + CheckAndMutateResult result = + table.checkAndMutate(CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, QUALIFIER3) + .build(new RowMutations(row).add(new Append(row).addColumn(FAMILY, QUALIFIER, VALUE)) + .add(new Delete(row).addColumn(FAMILY, QUALIFIER2)))) + .get(); // make sure we called twice and the result is still correct assertEquals(2, CALLED.get()); @@ -313,17 +319,18 @@ public void testAppendInRowMutationsInCheckAndMutate() throws InterruptedExcepti } @Test - public void testIncrementAndAppendInRowMutationsInCheckAndMutate() throws InterruptedException, - ExecutionException, IOException { + public void testIncrementAndAppendInRowMutationsInCheckAndMutate() + throws InterruptedException, ExecutionException, IOException { assertEquals(0, CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 1; - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(FAMILY, QUALIFIER3) - .build(new RowMutations(row).add(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L)) - .add(new Append(row).addColumn(FAMILY, QUALIFIER2, VALUE)))).get(); + CheckAndMutateResult result = + table.checkAndMutate(CheckAndMutate.newBuilder(row).ifNotExists(FAMILY, QUALIFIER3) + .build(new RowMutations(row).add(new Increment(row).addColumn(FAMILY, QUALIFIER, 1L)) + .add(new Append(row).addColumn(FAMILY, QUALIFIER2, VALUE)))) + .get(); // make sure we called twice and the result is still correct assertEquals(2, CALLED.get()); @@ -333,8 +340,7 @@ public void testIncrementAndAppendInRowMutationsInCheckAndMutate() throws Interr } @Test - public void testBatch() throws InterruptedException, - ExecutionException, IOException { + public void testBatch() throws InterruptedException, ExecutionException, IOException { byte[] row2 = Bytes.toBytes(Bytes.toString(row) + "2"); byte[] row3 = Bytes.toBytes(Bytes.toString(row) + "3"); byte[] row4 = Bytes.toBytes(Bytes.toString(row) + "4"); @@ -344,25 +350,22 @@ public void testBatch() throws InterruptedException, assertEquals(0, 
CALLED.get()); AsyncTable table = ASYNC_CONN.getTableBuilder(TABLE_NAME) - .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); + .setRpcTimeout(RPC_TIMEOUT, TimeUnit.MILLISECONDS).build(); miniBatchOperationCount = 6; - List results = table.batchAll(Arrays.asList( - new Append(row).addColumn(FAMILY, QUALIFIER, VALUE), - new Increment(row2).addColumn(FAMILY, QUALIFIER, 1L), - new RowMutations(row3) - .add(new Increment(row3).addColumn(FAMILY, QUALIFIER, 1L)) - .add(new Append(row3).addColumn(FAMILY, QUALIFIER2, VALUE)), - CheckAndMutate.newBuilder(row4) - .ifNotExists(FAMILY, QUALIFIER2) - .build(new Increment(row4).addColumn(FAMILY, QUALIFIER, 1L)), - CheckAndMutate.newBuilder(row5) - .ifNotExists(FAMILY, QUALIFIER2) - .build(new Append(row5).addColumn(FAMILY, QUALIFIER, VALUE)), - CheckAndMutate.newBuilder(row6) - .ifNotExists(FAMILY, QUALIFIER3) - .build(new RowMutations(row6).add(new Increment(row6).addColumn(FAMILY, QUALIFIER, 1L)) - .add(new Append(row6).addColumn(FAMILY, QUALIFIER2, VALUE))))).get(); + List results = table.batchAll( + Arrays.asList(new Append(row).addColumn(FAMILY, QUALIFIER, VALUE), + new Increment(row2).addColumn(FAMILY, QUALIFIER, 1L), + new RowMutations(row3).add(new Increment(row3).addColumn(FAMILY, QUALIFIER, 1L)) + .add(new Append(row3).addColumn(FAMILY, QUALIFIER2, VALUE)), + CheckAndMutate.newBuilder(row4).ifNotExists(FAMILY, QUALIFIER2) + .build(new Increment(row4).addColumn(FAMILY, QUALIFIER, 1L)), + CheckAndMutate.newBuilder(row5).ifNotExists(FAMILY, QUALIFIER2) + .build(new Append(row5).addColumn(FAMILY, QUALIFIER, VALUE)), + CheckAndMutate.newBuilder(row6).ifNotExists(FAMILY, QUALIFIER3) + .build(new RowMutations(row6).add(new Increment(row6).addColumn(FAMILY, QUALIFIER, 1L)) + .add(new Append(row6).addColumn(FAMILY, QUALIFIER2, VALUE))))) + .get(); // make sure we called twice and the result is still correct diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java index 6c538f5e3e76..14f4b6b2846e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRSCrashPublish.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertNotEquals; + import java.io.IOException; import java.util.concurrent.ExecutionException; import org.apache.hadoop.conf.Configuration; @@ -43,7 +44,7 @@ public class TestAsyncTableRSCrashPublish { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRSCrashPublish.class); + HBaseClassTestRule.forClass(TestAsyncTableRSCrashPublish.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -54,22 +55,18 @@ public class TestAsyncTableRSCrashPublish { @BeforeClass public static void beforeClass() throws Exception { UTIL.getConfiguration().setBoolean(HConstants.STATUS_PUBLISHED, true); - /* Below is code for choosing a NetworkInterface and then setting it into - configs so can be picked up by the client and server. 
- String niName = UTIL.getConfiguration().get(HConstants.STATUS_MULTICAST_NI_NAME); - NetworkInterface ni; - if (niName != null) { - ni = NetworkInterface.getByName(niName); - } else { - String mcAddress = UTIL.getConfiguration().get(HConstants.STATUS_MULTICAST_ADDRESS, - HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); - InetAddress ina = InetAddress.getByName(mcAddress); - boolean inet6Address = ina instanceof Inet6Address; - ni = NetworkInterface.getByInetAddress(inet6Address? - Addressing.getIp6Address(): Addressing.getIp4Address()); - } - UTIL.getConfiguration().set(HConstants.STATUS_MULTICAST_NI_NAME, ni.getName()); - */ + /* + * Below is code for choosing a NetworkInterface and then setting it into configs so can be + * picked up by the client and server. String niName = + * UTIL.getConfiguration().get(HConstants.STATUS_MULTICAST_NI_NAME); NetworkInterface ni; if + * (niName != null) { ni = NetworkInterface.getByName(niName); } else { String mcAddress = + * UTIL.getConfiguration().get(HConstants.STATUS_MULTICAST_ADDRESS, + * HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); InetAddress ina = + * InetAddress.getByName(mcAddress); boolean inet6Address = ina instanceof Inet6Address; ni = + * NetworkInterface.getByInetAddress(inet6Address? Addressing.getIp6Address(): + * Addressing.getIp4Address()); } + * UTIL.getConfiguration().set(HConstants.STATUS_MULTICAST_NI_NAME, ni.getName()); + */ UTIL.startMiniCluster(2); UTIL.createTable(TABLE_NAME, FAMILY); UTIL.waitTableAvailable(TABLE_NAME); @@ -85,18 +82,18 @@ public void test() throws IOException, ExecutionException, InterruptedException Configuration conf = UTIL.getHBaseCluster().getMaster().getConfiguration(); try (AsyncConnection connection = ConnectionFactory.createAsyncConnection(conf).get()) { AsyncNonMetaRegionLocator locator = - ((AsyncConnectionImpl) connection).getLocator().getNonMetaRegionLocator(); + ((AsyncConnectionImpl) connection).getLocator().getNonMetaRegionLocator(); connection.getTable(TABLE_NAME).get(new Get(Bytes.toBytes(0))).join(); ServerName serverName = - locator.getRegionLocationInCache(TABLE_NAME, HConstants.EMPTY_START_ROW) - .getDefaultRegionLocation().getServerName(); + locator.getRegionLocationInCache(TABLE_NAME, HConstants.EMPTY_START_ROW) + .getDefaultRegionLocation().getServerName(); UTIL.getMiniHBaseCluster().stopRegionServer(serverName); UTIL.waitFor(60000, () -> locator.getRegionLocationInCache(TABLE_NAME, HConstants.EMPTY_START_ROW) == null); connection.getTable(TABLE_NAME).get(new Get(Bytes.toBytes(0))).join(); assertNotEquals(serverName, locator.getRegionLocationInCache(TABLE_NAME, HConstants.EMPTY_START_ROW) - .getDefaultRegionLocation().getServerName()); + .getDefaultRegionLocation().getServerName()); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionLocator.java index 29865e7841bb..c75fd66869e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestAsyncTableRegionLocator extends AbstractTestRegionLocator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRegionLocator.class); + HBaseClassTestRule.forClass(TestAsyncTableRegionLocator.class); private static AsyncConnection CONN; @@ -68,7 +68,7 @@ protected byte[][] getEndKeys(TableName tableName) throws IOException { @Override protected Pair getStartEndKeys(TableName tableName) throws IOException { List> startEndKeys = - get(CONN.getRegionLocator(tableName).getStartEndKeys()); + get(CONN.getRegionLocator(tableName).getStartEndKeys()); byte[][] startKeys = new byte[startEndKeys.size()][]; byte[][] endKeys = new byte[startEndKeys.size()][]; for (int i = 0, n = startEndKeys.size(); i < n; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java index 3e1d994cc81c..6f74dba8eef4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public class TestAsyncTableRegionReplicasGet extends AbstractTestAsyncTableRegio @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRegionReplicasGet.class); + HBaseClassTestRule.forClass(TestAsyncTableRegionReplicasGet.class); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java index bd0f00c4864b..75b73b8d7d67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestAsyncTableRegionReplicasScan extends AbstractTestAsyncTableRegi @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRegionReplicasScan.class); + HBaseClassTestRule.forClass(TestAsyncTableRegionReplicasScan.class); private static int ROW_COUNT = 1000; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScan.java index 7ea7388b78cb..9efb5447a66e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,6 +26,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.startsWith; + import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; import java.util.List; @@ -74,8 +75,8 @@ protected Scan createScan() { @Override protected List doScan(Scan scan, int closeAfter) throws Exception { - AsyncTable table = connectionRule.getAsyncConnection() - .getTable(TABLE_NAME, ForkJoinPool.commonPool()); + AsyncTable table = + connectionRule.getAsyncConnection().getTable(TABLE_NAME, ForkJoinPool.commonPool()); List results; if (closeAfter > 0) { // these tests batch settings with the sample data result in each result being @@ -85,12 +86,12 @@ protected List doScan(Scan scan, int closeAfter) throws Exception { closeAfter = closeAfter * 2; } TracedScanResultConsumer consumer = - new TracedScanResultConsumer(new LimitedScanResultConsumer(closeAfter)); + new TracedScanResultConsumer(new LimitedScanResultConsumer(closeAfter)); table.scan(scan, consumer); results = consumer.getAll(); } else { TracedScanResultConsumer consumer = - new TracedScanResultConsumer(new SimpleScanResultConsumerImpl()); + new TracedScanResultConsumer(new SimpleScanResultConsumerImpl()); table.scan(scan, consumer); results = consumer.getAll(); } @@ -103,68 +104,44 @@ protected List doScan(Scan scan, int closeAfter) throws Exception { @Override protected void assertTraceContinuity() { final String parentSpanName = testName.getMethodName(); - final Matcher parentSpanMatcher = allOf( - hasName(parentSpanName), - hasStatusWithCode(StatusCode.OK), - hasEnded()); + final Matcher parentSpanMatcher = + allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded()); waitForSpan(parentSpanMatcher); - final List spans = otelClassRule.getSpans() - .stream() - .filter(Objects::nonNull) - .collect(Collectors.toList()); + final List spans = + otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList()); if (logger.isDebugEnabled()) { StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans); stringTraceRenderer.render(logger::debug); } - final String parentSpanId = spans.stream() - .filter(parentSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); - - final Matcher scanOperationSpanMatcher = allOf( - hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), - hasParentSpanId(parentSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()); + final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); + + final Matcher scanOperationSpanMatcher = + allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), + hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded()); assertThat(spans, hasItem(scanOperationSpanMatcher)); - final String scanOperationSpanId = spans.stream() - .filter(scanOperationSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); + final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); final Matcher onScanMetricsCreatedMatcher = - hasName("TracedScanResultConsumer#onScanMetricsCreated"); + 
hasName("TracedScanResultConsumer#onScanMetricsCreated"); assertThat(spans, hasItem(onScanMetricsCreatedMatcher)); - spans.stream() - .filter(onScanMetricsCreatedMatcher::matches) - .forEach(span -> assertThat(span, allOf( - onScanMetricsCreatedMatcher, - hasParentSpanId(scanOperationSpanId), - hasEnded()))); + spans.stream().filter(onScanMetricsCreatedMatcher::matches).forEach(span -> assertThat(span, + allOf(onScanMetricsCreatedMatcher, hasParentSpanId(scanOperationSpanId), hasEnded()))); final Matcher onNextMatcher = hasName("TracedScanResultConsumer#onNext"); assertThat(spans, hasItem(onNextMatcher)); - spans.stream() - .filter(onNextMatcher::matches) - .forEach(span -> assertThat(span, allOf( - onNextMatcher, - hasParentSpanId(scanOperationSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()))); + spans.stream().filter(onNextMatcher::matches) + .forEach(span -> assertThat(span, allOf(onNextMatcher, hasParentSpanId(scanOperationSpanId), + hasStatusWithCode(StatusCode.OK), hasEnded()))); final Matcher onCompleteMatcher = hasName("TracedScanResultConsumer#onComplete"); assertThat(spans, hasItem(onCompleteMatcher)); - spans.stream() - .filter(onCompleteMatcher::matches) - .forEach(span -> assertThat(span, allOf( - onCompleteMatcher, - hasParentSpanId(scanOperationSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()))); + spans.stream().filter(onCompleteMatcher::matches) + .forEach(span -> assertThat(span, allOf(onCompleteMatcher, + hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded()))); } @Override @@ -173,42 +150,28 @@ protected void assertTraceError(Matcher exceptionTypeNameMatcher) { final Matcher parentSpanMatcher = allOf(hasName(parentSpanName), hasEnded()); waitForSpan(parentSpanMatcher); - final List spans = otelClassRule.getSpans() - .stream() - .filter(Objects::nonNull) - .collect(Collectors.toList()); + final List spans = + otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList()); if (logger.isDebugEnabled()) { StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans); stringTraceRenderer.render(logger::debug); } - final String parentSpanId = spans.stream() - .filter(parentSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); - - final Matcher scanOperationSpanMatcher = allOf( - hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), - hasParentSpanId(parentSpanId), - hasStatusWithCode(StatusCode.ERROR), - hasExceptionWithType(exceptionTypeNameMatcher), - hasEnded()); + final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); + + final Matcher scanOperationSpanMatcher = + allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), + hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.ERROR), + hasExceptionWithType(exceptionTypeNameMatcher), hasEnded()); assertThat(spans, hasItem(scanOperationSpanMatcher)); - final String scanOperationSpanId = spans.stream() - .filter(scanOperationSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); + final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); final Matcher onErrorMatcher = hasName("TracedScanResultConsumer#onError"); assertThat(spans, hasItem(onErrorMatcher)); - spans.stream() - 
.filter(onErrorMatcher::matches) - .forEach(span -> assertThat(span, allOf( - onErrorMatcher, - hasParentSpanId(scanOperationSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()))); + spans.stream().filter(onErrorMatcher::matches) + .forEach(span -> assertThat(span, allOf(onErrorMatcher, + hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded()))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanAll.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanAll.java index 33460bf4dbaf..79e1c6bc4b22 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanAll.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanAll.java @@ -26,6 +26,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.startsWith; + import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; import java.util.List; @@ -94,32 +95,23 @@ protected List doScan(Scan scan, int closeAfter) throws Exception { @Override protected void assertTraceContinuity() { final String parentSpanName = testName.getMethodName(); - final Matcher parentSpanMatcher = allOf( - hasName(parentSpanName), - hasStatusWithCode(StatusCode.OK), - hasEnded()); + final Matcher parentSpanMatcher = + allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded()); waitForSpan(parentSpanMatcher); - final List spans = otelClassRule.getSpans() - .stream() - .filter(Objects::nonNull) - .collect(Collectors.toList()); + final List spans = + otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList()); if (logger.isDebugEnabled()) { StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans); stringTraceRenderer.render(logger::debug); } - final String parentSpanId = spans.stream() - .filter(parentSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); - - final Matcher scanOperationSpanMatcher = allOf( - hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), - hasParentSpanId(parentSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()); + final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); + + final Matcher scanOperationSpanMatcher = + allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), + hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded()); assertThat(spans, hasItem(scanOperationSpanMatcher)); } @@ -129,27 +121,20 @@ protected void assertTraceError(Matcher exceptionTypeNameMatcher) { final Matcher parentSpanMatcher = allOf(hasName(parentSpanName), hasEnded()); waitForSpan(parentSpanMatcher); - final List spans = otelClassRule.getSpans() - .stream() - .filter(Objects::nonNull) - .collect(Collectors.toList()); + final List spans = + otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList()); if (logger.isDebugEnabled()) { StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans); stringTraceRenderer.render(logger::debug); } - final String parentSpanId = spans.stream() - .filter(parentSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); - - final Matcher scanOperationSpanMatcher = allOf( - hasName(startsWith("SCAN " + 
TABLE_NAME.getNameWithNamespaceInclAsString())), - hasParentSpanId(parentSpanId), - hasStatusWithCode(StatusCode.ERROR), - hasExceptionWithType(exceptionTypeNameMatcher), - hasEnded()); + final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); + + final Matcher scanOperationSpanMatcher = + allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), + hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.ERROR), + hasExceptionWithType(exceptionTypeNameMatcher), hasEnded()); assertThat(spans, hasItem(scanOperationSpanMatcher)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java index 893c38393789..3d9ac767fb6a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public class TestAsyncTableScanException { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableScanException.class); + HBaseClassTestRule.forClass(TestAsyncTableScanException.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -103,9 +103,9 @@ public boolean postScannerNext(ObserverContext c, public static void setUp() throws Exception { UTIL.startMiniCluster(1); UTIL.getAdmin() - .createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(ErrorCP.class.getName()).build()); + .createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(ErrorCP.class.getName()).build()); try (Table table = UTIL.getConnection().getTable(TABLE_NAME)) { for (int i = 0; i < ROW_COUNT; i++) { table.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(i))); @@ -141,7 +141,7 @@ public void testDoNotRetryIOException() throws IOException { public void testIOException() throws IOException { ERROR = true; try (ResultScanner scanner = - CONN.getTableBuilder(TABLE_NAME).setMaxAttempts(3).build().getScanner(FAMILY)) { + CONN.getTableBuilder(TABLE_NAME).setMaxAttempts(3).build().getScanner(FAMILY)) { scanner.next(); fail(); } catch (RetriesExhaustedException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java index 9c7f024c99c9..9c26b72f6d02 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanRenewLease.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanRenewLease.java index 0936bbe4049a..8fbf1f42f614 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanRenewLease.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanRenewLease.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanner.java index 9f3d4de54452..4c0f755324f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,6 +26,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.startsWith; + import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; import java.util.ArrayList; @@ -81,8 +82,8 @@ protected Scan createScan() { @Override protected List doScan(Scan scan, int closeAfter) throws Exception { - AsyncTable table = connectionRule.getAsyncConnection() - .getTable(TABLE_NAME, ForkJoinPool.commonPool()); + AsyncTable table = + connectionRule.getAsyncConnection().getTable(TABLE_NAME, ForkJoinPool.commonPool()); List results = new ArrayList<>(); // these tests batch settings with the sample data result in each result being // split in two. 
so we must allow twice the expected results in order to reach @@ -107,32 +108,23 @@ protected List doScan(Scan scan, int closeAfter) throws Exception { @Override protected void assertTraceContinuity() { final String parentSpanName = testName.getMethodName(); - final Matcher parentSpanMatcher = allOf( - hasName(parentSpanName), - hasStatusWithCode(StatusCode.OK), - hasEnded()); + final Matcher parentSpanMatcher = + allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded()); waitForSpan(parentSpanMatcher); - final List spans = otelClassRule.getSpans() - .stream() - .filter(Objects::nonNull) - .collect(Collectors.toList()); + final List spans = + otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList()); if (logger.isDebugEnabled()) { StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans); stringTraceRenderer.render(logger::debug); } - final String parentSpanId = spans.stream() - .filter(parentSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); - - assertThat(spans, hasItem(allOf( - hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), - hasParentSpanId(parentSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()))); + final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); + + assertThat(spans, + hasItem(allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), + hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded()))); } @Override @@ -141,27 +133,20 @@ protected void assertTraceError(Matcher exceptionTypeNameMatcher) { final Matcher parentSpanMatcher = allOf(hasName(parentSpanName), hasEnded()); waitForSpan(parentSpanMatcher); - final List spans = otelClassRule.getSpans() - .stream() - .filter(Objects::nonNull) - .collect(Collectors.toList()); + final List spans = + otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList()); if (logger.isDebugEnabled()) { StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans); stringTraceRenderer.render(logger::debug); } - final String parentSpanId = spans.stream() - .filter(parentSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); - - final Matcher scanOperationSpanMatcher = allOf( - hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), - hasParentSpanId(parentSpanId), - hasStatusWithCode(StatusCode.ERROR), - hasExceptionWithType(exceptionTypeNameMatcher), - hasEnded()); + final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); + + final Matcher scanOperationSpanMatcher = + allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), + hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.ERROR), + hasExceptionWithType(exceptionTypeNameMatcher), hasEnded()); assertThat(spans, hasItem(scanOperationSpanMatcher)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScannerCloseWhileSuspending.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScannerCloseWhileSuspending.java index 10b6b622fec1..9854cd32f17b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScannerCloseWhileSuspending.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScannerCloseWhileSuspending.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java index 61bb1635a51e..e3d24b66a86a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestAsyncTableUseMetaReplicas { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableUseMetaReplicas.class); + HBaseClassTestRule.forClass(TestAsyncTableUseMetaReplicas.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -77,8 +77,8 @@ public Optional getRegionObserver() { public void preScannerOpen(ObserverContext c, Scan scan) throws IOException { RegionInfo region = c.getEnvironment().getRegionInfo(); - if (FAIL_PRIMARY_SCAN && TableName.isMetaTableName(region.getTable()) && - region.getReplicaId() == RegionReplicaUtil.DEFAULT_REPLICA_ID) { + if (FAIL_PRIMARY_SCAN && TableName.isMetaTableName(region.getTable()) + && region.getReplicaId() == RegionReplicaUtil.DEFAULT_REPLICA_ID) { throw new IOException("Inject error"); } } @@ -123,7 +123,7 @@ private void testRead(boolean useMetaReplicas) conf.setLong(HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, TimeUnit.SECONDS.toMicros(1)); try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) { Result result = FutureUtils.get(conn.getTableBuilder(TABLE_NAME) - .setOperationTimeout(3, TimeUnit.SECONDS).build().get(new Get(ROW))); + .setOperationTimeout(3, TimeUnit.SECONDS).build().get(new Get(ROW))); assertArrayEquals(VALUE, result.getValue(FAMILY, QUALIFIER)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncToolAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncToolAdminApi.java index bc78aaa0a621..973d5e9e42b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncToolAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncToolAdminApi.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java index ff85ef93ea33..ad12311c01ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -226,7 +226,7 @@ public ScannerThread(Table table, BlockCache cache) { public void run() { Scan s = new Scan().withStartRow(ROW4).withStopRow(ROW5).setCaching(1); try { - while(!doScan.get()) { + while (!doScan.get()) { try { // Sleep till you start scan Thread.sleep(1); @@ -300,8 +300,8 @@ public void testHBASE16372InReadPath() throws Exception { // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName) - .getRegion(regionName); + HRegion region = + (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -364,7 +364,7 @@ public void testHBASE16372InReadPath() throws Exception { s.setAllowPartialResults(true); s.setMaxResultSize(1000); try (ScanPerNextResultScanner scanner = - new ScanPerNextResultScanner(TEST_UTIL.getAsyncConnection().getTable(tableName), s)) { + new ScanPerNextResultScanner(TEST_UTIL.getAsyncConnection().getTable(tableName), s)) { Thread evictorThread = new Thread() { @Override public void run() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java index ab81ca01fc5b..44d7b136c95b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBatchScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java index a69bc4d4dc56..d25638cbb7c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -114,7 +114,7 @@ public static void setUpBeforeClass() throws Exception { ROWS[1] = ROW1; Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - MultiRowMutationEndpoint.class.getName()); + MultiRowMutationEndpoint.class.getName()); conf.setInt("hbase.regionserver.handler.count", 20); conf.setInt("hbase.bucketcache.size", 400); conf.setStrings(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); @@ -186,12 +186,11 @@ public void testBlockEvictionWithParallelScans() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, - CustomInnerRegionObserver.class.getName()); + CustomInnerRegionObserver.class.getName()); // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName) - .getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -276,12 +275,11 @@ public void testParallelGetsAndScans() throws IOException, InterruptedException // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, - CustomInnerRegionObserver.class.getName()); + CustomInnerRegionObserver.class.getName()); // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -335,12 +333,11 @@ public void testGetWithCellsInDifferentFiles() throws IOException, InterruptedEx // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, - CustomInnerRegionObserver.class.getName()); + CustomInnerRegionObserver.class.getName()); // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -397,12 +394,11 @@ public void testGetsWithMultiColumnsAndExplicitTracker() // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, - CustomInnerRegionObserver.class.getName()); + CustomInnerRegionObserver.class.getName()); // get the block cache and region RegionLocator locator = 
TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); BlockCache cache = setCacheProperties(region); Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, data); @@ -490,12 +486,11 @@ public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedE fams[i] = (Bytes.toBytes("testFamily" + i)); } table = TEST_UTIL.createTable(tableName, fams, 1, 1024, - CustomInnerRegionObserver.class.getName()); + CustomInnerRegionObserver.class.getName()); // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); BlockCache cache = setCacheProperties(region); Put put = new Put(ROW); @@ -586,8 +581,7 @@ public void testBlockRefCountAfterSplits() throws IOException, InterruptedExcept // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setEvictOnClose(true); @@ -618,7 +612,7 @@ public void testBlockRefCountAfterSplits() throws IOException, InterruptedExcept TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getAdmin().getRegions(rs).size() > regionCount); region.compact(true); List regions = TEST_UTIL.getMiniHBaseCluster().getRegionServer(rs).getRegions(); - for (HRegion r: regions) { + for (HRegion r : regions) { LOG.info("" + r.getCompactionState()); TEST_UTIL.waitFor(30000, () -> r.getCompactionState().equals(CompactionState.NONE)); } @@ -645,12 +639,11 @@ public void testMultiGets() throws IOException, InterruptedException { // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, - CustomInnerRegionObserver.class.getName()); + CustomInnerRegionObserver.class.getName()); // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -695,7 +688,7 @@ public void testMultiGets() throws IOException, InterruptedException { foundNonZeroBlock = true; } } - assertTrue("Should have found nonzero ref count block",foundNonZeroBlock); + assertTrue("Should have found nonzero ref count block", foundNonZeroBlock); CustomInnerRegionObserver.getCdl().get().countDown(); CustomInnerRegionObserver.getCdl().get().countDown(); for (MultiGetThread thread : 
getThreads) { @@ -713,6 +706,7 @@ public void testMultiGets() throws IOException, InterruptedException { } } } + @Test public void testScanWithMultipleColumnFamilies() throws IOException, InterruptedException { Table table = null; @@ -728,12 +722,11 @@ public void testScanWithMultipleColumnFamilies() throws IOException, Interrupted fams[i] = (Bytes.toBytes("testFamily" + i)); } table = TEST_UTIL.createTable(tableName, fams, 1, 1024, - CustomInnerRegionObserver.class.getName()); + CustomInnerRegionObserver.class.getName()); // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); BlockCache cache = setCacheProperties(region); Put put = new Put(ROW); @@ -817,8 +810,8 @@ private BlockCache setCacheProperties(HRegion region) { } @Test - public void testParallelGetsAndScanWithWrappedRegionScanner() throws IOException, - InterruptedException { + public void testParallelGetsAndScanWithWrappedRegionScanner() + throws IOException, InterruptedException { Table table = null; try { latch = new CountDownLatch(2); @@ -828,12 +821,11 @@ public void testParallelGetsAndScanWithWrappedRegionScanner() throws IOException // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, - CustomInnerRegionObserverWrapper.class.getName()); + CustomInnerRegionObserverWrapper.class.getName()); // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -894,12 +886,11 @@ private void testScanWithCompactionInternals(String tableNameStr, boolean revers TableName tableName = TableName.valueOf(tableNameStr); // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, - CustomInnerRegionObserverWrapper.class.getName()); + CustomInnerRegionObserverWrapper.class.getName()); // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -1012,12 +1003,11 @@ public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush() final TableName tableName = TableName.valueOf(name.getMethodName()); // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, - CustomInnerRegionObserverWrapper.class.getName()); + CustomInnerRegionObserverWrapper.class.getName()); // get the block cache and region RegionLocator locator = 
TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -1131,7 +1121,6 @@ public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush() } } - @Test public void testScanWithException() throws IOException, InterruptedException { Table table = null; @@ -1142,12 +1131,11 @@ public void testScanWithException() throws IOException, InterruptedException { // Create KV that will give you two blocks // Create a table with block size as 1024 table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, - CustomInnerRegionObserverWrapper.class.getName()); + CustomInnerRegionObserverWrapper.class.getName()); // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -1253,8 +1241,8 @@ private void insertData(Table table) throws IOException { table.put(put); } - private ScanThread[] initiateScan(Table table, boolean reverse) throws IOException, - InterruptedException { + private ScanThread[] initiateScan(Table table, boolean reverse) + throws IOException, InterruptedException { ScanThread[] scanThreads = new ScanThread[NO_OF_THREADS]; for (int i = 0; i < NO_OF_THREADS; i++) { scanThreads[i] = new ScanThread(table, reverse); @@ -1277,8 +1265,7 @@ private GetThread[] initiateGet(Table table, boolean tracker, boolean multipleCF return getThreads; } - private MultiGetThread[] initiateMultiGet(Table table) - throws IOException, InterruptedException { + private MultiGetThread[] initiateMultiGet(Table table) throws IOException, InterruptedException { MultiGetThread[] multiGetThreads = new MultiGetThread[NO_OF_THREADS]; for (int i = 0; i < NO_OF_THREADS; i++) { multiGetThreads[i] = new MultiGetThread(table); @@ -1329,7 +1316,7 @@ private void checkForBlockEviction(BlockCache cache, boolean getClosed, boolean // If get has closed only the scan's blocks would be available assertEquals(refCount, CustomInnerRegionObserver.countOfGets.get()); } else { - assertEquals(refCount, CustomInnerRegionObserver.countOfGets.get() + (NO_OF_THREADS)); + assertEquals(refCount, CustomInnerRegionObserver.countOfGets.get() + (NO_OF_THREADS)); } } } else { @@ -1354,9 +1341,11 @@ private void checkForBlockEviction(BlockCache cache, boolean getClosed, boolean private static class MultiGetThread extends Thread { private final Table table; private final List gets = new ArrayList<>(); + public MultiGetThread(Table table) { this.table = table; } + @Override public void run() { gets.add(new Get(ROW)); @@ -1422,14 +1411,14 @@ private void initiateGet(Table table) throws IOException { assertTrue(Bytes.equals(r.getValue(FAMILY, Bytes.toBytes("testQualifier" + 9)), data2)); } else { assertTrue(Bytes.equals( - r.getValue(Bytes.toBytes("testFamily" + 3), 
Bytes.toBytes("testQualifier" + 3)), - data2)); + r.getValue(Bytes.toBytes("testFamily" + 3), Bytes.toBytes("testQualifier" + 3)), + data2)); assertTrue(Bytes.equals( - r.getValue(Bytes.toBytes("testFamily" + 8), Bytes.toBytes("testQualifier" + 8)), - data2)); + r.getValue(Bytes.toBytes("testFamily" + 8), Bytes.toBytes("testQualifier" + 8)), + data2)); assertTrue(Bytes.equals( - r.getValue(Bytes.toBytes("testFamily" + 9), Bytes.toBytes("testQualifier" + 9)), - data2)); + r.getValue(Bytes.toBytes("testFamily" + 9), Bytes.toBytes("testQualifier" + 9)), + data2)); } } } @@ -1480,12 +1469,12 @@ private void initiateScan(Table table) throws IOException { private void waitForStoreFileCount(HStore store, int count, int timeout) throws InterruptedException { long start = EnvironmentEdgeManager.currentTime(); - while (start + timeout > EnvironmentEdgeManager.currentTime() && - store.getStorefilesCount() != count) { + while (start + timeout > EnvironmentEdgeManager.currentTime() + && store.getStorefilesCount() != count) { Thread.sleep(100); } - System.out.println("start=" + start + ", now=" + EnvironmentEdgeManager.currentTime() + - ", cur=" + store.getStorefilesCount()); + System.out.println("start=" + start + ", now=" + EnvironmentEdgeManager.currentTime() + ", cur=" + + store.getStorefilesCount()); assertEquals(count, store.getStorefilesCount()); } @@ -1572,8 +1561,8 @@ public int getBatch() { public static class CustomInnerRegionObserverWrapper extends CustomInnerRegionObserver { @Override - public RegionScanner postScannerOpen(ObserverContext e, - Scan scan, RegionScanner s) throws IOException { + public RegionScanner postScannerOpen(ObserverContext e, Scan scan, + RegionScanner s) throws IOException { return new CustomScanner(s); } } @@ -1583,8 +1572,8 @@ public static class CustomInnerRegionObserver implements RegionCoprocessor, Regi static final AtomicInteger countOfGets = new AtomicInteger(0); static final AtomicBoolean waitForGets = new AtomicBoolean(false); static final AtomicBoolean throwException = new AtomicBoolean(false); - private static final AtomicReference cdl = new AtomicReference<>( - new CountDownLatch(0)); + private static final AtomicReference cdl = + new AtomicReference<>(new CountDownLatch(0)); @Override public Optional getRegionObserver() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java index 03f32b097615..b8dc1ec477bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
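The waitForStoreFileCount hunk above polls with EnvironmentEdgeManager.currentTime() rather than System.currentTimeMillis(), so the loop stays correct when a test injects its own clock. A small, purely illustrative helper in the same spirit (the class and method names are hypothetical and not part of this patch):

import java.util.function.IntSupplier;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public final class WaitUtil {
  private WaitUtil() {
  }

  // Polls until the supplied count matches the expectation or the timeout elapses.
  public static boolean waitForCount(IntSupplier current, int expected, long timeoutMs)
      throws InterruptedException {
    long start = EnvironmentEdgeManager.currentTime();
    while (EnvironmentEdgeManager.currentTime() - start < timeoutMs
        && current.getAsInt() != expected) {
      Thread.sleep(100);
    }
    return current.getAsInt() == expected;
  }
}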
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestBufferedMutator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBufferedMutator.class); + HBaseClassTestRule.forClass(TestBufferedMutator.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -67,14 +67,14 @@ public static void tearDown() throws Exception { @Test public void test() throws Exception { try (BufferedMutator mutator = TEST_UTIL.getConnection() - .getBufferedMutator(new BufferedMutatorParams(TABLE_NAME).writeBufferSize(64 * 1024))) { + .getBufferedMutator(new BufferedMutatorParams(TABLE_NAME).writeBufferSize(64 * 1024))) { mutator.mutate(IntStream.range(0, COUNT / 2) - .mapToObj(i -> new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE)) - .collect(Collectors.toList())); + .mapToObj(i -> new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE)) + .collect(Collectors.toList())); mutator.flush(); mutator.mutate(IntStream.range(COUNT / 2, COUNT) - .mapToObj(i -> new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE)) - .collect(Collectors.toList())); + .mapToObj(i -> new Put(Bytes.toBytes(i)).addColumn(CF, CQ, VALUE)) + .collect(Collectors.toList())); mutator.close(); verifyData(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.java index c4f6e3bda195..6abf286d588d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.java index 32c059445f95..b216f9d56e9c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.java index 1fca99f4f351..0dd768cda698 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
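The TestBufferedMutator hunk above streams puts through a BufferedMutator configured with an explicit write-buffer size and then flushes. A minimal client-side sketch of that pattern (hypothetical table, family, and qualifier names; the table is assumed to already exist):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("testtable"); // hypothetical
    byte[] cf = Bytes.toBytes("cf");
    byte[] cq = Bytes.toBytes("cq");
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BufferedMutator mutator = conn.getBufferedMutator(
            new BufferedMutatorParams(tableName).writeBufferSize(64 * 1024))) {
      List<Put> puts = new ArrayList<>();
      for (int i = 0; i < 100; i++) {
        puts.add(new Put(Bytes.toBytes(i)).addColumn(cf, cq, Bytes.toBytes("v" + i)));
      }
      mutator.mutate(puts); // buffered; may not have reached the server yet
      mutator.flush();      // force the buffered mutations out
    }                       // close() also flushes any remainder
  }
}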
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.java index fb8482a09c52..b41ce4324d11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIIncrementRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIIncrementRpcTimeout.java index 87b148171812..f74953896f20 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIIncrementRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIIncrementRpcTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutOperationTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutOperationTimeout.java index 8c36bf6dbec9..49f909447e96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutOperationTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutOperationTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutRpcTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutRpcTimeout.java index f5921fb05e96..aa1cbe2bd92b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutRpcTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCIPutRpcTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java index 1c8480931ef3..fe0ed884a216 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,11 +54,11 @@ public void setUp() { @Test public void testRpcRetryingCallerSleep() throws Exception { TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM_NAM)) - .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(SleepAndFailFirstTime.class.getName()) - .setProperty(SleepAndFailFirstTime.SLEEP_TIME_CONF_KEY, String.valueOf(2000)) - .build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM_NAM)) + .setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder(SleepAndFailFirstTime.class.getName()) + .setProperty(SleepAndFailFirstTime.SLEEP_TIME_CONF_KEY, String.valueOf(2000)).build()) + .build(); TEST_UTIL.getAdmin().createTable(htd); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java index bebc843933e7..61978eb90ed8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,10 +47,10 @@ public class TestCatalogReplicaLoadBalanceSimpleSelector { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCatalogReplicaLoadBalanceSimpleSelector.class); + HBaseClassTestRule.forClass(TestCatalogReplicaLoadBalanceSimpleSelector.class); - private static final Logger LOG = LoggerFactory.getLogger( - TestCatalogReplicaLoadBalanceSimpleSelector.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestCatalogReplicaLoadBalanceSimpleSelector.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -72,12 +72,12 @@ public static void setUp() throws Exception { // Enable hbase:meta replication. HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica); - TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( - TableName.META_TABLE_NAME).size() >= numOfMetaReplica); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster() + .getRegions(TableName.META_TABLE_NAME).size() >= numOfMetaReplica); registry = ConnectionRegistryFactory.getRegistry(TEST_UTIL.getConfiguration()); - CONN = new AsyncConnectionImpl(conf, registry, - registry.getClusterId().get(), null, User.getCurrent()); + CONN = new AsyncConnectionImpl(conf, registry, registry.getClusterId().get(), null, + User.getCurrent()); } @AfterClass @@ -88,22 +88,22 @@ public static void tearDown() throws Exception { @Test public void testMetaChangeFromReplicaNoReplica() throws IOException, InterruptedException { - String replicaSelectorClass = CONN.getConfiguration(). 
- get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, - CatalogReplicaLoadBalanceSimpleSelector.class.getName()); + String replicaSelectorClass = + CONN.getConfiguration().get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, + CatalogReplicaLoadBalanceSimpleSelector.class.getName()); CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory - .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; - try { - RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get - (CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); + .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations() + .get(CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); assertNotEquals( metaSelector.select(TableName.valueOf("test"), EMPTY_START_ROW, RegionLocateType.CURRENT), @@ -111,24 +111,23 @@ public void testMetaChangeFromReplicaNoReplica() throws IOException, Interrupted // Change to No meta replica HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 1); - TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getRegions( - TableName.META_TABLE_NAME).size() == 1); + TEST_UTIL.waitFor(30000, + () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() == 1); CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica = - CatalogReplicaLoadBalanceSelectorFactory.createSelector( - replicaSelectorClass, META_TABLE_NAME, CONN, () -> { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; - try { - RegionLocations metaLocations = CONN.registry.getMetaRegionLocations().get( - CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); - assertEquals( - metaSelectorWithNoReplica.select(TableName.valueOf("test"), EMPTY_START_ROW, - RegionLocateType.CURRENT), RegionReplicaUtil.DEFAULT_REPLICA_ID); + CatalogReplicaLoadBalanceSelectorFactory.createSelector(replicaSelectorClass, + META_TABLE_NAME, CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = CONN.registry.getMetaRegionLocations() + .get(CONN.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); + assertEquals(metaSelectorWithNoReplica.select(TableName.valueOf("test"), EMPTY_START_ROW, + RegionLocateType.CURRENT), RegionReplicaUtil.DEFAULT_REPLICA_ID); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java 
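The TestCISleep hunk above builds a table descriptor carrying a coprocessor plus a per-coprocessor property through the descriptor builders. A rough sketch of that construction outside the test harness (hypothetical table, family, and coprocessor names; the coprocessor class is assumed to be on the region servers' classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableWithCoprocessorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("testtable"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("fam")))
          .setCoprocessor(CoprocessorDescriptorBuilder
              .newBuilder("com.example.MyObserver")               // hypothetical coprocessor class
              .setProperty("sleep.time.ms", String.valueOf(2000)) // illustrative property
              .build())
          .build();
      admin.createTable(htd);
    }
  }
}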
index e4509affb766..55169a984441 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,6 +24,7 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.Arrays; import java.util.Collections; @@ -77,8 +78,7 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - private Table createTable() - throws IOException, InterruptedException { + private Table createTable() throws IOException, InterruptedException { final TableName tableName = TableName.valueOf(name.getMethodName()); Table table = TEST_UTIL.createTable(tableName, FAMILY); TEST_UTIL.waitTableAvailable(tableName.getName(), 5000); @@ -111,8 +111,7 @@ private void getOneRowAndAssertAllButCExist(final Table table) throws IOExceptio Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("A"))).equals("a")); assertTrue("Column B value should be b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))).equals("b")); - assertTrue("Column C should not exist", - result.getValue(FAMILY, Bytes.toBytes("C")) == null); + assertTrue("Column C should not exist", result.getValue(FAMILY, Bytes.toBytes("C")) == null); } private RowMutations makeRowMutationsWithColumnCDeleted() throws IOException { @@ -130,7 +129,7 @@ private RowMutations makeRowMutationsWithColumnCDeleted() throws IOException { private RowMutations getBogusRowMutations() throws IOException { Put p = new Put(ROWKEY); byte[] value = new byte[0]; - p.addColumn(new byte[]{'b', 'o', 'g', 'u', 's'}, new byte[]{'A'}, value); + p.addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, new byte[] { 'A' }, value); RowMutations rm = new RowMutations(ROWKEY); rm.add(p); return rm; @@ -160,7 +159,7 @@ public void testCheckAndMutateForOldApi() throws Throwable { try { rm = getBogusRowMutations(); table.checkAndMutate(ROWKEY, FAMILY).qualifier(Bytes.toBytes("A")) - .ifEquals(Bytes.toBytes("a")).thenMutate(rm); + .ifEquals(Bytes.toBytes("a")).thenMutate(rm); fail("Expected NoSuchColumnFamilyException"); } catch (NoSuchColumnFamilyException e) { // expected @@ -180,37 +179,44 @@ public void testCheckAndMutateWithSingleFilterForOldApi() throws Throwable { getOneRowAndAssertAllExist(table); // Put with success - boolean ok = table.checkAndMutate(ROWKEY, new SingleColumnValueFilter(FAMILY, - Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("a"))) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + boolean ok = table + .checkAndMutate(ROWKEY, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); assertTrue(ok); Result result = table.get(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"))); assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); // Put with failure - ok = table.checkAndMutate(ROWKEY, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("b"))) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))); + ok = table + 
.checkAndMutate(ROWKEY, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("b"))) + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))); assertFalse(ok); assertFalse(table.exists(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("E")))); // Delete with success - ok = table.checkAndMutate(ROWKEY, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .thenDelete(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("D"))); + ok = table + .checkAndMutate(ROWKEY, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .thenDelete(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("D"))); assertTrue(ok); assertFalse(table.exists(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D")))); // Mutate with success - ok = table.checkAndMutate(ROWKEY, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), - CompareOperator.EQUAL, Bytes.toBytes("b"))) - .thenMutate(new RowMutations(ROWKEY) - .add((Mutation) new Put(ROWKEY) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A")))); + ok = table + .checkAndMutate(ROWKEY, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b"))) + .thenMutate(new RowMutations(ROWKEY).add( + (Mutation) new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) + .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A")))); assertTrue(ok); result = table.get(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"))); @@ -230,53 +236,56 @@ public void testCheckAndMutateWithMultipleFiltersForOldApi() throws Throwable { getOneRowAndAssertAllExist(table); // Put with success - boolean ok = table.checkAndMutate(ROWKEY, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")) - )) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + boolean ok = table + .checkAndMutate(ROWKEY, + new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); assertTrue(ok); Result result = table.get(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"))); assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D")))); // Put with failure - ok = table.checkAndMutate(ROWKEY, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("c")) - )) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))); + ok = table + .checkAndMutate(ROWKEY, + new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))); assertFalse(ok); assertFalse(table.exists(new Get(ROWKEY).addColumn(FAMILY, 
Bytes.toBytes("E")))); // Delete with success - ok = table.checkAndMutate(ROWKEY, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")) - )) - .thenDelete(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("D"))); + ok = table + .checkAndMutate(ROWKEY, + new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .thenDelete(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("D"))); assertTrue(ok); assertFalse(table.exists(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D")))); // Mutate with success - ok = table.checkAndMutate(ROWKEY, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")) - )) - .thenMutate(new RowMutations(ROWKEY) - .add((Mutation) new Put(ROWKEY) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A")))); + ok = table + .checkAndMutate(ROWKEY, + new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .thenMutate(new RowMutations(ROWKEY).add( + (Mutation) new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) + .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A")))); assertTrue(ok); result = table.get(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"))); @@ -294,24 +303,26 @@ public void testCheckAndMutateWithTimestampFilterForOldApi() throws Throwable { table.put(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a"))); // Put with success - boolean ok = table.checkAndMutate(ROWKEY, new FilterList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), - new TimestampsFilter(Collections.singletonList(100L)) - )) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); + boolean ok = table + .checkAndMutate(ROWKEY, + new FilterList(new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), + new QualifierFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("A"))), + new TimestampsFilter(Collections.singletonList(100L)))) + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); assertTrue(ok); Result result = table.get(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"))); assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); // Put with failure - ok = table.checkAndMutate(ROWKEY, new FilterList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), - new TimestampsFilter(Collections.singletonList(101L)) - )) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))); + ok = table + .checkAndMutate(ROWKEY, + new FilterList(new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), + new 
QualifierFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("A"))), + new TimestampsFilter(Collections.singletonList(101L)))) + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))); assertFalse(ok); assertFalse(table.exists(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C")))); @@ -326,20 +337,24 @@ public void testCheckAndMutateWithFilterAndTimeRangeForOldApi() throws Throwable table.put(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a"))); // Put with success - boolean ok = table.checkAndMutate(ROWKEY, new SingleColumnValueFilter(FAMILY, - Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 101)) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); + boolean ok = table + .checkAndMutate(ROWKEY, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 101)) + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); assertTrue(ok); Result result = table.get(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"))); assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); // Put with failure - ok = table.checkAndMutate(ROWKEY, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 100)) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))); + ok = table + .checkAndMutate(ROWKEY, + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 100)) + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))); assertFalse(ok); assertFalse(table.exists(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C")))); @@ -351,7 +366,7 @@ public void testCheckAndMutateWithFilterAndTimeRangeForOldApi() throws Throwable public void testCheckAndMutateWithoutConditionForOldApi() throws Throwable { try (Table table = createTable()) { table.checkAndMutate(ROWKEY, FAMILY) - .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + .thenPut(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); } } @@ -368,8 +383,7 @@ public void testCheckAndMutate() throws Throwable { // put the same row again with C column deleted RowMutations rm = makeRowMutationsWithColumnCDeleted(); CheckAndMutateResult res = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(rm)); + .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")).build(rm)); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -380,8 +394,7 @@ public void testCheckAndMutate() throws Throwable { try { rm = getBogusRowMutations(); table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(rm)); + .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")).build(rm)); fail("Expected NoSuchColumnFamilyException"); } catch (NoSuchColumnFamilyException e) { // expected @@ -401,9 +414,9 @@ public void testCheckAndMutateWithSingleFilter() throws Throwable { // Put with success CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new SingleColumnValueFilter(FAMILY, - Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("a"))) - .build(new 
Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -412,9 +425,9 @@ public void testCheckAndMutateWithSingleFilter() throws Throwable { // Put with failure result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("b"))) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("b"))) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))); assertFalse(result.isSuccess()); assertNull(result.getResult()); @@ -422,9 +435,9 @@ public void testCheckAndMutateWithSingleFilter() throws Throwable { // Delete with success result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .build(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("D")))); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .build(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("D")))); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -432,12 +445,11 @@ public void testCheckAndMutateWithSingleFilter() throws Throwable { // Mutate with success result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), - CompareOperator.EQUAL, Bytes.toBytes("b"))) - .build(new RowMutations(ROWKEY) - .add((Mutation) new Put(ROWKEY) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A"))))); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b"))) + .build(new RowMutations(ROWKEY).add( + (Mutation) new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) + .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A"))))); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -458,12 +470,12 @@ public void testCheckAndMutateWithMultipleFilters() throws Throwable { // Put with success CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -472,12 +484,12 @@ public void testCheckAndMutateWithMultipleFilters() throws Throwable { // Put with failure result = 
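The reformatted testCheckAndMutate* methods above all go through the CheckAndMutate builder: the condition is either a column equality (ifEquals) or an arbitrary Filter (ifMatches), and the guarded action is a Put, Delete, Increment, Append, or RowMutations. A compact sketch of the two condition styles (hypothetical row, family, and qualifier names; the table is assumed to exist):

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  public static void main(String[] args) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] fam = Bytes.toBytes("cf");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Table table = conn.getTable(TableName.valueOf("testtable"))) { // hypothetical
      // Condition expressed as a simple column equality.
      CheckAndMutateResult r1 = table.checkAndMutate(CheckAndMutate.newBuilder(row)
          .ifEquals(fam, Bytes.toBytes("A"), Bytes.toBytes("a"))
          .build(new Put(row).addColumn(fam, Bytes.toBytes("D"), Bytes.toBytes("d"))));
      System.out.println("put applied: " + r1.isSuccess());

      // Condition expressed as a Filter; the Delete is applied only if it matches.
      CheckAndMutateResult r2 = table.checkAndMutate(CheckAndMutate.newBuilder(row)
          .ifMatches(new SingleColumnValueFilter(fam, Bytes.toBytes("A"),
              CompareOperator.EQUAL, Bytes.toBytes("a")))
          .build(new Delete(row).addColumns(fam, Bytes.toBytes("D"))));
      System.out.println("delete applied: " + r2.isSuccess());
    }
  }
}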
table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("c")))) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))); assertFalse(result.isSuccess()); assertNull(result.getResult()); @@ -485,28 +497,29 @@ public void testCheckAndMutateWithMultipleFilters() throws Throwable { // Delete with success result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("D")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("D")))); assertTrue(result.isSuccess()); assertNull(result.getResult()); assertFalse(table.exists(new Get(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D")))); // Mutate with success - result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new RowMutations(ROWKEY) - .add((Mutation) new Put(ROWKEY) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A"))))); + result = + table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new RowMutations(ROWKEY) + .add((Mutation) new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), + Bytes.toBytes("d"))) + .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A"))))); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -524,12 +537,12 @@ public void testCheckAndMutateWithTimestampFilter() throws Throwable { table.put(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a"))); // Put with success - CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), - new TimestampsFilter(Collections.singletonList(100L)))) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); + CheckAndMutateResult result = table.checkAndMutate( + CheckAndMutate.newBuilder(ROWKEY).ifMatches( + 
new FilterList(new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), + new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), + new TimestampsFilter(Collections.singletonList(100L)))) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -537,12 +550,11 @@ public void testCheckAndMutateWithTimestampFilter() throws Throwable { assertEquals("b", Bytes.toString(r.getValue(FAMILY, Bytes.toBytes("B")))); // Put with failure - result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), - new TimestampsFilter(Collections.singletonList(101L)))) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))); + result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY).ifMatches( + new FilterList(new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(FAMILY)), + new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("A"))), + new TimestampsFilter(Collections.singletonList(101L)))) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))); assertFalse(result.isSuccess()); assertNull(result.getResult()); @@ -558,10 +570,10 @@ public void testCheckAndMutateWithFilterAndTimeRange() throws Throwable { // Put with success CheckAndMutateResult result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new SingleColumnValueFilter(FAMILY, - Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 101)) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 101)) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); assertTrue(result.isSuccess()); assertNull(result.getResult()); @@ -570,10 +582,10 @@ public void testCheckAndMutateWithFilterAndTimeRange() throws Throwable { // Put with failure result = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), - CompareOperator.EQUAL, Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 100)) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 100)) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))); assertFalse(result.isSuccess()); assertNull(result.getResult()); @@ -584,7 +596,7 @@ public void testCheckAndMutateWithFilterAndTimeRange() throws Throwable { @Test(expected = IllegalStateException.class) public void testCheckAndMutateBuilderWithoutCondition() { CheckAndMutate.newBuilder(ROWKEY) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); } @Test @@ -593,9 +605,9 @@ public void testCheckAndIncrement() throws Throwable { table.put(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))); // CheckAndIncrement with correct value - CheckAndMutateResult res = 
table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 1))); + CheckAndMutateResult res = table.checkAndMutate( + CheckAndMutate.newBuilder(ROWKEY).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 1))); assertTrue(res.isSuccess()); assertEquals(1, Bytes.toLong(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -603,9 +615,9 @@ public void testCheckAndIncrement() throws Throwable { assertEquals(1, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("B")))); // CheckAndIncrement with wrong value - res = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) - .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 1))); + res = table.checkAndMutate( + CheckAndMutate.newBuilder(ROWKEY).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) + .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 1))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -616,12 +628,12 @@ public void testCheckAndIncrement() throws Throwable { // CheckAndIncrement with a filter and correct value res = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("c")))) - .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 2))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 2))); assertTrue(res.isSuccess()); assertEquals(3, Bytes.toLong(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -630,12 +642,12 @@ public void testCheckAndIncrement() throws Throwable { // CheckAndIncrement with a filter and correct value res = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("b")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("d")))) - .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 2))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("b")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("d")))) + .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 2))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -650,9 +662,9 @@ public void testCheckAndAppend() throws Throwable { table.put(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))); // CheckAndAppend with correct value - CheckAndMutateResult res = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); + CheckAndMutateResult res = table.checkAndMutate( + CheckAndMutate.newBuilder(ROWKEY).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + 
.build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); assertTrue(res.isSuccess()); assertEquals("b", Bytes.toString(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -660,9 +672,9 @@ public void testCheckAndAppend() throws Throwable { assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); // CheckAndAppend with correct value - res = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) - .build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); + res = table.checkAndMutate( + CheckAndMutate.newBuilder(ROWKEY).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) + .build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -673,12 +685,12 @@ public void testCheckAndAppend() throws Throwable { // CheckAndAppend with a filter and correct value res = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("c")))) - .build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))); assertTrue(res.isSuccess()); assertEquals("bbb", Bytes.toString(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -687,12 +699,12 @@ public void testCheckAndAppend() throws Throwable { // CheckAndAppend with a filter and wrong value res = table.checkAndMutate(CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("b")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("d")))) - .build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("b")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("d")))) + .build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -711,20 +723,17 @@ public void testCheckAndRowMutations() throws Throwable { try (Table table = createTable()) { // Initial values - table.put(Arrays.asList( - new Put(ROWKEY).addColumn(FAMILY, q2, Bytes.toBytes("toBeDeleted")), + table.put(Arrays.asList(new Put(ROWKEY).addColumn(FAMILY, q2, Bytes.toBytes("toBeDeleted")), new Put(ROWKEY).addColumn(FAMILY, q3, Bytes.toBytes(5L)), new Put(ROWKEY).addColumn(FAMILY, q4, Bytes.toBytes("a")))); // Do CheckAndRowMutations - CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(ROWKEY) - .ifNotExists(FAMILY, q1) - .build(new RowMutations(ROWKEY).add(Arrays.asList( - new Put(ROWKEY).addColumn(FAMILY, q1, Bytes.toBytes(v1)), - new Delete(ROWKEY).addColumns(FAMILY, q2), - new Increment(ROWKEY).addColumn(FAMILY, q3, 1), - new Append(ROWKEY).addColumn(FAMILY, q4, 
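testCheckAndRowMutations above guards a whole RowMutations batch (a Put, Delete, Increment, and Append on the same row) behind a single ifNotExists condition. A stripped-down sketch of that shape (hypothetical names; the table is assumed to be present):

import java.util.Arrays;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndRowMutationsSketch {
  public static void main(String[] args) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] fam = Bytes.toBytes("cf");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Table table = conn.getTable(TableName.valueOf("testtable"))) { // hypothetical
      // All four mutations apply atomically, but only if cf:q1 does not exist yet.
      CheckAndMutate cam = CheckAndMutate.newBuilder(row)
          .ifNotExists(fam, Bytes.toBytes("q1"))
          .build(new RowMutations(row).add(Arrays.asList(
              new Put(row).addColumn(fam, Bytes.toBytes("q1"), Bytes.toBytes("v1")),
              new Delete(row).addColumns(fam, Bytes.toBytes("q2")),
              new Increment(row).addColumn(fam, Bytes.toBytes("q3"), 1),
              new Append(row).addColumn(fam, Bytes.toBytes("q4"), Bytes.toBytes("b")))));
      CheckAndMutateResult result = table.checkAndMutate(cam);
      System.out.println("applied: " + result.isSuccess());
    }
  }
}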
Bytes.toBytes("b")))) - ); + CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(ROWKEY).ifNotExists(FAMILY, q1) + .build(new RowMutations(ROWKEY) + .add(Arrays.asList(new Put(ROWKEY).addColumn(FAMILY, q1, Bytes.toBytes(v1)), + new Delete(ROWKEY).addColumns(FAMILY, q2), + new Increment(ROWKEY).addColumn(FAMILY, q3, 1), + new Append(ROWKEY).addColumn(FAMILY, q4, Bytes.toBytes("b"))))); CheckAndMutateResult result = table.checkAndMutate(checkAndMutate); assertTrue(result.isSuccess()); @@ -739,14 +748,11 @@ public void testCheckAndRowMutations() throws Throwable { assertEquals("ab", Bytes.toString(r.getValue(FAMILY, q4))); // Do CheckAndRowMutations again - checkAndMutate = CheckAndMutate.newBuilder(ROWKEY) - .ifNotExists(FAMILY, q1) - .build(new RowMutations(ROWKEY).add(Arrays.asList( - new Delete(ROWKEY).addColumns(FAMILY, q1), + checkAndMutate = CheckAndMutate.newBuilder(ROWKEY).ifNotExists(FAMILY, q1).build( + new RowMutations(ROWKEY).add(Arrays.asList(new Delete(ROWKEY).addColumns(FAMILY, q1), new Put(ROWKEY).addColumn(FAMILY, q2, Bytes.toBytes(v1)), new Increment(ROWKEY).addColumn(FAMILY, q3, 1), - new Append(ROWKEY).addColumn(FAMILY, q4, Bytes.toBytes("b")))) - ); + new Append(ROWKEY).addColumn(FAMILY, q4, Bytes.toBytes("b"))))); result = table.checkAndMutate(checkAndMutate); assertFalse(result.isSuccess()); @@ -766,23 +772,23 @@ public void testCheckAndRowMutations() throws Throwable { @Test public void testCheckAndMutateBatch() throws Throwable { try (Table table = createTable()) { - table.put(Arrays.asList( - new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), - new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), - new Put(ROWKEY3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), - new Put(ROWKEY4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); + table.put( + Arrays.asList(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), + new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), + new Put(ROWKEY3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), + new Put(ROWKEY4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); // Test for Put - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e"))); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(ROWKEY).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e"))); CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - .ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("a")) - .build(new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); + .ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("a")) + .build(new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); List results = - table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); + table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); assertTrue(results.get(0).isSuccess()); assertNull(results.get(0).getResult()); @@ -797,12 +803,10 @@ public void testCheckAndMutateBatch() throws Throwable { // Test for Delete checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e")) - .build(new Delete(ROWKEY)); + .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e")).build(new 
Delete(ROWKEY)); checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - .ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("a")) - .build(new Delete(ROWKEY2)); + .ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("a")).build(new Delete(ROWKEY2)); results = table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); @@ -818,18 +822,16 @@ public void testCheckAndMutateBatch() throws Throwable { // Test for RowMutations checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY3) - .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) - .build(new RowMutations(ROWKEY3) - .add((Mutation) new Put(ROWKEY3) - .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) - .add((Mutation) new Delete(ROWKEY3).addColumns(FAMILY, Bytes.toBytes("C")))); + .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) + .build(new RowMutations(ROWKEY3).add( + (Mutation) new Put(ROWKEY3).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) + .add((Mutation) new Delete(ROWKEY3).addColumns(FAMILY, Bytes.toBytes("C")))); checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY4) - .ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("f")) - .build(new RowMutations(ROWKEY4) - .add((Mutation) new Put(ROWKEY4) - .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) - .add((Mutation) new Delete(ROWKEY4).addColumns(FAMILY, Bytes.toBytes("D")))); + .ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("f")) + .build(new RowMutations(ROWKEY4).add( + (Mutation) new Put(ROWKEY4).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) + .add((Mutation) new Delete(ROWKEY4).addColumns(FAMILY, Bytes.toBytes("D")))); results = table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); @@ -851,23 +853,23 @@ public void testCheckAndMutateBatch() throws Throwable { @Test public void testCheckAndMutateBatch2() throws Throwable { try (Table table = createTable()) { - table.put(Arrays.asList( - new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), - new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), - new Put(ROWKEY3).addColumn(FAMILY, Bytes.toBytes("C"), 100, Bytes.toBytes("c")), - new Put(ROWKEY4).addColumn(FAMILY, Bytes.toBytes("D"), 100, Bytes.toBytes("d")))); + table.put( + Arrays.asList(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), + new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), + new Put(ROWKEY3).addColumn(FAMILY, Bytes.toBytes("C"), 100, Bytes.toBytes("c")), + new Put(ROWKEY4).addColumn(FAMILY, Bytes.toBytes("D"), 100, Bytes.toBytes("d")))); // Test for ifNotExists() - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifNotExists(FAMILY, Bytes.toBytes("B")) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e"))); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(ROWKEY).ifNotExists(FAMILY, Bytes.toBytes("B")) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("e"))); - CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - .ifNotExists(FAMILY, Bytes.toBytes("B")) - .build(new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); + CheckAndMutate checkAndMutate2 = + CheckAndMutate.newBuilder(ROWKEY2).ifNotExists(FAMILY, Bytes.toBytes("B")) + .build(new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); List results = - table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); + table.checkAndMutate(Arrays.asList(checkAndMutate1, 
checkAndMutate2)); assertTrue(results.get(0).isSuccess()); assertNull(results.get(0).getResult()); @@ -882,12 +884,12 @@ public void testCheckAndMutateBatch2() throws Throwable { // Test for ifMatches() checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(FAMILY, Bytes.toBytes("A"), CompareOperator.NOT_EQUAL, Bytes.toBytes("a")) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))); + .ifMatches(FAMILY, Bytes.toBytes("A"), CompareOperator.NOT_EQUAL, Bytes.toBytes("a")) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))); checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - .ifMatches(FAMILY, Bytes.toBytes("B"), CompareOperator.GREATER, Bytes.toBytes("b")) - .build(new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); + .ifMatches(FAMILY, Bytes.toBytes("B"), CompareOperator.GREATER, Bytes.toBytes("b")) + .build(new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("f"))); results = table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); @@ -904,14 +906,14 @@ public void testCheckAndMutateBatch2() throws Throwable { // Test for timeRange() checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY3) - .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) - .timeRange(TimeRange.between(0, 101)) - .build(new Put(ROWKEY3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("e"))); + .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) + .timeRange(TimeRange.between(0, 101)) + .build(new Put(ROWKEY3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("e"))); checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY4) - .ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) - .timeRange(TimeRange.between(0, 100)) - .build(new Put(ROWKEY4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("f"))); + .ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) + .timeRange(TimeRange.between(0, 100)) + .build(new Put(ROWKEY4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("f"))); results = table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); @@ -932,34 +934,32 @@ public void testCheckAndMutateBatch2() throws Throwable { public void testCheckAndMutateBatchWithFilter() throws Throwable { try (Table table = createTable()) { table.put(Arrays.asList( - new Put(ROWKEY) - .addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) - .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), - new Put(ROWKEY2) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) - .addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")) - .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")))); + new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) + .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), + new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) + .addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")) + .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")))); // Test for Put CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("g"))); + .ifMatches(new 
FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("g"))); CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("h"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("h"))); List results = - table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); + table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); assertTrue(results.get(0).isSuccess()); assertNull(results.get(0).getResult()); @@ -974,20 +974,20 @@ public void testCheckAndMutateBatchWithFilter() throws Throwable { // Test for Delete checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("C"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("C"))); checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Delete(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("F"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Delete(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("F"))); results = table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); @@ -1002,27 +1002,29 @@ public void testCheckAndMutateBatchWithFilter() throws Throwable { assertEquals("f", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("F")))); // Test for RowMutations - checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new RowMutations(ROWKEY) - .add((Mutation) new Put(ROWKEY) - .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))) - .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A")))); - - checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - 
.ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new RowMutations(ROWKEY2) - .add((Mutation) new Put(ROWKEY2) - .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("g"))) - .add((Mutation) new Delete(ROWKEY2).addColumns(FAMILY, Bytes.toBytes("D")))); + checkAndMutate1 = + CheckAndMutate.newBuilder(ROWKEY) + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new RowMutations(ROWKEY) + .add((Mutation) new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), + Bytes.toBytes("c"))) + .add((Mutation) new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("A")))); + + checkAndMutate2 = + CheckAndMutate.newBuilder(ROWKEY2) + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new RowMutations(ROWKEY2) + .add((Mutation) new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("F"), + Bytes.toBytes("g"))) + .add((Mutation) new Delete(ROWKEY2).addColumns(FAMILY, Bytes.toBytes("D")))); results = table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); @@ -1046,32 +1048,32 @@ public void testCheckAndMutateBatchWithFilterAndTimeRange() throws Throwable { try (Table table = createTable()) { table.put(Arrays.asList( new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a")) - .addColumn(FAMILY, Bytes.toBytes("B"), 100, Bytes.toBytes("b")) - .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), + .addColumn(FAMILY, Bytes.toBytes("B"), 100, Bytes.toBytes("b")) + .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("D"), 100, Bytes.toBytes("d")) - .addColumn(FAMILY, Bytes.toBytes("E"), 100, Bytes.toBytes("e")) - .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")))); + .addColumn(FAMILY, Bytes.toBytes("E"), 100, Bytes.toBytes("e")) + .addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")))); CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .timeRange(TimeRange.between(0, 101)) - .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("g"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .timeRange(TimeRange.between(0, 101)) + .build(new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("g"))); CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, - Bytes.toBytes("d")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, - Bytes.toBytes("e")))) - .timeRange(TimeRange.between(0, 100)) - .build(new 
Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("h"))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("D"), CompareOperator.EQUAL, + Bytes.toBytes("d")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("E"), CompareOperator.EQUAL, + Bytes.toBytes("e")))) + .timeRange(TimeRange.between(0, 100)) + .build(new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("h"))); List results = - table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); + table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); assertTrue(results.get(0).isSuccess()); assertNull(results.get(0).getResult()); @@ -1090,15 +1092,15 @@ public void testCheckAndMutateBatchWithFilterAndTimeRange() throws Throwable { public void testCheckAndIncrementBatch() throws Throwable { try (Table table = createTable()) { table.put(Arrays.asList( - new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes(0L)), - new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes(0L)))); + new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")).addColumn(FAMILY, + Bytes.toBytes("B"), Bytes.toBytes(0L)), + new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")).addColumn(FAMILY, + Bytes.toBytes("D"), Bytes.toBytes(0L)))); // CheckAndIncrement with correct value - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 1)); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(ROWKEY).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), 1)); // CheckAndIncrement with wrong value CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) @@ -1106,11 +1108,11 @@ public void testCheckAndIncrementBatch() throws Throwable { .build(new Increment(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("D"), 1)); List results = - table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); + table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); assertTrue(results.get(0).isSuccess()); - assertEquals(1, Bytes.toLong(results.get(0).getResult() - .getValue(FAMILY, Bytes.toBytes("B")))); + assertEquals(1, + Bytes.toLong(results.get(0).getResult().getValue(FAMILY, Bytes.toBytes("B")))); assertFalse(results.get(1).isSuccess()); assertNull(results.get(1).getResult()); @@ -1126,27 +1128,27 @@ public void testCheckAndIncrementBatch() throws Throwable { public void testCheckAndAppendBatch() throws Throwable { try (Table table = createTable()) { table.put(Arrays.asList( - new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), - new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); + new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")).addColumn(FAMILY, + Bytes.toBytes("B"), Bytes.toBytes("b")), + new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")).addColumn(FAMILY, + Bytes.toBytes("D"), Bytes.toBytes("d")))); // CheckAndAppend with correct value - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("A"), 
Bytes.toBytes("a")) - .build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(ROWKEY).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); // CheckAndAppend with wrong value CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("d")) - .build(new Append(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); + .ifEquals(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("d")) + .build(new Append(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); List results = - table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); + table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); assertTrue(results.get(0).isSuccess()); - assertEquals("bb", Bytes.toString(results.get(0).getResult() - .getValue(FAMILY, Bytes.toBytes("B")))); + assertEquals("bb", + Bytes.toString(results.get(0).getResult().getValue(FAMILY, Bytes.toBytes("B")))); assertFalse(results.get(1).isSuccess()); assertNull(results.get(1).getResult()); @@ -1163,41 +1165,38 @@ public void testCheckAndRowMutationsBatch() throws Throwable { try (Table table = createTable()) { table.put(Arrays.asList( new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) - .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes(1L)) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), + .addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes(1L)) + .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f")) - .addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes(1L)) - .addColumn(FAMILY, Bytes.toBytes("H"), Bytes.toBytes("h"))) - ); + .addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes(1L)) + .addColumn(FAMILY, Bytes.toBytes("H"), Bytes.toBytes("h")))); // CheckAndIncrement with correct value - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(ROWKEY) - .ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) - .build(new RowMutations(ROWKEY).add(Arrays.asList( - new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), - new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("B")), - new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), 1L), - new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")) - ))); + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(ROWKEY).ifEquals(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")) + .build(new RowMutations(ROWKEY).add(Arrays.asList( + new Put(ROWKEY).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), + new Delete(ROWKEY).addColumns(FAMILY, Bytes.toBytes("B")), + new Increment(ROWKEY).addColumn(FAMILY, Bytes.toBytes("C"), 1L), + new Append(ROWKEY).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))))); // CheckAndIncrement with wrong value CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(ROWKEY2) - .ifEquals(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("a")) - .build(new RowMutations(ROWKEY2).add(Arrays.asList( - new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), - new Delete(ROWKEY2).addColumns(FAMILY, Bytes.toBytes("F")), - new Increment(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("G"), 1L), - new Append(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("H"), Bytes.toBytes("h")) - ))); + .ifEquals(FAMILY, Bytes.toBytes("F"), 
Bytes.toBytes("a")) + .build(new RowMutations(ROWKEY2).add(Arrays.asList( + new Put(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), + new Delete(ROWKEY2).addColumns(FAMILY, Bytes.toBytes("F")), + new Increment(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("G"), 1L), + new Append(ROWKEY2).addColumn(FAMILY, Bytes.toBytes("H"), Bytes.toBytes("h"))))); List results = - table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); + table.checkAndMutate(Arrays.asList(checkAndMutate1, checkAndMutate2)); assertTrue(results.get(0).isSuccess()); - assertEquals(2, Bytes.toLong(results.get(0).getResult() - .getValue(FAMILY, Bytes.toBytes("C")))); - assertEquals("dd", Bytes.toString(results.get(0).getResult() - .getValue(FAMILY, Bytes.toBytes("D")))); + assertEquals(2, + Bytes.toLong(results.get(0).getResult().getValue(FAMILY, Bytes.toBytes("C")))); + assertEquals("dd", + Bytes.toString(results.get(0).getResult().getValue(FAMILY, Bytes.toBytes("D")))); assertFalse(results.get(1).isSuccess()); assertNull(results.get(1).getResult()); @@ -1220,7 +1219,7 @@ public void testCheckAndRowMutationsBatch() throws Throwable { public void testCheckAndMutateForNull() throws Exception { byte[] qualifier = Bytes.toBytes("Q"); try (Table table = createTable()) { - byte [] row1 = Bytes.toBytes("testRow1"); + byte[] row1 = Bytes.toBytes("testRow1"); Put put = new Put(row1); put.addColumn(FAMILY, qualifier, Bytes.toBytes("v0")); table.put(put); @@ -1228,13 +1227,13 @@ public void testCheckAndMutateForNull() throws Exception { table.get(new Get(row1).addColumn(FAMILY, qualifier)).getValue(FAMILY, qualifier))); CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row1) - .ifMatches(FAMILY, qualifier, CompareOperator.NOT_EQUAL, new byte[] {}) - .build(new Put(row1).addColumn(FAMILY, qualifier, Bytes.toBytes("v1"))); + .ifMatches(FAMILY, qualifier, CompareOperator.NOT_EQUAL, new byte[] {}) + .build(new Put(row1).addColumn(FAMILY, qualifier, Bytes.toBytes("v1"))); table.checkAndMutate(checkAndMutate1); assertEquals("v1", Bytes.toString( table.get(new Get(row1).addColumn(FAMILY, qualifier)).getValue(FAMILY, qualifier))); - byte [] row2 = Bytes.toBytes("testRow2"); + byte[] row2 = Bytes.toBytes("testRow2"); put = new Put(row2); put.addColumn(FAMILY, qualifier, new byte[] {}); table.put(put); @@ -1242,24 +1241,22 @@ public void testCheckAndMutateForNull() throws Exception { table.get(new Get(row2).addColumn(FAMILY, qualifier)).getValue(FAMILY, qualifier).length); CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row2) - .ifMatches(FAMILY, qualifier, CompareOperator.EQUAL, new byte[] {}) - .build(new Put(row2).addColumn(FAMILY, qualifier, Bytes.toBytes("v2"))); + .ifMatches(FAMILY, qualifier, CompareOperator.EQUAL, new byte[] {}) + .build(new Put(row2).addColumn(FAMILY, qualifier, Bytes.toBytes("v2"))); table.checkAndMutate(checkAndMutate2); assertEquals("v2", Bytes.toString( table.get(new Get(row2).addColumn(FAMILY, qualifier)).getValue(FAMILY, qualifier))); - byte [] row3 = Bytes.toBytes("testRow3"); + byte[] row3 = Bytes.toBytes("testRow3"); put = new Put(row3).addColumn(FAMILY, qualifier, Bytes.toBytes("v0")); assertNull(table.get(new Get(row3).addColumn(FAMILY, qualifier)).getValue(FAMILY, qualifier)); CheckAndMutate checkAndMutate3 = CheckAndMutate.newBuilder(row3) - .ifMatches(FAMILY, qualifier, CompareOperator.NOT_EQUAL, new byte[] {}) - .build(put); + .ifMatches(FAMILY, qualifier, CompareOperator.NOT_EQUAL, new byte[] {}).build(put); table.checkAndMutate(checkAndMutate3); 
assertNull(table.get(new Get(row3).addColumn(FAMILY, qualifier)).getValue(FAMILY, qualifier)); CheckAndMutate checkAndMutate4 = CheckAndMutate.newBuilder(row3) - .ifMatches(FAMILY, qualifier, CompareOperator.EQUAL, new byte[] {}) - .build(put); + .ifMatches(FAMILY, qualifier, CompareOperator.EQUAL, new byte[] {}).build(put); table.checkAndMutate(checkAndMutate4); assertEquals("v0", Bytes.toString( table.get(new Get(row3).addColumn(FAMILY, qualifier)).getValue(FAMILY, qualifier))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutateWithByteBuff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutateWithByteBuff.java index 1489c1f0400b..bea109104814 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutateWithByteBuff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutateWithByteBuff.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,7 +56,7 @@ public class TestCheckAndMutateWithByteBuff { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCheckAndMutateWithByteBuff.class); + HBaseClassTestRule.forClass(TestCheckAndMutateWithByteBuff.class); @Rule public TestName name = new TestName(); @@ -97,10 +97,12 @@ public void testCheckAndMutateWithByteBuffEncode() throws Exception { // Tests for HBASE-26777. // As most HBase.getRegion() calls have been factored out from HBase, you'd need to revert // both HBASE-26777, and the HBase.get() replacements from HBASE-26036 for this test to fail - testCheckAndMutateWithByteBuff(TableName.valueOf(name.getMethodName()), DataBlockEncoding.FAST_DIFF); + testCheckAndMutateWithByteBuff(TableName.valueOf(name.getMethodName()), + DataBlockEncoding.FAST_DIFF); } - private void testCheckAndMutateWithByteBuff(TableName tableName, DataBlockEncoding dbe) throws Exception { + private void testCheckAndMutateWithByteBuff(TableName tableName, DataBlockEncoding dbe) + throws Exception { Table testTable = createTable(tableName, dbe); byte[] checkRow = Bytes.toBytes("checkRow"); byte[] checkQualifier = Bytes.toBytes("cq"); @@ -111,20 +113,14 @@ private void testCheckAndMutateWithByteBuff(TableName tableName, DataBlockEncodi testTable.put(put); admin.flush(testTable.getName()); - assertTrue(testTable.checkAndMutate(checkRow, CF).qualifier(checkQualifier). 
- ifEquals(checkValue) - .thenPut(new Put(checkRow).addColumn(CF, Bytes.toBytes("q1"), - Bytes.toBytes("testValue")))); + assertTrue(testTable.checkAndMutate(checkRow, CF).qualifier(checkQualifier).ifEquals(checkValue) + .thenPut(new Put(checkRow).addColumn(CF, Bytes.toBytes("q1"), Bytes.toBytes("testValue")))); } - private Table createTable(TableName tableName, DataBlockEncoding dbe) - throws IOException { - TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF) - .setBlocksize(100) - .setDataBlockEncoding(dbe) - .build()) - .build(); + private Table createTable(TableName tableName, DataBlockEncoding dbe) throws IOException { + TableDescriptor td = + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(CF).setBlocksize(100).setDataBlockEncoding(dbe).build()).build(); return TEST_UTIL.createTable(td, null); } @@ -133,12 +129,12 @@ private Table createTable(TableName tableName, DataBlockEncoding dbe) */ public static class TestCheckAndMutateRegion extends HRegion { public TestCheckAndMutateRegion(Path tableDir, WAL log, FileSystem fs, Configuration confParam, - RegionInfo info, TableDescriptor htd, RegionServerServices rsServices) { + RegionInfo info, TableDescriptor htd, RegionServerServices rsServices) { super(tableDir, log, fs, confParam, info, htd, rsServices); } public TestCheckAndMutateRegion(HRegionFileSystem fs, WAL wal, Configuration confParam, - TableDescriptor htd, RegionServerServices rsServices) { + TableDescriptor htd, RegionServerServices rsServices) { super(fs, wal, confParam, htd, rsServices); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java index dc9228446925..10391631b243 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestCleanupMetaReplica extends MetaWithReplicasTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCleanupMetaReplica.class); + HBaseClassTestRule.forClass(TestCleanupMetaReplica.class); @BeforeClass public static void setUp() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java index 66f2df6bbc8c..c2951d6da07c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplicaThroughConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public class TestCleanupMetaReplicaThroughConfig extends MetaWithReplicasTestBas @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCleanupMetaReplicaThroughConfig.class); + HBaseClassTestRule.forClass(TestCleanupMetaReplicaThroughConfig.class); @BeforeClass public static void setUp() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java index 9de23a1bce2c..3c232a8e1da6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestClientOperationInterrupt { @ClassRule @@ -71,18 +71,17 @@ public Optional getRegionObserver() { } @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { + public void preGetOp(final ObserverContext e, final Get get, + final List results) throws IOException { Threads.sleep(2500); } } - @BeforeClass public static void setUpBeforeClass() throws Exception { conf = HBaseConfiguration.create(); conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - TestCoprocessor.class.getName()); + TestCoprocessor.class.getName()); util = new HBaseTestingUtil(conf); util.startMiniCluster(); @@ -93,14 +92,13 @@ public static void setUpBeforeClass() throws Exception { } admin.deleteTable(tableName); } - Table ht = util.createTable(tableName, new byte[][]{dummy, test}); + Table ht = util.createTable(tableName, new byte[][] { dummy, test }); Put p = new Put(row1); p.addColumn(dummy, dummy, dummy); ht.put(p); } - @Test public void testInterrupt50Percent() throws IOException, InterruptedException { final AtomicInteger noEx = new AtomicInteger(0); @@ -147,7 +145,6 @@ public void run() { threads.get(i).interrupt(); } - boolean stillAlive = true; while (stillAlive) { stillAlive = false; @@ -161,10 +158,10 @@ public void run() { Assert.assertFalse(Thread.currentThread().isInterrupted()); Assert.assertTrue(" noEx: " + noEx.get() + ", badEx=" + badEx.get() + ", noInt=" + noInt.get(), - noEx.get() == expectedNoExNum && badEx.get() == 0); + noEx.get() == expectedNoExNum && badEx.get() == 0); // The problem here is that we need the server to free its handlers to handle all operations - while (done.get() != nbThread){ + while (done.get() != nbThread) { Thread.sleep(1); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java index e58a66ffebc9..b8413ccacdef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor 
license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ * Test the scenario where a HRegionServer#scan() call, while scanning, timeout at client side and * getting retried. This scenario should not result in some data being skipped at RS side. */ -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestClientScannerRPCTimeout { @ClassRule @@ -129,9 +129,10 @@ public void testScannerNextRPCTimesout() throws Exception { // catch the exception after max retry number LOG.info("Failed after maximal attempts=" + CLIENT_RETRIES_NUMBER, ioe); } - assertTrue("Expected maximal try number=" + CLIENT_RETRIES_NUMBER - + ", actual =" + RSRpcServicesWithScanTimeout.tryNumber, - RSRpcServicesWithScanTimeout.tryNumber <= CLIENT_RETRIES_NUMBER); + assertTrue( + "Expected maximal try number=" + CLIENT_RETRIES_NUMBER + ", actual =" + + RSRpcServicesWithScanTimeout.tryNumber, + RSRpcServicesWithScanTimeout.tryNumber <= CLIENT_RETRIES_NUMBER); } private void putToTable(Table ht, byte[] rowkey) throws IOException { @@ -159,8 +160,7 @@ private static class RSRpcServicesWithScanTimeout extends RSRpcServices { private static boolean sleepAlways = false; private static int tryNumber = 0; - public RSRpcServicesWithScanTimeout(HRegionServer rs) - throws IOException { + public RSRpcServicesWithScanTimeout(HRegionServer rs) throws IOException { super(rs); } @@ -169,8 +169,8 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque throws ServiceException { if (request.hasScannerId()) { ScanResponse scanResponse = super.scan(controller, request); - if (this.tableScannerId == request.getScannerId() && - (sleepAlways || (!slept && seqNoToSleepOn == request.getNextCallSeq()))) { + if (this.tableScannerId == request.getScannerId() + && (sleepAlways || (!slept && seqNoToSleepOn == request.getNextCallSeq()))) { try { LOG.info("SLEEPING " + (rpcTimeout + 500)); Thread.sleep(rpcTimeout + 500); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java index 859e36f00cbb..8d9cb0108c9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -43,7 +44,7 @@ public class TestClientSideRegionScanner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientSideRegionScanner.class); + HBaseClassTestRule.forClass(TestClientSideRegionScanner.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -78,13 +79,13 @@ public void setup() throws IOException { public void testDefaultBlockCache() throws IOException { Configuration copyConf = new Configuration(conf); ClientSideRegionScanner clientSideRegionScanner = - new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); + new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache(); assertNotNull(blockCache); 
assertTrue(blockCache instanceof IndexOnlyLruBlockCache); assertTrue(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT == blockCache - .getMaxSize()); + .getMaxSize()); } @Test @@ -94,7 +95,7 @@ public void testConfiguredBlockCache() throws IOException { long blockCacheFixedSize = 1024 * 1024L; copyConf.setLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, blockCacheFixedSize); ClientSideRegionScanner clientSideRegionScanner = - new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); + new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache(); assertNotNull(blockCache); @@ -107,7 +108,7 @@ public void testNoBlockCache() throws IOException { Configuration copyConf = new Configuration(conf); copyConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); ClientSideRegionScanner clientSideRegionScanner = - new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); + new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null); BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache(); assertNull(blockCache); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java index 610fa7b3e57b..4b7c8355df13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ public class TestClientTimeouts { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientTimeouts.class); + HBaseClassTestRule.forClass(TestClientTimeouts.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); protected static int SLAVES = 1; @@ -85,7 +85,7 @@ public void testAdminTimeout() throws Exception { boolean lastFailed = false; int initialInvocations = invokations.get(); RandomTimeoutRpcClient rpcClient = (RandomTimeoutRpcClient) RpcClientFactory - .createClient(TEST_UTIL.getConfiguration(), TEST_UTIL.getClusterKey()); + .createClient(TEST_UTIL.getConfiguration(), TEST_UTIL.getClusterKey()); try { for (int i = 0; i < 5 || (lastFailed && i < 100); ++i) { @@ -108,7 +108,7 @@ public void testAdminTimeout() throws Exception { admin.close(); if (admin.getConnection().isClosed()) { rpcClient = (RandomTimeoutRpcClient) RpcClientFactory - .createClient(TEST_UTIL.getConfiguration(), TEST_UTIL.getClusterKey()); + .createClient(TEST_UTIL.getConfiguration(), TEST_UTIL.getClusterKey()); } } if (connection != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientAfterSplittingRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientAfterSplittingRegion.java index 65513f8a930b..3aa2a01ba208 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientAfterSplittingRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientAfterSplittingRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license 
agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestCloneSnapshotFromClientAfterSplittingRegion @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCloneSnapshotFromClientAfterSplittingRegion.class); + HBaseClassTestRule.forClass(TestCloneSnapshotFromClientAfterSplittingRegion.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCloneLinksAfterDelete.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCloneLinksAfterDelete.java index e5f1fad20eb5..dc48fdefe6a3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCloneLinksAfterDelete.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCloneLinksAfterDelete.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestCloneSnapshotFromClientCloneLinksAfterDelete @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCloneSnapshotFromClientCloneLinksAfterDelete.class); + HBaseClassTestRule.forClass(TestCloneSnapshotFromClientCloneLinksAfterDelete.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCustomSFT.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCustomSFT.java index 53b7f58d9bb6..17efb4bbb04b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCustomSFT.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientCustomSFT.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,18 +32,18 @@ import org.junit.experimental.categories.Category; @Category({ LargeTests.class, ClientTests.class }) -public class TestCloneSnapshotFromClientCustomSFT extends CloneSnapshotFromClientTestBase{ +public class TestCloneSnapshotFromClientCustomSFT extends CloneSnapshotFromClientTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCloneSnapshotFromClientCustomSFT.class); + HBaseClassTestRule.forClass(TestCloneSnapshotFromClientCustomSFT.class); public static final String CLONE_SFT = "FILE"; @Test public void testCloneSnapshotWithCustomSFT() throws IOException, InterruptedException { TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName1, clonedTableName, false, CLONE_SFT); verifyRowCount(TEST_UTIL, clonedTableName, snapshot1Rows); @@ -57,15 +57,15 @@ public void testCloneSnapshotWithCustomSFT() throws IOException, InterruptedExce @Test public void testCloneSnapshotWithIncorrectCustomSFT() throws IOException, InterruptedException { TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); IOException ioException = assertThrows(IOException.class, () -> { admin.cloneSnapshot(snapshotName1, clonedTableName, false, "IncorrectSFT"); }); assertEquals( - "java.lang.RuntimeException: java.lang.RuntimeException: " + - "java.lang.ClassNotFoundException: Class IncorrectSFT not found", + "java.lang.RuntimeException: java.lang.RuntimeException: " + + "java.lang.ClassNotFoundException: Class IncorrectSFT not found", ioException.getMessage()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientError.java index 93c2388e3e81..b2331330c75d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientError.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestCloneSnapshotFromClientError extends CloneSnapshotFromClientErr @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCloneSnapshotFromClientError.class); + HBaseClassTestRule.forClass(TestCloneSnapshotFromClientError.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientNormal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientNormal.java index c7730d0ea682..ae4610c2277a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientNormal.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClientNormal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestCloneSnapshotFromClientNormal extends CloneSnapshotFromClientNo @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCloneSnapshotFromClientNormal.class); + HBaseClassTestRule.forClass(TestCloneSnapshotFromClientNormal.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCompleteResultScanResultCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCompleteResultScanResultCache.java index e6b31cb9eaa7..8512cb182626 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCompleteResultScanResultCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCompleteResultScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java index 662790de0adc..728e18c25327 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,7 +72,7 @@ public class TestConnection { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnection.class); + HBaseClassTestRule.forClass(TestConnection.class); private static final Logger LOG = LoggerFactory.getLogger(TestConnection.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -317,29 +317,17 @@ public void testCreateConnection() throws Exception { } /* - ====> With MasterRegistry, connections cannot outlast the masters' lifetime. - @Test - public void testConnectionRideOverClusterRestart() throws IOException, InterruptedException { - Configuration config = new Configuration(TEST_UTIL.getConfiguration()); - - final TableName tableName = TableName.valueOf(name.getMethodName()); - TEST_UTIL.createTable(tableName, new byte[][] { FAM_NAM }).close(); - - Connection connection = ConnectionFactory.createConnection(config); - Table table = connection.getTable(tableName); - - // this will cache the meta location and table's region location - table.get(new Get(Bytes.toBytes("foo"))); - - // restart HBase - TEST_UTIL.shutdownMiniHBaseCluster(); - TEST_UTIL.restartHBaseCluster(2); - // this should be able to discover new locations for meta and table's region - table.get(new Get(Bytes.toBytes("foo"))); - TEST_UTIL.deleteTable(tableName); - table.close(); - connection.close(); - } + * ====> With MasterRegistry, connections cannot outlast the masters' lifetime. 
+ * @Test public void testConnectionRideOverClusterRestart() throws IOException, + * InterruptedException { Configuration config = new Configuration(TEST_UTIL.getConfiguration()); + * final TableName tableName = TableName.valueOf(name.getMethodName()); + * TEST_UTIL.createTable(tableName, new byte[][] { FAM_NAM }).close(); Connection connection = + * ConnectionFactory.createConnection(config); Table table = connection.getTable(tableName); // + * this will cache the meta location and table's region location table.get(new + * Get(Bytes.toBytes("foo"))); // restart HBase TEST_UTIL.shutdownMiniHBaseCluster(); + * TEST_UTIL.restartHBaseCluster(2); // this should be able to discover new locations for meta and + * table's region table.get(new Get(Bytes.toBytes("foo"))); TEST_UTIL.deleteTable(tableName); + * table.close(); connection.close(); } */ @Test @@ -350,12 +338,12 @@ public void testLocateRegionsWithRegionReplicas() throws IOException { // Create a table with region replicas TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); TEST_UTIL.getAdmin().createTable(builder.build()); try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - RegionLocator locator = conn.getRegionLocator(tableName)) { + RegionLocator locator = conn.getRegionLocator(tableName)) { // Get locations of the regions of the table List locations = locator.getAllRegionLocations(); @@ -364,7 +352,7 @@ public void testLocateRegionsWithRegionReplicas() throws IOException { // The replicaIds of the returned locations should be 0, 1 and 2 Set expectedReplicaIds = - IntStream.range(0, regionReplication).boxed().collect(Collectors.toSet()); + IntStream.range(0, regionReplication).boxed().collect(Collectors.toSet()); for (HRegionLocation location : locations) { assertTrue(expectedReplicaIds.remove(location.getRegion().getReplicaId())); } @@ -378,8 +366,8 @@ public void testClosedConnection() throws ServiceException, Throwable { byte[] family = Bytes.toBytes("cf"); TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName) - .setCoprocessor(MultiRowMutationEndpoint.class.getName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); + .setCoprocessor(MultiRowMutationEndpoint.class.getName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); TEST_UTIL.getAdmin().createTable(builder.build()); Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); @@ -403,7 +391,7 @@ public void testCancelConnectionMemoryLeak() throws IOException, InterruptedExce TEST_UTIL.createTable(tableName, FAM_NAM).close(); TEST_UTIL.getAdmin().balancerSwitch(false, true); try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { table.get(new Get(Bytes.toBytes("1"))); ServerName sn = TEST_UTIL.getRSForFirstRegionInTable(tableName).getServerName(); RpcClient rpcClient = ((AsyncConnectionImpl) connection.toAsyncConnection()).rpcClient; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java index 1e2dfd46d671..636284ca3c2c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestConnectionUtils { @ClassRule @@ -39,7 +39,7 @@ public class TestConnectionUtils { @Test public void testRetryTimeJitter() { long[] retries = new long[200]; - long baseTime = 1000000; //Larger number than reality to help test randomness. + long baseTime = 1000000; // Larger number than reality to help test randomness. long maxTimeExpected = (long) (baseTime * 1.01f); for (int i = 0; i < retries.length; i++) { retries[i] = ConnectionUtils.getPauseTime(baseTime, 0); @@ -47,14 +47,14 @@ public void testRetryTimeJitter() { Set retyTimeSet = new TreeSet<>(); for (long l : retries) { - /*make sure that there is some jitter but only 1%*/ + /* make sure that there is some jitter but only 1% */ assertTrue(l >= baseTime); assertTrue(l <= maxTimeExpected); // Add the long to the set retyTimeSet.add(l); } - //Make sure that most are unique. some overlap will happen + // Make sure that most are unique. some overlap will happen assertTrue(retyTimeSet.size() > (retries.length * 0.80)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java index 8fd2566dd897..e8e2751cea1e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,11 +44,10 @@ import org.slf4j.LoggerFactory; /** - * Test a drop timeout request. - * This test used to be in TestHCM but it has particulare requirements -- i.e. one handler only -- - * so run it apart from the rest of TestHCM. + * Test a drop timeout request. This test used to be in TestHCM but it has particulare requirements + * -- i.e. one handler only -- so run it apart from the rest of TestHCM. */ -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestDropTimeoutRequest { @ClassRule @@ -76,8 +75,8 @@ public Optional getRegionObserver() { } @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { + public void preGetOp(final ObserverContext e, final Get get, + final List results) throws IOException { // After first sleep, all requests are timeout except the last retry. If we handle // all the following requests, finally the last request is also timeout. If we drop all // timeout requests, we can handle the last request immediately and it will not timeout. 
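Editorial aside: the TestConnectionUtils hunk above exercises retry-pause jitter. The test expects ConnectionUtils.getPauseTime(baseTime, 0) to return a value that is at least the base pause, at most one percent above it, and to vary between calls (most of 200 samples distinct). The following is a minimal standalone sketch of that bounded-jitter idea under those assumptions only; the class name JitterSketch, the helper addJitter, and the use of ThreadLocalRandom are illustrative choices of this note, not HBase's actual ConnectionUtils implementation.

import java.util.TreeSet;
import java.util.concurrent.ThreadLocalRandom;

// Illustrative only: mirrors the bounds checked by testRetryTimeJitter, not HBase's real code.
public class JitterSketch {

  // Hypothetical helper: base pause plus a random jitter of at most `fraction` of the base.
  static long addJitter(long basePause, float fraction) {
    long maxJitter = (long) (basePause * fraction);
    // nextLong(bound) is exclusive, so +1 makes the full jitter value reachable.
    return basePause + ThreadLocalRandom.current().nextLong(maxJitter + 1);
  }

  public static void main(String[] args) {
    long baseTime = 1_000_000L;                   // large base, as in the test
    long maxExpected = (long) (baseTime * 1.01f); // at most 1% above the base
    TreeSet<Long> distinct = new TreeSet<>();
    for (int i = 0; i < 200; i++) {
      long pause = addJitter(baseTime, 0.01f);
      if (pause < baseTime || pause > maxExpected) {
        throw new AssertionError("pause out of bounds: " + pause);
      }
      distinct.add(pause);
    }
    // Most values should differ; some collisions are fine (the test tolerates ~20% overlap).
    System.out.println("distinct pauses: " + distinct.size() + " / 200");
  }
}

The small random component matters because it spreads out retries from many concurrent clients, which is why the test insists the returned pauses are mostly unique rather than a single deterministic value.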
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java index 006f2e696941..78e7816dd4b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,17 +73,17 @@ public void tearDown() throws Exception { } /** - * We were only clearing rows that had a hregioninfo column in hbase:meta. Mangled rows that - * were missing the hregioninfo because of error were being left behind messing up any - * subsequent table made with the same name. HBASE-12980 + * We were only clearing rows that had a hregioninfo column in hbase:meta. Mangled rows that were + * missing the hregioninfo because of error were being left behind messing up any subsequent table + * made with the same name. HBASE-12980 */ @Test public void testDeleteForSureClearsAllTableRowsFromMeta() - throws IOException, InterruptedException { + throws IOException, InterruptedException { final TableName tableName = TableName.valueOf(name.getMethodName()); final Admin admin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME)).build(); try { createTable(TEST_UTIL, tableDescriptor, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); } catch (Exception e) { @@ -125,7 +125,7 @@ public void testDeleteForSureClearsAllTableRowsFromMeta() } } - public static class MasterSyncObserver implements MasterCoprocessor, MasterObserver { + public static class MasterSyncObserver implements MasterCoprocessor, MasterObserver { volatile CountDownLatch tableCreationLatch = null; volatile CountDownLatch tableDeletionLatch = null; @@ -136,8 +136,7 @@ public Optional getMasterObserver() { @Override public void postCompletedCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, + final ObserverContext ctx, final TableDescriptor desc, final RegionInfo[] regions) throws IOException { // the AccessController test, some times calls only and directly the // postCompletedCreateTableAction() @@ -148,9 +147,8 @@ public void postCompletedCreateTableAction( @Override public void postCompletedDeleteTableAction( - final ObserverContext ctx, - final TableName tableName) - throws IOException { + final ObserverContext ctx, final TableName tableName) + throws IOException { // the AccessController test, some times calls only and directly the postDeleteTableHandler() if (tableDeletionLatch != null) { tableDeletionLatch.countDown(); @@ -159,11 +157,11 @@ public void postCompletedDeleteTableAction( } public static void createTable(HBaseTestingUtil testUtil, TableDescriptor tableDescriptor, - byte[][] splitKeys) throws Exception { + byte[][] splitKeys) throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. 
MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster().getMasterCoprocessorHost() - .findCoprocessor(MasterSyncObserver.class); + .findCoprocessor(MasterSyncObserver.class); observer.tableCreationLatch = new CountDownLatch(1); Admin admin = testUtil.getAdmin(); if (splitKeys != null) { @@ -176,12 +174,11 @@ public static void createTable(HBaseTestingUtil testUtil, TableDescriptor tableD testUtil.waitUntilAllRegionsAssigned(tableDescriptor.getTableName()); } - public static void deleteTable(HBaseTestingUtil testUtil, TableName tableName) - throws Exception { + public static void deleteTable(HBaseTestingUtil testUtil, TableName tableName) throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. - MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class); + MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster().getMasterCoprocessorHost() + .findCoprocessor(MasterSyncObserver.class); observer.tableDeletionLatch = new CountDownLatch(1); Admin admin = testUtil.getAdmin(); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java index fde362c916d5..e5d8a0eeec8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFailedMetaReplicaAssigment.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ public class TestFailedMetaReplicaAssigment { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFailedMetaReplicaAssigment.class); + HBaseClassTestRule.forClass(TestFailedMetaReplicaAssigment.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -64,8 +64,8 @@ public static void setUp() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt(HConstants.META_REPLICAS_NUM, 3); StartTestingClusterOption option = - StartTestingClusterOption.builder().numAlwaysStandByMasters(1).numMasters(1) - .numRegionServers(1).masterClass(BrokenMetaReplicaMaster.class).build(); + StartTestingClusterOption.builder().numAlwaysStandByMasters(1).numMasters(1) + .numRegionServers(1).masterClass(BrokenMetaReplicaMaster.class).build(); TEST_UTIL.startMiniCluster(option); } @@ -83,18 +83,18 @@ public void testFailedReplicaAssignment() throws InterruptedException { AssignmentManager am = master.getAssignmentManager(); // showing one of the replicas got assigned RegionInfo metaReplicaHri = - RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, 1); + RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, 1); // we use assignAsync so we need to wait a bit TEST_UTIL.waitFor(30000, () -> { RegionStateNode metaReplicaRegionNode = - am.getRegionStates().getOrCreateRegionStateNode(metaReplicaHri); + am.getRegionStates().getOrCreateRegionStateNode(metaReplicaHri); return metaReplicaRegionNode.getRegionLocation() != null; }); // showing one of the replicas failed to be assigned RegionInfo metaReplicaHri2 = - 
RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, 2); + RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, 2); RegionStateNode metaReplicaRegionNode2 = - am.getRegionStates().getOrCreateRegionStateNode(metaReplicaHri2); + am.getRegionStates().getOrCreateRegionStateNode(metaReplicaHri2); // wait for several seconds to make sure that it is not assigned for (int i = 0; i < 3; i++) { Thread.sleep(2000); @@ -119,7 +119,7 @@ public BrokenTransitRegionStateProcedure(MasterProcedureEnv env, RegionInfo hri) @Override protected Procedure[] execute(MasterProcedureEnv env) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { throw new ProcedureSuspendedException("Never end procedure!"); } } @@ -131,7 +131,7 @@ public BrokenMetaReplicaMaster(final Configuration conf) throws IOException { @Override public AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new BrokenMasterMetaAssignmentManager(master, masterRegion); } } @@ -140,7 +140,7 @@ public static class BrokenMasterMetaAssignmentManager extends AssignmentManager MasterServices master; public BrokenMasterMetaAssignmentManager(final MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { super(master, masterRegion); this.master = master; } @@ -154,7 +154,7 @@ public TransitRegionStateProcedure[] createAssignProcedures(List hri regionNode.lock(); try { procs.add(regionNode.setProcedure(new BrokenTransitRegionStateProcedure( - master.getMasterProcedureExecutor().getEnvironment(), hri))); + master.getMasterProcedureExecutor().getEnvironment(), hri))); } finally { regionNode.unlock(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java index 35c4d958a30a..34f57b386764 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -63,7 +63,7 @@ public class TestFallbackToUseReplay { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFallbackToUseReplay.class); + HBaseClassTestRule.forClass(TestFallbackToUseReplay.class); private static Configuration CONF = HBaseConfiguration.create(); @@ -72,7 +72,7 @@ public class TestFallbackToUseReplay { private static AsyncRegionReplicationRetryingCaller CALLER; private static RegionInfo REPLICA = - RegionInfoBuilder.newBuilder(TableName.valueOf("test")).setReplicaId(1).build(); + RegionInfoBuilder.newBuilder(TableName.valueOf("test")).setReplicaId(1).build(); private static AtomicBoolean REPLAY_CALLED = new AtomicBoolean(false); @@ -81,8 +81,8 @@ public static void setUpBeforeClass() throws IOException { CONF.setInt(AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY, 0); AsyncRegionLocator locator = mock(AsyncRegionLocator.class); when(locator.getRegionLocation(any(), any(), anyInt(), any(), anyLong())) - .thenReturn(CompletableFuture.completedFuture(new HRegionLocation(REPLICA, - ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())))); + .thenReturn(CompletableFuture.completedFuture(new HRegionLocation(REPLICA, + ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())))); AdminService.Interface stub = mock(AdminService.Interface.class); // fail the call to replicateToReplica doAnswer(i -> { @@ -99,7 +99,7 @@ public static void setUpBeforeClass() throws IOException { return null; }).when(stub).replay(any(), any(), any()); CONN = new AsyncClusterConnectionImpl(CONF, mock(ConnectionRegistry.class), "test", null, - User.getCurrent()) { + User.getCurrent()) { @Override AsyncRegionLocator getLocator() { @@ -112,8 +112,8 @@ Interface getAdminStub(ServerName serverName) throws IOException { } }; CALLER = new AsyncRegionReplicationRetryingCaller(AsyncClusterConnectionImpl.RETRY_TIMER, CONN, - 10, TimeUnit.SECONDS.toNanos(1), TimeUnit.SECONDS.toNanos(10), REPLICA, - Collections.emptyList()); + 10, TimeUnit.SECONDS.toNanos(1), TimeUnit.SECONDS.toNanos(10), REPLICA, + Collections.emptyList()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java index 61520d337521..71c5f40d0211 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,11 +44,10 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestFlushFromClient { @ClassRule @@ -58,14 +57,12 @@ public class TestFlushFromClient { private static final Logger LOG = LoggerFactory.getLogger(TestFlushFromClient.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static AsyncConnection asyncConn; - private static final byte[][] SPLITS = new byte[][]{Bytes.toBytes("3"), Bytes.toBytes("7")}; - private static final List ROWS = Arrays.asList( - Bytes.toBytes("1"), - Bytes.toBytes("4"), - Bytes.toBytes("8")); + private static final byte[][] SPLITS = new byte[][] { Bytes.toBytes("3"), Bytes.toBytes("7") }; + private static final List ROWS = + Arrays.asList(Bytes.toBytes("1"), Bytes.toBytes("4"), Bytes.toBytes("8")); private static final byte[] FAMILY_1 = Bytes.toBytes("f1"); private static final byte[] FAMILY_2 = Bytes.toBytes("f2"); - public static final byte[][] FAMILIES = {FAMILY_1, FAMILY_2}; + public static final byte[][] FAMILIES = { FAMILY_1, FAMILY_2 }; @Rule public TestName name = new TestName(); @@ -122,8 +119,8 @@ public void testFlushTableFamily() throws Exception { try (Admin admin = TEST_UTIL.getAdmin()) { long sizeBeforeFlush = getRegionInfo().get(0).getMemStoreDataSize(); admin.flush(tableName, FAMILY_1); - assertFalse(getRegionInfo().stream(). - anyMatch(r -> r.getMemStoreDataSize() != sizeBeforeFlush / 2)); + assertFalse( + getRegionInfo().stream().anyMatch(r -> r.getMemStoreDataSize() != sizeBeforeFlush / 2)); } } @@ -139,8 +136,8 @@ public void testAsyncFlushTableFamily() throws Exception { AsyncAdmin admin = asyncConn.getAdmin(); long sizeBeforeFlush = getRegionInfo().get(0).getMemStoreDataSize(); admin.flush(tableName, FAMILY_1).get(); - assertFalse(getRegionInfo().stream(). 
- anyMatch(r -> r.getMemStoreDataSize() != sizeBeforeFlush / 2)); + assertFalse( + getRegionInfo().stream().anyMatch(r -> r.getMemStoreDataSize() != sizeBeforeFlush / 2)); } @Test @@ -190,10 +187,8 @@ public void testAsyncFlushRegionFamily() throws Exception { @Test public void testFlushRegionServer() throws Exception { try (Admin admin = TEST_UTIL.getAdmin()) { - for (HRegionServer rs : TEST_UTIL.getHBaseCluster() - .getLiveRegionServerThreads() - .stream().map(JVMClusterUtil.RegionServerThread::getRegionServer) - .collect(Collectors.toList())) { + for (HRegionServer rs : TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().stream() + .map(JVMClusterUtil.RegionServerThread::getRegionServer).collect(Collectors.toList())) { admin.flushRegionServer(rs.getServerName()); assertFalse(getRegionInfo(rs).stream().anyMatch(r -> r.getMemStoreDataSize() != 0)); } @@ -203,10 +198,8 @@ public void testFlushRegionServer() throws Exception { @Test public void testAsyncFlushRegionServer() throws Exception { AsyncAdmin admin = asyncConn.getAdmin(); - for (HRegionServer rs : TEST_UTIL.getHBaseCluster() - .getLiveRegionServerThreads() - .stream().map(JVMClusterUtil.RegionServerThread::getRegionServer) - .collect(Collectors.toList())) { + for (HRegionServer rs : TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().stream() + .map(JVMClusterUtil.RegionServerThread::getRegionServer).collect(Collectors.toList())) { admin.flushRegionServer(rs.getServerName()).get(); assertFalse(getRegionInfo(rs).stream().anyMatch(r -> r.getMemStoreDataSize() != 0)); } @@ -218,7 +211,7 @@ private List getRegionInfo() { private List getRegionInfo(HRegionServer rs) { return rs.getRegions().stream() - .filter(v -> v.getTableDescriptor().getTableName().equals(tableName)) - .collect(Collectors.toList()); + .filter(v -> v.getTableDescriptor().getTableName().equals(tableName)) + .collect(Collectors.toList()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index e5bf90b2aa74..6f79b4a6b3ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -67,28 +67,23 @@ import org.slf4j.LoggerFactory; /** - * Run tests that use the HBase clients; {@link Table}. - * Sets up the HBase mini cluster once at start and runs through all client tests. - * Each creates a table named for the method and does its stuff against that. - * - * Parameterized to run with different registry implementations. - * - * This class was split in three because it got too big when parameterized. Other classes - * are below. - * + * Run tests that use the HBase clients; {@link Table}. Sets up the HBase mini cluster once at start + * and runs through all client tests. Each creates a table named for the method and does its stuff + * against that. Parameterized to run with different registry implementations. This class was split + * in three because it got too big when parameterized. Other classes are below. * @see TestFromClientSide4 * @see TestFromClientSide5 */ // NOTE: Increment tests were moved to their own class, TestIncrementsFromClientSide. 
-@Category({LargeTests.class, ClientTests.class}) -@SuppressWarnings ("deprecation") +@Category({ LargeTests.class, ClientTests.class }) +@SuppressWarnings("deprecation") @RunWith(Parameterized.class) public class TestFromClientSide extends FromClientSideBase { private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSide.class); + HBaseClassTestRule.forClass(TestFromClientSide.class); @Rule public TableNameTestRule name = new TableNameTestRule(); @@ -100,12 +95,14 @@ public TestFromClientSide(Class registry, int numHedgedReqs) throws Exception { initialize(registry, numHedgedReqs, MultiRowMutationEndpoint.class); } - @Parameterized.Parameters public static Collection parameters() { + @Parameterized.Parameters + public static Collection parameters() { return Arrays.asList(new Object[][] { { MasterRegistry.class, 1 }, { MasterRegistry.class, 2 }, - { ZKConnectionRegistry.class, 1 } }); + { ZKConnectionRegistry.class, 1 } }); } - @AfterClass public static void tearDownAfterClass() throws Exception { + @AfterClass + public static void tearDownAfterClass() throws Exception { afterClass(); } @@ -114,17 +111,13 @@ public TestFromClientSide(Class registry, int numHedgedReqs) throws Exception { */ @Test public void testDuplicateAppend() throws Exception { - TableDescriptorBuilder builder = TEST_UTIL - .createModifyableTableDescriptor(name.getTableName(), - ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, - ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); + TableDescriptorBuilder builder = TEST_UTIL.createModifyableTableDescriptor(name.getTableName(), + ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, + ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); Map kvs = new HashMap<>(); kvs.put(SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000"); builder.setCoprocessor(CoprocessorDescriptorBuilder - .newBuilder(SleepAtFirstRpcCall.class.getName()) - .setPriority(1) - .setProperties(kvs) - .build()); + .newBuilder(SleepAtFirstRpcCall.class.getName()).setPriority(1).setProperties(kvs).build()); TEST_UTIL.createTable(builder.build(), new byte[][] { ROW }).close(); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -133,8 +126,8 @@ public void testDuplicateAppend() throws Exception { c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); try (Connection connection = ConnectionFactory.createConnection(c); - Table table = connection.getTableBuilder(name.getTableName(), null). 
- setOperationTimeout(3 * 1000).build()) { + Table table = connection.getTableBuilder(name.getTableName(), null) + .setOperationTimeout(3 * 1000).build()) { Append append = new Append(ROW); append.addColumn(HBaseTestingUtil.fam1, QUALIFIER, VALUE); Result result = table.append(append); @@ -157,17 +150,13 @@ public void testDuplicateAppend() throws Exception { */ @Test public void testDuplicateBatchAppend() throws Exception { - TableDescriptorBuilder builder = TEST_UTIL - .createModifyableTableDescriptor(name.getTableName(), - ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, - ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); + TableDescriptorBuilder builder = TEST_UTIL.createModifyableTableDescriptor(name.getTableName(), + ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, + ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); Map kvs = new HashMap<>(); kvs.put(SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000"); builder.setCoprocessor(CoprocessorDescriptorBuilder - .newBuilder(SleepAtFirstRpcCall.class.getName()) - .setPriority(1) - .setProperties(kvs) - .build()); + .newBuilder(SleepAtFirstRpcCall.class.getName()).setPriority(1).setProperties(kvs).build()); TEST_UTIL.createTable(builder.build(), new byte[][] { ROW }).close(); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -176,8 +165,8 @@ public void testDuplicateBatchAppend() throws Exception { c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); try (Connection connection = ConnectionFactory.createConnection(c); - Table table = connection.getTableBuilder(name.getTableName(), null). - setOperationTimeout(3 * 1000).build()) { + Table table = connection.getTableBuilder(name.getTableName(), null) + .setOperationTimeout(3 * 1000).build()) { Append append = new Append(ROW); append.addColumn(HBaseTestingUtil.fam1, QUALIFIER, VALUE); @@ -212,9 +201,9 @@ public void testKeepDeletedCells() throws Exception { final byte[] T3 = Bytes.toBytes("T3"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY) - .setKeepDeletedCells(KeepDeletedCells.TRUE).setMaxVersions(3).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY) + .setKeepDeletedCells(KeepDeletedCells.TRUE).setMaxVersions(3).build()) + .build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); try (Table h = TEST_UTIL.getConnection().getTable(tableName)) { long ts = EnvironmentEdgeManager.currentTime(); @@ -267,7 +256,8 @@ public void testKeepDeletedCells() throws Exception { /** * Basic client side validation of HBASE-10118 */ - @Test public void testPurgeFutureDeletes() throws Exception { + @Test + public void testPurgeFutureDeletes() throws Exception { final TableName tableName = name.getTableName(); final byte[] ROW = Bytes.toBytes("row"); final byte[] FAMILY = Bytes.toBytes("family"); @@ -312,10 +302,11 @@ public void testKeepDeletedCells() throws Exception { } /** - * Verifies that getConfiguration returns the same Configuration object used - * to create the HTable instance. + * Verifies that getConfiguration returns the same Configuration object used to create the HTable + * instance. 
*/ - @Test public void testGetConfiguration() throws Exception { + @Test + public void testGetConfiguration() throws Exception { final TableName tableName = name.getTableName(); byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") }; Configuration conf = TEST_UTIL.getConfiguration(); @@ -325,18 +316,13 @@ public void testKeepDeletedCells() throws Exception { } /** - * Test from client side of an involved filter against a multi family that - * involves deletes. + * Test from client side of an involved filter against a multi family that involves deletes. */ - @Test public void testWeirdCacheBehaviour() throws Exception { + @Test + public void testWeirdCacheBehaviour() throws Exception { final TableName tableName = name.getTableName(); - byte[][] FAMILIES = new byte[][] { - Bytes.toBytes("trans-blob"), - Bytes.toBytes("trans-type"), - Bytes.toBytes("trans-date"), - Bytes.toBytes("trans-tags"), - Bytes.toBytes("trans-group") - }; + byte[][] FAMILIES = new byte[][] { Bytes.toBytes("trans-blob"), Bytes.toBytes("trans-type"), + Bytes.toBytes("trans-date"), Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") }; try (Table ht = TEST_UTIL.createTable(tableName, FAMILIES)) { String value = "this is the value"; String value2 = "this is some other value"; @@ -374,16 +360,17 @@ public void testKeepDeletedCells() throws Exception { } /** - * Test filters when multiple regions. It does counts. Needs eye-balling of - * logs to ensure that we're not scanning more regions that we're supposed to. - * Related to the TestFilterAcrossRegions over in the o.a.h.h.filter package. + * Test filters when multiple regions. It does counts. Needs eye-balling of logs to ensure that + * we're not scanning more regions that we're supposed to. Related to the TestFilterAcrossRegions + * over in the o.a.h.h.filter package. */ - @Test public void testFilterAcrossMultipleRegions() throws IOException { + @Test + public void testFilterAcrossMultipleRegions() throws IOException { final TableName tableName = name.getTableName(); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { int rowCount = TEST_UTIL.loadTable(t, FAMILY, false); assertRowCount(t, rowCount); - // Split the table. Should split on a reasonable key; 'lqj' + // Split the table. Should split on a reasonable key; 'lqj' List regions = splitTable(t); assertRowCount(t, rowCount); // Get end key of first region. @@ -393,13 +380,13 @@ public void testKeepDeletedCells() throws Exception { int endKeyCount = countRows(t, createScanWithRowFilter(endKey)); assertTrue(endKeyCount < rowCount); - // How do I know I did not got to second region? Thats tough. Can't really - // do that in client-side region test. I verified by tracing in debugger. + // How do I know I did not got to second region? Thats tough. Can't really + // do that in client-side region test. I verified by tracing in debugger. // I changed the messages that come out when set to DEBUG so should see // when scanner is done. Says "Finished with scanning..." with region name. // Check that its finished in right region. - // New test. Make it so scan goes into next region by one and then two. + // New test. Make it so scan goes into next region by one and then two. // Make sure count comes out right. 
byte[] key = new byte[] { endKey[0], endKey[1], (byte) (endKey[2] + 1) }; int plusOneCount = countRows(t, createScanWithRowFilter(key)); @@ -408,11 +395,11 @@ public void testKeepDeletedCells() throws Exception { int plusTwoCount = countRows(t, createScanWithRowFilter(key)); assertEquals(endKeyCount + 2, plusTwoCount); - // New test. Make it so I scan one less than endkey. + // New test. Make it so I scan one less than endkey. key = new byte[] { endKey[0], endKey[1], (byte) (endKey[2] - 1) }; int minusOneCount = countRows(t, createScanWithRowFilter(key)); assertEquals(endKeyCount - 1, minusOneCount); - // For above test... study logs. Make sure we do "Finished with scanning.." + // For above test... study logs. Make sure we do "Finished with scanning.." // in first region and that we do not fall into the next region. key = new byte[] { 'a', 'a', 'a' }; @@ -420,16 +407,17 @@ public void testKeepDeletedCells() throws Exception { assertEquals(1, countBBB); int countGreater = - countRows(t, createScanWithRowFilter(endKey, null, CompareOperator.GREATER_OR_EQUAL)); + countRows(t, createScanWithRowFilter(endKey, null, CompareOperator.GREATER_OR_EQUAL)); // Because started at start of table. assertEquals(0, countGreater); countGreater = - countRows(t, createScanWithRowFilter(endKey, endKey, CompareOperator.GREATER_OR_EQUAL)); + countRows(t, createScanWithRowFilter(endKey, endKey, CompareOperator.GREATER_OR_EQUAL)); assertEquals(rowCount - endKeyCount, countGreater); } } - @Test public void testSuperSimple() throws Exception { + @Test + public void testSuperSimple() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { Put put = new Put(ROW); @@ -444,7 +432,8 @@ public void testKeepDeletedCells() throws Exception { } } - @Test public void testMaxKeyValueSize() throws Exception { + @Test + public void testMaxKeyValueSize() throws Exception { final TableName tableName = name.getTableName(); Configuration conf = TEST_UTIL.getConfiguration(); String oldMaxSize = conf.get(ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY); @@ -473,16 +462,17 @@ public void testKeepDeletedCells() throws Exception { conf.set(ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, oldMaxSize); } - @Test public void testFilters() throws Exception { + @Test + public void testFilters() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { byte[][] ROWS = makeN(ROW, 10); byte[][] QUALIFIERS = - { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), - Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), - Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), - Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), - Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; + { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), + Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), + Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), + Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), + Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; for (int i = 0; i < 10; i++) { Put put = new Put(ROWS[i]); put.setDurability(Durability.SKIP_WAL); @@ -491,16 +481,16 @@ public void testKeepDeletedCells() throws Exception { } Scan scan = new Scan(); scan.addFamily(FAMILY); - Filter filter = new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator("col[1-5]")); + Filter filter = + new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator("col[1-5]")); scan.setFilter(filter); try (ResultScanner scanner = 
ht.getScanner(scan)) { int expectedIndex = 1; for (Result result : scanner) { assertEquals(1, result.size()); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]), ROWS[expectedIndex])); - assertTrue(Bytes.equals(CellUtil.cloneQualifier(result.rawCells()[0]), - QUALIFIERS[expectedIndex])); + assertTrue( + Bytes.equals(CellUtil.cloneQualifier(result.rawCells()[0]), QUALIFIERS[expectedIndex])); expectedIndex++; } assertEquals(6, expectedIndex); @@ -508,7 +498,8 @@ public void testKeepDeletedCells() throws Exception { } } - @Test public void testFilterWithLongCompartor() throws Exception { + @Test + public void testFilterWithLongCompartor() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { byte[][] ROWS = makeN(ROW, 10); @@ -525,7 +516,7 @@ public void testKeepDeletedCells() throws Exception { Scan scan = new Scan(); scan.addFamily(FAMILY); Filter filter = new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.GREATER, - new LongComparator(500)); + new LongComparator(500)); scan.setFilter(filter); try (ResultScanner scanner = ht.getScanner(scan)) { int expectedIndex = 0; @@ -539,16 +530,17 @@ public void testKeepDeletedCells() throws Exception { } } - @Test public void testKeyOnlyFilter() throws Exception { + @Test + public void testKeyOnlyFilter() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { byte[][] ROWS = makeN(ROW, 10); byte[][] QUALIFIERS = - { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), - Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), - Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), - Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), - Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; + { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), + Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), + Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), + Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), + Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; for (int i = 0; i < 10; i++) { Put put = new Put(ROWS[i]); put.setDurability(Durability.SKIP_WAL); @@ -575,7 +567,8 @@ public void testKeepDeletedCells() throws Exception { /** * Test simple table and non-existent row cases. */ - @Test public void testSimpleMissing() throws Exception { + @Test + public void testSimpleMissing() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { byte[][] ROWS = makeN(ROW, 4); @@ -682,8 +675,7 @@ public void testKeepDeletedCells() throws Exception { } /** - * Test basic puts, gets, scans, and deletes for a single row - * in a multiple family table. + * Test basic puts, gets, scans, and deletes for a single row in a multiple family table. 
*/ @SuppressWarnings("checkstyle:MethodLength") @Test @@ -939,11 +931,11 @@ public void testSingleRowMultipleFamily() throws Exception { assertNullResult(getSingleScanResult(ht, new Scan().addFamily(FAMILIES[4]))); // Make sure we can still get another family - assertSingleResult(ht.get(new Get(ROWS[0]).addColumn(FAMILIES[2], QUALIFIERS[2])), - ROWS[0], FAMILIES[2], QUALIFIERS[2], VALUES[2]); + assertSingleResult(ht.get(new Get(ROWS[0]).addColumn(FAMILIES[2], QUALIFIERS[2])), ROWS[0], + FAMILIES[2], QUALIFIERS[2], VALUES[2]); - assertSingleResult(ht.get(new Get(ROWS[0]).addColumn(FAMILIES[6], QUALIFIERS[9])), - ROWS[0], FAMILIES[6], QUALIFIERS[9], VALUES[9]); + assertSingleResult(ht.get(new Get(ROWS[0]).addColumn(FAMILIES[6], QUALIFIERS[9])), ROWS[0], + FAMILIES[6], QUALIFIERS[9], VALUES[9]); // Make sure we can still scan another family assertSingleResult(getSingleScanResult(ht, new Scan().addColumn(FAMILIES[6], QUALIFIERS[6])), @@ -954,14 +946,15 @@ public void testSingleRowMultipleFamily() throws Exception { } } - @Test(expected = NullPointerException.class) public void testNullTableName() throws IOException { + @Test(expected = NullPointerException.class) + public void testNullTableName() throws IOException { // Null table name (should NOT work) TEST_UTIL.createTable(null, FAMILY); fail("Creating a table with null name passed, should have failed"); } - @Test(expected = IllegalArgumentException.class) public void testNullFamilyName() - throws IOException { + @Test(expected = IllegalArgumentException.class) + public void testNullFamilyName() throws IOException { final TableName tableName = name.getTableName(); // Null family (should NOT work) @@ -969,7 +962,8 @@ public void testSingleRowMultipleFamily() throws Exception { fail("Creating a table with a null family passed, should fail"); } - @Test public void testNullRowAndQualifier() throws Exception { + @Test + public void testNullRowAndQualifier() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { @@ -1004,7 +998,8 @@ public void testSingleRowMultipleFamily() throws Exception { } } - @Test public void testNullEmptyQualifier() throws Exception { + @Test + public void testNullEmptyQualifier() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { @@ -1041,7 +1036,8 @@ public void testSingleRowMultipleFamily() throws Exception { } } - @Test public void testNullValue() throws IOException { + @Test + public void testNullValue() throws IOException { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { @@ -1075,7 +1071,8 @@ public void testSingleRowMultipleFamily() throws Exception { } } - @Test public void testNullQualifier() throws Exception { + @Test + public void testNullQualifier() throws Exception { final TableName tableName = name.getTableName(); try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { @@ -1127,8 +1124,8 @@ public void testSingleRowMultipleFamily() throws Exception { delete = new Delete(ROW); delete.addColumns(FAMILY, null); - table.checkAndMutate(ROW, FAMILY). 
- ifEquals(Bytes.toBytes("checkAndMutate")).thenDelete(delete); + table.checkAndMutate(ROW, FAMILY).ifEquals(Bytes.toBytes("checkAndMutate")) + .thenDelete(delete); } } @@ -1237,9 +1234,10 @@ public void testVersions() throws Exception { result = ht.get(get); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], - STAMPS[8] }, + STAMPS[8] }, new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, 0, 7); + VALUES[8] }, + 0, 7); scan = new Scan().withStartRow(ROW); scan.addColumn(FAMILY, QUALIFIER); @@ -1247,27 +1245,30 @@ public void testVersions() throws Exception { result = getSingleScanResult(ht, scan); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], - STAMPS[8] }, + STAMPS[8] }, new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, 0, 7); + VALUES[8] }, + 0, 7); get = new Get(ROW); get.readAllVersions(); result = ht.get(get); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], - STAMPS[8] }, + STAMPS[8] }, new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, 0, 7); + VALUES[8] }, + 0, 7); scan = new Scan().withStartRow(ROW); scan.readAllVersions(); result = getSingleScanResult(ht, scan); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], - STAMPS[8] }, + STAMPS[8] }, new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, 0, 7); + VALUES[8] }, + 0, 7); // Verify we can get each one properly getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); @@ -1303,9 +1304,10 @@ public void testVersions() throws Exception { result = ht.get(get); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], - STAMPS[11], STAMPS[13], STAMPS[15] }, + STAMPS[11], STAMPS[13], STAMPS[15] }, new byte[][] { VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], - VALUES[11], VALUES[13], VALUES[15] }, 0, 9); + VALUES[11], VALUES[13], VALUES[15] }, + 0, 9); scan = new Scan().withStartRow(ROW); scan.addColumn(FAMILY, QUALIFIER); @@ -1313,9 +1315,10 @@ public void testVersions() throws Exception { result = getSingleScanResult(ht, scan); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], - STAMPS[11], STAMPS[13], STAMPS[15] }, + STAMPS[11], STAMPS[13], STAMPS[15] }, new byte[][] { VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], - VALUES[11], VALUES[13], VALUES[15] }, 0, 9); + VALUES[11], VALUES[13], VALUES[15] }, + 0, 9); // Delete a version in the memstore and a version in a storefile Delete delete = new Delete(ROW); @@ -1330,9 +1333,10 @@ public void testVersions() throws Exception { result = ht.get(get); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], - STAMPS[9], STAMPS[13], STAMPS[15] }, + STAMPS[9], STAMPS[13], STAMPS[15] }, new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[8], - VALUES[9], VALUES[13], VALUES[15] }, 0, 9); 
+ VALUES[9], VALUES[13], VALUES[15] }, + 0, 9); scan = new Scan().withStartRow(ROW); scan.addColumn(FAMILY, QUALIFIER); @@ -1340,14 +1344,16 @@ public void testVersions() throws Exception { result = getSingleScanResult(ht, scan); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], - STAMPS[9], STAMPS[13], STAMPS[15] }, + STAMPS[9], STAMPS[13], STAMPS[15] }, new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[4], VALUES[5], VALUES[6], VALUES[8], - VALUES[9], VALUES[13], VALUES[15] }, 0, 9); + VALUES[9], VALUES[13], VALUES[15] }, + 0, 9); } } - @Test @SuppressWarnings("checkstyle:MethodLength") public void testVersionLimits() - throws Exception { + @Test + @SuppressWarnings("checkstyle:MethodLength") + public void testVersionLimits() throws Exception { final TableName tableName = name.getTableName(); byte[][] FAMILIES = makeNAscii(FAMILY, 3); int[] LIMITS = { 1, 3, 5 }; @@ -1518,7 +1524,8 @@ public void testVersions() throws Exception { } } - @Test public void testDeleteFamilyVersion() throws Exception { + @Test + public void testDeleteFamilyVersion() throws Exception { try (Admin admin = TEST_UTIL.getAdmin()) { final TableName tableName = name.getTableName(); @@ -1538,8 +1545,8 @@ public void testVersions() throws Exception { admin.flush(tableName); Delete delete = new Delete(ROW); - delete.addFamilyVersion(FAMILY, ts[1]); // delete version '2000' - delete.addFamilyVersion(FAMILY, ts[3]); // delete version '4000' + delete.addFamilyVersion(FAMILY, ts[1]); // delete version '2000' + delete.addFamilyVersion(FAMILY, ts[3]); // delete version '4000' ht.delete(delete); admin.flush(tableName); @@ -1556,15 +1563,16 @@ public void testVersions() throws Exception { } } - @Test public void testDeleteFamilyVersionWithOtherDeletes() throws Exception { + @Test + public void testDeleteFamilyVersionWithOtherDeletes() throws Exception { final TableName tableName = name.getTableName(); byte[][] QUALIFIERS = makeNAscii(QUALIFIER, 5); byte[][] VALUES = makeN(VALUE, 5); long[] ts = { 1000, 2000, 3000, 4000, 5000 }; - try (Admin admin = TEST_UTIL.getAdmin(); Table ht = TEST_UTIL.createTable(tableName, FAMILY, - 5)) { + try (Admin admin = TEST_UTIL.getAdmin(); + Table ht = TEST_UTIL.createTable(tableName, FAMILY, 5)) { Put put; Result result; Get get; @@ -1610,8 +1618,8 @@ public void testVersions() throws Exception { // 4. delete on ROWS[0] delete = new Delete(ROW2); - delete.addFamilyVersion(FAMILY, ts[1]); // delete version '2000' - delete.addFamilyVersion(FAMILY, ts[3]); // delete version '4000' + delete.addFamilyVersion(FAMILY, ts[1]); // delete version '2000' + delete.addFamilyVersion(FAMILY, ts[3]); // delete version '4000' ht.delete(delete); admin.flush(tableName); @@ -1663,7 +1671,8 @@ public void testVersions() throws Exception { } } - @Test public void testDeleteWithFailed() throws Exception { + @Test + public void testDeleteWithFailed() throws Exception { final TableName tableName = name.getTableName(); byte[][] FAMILIES = makeNAscii(FAMILY, 3); @@ -1762,7 +1771,7 @@ public void testDeletes() throws Exception { ht.delete(delete); // Expected client behavior might be that you can re-put deleted values - // But alas, this is not to be. We can't put them back in either case. + // But alas, this is not to be. We can't put them back in either case. 
put = new Put(ROW); put.addColumn(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); // 1000 diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index d3c055e75457..5cf0cf412898 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -83,7 +83,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos; -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestFromClientSide3 { @ClassRule @@ -91,8 +91,7 @@ public class TestFromClientSide3 { HBaseClassTestRule.forClass(TestFromClientSide3.class); private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide3.class); - private final static HBaseTestingUtil TEST_UTIL - = new HBaseTestingUtil(); + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final int WAITTABLE_MILLIS = 10000; private static byte[] FAMILY = Bytes.toBytes("testFamily"); private static int SLAVES = 3; @@ -137,8 +136,7 @@ public void tearDown() throws Exception { } } - private void randomCFPuts(Table table, byte[] row, byte[] family, int nPuts) - throws Exception { + private void randomCFPuts(Table table, byte[] row, byte[] family, int nPuts) throws Exception { Put put = new Put(row); Random rand = ThreadLocalRandom.current(); for (int i = 0; i < nPuts; i++) { @@ -264,13 +262,11 @@ private int getStoreFileCount(Admin admin, ServerName serverName, RegionInfo reg @Test public void testAdvancedConfigOverride() throws Exception { /* - * Overall idea: (1) create 3 store files and issue a compaction. config's - * compaction.min == 3, so should work. (2) Increase the compaction.min - * toggle in the HTD to 5 and modify table. If we use the HTD value instead - * of the default config value, adding 3 files and issuing a compaction - * SHOULD NOT work (3) Decrease the compaction.min toggle in the HCD to 2 - * and modify table. The CF schema should override the Table schema and now - * cause a minor compaction. + * Overall idea: (1) create 3 store files and issue a compaction. config's compaction.min == 3, + * so should work. (2) Increase the compaction.min toggle in the HTD to 5 and modify table. If + * we use the HTD value instead of the default config value, adding 3 files and issuing a + * compaction SHOULD NOT work (3) Decrease the compaction.min toggle in the HCD to 2 and modify + * table. The CF schema should override the Table schema and now cause a minor compaction. 
*/ TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3); @@ -308,7 +304,7 @@ public void testAdvancedConfigOverride() throws Exception { // change the compaction.min config option for this table to 5 LOG.info("hbase.hstore.compaction.min should now be 5"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(table.getDescriptor()) - .setValue("hbase.hstore.compaction.min", String.valueOf(5)).build(); + .setValue("hbase.hstore.compaction.min", String.valueOf(5)).build(); admin.modifyTable(htd); LOG.info("alter status finished"); @@ -327,9 +323,10 @@ public void testAdvancedConfigOverride() throws Exception { // change an individual CF's config option to 2 & online schema update LOG.info("hbase.hstore.compaction.min should now be 2"); htd = TableDescriptorBuilder.newBuilder(htd) - .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(FAMILY)) - .setValue("hbase.hstore.compaction.min", String.valueOf(2)).build()) - .build(); + .modifyColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(FAMILY)) + .setValue("hbase.hstore.compaction.min", String.valueOf(2)).build()) + .build(); admin.modifyTable(htd); LOG.info("alter status finished"); @@ -344,8 +341,8 @@ public void testAdvancedConfigOverride() throws Exception { break; } } catch (Exception e) { - LOG.debug("Waiting for region to come online: " + - Bytes.toStringBinary(loc.getRegion().getRegionName())); + LOG.debug("Waiting for region to come online: " + + Bytes.toStringBinary(loc.getRegion().getRegionName())); } Thread.sleep(40); } @@ -357,19 +354,20 @@ public void testAdvancedConfigOverride() throws Exception { LOG.info("Removing CF config value"); LOG.info("hbase.hstore.compaction.min should now be 5"); htd = TableDescriptorBuilder.newBuilder(htd) - .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(FAMILY)) - .setValue("hbase.hstore.compaction.min", null).build()) - .build(); + .modifyColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(FAMILY)) + .setValue("hbase.hstore.compaction.min", null).build()) + .build(); admin.modifyTable(htd); LOG.info("alter status finished"); assertNull(table.getDescriptor().getColumnFamily(FAMILY) - .getValue(Bytes.toBytes("hbase.hstore.compaction.min"))); + .getValue(Bytes.toBytes("hbase.hstore.compaction.min"))); } } } @Test - public void testHTableBatchWithEmptyPut () throws IOException, InterruptedException { + public void testHTableBatchWithEmptyPut() throws IOException, InterruptedException { try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); List actions = (List) new ArrayList(); @@ -416,15 +414,13 @@ public void testHTableWithLargeBatch() throws IOException, InterruptedException @Test public void testBatchWithRowMutation() throws Exception { LOG.info("Starting testBatchWithRowMutation"); - byte [][] QUALIFIERS = new byte [][] { - Bytes.toBytes("a"), Bytes.toBytes("b") - }; + byte[][] QUALIFIERS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b") }; try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - RowMutations arm = RowMutations.of(Collections.singletonList( - new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE))); + RowMutations arm = RowMutations + .of(Collections.singletonList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE))); Object[] batchResult = new Object[1]; 
table.batch(Arrays.asList(arm), batchResult); @@ -432,9 +428,8 @@ public void testBatchWithRowMutation() throws Exception { Result r = table.get(g); assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0]))); - arm = RowMutations.of(Arrays.asList( - new Put(ROW).addColumn(FAMILY, QUALIFIERS[1], VALUE), - new Delete(ROW).addColumns(FAMILY, QUALIFIERS[0]))); + arm = RowMutations.of(Arrays.asList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[1], VALUE), + new Delete(ROW).addColumns(FAMILY, QUALIFIERS[0]))); table.batch(Arrays.asList(arm), batchResult); r = table.get(g); assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1]))); @@ -443,10 +438,10 @@ public void testBatchWithRowMutation() throws Exception { // Test that we get the correct remote exception for RowMutations from batch() try { arm = RowMutations.of(Collections.singletonList( - new Put(ROW).addColumn(new byte[]{'b', 'o', 'g', 'u', 's'}, QUALIFIERS[0], VALUE))); + new Put(ROW).addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, QUALIFIERS[0], VALUE))); table.batch(Arrays.asList(arm), batchResult); fail("Expected RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException"); - } catch(RetriesExhaustedException e) { + } catch (RetriesExhaustedException e) { String msg = e.getMessage(); assertTrue(msg.contains("NoSuchColumnFamilyException")); } @@ -464,38 +459,38 @@ public void testBatchWithCheckAndMutate() throws Exception { byte[] row6 = Bytes.toBytes("row6"); byte[] row7 = Bytes.toBytes("row7"); - table.put(Arrays.asList( - new Put(row1).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), - new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), - new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), - new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), - new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), - new Put(row6).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)), - new Put(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")))); - - CheckAndMutate checkAndMutate1 = CheckAndMutate.newBuilder(row1) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new RowMutations(row1) - .add(new Put(row1).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("g"))) - .add(new Delete(row1).addColumns(FAMILY, Bytes.toBytes("A"))) - .add(new Increment(row1).addColumn(FAMILY, Bytes.toBytes("C"), 3L)) - .add(new Append(row1).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); + table.put( + Arrays.asList(new Put(row1).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")), + new Put(row2).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")), + new Put(row3).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")), + new Put(row4).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")), + new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")), + new Put(row6).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)), + new Put(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")))); + + CheckAndMutate checkAndMutate1 = + CheckAndMutate.newBuilder(row1).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new RowMutations(row1) + .add(new Put(row1).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("g"))) + .add(new Delete(row1).addColumns(FAMILY, Bytes.toBytes("A"))) + .add(new Increment(row1).addColumn(FAMILY, Bytes.toBytes("C"), 3L)) + .add(new Append(row1).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); Get get = 
new Get(row2).addColumn(FAMILY, Bytes.toBytes("B")); - RowMutations mutations = new RowMutations(row3) - .add(new Delete(row3).addColumns(FAMILY, Bytes.toBytes("C"))) - .add(new Put(row3).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) - .add(new Increment(row3).addColumn(FAMILY, Bytes.toBytes("A"), 5L)) - .add(new Append(row3).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); - CheckAndMutate checkAndMutate2 = CheckAndMutate.newBuilder(row4) - .ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("a")) - .build(new Put(row4).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("h"))); + RowMutations mutations = + new RowMutations(row3).add(new Delete(row3).addColumns(FAMILY, Bytes.toBytes("C"))) + .add(new Put(row3).addColumn(FAMILY, Bytes.toBytes("F"), Bytes.toBytes("f"))) + .add(new Increment(row3).addColumn(FAMILY, Bytes.toBytes("A"), 5L)) + .add(new Append(row3).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); + CheckAndMutate checkAndMutate2 = + CheckAndMutate.newBuilder(row4).ifEquals(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("a")) + .build(new Put(row4).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("h"))); Put put = new Put(row5).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("f")); - CheckAndMutate checkAndMutate3 = CheckAndMutate.newBuilder(row6) - .ifEquals(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)) - .build(new Increment(row6).addColumn(FAMILY, Bytes.toBytes("F"), 1)); - CheckAndMutate checkAndMutate4 = CheckAndMutate.newBuilder(row7) - .ifEquals(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")) - .build(new Append(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g"))); + CheckAndMutate checkAndMutate3 = + CheckAndMutate.newBuilder(row6).ifEquals(FAMILY, Bytes.toBytes("F"), Bytes.toBytes(10L)) + .build(new Increment(row6).addColumn(FAMILY, Bytes.toBytes("F"), 1)); + CheckAndMutate checkAndMutate4 = + CheckAndMutate.newBuilder(row7).ifEquals(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g")) + .build(new Append(row7).addColumn(FAMILY, Bytes.toBytes("G"), Bytes.toBytes("g"))); List actions = Arrays.asList(checkAndMutate1, get, mutations, checkAndMutate2, put, checkAndMutate3, checkAndMutate4); @@ -509,8 +504,7 @@ public void testBatchWithCheckAndMutate() throws Exception { assertEquals("d", Bytes.toString(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("D")))); - assertEquals("b", - Bytes.toString(((Result) results[1]).getValue(FAMILY, Bytes.toBytes("B")))); + assertEquals("b", Bytes.toString(((Result) results[1]).getValue(FAMILY, Bytes.toBytes("B")))); Result result = (Result) results[2]; assertTrue(result.getExists()); @@ -525,13 +519,13 @@ public void testBatchWithCheckAndMutate() throws Exception { checkAndMutateResult = (CheckAndMutateResult) results[5]; assertTrue(checkAndMutateResult.isSuccess()); - assertEquals(11, Bytes.toLong(checkAndMutateResult.getResult() - .getValue(FAMILY, Bytes.toBytes("F")))); + assertEquals(11, + Bytes.toLong(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("F")))); checkAndMutateResult = (CheckAndMutateResult) results[6]; assertTrue(checkAndMutateResult.isSuccess()); - assertEquals("gg", Bytes.toString(checkAndMutateResult.getResult() - .getValue(FAMILY, Bytes.toBytes("G")))); + assertEquals("gg", + Bytes.toString(checkAndMutateResult.getResult().getValue(FAMILY, Bytes.toBytes("G")))); result = table.get(new Get(row1)); assertEquals("g", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); @@ -562,7 +556,7 @@ public void testBatchWithCheckAndMutate() 
throws Exception { @Test public void testHTableExistsMethodSingleRegionSingleGet() - throws IOException, InterruptedException { + throws IOException, InterruptedException { try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); @@ -584,7 +578,7 @@ public void testHTableExistsMethodSingleRegionSingleGet() @Test public void testHTableExistsMethodSingleRegionMultipleGets() - throws IOException, InterruptedException { + throws IOException, InterruptedException { try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); @@ -655,9 +649,8 @@ public void testHTableExistsAllBeforeGet() throws IOException, InterruptedExcept @Test public void testHTableExistsMethodMultipleRegionsSingleGet() throws Exception { - try (Table table = TEST_UTIL.createTable( - tableName, new byte[][] { FAMILY }, - 1, new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, + new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); Put put = new Put(ROW); @@ -677,9 +670,8 @@ public void testHTableExistsMethodMultipleRegionsSingleGet() throws Exception { @Test public void testHTableExistsMethodMultipleRegionsMultipleGets() throws Exception { - try (Table table = TEST_UTIL.createTable( - tableName, - new byte[][] { FAMILY }, 1, new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, + new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); Put put = new Put(ROW); @@ -688,9 +680,9 @@ public void testHTableExistsMethodMultipleRegionsMultipleGets() throws Exception List gets = new ArrayList<>(); gets.add(new Get(ANOTHERROW)); - gets.add(new Get(Bytes.add(ROW, new byte[]{0x00}))); + gets.add(new Get(Bytes.add(ROW, new byte[] { 0x00 }))); gets.add(new Get(ROW)); - gets.add(new Get(Bytes.add(ANOTHERROW, new byte[]{0x00}))); + gets.add(new Get(Bytes.add(ANOTHERROW, new byte[] { 0x00 }))); LOG.info("Calling exists"); boolean[] results = table.exists(gets); @@ -700,26 +692,26 @@ public void testHTableExistsMethodMultipleRegionsMultipleGets() throws Exception assertFalse(results[3]); // Test with the first region. 
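For context on the exists() hunks around this point, here is a minimal sketch of the batched existence-check pattern these tests exercise. The Table handle, row names, and helper class below are illustrative assumptions rather than identifiers taken from the surrounding tests.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class ExistsSketch {
  // Probes several rows in a single call; exists() returns one flag per Get, in request
  // order, and ships no cell payload back to the client.
  static boolean[] probeRows(Table table, byte[] family, byte[] qualifier, byte[] value)
      throws IOException {
    byte[] presentRow = Bytes.toBytes("row-present");
    byte[] absentRow = Bytes.toBytes("row-absent");
    table.put(new Put(presentRow).addColumn(family, qualifier, value));
    List<Get> gets = new ArrayList<>();
    gets.add(new Get(presentRow)); // expected: true
    gets.add(new Get(absentRow));  // expected: false
    return table.exists(gets);
  }
}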
- put = new Put(new byte[]{0x00}); + put = new Put(new byte[] { 0x00 }); put.addColumn(FAMILY, QUALIFIER, VALUE); table.put(put); gets = new ArrayList<>(); - gets.add(new Get(new byte[]{0x00})); - gets.add(new Get(new byte[]{0x00, 0x00})); + gets.add(new Get(new byte[] { 0x00 })); + gets.add(new Get(new byte[] { 0x00, 0x00 })); results = table.exists(gets); assertTrue(results[0]); assertFalse(results[1]); // Test with the last region - put = new Put(new byte[]{(byte) 0xff, (byte) 0xff}); + put = new Put(new byte[] { (byte) 0xff, (byte) 0xff }); put.addColumn(FAMILY, QUALIFIER, VALUE); table.put(put); gets = new ArrayList<>(); - gets.add(new Get(new byte[]{(byte) 0xff})); - gets.add(new Get(new byte[]{(byte) 0xff, (byte) 0xff})); - gets.add(new Get(new byte[]{(byte) 0xff, (byte) 0xff, (byte) 0xff})); + gets.add(new Get(new byte[] { (byte) 0xff })); + gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff })); + gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff, (byte) 0xff })); results = table.exists(gets); assertFalse(results[0]); assertTrue(results[1]); @@ -729,7 +721,7 @@ public void testHTableExistsMethodMultipleRegionsMultipleGets() throws Exception @Test public void testGetEmptyRow() throws Exception { - //Create a table and put in 1 row + // Create a table and put in 1 row try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); @@ -737,7 +729,7 @@ public void testGetEmptyRow() throws Exception { put.addColumn(FAMILY, COL_QUAL, VAL_BYTES); table.put(put); - //Try getting the row with an empty row key + // Try getting the row with an empty row key Result res = null; try { res = table.get(new Get(new byte[0])); @@ -756,7 +748,7 @@ public void testGetEmptyRow() throws Exception { @Test public void testConnectionDefaultUsesCodec() throws Exception { try ( - RpcClient client = RpcClientFactory.createClient(TEST_UTIL.getConfiguration(), "cluster")) { + RpcClient client = RpcClientFactory.createClient(TEST_UTIL.getConfiguration(), "cluster")) { assertTrue(client.hasCellBlockSupport()); } } @@ -791,8 +783,8 @@ public void testRowMutationsWithPreBatchMutate() throws Exception { private void testPreBatchMutate(TableName tableName, Runnable rn) throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(WaitingForScanObserver.class.getName()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(WaitingForScanObserver.class.getName()).build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); // Don't use waitTableAvailable(), because the scanner will mess up the co-processor @@ -818,16 +810,17 @@ private void testPreBatchMutate(TableName tableName, Runnable rn) throws Excepti service.shutdown(); service.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); assertEquals("The write is blocking by RegionObserver#postBatchMutate" - + ", so the data is invisible to reader", 0, cells.size()); + + ", so the data is invisible to reader", + 0, cells.size()); TEST_UTIL.deleteTable(tableName); } @Test public void testLockLeakWithDelta() throws Exception, Throwable { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(WaitingForMultiMutationsObserver.class.getName()) - .setValue("hbase.rowlock.wait.duration", String.valueOf(5000)).build(); + 
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(WaitingForMultiMutationsObserver.class.getName()) + .setValue("hbase.rowlock.wait.duration", String.valueOf(5000)).build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); @@ -861,7 +854,7 @@ public void testLockLeakWithDelta() throws Exception, Throwable { appendService.shutdown(); appendService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); WaitingForMultiMutationsObserver observer = - find(tableName, WaitingForMultiMutationsObserver.class); + find(tableName, WaitingForMultiMutationsObserver.class); observer.latch.countDown(); putService.shutdown(); putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); @@ -880,10 +873,10 @@ public void testLockLeakWithDelta() throws Exception, Throwable { @Test public void testMultiRowMutations() throws Exception, Throwable { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(MultiRowMutationEndpoint.class.getName()) - .setCoprocessor(WaitingForMultiMutationsObserver.class.getName()) - .setValue("hbase.rowlock.wait.duration", String.valueOf(5000)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(MultiRowMutationEndpoint.class.getName()) + .setCoprocessor(WaitingForMultiMutationsObserver.class.getName()) + .setValue("hbase.rowlock.wait.duration", String.valueOf(5000)).build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); @@ -892,7 +885,7 @@ public void testMultiRowMutations() throws Exception, Throwable { copy.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); try (Connection con = ConnectionFactory.createConnection(copy)) { byte[] row = Bytes.toBytes("ROW-0"); - byte[] rowLocked= Bytes.toBytes("ROW-1"); + byte[] rowLocked = Bytes.toBytes("ROW-1"); byte[] value0 = Bytes.toBytes("VALUE-0"); byte[] value1 = Bytes.toBytes("VALUE-1"); byte[] value2 = Bytes.toBytes("VALUE-2"); @@ -917,21 +910,20 @@ public void testMultiRowMutations() throws Exception, Throwable { put2.addColumn(FAMILY, QUALIFIER, value2); try (Table table = con.getTable(tableName)) { MultiRowMutationProtos.MutateRowsRequest request = - MultiRowMutationProtos.MutateRowsRequest.newBuilder() - .addMutationRequest( - ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, put1)) - .addMutationRequest( - ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, put2)) - .build(); - table.coprocessorService(MultiRowMutationProtos.MultiRowMutationService.class, - ROW, ROW, + MultiRowMutationProtos.MutateRowsRequest.newBuilder() + .addMutationRequest( + ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, put1)) + .addMutationRequest( + ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, put2)) + .build(); + table.coprocessorService(MultiRowMutationProtos.MultiRowMutationService.class, ROW, ROW, (MultiRowMutationProtos.MultiRowMutationService exe) -> { ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); exe.mutateRows(controller, request, rpcCallback); - if (controller.failedOnException() && - !(controller.getFailedOn() instanceof UnknownProtocolException)) { + if 
(controller.failedOnException() + && !(controller.getFailedOn() instanceof UnknownProtocolException)) { exceptionDuringMutateRows.set(true); } return rpcCallback.get(); @@ -942,8 +934,8 @@ public void testMultiRowMutations() throws Exception, Throwable { }); cpService.shutdown(); cpService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); - WaitingForMultiMutationsObserver observer = find(tableName, - WaitingForMultiMutationsObserver.class); + WaitingForMultiMutationsObserver observer = + find(tableName, WaitingForMultiMutationsObserver.class); observer.latch.countDown(); putService.shutdown(); putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); @@ -964,23 +956,21 @@ public void testMultiRowMutations() throws Exception, Throwable { } /** - * A test case for issue HBASE-17482 - * After combile seqid with mvcc readpoint, seqid/mvcc is acquired and stamped - * onto cells in the append thread, a countdown latch is used to ensure that happened - * before cells can be put into memstore. But the MVCCPreAssign patch(HBASE-16698) - * make the seqid/mvcc acquirement in handler thread and stamping in append thread - * No countdown latch to assure cells in memstore are stamped with seqid/mvcc. - * If cells without mvcc(A.K.A mvcc=0) are put into memstore, then a scanner - * with a smaller readpoint can see these data, which disobey the multi version - * concurrency control rules. - * This test case is to reproduce this scenario. + * A test case for issue HBASE-17482 After combile seqid with mvcc readpoint, seqid/mvcc is + * acquired and stamped onto cells in the append thread, a countdown latch is used to ensure that + * happened before cells can be put into memstore. But the MVCCPreAssign patch(HBASE-16698) make + * the seqid/mvcc acquirement in handler thread and stamping in append thread No countdown latch + * to assure cells in memstore are stamped with seqid/mvcc. If cells without mvcc(A.K.A mvcc=0) + * are put into memstore, then a scanner with a smaller readpoint can see these data, which + * disobey the multi version concurrency control rules. This test case is to reproduce this + * scenario. * @throws IOException */ @Test public void testMVCCUsingMVCCPreAssign() throws IOException, InterruptedException { try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - //put two row first to init the scanner + // put two row first to init the scanner Put put = new Put(Bytes.toBytes("0")); put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes("0")); table.put(put); @@ -992,7 +982,7 @@ public void testMVCCUsingMVCCPreAssign() throws IOException, InterruptedExceptio scan.setCaching(1); ResultScanner scanner = table.getScanner(scan); int rowNum = scanner.next() != null ? 1 : 0; - //the started scanner shouldn't see the rows put below + // the started scanner shouldn't see the rows put below for (int i = 1; i < 1000; i++) { put = new Put(Bytes.toBytes(String.valueOf(i))); put.setDurability(Durability.ASYNC_WAL); @@ -1002,7 +992,7 @@ public void testMVCCUsingMVCCPreAssign() throws IOException, InterruptedExceptio for (Result result : scanner) { rowNum++; } - //scanner should only see two rows + // scanner should only see two rows assertEquals(2, rowNum); scanner = table.getScanner(scan); rowNum = 0; @@ -1043,8 +1033,8 @@ public void run() { successCnt.getAndIncrement(); } else { LOG.error("Should be equal but not, original value: " + Bytes.toString(value) - + ", returned value: " - + (returnedValue == null ? 
"null" : Bytes.toString(returnedValue))); + + ", returned value: " + + (returnedValue == null ? "null" : Bytes.toString(returnedValue))); } } catch (Throwable e) { // do nothing @@ -1066,24 +1056,25 @@ public void run() { } private static void assertNoLocks(final TableName tableName) - throws IOException, InterruptedException { + throws IOException, InterruptedException { HRegion region = (HRegion) find(tableName); assertEquals(0, region.getLockedRows().size()); } - private static HRegion find(final TableName tableName) - throws IOException, InterruptedException { + + private static HRegion find(final TableName tableName) throws IOException, InterruptedException { HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName); List regions = rs.getRegions(tableName); assertEquals(1, regions.size()); return regions.get(0); } - private static T find(final TableName tableName, - Class clz) throws IOException, InterruptedException { + private static T find(final TableName tableName, Class clz) + throws IOException, InterruptedException { HRegion region = find(tableName); Coprocessor cp = region.getCoprocessorHost().findCoprocessor(clz.getName()); - assertTrue("The cp instance should be " + clz.getName() - + ", current instance is " + cp.getClass().getName(), clz.isInstance(cp)); + assertTrue("The cp instance should be " + clz.getName() + ", current instance is " + + cp.getClass().getName(), + clz.isInstance(cp)); return clz.cast(cp); } @@ -1098,7 +1089,7 @@ public Optional getRegionObserver() { @Override public void postBatchMutate(final ObserverContext c, - final MiniBatchOperationInProgress miniBatchOp) throws IOException { + final MiniBatchOperationInProgress miniBatchOp) throws IOException { try { latch.await(); } catch (InterruptedException ex) { @@ -1117,7 +1108,7 @@ public Optional getRegionObserver() { @Override public void postBatchMutate(final ObserverContext c, - final MiniBatchOperationInProgress miniBatchOp) throws IOException { + final MiniBatchOperationInProgress miniBatchOp) throws IOException { try { // waiting for scanner latch.await(); @@ -1128,7 +1119,7 @@ public void postBatchMutate(final ObserverContext @Override public RegionScanner postScannerOpen(final ObserverContext e, - final Scan scan, final RegionScanner s) throws IOException { + final Scan scan, final RegionScanner s) throws IOException { latch.countDown(); return s; } @@ -1144,10 +1135,11 @@ static byte[] generateHugeValue(int size) { } @Test - public void testScanWithBatchSizeReturnIncompleteCells() throws IOException, InterruptedException { + public void testScanWithBatchSizeReturnIncompleteCells() + throws IOException, InterruptedException { TableDescriptor hd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(3).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(3).build()) + .build(); try (Table table = TEST_UTIL.createTable(hd, null)) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); @@ -1169,14 +1161,14 @@ public void testScanWithBatchSizeReturnIncompleteCells() throws IOException, Int Scan scan = new Scan(); scan.withStartRow(ROW).withStopRow(ROW, true).addFamily(FAMILY).setBatch(3) - .setMaxResultSize(4 * 1024 * 1024); + .setMaxResultSize(4 * 1024 * 1024); Result result; try (ResultScanner scanner = table.getScanner(scan)) { List list = new ArrayList<>(); /* - * The first scan rpc should return a result with 2 cells, because 3MB + 4MB > 4MB; The second - * 
scan rpc should return a result with 3 cells, because reach the batch limit = 3; The - * mayHaveMoreCellsInRow in last result should be false in the scan rpc. BTW, the + * The first scan rpc should return a result with 2 cells, because 3MB + 4MB > 4MB; The + * second scan rpc should return a result with 3 cells, because reach the batch limit = 3; + * The mayHaveMoreCellsInRow in last result should be false in the scan rpc. BTW, the * moreResultsInRegion also would be false. Finally, the client should collect all the cells * into two result: 2+3 -> 3+2; */ @@ -1192,7 +1184,7 @@ public void testScanWithBatchSizeReturnIncompleteCells() throws IOException, Int scan = new Scan(); scan.withStartRow(ROW).withStopRow(ROW, true).addFamily(FAMILY).setBatch(2) - .setMaxResultSize(4 * 1024 * 1024); + .setMaxResultSize(4 * 1024 * 1024); try (ResultScanner scanner = table.getScanner(scan)) { List list = new ArrayList<>(); while ((result = scanner.next()) != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java index 6ce193969b35..757dbe6223f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java @@ -60,20 +60,18 @@ import org.slf4j.LoggerFactory; /** - * Run tests that use the HBase clients; {@link Table}. - * Sets up the HBase mini cluster once at start and runs through all client tests. - * Each creates a table named for the method and does its stuff against that. - * - * Parameterized to run with different registry implementations. + * Run tests that use the HBase clients; {@link Table}. Sets up the HBase mini cluster once at start + * and runs through all client tests. Each creates a table named for the method and does its stuff + * against that. Parameterized to run with different registry implementations. 
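The scan batching comment a little above this point spells out how Scan#setBatch and Scan#setMaxResultSize interact. As a rough illustration only, the sketch below slices one wide row into partial Results; it assumes an open Table handle, and the row and family arguments are placeholders.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

final class BatchedScanSketch {
  // Reads one wide row back in slices: at most three cells per Result, with each scan
  // RPC additionally capped at ~4 MB, so a slice can end early when the size limit wins.
  static int countCells(Table table, byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan().withStartRow(row).withStopRow(row, true).addFamily(family)
      .setBatch(3).setMaxResultSize(4L * 1024 * 1024);
    int cells = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        // Partial slices of the same row arrive as separate Result objects.
        cells += result.rawCells().length;
      }
    }
    return cells;
  }
}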
*/ -@Category({LargeTests.class, ClientTests.class}) -@SuppressWarnings ("deprecation") +@Category({ LargeTests.class, ClientTests.class }) +@SuppressWarnings("deprecation") @RunWith(Parameterized.class) public class TestFromClientSide4 extends FromClientSideBase { private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide4.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSide4.class); + HBaseClassTestRule.forClass(TestFromClientSide4.class); @Rule public TableNameTestRule name = new TableNameTestRule(); @@ -88,7 +86,7 @@ public TestFromClientSide4(Class registry, int numHedgedReqs) throws Exception { @Parameterized.Parameters public static Collection parameters() { return Arrays.asList(new Object[][] { { MasterRegistry.class, 1 }, { MasterRegistry.class, 2 }, - { ZKConnectionRegistry.class, 1 } }); + { ZKConnectionRegistry.class, 1 } }); } @AfterClass @@ -99,7 +97,8 @@ public static void tearDownAfterClass() throws Exception { /** * Test batch operations with combination of valid and invalid args */ - @Test public void testBatchOperationsWithErrors() throws Exception { + @Test + public void testBatchOperationsWithErrors() throws Exception { final TableName tableName = name.getTableName(); try (Table foo = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 10)) { @@ -212,14 +211,13 @@ public static void tearDownAfterClass() throws Exception { // /** - * HBASE-867 - * If millions of columns in a column family, hbase scanner won't come up - * Test will create numRows rows, each with numColsPerRow columns - * (1 version each), and attempt to scan them all. - * To test at scale, up numColsPerRow to the millions - * (have not gotten that to work running as junit though) + * HBASE-867 If millions of columns in a column family, hbase scanner won't come up Test will + * create numRows rows, each with numColsPerRow columns (1 version each), and attempt to scan them + * all. 
To test at scale, up numColsPerRow to the millions (have not gotten that to work running + * as junit though) */ - @Test public void testJiraTest867() throws Exception { + @Test + public void testJiraTest867() throws Exception { int numRows = 10; int numColsPerRow = 2000; @@ -238,9 +236,9 @@ public static void tearDownAfterClass() throws Exception { for (int j = 0; j < numColsPerRow; j++) { put.addColumn(FAMILY, QUALIFIERS[j], QUALIFIERS[j]); } - assertEquals( - "Put expected to contain " + numColsPerRow + " columns but " + "only contains " + put - .size(), put.size(), numColsPerRow); + assertEquals("Put expected to contain " + numColsPerRow + " columns but " + "only contains " + + put.size(), + put.size(), numColsPerRow); ht.put(put); } @@ -303,11 +301,11 @@ public static void tearDownAfterClass() throws Exception { } /** - * HBASE-861 - * get with timestamp will return a value if there is a version with an - * earlier timestamp + * HBASE-861 get with timestamp will return a value if there is a version with an earlier + * timestamp */ - @Test public void testJiraTest861() throws Exception { + @Test + public void testJiraTest861() throws Exception { final TableName tableName = name.getTableName(); byte[][] VALUES = makeNAscii(VALUE, 7); long[] STAMPS = makeStamps(7); @@ -365,11 +363,11 @@ public static void tearDownAfterClass() throws Exception { } /** - * HBASE-33 - * Add a HTable get/obtainScanner method that retrieves all versions of a - * particular column and row between two timestamps + * HBASE-33 Add a HTable get/obtainScanner method that retrieves all versions of a particular + * column and row between two timestamps */ - @Test public void testJiraTest33() throws Exception { + @Test + public void testJiraTest33() throws Exception { final TableName tableName = name.getTableName(); byte[][] VALUES = makeNAscii(VALUE, 7); long[] STAMPS = makeStamps(7); @@ -413,10 +411,10 @@ public static void tearDownAfterClass() throws Exception { } /** - * HBASE-1014 - * commit(BatchUpdate) method should return timestamp + * HBASE-1014 commit(BatchUpdate) method should return timestamp */ - @Test public void testJiraTest1014() throws Exception { + @Test + public void testJiraTest1014() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10)) { @@ -436,10 +434,10 @@ public static void tearDownAfterClass() throws Exception { } /** - * HBASE-1182 - * Scan for columns > some timestamp + * HBASE-1182 Scan for columns > some timestamp */ - @Test public void testJiraTest1182() throws Exception { + @Test + public void testJiraTest1182() throws Exception { final TableName tableName = name.getTableName(); byte[][] VALUES = makeNAscii(VALUE, 7); long[] STAMPS = makeStamps(7); @@ -479,10 +477,10 @@ public static void tearDownAfterClass() throws Exception { } /** - * HBASE-52 - * Add a means of scanning over all versions + * HBASE-52 Add a means of scanning over all versions */ - @Test public void testJiraTest52() throws Exception { + @Test + public void testJiraTest52() throws Exception { final TableName tableName = name.getTableName(); byte[][] VALUES = makeNAscii(VALUE, 7); long[] STAMPS = makeStamps(7); @@ -620,7 +618,8 @@ public void testDuplicateVersions() throws Exception { assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, 0, 6); + VALUES[8] }, + 0, 
6); scan = new Scan().withStartRow(ROW); scan.addColumn(FAMILY, QUALIFIER); @@ -629,7 +628,8 @@ public void testDuplicateVersions() throws Exception { assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, 0, 6); + VALUES[8] }, + 0, 6); get = new Get(ROW); get.readVersions(7); @@ -637,7 +637,8 @@ public void testDuplicateVersions() throws Exception { assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, 0, 6); + VALUES[8] }, + 0, 6); scan = new Scan().withStartRow(ROW); scan.readVersions(7); @@ -645,7 +646,8 @@ public void testDuplicateVersions() throws Exception { assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, 0, 6); + VALUES[8] }, + 0, 6); // Verify we can get each one properly getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); @@ -681,9 +683,10 @@ public void testDuplicateVersions() throws Exception { result = ht.get(get); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], - STAMPS[11], STAMPS[13], STAMPS[15] }, + STAMPS[11], STAMPS[13], STAMPS[15] }, new byte[][] { VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], - VALUES[11], VALUES[13], VALUES[15] }, 0, 9); + VALUES[11], VALUES[13], VALUES[15] }, + 0, 9); scan = new Scan().withStartRow(ROW); scan.addColumn(FAMILY, QUALIFIER); @@ -691,9 +694,10 @@ public void testDuplicateVersions() throws Exception { result = getSingleScanResult(ht, scan); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], - STAMPS[11], STAMPS[13], STAMPS[15] }, + STAMPS[11], STAMPS[13], STAMPS[15] }, new byte[][] { VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], - VALUES[11], VALUES[13], VALUES[15] }, 0, 9); + VALUES[11], VALUES[13], VALUES[15] }, + 0, 9); // Delete a version in the memstore and a version in a storefile Delete delete = new Delete(ROW); @@ -708,9 +712,10 @@ public void testDuplicateVersions() throws Exception { result = ht.get(get); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], - STAMPS[9], STAMPS[13], STAMPS[15] }, + STAMPS[9], STAMPS[13], STAMPS[15] }, new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[8], - VALUES[9], VALUES[13], VALUES[15] }, 0, 9); + VALUES[9], VALUES[13], VALUES[15] }, + 0, 9); scan = new Scan().withStartRow(ROW); scan.addColumn(FAMILY, QUALIFIER); @@ -718,13 +723,15 @@ public void testDuplicateVersions() throws Exception { result = getSingleScanResult(ht, scan); assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], - STAMPS[9], STAMPS[13], STAMPS[15] }, + STAMPS[9], STAMPS[13], STAMPS[15] }, new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[8], - VALUES[9], VALUES[13], VALUES[15] }, 0, 9); + 
VALUES[9], VALUES[13], VALUES[15] }, + 0, 9); } } - @Test public void testUpdates() throws Exception { + @Test + public void testUpdates() throws Exception { final TableName tableName = name.getTableName(); try (Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10)) { @@ -772,7 +779,8 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testUpdatesWithMajorCompaction() throws Exception { + @Test + public void testUpdatesWithMajorCompaction() throws Exception { final TableName tableName = name.getTableName(); try (Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10); Admin admin = TEST_UTIL.getAdmin()) { @@ -831,7 +839,8 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testMajorCompactionBetweenTwoUpdates() throws Exception { + @Test + public void testMajorCompactionBetweenTwoUpdates() throws Exception { final TableName tableName = name.getTableName(); try (Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10); Admin admin = TEST_UTIL.getAdmin()) { @@ -896,7 +905,8 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testGet_EmptyTable() throws IOException { + @Test + public void testGet_EmptyTable() throws IOException { try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { Get get = new Get(ROW); get.addFamily(FAMILY); @@ -905,7 +915,8 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testGet_NullQualifier() throws IOException { + @Test + public void testGet_NullQualifier() throws IOException { try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); @@ -928,7 +939,8 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testGet_NonExistentRow() throws IOException { + @Test + public void testGet_NonExistentRow() throws IOException { try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); @@ -950,14 +962,15 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testPut() throws IOException { + @Test + public void testPut() throws IOException { final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); final byte[] row1 = Bytes.toBytes("row1"); final byte[] row2 = Bytes.toBytes("row2"); final byte[] value = Bytes.toBytes("abcd"); - try (Table table = TEST_UTIL - .createTable(name.getTableName(), new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY })) { + try (Table table = TEST_UTIL.createTable(name.getTableName(), + new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY })) { Put put = new Put(row1); put.addColumn(CONTENTS_FAMILY, null, value); table.put(put); @@ -968,7 +981,7 @@ public void testDuplicateVersions() throws Exception { assertEquals(1, put.size()); assertEquals(1, put.getFamilyCellMap().get(CONTENTS_FAMILY).size()); - // KeyValue v1 expectation. Cast for now until we go all Cell all the time. TODO + // KeyValue v1 expectation. Cast for now until we go all Cell all the time. 
TODO KeyValue kv = (KeyValue) put.getFamilyCellMap().get(CONTENTS_FAMILY).get(0); assertTrue(Bytes.equals(CellUtil.cloneFamily(kv), CONTENTS_FAMILY)); @@ -991,7 +1004,8 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testPutNoCF() throws IOException { + @Test + public void testPutNoCF() throws IOException { final byte[] BAD_FAM = Bytes.toBytes("BAD_CF"); final byte[] VAL = Bytes.toBytes(100); try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { @@ -1008,13 +1022,14 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testRowsPut() throws IOException { + @Test + public void testRowsPut() throws IOException { final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); final int NB_BATCH_ROWS = 10; final byte[] value = Bytes.toBytes("abcd"); - try (Table table = TEST_UTIL - .createTable(name.getTableName(), new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY })) { + try (Table table = TEST_UTIL.createTable(name.getTableName(), + new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY })) { ArrayList rowsUpdate = new ArrayList<>(); for (int i = 0; i < NB_BATCH_ROWS; i++) { byte[] row = Bytes.toBytes("row" + i); @@ -1028,7 +1043,8 @@ public void testDuplicateVersions() throws Exception { scan.addFamily(CONTENTS_FAMILY); try (ResultScanner scanner = table.getScanner(scan)) { int nbRows = 0; - for (@SuppressWarnings("unused") Result row : scanner) { + for (@SuppressWarnings("unused") + Result row : scanner) { nbRows++; } assertEquals(NB_BATCH_ROWS, nbRows); @@ -1036,13 +1052,14 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testRowsPutBufferedManyManyFlushes() throws IOException { + @Test + public void testRowsPutBufferedManyManyFlushes() throws IOException { final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); final byte[] value = Bytes.toBytes("abcd"); final int NB_BATCH_ROWS = 10; - try (Table table = TEST_UTIL - .createTable(name.getTableName(), new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY })) { + try (Table table = TEST_UTIL.createTable(name.getTableName(), + new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY })) { ArrayList rowsUpdate = new ArrayList<>(); for (int i = 0; i < NB_BATCH_ROWS * 10; i++) { byte[] row = Bytes.toBytes("row" + i); @@ -1057,7 +1074,8 @@ public void testDuplicateVersions() throws Exception { scan.addFamily(CONTENTS_FAMILY); try (ResultScanner scanner = table.getScanner(scan)) { int nbRows = 0; - for (@SuppressWarnings("unused") Result row : scanner) { + for (@SuppressWarnings("unused") + Result row : scanner) { nbRows++; } assertEquals(NB_BATCH_ROWS * 10, nbRows); @@ -1065,7 +1083,8 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testAddKeyValue() { + @Test + public void testAddKeyValue() { final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); final byte[] value = Bytes.toBytes("abcd"); final byte[] row1 = Bytes.toBytes("row1"); @@ -1097,7 +1116,8 @@ public void testDuplicateVersions() throws Exception { /** * test for HBASE-737 */ - @Test public void testHBase737() throws IOException { + @Test + public void testHBase737() throws IOException { final byte[] FAM1 = Bytes.toBytes("fam1"); final byte[] FAM2 = Bytes.toBytes("fam2"); // Open table @@ -1109,7 +1129,7 @@ public void testDuplicateVersions() throws Exception { try { Thread.sleep(1000); } catch (InterruptedException i) { - //ignore + // ignore } put = new Put(ROW); @@ 
-1119,7 +1139,7 @@ public void testDuplicateVersions() throws Exception { try { Thread.sleep(1000); } catch (InterruptedException i) { - //ignore + // ignore } put = new Put(ROW); @@ -1157,7 +1177,7 @@ public void testDuplicateVersions() throws Exception { try { Thread.sleep(1000); } catch (InterruptedException i) { - //ignore + // ignore } scan = new Scan(); scan.addFamily(FAM1); @@ -1179,7 +1199,8 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testListTables() throws IOException { + @Test + public void testListTables() throws IOException { final String testTableName = name.getTableName().toString(); final TableName tableName1 = TableName.valueOf(testTableName + "1"); final TableName tableName2 = TableName.valueOf(testTableName + "2"); @@ -1207,28 +1228,29 @@ public void testDuplicateVersions() throws Exception { } /** - * simple test that just executes parts of the client - * API that accept a pre-created Connection instance + * simple test that just executes parts of the client API that accept a pre-created Connection + * instance */ - @Test public void testUnmanagedHConnection() throws IOException { + @Test + public void testUnmanagedHConnection() throws IOException { final TableName tableName = name.getTableName(); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY); try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table t = conn.getTable(tableName); - Admin admin = conn.getAdmin()) { + Table t = conn.getTable(tableName); + Admin admin = conn.getAdmin()) { assertTrue(admin.tableExists(tableName)); assertTrue(t.get(new Get(ROW)).isEmpty()); } } /** - * test of that unmanaged HConnections are able to reconnect - * properly (see HBASE-5058) + * test of that unmanaged HConnections are able to reconnect properly (see HBASE-5058) */ - @Test public void testUnmanagedHConnectionReconnect() throws Exception { + @Test + public void testUnmanagedHConnectionReconnect() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); - Class registryImpl = conf - .getClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, ZKConnectionRegistry.class); + Class registryImpl = conf.getClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, + ZKConnectionRegistry.class); // This test does not make sense for MasterRegistry since it stops the only master in the // cluster and starts a new master without populating the underlying config for the connection. 
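The unmanaged-connection tests above revolve around client API calls that accept a caller-owned Connection. A compact sketch of that usage, assuming only a Configuration and an existing TableName; the helper name and probe row are illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class UnmanagedConnectionSketch {
  // The caller owns the Connection; Table and Admin are lightweight views onto it,
  // and all three are released by the try-with-resources block.
  static boolean rowIsEmpty(Configuration conf, TableName tableName) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table t = conn.getTable(tableName);
        Admin admin = conn.getAdmin()) {
      if (!admin.tableExists(tableName)) {
        return true; // nothing to read from
      }
      return t.get(new Get(Bytes.toBytes("some-row"))).isEmpty();
    }
  }
}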
Assume.assumeFalse(registryImpl.equals(MasterRegistry.class)); @@ -1260,7 +1282,8 @@ public void testDuplicateVersions() throws Exception { } } - @Test public void testMiscHTableStuff() throws IOException { + @Test + public void testMiscHTableStuff() throws IOException { final String testTableName = name.getTableName().toString(); final TableName tableAname = TableName.valueOf(testTableName + "A"); final TableName tableBname = TableName.valueOf(testTableName + "B"); @@ -1269,7 +1292,7 @@ public void testDuplicateVersions() throws Exception { byte[] value = Bytes.toBytes("value"); try (Table a = TEST_UTIL.createTable(tableAname, HConstants.CATALOG_FAMILY); - Table b = TEST_UTIL.createTable(tableBname, HConstants.CATALOG_FAMILY)) { + Table b = TEST_UTIL.createTable(tableBname, HConstants.CATALOG_FAMILY)) { Put put = new Put(ROW); put.addColumn(HConstants.CATALOG_FAMILY, null, value); a.put(put); @@ -1311,7 +1334,7 @@ public void testDuplicateVersions() throws Exception { admin.disableTable(tableAname); // add a user attribute to HTD TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(desc).setValue(attrName, attrValue); + TableDescriptorBuilder.newBuilder(desc).setValue(attrName, attrValue); // add a user attribute to HCD for (ColumnFamilyDescriptor c : desc.getColumnFamilies()) { builder.modifyColumnFamily( @@ -1338,4 +1361,3 @@ public void testDuplicateVersions() throws Exception { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java index dd35c5e61fbf..84f69c71dea1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java @@ -102,14 +102,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse; /** - * Run tests that use the HBase clients; {@link Table}. - * Sets up the HBase mini cluster once at start and runs through all client tests. - * Each creates a table named for the method and does its stuff against that. - * - * Parameterized to run with different registry implementations. + * Run tests that use the HBase clients; {@link Table}. Sets up the HBase mini cluster once at start + * and runs through all client tests. Each creates a table named for the method and does its stuff + * against that. Parameterized to run with different registry implementations. */ -@Category({LargeTests.class, ClientTests.class}) -@SuppressWarnings ("deprecation") +@Category({ LargeTests.class, ClientTests.class }) +@SuppressWarnings("deprecation") @RunWith(Parameterized.class) public class TestFromClientSide5 extends FromClientSideBase { private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide5.class); @@ -121,7 +119,8 @@ public class TestFromClientSide5 extends FromClientSideBase { public TableNameTestRule name = new TableNameTestRule(); // To keep the child classes happy. 
- TestFromClientSide5() {} + TestFromClientSide5() { + } public TestFromClientSide5(Class registry, int numHedgedReqs) throws Exception { initialize(registry, numHedgedReqs, MultiRowMutationEndpoint.class); @@ -129,14 +128,12 @@ public TestFromClientSide5(Class registry, int numHedgedReqs) throws Exception { @Parameterized.Parameters public static Collection parameters() { - return Arrays.asList(new Object[][] { - { MasterRegistry.class, 1}, - { MasterRegistry.class, 2}, - { ZKConnectionRegistry.class, 1} - }); + return Arrays.asList(new Object[][] { { MasterRegistry.class, 1 }, { MasterRegistry.class, 2 }, + { ZKConnectionRegistry.class, 1 } }); } - @AfterClass public static void tearDownAfterClass() throws Exception { + @AfterClass + public static void tearDownAfterClass() throws Exception { afterClass(); } @@ -152,10 +149,10 @@ public void testGetClosestRowBefore() throws IOException, InterruptedException { final byte[] beforeThirdRow = Bytes.toBytes("row33"); final byte[] beforeForthRow = Bytes.toBytes("row44"); - try (Table table = - TEST_UTIL.createTable(tableName, + try ( + Table table = TEST_UTIL.createTable(tableName, new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 1024); - RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow // in Store.rowAtOrBeforeFromStoreFile @@ -269,17 +266,15 @@ public void testScanVariableReuse() { public void testMultiRowMutation() throws Exception { LOG.info("Starting testMultiRowMutation"); final TableName tableName = name.getTableName(); - final byte [] ROW1 = Bytes.toBytes("testRow1"); - final byte [] ROW2 = Bytes.toBytes("testRow2"); - final byte [] ROW3 = Bytes.toBytes("testRow3"); + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] ROW3 = Bytes.toBytes("testRow3"); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { // Add initial data - t.batch(Arrays.asList( - new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE), + t.batch(Arrays.asList(new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE), new Put(ROW2).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(1L)), - new Put(ROW3).addColumn(FAMILY, QUALIFIER, VALUE) - ), new Object[3]); + new Put(ROW3).addColumn(FAMILY, QUALIFIER, VALUE)), new Object[3]); // Execute MultiRowMutation Put put = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); @@ -302,7 +297,7 @@ public void testMultiRowMutation() throws Exception { CoprocessorRpcChannel channel = t.coprocessorService(ROW); MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); + MultiRowMutationService.newBlockingStub(channel); MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); // Assert @@ -326,10 +321,10 @@ public void testMultiRowMutation() throws Exception { @Test public void testMultiRowMutationWithSingleConditionWhenConditionMatches() throws Exception { final TableName tableName = name.getTableName(); - final byte [] ROW1 = Bytes.toBytes("testRow1"); - final byte [] ROW2 = Bytes.toBytes("testRow2"); - final byte [] VALUE1 = Bytes.toBytes("testValue1"); - final byte [] VALUE2 = Bytes.toBytes("testValue2"); + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = 
Bytes.toBytes("testValue2"); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { // Add initial data @@ -347,12 +342,12 @@ public void testMultiRowMutationWithSingleConditionWhenConditionMatches() throws mrmBuilder.addMutationRequest(m1); mrmBuilder.addMutationRequest(m2); mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, - CompareOperator.EQUAL, VALUE2, null)); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2, null)); CoprocessorRpcChannel channel = t.coprocessorService(ROW); MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); + MultiRowMutationService.newBlockingStub(channel); MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); // Assert @@ -372,10 +367,10 @@ public void testMultiRowMutationWithSingleConditionWhenConditionMatches() throws @Test public void testMultiRowMutationWithSingleConditionWhenConditionNotMatch() throws Exception { final TableName tableName = name.getTableName(); - final byte [] ROW1 = Bytes.toBytes("testRow1"); - final byte [] ROW2 = Bytes.toBytes("testRow2"); - final byte [] VALUE1 = Bytes.toBytes("testValue1"); - final byte [] VALUE2 = Bytes.toBytes("testValue2"); + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = Bytes.toBytes("testValue2"); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { // Add initial data @@ -393,12 +388,12 @@ public void testMultiRowMutationWithSingleConditionWhenConditionNotMatch() throw mrmBuilder.addMutationRequest(m1); mrmBuilder.addMutationRequest(m2); mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, - CompareOperator.EQUAL, VALUE1, null)); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE1, null)); CoprocessorRpcChannel channel = t.coprocessorService(ROW); MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); + MultiRowMutationService.newBlockingStub(channel); MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); // Assert @@ -418,10 +413,10 @@ public void testMultiRowMutationWithSingleConditionWhenConditionNotMatch() throw @Test public void testMultiRowMutationWithMultipleConditionsWhenConditionsMatch() throws Exception { final TableName tableName = name.getTableName(); - final byte [] ROW1 = Bytes.toBytes("testRow1"); - final byte [] ROW2 = Bytes.toBytes("testRow2"); - final byte [] VALUE1 = Bytes.toBytes("testValue1"); - final byte [] VALUE2 = Bytes.toBytes("testValue2"); + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = Bytes.toBytes("testValue2"); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { // Add initial data @@ -439,14 +434,14 @@ public void testMultiRowMutationWithMultipleConditionsWhenConditionsMatch() thro mrmBuilder.addMutationRequest(m1); mrmBuilder.addMutationRequest(m2); mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW, FAMILY, QUALIFIER, - CompareOperator.EQUAL, null, null)); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, - CompareOperator.EQUAL, VALUE2, null)); + mrmBuilder.addCondition( + 
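The conditional MultiRowMutation hunks around this point all build the same request shape. Sketched below under the assumption of a Table whose region hosts MultiRowMutationEndpoint and whose rows all map to one region; the row keys, values, and helper name are placeholders.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.util.Bytes;

final class ConditionalMultiRowMutationSketch {
  // Applies two Puts atomically, guarded by a condition on a third cell; if the guard
  // cell does not hold expectedValue, none of the mutations are applied.
  static void putTwoRowsIfGuardMatches(Table table, byte[] family, byte[] qualifier,
      byte[] guardRow, byte[] expectedValue) throws Exception {
    Put p1 = new Put(Bytes.toBytes("r1")).addColumn(family, qualifier, Bytes.toBytes("v1"));
    Put p2 = new Put(Bytes.toBytes("r2")).addColumn(family, qualifier, Bytes.toBytes("v2"));
    MutateRowsRequest request = MutateRowsRequest.newBuilder()
      .addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, p1))
      .addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, p2))
      .addCondition(ProtobufUtil.toCondition(guardRow, family, qualifier,
        CompareOperator.EQUAL, expectedValue, null))
      .build();
    CoprocessorRpcChannel channel = table.coprocessorService(guardRow);
    MultiRowMutationService.BlockingInterface service =
      MultiRowMutationService.newBlockingStub(channel);
    // All mutated rows must live in the region that hosts guardRow.
    service.mutateRows(null, request);
  }
}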
ProtobufUtil.toCondition(ROW, FAMILY, QUALIFIER, CompareOperator.EQUAL, null, null)); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2, null)); CoprocessorRpcChannel channel = t.coprocessorService(ROW); MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); + MultiRowMutationService.newBlockingStub(channel); MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); // Assert @@ -466,10 +461,10 @@ public void testMultiRowMutationWithMultipleConditionsWhenConditionsMatch() thro @Test public void testMultiRowMutationWithMultipleConditionsWhenConditionsNotMatch() throws Exception { final TableName tableName = name.getTableName(); - final byte [] ROW1 = Bytes.toBytes("testRow1"); - final byte [] ROW2 = Bytes.toBytes("testRow2"); - final byte [] VALUE1 = Bytes.toBytes("testValue1"); - final byte [] VALUE2 = Bytes.toBytes("testValue2"); + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = Bytes.toBytes("testValue2"); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { // Add initial data @@ -487,14 +482,14 @@ public void testMultiRowMutationWithMultipleConditionsWhenConditionsNotMatch() t mrmBuilder.addMutationRequest(m1); mrmBuilder.addMutationRequest(m2); mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW1, FAMILY, QUALIFIER, - CompareOperator.EQUAL, null, null)); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, - CompareOperator.EQUAL, VALUE1, null)); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW1, FAMILY, QUALIFIER, CompareOperator.EQUAL, null, null)); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE1, null)); CoprocessorRpcChannel channel = t.coprocessorService(ROW); MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); + MultiRowMutationService.newBlockingStub(channel); MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); // Assert @@ -514,17 +509,17 @@ public void testMultiRowMutationWithMultipleConditionsWhenConditionsNotMatch() t @Test public void testMultiRowMutationWithFilterConditionWhenConditionMatches() throws Exception { final TableName tableName = name.getTableName(); - final byte [] ROW1 = Bytes.toBytes("testRow1"); - final byte [] ROW2 = Bytes.toBytes("testRow2"); - final byte [] QUALIFIER2 = Bytes.toBytes("testQualifier2"); - final byte [] VALUE1 = Bytes.toBytes("testValue1"); - final byte [] VALUE2 = Bytes.toBytes("testValue2"); - final byte [] VALUE3 = Bytes.toBytes("testValue3"); + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] QUALIFIER2 = Bytes.toBytes("testQualifier2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = Bytes.toBytes("testValue2"); + final byte[] VALUE3 = Bytes.toBytes("testValue3"); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { // Add initial data - t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2) - .addColumn(FAMILY, QUALIFIER2, VALUE3)); + t.put( + new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2).addColumn(FAMILY, QUALIFIER2, VALUE3)); // Execute MultiRowMutation with conditions Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); @@ -538,13 +533,15 @@ public void 
testMultiRowMutationWithFilterConditionWhenConditionMatches() throws mrmBuilder.addMutationRequest(m1); mrmBuilder.addMutationRequest(m2); mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, new FilterList( - new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2), - new SingleColumnValueFilter(FAMILY, QUALIFIER2, CompareOperator.EQUAL, VALUE3)), null)); + mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, + new FilterList( + new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2), + new SingleColumnValueFilter(FAMILY, QUALIFIER2, CompareOperator.EQUAL, VALUE3)), + null)); CoprocessorRpcChannel channel = t.coprocessorService(ROW); MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); + MultiRowMutationService.newBlockingStub(channel); MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); // Assert @@ -564,17 +561,17 @@ public void testMultiRowMutationWithFilterConditionWhenConditionMatches() throws @Test public void testMultiRowMutationWithFilterConditionWhenConditionNotMatch() throws Exception { final TableName tableName = name.getTableName(); - final byte [] ROW1 = Bytes.toBytes("testRow1"); - final byte [] ROW2 = Bytes.toBytes("testRow2"); - final byte [] QUALIFIER2 = Bytes.toBytes("testQualifier2"); - final byte [] VALUE1 = Bytes.toBytes("testValue1"); - final byte [] VALUE2 = Bytes.toBytes("testValue2"); - final byte [] VALUE3 = Bytes.toBytes("testValue3"); + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] QUALIFIER2 = Bytes.toBytes("testQualifier2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = Bytes.toBytes("testValue2"); + final byte[] VALUE3 = Bytes.toBytes("testValue3"); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { // Add initial data - t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2) - .addColumn(FAMILY, QUALIFIER2, VALUE3)); + t.put( + new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2).addColumn(FAMILY, QUALIFIER2, VALUE3)); // Execute MultiRowMutation with conditions Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); @@ -588,13 +585,15 @@ public void testMultiRowMutationWithFilterConditionWhenConditionNotMatch() throw mrmBuilder.addMutationRequest(m1); mrmBuilder.addMutationRequest(m2); mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, new FilterList( - new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2), - new SingleColumnValueFilter(FAMILY, QUALIFIER2, CompareOperator.EQUAL, VALUE2)), null)); + mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, + new FilterList( + new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2), + new SingleColumnValueFilter(FAMILY, QUALIFIER2, CompareOperator.EQUAL, VALUE2)), + null)); CoprocessorRpcChannel channel = t.coprocessorService(ROW); MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); + MultiRowMutationService.newBlockingStub(channel); MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); // Assert @@ -617,7 +616,7 @@ public void testRowMutations() throws Exception { final TableName tableName = name.getTableName(); try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { byte[][] QUALIFIERS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), - Bytes.toBytes("c"), Bytes.toBytes("d") }; + 
Bytes.toBytes("c"), Bytes.toBytes("d") }; // Test for Put operations RowMutations arm = new RowMutations(ROW); @@ -651,12 +650,10 @@ public void testRowMutations() throws Exception { // Test for Increment and Append operations arm = new RowMutations(ROW); - arm.add(Arrays.asList( - new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE), + arm.add(Arrays.asList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE), new Delete(ROW).addColumns(FAMILY, QUALIFIERS[1]), new Increment(ROW).addColumn(FAMILY, QUALIFIERS[2], 5L), - new Append(ROW).addColumn(FAMILY, QUALIFIERS[3], Bytes.toBytes("abc")) - )); + new Append(ROW).addColumn(FAMILY, QUALIFIERS[3], Bytes.toBytes("abc")))); r = t.mutateRow(arm); assertTrue(r.getExists()); assertEquals(5L, Bytes.toLong(r.getValue(FAMILY, QUALIFIERS[2]))); @@ -721,9 +718,8 @@ public void testAppend() throws Exception { try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { byte[] v1 = Bytes.toBytes("42"); byte[] v2 = Bytes.toBytes("23"); - byte[][] QUALIFIERS = new byte[][]{ - Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("c") - }; + byte[][] QUALIFIERS = + new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("c") }; Append a = new Append(ROW); a.addColumn(FAMILY, QUALIFIERS[0], v1); a.addColumn(FAMILY, QUALIFIERS[1], v2); @@ -740,13 +736,14 @@ public void testAppend() throws Exception { // QUALIFIERS[2] previously not exist, verify both value and timestamp are correct assertEquals(0, Bytes.compareTo(v2, r.getValue(FAMILY, QUALIFIERS[2]))); assertEquals(r.getColumnLatestCell(FAMILY, QUALIFIERS[0]).getTimestamp(), - r.getColumnLatestCell(FAMILY, QUALIFIERS[2]).getTimestamp()); + r.getColumnLatestCell(FAMILY, QUALIFIERS[2]).getTimestamp()); } } + private List doAppend(final boolean walUsed) throws IOException { LOG.info("Starting testAppend, walUsed is " + walUsed); final TableName TABLENAME = - TableName.valueOf(walUsed ? "testAppendWithWAL" : "testAppendWithoutWAL"); + TableName.valueOf(walUsed ? 
"testAppendWithWAL" : "testAppendWithoutWAL"); try (Table t = TEST_UTIL.createTable(TABLENAME, FAMILY)) { final byte[] row1 = Bytes.toBytes("c"); final byte[] row2 = Bytes.toBytes("b"); @@ -818,7 +815,7 @@ public void testClientPoolRoundRobin() throws IOException { conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize); try (Table table = - TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, Integer.MAX_VALUE)) { + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, Integer.MAX_VALUE)) { final long ts = EnvironmentEdgeManager.currentTime(); Get get = new Get(ROW); @@ -831,21 +828,21 @@ public void testClientPoolRoundRobin() throws IOException { table.put(put); Result result = table.get(get); - NavigableMap navigableMap = result.getMap().get(FAMILY) - .get(QUALIFIER); + NavigableMap navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); assertEquals("The number of versions of '" + Bytes.toString(FAMILY) + ":" - + Bytes.toString(QUALIFIER) + " did not match", versions, navigableMap.size()); + + Bytes.toString(QUALIFIER) + " did not match", + versions, navigableMap.size()); for (Map.Entry entry : navigableMap.entrySet()) { - assertTrue("The value at time " + entry.getKey() - + " did not match what was put", - Bytes.equals(VALUE, entry.getValue())); + assertTrue("The value at time " + entry.getKey() + " did not match what was put", + Bytes.equals(VALUE, entry.getValue())); } } } } - @Ignore ("Flakey: HBASE-8989") @Test + @Ignore("Flakey: HBASE-8989") + @Test public void testClientPoolThreadLocal() throws IOException { final TableName tableName = name.getTableName(); @@ -855,7 +852,7 @@ public void testClientPoolThreadLocal() throws IOException { conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "thread-local"); conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize); - try (final Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 3)) { + try (final Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 3)) { final long ts = EnvironmentEdgeManager.currentTime(); final Get get = new Get(ROW); @@ -868,15 +865,14 @@ public void testClientPoolThreadLocal() throws IOException { table.put(put); Result result = table.get(get); - NavigableMap navigableMap = result.getMap().get(FAMILY) - .get(QUALIFIER); + NavigableMap navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); assertEquals("The number of versions of '" + Bytes.toString(FAMILY) + ":" - + Bytes.toString(QUALIFIER) + " did not match", versions, navigableMap.size()); + + Bytes.toString(QUALIFIER) + " did not match", + versions, navigableMap.size()); for (Map.Entry entry : navigableMap.entrySet()) { - assertTrue("The value at time " + entry.getKey() - + " did not match what was put", - Bytes.equals(VALUE, entry.getValue())); + assertTrue("The value at time " + entry.getKey() + " did not match what was put", + Bytes.equals(VALUE, entry.getValue())); } } @@ -892,16 +888,15 @@ public void testClientPoolThreadLocal() throws IOException { table.put(put); Result result = table.get(get); - NavigableMap navigableMap = result.getMap() - .get(FAMILY).get(QUALIFIER); + NavigableMap navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); - assertEquals("The number of versions of '" + Bytes.toString(FAMILY) + ":" - + Bytes.toString(QUALIFIER) + " did not match " + versionsCopy, versionsCopy, - navigableMap.size()); + assertEquals( + "The number of versions of '" + Bytes.toString(FAMILY) + ":" + + Bytes.toString(QUALIFIER) + " did not match " + versionsCopy, + versionsCopy, 
navigableMap.size()); for (Map.Entry entry : navigableMap.entrySet()) { - assertTrue("The value at time " + entry.getKey() - + " did not match what was put", - Bytes.equals(VALUE, entry.getValue())); + assertTrue("The value at time " + entry.getKey() + " did not match what was put", + Bytes.equals(VALUE, entry.getValue())); } synchronized (waitLock) { waitLock.wait(); @@ -927,16 +922,16 @@ public void testClientPoolThreadLocal() throws IOException { @Test public void testCheckAndPut() throws IOException { - final byte [] anotherrow = Bytes.toBytes("anotherrow"); - final byte [] value2 = Bytes.toBytes("abcd"); + final byte[] anotherrow = Bytes.toBytes("anotherrow"); + final byte[] value2 = Bytes.toBytes("abcd"); try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { Put put1 = new Put(ROW); put1.addColumn(FAMILY, QUALIFIER, VALUE); // row doesn't exist, so using non-null value should be considered "not match". - boolean ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifEquals(VALUE).thenPut(put1); + boolean ok = + table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenPut(put1); assertFalse(ok); // row doesn't exist, so using "ifNotExists" should be considered "match". @@ -973,96 +968,69 @@ public void testCheckAndMutateWithTimeRange() throws IOException { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, ts, VALUE); - boolean ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifNotExists() - .thenPut(put); + boolean ok = + table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put); assertTrue(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.at(ts + 10000)) - .ifEquals(VALUE) - .thenPut(put); + .timeRange(TimeRange.at(ts + 10000)).ifEquals(VALUE).thenPut(put); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.from(ts + 10000)) - .ifEquals(VALUE) - .thenPut(put); + .timeRange(TimeRange.from(ts + 10000)).ifEquals(VALUE).thenPut(put); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.between(ts + 10000, ts + 20000)) - .ifEquals(VALUE) - .thenPut(put); + .timeRange(TimeRange.between(ts + 10000, ts + 20000)).ifEquals(VALUE).thenPut(put); assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.until(ts)) - .ifEquals(VALUE) - .thenPut(put); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.until(ts)) + .ifEquals(VALUE).thenPut(put); assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.at(ts)) - .ifEquals(VALUE) - .thenPut(put); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) + .ifEquals(VALUE).thenPut(put); assertTrue(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.from(ts)) - .ifEquals(VALUE) - .thenPut(put); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.from(ts)) + .ifEquals(VALUE).thenPut(put); assertTrue(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.between(ts, ts + 20000)) - .ifEquals(VALUE) - .thenPut(put); + .timeRange(TimeRange.between(ts, ts + 20000)).ifEquals(VALUE).thenPut(put); assertTrue(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.until(ts + 10000)) - .ifEquals(VALUE) - .thenPut(put); + .timeRange(TimeRange.until(ts + 
10000)).ifEquals(VALUE).thenPut(put); assertTrue(ok); - RowMutations rm = new RowMutations(ROW) - .add((Mutation) put); + RowMutations rm = new RowMutations(ROW).add((Mutation) put); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.at(ts + 10000)) - .ifEquals(VALUE) - .thenMutate(rm); + .timeRange(TimeRange.at(ts + 10000)).ifEquals(VALUE).thenMutate(rm); assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.at(ts)) - .ifEquals(VALUE) - .thenMutate(rm); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) + .ifEquals(VALUE).thenMutate(rm); assertTrue(ok); - Delete delete = new Delete(ROW) - .addColumn(FAMILY, QUALIFIER); + Delete delete = new Delete(ROW).addColumn(FAMILY, QUALIFIER); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.at(ts + 10000)) - .ifEquals(VALUE) - .thenDelete(delete); + .timeRange(TimeRange.at(ts + 10000)).ifEquals(VALUE).thenDelete(delete); assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.at(ts)) - .ifEquals(VALUE) - .thenDelete(delete); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) + .ifEquals(VALUE).thenDelete(delete); assertTrue(ok); } } @Test public void testCheckAndPutWithCompareOp() throws IOException { - final byte [] value1 = Bytes.toBytes("aaaa"); - final byte [] value2 = Bytes.toBytes("bbbb"); - final byte [] value3 = Bytes.toBytes("cccc"); - final byte [] value4 = Bytes.toBytes("dddd"); + final byte[] value1 = Bytes.toBytes("aaaa"); + final byte[] value2 = Bytes.toBytes("bbbb"); + final byte[] value3 = Bytes.toBytes("cccc"); + final byte[] value4 = Bytes.toBytes("dddd"); try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { @@ -1074,80 +1042,79 @@ public void testCheckAndPutWithCompareOp() throws IOException { // row doesn't exist, so using "ifNotExists" should be considered "match". 
boolean ok = - table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put2); + table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put2); assertTrue(ok); // cell = "bbbb", using "aaaa" to compare only LESS/LESS_OR_EQUAL/NOT_EQUAL // turns out "match" ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value1).thenPut(put2); + .ifMatches(CompareOperator.GREATER, value1).thenPut(put2); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value1).thenPut(put2); + .ifMatches(CompareOperator.EQUAL, value1).thenPut(put2); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value1).thenPut(put2); + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value1).thenPut(put2); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value1).thenPut(put2); + .ifMatches(CompareOperator.LESS, value1).thenPut(put2); assertTrue(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value1).thenPut(put2); + .ifMatches(CompareOperator.LESS_OR_EQUAL, value1).thenPut(put2); assertTrue(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value1).thenPut(put3); + .ifMatches(CompareOperator.NOT_EQUAL, value1).thenPut(put3); assertTrue(ok); // cell = "cccc", using "dddd" to compare only LARGER/LARGER_OR_EQUAL/NOT_EQUAL // turns out "match" ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value4).thenPut(put3); + .ifMatches(CompareOperator.LESS, value4).thenPut(put3); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value4).thenPut(put3); + .ifMatches(CompareOperator.LESS_OR_EQUAL, value4).thenPut(put3); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value4).thenPut(put3); + .ifMatches(CompareOperator.EQUAL, value4).thenPut(put3); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value4).thenPut(put3); + .ifMatches(CompareOperator.GREATER, value4).thenPut(put3); assertTrue(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value4).thenPut(put3); + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value4).thenPut(put3); assertTrue(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value4).thenPut(put2); + .ifMatches(CompareOperator.NOT_EQUAL, value4).thenPut(put2); assertTrue(ok); // cell = "bbbb", using "bbbb" to compare only GREATER_OR_EQUAL/LESS_OR_EQUAL/EQUAL // turns out "match" ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value2).thenPut(put2); + .ifMatches(CompareOperator.GREATER, value2).thenPut(put2); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value2).thenPut(put2); + .ifMatches(CompareOperator.NOT_EQUAL, value2).thenPut(put2); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value2).thenPut(put2); + .ifMatches(CompareOperator.LESS, value2).thenPut(put2); assertFalse(ok); ok = 
table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value2).thenPut(put2); + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value2).thenPut(put2); assertTrue(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value2).thenPut(put2); + .ifMatches(CompareOperator.LESS_OR_EQUAL, value2).thenPut(put2); assertTrue(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value2).thenPut(put3); + .ifMatches(CompareOperator.EQUAL, value2).thenPut(put3); assertTrue(ok); } } @Test public void testCheckAndDelete() throws IOException { - final byte [] value1 = Bytes.toBytes("aaaa"); + final byte[] value1 = Bytes.toBytes("aaaa"); - try (Table table = TEST_UTIL.createTable(name.getTableName(), - FAMILY)) { + try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, value1); @@ -1156,21 +1123,20 @@ public void testCheckAndDelete() throws IOException { Delete delete = new Delete(ROW); delete.addColumns(FAMILY, QUALIFIER); - boolean ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifEquals(value1).thenDelete(delete); + boolean ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(value1) + .thenDelete(delete); assertTrue(ok); } } @Test public void testCheckAndDeleteWithCompareOp() throws IOException { - final byte [] value1 = Bytes.toBytes("aaaa"); - final byte [] value2 = Bytes.toBytes("bbbb"); - final byte [] value3 = Bytes.toBytes("cccc"); - final byte [] value4 = Bytes.toBytes("dddd"); + final byte[] value1 = Bytes.toBytes("aaaa"); + final byte[] value2 = Bytes.toBytes("bbbb"); + final byte[] value3 = Bytes.toBytes("cccc"); + final byte[] value4 = Bytes.toBytes("dddd"); - try (Table table = TEST_UTIL.createTable(name.getTableName(), - FAMILY)) { + try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { Put put2 = new Put(ROW); put2.addColumn(FAMILY, QUALIFIER, value2); @@ -1185,81 +1151,81 @@ public void testCheckAndDeleteWithCompareOp() throws IOException { // cell = "bbbb", using "aaaa" to compare only LESS/LESS_OR_EQUAL/NOT_EQUAL // turns out "match" boolean ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value1).thenDelete(delete); + .ifMatches(CompareOperator.GREATER, value1).thenDelete(delete); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value1).thenDelete(delete); + .ifMatches(CompareOperator.EQUAL, value1).thenDelete(delete); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value1).thenDelete(delete); + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value1).thenDelete(delete); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value1).thenDelete(delete); + .ifMatches(CompareOperator.LESS, value1).thenDelete(delete); assertTrue(ok); table.put(put2); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value1).thenDelete(delete); + .ifMatches(CompareOperator.LESS_OR_EQUAL, value1).thenDelete(delete); assertTrue(ok); table.put(put2); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value1).thenDelete(delete); + .ifMatches(CompareOperator.NOT_EQUAL, value1).thenDelete(delete); 
assertTrue(ok); // cell = "cccc", using "dddd" to compare only LARGER/LARGER_OR_EQUAL/NOT_EQUAL // turns out "match" table.put(put3); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value4).thenDelete(delete); + .ifMatches(CompareOperator.LESS, value4).thenDelete(delete); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value4).thenDelete(delete); + .ifMatches(CompareOperator.LESS_OR_EQUAL, value4).thenDelete(delete); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value4).thenDelete(delete); + .ifMatches(CompareOperator.EQUAL, value4).thenDelete(delete); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value4).thenDelete(delete); + .ifMatches(CompareOperator.GREATER, value4).thenDelete(delete); assertTrue(ok); table.put(put3); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value4).thenDelete(delete); + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value4).thenDelete(delete); assertTrue(ok); table.put(put3); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value4).thenDelete(delete); + .ifMatches(CompareOperator.NOT_EQUAL, value4).thenDelete(delete); assertTrue(ok); // cell = "bbbb", using "bbbb" to compare only GREATER_OR_EQUAL/LESS_OR_EQUAL/EQUAL // turns out "match" table.put(put2); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value2).thenDelete(delete); + .ifMatches(CompareOperator.GREATER, value2).thenDelete(delete); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value2).thenDelete(delete); + .ifMatches(CompareOperator.NOT_EQUAL, value2).thenDelete(delete); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value2).thenDelete(delete); + .ifMatches(CompareOperator.LESS, value2).thenDelete(delete); assertFalse(ok); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value2).thenDelete(delete); + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value2).thenDelete(delete); assertTrue(ok); table.put(put2); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value2).thenDelete(delete); + .ifMatches(CompareOperator.LESS_OR_EQUAL, value2).thenDelete(delete); assertTrue(ok); table.put(put2); ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value2).thenDelete(delete); + .ifMatches(CompareOperator.EQUAL, value2).thenDelete(delete); assertTrue(ok); } } /** - * Test ScanMetrics - */ + * Test ScanMetrics + */ @Test - @SuppressWarnings({"unused", "checkstyle:EmptyBlock"}) + @SuppressWarnings({ "unused", "checkstyle:EmptyBlock" }) public void testScanMetrics() throws Exception { final TableName tableName = name.getTableName(); @@ -1352,16 +1318,14 @@ public void testScanMetrics() throws Exception { // now, test that the metrics are still collected even if you don't call close, but do // run past the end of all the records - /** There seems to be a timing issue here. Comment out for now. Fix when time. 
- Scan scanWithoutClose = new Scan(); - scanWithoutClose.setCaching(1); - scanWithoutClose.setScanMetricsEnabled(true); - ResultScanner scannerWithoutClose = ht.getScanner(scanWithoutClose); - for (Result result : scannerWithoutClose.next(numRecords + 1)) { - } - ScanMetrics scanMetricsWithoutClose = getScanMetrics(scanWithoutClose); - assertEquals("Did not access all the regions in the table", numOfRegions, - scanMetricsWithoutClose.countOfRegions.get()); + /** + * There seems to be a timing issue here. Comment out for now. Fix when time. Scan + * scanWithoutClose = new Scan(); scanWithoutClose.setCaching(1); + * scanWithoutClose.setScanMetricsEnabled(true); ResultScanner scannerWithoutClose = + * ht.getScanner(scanWithoutClose); for (Result result : scannerWithoutClose.next(numRecords + + * 1)) { } ScanMetrics scanMetricsWithoutClose = getScanMetrics(scanWithoutClose); + * assertEquals("Did not access all the regions in the table", numOfRegions, + * scanMetricsWithoutClose.countOfRegions.get()); */ // finally, @@ -1385,22 +1349,19 @@ public void testScanMetrics() throws Exception { } /** - * Tests that cache on write works all the way up from the client-side. - * - * Performs inserts, flushes, and compactions, verifying changes in the block - * cache along the way. + * Tests that cache on write works all the way up from the client-side. Performs inserts, flushes, + * and compactions, verifying changes in the block cache along the way. */ @Test public void testCacheOnWriteEvictOnClose() throws Exception { final TableName tableName = name.getTableName(); - byte [] data = Bytes.toBytes("data"); + byte[] data = Bytes.toBytes("data"); try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { // get the block cache and region String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName) - .getRegion(regionName); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); @@ -1416,8 +1377,8 @@ public void testCacheOnWriteEvictOnClose() throws Exception { for (int i = 0; i < 5; i++) { Thread.sleep(100); if (startBlockCount != cache.getBlockCount() - || startBlockHits != cache.getStats().getHitCount() - || startBlockMiss != cache.getStats().getMissCount()) { + || startBlockHits != cache.getStats().getHitCount() + || startBlockMiss != cache.getStats().getMissCount()) { startBlockCount = cache.getBlockCount(); startBlockHits = cache.getStats().getHitCount(); startBlockMiss = cache.getStats().getMissCount(); @@ -1504,12 +1465,12 @@ public void testCacheOnWriteEvictOnClose() throws Exception { private void waitForStoreFileCount(HStore store, int count, int timeout) throws InterruptedException { long start = EnvironmentEdgeManager.currentTime(); - while (start + timeout > EnvironmentEdgeManager.currentTime() && - store.getStorefilesCount() != count) { + while (start + timeout > EnvironmentEdgeManager.currentTime() + && store.getStorefilesCount() != count) { Thread.sleep(100); } - System.out.println("start=" + start + ", now=" + EnvironmentEdgeManager.currentTime() + - ", cur=" + store.getStorefilesCount()); + System.out.println("start=" + start + ", now=" + EnvironmentEdgeManager.currentTime() + ", cur=" + + store.getStorefilesCount()); 
assertEquals(count, store.getStorefilesCount()); } @@ -1520,9 +1481,9 @@ private void waitForStoreFileCount(HStore store, int count, int timeout) public void testNonCachedGetRegionLocation() throws Exception { // Test Initialization. final TableName tableName = name.getTableName(); - byte [] family1 = Bytes.toBytes("f1"); - byte [] family2 = Bytes.toBytes("f2"); - try (Table ignored = TEST_UTIL.createTable(tableName, new byte[][] {family1, family2}, 10); + byte[] family1 = Bytes.toBytes("f1"); + byte[] family2 = Bytes.toBytes("f2"); + try (Table ignored = TEST_UTIL.createTable(tableName, new byte[][] { family1, family2 }, 10); Admin admin = TEST_UTIL.getAdmin(); RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { List allRegionLocations = locator.getAllRegionLocations(); @@ -1531,17 +1492,17 @@ public void testNonCachedGetRegionLocation() throws Exception { ServerName addrBefore = allRegionLocations.get(0).getServerName(); // Verify region location before move. HRegionLocation addrCache = locator.getRegionLocation(regionInfo.getStartKey(), false); - HRegionLocation addrNoCache = locator.getRegionLocation(regionInfo.getStartKey(), true); + HRegionLocation addrNoCache = locator.getRegionLocation(regionInfo.getStartKey(), true); assertEquals(addrBefore.getPort(), addrCache.getPort()); assertEquals(addrBefore.getPort(), addrNoCache.getPort()); - // Make sure more than one server. if (TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size() <= 1) { TEST_UTIL.getMiniHBaseCluster().startRegionServer(); Waiter.waitFor(TEST_UTIL.getConfiguration(), 30000, new Waiter.Predicate() { - @Override public boolean evaluate() throws Exception { + @Override + public boolean evaluate() throws Exception { return TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size() > 1; } }); @@ -1549,8 +1510,8 @@ public void testNonCachedGetRegionLocation() throws Exception { ServerName addrAfter = null; // Now move the region to a different server. - for (int i = 0; i < TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size(); - i++) { + for (int i = 0; i < TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads() + .size(); i++) { HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(i); ServerName addr = regionServer.getServerName(); if (addr.getPort() != addrBefore.getPort()) { @@ -1572,14 +1533,14 @@ public void testNonCachedGetRegionLocation() throws Exception { } /** - * Tests getRegionsInRange by creating some regions over which a range of - * keys spans; then changing the key range. + * Tests getRegionsInRange by creating some regions over which a range of keys spans; then + * changing the key range. */ @Test public void testGetRegionsInRange() throws Exception { // Test Initialization. 
- byte [] startKey = Bytes.toBytes("ddc"); - byte [] endKey = Bytes.toBytes("mmm"); + byte[] startKey = Bytes.toBytes("ddc"); + byte[] endKey = Bytes.toBytes("mmm"); TableName tableName = name.getTableName(); TEST_UTIL.createMultiRegionTable(tableName, new byte[][] { FAMILY }, 10); @@ -1612,8 +1573,8 @@ public void testGetRegionsInRange() throws Exception { assertEquals(21, regionsList.size()); // Both start and end keys empty - regionsList = getRegionsInRange(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW); + regionsList = + getRegionsInRange(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); assertEquals(26, regionsList.size()); // Change the end key to somewhere in the last block @@ -1652,7 +1613,7 @@ private List getRegionsInRange(TableName tableName, byte[] star @Test public void testJira6912() throws Exception { final TableName tableName = name.getTableName(); - try (Table foo = TEST_UTIL.createTable(tableName, new byte[][] {FAMILY}, 10)) { + try (Table foo = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 10)) { List puts = new ArrayList<>(); for (int i = 0; i != 100; i++) { @@ -1668,8 +1629,8 @@ public void testJira6912() throws Exception { scan.withStartRow(Bytes.toBytes(1)); scan.withStopRow(Bytes.toBytes(3)); scan.addColumn(FAMILY, FAMILY); - scan.setFilter(new RowFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(Bytes.toBytes(1)))); + scan.setFilter( + new RowFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(1)))); try (ResultScanner scanner = foo.getScanner(scan)) { Result[] bar = scanner.next(100); @@ -1761,7 +1722,7 @@ public void testNegativeTimestamp() throws IOException { // already has negative timestamps in cluster data, HBase won't be able to handle that try { new KeyValue(Bytes.toBytes(42), Bytes.toBytes(42), Bytes.toBytes(42), -1, - Bytes.toBytes(42)); + Bytes.toBytes(42)); } catch (IllegalArgumentException ex) { fail("KeyValue SHOULD allow negative timestamps"); } @@ -1801,11 +1762,11 @@ public void testRawScanRespectsVersions() throws Exception { int count = 0; for (Result r : scanner) { assertEquals("Found an unexpected number of results for the row!", versions, - r.listCells().size()); + r.listCells().size()); count++; } assertEquals("Found more than a single row when raw scanning the table with a single row!", - 1, count); + 1, count); } // then if we decrease the number of versions, but keep the scan raw, we should see exactly @@ -1816,11 +1777,11 @@ public void testRawScanRespectsVersions() throws Exception { int count = 0; for (Result r : scanner) { assertEquals("Found an unexpected number of results for the row!", versions, - r.listCells().size()); + r.listCells().size()); count++; } assertEquals("Found more than a single row when raw scanning the table with a single row!", - 1, count); + 1, count); } // finally, if we turn off raw scanning, but max out the number of versions, we should go back @@ -1831,11 +1792,11 @@ public void testRawScanRespectsVersions() throws Exception { int count = 0; for (Result r : scanner) { assertEquals("Found an unexpected number of results for the row!", versions, - r.listCells().size()); + r.listCells().size()); count++; } assertEquals("Found more than a single row when raw scanning the table with a single row!", - 1, count); + 1, count); } } @@ -1870,14 +1831,13 @@ public void testEmptyFilterList() throws Exception { for (int i = 0; i != scanResult.rawCells().length; ++i) { Cell scanCell = scanResult.rawCells()[i]; Cell getCell = getResult.rawCells()[i]; - 
assertEquals(0, Bytes.compareTo(CellUtil.cloneRow(scanCell), - CellUtil.cloneRow(getCell))); - assertEquals(0, Bytes.compareTo(CellUtil.cloneFamily(scanCell), - CellUtil.cloneFamily(getCell))); - assertEquals(0, Bytes.compareTo(CellUtil.cloneQualifier(scanCell), - CellUtil.cloneQualifier(getCell))); - assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(scanCell), - CellUtil.cloneValue(getCell))); + assertEquals(0, Bytes.compareTo(CellUtil.cloneRow(scanCell), CellUtil.cloneRow(getCell))); + assertEquals(0, + Bytes.compareTo(CellUtil.cloneFamily(scanCell), CellUtil.cloneFamily(getCell))); + assertEquals(0, + Bytes.compareTo(CellUtil.cloneQualifier(scanCell), CellUtil.cloneQualifier(getCell))); + assertEquals(0, + Bytes.compareTo(CellUtil.cloneValue(scanCell), CellUtil.cloneValue(getCell))); } } } @@ -1908,7 +1868,7 @@ public void testSmallScan() throws Exception { // small scan Scan scan = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(HConstants.EMPTY_END_ROW, true); + .withStopRow(HConstants.EMPTY_END_ROW, true); scan.setReadType(ReadType.PREAD); scan.setCaching(2); try (ResultScanner scanner = table.getScanner(scan)) { @@ -1957,12 +1917,11 @@ public void testSuperSimpleWithReverseScan() throws Exception { put.addColumn(FAMILY, QUALIFIER, VALUE); ht.put(put); Scan scan = new Scan().withStartRow(Bytes.toBytes("0-b11111-9223372036854775807")) - .withStopRow(Bytes.toBytes("0-b11111-0000000000000000000"), true); + .withStopRow(Bytes.toBytes("0-b11111-0000000000000000000"), true); scan.setReversed(true); try (ResultScanner scanner = ht.getScanner(scan)) { Result result = scanner.next(); - assertTrue(Bytes.equals(result.getRow(), - Bytes.toBytes("0-b11111-0000000000000000008"))); + assertTrue(Bytes.equals(result.getRow(), Bytes.toBytes("0-b11111-0000000000000000008"))); } } } @@ -1972,16 +1931,12 @@ public void testFiltersWithReverseScan() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { byte[][] ROWS = makeN(ROW, 10); - byte[][] QUALIFIERS = {Bytes.toBytes("col0--"), - Bytes.toBytes("col1--"), - Bytes.toBytes("col2--"), - Bytes.toBytes("col3--"), - Bytes.toBytes("col4--"), - Bytes.toBytes("col5--"), - Bytes.toBytes("col6--"), - Bytes.toBytes("col7--"), - Bytes.toBytes("col8--"), - Bytes.toBytes("col9--")}; + byte[][] QUALIFIERS = + { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), + Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), + Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), + Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), + Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; for (int i = 0; i < 10; i++) { Put put = new Put(ROWS[i]); put.addColumn(FAMILY, QUALIFIERS[i], VALUE); @@ -1990,8 +1945,8 @@ public void testFiltersWithReverseScan() throws Exception { Scan scan = new Scan(); scan.setReversed(true); scan.addFamily(FAMILY); - Filter filter = new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator("col[1-5]")); + Filter filter = + new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator("col[1-5]")); scan.setFilter(filter); try (ResultScanner scanner = ht.getScanner(scan)) { int expectedIndex = 5; @@ -1999,10 +1954,10 @@ public void testFiltersWithReverseScan() throws Exception { assertEquals(1, result.size()); Cell c = result.rawCells()[0]; assertTrue(Bytes.equals(c.getRowArray(), c.getRowOffset(), c.getRowLength(), - ROWS[expectedIndex], 0, ROWS[expectedIndex].length)); - assertTrue(Bytes.equals(c.getQualifierArray(), c.getQualifierOffset(), - 
c.getQualifierLength(), QUALIFIERS[expectedIndex], 0, - QUALIFIERS[expectedIndex].length)); + ROWS[expectedIndex], 0, ROWS[expectedIndex].length)); + assertTrue( + Bytes.equals(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength(), + QUALIFIERS[expectedIndex], 0, QUALIFIERS[expectedIndex].length)); expectedIndex--; } assertEquals(0, expectedIndex); @@ -2015,16 +1970,12 @@ public void testKeyOnlyFilterWithReverseScan() throws Exception { final TableName tableName = name.getTableName(); try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { byte[][] ROWS = makeN(ROW, 10); - byte[][] QUALIFIERS = {Bytes.toBytes("col0--"), - Bytes.toBytes("col1--"), - Bytes.toBytes("col2--"), - Bytes.toBytes("col3--"), - Bytes.toBytes("col4--"), - Bytes.toBytes("col5--"), - Bytes.toBytes("col6--"), - Bytes.toBytes("col7--"), - Bytes.toBytes("col8--"), - Bytes.toBytes("col9--")}; + byte[][] QUALIFIERS = + { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), + Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), + Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), + Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), + Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; for (int i = 0; i < 10; i++) { Put put = new Put(ROWS[i]); put.addColumn(FAMILY, QUALIFIERS[i], VALUE); @@ -2132,7 +2083,7 @@ public void testNullWithReverseScan() throws Exception { // Use a new table try (Table ht = - TEST_UTIL.createTable(TableName.valueOf(name.getTableName().toString() + "2"), FAMILY)) { + TEST_UTIL.createTable(TableName.valueOf(name.getTableName().toString() + "2"), FAMILY)) { // Empty qualifier, byte[0] instead of null (should work) Put put = new Put(ROW); put.addColumn(FAMILY, HConstants.EMPTY_BYTE_ARRAY, VALUE); @@ -2179,8 +2130,8 @@ public void testDeletesWithReverseScan() throws Exception { scan.addFamily(FAMILIES[0]); scan.readVersions(Integer.MAX_VALUE); Result result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[]{ts[1]}, - new byte[][]{VALUES[1]}, 0, 0); + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[] { ts[1] }, + new byte[][] { VALUES[1] }, 0, 0); // Test delete latest version put = new Put(ROW); @@ -2201,8 +2152,8 @@ public void testDeletesWithReverseScan() throws Exception { scan.addColumn(FAMILIES[0], QUALIFIER); scan.readVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[]{ts[1], - ts[2], ts[3]}, new byte[][]{VALUES[1], VALUES[2], VALUES[3]}, 0, 2); + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[] { ts[1], ts[2], ts[3] }, + new byte[][] { VALUES[1], VALUES[2], VALUES[3] }, 0, 2); // Test for HBASE-1847 delete = new Delete(ROW); @@ -2230,8 +2181,8 @@ public void testDeletesWithReverseScan() throws Exception { scan.addFamily(FAMILIES[0]); scan.readVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[]{ts[1], - ts[2], ts[3]}, new byte[][]{VALUES[1], VALUES[2], VALUES[3]}, 0, 2); + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[] { ts[1], ts[2], ts[3] }, + new byte[][] { VALUES[1], VALUES[2], VALUES[3] }, 0, 2); // Test deleting an entire family from one row but not the other various // ways @@ -2278,8 +2229,8 @@ public void testDeletesWithReverseScan() throws Exception { scan.readVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); - assertNResult(result, 
ROWS[0], FAMILIES[1], QUALIFIER, new long[]{ts[0], - ts[1]}, new byte[][]{VALUES[0], VALUES[1]}, 0, 1); + assertNResult(result, ROWS[0], FAMILIES[1], QUALIFIER, new long[] { ts[0], ts[1] }, + new byte[][] { VALUES[0], VALUES[1] }, 0, 1); scan = new Scan().withStartRow(ROWS[1]); scan.setReversed(true); @@ -2296,8 +2247,8 @@ public void testDeletesWithReverseScan() throws Exception { scan.readVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); assertEquals(1, result.size()); - assertNResult(result, ROWS[2], FAMILIES[2], QUALIFIER, - new long[]{ts[2]}, new byte[][]{VALUES[2]}, 0, 0); + assertNResult(result, ROWS[2], FAMILIES[2], QUALIFIER, new long[] { ts[2] }, + new byte[][] { VALUES[2] }, 0, 0); // Test if we delete the family first in one row (HBASE-1541) @@ -2343,12 +2294,10 @@ public void testReversedScanUnderMultiRegions() throws Exception { final TableName tableName = name.getTableName(); byte[] maxByteArray = ConnectionUtils.MAX_BYTE_ARRAY; byte[][] splitRows = new byte[][] { Bytes.toBytes("005"), - Bytes.add(Bytes.toBytes("005"), Bytes.multiple(maxByteArray, 16)), - Bytes.toBytes("006"), - Bytes.add(Bytes.toBytes("006"), Bytes.multiple(maxByteArray, 8)), - Bytes.toBytes("007"), - Bytes.add(Bytes.toBytes("007"), Bytes.multiple(maxByteArray, 4)), - Bytes.toBytes("008"), Bytes.multiple(maxByteArray, 2) }; + Bytes.add(Bytes.toBytes("005"), Bytes.multiple(maxByteArray, 16)), Bytes.toBytes("006"), + Bytes.add(Bytes.toBytes("006"), Bytes.multiple(maxByteArray, 8)), Bytes.toBytes("007"), + Bytes.add(Bytes.toBytes("007"), Bytes.multiple(maxByteArray, 4)), Bytes.toBytes("008"), + Bytes.multiple(maxByteArray, 2) }; try (Table table = TEST_UTIL.createTable(tableName, FAMILY, splitRows)) { TEST_UTIL.waitUntilAllRegionsAssigned(table.getName()); @@ -2384,9 +2333,9 @@ public void testReversedScanUnderMultiRegions() throws Exception { count++; byte[] thisRow = r.getRow(); if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) - + ",this row=" + Bytes.toString(thisRow), - Bytes.compareTo(thisRow, lastRow) < 0); + assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" + + Bytes.toString(thisRow), + Bytes.compareTo(thisRow, lastRow) < 0); } lastRow = thisRow; } @@ -2402,9 +2351,8 @@ public void testReversedScanUnderMultiRegions() throws Exception { public void testSmallReversedScanUnderMultiRegions() throws Exception { // Test Initialization. 
final TableName tableName = name.getTableName(); - byte[][] splitRows = new byte[][]{ - Bytes.toBytes("000"), Bytes.toBytes("002"), Bytes.toBytes("004"), - Bytes.toBytes("006"), Bytes.toBytes("008"), Bytes.toBytes("010")}; + byte[][] splitRows = new byte[][] { Bytes.toBytes("000"), Bytes.toBytes("002"), + Bytes.toBytes("004"), Bytes.toBytes("006"), Bytes.toBytes("008"), Bytes.toBytes("010") }; try (Table table = TEST_UTIL.createTable(tableName, FAMILY, splitRows)) { TEST_UTIL.waitUntilAllRegionsAssigned(table.getName()); @@ -2452,9 +2400,9 @@ private void reverseScanTest(Table table, ReadType readType) throws IOException count++; byte[] thisRow = r.getRow(); if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) - + ",this row=" + Bytes.toString(thisRow), - Bytes.compareTo(thisRow, lastRow) < 0); + assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" + + Bytes.toString(thisRow), + Bytes.compareTo(thisRow, lastRow) < 0); } lastRow = thisRow; } @@ -2473,9 +2421,9 @@ private void reverseScanTest(Table table, ReadType readType) throws IOException count++; byte[] thisRow = r.getRow(); if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) - + ",this row=" + Bytes.toString(thisRow), - Bytes.compareTo(thisRow, lastRow) < 0); + assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" + + Bytes.toString(thisRow), + Bytes.compareTo(thisRow, lastRow) < 0); } lastRow = thisRow; } @@ -2495,9 +2443,9 @@ private void reverseScanTest(Table table, ReadType readType) throws IOException count++; byte[] thisRow = r.getRow(); if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) - + ",this row=" + Bytes.toString(thisRow), - Bytes.compareTo(thisRow, lastRow) < 0); + assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" + + Bytes.toString(thisRow), + Bytes.compareTo(thisRow, lastRow) < 0); } lastRow = thisRow; } @@ -2516,9 +2464,9 @@ private void reverseScanTest(Table table, ReadType readType) throws IOException count++; byte[] thisRow = r.getRow(); if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) - + ",this row=" + Bytes.toString(thisRow), - Bytes.compareTo(thisRow, lastRow) < 0); + assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" + + Bytes.toString(thisRow), + Bytes.compareTo(thisRow, lastRow) < 0); } lastRow = thisRow; } @@ -2537,9 +2485,9 @@ private void reverseScanTest(Table table, ReadType readType) throws IOException count++; byte[] thisRow = r.getRow(); if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) - + ",this row=" + Bytes.toString(thisRow), - Bytes.compareTo(thisRow, lastRow) < 0); + assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" + + Bytes.toString(thisRow), + Bytes.compareTo(thisRow, lastRow) < 0); } lastRow = thisRow; } @@ -2559,9 +2507,9 @@ private void reverseScanTest(Table table, ReadType readType) throws IOException count++; byte[] thisRow = r.getRow(); if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) - + ",this row=" + Bytes.toString(thisRow), - Bytes.compareTo(thisRow, lastRow) < 0); + assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" + + Bytes.toString(thisRow), + Bytes.compareTo(thisRow, lastRow) < 0); } lastRow = thisRow; } @@ -2587,8 +2535,8 @@ public void 
testFilterAllRecords() throws IOException { public void testCellSizeLimit() throws IOException { final TableName tableName = name.getTableName(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, Integer.toString(10 * 1024)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, Integer.toString(10 * 1024)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); try (Admin admin = TEST_UTIL.getAdmin()) { admin.createTable(tableDescriptor); } @@ -2599,7 +2547,7 @@ public void testCellSizeLimit() throws IOException { } // Will succeed try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { - t.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, new byte[9*1024])); + t.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, new byte[9 * 1024])); } // Will fail try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { @@ -2622,8 +2570,8 @@ public void testCellSizeLimit() throws IOException { public void testCellSizeNoLimit() throws IOException { final TableName tableName = name.getTableName(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, Integer.toString(0)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, Integer.toString(0)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); try (Admin admin = TEST_UTIL.getAdmin()) { admin.createTable(tableDescriptor); @@ -2631,8 +2579,8 @@ public void testCellSizeNoLimit() throws IOException { // Will succeed try (Table ht = TEST_UTIL.getConnection().getTable(tableName)) { - ht.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, new byte[HRegion.DEFAULT_MAX_CELL_SIZE - - 1024])); + ht.put( + new Put(ROW).addColumn(FAMILY, QUALIFIER, new byte[HRegion.DEFAULT_MAX_CELL_SIZE - 1024])); ht.append(new Append(ROW).addColumn(FAMILY, QUALIFIER, new byte[1024 + 1])); } } @@ -2642,7 +2590,7 @@ public void testDeleteSpecifiedVersionOfSpecifiedColumn() throws Exception { final TableName tableName = name.getTableName(); byte[][] VALUES = makeN(VALUE, 5); - long[] ts = {1000, 2000, 3000, 4000, 5000}; + long[] ts = { 1000, 2000, 3000, 4000, 5000 }; try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 5)) { @@ -2663,8 +2611,8 @@ public void testDeleteSpecifiedVersionOfSpecifiedColumn() throws Exception { get.readVersions(Integer.MAX_VALUE); Result result = ht.get(get); // verify version 1000,2000,4000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[0], ts[1], ts[3]}, new byte[][]{ - VALUES[0], VALUES[1], VALUES[3]}, 0, 2); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[1], ts[3] }, + new byte[][] { VALUES[0], VALUES[1], VALUES[3] }, 0, 2); delete = new Delete(ROW); // Delete a version 5000 of column FAMILY:QUALIFIER which didn't exist @@ -2676,8 +2624,8 @@ public void testDeleteSpecifiedVersionOfSpecifiedColumn() throws Exception { get.readVersions(Integer.MAX_VALUE); result = ht.get(get); // verify version 1000,2000,4000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[0], ts[1], ts[3]}, new byte[][]{ - VALUES[0], VALUES[1], VALUES[3]}, 0, 2); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[1], ts[3] }, + new byte[][] { VALUES[0], VALUES[1], VALUES[3] }, 0, 2); } } @@ -2685,7 +2633,7 @@ public void 
testDeleteSpecifiedVersionOfSpecifiedColumn() throws Exception { public void testDeleteLatestVersionOfSpecifiedColumn() throws Exception { final TableName tableName = name.getTableName(); byte[][] VALUES = makeN(VALUE, 5); - long[] ts = {1000, 2000, 3000, 4000, 5000}; + long[] ts = { 1000, 2000, 3000, 4000, 5000 }; try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 5)) { Put put = new Put(ROW); // Put version 1000,2000,3000,4000 of column FAMILY:QUALIFIER @@ -2704,8 +2652,8 @@ public void testDeleteLatestVersionOfSpecifiedColumn() throws Exception { get.readVersions(Integer.MAX_VALUE); Result result = ht.get(get); // verify version 1000,2000,3000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[0], ts[1], ts[2]}, new byte[][]{ - VALUES[0], VALUES[1], VALUES[2]}, 0, 2); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[1], ts[2] }, + new byte[][] { VALUES[0], VALUES[1], VALUES[2] }, 0, 2); delete = new Delete(ROW); // Delete two latest version of column FAMILY:QUALIFIER @@ -2718,8 +2666,8 @@ public void testDeleteLatestVersionOfSpecifiedColumn() throws Exception { get.readVersions(Integer.MAX_VALUE); result = ht.get(get); // verify version 1000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[0]}, new byte[][]{VALUES[0]}, - 0, 0); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0] }, + new byte[][] { VALUES[0] }, 0, 0); put = new Put(ROW); // Put a version 5000 of column FAMILY:QUALIFIER @@ -2731,8 +2679,8 @@ public void testDeleteLatestVersionOfSpecifiedColumn() throws Exception { get.readVersions(Integer.MAX_VALUE); result = ht.get(get); // verify version 1000,5000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[0], ts[4]}, new byte[][]{ - VALUES[0], VALUES[4]}, 0, 1); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[4] }, + new byte[][] { VALUES[0], VALUES[4] }, 0, 1); } } @@ -2746,7 +2694,7 @@ public void testReadWithFilter() throws Exception { byte[] VALUEA = Bytes.toBytes("value-a"); byte[] VALUEB = Bytes.toBytes("value-b"); - long[] ts = {1000, 2000, 3000, 4000}; + long[] ts = { 1000, 2000, 3000, 4000 }; Put put = new Put(ROW); // Put version 1000,2000,3000,4000 of column FAMILY:QUALIFIER @@ -2759,69 +2707,60 @@ public void testReadWithFilter() throws Exception { } table.put(put); - Scan scan = - new Scan().setFilter(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("value-a"))) - .readVersions(3); + Scan scan = new Scan() + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(3); ResultScanner scanner = table.getScanner(scan); Result result = scanner.next(); // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, - 0); - - Get get = - new Get(ROW) - .setFilter(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("value-a"))) - .readVersions(3); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); + + Get get = new Get(ROW) + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(3); result = table.get(get); // ts[0] has gone from user view. 
Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, - 0); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); // Test with max versions 1, it should still read ts[1] - scan = - new Scan().setFilter(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("value-a"))) - .readVersions(1); + scan = new Scan() + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(1); scanner = table.getScanner(scan); result = scanner.next(); // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, - 0); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); // Test with max versions 1, it should still read ts[1] - get = - new Get(ROW) - .setFilter(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("value-a"))) - .readVersions(1); + get = new Get(ROW) + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(1); result = table.get(get); // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, - 0); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); // Test with max versions 5, it should still read ts[1] - scan = - new Scan().setFilter(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("value-a"))) - .readVersions(5); + scan = new Scan() + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(5); scanner = table.getScanner(scan); result = scanner.next(); // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, - 0); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); // Test with max versions 5, it should still read ts[1] - get = - new Get(ROW) - .setFilter(new ValueFilter(CompareOperator.EQUAL, - new SubstringComparator("value-a"))) - .readVersions(5); + get = new Get(ROW) + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(5); result = table.get(get); // ts[0] has gone from user view. 
Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[]{ts[1]}, new byte[][]{VALUEA}, 0, - 0); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); } } @@ -2884,8 +2823,7 @@ public void testCreateTableWithZeroRegionReplicas() throws Exception { TableName tableName = name.getTableName(); TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))) - .setRegionReplication(0) - .build(); + .setRegionReplication(0).build(); TEST_UTIL.getAdmin().createTable(desc); } @@ -2894,13 +2832,11 @@ public void testCreateTableWithZeroRegionReplicas() throws Exception { public void testModifyTableWithZeroRegionReplicas() throws Exception { TableName tableName = name.getTableName(); TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build(); TEST_UTIL.getAdmin().createTable(desc); - TableDescriptor newDesc = TableDescriptorBuilder.newBuilder(desc) - .setRegionReplication(0) - .build(); + TableDescriptor newDesc = + TableDescriptorBuilder.newBuilder(desc).setRegionReplication(0).build(); TEST_UTIL.getAdmin().modifyTable(newDesc); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java index 9990f8763302..37810a82f46d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ * Do some ops and prove that client and server can work w/o codecs; that we can pb all the time. * Good for third-party clients or simple scripts that want to talk direct to hbase. */ -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestFromClientSideNoCodec { @ClassRule @@ -75,13 +75,13 @@ public static void tearDownAfterClass() throws Exception { @Test public void testBasics() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); - final byte [][] fs = new byte[][] {Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), - Bytes.toBytes("cf3") }; + final byte[][] fs = + new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; Table ht = TEST_UTIL.createTable(tableName, fs); // Check put and get. 
- final byte [] row = Bytes.toBytes("row"); + final byte[] row = Bytes.toBytes("row"); Put p = new Put(row); - for (byte [] f: fs) { + for (byte[] f : fs) { p.addColumn(f, f, f); } ht.put(p); @@ -89,10 +89,9 @@ public void testBasics() throws IOException { int i = 0; for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); - byte [] f = fs[i++]; - assertTrue(Bytes.toString(f), - Bytes.equals(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), - f, 0, f.length)); + byte[] f = fs[i++]; + assertTrue(Bytes.toString(f), Bytes.equals(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength(), f, 0, f.length)); } // Check getRowOrBefore byte[] f = fs[0]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java index c4932415b7c7..c215df8b6a70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.java index 48ad5755693b..7b6a3ef1c6d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java index cd93b3870742..2ace2f97f945 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java @@ -32,8 +32,8 @@ /** * Test all client operations with a coprocessor that just implements the default flush/compact/scan * policy. - * - *

      Base class was split into three so this class got split into three. See below for other parts. + *
      + * Base class was split into three so this class got split into three. See below for other parts. * @see TestFromClientSide4 * @see TestFromClientSide5 */ @@ -41,16 +41,14 @@ public class TestFromClientSideWithCoprocessor extends TestFromClientSide { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSideWithCoprocessor.class); + HBaseClassTestRule.forClass(TestFromClientSideWithCoprocessor.class); // Override the parameters from the parent class. We just want to run it for the default // param combination. @Parameterized.Parameters public static Collection parameters() { - return Arrays.asList(new Object[][] { - { MasterRegistry.class, 1}, - { ZKConnectionRegistry.class, 1} - }); + return Arrays + .asList(new Object[][] { { MasterRegistry.class, 1 }, { ZKConnectionRegistry.class, 1 } }); } @AfterClass @@ -60,6 +58,6 @@ public static void tearDownAfterClass() throws Exception { public TestFromClientSideWithCoprocessor(Class registry, int numHedgedReqs) throws Exception { initialize(registry, numHedgedReqs, NoOpScanPolicyObserver.class, - MultiRowMutationEndpoint.class); + MultiRowMutationEndpoint.class); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor4.java index 4d02d185d065..44e4ff8c8c42 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor4.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor4.java @@ -32,23 +32,21 @@ /** * Test all client operations with a coprocessor that just implements the default flush/compact/scan * policy. - * - *

      Base class was split into three so this class got split into three. + *
      + * Base class was split into three so this class got split into three. */ @Category({ LargeTests.class, ClientTests.class }) public class TestFromClientSideWithCoprocessor4 extends TestFromClientSide4 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSideWithCoprocessor4.class); + HBaseClassTestRule.forClass(TestFromClientSideWithCoprocessor4.class); // Override the parameters from the parent class. We just want to run it for the default // param combination. @Parameterized.Parameters public static Collection parameters() { - return Arrays.asList(new Object[][] { - { MasterRegistry.class, 1}, - { ZKConnectionRegistry.class, 1} - }); + return Arrays + .asList(new Object[][] { { MasterRegistry.class, 1 }, { ZKConnectionRegistry.class, 1 } }); } @AfterClass @@ -58,6 +56,6 @@ public static void tearDownAfterClass() throws Exception { public TestFromClientSideWithCoprocessor4(Class registry, int numHedgedReqs) throws Exception { initialize(registry, numHedgedReqs, NoOpScanPolicyObserver.class, - MultiRowMutationEndpoint.class); + MultiRowMutationEndpoint.class); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor5.java index 46f80d3f847c..428536406fa0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor5.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor5.java @@ -37,16 +37,14 @@ public class TestFromClientSideWithCoprocessor5 extends TestFromClientSide5 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSideWithCoprocessor5.class); + HBaseClassTestRule.forClass(TestFromClientSideWithCoprocessor5.class); // Override the parameters from the parent class. We just want to run it for the default // param combination. @Parameterized.Parameters public static Collection parameters() { - return Arrays.asList(new Object[][] { - { MasterRegistry.class, 1}, - { ZKConnectionRegistry.class, 1} - }); + return Arrays + .asList(new Object[][] { { MasterRegistry.class, 1 }, { ZKConnectionRegistry.class, 1 } }); } @AfterClass @@ -56,6 +54,6 @@ public static void tearDownAfterClass() throws Exception { public TestFromClientSideWithCoprocessor5(Class registry, int numHedgedReqs) throws Exception { initialize(registry, numHedgedReqs, NoOpScanPolicyObserver.class, - MultiRowMutationEndpoint.class); + MultiRowMutationEndpoint.class); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java index a6f3e2238ded..5ce34077229a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -115,14 +115,14 @@ public static void tearDown() throws Exception { private GetProcedureResultResponse.State getState(long procId) throws MasterNotRunningException, IOException, ServiceException { GetProcedureResultResponse resp = UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices() - .getProcedureResult(null, GetProcedureResultRequest.newBuilder().setProcId(procId).build()); + .getProcedureResult(null, GetProcedureResultRequest.newBuilder().setProcId(procId).build()); return resp.getState(); } @Test public void testRace() throws Exception { ProcedureExecutor executor = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); DummyProcedure p = new DummyProcedure(); long procId = executor.submitProcedure(p); p.failureSet.await(); @@ -137,8 +137,8 @@ public boolean evaluate() throws Exception { @Override public String explainFailure() throws Exception { - return "Procedure pid=" + procId + " is still in " + getState(procId) + - " state, expected " + GetProcedureResultResponse.State.FINISHED; + return "Procedure pid=" + procId + " is still in " + getState(procId) + " state, expected " + + GetProcedureResultResponse.State.FINISHED; } }); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanColumnsWithNewVersionBehavior.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanColumnsWithNewVersionBehavior.java index cb4b260434ab..4212941c80c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanColumnsWithNewVersionBehavior.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanColumnsWithNewVersionBehavior.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,13 @@ import static org.junit.Assert.assertArrayEquals; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.AfterClass; @@ -33,9 +35,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import java.util.List; -import java.util.ArrayList; - /** * Testcase for HBASE-21032, where use the wrong readType from a Scan instance which is actually a * get scan and cause returning only 1 cell per rpc call. 
@@ -45,7 +44,7 @@ public class TestGetScanColumnsWithNewVersionBehavior { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGetScanColumnsWithNewVersionBehavior.class); + HBaseClassTestRule.forClass(TestGetScanColumnsWithNewVersionBehavior.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE = TableName.valueOf("table"); @@ -59,14 +58,10 @@ public class TestGetScanColumnsWithNewVersionBehavior { @BeforeClass public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(1); - ColumnFamilyDescriptor cd = ColumnFamilyDescriptorBuilder - .newBuilder(CF) - .setNewVersionBehavior(true) - .build(); - TEST_UTIL.createTable(TableDescriptorBuilder - .newBuilder(TABLE) - .setColumnFamily(cd) - .build(), null); + ColumnFamilyDescriptor cd = + ColumnFamilyDescriptorBuilder.newBuilder(CF).setNewVersionBehavior(true).build(); + TEST_UTIL.createTable(TableDescriptorBuilder.newBuilder(TABLE).setColumnFamily(cd).build(), + null); } @AfterClass @@ -77,7 +72,7 @@ public static void tearDown() throws Exception { @Test public void test() throws IOException { try (Table t = TEST_UTIL.getConnection().getTable(TABLE)) { - Cell [] expected = new Cell[2]; + Cell[] expected = new Cell[2]; expected[0] = new KeyValue(ROW, CF, COLA, TS, COLA); expected[1] = new KeyValue(ROW, CF, COLC, TS, COLC); @@ -101,7 +96,7 @@ public void test() throws IOException { ResultScanner scanner = t.getScanner(scan); List scanResult = new ArrayList(); for (Result result = scanner.next(); (result != null); result = scanner.next()) { - scanResult.addAll(result.listCells()); + scanResult.addAll(result.listCells()); } assertArrayEquals(expected, scanResult.toArray(new Cell[scanResult.size()])); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanPartialResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanPartialResult.java index 222cd12ee503..54943559b94d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanPartialResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetScanPartialResult.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestGetScanPartialResult { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGetScanPartialResult.class); + HBaseClassTestRule.forClass(TestGetScanPartialResult.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE = TableName.valueOf("table"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java index 4d3cd393ef02..c40151092558 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Arrays; import java.util.HashMap; @@ -66,6 +67,7 @@ import org.junit.runners.Parameterized.Parameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; /** @@ -84,7 +86,8 @@ public class TestHbck { @Rule public TestName name = new TestName(); - @SuppressWarnings("checkstyle:VisibilityModifier") @Parameter + @SuppressWarnings("checkstyle:VisibilityModifier") + @Parameter public boolean async; private static final TableName TABLE_NAME = TableName.valueOf(TestHbck.class.getSimpleName()); @@ -178,7 +181,7 @@ public void testSetTableStateInMeta() throws Exception { // Method {@link Hbck#setTableStateInMeta()} returns previous state, which in this case // will be DISABLED TableState prevState = - hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED)); + hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED)); assertTrue("Incorrect previous state! expeced=DISABLED, found=" + prevState.getState(), prevState.isDisabled()); } @@ -219,51 +222,50 @@ public void testAssigns() throws Exception { List regions = admin.getRegions(TABLE_NAME); for (RegionInfo ri : regions) { RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getRegionState(ri.getEncodedName()); + .getRegionStates().getRegionState(ri.getEncodedName()); LOG.info("RS: {}", rs.toString()); } - List pids = - hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList())); + List pids = hbck + .unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList())); waitOnPids(pids); // Rerun the unassign. Should fail for all Regions since they already unassigned; failed // unassign will manifest as all pids being -1 (ever since HBASE-24885). - pids = - hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList())); + pids = hbck + .unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList())); waitOnPids(pids); - for (long pid: pids) { + for (long pid : pids) { assertEquals(Procedure.NO_PROC_ID, pid); } // If we pass override, then we should be able to unassign EVEN THOUGH Regions already // unassigned.... makes for a mess but operator might want to do this at an extreme when // doing fixup of broke cluster. 
- pids = - hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()), - true); + pids = hbck.unassigns( + regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()), true); waitOnPids(pids); - for (long pid: pids) { + for (long pid : pids) { assertNotEquals(Procedure.NO_PROC_ID, pid); } // Clean-up by bypassing all the unassigns we just made so tests can continue. hbck.bypassProcedure(pids, 10000, true, true); for (RegionInfo ri : regions) { RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getRegionState(ri.getEncodedName()); + .getRegionStates().getRegionState(ri.getEncodedName()); LOG.info("RS: {}", rs.toString()); assertTrue(rs.toString(), rs.isClosed()); } pids = - hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList())); + hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList())); waitOnPids(pids); // Rerun the assign. Should fail for all Regions since they already assigned; failed // assign will manifest as all pids being -1 (ever since HBASE-24885). pids = - hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList())); - for (long pid: pids) { + hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList())); + for (long pid : pids) { assertEquals(Procedure.NO_PROC_ID, pid); } for (RegionInfo ri : regions) { RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getRegionState(ri.getEncodedName()); + .getRegionStates().getRegionState(ri.getEncodedName()); LOG.info("RS: {}", rs.toString()); assertTrue(rs.toString(), rs.isOpened()); } @@ -283,13 +285,11 @@ public void testScheduleSCP() throws Exception { true); ServerName serverName = testRs.getServerName(); Hbck hbck = getHbck(); - List pids = - hbck.scheduleServerCrashProcedures(Arrays.asList(serverName)); + List pids = hbck.scheduleServerCrashProcedures(Arrays.asList(serverName)); assertTrue(pids.get(0) > 0); LOG.info("pid is {}", pids.get(0)); - List newPids = - hbck.scheduleServerCrashProcedures(Arrays.asList(serverName)); + List newPids = hbck.scheduleServerCrashProcedures(Arrays.asList(serverName)); assertTrue(newPids.get(0) < 0); LOG.info("pid is {}", newPids.get(0)); waitOnPids(pids); @@ -311,7 +311,8 @@ public void testRunHbckChore() throws Exception { public static class FailingSplitAfterMetaUpdatedMasterObserver implements MasterCoprocessor, MasterObserver { - @SuppressWarnings("checkstyle:VisibilityModifier") public volatile CountDownLatch latch; + @SuppressWarnings("checkstyle:VisibilityModifier") + public volatile CountDownLatch latch; @Override public void start(CoprocessorEnvironment e) throws IOException { @@ -338,7 +339,8 @@ public void resetLatch() { public static class FailingMergeAfterMetaUpdatedMasterObserver implements MasterCoprocessor, MasterObserver { - @SuppressWarnings("checkstyle:VisibilityModifier") public volatile CountDownLatch latch; + @SuppressWarnings("checkstyle:VisibilityModifier") + public volatile CountDownLatch latch; @Override public void start(CoprocessorEnvironment e) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java index 2ea885274720..cb1ae878ed0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestIllegalTableDescriptor { @ClassRule @@ -56,7 +56,7 @@ public class TestIllegalTableDescriptor { protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); @Rule public TestName name = new TestName(); @@ -84,7 +84,7 @@ public static void tearDownAfterClass() throws Exception { @Test public void testIllegalTableDescriptor() throws Exception { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY); // create table with 0 families @@ -142,7 +142,7 @@ public void testIllegalTableDescriptor() throws Exception { checkTableIsLegal(builder.modifyColumnFamily(cfBuilder.build()).build()); // HBASE-13776 Setting illegal versions for HColumnDescriptor - // does not throw IllegalArgumentException + // does not throw IllegalArgumentException // finally, minVersions must be less than or equal to maxVersions cfBuilder.setMaxVersions(4); cfBuilder.setMinVersions(5); @@ -199,7 +199,7 @@ private void checkTableIsIllegal(TableDescriptor tableDescriptor) throws IOExcep try { admin.createTable(tableDescriptor); fail(); - } catch(Exception ex) { + } catch (Exception ex) { // should throw ex } assertFalse(admin.tableExists(tableDescriptor.getTableName())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java index 77a2a0c4423e..c08ddd0b63ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestIncreaseMetaReplicaThroughConfig extends MetaWithReplicasTestBa @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncreaseMetaReplicaThroughConfig.class); + HBaseClassTestRule.forClass(TestIncreaseMetaReplicaThroughConfig.class); @BeforeClass public static void setUp() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java index 368750e2856b..d0c74d136acb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,11 +28,9 @@ import org.junit.experimental.categories.Category; /** - * Test all {@link Increment} client operations with a coprocessor that - * just implements the default flush/compact/scan policy. - * - * This test takes a long time. The test it derives from is parameterized so we run through both - * options of the test. + * Test all {@link Increment} client operations with a coprocessor that just implements the default + * flush/compact/scan policy. This test takes a long time. The test it derives from is parameterized + * so we run through both options of the test. */ @Category(LargeTests.class) public class TestIncrementFromClientSideWithCoprocessor extends TestIncrementsFromClientSide { @@ -45,6 +43,6 @@ public class TestIncrementFromClientSideWithCoprocessor extends TestIncrementsFr public void before() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName()); + MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java index fdfe2df6f42e..35fb85791039 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,13 +60,11 @@ import org.slf4j.LoggerFactory; /** - * Run Increment tests that use the HBase clients; {@link TableBuilder}. - * - * Test is parameterized to run the slow and fast increment code paths. If fast, in the @before, we - * do a rolling restart of the single regionserver so that it can pick up the go fast configuration. - * Doing it this way should be faster than starting/stopping a cluster per test. - * - * Test takes a long time because spin up a cluster between each run -- ugh. + * Run Increment tests that use the HBase clients; {@link TableBuilder}. 
Test is parameterized to + * run the slow and fast increment code paths. If fast, in the @before, we do a rolling restart of + * the single regionserver so that it can pick up the go fast configuration. Doing it this way + * should be faster than starting/stopping a cluster per test. Test takes a long time because spin + * up a cluster between each run -- ugh. */ @Category(LargeTests.class) public class TestIncrementsFromClientSide { @@ -77,19 +75,20 @@ public class TestIncrementsFromClientSide { final Logger LOG = LoggerFactory.getLogger(getClass()); protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static byte [] ROW = Bytes.toBytes("testRow"); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte[] ROW = Bytes.toBytes("testRow"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); // This test depends on there being only one slave running at at a time. See the @Before // method where we do rolling restart. protected static int SLAVES = 1; - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); @BeforeClass public static void beforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - MultiRowMutationEndpoint.class.getName()); + MultiRowMutationEndpoint.class.getName()); // We need more than one region server in this test TEST_UTIL.startMiniCluster(SLAVES); } @@ -108,14 +107,11 @@ public static void afterClass() throws Exception { @Test public void testDuplicateIncrement() throws Exception { TableDescriptorBuilder builder = - TEST_UTIL.createModifyableTableDescriptor(name.getMethodName()); + TEST_UTIL.createModifyableTableDescriptor(name.getMethodName()); Map kvs = new HashMap<>(); kvs.put(SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000"); builder.setCoprocessor(CoprocessorDescriptorBuilder - .newBuilder(SleepAtFirstRpcCall.class.getName()) - .setPriority(1) - .setProperties(kvs) - .build()); + .newBuilder(SleepAtFirstRpcCall.class.getName()).setPriority(1).setProperties(kvs).build()); TEST_UTIL.createTable(builder.build(), new byte[][] { ROW }).close(); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -125,7 +121,7 @@ public void testDuplicateIncrement() throws Exception { try (Connection connection = ConnectionFactory.createConnection(c); Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null) - .setOperationTimeout(3 * 1000).build()) { + .setOperationTimeout(3 * 1000).build()) { Increment inc = new Increment(ROW); inc.addColumn(HBaseTestingUtil.fam1, QUALIFIER, 1); Result result = table.increment(inc); @@ -148,14 +144,11 @@ public void testDuplicateIncrement() throws Exception { @Test public void testDuplicateBatchIncrement() throws Exception { TableDescriptorBuilder builder = - TEST_UTIL.createModifyableTableDescriptor(name.getMethodName()); + TEST_UTIL.createModifyableTableDescriptor(name.getMethodName()); Map kvs = new HashMap<>(); kvs.put(SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000"); builder.setCoprocessor(CoprocessorDescriptorBuilder - .newBuilder(SleepAtFirstRpcCall.class.getName()) - .setPriority(1) - .setProperties(kvs) - .build()); + .newBuilder(SleepAtFirstRpcCall.class.getName()).setPriority(1).setProperties(kvs).build()); TEST_UTIL.createTable(builder.build(), new byte[][] { ROW 
}).close(); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -164,8 +157,8 @@ public void testDuplicateBatchIncrement() throws Exception { c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); try (Connection connection = ConnectionFactory.createConnection(c); - Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null) - .setOperationTimeout(3 * 1000).build()) { + Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null) + .setOperationTimeout(3 * 1000).build()) { Increment inc = new Increment(ROW); inc.addColumn(HBaseTestingUtil.fam1, QUALIFIER, 1); @@ -251,8 +244,8 @@ public void testBatchIncrementsWithReturnResultFalse() throws Exception { Object[] results = new Object[2]; table.batch(incs, results); assertTrue(results.length == 2); - for(Object r : results) { - Result result = (Result)r; + for (Object r : results) { + Result result = (Result) r; assertTrue(result.isEmpty()); } table.close(); @@ -262,7 +255,7 @@ public void testBatchIncrementsWithReturnResultFalse() throws Exception { public void testIncrementInvalidArguments() throws Exception { LOG.info("Starting " + this.name.getMethodName()); final TableName TABLENAME = - TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); + TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY); final byte[] COLUMN = Bytes.toBytes("column"); try { @@ -304,12 +297,11 @@ public void testIncrementOutOfOrder() throws Exception { TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName())); Table ht = TEST_UTIL.createTable(TABLENAME, FAMILY); - byte [][] QUALIFIERS = new byte [][] { - Bytes.toBytes("B"), Bytes.toBytes("A"), Bytes.toBytes("C") - }; + byte[][] QUALIFIERS = + new byte[][] { Bytes.toBytes("B"), Bytes.toBytes("A"), Bytes.toBytes("C") }; Increment inc = new Increment(ROW); - for (int i=0; i tags = PrivateCellUtil.getTags(cell); // Make sure there is only 1 tag. assertEquals(1, tags.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java index 748d896d2f28..54f55de435d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ /** * Test scan/get offset and limit settings within one row through HRegion API. 
*/ -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestIntraRowPagination { @ClassRule @@ -50,20 +50,20 @@ public class TestIntraRowPagination { */ @Test public void testScanLimitAndOffset() throws Exception { - //byte [] TABLE = HTestConst.DEFAULT_TABLE_BYTES; - byte [][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2); - byte [][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3); - byte [][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10); + // byte [] TABLE = HTestConst.DEFAULT_TABLE_BYTES; + byte[][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2); + byte[][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3); + byte[][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10); TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(HTestConst.DEFAULT_TABLE_BYTES)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(HTestConst.DEFAULT_TABLE_BYTES)); RegionInfo info = RegionInfoBuilder.newBuilder(HTestConst.DEFAULT_TABLE).build(); for (byte[] family : FAMILIES) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } HRegion region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), builder.build()); + TEST_UTIL.getConfiguration(), builder.build()); try { Put put; Scan scan; @@ -101,7 +101,7 @@ public void testScanLimitAndOffset() throws Exception { } result = Result.create(kvListScan); TestScannersFromClientSide.verifyResult(result, kvListExp, toLog, - "Testing scan with storeOffset and storeLimit"); + "Testing scan with storeOffset and storeLimit"); } finally { HBaseTestingUtil.closeRegionAndWAL(region); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestInvalidMutationDurabilityException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestInvalidMutationDurabilityException.java index f5142cd62810..4f69f3ccc2c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestInvalidMutationDurabilityException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestInvalidMutationDurabilityException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,9 +57,11 @@ public static void setUp() throws Exception { UTIL.startMiniCluster(); UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TABLE_NOT_REPLICATE) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF).build()).build()); - UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TABLE_NEED_REPLICATE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build()); + UTIL.getAdmin() + .createTable(TableDescriptorBuilder.newBuilder(TABLE_NEED_REPLICATE) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .build()); tableNotReplicate = UTIL.getConnection().getTable(TABLE_NOT_REPLICATE); tableNeedReplicate = UTIL.getConnection().getTable(TABLE_NEED_REPLICATE); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java index cecd87fa3657..395b8bc57fab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLimitedScanWithFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java index 20ee1993872e..0a1dfb386a7f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMalformedCellFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,7 +66,7 @@ public class TestMalformedCellFromClient { private static final Logger LOG = LoggerFactory.getLogger(TestMalformedCellFromClient.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMalformedCellFromClient.class); + HBaseClassTestRule.forClass(TestMalformedCellFromClient.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] FAMILY = Bytes.toBytes("testFamily"); @@ -83,8 +83,8 @@ public static void setUpBeforeClass() throws Exception { @Before public void before() throws Exception { TableDescriptor desc = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, String.valueOf(CELL_SIZE)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, String.valueOf(CELL_SIZE)).build(); TEST_UTIL.getConnection().getAdmin().createTable(desc); } @@ -101,10 +101,9 @@ public static void tearDownAfterClass() throws Exception { } /** - * The purpose of this ut is to check the consistency between the exception and results. - * If the RetriesExhaustedWithDetailsException contains the whole batch, - * each result should be of IOE. 
Otherwise, the row operation which is not in the exception - * should have a true result. + * The purpose of this ut is to check the consistency between the exception and results. If the + * RetriesExhaustedWithDetailsException contains the whole batch, each result should be of IOE. + * Otherwise, the row operation which is not in the exception should have a true result. */ @Test public void testRegionException() throws InterruptedException, IOException { @@ -158,8 +157,8 @@ public void testRegionExceptionByAsync() throws Exception { RowMutations rm = new RowMutations(Bytes.toBytes("fail")); rm.add(new Put(rm.getRow()).addColumn(FAMILY, null, new byte[CELL_SIZE])); batches.add(rm); - try (AsyncConnection asyncConnection = ConnectionFactory - .createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { + try (AsyncConnection asyncConnection = + ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { AsyncTable table = asyncConnection.getTable(TABLE_NAME); List> results = table.batch(batches); assertEquals(2, results.size()); @@ -191,13 +190,14 @@ public void testAtomicOperations() throws Exception { // build the request HRegion r = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0); ClientProtos.MultiRequest request = - ClientProtos.MultiRequest.newBuilder(createRequest(rm, r.getRegionInfo().getRegionName())) - .addRegionAction(ClientProtos.RegionAction.newBuilder().setRegion(RequestConverter - .buildRegionSpecifier(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, - r.getRegionInfo().getRegionName())).addAction(ClientProtos.Action.newBuilder() - .setMutation( - ProtobufUtil.toMutationNoData(ClientProtos.MutationProto.MutationType.PUT, put)))) - .build(); + ClientProtos.MultiRequest.newBuilder(createRequest(rm, r.getRegionInfo().getRegionName())) + .addRegionAction(ClientProtos.RegionAction.newBuilder() + .setRegion(RequestConverter.buildRegionSpecifier( + HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, + r.getRegionInfo().getRegionName())) + .addAction(ClientProtos.Action.newBuilder().setMutation( + ProtobufUtil.toMutationNoData(ClientProtos.MutationProto.MutationType.PUT, put)))) + .build(); List cells = new ArrayList<>(); for (Mutation m : rm.getMutations()) { @@ -207,9 +207,9 @@ public void testAtomicOperations() throws Exception { assertEquals(3, cells.size()); HBaseRpcController controller = Mockito.mock(HBaseRpcController.class); Mockito.when(controller.cellScanner()).thenReturn(CellUtil.createCellScanner(cells)); - HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer( - TEST_UTIL.getMiniHBaseCluster() - .getServerHoldingRegion(TABLE_NAME, r.getRegionInfo().getRegionName())); + HRegionServer rs = + TEST_UTIL.getMiniHBaseCluster().getRegionServer(TEST_UTIL.getMiniHBaseCluster() + .getServerHoldingRegion(TABLE_NAME, r.getRegionInfo().getRegionName())); ClientProtos.MultiResponse response = rs.getRSRpcServices().multi(controller, request); assertEquals(2, response.getRegionActionResultCount()); @@ -227,9 +227,9 @@ public void testAtomicOperations() throws Exception { } private static ClientProtos.MultiRequest createRequest(RowMutations rm, byte[] regionName) - throws IOException { + throws IOException { ClientProtos.RegionAction.Builder builder = RequestConverter - .getRegionActionBuilderWithRegion(ClientProtos.RegionAction.newBuilder(), regionName); + .getRegionActionBuilderWithRegion(ClientProtos.RegionAction.newBuilder(), regionName); builder.setAtomic(true); ClientProtos.Action.Builder actionBuilder = 
ClientProtos.Action.newBuilder(); ClientProtos.MutationProto.Builder mutationBuilder = ClientProtos.MutationProto.newBuilder(); @@ -243,11 +243,11 @@ private static ClientProtos.MultiRequest createRequest(RowMutations rm, byte[] r mutateType = ClientProtos.MutationProto.MutationType.DELETE; } else { throw new DoNotRetryIOException( - "RowMutations supports only put and delete, not " + mutation.getClass().getName()); + "RowMutations supports only put and delete, not " + mutation.getClass().getName()); } mutationBuilder.clear(); ClientProtos.MutationProto mp = - ProtobufUtil.toMutationNoData(mutateType, mutation, mutationBuilder); + ProtobufUtil.toMutationNoData(mutateType, mutation, mutationBuilder); actionBuilder.clear(); actionBuilder.setMutation(mp); builder.addAction(actionBuilder.build()); @@ -258,11 +258,9 @@ private static ClientProtos.MultiRequest createRequest(RowMutations rm, byte[] r } /** - * This test depends on how regionserver process the batch ops. - * 1) group the put/delete until meeting the increment - * 2) process the batch of put/delete - * 3) process the increment - * see RSRpcServices#doNonAtomicRegionMutation + * This test depends on how regionserver process the batch ops. 1) group the put/delete until + * meeting the increment 2) process the batch of put/delete 3) process the increment see + * RSRpcServices#doNonAtomicRegionMutation */ @Test public void testNonAtomicOperations() throws InterruptedException, IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java index 1cc176e2951d..74507126e5b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java @@ -53,7 +53,7 @@ public class TestMasterRegistry { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterRegistry.class); + HBaseClassTestRule.forClass(TestMasterRegistry.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @BeforeClass @@ -122,7 +122,7 @@ public void testRegistryRPCs() throws Exception { assertEquals(registry.getClusterId().get(), activeMaster.getClusterId()); assertEquals(registry.getActiveMaster().get(), activeMaster.getServerName()); List metaLocations = - Arrays.asList(registry.getMetaRegionLocations().get().getRegionLocations()); + Arrays.asList(registry.getMetaRegionLocations().get().getRegionLocations()); List actualMetaLocations = activeMaster.getMetaLocations(); Collections.sort(metaLocations); Collections.sort(actualMetaLocations); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java index 4ad2b2de8bc6..ea2d6bd1382f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestMetaCache { @ClassRule @@ -87,8 +87,8 @@ public static void setUpBeforeClass() throws Exception { badRS = TEST_UTIL.getHBaseCluster().getRegionServer(0); assertTrue(badRS.getRSRpcServices() instanceof FakeRSRpcServices); TableDescriptor desc = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(2).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(2).build()) + .build(); TEST_UTIL.createTable(desc, null); } @@ -115,9 +115,9 @@ private void setupConnection(int retry) throws IOException { @Test public void testPreserveMetaCacheOnException() throws Exception { ((FakeRSRpcServices) badRS.getRSRpcServices()) - .setExceptionInjector(new RoundRobinExceptionInjector()); + .setExceptionInjector(new RoundRobinExceptionInjector()); setupConnection(1); - try (Table table = conn.getTable(TABLE_NAME)){ + try (Table table = conn.getTable(TABLE_NAME)) { byte[] row = Bytes.toBytes("row1"); Put put = new Put(row); @@ -166,7 +166,7 @@ public void testPreserveMetaCacheOnException() throws Exception { @Test public void testCacheClearingOnCallQueueTooBig() throws Exception { ((FakeRSRpcServices) badRS.getRSRpcServices()) - .setExceptionInjector(new CallQueueTooBigExceptionInjector()); + .setExceptionInjector(new CallQueueTooBigExceptionInjector()); setupConnection(2); Table table = conn.getTable(TABLE_NAME); byte[] row = Bytes.toBytes("row1"); @@ -206,7 +206,7 @@ public static class RegionServerWithFakeRpcServices extends HRegionServer { private FakeRSRpcServices rsRpcServices; public RegionServerWithFakeRpcServices(Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { super(conf); } @@ -235,8 +235,8 @@ public void setExceptionInjector(ExceptionInjector injector) { } @Override - public GetResponse get(final RpcController controller, - final ClientProtos.GetRequest request) throws ServiceException { + public GetResponse get(final RpcController controller, final ClientProtos.GetRequest request) + throws ServiceException { exceptions.throwOnGet(this, request); return super.get(controller, request); } @@ -258,10 +258,10 @@ public ClientProtos.ScanResponse scan(final RpcController controller, public static abstract class ExceptionInjector { protected boolean isTestTable(FakeRSRpcServices rpcServices, - HBaseProtos.RegionSpecifier regionSpec) throws ServiceException { + HBaseProtos.RegionSpecifier regionSpec) throws ServiceException { try { - return TABLE_NAME.equals( - rpcServices.getRegion(regionSpec).getTableDescriptor().getTableName()); + return TABLE_NAME + .equals(rpcServices.getRegion(regionSpec).getTableDescriptor().getTableName()); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -270,16 +270,15 @@ protected boolean isTestTable(FakeRSRpcServices rpcServices, public abstract void throwOnGet(FakeRSRpcServices rpcServices, ClientProtos.GetRequest request) throws ServiceException; - public abstract void throwOnMutate(FakeRSRpcServices rpcServices, ClientProtos.MutateRequest request) - throws ServiceException; + public abstract void 
throwOnMutate(FakeRSRpcServices rpcServices, + ClientProtos.MutateRequest request) throws ServiceException; - public abstract void throwOnScan(FakeRSRpcServices rpcServices, ClientProtos.ScanRequest request) - throws ServiceException; + public abstract void throwOnScan(FakeRSRpcServices rpcServices, + ClientProtos.ScanRequest request) throws ServiceException; } /** - * Rotates through the possible cache clearing and non-cache clearing exceptions - * for requests. + * Rotates through the possible cache clearing and non-cache clearing exceptions for requests. */ public static class RoundRobinExceptionInjector extends ExceptionInjector { private int numReqs = -1; @@ -308,13 +307,12 @@ public void throwOnScan(FakeRSRpcServices rpcServices, ClientProtos.ScanRequest } /** - * Throw some exceptions. Mostly throw exceptions which do not clear meta cache. - * Periodically throw NotSevingRegionException which clears the meta cache. + * Throw some exceptions. Mostly throw exceptions which do not clear meta cache. Periodically + * throw NotSevingRegionException which clears the meta cache. * @throws ServiceException */ private void throwSomeExceptions(FakeRSRpcServices rpcServices, - HBaseProtos.RegionSpecifier regionSpec) - throws ServiceException { + HBaseProtos.RegionSpecifier regionSpec) throws ServiceException { if (!isTestTable(rpcServices, regionSpec)) { return; } @@ -322,7 +320,7 @@ private void throwSomeExceptions(FakeRSRpcServices rpcServices, numReqs++; // Succeed every 5 request, throw cache clearing exceptions twice every 5 requests and throw // meta cache preserving exceptions otherwise. - if (numReqs % 5 ==0) { + if (numReqs % 5 == 0) { return; } else if (numReqs % 5 == 1 || numReqs % 5 == 2) { throw new ServiceException(new NotServingRegionException()); @@ -332,8 +330,8 @@ private void throwSomeExceptions(FakeRSRpcServices rpcServices, // But, we don't really care here if we throw MultiActionTooLargeException while doing // single Gets. 
expCount++; - Throwable t = metaCachePreservingExceptions.get( - expCount % metaCachePreservingExceptions.size()); + Throwable t = + metaCachePreservingExceptions.get(expCount % metaCachePreservingExceptions.size()); throw new ServiceException(t); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index bf795bb1cd4c..88f594d27197 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -50,7 +50,7 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({SmallTests.class, MasterTests.class }) +@Category({ SmallTests.class, MasterTests.class }) public class TestMetaRegionLocationCache { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -76,7 +76,7 @@ public static void cleanUp() throws Exception { private List getCurrentMetaLocations(ZKWatcher zk) throws Exception { List result = new ArrayList<>(); - for (String znode: zk.getMetaReplicaNodes()) { + for (String znode : zk.getMetaReplicaNodes()) { String path = ZNodePaths.joinZNode(zk.getZNodePaths().baseZNode, znode); int replicaId = zk.getZNodePaths().getMetaReplicaIdFromPath(path); RegionState state = MetaTableLocator.getMetaRegionState(zk, replicaId); @@ -100,7 +100,7 @@ private void verifyCachedMetaLocations(HMaster master) throws Exception { // Wait till all replicas available. retries = 0; while (master.getMetaRegionLocationCache().getMetaRegionLocations().size() != metaZnodes - .size()) { + .size()) { Thread.sleep(1000); if (++retries == 10) { break; @@ -129,16 +129,17 @@ public void testStandByMetaLocations() throws Exception { /* * Shuffles the meta region replicas around the cluster and makes sure the cache is not stale. */ - @Test public void testMetaLocationsChange() throws Exception { + @Test + public void testMetaLocationsChange() throws Exception { List currentMetaLocs = getCurrentMetaLocations(TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper()); // Move these replicas to random servers. - for (HRegionLocation location: currentMetaLocs) { + for (HRegionLocation location : currentMetaLocs) { RegionReplicaTestHelper.moveRegion(TEST_UTIL, location); } RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); - for (JVMClusterUtil.MasterThread masterThread: - TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) { + for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster() + .getMasterThreads()) { verifyCachedMetaLocations(masterThread.getMaster()); } } @@ -147,7 +148,8 @@ public void testStandByMetaLocations() throws Exception { * Tests MetaRegionLocationCache's init procedure to make sure that it correctly watches the base * znode for notifications. */ - @Test public void testMetaRegionLocationCache() throws Exception { + @Test + public void testMetaRegionLocationCache() throws Exception { final String parentZnodeName = "/randomznodename"; Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parentZnodeName); @@ -157,7 +159,8 @@ public void testStandByMetaLocations() throws Exception { // some ZK activity in the background. 
MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf); ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) { - @Override public void doAnAction() throws Exception { + @Override + public void doAnAction() throws Exception { final String testZnode = parentZnodeName + "/child"; ZKUtil.createNodeIfNotExistsAndWatch(zkWatcher, testZnode, testZnode.getBytes()); ZKUtil.deleteNode(zkWatcher, testZnode); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java index fe105848c865..731422508328 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaReplicasAddressChange.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestMetaReplicasAddressChange extends MetaWithReplicasTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaReplicasAddressChange.class); + HBaseClassTestRule.forClass(TestMetaReplicasAddressChange.class); private static final Logger LOG = LoggerFactory.getLogger(TestMetaReplicasAddressChange.class); @@ -64,16 +64,16 @@ public void testMetaAddressChange() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); String baseZNode = - conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - String primaryMetaZnode = - ZNodePaths.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server")); + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode, + conf.get("zookeeper.znode.metaserver", "meta-region-server")); // check that the data in the znode is parseable (this would also mean the znode exists) byte[] data = ZKUtil.getData(zkw, primaryMetaZnode); ServerName currentServer = ProtobufUtil.toServerName(data); Collection liveServers = TEST_UTIL.getAdmin() - .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(); + .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(); ServerName moveToServer = - liveServers.stream().filter(s -> !currentServer.equals(s)).findAny().get(); + liveServers.stream().filter(s -> !currentServer.equals(s)).findAny().get(); final TableName tableName = name.getTableName(); TEST_UTIL.createTable(tableName, "f"); assertTrue(TEST_UTIL.getAdmin().tableExists(tableName)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java index e859e72ca83f..cee2a0e98f97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,17 +39,17 @@ import org.junit.experimental.categories.Category; /** - * Test MetaTableAccessor but without spinning up a cluster. - * We mock regionserver back and forth (we do spin up a zk cluster). + * Test MetaTableAccessor but without spinning up a cluster. We mock regionserver back and forth (we + * do spin up a zk cluster). */ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestMetaTableAccessorNoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetaTableAccessorNoCluster.class); - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @Before public void before() throws Exception { @@ -81,7 +81,7 @@ public void testGetHRegionInfo() throws IOException { // OK, give it what it expects kvs.clear(); kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, - RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO))); + RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO))); hri = CatalogFamilyFormat.getRegionInfo(Result.create(kvs)); assertNotNull(hri); assertTrue(RegionInfo.COMPARATOR.compare(hri, RegionInfoBuilder.FIRST_META_REGIONINFO) == 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java index 8ffbe6bb47fd..53cabd251756 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestMetaWithReplicasBasic extends MetaWithReplicasTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaWithReplicasBasic.class); + HBaseClassTestRule.forClass(TestMetaWithReplicasBasic.class); @BeforeClass public static void setUp() throws Exception { @@ -63,9 +63,9 @@ public void testZookeeperNodesForReplicas() throws Exception { ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); Configuration conf = TEST_UTIL.getConfiguration(); String baseZNode = - conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - String primaryMetaZnode = - ZNodePaths.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server")); + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode, + conf.get("zookeeper.znode.metaserver", "meta-region-server")); // check that the data in the znode is parseable (this would also mean the znode exists) byte[] data = ZKUtil.getData(zkw, primaryMetaZnode); ProtobufUtil.toServerName(data); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java index e7c872d57a04..f72091de80b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,10 +51,10 @@ public class TestMetaWithReplicasShutdownHandling extends MetaWithReplicasTestBa @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaWithReplicasShutdownHandling.class); + HBaseClassTestRule.forClass(TestMetaWithReplicasShutdownHandling.class); private static final Logger LOG = - LoggerFactory.getLogger(TestMetaWithReplicasShutdownHandling.class); + LoggerFactory.getLogger(TestMetaWithReplicasShutdownHandling.class); @BeforeClass public static void setUp() throws Exception { @@ -80,9 +80,9 @@ public static void shutdownMetaAndDoValidations(HBaseTestingUtil util) throws Ex conf.setBoolean(HConstants.USE_META_REPLICAS, true); String baseZNode = - conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - String primaryMetaZnode = - ZNodePaths.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server")); + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode, + conf.get("zookeeper.znode.metaserver", "meta-region-server")); byte[] data = ZKUtil.getData(zkw, primaryMetaZnode); ServerName primary = ProtobufUtil.toServerName(data); LOG.info("Primary=" + primary.toString()); @@ -156,7 +156,7 @@ public static void shutdownMetaAndDoValidations(HBaseTestingUtil util) throws Ex conf.setBoolean(HConstants.USE_META_REPLICAS, false); LOG.info("Running GETs no replicas"); try (Connection c = ConnectionFactory.createConnection(conf); - Table htable = c.getTable(TABLE)) { + Table htable = c.getTable(TABLE)) { Result r = htable.get(new Get(row)); assertArrayEquals(row, r.getRow()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientAfterSplittingRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientAfterSplittingRegion.java index a9e2b542e6f1..89588a7be59e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientAfterSplittingRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientAfterSplittingRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestMobCloneSnapshotFromClientAfterSplittingRegion @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobCloneSnapshotFromClientAfterSplittingRegion.class); + HBaseClassTestRule.forClass(TestMobCloneSnapshotFromClientAfterSplittingRegion.class); protected static void setupConfiguration() { CloneSnapshotFromClientTestBase.setupConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java index e352303f76ec..c8cd59e6eb8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public class TestMobCloneSnapshotFromClientCloneLinksAfterDelete @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobCloneSnapshotFromClientCloneLinksAfterDelete.class); + HBaseClassTestRule.forClass(TestMobCloneSnapshotFromClientCloneLinksAfterDelete.class); private static boolean delayFlush = false; @@ -118,7 +118,7 @@ public void testCloneLinksAfterDelete() throws IOException, InterruptedException long tid = EnvironmentEdgeManager.currentTime(); String snapshotName3 = "snaptb3-" + tid; TableName clonedTableName3 = - TableName.valueOf(name.getMethodName() + EnvironmentEdgeManager.currentTime()); + TableName.valueOf(name.getMethodName() + EnvironmentEdgeManager.currentTime()); admin.snapshot(snapshotName3, tableName); delayFlush = false; int snapshot3Rows = -1; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientError.java index b691e6090d28..185242aa80d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientError.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public class TestMobCloneSnapshotFromClientError extends CloneSnapshotFromClient @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobCloneSnapshotFromClientError.class); + HBaseClassTestRule.forClass(TestMobCloneSnapshotFromClientError.class); protected static void setupConfiguration() { CloneSnapshotFromClientTestBase.setupConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientNormal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientNormal.java index 96180d1c34f2..1028b8f4ee4c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientNormal.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientNormal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMobCloneSnapshotFromClientNormal extends CloneSnapshotFromClien @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobCloneSnapshotFromClientNormal.class); + HBaseClassTestRule.forClass(TestMobCloneSnapshotFromClientNormal.class); protected static void setupConfiguration() { CloneSnapshotFromClientTestBase.setupConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientAfterSplittingRegions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientAfterSplittingRegions.java index cf4330b26f58..fbd187804e79 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientAfterSplittingRegions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientAfterSplittingRegions.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMobRestoreSnapshotFromClientAfterSplittingRegions @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientAfterSplittingRegions.class); + HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientAfterSplittingRegions.class); @BeforeClass public static void setupCluster() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientAfterTruncate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientAfterTruncate.java index 4c0c0a066d30..1f8ce431f059 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientAfterTruncate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientAfterTruncate.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMobRestoreSnapshotFromClientAfterTruncate @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientAfterTruncate.class); + HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientAfterTruncate.class); @BeforeClass public static void setupCluster() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientClone.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientClone.java index 3696eaa3c384..7fc1c1353514 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientClone.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientClone.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestMobRestoreSnapshotFromClientClone extends RestoreSnapshotFromCl @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientClone.class); + HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientClone.class); @BeforeClass public static void setupCluster() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientGetCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientGetCompactionState.java index b17dd03a9e82..535d52d10bfc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientGetCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientGetCompactionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMobRestoreSnapshotFromClientGetCompactionState @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientGetCompactionState.class); + HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientGetCompactionState.class); @BeforeClass public static void setupCluster() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSchemaChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSchemaChange.java index abd6a7ce8c11..07c25c64fa28 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSchemaChange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSchemaChange.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMobRestoreSnapshotFromClientSchemaChange @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientSchemaChange.class); + HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientSchemaChange.class); @BeforeClass public static void setupCluster() throws Exception { @@ -68,6 +68,6 @@ protected int countRows(Table table, byte[]... families) throws IOException { @Override protected ColumnFamilyDescriptor getTestRestoreSchemaChangeHCD() { return ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY2).setMobEnabled(true) - .setMobThreshold(3L).build(); + .setMobThreshold(3L).build(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSimple.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSimple.java index f0ebc70ee83e..02ecb2127d3a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSimple.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSimple.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestMobRestoreSnapshotFromClientSimple @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientSimple.class); + HBaseClassTestRule.forClass(TestMobRestoreSnapshotFromClientSimple.class); @BeforeClass public static void setupCluster() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java index ffdd1d1ec99c..8c8dcd4fb2d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java index 0695be12d0c4..922decb7a54d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ *
      * This is an end-to-end test for the snapshot utility */ -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestMobSnapshotFromClient extends TestSnapshotFromClient { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java index ca73807e97bb..dd30213cea2d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; @@ -75,7 +74,7 @@ public void testMultiMetrics() throws Exception { mutator.close(); MetricsConnection metrics = - ((AsyncConnectionImpl) conn.toAsyncConnection()).getConnectionMetrics().get(); + ((AsyncConnectionImpl) conn.toAsyncConnection()).getConnectionMetrics().get(); assertEquals(1, metrics.multiTracker.reqHist.getCount()); assertEquals(3, metrics.numActionsPerServerHist.getSnapshot().getMean(), 1e-15); assertEquals(1, metrics.numActionsPerServerHist.getCount()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java index bb878cac5fda..c092af363a15 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,7 +56,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MediumTests.class, FlakeyTests.class}) +@Category({ MediumTests.class, FlakeyTests.class }) public class TestMultiParallel { @ClassRule @@ -72,7 +72,7 @@ public class TestMultiParallel { private static final TableName TEST_TABLE = TableName.valueOf("multi_test_table"); private static final byte[] BYTES_FAMILY = Bytes.toBytes(FAMILY); private static final byte[] ONE_ROW = Bytes.toBytes("xxx"); - private static final byte [][] KEYS = makeKeys(); + private static final byte[][] KEYS = makeKeys(); private static final int slaves = 5; // also used for testing HTable pool size private static Connection CONNECTION; @@ -81,18 +81,18 @@ public class TestMultiParallel { public static void beforeClass() throws Exception { // Uncomment the following lines if more verbosity is needed for // debugging (see HBASE-12285 for details). 
- //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); - //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); - //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); + // ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); + // ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); + // ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); UTIL.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY, - KeyValueCodec.class.getCanonicalName()); + KeyValueCodec.class.getCanonicalName()); // Disable table on master for now as the feature is broken - //UTIL.getConfiguration().setBoolean(LoadBalancer.TABLES_ON_MASTER, true); + // UTIL.getConfiguration().setBoolean(LoadBalancer.TABLES_ON_MASTER, true); // We used to ask for system tables on Master exclusively but not needed by test and doesn't // work anyways -- so commented out. // UTIL.getConfiguration().setBoolean(LoadBalancer.SYSTEM_TABLES_ON_MASTER, true); - UTIL.getConfiguration() - .set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyMasterObserver.class.getName()); + UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + MyMasterObserver.class.getName()); UTIL.startMiniCluster(slaves); Table t = UTIL.createMultiRegionTable(TEST_TABLE, Bytes.toBytes(FAMILY)); UTIL.waitTableEnabled(TEST_TABLE); @@ -118,9 +118,8 @@ public void before() throws Exception { if (MyMasterObserver.postBalanceCount.get() > balanceCount) { // It is necessary to wait the move procedure to start. // Otherwise, the next wait may pass immediately. - UTIL.waitFor(3 * 1000, 100, false, () -> - UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().hasRegionsInTransition() - ); + UTIL.waitFor(3 * 1000, 100, false, () -> UTIL.getMiniHBaseCluster().getMaster() + .getAssignmentManager().hasRegionsInTransition()); } // Wait until completing balance @@ -130,7 +129,7 @@ public void before() throws Exception { } private static byte[][] makeKeys() { - byte [][] starterKeys = HBaseTestingUtil.KEYS; + byte[][] starterKeys = HBaseTestingUtil.KEYS; // Create a "non-uniform" test set with the following characteristics: // a) Unequal number of keys per region @@ -160,7 +159,7 @@ private static byte[][] makeKeys() { cp[k.length] = new Integer(i % 256).byteValue(); keys.add(cp); } - return keys.toArray(new byte [][] {new byte [] {}}); + return keys.toArray(new byte[][] { new byte[] {} }); } @Test @@ -195,8 +194,8 @@ public void testBatchWithGet() throws Exception { Cell[] multiKvs = multiRes[i].rawCells(); for (int j = 0; j < singleKvs.length; j++) { Assert.assertEquals(singleKvs[j], multiKvs[j]); - Assert.assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(singleKvs[j]), - CellUtil.cloneValue(multiKvs[j]))); + Assert.assertEquals(0, + Bytes.compareTo(CellUtil.cloneValue(singleKvs[j]), CellUtil.cloneValue(multiKvs[j]))); } } table.close(); @@ -237,8 +236,8 @@ public void testFlushCommitsNoAbort() throws Exception { } /** - * Only run one Multi test with a forced RegionServer abort. Otherwise, the - * unit tests will take an unnecessarily long time to run. + * Only run one Multi test with a forced RegionServer abort. Otherwise, the unit tests will take + * an unnecessarily long time to run. 
*/ @Test public void testFlushCommitsWithAbort() throws Exception { @@ -259,14 +258,12 @@ private void doTestFlushCommits(boolean doAbort) throws Exception { List puts = constructPutRequests(); table.put(puts); LOG.info("puts"); - final int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads() - .size(); + final int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size(); assert liveRScount > 0; - JVMClusterUtil.RegionServerThread liveRS = UTIL.getMiniHBaseCluster() - .getLiveRegionServerThreads().get(0); + JVMClusterUtil.RegionServerThread liveRS = + UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().get(0); if (doAbort) { - liveRS.getRegionServer().abort("Aborting for tests", - new Exception("doTestFlushCommits")); + liveRS.getRegionServer().abort("Aborting for tests", new Exception("doTestFlushCommits")); // If we wait for no regions being online after we abort the server, we // could ensure the master has re-assigned the regions on killed server // after writing successfully. It means the server we aborted is dead @@ -284,23 +281,24 @@ private void doTestFlushCommits(boolean doAbort) throws Exception { validateLoadedData(table); // Validate server and region count - List liveRSs = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads(); + List liveRSs = + UTIL.getMiniHBaseCluster().getLiveRegionServerThreads(); int count = 0; - for (JVMClusterUtil.RegionServerThread t: liveRSs) { + for (JVMClusterUtil.RegionServerThread t : liveRSs) { count++; LOG.info("Count=" + count + ", Alive=" + t.getRegionServer()); } LOG.info("Count=" + count); Assert.assertEquals("Server count=" + count + ", abort=" + doAbort, - (doAbort ? (liveRScount - 1) : liveRScount), count); + (doAbort ? (liveRScount - 1) : liveRScount), count); if (doAbort) { UTIL.getMiniHBaseCluster().waitOnRegionServer(0); UTIL.waitFor(15 * 1000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { // We disable regions on master so the count should be liveRScount - 1 - return UTIL.getMiniHBaseCluster().getMaster() - .getClusterMetrics().getLiveServerMetrics().size() == liveRScount - 1; + return UTIL.getMiniHBaseCluster().getMaster().getClusterMetrics().getLiveServerMetrics() + .size() == liveRScount - 1; } }); UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition()); @@ -325,7 +323,7 @@ public void testBatchWithPut() throws Exception { int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size(); assert liveRScount > 0; JVMClusterUtil.RegionServerThread liveRS = - UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().get(0); + UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().get(0); liveRS.getRegionServer().abort("Aborting for tests", new Exception("testBatchWithPut")); puts = constructPutRequests(); try { @@ -361,7 +359,7 @@ public void testBatchWithDelete() throws Exception { delete.addFamily(BYTES_FAMILY); deletes.add(delete); } - results= new Object[deletes.size()]; + results = new Object[deletes.size()]; table.batch(deletes, results); validateSizeAndEmpty(results, KEYS.length); @@ -586,12 +584,12 @@ private void validateResult(Object r) { } private void validateResult(Object r1, byte[] qual, byte[] val) { - Result r = (Result)r1; + Result r = (Result) r1; Assert.assertTrue(r.containsColumn(BYTES_FAMILY, qual)); byte[] value = r.getValue(BYTES_FAMILY, qual); if (0 != Bytes.compareTo(val, value)) { - fail("Expected [" + Bytes.toStringBinary(val) - + "] but got [" + Bytes.toStringBinary(value) + "]"); + fail("Expected [" + 
Bytes.toStringBinary(val) + "] but got [" + Bytes.toStringBinary(value) + + "]"); } } @@ -616,7 +614,7 @@ private void validateLoadedData(Table table) throws IOException { } int retryNum = 10; Result[] results = null; - do { + do { results = table.get(gets); boolean finished = true; for (Result result : results) { @@ -641,8 +639,7 @@ private void validateLoadedData(Table table) throws IOException { if (results != null) { for (Result r : results) { Assert.assertTrue(r.containsColumn(BYTES_FAMILY, QUALIFIER)); - Assert.assertEquals(0, Bytes.compareTo(VALUE, r - .getValue(BYTES_FAMILY, QUALIFIER))); + Assert.assertEquals(0, Bytes.compareTo(VALUE, r.getValue(BYTES_FAMILY, QUALIFIER))); } LOG.info("Validating data on " + table + " successfully!"); } @@ -650,7 +647,7 @@ private void validateLoadedData(Table table) throws IOException { } private void validateEmpty(Object r1) { - Result result = (Result)r1; + Result result = (Result) r1; Assert.assertTrue(result != null); Assert.assertTrue(result.isEmpty()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java index d81b5a26f428..4924cff0d025 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ * This test sets the multi size WAAAAAY low and then checks to make sure that gets will still make * progress. */ -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestMultiRespectsLimits { @ClassRule @@ -121,18 +121,21 @@ public boolean evaluate() throws Exception { // Cells from TEST_UTIL.loadTable have a length of 27. // Multiplying by less than that gives an easy lower bound on size. // However in reality each kv is being reported as much higher than that. 
- METRICS_ASSERT.assertCounterGt("exceptions", - startingExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s); + METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions + ((MAX_SIZE * 25) / MAX_SIZE), + s); METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", - startingMultiExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s); + startingMultiExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s); } @Test public void testBlockMultiLimits() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - TEST_UTIL.getAdmin().createTable( - TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(FAMILY).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF).build()).build()); + TEST_UTIL.getAdmin() + .createTable( + TableDescriptorBuilder + .newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILY).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF).build()) + .build()); Table t = TEST_UTIL.getConnection().getTable(tableName); final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0); @@ -142,8 +145,7 @@ public void testBlockMultiLimits() throws Exception { long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s); byte[] row = Bytes.toBytes("TEST"); - byte[][] cols = new byte[][]{ - Bytes.toBytes("0"), // Get this + byte[][] cols = new byte[][] { Bytes.toBytes("0"), // Get this Bytes.toBytes("1"), // Buffer Bytes.toBytes("2"), // Buffer Bytes.toBytes("3"), // Get This @@ -157,16 +159,11 @@ public void testBlockMultiLimits() throws Exception { byte[] value = new byte[MAX_SIZE - 100]; Bytes.random(value); - for (byte[] col:cols) { + for (byte[] col : cols) { Put p = new Put(row); - p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(FAMILY) - .setQualifier(col) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(value) - .build()); + p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(FAMILY) + .setQualifier(col).setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(value) + .build()); t.put(p); } @@ -193,7 +190,6 @@ public boolean evaluate() throws Exception { Result[] results = t.get(gets); assertEquals(2, results.length); METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions, s); - METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", - startingMultiExceptions, s); + METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", startingMultiExceptions, s); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java index 561b9d9cab04..e75b7f667847 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,11 +44,11 @@ import org.slf4j.LoggerFactory; /** - * Run tests related to {@link org.apache.hadoop.hbase.filter.TimestampsFilter} using HBase client APIs. - * Sets up the HBase mini cluster once at start. Each creates a table - * named for the method and does its stuff against that. 
+ * Run tests related to {@link org.apache.hadoop.hbase.filter.TimestampsFilter} using HBase client + * APIs. Sets up the HBase mini cluster once at start. Each creates a table named for the method and + * does its stuff against that. */ -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestMultipleTimestamps { @ClassRule @@ -96,27 +96,27 @@ public void tearDown() throws Exception { @Test public void testReseeksWithOneColumnMiltipleTimestamp() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); - Integer[] putRows = new Integer[] {1, 3, 5, 7}; - Integer[] putColumns = new Integer[] { 1, 3, 5}; - Long[] putTimestamps = new Long[] {1L, 2L, 3L, 4L, 5L}; + Integer[] putRows = new Integer[] { 1, 3, 5, 7 }; + Integer[] putColumns = new Integer[] { 1, 3, 5 }; + Long[] putTimestamps = new Long[] { 1L, 2L, 3L, 4L, 5L }; - Integer[] scanRows = new Integer[] {3, 5}; - Integer[] scanColumns = new Integer[] {3}; - Long[] scanTimestamps = new Long[] {3L, 4L}; + Integer[] scanRows = new Integer[] { 3, 5 }; + Integer[] scanColumns = new Integer[] { 3 }; + Long[] scanTimestamps = new Long[] { 3L, 4L }; int scanMaxVersions = 2; put(ht, FAMILY, putRows, putColumns, putTimestamps); TEST_UTIL.flush(tableName); - ResultScanner scanner = scan(ht, FAMILY, scanRows, scanColumns, - scanTimestamps, scanMaxVersions); + ResultScanner scanner = + scan(ht, FAMILY, scanRows, scanColumns, scanTimestamps, scanMaxVersions); Cell[] kvs; @@ -136,27 +136,27 @@ public void testReseeksWithOneColumnMiltipleTimestamp() throws IOException { public void testReseeksWithMultipleColumnOneTimestamp() throws IOException { LOG.info(name.getMethodName()); final TableName tableName = TableName.valueOf(name.getMethodName()); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... 
Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); - Integer[] putRows = new Integer[] {1, 3, 5, 7}; - Integer[] putColumns = new Integer[] { 1, 3, 5}; - Long[] putTimestamps = new Long[] {1L, 2L, 3L, 4L, 5L}; + Integer[] putRows = new Integer[] { 1, 3, 5, 7 }; + Integer[] putColumns = new Integer[] { 1, 3, 5 }; + Long[] putTimestamps = new Long[] { 1L, 2L, 3L, 4L, 5L }; - Integer[] scanRows = new Integer[] {3, 5}; - Integer[] scanColumns = new Integer[] {3,4}; - Long[] scanTimestamps = new Long[] {3L}; + Integer[] scanRows = new Integer[] { 3, 5 }; + Integer[] scanColumns = new Integer[] { 3, 4 }; + Long[] scanTimestamps = new Long[] { 3L }; int scanMaxVersions = 2; put(ht, FAMILY, putRows, putColumns, putTimestamps); TEST_UTIL.flush(tableName); - ResultScanner scanner = scan(ht, FAMILY, scanRows, scanColumns, - scanTimestamps, scanMaxVersions); + ResultScanner scanner = + scan(ht, FAMILY, scanRows, scanColumns, scanTimestamps, scanMaxVersions); Cell[] kvs; @@ -171,24 +171,23 @@ public void testReseeksWithMultipleColumnOneTimestamp() throws IOException { } @Test - public void testReseeksWithMultipleColumnMultipleTimestamp() throws - IOException { + public void testReseeksWithMultipleColumnMultipleTimestamp() throws IOException { LOG.info(name.getMethodName()); final TableName tableName = TableName.valueOf(name.getMethodName()); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); - Integer[] putRows = new Integer[] {1, 3, 5, 7}; - Integer[] putColumns = new Integer[] { 1, 3, 5}; - Long[] putTimestamps = new Long[] {1L, 2L, 3L, 4L, 5L}; + Integer[] putRows = new Integer[] { 1, 3, 5, 7 }; + Integer[] putColumns = new Integer[] { 1, 3, 5 }; + Long[] putTimestamps = new Long[] { 1L, 2L, 3L, 4L, 5L }; - Integer[] scanRows = new Integer[] {5, 7}; - Integer[] scanColumns = new Integer[] {3, 4, 5}; - Long[] scanTimestamps = new Long[] { 2L, 3L}; + Integer[] scanRows = new Integer[] { 5, 7 }; + Integer[] scanColumns = new Integer[] { 3, 4, 5 }; + Long[] scanTimestamps = new Long[] { 2L, 3L }; int scanMaxVersions = 2; put(ht, FAMILY, putRows, putColumns, putTimestamps); @@ -206,7 +205,7 @@ public void testReseeksWithMultipleColumnMultipleTimestamp() throws Cell[] kvs; - // This looks like wrong answer. Should be 2. Even then we are returning wrong result, + // This looks like wrong answer. Should be 2. Even then we are returning wrong result, // timestamps that are 3 whereas should be 2 since min is inclusive. kvs = scanner.next().rawCells(); assertEquals(4, kvs.length); @@ -228,28 +227,27 @@ public void testReseeksWithMultipleColumnMultipleTimestamp() throws public void testReseeksWithMultipleFiles() throws IOException { LOG.info(name.getMethodName()); final TableName tableName = TableName.valueOf(name.getMethodName()); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... 
Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); - Integer[] putRows1 = new Integer[] {1, 2, 3}; - Integer[] putColumns1 = new Integer[] { 2, 5, 6}; - Long[] putTimestamps1 = new Long[] {1L, 2L, 5L}; - - Integer[] putRows2 = new Integer[] {6, 7}; - Integer[] putColumns2 = new Integer[] {3, 6}; - Long[] putTimestamps2 = new Long[] {4L, 5L}; + Integer[] putRows1 = new Integer[] { 1, 2, 3 }; + Integer[] putColumns1 = new Integer[] { 2, 5, 6 }; + Long[] putTimestamps1 = new Long[] { 1L, 2L, 5L }; - Integer[] putRows3 = new Integer[] {2, 3, 5}; - Integer[] putColumns3 = new Integer[] {1, 2, 3}; - Long[] putTimestamps3 = new Long[] {4L,8L}; + Integer[] putRows2 = new Integer[] { 6, 7 }; + Integer[] putColumns2 = new Integer[] { 3, 6 }; + Long[] putTimestamps2 = new Long[] { 4L, 5L }; + Integer[] putRows3 = new Integer[] { 2, 3, 5 }; + Integer[] putColumns3 = new Integer[] { 1, 2, 3 }; + Long[] putTimestamps3 = new Long[] { 4L, 8L }; - Integer[] scanRows = new Integer[] {3, 5, 7}; - Integer[] scanColumns = new Integer[] {3, 4, 5}; - Long[] scanTimestamps = new Long[] { 2L, 4L}; + Integer[] scanRows = new Integer[] { 3, 5, 7 }; + Integer[] scanColumns = new Integer[] { 3, 4, 5 }; + Long[] scanTimestamps = new Long[] { 2L, 4L }; int scanMaxVersions = 5; put(ht, FAMILY, putRows1, putColumns1, putTimestamps1); @@ -258,8 +256,8 @@ public void testReseeksWithMultipleFiles() throws IOException { TEST_UTIL.flush(tableName); put(ht, FAMILY, putRows3, putColumns3, putTimestamps3); - ResultScanner scanner = scan(ht, FAMILY, scanRows, scanColumns, - scanTimestamps, scanMaxVersions); + ResultScanner scanner = + scan(ht, FAMILY, scanRows, scanColumns, scanTimestamps, scanMaxVersions); Cell[] kvs; @@ -294,11 +292,11 @@ public void testWithVersionDeletes() throws Exception { } public void testWithVersionDeletes(boolean flushTables) throws IOException { - LOG.info(name.getMethodName() + "_"+ (flushTables ? "flush" : "noflush")); - final TableName tableName = TableName.valueOf(name.getMethodName() + "_" + (flushTables ? - "flush" : "noflush")); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + LOG.info(name.getMethodName() + "_" + (flushTables ? "flush" : "noflush")); + final TableName tableName = + TableName.valueOf(name.getMethodName() + "_" + (flushTables ? "flush" : "noflush")); + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); @@ -315,8 +313,7 @@ public void testWithVersionDeletes(boolean flushTables) throws IOException { // request a bunch of versions including the deleted version. We should // only get back entries for the versions that exist. - Cell kvs[] = getNVersions(ht, FAMILY, 0, 0, - Arrays.asList(2L, 3L, 4L, 5L)); + Cell kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L, 4L, 5L)); assertEquals(3, kvs.length); checkOneCell(kvs[0], FAMILY, 0, 0, 5); checkOneCell(kvs[1], FAMILY, 0, 0, 3); @@ -330,8 +327,8 @@ public void testWithMultipleVersionDeletes() throws IOException { LOG.info(name.getMethodName()); final TableName tableName = TableName.valueOf(name.getMethodName()); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... 
Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); @@ -355,8 +352,8 @@ public void testWithMultipleVersionDeletes() throws IOException { @Test public void testWithColumnDeletes() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); @@ -380,8 +377,8 @@ public void testWithColumnDeletes() throws IOException { @Test public void testWithFamilyDeletes() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(tableName, FAMILIES, Integer.MAX_VALUE); @@ -403,82 +400,71 @@ public void testWithFamilyDeletes() throws IOException { } /** - * Assert that the passed in KeyValue has expected contents for the - * specified row, column & timestamp. + * Assert that the passed in KeyValue has expected contents for the specified row, column & + * timestamp. */ - private void checkOneCell(Cell kv, byte[] cf, - int rowIdx, int colIdx, long ts) { + private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) { String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts; - assertEquals("Row mismatch which checking: " + ctx, - "row:"+ rowIdx, Bytes.toString(CellUtil.cloneRow(kv))); + assertEquals("Row mismatch which checking: " + ctx, "row:" + rowIdx, + Bytes.toString(CellUtil.cloneRow(kv))); - assertEquals("ColumnFamily mismatch while checking: " + ctx, - Bytes.toString(cf), Bytes.toString(CellUtil.cloneFamily(kv))); + assertEquals("ColumnFamily mismatch while checking: " + ctx, Bytes.toString(cf), + Bytes.toString(CellUtil.cloneFamily(kv))); - assertEquals("Column qualifier mismatch while checking: " + ctx, - "column:" + colIdx, - Bytes.toString(CellUtil.cloneQualifier(kv))); + assertEquals("Column qualifier mismatch while checking: " + ctx, "column:" + colIdx, + Bytes.toString(CellUtil.cloneQualifier(kv))); - assertEquals("Timestamp mismatch while checking: " + ctx, - ts, kv.getTimestamp()); + assertEquals("Timestamp mismatch while checking: " + ctx, ts, kv.getTimestamp()); - assertEquals("Value mismatch while checking: " + ctx, - "value-version-" + ts, Bytes.toString(CellUtil.cloneValue(kv))); + assertEquals("Value mismatch while checking: " + ctx, "value-version-" + ts, + Bytes.toString(CellUtil.cloneValue(kv))); } /** - * Uses the TimestampFilter on a Get to request a specified list of - * versions for the row/column specified by rowIdx & colIdx. - * + * Uses the TimestampFilter on a Get to request a specified list of versions for the row/column + * specified by rowIdx & colIdx. 
*/ - private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx, - int colIdx, List versions) - throws IOException { + private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx, int colIdx, List versions) + throws IOException { byte row[] = Bytes.toBytes("row:" + rowIdx); byte column[] = Bytes.toBytes("column:" + colIdx); Get get = new Get(row); get.addColumn(cf, column); get.readAllVersions(); - get.setTimeRange(Collections.min(versions), Collections.max(versions)+1); + get.setTimeRange(Collections.min(versions), Collections.max(versions) + 1); Result result = ht.get(get); return result.rawCells(); } - private ResultScanner scan(Table ht, byte[] cf, - Integer[] rowIndexes, Integer[] columnIndexes, - Long[] versions, int maxVersions) - throws IOException { - byte startRow[] = Bytes.toBytes("row:" + - Collections.min( Arrays.asList(rowIndexes))); - byte endRow[] = Bytes.toBytes("row:" + - Collections.max( Arrays.asList(rowIndexes))+1); + private ResultScanner scan(Table ht, byte[] cf, Integer[] rowIndexes, Integer[] columnIndexes, + Long[] versions, int maxVersions) throws IOException { + byte startRow[] = Bytes.toBytes("row:" + Collections.min(Arrays.asList(rowIndexes))); + byte endRow[] = Bytes.toBytes("row:" + Collections.max(Arrays.asList(rowIndexes)) + 1); Scan scan = new Scan().withStartRow(startRow).withStopRow(endRow); - for (Integer colIdx: columnIndexes) { + for (Integer colIdx : columnIndexes) { byte column[] = Bytes.toBytes("column:" + colIdx); scan.addColumn(cf, column); } scan.readVersions(maxVersions); scan.setTimeRange(Collections.min(Arrays.asList(versions)), - Collections.max(Arrays.asList(versions))+1); + Collections.max(Arrays.asList(versions)) + 1); ResultScanner scanner = ht.getScanner(scan); return scanner; } - private void put(Table ht, byte[] cf, Integer[] rowIndexes, - Integer[] columnIndexes, Long[] versions) - throws IOException { - for (int rowIdx: rowIndexes) { + private void put(Table ht, byte[] cf, Integer[] rowIndexes, Integer[] columnIndexes, + Long[] versions) throws IOException { + for (int rowIdx : rowIndexes) { byte row[] = Bytes.toBytes("row:" + rowIdx); Put put = new Put(row); put.setDurability(Durability.SKIP_WAL); - for(int colIdx: columnIndexes) { + for (int colIdx : columnIndexes) { byte column[] = Bytes.toBytes("column:" + colIdx); - for (long version: versions) { - put.addColumn(cf, column, version, Bytes.toBytes("value-version-" + - version)); + for (long version : versions) { + put.addColumn(cf, column, version, Bytes.toBytes("value-version-" + version)); } } ht.put(put); @@ -486,12 +472,10 @@ private void put(Table ht, byte[] cf, Integer[] rowIndexes, } /** - * Insert in specific row/column versions with timestamps - * versionStart..versionEnd. + * Insert in specific row/column versions with timestamps versionStart..versionEnd. */ - private void putNVersions(Table ht, byte[] cf, int rowIdx, int colIdx, - long versionStart, long versionEnd) - throws IOException { + private void putNVersions(Table ht, byte[] cf, int rowIdx, int colIdx, long versionStart, + long versionEnd) throws IOException { byte row[] = Bytes.toBytes("row:" + rowIdx); byte column[] = Bytes.toBytes("column:" + colIdx); Put put = new Put(row); @@ -505,12 +489,11 @@ private void putNVersions(Table ht, byte[] cf, int rowIdx, int colIdx, } /** - * For row/column specified by rowIdx/colIdx, delete the cell - * corresponding to the specified version. + * For row/column specified by rowIdx/colIdx, delete the cell corresponding to the specified + * version. 
*/ - private void deleteOneVersion(Table ht, byte[] cf, int rowIdx, - int colIdx, long version) - throws IOException { + private void deleteOneVersion(Table ht, byte[] cf, int rowIdx, int colIdx, long version) + throws IOException { byte row[] = Bytes.toBytes("row:" + rowIdx); byte column[] = Bytes.toBytes("column:" + colIdx); Delete del = new Delete(row); @@ -519,12 +502,10 @@ private void deleteOneVersion(Table ht, byte[] cf, int rowIdx, } /** - * For row/column specified by rowIdx/colIdx, delete all cells - * preceeding the specified version. + * For row/column specified by rowIdx/colIdx, delete all cells preceeding the specified version. */ - private void deleteAllVersionsBefore(Table ht, byte[] cf, int rowIdx, - int colIdx, long version) - throws IOException { + private void deleteAllVersionsBefore(Table ht, byte[] cf, int rowIdx, int colIdx, long version) + throws IOException { byte row[] = Bytes.toBytes("row:" + rowIdx); byte column[] = Bytes.toBytes("column:" + colIdx); Delete del = new Delete(row); @@ -548,5 +529,3 @@ private void deleteFamily(Table ht, byte[] cf, int rowIdx) throws IOException { } } - - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java index 4ca05affc0a9..0f5ec4eb14df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestMutationGetCellBuilder { @ClassRule @@ -72,9 +72,9 @@ public void testMutationGetCellBuilder() throws Exception { TEST_UTIL.waitTableAvailable(tableName.getName(), 5000); // put one row Put put = new Put(rowKey); - CellBuilder cellBuilder = put.getCellBuilder().setQualifier(qualifier) - .setFamily(family).setValue(Bytes.toBytes("bar")).setTimestamp(now); - //setRow is useless + CellBuilder cellBuilder = put.getCellBuilder().setQualifier(qualifier).setFamily(family) + .setValue(Bytes.toBytes("bar")).setTimestamp(now); + // setRow is useless cellBuilder.setRow(uselessRowKey); put.add(cellBuilder.build()); byte[] cloneRow = CellUtil.cloneRow(cellBuilder.build()); @@ -87,25 +87,21 @@ public void testMutationGetCellBuilder() throws Exception { Result result = table.get(get); assertTrue("row key must be same", Arrays.equals(result.getRow(), rowKey)); assertTrue("Column foo value should be bar", - Bytes.toString(result.getValue(family, qualifier)).equals("bar")); + Bytes.toString(result.getValue(family, qualifier)).equals("bar")); - //Delete that row + // Delete that row Delete delete = new Delete(rowKey); - cellBuilder = delete.getCellBuilder().setQualifier(qualifier) - .setFamily(family); - //if this row has been deleted,then can check setType is useless. + cellBuilder = delete.getCellBuilder().setQualifier(qualifier).setFamily(family); + // if this row has been deleted,then can check setType is useless. 
cellBuilder.setType(Cell.Type.Put); delete.add(cellBuilder.build()); table.delete(delete); - //check this row whether exist + // check this row whether exist get = new Get(rowKey); get.setTimestamp(now); result = table.get(get); - assertTrue("Column foo should not exist", - result.getValue(family, qualifier) == null); + assertTrue("Column foo should not exist", result.getValue(family, qualifier) == null); } } } - - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java index 97c0a13ceae9..95c026210ce9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMvccConsistentScanner.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; + import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPreadReversedScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPreadReversedScanner.java index e58b897178d6..e414c6504210 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPreadReversedScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPreadReversedScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -78,10 +78,10 @@ public void tearDown() throws IOException { */ @Test public void testPreadReversedScan01() throws IOException { - String[][] keysCases = new String[][] { - { "d0", "d1", "d2", "d3" }, // all rowKeys fit in the last region. - { "a0", "a1", "a2", "a3" }, // all rowKeys fit in the first region. - { "a0", "b1", "c2", "d3" }, // each region with a rowKey + String[][] keysCases = new String[][] { { "d0", "d1", "d2", "d3" }, // all rowKeys fit in the + // last region. + { "a0", "a1", "a2", "a3" }, // all rowKeys fit in the first region. + { "a0", "b1", "c2", "d3" }, // each region with a rowKey }; for (int caseIndex = 0; caseIndex < keysCases.length; caseIndex++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java index b5e1178cca89..c55b837262b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,14 +39,14 @@ /** * Test that I can Iterate Client Actions that hold Cells (Get does not have Cells). 
*/ -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestPutDeleteEtcCellIteration { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestPutDeleteEtcCellIteration.class); - private static final byte [] ROW = new byte [] {'r'}; + private static final byte[] ROW = new byte[] { 'r' }; private static final long TIMESTAMP = EnvironmentEdgeManager.currentTime(); private static final int COUNT = 10; @@ -54,29 +54,29 @@ public class TestPutDeleteEtcCellIteration { public void testPutIteration() throws IOException { Put p = new Put(ROW); for (int i = 0; i < COUNT; i++) { - byte [] bytes = Bytes.toBytes(i); + byte[] bytes = Bytes.toBytes(i); p.addColumn(bytes, bytes, TIMESTAMP, bytes); } int index = 0; for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); - byte [] bytes = Bytes.toBytes(index++); + byte[] bytes = Bytes.toBytes(index++); assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes), cell); } assertEquals(COUNT, index); } - @Test (expected = ConcurrentModificationException.class) + @Test(expected = ConcurrentModificationException.class) public void testPutConcurrentModificationOnIteration() throws IOException { Put p = new Put(ROW); for (int i = 0; i < COUNT; i++) { - byte [] bytes = Bytes.toBytes(i); + byte[] bytes = Bytes.toBytes(i); p.addColumn(bytes, bytes, TIMESTAMP, bytes); } int index = 0; for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); - byte [] bytes = Bytes.toBytes(index++); + byte[] bytes = Bytes.toBytes(index++); // When we hit the trigger, try inserting a new KV; should trigger exception p.addColumn(bytes, bytes, TIMESTAMP, bytes); assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes), cell); @@ -87,13 +87,13 @@ public void testPutConcurrentModificationOnIteration() throws IOException { public void testDeleteIteration() throws IOException { Delete d = new Delete(ROW); for (int i = 0; i < COUNT; i++) { - byte [] bytes = Bytes.toBytes(i); + byte[] bytes = Bytes.toBytes(i); d.addColumn(bytes, bytes, TIMESTAMP); } int index = 0; for (CellScanner cellScanner = d.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); - byte [] bytes = Bytes.toBytes(index++); + byte[] bytes = Bytes.toBytes(index++); assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, KeyValue.Type.Delete), cell); } assertEquals(COUNT, index); @@ -103,14 +103,14 @@ public void testDeleteIteration() throws IOException { public void testAppendIteration() throws IOException { Append a = new Append(ROW); for (int i = 0; i < COUNT; i++) { - byte [] bytes = Bytes.toBytes(i); + byte[] bytes = Bytes.toBytes(i); a.addColumn(bytes, bytes, bytes); } int index = 0; for (CellScanner cellScanner = a.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); - byte [] bytes = Bytes.toBytes(index++); - KeyValue kv = (KeyValue)cell; + byte[] bytes = Bytes.toBytes(index++); + KeyValue kv = (KeyValue) cell; assertTrue(Bytes.equals(CellUtil.cloneFamily(kv), bytes)); assertTrue(Bytes.equals(CellUtil.cloneValue(kv), bytes)); } @@ -121,15 +121,15 @@ public void testAppendIteration() throws IOException { public void testIncrementIteration() throws IOException { Increment increment = new Increment(ROW); for (int i = 0; i < COUNT; i++) { - byte [] bytes = Bytes.toBytes(i); + byte[] bytes = Bytes.toBytes(i); increment.addColumn(bytes, bytes, i); } int index = 0; for 
(CellScanner cellScanner = increment.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); int value = index; - byte [] bytes = Bytes.toBytes(index++); - KeyValue kv = (KeyValue)cell; + byte[] bytes = Bytes.toBytes(index++); + KeyValue kv = (KeyValue) cell; assertTrue(Bytes.equals(CellUtil.cloneFamily(kv), bytes)); long a = Bytes.toLong(CellUtil.cloneValue(kv)); assertEquals(value, a); @@ -139,16 +139,16 @@ public void testIncrementIteration() throws IOException { @Test public void testResultIteration() throws IOException { - Cell [] cells = new Cell[COUNT]; - for(int i = 0; i < COUNT; i++) { - byte [] bytes = Bytes.toBytes(i); + Cell[] cells = new Cell[COUNT]; + for (int i = 0; i < COUNT; i++) { + byte[] bytes = Bytes.toBytes(i); cells[i] = new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes); } Result r = Result.create(Arrays.asList(cells)); int index = 0; for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); - byte [] bytes = Bytes.toBytes(index++); + byte[] bytes = Bytes.toBytes(index++); assertEquals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes), cell); } assertEquals(COUNT, index); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutWithDelete.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutWithDelete.java index b6861568be75..403692c18f74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutWithDelete.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutWithDelete.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestPutWithDelete { @ClassRule @@ -82,13 +82,13 @@ public void testHbasePutDeleteCell() throws Exception { Get get = new Get(rowKey); Result result = table.get(get); assertTrue("Column A value should be a", - Bytes.toString(result.getValue(family, Bytes.toBytes("A"))).equals("a")); + Bytes.toString(result.getValue(family, Bytes.toBytes("A"))).equals("a")); assertTrue("Column B value should be b", - Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b")); + Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b")); assertTrue("Column C value should be c", - Bytes.toString(result.getValue(family, Bytes.toBytes("C"))).equals("c")); + Bytes.toString(result.getValue(family, Bytes.toBytes("C"))).equals("c")); assertTrue("Column D value should be d", - Bytes.toString(result.getValue(family, Bytes.toBytes("D"))).equals("d")); + Bytes.toString(result.getValue(family, Bytes.toBytes("D"))).equals("d")); // put the same row again with C column deleted put = new Put(rowKey); put.addColumn(family, Bytes.toBytes("A"), Bytes.toBytes("a1")); @@ -102,17 +102,14 @@ public void testHbasePutDeleteCell() throws Exception { get = new Get(rowKey); result = table.get(get); assertTrue("Column A value should be a1", - Bytes.toString(result.getValue(family, Bytes.toBytes("A"))).equals("a1")); + Bytes.toString(result.getValue(family, Bytes.toBytes("A"))).equals("a1")); assertTrue("Column B value should be b1", - Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b1")); - assertTrue("Column C should not exist", - 
result.getValue(family, Bytes.toBytes("C")) == null); + Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b1")); + assertTrue("Column C should not exist", result.getValue(family, Bytes.toBytes("C")) == null); assertTrue("Column D value should be d1", - Bytes.toString(result.getValue(family, Bytes.toBytes("D"))).equals("d1")); + Bytes.toString(result.getValue(family, Bytes.toBytes("D"))).equals("d1")); } finally { table.close(); } } } - - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.java index 0eb1e1e9a7c4..418486f005fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java index d63dacfc4d89..fe7a16859743 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTablePartialScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTablePartialScan.java index c74d0bf9b365..73b11a5800d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTablePartialScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTablePartialScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public class TestRawAsyncTablePartialScan { private static byte[] FAMILY = Bytes.toBytes("cf"); private static byte[][] CQS = - new byte[][] { Bytes.toBytes("cq1"), Bytes.toBytes("cq2"), Bytes.toBytes("cq3") }; + new byte[][] { Bytes.toBytes("cq1"), Bytes.toBytes("cq2"), Bytes.toBytes("cq3") }; private static int COUNT = 100; @@ -105,7 +105,7 @@ public void testReversedBatchDoNotAllowPartial() throws InterruptedException, Ex // we set batch to 2 and max result size to 1, then server will only returns one result per call // but we should get 2 + 1 for every row. 
List results = - TABLE.scanAll(new Scan().setBatch(2).setMaxResultSize(1).setReversed(true)).get(); + TABLE.scanAll(new Scan().setBatch(2).setMaxResultSize(1).setReversed(true)).get(); assertEquals(2 * COUNT, results.size()); for (int i = 0; i < COUNT; i++) { int row = COUNT - i - 1; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java index bfa24661954a..eb4eb56438f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,6 +27,7 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; + import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; import java.util.ArrayList; @@ -99,65 +100,45 @@ protected List doScan(Scan scan, int closeAfter) throws Exception { @Override protected void assertTraceContinuity() { final String parentSpanName = testName.getMethodName(); - final Matcher parentSpanMatcher = allOf( - hasName(parentSpanName), - hasStatusWithCode(StatusCode.OK), - hasEnded()); + final Matcher parentSpanMatcher = + allOf(hasName(parentSpanName), hasStatusWithCode(StatusCode.OK), hasEnded()); waitForSpan(parentSpanMatcher); - final List spans = otelClassRule.getSpans() - .stream() - .filter(Objects::nonNull) - .collect(Collectors.toList()); + final List spans = + otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList()); if (logger.isDebugEnabled()) { StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans); stringTraceRenderer.render(logger::debug); } - final String parentSpanId = spans.stream() - .filter(parentSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); - - final Matcher scanOperationSpanMatcher = allOf( - hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), - hasParentSpanId(parentSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()); + final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); + + final Matcher scanOperationSpanMatcher = + allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), + hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.OK), hasEnded()); assertThat(spans, hasItem(scanOperationSpanMatcher)); - final String scanOperationSpanId = spans.stream() - .filter(scanOperationSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); + final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); // RawAsyncTableImpl never invokes the callback to `onScanMetricsCreated` -- bug? 
final Matcher onScanMetricsCreatedMatcher = - hasName("TracedAdvancedScanResultConsumer#onScanMetricsCreated"); + hasName("TracedAdvancedScanResultConsumer#onScanMetricsCreated"); assertThat(spans, not(hasItem(onScanMetricsCreatedMatcher))); final Matcher onNextMatcher = hasName("TracedAdvancedScanResultConsumer#onNext"); assertThat(spans, hasItem(onNextMatcher)); - spans.stream() - .filter(onNextMatcher::matches) - .forEach(span -> assertThat(span, hasParentSpanId(scanOperationSpanId))); - assertThat(spans, hasItem(allOf( - onNextMatcher, - hasParentSpanId(scanOperationSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()))); + spans.stream().filter(onNextMatcher::matches) + .forEach(span -> assertThat(span, hasParentSpanId(scanOperationSpanId))); + assertThat(spans, hasItem(allOf(onNextMatcher, hasParentSpanId(scanOperationSpanId), + hasStatusWithCode(StatusCode.OK), hasEnded()))); final Matcher onCompleteMatcher = - hasName("TracedAdvancedScanResultConsumer#onComplete"); + hasName("TracedAdvancedScanResultConsumer#onComplete"); assertThat(spans, hasItem(onCompleteMatcher)); - spans.stream() - .filter(onCompleteMatcher::matches) - .forEach(span -> assertThat(span, allOf( - onCompleteMatcher, - hasParentSpanId(scanOperationSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()))); + spans.stream().filter(onCompleteMatcher::matches) + .forEach(span -> assertThat(span, allOf(onCompleteMatcher, + hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded()))); } @Override @@ -166,42 +147,28 @@ protected void assertTraceError(Matcher exceptionTypeNameMatcher) { final Matcher parentSpanMatcher = allOf(hasName(parentSpanName), hasEnded()); waitForSpan(parentSpanMatcher); - final List spans = otelClassRule.getSpans() - .stream() - .filter(Objects::nonNull) - .collect(Collectors.toList()); + final List spans = + otelClassRule.getSpans().stream().filter(Objects::nonNull).collect(Collectors.toList()); if (logger.isDebugEnabled()) { StringTraceRenderer stringTraceRenderer = new StringTraceRenderer(spans); stringTraceRenderer.render(logger::debug); } - final String parentSpanId = spans.stream() - .filter(parentSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); - - final Matcher scanOperationSpanMatcher = allOf( - hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), - hasParentSpanId(parentSpanId), - hasStatusWithCode(StatusCode.ERROR), - hasExceptionWithType(exceptionTypeNameMatcher), - hasEnded()); + final String parentSpanId = spans.stream().filter(parentSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); + + final Matcher scanOperationSpanMatcher = + allOf(hasName(startsWith("SCAN " + TABLE_NAME.getNameWithNamespaceInclAsString())), + hasParentSpanId(parentSpanId), hasStatusWithCode(StatusCode.ERROR), + hasExceptionWithType(exceptionTypeNameMatcher), hasEnded()); assertThat(spans, hasItem(scanOperationSpanMatcher)); - final String scanOperationSpanId = spans.stream() - .filter(scanOperationSpanMatcher::matches) - .map(SpanData::getSpanId) - .findAny() - .orElseThrow(AssertionError::new); + final String scanOperationSpanId = spans.stream().filter(scanOperationSpanMatcher::matches) + .map(SpanData::getSpanId).findAny().orElseThrow(AssertionError::new); final Matcher onCompleteMatcher = hasName("TracedAdvancedScanResultConsumer#onError"); assertThat(spans, hasItem(onCompleteMatcher)); - spans.stream() - .filter(onCompleteMatcher::matches) - .forEach(span -> 
assertThat(span, allOf( - onCompleteMatcher, - hasParentSpanId(scanOperationSpanId), - hasStatusWithCode(StatusCode.OK), - hasEnded()))); + spans.stream().filter(onCompleteMatcher::matches) + .forEach(span -> assertThat(span, allOf(onCompleteMatcher, + hasParentSpanId(scanOperationSpanId), hasStatusWithCode(StatusCode.OK), hasEnded()))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java index 35e0a2d4d88b..40c003e94b95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocationCaching.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestRegionLocationCaching { @ClassRule @@ -104,7 +104,7 @@ private void checkRegionLocationIsCached(final TableName tableName, final Connec throws InterruptedException, IOException { for (int count = 0; count < 50; count++) { int number = ((AsyncConnectionImpl) conn.toAsyncConnection()).getLocator() - .getNumberOfCachedRegionLocations(tableName); + .getNumberOfCachedRegionLocations(tableName); assertNotEquals("Expected non-zero number of cached region locations", 0, number); Thread.sleep(100); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocator.java index 081d8489b265..01861eaf7522 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestRegionLocator extends AbstractTestRegionLocator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionLocator.class); + HBaseClassTestRule.forClass(TestRegionLocator.class); @BeforeClass public static void setUp() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index 391eed35b065..2d1da58ce7a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -60,7 +60,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestReplicaWithCluster { @ClassRule @@ -96,8 +96,8 @@ public Optional getRegionObserver() { } @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { + public void preGetOp(final ObserverContext e, final Get get, + final List results) throws IOException { if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) { CountDownLatch latch = cdl.get(); @@ -136,16 +136,16 @@ public Optional getRegionObserver() { } @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { + public void preGetOp(final ObserverContext e, final Get get, + final List results) throws IOException { int replicaId = e.getEnvironment().getRegion().getRegionInfo().getReplicaId(); // Fail for the primary replica and replica 1 if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() <= 1) { LOG.info("Throw Region Server Stopped Exceptoin for replica id " + replicaId); - throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName() - + " not running"); + throw new RegionServerStoppedException( + "Server " + e.getEnvironment().getServerName() + " not running"); } else { LOG.info("We're replica region " + replicaId); } @@ -158,8 +158,8 @@ public void preScannerOpen(final ObserverContext e // Fail for the primary replica and replica 1 if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() <= 1) { LOG.info("Throw Region Server Stopped Exceptoin for replica id " + replicaId); - throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName() - + " not running"); + throw new RegionServerStoppedException( + "Server " + e.getEnvironment().getServerName() + " not running"); } else { LOG.info("We're replica region " + replicaId); } @@ -180,18 +180,18 @@ public Optional getRegionObserver() { } @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { + public void preGetOp(final ObserverContext e, final Get get, + final List results) throws IOException { int replicaId = e.getEnvironment().getRegion().getRegionInfo().getReplicaId(); // Fail for the primary replica, but not for meta if (throwException) { if (!e.getEnvironment().getRegion().getRegionInfo().isMetaRegion() && (replicaId == 0)) { - LOG.info("Get, throw Region Server Stopped Exceptoin for region " + e.getEnvironment() - .getRegion().getRegionInfo()); - throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName() - + " not 
running"); + LOG.info("Get, throw Region Server Stopped Exceptoin for region " + + e.getEnvironment().getRegion().getRegionInfo()); + throw new RegionServerStoppedException( + "Server " + e.getEnvironment().getServerName() + " not running"); } } else { LOG.info("Get, We're replica region " + replicaId); @@ -217,11 +217,11 @@ public void preScannerOpen(final ObserverContext e // Fail for the primary replica if (throwException) { - LOG.info("Scan, throw Region Server Stopped Exceptoin for replica " + e.getEnvironment() - .getRegion().getRegionInfo()); + LOG.info("Scan, throw Region Server Stopped Exceptoin for replica " + + e.getEnvironment().getRegion().getRegionInfo()); - throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName() - + " not running"); + throw new RegionServerStoppedException( + "Server " + e.getEnvironment().getServerName() + " not running"); } else { LOG.info("Scan, We're replica region " + replicaId); } @@ -235,7 +235,7 @@ public void preScannerOpen(final ObserverContext e public static void beforeClass() throws Exception { // enable store file refreshing HTU.getConfiguration().setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, - REFRESH_PERIOD); + REFRESH_PERIOD); HTU.getConfiguration().setFloat("hbase.regionserver.logroll.multiplier", 0.0001f); HTU.getConfiguration().setInt("replication.source.size.capacity", 10240); @@ -254,10 +254,10 @@ public static void beforeClass() throws Exception { // Set system coprocessor so it can be applied to meta regions HTU.getConfiguration().set("hbase.coprocessor.region.classes", - RegionServerHostingPrimayMetaRegionSlowOrStopCopro.class.getName()); + RegionServerHostingPrimayMetaRegionSlowOrStopCopro.class.getName()); HTU.getConfiguration().setInt(HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, - META_SCAN_TIMEOUT_IN_MILLISEC * 1000); + META_SCAN_TIMEOUT_IN_MILLISEC * 1000); HTU.startMiniCluster(NB_SERVERS); // Enable meta replica at server side @@ -268,8 +268,7 @@ public static void beforeClass() throws Exception { @AfterClass public static void afterClass() throws Exception { - if (HTU2 != null) - HTU2.shutdownMiniCluster(); + if (HTU2 != null) HTU2.shutdownMiniCluster(); HTU.shutdownMiniCluster(); } @@ -277,9 +276,9 @@ public static void afterClass() throws Exception { public void testCreateDeleteTable() throws IOException { // Create table then get the single region for our new table. 
TableDescriptorBuilder builder = - HTU.createModifyableTableDescriptor(TableName.valueOf("testCreateDeleteTable"), - ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, - ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); + HTU.createModifyableTableDescriptor(TableName.valueOf("testCreateDeleteTable"), + ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, + ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); builder.setRegionReplication(NB_SERVERS); builder.setCoprocessor(SlowMeCopro.class.getName()); TableDescriptor hdt = builder.build(); @@ -313,10 +312,8 @@ public void testCreateDeleteTable() throws IOException { @Test public void testChangeTable() throws Exception { TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("testChangeTable")) - .setRegionReplication(NB_SERVERS) - .setCoprocessor(SlowMeCopro.class.getName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f)) - .build(); + .setRegionReplication(NB_SERVERS).setCoprocessor(SlowMeCopro.class.getName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f)).build(); HTU.getAdmin().createTable(td); Table table = HTU.getConnection().getTable(td.getTableName()); // basic test: it should work. @@ -331,14 +328,13 @@ public void testChangeTable() throws Exception { // Add a CF, it should work. TableDescriptor bHdt = HTU.getAdmin().getDescriptor(td.getTableName()); td = TableDescriptorBuilder.newBuilder(td) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(row)) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(row)).build(); HTU.getAdmin().disableTable(td.getTableName()); HTU.getAdmin().modifyTable(td); HTU.getAdmin().enableTable(td.getTableName()); TableDescriptor nHdt = HTU.getAdmin().getDescriptor(td.getTableName()); Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()), - bHdt.getColumnFamilyCount() + 1, nHdt.getColumnFamilyCount()); + bHdt.getColumnFamilyCount() + 1, nHdt.getColumnFamilyCount()); p = new Put(row); p.addColumn(row, row, row); @@ -360,9 +356,9 @@ public void testChangeTable() throws Exception { } Admin admin = HTU.getAdmin(); - nHdt =admin.getDescriptor(td.getTableName()); + nHdt = admin.getDescriptor(td.getTableName()); Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()), - bHdt.getColumnFamilyCount() + 1, nHdt.getColumnFamilyCount()); + bHdt.getColumnFamilyCount() + 1, nHdt.getColumnFamilyCount()); admin.disableTable(td.getTableName()); admin.deleteTable(td.getTableName()); @@ -373,10 +369,10 @@ public void testChangeTable() throws Exception { @Test public void testReplicaAndReplication() throws Exception { TableDescriptorBuilder builder = - HTU.createModifyableTableDescriptor("testReplicaAndReplication"); + HTU.createModifyableTableDescriptor("testReplicaAndReplication"); builder.setRegionReplication(NB_SERVERS); builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(row) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); builder.setCoprocessor(SlowMeCopro.class.getName()); TableDescriptor tableDescriptor = builder.build(); @@ -394,9 +390,9 @@ public void testReplicaAndReplication() throws Exception { HTU2.getAdmin().createTable(tableDescriptor, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); - Admin admin = connection.getAdmin()) { - ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - 
.setClusterKey(HTU2.getClusterKey()).build(); + Admin admin = connection.getAdmin()) { + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(HTU2.getClusterKey()).build(); admin.addReplicationPeer("2", rpc); } @@ -409,7 +405,8 @@ public void testReplicaAndReplication() throws Exception { LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster."); Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate() { - @Override public boolean evaluate() throws Exception { + @Override + public boolean evaluate() throws Exception { try { SlowMeCopro.cdl.set(new CountDownLatch(1)); Get g = new Get(row); @@ -428,7 +425,8 @@ public void testReplicaAndReplication() throws Exception { final Table table2 = HTU.getConnection().getTable(tableDescriptor.getTableName()); Waiter.waitFor(HTU.getConfiguration(), 1000, new Waiter.Predicate() { - @Override public boolean evaluate() throws Exception { + @Override + public boolean evaluate() throws Exception { try { SlowMeCopro.cdl.set(new CountDownLatch(1)); Get g = new Get(row); @@ -471,7 +469,7 @@ public void testBulkLoad() throws IOException { Path dir = HTU.getDataTestDirOnTestFS("testBulkLoad"); final int numRows = 10; final byte[] qual = Bytes.toBytes("qual"); - final byte[] val = Bytes.toBytes("val"); + final byte[] val = Bytes.toBytes("val"); Map> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (ColumnFamilyDescriptor col : hdt.getColumnFamilies()) { Path hfile = new Path(dir, col.getNameAsString()); @@ -518,9 +516,9 @@ public void testBulkLoad() throws IOException { public void testReplicaGetWithPrimaryDown() throws IOException { // Create table then get the single region for our new table. TableDescriptorBuilder builder = - HTU.createModifyableTableDescriptor(TableName.valueOf("testCreateDeleteTable"), - ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, - ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); + HTU.createModifyableTableDescriptor(TableName.valueOf("testCreateDeleteTable"), + ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, + ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); builder.setRegionReplication(NB_SERVERS); builder.setCoprocessor(RegionServerStoppedCopro.class.getName()); TableDescriptor hdt = builder.build(); @@ -556,9 +554,9 @@ public void testReplicaGetWithPrimaryDown() throws IOException { public void testReplicaScanWithPrimaryDown() throws IOException { // Create table then get the single region for our new table. TableDescriptorBuilder builder = - HTU.createModifyableTableDescriptor(TableName.valueOf("testCreateDeleteTable"), - ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, - ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); + HTU.createModifyableTableDescriptor(TableName.valueOf("testCreateDeleteTable"), + ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, + ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); builder.setRegionReplication(NB_SERVERS); builder.setCoprocessor(RegionServerStoppedCopro.class.getName()); TableDescriptor hdt = builder.build(); @@ -605,10 +603,10 @@ public void testReplicaGetWithAsyncRpcClientImpl() throws IOException { HTU.getConfiguration().set("hbase.rpc.client.impl", "org.apache.hadoop.hbase.ipc.AsyncRpcClient"); // Create table then get the single region for our new table. 
- TableDescriptorBuilder builder = - HTU.createModifyableTableDescriptor(TableName.valueOf("testReplicaGetWithAsyncRpcClientImpl"), - ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, - ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); + TableDescriptorBuilder builder = HTU.createModifyableTableDescriptor( + TableName.valueOf("testReplicaGetWithAsyncRpcClientImpl"), + ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, + ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); builder.setRegionReplication(NB_SERVERS); builder.setCoprocessor(SlowMeCopro.class.getName()); TableDescriptor hdt = builder.build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index 17e8121537ac..deec7fa4080d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,10 +65,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; /** - * Tests for region replicas. Sad that we cannot isolate these without bringing up a whole - * cluster. See {@link org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster}. + * Tests for region replicas. Sad that we cannot isolate these without bringing up a whole cluster. + * See {@link org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster}. */ -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) @SuppressWarnings("deprecation") public class TestReplicasClient { @@ -101,6 +101,7 @@ public static class SlowMeCopro implements RegionCoprocessor, RegionObserver { new AtomicReference<>(new CountDownLatch(0)); private static final AtomicReference secondaryCdl = new AtomicReference<>(new CountDownLatch(0)); + public SlowMeCopro() { } @@ -110,8 +111,8 @@ public Optional getRegionObserver() { } @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { + public void preGetOp(final ObserverContext e, final Get get, + final List results) throws IOException { slowdownCode(e); } @@ -123,12 +124,12 @@ public void preScannerOpen(final ObserverContext e @Override public boolean preScannerNext(final ObserverContext e, - final InternalScanner s, final List results, - final int limit, final boolean hasMore) throws IOException { - //this will slow down a certain next operation if the conditions are met. The slowness - //will allow the call to go to a replica + final InternalScanner s, final List results, final int limit, final boolean hasMore) + throws IOException { + // this will slow down a certain next operation if the conditions are met. 
The slowness + // will allow the call to go to a replica if (slowDownNext.get()) { - //have some "next" return successfully from the primary; hence countOfNext checked + // have some "next" return successfully from the primary; hence countOfNext checked if (countOfNext.incrementAndGet() == 2) { sleepTime.set(2000); slowdownCode(e); @@ -184,12 +185,12 @@ public static AtomicReference getSecondaryCdl() { @BeforeClass public static void beforeClass() throws Exception { // enable store file refreshing - HTU.getConfiguration().setInt( - StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, REFRESH_PERIOD); + HTU.getConfiguration().setInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, + REFRESH_PERIOD); HTU.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true); HTU.getConfiguration().setBoolean(MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY, true); - StartTestingClusterOption option = StartTestingClusterOption.builder().numRegionServers(1). - numAlwaysStandByMasters(1).numMasters(1).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numRegionServers(1) + .numAlwaysStandByMasters(1).numMasters(1).build(); HTU.startMiniCluster(option); // Create table then get the single region for our new table. @@ -199,7 +200,7 @@ public static void beforeClass() throws Exception { ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); builder.setCoprocessor(SlowMeCopro.class.getName()); TableDescriptor hdt = builder.build(); - HTU.createTable(hdt, new byte[][]{f}, null); + HTU.createTable(hdt, new byte[][] { f }, null); TABLE_NAME = hdt.getTableName(); try (RegionLocator locator = HTU.getConnection().getRegionLocator(hdt.getTableName())) { hriPrimary = locator.getRegionLocation(row, false).getRegion(); @@ -256,22 +257,23 @@ private HRegionServer getRS() { private void openRegion(RegionInfo hri) throws Exception { try { if (isRegionOpened(hri)) return; - } catch (Exception e){} + } catch (Exception e) { + } // first version is '0' - AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest( - getRS().getServerName(), hri, null); + AdminProtos.OpenRegionRequest orr = + RequestConverter.buildOpenRegionRequest(getRS().getServerName(), hri, null); AdminProtos.OpenRegionResponse responseOpen = getRS().getRSRpcServices().openRegion(null, orr); Assert.assertEquals(1, responseOpen.getOpeningStateCount()); Assert.assertEquals(AdminProtos.OpenRegionResponse.RegionOpeningState.OPENED, - responseOpen.getOpeningState(0)); + responseOpen.getOpeningState(0)); checkRegionIsOpened(hri); } private void closeRegion(RegionInfo hri) throws Exception { - AdminProtos.CloseRegionRequest crr = ProtobufUtil.buildCloseRegionRequest( - getRS().getServerName(), hri.getRegionName()); - AdminProtos.CloseRegionResponse responseClose = getRS() - .getRSRpcServices().closeRegion(null, crr); + AdminProtos.CloseRegionRequest crr = + ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), hri.getRegionName()); + AdminProtos.CloseRegionResponse responseClose = + getRS().getRSRpcServices().closeRegion(null, crr); Assert.assertTrue(responseClose.getClosed()); checkRegionIsClosed(hri.getEncodedName()); @@ -360,7 +362,6 @@ public void testGetNoResultNoStaleRegionWithReplica() throws Exception { } } - @Test public void testGetNoResultStaleRegionWithReplica() throws Exception { byte[] b1 = Bytes.toBytes("testGetNoResultStaleRegionWithReplica"); @@ -566,7 +567,7 @@ public void testHedgedRead() throws Exception { Assert.assertFalse(r.getColumnCells(f, 
b1).isEmpty()); LOG.info("get works and is not stale done"); - //reset + // reset AsyncConnectionImpl conn = (AsyncConnectionImpl) HTU.getConnection().toAsyncConnection(); Counter hedgedReadOps = conn.getConnectionMetrics().get().hedgedReadOps; Counter hedgedReadWin = conn.getConnectionMetrics().get().hedgedReadWin; @@ -591,7 +592,6 @@ public void testHedgedRead() throws Exception { SlowMeCopro.getSecondaryCdl().get().countDown(); LOG.info("hedged read occurred but not faster"); - // But if we ask for stale we will get it and hedged read returned faster SlowMeCopro.getPrimaryCdl().set(new CountDownLatch(1)); g = new Get(b1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestTooBigException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestTooBigException.java index 1ffdf1fae539..8a309ab7245f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestTooBigException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRequestTooBigException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ MediumTests.class, ClientTests.class }) @@ -39,7 +40,7 @@ public class TestRequestTooBigException { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRequestTooBigException.class); + HBaseClassTestRule.forClass(TestRequestTooBigException.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterSplittingRegions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterSplittingRegions.java index 499bfc6843c1..5b35b5d9f3f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterSplittingRegions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterSplittingRegions.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestRestoreSnapshotFromClientAfterSplittingRegions @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientAfterSplittingRegions.class); + HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientAfterSplittingRegions.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterTruncate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterTruncate.java index 6ce0fb3622d2..0c0b48e7dbe1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterTruncate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterTruncate.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestRestoreSnapshotFromClientAfterTruncate @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientAfterTruncate.class); + HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientAfterTruncate.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientClone.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientClone.java index e9704584cf1a..455244078c5a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientClone.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientClone.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestRestoreSnapshotFromClientClone extends RestoreSnapshotFromClien @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientClone.class); + HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientClone.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientGetCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientGetCompactionState.java index 85eba79d1326..7d8e9355d31a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientGetCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientGetCompactionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestRestoreSnapshotFromClientGetCompactionState @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientGetCompactionState.class); + HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientGetCompactionState.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientSchemaChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientSchemaChange.java index 0dd9b7b879ce..1d2b4e0d1b1b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientSchemaChange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientSchemaChange.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestRestoreSnapshotFromClientSchemaChange @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientSchemaChange.class); + HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientSchemaChange.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientSimple.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientSimple.java index f498e72f1f69..0924cfdbc3bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientSimple.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientSimple.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestRestoreSnapshotFromClientSimple extends RestoreSnapshotFromClie @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientSimple.class); + HBaseClassTestRule.forClass(TestRestoreSnapshotFromClientSimple.class); @Parameter public int numReplicas; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java index 5d9566b6344d..3fc9ba59c0c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,37 +56,32 @@ public class TestResult { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResult.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestResult.class); private static final Logger LOG = LoggerFactory.getLogger(TestResult.class.getName()); - static KeyValue[] genKVs(final byte[] row, final byte[] family, - final byte[] value, - final long timestamp, - final int cols) { - KeyValue [] kvs = new KeyValue[cols]; + static KeyValue[] genKVs(final byte[] row, final byte[] family, final byte[] value, + final long timestamp, final int cols) { + KeyValue[] kvs = new KeyValue[cols]; - for (int i = 0; i < cols ; i++) { - kvs[i] = new KeyValue( - row, family, Bytes.toBytes(i), - timestamp, + for (int i = 0; i < cols; i++) { + kvs[i] = new KeyValue(row, family, Bytes.toBytes(i), timestamp, Bytes.add(value, Bytes.toBytes(i))); } return kvs; } - static final byte [] row = Bytes.toBytes("row"); - static final byte [] family = Bytes.toBytes("family"); - static final byte [] value = Bytes.toBytes("value"); - static final byte [] qual = Bytes.toBytes("qual"); + static final byte[] row = Bytes.toBytes("row"); + static final byte[] family = Bytes.toBytes("family"); + static final byte[] value = Bytes.toBytes("value"); + static final byte[] qual = Bytes.toBytes("qual"); /** * Run some tests to ensure Result acts like a proper CellScanner. 
*/ @Test public void testResultAsCellScanner() throws IOException { - Cell [] cells = genKVs(row, family, value, 1, 10); + Cell[] cells = genKVs(row, family, value, 1, 10); Arrays.sort(cells, CellComparator.getInstance()); Result r = Result.create(cells); assertSame(r, cells); @@ -97,7 +92,7 @@ public void testResultAsCellScanner() throws IOException { assertTrue(r == r.cellScanner()); } - private void assertSame(final CellScanner cellScanner, final Cell [] cells) throws IOException { + private void assertSame(final CellScanner cellScanner, final Cell[] cells) throws IOException { int count = 0; while (cellScanner.advance()) { assertTrue(cells[count].equals(cellScanner.current())); @@ -108,7 +103,7 @@ private void assertSame(final CellScanner cellScanner, final Cell [] cells) thro @Test public void testBasicGetColumn() throws Exception { - KeyValue [] kvs = genKVs(row, family, value, 1, 100); + KeyValue[] kvs = genKVs(row, family, value, 1, 100); Arrays.sort(kvs, CellComparator.getInstance()); @@ -143,10 +138,10 @@ public void testAdvanceMultipleOnEmptyCell() throws IOException { @Test public void testMultiVersionGetColumn() throws Exception { - KeyValue [] kvs1 = genKVs(row, family, value, 1, 100); - KeyValue [] kvs2 = genKVs(row, family, value, 200, 100); + KeyValue[] kvs1 = genKVs(row, family, value, 1, 100); + KeyValue[] kvs2 = genKVs(row, family, value, 200, 100); - KeyValue [] kvs = new KeyValue[kvs1.length+kvs2.length]; + KeyValue[] kvs = new KeyValue[kvs1.length + kvs2.length]; System.arraycopy(kvs1, 0, kvs, 0, kvs1.length); System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length); @@ -166,7 +161,7 @@ public void testMultiVersionGetColumn() throws Exception { @Test public void testBasicGetValue() throws Exception { - KeyValue [] kvs = genKVs(row, family, value, 1, 100); + KeyValue[] kvs = genKVs(row, family, value, 1, 100); Arrays.sort(kvs, CellComparator.getInstance()); @@ -182,10 +177,10 @@ public void testBasicGetValue() throws Exception { @Test public void testMultiVersionGetValue() throws Exception { - KeyValue [] kvs1 = genKVs(row, family, value, 1, 100); - KeyValue [] kvs2 = genKVs(row, family, value, 200, 100); + KeyValue[] kvs1 = genKVs(row, family, value, 1, 100); + KeyValue[] kvs2 = genKVs(row, family, value, 200, 100); - KeyValue [] kvs = new KeyValue[kvs1.length+kvs2.length]; + KeyValue[] kvs = new KeyValue[kvs1.length + kvs2.length]; System.arraycopy(kvs1, 0, kvs, 0, kvs1.length); System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length); @@ -202,7 +197,7 @@ public void testMultiVersionGetValue() throws Exception { @Test public void testBasicLoadValue() throws Exception { - KeyValue [] kvs = genKVs(row, family, value, 1, 100); + KeyValue[] kvs = genKVs(row, family, value, 1, 100); Arrays.sort(kvs, CellComparator.getInstance()); @@ -217,16 +212,16 @@ public void testBasicLoadValue() throws Exception { loadValueBuffer.flip(); assertEquals(loadValueBuffer, ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i)))); assertEquals(ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i))), - r.getValueAsByteBuffer(family, qf)); + r.getValueAsByteBuffer(family, qf)); } } @Test public void testMultiVersionLoadValue() throws Exception { - KeyValue [] kvs1 = genKVs(row, family, value, 1, 100); - KeyValue [] kvs2 = genKVs(row, family, value, 200, 100); + KeyValue[] kvs1 = genKVs(row, family, value, 1, 100); + KeyValue[] kvs2 = genKVs(row, family, value, 200, 100); - KeyValue [] kvs = new KeyValue[kvs1.length+kvs2.length]; + KeyValue[] kvs = new KeyValue[kvs1.length + kvs2.length]; 
System.arraycopy(kvs1, 0, kvs, 0, kvs1.length); System.arraycopy(kvs2, 0, kvs, kvs1.length, kvs2.length); @@ -243,7 +238,7 @@ public void testMultiVersionLoadValue() throws Exception { loadValueBuffer.flip(); assertEquals(loadValueBuffer, ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i)))); assertEquals(ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i))), - r.getValueAsByteBuffer(family, qf)); + r.getValueAsByteBuffer(family, qf)); } } @@ -252,14 +247,14 @@ public void testMultiVersionLoadValue() throws Exception { */ @Test public void testCompareResults() throws Exception { - byte [] value1 = Bytes.toBytes("value1"); - byte [] qual = Bytes.toBytes("qual"); + byte[] value1 = Bytes.toBytes("value1"); + byte[] qual = Bytes.toBytes("qual"); KeyValue kv1 = new KeyValue(row, family, qual, value); KeyValue kv2 = new KeyValue(row, family, qual, value1); - Result r1 = Result.create(new KeyValue[] {kv1}); - Result r2 = Result.create(new KeyValue[] {kv2}); + Result r1 = Result.create(new KeyValue[] { kv1 }); + Result r2 = Result.create(new KeyValue[] { kv2 }); // no exception thrown Result.compareResults(r1, r1); try { @@ -422,9 +417,8 @@ private Result getArrayBackedTagResult(Tag tag) { if (tag != null) { tags = Arrays.asList(tag); } - KeyValue kvCell = new KeyValue(row, family, qual, 0L, KeyValue.Type.Put, - value, tags); - return Result.create(new Cell[] {kvCell}); + KeyValue kvCell = new KeyValue(row, family, qual, 0L, KeyValue.Type.Put, value, tags); + return Result.create(new Cell[] { kvCell }); } private Result getByteBufferBackedTagResult(Tag tag) { @@ -432,13 +426,13 @@ private Result getByteBufferBackedTagResult(Tag tag) { if (tag != null) { tags = Arrays.asList(tag); } - KeyValue kvCell = new KeyValue(row, family, qual, 0L, KeyValue.Type.Put, - value, tags); + KeyValue kvCell = new KeyValue(row, family, qual, 0L, KeyValue.Type.Put, value, tags); ByteBuffer buf = ByteBuffer.allocateDirect(kvCell.getBuffer().length); ByteBufferUtils.copyFromArrayToBuffer(buf, kvCell.getBuffer(), 0, kvCell.getBuffer().length); ByteBufferKeyValue bbKV = new ByteBufferKeyValue(buf, 0, buf.capacity(), 0L); - return Result.create(new Cell[] {bbKV}); + return Result.create(new Cell[] { bbKV }); } + /** * Verifies that one can't modify instance of EMPTY_RESULT. */ @@ -463,7 +457,6 @@ public void testEmptyResultIsReadonly() { /** * Microbenchmark that compares {@link Result#getValue} and {@link Result#loadValue} performance. - * * @throws Exception */ public void doReadBenchmark() throws Exception { @@ -473,16 +466,16 @@ public void doReadBenchmark() throws Exception { StringBuilder valueSB = new StringBuilder(); for (int i = 0; i < 100; i++) { - valueSB.append((byte)(Math.random() * 10)); + valueSB.append((byte) (Math.random() * 10)); } StringBuilder rowSB = new StringBuilder(); for (int i = 0; i < 50; i++) { - rowSB.append((byte)(Math.random() * 10)); + rowSB.append((byte) (Math.random() * 10)); } - KeyValue [] kvs = genKVs(Bytes.toBytes(rowSB.toString()), family, - Bytes.toBytes(valueSB.toString()), 1, n); + KeyValue[] kvs = + genKVs(Bytes.toBytes(rowSB.toString()), family, Bytes.toBytes(valueSB.toString()), 1, n); Arrays.sort(kvs, CellComparator.getInstance()); ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024); Result r = Result.create(kvs); @@ -527,7 +520,6 @@ public void doReadBenchmark() throws Exception { /** * Calls non-functional test methods. 
- * * @param args */ public static void main(String[] args) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java index 42520a94d87b..8c57b9183f15 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestResultFromCoprocessor { @ClassRule @@ -58,21 +58,17 @@ public class TestResultFromCoprocessor { private static final byte[] VALUE = Bytes.toBytes(100L); private static final byte[] FIXED_VALUE = Bytes.toBytes("fixed_value"); private static final Cell FIXED_CELL = ExtendedCellBuilderFactory - .create(CellBuilderType.DEEP_COPY) - .setRow(ROW).setFamily(FAMILY) - .setQualifier(QUAL).setTimestamp(0) - .setType(KeyValue.Type.Put.getCode()) - .setValue(FIXED_VALUE) - .build(); + .create(CellBuilderType.DEEP_COPY).setRow(ROW).setFamily(FAMILY).setQualifier(QUAL) + .setTimestamp(0).setType(KeyValue.Type.Put.getCode()).setValue(FIXED_VALUE).build(); private static final Result FIXED_RESULT = Result.create(Arrays.asList(FIXED_CELL)); private static final TableName TABLE_NAME = TableName.valueOf("TestResultFromCoprocessor"); + @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); - TableDescriptor desc = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setCoprocessor(MyObserver.class.getName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TABLE_NAME).setCoprocessor(MyObserver.class.getName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); TEST_UTIL.getAdmin().createTable(desc); } @@ -124,13 +120,13 @@ public Optional getRegionObserver() { @Override public Result postAppend(final ObserverContext c, - final Append append, final Result result) { + final Append append, final Result result) { return FIXED_RESULT; } @Override public Result postIncrement(final ObserverContext c, - final Increment increment, final Result result) { + final Increment increment, final Result result) { return FIXED_RESULT; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultScannerCursor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultScannerCursor.java index 05a4b20eb1ba..10429402ebbc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultScannerCursor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultScannerCursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java index b16c7b6f4a1a..da60f1455fb6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -70,11 +71,11 @@ public static void tearDownAfterClass() throws Exception { @Test public void testResultSizeEstimation() throws Exception { - byte [] ROW1 = Bytes.toBytes("testRow1"); - byte [] ROW2 = Bytes.toBytes("testRow2"); - byte [] FAMILY = Bytes.toBytes("testFamily"); - byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - byte [] VALUE = Bytes.toBytes("testValue"); + byte[] ROW1 = Bytes.toBytes("testRow1"); + byte[] ROW2 = Bytes.toBytes("testRow2"); + byte[] FAMILY = Bytes.toBytes("testFamily"); + byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + byte[] VALUE = Bytes.toBytes("testValue"); final TableName tableName = TableName.valueOf(name.getMethodName()); byte[][] FAMILIES = new byte[][] { FAMILY }; @@ -90,7 +91,7 @@ public void testResultSizeEstimation() throws Exception { s.setMaxResultSize(SCANNER_DATA_LIMIT); ResultScanner rs = table.getScanner(s); int count = 0; - while(rs.next() != null) { + while (rs.next() != null) { count++; } assertEquals("Result size estimation did not work properly", 2, count); @@ -100,29 +101,29 @@ public void testResultSizeEstimation() throws Exception { @Test public void testResultSizeEstimationWithTags() throws Exception { - byte [] ROW1 = Bytes.toBytes("testRow1"); - byte [] ROW2 = Bytes.toBytes("testRow2"); - byte [] FAMILY = Bytes.toBytes("testFamily"); - byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - byte [] VALUE = Bytes.toBytes("testValue"); + byte[] ROW1 = Bytes.toBytes("testRow1"); + byte[] ROW2 = Bytes.toBytes("testRow2"); + byte[] FAMILY = Bytes.toBytes("testFamily"); + byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + byte[] VALUE = Bytes.toBytes("testValue"); final TableName tableName = TableName.valueOf(name.getMethodName()); byte[][] FAMILIES = new byte[][] { FAMILY }; Table table = TEST_UTIL.createTable(tableName, FAMILIES); Put p = new Put(ROW1); p.add(new KeyValue(ROW1, FAMILY, QUALIFIER, Long.MAX_VALUE, VALUE, - new Tag[] { new ArrayBackedTag((byte)1, new byte[TAG_DATA_SIZE]) })); + new Tag[] { new ArrayBackedTag((byte) 1, new byte[TAG_DATA_SIZE]) })); table.put(p); p = new Put(ROW2); p.add(new KeyValue(ROW2, FAMILY, QUALIFIER, Long.MAX_VALUE, VALUE, - new Tag[] { new ArrayBackedTag((byte)1, new byte[TAG_DATA_SIZE]) })); + new Tag[] { new ArrayBackedTag((byte) 1, new byte[TAG_DATA_SIZE]) })); table.put(p); Scan s = new Scan(); s.setMaxResultSize(SCANNER_DATA_LIMIT); ResultScanner rs = table.getScanner(s); int count = 0; - while(rs.next() != null) { + while (rs.next() != null) { count++; } assertEquals("Result size estimation did not work properly", 2, count); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java index 452d7fb89205..3a7fa45090ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestRpcConnectionRegistry { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcConnectionRegistry.class); + HBaseClassTestRule.forClass(TestRpcConnectionRegistry.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -84,11 +84,11 @@ public void tearDown() throws IOException { private void setMaxNodeCount(int count) { UTIL.getMiniHBaseCluster().getMasterThreads().stream() - .map(t -> t.getMaster().getConfiguration()) - .forEach(conf -> conf.setInt(RSRpcServices.CLIENT_BOOTSTRAP_NODE_LIMIT, count)); + .map(t -> t.getMaster().getConfiguration()) + .forEach(conf -> conf.setInt(RSRpcServices.CLIENT_BOOTSTRAP_NODE_LIMIT, count)); UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer().getConfiguration()) - .forEach(conf -> conf.setInt(RSRpcServices.CLIENT_BOOTSTRAP_NODE_LIMIT, count)); + .map(t -> t.getRegionServer().getConfiguration()) + .forEach(conf -> conf.setInt(RSRpcServices.CLIENT_BOOTSTRAP_NODE_LIMIT, count)); } @Test @@ -106,7 +106,7 @@ public void testRegistryRPCs() throws Exception { assertEquals(registry.getClusterId().get(), activeMaster.getClusterId()); assertEquals(registry.getActiveMaster().get(), activeMaster.getServerName()); List metaLocations = - Arrays.asList(registry.getMetaRegionLocations().get().getRegionLocations()); + Arrays.asList(registry.getMetaRegionLocations().get().getRegionLocations()); List actualMetaLocations = activeMaster.getMetaLocations(); Collections.sort(metaLocations); Collections.sort(actualMetaLocations); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java index f5f16e4a16a7..247c6d3e76fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScanWithoutFetchingData.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -80,8 +80,8 @@ public static void setUp() throws Exception { } } HRI = UTIL.getAdmin().getRegions(TABLE_NAME).get(0); - CONN = - (AsyncConnectionImpl) ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get(); + CONN = (AsyncConnectionImpl) ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()) + .get(); STUB = CONN.getRegionServerStub(UTIL.getHBaseCluster().getRegionServer(0).getServerName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java index 3ad91b4d57f8..001ae463eeea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,15 +41,14 @@ /** * Test various scanner timeout issues. */ -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestScannerTimeout { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestScannerTimeout.class); - private final static HBaseTestingUtil - TEST_UTIL = new HBaseTestingUtil(); + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Logger LOG = LoggerFactory.getLogger(TestScannerTimeout.class); private final static byte[] SOME_BYTES = Bytes.toBytes("f"); @@ -61,7 +60,7 @@ public class TestScannerTimeout { private final static int SCANNER_TIMEOUT = 15000; private final static int SCANNER_CACHING = 5; - /** + /** * @throws java.lang.Exception */ @BeforeClass @@ -97,8 +96,8 @@ public void setUp() throws Exception { } /** - * Test that scanner can continue even if the region server it was reading - * from failed. Before 2772, it reused the same scanner id. + * Test that scanner can continue even if the region server it was reading from failed. Before + * 2772, it reused the same scanner id. * @throws Exception */ @Test @@ -128,8 +127,8 @@ public void test2772() throws Exception { } /** - * Test that scanner won't miss any rows if the region server it was reading - * from failed. Before 3686, it would skip rows in the scan. + * Test that scanner won't miss any rows if the region server it was reading from failed. Before + * 3686, it would skip rows in the scan. * @throws Exception */ @Test @@ -147,8 +146,7 @@ public void test3686a() throws Exception { // Since the RS is already created, this conf is client-side only for // this new table Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - conf.setInt( - HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, SCANNER_TIMEOUT*100); + conf.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, SCANNER_TIMEOUT * 100); Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(TABLE_NAME); LOG.info("START ************ TEST3686A---22"); @@ -162,8 +160,8 @@ public void test3686a() throws Exception { // Kill after one call to next(), which got 5 rows. 
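Both 3686 tests depend on the client being configured with a much longer scanner timeout than the server, via a client-side-only copy of the configuration. A minimal sketch of that pattern, assuming it runs inside a test method that declares throws Exception and reusing the constants defined above (TEST_UTIL, TABLE_NAME, SCANNER_TIMEOUT, SCANNER_CACHING, NB_ROWS):

  // Illustrative sketch only: raise the timeout for this client/connection alone, so the
  // server-side scanner lease can expire while the client keeps iterating.
  Configuration clientConf = new Configuration(TEST_UTIL.getConfiguration());
  clientConf.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, SCANNER_TIMEOUT * 100);
  try (Connection connection = ConnectionFactory.createConnection(clientConf);
      Table table = connection.getTable(TABLE_NAME)) {
    Scan scan = new Scan().setCaching(SCANNER_CACHING);   // a few rows per RPC
    int rows = 0;
    try (ResultScanner rs = table.getScanner(scan)) {
      for (Result r : rs) {
        rows++;                                           // keep scanning past the server lease
      }
    }
    // the scan is expected to recover and still return every row (NB_ROWS in these tests)
  }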
rs.abort("die!"); - while(r.next() != null) { - count ++; + while (r.next() != null) { + count++; } assertEquals(NB_ROWS, count); r.close(); @@ -173,9 +171,8 @@ public void test3686a() throws Exception { } /** - * Make sure that no rows are lost if the scanner timeout is longer on the - * client than the server, and the scan times out on the server but not the - * client. + * Make sure that no rows are lost if the scanner timeout is longer on the client than the server, + * and the scan times out on the server but not the client. * @throws Exception */ @Test @@ -196,9 +193,9 @@ public void test3686b() throws Exception { int count = 1; r.next(); // Sleep, allowing the scan to timeout on the server but not on the client. - Thread.sleep(SCANNER_TIMEOUT+2000); - while(r.next() != null) { - count ++; + Thread.sleep(SCANNER_TIMEOUT + 2000); + while (r.next() != null) { + count++; } assertEquals(NB_ROWS, count); r.close(); @@ -209,4 +206,3 @@ public void test3686b() throws Exception { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index 70264ed935ec..3fad6edac133 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,7 +77,7 @@ * A client-side test, mostly testing scanners with various parameters. Parameterized on different * registry implementations. */ -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) @RunWith(Parameterized.class) public class TestScannersFromClientSide { @@ -88,10 +88,10 @@ public class TestScannersFromClientSide { private static final Logger LOG = LoggerFactory.getLogger(TestScannersFromClientSide.class); private static HBaseTestingUtil TEST_UTIL; - private static byte [] ROW = Bytes.toBytes("testRow"); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - private static byte [] VALUE = Bytes.toBytes("testValue"); + private static byte[] ROW = Bytes.toBytes("testRow"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte[] VALUE = Bytes.toBytes("testValue"); @Rule public TableNameTestRule name = new TableNameTestRule(); @@ -105,23 +105,18 @@ public static void tearDownAfterClass() throws Exception { @Parameterized.Parameters public static Collection parameters() { - return Arrays.asList(new Object[][] { - { MasterRegistry.class, 1}, - { MasterRegistry.class, 2}, - { ZKConnectionRegistry.class, 1} - }); + return Arrays.asList(new Object[][] { { MasterRegistry.class, 1 }, { MasterRegistry.class, 2 }, + { ZKConnectionRegistry.class, 1 } }); } /** * JUnit does not provide an easy way to run a hook after each parameterized run. Without that * there is no easy way to restart the test cluster after each parameterized run. Annotation * BeforeParam does not work either because it runs before parameterization and hence does not - * have access to the test parameters (which is weird). 
- * - * This *hack* checks if the current instance of test cluster configuration has the passed - * parameterized configs. In such a case, we can just reuse the cluster for test and do not need - * to initialize from scratch. While this is a hack, it saves a ton of time for the full - * test and de-flakes it. + * have access to the test parameters (which is weird). This *hack* checks if the current instance + * of test cluster configuration has the passed parameterized configs. In such a case, we can just + * reuse the cluster for test and do not need to initialize from scratch. While this is a hack, it + * saves a ton of time for the full test and de-flakes it. */ private static boolean isSameParameterizedCluster(Class registryImpl, int numHedgedReqs) { // initialize() is called for every unit test, however we only want to reset the cluster state @@ -149,7 +144,7 @@ public TestScannersFromClientSide(Class registryImpl, int numHedgedReqs) thro Configuration conf = TEST_UTIL.getConfiguration(); conf.setLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, 10 * 1024 * 1024); conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, registryImpl, - ConnectionRegistry.class); + ConnectionRegistry.class); Preconditions.checkArgument(numHedgedReqs > 0); conf.setInt(MasterRegistry.MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, numHedgedReqs); StartTestingClusterOption.Builder builder = StartTestingClusterOption.builder(); @@ -164,7 +159,7 @@ public TestScannersFromClientSide(Class registryImpl, int numHedgedReqs) thro @Test public void testScanBatch() throws Exception { final TableName tableName = name.getTableName(); - byte [][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 8); + byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 8); Table ht = TEST_UTIL.createTable(tableName, FAMILY); @@ -178,7 +173,7 @@ public void testScanBatch() throws Exception { // table: row, family, c0:0, c1:1, ... 
, c7:7 put = new Put(ROW); - for (int i=0; i < QUALIFIERS.length; i++) { + for (int i = 0; i < QUALIFIERS.length; i++) { KeyValue kv = new KeyValue(ROW, FAMILY, QUALIFIERS[i], i, VALUE); put.add(kv); } @@ -210,7 +205,7 @@ public void testScanBatch() throws Exception { verifyResult(result, kvListExp, toLog, "Testing first batch of scan"); // with batch - scan = new Scan().withStartRow(ROW); + scan = new Scan().withStartRow(ROW); scan.readAllVersions(); scan.setBatch(2); scanner = ht.getScanner(scan); @@ -284,8 +279,8 @@ public void testMaxResultSizeIsSetToDefault() throws Exception { */ @Test public void testScannerForNotExistingTable() { - String[] tableNames = {"A", "Z", "A:A", "Z:Z"}; - for(String tableName : tableNames) { + String[] tableNames = { "A", "Z", "A:A", "Z:Z" }; + for (String tableName : tableNames) { try { Table table = TEST_UTIL.getConnection().getTable(TableName.valueOf(tableName)); testSmallScan(table, true, 1, 5); @@ -335,8 +330,8 @@ public void testSmallScan() throws Exception { /** * Run through a variety of test configurations with a small scan */ - private void testSmallScan( - Table table, boolean reversed, int rows, int columns) throws Exception { + private void testSmallScan(Table table, boolean reversed, int rows, int columns) + throws Exception { Scan baseScan = new Scan(); baseScan.setReversed(reversed); baseScan.setReadType(ReadType.PREAD); @@ -367,9 +362,9 @@ private void verifyExpectedCounts(Table table, Scan scan, int expectedRowCount, } assertTrue("Expected row count: " + expectedRowCount + " Actual row count: " + rowCount, - expectedRowCount == rowCount); + expectedRowCount == rowCount); assertTrue("Expected cell count: " + expectedCellCount + " Actual cell count: " + cellCount, - expectedCellCount == cellCount); + expectedCellCount == cellCount); scanner.close(); } @@ -379,8 +374,8 @@ private void verifyExpectedCounts(Table table, Scan scan, int expectedRowCount, @Test public void testGetMaxResults() throws Exception { final TableName tableName = name.getTableName(); - byte [][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3); - byte [][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 20); + byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3); + byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 20); Table ht = TEST_UTIL.createTable(tableName, FAMILIES); @@ -393,7 +388,7 @@ public void testGetMaxResults() throws Exception { kvListExp = new ArrayList<>(); // Insert one CF for row[0] put = new Put(ROW); - for (int i=0; i < 10; i++) { + for (int i = 0; i < 10; i++) { KeyValue kv = new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE); put.add(kv); kvListExp.add(kv); @@ -415,8 +410,7 @@ public void testGetMaxResults() throws Exception { // Filters: ColumnRangeFilter get = new Get(ROW); get.setMaxResultsPerColumnFamily(5); - get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, QUALIFIERS[5], - true)); + get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, QUALIFIERS[5], true)); result = ht.get(get); kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[2], 1, VALUE)); @@ -428,14 +422,14 @@ public void testGetMaxResults() throws Exception { // Insert two more CF for row[0] // 20 columns for CF2, 10 columns for CF1 put = new Put(ROW); - for (int i=0; i < QUALIFIERS.length; i++) { + for (int i = 0; i < QUALIFIERS.length; i++) { KeyValue kv = new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE); put.add(kv); } ht.put(put); put = new Put(ROW); - for (int i=0; i < 10; i++) { + for (int i = 0; i < 10; i++) { KeyValue kv 
= new KeyValue(ROW, FAMILIES[1], QUALIFIERS[i], 1, VALUE); put.add(kv); } @@ -447,14 +441,14 @@ public void testGetMaxResults() throws Exception { get.addFamily(FAMILIES[2]); result = ht.get(get); kvListExp = new ArrayList<>(); - //Exp: CF1:q0, ..., q9, CF2: q0, q1, q10, q11, ..., q19 - for (int i=0; i < 10; i++) { + // Exp: CF1:q0, ..., q9, CF2: q0, q1, q10, q11, ..., q19 + for (int i = 0; i < 10; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[i], 1, VALUE)); } - for (int i=0; i < 2; i++) { + for (int i = 0; i < 2; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE)); } - for (int i=10; i < 20; i++) { + for (int i = 10; i < 20; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE)); } verifyResult(result, kvListExp, toLog, "Testing multiple CFs"); @@ -465,13 +459,13 @@ public void testGetMaxResults() throws Exception { get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, null, true)); result = ht.get(get); kvListExp = new ArrayList<>(); - for (int i=2; i < 5; i++) { + for (int i = 2; i < 5; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE)); } - for (int i=2; i < 5; i++) { + for (int i = 2; i < 5; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[i], 1, VALUE)); } - for (int i=2; i < 5; i++) { + for (int i = 2; i < 5; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE)); } verifyResult(result, kvListExp, toLog, "Testing multiple CFs + CRF"); @@ -484,7 +478,7 @@ public void testGetMaxResults() throws Exception { kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[1], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[1], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[1], 1, VALUE)); - for (int i=10; i < 16; i++) { + for (int i = 10; i < 16; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[i], 1, VALUE)); } verifyResult(result, kvListExp, toLog, "Testing multiple CFs + PFF"); @@ -497,9 +491,9 @@ public void testGetMaxResults() throws Exception { @Test public void testScanMaxResults() throws Exception { final TableName tableName = name.getTableName(); - byte [][] ROWS = HTestConst.makeNAscii(ROW, 2); - byte [][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3); - byte [][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 10); + byte[][] ROWS = HTestConst.makeNAscii(ROW, 2); + byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3); + byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 10); Table ht = TEST_UTIL.createTable(tableName, FAMILIES); @@ -511,10 +505,10 @@ public void testScanMaxResults() throws Exception { kvListExp = new ArrayList<>(); - for (int r=0; r < ROWS.length; r++) { + for (int r = 0; r < ROWS.length; r++) { put = new Put(ROWS[r]); - for (int c=0; c < FAMILIES.length; c++) { - for (int q=0; q < QUALIFIERS.length; q++) { + for (int c = 0; c < FAMILIES.length; c++) { + for (int q = 0; q < QUALIFIERS.length; q++) { KeyValue kv = new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1, VALUE); put.add(kv); if (q < 4) { @@ -545,8 +539,8 @@ public void testScanMaxResults() throws Exception { @Test public void testGetRowOffset() throws Exception { final TableName tableName = name.getTableName(); - byte [][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3); - byte [][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 20); + byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3); + byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 20); Table ht = TEST_UTIL.createTable(tableName, FAMILIES); @@ -559,7 +553,7 @@ 
public void testGetRowOffset() throws Exception { // Insert one CF for row kvListExp = new ArrayList<>(); put = new Put(ROW); - for (int i=0; i < 10; i++) { + for (int i = 0; i < 10; i++) { KeyValue kv = new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE); put.add(kv); // skipping first two kvs @@ -570,36 +564,34 @@ public void testGetRowOffset() throws Exception { } ht.put(put); - //setting offset to 2 + // setting offset to 2 get = new Get(ROW); get.setRowOffsetPerColumnFamily(2); result = ht.get(get); verifyResult(result, kvListExp, toLog, "Testing basic setRowOffset"); - //setting offset to 20 + // setting offset to 20 get = new Get(ROW); get.setRowOffsetPerColumnFamily(20); result = ht.get(get); kvListExp = new ArrayList<>(); verifyResult(result, kvListExp, toLog, "Testing offset > #kvs"); - //offset + maxResultPerCF + // offset + maxResultPerCF get = new Get(ROW); get.setRowOffsetPerColumnFamily(4); get.setMaxResultsPerColumnFamily(5); result = ht.get(get); kvListExp = new ArrayList<>(); - for (int i=4; i < 9; i++) { + for (int i = 4; i < 9; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE)); } - verifyResult(result, kvListExp, toLog, - "Testing offset + setMaxResultsPerCF"); + verifyResult(result, kvListExp, toLog, "Testing offset + setMaxResultsPerCF"); // Filters: ColumnRangeFilter get = new Get(ROW); get.setRowOffsetPerColumnFamily(1); - get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, QUALIFIERS[5], - true)); + get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, QUALIFIERS[5], true)); result = ht.get(get); kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[3], 1, VALUE)); @@ -609,9 +601,9 @@ public void testGetRowOffset() throws Exception { // Insert into two more CFs for row // 10 columns for CF2, 10 columns for CF1 - for(int j=2; j > 0; j--) { + for (int j = 2; j > 0; j--) { put = new Put(ROW); - for (int i=0; i < 10; i++) { + for (int i = 0; i < 10; i++) { KeyValue kv = new KeyValue(ROW, FAMILIES[j], QUALIFIERS[i], 1, VALUE); put.add(kv); } @@ -625,13 +617,12 @@ public void testGetRowOffset() throws Exception { get.addFamily(FAMILIES[2]); result = ht.get(get); kvListExp = new ArrayList<>(); - //Exp: CF1:q4, q5, CF2: q4, q5 + // Exp: CF1:q4, q5, CF2: q4, q5 kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[4], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[5], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[4], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[5], 1, VALUE)); - verifyResult(result, kvListExp, toLog, - "Testing offset + multiple CFs + maxResults"); + verifyResult(result, kvListExp, toLog, "Testing offset + multiple CFs + maxResults"); } @Test @@ -659,13 +650,12 @@ public void testScanRawDeleteFamilyVersion() throws Exception { } /** - * Test from client side for scan while the region is reopened - * on the same region server. + * Test from client side for scan while the region is reopened on the same region server. 
*/ @Test public void testScanOnReopenedRegion() throws Exception { final TableName tableName = name.getTableName(); - byte [][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 2); + byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 2); Table ht = TEST_UTIL.createTable(tableName, FAMILY); @@ -678,7 +668,7 @@ public void testScanOnReopenedRegion() throws Exception { // table: row, family, c0:0, c1:1 put = new Put(ROW); - for (int i=0; i < QUALIFIERS.length; i++) { + for (int i = 0; i < QUALIFIERS.length; i++) { KeyValue kv = new KeyValue(ROW, FAMILY, QUALIFIERS[i], i, VALUE); put.add(kv); } @@ -733,8 +723,7 @@ public void testScanOnReopenedRegion() throws Exception { verifyResult(result, kvListExp, toLog, "Testing scan on re-opened region"); } - static void verifyResult(Result result, List expKvList, boolean toLog, - String msg) { + static void verifyResult(Result result, List expKvList, boolean toLog, String msg) { LOG.info(msg); LOG.info("Expected count: " + expKvList.size()); @@ -746,7 +735,7 @@ static void verifyResult(Result result, List expKvList, boolean toLog, int i = 0; for (Cell kv : result.rawCells()) { if (i >= expKvList.size()) { - break; // we will check the size later + break; // we will check the size later } Cell kvExp = expKvList.get(i++); @@ -848,7 +837,7 @@ public void testReverseScanWithFlush() throws Exception { final byte[] LARGE_VALUE = generateHugeValue(128 * 1024); try (Table table = TEST_UTIL.createTable(tableName, FAMILY); - Admin admin = TEST_UTIL.getAdmin()) { + Admin admin = TEST_UTIL.getAdmin()) { List putList = new ArrayList<>(); for (long i = 0; i < ROWS_TO_INSERT; i++) { Put put = new Put(Bytes.toBytes(i)); @@ -885,8 +874,7 @@ public void testReverseScanWithFlush() throws Exception { @Test public void testScannerWithPartialResults() throws Exception { TableName tableName = TableName.valueOf("testScannerWithPartialResults"); - try (Table table = TEST_UTIL.createMultiRegionTable(tableName, - Bytes.toBytes("c"), 4)) { + try (Table table = TEST_UTIL.createMultiRegionTable(tableName, Bytes.toBytes("c"), 4)) { List puts = new ArrayList<>(); byte[] largeArray = new byte[10000]; Put put = new Put(Bytes.toBytes("aaaa0")); @@ -911,7 +899,7 @@ public void testScannerWithPartialResults() throws Exception { Result result; int expectedKvNumber = 6; int returnedKvNumber = 0; - while((result = rs.next()) != null) { + while ((result = rs.next()) != null) { returnedKvNumber += result.listCells().size(); } rs.close(); @@ -947,7 +935,7 @@ public String toString() { return this.getClass().getSimpleName(); } - public static LimitKVsReturnFilter parseFrom(final byte [] pbBytes) + public static LimitKVsReturnFilter parseFrom(final byte[] pbBytes) throws DeserializationException { return new LimitKVsReturnFilter(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 4c23ef73c046..22daa48e49f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -53,7 +53,7 @@ public class TestSeparateClientZKCluster { private static final Logger LOG = LoggerFactory.getLogger(TestSeparateClientZKCluster.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final File clientZkDir = - new 
File(TEST_UTIL.getDataTestDir("TestSeparateClientZKCluster").toString()); + new File(TEST_UTIL.getDataTestDir("TestSeparateClientZKCluster").toString()); private static final int ZK_SESSION_TIMEOUT = 5000; private static MiniZooKeeperCluster clientZkCluster; @@ -68,7 +68,7 @@ public class TestSeparateClientZKCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSeparateClientZKCluster.class); + HBaseClassTestRule.forClass(TestSeparateClientZKCluster.class); @BeforeClass public static void beforeAllTests() throws Exception { @@ -88,8 +88,8 @@ public static void beforeAllTests() throws Exception { // reduce zk session timeout to easier trigger session expiration TEST_UTIL.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, ZK_SESSION_TIMEOUT); // Start a cluster with 2 masters and 3 regionservers. - StartTestingClusterOption option = - StartTestingClusterOption.builder().numMasters(2).numRegionServers(3).numDataNodes(3).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(2) + .numRegionServers(3).numDataNodes(3).build(); TEST_UTIL.startMiniCluster(option); } @@ -107,9 +107,9 @@ public void testBasicOperation() throws Exception { Connection conn = TEST_UTIL.getConnection(); try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) { ColumnFamilyDescriptorBuilder cfDescBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(family); + ColumnFamilyDescriptorBuilder.newBuilder(family); TableDescriptorBuilder tableDescBuilder = - TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); + TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); admin.createTable(tableDescBuilder.build()); // test simple get and put Put put = new Put(row); @@ -154,13 +154,13 @@ public void testMetaRegionMove() throws Exception { // create table Connection conn = TEST_UTIL.getConnection(); try (Admin admin = conn.getAdmin(); - Table table = conn.getTable(tn); - RegionLocator locator = conn.getRegionLocator(tn)) { + Table table = conn.getTable(tn); + RegionLocator locator = conn.getRegionLocator(tn)) { SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); ColumnFamilyDescriptorBuilder cfDescBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(family); + ColumnFamilyDescriptorBuilder.newBuilder(family); TableDescriptorBuilder tableDescBuilder = - TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); + TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); admin.createTable(tableDescBuilder.build()); // issue some requests to cache the region location Put put = new Put(row); @@ -207,9 +207,9 @@ public void testMetaMoveDuringClientZkClusterRestart() throws Exception { Connection conn = TEST_UTIL.getConnection(); try (Admin admin = conn.getAdmin(); Table table = conn.getTable(tn)) { ColumnFamilyDescriptorBuilder cfDescBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(family); + ColumnFamilyDescriptorBuilder.newBuilder(family); TableDescriptorBuilder tableDescBuilder = - TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); + TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); admin.createTable(tableDescBuilder.build()); // put some data Put put = new Put(row); @@ -247,9 +247,9 @@ public void testAsyncTable() throws Exception { TableName tn = name.getTableName(); ColumnFamilyDescriptorBuilder cfDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family); 
TableDescriptorBuilder tableDescBuilder = - TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); + TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfDescBuilder.build()); try (AsyncConnection ASYNC_CONN = - ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { + ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { ASYNC_CONN.getAdmin().createTable(tableDescBuilder.build()).get(); AsyncTable table = ASYNC_CONN.getTable(tn); // put some data @@ -268,7 +268,7 @@ public void testAsyncTable() throws Exception { public void testChangeMetaReplicaCount() throws Exception { Admin admin = TEST_UTIL.getAdmin(); try (RegionLocator locator = - TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { assertEquals(1, locator.getAllRegionLocations().size()); HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java index d14f20e46bc5..3a31f88cdb87 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,11 +46,10 @@ import org.junit.rules.TestName; /** - * This class is for testing HBaseConnectionManager ServerBusyException. - * Be careful adding to this class. It sets a low - * HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD + * This class is for testing HBaseConnectionManager ServerBusyException. Be careful adding to this + * class. 
It sets a low HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD */ -@Category({LargeTests.class}) +@Category({ LargeTests.class }) public class TestServerBusyException { @ClassRule @@ -67,33 +66,34 @@ public class TestServerBusyException { public static class SleepCoprocessor implements RegionCoprocessor, RegionObserver { public static final int SLEEP_TIME = 5000; + @Override public Optional getRegionObserver() { return Optional.of(this); } @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { + public void preGetOp(final ObserverContext e, final Get get, + final List results) throws IOException { Threads.sleep(SLEEP_TIME); } @Override - public void prePut(final ObserverContext e, - final Put put, final WALEdit edit, final Durability durability) throws IOException { + public void prePut(final ObserverContext e, final Put put, + final WALEdit edit, final Durability durability) throws IOException { Threads.sleep(SLEEP_TIME); } @Override public Result preIncrement(final ObserverContext e, - final Increment increment) throws IOException { + final Increment increment) throws IOException { Threads.sleep(SLEEP_TIME); return null; } @Override - public void preDelete(final ObserverContext e, final Delete delete, - final WALEdit edit, final Durability durability) throws IOException { + public void preDelete(final ObserverContext e, + final Delete delete, final WALEdit edit, final Durability durability) throws IOException { Threads.sleep(SLEEP_TIME); } @@ -109,8 +109,8 @@ public Optional getRegionObserver() { } @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { + public void preGetOp(final ObserverContext e, final Get get, + final List results) throws IOException { // After first sleep, all requests are timeout except the last retry. If we handle // all the following requests, finally the last request is also timeout. If we drop all // timeout requests, we can handle the last request immediately and it will not timeout. 
@@ -135,7 +135,8 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(2); } - @AfterClass public static void tearDownAfterClass() throws Exception { + @AfterClass + public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -143,7 +144,7 @@ private static class TestPutThread extends Thread { Table table; int getServerBusyException = 0; - TestPutThread(Table table){ + TestPutThread(Table table) { this.table = table; } @@ -164,7 +165,7 @@ private static class TestGetThread extends Thread { Table table; int getServerBusyException = 0; - TestGetThread(Table table){ + TestGetThread(Table table) { this.table = table; } @@ -184,20 +185,15 @@ public void run() { @Test() public void testServerBusyException() throws Exception { TableDescriptor hdt = TEST_UTIL.createModifyableTableDescriptor(name.getMethodName()) - .setCoprocessor(SleepCoprocessor.class.getName()).build(); + .setCoprocessor(SleepCoprocessor.class.getName()).build(); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); TEST_UTIL.createTable(hdt, new byte[][] { FAM_NAM }, c); - TestGetThread tg1 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestGetThread tg2 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestGetThread tg3 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestGetThread tg4 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestGetThread tg5 = - new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestGetThread tg1 = new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestGetThread tg2 = new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestGetThread tg3 = new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestGetThread tg4 = new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestGetThread tg5 = new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); tg1.start(); tg2.start(); tg3.start(); @@ -208,23 +204,17 @@ public void testServerBusyException() throws Exception { tg3.join(); tg4.join(); tg5.join(); - assertEquals(2, - tg1.getServerBusyException + tg2.getServerBusyException + tg3.getServerBusyException - + tg4.getServerBusyException + tg5.getServerBusyException); + assertEquals(2, tg1.getServerBusyException + tg2.getServerBusyException + + tg3.getServerBusyException + tg4.getServerBusyException + tg5.getServerBusyException); // Put has its own logic in HTable, test Put alone. We use AsyncProcess for Put (use multi at // RPC level) and it wrap exceptions to RetriesExhaustedWithDetailsException. 
- TestPutThread tp1 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestPutThread tp2 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestPutThread tp3 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestPutThread tp4 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); - TestPutThread tp5 = - new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestPutThread tp1 = new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestPutThread tp2 = new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestPutThread tp3 = new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestPutThread tp4 = new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); + TestPutThread tp5 = new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName())); tp1.start(); tp2.start(); tp3.start(); @@ -235,8 +225,7 @@ public void testServerBusyException() throws Exception { tp3.join(); tp4.join(); tp5.join(); - assertEquals(2, - tp1.getServerBusyException + tp2.getServerBusyException + tp3.getServerBusyException - + tp4.getServerBusyException + tp5.getServerBusyException); + assertEquals(2, tp1.getServerBusyException + tp2.getServerBusyException + + tp3.getServerBusyException + tp4.getServerBusyException + tp5.getServerBusyException); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerLoadDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerLoadDurability.java index 22e1b5c62156..925dc0ecbbb3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerLoadDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerLoadDurability.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,9 +45,9 @@ import org.junit.runners.Parameterized; /** - * HBASE-19496 noticed that the RegionLoad/ServerLoad may be corrupted if rpc server - * reuses the bytebuffer backed, so this test call the Admin#getLastMajorCompactionTimestamp() to - * invoke HMaster to iterate all stored server/region loads. + * HBASE-19496 noticed that the RegionLoad/ServerLoad may be corrupted if rpc server reuses the + * bytebuffer backed, so this test call the Admin#getLastMajorCompactionTimestamp() to invoke + * HMaster to iterate all stored server/region loads. 
*/ @RunWith(Parameterized.class) @Category({ MediumTests.class, ClientTests.class }) @@ -79,8 +79,7 @@ private static Configuration createConfigurationForSimpleRpcServer() { private static Configuration createConfigurationForNettyRpcServer() { Configuration conf = HBaseConfiguration.create(); - conf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, - NettyRpcServer.class.getName()); + conf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, NettyRpcServer.class.getName()); return conf; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java index 11e5404ec5d8..ea1e9d232920 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestShutdownOfMetaReplicaHolder extends MetaWithReplicasTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestShutdownOfMetaReplicaHolder.class); + HBaseClassTestRule.forClass(TestShutdownOfMetaReplicaHolder.class); private static final Logger LOG = LoggerFactory.getLogger(TestShutdownOfMetaReplicaHolder.class); @@ -50,7 +50,7 @@ public void testShutdownOfReplicaHolder() throws Exception { // checks that the when the server holding meta replica is shut down, the meta replica // can be recovered try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) { + RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) { HRegionLocation hrl = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true).get(1); ServerName oldServer = hrl.getServerName(); TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java index 3d23b9b9936e..7cd8d5fbf8d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java @@ -48,7 +48,7 @@ public class TestSizeFailures { private static final Logger LOG = LoggerFactory.getLogger(TestSizeFailures.class); protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); protected static int SLAVES = 1; private static TableName TABLENAME; private static final int NUM_ROWS = 1000 * 1000, NUM_COLS = 9; @@ -57,9 +57,9 @@ public class TestSizeFailures { public static void setUpBeforeClass() throws Exception { // Uncomment the following lines if more verbosity is needed for // debugging (see HBASE-12285 for details). 
- //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); - //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); - //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); + // ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); + // ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); + // ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); TEST_UTIL.startMiniCluster(SLAVES); // Write a bunch of data @@ -70,7 +70,7 @@ public static void setUpBeforeClass() throws Exception { } TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLENAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); byte[][] splits = new byte[9][2]; for (int i = 1; i < 10; i++) { int split = 48 + i; @@ -120,7 +120,7 @@ public void testScannerSeesAllRecords() throws Exception { s.setMaxResultSize(-1); s.setBatch(-1); s.setCaching(500); - Entry entry = sumTable(table.getScanner(s)); + Entry entry = sumTable(table.getScanner(s)); long rowsObserved = entry.getKey(); long entriesObserved = entry.getValue(); @@ -143,7 +143,7 @@ public void testSmallScannerSeesAllRecords() throws Exception { s.setMaxResultSize(-1); s.setBatch(-1); s.setCaching(500); - Entry entry = sumTable(table.getScanner(s)); + Entry entry = sumTable(table.getScanner(s)); long rowsObserved = entry.getKey(); long entriesObserved = entry.getValue(); @@ -155,12 +155,10 @@ public void testSmallScannerSeesAllRecords() throws Exception { /** * Count the number of rows and the number of entries from a scanner - * - * @param scanner - * The Scanner + * @param scanner The Scanner * @return An entry where the first item is rows observed and the second is entries observed. */ - private Entry sumTable(ResultScanner scanner) { + private Entry sumTable(ResultScanner scanner) { long rowsObserved = 0L; long entriesObserved = 0L; @@ -171,6 +169,6 @@ private Entry sumTable(ResultScanner scanner) { entriesObserved++; } } - return Maps.immutableEntry(rowsObserved,entriesObserved); + return Maps.immutableEntry(rowsObserved, entriesObserved); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java index 0e89db0c13c6..8e5dd2bf22f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ /** * Test to verify that the cloned table is independent of the table from which it was cloned */ -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestSnapshotCloneIndependence { @ClassRule @@ -105,7 +105,7 @@ static void setupConf(Configuration conf) { conf.setBoolean("hbase.master.enabletable.roundrobin", true); // Avoid potentially aggressive splitting which would cause snapshot to fail conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - ConstantSizeRegionSplitPolicy.class.getName()); + ConstantSizeRegionSplitPolicy.class.getName()); // Execute cleaner frequently to induce failures conf.setInt("hbase.master.cleaner.interval", CLEANER_INTERVAL); conf.setInt("hbase.master.hfilecleaner.plugins.snapshot.period", CLEANER_INTERVAL); @@ -234,9 +234,9 @@ private static void waitOnSplit(Connection c, final Table t, int originalCount) } /** - * Takes the snapshot of originalTable and clones the snapshot to another tables. - * If {@code online} is false, the original table is disabled during taking snapshot, so also - * enables it again. + * Takes the snapshot of originalTable and clones the snapshot to another tables. If + * {@code online} is false, the original table is disabled during taking snapshot, so also enables + * it again. * @param online - Whether the table is online or not during the snapshot */ private void createAndCloneSnapshot(boolean online) throws Exception { @@ -373,7 +373,7 @@ private void runTestSnapshotDeleteIndependent() throws Exception { } protected Table createTable(final TableName table, byte[] family) throws Exception { - Table t = UTIL.createTable(table, family); + Table t = UTIL.createTable(table, family); // Wait for everything to be ready with the table UTIL.waitUntilAllRegionsAssigned(table); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java index 7501867de160..77877eac21b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotDFSTemporaryDirectory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,8 +38,7 @@ * This is an end-to-end test for the snapshot utility */ @Category(LargeTests.class) -public class TestSnapshotDFSTemporaryDirectory - extends TestSnapshotTemporaryDirectory { +public class TestSnapshotDFSTemporaryDirectory extends TestSnapshotTemporaryDirectory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -47,7 +46,6 @@ public class TestSnapshotDFSTemporaryDirectory /** * Setup the config for the cluster - * * @throws Exception on failure */ @BeforeClass @@ -71,10 +69,11 @@ private static void setupConf(Configuration conf) throws IOException { // Enable snapshot conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - ConstantSizeRegionSplitPolicy.class.getName()); + ConstantSizeRegionSplitPolicy.class.getName()); - String snapshotPath = UTIL.getDefaultRootDirPath().toString() + Path.SEPARATOR + - UUID.randomUUID().toString() + Path.SEPARATOR + ".tmpdir" + Path.SEPARATOR; - conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + new Path(snapshotPath).toUri()); + String snapshotPath = UTIL.getDefaultRootDirPath().toString() + Path.SEPARATOR + + UUID.randomUUID().toString() + Path.SEPARATOR + ".tmpdir" + Path.SEPARATOR; + conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, + "file://" + new Path(snapshotPath).toUri()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index 56a48c122a14..ef1f3de450a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -84,8 +84,7 @@ public class TestSnapshotFromClient { protected static final int NUM_RS = 2; protected static final String STRING_TABLE_NAME = "test"; protected static final byte[] TEST_FAM = Bytes.toBytes("fam"); - protected static final TableName TABLE_NAME = - TableName.valueOf(STRING_TABLE_NAME); + protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME); private static final Pattern MATCH_ALL = Pattern.compile(".*"); @Rule @@ -124,7 +123,7 @@ protected static void setupConf(Configuration conf) { // Enable snapshot conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - ConstantSizeRegionSplitPolicy.class.getName()); + ConstantSizeRegionSplitPolicy.class.getName()); } @Before @@ -134,8 +133,8 @@ public void setup() throws Exception { protected void createTable() throws Exception { TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas()) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, trackerImpl.name()).build(); + TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas()) + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, trackerImpl.name()).build(); UTIL.createTable(htd, new byte[][] { TEST_FAM }, null); } @@ -169,7 +168,6 @@ public void testMetaTablesSnapshot() throws Exception { /** * Test HBaseAdmin#deleteSnapshots(String) which deletes snapshots whose names match the parameter - * * @throws Exception */ @Test @@ -204,6 +202,7 @@ public void testSnapshotDeletionWithRegex() throws Exception { admin.deleteSnapshot(snapshot3); admin.close(); } + /** * Test snapshotting a table that is offline * @throws Exception @@ -233,8 +232,8 @@ public void testOfflineTableSnapshot() throws Exception { final String SNAPSHOT_NAME = "offlineTableSnapshot"; String snapshot = SNAPSHOT_NAME; - admin.snapshot(new SnapshotDescription(SNAPSHOT_NAME, TABLE_NAME, - SnapshotType.DISABLED, null, -1, SnapshotManifestV1.DESCRIPTOR_VERSION, null)); + admin.snapshot(new SnapshotDescription(SNAPSHOT_NAME, TABLE_NAME, SnapshotType.DISABLED, null, + -1, SnapshotManifestV1.DESCRIPTOR_VERSION, null)); LOG.debug("Snapshot completed."); // make sure we have the snapshot @@ -248,8 +247,8 @@ public void testOfflineTableSnapshot() throws Exception { CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); SnapshotTestingUtils.confirmSnapshotValid( - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM, - rootDir, admin, fs); + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM, rootDir, + admin, fs); admin.deleteSnapshot(snapshot); snapshots = admin.listSnapshots(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClientWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClientWithRegionReplicas.java index 6b7f45293503..75e2f168e4b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClientWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClientWithRegionReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestSnapshotFromClientWithRegionReplicas extends TestSnapshotFromClient { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java index 1b8bb2dab7a8..3194ab30f930 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ /** * Test class to verify that metadata is consistent before and after a snapshot attempt. */ -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestSnapshotMetadata { @ClassRule @@ -80,9 +80,8 @@ public class TestSnapshotMetadata { private static final String TEST_CONF_CUSTOM_VALUE = "TestCustomConf"; private static final String TEST_CUSTOM_VALUE = "TestCustomValue"; - private static final byte[][] families = { - MAX_VERSIONS_FAM, BLOOMFILTER_FAM, COMPRESSED_FAM, BLOCKSIZE_FAM - }; + private static final byte[][] families = + { MAX_VERSIONS_FAM, BLOOMFILTER_FAM, COMPRESSED_FAM, BLOCKSIZE_FAM }; private static final DataBlockEncoding DATA_BLOCK_ENCODING_TYPE = DataBlockEncoding.FAST_DIFF; private static final BloomType BLOOM_TYPE = BloomType.ROW; @@ -147,7 +146,7 @@ public void tearDown() throws Exception { } /* - * Create a table that has non-default properties so we can see if they hold + * Create a table that has non-default properties so we can see if they hold */ private void createTableWithNonDefaultProperties() throws Exception { final long startTime = EnvironmentEdgeManager.currentTime(); @@ -156,19 +155,19 @@ private void createTableWithNonDefaultProperties() throws Exception { // enable replication on a column family ColumnFamilyDescriptor maxVersionsColumn = ColumnFamilyDescriptorBuilder - .newBuilder(MAX_VERSIONS_FAM).setMaxVersions(MAX_VERSIONS).build(); + .newBuilder(MAX_VERSIONS_FAM).setMaxVersions(MAX_VERSIONS).build(); ColumnFamilyDescriptor bloomFilterColumn = ColumnFamilyDescriptorBuilder - .newBuilder(BLOOMFILTER_FAM).setBloomFilterType(BLOOM_TYPE).build(); + .newBuilder(BLOOMFILTER_FAM).setBloomFilterType(BLOOM_TYPE).build(); ColumnFamilyDescriptor dataBlockColumn = ColumnFamilyDescriptorBuilder - .newBuilder(COMPRESSED_FAM).setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE).build(); + .newBuilder(COMPRESSED_FAM).setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE).build(); ColumnFamilyDescriptor blockSizeColumn = - ColumnFamilyDescriptorBuilder.newBuilder(BLOCKSIZE_FAM).setBlocksize(BLOCK_SIZE).build(); + ColumnFamilyDescriptorBuilder.newBuilder(BLOCKSIZE_FAM).setBlocksize(BLOCK_SIZE).build(); TableDescriptor tableDescriptor = TableDescriptorBuilder - .newBuilder(TableName.valueOf(sourceTableNameAsString)).setColumnFamily(maxVersionsColumn) - .setColumnFamily(bloomFilterColumn).setColumnFamily(dataBlockColumn) - .setColumnFamily(blockSizeColumn).setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE) - .setValue(TEST_CONF_CUSTOM_VALUE, 
TEST_CONF_CUSTOM_VALUE).build(); + .newBuilder(TableName.valueOf(sourceTableNameAsString)).setColumnFamily(maxVersionsColumn) + .setColumnFamily(bloomFilterColumn).setColumnFamily(dataBlockColumn) + .setColumnFamily(blockSizeColumn).setValue(TEST_CUSTOM_VALUE, TEST_CUSTOM_VALUE) + .setValue(TEST_CONF_CUSTOM_VALUE, TEST_CONF_CUSTOM_VALUE).build(); assertTrue(tableDescriptor.getValues().size() > 0); admin.createTable(tableDescriptor); @@ -180,7 +179,6 @@ private void createTableWithNonDefaultProperties() throws Exception { original.close(); } - /** * Verify that the describe for a cloned table matches the describe from the original. */ @@ -189,8 +187,8 @@ public void testDescribeMatchesAfterClone() throws Exception { // Clone the original table final String clonedTableNameAsString = "clone" + originalTableName; final TableName clonedTableName = TableName.valueOf(clonedTableNameAsString); - final String snapshotNameAsString = "snapshot" + originalTableName - + EnvironmentEdgeManager.currentTime(); + final String snapshotNameAsString = + "snapshot" + originalTableName + EnvironmentEdgeManager.currentTime(); final String snapshotName = snapshotNameAsString; // restore the snapshot into a cloned table and examine the output @@ -198,19 +196,17 @@ public void testDescribeMatchesAfterClone() throws Exception { Collections.addAll(familiesList, families); // Create a snapshot in which all families are empty - SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, null, - familiesList, snapshotNameAsString, rootDir, fs, /* onlineSnapshot= */ false); + SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, null, familiesList, + snapshotNameAsString, rootDir, fs, /* onlineSnapshot= */ false); admin.cloneSnapshot(snapshotName, clonedTableName); Table clonedTable = UTIL.getConnection().getTable(clonedTableName); TableDescriptor cloneHtd = admin.getDescriptor(clonedTableName); - assertEquals( - originalTableDescription.replace(originalTableName.getNameAsString(),clonedTableNameAsString), - cloneHtd.toStringCustomizedValues()); + assertEquals(originalTableDescription.replace(originalTableName.getNameAsString(), + clonedTableNameAsString), cloneHtd.toStringCustomizedValues()); // Verify the custom fields - assertEquals(originalTableDescriptor.getValues().size(), - cloneHtd.getValues().size()); + assertEquals(originalTableDescriptor.getValues().size(), cloneHtd.getValues().size()); assertEquals(TEST_CUSTOM_VALUE, cloneHtd.getValue(TEST_CUSTOM_VALUE)); assertEquals(TEST_CONF_CUSTOM_VALUE, cloneHtd.getValue(TEST_CONF_CUSTOM_VALUE)); assertEquals(originalTableDescriptor.getValues(), cloneHtd.getValues()); @@ -278,12 +274,11 @@ private void runRestoreWithAdditionalMetadata(boolean changeMetadata, boolean ad } // take a "disabled" snapshot - final String snapshotNameAsString = "snapshot" + originalTableName - + EnvironmentEdgeManager.currentTime(); + final String snapshotNameAsString = + "snapshot" + originalTableName + EnvironmentEdgeManager.currentTime(); - SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, - familiesWithDataList, emptyFamiliesList, snapshotNameAsString, rootDir, fs, - /* onlineSnapshot= */ false); + SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, familiesWithDataList, + emptyFamiliesList, snapshotNameAsString, rootDir, fs, /* onlineSnapshot= */ false); admin.enableTable(originalTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java index 212774600445..fbf6346b95b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
      + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -75,15 +75,18 @@ @RunWith(Parameterized.class) public class TestSnapshotTemporaryDirectory { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSnapshotTemporaryDirectory.class); - @Parameterized.Parameters public static Iterable data() { - return Arrays - .asList(SnapshotManifestV1.DESCRIPTOR_VERSION, SnapshotManifestV2.DESCRIPTOR_VERSION); + @Parameterized.Parameters + public static Iterable data() { + return Arrays.asList(SnapshotManifestV1.DESCRIPTOR_VERSION, + SnapshotManifestV2.DESCRIPTOR_VERSION); } - @Parameterized.Parameter public int manifestVersion; + @Parameterized.Parameter + public int manifestVersion; private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotTemporaryDirectory.class); protected static final int NUM_RS = 2; @@ -98,7 +101,6 @@ public class TestSnapshotTemporaryDirectory { /** * Setup the config for the cluster - * * @throws Exception on failure */ @BeforeClass @@ -122,14 +124,15 @@ private static void setupConf(Configuration conf) { // Enable snapshot conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - ConstantSizeRegionSplitPolicy.class.getName()); - conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + new Path(TEMP_DIR, ".tmpDir").toUri()); + ConstantSizeRegionSplitPolicy.class.getName()); + conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, + "file://" + new Path(TEMP_DIR, ".tmpDir").toUri()); } @Before public void setup() throws Exception { - TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas()).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TABLE_NAME) + .setRegionReplication(getNumReplicas()).build(); UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration()); } @@ -155,8 +158,7 @@ public static void cleanupTest() { } @Test - public void testRestoreDisabledSnapshot() - throws IOException, InterruptedException { + public void testRestoreDisabledSnapshot() throws IOException, InterruptedException { long tid = EnvironmentEdgeManager.currentTime(); TableName tableName = TableName.valueOf("testtb-" + tid); String emptySnapshot = "emptySnaptb-" + tid; @@ -222,8 +224,7 @@ public void testRestoreDisabledSnapshot() } @Test - public void testRestoreEnabledSnapshot() - throws IOException, InterruptedException { + public void testRestoreEnabledSnapshot() throws IOException, InterruptedException { long tid = EnvironmentEdgeManager.currentTime(); TableName tableName = TableName.valueOf("testtb-" + tid); String emptySnapshot = "emptySnaptb-" + tid; @@ -286,7 +287,6 @@ public void testRestoreEnabledSnapshot() /** * Test snapshotting a table that is offline - * * @throws Exception if snapshot does not complete successfully */ @Test @@ -327,18 +327,17 @@ public void testOfflineTableSnapshot() throws Exception { CommonFSUtils.logFileSystemState(UTIL.getTestFileSystem(), CommonFSUtils.getRootDir(UTIL.getConfiguration()), LOG); - SnapshotTestingUtils - .confirmSnapshotValid(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), - TABLE_NAME, TEST_FAM, rootDir, admin, fs); + 
SnapshotTestingUtils.confirmSnapshotValid( + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM, rootDir, + admin, fs); admin.deleteSnapshot(snapshot); SnapshotTestingUtils.assertNoSnapshots(admin); } /** - * Tests that snapshot has correct contents by taking snapshot, cloning it, then affirming - * the contents of the original and cloned table match - * + * Tests that snapshot has correct contents by taking snapshot, cloning it, then affirming the + * contents of the original and cloned table match * @throws Exception if snapshot does not complete successfully */ @Test @@ -370,7 +369,7 @@ public void testSnapshotCloneContents() throws Exception { while (i.hasNext()) { assertTrue(i2.hasNext()); assertEquals(Bytes.toString(i.next().getValue(TEST_FAM, new byte[] {})), - Bytes.toString(i2.next().getValue(TEST_FAM, new byte[] {}))); + Bytes.toString(i2.next().getValue(TEST_FAM, new byte[] {}))); } assertFalse(i2.hasNext()); admin.deleteSnapshot(snapshot1); @@ -401,7 +400,7 @@ public void testOfflineTableSnapshotWithEmptyRegion() throws Exception { // make sure we have the snapshot List snapshots = - SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); // make sure its a valid snapshot FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); @@ -412,9 +411,9 @@ public void testOfflineTableSnapshotWithEmptyRegion() throws Exception { List emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region List nonEmptyCfs = Lists.newArrayList(); - SnapshotTestingUtils - .confirmSnapshotValid(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), - TABLE_NAME, nonEmptyCfs, emptyCfs, rootDir, admin, fs); + SnapshotTestingUtils.confirmSnapshotValid( + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, nonEmptyCfs, + emptyCfs, rootDir, admin, fs); admin.deleteSnapshot(snapshot); SnapshotTestingUtils.assertNoSnapshots(admin); @@ -442,7 +441,7 @@ public void testEnsureTemporaryDirectoryTransfer() throws Exception { LOG.debug("Table2Snapshot1 completed."); List listTableSnapshots = - admin.listTableSnapshots(Pattern.compile("test.*"), Pattern.compile(".*")); + admin.listTableSnapshots(Pattern.compile("test.*"), Pattern.compile(".*")); List listTableSnapshotNames = new ArrayList(); assertEquals(3, listTableSnapshots.size()); for (SnapshotDescription s : listTableSnapshots) { @@ -466,8 +465,8 @@ public void testEnsureTemporaryDirectoryTransfer() throws Exception { private void takeSnapshot(TableName tableName, String snapshotName, boolean disabled) throws IOException { SnapshotType type = disabled ? 
SnapshotType.DISABLED : SnapshotType.FLUSH; - SnapshotDescription desc = new SnapshotDescription(snapshotName, tableName, type, null, -1, - manifestVersion, null); + SnapshotDescription desc = + new SnapshotDescription(snapshotName, tableName, type, null, -1, manifestVersion, null); admin.snapshot(desc); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java index a4521d27a8c1..bb1ef5913de2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectoryWithRegionReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java index 614c1fe416a3..74e9d32817b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ public class TestSnapshotWithAcl extends SnapshotWithAclTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotWithAcl.class); + HBaseClassTestRule.forClass(TestSnapshotWithAcl.class); @Override protected void snapshot(String snapshotName, TableName tableName) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java index 792f0e3d9cb8..6ed226d110ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ public class TestSnapshotWithAclAsyncAdmin extends SnapshotWithAclTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotWithAclAsyncAdmin.class); + HBaseClassTestRule.forClass(TestSnapshotWithAclAsyncAdmin.class); @Override protected void snapshot(String snapshotName, TableName tableName) throws Exception { try (AsyncConnection conn = - ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { + ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { conn.getAdmin().snapshot(snapshotName, tableName).get(); } } @@ -43,7 +43,7 @@ protected void snapshot(String snapshotName, TableName tableName) throws Excepti protected void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl) throws Exception { try (AsyncConnection conn = - ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { + ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { conn.getAdmin().cloneSnapshot(snapshotName, tableName, restoreAcl).get(); } } @@ -51,7 +51,7 @@ protected void cloneSnapshot(String snapshotName, TableName tableName, boolean r @Override protected void restoreSnapshot(String snapshotName, boolean restoreAcl) throws Exception { try (AsyncConnection conn = - ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { + ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { conn.getAdmin().restoreSnapshot(snapshotName, false, restoreAcl).get(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java index fc7b41bad837..20db0365919c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeAtTableLevel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -70,8 +70,7 @@ public static void tearDownAfterClass() throws Exception { public void testTableSplitSwitch() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setSplitEnabled(false).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setSplitEnabled(false).build(); // create a table with split disabled Table t = TEST_UTIL.createTable(tableDesc, null); @@ -95,9 +94,7 @@ public void testTableSplitSwitchForPreSplittedTable() throws Exception { // create a table with split disabled TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setSplitEnabled(false) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setSplitEnabled(false).build(); Table t = TEST_UTIL.createTable(tableDesc, new byte[][] { Bytes.toBytes(10) }); TEST_UTIL.waitTableAvailable(tableName); @@ -118,9 +115,7 @@ public void testTableMergeSwitch() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setMergeEnabled(false) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setMergeEnabled(false).build(); Table t = TEST_UTIL.createTable(tableDesc, null); TEST_UTIL.waitTableAvailable(tableName); @@ -143,9 +138,7 @@ public void testTableMergeSwitchForPreSplittedTable() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setMergeEnabled(false) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setMergeEnabled(false).build(); Table t = TEST_UTIL.createTable(tableDesc, new byte[][] { Bytes.toBytes(10) }); TEST_UTIL.waitTableAvailable(tableName); @@ -183,9 +176,8 @@ private void trySplitAndEnsureItFails(final TableName tableName) throws Exceptio private void enableTableSplit(final TableName tableName) throws Exception { // Get the original table descriptor TableDescriptor originalTableDesc = admin.getDescriptor(tableName); - TableDescriptor modifiedTableDesc = TableDescriptorBuilder.newBuilder(originalTableDesc) - .setSplitEnabled(true) - .build(); + TableDescriptor modifiedTableDesc = + TableDescriptorBuilder.newBuilder(originalTableDesc).setSplitEnabled(true).build(); // Now modify the table descriptor and enable split for it admin.modifyTable(modifiedTableDesc); @@ -194,8 +186,7 @@ private void enableTableSplit(final TableName tableName) throws Exception { assertTrue(admin.getDescriptor(tableName).isSplitEnabled()); } - private void trySplitAndEnsureItIsSuccess(final TableName tableName) - throws Exception { + private void trySplitAndEnsureItIsSuccess(final TableName tableName) throws Exception { // get the original table region count List regions = admin.getRegions(tableName); int originalCount = regions.size(); @@ -237,7 +228,6 @@ private void tryMergeAndEnsureItFails(final TableName tableName) throws Exceptio } } - /** * Method to enable merge for the passed table and validate this modification. 
* @param tableName name of the table @@ -245,9 +235,8 @@ private void tryMergeAndEnsureItFails(final TableName tableName) throws Exceptio private void enableTableMerge(final TableName tableName) throws Exception { // Get the original table descriptor TableDescriptor originalTableDesc = admin.getDescriptor(tableName); - TableDescriptor modifiedTableDesc = TableDescriptorBuilder.newBuilder(originalTableDesc) - .setMergeEnabled(true) - .build(); + TableDescriptor modifiedTableDesc = + TableDescriptorBuilder.newBuilder(originalTableDesc).setMergeEnabled(true).build(); // Now modify the table descriptor and enable merge for it admin.modifyTable(modifiedTableDesc); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java index ce7e03921529..8e5674976d7f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java @@ -27,7 +27,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -53,7 +52,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestSplitOrMergeStatus { @ClassRule @@ -61,7 +60,7 @@ public class TestSplitOrMergeStatus { HBaseClassTestRule.forClass(TestSplitOrMergeStatus.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); @Rule public TestName name = new TestName(); @@ -106,8 +105,8 @@ public void testSplitSwitch() throws Exception { admin.close(); } - - @Ignore @Test + @Ignore + @Test public void testMergeSwitch() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); Table t = TEST_UTIL.createTable(tableName, FAMILY); @@ -122,7 +121,7 @@ public void testMergeSwitch() throws Exception { Threads.sleep(1); } assertTrue("originalCount=" + originalCount + ", newCount=" + postSplitCount, - originalCount != postSplitCount); + originalCount != postSplitCount); // Merge switch is off so merge should NOT succeed. boolean result = admin.mergeSwitch(false, false); @@ -147,7 +146,7 @@ public void testMergeSwitch() throws Exception { regions.get(1).getEncodedNameAsBytes(), true); f.get(10, TimeUnit.SECONDS); count = admin.getRegions(tableName).size(); - assertTrue((postSplitCount / 2 /*Merge*/) == count); + assertTrue((postSplitCount / 2 /* Merge */) == count); admin.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java index 9e31cc338627..a506af68e32a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -66,7 +66,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -@Category({ClientTests.class, MediumTests.class}) +@Category({ ClientTests.class, MediumTests.class }) public class TestTableFavoredNodes { @ClassRule @@ -81,7 +81,7 @@ public class TestTableFavoredNodes { private FavoredNodesManager fnm; private Admin admin; - private final byte[][] splitKeys = new byte[][] {Bytes.toBytes(1), Bytes.toBytes(9)}; + private final byte[][] splitKeys = new byte[][] { Bytes.toBytes(1), Bytes.toBytes(9) }; private final int NUM_REGIONS = splitKeys.length + 1; @Rule @@ -92,7 +92,7 @@ public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Setting FavoredNodeBalancer will enable favored nodes conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - LoadOnlyFavoredStochasticBalancer.class, LoadBalancer.class); + LoadOnlyFavoredStochasticBalancer.class, LoadBalancer.class); conf.set(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, "" + SLAVES); TEST_UTIL.startMiniCluster(SLAVES); @@ -182,7 +182,7 @@ public void testSplitTable() throws Exception { LOG.info("FINISHED WAITING ON RIT"); waitUntilTableRegionCountReached(tableName, numberOfRegions + 1); - // All regions should have favored nodes checkIfFavoredNodeInformationIsCorrect(tableName); + // All regions should have favored nodes checkIfFavoredNodeInformationIsCorrect(tableName); // Get the daughters of parent. RegionInfo daughter1 = locator.getRegionLocation(parent.getStartKey(), true).getRegion(); @@ -195,21 +195,21 @@ public void testSplitTable() throws Exception { checkIfDaughterInherits2FN(parentFN, daughter2FN); assertEquals("Daughter's PRIMARY FN should be PRIMARY of parent", - parentFN.get(PRIMARY.ordinal()), daughter1FN.get(PRIMARY.ordinal())); + parentFN.get(PRIMARY.ordinal()), daughter1FN.get(PRIMARY.ordinal())); assertEquals("Daughter's SECONDARY FN should be SECONDARY of parent", - parentFN.get(SECONDARY.ordinal()), daughter1FN.get(SECONDARY.ordinal())); + parentFN.get(SECONDARY.ordinal()), daughter1FN.get(SECONDARY.ordinal())); assertEquals("Daughter's PRIMARY FN should be PRIMARY of parent", - parentFN.get(PRIMARY.ordinal()), daughter2FN.get(PRIMARY.ordinal())); + parentFN.get(PRIMARY.ordinal()), daughter2FN.get(PRIMARY.ordinal())); assertEquals("Daughter's SECONDARY FN should be TERTIARY of parent", - parentFN.get(TERTIARY.ordinal()), daughter2FN.get(SECONDARY.ordinal())); + parentFN.get(TERTIARY.ordinal()), daughter2FN.get(SECONDARY.ordinal())); // Major compact table and run catalog janitor. Parent's FN should be removed TEST_UTIL.getMiniHBaseCluster().compact(tableName, true); admin.runCatalogJanitor(); // Catalog cleanup is async. Wait on procedure to finish up. 
ProcedureTestingUtility.waitAllProcedures( - TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); // assertEquals("Parent region should have been cleaned", 1, admin.runCatalogScan()); assertNull("Parent FN should be null", fnm.getFavoredNodes(parent)); @@ -243,8 +243,8 @@ public void testMergeTable() throws Exception { LOG.info("regionB: " + regionA.getEncodedName() + " with FN: " + fnm.getFavoredNodes(regionB)); int countOfRegions = TEST_UTIL.getMiniHBaseCluster().getRegions(tableName).size(); - admin.mergeRegionsAsync(regionA.getEncodedNameAsBytes(), - regionB.getEncodedNameAsBytes(), false).get(60, TimeUnit.SECONDS); + admin.mergeRegionsAsync(regionA.getEncodedNameAsBytes(), regionB.getEncodedNameAsBytes(), false) + .get(60, TimeUnit.SECONDS); TEST_UTIL.waitUntilNoRegionsInTransition(WAIT_TIMEOUT); waitUntilTableRegionCountReached(tableName, countOfRegions - 1); @@ -252,19 +252,18 @@ public void testMergeTable() throws Exception { // All regions should have favored nodes checkIfFavoredNodeInformationIsCorrect(tableName); - RegionInfo mergedRegion = - locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegion(); + RegionInfo mergedRegion = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegion(); List mergedFN = fnm.getFavoredNodes(mergedRegion); - assertArrayEquals("Merged region doesn't match regionA's FN", - regionAFN.toArray(), mergedFN.toArray()); + assertArrayEquals("Merged region doesn't match regionA's FN", regionAFN.toArray(), + mergedFN.toArray()); // Major compact table and run catalog janitor. Parent FN should be removed TEST_UTIL.getMiniHBaseCluster().compact(tableName, true); assertEquals("Merge parents should have been cleaned", 1, admin.runCatalogJanitor()); // Catalog cleanup is async. Wait on procedure to finish up. ProcedureTestingUtility.waitAllProcedures( - TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); assertNull("Parent FN should be null", fnm.getFavoredNodes(regionA)); assertNull("Parent FN should be null", fnm.getFavoredNodes(regionB)); @@ -283,20 +282,18 @@ private void checkNoFNForDeletedTable(List regions) { } /* - * This checks the following: - * - * 1. Do all regions of the table have favored nodes updated in master? - * 2. Is the number of favored nodes correct for a region? Is the start code -1? - * 3. Is the FN information consistent between Master and the respective RegionServer? + * This checks the following: 1. Do all regions of the table have favored nodes updated in master? + * 2. Is the number of favored nodes correct for a region? Is the start code -1? 3. Is the FN + * information consistent between Master and the respective RegionServer? */ private void checkIfFavoredNodeInformationIsCorrect(TableName tableName) throws Exception { /* - * Since we need HRegionServer to check for consistency of FN between Master and RS, - * lets construct a map for each serverName lookup. Makes it easy later. + * Since we need HRegionServer to check for consistency of FN between Master and RS, lets + * construct a map for each serverName lookup. Makes it easy later. 
*/ Map snRSMap = Maps.newHashMap(); - for (JVMClusterUtil.RegionServerThread rst : - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()) { + for (JVMClusterUtil.RegionServerThread rst : TEST_UTIL.getMiniHBaseCluster() + .getLiveRegionServerThreads()) { snRSMap.put(rst.getRegionServer().getServerName(), rst.getRegionServer()); } @@ -322,12 +319,14 @@ private void checkIfFavoredNodeInformationIsCorrect(TableName tableName) throws assertNotNull("RS should not be null for regionLocation: " + regionLocation, regionServer); InetSocketAddress[] rsFavNodes = - regionServer.getFavoredNodesForRegion(regionInfo.getEncodedName()); - assertNotNull("RS " + regionLocation.getServerName() - + " does not have FN for region: " + regionInfo, rsFavNodes); - assertEquals("Incorrect FN for region:" + regionInfo.getEncodedName() + " on server:" + - regionLocation.getServerName(), FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, - rsFavNodes.length); + regionServer.getFavoredNodesForRegion(regionInfo.getEncodedName()); + assertNotNull( + "RS " + regionLocation.getServerName() + " does not have FN for region: " + regionInfo, + rsFavNodes); + assertEquals( + "Incorrect FN for region:" + regionInfo.getEncodedName() + " on server:" + + regionLocation.getServerName(), + FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, rsFavNodes.length); // 4. Does DN port match all FN node list? for (ServerName sn : fnm.getFavoredNodesWithDNPort(regionInfo)) { @@ -349,8 +348,8 @@ public void testSystemTables() throws Exception { // All regions should have favored nodes checkIfFavoredNodeInformationIsCorrect(tableName); - for (TableName sysTable : - admin.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { + for (TableName sysTable : admin + .listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { List regions = admin.getRegions(sysTable); for (RegionInfo region : regions) { assertNull("FN should be null for sys region", fnm.getFavoredNodes(region)); @@ -369,12 +368,13 @@ private void checkIfDaughterInherits2FN(List parentFN, List e, } } - public static class ThrowIOExceptionCoprocessor - implements RegionCoprocessor, RegionObserver { + public static class ThrowIOExceptionCoprocessor implements RegionCoprocessor, RegionObserver { public ThrowIOExceptionCoprocessor() { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java index 32eac9af0e11..d366446480e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestTableSnapshotScanner { @ClassRule @@ -67,7 +67,7 @@ public class TestTableSnapshotScanner { private static final Logger LOG = LoggerFactory.getLogger(TestTableSnapshotScanner.class); private final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final int NUM_REGION_SERVERS = 2; - private static final byte[][] FAMILIES = {Bytes.toBytes("f1"), Bytes.toBytes("f2")}; + private static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2") }; public static byte[] bbb = Bytes.toBytes("bbb"); public static byte[] yyy = Bytes.toBytes("yyy"); @@ -90,9 +90,9 @@ public static void blockUntilSplitFinished(HBaseTestingUtil util, TableName tabl public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numRegionServers(NUM_REGION_SERVERS).numDataNodes(NUM_REGION_SERVERS) - .createRootDir(true).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().numRegionServers(NUM_REGION_SERVERS) + .numDataNodes(NUM_REGION_SERVERS).createRootDir(true).build(); UTIL.startMiniCluster(option); rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); fs = rootDir.getFileSystem(UTIL.getConfiguration()); @@ -112,11 +112,10 @@ public void tearDown() throws Exception { } public static void createTableAndSnapshot(HBaseTestingUtil util, TableName tableName, - String snapshotName, int numRegions) - throws Exception { + String snapshotName, int numRegions) throws Exception { try { util.deleteTable(tableName); - } catch(Exception ex) { + } catch (Exception ex) { // ignore } @@ -134,8 +133,8 @@ public static void createTableAndSnapshot(HBaseTestingUtil util, TableName table Path rootDir = CommonFSUtils.getRootDir(util.getConfiguration()); FileSystem fs = rootDir.getFileSystem(util.getConfiguration()); - SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, - Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true); + SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES), null, + snapshotName, rootDir, fs, true); // load different values byte[] value = Bytes.toBytes("after_snapshot_value"); @@ -170,8 +169,8 @@ public void testNoDuplicateResultsWhenSplitting() throws Exception { Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration()); - SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, - Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true); + SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES), + null, snapshotName, rootDir, fs, true); // load different values byte[] value = Bytes.toBytes("after_snapshot_value"); @@ -198,7 +197,6 @@ public void testNoDuplicateResultsWhenSplitting() throws Exception { } } - @Test public void testScanLimit() throws Exception { setupCluster(); @@ -305,8 +303,8 @@ private void testScanner(HBaseTestingUtil util, String snapshotName, int numRegi Path restoreDir = util.getDataTestDirOnTestFS(snapshotName); Scan scan = new Scan().withStartRow(bbb).withStopRow(yyy); // limit the scan - TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir, - 
snapshotName, scan); + TableSnapshotScanner scanner = + new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir, snapshotName, scan); verifyScanner(scanner, bbb, yyy); scanner.close(); @@ -344,15 +342,16 @@ private static void verifyRow(Result result) throws IOException { while (scanner.advance()) { Cell cell = scanner.current(); - //assert that all Cells in the Result have the same key - Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + // assert that all Cells in the Result have the same key + Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength())); } for (int j = 0; j < FAMILIES.length; j++) { byte[] actual = result.getValue(FAMILIES[j], FAMILIES[j]); Assert.assertArrayEquals("Row in snapshot does not match, expected:" + Bytes.toString(row) - + " ,actual:" + Bytes.toString(actual), row, actual); + + " ,actual:" + Bytes.toString(actual), + row, actual); } } @@ -455,9 +454,9 @@ public void testMergeRegion() throws Exception { traverseAndSetFileTime(HFileArchiveUtil.getArchivePath(conf), time); UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().runCleaner(); // scan snapshot - try (TableSnapshotScanner scanner = new TableSnapshotScanner(conf, - UTIL.getDataTestDirOnTestFS(snapshotName), snapshotName, - new Scan().withStartRow(bbb).withStopRow(yyy))) { + try (TableSnapshotScanner scanner = + new TableSnapshotScanner(conf, UTIL.getDataTestDirOnTestFS(snapshotName), snapshotName, + new Scan().withStartRow(bbb).withStopRow(yyy))) { verifyScanner(scanner, bbb, yyy); } } catch (Exception e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java index 161f3eba4ea6..5cc6c5788d74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,11 +45,10 @@ import org.junit.rules.TestName; /** - * Run tests related to {@link TimestampsFilter} using HBase client APIs. - * Sets up the HBase mini cluster once at start. Each creates a table - * named for the method and does its stuff against that. + * Run tests related to {@link TimestampsFilter} using HBase client APIs. Sets up the HBase mini + * cluster once at start. Each creates a table named for the method and does its stuff against that. */ -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestTimestampsFilter { @ClassRule @@ -94,18 +93,16 @@ public void tearDown() throws Exception { } /** - * Test from client side for TimestampsFilter. - * - * The TimestampsFilter provides the ability to request cells (KeyValues) - * whose timestamp/version is in the specified list of timestamps/version. - * + * Test from client side for TimestampsFilter. The TimestampsFilter provides the ability to + * request cells (KeyValues) whose timestamp/version is in the specified list of + * timestamps/version. 
* @throws Exception */ @Test public void testTimestampsFilter() throws Exception { - final byte [] TABLE = Bytes.toBytes(name.getMethodName()); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + final byte[] TABLE = Bytes.toBytes(name.getMethodName()); + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; Cell kvs[]; // create table; set versions to max... @@ -139,8 +136,7 @@ public void testTimestampsFilter() throws Exception { for (int rowIdx = 0; rowIdx < 5; rowIdx++) { for (int colIdx = 0; colIdx < 5; colIdx++) { - kvs = getNVersions(ht, FAMILY, rowIdx, colIdx, - Arrays.asList(505L, 5L, 105L, 305L, 205L)); + kvs = getNVersions(ht, FAMILY, rowIdx, colIdx, Arrays.asList(505L, 5L, 105L, 305L, 205L)); assertEquals(4, kvs.length); checkOneCell(kvs[0], FAMILY, rowIdx, colIdx, 305); checkOneCell(kvs[1], FAMILY, rowIdx, colIdx, 205); @@ -152,21 +148,19 @@ public void testTimestampsFilter() throws Exception { // Request an empty list of versions using the Timestamps filter; // Should return none. kvs = getNVersions(ht, FAMILY, 2, 2, new ArrayList<>()); - assertEquals(0, kvs == null? 0: kvs.length); + assertEquals(0, kvs == null ? 0 : kvs.length); // // Test the filter using a Scan operation // Scan rows 0..4. For each row, get all its columns, but only // those versions of the columns with the specified timestamps. - Result[] results = scanNVersions(ht, FAMILY, 0, 4, - Arrays.asList(6L, 106L, 306L)); + Result[] results = scanNVersions(ht, FAMILY, 0, 4, Arrays.asList(6L, 106L, 306L)); assertEquals("# of rows returned from scan", 5, results.length); for (int rowIdx = 0; rowIdx < 5; rowIdx++) { kvs = results[rowIdx].rawCells(); // each row should have 5 columns. // And we have requested 3 versions for each. - assertEquals("Number of KeyValues in result for row:" + rowIdx, - 3*5, kvs.length); + assertEquals("Number of KeyValues in result for row:" + rowIdx, 3 * 5, kvs.length); for (int colIdx = 0; colIdx < 5; colIdx++) { int offset = colIdx * 3; checkOneCell(kvs[offset + 0], FAMILY, rowIdx, colIdx, 306); @@ -179,9 +173,9 @@ public void testTimestampsFilter() throws Exception { @Test public void testMultiColumns() throws Exception { - final byte [] TABLE = Bytes.toBytes(name.getMethodName()); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + final byte[] TABLE = Bytes.toBytes(name.getMethodName()); + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(TableName.valueOf(TABLE), FAMILIES, Integer.MAX_VALUE); @@ -210,8 +204,8 @@ public void testMultiColumns() throws Exception { Result result = ht.get(g); for (Cell kv : result.listCells()) { - System.out.println("found row " + Bytes.toString(CellUtil.cloneRow(kv)) + - ", column " + Bytes.toString(CellUtil.cloneQualifier(kv)) + ", value " + System.out.println("found row " + Bytes.toString(CellUtil.cloneRow(kv)) + ", column " + + Bytes.toString(CellUtil.cloneQualifier(kv)) + ", value " + Bytes.toString(CellUtil.cloneValue(kv))); } @@ -224,7 +218,6 @@ public void testMultiColumns() throws Exception { /** * Test TimestampsFilter in the presence of version deletes. 
- * * @throws Exception */ @Test @@ -238,10 +231,10 @@ public void testWithVersionDeletes() throws Exception { } private void testWithVersionDeletes(boolean flushTables) throws IOException { - final byte [] TABLE = Bytes.toBytes(name.getMethodName() + "_" + - (flushTables ? "flush" : "noflush")); - byte [] FAMILY = Bytes.toBytes("event_log"); - byte [][] FAMILIES = new byte[][] { FAMILY }; + final byte[] TABLE = + Bytes.toBytes(name.getMethodName() + "_" + (flushTables ? "flush" : "noflush")); + byte[] FAMILY = Bytes.toBytes("event_log"); + byte[][] FAMILIES = new byte[][] { FAMILY }; // create table; set versions to max... Table ht = TEST_UTIL.createTable(TableName.valueOf(TABLE), FAMILIES, Integer.MAX_VALUE); @@ -271,8 +264,7 @@ private void verifyInsertedValues(Table ht, byte[] cf) throws IOException { for (int rowIdx = 0; rowIdx < 5; rowIdx++) { for (int colIdx = 0; colIdx < 5; colIdx++) { // ask for versions that exist. - Cell[] kvs = getNVersions(ht, cf, rowIdx, colIdx, - Arrays.asList(5L, 300L, 6L, 80L)); + Cell[] kvs = getNVersions(ht, cf, rowIdx, colIdx, Arrays.asList(5L, 300L, 6L, 80L)); assertEquals(4, kvs.length); checkOneCell(kvs[0], cf, rowIdx, colIdx, 300); checkOneCell(kvs[1], cf, rowIdx, colIdx, 80); @@ -280,13 +272,11 @@ private void verifyInsertedValues(Table ht, byte[] cf) throws IOException { checkOneCell(kvs[3], cf, rowIdx, colIdx, 5); // ask for versions that do not exist. - kvs = getNVersions(ht, cf, rowIdx, colIdx, - Arrays.asList(101L, 102L)); - assertEquals(0, kvs == null? 0: kvs.length); + kvs = getNVersions(ht, cf, rowIdx, colIdx, Arrays.asList(101L, 102L)); + assertEquals(0, kvs == null ? 0 : kvs.length); // ask for some versions that exist and some that do not. - kvs = getNVersions(ht, cf, rowIdx, colIdx, - Arrays.asList(1L, 300L, 105L, 70L, 115L)); + kvs = getNVersions(ht, cf, rowIdx, colIdx, Arrays.asList(1L, 300L, 105L, 70L, 115L)); assertEquals(3, kvs.length); checkOneCell(kvs[0], cf, rowIdx, colIdx, 300); checkOneCell(kvs[1], cf, rowIdx, colIdx, 70); @@ -296,39 +286,34 @@ private void verifyInsertedValues(Table ht, byte[] cf) throws IOException { } /** - * Assert that the passed in KeyValue has expected contents for the - * specified row, column & timestamp. + * Assert that the passed in KeyValue has expected contents for the specified row, column & + * timestamp. 
*/ - private void checkOneCell(Cell kv, byte[] cf, - int rowIdx, int colIdx, long ts) { + private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) { String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts; - assertEquals("Row mismatch which checking: " + ctx, - "row:"+ rowIdx, Bytes.toString(CellUtil.cloneRow(kv))); + assertEquals("Row mismatch which checking: " + ctx, "row:" + rowIdx, + Bytes.toString(CellUtil.cloneRow(kv))); - assertEquals("ColumnFamily mismatch while checking: " + ctx, - Bytes.toString(cf), Bytes.toString(CellUtil.cloneFamily(kv))); + assertEquals("ColumnFamily mismatch while checking: " + ctx, Bytes.toString(cf), + Bytes.toString(CellUtil.cloneFamily(kv))); - assertEquals("Column qualifier mismatch while checking: " + ctx, - "column:" + colIdx, - Bytes.toString(CellUtil.cloneQualifier(kv))); + assertEquals("Column qualifier mismatch while checking: " + ctx, "column:" + colIdx, + Bytes.toString(CellUtil.cloneQualifier(kv))); - assertEquals("Timestamp mismatch while checking: " + ctx, - ts, kv.getTimestamp()); + assertEquals("Timestamp mismatch while checking: " + ctx, ts, kv.getTimestamp()); - assertEquals("Value mismatch while checking: " + ctx, - "value-version-" + ts, Bytes.toString(CellUtil.cloneValue(kv))); + assertEquals("Value mismatch while checking: " + ctx, "value-version-" + ts, + Bytes.toString(CellUtil.cloneValue(kv))); } /** - * Uses the TimestampFilter on a Get to request a specified list of - * versions for the row/column specified by rowIdx & colIdx. - * + * Uses the TimestampFilter on a Get to request a specified list of versions for the row/column + * specified by rowIdx & colIdx. */ - private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx, - int colIdx, List versions) - throws IOException { + private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx, int colIdx, List versions) + throws IOException { byte row[] = Bytes.toBytes("row:" + rowIdx); byte column[] = Bytes.toBytes("column:" + colIdx); Filter filter = new TimestampsFilter(versions); @@ -342,12 +327,11 @@ private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx, } /** - * Uses the TimestampFilter on a Scan to request a specified list of - * versions for the rows from startRowIdx to endRowIdx (both inclusive). + * Uses the TimestampFilter on a Scan to request a specified list of versions for the rows from + * startRowIdx to endRowIdx (both inclusive). */ - private Result[] scanNVersions(Table ht, byte[] cf, int startRowIdx, - int endRowIdx, List versions) - throws IOException { + private Result[] scanNVersions(Table ht, byte[] cf, int startRowIdx, int endRowIdx, + List versions) throws IOException { byte startRow[] = Bytes.toBytes("row:" + startRowIdx); byte endRow[] = Bytes.toBytes("row:" + endRowIdx + 1); // exclusive Filter filter = new TimestampsFilter(versions); @@ -359,12 +343,10 @@ private Result[] scanNVersions(Table ht, byte[] cf, int startRowIdx, } /** - * Insert in specific row/column versions with timestamps - * versionStart..versionEnd. + * Insert in specific row/column versions with timestamps versionStart..versionEnd. 
*/ - private void putNVersions(Table ht, byte[] cf, int rowIdx, int colIdx, - long versionStart, long versionEnd) - throws IOException { + private void putNVersions(Table ht, byte[] cf, int rowIdx, int colIdx, long versionStart, + long versionEnd) throws IOException { byte row[] = Bytes.toBytes("row:" + rowIdx); byte column[] = Bytes.toBytes("column:" + colIdx); Put put = new Put(row); @@ -378,12 +360,11 @@ private void putNVersions(Table ht, byte[] cf, int rowIdx, int colIdx, } /** - * For row/column specified by rowIdx/colIdx, delete the cell - * corresponding to the specified version. + * For row/column specified by rowIdx/colIdx, delete the cell corresponding to the specified + * version. */ - private void deleteOneVersion(Table ht, byte[] cf, int rowIdx, - int colIdx, long version) - throws IOException { + private void deleteOneVersion(Table ht, byte[] cf, int rowIdx, int colIdx, long version) + throws IOException { byte row[] = Bytes.toBytes("row:" + rowIdx); byte column[] = Bytes.toBytes("column:" + colIdx); Delete del = new Delete(row); @@ -392,5 +373,3 @@ private void deleteOneVersion(Table ht, byte[] cf, int rowIdx, } } - - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java index 5703ae28b3a6..df3a0d6335f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestUpdateConfiguration extends AbstractTestUpdateConfiguration { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 4894c52d1583..73b23e2e1335 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -54,7 +54,7 @@ public class TestZKConnectionRegistry { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZKConnectionRegistry.class); + HBaseClassTestRule.forClass(TestZKConnectionRegistry.class); static final Logger LOG = LoggerFactory.getLogger(TestZKConnectionRegistry.class); static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -83,8 +83,7 @@ public void test() throws InterruptedException, ExecutionException, IOException clusterId); assertEquals(TEST_UTIL.getHBaseCluster().getMaster().getServerName(), REGISTRY.getActiveMaster().get()); - RegionReplicaTestHelper - .waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); + RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); RegionLocations locs = REGISTRY.getMetaRegionLocations().get(); assertEquals(3, locs.getRegionLocations().length); IntStream.range(0, 3).forEach(i -> { @@ -102,8 +101,9 @@ public void testIndependentZKConnections() throws IOException { otherConf.set(HConstants.ZOOKEEPER_QUORUM, MiniZooKeeperCluster.HOST); try (ZKConnectionRegistry otherRegistry = new 
ZKConnectionRegistry(otherConf)) { ReadOnlyZKClient zk2 = otherRegistry.getZKClient(); - assertNotSame("Using a different configuration / quorum should result in different " + - "backing zk connection.", zk1, zk2); + assertNotSame("Using a different configuration / quorum should result in different " + + "backing zk connection.", + zk1, zk2); assertNotEquals( "Using a different configrution / quorum should be reflected in the zk connection.", zk1.getConnectString(), zk2.getConnectString()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TracedAdvancedScanResultConsumer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TracedAdvancedScanResultConsumer.java index 702e16a14635..ba6f75a591b6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TracedAdvancedScanResultConsumer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TracedAdvancedScanResultConsumer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,23 +31,19 @@ public class TracedAdvancedScanResultConsumer implements AdvancedScanResultConsu @Override public void onScanMetricsCreated(ScanMetrics scanMetrics) { - TraceUtil.trace( - () -> delegate.onScanMetricsCreated(scanMetrics), + TraceUtil.trace(() -> delegate.onScanMetricsCreated(scanMetrics), "TracedAdvancedScanResultConsumer#onScanMetricsCreated"); } @Override public void onNext(Result[] results, ScanController controller) { - TraceUtil.trace( - () -> delegate.onNext(results, controller), + TraceUtil.trace(() -> delegate.onNext(results, controller), "TracedAdvancedScanResultConsumer#onNext"); } @Override public void onError(Throwable error) { - TraceUtil.trace( - () -> delegate.onError(error), - "TracedAdvancedScanResultConsumer#onError"); + TraceUtil.trace(() -> delegate.onError(error), "TracedAdvancedScanResultConsumer#onError"); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TracedScanResultConsumer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TracedScanResultConsumer.java index 0427218038d2..cfb5907c1fd8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TracedScanResultConsumer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TracedScanResultConsumer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,8 +22,7 @@ import org.apache.hadoop.hbase.trace.TraceUtil; /** - * A wrapper over {@link SimpleScanResultConsumer} that adds tracing of spans to its - * implementation. + * A wrapper over {@link SimpleScanResultConsumer} that adds tracing of spans to its implementation. 
*/ class TracedScanResultConsumer implements SimpleScanResultConsumer { @@ -35,15 +34,13 @@ public TracedScanResultConsumer(final SimpleScanResultConsumer delegate) { @Override public void onScanMetricsCreated(ScanMetrics scanMetrics) { - TraceUtil.trace( - () -> delegate.onScanMetricsCreated(scanMetrics), + TraceUtil.trace(() -> delegate.onScanMetricsCreated(scanMetrics), "TracedScanResultConsumer#onScanMetricsCreated"); } @Override public boolean onNext(Result result) { - return TraceUtil.trace(() -> delegate.onNext(result), - "TracedScanResultConsumer#onNext"); + return TraceUtil.trace(() -> delegate.onNext(result), "TracedScanResultConsumer#onNext"); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/hamcrest/BytesMatchers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/hamcrest/BytesMatchers.java index 581ac9fa3947..ee412cace767 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/hamcrest/BytesMatchers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/hamcrest/BytesMatchers.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client.hamcrest; import static org.hamcrest.core.Is.is; + import org.apache.hadoop.hbase.util.Bytes; import org.hamcrest.Description; import org.hamcrest.Matcher; @@ -28,7 +29,8 @@ */ public final class BytesMatchers { - private BytesMatchers() {} + private BytesMatchers() { + } public static Matcher bytesAsStringBinary(final String binary) { return bytesAsStringBinary(is(binary)); @@ -36,7 +38,8 @@ public static Matcher bytesAsStringBinary(final String binary) { public static Matcher bytesAsStringBinary(final Matcher matcher) { return new TypeSafeDiagnosingMatcher() { - @Override protected boolean matchesSafely(byte[] item, Description mismatchDescription) { + @Override + protected boolean matchesSafely(byte[] item, Description mismatchDescription) { final String binary = Bytes.toStringBinary(item); if (matcher.matches(binary)) { return true; @@ -46,10 +49,10 @@ public static Matcher bytesAsStringBinary(final Matcher matcher) return false; } - @Override public void describeTo(Description description) { - description - .appendText("has a byte[] with a Bytes.toStringBinary value that ") - .appendDescriptionOf(matcher); + @Override + public void describeTo(Description description) { + description.appendText("has a byte[] with a Bytes.toStringBinary value that ") + .appendDescriptionOf(matcher); } }; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java index 50b6a278a4a2..9707a8bfda87 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestEntityLocks { @ClassRule @@ -70,25 +70,23 @@ public class TestEntityLocks { private final Configuration conf = HBaseConfiguration.create(); private final LockService.BlockingInterface master = - Mockito.mock(LockService.BlockingInterface.class); + Mockito.mock(LockService.BlockingInterface.class); private LockServiceClient admin; private ArgumentCaptor lockReqArgCaptor; private ArgumentCaptor lockHeartbeatReqArgCaptor; - private static final LockHeartbeatResponse UNLOCKED_RESPONSE = - LockHeartbeatResponse.newBuilder().setLockStatus( - LockHeartbeatResponse.LockStatus.UNLOCKED).build(); + private static final LockHeartbeatResponse UNLOCKED_RESPONSE = LockHeartbeatResponse.newBuilder() + .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build(); // timeout such that worker thread waits for 500ms for each heartbeat. - private static final LockHeartbeatResponse LOCKED_RESPONSE = - LockHeartbeatResponse.newBuilder().setLockStatus( - LockHeartbeatResponse.LockStatus.LOCKED).setTimeoutMs(10000).build(); + private static final LockHeartbeatResponse LOCKED_RESPONSE = LockHeartbeatResponse.newBuilder() + .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).setTimeoutMs(10000).build(); private long procId; // Setup mock admin. LockServiceClient getAdmin() throws Exception { conf.setInt("hbase.client.retries.number", 3); - conf.setInt("hbase.client.pause", 1); // 1ms. Immediately retry rpc on failure. + conf.setInt("hbase.client.pause", 1); // 1ms. Immediately retry rpc on failure. 
return new LockServiceClient(conf, master, PerClientRandomNonceGenerator.get()); } @@ -123,14 +121,14 @@ private boolean waitLockTimeOut(EntityLock lock, long maxWaitTimeMillis) { @Test public void testEntityLock() throws Exception { final long procId = 100; - final long workerSleepTime = 200; // in ms + final long workerSleepTime = 200; // in ms EntityLock lock = admin.namespaceLock("namespace", "description", null); lock.setTestingSleepTime(workerSleepTime); - when(master.requestLock(any(), any())).thenReturn( - LockResponse.newBuilder().setProcId(procId).build()); - when(master.lockHeartbeat(any(), any())).thenReturn( - UNLOCKED_RESPONSE, UNLOCKED_RESPONSE, UNLOCKED_RESPONSE, LOCKED_RESPONSE); + when(master.requestLock(any(), any())) + .thenReturn(LockResponse.newBuilder().setProcId(procId).build()); + when(master.lockHeartbeat(any(), any())).thenReturn(UNLOCKED_RESPONSE, UNLOCKED_RESPONSE, + UNLOCKED_RESPONSE, LOCKED_RESPONSE); lock.requestLock(); // we return unlock response 3 times, so actual wait time should be around 2 * workerSleepTime @@ -160,7 +158,7 @@ public void testEntityLock() throws Exception { */ @Test public void testEntityLockTimeout() throws Exception { - final long workerSleepTime = 200; // in ms + final long workerSleepTime = 200; // in ms Abortable abortable = Mockito.mock(Abortable.class); EntityLock lock = admin.namespaceLock("namespace", "description", abortable); lock.setTestingSleepTime(workerSleepTime); @@ -168,8 +166,7 @@ public void testEntityLockTimeout() throws Exception { when(master.requestLock(any(), any())) .thenReturn(LockResponse.newBuilder().setProcId(procId).build()); // Acquires the lock, but then it times out (since we don't call unlock() on it). - when(master.lockHeartbeat(any(), any())) - .thenReturn(LOCKED_RESPONSE, UNLOCKED_RESPONSE); + when(master.lockHeartbeat(any(), any())).thenReturn(LOCKED_RESPONSE, UNLOCKED_RESPONSE); lock.requestLock(); lock.await(); @@ -188,15 +185,14 @@ public void testEntityLockTimeout() throws Exception { */ @Test public void testHeartbeatException() throws Exception { - final long workerSleepTime = 100; // in ms + final long workerSleepTime = 100; // in ms Abortable abortable = Mockito.mock(Abortable.class); EntityLock lock = admin.namespaceLock("namespace", "description", abortable); lock.setTestingSleepTime(workerSleepTime); when(master.requestLock(any(), any())) .thenReturn(LockResponse.newBuilder().setProcId(procId).build()); - when(master.lockHeartbeat(any(), any())) - .thenReturn(LOCKED_RESPONSE) + when(master.lockHeartbeat(any(), any())).thenReturn(LOCKED_RESPONSE) .thenThrow(new ServiceException("Failed heartbeat!")); lock.requestLock(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java index 572e35456829..0bf041559a90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminForSyncReplication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -44,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestReplicationAdminForSyncReplication { @ClassRule @@ -54,8 +53,7 @@ public class TestReplicationAdminForSyncReplication { private static final Logger LOG = LoggerFactory.getLogger(TestReplicationAdminForSyncReplication.class); - private final static HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Admin hbaseAdmin; @@ -85,8 +83,8 @@ public void testAddPeerWithSameTable() throws Exception { int index = i; threads[i] = new Thread(() -> { try { - hbaseAdmin - .addReplicationPeer(peerId, buildSyncReplicationPeerConfig(clusterKey, tableName)); + hbaseAdmin.addReplicationPeer(peerId, + buildSyncReplicationPeerConfig(clusterKey, tableName)); } catch (IOException e) { LOG.error("Failed to add replication peer " + peerId); success[index] = false; @@ -115,7 +113,7 @@ private ReplicationPeerConfig buildSyncReplicationPeerConfig(String clusterKey, ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(); builder.setClusterKey(clusterKey); builder.setRemoteWALDir(rootDir.makeQualified(TEST_UTIL.getTestFileSystem().getUri(), - TEST_UTIL.getTestFileSystem().getWorkingDirectory()).toString()); + TEST_UTIL.getTestFileSystem().getWorkingDirectory()).toString()); builder.setReplicateAllUserTables(false); Map> tableCfs = new HashMap<>(); tableCfs.put(tableName, new ArrayList<>()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java index e801b5b4beca..ad58465c6b02 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,16 +23,15 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.CellOutputStream; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Do basic codec performance eval. 
@@ -43,63 +42,62 @@ public class CodecPerformance { @Deprecated public static final Logger LOG = LoggerFactory.getLogger(CodecPerformance.class); - static Cell [] getCells(final int howMany) { - Cell [] cells = new Cell[howMany]; + static Cell[] getCells(final int howMany) { + Cell[] cells = new Cell[howMany]; for (int i = 0; i < howMany; i++) { - byte [] index = Bytes.toBytes(i); + byte[] index = Bytes.toBytes(i); KeyValue kv = new KeyValue(index, Bytes.toBytes("f"), index, index); cells[i] = kv; } return cells; } - static int getRoughSize(final Cell [] cells) { + static int getRoughSize(final Cell[] cells) { int size = 0; - for (Cell c: cells) { + for (Cell c : cells) { size += c.getRowLength() + c.getFamilyLength() + c.getQualifierLength() + c.getValueLength(); size += Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE; } return size; } - static byte [] runEncoderTest(final int index, final int initialBufferSize, - final ByteArrayOutputStream baos, final CellOutputStream encoder, final Cell [] cells) - throws IOException { + static byte[] runEncoderTest(final int index, final int initialBufferSize, + final ByteArrayOutputStream baos, final CellOutputStream encoder, final Cell[] cells) + throws IOException { long startTime = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < cells.length; i++) { encoder.write(cells[i]); } encoder.flush(); - LOG.info("" + index + " encoded count=" + cells.length + " in " + - (EnvironmentEdgeManager.currentTime() - startTime) + "ms for encoder " + encoder); + LOG.info("" + index + " encoded count=" + cells.length + " in " + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms for encoder " + encoder); // Ensure we did not have to grow the backing buffer. assertTrue(baos.size() < initialBufferSize); return baos.toByteArray(); } - static Cell [] runDecoderTest(final int index, final int count, final CellScanner decoder) - throws IOException { - Cell [] cells = new Cell[count]; + static Cell[] runDecoderTest(final int index, final int count, final CellScanner decoder) + throws IOException { + Cell[] cells = new Cell[count]; long startTime = EnvironmentEdgeManager.currentTime(); for (int i = 0; decoder.advance(); i++) { cells[i] = decoder.current(); } - LOG.info("" + index + " decoded count=" + cells.length + " in " + - (EnvironmentEdgeManager.currentTime() - startTime) + "ms for decoder " + decoder); + LOG.info("" + index + " decoded count=" + cells.length + " in " + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms for decoder " + decoder); // Ensure we did not have to grow the backing buffer. assertTrue(cells.length == count); return cells; } - static void verifyCells(final Cell [] input, final Cell [] output) { + static void verifyCells(final Cell[] input, final Cell[] output) { assertArrayEquals(input, output); } - static void doCodec(final Codec codec, final Cell [] cells, final int cycles, final int count, - final int initialBufferSize) - throws IOException { - byte [] bytes = null; - Cell [] cellsDecoded = null; + static void doCodec(final Codec codec, final Cell[] cells, final int cycles, final int count, + final int initialBufferSize) throws IOException { + byte[] bytes = null; + Cell[] cellsDecoded = null; for (int i = 0; i < cycles; i++) { ByteArrayOutputStream baos = new ByteArrayOutputStream(initialBufferSize); Codec.Encoder encoder = codec.getEncoder(baos); @@ -119,7 +117,7 @@ public static void main(String[] args) throws IOException { // How many times to do an operation; repeat gives hotspot chance to warm up. 
final int cycles = 30; - Cell [] cells = getCells(count); + Cell[] cells = getCells(count); int size = getRoughSize(cells); int initialBufferSize = 2 * size; // Multiply by 2 to ensure we don't have to grow buffer diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java index e4b2b2f80048..93bc42c2531b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/TestCellMessageCodec.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ import org.apache.hbase.thirdparty.com.google.common.io.CountingInputStream; import org.apache.hbase.thirdparty.com.google.common.io.CountingOutputStream; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestCellMessageCodec { @ClassRule @@ -77,8 +77,8 @@ public void testOne() throws IOException { DataOutputStream dos = new DataOutputStream(cos); MessageCodec cmc = new MessageCodec(); Codec.Encoder encoder = cmc.getEncoder(dos); - final KeyValue kv = - new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")); + final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), + Bytes.toBytes("v")); encoder.write(kv); encoder.flush(); dos.close(); @@ -87,7 +87,8 @@ public void testOne() throws IOException { DataInputStream dis = new DataInputStream(cis); Codec.Decoder decoder = cmc.getDecoder(dis); assertTrue(decoder.advance()); // First read should pull in the KV - assertFalse(decoder.advance()); // Second read should trip over the end-of-stream marker and return false + assertFalse(decoder.advance()); // Second read should trip over the end-of-stream marker and + // return false dis.close(); assertEquals(offset, cis.getCount()); } @@ -99,9 +100,12 @@ public void testThree() throws IOException { DataOutputStream dos = new DataOutputStream(cos); MessageCodec cmc = new MessageCodec(); Codec.Encoder encoder = cmc.getEncoder(dos); - final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("1")); - final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), Bytes.toBytes("2")); - final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), Bytes.toBytes("3")); + final KeyValue kv1 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("1"), + Bytes.toBytes("1")); + final KeyValue kv2 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("2"), + Bytes.toBytes("2")); + final KeyValue kv3 = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("3"), + Bytes.toBytes("3")); encoder.write(kv1); encoder.write(kv2); encoder.write(kv3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/AllFailConstraint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/AllFailConstraint.java index 03fec35aa289..abd97812f372 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/AllFailConstraint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/AllFailConstraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation 
(ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/AllPassConstraint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/AllPassConstraint.java index a33dfdbb080b..fc69ddc673ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/AllPassConstraint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/AllPassConstraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/CheckConfigurationConstraint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/CheckConfigurationConstraint.java index c7b0604aafc9..e404884f2ea9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/CheckConfigurationConstraint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/CheckConfigurationConstraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,15 +45,13 @@ public void setConf(Configuration conf) { if (conf != null) { String val = conf.get(key); if (val == null || !val.equals(value)) - throw new IllegalArgumentException( - "Configuration was not passed correctly"); + throw new IllegalArgumentException("Configuration was not passed correctly"); // and then check to make sure we got a fresh config by checking for a // hadoop-based config value, and if we don't find it, its fine - if (conf.getRaw("fs.file.impl") != null) - throw new IllegalArgumentException( - "Configuration was created using 'new Configuration()', should be " - + "done via 'new Configuration(false) to exclude defaut hadoop " - + "configurations values."); + if (conf.getRaw("fs.file.impl") != null) throw new IllegalArgumentException( + "Configuration was created using 'new Configuration()', should be " + + "done via 'new Configuration(false)' to exclude default hadoop " + + "configurations values."); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/RuntimeFailConstraint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/RuntimeFailConstraint.java index 64f6e6d342a7..3566c0101b5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/RuntimeFailConstraint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/RuntimeFailConstraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -26,8 +26,7 @@ public class RuntimeFailConstraint extends BaseConstraint { @Override public void check(Put p) throws ConstraintException { - throw new RuntimeException( - "RuntimeFailConstraint always throws a runtime exception"); + throw new RuntimeException("RuntimeFailConstraint always throws a runtime exception"); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java index 4ff21d0843b4..cd91946065b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java @@ -49,7 +49,7 @@ public class TestConstraint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConstraint.class); + HBaseClassTestRule.forClass(TestConstraint.class); private static final Logger LOG = LoggerFactory.getLogger(TestConstraint.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java index 2087a9819780..0ac305ec8da6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class TestConstraints { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConstraints.class); + HBaseClassTestRule.forClass(TestConstraints.class); @Rule public TableNameTestRule name = new TableNameTestRule(); @@ -57,7 +57,7 @@ public void testSimpleReadWrite() throws Exception { Constraints.add(builder, WorksConstraint.class); List constraints = - Constraints.getConstraints(builder.build(), this.getClass().getClassLoader()); + Constraints.getConstraints(builder.build(), this.getClass().getClassLoader()); assertEquals(1, constraints.size()); assertEquals(WorksConstraint.class, constraints.get(0).getClass()); @@ -78,10 +78,10 @@ public void testSimpleReadWrite() throws Exception { public void testReadWriteWithConf() throws Exception { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name.getTableName()); Constraints.add(builder, new Pair<>(CheckConfigurationConstraint.class, - CheckConfigurationConstraint.getConfiguration())); + CheckConfigurationConstraint.getConfiguration())); List c = - Constraints.getConstraints(builder.build(), this.getClass().getClassLoader()); + Constraints.getConstraints(builder.build(), this.getClass().getClassLoader()); assertEquals(1, c.size()); assertEquals(CheckConfigurationConstraint.class, c.get(0).getClass()); @@ -142,7 +142,7 @@ public void testUpdateConstraint() throws Exception { CheckConfigurationConstraint.getConfiguration()); List constraints = - Constraints.getConstraints(builder.build(), this.getClass().getClassLoader()); + Constraints.getConstraints(builder.build(), this.getClass().getClassLoader()); assertEquals(2, constraints.size()); @@ -172,7 +172,7 @@ public void testConfigurationPreserved() throws Exception { Constraints.add(builder, WorksConstraint.class); 
assertFalse(Constraints.enabled(builder.build(), AlsoWorks.class)); List constraints = - Constraints.getConstraints(builder.build(), this.getClass().getClassLoader()); + Constraints.getConstraints(builder.build(), this.getClass().getClassLoader()); for (Constraint c : constraints) { Configuration storedConf = c.getConf(); if (c instanceof AlsoWorks) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/WorksConstraint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/WorksConstraint.java index fa94230c65cd..f12fb63ce59f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/WorksConstraint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/WorksConstraint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,9 +29,8 @@ public void check(Put p) { } /** - * Constraint to check that the naming of constraints doesn't mess up the - * pattern matching.(that constraint $___Constraint$NameConstraint isn't a - * problem) + * Constraint to check that the naming of constraints doesn't mess up the pattern matching.(that + * constraint $___Constraint$NameConstraint isn't a problem) */ public static class NameConstraint extends WorksConstraint { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java index 1da31dadb0a0..9e7bc29cb813 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Optional; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -40,8 +37,8 @@ * passed-in WALEdit, i.e, ignore specified columns when writing, or add a KeyValue. On the other * side, it checks whether the ignored column is still in WAL when Restoreed at region reconstruct. */ -public class SampleRegionWALCoprocessor implements WALCoprocessor, RegionCoprocessor, - WALObserver, RegionObserver { +public class SampleRegionWALCoprocessor + implements WALCoprocessor, RegionCoprocessor, WALObserver, RegionObserver { private static final Logger LOG = LoggerFactory.getLogger(SampleRegionWALCoprocessor.class); @@ -62,11 +59,11 @@ public class SampleRegionWALCoprocessor implements WALCoprocessor, RegionCoproce private boolean postWALRollCalled = false; /** - * Set values: with a table name, a column name which will be ignored, and - * a column name which will be added to WAL. + * Set values: with a table name, a column name which will be ignored, and a column name which + * will be added to WAL. 
*/ - public void setTestValues(byte[] tableName, byte[] row, byte[] igf, byte[] igq, - byte[] chf, byte[] chq, byte[] addf, byte[] addq) { + public void setTestValues(byte[] tableName, byte[] row, byte[] igf, byte[] igq, byte[] chf, + byte[] chq, byte[] addf, byte[] addq) { this.row = row; this.tableName = tableName; this.ignoredFamily = igf; @@ -83,7 +80,8 @@ public void setTestValues(byte[] tableName, byte[] row, byte[] igf, byte[] igq, postWALRollCalled = false; } - @Override public Optional getWALObserver() { + @Override + public Optional getWALObserver() { return Optional.of(this); } @@ -99,8 +97,8 @@ public void postWALWrite(ObserverContext en } @Override - public void preWALWrite(ObserverContext env, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + public void preWALWrite(ObserverContext env, RegionInfo info, + WALKey logKey, WALEdit logEdit) throws IOException { // check table name matches or not. if (!Bytes.equals(info.getTable().toBytes(), this.tableName)) { return; @@ -115,13 +113,11 @@ public void preWALWrite(ObserverContext env byte[] family = CellUtil.cloneFamily(cell); byte[] qulifier = CellUtil.cloneQualifier(cell); - if (Arrays.equals(family, ignoredFamily) && - Arrays.equals(qulifier, ignoredQualifier)) { + if (Arrays.equals(family, ignoredFamily) && Arrays.equals(qulifier, ignoredQualifier)) { LOG.debug("Found the KeyValue from WALEdit which should be ignored."); deletedCell = cell; } - if (Arrays.equals(family, changedFamily) && - Arrays.equals(qulifier, changedQualifier)) { + if (Arrays.equals(family, changedFamily) && Arrays.equals(qulifier, changedQualifier)) { LOG.debug("Found the KeyValue from WALEdit which should be changed."); cell.getValueArray()[cell.getValueOffset()] = (byte) (cell.getValueArray()[cell.getValueOffset()] + 1); @@ -137,30 +133,28 @@ public void preWALWrite(ObserverContext env } /** - * Triggered before {@link org.apache.hadoop.hbase.regionserver.HRegion} when WAL is - * Restoreed. + * Triggered before {@link org.apache.hadoop.hbase.regionserver.HRegion} when WAL is Restoreed. */ @Override public void preWALRestore(ObserverContext env, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { preWALRestoreCalled = true; } @Override - public void preWALRoll(ObserverContext ctx, - Path oldPath, Path newPath) throws IOException { + public void preWALRoll(ObserverContext ctx, Path oldPath, + Path newPath) throws IOException { preWALRollCalled = true; } @Override - public void postWALRoll(ObserverContext ctx, - Path oldPath, Path newPath) throws IOException { + public void postWALRoll(ObserverContext ctx, Path oldPath, + Path newPath) throws IOException { postWALRollCalled = true; } /** - * Triggered after {@link org.apache.hadoop.hbase.regionserver.HRegion} when WAL is - * Restoreed. + * Triggered after {@link org.apache.hadoop.hbase.regionserver.HRegion} when WAL is Restoreed. 
*/ @Override public void postWALRestore(ObserverContext env, @@ -177,14 +171,12 @@ public boolean isPostWALWriteCalled() { } public boolean isPreWALRestoreCalled() { - LOG.debug(SampleRegionWALCoprocessor.class.getName() + - ".isPreWALRestoreCalled is called."); + LOG.debug(SampleRegionWALCoprocessor.class.getName() + ".isPreWALRestoreCalled is called."); return preWALRestoreCalled; } public boolean isPostWALRestoreCalled() { - LOG.debug(SampleRegionWALCoprocessor.class.getName() + - ".isPostWALRestoreCalled is called."); + LOG.debug(SampleRegionWALCoprocessor.class.getName() + ".isPostWALRestoreCalled is called."); return postWALRestoreCalled; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index fb1da66e603f..5239b167a5a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.coprocessor; import static org.junit.Assert.assertArrayEquals; @@ -31,7 +29,6 @@ import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -74,8 +71,8 @@ import org.apache.hadoop.hbase.wal.WALKey; /** - * A sample region observer that tests the RegionObserver interface. - * It works with TestRegionObserverInterface to provide the test case. + * A sample region observer that tests the RegionObserver interface. It works with + * TestRegionObserverInterface to provide the test case. 
*/ public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver { @@ -142,10 +139,10 @@ public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver { final AtomicInteger ctPreWALAppend = new AtomicInteger(0); static final String TABLE_SKIPPED = "SKIPPED_BY_PREWALRESTORE"; - Map extendedAttributes = new HashMap(); + Map extendedAttributes = new HashMap(); static final byte[] WAL_EXTENDED_ATTRIBUTE_BYTES = Bytes.toBytes("foo"); - public void setThrowOnPostFlush(Boolean val){ + public void setThrowOnPostFlush(Boolean val) { throwOnPostFlush.set(val); } @@ -187,17 +184,17 @@ public boolean wasClosed() { } @Override - public InternalScanner preFlush(ObserverContext c, - Store store, InternalScanner scanner, FlushLifeCycleTracker tracker) throws IOException { + public InternalScanner preFlush(ObserverContext c, Store store, + InternalScanner scanner, FlushLifeCycleTracker tracker) throws IOException { ctPreFlush.incrementAndGet(); return scanner; } @Override - public void postFlush(ObserverContext c, - Store store, StoreFile resultFile, FlushLifeCycleTracker tracker) throws IOException { + public void postFlush(ObserverContext c, Store store, + StoreFile resultFile, FlushLifeCycleTracker tracker) throws IOException { ctPostFlush.incrementAndGet(); - if (throwOnPostFlush.get()){ + if (throwOnPostFlush.get()) { throw new IOException("throwOnPostFlush is true in postFlush"); } } @@ -229,8 +226,8 @@ public InternalScanner preCompact(ObserverContext @Override public void postCompact(ObserverContext c, Store store, - StoreFile resultFile, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException { + StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) + throws IOException { ctPostCompact.incrementAndGet(); } @@ -246,24 +243,23 @@ public void preScannerOpen(final ObserverContext c @Override public RegionScanner postScannerOpen(final ObserverContext c, - final Scan scan, final RegionScanner s) - throws IOException { + final Scan scan, final RegionScanner s) throws IOException { ctPostScannerOpen.incrementAndGet(); return s; } @Override public boolean preScannerNext(final ObserverContext c, - final InternalScanner s, final List results, - final int limit, final boolean hasMore) throws IOException { + final InternalScanner s, final List results, final int limit, final boolean hasMore) + throws IOException { ctPreScannerNext.incrementAndGet(); return hasMore; } @Override public boolean postScannerNext(final ObserverContext c, - final InternalScanner s, final List results, final int limit, - final boolean hasMore) throws IOException { + final InternalScanner s, final List results, final int limit, final boolean hasMore) + throws IOException { ctPostScannerNext.incrementAndGet(); return hasMore; } @@ -306,12 +302,12 @@ public void postGetOp(final ObserverContext c, fin assertNotNull(e.getRegion()); assertNotNull(get); assertNotNull(results); - if (e.getRegion().getTableDescriptor().getTableName().equals( - TestRegionObserverInterface.TEST_TABLE)) { + if (e.getRegion().getTableDescriptor().getTableName() + .equals(TestRegionObserverInterface.TEST_TABLE)) { boolean foundA = false; boolean foundB = false; boolean foundC = false; - for (Cell kv: results) { + for (Cell kv : results) { if (CellUtil.matchingFamily(kv, TestRegionObserverInterface.A)) { foundA = true; } @@ -330,85 +326,82 @@ public void postGetOp(final ObserverContext c, fin } @Override - public void prePut(final ObserverContext c, - final Put put, final 
WALEdit edit, - final Durability durability) throws IOException { - Map> familyMap = put.getFamilyCellMap(); + public void prePut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) throws IOException { + Map> familyMap = put.getFamilyCellMap(); RegionCoprocessorEnvironment e = c.getEnvironment(); assertNotNull(e); assertNotNull(e.getRegion()); assertNotNull(familyMap); - if (e.getRegion().getTableDescriptor().getTableName().equals( - TestRegionObserverInterface.TEST_TABLE)) { + if (e.getRegion().getTableDescriptor().getTableName() + .equals(TestRegionObserverInterface.TEST_TABLE)) { List cells = familyMap.get(TestRegionObserverInterface.A); assertNotNull(cells); assertNotNull(cells.get(0)); Cell cell = cells.get(0); - assertTrue(Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), TestRegionObserverInterface.A, 0, - TestRegionObserverInterface.A.length)); + assertTrue( + Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), + TestRegionObserverInterface.A, 0, TestRegionObserverInterface.A.length)); cells = familyMap.get(TestRegionObserverInterface.B); assertNotNull(cells); assertNotNull(cells.get(0)); cell = cells.get(0); - assertTrue(Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), TestRegionObserverInterface.B, 0, - TestRegionObserverInterface.B.length)); + assertTrue( + Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), + TestRegionObserverInterface.B, 0, TestRegionObserverInterface.B.length)); cells = familyMap.get(TestRegionObserverInterface.C); assertNotNull(cells); assertNotNull(cells.get(0)); cell = cells.get(0); - assertTrue(Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), TestRegionObserverInterface.C, 0, - TestRegionObserverInterface.C.length)); + assertTrue( + Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), + TestRegionObserverInterface.C, 0, TestRegionObserverInterface.C.length)); } ctPrePut.incrementAndGet(); } @Override - public void postPut(final ObserverContext c, - final Put put, final WALEdit edit, - final Durability durability) throws IOException { - Map> familyMap = put.getFamilyCellMap(); + public void postPut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) throws IOException { + Map> familyMap = put.getFamilyCellMap(); RegionCoprocessorEnvironment e = c.getEnvironment(); assertNotNull(e); assertNotNull(e.getRegion()); assertNotNull(familyMap); List cells = familyMap.get(TestRegionObserverInterface.A); - if (e.getRegion().getTableDescriptor().getTableName().equals( - TestRegionObserverInterface.TEST_TABLE)) { + if (e.getRegion().getTableDescriptor().getTableName() + .equals(TestRegionObserverInterface.TEST_TABLE)) { assertNotNull(cells); assertNotNull(cells.get(0)); - // KeyValue v1 expectation. Cast for now until we go all Cell all the time. TODO + // KeyValue v1 expectation. Cast for now until we go all Cell all the time. 
TODO Cell cell = cells.get(0); - assertTrue(Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), TestRegionObserverInterface.A, 0, - TestRegionObserverInterface.A.length)); + assertTrue( + Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), + TestRegionObserverInterface.A, 0, TestRegionObserverInterface.A.length)); cells = familyMap.get(TestRegionObserverInterface.B); assertNotNull(cells); assertNotNull(cells.get(0)); - // KeyValue v1 expectation. Cast for now until we go all Cell all the time. TODO + // KeyValue v1 expectation. Cast for now until we go all Cell all the time. TODO cell = cells.get(0); - assertTrue(Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), TestRegionObserverInterface.B, 0, - TestRegionObserverInterface.B.length)); + assertTrue( + Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), + TestRegionObserverInterface.B, 0, TestRegionObserverInterface.B.length)); cells = familyMap.get(TestRegionObserverInterface.C); assertNotNull(cells); assertNotNull(cells.get(0)); - // KeyValue v1 expectation. Cast for now until we go all Cell all the time. TODO + // KeyValue v1 expectation. Cast for now until we go all Cell all the time. TODO cell = cells.get(0); - assertTrue(Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), - cell.getQualifierLength(), TestRegionObserverInterface.C, 0, - TestRegionObserverInterface.C.length)); + assertTrue( + Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), + TestRegionObserverInterface.C, 0, TestRegionObserverInterface.C.length)); } ctPostPut.incrementAndGet(); } @Override - public void preDelete(final ObserverContext c, - final Delete delete, final WALEdit edit, - final Durability durability) throws IOException { - Map> familyMap = delete.getFamilyCellMap(); + public void preDelete(final ObserverContext c, final Delete delete, + final WALEdit edit, final Durability durability) throws IOException { + Map> familyMap = delete.getFamilyCellMap(); RegionCoprocessorEnvironment e = c.getEnvironment(); assertNotNull(e); assertNotNull(e.getRegion()); @@ -425,10 +418,9 @@ public void prePrepareTimeStampForDeleteVersion(ObserverContext c, - final Delete delete, final WALEdit edit, - final Durability durability) throws IOException { - Map> familyMap = delete.getFamilyCellMap(); + public void postDelete(final ObserverContext c, final Delete delete, + final WALEdit edit, final Durability durability) throws IOException { + Map> familyMap = delete.getFamilyCellMap(); RegionCoprocessorEnvironment e = c.getEnvironment(); assertNotNull(e); assertNotNull(e.getRegion()); @@ -473,7 +465,8 @@ public void postCloseRegionOperation(final ObserverContext ctx, - MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException { + MiniBatchOperationInProgress miniBatchOp, final boolean success) + throws IOException { ctPostBatchMutateIndispensably.incrementAndGet(); } @@ -500,15 +493,15 @@ public Result postIncrement(final ObserverContext @Override public boolean preCheckAndPut(ObserverContext e, byte[] row, - byte[] family, byte[] qualifier, CompareOperator compareOp, ByteArrayComparable comparator, - Put put, boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator compareOp, ByteArrayComparable comparator, + Put put, boolean result) throws IOException { ctPreCheckAndPut.incrementAndGet(); return true; } 
@Override public boolean preCheckAndPut(ObserverContext c, byte[] row, - Filter filter, Put put, boolean result) throws IOException { + Filter filter, Put put, boolean result) throws IOException { ctPreCheckAndPutWithFilter.incrementAndGet(); return true; } @@ -523,37 +516,37 @@ public boolean preCheckAndPutAfterRowLock(ObserverContext c, - byte[] row, Filter filter, Put put, boolean result) throws IOException { + byte[] row, Filter filter, Put put, boolean result) throws IOException { ctPreCheckAndPutWithFilterAfterRowLock.incrementAndGet(); return true; } @Override public boolean postCheckAndPut(ObserverContext e, byte[] row, - byte[] family, byte[] qualifier, CompareOperator compareOp, ByteArrayComparable comparator, - Put put, boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator compareOp, ByteArrayComparable comparator, + Put put, boolean result) throws IOException { ctPostCheckAndPut.incrementAndGet(); return true; } @Override public boolean postCheckAndPut(ObserverContext c, byte[] row, - Filter filter, Put put, boolean result) throws IOException { + Filter filter, Put put, boolean result) throws IOException { ctPostCheckAndPutWithFilter.incrementAndGet(); return true; } @Override public boolean preCheckAndDelete(ObserverContext e, byte[] row, - byte[] family, byte[] qualifier, CompareOperator compareOp, ByteArrayComparable comparator, - Delete delete, boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator compareOp, ByteArrayComparable comparator, + Delete delete, boolean result) throws IOException { ctPreCheckAndDelete.incrementAndGet(); return true; } @Override public boolean preCheckAndDelete(ObserverContext c, byte[] row, - Filter filter, Delete delete, boolean result) throws IOException { + Filter filter, Delete delete, boolean result) throws IOException { ctPreCheckAndDeleteWithFilter.incrementAndGet(); return true; } @@ -568,44 +561,44 @@ public boolean preCheckAndDeleteAfterRowLock(ObserverContext c, - byte[] row, Filter filter, Delete delete, boolean result) throws IOException { + byte[] row, Filter filter, Delete delete, boolean result) throws IOException { ctPreCheckAndDeleteWithFilterAfterRowLock.incrementAndGet(); return true; } @Override public boolean postCheckAndDelete(ObserverContext e, byte[] row, - byte[] family, byte[] qualifier, CompareOperator compareOp, ByteArrayComparable comparator, - Delete delete, boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator compareOp, ByteArrayComparable comparator, + Delete delete, boolean result) throws IOException { ctPostCheckAndDelete.incrementAndGet(); return true; } @Override public boolean postCheckAndDelete(ObserverContext e, byte[] row, - Filter filter, Delete delete, boolean result) throws IOException { + Filter filter, Delete delete, boolean result) throws IOException { ctPostCheckAndDeleteWithFilter.incrementAndGet(); return true; } @Override public CheckAndMutateResult preCheckAndMutate(ObserverContext c, - CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException { + CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException { ctPreCheckAndMutate.incrementAndGet(); return RegionObserver.super.preCheckAndMutate(c, checkAndMutate, result); } @Override public CheckAndMutateResult preCheckAndMutateAfterRowLock( - ObserverContext c, CheckAndMutate checkAndMutate, - CheckAndMutateResult result) throws IOException { + ObserverContext c, CheckAndMutate checkAndMutate, + 
CheckAndMutateResult result) throws IOException { ctPreCheckAndMutateAfterRowLock.incrementAndGet(); return RegionObserver.super.preCheckAndMutateAfterRowLock(c, checkAndMutate, result); } @Override public CheckAndMutateResult postCheckAndMutate(ObserverContext c, - CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException { + CheckAndMutate checkAndMutate, CheckAndMutateResult result) throws IOException { ctPostCheckAndMutate.incrementAndGet(); return RegionObserver.super.postCheckAndMutate(c, checkAndMutate, result); } @@ -633,37 +626,38 @@ public Result postAppend(ObserverContext e, Append @Override public void preBulkLoadHFile(ObserverContext ctx, - List> familyPaths) throws IOException { + List> familyPaths) throws IOException { RegionCoprocessorEnvironment e = ctx.getEnvironment(); assertNotNull(e); assertNotNull(e.getRegion()); - if (e.getRegion().getTableDescriptor().getTableName().equals( - TestRegionObserverInterface.TEST_TABLE)) { + if (e.getRegion().getTableDescriptor().getTableName() + .equals(TestRegionObserverInterface.TEST_TABLE)) { assertNotNull(familyPaths); - assertEquals(1,familyPaths.size()); + assertEquals(1, familyPaths.size()); assertArrayEquals(TestRegionObserverInterface.A, familyPaths.get(0).getFirst()); String familyPath = familyPaths.get(0).getSecond(); String familyName = Bytes.toString(TestRegionObserverInterface.A); - assertEquals(familyPath.substring(familyPath.length()-familyName.length()-1),"/"+familyName); + assertEquals(familyPath.substring(familyPath.length() - familyName.length() - 1), + "/" + familyName); } ctPreBulkLoadHFile.incrementAndGet(); } @Override public void postBulkLoadHFile(ObserverContext ctx, - List> familyPaths, Map> map) - throws IOException { + List> familyPaths, Map> map) throws IOException { RegionCoprocessorEnvironment e = ctx.getEnvironment(); assertNotNull(e); assertNotNull(e.getRegion()); - if (e.getRegion().getTableDescriptor().getTableName().equals( - TestRegionObserverInterface.TEST_TABLE)) { + if (e.getRegion().getTableDescriptor().getTableName() + .equals(TestRegionObserverInterface.TEST_TABLE)) { assertNotNull(familyPaths); - assertEquals(1,familyPaths.size()); + assertEquals(1, familyPaths.size()); assertArrayEquals(TestRegionObserverInterface.A, familyPaths.get(0).getFirst()); String familyPath = familyPaths.get(0).getSecond(); String familyName = Bytes.toString(TestRegionObserverInterface.A); - assertEquals(familyPath.substring(familyPath.length()-familyName.length()-1),"/"+familyName); + assertEquals(familyPath.substring(familyPath.length() - familyName.length() - 1), + "/" + familyName); } ctPostBulkLoadHFile.incrementAndGet(); } @@ -694,7 +688,7 @@ public void preWALRestore(ObserverContext env, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { ctPostWALRestore.incrementAndGet(); } @@ -715,8 +709,8 @@ public StoreFileReader postStoreFileReaderOpen(ObserverContext ctx, - Store store, ScanOptions options) throws IOException { + public void preStoreScannerOpen(ObserverContext ctx, Store store, + ScanOptions options) throws IOException { if (options.getScan().getTimeRange().isAllTime()) { setScanOptions(options); } @@ -724,19 +718,19 @@ public void preStoreScannerOpen(ObserverContext ct @Override public void preCompactScannerOpen(ObserverContext c, Store store, - ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker, - CompactionRequest request) throws IOException { + ScanType scanType, 
ScanOptions options, CompactionLifeCycleTracker tracker, + CompactionRequest request) throws IOException { setScanOptions(options); } public void preFlushScannerOpen(ObserverContext c, Store store, - ScanOptions options,FlushLifeCycleTracker tracker) throws IOException { + ScanOptions options, FlushLifeCycleTracker tracker) throws IOException { setScanOptions(options); } public void preMemStoreCompactionCompactScannerOpen( - ObserverContext c, Store store, ScanOptions options) - throws IOException { + ObserverContext c, Store store, ScanOptions options) + throws IOException { setScanOptions(options); } @@ -748,14 +742,12 @@ private void setScanOptions(ScanOptions options) { options.setTimeToPurgeDeletes(TestRegionCoprocessorHost.TIME_TO_PURGE_DELETES); } - @Override - public void preWALAppend(ObserverContext ctx, - WALKey key, WALEdit edit) throws IOException { + public void preWALAppend(ObserverContext ctx, WALKey key, + WALEdit edit) throws IOException { ctPreWALAppend.incrementAndGet(); - key.addExtendedAttribute(Integer.toString(ctPreWALAppend.get()), - Bytes.toBytes("foo")); + key.addExtendedAttribute(Integer.toString(ctPreWALAppend.get()), Bytes.toBytes("foo")); } public boolean hadPreGet() { @@ -925,15 +917,19 @@ public boolean hadPostWALRestore() { public boolean wasScannerNextCalled() { return ctPreScannerNext.get() > 0 && ctPostScannerNext.get() > 0; } + public boolean wasScannerFilterRowCalled() { return ctPostScannerFilterRow.get() > 0; } + public boolean wasScannerCloseCalled() { return ctPreScannerClose.get() > 0 && ctPostScannerClose.get() > 0; } + public boolean wasScannerOpenCalled() { return ctPreScannerOpen.get() > 0 && ctPostScannerOpen.get() > 0; } + public boolean hadDeleted() { return ctPreDeleted.get() > 0 && ctPostDeleted.get() > 0; } @@ -946,7 +942,6 @@ public boolean hadPreBulkLoadHFile() { return ctPreBulkLoadHFile.get() > 0; } - public int getCtBeforeDelete() { return ctBeforeDelete.get(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java index 2e40de3112c2..98865f43a817 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAppendTimeRange.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestAppendTimeRange { @ClassRule @@ -76,7 +76,7 @@ public class TestAppendTimeRange { @BeforeClass public static void setupBeforeClass() throws Exception { util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - MyObserver.class.getName()); + MyObserver.class.getName()); // Make general delay zero rather than default. Timing is off in this // test that depends on an evironment edge that is manually moved forward. 
util.getConfiguration().setInt(RemoteProcedureDispatcher.DISPATCH_DELAY_CONF_KEY, 0); @@ -101,11 +101,11 @@ public Optional getRegionObserver() { @Override public Result preAppend(final ObserverContext e, final Append append) throws IOException { - NavigableMap> map = append.getFamilyCellMap(); - for (Map.Entry> entry : map.entrySet()) { + NavigableMap> map = append.getFamilyCellMap(); + for (Map.Entry> entry : map.entrySet()) { for (Cell cell : entry.getValue()) { - String appendStr = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()); + String appendStr = + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); if (appendStr.equals("b")) { tr10 = append.getTimeRange(); } else if (appendStr.equals("c") && !append.getTimeRange().isAllTime()) { @@ -130,19 +130,18 @@ public void testHTableInterfaceMethods() throws Exception { mee.setValue(time); TimeRange range10 = TimeRange.between(1, time + 10); table.append(new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("b")) - .setTimeRange(range10.getMin(), range10.getMax())); + .setTimeRange(range10.getMin(), range10.getMax())); checkRowValue(table, ROW, Bytes.toBytes("ab")); assertEquals(MyObserver.tr10.getMin(), range10.getMin()); assertEquals(MyObserver.tr10.getMax(), range10.getMax()); time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); TimeRange range2 = TimeRange.between(1, time + 20); - List actions = - Arrays.asList(new Row[] { - new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c")) - .setTimeRange(range2.getMin(), range2.getMax()), - new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c")) - .setTimeRange(range2.getMin(), range2.getMax()) }); + List actions = Arrays.asList(new Row[] { + new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c")) + .setTimeRange(range2.getMin(), range2.getMax()), + new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c")) + .setTimeRange(range2.getMin(), range2.getMax()) }); Object[] results1 = new Object[actions.size()]; table.batch(actions, results1); assertEquals(MyObserver.tr2.getMin(), range2.getMin()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java index 84d40464d399..05dd797b4de8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,23 +51,22 @@ /** * Tests for global coprocessor loading configuration */ -@Category({CoprocessorTests.class, SmallTests.class}) +@Category({ CoprocessorTests.class, SmallTests.class }) public class TestCoprocessorConfiguration { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCoprocessorConfiguration.class); - @Rule public ExpectedException thrown = ExpectedException.none(); + @Rule + public ExpectedException thrown = ExpectedException.none(); private static final Configuration CONF = HBaseConfiguration.create(); static { - CONF.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - SystemCoprocessor.class.getName()); + CONF.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, SystemCoprocessor.class.getName()); CONF.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, SystemCoprocessor.class.getName()); - CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - SystemCoprocessor.class.getName()); + CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, SystemCoprocessor.class.getName()); } private static final TableName TABLENAME = TableName.valueOf("TestCoprocessorConfiguration"); private static final RegionInfo REGIONINFO = RegionInfoBuilder.newBuilder(TABLENAME).build(); @@ -75,7 +74,7 @@ public class TestCoprocessorConfiguration { static { try { TABLEDESC = TableDescriptorBuilder.newBuilder(TABLENAME) - .setCoprocessor(TableCoprocessor.class.getName()).build(); + .setCoprocessor(TableCoprocessor.class.getName()).build(); } catch (IOException e) { throw new RuntimeException(e); } @@ -86,15 +85,16 @@ public class TestCoprocessorConfiguration { private static final AtomicBoolean systemCoprocessorLoaded = new AtomicBoolean(); private static final AtomicBoolean tableCoprocessorLoaded = new AtomicBoolean(); - public static class SystemCoprocessor implements MasterCoprocessor, RegionCoprocessor, - RegionServerCoprocessor { + public static class SystemCoprocessor + implements MasterCoprocessor, RegionCoprocessor, RegionServerCoprocessor { @Override public void start(CoprocessorEnvironment env) throws IOException { systemCoprocessorLoaded.set(true); } @Override - public void stop(CoprocessorEnvironment env) throws IOException { } + public void stop(CoprocessorEnvironment env) throws IOException { + } } public static class TableCoprocessor implements RegionCoprocessor { @@ -104,7 +104,8 @@ public void start(CoprocessorEnvironment env) throws IOException { } @Override - public void stop(CoprocessorEnvironment env) throws IOException { } + public void stop(CoprocessorEnvironment env) throws IOException { + } } @Test @@ -118,10 +119,11 @@ public void testRegionCoprocessorHostDefaults() throws Exception { tableCoprocessorLoaded.set(false); new RegionCoprocessorHost(region, rsServices, conf); assertEquals("System coprocessors loading default was not honored", - CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get()); + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get()); assertEquals("Table coprocessors loading default was not honored", - CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED && - CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED, tableCoprocessorLoaded.get()); + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED + && CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED, + tableCoprocessorLoaded.get()); } @Test @@ -131,7 +133,7 @@ public void testRegionServerCoprocessorHostDefaults() throws Exception { 
systemCoprocessorLoaded.set(false); new RegionServerCoprocessorHost(rsServices, conf); assertEquals("System coprocessors loading default was not honored", - CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get()); + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get()); } @Test @@ -141,7 +143,7 @@ public void testMasterCoprocessorHostDefaults() throws Exception { systemCoprocessorLoaded.set(false); new MasterCoprocessorHost(masterServices, conf); assertEquals("System coprocessors loading default was not honored", - CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get()); + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get()); } @Test @@ -155,10 +157,8 @@ public void testRegionCoprocessorHostAllDisabled() throws Exception { systemCoprocessorLoaded.set(false); tableCoprocessorLoaded.set(false); new RegionCoprocessorHost(region, rsServices, conf); - assertFalse("System coprocessors should not have been loaded", - systemCoprocessorLoaded.get()); - assertFalse("Table coprocessors should not have been loaded", - tableCoprocessorLoaded.get()); + assertFalse("System coprocessors should not have been loaded", systemCoprocessorLoaded.get()); + assertFalse("Table coprocessors should not have been loaded", tableCoprocessorLoaded.get()); } @Test @@ -173,15 +173,13 @@ public void testRegionCoprocessorHostTableLoadingDisabled() throws Exception { systemCoprocessorLoaded.set(false); tableCoprocessorLoaded.set(false); new RegionCoprocessorHost(region, rsServices, conf); - assertTrue("System coprocessors should have been loaded", - systemCoprocessorLoaded.get()); - assertFalse("Table coprocessors should not have been loaded", - tableCoprocessorLoaded.get()); + assertTrue("System coprocessors should have been loaded", systemCoprocessorLoaded.get()); + assertFalse("Table coprocessors should not have been loaded", tableCoprocessorLoaded.get()); } /** - * Rough test that Coprocessor Environment is Read-Only. - * Just check a random CP and see that it returns a read-only config. + * Rough test that Coprocessor Environment is Read-Only. Just check a random CP and see that it + * returns a read-only config. */ @Test public void testReadOnlyConfiguration() throws Exception { @@ -192,7 +190,7 @@ public void testReadOnlyConfiguration() throws Exception { RegionServerServices rsServices = mock(RegionServerServices.class); RegionCoprocessorHost rcp = new RegionCoprocessorHost(region, rsServices, conf); boolean found = false; - for (String cpStr: rcp.getCoprocessors()) { + for (String cpStr : rcp.getCoprocessors()) { CoprocessorEnvironment cpenv = rcp.findCoprocessorEnvironment(cpStr); if (cpenv != null) { found = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java index 9531a7d045bc..bbcfa4370e35 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.io.File; import java.lang.reflect.InvocationTargetException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Coprocessor; @@ -37,7 +36,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestCoprocessorHost { @ClassRule @@ -74,12 +73,11 @@ public void testDoubleLoadingAndPriorityValue() { host = new CoprocessorHostForTest<>(conf); int overridePriority = Integer.MAX_VALUE - 1; - final String coprocessor_v3 = - SimpleRegionObserverV3.class.getName() + "|" + overridePriority; + final String coprocessor_v3 = SimpleRegionObserverV3.class.getName() + "|" + overridePriority; // Try and load a coprocessor three times conf.setStrings(key, coprocessor, coprocessor, coprocessor, - SimpleRegionObserverV2.class.getName(), coprocessor_v3); + SimpleRegionObserverV2.class.getName(), coprocessor_v3); host.loadSystemCoprocessors(conf, key); // Three coprocessors(SimpleRegionObserver, SimpleRegionObserverV2, @@ -87,12 +85,12 @@ public void testDoubleLoadingAndPriorityValue() { Assert.assertEquals(3, host.coprocEnvironments.size()); // Check the priority value - CoprocessorEnvironment simpleEnv = host.findCoprocessorEnvironment( - SimpleRegionObserver.class.getName()); - CoprocessorEnvironment simpleEnv_v2 = host.findCoprocessorEnvironment( - SimpleRegionObserverV2.class.getName()); - CoprocessorEnvironment simpleEnv_v3 = host.findCoprocessorEnvironment( - SimpleRegionObserverV3.class.getName()); + CoprocessorEnvironment simpleEnv = + host.findCoprocessorEnvironment(SimpleRegionObserver.class.getName()); + CoprocessorEnvironment simpleEnv_v2 = + host.findCoprocessorEnvironment(SimpleRegionObserverV2.class.getName()); + CoprocessorEnvironment simpleEnv_v3 = + host.findCoprocessorEnvironment(SimpleRegionObserverV3.class.getName()); assertNotNull(simpleEnv); assertNotNull(simpleEnv_v2); @@ -119,14 +117,14 @@ public void testLoadSystemCoprocessorWithPath() throws Exception { // make a string of coprocessor with only priority int overridePriority = Integer.MAX_VALUE - 1; final String coprocessorWithPriority = - SimpleRegionObserverV3.class.getName() + "|" + overridePriority; + SimpleRegionObserverV3.class.getName() + "|" + overridePriority; // make a string of coprocessor with path but no priority final String coprocessorWithPath = - String.format("%s|%s|%s", testClassName, "", jarFile.getAbsolutePath()); + String.format("%s|%s|%s", testClassName, "", jarFile.getAbsolutePath()); // make a string of coprocessor with priority and path - final String coprocessorWithPriorityAndPath = String - .format("%s|%s|%s", testClassNameWithPriorityAndPath, (overridePriority - 1), - jarFileWithPriorityAndPath.getAbsolutePath()); + final String coprocessorWithPriorityAndPath = + String.format("%s|%s|%s", testClassNameWithPriorityAndPath, (overridePriority - 1), + jarFileWithPriorityAndPath.getAbsolutePath()); // Try and load a system coprocessors conf.setStrings(key, SimpleRegionObserverV2.class.getName(), coprocessorWithPriority, @@ -135,25 +133,25 @@ public void testLoadSystemCoprocessorWithPath() throws Exception { // first loaded system coprocessor with default priority CoprocessorEnvironment simpleEnv = - host.findCoprocessorEnvironment(SimpleRegionObserverV2.class.getName()); + 
host.findCoprocessorEnvironment(SimpleRegionObserverV2.class.getName()); assertNotNull(simpleEnv); assertEquals(Coprocessor.PRIORITY_SYSTEM, simpleEnv.getPriority()); // external system coprocessor with default priority CoprocessorEnvironment coprocessorEnvironmentWithPath = - host.findCoprocessorEnvironment(testClassName); + host.findCoprocessorEnvironment(testClassName); assertNotNull(coprocessorEnvironmentWithPath); assertEquals(Coprocessor.PRIORITY_SYSTEM + 1, coprocessorEnvironmentWithPath.getPriority()); // system coprocessor with configured priority CoprocessorEnvironment coprocessorEnvironmentWithPriority = - host.findCoprocessorEnvironment(SimpleRegionObserverV3.class.getName()); + host.findCoprocessorEnvironment(SimpleRegionObserverV3.class.getName()); assertNotNull(coprocessorEnvironmentWithPriority); assertEquals(overridePriority, coprocessorEnvironmentWithPriority.getPriority()); // external system coprocessor with override priority CoprocessorEnvironment coprocessorEnvironmentWithPriorityAndPath = - host.findCoprocessorEnvironment(testClassNameWithPriorityAndPath); + host.findCoprocessorEnvironment(testClassNameWithPriorityAndPath); assertNotNull(coprocessorEnvironmentWithPriorityAndPath); assertEquals(overridePriority - 1, coprocessorEnvironmentWithPriorityAndPath.getPriority()); } finally { @@ -196,7 +194,7 @@ public void testLoadSystemCoprocessorWithPathDoesNotExistAndPriority() throws Ex int overridePriority = Integer.MAX_VALUE - 1; // make a string of coprocessor with path and priority final String coprocessor = - testClassName + "|" + overridePriority + "|" + testClassName + ".jar"; + testClassName + "|" + overridePriority + "|" + testClassName + ".jar"; // Try and load a system coprocessors conf.setStrings(key, coprocessor); @@ -204,14 +202,15 @@ public void testLoadSystemCoprocessorWithPathDoesNotExistAndPriority() throws Ex host.loadSystemCoprocessors(conf, key); } - public static class SimpleRegionObserverV2 extends SimpleRegionObserver { } + public static class SimpleRegionObserverV2 extends SimpleRegionObserver { + } public static class SimpleRegionObserverV3 extends SimpleRegionObserver { } - private static class CoprocessorHostForTest extends - CoprocessorHost> { + private static class CoprocessorHostForTest + extends CoprocessorHost> { final Configuration cpHostConf; public CoprocessorHostForTest(Configuration conf) { @@ -239,7 +238,8 @@ public CoprocessorEnvironment createEnvironment(final E instance, final int p private File buildCoprocessorJar(String className) throws Exception { String dataTestDir = TEST_UTIL.getDataTestDir().toString(); String code = String.format("import org.apache.hadoop.hbase.coprocessor.*; public class %s" - + " implements RegionCoprocessor {}", className); + + " implements RegionCoprocessor {}", + className); return ClassLoaderTestHelper.buildJar(dataTestDir, className, code); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index b1cc99752355..14467be53072 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -71,14 +71,15 @@ import org.junit.rules.TestName; import org.mockito.Mockito; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class 
TestCoprocessorInterface { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCoprocessorInterface.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); static final Path DIR = TEST_UTIL.getDataTestDir(); @@ -96,20 +97,17 @@ public boolean next(List results) throws IOException { } @Override - public boolean next(List result, ScannerContext scannerContext) - throws IOException { + public boolean next(List result, ScannerContext scannerContext) throws IOException { return delegate.next(result, scannerContext); } @Override - public boolean nextRaw(List result) - throws IOException { + public boolean nextRaw(List result) throws IOException { return delegate.nextRaw(result); } @Override - public boolean nextRaw(List result, ScannerContext context) - throws IOException { + public boolean nextRaw(List result, ScannerContext context) throws IOException { return delegate.nextRaw(result, context); } @@ -165,7 +163,7 @@ public static class CoprocessorImpl implements RegionCoprocessor, RegionObserver @Override public void start(CoprocessorEnvironment e) { - sharedData = ((RegionCoprocessorEnvironment)e).getSharedData(); + sharedData = ((RegionCoprocessorEnvironment) e).getSharedData(); // using new String here, so that there will be new object on each invocation sharedData.putIfAbsent("test1", new Object()); startCalled = true; @@ -186,29 +184,33 @@ public Optional getRegionObserver() { public void preOpen(ObserverContext e) { preOpenCalled = true; } + @Override public void postOpen(ObserverContext e) { postOpenCalled = true; } + @Override public void preClose(ObserverContext e, boolean abortRequested) { preCloseCalled = true; } + @Override public void postClose(ObserverContext e, boolean abortRequested) { postCloseCalled = true; } + @Override - public InternalScanner preCompact(ObserverContext e, - Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, + public InternalScanner preCompact(ObserverContext e, Store store, + InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, CompactionRequest request) { preCompactCalled = true; return scanner; } + @Override - public void postCompact(ObserverContext e, - Store store, StoreFile resultFile, CompactionLifeCycleTracker tracker, - CompactionRequest request) { + public void postCompact(ObserverContext e, Store store, + StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) { postCompactCalled = true; } @@ -233,21 +235,27 @@ public RegionScanner postScannerOpen(final ObserverContext getSharedData() { return sharedData; } @@ -258,7 +266,7 @@ public static class CoprocessorII implements RegionCoprocessor { @Override public void start(CoprocessorEnvironment e) { - sharedData = ((RegionCoprocessorEnvironment)e).getSharedData(); + sharedData = ((RegionCoprocessorEnvironment) e).getSharedData(); sharedData.putIfAbsent("test2", new Object()); } @@ -271,8 +279,8 @@ public void stop(CoprocessorEnvironment e) { public Optional getRegionObserver() { return Optional.of(new RegionObserver() { @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { + public void preGetOp(final ObserverContext e, final Get get, + final List results) throws IOException { throw new RuntimeException(); } }); @@ -286,10 +294,10 @@ Map getSharedData() { @Test public void 
testSharedData() throws IOException { TableName tableName = TableName.valueOf(name.getMethodName()); - byte [][] families = { fam1, fam2, fam3 }; + byte[][] families = { fam1, fam2, fam3 }; Configuration hc = initConfig(); - HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class[]{}, families); + HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class[] {}, families); for (int i = 0; i < 3; i++) { HTestConst.addContent(region, fam3); @@ -302,24 +310,24 @@ public void testSharedData() throws IOException { Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); Coprocessor c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class); - Object o = ((CoprocessorImpl)c).getSharedData().get("test1"); - Object o2 = ((CoprocessorII)c2).getSharedData().get("test2"); + Object o = ((CoprocessorImpl) c).getSharedData().get("test1"); + Object o2 = ((CoprocessorII) c2).getSharedData().get("test2"); assertNotNull(o); assertNotNull(o2); // to coprocessors get different sharedDatas - assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData()); + assertFalse(((CoprocessorImpl) c).getSharedData() == ((CoprocessorII) c2).getSharedData()); c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class); // make sure that all coprocessor of a class have identical sharedDatas - assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o); - assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2); + assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o); + assertTrue(((CoprocessorII) c2).getSharedData().get("test2") == o2); // now have all Environments fail try { - byte [] r = region.getRegionInfo().getStartKey(); + byte[] r = region.getRegionInfo().getStartKey(); if (r == null || r.length <= 0) { - // Its the start row. Can't ask for null. Ask for minimal key instead. - r = new byte [] {0}; + // Its the start row. Can't ask for null. Ask for minimal key instead. 
+ r = new byte[] { 0 }; } Get g = new Get(r); region.get(g); @@ -328,7 +336,7 @@ public void testSharedData() throws IOException { } assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class)); c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); - assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o); + assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o); c = c2 = null; // perform a GC System.gc(); @@ -336,11 +344,11 @@ public void testSharedData() throws IOException { region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class); c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); // CPimpl is unaffected, still the same reference - assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o); + assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o); c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class); // new map and object created, hence the reference is different // hence the old entry was indeed removed by the GC and new one has been created - Object o3 = ((CoprocessorII)c2).getSharedData().get("test2"); + Object o3 = ((CoprocessorII) c2).getSharedData().get("test2"); assertFalse(o3 == o2); HBaseTestingUtil.closeRegionAndWAL(region); } @@ -348,11 +356,11 @@ public void testSharedData() throws IOException { @Test public void testCoprocessorInterface() throws IOException { TableName tableName = TableName.valueOf(name.getMethodName()); - byte [][] families = { fam1, fam2, fam3 }; + byte[][] families = { fam1, fam2, fam3 }; Configuration hc = initConfig(); HRegion region = initHRegion(tableName, name.getMethodName(), hc, - new Class[]{CoprocessorImpl.class}, families); + new Class[] { CoprocessorImpl.class }, families); for (int i = 0; i < 3; i++) { HTestConst.addContent(region, fam3); region.flush(true); @@ -370,17 +378,16 @@ public void testCoprocessorInterface() throws IOException { HBaseTestingUtil.closeRegionAndWAL(region); Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); - assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted()); - assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped()); - assertTrue(((CoprocessorImpl)c).wasOpened()); - assertTrue(((CoprocessorImpl)c).wasClosed()); - assertTrue(((CoprocessorImpl)c).wasFlushed()); - assertTrue(((CoprocessorImpl)c).wasCompacted()); + assertTrue("Coprocessor not started", ((CoprocessorImpl) c).wasStarted()); + assertTrue("Coprocessor not stopped", ((CoprocessorImpl) c).wasStopped()); + assertTrue(((CoprocessorImpl) c).wasOpened()); + assertTrue(((CoprocessorImpl) c).wasClosed()); + assertTrue(((CoprocessorImpl) c).wasFlushed()); + assertTrue(((CoprocessorImpl) c).wasCompacted()); } - HRegion reopenRegion(final HRegion closedRegion, Class ... implClasses) - throws IOException { - //RegionInfo info = new RegionInfo(tableName, null, null, false); + HRegion reopenRegion(final HRegion closedRegion, Class... implClasses) throws IOException { + // RegionInfo info = new RegionInfo(tableName, null, null, false); HRegion r = HRegion.openHRegion(closedRegion, null); // this following piece is a hack. currently a coprocessorHost @@ -388,8 +395,8 @@ HRegion reopenRegion(final HRegion closedRegion, Class ... implClasses) // start a region server here, so just manually create cphost // and set it to region. 
Configuration conf = TEST_UTIL.getConfiguration(); - RegionCoprocessorHost host = new RegionCoprocessorHost(r, - Mockito.mock(RegionServerServices.class), conf); + RegionCoprocessorHost host = + new RegionCoprocessorHost(r, Mockito.mock(RegionServerServices.class), conf); r.setCoprocessorHost(host); for (Class implClass : implClasses) { @@ -407,18 +414,15 @@ HRegion reopenRegion(final HRegion closedRegion, Class ... implClasses) } HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, - Class[] implClasses, byte[][] families) throws IOException { + Class[] implClasses, byte[][] families) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] family : families) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - RegionInfo info = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(null) - .setEndKey(null) - .setSplit(false) - .build(); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + RegionInfo info = RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null) + .setSplit(false).build(); Path path = new Path(DIR + callingMethod); HRegion r = HBaseTestingUtil.createRegionAndWAL(info, path, conf, builder.build()); @@ -446,9 +450,8 @@ private Configuration initConfig() { // Increase the amount of time between client retries TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 15 * 1000); // This size should make it so we always split using the addContent - // below. After adding all data, the first region is 1.3M - TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, - 1024 * 128); + // below. After adding all data, the first region is 1.3M + TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 1024 * 128); TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false); return TEST_UTIL.getConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java index 60aef8d539cf..8df71d88762b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java @@ -85,7 +85,7 @@ /** * Testing of coprocessor metrics end-to-end. 
*/ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorMetrics { @ClassRule @@ -110,14 +110,14 @@ public static class CustomMasterObserver implements MasterCoprocessor, MasterObs @Override public void preCreateTable(ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException { + TableDescriptor desc, RegionInfo[] regions) throws IOException { // we rely on the fact that there is only 1 instance of our MasterObserver this.start = EnvironmentEdgeManager.currentTime(); } @Override public void postCreateTable(ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException { + TableDescriptor desc, RegionInfo[] regions) throws IOException { if (this.start > 0) { long time = EnvironmentEdgeManager.currentTime() - start; LOG.info("Create table took: " + time); @@ -128,10 +128,9 @@ public void postCreateTable(ObserverContext ctx, @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof MasterCoprocessorEnvironment) { - MetricRegistry registry = - ((MasterCoprocessorEnvironment) env).getMetricRegistryForMaster(); + MetricRegistry registry = ((MasterCoprocessorEnvironment) env).getMetricRegistryForMaster(); - createTableTimer = registry.timer("CreateTable"); + createTableTimer = registry.timer("CreateTable"); } } @@ -144,12 +143,13 @@ public Optional getMasterObserver() { /** * RegionServerObserver that has a Counter for rollWAL requests. */ - public static class CustomRegionServerObserver implements RegionServerCoprocessor, - RegionServerObserver { + public static class CustomRegionServerObserver + implements RegionServerCoprocessor, RegionServerObserver { /** This is the Counter metric object to keep track of the current count across invocations */ private Counter rollWALCounter; - @Override public Optional getRegionServerObserver() { + @Override + public Optional getRegionServerObserver() { return Optional.of(this); } @@ -181,8 +181,7 @@ public static class CustomWALObserver implements WALCoprocessor, WALObserver { @Override public void postWALWrite(ObserverContext ctx, - RegionInfo info, WALKey logKey, - WALEdit logEdit) throws IOException { + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { walEditsCount.increment(); } @@ -198,7 +197,8 @@ public void start(CoprocessorEnvironment env) throws IOException { } } - @Override public Optional getWALObserver() { + @Override + public Optional getWALObserver() { return Optional.of(this); } } @@ -211,7 +211,7 @@ public static class CustomRegionObserver implements RegionCoprocessor, RegionObs @Override public void preGetOp(ObserverContext e, Get get, - List results) throws IOException { + List results) throws IOException { preGetCounter.increment(); } @@ -245,7 +245,7 @@ public static class CustomRegionEndpoint extends MultiRowMutationEndpoint { @Override public void mutateRows(RpcController controller, MutateRowsRequest request, - RpcCallback done) { + RpcCallback done) { long start = System.nanoTime(); super.mutateRows(controller, request, done); endpointExecution.updateNanos(System.nanoTime() - start); @@ -270,12 +270,10 @@ public void start(CoprocessorEnvironment env) throws IOException { public static void setupBeforeClass() throws Exception { Configuration conf = UTIL.getConfiguration(); // inject master, regionserver and WAL coprocessors - conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - CustomMasterObserver.class.getName()); + 
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, CustomMasterObserver.class.getName()); conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - CustomRegionServerObserver.class.getName()); - conf.set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - CustomWALObserver.class.getName()); + CustomRegionServerObserver.class.getName()); + conf.set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, CustomWALObserver.class.getName()); conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true); UTIL.startMiniCluster(); } @@ -288,7 +286,7 @@ public static void teardownAfterClass() throws Exception { @Before public void setup() throws IOException { try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { for (TableDescriptor htd : admin.listTableDescriptors()) { UTIL.deleteTable(htd.getTableName()); } @@ -298,24 +296,24 @@ public void setup() throws IOException { @Test public void testMasterObserver() throws IOException { // Find out the MetricRegistry used by the CP using the global registries - MetricRegistryInfo info = MetricsCoprocessor.createRegistryInfoForMasterCoprocessor( - CustomMasterObserver.class.getName()); - Optional registry = MetricRegistries.global().get(info); + MetricRegistryInfo info = MetricsCoprocessor + .createRegistryInfoForMasterCoprocessor(CustomMasterObserver.class.getName()); + Optional registry = MetricRegistries.global().get(info); assertTrue(registry.isPresent()); Optional metric = registry.get().get("CreateTable"); assertTrue(metric.isPresent()); try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { - Timer createTableTimer = (Timer)metric.get(); + Timer createTableTimer = (Timer) metric.get(); long prevCount = createTableTimer.getHistogram().getCount(); LOG.info("Creating table"); TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); assertEquals(1, createTableTimer.getHistogram().getCount() - prevCount); @@ -325,47 +323,47 @@ public void testMasterObserver() throws IOException { @Test public void testRegionServerObserver() throws IOException { try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { LOG.info("Rolling WALs"); admin.rollWALWriter(UTIL.getMiniHBaseCluster().getServerHoldingMeta()); } // Find out the MetricRegistry used by the CP using the global registries - MetricRegistryInfo info = MetricsCoprocessor.createRegistryInfoForRSCoprocessor( - CustomRegionServerObserver.class.getName()); + MetricRegistryInfo info = MetricsCoprocessor + .createRegistryInfoForRSCoprocessor(CustomRegionServerObserver.class.getName()); - Optional registry = MetricRegistries.global().get(info); + Optional registry = MetricRegistries.global().get(info); assertTrue(registry.isPresent()); Optional metric = registry.get().get("rollWALRequests"); assertTrue(metric.isPresent()); - Counter 
rollWalRequests = (Counter)metric.get(); + Counter rollWalRequests = (Counter) metric.get(); assertEquals(1, rollWalRequests.getCount()); } @Test public void testWALObserver() throws IOException { // Find out the MetricRegistry used by the CP using the global registries - MetricRegistryInfo info = MetricsCoprocessor.createRegistryInfoForWALCoprocessor( - CustomWALObserver.class.getName()); + MetricRegistryInfo info = + MetricsCoprocessor.createRegistryInfoForWALCoprocessor(CustomWALObserver.class.getName()); - Optional registry = MetricRegistries.global().get(info); + Optional registry = MetricRegistries.global().get(info); assertTrue(registry.isPresent()); Optional metric = registry.get().get("walEditsCount"); assertTrue(metric.isPresent()); try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); - Counter rollWalRequests = (Counter)metric.get(); + Counter rollWalRequests = (Counter) metric.get(); long prevCount = rollWalRequests.getCount(); assertTrue(prevCount > 0); @@ -382,16 +380,16 @@ public void testWALObserver() throws IOException { */ private void assertPreGetRequestsCounter(Class coprocClass) { // Find out the MetricRegistry used by the CP using the global registries - MetricRegistryInfo info = MetricsCoprocessor.createRegistryInfoForRegionCoprocessor( - coprocClass.getName()); + MetricRegistryInfo info = + MetricsCoprocessor.createRegistryInfoForRegionCoprocessor(coprocClass.getName()); - Optional registry = MetricRegistries.global().get(info); + Optional registry = MetricRegistries.global().get(info); assertTrue(registry.isPresent()); Optional metric = registry.get().get("preGetRequests"); assertTrue(metric.isPresent()); - Counter preGetRequests = (Counter)metric.get(); + Counter preGetRequests = (Counter) metric.get(); assertEquals(2, preGetRequests.getCount()); } @@ -399,11 +397,11 @@ private void assertPreGetRequestsCounter(Class coprocClass) { public void testRegionObserverSingleRegion() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) - // add the coprocessor for the region - .setCoprocessor(CustomRegionObserver.class.getName()).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) + // add the coprocessor for the region + .setCoprocessor(CustomRegionObserver.class.getName()).build()); try (Table table = connection.getTable(tableName)) { table.get(new Get(foo)); table.get(new Get(foo)); // 2 gets @@ -417,19 +415,20 @@ public void testRegionObserverSingleRegion() throws IOException { public void testRegionObserverMultiRegion() throws IOException { final TableName tableName = 
TableName.valueOf(name.getMethodName()); try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) - // add the coprocessor for the region - .setCoprocessor(CustomRegionObserver.class.getName()).build(), new byte[][] { foo }); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) + // add the coprocessor for the region + .setCoprocessor(CustomRegionObserver.class.getName()).build(), + new byte[][] { foo }); // create with 2 regions try (Table table = connection.getTable(tableName); - RegionLocator locator = connection.getRegionLocator(tableName)) { + RegionLocator locator = connection.getRegionLocator(tableName)) { table.get(new Get(bar)); table.get(new Get(foo)); // 2 gets to 2 separate regions assertEquals(2, locator.getAllRegionLocations().size()); assertNotEquals(locator.getRegionLocation(bar).getRegion(), - locator.getRegionLocation(foo).getRegion()); + locator.getRegionLocation(foo).getRegion()); } } @@ -441,17 +440,17 @@ public void testRegionObserverMultiTable() throws IOException { final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1"); final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2"); try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.createTable(TableDescriptorBuilder.newBuilder(tableName1) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) - // add the coprocessor for the region - .setCoprocessor(CustomRegionObserver.class.getName()).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) + // add the coprocessor for the region + .setCoprocessor(CustomRegionObserver.class.getName()).build()); admin.createTable(TableDescriptorBuilder.newBuilder(tableName2) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) - // add the coprocessor for the region - .setCoprocessor(CustomRegionObserver.class.getName()).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) + // add the coprocessor for the region + .setCoprocessor(CustomRegionObserver.class.getName()).build()); try (Table table1 = connection.getTable(tableName1); - Table table2 = connection.getTable(tableName2)) { + Table table2 = connection.getTable(tableName2)) { table1.get(new Get(bar)); table2.get(new Get(foo)); // 2 gets to 2 separate tables } @@ -463,12 +462,12 @@ public void testRegionObserverMultiTable() throws IOException { public void testRegionObserverMultiCoprocessor() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) - // add the coprocessor for the region. We add two different coprocessors - .setCoprocessor(CustomRegionObserver.class.getName()) - .setCoprocessor(CustomRegionObserver2.class.getName()).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) + // add the coprocessor for the region. 
We add two different coprocessors + .setCoprocessor(CustomRegionObserver.class.getName()) + .setCoprocessor(CustomRegionObserver2.class.getName()).build()); try (Table table = connection.getTable(tableName)) { table.get(new Get(foo)); table.get(new Get(foo)); // 2 gets @@ -484,11 +483,12 @@ public void testRegionObserverMultiCoprocessor() throws IOException { public void testRegionObserverAfterRegionClosed() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) - // add the coprocessor for the region - .setCoprocessor(CustomRegionObserver.class.getName()).build(), new byte[][] { foo }); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) + // add the coprocessor for the region + .setCoprocessor(CustomRegionObserver.class.getName()).build(), + new byte[][] { foo }); // create with 2 regions try (Table table = connection.getTable(tableName)) { table.get(new Get(foo)); @@ -503,8 +503,7 @@ public void testRegionObserverAfterRegionClosed() throws IOException { admin.unassign(loc.getRegion().getEncodedNameAsBytes(), true); HRegionServer server = UTIL.getMiniHBaseCluster().getRegionServer(loc.getServerName()); - UTIL.waitFor(30000, - () -> server.getOnlineRegion(loc.getRegion().getRegionName()) == null); + UTIL.waitFor(30000, () -> server.getOnlineRegion(loc.getRegion().getRegionName()) == null); assertNull(server.getOnlineRegion(loc.getRegion().getRegionName())); } @@ -514,11 +513,11 @@ public void testRegionObserverAfterRegionClosed() throws IOException { // close the table admin.disableTable(tableName); - MetricRegistryInfo info = MetricsCoprocessor.createRegistryInfoForRegionCoprocessor( - CustomRegionObserver.class.getName()); + MetricRegistryInfo info = MetricsCoprocessor + .createRegistryInfoForRegionCoprocessor(CustomRegionObserver.class.getName()); // ensure that MetricRegistry is deleted - Optional registry = MetricRegistries.global().get(info); + Optional registry = MetricRegistries.global().get(info); assertFalse(registry.isPresent()); } } @@ -527,19 +526,19 @@ public void testRegionObserverAfterRegionClosed() throws IOException { public void testRegionObserverEndpoint() throws IOException, ServiceException { final TableName tableName = TableName.valueOf(name.getMethodName()); try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) - // add the coprocessor for the region - .setCoprocessor(CustomRegionEndpoint.class.getName()).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(foo)) + // add the coprocessor for the region + .setCoprocessor(CustomRegionEndpoint.class.getName()).build()); try (Table table = connection.getTable(tableName)) { List mutations = Lists.newArrayList(new Put(foo), new Put(bar)); MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); for (Mutation mutation : mutations) { - mrmBuilder.addMutationRequest(ProtobufUtil.toMutation( - ClientProtos.MutationProto.MutationType.PUT, mutation)); + mrmBuilder.addMutationRequest( + 
ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation)); } CoprocessorRpcChannel channel = table.coprocessorService(bar); @@ -551,16 +550,16 @@ public void testRegionObserverEndpoint() throws IOException, ServiceException { } // Find out the MetricRegistry used by the CP using the global registries - MetricRegistryInfo info = MetricsCoprocessor.createRegistryInfoForRegionCoprocessor( - CustomRegionEndpoint.class.getName()); + MetricRegistryInfo info = MetricsCoprocessor + .createRegistryInfoForRegionCoprocessor(CustomRegionEndpoint.class.getName()); - Optional registry = MetricRegistries.global().get(info); + Optional registry = MetricRegistries.global().get(info); assertTrue(registry.isPresent()); Optional metric = registry.get().get("EndpointExecution"); assertTrue(metric.isPresent()); - Timer endpointExecutions = (Timer)metric.get(); + Timer endpointExecutions = (Timer) metric.get(); assertEquals(1, endpointExecutions.getHistogram().getCount()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorSharedConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorSharedConnection.java index 88c6e4df81c7..fab707a3f406 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorSharedConnection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorSharedConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,11 +59,11 @@ public static void beforeClass() throws Exception { // Set my test Coprocessors into the Configuration before we start up the cluster. Configuration conf = HTU.getConfiguration(); conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - TestMasterCoprocessor.class.getName()); + TestMasterCoprocessor.class.getName()); conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - TestRegionServerCoprocessor.class.getName()); + TestRegionServerCoprocessor.class.getName()); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - TestRegionCoprocessor.class.getName()); + TestRegionCoprocessor.class.getName()); HTU.startMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java index 7cf3a38b7279..6ba2343538f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,9 +40,8 @@ /** * Tests for master and regionserver coprocessor stop method - * */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorStop { @ClassRule @@ -52,8 +51,8 @@ public class TestCoprocessorStop { private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorStop.class); private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static final String MASTER_FILE = "master" + EnvironmentEdgeManager.currentTime(); - private static final String REGIONSERVER_FILE = "regionserver" + - EnvironmentEdgeManager.currentTime(); + private static final String REGIONSERVER_FILE = + "regionserver" + EnvironmentEdgeManager.currentTime(); public static class FooCoprocessor implements MasterCoprocessor, RegionServerCoprocessor { @Override @@ -97,10 +96,8 @@ public void stop(CoprocessorEnvironment env) throws IOException { public static void setupBeforeClass() throws Exception { Configuration conf = UTIL.getConfiguration(); - conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - FooCoprocessor.class.getName()); - conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - FooCoprocessor.class.getName()); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, FooCoprocessor.class.getName()); + conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, FooCoprocessor.class.getName()); UTIL.startMiniCluster(); } @@ -112,7 +109,7 @@ public static void tearDownAfterClass() throws Exception { @Test public void testStopped() throws Exception { - //shutdown hbase only. then check flag file. + // shutdown hbase only. then check flag file. SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster(); LOG.info("shutdown hbase cluster..."); cluster.shutdown(); @@ -123,9 +120,9 @@ public void testStopped() throws Exception { FileSystem fs = FileSystem.get(conf); Path resultFile = new Path(UTIL.getDataTestDirOnTestFS(), MASTER_FILE); - assertTrue("Master flag file should have been created",fs.exists(resultFile)); + assertTrue("Master flag file should have been created", fs.exists(resultFile)); resultFile = new Path(UTIL.getDataTestDirOnTestFS(), REGIONSERVER_FILE); - assertTrue("RegionServer flag file should have been created",fs.exists(resultFile)); + assertTrue("RegionServer flag file should have been created", fs.exists(resultFile)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreMasterCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreMasterCoprocessor.java index f274e397114a..a2b43244b4b6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreMasterCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreMasterCoprocessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,16 +38,17 @@ import org.junit.rules.TestName; /** - * Test CoreCoprocessor Annotation works giving access to facility not usually available. - * Test MasterCoprocessor. + * Test CoreCoprocessor Annotation works giving access to facility not usually available. Test + * MasterCoprocessor. 
*/ -@Category({CoprocessorTests.class, SmallTests.class}) +@Category({ CoprocessorTests.class, SmallTests.class }) public class TestCoreMasterCoprocessor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCoreMasterCoprocessor.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final HBaseTestingUtil HTU = new HBaseTestingUtil(); private MasterServices ms; private MasterCoprocessorHost mch; @@ -70,22 +71,23 @@ public void after() throws IOException { * MasterServices instance after some gymnastics. */ public static class NotCoreMasterCoprocessor implements MasterCoprocessor { - public NotCoreMasterCoprocessor() {} + public NotCoreMasterCoprocessor() { + } } /** - * Annotate with CoreCoprocessor. This should make it so I can get at instance of a - * MasterServices instance after some gymnastics. + * Annotate with CoreCoprocessor. This should make it so I can get at instance of a MasterServices + * instance after some gymnastics. */ @CoreCoprocessor public static class CoreMasterCoprocessor implements MasterCoprocessor { - public CoreMasterCoprocessor() {} + public CoreMasterCoprocessor() { + } } /** - * Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to - * access a MasterServices instance. Assert the opposite too. - * Do it to MasterCoprocessors. + * Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to access + * a MasterServices instance. Assert the opposite too. Do it to MasterCoprocessors. * @throws IOException */ @Test @@ -95,6 +97,6 @@ public void testCoreRegionCoprocessor() throws IOException { assertFalse(env instanceof HasMasterServices); env = this.mch.load(null, CoreMasterCoprocessor.class.getName(), 1, HTU.getConfiguration()); assertTrue(env instanceof HasMasterServices); - assertEquals(this.ms, ((HasMasterServices)env).getMasterServices()); + assertEquals(this.ms, ((HasMasterServices) env).getMasterServices()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java index e467e8de2724..b0dbd5401a91 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,17 +47,18 @@ import org.junit.rules.TestName; /** - * Test CoreCoprocessor Annotation works giving access to facility not usually available. - * Test RegionCoprocessor. + * Test CoreCoprocessor Annotation works giving access to facility not usually available. Test + * RegionCoprocessor. 
*/ -@Category({CoprocessorTests.class, SmallTests.class}) +@Category({ CoprocessorTests.class, SmallTests.class }) public class TestCoreRegionCoprocessor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCoreRegionCoprocessor.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); HBaseTestingUtil HTU = new HBaseTestingUtil(); private HRegion region = null; private RegionServerServices rss; @@ -66,7 +67,8 @@ public class TestCoreRegionCoprocessor { public void before() throws IOException { String methodName = this.name.getMethodName(); TableName tn = TableName.valueOf(methodName); - ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(methodName)).build(); + ColumnFamilyDescriptor cfd = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(methodName)).build(); TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfd).build(); RegionInfo ri = RegionInfoBuilder.newBuilder(tn).build(); this.rss = new MockRegionServerServices(HTU.getConfiguration()); @@ -83,7 +85,8 @@ public void after() throws IOException { * RegionServerServices instance after some gymnastics. */ public static class NotCoreRegionCoprocessor implements RegionCoprocessor { - public NotCoreRegionCoprocessor() {} + public NotCoreRegionCoprocessor() { + } } /** @@ -92,13 +95,13 @@ public NotCoreRegionCoprocessor() {} */ @org.apache.hadoop.hbase.coprocessor.CoreCoprocessor public static class CoreRegionCoprocessor implements RegionCoprocessor { - public CoreRegionCoprocessor() {} + public CoreRegionCoprocessor() { + } } /** - * Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to - * access a RegionServerServices instance. Assert the opposite too. - * Do it to RegionCoprocessors. + * Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to access + * a RegionServerServices instance. Assert the opposite too. Do it to RegionCoprocessors. * @throws IOException */ @Test @@ -109,6 +112,6 @@ public void testCoreRegionCoprocessor() throws IOException { assertFalse(env instanceof HasRegionServerServices); env = rch.load(null, CoreRegionCoprocessor.class.getName(), 1, HTU.getConfiguration()); assertTrue(env instanceof HasRegionServerServices); - assertEquals(this.rss, ((HasRegionServerServices)env).getRegionServerServices()); + assertEquals(this.rss, ((HasRegionServerServices) env).getRegionServerServices()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionServerCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionServerCoprocessor.java index 0f2dd1e4cb7c..457e9a572770 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionServerCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionServerCoprocessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,17 +38,18 @@ import org.junit.rules.TestName; /** - * Test CoreCoprocessor Annotation works giving access to facility not usually available. - * Test RegionServerCoprocessor. + * Test CoreCoprocessor Annotation works giving access to facility not usually available. Test + * RegionServerCoprocessor. 
*/ -@Category({CoprocessorTests.class, SmallTests.class}) +@Category({ CoprocessorTests.class, SmallTests.class }) public class TestCoreRegionServerCoprocessor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCoreRegionServerCoprocessor.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final HBaseTestingUtil HTU = new HBaseTestingUtil(); private RegionServerServices rss; private RegionServerCoprocessorHost rsch; @@ -70,7 +71,8 @@ public void after() throws IOException { * RegionServerServices instance after some gymnastics. */ public static class NotCoreRegionServerCoprocessor implements RegionServerCoprocessor { - public NotCoreRegionServerCoprocessor() {} + public NotCoreRegionServerCoprocessor() { + } } /** @@ -79,13 +81,13 @@ public NotCoreRegionServerCoprocessor() {} */ @CoreCoprocessor public static class CoreRegionServerCoprocessor implements RegionServerCoprocessor { - public CoreRegionServerCoprocessor() {} + public CoreRegionServerCoprocessor() { + } } /** - * Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to - * access a RegionServerServices instance. Assert the opposite too. - * Do it to RegionServerCoprocessors. + * Assert that when a Coprocessor is annotated with CoreCoprocessor, then it is possible to access + * a RegionServerServices instance. Assert the opposite too. Do it to RegionServerCoprocessors. * @throws IOException */ @Test @@ -95,6 +97,6 @@ public void testCoreRegionCoprocessor() throws IOException { assertFalse(env instanceof HasRegionServerServices); env = rsch.load(null, CoreRegionServerCoprocessor.class.getName(), 1, HTU.getConfiguration()); assertTrue(env instanceof HasRegionServerServices); - assertEquals(this.rss, ((HasRegionServerServices)env).getRegionServerServices()); + assertEquals(this.rss, ((HasRegionServerServices) env).getRegionServerServices()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementAndAppendWithNullResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementAndAppendWithNullResult.java index fa61cbe33e98..feb0bf5337c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementAndAppendWithNullResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementAndAppendWithNullResult.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.coprocessor; import static org.junit.Assert.assertNotNull; @@ -47,12 +45,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestIncrementAndAppendWithNullResult { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIncrementAndAppendWithNullResult.class); + HBaseClassTestRule.forClass(TestIncrementAndAppendWithNullResult.class); private static final HBaseTestingUtil util = new HBaseTestingUtil(); private static final TableName TEST_TABLE = TableName.valueOf("test"); @@ -64,7 +62,7 @@ public class TestIncrementAndAppendWithNullResult { @BeforeClass public static void setupBeforeClass() throws Exception { util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - MyObserver.class.getName()); + MyObserver.class.getName()); // reduce the retry count so as to speed up the test util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); util.startMiniCluster(); @@ -76,16 +74,12 @@ public static void tearDownAfterClass() throws Exception { util.shutdownMiniCluster(); } - public static class MyObserver implements RegionCoprocessor, RegionObserver { - private static final Result TMP_RESULT = Result.create(Arrays.asList( - CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(Bytes.toBytes("row")) - .setFamily(Bytes.toBytes("family")) - .setQualifier(Bytes.toBytes("qualifier")) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes("value")) - .build())); + private static final Result TMP_RESULT = Result.create(Arrays + .asList(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(Bytes.toBytes("row")) + .setFamily(Bytes.toBytes("family")).setQualifier(Bytes.toBytes("qualifier")) + .setType(Cell.Type.Put).setValue(Bytes.toBytes("value")).build())); + @Override public Optional getRegionObserver() { return Optional.of(this); @@ -99,7 +93,7 @@ public Result preIncrementAfterRowLock(ObserverContext c, - Increment increment, Result result) throws IOException { + Increment increment, Result result) throws IOException { return null; } @@ -119,8 +113,8 @@ public Result preAppendAfterRowLock(ObserverContext e, final Increment increment) throws IOException { - NavigableMap> map = increment.getFamilyCellMap(); - for (Map.Entry> entry : map.entrySet()) { + NavigableMap> map = increment.getFamilyCellMap(); + for (Map.Entry> entry : map.entrySet()) { for (Cell cell : entry.getValue()) { - long incr = Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()); + long incr = + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); if (incr == 10) { tr10 = increment.getTimeRange(); } else if (incr == 2 && !increment.getTimeRange().isAllTime()) { @@ -165,7 +165,7 @@ private void checkHTableInterfaceMethods() throws Exception { time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); - TimeRange range10 = TimeRange.between(1, time+10); + TimeRange range10 = TimeRange.between(1, time + 10); hTableInterface.increment(new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 10L) .setTimeRange(range10.getMin(), range10.getMax())); checkRowValue(ROW_A, Bytes.toBytes(11L)); @@ -175,11 +175,11 @@ private void checkHTableInterfaceMethods() throws Exception { time = EnvironmentEdgeManager.currentTime(); mee.setValue(time); TimeRange range2 = TimeRange.between(1, time + 20); - List 
actions = - Arrays.asList(new Row[] { new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L) - .setTimeRange(range2.getMin(), range2.getMax()), - new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L) - .setTimeRange(range2.getMin(), range2.getMax()) }); + List actions = Arrays.asList(new Row[] { + new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L).setTimeRange(range2.getMin(), + range2.getMax()), + new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L).setTimeRange(range2.getMin(), + range2.getMax()) }); Object[] results3 = new Object[actions.size()]; Object[] results1 = results3; hTableInterface.batch(actions, results1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java index f2fdf932d764..2853918350d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java @@ -51,12 +51,11 @@ import org.junit.experimental.categories.Category; /** - * Tests unhandled exceptions thrown by coprocessors running on master. - * Expected result is that the master will abort with an informative - * error message describing the set of its loaded coprocessors for crash diagnosis. - * (HBASE-4014). + * Tests unhandled exceptions thrown by coprocessors running on master. Expected result is that the + * master will abort with an informative error message describing the set of its loaded coprocessors + * for crash diagnosis. (HBASE-4014). */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestMasterCoprocessorExceptionWithAbort { @ClassRule @@ -80,6 +79,7 @@ public synchronized void nodeDeleted(String path) { public static class CreateTableThread extends Thread { HBaseTestingUtil UTIL; + public CreateTableThread(HBaseTestingUtil UTIL) { this.UTIL = UTIL; } @@ -89,17 +89,17 @@ public void run() { // create a table : master coprocessor will throw an exception and not // catch it. 
TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); try { Admin admin = UTIL.getAdmin(); admin.createTable(tableDescriptor); fail("BuggyMasterObserver failed to throw an exception."); } catch (IOException e) { assertEquals("HBaseAdmin threw an interrupted IOException as expected.", - "java.io.InterruptedIOException", e.getClass().getName()); + "java.io.InterruptedIOException", e.getClass().getName()); } - } + } } public static class BuggyMasterObserver implements MasterCoprocessor, MasterObserver { @@ -154,10 +154,9 @@ public boolean wasStarted() { @BeforeClass public static void setupBeforeClass() throws Exception { Configuration conf = UTIL.getConfiguration(); - conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - BuggyMasterObserver.class.getName()); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, BuggyMasterObserver.class.getName()); conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true); - conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Fail fast + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Fail fast UTIL.startMiniCluster(); } @@ -167,8 +166,7 @@ public static void teardownAfterClass() throws Exception { } @Test - public void testExceptionFromCoprocessorWhenCreatingTable() - throws IOException { + public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException { SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster(); HMaster master = cluster.getMaster(); @@ -178,29 +176,29 @@ public void testExceptionFromCoprocessorWhenCreatingTable() // set a watch on the zookeeper /hbase/master node. If the master dies, // the node will be deleted. - ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), - "unittest", new Abortable() { + ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), "unittest", new Abortable() { @Override public void abort(String why, Throwable e) { throw new RuntimeException("Fatal ZK error: " + why, e); } + @Override public boolean isAborted() { return false; } }); - MasterTracker masterTracker = new MasterTracker(zkw,"/hbase/master", - new Abortable() { - @Override - public void abort(String why, Throwable e) { - throw new RuntimeException("Fatal ZK master tracker error, why=", e); - } - @Override - public boolean isAborted() { - return false; - } - }); + MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() { + @Override + public void abort(String why, Throwable e) { + throw new RuntimeException("Fatal ZK master tracker error, why=", e); + } + + @Override + public boolean isAborted() { + return false; + } + }); masterTracker.start(); zkw.registerListener(masterTracker); @@ -208,12 +206,13 @@ public boolean isAborted() { // Test (part of the) output that should have be printed by master when it aborts: // (namely the part that shows the set of loaded coprocessors). // In this test, there is only a single coprocessor (BuggyMasterObserver). - assertTrue(HMaster.getLoadedCoprocessors(). 
- contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName())); + assertTrue(HMaster.getLoadedCoprocessors() + .contains(TestMasterCoprocessorExceptionWithAbort.BuggyMasterObserver.class.getName())); CreateTableThread createTableThread = new CreateTableThread(UTIL); - // Attempting to create a table (using createTableThread above) triggers an NPE in BuggyMasterObserver. + // Attempting to create a table (using createTableThread above) triggers an NPE in + // BuggyMasterObserver. // Master will then abort and the /hbase/master zk node will be deleted. createTableThread.start(); @@ -225,22 +224,20 @@ public boolean isAborted() { try { Thread.sleep(1000); } catch (InterruptedException e) { - fail("InterruptedException while waiting for master zk node to " - + "be deleted."); + fail("InterruptedException while waiting for master zk node to " + "be deleted."); } } assertTrue("Master aborted on coprocessor exception, as expected.", - masterTracker.masterZKNodeWasDeleted); + masterTracker.masterZKNodeWasDeleted); createTableThread.interrupt(); try { createTableThread.join(1000); } catch (InterruptedException e) { - assertTrue("Ignoring InterruptedException while waiting for " + - " createTableThread.join().", true); + assertTrue("Ignoring InterruptedException while waiting for " + " createTableThread.join().", + true); } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java index 42f129db018b..212a014ff8c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java @@ -49,13 +49,11 @@ import org.junit.experimental.categories.Category; /** - * Tests unhandled exceptions thrown by coprocessors running on master. - * Expected result is that the master will remove the buggy coprocessor from - * its set of coprocessors and throw a org.apache.hadoop.hbase.exceptions.DoNotRetryIOException - * back to the client. - * (HBASE-4014). + * Tests unhandled exceptions thrown by coprocessors running on master. Expected result is that the + * master will remove the buggy coprocessor from its set of coprocessors and throw a + * org.apache.hadoop.hbase.exceptions.DoNotRetryIOException back to the client. (HBASE-4014). 
*/ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestMasterCoprocessorExceptionWithRemove { @ClassRule @@ -135,8 +133,7 @@ public boolean wasStarted() { @BeforeClass public static void setupBeforeClass() throws Exception { Configuration conf = UTIL.getConfiguration(); - conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - BuggyMasterObserver.class.getName()); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, BuggyMasterObserver.class.getName()); UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false); UTIL.startMiniCluster(); } @@ -147,8 +144,7 @@ public static void teardownAfterClass() throws Exception { } @Test - public void testExceptionFromCoprocessorWhenCreatingTable() - throws IOException { + public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException { SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster(); HMaster master = cluster.getMaster(); @@ -162,29 +158,29 @@ public void testExceptionFromCoprocessorWhenCreatingTable() // we are testing that the default setting of hbase.coprocessor.abortonerror // =false // is respected. - ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), - "unittest", new Abortable() { + ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), "unittest", new Abortable() { @Override public void abort(String why, Throwable e) { throw new RuntimeException("Fatal ZK error: " + why, e); } + @Override public boolean isAborted() { return false; } }); - MasterTracker masterTracker = new MasterTracker(zkw,"/hbase/master", - new Abortable() { - @Override - public void abort(String why, Throwable e) { - throw new RuntimeException("Fatal ZooKeeper tracker error, why=", e); - } - @Override - public boolean isAborted() { - return false; - } - }); + MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() { + @Override + public void abort(String why, Throwable e) { + throw new RuntimeException("Fatal ZooKeeper tracker error, why=", e); + } + + @Override + public boolean isAborted() { + return false; + } + }); masterTracker.start(); zkw.registerListener(masterTracker); @@ -192,13 +188,12 @@ public boolean isAborted() { // Test (part of the) output that should have be printed by master when it aborts: // (namely the part that shows the set of loaded coprocessors). // In this test, there is only a single coprocessor (BuggyMasterObserver). - String coprocessorName = - BuggyMasterObserver.class.getName(); + String coprocessorName = BuggyMasterObserver.class.getName(); assertTrue(HMaster.getLoadedCoprocessors().contains(coprocessorName)); TableDescriptor tableDescriptor1 = - TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY1)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY1)).build(); boolean threwDNRE = false; try { @@ -220,7 +215,7 @@ public boolean isAborted() { } assertFalse("Master survived coprocessor NPE, as expected.", - masterTracker.masterZKNodeWasDeleted); + masterTracker.masterZKNodeWasDeleted); String loadedCoprocessors = HMaster.getLoadedCoprocessors(); assertTrue(loadedCoprocessors.contains(coprocessorName)); @@ -228,8 +223,8 @@ public boolean isAborted() { // Verify that BuggyMasterObserver has been removed due to its misbehavior // by creating another table: should not have a problem this time. 
TableDescriptor tableDescriptor2 = - TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE2)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY2)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE2)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY2)).build(); Admin admin = UTIL.getAdmin(); try { admin.createTable(tableDescriptor2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 8efc4dc94934..02382448fd74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -79,10 +79,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; /** - * Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.MasterObserver} - * interface hooks at all appropriate times during normal HMaster operations. + * Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.MasterObserver} interface + * hooks at all appropriate times during normal HMaster operations. */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestMasterObserver { @ClassRule @@ -228,7 +228,7 @@ public void resetStates() { postGetProceduresCalled = false; preGetLocksCalled = false; postGetLocksCalled = false; - preMoveCalled= false; + preMoveCalled = false; postMoveCalled = false; preAssignCalled = false; postAssignCalled = false; @@ -288,15 +288,13 @@ public Optional getMasterObserver() { } @Override - public void preMergeRegions( - final ObserverContext ctx, + public void preMergeRegions(final ObserverContext ctx, final RegionInfo[] regionsToMerge) throws IOException { preMergeRegionsCalled = true; } @Override - public void postMergeRegions( - final ObserverContext ctx, + public void postMergeRegions(final ObserverContext ctx, final RegionInfo[] regionsToMerge) throws IOException { postMergeRegionsCalled = true; } @@ -381,7 +379,7 @@ public void postSetSplitOrMergeEnabled(final ObserverContext env, TableName tableName, final TableDescriptor currentDescriptor, - final TableDescriptor newDescriptor) throws IOException { + final TableDescriptor newDescriptor) throws IOException { preModifyTableCalled = true; return newDescriptor; } @@ -389,7 +387,7 @@ public TableDescriptor preModifyTable(ObserverContext env, TableName tableName, final TableDescriptor oldDescriptor, - final TableDescriptor currentDescriptor) throws IOException { + final TableDescriptor currentDescriptor) throws IOException { postModifyTableCalled = true; } @@ -422,14 +420,14 @@ public boolean preCreateNamespaceCalledOnly() { } @Override - public void preDeleteNamespace(ObserverContext env, - String name) throws IOException { + public void preDeleteNamespace(ObserverContext env, String name) + throws IOException { preDeleteNamespaceCalled = true; } @Override - public void postDeleteNamespace(ObserverContext env, - String name) throws IOException { + public void postDeleteNamespace(ObserverContext env, String name) + throws IOException { postDeleteNamespaceCalled = true; } @@ -461,7 +459,6 @@ public boolean preModifyNamespaceCalledOnly() { return preModifyNamespaceCalled && !postModifyNamespaceCalled; } - @Override public void preGetNamespaceDescriptor(ObserverContext ctx, String namespace) throws 
IOException { @@ -551,14 +548,14 @@ public boolean preDisableTableCalledOnly() { } @Override - public void preAbortProcedure( - ObserverContext ctx, final long procId) throws IOException { + public void preAbortProcedure(ObserverContext ctx, + final long procId) throws IOException { preAbortProcedureCalled = true; } @Override - public void postAbortProcedure( - ObserverContext ctx) throws IOException { + public void postAbortProcedure(ObserverContext ctx) + throws IOException { postAbortProcedureCalled = true; } @@ -571,14 +568,14 @@ public boolean wasPreAbortProcedureCalledOnly() { } @Override - public void preGetProcedures( - ObserverContext ctx) throws IOException { + public void preGetProcedures(ObserverContext ctx) + throws IOException { preGetProceduresCalled = true; } @Override - public void postGetProcedures( - ObserverContext ctx) throws IOException { + public void postGetProcedures(ObserverContext ctx) + throws IOException { postGetProceduresCalled = true; } @@ -596,8 +593,7 @@ public void preGetLocks(ObserverContext ctx) throw } @Override - public void postGetLocks(ObserverContext ctx) - throws IOException { + public void postGetLocks(ObserverContext ctx) throws IOException { postGetLocksCalled = true; } @@ -610,16 +606,14 @@ public boolean wasPreGetLocksCalledOnly() { } @Override - public void preMove(ObserverContext env, - RegionInfo region, ServerName srcServer, ServerName destServer) - throws IOException { + public void preMove(ObserverContext env, RegionInfo region, + ServerName srcServer, ServerName destServer) throws IOException { preMoveCalled = true; } @Override public void postMove(ObserverContext env, RegionInfo region, - ServerName srcServer, ServerName destServer) - throws IOException { + ServerName srcServer, ServerName destServer) throws IOException { postMoveCalled = true; } @@ -693,14 +687,13 @@ public boolean preRegionOfflineCalledOnly() { @Override public void preBalance(ObserverContext env, - BalanceRequest request) throws IOException { + BalanceRequest request) throws IOException { preBalanceCalled = true; } @Override public void postBalance(ObserverContext env, - BalanceRequest request, - List plans) throws IOException { + BalanceRequest request, List plans) throws IOException { postBalanceCalled = true; } @@ -733,8 +726,7 @@ public boolean preBalanceSwitchCalledOnly() { } @Override - public void preShutdown(ObserverContext env) - throws IOException { + public void preShutdown(ObserverContext env) throws IOException { preShutdownCalled = true; } @@ -753,12 +745,12 @@ public boolean wasStopMasterCalled() { } @Override - public void preMasterInitialization( - ObserverContext ctx) throws IOException { + public void preMasterInitialization(ObserverContext ctx) + throws IOException { preMasterInitializationCalled = true; } - public boolean wasMasterInitializationCalled(){ + public boolean wasMasterInitializationCalled() { return preMasterInitializationCalled; } @@ -782,9 +774,13 @@ public void stop(CoprocessorEnvironment env) throws IOException { stopCalled = true; } - public boolean wasStarted() { return startCalled; } + public boolean wasStarted() { + return startCalled; + } - public boolean wasStopped() { return stopCalled; } + public boolean wasStopped() { + return stopCalled; + } @Override public void preSnapshot(final ObserverContext ctx, @@ -873,25 +869,23 @@ public boolean wasDeleteSnapshotCalled() { } @Override - public void preCreateTableAction( - final ObserverContext env, - final TableDescriptor desc, - final RegionInfo[] regions) throws IOException 
{ + public void preCreateTableAction(final ObserverContext env, + final TableDescriptor desc, final RegionInfo[] regions) throws IOException { preCreateTableActionCalled = true; } @Override public void postCompletedCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, + final ObserverContext ctx, final TableDescriptor desc, final RegionInfo[] regions) throws IOException { postCompletedCreateTableActionCalled = true; tableCreationLatch.countDown(); } - public boolean wasPreCreateTableActionCalled(){ + public boolean wasPreCreateTableActionCalled() { return preCreateTableActionCalled; } + public boolean wasCreateTableActionCalled() { return preCreateTableActionCalled && postCompletedCreateTableActionCalled; } @@ -901,9 +895,8 @@ public boolean wasCreateTableActionCalledOnly() { } @Override - public void preDeleteTableAction( - final ObserverContext env, final TableName tableName) - throws IOException { + public void preDeleteTableAction(final ObserverContext env, + final TableName tableName) throws IOException { preDeleteTableActionCalled = true; } @@ -924,9 +917,8 @@ public boolean wasDeleteTableActionCalledOnly() { } @Override - public void preTruncateTableAction( - final ObserverContext env, final TableName tableName) - throws IOException { + public void preTruncateTableAction(final ObserverContext env, + final TableName tableName) throws IOException { preTruncateTableActionCalled = true; } @@ -946,20 +938,17 @@ public boolean wasTruncateTableActionCalledOnly() { } @Override - public void preModifyTableAction( - final ObserverContext env, - final TableName tableName, - final TableDescriptor currentDescriptor, + public void preModifyTableAction(final ObserverContext env, + final TableName tableName, final TableDescriptor currentDescriptor, final TableDescriptor newDescriptor) throws IOException { preModifyTableActionCalled = true; } @Override public void postCompletedModifyTableAction( - final ObserverContext env, - final TableName tableName, - final TableDescriptor oldDescriptor, - final TableDescriptor currentDescriptor) throws IOException { + final ObserverContext env, final TableName tableName, + final TableDescriptor oldDescriptor, final TableDescriptor currentDescriptor) + throws IOException { postCompletedModifyTableActionCalled = true; } @@ -972,9 +961,8 @@ public boolean wasModifyTableActionCalledOnly() { } @Override - public void preEnableTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException { + public void preEnableTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { preEnableTableActionCalled = true; } @@ -994,9 +982,8 @@ public boolean preEnableTableActionCalledOnly() { } @Override - public void preDisableTableAction( - final ObserverContext ctx, final TableName tableName) - throws IOException { + public void preDisableTableAction(final ObserverContext ctx, + final TableName tableName) throws IOException { preDisableTableActionCalled = true; } @@ -1024,8 +1011,8 @@ public void preGetTableDescriptors(ObserverContext @Override public void postGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors, - String regex) throws IOException { + List tableNamesList, List descriptors, String regex) + throws IOException { postGetTableDescriptorsCalled = true; } @@ -1072,25 +1059,25 @@ public void postSetUserQuota(final ObserverContext @Override public void preSetUserQuota(final ObserverContext ctx, final String userName, final TableName tableName, final 
GlobalQuotaSettings quotas) - throws IOException { + throws IOException { } @Override public void postSetUserQuota(final ObserverContext ctx, final String userName, final TableName tableName, final GlobalQuotaSettings quotas) - throws IOException { + throws IOException { } @Override public void preSetUserQuota(final ObserverContext ctx, final String userName, final String namespace, final GlobalQuotaSettings quotas) - throws IOException { + throws IOException { } @Override public void postSetUserQuota(final ObserverContext ctx, final String userName, final String namespace, final GlobalQuotaSettings quotas) - throws IOException { + throws IOException { } @Override @@ -1120,52 +1107,52 @@ public void preMoveServersAndTables(ObserverContext ctx, - Set servers, Set tables,String targetGroup) throws IOException { + Set servers, Set tables, String targetGroup) throws IOException { } @Override public void preMoveServers(ObserverContext ctx, - Set servers, String targetGroup) throws IOException { + Set servers, String targetGroup) throws IOException { } @Override public void postMoveServers(ObserverContext ctx, - Set servers, String targetGroup) throws IOException { + Set
      servers, String targetGroup) throws IOException { } @Override public void preMoveTables(ObserverContext ctx, - Set tables, String targetGroupGroup) throws IOException { + Set tables, String targetGroupGroup) throws IOException { } @Override public void postMoveTables(ObserverContext ctx, - Set tables, String targetGroup) throws IOException { + Set tables, String targetGroup) throws IOException { } @Override - public void preAddRSGroup(ObserverContext ctx, - String name) throws IOException { + public void preAddRSGroup(ObserverContext ctx, String name) + throws IOException { } @Override - public void postAddRSGroup(ObserverContext ctx, - String name) throws IOException { + public void postAddRSGroup(ObserverContext ctx, String name) + throws IOException { } @Override - public void preRemoveRSGroup(ObserverContext ctx, - String name) throws IOException { + public void preRemoveRSGroup(ObserverContext ctx, String name) + throws IOException { } @Override - public void postRemoveRSGroup(ObserverContext ctx, - String name) throws IOException { + public void postRemoveRSGroup(ObserverContext ctx, String name) + throws IOException { } @Override public void preBalanceRSGroup(ObserverContext ctx, - String groupName, BalanceRequest request) throws IOException { + String groupName, BalanceRequest request) throws IOException { } @Override @@ -1186,8 +1173,8 @@ public void postRequestLock(ObserverContext ctx, S } @Override - public void preLockHeartbeat(ObserverContext ctx, - TableName tn, String description) throws IOException { + public void preLockHeartbeat(ObserverContext ctx, TableName tn, + String description) throws IOException { preLockHeartbeatCalled = true; } @@ -1198,35 +1185,29 @@ public void postLockHeartbeat(ObserverContext ctx) } public boolean preAndPostForQueueLockAndHeartbeatLockCalled() { - return preRequestLockCalled && postRequestLockCalled && preLockHeartbeatCalled && - postLockHeartbeatCalled; + return preRequestLockCalled && postRequestLockCalled && preLockHeartbeatCalled + && postLockHeartbeatCalled; } @Override - public void preSplitRegion( - final ObserverContext c, - final TableName tableName, - final byte[] splitRow) throws IOException { + public void preSplitRegion(final ObserverContext c, + final TableName tableName, final byte[] splitRow) throws IOException { } @Override - public void preSplitRegionAction( - final ObserverContext c, - final TableName tableName, - final byte[] splitRow) throws IOException { + public void preSplitRegionAction(final ObserverContext c, + final TableName tableName, final byte[] splitRow) throws IOException { } @Override public void postCompletedSplitRegionAction( - final ObserverContext c, - final RegionInfo regionInfoA, + final ObserverContext c, final RegionInfo regionInfoA, final RegionInfo regionInfoB) throws IOException { } @Override public void preSplitRegionBeforeMETAAction( - final ObserverContext ctx, - final byte[] splitKey, + final ObserverContext ctx, final byte[] splitKey, final List metaEntries) throws IOException { } @@ -1241,36 +1222,31 @@ public void postRollBackSplitRegionAction( } @Override - public void preMergeRegionsAction( - final ObserverContext ctx, + public void preMergeRegionsAction(final ObserverContext ctx, final RegionInfo[] regionsToMerge) throws IOException { } @Override public void postCompletedMergeRegionsAction( - final ObserverContext c, - final RegionInfo[] regionsToMerge, + final ObserverContext c, final RegionInfo[] regionsToMerge, final RegionInfo mergedRegion) throws IOException { } @Override - 
public void preMergeRegionsCommitAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, - final List metaEntries) throws IOException { + public void preMergeRegionsCommitAction(final ObserverContext ctx, + final RegionInfo[] regionsToMerge, final List metaEntries) throws IOException { } @Override public void postMergeRegionsCommitAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge, + final ObserverContext ctx, final RegionInfo[] regionsToMerge, final RegionInfo mergedRegion) throws IOException { } @Override public void postRollBackMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException { + final ObserverContext ctx, final RegionInfo[] regionsToMerge) + throws IOException { } } @@ -1285,8 +1261,7 @@ public void postRollBackMergeRegionsAction( @BeforeClass public static void setupBeforeClass() throws Exception { Configuration conf = UTIL.getConfiguration(); - conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - CPMasterObserver.class.getName()); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, CPMasterObserver.class.getName()); // We need more than one data server on this test UTIL.startMiniCluster(2); } @@ -1310,9 +1285,8 @@ public void testStarted() throws Exception { // check basic lifecycle assertTrue("MasterObserver should have been started", cp.wasStarted()); assertTrue("preMasterInitialization() hook should have been called", - cp.wasMasterInitializationCalled()); - assertTrue("postStartMaster() hook should have been called", - cp.wasStartMasterCalled()); + cp.wasMasterInitializationCalled()); + assertTrue("postStartMaster() hook should have been called", cp.wasStartMasterCalled()); } @Test @@ -1327,54 +1301,47 @@ public void testTableOperations() throws Exception { // create a table TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); - try(Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); Admin admin = connection.getAdmin()) { tableCreationLatch = new CountDownLatch(1); - admin.createTable(tableDescriptor, Arrays.copyOfRange(HBaseTestingUtil.KEYS, - 1, HBaseTestingUtil.KEYS.length)); + admin.createTable(tableDescriptor, + Arrays.copyOfRange(HBaseTestingUtil.KEYS, 1, HBaseTestingUtil.KEYS.length)); assertTrue("Test table should be created", cp.wasCreateTableCalled()); tableCreationLatch.await(); - assertTrue("Table pre create handler called.", cp - .wasPreCreateTableActionCalled()); - assertTrue("Table create handler should be called.", - cp.wasCreateTableActionCalled()); + assertTrue("Table pre create handler called.", cp.wasPreCreateTableActionCalled()); + assertTrue("Table create handler should be called.", cp.wasCreateTableActionCalled()); RegionLocator regionLocator = connection.getRegionLocator(tableDescriptor.getTableName()); List regions = regionLocator.getAllRegionLocations(); admin.mergeRegionsAsync(regions.get(0).getRegion().getEncodedNameAsBytes(), regions.get(1).getRegion().getEncodedNameAsBytes(), true).get(); - assertTrue("Coprocessor should have been called on region merge", - cp.wasMergeRegionsCalled()); + assertTrue("Coprocessor should have been called on region merge", cp.wasMergeRegionsCalled()); tableCreationLatch = new CountDownLatch(1); admin.disableTable(tableName); 
assertTrue(admin.isTableDisabled(tableName)); assertTrue("Coprocessor should have been called on table disable", cp.wasDisableTableCalled()); - assertTrue("Disable table handler should be called.", - cp.wasDisableTableActionCalled()); + assertTrue("Disable table handler should be called.", cp.wasDisableTableActionCalled()); // enable assertFalse(cp.wasEnableTableCalled()); admin.enableTable(tableName); assertTrue(admin.isTableEnabled(tableName)); - assertTrue("Coprocessor should have been called on table enable", - cp.wasEnableTableCalled()); - assertTrue("Enable table handler should be called.", - cp.wasEnableTableActionCalled()); + assertTrue("Coprocessor should have been called on table enable", cp.wasEnableTableCalled()); + assertTrue("Enable table handler should be called.", cp.wasEnableTableActionCalled()); admin.disableTable(tableName); assertTrue(admin.isTableDisabled(tableName)); // modify table tableDescriptor = TableDescriptorBuilder.newBuilder(tableDescriptor) - .setMaxFileSize(512 * 1024 * 1024).build(); + .setMaxFileSize(512 * 1024 * 1024).build(); modifyTableSync(admin, tableName, tableDescriptor); - assertTrue("Test table should have been modified", - cp.wasModifyTableCalled()); + assertTrue("Test table should have been modified", cp.wasModifyTableCalled()); // truncate table admin.truncateTable(tableName, false); @@ -1383,12 +1350,9 @@ public void testTableOperations() throws Exception { admin.disableTable(tableName); assertTrue(admin.isTableDisabled(tableName)); deleteTable(admin, tableName); - assertFalse("Test table should have been deleted", - admin.tableExists(tableName)); - assertTrue("Coprocessor should have been called on table delete", - cp.wasDeleteTableCalled()); - assertTrue("Delete table handler should be called.", - cp.wasDeleteTableActionCalled()); + assertFalse("Test table should have been deleted", admin.tableExists(tableName)); + assertTrue("Coprocessor should have been called on table delete", cp.wasDeleteTableCalled()); + assertTrue("Delete table handler should be called.", cp.wasDeleteTableActionCalled()); // When bypass was supported, we'd turn off bypass and rerun tests. Leaving rerun in place. 
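For readers skimming these TestMasterObserver hunks: the pattern under test is a MasterCoprocessor installed cluster-wide through the master coprocessor configuration key, whose hooks flip boolean flags the test later asserts on. A minimal sketch of that pattern follows; the class and flag names (MyMasterObserver, createCalled) are illustrative and not part of this patch.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class MyMasterObserver implements MasterCoprocessor, MasterObserver {
  // Flag flipped by the hook so a test can assert the observer actually ran.
  private volatile boolean createCalled = false;

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postCompletedCreateTableAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableDescriptor desc, RegionInfo[] regions) throws IOException {
    createCalled = true;
  }

  public boolean wasCreateCalled() {
    return createCalled;
  }

  // Cluster-wide registration, done on the configuration before the mini cluster starts.
  public static void register(Configuration conf) {
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyMasterObserver.class.getName());
  }
}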
cp.resetStates(); @@ -1396,10 +1360,8 @@ public void testTableOperations() throws Exception { admin.createTable(tableDescriptor); assertTrue("Test table should be created", cp.wasCreateTableCalled()); tableCreationLatch.await(); - assertTrue("Table pre create handler called.", cp - .wasPreCreateTableActionCalled()); - assertTrue("Table create handler should be called.", - cp.wasCreateTableActionCalled()); + assertTrue("Table pre create handler called.", cp.wasPreCreateTableActionCalled()); + assertTrue("Table create handler should be called.", cp.wasCreateTableActionCalled()); // disable assertFalse(cp.wasDisableTableCalled()); @@ -1408,25 +1370,21 @@ public void testTableOperations() throws Exception { assertTrue(admin.isTableDisabled(tableName)); assertTrue("Coprocessor should have been called on table disable", cp.wasDisableTableCalled()); - assertTrue("Disable table handler should be called.", - cp.wasDisableTableActionCalled()); + assertTrue("Disable table handler should be called.", cp.wasDisableTableActionCalled()); // modify table tableDescriptor = TableDescriptorBuilder.newBuilder(tableDescriptor) - .setMaxFileSize(512 * 1024 * 1024).build(); + .setMaxFileSize(512 * 1024 * 1024).build(); modifyTableSync(admin, tableName, tableDescriptor); - assertTrue("Test table should have been modified", - cp.wasModifyTableCalled()); + assertTrue("Test table should have been modified", cp.wasModifyTableCalled()); // enable assertFalse(cp.wasEnableTableCalled()); assertFalse(cp.wasEnableTableActionCalled()); admin.enableTable(tableName); assertTrue(admin.isTableEnabled(tableName)); - assertTrue("Coprocessor should have been called on table enable", - cp.wasEnableTableCalled()); - assertTrue("Enable table handler should be called.", - cp.wasEnableTableActionCalled()); + assertTrue("Coprocessor should have been called on table enable", cp.wasEnableTableCalled()); + assertTrue("Enable table handler should be called.", cp.wasEnableTableActionCalled()); // disable again admin.disableTable(tableName); @@ -1434,15 +1392,11 @@ public void testTableOperations() throws Exception { // delete table assertFalse("No table deleted yet", cp.wasDeleteTableCalled()); - assertFalse("Delete table handler should not be called.", - cp.wasDeleteTableActionCalled()); + assertFalse("Delete table handler should not be called.", cp.wasDeleteTableActionCalled()); deleteTable(admin, tableName); - assertFalse("Test table should have been deleted", - admin.tableExists(tableName)); - assertTrue("Coprocessor should have been called on table delete", - cp.wasDeleteTableCalled()); - assertTrue("Delete table handler should be called.", - cp.wasDeleteTableActionCalled()); + assertFalse("Test table should have been deleted", admin.tableExists(tableName)); + assertTrue("Coprocessor should have been called on table delete", cp.wasDeleteTableCalled()); + assertTrue("Delete table handler should be called.", cp.wasDeleteTableActionCalled()); } } @@ -1457,7 +1411,7 @@ public void testSnapshotOperations() throws Exception { // create a table TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); Admin admin = UTIL.getAdmin(); tableCreationLatch = new CountDownLatch(1); @@ -1470,13 +1424,11 @@ public void testSnapshotOperations() throws Exception { try { // Test snapshot operation - assertFalse("Coprocessor should not have been called yet", - cp.wasSnapshotCalled()); + 
assertFalse("Coprocessor should not have been called yet", cp.wasSnapshotCalled()); admin.snapshot(TEST_SNAPSHOT, tableName); - assertTrue("Coprocessor should have been called on snapshot", - cp.wasSnapshotCalled()); + assertTrue("Coprocessor should have been called on snapshot", cp.wasSnapshotCalled()); - //Test list operation + // Test list operation admin.listSnapshots(); assertTrue("Coprocessor should have been called on snapshot list", cp.wasListSnapshotCalled()); @@ -1527,13 +1479,13 @@ public void testNamespaceOperations() throws Exception { assertNotNull(admin.getNamespaceDescriptor(testNamespace)); assertTrue("Test namespace descriptor should have been called", - cp.wasGetNamespaceDescriptorCalled()); + cp.wasGetNamespaceDescriptorCalled()); // This test used to do a bunch w/ bypass but bypass of these table and namespace stuff has // been removed so the testing code was removed. } private void modifyTableSync(Admin admin, TableName tableName, TableDescriptor tableDescriptor) - throws IOException { + throws IOException { admin.modifyTable(tableDescriptor); // wait until modify table finishes for (int t = 0; t < 100; t++) { // 10 sec timeout @@ -1562,7 +1514,7 @@ public void testRegionTransitionOperations() throws Exception { List regions = r.getAllRegionLocations(); HRegionLocation firstGoodPair = null; - for (HRegionLocation e: regions) { + for (HRegionLocation e : regions) { if (e.getServerName() != null) { firstGoodPair = e; break; @@ -1590,14 +1542,13 @@ public void testRegionTransitionOperations() throws Exception { assertTrue("Found server", found); LOG.info("Found " + destName); master.getMasterRpcServices().moveRegion(null, RequestConverter.buildMoveRegionRequest( - firstGoodPair.getRegion().getEncodedNameAsBytes(), ServerName.valueOf(destName))); - assertTrue("Coprocessor should have been called on region move", - cp.wasMoveCalled()); + firstGoodPair.getRegion().getEncodedNameAsBytes(), ServerName.valueOf(destName))); + assertTrue("Coprocessor should have been called on region move", cp.wasMoveCalled()); // make sure balancer is on master.balanceSwitch(true); assertTrue("Coprocessor should have been called on balance switch", - cp.wasBalanceSwitchCalled()); + cp.wasBalanceSwitchCalled()); // turn balancer off master.balanceSwitch(false); @@ -1608,10 +1559,10 @@ public void testRegionTransitionOperations() throws Exception { // move half the open regions from RS 0 to RS 1 HRegionServer rs = cluster.getRegionServer(0); byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName().toString()); - //Make sure no regions are in transition now + // Make sure no regions are in transition now UTIL.waitUntilNoRegionsInTransition(); List openRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); - int moveCnt = openRegions.size()/2; + int moveCnt = openRegions.size() / 2; for (int i = 0; i < moveCnt; i++) { RegionInfo info = openRegions.get(i); if (!info.isMetaRegion()) { @@ -1620,13 +1571,12 @@ public void testRegionTransitionOperations() throws Exception { ServerName.valueOf(Bytes.toString(destRS)))); } } - //Make sure no regions are in transition now + // Make sure no regions are in transition now UTIL.waitUntilNoRegionsInTransition(); // now trigger a balance master.balanceSwitch(true); master.balance(); - assertTrue("Coprocessor should be called on region rebalancing", - cp.wasBalanceCalled()); + assertTrue("Coprocessor should be called on region rebalancing", cp.wasBalanceCalled()); } finally { Admin admin = UTIL.getAdmin(); admin.disableTable(tableName); 
@@ -1644,7 +1594,7 @@ public void testTableDescriptorsEnumeration() throws Exception { cp.resetStates(); GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest((List)null); + RequestConverter.buildGetTableDescriptorsRequest((List) null); master.getMasterRpcServices().getTableDescriptors(null, req); assertTrue("Coprocessor should be called on table descriptors request", @@ -1660,10 +1610,8 @@ public void testTableNamesEnumeration() throws Exception { CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.resetStates(); - master.getMasterRpcServices().getTableNames(null, - GetTableNamesRequest.newBuilder().build()); - assertTrue("Coprocessor should be called on table names request", - cp.wasGetTableNamesCalled()); + master.getMasterRpcServices().getTableNames(null, GetTableNamesRequest.newBuilder().build()); + assertTrue("Coprocessor should be called on table names request", cp.wasGetTableNamesCalled()); } @Test @@ -1676,8 +1624,7 @@ public void testAbortProcedureOperation() throws Exception { cp.resetStates(); master.abortProcedure(1, true); - assertTrue( - "Coprocessor should be called on abort procedure request", + assertTrue("Coprocessor should be called on abort procedure request", cp.wasAbortProcedureCalled()); } @@ -1691,8 +1638,7 @@ public void testGetProceduresOperation() throws Exception { cp.resetStates(); master.getProcedures(); - assertTrue( - "Coprocessor should be called on get procedures request", + assertTrue("Coprocessor should be called on get procedures request", cp.wasGetProceduresCalled()); } @@ -1706,9 +1652,7 @@ public void testGetLocksOperation() throws Exception { cp.resetStates(); master.getLocks(); - assertTrue( - "Coprocessor should be called on get locks request", - cp.wasGetLocksCalled()); + assertTrue("Coprocessor should be called on get locks request", cp.wasGetLocksCalled()); } private void deleteTable(Admin admin, TableName tableName) throws Exception { @@ -1728,7 +1672,7 @@ public void testQueueLockAndLockHeartbeatOperations() throws Exception { final TableName tableName = TableName.valueOf("testLockedTable"); long procId = master.getLockManager().remoteLocks().requestTableLock(tableName, - LockType.EXCLUSIVE, "desc", null); + LockType.EXCLUSIVE, "desc", null); master.getLockManager().remoteLocks().lockHeartbeat(procId, false); assertTrue(cp.preAndPostForQueueLockAndHeartbeatLockCalled()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserverToModifyTableSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserverToModifyTableSchema.java index 49283786b523..c24bfd0208cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserverToModifyTableSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserverToModifyTableSchema.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -59,7 +58,7 @@ public class TestMasterObserverToModifyTableSchema { public static void setupBeforeClass() throws Exception { Configuration conf = UTIL.getConfiguration(); conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - OnlyOneVersionAllowedMasterObserver.class.getName()); + OnlyOneVersionAllowedMasterObserver.class.getName()); UTIL.startMiniCluster(1); } @@ -72,9 +71,8 @@ public static void tearDownAfterClass() throws Exception { public void testMasterObserverToModifyTableSchema() throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLENAME); for (int i = 1; i <= 3; i++) { - builder.setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf" + i)).setMaxVersions(i) - .build()); + builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf" + i)) + .setMaxVersions(i).build()); } try (Admin admin = UTIL.getAdmin()) { admin.createTable(builder.build()); @@ -108,7 +106,7 @@ public TableDescriptor preCreateTableRegionsInfos( TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(desc); for (ColumnFamilyDescriptor cfd : desc.getColumnFamilies()) { builder.modifyColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(1).build()); + ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(1).build()); } return builder.build(); } @@ -120,7 +118,7 @@ public TableDescriptor preModifyTable(ObserverContext { Map jmxMetrics = readMetaTableJmxMetrics(); - boolean allMetricsFound = AllOf.allOf( - containsPositiveJmxAttributesFor("MetaTable_get_request"), - containsPositiveJmxAttributesFor("MetaTable_put_request"), - containsPositiveJmxAttributesFor("MetaTable_delete_request"), - containsPositiveJmxAttributesFor("MetaTable_region_.+_lossy_request"), - containsPositiveJmxAttributesFor("MetaTable_table_" + NAME1 + "_request"), - containsPositiveJmxAttributesFor("MetaTable_client_.+_put_request"), - containsPositiveJmxAttributesFor("MetaTable_client_.+_get_request"), - containsPositiveJmxAttributesFor("MetaTable_client_.+_delete_request"), - containsPositiveJmxAttributesFor("MetaTable_client_.+_lossy_request") - ).matches(jmxMetrics); + boolean allMetricsFound = AllOf + .allOf(containsPositiveJmxAttributesFor("MetaTable_get_request"), + containsPositiveJmxAttributesFor("MetaTable_put_request"), + containsPositiveJmxAttributesFor("MetaTable_delete_request"), + containsPositiveJmxAttributesFor("MetaTable_region_.+_lossy_request"), + containsPositiveJmxAttributesFor("MetaTable_table_" + NAME1 + "_request"), + containsPositiveJmxAttributesFor("MetaTable_client_.+_put_request"), + containsPositiveJmxAttributesFor("MetaTable_client_.+_get_request"), + containsPositiveJmxAttributesFor("MetaTable_client_.+_delete_request"), + containsPositiveJmxAttributesFor("MetaTable_client_.+_lossy_request")) + .matches(jmxMetrics); if (allMetricsFound) { LOG.info("all the meta table metrics found with positive values: {}", jmxMetrics); @@ -264,7 +261,7 @@ private Map readMetaTableJmxMetrics() throws IOException { while (iterator.hasNext()) { ObjectInstance instance = iterator.next(); LOG.debug("Class and object name: {} [{}]", instance.getClassName(), - instance.getObjectName()); + instance.getObjectName()); } } } 
finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java index 11da101d706f..40d56eff68ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java index 89c38d2a75c9..44d178506caa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -52,10 +51,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * Test that a coprocessor can open a connection and write to another table, inside a hook. */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestOpenTableInCoprocessor { @ClassRule @@ -67,6 +68,7 @@ public class TestOpenTableInCoprocessor { private static final byte[] family = new byte[] { 'f' }; private static boolean[] completed = new boolean[1]; + /** * Custom coprocessor that just copies the write to another table. */ @@ -80,8 +82,7 @@ public Optional getRegionObserver() { @Override public void prePut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws IOException { - try (Table table = e.getEnvironment().getConnection(). 
- getTable(otherTable)) { + try (Table table = e.getEnvironment().getConnection().getTable(otherTable)) { table.put(put); completed[0] = true; } @@ -90,6 +91,7 @@ public void prePut(final ObserverContext e, final } private static boolean[] completedWithPool = new boolean[1]; + /** * Coprocessor that creates an HTable with a pool to write to another table */ @@ -103,9 +105,9 @@ private ExecutorService getPool() { int maxThreads = 1; long keepAliveTime = 60; ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, - TimeUnit.SECONDS, new SynchronousQueue<>(), - new ThreadFactoryBuilder().setNameFormat("hbase-table-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + TimeUnit.SECONDS, new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat("hbase-table-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); pool.allowCoreThreadTimeOut(true); return pool; } @@ -119,8 +121,8 @@ public Optional getRegionObserver() { public void prePut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws IOException { try (Table table = e.getEnvironment().getConnection().getTable(otherTable, getPool())) { - Put p = new Put(new byte[]{'a'}); - p.addColumn(family, null, new byte[]{'a'}); + Put p = new Put(new byte[] { 'a' }); + p.addColumn(family, null, new byte[] { 'a' }); try { table.batch(Collections.singletonList(put), null); } catch (InterruptedException e1) { @@ -169,13 +171,11 @@ private void runCoprocessorConnectionToRemoteTable(Class clazz, boolean[] com assert (RegionObserver.class.isAssignableFrom(clazz)); // add our coprocessor TableDescriptor primaryDescriptor = TableDescriptorBuilder.newBuilder(primaryTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCoprocessor(clazz.getName()) - .build(); - + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCoprocessor(clazz.getName()) + .build(); TableDescriptor otherDescriptor = TableDescriptorBuilder.newBuilder(otherTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); - + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); Admin admin = UTIL.getAdmin(); admin.createTable(primaryDescriptor); @@ -183,7 +183,7 @@ private void runCoprocessorConnectionToRemoteTable(Class clazz, boolean[] com Table table = UTIL.getConnection().getTable(TableName.valueOf("primary")); Put p = new Put(new byte[] { 'a' }); - p.addColumn(family, null, new byte[]{'a'}); + p.addColumn(family, null, new byte[] { 'a' }); table.put(p); table.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java index 8a446cb34ba3..bbca827777e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -113,9 +113,9 @@ public void clearTable() throws IOException { admin.deleteTable(name); } table = UTIL.createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(RegionObserverImpl.class.getName()) - .build(), null); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(RegionObserverImpl.class.getName()).build(), + null); } } @@ -142,10 +142,8 @@ public void testMutation() throws Exception { append.addColumn(FAMILY, QUALIFIER, APPEND_VALUE); table.append(append); // 10L + "MB" - value = ByteBuffer.wrap(new byte[value.length + APPEND_VALUE.length]) - .put(value) - .put(APPEND_VALUE) - .array(); + value = ByteBuffer.wrap(new byte[value.length + APPEND_VALUE.length]).put(value) + .put(APPEND_VALUE).array(); assertResult(table.get(new Get(ROW)), value, value); assertObserverHasExecuted(); @@ -169,15 +167,15 @@ public void testMutation() throws Exception { @Test public void testMultiPut() throws Exception { List puts = IntStream.range(0, 10) - .mapToObj(i -> new Put(ROW).addColumn(FAMILY, Bytes.toBytes(i), VALUE)) - .collect(Collectors.toList()); + .mapToObj(i -> new Put(ROW).addColumn(FAMILY, Bytes.toBytes(i), VALUE)) + .collect(Collectors.toList()); table.put(puts); assertResult(table.get(new Get(ROW)), VALUE); assertObserverHasExecuted(); - List deletes = IntStream.range(0, 10) - .mapToObj(i -> new Delete(ROW).addColumn(FAMILY, Bytes.toBytes(i))) - .collect(Collectors.toList()); + List deletes = + IntStream.range(0, 10).mapToObj(i -> new Delete(ROW).addColumn(FAMILY, Bytes.toBytes(i))) + .collect(Collectors.toList()); table.delete(deletes); assertTrue(table.get(new Get(ROW)).isEmpty()); assertObserverHasExecuted(); @@ -211,8 +209,8 @@ private static void assertResult(Result result, byte[] expectedValue, byte[] exp } } - private static Cell createCustomCell(byte[] row, byte[] family, byte[] qualifier, - Cell.Type type, byte[] value) { + private static Cell createCustomCell(byte[] row, byte[] family, byte[] qualifier, Cell.Type type, + byte[] value) { return new Cell() { @Override @@ -344,8 +342,8 @@ private static Cell createCustomCell(Increment inc) { } private static Cell createCustomCell(Delete delete) { - return createCustomCell(delete.getRow(), FAMILY, QUALIFIER_FROM_CP, - Cell.Type.DeleteColumn, null); + return createCustomCell(delete.getRow(), FAMILY, QUALIFIER_FROM_CP, Cell.Type.DeleteColumn, + null); } public static class RegionObserverImpl implements RegionCoprocessor, RegionObserver { @@ -358,22 +356,22 @@ public Optional getRegionObserver() { @Override public void prePut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException { + Durability durability) throws IOException { put.add(createCustomCell(put)); COUNT.incrementAndGet(); } @Override public void preDelete(ObserverContext c, Delete delete, - WALEdit edit, Durability durability) throws IOException { + WALEdit edit, Durability durability) throws IOException { delete.add(createCustomCell(delete)); COUNT.incrementAndGet(); } @Override public boolean preCheckAndPut(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, - boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Put put, boolean result) throws IOException { put.add(createCustomCell(put)); COUNT.incrementAndGet(); 
return result; @@ -381,8 +379,8 @@ public boolean preCheckAndPut(ObserverContext c, b @Override public boolean preCheckAndDelete(ObserverContext c, byte[] row, - byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, - Delete delete, boolean result) throws IOException { + byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, + Delete delete, boolean result) throws IOException { delete.add(createCustomCell(delete)); COUNT.incrementAndGet(); return result; @@ -390,16 +388,15 @@ public boolean preCheckAndDelete(ObserverContext c @Override public Result preAppend(ObserverContext c, Append append) - throws IOException { + throws IOException { append.add(createCustomCell(append)); COUNT.incrementAndGet(); return null; } - @Override public Result preIncrement(ObserverContext c, Increment increment) - throws IOException { + throws IOException { increment.add(createCustomCell(increment)); COUNT.incrementAndGet(); return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java index 2975d711d32a..2a96bc53a6a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.List; import java.util.Optional; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellUtil; @@ -74,7 +73,7 @@ * change the cells which will be applied to memstore and WAL. So add unit test for the case which * change the cell's column family and tags. 
*/ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestPostIncrementAndAppendBeforeWAL { @ClassRule @@ -90,7 +89,7 @@ public class TestPostIncrementAndAppendBeforeWAL { private static Connection connection; - private static final byte [] ROW = Bytes.toBytes("row"); + private static final byte[] ROW = Bytes.toBytes("row"); private static final String CF1 = "cf1"; private static final byte[] CF1_BYTES = Bytes.toBytes(CF1); private static final String CF2 = "cf2"; @@ -103,7 +102,7 @@ public class TestPostIncrementAndAppendBeforeWAL { private static final byte[] VALUE2 = Bytes.toBytes("valuevalue"); private static final String USER = "User"; private static final Permission PERMS = - Permission.newBuilder().withActions(Permission.Action.READ).build(); + Permission.newBuilder().withActions(Permission.Action.READ).build(); @BeforeClass public static void setupBeforeClass() throws Exception { @@ -179,8 +178,8 @@ public void testIncrementTTLWithACLTag() throws Exception { createTableWithCoprocessor(tableName, ChangeCellWithACLTagObserver.class.getName()); try (Table table = connection.getTable(tableName)) { // Increment without TTL - Increment firstIncrement = new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1) - .setACL(USER, PERMS); + Increment firstIncrement = + new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1).setACL(USER, PERMS); Result result = table.increment(firstIncrement); assertEquals(1, result.size()); assertEquals(1, Bytes.toLong(result.getValue(CF1_BYTES, CQ1))); @@ -192,8 +191,8 @@ public void testIncrementTTLWithACLTag() throws Exception { assertEquals(1, Bytes.toLong(result.getValue(CF1_BYTES, CQ1))); // Increment with TTL - Increment secondIncrement = new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1).setTTL(1000) - .setACL(USER, PERMS); + Increment secondIncrement = + new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1).setTTL(1000).setACL(USER, PERMS); result = table.increment(secondIncrement); // We should get value 2 here @@ -229,8 +228,8 @@ public void testAppendTTLWithACLTag() throws Exception { assertTrue(Bytes.equals(VALUE, result.getValue(CF1_BYTES, CQ2))); // Append with TTL - Append secondAppend = new Append(ROW).addColumn(CF1_BYTES, CQ2, VALUE).setTTL(1000) - .setACL(USER, PERMS); + Append secondAppend = + new Append(ROW).addColumn(CF1_BYTES, CQ2, VALUE).setTTL(1000).setACL(USER, PERMS); result = table.append(secondAppend); // We should get "valuevalue"" @@ -253,8 +252,8 @@ private static boolean checkAclTag(byte[] acl, Cell cell) { while (iter.hasNext()) { Tag tag = iter.next(); if (tag.getType() == TagType.ACL_TAG_TYPE) { - Tag temp = TagBuilderFactory.create(). - setTagType(TagType.ACL_TAG_TYPE).setTagValue(acl).build(); + Tag temp = + TagBuilderFactory.create().setTagType(TagType.ACL_TAG_TYPE).setTagValue(acl).build(); return Tag.matchingValue(tag, temp); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestReadOnlyConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestReadOnlyConfiguration.java index a91c505f175a..afbbeb70cae1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestReadOnlyConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestReadOnlyConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java index 66077619cf40..2107772666fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,6 @@ import java.io.IOException; import java.util.Optional; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -62,7 +61,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRegionCoprocessorHost { private Configuration conf; @@ -134,9 +133,9 @@ public void testPreStoreScannerOpen() throws IOException { Scan scan = new Scan(); scan.setTimeRange(TimeRange.INITIAL_MIN_TIMESTAMP, TimeRange.INITIAL_MAX_TIMESTAMP); assertTrue("Scan is not for all time", scan.getTimeRange().isAllTime()); - //SimpleRegionObserver is set to update the ScanInfo parameters if the passed-in scan - //is for all time. this lets us exercise both that the Scan is wired up properly in the coproc - //and that we can customize the metadata + // SimpleRegionObserver is set to update the ScanInfo parameters if the passed-in scan + // is for all time. this lets us exercise both that the Scan is wired up properly in the coproc + // and that we can customize the metadata ScanInfo oldScanInfo = getScanInfo(); @@ -213,8 +212,7 @@ private ScanInfo getScanInfo() { long oldTTL = 10000; return new ScanInfo(conf, Bytes.toBytes("cf"), oldMinVersions, oldMaxVersions, oldTTL, - KeepDeletedCells.FALSE, HConstants.FOREVER, 1000, - CellComparator.getInstance(), true); + KeepDeletedCells.FALSE, HConstants.FOREVER, 1000, CellComparator.getInstance(), true); } /* diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java index fd1caed4e4ba..d2e60d5dca5f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRegionObserverBypass { @ClassRule @@ -72,9 +72,8 @@ public static void setUpBeforeClass() throws Exception { // Stack up three coprocessors just so I can check bypass skips subsequent calls. 
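As background for the TestRegionObserverBypass hunks that follow: a RegionObserver's prePut may call ObserverContext#bypass() to skip the default server-side handling of that Put, and, per the test's own comment, a bypass also short-circuits the remaining coprocessors stacked on the same hook. A minimal sketch, with an illustrative class name (BypassingObserver) and an assumed column family "test":

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

public class BypassingObserver implements RegionCoprocessor, RegionObserver {
  private static final byte[] TEST_FAMILY = Bytes.toBytes("test");

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
      Durability durability) throws IOException {
    // If the Put touches the 'test' family, skip the default put processing.
    if (put.getFamilyCellMap().containsKey(TEST_FAMILY)) {
      e.bypass();
    }
  }
}

With three such classes listed under the user region coprocessor key (as the setup below does via CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY), a bypass from the first observer keeps the other two from seeing the same prePut.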
Configuration conf = HBaseConfiguration.create(); conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - new String [] {TestCoprocessor.class.getName(), - TestCoprocessor2.class.getName(), - TestCoprocessor3.class.getName()}); + new String[] { TestCoprocessor.class.getName(), TestCoprocessor2.class.getName(), + TestCoprocessor3.class.getName() }); util = new HBaseTestingUtil(conf); util.startMiniCluster(); } @@ -93,7 +92,7 @@ public void setUp() throws Exception { } admin.deleteTable(tableName); } - util.createTable(tableName, new byte[][] {dummy, test}); + util.createTable(tableName, new byte[][] { dummy, test }); TestCoprocessor.PREPUT_BYPASSES.set(0); TestCoprocessor.PREPUT_INVOCATIONS.set(0); } @@ -109,19 +108,18 @@ public void testSimple() throws Exception { p.addColumn(test, dummy, dummy); // before HBASE-4331, this would throw an exception t.put(p); - checkRowAndDelete(t,row1,0); + checkRowAndDelete(t, row1, 0); t.close(); } /** - * Test various multiput operations. - * If the column family is 'test', then bypass is invoked. + * Test various multiput operations. If the column family is 'test', then bypass is invoked. * @throws Exception */ @Test public void testMulti() throws Exception { - //ensure that server time increments every time we do an operation, otherwise - //previous deletes will eclipse successive puts having the same timestamp + // ensure that server time increments every time we do an operation, otherwise + // previous deletes will eclipse successive puts having the same timestamp EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); Table t = util.getConnection().getTable(tableName); @@ -137,9 +135,9 @@ public void testMulti() throws Exception { puts.add(p); // before HBASE-4331, this would throw an exception t.put(puts); - checkRowAndDelete(t,row1,1); - checkRowAndDelete(t,row2,0); - checkRowAndDelete(t,row3,0); + checkRowAndDelete(t, row1, 1); + checkRowAndDelete(t, row2, 0); + checkRowAndDelete(t, row3, 0); puts.clear(); p = new Put(row1); @@ -153,9 +151,9 @@ public void testMulti() throws Exception { puts.add(p); // before HBASE-4331, this would throw an exception t.put(puts); - checkRowAndDelete(t,row1,0); - checkRowAndDelete(t,row2,0); - checkRowAndDelete(t,row3,0); + checkRowAndDelete(t, row1, 0); + checkRowAndDelete(t, row2, 0); + checkRowAndDelete(t, row3, 0); puts.clear(); p = new Put(row1); @@ -169,9 +167,9 @@ public void testMulti() throws Exception { puts.add(p); // this worked fine even before HBASE-4331 t.put(puts); - checkRowAndDelete(t,row1,0); - checkRowAndDelete(t,row2,0); - checkRowAndDelete(t,row3,1); + checkRowAndDelete(t, row1, 0); + checkRowAndDelete(t, row2, 0); + checkRowAndDelete(t, row3, 1); puts.clear(); p = new Put(row1); @@ -185,9 +183,9 @@ public void testMulti() throws Exception { puts.add(p); // this worked fine even before HBASE-4331 t.put(puts); - checkRowAndDelete(t,row1,1); - checkRowAndDelete(t,row2,0); - checkRowAndDelete(t,row3,1); + checkRowAndDelete(t, row1, 1); + checkRowAndDelete(t, row2, 0); + checkRowAndDelete(t, row3, 1); puts.clear(); p = new Put(row1); @@ -201,9 +199,9 @@ public void testMulti() throws Exception { puts.add(p); // before HBASE-4331, this would throw an exception t.put(puts); - checkRowAndDelete(t,row1,0); - checkRowAndDelete(t,row2,1); - checkRowAndDelete(t,row3,0); + checkRowAndDelete(t, row1, 0); + checkRowAndDelete(t, row2, 1); + checkRowAndDelete(t, row3, 0); t.close(); EnvironmentEdgeManager.reset(); @@ -219,13 +217,12 @@ private void checkRowAndDelete(Table 
t, byte[] row, int count) throws IOExceptio /** * Test that when bypass is called, we skip out calling any other coprocessors stacked up method, - * in this case, a prePut. - * If the column family is 'test', then bypass is invoked. + * in this case, a prePut. If the column family is 'test', then bypass is invoked. */ @Test public void testBypassAlsoCompletes() throws IOException { - //ensure that server time increments every time we do an operation, otherwise - //previous deletes will eclipse successive puts having the same timestamp + // ensure that server time increments every time we do an operation, otherwise + // previous deletes will eclipse successive puts having the same timestamp EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); Table t = util.getConnection().getTable(tableName); @@ -241,9 +238,9 @@ public void testBypassAlsoCompletes() throws IOException { puts.add(p); t.put(puts); // Ensure expected result. - checkRowAndDelete(t,row1,1); - checkRowAndDelete(t,row2,0); - checkRowAndDelete(t,row3,0); + checkRowAndDelete(t, row1, 1); + checkRowAndDelete(t, row2, 0); + checkRowAndDelete(t, row3, 0); // We have three Coprocessors stacked up on the prePut. See the beforeClass setup. We did three // puts above two of which bypassed. A bypass means do not call the other coprocessors in the // stack so for the two 'test' calls in the above, we should not have call through to all all @@ -254,7 +251,6 @@ public void testBypassAlsoCompletes() throws IOException { assertEquals("Total CP bypasses", 2, TestCoprocessor.PREPUT_BYPASSES.get()); } - public static class TestCoprocessor implements RegionCoprocessor, RegionObserver { static AtomicInteger PREPUT_INVOCATIONS = new AtomicInteger(0); static AtomicInteger PREPUT_BYPASSES = new AtomicInteger(0); @@ -265,9 +261,8 @@ public Optional getRegionObserver() { } @Override - public void prePut(final ObserverContext e, - final Put put, final WALEdit edit, final Durability durability) - throws IOException { + public void prePut(final ObserverContext e, final Put put, + final WALEdit edit, final Durability durability) throws IOException { PREPUT_INVOCATIONS.incrementAndGet(); Map> familyMap = put.getFamilyCellMap(); if (familyMap.containsKey(test)) { @@ -280,10 +275,12 @@ public void prePut(final ObserverContext e, /** * Calls through to TestCoprocessor. */ - public static class TestCoprocessor2 extends TestRegionObserverBypass.TestCoprocessor {} + public static class TestCoprocessor2 extends TestRegionObserverBypass.TestCoprocessor { + } /** * Calls through to TestCoprocessor. */ - public static class TestCoprocessor3 extends TestRegionObserverBypass.TestCoprocessor {} + public static class TestCoprocessor3 extends TestRegionObserverBypass.TestCoprocessor { + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java index 3bc6f97bb552..7a525dad8b26 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -66,8 +66,8 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRegionObserverForAddingMutationsFromCoprocessors.class); - private static final Logger LOG - = LoggerFactory.getLogger(TestRegionObserverForAddingMutationsFromCoprocessors.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestRegionObserverForAddingMutationsFromCoprocessors.class); private static HBaseTestingUtil util; private static final byte[] dummy = Bytes.toBytes("dummy"); @@ -100,8 +100,9 @@ public void setUp() throws Exception { private void createTable(String coprocessor) throws IOException { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(dummy)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(test)).setCoprocessor(coprocessor).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(dummy)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(test)).setCoprocessor(coprocessor) + .build(); util.getAdmin().createTable(tableDescriptor); } @@ -137,7 +138,7 @@ public void testCPMutationsAreWrittenToWALEdit() throws Exception { private static void assertRowCount(Table t, int expected) throws IOException { try (ResultScanner scanner = t.getScanner(new Scan())) { int i = 0; - for (Result r: scanner) { + for (Result r : scanner) { LOG.info(r.toString()); i++; } @@ -150,11 +151,8 @@ public void testDeleteCell() throws Exception { createTable(TestDeleteCellCoprocessor.class.getName()); try (Table t = util.getConnection().getTable(tableName)) { - t.put(Lists.newArrayList( - new Put(row1).addColumn(test, dummy, dummy), - new Put(row2).addColumn(test, dummy, dummy), - new Put(row3).addColumn(test, dummy, dummy) - )); + t.put(Lists.newArrayList(new Put(row1).addColumn(test, dummy, dummy), + new Put(row2).addColumn(test, dummy, dummy), new Put(row3).addColumn(test, dummy, dummy))); assertRowCount(t, 3); @@ -168,11 +166,8 @@ public void testDeleteFamily() throws Exception { createTable(TestDeleteFamilyCoprocessor.class.getName()); try (Table t = util.getConnection().getTable(tableName)) { - t.put(Lists.newArrayList( - new Put(row1).addColumn(test, dummy, dummy), - new Put(row2).addColumn(test, dummy, dummy), - new Put(row3).addColumn(test, dummy, dummy) - )); + t.put(Lists.newArrayList(new Put(row1).addColumn(test, dummy, dummy), + new Put(row2).addColumn(test, dummy, dummy), new Put(row3).addColumn(test, dummy, dummy))); assertRowCount(t, 3); @@ -186,11 +181,8 @@ public void testDeleteRow() throws Exception { createTable(TestDeleteRowCoprocessor.class.getName()); try (Table t = util.getConnection().getTable(tableName)) { - t.put(Lists.newArrayList( - new Put(row1).addColumn(test, dummy, dummy), - new Put(row2).addColumn(test, dummy, dummy), - new Put(row3).addColumn(test, dummy, dummy) - )); + t.put(Lists.newArrayList(new Put(row1).addColumn(test, dummy, dummy), + new Put(row2).addColumn(test, dummy, dummy), new Put(row3).addColumn(test, dummy, dummy))); assertRowCount(t, 3); @@ -223,10 +215,9 @@ public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp) throws IOException { Mutation mut = miniBatchOp.getOperation(0); List cells = mut.getFamilyCellMap().get(test); - Put[] puts = new Put[] { - new Put(Bytes.toBytes("cpPut")).addColumn(test, dummy, cells.get(0).getTimestamp(), - 
Bytes.toBytes("cpdummy")).setTTL(mut.getTTL()) - }; + Put[] puts = new Put[] { new Put(Bytes.toBytes("cpPut")) + .addColumn(test, dummy, cells.get(0).getTimestamp(), Bytes.toBytes("cpdummy")) + .setTTL(mut.getTTL()) }; LOG.info("Putting:" + Arrays.toString(puts)); miniBatchOp.addOperationsFromCP(0, puts); } @@ -247,8 +238,7 @@ public void preBatchMutate(ObserverContext c, new Put(row1).addColumn(test, dummy, cells.get(0).getTimestamp(), Bytes.toBytes("cpdummy")), new Put(row2).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy), - new Put(row3).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy), - }; + new Put(row3).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy), }; LOG.info("Putting:" + Arrays.toString(puts)); miniBatchOp.addOperationsFromCP(0, puts); } @@ -270,8 +260,7 @@ public void preBatchMutate(ObserverContext c, Delete[] deletes = new Delete[] { // delete only 2 rows new Delete(row1).addColumns(test, dummy, cells.get(0).getTimestamp()), - new Delete(row2).addColumns(test, dummy, cells.get(0).getTimestamp()), - }; + new Delete(row2).addColumns(test, dummy, cells.get(0).getTimestamp()), }; LOG.info("Deleting:" + Arrays.toString(deletes)); miniBatchOp.addOperationsFromCP(0, deletes); } @@ -294,8 +283,7 @@ public void preBatchMutate(ObserverContext c, Delete[] deletes = new Delete[] { // delete only 2 rows new Delete(row1).addFamily(test, cells.get(0).getTimestamp()), - new Delete(row2).addFamily(test, cells.get(0).getTimestamp()), - }; + new Delete(row2).addFamily(test, cells.get(0).getTimestamp()), }; LOG.info("Deleting:" + Arrays.toString(deletes)); miniBatchOp.addOperationsFromCP(0, deletes); } @@ -318,8 +306,7 @@ public void preBatchMutate(ObserverContext c, Delete[] deletes = new Delete[] { // delete only 2 rows new Delete(row1, cells.get(0).getTimestamp()), - new Delete(row2, cells.get(0).getTimestamp()), - }; + new Delete(row2, cells.get(0).getTimestamp()), }; LOG.info("Deleting:" + Arrays.toString(deletes)); miniBatchOp.addOperationsFromCP(0, deletes); } @@ -336,7 +323,7 @@ public Optional getWALObserver() { @Override public void postWALWrite(ObserverContext ctx, - RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { if (info.getTable().equals(TableName.valueOf("testCPMutationsAreWrittenToWALEdit"))) { savedEdit = logEdit; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 27eee9f3c654..8b0c97635b50 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -142,7 +142,8 @@ public static void tearDownAfterClass() throws Exception { @Test public void testRegionObserver() throws IOException { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); // recreate table every time in order to reset the status of the // coprocessor. 
Table table = util.createTable(tableName, new byte[][] { A, B, C }); @@ -202,7 +203,8 @@ public void testRegionObserver() throws IOException { @Test public void testRowMutation() throws IOException { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); Table table = util.createTable(tableName, new byte[][] { A, B, C }); try { verifyMethodResult(SimpleRegionObserver.class, @@ -234,7 +236,8 @@ public void testRowMutation() throws IOException { @Test public void testIncrementHook() throws IOException { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); Table table = util.createTable(tableName, new byte[][] { A, B, C }); try { Increment inc = new Increment(Bytes.toBytes(0)); @@ -242,14 +245,14 @@ public void testIncrementHook() throws IOException { verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock", - "hadPreBatchMutate", "hadPostBatchMutate", "hadPostBatchMutateIndispensably" }, + "hadPreBatchMutate", "hadPostBatchMutate", "hadPostBatchMutateIndispensably" }, tableName, new Boolean[] { false, false, false, false, false, false }); table.increment(inc); verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock", - "hadPreBatchMutate", "hadPostBatchMutate", "hadPostBatchMutateIndispensably" }, + "hadPreBatchMutate", "hadPostBatchMutate", "hadPostBatchMutateIndispensably" }, tableName, new Boolean[] { true, true, true, true, true, true }); } finally { util.deleteTable(tableName); @@ -259,7 +262,8 @@ public void testIncrementHook() throws IOException { @Test public void testCheckAndPutHooks() throws IOException { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." 
+ name.getMethodName()); try (Table table = util.createTable(tableName, new byte[][] { A, B, C })) { Put p = new Put(Bytes.toBytes(0)); p.addColumn(A, A, A); @@ -268,27 +272,26 @@ public void testCheckAndPutHooks() throws IOException { p.addColumn(A, A, A); verifyMethodResult(SimpleRegionObserver.class, new String[] { "getPreCheckAndPut", "getPreCheckAndPutAfterRowLock", "getPostCheckAndPut", - "getPreCheckAndPutWithFilter", "getPreCheckAndPutWithFilterAfterRowLock", - "getPostCheckAndPutWithFilter", "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndPutWithFilter", "getPreCheckAndPutWithFilterAfterRowLock", + "getPostCheckAndPutWithFilter", "getPreCheckAndMutate", + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 0, 0, 0, 0, 0, 0, 0, 0, 0 }); table.checkAndMutate(Bytes.toBytes(0), A).qualifier(A).ifEquals(A).thenPut(p); verifyMethodResult(SimpleRegionObserver.class, new String[] { "getPreCheckAndPut", "getPreCheckAndPutAfterRowLock", "getPostCheckAndPut", - "getPreCheckAndPutWithFilter", "getPreCheckAndPutWithFilterAfterRowLock", - "getPostCheckAndPutWithFilter", "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndPutWithFilter", "getPreCheckAndPutWithFilterAfterRowLock", + "getPostCheckAndPutWithFilter", "getPreCheckAndMutate", + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 1, 1, 1, 0, 0, 0, 1, 1, 1 }); table.checkAndMutate(Bytes.toBytes(0), - new SingleColumnValueFilter(A, A, CompareOperator.EQUAL, A)) - .thenPut(p); + new SingleColumnValueFilter(A, A, CompareOperator.EQUAL, A)).thenPut(p); verifyMethodResult(SimpleRegionObserver.class, new String[] { "getPreCheckAndPut", "getPreCheckAndPutAfterRowLock", "getPostCheckAndPut", - "getPreCheckAndPutWithFilter", "getPreCheckAndPutWithFilterAfterRowLock", - "getPostCheckAndPutWithFilter", "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndPutWithFilter", "getPreCheckAndPutWithFilterAfterRowLock", + "getPostCheckAndPutWithFilter", "getPreCheckAndMutate", + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 1, 1, 1, 1, 1, 1, 2, 2, 2 }); } finally { util.deleteTable(tableName); @@ -297,7 +300,8 @@ public void testCheckAndPutHooks() throws IOException { @Test public void testCheckAndDeleteHooks() throws IOException { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." 
+ name.getMethodName()); Table table = util.createTable(tableName, new byte[][] { A, B, C }); try { Put p = new Put(Bytes.toBytes(0)); @@ -305,32 +309,28 @@ public void testCheckAndDeleteHooks() throws IOException { table.put(p); Delete d = new Delete(Bytes.toBytes(0)); table.delete(d); - verifyMethodResult( - SimpleRegionObserver.class, new String[] { "getPreCheckAndDelete", - "getPreCheckAndDeleteAfterRowLock", "getPostCheckAndDelete", - "getPreCheckAndDeleteWithFilter", "getPreCheckAndDeleteWithFilterAfterRowLock", - "getPostCheckAndDeleteWithFilter", "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + verifyMethodResult(SimpleRegionObserver.class, + new String[] { "getPreCheckAndDelete", "getPreCheckAndDeleteAfterRowLock", + "getPostCheckAndDelete", "getPreCheckAndDeleteWithFilter", + "getPreCheckAndDeleteWithFilterAfterRowLock", "getPostCheckAndDeleteWithFilter", + "getPreCheckAndMutate", "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 0, 0, 0, 0, 0, 0, 0, 0, 0 }); table.checkAndMutate(Bytes.toBytes(0), A).qualifier(A).ifEquals(A).thenDelete(d); - verifyMethodResult( - SimpleRegionObserver.class, new String[] { "getPreCheckAndDelete", - "getPreCheckAndDeleteAfterRowLock", "getPostCheckAndDelete", - "getPreCheckAndDeleteWithFilter", "getPreCheckAndDeleteWithFilterAfterRowLock", - "getPostCheckAndDeleteWithFilter", "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + verifyMethodResult(SimpleRegionObserver.class, + new String[] { "getPreCheckAndDelete", "getPreCheckAndDeleteAfterRowLock", + "getPostCheckAndDelete", "getPreCheckAndDeleteWithFilter", + "getPreCheckAndDeleteWithFilterAfterRowLock", "getPostCheckAndDeleteWithFilter", + "getPreCheckAndMutate", "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 1, 1, 1, 0, 0, 0, 1, 1, 1 }); table.checkAndMutate(Bytes.toBytes(0), - new SingleColumnValueFilter(A, A, CompareOperator.EQUAL, A)) - .thenDelete(d); - verifyMethodResult( - SimpleRegionObserver.class, new String[] { "getPreCheckAndDelete", - "getPreCheckAndDeleteAfterRowLock", "getPostCheckAndDelete", - "getPreCheckAndDeleteWithFilter", "getPreCheckAndDeleteWithFilterAfterRowLock", - "getPostCheckAndDeleteWithFilter", "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + new SingleColumnValueFilter(A, A, CompareOperator.EQUAL, A)).thenDelete(d); + verifyMethodResult(SimpleRegionObserver.class, + new String[] { "getPreCheckAndDelete", "getPreCheckAndDeleteAfterRowLock", + "getPostCheckAndDelete", "getPreCheckAndDeleteWithFilter", + "getPreCheckAndDeleteWithFilterAfterRowLock", "getPostCheckAndDeleteWithFilter", + "getPreCheckAndMutate", "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 1, 1, 1, 1, 1, 1, 2, 2, 2 }); } finally { util.deleteTable(tableName); @@ -340,31 +340,29 @@ public void testCheckAndDeleteHooks() throws IOException { @Test public void testCheckAndIncrementHooks() throws Exception { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + - name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." 
+ name.getMethodName()); Table table = util.createTable(tableName, new byte[][] { A, B, C }); try { byte[] row = Bytes.toBytes(0); verifyMethodResult( SimpleRegionObserver.class, new String[] { "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 0, 0, 0 }); - table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(A, A) - .build(new Increment(row).addColumn(A, A, 1))); + table.checkAndMutate(CheckAndMutate.newBuilder(row).ifNotExists(A, A) + .build(new Increment(row).addColumn(A, A, 1))); verifyMethodResult( SimpleRegionObserver.class, new String[] { "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 1, 1, 1 }); - table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(A, A, Bytes.toBytes(1L)) - .build(new Increment(row).addColumn(A, A, 1))); + table.checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(A, A, Bytes.toBytes(1L)) + .build(new Increment(row).addColumn(A, A, 1))); verifyMethodResult( SimpleRegionObserver.class, new String[] { "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 2, 2, 2 }); } finally { util.deleteTable(tableName); @@ -374,31 +372,29 @@ public void testCheckAndIncrementHooks() throws Exception { @Test public void testCheckAndAppendHooks() throws Exception { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + - name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); Table table = util.createTable(tableName, new byte[][] { A, B, C }); try { byte[] row = Bytes.toBytes(0); verifyMethodResult( SimpleRegionObserver.class, new String[] { "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 0, 0, 0 }); - table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifNotExists(A, A) - .build(new Append(row).addColumn(A, A, A))); + table.checkAndMutate( + CheckAndMutate.newBuilder(row).ifNotExists(A, A).build(new Append(row).addColumn(A, A, A))); verifyMethodResult( SimpleRegionObserver.class, new String[] { "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 1, 1, 1 }); - table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(A, A, A) - .build(new Append(row).addColumn(A, A, A))); + table.checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(A, A, A).build(new Append(row).addColumn(A, A, A))); verifyMethodResult( SimpleRegionObserver.class, new String[] { "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 2, 2, 2 }); } finally { util.deleteTable(tableName); @@ -408,8 +404,8 @@ public void testCheckAndAppendHooks() throws Exception { @Test public void testCheckAndRowMutationsHooks() throws Exception { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." 
+ - name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); Table table = util.createTable(tableName, new byte[][] { A, B, C }); try { byte[] row = Bytes.toBytes(0); @@ -418,28 +414,26 @@ public void testCheckAndRowMutationsHooks() throws Exception { table.put(p); verifyMethodResult( SimpleRegionObserver.class, new String[] { "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 0, 0, 0 }); - table.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(A, A, A) - .build(new RowMutations(row) - .add((Mutation) new Put(row).addColumn(B, B, B)) - .add((Mutation) new Delete(row)))); + table.checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(A, A, A).build(new RowMutations(row) + .add((Mutation) new Put(row).addColumn(B, B, B)).add((Mutation) new Delete(row)))); verifyMethodResult( SimpleRegionObserver.class, new String[] { "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 1, 1, 1 }); Object[] result = new Object[2]; - table.batch(Arrays.asList(p, CheckAndMutate.newBuilder(row) - .ifEquals(A, A, A) - .build(new RowMutations(row) - .add((Mutation) new Put(row).addColumn(B, B, B)) - .add((Mutation) new Delete(row)))), result); + table.batch( + Arrays.asList(p, + CheckAndMutate.newBuilder(row).ifEquals(A, A, A).build(new RowMutations(row) + .add((Mutation) new Put(row).addColumn(B, B, B)).add((Mutation) new Delete(row)))), + result); verifyMethodResult( SimpleRegionObserver.class, new String[] { "getPreCheckAndMutate", - "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, + "getPreCheckAndMutateAfterRowLock", "getPostCheckAndMutate" }, tableName, new Integer[] { 2, 2, 2 }); } finally { util.deleteTable(tableName); @@ -449,7 +443,8 @@ public void testCheckAndRowMutationsHooks() throws Exception { @Test public void testAppendHook() throws IOException { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." 
+ name.getMethodName()); Table table = util.createTable(tableName, new byte[][] { A, B, C }); try { Append app = new Append(Bytes.toBytes(0)); @@ -457,17 +452,15 @@ public void testAppendHook() throws IOException { verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock", - "hadPreBatchMutate", "hadPostBatchMutate", "hadPostBatchMutateIndispensably" }, - tableName, - new Boolean[] { false, false, false, false, false, false }); + "hadPreBatchMutate", "hadPostBatchMutate", "hadPostBatchMutateIndispensably" }, + tableName, new Boolean[] { false, false, false, false, false, false }); table.append(app); verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock", - "hadPreBatchMutate", "hadPostBatchMutate", "hadPostBatchMutateIndispensably" }, - tableName, - new Boolean[] { true, true, true, true, true, true }); + "hadPreBatchMutate", "hadPostBatchMutate", "hadPostBatchMutateIndispensably" }, + tableName, new Boolean[] { true, true, true, true, true, true }); } finally { util.deleteTable(tableName); table.close(); @@ -652,8 +645,8 @@ public void testCompactionOverride() throws Exception { } TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(compactTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(A)) - .setCoprocessor(EvenOnlyCompactor.class.getName()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(A)) + .setCoprocessor(EvenOnlyCompactor.class.getName()).build(); admin.createTable(tableDescriptor); Table table = util.getConnection().getTable(compactTable); @@ -714,8 +707,10 @@ public void testCompactionOverride() throws Exception { @Test public void bulkLoadHFileTest() throws Exception { - final String testName = TestRegionObserverInterface.class.getName() + "." + name.getMethodName(); - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); + final String testName = + TestRegionObserverInterface.class.getName() + "." + name.getMethodName(); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); Configuration conf = util.getConfiguration(); Table table = util.createTable(tableName, new byte[][] { A, B, C }); try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) { @@ -744,7 +739,8 @@ public void bulkLoadHFileTest() throws Exception { @Test public void testRecovery() throws Exception { LOG.info(TestRegionObserverInterface.class.getName() + "." + name.getMethodName()); - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." 
+ name.getMethodName()); Table table = util.createTable(tableName, new byte[][] { A, B, C }); try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) { @@ -826,10 +822,10 @@ public void testPreWALRestoreSkip() throws Exception { table.close(); } - //called from testPreWALAppendIsWrittenToWAL + // called from testPreWALAppendIsWrittenToWAL private void testPreWALAppendHook(Table table, TableName tableName) throws IOException { int expectedCalls = 0; - String [] methodArray = new String[1]; + String[] methodArray = new String[1]; methodArray[0] = "getCtPreWALAppend"; Object[] resultArray = new Object[1]; @@ -861,36 +857,36 @@ private void testPreWALAppendHook(Table table, TableName tableName) throws IOExc public void testPreWALAppend() throws Exception { SimpleRegionObserver sro = new SimpleRegionObserver(); ObserverContext ctx = Mockito.mock(ObserverContext.class); - WALKey key = new WALKeyImpl(Bytes.toBytes("region"), TEST_TABLE, - EnvironmentEdgeManager.currentTime()); + WALKey key = + new WALKeyImpl(Bytes.toBytes("region"), TEST_TABLE, EnvironmentEdgeManager.currentTime()); WALEdit edit = new WALEdit(); sro.preWALAppend(ctx, key, edit); Assert.assertEquals(1, key.getExtendedAttributes().size()); Assert.assertArrayEquals(SimpleRegionObserver.WAL_EXTENDED_ATTRIBUTE_BYTES, - key.getExtendedAttribute(Integer.toString(sro.getCtPreWALAppend()))); + key.getExtendedAttribute(Integer.toString(sro.getCtPreWALAppend()))); } @Test public void testPreWALAppendIsWrittenToWAL() throws Exception { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + - "." + name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName()); Table table = util.createTable(tableName, new byte[][] { A, B, C }); PreWALAppendWALActionsListener listener = new PreWALAppendWALActionsListener(); List regions = util.getHBaseCluster().getRegions(tableName); - //should be only one region + // should be only one region HRegion region = regions.get(0); region.getWAL().registerWALActionsListener(listener); testPreWALAppendHook(table, tableName); - boolean[] expectedResults = {true, true, true, true}; + boolean[] expectedResults = { true, true, true, true }; Assert.assertArrayEquals(expectedResults, listener.getWalKeysCorrectArray()); } @Test public void testPreWALAppendNotCalledOnMetaEdit() throws Exception { - final TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + - "." + name.getMethodName()); + final TableName tableName = + TableName.valueOf(TEST_TABLE.getNameAsString() + "." 
+ name.getMethodName()); TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY); tdBuilder.setColumnFamily(cfBuilder.build()); @@ -900,20 +896,20 @@ public void testPreWALAppendNotCalledOnMetaEdit() throws Exception { PreWALAppendWALActionsListener listener = new PreWALAppendWALActionsListener(); List regions = util.getHBaseCluster().getRegions(tableName); - //should be only one region + // should be only one region HRegion region = regions.get(0); region.getWAL().registerWALActionsListener(listener); - //flushing should write to the WAL + // flushing should write to the WAL region.flush(true); - //so should compaction + // so should compaction region.compact(false); - //and so should closing the region + // and so should closing the region region.close(); - //but we still shouldn't have triggered preWALAppend because no user data was written - String[] methods = new String[] {"getCtPreWALAppend"}; - Object[] expectedResult = new Integer[]{0}; + // but we still shouldn't have triggered preWALAppend because no user data was written + String[] methods = new String[] { "getCtPreWALAppend" }; + Object[] expectedResult = new Integer[] { 0 }; verifyMethodResult(SimpleRegionObserver.class, methods, tableName, expectedResult); } @@ -925,8 +921,7 @@ private void verifyMethodResult(Class coprocessor, String methodName[], Table if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()) { continue; } - for (RegionInfo r : ProtobufUtil - .getOnlineRegions(t.getRegionServer().getRSRpcServices())) { + for (RegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer().getRSRpcServices())) { if (!r.getTable().equals(tableName)) { continue; } @@ -939,8 +934,8 @@ private void verifyMethodResult(Class coprocessor, String methodName[], Table Method m = coprocessor.getMethod(methodName[i]); Object o = m.invoke(cp); assertTrue("Result of " + coprocessor.getName() + "." 
+ methodName[i] - + " is expected to be " + value[i].toString() + ", while we get " - + o.toString(), o.equals(value[i])); + + " is expected to be " + value[i].toString() + ", while we get " + o.toString(), + o.equals(value[i])); } } } @@ -967,15 +962,15 @@ private static void createHFile(Configuration conf, FileSystem fs, Path path, by } private static class PreWALAppendWALActionsListener implements WALActionsListener { - boolean[] walKeysCorrect = {false, false, false, false}; + boolean[] walKeysCorrect = { false, false, false, false }; @Override - public void postAppend(long entryLen, long elapsedTimeMillis, - WALKey logKey, WALEdit logEdit) throws IOException { + public void postAppend(long entryLen, long elapsedTimeMillis, WALKey logKey, WALEdit logEdit) + throws IOException { for (int k = 0; k < 4; k++) { if (!walKeysCorrect[k]) { walKeysCorrect[k] = Arrays.equals(SimpleRegionObserver.WAL_EXTENDED_ATTRIBUTE_BYTES, - logKey.getExtendedAttribute(Integer.toString(k + 1))); + logKey.getExtendedAttribute(Integer.toString(k + 1))); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java index 734d4e0f913e..c9c239697310 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverPreFlushAndPreCompact.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,18 +49,19 @@ /** * Test that we fail if a Coprocessor tries to return a null scanner out * {@link RegionObserver#preFlush(ObserverContext, Store, InternalScanner, FlushLifeCycleTracker)} - * or {@link RegionObserver#preCompact(ObserverContext, Store, InternalScanner, ScanType, - * CompactionLifeCycleTracker, CompactionRequest)} + * or + * {@link RegionObserver#preCompact(ObserverContext, Store, InternalScanner, ScanType, CompactionLifeCycleTracker, CompactionRequest)} * @see HBASE-19122 */ -@Category({CoprocessorTests.class, SmallTests.class}) +@Category({ CoprocessorTests.class, SmallTests.class }) public class TestRegionObserverPreFlushAndPreCompact { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRegionObserverPreFlushAndPreCompact.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); /** * Coprocessor that returns null when preCompact or preFlush is called. @@ -89,7 +90,7 @@ public Optional getRegionObserver() { * Ensure we get expected exception when we try to return null from a preFlush call. * @throws IOException We expect it to throw {@link CoprocessorException} */ - @Test (expected = CoprocessorException.class) + @Test(expected = CoprocessorException.class) public void testPreFlushReturningNull() throws IOException { RegionCoprocessorHost rch = getRegionCoprocessorHost(); rch.preFlush(null, null, null); @@ -99,7 +100,7 @@ public void testPreFlushReturningNull() throws IOException { * Ensure we get expected exception when we try to return null from a preCompact call. 
* @throws IOException We expect it to throw {@link CoprocessorException} */ - @Test (expected = CoprocessorException.class) + @Test(expected = CoprocessorException.class) public void testPreCompactReturningNull() throws IOException { RegionCoprocessorHost rch = getRegionCoprocessorHost(); rch.preCompact(null, null, null, null, null, null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java index a9d6576e8ff5..18e01057c3c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java @@ -76,7 +76,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRegionObserverScannerOpenHook { @ClassRule @@ -147,8 +147,10 @@ public boolean next(List result, ScannerContext scannerContext) throws IOE } @Override - public void close() throws IOException {} + public void close() throws IOException { + } }; + /** * Don't allow any data in a flush by creating a custom {@link StoreScanner}. */ @@ -186,14 +188,13 @@ public InternalScanner preCompact(ObserverContext HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf, byte[]... families) throws IOException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); for (byte[] family : families) { - builder.setColumnFamily( - ColumnFamilyDescriptorBuilder.of(family)); + builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } TableDescriptor tableDescriptor = builder.build(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); Path path = new Path(DIR + callingMethod); WAL wal = HBaseTestingUtil.createWal(conf, path, info); @@ -229,7 +230,8 @@ public void testRegionObserverScanTimeStacking() throws Exception { Result r = region.get(get); assertNull( "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " - + r, r.listCells()); + + r, + r.listCells()); HBaseTestingUtil.closeRegionAndWAL(region); } @@ -256,7 +258,8 @@ public void testRegionObserverFlushTimeStacking() throws Exception { Result r = region.get(get); assertNull( "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. 
Found: " - + r, r.listCells()); + + r, + r.listCells()); HBaseTestingUtil.closeRegionAndWAL(region); } @@ -267,9 +270,9 @@ public static class CompactionCompletionNotifyingRegion extends HRegion { private static volatile CountDownLatch compactionStateChangeLatch = null; @SuppressWarnings("deprecation") - public CompactionCompletionNotifyingRegion(Path tableDir, WAL log, - FileSystem fs, Configuration confParam, RegionInfo info, - TableDescriptor htd, RegionServerServices rsServices) { + public CompactionCompletionNotifyingRegion(Path tableDir, WAL log, FileSystem fs, + Configuration confParam, RegionInfo info, TableDescriptor htd, + RegionServerServices rsServices) { super(tableDir, log, fs, confParam, info, htd, rsServices); } @@ -309,15 +312,16 @@ public void testRegionObserverCompactionTimeStacking() throws Exception { UTIL.startMiniCluster(); byte[] ROW = Bytes.toBytes("testRow"); byte[] A = Bytes.toBytes("A"); - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + TableDescriptor tableDescriptor = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(A)) - .setCoprocessor(CoprocessorDescriptorBuilder - .newBuilder(EmptyRegionObsever.class.getName()).setJarPath(null) - .setPriority(Coprocessor.PRIORITY_USER).setProperties(Collections.emptyMap()).build()) - .setCoprocessor(CoprocessorDescriptorBuilder - .newBuilder(NoDataFromCompaction.class.getName()).setJarPath(null) - .setPriority(Coprocessor.PRIORITY_HIGHEST).setProperties(Collections.emptyMap()).build()) + .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(EmptyRegionObsever.class.getName()) + .setJarPath(null).setPriority(Coprocessor.PRIORITY_USER) + .setProperties(Collections.emptyMap()).build()) + .setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder(NoDataFromCompaction.class.getName()) + .setJarPath(null).setPriority(Coprocessor.PRIORITY_HIGHEST) + .setProperties(Collections.emptyMap()).build()) .build(); Admin admin = UTIL.getAdmin(); @@ -335,8 +339,8 @@ public void testRegionObserverCompactionTimeStacking() throws Exception { assertEquals("More than 1 region serving test table with 1 row", 1, regions.size()); Region region = regions.get(0); admin.flushRegion(region.getRegionInfo().getRegionName()); - CountDownLatch latch = ((CompactionCompletionNotifyingRegion)region) - .getCompactionStateChangeLatch(); + CountDownLatch latch = + ((CompactionCompletionNotifyingRegion) region).getCompactionStateChangeLatch(); // put another row and flush that too put = new Put(Bytes.toBytes("anotherrow")); @@ -352,13 +356,15 @@ public void testRegionObserverCompactionTimeStacking() throws Exception { Result r = table.get(get); assertNull( "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. 
Found: " - + r, r.listCells()); + + r, + r.listCells()); get = new Get(Bytes.toBytes("anotherrow")); r = table.get(get); assertNull( "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor Found: " - + r, r.listCells()); + + r, + r.listCells()); table.close(); UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java index 0bbd802bfa9e..12797a226e4c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java @@ -50,15 +50,14 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({CoprocessorTests.class, SmallTests.class}) +@Category({ CoprocessorTests.class, SmallTests.class }) public class TestRegionObserverStacking { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRegionObserverStacking.class); - private static HBaseTestingUtil TEST_UTIL - = new HBaseTestingUtil(); + private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); static final Path DIR = TEST_UTIL.getDataTestDir(); public static class ObserverA implements RegionCoprocessor, RegionObserver { @@ -70,10 +69,8 @@ public Optional getRegionObserver() { } @Override - public void postPut(final ObserverContext c, - final Put put, final WALEdit edit, - final Durability durability) - throws IOException { + public void postPut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) throws IOException { id = EnvironmentEdgeManager.currentTime(); Threads.sleepWithoutInterrupt(10); } @@ -88,10 +85,8 @@ public Optional getRegionObserver() { } @Override - public void postPut(final ObserverContext c, - final Put put, final WALEdit edit, - final Durability durability) - throws IOException { + public void postPut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) throws IOException { id = EnvironmentEdgeManager.currentTime(); Threads.sleepWithoutInterrupt(10); } @@ -106,25 +101,23 @@ public Optional getRegionObserver() { } @Override - public void postPut(final ObserverContext c, - final Put put, final WALEdit edit, - final Durability durability) - throws IOException { + public void postPut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) throws IOException { id = EnvironmentEdgeManager.currentTime(); Threads.sleepWithoutInterrupt(10); } } HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf, - byte[]... families) throws IOException { + byte[]... 
families) throws IOException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); for (byte[] family : families) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } TableDescriptor tableDescriptor = builder.build(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); Path path = new Path(DIR + callingMethod); HRegion r = HBaseTestingUtil.createRegionAndWAL(info, path, conf, tableDescriptor); @@ -132,8 +125,8 @@ HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf, // is secretly loaded at OpenRegionHandler. we don't really // start a region server here, so just manually create cphost // and set it to region. - RegionCoprocessorHost host = new RegionCoprocessorHost(r, - Mockito.mock(RegionServerServices.class), conf); + RegionCoprocessorHost host = + new RegionCoprocessorHost(r, Mockito.mock(RegionServerServices.class), conf); r.setCoprocessorHost(host); return r; } @@ -143,11 +136,10 @@ public void testRegionObserverStacking() throws Exception { byte[] ROW = Bytes.toBytes("testRow"); byte[] TABLE = Bytes.toBytes(this.getClass().getSimpleName()); byte[] A = Bytes.toBytes("A"); - byte[][] FAMILIES = new byte[][] { A } ; + byte[][] FAMILIES = new byte[][] { A }; Configuration conf = TEST_UTIL.getConfiguration(); - HRegion region = initHRegion(TABLE, getClass().getName(), - conf, FAMILIES); + HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); RegionCoprocessorHost h = region.getCoprocessorHost(); h.load(ObserverA.class, Coprocessor.PRIORITY_HIGHEST, conf); h.load(ObserverB.class, Coprocessor.PRIORITY_USER, conf); @@ -158,11 +150,11 @@ public void testRegionObserverStacking() throws Exception { region.put(put); Coprocessor c = h.findCoprocessor(ObserverA.class.getName()); - long idA = ((ObserverA)c).id; + long idA = ((ObserverA) c).id; c = h.findCoprocessor(ObserverB.class.getName()); - long idB = ((ObserverB)c).id; + long idB = ((ObserverB) c).id; c = h.findCoprocessor(ObserverC.class.getName()); - long idC = ((ObserverC)c).id; + long idC = ((ObserverC) c).id; assertTrue(idA < idB); assertTrue(idB < idC); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java index 292c4d31be25..095099064c35 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,27 +44,26 @@ import org.slf4j.LoggerFactory; /** - * Tests unhandled exceptions thrown by coprocessors running on a regionserver.. - * Expected result is that the regionserver will abort with an informative - * error message describing the set of its loaded coprocessors for crash - * diagnosis. 
(HBASE-4014). + * Tests unhandled exceptions thrown by coprocessors running on a regionserver.. Expected result is + * that the regionserver will abort with an informative error message describing the set of its + * loaded coprocessors for crash diagnosis. (HBASE-4014). */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRegionServerCoprocessorExceptionWithAbort { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRegionServerCoprocessorExceptionWithAbort.class); - private static final Logger LOG = LoggerFactory.getLogger( - TestRegionServerCoprocessorExceptionWithAbort.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestRegionServerCoprocessorExceptionWithAbort.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLE_NAME = TableName.valueOf("observed_table"); @Test public void testExceptionDuringInitialization() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); - conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast. + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast. conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true); conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, ""); TEST_UTIL.startMiniCluster(2); @@ -92,7 +91,7 @@ public boolean evaluate() throws Exception { public void testExceptionFromCoprocessorDuringPut() throws Exception { // set configure to indicate which cp should be loaded Configuration conf = TEST_UTIL.getConfiguration(); - conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast. + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast. 
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, BuggyRegionObserver.class.getName()); conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true); TEST_UTIL.startMiniCluster(2); @@ -130,8 +129,7 @@ public void testExceptionFromCoprocessorDuringPut() throws Exception { try { Thread.sleep(1000); } catch (InterruptedException e) { - fail("InterruptedException while waiting for regionserver " + - "zk node to be deleted."); + fail("InterruptedException while waiting for regionserver " + "zk node to be deleted."); } } Assert.assertTrue("The region server should have aborted", aborted); @@ -154,9 +152,8 @@ public void start(CoprocessorEnvironment e) throws IOException { public static class BuggyRegionObserver extends SimpleRegionObserver { @SuppressWarnings("null") @Override - public void prePut(final ObserverContext c, - final Put put, final WALEdit edit, - final Durability durability) { + public void prePut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) { String tableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString(); if (tableName.equals("observed_table")) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java index 8c3e4ab8aaf7..6c2326bf3cc9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,13 +41,11 @@ import org.junit.experimental.categories.Category; /** - * Tests unhandled exceptions thrown by coprocessors running on regionserver. - * Expected result is that the region server will remove the buggy coprocessor from - * its set of coprocessors and throw a org.apache.hadoop.hbase.exceptions.DoNotRetryIOException - * back to the client. - * (HBASE-4014). + * Tests unhandled exceptions thrown by coprocessors running on regionserver. Expected result is + * that the region server will remove the buggy coprocessor from its set of coprocessors and throw a + * org.apache.hadoop.hbase.exceptions.DoNotRetryIOException back to the client. (HBASE-4014). 
*/ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRegionServerCoprocessorExceptionWithRemove { @ClassRule @@ -57,9 +55,8 @@ public class TestRegionServerCoprocessorExceptionWithRemove { public static class BuggyRegionObserver extends SimpleRegionObserver { @SuppressWarnings("null") @Override - public void prePut(final ObserverContext c, - final Put put, final WALEdit edit, - final Durability durability) { + public void prePut(final ObserverContext c, final Put put, + final WALEdit edit, final Durability durability) { String tableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString(); if (tableName.equals("observed_table")) { @@ -76,8 +73,7 @@ public void prePut(final ObserverContext c, public static void setupBeforeClass() throws Exception { // set configure to indicate which cp should be loaded Configuration conf = TEST_UTIL.getConfiguration(); - conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - BuggyRegionObserver.class.getName()); + conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, BuggyRegionObserver.class.getName()); TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false); TEST_UTIL.startMiniCluster(); } @@ -88,8 +84,7 @@ public static void teardownAfterClass() throws Exception { } @Test - public void testExceptionFromCoprocessorDuringPut() - throws IOException, InterruptedException { + public void testExceptionFromCoprocessorDuringPut() throws IOException, InterruptedException { // Set watches on the zookeeper nodes for all of the regionservers in the // cluster. When we try to write to TEST_TABLE, the buggy coprocessor will // cause a NullPointerException, which will cause the regionserver (which @@ -105,8 +100,7 @@ public void testExceptionFromCoprocessorDuringPut() TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE); // Note which regionServer that should survive the buggy coprocessor's // prePut(). - HRegionServer regionServer = - TEST_UTIL.getRSForFirstRegionInTable(TEST_TABLE); + HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(TEST_TABLE); boolean threwIOE = false; try { @@ -129,12 +123,10 @@ public void testExceptionFromCoprocessorDuringPut() try { Thread.sleep(1000); } catch (InterruptedException e) { - fail("InterruptedException while waiting for regionserver " + - "zk node to be deleted."); + fail("InterruptedException while waiting for regionserver " + "zk node to be deleted."); } } table.close(); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index d1a7937d66b9..9f049ecfaab3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,11 +77,10 @@ import org.slf4j.LoggerFactory; /** - * Tests invocation of the - * {@link org.apache.hadoop.hbase.coprocessor.MasterObserver} interface hooks at - * all appropriate times during normal HMaster operations. + * Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.MasterObserver} interface + * hooks at all appropriate times during normal HMaster operations. 
*/ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestWALObserver { @ClassRule @@ -92,12 +91,12 @@ public class TestWALObserver { private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static byte[] TEST_TABLE = Bytes.toBytes("observedTable"); - private static byte[][] TEST_FAMILY = { Bytes.toBytes("fam1"), - Bytes.toBytes("fam2"), Bytes.toBytes("fam3"), }; - private static byte[][] TEST_QUALIFIER = { Bytes.toBytes("q1"), - Bytes.toBytes("q2"), Bytes.toBytes("q3"), }; - private static byte[][] TEST_VALUE = { Bytes.toBytes("v1"), - Bytes.toBytes("v2"), Bytes.toBytes("v3"), }; + private static byte[][] TEST_FAMILY = + { Bytes.toBytes("fam1"), Bytes.toBytes("fam2"), Bytes.toBytes("fam3"), }; + private static byte[][] TEST_QUALIFIER = + { Bytes.toBytes("q1"), Bytes.toBytes("q2"), Bytes.toBytes("q3"), }; + private static byte[][] TEST_VALUE = + { Bytes.toBytes("v1"), Bytes.toBytes("v2"), Bytes.toBytes("v3"), }; private static byte[] TEST_ROW = Bytes.toBytes("testRow"); @Rule @@ -115,16 +114,15 @@ public class TestWALObserver { public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - SampleRegionWALCoprocessor.class.getName()); + SampleRegionWALCoprocessor.class.getName()); conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - SampleRegionWALCoprocessor.class.getName()); + SampleRegionWALCoprocessor.class.getName()); conf.setInt("dfs.client.block.recovery.retries", 2); TEST_UTIL.startMiniCluster(1); - Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem() - .makeQualified(new Path("/hbase")); - Path hbaseWALRootDir = TEST_UTIL.getDFSCluster().getFileSystem() - .makeQualified(new Path("/hbaseLogRoot")); + Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase")); + Path hbaseWALRootDir = + TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbaseLogRoot")); LOG.info("hbase.rootdir=" + hbaseRootDir); CommonFSUtils.setRootDir(conf, hbaseRootDir); CommonFSUtils.setWALRootDir(conf, hbaseWALRootDir); @@ -143,10 +141,11 @@ public void setUp() throws Exception { this.hbaseRootDir = CommonFSUtils.getRootDir(conf); this.hbaseWALRootDir = CommonFSUtils.getWALRootDir(conf); this.oldLogDir = new Path(this.hbaseWALRootDir, HConstants.HREGION_OLDLOGDIR_NAME); - String serverName = ServerName.valueOf(currentTest.getMethodName(), 16010, - EnvironmentEdgeManager.currentTime()).toString(); - this.logDir = new Path(this.hbaseWALRootDir, - AbstractFSWALProvider.getWALDirectoryName(serverName)); + String serverName = + ServerName.valueOf(currentTest.getMethodName(), 16010, EnvironmentEdgeManager.currentTime()) + .toString(); + this.logDir = + new Path(this.hbaseWALRootDir, AbstractFSWALProvider.getWALDirectoryName(serverName)); if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) { TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true); @@ -171,9 +170,8 @@ public void tearDown() throws Exception { } /** - * Test WAL write behavior with WALObserver. The coprocessor monitors a - * WALEdit written to WAL, and ignore, modify, and add KeyValue's for the - * WALEdit. + * Test WAL write behavior with WALObserver. The coprocessor monitors a WALEdit written to WAL, + * and ignore, modify, and add KeyValue's for the WALEdit. 
*/ @Test public void testWALObserverWriteToWAL() throws Exception { @@ -184,8 +182,7 @@ public void testWALObserverWriteToWAL() throws Exception { private void verifyWritesSeen(final WAL log, final SampleRegionWALCoprocessor cp, final boolean seesLegacy) throws Exception { RegionInfo hri = createBasicHRegionInfo(Bytes.toString(TEST_TABLE)); - TableDescriptor htd = createBasic3FamilyHTD(Bytes - .toString(TEST_TABLE)); + TableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE)); NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] fam : htd.getColumnFamilyNames()) { scopes.put(fam, 0); @@ -197,8 +194,8 @@ private void verifyWritesSeen(final WAL log, final SampleRegionWALCoprocessor cp // TEST_FAMILY[0] shall be removed from WALEdit. // TEST_FAMILY[1] value shall be changed. // TEST_FAMILY[2] shall be added to WALEdit, although it's not in the put. - cp.setTestValues(TEST_TABLE, TEST_ROW, TEST_FAMILY[0], TEST_QUALIFIER[0], - TEST_FAMILY[1], TEST_QUALIFIER[1], TEST_FAMILY[2], TEST_QUALIFIER[2]); + cp.setTestValues(TEST_TABLE, TEST_ROW, TEST_FAMILY[0], TEST_QUALIFIER[0], TEST_FAMILY[1], + TEST_QUALIFIER[1], TEST_FAMILY[2], TEST_QUALIFIER[2]); assertFalse(cp.isPreWALWriteCalled()); assertFalse(cp.isPostWALWriteCalled()); @@ -239,7 +236,8 @@ private void verifyWritesSeen(final WAL log, final SampleRegionWALCoprocessor cp long now = EnvironmentEdgeManager.currentTime(); // we use HLogKey here instead of WALKeyImpl directly to support legacy coprocessors. long txid = log.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), hri.getTable(), now, - new MultiVersionConcurrencyControl(), scopes), edit); + new MultiVersionConcurrencyControl(), scopes), + edit); log.sync(txid); // the edit shall have been change now by the coprocessor. @@ -276,7 +274,7 @@ public void testEmptyWALEditAreNotSeen() throws Exception { TableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE)); MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(byte[] fam : htd.getColumnFamilyNames()) { + for (byte[] fam : htd.getColumnFamilyNames()) { scopes.put(fam, 0); } WAL log = wals.getWAL(null); @@ -342,24 +340,22 @@ public void testWALCoprocessorReplay() throws Exception { // sync to fs. wal.sync(); - User user = HBaseTestingUtil.getDifferentUser(newConf, - ".replay.wal.secondtime"); + User user = HBaseTestingUtil.getDifferentUser(newConf, ".replay.wal.secondtime"); user.runAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { Path p = runWALSplit(newConf); LOG.info("WALSplit path == " + p); // Make a new wal for new region open. 
- final WALFactory wals2 = new WALFactory(conf, - ServerName.valueOf(currentTest.getMethodName() + "2", 16010, - EnvironmentEdgeManager.currentTime()) - .toString()); + final WALFactory wals2 = new WALFactory(conf, ServerName + .valueOf(currentTest.getMethodName() + "2", 16010, EnvironmentEdgeManager.currentTime()) + .toString()); WAL wal2 = wals2.getWAL(null); - HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, - hri, htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null); + HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, + htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null); SampleRegionWALCoprocessor cp2 = - region.getCoprocessorHost().findCoprocessor(SampleRegionWALCoprocessor.class); + region.getCoprocessorHost().findCoprocessor(SampleRegionWALCoprocessor.class); // TODO: asserting here is problematic. assertNotNull(cp2); assertTrue(cp2.isPreWALRestoreCalled()); @@ -372,9 +368,8 @@ public Void run() throws Exception { } /** - * Test to see CP loaded successfully or not. There is a duplication at - * TestHLog, but the purpose of that one is to see whether the loaded CP will - * impact existing WAL tests or not. + * Test to see CP loaded successfully or not. There is a duplication at TestHLog, but the purpose + * of that one is to see whether the loaded CP will impact existing WAL tests or not. */ @Test public void testWALObserverLoaded() throws Exception { @@ -431,8 +426,8 @@ private Put creatPutWith2Families(byte[] row) throws IOException { } private Path runWALSplit(final Configuration c) throws IOException { - List splits = WALSplitter.split( - hbaseRootDir, logDir, oldLogDir, FileSystem.get(c), c, wals); + List splits = + WALSplitter.split(hbaseRootDir, logDir, oldLogDir, FileSystem.get(c), c, wals); // Split should generate only 1 file since there's only 1 region assertEquals(1, splits.size()); // Make sure the file exists diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionDispatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionDispatcher.java index 8861a69e45d1..8f65692939fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionDispatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionDispatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,10 +31,10 @@ import org.slf4j.LoggerFactory; /** - * Test that we propagate errors through an dispatcher exactly once via different failure - * injection mechanisms. + * Test that we propagate errors through an dispatcher exactly once via different failure injection + * mechanisms. 
*/ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestForeignExceptionDispatcher { @ClassRule @@ -46,8 +46,10 @@ public class TestForeignExceptionDispatcher { /** * Exception thrown from the test */ - final ForeignException EXTEXN = new ForeignException("FORTEST", new IllegalArgumentException("FORTEST")); - final ForeignException EXTEXN2 = new ForeignException("FORTEST2", new IllegalArgumentException("FORTEST2")); + final ForeignException EXTEXN = + new ForeignException("FORTEST", new IllegalArgumentException("FORTEST")); + final ForeignException EXTEXN2 = + new ForeignException("FORTEST2", new IllegalArgumentException("FORTEST2")); /** * Tests that a dispatcher only dispatches only the first exception, and does not propagate diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionSerialization.java index 6b6ef0c430d4..7001209f14bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -34,7 +33,7 @@ /** * Test that we correctly serialize exceptions from a remote source */ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestForeignExceptionSerialization { @ClassRule @@ -80,10 +79,10 @@ public void testRemoteFromLocal() throws IOException { // Workaround for java 11 - replaced assertArrayEquals with individual elements comparison // using custom comparison helper method assertEquals("Stacktrace lengths don't match", generic.getStackTrace().length, - e.getCause().getStackTrace().length); + e.getCause().getStackTrace().length); for (int i = 0; i < generic.getStackTrace().length; i++) { assertTrue("Local stack trace got corrupted at " + i + "th index", - compareStackTraceElement(generic.getStackTrace()[i], e.getCause().getStackTrace()[i])); + compareStackTraceElement(generic.getStackTrace()[i], e.getCause().getStackTrace()[i])); } e.printStackTrace(); // should have ForeignException and source node in it. 
@@ -95,8 +94,9 @@ public void testRemoteFromLocal() throws IOException { // Helper method to compare two stackTraceElements private boolean compareStackTraceElement(StackTraceElement obj1, StackTraceElement obj2) { - return obj1.getClassName().equals(obj2.getClassName()) && obj1.getLineNumber() == obj2 - .getLineNumber() && Objects.equals(obj1.getMethodName(), obj2.getMethodName()) && Objects - .equals(obj1.getFileName(), obj2.getFileName()); + return obj1.getClassName().equals(obj2.getClassName()) + && obj1.getLineNumber() == obj2.getLineNumber() + && Objects.equals(obj1.getMethodName(), obj2.getMethodName()) + && Objects.equals(obj1.getFileName(), obj2.getFileName()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestTimeoutExceptionInjector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestTimeoutExceptionInjector.java index 9d5537244c1e..cdea88c87745 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestTimeoutExceptionInjector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/errorhandling/TestTimeoutExceptionInjector.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ /** * Test the {@link TimeoutExceptionInjector} to ensure we fulfill contracts */ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestTimeoutExceptionInjector { @ClassRule @@ -88,8 +88,8 @@ public void testStartAfterComplete() throws InterruptedException { } /** - * Demonstrate TimeoutExceptionInjector semantics -- triggering fires exception and completes - * the timer. + * Demonstrate TimeoutExceptionInjector semantics -- triggering fires exception and completes the + * timer. */ @Test public void testStartAfterTrigger() throws InterruptedException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java index 5df089b597ec..9cbde1f53199 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestExecutorService { @ClassRule @@ -71,11 +71,10 @@ public void testExecutorService() throws Exception { // Start an executor service pool with max 5 threads ExecutorService executorService = new ExecutorService("unit_test"); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_SERVER_OPERATIONS).setCorePoolSize(maxThreads)); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS).setCorePoolSize(maxThreads)); - Executor executor = - executorService.getExecutor(ExecutorType.MASTER_SERVER_OPERATIONS); + Executor executor = executorService.getExecutor(ExecutorType.MASTER_SERVER_OPERATIONS); ThreadPoolExecutor pool = executor.threadPoolExecutor; // Assert no threads yet @@ -86,9 +85,8 @@ public void testExecutorService() throws Exception { // Submit maxThreads executors. for (int i = 0; i < maxThreads; i++) { - executorService.submit( - new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, - lock, counter)); + executorService + .submit(new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter)); } // The TestEventHandler will increment counter when it starts. @@ -108,7 +106,6 @@ public void testExecutorService() throws Exception { assertEquals(5, status.running.size()); checkStatusDump(status); - // Now interrupt the running Executor synchronized (lock) { lock.set(false); @@ -128,9 +125,8 @@ public void testExecutorService() throws Exception { // Add more than the number of threads items. // Make sure we don't get RejectedExecutionException. 
for (int i = 0; i < (2 * maxThreads); i++) { - executorService.submit( - new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, - lock, counter)); + executorService + .submit(new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter)); } // Now interrupt the running Executor synchronized (lock) { @@ -147,9 +143,8 @@ public void testExecutorService() throws Exception { assertEquals(0, executorService.getAllExecutorStatuses().size()); // Test that submit doesn't throw NPEs - executorService.submit( - new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, - lock, counter)); + executorService + .submit(new TestEventHandler(mockedServer, EventType.M_SERVER_SHUTDOWN, lock, counter)); } private void checkStatusDump(ExecutorStatus status) throws IOException { @@ -166,7 +161,7 @@ public static class TestEventHandler extends EventHandler { private AtomicInteger counter; public TestEventHandler(Server server, EventType eventType, AtomicBoolean lock, - AtomicInteger counter) { + AtomicInteger counter) { super(server, eventType); this.lock = lock; this.counter = counter; @@ -175,8 +170,7 @@ public TestEventHandler(Server server, EventType eventType, AtomicBoolean lock, @Override public void process() throws IOException { int num = counter.incrementAndGet(); - LOG.info("Running process #" + num + ", threadName=" + - Thread.currentThread().getName()); + LOG.info("Running process #" + num + ", threadName=" + Thread.currentThread().getName()); synchronized (lock) { while (lock.get()) { try { @@ -197,9 +191,8 @@ public void testAborting() throws Exception { when(server.getConfiguration()).thenReturn(conf); ExecutorService executorService = new ExecutorService("unit_test"); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_SERVER_OPERATIONS).setCorePoolSize(1)); - + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS).setCorePoolSize(1)); executorService.submit(new EventHandler(server, EventType.M_SERVER_SHUTDOWN) { @Override @@ -230,8 +223,8 @@ public void testSnapshotHandlers() throws Exception { when(server.getConfiguration()).thenReturn(conf); ExecutorService executorService = new ExecutorService("testSnapshotHandlers"); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.MASTER_SNAPSHOT_OPERATIONS).setCorePoolSize(1)); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS).setCorePoolSize(1)); CountDownLatch latch = new CountDownLatch(1); CountDownLatch waitForEventToStart = new CountDownLatch(1); @@ -247,7 +240,7 @@ public void process() throws IOException { } }); - //Wait EventHandler to start + // Wait EventHandler to start waitForEventToStart.await(10, TimeUnit.SECONDS); int activeCount = executorService.getExecutor(ExecutorType.MASTER_SNAPSHOT_OPERATIONS) .getThreadPoolExecutor().getActiveCount(); @@ -260,4 +253,3 @@ public void process() throws IOException { }); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterAllFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterAllFilter.java index 40628db3e3c8..a5650adad914 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterAllFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterAllFilter.java @@ -18,7 +18,6 @@ package 
org.apache.hadoop.hbase.filter; import java.io.IOException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -54,10 +53,8 @@ public static FilterAllFilter parseFrom(final byte[] pbBytes) throws Deserializa @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) - return true; - if (!(o instanceof FilterAllFilter)) - return false; + if (o == this) return true; + if (!(o instanceof FilterAllFilter)) return false; return true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java index c9bd4e4845fa..3fced4bacb2c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,10 +44,10 @@ import org.junit.experimental.categories.Category; /** - * By using this class as the super class of a set of tests you will have a HBase testing - * cluster available that is very suitable for writing tests for scanning and filtering against. + * By using this class as the super class of a set of tests you will have a HBase testing cluster + * available that is very suitable for writing tests for scanning and filtering against. */ -@Category({FilterTests.class, MediumTests.class}) +@Category({ FilterTests.class, MediumTests.class }) public class FilterTestingCluster { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Admin admin = null; @@ -58,7 +56,7 @@ public class FilterTestingCluster { protected static void createTable(TableName tableName, String columnFamilyName) { assertNotNull("HBaseAdmin is not initialized successfully.", admin); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes(columnFamilyName))).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes(columnFamilyName))).build(); try { admin.createTable(tableDescriptor); @@ -77,7 +75,7 @@ protected static Table openTable(TableName tableName) throws IOException { private static void deleteTables() { if (admin != null) { - for (TableName tableName: createdTables){ + for (TableName tableName : createdTables) { try { if (admin.tableExists(tableName)) { admin.disableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBigDecimalComparator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBigDecimalComparator.java index fcff0c1f627e..86f8104f9310 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBigDecimalComparator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBigDecimalComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBitComparator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBitComparator.java index 35db73980334..7c564377134f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBitComparator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBitComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,28 +30,28 @@ /** * Tests for the bit comparator */ -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestBitComparator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestBitComparator.class); - private static byte[] zeros = new byte[]{0, 0, 0, 0, 0, 0}; + private static byte[] zeros = new byte[] { 0, 0, 0, 0, 0, 0 }; private static ByteBuffer zeros_bb = ByteBuffer.wrap(zeros); - private static byte[] ones = new byte[]{1, 1, 1, 1, 1, 1}; + private static byte[] ones = new byte[] { 1, 1, 1, 1, 1, 1 }; private static ByteBuffer ones_bb = ByteBuffer.wrap(ones); - private static byte[] data0 = new byte[]{0, 1, 2, 4, 8, 15}; - private static byte[] data1 = new byte[]{15, 0, 0, 0, 0, 0}; + private static byte[] data0 = new byte[] { 0, 1, 2, 4, 8, 15 }; + private static byte[] data1 = new byte[] { 15, 0, 0, 0, 0, 0 }; private static ByteBuffer data1_bb = ByteBuffer.wrap(data1); - private static byte[] data2 = new byte[]{0, 0, 0, 0, 0, 15}; + private static byte[] data2 = new byte[] { 0, 0, 0, 0, 0, 15 }; private static ByteBuffer data2_bb = ByteBuffer.wrap(data2); - private static byte[] data3 = new byte[]{15, 15, 15, 15, 15}; + private static byte[] data3 = new byte[] { 15, 15, 15, 15, 15 }; // data for testing compareTo method with offset and length parameters - private static byte[] data1_2 = new byte[]{15, 15, 0, 0, 0, 0, 0, 15}; + private static byte[] data1_2 = new byte[] { 15, 15, 0, 0, 0, 0, 0, 15 }; private static ByteBuffer data1_2_bb = ByteBuffer.wrap(data1_2); - private static byte[] data2_2 = new byte[]{15, 0, 0, 0, 0, 0, 15, 15}; + private static byte[] data2_2 = new byte[] { 15, 0, 0, 0, 0, 0, 15, 15 }; private static ByteBuffer data2_2_bb = ByteBuffer.wrap(data2_2); private final int Equal = 0; @@ -155,4 +155,3 @@ private void testOperationWithOffset(ByteBuffer data, byte[] comparatorBytes, assertEquals(expected, comparator.compareTo(data, 1, comparatorBytes.length)); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java index b193260ab43f..3f9e69e4c747 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,74 +33,68 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; /** - * Test for the ColumnPaginationFilter, used mainly to test the successful serialization of the filter. 
- * More test functionality can be found within {@link org.apache.hadoop.hbase.filter.TestFilter#testColumnPaginationFilter()} + * Test for the ColumnPaginationFilter, used mainly to test the successful serialization of the + * filter. More test functionality can be found within + * {@link org.apache.hadoop.hbase.filter.TestFilter#testColumnPaginationFilter()} */ -@Category({FilterTests.class, SmallTests.class}) -public class TestColumnPaginationFilter -{ +@Category({ FilterTests.class, SmallTests.class }) +public class TestColumnPaginationFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestColumnPaginationFilter.class); - private static final byte[] ROW = Bytes.toBytes("row_1_test"); - private static final byte[] COLUMN_FAMILY = Bytes.toBytes("test"); - private static final byte[] VAL_1 = Bytes.toBytes("a"); - private static final byte[] COLUMN_QUALIFIER = Bytes.toBytes("foo"); - - private Filter columnPaginationFilterOffset; - private Filter columnPaginationFilter; - - @Before - public void setUp() throws Exception { - columnPaginationFilter = getColumnPaginationFilter(); - columnPaginationFilterOffset = getColumnPaginationFilterOffset(); - } - - private Filter getColumnPaginationFilter() { - return new ColumnPaginationFilter(1, 0); - } - - private Filter getColumnPaginationFilterOffset() { - return new ColumnPaginationFilter(1, COLUMN_QUALIFIER); - } - - private Filter serializationTest(Filter filter) throws Exception { - FilterProtos.Filter filterProto = ProtobufUtil.toFilter(filter); - Filter newFilter = ProtobufUtil.toFilter(filterProto); - - return newFilter; - } - - - /** - * The more specific functionality tests are contained within the TestFilters class. This class is mainly for testing - * serialization - * - * @param filter - * @throws Exception - */ - private void basicFilterTests(ColumnPaginationFilter filter) throws Exception - { - KeyValue c = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1); - assertTrue("basicFilter1", filter.filterCell(c) == Filter.ReturnCode.INCLUDE_AND_NEXT_COL); - } - - /** - * Tests serialization - * @throws Exception - */ - @Test - public void testSerialization() throws Exception { - Filter newFilter = serializationTest(columnPaginationFilter); - basicFilterTests((ColumnPaginationFilter)newFilter); - - Filter newFilterOffset = serializationTest(columnPaginationFilterOffset); - basicFilterTests((ColumnPaginationFilter)newFilterOffset); - } - - + private static final byte[] ROW = Bytes.toBytes("row_1_test"); + private static final byte[] COLUMN_FAMILY = Bytes.toBytes("test"); + private static final byte[] VAL_1 = Bytes.toBytes("a"); + private static final byte[] COLUMN_QUALIFIER = Bytes.toBytes("foo"); + + private Filter columnPaginationFilterOffset; + private Filter columnPaginationFilter; + + @Before + public void setUp() throws Exception { + columnPaginationFilter = getColumnPaginationFilter(); + columnPaginationFilterOffset = getColumnPaginationFilterOffset(); + } + + private Filter getColumnPaginationFilter() { + return new ColumnPaginationFilter(1, 0); + } + + private Filter getColumnPaginationFilterOffset() { + return new ColumnPaginationFilter(1, COLUMN_QUALIFIER); + } + + private Filter serializationTest(Filter filter) throws Exception { + FilterProtos.Filter filterProto = ProtobufUtil.toFilter(filter); + Filter newFilter = ProtobufUtil.toFilter(filterProto); + + return newFilter; + } + + /** + * The more specific functionality tests are contained within the TestFilters class. 
This class is + * mainly for testing serialization + * @param filter + * @throws Exception + */ + private void basicFilterTests(ColumnPaginationFilter filter) throws Exception { + KeyValue c = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1); + assertTrue("basicFilter1", filter.filterCell(c) == Filter.ReturnCode.INCLUDE_AND_NEXT_COL); + } + + /** + * Tests serialization + * @throws Exception + */ + @Test + public void testSerialization() throws Exception { + Filter newFilter = serializationTest(columnPaginationFilter); + basicFilterTests((ColumnPaginationFilter) newFilter); + + Filter newFilterOffset = serializationTest(columnPaginationFilterOffset); + basicFilterTests((ColumnPaginationFilter) newFilterOffset); + } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java index f2f6e2e9f181..55a702236467 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,15 +52,14 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestColumnPrefixFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestColumnPrefixFilter.class); - private final static HBaseTestingUtil TEST_UTIL = new - HBaseTestingUtil(); + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @Rule public TestName name = new TestName(); @@ -69,12 +68,9 @@ public class TestColumnPrefixFilter { public void testColumnPrefixFilter() throws IOException { String family = "Family"; TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family)) - .setMaxVersions(3) - .build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).setMaxVersions(3).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); @@ -94,16 +90,15 @@ public void testColumnPrefixFilter() throws IOException { String valueString = "ValueString"; - for (String row: rows) { + for (String row : rows) { Put p = new Put(Bytes.toBytes(row)); p.setDurability(Durability.SKIP_WAL); - for (String column: columns) { + for (String column : columns) { for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) { - KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, - valueString); + KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, valueString); p.add(kv); kvList.add(kv); - for (String s: prefixMap.keySet()) { + for (String s : prefixMap.keySet()) { if (column.startsWith(s)) { prefixMap.get(s).add(kv); } @@ -116,7 +111,7 @@ public void testColumnPrefixFilter() throws IOException { ColumnPrefixFilter filter; Scan scan = 
new Scan(); scan.readAllVersions(); - for (String s: prefixMap.keySet()) { + for (String s : prefixMap.keySet()) { filter = new ColumnPrefixFilter(Bytes.toBytes(s)); scan.setFilter(filter); @@ -138,12 +133,9 @@ public void testColumnPrefixFilter() throws IOException { public void testColumnPrefixFilterWithFilterList() throws IOException { String family = "Family"; TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family)) - .setMaxVersions(3) - .build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).setMaxVersions(3).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); @@ -163,16 +155,15 @@ public void testColumnPrefixFilterWithFilterList() throws IOException { String valueString = "ValueString"; - for (String row: rows) { + for (String row : rows) { Put p = new Put(Bytes.toBytes(row)); p.setDurability(Durability.SKIP_WAL); - for (String column: columns) { + for (String column : columns) { for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) { - KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, - valueString); + KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, valueString); p.add(kv); kvList.add(kv); - for (String s: prefixMap.keySet()) { + for (String s : prefixMap.keySet()) { if (column.startsWith(s)) { prefixMap.get(s).add(kv); } @@ -185,10 +176,10 @@ public void testColumnPrefixFilterWithFilterList() throws IOException { ColumnPrefixFilter filter; Scan scan = new Scan(); scan.readAllVersions(); - for (String s: prefixMap.keySet()) { + for (String s : prefixMap.keySet()) { filter = new ColumnPrefixFilter(Bytes.toBytes(s)); - //this is how this test differs from the one above + // this is how this test differs from the one above FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL); filterList.addFilter(filter); scan.setFilter(filterList); @@ -209,7 +200,7 @@ public void testColumnPrefixFilterWithFilterList() throws IOException { List generateRandomWords(int numberOfWords, String suffix) { Set wordSet = new HashSet<>(); for (int i = 0; i < numberOfWords; i++) { - int lengthOfWords = (int) (Math.random()*2) + 1; + int lengthOfWords = (int) (Math.random() * 2) + 1; char[] wordChar = new char[lengthOfWords]; for (int j = 0; j < wordChar.length; j++) { wordChar[j] = (char) (Math.random() * 26 + 97); @@ -227,4 +218,3 @@ List generateRandomWords(int numberOfWords, String suffix) { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java index 23b9bd6e1d90..6910374ac01a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -62,8 +61,7 @@ class StringRange { private boolean startInclusive = true; private boolean endInclusive = false; - public StringRange(String start, boolean startInclusive, String end, - boolean endInclusive) { + public StringRange(String start, boolean startInclusive, String end, boolean endInclusive) { this.start = start; this.startInclusive = startInclusive; this.end = end; @@ -111,22 +109,18 @@ public boolean equals(Object obj) { return false; } StringRange oth = (StringRange) obj; - return this.startInclusive == oth.startInclusive && - this.endInclusive == oth.endInclusive && - Objects.equals(this.start, oth.start) && - Objects.equals(this.end, oth.end); + return this.startInclusive == oth.startInclusive && this.endInclusive == oth.endInclusive + && Objects.equals(this.start, oth.start) && Objects.equals(this.end, oth.end); } @Override public String toString() { - String result = (this.startInclusive ? "[" : "(") - + (this.start == null ? null : this.start) + ", " - + (this.end == null ? null : this.end) - + (this.endInclusive ? "]" : ")"); + String result = (this.startInclusive ? "[" : "(") + (this.start == null ? null : this.start) + + ", " + (this.end == null ? null : this.end) + (this.endInclusive ? "]" : ")"); return result; } - public boolean inRange(String value) { + public boolean inRange(String value) { boolean afterStart = true; if (this.start != null) { int startCmp = value.compareTo(this.start); @@ -144,8 +138,7 @@ public boolean inRange(String value) { } - -@Category({FilterTests.class, MediumTests.class}) +@Category({ FilterTests.class, MediumTests.class }) public class TestColumnRangeFilter { @ClassRule @@ -182,8 +175,8 @@ public void tearDown() throws Exception { @Test public void TestColumnRangeFilterClient() throws Exception { String family = "Family"; - Table ht = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), - Bytes.toBytes(family), Integer.MAX_VALUE); + Table ht = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), Bytes.toBytes(family), + Integer.MAX_VALUE); List rows = generateRandomWords(10, 8); long maxTimestamp = 2; @@ -193,14 +186,10 @@ public void TestColumnRangeFilterClient() throws Exception { Map> rangeMap = new HashMap<>(); - rangeMap.put(new StringRange(null, true, "b", false), - new ArrayList<>()); - rangeMap.put(new StringRange("p", true, "q", false), - new ArrayList<>()); - rangeMap.put(new StringRange("r", false, "s", true), - new ArrayList<>()); - rangeMap.put(new StringRange("z", false, null, false), - new ArrayList<>()); + rangeMap.put(new StringRange(null, true, "b", false), new ArrayList<>()); + rangeMap.put(new StringRange("p", true, "q", false), new ArrayList<>()); + rangeMap.put(new StringRange("r", false, "s", true), new ArrayList<>()); + rangeMap.put(new StringRange("z", false, null, false), new ArrayList<>()); String valueString = "ValueString"; for (String row : rows) { @@ -208,8 +197,7 @@ public void TestColumnRangeFilterClient() throws Exception { p.setDurability(Durability.SKIP_WAL); for (String column : columns) { for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) { - KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, - valueString); + KeyValue kv = 
KeyValueTestUtil.create(row, family, column, timestamp, valueString); p.add(kv); kvList.add(kv); for (StringRange s : rangeMap.keySet()) { @@ -308,4 +296,3 @@ List generateRandomWords(int numberOfWords, int maxLengthOfWords) { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java index 6c77bcf61e0e..6717d9414c65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestComparatorSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestComparatorSerialization { @ClassRule @@ -49,7 +49,7 @@ public void testBinaryComparator() throws Exception { @Test public void testBinaryPrefixComparator() throws Exception { BinaryPrefixComparator binaryPrefixComparator = - new BinaryPrefixComparator(Bytes.toBytes("binaryPrefixComparator")); + new BinaryPrefixComparator(Bytes.toBytes("binaryPrefixComparator")); assertTrue(binaryPrefixComparator.areSerializedFieldsEqual( ProtobufUtil.toComparator(ProtobufUtil.toComparator(binaryPrefixComparator)))); } @@ -57,7 +57,7 @@ public void testBinaryPrefixComparator() throws Exception { @Test public void testBitComparator() throws Exception { BitComparator bitComparator = - new BitComparator(Bytes.toBytes("bitComparator"), BitComparator.BitwiseOp.XOR); + new BitComparator(Bytes.toBytes("bitComparator"), BitComparator.BitwiseOp.XOR); assertTrue(bitComparator.areSerializedFieldsEqual( ProtobufUtil.toComparator(ProtobufUtil.toComparator(bitComparator)))); } @@ -95,8 +95,8 @@ public void testSubstringComparator() throws Exception { public void testBigDecimalComparator() throws Exception { BigDecimal bigDecimal = new BigDecimal(Double.MIN_VALUE); BigDecimalComparator bigDecimalComparator = new BigDecimalComparator(bigDecimal); - assertTrue(bigDecimalComparator.areSerializedFieldsEqual(ProtobufUtil.toComparator(ProtobufUtil - .toComparator(bigDecimalComparator)))); + assertTrue(bigDecimalComparator.areSerializedFieldsEqual( + ProtobufUtil.toComparator(ProtobufUtil.toComparator(bigDecimalComparator)))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java index 4c97a1143a5f..3330b0b5e4d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java @@ -54,7 +54,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestDependentColumnFilter { @ClassRule @@ -62,20 +62,14 @@ public class TestDependentColumnFilter { HBaseClassTestRule.forClass(TestDependentColumnFilter.class); private static final Logger LOG = LoggerFactory.getLogger(TestDependentColumnFilter.class); - private static final byte[][] ROWS = { - Bytes.toBytes("test1"),Bytes.toBytes("test2") - }; - private 
static final byte[][] FAMILIES = { - Bytes.toBytes("familyOne"),Bytes.toBytes("familyTwo") - }; + private static final byte[][] ROWS = { Bytes.toBytes("test1"), Bytes.toBytes("test2") }; + private static final byte[][] FAMILIES = + { Bytes.toBytes("familyOne"), Bytes.toBytes("familyTwo") }; private static final long STAMP_BASE = EnvironmentEdgeManager.currentTime(); - private static final long[] STAMPS = { - STAMP_BASE-100, STAMP_BASE-200, STAMP_BASE-300 - }; + private static final long[] STAMPS = { STAMP_BASE - 100, STAMP_BASE - 200, STAMP_BASE - 300 }; private static final byte[] QUALIFIER = Bytes.toBytes("qualifier"); - private static final byte[][] BAD_VALS = { - Bytes.toBytes("bad1"), Bytes.toBytes("bad2"), Bytes.toBytes("bad3") - }; + private static final byte[][] BAD_VALS = + { Bytes.toBytes("bad1"), Bytes.toBytes("bad2"), Bytes.toBytes("bad3") }; private static final byte[] MATCH_VAL = Bytes.toBytes("match"); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -86,8 +80,8 @@ public class TestDependentColumnFilter { public void setUp() throws Exception { testVals = makeTestVals(); - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(this.getClass().getSimpleName())) + TableDescriptor tableDescriptor = TableDescriptorBuilder + .newBuilder(TableName.valueOf(this.getClass().getSimpleName())) .setColumnFamily( ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]).setMaxVersions(3).build()) .setColumnFamily( @@ -95,7 +89,7 @@ public void setUp() throws Exception { .build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); this.region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), tableDescriptor); + TEST_UTIL.getConfiguration(), tableDescriptor); addData(); } @@ -143,37 +137,34 @@ private List makeTestVals() { } /** - * This shouldn't be confused with TestFilter#verifyScan - * as expectedKeys is not the per row total, but the scan total - * + * This shouldn't be confused with TestFilter#verifyScan as expectedKeys is not the per row total, + * but the scan total * @param s * @param expectedRows * @param expectedCells * @throws IOException */ - private void verifyScan(Scan s, long expectedRows, long expectedCells) - throws IOException { + private void verifyScan(Scan s, long expectedRows, long expectedCells) throws IOException { InternalScanner scanner = this.region.getScanner(s); List results = new ArrayList<>(); int i = 0; int cells = 0; for (boolean done = true; done; i++) { done = scanner.next(results); - Arrays.sort(results.toArray(new Cell[results.size()]), - CellComparatorImpl.COMPARATOR); + Arrays.sort(results.toArray(new Cell[results.size()]), CellComparatorImpl.COMPARATOR); LOG.info("counter=" + i + ", " + results); if (results.isEmpty()) break; cells += results.size(); - assertTrue("Scanned too many rows! Only expected " + expectedRows + - " total but already scanned " + (i+1), expectedRows > i); - assertTrue("Expected " + expectedCells + " cells total but " + - "already scanned " + cells, expectedCells >= cells); + assertTrue("Scanned too many rows! 
Only expected " + expectedRows + + " total but already scanned " + (i + 1), + expectedRows > i); + assertTrue("Expected " + expectedCells + " cells total but " + "already scanned " + cells, + expectedCells >= cells); results.clear(); } - assertEquals("Expected " + expectedRows + " rows but scanned " + i + - " rows", expectedRows, i); - assertEquals("Expected " + expectedCells + " cells but scanned " + cells + - " cells", expectedCells, cells); + assertEquals("Expected " + expectedRows + " rows but scanned " + i + " rows", expectedRows, i); + assertEquals("Expected " + expectedCells + " cells but scanned " + cells + " cells", + expectedCells, cells); } /** @@ -198,33 +189,29 @@ public void testScans() throws Exception { verifyScan(scan, 2, 3); // include a comparator operation - filter = new DependentColumnFilter(FAMILIES[0], QUALIFIER, false, - CompareOperator.EQUAL, new BinaryComparator(MATCH_VAL)); + filter = new DependentColumnFilter(FAMILIES[0], QUALIFIER, false, CompareOperator.EQUAL, + new BinaryComparator(MATCH_VAL)); scan = new Scan(); scan.setFilter(filter); scan.readVersions(Integer.MAX_VALUE); /* - * expecting to get the following 3 cells - * row 0 - * put.add(FAMILIES[0], QUALIFIER, STAMPS[2], MATCH_VAL); - * put.add(FAMILIES[1], QUALIFIER, STAMPS[2], BAD_VALS[2]); - * row 1 - * put.add(FAMILIES[0], QUALIFIER, STAMPS[2], MATCH_VAL); + * expecting to get the following 3 cells row 0 put.add(FAMILIES[0], QUALIFIER, STAMPS[2], + * MATCH_VAL); put.add(FAMILIES[1], QUALIFIER, STAMPS[2], BAD_VALS[2]); row 1 + * put.add(FAMILIES[0], QUALIFIER, STAMPS[2], MATCH_VAL); */ verifyScan(scan, 2, 3); // include a comparator operation and drop comparator - filter = new DependentColumnFilter(FAMILIES[0], QUALIFIER, true, - CompareOperator.EQUAL, new BinaryComparator(MATCH_VAL)); + filter = new DependentColumnFilter(FAMILIES[0], QUALIFIER, true, CompareOperator.EQUAL, + new BinaryComparator(MATCH_VAL)); scan = new Scan(); scan.setFilter(filter); scan.readVersions(Integer.MAX_VALUE); /* - * expecting to get the following 1 cell - * row 0 - * put.add(FAMILIES[1], QUALIFIER, STAMPS[2], BAD_VALS[2]); + * expecting to get the following 1 cell row 0 put.add(FAMILIES[1], QUALIFIER, STAMPS[2], + * BAD_VALS[2]); */ verifyScan(scan, 1, 1); @@ -232,35 +219,35 @@ public void testScans() throws Exception { /** * Test that the filter correctly drops rows without a corresponding timestamp - * * @throws Exception */ @Test public void testFilterDropping() throws Exception { Filter filter = new DependentColumnFilter(FAMILIES[0], QUALIFIER); List accepted = new ArrayList<>(); - for(Cell val : testVals) { - if(filter.filterCell(val) == ReturnCode.INCLUDE) { + for (Cell val : testVals) { + if (filter.filterCell(val) == ReturnCode.INCLUDE) { accepted.add(val); } } assertEquals("check all values accepted from filterCell", 5, accepted.size()); filter.filterRowCells(accepted); - assertEquals("check filterRow(List) dropped cell without corresponding column entry", 4, accepted.size()); + assertEquals("check filterRow(List) dropped cell without corresponding column entry", + 4, accepted.size()); // start do it again with dependent column dropping on filter = new DependentColumnFilter(FAMILIES[1], QUALIFIER, true); accepted.clear(); - for(KeyValue val : testVals) { - if(filter.filterCell(val) == ReturnCode.INCLUDE) { - accepted.add(val); - } + for (KeyValue val : testVals) { + if (filter.filterCell(val) == ReturnCode.INCLUDE) { + accepted.add(val); } - assertEquals("check the filtering column cells got dropped", 2, 
accepted.size()); + } + assertEquals("check the filtering column cells got dropped", 2, accepted.size()); - filter.filterRowCells(accepted); - assertEquals("check cell retention", 2, accepted.size()); + filter.filterRowCells(accepted); + assertEquals("check cell retention", 2, accepted.size()); } /** @@ -283,12 +270,10 @@ public void testToStringWithNullComparator() { @Test public void testToStringWithNonNullComparator() { - Filter filter = - new DependentColumnFilter(FAMILIES[0], QUALIFIER, true, CompareOperator.EQUAL, - new BinaryComparator(MATCH_VAL)); + Filter filter = new DependentColumnFilter(FAMILIES[0], QUALIFIER, true, CompareOperator.EQUAL, + new BinaryComparator(MATCH_VAL)); assertNotNull(filter.toString()); assertTrue("check string contains comparator value", filter.toString().contains("match")); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java index b4d9577ee1aa..8599727eae2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java @@ -68,12 +68,11 @@ /** * Test filters at the HRegion doorstep. */ -@Category({FilterTests.class, MediumTests.class}) +@Category({ FilterTests.class, MediumTests.class }) public class TestFilter { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFilter.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestFilter.class); private final static Logger LOG = LoggerFactory.getLogger(TestFilter.class); private HRegion region; @@ -86,97 +85,81 @@ public class TestFilter { // Rows, Qualifiers, and Values are in two groups, One and Two. 
// - private static final byte [][] ROWS_ONE = { - Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"), - Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") - }; - - private static final byte [][] ROWS_TWO = { - Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"), - Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") - }; - - private static final byte [][] ROWS_THREE = { - Bytes.toBytes("testRowThree-0"), Bytes.toBytes("testRowThree-1"), - Bytes.toBytes("testRowThree-2"), Bytes.toBytes("testRowThree-3") - }; - - private static final byte [][] ROWS_FOUR = { - Bytes.toBytes("testRowFour-0"), Bytes.toBytes("testRowFour-1"), - Bytes.toBytes("testRowFour-2"), Bytes.toBytes("testRowFour-3") - }; - - private static final byte [][] FAMILIES = { - Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") - }; - - private static final byte [][] FAMILIES_1 = { - Bytes.toBytes("testFamilyThree"), Bytes.toBytes("testFamilyFour") - }; - - private static final byte [][] QUALIFIERS_ONE = { - Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), - Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") - }; - - private static final byte [][] QUALIFIERS_TWO = { - Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), - Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") - }; - - private static final byte [][] QUALIFIERS_THREE = { - Bytes.toBytes("testQualifierThree-0"), Bytes.toBytes("testQualifierThree-1"), - Bytes.toBytes("testQualifierThree-2"), Bytes.toBytes("testQualifierThree-3") - }; - - private static final byte [][] QUALIFIERS_FOUR = { - Bytes.toBytes("testQualifierFour-0"), Bytes.toBytes("testQualifierFour-1"), - Bytes.toBytes("testQualifierFour-2"), Bytes.toBytes("testQualifierFour-3") - }; - - private static final byte [][] QUALIFIERS_FIVE = { - Bytes.toBytes("testQualifierFive-0"), Bytes.toBytes("testQualifierFive-1") - }; - - private static final byte [][] VALUES = { - Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") - }; - - byte [][] NEW_FAMILIES = { - Bytes.toBytes("f1"), Bytes.toBytes("f2") - }; + private static final byte[][] ROWS_ONE = { Bytes.toBytes("testRowOne-0"), + Bytes.toBytes("testRowOne-1"), Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") }; + + private static final byte[][] ROWS_TWO = { Bytes.toBytes("testRowTwo-0"), + Bytes.toBytes("testRowTwo-1"), Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") }; + + private static final byte[][] ROWS_THREE = + { Bytes.toBytes("testRowThree-0"), Bytes.toBytes("testRowThree-1"), + Bytes.toBytes("testRowThree-2"), Bytes.toBytes("testRowThree-3") }; + + private static final byte[][] ROWS_FOUR = + { Bytes.toBytes("testRowFour-0"), Bytes.toBytes("testRowFour-1"), + Bytes.toBytes("testRowFour-2"), Bytes.toBytes("testRowFour-3") }; + + private static final byte[][] FAMILIES = + { Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") }; + + private static final byte[][] FAMILIES_1 = + { Bytes.toBytes("testFamilyThree"), Bytes.toBytes("testFamilyFour") }; + + private static final byte[][] QUALIFIERS_ONE = + { Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), + Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") }; + + private static final byte[][] QUALIFIERS_TWO = + { Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), + Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") }; + + private static final 
byte[][] QUALIFIERS_THREE = + { Bytes.toBytes("testQualifierThree-0"), Bytes.toBytes("testQualifierThree-1"), + Bytes.toBytes("testQualifierThree-2"), Bytes.toBytes("testQualifierThree-3") }; + + private static final byte[][] QUALIFIERS_FOUR = + { Bytes.toBytes("testQualifierFour-0"), Bytes.toBytes("testQualifierFour-1"), + Bytes.toBytes("testQualifierFour-2"), Bytes.toBytes("testQualifierFour-3") }; + + private static final byte[][] QUALIFIERS_FIVE = + { Bytes.toBytes("testQualifierFive-0"), Bytes.toBytes("testQualifierFive-1") }; + + private static final byte[][] VALUES = + { Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") }; + + byte[][] NEW_FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2") }; private long numRows = (long) ROWS_ONE.length + ROWS_TWO.length; private long colsPerRow = (long) FAMILIES.length * QUALIFIERS_ONE.length; @Before public void setUp() throws Exception { - TableDescriptor tableDescriptor = TableDescriptorBuilder - .newBuilder(TableName.valueOf("TestFilter")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]).setMinVersions(100) - .setMaxVersions(100).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[1])) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES_1[0])) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES_1[1])) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NEW_FAMILIES[0])) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NEW_FAMILIES[1])).build(); + TableDescriptor tableDescriptor = + TableDescriptorBuilder.newBuilder(TableName.valueOf("TestFilter")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]) + .setMinVersions(100).setMaxVersions(100).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[1])) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES_1[0])) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES_1[1])) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NEW_FAMILIES[0])) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NEW_FAMILIES[1])).build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); this.region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), tableDescriptor); + TEST_UTIL.getConfiguration(), tableDescriptor); // Insert first half - for(byte [] ROW : ROWS_ONE) { + for (byte[] ROW : ROWS_ONE) { Put p = new Put(ROW); p.setDurability(Durability.SKIP_WAL); - for(byte [] QUALIFIER : QUALIFIERS_ONE) { + for (byte[] QUALIFIER : QUALIFIERS_ONE) { p.addColumn(FAMILIES[0], QUALIFIER, VALUES[0]); } this.region.put(p); } - for(byte [] ROW : ROWS_TWO) { + for (byte[] ROW : ROWS_TWO) { Put p = new Put(ROW); p.setDurability(Durability.SKIP_WAL); - for(byte [] QUALIFIER : QUALIFIERS_TWO) { + for (byte[] QUALIFIER : QUALIFIERS_TWO) { p.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]); } this.region.put(p); @@ -186,31 +169,31 @@ public void setUp() throws Exception { this.region.flush(true); // Insert second half (reverse families) - for(byte [] ROW : ROWS_ONE) { + for (byte[] ROW : ROWS_ONE) { Put p = new Put(ROW); p.setDurability(Durability.SKIP_WAL); - for(byte [] QUALIFIER : QUALIFIERS_ONE) { + for (byte[] QUALIFIER : QUALIFIERS_ONE) { p.addColumn(FAMILIES[1], QUALIFIER, VALUES[0]); } this.region.put(p); } - for(byte [] ROW : ROWS_TWO) { + for (byte[] ROW : ROWS_TWO) { Put p = new Put(ROW); p.setDurability(Durability.SKIP_WAL); - for(byte [] QUALIFIER : QUALIFIERS_TWO) { + for (byte[] QUALIFIER : QUALIFIERS_TWO) { 
p.addColumn(FAMILIES[0], QUALIFIER, VALUES[1]); } this.region.put(p); } // Delete the second qualifier from all rows and families - for(byte [] ROW : ROWS_ONE) { + for (byte[] ROW : ROWS_ONE) { Delete d = new Delete(ROW); d.addColumns(FAMILIES[0], QUALIFIERS_ONE[1]); d.addColumns(FAMILIES[1], QUALIFIERS_ONE[1]); this.region.delete(d); } - for(byte [] ROW : ROWS_TWO) { + for (byte[] ROW : ROWS_TWO) { Delete d = new Delete(ROW); d.addColumns(FAMILIES[0], QUALIFIERS_TWO[1]); d.addColumns(FAMILIES[1], QUALIFIERS_TWO[1]); @@ -219,13 +202,13 @@ public void setUp() throws Exception { colsPerRow -= 2; // Delete the second rows from both groups, one column at a time - for(byte [] QUALIFIER : QUALIFIERS_ONE) { + for (byte[] QUALIFIER : QUALIFIERS_ONE) { Delete d = new Delete(ROWS_ONE[1]); d.addColumns(FAMILIES[0], QUALIFIER); d.addColumns(FAMILIES[1], QUALIFIER); this.region.delete(d); } - for(byte [] QUALIFIER : QUALIFIERS_TWO) { + for (byte[] QUALIFIER : QUALIFIERS_TWO) { Delete d = new Delete(ROWS_TWO[1]); d.addColumns(FAMILIES[0], QUALIFIER); d.addColumns(FAMILIES[1], QUALIFIER); @@ -293,7 +276,7 @@ public void testRegionScannerReseek() throws Exception { scanner.next(results); for (Cell keyValue : results) { assertTrue("The rows with ROWS_TWO as row key should be appearing.", - CellUtil.matchingRows(keyValue, ROWS_THREE[1])); + CellUtil.matchingRows(keyValue, ROWS_THREE[1])); } // again try to reseek to a value before ROWS_THREE[1] scanner.reseek(ROWS_ONE[1]); @@ -302,7 +285,7 @@ public void testRegionScannerReseek() throws Exception { scanner.next(results); for (Cell keyValue : results) { assertFalse("Cannot rewind back to a value less than previous reseek.", - Bytes.toString(CellUtil.cloneRow(keyValue)).contains("testRowOne")); + Bytes.toString(CellUtil.cloneRow(keyValue)).contains("testRowOne")); } } @@ -319,7 +302,7 @@ public void testNoFilter() throws Exception { // One family s = new Scan(); s.addFamily(FAMILIES[0]); - verifyScan(s, expectedRows, expectedKeys/2); + verifyScan(s, expectedRows, expectedKeys / 2); } @Test @@ -347,50 +330,49 @@ public void testPrefixFilterWithReverseScan() throws Exception { public void testPageFilter() throws Exception { // KVs in first 6 rows - KeyValue [] expectedKVs = { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], 
FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) - }; + KeyValue[] expectedKVs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new 
KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; // Grab all 6 rows long expectedRows = 6; @@ -511,8 +493,7 @@ public void testPageFilterWithReverseScan() throws Exception { } @Test - public void testWhileMatchFilterWithFilterRowWithReverseScan() - throws Exception { + public void testWhileMatchFilterWithFilterRowWithReverseScan() throws Exception { final int pageSize = 4; Scan s = new Scan(); @@ -527,26 +508,23 @@ public void testWhileMatchFilterWithFilterRowWithReverseScan() scannerCounter++; if (scannerCounter >= pageSize) { - Assert.assertTrue( - "The WhileMatchFilter should now filter all remaining", - filter.filterAllRemaining()); + Assert.assertTrue("The WhileMatchFilter should now filter all remaining", + filter.filterAllRemaining()); } if (!isMoreResults) { break; } } scanner.close(); - Assert.assertEquals("The page filter returned more rows than expected", - pageSize, scannerCounter); + Assert.assertEquals("The page filter returned more rows than expected", pageSize, + scannerCounter); } @Test - public void testWhileMatchFilterWithFilterRowKeyWithReverseScan() - throws Exception { + public void testWhileMatchFilterWithFilterRowKeyWithReverseScan() throws Exception { Scan s = new Scan(); String prefix = "testRowOne"; - WhileMatchFilter filter = new WhileMatchFilter(new PrefixFilter( - Bytes.toBytes(prefix))); + WhileMatchFilter filter = new WhileMatchFilter(new PrefixFilter(Bytes.toBytes(prefix))); s.setFilter(filter); s.setReversed(true); @@ -554,11 +532,9 @@ public void testWhileMatchFilterWithFilterRowKeyWithReverseScan() while (true) { ArrayList values = new ArrayList<>(); boolean isMoreResults = scanner.next(values); - if (!isMoreResults - || !Bytes.toString(CellUtil.cloneRow(values.get(0))).startsWith(prefix)) { - Assert.assertTrue( - "The WhileMatchFilter should now filter all remaining", - filter.filterAllRemaining()); + if (!isMoreResults || !Bytes.toString(CellUtil.cloneRow(values.get(0))).startsWith(prefix)) { + Assert.assertTrue("The WhileMatchFilter should now filter all remaining", + filter.filterAllRemaining()); } if (!isMoreResults) { break; @@ -568,12 +544,8 @@ public void testWhileMatchFilterWithFilterRowKeyWithReverseScan() } /** - * Tests the the {@link WhileMatchFilter} works in combination with a - * {@link Filter} that uses the - * {@link Filter#filterRow()} method. - * - * See HBASE-2258. - * + * Tests the the {@link WhileMatchFilter} works in combination with a {@link Filter} that uses the + * {@link Filter#filterRow()} method. See HBASE-2258. 
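The page-filter and while-match tests being rewrapped here all reduce to one client-side idiom: wrap a PageFilter in a WhileMatchFilter so the scan terminates as soon as the page limit is reached instead of merely dropping later rows. A minimal sketch, assuming an open Connection and a placeholder family name "f1":

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class PageScanSketch {
  /** Returns at most pageSize rows; WhileMatchFilter ends the scan once PageFilter gives up. */
  static int scanOnePage(Connection conn, long pageSize) throws IOException {
    Scan scan = new Scan().addFamily(Bytes.toBytes("f1")); // "f1" is a placeholder family
    scan.setFilter(new WhileMatchFilter(new PageFilter(pageSize)));
    int rows = 0;
    try (Table table = conn.getTable(TableName.valueOf("TestFilter"));
        ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        rows++;
      }
    }
    return rows;
  }
}

Note that PageFilter is evaluated independently per region, so on a multi-region table the client may still have to trim the combined result; the single-region test above never hits that case.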
* @throws Exception */ @Test @@ -591,7 +563,8 @@ public void testWhileMatchFilterWithFilterRow() throws Exception { scannerCounter++; if (scannerCounter >= pageSize) { - assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining()); + assertTrue("The WhileMatchFilter should now filter all remaining", + filter.filterAllRemaining()); } if (!isMoreResults) { break; @@ -600,14 +573,15 @@ public void testWhileMatchFilterWithFilterRow() throws Exception { assertEquals("The page filter returned more rows than expected", pageSize, scannerCounter); } - /** * The following filter simulates a pre-0.96 filter where filterRow() is defined while * hasFilterRow() returns false */ static class OldTestFilter extends FilterBase { @Override - public byte [] toByteArray() {return null;} + public byte[] toByteArray() { + return null; + } @Override public boolean hasFilterRow() { @@ -628,10 +602,7 @@ public ReturnCode filterCell(final Cell ignored) throws IOException { /** * The following test is to ensure old(such as hbase0.94) filterRow() can be correctly fired in - * 0.96+ code base. - * - * See HBASE-10366 - * + * 0.96+ code base. See HBASE-10366 * @throws Exception */ @Test @@ -647,12 +618,8 @@ public void test94FilterRowCompatibility() throws Exception { } /** - * Tests the the {@link WhileMatchFilter} works in combination with a - * {@link Filter} that uses the - * {@link Filter#filterRowKey(Cell)} method. - * - * See HBASE-2258. - * + * Tests the the {@link WhileMatchFilter} works in combination with a {@link Filter} that uses the + * {@link Filter#filterRowKey(Cell)} method. See HBASE-2258. * @throws Exception */ @Test @@ -667,7 +634,8 @@ public void testWhileMatchFilterWithFilterRowKey() throws Exception { ArrayList values = new ArrayList<>(); boolean isMoreResults = scanner.next(values); if (!isMoreResults || !Bytes.toString(CellUtil.cloneRow(values.get(0))).startsWith(prefix)) { - assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining()); + assertTrue("The WhileMatchFilter should now filter all remaining", + filter.filterAllRemaining()); } if (!isMoreResults) { break; @@ -676,26 +644,23 @@ public void testWhileMatchFilterWithFilterRowKey() throws Exception { } /** - * Tests the the {@link WhileMatchFilter} works in combination with a - * {@link Filter} that uses the {@link Filter#filterCell(Cell)} method. - * - * See HBASE-2258. - * + * Tests the the {@link WhileMatchFilter} works in combination with a {@link Filter} that uses the + * {@link Filter#filterCell(Cell)} method. See HBASE-2258. 
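OldTestFilter above simulates a pre-0.96 filter whose filterRow() carries the row-level decision while hasFilterRow() still answers false. For comparison, a present-day custom filter would advertise the hook explicitly; the sketch below is illustrative only, and a filter meant to be shipped to region servers would additionally need toByteArray() and a static parseFrom() for serialization, both omitted here.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.filter.FilterBase;

/** Keeps every cell but vetoes whole rows once a row quota has been reached. */
public class RowQuotaFilter extends FilterBase {
  private final int quota;
  private int accepted = 0;

  public RowQuotaFilter(int quota) {
    this.quota = quota;
  }

  @Override
  public ReturnCode filterCell(final Cell ignored) throws IOException {
    return ReturnCode.INCLUDE; // per-cell decision: keep everything
  }

  @Override
  public boolean hasFilterRow() {
    return true; // tell the scanner that filterRow() must be consulted
  }

  @Override
  public boolean filterRow() throws IOException {
    return ++accepted > quota; // true means drop this row
  }
}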
* @throws Exception */ @Test public void testWhileMatchFilterWithFilterCell() throws Exception { Scan s = new Scan(); - WhileMatchFilter filter = new WhileMatchFilter( - new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], CompareOperator.EQUAL, Bytes.toBytes("foo")) - ); + WhileMatchFilter filter = new WhileMatchFilter(new SingleColumnValueFilter(FAMILIES[0], + QUALIFIERS_ONE[0], CompareOperator.EQUAL, Bytes.toBytes("foo"))); s.setFilter(filter); InternalScanner scanner = this.region.getScanner(s); while (true) { ArrayList values = new ArrayList<>(); boolean isMoreResults = scanner.next(values); - assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining()); + assertTrue("The WhileMatchFilter should now filter all remaining", + filter.filterAllRemaining()); if (!isMoreResults) { break; } @@ -711,7 +676,7 @@ public void testInclusiveStopFilter() throws IOException { long expectedRows = (this.numRows / 2) - 1; long expectedKeys = this.colsPerRow; Scan s = new Scan().withStartRow(Bytes.toBytes("testRowOne-0")) - .withStopRow(Bytes.toBytes("testRowOne-3")); + .withStopRow(Bytes.toBytes("testRowOne-3")); verifyScan(s, expectedRows, expectedKeys); // Now use start row with inclusive stop filter @@ -726,7 +691,7 @@ public void testInclusiveStopFilter() throws IOException { expectedRows = (this.numRows / 2) - 1; expectedKeys = this.colsPerRow; s = new Scan().withStartRow(Bytes.toBytes("testRowTwo-0")) - .withStopRow(Bytes.toBytes("testRowTwo-3")); + .withStopRow(Bytes.toBytes("testRowTwo-3")); verifyScan(s, expectedRows, expectedKeys); // Now use start row with inclusive stop filter @@ -746,7 +711,7 @@ public void testInclusiveStopFilterWithReverseScan() throws IOException { long expectedRows = (this.numRows / 2) - 1; long expectedKeys = this.colsPerRow; Scan s = new Scan().withStartRow(Bytes.toBytes("testRowOne-3")) - .withStopRow(Bytes.toBytes("testRowOne-0")); + .withStopRow(Bytes.toBytes("testRowOne-0")); s.setReversed(true); verifyScan(s, expectedRows, expectedKeys); @@ -763,7 +728,7 @@ public void testInclusiveStopFilterWithReverseScan() throws IOException { expectedRows = (this.numRows / 2) - 1; expectedKeys = this.colsPerRow; s = new Scan().withStartRow(Bytes.toBytes("testRowTwo-3")) - .withStopRow(Bytes.toBytes("testRowTwo-0")); + .withStopRow(Bytes.toBytes("testRowTwo-0")); s.setReversed(true); verifyScan(s, expectedRows, expectedKeys); @@ -816,7 +781,7 @@ public void testQualifierFilter() throws IOException { f = new QualifierFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + .withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -828,7 +793,7 @@ public void testQualifierFilter() throws IOException { f = new QualifierFilter(CompareOperator.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + .withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -840,19 +805,18 @@ public void testQualifierFilter() throws IOException { f = new QualifierFilter(CompareOperator.GREATER, new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + 
.withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); // Match keys not equal to // Look across rows and fully validate the keys and ordering // Expect varied numbers of keys, 4 per row in group one, 6 per row in group two - f = new QualifierFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(QUALIFIERS_ONE[2])); + f = new QualifierFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(QUALIFIERS_ONE[2])); s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), @@ -888,20 +852,17 @@ public void testQualifierFilter() throws IOException { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); - // Test across rows and groups with a regex // Filter out "test*-2" // Expect 4 keys per row across both groups - f = new QualifierFilter(CompareOperator.NOT_EQUAL, - new RegexStringComparator("test.+-2")); + f = new QualifierFilter(CompareOperator.NOT_EQUAL, new RegexStringComparator("test.+-2")); s = new Scan(); s.setFilter(f); - kvs = new KeyValue [] { + kvs = new KeyValue[] { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), @@ -931,8 +892,7 @@ public void testQualifierFilter() throws IOException { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -951,8 +911,7 @@ public void testFamilyFilter() throws IOException { // Match keys less than given family, should return nothing expectedRows = 0; expectedKeys = 0; - f = new FamilyFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testFamily"))); + f = new FamilyFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("testFamily"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -973,7 +932,7 @@ public void testFamilyFilter() throws IOException { f = new FamilyFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("testFamilyOne"))); s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + .withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -984,7 +943,7 @@ public void testFamilyFilter() throws IOException { f = new FamilyFilter(CompareOperator.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes("testFamilyOne"))); s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + .withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -995,91 +954,86 @@ public void testFamilyFilter() throws IOException { f = new FamilyFilter(CompareOperator.GREATER, new 
BinaryComparator(Bytes.toBytes("testFamilyOne"))); s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(Bytes.toBytes("testRowTwo")); + .withStopRow(Bytes.toBytes("testRowTwo")); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); // Match keys not equal to given family // Look across rows and fully validate the keys and ordering - f = new FamilyFilter(CompareOperator.NOT_EQUAL, - new BinaryComparator(FAMILIES[1])); + f = new FamilyFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(FAMILIES[1])); s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - }; + KeyValue[] kvs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); // Test across rows and groups with a regex // Filter out "test*-2" // Expect 4 keys per row 
across both groups - f = new FamilyFilter(CompareOperator.NOT_EQUAL, - new RegexStringComparator("test.*One")); + f = new FamilyFilter(CompareOperator.NOT_EQUAL, new RegexStringComparator("test.*One")); s = new Scan(); s.setFilter(f); - kvs = new KeyValue [] { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + kvs = new KeyValue[] { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } - @Test public void testRowFilter() throws IOException { // Match a single row, all keys long expectedRows = 1; long expectedKeys = this.colsPerRow; - Filter f = new RowFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + Filter f = + new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, 
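The QualifierFilter, FamilyFilter and RowFilter cases being rewrapped above all share one shape: a CompareOperator paired with a comparator, either an exact BinaryComparator or a RegexStringComparator. Two representative constructions are sketched here; the qualifier bytes and the "-2" regex are placeholders echoing the test data, not part of the patch.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ComparatorFilterSketch {
  /** Exact match on qualifier bytes: keep every cell whose qualifier is not "q-2". */
  static Scan skipQualifier() {
    Filter f = new QualifierFilter(CompareOperator.NOT_EQUAL,
      new BinaryComparator(Bytes.toBytes("q-2")));
    return new Scan().setFilter(f);
  }

  /** Regex on the row key: keep only rows ending in "-2", as in the ".+-2" case above. */
  static Scan onlyDashTwoRows() {
    Filter f = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2"));
    return new Scan().setFilter(f);
  }
}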
expectedRows, expectedKeys); @@ -1087,8 +1041,7 @@ public void testRowFilter() throws IOException { // Match a two rows, one from each group, using regex expectedRows = 2; expectedKeys = this.colsPerRow; - f = new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator("testRow.+-2")); + f = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("testRow.+-2")); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -1097,8 +1050,7 @@ public void testRowFilter() throws IOException { // Expect all keys in one row expectedRows = 1; expectedKeys = this.colsPerRow; - f = new RowFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = new RowFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -1137,8 +1089,7 @@ public void testRowFilter() throws IOException { // Expect all keys in all but two rows expectedRows = this.numRows - 2; expectedKeys = this.colsPerRow; - f = new RowFilter(CompareOperator.GREATER, - new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + f = new RowFilter(CompareOperator.GREATER, new BinaryComparator(Bytes.toBytes("testRowOne-2"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -1151,7 +1102,7 @@ public void testRowFilter() throws IOException { s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), @@ -1186,20 +1137,17 @@ public void testRowFilter() throws IOException { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); - // Test across rows and groups with a regex // Filter out everything that doesn't match "*-2" // Expect all keys in two rows - f = new RowFilter(CompareOperator.EQUAL, - new RegexStringComparator(".+-2")); + f = new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(".+-2")); s = new Scan(); s.setFilter(f); - kvs = new KeyValue [] { + kvs = new KeyValue[] { // testRowOne-2 new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), @@ -1213,8 +1161,7 @@ public void testRowFilter() throws IOException { new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) - }; + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; verifyScanFull(s, kvs); } @@ -1225,8 +1172,8 @@ public void testValueFilter() throws IOException { // Match group one rows long expectedRows = this.numRows / 2; long expectedKeys = this.colsPerRow; - Filter f = new ValueFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testValueOne"))); + Filter f = + new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testValueOne"))); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, 
expectedRows, expectedKeys); @@ -1234,8 +1181,7 @@ public void testValueFilter() throws IOException { // Match group two rows expectedRows = this.numRows / 2; expectedKeys = this.colsPerRow; - f = new ValueFilter(CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("testValueTwo"))); + f = new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -1243,8 +1189,7 @@ public void testValueFilter() throws IOException { // Match all values using regex expectedRows = this.numRows; expectedKeys = this.colsPerRow; - f = new ValueFilter(CompareOperator.EQUAL, - new RegexStringComparator("testValue((One)|(Two))")); + f = new ValueFilter(CompareOperator.EQUAL, new RegexStringComparator("testValue((One)|(Two))")); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -1253,8 +1198,7 @@ public void testValueFilter() throws IOException { // Expect group one rows expectedRows = this.numRows / 2; expectedKeys = this.colsPerRow; - f = new ValueFilter(CompareOperator.LESS, - new BinaryComparator(Bytes.toBytes("testValueTwo"))); + f = new ValueFilter(CompareOperator.LESS, new BinaryComparator(Bytes.toBytes("testValueTwo"))); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, expectedRows, expectedKeys); @@ -1317,7 +1261,7 @@ public void testValueFilter() throws IOException { s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowTwo-0 new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), @@ -1338,8 +1282,7 @@ public void testValueFilter() throws IOException { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -1353,7 +1296,7 @@ public void testSkipFilter() throws IOException { Scan s = new Scan(); s.setFilter(f); - KeyValue [] kvs = { + KeyValue[] kvs = { // testRowTwo-0 new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), @@ -1374,8 +1317,7 @@ public void testSkipFilter() throws IOException { new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -1395,9 +1337,7 @@ public void testFilterList() throws IOException { Scan s = new Scan(); s.addFamily(FAMILIES[0]); s.setFilter(f); - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]) - }; + KeyValue[] kvs = { new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]) }; verifyScanFull(s, kvs); // Test getting everything with a MUST_PASS_ONE filter including row, qf, val @@ -1411,7 +1351,6 @@ public void testFilterList() throws IOException { s.setFilter(f); verifyScanNoEarlyOut(s, this.numRows, this.colsPerRow); - } @Test @@ -1419,14 +1358,12 @@ public void testFirstKeyOnlyFilter() 
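testSkipFilter and testFilterList, reformatted above, exercise filter composition: SkipFilter discards an entire row once its wrapped filter rejects any cell in it, and FilterList combines member filters with MUST_PASS_ALL (logical AND) or MUST_PASS_ONE (logical OR). A short sketch of both follows; the qualifier and value strings are hypothetical.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.filter.SkipFilter;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class CompositionSketch {
  /** Skip any row that contains at least one cell whose value equals "testValueOne". */
  static Scan skipRowsWithValueOne() {
    ValueFilter vf = new ValueFilter(CompareOperator.NOT_EQUAL,
      new BinaryComparator(Bytes.toBytes("testValueOne")));
    return new Scan().setFilter(new SkipFilter(vf));
  }

  /** AND two conditions: qualifier equals "q1" and value equals "v1". */
  static Scan qualifierAndValue() {
    FilterList all = new FilterList(FilterList.Operator.MUST_PASS_ALL,
      new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("q1"))),
      new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("v1"))));
    return new Scan().setFilter(all);
  }
}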
throws IOException { Scan s = new Scan(); s.setFilter(new FirstKeyOnlyFilter()); // Expected KVs, the first KV from each of the remaining 6 rows - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + KeyValue[] kvs = { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) - }; + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) }; verifyScanFull(s, kvs); } @@ -1436,13 +1373,12 @@ public void testFilterListWithSingleColumnValueFilter() throws IOException { // Scan using SingleColumnValueFilter SingleColumnValueFilter f1 = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], - CompareOperator.EQUAL, VALUES[0]); - f1.setFilterIfMissing( true ); + CompareOperator.EQUAL, VALUES[0]); + f1.setFilterIfMissing(true); Scan s1 = new Scan(); s1.addFamily(FAMILIES[0]); s1.setFilter(f1); - KeyValue [] kvs1 = { - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + KeyValue[] kvs1 = { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), @@ -1450,20 +1386,18 @@ public void testFilterListWithSingleColumnValueFilter() throws IOException { new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - }; + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), }; verifyScanNoEarlyOut(s1, 3, 3); verifyScanFull(s1, kvs1); // Scan using another SingleColumnValueFilter, expect disjoint result SingleColumnValueFilter f2 = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_TWO[0], - CompareOperator.EQUAL, VALUES[1]); - f2.setFilterIfMissing( true ); + CompareOperator.EQUAL, VALUES[1]); + f2.setFilterIfMissing(true); Scan s2 = new Scan(); s2.addFamily(FAMILIES[0]); s2.setFilter(f2); - KeyValue [] kvs2 = { - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + KeyValue[] kvs2 = { new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), @@ -1471,8 +1405,7 @@ public void testFilterListWithSingleColumnValueFilter() throws IOException { new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanNoEarlyOut(s2, 3, 3); verifyScanFull(s2, kvs2); @@ -1483,8 +1416,7 @@ public void testFilterListWithSingleColumnValueFilter() throws IOException { Scan s = new Scan(); 
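FirstKeyOnlyFilter, covered by testFirstKeyOnlyFilter above, returns only the first cell of every row, which is the usual trick for cheap existence checks and row counting. A client-side sketch, assuming an open Connection and reusing the test's table name:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class RowCountSketch {
  /** Counts rows while shipping only one cell per row back to the client. */
  static long countRows(Connection conn) throws IOException {
    Scan scan = new Scan().setFilter(new FirstKeyOnlyFilter());
    long rows = 0;
    try (Table table = conn.getTable(TableName.valueOf("TestFilter"));
        ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        rows++;
      }
    }
    return rows;
  }
}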
s.addFamily(FAMILIES[0]); s.setFilter(f); - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + KeyValue[] kvs = { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), @@ -1501,8 +1433,7 @@ public void testFilterListWithSingleColumnValueFilter() throws IOException { new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanNoEarlyOut(s, 6, 3); verifyScanFull(s, kvs); } @@ -1513,14 +1444,14 @@ public void testFilterListWithPrefixFilter() throws IOException { byte[] family = Bytes.toBytes("f1"); byte[] qualifier = Bytes.toBytes("q1"); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); HRegion testRegion = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), tableDescriptor); - for(int i=0; i<5; i++) { - Put p = new Put(Bytes.toBytes((char)('a'+i) + "row")); + for (int i = 0; i < 5; i++) { + Put p = new Put(Bytes.toBytes((char) ('a' + i) + "row")); p.setDurability(Durability.SKIP_WAL); p.addColumn(family, qualifier, Bytes.toBytes(String.valueOf(111 + i))); testRegion.put(p); @@ -1528,10 +1459,10 @@ public void testFilterListWithPrefixFilter() throws IOException { testRegion.flush(true); // rows starting with "b" - PrefixFilter pf = new PrefixFilter(new byte[] {'b'}) ; + PrefixFilter pf = new PrefixFilter(new byte[] { 'b' }); // rows with value of column 'q1' set to '113' - SingleColumnValueFilter scvf = new SingleColumnValueFilter( - family, qualifier, CompareOperator.EQUAL, Bytes.toBytes("113")); + SingleColumnValueFilter scvf = + new SingleColumnValueFilter(family, qualifier, CompareOperator.EQUAL, Bytes.toBytes("113")); // combine these two with OR in a FilterList FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, pf, scvf); @@ -1542,17 +1473,17 @@ public void testFilterListWithPrefixFilter() throws IOException { int resultCount = 0; while (scanner.next(results)) { resultCount++; - byte[] row = CellUtil.cloneRow(results.get(0)); + byte[] row = CellUtil.cloneRow(results.get(0)); LOG.debug("Found row: " + Bytes.toStringBinary(row)); - assertTrue(Bytes.equals(row, Bytes.toBytes("brow")) - || Bytes.equals(row, Bytes.toBytes("crow"))); + assertTrue( + Bytes.equals(row, Bytes.toBytes("brow")) || Bytes.equals(row, Bytes.toBytes("crow"))); results.clear(); } assertEquals(2, resultCount); scanner.close(); - WAL wal = ((HRegion)testRegion).getWAL(); - ((HRegion)testRegion).close(); + WAL wal = ((HRegion) testRegion).getWAL(); + ((HRegion) testRegion).close(); wal.close(); } @@ -1572,20 +1503,18 @@ public void testSingleColumnValueFilter() throws IOException { // Since group two rows don't have these qualifiers, 
they will pass // so limiting scan to group one List filters = new ArrayList<>(); - filters.add(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], - CompareOperator.EQUAL, VALUES[0])); - filters.add(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[2], - CompareOperator.EQUAL, VALUES[1])); + filters.add(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], CompareOperator.EQUAL, + VALUES[0])); + filters.add(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[2], CompareOperator.EQUAL, + VALUES[1])); Filter f = new FilterList(Operator.MUST_PASS_ALL, filters); Scan s = new Scan().withStartRow(ROWS_ONE[0]).withStopRow(ROWS_TWO[0]); s.addFamily(FAMILIES[0]); s.setFilter(f); // Expect only one row, all qualifiers - KeyValue [] kvs = { - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + KeyValue[] kvs = { new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[1]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]) - }; + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]) }; verifyScanNoEarlyOut(s, 1, 3); verifyScanFull(s, kvs); @@ -1593,9 +1522,9 @@ public void testSingleColumnValueFilter() throws IOException { // need to wrap SCVFs in SkipFilters filters = new ArrayList<>(); filters.add(new SkipFilter(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], - CompareOperator.EQUAL, VALUES[0]))); + CompareOperator.EQUAL, VALUES[0]))); filters.add(new SkipFilter(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[2], - CompareOperator.EQUAL, VALUES[1]))); + CompareOperator.EQUAL, VALUES[1]))); f = new FilterList(Operator.MUST_PASS_ALL, filters); s = new Scan().withStartRow(ROWS_ONE[0]).withStopRow(ROWS_TWO[0]); s.addFamily(FAMILIES[0]); @@ -1606,22 +1535,19 @@ public void testSingleColumnValueFilter() throws IOException { // More tests from HBASE-1821 for Clint and filterIfMissing flag - byte [][] ROWS_THREE = { - Bytes.toBytes("rowThree-0"), Bytes.toBytes("rowThree-1"), - Bytes.toBytes("rowThree-2"), Bytes.toBytes("rowThree-3") - }; + byte[][] ROWS_THREE = { Bytes.toBytes("rowThree-0"), Bytes.toBytes("rowThree-1"), + Bytes.toBytes("rowThree-2"), Bytes.toBytes("rowThree-3") }; // Give row 0 and 2 QUALIFIERS_ONE[0] (VALUE[0] VALUE[1]) // Give row 1 and 3 QUALIFIERS_ONE[1] (VALUE[0] VALUE[1]) - KeyValue [] srcKVs = new KeyValue [] { - new KeyValue(ROWS_THREE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_THREE[1], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[1]), - new KeyValue(ROWS_THREE[2], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[0]), - new KeyValue(ROWS_THREE[3], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[1]) - }; + KeyValue[] srcKVs = + new KeyValue[] { new KeyValue(ROWS_THREE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_THREE[1], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[1]), + new KeyValue(ROWS_THREE[2], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[0]), + new KeyValue(ROWS_THREE[3], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[1]) }; - for(KeyValue kv : srcKVs) { + for (KeyValue kv : srcKVs) { Put put = new Put(CellUtil.cloneRow(kv)).add(kv); put.setDurability(Durability.SKIP_WAL); this.region.put(put); @@ -1629,34 +1555,34 @@ public void testSingleColumnValueFilter() throws IOException { // Match VALUES[0] against QUALIFIERS_ONE[0] with filterIfMissing = false // Expect 3 rows (0, 2, 3) - SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILIES[0], - QUALIFIERS_ONE[0], CompareOperator.EQUAL, 
VALUES[0]); + SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], + CompareOperator.EQUAL, VALUES[0]); s = new Scan().withStartRow(ROWS_THREE[0]).withStopRow(Bytes.toBytes("rowThree-4")); s.addFamily(FAMILIES[0]); s.setFilter(scvf); - kvs = new KeyValue [] { srcKVs[0], srcKVs[2], srcKVs[3] }; + kvs = new KeyValue[] { srcKVs[0], srcKVs[2], srcKVs[3] }; verifyScanFull(s, kvs); // Match VALUES[0] against QUALIFIERS_ONE[0] with filterIfMissing = true // Expect 1 row (0) - scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], - CompareOperator.EQUAL, VALUES[0]); + scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], CompareOperator.EQUAL, + VALUES[0]); scvf.setFilterIfMissing(true); s = new Scan().withStartRow(ROWS_THREE[0]).withStopRow(Bytes.toBytes("rowThree-4")); s.addFamily(FAMILIES[0]); s.setFilter(scvf); - kvs = new KeyValue [] { srcKVs[0] }; + kvs = new KeyValue[] { srcKVs[0] }; verifyScanFull(s, kvs); // Match VALUES[1] against QUALIFIERS_ONE[1] with filterIfMissing = true // Expect 1 row (3) - scvf = new SingleColumnValueFilter(FAMILIES[0], - QUALIFIERS_ONE[1], CompareOperator.EQUAL, VALUES[1]); + scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[1], CompareOperator.EQUAL, + VALUES[1]); scvf.setFilterIfMissing(true); s = new Scan().withStartRow(ROWS_THREE[0]).withStopRow(Bytes.toBytes("rowThree-4")); s.addFamily(FAMILIES[0]); s.setFilter(scvf); - kvs = new KeyValue [] { srcKVs[3] }; + kvs = new KeyValue[] { srcKVs[3] }; verifyScanFull(s, kvs); // Add QUALIFIERS_ONE[1] to ROWS_THREE[0] with VALUES[0] @@ -1665,13 +1591,13 @@ public void testSingleColumnValueFilter() throws IOException { // Match VALUES[1] against QUALIFIERS_ONE[1] with filterIfMissing = true // Expect 1 row (3) - scvf = new SingleColumnValueFilter(FAMILIES[0], - QUALIFIERS_ONE[1], CompareOperator.EQUAL, VALUES[1]); + scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[1], CompareOperator.EQUAL, + VALUES[1]); scvf.setFilterIfMissing(true); s = new Scan().withStartRow(ROWS_THREE[0]).withStopRow(Bytes.toBytes("rowThree-4")); s.addFamily(FAMILIES[0]); s.setFilter(scvf); - kvs = new KeyValue [] { srcKVs[3] }; + kvs = new KeyValue[] { srcKVs[3] }; verifyScanFull(s, kvs); } @@ -1696,139 +1622,139 @@ public void testColumnValueFilter() throws Exception { Scan scan = new Scan().setFilter( new ColumnValueFilter(FAMILIES[0], QUALIFIERS_FIVE[0], CompareOperator.EQUAL, VALUES[1])); KeyValue[] expectedEquals = - { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]) }; + { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], 
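The filterIfMissing cases above are the crux of SingleColumnValueFilter: with the default filterIfMissing=false, a row that lacks the tested column passes through, which is why one scan sees rows 0, 2 and 3 while the stricter scan with filterIfMissing=true sees only the matching row. The same switch, with placeholder family, qualifier and value names, looks roughly like this:

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ScvfSketch {
  /**
   * Keep only rows whose "f1:q1" column equals "v". With filterIfMissing=false (the default)
   * rows that do not carry the column are emitted as well, which is exactly the 3-row versus
   * 1-row difference the assertions above check.
   */
  static Scan matchColumnValue(boolean filterIfMissing) {
    SingleColumnValueFilter scvf = new SingleColumnValueFilter(Bytes.toBytes("f1"),
      Bytes.toBytes("q1"), CompareOperator.EQUAL, Bytes.toBytes("v")); // placeholder names
    scvf.setFilterIfMissing(filterIfMissing);
    return new Scan().setFilter(scvf);
  }
}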
FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]) }; verifyScanFull(scan, expectedEquals); // 2. Test > f[0]:q5[0]:v[0] scan.setFilter( new ColumnValueFilter(FAMILIES[0], QUALIFIERS_FIVE[0], CompareOperator.GREATER, VALUES[0])); KeyValue[] expectedGreater = - { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]) }; + { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]) }; verifyScanFull(scan, expectedGreater); // 3. Test >= f[0]:q5[0]:v[0] // also test readAllVersions(), since FAMILIES[0] allow multiple versions. scan.readAllVersions().setFilter(new ColumnValueFilter(FAMILIES[0], QUALIFIERS_FIVE[0], - CompareOperator.GREATER_OR_EQUAL, VALUES[0])); + CompareOperator.GREATER_OR_EQUAL, VALUES[0])); KeyValue[] expectedGreaterOrEqual = - { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]) }; + { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + 
new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]) }; verifyScanFull(scan, expectedGreaterOrEqual); // 4. Test < f[1]:q5[1]:v[1], FAMILIES[1] doesn't support multiple versions - scan.readVersions(1).setFilter(new ColumnValueFilter(FAMILIES[1], QUALIFIERS_FIVE[1], - CompareOperator.LESS, VALUES[1])); + scan.readVersions(1).setFilter( + new ColumnValueFilter(FAMILIES[1], QUALIFIERS_FIVE[1], CompareOperator.LESS, VALUES[1])); KeyValue[] expectedLess = - { new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; + { new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; verifyScanFull(scan, expectedLess); // 5. Test <= f[1]:q5[0]:v[1] scan.setFilter(new ColumnValueFilter(FAMILIES[1], QUALIFIERS_FIVE[1], - CompareOperator.LESS_OR_EQUAL, VALUES[1])); + CompareOperator.LESS_OR_EQUAL, VALUES[1])); KeyValue[] expectedLessOrEqual = - { new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[1]), - new KeyValue(ROWS_ONE[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[1]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[1]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; + { new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[1]), + new KeyValue(ROWS_ONE[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[1]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[1]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; verifyScanFull(scan, expectedLessOrEqual); // 6. 
Test != f[1]:q5[1]:v[1] scan.setFilter( new ColumnValueFilter(FAMILIES[1], QUALIFIERS_FIVE[1], CompareOperator.NOT_EQUAL, VALUES[1])); KeyValue[] expectedNotEqual = - { new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; + { new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; verifyScanFull(scan, expectedNotEqual); // 7. Test FilterList(MUST_PASS_ONE) combining ColumnValueFilter and QualifierFilter // (ColumnValueFilter, != f[1]:q5[1]:v[1]) || (QualifierFilter, = q5[0]) List orFilters = new ArrayList<>(2); orFilters.add( new ColumnValueFilter(FAMILIES[1], QUALIFIERS_FIVE[1], CompareOperator.NOT_EQUAL, VALUES[1])); - orFilters.add( - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(QUALIFIERS_FIVE[0]))); + orFilters + .add(new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(QUALIFIERS_FIVE[0]))); scan.setFilter(new FilterList(Operator.MUST_PASS_ONE, orFilters)); KeyValue[] expectedMustPassOne = - { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[1], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), // this pass scvf - new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), // this pass scvf - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), // this pass scvf - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[0]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; // this pass scvf + { new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[1], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[1]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[1]), + new 
KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), // this pass scvf + new KeyValue(ROWS_TWO[1], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), // this pass scvf + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), // this pass scvf + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[0], VALUES[0]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; // this pass + // scvf verifyScanFull(scan, expectedMustPassOne); // 8. Test FilterList(MUST_PASS_ALL) combining ColumnValueFilter and RowFilter // (ColumnValueFilter, != f[1]:q5[1]:v[1]) && (RowFilter, = prefix:"testRow") List andFilters = new ArrayList<>(2); andFilters.add( new ColumnValueFilter(FAMILIES[1], QUALIFIERS_FIVE[1], CompareOperator.NOT_EQUAL, VALUES[1])); - andFilters.add(new RowFilter(CompareOperator.EQUAL, - new BinaryPrefixComparator(Bytes.toBytes("testRow")))); + andFilters.add( + new RowFilter(CompareOperator.EQUAL, new BinaryPrefixComparator(Bytes.toBytes("testRow")))); scan.setFilter(new FilterList(Operator.MUST_PASS_ALL, andFilters)); KeyValue[] expectedMustPassAll = - { new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; + { new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; verifyScanFull(scan, expectedMustPassAll); // 9. Test specified columns with FilterList(MUST_PASS_ONE) which sused in case 7. 
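The numbered ColumnValueFilter cases above also show how it differs from SingleColumnValueFilter: ColumnValueFilter emits only the tested cell itself, whereas SingleColumnValueFilter decides the fate of the whole row. A side-by-side sketch with placeholder family, qualifier and value bytes:

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnValueFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnValueSketch {
  private static final byte[] F = Bytes.toBytes("f1"); // placeholder family
  private static final byte[] Q = Bytes.toBytes("q5"); // placeholder qualifier
  private static final byte[] V = Bytes.toBytes("v");  // placeholder value

  /** Returns only the matching f1:q5 cells themselves. */
  static Scan cellsOnly() {
    return new Scan().setFilter(new ColumnValueFilter(F, Q, CompareOperator.EQUAL, V));
  }

  /** Returns every column of each row whose f1:q5 cell matches. */
  static Scan wholeRows() {
    return new Scan().setFilter(new SingleColumnValueFilter(F, Q, CompareOperator.EQUAL, V));
  }
}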
// Result is different from case 7, because column is strongly constrained by specified columns Scan anotherScan = new Scan().addColumn(FAMILIES[1], QUALIFIERS_FIVE[1]) - .setFilter(new FilterList(Operator.MUST_PASS_ONE, orFilters)); + .setFilter(new FilterList(Operator.MUST_PASS_ONE, orFilters)); KeyValue[] expectedValues = - { new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; + { new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[1], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_FIVE[1], VALUES[0]) }; verifyScanFull(anotherScan, expectedValues); } - private void verifyScan(Scan s, long expectedRows, long expectedKeys) - throws IOException { + private void verifyScan(Scan s, long expectedRows, long expectedKeys) throws IOException { InternalScanner scanner = this.region.getScanner(s); List results = new ArrayList<>(); int i = 0; @@ -1837,56 +1763,52 @@ private void verifyScan(Scan s, long expectedRows, long expectedKeys) Arrays.sort(results.toArray(new Cell[results.size()]), CellComparator.getInstance()); LOG.info("counter=" + i + ", " + results); if (results.isEmpty()) break; - assertTrue("Scanned too many rows! Only expected " + expectedRows + - " total but already scanned " + (i+1), expectedRows > i); - assertEquals("Expected " + expectedKeys + " keys per row but " + - "returned " + results.size(), expectedKeys, results.size()); + assertTrue("Scanned too many rows! Only expected " + expectedRows + + " total but already scanned " + (i + 1), + expectedRows > i); + assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + results.size(), + expectedKeys, results.size()); results.clear(); } - assertEquals("Expected " + expectedRows + " rows but scanned " + i + - " rows", expectedRows, i); + assertEquals("Expected " + expectedRows + " rows but scanned " + i + " rows", expectedRows, i); } - private void verifyScanNoEarlyOut(Scan s, long expectedRows, - long expectedKeys) - throws IOException { + private void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys) + throws IOException { InternalScanner scanner = this.region.getScanner(s); List results = new ArrayList<>(); int i = 0; for (boolean done = true; done; i++) { done = scanner.next(results); - Arrays.sort(results.toArray(new Cell[results.size()]), - CellComparator.getInstance()); + Arrays.sort(results.toArray(new Cell[results.size()]), CellComparator.getInstance()); LOG.info("counter=" + i + ", " + results); - if(results.isEmpty()) break; - assertTrue("Scanned too many rows! Only expected " + expectedRows + - " total but already scanned " + (i+1), expectedRows > i); - assertEquals("Expected " + expectedKeys + " keys per row but " + - "returned " + results.size(), expectedKeys, results.size()); + if (results.isEmpty()) break; + assertTrue("Scanned too many rows! 
Only expected " + expectedRows + + " total but already scanned " + (i + 1), + expectedRows > i); + assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + results.size(), + expectedKeys, results.size()); results.clear(); } - assertEquals("Expected " + expectedRows + " rows but scanned " + i + - " rows", expectedRows, i); + assertEquals("Expected " + expectedRows + " rows but scanned " + i + " rows", expectedRows, i); } - private void verifyScanFull(Scan s, KeyValue [] kvs) - throws IOException { + private void verifyScanFull(Scan s, KeyValue[] kvs) throws IOException { InternalScanner scanner = this.region.getScanner(s); List results = new ArrayList<>(); int row = 0; int idx = 0; for (boolean done = true; done; row++) { done = scanner.next(results); - Arrays.sort(results.toArray(new Cell[results.size()]), - CellComparator.getInstance()); - if(results.isEmpty()) break; - assertTrue("Scanned too many keys! Only expected " + kvs.length + - " total but already scanned " + (results.size() + idx) + - (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"), - kvs.length >= idx + results.size()); + Arrays.sort(results.toArray(new Cell[results.size()]), CellComparator.getInstance()); + if (results.isEmpty()) break; + assertTrue( + "Scanned too many keys! Only expected " + kvs.length + " total but already scanned " + + (results.size() + idx) + + (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"), + kvs.length >= idx + results.size()); for (Cell kv : results) { - LOG.info("row=" + row + ", result=" + kv.toString() + - ", match=" + kvs[idx].toString()); + LOG.info("row=" + row + ", result=" + kv.toString() + ", match=" + kvs[idx].toString()); assertTrue("Row mismatch", CellUtil.matchingRows(kv, kvs[idx])); assertTrue("Family mismatch", CellUtil.matchingFamily(kv, kvs[idx])); assertTrue("Qualifier mismatch", CellUtil.matchingQualifier(kv, kvs[idx])); @@ -1896,28 +1818,25 @@ private void verifyScanFull(Scan s, KeyValue [] kvs) results.clear(); } LOG.info("Looked at " + row + " rows with " + idx + " keys"); - assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, - kvs.length, idx); + assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx); } - private void verifyScanFullNoValues(Scan s, KeyValue [] kvs, boolean useLen) - throws IOException { + private void verifyScanFullNoValues(Scan s, KeyValue[] kvs, boolean useLen) throws IOException { InternalScanner scanner = this.region.getScanner(s); List results = new ArrayList<>(); int row = 0; int idx = 0; for (boolean more = true; more; row++) { more = scanner.next(results); - Arrays.sort(results.toArray(new Cell[results.size()]), - CellComparator.getInstance()); - if(results.isEmpty()) break; - assertTrue("Scanned too many keys! Only expected " + kvs.length + - " total but already scanned " + (results.size() + idx) + - (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"), - kvs.length >= idx + results.size()); - for(Cell kv : results) { - LOG.info("row=" + row + ", result=" + kv.toString() + - ", match=" + kvs[idx].toString()); + Arrays.sort(results.toArray(new Cell[results.size()]), CellComparator.getInstance()); + if (results.isEmpty()) break; + assertTrue( + "Scanned too many keys! Only expected " + kvs.length + " total but already scanned " + + (results.size() + idx) + + (results.isEmpty() ? 
"" : "(" + results.get(0).toString() + ")"), + kvs.length >= idx + results.size()); + for (Cell kv : results) { + LOG.info("row=" + row + ", result=" + kv.toString() + ", match=" + kvs[idx].toString()); assertTrue("Row mismatch", CellUtil.matchingRows(kv, kvs[idx])); assertTrue("Family mismatch", CellUtil.matchingFamily(kv, kvs[idx])); @@ -1925,10 +1844,10 @@ private void verifyScanFullNoValues(Scan s, KeyValue [] kvs, boolean useLen) assertFalse("Should not have returned whole value", CellUtil.matchingValue(kv, kvs[idx])); if (useLen) { assertEquals("Value in result is not SIZEOF_INT", Bytes.SIZEOF_INT, kv.getValueLength()); - LOG.info("idx = " + idx + ", len=" + kvs[idx].getValueLength() - + ", actual=" + Bytes.toInt(CellUtil.cloneValue(kv))); + LOG.info("idx = " + idx + ", len=" + kvs[idx].getValueLength() + ", actual=" + + Bytes.toInt(CellUtil.cloneValue(kv))); assertEquals("Scan value should be the length of the actual value. ", - kvs[idx].getValueLength(), Bytes.toInt(CellUtil.cloneValue(kv)) ); + kvs[idx].getValueLength(), Bytes.toInt(CellUtil.cloneValue(kv))); LOG.info("good"); } else { assertEquals("Value in result is not empty", 0, kv.getValueLength()); @@ -1938,62 +1857,57 @@ private void verifyScanFullNoValues(Scan s, KeyValue [] kvs, boolean useLen) results.clear(); } LOG.info("Looked at " + row + " rows with " + idx + " keys"); - assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, - kvs.length, idx); + assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, kvs.length, idx); } @Test public void testColumnPaginationFilterColumnOffset() throws Exception { - KeyValue [] expectedKVs = { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - }; - KeyValue [] expectedKVs1 = { - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]) - }; - KeyValue [] expectedKVs2 = { - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]) - }; - KeyValue [] expectedKVs3 = { - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], 
FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - }; + KeyValue[] expectedKVs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), }; + KeyValue[] expectedKVs1 = { + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]) }; + KeyValue[] expectedKVs2 = { + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]) }; + KeyValue[] expectedKVs3 = { + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), }; Scan s = new Scan(); // Page size 1. 
@@ -2057,32 +1971,32 @@ public ReturnCode filterCell(Cell c) throws IOException { s.addColumn(FAMILIES[0], QUALIFIERS_TWO[2]); s.addColumn(FAMILIES[0], QUALIFIERS_TWO[3]); KeyValue[] kvs = { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[1]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), }; + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[1]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), }; verifyScanFull(s, kvs); } @@ -2094,10 +2008,10 @@ public void testColumnPaginationFilter() throws Exception { p.setDurability(Durability.SKIP_WAL); p.addColumn(FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]); this.region.put(p); - this.region.flush(true); + this.region.flush(true); - // Set of KVs (page: 1; pageSize: 1) - the first set of 1 column per row - KeyValue [] expectedKVs = { + // Set of KVs (page: 1; pageSize: 1) - the first set of 1 column per row + KeyValue[] expectedKVs = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], 
QUALIFIERS_ONE[0], VALUES[0]), // testRowOne-2 @@ -2109,11 +2023,10 @@ public void testColumnPaginationFilter() throws Exception { // testRowTwo-2 new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) - }; + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) }; - // Set of KVs (page: 3; pageSize: 1) - the third set of 1 column per row - KeyValue [] expectedKVs2 = { + // Set of KVs (page: 3; pageSize: 1) - the third set of 1 column per row + KeyValue[] expectedKVs2 = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), // testRowOne-2 @@ -2125,11 +2038,10 @@ public void testColumnPaginationFilter() throws Exception { // testRowTwo-2 new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), }; - // Set of KVs (page: 2; pageSize 2) - the 2nd set of 2 columns per row - KeyValue [] expectedKVs3 = { + // Set of KVs (page: 2; pageSize 2) - the 2nd set of 2 columns per row + KeyValue[] expectedKVs3 = { // testRowOne-0 new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), @@ -2147,97 +2059,93 @@ public void testColumnPaginationFilter() throws Exception { new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), // testRowTwo-3 new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - }; + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), }; + // Set of KVs (page: 2; pageSize 2) - the 2nd set of 2 columns per row + KeyValue[] expectedKVs4 = { - // Set of KVs (page: 2; pageSize 2) - the 2nd set of 2 columns per row - KeyValue [] expectedKVs4 = { - - }; + }; - long expectedRows = this.numRows; - long expectedKeys = 1; - Scan s = new Scan(); + long expectedRows = this.numRows; + long expectedKeys = 1; + Scan s = new Scan(); + // Page 1; 1 Column per page (Limit 1, Offset 0) + s.setFilter(new ColumnPaginationFilter(1, 0)); + verifyScan(s, expectedRows, expectedKeys); + this.verifyScanFull(s, expectedKVs); - // Page 1; 1 Column per page (Limit 1, Offset 0) - s.setFilter(new ColumnPaginationFilter(1,0)); - verifyScan(s, expectedRows, expectedKeys); - this.verifyScanFull(s, expectedKVs); + // Page 3; 1 Result per page (Limit 1, Offset 2) + s.setFilter(new ColumnPaginationFilter(1, 2)); + verifyScan(s, expectedRows, expectedKeys); + this.verifyScanFull(s, expectedKVs2); - // Page 3; 1 Result per page (Limit 1, Offset 2) - s.setFilter(new ColumnPaginationFilter(1,2)); - verifyScan(s, expectedRows, expectedKeys); - this.verifyScanFull(s, expectedKVs2); + // Page 2; 2 Results per page (Limit 2, Offset 2) + s.setFilter(new ColumnPaginationFilter(2, 2)); + expectedKeys = 2; + verifyScan(s, expectedRows, expectedKeys); + this.verifyScanFull(s, expectedKVs3); - // Page 2; 2 Results per page (Limit 2, Offset 2) - s.setFilter(new ColumnPaginationFilter(2,2)); - expectedKeys = 2; - verifyScan(s, expectedRows, expectedKeys); - this.verifyScanFull(s, expectedKVs3); - - // Page 8; 20 Results per page (no results) (Limit 20, Offset 140) - s.setFilter(new ColumnPaginationFilter(20,140)); - expectedKeys = 0; - expectedRows = 0; - verifyScan(s, expectedRows, 0); - this.verifyScanFull(s, 
expectedKVs4); + // Page 8; 20 Results per page (no results) (Limit 20, Offset 140) + s.setFilter(new ColumnPaginationFilter(20, 140)); + expectedKeys = 0; + expectedRows = 0; + verifyScan(s, expectedRows, 0); + this.verifyScanFull(s, expectedKVs4); } @Test public void testKeyOnlyFilter() throws Exception { // KVs in first 6 rows - KeyValue [] expectedKVs = { - // testRowOne-0 - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-2 - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowOne-3 - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), - new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), - // testRowTwo-0 - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-2 - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), - // testRowTwo-3 - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), - new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) - }; + KeyValue[] expectedKVs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // 
testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) }; // Grab all 6 rows long expectedRows = 6; long expectedKeys = this.colsPerRow; - for (boolean useLen : new boolean[]{false,true}) { + for (boolean useLen : new boolean[] { false, true }) { Scan s = new Scan(); s.setFilter(new KeyOnlyFilter(useLen)); verifyScan(s, expectedRows, expectedKeys); @@ -2246,9 +2154,8 @@ public void testKeyOnlyFilter() throws Exception { } /** - * Filter which makes sleeps for a second between each row of a scan. - * This can be useful for manual testing of bugs like HBASE-5973. For example: - * + * Filter which makes sleeps for a second between each row of a scan. This can be useful for + * manual testing of bugs like HBASE-5973. 
For example: * create 't1', 'f1' * 1.upto(100) { |x| put 't1', 'r' + x.to_s, 'f1:q1', 'hi' } * import org.apache.hadoop.hbase.filter.TestFilter @@ -2259,7 +2166,9 @@ public static class SlowScanFilter extends FilterBase { private static Thread ipcHandlerThread = null; @Override - public byte [] toByteArray() {return null;} + public byte[] toByteArray() { + return null; + } @Override public ReturnCode filterCell(final Cell ignored) throws IOException { @@ -2284,12 +2193,12 @@ public boolean filterRow() throws IOException { public void testNestedFilterListWithSCVF() throws IOException { byte[] columnStatus = Bytes.toBytes("S"); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[0])).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[0])).build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); HRegion testRegion = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), tableDescriptor); - for(int i=0; i<10; i++) { + TEST_UTIL.getConfiguration(), tableDescriptor); + for (int i = 0; i < 10; i++) { Put p = new Put(Bytes.toBytes("row" + i)); p.setDurability(Durability.SKIP_WAL); p.addColumn(FAMILIES[0], columnStatus, Bytes.toBytes(i % 2)); @@ -2297,7 +2206,8 @@ public void testNestedFilterListWithSCVF() throws IOException { } testRegion.flush(true); // 1. got rows > "row4" - Filter rowFilter = new RowFilter(CompareOperator.GREATER,new BinaryComparator(Bytes.toBytes("row4"))); + Filter rowFilter = + new RowFilter(CompareOperator.GREATER, new BinaryComparator(Bytes.toBytes("row4"))); Scan s1 = new Scan(); s1.setFilter(rowFilter); InternalScanner scanner = testRegion.getScanner(s1); @@ -2306,25 +2216,25 @@ public void testNestedFilterListWithSCVF() throws IOException { for (boolean done = true; done; i++) { done = scanner.next(results); assertTrue(CellUtil.matchingRows(results.get(0), Bytes.toBytes("row" + i))); - assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i%2); + assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2); results.clear(); } // 2. 
got rows <= "row4" and S= FilterList subFilterList = new FilterList(FilterList.Operator.MUST_PASS_ALL); - Filter subFilter1 = new RowFilter(CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(Bytes.toBytes("row4"))); + Filter subFilter1 = + new RowFilter(CompareOperator.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes("row4"))); subFilterList.addFilter(subFilter1); - Filter subFilter2 = new SingleColumnValueFilter(FAMILIES[0], columnStatus, CompareOperator.EQUAL, - Bytes.toBytes(0)); + Filter subFilter2 = new SingleColumnValueFilter(FAMILIES[0], columnStatus, + CompareOperator.EQUAL, Bytes.toBytes(0)); subFilterList.addFilter(subFilter2); s1 = new Scan(); s1.setFilter(subFilterList); scanner = testRegion.getScanner(s1); results = new ArrayList<>(); - for (i=0; i<=4; i+=2) { + for (i = 0; i <= 4; i += 2) { scanner.next(results); assertTrue(CellUtil.matchingRows(results.get(0), Bytes.toBytes("row" + i))); - assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i%2); + assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2); results.clear(); } assertFalse(scanner.next(results)); @@ -2337,16 +2247,16 @@ public void testNestedFilterListWithSCVF() throws IOException { s1.setFilter(filterList); scanner = testRegion.getScanner(s1); results = new ArrayList<>(); - for (i=0; i<=4; i+=2) { + for (i = 0; i <= 4; i += 2) { scanner.next(results); assertTrue(CellUtil.matchingRows(results.get(0), Bytes.toBytes("row" + i))); - assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i%2); + assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2); results.clear(); } - for (i=5; i<=9; i++) { + for (i = 5; i <= 9; i++) { scanner.next(results); assertTrue(CellUtil.matchingRows(results.get(0), Bytes.toBytes("row" + i))); - assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i%2); + assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2); results.clear(); } assertFalse(scanner.next(results)); @@ -2358,21 +2268,21 @@ public void testNestedFilterListWithSCVF() throws IOException { s1.setFilter(filterList); scanner = testRegion.getScanner(s1); results = new ArrayList<>(); - for (i=0; i<=4; i+=2) { + for (i = 0; i <= 4; i += 2) { scanner.next(results); assertTrue(CellUtil.matchingRows(results.get(0), Bytes.toBytes("row" + i))); - assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i%2); + assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2); results.clear(); } - for (i=5; i<=9; i++) { + for (i = 5; i <= 9; i++) { scanner.next(results); assertTrue(CellUtil.matchingRows(results.get(0), Bytes.toBytes("row" + i))); - assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i%2); + assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2); results.clear(); } assertFalse(scanner.next(results)); - WAL wal = ((HRegion)testRegion).getWAL(); - ((HRegion)testRegion).close(); + WAL wal = ((HRegion) testRegion).getWAL(); + ((HRegion) testRegion).close(); wal.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java index 75c3e591604f..f670200bab92 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java @@ -135,13 +135,13 @@ public void testFirstKeyOnlyFilterAndBatch() throws IOException { assertEquals(1, results.size()); Cell cell = 
results.get(0); assertArrayEquals(ROWS[i], - Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } assertFalse(scanner.next(results)); scanner.close(); } - public static class FirstSeveralCellsFilter extends FilterBase{ + public static class FirstSeveralCellsFilter extends FilterBase { private int count = 0; @Override @@ -162,7 +162,7 @@ public ReturnCode filterCell(final Cell v) { return ReturnCode.SKIP; } - public static Filter parseFrom(final byte [] pbBytes){ + public static Filter parseFrom(final byte[] pbBytes) { return new FirstSeveralCellsFilter(); } } @@ -180,11 +180,11 @@ public void testFirstSeveralCellsFilterAndBatch() throws IOException { assertEquals(NUM_COLS, results.size()); Cell cell = results.get(0); assertArrayEquals(ROWS[i], - Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); assertArrayEquals(FAMILIES[0], - Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())); - assertArrayEquals(QUALIFIERS[0], Bytes.copy(cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength())); + Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())); + assertArrayEquals(QUALIFIERS[0], + Bytes.copy(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); } assertFalse(scanner.next(results)); scanner.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java index e96b8dbbbda6..27bcfb5324fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import java.util.Arrays; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CompareOperator; @@ -51,7 +50,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -@Category({FilterTests.class, MediumTests.class}) +@Category({ FilterTests.class, MediumTests.class }) public class TestFilterList { @ClassRule @@ -119,6 +118,7 @@ public void testConstruction() { } catch (IllegalArgumentException e) { } } + /** * Test "must pass one" * @throws Exception @@ -132,33 +132,27 @@ private Filter getFilterMPONE() { List filters = new ArrayList<>(); filters.add(new PageFilter(MAX_PAGES)); filters.add(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("yyy")))); - Filter filterMPONE = - new FilterList(FilterList.Operator.MUST_PASS_ONE, filters); + Filter filterMPONE = new FilterList(FilterList.Operator.MUST_PASS_ONE, filters); return filterMPONE; } private void mpOneTest(Filter filterMPONE) throws Exception { - /* Filter must do all below steps: - *
<ul>
- * <li>{@link #reset()}</li>
- * <li>{@link #filterAllRemaining()} -> true indicates scan is over, false, keep going on.</li>
- * <li>{@link #filterRowKey(byte[],int,int)} -> true to drop this row,
- * if false, we will also call</li>
+ /*
+ * Filter must do all below steps: <ul> <li>{@link #reset()}</li> <li>{@link
+ * #filterAllRemaining()} -> true indicates scan is over, false, keep going on.</li> <li>{@link
+ * #filterRowKey(byte[],int,int)} -> true to drop this row, if false, we will also call</li>
* <li>{@link #filterCell(org.apache.hadoop.hbase.KeyValue)} -> true to drop this cell</li>
* <li>{@link #filterRow()} -> last chance to drop entire row based on the sequence of
- * filterValue() calls. Eg: filter a row if it doesn't contain a specified column.
- * </li>
- * </ul>
- */
+ * filterValue() calls. Eg: filter a row if it doesn't contain a specified column. </li> </ul>
      + */ filterMPONE.reset(); assertFalse(filterMPONE.filterAllRemaining()); /* Will pass both */ - byte [] rowkey = Bytes.toBytes("yyyyyyyyy"); + byte[] rowkey = Bytes.toBytes("yyyyyyyyy"); for (int i = 0; i < MAX_PAGES - 1; i++) { assertFalse(filterMPONE.filterRowKey(KeyValueUtil.createFirstOnRow(rowkey))); - KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), - Bytes.toBytes(i)); + KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), Bytes.toBytes(i)); assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterCell(kv)); assertFalse(filterMPONE.filterRow()); } @@ -166,16 +160,14 @@ private void mpOneTest(Filter filterMPONE) throws Exception { /* Only pass PageFilter */ rowkey = Bytes.toBytes("z"); assertFalse(filterMPONE.filterRowKey(KeyValueUtil.createFirstOnRow(rowkey))); - KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(0), - Bytes.toBytes(0)); + KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(0), Bytes.toBytes(0)); assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterCell(kv)); assertFalse(filterMPONE.filterRow()); /* reach MAX_PAGES already, should filter any rows */ rowkey = Bytes.toBytes("yyy"); assertTrue(filterMPONE.filterRowKey(KeyValueUtil.createFirstOnRow(rowkey))); - kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(0), - Bytes.toBytes(0)); + kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(0), Bytes.toBytes(0)); assertFalse(Filter.ReturnCode.INCLUDE == filterMPONE.filterCell(kv)); assertFalse(filterMPONE.filterRow()); @@ -198,31 +190,25 @@ private Filter getMPALLFilter() { List filters = new ArrayList<>(); filters.add(new PageFilter(MAX_PAGES)); filters.add(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("yyy")))); - Filter filterMPALL = - new FilterList(FilterList.Operator.MUST_PASS_ALL, filters); + Filter filterMPALL = new FilterList(FilterList.Operator.MUST_PASS_ALL, filters); return filterMPALL; } private void mpAllTest(Filter filterMPALL) throws Exception { - /* Filter must do all below steps: - *
<ul>
- * <li>{@link #reset()}</li>
- * <li>{@link #filterAllRemaining()} -> true indicates scan is over, false, keep going on.</li>
- * <li>{@link #filterRowKey(byte[],int,int)} -> true to drop this row,
- * if false, we will also call</li>
+ /*
+ * Filter must do all below steps: <ul> <li>{@link #reset()}</li> <li>{@link
+ * #filterAllRemaining()} -> true indicates scan is over, false, keep going on.</li> <li>{@link
+ * #filterRowKey(byte[],int,int)} -> true to drop this row, if false, we will also call</li>
* <li>{@link #filterCell(org.apache.hadoop.hbase.KeyValue)} -> true to drop this cell</li>
* <li>{@link #filterRow()} -> last chance to drop entire row based on the sequence of
- * filterValue() calls. Eg: filter a row if it doesn't contain a specified column.
- * </li>
- * </ul>
- */
+ * filterValue() calls. Eg: filter a row if it doesn't contain a specified column. </li> </ul>
      + */ filterMPALL.reset(); assertFalse(filterMPALL.filterAllRemaining()); - byte [] rowkey = Bytes.toBytes("yyyyyyyyy"); + byte[] rowkey = Bytes.toBytes("yyyyyyyyy"); for (int i = 0; i < MAX_PAGES - 1; i++) { assertFalse(filterMPALL.filterRowKey(KeyValueUtil.createFirstOnRow(rowkey))); - KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), - Bytes.toBytes(i)); + KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), Bytes.toBytes(i)); assertTrue(Filter.ReturnCode.INCLUDE == filterMPALL.filterCell(kv)); } filterMPALL.reset(); @@ -246,34 +232,28 @@ public Filter getOrderingFilter() { List filters = new ArrayList<>(); filters.add(new PrefixFilter(Bytes.toBytes("yyy"))); filters.add(new PageFilter(MAX_PAGES)); - Filter filterMPONE = - new FilterList(FilterList.Operator.MUST_PASS_ONE, filters); + Filter filterMPONE = new FilterList(FilterList.Operator.MUST_PASS_ONE, filters); return filterMPONE; } public void orderingTest(Filter filterMPONE) throws Exception { - /* Filter must do all below steps: - *
<ul>
- * <li>{@link #reset()}</li>
- * <li>{@link #filterAllRemaining()} -> true indicates scan is over, false, keep going on.</li>
- * <li>{@link #filterRowKey(byte[],int,int)} -> true to drop this row,
- * if false, we will also call</li>
+ /*
+ * Filter must do all below steps: <ul> <li>{@link #reset()}</li> <li>{@link
+ * #filterAllRemaining()} -> true indicates scan is over, false, keep going on.</li> <li>{@link
+ * #filterRowKey(byte[],int,int)} -> true to drop this row, if false, we will also call</li>
* <li>{@link #filterCell(org.apache.hadoop.hbase.KeyValue)} -> true to drop this key/value</li>
* <li>{@link #filterRow()} -> last chance to drop entire row based on the sequence of
- * filterValue() calls. Eg: filter a row if it doesn't contain a specified column.
- * </li>
- * </ul>
- */
+ * filterValue() calls. Eg: filter a row if it doesn't contain a specified column. </li> </ul>
      + */ filterMPONE.reset(); assertFalse(filterMPONE.filterAllRemaining()); /* We should be able to fill MAX_PAGES without incrementing page counter */ - byte [] rowkey = Bytes.toBytes("yyyyyyyy"); + byte[] rowkey = Bytes.toBytes("yyyyyyyy"); for (int i = 0; i < MAX_PAGES; i++) { assertFalse(filterMPONE.filterRowKey(KeyValueUtil.createFirstOnRow(rowkey))); - KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), - Bytes.toBytes(i)); - assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterCell(kv)); + KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), Bytes.toBytes(i)); + assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterCell(kv)); assertFalse(filterMPONE.filterRow()); } @@ -281,9 +261,8 @@ public void orderingTest(Filter filterMPONE) throws Exception { rowkey = Bytes.toBytes("xxxxxxx"); for (int i = 0; i < MAX_PAGES; i++) { assertFalse(filterMPONE.filterRowKey(KeyValueUtil.createFirstOnRow(rowkey))); - KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), - Bytes.toBytes(i)); - assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterCell(kv)); + KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), Bytes.toBytes(i)); + assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterCell(kv)); assertFalse(filterMPONE.filterRow()); } @@ -291,16 +270,15 @@ public void orderingTest(Filter filterMPONE) throws Exception { rowkey = Bytes.toBytes("yyy"); for (int i = 0; i < MAX_PAGES; i++) { assertFalse(filterMPONE.filterRowKey(KeyValueUtil.createFirstOnRow(rowkey))); - KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), - Bytes.toBytes(i)); - assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterCell(kv)); + KeyValue kv = new KeyValue(rowkey, rowkey, Bytes.toBytes(i), Bytes.toBytes(i)); + assertTrue(Filter.ReturnCode.INCLUDE == filterMPONE.filterCell(kv)); assertFalse(filterMPONE.filterRow()); } } /** - * When we do a "MUST_PASS_ONE" (a logical 'OR') of the above two filters - * we expect to get the same result as the 'prefix' only result. + * When we do a "MUST_PASS_ONE" (a logical 'OR') of the above two filters we expect to get the + * same result as the 'prefix' only result. * @throws Exception */ @Test @@ -332,8 +310,8 @@ public void testFilterListTwoFiltersMustPassOne() throws Exception { } /** - * When we do a "MUST_PASS_ONE" (a logical 'OR') of the two filters - * we expect to get the same result as the inclusive stop result. + * When we do a "MUST_PASS_ONE" (a logical 'OR') of the two filters we expect to get the same + * result as the inclusive stop result. * @throws Exception */ @Test @@ -379,8 +357,7 @@ public void testSerialization() throws Exception { List filters = new ArrayList<>(); filters.add(new PageFilter(MAX_PAGES)); filters.add(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("yyy")))); - Filter filterMPALL = - new FilterList(FilterList.Operator.MUST_PASS_ALL, filters); + Filter filterMPALL = new FilterList(FilterList.Operator.MUST_PASS_ALL, filters); // Decompose filterMPALL to bytes. byte[] buffer = filterMPALL.toByteArray(); @@ -412,8 +389,8 @@ public Filter.ReturnCode filterCell(final Cell v) { @Override public Filter.ReturnCode filterCell(final Cell v) { - Filter.ReturnCode returnCode = returnInclude ? Filter.ReturnCode.INCLUDE : - Filter.ReturnCode.SKIP; + Filter.ReturnCode returnCode = + returnInclude ? 
Filter.ReturnCode.INCLUDE : Filter.ReturnCode.SKIP; returnInclude = !returnInclude; return returnCode; } @@ -424,8 +401,8 @@ public Filter.ReturnCode filterCell(final Cell v) { @Override public Filter.ReturnCode filterCell(final Cell v) { - Filter.ReturnCode returnCode = returnIncludeOnly ? Filter.ReturnCode.INCLUDE : - Filter.ReturnCode.INCLUDE_AND_NEXT_COL; + Filter.ReturnCode returnCode = + returnIncludeOnly ? Filter.ReturnCode.INCLUDE : Filter.ReturnCode.INCLUDE_AND_NEXT_COL; returnIncludeOnly = !returnIncludeOnly; return returnCode; } @@ -455,12 +432,11 @@ public Filter.ReturnCode filterCell(final Cell v) { public void testHintPassThru() throws Exception { final KeyValue minKeyValue = new KeyValue(Bytes.toBytes(0L), null, null); - final KeyValue maxKeyValue = new KeyValue(Bytes.toBytes(Long.MAX_VALUE), - null, null); + final KeyValue maxKeyValue = new KeyValue(Bytes.toBytes(Long.MAX_VALUE), null, null); Filter filterNoHint = new FilterBase() { @Override - public byte [] toByteArray() { + public byte[] toByteArray() { return null; } @@ -482,7 +458,9 @@ public Cell getNextCellHint(Cell currentKV) { } @Override - public byte [] toByteArray() {return null;} + public byte[] toByteArray() { + return null; + } }; Filter filterMaxHint = new FilterBase() { @@ -497,7 +475,9 @@ public Cell getNextCellHint(Cell cell) { } @Override - public byte [] toByteArray() {return null;} + public byte[] toByteArray() { + return null; + } }; CellComparator comparator = CellComparator.getInstance(); @@ -505,16 +485,15 @@ public Cell getNextCellHint(Cell cell) { // Should take the min if given two hints FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, - Arrays.asList(new Filter [] { filterMinHint, filterMaxHint } )); + Arrays.asList(new Filter[] { filterMinHint, filterMaxHint })); assertEquals(0, comparator.compare(filterList.getNextCellHint(null), minKeyValue)); // Should have no hint if any filter has no hint filterList = new FilterList(Operator.MUST_PASS_ONE, - Arrays.asList( - new Filter [] { filterMinHint, filterMaxHint, filterNoHint } )); + Arrays.asList(new Filter[] { filterMinHint, filterMaxHint, filterNoHint })); assertNull(filterList.getNextCellHint(null)); filterList = new FilterList(Operator.MUST_PASS_ONE, - Arrays.asList(new Filter [] { filterNoHint, filterMaxHint } )); + Arrays.asList(new Filter[] { filterNoHint, filterMaxHint })); assertNull(filterList.getNextCellHint(null)); // Should give max hint if its the only one @@ -526,12 +505,12 @@ public Cell getNextCellHint(Cell cell) { // Should take the first hint filterList = new FilterList(Operator.MUST_PASS_ALL, - Arrays.asList(new Filter [] { filterMinHint, filterMaxHint } )); + Arrays.asList(new Filter[] { filterMinHint, filterMaxHint })); filterList.filterCell(null); assertEquals(0, comparator.compare(filterList.getNextCellHint(null), minKeyValue)); filterList = new FilterList(Operator.MUST_PASS_ALL, - Arrays.asList(new Filter [] { filterMaxHint, filterMinHint } )); + Arrays.asList(new Filter[] { filterMaxHint, filterMinHint })); filterList.filterCell(null); assertEquals(0, comparator.compare(filterList.getNextCellHint(null), maxKeyValue)); @@ -551,31 +530,34 @@ public Cell getNextCellHint(Cell cell) { } /** - * Tests the behavior of transform() in a hierarchical filter. - * - * transform() only applies after a filterCell() whose return-code includes the KeyValue. - * Lazy evaluation of AND + * Tests the behavior of transform() in a hierarchical filter. 
transform() only applies after a + * filterCell() whose return-code includes the KeyValue. Lazy evaluation of AND */ @Test public void testTransformMPO() throws Exception { // Apply the following filter: - // (family=fam AND qualifier=qual1 AND KeyOnlyFilter) - // OR (family=fam AND qualifier=qual2) - final FilterList flist = new FilterList(Operator.MUST_PASS_ONE, Lists.newArrayList( - new FilterList(Operator.MUST_PASS_ALL, Lists.newArrayList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("fam"))), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("qual1"))), - new KeyOnlyFilter())), - new FilterList(Operator.MUST_PASS_ALL, Lists.newArrayList( - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("fam"))), - new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("qual2"))))))); - - final KeyValue kvQual1 = new KeyValue( - Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("qual1"), Bytes.toBytes("value")); - final KeyValue kvQual2 = new KeyValue( - Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("qual2"), Bytes.toBytes("value")); - final KeyValue kvQual3 = new KeyValue( - Bytes.toBytes("row"), Bytes.toBytes("fam"), Bytes.toBytes("qual3"), Bytes.toBytes("value")); + // (family=fam AND qualifier=qual1 AND KeyOnlyFilter) + // OR (family=fam AND qualifier=qual2) + final FilterList flist = new FilterList(Operator.MUST_PASS_ONE, + Lists. newArrayList( + new FilterList(Operator.MUST_PASS_ALL, + Lists. newArrayList( + new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("fam"))), + new QualifierFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("qual1"))), + new KeyOnlyFilter())), + new FilterList(Operator.MUST_PASS_ALL, + Lists. 
newArrayList( + new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("fam"))), + new QualifierFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("qual2"))))))); + + final KeyValue kvQual1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"), + Bytes.toBytes("qual1"), Bytes.toBytes("value")); + final KeyValue kvQual2 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"), + Bytes.toBytes("qual2"), Bytes.toBytes("value")); + final KeyValue kvQual3 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"), + Bytes.toBytes("qual3"), Bytes.toBytes("value")); // Value for fam:qual1 should be stripped: assertEquals(Filter.ReturnCode.INCLUDE, flist.filterCell(kvQual1)); @@ -653,13 +635,13 @@ public ReturnCode filterCell(final Cell v) throws IOException { @Override public boolean equals(Object obj) { - if(obj == null || !(obj instanceof MockFilter)){ + if (obj == null || !(obj instanceof MockFilter)) { return false; } - if(obj == this){ + if (obj == this) { return true; } - MockFilter f = (MockFilter)obj; + MockFilter f = (MockFilter) obj; return this.targetRetCode.equals(f.targetRetCode); } @@ -842,13 +824,13 @@ public Cell getNextCellHint(Cell currentCell) throws IOException { @Override public boolean equals(Object obj) { - if(obj == null || !(obj instanceof MockSeekHintFilter)){ + if (obj == null || !(obj instanceof MockSeekHintFilter)) { return false; } - if(obj == this){ + if (obj == this) { return true; } - MockSeekHintFilter f = (MockSeekHintFilter)obj; + MockSeekHintFilter f = (MockSeekHintFilter) obj; return this.returnCell.equals(f.returnCell); } @@ -1028,13 +1010,13 @@ public boolean getTransformed() { @Override public boolean equals(Object obj) { - if(!(obj instanceof TransformFilter)){ + if (!(obj instanceof TransformFilter)) { return false; } if (obj == this) { return true; } - TransformFilter f = (TransformFilter)obj; + TransformFilter f = (TransformFilter) obj; return this.targetRetCode.equals(f.targetRetCode); } @@ -1046,9 +1028,8 @@ public int hashCode() { @Test public void testTransformCell() throws IOException { - KeyValue kv = - new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("column1"), 1, - Bytes.toBytes("value")); + KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("column1"), + 1, Bytes.toBytes("value")); // case MUST_PASS_ONE TransformFilter filter1 = new TransformFilter(ReturnCode.INCLUDE); @@ -1123,4 +1104,3 @@ public void testFilterListWithORWhenPassingCellMismatchPreviousRC() throws IOExc Assert.assertEquals(ReturnCode.SEEK_NEXT_USING_HINT, filterList.filterCell(kv1)); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOnMini.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOnMini.java index 5677d44f8abd..48e1f3466b28 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOnMini.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOnMini.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -80,10 +80,8 @@ public void testFiltersWithOR() throws Exception { Put put2 = new Put(Bytes.toBytes("0")); put2.addColumn(CF2, Bytes.toBytes("col_b"), Bytes.toBytes(0)); table.put(put2); - FamilyFilter filterCF1 = - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(CF1)); - FamilyFilter filterCF2 = - new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(CF2)); + FamilyFilter filterCF1 = new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(CF1)); + FamilyFilter filterCF2 = new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(CF2)); FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ONE); filterList.addFilter(filterCF1); filterList.addFilter(filterCF2); @@ -105,23 +103,12 @@ public void testColumnPrefixFilterConcatWithOR() throws Exception { byte[] cf1 = Bytes.toBytes("f1"); byte[] row = Bytes.toBytes("row"); byte[] value = Bytes.toBytes("value"); - String[] columns = new String[]{ - "1544768273917010001_lt", - "1544768273917010001_w_1", - "1544768723910010001_ca_1", - "1544768723910010001_lt", - "1544768723910010001_ut_1", - "1544768723910010001_w_5", - "1544769779710010001_lt", - "1544769779710010001_w_5", - "1544769883529010001_lt", - "1544769883529010001_w_5", - "1544769915805010001_lt", - "1544769915805010001_w_5", - "1544779883529010001_lt", - "1544770422942010001_lt", - "1544770422942010001_w_5" - }; + String[] columns = new String[] { "1544768273917010001_lt", "1544768273917010001_w_1", + "1544768723910010001_ca_1", "1544768723910010001_lt", "1544768723910010001_ut_1", + "1544768723910010001_w_5", "1544769779710010001_lt", "1544769779710010001_w_5", + "1544769883529010001_lt", "1544769883529010001_w_5", "1544769915805010001_lt", + "1544769915805010001_w_5", "1544779883529010001_lt", "1544770422942010001_lt", + "1544770422942010001_w_5" }; Table table = TEST_UTIL.createTable(tn, cf1); for (int i = 0; i < columns.length; i++) { Put put = new Put(row).addColumn(cf1, Bytes.toBytes(columns[i]), value); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java index 5fdc50276079..cddb006d77c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,8 +47,8 @@ import org.slf4j.LoggerFactory; /* - * This test is for the optimization added in HBASE-15243. - * FilterList with two MultiRowRangeFilter's is constructed using Operator.MUST_PASS_ONE. + * This test is for the optimization added in HBASE-15243. FilterList with two MultiRowRangeFilter's + * is constructed using Operator.MUST_PASS_ONE. */ @Category(MediumTests.class) public class TestFilterListOrOperatorWithBlkCnt { @@ -76,9 +76,8 @@ public class TestFilterListOrOperatorWithBlkCnt { public static void setUpBeforeClass() throws Exception { long blkSize = 4096; /* - * dfs block size is adjusted so that the specified number of rows would result in - * multiple blocks (8 for this test). - * Later in the test, assertion is made on the number of blocks read. 
+ * dfs block size is adjusted so that the specified number of rows would result in multiple + * blocks (8 for this test). Later in the test, assertion is made on the number of blocks read. */ TEST_UTIL.getConfiguration().setLong("dfs.blocksize", blkSize); TEST_UTIL.getConfiguration().setLong("dfs.bytes-per-checksum", blkSize); @@ -155,10 +154,10 @@ private void generateRows(int numberOfRows, Table ht, byte[] family, byte[] qf, private List getScanResult(byte[] startRow, byte[] stopRow, Table ht) throws IOException { Scan scan = new Scan(); scan.readAllVersions(); - if(!Bytes.toString(startRow).isEmpty()) { + if (!Bytes.toString(startRow).isEmpty()) { scan.withStartRow(startRow); } - if(!Bytes.toString(stopRow).isEmpty()) { + if (!Bytes.toString(stopRow).isEmpty()) { scan.withStopRow(stopRow); } ResultScanner scanner = ht.getScanner(scan); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java index 9f391844285e..75765875c114 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -@Category({FilterTests.class, MediumTests.class}) +@Category({ FilterTests.class, MediumTests.class }) public class TestFilterSerialization { @ClassRule @@ -52,7 +52,7 @@ public void testColumnCountGetFilter() throws Exception { @Test public void testColumnPaginationFilter() throws Exception { - ColumnPaginationFilter columnPaginationFilter = new ColumnPaginationFilter(1,7); + ColumnPaginationFilter columnPaginationFilter = new ColumnPaginationFilter(1, 7); assertTrue(columnPaginationFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(columnPaginationFilter)))); } @@ -74,13 +74,13 @@ public void testColumnPrefixFilter() throws Exception { public void testColumnRangeFilter() throws Exception { // null columns ColumnRangeFilter columnRangeFilter = new ColumnRangeFilter(null, true, null, false); - assertTrue(columnRangeFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(columnRangeFilter)))); + assertTrue(columnRangeFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(columnRangeFilter)))); // non-null columns columnRangeFilter = new ColumnRangeFilter(Bytes.toBytes("a"), false, Bytes.toBytes("b"), true); - assertTrue(columnRangeFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(columnRangeFilter)))); + assertTrue(columnRangeFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(columnRangeFilter)))); } @Test @@ -92,8 +92,8 @@ public void testDependentColumnFilter() throws Exception { // non-null column qualifier/family dependentColumnFilter = new DependentColumnFilter(Bytes.toBytes("family"), - Bytes.toBytes("qual"), true, CompareOperator.GREATER_OR_EQUAL, - new BitComparator(Bytes.toBytes("bitComparator"), BitComparator.BitwiseOp.OR)); + Bytes.toBytes("qual"), true, CompareOperator.GREATER_OR_EQUAL, + new BitComparator(Bytes.toBytes("bitComparator"), BitComparator.BitwiseOp.OR)); assertTrue(dependentColumnFilter.areSerializedFieldsEqual( 
ProtobufUtil.toFilter(ProtobufUtil.toFilter(dependentColumnFilter)))); } @@ -101,34 +101,32 @@ public void testDependentColumnFilter() throws Exception { @Test public void testFamilyFilter() throws Exception { FamilyFilter familyFilter = new FamilyFilter(CompareOperator.EQUAL, - new BinaryPrefixComparator(Bytes.toBytes("testValueOne"))); - assertTrue(familyFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(familyFilter)))); + new BinaryPrefixComparator(Bytes.toBytes("testValueOne"))); + assertTrue(familyFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(familyFilter)))); } @Test public void testFilterList() throws Exception { // empty filter list FilterList filterList = new FilterList(new LinkedList<>()); - assertTrue(filterList.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(filterList)))); + assertTrue(filterList + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(filterList)))); // non-empty filter list LinkedList list = new LinkedList<>(); list.add(new ColumnCountGetFilter(1)); - list.add(new RowFilter(CompareOperator.EQUAL, - new SubstringComparator("testFilterList"))); - assertTrue(filterList.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(filterList)))); + list.add(new RowFilter(CompareOperator.EQUAL, new SubstringComparator("testFilterList"))); + assertTrue(filterList + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(filterList)))); } @Test public void testFilterWrapper() throws Exception { - FilterWrapper filterWrapper = - new FilterWrapper( + FilterWrapper filterWrapper = new FilterWrapper( new ColumnRangeFilter(Bytes.toBytes("e"), false, Bytes.toBytes("f"), true)); - assertTrue(filterWrapper.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(filterWrapper)))); + assertTrue(filterWrapper + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(filterWrapper)))); } @Test @@ -141,11 +139,11 @@ public void testFirstKeyOnlyFilter() throws Exception { @Test public void testFuzzyRowFilter() throws Exception { LinkedList> fuzzyList = new LinkedList<>(); - fuzzyList.add(new Pair<>(Bytes.toBytes("999"),new byte[] {0, 0, 1})); - fuzzyList.add(new Pair<>(Bytes.toBytes("abcd"),new byte[] {1, 0, 1, 1})); + fuzzyList.add(new Pair<>(Bytes.toBytes("999"), new byte[] { 0, 0, 1 })); + fuzzyList.add(new Pair<>(Bytes.toBytes("abcd"), new byte[] { 1, 0, 1, 1 })); FuzzyRowFilter fuzzyRowFilter = new FuzzyRowFilter(fuzzyList); - assertTrue(fuzzyRowFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(fuzzyRowFilter)))); + assertTrue(fuzzyRowFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(fuzzyRowFilter)))); } @Test @@ -165,21 +163,21 @@ public void testInclusiveStopFilter() throws Exception { public void testKeyOnlyFilter() throws Exception { // KeyOnlyFilter with lenAsVal KeyOnlyFilter keyOnlyFilter = new KeyOnlyFilter(true); - assertTrue(keyOnlyFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(keyOnlyFilter)))); + assertTrue(keyOnlyFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(keyOnlyFilter)))); // KeyOnlyFilter without lenAsVal keyOnlyFilter = new KeyOnlyFilter(); - assertTrue(keyOnlyFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(keyOnlyFilter)))); + assertTrue(keyOnlyFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(keyOnlyFilter)))); } @Test 
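The TestFilterSerialization hunks above and below all reflow the same round-trip idiom: a filter is converted to its protobuf representation and back, and the reconstructed copy must report equal serialized fields. A minimal illustrative sketch of that idiom (not taken from the patch; it assumes the test class's imports and uses an arbitrary PrefixFilter as the example):

  // Convert the filter to its protobuf form and back, then compare the serialized fields.
  PrefixFilter original = new PrefixFilter(Bytes.toBytes("abc"));
  Filter roundTripped = ProtobufUtil.toFilter(ProtobufUtil.toFilter(original));
  assertTrue(original.areSerializedFieldsEqual(roundTripped));
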
public void testMultipleColumnPrefixFilter() throws Exception { // empty array - byte [][] prefixes = null; + byte[][] prefixes = null; MultipleColumnPrefixFilter multipleColumnPrefixFilter = - new MultipleColumnPrefixFilter(prefixes); + new MultipleColumnPrefixFilter(prefixes); assertTrue(multipleColumnPrefixFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(multipleColumnPrefixFilter)))); @@ -195,59 +193,58 @@ public void testMultipleColumnPrefixFilter() throws Exception { @Test public void testPageFilter() throws Exception { PageFilter pageFilter = new PageFilter(6); - assertTrue(pageFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(pageFilter)))); + assertTrue(pageFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(pageFilter)))); } @Test public void testPrefixFilter() throws Exception { // null prefix PrefixFilter prefixFilter = new PrefixFilter(null); - assertTrue(prefixFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(prefixFilter)))); + assertTrue(prefixFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(prefixFilter)))); // non-null prefix prefixFilter = new PrefixFilter(Bytes.toBytes("abc")); - assertTrue(prefixFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(prefixFilter)))); + assertTrue(prefixFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(prefixFilter)))); } @Test public void testQualifierFilter() throws Exception { - QualifierFilter qualifierFilter = new QualifierFilter(CompareOperator.EQUAL, - new NullComparator()); - assertTrue(qualifierFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(qualifierFilter)))); + QualifierFilter qualifierFilter = + new QualifierFilter(CompareOperator.EQUAL, new NullComparator()); + assertTrue(qualifierFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(qualifierFilter)))); } @Test public void testRandomRowFilter() throws Exception { - RandomRowFilter randomRowFilter = new RandomRowFilter((float)0.1); - assertTrue(randomRowFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(randomRowFilter)))); + RandomRowFilter randomRowFilter = new RandomRowFilter((float) 0.1); + assertTrue(randomRowFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(randomRowFilter)))); } @Test public void testRowFilter() throws Exception { - RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL, - new SubstringComparator("testRowFilter")); - assertTrue(rowFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(rowFilter)))); + RowFilter rowFilter = + new RowFilter(CompareOperator.EQUAL, new SubstringComparator("testRowFilter")); + assertTrue( + rowFilter.areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(rowFilter)))); } @Test public void testSingleColumnValueExcludeFilter() throws Exception { // null family/column SingleColumnValueExcludeFilter SingleColumnValueExcludeFilter singleColumnValueExcludeFilter = - new SingleColumnValueExcludeFilter(null, null, - CompareOperator.GREATER_OR_EQUAL, Bytes.toBytes("value")); + new SingleColumnValueExcludeFilter(null, null, CompareOperator.GREATER_OR_EQUAL, + Bytes.toBytes("value")); assertTrue(singleColumnValueExcludeFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(singleColumnValueExcludeFilter)))); // non-null family/column SingleColumnValueFilter - 
singleColumnValueExcludeFilter = - new SingleColumnValueExcludeFilter(Bytes.toBytes("fam"), Bytes.toBytes("qual"), - CompareOperator.LESS_OR_EQUAL, new NullComparator(), false, false); + singleColumnValueExcludeFilter = new SingleColumnValueExcludeFilter(Bytes.toBytes("fam"), + Bytes.toBytes("qual"), CompareOperator.LESS_OR_EQUAL, new NullComparator(), false, false); assertTrue(singleColumnValueExcludeFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(singleColumnValueExcludeFilter)))); } @@ -256,15 +253,13 @@ public void testSingleColumnValueExcludeFilter() throws Exception { public void testSingleColumnValueFilter() throws Exception { // null family/column SingleColumnValueFilter SingleColumnValueFilter singleColumnValueFilter = - new SingleColumnValueFilter(null, null, - CompareOperator.LESS, Bytes.toBytes("value")); + new SingleColumnValueFilter(null, null, CompareOperator.LESS, Bytes.toBytes("value")); assertTrue(singleColumnValueFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(singleColumnValueFilter)))); // non-null family/column SingleColumnValueFilter - singleColumnValueFilter = - new SingleColumnValueFilter(Bytes.toBytes("family"), Bytes.toBytes("qualifier"), - CompareOperator.NOT_EQUAL, new NullComparator(), true, true); + singleColumnValueFilter = new SingleColumnValueFilter(Bytes.toBytes("family"), + Bytes.toBytes("qualifier"), CompareOperator.NOT_EQUAL, new NullComparator(), true, true); assertTrue(singleColumnValueFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(singleColumnValueFilter)))); } @@ -272,41 +267,40 @@ public void testSingleColumnValueFilter() throws Exception { @Test public void testSkipFilter() throws Exception { SkipFilter skipFilter = new SkipFilter(new PageFilter(6)); - assertTrue(skipFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(skipFilter)))); + assertTrue(skipFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(skipFilter)))); } @Test public void testTimestampsFilter() throws Exception { // Empty timestamp list TimestampsFilter timestampsFilter = new TimestampsFilter(new LinkedList<>()); - assertTrue(timestampsFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(timestampsFilter)))); + assertTrue(timestampsFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(timestampsFilter)))); // Non-empty timestamp list LinkedList list = new LinkedList<>(); list.add(EnvironmentEdgeManager.currentTime()); list.add(EnvironmentEdgeManager.currentTime()); timestampsFilter = new TimestampsFilter(list); - assertTrue(timestampsFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(timestampsFilter)))); + assertTrue(timestampsFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(timestampsFilter)))); } @Test public void testValueFilter() throws Exception { - ValueFilter valueFilter = new ValueFilter(CompareOperator.NO_OP, - new BinaryComparator(Bytes.toBytes("testValueOne"))); - assertTrue(valueFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(valueFilter)))); + ValueFilter valueFilter = + new ValueFilter(CompareOperator.NO_OP, new BinaryComparator(Bytes.toBytes("testValueOne"))); + assertTrue(valueFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(valueFilter)))); } @Test public void testWhileMatchFilter() throws Exception { - WhileMatchFilter whileMatchFilter = - new 
WhileMatchFilter( + WhileMatchFilter whileMatchFilter = new WhileMatchFilter( new ColumnRangeFilter(Bytes.toBytes("c"), false, Bytes.toBytes("d"), true)); - assertTrue(whileMatchFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(whileMatchFilter)))); + assertTrue(whileMatchFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(whileMatchFilter)))); } @Test @@ -316,18 +310,16 @@ public void testMultiRowRangeFilter() throws Exception { ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false)); - MultiRowRangeFilter multiRowRangeFilter = - new MultiRowRangeFilter(ranges); + MultiRowRangeFilter multiRowRangeFilter = new MultiRowRangeFilter(ranges); assertTrue(multiRowRangeFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(multiRowRangeFilter)))); } @Test public void testColumnValueFilter() throws Exception { - ColumnValueFilter columnValueFilter = - new ColumnValueFilter(Bytes.toBytes("family"), Bytes.toBytes("qualifier"), - CompareOperator.EQUAL, Bytes.toBytes("value")); - assertTrue(columnValueFilter.areSerializedFieldsEqual( - ProtobufUtil.toFilter(ProtobufUtil.toFilter(columnValueFilter)))); + ColumnValueFilter columnValueFilter = new ColumnValueFilter(Bytes.toBytes("family"), + Bytes.toBytes("qualifier"), CompareOperator.EQUAL, Bytes.toBytes("value")); + assertTrue(columnValueFilter + .areSerializedFieldsEqual(ProtobufUtil.toFilter(ProtobufUtil.toFilter(columnValueFilter)))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java index 84cdd6873099..9b57b456769d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,15 +46,14 @@ /** * Test if Filter is incompatible with scan-limits */ -@Category({FilterTests.class, LargeTests.class}) +@Category({ FilterTests.class, LargeTests.class }) public class TestFilterWithScanLimits extends FilterTestingCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestFilterWithScanLimits.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestFilterWithScanLimits.class); + private static final Logger LOG = LoggerFactory.getLogger(TestFilterWithScanLimits.class); private static final TableName tableName = TableName.valueOf("scanWithLimit"); private static final String columnFamily = "f1"; @@ -66,9 +65,8 @@ public void testScanWithLimit() { Scan scan = new Scan(); // set batch number as 2, which means each Result should contain 2 KVs at most scan.setBatch(2); - SingleColumnValueFilter filter = new SingleColumnValueFilter( - Bytes.toBytes(columnFamily), Bytes.toBytes("c5"), - CompareOperator .EQUAL, new SubstringComparator("2_c5")); + SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes(columnFamily), + Bytes.toBytes("c5"), CompareOperator.EQUAL, new SubstringComparator("2_c5")); // add filter after batch defined scan.setFilter(filter); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java index db753c04ee6b..9140273f7a9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java @@ -62,7 +62,7 @@ * Test if the FilterWrapper retains the same semantics defined in the * {@link org.apache.hadoop.hbase.filter.Filter} */ -@Category({FilterTests.class, MediumTests.class}) +@Category({ FilterTests.class, MediumTests.class }) public class TestFilterWrapper { @ClassRule @@ -85,9 +85,8 @@ public void testFilterWrapper() { Scan scan = new Scan(); List fs = new ArrayList<>(); - DependentColumnFilter f1 = new DependentColumnFilter(Bytes.toBytes("f1"), - Bytes.toBytes("c5"), true, CompareOperator.EQUAL, - new SubstringComparator("c5")); + DependentColumnFilter f1 = new DependentColumnFilter(Bytes.toBytes("f1"), Bytes.toBytes("c5"), + true, CompareOperator.EQUAL, new SubstringComparator("c5")); PageFilter f2 = new PageFilter(2); fs.add(f1); fs.add(f2); @@ -104,7 +103,7 @@ public void testFilterWrapper() { LOG.debug(kv_number + ". 
kv: " + kv); kv_number++; assertEquals("Returned row is not correct", Bytes.toString(CellUtil.cloneRow(kv)), - "row" + ( row_number + 1 )); + "row" + (row_number + 1)); } } @@ -135,10 +134,9 @@ private static void prepareData() { Put put = new Put(Bytes.toBytes("row" + i)); for (int j = 1; j < 6; j++) { long timestamp = j; - if (i != 1) - timestamp = i; + if (i != 1) timestamp = i; put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("c" + j), timestamp, - Bytes.toBytes(i + "_c" + j)); + Bytes.toBytes(i + "_c" + j)); } puts.add(put); } @@ -154,7 +152,7 @@ private static void createTable() { assertNotNull("HBaseAdmin is not initialized successfully.", admin); if (admin != null) { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(name) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f1"))).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f1"))).build(); try { admin.createTable(tableDescriptor); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFiltersWithBinaryComponentComparator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFiltersWithBinaryComponentComparator.java index 5dec5961fc83..0ef053a0aeb8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFiltersWithBinaryComponentComparator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFiltersWithBinaryComponentComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; - import org.apache.commons.codec.binary.Hex; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -86,33 +85,28 @@ public static void tearDownAfterClass() throws Exception { @Test public void testRowFilterWithBinaryComponentComparator() throws IOException { - //SELECT * from table where a=1 and b > 10 and b < 20 and c > 90 and c < 100 and d=1 + // SELECT * from table where a=1 and b > 10 and b < 20 and c > 90 and c < 100 and d=1 tableName = TableName.valueOf(name.getMethodName()); Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE); generateRows(ht, family, qf); FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL); setRowFilters(filterList); Scan scan = createScan(filterList); - List result = getResults(ht,scan); - for(Cell cell: result){ + List result = getResults(ht, scan); + for (Cell cell : result) { byte[] key = CellUtil.cloneRow(cell); - int a = Bytes.readAsInt(key,aOffset,4); - int b = Bytes.readAsInt(key,bOffset,4); - int c = Bytes.readAsInt(key,cOffset,4); - int d = Bytes.readAsInt(key,dOffset,4); - assertTrue(a == 1 && - b > 10 && - b < 20 && - c > 90 && - c < 100 && - d == 1); + int a = Bytes.readAsInt(key, aOffset, 4); + int b = Bytes.readAsInt(key, bOffset, 4); + int c = Bytes.readAsInt(key, cOffset, 4); + int d = Bytes.readAsInt(key, dOffset, 4); + assertTrue(a == 1 && b > 10 && b < 20 && c > 90 && c < 100 && d == 1); } ht.close(); } @Test public void testValueFilterWithBinaryComponentComparator() throws IOException { - //SELECT * from table where value has 'y' at position 1 + // SELECT * from table where value has 'y' at position 1 tableName = TableName.valueOf(name.getMethodName()); Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE); generateRows(ht, family, qf); @@ -120,8 
+114,8 @@ public void testValueFilterWithBinaryComponentComparator() throws IOException { setValueFilters(filterList); Scan scan = new Scan(); scan.setFilter(filterList); - List result = getResults(ht,scan); - for(Cell cell: result){ + List result = getResults(ht, scan); + for (Cell cell : result) { byte[] value = CellUtil.cloneValue(cell); assertTrue(Bytes.toString(value).charAt(1) == 'y'); } @@ -130,8 +124,8 @@ public void testValueFilterWithBinaryComponentComparator() throws IOException { @Test public void testRowAndValueFilterWithBinaryComponentComparator() throws IOException { - //SELECT * from table where a=1 and b > 10 and b < 20 and c > 90 and c < 100 and d=1 - //and value has 'y' at position 1" + // SELECT * from table where a=1 and b > 10 and b < 20 and c > 90 and c < 100 and d=1 + // and value has 'y' at position 1" tableName = TableName.valueOf(name.getMethodName()); Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE); generateRows(ht, family, qf); @@ -140,19 +134,14 @@ public void testRowAndValueFilterWithBinaryComponentComparator() throws IOExcept setValueFilters(filterList); Scan scan = new Scan(); scan.setFilter(filterList); - List result = getResults(ht,scan); - for(Cell cell: result){ + List result = getResults(ht, scan); + for (Cell cell : result) { byte[] key = CellUtil.cloneRow(cell); - int a = Bytes.readAsInt(key,aOffset,4); - int b = Bytes.readAsInt(key,bOffset,4); - int c = Bytes.readAsInt(key,cOffset,4); - int d = Bytes.readAsInt(key,dOffset,4); - assertTrue(a == 1 && - b > 10 && - b < 20 && - c > 90 && - c < 100 && - d == 1); + int a = Bytes.readAsInt(key, aOffset, 4); + int b = Bytes.readAsInt(key, bOffset, 4); + int c = Bytes.readAsInt(key, cOffset, 4); + int d = Bytes.readAsInt(key, dOffset, 4); + assertTrue(a == 1 && b > 10 && b < 20 && c > 90 && c < 100 && d == 1); byte[] value = CellUtil.cloneValue(cell); assertTrue(Bytes.toString(value).charAt(1) == 'y'); } @@ -160,40 +149,25 @@ public void testRowAndValueFilterWithBinaryComponentComparator() throws IOExcept } /** - * Since we are trying to emulate - * SQL: SELECT * from table where a = 1 and b > 10 and b < 20 and - * c > 90 and c < 100 and d = 1 - * We are generating rows with: - * a = 1, b >=9 and b < 22, c >= 89 and c < 102, and d = 1 - * At the end the table will look something like this: - * ------------ - * a| b| c|d| - * ------------ - * 1| 9| 89|1|family:qf|xyz| - * ----------- - * 1| 9| 90|1|family:qf|abc| - * ----------- - * 1| 9| 91|1|family:qf|xyz| - * ------------------------- - * . - * ------------------------- - * . - * ------------------------- - * 1|21|101|1|family:qf|xyz| + * Since we are trying to emulate SQL: SELECT * from table where a = 1 and b > 10 and b < 20 and c + * > 90 and c < 100 and d = 1 We are generating rows with: a = 1, b >=9 and b < 22, c >= 89 and c + * < 102, and d = 1 At the end the table will look something like this: ------------ a| b| c|d| + * ------------ 1| 9| 89|1|family:qf|xyz| ----------- 1| 9| 90|1|family:qf|abc| ----------- 1| 9| + * 91|1|family:qf|xyz| ------------------------- . ------------------------- . 
+ * ------------------------- 1|21|101|1|family:qf|xyz| */ - private void generateRows(Table ht, byte[] family, byte[] qf) - throws IOException { - for(int a = 1; a < 2; ++a) { - for(int b = 9; b < 22; ++b) { - for(int c = 89; c < 102; ++c) { - for(int d = 1; d < 2 ; ++d) { + private void generateRows(Table ht, byte[] family, byte[] qf) throws IOException { + for (int a = 1; a < 2; ++a) { + for (int b = 9; b < 22; ++b) { + for (int c = 89; c < 102; ++c) { + for (int d = 1; d < 2; ++d) { byte[] key = new byte[16]; - Bytes.putInt(key,0,a); - Bytes.putInt(key,4,b); - Bytes.putInt(key,8,c); - Bytes.putInt(key,12,d); + Bytes.putInt(key, 0, a); + Bytes.putInt(key, 4, b); + Bytes.putInt(key, 8, c); + Bytes.putInt(key, 12, d); Put row = new Put(key); - if (c%2==0) { + if (c % 2 == 0) { row.addColumn(family, qf, Bytes.toBytes("abc")); if (LOG.isInfoEnabled()) { LOG.info("added row: {} with value 'abc'", Arrays.toString(Hex.encodeHex(key))); @@ -212,70 +186,68 @@ private void generateRows(Table ht, byte[] family, byte[] qf) } private void setRowFilters(FilterList filterList) { - //offset for b as it is second component of "a+b+c+d" - //'a' is at offset 0 + // offset for b as it is second component of "a+b+c+d" + // 'a' is at offset 0 int bOffset = 4; - byte[] b10 = Bytes.toBytes(10); //tests b > 10 - Filter b10Filter = new RowFilter(CompareOperator.GREATER, - new BinaryComponentComparator(b10,bOffset)); + byte[] b10 = Bytes.toBytes(10); // tests b > 10 + Filter b10Filter = + new RowFilter(CompareOperator.GREATER, new BinaryComponentComparator(b10, bOffset)); filterList.addFilter(b10Filter); - byte[] b20 = Bytes.toBytes(20); //tests b < 20 - Filter b20Filter = new RowFilter(CompareOperator.LESS, - new BinaryComponentComparator(b20,bOffset)); + byte[] b20 = Bytes.toBytes(20); // tests b < 20 + Filter b20Filter = + new RowFilter(CompareOperator.LESS, new BinaryComponentComparator(b20, bOffset)); filterList.addFilter(b20Filter); - //offset for c as it is third component of "a+b+c+d" + // offset for c as it is third component of "a+b+c+d" int cOffset = 8; - byte[] c90 = Bytes.toBytes(90); //tests c > 90 - Filter c90Filter = new RowFilter(CompareOperator.GREATER, - new BinaryComponentComparator(c90,cOffset)); + byte[] c90 = Bytes.toBytes(90); // tests c > 90 + Filter c90Filter = + new RowFilter(CompareOperator.GREATER, new BinaryComponentComparator(c90, cOffset)); filterList.addFilter(c90Filter); - byte[] c100 = Bytes.toBytes(100); //tests c < 100 - Filter c100Filter = new RowFilter(CompareOperator.LESS, - new BinaryComponentComparator(c100,cOffset)); + byte[] c100 = Bytes.toBytes(100); // tests c < 100 + Filter c100Filter = + new RowFilter(CompareOperator.LESS, new BinaryComponentComparator(c100, cOffset)); filterList.addFilter(c100Filter); - //offset for d as it is fourth component of "a+b+c+d" + // offset for d as it is fourth component of "a+b+c+d" int dOffset = 12; - byte[] d1 = Bytes.toBytes(1); //tests d == 1 - Filter dFilter = new RowFilter(CompareOperator.EQUAL, - new BinaryComponentComparator(d1,dOffset)); + byte[] d1 = Bytes.toBytes(1); // tests d == 1 + Filter dFilter = + new RowFilter(CompareOperator.EQUAL, new BinaryComponentComparator(d1, dOffset)); filterList.addFilter(dFilter); } /** - * We have rows with either "abc" or "xyz". - * We want values which have 'y' at second position - * of the string. - * As a result only values with "xyz" shall be returned - */ + * We have rows with either "abc" or "xyz". We want values which have 'y' at second position of + * the string. 
As a result only values with "xyz" shall be returned + */ private void setValueFilters(FilterList filterList) { int offset = 1; byte[] y = Bytes.toBytes("y"); - Filter yFilter = new ValueFilter(CompareOperator.EQUAL, - new BinaryComponentComparator(y,offset)); + Filter yFilter = + new ValueFilter(CompareOperator.EQUAL, new BinaryComponentComparator(y, offset)); filterList.addFilter(yFilter); } private Scan createScan(FilterList list) { - //build start and end key for scan - byte[] startKey = new byte[16]; //key size with four ints - Bytes.putInt(startKey,aOffset,1); //a=1, takes care of a = 1 - Bytes.putInt(startKey,bOffset,11); //b=11, takes care of b > 10 - Bytes.putInt(startKey,cOffset,91); //c=91, - Bytes.putInt(startKey,dOffset,1); //d=1, + // build start and end key for scan + byte[] startKey = new byte[16]; // key size with four ints + Bytes.putInt(startKey, aOffset, 1); // a=1, takes care of a = 1 + Bytes.putInt(startKey, bOffset, 11); // b=11, takes care of b > 10 + Bytes.putInt(startKey, cOffset, 91); // c=91, + Bytes.putInt(startKey, dOffset, 1); // d=1, byte[] endKey = new byte[16]; - Bytes.putInt(endKey,aOffset,1); //a=1, takes care of a = 1 - Bytes.putInt(endKey,bOffset,20); //b=20, takes care of b < 20 - Bytes.putInt(endKey,cOffset,100); //c=100, - Bytes.putInt(endKey,dOffset,1); //d=1, + Bytes.putInt(endKey, aOffset, 1); // a=1, takes care of a = 1 + Bytes.putInt(endKey, bOffset, 20); // b=20, takes care of b < 20 + Bytes.putInt(endKey, cOffset, 100); // c=100, + Bytes.putInt(endKey, dOffset, 1); // d=1, - //setup scan + // setup scan Scan scan = new Scan().withStartRow(startKey).withStopRow(endKey); scan.setFilter(list); return scan; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java index 8560b09b3d10..d389b94ef8ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ /** */ -@Category({FilterTests.class, MediumTests.class}) +@Category({ FilterTests.class, MediumTests.class }) public class TestFuzzyRowAndColumnRangeFilter { @ClassRule @@ -103,8 +103,8 @@ public void tearDown() throws Exception { @Test public void Test() throws Exception { String cf = "f"; - Table ht = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), - Bytes.toBytes(cf), Integer.MAX_VALUE); + Table ht = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), Bytes.toBytes(cf), + Integer.MAX_VALUE); // 10 byte row key - (2 bytes 4 bytes 4 bytes) // 4 byte qualifier @@ -128,8 +128,8 @@ public void Test() throws Exception { p.setDurability(Durability.SKIP_WAL); p.addColumn(Bytes.toBytes(cf), cq, Bytes.toBytes(c)); ht.put(p); - LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: " - + Bytes.toStringBinary(cq)); + LOG.info( + "Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: " + Bytes.toStringBinary(cq)); } } } @@ -150,18 +150,18 @@ private void runTest(Table hTable, int cqStart, int expectedSize) throws IOExcep buf.clear(); buf.putShort((short) 2); for (int i = 0; i < 4; i++) - buf.put((byte)63); - buf.putInt((short)1); + buf.put((byte) 63); + buf.putInt((short) 1); - byte[] mask = new byte[] {0 , 0, 1, 1, 1, 1, 0, 0, 0, 0}; + byte[] mask = new byte[] { 0, 0, 1, 1, 1, 1, 0, 0, 0, 0 }; Pair pair = new Pair<>(fuzzyKey, mask); FuzzyRowFilter fuzzyRowFilter = new FuzzyRowFilter(Lists.newArrayList(pair)); - ColumnRangeFilter columnRangeFilter = new ColumnRangeFilter(Bytes.toBytes(cqStart), true - , Bytes.toBytes(4), true); - //regular test + ColumnRangeFilter columnRangeFilter = + new ColumnRangeFilter(Bytes.toBytes(cqStart), true, Bytes.toBytes(4), true); + // regular test runScanner(hTable, expectedSize, fuzzyRowFilter, columnRangeFilter); - //reverse filter order test + // reverse filter order test runScanner(hTable, expectedSize, columnRangeFilter, fuzzyRowFilter); } @@ -179,7 +179,7 @@ private void runScanner(Table hTable, int expectedSize, Filter... filters) throw while ((result = scanner.next()) != null) { for (Cell kv : result.listCells()) { LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: " - + Bytes.toStringBinary(CellUtil.cloneQualifier(kv))); + + Bytes.toStringBinary(CellUtil.cloneQualifier(kv))); results.add(kv); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilter.java index 07548a846e04..8e94fc66b7c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestFuzzyRowFilter { @ClassRule @@ -39,439 +39,285 @@ public class TestFuzzyRowFilter { public void testSatisfiesNoUnsafeForward() { Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, - FuzzyRowFilter.satisfiesNoUnsafe(false, - new byte[]{1, (byte) -128, 1, 0, 1}, - 0, 5, - new byte[]{1, 0, 1}, - new byte[]{0, 1, 0})); + FuzzyRowFilter.satisfiesNoUnsafe(false, new byte[] { 1, (byte) -128, 1, 0, 1 }, 0, 5, + new byte[] { 1, 0, 1 }, new byte[] { 0, 1, 0 })); Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(false, - new byte[]{1, (byte) -128, 2, 0, 1}, - 0, 5, - new byte[]{1, 0, 1}, - new byte[]{0, 1, 0})); + FuzzyRowFilter.satisfiesNoUnsafe(false, new byte[] { 1, (byte) -128, 2, 0, 1 }, 0, 5, + new byte[] { 1, 0, 1 }, new byte[] { 0, 1, 0 })); - - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, - FuzzyRowFilter.satisfiesNoUnsafe(false, - new byte[]{1, 2, 1, 3, 3}, - 0, 5, - new byte[]{1, 2, 0, 3}, - new byte[]{0, 0, 1, 0})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, FuzzyRowFilter.satisfiesNoUnsafe(false, + new byte[] { 1, 2, 1, 3, 3 }, 0, 5, new byte[] { 1, 2, 0, 3 }, new byte[] { 0, 0, 1, 0 })); Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(false, - new byte[]{1, 1, 1, 3, 0}, // row to check - 0, 5, - new byte[]{1, 2, 0, 3}, // fuzzy row - new byte[]{0, 0, 1, 0})); // mask + FuzzyRowFilter.satisfiesNoUnsafe(false, new byte[] { 1, 1, 1, 3, 0 }, // row to check + 0, 5, new byte[] { 1, 2, 0, 3 }, // fuzzy row + new byte[] { 0, 0, 1, 0 })); // mask Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(false, - new byte[]{1, 1, 1, 3, 0}, - 0, 5, - new byte[]{1, (byte) 245, 0, 3}, - new byte[]{0, 0, 1, 0})); + FuzzyRowFilter.satisfiesNoUnsafe(false, new byte[] { 1, 1, 1, 3, 0 }, 0, 5, + new byte[] { 1, (byte) 245, 0, 3 }, new byte[] { 0, 0, 1, 0 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(false, - new byte[]{1, 2, 1, 0, 1}, - 0, 5, - new byte[]{0, 1, 2}, - new byte[]{1, 0, 0})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfiesNoUnsafe( + false, new byte[] { 1, 2, 1, 0, 1 }, 0, 5, new byte[] { 0, 1, 2 }, new byte[] { 1, 0, 0 })); } @Test public void testSatisfiesForward() { - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, - FuzzyRowFilter.satisfies(false, - new byte[]{1, (byte) -128, 1, 0, 1}, - new byte[]{1, 0, 1}, - new byte[]{-1, 0, -1})); - - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(false, - new byte[]{1, (byte) -128, 2, 0, 1}, - new byte[]{1, 0, 1}, - new byte[]{-1, 0, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, FuzzyRowFilter.satisfies(false, + new byte[] { 1, (byte) -128, 1, 0, 1 }, new byte[] { 1, 0, 1 }, new byte[] { -1, 0, -1 })); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfies(false, + new byte[] { 1, (byte) -128, 2, 0, 1 }, new byte[] { 1, 0, 1 }, new byte[] { -1, 0, -1 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, - FuzzyRowFilter.satisfies(false, - new byte[]{1, 2, 1, 3, 3}, - new byte[]{1, 2, 0, 3}, - new byte[]{-1, 
-1, 0, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, FuzzyRowFilter.satisfies(false, + new byte[] { 1, 2, 1, 3, 3 }, new byte[] { 1, 2, 0, 3 }, new byte[] { -1, -1, 0, -1 })); Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(false, - new byte[]{1, 1, 1, 3, 0}, // row to check - new byte[]{1, 2, 0, 3}, // fuzzy row - new byte[]{-1, -1, 0, -1})); // mask + FuzzyRowFilter.satisfies(false, new byte[] { 1, 1, 1, 3, 0 }, // row to check + new byte[] { 1, 2, 0, 3 }, // fuzzy row + new byte[] { -1, -1, 0, -1 })); // mask Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(false, - new byte[]{1, 1, 1, 3, 0}, - new byte[]{1, (byte) 245, 0, 3}, - new byte[]{-1, -1, 0, -1})); + FuzzyRowFilter.satisfies(false, new byte[] { 1, 1, 1, 3, 0 }, + new byte[] { 1, (byte) 245, 0, 3 }, new byte[] { -1, -1, 0, -1 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(false, - new byte[]{1, 2, 1, 0, 1}, - new byte[]{0, 1, 2}, - new byte[]{0, -1, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfies(false, + new byte[] { 1, 2, 1, 0, 1 }, new byte[] { 0, 1, 2 }, new byte[] { 0, -1, -1 })); } @Test public void testSatisfiesReverse() { - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, - FuzzyRowFilter.satisfies(true, - new byte[]{1, (byte) -128, 1, 0, 1}, - new byte[]{1, 0, 1}, - new byte[]{-1, 0, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, FuzzyRowFilter.satisfies(true, + new byte[] { 1, (byte) -128, 1, 0, 1 }, new byte[] { 1, 0, 1 }, new byte[] { -1, 0, -1 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(true, - new byte[]{1, (byte) -128, 2, 0, 1}, - new byte[]{1, 0, 1}, - new byte[]{-1, 0, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfies(true, + new byte[] { 1, (byte) -128, 2, 0, 1 }, new byte[] { 1, 0, 1 }, new byte[] { -1, 0, -1 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(true, - new byte[]{2, 3, 1, 1, 1}, - new byte[]{1, 0, 1}, - new byte[]{-1, 0, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfies(true, + new byte[] { 2, 3, 1, 1, 1 }, new byte[] { 1, 0, 1 }, new byte[] { -1, 0, -1 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, - FuzzyRowFilter.satisfies(true, - new byte[]{1, 2, 1, 3, 3}, - new byte[]{1, 2, 0, 3}, - new byte[]{-1, -1, 0, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, FuzzyRowFilter.satisfies(true, + new byte[] { 1, 2, 1, 3, 3 }, new byte[] { 1, 2, 0, 3 }, new byte[] { -1, -1, 0, -1 })); Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(true, - new byte[]{1, (byte) 245, 1, 3, 0}, - new byte[]{1, 1, 0, 3}, - new byte[]{-1, -1, 0, -1})); + FuzzyRowFilter.satisfies(true, new byte[] { 1, (byte) 245, 1, 3, 0 }, + new byte[] { 1, 1, 0, 3 }, new byte[] { -1, -1, 0, -1 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(true, - new byte[]{1, 3, 1, 3, 0}, - new byte[]{1, 2, 0, 3}, - new byte[]{-1, -1, 0, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfies(true, + new byte[] { 1, 3, 1, 3, 0 }, new byte[] { 1, 2, 0, 3 }, new byte[] { -1, -1, 0, -1 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(true, - new byte[]{2, 1, 
1, 1, 0}, - new byte[]{1, 2, 0, 3}, - new byte[]{-1, -1, 0, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfies(true, + new byte[] { 2, 1, 1, 1, 0 }, new byte[] { 1, 2, 0, 3 }, new byte[] { -1, -1, 0, -1 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfies(true, - new byte[]{1, 2, 1, 0, 1}, - new byte[]{0, 1, 2}, - new byte[]{0, -1, -1})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfies(true, + new byte[] { 1, 2, 1, 0, 1 }, new byte[] { 0, 1, 2 }, new byte[] { 0, -1, -1 })); } @Test public void testSatisfiesNoUnsafeReverse() { Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, - FuzzyRowFilter.satisfiesNoUnsafe(true, - new byte[]{1, (byte) -128, 1, 0, 1}, - 0, 5, - new byte[]{1, 0, 1}, - new byte[]{0, 1, 0})); + FuzzyRowFilter.satisfiesNoUnsafe(true, new byte[] { 1, (byte) -128, 1, 0, 1 }, 0, 5, + new byte[] { 1, 0, 1 }, new byte[] { 0, 1, 0 })); Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(true, - new byte[]{1, (byte) -128, 2, 0, 1}, - 0, 5, - new byte[]{1, 0, 1}, - new byte[]{0, 1, 0})); + FuzzyRowFilter.satisfiesNoUnsafe(true, new byte[] { 1, (byte) -128, 2, 0, 1 }, 0, 5, + new byte[] { 1, 0, 1 }, new byte[] { 0, 1, 0 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(true, - new byte[]{2, 3, 1, 1, 1}, - 0, 5, - new byte[]{1, 0, 1}, - new byte[]{0, 1, 0})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfiesNoUnsafe( + true, new byte[] { 2, 3, 1, 1, 1 }, 0, 5, new byte[] { 1, 0, 1 }, new byte[] { 0, 1, 0 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, - FuzzyRowFilter.satisfiesNoUnsafe(true, - new byte[]{1, 2, 1, 3, 3}, - 0, 5, - new byte[]{1, 2, 0, 3}, - new byte[]{0, 0, 1, 0})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.YES, FuzzyRowFilter.satisfiesNoUnsafe(true, + new byte[] { 1, 2, 1, 3, 3 }, 0, 5, new byte[] { 1, 2, 0, 3 }, new byte[] { 0, 0, 1, 0 })); Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(true, - new byte[]{1, (byte) 245, 1, 3, 0}, - 0, 5, - new byte[]{1, 1, 0, 3}, - new byte[]{0, 0, 1, 0})); + FuzzyRowFilter.satisfiesNoUnsafe(true, new byte[] { 1, (byte) 245, 1, 3, 0 }, 0, 5, + new byte[] { 1, 1, 0, 3 }, new byte[] { 0, 0, 1, 0 })); Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(true, - new byte[]{1, 3, 1, 3, 0}, - 0, 5, - new byte[]{1, 2, 0, 3}, - new byte[]{0, 0, 1, 0})); + FuzzyRowFilter.satisfiesNoUnsafe(true, new byte[] { 1, 3, 1, 3, 0 }, 0, 5, + new byte[] { 1, 2, 0, 3 }, new byte[] { 0, 0, 1, 0 })); Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(true, - new byte[]{2, 1, 1, 1, 0}, - 0, 5, - new byte[]{1, 2, 0, 3}, - new byte[]{0, 0, 1, 0})); + FuzzyRowFilter.satisfiesNoUnsafe(true, new byte[] { 2, 1, 1, 1, 0 }, 0, 5, + new byte[] { 1, 2, 0, 3 }, new byte[] { 0, 0, 1, 0 })); - Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, - FuzzyRowFilter.satisfiesNoUnsafe(true, - new byte[]{1, 2, 1, 0, 1}, - 0, 5, - new byte[]{0, 1, 2}, - new byte[]{1, 0, 0})); + Assert.assertEquals(FuzzyRowFilter.SatisfiesCode.NEXT_EXISTS, FuzzyRowFilter.satisfiesNoUnsafe( + true, new byte[] { 1, 2, 1, 0, 1 }, 0, 5, new byte[] { 0, 1, 2 }, new byte[] { 1, 0, 0 })); } + @Test public void testGetNextForFuzzyRuleForward() { - assertNext(false, - 
new byte[]{0, 1, 2}, // fuzzy row - new byte[]{0, -1, -1}, // mask - new byte[]{1, 2, 1, 0, 1}, // current - new byte[]{2, 1, 2}); // expected next - - assertNext(false, - new byte[]{0, 1, 2}, // fuzzy row - new byte[]{0, -1, -1}, // mask - new byte[]{1, 1, 2, 0, 1}, // current - new byte[]{1, 1, 2, 0, 2}); // expected next - - assertNext(false, - new byte[]{0, 1, 0, 2, 0}, // fuzzy row - new byte[]{0, -1, 0, -1, 0}, // mask - new byte[]{1, 0, 2, 0, 1}, // current - new byte[]{1, 1, 0, 2}); // expected next - - assertNext(false, - new byte[]{1, 0, 1}, - new byte[]{-1, 0, -1}, - new byte[]{1, (byte) 128, 2, 0, 1}, - new byte[]{1, (byte) 129, 1}); - - assertNext(false, - new byte[]{0, 1, 0, 1}, - new byte[]{0, -1, 0, -1}, - new byte[]{5, 1, 0, 1}, - new byte[]{5, 1, 1, 1}); - - assertNext(false, - new byte[]{0, 1, 0, 1}, - new byte[]{0, -1, 0, -1}, - new byte[]{5, 1, 0, 1, 1}, - new byte[]{5, 1, 0, 1, 2}); - - assertNext(false, - new byte[]{0, 1, 0, 0}, // fuzzy row - new byte[]{0, -1, 0, 0}, // mask - new byte[]{5, 1, (byte) 255, 1}, // current - new byte[]{5, 1, (byte) 255, 2}); // expected next - - assertNext(false, - new byte[]{0, 1, 0, 1}, // fuzzy row - new byte[]{0, -1, 0, -1}, // mask - new byte[]{5, 1, (byte) 255, 1}, // current - new byte[]{6, 1, 0, 1}); // expected next - - assertNext(false, - new byte[]{0, 1, 0, 1}, // fuzzy row - new byte[]{0, -1, 0, -1}, // mask - new byte[]{5, 1, (byte) 255, 0}, // current - new byte[]{5, 1, (byte) 255, 1}); // expected next - - assertNext(false, - new byte[]{5, 1, 1, 0}, - new byte[]{-1, -1, 0, 0}, - new byte[]{5, 1, (byte) 255, 1}, - new byte[]{5, 1, (byte) 255, 2}); - - assertNext(false, - new byte[]{1, 1, 1, 1}, - new byte[]{-1, -1, 0, 0}, - new byte[]{1, 1, 2, 2}, - new byte[]{1, 1, 2, 3}); - - assertNext(false, - new byte[]{1, 1, 1, 1}, - new byte[]{-1, -1, 0, 0}, - new byte[]{1, 1, 3, 2}, - new byte[]{1, 1, 3, 3}); - - assertNext(false, - new byte[]{1, 1, 1, 1}, - new byte[]{0, 0, 0, 0}, - new byte[]{1, 1, 2, 3}, - new byte[]{1, 1, 2, 4}); - - assertNext(false, - new byte[]{1, 1, 1, 1}, - new byte[]{0, 0, 0, 0}, - new byte[]{1, 1, 3, 2}, - new byte[]{1, 1, 3, 3}); - - assertNext(false, - new byte[]{1, 1, 0, 0}, - new byte[]{-1, -1, 0, 0}, - new byte[]{0, 1, 3, 2}, - new byte[]{1, 1}); + assertNext(false, new byte[] { 0, 1, 2 }, // fuzzy row + new byte[] { 0, -1, -1 }, // mask + new byte[] { 1, 2, 1, 0, 1 }, // current + new byte[] { 2, 1, 2 }); // expected next + + assertNext(false, new byte[] { 0, 1, 2 }, // fuzzy row + new byte[] { 0, -1, -1 }, // mask + new byte[] { 1, 1, 2, 0, 1 }, // current + new byte[] { 1, 1, 2, 0, 2 }); // expected next + + assertNext(false, new byte[] { 0, 1, 0, 2, 0 }, // fuzzy row + new byte[] { 0, -1, 0, -1, 0 }, // mask + new byte[] { 1, 0, 2, 0, 1 }, // current + new byte[] { 1, 1, 0, 2 }); // expected next + + assertNext(false, new byte[] { 1, 0, 1 }, new byte[] { -1, 0, -1 }, + new byte[] { 1, (byte) 128, 2, 0, 1 }, new byte[] { 1, (byte) 129, 1 }); + + assertNext(false, new byte[] { 0, 1, 0, 1 }, new byte[] { 0, -1, 0, -1 }, + new byte[] { 5, 1, 0, 1 }, new byte[] { 5, 1, 1, 1 }); + + assertNext(false, new byte[] { 0, 1, 0, 1 }, new byte[] { 0, -1, 0, -1 }, + new byte[] { 5, 1, 0, 1, 1 }, new byte[] { 5, 1, 0, 1, 2 }); + + assertNext(false, new byte[] { 0, 1, 0, 0 }, // fuzzy row + new byte[] { 0, -1, 0, 0 }, // mask + new byte[] { 5, 1, (byte) 255, 1 }, // current + new byte[] { 5, 1, (byte) 255, 2 }); // expected next + + assertNext(false, new byte[] { 0, 1, 0, 1 }, // fuzzy row + new byte[] { 0, -1, 
0, -1 }, // mask + new byte[] { 5, 1, (byte) 255, 1 }, // current + new byte[] { 6, 1, 0, 1 }); // expected next + + assertNext(false, new byte[] { 0, 1, 0, 1 }, // fuzzy row + new byte[] { 0, -1, 0, -1 }, // mask + new byte[] { 5, 1, (byte) 255, 0 }, // current + new byte[] { 5, 1, (byte) 255, 1 }); // expected next + + assertNext(false, new byte[] { 5, 1, 1, 0 }, new byte[] { -1, -1, 0, 0 }, + new byte[] { 5, 1, (byte) 255, 1 }, new byte[] { 5, 1, (byte) 255, 2 }); + + assertNext(false, new byte[] { 1, 1, 1, 1 }, new byte[] { -1, -1, 0, 0 }, + new byte[] { 1, 1, 2, 2 }, new byte[] { 1, 1, 2, 3 }); + + assertNext(false, new byte[] { 1, 1, 1, 1 }, new byte[] { -1, -1, 0, 0 }, + new byte[] { 1, 1, 3, 2 }, new byte[] { 1, 1, 3, 3 }); + + assertNext(false, new byte[] { 1, 1, 1, 1 }, new byte[] { 0, 0, 0, 0 }, + new byte[] { 1, 1, 2, 3 }, new byte[] { 1, 1, 2, 4 }); + + assertNext(false, new byte[] { 1, 1, 1, 1 }, new byte[] { 0, 0, 0, 0 }, + new byte[] { 1, 1, 3, 2 }, new byte[] { 1, 1, 3, 3 }); + + assertNext(false, new byte[] { 1, 1, 0, 0 }, new byte[] { -1, -1, 0, 0 }, + new byte[] { 0, 1, 3, 2 }, new byte[] { 1, 1 }); // No next for this one - Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule( - new byte[]{2, 3, 1, 1, 1}, // row to check - new byte[]{1, 0, 1}, // fuzzy row - new byte[]{-1, 0, -1})); // mask - Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule( - new byte[]{1, (byte) 245, 1, 3, 0}, - new byte[]{1, 1, 0, 3}, - new byte[]{-1, -1, 0, -1})); - Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule( - new byte[]{1, 3, 1, 3, 0}, - new byte[]{1, 2, 0, 3}, - new byte[]{-1, -1, 0, -1})); - Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule( - new byte[]{2, 1, 1, 1, 0}, - new byte[]{1, 2, 0, 3}, - new byte[]{-1, -1, 0, -1})); + Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule(new byte[] { 2, 3, 1, 1, 1 }, // row to + // check + new byte[] { 1, 0, 1 }, // fuzzy row + new byte[] { -1, 0, -1 })); // mask + Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule(new byte[] { 1, (byte) 245, 1, 3, 0 }, + new byte[] { 1, 1, 0, 3 }, new byte[] { -1, -1, 0, -1 })); + Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule(new byte[] { 1, 3, 1, 3, 0 }, + new byte[] { 1, 2, 0, 3 }, new byte[] { -1, -1, 0, -1 })); + Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule(new byte[] { 2, 1, 1, 1, 0 }, + new byte[] { 1, 2, 0, 3 }, new byte[] { -1, -1, 0, -1 })); } @Test public void testGetNextForFuzzyRuleReverse() { - assertNext(true, - new byte[]{0, 1, 2}, // fuzzy row - new byte[]{0, -1, -1}, // mask - new byte[]{1, 2, 1, 0, 1}, // current + assertNext(true, new byte[] { 0, 1, 2 }, // fuzzy row + new byte[] { 0, -1, -1 }, // mask + new byte[] { 1, 2, 1, 0, 1 }, // current // TODO: should be {1, 1, 3} ? - new byte[]{1, 1, 2, (byte) 0xFF, (byte) 0xFF}); // expected next + new byte[] { 1, 1, 2, (byte) 0xFF, (byte) 0xFF }); // expected next - assertNext(true, - new byte[]{0, 1, 0, 2, 0}, // fuzzy row - new byte[]{0, -1, 0, -1, 0}, // mask - new byte[]{1, 2, 1, 3, 1}, // current + assertNext(true, new byte[] { 0, 1, 0, 2, 0 }, // fuzzy row + new byte[] { 0, -1, 0, -1, 0 }, // mask + new byte[] { 1, 2, 1, 3, 1 }, // current // TODO: should be {1, 1, 1, 3} ? 
- new byte[]{1, 1, 0, 2, 0}); // expected next + new byte[] { 1, 1, 0, 2, 0 }); // expected next - assertNext(true, - new byte[]{1, 0, 1}, - new byte[]{-1, 0, -1}, - new byte[]{1, (byte) 128, 2, 0, 1}, + assertNext(true, new byte[] { 1, 0, 1 }, new byte[] { -1, 0, -1 }, + new byte[] { 1, (byte) 128, 2, 0, 1 }, // TODO: should be {1, (byte) 128, 2} ? - new byte[]{1, (byte) 128, 1, (byte) 0xFF, (byte) 0xFF}); + new byte[] { 1, (byte) 128, 1, (byte) 0xFF, (byte) 0xFF }); - assertNext(true, - new byte[]{0, 1, 0, 1}, - new byte[]{0, -1, 0, -1}, - new byte[]{5, 1, 0, 2, 1}, + assertNext(true, new byte[] { 0, 1, 0, 1 }, new byte[] { 0, -1, 0, -1 }, + new byte[] { 5, 1, 0, 2, 1 }, // TODO: should be {5, 1, 0, 2} ? - new byte[]{5, 1, 0, 1, (byte) 0xFF}); + new byte[] { 5, 1, 0, 1, (byte) 0xFF }); - assertNext(true, - new byte[]{0, 1, 0, 0}, // fuzzy row - new byte[]{0, -1, 0, 0}, // mask - new byte[]{5, 1, (byte) 255, 1}, // current - new byte[]{5, 1, (byte) 255, 0}); // expected next + assertNext(true, new byte[] { 0, 1, 0, 0 }, // fuzzy row + new byte[] { 0, -1, 0, 0 }, // mask + new byte[] { 5, 1, (byte) 255, 1 }, // current + new byte[] { 5, 1, (byte) 255, 0 }); // expected next - assertNext(true, - new byte[]{0, 1, 0, 1}, // fuzzy row - new byte[]{0, -1, 0, -1}, // mask - new byte[]{5, 1, 0, 1}, // current - new byte[]{4, 1, (byte) 255, 1}); // expected next + assertNext(true, new byte[] { 0, 1, 0, 1 }, // fuzzy row + new byte[] { 0, -1, 0, -1 }, // mask + new byte[] { 5, 1, 0, 1 }, // current + new byte[] { 4, 1, (byte) 255, 1 }); // expected next - assertNext(true, - new byte[]{0, 1, 0, 1}, // fuzzy row - new byte[]{0, -1, 0, -1}, // mask - new byte[]{5, 1, (byte) 255, 0}, // current - new byte[]{5, 1, (byte) 254, 1}); // expected next + assertNext(true, new byte[] { 0, 1, 0, 1 }, // fuzzy row + new byte[] { 0, -1, 0, -1 }, // mask + new byte[] { 5, 1, (byte) 255, 0 }, // current + new byte[] { 5, 1, (byte) 254, 1 }); // expected next - assertNext(true, - new byte[]{1, 1, 0, 0}, - new byte[]{-1, -1, 0, 0}, - new byte[]{2, 1, 3, 2}, + assertNext(true, new byte[] { 1, 1, 0, 0 }, new byte[] { -1, -1, 0, 0 }, + new byte[] { 2, 1, 3, 2 }, // TODO: should be {1, 0} ? - new byte[]{1, 1, 0, 0}); + new byte[] { 1, 1, 0, 0 }); - assertNext(true, - new byte[]{1, 0, 1}, // fuzzy row - new byte[]{-1, 0, -1}, // mask - new byte[]{2, 3, 1, 1, 1}, // row to check + assertNext(true, new byte[] { 1, 0, 1 }, // fuzzy row + new byte[] { -1, 0, -1 }, // mask + new byte[] { 2, 3, 1, 1, 1 }, // row to check // TODO: should be {1, (byte) 0xFF, 2} ? - new byte[]{1, 0, 1, (byte) 0xFF, (byte) 0xFF}); + new byte[] { 1, 0, 1, (byte) 0xFF, (byte) 0xFF }); - assertNext(true, - new byte[]{1, 1, 0, 3}, - new byte[]{-1, -1, 0, -1}, - new byte[]{1, (byte) 245, 1, 3, 0}, + assertNext(true, new byte[] { 1, 1, 0, 3 }, new byte[] { -1, -1, 0, -1 }, + new byte[] { 1, (byte) 245, 1, 3, 0 }, // TODO: should be {1, 1, (byte) 255, 4} ? - new byte[]{1, 1, 0, 3, (byte) 0xFF}); + new byte[] { 1, 1, 0, 3, (byte) 0xFF }); - assertNext(true, - new byte[]{1, 2, 0, 3}, - new byte[]{-1, -1, 0, -1}, - new byte[]{1, 3, 1, 3, 0}, + assertNext(true, new byte[] { 1, 2, 0, 3 }, new byte[] { -1, -1, 0, -1 }, + new byte[] { 1, 3, 1, 3, 0 }, // TODO: should be 1, 2, (byte) 255, 4 ? 
- new byte[]{1, 2, 0, 3, (byte) 0xFF}); + new byte[] { 1, 2, 0, 3, (byte) 0xFF }); - assertNext(true, - new byte[]{1, 2, 0, 3}, - new byte[]{-1, -1, 0, -1}, - new byte[]{2, 1, 1, 1, 0}, + assertNext(true, new byte[] { 1, 2, 0, 3 }, new byte[] { -1, -1, 0, -1 }, + new byte[] { 2, 1, 1, 1, 0 }, // TODO: should be {1, 2, (byte) 255, 4} ? - new byte[]{1, 2, 0, 3, (byte) 0xFF}); + new byte[] { 1, 2, 0, 3, (byte) 0xFF }); assertNext(true, // TODO: should be null? - new byte[]{1, 0, 1}, - new byte[]{-1, 0, -1}, - new byte[]{1, (byte) 128, 2}, - new byte[]{1, (byte) 128, 1}); + new byte[] { 1, 0, 1 }, new byte[] { -1, 0, -1 }, new byte[] { 1, (byte) 128, 2 }, + new byte[] { 1, (byte) 128, 1 }); assertNext(true, // TODO: should be null? - new byte[]{0, 1, 0, 1}, - new byte[]{0, -1, 0, -1}, - new byte[]{5, 1, 0, 2}, - new byte[]{5, 1, 0, 1}); + new byte[] { 0, 1, 0, 1 }, new byte[] { 0, -1, 0, -1 }, new byte[] { 5, 1, 0, 2 }, + new byte[] { 5, 1, 0, 1 }); assertNext(true, // TODO: should be null? - new byte[]{5, 1, 1, 0}, - new byte[]{-1, -1, 0, 0}, - new byte[]{5, 1, (byte) 0xFF, 1}, - new byte[]{5, 1, (byte) 0xFF, 0}); + new byte[] { 5, 1, 1, 0 }, new byte[] { -1, -1, 0, 0 }, new byte[] { 5, 1, (byte) 0xFF, 1 }, + new byte[] { 5, 1, (byte) 0xFF, 0 }); assertNext(true, // TODO: should be null? - new byte[]{1, 1, 1, 1}, - new byte[]{-1, -1, 0, 0}, - new byte[]{1, 1, 2, 2}, - new byte[]{1, 1, 2, 1}); + new byte[] { 1, 1, 1, 1 }, new byte[] { -1, -1, 0, 0 }, new byte[] { 1, 1, 2, 2 }, + new byte[] { 1, 1, 2, 1 }); assertNext(true, // TODO: should be null? - new byte[]{1, 1, 1, 1}, - new byte[]{0, 0, 0, 0}, - new byte[]{1, 1, 2, 3}, - new byte[]{1, 1, 2, 2}); - - Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule(true, - new byte[]{1, 1, 1, 3, 0}, - new byte[]{1, 2, 0, 3}, - new byte[]{-1, -1, 0, -1})); + new byte[] { 1, 1, 1, 1 }, new byte[] { 0, 0, 0, 0 }, new byte[] { 1, 1, 2, 3 }, + new byte[] { 1, 1, 2, 2 }); + + Assert.assertNull(FuzzyRowFilter.getNextForFuzzyRule(true, new byte[] { 1, 1, 1, 3, 0 }, + new byte[] { 1, 2, 0, 3 }, new byte[] { -1, -1, 0, -1 })); } private static void assertNext(boolean reverse, byte[] fuzzyRow, byte[] mask, byte[] current, byte[] expected) { KeyValue kv = KeyValueUtil.createFirstOnRow(current); byte[] nextForFuzzyRule = FuzzyRowFilter.getNextForFuzzyRule(reverse, kv.getRowArray(), - kv.getRowOffset(), kv.getRowLength(), fuzzyRow, mask); + kv.getRowOffset(), kv.getRowLength(), fuzzyRow, mask); Assert.assertEquals(Bytes.toStringBinary(expected), Bytes.toStringBinary(nextForFuzzyRule)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java index da7dd8b3c166..cca148961007 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -127,8 +127,8 @@ public void testAllFixedBits() throws IOException { String cf = "f"; String cq = "q"; - Table ht = - TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), Bytes.toBytes(cf), Integer.MAX_VALUE); + Table ht = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), Bytes.toBytes(cf), + Integer.MAX_VALUE); // Load data String[] rows = new String[] { "\\x9C\\x00\\x044\\x00\\x00\\x00\\x00", "\\x9C\\x00\\x044\\x01\\x00\\x00\\x00", "\\x9C\\x00\\x044\\x00\\x01\\x00\\x00", @@ -171,26 +171,21 @@ public void testAllFixedBits() throws IOException { } @Test - public void testHBASE14782() throws IOException - { + public void testHBASE14782() throws IOException { String cf = "f"; String cq = "q"; - Table ht = - TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), Bytes.toBytes(cf), Integer.MAX_VALUE); + Table ht = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), Bytes.toBytes(cf), + Integer.MAX_VALUE); // Load data - String[] rows = new String[] { - "\\x9C\\x00\\x044\\x00\\x00\\x00\\x00", - "\\x9C\\x00\\x044\\x01\\x00\\x00\\x00", - "\\x9C\\x00\\x044\\x00\\x01\\x00\\x00", - "\\x9C\\x00\\x044\\x00\\x00\\x01\\x00", - "\\x9C\\x00\\x044\\x00\\x01\\x00\\x01", - "\\x9B\\x00\\x044e\\xBB\\xB2\\xBB", - }; + String[] rows = new String[] { "\\x9C\\x00\\x044\\x00\\x00\\x00\\x00", + "\\x9C\\x00\\x044\\x01\\x00\\x00\\x00", "\\x9C\\x00\\x044\\x00\\x01\\x00\\x00", + "\\x9C\\x00\\x044\\x00\\x00\\x01\\x00", "\\x9C\\x00\\x044\\x00\\x01\\x00\\x01", + "\\x9B\\x00\\x044e\\xBB\\xB2\\xBB", }; String badRow = "\\x9C\\x00\\x03\\xE9e\\xBB{X\\x1Fwts\\x1F\\x15vRX"; - for(int i=0; i < rows.length; i++){ + for (int i = 0; i < rows.length; i++) { Put p = new Put(Bytes.toBytesBinary(rows[i])); p.addColumn(Bytes.toBytes(cf), Bytes.toBytes(cq), Bytes.toBytes("value")); ht.put(p); @@ -202,9 +197,9 @@ public void testHBASE14782() throws IOException TEST_UTIL.flush(); - List> data = new ArrayList<>(); + List> data = new ArrayList<>(); byte[] fuzzyKey = Bytes.toBytesBinary("\\x00\\x00\\x044"); - byte[] mask = new byte[] { 1,0,0,0}; + byte[] mask = new byte[] { 1, 0, 0, 0 }; data.add(new Pair<>(fuzzyKey, mask)); FuzzyRowFilter filter = new FuzzyRowFilter(data); @@ -213,7 +208,7 @@ public void testHBASE14782() throws IOException ResultScanner scanner = ht.getScanner(scan); int total = 0; - while(scanner.next() != null){ + while (scanner.next() != null) { total++; } assertEquals(rows.length, total); @@ -362,8 +357,8 @@ private void runScanner(Table hTable, int expectedSize, Filter filter) throws IO @Test public void testFilterList() throws Exception { String cf = "f"; - Table ht = - TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), Bytes.toBytes(cf), Integer.MAX_VALUE); + Table ht = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), Bytes.toBytes(cf), + Integer.MAX_VALUE); // 10 byte row key - (2 bytes 4 bytes 4 bytes) // 4 byte qualifier @@ -388,8 +383,8 @@ public void testFilterList() throws Exception { p.setDurability(Durability.SKIP_WAL); p.addColumn(Bytes.toBytes(cf), cq, Bytes.toBytes(c)); ht.put(p); - LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: " - + Bytes.toStringBinary(cq)); + LOG.info( + "Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: " + Bytes.toStringBinary(cq)); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java 
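For context on the FuzzyRowFilter hunks above: a fuzzy rule pairs a row-key template with a mask of the same length, where in the public constructor a 0 mask byte pins that position to the template byte and a 1 marks it as a wildcard (the -1/0 masks in the satisfies tests are an internally preprocessed form of the same idea). A minimal usage sketch, illustrative only, with a made-up four-byte key and imports assumed from the surrounding tests:

  // Match any row whose last two key bytes equal the template; the first two bytes are wildcards.
  byte[] template = new byte[] { 0, 0, 4, 4 };
  byte[] mask = new byte[] { 1, 1, 0, 0 }; // 1 = any byte, 0 = must equal template
  FuzzyRowFilter fuzzy = new FuzzyRowFilter(Lists.newArrayList(new Pair<>(template, mask)));
  Scan scan = new Scan();
  scan.setFilter(fuzzy);
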
index d8c8f2739c61..d0bb0e32e386 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInclusiveStopFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,16 +33,16 @@ /** * Tests the inclusive stop row filter */ -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestInclusiveStopFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestInclusiveStopFilter.class); - private final byte [] STOP_ROW = Bytes.toBytes("stop_row"); - private final byte [] GOOD_ROW = Bytes.toBytes("good_row"); - private final byte [] PAST_STOP_ROW = Bytes.toBytes("zzzzzz"); + private final byte[] STOP_ROW = Bytes.toBytes("stop_row"); + private final byte[] GOOD_ROW = Bytes.toBytes("good_row"); + private final byte[] PAST_STOP_ROW = Bytes.toBytes("zzzzzz"); Filter mainFilter; @@ -89,4 +89,3 @@ private void stopRowTests(Filter filter) throws Exception { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java index f91332cdd167..29038842b9e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java @@ -46,18 +46,17 @@ import org.junit.experimental.categories.Category; /** - * Test the invocation logic of the filters. A filter must be invoked only for - * the columns that are requested for. + * Test the invocation logic of the filters. A filter must be invoked only for the columns that are + * requested for. 
*/ -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestInvocationRecordFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestInvocationRecordFilter.class); - private static final byte[] TABLE_NAME_BYTES = Bytes - .toBytes("invocationrecord"); + private static final byte[] TABLE_NAME_BYTES = Bytes.toBytes("invocationrecord"); private static final byte[] FAMILY_NAME_BYTES = Bytes.toBytes("mycf"); private static final byte[] ROW_BYTES = Bytes.toBytes("row"); @@ -70,7 +69,7 @@ public class TestInvocationRecordFilter { @Before public void setUp() throws Exception { TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE_NAME_BYTES)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_BYTES)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_BYTES)).build(); RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); this.region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); @@ -79,7 +78,7 @@ public void setUp() throws Exception { for (int i = 0; i < 10; i += 2) { // puts 0, 2, 4, 6 and 8 put.addColumn(FAMILY_NAME_BYTES, Bytes.toBytes(QUALIFIER_PREFIX + i), (long) i, - Bytes.toBytes(VALUE_PREFIX + i)); + Bytes.toBytes(VALUE_PREFIX + i)); } this.region.put(put); this.region.flush(true); @@ -92,50 +91,48 @@ public void testFilterInvocation() throws Exception { selectQualifiers.add(-1); verifyInvocationResults(selectQualifiers.toArray(new Integer[selectQualifiers.size()]), - expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); + expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); selectQualifiers.clear(); selectQualifiers.add(0); expectedQualifiers.add(0); verifyInvocationResults(selectQualifiers.toArray(new Integer[selectQualifiers.size()]), - expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); + expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); selectQualifiers.add(3); verifyInvocationResults(selectQualifiers.toArray(new Integer[selectQualifiers.size()]), - expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); + expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); selectQualifiers.add(4); expectedQualifiers.add(4); verifyInvocationResults(selectQualifiers.toArray(new Integer[selectQualifiers.size()]), - expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); + expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); selectQualifiers.add(5); verifyInvocationResults(selectQualifiers.toArray(new Integer[selectQualifiers.size()]), - expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); + expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); selectQualifiers.add(8); expectedQualifiers.add(8); verifyInvocationResults(selectQualifiers.toArray(new Integer[selectQualifiers.size()]), - expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); + expectedQualifiers.toArray(new Integer[expectedQualifiers.size()])); } - public void verifyInvocationResults(Integer[] selectQualifiers, - Integer[] expectedQualifiers) throws Exception { + public void verifyInvocationResults(Integer[] selectQualifiers, Integer[] expectedQualifiers) + throws Exception { Get get = new Get(ROW_BYTES); for (int i = 0; i < selectQualifiers.length; i++) { - get.addColumn(FAMILY_NAME_BYTES, - Bytes.toBytes(QUALIFIER_PREFIX + 
selectQualifiers[i])); + get.addColumn(FAMILY_NAME_BYTES, Bytes.toBytes(QUALIFIER_PREFIX + selectQualifiers[i])); } get.setFilter(new InvocationRecordFilter()); List expectedValues = new ArrayList<>(); for (int i = 0; i < expectedQualifiers.length; i++) { - expectedValues.add(new KeyValue(ROW_BYTES, FAMILY_NAME_BYTES, Bytes - .toBytes(QUALIFIER_PREFIX + expectedQualifiers[i]), - expectedQualifiers[i], Bytes.toBytes(VALUE_PREFIX - + expectedQualifiers[i]))); + expectedValues.add(new KeyValue(ROW_BYTES, FAMILY_NAME_BYTES, + Bytes.toBytes(QUALIFIER_PREFIX + expectedQualifiers[i]), expectedQualifiers[i], + Bytes.toBytes(VALUE_PREFIX + expectedQualifiers[i]))); } Scan scan = new Scan(get); @@ -147,15 +144,15 @@ public void verifyInvocationResults(Integer[] selectQualifiers, temp.clear(); } actualValues.addAll(temp); - Assert.assertTrue("Actual values " + actualValues - + " differ from the expected values:" + expectedValues, - expectedValues.equals(actualValues)); + Assert.assertTrue( + "Actual values " + actualValues + " differ from the expected values:" + expectedValues, + expectedValues.equals(actualValues)); } @After public void tearDown() throws Exception { - WAL wal = ((HRegion)region).getWAL(); - ((HRegion)region).close(); + WAL wal = ((HRegion) region).getWAL(); + ((HRegion) region).close(); wal.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java index f765437feb18..da584c2579af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java @@ -90,82 +90,77 @@ public void testRowKeyPrefixWithEmptyPrefix() throws IOException { MultiRowRangeFilter filter = new MultiRowRangeFilter(rowKeyPrefixes); List actualRanges = filter.getRowRanges(); List expectedRanges = new ArrayList<>(); - expectedRanges.add( - new RowRange(HConstants.EMPTY_START_ROW, true, HConstants.EMPTY_END_ROW, false) - ); + expectedRanges + .add(new RowRange(HConstants.EMPTY_START_ROW, true, HConstants.EMPTY_END_ROW, false)); assertRangesEqual(expectedRanges, actualRanges); } @Test public void testRowKeyPrefixWithLastIncrementablePrefix() throws IOException { - byte[] prefix = {(byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFE}; + byte[] prefix = { (byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFE }; byte[][] rowKeyPrefixes = new byte[1][]; rowKeyPrefixes[0] = prefix; MultiRowRangeFilter filter = new MultiRowRangeFilter(rowKeyPrefixes); List actualRanges = filter.getRowRanges(); List expectedRanges = new ArrayList<>(); - final byte[] expectedStop = {(byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFF}; - expectedRanges.add(new RowRange(prefix, true, expectedStop , false)); + final byte[] expectedStop = { (byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFF }; + expectedRanges.add(new RowRange(prefix, true, expectedStop, false)); assertRangesEqual(expectedRanges, actualRanges); } @Test public void testRowKeyPrefixWithoutLastIncrementablePrefix() throws IOException { - byte[] prefix = {(byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFF}; + byte[] prefix = { (byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFF }; byte[][] rowKeyPrefixes = new byte[1][]; rowKeyPrefixes[0] = prefix; MultiRowRangeFilter filter = new MultiRowRangeFilter(rowKeyPrefixes); List actualRanges = filter.getRowRanges(); List expectedRanges = new ArrayList<>(); - final byte[] expectedStop 
= {(byte) 0x12, (byte) 0x24}; - expectedRanges.add(new RowRange(prefix, true, expectedStop , false)); + final byte[] expectedStop = { (byte) 0x12, (byte) 0x24 }; + expectedRanges.add(new RowRange(prefix, true, expectedStop, false)); assertRangesEqual(expectedRanges, actualRanges); } @Test public void testRowKeyPrefixWithMergablePrefix() throws IOException { - byte[] prefix1 = {(byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFE}; - byte[] prefix2 = {(byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFF}; + byte[] prefix1 = { (byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFE }; + byte[] prefix2 = { (byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFF }; byte[][] rowKeyPrefixes = new byte[2][]; rowKeyPrefixes[0] = prefix1; rowKeyPrefixes[1] = prefix2; MultiRowRangeFilter filter = new MultiRowRangeFilter(rowKeyPrefixes); List actualRanges = filter.getRowRanges(); List expectedRanges = new ArrayList<>(); - final byte[] expectedStop = {(byte) 0x12, (byte) 0x24}; - expectedRanges.add(new RowRange(prefix1, true, expectedStop , false)); + final byte[] expectedStop = { (byte) 0x12, (byte) 0x24 }; + expectedRanges.add(new RowRange(prefix1, true, expectedStop, false)); assertRangesEqual(expectedRanges, actualRanges); } @Test public void testRanges() throws IOException { - byte[] key1Start = new byte[] {-3}; - byte[] key1End = new byte[] {-2}; + byte[] key1Start = new byte[] { -3 }; + byte[] key1End = new byte[] { -2 }; - byte[] key2Start = new byte[] {5}; - byte[] key2End = new byte[] {6}; + byte[] key2Start = new byte[] { 5 }; + byte[] key2End = new byte[] { 6 }; - byte[] badKey = new byte[] {-10}; + byte[] badKey = new byte[] { -10 }; - MultiRowRangeFilter filter = new MultiRowRangeFilter(Arrays.asList( - new MultiRowRangeFilter.RowRange(key1Start, true, key1End, false), - new MultiRowRangeFilter.RowRange(key2Start, true, key2End, false) - )); + MultiRowRangeFilter filter = new MultiRowRangeFilter( + Arrays.asList(new MultiRowRangeFilter.RowRange(key1Start, true, key1End, false), + new MultiRowRangeFilter.RowRange(key2Start, true, key2End, false))); filter.filterRowKey(KeyValueUtil.createFirstOnRow(badKey)); /* - * FAILS -- includes BAD key! - * Expected :SEEK_NEXT_USING_HINT - * Actual :INCLUDE - * */ + * FAILS -- includes BAD key! 
Expected :SEEK_NEXT_USING_HINT Actual :INCLUDE + */ assertEquals(Filter.ReturnCode.SEEK_NEXT_USING_HINT, filter.filterCell(null)); } @Test public void testOutOfOrderScannerNextException() throws Exception { MultiRowRangeFilter filter = new MultiRowRangeFilter(Arrays.asList( - new MultiRowRangeFilter.RowRange(Bytes.toBytes("b"), true, Bytes.toBytes("c"), true), - new MultiRowRangeFilter.RowRange(Bytes.toBytes("d"), true, Bytes.toBytes("e"), true) - )); + new MultiRowRangeFilter.RowRange(Bytes.toBytes("b"), true, Bytes.toBytes("c"), true), + new MultiRowRangeFilter.RowRange(Bytes.toBytes("d"), true, Bytes.toBytes("e"), true))); filter.filterRowKey(KeyValueUtil.createFirstOnRow(Bytes.toBytes("a"))); assertEquals(Filter.ReturnCode.SEEK_NEXT_USING_HINT, filter.filterCell(null)); filter.filterRowKey(KeyValueUtil.createFirstOnRow(Bytes.toBytes("b"))); @@ -213,13 +208,13 @@ public void testMergeAndSortWithEmptyStartRowAndStopRow() throws IOException { assertRangesEqual(expectedRanges, actualRanges); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testMultiRowRangeWithoutRange() throws IOException { List ranges = new ArrayList<>(); new MultiRowRangeFilter(ranges); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testMultiRowRangeWithInvalidRange() throws IOException { List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); @@ -296,13 +291,12 @@ public void testMergeAndSortWithRowInclusive() throws IOException { public void assertRangesEqual(List expected, List actual) { assertEquals(expected.size(), actual.size()); - for(int i = 0; i < expected.size(); i++) { + for (int i = 0; i < expected.size(); i++) { Assert.assertTrue(Bytes.equals(expected.get(i).getStartRow(), actual.get(i).getStartRow())); - Assert.assertTrue(expected.get(i).isStartRowInclusive() == - actual.get(i).isStartRowInclusive()); + Assert + .assertTrue(expected.get(i).isStartRowInclusive() == actual.get(i).isStartRowInclusive()); Assert.assertTrue(Bytes.equals(expected.get(i).getStopRow(), actual.get(i).getStopRow())); - Assert.assertTrue(expected.get(i).isStopRowInclusive() == - actual.get(i).isStopRowInclusive()); + Assert.assertTrue(expected.get(i).isStopRowInclusive() == actual.get(i).isStopRowInclusive()); } } @@ -438,7 +432,7 @@ public void testMultiRowRangeFilterWithExclusive() throws IOException { TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 6000000); TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE); try (Table ht = TEST_UTIL.getConnection().getTableBuilder(tableName, null) - .setReadRpcTimeout(600000).setOperationTimeout(6000000).build()) { + .setReadRpcTimeout(600000).setOperationTimeout(6000000).build()) { generateRows(numRows, ht, family, qf, value); Scan scan = new Scan(); @@ -527,7 +521,7 @@ public void testMultiRowRangeWithFilterListOrOperator() throws IOException { List results2 = getScanResult(Bytes.toBytes(60), Bytes.toBytes(70), ht); List results3 = getScanResult(Bytes.toBytes(80), Bytes.toBytes(90), ht); - assertEquals(results1.size() + results2.size() + results3.size(),resultsSize); + assertEquals(results1.size() + results2.size() + results3.size(), resultsSize); ht.close(); } @@ -569,10 +563,9 @@ public void testReverseMultiRowRangeFilterWithinTable() throws IOException { Scan scan = new Scan(); scan.setReversed(true); - List ranges = Arrays.asList( - new 
RowRange(Bytes.toBytes(20), true, Bytes.toBytes(30), true), - new RowRange(Bytes.toBytes(50), true, Bytes.toBytes(60), true) - ); + List ranges = + Arrays.asList(new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(30), true), + new RowRange(Bytes.toBytes(50), true, Bytes.toBytes(60), true)); MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges); scan.setFilter(filter); @@ -588,8 +581,8 @@ public void testReverseMultiRowRangeFilterWithinTable() throws IOException { List actualResults = new ArrayList<>(); StringBuilder sb = new StringBuilder(); for (Cell result : results) { - int observedValue = Bytes.toInt( - result.getRowArray(), result.getRowOffset(), result.getRowLength()); + int observedValue = + Bytes.toInt(result.getRowArray(), result.getRowOffset(), result.getRowLength()); actualResults.add(observedValue); if (sb.length() > 0) { sb.append(", "); @@ -613,10 +606,9 @@ public void testReverseMultiRowRangeFilterIncludingMaxRow() throws IOException { Scan scan = new Scan(); scan.setReversed(true); - List ranges = Arrays.asList( - new RowRange(Bytes.toBytes("b"), true, Bytes.toBytes("c"), true), - new RowRange(Bytes.toBytes("f"), true, Bytes.toBytes("h"), true) - ); + List ranges = + Arrays.asList(new RowRange(Bytes.toBytes("b"), true, Bytes.toBytes("c"), true), + new RowRange(Bytes.toBytes("f"), true, Bytes.toBytes("h"), true)); MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges); scan.setFilter(filter); @@ -643,10 +635,9 @@ public void testReverseMultiRowRangeFilterIncludingMinRow() throws IOException { Scan scan = new Scan(); scan.setReversed(true); - List ranges = Arrays.asList( - new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("c"), true), - new RowRange(Bytes.toBytes("f"), true, Bytes.toBytes("g"), true) - ); + List ranges = + Arrays.asList(new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("c"), true), + new RowRange(Bytes.toBytes("f"), true, Bytes.toBytes("g"), true)); MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges); scan.setFilter(filter); @@ -673,10 +664,9 @@ public void testReverseMultiRowRangeFilterIncludingMinAndMaxRow() throws IOExcep Scan scan = new Scan(); scan.setReversed(true); - List ranges = Arrays.asList( - new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("c"), true), - new RowRange(Bytes.toBytes("f"), true, Bytes.toBytes("h"), true) - ); + List ranges = + Arrays.asList(new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("c"), true), + new RowRange(Bytes.toBytes("f"), true, Bytes.toBytes("h"), true)); MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges); scan.setFilter(filter); @@ -703,10 +693,10 @@ private void generateRows(int numberOfRows, Table ht, byte[] family, byte[] qf, private List getScanResult(byte[] startRow, byte[] stopRow, Table ht) throws IOException { Scan scan = new Scan(); scan.readAllVersions(); - if(!Bytes.toString(startRow).isEmpty()) { + if (!Bytes.toString(startRow).isEmpty()) { scan.withStartRow(startRow); } - if(!Bytes.toString(stopRow).isEmpty()) { + if (!Bytes.toString(stopRow).isEmpty()) { scan.withStopRow(stopRow); } ResultScanner scanner = ht.getScanner(scan); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java index b5567e49a5c8..027e0ecb4dba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,15 +52,14 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({FilterTests.class, MediumTests.class}) +@Category({ FilterTests.class, MediumTests.class }) public class TestMultipleColumnPrefixFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMultipleColumnPrefixFilter.class); - private final static HBaseTestingUtil TEST_UTIL = new - HBaseTestingUtil(); + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @Rule public TestName name = new TestName(); @@ -69,18 +68,15 @@ public class TestMultipleColumnPrefixFilter { public void testMultipleColumnPrefixFilter() throws IOException { String family = "Family"; TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family)) - .setMaxVersions(3) - .build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).setMaxVersions(3).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); // HRegionInfo info = new HRegionInfo(htd, null, null, false); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); - HRegion region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL. 
- getDataTestDir(), TEST_UTIL.getConfiguration(), tableDescriptor); + HRegion region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), + TEST_UTIL.getConfiguration(), tableDescriptor); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); @@ -96,16 +92,15 @@ public void testMultipleColumnPrefixFilter() throws IOException { String valueString = "ValueString"; - for (String row: rows) { + for (String row : rows) { Put p = new Put(Bytes.toBytes(row)); p.setDurability(Durability.SKIP_WAL); - for (String column: columns) { + for (String column : columns) { for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) { - KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, - valueString); + KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, valueString); p.add(kv); kvList.add(kv); - for (String s: prefixMap.keySet()) { + for (String s : prefixMap.keySet()) { if (column.startsWith(s)) { prefixMap.get(s).add(kv); } @@ -118,9 +113,9 @@ public void testMultipleColumnPrefixFilter() throws IOException { MultipleColumnPrefixFilter filter; Scan scan = new Scan(); scan.readAllVersions(); - byte [][] filter_prefix = new byte [2][]; - filter_prefix[0] = new byte [] {'p'}; - filter_prefix[1] = new byte [] {'q'}; + byte[][] filter_prefix = new byte[2][]; + filter_prefix[0] = new byte[] { 'p' }; + filter_prefix[1] = new byte[] { 'q' }; filter = new MultipleColumnPrefixFilter(filter_prefix); scan.setFilter(filter); @@ -138,22 +133,17 @@ public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException String family1 = "Family1"; String family2 = "Family2"; TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family1)) - .setMaxVersions(3) - .build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family1)).setMaxVersions(3).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family2)) - .setMaxVersions(3) - .build(); + columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family2)).setMaxVersions(3).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); - HRegion region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL. 
- getDataTestDir(), TEST_UTIL.getConfiguration(), tableDescriptor); + HRegion region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), + TEST_UTIL.getConfiguration(), tableDescriptor); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); @@ -169,10 +159,10 @@ public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException String valueString = "ValueString"; - for (String row: rows) { + for (String row : rows) { Put p = new Put(Bytes.toBytes(row)); p.setDurability(Durability.SKIP_WAL); - for (String column: columns) { + for (String column : columns) { for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) { double rand = Math.random(); Cell kv; @@ -183,7 +173,7 @@ public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException } p.add(kv); kvList.add(kv); - for (String s: prefixMap.keySet()) { + for (String s : prefixMap.keySet()) { if (column.startsWith(s)) { prefixMap.get(s).add(kv); } @@ -196,9 +186,9 @@ public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException MultipleColumnPrefixFilter filter; Scan scan = new Scan(); scan.readAllVersions(); - byte [][] filter_prefix = new byte [2][]; - filter_prefix[0] = new byte [] {'p'}; - filter_prefix[1] = new byte [] {'q'}; + byte[][] filter_prefix = new byte[2][]; + filter_prefix[0] = new byte[] { 'p' }; + filter_prefix[1] = new byte[] { 'q' }; filter = new MultipleColumnPrefixFilter(filter_prefix); scan.setFilter(filter); @@ -215,14 +205,14 @@ public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException public void testMultipleColumnPrefixFilterWithColumnPrefixFilter() throws IOException { String family = "Family"; TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); - HRegion region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL. 
- getDataTestDir(), TEST_UTIL.getConfiguration(), tableDescriptor); + HRegion region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), + TEST_UTIL.getConfiguration(), tableDescriptor); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); @@ -230,13 +220,12 @@ public void testMultipleColumnPrefixFilterWithColumnPrefixFilter() throws IOExce String valueString = "ValueString"; - for (String row: rows) { + for (String row : rows) { Put p = new Put(Bytes.toBytes(row)); p.setDurability(Durability.SKIP_WAL); - for (String column: columns) { + for (String column : columns) { for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) { - KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, - valueString); + KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, valueString); p.add(kv); } } @@ -246,8 +235,8 @@ public void testMultipleColumnPrefixFilterWithColumnPrefixFilter() throws IOExce MultipleColumnPrefixFilter multiplePrefixFilter; Scan scan1 = new Scan(); scan1.readAllVersions(); - byte [][] filter_prefix = new byte [1][]; - filter_prefix[0] = new byte [] {'p'}; + byte[][] filter_prefix = new byte[1][]; + filter_prefix[0] = new byte[] { 'p' }; multiplePrefixFilter = new MultipleColumnPrefixFilter(filter_prefix); scan1.setFilter(multiplePrefixFilter); @@ -275,7 +264,7 @@ public void testMultipleColumnPrefixFilterWithColumnPrefixFilter() throws IOExce List generateRandomWords(int numberOfWords, String suffix) { Set wordSet = new HashSet<>(); for (int i = 0; i < numberOfWords; i++) { - int lengthOfWords = (int) (Math.random()*2) + 1; + int lengthOfWords = (int) (Math.random() * 2) + 1; char[] wordChar = new char[lengthOfWords]; for (int j = 0; j < wordChar.length; j++) { wordChar[j] = (char) (Math.random() * 26 + 97); @@ -293,5 +282,3 @@ List generateRandomWords(int numberOfWords, String suffix) { } } - - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestNullComparator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestNullComparator.java index 5417d874eaaf..7b0057b94d06 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestNullComparator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestNullComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestNullComparator { @ClassRule @@ -33,8 +33,7 @@ public class TestNullComparator { HBaseClassTestRule.forClass(TestNullComparator.class); @Test - public void testNullValue() - { + public void testNullValue() { // given byte[] value = null; NullComparator comparator = new NullComparator(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java index 5ab0ff971c92..7447cde56e1c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPageFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ /** * Tests for the page filter */ -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestPageFilter { @ClassRule @@ -76,14 +76,14 @@ private void testFiltersBeyondPageSize(final Filter f, final int pageSize) throw for (int i = 0; i < (pageSize * 2); i++) { boolean filterOut = f.filterRow(); - if(filterOut) { + if (filterOut) { break; } else { count++; } // If at last row, should tell us to skip all remaining - if(count == pageSize) { + if (count == pageSize) { assertTrue(f.filterAllRemaining()); } else { assertFalse(f.filterAllRemaining()); @@ -94,4 +94,3 @@ private void testFiltersBeyondPageSize(final Filter f, final int pageSize) throw } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java index fbedc1c0e688..f714ac43a880 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -37,11 +38,10 @@ import org.junit.experimental.categories.Category; /** - * This class tests ParseFilter.java - * It tests the entire work flow from when a string is given by the user - * and how it is parsed to construct the corresponding Filter object + * This class tests ParseFilter.java It tests the entire work flow from when a string is given by + * the user and how it is parsed to construct the corresponding Filter object */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestParseFilter { @ClassRule @@ -67,7 +67,7 @@ public void testKeyOnlyFilter() throws IOException { doTestFilter(filterString, KeyOnlyFilter.class); String filterString2 = "KeyOnlyFilter ('') "; - byte [] filterStringAsByteArray2 = Bytes.toBytes(filterString2); + byte[] filterStringAsByteArray2 = Bytes.toBytes(filterString2); try { filter = f.parseFilterString(filterStringAsByteArray2); assertTrue(false); @@ -82,7 +82,7 @@ public void testFirstKeyOnlyFilter() throws IOException { doTestFilter(filterString, FirstKeyOnlyFilter.class); String filterString2 = " FirstKeyOnlyFilter ('') "; - byte [] filterStringAsByteArray2 = Bytes.toBytes(filterString2); + byte[] filterStringAsByteArray2 = Bytes.toBytes(filterString2); try { filter = f.parseFilterString(filterStringAsByteArray2); assertTrue(false); @@ -95,10 +95,9 @@ public void testFirstKeyOnlyFilter() throws IOException { public void testPrefixFilter() throws IOException { String filterString = " PrefixFilter('row' ) "; PrefixFilter prefixFilter = doTestFilter(filterString, PrefixFilter.class); - byte [] prefix = prefixFilter.getPrefix(); + byte[] prefix = prefixFilter.getPrefix(); assertEquals("row", new String(prefix, StandardCharsets.UTF_8)); - filterString = " PrefixFilter(row)"; try { doTestFilter(filterString, PrefixFilter.class); @@ -111,9 +110,8 @@ public void 
testPrefixFilter() throws IOException { @Test public void testColumnPrefixFilter() throws IOException { String filterString = " ColumnPrefixFilter('qualifier' ) "; - ColumnPrefixFilter columnPrefixFilter = - doTestFilter(filterString, ColumnPrefixFilter.class); - byte [] columnPrefix = columnPrefixFilter.getPrefix(); + ColumnPrefixFilter columnPrefixFilter = doTestFilter(filterString, ColumnPrefixFilter.class); + byte[] columnPrefix = columnPrefixFilter.getPrefix(); assertEquals("qualifier", new String(columnPrefix, StandardCharsets.UTF_8)); } @@ -121,8 +119,8 @@ public void testColumnPrefixFilter() throws IOException { public void testMultipleColumnPrefixFilter() throws IOException { String filterString = " MultipleColumnPrefixFilter('qualifier1', 'qualifier2' ) "; MultipleColumnPrefixFilter multipleColumnPrefixFilter = - doTestFilter(filterString, MultipleColumnPrefixFilter.class); - byte [][] prefixes = multipleColumnPrefixFilter.getPrefix(); + doTestFilter(filterString, MultipleColumnPrefixFilter.class); + byte[][] prefixes = multipleColumnPrefixFilter.getPrefix(); assertEquals("qualifier1", new String(prefixes[0], StandardCharsets.UTF_8)); assertEquals("qualifier2", new String(prefixes[1], StandardCharsets.UTF_8)); } @@ -131,7 +129,7 @@ public void testMultipleColumnPrefixFilter() throws IOException { public void testColumnCountGetFilter() throws IOException { String filterString = " ColumnCountGetFilter(4)"; ColumnCountGetFilter columnCountGetFilter = - doTestFilter(filterString, ColumnCountGetFilter.class); + doTestFilter(filterString, ColumnCountGetFilter.class); int limit = columnCountGetFilter.getLimit(); assertEquals(4, limit); @@ -155,8 +153,7 @@ public void testColumnCountGetFilter() throws IOException { @Test public void testPageFilter() throws IOException { String filterString = " PageFilter(4)"; - PageFilter pageFilter = - doTestFilter(filterString, PageFilter.class); + PageFilter pageFilter = doTestFilter(filterString, PageFilter.class); long pageSize = pageFilter.getPageSize(); assertEquals(4, pageSize); @@ -173,7 +170,7 @@ public void testPageFilter() throws IOException { public void testColumnPaginationFilter() throws IOException { String filterString = "ColumnPaginationFilter(4, 6)"; ColumnPaginationFilter columnPaginationFilter = - doTestFilter(filterString, ColumnPaginationFilter.class); + doTestFilter(filterString, ColumnPaginationFilter.class); int limit = columnPaginationFilter.getLimit(); assertEquals(4, limit); int offset = columnPaginationFilter.getOffset(); @@ -207,18 +204,15 @@ public void testColumnPaginationFilter() throws IOException { @Test public void testInclusiveStopFilter() throws IOException { String filterString = "InclusiveStopFilter ('row 3')"; - InclusiveStopFilter inclusiveStopFilter = - doTestFilter(filterString, InclusiveStopFilter.class); - byte [] stopRowKey = inclusiveStopFilter.getStopRowKey(); + InclusiveStopFilter inclusiveStopFilter = doTestFilter(filterString, InclusiveStopFilter.class); + byte[] stopRowKey = inclusiveStopFilter.getStopRowKey(); assertEquals("row 3", new String(stopRowKey, StandardCharsets.UTF_8)); } - @Test public void testTimestampsFilter() throws IOException { String filterString = "TimestampsFilter(9223372036854775806, 6)"; - TimestampsFilter timestampsFilter = - doTestFilter(filterString, TimestampsFilter.class); + TimestampsFilter timestampsFilter = doTestFilter(filterString, TimestampsFilter.class); List timestamps = timestampsFilter.getTimestamps(); assertEquals(2, timestamps.size()); 
assertEquals(Long.valueOf(6), timestamps.get(0)); @@ -248,8 +242,7 @@ public void testTimestampsFilter() throws IOException { @Test public void testRowFilter() throws IOException { String filterString = "RowFilter ( =, 'binary:regionse')"; - RowFilter rowFilter = - doTestFilter(filterString, RowFilter.class); + RowFilter rowFilter = doTestFilter(filterString, RowFilter.class); assertEquals(CompareOperator.EQUAL, rowFilter.getCompareOperator()); assertTrue(rowFilter.getComparator() instanceof BinaryComparator); BinaryComparator binaryComparator = (BinaryComparator) rowFilter.getComparator(); @@ -259,36 +252,33 @@ public void testRowFilter() throws IOException { @Test public void testFamilyFilter() throws IOException { String filterString = "FamilyFilter(>=, 'binaryprefix:pre')"; - FamilyFilter familyFilter = - doTestFilter(filterString, FamilyFilter.class); + FamilyFilter familyFilter = doTestFilter(filterString, FamilyFilter.class); assertEquals(CompareOperator.GREATER_OR_EQUAL, familyFilter.getCompareOperator()); assertTrue(familyFilter.getComparator() instanceof BinaryPrefixComparator); BinaryPrefixComparator binaryPrefixComparator = - (BinaryPrefixComparator) familyFilter.getComparator(); + (BinaryPrefixComparator) familyFilter.getComparator(); assertEquals("pre", new String(binaryPrefixComparator.getValue(), StandardCharsets.UTF_8)); } @Test public void testQualifierFilter() throws IOException { String filterString = "QualifierFilter(=, 'regexstring:pre*')"; - QualifierFilter qualifierFilter = - doTestFilter(filterString, QualifierFilter.class); + QualifierFilter qualifierFilter = doTestFilter(filterString, QualifierFilter.class); assertEquals(CompareOperator.EQUAL, qualifierFilter.getCompareOperator()); assertTrue(qualifierFilter.getComparator() instanceof RegexStringComparator); RegexStringComparator regexStringComparator = - (RegexStringComparator) qualifierFilter.getComparator(); + (RegexStringComparator) qualifierFilter.getComparator(); assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); } @Test public void testQualifierFilterNoCase() throws IOException { String filterString = "QualifierFilter(=, 'regexstringnocase:pre*')"; - QualifierFilter qualifierFilter = - doTestFilter(filterString, QualifierFilter.class); + QualifierFilter qualifierFilter = doTestFilter(filterString, QualifierFilter.class); assertEquals(CompareOperator.EQUAL, qualifierFilter.getCompareOperator()); assertTrue(qualifierFilter.getComparator() instanceof RegexStringComparator); RegexStringComparator regexStringComparator = - (RegexStringComparator) qualifierFilter.getComparator(); + (RegexStringComparator) qualifierFilter.getComparator(); assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); int regexComparatorFlags = regexStringComparator.getEngine().getFlags(); assertEquals(Pattern.CASE_INSENSITIVE | Pattern.DOTALL, regexComparatorFlags); @@ -297,20 +287,17 @@ public void testQualifierFilterNoCase() throws IOException { @Test public void testValueFilter() throws IOException { String filterString = "ValueFilter(!=, 'substring:pre')"; - ValueFilter valueFilter = - doTestFilter(filterString, ValueFilter.class); + ValueFilter valueFilter = doTestFilter(filterString, ValueFilter.class); assertEquals(CompareOperator.NOT_EQUAL, valueFilter.getCompareOperator()); assertTrue(valueFilter.getComparator() instanceof SubstringComparator); - SubstringComparator substringComparator = - (SubstringComparator) valueFilter.getComparator(); + 
SubstringComparator substringComparator = (SubstringComparator) valueFilter.getComparator(); assertEquals("pre", new String(substringComparator.getValue(), StandardCharsets.UTF_8)); } @Test public void testColumnRangeFilter() throws IOException { String filterString = "ColumnRangeFilter('abc', true, 'xyz', false)"; - ColumnRangeFilter columnRangeFilter = - doTestFilter(filterString, ColumnRangeFilter.class); + ColumnRangeFilter columnRangeFilter = doTestFilter(filterString, ColumnRangeFilter.class); assertEquals("abc", new String(columnRangeFilter.getMinColumn(), StandardCharsets.UTF_8)); assertEquals("xyz", new String(columnRangeFilter.getMaxColumn(), StandardCharsets.UTF_8)); assertTrue(columnRangeFilter.isMinColumnInclusive()); @@ -321,26 +308,26 @@ public void testColumnRangeFilter() throws IOException { public void testDependentColumnFilter() throws IOException { String filterString = "DependentColumnFilter('family', 'qualifier', true, =, 'binary:abc')"; DependentColumnFilter dependentColumnFilter = - doTestFilter(filterString, DependentColumnFilter.class); + doTestFilter(filterString, DependentColumnFilter.class); assertEquals("family", new String(dependentColumnFilter.getFamily(), StandardCharsets.UTF_8)); assertEquals("qualifier", - new String(dependentColumnFilter.getQualifier(), StandardCharsets.UTF_8)); + new String(dependentColumnFilter.getQualifier(), StandardCharsets.UTF_8)); assertTrue(dependentColumnFilter.getDropDependentColumn()); assertEquals(CompareOperator.EQUAL, dependentColumnFilter.getCompareOperator()); assertTrue(dependentColumnFilter.getComparator() instanceof BinaryComparator); - BinaryComparator binaryComparator = (BinaryComparator)dependentColumnFilter.getComparator(); + BinaryComparator binaryComparator = (BinaryComparator) dependentColumnFilter.getComparator(); assertEquals("abc", new String(binaryComparator.getValue(), StandardCharsets.UTF_8)); } @Test public void testSingleColumnValueFilter() throws IOException { - String filterString = "SingleColumnValueFilter " + - "('family', 'qualifier', >=, 'binary:a', true, false)"; + String filterString = + "SingleColumnValueFilter " + "('family', 'qualifier', >=, 'binary:a', true, false)"; SingleColumnValueFilter singleColumnValueFilter = - doTestFilter(filterString, SingleColumnValueFilter.class); + doTestFilter(filterString, SingleColumnValueFilter.class); assertEquals("family", new String(singleColumnValueFilter.getFamily(), StandardCharsets.UTF_8)); assertEquals("qualifier", - new String(singleColumnValueFilter.getQualifier(), StandardCharsets.UTF_8)); + new String(singleColumnValueFilter.getQualifier(), StandardCharsets.UTF_8)); assertEquals(CompareOperator.GREATER_OR_EQUAL, singleColumnValueFilter.getCompareOperator()); assertTrue(singleColumnValueFilter.getComparator() instanceof BinaryComparator); BinaryComparator binaryComparator = (BinaryComparator) singleColumnValueFilter.getComparator(); @@ -348,16 +335,15 @@ public void testSingleColumnValueFilter() throws IOException { assertTrue(singleColumnValueFilter.getFilterIfMissing()); assertFalse(singleColumnValueFilter.getLatestVersionOnly()); - filterString = "SingleColumnValueFilter ('family', 'qualifier', >, 'binaryprefix:a')"; singleColumnValueFilter = doTestFilter(filterString, SingleColumnValueFilter.class); assertEquals("family", new String(singleColumnValueFilter.getFamily(), StandardCharsets.UTF_8)); assertEquals("qualifier", - new String(singleColumnValueFilter.getQualifier(), StandardCharsets.UTF_8)); + new 
String(singleColumnValueFilter.getQualifier(), StandardCharsets.UTF_8)); assertEquals(CompareOperator.GREATER, singleColumnValueFilter.getCompareOperator()); assertTrue(singleColumnValueFilter.getComparator() instanceof BinaryPrefixComparator); BinaryPrefixComparator binaryPrefixComparator = - (BinaryPrefixComparator) singleColumnValueFilter.getComparator(); + (BinaryPrefixComparator) singleColumnValueFilter.getComparator(); assertEquals("a", new String(binaryPrefixComparator.getValue(), StandardCharsets.UTF_8)); assertFalse(singleColumnValueFilter.getFilterIfMissing()); assertTrue(singleColumnValueFilter.getLatestVersionOnly()); @@ -366,32 +352,32 @@ public void testSingleColumnValueFilter() throws IOException { @Test public void testSingleColumnValueExcludeFilter() throws IOException { String filterString = - "SingleColumnValueExcludeFilter ('family', 'qualifier', <, 'binaryprefix:a')"; + "SingleColumnValueExcludeFilter ('family', 'qualifier', <, 'binaryprefix:a')"; SingleColumnValueExcludeFilter singleColumnValueExcludeFilter = - doTestFilter(filterString, SingleColumnValueExcludeFilter.class); + doTestFilter(filterString, SingleColumnValueExcludeFilter.class); assertEquals(CompareOperator.LESS, singleColumnValueExcludeFilter.getCompareOperator()); assertEquals("family", - new String(singleColumnValueExcludeFilter.getFamily(), StandardCharsets.UTF_8)); + new String(singleColumnValueExcludeFilter.getFamily(), StandardCharsets.UTF_8)); assertEquals("qualifier", - new String(singleColumnValueExcludeFilter.getQualifier(), StandardCharsets.UTF_8)); + new String(singleColumnValueExcludeFilter.getQualifier(), StandardCharsets.UTF_8)); assertEquals("a", new String(singleColumnValueExcludeFilter.getComparator().getValue(), StandardCharsets.UTF_8)); assertFalse(singleColumnValueExcludeFilter.getFilterIfMissing()); assertTrue(singleColumnValueExcludeFilter.getLatestVersionOnly()); - filterString = "SingleColumnValueExcludeFilter " + - "('family', 'qualifier', <=, 'binaryprefix:a', true, false)"; + filterString = "SingleColumnValueExcludeFilter " + + "('family', 'qualifier', <=, 'binaryprefix:a', true, false)"; singleColumnValueExcludeFilter = - doTestFilter(filterString, SingleColumnValueExcludeFilter.class); + doTestFilter(filterString, SingleColumnValueExcludeFilter.class); assertEquals("family", - new String(singleColumnValueExcludeFilter.getFamily(), StandardCharsets.UTF_8)); + new String(singleColumnValueExcludeFilter.getFamily(), StandardCharsets.UTF_8)); assertEquals("qualifier", - new String(singleColumnValueExcludeFilter.getQualifier(), StandardCharsets.UTF_8)); + new String(singleColumnValueExcludeFilter.getQualifier(), StandardCharsets.UTF_8)); assertEquals(CompareOperator.LESS_OR_EQUAL, - singleColumnValueExcludeFilter.getCompareOperator()); + singleColumnValueExcludeFilter.getCompareOperator()); assertTrue(singleColumnValueExcludeFilter.getComparator() instanceof BinaryPrefixComparator); BinaryPrefixComparator binaryPrefixComparator = - (BinaryPrefixComparator) singleColumnValueExcludeFilter.getComparator(); + (BinaryPrefixComparator) singleColumnValueExcludeFilter.getComparator(); assertEquals("a", new String(binaryPrefixComparator.getValue(), StandardCharsets.UTF_8)); assertTrue(singleColumnValueExcludeFilter.getFilterIfMissing()); assertFalse(singleColumnValueExcludeFilter.getLatestVersionOnly()); @@ -400,8 +386,7 @@ public void testSingleColumnValueExcludeFilter() throws IOException { @Test public void testSkipFilter() throws IOException { String filterString = "SKIP ValueFilter( 
=, 'binary:0')"; - SkipFilter skipFilter = - doTestFilter(filterString, SkipFilter.class); + SkipFilter skipFilter = doTestFilter(filterString, SkipFilter.class); assertTrue(skipFilter.getFilter() instanceof ValueFilter); ValueFilter valueFilter = (ValueFilter) skipFilter.getFilter(); @@ -414,8 +399,7 @@ public void testSkipFilter() throws IOException { @Test public void testWhileFilter() throws IOException { String filterString = " WHILE RowFilter ( !=, 'binary:row1')"; - WhileMatchFilter whileMatchFilter = - doTestFilter(filterString, WhileMatchFilter.class); + WhileMatchFilter whileMatchFilter = doTestFilter(filterString, WhileMatchFilter.class); assertTrue(whileMatchFilter.getFilter() instanceof RowFilter); RowFilter rowFilter = (RowFilter) whileMatchFilter.getFilter(); @@ -428,24 +412,22 @@ public void testWhileFilter() throws IOException { @Test public void testCompoundFilter1() throws IOException { String filterString = " (PrefixFilter ('realtime')AND FirstKeyOnlyFilter())"; - FilterList filterList = - doTestFilter(filterString, FilterList.class); + FilterList filterList = doTestFilter(filterString, FilterList.class); ArrayList filters = (ArrayList) filterList.getFilters(); assertTrue(filters.get(0) instanceof PrefixFilter); assertTrue(filters.get(1) instanceof FirstKeyOnlyFilter); PrefixFilter PrefixFilter = (PrefixFilter) filters.get(0); - byte [] prefix = PrefixFilter.getPrefix(); + byte[] prefix = PrefixFilter.getPrefix(); assertEquals("realtime", new String(prefix, StandardCharsets.UTF_8)); FirstKeyOnlyFilter firstKeyOnlyFilter = (FirstKeyOnlyFilter) filters.get(1); } @Test public void testCompoundFilter2() throws IOException { - String filterString = "(PrefixFilter('realtime') AND QualifierFilter (>=, 'binary:e'))" + - "OR FamilyFilter (=, 'binary:qualifier') "; - FilterList filterList = - doTestFilter(filterString, FilterList.class); + String filterString = "(PrefixFilter('realtime') AND QualifierFilter (>=, 'binary:e'))" + + "OR FamilyFilter (=, 'binary:qualifier') "; + FilterList filterList = doTestFilter(filterString, FilterList.class); ArrayList filterListFilters = (ArrayList) filterList.getFilters(); assertTrue(filterListFilters.get(0) instanceof FilterList); assertTrue(filterListFilters.get(1) instanceof FamilyFilter); @@ -454,7 +436,7 @@ public void testCompoundFilter2() throws IOException { filterList = (FilterList) filterListFilters.get(0); FamilyFilter familyFilter = (FamilyFilter) filterListFilters.get(1); - filterListFilters = (ArrayList)filterList.getFilters(); + filterListFilters = (ArrayList) filterList.getFilters(); assertTrue(filterListFilters.get(0) instanceof PrefixFilter); assertTrue(filterListFilters.get(1) instanceof QualifierFilter); assertEquals(FilterList.Operator.MUST_PASS_ALL, filterList.getOperator()); @@ -465,7 +447,7 @@ public void testCompoundFilter2() throws IOException { assertEquals("qualifier", new String(binaryComparator.getValue(), StandardCharsets.UTF_8)); PrefixFilter prefixFilter = (PrefixFilter) filterListFilters.get(0); - byte [] prefix = prefixFilter.getPrefix(); + byte[] prefix = prefixFilter.getPrefix(); assertEquals("realtime", new String(prefix, StandardCharsets.UTF_8)); QualifierFilter qualifierFilter = (QualifierFilter) filterListFilters.get(1); @@ -477,10 +459,9 @@ public void testCompoundFilter2() throws IOException { @Test public void testCompoundFilter3() throws IOException { - String filterString = " ColumnPrefixFilter ('realtime')AND " + - "FirstKeyOnlyFilter() OR SKIP FamilyFilter(=, 'substring:hihi')"; - FilterList 
filterList = - doTestFilter(filterString, FilterList.class); + String filterString = " ColumnPrefixFilter ('realtime')AND " + + "FirstKeyOnlyFilter() OR SKIP FamilyFilter(=, 'substring:hihi')"; + FilterList filterList = doTestFilter(filterString, FilterList.class); ArrayList filters = (ArrayList) filterList.getFilters(); assertTrue(filters.get(0) instanceof FilterList); @@ -494,7 +475,7 @@ public void testCompoundFilter3() throws IOException { assertTrue(filters.get(1) instanceof FirstKeyOnlyFilter); ColumnPrefixFilter columnPrefixFilter = (ColumnPrefixFilter) filters.get(0); - byte [] columnPrefix = columnPrefixFilter.getPrefix(); + byte[] columnPrefix = columnPrefixFilter.getPrefix(); assertEquals("realtime", new String(columnPrefix, StandardCharsets.UTF_8)); FirstKeyOnlyFilter firstKeyOnlyFilter = (FirstKeyOnlyFilter) filters.get(1); @@ -504,17 +485,15 @@ public void testCompoundFilter3() throws IOException { assertEquals(CompareOperator.EQUAL, familyFilter.getCompareOperator()); assertTrue(familyFilter.getComparator() instanceof SubstringComparator); - SubstringComparator substringComparator = - (SubstringComparator) familyFilter.getComparator(); + SubstringComparator substringComparator = (SubstringComparator) familyFilter.getComparator(); assertEquals("hihi", new String(substringComparator.getValue(), StandardCharsets.UTF_8)); } @Test public void testCompoundFilter4() throws IOException { - String filterString = " ColumnPrefixFilter ('realtime') OR " + - "FirstKeyOnlyFilter() OR SKIP FamilyFilter(=, 'substring:hihi')"; - FilterList filterList = - doTestFilter(filterString, FilterList.class); + String filterString = " ColumnPrefixFilter ('realtime') OR " + + "FirstKeyOnlyFilter() OR SKIP FamilyFilter(=, 'substring:hihi')"; + FilterList filterList = doTestFilter(filterString, FilterList.class); ArrayList filters = (ArrayList) filterList.getFilters(); assertTrue(filters.get(0) instanceof ColumnPrefixFilter); @@ -525,7 +504,7 @@ public void testCompoundFilter4() throws IOException { FirstKeyOnlyFilter firstKeyOnlyFilter = (FirstKeyOnlyFilter) filters.get(1); SkipFilter skipFilter = (SkipFilter) filters.get(2); - byte [] columnPrefix = columnPrefixFilter.getPrefix(); + byte[] columnPrefix = columnPrefixFilter.getPrefix(); assertEquals("realtime", new String(columnPrefix, StandardCharsets.UTF_8)); assertTrue(skipFilter.getFilter() instanceof FamilyFilter); @@ -533,8 +512,7 @@ public void testCompoundFilter4() throws IOException { assertEquals(CompareOperator.EQUAL, familyFilter.getCompareOperator()); assertTrue(familyFilter.getComparator() instanceof SubstringComparator); - SubstringComparator substringComparator = - (SubstringComparator) familyFilter.getComparator(); + SubstringComparator substringComparator = (SubstringComparator) familyFilter.getComparator(); assertEquals("hihi", new String(substringComparator.getValue(), StandardCharsets.UTF_8)); } @@ -545,7 +523,7 @@ public void testCompoundFilter5() throws IOException { assertTrue(valueFilter.getComparator() instanceof SubstringComparator); filterStr = "(ValueFilter(>=,'binary:x') AND (ValueFilter(<=,'binary:y')))" - + " OR ValueFilter(=,'binary:ab')"; + + " OR ValueFilter(=,'binary:ab')"; filter = f.parseFilterString(filterStr); assertTrue(filter instanceof FilterList); List list = ((FilterList) filter).getFilters(); @@ -567,7 +545,7 @@ public void testIncorrectCompareOperator() throws IOException { @Test public void testIncorrectComparatorType() throws IOException { - String filterString = "RowFilter ('>=' , 
'binaryoperator:region')"; + String filterString = "RowFilter ('>=' , 'binaryoperator:region')"; try { doTestFilter(filterString, RowFilter.class); assertTrue(false); @@ -583,8 +561,8 @@ public void testIncorrectComparatorType() throws IOException { System.out.println("RegexStringComparator can only be used with EQUAL or NOT_EQUAL"); } - filterString = "SingleColumnValueFilter" + - " ('family', 'qualifier', '>=', 'substring:a', 'true', 'false')')"; + filterString = "SingleColumnValueFilter" + + " ('family', 'qualifier', '>=', 'substring:a', 'true', 'false')')"; try { doTestFilter(filterString, RowFilter.class); assertTrue(false); @@ -595,10 +573,9 @@ public void testIncorrectComparatorType() throws IOException { @Test public void testPrecedence1() throws IOException { - String filterString = " (PrefixFilter ('realtime')AND FirstKeyOnlyFilter()" + - " OR KeyOnlyFilter())"; - FilterList filterList = - doTestFilter(filterString, FilterList.class); + String filterString = + " (PrefixFilter ('realtime')AND FirstKeyOnlyFilter()" + " OR KeyOnlyFilter())"; + FilterList filterList = doTestFilter(filterString, FilterList.class); ArrayList filters = (ArrayList) filterList.getFilters(); @@ -611,17 +588,16 @@ public void testPrecedence1() throws IOException { assertTrue(filters.get(0) instanceof PrefixFilter); assertTrue(filters.get(1) instanceof FirstKeyOnlyFilter); - PrefixFilter prefixFilter = (PrefixFilter)filters.get(0); - byte [] prefix = prefixFilter.getPrefix(); + PrefixFilter prefixFilter = (PrefixFilter) filters.get(0); + byte[] prefix = prefixFilter.getPrefix(); assertEquals("realtime", new String(prefix, StandardCharsets.UTF_8)); } @Test public void testPrecedence2() throws IOException { - String filterString = " PrefixFilter ('realtime')AND SKIP FirstKeyOnlyFilter()" + - "OR KeyOnlyFilter()"; - FilterList filterList = - doTestFilter(filterString, FilterList.class); + String filterString = + " PrefixFilter ('realtime')AND SKIP FirstKeyOnlyFilter()" + "OR KeyOnlyFilter()"; + FilterList filterList = doTestFilter(filterString, FilterList.class); ArrayList filters = (ArrayList) filterList.getFilters(); assertTrue(filters.get(0) instanceof FilterList); @@ -633,29 +609,27 @@ public void testPrecedence2() throws IOException { assertTrue(filters.get(0) instanceof PrefixFilter); assertTrue(filters.get(1) instanceof SkipFilter); - PrefixFilter prefixFilter = (PrefixFilter)filters.get(0); - byte [] prefix = prefixFilter.getPrefix(); + PrefixFilter prefixFilter = (PrefixFilter) filters.get(0); + byte[] prefix = prefixFilter.getPrefix(); assertEquals("realtime", new String(prefix, StandardCharsets.UTF_8)); - SkipFilter skipFilter = (SkipFilter)filters.get(1); + SkipFilter skipFilter = (SkipFilter) filters.get(1); assertTrue(skipFilter.getFilter() instanceof FirstKeyOnlyFilter); } @Test public void testUnescapedQuote1() throws IOException { String filterString = "InclusiveStopFilter ('row''3')"; - InclusiveStopFilter inclusiveStopFilter = - doTestFilter(filterString, InclusiveStopFilter.class); - byte [] stopRowKey = inclusiveStopFilter.getStopRowKey(); + InclusiveStopFilter inclusiveStopFilter = doTestFilter(filterString, InclusiveStopFilter.class); + byte[] stopRowKey = inclusiveStopFilter.getStopRowKey(); assertEquals("row'3", new String(stopRowKey, StandardCharsets.UTF_8)); } @Test public void testUnescapedQuote2() throws IOException { String filterString = "InclusiveStopFilter ('row''3''')"; - InclusiveStopFilter inclusiveStopFilter = - doTestFilter(filterString, InclusiveStopFilter.class); - byte 
[] stopRowKey = inclusiveStopFilter.getStopRowKey(); + InclusiveStopFilter inclusiveStopFilter = doTestFilter(filterString, InclusiveStopFilter.class); + byte[] stopRowKey = inclusiveStopFilter.getStopRowKey(); assertEquals("row'3'", new String(stopRowKey, StandardCharsets.UTF_8)); } @@ -663,14 +637,14 @@ public void testUnescapedQuote2() throws IOException { public void testUnescapedQuote3() throws IOException { String filterString = " InclusiveStopFilter ('''')"; InclusiveStopFilter inclusiveStopFilter = doTestFilter(filterString, InclusiveStopFilter.class); - byte [] stopRowKey = inclusiveStopFilter.getStopRowKey(); + byte[] stopRowKey = inclusiveStopFilter.getStopRowKey(); assertEquals("'", new String(stopRowKey, StandardCharsets.UTF_8)); } @Test public void testIncorrectFilterString() throws IOException { String filterString = "()"; - byte [] filterStringAsByteArray = Bytes.toBytes(filterString); + byte[] filterStringAsByteArray = Bytes.toBytes(filterString); try { filter = f.parseFilterString(filterStringAsByteArray); assertTrue(false); @@ -694,7 +668,7 @@ public void testRegisterFilter() { private T doTestFilter(String filterString, Class clazz) throws IOException { - byte [] filterStringAsByteArray = Bytes.toBytes(filterString); + byte[] filterStringAsByteArray = Bytes.toBytes(filterString); filter = f.parseFilterString(filterStringAsByteArray); assertEquals(clazz, filter.getClass()); return clazz.cast(filter); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPrefixFilter.java index 89f49bd36503..134ede77d418 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestPrefixFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestPrefixFilter { @ClassRule @@ -40,7 +40,7 @@ public class TestPrefixFilter { static final char FIRST_CHAR = 'a'; static final char LAST_CHAR = 'e'; static final String HOST_PREFIX = "org.apache.site-"; - static final byte [] GOOD_BYTES = Bytes.toBytes("abc"); + static final byte[] GOOD_BYTES = Bytes.toBytes("abc"); @Before public void setUp() throws Exception { @@ -73,24 +73,22 @@ private void prefixRowTests(Filter filter) throws Exception { prefixRowTests(filter, false); } - private void prefixRowTests(Filter filter, boolean lastFilterAllRemaining) - throws Exception { + private void prefixRowTests(Filter filter, boolean lastFilterAllRemaining) throws Exception { for (char c = FIRST_CHAR; c <= LAST_CHAR; c++) { - byte [] t = createRow(c); + byte[] t = createRow(c); assertFalse("Failed with character " + c, filter.filterRowKey(KeyValueUtil.createFirstOnRow(t))); assertFalse(filter.filterAllRemaining()); } String yahooSite = "com.yahoo.www"; - byte [] yahooSiteBytes = Bytes.toBytes(yahooSite); - assertTrue("Failed with character " + - yahooSite, filter.filterRowKey(KeyValueUtil.createFirstOnRow(yahooSiteBytes))); + byte[] yahooSiteBytes = Bytes.toBytes(yahooSite); + assertTrue("Failed with character " + yahooSite, + filter.filterRowKey(KeyValueUtil.createFirstOnRow(yahooSiteBytes))); assertEquals(filter.filterAllRemaining(), lastFilterAllRemaining); } - private byte [] createRow(final char c) { + private byte[] createRow(final char c) { return Bytes.toBytes(HOST_PREFIX + Character.toString(c)); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestQualifierFilterWithEmptyQualifier.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestQualifierFilterWithEmptyQualifier.java index 9afc91f3f6f4..3024d2794a08 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestQualifierFilterWithEmptyQualifier.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestQualifierFilterWithEmptyQualifier.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -56,11 +57,11 @@ /** * Test qualifierFilter with empty qualifier column */ -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestQualifierFilterWithEmptyQualifier { - private final static Logger LOG - = LoggerFactory.getLogger(TestQualifierFilterWithEmptyQualifier.class); + private final static Logger LOG = + LoggerFactory.getLogger(TestQualifierFilterWithEmptyQualifier.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestQualifierFilterWithEmptyQualifier.class); @@ -70,23 +71,22 @@ public class TestQualifierFilterWithEmptyQualifier { @Rule public TestName name = new TestName(); - private static final byte[][] ROWS = - { Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"), - Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") }; + private static final byte[][] ROWS = { Bytes.toBytes("testRowOne-0"), + Bytes.toBytes("testRowOne-1"), Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") }; private static final byte[] FAMILY = Bytes.toBytes("testFamily"); - private static final byte[][] QUALIFIERS = {HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("testQualifier")}; + private static final byte[][] QUALIFIERS = + { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("testQualifier") }; private static final byte[] VALUE = Bytes.toBytes("testValueOne"); private long numRows = (long) ROWS.length; @Before public void setUp() throws Exception { - TableDescriptor htd = TableDescriptorBuilder - .newBuilder(TableName.valueOf("TestQualifierFilter")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build()).build(); + TableDescriptor htd = + TableDescriptorBuilder.newBuilder(TableName.valueOf("TestQualifierFilter")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build()).build(); RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); - this.region = HBaseTestingUtil - .createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); + this.region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), + TEST_UTIL.getConfiguration(), htd); // Insert data for (byte[] ROW : ROWS) { @@ -111,55 +111,49 @@ public void tearDown() throws Exception { public void testQualifierFilterWithEmptyColumn() throws IOException { long colsPerRow = 2; long expectedKeys = colsPerRow / 2; - Filter f = new QualifierFilter(CompareOperator.EQUAL, - new BinaryComparator(QUALIFIERS[0])); + Filter f = new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(QUALIFIERS[0])); Scan s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, this.numRows, expectedKeys); expectedKeys = colsPerRow / 2; - f = new QualifierFilter(CompareOperator.EQUAL, - new BinaryComparator(QUALIFIERS[1])); + f = new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(QUALIFIERS[1])); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, this.numRows, expectedKeys); expectedKeys = colsPerRow / 2; - f = new QualifierFilter(CompareOperator.GREATER, - new BinaryComparator(QUALIFIERS[0])); + f = new QualifierFilter(CompareOperator.GREATER, new BinaryComparator(QUALIFIERS[0])); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, this.numRows, 
expectedKeys); expectedKeys = colsPerRow; - f = new QualifierFilter(CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(QUALIFIERS[0])); + f = new QualifierFilter(CompareOperator.GREATER_OR_EQUAL, new BinaryComparator(QUALIFIERS[0])); s = new Scan(); s.setFilter(f); verifyScanNoEarlyOut(s, this.numRows, expectedKeys); } - private void verifyScanNoEarlyOut(Scan s, long expectedRows, - long expectedKeys) + private void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys) throws IOException { InternalScanner scanner = this.region.getScanner(s); List results = new ArrayList<>(); int i = 0; for (boolean done = true; done; i++) { done = scanner.next(results); - Arrays.sort(results.toArray(new Cell[results.size()]), - CellComparator.getInstance()); + Arrays.sort(results.toArray(new Cell[results.size()]), CellComparator.getInstance()); LOG.info("counter=" + i + ", " + results); - if(results.isEmpty()) { + if (results.isEmpty()) { break; } - assertTrue("Scanned too many rows! Only expected " + expectedRows + - " total but already scanned " + (i+1), expectedRows > i); - assertEquals("Expected " + expectedKeys + " keys per row but " + - "returned " + results.size(), expectedKeys, results.size()); + assertTrue("Scanned too many rows! Only expected " + expectedRows + + " total but already scanned " + (i + 1), + expectedRows > i); + assertEquals("Expected " + expectedKeys + " keys per row but " + "returned " + results.size(), + expectedKeys, results.size()); results.clear(); } - assertEquals("Expected " + expectedRows + " rows but scanned " + i + - " rows", expectedRows, i); + assertEquals("Expected " + expectedRows + " rows but scanned " + i + " rows", expectedRows, i); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java index 35ad88748b23..c194fd51a615 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRandomRowFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestRandomRowFilter { @ClassRule @@ -45,7 +45,6 @@ public void setUp() throws Exception { /** * Tests basics - * * @throws Exception */ @Test @@ -61,25 +60,22 @@ public void testBasics() throws Exception { // since we're dealing with randomness, we must have a include an epsilon // tolerance. 
int epsilon = max / 100; - assertTrue("Roughly 25% should pass the filter", Math.abs(included - max - / 4) < epsilon); + assertTrue("Roughly 25% should pass the filter", Math.abs(included - max / 4) < epsilon); } /** * Tests serialization - * * @throws Exception */ @Test public void testSerialization() throws Exception { RandomRowFilter newFilter = serializationTest(quarterChanceFilter); // use epsilon float comparison - assertTrue("float should be equal", Math.abs(newFilter.getChance() - - quarterChanceFilter.getChance()) < 0.000001f); + assertTrue("float should be equal", + Math.abs(newFilter.getChance() - quarterChanceFilter.getChance()) < 0.000001f); } - private RandomRowFilter serializationTest(RandomRowFilter filter) - throws Exception { + private RandomRowFilter serializationTest(RandomRowFilter filter) throws Exception { // Decompose filter to bytes. byte[] buffer = filter.toByteArray(); @@ -90,4 +86,3 @@ private RandomRowFilter serializationTest(RandomRowFilter filter) } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRegexComparator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRegexComparator.java index 08863bb6d27e..68239cb7d995 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRegexComparator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestRegexComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestRegexComparator { @ClassRule @@ -53,21 +53,19 @@ public void testSerialization() throws Exception { @Test public void testJavaEngine() throws Exception { - for (TestCase t: TEST_CASES) { + for (TestCase t : TEST_CASES) { boolean result = new RegexStringComparator(t.regex, t.flags, EngineType.JAVA) - .compareTo(Bytes.toBytes(t.haystack)) == 0; - assertEquals("Regex '" + t.regex + "' failed test '" + t.haystack + "'", result, - t.expected); + .compareTo(Bytes.toBytes(t.haystack)) == 0; + assertEquals("Regex '" + t.regex + "' failed test '" + t.haystack + "'", result, t.expected); } } @Test public void testJoniEngine() throws Exception { - for (TestCase t: TEST_CASES) { + for (TestCase t : TEST_CASES) { boolean result = new RegexStringComparator(t.regex, t.flags, EngineType.JONI) - .compareTo(Bytes.toBytes(t.haystack)) == 0; - assertEquals("Regex '" + t.regex + "' failed test '" + t.haystack + "'", result, - t.expected); + .compareTo(Bytes.toBytes(t.haystack)) == 0; + assertEquals("Regex '" + t.regex + "' failed test '" + t.haystack + "'", result, t.expected); } } @@ -90,112 +88,64 @@ public TestCase(String regex, int flags, String haystack, boolean expected) { } // These are a subset of the regex tests from OpenJDK 7 - private static TestCase TEST_CASES[] = { - new TestCase("a|b", "a", true), - new TestCase("a|b", "b", true), - new TestCase("a|b", Pattern.CASE_INSENSITIVE, "A", true), - new TestCase("a|b", Pattern.CASE_INSENSITIVE, "B", true), - new TestCase("a|b", "z", false), - new TestCase("a|b|cd", "cd", true), - new TestCase("z(a|ac)b", "zacb", true), - new TestCase("[abc]+", "ababab", true), - new TestCase("[abc]+", "defg", false), - new TestCase("[abc]+[def]+[ghi]+", "zzzaaddggzzz", true), - new 
TestCase("[a-\\u4444]+", "za-9z", true), - new TestCase("[^abc]+", "ababab", false), - new TestCase("[^abc]+", "aaabbbcccdefg", true), - new TestCase("[abc^b]", "b", true), - new TestCase("[abc[def]]", "b", true), - new TestCase("[abc[def]]", "e", true), - new TestCase("[a-c[d-f[g-i]]]", "h", true), - new TestCase("[a-c[d-f[g-i]]m]", "m", true), - new TestCase("[a-c&&[d-f]]", "a", false), - new TestCase("[a-c&&[d-f]]", "z", false), - new TestCase("[a-m&&m-z&&a-c]", "m", false), - new TestCase("[a-m&&m-z&&a-z]", "m", true), - new TestCase("[[a-m]&&[^a-c]]", "a", false), - new TestCase("[[a-m]&&[^a-c]]", "d", true), - new TestCase("[[a-c][d-f]&&abc[def]]", "e", true), - new TestCase("[[a-c]&&[b-d]&&[c-e]]", "c", true), - new TestCase("[[a-c]&&[b-d][c-e]&&[u-z]]", "c", false), - new TestCase("[[a]&&[b][c][a]&&[^d]]", "a", true), - new TestCase("[[a]&&[b][c][a]&&[^d]]", "d", false), - new TestCase("[[[a-d]&&[c-f]]&&[c]&&c&&[cde]]", "c", true), - new TestCase("[x[[wz]abc&&bcd[z]]&&[u-z]]", "z", true), - new TestCase("a.c.+", "a#c%&", true), - new TestCase("ab.", "ab\n", true), - new TestCase("(?s)ab.", "ab\n", true), - new TestCase("ab\\wc", "abcc", true), - new TestCase("\\W\\w\\W", "#r#", true), - new TestCase("\\W\\w\\W", "rrrr#ggg", false), - new TestCase("abc[\\sdef]*", "abc def", true), - new TestCase("abc[\\sy-z]*", "abc y z", true), - new TestCase("abc[a-d\\sm-p]*", "abcaa mn p", true), - new TestCase("\\s\\s\\s", "blah err", false), - new TestCase("\\S\\S\\s", "blah err", true), - new TestCase("ab\\dc", "ab9c", true), - new TestCase("\\d\\d\\d", "blah45", false), - new TestCase("^abc", "abcdef", true), - new TestCase("^abc", "bcdabc", false), - new TestCase("^(a)?a", "a", true), - new TestCase("^(aa(bb)?)+$", "aabbaa", true), - new TestCase("((a|b)?b)+", "b", true), - new TestCase("^(a(b)?)+$", "aba", true), - new TestCase("^(a(b(c)?)?)?abc", "abc", true), - new TestCase("^(a(b(c))).*", "abc", true), - new TestCase("a?b", "aaaab", true), - new TestCase("a?b", "aaacc", false), - new TestCase("a??b", "aaaab", true), - new TestCase("a??b", "aaacc", false), - new TestCase("a?+b", "aaaab", true), - new TestCase("a?+b", "aaacc", false), - new TestCase("a+b", "aaaab", true), - new TestCase("a+b", "aaacc", false), - new TestCase("a+?b", "aaaab", true), - new TestCase("a+?b", "aaacc", false), - new TestCase("a++b", "aaaab", true), - new TestCase("a++b", "aaacc", false), - new TestCase("a{2,3}", "a", false), - new TestCase("a{2,3}", "aa", true), - new TestCase("a{2,3}", "aaa", true), - new TestCase("a{3,}", "zzzaaaazzz", true), - new TestCase("a{3,}", "zzzaazzz", false), - new TestCase("abc(?=d)", "zzzabcd", true), - new TestCase("abc(?=d)", "zzzabced", false), - new TestCase("abc(?!d)", "zzabcd", false), - new TestCase("abc(?!d)", "zzabced", true), - new TestCase("\\w(?<=a)", "###abc###", true), - new TestCase("\\w(?<=a)", "###ert###", false), - new TestCase("(? 
expected0 = new ArrayList<>(16); expected0.addAll(Arrays.asList(rowIds)); // Expect all rows - byte[] prefix1 = {(byte) 0x12, (byte) 0x23}; + byte[] prefix1 = { (byte) 0x12, (byte) 0x23 }; List expected1 = new ArrayList<>(16); expected1.add(rowIds[2]); expected1.add(rowIds[3]); expected1.add(rowIds[4]); expected1.add(rowIds[5]); - byte[] prefix2 = {(byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFF}; + byte[] prefix2 = { (byte) 0x12, (byte) 0x23, (byte) 0xFF, (byte) 0xFF }; List expected2 = new ArrayList<>(); expected2.add(rowIds[3]); expected2.add(rowIds[4]); expected2.add(rowIds[5]); - byte[] prefix3 = {(byte) 0x12, (byte) 0x24}; + byte[] prefix3 = { (byte) 0x12, (byte) 0x24 }; List expected3 = new ArrayList<>(); expected3.add(rowIds[6]); expected3.add(rowIds[7]); expected3.add(rowIds[8]); - byte[] prefix4 = {(byte) 0xFF, (byte) 0xFF}; + byte[] prefix4 = { (byte) 0xFF, (byte) 0xFF }; List expected4 = new ArrayList<>(); expected4.add(rowIds[10]); @@ -216,15 +214,11 @@ private void verifyScanResult(Table table, Scan scan, List expectedKeys, String fullMessage = message; if (LOG.isDebugEnabled()) { - fullMessage = message + "\n" + tableOfTwoListsOfByteArrays( - "Expected", expectedKeys, - "Actual ", actualKeys); + fullMessage = message + "\n" + + tableOfTwoListsOfByteArrays("Expected", expectedKeys, "Actual ", actualKeys); } - Assert.assertArrayEquals( - fullMessage, - expectedKeys.toArray(), - actualKeys.toArray()); + Assert.assertArrayEquals(fullMessage, expectedKeys.toArray(), actualKeys.toArray()); } catch (IOException e) { e.printStackTrace(); Assert.fail(); @@ -239,9 +233,8 @@ private String printMultiple(char letter, int count) { return sb.toString(); } - private String tableOfTwoListsOfByteArrays( - String label1, List listOfBytes1, - String label2, List listOfBytes2) { + private String tableOfTwoListsOfByteArrays(String label1, List listOfBytes1, + String label2, List listOfBytes2) { int margin1 = calculateWidth(label1, listOfBytes1); int margin2 = calculateWidth(label2, listOfBytes2); @@ -261,11 +254,8 @@ private String tableOfTwoListsOfByteArrays( } private String printLine(String leftValue, int leftWidth1, String rightValue, int rightWidth) { - return "| " + - leftValue + printMultiple(' ', leftWidth1 - leftValue.length() ) + - " | " + - rightValue + printMultiple(' ', rightWidth - rightValue.length()) + - " |"; + return "| " + leftValue + printMultiple(' ', leftWidth1 - leftValue.length()) + " | " + + rightValue + printMultiple(' ', rightWidth - rightValue.length()) + " |"; } private int calculateWidth(String label1, List listOfBytes1) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSeekHints.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSeekHints.java index 2de70f6518e2..11f963dd8fdb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSeekHints.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSeekHints.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.filter; import static org.junit.Assert.assertArrayEquals; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -47,7 +48,8 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({ FilterTests.class, MediumTests.class }) public class TestSeekHints { +@Category({ FilterTests.class, MediumTests.class }) +public class TestSeekHints { private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static String cf = "f"; @@ -55,12 +57,15 @@ private static String table = "t"; private static Table ht; - @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSeekHints.class); + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSeekHints.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); - @BeforeClass public static void setUpBeforeClass() throws Exception { + @BeforeClass + public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt("hbase.client.scanner.caching", 1000); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, @@ -71,8 +76,7 @@ TEST_UTIL.startMiniCluster(); // load the mini cluster with a single table with 20 rows, with rowkeys of a single byte, 0-19. - ht = TEST_UTIL.createTable(TableName.valueOf(table), Bytes.toBytes(cf), - Integer.MAX_VALUE); + ht = TEST_UTIL.createTable(TableName.valueOf(table), Bytes.toBytes(cf), Integer.MAX_VALUE); for (byte b = 0; b < 20; b++) { Put put = new Put(new byte[] { b }).addColumn(Bytes.toBytes(cf), Bytes.toBytes(cq), Bytes.toBytes("value")); @@ -81,7 +85,8 @@ TEST_UTIL.flush(); } - @AfterClass public static void tearDownAfterClass() throws Exception { + @AfterClass + public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -154,11 +159,13 @@ public KeepAllButSeekFilter(byte seekStartRow, byte seekTargetRow) { this.seekTargetRow = seekTargetRow; } - /* We return SEEK_NEXT_USING_HINT when we hit the specified row, but we return INCLUDE for all + /* + * We return SEEK_NEXT_USING_HINT when we hit the specified row, but we return INCLUDE for all * other rows. This will wind up including the rows between our "seek" row and our "hint" row * only if we don't seek past them. 
*/ - @Override public ReturnCode filterCell(final Cell c) throws IOException { + @Override + public ReturnCode filterCell(final Cell c) throws IOException { byte rowKeyPrefix = CellUtil.cloneRow(c)[0]; if (rowKeyPrefix == seekStartRow) { return ReturnCode.SEEK_NEXT_USING_HINT; @@ -166,16 +173,18 @@ public KeepAllButSeekFilter(byte seekStartRow, byte seekTargetRow) { return ReturnCode.INCLUDE; } - @Override public Cell getNextCellHint(Cell currentCell) { + @Override + public Cell getNextCellHint(Cell currentCell) { return PrivateCellUtil.createFirstOnRow(new byte[] { seekTargetRow }); } - @Override public byte[] toByteArray() { + @Override + public byte[] toByteArray() { return new byte[] { seekStartRow, seekTargetRow }; } public static KeepAllButSeekFilter parseFrom(final byte[] pbBytes) - throws DeserializationException { + throws DeserializationException { return new KeepAllButSeekFilter(pbBytes[0], pbBytes[1]); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java index c365237342a9..15306fbed829 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,12 +36,11 @@ import org.junit.experimental.categories.Category; /** - * Tests for {@link SingleColumnValueExcludeFilter}. Because this filter - * extends {@link SingleColumnValueFilter}, only the added functionality is - * tested. That is, method filterCell(Cell). - * + * Tests for {@link SingleColumnValueExcludeFilter}. Because this filter extends + * {@link SingleColumnValueFilter}, only the added functionality is tested. That is, method + * filterCell(Cell). 
*/ -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestSingleColumnValueExcludeFilter { @ClassRule @@ -62,15 +61,15 @@ public class TestSingleColumnValueExcludeFilter { @Test public void testFilterCell() throws Exception { Filter filter = new SingleColumnValueExcludeFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, - CompareOperator.EQUAL, VAL_1); + CompareOperator.EQUAL, VAL_1); // A 'match' situation List kvs = new ArrayList<>(); KeyValue c = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1); - kvs.add (new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1)); - kvs.add (new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1)); - kvs.add (new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1)); + kvs.add(new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1)); + kvs.add(new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1)); + kvs.add(new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1)); filter.filterRowCells(kvs); @@ -92,6 +91,4 @@ public void testFilterCell() throws Exception { assertTrue("otherColumn", filter.filterCell(c) == Filter.ReturnCode.NEXT_ROW); } - } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java index 24ed51b30a09..c40962bd07cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ /** * Tests the value filter */ -@Category({FilterTests.class, SmallTests.class}) +@Category({ FilterTests.class, SmallTests.class }) public class TestSingleColumnValueFilter { @ClassRule @@ -48,18 +48,19 @@ public class TestSingleColumnValueFilter { private static final byte[] ROW = Bytes.toBytes("test"); private static final byte[] COLUMN_FAMILY = Bytes.toBytes("test"); - private static final byte [] COLUMN_QUALIFIER = Bytes.toBytes("foo"); + private static final byte[] COLUMN_QUALIFIER = Bytes.toBytes("foo"); private static final byte[] VAL_1 = Bytes.toBytes("a"); private static final byte[] VAL_2 = Bytes.toBytes("ab"); private static final byte[] VAL_3 = Bytes.toBytes("abc"); private static final byte[] VAL_4 = Bytes.toBytes("abcd"); private static final byte[] FULLSTRING_1 = - Bytes.toBytes("The quick brown fox jumps over the lazy dog."); + Bytes.toBytes("The quick brown fox jumps over the lazy dog."); private static final byte[] FULLSTRING_2 = - Bytes.toBytes("The slow grey fox trips over the lazy dog."); + Bytes.toBytes("The slow grey fox trips over the lazy dog."); private static final String QUICK_SUBSTR = "quick"; private static final String QUICK_REGEX = ".+quick.+"; - private static final Pattern QUICK_PATTERN = Pattern.compile("QuIcK", Pattern.CASE_INSENSITIVE | Pattern.DOTALL); + private static final Pattern QUICK_PATTERN = + Pattern.compile("QuIcK", Pattern.CASE_INSENSITIVE | Pattern.DOTALL); Filter basicFilter; Filter nullFilter; @@ -78,7 +79,7 @@ public void setUp() throws Exception { private Filter basicFilterNew() { return new SingleColumnValueFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, - CompareOperator.GREATER_OR_EQUAL, VAL_2); + CompareOperator.GREATER_OR_EQUAL, VAL_2); } 
private Filter nullFilterNew() { @@ -87,29 +88,25 @@ private Filter nullFilterNew() { } private Filter substrFilterNew() { - return new SingleColumnValueFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, - CompareOperator.EQUAL, - new SubstringComparator(QUICK_SUBSTR)); + return new SingleColumnValueFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, CompareOperator.EQUAL, + new SubstringComparator(QUICK_SUBSTR)); } private Filter regexFilterNew() { - return new SingleColumnValueFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, - CompareOperator.EQUAL, - new RegexStringComparator(QUICK_REGEX)); + return new SingleColumnValueFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, CompareOperator.EQUAL, + new RegexStringComparator(QUICK_REGEX)); } private Filter regexFilterNew(Pattern pattern) { - return new SingleColumnValueFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, - CompareOperator.EQUAL, + return new SingleColumnValueFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, CompareOperator.EQUAL, new RegexStringComparator(pattern.pattern(), pattern.flags())); } @Test public void testLongComparator() throws IOException { - Filter filter = new SingleColumnValueFilter(COLUMN_FAMILY, - COLUMN_QUALIFIER, CompareOperator.GREATER, new LongComparator(100L)); - KeyValue cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, - Bytes.toBytes(1L)); + Filter filter = new SingleColumnValueFilter(COLUMN_FAMILY, COLUMN_QUALIFIER, + CompareOperator.GREATER, new LongComparator(100L)); + KeyValue cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(1L)); assertTrue("less than", filter.filterCell(cell) == Filter.ReturnCode.NEXT_ROW); filter.reset(); byte[] buffer = cell.getBuffer(); @@ -117,8 +114,7 @@ public void testLongComparator() throws IOException { assertTrue("less than", filter.filterCell(c) == Filter.ReturnCode.NEXT_ROW); filter.reset(); - cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, - Bytes.toBytes(100L)); + cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(100L)); assertTrue("Equals 100", filter.filterCell(cell) == Filter.ReturnCode.NEXT_ROW); filter.reset(); buffer = cell.getBuffer(); @@ -126,8 +122,7 @@ public void testLongComparator() throws IOException { assertTrue("Equals 100", filter.filterCell(c) == Filter.ReturnCode.NEXT_ROW); filter.reset(); - cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, - Bytes.toBytes(120L)); + cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(120L)); assertTrue("include 120", filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); filter.reset(); buffer = cell.getBuffer(); @@ -135,8 +130,7 @@ public void testLongComparator() throws IOException { assertTrue("include 120", filter.filterCell(c) == Filter.ReturnCode.INCLUDE); } - private void basicFilterTests(SingleColumnValueFilter filter) - throws Exception { + private void basicFilterTests(SingleColumnValueFilter filter) throws Exception { KeyValue cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2); assertTrue("basicFilter1", filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); byte[] buffer = cell.getBuffer(); @@ -198,17 +192,13 @@ private void nullFilterTests(Filter filter) throws Exception { assertTrue("null2FilterRow", filter.filterRow()); } - private void substrFilterTests(Filter filter) - throws Exception { - KeyValue cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, - FULLSTRING_1); - assertTrue("substrTrue", - filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); + private void substrFilterTests(Filter filter) throws Exception { + KeyValue cell = new KeyValue(ROW, 
COLUMN_FAMILY, COLUMN_QUALIFIER, FULLSTRING_1); + assertTrue("substrTrue", filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); byte[] buffer = cell.getBuffer(); Cell c = new ByteBufferKeyValue(ByteBuffer.wrap(buffer), 0, buffer.length); assertTrue("substrTrue", filter.filterCell(c) == Filter.ReturnCode.INCLUDE); - cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, - FULLSTRING_2); + cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, FULLSTRING_2); assertTrue("substrFalse", filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); buffer = cell.getBuffer(); c = new ByteBufferKeyValue(ByteBuffer.wrap(buffer), 0, buffer.length); @@ -217,17 +207,13 @@ private void substrFilterTests(Filter filter) assertFalse("substrFilterNotNull", filter.filterRow()); } - private void regexFilterTests(Filter filter) - throws Exception { - KeyValue cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, - FULLSTRING_1); - assertTrue("regexTrue", - filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); + private void regexFilterTests(Filter filter) throws Exception { + KeyValue cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, FULLSTRING_1); + assertTrue("regexTrue", filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); byte[] buffer = cell.getBuffer(); Cell c = new ByteBufferKeyValue(ByteBuffer.wrap(buffer), 0, buffer.length); assertTrue("regexTrue", filter.filterCell(c) == Filter.ReturnCode.INCLUDE); - cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, - FULLSTRING_2); + cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, FULLSTRING_2); assertTrue("regexFalse", filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); buffer = cell.getBuffer(); c = new ByteBufferKeyValue(ByteBuffer.wrap(buffer), 0, buffer.length); @@ -236,12 +222,9 @@ private void regexFilterTests(Filter filter) assertFalse("regexFilterNotNull", filter.filterRow()); } - private void regexPatternFilterTests(Filter filter) - throws Exception { - KeyValue cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, - FULLSTRING_1); - assertTrue("regexTrue", - filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); + private void regexPatternFilterTests(Filter filter) throws Exception { + KeyValue cell = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, FULLSTRING_1); + assertTrue("regexTrue", filter.filterCell(cell) == Filter.ReturnCode.INCLUDE); byte[] buffer = cell.getBuffer(); Cell c = new ByteBufferKeyValue(ByteBuffer.wrap(buffer), 0, buffer.length); assertTrue("regexTrue", filter.filterCell(c) == Filter.ReturnCode.INCLUDE); @@ -249,8 +232,7 @@ private void regexPatternFilterTests(Filter filter) assertFalse("regexFilterNotNull", filter.filterRow()); } - private Filter serializationTest(Filter filter) - throws Exception { + private Filter serializationTest(Filter filter) throws Exception { // Decompose filter to bytes. 
byte[] buffer = filter.toByteArray(); @@ -279,7 +261,7 @@ public void testStop() throws Exception { @Test public void testSerialization() throws Exception { Filter newFilter = serializationTest(basicFilter); - basicFilterTests((SingleColumnValueFilter)newFilter); + basicFilterTests((SingleColumnValueFilter) newFilter); newFilter = serializationTest(nullFilter); nullFilterTests(newFilter); newFilter = serializationTest(substrFilter); @@ -291,4 +273,3 @@ public void testSerialization() throws Exception { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java index 8095da178188..4c2598e789f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,14 +51,12 @@ import org.slf4j.LoggerFactory; /** - * Tests for the hdfs fix from HBASE-6435. - * - * Please don't add new subtest which involves starting / stopping MiniDFSCluster in this class. - * When stopping MiniDFSCluster, shutdown hooks would be cleared in hadoop's ShutdownHookManager - * in hadoop 3. - * This leads to 'Failed suppression of fs shutdown hook' error in region server. + * Tests for the hdfs fix from HBASE-6435. Please don't add new subtest which involves starting / + * stopping MiniDFSCluster in this class. When stopping MiniDFSCluster, shutdown hooks would be + * cleared in hadoop's ShutdownHookManager in hadoop 3. This leads to 'Failed suppression of fs + * shutdown hook' error in region server. */ -@Category({MiscTests.class, LargeTests.class}) +@Category({ MiscTests.class, LargeTests.class }) public class TestBlockReorder { @ClassRule @@ -83,8 +81,8 @@ public void setUp() throws Exception { htu = new HBaseTestingUtil(); htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks htu.getConfiguration().setInt("dfs.replication", 3); - htu.startMiniDFSCluster(3, - new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3}); + htu.startMiniDFSCluster(3, new String[] { "/r1", "/r2", "/r3" }, + new String[] { host1, host2, host3 }); conf = htu.getConfiguration(); cluster = htu.getDFSCluster(); @@ -152,13 +150,13 @@ public void testBlockLocationReorder() throws Exception { break; } } - Assert.assertTrue( - "didn't find the server to kill, was looking for " + lookup + " found " + sb, ok); + Assert.assertTrue("didn't find the server to kill, was looking for " + lookup + " found " + sb, + ok); LOG.info("ipc port= " + ipcPort); // Add the hook, with an implementation checking that we don't use the port we've just killed. 
- Assert.assertTrue(HFileSystem.addLocationsOrderInterceptor(conf, - new HFileSystem.ReorderBlocks() { + Assert + .assertTrue(HFileSystem.addLocationsOrderInterceptor(conf, new HFileSystem.ReorderBlocks() { @Override public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) { for (LocatedBlock lb : lbs.getLocatedBlocks()) { @@ -175,7 +173,6 @@ public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) { } })); - final int retries = 10; ServerSocket ss = null; ServerSocket ssI; @@ -183,9 +180,10 @@ public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) { ss = new ServerSocket(port);// We're taking the port to have a timeout issue later. ssI = new ServerSocket(ipcPort); } catch (BindException be) { - LOG.warn("Got bind exception trying to set up socket on " + port + " or " + ipcPort + - ", this means that the datanode has not closed the socket or" + - " someone else took it. It may happen, skipping this test for this time.", be); + LOG.warn("Got bind exception trying to set up socket on " + port + " or " + ipcPort + + ", this means that the datanode has not closed the socket or" + + " someone else took it. It may happen, skipping this test for this time.", + be); if (ss != null) { ss.close(); } @@ -193,7 +191,7 @@ public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) { } // Now it will fail with a timeout, unfortunately it does not always connect to the same box, - // so we try retries times; with the reorder it will never last more than a few milli seconds + // so we try retries times; with the reorder it will never last more than a few milli seconds for (int i = 0; i < retries; i++) { start = EnvironmentEdgeManager.currentTime(); fin = dfs.open(p); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java index c38b99705918..2853e7ab2e57 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,14 +45,12 @@ import org.junit.rules.TestName; /** - * Tests for the hdfs fix from HBASE-6435. - * - * Please don't add new subtest which involves starting / stopping MiniDFSCluster in this class. - * When stopping MiniDFSCluster, shutdown hooks would be cleared in hadoop's ShutdownHookManager - * in hadoop 3. - * This leads to 'Failed suppression of fs shutdown hook' error in region server. + * Tests for the hdfs fix from HBASE-6435. Please don't add new subtest which involves starting / + * stopping MiniDFSCluster in this class. When stopping MiniDFSCluster, shutdown hooks would be + * cleared in hadoop's ShutdownHookManager in hadoop 3. This leads to 'Failed suppression of fs + * shutdown hook' error in region server. 
*/ -@Category({MiscTests.class, LargeTests.class}) +@Category({ MiscTests.class, LargeTests.class }) public class TestBlockReorderBlockLocation { @ClassRule @@ -75,8 +73,8 @@ public void setUp() throws Exception { htu = new HBaseTestingUtil(); htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks htu.getConfiguration().setInt("dfs.replication", 3); - htu.startMiniDFSCluster(3, - new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3}); + htu.startMiniDFSCluster(3, new String[] { "/r1", "/r2", "/r3" }, + new String[] { host1, host2, host3 }); conf = htu.getConfiguration(); cluster = htu.getDFSCluster(); @@ -88,7 +86,6 @@ public void tearDownAfterClass() throws Exception { htu.shutdownMiniCluster(); } - private static ClientProtocol getNamenode(DFSClient dfsc) throws Exception { Field nf = DFSClient.class.getDeclaredField("namenode"); nf.setAccessible(true); @@ -100,12 +97,11 @@ private static ClientProtocol getNamenode(DFSClient dfsc) throws Exception { */ @Test public void testBlockLocation() throws Exception { - // We need to start HBase to get HConstants.HBASE_DIR set in conf + // We need to start HBase to get HConstants.HBASE_DIR set in conf htu.startMiniZKCluster(); SingleProcessHBaseCluster hbm = htu.startMiniHBaseCluster(); conf = hbm.getConfiguration(); - // The "/" is mandatory, without it we've got a null pointer exception on the namenode final String fileName = "/helloWorld"; Path p = new Path(fileName); @@ -119,7 +115,7 @@ public void testBlockLocation() throws Exception { fop.writeDouble(toWrite); fop.close(); - for (int i=0; i<10; i++){ + for (int i = 0; i < 10; i++) { // The interceptor is not set in this test, so we get the raw list at this point LocatedBlocks l; final long max = EnvironmentEdgeManager.currentTime() + 10000; @@ -132,7 +128,7 @@ public void testBlockLocation() throws Exception { } while (l.get(0).getLocations().length != repCount); // Should be filtered, the name is different => The order won't change - Object originalList [] = l.getLocatedBlocks().toArray(); + Object originalList[] = l.getLocatedBlocks().toArray(); HFileSystem.ReorderWALBlocks lrb = new HFileSystem.ReorderWALBlocks(); lrb.reorderBlocks(conf, l, fileName); Assert.assertArrayEquals(originalList, l.getLocatedBlocks().toArray()); @@ -140,8 +136,8 @@ public void testBlockLocation() throws Exception { // Should be reordered, as we pretend to be a file name with a compliant stuff Assert.assertNotNull(conf.get(HConstants.HBASE_DIR)); Assert.assertFalse(conf.get(HConstants.HBASE_DIR).isEmpty()); - String pseudoLogFile = conf.get(HConstants.HBASE_DIR) + "/" + - HConstants.HREGION_LOGDIR_NAME + "/" + host1 + ",6977,6576" + "/mylogfile"; + String pseudoLogFile = conf.get(HConstants.HBASE_DIR) + "/" + HConstants.HREGION_LOGDIR_NAME + + "/" + host1 + ",6977,6576" + "/mylogfile"; // Check that it will be possible to extract a ServerName from our construction Assert.assertNotNull("log= " + pseudoLogFile, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java index 3ff71352b0f9..5825d22ec9e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -63,14 +63,12 @@ import org.slf4j.LoggerFactory; /** - * Tests for the hdfs fix from HBASE-6435. - * - * Please don't add new subtest which involves starting / stopping MiniDFSCluster in this class. - * When stopping MiniDFSCluster, shutdown hooks would be cleared in hadoop's ShutdownHookManager - * in hadoop 3. - * This leads to 'Failed suppression of fs shutdown hook' error in region server. + * Tests for the hdfs fix from HBASE-6435. Please don't add new subtest which involves starting / + * stopping MiniDFSCluster in this class. When stopping MiniDFSCluster, shutdown hooks would be + * cleared in hadoop's ShutdownHookManager in hadoop 3. This leads to 'Failed suppression of fs + * shutdown hook' error in region server. */ -@Category({MiscTests.class, LargeTests.class}) +@Category({ MiscTests.class, LargeTests.class }) public class TestBlockReorderMultiBlocks { @ClassRule @@ -95,8 +93,8 @@ public void setUp() throws Exception { htu = new HBaseTestingUtil(); htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks htu.getConfiguration().setInt("dfs.replication", 3); - htu.startMiniDFSCluster(3, - new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3}); + htu.startMiniDFSCluster(3, new String[] { "/r1", "/r2", "/r3" }, + new String[] { host1, host2, host3 }); conf = htu.getConfiguration(); cluster = htu.getDFSCluster(); @@ -121,10 +119,11 @@ public void testHBaseCluster() throws Exception { HRegionServer targetRs = hbm.getRegionServer(0); // We want to have a datanode with the same name as the region server, so - // we're going to get the regionservername, and start a new datanode with this name. + // we're going to get the regionservername, and start a new datanode with this name. 
String host4 = targetRs.getServerName().getHostname(); LOG.info("Starting a new datanode with the name=" + host4); - cluster.startDataNodes(conf, 1, true, null, new String[]{"/r4"}, new String[]{host4}, null); + cluster.startDataNodes(conf, 1, true, null, new String[] { "/r4" }, new String[] { host4 }, + null); cluster.waitClusterUp(); final int repCount = 3; @@ -140,12 +139,12 @@ public void testHBaseCluster() throws Exception { // Now we need to find the log file, its locations, and look at it - String rootDir = new Path(CommonFSUtils.getWALRootDir(conf) + "/" + - HConstants.HREGION_LOGDIR_NAME + "/" + targetRs.getServerName().toString()).toUri().getPath(); - - DistributedFileSystem mdfs = (DistributedFileSystem) - hbm.getMaster().getMasterFileSystem().getFileSystem(); + String rootDir = + new Path(CommonFSUtils.getWALRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME + "/" + + targetRs.getServerName().toString()).toUri().getPath(); + DistributedFileSystem mdfs = + (DistributedFileSystem) hbm.getMaster().getMasterFileSystem().getFileSystem(); int nbTest = 0; while (nbTest < 10) { @@ -153,11 +152,11 @@ public void testHBaseCluster() throws Exception { final CountDownLatch latch = new CountDownLatch(regions.size()); // listen for successful log rolls final WALActionsListener listener = new WALActionsListener() { - @Override - public void postLogRoll(final Path oldPath, final Path newPath) throws IOException { - latch.countDown(); - } - }; + @Override + public void postLogRoll(final Path oldPath, final Path newPath) throws IOException { + latch.countDown(); + } + }; for (HRegion region : regions) { region.getWAL().registerWALActionsListener(listener); } @@ -168,12 +167,12 @@ public void postLogRoll(final Path oldPath, final Path newPath) throws IOExcepti try { latch.await(); } catch (InterruptedException exception) { - LOG.warn("Interrupted while waiting for the wal of '" + targetRs + "' to roll. If later " + - "tests fail, it's probably because we should still be waiting."); + LOG.warn("Interrupted while waiting for the wal of '" + targetRs + "' to roll. If later " + + "tests fail, it's probably because we should still be waiting."); Thread.currentThread().interrupt(); } for (Region region : regions) { - ((HRegion)region).getWAL().unregisterWALActionsListener(listener); + ((HRegion) region).getWAL().unregisterWALActionsListener(listener); } // We need a sleep as the namenode is informed asynchronously @@ -226,14 +225,16 @@ public void postLogRoll(final Path oldPath, final Path newPath) throws IOExcepti } } } catch (FileNotFoundException exception) { - LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + - "archived out from under us so we'll ignore and retry. If this test hangs " + - "indefinitely you should treat this failure as a symptom.", exception); + LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + + "archived out from under us so we'll ignore and retry. If this test hangs " + + "indefinitely you should treat this failure as a symptom.", + exception); } catch (RemoteException exception) { if (exception.unwrapRemoteException() instanceof FileNotFoundException) { - LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + - "archived out from under us so we'll ignore and retry. 
If this test hangs " + - "indefinitely you should treat this failure as a symptom.", exception); + LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " + + "archived out from under us so we'll ignore and retry. If this test hangs " + + "indefinitely you should treat this failure as a symptom.", + exception); } else { throw exception; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java index 07b2e64f3a4f..edf35b5d9a50 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.File; import java.lang.management.ManagementFactory; import java.net.HttpURLConnection; @@ -113,12 +114,11 @@ public static void beforeClass() throws Exception { KDC.createPrincipal(KEYTAB_FILE, PRINCIPAL, HTTP_PRINCIPAL, USER_ADMIN_STR, USER_NONE_STR); UTIL.startMiniZKCluster(); - HBaseKerberosUtils.setSecuredConfiguration(conf, - PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, PRINCIPAL + "@" + KDC.getRealm(), + HTTP_PRINCIPAL + "@" + KDC.getRealm()); HBaseKerberosUtils.setSSLConfiguration(UTIL, TestInfoServersACL.class); - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - TokenProvider.class.getName()); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName()); UTIL.startMiniDFSCluster(1); Path rootdir = UTIL.getDataTestDirOnTestFS("TestInfoServersACL"); CommonFSUtils.setRootDir(conf, rootdir); @@ -137,7 +137,7 @@ public static void beforeClass() throws Exception { conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); // only user admin will have acl access conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, USER_ADMIN_STR); - //conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, ""); + // conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, ""); CLUSTER = new LocalHBaseCluster(conf, 1); CLUSTER.startup(); @@ -161,13 +161,14 @@ public static void shutDownMiniCluster() throws Exception { @Test public void testAuthorizedUser() throws Exception { - UserGroupInformation admin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation admin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); admin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { + @Override + public Void run() throws Exception { // Check the expected content is present in the http response String expectedContent = "Get Log Level"; - Pair pair = getLogLevelPage(); + Pair pair = getLogLevelPage(); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertTrue("expected=" + expectedContent + ", content=" + pair.getSecond(), pair.getSecond().contains(expectedContent)); @@ -178,11 +179,12 @@ public void testAuthorizedUser() throws Exception { @Test public void testUnauthorizedUser() throws Exception { - UserGroupInformation nonAdmin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation nonAdmin = UserGroupInformation + 
.loginUserFromKeytabAndReturnUGI(USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); nonAdmin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { - Pair pair = getLogLevelPage(); + @Override + public Void run() throws Exception { + Pair pair = getLogLevelPage(); assertEquals(HttpURLConnection.HTTP_FORBIDDEN, pair.getFirst().intValue()); return null; } @@ -192,12 +194,13 @@ public void testUnauthorizedUser() throws Exception { @Test public void testTableActionsAvailableForAdmins() throws Exception { final String expectedAuthorizedContent = "Actions:"; - UserGroupInformation admin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation admin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); admin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { + @Override + public Void run() throws Exception { // Check the expected content is present in the http response - Pair pair = getTablePage(TableName.META_TABLE_NAME); + Pair pair = getTablePage(TableName.META_TABLE_NAME); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertTrue("expected=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), pair.getSecond().contains(expectedAuthorizedContent)); @@ -205,14 +208,16 @@ public void testTableActionsAvailableForAdmins() throws Exception { } }); - UserGroupInformation nonAdmin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation nonAdmin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); nonAdmin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { - Pair pair = getTablePage(TableName.META_TABLE_NAME); + @Override + public Void run() throws Exception { + Pair pair = getTablePage(TableName.META_TABLE_NAME); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); - assertFalse("should not find=" + expectedAuthorizedContent + ", content=" + - pair.getSecond(), pair.getSecond().contains(expectedAuthorizedContent)); + assertFalse( + "should not find=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), + pair.getSecond().contains(expectedAuthorizedContent)); return null; } }); @@ -221,12 +226,13 @@ public void testTableActionsAvailableForAdmins() throws Exception { @Test public void testLogsAvailableForAdmins() throws Exception { final String expectedAuthorizedContent = "Directory: /logs/"; - UserGroupInformation admin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation admin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); admin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { + @Override + public Void run() throws Exception { // Check the expected content is present in the http response - Pair pair = getLogsPage(); + Pair pair = getLogsPage(); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertTrue("expected=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), pair.getSecond().contains(expectedAuthorizedContent)); @@ -234,11 +240,12 @@ public void testLogsAvailableForAdmins() throws Exception { } }); - UserGroupInformation nonAdmin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - 
USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation nonAdmin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); nonAdmin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { - Pair pair = getLogsPage(); + @Override + public Void run() throws Exception { + Pair pair = getLogsPage(); assertEquals(HttpURLConnection.HTTP_FORBIDDEN, pair.getFirst().intValue()); return null; } @@ -248,12 +255,13 @@ public void testLogsAvailableForAdmins() throws Exception { @Test public void testDumpActionsAvailableForAdmins() throws Exception { final String expectedAuthorizedContent = "Master status for"; - UserGroupInformation admin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation admin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); admin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { + @Override + public Void run() throws Exception { // Check the expected content is present in the http response - Pair pair = getMasterDumpPage(); + Pair pair = getMasterDumpPage(); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertTrue("expected=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), pair.getSecond().contains(expectedAuthorizedContent)); @@ -261,11 +269,12 @@ public void testDumpActionsAvailableForAdmins() throws Exception { } }); - UserGroupInformation nonAdmin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation nonAdmin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); nonAdmin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { - Pair pair = getMasterDumpPage(); + @Override + public Void run() throws Exception { + Pair pair = getMasterDumpPage(); assertEquals(HttpURLConnection.HTTP_FORBIDDEN, pair.getFirst().intValue()); return null; } @@ -275,12 +284,13 @@ public void testDumpActionsAvailableForAdmins() throws Exception { @Test public void testStackActionsAvailableForAdmins() throws Exception { final String expectedAuthorizedContent = "Process Thread Dump"; - UserGroupInformation admin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation admin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); admin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { + @Override + public Void run() throws Exception { // Check the expected content is present in the http response - Pair pair = getStacksPage(); + Pair pair = getStacksPage(); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertTrue("expected=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), pair.getSecond().contains(expectedAuthorizedContent)); @@ -288,11 +298,12 @@ public void testStackActionsAvailableForAdmins() throws Exception { } }); - UserGroupInformation nonAdmin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation nonAdmin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); nonAdmin.doAs(new PrivilegedExceptionAction() { - @Override public Void 
run() throws Exception { - Pair pair = getStacksPage(); + @Override + public Void run() throws Exception { + Pair pair = getStacksPage(); assertEquals(HttpURLConnection.HTTP_FORBIDDEN, pair.getFirst().intValue()); return null; } @@ -305,8 +316,8 @@ public void testJmxAvailableForAdmins() throws Exception { UTIL.waitFor(30000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - for (ObjectName name: ManagementFactory.getPlatformMBeanServer(). - queryNames(new ObjectName("*:*"), null)) { + for (ObjectName name : ManagementFactory.getPlatformMBeanServer() + .queryNames(new ObjectName("*:*"), null)) { if (name.toString().contains(expectedAuthorizedContent)) { LOG.info("{}", name); return true; @@ -315,12 +326,13 @@ public boolean evaluate() throws Exception { return false; } }); - UserGroupInformation admin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation admin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); admin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { + @Override + public Void run() throws Exception { // Check the expected content is present in the http response - Pair pair = getJmxPage(); + Pair pair = getJmxPage(); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertTrue("expected=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), pair.getSecond().contains(expectedAuthorizedContent)); @@ -328,11 +340,12 @@ public boolean evaluate() throws Exception { } }); - UserGroupInformation nonAdmin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation nonAdmin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); nonAdmin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { - Pair pair = getJmxPage(); + @Override + public Void run() throws Exception { + Pair pair = getJmxPage(); assertEquals(HttpURLConnection.HTTP_FORBIDDEN, pair.getFirst().intValue()); return null; } @@ -344,12 +357,13 @@ public void testMetricsAvailableForAdmins() throws Exception { // Looks like there's nothing exported to this, but leave it since // it's Hadoop2 only and will eventually be removed due to that. 
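All of the access-control hunks above reformat the same test pattern: log a principal in from the shared keytab, run the page fetch inside UserGroupInformation.doAs(), and assert HTTP_OK for the admin user and HTTP_FORBIDDEN for the unprivileged one. A minimal sketch of that doAs structure follows; the class and constant names are illustrative only, and it fetches with a plain HttpURLConnection to keep the example short, whereas the test itself goes through the SPNEGO-enabled HttpClient built in createHttpClient().

    import static org.junit.Assert.assertEquals;

    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class InfoServerAclSketch {
      // Illustrative values; the real test derives principals and the keytab
      // from its mini-KDC setup.
      static final String ADMIN_PRINCIPAL = "admin";
      static final String NON_ADMIN_PRINCIPAL = "nonadmin";
      static final String KEYTAB_PATH = "/tmp/test.keytab";

      // Fetch a URL while running as the given principal and return the HTTP status code.
      static int statusAs(String principal, String url) throws Exception {
        UserGroupInformation ugi =
          UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, KEYTAB_PATH);
        return ugi.doAs(new PrivilegedExceptionAction<Integer>() {
          @Override
          public Integer run() throws Exception {
            HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
            return conn.getResponseCode();
          }
        });
      }

      static void checkEndpointAcl(String url) throws Exception {
        assertEquals(HttpURLConnection.HTTP_OK, statusAs(ADMIN_PRINCIPAL, url));
        assertEquals(HttpURLConnection.HTTP_FORBIDDEN, statusAs(NON_ADMIN_PRINCIPAL, url));
      }
    }

The status-code contrast is the core contract under test; the content checks (for example "Actions:" on table.jsp or "Directory: /logs/") are only layered on top for the admin case.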
final String expectedAuthorizedContent = ""; - UserGroupInformation admin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation admin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath()); admin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { + @Override + public Void run() throws Exception { // Check the expected content is present in the http response - Pair pair = getMetricsPage(); + Pair pair = getMetricsPage(); if (HttpURLConnection.HTTP_NOT_FOUND == pair.getFirst()) { // Not on hadoop 2 return null; @@ -361,11 +375,12 @@ public void testMetricsAvailableForAdmins() throws Exception { } }); - UserGroupInformation nonAdmin = UserGroupInformation.loginUserFromKeytabAndReturnUGI( - USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); + UserGroupInformation nonAdmin = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(USER_NONE_STR, KEYTAB_FILE.getAbsolutePath()); nonAdmin.doAs(new PrivilegedExceptionAction() { - @Override public Void run() throws Exception { - Pair pair = getMetricsPage(); + @Override + public Void run() throws Exception { + Pair pair = getMetricsPage(); if (HttpURLConnection.HTTP_NOT_FOUND == pair.getFirst()) { // Not on hadoop 2 return null; @@ -380,38 +395,38 @@ private String getInfoServerHostAndPort() { return "http://localhost:" + CLUSTER.getActiveMaster().getInfoServer().getPort(); } - private Pair getLogLevelPage() throws Exception { + private Pair getLogLevelPage() throws Exception { // Build the url which we want to connect to URL url = new URL(getInfoServerHostAndPort() + "/logLevel"); return getUrlContent(url); } - private Pair getTablePage(TableName tn) throws Exception { + private Pair getTablePage(TableName tn) throws Exception { URL url = new URL(getInfoServerHostAndPort() + "/table.jsp?name=" + tn.getNameAsString()); return getUrlContent(url); } - private Pair getLogsPage() throws Exception { + private Pair getLogsPage() throws Exception { URL url = new URL(getInfoServerHostAndPort() + "/logs/"); return getUrlContent(url); } - private Pair getMasterDumpPage() throws Exception { + private Pair getMasterDumpPage() throws Exception { URL url = new URL(getInfoServerHostAndPort() + "/dump"); return getUrlContent(url); } - private Pair getStacksPage() throws Exception { + private Pair getStacksPage() throws Exception { URL url = new URL(getInfoServerHostAndPort() + "/stacks"); return getUrlContent(url); } - private Pair getJmxPage() throws Exception { + private Pair getJmxPage() throws Exception { URL url = new URL(getInfoServerHostAndPort() + "/jmx"); return getUrlContent(url); } - private Pair getMetricsPage() throws Exception { + private Pair getMetricsPage() throws Exception { URL url = new URL(getInfoServerHostAndPort() + "/metrics"); return getUrlContent(url); } @@ -420,9 +435,9 @@ private Pair getMetricsPage() throws Exception { * Retrieves the content of the specified URL. The content will only be returned if the status * code for the operation was HTTP 200/OK. 
*/ - private Pair getUrlContent(URL url) throws Exception { - try (CloseableHttpClient client = createHttpClient( - UserGroupInformation.getCurrentUser().getUserName())) { + private Pair getUrlContent(URL url) throws Exception { + try (CloseableHttpClient client = + createHttpClient(UserGroupInformation.getCurrentUser().getUserName())) { CloseableHttpResponse resp = client.execute(new HttpGet(url.toURI())); int code = resp.getStatusLine().getStatusCode(); if (code == HttpURLConnection.HTTP_OK) { @@ -438,10 +453,10 @@ private CloseableHttpClient createHttpClient(String clientPrincipal) throws Exce // jGSS Kerberos login constant Oid oid = new Oid("1.2.840.113554.1.2.2"); GSSName gssClient = gssManager.createName(clientPrincipal, GSSName.NT_USER_NAME); - GSSCredential credential = gssManager.createCredential( - gssClient, GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); + GSSCredential credential = gssManager.createCredential(gssClient, + GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); - Lookup authRegistry = RegistryBuilder.create() + Lookup authRegistry = RegistryBuilder. create() .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferOutputStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferOutputStream.java index ef627532f8be..4a43224b5f00 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferOutputStream.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferOutputStream.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,14 +37,14 @@ public class TestByteBufferOutputStream { @Test public void testByteBufferReuse() throws IOException { - byte [] someBytes = Bytes.toBytes("some bytes"); + byte[] someBytes = Bytes.toBytes("some bytes"); ByteBuffer bb = ByteBuffer.allocate(someBytes.length); ByteBuffer bbToReuse = write(bb, someBytes); bbToReuse = write(bbToReuse, Bytes.toBytes("less")); assertTrue(bb == bbToReuse); } - private ByteBuffer write(final ByteBuffer bb, final byte [] bytes) throws IOException { + private ByteBuffer write(final ByteBuffer bb, final byte[] bytes) throws IOException { try (ByteBufferOutputStream bbos = new ByteBufferOutputStream(bb)) { bbos.write(bytes); assertTrue(Bytes.compareTo(bytes, bbos.toByteArray(0, bytes.length)) == 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFSDataInputStreamWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFSDataInputStreamWrapper.java index 22b6c62b486b..c21370d04d0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFSDataInputStreamWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFSDataInputStreamWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.io.InputStream; import java.nio.ByteBuffer; import java.util.EnumSet; - import org.apache.hadoop.fs.ByteBufferReadable; import org.apache.hadoop.fs.CanSetDropBehind; import org.apache.hadoop.fs.CanSetReadahead; @@ -44,40 +43,37 @@ public class TestFSDataInputStreamWrapper { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFSDataInputStreamWrapper.class); + HBaseClassTestRule.forClass(TestFSDataInputStreamWrapper.class); @Test public void testUnbuffer() throws Exception { InputStream pc = new ParentClass(); - FSDataInputStreamWrapper fsdisw1 = - new FSDataInputStreamWrapper(new FSDataInputStream(pc)); + FSDataInputStreamWrapper fsdisw1 = new FSDataInputStreamWrapper(new FSDataInputStream(pc)); fsdisw1.unbuffer(); // parent class should be true - assertTrue(((ParentClass)pc).getIsCallUnbuffer()); + assertTrue(((ParentClass) pc).getIsCallUnbuffer()); fsdisw1.close(); InputStream cc1 = new ChildClass1(); - FSDataInputStreamWrapper fsdisw2 = - new FSDataInputStreamWrapper(new FSDataInputStream(cc1)); + FSDataInputStreamWrapper fsdisw2 = new FSDataInputStreamWrapper(new FSDataInputStream(cc1)); fsdisw2.unbuffer(); // child1 class should be true - assertTrue(((ChildClass1)cc1).getIsCallUnbuffer()); + assertTrue(((ChildClass1) cc1).getIsCallUnbuffer()); fsdisw2.close(); } - private class ParentClass extends FSInputStream - implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, - HasEnhancedByteBufferAccess, CanUnbuffer { + private class ParentClass extends FSInputStream implements ByteBufferReadable, CanSetDropBehind, + CanSetReadahead, HasEnhancedByteBufferAccess, CanUnbuffer { public boolean isCallUnbuffer = false; - public boolean getIsCallUnbuffer(){ + public boolean getIsCallUnbuffer() { return isCallUnbuffer; } @Override public void unbuffer() { - isCallUnbuffer = true; + isCallUnbuffer = true; } @Override @@ -86,9 +82,8 @@ public int read() throws IOException { } @Override - public ByteBuffer read(ByteBufferPool paramByteBufferPool, - int paramInt, EnumSet paramEnumSet) - throws IOException, UnsupportedOperationException { + public ByteBuffer read(ByteBufferPool paramByteBufferPool, int paramInt, + EnumSet paramEnumSet) throws IOException, UnsupportedOperationException { return null; } @@ -98,8 +93,7 @@ public void releaseBuffer(ByteBuffer paramByteBuffer) { } @Override - public void setReadahead(Long paramLong) - throws IOException, UnsupportedOperationException { + public void setReadahead(Long paramLong) throws IOException, UnsupportedOperationException { } @@ -130,7 +124,7 @@ public boolean seekToNewSource(long paramLong) throws IOException { } } - private class ChildClass1 extends ParentClass{ + private class ChildClass1 extends ParentClass { @Override public void unbuffer() { isCallUnbuffer = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java index 6a85f9864288..ed89a9ac937e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,10 +45,10 @@ import org.junit.experimental.categories.Category; /** - * Test that FileLink switches between alternate locations - * when the current location moves or gets deleted. + * Test that FileLink switches between alternate locations when the current location moves or gets + * deleted. */ -@Category({IOTests.class, MediumTests.class}) +@Category({ IOTests.class, MediumTests.class }) public class TestFileLink { @ClassRule @@ -90,8 +90,8 @@ public void testHashCode() { } /** - * Test that the returned link from {@link FileLink#open(FileSystem)} can be unwrapped - * to a {@link HdfsDataInputStream} by + * Test that the returned link from {@link FileLink#open(FileSystem)} can be unwrapped to a + * {@link HdfsDataInputStream} by * {@link FileLink#getUnderlyingFileLinkInputStream(FSDataInputStream)} */ @Test @@ -124,8 +124,7 @@ public void testGetUnderlyingFSDataInputStream() throws Exception { } /** - * Test, on HDFS, that the FileLink is still readable - * even when the current file gets renamed. + * Test, on HDFS, that the FileLink is still readable even when the current file gets renamed. */ @Test public void testHDFSLinkReadDuringRename() throws Exception { @@ -149,16 +148,18 @@ public void testHDFSLinkReadDuringRename() throws Exception { private static class MyDistributedFileSystem extends DistributedFileSystem { MyDistributedFileSystem() { } + @Override - public FSDataInputStream open(Path f, final int bufferSize) - throws IOException { + public FSDataInputStream open(Path f, final int bufferSize) throws IOException { throw new RemoteException(FileNotFoundException.class.getName(), ""); } + @Override public Configuration getConf() { return new Configuration(); } } + @Test(expected = FileNotFoundException.class) public void testLinkReadWithMissingFile() throws Exception { HBaseTestingUtil testUtil = new HBaseTestingUtil(); @@ -176,8 +177,8 @@ public void testLinkReadWithMissingFile() throws Exception { } /** - * Test, on a local filesystem, that the FileLink is still readable - * even when the current file gets renamed. + * Test, on a local filesystem, that the FileLink is still readable even when the current file + * gets renamed. */ @Test public void testLocalLinkReadDuringRename() throws IOException { @@ -194,7 +195,7 @@ private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOExce Path originalPath = new Path(rootDir, "test.file"); Path archivedPath = new Path(rootDir, "archived.file"); - writeSomeData(fs, originalPath, 256 << 20, (byte)2); + writeSomeData(fs, originalPath, 256 << 20, (byte) 2); List files = new ArrayList<>(); files.add(originalPath); @@ -208,7 +209,7 @@ private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOExce // Read from origin int n = in.read(data); - dataVerify(data, n, (byte)2); + dataVerify(data, n, (byte) 2); size += n; if (FSUtils.WINDOWS) { @@ -228,7 +229,7 @@ private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOExce // Try to read to the end while ((n = in.read(data)) > 0) { - dataVerify(data, n, (byte)2); + dataVerify(data, n, (byte) 2); size += n; } @@ -241,15 +242,12 @@ private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOExce } /** - * Test that link is still readable even when the current file gets deleted. - * - * NOTE: This test is valid only on HDFS. - * When a file is deleted from a local file-system, it is simply 'unlinked'. 
- * The inode, which contains the file's data, is not deleted until all - * processes have finished with it. - * In HDFS when the request exceed the cached block locations, - * a query to the namenode is performed, using the filename, - * and the deleted file doesn't exists anymore (FileNotFoundException). + * Test that link is still readable even when the current file gets deleted. NOTE: This test is + * valid only on HDFS. When a file is deleted from a local file-system, it is simply 'unlinked'. + * The inode, which contains the file's data, is not deleted until all processes have finished + * with it. In HDFS when the request exceed the cached block locations, a query to the namenode is + * performed, using the filename, and the deleted file doesn't exists anymore + * (FileNotFoundException). */ @Test public void testHDFSLinkReadDuringDelete() throws Exception { @@ -267,7 +265,7 @@ public void testHDFSLinkReadDuringDelete() throws Exception { List files = new ArrayList<>(); for (int i = 0; i < 3; i++) { Path path = new Path(String.format("test-data-%d", i)); - writeSomeData(fs, path, 1 << 20, (byte)i); + writeSomeData(fs, path, 1 << 20, (byte) i); files.add(path); } @@ -279,26 +277,26 @@ public void testHDFSLinkReadDuringDelete() throws Exception { // Switch to file 1 n = in.read(data); - dataVerify(data, n, (byte)0); + dataVerify(data, n, (byte) 0); fs.delete(files.get(0), true); - skipBuffer(in, (byte)0); + skipBuffer(in, (byte) 0); // Switch to file 2 n = in.read(data); - dataVerify(data, n, (byte)1); + dataVerify(data, n, (byte) 1); fs.delete(files.get(1), true); - skipBuffer(in, (byte)1); + skipBuffer(in, (byte) 1); // Switch to file 3 n = in.read(data); - dataVerify(data, n, (byte)2); + dataVerify(data, n, (byte) 2); fs.delete(files.get(2), true); - skipBuffer(in, (byte)2); + skipBuffer(in, (byte) 2); // No more files available try { n = in.read(data); - assert(n <= 0); + assert (n <= 0); } catch (FileNotFoundException e) { assertTrue(true); } @@ -313,7 +311,7 @@ public void testHDFSLinkReadDuringDelete() throws Exception { /** * Write up to 'size' bytes with value 'v' into a new file called 'path'. */ - private void writeSomeData (FileSystem fs, Path path, long size, byte v) throws IOException { + private void writeSomeData(FileSystem fs, Path path, long size, byte v) throws IOException { byte[] data = new byte[4096]; for (int i = 0; i < data.length; i++) { data[i] = v; @@ -346,8 +344,7 @@ private static void skipBuffer(FSDataInputStream in, byte v) throws IOException int n; while ((n = in.read(data)) == data.length) { for (int i = 0; i < data.length; ++i) { - if (data[i] != v) - throw new Exception("File changed"); + if (data[i] != v) throw new Exception("File changed"); } } } catch (Exception e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java index 8d6070af011c..1959f026cb63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,10 +37,10 @@ import org.junit.rules.TestName; /** - * Test that FileLink switches between alternate locations - * when the current location moves or gets deleted. 
+ * Test that FileLink switches between alternate locations when the current location moves or gets + * deleted. */ -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileLink { @ClassRule @@ -52,41 +52,39 @@ public class TestHFileLink { @Test public void testValidLinkNames() { - String validLinkNames[] = {"foo=fefefe-0123456", "ns=foo=abababa-fefefefe"}; + String validLinkNames[] = { "foo=fefefe-0123456", "ns=foo=abababa-fefefefe" }; - for(String name : validLinkNames) { + for (String name : validLinkNames) { Assert.assertTrue("Failed validating:" + name, name.matches(HFileLink.LINK_NAME_REGEX)); } - for(String name : validLinkNames) { + for (String name : validLinkNames) { Assert.assertTrue("Failed validating:" + name, HFileLink.isHFileLink(name)); } String testName = name.getMethodName() + "=fefefe-0123456"; Assert.assertEquals(TableName.valueOf(name.getMethodName()), - HFileLink.getReferencedTableName(testName)); + HFileLink.getReferencedTableName(testName)); Assert.assertEquals("fefefe", HFileLink.getReferencedRegionName(testName)); Assert.assertEquals("0123456", HFileLink.getReferencedHFileName(testName)); Assert.assertEquals(testName, - HFileLink.createHFileLinkName(TableName.valueOf(name.getMethodName()), "fefefe", "0123456")); + HFileLink.createHFileLinkName(TableName.valueOf(name.getMethodName()), "fefefe", "0123456")); testName = "ns=" + name.getMethodName() + "=fefefe-0123456"; Assert.assertEquals(TableName.valueOf("ns", name.getMethodName()), - HFileLink.getReferencedTableName(testName)); + HFileLink.getReferencedTableName(testName)); Assert.assertEquals("fefefe", HFileLink.getReferencedRegionName(testName)); Assert.assertEquals("0123456", HFileLink.getReferencedHFileName(testName)); - Assert.assertEquals(testName, - HFileLink.createHFileLinkName(TableName.valueOf("ns", name.getMethodName()), "fefefe", "0123456")); + Assert.assertEquals(testName, HFileLink + .createHFileLinkName(TableName.valueOf("ns", name.getMethodName()), "fefefe", "0123456")); - for(String name : validLinkNames) { + for (String name : validLinkNames) { Matcher m = HFileLink.LINK_NAME_PATTERN.matcher(name); assertTrue(m.matches()); Assert.assertEquals(HFileLink.getReferencedTableName(name), - TableName.valueOf(m.group(1), m.group(2))); - Assert.assertEquals(HFileLink.getReferencedRegionName(name), - m.group(3)); - Assert.assertEquals(HFileLink.getReferencedHFileName(name), - m.group(4)); + TableName.valueOf(m.group(1), m.group(2))); + Assert.assertEquals(HFileLink.getReferencedRegionName(name), m.group(3)); + Assert.assertEquals(HFileLink.getReferencedHFileName(name), m.group(4)); } } @@ -99,22 +97,22 @@ public void testBackReference() { String encodedRegion = "FEFE"; String cf = "cf1"; - TableName refTables[] = {TableName.valueOf(name.getMethodName()), - TableName.valueOf("ns", name.getMethodName())}; + TableName refTables[] = + { TableName.valueOf(name.getMethodName()), TableName.valueOf("ns", name.getMethodName()) }; - for(TableName refTable : refTables) { + for (TableName refTable : refTables) { Path refTableDir = CommonFSUtils.getTableDir(archiveDir, refTable); Path refRegionDir = HRegion.getRegionDir(refTableDir, encodedRegion); Path refDir = new Path(refRegionDir, cf); Path refLinkDir = new Path(refDir, linkDir); - String refStoreFileName = refTable.getNameAsString().replace( - TableName.NAMESPACE_DELIM, '=') + "=" + encodedRegion + "-" + storeFileName; + String refStoreFileName = 
refTable.getNameAsString().replace(TableName.NAMESPACE_DELIM, '=') + + "=" + encodedRegion + "-" + storeFileName; - TableName tableNames[] = {TableName.valueOf(name.getMethodName() + "1"), - TableName.valueOf("ns", name.getMethodName() + "2"), - TableName.valueOf(name.getMethodName()+ ":" +name.getMethodName())}; + TableName tableNames[] = { TableName.valueOf(name.getMethodName() + "1"), + TableName.valueOf("ns", name.getMethodName() + "2"), + TableName.valueOf(name.getMethodName() + ":" + name.getMethodName()) }; - for( TableName tableName : tableNames) { + for (TableName tableName : tableNames) { Path tableDir = CommonFSUtils.getTableDir(rootDir, tableName); Path regionDir = HRegion.getRegionDir(tableDir, encodedRegion); Path cfDir = new Path(regionDir, cf); @@ -125,21 +123,19 @@ public void testBackReference() { HFileLink.createBackReferenceName(CommonFSUtils.getTableName(tableDir).getNameAsString(), encodedRegion)); - //verify parsing back reference - Pair parsedRef = - HFileLink.parseBackReferenceName(encodedRegion+"."+ - tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '=')); + // verify parsing back reference + Pair parsedRef = HFileLink.parseBackReferenceName(encodedRegion + "." + + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '=')); assertEquals(parsedRef.getFirst(), tableName); assertEquals(encodedRegion, parsedRef.getSecond()); - //verify resolving back reference - Path storeFileDir = new Path(refLinkDir, encodedRegion+"."+ - tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '=')); + // verify resolving back reference + Path storeFileDir = new Path(refLinkDir, encodedRegion + "." + + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '=')); Path linkPath = new Path(cfDir, refStoreFileName); assertEquals(linkPath, HFileLink.getHFileFromBackReference(rootDir, storeFileDir)); } } } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index e27e2bf7ff64..2fc4182504a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 398817f5c2c1..e3d8f4819a69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -78,11 +78,10 @@ import org.slf4j.LoggerFactory; /** - * Testing the sizing that HeapSize offers and compares to the size given by - * ClassSize. + * Testing the sizing that HeapSize offers and compares to the size given by ClassSize. 
*/ -@Category({IOTests.class, SmallTests.class}) -public class TestHeapSize { +@Category({ IOTests.class, SmallTests.class }) +public class TestHeapSize { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -120,7 +119,7 @@ public void testNativeSizes() throws IOException { cl = ArrayList.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.ARRAYLIST; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -129,7 +128,7 @@ public void testNativeSizes() throws IOException { cl = ByteBuffer.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.BYTE_BUFFER; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -138,26 +137,26 @@ public void testNativeSizes() throws IOException { cl = Integer.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.INTEGER; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } // Map.Entry - // Interface is public, all others are not. Hard to size via ClassSize -// cl = Map.Entry.class; -// expected = ClassSize.estimateBase(cl, false); -// actual = ClassSize.MAP_ENTRY; -// if(expected != actual) { -// ClassSize.estimateBase(cl, true); -// assertEquals(expected, actual); -// } + // Interface is public, all others are not. Hard to size via ClassSize + // cl = Map.Entry.class; + // expected = ClassSize.estimateBase(cl, false); + // actual = ClassSize.MAP_ENTRY; + // if(expected != actual) { + // ClassSize.estimateBase(cl, true); + // assertEquals(expected, actual); + // } // Object cl = Object.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.align(ClassSize.OBJECT); - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -166,7 +165,7 @@ public void testNativeSizes() throws IOException { cl = TreeMap.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.TREEMAP; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -175,7 +174,7 @@ public void testNativeSizes() throws IOException { cl = String.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.STRING; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -184,7 +183,7 @@ public void testNativeSizes() throws IOException { cl = ConcurrentHashMap.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.CONCURRENT_HASHMAP; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -193,7 +192,7 @@ public void testNativeSizes() throws IOException { cl = ConcurrentSkipListMap.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.CONCURRENT_SKIPLISTMAP; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -202,7 +201,7 @@ public void testNativeSizes() throws IOException { cl = CellArrayMap.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.CELL_ARRAY_MAP; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -211,7 +210,7 @@ public void testNativeSizes() throws IOException { cl = ReentrantReadWriteLock.class; 
expected = ClassSize.estimateBase(cl, false); actual = ClassSize.REENTRANT_LOCK; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -220,7 +219,7 @@ public void testNativeSizes() throws IOException { cl = AtomicLong.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.ATOMIC_LONG; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -229,7 +228,7 @@ public void testNativeSizes() throws IOException { cl = AtomicInteger.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.ATOMIC_INTEGER; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -238,7 +237,7 @@ public void testNativeSizes() throws IOException { cl = AtomicBoolean.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.ATOMIC_BOOLEAN; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -247,7 +246,7 @@ public void testNativeSizes() throws IOException { cl = CopyOnWriteArraySet.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.COPYONWRITE_ARRAYSET; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -256,7 +255,7 @@ public void testNativeSizes() throws IOException { cl = CopyOnWriteArrayList.class; expected = ClassSize.estimateBase(cl, false); actual = ClassSize.COPYONWRITE_ARRAYLIST; - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -290,9 +289,8 @@ public void testNativeSizes() throws IOException { } /** - * Testing the classes that implements HeapSize and are a part of 0.20. - * Some are not tested here for example BlockIndex which is tested in - * TestHFile since it is a non public class + * Testing the classes that implements HeapSize and are a part of 0.20. 
Some are not tested here + * for example BlockIndex which is tested in TestHFile since it is a non public class * @throws IOException */ @Test @@ -301,21 +299,21 @@ public void testSizes() throws IOException { long expected; long actual; - //KeyValue + // KeyValue cl = KeyValue.class; expected = ClassSize.estimateBase(cl, false); KeyValue kv = new KeyValue(); actual = kv.heapSize(); - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } - //LruBlockCache Overhead + // LruBlockCache Overhead cl = LruBlockCache.class; actual = LruBlockCache.CACHE_FIXED_OVERHEAD; expected = ClassSize.estimateBase(cl, false); - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -328,7 +326,7 @@ public void testSizes() throws IOException { expected = ClassSize.estimateBase(cl, false); expected += ClassSize.estimateBase(String.class, false); expected += ClassSize.estimateBase(ByteBuffer.class, false); - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); ClassSize.estimateBase(String.class, true); ClassSize.estimateBase(ByteBuffer.class, true); @@ -339,7 +337,7 @@ public void testSizes() throws IOException { cl = DefaultMemStore.class; actual = DefaultMemStore.FIXED_OVERHEAD; expected = ClassSize.estimateBase(cl, false); - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -359,8 +357,8 @@ public void testSizes() throws IOException { expected += ClassSize.estimateBase(AtomicBoolean.class, false); expected += ClassSize.estimateBase(AtomicBoolean.class, false); expected += ClassSize.estimateBase(CompactionPipeline.class, false); - expected += ClassSize.estimateBase(LinkedList.class, false); //inside CompactionPipeline - expected += ClassSize.estimateBase(LinkedList.class, false); //inside CompactionPipeline + expected += ClassSize.estimateBase(LinkedList.class, false); // inside CompactionPipeline + expected += ClassSize.estimateBase(LinkedList.class, false); // inside CompactionPipeline expected += ClassSize.estimateBase(MemStoreCompactor.class, false); expected += ClassSize.estimateBase(AtomicBoolean.class, false);// inside MemStoreCompactor if (expected != actual) { @@ -388,7 +386,7 @@ public void testSizes() throws IOException { ClassSize.estimateBase(AtomicLong.class, true); ClassSize.estimateBase(AtomicReference.class, true); ClassSize.estimateBase(CellSet.class, true); - ClassSize.estimateBase(ReentrantReadWriteLock.class,true); + ClassSize.estimateBase(ReentrantReadWriteLock.class, true); assertEquals(expected, actual); } @@ -409,10 +407,10 @@ public void testSizes() throws IOException { ClassSize.estimateBase(AtomicLong.class, true); ClassSize.estimateBase(AtomicReference.class, true); ClassSize.estimateBase(CellSet.class, true); - ClassSize.estimateBase(ReentrantReadWriteLock.class,true); + ClassSize.estimateBase(ReentrantReadWriteLock.class, true); ClassSize.estimateBase(SyncTimeRangeTracker.class, true); ClassSize.estimateBase(ConcurrentSkipListMap.class, true); - ClassSize.estimateBase(AtomicBoolean.class,true); + ClassSize.estimateBase(AtomicBoolean.class, true); assertEquals(expected, actual); } @@ -431,7 +429,7 @@ public void testSizes() throws IOException { ClassSize.estimateBase(AtomicLong.class, true); ClassSize.estimateBase(AtomicReference.class, true); ClassSize.estimateBase(CellSet.class, true); - 
ClassSize.estimateBase(ReentrantReadWriteLock.class,true); + ClassSize.estimateBase(ReentrantReadWriteLock.class, true); ClassSize.estimateBase(NonSyncTimeRangeTracker.class, true); assertEquals(expected, actual); } @@ -451,7 +449,7 @@ public void testSizes() throws IOException { ClassSize.estimateBase(AtomicLong.class, true); ClassSize.estimateBase(AtomicReference.class, true); ClassSize.estimateBase(CellSet.class, true); - ClassSize.estimateBase(ReentrantReadWriteLock.class,true); + ClassSize.estimateBase(ReentrantReadWriteLock.class, true); ClassSize.estimateBase(NonSyncTimeRangeTracker.class, true); ClassSize.estimateBase(ConcurrentSkipListMap.class, true); assertEquals(expected, actual); @@ -471,7 +469,7 @@ public void testSizes() throws IOException { ClassSize.estimateBase(AtomicLong.class, true); ClassSize.estimateBase(AtomicReference.class, true); ClassSize.estimateBase(CellSet.class, true); - ClassSize.estimateBase(ReentrantReadWriteLock.class,true); + ClassSize.estimateBase(ReentrantReadWriteLock.class, true); ClassSize.estimateBase(NonSyncTimeRangeTracker.class, true); ClassSize.estimateBase(CellArrayMap.class, true); assertEquals(expected, actual); @@ -481,7 +479,7 @@ public void testSizes() throws IOException { cl = HStore.class; actual = HStore.FIXED_OVERHEAD; expected = ClassSize.estimateBase(cl, false); - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } @@ -507,7 +505,7 @@ public void testSizes() throws IOException { // size of strings is hard. cl = BlockCacheKey.class; actual = BlockCacheKey.FIXED_OVERHEAD; - expected = ClassSize.estimateBase(cl, false); + expected = ClassSize.estimateBase(cl, false); if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); @@ -516,7 +514,7 @@ public void testSizes() throws IOException { // Currently NOT testing Deep Overheads of many of these classes. // Deep overheads cover a vast majority of stuff, but will not be 100% // accurate because it's unclear when we're referencing stuff that's already - // accounted for. But we have satisfied our two core requirements. + // accounted for. But we have satisfied our two core requirements. // Sizing is quite accurate now, and our tests will throw errors if // any of these classes are modified without updating overhead sizes. 
} @@ -544,14 +542,14 @@ public void testHFileBlockSize() throws IOException { } @Test - public void testMutations(){ + public void testMutations() { Class cl; long expected; long actual; cl = TimeRange.class; actual = ClassSize.TIMERANGE; - expected = ClassSize.estimateBase(cl, false); + expected = ClassSize.estimateBase(cl, false); if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); @@ -561,7 +559,7 @@ public void testMutations(){ cl = Put.class; actual = Mutation.MUTATION_OVERHEAD + ClassSize.align(ClassSize.ARRAY); expected = ClassSize.estimateBase(cl, false); - //The actual TreeMap is not included in the above calculation + // The actual TreeMap is not included in the above calculation expected += ClassSize.align(ClassSize.TREEMAP); expected += ClassSize.align(ClassSize.INTEGER); // priority if (expected != actual) { @@ -571,8 +569,8 @@ public void testMutations(){ cl = Delete.class; actual = Mutation.MUTATION_OVERHEAD + ClassSize.align(ClassSize.ARRAY); - expected = ClassSize.estimateBase(cl, false); - //The actual TreeMap is not included in the above calculation + expected = ClassSize.estimateBase(cl, false); + // The actual TreeMap is not included in the above calculation expected += ClassSize.align(ClassSize.TREEMAP); expected += ClassSize.align(ClassSize.INTEGER); // priority if (expected != actual) { @@ -608,7 +606,7 @@ public void testObjectSize() throws IOException { @Test public void testAutoCalcFixedOverHead() { Class[] classList = new Class[] { HFileContext.class, HRegion.class, BlockCacheKey.class, - HFileBlock.class, HStore.class, LruBlockCache.class, StoreContext.class }; + HFileBlock.class, HStore.class, LruBlockCache.class, StoreContext.class }; for (Class cl : classList) { // do estimate in advance to ensure class is loaded ClassSize.estimateBase(cl, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java index 3ab6c826b66f..dca1a30061bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
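The TestHeapSize hunks are mechanical repetitions of a single idiom: estimate a class's footprint from its layout with ClassSize.estimateBase(cl, false) and compare it to the hand-maintained constant (ClassSize.ARRAYLIST, LruBlockCache.CACHE_FIXED_OVERHEAD, Mutation.MUTATION_OVERHEAD, and so on), re-running the estimate with the debug flag set when they disagree so the breakdown is logged before the assertion fails. A compact sketch of that check, with a hypothetical helper name:

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hbase.util.ClassSize;

    public class HeapOverheadCheckSketch {
      // Estimate the shallow size of a class from its layout and require that it
      // matches the constant used by production code for heap accounting.
      static void assertOverheadMatches(Class<?> cl, long hardCodedOverhead) {
        long expected = ClassSize.estimateBase(cl, false);
        if (expected != hardCodedOverhead) {
          // Re-run with debug=true so the per-field breakdown is printed
          // before the mismatch is reported.
          ClassSize.estimateBase(cl, true);
          assertEquals(expected, hardCodedOverhead);
        }
      }
    }

Usage mirrors the test body, for example assertOverheadMatches(java.util.ArrayList.class, ClassSize.ARRAYLIST).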
See the NOTICE file * distributed with this work for additional information @@ -41,23 +41,20 @@ public class TestImmutableBytesWritable { @Test public void testHash() throws Exception { - assertEquals( - new ImmutableBytesWritable(Bytes.toBytes("xxabc"), 2, 3).hashCode(), + assertEquals(new ImmutableBytesWritable(Bytes.toBytes("xxabc"), 2, 3).hashCode(), new ImmutableBytesWritable(Bytes.toBytes("abc")).hashCode()); - assertEquals( - new ImmutableBytesWritable(Bytes.toBytes("xxabcd"), 2, 3).hashCode(), + assertEquals(new ImmutableBytesWritable(Bytes.toBytes("xxabcd"), 2, 3).hashCode(), new ImmutableBytesWritable(Bytes.toBytes("abc")).hashCode()); - assertNotSame( - new ImmutableBytesWritable(Bytes.toBytes("xxabc"), 2, 3).hashCode(), + assertNotSame(new ImmutableBytesWritable(Bytes.toBytes("xxabc"), 2, 3).hashCode(), new ImmutableBytesWritable(Bytes.toBytes("xxabc"), 2, 2).hashCode()); } @Test public void testSpecificCompare() { - ImmutableBytesWritable ibw1 = new ImmutableBytesWritable(new byte[]{0x0f}); - ImmutableBytesWritable ibw2 = new ImmutableBytesWritable(new byte[]{0x00, 0x00}); + ImmutableBytesWritable ibw1 = new ImmutableBytesWritable(new byte[] { 0x0f }); + ImmutableBytesWritable ibw2 = new ImmutableBytesWritable(new byte[] { 0x00, 0x00 }); ImmutableBytesWritable.Comparator c = new ImmutableBytesWritable.Comparator(); - assertFalse("ibw1 < ibw2", c.compare( ibw1, ibw2 ) < 0 ); + assertFalse("ibw1 < ibw2", c.compare(ibw1, ibw2) < 0); } @Test @@ -70,46 +67,35 @@ public void testComparison() throws Exception { runTests("", "a", -1); } - private void runTests(String aStr, String bStr, int signum) - throws Exception { - ImmutableBytesWritable a = new ImmutableBytesWritable( - Bytes.toBytes(aStr)); - ImmutableBytesWritable b = new ImmutableBytesWritable( - Bytes.toBytes(bStr)); + private void runTests(String aStr, String bStr, int signum) throws Exception { + ImmutableBytesWritable a = new ImmutableBytesWritable(Bytes.toBytes(aStr)); + ImmutableBytesWritable b = new ImmutableBytesWritable(Bytes.toBytes(bStr)); doComparisonsOnObjects(a, b, signum); doComparisonsOnRaw(a, b, signum); // Tests for when the offset is non-zero - a = new ImmutableBytesWritable(Bytes.toBytes("xxx" + aStr), - 3, aStr.length()); - b = new ImmutableBytesWritable(Bytes.toBytes("yy" + bStr), - 2, bStr.length()); + a = new ImmutableBytesWritable(Bytes.toBytes("xxx" + aStr), 3, aStr.length()); + b = new ImmutableBytesWritable(Bytes.toBytes("yy" + bStr), 2, bStr.length()); doComparisonsOnObjects(a, b, signum); doComparisonsOnRaw(a, b, signum); // Tests for when offset is nonzero and length doesn't extend to end - a = new ImmutableBytesWritable(Bytes.toBytes("xxx" + aStr + "zzz"), - 3, aStr.length()); - b = new ImmutableBytesWritable(Bytes.toBytes("yy" + bStr + "aaa"), - 2, bStr.length()); + a = new ImmutableBytesWritable(Bytes.toBytes("xxx" + aStr + "zzz"), 3, aStr.length()); + b = new ImmutableBytesWritable(Bytes.toBytes("yy" + bStr + "aaa"), 2, bStr.length()); doComparisonsOnObjects(a, b, signum); doComparisonsOnRaw(a, b, signum); } - private int signum(int i) { if (i > 0) return 1; if (i == 0) return 0; return -1; } - private void doComparisonsOnRaw(ImmutableBytesWritable a, - ImmutableBytesWritable b, - int expectedSignum) - throws IOException { - ImmutableBytesWritable.Comparator comparator = - new ImmutableBytesWritable.Comparator(); + private void doComparisonsOnRaw(ImmutableBytesWritable a, ImmutableBytesWritable b, + int expectedSignum) throws IOException { + ImmutableBytesWritable.Comparator comparator = new 
ImmutableBytesWritable.Comparator(); ByteArrayOutputStream baosA = new ByteArrayOutputStream(); ByteArrayOutputStream baosB = new ByteArrayOutputStream(); @@ -117,31 +103,23 @@ private void doComparisonsOnRaw(ImmutableBytesWritable a, a.write(new DataOutputStream(baosA)); b.write(new DataOutputStream(baosB)); - assertEquals( - "Comparing " + a + " and " + b + " as raw", - signum(comparator.compare(baosA.toByteArray(), 0, baosA.size(), - baosB.toByteArray(), 0, baosB.size())), + assertEquals("Comparing " + a + " and " + b + " as raw", signum(comparator + .compare(baosA.toByteArray(), 0, baosA.size(), baosB.toByteArray(), 0, baosB.size())), expectedSignum); assertEquals( - "Comparing " + a + " and " + b + " as raw (inverse)", - -signum(comparator.compare(baosB.toByteArray(), 0, baosB.size(), - baosA.toByteArray(), 0, baosA.size())), + "Comparing " + a + " and " + b + " as raw (inverse)", -signum(comparator + .compare(baosB.toByteArray(), 0, baosB.size(), baosA.toByteArray(), 0, baosA.size())), expectedSignum); } - private void doComparisonsOnObjects(ImmutableBytesWritable a, - ImmutableBytesWritable b, - int expectedSignum) { - ImmutableBytesWritable.Comparator comparator = - new ImmutableBytesWritable.Comparator(); - assertEquals( - "Comparing " + a + " and " + b + " as objects", - signum(comparator.compare(a, b)), expectedSignum); - assertEquals( - "Comparing " + a + " and " + b + " as objects (inverse)", + private void doComparisonsOnObjects(ImmutableBytesWritable a, ImmutableBytesWritable b, + int expectedSignum) { + ImmutableBytesWritable.Comparator comparator = new ImmutableBytesWritable.Comparator(); + assertEquals("Comparing " + a + " and " + b + " as objects", signum(comparator.compare(a, b)), + expectedSignum); + assertEquals("Comparing " + a + " and " + b + " as objects (inverse)", -signum(comparator.compare(b, a)), expectedSignum); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestMetricsIO.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestMetricsIO.java index c6062044a928..5234d3cb597e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestMetricsIO.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestMetricsIO.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,9 @@ public class TestMetricsIO { public void testMetrics() { MetricsIO metrics = new MetricsIO(new MetricsIOWrapper() { @Override - public long getChecksumFailures() { return 40; } + public long getChecksumFailures() { + return 40; + } }); metrics.updateFsReadTime(100); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java index 43e67d8f0643..7e10a48c596f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -64,19 +63,15 @@ public void doTest(Configuration conf, Path path, Compression.Algorithm compress // Iterate through data block encoding and compression combinations CacheConfig cacheConf = new CacheConfig(conf); - HFileContext fileContext = new HFileContextBuilder() - .withBlockSize(4096) // small block - .withCompression(compression) - .build(); + HFileContext fileContext = new HFileContextBuilder().withBlockSize(4096) // small block + .withCompression(compression).build(); // write a new test HFile LOG.info("Writing with " + fileContext); FSDataOutputStream out = FS.create(path); - HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf) - .withOutputStream(out) - .withFileContext(fileContext) - .create(); + HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf).withOutputStream(out) + .withFileContext(fileContext).create(); try { - for (KeyValue kv: testKvs) { + for (KeyValue kv : testKvs) { writer.append(kv); } } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java index 9c078772af39..760a12920707 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, MediumTests.class}) +@Category({ IOTests.class, MediumTests.class }) public class TestBufferedDataBlockEncoder { @ClassRule @@ -63,8 +63,8 @@ public class TestBufferedDataBlockEncoder { @Test public void testEnsureSpaceForKey() { - BufferedDataBlockEncoder.SeekerState state = new BufferedDataBlockEncoder.SeekerState( - new ObjectIntPair<>(), false); + BufferedDataBlockEncoder.SeekerState state = + new BufferedDataBlockEncoder.SeekerState(new ObjectIntPair<>(), false); for (int i = 1; i <= 65536; ++i) { state.keyLength = i; state.ensureSpaceForKey(); @@ -113,13 +113,13 @@ public void testKVCodecWithTagsForDecodedCellsWithNoTags() throws Exception { kv2.getTagsLength()); KeyValue kv3 = new KeyValue(Bytes.toBytes("r3"), Bytes.toBytes("cf"), Bytes.toBytes("qual"), HConstants.LATEST_TIMESTAMP, Bytes.toBytes("3")); - BufferedDataBlockEncoder.OffheapDecodedExtendedCell - c3 = new BufferedDataBlockEncoder.OffheapDecodedExtendedCell(ByteBuffer.wrap(kv2.getKey()), - kv2.getRowLength(), kv2.getFamilyOffset() - KeyValue.ROW_OFFSET, kv2.getFamilyLength(), - kv2.getQualifierOffset() - KeyValue.ROW_OFFSET, kv2.getQualifierLength(), - kv2.getTimestamp(), kv2.getTypeByte(), ByteBuffer.wrap(kv2.getValueArray()), - kv2.getValueOffset(), kv2.getValueLength(), kv2.getSequenceId(), - ByteBuffer.wrap(kv2.getTagsArray()), kv2.getTagsOffset(), kv2.getTagsLength()); + BufferedDataBlockEncoder.OffheapDecodedExtendedCell c3 = + new BufferedDataBlockEncoder.OffheapDecodedExtendedCell(ByteBuffer.wrap(kv2.getKey()), + kv2.getRowLength(), kv2.getFamilyOffset() - KeyValue.ROW_OFFSET, kv2.getFamilyLength(), + kv2.getQualifierOffset() - KeyValue.ROW_OFFSET, kv2.getQualifierLength(), + kv2.getTimestamp(), kv2.getTypeByte(), ByteBuffer.wrap(kv2.getValueArray()), + kv2.getValueOffset(), kv2.getValueLength(), kv2.getSequenceId(), + ByteBuffer.wrap(kv2.getTagsArray()), kv2.getTagsOffset(), kv2.getTagsLength()); ByteArrayOutputStream os = new ByteArrayOutputStream(); KeyValueCodecWithTags codec = new KeyValueCodecWithTags(); Encoder encoder = codec.getEncoder(os); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java index 8ac59b94346e..74c3a55d3d21 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -61,7 +60,7 @@ /** * Tests changing data block encoding settings of a column family. 
*/ -@Category({IOTests.class, LargeTests.class}) +@Category({ IOTests.class, LargeTests.class }) public class TestChangingEncoding { @ClassRule @@ -83,8 +82,7 @@ public class TestChangingEncoding { private ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder; private TableName tableName; - private static final List ENCODINGS_TO_ITERATE = - createEncodingsToIterate(); + private static final List ENCODINGS_TO_ITERATE = createEncodingsToIterate(); private static final List createEncodingsToIterate() { List encodings = new ArrayList<>(Arrays.asList(DataBlockEncoding.values())); @@ -97,10 +95,8 @@ private static final List createEncodingsToIterate() { private void prepareTest(String testId) throws IOException { tableName = TableName.valueOf("test_table_" + testId); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tableName); - columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CF)); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableName); + columnFamilyDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CF)); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build()); try (Admin admin = TEST_UTIL.getConnection().getAdmin()) { admin.createTable(tableDescriptorBuilder.build()); @@ -115,7 +111,7 @@ public static void setUpBeforeClass() throws Exception { // Disabling split to make sure split does not cause modify column to wait which timesout test // sometime conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - "org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy"); + "org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy"); // ((Log4JLogger)RpcServerImplementation.LOG).getLogger().setLevel(Level.TRACE); // ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.TRACE); TEST_UTIL.startMiniCluster(); @@ -135,12 +131,10 @@ private static byte[] getQualifier(int j) { } private static byte[] getValue(int batchId, int i, int j) { - return Bytes.toBytes("value_for_" + Bytes.toString(getRowKey(batchId, i)) - + "_col" + j); + return Bytes.toBytes("value_for_" + Bytes.toString(getRowKey(batchId, i)) + "_col" + j); } - static void writeTestDataBatch(TableName tableName, - int batchId) throws Exception { + static void writeTestDataBatch(TableName tableName, int batchId) throws Exception { LOG.debug("Writing test data batch " + batchId); List puts = new ArrayList<>(); for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) { @@ -157,8 +151,7 @@ static void writeTestDataBatch(TableName tableName, } } - static void verifyTestDataBatch(TableName tableName, - int batchId) throws Exception { + static void verifyTestDataBatch(TableName tableName, int batchId) throws Exception { LOG.debug("Verifying test data batch " + batchId); Table table = TEST_UTIL.getConnection().getTable(tableName); for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) { @@ -186,10 +179,9 @@ private void verifyAllData() throws Exception { } } - private void setEncodingConf(DataBlockEncoding encoding, - boolean onlineChange) throws Exception { - LOG.debug("Setting CF encoding to " + encoding + " (ordinal=" - + encoding.ordinal() + "), onlineChange=" + onlineChange); + private void setEncodingConf(DataBlockEncoding encoding, boolean onlineChange) throws Exception { + LOG.debug("Setting CF encoding to " + encoding + " (ordinal=" + encoding.ordinal() + + "), onlineChange=" + onlineChange); columnFamilyDescriptorBuilder.setDataBlockEncoding(encoding); try (Admin admin = 
TEST_UTIL.getConnection().getAdmin()) { if (!onlineChange) { @@ -210,7 +202,7 @@ private void setEncodingConf(DataBlockEncoding encoding, @Test public void testChangingEncoding() throws Exception { prepareTest("ChangingEncoding"); - for (boolean onlineChange : new boolean[]{false, true}) { + for (boolean onlineChange : new boolean[] { false, true }) { for (DataBlockEncoding encoding : ENCODINGS_TO_ITERATE) { setEncodingConf(encoding, onlineChange); writeSomeNewData(); @@ -222,7 +214,7 @@ public void testChangingEncoding() throws Exception { @Test public void testChangingEncodingWithCompaction() throws Exception { prepareTest("ChangingEncodingWithCompaction"); - for (boolean onlineChange : new boolean[]{false, true}) { + for (boolean onlineChange : new boolean[] { false, true }) { for (DataBlockEncoding encoding : ENCODINGS_TO_ITERATE) { setEncodingConf(encoding, onlineChange); writeSomeNewData(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index e4d1aa914c6d..9377abc922c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -31,7 +31,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; @@ -66,10 +65,10 @@ import org.slf4j.LoggerFactory; /** - * Test all of the data block encoding algorithms for correctness. Most of the - * class generate data which will test different branches in code. + * Test all of the data block encoding algorithms for correctness. Most of the class generate data + * which will test different branches in code. */ -@Category({IOTests.class, LargeTests.class}) +@Category({ IOTests.class, LargeTests.class }) @RunWith(Parameterized.class) public class TestDataBlockEncoders { @@ -82,8 +81,8 @@ public class TestDataBlockEncoders { private static int NUMBER_OF_KV = 10000; private static int NUM_RANDOM_SEEKS = 1000; - private static int ENCODED_DATA_OFFSET = HConstants.HFILEBLOCK_HEADER_SIZE - + DataBlockEncoding.ID_SIZE; + private static int ENCODED_DATA_OFFSET = + HConstants.HFILEBLOCK_HEADER_SIZE + DataBlockEncoding.ID_SIZE; static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE]; private final Configuration conf = HBaseConfiguration.create(); @@ -107,11 +106,9 @@ public TestDataBlockEncoders(boolean includesMemstoreTS, boolean includesTag, private HFileBlockEncodingContext getEncodingContext(Configuration conf, Compression.Algorithm algo, DataBlockEncoding encoding) { DataBlockEncoder encoder = encoding.getEncoder(); - HFileContext meta = new HFileContextBuilder() - .withHBaseCheckSum(false) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTags) - .withCompression(algo).build(); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTags).withCompression(algo).build(); if (encoder != null) { return encoder.newDataBlockEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, meta); } else { @@ -121,9 +118,7 @@ private HFileBlockEncodingContext getEncodingContext(Configuration conf, /** * Test data block encoding of empty KeyValue. 
- * - * @throws IOException - * On test failure. + * @throws IOException On test failure. */ @Test public void testEmptyKeyValues() throws IOException { @@ -148,9 +143,7 @@ public void testEmptyKeyValues() throws IOException { /** * Test KeyValues with negative timestamp. - * - * @throws IOException - * On test failure. + * @throws IOException On test failure. */ @Test public void testNegativeTimestamps() throws IOException { @@ -173,10 +166,8 @@ public void testNegativeTimestamps() throws IOException { testEncodersOnDataset(kvList, includesMemstoreTS, includesTags); } - /** - * Test whether compression -> decompression gives the consistent results on - * pseudorandom sample. + * Test whether compression -> decompression gives the consistent results on pseudorandom sample. * @throws IOException On test failure. */ @Test @@ -202,15 +193,12 @@ public void testSeekingOnSample() throws IOException { } LOG.info("Encoder: " + encoder); ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, - getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); - HFileContext meta = new HFileContextBuilder() - .withHBaseCheckSum(false) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTags) - .withCompression(Compression.Algorithm.NONE) - .build(); + getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build(); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); encodedSeekers.add(seeker); } @@ -238,7 +226,7 @@ public void testSeekingOnSample() throws IOException { for (boolean seekBefore : new boolean[] { false, true }) { checkSeekingConsistency(encodedSeekers, seekBefore, sampleKv.get(sampleKv.size() - 1)); KeyValue midKv = sampleKv.get(sampleKv.size() / 2); - Cell lastMidKv =PrivateCellUtil.createLastOnRowCol(midKv); + Cell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv); checkSeekingConsistency(encodedSeekers, seekBefore, lastMidKv); } LOG.info("Done"); @@ -276,15 +264,12 @@ public void testNextOnSample() throws IOException { } DataBlockEncoder encoder = encoding.getEncoder(); ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, - getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); - HFileContext meta = new HFileContextBuilder() - .withHBaseCheckSum(false) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTags) - .withCompression(Compression.Algorithm.NONE) - .build(); + getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build(); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); int i = 0; do { @@ -292,13 +277,14 @@ public void testNextOnSample() throws IOException { Cell cell = seeker.getCell(); if 
(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, expectedKeyValue, cell) != 0) { - int commonPrefix = PrivateCellUtil - .findCommonPrefixInFlatKey(expectedKeyValue, cell, false, true); - fail(String.format("next() produces wrong results " - + "encoder: %s i: %d commonPrefix: %d" + "\n expected %s\n actual %s", encoder - .toString(), i, commonPrefix, Bytes.toStringBinary(expectedKeyValue.getBuffer(), - expectedKeyValue.getKeyOffset(), expectedKeyValue.getKeyLength()), CellUtil.toString( - cell, false))); + int commonPrefix = + PrivateCellUtil.findCommonPrefixInFlatKey(expectedKeyValue, cell, false, true); + fail(String.format( + "next() produces wrong results " + "encoder: %s i: %d commonPrefix: %d" + + "\n expected %s\n actual %s", + encoder.toString(), i, commonPrefix, Bytes.toStringBinary(expectedKeyValue.getBuffer(), + expectedKeyValue.getKeyOffset(), expectedKeyValue.getKeyLength()), + CellUtil.toString(cell, false))); } i++; } while (seeker.next()); @@ -319,7 +305,7 @@ public void testFirstKeyInBlockOnSample() throws IOException { } DataBlockEncoder encoder = encoding.getEncoder(); ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, - getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); + getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData); Cell key = encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer)); KeyValue firstKv = sampleKv.get(0); if (0 != PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, key, firstKv)) { @@ -343,10 +329,10 @@ public void testRowIndexWithTagsButNoTagsInCell() throws IOException { ByteBuffer encodedBuffer = encodeKeyValues(encoding, kvList, getEncodingContext(conf, Algorithm.NONE, encoding), false); HFileContext meta = - new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build(); + new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build(); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); Cell cell = seeker.getCell(); Assert.assertEquals(expectedKV.getLength(), ((KeyValue) cell).getLength()); @@ -388,8 +374,8 @@ private void checkSeekingConsistency(List encode private void testEncodersOnDataset(List kvList, boolean includesMemstoreTS, boolean includesTags) throws IOException { - ByteBuffer unencodedDataBuf = RedundantKVGenerator.convertKvToByteBuffer(kvList, - includesMemstoreTS); + ByteBuffer unencodedDataBuf = + RedundantKVGenerator.convertKvToByteBuffer(kvList, includesMemstoreTS); HFileContext fileContext = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS) .withIncludesTags(includesTags).build(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { @@ -397,9 +383,8 @@ private void testEncodersOnDataset(List kvList, boolean includesMemsto if (encoder == null) { continue; } - HFileBlockEncodingContext encodingContext = - new HFileBlockDefaultEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, - fileContext); + HFileBlockEncodingContext encodingContext = new HFileBlockDefaultEncodingContext(conf, + encoding, HFILEBLOCK_DUMMY_HEADER, fileContext); ByteArrayOutputStream baos = new 
ByteArrayOutputStream(); baos.write(HFILEBLOCK_DUMMY_HEADER); DataOutputStream dos = new DataOutputStream(baos); @@ -442,9 +427,9 @@ private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf, encodedData.length - ENCODED_DATA_OFFSET); DataInputStream dis = new DataInputStream(bais); ByteBuffer actualDataset; - HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false) - .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags) - .withCompression(Compression.Algorithm.NONE).build(); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build(); actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(conf, meta)); actualDataset.rewind(); @@ -452,6 +437,6 @@ private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf, // the // mvcc in it. assertEquals("Encoding -> decoding gives different results for " + encoder, - Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset)); + Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoding.java index c5a39f54c9ef..607d7026cf48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoding.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java index 59616959d1ee..b69ebcd60597 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ /** * Tests encoded seekers by loading and reading values. */ -@Category({IOTests.class, LargeTests.class}) +@Category({ IOTests.class, LargeTests.class }) @RunWith(Parameterized.class) public class TestEncodedSeekers { @@ -110,28 +110,25 @@ public TestEncodedSeekers(DataBlockEncoding encoding, boolean includeTags, boole public void testEncodedSeeker() throws IOException { System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : " + includeTags + ", compressTags : " + compressTags); - if(includeTags) { + if (includeTags) { testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3); } LruBlockCache cache = (LruBlockCache) BlockCacheFactory.createBlockCache(testUtil.getConfiguration()); // Need to disable default row bloom filter for this test to pass. - ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(CF_BYTES).setMaxVersions(MAX_VERSIONS). - setDataBlockEncoding(encoding). - setBlocksize(BLOCK_SIZE). - setBloomFilterType(BloomType.NONE). 
- setCompressTags(compressTags).build(); + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(CF_BYTES) + .setMaxVersions(MAX_VERSIONS).setDataBlockEncoding(encoding).setBlocksize(BLOCK_SIZE) + .setBloomFilterType(BloomType.NONE).setCompressTags(compressTags).build(); HRegion region = testUtil.createTestRegion(TABLE_NAME, cfd, cache); - //write the data, but leave some in the memstore + // write the data, but leave some in the memstore doPuts(region); - //verify correctness when memstore contains data + // verify correctness when memstore contains data doGets(region); - //verify correctness again after compacting + // verify correctness again after compacting region.compact(false); doGets(region); @@ -146,9 +143,9 @@ public void testEncodedSeeker() throws IOException { assertTrue(encodingCounts.get(encodingInCache) > 0); } - private void doPuts(HRegion region) throws IOException{ + private void doPuts(HRegion region) throws IOException { LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE); - for (int i = 0; i < NUM_ROWS; ++i) { + for (int i = 0; i < NUM_ROWS; ++i) { byte[] key = Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(i)); for (int j = 0; j < NUM_COLS_PER_ROW; ++j) { Put put = new Put(key); @@ -163,9 +160,9 @@ private void doPuts(HRegion region) throws IOException{ } else { put.addColumn(CF_BYTES, col, value); } - if(VERBOSE){ + if (VERBOSE) { KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value); - System.err.println(Strings.padFront(i+"", ' ', 4)+" "+kvPut); + System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut); } region.put(put); } @@ -175,14 +172,14 @@ private void doPuts(HRegion region) throws IOException{ } } - private void doGets(Region region) throws IOException{ + private void doGets(Region region) throws IOException { for (int i = 0; i < NUM_ROWS; ++i) { final byte[] rowKey = Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(i)); for (int j = 0; j < NUM_COLS_PER_ROW; ++j) { final String qualStr = String.valueOf(j); if (VERBOSE) { - System.err.println("Reading row " + i + ", column " + j + " " + Bytes.toString(rowKey)+"/" - +qualStr); + System.err.println( + "Reading row " + i + ", column " + j + " " + Bytes.toString(rowKey) + "/" + qualStr); } final byte[] qualBytes = Bytes.toBytes(qualStr); Get get = new Get(rowKey); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java index 7ef8052abcfa..f156636f3715 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ public class TestLoadAndSwitchEncodeOnDisk extends TestMiniClusterLoadSequential /** Un-parameterize the test */ @Parameters public static Collection parameters() { - return Arrays.asList(new Object[][]{ new Object[0] }); + return Arrays.asList(new Object[][] { new Object[0] }); } public TestLoadAndSwitchEncodeOnDisk() { @@ -90,10 +90,9 @@ public void loadTest() throws Exception { System.err.println("\nRe-enabling table\n"); admin.enableTable(TABLE); - System.err.println("\nNew column descriptor: " + - getColumnDesc(admin) + "\n"); + System.err.println("\nNew column descriptor: " + getColumnDesc(admin) + "\n"); - // The table may not have all regions on line yet. Assert online before + // The table may not have all regions on line yet. Assert online before // moving to major compact. assertAllOnLine(t); @@ -111,11 +110,11 @@ public void loadTest() throws Exception { private void assertAllOnLine(final Table t) throws IOException { List regions; - try(RegionLocator rl = TEST_UTIL.getConnection().getRegionLocator(t.getName())) { + try (RegionLocator rl = TEST_UTIL.getConnection().getRegionLocator(t.getName())) { regions = rl.getAllRegionLocations(); } - for (HRegionLocation e: regions) { - byte [] startkey = e.getRegion().getStartKey(); + for (HRegionLocation e : regions) { + byte[] startkey = e.getRegion().getStartKey(); Scan s = new Scan().withStartRow(startkey); ResultScanner scanner = t.getScanner(s); Result r = scanner.next(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java index fd4f5142666f..e169dfdae95a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java @@ -65,8 +65,8 @@ public class TestSeekBeforeWithReverseScan { public void setUp() throws Exception { TableName tableName = TableName.valueOf(getClass().getSimpleName()); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(cfName).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF).build()).build(); + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(cfName).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF).build()).build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); Path path = testUtil.getDataTestDir(getClass().getSimpleName()); region = HBaseTestingUtil.createRegionAndWAL(info, path, testUtil.getConfiguration(), @@ -108,11 +108,11 @@ public void testReverseScanWithoutPadding() throws Exception { count++; } assertEquals("b", Bytes.toString(res.get(0).getRowArray(), res.get(0).getRowOffset(), - res.get(0).getRowLength())); + res.get(0).getRowLength())); assertEquals("ab", Bytes.toString(res.get(1).getRowArray(), res.get(1).getRowOffset(), - res.get(1).getRowLength())); + res.get(1).getRowLength())); assertEquals("a", Bytes.toString(res.get(2).getRowArray(), res.get(2).getRowOffset(), - res.get(2).getRowLength())); + res.get(2).getRowLength())); assertEquals(3, count); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java index 2c40fa394066..279788df2869 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -47,7 +46,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) @RunWith(Parameterized.class) public class TestSeekToBlockWithEncoders { @@ -141,8 +140,8 @@ public void testSeekingToBlockToANotAvailableKey() throws IOException { KeyValue kv5 = new KeyValue(Bytes.toBytes("bbbcd"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv5); - KeyValue toSeek = new KeyValue(Bytes.toBytes("bbbce"), Bytes.toBytes("f1"), - Bytes.toBytes("q1"), Bytes.toBytes("val")); + KeyValue toSeek = new KeyValue(Bytes.toBytes("bbbce"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), + Bytes.toBytes("val")); seekToTheKey(kv5, sampleKv, toSeek); } @@ -152,17 +151,17 @@ public void testSeekingToBlockToANotAvailableKey() throws IOException { @Test public void testSeekToBlockWithDecreasingCommonPrefix() throws IOException { List sampleKv = new ArrayList<>(); - KeyValue kv1 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"), - Bytes.toBytes("q1"), Bytes.toBytes("val")); + KeyValue kv1 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), + Bytes.toBytes("val")); sampleKv.add(kv1); - KeyValue kv2 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"), - Bytes.toBytes("q2"), Bytes.toBytes("val")); + KeyValue kv2 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q2"), + Bytes.toBytes("val")); sampleKv.add(kv2); - KeyValue kv3 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"), - Bytes.toBytes("q3"), Bytes.toBytes("val")); + KeyValue kv3 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q3"), + Bytes.toBytes("val")); sampleKv.add(kv3); - KeyValue kv4 = new KeyValue(Bytes.toBytes("row11baa"), Bytes.toBytes("f1"), - Bytes.toBytes("q1"), Bytes.toBytes("val")); + KeyValue kv4 = new KeyValue(Bytes.toBytes("row11baa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), + Bytes.toBytes("val")); sampleKv.add(kv4); Cell toSeek = PrivateCellUtil.createLastOnRow(kv3); seekToTheKey(kv3, sampleKv, toSeek); @@ -272,13 +271,12 @@ public void testSeekToBlockWithDiffFamilyAndQualifer() throws IOException { KeyValue kv5 = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam1"), Bytes.toBytes("q2"), Bytes.toBytes("val")); sampleKv.add(kv5); - KeyValue toSeek = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam2"), - Bytes.toBytes("q2"), Bytes.toBytes("val")); + KeyValue toSeek = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam2"), Bytes.toBytes("q2"), + Bytes.toBytes("val")); seekToTheKey(kv5, sampleKv, toSeek); } - private void seekToTheKey(KeyValue expected, List kvs, Cell toSeek) - throws IOException { + 
private void seekToTheKey(KeyValue expected, List kvs, Cell toSeek) throws IOException { // create all seekers List encodedSeekers = new ArrayList<>(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { @@ -286,15 +284,14 @@ private void seekToTheKey(KeyValue expected, List kvs, Cell toSeek) continue; } DataBlockEncoder encoder = encoding.getEncoder(); - HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false) - .withIncludesMvcc(false).withIncludesTags(false) - .withCompression(Compression.Algorithm.NONE).build(); - HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(conf, - encoding, HFILEBLOCK_DUMMY_HEADER, meta); + HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(false) + .withIncludesTags(false).withCompression(Compression.Algorithm.NONE).build(); + HFileBlockEncodingContext encodingContext = + encoder.newDataBlockEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, meta); ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs, encodingContext, this.useOffheapData); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer)); encodedSeekers.add(seeker); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java index 7daedc45f8df..4b69146ee7bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,32 +48,29 @@ public class CacheTestUtils { private static final boolean includesMemstoreTS = true; /** - * Just checks if heapsize grows when something is cached, and gets smaller - * when the same object is evicted + * Just checks if heapsize grows when something is cached, and gets smaller when the same object + * is evicted */ - public static void testHeapSizeChanges(final BlockCache toBeTested, - final int blockSize) { + public static void testHeapSizeChanges(final BlockCache toBeTested, final int blockSize) { HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1); long heapSize = ((HeapSize) toBeTested).heapSize(); toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block); - /*When we cache something HeapSize should always increase */ + /* When we cache something HeapSize should always increase */ assertTrue(heapSize < ((HeapSize) toBeTested).heapSize()); toBeTested.evictBlock(blocks[0].blockName); - /*Post eviction, heapsize should be the same */ + /* Post eviction, heapsize should be the same */ assertEquals(heapSize, ((HeapSize) toBeTested).heapSize()); } - public static void testCacheMultiThreaded(final BlockCache toBeTested, - final int blockSize, final int numThreads, final int numQueries, - final double passingScore) throws Exception { + public static void testCacheMultiThreaded(final BlockCache toBeTested, final int blockSize, + final int numThreads, final int numQueries, final double passingScore) throws Exception { Configuration conf = new Configuration(); - MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext( - conf); + MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf); final AtomicInteger totalQueries = new AtomicInteger(); final ConcurrentLinkedQueue blocksToTest = new ConcurrentLinkedQueue<>(); @@ -96,8 +92,7 @@ public void doAnAction() throws Exception { return; } toBeTested.cacheBlock(ourBlock.blockName, ourBlock.block); - Cacheable retrievedBlock = toBeTested.getBlock(ourBlock.blockName, - false, false, true); + Cacheable retrievedBlock = toBeTested.getBlock(ourBlock.blockName, false, false, true); if (retrievedBlock != null) { assertEquals(ourBlock.block, retrievedBlock); toBeTested.evictBlock(ourBlock.blockName); @@ -119,13 +114,12 @@ public void doAnAction() throws Exception { } ctx.stop(); if (hits.get() / ((double) hits.get() + (double) miss.get()) < passingScore) { - fail("Too many nulls returned. Hits: " + hits.get() + " Misses: " - + miss.get()); + fail("Too many nulls returned. 
Hits: " + hits.get() + " Misses: " + miss.get()); } } - public static void testCacheSimple(BlockCache toBeTested, int blockSize, - int numBlocks) throws Exception { + public static void testCacheSimple(BlockCache toBeTested, int blockSize, int numBlocks) + throws Exception { HFileBlockPair[] blocks = generateHFileBlocks(blockSize, numBlocks); // Confirm empty @@ -270,8 +264,8 @@ public CacheableDeserializer getDeserializer() { private static final int deserializerIdentifier; static { - deserializerIdentifier = CacheableDeserializerIdManager - .registerDeserializer(blockDeserializer); + deserializerIdentifier = + CacheableDeserializerIdManager.registerDeserializer(blockDeserializer); } @Override @@ -280,7 +274,6 @@ public BlockType getBlockType() { } } - public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks) { HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks]; Random rand = ThreadLocalRandom.current(); @@ -297,14 +290,10 @@ public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks) cachedBuffer.putInt(uncompressedSizeWithoutHeader); cachedBuffer.putLong(prevBlockOffset); cachedBuffer.rewind(); - HFileContext meta = new HFileContextBuilder() - .withHBaseCheckSum(false) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(false) - .withCompression(Compression.Algorithm.NONE) - .withBytesPerCheckSum(0) - .withChecksumType(ChecksumType.NULL) - .build(); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(false).withCompression(Compression.Algorithm.NONE) + .withBytesPerCheckSum(0).withChecksumType(ChecksumType.NULL).build(); HFileBlock generated = new HFileBlock(BlockType.DATA, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, ByteBuff.wrap(cachedBuffer), HFileBlock.DONT_FILL_HEADER, blockSize, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java index 19800a404ee6..ed3b08f1b2e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/KVGenerator.java @@ -1,23 +1,23 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.util.Random; - import org.apache.hadoop.hbase.util.RandomDistribution; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.WritableComparator; @@ -25,10 +25,9 @@ /** * Generate random <key, value> pairs. *

- * Copied from - * hadoop-3315 tfile. - * Remove after tfile is committed and use the tfile version of this class - * instead. + * Copied from hadoop-3315 tfile. + * Remove after tfile is committed and use the tfile version of this class instead. + *
      */ class KVGenerator { private final Random random; @@ -39,10 +38,9 @@ class KVGenerator { private static final int MIN_KEY_LEN = 4; private final byte prefix[] = new byte[MIN_KEY_LEN]; - public KVGenerator(Random random, boolean sorted, - RandomDistribution.DiscreteRNG keyLenRNG, - RandomDistribution.DiscreteRNG valLenRNG, - RandomDistribution.DiscreteRNG wordLenRNG, int dictSize) { + public KVGenerator(Random random, boolean sorted, RandomDistribution.DiscreteRNG keyLenRNG, + RandomDistribution.DiscreteRNG valLenRNG, RandomDistribution.DiscreteRNG wordLenRNG, + int dictSize) { this.random = random; dict = new byte[dictSize][]; this.sorted = sorted; @@ -68,10 +66,8 @@ private void fillKey(BytesWritable o) { System.arraycopy(word, 0, o.get(), n, l); n += l; } - if (sorted - && WritableComparator.compareBytes(lastKey.get(), MIN_KEY_LEN, lastKey - .getSize() - - MIN_KEY_LEN, o.get(), MIN_KEY_LEN, o.getSize() - MIN_KEY_LEN) > 0) { + if (sorted && WritableComparator.compareBytes(lastKey.get(), MIN_KEY_LEN, + lastKey.getSize() - MIN_KEY_LEN, o.get(), MIN_KEY_LEN, o.getSize() - MIN_KEY_LEN) > 0) { incrementPrefix(); } @@ -103,8 +99,7 @@ private void incrementPrefix() { public void next(BytesWritable key, BytesWritable value, boolean dupKey) { if (dupKey) { key.set(lastKey); - } - else { + } else { fillKey(key); } fillValue(value); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java index 806ffd615a6d..0b315c8b770f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/KeySampler.java @@ -1,41 +1,37 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; import java.util.Random; - import org.apache.hadoop.hbase.util.RandomDistribution.DiscreteRNG; import org.apache.hadoop.io.BytesWritable; /* -*

-* Copied from -* hadoop-3315 tfile. -* Remove after tfile is committed and use the tfile version of this class -* instead. -*/ + * Copied from hadoop-3315 + * tfile. Remove after tfile is committed and use the tfile version of this class instead.
      + */ class KeySampler { Random random; int min, max; DiscreteRNG keyLenRNG; private static final int MIN_KEY_LEN = 4; - public KeySampler(Random random, byte [] first, byte [] last, - DiscreteRNG keyLenRNG) { + public KeySampler(Random random, byte[] first, byte[] last, DiscreteRNG keyLenRNG) { this.random = random; int firstLen = keyPrefixToInt(first); int lastLen = keyPrefixToInt(last); @@ -46,11 +42,11 @@ public KeySampler(Random random, byte [] first, byte [] last, this.keyLenRNG = keyLenRNG; } - private int keyPrefixToInt(byte [] key) { + private int keyPrefixToInt(byte[] key) { byte[] b = key; int o = 0; - return (b[o] & 0xff) << 24 | (b[o + 1] & 0xff) << 16 - | (b[o + 2] & 0xff) << 8 | (b[o + 3] & 0xff); + return (b[o] & 0xff) << 24 | (b[o + 1] & 0xff) << 16 | (b[o + 2] & 0xff) << 8 + | (b[o + 3] & 0xff); } public void next(BytesWritable key) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java index aaf1711b3bb6..801a119c571b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java @@ -1,28 +1,28 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile; /** * A nano-second timer. *

- * Copied from - * hadoop-3315 tfile. - * Remove after tfile is committed and use the tfile version of this class - * instead. + * Copied from hadoop-3315 tfile. + * Remove after tfile is committed and use the tfile version of this class instead. + *
      */ public class NanoTimer { private long last = -1; @@ -31,18 +31,14 @@ public class NanoTimer { /** * Constructor - * - * @param start - * Start the timer upon construction. + * @param start Start the timer upon construction. */ public NanoTimer(boolean start) { if (start) this.start(); } /** - * Start the timer. - * - * Note: No effect if timer is already started. + * Start the timer. Note: No effect if timer is already started. */ public void start() { if (!this.started) { @@ -52,9 +48,7 @@ public void start() { } /** - * Stop the timer. - * - * Note: No effect if timer is already stopped. + * Stop the timer. Note: No effect if timer is already stopped. */ public void stop() { if (this.started) { @@ -65,9 +59,8 @@ public void stop() { /** * Read the timer. - * - * @return the elapsed time in nano-seconds. Note: If the timer is never - * started before, -1 is returned. + * @return the elapsed time in nano-seconds. Note: If the timer is never started before, -1 is + * returned. */ public long read() { if (!readable()) return -1; @@ -86,7 +79,6 @@ public void reset() { /** * Checking whether the timer is started - * * @return true if timer is started. */ public boolean isStarted() { @@ -94,9 +86,8 @@ public boolean isStarted() { } /** - * Format the elapsed time to a human understandable string. - * - * Note: If timer is never started, "ERR" will be returned. + * Format the elapsed time to a human understandable string. Note: If timer is never started, + * "ERR" will be returned. */ @Override public String toString() { @@ -108,11 +99,8 @@ public String toString() { } /** - * A utility method to format a time duration in nano seconds into a human - * understandable stirng. - * - * @param t - * Time duration in nano seconds. + * A utility method to format a time duration in nano seconds into a human understandable stirng. + * @param t Time duration in nano seconds. * @return String representation. */ public static String nanoTimeToString(long t) { @@ -161,20 +149,12 @@ public static String nanoTimeToString(long t) { return String.format("%.2fs", ss); /** - * StringBuilder sb = new StringBuilder(); String sep = ""; - * - * if (dd > 0) { String unit = (dd > 1) ? "days" : "day"; - * sb.append(String.format("%s%d%s", sep, dd, unit)); sep = " "; } - * - * if (hh > 0) { String unit = (hh > 1) ? "hrs" : "hr"; - * sb.append(String.format("%s%d%s", sep, hh, unit)); sep = " "; } - * - * if (mm > 0) { String unit = (mm > 1) ? "mins" : "min"; - * sb.append(String.format("%s%d%s", sep, mm, unit)); sep = " "; } - * - * if (ss > 0) { String unit = (ss > 1) ? "secs" : "sec"; - * sb.append(String.format("%s%.3f%s", sep, ss, unit)); sep = " "; } - * + * StringBuilder sb = new StringBuilder(); String sep = ""; if (dd > 0) { String unit = (dd > 1) + * ? "days" : "day"; sb.append(String.format("%s%d%s", sep, dd, unit)); sep = " "; } if (hh > 0) + * { String unit = (hh > 1) ? "hrs" : "hr"; sb.append(String.format("%s%d%s", sep, hh, unit)); + * sep = " "; } if (mm > 0) { String unit = (mm > 1) ? "mins" : "min"; + * sb.append(String.format("%s%d%s", sep, mm, unit)); sep = " "; } if (ss > 0) { String unit = + * (ss > 1) ? "secs" : "sec"; sb.append(String.format("%s%.3f%s", sep, ss, unit)); sep = " "; } * return sb.toString(); */ } @@ -185,7 +165,6 @@ private boolean readable() { /** * Simple tester. 
- * * @param args */ public static void main(String[] args) { @@ -196,4 +175,3 @@ public static void main(String[] args) { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomKeyValueUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomKeyValueUtil.java index 552b0849f22b..2a23fcc35948 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomKeyValueUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomKeyValueUtil.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.hbase.io.hfile; +import java.util.Random; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.util.Bytes; -import java.util.Random; - /** * These helper methods generate random byte[]'s data for KeyValues */ @@ -32,16 +31,13 @@ public class RandomKeyValueUtil { public static final char randomReadableChar(Random rand) { int i = rand.nextInt(26 * 2 + 10 + 1); - if (i < 26) - return (char) ('A' + i); + if (i < 26) return (char) ('A' + i); i -= 26; - if (i < 26) - return (char) ('a' + i); + if (i < 26) return (char) ('a' + i); i -= 26; - if (i < 10) - return (char) ('0' + i); + if (i < 10) return (char) ('0' + i); i -= 10; assert i == 0; @@ -49,16 +45,14 @@ public static final char randomReadableChar(Random rand) { } public static KeyValue randomKeyValue(Random rand) { - return new KeyValue(randomRowOrQualifier(rand), - Bytes.toBytes(COLUMN_FAMILY_NAME), randomRowOrQualifier(rand), - randomValue(rand)); + return new KeyValue(randomRowOrQualifier(rand), Bytes.toBytes(COLUMN_FAMILY_NAME), + randomRowOrQualifier(rand), randomValue(rand)); } public static byte[] randomRowOrQualifier(Random rand) { StringBuilder field = new StringBuilder(); int fieldLen = MIN_ROW_OR_QUALIFIER_LENGTH - + rand.nextInt(MAX_ROW_OR_QUALIFIER_LENGTH - - MIN_ROW_OR_QUALIFIER_LENGTH + 1); + + rand.nextInt(MAX_ROW_OR_QUALIFIER_LENGTH - MIN_ROW_OR_QUALIFIER_LENGTH + 1); for (int i = 0; i < fieldLen; ++i) field.append(randomReadableChar(rand)); return Bytes.toBytes(field.toString()); @@ -75,10 +69,9 @@ public static byte[] randomValue(Random rand) { } /** - * Generates a random key that is guaranteed to increase as the given index i - * increases. The result consists of a prefix, which is a deterministic - * increasing function of i, and a random suffix. - * + * Generates a random key that is guaranteed to increase as the given index i increases. The + * result consists of a prefix, which is a deterministic increasing function of i, and a random + * suffix. * @param rand random number generator to use * @param i * @return the random key @@ -88,10 +81,8 @@ public static byte[] randomOrderedKey(Random rand, int i) { // The fixed-length lexicographically increasing part of the key. for (int bitIndex = 31; bitIndex >= 0; --bitIndex) { - if ((i & (1 << bitIndex)) == 0) - k.append("a"); - else - k.append("b"); + if ((i & (1 << bitIndex)) == 0) k.append("a"); + else k.append("b"); } // A random-length random suffix of the key. @@ -107,10 +98,8 @@ public static byte[] randomOrderedFixedLengthKey(Random rand, int i, int suffixL // The fixed-length lexicographically increasing part of the key. for (int bitIndex = 31; bitIndex >= 0; --bitIndex) { - if ((i & (1 << bitIndex)) == 0) - k.append("a"); - else - k.append("b"); + if ((i & (1 << bitIndex)) == 0) k.append("a"); + else k.append("b"); } // A random suffix of the key. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TagUsage.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TagUsage.java index d73257e00339..4f99d022c7ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TagUsage.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TagUsage.java @@ -1,5 +1,4 @@ - /* - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,14 +20,14 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used in testcases only. + * Used in testcases only. */ @InterfaceAudience.Private public enum TagUsage { // No tags would be added - NO_TAG, - // KVs with tags - ONLY_TAG, + NO_TAG, + // KVs with tags + ONLY_TAG, // kvs with and without tags PARTIAL_TAG } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java index 10f18a82bb2d..d2795796a125 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Map; import java.util.NavigableSet; import java.util.Objects; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -40,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestBlockCacheReporting { @ClassRule @@ -67,7 +66,7 @@ private void addDataAndHits(final BlockCache bc, final int count) { bc.getBlock(bckd, true, false, true); bc.getBlock(bcki, true, false, true); } - assertEquals(2 * count /*Data and Index blocks*/, bc.getStats().getHitCount()); + assertEquals(2 * count /* Data and Index blocks */, bc.getStats().getHitCount()); BlockCacheKey bckd = new BlockCacheKey("f", 0); BlockCacheKey bcki = new BlockCacheKey("f", 0 + count); bc.evictBlock(bckd); @@ -84,7 +83,7 @@ public void testBucketCache() throws IOException { logPerBlock(blockCache); final int count = 3; addDataAndHits(blockCache, count); - // The below has no asserts. It is just exercising toString and toJSON code. + // The below has no asserts. It is just exercising toString and toJSON code. LOG.info(Objects.toString(blockCache.getStats())); BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(blockCache); LOG.info(Objects.toString(cbsbf)); @@ -100,7 +99,7 @@ public void testLruBlockCache() throws IOException { BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf); logPerBlock(blockCache); addDataAndHits(blockCache, 3); - // The below has no asserts. It is just exercising toString and toJSON code. + // The below has no asserts. It is just exercising toString and toJSON code. 
LOG.info("count=" + blockCache.getBlockCount() + ", currentSize=" + blockCache.getCurrentSize() + ", freeSize=" + blockCache.getFreeSize()); LOG.info(Objects.toString(blockCache.getStats())); @@ -113,22 +112,22 @@ public void testLruBlockCache() throws IOException { private void bucketCacheReport(final BlockCache bc) { LOG.info(bc.getClass().getSimpleName() + ": " + bc.getStats()); - BlockCache [] bcs = bc.getBlockCaches(); + BlockCache[] bcs = bc.getBlockCaches(); if (bcs != null) { - for (BlockCache sbc: bc.getBlockCaches()) { + for (BlockCache sbc : bc.getBlockCaches()) { LOG.info(bc.getClass().getSimpleName() + ": " + sbc.getStats()); } } } private void logPerFile(final BlockCacheUtil.CachedBlocksByFile cbsbf) throws IOException { - for (Map.Entry> e: - cbsbf.getCachedBlockStatsByFile().entrySet()) { + for (Map.Entry> e : cbsbf.getCachedBlockStatsByFile() + .entrySet()) { int count = 0; long size = 0; int countData = 0; long sizeData = 0; - for (CachedBlock cb: e.getValue()) { + for (CachedBlock cb : e.getValue()) { count++; size += cb.getSize(); BlockType bt = cb.getBlockType(); @@ -137,9 +136,9 @@ private void logPerFile(final BlockCacheUtil.CachedBlocksByFile cbsbf) throws IO sizeData += cb.getSize(); } } - LOG.info("filename=" + e.getKey() + ", count=" + count + ", countData=" + countData + - ", size=" + size + ", sizeData=" + sizeData); - //LOG.info(BlockCacheUtil.toJSON(e.getKey(), e.getValue())); + LOG.info("filename=" + e.getKey() + ", count=" + count + ", countData=" + countData + + ", size=" + size + ", sizeData=" + sizeData); + // LOG.info(BlockCacheUtil.toJSON(e.getKey(), e.getValue())); } } @@ -147,7 +146,7 @@ private BlockCacheUtil.CachedBlocksByFile logPerBlock(final BlockCache bc) throw BlockCacheUtil.CachedBlocksByFile cbsbf = new BlockCacheUtil.CachedBlocksByFile(); for (CachedBlock cb : bc) { LOG.info(cb.toString()); - //LOG.info(BlockCacheUtil.toJSON(bc)); + // LOG.info(BlockCacheUtil.toJSON(bc)); cbsbf.update(cb); } return cbsbf; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockIOUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockIOUtils.java index 099a7809deed..1d76296ca5d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockIOUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockIOUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,6 @@ import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; - import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -209,8 +208,7 @@ public void testPositionalReadExtraFailed() throws IOException { } @Test - public void testPositionalReadShortReadCompletesNecessaryAndExtraBytes() - throws IOException { + public void testPositionalReadShortReadCompletesNecessaryAndExtraBytes() throws IOException { long position = 0; int bufOffset = 0; int necessaryLen = 10; @@ -249,13 +247,12 @@ public void testPositionalReadPrematureEOF() throws IOException { } /** - * Determine if ByteBufferPositionedReadable API is available - * . + * Determine if ByteBufferPositionedReadable API is available . * @return true if FSDataInputStream implements ByteBufferPositionedReadable API. 
*/ private boolean isByteBufferPositionedReadable() { try { - //long position, ByteBuffer buf + // long position, ByteBuffer buf FSDataInputStream.class.getMethod("read", long.class, ByteBuffer.class); } catch (NoSuchMethodException e) { return false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java index 0ec596e685aa..393fa8015e40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,10 +55,10 @@ * Tests that {@link CacheConfig} does as expected. */ // This test is marked as a large test though it runs in a short amount of time -// (seconds). It is large because it depends on being able to reset the global -// blockcache instance which is in a global variable. Experience has it that +// (seconds). It is large because it depends on being able to reset the global +// blockcache instance which is in a global variable. Experience has it that // tests clash on the global variable if this test is run as small sized test. -@Category({IOTests.class, MediumTests.class}) +@Category({ IOTests.class, MediumTests.class }) public class TestCacheConfig { @ClassRule @@ -83,8 +83,7 @@ public int getDeserializerIdentifier() { } @Override - public Cacheable deserialize(ByteBuff b, ByteBuffAllocator alloc) - throws IOException { + public Cacheable deserialize(ByteBuff b, ByteBuffAllocator alloc) throws IOException { LOG.info("Deserialized " + b); return cacheable; } @@ -162,8 +161,8 @@ public void setUp() throws Exception { /** * @param bc The block cache instance. * @param cc Cache config. - * @param doubling If true, addition of element ups counter by 2, not 1, because element added - * to onheap and offheap caches. + * @param doubling If true, addition of element ups counter by 2, not 1, because element added to + * onheap and offheap caches. * @param sizing True if we should run sizing test (doesn't always apply). */ void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean doubling, @@ -177,7 +176,7 @@ void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean assertEquals(doubling ? 2 : 1, bc.getBlockCount() - initialBlockCount); bc.evictBlock(bck); assertEquals(initialBlockCount, bc.getBlockCount()); - // Do size accounting. Do it after the above 'warm-up' because it looks like some + // Do size accounting. Do it after the above 'warm-up' because it looks like some // buffers do lazy allocation so sizes are off on first go around. 
if (sizing) { long originalSize = bc.getCurrentSize(); @@ -239,11 +238,8 @@ public void testDisableCacheDataBlock() throws IOException { conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, true); conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false); - ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("testDisableCacheDataBlock")) - .setBlockCacheEnabled(false) - .build(); + ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes("testDisableCacheDataBlock")).setBlockCacheEnabled(false).build(); cacheConfig = new CacheConfig(conf, columnFamilyDescriptor, null, ByteBuffAllocator.HEAP); assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA)); @@ -316,7 +312,7 @@ private void doBucketCacheConfigTest() { @Test public void testBucketCacheConfigL1L2Setup() { this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); - // Make lru size is smaller than bcSize for sure. Need this to be true so when eviction + // Make lru size is smaller than bcSize for sure. Need this to be true so when eviction // from L1 happens, it does not fail because L2 can't take the eviction because block too big. this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f); MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); @@ -345,7 +341,7 @@ public void testBucketCacheConfigL1L2Setup() { assertEquals(initialL1BlockCount + 1, lbc.getBlockCount()); assertEquals(initialL2BlockCount, bc.getBlockCount()); // Force evictions by putting in a block too big. - final long justTooBigSize = ((LruBlockCache)lbc).acceptableSize() + 1; + final long justTooBigSize = ((LruBlockCache) lbc).acceptableSize() + 1; lbc.cacheBlock(new BlockCacheKey("bck2", 0), new DataCacheEntry() { @Override public long heapSize() { @@ -354,11 +350,12 @@ public long heapSize() { @Override public int getSerializedLength() { - return (int)heapSize(); + return (int) heapSize(); } }); // The eviction thread in lrublockcache needs to run. - while (initialL1BlockCount != lbc.getBlockCount()) Threads.sleep(10); + while (initialL1BlockCount != lbc.getBlockCount()) + Threads.sleep(10); assertEquals(initialL1BlockCount, lbc.getBlockCount()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index bac0c42b1f2e..bb7c8557d2c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,6 @@ import java.util.Map; import java.util.Random; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -77,11 +76,11 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Tests {@link HFile} cache-on-write functionality for the following block - * types: data blocks, non-root index blocks, and Bloom filter blocks. + * Tests {@link HFile} cache-on-write functionality for the following block types: data blocks, + * non-root index blocks, and Bloom filter blocks. 
*/ @RunWith(Parameterized.class) -@Category({IOTests.class, LargeTests.class}) +@Category({ IOTests.class, LargeTests.class }) public class TestCacheOnWrite { @ClassRule @@ -110,38 +109,25 @@ public class TestCacheOnWrite { private static final BloomType BLOOM_TYPE = BloomType.ROWCOL; private static final int CKBYTES = 512; - - private static final Set INDEX_BLOCK_TYPES = ImmutableSet.of( - BlockType.INDEX_V1, - BlockType.INTERMEDIATE_INDEX, - BlockType.ROOT_INDEX, - BlockType.LEAF_INDEX - ); - private static final Set BLOOM_BLOCK_TYPES = ImmutableSet.of( - BlockType.BLOOM_CHUNK, - BlockType.GENERAL_BLOOM_META, - BlockType.DELETE_FAMILY_BLOOM_META - ); - private static final Set DATA_BLOCK_TYPES = ImmutableSet.of( - BlockType.ENCODED_DATA, - BlockType.DATA - ); + private static final Set INDEX_BLOCK_TYPES = ImmutableSet.of(BlockType.INDEX_V1, + BlockType.INTERMEDIATE_INDEX, BlockType.ROOT_INDEX, BlockType.LEAF_INDEX); + private static final Set BLOOM_BLOCK_TYPES = ImmutableSet.of(BlockType.BLOOM_CHUNK, + BlockType.GENERAL_BLOOM_META, BlockType.DELETE_FAMILY_BLOOM_META); + private static final Set DATA_BLOCK_TYPES = + ImmutableSet.of(BlockType.ENCODED_DATA, BlockType.DATA); // All test cases are supposed to generate files for compaction within this range private static final long CACHE_COMPACTION_LOW_THRESHOLD = 10L; private static final long CACHE_COMPACTION_HIGH_THRESHOLD = 1 * 1024 * 1024 * 1024L; /** The number of valid key types possible in a store file */ - private static final int NUM_VALID_KEY_TYPES = - KeyValue.Type.values().length - 2; + private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2; private enum CacheOnWriteType { - DATA_BLOCKS(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, - BlockType.DATA, BlockType.ENCODED_DATA), - BLOOM_BLOCKS(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, - BlockType.BLOOM_CHUNK), - INDEX_BLOCKS(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, - BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX); + DATA_BLOCKS(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, BlockType.DATA, BlockType.ENCODED_DATA), + BLOOM_BLOCKS(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, BlockType.BLOOM_CHUNK), + INDEX_BLOCKS(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, BlockType.LEAF_INDEX, + BlockType.INTERMEDIATE_INDEX); private final String confKey; private final BlockType blockType1; @@ -174,8 +160,8 @@ public TestCacheOnWrite(CacheOnWriteType cowType, Compression.Algorithm compress this.compress = compress; this.cacheCompressedData = cacheCompressedData; this.blockCache = blockCache; - testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress + - ", cacheCompressedData=" + cacheCompressedData + "]"; + testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress + + ", cacheCompressedData=" + cacheCompressedData + "]"; LOG.info(testDescription); } @@ -185,8 +171,9 @@ private static List getBlockCaches() throws IOException { // default blockcaches.add(BlockCacheFactory.createBlockCache(conf)); - //set LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME to 2.0f due to HBASE-16287 - TEST_UTIL.getConfiguration().setFloat(LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 2.0f); + // set LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME to 2.0f due to HBASE-16287 + TEST_UTIL.getConfiguration().setFloat(LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, + 2.0f); // memory BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration()); blockcaches.add(lru); @@ -251,9 +238,9 @@ public void setUp() 
throws IOException { cowType.modifyConf(conf); conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, cowType.shouldBeCached(BlockType.DATA)); conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, - cowType.shouldBeCached(BlockType.LEAF_INDEX)); + cowType.shouldBeCached(BlockType.LEAF_INDEX)); conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, - cowType.shouldBeCached(BlockType.BLOOM_CHUNK)); + cowType.shouldBeCached(BlockType.BLOOM_CHUNK)); cacheConf = new CacheConfig(conf, blockCache); fs = HFileSystem.get(conf); } @@ -276,11 +263,11 @@ private void testStoreFileCacheOnWriteInternals(boolean useTags) throws IOExcept private void readStoreFile(boolean useTags) throws IOException { HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, true, conf); LOG.info("HFile information: " + reader); - HFileContext meta = new HFileContextBuilder().withCompression(compress) - .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL) - .withBlockSize(DATA_BLOCK_SIZE) - .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) - .withIncludesTags(useTags).build(); + HFileContext meta = + new HFileContextBuilder().withCompression(compress).withBytesPerCheckSum(CKBYTES) + .withChecksumType(ChecksumType.NULL).withBlockSize(DATA_BLOCK_SIZE) + .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) + .withIncludesTags(useTags).build(); final boolean cacheBlocks = false; final boolean pread = false; HFileScanner scanner = reader.getScanner(conf, cacheBlocks, pread); @@ -295,20 +282,18 @@ private void readStoreFile(boolean useTags) throws IOException { while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { // Flags: don't cache the block, use pread, this is not a compaction. // Also, pass null for expected block type to avoid checking it. - HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, - encodingInCache); + HFileBlock block = + reader.readBlock(offset, -1, false, true, false, true, null, encodingInCache); BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); HFileBlock fromCache = (HFileBlock) blockCache.getBlock(blockCacheKey, true, false, true); boolean isCached = fromCache != null; cachedBlocksOffset.add(offset); cachedBlocks.put(offset, fromCache == null ? 
null : Pair.newPair(block, fromCache)); boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType()); - assertTrue("shouldBeCached: " + shouldBeCached+ "\n" + - "isCached: " + isCached + "\n" + - "Test description: " + testDescription + "\n" + - "block: " + block + "\n" + - "encodingInCache: " + encodingInCache + "\n" + - "blockCacheKey: " + blockCacheKey, + assertTrue( + "shouldBeCached: " + shouldBeCached + "\n" + "isCached: " + isCached + "\n" + + "Test description: " + testDescription + "\n" + "block: " + block + "\n" + + "encodingInCache: " + encodingInCache + "\n" + "blockCacheKey: " + blockCacheKey, shouldBeCached == isCached); if (isCached) { if (cacheConf.shouldCacheCompressed(fromCache.getBlockType().getCategory())) { @@ -325,8 +310,8 @@ private void readStoreFile(boolean useTags) throws IOException { assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType()); assertEquals(block.getOnDiskSizeWithHeader(), fromCache.getOnDiskSizeWithHeader()); assertEquals(block.getOnDiskSizeWithoutHeader(), fromCache.getOnDiskSizeWithoutHeader()); - assertEquals( - block.getUncompressedSizeWithoutHeader(), fromCache.getUncompressedSizeWithoutHeader()); + assertEquals(block.getUncompressedSizeWithoutHeader(), + fromCache.getUncompressedSizeWithoutHeader()); } offset += block.getOnDiskSizeWithHeader(); BlockType bt = block.getBlockType(); @@ -337,11 +322,13 @@ private void readStoreFile(boolean useTags) throws IOException { LOG.info("Block count by type: " + blockCountByType); String countByType = blockCountByType.toString(); if (useTags) { - assertEquals("{" + BlockType.DATA - + "=2663, LEAF_INDEX=297, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=32}", countByType); + assertEquals( + "{" + BlockType.DATA + "=2663, LEAF_INDEX=297, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=32}", + countByType); } else { - assertEquals("{" + BlockType.DATA - + "=2498, LEAF_INDEX=278, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=31}", countByType); + assertEquals( + "{" + BlockType.DATA + "=2498, LEAF_INDEX=278, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=31}", + countByType); } // iterate all the keyvalue from hfile @@ -349,10 +336,9 @@ private void readStoreFile(boolean useTags) throws IOException { scanner.getCell(); } Iterator iterator = cachedBlocksOffset.iterator(); - while(iterator.hasNext()) { + while (iterator.hasNext()) { Long entry = iterator.next(); - BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), - entry); + BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), entry); Pair blockPair = cachedBlocks.get(entry); if (blockPair != null) { // Call return twice because for the isCache cased the counter would have got incremented @@ -389,36 +375,32 @@ public static KeyValue.Type generateKeyType(Random rand) { } private void writeStoreFile(boolean useTags) throws IOException { - Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), - "test_cache_on_write"); - HFileContext meta = new HFileContextBuilder().withCompression(compress) - .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL) - .withBlockSize(DATA_BLOCK_SIZE) - .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) - .withIncludesTags(useTags).build(); - StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(storeFileParentDir) - .withFileContext(meta) - .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build(); + Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "test_cache_on_write"); + HFileContext meta = + new 
HFileContextBuilder().withCompression(compress).withBytesPerCheckSum(CKBYTES) + .withChecksumType(ChecksumType.NULL).withBlockSize(DATA_BLOCK_SIZE) + .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) + .withIncludesTags(useTags).build(); + StoreFileWriter sfw = + new StoreFileWriter.Builder(conf, cacheConf, fs).withOutputDir(storeFileParentDir) + .withFileContext(meta).withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build(); byte[] cf = Bytes.toBytes("fam"); for (int i = 0; i < NUM_KV; ++i) { byte[] row = RandomKeyValueUtil.randomOrderedKey(rand, i); byte[] qualifier = RandomKeyValueUtil.randomRowOrQualifier(rand); byte[] value = RandomKeyValueUtil.randomValue(rand); KeyValue kv; - if(useTags) { + if (useTags) { Tag t = new ArrayBackedTag((byte) 1, "visibility"); List tagList = new ArrayList<>(); tagList.add(t); Tag[] tags = new Tag[1]; tags[0] = t; - kv = - new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length, - Math.abs(rand.nextLong()), generateKeyType(rand), value, 0, value.length, tagList); + kv = new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length, + Math.abs(rand.nextLong()), generateKeyType(rand), value, 0, value.length, tagList); } else { - kv = - new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length, - Math.abs(rand.nextLong()), generateKeyType(rand), value, 0, value.length); + kv = new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length, + Math.abs(rand.nextLong()), generateKeyType(rand), value, 0, value.length); } sfw.append(kv); } @@ -428,24 +410,21 @@ private void writeStoreFile(boolean useTags) throws IOException { } private void testCachingDataBlocksDuringCompactionInternals(boolean useTags, - boolean cacheBlocksOnCompaction, long cacheBlocksOnCompactionThreshold) - throws IOException, InterruptedException { + boolean cacheBlocksOnCompaction, long cacheBlocksOnCompactionThreshold) + throws IOException, InterruptedException { // create a localConf boolean localValue = conf.getBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, false); - long localCacheCompactedBlocksThreshold = conf - .getLong(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY, - CacheConfig.DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD); - boolean localCacheBloomBlocksValue = conf - .getBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, - CacheConfig.DEFAULT_CACHE_BLOOMS_ON_WRITE); - boolean localCacheIndexBlocksValue = conf - .getBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, - CacheConfig.DEFAULT_CACHE_INDEXES_ON_WRITE); + long localCacheCompactedBlocksThreshold = + conf.getLong(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY, + CacheConfig.DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD); + boolean localCacheBloomBlocksValue = conf.getBoolean( + CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, CacheConfig.DEFAULT_CACHE_BLOOMS_ON_WRITE); + boolean localCacheIndexBlocksValue = conf.getBoolean( + CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, CacheConfig.DEFAULT_CACHE_INDEXES_ON_WRITE); try { // Set the conf if testing caching compacted blocks on write - conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, - cacheBlocksOnCompaction); + conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, cacheBlocksOnCompaction); // set size threshold if testing compaction size threshold if (cacheBlocksOnCompactionThreshold > 0) { @@ -460,13 +439,9 @@ private void testCachingDataBlocksDuringCompactionInternals(boolean useTags, 
final String cf = "myCF"; final byte[] cfBytes = Bytes.toBytes(cf); final int maxVersions = 3; - ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder - .newBuilder(cfBytes) - .setCompressionType(compress) - .setBloomFilterType(BLOOM_TYPE) - .setMaxVersions(maxVersions) - .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) - .build(); + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfBytes) + .setCompressionType(compress).setBloomFilterType(BLOOM_TYPE).setMaxVersions(maxVersions) + .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()).build(); HRegion region = TEST_UTIL.createTestRegion(table, cfd, blockCache); int rowIdx = 0; long ts = EnvironmentEdgeManager.currentTime(); @@ -488,7 +463,7 @@ private void testCachingDataBlocksDuringCompactionInternals(boolean useTags, p.add(kv); } else { KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr), - ts++, Bytes.toBytes(valueStr)); + ts++, Bytes.toBytes(valueStr)); p.add(kv); } } @@ -522,12 +497,11 @@ private void testCachingDataBlocksDuringCompactionInternals(boolean useTags, // Data blocks should be cached in instances where we are caching blocks on write. In the case // of testing // BucketCache, we cannot verify block type as it is not stored in the cache. - boolean cacheOnCompactAndNonBucketCache = cacheBlocksOnCompaction - && !(blockCache instanceof BucketCache); + boolean cacheOnCompactAndNonBucketCache = + cacheBlocksOnCompaction && !(blockCache instanceof BucketCache); - String assertErrorMessage = "\nTest description: " + testDescription + - "\ncacheBlocksOnCompaction: " - + cacheBlocksOnCompaction + "\n"; + String assertErrorMessage = "\nTest description: " + testDescription + + "\ncacheBlocksOnCompaction: " + cacheBlocksOnCompaction + "\n"; if (cacheOnCompactAndNonBucketCache && cacheBlocksOnCompactionThreshold > 0) { if (cacheBlocksOnCompactionThreshold == CACHE_COMPACTION_HIGH_THRESHOLD) { @@ -558,7 +532,6 @@ private void testCachingDataBlocksDuringCompactionInternals(boolean useTags, } } - region.close(); } finally { // reset back @@ -584,7 +557,7 @@ public void testCachingDataBlocksDuringCompaction() throws IOException, Interrup @Test public void testCachingDataBlocksThresholdDuringCompaction() - throws IOException, InterruptedException { + throws IOException, InterruptedException { testCachingDataBlocksDuringCompactionInternals(false, true, CACHE_COMPACTION_HIGH_THRESHOLD); testCachingDataBlocksDuringCompactionInternals(false, true, CACHE_COMPACTION_LOW_THRESHOLD); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java index 91bc32c2a7d5..b898b4881160 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public void testQueue() throws Exception { CachedBlock cb9 = new CachedBlock(1000, "cb9", 9); CachedBlock cb10 = new CachedBlock(1500, "cb10", 10); - LruCachedBlockQueue queue = new LruCachedBlockQueue(10000,1000); + LruCachedBlockQueue queue = new LruCachedBlockQueue(10000, 1000); queue.add(cb1); queue.add(cb2); @@ -61,14 +61,13 @@ public void testQueue() throws Exception { queue.add(cb10); // We expect cb1 through cb8 to be in the queue - long expectedSize = cb1.heapSize() + cb2.heapSize() + cb3.heapSize() + - cb4.heapSize() + cb5.heapSize() + cb6.heapSize() + cb7.heapSize() + - cb8.heapSize(); + long expectedSize = cb1.heapSize() + cb2.heapSize() + cb3.heapSize() + cb4.heapSize() + + cb5.heapSize() + cb6.heapSize() + cb7.heapSize() + cb8.heapSize(); assertEquals(queue.heapSize(), expectedSize); for (int i = 1; i <= 8; i++) { - assertEquals(queue.pollLast().getCacheKey().getHfileName(), "cb"+i); + assertEquals(queue.pollLast().getCacheKey().getHfileName(), "cb" + i); } } @@ -85,7 +84,7 @@ public void testQueueSmallBlockEdgeCase() throws Exception { CachedBlock cb9 = new CachedBlock(1000, "cb9", 9); CachedBlock cb10 = new CachedBlock(1500, "cb10", 10); - LruCachedBlockQueue queue = new LruCachedBlockQueue(10000,1000); + LruCachedBlockQueue queue = new LruCachedBlockQueue(10000, 1000); queue.add(cb1); queue.add(cb2); @@ -106,14 +105,13 @@ public void testQueueSmallBlockEdgeCase() throws Exception { // and we must always maintain heapSize >= maxSize once we achieve it. // We expect cb0 through cb8 to be in the queue - long expectedSize = cb1.heapSize() + cb2.heapSize() + cb3.heapSize() + - cb4.heapSize() + cb5.heapSize() + cb6.heapSize() + cb7.heapSize() + - cb8.heapSize() + cb0.heapSize(); + long expectedSize = cb1.heapSize() + cb2.heapSize() + cb3.heapSize() + cb4.heapSize() + + cb5.heapSize() + cb6.heapSize() + cb7.heapSize() + cb8.heapSize() + cb0.heapSize(); assertEquals(queue.heapSize(), expectedSize); for (int i = 0; i <= 8; i++) { - assertEquals(queue.pollLast().getCacheKey().getHfileName(), "cb"+i); + assertEquals(queue.pollLast().getCacheKey().getHfileName(), "cb" + i); } } @@ -148,4 +146,3 @@ public BlockType getBlockType() { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java index 22d045dd267a..6f31d1d0bd55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,6 @@ import java.io.IOException; import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -55,7 +54,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestChecksum { @ClassRule @@ -64,21 +63,18 @@ public class TestChecksum { private static final Logger LOG = LoggerFactory.getLogger(TestHFileBlock.class); - static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = { - NONE, GZ }; + static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = { NONE, GZ }; - static final int[] BYTES_PER_CHECKSUM = { - 50, 500, 688, 16*1024, (16*1024+980), 64 * 1024}; + static final int[] BYTES_PER_CHECKSUM = { 50, 500, 688, 16 * 1024, (16 * 1024 + 980), 64 * 1024 }; - private static final HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private FileSystem fs; private HFileSystem hfs; @Before public void setUp() throws Exception { fs = HFileSystem.get(TEST_UTIL.getConfiguration()); - hfs = (HFileSystem)fs; + hfs = (HFileSystem) fs; } @Test @@ -99,14 +95,10 @@ public void testNewBlocksHaveDefaultChecksum() throws IOException { FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path); meta = new HFileContextBuilder().withHBaseCheckSum(true).build(); - ReaderContext context = new ReaderContextBuilder() - .withInputStreamWrapper(is) - .withFileSize(totalSize) - .withFileSystem((HFileSystem) fs) - .withFilePath(path) - .build(); - HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, - meta, ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration()); + ReaderContext context = new ReaderContextBuilder().withInputStreamWrapper(is) + .withFileSize(totalSize).withFileSystem((HFileSystem) fs).withFilePath(path).build(); + HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, + TEST_UTIL.getConfiguration()); HFileBlock b = hbr.readBlockData(0, -1, false, false, true); assertTrue(!b.isSharedMem()); assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode()); @@ -115,7 +107,7 @@ public void testNewBlocksHaveDefaultChecksum() throws IOException { private void verifyMBBCheckSum(ByteBuff buf) throws IOException { int size = buf.remaining() / 2 + 1; ByteBuff mbb = new MultiByteBuff(ByteBuffer.allocate(size), ByteBuffer.allocate(size)) - .position(0).limit(buf.remaining()); + .position(0).limit(buf.remaining()); for (int i = buf.position(); i < buf.limit(); i++) { mbb.put(buf.get(i)); } @@ -135,9 +127,7 @@ public void testVerifyCheckSum() throws IOException { for (ChecksumType ckt : ChecksumType.values()) { Path path = new Path(TEST_UTIL.getDataTestDir(), "checksum" + ckt.getName()); FSDataOutputStream os = fs.create(path); - HFileContext meta = new HFileContextBuilder() - .withChecksumType(ckt) - .build(); + HFileContext meta = new HFileContextBuilder().withChecksumType(ckt).build(); HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); DataOutputStream dos = hbw.startWriting(BlockType.DATA); for (int i = 0; i < intCount; ++i) { @@ -152,14 +142,10 @@ public void testVerifyCheckSum() throws IOException { FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path); 
meta = new HFileContextBuilder().withHBaseCheckSum(true).build(); - ReaderContext context = new ReaderContextBuilder() - .withInputStreamWrapper(is) - .withFileSize(totalSize) - .withFileSystem((HFileSystem) fs) - .withFilePath(path) - .build(); - HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, - meta, ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration()); + ReaderContext context = new ReaderContextBuilder().withInputStreamWrapper(is) + .withFileSize(totalSize).withFileSystem((HFileSystem) fs).withFilePath(path).build(); + HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, + TEST_UTIL.getConfiguration()); HFileBlock b = hbr.readBlockData(0, -1, false, false, true); assertTrue(!b.isSharedMem()); @@ -184,8 +170,7 @@ public void testVerifyCheckSum() throws IOException { } /** - * Introduce checksum failures and check that we can still read - * the data + * Introduce checksum failures and check that we can still read the data */ @Test public void testChecksumCorruption() throws IOException { @@ -196,17 +181,12 @@ public void testChecksumCorruption() throws IOException { protected void testChecksumCorruptionInternals(boolean useTags) throws IOException { for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) { for (boolean pread : new boolean[] { false, true }) { - LOG.info("testChecksumCorruption: Compression algorithm: " + algo + - ", pread=" + pread); - Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" - + algo); + LOG.info("testChecksumCorruption: Compression algorithm: " + algo + ", pread=" + pread); + Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo); FSDataOutputStream os = fs.create(path); - HFileContext meta = new HFileContextBuilder() - .withCompression(algo) - .withIncludesMvcc(true) - .withIncludesTags(useTags) - .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) - .build(); + HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true) + .withIncludesTags(useTags).withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) + .build(); HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); long totalSize = 0; for (int blockId = 0; blockId < 2; ++blockId) { @@ -223,30 +203,21 @@ protected void testChecksumCorruptionInternals(boolean useTags) throws IOExcepti // Do a read that purposely introduces checksum verification failures. FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path); - meta = new HFileContextBuilder() - .withCompression(algo) - .withIncludesMvcc(true) - .withIncludesTags(useTags) - .withHBaseCheckSum(true) - .build(); - ReaderContext context = new ReaderContextBuilder() - .withInputStreamWrapper(is) - .withFileSize(totalSize) - .withFileSystem(fs) - .withFilePath(path) - .build(); - HFileBlock.FSReader hbr = new CorruptedFSReaderImpl(context, meta, - TEST_UTIL.getConfiguration()); + meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true) + .withIncludesTags(useTags).withHBaseCheckSum(true).build(); + ReaderContext context = new ReaderContextBuilder().withInputStreamWrapper(is) + .withFileSize(totalSize).withFileSystem(fs).withFilePath(path).build(); + HFileBlock.FSReader hbr = + new CorruptedFSReaderImpl(context, meta, TEST_UTIL.getConfiguration()); HFileBlock b = hbr.readBlockData(0, -1, pread, false, true); b.sanityCheck(); assertEquals(4936, b.getUncompressedSizeWithoutHeader()); assertEquals(algo == GZ ? 
2173 : 4936, - b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes()); + b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes()); // read data back from the hfile, exclude header and checksum ByteBuff bb = b.unpack(meta, hbr).getBufferWithoutHeader(); // read back data - DataInputStream in = new DataInputStream( - new ByteArrayInputStream( - bb.array(), bb.arrayOffset(), bb.limit())); + DataInputStream in = + new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit())); // assert that we encountered hbase checksum verification failures // but still used hdfs checksums and read data successfully. @@ -256,8 +227,7 @@ protected void testChecksumCorruptionInternals(boolean useTags) throws IOExcepti // A single instance of hbase checksum failure causes the reader to // switch off hbase checksum verification for the next 100 read // requests. Verify that this is correct. - for (int i = 0; i < - HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) { + for (int i = 0; i < HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) { b = hbr.readBlockData(0, -1, pread, false, true); assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff); assertEquals(0, HFile.getAndResetChecksumFailuresCount()); @@ -282,12 +252,8 @@ protected void testChecksumCorruptionInternals(boolean useTags) throws IOExcepti HFileSystem newfs = new HFileSystem(conf, false); assertEquals(false, newfs.useHBaseChecksum()); is = new FSDataInputStreamWrapper(newfs, path); - context = new ReaderContextBuilder() - .withInputStreamWrapper(is) - .withFileSize(totalSize) - .withFileSystem(newfs) - .withFilePath(path) - .build(); + context = new ReaderContextBuilder().withInputStreamWrapper(is).withFileSize(totalSize) + .withFileSystem(newfs).withFilePath(path).build(); hbr = new CorruptedFSReaderImpl(context, meta, conf); b = hbr.readBlockData(0, -1, pread, false, true); is.close(); @@ -295,11 +261,11 @@ protected void testChecksumCorruptionInternals(boolean useTags) throws IOExcepti b = b.unpack(meta, hbr); assertEquals(4936, b.getUncompressedSizeWithoutHeader()); assertEquals(algo == GZ ? 2173 : 4936, - b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes()); + b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes()); // read data back from the hfile, exclude header and checksum bb = b.getBufferWithoutHeader(); // read back data - in = new DataInputStream(new ByteArrayInputStream( - bb.array(), bb.arrayOffset(), bb.limit())); + in = new DataInputStream( + new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit())); // assert that we did not encounter hbase checksum verification failures // but still used hdfs checksums and read data successfully. 
@@ -322,18 +288,13 @@ protected void testChecksumInternals(boolean useTags) throws IOException { Compression.Algorithm algo = NONE; for (boolean pread : new boolean[] { false, true }) { for (int bytesPerChecksum : BYTES_PER_CHECKSUM) { - Path path = new Path(TEST_UTIL.getDataTestDir(), "checksumChunk_" + - algo + bytesPerChecksum); + Path path = + new Path(TEST_UTIL.getDataTestDir(), "checksumChunk_" + algo + bytesPerChecksum); FSDataOutputStream os = fs.create(path); - HFileContext meta = new HFileContextBuilder() - .withCompression(algo) - .withIncludesMvcc(true) - .withIncludesTags(useTags) - .withHBaseCheckSum(true) - .withBytesPerCheckSum(bytesPerChecksum) - .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, - meta); + HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true) + .withIncludesTags(useTags).withHBaseCheckSum(true) + .withBytesPerCheckSum(bytesPerChecksum).build(); + HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); // write one block. The block has data // that is at least 6 times more than the checksum chunk size @@ -349,12 +310,12 @@ protected void testChecksumInternals(boolean useTags) throws IOException { long totalSize = hbw.getOnDiskSizeWithHeader(); os.close(); - long expectedChunks = ChecksumUtil.numChunks( - dataSize + HConstants.HFILEBLOCK_HEADER_SIZE, - bytesPerChecksum); - LOG.info("testChecksumChunks: pread={}, bytesPerChecksum={}, fileSize={}, " - + "dataSize={}, expectedChunks={}, compression={}", pread, bytesPerChecksum, - totalSize, dataSize, expectedChunks, algo.toString()); + long expectedChunks = + ChecksumUtil.numChunks(dataSize + HConstants.HFILEBLOCK_HEADER_SIZE, bytesPerChecksum); + LOG.info( + "testChecksumChunks: pread={}, bytesPerChecksum={}, fileSize={}, " + + "dataSize={}, expectedChunks={}, compression={}", + pread, bytesPerChecksum, totalSize, dataSize, expectedChunks, algo.toString()); // Verify hbase checksums. assertEquals(true, hfs.useHBaseChecksum()); @@ -362,22 +323,14 @@ protected void testChecksumInternals(boolean useTags) throws IOException { // Read data back from file. 
FSDataInputStream is = fs.open(path); FSDataInputStream nochecksum = hfs.getNoChecksumFs().open(path); - meta = new HFileContextBuilder() - .withCompression(algo) - .withIncludesMvcc(true) - .withIncludesTags(useTags) - .withHBaseCheckSum(true) - .withBytesPerCheckSum(bytesPerChecksum) - .build(); + meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true) + .withIncludesTags(useTags).withHBaseCheckSum(true) + .withBytesPerCheckSum(bytesPerChecksum).build(); ReaderContext context = new ReaderContextBuilder() .withInputStreamWrapper(new FSDataInputStreamWrapper(is, nochecksum)) - .withFileSize(totalSize) - .withFileSystem(hfs) - .withFilePath(path) - .build(); - HFileBlock.FSReader hbr = - new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, - TEST_UTIL.getConfiguration()); + .withFileSize(totalSize).withFileSystem(hfs).withFilePath(path).build(); + HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, + TEST_UTIL.getConfiguration()); HFileBlock b = hbr.readBlockData(0, -1, pread, false, true); assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff); is.close(); @@ -385,8 +338,8 @@ protected void testChecksumInternals(boolean useTags) throws IOException { assertEquals(dataSize, b.getUncompressedSizeWithoutHeader()); // verify that we have the expected number of checksum chunks - assertEquals(totalSize, HConstants.HFILEBLOCK_HEADER_SIZE + dataSize + - expectedChunks * HFileBlock.CHECKSUM_SIZE); + assertEquals(totalSize, + HConstants.HFILEBLOCK_HEADER_SIZE + dataSize + expectedChunks * HFileBlock.CHECKSUM_SIZE); // assert that we did not encounter hbase checksum verification failures assertEquals(0, HFile.getAndResetChecksumFailuresCount()); @@ -404,12 +357,10 @@ private void validateData(DataInputStream in) throws IOException { /** * This class is to test checksum behavior when data is corrupted. It mimics the following - * behavior: - * - When fs checksum is disabled, hbase may get corrupted data from hdfs. If verifyChecksum - * is true, it means hbase checksum is on and fs checksum is off, so we corrupt the data. - * - When fs checksum is enabled, hdfs will get a different copy from another node, and will - * always return correct data. So we don't corrupt the data when verifyChecksum for hbase is - * off. + * behavior: - When fs checksum is disabled, hbase may get corrupted data from hdfs. If + * verifyChecksum is true, it means hbase checksum is on and fs checksum is off, so we corrupt the + * data. - When fs checksum is enabled, hdfs will get a different copy from another node, and will + * always return correct data. So we don't corrupt the data when verifyChecksum for hbase is off. 
*/ static private class CorruptedFSReaderImpl extends HFileBlock.FSReaderImpl { /** @@ -435,7 +386,6 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, return b; } - @Override protected boolean readAtOffset(FSDataInputStream istream, ByteBuff dest, int size, boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java index 639c906413ed..36e1a7539e36 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestCombinedBlockCache { @ClassRule @@ -44,8 +44,7 @@ public class TestCombinedBlockCache { public void testCombinedCacheStats() { CacheStats lruCacheStats = new CacheStats("lruCacheStats", 2); CacheStats bucketCacheStats = new CacheStats("bucketCacheStats", 2); - CombinedCacheStats stats = - new CombinedCacheStats(lruCacheStats, bucketCacheStats); + CombinedCacheStats stats = new CombinedCacheStats(lruCacheStats, bucketCacheStats); double delta = 0.01; @@ -54,8 +53,8 @@ public void testCombinedCacheStats() { // bucket cache: 2 hit non-caching,1 miss non-caching/primary,1 fail insert lruCacheStats.hit(true, true, BlockType.DATA); lruCacheStats.miss(true, false, BlockType.DATA); - bucketCacheStats.hit(false,true, BlockType.DATA); - bucketCacheStats.hit(false,true, BlockType.DATA); + bucketCacheStats.hit(false, true, BlockType.DATA); + bucketCacheStats.hit(false, true, BlockType.DATA); bucketCacheStats.miss(false, true, BlockType.DATA); assertEquals(5, stats.getRequestCount()); @@ -71,7 +70,6 @@ public void testCombinedCacheStats() { assertEquals(0.4, stats.getMissRatio(), delta); assertEquals(0.5, stats.getMissCachingRatio(), delta); - // lru cache: 2 evicted, 1 evict // bucket cache: 1 evict lruCacheStats.evicted(1000, true); @@ -83,7 +81,7 @@ public void testCombinedCacheStats() { assertEquals(1, stats.getPrimaryEvictedCount()); assertEquals(1.0, stats.evictedPerEviction(), delta); - // lru cache: 1 fail insert + // lru cache: 1 fail insert lruCacheStats.failInsert(); assertEquals(1, stats.getFailedInserts()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index 4a385c143af6..eb73d09f789d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MetaCellComparator; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -56,8 +55,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos; + @RunWith(Parameterized.class) -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestFixedFileTrailer { @ClassRule @@ -68,8 +69,8 @@ public class TestFixedFileTrailer { private static final int MAX_COMPARATOR_NAME_LENGTH = 128; /** - * The number of used fields by version. Indexed by version minus two. - * Min version that we support is V2 + * The number of used fields by version. Indexed by version minus two. Min version that we support + * is V2 */ private static final int[] NUM_FIELDS_BY_VERSION = new int[] { 14, 15 }; @@ -79,8 +80,7 @@ public class TestFixedFileTrailer { private int version; static { - assert NUM_FIELDS_BY_VERSION.length == HFile.MAX_FORMAT_VERSION - - HFile.MIN_FORMAT_VERSION + 1; + assert NUM_FIELDS_BY_VERSION.length == HFile.MAX_FORMAT_VERSION - HFile.MIN_FORMAT_VERSION + 1; } public TestFixedFileTrailer(int version) { @@ -94,7 +94,7 @@ public TestFixedFileTrailer(int version) { public static Collection getParameters() { List versionsToTest = new ArrayList<>(); for (int v = HFile.MIN_FORMAT_VERSION; v <= HFile.MAX_FORMAT_VERSION; ++v) - versionsToTest.add(new Integer[] { v } ); + versionsToTest.add(new Integer[] { v }); return versionsToTest; } @@ -112,8 +112,7 @@ public void testComparatorIsHBase1Compatible() { assertEquals(KeyValue.COMPARATOR.getClass().getName(), pb.getComparatorClassName()); t.setComparatorClass(MetaCellComparator.META_COMPARATOR.getClass()); pb = t.toProtobuf(); - assertEquals(KeyValue.META_COMPARATOR.getClass().getName(), - pb.getComparatorClassName()); + assertEquals(KeyValue.META_COMPARATOR.getClass().getName(), pb.getComparatorClassName()); } @Test @@ -121,26 +120,26 @@ public void testCreateComparator() throws IOException { FixedFileTrailer t = new FixedFileTrailer(version, HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); try { assertEquals(CellComparatorImpl.class, - t.createComparator(KeyValue.COMPARATOR.getLegacyKeyComparatorName()).getClass()); + t.createComparator(KeyValue.COMPARATOR.getLegacyKeyComparatorName()).getClass()); assertEquals(CellComparatorImpl.class, - t.createComparator(KeyValue.COMPARATOR.getClass().getName()).getClass()); + t.createComparator(KeyValue.COMPARATOR.getClass().getName()).getClass()); assertEquals(CellComparatorImpl.class, - t.createComparator(CellComparator.class.getName()).getClass()); + t.createComparator(CellComparator.class.getName()).getClass()); assertEquals(MetaCellComparator.class, - t.createComparator(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()).getClass()); + t.createComparator(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName()).getClass()); assertEquals(MetaCellComparator.class, - t.createComparator(KeyValue.META_COMPARATOR.getClass().getName()).getClass()); + t.createComparator(KeyValue.META_COMPARATOR.getClass().getName()).getClass()); assertEquals(MetaCellComparator.class, 
t.createComparator("org.apache.hadoop.hbase.CellComparator$MetaCellComparator").getClass()); assertEquals(MetaCellComparator.class, t.createComparator("org.apache.hadoop.hbase.CellComparatorImpl$MetaCellComparator") .getClass()); - assertEquals(MetaCellComparator.class, t.createComparator( - MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); - assertEquals(MetaCellComparator.META_COMPARATOR.getClass(), t.createComparator( - MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); - assertEquals(CellComparatorImpl.COMPARATOR.getClass(), t.createComparator( - MetaCellComparator.COMPARATOR.getClass().getName()).getClass()); + assertEquals(MetaCellComparator.class, + t.createComparator(MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); + assertEquals(MetaCellComparator.META_COMPARATOR.getClass(), + t.createComparator(MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass()); + assertEquals(CellComparatorImpl.COMPARATOR.getClass(), + t.createComparator(MetaCellComparator.COMPARATOR.getClass().getName()).getClass()); assertNull(t.createComparator(Bytes.BYTES_RAWCOMPARATOR.getClass().getName())); assertNull(t.createComparator("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")); } catch (IOException e) { @@ -156,8 +155,7 @@ public void testCreateComparator() throws IOException { @Test public void testTrailer() throws IOException { - FixedFileTrailer t = new FixedFileTrailer(version, - HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); + FixedFileTrailer t = new FixedFileTrailer(version, HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); t.setDataIndexCount(3); t.setEntryCount(((long) Integer.MAX_VALUE) + 1); @@ -189,8 +187,8 @@ public void testTrailer() throws IOException { // Finished writing, trying to read. { DataInputStream dis = new DataInputStream(bais); - FixedFileTrailer t2 = new FixedFileTrailer(version, - HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); + FixedFileTrailer t2 = + new FixedFileTrailer(version, HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); t2.deserialize(dis); assertEquals(-1, bais.read()); // Ensure we have read everything. checkLoadedTrailer(version, t, t2); @@ -201,7 +199,7 @@ public void testTrailer() throws IOException { { for (byte invalidVersion : new byte[] { HFile.MIN_FORMAT_VERSION - 1, - HFile.MAX_FORMAT_VERSION + 1}) { + HFile.MAX_FORMAT_VERSION + 1 }) { bytes[bytes.length - 1] = invalidVersion; writeTrailer(trailerPath, null, bytes); try { @@ -210,11 +208,15 @@ public void testTrailer() throws IOException { } catch (IllegalArgumentException ex) { // Make it easy to debug this. String msg = ex.getMessage(); - String cleanMsg = msg.replaceAll( - "^(java(\\.[a-zA-Z]+)+:\\s+)?|\\s+\\(.*\\)\\s*$", ""); - assertEquals("Actual exception message is \"" + msg + "\".\n" + - "Cleaned-up message", // will be followed by " expected: ..." - "Invalid HFile version: " + invalidVersion, cleanMsg); + String cleanMsg = msg.replaceAll("^(java(\\.[a-zA-Z]+)+:\\s+)?|\\s+\\(.*\\)\\s*$", ""); + assertEquals("Actual exception message is \"" + msg + "\".\n" + "Cleaned-up message", // will + // be + // followed + // by + // " + // expected: + // ..." 
+ "Invalid HFile version: " + invalidVersion, cleanMsg); LOG.info("Got an expected exception: " + msg); } } @@ -229,17 +231,16 @@ public void testTrailer() throws IOException { checkLoadedTrailer(version, t, t4); String trailerStr = t.toString(); - assertEquals("Invalid number of fields in the string representation " - + "of the trailer: " + trailerStr, NUM_FIELDS_BY_VERSION[version - 2], - trailerStr.split(", ").length); + assertEquals( + "Invalid number of fields in the string representation " + "of the trailer: " + trailerStr, + NUM_FIELDS_BY_VERSION[version - 2], trailerStr.split(", ").length); assertEquals(trailerStr, t4.toString()); } @Test public void testTrailerForV2NonPBCompatibility() throws Exception { if (version == 2) { - FixedFileTrailer t = new FixedFileTrailer(version, - HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM); + FixedFileTrailer t = new FixedFileTrailer(version, HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM); t.setDataIndexCount(3); t.setEntryCount(((long) Integer.MAX_VALUE) + 1); t.setLastDataBlockOffset(291); @@ -265,8 +266,8 @@ public void testTrailerForV2NonPBCompatibility() throws Exception { ByteArrayInputStream bais = new ByteArrayInputStream(bytes); { DataInputStream dis = new DataInputStream(bais); - FixedFileTrailer t2 = new FixedFileTrailer(version, - HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM); + FixedFileTrailer t2 = + new FixedFileTrailer(version, HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM); t2.deserialize(dis); assertEquals(-1, bais.read()); // Ensure we have read everything. checkLoadedTrailer(version, t, t2); @@ -292,21 +293,20 @@ private void serializeAsWritable(DataOutputStream output, FixedFileTrailer fft) output.writeLong(fft.getFirstDataBlockOffset()); output.writeLong(fft.getLastDataBlockOffset()); Bytes.writeStringFixedSize(output, fft.getComparatorClassName(), MAX_COMPARATOR_NAME_LENGTH); - output.writeInt(FixedFileTrailer.materializeVersion(fft.getMajorVersion(), - fft.getMinorVersion())); + output.writeInt( + FixedFileTrailer.materializeVersion(fft.getMajorVersion(), fft.getMinorVersion())); } - private FixedFileTrailer readTrailer(Path trailerPath) throws IOException { FSDataInputStream fsdis = fs.open(trailerPath); - FixedFileTrailer trailerRead = FixedFileTrailer.readFromStream(fsdis, - fs.getFileStatus(trailerPath).getLen()); + FixedFileTrailer trailerRead = + FixedFileTrailer.readFromStream(fsdis, fs.getFileStatus(trailerPath).getLen()); fsdis.close(); return trailerRead; } - private void writeTrailer(Path trailerPath, FixedFileTrailer t, - byte[] useBytesInstead) throws IOException { + private void writeTrailer(Path trailerPath, FixedFileTrailer t, byte[] useBytesInstead) + throws IOException { assert (t == null) != (useBytesInstead == null); // Expect one non-null. FSDataOutputStream fsdos = fs.create(trailerPath); @@ -319,42 +319,33 @@ private void writeTrailer(Path trailerPath, FixedFileTrailer t, fsdos.close(); } - private void checkLoadedTrailer(int version, FixedFileTrailer expected, - FixedFileTrailer loaded) throws IOException { + private void checkLoadedTrailer(int version, FixedFileTrailer expected, FixedFileTrailer loaded) + throws IOException { assertEquals(version, loaded.getMajorVersion()); assertEquals(expected.getDataIndexCount(), loaded.getDataIndexCount()); - assertEquals(Math.min(expected.getEntryCount(), - version == 1 ? Integer.MAX_VALUE : Long.MAX_VALUE), - loaded.getEntryCount()); + assertEquals( + Math.min(expected.getEntryCount(), version == 1 ? 
Integer.MAX_VALUE : Long.MAX_VALUE), + loaded.getEntryCount()); if (version == 1) { assertEquals(expected.getFileInfoOffset(), loaded.getFileInfoOffset()); } if (version == 2) { - assertEquals(expected.getLastDataBlockOffset(), - loaded.getLastDataBlockOffset()); - assertEquals(expected.getNumDataIndexLevels(), - loaded.getNumDataIndexLevels()); + assertEquals(expected.getLastDataBlockOffset(), loaded.getLastDataBlockOffset()); + assertEquals(expected.getNumDataIndexLevels(), loaded.getNumDataIndexLevels()); assertEquals(expected.createComparator().getClass().getName(), - loaded.createComparator().getClass().getName()); - assertEquals(expected.getFirstDataBlockOffset(), - loaded.getFirstDataBlockOffset()); - assertTrue( - expected.createComparator() instanceof CellComparatorImpl); - assertEquals(expected.getUncompressedDataIndexSize(), - loaded.getUncompressedDataIndexSize()); + loaded.createComparator().getClass().getName()); + assertEquals(expected.getFirstDataBlockOffset(), loaded.getFirstDataBlockOffset()); + assertTrue(expected.createComparator() instanceof CellComparatorImpl); + assertEquals(expected.getUncompressedDataIndexSize(), loaded.getUncompressedDataIndexSize()); } - assertEquals(expected.getLoadOnOpenDataOffset(), - loaded.getLoadOnOpenDataOffset()); + assertEquals(expected.getLoadOnOpenDataOffset(), loaded.getLoadOnOpenDataOffset()); assertEquals(expected.getMetaIndexCount(), loaded.getMetaIndexCount()); - assertEquals(expected.getTotalUncompressedBytes(), - loaded.getTotalUncompressedBytes()); + assertEquals(expected.getTotalUncompressedBytes(), loaded.getTotalUncompressedBytes()); } - } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java index 5074bbaa0814..977dbdae5be8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,15 +45,14 @@ import org.junit.runners.Parameterized.Parameters; /** - * Make sure we always cache important block types, such as index blocks, as - * long as we have a block cache, even though block caching might be disabled - * for the column family. - * - *
TODO: This test writes a lot of data and only tests the most basic of metrics. Cache stats
- * need to reveal more about what is being cached whether DATA or INDEX blocks and then we could
- * do more verification in this test.
+ * Make sure we always cache important block types, such as index blocks, as long as we have a block
+ * cache, even though block caching might be disabled for the column family.
+ *
      + * TODO: This test writes a lot of data and only tests the most basic of metrics. Cache stats need + * to reveal more about what is being cached whether DATA or INDEX blocks and then we could do more + * verification in this test. */ -@Category({IOTests.class, MediumTests.class}) +@Category({ IOTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class TestForceCacheImportantBlocks { @@ -77,8 +76,7 @@ public class TestForceCacheImportantBlocks { /** Extremely small block size, so that we can get some index blocks */ private static final int BLOCK_SIZE = 256; - private static final Algorithm COMPRESSION_ALGORITHM = - Compression.Algorithm.GZ; + private static final Algorithm COMPRESSION_ALGORITHM = Compression.Algorithm.GZ; private static final BloomType BLOOM_TYPE = BloomType.ROW; @SuppressWarnings("unused") @@ -89,10 +87,7 @@ public class TestForceCacheImportantBlocks { @Parameters public static Collection parameters() { // HFile versions - return Arrays.asList( - new Object[] { 3, true }, - new Object[] { 3, false } - ); + return Arrays.asList(new Object[] { 3, true }, new Object[] { 3, false }); } public TestForceCacheImportantBlocks(int hfileVersion, boolean cfCacheEnabled) { @@ -120,8 +115,8 @@ public void testCacheBlocks() throws IOException { writeTestData(region); assertEquals(0, stats.getHitCount()); assertEquals(0, HFile.DATABLOCK_READ_COUNT.sum()); - // Do a single get, take count of caches. If we are NOT caching DATA blocks, the miss - // count should go up. Otherwise, all should be cached and the miss count should not rise. + // Do a single get, take count of caches. If we are NOT caching DATA blocks, the miss + // count should go up. Otherwise, all should be cached and the miss count should not rise. region.get(new Get(Bytes.toBytes("row" + 0))); assertTrue(stats.getHitCount() > 0); assertTrue(HFile.DATABLOCK_READ_COUNT.sum() > 0); @@ -137,7 +132,7 @@ private void writeTestData(HRegion region) throws IOException { for (int j = 0; j < NUM_COLS_PER_ROW; ++j) { for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) { put.addColumn(CF_BYTES, Bytes.toBytes("col" + j), ts, - Bytes.toBytes("value" + i + "_" + j + "_" + ts)); + Bytes.toBytes("value" + i + "_" + j + "_" + ts)); } } region.put(put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index cb9fc4880488..6ce3b21f1ecd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import java.util.Objects; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -95,24 +94,23 @@ /** * test hfile features. 
*/ -@Category({IOTests.class, SmallTests.class}) -public class TestHFile { +@Category({ IOTests.class, SmallTests.class }) +public class TestHFile { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFile.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHFile.class); - @Rule public TestName testName = new TestName(); + @Rule + public TestName testName = new TestName(); private static final Logger LOG = LoggerFactory.getLogger(TestHFile.class); private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2; private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static String ROOT_DIR = - TEST_UTIL.getDataTestDir("TestHFile").toString(); + private static String ROOT_DIR = TEST_UTIL.getDataTestDir("TestHFile").toString(); private final int minBlockSize = 512; private static String localFormatter = "%010d"; private static CacheConfig cacheConf; - private static Configuration conf ; + private static Configuration conf; private static FileSystem fs; @BeforeClass @@ -130,9 +128,8 @@ public static Reader createReaderFromStream(ReaderContext context, CacheConfig c preadReader.close(); context = new ReaderContextBuilder() .withFileSystemAndPath(context.getFileSystem(), context.getFilePath()) - .withReaderType(ReaderType.STREAM) - .build(); - Reader streamReader = HFile.createReader(context, fileInfo, cacheConf, conf); + .withReaderType(ReaderType.STREAM).build(); + Reader streamReader = HFile.createReader(context, fileInfo, cacheConf, conf); return streamReader; } @@ -279,18 +276,16 @@ private void readStoreFile(Path storeFilePath, Configuration conf, ByteBuffAlloc private Path writeStoreFile() throws IOException { Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "TestHFile"); HFileContext meta = new HFileContextBuilder().withBlockSize(64 * 1024).build(); - StoreFileWriter sfw = - new StoreFileWriter.Builder(conf, fs).withOutputDir(storeFileParentDir) - .withFileContext(meta).build(); + StoreFileWriter sfw = new StoreFileWriter.Builder(conf, fs).withOutputDir(storeFileParentDir) + .withFileContext(meta).build(); final int rowLen = 32; Random rand = ThreadLocalRandom.current(); for (int i = 0; i < 1000; ++i) { byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i); byte[] v = RandomKeyValueUtil.randomValue(rand); int cfLen = rand.nextInt(k.length - rowLen + 1); - KeyValue kv = - new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen, - k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length); + KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen, + k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length); sfw.append(kv); } @@ -313,8 +308,7 @@ public static KeyValue.Type generateKeyType(Random rand) { } /** - * Test empty HFile. - * Test all features work reasonably when hfile is empty of entries. + * Test empty HFile. Test all features work reasonably when hfile is empty of entries. 
* @throws IOException */ @Test @@ -353,17 +347,13 @@ public void testCorruptOutOfOrderHFileWrite() throws IOException { FSDataOutputStream mockedOutputStream = Mockito.mock(FSDataOutputStream.class); String columnFamily = "MyColumnFamily"; String tableName = "MyTableName"; - HFileContext fileContext = new HFileContextBuilder() - .withHFileName(testName.getMethodName() + "HFile") - .withBlockSize(minBlockSize) - .withColumnFamily(Bytes.toBytes(columnFamily)) - .withTableName(Bytes.toBytes(tableName)) - .withHBaseCheckSum(false) - .withCompression(Compression.Algorithm.NONE) - .withCompressTags(false) - .build(); - HFileWriterImpl writer = new HFileWriterImpl(conf, cacheConf, path, mockedOutputStream, - fileContext); + HFileContext fileContext = + new HFileContextBuilder().withHFileName(testName.getMethodName() + "HFile") + .withBlockSize(minBlockSize).withColumnFamily(Bytes.toBytes(columnFamily)) + .withTableName(Bytes.toBytes(tableName)).withHBaseCheckSum(false) + .withCompression(Compression.Algorithm.NONE).withCompressTags(false).build(); + HFileWriterImpl writer = + new HFileWriterImpl(conf, cacheConf, path, mockedOutputStream, fileContext); CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); byte[] row = Bytes.toBytes("foo"); byte[] qualifier = Bytes.toBytes("qualifier"); @@ -373,13 +363,13 @@ public void testCorruptOutOfOrderHFileWrite() throws IOException { long secondTS = 101L; Cell firstCell = cellBuilder.setRow(row).setValue(val).setTimestamp(firstTS) .setQualifier(qualifier).setFamily(cf).setType(Cell.Type.Put).build(); - Cell secondCell= cellBuilder.setRow(row).setValue(val).setTimestamp(secondTS) + Cell secondCell = cellBuilder.setRow(row).setValue(val).setTimestamp(secondTS) .setQualifier(qualifier).setFamily(cf).setType(Cell.Type.Put).build(); - //second Cell will sort "higher" than the first because later timestamps should come first + // second Cell will sort "higher" than the first because later timestamps should come first writer.append(firstCell); try { writer.append(secondCell); - } catch(IOException ie){ + } catch (IOException ie) { String message = ie.getMessage(); Assert.assertTrue(message.contains("not lexically larger")); Assert.assertTrue(message.contains(tableName)); @@ -392,11 +382,11 @@ public void testCorruptOutOfOrderHFileWrite() throws IOException { public static void truncateFile(FileSystem fs, Path src, Path dst) throws IOException { FileStatus fst = fs.getFileStatus(src); long len = fst.getLen(); - len = len / 2 ; + len = len / 2; // create a truncated hfile FSDataOutputStream fdos = fs.create(dst); - byte[] buf = new byte[(int)len]; + byte[] buf = new byte[(int) len]; FSDataInputStream fdis = fs.open(src); fdis.read(buf); fdos.write(buf); @@ -410,9 +400,9 @@ public static void truncateFile(FileSystem fs, Path src, Path dst) throws IOExce @Test public void testCorruptTruncatedHFile() throws IOException { Path f = new Path(ROOT_DIR, testName.getMethodName()); - HFileContext context = new HFileContextBuilder().build(); - Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f) - .withFileContext(context).create(); + HFileContext context = new HFileContextBuilder().build(); + Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f).withFileContext(context) + .create(); writeSomeRecords(w, 0, 100, false); w.close(); @@ -457,25 +447,22 @@ private void readAllRecords(HFileScanner scanner) throws IOException { } // read the records and check - private int readAndCheckbytes(HFileScanner scanner, int 
start, int n) - throws IOException { + private int readAndCheckbytes(HFileScanner scanner, int start, int n) throws IOException { String value = "value"; int i = start; for (; i < (start + n); i++) { - ByteBuffer key = ByteBuffer.wrap(((KeyValue)scanner.getKey()).getKey()); + ByteBuffer key = ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey()); ByteBuffer val = scanner.getValue(); String keyStr = String.format(localFormatter, Integer.valueOf(i)); String valStr = value + keyStr; KeyValue kv = new KeyValue(Bytes.toBytes(keyStr), Bytes.toBytes("family"), Bytes.toBytes("qual"), Bytes.toBytes(valStr)); - byte[] keyBytes = new KeyValue.KeyOnlyKeyValue(Bytes.toBytes(key), 0, - Bytes.toBytes(key).length).getKey(); - assertTrue("bytes for keys do not match " + keyStr + " " + - Bytes.toString(Bytes.toBytes(key)), - Arrays.equals(kv.getKey(), keyBytes)); - byte [] valBytes = Bytes.toBytes(val); - assertTrue("bytes for vals do not match " + valStr + " " + - Bytes.toString(valBytes), + byte[] keyBytes = + new KeyValue.KeyOnlyKeyValue(Bytes.toBytes(key), 0, Bytes.toBytes(key).length).getKey(); + assertTrue("bytes for keys do not match " + keyStr + " " + Bytes.toString(Bytes.toBytes(key)), + Arrays.equals(kv.getKey(), keyBytes)); + byte[] valBytes = Bytes.toBytes(val); + assertTrue("bytes for vals do not match " + valStr + " " + Bytes.toString(valBytes), Arrays.equals(Bytes.toBytes(valStr), valBytes)); if (!scanner.next()) { break; @@ -497,7 +484,7 @@ private void writeRecords(Writer writer, boolean useTags) throws IOException { } private FSDataOutputStream createFSOutput(Path name) throws IOException { - //if (fs.exists(name)) fs.delete(name, true); + // if (fs.exists(name)) fs.delete(name, true); FSDataOutputStream fout = fs.create(name); return fout; } @@ -510,16 +497,12 @@ void basicWithSomeCodec(String codec, boolean useTags) throws IOException { if (useTags) { conf.setInt("hfile.format.version", 3); } - Path ncHFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString() + useTags); + Path ncHFile = new Path(ROOT_DIR, "basic.hfile." 
+ codec.toString() + useTags); FSDataOutputStream fout = createFSOutput(ncHFile); - HFileContext meta = new HFileContextBuilder() - .withBlockSize(minBlockSize) - .withCompression(HFileWriterImpl.compressionByName(codec)) - .build(); - Writer writer = HFile.getWriterFactory(conf, cacheConf) - .withOutputStream(fout) - .withFileContext(meta) - .create(); + HFileContext meta = new HFileContextBuilder().withBlockSize(minBlockSize) + .withCompression(HFileWriterImpl.compressionByName(codec)).build(); + Writer writer = HFile.getWriterFactory(conf, cacheConf).withOutputStream(fout) + .withFileContext(meta).create(); LOG.info(Objects.toString(writer)); writeRecords(writer, useTags); fout.close(); @@ -536,11 +519,10 @@ void basicWithSomeCodec(String codec, boolean useTags) throws IOException { int seekTo = scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(50))); System.out.println(seekTo); assertTrue("location lookup failed", - scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(50))) == 0); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(50))) == 0); // read the key and see if it matches - ByteBuffer readKey = ByteBuffer.wrap(((KeyValue)scanner.getKey()).getKey()); - assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50), - Bytes.toBytes(readKey))); + ByteBuffer readKey = ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey()); + assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50), Bytes.toBytes(readKey))); scanner.seekTo(KeyValueUtil.createKeyValueFromKey(getSomeKey(0))); ByteBuffer val1 = scanner.getValue(); @@ -568,7 +550,11 @@ private void writeNumMetablocks(Writer writer, int n) { for (int i = 0; i < n; i++) { writer.appendMetaBlock("HFileMeta" + i, new Writable() { private int val; - public Writable setVal(int val) { this.val = val; return this; } + + public Writable setVal(int val) { + this.val = val; + return this; + } @Override public void write(DataOutput out) throws IOException { @@ -576,7 +562,8 @@ public void write(DataOutput out) throws IOException { } @Override - public void readFields(DataInput in) throws IOException { } + public void readFields(DataInput in) throws IOException { + } }.setVal(i)); } } @@ -588,13 +575,9 @@ private void someTestingWithMetaBlock(Writer writer) { private void readNumMetablocks(Reader reader, int n) throws IOException { for (int i = 0; i < n; i++) { ByteBuff actual = reader.getMetaBlock("HFileMeta" + i, false).getBufferWithoutHeader(); - ByteBuffer expected = - ByteBuffer.wrap(Bytes.toBytes("something to test" + i)); - assertEquals( - "failed to match metadata", - Bytes.toStringBinary(expected), - Bytes.toStringBinary(actual.array(), actual.arrayOffset() + actual.position(), - actual.capacity())); + ByteBuffer expected = ByteBuffer.wrap(Bytes.toBytes("something to test" + i)); + assertEquals("failed to match metadata", Bytes.toStringBinary(expected), Bytes.toStringBinary( + actual.array(), actual.arrayOffset() + actual.position(), actual.capacity())); } } @@ -605,13 +588,11 @@ private void someReadingWithMetaBlock(Reader reader) throws IOException { private void metablocks(final String compress) throws Exception { Path mFile = new Path(ROOT_DIR, "meta.hfile"); FSDataOutputStream fout = createFSOutput(mFile); - HFileContext meta = new HFileContextBuilder() - .withCompression(HFileWriterImpl.compressionByName(compress)) - .withBlockSize(minBlockSize).build(); - Writer writer = HFile.getWriterFactory(conf, cacheConf) - .withOutputStream(fout) - .withFileContext(meta) - .create(); + 
HFileContext meta = + new HFileContextBuilder().withCompression(HFileWriterImpl.compressionByName(compress)) + .withBlockSize(minBlockSize).build(); + Writer writer = HFile.getWriterFactory(conf, cacheConf).withOutputStream(fout) + .withFileContext(meta).create(); someTestingWithMetaBlock(writer); writer.close(); fout.close(); @@ -633,18 +614,15 @@ public void testMetaBlocks() throws Exception { @Test public void testNullMetaBlocks() throws Exception { - for (Compression.Algorithm compressAlgo : - HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS) { + for (Compression.Algorithm compressAlgo : HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS) { Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile"); FSDataOutputStream fout = createFSOutput(mFile); HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo) - .withBlockSize(minBlockSize).build(); - Writer writer = HFile.getWriterFactory(conf, cacheConf) - .withOutputStream(fout) - .withFileContext(meta) - .create(); - KeyValue kv = new KeyValue(Bytes.toBytes("foo"), Bytes.toBytes("f1"), null, - Bytes.toBytes("value")); + .withBlockSize(minBlockSize).build(); + Writer writer = HFile.getWriterFactory(conf, cacheConf).withOutputStream(fout) + .withFileContext(meta).create(); + KeyValue kv = + new KeyValue(Bytes.toBytes("foo"), Bytes.toBytes("f1"), null, Bytes.toBytes("value")); writer.append(kv); writer.close(); fout.close(); @@ -667,36 +645,25 @@ public void testCompressionOrdinance() { @Test public void testShortMidpointSameQual() { - Cell left = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(Bytes.toBytes("a")) - .setFamily(Bytes.toBytes("a")) - .setQualifier(Bytes.toBytes("a")) - .setTimestamp(11) - .setType(Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build(); - Cell right = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(Bytes.toBytes("a")) - .setFamily(Bytes.toBytes("a")) - .setQualifier(Bytes.toBytes("a")) - .setTimestamp(9) - .setType(Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build(); + Cell left = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("a")) + .setFamily(Bytes.toBytes("a")).setQualifier(Bytes.toBytes("a")).setTimestamp(11) + .setType(Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build(); + Cell right = + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(Bytes.toBytes("a")) + .setFamily(Bytes.toBytes("a")).setQualifier(Bytes.toBytes("a")).setTimestamp(9) + .setType(Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build(); Cell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0); - assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0); } private Cell getCell(byte[] row, byte[] family, byte[] qualifier) { - return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build(); + return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row) + 
.setFamily(family).setQualifier(qualifier).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build(); } @Test @@ -704,72 +671,64 @@ public void testGetShortMidpoint() { Cell left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); Cell right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); Cell mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) <= 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = getCell(Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("a")); mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); + assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); left = getCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = getCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a")); mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); + assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = getCell(Bytes.toBytes("bbbbbbb"), Bytes.toBytes("a"), Bytes.toBytes("a")); mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0); + assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0); assertEquals(1, mid.getRowLength()); left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = getCell(Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("a")); mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); + assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); left = getCell(Bytes.toBytes("a"), 
Bytes.toBytes("a"), Bytes.toBytes("a")); right = getCell(Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaa"), Bytes.toBytes("b")); mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0); + assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0); assertEquals(2, mid.getFamilyLength()); left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("aaaaaaaaa")); mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0); + assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) < 0); assertEquals(2, mid.getQualifierLength()); left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("b")); mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); + assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) <= 0); assertEquals(1, mid.getQualifierLength()); // Verify boundary conditions - left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), new byte[] { 0x00, (byte)0xFE }); - right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), new byte[] { 0x00, (byte)0xFF }); + left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), new byte[] { 0x00, (byte) 0xFE }); + right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), new byte[] { 0x00, (byte) 0xFF }); mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0); + assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0); left = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), new byte[] { 0x00, 0x12 }); right = getCell(Bytes.toBytes("a"), Bytes.toBytes("a"), new byte[] { 0x00, 0x12, 0x00 }); mid = HFileWriterImpl.getMidpoint(CellComparatorImpl.COMPARATOR, left, right); @@ -782,10 +741,9 @@ public void testGetShortMidpoint() { left = getCell(Bytes.toBytes("g"), Bytes.toBytes("a"), Bytes.toBytes("a")); right = getCell(Bytes.toBytes("i"), Bytes.toBytes("a"), Bytes.toBytes("a")); mid = HFileWriterImpl.getMidpoint(MetaCellComparator.META_COMPARATOR, left, right); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) 
< 0); - assertTrue(PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0); + assertTrue(PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, left, mid) < 0); + assertTrue( + PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, mid, right) == 0); byte[] family = Bytes.toBytes("family"); byte[] qualA = Bytes.toBytes("qfA"); byte[] qualB = Bytes.toBytes("qfB"); @@ -799,7 +757,7 @@ public void testGetShortMidpoint() { assertTrue((keyComparator.compare(kv2, newKey)) > 0); byte[] expectedArray = Bytes.toBytes("the r"); Bytes.equals(newKey.getRowArray(), newKey.getRowOffset(), newKey.getRowLength(), expectedArray, - 0, expectedArray.length); + 0, expectedArray.length); // verify: same with "row + family + qualifier", return rightKey directly kv1 = new KeyValue(Bytes.toBytes("ilovehbase"), family, qualA, 5, Type.Put); @@ -844,7 +802,7 @@ public void testGetShortMidpoint() { assertTrue((keyComparator.compare(kv2, newKey)) > 0); expectedArray = Bytes.toBytes("ilovehbasea"); Bytes.equals(newKey.getRowArray(), newKey.getRowOffset(), newKey.getRowLength(), expectedArray, - 0, expectedArray.length); + 0, expectedArray.length); // verify only 1 offset scenario kv1 = new KeyValue(Bytes.toBytes("100abcdefg"), family, qualA, ts, Type.Put); kv2 = new KeyValue(Bytes.toBytes("101abcdefg"), family, qualA, ts, Type.Put); @@ -854,7 +812,7 @@ public void testGetShortMidpoint() { assertTrue((keyComparator.compare(kv2, newKey)) > 0); expectedArray = Bytes.toBytes("101"); Bytes.equals(newKey.getRowArray(), newKey.getRowOffset(), newKey.getRowLength(), expectedArray, - 0, expectedArray.length); + 0, expectedArray.length); } @Test @@ -865,18 +823,17 @@ public void testDBEShipped() throws IOException { continue; } Path f = new Path(ROOT_DIR, testName.getMethodName() + "_" + encoding); - HFileContext context = new HFileContextBuilder() - .withIncludesTags(false) - .withDataBlockEncoding(encoding).build(); + HFileContext context = + new HFileContextBuilder().withIncludesTags(false).withDataBlockEncoding(encoding).build(); HFileWriterImpl writer = (HFileWriterImpl) HFile.getWriterFactory(conf, cacheConf) .withPath(fs, f).withFileContext(context).create(); KeyValue kv = new KeyValue(Bytes.toBytes("testkey1"), Bytes.toBytes("family"), Bytes.toBytes("qual"), Bytes.toBytes("testvalue")); KeyValue kv2 = new KeyValue(Bytes.toBytes("testkey2"), Bytes.toBytes("family"), - Bytes.toBytes("qual"), Bytes.toBytes("testvalue")); + Bytes.toBytes("qual"), Bytes.toBytes("testvalue")); KeyValue kv3 = new KeyValue(Bytes.toBytes("testkey3"), Bytes.toBytes("family"), - Bytes.toBytes("qual"), Bytes.toBytes("testvalue")); + Bytes.toBytes("qual"), Bytes.toBytes("testvalue")); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); ByteBuffer buffer2 = ByteBuffer.wrap(kv2.getBuffer()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java index 873976f05dfa..62b8e6bbca88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -84,7 +84,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({IOTests.class, LargeTests.class}) +@Category({ IOTests.class, LargeTests.class }) @RunWith(Parameterized.class) public class TestHFileBlock { @@ -218,8 +218,7 @@ static int writeTestKeyValues(HFileBlock.Writer hbw, int seed, boolean includesM RNG.nextBytes(value); } if (0 < i && RNG.nextFloat() < CHANCE_TO_REPEAT) { - timestamp = keyValues.get( - RNG.nextInt(keyValues.size())).getTimestamp(); + timestamp = keyValues.get(RNG.nextInt(keyValues.size())).getTimestamp(); } else { timestamp = RNG.nextLong(); } @@ -247,8 +246,7 @@ static int writeTestKeyValues(HFileBlock.Writer hbw, int seed, boolean includesM return totalSize; } - public byte[] createTestV1Block(Compression.Algorithm algo) - throws IOException { + public byte[] createTestV1Block(Compression.Algorithm algo) throws IOException { Compressor compressor = algo.getCompressor(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); OutputStream os = algo.createCompressionStream(baos, compressor, 0); @@ -260,15 +258,12 @@ public byte[] createTestV1Block(Compression.Algorithm algo) return baos.toByteArray(); } - static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo, - boolean includesMemstoreTS, boolean includesTag) throws IOException { + static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo, boolean includesMemstoreTS, + boolean includesTag) throws IOException { final BlockType blockType = BlockType.DATA; - HFileContext meta = new HFileContextBuilder() - .withCompression(algo) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTag) - .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) - .build(); + HFileContext meta = new HFileContextBuilder().withCompression(algo) + .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag) + .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build(); HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); DataOutputStream dos = hbw.startWriting(blockType); writeTestBlockContents(dos); @@ -279,8 +274,8 @@ static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo, return hbw; } - public String createTestBlockStr(Compression.Algorithm algo, - int correctLength, boolean useTag) throws IOException { + public String createTestBlockStr(Compression.Algorithm algo, int correctLength, boolean useTag) + throws IOException { HFileBlock.Writer hbw = createTestV2Block(algo, includesMemstoreTS, useTag); byte[] testV2Block = hbw.getHeaderAndDataForTest(); int osOffset = HConstants.HFILEBLOCK_HEADER_SIZE + 9; @@ -301,7 +296,7 @@ public void testNoCompression() throws IOException { Mockito.when(cacheConf.getBlockCache()).thenReturn(Optional.empty()); HFileBlock block = - createTestV2Block(NONE, includesMemstoreTS, false).getBlockForCaching(cacheConf); + createTestV2Block(NONE, includesMemstoreTS, false).getBlockForCaching(cacheConf); assertEquals(4000, block.getUncompressedSizeWithoutHeader()); assertEquals(4004, block.getOnDiskSizeWithoutHeader()); assertTrue(block.isUnpacked()); @@ -310,25 +305,23 @@ public void testNoCompression() throws IOException { @Test public void testGzipCompression() throws IOException { final String correctTestBlockStr = - "DATABLK*\\x00\\x00\\x00>\\x00\\x00\\x0F\\xA0\\xFF\\xFF\\xFF\\xFF" - + "\\xFF\\xFF\\xFF\\xFF" + "DATABLK*\\x00\\x00\\x00>\\x00\\x00\\x0F\\xA0\\xFF\\xFF\\xFF\\xFF" + "\\xFF\\xFF\\xFF\\xFF" + "\\x0" + 
ChecksumType.getDefaultChecksumType().getCode() + "\\x00\\x00@\\x00\\x00\\x00\\x00[" // gzip-compressed block: http://www.gzip.org/zlib/rfc-gzip.html - + "\\x1F\\x8B" // gzip magic signature - + "\\x08" // Compression method: 8 = "deflate" - + "\\x00" // Flags - + "\\x00\\x00\\x00\\x00" // mtime - + "\\x00" // XFL (extra flags) + + "\\x1F\\x8B" // gzip magic signature + + "\\x08" // Compression method: 8 = "deflate" + + "\\x00" // Flags + + "\\x00\\x00\\x00\\x00" // mtime + + "\\x00" // XFL (extra flags) // OS (0 = FAT filesystems, 3 = Unix). However, this field // sometimes gets set to 0 on Linux and Mac, so we reset it to 3. // This appears to be a difference caused by the availability // (and use) of the native GZ codec. - + "\\x03" - + "\\xED\\xC3\\xC1\\x11\\x00 \\x08\\xC00DD\\xDD\\x7Fa" + + "\\x03" + "\\xED\\xC3\\xC1\\x11\\x00 \\x08\\xC00DD\\xDD\\x7Fa" + "\\xD6\\xE8\\xA3\\xB9K\\x84`\\x96Q\\xD3\\xA8\\xDB\\xA8e\\xD4c" + "\\xD46\\xEA5\\xEA3\\xEA7\\xE7\\x00LI\\x5Cs\\xA0\\x0F\\x00\\x00" - + "\\x00\\x00\\x00\\x00"; // 4 byte checksum (ignored) + + "\\x00\\x00\\x00\\x00"; // 4 byte checksum (ignored) final int correctGzipBlockLength = 95; final String testBlockStr = createTestBlockStr(GZ, correctGzipBlockLength, false); // We ignore the block checksum because createTestBlockStr can change the @@ -357,19 +350,13 @@ protected void testReaderV2Internals() throws IOException { } for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) { for (boolean pread : new boolean[] { false, true }) { - LOG.info("testReaderV2: Compression algorithm: " + algo + - ", pread=" + pread); - Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" - + algo); + LOG.info("testReaderV2: Compression algorithm: " + algo + ", pread=" + pread); + Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo); FSDataOutputStream os = fs.create(path); - HFileContext meta = new HFileContextBuilder() - .withCompression(algo) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTag) - .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) - .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, - meta); + HFileContext meta = new HFileContextBuilder().withCompression(algo) + .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag) + .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build(); + HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, meta); long totalSize = 0; for (int blockId = 0; blockId < 2; ++blockId) { DataOutputStream dos = hbw.startWriting(BlockType.DATA); @@ -381,19 +368,14 @@ protected void testReaderV2Internals() throws IOException { os.close(); FSDataInputStream is = fs.open(path); - meta = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTag) - .withCompression(algo).build(); - ReaderContext context = new ReaderContextBuilder() - .withInputStreamWrapper(new FSDataInputStreamWrapper(is)) - .withFileSize(totalSize) - .withFilePath(path) - .withFileSystem(fs) - .build(); - HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, - TEST_UTIL.getConfiguration()); + meta = + new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTag).withCompression(algo).build(); + ReaderContext context = + new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(is)) + .withFileSize(totalSize).withFilePath(path).withFileSystem(fs).build(); + HFileBlock.FSReader hbr = + new 
HFileBlock.FSReaderImpl(context, meta, alloc, TEST_UTIL.getConfiguration()); HFileBlock b = hbr.readBlockData(0, -1, pread, false, true); is.close(); assertEquals(0, HFile.getAndResetChecksumFailuresCount()); @@ -401,19 +383,16 @@ protected void testReaderV2Internals() throws IOException { b.sanityCheck(); assertEquals(4936, b.getUncompressedSizeWithoutHeader()); assertEquals(algo == GZ ? 2173 : 4936, - b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes()); + b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes()); HFileBlock expected = b; if (algo == GZ) { is = fs.open(path); - ReaderContext readerContext = new ReaderContextBuilder() - .withInputStreamWrapper(new FSDataInputStreamWrapper(is)) - .withFileSize(totalSize) - .withFilePath(path) - .withFileSystem(fs) - .build(); - hbr = new HFileBlock.FSReaderImpl(readerContext, meta, alloc, - TEST_UTIL.getConfiguration()); + ReaderContext readerContext = + new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(is)) + .withFileSize(totalSize).withFilePath(path).withFileSystem(fs).build(); + hbr = + new HFileBlock.FSReaderImpl(readerContext, meta, alloc, TEST_UTIL.getConfiguration()); b = hbr.readBlockData(0, 2173 + HConstants.HFILEBLOCK_HEADER_SIZE + b.totalChecksumBytes(), pread, false, true); assertEquals(expected, b); @@ -424,9 +403,10 @@ protected void testReaderV2Internals() throws IOException { fail("Exception expected"); } catch (IOException ex) { String expectedPrefix = "Passed in onDiskSizeWithHeader="; - assertTrue("Invalid exception message: '" + ex.getMessage() - + "'.\nMessage is expected to start with: '" + expectedPrefix - + "'", ex.getMessage().startsWith(expectedPrefix)); + assertTrue( + "Invalid exception message: '" + ex.getMessage() + + "'.\nMessage is expected to start with: '" + expectedPrefix + "'", + ex.getMessage().startsWith(expectedPrefix)); } assertRelease(b); is.close(); @@ -448,25 +428,23 @@ public void testDataBlockEncoding() throws IOException { private void testInternals() throws IOException { final int numBlocks = 5; final Configuration conf = TEST_UTIL.getConfiguration(); - if(includesTag) { + if (includesTag) { conf.setInt("hfile.format.version", 3); } for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) { for (boolean pread : new boolean[] { false, true }) { for (DataBlockEncoding encoding : DataBlockEncoding.values()) { LOG.info("testDataBlockEncoding: Compression algorithm={}, pread={}, dataBlockEncoder={}", - algo.toString(), pread, encoding); - Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" - + algo + "_" + encoding.toString()); + algo.toString(), pread, encoding); + Path path = + new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo + "_" + encoding.toString()); FSDataOutputStream os = fs.create(path); - HFileDataBlockEncoder dataBlockEncoder = (encoding != DataBlockEncoding.NONE) ? - new HFileDataBlockEncoderImpl(encoding) : NoOpDataBlockEncoder.INSTANCE; - HFileContext meta = new HFileContextBuilder() - .withCompression(algo) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTag) - .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) - .build(); + HFileDataBlockEncoder dataBlockEncoder = + (encoding != DataBlockEncoding.NONE) ? 
new HFileDataBlockEncoderImpl(encoding) + : NoOpDataBlockEncoder.INSTANCE; + HFileContext meta = new HFileContextBuilder().withCompression(algo) + .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag) + .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build(); HFileBlock.Writer hbw = new HFileBlock.Writer(conf, dataBlockEncoder, meta); long totalSize = 0; final List encodedSizes = new ArrayList<>(); @@ -491,20 +469,12 @@ private void testInternals() throws IOException { os.close(); FSDataInputStream is = fs.open(path); - meta = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withCompression(algo) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTag) - .build(); - ReaderContext context = new ReaderContextBuilder() - .withInputStreamWrapper(new FSDataInputStreamWrapper(is)) - .withFileSize(totalSize) - .withFilePath(path) - .withFileSystem(fs) - .build(); - HFileBlock.FSReaderImpl hbr = - new HFileBlock.FSReaderImpl(context, meta, alloc, conf); + meta = new HFileContextBuilder().withHBaseCheckSum(true).withCompression(algo) + .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).build(); + ReaderContext context = + new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(is)) + .withFileSize(totalSize).withFilePath(path).withFileSystem(fs).build(); + HFileBlock.FSReaderImpl hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, conf); hbr.setDataBlockEncoder(dataBlockEncoder, conf); hbr.setIncludesMemStoreTS(includesMemstoreTS); HFileBlock blockFromHFile, blockUnpacked; @@ -521,8 +491,8 @@ private void testInternals() throws IOException { blockUnpacked = blockFromHFile.unpack(meta, hbr); assertTrue(blockUnpacked.isUnpacked()); if (meta.isCompressedOrEncrypted()) { - LOG.info("packedHeapsize=" + packedHeapsize + ", unpackedHeadsize=" + blockUnpacked - .heapSize()); + LOG.info("packedHeapsize=" + packedHeapsize + ", unpackedHeadsize=" + + blockUnpacked.heapSize()); assertFalse(packedHeapsize == blockUnpacked.heapSize()); assertTrue("Packed heapSize should be < unpacked heapSize", packedHeapsize < blockUnpacked.heapSize()); @@ -576,34 +546,30 @@ static String buildMessageDetails(Algorithm compression, DataBlockEncoding encod return String.format("compression %s, encoding %s, pread %s", compression, encoding, pread); } - static void assertBuffersEqual(ByteBuff expectedBuffer, - ByteBuff actualBuffer, Compression.Algorithm compression, - DataBlockEncoding encoding, boolean pread) { + static void assertBuffersEqual(ByteBuff expectedBuffer, ByteBuff actualBuffer, + Compression.Algorithm compression, DataBlockEncoding encoding, boolean pread) { if (!actualBuffer.equals(expectedBuffer)) { int prefix = 0; int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit()); - while (prefix < minLimit && - expectedBuffer.get(prefix) == actualBuffer.get(prefix)) { + while (prefix < minLimit && expectedBuffer.get(prefix) == actualBuffer.get(prefix)) { prefix++; } - fail(String.format( - "Content mismatch for %s, commonPrefix %d, expected %s, got %s", - buildMessageDetails(compression, encoding, pread), prefix, - nextBytesToStr(expectedBuffer, prefix), - nextBytesToStr(actualBuffer, prefix))); + fail(String.format("Content mismatch for %s, commonPrefix %d, expected %s, got %s", + buildMessageDetails(compression, encoding, pread), prefix, + nextBytesToStr(expectedBuffer, prefix), nextBytesToStr(actualBuffer, prefix))); } } /** - * Convert a few next bytes in the given buffer at the given position to - * string. 
Used for error messages. + * Convert a few next bytes in the given buffer at the given position to string. Used for error + * messages. */ private static String nextBytesToStr(ByteBuff buf, int pos) { int maxBytes = buf.limit() - pos; int numBytes = Math.min(16, maxBytes); - return Bytes.toStringBinary(buf.array(), buf.arrayOffset() + pos, - numBytes) + (numBytes < maxBytes ? "..." : ""); + return Bytes.toStringBinary(buf.array(), buf.arrayOffset() + pos, numBytes) + + (numBytes < maxBytes ? "..." : ""); } @Test @@ -619,7 +585,7 @@ protected void testPreviousOffsetInternals() throws IOException { for (boolean cacheOnWrite : BOOLEAN_VALUES) { Random rand = defaultRandom(); LOG.info("testPreviousOffset: Compression algorithm={}, pread={}, cacheOnWrite={}", - algo.toString(), pread, cacheOnWrite); + algo.toString(), pread, cacheOnWrite); Path path = new Path(TEST_UTIL.getDataTestDir(), "prev_offset"); List expectedOffsets = new ArrayList<>(); List expectedPrevOffsets = new ArrayList<>(); @@ -629,23 +595,18 @@ protected void testPreviousOffsetInternals() throws IOException { expectedOffsets, expectedPrevOffsets, expectedTypes, expectedContents); FSDataInputStream is = fs.open(path); - HFileContext meta = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTag) - .withCompression(algo).build(); - ReaderContext context = new ReaderContextBuilder() - .withInputStreamWrapper(new FSDataInputStreamWrapper(is)) - .withFileSize(totalSize) - .withFilePath(path) - .withFileSystem(fs) - .build(); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTag).withCompression(algo).build(); + ReaderContext context = + new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(is)) + .withFileSize(totalSize).withFilePath(path).withFileSystem(fs).build(); HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, conf); long curOffset = 0; for (int i = 0; i < NUM_TEST_BLOCKS; ++i) { if (!pread) { - assertEquals(is.getPos(), curOffset + (i == 0 ? 0 : - HConstants.HFILEBLOCK_HEADER_SIZE)); + assertEquals(is.getPos(), + curOffset + (i == 0 ? 
0 : HConstants.HFILEBLOCK_HEADER_SIZE)); } assertEquals(expectedOffsets.get(i).longValue(), curOffset); @@ -656,11 +617,11 @@ protected void testPreviousOffsetInternals() throws IOException { if (detailedLogging) { LOG.info("Block #" + i + ": " + b); } - assertEquals("Invalid block #" + i + "'s type:", - expectedTypes.get(i), b.getBlockType()); - assertEquals("Invalid previous block offset for block " + i - + " of " + "type " + b.getBlockType() + ":", - (long) expectedPrevOffsets.get(i), b.getPrevBlockOffset()); + assertEquals("Invalid block #" + i + "'s type:", expectedTypes.get(i), + b.getBlockType()); + assertEquals("Invalid previous block offset for block " + i + " of " + "type " + + b.getBlockType() + ":", + (long) expectedPrevOffsets.get(i), b.getPrevBlockOffset()); b.sanityCheck(); assertEquals(curOffset, b.getOffset()); @@ -671,17 +632,14 @@ protected void testPreviousOffsetInternals() throws IOException { b2.sanityCheck(); assertEquals(b.getBlockType(), b2.getBlockType()); - assertEquals(b.getOnDiskSizeWithoutHeader(), - b2.getOnDiskSizeWithoutHeader()); - assertEquals(b.getOnDiskSizeWithHeader(), - b2.getOnDiskSizeWithHeader()); + assertEquals(b.getOnDiskSizeWithoutHeader(), b2.getOnDiskSizeWithoutHeader()); + assertEquals(b.getOnDiskSizeWithHeader(), b2.getOnDiskSizeWithHeader()); assertEquals(b.getUncompressedSizeWithoutHeader(), - b2.getUncompressedSizeWithoutHeader()); + b2.getUncompressedSizeWithoutHeader()); assertEquals(b.getPrevBlockOffset(), b2.getPrevBlockOffset()); assertEquals(curOffset, b2.getOffset()); assertEquals(b.getBytesPerChecksum(), b2.getBytesPerChecksum()); - assertEquals(b.getOnDiskDataSizeWithHeader(), - b2.getOnDiskDataSizeWithHeader()); + assertEquals(b.getOnDiskDataSizeWithHeader(), b2.getOnDiskDataSizeWithHeader()); assertEquals(0, HFile.getAndResetChecksumFailuresCount()); assertRelease(b2); @@ -705,23 +663,19 @@ protected void testPreviousOffsetInternals() throws IOException { if (!bytesAreCorrect) { // Optimization: only construct an error message in case we // will need it. 
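For reference, the read-back chain that these TestHFileBlock hunks keep reflowing (HFileContextBuilder, ReaderContextBuilder, HFileBlock.FSReaderImpl, readBlockData) looks roughly like the standalone sketch below. It is only an illustration, not part of the patch: the class and method names are invented, it assumes the HBase server/test classpath, and it is declared in the org.apache.hadoop.hbase.io.hfile package on the assumption that, like the tests themselves, it needs package-level access to FSReaderImpl.

package org.apache.hadoop.hbase.io.hfile;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.compress.Compression;

final class BlockReadBackSketch {
  private BlockReadBackSketch() {
  }

  /** Reads the first block of a block file previously written with HFileBlock.Writer. */
  static HFileBlock readFirstBlock(Configuration conf, FileSystem fs, Path path, long fileSize,
    Compression.Algorithm algo, boolean pread) throws IOException {
    FSDataInputStream is = fs.open(path);
    try {
      // The context must match what the writer used (checksums, mvcc/tags, compression).
      HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true)
        .withIncludesMvcc(false).withIncludesTags(false).withCompression(algo).build();
      ReaderContext context =
        new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(is))
          .withFileSize(fileSize).withFilePath(path).withFileSystem(fs).build();
      HFileBlock.FSReader hbr =
        new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, conf);
      // Offset 0, on-disk size unknown (-1); the remaining flags are as used in the tests above.
      return hbr.readBlockData(0, -1, pread, false, true);
    } finally {
      is.close();
    }
  }
}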
- wrongBytesMsg = "Expected bytes in block #" + i + " (algo=" - + algo + ", pread=" + pread - + ", cacheOnWrite=" + cacheOnWrite + "):\n"; + wrongBytesMsg = "Expected bytes in block #" + i + " (algo=" + algo + ", pread=" + + pread + ", cacheOnWrite=" + cacheOnWrite + "):\n"; wrongBytesMsg += Bytes.toStringBinary(bufExpected.array(), - bufExpected.arrayOffset(), Math.min(32 + 10, bufExpected.limit())) - + ", actual:\n" - + Bytes.toStringBinary(bufRead.array(), - bufRead.arrayOffset(), Math.min(32 + 10, bufRead.limit())); + bufExpected.arrayOffset(), Math.min(32 + 10, bufExpected.limit())) + ", actual:\n" + + Bytes.toStringBinary(bufRead.array(), bufRead.arrayOffset(), + Math.min(32 + 10, bufRead.limit())); if (detailedLogging) { - LOG.warn("expected header" + - HFileBlock.toStringHeader(new SingleByteBuff(bufExpected)) + - "\nfound header" + - HFileBlock.toStringHeader(bufRead)); - LOG.warn("bufread offset " + bufRead.arrayOffset() + - " limit " + bufRead.limit() + - " expected offset " + bufExpected.arrayOffset() + - " limit " + bufExpected.limit()); + LOG.warn( + "expected header" + HFileBlock.toStringHeader(new SingleByteBuff(bufExpected)) + + "\nfound header" + HFileBlock.toStringHeader(bufRead)); + LOG.warn("bufread offset " + bufRead.arrayOffset() + " limit " + bufRead.limit() + + " expected offset " + bufExpected.arrayOffset() + " limit " + + bufExpected.limit()); LOG.warn(wrongBytesMsg); } } @@ -752,9 +706,8 @@ private class BlockReaderThread implements Callable { private final List types; private final long fileSize; - public BlockReaderThread(String clientId, - HFileBlock.FSReader hbr, List offsets, List types, - long fileSize) { + public BlockReaderThread(String clientId, HFileBlock.FSReader hbr, List offsets, + List types, long fileSize) { this.clientId = clientId; this.offsets = offsets; this.hbr = hbr; @@ -808,9 +761,9 @@ public Boolean call() throws Exception { ++numWithOnDiskSize; } } - LOG.info("Client " + clientId + " successfully read " + numBlocksRead + - " blocks (with pread: " + numPositionalRead + ", with onDiskSize " + - "specified: " + numWithOnDiskSize + ")"); + LOG.info( + "Client " + clientId + " successfully read " + numBlocksRead + " blocks (with pread: " + + numPositionalRead + ", with onDiskSize " + "specified: " + numWithOnDiskSize + ")"); return true; } @@ -821,47 +774,40 @@ public void testConcurrentReading() throws Exception { testConcurrentReadingInternals(); } - protected void testConcurrentReadingInternals() throws IOException, - InterruptedException, ExecutionException { + protected void testConcurrentReadingInternals() + throws IOException, InterruptedException, ExecutionException { Configuration conf = TEST_UTIL.getConfiguration(); for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) { Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading"); Random rand = defaultRandom(); List offsets = new ArrayList<>(); List types = new ArrayList<>(); - writeBlocks(TEST_UTIL.getConfiguration(), rand, compressAlgo, path, offsets, null, - types, null); + writeBlocks(TEST_UTIL.getConfiguration(), rand, compressAlgo, path, offsets, null, types, + null); FSDataInputStream is = fs.open(path); long fileSize = fs.getFileStatus(path).getLen(); - HFileContext meta = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTag) - .withCompression(compressAlgo) - .build(); - ReaderContext context = new ReaderContextBuilder() - .withInputStreamWrapper(new FSDataInputStreamWrapper(is)) - 
.withFileSize(fileSize) - .withFilePath(path) - .withFileSystem(fs) - .build(); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTag).withCompression(compressAlgo).build(); + ReaderContext context = + new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(is)) + .withFileSize(fileSize).withFilePath(path).withFileSystem(fs).build(); HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, conf); Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS); ExecutorCompletionService ecs = new ExecutorCompletionService<>(exec); for (int i = 0; i < NUM_READER_THREADS; ++i) { - ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr, - offsets, types, fileSize)); + ecs.submit( + new BlockReaderThread("reader_" + (char) ('A' + i), hbr, offsets, types, fileSize)); } for (int i = 0; i < NUM_READER_THREADS; ++i) { Future result = ecs.take(); assertTrue(result.get()); if (detailedLogging) { - LOG.info(String.valueOf(i + 1) - + " reader threads finished successfully (algo=" + compressAlgo - + ")"); + LOG.info(String.valueOf(i + 1) + " reader threads finished successfully (algo=" + + compressAlgo + ")"); } } is.close(); @@ -870,17 +816,13 @@ protected void testConcurrentReadingInternals() throws IOException, private long writeBlocks(Configuration conf, Random rand, Compression.Algorithm compressAlgo, Path path, List expectedOffsets, List expectedPrevOffsets, - List expectedTypes, List expectedContents - ) throws IOException { + List expectedTypes, List expectedContents) throws IOException { boolean cacheOnWrite = expectedContents != null; FSDataOutputStream os = fs.create(path); - HFileContext meta = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTag) - .withCompression(compressAlgo) - .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) - .build(); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTag).withCompression(compressAlgo) + .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build(); HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, meta); Map prevOffsetByType = new HashMap<>(); long totalSize = 0; @@ -899,8 +841,7 @@ private long writeBlocks(Configuration conf, Random rand, Compression.Algorithm dos.writeInt(j + 1); } - if (expectedOffsets != null) - expectedOffsets.add(os.getPos()); + if (expectedOffsets != null) expectedOffsets.add(os.getPos()); if (expectedPrevOffsets != null) { Long prevOffset = prevOffsetByType.get(bt); @@ -919,16 +860,14 @@ private long writeBlocks(Configuration conf, Random rand, Compression.Algorithm } if (detailedLogging) { - LOG.info("Written block #" + i + " of type " + bt - + ", uncompressed size " + hbw.getUncompressedSizeWithoutHeader() - + ", packed size " + hbw.getOnDiskSizeWithoutHeader() - + " at offset " + pos); + LOG.info("Written block #" + i + " of type " + bt + ", uncompressed size " + + hbw.getUncompressedSizeWithoutHeader() + ", packed size " + + hbw.getOnDiskSizeWithoutHeader() + " at offset " + pos); } } os.close(); - LOG.info("Created a temporary file at " + path + ", " - + fs.getFileStatus(path).getLen() + " byte, compression=" + - compressAlgo); + LOG.info("Created a temporary file at " + path + ", " + fs.getFileStatus(path).getLen() + + " byte, compression=" + compressAlgo); return totalSize; } @@ -947,13 +886,10 @@ 
protected void testBlockHeapSizeInternals() { for (int size : new int[] { 100, 256, 12345 }) { byte[] byteArr = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + size]; ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size); - HFileContext meta = new HFileContextBuilder() - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(includesTag) - .withHBaseCheckSum(false) - .withCompression(Algorithm.NONE) - .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) - .withChecksumType(ChecksumType.NULL).build(); + HFileContext meta = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(includesTag).withHBaseCheckSum(false).withCompression(Algorithm.NONE) + .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) + .withChecksumType(ChecksumType.NULL).build(); HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 0, -1, meta, HEAP); long byteBufferExpectedSize = @@ -962,11 +898,11 @@ protected void testBlockHeapSizeInternals() { long hfileMetaSize = ClassSize.align(ClassSize.estimateBase(HFileContext.class, true)); long hfileBlockExpectedSize = ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true)); long expected = hfileBlockExpectedSize + byteBufferExpectedSize + hfileMetaSize; - assertEquals("Block data size: " + size + ", byte buffer expected " + - "size: " + byteBufferExpectedSize + ", HFileBlock class expected " + - "size: " + hfileBlockExpectedSize + " HFileContext class expected size: " - + hfileMetaSize + "; ", expected, - block.heapSize()); + assertEquals( + "Block data size: " + size + ", byte buffer expected " + "size: " + byteBufferExpectedSize + + ", HFileBlock class expected " + "size: " + hfileBlockExpectedSize + + " HFileContext class expected size: " + hfileMetaSize + "; ", + expected, block.heapSize()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index 3673d991f9a9..94c670c186ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -73,7 +73,7 @@ import org.slf4j.LoggerFactory; @RunWith(Parameterized.class) -@Category({IOTests.class, MediumTests.class}) +@Category({ IOTests.class, MediumTests.class }) public class TestHFileBlockIndex { @ClassRule @@ -92,8 +92,7 @@ public TestHFileBlockIndex(Compression.Algorithm compr) { private static final Logger LOG = LoggerFactory.getLogger(TestHFileBlockIndex.class); private static final Random RNG = new Random(); // This test depends on Random#setSeed private static final int NUM_DATA_BLOCKS = 1000; - private static final HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final int SMALL_BLOCK_SIZE = 4096; private static final int NUM_KV = 10000; @@ -110,8 +109,7 @@ public TestHFileBlockIndex(Compression.Algorithm compr) { private static final int[] INDEX_CHUNK_SIZES = { 4096, 512, 384 }; private static final int[] EXPECTED_NUM_LEVELS = { 2, 3, 4 }; - private static final int[] UNCOMPRESSED_INDEX_SIZES = - { 19187, 21813, 23086 }; + private static final int[] UNCOMPRESSED_INDEX_SIZES = { 19187, 21813, 23086 }; private static final boolean includesMemstoreTS = true; @@ -159,8 +157,8 @@ private void testBlockIndexInternals(boolean useTags) throws IOException { } /** - * A wrapper around a block reader which only caches the results of the last - * operation. Not thread-safe. + * A wrapper around a block reader which only caches the results of the last operation. Not + * thread-safe. */ private static class BlockReaderWrapper implements HFile.CachingBlockReader { @@ -178,13 +176,10 @@ public BlockReaderWrapper(HFileBlock.FSReader realReader) { } @Override - public HFileBlock readBlock(long offset, long onDiskSize, - boolean cacheBlock, boolean pread, boolean isCompaction, - boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) - throws IOException { - if (offset == prevOffset && onDiskSize == prevOnDiskSize && - pread == prevPread) { + public HFileBlock readBlock(long offset, long onDiskSize, boolean cacheBlock, boolean pread, + boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, + DataBlockEncoding expectedDataBlockEncoding) throws IOException { + if (offset == prevOffset && onDiskSize == prevOnDiskSize && pread == prevPread) { hitCount += 1; return prevBlock; } @@ -204,23 +199,20 @@ private void readIndex(boolean useTags) throws IOException { LOG.info("Size of {}: {} compression={}", path, fileSize, compr.toString()); FSDataInputStream istream = fs.open(path); - HFileContext meta = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(useTags) - .withCompression(compr) - .build(); + HFileContext meta = + new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(useTags).withCompression(compr).build(); ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, path).build(); - HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(context, meta, - ByteBuffAllocator.HEAP, conf); + HFileBlock.FSReader blockReader = + new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, conf); BlockReaderWrapper brw = new BlockReaderWrapper(blockReader); HFileBlockIndex.BlockIndexReader indexReader = - new HFileBlockIndex.CellBasedKeyBlockIndexReader( - CellComparatorImpl.COMPARATOR, numLevels); + new 
HFileBlockIndex.CellBasedKeyBlockIndexReader(CellComparatorImpl.COMPARATOR, numLevels); - indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset, - fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries); + indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset, fileSize) + .nextBlockWithBlockType(BlockType.ROOT_INDEX), + numRootEntries); long prevOffset = -1; int i = 0; @@ -231,9 +223,7 @@ private void readIndex(boolean useTags) throws IOException { assertTrue(key != null); assertTrue(indexReader != null); KeyValue.KeyOnlyKeyValue keyOnlyKey = new KeyValue.KeyOnlyKeyValue(key, 0, key.length); - HFileBlock b = - indexReader.seekToDataBlock(keyOnlyKey, null, true, - true, false, null, brw); + HFileBlock b = indexReader.seekToDataBlock(keyOnlyKey, null, true, true, false, null, brw); if (PrivateCellUtil.compare(CellComparatorImpl.COMPARATOR, keyOnlyKey, firstKeyInFile, 0, firstKeyInFile.length) < 0) { assertTrue(b == null); @@ -248,8 +238,7 @@ private void readIndex(boolean useTags) throws IOException { if (prevOffset == b.getOffset()) { assertEquals(++expectedHitCount, brw.hitCount); } else { - LOG.info("First key in a new block: " + keyStr + ", block offset: " - + b.getOffset() + ")"); + LOG.info("First key in a new block: " + keyStr + ", block offset: " + b.getOffset() + ")"); assertTrue(b.getOffset() > prevOffset); assertEquals(++expectedMissCount, brw.missCount); prevOffset = b.getOffset(); @@ -262,18 +251,12 @@ private void readIndex(boolean useTags) throws IOException { private void writeWholeIndex(boolean useTags) throws IOException { assertEquals(0, keys.size()); - HFileContext meta = new HFileContextBuilder() - .withHBaseCheckSum(true) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(useTags) - .withCompression(compr) - .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) - .build(); - HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, - meta); + HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true) + .withIncludesMvcc(includesMemstoreTS).withIncludesTags(useTags).withCompression(compr) + .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build(); + HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta); FSDataOutputStream outputStream = fs.create(path); - HFileBlockIndex.BlockIndexWriter biw = - new HFileBlockIndex.BlockIndexWriter(hbw, null, null); + HFileBlockIndex.BlockIndexWriter biw = new HFileBlockIndex.BlockIndexWriter(hbw, null, null); for (int i = 0; i < NUM_DATA_BLOCKS; ++i) { hbw.startWriting(BlockType.DATA).write(Bytes.toBytes(String.valueOf(RNG.nextInt(1000)))); long blockOffset = outputStream.getPos(); @@ -283,9 +266,8 @@ private void writeWholeIndex(boolean useTags) throws IOException { byte[] family = Bytes.toBytes("f"); byte[] qualifier = Bytes.toBytes("q"); for (int j = 0; j < 16; ++j) { - byte[] k = - new KeyValue(RandomKeyValueUtil.randomOrderedKey(RNG, i * 16 + j), family, qualifier, - EnvironmentEdgeManager.currentTime(), KeyValue.Type.Put).getKey(); + byte[] k = new KeyValue(RandomKeyValueUtil.randomOrderedKey(RNG, i * 16 + j), family, + qualifier, EnvironmentEdgeManager.currentTime(), KeyValue.Type.Put).getKey(); keys.add(k); if (j == 8) { firstKey = k; @@ -306,21 +288,20 @@ private void writeWholeIndex(boolean useTags) throws IOException { numLevels = biw.getNumLevels(); numRootEntries = biw.getNumRootEntries(); - LOG.info("Index written: numLevels=" + numLevels + ", numRootEntries=" + - numRootEntries + ", 
rootIndexOffset=" + rootIndexOffset); + LOG.info("Index written: numLevels=" + numLevels + ", numRootEntries=" + numRootEntries + + ", rootIndexOffset=" + rootIndexOffset); } - private void writeInlineBlocks(HFileBlock.Writer hbw, - FSDataOutputStream outputStream, HFileBlockIndex.BlockIndexWriter biw, - boolean isClosing) throws IOException { + private void writeInlineBlocks(HFileBlock.Writer hbw, FSDataOutputStream outputStream, + HFileBlockIndex.BlockIndexWriter biw, boolean isClosing) throws IOException { while (biw.shouldWriteBlock(isClosing)) { long offset = outputStream.getPos(); biw.writeInlineBlock(hbw.startWriting(biw.getInlineBlockType())); hbw.writeHeaderAndData(outputStream); biw.blockWritten(offset, hbw.getOnDiskSizeWithHeader(), - hbw.getUncompressedSizeWithoutHeader()); - LOG.info("Wrote an inline index block at " + offset + ", size " + - hbw.getOnDiskSizeWithHeader()); + hbw.getUncompressedSizeWithoutHeader()); + LOG.info( + "Wrote an inline index block at " + offset + ", size " + hbw.getOnDiskSizeWithHeader()); } } @@ -353,9 +334,8 @@ public void testSecondaryIndexBinarySearch() throws IOException { for (int i = 0; i < numTotalKeys; ++i) { byte[] k = RandomKeyValueUtil.randomOrderedKey(RNG, i * 2); - KeyValue cell = new KeyValue(k, Bytes.toBytes("f"), Bytes.toBytes("q"), - Bytes.toBytes("val")); - //KeyValue cell = new KeyValue.KeyOnlyKeyValue(k, 0, k.length); + KeyValue cell = new KeyValue(k, Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("val")); + // KeyValue cell = new KeyValue.KeyOnlyKeyValue(k, 0, k.length); keys.add(cell.getKey()); String msgPrefix = "Key #" + i + " (" + Bytes.toStringBinary(k) + "): "; StringBuilder padding = new StringBuilder(); @@ -365,10 +345,9 @@ public void testSecondaryIndexBinarySearch() throws IOException { if (i % 2 == 1) { dos.writeInt(curAllEntriesSize); secondaryIndexEntries[i] = curAllEntriesSize; - LOG.info(msgPrefix + "secondary index entry #" + ((i - 1) / 2) + - ", offset " + curAllEntriesSize); - curAllEntriesSize += cell.getKey().length - + HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD; + LOG.info( + msgPrefix + "secondary index entry #" + ((i - 1) / 2) + ", offset " + curAllEntriesSize); + curAllEntriesSize += cell.getKey().length + HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD; ++numEntriesAdded; } else { secondaryIndexEntries[i] = -1; @@ -379,25 +358,23 @@ public void testSecondaryIndexBinarySearch() throws IOException { // Make sure the keys are increasing. 
for (int i = 0; i < keys.size() - 1; ++i) assertTrue(CellComparatorImpl.COMPARATOR.compare( - new KeyValue.KeyOnlyKeyValue(keys.get(i), 0, keys.get(i).length), - new KeyValue.KeyOnlyKeyValue(keys.get(i + 1), 0, keys.get(i + 1).length)) < 0); + new KeyValue.KeyOnlyKeyValue(keys.get(i), 0, keys.get(i).length), + new KeyValue.KeyOnlyKeyValue(keys.get(i + 1), 0, keys.get(i + 1).length)) < 0); dos.writeInt(curAllEntriesSize); assertEquals(numSearchedKeys, numEntriesAdded); int secondaryIndexOffset = dos.size(); - assertEquals(Bytes.SIZEOF_INT * (numSearchedKeys + 2), - secondaryIndexOffset); + assertEquals(Bytes.SIZEOF_INT * (numSearchedKeys + 2), secondaryIndexOffset); for (int i = 1; i <= numTotalKeys - 1; i += 2) { - assertEquals(dos.size(), - secondaryIndexOffset + secondaryIndexEntries[i]); + assertEquals(dos.size(), secondaryIndexOffset + secondaryIndexEntries[i]); long dummyFileOffset = getDummyFileOffset(i); int dummyOnDiskSize = getDummyOnDiskSize(i); - LOG.debug("Storing file offset=" + dummyFileOffset + " and onDiskSize=" + - dummyOnDiskSize + " at offset " + dos.size()); + LOG.debug("Storing file offset=" + dummyFileOffset + " and onDiskSize=" + dummyOnDiskSize + + " at offset " + dos.size()); dos.writeLong(dummyFileOffset); dos.writeInt(dummyOnDiskSize); - LOG.debug("Stored key " + ((i - 1) / 2) +" at offset " + dos.size()); + LOG.debug("Stored key " + ((i - 1) / 2) + " at offset " + dos.size()); dos.write(keys.get(i)); } @@ -406,20 +383,18 @@ public void testSecondaryIndexBinarySearch() throws IOException { ByteBuffer nonRootIndex = ByteBuffer.wrap(baos.toByteArray()); for (int i = 0; i < numTotalKeys; ++i) { byte[] searchKey = keys.get(i); - byte[] arrayHoldingKey = new byte[searchKey.length + - searchKey.length / 2]; + byte[] arrayHoldingKey = new byte[searchKey.length + searchKey.length / 2]; // To make things a bit more interesting, store the key we are looking // for at a non-zero offset in a new array. 
- System.arraycopy(searchKey, 0, arrayHoldingKey, searchKey.length / 2, - searchKey.length); + System.arraycopy(searchKey, 0, arrayHoldingKey, searchKey.length / 2, searchKey.length); - KeyValue.KeyOnlyKeyValue cell = new KeyValue.KeyOnlyKeyValue( - arrayHoldingKey, searchKey.length / 2, searchKey.length); + KeyValue.KeyOnlyKeyValue cell = + new KeyValue.KeyOnlyKeyValue(arrayHoldingKey, searchKey.length / 2, searchKey.length); int searchResult = BlockIndexReader.binarySearchNonRootIndex(cell, - new MultiByteBuff(nonRootIndex), CellComparatorImpl.COMPARATOR); - String lookupFailureMsg = "Failed to look up key #" + i + " (" - + Bytes.toStringBinary(searchKey) + ")"; + new MultiByteBuff(nonRootIndex), CellComparatorImpl.COMPARATOR); + String lookupFailureMsg = + "Failed to look up key #" + i + " (" + Bytes.toStringBinary(searchKey) + ")"; int expectedResult; int referenceItem; @@ -443,17 +418,15 @@ public void testSecondaryIndexBinarySearch() throws IOException { // higher-level API function.s boolean locateBlockResult = (BlockIndexReader.locateNonRootIndexEntry(new MultiByteBuff(nonRootIndex), cell, - CellComparatorImpl.COMPARATOR) != -1); + CellComparatorImpl.COMPARATOR) != -1); if (i == 0) { assertFalse(locateBlockResult); } else { assertTrue(locateBlockResult); String errorMsg = "i=" + i + ", position=" + nonRootIndex.position(); - assertEquals(errorMsg, getDummyFileOffset(referenceItem), - nonRootIndex.getLong()); - assertEquals(errorMsg, getDummyOnDiskSize(referenceItem), - nonRootIndex.getInt()); + assertEquals(errorMsg, getDummyFileOffset(referenceItem), nonRootIndex.getLong()); + assertEquals(errorMsg, getDummyOnDiskSize(referenceItem), nonRootIndex.getInt()); } } @@ -487,9 +460,7 @@ public void testBlockIndexChunk() throws IOException { // deeper-level index block's entry ("sub-entry"), assuming a global // 0-based ordering of sub-entries. This is needed for mid-key calculation. for (int i = 0; i < N; ++i) { - for (int j = i == 0 ? 0 : numSubEntriesAt[i - 1]; - j < numSubEntriesAt[i]; - ++j) { + for (int j = i == 0 ? 0 : numSubEntriesAt[i - 1]; j < numSubEntriesAt[i]; ++j) { assertEquals(i, c.getEntryBySubEntry(j)); } } @@ -498,12 +469,10 @@ public void testBlockIndexChunk() throws IOException { /** Checks if the HeapSize calculator is within reason */ @Test public void testHeapSizeForBlockIndex() throws IOException { - Class cl = - HFileBlockIndex.BlockIndexReader.class; + Class cl = HFileBlockIndex.BlockIndexReader.class; long expected = ClassSize.estimateBase(cl, false); - HFileBlockIndex.BlockIndexReader bi = - new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); + HFileBlockIndex.BlockIndexReader bi = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); long actual = bi.heapSize(); // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets, @@ -519,9 +488,9 @@ public void testHeapSizeForBlockIndex() throws IOException { } /** - * to check if looks good when midKey on a leaf index block boundary - * @throws IOException - */ + * to check if looks good when midKey on a leaf index block boundary + * @throws IOException + */ @Test public void testMidKeyOnLeafIndexBlockBoundary() throws IOException { Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "hfile_for_midkey"); @@ -534,12 +503,10 @@ public void testMidKeyOnLeafIndexBlockBoundary() throws IOException { // Evict all blocks that were cached-on-write by the previous invocation. 
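For reference, the non-zero-offset trick exercised in the hunk above can be sketched on its own roughly as follows. The class and method names are illustrative, the serialized non-root index is taken as given, and the sketch assumes the HBase test classpath plus package-level access to the block index helpers, as the test has.

package org.apache.hadoop.hbase.io.hfile;

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.nio.MultiByteBuff;

final class OffsetKeySearchSketch {
  private OffsetKeySearchSketch() {
  }

  /** Binary-searches a serialized non-root index using a key stored at a non-zero offset. */
  static int searchWithOffsetKey(byte[] searchKey, ByteBuffer nonRootIndex) {
    // Copy the key halfway into a larger array so offset/length bookkeeping gets exercised.
    byte[] arrayHoldingKey = new byte[searchKey.length + searchKey.length / 2];
    System.arraycopy(searchKey, 0, arrayHoldingKey, searchKey.length / 2, searchKey.length);
    KeyValue.KeyOnlyKeyValue cell =
      new KeyValue.KeyOnlyKeyValue(arrayHoldingKey, searchKey.length / 2, searchKey.length);
    // Same static helper the surrounding test drives against its hand-built index.
    return HFileBlockIndex.BlockIndexReader.binarySearchNonRootIndex(cell,
      new MultiByteBuff(nonRootIndex), CellComparatorImpl.COMPARATOR);
  }
}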
blockCache.evictBlocksByHfileName(hfilePath.getName()); // Write the HFile - HFileContext meta = - new HFileContextBuilder().withBlockSize(SMALL_BLOCK_SIZE).withCompression(Algorithm.NONE) - .withDataBlockEncoding(DataBlockEncoding.NONE).build(); - HFile.Writer writer = - HFile.getWriterFactory(conf, cacheConf).withPath(fs, hfilePath).withFileContext(meta) - .create(); + HFileContext meta = new HFileContextBuilder().withBlockSize(SMALL_BLOCK_SIZE) + .withCompression(Algorithm.NONE).withDataBlockEncoding(DataBlockEncoding.NONE).build(); + HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf).withPath(fs, hfilePath) + .withFileContext(meta).create(); Random rand = new Random(19231737); byte[] family = Bytes.toBytes("f"); byte[] qualifier = Bytes.toBytes("q"); @@ -577,24 +544,20 @@ public void testMidKeyOnLeafIndexBlockBoundary() throws IOException { } /** - * Testing block index through the HFile writer/reader APIs. Allows to test - * setting index block size through configuration, intermediate-level index - * blocks, and caching index blocks on write. - * + * Testing block index through the HFile writer/reader APIs. Allows to test setting index block + * size through configuration, intermediate-level index blocks, and caching index blocks on write. * @throws IOException */ @Test public void testHFileWriterAndReader() throws IOException { - Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), - "hfile_for_block_index"); + Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "hfile_for_block_index"); CacheConfig cacheConf = new CacheConfig(conf, BlockCacheFactory.createBlockCache(conf)); BlockCache blockCache = cacheConf.getBlockCache().get(); for (int testI = 0; testI < INDEX_CHUNK_SIZES.length; ++testI) { int indexBlockSize = INDEX_CHUNK_SIZES[testI]; int expectedNumLevels = EXPECTED_NUM_LEVELS[testI]; - LOG.info("Index block size: " + indexBlockSize + ", compression: " - + compr); + LOG.info("Index block size: " + indexBlockSize + ", compression: " + compr); // Evict all blocks that were cached-on-write by the previous invocation. 
blockCache.evictBlocksByHfileName(hfilePath.getName()); @@ -605,15 +568,10 @@ public void testHFileWriterAndReader() throws IOException { // Write the HFile { - HFileContext meta = new HFileContextBuilder() - .withBlockSize(SMALL_BLOCK_SIZE) - .withCompression(compr) - .build(); - HFile.Writer writer = - HFile.getWriterFactory(conf, cacheConf) - .withPath(fs, hfilePath) - .withFileContext(meta) - .create(); + HFileContext meta = new HFileContextBuilder().withBlockSize(SMALL_BLOCK_SIZE) + .withCompression(compr).build(); + HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf).withPath(fs, hfilePath) + .withFileContext(meta).create(); Random rand = new Random(19231737); byte[] family = Bytes.toBytes("f"); byte[] qualifier = Bytes.toBytes("q"); @@ -621,17 +579,16 @@ public void testHFileWriterAndReader() throws IOException { byte[] row = RandomKeyValueUtil.randomOrderedKey(rand, i); // Key will be interpreted by KeyValue.KEY_COMPARATOR - KeyValue kv = - new KeyValue(row, family, qualifier, EnvironmentEdgeManager.currentTime(), - RandomKeyValueUtil.randomValue(rand)); + KeyValue kv = new KeyValue(row, family, qualifier, EnvironmentEdgeManager.currentTime(), + RandomKeyValueUtil.randomValue(rand)); byte[] k = kv.getKey(); writer.append(kv); keys[i] = k; values[i] = CellUtil.cloneValue(kv); keyStrSet.add(Bytes.toStringBinary(k)); if (i > 0) { - assertTrue((PrivateCellUtil.compare(CellComparatorImpl.COMPARATOR, kv, keys[i - 1], - 0, keys[i - 1].length)) > 0); + assertTrue((PrivateCellUtil.compare(CellComparatorImpl.COMPARATOR, kv, keys[i - 1], 0, + keys[i - 1].length)) > 0); } } @@ -640,11 +597,10 @@ public void testHFileWriterAndReader() throws IOException { // Read the HFile HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, true, conf); - assertEquals(expectedNumLevels, - reader.getTrailer().getNumDataIndexLevels()); + assertEquals(expectedNumLevels, reader.getTrailer().getNumDataIndexLevels()); - assertTrue(Bytes.equals(keys[0], ((KeyValue)reader.getFirstKey().get()).getKey())); - assertTrue(Bytes.equals(keys[NUM_KV - 1], ((KeyValue)reader.getLastKey().get()).getKey())); + assertTrue(Bytes.equals(keys[0], ((KeyValue) reader.getFirstKey().get()).getKey())); + assertTrue(Bytes.equals(keys[NUM_KV - 1], ((KeyValue) reader.getLastKey().get()).getKey())); LOG.info("Last key: " + Bytes.toStringBinary(keys[NUM_KV - 1])); for (boolean pread : new boolean[] { false, true }) { @@ -652,13 +608,13 @@ public void testHFileWriterAndReader() throws IOException { for (int i = 0; i < NUM_KV; ++i) { checkSeekTo(keys, scanner, i); checkKeyValue("i=" + i, keys[i], values[i], - ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey()), scanner.getValue()); + ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey()), scanner.getValue()); } assertTrue(scanner.seekTo()); for (int i = NUM_KV - 1; i >= 0; --i) { checkSeekTo(keys, scanner, i); checkKeyValue("i=" + i, keys[i], values[i], - ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey()), scanner.getValue()); + ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey()), scanner.getValue()); } } @@ -666,13 +622,12 @@ public void testHFileWriterAndReader() throws IOException { HFile.Reader reader2 = reader; HFileBlock.FSReader fsReader = reader2.getUncachedBlockReader(); - HFileBlock.BlockIterator iter = fsReader.blockRange(0, - reader.getTrailer().getLoadOnOpenDataOffset()); + HFileBlock.BlockIterator iter = + fsReader.blockRange(0, reader.getTrailer().getLoadOnOpenDataOffset()); HFileBlock block; List blockKeys = new ArrayList<>(); while ((block = 
iter.nextBlock()) != null) { - if (block.getBlockType() != BlockType.LEAF_INDEX) - return; + if (block.getBlockType() != BlockType.LEAF_INDEX) return; ByteBuff b = block.getBufferReadOnly(); int n = b.getIntAfterPosition(0); // One int for the number of items, and n + 1 for the secondary index. @@ -683,55 +638,50 @@ public void testHFileWriterAndReader() throws IOException { int keyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (i + 1)); int nextKeyRelOffset = b.getIntAfterPosition(Bytes.SIZEOF_INT * (i + 2)); int keyLen = nextKeyRelOffset - keyRelOffset; - int keyOffset = b.arrayOffset() + entriesOffset + keyRelOffset + - HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD; - byte[] blockKey = Arrays.copyOfRange(b.array(), keyOffset, keyOffset - + keyLen); + int keyOffset = b.arrayOffset() + entriesOffset + keyRelOffset + + HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD; + byte[] blockKey = Arrays.copyOfRange(b.array(), keyOffset, keyOffset + keyLen); String blockKeyStr = Bytes.toString(blockKey); blockKeys.add(blockKey); // If the first key of the block is not among the keys written, we // are not parsing the non-root index block format correctly. assertTrue("Invalid block key from leaf-level block: " + blockKeyStr, - keyStrSet.contains(blockKeyStr)); + keyStrSet.contains(blockKeyStr)); } } // Validate the mid-key. - assertEquals( - Bytes.toStringBinary(blockKeys.get((blockKeys.size() - 1) / 2)), - reader.midKey()); + assertEquals(Bytes.toStringBinary(blockKeys.get((blockKeys.size() - 1) / 2)), + reader.midKey()); assertEquals(UNCOMPRESSED_INDEX_SIZES[testI], - reader.getTrailer().getUncompressedDataIndexSize()); + reader.getTrailer().getUncompressedDataIndexSize()); reader.close(); reader2.close(); } } - private void checkSeekTo(byte[][] keys, HFileScanner scanner, int i) - throws IOException { + private void checkSeekTo(byte[][] keys, HFileScanner scanner, int i) throws IOException { assertEquals("Failed to seek to key #" + i + " (" + Bytes.toStringBinary(keys[i]) + ")", 0, - scanner.seekTo(KeyValueUtil.createKeyValueFromKey(keys[i]))); + scanner.seekTo(KeyValueUtil.createKeyValueFromKey(keys[i]))); } - private void assertArrayEqualsBuffer(String msgPrefix, byte[] arr, - ByteBuffer buf) { - assertEquals(msgPrefix + ": expected " + Bytes.toStringBinary(arr) - + ", actual " + Bytes.toStringBinary(buf), 0, Bytes.compareTo(arr, 0, - arr.length, buf.array(), buf.arrayOffset(), buf.limit())); + private void assertArrayEqualsBuffer(String msgPrefix, byte[] arr, ByteBuffer buf) { + assertEquals( + msgPrefix + ": expected " + Bytes.toStringBinary(arr) + ", actual " + + Bytes.toStringBinary(buf), + 0, Bytes.compareTo(arr, 0, arr.length, buf.array(), buf.arrayOffset(), buf.limit())); } /** Check a key/value pair after it was read by the reader */ - private void checkKeyValue(String msgPrefix, byte[] expectedKey, - byte[] expectedValue, ByteBuffer keyRead, ByteBuffer valueRead) { - if (!msgPrefix.isEmpty()) - msgPrefix += ". "; + private void checkKeyValue(String msgPrefix, byte[] expectedKey, byte[] expectedValue, + ByteBuffer keyRead, ByteBuffer valueRead) { + if (!msgPrefix.isEmpty()) msgPrefix += ". 
"; assertArrayEqualsBuffer(msgPrefix + "Invalid key", expectedKey, keyRead); - assertArrayEqualsBuffer(msgPrefix + "Invalid value", expectedValue, - valueRead); + assertArrayEqualsBuffer(msgPrefix + "Invalid value", expectedValue, valueRead); } @Test @@ -746,48 +696,40 @@ public void testIntermediateLevelIndicesWithLargeKeysWithMinNumEntries() throws } public void testIntermediateLevelIndicesWithLargeKeys(int minNumEntries) throws IOException { - Path hfPath = new Path(TEST_UTIL.getDataTestDir(), - "testIntermediateLevelIndicesWithLargeKeys.hfile"); + Path hfPath = + new Path(TEST_UTIL.getDataTestDir(), "testIntermediateLevelIndicesWithLargeKeys.hfile"); int maxChunkSize = 1024; FileSystem fs = FileSystem.get(conf); CacheConfig cacheConf = new CacheConfig(conf); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize); conf.setInt(HFileBlockIndex.MIN_INDEX_NUM_ENTRIES_KEY, minNumEntries); HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); - HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf) - .withFileContext(context) - .withPath(fs, hfPath).create(); + HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf).withFileContext(context) + .withPath(fs, hfPath).create(); List keys = new ArrayList<>(); // This should result in leaf-level indices and a root level index - for (int i=0; i < 100; i++) { + for (int i = 0; i < 100; i++) { byte[] rowkey = new byte[maxChunkSize + 1]; byte[] b = Bytes.toBytes(i); System.arraycopy(b, 0, rowkey, rowkey.length - b.length, b.length); keys.add(rowkey); - hfw.append(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(rowkey).setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build()); + hfw.append(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(rowkey) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(HConstants.EMPTY_BYTE_ARRAY).build()); } hfw.close(); HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, true, conf); - // Scanner doesn't do Cells yet. Fix. + // Scanner doesn't do Cells yet. Fix. 
HFileScanner scanner = reader.getScanner(conf, true, true); for (int i = 0; i < keys.size(); ++i) { scanner.seekTo(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(keys.get(i)).setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build()); + .setRow(keys.get(i)).setFamily(HConstants.EMPTY_BYTE_ARRAY) + .setQualifier(HConstants.EMPTY_BYTE_ARRAY).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build()); } reader.close(); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java index d428acf6f01f..4fb9c6f70b38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -54,7 +53,7 @@ import org.junit.runners.Parameterized.Parameters; @RunWith(Parameterized.class) -@Category({IOTests.class, MediumTests.class}) +@Category({ IOTests.class, MediumTests.class }) public class TestHFileDataBlockEncoder { @ClassRule @@ -70,17 +69,15 @@ public class TestHFileDataBlockEncoder { * Create test for given data block encoding configuration. * @param blockEncoder What kind of encoding policy will be used. */ - public TestHFileDataBlockEncoder(HFileDataBlockEncoder blockEncoder, - boolean includesMemstoreTS) { + public TestHFileDataBlockEncoder(HFileDataBlockEncoder blockEncoder, boolean includesMemstoreTS) { this.blockEncoder = blockEncoder; this.includesMemstoreTS = includesMemstoreTS; - System.err.println("Encoding: " + blockEncoder.getDataBlockEncoding() - + ", includesMemstoreTS: " + includesMemstoreTS); + System.err.println("Encoding: " + blockEncoder.getDataBlockEncoding() + ", includesMemstoreTS: " + + includesMemstoreTS); } /** - * Test putting and taking out blocks into cache with different - * encoding options. + * Test putting and taking out blocks into cache with different encoding options. 
*/ @Test public void testEncodingWithCache() throws IOException { @@ -93,8 +90,7 @@ private void testEncodingWithCacheInternals(boolean useTag) throws IOException { HFileBlock block = getSampleHFileBlock(kvs, useTag); HFileBlock cacheBlock = createBlockOnDisk(conf, kvs, block, useTag); - LruBlockCache blockCache = - new LruBlockCache(8 * 1024 * 1024, 32 * 1024); + LruBlockCache blockCache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024); BlockCacheKey cacheKey = new BlockCacheKey("test", 0); blockCache.cacheBlock(cacheKey, cacheBlock); @@ -103,8 +99,7 @@ private void testEncodingWithCacheInternals(boolean useTag) throws IOException { HFileBlock returnedBlock = (HFileBlock) heapSize; - if (blockEncoder.getDataBlockEncoding() == - DataBlockEncoding.NONE) { + if (blockEncoder.getDataBlockEncoding() == DataBlockEncoding.NONE) { assertEquals(block.getBufferReadOnly(), returnedBlock.getBufferReadOnly()); } else { if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) { @@ -131,12 +126,9 @@ private void testHeaderSizeInCacheWithoutChecksumInternals(boolean useTags) thro buf.position(headerSize); keyValues.rewind(); buf.put(keyValues); - HFileContext hfileContext = new HFileContextBuilder().withHBaseCheckSum(false) - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(useTags) - .withBlockSize(0) - .withChecksumType(ChecksumType.NULL) - .build(); + HFileContext hfileContext = + new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(useTags).withBlockSize(0).withChecksumType(ChecksumType.NULL).build(); HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, 0, 0, -1, hfileContext, ByteBuffAllocator.HEAP); HFileBlock cacheBlock = createBlockOnDisk(conf, kvs, block, useTags); @@ -154,8 +146,8 @@ public void testEncoding() throws IOException { } /** - * Test encoding with offheap keyvalue. This test just verifies if the encoders - * work with DBB and does not use the getXXXArray() API + * Test encoding with offheap keyvalue. 
This test just verifies if the encoders work with DBB and + * does not use the getXXXArray() API * @throws IOException */ @Test @@ -178,11 +170,10 @@ private void testEncodingInternals(boolean useTag) throws IOException { HFileBlock block = getSampleHFileBlock(kvs, useTag); HFileBlock blockOnDisk = createBlockOnDisk(conf, kvs, block, useTag); - if (blockEncoder.getDataBlockEncoding() != - DataBlockEncoding.NONE) { + if (blockEncoder.getDataBlockEncoding() != DataBlockEncoding.NONE) { assertEquals(BlockType.ENCODED_DATA, blockOnDisk.getBlockType()); assertEquals(blockEncoder.getDataBlockEncoding().getId(), - blockOnDisk.getDataBlockEncodingId()); + blockOnDisk.getDataBlockEncodingId()); } else { assertEquals(BlockType.DATA, blockOnDisk.getBlockType()); } @@ -195,14 +186,9 @@ private HFileBlock getSampleHFileBlock(List kvs, boolean useTag) { buf.position(HConstants.HFILEBLOCK_HEADER_SIZE); keyValues.rewind(); buf.put(keyValues); - HFileContext meta = new HFileContextBuilder() - .withIncludesMvcc(includesMemstoreTS) - .withIncludesTags(useTag) - .withHBaseCheckSum(true) - .withCompression(Algorithm.NONE) - .withBlockSize(0) - .withChecksumType(ChecksumType.NULL) - .build(); + HFileContext meta = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS) + .withIncludesTags(useTag).withHBaseCheckSum(true).withCompression(Algorithm.NONE) + .withBlockSize(0).withChecksumType(ChecksumType.NULL).build(); HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, 0, 0, -1, meta, ByteBuffAllocator.HEAP); return b; @@ -211,9 +197,9 @@ private HFileBlock getSampleHFileBlock(List kvs, boolean useTag) { private HFileBlock createBlockOnDisk(Configuration conf, List kvs, HFileBlock block, boolean useTags) throws IOException { int size; - HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(conf, - blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, - block.getHFileContext()); + HFileBlockEncodingContext context = + new HFileBlockDefaultEncodingContext(conf, blockEncoder.getDataBlockEncoding(), + HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext()); ByteArrayOutputStream baos = new ByteArrayOutputStream(); baos.write(block.getDummyHeaderForVersion()); @@ -233,8 +219,7 @@ private HFileBlock createBlockOnDisk(Configuration conf, List kvs, HFi private void writeBlock(Configuration conf, List kvs, HFileContext fileContext, boolean useTags) throws IOException { HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(conf, - blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, - fileContext); + blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); ByteArrayOutputStream baos = new ByteArrayOutputStream(); baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER); @@ -254,8 +239,9 @@ public static Collection getAllConfigurations() { for (DataBlockEncoding diskAlgo : DataBlockEncoding.values()) { for (boolean includesMemstoreTS : new boolean[] { false, true }) { - HFileDataBlockEncoder dbe = (diskAlgo == DataBlockEncoding.NONE) ? - NoOpDataBlockEncoder.INSTANCE : new HFileDataBlockEncoderImpl(diskAlgo); + HFileDataBlockEncoder dbe = + (diskAlgo == DataBlockEncoding.NONE) ? 
NoOpDataBlockEncoder.INSTANCE + : new HFileDataBlockEncoderImpl(diskAlgo); configurations.add(new Object[] { dbe, new Boolean(includesMemstoreTS) }); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java index acc17acdf969..02fdf6999092 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -60,7 +59,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileEncryption { @ClassRule @@ -85,8 +84,7 @@ public static void setUp() throws Exception { fs = FileSystem.get(conf); cryptoContext = Encryption.newContext(conf); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher aes = Encryption.getCipher(conf, algorithm); assertNotNull(aes); cryptoContext.setCipher(aes); @@ -103,10 +101,10 @@ private int writeBlock(Configuration conf, FSDataOutputStream os, HFileContext f dos.writeInt(j); } hbw.writeHeaderAndData(os); - LOG.info("Wrote a block at " + os.getPos() + " with" + - " onDiskSizeWithHeader=" + hbw.getOnDiskSizeWithHeader() + - " uncompressedSizeWithoutHeader=" + hbw.getOnDiskSizeWithoutHeader() + - " uncompressedSizeWithoutHeader=" + hbw.getUncompressedSizeWithoutHeader()); + LOG.info("Wrote a block at " + os.getPos() + " with" + " onDiskSizeWithHeader=" + + hbw.getOnDiskSizeWithHeader() + " uncompressedSizeWithoutHeader=" + + hbw.getOnDiskSizeWithoutHeader() + " uncompressedSizeWithoutHeader=" + + hbw.getUncompressedSizeWithoutHeader()); return hbw.getOnDiskSizeWithHeader(); } @@ -115,13 +113,13 @@ private long readAndVerifyBlock(long pos, HFileContext ctx, HFileBlock.FSReaderI HFileBlock b = hbr.readBlockData(pos, -1, false, false, true); assertEquals(0, HFile.getAndResetChecksumFailuresCount()); b.sanityCheck(); - assertFalse((b.getHFileContext().getCompression() != Compression.Algorithm.NONE) - && b.isUnpacked()); + assertFalse( + (b.getHFileContext().getCompression() != Compression.Algorithm.NONE) && b.isUnpacked()); b = b.unpack(ctx, hbr); - LOG.info("Read a block at " + pos + " with" + - " onDiskSizeWithHeader=" + b.getOnDiskSizeWithHeader() + - " uncompressedSizeWithoutHeader=" + b.getOnDiskSizeWithoutHeader() + - " uncompressedSizeWithoutHeader=" + b.getUncompressedSizeWithoutHeader()); + LOG.info( + "Read a block at " + pos + " with" + " onDiskSizeWithHeader=" + b.getOnDiskSizeWithHeader() + + " uncompressedSizeWithoutHeader=" + b.getOnDiskSizeWithoutHeader() + + " uncompressedSizeWithoutHeader=" + b.getUncompressedSizeWithoutHeader()); DataInputStream dis = b.getByteStream(); for (int i = 0; i < size; i++) { int read = dis.readInt(); @@ -144,10 +142,8 @@ public void testDataBlockEncryption() throws 
IOException { Path path = new Path(TEST_UTIL.getDataTestDir(), "block_v3_" + compression + "_AES"); LOG.info("testDataBlockEncryption: encryption=AES compression=" + compression); long totalSize = 0; - HFileContext fileContext = new HFileContextBuilder() - .withCompression(compression) - .withEncryptionContext(cryptoContext) - .build(); + HFileContext fileContext = new HFileContextBuilder().withCompression(compression) + .withEncryptionContext(cryptoContext).build(); FSDataOutputStream os = fs.create(path); try { for (int i = 0; i < blocks; i++) { @@ -157,11 +153,9 @@ public void testDataBlockEncryption() throws IOException { os.close(); } FSDataInputStream is = fs.open(path); - ReaderContext context = new ReaderContextBuilder() - .withInputStreamWrapper(new FSDataInputStreamWrapper(is)) - .withFilePath(path) - .withFileSystem(fs) - .withFileSize(totalSize).build(); + ReaderContext context = + new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(is)) + .withFilePath(path).withFileSystem(fs).withFileSize(totalSize).build(); try { HFileBlock.FSReaderImpl hbr = new HFileBlock.FSReaderImpl(context, fileContext, ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration()); @@ -179,20 +173,17 @@ public void testDataBlockEncryption() throws IOException { public void testHFileEncryptionMetadata() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); CacheConfig cacheConf = new CacheConfig(conf); - HFileContext fileContext = new HFileContextBuilder() - .withEncryptionContext(cryptoContext) - .build(); + HFileContext fileContext = + new HFileContextBuilder().withEncryptionContext(cryptoContext).build(); // write a simple encrypted hfile Path path = new Path(TEST_UTIL.getDataTestDir(), "cryptometa.hfile"); FSDataOutputStream out = fs.create(path); - HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf) - .withOutputStream(out) - .withFileContext(fileContext) - .create(); + HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf).withOutputStream(out) + .withFileContext(fileContext).create(); try { - KeyValue kv = new KeyValue(Bytes.toBytes("foo"), Bytes.toBytes("f1"), null, - Bytes.toBytes("value")); + KeyValue kv = + new KeyValue(Bytes.toBytes("foo"), Bytes.toBytes("f1"), null, Bytes.toBytes("value")); writer.append(kv); } finally { writer.close(); @@ -206,8 +197,7 @@ public void testHFileEncryptionMetadata() throws Exception { assertNotNull(trailer.getEncryptionKey()); Encryption.Context readerContext = reader.getFileContext().getEncryptionContext(); assertEquals(readerContext.getCipher().getName(), cryptoContext.getCipher().getName()); - assertTrue(Bytes.equals(readerContext.getKeyBytes(), - cryptoContext.getKeyBytes())); + assertTrue(Bytes.equals(readerContext.getKeyBytes(), cryptoContext.getKeyBytes())); } finally { reader.close(); } @@ -222,25 +212,20 @@ public void testHFileEncryption() throws Exception { // Iterate through data block encoding and compression combinations Configuration conf = TEST_UTIL.getConfiguration(); CacheConfig cacheConf = new CacheConfig(conf); - for (DataBlockEncoding encoding: DataBlockEncoding.values()) { - for (Compression.Algorithm compression: HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS) { - HFileContext fileContext = new HFileContextBuilder() - .withBlockSize(4096) // small blocks - .withEncryptionContext(cryptoContext) - .withCompression(compression) - .withDataBlockEncoding(encoding) - .build(); + for (DataBlockEncoding encoding : DataBlockEncoding.values()) { + for (Compression.Algorithm compression : 
HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS) { + HFileContext fileContext = new HFileContextBuilder().withBlockSize(4096) // small blocks + .withEncryptionContext(cryptoContext).withCompression(compression) + .withDataBlockEncoding(encoding).build(); // write a new test HFile LOG.info("Writing with " + fileContext); Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseCommonTestingUtil.getRandomUUID().toString() + ".hfile"); + HBaseCommonTestingUtil.getRandomUUID().toString() + ".hfile"); FSDataOutputStream out = fs.create(path); - HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf) - .withOutputStream(out) - .withFileContext(fileContext) - .create(); + HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf).withOutputStream(out) + .withFileContext(fileContext).create(); try { - for (KeyValue kv: testKvs) { + for (KeyValue kv : testKvs) { writer.append(kv); } } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java index 3adc57a625d9..ac6ca020aeeb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,16 +37,16 @@ /** * Test a case when an inline index chunk is converted to a root one. This reproduces the bug in - * HBASE-6871. We write a carefully selected number of relatively large keys so that we accumulate - * a leaf index chunk that only goes over the configured index chunk size after adding the last + * HBASE-6871. We write a carefully selected number of relatively large keys so that we accumulate a + * leaf index chunk that only goes over the configured index chunk size after adding the last * key/value. The bug is in that when we close the file, we convert that inline (leaf-level) chunk - * into a root chunk, but then look at the size of that root chunk, find that it is greater than - * the configured chunk size, and split it into a number of intermediate index blocks that should - * really be leaf-level blocks. If more keys were added, we would flush the leaf-level block, add - * another entry to the root-level block, and that would prevent us from upgrading the leaf-level - * chunk to the root chunk, thus not triggering the bug. + * into a root chunk, but then look at the size of that root chunk, find that it is greater than the + * configured chunk size, and split it into a number of intermediate index blocks that should really + * be leaf-level blocks. If more keys were added, we would flush the leaf-level block, add another + * entry to the root-level block, and that would prevent us from upgrading the leaf-level chunk to + * the root chunk, thus not triggering the bug. 
*/ -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileInlineToRootChunkConversion { @ClassRule @@ -65,9 +65,8 @@ public void testWriteHFile() throws Exception { CacheConfig cacheConf = new CacheConfig(conf); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize); HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); - HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf) - .withFileContext(context) - .withPath(fs, hfPath).create(); + HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf).withFileContext(context) + .withPath(fs, hfPath).create(); List keys = new ArrayList<>(); StringBuilder sb = new StringBuilder(); @@ -83,27 +82,21 @@ public void testWriteHFile() throws Exception { byte[] k = Bytes.toBytes(keyStr); keys.add(k); byte[] v = Bytes.toBytes("value" + i); - hfw.append(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(k) - .setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(v).build()); + hfw.append(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(k) + .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(v).build()); } hfw.close(); HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, true, conf); - // Scanner doesn't do Cells yet. Fix. + // Scanner doesn't do Cells yet. Fix. HFileScanner scanner = reader.getScanner(conf, true, true); for (int i = 0; i < keys.size(); ++i) { scanner.seekTo(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(keys.get(i)) - .setFamily(HConstants.EMPTY_BYTE_ARRAY) - .setQualifier(HConstants.EMPTY_BYTE_ARRAY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY).build()); + .setRow(keys.get(i)).setFamily(HConstants.EMPTY_BYTE_ARRAY) + .setQualifier(HConstants.EMPTY_BYTE_ARRAY).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build()); } reader.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java index 7b5b7e217661..6c0f3b922f6b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFilePrettyPrinter { @ClassRule @@ -53,9 +53,9 @@ public class TestHFilePrettyPrinter { private final static HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static FileSystem fs; private static Configuration conf; - private static byte [] cf = Bytes.toBytes("cf"); - private static byte [] fam = Bytes.toBytes("fam"); - private static byte [] value = Bytes.toBytes("val"); + private static byte[] cf = Bytes.toBytes("cf"); + private static byte[] fam = Bytes.toBytes("fam"); + private static byte[] value = Bytes.toBytes("val"); private static PrintStream original; private static PrintStream ps; private static ByteArrayOutputStream stream; @@ -78,13 +78,13 @@ public void teardown() { @Test public void testHFilePrettyPrinterNonRootDir() throws Exception { - Path fileNotInRootDir = UTIL.getDataTestDir("hfile"); + Path fileNotInRootDir = UTIL.getDataTestDir("hfile"); TestHRegionServerBulkLoad.createHFile(fs, fileNotInRootDir, cf, fam, value, 1000); - assertNotEquals("directory used is not an HBase root dir", - UTIL.getDefaultRootDirPath(), fileNotInRootDir); + assertNotEquals("directory used is not an HBase root dir", UTIL.getDefaultRootDirPath(), + fileNotInRootDir); System.setOut(ps); - new HFilePrettyPrinter(conf).run(new String[]{"-v", String.valueOf(fileNotInRootDir)}); + new HFilePrettyPrinter(conf).run(new String[] { "-v", String.valueOf(fileNotInRootDir) }); String result = new String(stream.toByteArray()); String expectedResult = "Scanning -> " + fileNotInRootDir + "\n" + "Scanned kv count -> 1000\n"; assertEquals(expectedResult, result); @@ -96,14 +96,13 @@ public void testHFilePrettyPrinterRootDir() throws Exception { String rootString = rootPath + rootPath.SEPARATOR; Path fileInRootDir = new Path(rootString + "hfile"); TestHRegionServerBulkLoad.createHFile(fs, fileInRootDir, cf, fam, value, 1000); - assertTrue("directory used is a root dir", - fileInRootDir.toString().startsWith(rootString)); + assertTrue("directory used is a root dir", fileInRootDir.toString().startsWith(rootString)); System.setOut(ps); HFilePrettyPrinter printer = new HFilePrettyPrinter(); printer.setConf(conf); printer.processFile(fileInRootDir, true); - printer.run(new String[]{"-v", String.valueOf(fileInRootDir)}); + printer.run(new String[] { "-v", String.valueOf(fileInRootDir) }); String result = new String(stream.toByteArray()); String expectedResult = "Scanning -> " + fileInRootDir + "\n" + "Scanned kv count -> 1000\n"; assertEquals(expectedResult, result); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java index 0809ca8be543..3db5111461c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.io.hfile; import static org.junit.Assert.assertEquals; + import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -40,7 +41,7 @@ /** * Test */ -@Category({ IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileReaderImpl { @ClassRule @@ -65,8 +66,8 @@ Path makeNewFile() throws IOException { HFileContext context = new HFileContextBuilder().withBlockSize(blocksize).withIncludesTags(true).build(); Configuration conf = TEST_UTIL.getConfiguration(); - HFile.Writer writer = HFile.getWriterFactoryNoCache(conf) - .withOutputStream(fout).withFileContext(context).create(); + HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withOutputStream(fout) + .withFileContext(context).create(); // 4 bytes * 3 * 2 for each key/value + // 3 for keys, 15 for values = 42 (woot) writer.append(toKV("c")); @@ -144,7 +145,7 @@ public void testSeekBefore() throws Exception { protected void deleteTestDir(FileSystem fs) throws IOException { Path dataTestDir = TEST_UTIL.getDataTestDir(); - if(fs.exists(dataTestDir)) { + if (fs.exists(dataTestDir)) { fs.delete(dataTestDir, true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java index c52c517eafc4..6017b1f534b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -162,9 +161,8 @@ private void writeHFile(Configuration conf, FileSystem fs, Path hfilePath, Algor HFileContext context = new HFileContextBuilder().withBlockSize(1).withDataBlockEncoding(DataBlockEncoding.NONE) .withCompression(compression).withDataBlockEncoding(encoding).build(); - try (HFile.Writer writer = - new HFile.WriterFactory(conf, new CacheConfig(conf)).withPath(fs, hfilePath) - .withFileContext(context).create()) { + try (HFile.Writer writer = new HFile.WriterFactory(conf, new CacheConfig(conf)) + .withPath(fs, hfilePath).withFileContext(context).create()) { for (int i = 0; i < cellCount; ++i) { byte[] keyBytes = Bytes.add(Bytes.toBytes(i), SUFFIX); // A random-length random value. 
@@ -199,16 +197,14 @@ private void testReleaseBlock(Algorithm compression, DataBlockEncoding encoding) Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels()); HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(conf, true, true, false); - HFileBlock block1 = reader.getDataBlockIndexReader() - .loadDataBlockWithScanInfo(firstCell, null, true, true, false, - DataBlockEncoding.NONE, reader).getHFileBlock(); + HFileBlock block1 = reader.getDataBlockIndexReader().loadDataBlockWithScanInfo(firstCell, null, + true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); waitBucketCacheFlushed(defaultBC); Assert.assertTrue(block1.getBlockType().isData()); Assert.assertFalse(block1 instanceof ExclusiveMemHFileBlock); - HFileBlock block2 = reader.getDataBlockIndexReader() - .loadDataBlockWithScanInfo(secondCell, null, true, true, false, - DataBlockEncoding.NONE, reader).getHFileBlock(); + HFileBlock block2 = reader.getDataBlockIndexReader().loadDataBlockWithScanInfo(secondCell, null, + true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); waitBucketCacheFlushed(defaultBC); Assert.assertTrue(block2.getBlockType().isData()); Assert.assertFalse(block2 instanceof ExclusiveMemHFileBlock); @@ -283,14 +279,12 @@ public void testSeekBefore() throws Exception { Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels()); HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(conf, true, true, false); - HFileBlock block1 = reader.getDataBlockIndexReader() - .loadDataBlockWithScanInfo(firstCell, null, true, true, false, - DataBlockEncoding.NONE, reader).getHFileBlock(); + HFileBlock block1 = reader.getDataBlockIndexReader().loadDataBlockWithScanInfo(firstCell, null, + true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); Assert.assertTrue(block1.getBlockType().isData()); Assert.assertFalse(block1 instanceof ExclusiveMemHFileBlock); - HFileBlock block2 = reader.getDataBlockIndexReader() - .loadDataBlockWithScanInfo(secondCell, null, true, true, false, - DataBlockEncoding.NONE, reader).getHFileBlock(); + HFileBlock block2 = reader.getDataBlockIndexReader().loadDataBlockWithScanInfo(secondCell, null, + true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); Assert.assertTrue(block2.getBlockType().isData()); Assert.assertFalse(block2 instanceof ExclusiveMemHFileBlock); // Wait until flushed to IOEngine; @@ -344,9 +338,8 @@ public void testSeekBefore() throws Exception { Assert.assertEquals(0, prevBlock.refCnt()); // Reload the block1 again. 
- block1 = reader.getDataBlockIndexReader() - .loadDataBlockWithScanInfo(firstCell, null, true, true, false, - DataBlockEncoding.NONE, reader).getHFileBlock(); + block1 = reader.getDataBlockIndexReader().loadDataBlockWithScanInfo(firstCell, null, true, true, + false, DataBlockEncoding.NONE, reader).getHFileBlock(); // Wait until flushed to IOEngine; waitBucketCacheFlushed(defaultBC); Assert.assertTrue(block1.getBlockType().isData()); @@ -413,14 +406,12 @@ public void testWithLruBlockCache() throws Exception { Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels()); HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(conf, true, true, false); - HFileBlock block1 = reader.getDataBlockIndexReader() - .loadDataBlockWithScanInfo(firstCell, null, true, true, false, - DataBlockEncoding.NONE, reader).getHFileBlock(); + HFileBlock block1 = reader.getDataBlockIndexReader().loadDataBlockWithScanInfo(firstCell, null, + true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); Assert.assertTrue(block1.getBlockType().isData()); Assert.assertTrue(block1 instanceof ExclusiveMemHFileBlock); - HFileBlock block2 = reader.getDataBlockIndexReader() - .loadDataBlockWithScanInfo(secondCell, null, true, true, false, - DataBlockEncoding.NONE, reader).getHFileBlock(); + HFileBlock block2 = reader.getDataBlockIndexReader().loadDataBlockWithScanInfo(secondCell, null, + true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); Assert.assertTrue(block2.getBlockType().isData()); Assert.assertTrue(block2 instanceof ExclusiveMemHFileBlock); // One RPC reference path. @@ -463,9 +454,8 @@ public void testDisabledBlockCache() throws Exception { // We've build a HFile tree with index = 16. Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels()); - HFileBlock block1 = reader.getDataBlockIndexReader() - .loadDataBlockWithScanInfo(firstCell, null, true, true, false, - DataBlockEncoding.NONE, reader).getHFileBlock(); + HFileBlock block1 = reader.getDataBlockIndexReader().loadDataBlockWithScanInfo(firstCell, null, + true, true, false, DataBlockEncoding.NONE, reader).getHFileBlock(); Assert.assertTrue(block1.isSharedMem()); Assert.assertTrue(block1 instanceof SharedMemHFileBlock); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java index 3eb82cd3a7b0..072495b72aec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,10 +56,9 @@ /** * test the performance for seek. *
- * Copied from - * hadoop-3315 tfile. - * Remove after tfile is committed and use the tfile version of this class - * instead. + * Copied from hadoop-3315 tfile. + * Remove after tfile is committed and use the tfile version of this class instead. + *
      */ @Category({ IOTests.class, SmallTests.class }) public class TestHFileSeek { @@ -100,32 +99,25 @@ public void setUp() throws IOException { fs = path.getFileSystem(conf); timer = new NanoTimer(false); rng = new Random(options.seed); - keyLenGen = - new RandomDistribution.Zipf(new Random(rng.nextLong()), - options.minKeyLen, options.maxKeyLen, 1.2); - RandomDistribution.DiscreteRNG valLenGen = - new RandomDistribution.Flat(new Random(rng.nextLong()), - options.minValLength, options.maxValLength); - RandomDistribution.DiscreteRNG wordLenGen = - new RandomDistribution.Flat(new Random(rng.nextLong()), - options.minWordLen, options.maxWordLen); - kvGen = - new KVGenerator(rng, true, keyLenGen, valLenGen, wordLenGen, - options.dictSize); + keyLenGen = new RandomDistribution.Zipf(new Random(rng.nextLong()), options.minKeyLen, + options.maxKeyLen, 1.2); + RandomDistribution.DiscreteRNG valLenGen = new RandomDistribution.Flat( + new Random(rng.nextLong()), options.minValLength, options.maxValLength); + RandomDistribution.DiscreteRNG wordLenGen = new RandomDistribution.Flat( + new Random(rng.nextLong()), options.minWordLen, options.maxWordLen); + kvGen = new KVGenerator(rng, true, keyLenGen, valLenGen, wordLenGen, options.dictSize); } @After public void tearDown() { try { fs.close(); - } - catch (Exception e) { + } catch (Exception e) { // Nothing } } - private static FSDataOutputStream createFSOutput(Path name, FileSystem fs) - throws IOException { + private static FSDataOutputStream createFSOutput(Path name, FileSystem fs) throws IOException { if (fs.exists(name)) { fs.delete(name, true); } @@ -137,14 +129,10 @@ private void createTFile() throws IOException { long totalBytes = 0; FSDataOutputStream fout = createFSOutput(path, fs); try { - HFileContext context = new HFileContextBuilder() - .withBlockSize(options.minBlockSize) - .withCompression(HFileWriterImpl.compressionByName(options.compress)) - .build(); - Writer writer = HFile.getWriterFactoryNoCache(conf) - .withOutputStream(fout) - .withFileContext(context) - .create(); + HFileContext context = new HFileContextBuilder().withBlockSize(options.minBlockSize) + .withCompression(HFileWriterImpl.compressionByName(options.compress)).build(); + Writer writer = HFile.getWriterFactoryNoCache(conf).withOutputStream(fout) + .withFileContext(context).create(); try { BytesWritable key = new BytesWritable(); BytesWritable val = new BytesWritable(); @@ -156,9 +144,9 @@ private void createTFile() throws IOException { } } kvGen.next(key, val, false); - byte [] k = new byte [key.getLength()]; + byte[] k = new byte[key.getLength()]; System.arraycopy(key.getBytes(), 0, k, 0, key.getLength()); - byte [] v = new byte [val.getLength()]; + byte[] v = new byte[val.getLength()]; System.arraycopy(val.getBytes(), 0, v, 0, key.getLength()); KeyValue kv = new KeyValue(k, CF, QUAL, v); writer.append(kv); @@ -166,23 +154,19 @@ private void createTFile() throws IOException { totalBytes += kv.getValueLength(); } timer.stop(); - } - finally { + } finally { writer.close(); } - } - finally { + } finally { fout.close(); } - double duration = (double)timer.read()/1000; // in us. + double duration = (double) timer.read() / 1000; // in us. 
long fsize = fs.getFileStatus(path).getLen(); - System.out.printf( - "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n", - timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes - / duration); - System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n", - timer.toString(), (double) fsize / 1024 / 1024, fsize / duration); + System.out.printf("time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n", timer.toString(), + (double) totalBytes / 1024 / 1024, totalBytes / duration); + System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n", timer.toString(), + (double) fsize / 1024 / 1024, fsize / duration); } public void seekTFile() throws IOException { @@ -211,11 +195,9 @@ public void seekTFile() throws IOException { } } timer.stop(); - System.out.printf( - "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n", - timer.toString(), NanoTimer.nanoTimeToString(timer.read() - / options.seekCount), options.seekCount - miss, miss, - (double) totalBytes / 1024 / (options.seekCount - miss)); + System.out.printf("time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n", + timer.toString(), NanoTimer.nanoTimeToString(timer.read() / options.seekCount), + options.seekCount - miss, miss, (double) totalBytes / 1024 / (options.seekCount - miss)); } @@ -268,8 +250,7 @@ private static class MyOptions { int maxWordLen = 20; private HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - String rootDir = - TEST_UTIL.getDataTestDir("TestTFileSeek").toString(); + String rootDir = TEST_UTIL.getDataTestDir("TestTFileSeek").toString(); String file = "TestTFileSeek"; // String compress = "lzo"; DISABLED String compress = "none"; @@ -302,8 +283,7 @@ public MyOptions(String[] args) { CommandLine line = parser.parse(opts, args, true); processOptions(line, opts); validateOptions(); - } - catch (ParseException e) { + } catch (ParseException e) { System.out.println(e.getMessage()); System.out.println("Try \"--help\" option for details."); setStopProceed(); @@ -315,112 +295,65 @@ public boolean proceed() { } private Options buildOptions() { - Option compress = - OptionBuilder.withLongOpt("compress").withArgName("[none|lzo|gz|snappy]") - .hasArg().withDescription("compression scheme").create('c'); + Option compress = OptionBuilder.withLongOpt("compress").withArgName("[none|lzo|gz|snappy]") + .hasArg().withDescription("compression scheme").create('c'); - Option fileSize = - OptionBuilder.withLongOpt("file-size").withArgName("size-in-MB") - .hasArg().withDescription("target size of the file (in MB).") - .create('s'); + Option fileSize = OptionBuilder.withLongOpt("file-size").withArgName("size-in-MB").hasArg() + .withDescription("target size of the file (in MB).").create('s'); - Option fsInputBufferSz = - OptionBuilder.withLongOpt("fs-input-buffer").withArgName("size") - .hasArg().withDescription( - "size of the file system input buffer (in bytes).").create( - 'i'); + Option fsInputBufferSz = OptionBuilder.withLongOpt("fs-input-buffer").withArgName("size") + .hasArg().withDescription("size of the file system input buffer (in bytes).").create('i'); Option fsOutputBufferSize = - OptionBuilder.withLongOpt("fs-output-buffer").withArgName("size") - .hasArg().withDescription( - "size of the file system output buffer (in bytes).").create( - 'o'); - - Option keyLen = - OptionBuilder - .withLongOpt("key-length") - .withArgName("min,max") - .hasArg() - .withDescription( - "the length range of the key (in bytes)") - .create('k'); - - Option valueLen = - 
OptionBuilder - .withLongOpt("value-length") - .withArgName("min,max") - .hasArg() - .withDescription( - "the length range of the value (in bytes)") - .create('v'); - - Option blockSz = - OptionBuilder.withLongOpt("block").withArgName("size-in-KB").hasArg() - .withDescription("minimum block size (in KB)").create('b'); - - Option operation = - OptionBuilder.withLongOpt("operation").withArgName("r|w|rw").hasArg() - .withDescription( - "action: seek-only, create-only, seek-after-create").create( - 'x'); - - Option rootDir = - OptionBuilder.withLongOpt("root-dir").withArgName("path").hasArg() - .withDescription( - "specify root directory where files will be created.") - .create('r'); - - Option file = - OptionBuilder.withLongOpt("file").withArgName("name").hasArg() - .withDescription("specify the file name to be created or read.") - .create('f'); - - Option seekCount = - OptionBuilder - .withLongOpt("seek") - .withArgName("count") - .hasArg() - .withDescription( - "specify how many seek operations we perform (requires -x r or -x rw.") - .create('n'); - - Option trialCount = - OptionBuilder - .withLongOpt("trials") - .withArgName("n") - .hasArg() - .withDescription( - "specify how many times to run the whole benchmark") - .create('t'); - - Option useRawFs = - OptionBuilder - .withLongOpt("rawfs") - .withDescription("use raw instead of checksummed file system") - .create(); - - Option help = - OptionBuilder.withLongOpt("help").hasArg(false).withDescription( - "show this screen").create("h"); - - return new Options().addOption(compress).addOption(fileSize).addOption( - fsInputBufferSz).addOption(fsOutputBufferSize).addOption(keyLen) - .addOption(blockSz).addOption(rootDir).addOption(valueLen) - .addOption(operation).addOption(seekCount).addOption(file) + OptionBuilder.withLongOpt("fs-output-buffer").withArgName("size").hasArg() + .withDescription("size of the file system output buffer (in bytes).").create('o'); + + Option keyLen = OptionBuilder.withLongOpt("key-length").withArgName("min,max").hasArg() + .withDescription("the length range of the key (in bytes)").create('k'); + + Option valueLen = OptionBuilder.withLongOpt("value-length").withArgName("min,max").hasArg() + .withDescription("the length range of the value (in bytes)").create('v'); + + Option blockSz = OptionBuilder.withLongOpt("block").withArgName("size-in-KB").hasArg() + .withDescription("minimum block size (in KB)").create('b'); + + Option operation = OptionBuilder.withLongOpt("operation").withArgName("r|w|rw").hasArg() + .withDescription("action: seek-only, create-only, seek-after-create").create('x'); + + Option rootDir = OptionBuilder.withLongOpt("root-dir").withArgName("path").hasArg() + .withDescription("specify root directory where files will be created.").create('r'); + + Option file = OptionBuilder.withLongOpt("file").withArgName("name").hasArg() + .withDescription("specify the file name to be created or read.").create('f'); + + Option seekCount = OptionBuilder.withLongOpt("seek").withArgName("count").hasArg() + .withDescription("specify how many seek operations we perform (requires -x r or -x rw.") + .create('n'); + + Option trialCount = OptionBuilder.withLongOpt("trials").withArgName("n").hasArg() + .withDescription("specify how many times to run the whole benchmark").create('t'); + + Option useRawFs = OptionBuilder.withLongOpt("rawfs") + .withDescription("use raw instead of checksummed file system").create(); + + Option help = OptionBuilder.withLongOpt("help").hasArg(false) + .withDescription("show this 
screen").create("h"); + + return new Options().addOption(compress).addOption(fileSize).addOption(fsInputBufferSz) + .addOption(fsOutputBufferSize).addOption(keyLen).addOption(blockSz).addOption(rootDir) + .addOption(valueLen).addOption(operation).addOption(seekCount).addOption(file) .addOption(trialCount).addOption(useRawFs).addOption(help); } - private void processOptions(CommandLine line, Options opts) - throws ParseException { + private void processOptions(CommandLine line, Options opts) throws ParseException { // --help -h and --version -V must be processed first. if (line.hasOption('h')) { HelpFormatter formatter = new HelpFormatter(); System.out.println("TFile and SeqFile benchmark."); System.out.println(); - formatter.printHelp(100, - "java ... TestTFileSeqFileComparison [options]", - "\nSupported options:", opts, ""); + formatter.printHelp(100, "java ... TestTFileSeqFileComparison [options]", + "\nSupported options:", opts, ""); return; } @@ -484,14 +417,11 @@ private void processOptions(CommandLine line, Options opts) String strOp = line.getOptionValue('x'); if (strOp.equals("r")) { op = OP_READ; - } - else if (strOp.equals("w")) { + } else if (strOp.equals("w")) { op = OP_CREATE; - } - else if (strOp.equals("rw")) { + } else if (strOp.equals("rw")) { op = OP_CREATE | OP_READ; - } - else { + } else { throw new ParseException("Unknown action specifier: " + strOp); } } @@ -502,24 +432,21 @@ else if (strOp.equals("rw")) { } private void validateOptions() throws ParseException { - if (!compress.equals("none") && !compress.equals("lzo") - && !compress.equals("gz") && !compress.equals("snappy")) { + if (!compress.equals("none") && !compress.equals("lzo") && !compress.equals("gz") + && !compress.equals("snappy")) { throw new ParseException("Unknown compression scheme: " + compress); } if (minKeyLen >= maxKeyLen) { - throw new ParseException( - "Max key length must be greater than min key length."); + throw new ParseException("Max key length must be greater than min key length."); } if (minValLength >= maxValLength) { - throw new ParseException( - "Max value length must be greater than min value length."); + throw new ParseException("Max value length must be greater than min value length."); } if (minWordLen >= maxWordLen) { - throw new ParseException( - "Max word length must be greater than min word length."); + throw new ParseException("Max word length must be greater than min word length."); } return; } @@ -547,7 +474,7 @@ public static void main(String[] argv) throws IOException { testCase.options = options; for (int i = 0; i < options.trialCount; i++) { - LOG.info("Beginning trial " + (i+1)); + LOG.info("Beginning trial " + (i + 1)); testCase.setUp(); testCase.testSeeks(); testCase.tearDown(); @@ -555,4 +482,3 @@ public static void main(String[] argv) throws IOException { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index d58668352578..a11bb1d027ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -67,7 +67,7 @@ * Testing writing a version 3 {@link HFile}. 
*/ @RunWith(Parameterized.class) -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileWriterV3 { @ClassRule @@ -81,9 +81,11 @@ public class TestHFileWriterV3 { private Configuration conf; private FileSystem fs; private boolean useTags; + public TestHFileWriterV3(boolean useTags) { this.useTags = useTags; } + @Parameters public static Collection parameters() { return HBaseCommonTestingUtil.BOOLEAN_PARAMETERIZED; @@ -108,30 +110,24 @@ private void testHFileFormatV3Internals(boolean useTags) throws IOException { } @Test - public void testMidKeyInHFile() throws IOException{ + public void testMidKeyInHFile() throws IOException { testMidKeyInHFileInternals(useTags); } private void testMidKeyInHFileInternals(boolean useTags) throws IOException { - Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), - "testMidKeyInHFile"); + Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testMidKeyInHFile"); Compression.Algorithm compressAlgo = Compression.Algorithm.NONE; int entryCount = 50000; writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, true, useTags); } - private void writeDataAndReadFromHFile(Path hfilePath, - Algorithm compressAlgo, int entryCount, boolean findMidKey, boolean useTags) throws IOException { - HFileContext context = new HFileContextBuilder() - .withBlockSize(4096) - .withIncludesTags(useTags) - .withDataBlockEncoding(DataBlockEncoding.NONE) - .withCompression(compressAlgo).build(); + private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount, + boolean findMidKey, boolean useTags) throws IOException { + HFileContext context = new HFileContextBuilder().withBlockSize(4096).withIncludesTags(useTags) + .withDataBlockEncoding(DataBlockEncoding.NONE).withCompression(compressAlgo).build(); CacheConfig cacheConfig = new CacheConfig(conf); - HFile.Writer writer = new HFile.WriterFactory(conf, cacheConfig) - .withPath(fs, hfilePath) - .withFileContext(context) - .create(); + HFile.Writer writer = new HFile.WriterFactory(conf, cacheConfig).withPath(fs, hfilePath) + .withFileContext(context).create(); List keyValues = new ArrayList<>(entryCount); for (int i = 0; i < entryCount; ++i) { @@ -146,11 +142,10 @@ private void writeDataAndReadFromHFile(Path hfilePath, RNG.nextBytes(tagBytes); tags.add(new ArrayBackedTag((byte) 1, tagBytes)); } - keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, - valueBytes, tags); + keyValue = + new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes, tags); } else { - keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, - valueBytes); + keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes); } writer.append(keyValue); keyValues.add(keyValue); @@ -164,26 +159,19 @@ private void writeDataAndReadFromHFile(Path hfilePath, writer.close(); - FSDataInputStream fsdis = fs.open(hfilePath); long fileSize = fs.getFileStatus(hfilePath).getLen(); - FixedFileTrailer trailer = - FixedFileTrailer.readFromStream(fsdis, fileSize); + FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize); assertEquals(3, trailer.getMajorVersion()); assertEquals(entryCount, trailer.getEntryCount()); - HFileContext meta = new HFileContextBuilder() - .withCompression(compressAlgo) - .withIncludesMvcc(false) - .withIncludesTags(useTags) - .withDataBlockEncoding(DataBlockEncoding.NONE) - .withHBaseCheckSum(true).build(); - ReaderContext readerContext = new ReaderContextBuilder() 
- .withInputStreamWrapper(new FSDataInputStreamWrapper(fsdis)) - .withFilePath(hfilePath) - .withFileSystem(fs) - .withFileSize(fileSize).build(); + HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo) + .withIncludesMvcc(false).withIncludesTags(useTags) + .withDataBlockEncoding(DataBlockEncoding.NONE).withHBaseCheckSum(true).build(); + ReaderContext readerContext = + new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(fsdis)) + .withFilePath(hfilePath).withFileSystem(fs).withFileSize(fileSize).build(); HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP, conf); // Comparator class name is stored in the trailer in version 3. @@ -194,21 +182,16 @@ private void writeDataAndReadFromHFile(Path hfilePath, HFileBlockIndex.BlockIndexReader metaBlockIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); - HFileBlock.BlockIterator blockIter = blockReader.blockRange( - trailer.getLoadOnOpenDataOffset(), - fileSize - trailer.getTrailerSize()); + HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), + fileSize - trailer.getTrailerSize()); // Data index. We also read statistics about the block index written after // the root level. dataBlockIndexReader.readMultiLevelIndexRoot( - blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fs, hfilePath); - readerContext = new ReaderContextBuilder() - .withFilePath(hfilePath) - .withFileSize(fileSize) - .withFileSystem(wrapper.getHfs()) - .withInputStreamWrapper(wrapper) - .build(); + readerContext = new ReaderContextBuilder().withFilePath(hfilePath).withFileSize(fileSize) + .withFileSystem(wrapper.getHfs()).withInputStreamWrapper(wrapper).build(); HFileInfo hfile = new HFileInfo(readerContext, conf); HFile.Reader reader = new HFilePreadReader(readerContext, hfile, cacheConfig, conf); hfile.initMetaAndIndex(reader); @@ -219,14 +202,14 @@ private void writeDataAndReadFromHFile(Path hfilePath, // Meta index. 
metaBlockIndexReader.readRootIndex( - blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX) - .getByteStream(), trailer.getMetaIndexCount()); + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(), + trailer.getMetaIndexCount()); // File info HFileInfo fileInfo = new HFileInfo(); fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); - byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION); - boolean includeMemstoreTS = keyValueFormatVersion != null && - Bytes.toInt(keyValueFormatVersion) > 0; + byte[] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION); + boolean includeMemstoreTS = + keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0; // Counters for the number of key/value pairs and the number of blocks int entriesRead = 0; @@ -261,8 +244,8 @@ private void writeDataAndReadFromHFile(Path hfilePath, } if (includeMemstoreTS) { - ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(), buf.arrayOffset() - + buf.position(), buf.remaining()); + ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(), + buf.arrayOffset() + buf.position(), buf.remaining()); DataInputStream data_input = new DataInputStream(byte_input); memstoreTS = WritableUtils.readVLong(data_input); @@ -276,18 +259,17 @@ private void writeDataAndReadFromHFile(Path hfilePath, kv.getValueLength()) == 0); if (useTags) { assertNotNull(tagValue); - KeyValue tkv = kv; + KeyValue tkv = kv; assertEquals(tagValue.length, tkv.getTagsLength()); assertTrue(Bytes.compareTo(tagValue, 0, tagValue.length, tkv.getTagsArray(), - tkv.getTagsOffset(), tkv.getTagsLength()) == 0); + tkv.getTagsOffset(), tkv.getTagsLength()) == 0); } ++entriesRead; } ++blocksRead; curBlockPos += block.getOnDiskSizeWithHeader(); } - LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead=" - + blocksRead); + LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead=" + blocksRead); assertEquals(entryCount, entriesRead); // Meta blocks. We can scan until the load-on-open data offset (which is @@ -296,20 +278,19 @@ private void writeDataAndReadFromHFile(Path hfilePath, int metaCounter = 0; while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) { - LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + - trailer.getLoadOnOpenDataOffset()); + LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + + trailer.getLoadOnOpenDataOffset()); HFileBlock block = blockReader.readBlockData(curBlockPos, -1, false, false, true) .unpack(context, blockReader); assertEquals(BlockType.META, block.getBlockType()); Text t = new Text(); ByteBuff buf = block.getBufferWithoutHeader(); if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) { - throw new IOException("Failed to deserialize block " + this + - " into a " + t.getClass().getSimpleName()); + throw new IOException( + "Failed to deserialize block " + this + " into a " + t.getClass().getSimpleName()); } - Text expectedText = - (metaCounter == 0 ? new Text("Paris") : metaCounter == 1 ? new Text( - "Moscow") : new Text("Washington, D.C.")); + Text expectedText = (metaCounter == 0 ? new Text("Paris") + : metaCounter == 1 ? 
new Text("Moscow") : new Text("Washington, D.C.")); assertEquals(expectedText, t); LOG.info("Read meta block data: " + t); ++metaCounter; @@ -320,4 +301,3 @@ private void writeDataAndReadFromHFile(Path hfilePath, reader.close(); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java index ce2ad18883ed..49625d025443 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io.hfile; import java.io.IOException; @@ -63,15 +62,15 @@ * Testing writing a version 3 {@link HFile} for all encoded blocks */ @RunWith(Parameterized.class) -@Category({IOTests.class, MediumTests.class}) +@Category({ IOTests.class, MediumTests.class }) public class TestHFileWriterV3WithDataEncoders { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileWriterV3WithDataEncoders.class); + HBaseClassTestRule.forClass(TestHFileWriterV3WithDataEncoders.class); private static final Logger LOG = - LoggerFactory.getLogger(TestHFileWriterV3WithDataEncoders.class); + LoggerFactory.getLogger(TestHFileWriterV3WithDataEncoders.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Random RNG = new Random(9713312); // Just a fixed seed. @@ -80,8 +79,7 @@ public class TestHFileWriterV3WithDataEncoders { private boolean useTags; private DataBlockEncoding dataBlockEncoding; - public TestHFileWriterV3WithDataEncoders(boolean useTags, - DataBlockEncoding dataBlockEncoding) { + public TestHFileWriterV3WithDataEncoders(boolean useTags, DataBlockEncoding dataBlockEncoding) { this.useTags = useTags; this.dataBlockEncoding = dataBlockEncoding; } @@ -95,8 +93,8 @@ public static Collection parameters() { if (dataBlockEncoding == DataBlockEncoding.NONE) { continue; } - params[i++] = new Object[]{false, dataBlockEncoding}; - params[i++] = new Object[]{true, dataBlockEncoding}; + params[i++] = new Object[] { false, dataBlockEncoding }; + params[i++] = new Object[] { true, dataBlockEncoding }; } return Arrays.asList(params); } @@ -120,33 +118,26 @@ private void testHFileFormatV3Internals(boolean useTags) throws IOException { } @Test - public void testMidKeyInHFile() throws IOException{ + public void testMidKeyInHFile() throws IOException { testMidKeyInHFileInternals(useTags); } private void testMidKeyInHFileInternals(boolean useTags) throws IOException { - Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), - "testMidKeyInHFile"); + Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testMidKeyInHFile"); Compression.Algorithm compressAlgo = Compression.Algorithm.NONE; int entryCount = 50000; writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, true, useTags); } - private void writeDataAndReadFromHFile(Path hfilePath, - Compression.Algorithm compressAlgo, int entryCount, boolean findMidKey, boolean useTags) - throws IOException { + private void writeDataAndReadFromHFile(Path hfilePath, Compression.Algorithm compressAlgo, + int entryCount, boolean findMidKey, boolean useTags) throws IOException { - HFileContext context = new HFileContextBuilder() - .withBlockSize(4096) - 
.withIncludesTags(useTags) - .withDataBlockEncoding(dataBlockEncoding) - .withCellComparator(CellComparatorImpl.COMPARATOR) - .withCompression(compressAlgo).build(); + HFileContext context = new HFileContextBuilder().withBlockSize(4096).withIncludesTags(useTags) + .withDataBlockEncoding(dataBlockEncoding).withCellComparator(CellComparatorImpl.COMPARATOR) + .withCompression(compressAlgo).build(); CacheConfig cacheConfig = new CacheConfig(conf); - HFile.Writer writer = new HFile.WriterFactory(conf, cacheConfig) - .withPath(fs, hfilePath) - .withFileContext(context) - .create(); + HFile.Writer writer = new HFile.WriterFactory(conf, cacheConfig).withPath(fs, hfilePath) + .withFileContext(context).create(); List keyValues = new ArrayList<>(entryCount); writeKeyValues(entryCount, useTags, writer, RNG, keyValues); @@ -154,34 +145,27 @@ private void writeDataAndReadFromHFile(Path hfilePath, FSDataInputStream fsdis = fs.open(hfilePath); long fileSize = fs.getFileStatus(hfilePath).getLen(); - FixedFileTrailer trailer = - FixedFileTrailer.readFromStream(fsdis, fileSize); + FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize); Assert.assertEquals(3, trailer.getMajorVersion()); Assert.assertEquals(entryCount, trailer.getEntryCount()); - HFileContext meta = new HFileContextBuilder() - .withCompression(compressAlgo) - .withIncludesMvcc(true) - .withIncludesTags(useTags) - .withDataBlockEncoding(dataBlockEncoding) - .withHBaseCheckSum(true).build(); - ReaderContext readerContext = new ReaderContextBuilder() - .withInputStreamWrapper(new FSDataInputStreamWrapper(fsdis)) - .withFilePath(hfilePath) - .withFileSystem(fs) - .withFileSize(fileSize).build(); + HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo) + .withIncludesMvcc(true).withIncludesTags(useTags).withDataBlockEncoding(dataBlockEncoding) + .withHBaseCheckSum(true).build(); + ReaderContext readerContext = + new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(fsdis)) + .withFilePath(hfilePath).withFileSystem(fs).withFileSize(fileSize).build(); HFileBlock.FSReader blockReader = - new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP, conf); + new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP, conf); // Comparator class name is stored in the trailer in version 3. CellComparator comparator = trailer.createComparator(); HFileBlockIndex.BlockIndexReader dataBlockIndexReader = - new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator, - trailer.getNumDataIndexLevels()); + new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator, + trailer.getNumDataIndexLevels()); HFileBlockIndex.BlockIndexReader metaBlockIndexReader = - new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); + new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); - HFileBlock.BlockIterator blockIter = blockReader.blockRange( - trailer.getLoadOnOpenDataOffset(), + HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(), fileSize - trailer.getTrailerSize()); // Data index. We also read statistics about the block index written after // the root level. 
@@ -189,12 +173,8 @@ private void writeDataAndReadFromHFile(Path hfilePath, blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount()); FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fs, hfilePath); - readerContext = new ReaderContextBuilder() - .withFilePath(hfilePath) - .withFileSize(fileSize) - .withFileSystem(wrapper.getHfs()) - .withInputStreamWrapper(wrapper) - .build(); + readerContext = new ReaderContextBuilder().withFilePath(hfilePath).withFileSize(fileSize) + .withFileSystem(wrapper.getHfs()).withInputStreamWrapper(wrapper).build(); HFileInfo hfile = new HFileInfo(readerContext, conf); HFile.Reader reader = new HFilePreadReader(readerContext, hfile, cacheConfig, conf); hfile.initMetaAndIndex(reader); @@ -205,14 +185,14 @@ private void writeDataAndReadFromHFile(Path hfilePath, // Meta index. metaBlockIndexReader.readRootIndex( - blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX) - .getByteStream(), trailer.getMetaIndexCount()); + blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(), + trailer.getMetaIndexCount()); // File info HFileInfo fileInfo = new HFileInfo(); fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); - byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION); - boolean includeMemstoreTS = keyValueFormatVersion != null && - Bytes.toInt(keyValueFormatVersion) > 0; + byte[] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION); + boolean includeMemstoreTS = + keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0; // Counters for the number of key/value pairs and the number of blocks int entriesRead = 0; @@ -220,9 +200,8 @@ private void writeDataAndReadFromHFile(Path hfilePath, long memstoreTS = 0; DataBlockEncoder encoder = dataBlockEncoding.getEncoder(); - long curBlockPos = scanBlocks(entryCount, context, keyValues, fsdis, trailer, - meta, blockReader, entriesRead, blocksRead, encoder); - + long curBlockPos = scanBlocks(entryCount, context, keyValues, fsdis, trailer, meta, blockReader, + entriesRead, blocksRead, encoder); // Meta blocks. We can scan until the load-on-open data offset (which is // the root block index offset in version 2) because we are not testing @@ -233,17 +212,16 @@ private void writeDataAndReadFromHFile(Path hfilePath, LOG.info("Current offset: {}, scanning until {}", fsdis.getPos(), trailer.getLoadOnOpenDataOffset()); HFileBlock block = blockReader.readBlockData(curBlockPos, -1, false, false, true) - .unpack(context, blockReader); + .unpack(context, blockReader); Assert.assertEquals(BlockType.META, block.getBlockType()); Text t = new Text(); ByteBuff buf = block.getBufferWithoutHeader(); if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) { - throw new IOException("Failed to deserialize block " + this + - " into a " + t.getClass().getSimpleName()); + throw new IOException( + "Failed to deserialize block " + this + " into a " + t.getClass().getSimpleName()); } - Text expectedText = - (metaCounter == 0 ? new Text("Paris") : metaCounter == 1 ? new Text( - "Moscow") : new Text("Washington, D.C.")); + Text expectedText = (metaCounter == 0 ? new Text("Paris") + : metaCounter == 1 ? 
new Text("Moscow") : new Text("Washington, D.C.")); Assert.assertEquals(expectedText, t); LOG.info("Read meta block data: " + t); ++metaCounter; @@ -256,29 +234,29 @@ private void writeDataAndReadFromHFile(Path hfilePath, private long scanBlocks(int entryCount, HFileContext context, List keyValues, FSDataInputStream fsdis, FixedFileTrailer trailer, HFileContext meta, - HFileBlock.FSReader blockReader, int entriesRead, int blocksRead, - DataBlockEncoder encoder) throws IOException { + HFileBlock.FSReader blockReader, int entriesRead, int blocksRead, DataBlockEncoder encoder) + throws IOException { // Scan blocks the way the reader would scan them fsdis.seek(0); long curBlockPos = 0; while (curBlockPos <= trailer.getLastDataBlockOffset()) { HFileBlockDecodingContext ctx = blockReader.getBlockDecodingContext(); HFileBlock block = blockReader.readBlockData(curBlockPos, -1, false, false, true) - .unpack(context, blockReader); + .unpack(context, blockReader); Assert.assertEquals(BlockType.ENCODED_DATA, block.getBlockType()); ByteBuff origBlock = block.getBufferReadOnly(); int pos = block.headerSize() + DataBlockEncoding.ID_SIZE; origBlock.position(pos); origBlock.limit(pos + block.getUncompressedSizeWithoutHeader() - DataBlockEncoding.ID_SIZE); - ByteBuff buf = origBlock.slice(); + ByteBuff buf = origBlock.slice(); DataBlockEncoder.EncodedSeeker seeker = - encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); + encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta)); seeker.setCurrentBuffer(buf); Cell res = seeker.getCell(); KeyValue kv = keyValues.get(entriesRead); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(res, kv)); ++entriesRead; - while(seeker.next()) { + while (seeker.next()) { res = seeker.getCell(); kv = keyValues.get(entriesRead); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(res, kv)); @@ -292,8 +270,8 @@ private long scanBlocks(int entryCount, HFileContext context, List key return curBlockPos; } - private void writeKeyValues(int entryCount, boolean useTags, HFile.Writer writer, - Random rand, List keyValues) throws IOException { + private void writeKeyValues(int entryCount, boolean useTags, HFile.Writer writer, Random rand, + List keyValues) throws IOException { for (int i = 0; i < entryCount; ++i) { byte[] keyBytes = RandomKeyValueUtil.randomOrderedKey(rand, i); @@ -308,11 +286,10 @@ private void writeKeyValues(int entryCount, boolean useTags, HFile.Writer writer rand.nextBytes(tagBytes); tags.add(new ArrayBackedTag((byte) 1, tagBytes)); } - keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, - valueBytes, tags); + keyValue = + new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes, tags); } else { - keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, - valueBytes); + keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes); } writer.append(keyValue); keyValues.add(keyValue); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java index d3bcba1af876..a0414dfd8546 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor 
license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,10 +53,10 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; /** - * A kind of integration test at the intersection of {@link HFileBlock}, {@link CacheConfig}, - * and {@link LruBlockCache}. + * A kind of integration test at the intersection of {@link HFileBlock}, {@link CacheConfig}, and + * {@link LruBlockCache}. */ -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) @RunWith(Parameterized.class) public class TestLazyDataBlockDecompression { @@ -75,10 +75,7 @@ public class TestLazyDataBlockDecompression { @Parameterized.Parameters public static Iterable data() { - return Arrays.asList(new Object[][] { - { false }, - { true } - }); + return Arrays.asList(new Object[][] { { false }, { true } }); } @Before @@ -92,15 +89,13 @@ public void tearDown() { } /** - * Write {@code entryCount} random keyvalues to a new HFile at {@code path}. Returns the row - * bytes of the KeyValues written, in the order they were written. + * Write {@code entryCount} random keyvalues to a new HFile at {@code path}. Returns the row bytes + * of the KeyValues written, in the order they were written. */ private static void writeHFile(Configuration conf, CacheConfig cc, FileSystem fs, Path path, HFileContext cxt, int entryCount) throws IOException { - HFile.Writer writer = new HFile.WriterFactory(conf, cc) - .withPath(fs, path) - .withFileContext(cxt) - .create(); + HFile.Writer writer = + new HFile.WriterFactory(conf, cc).withPath(fs, path).withFileContext(cxt).create(); // write a bunch of random kvs final byte[] family = Bytes.toBytes("f"); @@ -121,24 +116,18 @@ private static void cacheBlocks(Configuration conf, CacheConfig cacheConfig, Fil Path path, HFileContext cxt) throws IOException { FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path); long fileSize = fs.getFileStatus(path).getLen(); - FixedFileTrailer trailer = - FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); - ReaderContext context = new ReaderContextBuilder() - .withFilePath(path) - .withFileSize(fileSize) - .withFileSystem(fsdis.getHfs()) - .withInputStreamWrapper(fsdis) - .build(); + FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); + ReaderContext context = new ReaderContextBuilder().withFilePath(path).withFileSize(fileSize) + .withFileSystem(fsdis.getHfs()).withInputStreamWrapper(fsdis).build(); HFileInfo fileInfo = new HFileInfo(context, conf); HFile.Reader reader = new HFilePreadReader(context, fileInfo, cacheConfig, conf); fileInfo.initMetaAndIndex(reader); - long offset = trailer.getFirstDataBlockOffset(), - max = trailer.getLastDataBlockOffset(); + long offset = trailer.getFirstDataBlockOffset(), max = trailer.getLastDataBlockOffset(); List blocks = new ArrayList<>(4); HFileBlock block; while (offset <= max) { block = reader.readBlock(offset, -1, /* cacheBlock */ true, /* pread */ false, - /* isCompaction */ false, /* updateCacheMetrics */ true, null, null); + /* isCompaction */ false, /* updateCacheMetrics */ true, null, null); offset += block.getOnDiskSizeWithHeader(); blocks.add(block); } @@ -150,11 +139,10 @@ private static void cacheBlocks(Configuration conf, CacheConfig cacheConfig, Fil public void testCompressionIncreasesEffectiveBlockCacheSize() throws Exception { // enough room for 2 uncompressed block int maxSize = (int) (HConstants.DEFAULT_BLOCKSIZE * 2.1); - Path hfilePath = new 
Path(TEST_UTIL.getDataTestDir(), - "testCompressionIncreasesEffectiveBlockcacheSize"); - HFileContext context = new HFileContextBuilder() - .withCompression(Compression.Algorithm.GZ) - .build(); + Path hfilePath = + new Path(TEST_UTIL.getDataTestDir(), "testCompressionIncreasesEffectiveBlockcacheSize"); + HFileContext context = + new HFileContextBuilder().withCompression(Compression.Algorithm.GZ).build(); LOG.info("context=" + context); // setup cache with lazy-decompression disabled. @@ -172,8 +160,8 @@ public void testCompressionIncreasesEffectiveBlockCacheSize() throws Exception { assertEquals("test inconsistency detected.", maxSize, disabledBlockCache.getMaxSize()); assertTrue("eviction thread spawned unintentionally.", disabledBlockCache.getEvictionThread() == null); - assertEquals("freshly created blockcache contains blocks.", - 0, disabledBlockCache.getBlockCount()); + assertEquals("freshly created blockcache contains blocks.", 0, + disabledBlockCache.getBlockCount()); // 2000 kv's is ~3.6 full unencoded data blocks. // Requires a conf and CacheConfig but should not be specific to this instance's cache settings @@ -185,8 +173,8 @@ public void testCompressionIncreasesEffectiveBlockCacheSize() throws Exception { assertTrue("blockcache should contain blocks. disabledBlockCount=" + disabledBlockCount, disabledBlockCount > 0); long disabledEvictedCount = disabledBlockCache.getStats().getEvictedCount(); - for (Map.Entry e : - disabledBlockCache.getMapForTests().entrySet()) { + for (Map.Entry e : disabledBlockCache.getMapForTests() + .entrySet()) { HFileBlock block = (HFileBlock) e.getValue().getBuffer(); assertTrue("found a packed block, block=" + block, block.isUnpacked()); } @@ -206,8 +194,8 @@ public void testCompressionIncreasesEffectiveBlockCacheSize() throws Exception { assertEquals("test inconsistency detected", maxSize, enabledBlockCache.getMaxSize()); assertTrue("eviction thread spawned unintentionally.", enabledBlockCache.getEvictionThread() == null); - assertEquals("freshly created blockcache contains blocks.", - 0, enabledBlockCache.getBlockCount()); + assertEquals("freshly created blockcache contains blocks.", 0, + enabledBlockCache.getBlockCount()); cacheBlocks(lazyCompressEnabled, cc, fs, hfilePath, context); long enabledBlockCount = enabledBlockCache.getBlockCount(); @@ -215,28 +203,31 @@ public void testCompressionIncreasesEffectiveBlockCacheSize() throws Exception { enabledBlockCount > 0); long enabledEvictedCount = enabledBlockCache.getStats().getEvictedCount(); int candidatesFound = 0; - for (Map.Entry e : - enabledBlockCache.getMapForTests().entrySet()) { + for (Map.Entry e : enabledBlockCache.getMapForTests() + .entrySet()) { candidatesFound++; HFileBlock block = (HFileBlock) e.getValue().getBuffer(); if (cc.shouldCacheCompressed(block.getBlockType().getCategory())) { - assertFalse("found an unpacked block, block=" + block + ", block buffer capacity=" + - block.getBufferWithoutHeader().capacity(), block.isUnpacked()); + assertFalse("found an unpacked block, block=" + block + ", block buffer capacity=" + + block.getBufferWithoutHeader().capacity(), + block.isUnpacked()); } } assertTrue("did not find any candidates for compressed caching. Invalid test.", candidatesFound > 0); - LOG.info("disabledBlockCount=" + disabledBlockCount + ", enabledBlockCount=" + - enabledBlockCount); - assertTrue("enabling compressed data blocks should increase the effective cache size. 
" + - "disabledBlockCount=" + disabledBlockCount + ", enabledBlockCount=" + - enabledBlockCount, disabledBlockCount < enabledBlockCount); - - LOG.info("disabledEvictedCount=" + disabledEvictedCount + ", enabledEvictedCount=" + - enabledEvictedCount); - assertTrue("enabling compressed data blocks should reduce the number of evictions. " + - "disabledEvictedCount=" + disabledEvictedCount + ", enabledEvictedCount=" + - enabledEvictedCount, enabledEvictedCount < disabledEvictedCount); + LOG.info( + "disabledBlockCount=" + disabledBlockCount + ", enabledBlockCount=" + enabledBlockCount); + assertTrue( + "enabling compressed data blocks should increase the effective cache size. " + + "disabledBlockCount=" + disabledBlockCount + ", enabledBlockCount=" + enabledBlockCount, + disabledBlockCount < enabledBlockCount); + + LOG.info("disabledEvictedCount=" + disabledEvictedCount + ", enabledEvictedCount=" + + enabledEvictedCount); + assertTrue("enabling compressed data blocks should reduce the number of evictions. " + + "disabledEvictedCount=" + disabledEvictedCount + ", enabledEvictedCount=" + + enabledEvictedCount, + enabledEvictedCount < disabledEvictedCount); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruAdaptiveBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruAdaptiveBlockCache.java index f600aaca21fe..5660883345f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruAdaptiveBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruAdaptiveBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,18 +51,17 @@ import org.slf4j.LoggerFactory; /** - * Tests the concurrent LruAdaptiveBlockCache.
<p>
      - * - * Tests will ensure it grows and shrinks in size properly, - * evictions run when they're supposed to and do what they should, - * and that cached blocks are accessible when expected to be. + * Tests the concurrent LruAdaptiveBlockCache. + *
<p>
      + * Tests will ensure it grows and shrinks in size properly, evictions run when they're supposed to + * and do what they should, and that cached blocks are accessible when expected to be. */ -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestLruAdaptiveBlockCache { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLruAdaptiveBlockCache.class); + HBaseClassTestRule.forClass(TestLruAdaptiveBlockCache.class); private static final Logger LOG = LoggerFactory.getLogger(TestLruAdaptiveBlockCache.class); @@ -88,15 +87,15 @@ public void testCacheEvictionThreadSafe() throws Exception { ExecutorService service = Executors.newFixedThreadPool(threads); for (int i = 0; i != threads; ++i) { service.execute(() -> { - for (int blockIndex = 0; blockIndex < blocksPerThread + for (int blockIndex = 0; blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) { - CachedItem block = new CachedItem(hfileName, (int) blockSize, - blockCount.getAndIncrement()); - boolean inMemory = Math.random() > 0.5; - cache.cacheBlock(block.cacheKey, block, inMemory); - } - cache.evictBlocksByHfileName(hfileName); - }); + CachedItem block = + new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement()); + boolean inMemory = Math.random() > 0.5; + cache.cacheBlock(block.cacheKey, block, inMemory); + } + cache.evictBlocksByHfileName(hfileName); + }); } service.shutdown(); // The test may fail here if the evict thread frees the blocks too fast @@ -122,10 +121,9 @@ public void testBackgroundEvictionThread() throws Exception { long maxSize = 100000; int numBlocks = 9; long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); - assertTrue("calculateBlockSize appears broken.", - blockSize * numBlocks <= maxSize); + assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); - LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize,blockSize); + LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize); EvictionThread evictionThread = cache.getEvictionThread(); assertNotNull(evictionThread); @@ -161,8 +159,9 @@ public String explainFailure() throws Exception { // acceptableSize, combined with variance between object overhead on // different environments. 
int n = 0; - for (long prevCnt = 0 /* < number of blocks added */, curCnt = - cache.getBlockCount(); prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) { + for (long prevCnt = 0 /* < number of blocks added */, + curCnt = cache.getBlockCount(); prevCnt != curCnt; prevCnt = curCnt, curCnt = + cache.getBlockCount()) { Thread.sleep(200); assertTrue("Cache never stabilized.", n++ < 100); } @@ -179,14 +178,13 @@ public void testCacheSimple() throws Exception { LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize); - CachedItem [] blocks = generateRandomBlocks(100, blockSize); + CachedItem[] blocks = generateRandomBlocks(100, blockSize); long expectedCacheSize = cache.heapSize(); // Confirm empty for (CachedItem block : blocks) { - assertTrue(cache.getBlock(block.cacheKey, true, false, - true) == null); + assertTrue(cache.getBlock(block.cacheKey, true, false, true) == null); } // Add blocks @@ -200,8 +198,7 @@ public void testCacheSimple() throws Exception { // Check if all blocks are properly cached and retrieved for (CachedItem block : blocks) { - HeapSize buf = cache.getBlock(block.cacheKey, true, false, - true); + HeapSize buf = cache.getBlock(block.cacheKey, true, false, true); assertTrue(buf != null); assertEquals(buf.heapSize(), block.heapSize()); } @@ -211,8 +208,7 @@ public void testCacheSimple() throws Exception { for (CachedItem block : blocks) { cache.cacheBlock(block.cacheKey, block); } - assertEquals( - "Cache should ignore cache requests for blocks already in cache", + assertEquals("Cache should ignore cache requests for blocks already in cache", expectedBlockCount, cache.getBlockCount()); // Verify correctly calculated cache heap size @@ -220,8 +216,7 @@ public void testCacheSimple() throws Exception { // Check if all blocks are properly cached and retrieved for (CachedItem block : blocks) { - HeapSize buf = cache.getBlock(block.cacheKey, true, false, - true); + HeapSize buf = cache.getBlock(block.cacheKey, true, false, true); assertTrue(buf != null); assertEquals(buf.heapSize(), block.heapSize()); } @@ -238,9 +233,9 @@ public void testCacheEvictionSimple() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSizeDefault(maxSize, 10); - LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize,blockSize,false); + LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false); - CachedItem [] blocks = generateFixedBlocks(10, blockSize, "block"); + CachedItem[] blocks = generateFixedBlocks(10, blockSize, "block"); long expectedCacheSize = cache.heapSize(); @@ -254,23 +249,18 @@ public void testCacheEvictionSimple() throws Exception { assertEquals(1, cache.getStats().getEvictionCount()); // Our expected size overruns acceptable limit - assertTrue(expectedCacheSize > - (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + assertTrue(expectedCacheSize > (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); // But the cache did not grow beyond max assertTrue(cache.heapSize() < maxSize); // And is still below the acceptable limit - assertTrue(cache.heapSize() < - (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); - - // All blocks except block 0 should be in the cache - assertTrue(cache.getBlock(blocks[0].cacheKey, true, false, - true) == null); - for(int i=1;i - (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + assertTrue(expectedCacheSize > (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); // But the cache did not grow beyond max 
assertTrue(cache.heapSize() <= maxSize); // And is now below the acceptable limit - assertTrue(cache.heapSize() <= - (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + assertTrue(cache.heapSize() <= (maxSize * LruAdaptiveBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); // We expect fairness across the two priorities. // This test makes multi go barely over its limit, in-memory - // empty, and the rest in single. Two single evictions and + // empty, and the rest in single. Two single evictions and // one multi eviction expected. - assertTrue(cache.getBlock(singleBlocks[0].cacheKey, true, false, - true) == null); - assertTrue(cache.getBlock(multiBlocks[0].cacheKey, true, false, - true) == null); + assertTrue(cache.getBlock(singleBlocks[0].cacheKey, true, false, true) == null); + assertTrue(cache.getBlock(multiBlocks[0].cacheKey, true, false, true) == null); // And all others to be cached - for(int i=1;i<4;i++) { - assertEquals(cache.getBlock(singleBlocks[i].cacheKey, true, false, - true), - singleBlocks[i]); - assertEquals(cache.getBlock(multiBlocks[i].cacheKey, true, false, - true), - multiBlocks[i]); + for (int i = 1; i < 4; i++) { + assertEquals(cache.getBlock(singleBlocks[i].cacheKey, true, false, true), singleBlocks[i]); + assertEquals(cache.getBlock(multiBlocks[i].cacheKey, true, false, true), multiBlocks[i]); } } @@ -343,20 +324,14 @@ public void testCacheEvictionThreePriorities() throws Exception { long blockSize = calculateBlockSize(maxSize, 10); LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, - LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.98f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 16 * 1024 * 1024, - 10, - 500, - 0.01f); + (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, + LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 16 * 1024 * 1024, 10, 500, 0.01f); CachedItem[] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem[] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -365,7 +340,7 @@ public void testCacheEvictionThreePriorities() throws Exception { long expectedCacheSize = cache.heapSize(); // Add 3 blocks from each priority - for(int i=0;i<3;i++) { + for (int i = 0; i < 3; i++) { // Just add single blocks cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); @@ -396,8 +371,7 @@ public void testCacheEvictionThreePriorities() throws Exception { assertEquals(1, cache.getStats().getEvictedCount()); // Verify oldest single block is the one evicted - assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); // Change the oldest remaining single block to a multi cache.getBlock(singleBlocks[1].cacheKey, true, false, true); @@ -410,8 +384,7 @@ public void testCacheEvictionThreePriorities() throws Exception { assertEquals(2, cache.getStats().getEvictedCount()); // Oldest multi block should be evicted now - assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); // Insert another memory block cache.cacheBlock(memoryBlocks[3].cacheKey, memoryBlocks[3], true); @@ -421,11 +394,10 @@ 
public void testCacheEvictionThreePriorities() throws Exception { assertEquals(3, cache.getStats().getEvictedCount()); // Oldest memory block should be evicted now - assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true)); // Add a block that is twice as big (should force two evictions) - CachedItem [] bigBlocks = generateFixedBlocks(3, blockSize*3, "big"); + CachedItem[] bigBlocks = generateFixedBlocks(3, blockSize * 3, "big"); cache.cacheBlock(bigBlocks[0].cacheKey, bigBlocks[0]); // Four evictions, six evicted (inserted block 3X size, expect +3 evicted) @@ -433,12 +405,9 @@ public void testCacheEvictionThreePriorities() throws Exception { assertEquals(6, cache.getStats().getEvictedCount()); // Expect three remaining singles to be evicted - assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, true)); // Make the big block a multi block cache.getBlock(bigBlocks[0].cacheKey, true, false, true); @@ -451,12 +420,9 @@ public void testCacheEvictionThreePriorities() throws Exception { assertEquals(9, cache.getStats().getEvictedCount()); // Expect three remaining multis to be evicted - assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true)); // Cache a big memory block cache.cacheBlock(bigBlocks[2].cacheKey, bigBlocks[2], true); @@ -466,12 +432,9 @@ public void testCacheEvictionThreePriorities() throws Exception { assertEquals(12, cache.getStats().getEvictedCount()); // Expect three remaining in-memory to be evicted - assertEquals(null, cache.getBlock(memoryBlocks[1].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(memoryBlocks[2].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(memoryBlocks[3].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(memoryBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(memoryBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(memoryBlocks[3].cacheKey, true, false, true)); } @Test @@ -480,29 +443,23 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { long blockSize = calculateBlockSize(maxSize, 10); LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, - LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.98f, // min - 0.99f, // acceptable - 0.2f, // single - 0.3f, // multi - 0.5f, // memory - 1.2f, // limit - true, - 16 * 1024 * 1024, - 10, - 500, - 0.01f); - - CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); - CachedItem [] 
multiBlocks = generateFixedBlocks(10, blockSize, "multi"); - CachedItem [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory"); + (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, + LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min + 0.99f, // acceptable + 0.2f, // single + 0.3f, // multi + 0.5f, // memory + 1.2f, // limit + true, 16 * 1024 * 1024, 10, 500, 0.01f); + + CachedItem[] singleBlocks = generateFixedBlocks(10, blockSize, "single"); + CachedItem[] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); + CachedItem[] memoryBlocks = generateFixedBlocks(10, blockSize, "memory"); long expectedCacheSize = cache.heapSize(); // 0. Add 5 single blocks and 4 multi blocks to make cache full, si:mu:me = 5:4:0 - for(int i = 0; i < 4; i++) { + for (int i = 0; i < 4; i++) { // Just add single blocks cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); expectedCacheSize += singleBlocks[i].cacheBlockHeapSize(); @@ -525,8 +482,7 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { assertEquals(1, cache.getStats().getEvictionCount()); assertEquals(1, cache.getStats().getEvictedCount()); // Verify oldest single block (index = 0) is the one evicted - assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); // 2. Insert another memory block, another single evicted, si:mu:me = 3:4:2 cache.cacheBlock(memoryBlocks[1].cacheKey, memoryBlocks[1], true); @@ -534,8 +490,7 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { assertEquals(2, cache.getStats().getEvictionCount()); assertEquals(2, cache.getStats().getEvictedCount()); // Current oldest single block (index = 1) should be evicted now - assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); // 3. Insert 4 memory blocks, 2 single and 2 multi evicted, si:mu:me = 1:2:6 cache.cacheBlock(memoryBlocks[2].cacheKey, memoryBlocks[2], true); @@ -546,14 +501,10 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { assertEquals(6, cache.getStats().getEvictionCount()); assertEquals(6, cache.getStats().getEvictedCount()); // two oldest single blocks and two oldest multi blocks evicted - assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(singleBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[3].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); // 4. 
Insert 3 memory blocks, the remaining 1 single and 2 multi evicted // si:mu:me = 0:0:9 @@ -564,12 +515,9 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { assertEquals(9, cache.getStats().getEvictionCount()); assertEquals(9, cache.getStats().getEvictedCount()); // one oldest single block and two oldest multi blocks evicted - assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(multiBlocks[3].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(singleBlocks[4].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[2].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[3].cacheKey, true, false, true)); // 5. Insert one memory block, the oldest memory evicted // si:mu:me = 0:0:9 @@ -578,19 +526,17 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { assertEquals(10, cache.getStats().getEvictionCount()); assertEquals(10, cache.getStats().getEvictedCount()); // oldest memory block evicted - assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true)); // 6. Insert one new single block, itself evicted immediately since - // all blocks in cache are memory-type which have higher priority + // all blocks in cache are memory-type which have higher priority // si:mu:me = 0:0:9 (no change) cache.cacheBlock(singleBlocks[9].cacheKey, singleBlocks[9]); // one eviction, one evicted. assertEquals(11, cache.getStats().getEvictionCount()); assertEquals(11, cache.getStats().getEvictedCount()); // the single block just cached now evicted (can't evict memory) - assertEquals(null, cache.getBlock(singleBlocks[9].cacheKey, true, false, - true)); + assertEquals(null, cache.getBlock(singleBlocks[9].cacheKey, true, false, true)); } // test scan resistance @@ -601,20 +547,14 @@ public void testScanResistance() throws Exception { long blockSize = calculateBlockSize(maxSize, 10); LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, - LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.66f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 16 * 1024 * 1024, - 10, - 500, - 0.01f); + (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, + LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 16 * 1024 * 1024, 10, 500, 0.01f); CachedItem[] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem[] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -637,20 +577,16 @@ public void testScanResistance() throws Exception { assertEquals(4, cache.getStats().getEvictedCount()); // Should have been taken off equally from single and multi - assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, - true)); - assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, - true)); - - // Let's keep "scanning" by adding single blocks. 
From here on we only + assertEquals(null, cache.getBlock(singleBlocks[0].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(singleBlocks[1].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); + + // Let's keep "scanning" by adding single blocks. From here on we only // expect evictions from the single bucket. // Every time we reach 10 total blocks (every 4 inserts) we get 4 single - // blocks evicted. Inserting 13 blocks should yield 3 more evictions and + // blocks evicted. Inserting 13 blocks should yield 3 more evictions and // 12 more evicted. for (int i = 5; i < 18; i++) { @@ -672,35 +608,29 @@ public void testMaxBlockSize() throws Exception { long blockSize = calculateBlockSize(maxSize, 10); LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, - LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.66f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 1024, - 10, - 500, - 0.01f); - - CachedItem[] tooLong = generateFixedBlocks(10, 1024+5, "long"); + (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, + LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 1024, 10, 500, 0.01f); + + CachedItem[] tooLong = generateFixedBlocks(10, 1024 + 5, "long"); CachedItem[] small = generateFixedBlocks(15, 600, "small"); - for (CachedItem i:tooLong) { + for (CachedItem i : tooLong) { cache.cacheBlock(i.cacheKey, i); } - for (CachedItem i:small) { + for (CachedItem i : small) { cache.cacheBlock(i.cacheKey, i); } - assertEquals(15,cache.getBlockCount()); - for (CachedItem i:small) { + assertEquals(15, cache.getBlockCount()); + for (CachedItem i : small) { assertNotNull(cache.getBlock(i.cacheKey, true, false, false)); } - for (CachedItem i:tooLong) { + for (CachedItem i : tooLong) { assertNull(cache.getBlock(i.cacheKey, true, false, false)); } @@ -714,20 +644,14 @@ public void testResizeBlockCache() throws Exception { long blockSize = calculateBlockSize(maxSize, 31); LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, - LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.98f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 16 * 1024 * 1024, - 10, - 500, - 0.01f); + (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, + LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 16 * 1024 * 1024, 10, 500, 0.01f); CachedItem[] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem[] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -750,7 +674,7 @@ public void testResizeBlockCache() throws Exception { assertEquals(0, cache.getStats().getEvictionCount()); // Resize to half capacity plus an extra block (otherwise we evict an extra) - cache.setMaxSize((long)(maxSize * 0.5f)); + cache.setMaxSize((long) (maxSize * 0.5f)); // Should have run a single eviction assertEquals(1, 
cache.getStats().getEvictionCount()); @@ -759,23 +683,17 @@ public void testResizeBlockCache() throws Exception { assertEquals(15, cache.getStats().getEvictedCount()); // And the oldest 5 blocks from each category should be gone - for(int i=0;i<5;i++) { - assertEquals(null, cache.getBlock(singleBlocks[i].cacheKey, true, - false, true)); - assertEquals(null, cache.getBlock(multiBlocks[i].cacheKey, true, - false, true)); - assertEquals(null, cache.getBlock(memoryBlocks[i].cacheKey, true, - false, true)); + for (int i = 0; i < 5; i++) { + assertEquals(null, cache.getBlock(singleBlocks[i].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(multiBlocks[i].cacheKey, true, false, true)); + assertEquals(null, cache.getBlock(memoryBlocks[i].cacheKey, true, false, true)); } // And the newest 5 blocks should still be accessible - for(int i=5;i<10;i++) { - assertEquals(singleBlocks[i], cache.getBlock(singleBlocks[i].cacheKey, true, - false, true)); - assertEquals(multiBlocks[i], cache.getBlock(multiBlocks[i].cacheKey, true, - false, true)); - assertEquals(memoryBlocks[i], cache.getBlock(memoryBlocks[i].cacheKey, true, - false, true)); + for (int i = 5; i < 10; i++) { + assertEquals(singleBlocks[i], cache.getBlock(singleBlocks[i].cacheKey, true, false, true)); + assertEquals(multiBlocks[i], cache.getBlock(multiBlocks[i].cacheKey, true, false, true)); + assertEquals(memoryBlocks[i], cache.getBlock(memoryBlocks[i].cacheKey, true, false, true)); } } @@ -838,12 +756,12 @@ public void testPastNPeriodsMetrics() throws Exception { stats.hit(false, true, BlockType.DATA); stats.rollMetricsPeriod(); assertEquals(0.6, stats.getHitRatioPastNPeriods(), delta); - assertEquals((double)1/3, stats.getHitCachingRatioPastNPeriods(), delta); + assertEquals((double) 1 / 3, stats.getHitCachingRatioPastNPeriods(), delta); // period 6, evict period 3 // should be (2/6)=1/3 and (0/4)=0 stats.rollMetricsPeriod(); - assertEquals((double)1/3, stats.getHitRatioPastNPeriods(), delta); + assertEquals((double) 1 / 3, stats.getHitRatioPastNPeriods(), delta); assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); // period 7, evict period 4 @@ -878,28 +796,20 @@ public void testCacheBlockNextBlockMetadataMissing() { byte[] byteArr = new byte[length]; ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size); HFileContext meta = new HFileContextBuilder().build(); - HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, - -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52, - -1, meta, HEAP); - HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, - -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, - -1, meta, HEAP); + HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, + ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52, -1, meta, HEAP); + HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, + ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, -1, meta, HEAP); LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, - LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.66f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 1024, - 10, - 500, - 0.01f); + (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, + LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min + 
0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 1024, 10, 500, 0.01f); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -908,69 +818,67 @@ public void testCacheBlockNextBlockMetadataMissing() { blockWithNextBlockMetadata.serialize(block1Buffer, true); blockWithoutNextBlockMetadata.serialize(block2Buffer, true); - //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back. + // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back. CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, block1Buffer); - //Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back. + // Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back. CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, block1Buffer); - //Clear and add blockWithoutNextBlockMetadata + // Clear and add blockWithoutNextBlockMetadata cache.clearCache(); assertNull(cache.getBlock(key, false, false, false)); CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, block2Buffer); - //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace. + // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace. CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, block1Buffer); } - private CachedItem [] generateFixedBlocks(int numBlocks, int size, String pfx) { - CachedItem [] blocks = new CachedItem[numBlocks]; - for(int i=0;i { for (int i = 0; i < 10000 && !err1.get(); i++) { @@ -1080,20 +986,15 @@ static void testMultiThreadGetAndEvictBlockInternal(BlockCache cache) throws Exc public void testMultiThreadGetAndEvictBlock() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); - LruAdaptiveBlockCache cache = - new LruAdaptiveBlockCache(maxSize, blockSize, false, - (int) Math.ceil(1.2 * maxSize / blockSize), - LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.66f, // min + LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, false, + (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, + LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min 0.99f, // acceptable 0.33f, // single 0.33f, // multi 0.34f, // memory 1.2f, // limit - false, 1024, - 10, - 500, - 0.01f); + false, 1024, 10, 500, 0.01f); testMultiThreadGetAndEvictBlockInternal(cache); } @@ -1101,24 +1002,17 @@ public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws E long maxSize = 100000000; int numBlocks = 100000; final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); - assertTrue("calculateBlockSize appears broken.", - blockSize * numBlocks <= maxSize); - - final LruAdaptiveBlockCache cache = - new LruAdaptiveBlockCache(maxSize, blockSize, true, - (int) Math.ceil(1.2 * maxSize / blockSize), - LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.5f, // min + assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); + + final LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize, true, + (int) Math.ceil(1.2 * maxSize / blockSize), LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR, + LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.5f, // min 0.99f, // 
acceptable 0.33f, // single 0.33f, // multi 0.34f, // memory 1.2f, // limit - false, - maxSize, - heavyEvictionCountLimit, - 200, - 0.01f); + false, maxSize, heavyEvictionCountLimit, 200, 0.01f); EvictionThread evictionThread = cache.getEvictionThread(); assertNotNull(evictionThread); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index 13fe197226d0..e5b5b8a03b15 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,13 +51,12 @@ import org.slf4j.LoggerFactory; /** - * Tests the concurrent LruBlockCache.
- * - * Tests will ensure it grows and shrinks in size properly, - * evictions run when they're supposed to and do what they should, - * and that cached blocks are accessible when expected to be. + * Tests the concurrent LruBlockCache. + * <p>
      + * Tests will ensure it grows and shrinks in size properly, evictions run when they're supposed to + * and do what they should, and that cached blocks are accessible when expected to be. */ -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestLruBlockCache { @ClassRule @@ -90,8 +89,10 @@ public void testCacheEvictionThreadSafe() throws Exception { service.execute(new Runnable() { @Override public void run() { - for (int blockIndex = 0; blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) { - CachedItem block = new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement()); + for (int blockIndex = 0; blockIndex < blocksPerThread + || (!cache.isEvictionInProgress()); ++blockIndex) { + CachedItem block = + new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement()); boolean inMemory = Math.random() > 0.5; cache.cacheBlock(block.cacheKey, block, inMemory); } @@ -117,6 +118,7 @@ public String explainFailure() throws Exception { assertEquals(cache.getOverhead(), cache.getCurrentSize()); } } + @Test public void testBackgroundEvictionThread() throws Exception { long maxSize = 100000; @@ -124,7 +126,7 @@ public void testBackgroundEvictionThread() throws Exception { long blockSize = calculateBlockSizeDefault(maxSize, numBlocks); assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize); - LruBlockCache cache = new LruBlockCache(maxSize,blockSize); + LruBlockCache cache = new LruBlockCache(maxSize, blockSize); EvictionThread evictionThread = cache.getEvictionThread(); assertTrue(evictionThread != null); @@ -160,8 +162,9 @@ public String explainFailure() throws Exception { // acceptableSize, combined with variance between object overhead on // different environments. 
int n = 0; - for (long prevCnt = 0 /* < number of blocks added */, curCnt = - cache.getBlockCount(); prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) { + for (long prevCnt = 0 /* < number of blocks added */, + curCnt = cache.getBlockCount(); prevCnt != curCnt; prevCnt = curCnt, curCnt = + cache.getBlockCount()) { Thread.sleep(200); assertTrue("Cache never stabilized.", n++ < 100); } @@ -178,7 +181,7 @@ public void testCacheSimple() throws Exception { LruBlockCache cache = new LruBlockCache(maxSize, blockSize); - CachedItem [] blocks = generateRandomBlocks(100, blockSize); + CachedItem[] blocks = generateRandomBlocks(100, blockSize); long expectedCacheSize = cache.heapSize(); @@ -208,9 +211,8 @@ public void testCacheSimple() throws Exception { for (CachedItem block : blocks) { cache.cacheBlock(block.cacheKey, block); } - assertEquals( - "Cache should ignore cache requests for blocks already in cache", - expectedBlockCount, cache.getBlockCount()); + assertEquals("Cache should ignore cache requests for blocks already in cache", + expectedBlockCount, cache.getBlockCount()); // Verify correctly calculated cache heap size assertEquals(expectedCacheSize, cache.heapSize()); @@ -234,9 +236,9 @@ public void testCacheEvictionSimple() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSizeDefault(maxSize, 10); - LruBlockCache cache = new LruBlockCache(maxSize,blockSize,false); + LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false); - CachedItem [] blocks = generateFixedBlocks(10, blockSize, "block"); + CachedItem[] blocks = generateFixedBlocks(10, blockSize, "block"); long expectedCacheSize = cache.heapSize(); @@ -250,21 +252,18 @@ public void testCacheEvictionSimple() throws Exception { assertEquals(1, cache.getStats().getEvictionCount()); // Our expected size overruns acceptable limit - assertTrue(expectedCacheSize > - (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + assertTrue(expectedCacheSize > (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); // But the cache did not grow beyond max assertTrue(cache.heapSize() < maxSize); // And is still below the acceptable limit - assertTrue(cache.heapSize() < - (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + assertTrue(cache.heapSize() < (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); - // All blocks except block 0 should be in the cache + // All blocks except block 0 should be in the cache assertTrue(cache.getBlock(blocks[0].cacheKey, true, false, true) == null); - for(int i=1;i - (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + assertTrue(expectedCacheSize > (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); // But the cache did not grow beyond max assertTrue(cache.heapSize() <= maxSize); // And is now below the acceptable limit - assertTrue(cache.heapSize() <= - (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); + assertTrue(cache.heapSize() <= (maxSize * LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR)); // We expect fairness across the two priorities. // This test makes multi go barely over its limit, in-memory - // empty, and the rest in single. Two single evictions and + // empty, and the rest in single. Two single evictions and // one multi eviction expected. 
assertTrue(cache.getBlock(singleBlocks[0].cacheKey, true, false, true) == null); assertTrue(cache.getBlock(multiBlocks[0].cacheKey, true, false, true) == null); // And all others to be cached - for(int i=1;i<4;i++) { - assertEquals(cache.getBlock(singleBlocks[i].cacheKey, true, false, true), - singleBlocks[i]); - assertEquals(cache.getBlock(multiBlocks[i].cacheKey, true, false, true), - multiBlocks[i]); + for (int i = 1; i < 4; i++) { + assertEquals(cache.getBlock(singleBlocks[i].cacheKey, true, false, true), singleBlocks[i]); + assertEquals(cache.getBlock(multiBlocks[i].cacheKey, true, false, true), multiBlocks[i]); } } @@ -331,18 +326,15 @@ public void testCacheEvictionThreePriorities() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); - LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, - LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.98f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 16 * 1024 * 1024); + LruBlockCache cache = + new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 16 * 1024 * 1024); CachedItem[] singleBlocks = generateFixedBlocks(5, blockSize, "single"); CachedItem[] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -351,7 +343,7 @@ public void testCacheEvictionThreePriorities() throws Exception { long expectedCacheSize = cache.heapSize(); // Add 3 blocks from each priority - for(int i=0;i<3;i++) { + for (int i = 0; i < 3; i++) { // Just add single blocks cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); @@ -408,7 +400,7 @@ public void testCacheEvictionThreePriorities() throws Exception { assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true)); // Add a block that is twice as big (should force two evictions) - CachedItem [] bigBlocks = generateFixedBlocks(3, blockSize*3, "big"); + CachedItem[] bigBlocks = generateFixedBlocks(3, blockSize * 3, "big"); cache.cacheBlock(bigBlocks[0].cacheKey, bigBlocks[0]); // Four evictions, six evicted (inserted block 3X size, expect +3 evicted) @@ -453,27 +445,24 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); - LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, - LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.98f, // min - 0.99f, // acceptable - 0.2f, // single - 0.3f, // multi - 0.5f, // memory - 1.2f, // limit - true, - 16 * 1024 * 1024); - - CachedItem [] singleBlocks = generateFixedBlocks(10, blockSize, "single"); - CachedItem [] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); - CachedItem [] memoryBlocks = generateFixedBlocks(10, blockSize, "memory"); + LruBlockCache cache = + new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min + 0.99f, // acceptable + 0.2f, // single + 0.3f, // multi + 0.5f, // memory + 1.2f, // limit + true, 16 * 1024 * 1024); + + CachedItem[] singleBlocks = generateFixedBlocks(10, blockSize, "single"); + CachedItem[] 
multiBlocks = generateFixedBlocks(10, blockSize, "multi"); + CachedItem[] memoryBlocks = generateFixedBlocks(10, blockSize, "memory"); long expectedCacheSize = cache.heapSize(); // 0. Add 5 single blocks and 4 multi blocks to make cache full, si:mu:me = 5:4:0 - for(int i = 0; i < 4; i++) { + for (int i = 0; i < 4; i++) { // Just add single blocks cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); expectedCacheSize += singleBlocks[i].cacheBlockHeapSize(); @@ -543,7 +532,7 @@ public void testCacheEvictionInMemoryForceMode() throws Exception { assertEquals(null, cache.getBlock(memoryBlocks[0].cacheKey, true, false, true)); // 6. Insert one new single block, itself evicted immediately since - // all blocks in cache are memory-type which have higher priority + // all blocks in cache are memory-type which have higher priority // si:mu:me = 0:0:9 (no change) cache.cacheBlock(singleBlocks[9].cacheKey, singleBlocks[9]); // one eviction, one evicted. @@ -560,18 +549,15 @@ public void testScanResistance() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); - LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, - LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.66f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 16 * 1024 * 1024); + LruBlockCache cache = + new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 16 * 1024 * 1024); CachedItem[] singleBlocks = generateFixedBlocks(20, blockSize, "single"); CachedItem[] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); @@ -599,11 +585,11 @@ public void testScanResistance() throws Exception { assertEquals(null, cache.getBlock(multiBlocks[0].cacheKey, true, false, true)); assertEquals(null, cache.getBlock(multiBlocks[1].cacheKey, true, false, true)); - // Let's keep "scanning" by adding single blocks. From here on we only + // Let's keep "scanning" by adding single blocks. From here on we only // expect evictions from the single bucket. // Every time we reach 10 total blocks (every 4 inserts) we get 4 single - // blocks evicted. Inserting 13 blocks should yield 3 more evictions and + // blocks evicted. Inserting 13 blocks should yield 3 more evictions and // 12 more evicted. 
for (int i = 5; i < 18; i++) { @@ -624,33 +610,29 @@ public void testMaxBlockSize() throws Exception { long maxSize = 100000; long blockSize = calculateBlockSize(maxSize, 10); - LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, - LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.66f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 1024); - CachedItem[] tooLong = generateFixedBlocks(10, 1024+5, "long"); + LruBlockCache cache = + new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 1024); + CachedItem[] tooLong = generateFixedBlocks(10, 1024 + 5, "long"); CachedItem[] small = generateFixedBlocks(15, 600, "small"); - - for (CachedItem i:tooLong) { + for (CachedItem i : tooLong) { cache.cacheBlock(i.cacheKey, i); } - for (CachedItem i:small) { + for (CachedItem i : small) { cache.cacheBlock(i.cacheKey, i); } - assertEquals(15,cache.getBlockCount()); - for (CachedItem i:small) { + assertEquals(15, cache.getBlockCount()); + for (CachedItem i : small) { assertNotNull(cache.getBlock(i.cacheKey, true, false, false)); } - for (CachedItem i:tooLong) { + for (CachedItem i : tooLong) { assertNull(cache.getBlock(i.cacheKey, true, false, false)); } @@ -663,18 +645,15 @@ public void testResizeBlockCache() throws Exception { long maxSize = 300000; long blockSize = calculateBlockSize(maxSize, 31); - LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, - LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.98f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 16 * 1024 * 1024); + LruBlockCache cache = + new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.98f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 16 * 1024 * 1024); CachedItem[] singleBlocks = generateFixedBlocks(10, blockSize, "single"); CachedItem[] multiBlocks = generateFixedBlocks(10, blockSize, "multi"); @@ -697,7 +676,7 @@ public void testResizeBlockCache() throws Exception { assertEquals(0, cache.getStats().getEvictionCount()); // Resize to half capacity plus an extra block (otherwise we evict an extra) - cache.setMaxSize((long)(maxSize * 0.5f)); + cache.setMaxSize((long) (maxSize * 0.5f)); // Should have run a single eviction assertEquals(1, cache.getStats().getEvictionCount()); @@ -706,14 +685,14 @@ public void testResizeBlockCache() throws Exception { assertEquals(15, cache.getStats().getEvictedCount()); // And the oldest 5 blocks from each category should be gone - for(int i=0;i<5;i++) { + for (int i = 0; i < 5; i++) { assertEquals(null, cache.getBlock(singleBlocks[i].cacheKey, true, false, true)); assertEquals(null, cache.getBlock(multiBlocks[i].cacheKey, true, false, true)); assertEquals(null, cache.getBlock(memoryBlocks[i].cacheKey, true, false, true)); } // And the newest 5 blocks should still be accessible - for(int i=5;i<10;i++) { + for (int i = 5; i < 10; i++) { assertEquals(singleBlocks[i], 
cache.getBlock(singleBlocks[i].cacheKey, true, false, true)); assertEquals(multiBlocks[i], cache.getBlock(multiBlocks[i].cacheKey, true, false, true)); assertEquals(memoryBlocks[i], cache.getBlock(memoryBlocks[i].cacheKey, true, false, true)); @@ -779,12 +758,12 @@ public void testPastNPeriodsMetrics() throws Exception { stats.hit(false, true, BlockType.DATA); stats.rollMetricsPeriod(); assertEquals(0.6, stats.getHitRatioPastNPeriods(), delta); - assertEquals((double)1/3, stats.getHitCachingRatioPastNPeriods(), delta); + assertEquals((double) 1 / 3, stats.getHitCachingRatioPastNPeriods(), delta); // period 6, evict period 3 // should be (2/6)=1/3 and (0/4)=0 stats.rollMetricsPeriod(); - assertEquals((double)1/3, stats.getHitRatioPastNPeriods(), delta); + assertEquals((double) 1 / 3, stats.getHitRatioPastNPeriods(), delta); assertEquals(0.0, stats.getHitCachingRatioPastNPeriods(), delta); // period 7, evict period 4 @@ -824,18 +803,15 @@ public void testCacheBlockNextBlockMetadataMissing() { HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, -1, meta, HEAP); - LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false, - (int)Math.ceil(1.2*maxSize/blockSize), - LruBlockCache.DEFAULT_LOAD_FACTOR, - LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, - 0.66f, // min - 0.99f, // acceptable - 0.33f, // single - 0.33f, // multi - 0.34f, // memory - 1.2f, // limit - false, - 1024); + LruBlockCache cache = + new LruBlockCache(maxSize, blockSize, false, (int) Math.ceil(1.2 * maxSize / blockSize), + LruBlockCache.DEFAULT_LOAD_FACTOR, LruBlockCache.DEFAULT_CONCURRENCY_LEVEL, 0.66f, // min + 0.99f, // acceptable + 0.33f, // single + 0.33f, // multi + 0.34f, // memory + 1.2f, // limit + false, 1024); BlockCacheKey key = new BlockCacheKey("key1", 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); @@ -844,69 +820,67 @@ public void testCacheBlockNextBlockMetadataMissing() { blockWithNextBlockMetadata.serialize(block1Buffer, true); blockWithoutNextBlockMetadata.serialize(block2Buffer, true); - //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back. + // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back. CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, - block1Buffer); + block1Buffer); - //Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back. + // Add blockWithoutNextBlockMetada, expect blockWithNextBlockMetadata back. CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, - block1Buffer); + block1Buffer); - //Clear and add blockWithoutNextBlockMetadata + // Clear and add blockWithoutNextBlockMetadata cache.clearCache(); assertNull(cache.getBlock(key, false, false, false)); CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, - block2Buffer); + block2Buffer); - //Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace. + // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace. 
CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, - block1Buffer); + block1Buffer); } - private CachedItem [] generateFixedBlocks(int numBlocks, int size, String pfx) { - CachedItem [] blocks = new CachedItem[numBlocks]; - for(int i=0;i keyValues = new ArrayList<>(entryCount); @@ -94,7 +93,7 @@ private void writeDataToHFile(Path hfilePath, int entryCount) throws IOException } private void writeKeyValues(int entryCount, HFile.Writer writer, List keyValues) - throws IOException { + throws IOException { for (int i = 0; i < entryCount; ++i) { byte[] keyBytes = intToBytes(i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java index 4d46cc484164..15c662d252d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -307,11 +308,9 @@ private static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] builder.setReadOnly(isReadOnly).setDurability(Durability.SYNC_WAL); for (byte[] family : families) { builder.setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(Integer.MAX_VALUE) - .build()); + ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(Integer.MAX_VALUE).build()); } - return HBaseTestingUtil - .createRegionAndWAL(regionInfo, testUtil.getDataTestDir(callingMethod), conf, - builder.build(), BlockCacheFactory.createBlockCache(conf)); + return HBaseTestingUtil.createRegionAndWAL(regionInfo, testUtil.getDataTestDir(callingMethod), + conf, builder.build(), BlockCacheFactory.createBlockCache(conf)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java index 7ea1503a5339..005e45cba0f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java @@ -56,7 +56,7 @@ * Test the optimization that does not scan files where all key ranges are excluded. 
*/ @RunWith(Parameterized.class) -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestScannerSelectionUsingKeyRange { @ClassRule @@ -90,7 +90,7 @@ public static Collection parameters() { } public TestScannerSelectionUsingKeyRange(Object type, Object count) { - bloomType = (BloomType)type; + bloomType = (BloomType) type; expectedCount = (Integer) count; } @@ -104,9 +104,9 @@ public void testScannerSelection() throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt("hbase.hstore.compactionThreshold", 10000); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_BYTES) - .setBlockCacheEnabled(true).setBloomFilterType(bloomType).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_BYTES) + .setBlockCacheEnabled(true).setBloomFilterType(bloomType).build()) + .build(); RegionInfo info = RegionInfoBuilder.newBuilder(TABLE).build(); HRegion region = HBaseTestingUtil.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf, @@ -117,7 +117,7 @@ public void testScannerSelection() throws IOException { Put put = new Put(Bytes.toBytes("row" + iRow)); for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) { put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol), - Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol)); + Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol)); } region.put(put); } @@ -133,7 +133,7 @@ public void testScannerSelection() throws IOException { scanner.close(); assertEquals(0, results.size()); if (cache instanceof LruBlockCache) { - Set accessedFiles = ((LruBlockCache)cache).getCachedFileNamesForTest(); + Set accessedFiles = ((LruBlockCache) cache).getCachedFileNamesForTest(); assertEquals(expectedCount, accessedFiles.size()); } HBaseTestingUtil.closeRegionAndWAL(region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 0954a4eee77f..a993f6914367 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,19 +53,17 @@ import org.slf4j.LoggerFactory; /** - * Test the optimization that does not scan files where all timestamps are - * expired. + * Test the optimization that does not scan files where all timestamps are expired. 
*/ @RunWith(Parameterized.class) -@Category({IOTests.class, LargeTests.class}) +@Category({ IOTests.class, LargeTests.class }) public class TestScannerSelectionUsingTTL { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestScannerSelectionUsingTTL.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestScannerSelectionUsingTTL.class); + private static final Logger LOG = LoggerFactory.getLogger(TestScannerSelectionUsingTTL.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE = TableName.valueOf("myTable"); @@ -101,15 +99,16 @@ public void testScannerSelection() throws IOException { conf.setBoolean("hbase.store.delete.expired.storefile", false); LruBlockCache cache = (LruBlockCache) BlockCacheFactory.createBlockCache(conf); - TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_BYTES).setMaxVersions(Integer.MAX_VALUE) - .setTimeToLive(TTL_SECONDS).build()).build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_BYTES) + .setMaxVersions(Integer.MAX_VALUE).setTimeToLive(TTL_SECONDS).build()) + .build(); RegionInfo info = RegionInfoBuilder.newBuilder(TABLE).build(); - HRegion region = HBaseTestingUtil - .createRegionAndWAL(info, TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, td, cache); + HRegion region = HBaseTestingUtil.createRegionAndWAL(info, + TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, td, cache); long ts = EnvironmentEdgeManager.currentTime(); - long version = 0; //make sure each new set of Put's have a new ts + long version = 0; // make sure each new set of Put's have a new ts for (int iFile = 0; iFile < totalNumFiles; ++iFile) { if (iFile == NUM_EXPIRED_FILES) { Threads.sleepWithoutInterrupt(TTL_MS); @@ -120,7 +119,7 @@ public void testScannerSelection() throws IOException { Put put = new Put(Bytes.toBytes("row" + iRow)); for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) { put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol), ts + version, - Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol)); + Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol)); } region.put(put); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java index 5c78470e6934..d40f730a350b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({IOTests.class, MediumTests.class}) +@Category({ IOTests.class, MediumTests.class }) public class TestSeekBeforeWithInlineBlocks { @ClassRule @@ -54,8 +54,7 @@ public class TestSeekBeforeWithInlineBlocks { private static final Logger LOG = LoggerFactory.getLogger(TestSeekBeforeWithInlineBlocks.class); - private static final HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final int NUM_KV = 10000; @@ -73,9 +72,9 @@ public class TestSeekBeforeWithInlineBlocks { /** * Scanner.seekBefore() could fail because when seeking to a previous HFile data block, it needs * to know the size of that data block, which it calculates using current data block offset and - * the previous data block offset. This fails to work when there are leaf-level index blocks in - * the scannable section of the HFile, i.e. starting in HFileV2. This test will try seekBefore() - * on a flat (single-level) and multi-level (2,3) HFile and confirm this bug is now fixed. This + * the previous data block offset. This fails to work when there are leaf-level index blocks in + * the scannable section of the HFile, i.e. starting in HFileV2. This test will try seekBefore() + * on a flat (single-level) and multi-level (2,3) HFile and confirm this bug is now fixed. This * bug also happens for inline Bloom blocks for the same reasons. */ @Test @@ -84,8 +83,8 @@ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException { TEST_UTIL.getConfiguration().setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, 10); // Try out different HFile versions to ensure reverse scan works on each version - for (int hfileVersion = HFile.MIN_FORMAT_VERSION_WITH_TAGS; - hfileVersion <= HFile.MAX_FORMAT_VERSION; hfileVersion++) { + for (int hfileVersion = + HFile.MIN_FORMAT_VERSION_WITH_TAGS; hfileVersion <= HFile.MAX_FORMAT_VERSION; hfileVersion++) { conf.setInt(HFile.FORMAT_VERSION_KEY, hfileVersion); fs = HFileSystem.get(conf); @@ -107,9 +106,8 @@ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException { Cell[] cells = new Cell[NUM_KV]; - Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), - String.format("testMultiIndexLevelRandomHFileWithBlooms-%s-%s-%s", - hfileVersion, bloomType, testI)); + Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), String.format( + "testMultiIndexLevelRandomHFileWithBlooms-%s-%s-%s", hfileVersion, bloomType, testI)); // Disable caching to prevent it from hiding any bugs in block seeks/reads conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); @@ -117,16 +115,10 @@ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException { // Write the HFile { - HFileContext meta = new HFileContextBuilder() - .withBlockSize(DATA_BLOCK_SIZE) - .build(); + HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build(); - StoreFileWriter storeFileWriter = - new StoreFileWriter.Builder(conf, cacheConf, fs) - .withFilePath(hfilePath) - .withFileContext(meta) - .withBloomType(bloomType) - .build(); + StoreFileWriter storeFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs) + .withFilePath(hfilePath).withFileContext(meta).withBloomType(bloomType).build(); for (int i = 0; i < NUM_KV; i++) { byte[] row = RandomKeyValueUtil.randomOrderedKey(RAND, i); @@ -154,12 +146,12 @@ public void 
testMultiIndexLevelRandomHFileWithBlooms() throws IOException { checkNoSeekBefore(cells, scanner, 0); for (int i = 1; i < NUM_KV; i++) { checkSeekBefore(cells, scanner, i); - checkCell(cells[i-1], scanner.getCell()); + checkCell(cells[i - 1], scanner.getCell()); } assertTrue(scanner.seekTo()); for (int i = NUM_KV - 1; i >= 1; i--) { checkSeekBefore(cells, scanner, i); - checkCell(cells[i-1], scanner.getCell()); + checkCell(cells[i - 1], scanner.getCell()); } checkNoSeekBefore(cells, scanner, 0); scanner.close(); @@ -171,25 +163,21 @@ public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException { } } - private void checkSeekBefore(Cell[] cells, HFileScanner scanner, int i) - throws IOException { - assertEquals("Failed to seek to the key before #" + i + " (" - + CellUtil.getCellKeyAsString(cells[i]) + ")", true, - scanner.seekBefore(cells[i])); + private void checkSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException { + assertEquals( + "Failed to seek to the key before #" + i + " (" + CellUtil.getCellKeyAsString(cells[i]) + ")", + true, scanner.seekBefore(cells[i])); } - private void checkNoSeekBefore(Cell[] cells, HFileScanner scanner, int i) - throws IOException { + private void checkNoSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException { assertEquals("Incorrectly succeeded in seeking to before first key (" - + CellUtil.getCellKeyAsString(cells[i]) + ")", false, - scanner.seekBefore(cells[i])); + + CellUtil.getCellKeyAsString(cells[i]) + ")", + false, scanner.seekBefore(cells[i])); } /** Check a key/value pair after it was read by the reader */ private void checkCell(Cell expected, Cell actual) { - assertTrue(String.format("Expected key %s, but was %s", - CellUtil.getCellKeyAsString(expected), CellUtil.getCellKeyAsString(actual)), - CellUtil.equals(expected, actual)); + assertTrue(String.format("Expected key %s, but was %s", CellUtil.getCellKeyAsString(expected), + CellUtil.getCellKeyAsString(actual)), CellUtil.equals(expected, actual)); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index ffe28d78bf60..099a63eca747 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,16 +54,16 @@ /** * Test {@link HFileScanner#seekTo(Cell)} and its variants. 
*/ -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) @RunWith(Parameterized.class) public class TestSeekTo { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSeekTo.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSeekTo.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private final DataBlockEncoding encoding; + @Parameters public static Collection parameters() { List paramList = new ArrayList<>(); @@ -72,6 +72,7 @@ public static Collection parameters() { } return paramList; } + static boolean switchKVs = false; public TestSeekTo(DataBlockEncoding encoding) { @@ -80,7 +81,7 @@ public TestSeekTo(DataBlockEncoding encoding) { @Before public void setUp() { - //reset + // reset switchKVs = false; } @@ -97,18 +98,19 @@ static KeyValue toKV(String row, TagUsage tagUsage) { } else { if (!switchKVs) { switchKVs = true; - return new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), - Bytes.toBytes("qualifier"), HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value")); + return new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), + HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value")); } else { switchKVs = false; Tag t = new ArrayBackedTag((byte) 1, "myTag1"); Tag[] tags = new Tag[1]; tags[0] = t; - return new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), - Bytes.toBytes("qualifier"), HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"), tags); + return new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("qualifier"), + HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"), tags); } } } + static String toRowStr(Cell c) { return Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength()); } @@ -118,8 +120,7 @@ Path makeNewFile(TagUsage tagUsage) throws IOException { FSDataOutputStream fout = TEST_UTIL.getTestFileSystem().create(ncTFile); int blocksize = toKV("a", tagUsage).getLength() * 3; HFileContext context = new HFileContextBuilder().withBlockSize(blocksize) - .withDataBlockEncoding(encoding) - .withIncludesTags(true).build(); + .withDataBlockEncoding(encoding).withIncludesTags(true).build(); Configuration conf = TEST_UTIL.getConfiguration(); HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withOutputStream(fout) .withFileContext(context).create(); @@ -189,7 +190,7 @@ protected void testSeekBeforeInternals(TagUsage tagUsage) throws IOException { protected void deleteTestDir(FileSystem fs) throws IOException { Path dataTestDir = TEST_UTIL.getDataTestDir(); - if(fs.exists(dataTestDir)) { + if (fs.exists(dataTestDir)) { fs.delete(dataTestDir, true); } } @@ -331,29 +332,19 @@ protected void testBlockContainingKeyInternals(TagUsage tagUsage) throws IOExcep FileSystem fs = TEST_UTIL.getTestFileSystem(); Configuration conf = TEST_UTIL.getConfiguration(); HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf); - HFileBlockIndex.BlockIndexReader blockIndexReader = - reader.getDataBlockIndexReader(); + HFileBlockIndex.BlockIndexReader blockIndexReader = reader.getDataBlockIndexReader(); System.out.println(blockIndexReader.toString()); // falls before the start of the file. 
- assertEquals(-1, blockIndexReader.rootBlockContainingKey( - toKV("a", tagUsage))); - assertEquals(0, blockIndexReader.rootBlockContainingKey( - toKV("c", tagUsage))); - assertEquals(0, blockIndexReader.rootBlockContainingKey( - toKV("d", tagUsage))); - assertEquals(0, blockIndexReader.rootBlockContainingKey( - toKV("e", tagUsage))); - assertEquals(0, blockIndexReader.rootBlockContainingKey( - toKV("g", tagUsage))); + assertEquals(-1, blockIndexReader.rootBlockContainingKey(toKV("a", tagUsage))); + assertEquals(0, blockIndexReader.rootBlockContainingKey(toKV("c", tagUsage))); + assertEquals(0, blockIndexReader.rootBlockContainingKey(toKV("d", tagUsage))); + assertEquals(0, blockIndexReader.rootBlockContainingKey(toKV("e", tagUsage))); + assertEquals(0, blockIndexReader.rootBlockContainingKey(toKV("g", tagUsage))); assertEquals(1, blockIndexReader.rootBlockContainingKey(toKV("h", tagUsage))); - assertEquals(1, blockIndexReader.rootBlockContainingKey( - toKV("i", tagUsage))); - assertEquals(1, blockIndexReader.rootBlockContainingKey( - toKV("j", tagUsage))); - assertEquals(1, blockIndexReader.rootBlockContainingKey( - toKV("k", tagUsage))); - assertEquals(1, blockIndexReader.rootBlockContainingKey( - toKV("l", tagUsage))); + assertEquals(1, blockIndexReader.rootBlockContainingKey(toKV("i", tagUsage))); + assertEquals(1, blockIndexReader.rootBlockContainingKey(toKV("j", tagUsage))); + assertEquals(1, blockIndexReader.rootBlockContainingKey(toKV("k", tagUsage))); + assertEquals(1, blockIndexReader.rootBlockContainingKey(toKV("l", tagUsage))); reader.close(); deleteTestDir(fs); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java index 83c4129e336e..544e7edbaa3e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +25,6 @@ import java.nio.ByteBuffer; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -39,7 +37,7 @@ /** * Tests the concurrent TinyLfuBlockCache. 
*/ -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestTinyLfuBlockCache { @ClassRule @@ -54,7 +52,7 @@ public void testCacheSimple() throws Exception { TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run); - CachedItem [] blocks = generateRandomBlocks(100, blockSize); + CachedItem[] blocks = generateRandomBlocks(100, blockSize); long expectedCacheSize = cache.heapSize(); @@ -84,9 +82,8 @@ public void testCacheSimple() throws Exception { for (CachedItem block : blocks) { cache.cacheBlock(block.cacheKey, block); } - assertEquals( - "Cache should ignore cache requests for blocks already in cache", - expectedBlockCount, cache.getBlockCount()); + assertEquals("Cache should ignore cache requests for blocks already in cache", + expectedBlockCount, cache.getBlockCount()); // Verify correctly calculated cache heap size assertEquals(expectedCacheSize, cache.heapSize()); @@ -110,7 +107,7 @@ public void testCacheEvictionSimple() throws Exception { TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run); - CachedItem [] blocks = generateFixedBlocks(11, blockSize, "block"); + CachedItem[] blocks = generateFixedBlocks(11, blockSize, "block"); // Add all the blocks for (CachedItem block : blocks) { @@ -135,11 +132,11 @@ public void testScanResistance() throws Exception { TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run); - CachedItem [] singleBlocks = generateFixedBlocks(20, blockSize, "single"); - CachedItem [] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); + CachedItem[] singleBlocks = generateFixedBlocks(20, blockSize, "single"); + CachedItem[] multiBlocks = generateFixedBlocks(5, blockSize, "multi"); // Add 5 blocks from each - for(int i=0; i<5; i++) { + for (int i = 0; i < 5; i++) { cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); cache.cacheBlock(multiBlocks[i].cacheKey, multiBlocks[i]); } @@ -152,10 +149,10 @@ public void testScanResistance() throws Exception { } } - // Let's keep "scanning" by adding single blocks. From here on we only + // Let's keep "scanning" by adding single blocks. From here on we only // expect evictions from the single bucket. 
- for(int i=5;i<18;i++) { + for (int i = 5; i < 18; i++) { cache.cacheBlock(singleBlocks[i].cacheKey, singleBlocks[i]); } @@ -174,20 +171,20 @@ public void testMaxBlockSize() throws Exception { long blockSize = calculateBlockSize(maxSize, 10); TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run); - CachedItem [] tooLong = generateFixedBlocks(10, 2 * blockSize, "long"); - CachedItem [] small = generateFixedBlocks(15, blockSize / 2, "small"); + CachedItem[] tooLong = generateFixedBlocks(10, 2 * blockSize, "long"); + CachedItem[] small = generateFixedBlocks(15, blockSize / 2, "small"); - for (CachedItem i:tooLong) { + for (CachedItem i : tooLong) { cache.cacheBlock(i.cacheKey, i); } - for (CachedItem i:small) { + for (CachedItem i : small) { cache.cacheBlock(i.cacheKey, i); } - assertEquals(15,cache.getBlockCount()); - for (CachedItem i:small) { + assertEquals(15, cache.getBlockCount()); + for (CachedItem i : small) { assertNotNull(cache.getBlock(i.cacheKey, true, false, false)); } - for (CachedItem i:tooLong) { + for (CachedItem i : tooLong) { assertNull(cache.getBlock(i.cacheKey, true, false, false)); } @@ -202,9 +199,9 @@ public void testResizeBlockCache() throws Exception { TinyLfuBlockCache cache = new TinyLfuBlockCache(maxSize, blockSize, blockSize, Runnable::run); - CachedItem [] blocks = generateFixedBlocks(10, blockSize, "block"); + CachedItem[] blocks = generateFixedBlocks(10, blockSize, "block"); - for(CachedItem block : blocks) { + for (CachedItem block : blocks) { cache.cacheBlock(block.cacheKey, block); } @@ -220,50 +217,48 @@ public void testResizeBlockCache() throws Exception { assertEquals(5, cache.getStats().getEvictedCount()); } - private CachedItem [] generateFixedBlocks(int numBlocks, int size, String pfx) { - CachedItem [] blocks = new CachedItem[numBlocks]; - for(int i=0;i data() { - return Arrays.asList(new Object[][] { - { 8192, null }, // TODO: why is 8k the default blocksize for these tests? - { - 16 * 1024, + return Arrays.asList(new Object[][] { { 8192, null }, // TODO: why is 8k the default blocksize + // for these tests? + { 16 * 1024, new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, 28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024 } } }); @@ -135,7 +134,7 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { @Before public void setup() throws IOException { cache = new MockedBucketCache(ioEngineName, capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, null); + constructedBlockSizes, writeThreads, writerQLen, null); } @After @@ -145,7 +144,6 @@ public void tearDown() { /** * Test Utility to create test dir and return name - * * @return return name of created dir * @throws IOException throws IOException */ @@ -155,7 +153,6 @@ private Path createAndGetTestDir() throws IOException { return testDir; } - /** * Return a random element from {@code a}. 
*/ @@ -288,10 +285,10 @@ public void testRetrieveFromFile() throws Exception { Path testDir = createAndGetTestDir(); String ioEngineName = "file:" + testDir + "/bucket.cache"; testRetrievalUtils(testDir, ioEngineName); - int[] smallBucketSizes = new int[]{3 * 1024, 5 * 1024}; + int[] smallBucketSizes = new int[] { 3 * 1024, 5 * 1024 }; String persistencePath = testDir + "/bucket.persistence"; BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - smallBucketSizes, writeThreads, writerQLen, persistencePath); + smallBucketSizes, writeThreads, writerQLen, persistencePath); assertFalse(new File(persistencePath).exists()); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); @@ -310,10 +307,10 @@ public void testRetrieveFromPMem() throws Exception { final Path testDir = createAndGetTestDir(); final String ioEngineName = "pmem:" + testDir + "/bucket.cache"; testRetrievalUtils(testDir, ioEngineName); - int[] smallBucketSizes = new int[]{3 * 1024, 5 * 1024}; + int[] smallBucketSizes = new int[] { 3 * 1024, 5 * 1024 }; String persistencePath = testDir + "/bucket.persistence"; BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - smallBucketSizes, writeThreads, writerQLen, persistencePath); + smallBucketSizes, writeThreads, writerQLen, persistencePath); assertFalse(new File(persistencePath).exists()); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); @@ -321,10 +318,10 @@ public void testRetrieveFromPMem() throws Exception { } private void testRetrievalUtils(Path testDir, String ioEngineName) - throws IOException, InterruptedException { + throws IOException, InterruptedException { final String persistencePath = testDir + "/bucket.persistence"; BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, persistencePath); + constructedBlockSizes, writeThreads, writerQLen, persistencePath); try { long usedSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedSize); @@ -340,7 +337,7 @@ private void testRetrievalUtils(Path testDir, String ioEngineName) bucketCache.shutdown(); assertTrue(new File(persistencePath).exists()); bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, persistencePath); + constructedBlockSizes, writeThreads, writerQLen, persistencePath); assertFalse(new File(persistencePath).exists()); assertEquals(usedSize, bucketCache.getAllocator().getUsedSize()); } finally { @@ -357,8 +354,9 @@ public void testRetrieveUnsupportedIOE() throws Exception { testRetrievalUtils(testDir, ioEngineName); Assert.fail("Should have thrown IllegalArgumentException because of unsupported IOEngine!!"); } catch (IllegalArgumentException e) { - Assert.assertEquals("Don't understand io engine name for cache- prefix with file:, " + - "files:, mmap: or offheap", e.getMessage()); + Assert.assertEquals("Don't understand io engine name for cache- prefix with file:, " + + "files:, mmap: or offheap", + e.getMessage()); } } @@ -368,13 +366,13 @@ public void testRetrieveFromMultipleFiles() throws Exception { final Path newTestDir = new HBaseTestingUtil().getDataTestDir(); HBASE_TESTING_UTILITY.getTestFileSystem().mkdirs(newTestDir); String ioEngineName = new StringBuilder("files:").append(testDirInitial) - 
.append("/bucket1.cache").append(FileIOEngine.FILE_DELIMITER).append(newTestDir) - .append("/bucket2.cache").toString(); + .append("/bucket1.cache").append(FileIOEngine.FILE_DELIMITER).append(newTestDir) + .append("/bucket2.cache").toString(); testRetrievalUtils(testDirInitial, ioEngineName); - int[] smallBucketSizes = new int[]{3 * 1024, 5 * 1024}; + int[] smallBucketSizes = new int[] { 3 * 1024, 5 * 1024 }; String persistencePath = testDirInitial + "/bucket.persistence"; BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - smallBucketSizes, writeThreads, writerQLen, persistencePath); + smallBucketSizes, writeThreads, writerQLen, persistencePath); assertFalse(new File(persistencePath).exists()); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); @@ -384,7 +382,7 @@ public void testRetrieveFromMultipleFiles() throws Exception { @Test public void testRetrieveFromFileWithoutPersistence() throws Exception { BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, null); + constructedBlockSizes, writeThreads, writerQLen, null); try { final Path testDir = createAndGetTestDir(); String ioEngineName = "file:" + testDir + "/bucket.cache"; @@ -401,7 +399,7 @@ public void testRetrieveFromFileWithoutPersistence() throws Exception { assertNotEquals(0, usedSize); bucketCache.shutdown(); bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, null); + constructedBlockSizes, writeThreads, writerQLen, null); assertEquals(0, bucketCache.getAllocator().getUsedSize()); } finally { bucketCache.shutdown(); @@ -412,15 +410,16 @@ public void testRetrieveFromFileWithoutPersistence() throws Exception { @Test public void testBucketAllocatorLargeBuckets() throws BucketAllocatorException { long availableSpace = 20 * 1024L * 1024 * 1024; - int[] bucketSizes = new int[]{1024, 1024 * 1024, 1024 * 1024 * 1024}; + int[] bucketSizes = new int[] { 1024, 1024 * 1024, 1024 * 1024 * 1024 }; BucketAllocator allocator = new BucketAllocator(availableSpace, bucketSizes); assertTrue(allocator.getBuckets().length > 0); } @Test public void testGetPartitionSize() throws IOException { - //Test default values - validateGetPartitionSize(cache, BucketCache.DEFAULT_SINGLE_FACTOR, BucketCache.DEFAULT_MIN_FACTOR); + // Test default values + validateGetPartitionSize(cache, BucketCache.DEFAULT_SINGLE_FACTOR, + BucketCache.DEFAULT_MIN_FACTOR); Configuration conf = HBaseConfiguration.create(); conf.setFloat(BucketCache.MIN_FACTOR_CONFIG_NAME, 0.5f); @@ -440,7 +439,7 @@ public void testGetPartitionSize() throws IOException { public void testCacheSizeCapacity() throws IOException { // Test cache capacity (capacity / blockSize) < Integer.MAX_VALUE validateGetPartitionSize(cache, BucketCache.DEFAULT_SINGLE_FACTOR, - BucketCache.DEFAULT_MIN_FACTOR); + BucketCache.DEFAULT_MIN_FACTOR); Configuration conf = HBaseConfiguration.create(); conf.setFloat(BucketCache.MIN_FACTOR_CONFIG_NAME, 0.5f); conf.setFloat(BucketCache.SINGLE_FACTOR_CONFIG_NAME, 0.1f); @@ -448,7 +447,7 @@ public void testCacheSizeCapacity() throws IOException { conf.setFloat(BucketCache.MEMORY_FACTOR_CONFIG_NAME, 0.2f); try { new BucketCache(ioEngineName, Long.MAX_VALUE, 1, constructedBlockSizes, writeThreads, - writerQLen, null, 100, conf); + writerQLen, null, 100, conf); Assert.fail("Should have thrown IllegalArgumentException 
because of large cache capacity!"); } catch (IllegalArgumentException e) { Assert.assertEquals("Cache capacity is too large, only support 32TB now", e.getMessage()); @@ -469,64 +468,68 @@ public void testValidBucketCacheConfigs() throws IOException { constructedBlockSizes, writeThreads, writerQLen, null, 100, conf); assertEquals(BucketCache.ACCEPT_FACTOR_CONFIG_NAME + " failed to propagate.", 0.9f, - cache.getAcceptableFactor(), 0); + cache.getAcceptableFactor(), 0); assertEquals(BucketCache.MIN_FACTOR_CONFIG_NAME + " failed to propagate.", 0.5f, - cache.getMinFactor(), 0); + cache.getMinFactor(), 0); assertEquals(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME + " failed to propagate.", 0.5f, - cache.getExtraFreeFactor(), 0); + cache.getExtraFreeFactor(), 0); assertEquals(BucketCache.SINGLE_FACTOR_CONFIG_NAME + " failed to propagate.", 0.1f, - cache.getSingleFactor(), 0); + cache.getSingleFactor(), 0); assertEquals(BucketCache.MULTI_FACTOR_CONFIG_NAME + " failed to propagate.", 0.7f, - cache.getMultiFactor(), 0); + cache.getMultiFactor(), 0); assertEquals(BucketCache.MEMORY_FACTOR_CONFIG_NAME + " failed to propagate.", 0.2f, - cache.getMemoryFactor(), 0); + cache.getMemoryFactor(), 0); } @Test public void testInvalidAcceptFactorConfig() throws IOException { - float[] configValues = {-1f, 0.2f, 0.86f, 1.05f}; - boolean[] expectedOutcomes = {false, false, true, false}; - Map configMappings = ImmutableMap.of(BucketCache.ACCEPT_FACTOR_CONFIG_NAME, configValues); + float[] configValues = { -1f, 0.2f, 0.86f, 1.05f }; + boolean[] expectedOutcomes = { false, false, true, false }; + Map configMappings = + ImmutableMap.of(BucketCache.ACCEPT_FACTOR_CONFIG_NAME, configValues); Configuration conf = HBaseConfiguration.create(); checkConfigValues(conf, configMappings, expectedOutcomes); } @Test public void testInvalidMinFactorConfig() throws IOException { - float[] configValues = {-1f, 0f, 0.96f, 1.05f}; - //throws due to <0, in expected range, minFactor > acceptableFactor, > 1.0 - boolean[] expectedOutcomes = {false, true, false, false}; - Map configMappings = ImmutableMap - .of(BucketCache.MIN_FACTOR_CONFIG_NAME, configValues); + float[] configValues = { -1f, 0f, 0.96f, 1.05f }; + // throws due to <0, in expected range, minFactor > acceptableFactor, > 1.0 + boolean[] expectedOutcomes = { false, true, false, false }; + Map configMappings = + ImmutableMap.of(BucketCache.MIN_FACTOR_CONFIG_NAME, configValues); Configuration conf = HBaseConfiguration.create(); checkConfigValues(conf, configMappings, expectedOutcomes); } @Test public void testInvalidExtraFreeFactorConfig() throws IOException { - float[] configValues = {-1f, 0f, 0.2f, 1.05f}; - //throws due to <0, in expected range, in expected range, config can be > 1.0 - boolean[] expectedOutcomes = {false, true, true, true}; - Map configMappings = ImmutableMap.of(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME, configValues); + float[] configValues = { -1f, 0f, 0.2f, 1.05f }; + // throws due to <0, in expected range, in expected range, config can be > 1.0 + boolean[] expectedOutcomes = { false, true, true, true }; + Map configMappings = + ImmutableMap.of(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME, configValues); Configuration conf = HBaseConfiguration.create(); checkConfigValues(conf, configMappings, expectedOutcomes); } @Test public void testInvalidCacheSplitFactorConfig() throws IOException { - float[] singleFactorConfigValues = {0.2f, 0f, -0.2f, 1f}; - float[] multiFactorConfigValues = {0.4f, 0f, 1f, .05f}; - float[] memoryFactorConfigValues = {0.4f, 0f, 0.2f, 
.5f}; - // All configs add up to 1.0 and are between 0 and 1.0, configs don't add to 1.0, configs can't be negative, configs don't add to 1.0 - boolean[] expectedOutcomes = {true, false, false, false}; + float[] singleFactorConfigValues = { 0.2f, 0f, -0.2f, 1f }; + float[] multiFactorConfigValues = { 0.4f, 0f, 1f, .05f }; + float[] memoryFactorConfigValues = { 0.4f, 0f, 0.2f, .5f }; + // All configs add up to 1.0 and are between 0 and 1.0, configs don't add to 1.0, configs can't + // be negative, configs don't add to 1.0 + boolean[] expectedOutcomes = { true, false, false, false }; Map configMappings = ImmutableMap.of(BucketCache.SINGLE_FACTOR_CONFIG_NAME, - singleFactorConfigValues, BucketCache.MULTI_FACTOR_CONFIG_NAME, multiFactorConfigValues, - BucketCache.MEMORY_FACTOR_CONFIG_NAME, memoryFactorConfigValues); + singleFactorConfigValues, BucketCache.MULTI_FACTOR_CONFIG_NAME, multiFactorConfigValues, + BucketCache.MEMORY_FACTOR_CONFIG_NAME, memoryFactorConfigValues); Configuration conf = HBaseConfiguration.create(); checkConfigValues(conf, configMappings, expectedOutcomes); } - private void checkConfigValues(Configuration conf, Map configMap, boolean[] expectSuccess) throws IOException { + private void checkConfigValues(Configuration conf, Map configMap, + boolean[] expectSuccess) throws IOException { Set configNames = configMap.keySet(); for (int i = 0; i < expectSuccess.length; i++) { try { @@ -535,15 +538,21 @@ private void checkConfigValues(Configuration conf, Map configMa } BucketCache cache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, null, 100, conf); - assertTrue("Created BucketCache and expected it to succeed: " + expectSuccess[i] + ", but it actually was: " + !expectSuccess[i], expectSuccess[i]); + assertTrue("Created BucketCache and expected it to succeed: " + expectSuccess[i] + + ", but it actually was: " + !expectSuccess[i], + expectSuccess[i]); } catch (IllegalArgumentException e) { - assertFalse("Created BucketCache and expected it to succeed: " + expectSuccess[i] + ", but it actually was: " + !expectSuccess[i], expectSuccess[i]); + assertFalse("Created BucketCache and expected it to succeed: " + expectSuccess[i] + + ", but it actually was: " + !expectSuccess[i], + expectSuccess[i]); } } } - private void validateGetPartitionSize(BucketCache bucketCache, float partitionFactor, float minFactor) { - long expectedOutput = (long) Math.floor(bucketCache.getAllocator().getTotalSize() * partitionFactor * minFactor); + private void validateGetPartitionSize(BucketCache bucketCache, float partitionFactor, + float minFactor) { + long expectedOutput = + (long) Math.floor(bucketCache.getAllocator().getTotalSize() * partitionFactor * minFactor); assertEquals(expectedOutput, bucketCache.getPartitionSize(partitionFactor)); } @@ -552,10 +561,9 @@ public void testOffsetProducesPositiveOutput() { // This number is picked because it produces negative output if the values isn't ensured to be // positive. See HBASE-18757 for more information. 
long testValue = 549888460800L; - BucketEntry bucketEntry = - new BucketEntry(testValue, 10, 10L, true, (entry) -> { - return ByteBuffAllocator.NONE; - }, ByteBuffAllocator.HEAP); + BucketEntry bucketEntry = new BucketEntry(testValue, 10, 10L, true, (entry) -> { + return ByteBuffAllocator.NONE; + }, ByteBuffAllocator.HEAP); assertEquals(testValue, bucketEntry.offset()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java index e9c6e5e2cc63..9b0f435709ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.ByteBuffAllocator; @@ -106,7 +105,8 @@ private void disableWriter() { } } - @org.junit.Ignore @Test // Disabled by HBASE-24079. Reenable issue HBASE-24082 + @org.junit.Ignore + @Test // Disabled by HBASE-24079. Reenable issue HBASE-24082 // Flakey TestBucketCacheRefCnt.testBlockInRAMCache:121 expected:<3> but was:<2> public void testBlockInRAMCache() throws IOException { cache = create(1, 1000); @@ -317,6 +317,7 @@ public void testMarkStaleAsEvicted() throws Exception { * by Thread2 and the content of Block1 would be overwritten after it is freed, which may * cause a serious error. * + * * @throws Exception */ @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java index 1d4f1f3d7425..43716f25696c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestBucketWriterThread { @ClassRule @@ -73,18 +73,18 @@ protected void startWriterThreads() { } /** - * Set up variables and get BucketCache and WriterThread into state where tests can manually + * Set up variables and get BucketCache and WriterThread into state where tests can manually * control the running of WriterThread and BucketCache is empty. */ @Before public void setUp() throws Exception { // Arbitrary capacity. final int capacity = 16; - // Run with one writer thread only. Means there will be one writer queue only too. We depend + // Run with one writer thread only. Means there will be one writer queue only too. We depend // on this in below. 
final int writerThreadsCount = 1; - this.bc = new MockBucketCache("offheap", capacity, 1, new int [] {1}, writerThreadsCount, - capacity, null, 100/*Tolerate ioerrors for 100ms*/); + this.bc = new MockBucketCache("offheap", capacity, 1, new int[] { 1 }, writerThreadsCount, + capacity, null, 100/* Tolerate ioerrors for 100ms */); assertEquals(writerThreadsCount, bc.writerThreads.length); assertEquals(writerThreadsCount, bc.writerQueues.size()); // Get reference to our single WriterThread instance. @@ -117,8 +117,8 @@ public void testNonErrorCase() throws IOException, InterruptedException { } /** - * Pass through a too big entry and ensure it is cleared from queues and ramCache. - * Manually run the WriterThread. + * Pass through a too big entry and ensure it is cleared from queues and ramCache. Manually run + * the WriterThread. * @throws InterruptedException */ @Test @@ -130,8 +130,8 @@ public void testTooBigEntry() throws InterruptedException { } /** - * Do IOE. Take the RAMQueueEntry that was on the queue, doctor it to throw exception, then - * put it back and process it. + * Do IOE. Take the RAMQueueEntry that was on the queue, doctor it to throw exception, then put it + * back and process it. * @throws IOException * @throws InterruptedException */ @@ -141,8 +141,8 @@ public void testIOE() throws IOException, InterruptedException { this.bc.cacheBlock(this.plainKey, plainCacheable); RAMQueueEntry rqe = q.remove(); RAMQueueEntry spiedRqe = Mockito.spy(rqe); - Mockito.doThrow(new IOException("Mocked!")).when(spiedRqe). - writeToCache(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); + Mockito.doThrow(new IOException("Mocked!")).when(spiedRqe).writeToCache(Mockito.any(), + Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); this.q.add(spiedRqe); doDrainOfOneEntry(bc, wt, q); // Cache disabled when ioes w/o ever healing. @@ -155,24 +155,20 @@ public void testIOE() throws IOException, InterruptedException { * @throws InterruptedException */ @Test - public void testCacheFullException() - throws IOException, InterruptedException { + public void testCacheFullException() throws IOException, InterruptedException { this.bc.cacheBlock(this.plainKey, plainCacheable); RAMQueueEntry rqe = q.remove(); RAMQueueEntry spiedRqe = Mockito.spy(rqe); final CacheFullException cfe = new CacheFullException(0, 0); BucketEntry mockedBucketEntry = Mockito.mock(BucketEntry.class); - Mockito.doThrow(cfe). - doReturn(mockedBucketEntry). 
- when(spiedRqe).writeToCache(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), - Mockito.any()); + Mockito.doThrow(cfe).doReturn(mockedBucketEntry).when(spiedRqe).writeToCache(Mockito.any(), + Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); this.q.add(spiedRqe); doDrainOfOneEntry(bc, wt, q); } private static void doDrainOfOneEntry(final BucketCache bc, final BucketCache.WriterThread wt, - final BlockingQueue q) - throws InterruptedException { + final BlockingQueue q) throws InterruptedException { List rqes = BucketCache.getRAMQueueEntries(q, new ArrayList<>(1)); bc.doDrain(rqes, ByteBuffer.allocate(HFileBlock.BLOCK_METADATA_SPACE)); assertTrue(q.isEmpty()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestByteBufferIOEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestByteBufferIOEngine.java index 677d602297c0..1f129e85920a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestByteBufferIOEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestByteBufferIOEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -132,8 +132,7 @@ static class BufferGrabbingDeserializer implements CacheableDeserializer boundaryStartPositions = new ArrayList(); private final static List boundaryStopPositions = new ArrayList(); @@ -145,7 +144,7 @@ public void testReadFailedShouldReleaseByteBuff() { @Override public ByteBuff answer(InvocationOnMock invocation) throws Throwable { int len = invocation.getArgument(0); - return ByteBuff.wrap(new ByteBuffer[]{ByteBuffer.allocate(len + 1)}, refCnt); + return ByteBuff.wrap(new ByteBuffer[] { ByteBuffer.allocate(len + 1) }, refCnt); } }); int len = 10; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java index 4b0801f858b9..a7a3ae53c69c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.nio.ByteBuffer; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.ByteBuffAllocator; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java index 8c95dd3e8407..994a420bad7d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestVerifyBucketCacheFile.java @@ -1,26 +1,26 @@ /* - * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.hfile.bucket; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.BufferedWriter; import java.io.FileOutputStream; import java.io.OutputStreamWriter; @@ -50,14 +50,15 @@ public class TestVerifyBucketCacheFile { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyBucketCacheFile.class); + HBaseClassTestRule.forClass(TestVerifyBucketCacheFile.class); @Parameterized.Parameters(name = "{index}: blockSize={0}, bucketSizes={1}") public static Iterable data() { - return Arrays.asList(new Object[][] { { 8192, null }, { 16 * 1024, - new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, - 28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, - 128 * 1024 + 1024 } } }); + return Arrays.asList(new Object[][] { { 8192, null }, + { 16 * 1024, + new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, + 28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, + 128 * 1024 + 1024 } } }); } @Parameterized.Parameter(0) @@ -71,15 +72,13 @@ public static Iterable data() { final int writerQLen = BucketCache.DEFAULT_WRITER_QUEUE_ITEMS; /** - * Test cache file or persistence file does not exist whether BucketCache starts normally - * (1) Start BucketCache and add some blocks, then shutdown BucketCache and persist cache - * to file. Restart BucketCache and it can restore cache from file. - * (2) Delete bucket cache file after shutdown BucketCache. Restart BucketCache and it can't - * restore cache from file, the cache file and persistence file would be deleted before - * BucketCache start normally. - * (3) Delete persistence file after shutdown BucketCache. Restart BucketCache and it can't - * restore cache from file, the cache file and persistence file would be deleted before - * BucketCache start normally. 
+ * Test cache file or persistence file does not exist whether BucketCache starts normally (1) + * Start BucketCache and add some blocks, then shutdown BucketCache and persist cache to file. + * Restart BucketCache and it can restore cache from file. (2) Delete bucket cache file after + * shutdown BucketCache. Restart BucketCache and it can't restore cache from file, the cache file + * and persistence file would be deleted before BucketCache start normally. (3) Delete persistence + * file after shutdown BucketCache. Restart BucketCache and it can't restore cache from file, the + * cache file and persistence file would be deleted before BucketCache start normally. * @throws Exception the exception */ @Test @@ -89,12 +88,12 @@ public void testRetrieveFromFile() throws Exception { TEST_UTIL.getTestFileSystem().mkdirs(testDir); BucketCache bucketCache = - new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); long usedSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedSize); CacheTestUtils.HFileBlockPair[] blocks = - CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1); + CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1); // Add blocks for (CacheTestUtils.HFileBlockPair block : blocks) { cacheAndWaitUntilFlushedToBucket(bucketCache, block.getBlockName(), block.getBlock()); @@ -105,20 +104,20 @@ public void testRetrieveFromFile() throws Exception { bucketCache.shutdown(); // restore cache from file bucketCache = - new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); assertEquals(usedSize, bucketCache.getAllocator().getUsedSize()); // persist cache to file bucketCache.shutdown(); // 2.delete bucket cache file final java.nio.file.Path cacheFile = - FileSystems.getDefault().getPath(testDir.toString(), "bucket.cache"); + FileSystems.getDefault().getPath(testDir.toString(), "bucket.cache"); assertTrue(Files.deleteIfExists(cacheFile)); // can't restore cache from file bucketCache = - new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); // Add blocks @@ -132,12 +131,12 @@ public void testRetrieveFromFile() throws Exception { // 3.delete backingMap persistence file final java.nio.file.Path mapFile = - FileSystems.getDefault().getPath(testDir.toString(), "bucket.persistence"); + FileSystems.getDefault().getPath(testDir.toString(), "bucket.persistence"); assertTrue(Files.deleteIfExists(mapFile)); // can't restore cache from file bucketCache = - new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, testDir + 
"/bucket.persistence"); + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); @@ -145,10 +144,10 @@ public void testRetrieveFromFile() throws Exception { } /** - * Test whether BucketCache is started normally after modifying the cache file. - * Start BucketCache and add some blocks, then shutdown BucketCache and persist cache to file. - * Restart BucketCache after modify cache file's data, and it can't restore cache from file, - * the cache file and persistence file would be deleted before BucketCache start normally. + * Test whether BucketCache is started normally after modifying the cache file. Start BucketCache + * and add some blocks, then shutdown BucketCache and persist cache to file. Restart BucketCache + * after modify cache file's data, and it can't restore cache from file, the cache file and + * persistence file would be deleted before BucketCache start normally. * @throws Exception the exception */ @Test @@ -158,13 +157,13 @@ public void testModifiedBucketCacheFileData() throws Exception { TEST_UTIL.getTestFileSystem().mkdirs(testDir); BucketCache bucketCache = - new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); long usedSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedSize); CacheTestUtils.HFileBlockPair[] blocks = - CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1); + CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1); // Add blocks for (CacheTestUtils.HFileBlockPair block : blocks) { cacheAndWaitUntilFlushedToBucket(bucketCache, block.getBlockName(), block.getBlock()); @@ -176,14 +175,14 @@ public void testModifiedBucketCacheFileData() throws Exception { // modified bucket cache file String file = testDir + "/bucket.cache"; - try(BufferedWriter out = new BufferedWriter(new OutputStreamWriter( - new FileOutputStream(file, false)))) { + try (BufferedWriter out = + new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file, false)))) { out.write("test bucket cache"); } // can't restore cache from file bucketCache = - new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); @@ -192,10 +191,10 @@ public void testModifiedBucketCacheFileData() throws Exception { /** * Test whether BucketCache is started normally after modifying the cache file's last modified - * time. First Start BucketCache and add some blocks, then shutdown BucketCache and persist - * cache to file. Then Restart BucketCache after modify cache file's last modified time, and - * it can't restore cache from file, the cache file and persistence file would be deleted - * before BucketCache start normally. + * time. 
First Start BucketCache and add some blocks, then shutdown BucketCache and persist cache + * to file. Then Restart BucketCache after modify cache file's last modified time, and it can't + * restore cache from file, the cache file and persistence file would be deleted before + * BucketCache start normally. * @throws Exception the exception */ @Test @@ -205,13 +204,13 @@ public void testModifiedBucketCacheFileTime() throws Exception { TEST_UTIL.getTestFileSystem().mkdirs(testDir); BucketCache bucketCache = - new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); long usedSize = bucketCache.getAllocator().getUsedSize(); assertEquals(0, usedSize); CacheTestUtils.HFileBlockPair[] blocks = - CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1); + CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1); // Add blocks for (CacheTestUtils.HFileBlockPair block : blocks) { cacheAndWaitUntilFlushedToBucket(bucketCache, block.getBlockName(), block.getBlock()); @@ -223,12 +222,12 @@ public void testModifiedBucketCacheFileTime() throws Exception { // modified bucket cache file LastModifiedTime final java.nio.file.Path file = - FileSystems.getDefault().getPath(testDir.toString(), "bucket.cache"); + FileSystems.getDefault().getPath(testDir.toString(), "bucket.cache"); Files.setLastModifiedTime(file, FileTime.from(Instant.now().plusMillis(1_000))); // can't restore cache from file bucketCache = - new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, testDir + "/bucket.persistence"); assertEquals(0, bucketCache.getAllocator().getUsedSize()); assertEquals(0, bucketCache.backingMap.size()); @@ -236,7 +235,7 @@ public void testModifiedBucketCacheFileTime() throws Exception { } private void waitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey) - throws InterruptedException { + throws InterruptedException { while (!cache.backingMap.containsKey(cacheKey) || cache.ramCache.containsKey(cacheKey)) { Thread.sleep(100); } @@ -245,7 +244,7 @@ private void waitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey) // BucketCache.cacheBlock is async, it first adds block to ramCache and writeQueue, then writer // threads will flush it to the bucket and put reference entry in backingMap. 
private void cacheAndWaitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey, - Cacheable block) throws InterruptedException { + Cacheable block) throws InterruptedException { cache.cacheBlock(cacheKey, block); waitUntilFlushedToBucket(cache, cacheKey); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java index 1309ef7a77d3..ec7ebe1bff5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java @@ -43,6 +43,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.internal.verification.VerificationModeFactory.times; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; @@ -72,9 +73,11 @@ import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoRequestProto; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoResponseProto; import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EmptyRequestProto; @@ -101,13 +104,11 @@ public abstract class AbstractTestIPC { } protected abstract RpcServer createRpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler) throws IOException; + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler) throws IOException; protected abstract AbstractRpcClient createRpcClientNoCodec(Configuration conf); - @Rule public OpenTelemetryRule traceRule = OpenTelemetryRule.create(); @@ -118,9 +119,8 @@ protected abstract RpcServer createRpcServer(final Server server, final String n public void testNoCodec() throws IOException, ServiceException { Configuration conf = HBaseConfiguration.create(); RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClientNoCodec(conf)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); @@ -151,9 +151,8 @@ public void testCompressCellBlock() throws IOException, ServiceException { cells.add(CELL); } RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClient(conf)) { rpcServer.start(); @@ -175,16 +174,15 @@ public void testCompressCellBlock() throws IOException, ServiceException { } } - protected 
abstract AbstractRpcClient createRpcClientRTEDuringConnectionSetup( - Configuration conf) throws IOException; + protected abstract AbstractRpcClient + createRpcClientRTEDuringConnectionSetup(Configuration conf) throws IOException; @Test public void testRTEDuringConnectionSetup() throws Exception { Configuration conf = HBaseConfiguration.create(); RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClientRTEDuringConnectionSetup(conf)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); @@ -205,8 +203,8 @@ public void testRTEDuringConnectionSetup() throws Exception { public void testRpcScheduler() throws IOException, ServiceException, InterruptedException { RpcScheduler scheduler = spy(new FifoRpcScheduler(CONF, 1)); RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, scheduler); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, scheduler); verify(scheduler).init(any(RpcScheduler.Context.class)); try (AbstractRpcClient client = createRpcClient(CONF)) { rpcServer.start(); @@ -229,9 +227,8 @@ public void testRpcMaxRequestSize() throws IOException, ServiceException { Configuration conf = new Configuration(CONF); conf.setInt(RpcServer.MAX_REQUEST_SIZE, 1000); RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), conf, - new FifoRpcScheduler(conf, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), conf, new FifoRpcScheduler(conf, 1)); try (AbstractRpcClient client = createRpcClient(conf)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); @@ -248,7 +245,7 @@ public void testRpcMaxRequestSize() throws IOException, ServiceException { } catch (ServiceException e) { LOG.info("Caught expected exception: " + e); assertTrue(e.toString(), - StringUtils.stringifyException(e).contains("RequestTooBigException")); + StringUtils.stringifyException(e).contains("RequestTooBigException")); } finally { rpcServer.stop(); } @@ -262,9 +259,8 @@ public void testRpcMaxRequestSize() throws IOException, ServiceException { public void testRpcServerForNotNullRemoteAddressInCallObject() throws IOException, ServiceException { RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); InetSocketAddress localAddr = new InetSocketAddress("localhost", 0); try (AbstractRpcClient client = createRpcClient(CONF)) { rpcServer.start(); @@ -279,9 +275,8 @@ public void testRpcServerForNotNullRemoteAddressInCallObject() @Test public void 
testRemoteError() throws IOException, ServiceException { RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClient(CONF)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); @@ -299,9 +294,8 @@ public void testRemoteError() throws IOException, ServiceException { @Test public void testTimeout() throws IOException { RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClient(CONF)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); @@ -330,18 +324,16 @@ public void testTimeout() throws IOException { } protected abstract RpcServer createTestFailingRpcServer(final Server server, final String name, - final List services, - final InetSocketAddress bindAddress, Configuration conf, - RpcScheduler scheduler) throws IOException; + final List services, final InetSocketAddress bindAddress, + Configuration conf, RpcScheduler scheduler) throws IOException; /** Tests that the connection closing is handled by the client with outstanding RPC calls */ @Test public void testConnectionCloseWithOutstandingRPCs() throws InterruptedException, IOException { Configuration conf = new Configuration(CONF); RpcServer rpcServer = createTestFailingRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClient(conf)) { rpcServer.start(); @@ -360,9 +352,8 @@ public void testConnectionCloseWithOutstandingRPCs() throws InterruptedException public void testAsyncEcho() throws IOException { Configuration conf = HBaseConfiguration.create(); RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClient(conf)) { rpcServer.start(); Interface stub = newStub(client, rpcServer.getListenerAddress()); @@ -391,9 +382,8 @@ public void testAsyncEcho() throws IOException { public void testAsyncRemoteError() throws IOException { AbstractRpcClient client = createRpcClient(CONF); RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + 
Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try { rpcServer.start(); Interface stub = newStub(client, rpcServer.getListenerAddress()); @@ -415,9 +405,8 @@ public void testAsyncRemoteError() throws IOException { @Test public void testAsyncTimeout() throws IOException { RpcServer rpcServer = createRpcServer(null, "testRpcServer", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( - SERVICE, null)), new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClient(CONF)) { rpcServer.start(); Interface stub = newStub(client, rpcServer.getListenerAddress()); @@ -452,60 +441,40 @@ public void testAsyncTimeout() throws IOException { } private SpanData waitSpan(Matcher matcher) { - Waiter.waitFor(CONF, 1000, new MatcherPredicate<>( - () -> traceRule.getSpans(), hasItem(matcher))); - return traceRule.getSpans() - .stream() - .filter(matcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); + Waiter.waitFor(CONF, 1000, + new MatcherPredicate<>(() -> traceRule.getSpans(), hasItem(matcher))); + return traceRule.getSpans().stream().filter(matcher::matches).findFirst() + .orElseThrow(AssertionError::new); } private static String buildIpcSpanName(final String packageAndService, final String methodName) { return packageAndService + "/" + methodName; } - private static Matcher buildIpcClientSpanMatcher( - final String packageAndService, - final String methodName - ) { - return allOf( - hasName(buildIpcSpanName(packageAndService, methodName)), - hasKind(SpanKind.CLIENT) - ); + private static Matcher buildIpcClientSpanMatcher(final String packageAndService, + final String methodName) { + return allOf(hasName(buildIpcSpanName(packageAndService, methodName)), + hasKind(SpanKind.CLIENT)); } - private static Matcher buildIpcServerSpanMatcher( - final String packageAndService, - final String methodName - ) { - return allOf( - hasName(buildIpcSpanName(packageAndService, methodName)), - hasKind(SpanKind.SERVER) - ); + private static Matcher buildIpcServerSpanMatcher(final String packageAndService, + final String methodName) { + return allOf(hasName(buildIpcSpanName(packageAndService, methodName)), + hasKind(SpanKind.SERVER)); } private static Matcher buildIpcClientSpanAttributesMatcher( - final String packageAndService, - final String methodName, - final InetSocketAddress isa - ) { - return hasAttributes(allOf( - containsEntry("rpc.system", "HBASE_RPC"), - containsEntry("rpc.service", packageAndService), - containsEntry("rpc.method", methodName), + final String packageAndService, final String methodName, final InetSocketAddress isa) { + return hasAttributes(allOf(containsEntry("rpc.system", "HBASE_RPC"), + containsEntry("rpc.service", packageAndService), containsEntry("rpc.method", methodName), containsEntry("net.peer.name", isa.getHostName()), containsEntry(AttributeKey.longKey("net.peer.port"), (long) isa.getPort()))); } - private static Matcher buildIpcServerSpanAttributesMatcher( - final String packageAndService, - final String methodName - ) { - return hasAttributes(allOf( - containsEntry("rpc.system", "HBASE_RPC"), - containsEntry("rpc.service", packageAndService), - containsEntry("rpc.method", methodName))); + private static Matcher + 
buildIpcServerSpanAttributesMatcher(final String packageAndService, final String methodName) { + return hasAttributes(allOf(containsEntry("rpc.system", "HBASE_RPC"), + containsEntry("rpc.service", packageAndService), containsEntry("rpc.method", methodName))); } private void assertRemoteSpan() { @@ -518,28 +487,27 @@ private void assertRemoteSpan() { public void testTracingSuccessIpc() throws IOException, ServiceException { RpcServer rpcServer = createRpcServer(null, "testRpcServer", Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), - new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClient(CONF)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); stub.pause(null, PauseRequestProto.newBuilder().setMs(100).build()); // use the ISA from the running server so that we can get the port selected. final InetSocketAddress isa = rpcServer.getListenerAddress(); - final SpanData pauseClientSpan = waitSpan(buildIpcClientSpanMatcher( - "hbase.test.pb.TestProtobufRpcProto", "pause")); - assertThat(pauseClientSpan, buildIpcClientSpanAttributesMatcher( - "hbase.test.pb.TestProtobufRpcProto", "pause", isa)); - final SpanData pauseServerSpan = waitSpan(buildIpcServerSpanMatcher( - "hbase.test.pb.TestProtobufRpcProto", "pause")); - assertThat(pauseServerSpan, buildIpcServerSpanAttributesMatcher( - "hbase.test.pb.TestProtobufRpcProto", "pause")); + final SpanData pauseClientSpan = + waitSpan(buildIpcClientSpanMatcher("hbase.test.pb.TestProtobufRpcProto", "pause")); + assertThat(pauseClientSpan, + buildIpcClientSpanAttributesMatcher("hbase.test.pb.TestProtobufRpcProto", "pause", isa)); + final SpanData pauseServerSpan = + waitSpan(buildIpcServerSpanMatcher("hbase.test.pb.TestProtobufRpcProto", "pause")); + assertThat(pauseServerSpan, + buildIpcServerSpanAttributesMatcher("hbase.test.pb.TestProtobufRpcProto", "pause")); assertRemoteSpan(); assertFalse("no spans provided", traceRule.getSpans().isEmpty()); - assertThat(traceRule.getSpans(), everyItem(allOf( - hasStatusWithCode(StatusCode.OK), - hasTraceId(traceRule.getSpans().iterator().next().getTraceId()), - hasDuration(greaterThanOrEqualTo(Duration.ofMillis(100L)))))); + assertThat(traceRule.getSpans(), + everyItem(allOf(hasStatusWithCode(StatusCode.OK), + hasTraceId(traceRule.getSpans().iterator().next().getTraceId()), + hasDuration(greaterThanOrEqualTo(Duration.ofMillis(100L)))))); } } @@ -547,8 +515,7 @@ public void testTracingSuccessIpc() throws IOException, ServiceException { public void testTracingErrorIpc() throws IOException { RpcServer rpcServer = createRpcServer(null, "testRpcServer", Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), - new InetSocketAddress("localhost", 0), CONF, - new FifoRpcScheduler(CONF, 1)); + new InetSocketAddress("localhost", 0), CONF, new FifoRpcScheduler(CONF, 1)); try (AbstractRpcClient client = createRpcClient(CONF)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); @@ -556,18 +523,17 @@ public void testTracingErrorIpc() throws IOException { assertThrows(ServiceException.class, () -> stub.error(null, EmptyRequestProto.getDefaultInstance())); final InetSocketAddress isa = rpcServer.getListenerAddress(); - final SpanData errorClientSpan = waitSpan(buildIpcClientSpanMatcher( - "hbase.test.pb.TestProtobufRpcProto", 
"error")); - assertThat(errorClientSpan, buildIpcClientSpanAttributesMatcher( - "hbase.test.pb.TestProtobufRpcProto", "error", isa)); - final SpanData errorServerSpan = waitSpan(buildIpcServerSpanMatcher( - "hbase.test.pb.TestProtobufRpcProto", "error")); - assertThat(errorServerSpan, buildIpcServerSpanAttributesMatcher( - "hbase.test.pb.TestProtobufRpcProto", "error")); + final SpanData errorClientSpan = + waitSpan(buildIpcClientSpanMatcher("hbase.test.pb.TestProtobufRpcProto", "error")); + assertThat(errorClientSpan, + buildIpcClientSpanAttributesMatcher("hbase.test.pb.TestProtobufRpcProto", "error", isa)); + final SpanData errorServerSpan = + waitSpan(buildIpcServerSpanMatcher("hbase.test.pb.TestProtobufRpcProto", "error")); + assertThat(errorServerSpan, + buildIpcServerSpanAttributesMatcher("hbase.test.pb.TestProtobufRpcProto", "error")); assertRemoteSpan(); assertFalse("no spans provided", traceRule.getSpans().isEmpty()); - assertThat(traceRule.getSpans(), everyItem(allOf( - hasStatusWithCode(StatusCode.ERROR), + assertThat(traceRule.getSpans(), everyItem(allOf(hasStatusWithCode(StatusCode.ERROR), hasTraceId(traceRule.getSpans().iterator().next().getTraceId())))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java index 4aa5cab9f163..7d2836e7c6ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; public class DelegatingRpcScheduler extends RpcScheduler { @@ -29,14 +28,17 @@ public DelegatingRpcScheduler(RpcScheduler delegate) { public void stop() { delegate.stop(); } + @Override public void start() { delegate.start(); } + @Override public void init(Context context) { delegate.init(context); } + @Override public int getReplicationQueueLength() { return delegate.getReplicationQueueLength(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java index c095c92dac07..f525c027a409 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperStub.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,10 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; -public class MetricsHBaseServerWrapperStub implements MetricsHBaseServerWrapper{ +public class MetricsHBaseServerWrapperStub implements MetricsHBaseServerWrapper { @Override public long getTotalQueueSize() { return 101; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/QosTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/QosTestBase.java index f90a599fcced..2b7d8d1324d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/QosTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/QosTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,12 @@ public class QosTestBase { protected final void checkMethod(Configuration conf, final String methodName, final int expected, - final AnnotationReadingPriorityFunction qosf) { + final AnnotationReadingPriorityFunction qosf) { checkMethod(conf, methodName, expected, qosf, null); } protected final void checkMethod(Configuration conf, final String methodName, final int expected, - final AnnotationReadingPriorityFunction qosf, final Message param) { + final AnnotationReadingPriorityFunction qosf, final Message param) { RPCProtos.RequestHeader.Builder builder = RPCProtos.RequestHeader.newBuilder(); builder.setMethodName(methodName); assertEquals(methodName, expected, qosf.getPriority(builder.build(), param, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java index 25e3c7de119d..86d3525ac780 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBlockingIPC.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,8 @@ public class TestBlockingIPC extends AbstractTestIPC { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestBlockingIPC.class); - @Override protected RpcServer createRpcServer(Server server, String name, + @Override + protected RpcServer createRpcServer(Server server, String name, List services, InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException { return RpcServerFactory.createRpcServer(server, name, services, bindAddress, conf, scheduler); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBufferChain.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBufferChain.java index 1280872a6db7..e1fc95e6a4c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBufferChain.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestBufferChain.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Charsets; import org.apache.hbase.thirdparty.com.google.common.io.Files; -@Category({RPCTests.class, SmallTests.class}) +@Category({ RPCTests.class, SmallTests.class }) public class TestBufferChain { @ClassRule @@ -47,11 +47,9 @@ public class TestBufferChain { private File tmpFile; - private static final byte[][] HELLO_WORLD_CHUNKS = new byte[][] { - "hello".getBytes(Charsets.UTF_8), - " ".getBytes(Charsets.UTF_8), - "world".getBytes(Charsets.UTF_8) - }; + private static final byte[][] HELLO_WORLD_CHUNKS = + new byte[][] { "hello".getBytes(Charsets.UTF_8), " ".getBytes(Charsets.UTF_8), + "world".getBytes(Charsets.UTF_8) }; @Before public void setup() throws IOException { @@ -88,20 +86,16 @@ public void testChainChunkBiggerThanSomeArrays() throws IOException { @Test public void testLimitOffset() throws IOException { - ByteBuffer[] bufs = new ByteBuffer[] { - stringBuf("XXXhelloYYY", 3, 5), - stringBuf(" ", 0, 1), + ByteBuffer[] bufs = new ByteBuffer[] { stringBuf("XXXhelloYYY", 3, 5), stringBuf(" ", 0, 1), stringBuf("XXXXworldY", 4, 5) }; BufferChain chain = new BufferChain(bufs); - writeAndVerify(chain , "hello world", 3); + writeAndVerify(chain, "hello world", 3); assertNoRemaining(bufs); } @Test public void testWithSpy() throws IOException { - ByteBuffer[] bufs = new ByteBuffer[] { - stringBuf("XXXhelloYYY", 3, 5), - stringBuf(" ", 0, 1), + ByteBuffer[] bufs = new ByteBuffer[] { stringBuf("XXXhelloYYY", 3, 5), stringBuf(" ", 0, 1), stringBuf("XXXXworldY", 4, 5) }; BufferChain chain = new BufferChain(bufs); FileOutputStream fos = new FileOutputStream(tmpFile); @@ -143,8 +137,7 @@ private ByteBuffer[] wrapArrays(byte[][] arrays) { return ret; } - private void writeAndVerify(BufferChain chain, String string, int chunkSize) - throws IOException { + private void writeAndVerify(BufferChain chain, String string, int chunkSize) throws IOException { FileOutputStream fos = new FileOutputStream(tmpFile); FileChannel ch = fos.getChannel(); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java index d18ddbd49f5f..8d892430f8c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,6 +25,7 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.hasItem; + import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; import java.net.InetSocketAddress; @@ -48,7 +49,7 @@ import org.junit.rules.TestName; import org.mockito.Mockito; -@Category({RPCTests.class, SmallTests.class}) +@Category({ RPCTests.class, SmallTests.class }) public class TestCallRunner { @ClassRule @@ -84,15 +85,11 @@ public void testSimpleCall() { cr.run(); }, testName.getMethodName()); - Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>( - otelRule::getSpans, hasItem(allOf( - hasName(testName.getMethodName()), - hasEnded())))); + Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>(otelRule::getSpans, + hasItem(allOf(hasName(testName.getMethodName()), hasEnded())))); - assertThat(otelRule.getSpans(), hasItem(allOf( - hasName(testName.getMethodName()), - hasStatusWithCode(StatusCode.OK), - hasEnded()))); + assertThat(otelRule.getSpans(), hasItem( + allOf(hasName(testName.getMethodName()), hasStatusWithCode(StatusCode.OK), hasEnded()))); } @Test @@ -124,16 +121,12 @@ public void testCallRunnerDropDisconnected() { }, testName.getMethodName()); Mockito.verify(mockCall, Mockito.times(1)).cleanup(); - Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>( - otelRule::getSpans, hasItem(allOf( - hasName(testName.getMethodName()), - hasEnded())))); + Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>(otelRule::getSpans, + hasItem(allOf(hasName(testName.getMethodName()), hasEnded())))); - assertThat(otelRule.getSpans(), hasItem(allOf( - hasName(testName.getMethodName()), - hasStatusWithCode(StatusCode.OK), - hasEvents(hasItem(EventMatchers.hasName("Client disconnect detected"))), - hasEnded()))); + assertThat(otelRule.getSpans(), + hasItem(allOf(hasName(testName.getMethodName()), hasStatusWithCode(StatusCode.OK), + hasEvents(hasItem(EventMatchers.hasName("Client disconnect detected"))), hasEnded()))); } @Test @@ -143,7 +136,7 @@ public void testCallRunnerDropConnected() { Mockito.when(mockRpcServer.getMetrics()).thenReturn(mockMetrics); Mockito.when(mockRpcServer.isStarted()).thenReturn(true); Mockito.when(mockRpcServer.getListenerAddress()) - .thenReturn(InetSocketAddress.createUnresolved("foo", 60020)); + .thenReturn(InetSocketAddress.createUnresolved("foo", 60020)); ServerCall mockCall = Mockito.mock(ServerCall.class); Mockito.when(mockCall.disconnectSince()).thenReturn(-1L); @@ -155,18 +148,14 @@ public void testCallRunnerDropConnected() { Mockito.verify(mockCall, Mockito.times(1)).cleanup(); Mockito.verify(mockMetrics).exception(Mockito.any(CallDroppedException.class)); - Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>( - otelRule::getSpans, hasItem(allOf( - hasName(testName.getMethodName()), - hasEnded())))); - - assertThat(otelRule.getSpans(), hasItem(allOf( - hasName(testName.getMethodName()), - hasStatusWithCode(StatusCode.ERROR), - hasEvents(hasItem(allOf( - EventMatchers.hasName("exception"), - EventMatchers.hasAttributes( - containsEntry("exception.type", CallDroppedException.class.getName()))))), - hasEnded()))); + Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>(otelRule::getSpans, + hasItem(allOf(hasName(testName.getMethodName()), hasEnded())))); + + 
assertThat(otelRule.getSpans(), + hasItem(allOf(hasName(testName.getMethodName()), hasStatusWithCode(StatusCode.ERROR), + hasEvents(hasItem(allOf(EventMatchers.hasName("exception"), + EventMatchers.hasAttributes( + containsEntry("exception.type", CallDroppedException.class.getName()))))), + hasEnded()))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestFifoRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestFifoRpcScheduler.java index 940bc67ff5a5..a3674259b4f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestFifoRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestFifoRpcScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.IOException; import java.lang.reflect.Field; import java.net.InetSocketAddress; @@ -42,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RPCTests.class, SmallTests.class}) +@Category({ RPCTests.class, SmallTests.class }) public class TestFifoRpcScheduler { @ClassRule @@ -68,7 +69,7 @@ public void setUp() { } private ThreadPoolExecutor disableHandlers(RpcScheduler scheduler) { - ThreadPoolExecutor rpcExecutor=null; + ThreadPoolExecutor rpcExecutor = null; try { Field ExecutorField = scheduler.getClass().getDeclaredField("executor"); @@ -86,11 +87,11 @@ private ThreadPoolExecutor disableHandlers(RpcScheduler scheduler) { Thread.sleep(2000); } catch (NoSuchFieldException e) { - LOG.error("No such field exception:"+e); + LOG.error("No such field exception:" + e); } catch (IllegalAccessException e) { - LOG.error("Illegal access exception:"+e); + LOG.error("Illegal access exception:" + e); } catch (InterruptedException e) { - LOG.error("Interrupted exception:"+e); + LOG.error("Interrupted exception:" + e); } return rpcExecutor; @@ -100,8 +101,7 @@ private ThreadPoolExecutor disableHandlers(RpcScheduler scheduler) { public void testCallQueueInfo() throws IOException, InterruptedException { ThreadPoolExecutor rpcExecutor; - RpcScheduler scheduler = new FifoRpcScheduler( - conf, 1); + RpcScheduler scheduler = new FifoRpcScheduler(conf, 1); scheduler.init(CONTEXT); @@ -111,24 +111,23 @@ public void testCallQueueInfo() throws IOException, InterruptedException { int totalCallMethods = 30; int unableToDispatch = 0; - for (int i = totalCallMethods; i>0; i--) { + for (int i = totalCallMethods; i > 0; i--) { CallRunner task = createMockTask(); task.setStatus(new MonitoredRPCHandlerImpl()); - if(!scheduler.dispatch(task)) { + if (!scheduler.dispatch(task)) { unableToDispatch++; } Thread.sleep(10); } - CallQueueInfo callQueueInfo = scheduler.getCallQueueInfo(); int executionCount = callExecutionCount.get(); int callQueueSize = 0; - for (String callQueueName:callQueueInfo.getCallQueueNames()) { - for (String calledMethod: callQueueInfo.getCalledMethodNames(callQueueName)) { + for (String callQueueName : callQueueInfo.getCallQueueNames()) { + for (String calledMethod : callQueueInfo.getCalledMethodNames(callQueueName)) { callQueueSize += callQueueInfo.getCallMethodCount(callQueueName, calledMethod); } } @@ -144,7 +143,8 @@ private CallRunner createMockTask() { when(task.getRpcCall()).thenReturn(call); doAnswer(new Answer() { - 
@Override public Void answer (InvocationOnMock invocation) throws Throwable { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { callExecutionCount.incrementAndGet(); Thread.sleep(1000); return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseClient.java index 01b840cff66a..5a0d8884878d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseClient.java @@ -29,28 +29,28 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RPCTests.class, SmallTests.class}) +@Category({ RPCTests.class, SmallTests.class }) public class TestHBaseClient { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHBaseClient.class); @Test - public void testFailedServer(){ + public void testFailedServer() { ManualEnvironmentEdge ee = new ManualEnvironmentEdge(); EnvironmentEdgeManager.injectEdge(ee); FailedServers fs = new FailedServers(new Configuration()); - Throwable testThrowable = new Throwable();//throwable already tested in TestFailedServers.java + Throwable testThrowable = new Throwable();// throwable already tested in TestFailedServers.java Address ia = Address.fromParts("bad", 12); - // same server as ia + // same server as ia Address ia2 = Address.fromParts("bad", 12); Address ia3 = Address.fromParts("badtoo", 12); Address ia4 = Address.fromParts("badtoo", 13); Assert.assertFalse(fs.isFailedServer(ia)); - fs.addToFailedServers(ia,testThrowable); + fs.addToFailedServers(ia, testThrowable); Assert.assertTrue(fs.isFailedServer(ia)); Assert.assertTrue(fs.isFailedServer(ia2)); @@ -62,9 +62,9 @@ public void testFailedServer(){ Assert.assertFalse(fs.isFailedServer(ia)); Assert.assertFalse(fs.isFailedServer(ia2)); - fs.addToFailedServers(ia,testThrowable); - fs.addToFailedServers(ia3,testThrowable); - fs.addToFailedServers(ia4,testThrowable); + fs.addToFailedServers(ia, testThrowable); + fs.addToFailedServers(ia3, testThrowable); + fs.addToFailedServers(ia4, testThrowable); Assert.assertTrue(fs.isFailedServer(ia)); Assert.assertTrue(fs.isFailedServer(ia2)); @@ -77,8 +77,7 @@ public void testFailedServer(){ Assert.assertFalse(fs.isFailedServer(ia3)); Assert.assertFalse(fs.isFailedServer(ia4)); - - fs.addToFailedServers(ia3,testThrowable); + fs.addToFailedServers(ia3, testThrowable); Assert.assertFalse(fs.isFailedServer(ia4)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMasterFifoRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMasterFifoRpcScheduler.java index af9cf66523ae..27119d23a867 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMasterFifoRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMasterFifoRpcScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcServer.java index 85e3ab292267..2525cdab416f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,9 +66,8 @@ public void setup() { @BeforeClass public static void setupBeforeClass() throws Exception { TEST_UTIL = new HBaseTestingUtil(); - TEST_UTIL.getConfiguration().set( - RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, - NettyRpcServer.class.getName()); + TEST_UTIL.getConfiguration().set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, + NettyRpcServer.class.getName()); TEST_UTIL.startMiniCluster(); } @@ -100,11 +99,9 @@ public void testNettyRpcServer() throws Exception { rowcnt++; int rownum = Bytes.toInt(r.getRow()); assertTrue(r.containsColumn(FAMILY, PRIVATE_COL)); - assertEquals("secret " + rownum, - Bytes.toString(r.getValue(FAMILY, PRIVATE_COL))); + assertEquals("secret " + rownum, Bytes.toString(r.getValue(FAMILY, PRIVATE_COL))); assertTrue(r.containsColumn(FAMILY, PUBLIC_COL)); - assertEquals("info " + rownum, - Bytes.toString(r.getValue(FAMILY, PUBLIC_COL))); + assertEquals("info " + rownum, Bytes.toString(r.getValue(FAMILY, PUBLIC_COL))); } assertEquals("Expected 100 rows returned", 100, rowcnt); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestPluggableQueueImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestPluggableQueueImpl.java index eeb057cbeafc..00b7a4bd9a67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestPluggableQueueImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestPluggableQueueImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,13 +26,11 @@ import org.apache.hadoop.hbase.util.BoundedPriorityBlockingQueue; /** - * Implementation of the PluggableBlockingQueue abstract class. - * - * Used to verify that the pluggable call queue type for the RpcExecutor can load correctly - * via the FQCN reflection semantics. + * Implementation of the PluggableBlockingQueue abstract class. Used to verify that the pluggable + * call queue type for the RpcExecutor can load correctly via the FQCN reflection semantics. 
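A minimal sketch, for context, of how a pluggable queue such as TestPluggableQueueImpl gets selected in a scheduler test. It uses only the RpcExecutor keys that appear later in this patch (CALL_QUEUE_TYPE_CONF_KEY, CALL_QUEUE_TYPE_PLUGGABLE_CONF_VALUE, PLUGGABLE_CALL_QUEUE_CLASS_NAME); it is illustrative, not code introduced by this change.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.ipc.RpcExecutor;

  // Pick the "pluggable" call queue type and name the implementation by its FQCN.
  Configuration schedConf = HBaseConfiguration.create();
  schedConf.set(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY,
    RpcExecutor.CALL_QUEUE_TYPE_PLUGGABLE_CONF_VALUE);
  schedConf.set(RpcExecutor.PLUGGABLE_CALL_QUEUE_CLASS_NAME,
    "org.apache.hadoop.hbase.ipc.TestPluggableQueueImpl");
  // The executor resolves the class reflectively; an unknown FQCN is reported as
  // PluggableRpcQueueNotFound, which TestSimpleRpcScheduler below exercises.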
*/ -public class TestPluggableQueueImpl extends PluggableBlockingQueue implements - ConfigurationObserver { +public class TestPluggableQueueImpl extends PluggableBlockingQueue + implements ConfigurationObserver { private final BoundedPriorityBlockingQueue inner; private static boolean configurationRecentlyChanged = false; @@ -44,104 +42,129 @@ public TestPluggableQueueImpl(int maxQueueLength, PriorityFunction priority, Con configurationRecentlyChanged = false; } - @Override public boolean add(CallRunner callRunner) { + @Override + public boolean add(CallRunner callRunner) { return inner.add(callRunner); } - @Override public boolean offer(CallRunner callRunner) { + @Override + public boolean offer(CallRunner callRunner) { return inner.offer(callRunner); } - @Override public CallRunner remove() { + @Override + public CallRunner remove() { return inner.remove(); } - @Override public CallRunner poll() { + @Override + public CallRunner poll() { return inner.poll(); } - @Override public CallRunner element() { + @Override + public CallRunner element() { return inner.element(); } - @Override public CallRunner peek() { + @Override + public CallRunner peek() { return inner.peek(); } - @Override public void put(CallRunner callRunner) throws InterruptedException { + @Override + public void put(CallRunner callRunner) throws InterruptedException { inner.put(callRunner); } - @Override public boolean offer(CallRunner callRunner, long timeout, TimeUnit unit) - throws InterruptedException { + @Override + public boolean offer(CallRunner callRunner, long timeout, TimeUnit unit) + throws InterruptedException { return inner.offer(callRunner, timeout, unit); } - @Override public CallRunner take() throws InterruptedException { + @Override + public CallRunner take() throws InterruptedException { return inner.take(); } - @Override public CallRunner poll(long timeout, TimeUnit unit) throws InterruptedException { + @Override + public CallRunner poll(long timeout, TimeUnit unit) throws InterruptedException { return inner.poll(timeout, unit); } - @Override public int remainingCapacity() { + @Override + public int remainingCapacity() { return inner.remainingCapacity(); } - @Override public boolean remove(Object o) { + @Override + public boolean remove(Object o) { return inner.remove(o); } - @Override public boolean containsAll(Collection c) { + @Override + public boolean containsAll(Collection c) { return inner.containsAll(c); } - @Override public boolean addAll(Collection c) { + @Override + public boolean addAll(Collection c) { return inner.addAll(c); } - @Override public boolean removeAll(Collection c) { + @Override + public boolean removeAll(Collection c) { return inner.removeAll(c); } - @Override public boolean retainAll(Collection c) { + @Override + public boolean retainAll(Collection c) { return inner.retainAll(c); } - @Override public void clear() { + @Override + public void clear() { inner.clear(); } - @Override public int size() { + @Override + public int size() { return inner.size(); } - @Override public boolean isEmpty() { + @Override + public boolean isEmpty() { return inner.isEmpty(); } - @Override public boolean contains(Object o) { + @Override + public boolean contains(Object o) { return inner.contains(o); } - @Override public Iterator iterator() { + @Override + public Iterator iterator() { return inner.iterator(); } - @Override public Object[] toArray() { + @Override + public Object[] toArray() { return inner.toArray(); } - @Override public T[] toArray(T[] a) { + @Override + public T[] toArray(T[] a) { 
return inner.toArray(a); } - @Override public int drainTo(Collection c) { + @Override + public int drainTo(Collection c) { return inner.drainTo(c); } - @Override public int drainTo(Collection c, int maxElements) { + @Override + public int drainTo(Collection c, int maxElements) { return inner.drainTo(c, maxElements); } @@ -149,7 +172,8 @@ public static boolean hasObservedARecentConfigurationChange() { return configurationRecentlyChanged; } - @Override public void onConfigurationChange(Configuration conf) { + @Override + public void onConfigurationChange(Configuration conf) { configurationRecentlyChanged = true; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java index a45804a45159..de4e71fb1b96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java @@ -72,7 +72,7 @@ public class TestProtoBufRpc { @Parameters(name = "{index}: rpcServerImpl={0}") public static Collection parameters() { return Arrays.asList(new Object[] { SimpleRpcServer.class.getName() }, - new Object[] { NettyRpcServer.class.getName() }); + new Object[] { NettyRpcServer.class.getName() }); } @Parameter(0) @@ -81,15 +81,14 @@ public static Collection parameters() { @Before public void setUp() throws IOException { // Setup server for both protocols this.conf = HBaseConfiguration.create(); - this.conf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, - rpcServerImpl); + this.conf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, rpcServerImpl); Log4jUtils.setLogLevel("org.apache.hadoop.ipc.HBaseServer", "ERROR"); Log4jUtils.setLogLevel("org.apache.hadoop.ipc.HBaseServer.trace", "TRACE"); // Create server side implementation // Get RPC server for server side implementation this.server = RpcServerFactory.createRpcServer(null, "testrpc", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), - new InetSocketAddress(ADDRESS, PORT), conf, new FifoRpcScheduler(conf, 10)); + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress(ADDRESS, PORT), conf, new FifoRpcScheduler(conf, 10)); InetSocketAddress address = server.getListenerAddress(); if (address == null) { throw new IOException("Listener channel is closed"); @@ -103,8 +102,8 @@ public void tearDown() throws Exception { server.stop(); } - @Test (expected=org.apache.hbase.thirdparty.com.google.protobuf.ServiceException.class - /*Thrown when we call stub.error*/) + @Test(expected = org.apache.hbase.thirdparty.com.google.protobuf.ServiceException.class + /* Thrown when we call stub.error */) public void testProtoBufRpc() throws Exception { RpcClient rpcClient = RpcClientFactory.createClient(conf, HConstants.CLUSTER_ID_DEFAULT); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java index 61d91e764043..0d262ae4f892 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,8 +48,8 @@ @InterfaceAudience.Private public class TestProtobufRpcServiceImpl implements BlockingInterface { - public static final BlockingService SERVICE = TestProtobufRpcProto - .newReflectiveBlockingService(new TestProtobufRpcServiceImpl()); + public static final BlockingService SERVICE = + TestProtobufRpcProto.newReflectiveBlockingService(new TestProtobufRpcServiceImpl()); public static BlockingInterface newBlockingStub(RpcClient client, InetSocketAddress addr) throws IOException { @@ -59,14 +59,14 @@ public static BlockingInterface newBlockingStub(RpcClient client, InetSocketAddr public static BlockingInterface newBlockingStub(RpcClient client, InetSocketAddress addr, User user) throws IOException { return TestProtobufRpcProto.newBlockingStub(client.createBlockingRpcChannel( - ServerName.valueOf(addr.getHostName(), addr.getPort(), - EnvironmentEdgeManager.currentTime()), user, 0)); + ServerName.valueOf(addr.getHostName(), addr.getPort(), EnvironmentEdgeManager.currentTime()), + user, 0)); } public static Interface newStub(RpcClient client, InetSocketAddress addr) throws IOException { return TestProtobufRpcProto.newStub(client.createRpcChannel( - ServerName.valueOf(addr.getHostName(), addr.getPort(), - EnvironmentEdgeManager.currentTime()), User.getCurrent(), 0)); + ServerName.valueOf(addr.getHostName(), addr.getPort(), EnvironmentEdgeManager.currentTime()), + User.getCurrent(), 0)); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRWQueueRpcExecutor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRWQueueRpcExecutor.java index ae4fc415fd7f..4d0376cc9c9b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRWQueueRpcExecutor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRWQueueRpcExecutor.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.hbase.ipc.RWQueueRpcExecutor.CALL_QUEUE_READ_SHARE_CONF_KEY; @@ -25,6 +23,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.*; + import java.util.List; import java.util.concurrent.BlockingQueue; import org.apache.hadoop.conf.Configuration; @@ -39,12 +38,12 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({ RPCTests.class, MediumTests.class}) +@Category({ RPCTests.class, MediumTests.class }) public class TestRWQueueRpcExecutor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRWQueueRpcExecutor.class); + HBaseClassTestRule.forClass(TestRWQueueRpcExecutor.class); @Rule public TestName testName = new TestName(); @@ -63,7 +62,7 @@ public void setUp() { public void itProvidesCorrectQueuesToBalancers() throws InterruptedException { PriorityFunction qosFunction = mock(PriorityFunction.class); RWQueueRpcExecutor executor = - new RWQueueRpcExecutor(testName.getMethodName(), 100, 100, qosFunction, conf, null); + new RWQueueRpcExecutor(testName.getMethodName(), 100, 100, qosFunction, conf, null); QueueBalancer readBalancer = executor.getReadBalancer(); QueueBalancer writeBalancer = executor.getWriteBalancer(); @@ -87,8 +86,8 @@ public void itProvidesCorrectQueuesToBalancers() throws InterruptedException { } - private void verifyDistinct(List> queues, List>... others) - throws InterruptedException { + private void verifyDistinct(List> queues, + List>... others) throws InterruptedException { CallRunner mock = mock(CallRunner.class); for (BlockingQueue queue : queues) { queue.put(mock); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java index b70ae407bbaa..2b27ca18fb48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public class TestRpcClientLeaks { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcClientLeaks.class); + HBaseClassTestRule.forClass(TestRpcClientLeaks.class); @Rule public TestName name = new TestName(); @@ -118,7 +118,7 @@ public void testSocketClosed() throws IOException, InterruptedException { conf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, MyRpcClientImpl.class.getName()); conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(TableName.valueOf(name.getMethodName()))) { + Table table = connection.getTable(TableName.valueOf(name.getMethodName()))) { MyRpcClientImpl.enableThrowExceptions(); table.get(new Get(Bytes.toBytes("asd"))); fail("Should fail because the injected error"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java index e173e23ae28f..45321857d519 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -78,7 +78,7 @@ public boolean isAborted() { @Parameters(name = "{index}: rpcServerImpl={0}") public static Collection parameters() { return Arrays.asList(new Object[] { SimpleRpcServer.class.getName() }, - new Object[] { NettyRpcServer.class.getName() }); + new Object[] { NettyRpcServer.class.getName() }); } @Parameter(0) @@ -95,8 +95,8 @@ public void testRpcScheduler() throws IOException, InterruptedException { CONF.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, rpcServerImpl); RpcScheduler scheduler = new SimpleRpcScheduler(CONF, 2, 0, 0, 0, qosFunction, abortable, 0); RpcServer rpcServer = RpcServerFactory.createRpcServer(null, "testRpcServer", - Lists.newArrayList(new BlockingServiceAndInterface((BlockingService) SERVICE, null)), - new InetSocketAddress("localhost", 0), CONF, scheduler); + Lists.newArrayList(new BlockingServiceAndInterface((BlockingService) SERVICE, null)), + new InetSocketAddress("localhost", 0), CONF, scheduler); try (BlockingRpcClient client = new BlockingRpcClient(CONF)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java index 9e993e48e441..c4d0066a3e95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; import static org.junit.Assert.assertEquals; + import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -35,7 +36,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RPCTests.class, SmallTests.class}) +@Category({ RPCTests.class, SmallTests.class }) public class TestRpcMetrics { @ClassRule @@ -46,10 +47,12 @@ public class TestRpcMetrics { @Test public void testFactory() { - MetricsHBaseServer masterMetrics = new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); + MetricsHBaseServer masterMetrics = + new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); MetricsHBaseServerSource masterSource = masterMetrics.getMetricsSource(); - MetricsHBaseServer rsMetrics = new MetricsHBaseServer("HRegionServer", new MetricsHBaseServerWrapperStub()); + MetricsHBaseServer rsMetrics = + new MetricsHBaseServer("HRegionServer", new MetricsHBaseServerWrapperStub()); MetricsHBaseServerSource rsSource = rsMetrics.getMetricsSource(); assertEquals("master", masterSource.getMetricsContext()); @@ -68,7 +71,8 @@ public void testFactory() { */ @Test public void testWrapperSource() { - MetricsHBaseServer mrpc = new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); + MetricsHBaseServer mrpc = + new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); MetricsHBaseServerSource serverSource = mrpc.getMetricsSource(); HELPER.assertGauge("queueSize", 101, serverSource); HELPER.assertGauge("numCallsInGeneralQueue", 102, serverSource); @@ -92,30 +96,28 @@ public void testWrapperSource() { */ @Test public void testSourceMethods() { - MetricsHBaseServer mrpc = new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); + MetricsHBaseServer mrpc = + new MetricsHBaseServer("HMaster", new MetricsHBaseServerWrapperStub()); MetricsHBaseServerSource serverSource = mrpc.getMetricsSource(); - for (int i=0; i < 12; i++) { + for (int i = 0; i < 12; i++) { mrpc.authenticationFailure(); } - for (int i=0; i < 13; i++) { + for (int i = 0; i < 13; i++) { mrpc.authenticationSuccess(); } HELPER.assertCounter("authenticationFailures", 12, serverSource); HELPER.assertCounter("authenticationSuccesses", 13, serverSource); - - - for (int i=0; i < 14; i++) { + for (int i = 0; i < 14; i++) { mrpc.authorizationSuccess(); } - for (int i=0; i < 15; i++) { + for (int i = 0; i < 15; i++) { mrpc.authorizationFailure(); } HELPER.assertCounter("authorizationSuccesses", 14, serverSource); HELPER.assertCounter("authorizationFailures", 15, serverSource); - mrpc.dequeuedCall(100); mrpc.processedCall(101); mrpc.totalCall(102); @@ -195,4 +197,3 @@ public void testServerContextNameWithHostName() { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerSlowConnectionSetup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerSlowConnectionSetup.java index 3627262817d7..1cdaaed8c202 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerSlowConnectionSetup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerSlowConnectionSetup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerTraceLogging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerTraceLogging.java index 122517574f7c..01e444182792 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerTraceLogging.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServerTraceLogging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import static org.junit.Assert.assertEquals; @@ -36,18 +35,18 @@ public class TestRpcServerTraceLogging { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcServerTraceLogging.class); + HBaseClassTestRule.forClass(TestRpcServerTraceLogging.class); private static final org.apache.logging.log4j.core.Logger rpcServerLog = - (org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger(RpcServer.class); + (org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager + .getLogger(RpcServer.class); static final String TRACE_LOG_MSG = - "This is dummy message for testing:: region { type: REGION_NAME value: \"hbase:meta,,1\" }" + - " scan { column { family: \"info\" } time_range { from: 0 to: 9223372036854775807 } " + - "max_versions: 1 cache_blocks: true max_result_size: 2097152 caching: 2147483647 } " + - "number_of_rows: 2147483647 close_scanner: false client_handles_partials: " + - "true client_handles_heartbeats: true track_scan_metrics: false"; + "This is dummy message for testing:: region { type: REGION_NAME value: \"hbase:meta,,1\" }" + + " scan { column { family: \"info\" } time_range { from: 0 to: 9223372036854775807 } " + + "max_versions: 1 cache_blocks: true max_result_size: 2097152 caching: 2147483647 } " + + "number_of_rows: 2147483647 close_scanner: false client_handles_partials: " + + "true client_handles_heartbeats: true track_scan_metrics: false"; static final int TRACE_LOG_LENGTH = TRACE_LOG_MSG.length(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java index ce283701b47f..d55fdd0e2124 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java @@ -29,6 +29,7 @@ import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; + import java.io.IOException; import java.lang.reflect.Field; import java.net.InetSocketAddress; @@ -64,16 +65,18 @@ import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -@Category({RPCTests.class, MediumTests.class}) +@Category({ RPCTests.class, MediumTests.class }) public class TestSimpleRpcScheduler { @ClassRule @@ -102,8 +105,7 @@ public void setUp() { public void testBasic() throws IOException, InterruptedException { PriorityFunction qosFunction = mock(PriorityFunction.class); - RpcScheduler scheduler = new SimpleRpcScheduler( - conf, 10, 0, 0, qosFunction, 0); + RpcScheduler scheduler = new SimpleRpcScheduler(conf, 10, 0, 0, qosFunction, 0); scheduler.init(CONTEXT); scheduler.start(); CallRunner task = createMockTask(); @@ -118,30 +120,30 @@ private RpcScheduler disableHandlers(RpcScheduler scheduler) { Field ExecutorField = scheduler.getClass().getDeclaredField("callExecutor"); ExecutorField.setAccessible(true); - RpcExecutor rpcExecutor = (RpcExecutor)ExecutorField.get(scheduler); + RpcExecutor rpcExecutor = (RpcExecutor) ExecutorField.get(scheduler); - Field handlerCountField = rpcExecutor.getClass().getSuperclass().getSuperclass(). - getDeclaredField("handlerCount"); + Field handlerCountField = + rpcExecutor.getClass().getSuperclass().getSuperclass().getDeclaredField("handlerCount"); handlerCountField.setAccessible(true); handlerCountField.set(rpcExecutor, 0); - Field numCallQueuesField = rpcExecutor.getClass().getSuperclass().getSuperclass(). - getDeclaredField("numCallQueues"); + Field numCallQueuesField = + rpcExecutor.getClass().getSuperclass().getSuperclass().getDeclaredField("numCallQueues"); numCallQueuesField.setAccessible(true); numCallQueuesField.set(rpcExecutor, 1); - Field currentQueueLimitField = rpcExecutor.getClass().getSuperclass().getSuperclass(). 
- getDeclaredField("currentQueueLimit"); + Field currentQueueLimitField = rpcExecutor.getClass().getSuperclass().getSuperclass() + .getDeclaredField("currentQueueLimit"); currentQueueLimitField.setAccessible(true); currentQueueLimitField.set(rpcExecutor, 100); } catch (NoSuchFieldException e) { - LOG.error("No such field exception"+e); + LOG.error("No such field exception" + e); } catch (IllegalAccessException e) { - LOG.error("Illegal access exception"+e); + LOG.error("Illegal access exception" + e); } return scheduler; @@ -151,8 +153,7 @@ private RpcScheduler disableHandlers(RpcScheduler scheduler) { public void testCallQueueInfo() throws IOException, InterruptedException { PriorityFunction qosFunction = mock(PriorityFunction.class); - RpcScheduler scheduler = new SimpleRpcScheduler( - conf, 0, 0, 0, qosFunction, 0); + RpcScheduler scheduler = new SimpleRpcScheduler(conf, 0, 0, 0, qosFunction, 0); scheduler.init(CONTEXT); @@ -161,20 +162,19 @@ public void testCallQueueInfo() throws IOException, InterruptedException { scheduler.start(); int totalCallMethods = 10; - for (int i = totalCallMethods; i>0; i--) { + for (int i = totalCallMethods; i > 0; i--) { CallRunner task = createMockTask(); task.setStatus(new MonitoredRPCHandlerImpl()); scheduler.dispatch(task); } - CallQueueInfo callQueueInfo = scheduler.getCallQueueInfo(); - for (String callQueueName:callQueueInfo.getCallQueueNames()) { + for (String callQueueName : callQueueInfo.getCallQueueNames()) { - for (String calledMethod: callQueueInfo.getCalledMethodNames(callQueueName)) { + for (String calledMethod : callQueueInfo.getCalledMethodNames(callQueueName)) { assertEquals(totalCallMethods, - callQueueInfo.getCallMethodCount(callQueueName, calledMethod)); + callQueueInfo.getCallMethodCount(callQueueName, calledMethod)); } } @@ -183,20 +183,14 @@ public void testCallQueueInfo() throws IOException, InterruptedException { } - @Test public void testHandlerIsolation() throws IOException, InterruptedException { CallRunner generalTask = createMockTask(); CallRunner priorityTask = createMockTask(); CallRunner replicationTask = createMockTask(); - List tasks = ImmutableList.of( - generalTask, - priorityTask, - replicationTask); - Map qos = ImmutableMap.of( - generalTask, 0, - priorityTask, HConstants.HIGH_QOS + 1, - replicationTask, HConstants.REPLICATION_QOS); + List tasks = ImmutableList.of(generalTask, priorityTask, replicationTask); + Map qos = ImmutableMap.of(generalTask, 0, priorityTask, + HConstants.HIGH_QOS + 1, replicationTask, HConstants.REPLICATION_QOS); PriorityFunction qosFunction = mock(PriorityFunction.class); final Map handlerThreads = Maps.newHashMap(); final CountDownLatch countDownLatch = new CountDownLatch(tasks.size()); @@ -204,9 +198,7 @@ public void testHandlerIsolation() throws IOException, InterruptedException { @Override public Void answer(InvocationOnMock invocationOnMock) throws Throwable { synchronized (handlerThreads) { - handlerThreads.put( - (CallRunner) invocationOnMock.getMock(), - Thread.currentThread()); + handlerThreads.put((CallRunner) invocationOnMock.getMock(), Thread.currentThread()); } countDownLatch.countDown(); return null; @@ -217,8 +209,8 @@ public Void answer(InvocationOnMock invocationOnMock) throws Throwable { doAnswer(answerToRun).when(task).run(); } - RpcScheduler scheduler = new SimpleRpcScheduler( - conf, 1, 1 ,1, qosFunction, HConstants.HIGH_QOS); + RpcScheduler scheduler = + new SimpleRpcScheduler(conf, 1, 1, 1, qosFunction, HConstants.HIGH_QOS); scheduler.init(CONTEXT); scheduler.start(); 
for (CallRunner task : tasks) { @@ -254,8 +246,7 @@ public void testPluggableRpcQueue() throws Exception { "org.apache.hadoop.hbase.ipc.TestPluggableQueueImpl"); try { - testRpcScheduler(RpcExecutor.CALL_QUEUE_TYPE_PLUGGABLE_CONF_VALUE, - "MissingClass"); + testRpcScheduler(RpcExecutor.CALL_QUEUE_TYPE_PLUGGABLE_CONF_VALUE, "MissingClass"); fail("Expected a PluggableRpcQueueNotFound for unloaded class"); } catch (PluggableRpcQueueNotFound e) { // expected @@ -279,13 +270,14 @@ public void testPluggableRpcQueueWireUpWithFastPathExecutor() throws Exception { String queueType = RpcExecutor.CALL_QUEUE_TYPE_PLUGGABLE_CONF_VALUE; Configuration schedConf = HBaseConfiguration.create(); schedConf.set(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, queueType); - schedConf.set(RpcExecutor.PLUGGABLE_CALL_QUEUE_CLASS_NAME, "org.apache.hadoop.hbase.ipc.TestPluggableQueueImpl"); + schedConf.set(RpcExecutor.PLUGGABLE_CALL_QUEUE_CLASS_NAME, + "org.apache.hadoop.hbase.ipc.TestPluggableQueueImpl"); schedConf.setBoolean(RpcExecutor.PLUGGABLE_CALL_QUEUE_WITH_FAST_PATH_ENABLED, true); PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.NORMAL_QOS); - SimpleRpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 0, 0, 0, priority, - HConstants.QOS_THRESHOLD); + SimpleRpcScheduler scheduler = + new SimpleRpcScheduler(schedConf, 0, 0, 0, priority, HConstants.QOS_THRESHOLD); Field f = scheduler.getClass().getDeclaredField("callExecutor"); f.setAccessible(true); @@ -297,12 +289,13 @@ public void testPluggableRpcQueueWireUpWithoutFastPathExecutor() throws Exceptio String queueType = RpcExecutor.CALL_QUEUE_TYPE_PLUGGABLE_CONF_VALUE; Configuration schedConf = HBaseConfiguration.create(); schedConf.set(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, queueType); - schedConf.set(RpcExecutor.PLUGGABLE_CALL_QUEUE_CLASS_NAME, "org.apache.hadoop.hbase.ipc.TestPluggableQueueImpl"); + schedConf.set(RpcExecutor.PLUGGABLE_CALL_QUEUE_CLASS_NAME, + "org.apache.hadoop.hbase.ipc.TestPluggableQueueImpl"); PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.NORMAL_QOS); - SimpleRpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 0, 0, 0, priority, - HConstants.QOS_THRESHOLD); + SimpleRpcScheduler scheduler = + new SimpleRpcScheduler(schedConf, 0, 0, 0, priority, HConstants.QOS_THRESHOLD); Field f = scheduler.getClass().getDeclaredField("callExecutor"); f.setAccessible(true); @@ -323,15 +316,15 @@ public void testPluggableRpcQueueCanListenToConfigurationChanges() throws Except PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.NORMAL_QOS); - SimpleRpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 0, 0, 0, priority, - HConstants.QOS_THRESHOLD); + SimpleRpcScheduler scheduler = + new SimpleRpcScheduler(schedConf, 0, 0, 0, priority, HConstants.QOS_THRESHOLD); try { scheduler.start(); CallRunner putCallTask = mock(CallRunner.class); ServerCall putCall = mock(ServerCall.class); - putCall.param = RequestConverter.buildMutateRequest( - Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))); + putCall.param = + RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))); RequestHeader putHead = RequestHeader.newBuilder().setMethodName("mutate").build(); when(putCallTask.getRpcCall()).thenReturn(putCall); when(putCall.getHeader()).thenReturn(putHead); @@ -351,7 +344,8 @@ private void 
testRpcScheduler(final String queueType) throws Exception { testRpcScheduler(queueType, null); } - private void testRpcScheduler(final String queueType, final String pluggableQueueClass) throws Exception { + private void testRpcScheduler(final String queueType, final String pluggableQueueClass) + throws Exception { Configuration schedConf = HBaseConfiguration.create(); schedConf.set(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, queueType); @@ -362,8 +356,8 @@ private void testRpcScheduler(final String queueType, final String pluggableQueu PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.NORMAL_QOS); - RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 1, 1, 1, priority, - HConstants.QOS_THRESHOLD); + RpcScheduler scheduler = + new SimpleRpcScheduler(schedConf, 1, 1, 1, priority, HConstants.QOS_THRESHOLD); try { scheduler.start(); @@ -417,14 +411,14 @@ private void testRpcScheduler(final String queueType, final String pluggableQueu LOG.debug("Total Time: " + totalTime); // -> [small small small huge small large small small] - // -> NO REORDER [10 10 10 100 10 50 10 10] -> 930 (FIFO Queue) + // -> NO REORDER [10 10 10 100 10 50 10 10] -> 930 (FIFO Queue) // -> WITH REORDER [10 10 10 10 10 10 50 100] -> 530 (Deadline Queue) if (queueType.equals(RpcExecutor.CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) { assertEquals(530, totalTime); - } else if (queueType.equals(RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE) || - queueType.equals(RpcExecutor.CALL_QUEUE_TYPE_PLUGGABLE_CONF_VALUE)) { - assertEquals(930, totalTime); - } + } else if (queueType.equals(RpcExecutor.CALL_QUEUE_TYPE_FIFO_CONF_VALUE) + || queueType.equals(RpcExecutor.CALL_QUEUE_TYPE_PLUGGABLE_CONF_VALUE)) { + assertEquals(930, totalTime); + } } finally { scheduler.stop(); } @@ -441,8 +435,8 @@ public void testScanQueueWithZeroScanRatio() throws Exception { PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.NORMAL_QOS); - RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 2, 1, 1, priority, - HConstants.QOS_THRESHOLD); + RpcScheduler scheduler = + new SimpleRpcScheduler(schedConf, 2, 1, 1, priority, HConstants.QOS_THRESHOLD); assertNotEquals(null, scheduler); } @@ -456,15 +450,15 @@ public void testScanQueues() throws Exception { PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.NORMAL_QOS); - RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 3, 1, 1, priority, - HConstants.QOS_THRESHOLD); + RpcScheduler scheduler = + new SimpleRpcScheduler(schedConf, 3, 1, 1, priority, HConstants.QOS_THRESHOLD); try { scheduler.start(); CallRunner putCallTask = mock(CallRunner.class); ServerCall putCall = mock(ServerCall.class); - putCall.param = RequestConverter.buildMutateRequest( - Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))); + putCall.param = + RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))); RequestHeader putHead = RequestHeader.newBuilder().setMethodName("mutate").build(); when(putCallTask.getRpcCall()).thenReturn(putCall); when(putCall.getHeader()).thenReturn(putHead); @@ -515,8 +509,8 @@ public void testScanQueues() throws Exception { } } - private void doAnswerTaskExecution(final CallRunner callTask, - final ArrayList results, final int value, final int sleepInterval) { + private void doAnswerTaskExecution(final CallRunner callTask, final ArrayList results, 
+ final int value, final int sleepInterval) { callTask.setStatus(new MonitoredRPCHandlerImpl()); doAnswer(new Answer() { @Override @@ -549,15 +543,15 @@ public void testSoftAndHardQueueLimits() throws Exception { PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.NORMAL_QOS); - SimpleRpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 0, 0, 0, priority, - HConstants.QOS_THRESHOLD); + SimpleRpcScheduler scheduler = + new SimpleRpcScheduler(schedConf, 0, 0, 0, priority, HConstants.QOS_THRESHOLD); try { scheduler.start(); CallRunner putCallTask = mock(CallRunner.class); ServerCall putCall = mock(ServerCall.class); - putCall.param = RequestConverter.buildMutateRequest( - Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))); + putCall.param = + RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))); RequestHeader putHead = RequestHeader.newBuilder().setMethodName("mutate").build(); when(putCallTask.getRpcCall()).thenReturn(putCall); when(putCall.getHeader()).thenReturn(putHead); @@ -671,10 +665,9 @@ public void testCoDelScheduling() throws Exception { // make sure somewhat slow calls are handled waitUntilQueueEmpty(scheduler); Thread.sleep(100); - assertTrue( - "There should have been at least 12 calls dropped however there were " - + scheduler.getNumGeneralCallsDropped(), - scheduler.getNumGeneralCallsDropped() > 12); + assertTrue("There should have been at least 12 calls dropped however there were " + + scheduler.getNumGeneralCallsDropped(), + scheduler.getNumGeneralCallsDropped() > 12); } finally { scheduler.stop(); } @@ -690,11 +683,11 @@ public void testFastPathBalancedQueueRpcExecutorWithQueueLength0() throws Except Configuration conf = HBaseConfiguration.create(); Abortable abortable = mock(Abortable.class); FastPathBalancedQueueRpcExecutor executor = - Mockito.spy(new FastPathBalancedQueueRpcExecutor(name, - handlerCount, callQueueType, maxQueueLength, priority, conf, abortable)); + Mockito.spy(new FastPathBalancedQueueRpcExecutor(name, handlerCount, callQueueType, + maxQueueLength, priority, conf, abortable)); CallRunner task = mock(CallRunner.class); assertFalse(executor.dispatch(task)); - //make sure we never internally get a handler, which would skip the queue validation + // make sure we never internally get a handler, which would skip the queue validation Mockito.verify(executor, Mockito.never()).getHandler(Mockito.any(), Mockito.anyDouble(), Mockito.anyInt(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); } @@ -709,15 +702,15 @@ public void testMetaRWScanQueues() throws Exception { PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.HIGH_QOS); - RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 3, 3, 1, priority, - HConstants.QOS_THRESHOLD); + RpcScheduler scheduler = + new SimpleRpcScheduler(schedConf, 3, 3, 1, priority, HConstants.QOS_THRESHOLD); try { scheduler.start(); CallRunner putCallTask = mock(CallRunner.class); ServerCall putCall = mock(ServerCall.class); - putCall.param = RequestConverter.buildMutateRequest( - Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))); + putCall.param = + RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))); RequestHeader putHead = RequestHeader.newBuilder().setMethodName("mutate").build(); when(putCallTask.getRpcCall()).thenReturn(putCall); when(putCall.getHeader()).thenReturn(putHead); @@ 
-771,15 +764,16 @@ public void testMetaRWScanQueues() throws Exception { // Get mocked call that has the CallRunner sleep for a while so that the fast // path isn't hit. private CallRunner getMockedCallRunner(long timestamp, final long sleepTime) throws IOException { - ServerCall putCall = new ServerCall(1, null, null, - RPCProtos.RequestHeader.newBuilder().setMethodName("mutate").build(), - RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))), - null, null, 9, null, timestamp, 0, null, null, null) { - - @Override - public void sendResponseIfReady() throws IOException { - } - }; + ServerCall putCall = + new ServerCall(1, null, null, + RPCProtos.RequestHeader.newBuilder().setMethodName("mutate").build(), RequestConverter + .buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))), + null, null, 9, null, timestamp, 0, null, null, null) { + + @Override + public void sendResponseIfReady() throws IOException { + } + }; return new CallRunner(null, putCall) { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java index 69f738741eb1..d718199335af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MiniMRCluster; @@ -30,10 +29,8 @@ import org.apache.hadoop.mapreduce.JobID; /** - * This class provides shims for HBase to interact with the Hadoop 1.0.x and the - * Hadoop 0.23.x series. - * - * NOTE: No testing done against 0.22.x, or 0.21.x. + * This class provides shims for HBase to interact with the Hadoop 1.0.x and the Hadoop 0.23.x + * series. NOTE: No testing done against 0.22.x, or 0.21.x. 
*/ abstract public class MapreduceTestingShim { private static MapreduceTestingShim instance; @@ -42,28 +39,25 @@ abstract public class MapreduceTestingShim { static { try { // This class exists in hadoop 0.22+ but not in Hadoop 20.x/1.x - Class c = Class - .forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl"); + Class c = Class.forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl"); instance = new MapreduceV2Shim(); } catch (Exception e) { instance = new MapreduceV1Shim(); } } - abstract public JobContext newJobContext(Configuration jobConf) - throws IOException; + abstract public JobContext newJobContext(Configuration jobConf) throws IOException; abstract public Job newJob(Configuration conf) throws IOException; - + abstract public JobConf obtainJobConf(MiniMRCluster cluster); abstract public String obtainMROutputDirProp(); - - public static JobContext createJobContext(Configuration jobConf) - throws IOException { + + public static JobContext createJobContext(Configuration jobConf) throws IOException { return instance.newJobContext(jobConf); } - + public static JobConf getJobConf(MiniMRCluster cluster) { return instance.obtainJobConf(cluster); } @@ -75,7 +69,7 @@ public static Job createJob(Configuration conf) throws IOException { public static String getMROutputDirProp() { return instance.obtainMROutputDirProp(); } - + private static class MapreduceV1Shim extends MapreduceTestingShim { @Override public JobContext newJobContext(Configuration jobConf) throws IOException { @@ -101,20 +95,19 @@ public Job newJob(Configuration conf) throws IOException { c = Job.class.getConstructor(Configuration.class); return c.newInstance(conf); } catch (Exception e) { - throw new IllegalStateException( - "Failed to instantiate new Job(conf)", e); + throw new IllegalStateException("Failed to instantiate new Job(conf)", e); } } - + @Override public JobConf obtainJobConf(MiniMRCluster cluster) { if (cluster == null) return null; try { Object runner = cluster.getJobTrackerRunner(); Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam); - Object tracker = meth.invoke(runner, new Object []{}); + Object tracker = meth.invoke(runner, new Object[] {}); Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam); - return (JobConf) m.invoke(tracker, new Object []{}); + return (JobConf) m.invoke(tracker, new Object[] {}); } catch (NoSuchMethodException nsme) { return null; } catch (InvocationTargetException ite) { @@ -145,16 +138,15 @@ public Job newJob(Configuration jobConf) { return (Job) m.invoke(null, jobConf); // static method, then arg } catch (Exception e) { e.printStackTrace(); - throw new IllegalStateException( - "Failed to return from Job.getInstance(jobConf)"); + throw new IllegalStateException("Failed to return from Job.getInstance(jobConf)"); } } - + @Override public JobConf obtainJobConf(MiniMRCluster cluster) { try { Method meth = MiniMRCluster.class.getMethod("getJobTrackerConf", emptyParam); - return (JobConf) meth.invoke(cluster, new Object []{}); + return (JobConf) meth.invoke(cluster, new Object[] {}); } catch (NoSuchMethodException nsme) { return null; } catch (InvocationTargetException ite) { @@ -166,8 +158,8 @@ public JobConf obtainJobConf(MiniMRCluster cluster) { @Override public String obtainMROutputDirProp() { - // This is a copy of o.a.h.mapreduce.lib.output.FileOutputFormat.OUTDIR - // from Hadoop 0.23.x. If we use the source directly we break the hadoop 1.x compile. 
+ // This is a copy of o.a.h.mapreduce.lib.output.FileOutputFormat.OUTDIR + // from Hadoop 0.23.x. If we use the source directly we break the hadoop 1.x compile. return "mapreduce.output.fileoutputformat.outputdir"; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java index a0f26c001e28..c542251b588a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java @@ -132,8 +132,8 @@ private void startCluster(int numRS) throws Exception { conf.setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 3); conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); conf.set("hbase.wal.provider", getWalProvider()); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(NUM_MASTERS).numRegionServers(numRS).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().numMasters(NUM_MASTERS).numRegionServers(numRS).build(); TEST_UTIL.startMiniHBaseCluster(option); cluster = TEST_UTIL.getHBaseCluster(); LOG.info("Waiting for active/ready master"); @@ -199,13 +199,12 @@ public boolean evaluate() throws Exception { TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return (HBaseTestingUtil.getAllOnlineRegions(cluster) - .size() >= (numRegionsToCreate + 1)); + return (HBaseTestingUtil.getAllOnlineRegions(cluster).size() >= (numRegionsToCreate + 1)); } }); - LOG.info("Current Open Regions After Master Node Starts Up:" + - HBaseTestingUtil.getAllOnlineRegions(cluster).size()); + LOG.info("Current Open Regions After Master Node Starts Up:" + + HBaseTestingUtil.getAllOnlineRegions(cluster).size()); assertEquals(numLogLines, TEST_UTIL.countRows(ht)); } @@ -255,16 +254,16 @@ public String explainFailure() throws Exception { @Test public void testDelayedDeleteOnFailure() throws Exception { if (!this.conf.getBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, - HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) { + HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) { // This test depends on zk coordination.... 
- return; + return; } LOG.info("testDelayedDeleteOnFailure"); startCluster(1); final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager(); final FileSystem fs = master.getMasterFileSystem().getFileSystem(); final Path rootLogDir = - new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_LOGDIR_NAME); + new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_LOGDIR_NAME); final Path logDir = new Path(rootLogDir, ServerName.valueOf("x", 1, 1).toString()); fs.mkdirs(logDir); ExecutorService executor = null; @@ -434,10 +433,11 @@ public void makeWAL(HRegionServer hrs, List regions, int numEdits, i row = Arrays.copyOfRange(row, 3, 8); // use last 5 bytes because // HBaseTestingUtility.createMultiRegions use 5 bytes key byte[] qualifier = Bytes.toBytes("c" + Integer.toString(i)); - e.add(new KeyValue(row, COLUMN_FAMILY, qualifier, EnvironmentEdgeManager.currentTime(), - value)); + e.add( + new KeyValue(row, COLUMN_FAMILY, qualifier, EnvironmentEdgeManager.currentTime(), value)); log.appendData(curRegionInfo, new WALKeyImpl(curRegionInfo.getEncodedNameAsBytes(), - tableName, EnvironmentEdgeManager.currentTime(), mvcc), e); + tableName, EnvironmentEdgeManager.currentTime(), mvcc), + e); if (0 == i % syncEvery) { log.sync(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestRestartCluster.java index 07a87fa7dc16..d46cc5589869 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestRestartCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestRestartCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ public abstract class AbstractTestRestartCluster { protected HBaseTestingUtil UTIL = new HBaseTestingUtil(); protected static final TableName[] TABLES = { TableName.valueOf("restartTableOne"), - TableName.valueOf("restartTableTwo"), TableName.valueOf("restartTableThree") }; + TableName.valueOf("restartTableTwo"), TableName.valueOf("restartTableThree") }; protected static final byte[] FAMILY = Bytes.toBytes("family"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java index 3d36db71242a..f8aed58cc1fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AlwaysStandByHMaster.java @@ -38,14 +38,12 @@ public class AlwaysStandByHMaster extends HMaster { * An implementation of ActiveMasterManager that never transitions it's master to active state. It * always remains as a stand by master. With the master registry implementation (HBASE-18095) it * is expected to have at least one active / standby master always running at any point in time - * since they serve as the gateway for client connections. - * - * With this implementation, tests can simulate the scenario of not having an active master yet - * the client connections to the cluster succeed. + * since they serve as the gateway for client connections. With this implementation, tests can + * simulate the scenario of not having an active master yet the client connections to the cluster + * succeed. 
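For context, a rough sketch of how a test might bring up a cluster whose masters are AlwaysStandByHMaster instances, so client connections can be exercised without an active master. The masterClass(...) builder call is an assumption made for illustration; this patch only shows numMasters/numRegionServers on the builder (as in AbstractTestDLS above).

  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.apache.hadoop.hbase.StartTestingClusterOption;
  import org.apache.hadoop.hbase.master.AlwaysStandByHMaster;

  HBaseTestingUtil util = new HBaseTestingUtil();
  // masterClass(...) is assumed here for illustration; the other builder calls
  // mirror the usage shown in AbstractTestDLS.
  StartTestingClusterOption option = StartTestingClusterOption.builder()
    .numMasters(1)
    .masterClass(AlwaysStandByHMaster.class) // every master stays in stand-by
    .numRegionServers(1)
    .build();
  // Because no master ever becomes active, the test must not block waiting for one.
  util.startMiniHBaseCluster(option);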
*/ private static class AlwaysStandByMasterManager extends ActiveMasterManager { - private static final Logger LOG = - LoggerFactory.getLogger(AlwaysStandByMasterManager.class); + private static final Logger LOG = LoggerFactory.getLogger(AlwaysStandByMasterManager.class); AlwaysStandByMasterManager(ZKWatcher watcher, ServerName sn, Server master) throws InterruptedIOException { @@ -78,13 +76,12 @@ boolean blockUntilBecomingActiveMaster(int checkInterval, MonitoredTask startupS clusterHasActiveMaster.wait(checkInterval); } catch (InterruptedException e) { // We expect to be interrupted when a master dies, - // will fall out if so + // will fall out if so LOG.debug("Interrupted waiting for master to die", e); } } if (clusterShutDown.get()) { - this.master.stop( - "Cluster went down before this master became active"); + this.master.stop("Cluster went down before this master became active"); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionProcedure.java index 8d0df5f79163..da1be942bf77 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionProcedureState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionProcedureState.java index bcce7e67636a..92ac4b6a8841 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionProcedureState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionProcedureState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionServerList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionServerList.java index b890e1a61365..2e1d46fc32e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionServerList.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/DummyRegionServerList.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index d53cf81fa835..4dbbb64573cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master; import static org.mockito.Mockito.mock; + import java.io.IOException; import java.util.List; import org.apache.hadoop.conf.Configuration; @@ -59,6 +60,7 @@ import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; + import org.apache.hbase.thirdparty.com.google.protobuf.Service; public class MockNoopMasterServices implements MasterServices { @@ -76,15 +78,12 @@ public MockNoopMasterServices(final Configuration conf) { @Override public void checkTableModifiable(TableName tableName) throws IOException { - //no-op + // no-op } @Override - public long createTable( - final TableDescriptor desc, - final byte[][] splitKeys, - final long nonceGroup, - final long nonce) throws IOException { + public long createTable(final TableDescriptor desc, final byte[][] splitKeys, + final long nonceGroup, final long nonce) throws IOException { // no-op return -1; } @@ -134,7 +133,8 @@ public MasterQuotaManager getMasterQuotaManager() { return null; } - @Override public RegionNormalizerManager getRegionNormalizerManager() { + @Override + public RegionNormalizerManager getRegionNormalizerManager() { return null; } @@ -180,7 +180,7 @@ public ServerName getServerName() { @Override public void abort(String why, Throwable e) { - //no-op + // no-op } @Override @@ -218,12 +218,12 @@ public boolean registerService(Service instance) { @Override public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) throws IOException { - return false; //To change body of implemented methods use File | Settings | File Templates. + return false; // To change body of implemented methods use File | Settings | File Templates. } @Override public List> getProcedures() throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. + return null; // To change body of implemented methods use File | Settings | File Templates. } @Override @@ -233,7 +233,7 @@ public List getLocks() throws IOException { @Override public List listTableDescriptorsByNamespace(String name) throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. + return null; // To change body of implemented methods use File | Settings | File Templates. 
} @Override @@ -242,45 +242,32 @@ public List listTableNamesByNamespace(String name) throws IOException } @Override - public long deleteTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException { + public long deleteTable(final TableName tableName, final long nonceGroup, final long nonce) + throws IOException { return -1; } @Override - public long truncateTable( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) throws IOException { + public long truncateTable(final TableName tableName, final boolean preserveSplits, + final long nonceGroup, final long nonce) throws IOException { return -1; } - @Override - public long modifyTable( - final TableName tableName, - final TableDescriptor descriptor, - final long nonceGroup, - final long nonce) throws IOException { + public long modifyTable(final TableName tableName, final TableDescriptor descriptor, + final long nonceGroup, final long nonce) throws IOException { return -1; } @Override - public long enableTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException { + public long enableTable(final TableName tableName, final long nonceGroup, final long nonce) + throws IOException { return -1; } @Override - public long disableTable( - TableName tableName, - final long nonceGroup, - final long nonce) throws IOException { + public long disableTable(TableName tableName, final long nonceGroup, final long nonce) + throws IOException { return -1; } @@ -303,19 +290,13 @@ public long deleteColumn(final TableName tableName, final byte[] columnName, } @Override - public long mergeRegions( - final RegionInfo[] regionsToMerge, - final boolean forcible, - final long nonceGroup, - final long nonce) throws IOException { + public long mergeRegions(final RegionInfo[] regionsToMerge, final boolean forcible, + final long nonceGroup, final long nonce) throws IOException { return -1; } @Override - public long splitRegion( - final RegionInfo regionInfo, - final byte[] splitRow, - final long nonceGroup, + public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup, final long nonce) throws IOException { return -1; } @@ -340,7 +321,8 @@ public boolean isInMaintenanceMode() { return false; } - @Override public boolean skipRegionManagementAction(String action) { + @Override + public boolean skipRegionManagementAction(String action) { return false; } @@ -406,8 +388,8 @@ public long disableReplicationPeer(String peerId) throws ReplicationException, I } @Override - public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws ReplicationException, - IOException { + public ReplicationPeerConfig getReplicationPeerConfig(String peerId) + throws ReplicationException, IOException { return null; } @@ -468,7 +450,7 @@ public boolean isClusterUp() { } public long transitReplicationPeerSyncReplicationState(String peerId, - SyncReplicationState clusterState) throws ReplicationException, IOException { + SyncReplicationState clusterState) throws ReplicationException, IOException { return 0; } @@ -523,13 +505,13 @@ public MetaLocationSyncer getMetaLocationSyncer() { @Override public long modifyTableStoreFileTracker(TableName tableName, String dstSFT, long nonceGroup, - long nonce) throws IOException { + long nonce) throws IOException { return -1; } @Override public long modifyColumnStoreFileTracker(TableName tableName, byte[] family, String dstSFT, - long nonceGroup, long nonce) throws IOException { + 
long nonceGroup, long nonce) throws IOException { return -1; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index f4f63a3badc3..b4dc10a95d9c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -30,7 +30,6 @@ import java.util.TreeMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.Abortable; @@ -143,12 +142,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; /** - * A mock RegionServer implementation. - * Use this when you can't bend Mockito to your liking (e.g. return null result - * when 'scanning' until master timesout and then return a coherent meta row - * result thereafter. Have some facility for faking gets and scans. See - * setGetResult(byte[], byte[], Result) for how to fill the backing data - * store that the get pulls from. + * A mock RegionServer implementation. Use this when you can't bend Mockito to your liking (e.g. + * return null result when 'scanning' until master timesout and then return a coherent meta row + * result thereafter. Have some facility for faking gets and scans. See setGetResult(byte[], byte[], + * Result) for how to fill the backing data store that the get pulls from. */ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface, ClientProtos.ClientService.BlockingInterface, RegionServerServices { @@ -158,16 +155,16 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface, /** * Map of regions to map of rows and {@link Result}. Used as data source when - * {@link #get(RpcController, ClientProtos.GetRequest)} is called. Because we have a byte - * key, need to use TreeMap and provide a Comparator. Use - * {@link #setGetResult(byte[], byte[], Result)} filling this map. + * {@link #get(RpcController, ClientProtos.GetRequest)} is called. Because we have a byte key, + * need to use TreeMap and provide a Comparator. Use {@link #setGetResult(byte[], byte[], Result)} + * filling this map. */ - private final Map> gets = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private final Map> gets = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Map of regions to results to return when scanning. */ - private final Map nexts = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private final Map nexts = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Data structure that holds regionname and index used scanning. @@ -202,7 +199,7 @@ int getThenIncrement() { * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException */ MockRegionServer(final Configuration conf, final ServerName sn) - throws ZooKeeperConnectionException, IOException { + throws ZooKeeperConnectionException, IOException { this.sn = sn; this.conf = conf; this.zkw = new ZKWatcher(conf, sn.toString(), this, true); @@ -215,11 +212,11 @@ int getThenIncrement() { * @param row the row key * @param r the single row result */ - void setGetResult(final byte [] regionName, final byte [] row, final Result r) { - Map value = this.gets.get(regionName); + void setGetResult(final byte[] regionName, final byte[] row, final Result r) { + Map value = this.gets.get(regionName); if (value == null) { - // If no value already, create one. 
Needs to be treemap because we are - // using byte array as key. Not thread safe. + // If no value already, create one. Needs to be treemap because we are + // using byte array as key. Not thread safe. value = new TreeMap<>(Bytes.BYTES_COMPARATOR); this.gets.put(regionName, value); } @@ -231,7 +228,7 @@ void setGetResult(final byte [] regionName, final byte [] row, final Result r) { * @param regionName * @param rs */ - void setNextResults(final byte [] regionName, final Result [] rs) { + void setNextResults(final byte[] regionName, final Result[] rs) { this.nexts.put(regionName, rs); } @@ -259,15 +256,15 @@ public long openScanner(byte[] regionName, Scan scan) throws IOException { public Result next(long scannerId) throws IOException { RegionNameAndIndex rnai = this.scannersAndOffsets.get(scannerId); int index = rnai.getThenIncrement(); - Result [] results = this.nexts.get(rnai.getRegionName()); + Result[] results = this.nexts.get(rnai.getRegionName()); if (results == null) return null; - return index < results.length? results[index]: null; + return index < results.length ? results[index] : null; } - public Result [] next(long scannerId, int numberOfRows) throws IOException { + public Result[] next(long scannerId, int numberOfRows) throws IOException { // Just return one result whatever they ask for. Result r = next(scannerId); - return r == null? null: new Result [] {r}; + return r == null ? null : new Result[] { r }; } public void close(final long scannerId) throws IOException { @@ -327,10 +324,12 @@ public boolean isStopping() { public FlushRequester getFlushRequester() { return null; } + @Override public CompactionRequester getCompactionRequestor() { return null; } + @Override public RegionServerAccounting getRegionServerAccounting() { return null; @@ -361,10 +360,9 @@ public FileSystem getFileSystem() { } @Override - public GetResponse get(RpcController controller, GetRequest request) - throws ServiceException { + public GetResponse get(RpcController controller, GetRequest request) throws ServiceException { byte[] regionName = request.getRegion().getValue().toByteArray(); - Map m = this.gets.get(regionName); + Map m = this.gets.get(regionName); GetResponse.Builder builder = GetResponse.newBuilder(); if (m != null) { byte[] row = request.getGet().getRow().toByteArray(); @@ -380,27 +378,23 @@ public MutateResponse mutate(RpcController controller, MutateRequest request) } @Override - public ScanResponse scan(RpcController controller, ScanRequest request) - throws ServiceException { + public ScanResponse scan(RpcController controller, ScanRequest request) throws ServiceException { ScanResponse.Builder builder = ScanResponse.newBuilder(); try { if (request.hasScan()) { byte[] regionName = request.getRegion().getValue().toByteArray(); builder.setScannerId(openScanner(regionName, null)); builder.setMoreResults(true); - } - else { + } else { long scannerId = request.getScannerId(); Result result = next(scannerId); if (result != null) { builder.addCellsPerResult(result.size()); List results = new ArrayList<>(1); results.add(result); - ((HBaseRpcController) controller).setCellScanner(CellUtil - .createCellScanner(results)); + ((HBaseRpcController) controller).setCellScanner(CellUtil.createCellScanner(results)); builder.setMoreResults(true); - } - else { + } else { builder.setMoreResults(false); close(scannerId); } @@ -412,8 +406,8 @@ public ScanResponse scan(RpcController controller, ScanRequest request) } @Override - public BulkLoadHFileResponse bulkLoadHFile(RpcController controller, - 
BulkLoadHFileRequest request) throws ServiceException { + public BulkLoadHFileResponse bulkLoadHFile(RpcController controller, BulkLoadHFileRequest request) + throws ServiceException { return null; } @@ -424,35 +418,35 @@ public ClientProtos.CoprocessorServiceResponse execService(RpcController control } @Override - public ClientProtos.MultiResponse multi( - RpcController controller, MultiRequest request) throws ServiceException { + public ClientProtos.MultiResponse multi(RpcController controller, MultiRequest request) + throws ServiceException { return null; } @Override - public GetRegionInfoResponse getRegionInfo(RpcController controller, - GetRegionInfoRequest request) throws ServiceException { + public GetRegionInfoResponse getRegionInfo(RpcController controller, GetRegionInfoRequest request) + throws ServiceException { GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); builder.setRegionInfo(ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO)); return builder.build(); } @Override - public GetRegionLoadResponse getRegionLoad(RpcController controller, - GetRegionLoadRequest request) throws ServiceException { + public GetRegionLoadResponse getRegionLoad(RpcController controller, GetRegionLoadRequest request) + throws ServiceException { GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder(); return builder.build(); } @Override public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller, - ClearCompactionQueuesRequest request) throws ServiceException { + ClearCompactionQueuesRequest request) throws ServiceException { return null; } @Override - public GetStoreFileResponse getStoreFile(RpcController controller, - GetStoreFileRequest request) throws ServiceException { + public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request) + throws ServiceException { return null; } @@ -468,25 +462,26 @@ public List getRegions() { } @Override - public OpenRegionResponse openRegion(RpcController controller, - OpenRegionRequest request) throws ServiceException { + public OpenRegionResponse openRegion(RpcController controller, OpenRegionRequest request) + throws ServiceException { return null; } @Override - public WarmupRegionResponse warmupRegion(RpcController controller, - WarmupRegionRequest request) throws ServiceException { + public WarmupRegionResponse warmupRegion(RpcController controller, WarmupRegionRequest request) + throws ServiceException { return null; } + @Override - public CloseRegionResponse closeRegion(RpcController controller, - CloseRegionRequest request) throws ServiceException { + public CloseRegionResponse closeRegion(RpcController controller, CloseRegionRequest request) + throws ServiceException { return null; } @Override - public FlushRegionResponse flushRegion(RpcController controller, - FlushRegionRequest request) throws ServiceException { + public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request) + throws ServiceException { return null; } @@ -497,8 +492,8 @@ public CompactionSwitchResponse compactionSwitch(RpcController controller, } @Override - public CompactRegionResponse compactRegion(RpcController controller, - CompactRegionRequest request) throws ServiceException { + public CompactRegionResponse compactRegion(RpcController controller, CompactRegionRequest request) + throws ServiceException { return null; } @@ -509,20 +504,20 @@ public ReplicateWALEntryResponse replicateWALEntry(RpcController controller, } @Override - public 
RollWALWriterResponse rollWALWriter(RpcController controller, - RollWALWriterRequest request) throws ServiceException { + public RollWALWriterResponse rollWALWriter(RpcController controller, RollWALWriterRequest request) + throws ServiceException { return null; } @Override - public GetServerInfoResponse getServerInfo(RpcController controller, - GetServerInfoRequest request) throws ServiceException { + public GetServerInfoResponse getServerInfo(RpcController controller, GetServerInfoRequest request) + throws ServiceException { return null; } @Override - public StopServerResponse stopServer(RpcController controller, - StopServerRequest request) throws ServiceException { + public StopServerResponse stopServer(RpcController controller, StopServerRequest request) + throws ServiceException { return null; } @@ -567,9 +562,8 @@ public InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName) { } @Override - public ReplicateWALEntryResponse - replay(RpcController controller, ReplicateWALEntryRequest request) - throws ServiceException { + public ReplicateWALEntryResponse replay(RpcController controller, + ReplicateWALEntryRequest request) throws ServiceException { return null; } @@ -601,16 +595,14 @@ public CoprocessorServiceResponse execRegionServerService(RpcController controll } @Override - public UpdateConfigurationResponse updateConfiguration( - RpcController controller, UpdateConfigurationRequest request) - throws ServiceException { + public UpdateConfigurationResponse updateConfiguration(RpcController controller, + UpdateConfigurationRequest request) throws ServiceException { return null; } @Override public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, - ClearRegionBlockCacheRequest request) - throws ServiceException { + ClearRegionBlockCacheRequest request) throws ServiceException { return null; } @@ -690,9 +682,8 @@ public HBaseProtos.LogEntry getLogEntries(RpcController controller, } @Override - public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots( - RpcController controller, GetSpaceQuotaSnapshotsRequest request) - throws ServiceException { + public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller, + GetSpaceQuotaSnapshotsRequest request) throws ServiceException { return null; } @@ -707,8 +698,8 @@ public boolean reportRegionSizesForQuotas(RegionSizeStore sizeStore) { } @Override - public boolean reportFileArchivalForQuotas( - TableName tableName, Collection> archivedFiles) { + public boolean reportFileArchivalForQuotas(TableName tableName, + Collection> archivedFiles) { return false; } @@ -758,7 +749,7 @@ public RegionReplicationBufferManager getRegionReplicationBufferManager() { @Override public ReplicateWALEntryResponse replicateToReplica(RpcController controller, - ReplicateWALEntryRequest request) throws ServiceException { + ReplicateWALEntryRequest request) throws ServiceException { return null; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 8ba6f10e084d..7e5754d5b7f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.Semaphore; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -56,7 +55,7 @@ /** * Test the {@link ActiveMasterManager}. */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestActiveMasterManager { @ClassRule @@ -76,9 +75,10 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniZKCluster(); } - @Test public void testRestartMaster() throws IOException, KeeperException { - try (ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), - "testActiveMasterManagerFromZK", null, true)) { + @Test + public void testRestartMaster() throws IOException, KeeperException { + try (ZKWatcher zk = + new ZKWatcher(TEST_UTIL.getConfiguration(), "testActiveMasterManagerFromZK", null, true)) { try { ZKUtil.deleteNode(zk, zk.getZNodePaths().masterAddressZNode); ZKUtil.deleteNode(zk, zk.getZNodePaths().clusterStateZNode); @@ -89,10 +89,8 @@ public static void tearDownAfterClass() throws Exception { ServerName master = ServerName.valueOf("localhost", 1, EnvironmentEdgeManager.currentTime()); // Should not have a master yet DummyMaster dummyMaster = new DummyMaster(zk, master); - ClusterStatusTracker clusterStatusTracker = - dummyMaster.getClusterStatusTracker(); - ActiveMasterManager activeMasterManager = - dummyMaster.getActiveMasterManager(); + ClusterStatusTracker clusterStatusTracker = dummyMaster.getClusterStatusTracker(); + ActiveMasterManager activeMasterManager = dummyMaster.getActiveMasterManager(); assertFalse(activeMasterManager.clusterHasActiveMaster.get()); assertFalse(activeMasterManager.getActiveMasterServerName().isPresent()); @@ -107,8 +105,7 @@ public static void tearDownAfterClass() throws Exception { // Now pretend master restart DummyMaster secondDummyMaster = new DummyMaster(zk, master); - ActiveMasterManager secondActiveMasterManager = - secondDummyMaster.getActiveMasterManager(); + ActiveMasterManager secondActiveMasterManager = secondDummyMaster.getActiveMasterManager(); assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get()); activeMasterManager.blockUntilBecomingActiveMaster(100, status); assertTrue(activeMasterManager.clusterHasActiveMaster.get()); @@ -119,14 +116,14 @@ public static void tearDownAfterClass() throws Exception { } /** - * Unit tests that uses ZooKeeper but does not use the master-side methods - * but rather acts directly on ZK. + * Unit tests that uses ZooKeeper but does not use the master-side methods but rather acts + * directly on ZK. 
* @throws Exception */ @Test public void testActiveMasterManagerFromZK() throws Exception { - try (ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), - "testActiveMasterManagerFromZK", null, true)) { + try (ZKWatcher zk = + new ZKWatcher(TEST_UTIL.getConfiguration(), "testActiveMasterManagerFromZK", null, true)) { try { ZKUtil.deleteNode(zk, zk.getZNodePaths().masterAddressZNode); ZKUtil.deleteNode(zk, zk.getZNodePaths().clusterStateZNode); @@ -141,17 +138,14 @@ public void testActiveMasterManagerFromZK() throws Exception { // Should not have a master yet DummyMaster ms1 = new DummyMaster(zk, firstMasterAddress); - ActiveMasterManager activeMasterManager = - ms1.getActiveMasterManager(); + ActiveMasterManager activeMasterManager = ms1.getActiveMasterManager(); assertFalse(activeMasterManager.clusterHasActiveMaster.get()); assertFalse(activeMasterManager.getActiveMasterServerName().isPresent()); // First test becoming the active master uninterrupted - ClusterStatusTracker clusterStatusTracker = - ms1.getClusterStatusTracker(); + ClusterStatusTracker clusterStatusTracker = ms1.getClusterStatusTracker(); clusterStatusTracker.setClusterUp(); - activeMasterManager.blockUntilBecomingActiveMaster(100, - Mockito.mock(MonitoredTask.class)); + activeMasterManager.blockUntilBecomingActiveMaster(100, Mockito.mock(MonitoredTask.class)); assertTrue(activeMasterManager.clusterHasActiveMaster.get()); assertMaster(zk, firstMasterAddress); assertMaster(zk, activeMasterManager.getActiveMasterServerName().get()); @@ -179,8 +173,8 @@ public void testActiveMasterManagerFromZK() throws Exception { ms1.stop("stopping first server"); // Use a listener to capture when the node is actually deleted - NodeDeletionListener listener = new NodeDeletionListener(zk, - zk.getZNodePaths().masterAddressZNode); + NodeDeletionListener listener = + new NodeDeletionListener(zk, zk.getZNodePaths().masterAddressZNode); zk.registerListener(listener); LOG.info("Deleting master node"); @@ -217,16 +211,15 @@ public void testBackupMasterUpdates() throws Exception { ServerName sn1 = ServerName.valueOf("localhost", 1, -1); DummyMaster master1 = new DummyMaster(zk, sn1); ActiveMasterManager activeMasterManager = master1.getActiveMasterManager(); - activeMasterManager.blockUntilBecomingActiveMaster(100, - Mockito.mock(MonitoredTask.class)); + activeMasterManager.blockUntilBecomingActiveMaster(100, Mockito.mock(MonitoredTask.class)); assertEquals(sn1, activeMasterManager.getActiveMasterServerName().get()); assertEquals(0, activeMasterManager.getBackupMasters().size()); // Add backup masters List backupZNodes = new ArrayList<>(); for (int i = 1; i <= 10; i++) { ServerName backupSn = ServerName.valueOf("localhost", 1000 + i, -1); - String backupZn = ZNodePaths.joinZNode( - zk.getZNodePaths().backupMasterAddressesZNode, backupSn.toString()); + String backupZn = ZNodePaths.joinZNode(zk.getZNodePaths().backupMasterAddressesZNode, + backupSn.toString()); backupZNodes.add(backupZn); MasterAddressTracker.setMasterAddress(zk, backupZn, backupSn, 1234); TEST_UTIL.waitFor(10000, @@ -234,7 +227,7 @@ public void testBackupMasterUpdates() throws Exception { } // Remove backup masters int numBackups = backupZNodes.size(); - for (String backupZNode: backupZNodes) { + for (String backupZNode : backupZNodes) { ZKUtil.deleteNode(zk, backupZNode); final int currentBackups = --numBackups; TEST_UTIL.waitFor(10000, @@ -250,8 +243,8 @@ public void testBackupMasterUpdates() throws Exception { * @throws KeeperException unexpected Zookeeper exception * @throws 
IOException if an IO problem is encountered */ - private void assertMaster(ZKWatcher zk, ServerName expectedAddress) throws - KeeperException, IOException { + private void assertMaster(ZKWatcher zk, ServerName expectedAddress) + throws KeeperException, IOException { ServerName readAddress = MasterAddressTracker.getMasterAddress(zk); assertNotNull(readAddress); assertEquals(expectedAddress, readAddress); @@ -264,15 +257,14 @@ public static class WaitToBeMasterThread extends Thread { boolean isActiveMaster; public WaitToBeMasterThread(ZKWatcher zk, ServerName address) throws InterruptedIOException { - this.dummyMaster = new DummyMaster(zk,address); + this.dummyMaster = new DummyMaster(zk, address); this.manager = this.dummyMaster.getActiveMasterManager(); isActiveMaster = false; } @Override public void run() { - manager.blockUntilBecomingActiveMaster(100, - Mockito.mock(MonitoredTask.class)); + manager.blockUntilBecomingActiveMaster(100, Mockito.mock(MonitoredTask.class)); LOG.info("Second master has become the active master!"); isActiveMaster = true; } @@ -292,7 +284,7 @@ public NodeDeletionListener(ZKWatcher watcher, String node) { @Override public void nodeDeleted(String path) { - if(path.equals(node)) { + if (path.equals(node)) { LOG.debug("nodeDeleted(" + path + ")"); lock.release(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAlwaysStandByHMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAlwaysStandByHMaster.java index 2ed4687f186f..b67e55192588 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAlwaysStandByHMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAlwaysStandByHMaster.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.MiniClusterRule; @@ -31,29 +32,26 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MediumTests.class, MasterTests.class}) +@Category({ MediumTests.class, MasterTests.class }) public class TestAlwaysStandByHMaster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAlwaysStandByHMaster.class); + HBaseClassTestRule.forClass(TestAlwaysStandByHMaster.class); private static final StartTestingClusterOption OPTION = StartTestingClusterOption.builder() - .numAlwaysStandByMasters(1) - .numMasters(1) - .numRegionServers(1) - .build(); + .numAlwaysStandByMasters(1).numMasters(1).numRegionServers(1).build(); @ClassRule - public static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder() - .setMiniClusterOption(OPTION) - .build(); + public static final MiniClusterRule miniClusterRule = + MiniClusterRule.newBuilder().setMiniClusterOption(OPTION).build(); /** * Tests that the AlwaysStandByHMaster does not transition to active state even if no active * master exists. */ - @Test public void testAlwaysStandBy() throws Exception { + @Test + public void testAlwaysStandBy() throws Exception { HBaseTestingUtil testUtil = miniClusterRule.getTestingUtility(); // Make sure there is an active master. assertNotNull(testUtil.getMiniHBaseCluster().getMaster()); @@ -67,6 +65,6 @@ public class TestAlwaysStandByHMaster { assertTrue(testUtil.getMiniHBaseCluster().waitForActiveAndReadyMaster(5000)); // Newly added master should be the active. 
assertEquals(newActive.getServerName(), - testUtil.getMiniHBaseCluster().getMaster().getServerName()); + testUtil.getMiniHBaseCluster().getMaster().getServerName()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java index fa99663d326f..3d753d24297a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,8 +58,8 @@ public class TestAssignmentManagerMetrics { HBaseClassTestRule.forClass(TestAssignmentManagerMetrics.class); private static final Logger LOG = LoggerFactory.getLogger(TestAssignmentManagerMetrics.class); - private static final MetricsAssertHelper METRICS_HELPER = CompatibilityFactory - .getInstance(MetricsAssertHelper.class); + private static final MetricsAssertHelper METRICS_HELPER = + CompatibilityFactory.getInstance(MetricsAssertHelper.class); private static SingleProcessHBaseCluster CLUSTER; private static HMaster MASTER; @@ -112,7 +112,7 @@ public static void after() throws Exception { public void testRITAssignmentManagerMetrics() throws Exception { final TableName TABLENAME = TableName.valueOf(name.getMethodName()); final byte[] FAMILY = Bytes.toBytes("family"); - try (Table table = TEST_UTIL.createTable(TABLENAME, FAMILY)){ + try (Table table = TEST_UTIL.createTable(TABLENAME, FAMILY)) { final byte[] row = Bytes.toBytes("row"); final byte[] qualifier = Bytes.toBytes("qualifier"); final byte[] value = Bytes.toBytes("value"); @@ -130,19 +130,16 @@ public void testRITAssignmentManagerMetrics() throws Exception { METRICS_HELPER.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_NAME, 0, amSource); METRICS_HELPER.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_OVER_THRESHOLD_NAME, 0, - amSource); + amSource); // alter table with a non-existing coprocessor TableDescriptor htd = TableDescriptorBuilder.newBuilder(TABLENAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder("com.foo.FooRegionObserver") - .setJarPath("hdfs:///foo.jar") - .setPriority(1001) - .setProperty("arg1", "1") - .setProperty("arg2", "2") - .build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) + .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder("com.foo.FooRegionObserver") + .setJarPath("hdfs:///foo.jar").setPriority(1001).setProperty("arg1", "1") + .setProperty("arg2", "2").build()) + .build(); try { TEST_UTIL.getAdmin().modifyTable(htd); fail("Expected region failed to open"); @@ -161,8 +158,8 @@ public void testRITAssignmentManagerMetrics() throws Exception { METRICS_HELPER.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_NAME, 1, amSource); METRICS_HELPER.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_OVER_THRESHOLD_NAME, 1, amSource); - METRICS_HELPER.assertCounter(MetricsAssignmentManagerSource.ASSIGN_METRIC_PREFIX - + "SubmittedCount", 2, amSource); + METRICS_HELPER.assertCounter( + MetricsAssignmentManagerSource.ASSIGN_METRIC_PREFIX + "SubmittedCount", 2, amSource); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestBalancer.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestBalancer.java index 7fef729e91ab..95445a7aaca8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestBalancer.java @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -84,8 +83,8 @@ public void testAssignmentsForBalancer() throws Exception { TableStateManager tableStateManager = master.getTableStateManager(); ServerManager serverManager = master.getServerManager(); Map>> assignments = - assignmentManager.getRegionStates() - .getAssignmentsForBalancer(tableStateManager, serverManager.getOnlineServersList()); + assignmentManager.getRegionStates().getAssignmentsForBalancer(tableStateManager, + serverManager.getOnlineServersList()); assertFalse(assignments.containsKey(disableTableName)); assertTrue(assignments.containsKey(tableName)); assertFalse(assignments.get(tableName).containsKey(sn1)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java index b2e204cc1e31..7b1153ee8e25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClientMetaServiceRPCs.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,7 +55,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsResponse; -@Category({MediumTests.class, MasterTests.class}) +@Category({ MediumTests.class, MasterTests.class }) public class TestClientMetaServiceRPCs { @ClassRule @@ -77,10 +76,10 @@ public static void setUp() throws Exception { builder.numMasters(MASTER_COUNT).numRegionServers(3); TEST_UTIL.startMiniCluster(builder.build()); conf = TEST_UTIL.getConfiguration(); - rpcTimeout = (int) Math.min(Integer.MAX_VALUE, TimeUnit.MILLISECONDS.toNanos( - conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT))); + rpcTimeout = (int) Math.min(Integer.MAX_VALUE, TimeUnit.MILLISECONDS + .toNanos(conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT))); rpcClient = RpcClientFactory.createClient(conf, - TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId()); + TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId()); } @AfterClass @@ -93,8 +92,8 @@ public static void tearDown() throws Exception { private static ClientMetaService.BlockingInterface getMasterStub(ServerName server) throws IOException { - return ClientMetaService.newBlockingStub( - rpcClient.createBlockingRpcChannel(server, User.getCurrent(), rpcTimeout)); + return ClientMetaService + .newBlockingStub(rpcClient.createBlockingRpcChannel(server, User.getCurrent(), rpcTimeout)); } private static HBaseRpcController getRpcController() { @@ -104,12 +103,13 @@ private static HBaseRpcController getRpcController() { /** * Verifies the cluster ID from all running masters. 
*/ - @Test public void TestClusterID() throws Exception { + @Test + public void TestClusterID() throws Exception { HBaseRpcController rpcController = getRpcController(); String clusterID = TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId(); int rpcCount = 0; - for (JVMClusterUtil.MasterThread masterThread: - TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) { + for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster() + .getMasterThreads()) { ClientMetaService.BlockingInterface stub = getMasterStub(masterThread.getMaster().getServerName()); GetClusterIdResponse resp = @@ -123,12 +123,13 @@ private static HBaseRpcController getRpcController() { /** * Verifies the active master ServerName as seen by all masters. */ - @Test public void TestActiveMaster() throws Exception { + @Test + public void TestActiveMaster() throws Exception { HBaseRpcController rpcController = getRpcController(); ServerName activeMaster = TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName(); int rpcCount = 0; - for (JVMClusterUtil.MasterThread masterThread: - TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) { + for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster() + .getMasterThreads()) { ClientMetaService.BlockingInterface stub = getMasterStub(masterThread.getMaster().getServerName()); GetActiveMasterResponse resp = @@ -142,21 +143,22 @@ private static HBaseRpcController getRpcController() { /** * Verifies that the meta region locations RPC returns consistent results across all masters. */ - @Test public void TestMetaLocations() throws Exception { + @Test + public void TestMetaLocations() throws Exception { HBaseRpcController rpcController = getRpcController(); List metaLocations = - TEST_UTIL.getMiniHBaseCluster().getMaster().getMetaLocations(); + TEST_UTIL.getMiniHBaseCluster().getMaster().getMetaLocations(); Collections.sort(metaLocations); int rpcCount = 0; - for (JVMClusterUtil.MasterThread masterThread: - TEST_UTIL.getMiniHBaseCluster().getMasterThreads()) { + for (JVMClusterUtil.MasterThread masterThread : TEST_UTIL.getMiniHBaseCluster() + .getMasterThreads()) { ClientMetaService.BlockingInterface stub = getMasterStub(masterThread.getMaster().getServerName()); - GetMetaRegionLocationsResponse resp = stub.getMetaRegionLocations( - rpcController, GetMetaRegionLocationsRequest.getDefaultInstance()); + GetMetaRegionLocationsResponse resp = stub.getMetaRegionLocations(rpcController, + GetMetaRegionLocationsRequest.getDefaultInstance()); List result = new ArrayList<>(); - resp.getMetaLocationsList().forEach( - location -> result.add(ProtobufUtil.toRegionLocation(location))); + resp.getMetaLocationsList() + .forEach(location -> result.add(ProtobufUtil.toRegionLocation(location))); Collections.sort(result); assertEquals(metaLocations, result); rpcCount++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index 0fe453e43d94..b3d85ffac65b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,21 +35,20 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestClockSkewDetection { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestClockSkewDetection.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestClockSkewDetection.class); + private static final Logger LOG = LoggerFactory.getLogger(TestClockSkewDetection.class); @Test public void testClockSkewDetection() throws Exception { final Configuration conf = HBaseConfiguration.create(); ServerManager sm = - new ServerManager(new MockNoopMasterServices(conf), new DummyRegionServerList()); + new ServerManager(new MockNoopMasterServices(conf), new DummyRegionServerList()); LOG.debug("regionServerStartup 1"); InetAddress ia1 = InetAddress.getLocalHost(); @@ -64,7 +63,7 @@ public void testClockSkewDetection() throws Exception { long warningSkew = c.getLong("hbase.master.warningclockskew", 1000); try { - //Master Time > Region Server Time + // Master Time > Region Server Time LOG.debug("Test: Master Time > Region Server Time"); LOG.debug("regionServerStartup 2"); InetAddress ia2 = InetAddress.getLocalHost(); @@ -74,9 +73,9 @@ public void testClockSkewDetection() throws Exception { request.setServerCurrentTime(EnvironmentEdgeManager.currentTime() - maxSkew * 2); sm.regionServerStartup(request.build(), 0, "0.0.0", ia2); fail("HMaster should have thrown a ClockOutOfSyncException but didn't."); - } catch(ClockOutOfSyncException e) { - //we want an exception - LOG.info("Received expected exception: "+e); + } catch (ClockOutOfSyncException e) { + // we want an exception + LOG.info("Received expected exception: " + e); } try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java index 73ff4155b49c..d0248341fae4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCloseAnOpeningRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ public class TestCloseAnOpeningRegion { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCloseAnOpeningRegion.class); + HBaseClassTestRule.forClass(TestCloseAnOpeningRegion.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -71,12 +71,12 @@ public MockHMaster(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManager(master, masterRegion) { @Override public ReportRegionStateTransitionResponse reportRegionStateTransition( - ReportRegionStateTransitionRequest req) throws PleaseHoldException { + ReportRegionStateTransitionRequest req) throws PleaseHoldException { ReportRegionStateTransitionResponse resp = super.reportRegionStateTransition(req); TransitionCode code = req.getTransition(0).getTransitionCode(); if (code == TransitionCode.OPENED && ARRIVE != null) { @@ -97,7 +97,7 @@ public ReportRegionStateTransitionResponse reportRegionStateTransition( public static void setUp() throws Exception { UTIL.getConfiguration().setInt(HConstants.HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY, 60000); UTIL.startMiniCluster(StartTestingClusterOption.builder().numRegionServers(2) - .masterClass(MockHMaster.class).build()); + .masterClass(MockHMaster.class).build()); UTIL.createTable(TABLE_NAME, CF); UTIL.getAdmin().balancerSwitch(false, true); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java index e880f9790477..2fb7e9fb7794 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestart.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestClusterRestart extends AbstractTestRestartCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClusterRestart.class); + HBaseClassTestRule.forClass(TestClusterRestart.class); private static final Logger LOG = LoggerFactory.getLogger(TestClusterRestart.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java index c2244403110f..619f3b53730b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailover.java @@ -55,11 +55,11 @@ public class TestClusterRestartFailover extends AbstractTestRestartCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClusterRestartFailover.class); + HBaseClassTestRule.forClass(TestClusterRestartFailover.class); private static final Logger LOG = LoggerFactory.getLogger(TestClusterRestartFailover.class); private static final MetricsAssertHelper metricsHelper = - CompatibilityFactory.getInstance(MetricsAssertHelper.class); + CompatibilityFactory.getInstance(MetricsAssertHelper.class); private volatile static CountDownLatch SCP_LATCH; private static ServerName SERVER_FOR_TEST; @@ -71,7 +71,7 @@ protected boolean splitWALCoordinatedByZk() { private ServerStateNode getServerStateNode(ServerName serverName) { return UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .getServerNode(serverName); + .getServerNode(serverName); } /** @@ -87,7 +87,7 @@ public void test() throws Exception { ServerStateNode serverNode = getServerStateNode(SERVER_FOR_TEST); assertNotNull(serverNode); assertTrue("serverNode should be ONLINE when cluster runs normally", - serverNode.isInState(ServerState.ONLINE)); + serverNode.isInState(ServerState.ONLINE)); SCP_LATCH = new CountDownLatch(1); @@ -107,39 +107,40 @@ public void test() throws Exception { UTIL.waitFor(60000, () -> getServerStateNode(SERVER_FOR_TEST) != null); serverNode = getServerStateNode(SERVER_FOR_TEST); assertFalse("serverNode should not be ONLINE during SCP processing", - serverNode.isInState(ServerState.ONLINE)); + serverNode.isInState(ServerState.ONLINE)); Optional> procedure = UTIL.getHBaseCluster().getMaster().getProcedures().stream() - .filter(p -> (p instanceof ServerCrashProcedure) && - ((ServerCrashProcedure) p).getServerName().equals(SERVER_FOR_TEST)).findAny(); + .filter(p -> (p instanceof ServerCrashProcedure) + && ((ServerCrashProcedure) p).getServerName().equals(SERVER_FOR_TEST)) + .findAny(); assertTrue("Should have one SCP for " + SERVER_FOR_TEST, procedure.isPresent()); assertTrue("Submit the SCP for the same serverName " + SERVER_FOR_TEST + " which should fail", - UTIL.getHBaseCluster().getMaster().getServerManager().expireServer(SERVER_FOR_TEST) == - Procedure.NO_PROC_ID); + UTIL.getHBaseCluster().getMaster().getServerManager() + .expireServer(SERVER_FOR_TEST) == Procedure.NO_PROC_ID); // Wait the SCP to finish LOG.info("Waiting on latch"); SCP_LATCH.countDown(); UTIL.waitFor(60000, () -> procedure.get().isFinished()); - assertFalse("Even when the SCP is finished, the duplicate SCP should not be scheduled for " + - SERVER_FOR_TEST, - 
UTIL.getHBaseCluster().getMaster().getServerManager().expireServer(SERVER_FOR_TEST) == - Procedure.NO_PROC_ID); + assertFalse( + "Even when the SCP is finished, the duplicate SCP should not be scheduled for " + + SERVER_FOR_TEST, + UTIL.getHBaseCluster().getMaster().getServerManager() + .expireServer(SERVER_FOR_TEST) == Procedure.NO_PROC_ID); serverNode = UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates() .getServerNode(SERVER_FOR_TEST); assertNull("serverNode should be deleted after SCP finished", serverNode); - MetricsMasterSource masterSource = UTIL.getHBaseCluster().getMaster().getMasterMetrics() - .getMetricsSource(); - metricsHelper.assertCounter(MetricsMasterSource.SERVER_CRASH_METRIC_PREFIX+"SubmittedCount", + MetricsMasterSource masterSource = + UTIL.getHBaseCluster().getMaster().getMasterMetrics().getMetricsSource(); + metricsHelper.assertCounter(MetricsMasterSource.SERVER_CRASH_METRIC_PREFIX + "SubmittedCount", 4, masterSource); } private void setupCluster() throws Exception { LOG.info("Setup cluster"); - UTIL.startMiniCluster( - StartTestingClusterOption.builder().masterClass(HMasterForTest.class).numMasters(1) - .numRegionServers(3).build()); + UTIL.startMiniCluster(StartTestingClusterOption.builder().masterClass(HMasterForTest.class) + .numMasters(1).numRegionServers(3).build()); LOG.info("Cluster is up"); UTIL.waitFor(60000, () -> UTIL.getMiniHBaseCluster().getMaster().isInitialized()); LOG.info("Master is up"); @@ -167,7 +168,7 @@ public HMasterForTest(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailoverSplitWithoutZk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailoverSplitWithoutZk.java index 63d0f2be9c02..15b30ceeee7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailoverSplitWithoutZk.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartFailoverSplitWithoutZk.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ public class TestClusterRestartFailoverSplitWithoutZk extends TestClusterRestart @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClusterRestartFailoverSplitWithoutZk.class); + HBaseClassTestRule.forClass(TestClusterRestartFailoverSplitWithoutZk.class); @Override protected boolean splitWALCoordinatedByZk() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartSplitWithoutZk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartSplitWithoutZk.java index 0278053d10bd..2d19f73ed3c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartSplitWithoutZk.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterRestartSplitWithoutZk.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ public class TestClusterRestartSplitWithoutZk extends TestClusterRestart { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClusterRestartSplitWithoutZk.class); + HBaseClassTestRule.forClass(TestClusterRestartSplitWithoutZk.class); @Override protected boolean splitWALCoordinatedByZk() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java index 8ac5442bc486..0782c1ca4f7b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, SmallTests.class}) // Plays with the ManualEnvironmentEdge +@Category({ MasterTests.class, SmallTests.class }) // Plays with the ManualEnvironmentEdge public class TestClusterStatusPublisher { @ClassRule @@ -100,7 +100,6 @@ protected List> getDeadServers(long since) { } }; - mee.setValue(3); List allSNS = csp.generateDeadServersListToSend(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSAsyncFSWAL.java index 9d4fc9acd935..46758f8e8627 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSAsyncFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSAsyncFSWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSFSHLog.java index 735f163fc519..24a8ab930818 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSFSHLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSFSHLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java index 2a072cc95189..28edbbfe9d34 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestDeadServer { @ClassRule @@ -67,7 +67,8 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test public void testIsDead() { + @Test + public void testIsDead() { DeadServer ds = new DeadServer(); ds.putIfAbsent(hostname123); @@ -75,8 +76,8 @@ public static void tearDownAfterClass() throws Exception { ds.putIfAbsent(hostname12345); - // Already dead = 127.0.0.1,9090,112321 - // Coming back alive = 127.0.0.1,9090,223341 + // Already dead = 127.0.0.1,9090,112321 + // Coming back alive = 127.0.0.1,9090,223341 final ServerName deadServer = ServerName.valueOf("127.0.0.1", 9090, 112321L); assertFalse(ds.cleanPreviousInstance(deadServer)); @@ -86,8 +87,7 @@ public static void tearDownAfterClass() throws Exception { for (ServerName eachDeadServer : deadServerNames) { Assert.assertNotNull(ds.getTimeOfDeath(eachDeadServer)); } - final ServerName deadServerHostComingAlive = - ServerName.valueOf("127.0.0.1", 9090, 223341L); + final ServerName deadServerHostComingAlive = ServerName.valueOf("127.0.0.1", 9090, 223341L); assertTrue(ds.cleanPreviousInstance(deadServerHostComingAlive)); assertFalse(ds.isDeadServer(deadServer)); assertFalse(ds.cleanPreviousInstance(deadServerHostComingAlive)); @@ -97,8 +97,8 @@ public static void tearDownAfterClass() throws Exception { public void testCrashProcedureReplay() throws Exception { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); final ProcedureExecutor pExecutor = master.getMasterProcedureExecutor(); - ServerCrashProcedure proc = new ServerCrashProcedure( - pExecutor.getEnvironment(), hostname123, false, false); + ServerCrashProcedure proc = + new ServerCrashProcedure(pExecutor.getEnvironment(), hostname123, false, false); pExecutor.stop(); ProcedureTestingUtility.submitAndWait(pExecutor, proc); @@ -110,7 +110,7 @@ public void testCrashProcedureReplay() throws Exception { } @Test - public void testSortExtract(){ + public void testSortExtract() { ManualEnvironmentEdge mee = new ManualEnvironmentEdge(); EnvironmentEdgeManager.injectEdge(mee); mee.setValue(1); @@ -136,7 +136,7 @@ public void testSortExtract(){ } @Test - public void testClean(){ + public void testClean() { DeadServer d = new DeadServer(); d.putIfAbsent(hostname123); @@ -151,7 +151,7 @@ public void testClean(){ } @Test - public void testClearDeadServer(){ + public void testClearDeadServer() { DeadServer d = new DeadServer(); d.putIfAbsent(hostname123); d.putIfAbsent(hostname1234); @@ -169,4 +169,3 @@ public void testClearDeadServer(){ Assert.assertTrue(d.isEmpty()); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetInfoPort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetInfoPort.java index c70efc94a014..23d077e03714 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetInfoPort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetInfoPort.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java index d88f6dd0dc13..ef591a850cb6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -76,11 +76,11 @@ public void tearDown() throws Exception { @Test public void test() throws IOException, InterruptedException { - testUtil.getAdmin().createNamespace( - NamespaceDescriptor.create(tableName.getNamespaceAsString()).build()); + testUtil.getAdmin() + .createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()).build()); Table table = testUtil.createTable(tableName, families); - table.put(new Put(Bytes.toBytes("k")) - .addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v"))); + table + .put(new Put(Bytes.toBytes("k")).addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v"))); SingleProcessHBaseCluster cluster = testUtil.getMiniHBaseCluster(); List rsts = cluster.getRegionServerThreads(); Region region = null; @@ -94,7 +94,7 @@ public void test() throws IOException, InterruptedException { assertNotNull(region); Thread.sleep(2000); RegionStoreSequenceIds ids = testUtil.getHBaseCluster().getMaster().getServerManager() - .getLastFlushedSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); + .getLastFlushedSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId()); // This will be the sequenceid just before that of the earliest edit in memstore. long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId(); @@ -102,7 +102,7 @@ public void test() throws IOException, InterruptedException { testUtil.getAdmin().flush(tableName); Thread.sleep(2000); ids = testUtil.getHBaseCluster().getMaster().getServerManager() - .getLastFlushedSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); + .getLastFlushedSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId, ids.getLastFlushedSequenceId() > storeSequenceId); assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetReplicationLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetReplicationLoad.java index d7d5b38ec6d6..2bf6a6d262b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetReplicationLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetReplicationLoad.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.master; @@ -15,7 +22,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -63,8 +69,8 @@ public static void startCluster() throws Exception { LOG.info("Starting cluster"); TEST_UTIL = new HBaseTestingUtil(); // Set master class and use default values for other options. - StartTestingClusterOption option = StartTestingClusterOption.builder() - .masterClass(TestMasterMetrics.MyMaster.class).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().masterClass(TestMasterMetrics.MyMaster.class).build(); TEST_UTIL.startMiniCluster(option); cluster = TEST_UTIL.getHBaseCluster(); LOG.info("Waiting for active/ready master"); @@ -81,51 +87,29 @@ public static void after() throws Exception { @Test public void testGetReplicationMetrics() throws Exception { - String peer1 = "test1", peer2 = "test2", queueId="1"; - long ageOfLastShippedOp = 2, - replicationLag = 3, - timeStampOfLastShippedOp = 4, - timeStampOfNextToReplicate=5, - editsRead=6, - oPsShipped=7; + String peer1 = "test1", peer2 = "test2", queueId = "1"; + long ageOfLastShippedOp = 2, replicationLag = 3, timeStampOfLastShippedOp = 4, + timeStampOfNextToReplicate = 5, editsRead = 6, oPsShipped = 7; int sizeOfLogQueue = 8; - boolean recovered=false, - running=false, - editsSinceRestart=false; + boolean recovered = false, running = false, editsSinceRestart = false; RegionServerStatusProtos.RegionServerReportRequest.Builder request = RegionServerStatusProtos.RegionServerReportRequest.newBuilder(); ServerName serverName = cluster.getMaster(0).getServerName(); request.setServer(ProtobufUtil.toServerName(serverName)); ClusterStatusProtos.ReplicationLoadSource rload1 = ClusterStatusProtos.ReplicationLoadSource - .newBuilder().setPeerID(peer1) - .setAgeOfLastShippedOp(ageOfLastShippedOp) - .setReplicationLag(replicationLag) - .setTimeStampOfLastShippedOp(timeStampOfLastShippedOp) - .setSizeOfLogQueue(sizeOfLogQueue) - .setTimeStampOfNextToReplicate(timeStampOfNextToReplicate) - .setQueueId(queueId) - .setEditsRead(editsRead) - .setOPsShipped(oPsShipped) - .setRunning(running) - .setRecovered(recovered) - 
.setEditsSinceRestart(editsSinceRestart) - .build(); + .newBuilder().setPeerID(peer1).setAgeOfLastShippedOp(ageOfLastShippedOp) + .setReplicationLag(replicationLag).setTimeStampOfLastShippedOp(timeStampOfLastShippedOp) + .setSizeOfLogQueue(sizeOfLogQueue).setTimeStampOfNextToReplicate(timeStampOfNextToReplicate) + .setQueueId(queueId).setEditsRead(editsRead).setOPsShipped(oPsShipped).setRunning(running) + .setRecovered(recovered).setEditsSinceRestart(editsSinceRestart).build(); ClusterStatusProtos.ReplicationLoadSource rload2 = - ClusterStatusProtos.ReplicationLoadSource - .newBuilder() - .setPeerID(peer2) - .setAgeOfLastShippedOp(ageOfLastShippedOp + 1) - .setReplicationLag(replicationLag + 1) + ClusterStatusProtos.ReplicationLoadSource.newBuilder().setPeerID(peer2) + .setAgeOfLastShippedOp(ageOfLastShippedOp + 1).setReplicationLag(replicationLag + 1) .setTimeStampOfLastShippedOp(timeStampOfLastShippedOp + 1) .setSizeOfLogQueue(sizeOfLogQueue + 1) - .setTimeStampOfNextToReplicate(timeStampOfNextToReplicate+1) - .setQueueId(queueId) - .setEditsRead(editsRead+1) - .setOPsShipped(oPsShipped+1) - .setRunning(running) - .setRecovered(recovered) - .setEditsSinceRestart(editsSinceRestart) - .build(); + .setTimeStampOfNextToReplicate(timeStampOfNextToReplicate + 1).setQueueId(queueId) + .setEditsRead(editsRead + 1).setOPsShipped(oPsShipped + 1).setRunning(running) + .setRecovered(recovered).setEditsSinceRestart(editsSinceRestart).build(); ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder() .addReplLoadSource(rload1).addReplLoadSource(rload2).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterCommandLine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterCommandLine.java index f1f4ad9864a4..a5f2b2f089e3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterCommandLine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterCommandLine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestHMasterCommandLine { @ClassRule @@ -35,10 +35,11 @@ public class TestHMasterCommandLine { HBaseClassTestRule.forClass(TestHMasterCommandLine.class); private static final HBaseTestingUtil TESTING_UTIL = new HBaseTestingUtil(); + @Test public void testRun() throws Exception { HMasterCommandLine masterCommandLine = new HMasterCommandLine(HMaster.class); masterCommandLine.setConf(TESTING_UTIL.getConfiguration()); - assertEquals(0, masterCommandLine.run(new String [] {"clear"})); + assertEquals(0, masterCommandLine.run(new String[] { "clear" })); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java index cfeba2534bcd..34ae13881648 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -73,7 +73,7 @@ public void setUp() throws Exception { ZKWatcher watcher = testUtil.getZooKeeperWatcher(); ZKUtil.createWithParents(watcher, watcher.getZNodePaths().masterAddressZNode, - Bytes.toBytes("fake:123")); + Bytes.toBytes("fake:123")); master = new HMaster(conf); rpcClient = RpcClientFactory.createClient(conf, HConstants.CLUSTER_ID_DEFAULT); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestLoadProcedureError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestLoadProcedureError.java index d2b9eb3125d5..d847dd88d3e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestLoadProcedureError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestLoadProcedureError.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ public class TestLoadProcedureError { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLoadProcedureError.class); + HBaseClassTestRule.forClass(TestLoadProcedureError.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -120,7 +120,7 @@ private void waitNoMaster() { @Test public void testLoadError() throws Exception { ProcedureExecutor procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); ARRIVE = new CountDownLatch(1); long procId = procExec.submitProcedure(new TestProcedure()); ARRIVE.await(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 65a205fd3c93..7f981bede64b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -70,12 +70,11 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMaster { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMaster.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMaster.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Logger LOG = LoggerFactory.getLogger(TestMaster.class); @@ -121,7 +120,7 @@ public boolean visit(Result data) throws IOException { return true; } Pair pair = new Pair<>(CatalogFamilyFormat.getRegionInfo(data), - CatalogFamilyFormat.getServerName(data, 0)); + CatalogFamilyFormat.getServerName(data, 0)); if (!pair.getFirst().getTable().equals(tableName)) { return false; } @@ -145,14 +144,12 @@ public void testMasterOpsWhileSplitting() throws Exception { TEST_UTIL.loadTable(ht, FAMILYNAME, false); } - List> tableRegions = MetaTableAccessor.getTableRegionsAndLocations( - m.getConnection(), TABLENAME); + List> tableRegions = + MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME); LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions)); assertEquals(1, tableRegions.size()); - assertArrayEquals(HConstants.EMPTY_START_ROW, - tableRegions.get(0).getFirst().getStartKey()); - assertArrayEquals(HConstants.EMPTY_END_ROW, - tableRegions.get(0).getFirst().getEndKey()); + assertArrayEquals(HConstants.EMPTY_START_ROW, tableRegions.get(0).getFirst().getStartKey()); + assertArrayEquals(HConstants.EMPTY_END_ROW, tableRegions.get(0).getFirst().getEndKey()); // Now trigger a split and stop when the split is in progress LOG.info("Splitting table"); @@ -160,8 +157,8 @@ public void testMasterOpsWhileSplitting() throws Exception { LOG.info("Making sure we can call getTableRegions while opening"); while (tableRegions.size() < 3) { - tableRegions = MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), - TABLENAME, false); + tableRegions = + MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME, false); Thread.sleep(100); } LOG.info("Regions: " + Joiner.on(',').join(tableRegions)); @@ -171,8 +168,7 @@ public void testMasterOpsWhileSplitting() throws Exception { Pair pair = getTableRegionForRow(m, TABLENAME, Bytes.toBytes("cde")); LOG.info("Result is: " + pair); Pair tableRegionFromName = - MetaTableAccessor.getRegion(m.getConnection(), - pair.getFirst().getRegionName()); + MetaTableAccessor.getRegion(m.getConnection(), pair.getFirst().getRegionName()); assertTrue(RegionInfo.COMPARATOR.compare(tableRegionFromName.getFirst(), pair.getFirst()) == 0); } @@ -195,18 +191,15 @@ public void testMoveRegionWhenNotInitialized() { @Test public void testMoveThrowsUnknownRegionException() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("value")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("value")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); 
admin.createTable(tableDescriptorBuilder.build()); try { - RegionInfo hri = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes("A")) - .setEndKey(Bytes.toBytes("Z")) - .build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("A")) + .setEndKey(Bytes.toBytes("Z")).build(); admin.move(hri.getEncodedNameAsBytes()); fail("Region should not be moved since it is fake"); } catch (IOException ioe) { @@ -220,10 +213,9 @@ public void testMoveThrowsUnknownRegionException() throws IOException { public void testMoveThrowsPleaseHoldException() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("value")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("value")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); @@ -248,7 +240,7 @@ public void testFlushedSequenceIdPersistLoad() throws Exception { // insert some data into META TableName tableName = TableName.valueOf("testFlushSeqId"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build(); Table table = TEST_UTIL.createTable(tableDescriptor, null); // flush META region TEST_UTIL.flush(TableName.META_TABLE_NAME); @@ -256,16 +248,14 @@ public void testFlushedSequenceIdPersistLoad() throws Exception { Threads.sleep(msgInterval * 2); // record flush seqid before cluster shutdown Map regionMapBefore = - TEST_UTIL.getHBaseCluster().getMaster().getServerManager() - .getFlushedSequenceIdByRegion(); + TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getFlushedSequenceIdByRegion(); // restart hbase cluster which will cause flushed sequence id persist and reload TEST_UTIL.getMiniHBaseCluster().shutdown(); TEST_UTIL.restartHBaseCluster(2); TEST_UTIL.waitUntilNoRegionsInTransition(); // check equality after reloading flushed sequence id map Map regionMapAfter = - TEST_UTIL.getHBaseCluster().getMaster().getServerManager() - .getFlushedSequenceIdByRegion(); + TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getFlushedSequenceIdByRegion(); assertTrue(regionMapBefore.equals(regionMapAfter)); } @@ -273,16 +263,16 @@ public void testFlushedSequenceIdPersistLoad() throws Exception { public void testBlockingHbkc1WithLockFile() throws IOException { // This is how the patch to the lock file is created inside in HBaseFsck. Too hard to use its // actual method without disturbing HBaseFsck... Do the below mimic instead. - Path hbckLockPath = new Path(HBaseFsck.getTmpDir(TEST_UTIL.getConfiguration()), - HBaseFsck.HBCK_LOCK_FILE); + Path hbckLockPath = + new Path(HBaseFsck.getTmpDir(TEST_UTIL.getConfiguration()), HBaseFsck.HBCK_LOCK_FILE); FileSystem fs = TEST_UTIL.getTestFileSystem(); assertTrue(fs.exists(hbckLockPath)); - TEST_UTIL.getMiniHBaseCluster(). 
- killMaster(TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName()); + TEST_UTIL.getMiniHBaseCluster() + .killMaster(TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName()); assertTrue(fs.exists(hbckLockPath)); TEST_UTIL.getMiniHBaseCluster().startMaster(); - TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster() != null && - TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized()); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster() != null + && TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized()); assertTrue(fs.exists(hbckLockPath)); // Start a second Master. Should be fine. TEST_UTIL.getMiniHBaseCluster().startMaster(); @@ -290,8 +280,8 @@ public void testBlockingHbkc1WithLockFile() throws IOException { fs.delete(hbckLockPath, true); assertFalse(fs.exists(hbckLockPath)); // Kill all Masters. - TEST_UTIL.getMiniHBaseCluster().getLiveMasterThreads().stream(). - map(sn -> sn.getMaster().getServerName()).forEach(sn -> { + TEST_UTIL.getMiniHBaseCluster().getLiveMasterThreads().stream() + .map(sn -> sn.getMaster().getServerName()).forEach(sn -> { try { TEST_UTIL.getMiniHBaseCluster().killMaster(sn); } catch (IOException e) { @@ -300,10 +290,9 @@ public void testBlockingHbkc1WithLockFile() throws IOException { }); // Start a new one. TEST_UTIL.getMiniHBaseCluster().startMaster(); - TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster() != null && - TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized()); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster() != null + && TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized()); // Assert lock gets put in place again. assertTrue(fs.exists(hbckLockPath)); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterAbortAndRSGotKilled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterAbortAndRSGotKilled.java index 8b364c4d9030..0fb6d495fa0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterAbortAndRSGotKilled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterAbortAndRSGotKilled.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,11 +49,11 @@ @Category({ MasterTests.class, MediumTests.class }) public class TestMasterAbortAndRSGotKilled { private static Logger LOG = - LoggerFactory.getLogger(TestMasterAbortAndRSGotKilled.class.getName()); + LoggerFactory.getLogger(TestMasterAbortAndRSGotKilled.class.getName()); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterAbortAndRSGotKilled.class); + HBaseClassTestRule.forClass(TestMasterAbortAndRSGotKilled.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -82,7 +82,7 @@ public static void tearDown() throws Exception { public void test() throws Exception { JVMClusterUtil.RegionServerThread rsThread = null; for (JVMClusterUtil.RegionServerThread t : UTIL.getMiniHBaseCluster() - .getRegionServerThreads()) { + .getRegionServerThreads()) { if (!t.getRegionServer().getRegions(TABLE_NAME).isEmpty()) { rsThread = t; break; @@ -94,16 +94,16 @@ public void test() throws Exception { TransitRegionStateProcedure moveRegionProcedure = TransitRegionStateProcedure.reopen( UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(), hri); RegionStateNode regionNode = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getOrCreateRegionStateNode(hri); + .getRegionStates().getOrCreateRegionStateNode(hri); regionNode.setProcedure(moveRegionProcedure); UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor() - .submitProcedure(moveRegionProcedure); + .submitProcedure(moveRegionProcedure); countDownLatch.await(); UTIL.getMiniHBaseCluster().stopMaster(0); UTIL.getMiniHBaseCluster().startMaster(); // wait until master initialized - UTIL.waitFor(30000, () -> UTIL.getMiniHBaseCluster().getMaster() != null && - UTIL.getMiniHBaseCluster().getMaster().isInitialized()); + UTIL.waitFor(30000, () -> UTIL.getMiniHBaseCluster().getMaster() != null + && UTIL.getMiniHBaseCluster().getMaster().isInitialized()); Assert.assertTrue("Should be 3 RS after master restart", UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size() == 3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalanceThrottling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalanceThrottling.java index 7bc8e5b56ea7..16d1481437bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalanceThrottling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalanceThrottling.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ import org.junit.experimental.categories.Category; @Ignore // SimpleLoadBalancer seems borked whether AMv2 or not. Disabling till gets attention. 
-@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterBalanceThrottling { @ClassRule @@ -52,7 +52,7 @@ public class TestMasterBalanceThrottling { @Before public void setupConfiguration() { TEST_UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - "org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer"); + "org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer"); } @After @@ -116,8 +116,7 @@ private TableName createTable(String table) throws IOException { TableName tableName = TableName.valueOf(table); byte[] startKey = new byte[] { 0x00 }; byte[] stopKey = new byte[] { 0x7f }; - TEST_UTIL.createTable(tableName, new byte[][] { FAMILYNAME }, 1, startKey, stopKey, - 100); + TEST_UTIL.createTable(tableName, new byte[][] { FAMILYNAME }, 1, startKey, stopKey, 100); return tableName; } @@ -128,7 +127,7 @@ private Thread startBalancerChecker(final HMaster master, final AtomicInteger ma public void run() { while (!stop.get()) { maxCount.set(Math.max(maxCount.get(), - master.getAssignmentManager().getRegionStates().getRegionsInTransitionCount())); + master.getAssignmentManager().getRegionStates().getRegionsInTransitionCount())); try { Thread.sleep(10); } catch (InterruptedException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalancerNPE.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalancerNPE.java index 5ce8a6842826..d7ed86d8fd6b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalancerNPE.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalancerNPE.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; @@ -154,7 +153,6 @@ public void testBalancerNPE() throws Exception { return invocation.callRealMethod(); }).when(spiedAssignmentManager).balance(Mockito.any()); - try { final AtomicReference exceptionRef = new AtomicReference(null); Thread unassignThread = new Thread(() -> { @@ -186,8 +184,8 @@ public void testBalancerNPE() throws Exception { */ TEST_UTIL.getAdmin().balancerSwitch(true, false); /** - * Before HBASE-26712,here invokes {@link AssignmentManager#balance(RegionPlan)} - * which may throw NPE. + * Before HBASE-26712,here invokes {@link AssignmentManager#balance(RegionPlan)} which may + * throw NPE. */ master.balanceOrUpdateMetrics(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java index 45b57b40c101..984796efffa6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import java.lang.reflect.Field; @@ -41,12 +40,12 @@ /** * Tests to validate if HMaster default chores are scheduled */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterChoreScheduled { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterChoreScheduled.class); + HBaseClassTestRule.forClass(TestMasterChoreScheduled.class); private static HMaster hMaster; @@ -77,15 +76,15 @@ public void testDefaultScheduledChores() { // test if replicationBarrierCleaner chore is scheduled by default in HMaster init TestChoreField replicationBarrierCleanerTestChoreField = - new TestChoreField<>(); + new TestChoreField<>(); ReplicationBarrierCleaner replicationBarrierCleaner = - replicationBarrierCleanerTestChoreField.getChoreObj("replicationBarrierCleaner"); + replicationBarrierCleanerTestChoreField.getChoreObj("replicationBarrierCleaner"); replicationBarrierCleanerTestChoreField.testIfChoreScheduled(replicationBarrierCleaner); // test if clusterStatusChore chore is scheduled by default in HMaster init TestChoreField clusterStatusChoreTestChoreField = new TestChoreField<>(); - ClusterStatusChore clusterStatusChore = clusterStatusChoreTestChoreField - .getChoreObj("clusterStatusChore"); + ClusterStatusChore clusterStatusChore = + clusterStatusChoreTestChoreField.getChoreObj("clusterStatusChore"); clusterStatusChoreTestChoreField.testIfChoreScheduled(clusterStatusChore); // test if balancerChore chore is scheduled by default in HMaster init @@ -94,16 +93,14 @@ public void testDefaultScheduledChores() { balancerChoreTestChoreField.testIfChoreScheduled(balancerChore); // test if normalizerChore chore is scheduled by default in HMaster init - ScheduledChore regionNormalizerChore = hMaster.getRegionNormalizerManager() - .getRegionNormalizerChore(); - TestChoreField regionNormalizerChoreTestChoreField = - new TestChoreField<>(); + ScheduledChore regionNormalizerChore = + hMaster.getRegionNormalizerManager().getRegionNormalizerChore(); + TestChoreField regionNormalizerChoreTestChoreField = new TestChoreField<>(); regionNormalizerChoreTestChoreField.testIfChoreScheduled(regionNormalizerChore); // test if catalogJanitorChore chore is scheduled by default in HMaster init TestChoreField catalogJanitorTestChoreField = new TestChoreField<>(); - CatalogJanitor catalogJanitor = catalogJanitorTestChoreField - .getChoreObj("catalogJanitorChore"); + CatalogJanitor catalogJanitor = catalogJanitorTestChoreField.getChoreObj("catalogJanitorChore"); catalogJanitorTestChoreField.testIfChoreScheduled(catalogJanitor); // test if hbckChore chore is scheduled by default in HMaster init @@ -113,8 +110,8 @@ public void testDefaultScheduledChores() { } /** - * Reflect into the {@link HMaster} instance and find by field name a specified instance - * of {@link ScheduledChore}. + * Reflect into the {@link HMaster} instance and find by field name a specified instance of + * {@link ScheduledChore}. 
*/ private static class TestChoreField { @@ -126,7 +123,7 @@ private E getChoreObj(String fieldName) { return (E) masterField.get(hMaster); } catch (Exception e) { throw new AssertionError( - "Unable to retrieve field '" + fieldName + "' from HMaster instance.", e); + "Unable to retrieve field '" + fieldName + "' from HMaster instance.", e); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterCoprocessorServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterCoprocessorServices.java index 293ac5c96766..dd1ab3fb277e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterCoprocessorServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterCoprocessorServices.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,10 +64,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService; /** - * Tests that the MasterRpcServices is correctly searching for implementations of the - * Coprocessor Service and not just the "default" implementations of those services. + * Tests that the MasterRpcServices is correctly searching for implementations of the Coprocessor + * Service and not just the "default" implementations of those services. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestMasterCoprocessorServices { @ClassRule @@ -79,19 +79,23 @@ private static class MockAccessController implements AccessControlService.Interf @Override public void grant(RpcController controller, GrantRequest request, - RpcCallback done) {} + RpcCallback done) { + } @Override public void revoke(RpcController controller, RevokeRequest request, - RpcCallback done) {} + RpcCallback done) { + } @Override public void getUserPermissions(RpcController controller, GetUserPermissionsRequest request, - RpcCallback done) {} + RpcCallback done) { + } @Override public void checkPermissions(RpcController controller, CheckPermissionsRequest request, - RpcCallback done) {} + RpcCallback done) { + } @Override public void hasPermission(RpcController controller, HasPermissionRequest request, @@ -134,12 +138,12 @@ public void listLabels(RpcController controller, ListLabelsRequest request, @Before public void setup() { masterServices = mock(MasterRpcServices.class); - when(masterServices.hasAccessControlServiceCoprocessor( - any(MasterCoprocessorHost.class))).thenCallRealMethod(); - when(masterServices.hasVisibilityLabelsServiceCoprocessor( - any(MasterCoprocessorHost.class))).thenCallRealMethod(); - when(masterServices.checkCoprocessorWithService( - any(List.class), any(Class.class))).thenCallRealMethod(); + when(masterServices.hasAccessControlServiceCoprocessor(any(MasterCoprocessorHost.class))) + .thenCallRealMethod(); + when(masterServices.hasVisibilityLabelsServiceCoprocessor(any(MasterCoprocessorHost.class))) + .thenCallRealMethod(); + when(masterServices.checkCoprocessorWithService(any(List.class), any(Class.class))) + .thenCallRealMethod(); } @Test @@ -147,20 +151,20 @@ public void testAccessControlServices() { MasterCoprocessor defaultImpl = new AccessController(); MasterCoprocessor customImpl = new MockAccessController(); MasterCoprocessor unrelatedImpl = new JMXListener(); - assertTrue(masterServices.checkCoprocessorWithService( - Collections.singletonList(defaultImpl), 
AccessControlService.Interface.class)); - assertTrue(masterServices.checkCoprocessorWithService( - Collections.singletonList(customImpl), AccessControlService.Interface.class)); - assertFalse(masterServices.checkCoprocessorWithService( - Collections.emptyList(), AccessControlService.Interface.class)); - assertFalse(masterServices.checkCoprocessorWithService( - null, AccessControlService.Interface.class)); - assertFalse(masterServices.checkCoprocessorWithService( - Collections.singletonList(unrelatedImpl), AccessControlService.Interface.class)); - assertTrue(masterServices.checkCoprocessorWithService( - Arrays.asList(unrelatedImpl, customImpl), AccessControlService.Interface.class)); - assertTrue(masterServices.checkCoprocessorWithService( - Arrays.asList(unrelatedImpl, defaultImpl), AccessControlService.Interface.class)); + assertTrue(masterServices.checkCoprocessorWithService(Collections.singletonList(defaultImpl), + AccessControlService.Interface.class)); + assertTrue(masterServices.checkCoprocessorWithService(Collections.singletonList(customImpl), + AccessControlService.Interface.class)); + assertFalse(masterServices.checkCoprocessorWithService(Collections.emptyList(), + AccessControlService.Interface.class)); + assertFalse( + masterServices.checkCoprocessorWithService(null, AccessControlService.Interface.class)); + assertFalse(masterServices.checkCoprocessorWithService(Collections.singletonList(unrelatedImpl), + AccessControlService.Interface.class)); + assertTrue(masterServices.checkCoprocessorWithService(Arrays.asList(unrelatedImpl, customImpl), + AccessControlService.Interface.class)); + assertTrue(masterServices.checkCoprocessorWithService(Arrays.asList(unrelatedImpl, defaultImpl), + AccessControlService.Interface.class)); } @Test @@ -168,19 +172,19 @@ public void testVisibilityLabelServices() { MasterCoprocessor defaultImpl = new VisibilityController(); MasterCoprocessor customImpl = new MockVisibilityController(); MasterCoprocessor unrelatedImpl = new JMXListener(); - assertTrue(masterServices.checkCoprocessorWithService( - Collections.singletonList(defaultImpl), VisibilityLabelsService.Interface.class)); - assertTrue(masterServices.checkCoprocessorWithService( - Collections.singletonList(customImpl), VisibilityLabelsService.Interface.class)); - assertFalse(masterServices.checkCoprocessorWithService( - Collections.emptyList(), VisibilityLabelsService.Interface.class)); - assertFalse(masterServices.checkCoprocessorWithService( - null, VisibilityLabelsService.Interface.class)); - assertFalse(masterServices.checkCoprocessorWithService( - Collections.singletonList(unrelatedImpl), VisibilityLabelsService.Interface.class)); - assertTrue(masterServices.checkCoprocessorWithService( - Arrays.asList(unrelatedImpl, customImpl), VisibilityLabelsService.Interface.class)); - assertTrue(masterServices.checkCoprocessorWithService( - Arrays.asList(unrelatedImpl, defaultImpl), VisibilityLabelsService.Interface.class)); + assertTrue(masterServices.checkCoprocessorWithService(Collections.singletonList(defaultImpl), + VisibilityLabelsService.Interface.class)); + assertTrue(masterServices.checkCoprocessorWithService(Collections.singletonList(customImpl), + VisibilityLabelsService.Interface.class)); + assertFalse(masterServices.checkCoprocessorWithService(Collections.emptyList(), + VisibilityLabelsService.Interface.class)); + assertFalse( + masterServices.checkCoprocessorWithService(null, VisibilityLabelsService.Interface.class)); + 
assertFalse(masterServices.checkCoprocessorWithService(Collections.singletonList(unrelatedImpl), + VisibilityLabelsService.Interface.class)); + assertTrue(masterServices.checkCoprocessorWithService(Arrays.asList(unrelatedImpl, customImpl), + VisibilityLabelsService.Interface.class)); + assertTrue(masterServices.checkCoprocessorWithService(Arrays.asList(unrelatedImpl, defaultImpl), + VisibilityLabelsService.Interface.class)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterDryRunBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterDryRunBalancer.java index 5b41e0a9b1b8..50fc2de1177c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterDryRunBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterDryRunBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -39,11 +40,11 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({ MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterDryRunBalancer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterDryRunBalancer.class); + HBaseClassTestRule.forClass(TestMasterDryRunBalancer.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final byte[] FAMILYNAME = Bytes.toBytes("fam"); @@ -72,8 +73,7 @@ public void testDryRunBalancer() throws Exception { BalanceResponse response = master.balance(BalanceRequest.newBuilder().setDryRun(true).build()); assertTrue(response.isBalancerRan()); // we don't know for sure that it will be exactly half the regions - assertTrue( - response.getMovesCalculated() >= (regionsPerRs - 1) + assertTrue(response.getMovesCalculated() >= (regionsPerRs - 1) && response.getMovesCalculated() <= (regionsPerRs + 1)); // but we expect no moves executed due to dry run assertEquals(0, response.getMovesExecuted()); @@ -93,7 +93,6 @@ private TableName createTable(String table, int numRegions) throws IOException { return tableName; } - private HRegionServer unbalance(HMaster master, TableName tableName) throws Exception { waitForRegionsToSettle(master); @@ -112,7 +111,7 @@ private HRegionServer unbalance(HMaster master, TableName tableName) throws Exce } private void assertServerContainsAllRegions(ServerName serverName, TableName tableName) - throws IOException { + throws IOException { int numRegions = TEST_UTIL.getAdmin().getRegions(tableName).size(); assertEquals(numRegions, TEST_UTIL.getMiniHBaseCluster().getRegionServer(serverName).getRegions(tableName).size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index 9e0333b51031..b87e3734bc2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -20,6 +20,7 @@ import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; + import java.util.List; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.ClusterMetrics; @@ -42,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({FlakeyTests.class, LargeTests.class}) +@Category({ FlakeyTests.class, LargeTests.class }) public class TestMasterFailover { @ClassRule @@ -50,14 +51,14 @@ public class TestMasterFailover { HBaseClassTestRule.forClass(TestMasterFailover.class); private static final Logger LOG = LoggerFactory.getLogger(TestMasterFailover.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); /** * Simple test of master failover. *
* <p>
      - * Starts with three masters. Kills a backup master. Then kills the active - * master. Ensures the final master becomes active and we can still contact - * the cluster. + * Starts with three masters. Kills a backup master. Then kills the active master. Ensures the + * final master becomes active and we can still contact the cluster. */ @Test public void testSimpleMasterFailover() throws Exception { @@ -67,8 +68,8 @@ public void testSimpleMasterFailover() throws Exception { // Start the cluster HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); try { - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS) + .numRegionServers(NUM_RS).numDataNodes(NUM_RS).build(); TEST_UTIL.startMiniCluster(option); SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); @@ -121,15 +122,15 @@ public void testSimpleMasterFailover() throws Exception { assertEquals(1, numActive); assertEquals(2, masterThreads.size()); int rsCount = masterThreads.get(activeIndex).getMaster().getClusterMetrics() - .getLiveServerMetrics().size(); - LOG.info("Active master " + active.getServerName() + " managing " + rsCount + - " regions servers"); + .getLiveServerMetrics().size(); + LOG.info( + "Active master " + active.getServerName() + " managing " + rsCount + " regions servers"); assertEquals(3, rsCount); // wait for the active master to acknowledge loss of the backup from ZK final HMaster activeFinal = active; - TEST_UTIL.waitFor( - TimeUnit.MINUTES.toMillis(5), () -> activeFinal.getBackupMasters().size() == 1); + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), + () -> activeFinal.getBackupMasters().size() == 1); // Check that ClusterStatus reports the correct active and backup masters assertNotNull(active); @@ -168,9 +169,9 @@ public void testSimpleMasterFailover() throws Exception { } /** - * Test meta in transition when master failover. - * This test used to manipulate region state up in zk. That is not allowed any more in hbase2 - * so I removed that messing. That makes this test anemic. + * Test meta in transition when master failover. This test used to manipulate region state up in + * zk. That is not allowed any more in hbase2 so I removed that messing. That makes this test + * anemic. 
*/ @Test public void testMetaInTransitionWhenMasterFailover() throws Exception { @@ -193,8 +194,7 @@ public void testMetaInTransitionWhenMasterFailover() throws Exception { // meta should remain where it was RegionState metaState = MetaTableLocator.getMetaRegionState(hrs.getZooKeeper()); - assertEquals("hbase:meta should be online on RS", - metaState.getServerName(), metaServerName); + assertEquals("hbase:meta should be online on RS", metaState.getServerName(), metaServerName); assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState()); // Start up a new master @@ -206,8 +206,7 @@ public void testMetaInTransitionWhenMasterFailover() throws Exception { // ensure meta is still deployed on RS metaState = MetaTableLocator.getMetaRegionState(activeMaster.getZooKeeper()); - assertEquals("hbase:meta should be online on RS", - metaState.getServerName(), metaServerName); + assertEquals("hbase:meta should be online on RS", metaState.getServerName(), metaServerName); assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState()); // Done, shutdown the cluster @@ -216,4 +215,3 @@ public void testMetaInTransitionWhenMasterFailover() throws Exception { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java index 56f2d52e12d2..95e27a3a91c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestMasterFailoverBalancerPersistence { @ClassRule @@ -43,9 +43,8 @@ public class TestMasterFailoverBalancerPersistence { HBaseClassTestRule.forClass(TestMasterFailoverBalancerPersistence.class); /** - * Test that if the master fails, the load balancer maintains its - * state (running or not) when the next master takes over - * + * Test that if the master fails, the load balancer maintains its state (running or not) when the + * next master takes over * @throws Exception */ @Test @@ -53,8 +52,7 @@ public void testMasterFailoverBalancerPersistence() throws Exception { // Start the cluster HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(3).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(3).build(); TEST_UTIL.startMiniCluster(option); SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); @@ -86,7 +84,6 @@ public void testMasterFailoverBalancerPersistence() throws Exception { /** * Kill the master and wait for a new active master to show up - * * @param cluster * @return the new active master * @throws InterruptedException @@ -107,12 +104,10 @@ private HMaster killActiveAndWaitForNewActive(SingleProcessHBaseCluster cluster) /** * return the index of the active master in the cluster - * - * @throws org.apache.hadoop.hbase.MasterNotRunningException - * if no active master found + * @throws org.apache.hadoop.hbase.MasterNotRunningException if no active master found */ private int getActiveMasterIndex(SingleProcessHBaseCluster cluster) - throws MasterNotRunningException { + throws MasterNotRunningException { // get all the master threads List masterThreads = cluster.getMasterThreads(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java index f08462424a50..7e08749996a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ /** * Test the master filesystem in a local cluster */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterFileSystem { @ClassRule @@ -85,13 +85,12 @@ public void testFsUriSetProperly() throws Exception { @Test public void testCheckNoTempDir() throws Exception { final MasterFileSystem masterFileSystem = - UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); + UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); final TableName tableName = TableName.valueOf(name.getMethodName()); final byte[] FAM = Bytes.toBytes("fam"); - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d") - }; + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d") }; UTIL.createTable(tableName, FAM, splitKeys); @@ -109,8 +108,7 @@ public void testCheckNoTempDir() throws Exception { UTIL.getAdmin().disableTable(tableName); final Path tempDir = masterFileSystem.getTempDir(); - final Path tempNsDir = CommonFSUtils.getNamespaceDir(tempDir, - tableName.getNamespaceAsString()); + final Path tempNsDir = CommonFSUtils.getNamespaceDir(tempDir, tableName.getNamespaceAsString()); final FileSystem fs = masterFileSystem.getFileSystem(); // checks the temporary directory does not exist diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java index 62edb76da0f2..3185368b26b2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.Trackers.FILE; import static org.junit.Assert.assertEquals; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -35,10 +36,10 @@ import org.junit.rules.TestName; /** - * Test the master filesystem in a local cluster with - * Store File Tracking explicitly set in global config + * Test the master filesystem in a local cluster with Store File Tracking explicitly set in global + * config */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterFileSystemWithStoreFileTracking { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithWALDir.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithWALDir.java index 51cedf65188b..986d6ae5039a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithWALDir.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithWALDir.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ /** * Test the master filesystem in a local cluster */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterFileSystemWithWALDir { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterHandlerFullWhenTransitRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterHandlerFullWhenTransitRegion.java index f4fcd9deb6e3..99696da95277 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterHandlerFullWhenTransitRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterHandlerFullWhenTransitRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.Optional; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -42,8 +41,8 @@ @Category({ MasterTests.class, LargeTests.class }) public class TestMasterHandlerFullWhenTransitRegion { - private static Logger LOG = LoggerFactory - .getLogger(TestMasterHandlerFullWhenTransitRegion.class.getName()); + private static Logger LOG = + LoggerFactory.getLogger(TestMasterHandlerFullWhenTransitRegion.class.getName()); @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -56,8 +55,8 @@ public class TestMasterHandlerFullWhenTransitRegion { @BeforeClass public static void setUp() throws Exception { UTIL.getConfiguration().setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - DelayOpenCP.class.getName()); - //set handler number to 1. + DelayOpenCP.class.getName()); + // set handler number to 1. 
UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 1); UTIL.startMiniCluster(2); UTIL.createTable(TableName.valueOf(TABLENAME), "fa"); @@ -66,14 +65,13 @@ public static void setUp() throws Exception { @Test public void test() throws Exception { RegionInfo regionInfo = UTIL.getAdmin().getRegions(TableName.valueOf(TABLENAME)).get(0); - //See HBASE-21754 - //There is Only one handler, if ReportRegionStateTransitionRequest executes in the same kind + // See HBASE-21754 + // There is Only one handler, if ReportRegionStateTransitionRequest executes in the same kind // of thread with moveRegion, it will lock each other. Making the move operation can not finish. UTIL.getAdmin().move(regionInfo.getEncodedNameAsBytes()); LOG.info("Region move complete"); } - /** * Make open region very slow */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java index 068dead7255a..ce5e4a63ec84 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,11 +56,11 @@ public class TestMasterMetrics { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterMetrics.class); + HBaseClassTestRule.forClass(TestMasterMetrics.class); private static final Logger LOG = LoggerFactory.getLogger(TestMasterMetrics.class); private static final MetricsAssertHelper metricsHelper = - CompatibilityFactory.getInstance(MetricsAssertHelper.class); + CompatibilityFactory.getInstance(MetricsAssertHelper.class); private static SingleProcessHBaseCluster cluster; private static HMaster master; @@ -78,7 +78,7 @@ protected MasterRpcServices createRpcServices() throws IOException { @Override public RegionServerStartupResponse regionServerStartup(RpcController controller, - RegionServerStartupRequest request) throws ServiceException { + RegionServerStartupRequest request) throws ServiceException { RegionServerStartupResponse resp = super.regionServerStartup(controller, request); ServerManager serverManager = getServerManager(); // to let the region server actual online otherwise we can not assign meta region @@ -87,9 +87,8 @@ public RegionServerStartupResponse regionServerStartup(RpcController controller, try { serverManager.regionServerReport(sn, ServerMetricsBuilder.newBuilder(sn).setVersionNumber(sm.getVersionNumber()) - .setVersion(sm.getVersion()) - .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()) - .build()); + .setVersion(sm.getVersion()) + .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); } catch (YouAreDeadException e) { throw new UncheckedIOException(e); } @@ -102,7 +101,7 @@ public RegionServerStartupResponse regionServerStartup(RpcController controller, } public static class MyRegionServer - extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer { + extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer { public MyRegionServer(Configuration conf) throws IOException, InterruptedException { super(conf); @@ -119,7 +118,7 @@ public static void startCluster() throws Exception { LOG.info("Starting cluster"); // Set master class and use default values for other options. 
StartTestingClusterOption option = StartTestingClusterOption.builder() - .masterClass(MyMaster.class).rsClass(MyRegionServer.class).build(); + .masterClass(MyMaster.class).rsClass(MyRegionServer.class).build(); TEST_UTIL.startMiniCluster(option); cluster = TEST_UTIL.getHBaseCluster(); LOG.info("Waiting for active/ready master"); @@ -137,16 +136,15 @@ public static void after() throws Exception { public void testClusterRequests() throws Exception { // sending fake request to master to see how metric value has changed RegionServerStatusProtos.RegionServerReportRequest.Builder request = - RegionServerStatusProtos.RegionServerReportRequest.newBuilder(); + RegionServerStatusProtos.RegionServerReportRequest.newBuilder(); ServerName serverName = cluster.getMaster(0).getServerName(); request.setServer(ProtobufUtil.toServerName(serverName)); long expectedRequestNumber = 10000; MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource(); ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder() - .setTotalNumberOfRequests(expectedRequestNumber) - .setReadRequestsCount(expectedRequestNumber) - .setWriteRequestsCount(expectedRequestNumber).build(); + .setTotalNumberOfRequests(expectedRequestNumber).setReadRequestsCount(expectedRequestNumber) + .setWriteRequestsCount(expectedRequestNumber).build(); request.setLoad(sl); master.getMasterRpcServices().regionServerReport(null, request.build()); @@ -156,10 +154,9 @@ public void testClusterRequests() throws Exception { expectedRequestNumber = 15000; - sl = ClusterStatusProtos.ServerLoad.newBuilder() - .setTotalNumberOfRequests(expectedRequestNumber) - .setReadRequestsCount(expectedRequestNumber) - .setWriteRequestsCount(expectedRequestNumber).build(); + sl = ClusterStatusProtos.ServerLoad.newBuilder().setTotalNumberOfRequests(expectedRequestNumber) + .setReadRequestsCount(expectedRequestNumber).setWriteRequestsCount(expectedRequestNumber) + .build(); request.setLoad(sl); master.getMasterRpcServices().regionServerReport(null, request.build()); @@ -184,7 +181,7 @@ public void testDefaultMasterMetrics() throws Exception { metricsHelper.assertTag("clusterId", master.getClusterId(), masterSource); metricsHelper.assertTag("zookeeperQuorum", master.getZooKeeper().getQuorum(), masterSource); - metricsHelper.assertCounter(MetricsMasterSource.SERVER_CRASH_METRIC_PREFIX+"SubmittedCount", + metricsHelper.assertCounter(MetricsMasterSource.SERVER_CRASH_METRIC_PREFIX + "SubmittedCount", 0, masterSource); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java index 960522c2463f..f301814587a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java @@ -47,7 +47,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterMetricsWrapper { @ClassRule @@ -73,16 +73,17 @@ public static void teardown() throws Exception { public void testInfo() { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master); - assertEquals( - master.getRegionNormalizerManager().getSplitPlanCount(), info.getSplitPlanCount(), 0); - assertEquals( - 
master.getRegionNormalizerManager().getMergePlanCount(), info.getMergePlanCount(), 0); + assertEquals(master.getRegionNormalizerManager().getSplitPlanCount(), info.getSplitPlanCount(), + 0); + assertEquals(master.getRegionNormalizerManager().getMergePlanCount(), info.getMergePlanCount(), + 0); assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0); assertEquals(master.getClusterId(), info.getClusterId()); assertEquals(master.getMasterActiveTime(), info.getActiveTime()); assertEquals(master.getMasterStartTime(), info.getStartTime()); assertEquals(master.getMasterCoprocessors().length, info.getCoprocessors().length); - assertEquals(master.getServerManager().getOnlineServersList().size(), info.getNumRegionServers()); + assertEquals(master.getServerManager().getOnlineServersList().size(), + info.getNumRegionServers()); int regionServerCount = NUM_RS; assertEquals(regionServerCount, info.getNumRegionServers()); @@ -95,8 +96,8 @@ public void testInfo() { TEST_UTIL.getMiniHBaseCluster().waitOnRegionServer(index); // We stopped the regionserver but could take a while for the master to notice it so hang here // until it does... then move forward to see if metrics wrapper notices. - while (TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() == - regionServerCount ) { + while (TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers() + .size() == regionServerCount) { Threads.sleep(10); } assertEquals(regionServerCount - 1, info.getNumRegionServers()); @@ -108,14 +109,12 @@ public void testInfo() { @Test public void testQuotaSnapshotConversion() { - MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl( - TEST_UTIL.getHBaseCluster().getMaster()); - assertEquals(new SimpleImmutableEntry(1024L, 2048L), - info.convertSnapshot(new SpaceQuotaSnapshot( - SpaceQuotaStatus.notInViolation(), 1024L, 2048L))); - assertEquals(new SimpleImmutableEntry(4096L, 2048L), - info.convertSnapshot(new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 4096L, 2048L))); + MetricsMasterWrapperImpl info = + new MetricsMasterWrapperImpl(TEST_UTIL.getHBaseCluster().getMaster()); + assertEquals(new SimpleImmutableEntry(1024L, 2048L), info + .convertSnapshot(new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 1024L, 2048L))); + assertEquals(new SimpleImmutableEntry(4096L, 2048L), info.convertSnapshot( + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 4096L, 2048L))); } /** @@ -130,15 +129,14 @@ public void testOfflineRegion() throws Exception { RegionInfo hri; byte[] FAMILY = Bytes.toBytes("FAMILY"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(table) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); - TEST_UTIL.getAdmin().createTable(tableDescriptor, Bytes.toBytes("A"), - Bytes.toBytes("Z"), 5); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + TEST_UTIL.getAdmin().createTable(tableDescriptor, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5); // wait till the table is assigned long timeoutTime = EnvironmentEdgeManager.currentTime() + 1000; while (true) { - List regions = master.getAssignmentManager(). 
- getRegionStates().getRegionsOfTable(table); + List regions = + master.getAssignmentManager().getRegionStates().getRegionsOfTable(table); if (regions.size() > 3) { hri = regions.get(2); break; @@ -159,8 +157,8 @@ public void testOfflineRegion() throws Exception { timeoutTime = EnvironmentEdgeManager.currentTime() + 800; RegionStates regionStates = master.getAssignmentManager().getRegionStates(); while (true) { - if (regionStates.getRegionByStateOfTable(table) - .get(RegionState.State.OFFLINE).contains(hri)) { + if (regionStates.getRegionByStateOfTable(table).get(RegionState.State.OFFLINE) + .contains(hri)) { break; } long now = EnvironmentEdgeManager.currentTime(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index 939ebe20f9ff..1488a0735b88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,14 +48,13 @@ import org.slf4j.LoggerFactory; /** - * Standup the master and fake it to test various aspects of master function. - * Does NOT spin up a mini hbase nor mini dfs cluster testing master (it does - * put up a zk cluster but this is usually pretty fast compared). Also, should - * be possible to inject faults at points difficult to get at in cluster context. - * TODO: Speed up the zk connection by Master. It pauses 5 seconds establishing + * Standup the master and fake it to test various aspects of master function. Does NOT spin up a + * mini hbase nor mini dfs cluster testing master (it does put up a zk cluster but this is usually + * pretty fast compared). Also, should be possible to inject faults at points difficult to get at in + * cluster context. TODO: Speed up the zk connection by Master. It pauses 5 seconds establishing * session. */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterNoCluster { @ClassRule @@ -72,7 +71,7 @@ public class TestMasterNoCluster { @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration c = TESTUTIL.getConfiguration(); - // We use local filesystem. Set it so it writes into the testdir. + // We use local filesystem. Set it so it writes into the testdir. CommonFSUtils.setRootDir(c, TESTUTIL.getDataTestDir()); DefaultMetricsSystem.setMiniClusterMode(true); // Startup a mini zk cluster. @@ -85,11 +84,9 @@ public static void tearDownAfterClass() throws Exception { } @After - public void tearDown() - throws KeeperException, ZooKeeperConnectionException, IOException { + public void tearDown() throws KeeperException, ZooKeeperConnectionException, IOException { // Make sure zk is clean before we run the next test. 
- ZKWatcher zkw = new ZKWatcher(TESTUTIL.getConfiguration(), - "@Before", new Abortable() { + ZKWatcher zkw = new ZKWatcher(TESTUTIL.getConfiguration(), "@Before", new Abortable() { @Override public void abort(String why, Throwable e) { throw new RuntimeException(why, e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index 54416cdd944b..53910c105b5d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -78,15 +78,15 @@ public class TestMasterOperationsForRegionReplicas { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterOperationsForRegionReplicas.class); + HBaseClassTestRule.forClass(TestMasterOperationsForRegionReplicas.class); private static final Logger LOG = LoggerFactory.getLogger(TestRegionPlacement.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Connection CONNECTION = null; private static Admin ADMIN; private static int numSlaves = 2; - private final static StartTestingClusterOption option = StartTestingClusterOption.builder(). - numRegionServers(numSlaves).numMasters(1).numAlwaysStandByMasters(1).build(); + private final static StartTestingClusterOption option = StartTestingClusterOption.builder() + .numRegionServers(numSlaves).numMasters(1).numAlwaysStandByMasters(1).build(); private static Configuration conf; @Rule @@ -100,7 +100,7 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL.getAdmin().balancerSwitch(false, true); resetConnections(); while (ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics() - .size() < numSlaves) { + .size() < numSlaves) { Thread.sleep(100); } } @@ -126,8 +126,8 @@ public void testCreateTableWithSingleReplica() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { TableDescriptor desc = - TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(numReplica) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(numReplica) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); TEST_UTIL.waitUntilNoRegionsInTransition(); @@ -148,8 +148,8 @@ public void testCreateTableWithMultipleReplicas() throws Exception { final int numReplica = 2; try { TableDescriptor desc = - TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(numReplica) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(numReplica) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); TEST_UTIL.waitUntilNoRegionsInTransition(); @@ -197,12 +197,12 @@ public void testCreateTableWithMultipleReplicas() 
throws Exception { // up at same coordinates -- and the assignment retention logic has a chance to cut in. List rsports = new ArrayList<>(); for (JVMClusterUtil.RegionServerThread rst : TEST_UTIL.getHBaseCluster() - .getLiveRegionServerThreads()) { + .getLiveRegionServerThreads()) { rsports.add(rst.getRegionServer().getRpcServer().getListenerAddress().getPort()); } TEST_UTIL.shutdownMiniHBaseCluster(); StartTestingClusterOption option = - StartTestingClusterOption.builder().numRegionServers(numSlaves).rsPorts(rsports).build(); + StartTestingClusterOption.builder().numRegionServers(numSlaves).rsPorts(rsports).build(); TEST_UTIL.startMiniHBaseCluster(option); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); TEST_UTIL.waitUntilNoRegionsInTransition(); @@ -234,9 +234,10 @@ public void testCreateTableWithMultipleReplicas() throws Exception { TEST_UTIL.waitUntilAllRegionsAssigned(tableName); TEST_UTIL.waitUntilNoRegionsInTransition(); List regions = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getRegionsOfTable(tableName); - assertTrue("regions.size=" + regions.size() + ", numRegions=" + numRegions + ", numReplica=" + - numReplica, regions.size() == numRegions * (numReplica + 1)); + .getRegionStates().getRegionsOfTable(tableName); + assertTrue("regions.size=" + regions.size() + ", numRegions=" + numRegions + ", numReplica=" + + numReplica, + regions.size() == numRegions * (numReplica + 1)); // decrease the replica(earlier, table was modified to have a replica count of numReplica + 1) ADMIN.disableTable(tableName); @@ -247,7 +248,7 @@ public void testCreateTableWithMultipleReplicas() throws Exception { TEST_UTIL.waitUntilAllRegionsAssigned(tableName); TEST_UTIL.waitUntilNoRegionsInTransition(); regions = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .getRegionsOfTable(tableName); + .getRegionsOfTable(tableName); assertEquals(numRegions * numReplica, regions.size()); // also make sure the meta table has the replica locations removed hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName); @@ -276,9 +277,8 @@ private void assertRegionStateNotNull(List hris, int numRegions, int for (int i = 0; i < numRegions; i++) { for (int j = 0; j < numReplica; j++) { RegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j); - RegionState state = - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .getRegionState(replica); + RegionState state = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getRegionState(replica); assertNotNull(state); } } @@ -293,8 +293,8 @@ public void testIncompleteMetaTableReplicaInformation() throws Exception { // Create a table and let the meta table be updated with the location of the // region locations. 
TableDescriptor desc = - TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(numReplica) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(numReplica) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); TEST_UTIL.waitUntilNoRegionsInTransition(); @@ -325,7 +325,7 @@ public void testIncompleteMetaTableReplicaInformation() throws Exception { TEST_UTIL.waitUntilAllRegionsAssigned(tableName); TEST_UTIL.waitUntilNoRegionsInTransition(); List regions = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getRegionsOfTable(tableName); + .getRegionStates().getRegionsOfTable(tableName); assertEquals(numRegions * numReplica, regions.size()); } finally { ADMIN.disableTable(tableName); @@ -350,10 +350,10 @@ public boolean visit(Result r) throws IOException { assertEquals(numRegions, count.get()); } - private void validateFromSnapshotFromMeta(HBaseTestingUtil util, TableName table, - int numRegions, int numReplica, Connection connection) throws IOException { + private void validateFromSnapshotFromMeta(HBaseTestingUtil util, TableName table, int numRegions, + int numReplica, Connection connection) throws IOException { SnapshotOfRegionAssignmentFromMeta snapshot = - new SnapshotOfRegionAssignmentFromMeta(connection); + new SnapshotOfRegionAssignmentFromMeta(connection); snapshot.initialize(); Map regionToServerMap = snapshot.getRegionToRegionServerMap(); assert (regionToServerMap.size() == numRegions * numReplica); @@ -380,7 +380,7 @@ private void validateFromSnapshotFromMeta(HBaseTestingUtil util, TableName table private void validateSingleRegionServerAssignment(Connection connection, int numRegions, int numReplica) throws IOException { SnapshotOfRegionAssignmentFromMeta snapshot = - new SnapshotOfRegionAssignmentFromMeta(connection); + new SnapshotOfRegionAssignmentFromMeta(connection); snapshot.initialize(); Map regionToServerMap = snapshot.getRegionToRegionServerMap(); assertEquals(regionToServerMap.size(), numRegions * numReplica); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java index 63e5018f644a..8734bae803ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestMasterQosFunction extends QosTestBase { @ClassRule @@ -52,7 +52,6 @@ public class TestMasterQosFunction extends QosTestBase { private MasterRpcServices rpcServices; private MasterAnnotationReadingPriorityFunction qosFunction; - @Before public void setUp() { conf = HBaseConfiguration.create(); @@ -65,29 +64,25 @@ public void setUp() { public void testRegionInTransition() throws IOException { // Check ReportRegionInTransition HBaseProtos.RegionInfo meta_ri = - ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO); + ProtobufUtil.toRegionInfo(RegionInfoBuilder.FIRST_META_REGIONINFO); HBaseProtos.RegionInfo normal_ri = - ProtobufUtil.toRegionInfo(RegionInfoBuilder.newBuilder(TableName.valueOf("test:table")) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build()); - + ProtobufUtil.toRegionInfo(RegionInfoBuilder.newBuilder(TableName.valueOf("test:table")) + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build()); - RegionServerStatusProtos.RegionStateTransition metaTransition = RegionServerStatusProtos - .RegionStateTransition.newBuilder() - .addRegionInfo(meta_ri) - .setTransitionCode(RegionServerStatusProtos.RegionStateTransition.TransitionCode.CLOSED) - .build(); + RegionServerStatusProtos.RegionStateTransition metaTransition = + RegionServerStatusProtos.RegionStateTransition.newBuilder().addRegionInfo(meta_ri) + .setTransitionCode(RegionServerStatusProtos.RegionStateTransition.TransitionCode.CLOSED) + .build(); - RegionServerStatusProtos.RegionStateTransition normalTransition = RegionServerStatusProtos - .RegionStateTransition.newBuilder() - .addRegionInfo(normal_ri) - .setTransitionCode(RegionServerStatusProtos.RegionStateTransition.TransitionCode.CLOSED) - .build(); + RegionServerStatusProtos.RegionStateTransition normalTransition = + RegionServerStatusProtos.RegionStateTransition.newBuilder().addRegionInfo(normal_ri) + .setTransitionCode(RegionServerStatusProtos.RegionStateTransition.TransitionCode.CLOSED) + .build(); RegionServerStatusProtos.ReportRegionStateTransitionRequest metaTransitionRequest = RegionServerStatusProtos.ReportRegionStateTransitionRequest.newBuilder() .setServer(ProtobufUtil.toServerName(ServerName.valueOf("locahost:60020", 100))) - .addTransition(normalTransition) - .addTransition(metaTransition).build(); + .addTransition(normalTransition).addTransition(metaTransition).build(); RegionServerStatusProtos.ReportRegionStateTransitionRequest normalTransitionRequest = RegionServerStatusProtos.ReportRegionStateTransitionRequest.newBuilder() @@ -95,8 +90,7 @@ public void testRegionInTransition() throws IOException { .addTransition(normalTransition).build(); final String reportFuncName = "ReportRegionStateTransition"; - checkMethod(conf, reportFuncName, 300, qosFunction, - metaTransitionRequest); + checkMethod(conf, reportFuncName, 300, qosFunction, metaTransitionRequest); checkMethod(conf, reportFuncName, HConstants.HIGH_QOS, qosFunction, normalTransitionRequest); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java index 
910692d93c30..c9f708388ed4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java @@ -56,7 +56,7 @@ public class TestMasterRepairMode { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterRepairMode.class); + HBaseClassTestRule.forClass(TestMasterRepairMode.class); @Rule public TestName name = new TestName(); @@ -94,7 +94,7 @@ public void testNewCluster() throws Exception { assertTrue(conn.getAdmin().isMasterInMaintenanceMode()); try (Table table = conn.getTable(TableName.META_TABLE_NAME); - ResultScanner scanner = table.getScanner(new Scan())) { + ResultScanner scanner = table.getScanner(new Scan())) { assertNotNull("Could not read meta.", scanner.next()); } } @@ -121,14 +121,14 @@ public void testExistingCluster() throws Exception { assertTrue(conn.getAdmin().isMasterInMaintenanceMode()); try (Table table = conn.getTable(TableName.META_TABLE_NAME); - ResultScanner scanner = table.getScanner(HConstants.TABLE_FAMILY); - Stream results = StreamSupport.stream(scanner.spliterator(), false)) { + ResultScanner scanner = table.getScanner(HConstants.TABLE_FAMILY); + Stream results = StreamSupport.stream(scanner.spliterator(), false)) { assertTrue("Did not find user table records while reading hbase:meta", results.anyMatch(r -> Arrays.equals(r.getRow(), testRepairMode.getName()))); } // use async table so we can set the timeout and retry value to let the operation fail fast AsyncTable table = conn.toAsyncConnection().getTableBuilder(testRepairMode) - .setScanTimeout(5, TimeUnit.SECONDS).setMaxRetries(2).build(); + .setScanTimeout(5, TimeUnit.SECONDS).setMaxRetries(2).build(); assertThrows("Should not be able to access user-space tables in repair mode.", Exception.class, () -> { try (ResultScanner scanner = table.getScanner(new Scan())) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java index aeba846c52c5..28a7a331a3ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestMasterRestartAfterDisablingTable { @ClassRule @@ -59,8 +59,7 @@ public class TestMasterRestartAfterDisablingTable { public TestName name = new TestName(); @Test - public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() - throws Exception { + public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() throws Exception { final int NUM_MASTERS = 2; final int NUM_REGIONS_TO_CREATE = 4; @@ -68,8 +67,8 @@ public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() log("Starting cluster"); Configuration conf = HBaseConfiguration.create(); HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(conf); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(NUM_MASTERS).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().numMasters(NUM_MASTERS).build(); TEST_UTIL.startMiniCluster(option); SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); log("Waiting for active/ready master"); @@ -91,8 +90,9 @@ public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() TEST_UTIL.getAdmin().disableTable(tableName); NavigableSet regions = HBaseTestingUtil.getAllOnlineRegions(cluster); - assertEquals("The number of regions for the table tableRestart should be 0 and only" + - "the catalog table should be present.", 1, regions.size()); + assertEquals("The number of regions for the table tableRestart should be 0 and only" + + "the catalog table should be present.", + 1, regions.size()); List masterThreads = cluster.getMasterThreads(); MasterThread activeMaster = null; @@ -101,13 +101,13 @@ public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() } else { activeMaster = masterThreads.get(1); } - activeMaster.getMaster().stop( - "stopping the active master so that the backup can become active"); + activeMaster.getMaster() + .stop("stopping the active master so that the backup can become active"); cluster.hbaseCluster.waitOnMaster(activeMaster); cluster.waitForActiveAndReadyMaster(); assertTrue("The table should not be in enabled state", - cluster.getMaster().getTableStateManager().isTableState( + cluster.getMaster().getTableStateManager().isTableState( TableName.valueOf(name.getMethodName()), TableState.State.DISABLED, TableState.State.DISABLING)); log("Enabling table\n"); @@ -119,10 +119,11 @@ public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() TEST_UTIL.waitUntilNoRegionsInTransition(60000); log("Verifying there are " + numRegions + " assigned on cluster\n"); regions = HBaseTestingUtil.getAllOnlineRegions(cluster); - assertEquals("The assigned regions were not onlined after master" + - " switch except for the catalog table.", 5, regions.size()); + assertEquals("The assigned regions were not onlined after master" + + " switch except for the catalog table.", + 5, regions.size()); assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager() - .isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED)); + .isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED)); ht.close(); TEST_UTIL.shutdownMiniCluster(); } @@ -131,4 +132,3 @@ private void log(String msg) { LOG.debug("\n\nTRR: " + msg + "\n"); } } - diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java index 41ad2cde7830..2695a2ea5856 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; + import java.io.IOException; import java.util.List; import java.util.concurrent.TimeUnit; @@ -43,15 +44,16 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestMasterShutdown { private static final Logger LOG = LoggerFactory.getLogger(TestMasterShutdown.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterShutdown.class); + HBaseClassTestRule.forClass(TestMasterShutdown.class); private HBaseTestingUtil htu; @@ -68,8 +70,8 @@ public void shutdownCluster() throws IOException { /** * Simple test of shutdown. *

      - * Starts with three masters. Tells the active master to shutdown the cluster. - * Verifies that all masters are properly shutdown. + * Starts with three masters. Tells the active master to shutdown the cluster. Verifies that all + * masters are properly shutdown. */ @Test public void testMasterShutdown() throws Exception { @@ -79,11 +81,8 @@ public void testMasterShutdown() throws Exception { // Start the cluster try { htu = new HBaseTestingUtil(conf); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(3) - .numRegionServers(1) - .numDataNodes(1) - .build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(3) + .numRegionServers(1).numDataNodes(1).build(); final SingleProcessHBaseCluster cluster = htu.startMiniCluster(option); // wait for all master thread to spawn and start their run loop. @@ -91,9 +90,8 @@ public void testMasterShutdown() throws Exception { final long oneSecond = TimeUnit.SECONDS.toMillis(1); assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond, () -> { final List masterThreads = cluster.getMasterThreads(); - return masterThreads != null - && masterThreads.size() >= 3 - && masterThreads.stream().allMatch(Thread::isAlive); + return masterThreads != null && masterThreads.size() >= 3 + && masterThreads.stream().allMatch(Thread::isAlive); })); // find the active master @@ -128,18 +126,13 @@ public void testMasterShutdown() throws Exception { public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception { LocalHBaseCluster hbaseCluster = null; try { - htu = new HBaseTestingUtil( - createMasterShutdownBeforeStartingAnyRegionServerConfiguration()); + htu = new HBaseTestingUtil(createMasterShutdownBeforeStartingAnyRegionServerConfiguration()); // configure a cluster with - final StartTestingClusterOption options = StartTestingClusterOption.builder() - .numDataNodes(1) - .numMasters(1) - .numRegionServers(0) - .masterClass(HMaster.class) - .rsClass(SingleProcessHBaseCluster.MiniHBaseClusterRegionServer.class) - .createRootDir(true) - .build(); + final StartTestingClusterOption options = StartTestingClusterOption.builder().numDataNodes(1) + .numMasters(1).numRegionServers(0).masterClass(HMaster.class) + .rsClass(SingleProcessHBaseCluster.MiniHBaseClusterRegionServer.class).createRootDir(true) + .build(); // Can't simply `htu.startMiniCluster(options)` because that method waits for the master to // start completely. 
However, this test's premise is that a partially started master should @@ -150,7 +143,7 @@ public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception { htu.startMiniZKCluster(options.getNumZkServers()); htu.createRootDir(); hbaseCluster = new LocalHBaseCluster(htu.getConfiguration(), options.getNumMasters(), - options.getNumRegionServers(), options.getMasterClass(), options.getRsClass()); + options.getNumRegionServers(), options.getMasterClass(), options.getRsClass()); final MasterThread masterThread = hbaseCluster.getMasters().get(0); masterThread.start(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java index 372224e56c17..36ad267be118 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,10 +46,10 @@ import org.slf4j.LoggerFactory; /** - * Test transitions of state across the master. Sets up the cluster once and - * then runs a couple of tests. + * Test transitions of state across the master. Sets up the cluster once and then runs a couple of + * tests. */ -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestMasterTransitions { @ClassRule @@ -59,16 +59,17 @@ public class TestMasterTransitions { private static final Logger LOG = LoggerFactory.getLogger(TestMasterTransitions.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final TableName TABLENAME = TableName.valueOf("master_transitions"); - private static final byte [][] FAMILIES = new byte [][] {Bytes.toBytes("a"), - Bytes.toBytes("b"), Bytes.toBytes("c")}; + private static final byte[][] FAMILIES = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; /** * Start up a mini cluster and put a small table of many empty regions into it. * @throws Exception */ - @BeforeClass public static void beforeAllTests() throws Exception { + @BeforeClass + public static void beforeAllTests() throws Exception { TEST_UTIL.startMiniCluster(2); - // Create a table of three families. This will assign a region. + // Create a table of three families. This will assign a region. TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES); Table t = TEST_UTIL.getConnection().getTable(TABLENAME); int countOfRegions = -1; @@ -80,412 +81,226 @@ public class TestMasterTransitions { t.close(); } - @AfterClass public static void afterAllTests() throws Exception { + @AfterClass + public static void afterAllTests() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Before public void setup() throws IOException { + @Before + public void setup() throws IOException { TEST_UTIL.ensureSomeRegionServersAvailable(2); } /** - * Listener for regionserver events testing hbase-2428 (Infinite loop of - * region closes if hbase:meta region is offline). In particular, listen - * for the close of the 'metaServer' and when it comes in, requeue it with a - * delay as though there were an issue processing the shutdown. 
As part of - * the requeuing, send over a close of a region on 'otherServer' so it comes - * into a master that has its meta region marked as offline. + * Listener for regionserver events testing hbase-2428 (Infinite loop of region closes if + * hbase:meta region is offline). In particular, listen for the close of the 'metaServer' and when + * it comes in, requeue it with a delay as though there were an issue processing the shutdown. As + * part of the requeuing, send over a close of a region on 'otherServer' so it comes into a master + * that has its meta region marked as offline. */ /* - static class HBase2428Listener implements RegionServerOperationListener { - // Map of what we've delayed so we don't do do repeated delays. - private final Set postponed = - new CopyOnWriteArraySet(); - private boolean done = false;; - private boolean metaShutdownReceived = false; - private final HServerAddress metaAddress; - private final MiniHBaseCluster cluster; - private final int otherServerIndex; - private final RegionInfo hri; - private int closeCount = 0; - static final int SERVER_DURATION = 3 * 1000; - static final int CLOSE_DURATION = 1 * 1000; - - HBase2428Listener(final MiniHBaseCluster c, final HServerAddress metaAddress, - final RegionInfo closingHRI, final int otherServerIndex) { - this.cluster = c; - this.metaAddress = metaAddress; - this.hri = closingHRI; - this.otherServerIndex = otherServerIndex; - } - - @Override - public boolean process(final RegionServerOperation op) throws IOException { - // If a regionserver shutdown and its of the meta server, then we want to - // delay the processing of the shutdown and send off a close of a region on - // the 'otherServer. - boolean result = true; - if (op instanceof ProcessServerShutdown) { - ProcessServerShutdown pss = (ProcessServerShutdown)op; - if (pss.getDeadServerAddress().equals(this.metaAddress)) { - // Don't postpone more than once. - if (!this.postponed.contains(pss)) { - // Close some region. - this.cluster.addMessageToSendRegionServer(this.otherServerIndex, - new HMsg(HMsg.Type.MSG_REGION_CLOSE, hri, - Bytes.toBytes("Forcing close in test"))); - this.postponed.add(pss); - // Put off the processing of the regionserver shutdown processing. - pss.setDelay(SERVER_DURATION); - this.metaShutdownReceived = true; - // Return false. This will add this op to the delayed queue. - result = false; - } - } - } else { - // Have the close run frequently. - if (isWantedCloseOperation(op) != null) { - op.setDelay(CLOSE_DURATION); - // Count how many times it comes through here. - this.closeCount++; - } - } - return result; - } - - public void processed(final RegionServerOperation op) { - if (isWantedCloseOperation(op) != null) return; - this.done = true; - } -*/ - /* - * @param op - * @return Null if not the wanted ProcessRegionClose, else op - * cast as a ProcessRegionClose. - */ + * static class HBase2428Listener implements RegionServerOperationListener { // Map of what we've + * delayed so we don't do do repeated delays. 
private final Set postponed = + * new CopyOnWriteArraySet(); private boolean done = false;; private + * boolean metaShutdownReceived = false; private final HServerAddress metaAddress; private final + * MiniHBaseCluster cluster; private final int otherServerIndex; private final RegionInfo hri; + * private int closeCount = 0; static final int SERVER_DURATION = 3 * 1000; static final int + * CLOSE_DURATION = 1 * 1000; HBase2428Listener(final MiniHBaseCluster c, final HServerAddress + * metaAddress, final RegionInfo closingHRI, final int otherServerIndex) { this.cluster = c; + * this.metaAddress = metaAddress; this.hri = closingHRI; this.otherServerIndex = + * otherServerIndex; } + * @Override public boolean process(final RegionServerOperation op) throws IOException { // If a + * regionserver shutdown and its of the meta server, then we want to // delay the processing of + * the shutdown and send off a close of a region on // the 'otherServer. boolean result = true; if + * (op instanceof ProcessServerShutdown) { ProcessServerShutdown pss = (ProcessServerShutdown)op; + * if (pss.getDeadServerAddress().equals(this.metaAddress)) { // Don't postpone more than once. if + * (!this.postponed.contains(pss)) { // Close some region. + * this.cluster.addMessageToSendRegionServer(this.otherServerIndex, new + * HMsg(HMsg.Type.MSG_REGION_CLOSE, hri, Bytes.toBytes("Forcing close in test"))); + * this.postponed.add(pss); // Put off the processing of the regionserver shutdown processing. + * pss.setDelay(SERVER_DURATION); this.metaShutdownReceived = true; // Return false. This will add + * this op to the delayed queue. result = false; } } } else { // Have the close run frequently. if + * (isWantedCloseOperation(op) != null) { op.setDelay(CLOSE_DURATION); // Count how many times it + * comes through here. this.closeCount++; } } return result; } public void processed(final + * RegionServerOperation op) { if (isWantedCloseOperation(op) != null) return; this.done = true; } + */ /* - private ProcessRegionClose isWantedCloseOperation(final RegionServerOperation op) { - // Count every time we get a close operation. - if (op instanceof ProcessRegionClose) { - ProcessRegionClose c = (ProcessRegionClose)op; - if (c.regionInfo.equals(hri)) { - return c; - } - } - return null; - } - - boolean isDone() { - return this.done; - } - - boolean isMetaShutdownReceived() { - return metaShutdownReceived; - } - - int getCloseCount() { - return this.closeCount; - } - - @Override - public boolean process(HServerInfo serverInfo, HMsg incomingMsg) { - return true; - } - } -*/ + * @param op + * @return Null if not the wanted ProcessRegionClose, else op cast as a + * ProcessRegionClose. + */ + /* + * private ProcessRegionClose isWantedCloseOperation(final RegionServerOperation op) { // Count + * every time we get a close operation. if (op instanceof ProcessRegionClose) { ProcessRegionClose + * c = (ProcessRegionClose)op; if (c.regionInfo.equals(hri)) { return c; } } return null; } + * boolean isDone() { return this.done; } boolean isMetaShutdownReceived() { return + * metaShutdownReceived; } int getCloseCount() { return this.closeCount; } + * @Override public boolean process(HServerInfo serverInfo, HMsg incomingMsg) { return true; } } + */ /** - * In 2428, the meta region has just been set offline and then a close comes - * in. + * In 2428, the meta region has just been set offline and then a close comes in. 
* @see HBASE-2428 */ - @Ignore @Test - public void testRegionCloseWhenNoMetaHBase2428() - throws Exception { + @Ignore + @Test + public void testRegionCloseWhenNoMetaHBase2428() throws Exception { /* - LOG.info("Running testRegionCloseWhenNoMetaHBase2428"); - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - final HMaster master = cluster.getMaster(); - int metaIndex = cluster.getServerWithMeta(); - // Figure the index of the server that is not server the hbase:meta - int otherServerIndex = -1; - for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) { - if (i == metaIndex) continue; - otherServerIndex = i; - break; - } - final HRegionServer otherServer = cluster.getRegionServer(otherServerIndex); - final HRegionServer metaHRS = cluster.getRegionServer(metaIndex); - - // Get a region out on the otherServer. - final RegionInfo hri = - otherServer.getOnlineRegions().iterator().next().getRegionInfo(); - - // Add our RegionServerOperationsListener - HBase2428Listener listener = new HBase2428Listener(cluster, - metaHRS.getHServerInfo().getServerAddress(), hri, otherServerIndex); - master.getRegionServerOperationQueue(). - registerRegionServerOperationListener(listener); - try { - // Now close the server carrying meta. - cluster.abortRegionServer(metaIndex); - - // First wait on receipt of meta server shutdown message. - while(!listener.metaShutdownReceived) Threads.sleep(100); - while(!listener.isDone()) Threads.sleep(10); - // We should not have retried the close more times than it took for the - // server shutdown message to exit the delay queue and get processed - // (Multiple by two to add in some slop in case of GC or something). - assertTrue(listener.getCloseCount() > 1); - assertTrue(listener.getCloseCount() < - ((HBase2428Listener.SERVER_DURATION/HBase2428Listener.CLOSE_DURATION) * 2)); - - // Assert the closed region came back online - assertRegionIsBackOnline(hri); - } finally { - master.getRegionServerOperationQueue(). - unregisterRegionServerOperationListener(listener); - } - */ + * LOG.info("Running testRegionCloseWhenNoMetaHBase2428"); MiniHBaseCluster cluster = + * TEST_UTIL.getHBaseCluster(); final HMaster master = cluster.getMaster(); int metaIndex = + * cluster.getServerWithMeta(); // Figure the index of the server that is not server the + * hbase:meta int otherServerIndex = -1; for (int i = 0; i < + * cluster.getRegionServerThreads().size(); i++) { if (i == metaIndex) continue; + * otherServerIndex = i; break; } final HRegionServer otherServer = + * cluster.getRegionServer(otherServerIndex); final HRegionServer metaHRS = + * cluster.getRegionServer(metaIndex); // Get a region out on the otherServer. final RegionInfo + * hri = otherServer.getOnlineRegions().iterator().next().getRegionInfo(); // Add our + * RegionServerOperationsListener HBase2428Listener listener = new HBase2428Listener(cluster, + * metaHRS.getHServerInfo().getServerAddress(), hri, otherServerIndex); + * master.getRegionServerOperationQueue(). registerRegionServerOperationListener(listener); try + * { // Now close the server carrying meta. cluster.abortRegionServer(metaIndex); // First wait + * on receipt of meta server shutdown message. while(!listener.metaShutdownReceived) + * Threads.sleep(100); while(!listener.isDone()) Threads.sleep(10); // We should not have + * retried the close more times than it took for the // server shutdown message to exit the + * delay queue and get processed // (Multiple by two to add in some slop in case of GC or + * something). 
assertTrue(listener.getCloseCount() > 1); assertTrue(listener.getCloseCount() < + * ((HBase2428Listener.SERVER_DURATION/HBase2428Listener.CLOSE_DURATION) * 2)); // Assert the + * closed region came back online assertRegionIsBackOnline(hri); } finally { + * master.getRegionServerOperationQueue(). unregisterRegionServerOperationListener(listener); } + */ } /** - * Test adding in a new server before old one on same host+port is dead. - * Make the test more onerous by having the server under test carry the meta. - * If confusion between old and new, purportedly meta never comes back. Test - * that meta gets redeployed. + * Test adding in a new server before old one on same host+port is dead. Make the test more + * onerous by having the server under test carry the meta. If confusion between old and new, + * purportedly meta never comes back. Test that meta gets redeployed. */ - @Ignore @Test - public void testAddingServerBeforeOldIsDead2413() - throws IOException { + @Ignore + @Test + public void testAddingServerBeforeOldIsDead2413() throws IOException { /* - LOG.info("Running testAddingServerBeforeOldIsDead2413"); - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - int count = count(); - int metaIndex = cluster.getServerWithMeta(); - MiniHBaseClusterRegionServer metaHRS = - (MiniHBaseClusterRegionServer)cluster.getRegionServer(metaIndex); - int port = metaHRS.getServerInfo().getServerAddress().getPort(); - Configuration c = TEST_UTIL.getConfiguration(); - String oldPort = c.get(HConstants.REGIONSERVER_PORT, "0"); - try { - LOG.info("KILLED=" + metaHRS); - metaHRS.kill(); - c.set(HConstants.REGIONSERVER_PORT, Integer.toString(port)); - // Try and start new regionserver. It might clash with the old - // regionserver port so keep trying to get past the BindException. - HRegionServer hrs = null; - while (true) { - try { - hrs = cluster.startRegionServer().getRegionServer(); - break; - } catch (IOException e) { - if (e.getCause() != null && e.getCause() instanceof InvocationTargetException) { - InvocationTargetException ee = (InvocationTargetException)e.getCause(); - if (ee.getCause() != null && ee.getCause() instanceof BindException) { - LOG.info("BindException; retrying: " + e.toString()); - } - } - } - } - LOG.info("STARTED=" + hrs); - // Wait until he's been given at least 3 regions before we go on to try - // and count rows in table. - while (hrs.getOnlineRegions().size() < 3) Threads.sleep(100); - LOG.info(hrs.toString() + " has " + hrs.getOnlineRegions().size() + - " regions"); - assertEquals(count, count()); - } finally { - c.set(HConstants.REGIONSERVER_PORT, oldPort); - } - */ + * LOG.info("Running testAddingServerBeforeOldIsDead2413"); MiniHBaseCluster cluster = + * TEST_UTIL.getHBaseCluster(); int count = count(); int metaIndex = + * cluster.getServerWithMeta(); MiniHBaseClusterRegionServer metaHRS = + * (MiniHBaseClusterRegionServer)cluster.getRegionServer(metaIndex); int port = + * metaHRS.getServerInfo().getServerAddress().getPort(); Configuration c = + * TEST_UTIL.getConfiguration(); String oldPort = c.get(HConstants.REGIONSERVER_PORT, "0"); try + * { LOG.info("KILLED=" + metaHRS); metaHRS.kill(); c.set(HConstants.REGIONSERVER_PORT, + * Integer.toString(port)); // Try and start new regionserver. It might clash with the old // + * regionserver port so keep trying to get past the BindException. 
HRegionServer hrs = null; + * while (true) { try { hrs = cluster.startRegionServer().getRegionServer(); break; } catch + * (IOException e) { if (e.getCause() != null && e.getCause() instanceof + * InvocationTargetException) { InvocationTargetException ee = + * (InvocationTargetException)e.getCause(); if (ee.getCause() != null && ee.getCause() + * instanceof BindException) { LOG.info("BindException; retrying: " + e.toString()); } } } } + * LOG.info("STARTED=" + hrs); // Wait until he's been given at least 3 regions before we go on + * to try // and count rows in table. while (hrs.getOnlineRegions().size() < 3) + * Threads.sleep(100); LOG.info(hrs.toString() + " has " + hrs.getOnlineRegions().size() + + * " regions"); assertEquals(count, count()); } finally { c.set(HConstants.REGIONSERVER_PORT, + * oldPort); } + */ } /** - * HBase2482 is about outstanding region openings. If any are outstanding - * when a regionserver goes down, then they'll never deploy. They'll be - * stuck in the regions-in-transition list for ever. This listener looks - * for a region opening HMsg and if its from the server passed on construction, - * then we kill it. It also looks out for a close message on the victim - * server because that signifies start of the fireworks. + * HBase2482 is about outstanding region openings. If any are outstanding when a regionserver goes + * down, then they'll never deploy. They'll be stuck in the regions-in-transition list for ever. + * This listener looks for a region opening HMsg and if its from the server passed on + * construction, then we kill it. It also looks out for a close message on the victim server + * because that signifies start of the fireworks. */ /* - static class HBase2482Listener implements RegionServerOperationListener { - private final HRegionServer victim; - private boolean abortSent = false; - // We closed regions on new server. - private volatile boolean closed = false; - // Copy of regions on new server - private final Collection copyOfOnlineRegions; - // This is the region that was in transition on the server we aborted. Test - // passes if this region comes back online successfully. - private RegionInfo regionToFind; - - HBase2482Listener(final HRegionServer victim) { - this.victim = victim; - // Copy regions currently open on this server so I can notice when - // there is a close. - this.copyOfOnlineRegions = - this.victim.getCopyOfOnlineRegionsSortedBySize().values(); - } - - @Override - public boolean process(HServerInfo serverInfo, HMsg incomingMsg) { - if (!victim.getServerInfo().equals(serverInfo) || - this.abortSent || !this.closed) { - return true; - } - if (!incomingMsg.isType(HMsg.Type.MSG_REPORT_PROCESS_OPEN)) return true; - // Save the region that is in transition so can test later it came back. - this.regionToFind = incomingMsg.getRegionInfo(); - String msg = "ABORTING " + this.victim + " because got a " + - HMsg.Type.MSG_REPORT_PROCESS_OPEN + " on this server for " + - incomingMsg.getRegionInfo().getRegionNameAsString(); - this.victim.abort(msg); - this.abortSent = true; - return true; - } - - @Override - public boolean process(RegionServerOperation op) throws IOException { - return true; - } - - @Override - public void processed(RegionServerOperation op) { - if (this.closed || !(op instanceof ProcessRegionClose)) return; - ProcessRegionClose close = (ProcessRegionClose)op; - for (HRegion r: this.copyOfOnlineRegions) { - if (r.getRegionInfo().equals(close.regionInfo)) { - // We've closed one of the regions that was on the victim server. 
- // Now can start testing for when all regions are back online again - LOG.info("Found close of " + - r.getRegionInfo().getRegionNameAsString() + - "; setting close happened flag"); - this.closed = true; - break; - } - } - } - } -*/ + * static class HBase2482Listener implements RegionServerOperationListener { private final + * HRegionServer victim; private boolean abortSent = false; // We closed regions on new server. + * private volatile boolean closed = false; // Copy of regions on new server private final + * Collection copyOfOnlineRegions; // This is the region that was in transition on the + * server we aborted. Test // passes if this region comes back online successfully. private + * RegionInfo regionToFind; HBase2482Listener(final HRegionServer victim) { this.victim = victim; + * // Copy regions currently open on this server so I can notice when // there is a close. + * this.copyOfOnlineRegions = this.victim.getCopyOfOnlineRegionsSortedBySize().values(); } + * @Override public boolean process(HServerInfo serverInfo, HMsg incomingMsg) { if + * (!victim.getServerInfo().equals(serverInfo) || this.abortSent || !this.closed) { return true; } + * if (!incomingMsg.isType(HMsg.Type.MSG_REPORT_PROCESS_OPEN)) return true; // Save the region + * that is in transition so can test later it came back. this.regionToFind = + * incomingMsg.getRegionInfo(); String msg = "ABORTING " + this.victim + " because got a " + + * HMsg.Type.MSG_REPORT_PROCESS_OPEN + " on this server for " + + * incomingMsg.getRegionInfo().getRegionNameAsString(); this.victim.abort(msg); this.abortSent = + * true; return true; } + * @Override public boolean process(RegionServerOperation op) throws IOException { return true; } + * @Override public void processed(RegionServerOperation op) { if (this.closed || !(op instanceof + * ProcessRegionClose)) return; ProcessRegionClose close = (ProcessRegionClose)op; for (HRegion r: + * this.copyOfOnlineRegions) { if (r.getRegionInfo().equals(close.regionInfo)) { // We've closed + * one of the regions that was on the victim server. // Now can start testing for when all regions + * are back online again LOG.info("Found close of " + r.getRegionInfo().getRegionNameAsString() + + * "; setting close happened flag"); this.closed = true; break; } } } } + */ /** - * In 2482, a RS with an opening region on it dies. The said region is then - * stuck in the master's regions-in-transition and never leaves it. This - * test works by bringing up a new regionserver, waiting for the load - * balancer to give it some regions. Then, we close all on the new server. - * After sending all the close messages, we send the new regionserver the - * special blocking message so it can not process any more messages. - * Meantime reopening of the just-closed regions is backed up on the new - * server. Soon as master gets an opening region from the new regionserver, - * we kill it. We then wait on all regions to come back on line. If bug - * is fixed, this should happen soon as the processing of the killed server is - * done. + * In 2482, a RS with an opening region on it dies. The said region is then stuck in the master's + * regions-in-transition and never leaves it. This test works by bringing up a new regionserver, + * waiting for the load balancer to give it some regions. Then, we close all on the new server. + * After sending all the close messages, we send the new regionserver the special blocking message + * so it can not process any more messages. 
Meantime reopening of the just-closed regions is + * backed up on the new server. Soon as master gets an opening region from the new regionserver, + * we kill it. We then wait on all regions to come back on line. If bug is fixed, this should + * happen soon as the processing of the killed server is done. * @see HBASE-2482 */ - @Ignore @Test - public void testKillRSWithOpeningRegion2482() - throws Exception { + @Ignore + @Test + public void testKillRSWithOpeningRegion2482() throws Exception { /* - LOG.info("Running testKillRSWithOpeningRegion2482"); - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - if (cluster.getLiveRegionServerThreads().size() < 2) { - // Need at least two servers. - cluster.startRegionServer(); - } - // Count how many regions are online. They need to be all back online for - // this test to succeed. - int countOfMetaRegions = countOfMetaRegions(); - // Add a listener on the server. - HMaster m = cluster.getMaster(); - // Start new regionserver. - MiniHBaseClusterRegionServer hrs = - (MiniHBaseClusterRegionServer)cluster.startRegionServer().getRegionServer(); - LOG.info("Started new regionserver: " + hrs.toString()); - // Wait until has some regions before proceeding. Balancer will give it some. - int minimumRegions = - countOfMetaRegions/(cluster.getRegionServerThreads().size() * 2); - while (hrs.getOnlineRegions().size() < minimumRegions) Threads.sleep(100); - // Set the listener only after some regions have been opened on new server. - HBase2482Listener listener = new HBase2482Listener(hrs); - m.getRegionServerOperationQueue(). - registerRegionServerOperationListener(listener); - try { - // Go close all non-catalog regions on this new server - closeAllNonCatalogRegions(cluster, hrs); - // After all closes, add blocking message before the region opens start to - // come in. - cluster.addMessageToSendRegionServer(hrs, - new HMsg(HMsg.Type.TESTING_BLOCK_REGIONSERVER)); - // Wait till one of the above close messages has an effect before we start - // wait on all regions back online. - while (!listener.closed) Threads.sleep(100); - LOG.info("Past close"); - // Make sure the abort server message was sent. - while(!listener.abortSent) Threads.sleep(100); - LOG.info("Past abort send; waiting on all regions to redeploy"); - // Now wait for regions to come back online. - assertRegionIsBackOnline(listener.regionToFind); - } finally { - m.getRegionServerOperationQueue(). - unregisterRegionServerOperationListener(listener); - } - */ + * LOG.info("Running testKillRSWithOpeningRegion2482"); MiniHBaseCluster cluster = + * TEST_UTIL.getHBaseCluster(); if (cluster.getLiveRegionServerThreads().size() < 2) { // Need + * at least two servers. cluster.startRegionServer(); } // Count how many regions are online. + * They need to be all back online for // this test to succeed. int countOfMetaRegions = + * countOfMetaRegions(); // Add a listener on the server. HMaster m = cluster.getMaster(); // + * Start new regionserver. MiniHBaseClusterRegionServer hrs = + * (MiniHBaseClusterRegionServer)cluster.startRegionServer().getRegionServer(); + * LOG.info("Started new regionserver: " + hrs.toString()); // Wait until has some regions + * before proceeding. Balancer will give it some. int minimumRegions = + * countOfMetaRegions/(cluster.getRegionServerThreads().size() * 2); while + * (hrs.getOnlineRegions().size() < minimumRegions) Threads.sleep(100); // Set the listener only + * after some regions have been opened on new server. 
HBase2482Listener listener = new + * HBase2482Listener(hrs); m.getRegionServerOperationQueue(). + * registerRegionServerOperationListener(listener); try { // Go close all non-catalog regions on + * this new server closeAllNonCatalogRegions(cluster, hrs); // After all closes, add blocking + * message before the region opens start to // come in. + * cluster.addMessageToSendRegionServer(hrs, new HMsg(HMsg.Type.TESTING_BLOCK_REGIONSERVER)); // + * Wait till one of the above close messages has an effect before we start // wait on all + * regions back online. while (!listener.closed) Threads.sleep(100); LOG.info("Past close"); // + * Make sure the abort server message was sent. while(!listener.abortSent) Threads.sleep(100); + * LOG.info("Past abort send; waiting on all regions to redeploy"); // Now wait for regions to + * come back online. assertRegionIsBackOnline(listener.regionToFind); } finally { + * m.getRegionServerOperationQueue(). unregisterRegionServerOperationListener(listener); } + */ } /* * @return Count of all non-catalog regions on the designated server */ -/* - private int closeAllNonCatalogRegions(final MiniHBaseCluster cluster, - final MiniHBaseCluster.MiniHBaseClusterRegionServer hrs) - throws IOException { - int countOfRegions = 0; - for (HRegion r: hrs.getOnlineRegions()) { - if (r.getRegionInfo().isMetaRegion()) continue; - cluster.addMessageToSendRegionServer(hrs, - new HMsg(HMsg.Type.MSG_REGION_CLOSE, r.getRegionInfo())); - LOG.info("Sent close of " + r.getRegionInfo().getRegionNameAsString() + - " on " + hrs.toString()); - countOfRegions++; - } - return countOfRegions; - } - - private void assertRegionIsBackOnline(final RegionInfo hri) - throws IOException { - // Region should have an entry in its startkey because of addRowToEachRegion. - byte [] row = getStartKey(hri); - HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME); - Get g = new Get(row); - assertTrue((t.get(g)).size() > 0); - } - /* + * private int closeAllNonCatalogRegions(final MiniHBaseCluster cluster, final + * MiniHBaseCluster.MiniHBaseClusterRegionServer hrs) throws IOException { int countOfRegions = 0; + * for (HRegion r: hrs.getOnlineRegions()) { if (r.getRegionInfo().isMetaRegion()) continue; + * cluster.addMessageToSendRegionServer(hrs, new HMsg(HMsg.Type.MSG_REGION_CLOSE, + * r.getRegionInfo())); LOG.info("Sent close of " + r.getRegionInfo().getRegionNameAsString() + + * " on " + hrs.toString()); countOfRegions++; } return countOfRegions; } private void + * assertRegionIsBackOnline(final RegionInfo hri) throws IOException { // Region should have an + * entry in its startkey because of addRowToEachRegion. byte [] row = getStartKey(hri); HTable t = + * new HTable(TEST_UTIL.getConfiguration(), TABLENAME); Get g = new Get(row); + * assertTrue((t.get(g)).size() > 0); } /* * @return Count of regions in meta table. 
* @throws IOException */ /* - private static int countOfMetaRegions() - throws IOException { - HTable meta = new HTable(TEST_UTIL.getConfiguration(), - HConstants.META_TABLE_NAME); - int rows = 0; - Scan scan = new Scan(); - scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); - ResultScanner s = meta.getScanner(scan); - for (Result r = null; (r = s.next()) != null;) { - byte [] b = - r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); - if (b == null || b.length <= 0) break; - rows++; - } - s.close(); - return rows; - } -*/ + * private static int countOfMetaRegions() throws IOException { HTable meta = new + * HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME); int rows = 0; Scan scan = new + * Scan(); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); ResultScanner s + * = meta.getScanner(scan); for (Result r = null; (r = s.next()) != null;) { byte [] b = + * r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); if (b == null || b.length + * <= 0) break; rows++; } s.close(); return rows; } + */ /* - * Add to each of the regions in hbase:meta a value. Key is the startrow of the - * region (except its 'aaa' for first region). Actual value is the row name. + * Add to each of the regions in hbase:meta a value. Key is the startrow of the region (except its + * 'aaa' for first region). Actual value is the row name. * @param expected * @return * @throws IOException @@ -505,10 +320,10 @@ private static int addToEachStartKey(final int expected) throws IOException { } // If start key, add 'aaa'. - if(!hri.getTable().equals(TABLENAME)) { + if (!hri.getTable().equals(TABLENAME)) { continue; } - byte [] row = getStartKey(hri); + byte[] row = getStartKey(hri); Put p = new Put(row); p.setDurability(Durability.SKIP_WAL); p.addColumn(getTestFamily(), getTestQualifier(), row); @@ -526,16 +341,16 @@ private static int addToEachStartKey(final int expected) throws IOException { * @param hri * @return Start key for hri (If start key is '', then return 'aaa'. */ - private static byte [] getStartKey(final RegionInfo hri) { - return Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey())? - Bytes.toBytes("aaa"): hri.getStartKey(); + private static byte[] getStartKey(final RegionInfo hri) { + return Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey()) ? 
Bytes.toBytes("aaa") + : hri.getStartKey(); } - private static byte [] getTestFamily() { + private static byte[] getTestFamily() { return FAMILIES[0]; } - private static byte [] getTestQualifier() { + private static byte[] getTestQualifier() { return getTestFamily(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTableRegionsWhileRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTableRegionsWhileRSCrash.java index 257a8f87545b..c948a9d51505 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTableRegionsWhileRSCrash.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTableRegionsWhileRSCrash.java @@ -45,17 +45,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - - -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMergeTableRegionsWhileRSCrash { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMergeTableRegionsWhileRSCrash.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestMergeTableRegionsWhileRSCrash.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestMergeTableRegionsWhileRSCrash.class); protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("test"); @@ -65,7 +63,6 @@ public class TestMergeTableRegionsWhileRSCrash { private static CountDownLatch mergeCommitArrive = new CountDownLatch(1); private static Table TABLE; - @BeforeClass public static void setupCluster() throws Exception { UTIL.startMiniCluster(1); @@ -87,27 +84,27 @@ public static void cleanupTest() throws Exception { @Test public void test() throws Exception { - //write some rows to the table + // write some rows to the table for (int i = 0; i < 10; i++) { byte[] row = Bytes.toBytes("row" + i); Put put = new Put(row); put.addColumn(CF, CF, CF); TABLE.put(put); } - MasterProcedureEnv env = UTIL.getMiniHBaseCluster().getMaster() - .getMasterProcedureExecutor().getEnvironment(); - final ProcedureExecutor executor = UTIL.getMiniHBaseCluster() - .getMaster().getMasterProcedureExecutor(); + MasterProcedureEnv env = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); + final ProcedureExecutor executor = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); List regionInfos = admin.getRegions(TABLE_NAME); - MergeTableRegionsProcedure mergeTableRegionsProcedure = new MergeTableRegionsProcedure( - env, new RegionInfo [] {regionInfos.get(0), regionInfos.get(1)}, false); + MergeTableRegionsProcedure mergeTableRegionsProcedure = new MergeTableRegionsProcedure(env, + new RegionInfo[] { regionInfos.get(0), regionInfos.get(1) }, false); executor.submitProcedure(mergeTableRegionsProcedure); UTIL.waitFor(30000, () -> executor.getProcedures().stream().filter(p -> p instanceof TransitRegionStateProcedure) - .map(p -> (TransitRegionStateProcedure) p) - .anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); - UTIL.getMiniHBaseCluster().killRegionServer( - UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName()); + .map(p -> (TransitRegionStateProcedure) p) + .anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); + UTIL.getMiniHBaseCluster() + .killRegionServer(UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName()); UTIL.getMiniHBaseCluster().startRegionServer(); UTIL.waitUntilNoRegionsInTransition(); Scan scan = new 
Scan(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java index 6ad4f0806055..01445e5e1773 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,8 +41,7 @@ @Category({ LargeTests.class }) public class TestMetaAssignmentWithStopMaster { - private static final Logger LOG = - LoggerFactory.getLogger(TestMetaAssignmentWithStopMaster.class); + private static final Logger LOG = LoggerFactory.getLogger(TestMetaAssignmentWithStopMaster.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -54,8 +53,8 @@ public class TestMetaAssignmentWithStopMaster { @BeforeClass public static void setUpBeforeClass() throws Exception { - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(2).numRegionServers(3).numDataNodes(3).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(2) + .numRegionServers(3).numDataNodes(3).build(); UTIL.startMiniCluster(option); } @@ -73,8 +72,8 @@ public void testStopActiveMaster() throws Exception { UTIL.getMiniHBaseCluster().getMaster().stop("Stop master for test"); long startTime = EnvironmentEdgeManager.currentTime(); - while (UTIL.getMiniHBaseCluster().getMaster() == null || - UTIL.getMiniHBaseCluster().getMaster().getServerName().equals(oldMaster)) { + while (UTIL.getMiniHBaseCluster().getMaster() == null + || UTIL.getMiniHBaseCluster().getMaster().getServerName().equals(oldMaster)) { LOG.info("Wait the standby master become active"); Thread.sleep(3000); if (EnvironmentEdgeManager.currentTime() - startTime > WAIT_TIMEOUT) { @@ -91,8 +90,9 @@ public void testStopActiveMaster() throws Exception { } ServerName newMetaServer = locator.getAllRegionLocations().get(0).getServerName(); - assertTrue("The new meta server " + newMetaServer + " should be same with" + - " the old meta server " + oldMetaServer, newMetaServer.equals(oldMetaServer)); + assertTrue("The new meta server " + newMetaServer + " should be same with" + + " the old meta server " + oldMetaServer, + newMetaServer.equals(oldMetaServer)); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java index cf35ae24bf97..a7b061aa7c15 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java @@ -62,8 +62,8 @@ public class TestMetaShutdownHandler { @BeforeClass public static void setUpBeforeClass() throws Exception { - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numRegionServers(3).rsClass(MyRegionServer.class).numDataNodes(3).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numRegionServers(3) + .rsClass(MyRegionServer.class).numDataNodes(3).build(); TEST_UTIL.startMiniCluster(option); } @@ -73,13 +73,10 @@ public static void tearDownAfterClass() throws Exception { } /** - * This test will test 
the expire handling of a meta-carrying - * region server. - * After HBaseMiniCluster is up, we will delete the ephemeral - * node of the meta-carrying region server, which will trigger - * the expire of this region server on the master. - * On the other hand, we will slow down the abort process on - * the region server so that it is still up during the master SSH. + * This test will test the expire handling of a meta-carrying region server. After + * HBaseMiniCluster is up, we will delete the ephemeral node of the meta-carrying region server, + * which will trigger the expire of this region server on the master. On the other hand, we will + * slow down the abort process on the region server so that it is still up during the master SSH. * We will check that the master SSH is still successfully done. */ @Test @@ -88,17 +85,17 @@ public void testExpireMetaRegionServer() throws Exception { HMaster master = cluster.getMaster(); RegionStates regionStates = master.getAssignmentManager().getRegionStates(); ServerName metaServerName = - regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO); - if (master.getServerName().equals(metaServerName) || metaServerName == null || - !metaServerName.equals(cluster.getServerHoldingMeta())) { + regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO); + if (master.getServerName().equals(metaServerName) || metaServerName == null + || !metaServerName.equals(cluster.getServerHoldingMeta())) { // Move meta off master metaServerName = - cluster.getLiveRegionServerThreads().get(0).getRegionServer().getServerName(); + cluster.getLiveRegionServerThreads().get(0).getRegionServer().getServerName(); master.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), Bytes.toBytes(metaServerName.getServerName())); TEST_UTIL.waitUntilNoRegionsInTransition(60000); metaServerName = - regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO); + regionStates.getRegionServerOfRegion(RegionInfoBuilder.FIRST_META_REGIONINFO); } RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); assertEquals("Wrong state for meta!", RegionState.State.OPEN, metaState.getState()); @@ -107,9 +104,8 @@ public void testExpireMetaRegionServer() throws Exception { // Delete the ephemeral node of the meta-carrying region server. // This is trigger the expire of this region server on the master. 
- String rsEphemeralNodePath = - ZNodePaths.joinZNode(master.getZooKeeper().getZNodePaths().rsZNode, - metaServerName.toString()); + String rsEphemeralNodePath = ZNodePaths.joinZNode(master.getZooKeeper().getZNodePaths().rsZNode, + metaServerName.toString()); ZKUtil.deleteNode(master.getZooKeeper(), rsEphemeralNodePath); LOG.info("Deleted the znode for the RegionServer hosting hbase:meta; waiting on SSH"); // Wait for SSH to finish @@ -139,8 +135,8 @@ public boolean evaluate() throws Exception { public static class MyRegionServer extends MiniHBaseClusterRegionServer { - public MyRegionServer(Configuration conf) throws IOException, KeeperException, - InterruptedException { + public MyRegionServer(Configuration conf) + throws IOException, KeeperException, InterruptedException { super(conf); } @@ -148,7 +144,7 @@ public MyRegionServer(Configuration conf) throws IOException, KeeperException, public void abort(String reason, Throwable cause) { // sleep to slow down the region server abort try { - Thread.sleep(30*1000); + Thread.sleep(30 * 1000); } catch (InterruptedException e) { return; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java index de677195ad53..580e2bbf203a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public class TestMigrateAndMirrorMetaLocations { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMigrateAndMirrorMetaLocations.class); + HBaseClassTestRule.forClass(TestMigrateAndMirrorMetaLocations.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -85,7 +85,7 @@ private void assertLocationEquals(Result result, int replicaCount) throws Except data = removeMetaData(data); int prefixLen = lengthOfPBMagic(); ZooKeeperProtos.MetaRegionServer zkProto = ZooKeeperProtos.MetaRegionServer.parser() - .parseFrom(data, prefixLen, data.length - prefixLen); + .parseFrom(data, prefixLen, data.length - prefixLen); ServerName sn = ProtobufUtil.toServerName(zkProto.getServer()); assertEquals(locs.getRegionLocation(i).getServerName(), sn); } @@ -95,7 +95,7 @@ private void assertLocationEquals(Result result, int replicaCount) throws Except private void checkMirrorLocation(int replicaCount) throws Exception { MasterRegion masterRegion = UTIL.getMiniHBaseCluster().getMaster().getMasterRegion(); try (RegionScanner scanner = - masterRegion.getRegionScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) { + masterRegion.getRegionScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) { List cells = new ArrayList<>(); boolean moreRows = scanner.next(cells); // should only have one row as we have only one meta region, different replicas will be in the @@ -110,7 +110,7 @@ private void checkMirrorLocation(int replicaCount) throws Exception { private void waitUntilNoSCP() throws IOException { UTIL.waitFor(30000, () -> UTIL.getMiniHBaseCluster().getMaster().getProcedures().stream() - .filter(p -> p instanceof ServerCrashProcedure).allMatch(Procedure::isSuccess)); + .filter(p -> p instanceof 
ServerCrashProcedure).allMatch(Procedure::isSuccess)); } @Test @@ -118,14 +118,14 @@ public void test() throws Exception { checkMirrorLocation(2); MasterRegion masterRegion = UTIL.getMiniHBaseCluster().getMaster().getMasterRegion(); try (RegionScanner scanner = - masterRegion.getRegionScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) { + masterRegion.getRegionScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) { List cells = new ArrayList<>(); scanner.next(cells); Cell cell = cells.get(0); // delete the only row masterRegion.update( r -> r.delete(new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) - .addFamily(HConstants.CATALOG_FAMILY))); + .addFamily(HConstants.CATALOG_FAMILY))); masterRegion.flush(true); } // restart the whole cluster, to see if we can migrate the data on zookeeper to master local @@ -134,7 +134,7 @@ public void test() throws Exception { UTIL.startMiniHBaseCluster(StartTestingClusterOption.builder().numRegionServers(3).build()); masterRegion = UTIL.getMiniHBaseCluster().getMaster().getMasterRegion(); try (RegionScanner scanner = - masterRegion.getRegionScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) { + masterRegion.getRegionScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) { List cells = new ArrayList<>(); boolean moreRows = scanner.next(cells); assertFalse(moreRows); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java index c687abdf414a..4ddae1149d17 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,14 +50,14 @@ public class TestMigrateNamespaceTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMigrateNamespaceTable.class); + HBaseClassTestRule.forClass(TestMigrateNamespaceTable.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @BeforeClass public static void setUp() throws Exception { - StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(1). 
- numAlwaysStandByMasters(1).numRegionServers(1).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(1) + .numAlwaysStandByMasters(1).numRegionServers(1).build(); UTIL.startMiniCluster(option); } @@ -72,13 +72,13 @@ public void testMigrate() throws IOException, InterruptedException { try (Table table = UTIL.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME)) { for (int i = 0; i < 5; i++) { NamespaceDescriptor nd = NamespaceDescriptor.create("Test-NS-" + i) - .addConfiguration("key-" + i, "value-" + i).build(); + .addConfiguration("key-" + i, "value-" + i).build(); table.put(new Put(Bytes.toBytes(nd.getName())).addColumn( TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES, TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES, ProtobufUtil.toProtoNamespaceDescriptor(nd).toByteArray())); AbstractStateMachineNamespaceProcedure - .createDirectory(UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(), nd); + .createDirectory(UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(), nd); } } MasterThread masterThread = UTIL.getMiniHBaseCluster().getMasterThread(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestNewStartedRegionServerVersion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestNewStartedRegionServerVersion.java index 2d9269a337aa..0d69dd5dc498 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestNewStartedRegionServerVersion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestNewStartedRegionServerVersion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,10 +40,10 @@ public class TestNewStartedRegionServerVersion { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNewStartedRegionServerVersion.class); + HBaseClassTestRule.forClass(TestNewStartedRegionServerVersion.class); private static final Logger LOG = - LoggerFactory.getLogger(TestNewStartedRegionServerVersion.class); + LoggerFactory.getLogger(TestNewStartedRegionServerVersion.class); private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java index 0fc8cddd19a3..890175d4c3f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java @@ -75,7 +75,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestRegionPlacement { @ClassRule @@ -97,8 +97,8 @@ public class TestRegionPlacement { public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Enable the favored nodes based load balancer - conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - FavoredNodeLoadBalancer.class, LoadBalancer.class); + conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, FavoredNodeLoadBalancer.class, + LoadBalancer.class); conf.setBoolean("hbase.tests.use.shortcircuit.reads", false); TEST_UTIL.startMiniCluster(SLAVES); CONNECTION = TEST_UTIL.getConnection(); @@ -111,7 +111,8 @@ public static void tearDownAfterClass() 
throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Ignore ("Test for unfinished feature") @Test + @Ignore("Test for unfinished feature") + @Test public void testRegionPlacement() throws Exception { String tableStr = "testRegionAssignment"; TableName table = TableName.valueOf(tableStr); @@ -136,18 +137,18 @@ public void testRegionPlacement() throws Exception { // Shuffle the secondary with tertiary favored nodes FavoredNodesPlan shuffledPlan = this.shuffleAssignmentPlan(currentPlan, - FavoredNodesPlan.Position.SECONDARY, FavoredNodesPlan.Position.TERTIARY); + FavoredNodesPlan.Position.SECONDARY, FavoredNodesPlan.Position.TERTIARY); // Let the region placement update the hbase:meta and Region Servers rp.updateAssignmentPlan(shuffledPlan); // Verify the region assignment. There are supposed to no region reassignment // All the regions are still on the primary region server - verifyRegionAssignment(shuffledPlan,0, REGION_NUM); + verifyRegionAssignment(shuffledPlan, 0, REGION_NUM); // Shuffle the plan by switching the primary with secondary and // verify the region reassignment is consistent with the plan. - shuffledPlan = this.shuffleAssignmentPlan(currentPlan, - FavoredNodesPlan.Position.PRIMARY, FavoredNodesPlan.Position.SECONDARY); + shuffledPlan = this.shuffleAssignmentPlan(currentPlan, FavoredNodesPlan.Position.PRIMARY, + FavoredNodesPlan.Position.SECONDARY); // Let the region placement update the hbase:meta and Region Servers rp.updateAssignmentPlan(shuffledPlan); @@ -157,15 +158,17 @@ public void testRegionPlacement() throws Exception { // also verify that the AssignmentVerificationReport has the correct information RegionPlacementMaintainer rp = new RegionPlacementMaintainer(TEST_UTIL.getConfiguration()); // we are interested in only one table (and hence one report) - rp.setTargetTableName(new String[]{tableStr}); + rp.setTargetTableName(new String[] { tableStr }); List reports = rp.verifyRegionPlacement(false); AssignmentVerificationReport report = reports.get(0); assertTrue(report.getRegionsWithoutValidFavoredNodes().isEmpty()); assertTrue(report.getNonFavoredAssignedRegions().isEmpty()); assertTrue(report.getTotalFavoredAssignments() >= REGION_NUM); assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.PRIMARY) != 0); - assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) == 0); - assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY) == 0); + assertTrue( + report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) == 0); + assertTrue( + report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY) == 0); assertTrue(report.getUnassignedRegions().isEmpty()); // Check when a RS stops, the regions get assigned to their secondary/tertiary @@ -178,14 +181,17 @@ public void testRegionPlacement() throws Exception { assertTrue(report.getNonFavoredAssignedRegions().isEmpty()); assertTrue(report.getTotalFavoredAssignments() >= REGION_NUM); assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.PRIMARY) > 0); - assertTrue("secondary " + - report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) + " tertiary " - + report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY), - (report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) > 0 - || report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY) > 0)); - 
assertTrue((report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.PRIMARY) + - report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) + - report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY)) == REGION_NUM); + assertTrue( + "secondary " + + report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) + + " tertiary " + + report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY), + (report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) > 0 + || report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY) > 0)); + assertTrue((report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.PRIMARY) + + report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) + + report.getNumRegionsOnFavoredNodeByPosition( + FavoredNodesPlan.Position.TERTIARY)) == REGION_NUM); RegionPlacementMaintainer.printAssignmentPlan(currentPlan); } @@ -211,8 +217,8 @@ private void killRandomServerAndVerifyAssignment() break; } } - } while (ServerName.isSameAddress(metaServer, serverToKill) || isNamespaceServer || - TEST_UTIL.getHBaseCluster().getRegionServer(killIndex).getNumberOfOnlineRegions() == 0); + } while (ServerName.isSameAddress(metaServer, serverToKill) || isNamespaceServer + || TEST_UTIL.getHBaseCluster().getRegionServer(killIndex).getNumberOfOnlineRegions() == 0); LOG.debug("Stopping RS " + serverToKill); Map> regionsToVerify = new HashMap<>(); // mark the regions to track @@ -220,8 +226,8 @@ private void killRandomServerAndVerifyAssignment() ServerName s = entry.getValue()[0]; if (ServerName.isSameAddress(s, serverToKill)) { regionsToVerify.put(entry.getKey(), new Pair<>(entry.getValue()[1], entry.getValue()[2])); - LOG.debug("Adding " + entry.getKey() + " with sedcondary/tertiary " + - entry.getValue()[1] + " " + entry.getValue()[2]); + LOG.debug("Adding " + entry.getKey() + " with secondary/tertiary " + entry.getValue()[1] + + " " + entry.getValue()[2]); } } int orig = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getNumRegionsOpened(); @@ -229,21 +235,21 @@ private void killRandomServerAndVerifyAssignment() TEST_UTIL.getHBaseCluster().waitForRegionServerToStop(serverToKill, 60000); int curr = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getNumRegionsOpened(); while (curr - orig < regionsToVerify.size()) { - LOG.debug("Waiting for " + regionsToVerify.size() + " to come online " + - " Current #regions " + curr + " Original #regions " + orig); + LOG.debug("Waiting for " + regionsToVerify.size() + " to come online " + " Current #regions " + + curr + " Original #regions " + orig); Thread.sleep(200); curr = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getNumRegionsOpened(); } // now verify for (Map.Entry> entry : regionsToVerify.entrySet()) { - ServerName newDestination = TEST_UTIL.getHBaseCluster().getMaster() - .getAssignmentManager().getRegionStates().getRegionServerOfRegion(entry.getKey()); + ServerName newDestination = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getRegionServerOfRegion(entry.getKey()); Pair secondaryTertiaryServers = entry.getValue(); - LOG.debug("New destination for region " + entry.getKey().getEncodedName() + - " " + newDestination +". 
Secondary/Tertiary are " + secondaryTertiaryServers.getFirst() - + "/" + secondaryTertiaryServers.getSecond()); - if (!(ServerName.isSameAddress(newDestination, secondaryTertiaryServers.getFirst())|| - ServerName.isSameAddress(newDestination, secondaryTertiaryServers.getSecond()))){ + LOG.debug("New destination for region " + entry.getKey().getEncodedName() + " " + + newDestination + ". Secondary/Tertiary are " + secondaryTertiaryServers.getFirst() + "/" + + secondaryTertiaryServers.getSecond()); + if (!(ServerName.isSameAddress(newDestination, secondaryTertiaryServers.getFirst()) + || ServerName.isSameAddress(newDestination, secondaryTertiaryServers.getSecond()))) { fail("Region " + entry.getKey() + " not present on any of the expected servers"); } } @@ -254,7 +260,8 @@ private void killRandomServerAndVerifyAssignment() /** * Used to test the correctness of this class. */ - @Ignore ("Test for unfinished feature") @Test + @Ignore("Test for unfinished feature") + @Test public void testRandomizedMatrix() { int rows = 100; int cols = 100; @@ -268,7 +275,7 @@ public void testRandomizedMatrix() { // Test that inverting a transformed matrix gives the original matrix. RegionPlacementMaintainer.RandomizedMatrix rm = - new RegionPlacementMaintainer.RandomizedMatrix(rows, cols); + new RegionPlacementMaintainer.RandomizedMatrix(rows, cols); float[][] transformed = rm.transform(matrix); float[][] invertedTransformed = rm.invert(transformed); for (int i = 0; i < rows; i++) { @@ -312,8 +319,7 @@ private FavoredNodesPlan shuffleAssignmentPlan(FavoredNodesPlan plan, Map regionToHRegion = rp.getRegionAssignmentSnapshot().getRegionNameToRegionInfoMap(); - for (Map.Entry> entry : - plan.getAssignmentMap().entrySet()) { + for (Map.Entry> entry : plan.getAssignmentMap().entrySet()) { // copy the server list from the original plan List shuffledServerList = new ArrayList<>(); @@ -330,21 +336,17 @@ private FavoredNodesPlan shuffleAssignmentPlan(FavoredNodesPlan plan, } /** - * To verify the region assignment status. - * It will check the assignment plan consistency between hbase:meta and - * region servers. - * Also it will verify weather the number of region movement and + * To verify the region assignment status. It will check the assignment plan consistency between + * hbase:meta and region servers. Also it will verify whether the number of region movement and * the number regions on the primary region server are expected - * * @param plan * @param regionMovementNum * @param numRegionsOnPrimaryRS * @throws InterruptedException * @throws IOException */ - private void verifyRegionAssignment(FavoredNodesPlan plan, - int regionMovementNum, int numRegionsOnPrimaryRS) - throws InterruptedException, IOException { + private void verifyRegionAssignment(FavoredNodesPlan plan, int regionMovementNum, + int numRegionsOnPrimaryRS) throws InterruptedException, IOException { // Verify the assignment plan in hbase:meta is consistent with the expected plan. 
verifyMETAUpdated(plan); @@ -364,18 +366,16 @@ private void verifyRegionAssignment(FavoredNodesPlan plan, * @param expectedPlan the region assignment plan * @throws IOException if an IO problem is encountered */ - private void verifyMETAUpdated(FavoredNodesPlan expectedPlan) - throws IOException { + private void verifyMETAUpdated(FavoredNodesPlan expectedPlan) throws IOException { FavoredNodesPlan planFromMETA = rp.getRegionAssignmentSnapshot().getExistingAssignmentPlan(); assertTrue("The assignment plan is NOT consistent with the expected plan ", - planFromMETA.equals(expectedPlan)); + planFromMETA.equals(expectedPlan)); } /** * Verify the number of region movement is expected */ - private void verifyRegionMovementNum(int expected) - throws InterruptedException, IOException { + private void verifyRegionMovementNum(int expected) throws InterruptedException, IOException { SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); HMaster m = cluster.getMaster(); int lastRegionOpenedCount = m.getAssignmentManager().getNumRegionsOpened(); @@ -388,36 +388,35 @@ private void verifyRegionMovementNum(int expected) int currentRegionOpened, regionMovement; do { currentRegionOpened = m.getAssignmentManager().getNumRegionsOpened(); - regionMovement= currentRegionOpened - lastRegionOpenedCount; - LOG.debug("There are " + regionMovement + "/" + expected + - " regions moved after " + attempt + " attempts"); + regionMovement = currentRegionOpened - lastRegionOpenedCount; + LOG.debug("There are " + regionMovement + "/" + expected + " regions moved after " + attempt + + " attempts"); Thread.sleep((++attempt) * sleep); } while (regionMovement != expected && attempt <= retry); // update the lastRegionOpenedCount lastRegionOpenedCount = currentRegionOpened; - assertEquals("There are only " + regionMovement + " instead of " - + expected + " region movement for " + attempt + " attempts", expected, regionMovement); + assertEquals("There are only " + regionMovement + " instead of " + expected + + " region movement for " + attempt + " attempts", + expected, regionMovement); } /** - * Verify the number of user regions is assigned to the primary - * region server based on the plan is expected + * Verify the number of user regions is assigned to the primary region server based on the plan is + * expected * @param expectedNum the expected number of assigned regions * @throws IOException */ - private void verifyRegionOnPrimaryRS(int expectedNum) - throws IOException { + private void verifyRegionOnPrimaryRS(int expectedNum) throws IOException { lastRegionOnPrimaryRSCount = getNumRegionisOnPrimaryRS(); - assertEquals("Only " + expectedNum + " of user regions running " + - "on the primary region server", expectedNum , - lastRegionOnPrimaryRSCount); + assertEquals( + "Only " + expectedNum + " of user regions running " + "on the primary region server", + expectedNum, lastRegionOnPrimaryRSCount); } /** - * Verify all the online region servers has been updated to the - * latest assignment plan + * Verify all the online region servers has been updated to the latest assignment plan * @param plan * @throws IOException */ @@ -426,9 +425,9 @@ private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); for (int i = 0; i < SLAVES; i++) { HRegionServer rs = cluster.getRegionServer(i); - for (Region region: rs.getRegions(TableName.valueOf("testRegionAssignment"))) { - InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion( - 
region.getRegionInfo().getEncodedName()); + for (Region region : rs.getRegions(TableName.valueOf("testRegionAssignment"))) { + InetSocketAddress[] favoredSocketAddress = + rs.getFavoredNodesForRegion(region.getRegionInfo().getEncodedName()); String regionName = region.getRegionInfo().getRegionNameAsString(); List favoredServerList = plan.getAssignmentMap().get(regionName); @@ -438,9 +437,9 @@ private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException TableDescriptor desc = region.getTableDescriptor(); // Verify they are ROOT and hbase:meta regions since no favored nodes assertNull(favoredSocketAddress); - assertTrue("User region " + - region.getTableDescriptor().getTableName() + - " should have favored nodes", desc.isMetaRegion()); + assertTrue("User region " + region.getTableDescriptor().getTableName() + + " should have favored nodes", + desc.isMetaRegion()); } else { // For user region, the favored nodes in the region server should be // identical to favored nodes in the assignmentPlan @@ -449,15 +448,15 @@ private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException for (int j = 0; j < favoredServerList.size(); j++) { InetSocketAddress addrFromRS = favoredSocketAddress[j]; InetSocketAddress addrFromPlan = InetSocketAddress.createUnresolved( - favoredServerList.get(j).getHostname(), favoredServerList.get(j).getPort()); + favoredServerList.get(j).getHostname(), favoredServerList.get(j).getPort()); assertNotNull(addrFromRS); assertNotNull(addrFromPlan); - assertTrue("Region server " + rs.getServerName().getAddress() - + " has the " + positions[j] + - " for region " + region.getRegionInfo().getRegionNameAsString() + " is " + - addrFromRS + " which is inconsistent with the plan " - + addrFromPlan, addrFromRS.equals(addrFromPlan)); + assertTrue( + "Region server " + rs.getServerName().getAddress() + " has the " + positions[j] + + " for region " + region.getRegionInfo().getRegionNameAsString() + " is " + + addrFromRS + " which is inconsistent with the plan " + addrFromPlan, + addrFromRS.equals(addrFromPlan)); } } } @@ -465,10 +464,9 @@ private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException } /** - * Check whether regions are assigned to servers consistent with the explicit - * hints that are persisted in the hbase:meta table. - * Also keep track of the number of the regions are assigned to the - * primary region server. + * Check whether regions are assigned to servers consistent with the explicit hints that are + * persisted in the hbase:meta table. Also keep track of the number of the regions are assigned to + * the primary region server. 
* @return the number of regions are assigned to the primary region server * @throws IOException */ @@ -482,14 +480,13 @@ public boolean visit(Result result) throws IOException { try { @SuppressWarnings("deprecation") RegionInfo info = CatalogFamilyFormat.getRegionInfo(result); - if(info.getTable().getNamespaceAsString() + if (info.getTable().getNamespaceAsString() .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { return true; } - byte[] server = result.getValue(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); + byte[] server = result.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); byte[] favoredNodes = result.getValue(HConstants.CATALOG_FAMILY, - FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER); + FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER); // Add the favored nodes into assignment plan ServerName[] favoredServerList = FavoredNodeAssignmentHelper.getFavoredNodesList(favoredNodes); @@ -499,8 +496,7 @@ public boolean visit(Result result) throws IOException { if (info != null) { totalRegionNum.incrementAndGet(); if (server != null) { - ServerName serverName = - ServerName.valueOf(Bytes.toString(server), -1); + ServerName serverName = ServerName.valueOf(Bytes.toString(server), -1); if (favoredNodes != null) { String placement = "[NOT FAVORED NODE]"; for (int i = 0; i < favoredServerList.length; i++) { @@ -512,15 +508,13 @@ public boolean visit(Result result) throws IOException { break; } } - LOG.info(info.getRegionNameAsString() + " on " + - serverName + " " + placement); + LOG.info(info.getRegionNameAsString() + " on " + serverName + " " + placement); } else { - LOG.info(info.getRegionNameAsString() + " running on " + - serverName + " but there is no favored region server"); + LOG.info(info.getRegionNameAsString() + " running on " + serverName + + " but there is no favored region server"); } } else { - LOG.info(info.getRegionNameAsString() + - " not assigned to any server"); + LOG.info(info.getRegionNameAsString() + " not assigned to any server"); } } return true; @@ -531,10 +525,9 @@ public boolean visit(Result result) throws IOException { } }; MetaTableAccessor.fullScanRegions(CONNECTION, visitor); - LOG.info("There are " + regionOnPrimaryNum.intValue() + " out of " + - totalRegionNum.intValue() + " regions running on the primary" + - " region servers" ); - return regionOnPrimaryNum.intValue() ; + LOG.info("There are " + regionOnPrimaryNum.intValue() + " out of " + totalRegionNum.intValue() + + " regions running on the primary" + " region servers"); + return regionOnPrimaryNum.intValue(); } /** @@ -543,8 +536,7 @@ public boolean visit(Result result) throws IOException { * @param regionNum number of regions to create * @throws IOException */ - private static void createTable(TableName tableName, int regionNum) - throws IOException { + private static void createTable(TableName tableName, int regionNum) throws IOException { int expectedRegions = regionNum; byte[][] splitKeys = new byte[expectedRegions - 1][]; for (int i = 1; i < expectedRegions; i++) { @@ -553,13 +545,14 @@ private static void createTable(TableName tableName, int regionNum) } TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, splitKeys); try (RegionLocator r = CONNECTION.getRegionLocator(tableName)) { List regions = r.getAllRegionLocations(); - 
assertEquals("Tried to create " + expectedRegions + " regions " - + "but only found " + regions.size(), expectedRegions, regions.size()); + assertEquals( + "Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), + expectedRegions, regions.size()); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java index 320770787c4d..c4c20b1fcd3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestRegionPlacement2 { @ClassRule @@ -68,8 +68,8 @@ public class TestRegionPlacement2 { public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Enable the favored nodes based load balancer - conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - FavoredNodeLoadBalancer.class, LoadBalancer.class); + conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, FavoredNodeLoadBalancer.class, + LoadBalancer.class); conf.setBoolean("hbase.tests.use.shortcircuit.reads", false); TEST_UTIL.startMiniCluster(SLAVES); } @@ -82,11 +82,11 @@ public static void tearDownAfterClass() throws Exception { @Test public void testFavoredNodesPresentForRoundRobinAssignment() throws IOException { FavoredNodeLoadBalancer balancer = - (FavoredNodeLoadBalancer) LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); + (FavoredNodeLoadBalancer) LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); balancer.setClusterInfoProvider( new MasterClusterInfoProvider(TEST_UTIL.getMiniHBaseCluster().getMaster())); - balancer - .setFavoredNodesManager(TEST_UTIL.getMiniHBaseCluster().getMaster().getFavoredNodesManager()); + balancer.setFavoredNodesManager( + TEST_UTIL.getMiniHBaseCluster().getMaster().getFavoredNodesManager()); balancer.initialize(); List servers = new ArrayList<>(); for (int i = 0; i < SLAVES; i++) { @@ -94,23 +94,24 @@ public void testFavoredNodesPresentForRoundRobinAssignment() throws IOException servers.add(server); } List regions = new ArrayList<>(1); - RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + RegionInfo region = + RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); regions.add(region); - Map> assignmentMap = balancer.roundRobinAssignment(regions, - servers); + Map> assignmentMap = + balancer.roundRobinAssignment(regions, servers); Set serverBefore = assignmentMap.keySet(); List favoredNodesBefore = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); + ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(region); assertTrue(favoredNodesBefore.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); // the primary RS should be the one that the balancer's assignment returns - assertTrue(ServerName.isSameAddress(serverBefore.iterator().next(), - favoredNodesBefore.get(PRIMARY))); + assertTrue( + 
ServerName.isSameAddress(serverBefore.iterator().next(), favoredNodesBefore.get(PRIMARY))); // now remove the primary from the list of available servers List removedServers = removeMatchingServers(serverBefore, servers); // call roundRobinAssignment with the modified servers list assignmentMap = balancer.roundRobinAssignment(regions, servers); List favoredNodesAfter = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); + ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(region); assertTrue(favoredNodesAfter.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); // We don't expect the favored nodes assignments to change in multiple calls // to the roundRobinAssignment method in the balancer (relevant for AssignmentManager.assign @@ -119,10 +120,10 @@ public void testFavoredNodesPresentForRoundRobinAssignment() throws IOException Set serverAfter = assignmentMap.keySet(); // We expect the new RegionServer assignee to be one of the favored nodes // chosen earlier. - assertTrue(ServerName.isSameAddress(serverAfter.iterator().next(), - favoredNodesBefore.get(SECONDARY)) || - ServerName.isSameAddress(serverAfter.iterator().next(), - favoredNodesBefore.get(TERTIARY))); + assertTrue( + ServerName.isSameAddress(serverAfter.iterator().next(), favoredNodesBefore.get(SECONDARY)) + || ServerName.isSameAddress(serverAfter.iterator().next(), + favoredNodesBefore.get(TERTIARY))); // put back the primary in the list of available servers servers.addAll(removedServers); @@ -136,22 +137,21 @@ public void testFavoredNodesPresentForRoundRobinAssignment() throws IOException removeMatchingServers(favoredNodesAfter, servers); // call roundRobinAssignment with the modified servers list assignmentMap = balancer.roundRobinAssignment(regions, servers); - List favoredNodesNow = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); + List favoredNodesNow = ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(region); assertTrue(favoredNodesNow.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); - assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) && - !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) && - !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY))); + assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) + && !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) + && !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY))); } @Test public void testFavoredNodesPresentForRandomAssignment() throws IOException { FavoredNodeLoadBalancer balancer = - (FavoredNodeLoadBalancer) LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); + (FavoredNodeLoadBalancer) LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); balancer.setClusterInfoProvider( new MasterClusterInfoProvider(TEST_UTIL.getMiniHBaseCluster().getMaster())); - balancer - .setFavoredNodesManager(TEST_UTIL.getMiniHBaseCluster().getMaster().getFavoredNodesManager()); + balancer.setFavoredNodesManager( + TEST_UTIL.getMiniHBaseCluster().getMaster().getFavoredNodesManager()); balancer.initialize(); List servers = new ArrayList<>(); for (int i = 0; i < SLAVES; i++) { @@ -159,20 +159,21 @@ public void testFavoredNodesPresentForRandomAssignment() throws IOException { servers.add(server); } List regions = new ArrayList<>(1); - RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + RegionInfo region = + RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); regions.add(region); 
ServerName serverBefore = balancer.randomAssignment(region, servers); List favoredNodesBefore = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); + ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(region); assertTrue(favoredNodesBefore.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); // the primary RS should be the one that the balancer's assignment returns - assertTrue(ServerName.isSameAddress(serverBefore,favoredNodesBefore.get(PRIMARY))); + assertTrue(ServerName.isSameAddress(serverBefore, favoredNodesBefore.get(PRIMARY))); // now remove the primary from the list of servers removeMatchingServers(serverBefore, servers); // call randomAssignment with the modified servers list ServerName serverAfter = balancer.randomAssignment(region, servers); List favoredNodesAfter = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); + ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(region); assertTrue(favoredNodesAfter.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); // We don't expect the favored nodes assignments to change in multiple calls // to the randomAssignment method in the balancer (relevant for AssignmentManager.assign @@ -180,18 +181,17 @@ public void testFavoredNodesPresentForRandomAssignment() throws IOException { assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore)); // We expect the new RegionServer assignee to be one of the favored nodes // chosen earlier. - assertTrue(ServerName.isSameAddress(serverAfter, favoredNodesBefore.get(SECONDARY)) || - ServerName.isSameAddress(serverAfter, favoredNodesBefore.get(TERTIARY))); + assertTrue(ServerName.isSameAddress(serverAfter, favoredNodesBefore.get(SECONDARY)) + || ServerName.isSameAddress(serverAfter, favoredNodesBefore.get(TERTIARY))); // Make all the favored nodes unavailable for assignment removeMatchingServers(favoredNodesAfter, servers); // call randomAssignment with the modified servers list balancer.randomAssignment(region, servers); - List favoredNodesNow = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); + List favoredNodesNow = ((FavoredNodeLoadBalancer) balancer).getFavoredNodes(region); assertTrue(favoredNodesNow.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); - assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) && - !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) && - !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY))); + assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) + && !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) + && !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY))); } private List removeMatchingServers(Collection serversWithoutStartCode, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlansWithThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlansWithThrottle.java index 96b419b36372..780eba4cf24f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlansWithThrottle.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlansWithThrottle.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import java.util.ArrayList; @@ -40,12 +39,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestRegionPlansWithThrottle { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionPlansWithThrottle.class); + HBaseClassTestRule.forClass(TestRegionPlansWithThrottle.class); private static HMaster hMaster; @@ -67,7 +66,7 @@ public void testExecuteRegionPlansWithThrottling() throws Exception { final TableName tableName = TableName.valueOf("testExecuteRegionPlansWithThrottling"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build(); UTIL.getAdmin().createTable(tableDescriptor); Table table = UTIL.getConnection().getTable(tableName); @@ -87,9 +86,9 @@ public void testExecuteRegionPlansWithThrottling() throws Exception { List plans = new ArrayList<>(); List regionInfos = UTIL.getAdmin().getRegions(tableName); for (RegionInfo regionInfo : regionInfos) { - plans.add( - new RegionPlan(regionInfo, UTIL.getHBaseCluster().getRegionServer(0).getServerName(), - UTIL.getHBaseCluster().getRegionServer(1).getServerName())); + plans + .add(new RegionPlan(regionInfo, UTIL.getHBaseCluster().getRegionServer(0).getServerName(), + UTIL.getHBaseCluster().getRegionServer(1).getServerName())); } List successPlans = hMaster.executeRegionPlansWithThrottling(plans); Assert.assertEquals(regionInfos.size(), successPlans.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionState.java index e686610ba23f..518d040f362d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionState { @ClassRule @@ -45,14 +45,14 @@ public class TestRegionState { @Test public void testSerializeDeserialize() { final TableName tableName = TableName.valueOf("testtb"); - for (RegionState.State state: RegionState.State.values()) { + for (RegionState.State state : RegionState.State.values()) { testSerializeDeserialize(tableName, state); } } private void testSerializeDeserialize(final TableName tableName, final RegionState.State state) { RegionState state1 = - RegionState.createForTesting(RegionInfoBuilder.newBuilder(tableName).build(), state); + RegionState.createForTesting(RegionInfoBuilder.newBuilder(tableName).build(), state); ClusterStatusProtos.RegionState protobuf1 = state1.convert(); RegionState state2 = RegionState.convert(protobuf1); ClusterStatusProtos.RegionState protobuf2 = state1.convert(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java index c3955f7d2915..89989e90c984 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import edu.umd.cs.findbugs.annotations.Nullable; @@ -25,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -59,12 +57,12 @@ /** * Test for RegionsRecoveryChore */ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionsRecoveryChore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionsRecoveryChore.class); + HBaseClassTestRule.forClass(TestRegionsRecoveryChore.class); private static final Logger LOG = LoggerFactory.getLogger(TestRegionsRecoveryChore.class); @@ -79,19 +77,18 @@ public class TestRegionsRecoveryChore { private RegionsRecoveryChore regionsRecoveryChore; private static int regionNo; - public static final byte[][] REGION_NAME_LIST = new byte[][]{ - new byte[]{114, 101, 103, 105, 111, 110, 50, 49, 95, 51}, - new byte[]{114, 101, 103, 105, 111, 110, 50, 53, 95, 51}, - new byte[]{114, 101, 103, 105, 111, 110, 50, 54, 95, 52}, - new byte[]{114, 101, 103, 105, 111, 110, 51, 50, 95, 53}, - new byte[]{114, 101, 103, 105, 111, 110, 51, 49, 95, 52}, - new byte[]{114, 101, 103, 105, 111, 110, 51, 48, 95, 51}, - new byte[]{114, 101, 103, 105, 111, 110, 50, 48, 95, 50}, - new byte[]{114, 101, 103, 105, 111, 110, 50, 52, 95, 50}, - new byte[]{114, 101, 103, 105, 111, 110, 50, 57, 95, 50}, - new byte[]{114, 101, 103, 105, 111, 110, 51, 53, 95, 50}, - new byte[]{114, 101, 103, 105, 111, 110, 49, 48, 56, 95, 49, 49} - }; + public static final byte[][] REGION_NAME_LIST = + new byte[][] { new byte[] { 114, 101, 103, 105, 111, 110, 50, 49, 95, 51 }, + new byte[] { 114, 101, 103, 105, 111, 110, 50, 53, 95, 51 }, + new byte[] { 114, 101, 
103, 105, 111, 110, 50, 54, 95, 52 }, + new byte[] { 114, 101, 103, 105, 111, 110, 51, 50, 95, 53 }, + new byte[] { 114, 101, 103, 105, 111, 110, 51, 49, 95, 52 }, + new byte[] { 114, 101, 103, 105, 111, 110, 51, 48, 95, 51 }, + new byte[] { 114, 101, 103, 105, 111, 110, 50, 48, 95, 50 }, + new byte[] { 114, 101, 103, 105, 111, 110, 50, 52, 95, 50 }, + new byte[] { 114, 101, 103, 105, 111, 110, 50, 57, 95, 50 }, + new byte[] { 114, 101, 103, 105, 111, 110, 51, 53, 95, 50 }, + new byte[] { 114, 101, 103, 105, 111, 110, 49, 48, 56, 95, 49, 49 } }; private Configuration getCustomConf() { Configuration conf = HBASE_TESTING_UTILITY.getConfiguration(); @@ -115,21 +112,20 @@ public void tearDown() throws Exception { public void testRegionReopensWithStoreRefConfig() throws Exception { regionNo = 0; ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(4); - final Map serverMetricsMap = - clusterMetrics.getLiveServerMetrics(); + final Map serverMetricsMap = clusterMetrics.getLiveServerMetrics(); LOG.debug("All Region Names with refCount...."); for (ServerMetrics serverMetrics : serverMetricsMap.values()) { Map regionMetricsMap = serverMetrics.getRegionMetrics(); for (RegionMetrics regionMetrics : regionMetricsMap.values()) { - LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + - regionMetrics.getStoreRefCount()); + LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + + regionMetrics.getStoreRefCount()); } } Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics); Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager); for (byte[] regionName : REGION_NAME_LIST) { Mockito.when(assignmentManager.getRegionInfo(regionName)) - .thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName)); + .thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName)); } Stoppable stoppable = new StoppableImplementation(); Configuration configuration = getCustomConf(); @@ -144,29 +140,27 @@ public void testRegionReopensWithStoreRefConfig() throws Exception { // Verify that we need to reopen total 3 regions that have refCount > 300 Mockito.verify(hMaster, Mockito.times(3)).getAssignmentManager(); - Mockito.verify(assignmentManager, Mockito.times(3)) - .getRegionInfo(Mockito.any()); + Mockito.verify(assignmentManager, Mockito.times(3)).getRegionInfo(Mockito.any()); } @Test public void testRegionReopensWithLessThreshold() throws Exception { regionNo = 0; ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(4); - final Map serverMetricsMap = - clusterMetrics.getLiveServerMetrics(); + final Map serverMetricsMap = clusterMetrics.getLiveServerMetrics(); LOG.debug("All Region Names with refCount...."); for (ServerMetrics serverMetrics : serverMetricsMap.values()) { Map regionMetricsMap = serverMetrics.getRegionMetrics(); for (RegionMetrics regionMetrics : regionMetricsMap.values()) { - LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + - regionMetrics.getStoreRefCount()); + LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + + regionMetrics.getStoreRefCount()); } } Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics); Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager); for (byte[] regionName : REGION_NAME_LIST) { Mockito.when(assignmentManager.getRegionInfo(regionName)) - .thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName)); + 
.thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName)); } Stoppable stoppable = new StoppableImplementation(); Configuration configuration = getCustomConf(); @@ -181,29 +175,27 @@ public void testRegionReopensWithLessThreshold() throws Exception { // Verify that we need to reopen only 1 region with refCount > 400 Mockito.verify(hMaster, Mockito.times(1)).getAssignmentManager(); - Mockito.verify(assignmentManager, Mockito.times(1)) - .getRegionInfo(Mockito.any()); + Mockito.verify(assignmentManager, Mockito.times(1)).getRegionInfo(Mockito.any()); } @Test public void testRegionReopensWithoutStoreRefConfig() throws Exception { regionNo = 0; ClusterMetrics clusterMetrics = TestRegionsRecoveryChore.getClusterMetrics(10); - final Map serverMetricsMap = - clusterMetrics.getLiveServerMetrics(); + final Map serverMetricsMap = clusterMetrics.getLiveServerMetrics(); LOG.debug("All Region Names with refCount...."); for (ServerMetrics serverMetrics : serverMetricsMap.values()) { Map regionMetricsMap = serverMetrics.getRegionMetrics(); for (RegionMetrics regionMetrics : regionMetricsMap.values()) { - LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + - regionMetrics.getStoreRefCount()); + LOG.debug("name: " + new String(regionMetrics.getRegionName()) + " refCount: " + + regionMetrics.getStoreRefCount()); } } Mockito.when(hMaster.getClusterMetrics()).thenReturn(clusterMetrics); Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager); for (byte[] regionName : REGION_NAME_LIST) { Mockito.when(assignmentManager.getRegionInfo(regionName)) - .thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName)); + .thenReturn(TestRegionsRecoveryChore.getRegionInfo(regionName)); } Stoppable stoppable = new StoppableImplementation(); Configuration configuration = getCustomConf(); @@ -218,8 +210,7 @@ public void testRegionReopensWithoutStoreRefConfig() throws Exception { // default maxCompactedStoreFileRefCount is -1 (no regions to be reopened using AM) Mockito.verify(hMaster, Mockito.times(0)).getAssignmentManager(); - Mockito.verify(assignmentManager, Mockito.times(0)) - .getRegionInfo(Mockito.any()); + Mockito.verify(assignmentManager, Mockito.times(0)).getRegionInfo(Mockito.any()); } private static ClusterMetrics getClusterMetrics(int noOfLiveServer) { @@ -374,7 +365,8 @@ public Map getRegionMetrics() { return regionMetricsMap; } - @Override public Map getUserMetrics() { + @Override + public Map getUserMetrics() { return new HashMap<>(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java index 4bcc97feacc4..1b78dc210424 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryConfigManager.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import static org.junit.Assert.assertFalse; @@ -43,7 +42,7 @@ public class TestRegionsRecoveryConfigManager { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionsRecoveryConfigManager.class); + HBaseClassTestRule.forClass(TestRegionsRecoveryConfigManager.class); private static final HBaseTestingUtil HBASE_TESTING_UTILITY = new HBaseTestingUtil(); @@ -61,7 +60,7 @@ public void setup() throws Exception { conf.unset("hbase.regions.recovery.store.file.ref.count"); conf.unset("hbase.master.regions.recovery.check.interval"); StartTestingClusterOption option = StartTestingClusterOption.builder() - .masterClass(TestHMaster.class).numRegionServers(1).numDataNodes(1).build(); + .masterClass(TestHMaster.class).numRegionServers(1).numDataNodes(1).build(); HBASE_TESTING_UTILITY.startMiniCluster(option); cluster = HBASE_TESTING_UTILITY.getMiniHBaseCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java index c40317a4a923..34232c79be1d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -89,7 +89,8 @@ public void testRestart() throws IOException, InterruptedException { // let's cleanup the WAL directory UTIL.getTestFileSystem().delete(new Path(CommonFSUtils.getWALRootDir(UTIL.getConfiguration()), - HConstants.HREGION_LOGDIR_NAME), true); + HConstants.HREGION_LOGDIR_NAME), + true); // restart the cluster UTIL.getMiniHBaseCluster().startMaster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java index db648d3a8459..0f82d29b1d22 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestart.java @@ -235,7 +235,7 @@ private void setupCluster() throws Exception, IOException, InterruptedException // Enable retain assignment during ServerCrashProcedure UTIL.getConfiguration().setBoolean(ServerCrashProcedure.MASTER_SCP_RETAIN_ASSIGNMENT, true); UTIL.startMiniCluster(StartTestingClusterOption.builder().masterClass(HMasterForTest.class) - .numRegionServers(NUM_OF_RS).build()); + .numRegionServers(NUM_OF_RS).build()); // Turn off balancer UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java index 2cbb2c7e2f63..bb43b9c8aead 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRetainAssignmentOnRestartSplitWithoutZk.java @@ -24,8 +24,7 @@ import org.junit.experimental.categories.Category; @Category({ MasterTests.class, 
MediumTests.class }) -public class TestRetainAssignmentOnRestartSplitWithoutZk - extends TestRetainAssignmentOnRestart { +public class TestRetainAssignmentOnRestartSplitWithoutZk extends TestRetainAssignmentOnRestart { @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java index 878bb4f0b797..93753baccc28 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ * Tests the restarting of everything as done during rolling restarts. */ @RunWith(Parameterized.class) -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestRollingRestart { @ClassRule @@ -89,11 +89,10 @@ public void testBasicRollingRestart() throws Exception { // Start the cluster log("Starting cluster"); Configuration conf = HBaseConfiguration.create(); - conf.setBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, - splitWALCoordinatedByZK); + conf.setBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, splitWALCoordinatedByZK); TEST_UTIL = new HBaseTestingUtil(conf); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS) + .numRegionServers(NUM_RS).numDataNodes(NUM_RS).build(); TEST_UTIL.startMiniCluster(option); SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); log("Waiting for active/ready master"); @@ -102,7 +101,7 @@ public void testBasicRollingRestart() throws Exception { // Create a table with regions final TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[\\[|\\]]", "-")); - byte [] family = Bytes.toBytes("family"); + byte[] family = Bytes.toBytes("family"); log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions"); Table ht = TEST_UTIL.createMultiRegionTable(tableName, family, NUM_REGIONS_TO_CREATE); int numRegions = -1; @@ -182,14 +181,12 @@ public void testBasicRollingRestart() throws Exception { // RegionServer Restarts // Bring them down, one at a time, waiting between each to complete - List regionServers = - cluster.getLiveRegionServerThreads(); + List regionServers = cluster.getLiveRegionServerThreads(); int num = 1; int total = regionServers.size(); for (RegionServerThread rst : regionServers) { ServerName serverName = rst.getRegionServer().getServerName(); - log("Stopping region server " + num + " of " + total + " [ " + - serverName + "]"); + log("Stopping region server " + num + " of " + total + " [ " + serverName + "]"); rst.getRegionServer().stop("Stopping RS during rolling restart"); cluster.hbaseCluster.waitOnRegionServer(rst); log("Waiting for RS shutdown to be handled by master"); @@ -232,16 +229,16 @@ private boolean isDeadServerSCPExecuted(ServerName serverName) throws IOExceptio && ((ServerCrashProcedure) p).getServerName().equals(serverName)); } - private void waitForRSShutdownToStartAndFinish(MasterThread activeMaster, - ServerName serverName) throws InterruptedException, 
IOException { + private void waitForRSShutdownToStartAndFinish(MasterThread activeMaster, ServerName serverName) + throws InterruptedException, IOException { ServerManager sm = activeMaster.getMaster().getServerManager(); // First wait for it to be in dead list while (!sm.getDeadServers().isDeadServer(serverName)) { log("Waiting for [" + serverName + "] to be listed as dead in master"); Thread.sleep(1); } - log("Server [" + serverName + "] marked as dead, waiting for it to " + - "finish dead processing"); + log( + "Server [" + serverName + "] marked as dead, waiting for it to " + "finish dead processing"); TEST_UTIL.waitFor(60000, () -> isDeadServerSCPExecuted(serverName)); @@ -264,14 +261,12 @@ private int getNumberOfOnlineRegions(SingleProcessHBaseCluster cluster) { return numFound; } - private void assertRegionsAssigned(SingleProcessHBaseCluster cluster, - Set<String> expectedRegions) throws IOException { + private void assertRegionsAssigned(SingleProcessHBaseCluster cluster, Set<String> expectedRegions) + throws IOException { int numFound = getNumberOfOnlineRegions(cluster); if (expectedRegions.size() > numFound) { - log("Expected to find " + expectedRegions.size() + " but only found" - + " " + numFound); - NavigableSet<String> foundRegions = - HBaseTestingUtil.getAllOnlineRegions(cluster); + log("Expected to find " + expectedRegions.size() + " but only found" + " " + numFound); + NavigableSet<String> foundRegions = HBaseTestingUtil.getAllOnlineRegions(cluster); for (String region : expectedRegions) { if (!foundRegions.contains(region)) { log("Missing region: " + region); @@ -280,8 +275,8 @@ private void assertRegionsAssigned(SingleProcessHBaseCluster cluster, assertEquals(expectedRegions.size(), numFound); } else if (expectedRegions.size() < numFound) { int doubled = numFound - expectedRegions.size(); - log("Expected to find " + expectedRegions.size() + " but found" - + " " + numFound + " (" + doubled + " double assignments?)"); + log("Expected to find " + expectedRegions.size() + " but found" + " " + numFound + " (" + + doubled + " double assignments?)"); NavigableSet<String> doubleRegions = getDoubleAssignedRegions(cluster); for (String region : doubleRegions) { log("Region is double assigned: " + region); @@ -292,14 +287,14 @@ private void assertRegionsAssigned(SingleProcessHBaseCluster cluster, } } - private NavigableSet<String> getDoubleAssignedRegions( - SingleProcessHBaseCluster cluster) throws IOException { + private NavigableSet<String> getDoubleAssignedRegions(SingleProcessHBaseCluster cluster) + throws IOException { NavigableSet<String> online = new TreeSet<>(); NavigableSet<String> doubled = new TreeSet<>(); for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { - for (RegionInfo region : ProtobufUtil.getOnlineRegions( - rst.getRegionServer().getRSRpcServices())) { - if(!online.add(region.getRegionNameAsString())) { + for (RegionInfo region : ProtobufUtil + .getOnlineRegions(rst.getRegionServer().getRSRpcServices())) { + if (!online.add(region.getRegionNameAsString())) { doubled.add(region.getRegionNameAsString()); } } @@ -307,10 +302,8 @@ private NavigableSet<String> getDoubleAssignedRegions( return doubled; } - @Parameterized.Parameters public static Collection<Boolean> coordinatedByZK() { return Arrays.asList(false, true); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRoundRobinAssignmentOnRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRoundRobinAssignmentOnRestart.java index 2b63e13e7de7..d281c1be1f2a 100644 ---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRoundRobinAssignmentOnRestart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRoundRobinAssignmentOnRestart.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -112,6 +111,6 @@ public void test() throws Exception { cluster.getMaster().getAssignmentManager().getRegionsOnServer(newTestServer); LOG.debug("RegionServer {} has {} regions", newTestServer, newRegionInfos.size()); assertTrue("Should not retain all regions when restart", - newRegionInfos.size() < regionInfos.size()); + newRegionInfos.size() < regionInfos.size()); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRoundRobinAssignmentOnRestartSplitWithoutZk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRoundRobinAssignmentOnRestartSplitWithoutZk.java index d16f36f9e577..d85ada743425 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRoundRobinAssignmentOnRestartSplitWithoutZk.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRoundRobinAssignmentOnRestartSplitWithoutZk.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java index ed50c883a764..bb91e9fbac69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public class TestServerCrashProcedureCarryingMetaStuck { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestServerCrashProcedureCarryingMetaStuck.class); + HBaseClassTestRule.forClass(TestServerCrashProcedureCarryingMetaStuck.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -77,16 +77,16 @@ public void test() throws Exception { long procId = master.getMasterProcedureExecutor().submitProcedure(proc); proc.waitUntilArrive(); try (AsyncConnection conn = - ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) { + ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) { AsyncAdmin admin = conn.getAdmin(); CompletableFuture future = admin.move(hri.getRegionName()); rs.abort("For testing!"); UTIL.waitFor(30000, () -> executor.getProcedures().stream() - .filter(p -> p instanceof TransitRegionStateProcedure) - .map(p -> (TransitRegionStateProcedure) p) - .anyMatch(p -> Bytes.equals(hri.getRegionName(), p.getRegion().getRegionName()))); + .filter(p -> p instanceof TransitRegionStateProcedure) + .map(p -> (TransitRegionStateProcedure) p) + .anyMatch(p -> Bytes.equals(hri.getRegionName(), p.getRegion().getRegionName()))); proc.resume(); UTIL.waitFor(30000, () -> executor.isFinished(procId)); // see whether the move region procedure can finish properly diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureStuck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureStuck.java index 79bb53ec6e49..68355f33c76c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureStuck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureStuck.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ public class TestServerCrashProcedureStuck { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestServerCrashProcedureStuck.class); + HBaseClassTestRule.forClass(TestServerCrashProcedureStuck.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -86,16 +86,16 @@ public void test() throws Exception { long procId = master.getMasterProcedureExecutor().submitProcedure(proc); proc.waitUntilArrive(); try (AsyncConnection conn = - ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) { + ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) { AsyncAdmin admin = conn.getAdmin(); CompletableFuture future = admin.move(hri.getRegionName()); rs.abort("For testing!"); UTIL.waitFor(30000, () -> executor.getProcedures().stream() - .filter(p -> p instanceof TransitRegionStateProcedure) - .map(p -> (TransitRegionStateProcedure) p) - .anyMatch(p -> Bytes.equals(hri.getRegionName(), p.getRegion().getRegionName()))); + .filter(p -> p instanceof TransitRegionStateProcedure) + .map(p -> (TransitRegionStateProcedure) p) + .anyMatch(p -> Bytes.equals(hri.getRegionName(), p.getRegion().getRegionName()))); proc.resume(); UTIL.waitFor(30000, () -> executor.isFinished(procId)); // see whether the move region procedure can finish properly diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestShutdownBackupMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestShutdownBackupMaster.java index c776cb268e8d..a31624604193 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestShutdownBackupMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestShutdownBackupMaster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,8 +72,8 @@ protected void initClusterSchemaService() throws IOException, InterruptedExcepti @BeforeClass public static void setUpBeforeClass() throws Exception { UTIL.getConfiguration().setClass(HConstants.MASTER_IMPL, MockHMaster.class, HMaster.class); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(2).numRegionServers(2).numDataNodes(2).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(2) + .numRegionServers(2).numDataNodes(2).build(); UTIL.startMiniCluster(option); UTIL.waitUntilAllSystemRegionsAssigned(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestShutdownWithNoRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestShutdownWithNoRegionServer.java index f3a4cfff4e3f..21ed71dd4108 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestShutdownWithNoRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestShutdownWithNoRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestShutdownWithNoRegionServer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestShutdownWithNoRegionServer.class); + HBaseClassTestRule.forClass(TestShutdownWithNoRegionServer.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java index eaff93796583..84dfc710bfef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java @@ -72,7 +72,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestSplitLogManager { @ClassRule @@ -204,8 +204,8 @@ private Task findOrCreateOrphanTask(String path) { }); } - private String submitTaskAndWait(TaskBatch batch, String name) throws KeeperException, - InterruptedException { + private String submitTaskAndWait(TaskBatch batch, String name) + throws KeeperException, InterruptedException { String tasknode = ZKSplitLog.getEncodedNodeName(zkw, name); NodeCreationListener listener = new NodeCreationListener(zkw, tasknode); zkw.registerListener(listener); @@ -247,36 +247,34 @@ public void testOrphanTaskAcquisition() throws Exception { String tasknode = ZKSplitLog.getEncodedNodeName(zkw, "orphan/test/slash"); SplitLogTask slt = new SplitLogTask.Owned(master.getServerName()); zkw.getRecoverableZooKeeper().create(tasknode, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, - CreateMode.PERSISTENT); + CreateMode.PERSISTENT); slm = new SplitLogManager(master, conf); - waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to/2); + waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to / 2); Task task = findOrCreateOrphanTask(tasknode); assertTrue(task.isOrphan()); - waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); + waitForCounter(tot_mgr_heartbeat, 0, 1, to / 2); assertFalse(task.isUnassigned()); long curt = EnvironmentEdgeManager.currentTime(); - assertTrue((task.last_update <= curt) && - (task.last_update > (curt - 1000))); + assertTrue((task.last_update <= curt) && (task.last_update > (curt - 1000))); LOG.info("waiting for manager to resubmit the orphan task"); - waitForCounter(tot_mgr_resubmit, 0, 1, to + to/2); + waitForCounter(tot_mgr_resubmit, 0, 1, to + to / 2); assertTrue(task.isUnassigned()); - waitForCounter(tot_mgr_rescan, 0, 1, to + to/2); + waitForCounter(tot_mgr_rescan, 0, 1, to + to / 2); } @Test public void testUnassignedOrphan() throws Exception { - LOG.info("TestUnassignedOrphan - an unassigned task is resubmitted at" + - " startup"); + LOG.info("TestUnassignedOrphan - an unassigned task is resubmitted at" + " startup"); String tasknode = ZKSplitLog.getEncodedNodeName(zkw, "orphan/test/slash"); - //create an unassigned orphan task + // create an unassigned orphan task SplitLogTask slt = new SplitLogTask.Unassigned(master.getServerName()); zkw.getRecoverableZooKeeper().create(tasknode, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, - CreateMode.PERSISTENT); + CreateMode.PERSISTENT); int version = ZKUtil.checkExists(zkw, tasknode); slm = new SplitLogManager(master, conf); - waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to/2); + waitForCounter(tot_mgr_orphan_task_acquired, 0, 
1, to / 2); Task task = findOrCreateOrphanTask(tasknode); assertTrue(task.isOrphan()); assertTrue(task.isUnassigned()); @@ -307,21 +305,21 @@ public void testMultipleResubmits() throws Exception { final ServerName worker3 = ServerName.valueOf("worker3,1,1"); SplitLogTask slt = new SplitLogTask.Owned(worker1); ZKUtil.setData(zkw, tasknode, slt.toByteArray()); - waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); - waitForCounter(tot_mgr_resubmit, 0, 1, to + to/2); + waitForCounter(tot_mgr_heartbeat, 0, 1, to / 2); + waitForCounter(tot_mgr_resubmit, 0, 1, to + to / 2); int version1 = ZKUtil.checkExists(zkw, tasknode); assertTrue(version1 > version); slt = new SplitLogTask.Owned(worker2); ZKUtil.setData(zkw, tasknode, slt.toByteArray()); - waitForCounter(tot_mgr_heartbeat, 1, 2, to/2); - waitForCounter(tot_mgr_resubmit, 1, 2, to + to/2); + waitForCounter(tot_mgr_heartbeat, 1, 2, to / 2); + waitForCounter(tot_mgr_resubmit, 1, 2, to + to / 2); int version2 = ZKUtil.checkExists(zkw, tasknode); assertTrue(version2 > version1); slt = new SplitLogTask.Owned(worker3); ZKUtil.setData(zkw, tasknode, slt.toByteArray()); - waitForCounter(tot_mgr_heartbeat, 2, 3, to/2); - waitForCounter(tot_mgr_resubmit_threshold_reached, 0, 1, to + to/2); - Thread.sleep(to + to/2); + waitForCounter(tot_mgr_heartbeat, 2, 3, to / 2); + waitForCounter(tot_mgr_resubmit_threshold_reached, 0, 1, to + to / 2); + Thread.sleep(to + to / 2); assertEquals(2L, tot_mgr_resubmit.sum() - tot_mgr_resubmit_force.sum()); } @@ -337,22 +335,22 @@ public void testRescanCleanup() throws Exception { final ServerName worker1 = ServerName.valueOf("worker1,1,1"); SplitLogTask slt = new SplitLogTask.Owned(worker1); ZKUtil.setData(zkw, tasknode, slt.toByteArray()); - waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); + waitForCounter(tot_mgr_heartbeat, 0, 1, to / 2); waitForCounter(new Expr() { @Override public long eval() { return (tot_mgr_resubmit.sum() + tot_mgr_resubmit_failed.sum()); } - }, 0, 1, 5*60000); // wait long enough - Assert.assertEquals("Could not run test. Lost ZK connection?", - 0, tot_mgr_resubmit_failed.sum()); + }, 0, 1, 5 * 60000); // wait long enough + Assert.assertEquals("Could not run test. Lost ZK connection?", 0, + tot_mgr_resubmit_failed.sum()); int version1 = ZKUtil.checkExists(zkw, tasknode); assertTrue(version1 > version); byte[] taskstate = ZKUtil.getData(zkw, tasknode); slt = SplitLogTask.parseFrom(taskstate); assertTrue(slt.isUnassigned(master.getServerName())); - waitForCounter(tot_mgr_rescan_deleted, 0, 1, to/2); + waitForCounter(tot_mgr_rescan_deleted, 0, 1, to / 2); } @Test @@ -370,7 +368,7 @@ public void testTaskDone() throws Exception { batch.wait(); } } - waitForCounter(tot_mgr_task_deleted, 0, 1, to/2); + waitForCounter(tot_mgr_task_deleted, 0, 1, to / 2); assertTrue(ZKUtil.checkExists(zkw, tasknode) == -1); } @@ -392,7 +390,7 @@ public void testTaskErr() throws Exception { batch.wait(); } } - waitForCounter(tot_mgr_task_deleted, 0, 1, to/2); + waitForCounter(tot_mgr_task_deleted, 0, 1, to / 2); assertTrue(ZKUtil.checkExists(zkw, tasknode) == -1); conf.setInt("hbase.splitlog.max.resubmit", ZKSplitLogManagerCoordination.DEFAULT_MAX_RESUBMIT); } @@ -414,7 +412,7 @@ public void testTaskResigned() throws Exception { ZKUtil.checkExists(zkw, tasknode); // Could be small race here. 
if (tot_mgr_resubmit.sum() == 0) { - waitForCounter(tot_mgr_resubmit, 0, 1, to/2); + waitForCounter(tot_mgr_resubmit, 0, 1, to / 2); } assertEquals(1, tot_mgr_resubmit.sum()); @@ -425,25 +423,24 @@ public void testTaskResigned() throws Exception { @Test public void testUnassignedTimeout() throws Exception { - LOG.info("TestUnassignedTimeout - iff all tasks are unassigned then" + - " resubmit"); + LOG.info("TestUnassignedTimeout - iff all tasks are unassigned then" + " resubmit"); // create an orphan task in OWNED state String tasknode1 = ZKSplitLog.getEncodedNodeName(zkw, "orphan/1"); final ServerName worker1 = ServerName.valueOf("worker1,1,1"); SplitLogTask slt = new SplitLogTask.Owned(worker1); zkw.getRecoverableZooKeeper().create(tasknode1, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, - CreateMode.PERSISTENT); + CreateMode.PERSISTENT); slm = new SplitLogManager(master, conf); - waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to/2); + waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to / 2); // submit another task which will stay in unassigned mode TaskBatch batch = new TaskBatch(); submitTaskAndWait(batch, "foo/1"); // keep updating the orphan owned node every to/2 seconds - for (int i = 0; i < (3 * to)/100; i++) { + for (int i = 0; i < (3 * to) / 100; i++) { Thread.sleep(100); final ServerName worker2 = ServerName.valueOf("worker1,1,1"); slt = new SplitLogTask.Owned(worker2); @@ -453,10 +450,10 @@ public void testUnassignedTimeout() throws Exception { // since we have stopped heartbeating the owned node therefore it should // get resubmitted LOG.info("waiting for manager to resubmit the orphan task"); - waitForCounter(tot_mgr_resubmit, 0, 1, to + to/2); + waitForCounter(tot_mgr_resubmit, 0, 1, to + to / 2); // now all the nodes are unassigned. manager should post another rescan - waitForCounter(tot_mgr_resubmit_unassigned, 0, 1, 2 * to + to/2); + waitForCounter(tot_mgr_resubmit_unassigned, 0, 1, 2 * to + to / 2); } @Test @@ -473,14 +470,14 @@ public void testDeadWorker() throws Exception { SplitLogTask slt = new SplitLogTask.Owned(worker1); ZKUtil.setData(zkw, tasknode, slt.toByteArray()); if (tot_mgr_heartbeat.sum() == 0) { - waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); + waitForCounter(tot_mgr_heartbeat, 0, 1, to / 2); } slm.handleDeadWorker(worker1); if (tot_mgr_resubmit.sum() == 0) { - waitForCounter(tot_mgr_resubmit, 0, 1, to+to/2); + waitForCounter(tot_mgr_resubmit, 0, 1, to + to / 2); } if (tot_mgr_resubmit_dead_server_task.sum() == 0) { - waitForCounter(tot_mgr_resubmit_dead_server_task, 0, 1, to + to/2); + waitForCounter(tot_mgr_resubmit_dead_server_task, 0, 1, to + to / 2); } int version1 = ZKUtil.checkExists(zkw, tasknode); @@ -502,7 +499,7 @@ public void testWorkerCrash() throws Exception { SplitLogTask slt = new SplitLogTask.Owned(worker1); ZKUtil.setData(zkw, tasknode, slt.toByteArray()); if (tot_mgr_heartbeat.sum() == 0) { - waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); + waitForCounter(tot_mgr_heartbeat, 0, 1, to / 2); } // Not yet resubmitted. 
@@ -522,9 +519,9 @@ public void testEmptyLogDir() throws Exception { LOG.info("testEmptyLogDir"); slm = new SplitLogManager(master, conf); FileSystem fs = TEST_UTIL.getTestFileSystem(); - Path emptyLogDirPath = new Path(new Path(fs.getWorkingDirectory(), - HConstants.HREGION_LOGDIR_NAME), - ServerName.valueOf("emptyLogDir", 1, 1).toString()); + Path emptyLogDirPath = + new Path(new Path(fs.getWorkingDirectory(), HConstants.HREGION_LOGDIR_NAME), + ServerName.valueOf("emptyLogDir", 1, 1).toString()); fs.mkdirs(emptyLogDirPath); slm.splitLogDistributed(emptyLogDirPath); assertFalse(fs.exists(emptyLogDirPath)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitRegionWhileRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitRegionWhileRSCrash.java index 22ba74c30adf..e3e879bdbcd9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitRegionWhileRSCrash.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitRegionWhileRSCrash.java @@ -45,12 +45,12 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestSplitRegionWhileRSCrash { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitRegionWhileRSCrash.class); + HBaseClassTestRule.forClass(TestSplitRegionWhileRSCrash.class); private static final Logger LOG = LoggerFactory.getLogger(TestSplitRegionWhileRSCrash.class); @@ -60,7 +60,6 @@ public class TestSplitRegionWhileRSCrash { private static byte[] CF = Bytes.toBytes("cf"); private static Table TABLE; - @BeforeClass public static void setupCluster() throws Exception { UTIL.startMiniCluster(1); @@ -78,14 +77,14 @@ public static void cleanupTest() throws Exception { @Test public void test() throws Exception { MasterProcedureEnv env = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); final ProcedureExecutor<MasterProcedureEnv> executor = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); List<RegionInfo> regionInfos = ADMIN.getRegions(TABLE_NAME); // Since a flush request will be sent while initializing SplitTableRegionProcedure // Create SplitTableRegionProcedure first before put data SplitTableRegionProcedure splitProcedure = - new SplitTableRegionProcedure(env, regionInfos.get(0), Bytes.toBytes("row5")); + new SplitTableRegionProcedure(env, regionInfos.get(0), Bytes.toBytes("row5")); // write some rows to the table LOG.info("Begin to put data"); for (int i = 0; i < 10; i++) { @@ -98,10 +97,10 @@ public void test() throws Exception { LOG.info("SplitProcedure submitted"); UTIL.waitFor(30000, () -> executor.getProcedures().stream().filter(p -> p instanceof TransitRegionStateProcedure) - .map(p -> (TransitRegionStateProcedure) p) - .anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); + .map(p -> (TransitRegionStateProcedure) p) + .anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); UTIL.getMiniHBaseCluster() - .killRegionServer(UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName()); + .killRegionServer(UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName()); UTIL.getMiniHBaseCluster().startRegionServer(); UTIL.waitUntilNoRegionsInTransition(); Scan scan = new Scan(); diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java index e97d43b724e5..a8ed646d2321 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; import static org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType.SPLIT_WAL; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -53,11 +54,13 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @Category({ MasterTests.class, LargeTests.class }) @@ -95,8 +98,8 @@ public void teardown() throws Exception { public void testAcquireAndRelease() throws Exception { List testProcedures = new ArrayList<>(); for (int i = 0; i < 4; i++) { - testProcedures.add(new FakeServerProcedure( - TEST_UTIL.getHBaseCluster().getServerHoldingMeta())); + testProcedures + .add(new FakeServerProcedure(TEST_UTIL.getHBaseCluster().getServerHoldingMeta())); } ServerName server = splitWALManager.acquireSplitWALWorker(testProcedures.get(0)); Assert.assertNotNull(server); @@ -121,8 +124,8 @@ public void testAcquireAndRelease() throws Exception { public void testAddNewServer() throws Exception { List testProcedures = new ArrayList<>(); for (int i = 0; i < 4; i++) { - testProcedures.add(new FakeServerProcedure( - TEST_UTIL.getHBaseCluster().getServerHoldingMeta())); + testProcedures + .add(new FakeServerProcedure(TEST_UTIL.getHBaseCluster().getServerHoldingMeta())); } ServerName server = splitWALManager.acquireSplitWALWorker(testProcedures.get(0)); Assert.assertNotNull(server); @@ -181,13 +184,13 @@ public void testAcquireAndReleaseSplitWALWorker() throws Exception { new FakeServerProcedure(TEST_UTIL.getHBaseCluster().getRegionServer(i).getServerName()); testProcedures.add(procedure); ProcedureTestingUtility.submitProcedure(masterPE, procedure, HConstants.NO_NONCE, - HConstants.NO_NONCE); + HConstants.NO_NONCE); } TEST_UTIL.waitFor(10000, () -> testProcedures.get(2).isWorkerAcquired()); FakeServerProcedure failedProcedure = new FakeServerProcedure(TEST_UTIL.getHBaseCluster().getServerHoldingMeta()); ProcedureTestingUtility.submitProcedure(masterPE, failedProcedure, HConstants.NO_NONCE, - HConstants.NO_NONCE); + HConstants.NO_NONCE); TEST_UTIL.waitFor(20000, () -> failedProcedure.isTriedToAcquire()); Assert.assertFalse(failedProcedure.isWorkerAcquired()); // let one procedure finish and release worker @@ -216,10 +219,10 @@ public void testGetWALsToSplit() throws Exception { private void splitLogsTestHelper(HBaseTestingUtil testUtil) throws Exception { HMaster hmaster = testUtil.getHBaseCluster().getMaster(); SplitWALManager 
splitWALManager = hmaster.getSplitWALManager(); - LOG.info("The Master FS is pointing to: " + hmaster.getMasterFileSystem() - .getFileSystem().getUri()); - LOG.info("The WAL FS is pointing to: " + hmaster.getMasterFileSystem() - .getWALFileSystem().getUri()); + LOG.info( + "The Master FS is pointing to: " + hmaster.getMasterFileSystem().getFileSystem().getUri()); + LOG.info( + "The WAL FS is pointing to: " + hmaster.getMasterFileSystem().getWALFileSystem().getUri()); testUtil.createTable(TABLE_NAME, FAMILY, testUtil.KEYS_FOR_HBA_CREATE_TABLE); // load table @@ -227,8 +230,8 @@ private void splitLogsTestHelper(HBaseTestingUtil testUtil) throws Exception { ProcedureExecutor masterPE = hmaster.getMasterProcedureExecutor(); ServerName metaServer = testUtil.getHBaseCluster().getServerHoldingMeta(); ServerName testServer = testUtil.getHBaseCluster().getRegionServerThreads().stream() - .map(rs -> rs.getRegionServer().getServerName()).filter(rs -> rs != metaServer).findAny() - .get(); + .map(rs -> rs.getRegionServer().getServerName()).filter(rs -> rs != metaServer).findAny() + .get(); List procedures = splitWALManager.splitWALs(testServer, false); Assert.assertEquals(1, procedures.size()); ProcedureTestingUtility.submitAndWait(masterPE, procedures.get(0)); @@ -256,7 +259,7 @@ public void testSplitLogs() throws Exception { } @Test - public void testSplitLogsWithDifferentWalAndRootFS() throws Exception{ + public void testSplitLogsWithDifferentWalAndRootFS() throws Exception { HBaseTestingUtil testUtil2 = new HBaseTestingUtil(); testUtil2.getConfiguration().setBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false); testUtil2.getConfiguration().setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 1); @@ -331,7 +334,7 @@ public ServerOperationType getServerOperationType() { @Override protected Flow executeFromState(MasterProcedureEnv env, - MasterProcedureProtos.SplitWALState state) + MasterProcedureProtos.SplitWALState state) throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { SplitWALManager splitWALManager = env.getMasterServices().getSplitWALManager(); switch (state) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java index 1f17480dce6a..acb7e8bb45ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,12 +49,12 @@ import org.slf4j.LoggerFactory; /** - * Run tests that use the HBase clients; {@link org.apache.hadoop.hbase.client.TableBuilder}. - * Sets up the HBase mini cluster once at start and runs through all client tests. - * Each creates a table named for the method and does its stuff against that. + * Run tests that use the HBase clients; {@link org.apache.hadoop.hbase.client.TableBuilder}. Sets + * up the HBase mini cluster once at start and runs through all client tests. Each creates a table + * named for the method and does its stuff against that. 
*/ -@Category({MasterTests.class, LargeTests.class}) -@SuppressWarnings ("deprecation") +@Category({ MasterTests.class, LargeTests.class }) +@SuppressWarnings("deprecation") public class TestWarmupRegion { @ClassRule @@ -64,10 +64,10 @@ public class TestWarmupRegion { private static final Logger LOG = LoggerFactory.getLogger(TestWarmupRegion.class); protected TableName TABLENAME = TableName.valueOf("testPurgeFutureDeletes"); protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static byte [] ROW = Bytes.toBytes("testRow"); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - private static byte [] VALUE = Bytes.toBytes("testValue"); + private static byte[] ROW = Bytes.toBytes("testRow"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte[] VALUE = Bytes.toBytes("testValue"); private static byte[] COLUMN = Bytes.toBytes("column"); private static int numRows = 10000; protected static int SLAVES = 3; @@ -114,15 +114,13 @@ public void setUp() throws Exception { TEST_UTIL.waitFor(6000, new Waiter.Predicate() { @Override public boolean evaluate() throws IOException { - return TEST_UTIL.getAdmin().getCompactionState(TABLENAME) == - CompactionState.NONE; + return TEST_UTIL.getAdmin().getCompactionState(TABLENAME) == CompactionState.NONE; } }); table.close(); } - /** * @throws java.lang.Exception */ @@ -131,7 +129,7 @@ public void tearDown() throws Exception { // Nothing to do. } - protected void runwarmup() throws InterruptedException{ + protected void runwarmup() throws InterruptedException { Thread thread = new Thread(new Runnable() { @Override public void run() { @@ -156,19 +154,19 @@ public void run() { /** * Basic client side validation of HBASE-4536 */ - @Test - public void testWarmup() throws Exception { - int serverid = 0; - HRegion region = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLENAME).get(0); - RegionInfo info = region.getRegionInfo(); - runwarmup(); - for (int i = 0; i < 10; i++) { - HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(serverid); - byte [] destName = Bytes.toBytes(rs.getServerName().toString()); - assertTrue(destName != null); - LOG.info("i=" + i ); - TEST_UTIL.getMiniHBaseCluster().getMaster().move(info.getEncodedNameAsBytes(), destName); - serverid = (serverid + 1) % 2; - } - } + @Test + public void testWarmup() throws Exception { + int serverid = 0; + HRegion region = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLENAME).get(0); + RegionInfo info = region.getRegionInfo(); + runwarmup(); + for (int i = 0; i < 10; i++) { + HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(serverid); + byte[] destName = Bytes.toBytes(rs.getServerName().toString()); + assertTrue(destName != null); + LOG.info("i=" + i); + TEST_UTIL.getMiniHBaseCluster().getMaster().move(info.getEncodedNameAsBytes(), destName); + serverid = (serverid + 1) % 2; + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java index 4d7a4830c41e..6fd9e681e482 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the 
Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,8 @@ public final class AssignmentTestingUtil { private static final Logger LOG = LoggerFactory.getLogger(AssignmentTestingUtil.class); - private AssignmentTestingUtil() {} + private AssignmentTestingUtil() { + } public static void waitForRegionToBeInTransition(final HBaseTestingUtil util, final RegionInfo hri) throws Exception { @@ -54,8 +55,8 @@ public static void waitForRegionToBeInTransition(final HBaseTestingUtil util, } } - public static void waitForRsToBeDead(final HBaseTestingUtil util, - final ServerName serverName) throws Exception { + public static void waitForRsToBeDead(final HBaseTestingUtil util, final ServerName serverName) + throws Exception { util.waitFor(60000, new ExplainingPredicate() { @Override public boolean evaluate() { @@ -92,19 +93,19 @@ public static void crashRs(final HBaseTestingUtil util, final ServerName serverN } } - public static ServerName crashRsWithRegion(final HBaseTestingUtil util, - final RegionInfo hri, final boolean kill) throws Exception { + public static ServerName crashRsWithRegion(final HBaseTestingUtil util, final RegionInfo hri, + final boolean kill) throws Exception { ServerName serverName = getServerHoldingRegion(util, hri); crashRs(util, serverName, kill); return serverName; } - public static ServerName getServerHoldingRegion(final HBaseTestingUtil util, - final RegionInfo hri) throws Exception { - ServerName serverName = util.getMiniHBaseCluster().getServerHoldingRegion( - hri.getTable(), hri.getRegionName()); - ServerName amServerName = getMaster(util).getAssignmentManager().getRegionStates() - .getRegionServerOfRegion(hri); + public static ServerName getServerHoldingRegion(final HBaseTestingUtil util, final RegionInfo hri) + throws Exception { + ServerName serverName = + util.getMiniHBaseCluster().getServerHoldingRegion(hri.getTable(), hri.getRegionName()); + ServerName amServerName = + getMaster(util).getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri); // Make sure AM and MiniCluster agrees on the Server holding the region // and that the server is online. @@ -115,7 +116,7 @@ public static ServerName getServerHoldingRegion(final HBaseTestingUtil util, public static boolean isServerHoldingMeta(final HBaseTestingUtil util, final ServerName serverName) throws Exception { - for (RegionInfo hri: getMetaRegions(util)) { + for (RegionInfo hri : getMetaRegions(util)) { if (serverName.equals(getServerHoldingRegion(util, hri))) { return true; } @@ -158,12 +159,12 @@ public static boolean waitForAssignment(AssignmentManager am, RegionInfo regionI } public static void insertData(final HBaseTestingUtil UTIL, final TableName tableName, - int rowCount, int startRowNum, String... cfs) throws IOException { + int rowCount, int startRowNum, String... cfs) throws IOException { insertData(UTIL, tableName, rowCount, startRowNum, false, cfs); } public static void insertData(final HBaseTestingUtil UTIL, final TableName tableName, - int rowCount, int startRowNum, boolean flushOnce, String... cfs) throws IOException { + int rowCount, int startRowNum, boolean flushOnce, String... 
cfs) throws IOException { Table t = UTIL.getConnection().getTable(tableName); Put p; for (int i = 0; i < rowCount / 2; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java index a6eceb8b1a90..55d77ca1ad86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java @@ -16,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.master.assignment; + import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; import static org.mockito.ArgumentMatchers.any; @@ -79,8 +80,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException; /** - * A mocked master services. - * Tries to fake it. May not always work. + * A mocked master services. Tries to fake it. May not always work. */ public class MockMasterServices extends MockNoopMasterServices { private final MasterFileSystem fileSystemManager; @@ -109,18 +109,18 @@ public MockMasterServices(Configuration conf, this.fileSystemManager = new MasterFileSystem(conf); this.walManager = new MasterWalManager(this); this.splitWALManager = - conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)? - null: new SplitWALManager(this); + conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK) + ? null + : new SplitWALManager(this); this.masterRegion = MasterRegionFactory.create(this); // Mock an AM. this.assignmentManager = - new AssignmentManager(this, masterRegion, new MockRegionStateStore(this, masterRegion)); + new AssignmentManager(this, masterRegion, new MockRegionStateStore(this, masterRegion)); this.balancer = LoadBalancerFactory.getLoadBalancer(conf); this.serverManager = new ServerManager(this, new DummyRegionServerList()); this.tableStateManager = Mockito.mock(TableStateManager.class); - Mockito.when(this.tableStateManager.getTableState(Mockito.any())). 
- thenReturn(new TableState(TableName.valueOf("AnyTableNameSetInMockMasterServcies"), - TableState.State.ENABLED)); + Mockito.when(this.tableStateManager.getTableState(Mockito.any())).thenReturn(new TableState( + TableName.valueOf("AnyTableNameSetInMockMasterServcies"), TableState.State.ENABLED)); // Mock up a Client Interface ClientProtos.ClientService.BlockingInterface ri = @@ -134,11 +134,11 @@ public MockMasterServices(Configuration conf, } try { Mockito.when(ri.multi(any(), any())).thenAnswer(new Answer() { - @Override - public MultiResponse answer(InvocationOnMock invocation) throws Throwable { - return buildMultiResponse(invocation.getArgument(1)); - } - }); + @Override + public MultiResponse answer(InvocationOnMock invocation) throws Throwable { + return buildMultiResponse(invocation.getArgument(1)); + } + }); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } @@ -155,17 +155,15 @@ public void start(final int numServes, final RSProcedureDispatcher remoteDispatc for (int i = 0; i < numServes; ++i) { ServerName sn = ServerName.valueOf("localhost", 100 + i, 1); serverManager.regionServerReport(sn, ServerMetricsBuilder.newBuilder(sn) - .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); + .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); } this.procedureExecutor.getEnvironment().setEventReady(initialized, true); } /** - * Call this restart method only after running MockMasterServices#start() - * The RSs can be differentiated by the port number, see - * ServerName in MockMasterServices#start() method above. + * Call this restart method only after running MockMasterServices#start() The RSs can be + * differentiated by the port number, see ServerName in MockMasterServices#start() method above. * Restart of region server will have new startcode in server name - * * @param serverName Server name to be restarted */ public void restartRegionServer(ServerName serverName) throws IOException { @@ -182,7 +180,7 @@ public void restartRegionServer(ServerName serverName) throws IOException { } ServerName sn = ServerName.valueOf(serverName.getAddress().toString(), startCode); serverManager.regionServerReport(sn, ServerMetricsBuilder.newBuilder(sn) - .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); + .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); } @Override @@ -204,17 +202,17 @@ public void abortProcess() { }); this.procedureEnv = new MasterProcedureEnv(this, - remoteDispatcher != null ? remoteDispatcher : new RSProcedureDispatcher(this)); + remoteDispatcher != null ? 
remoteDispatcher : new RSProcedureDispatcher(this)); this.procedureExecutor = new ProcedureExecutor<>(conf, procedureEnv, procedureStore, - procedureEnv.getProcedureScheduler()); + procedureEnv.getProcedureScheduler()); final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, - Math.max(Runtime.getRuntime().availableProcessors(), - MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS)); - final boolean abortOnCorruption = conf.getBoolean( - MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION, - MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION); + Math.max(Runtime.getRuntime().availableProcessors(), + MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS)); + final boolean abortOnCorruption = + conf.getBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION, + MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION); this.procedureStore.start(numThreads); ProcedureTestingUtility.initAndStartWorkers(procedureExecutor, numThreads, abortOnCorruption); this.procedureEnv.getRemoteDispatcher().start(); @@ -340,12 +338,11 @@ public void update(TableDescriptor htd, boolean cacheOnly) throws IOException { private static MultiResponse buildMultiResponse(MultiRequest req) { MultiResponse.Builder builder = MultiResponse.newBuilder(); - RegionActionResult.Builder regionActionResultBuilder = - RegionActionResult.newBuilder(); + RegionActionResult.Builder regionActionResultBuilder = RegionActionResult.newBuilder(); ResultOrException.Builder roeBuilder = ResultOrException.newBuilder(); - for (RegionAction regionAction: req.getRegionActionList()) { + for (RegionAction regionAction : req.getRegionActionList()) { regionActionResultBuilder.clear(); - for (ClientProtos.Action action: regionAction.getActionList()) { + for (ClientProtos.Action action : regionAction.getActionList()) { roeBuilder.clear(); roeBuilder.setResult(ClientProtos.Result.getDefaultInstance()); roeBuilder.setIndex(action.getIndex()); @@ -356,7 +353,8 @@ private static MultiResponse buildMultiResponse(MultiRequest req) { return builder.build(); } - @Override public SplitWALManager getSplitWALManager() { + @Override + public SplitWALManager getSplitWALManager() { return splitWALManager; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAMAssignWithRandExec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAMAssignWithRandExec.java index 71e0a2759667..e46bbc7cb476 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAMAssignWithRandExec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAMAssignWithRandExec.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ public class TestAMAssignWithRandExec extends TestAssignmentManagerBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAMAssignWithRandExec.class); + HBaseClassTestRule.forClass(TestAMAssignWithRandExec.class); private static final Logger LOG = LoggerFactory.getLogger(TestAMAssignWithRandExec.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAMServerFailedOpen.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAMServerFailedOpen.java index b69218a6e3a4..b6f552bbee45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAMServerFailedOpen.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAMServerFailedOpen.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestAMServerFailedOpen extends TestAssignmentManagerBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAMServerFailedOpen.class); + HBaseClassTestRule.forClass(TestAMServerFailedOpen.class); private static final Logger LOG = LoggerFactory.getLogger(TestAMServerFailedOpen.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignRegionToUninitializedRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignRegionToUninitializedRegionServer.java index 38f11391fd2f..8de1cce42fcc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignRegionToUninitializedRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignRegionToUninitializedRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,7 +56,7 @@ public class TestAssignRegionToUninitializedRegionServer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAssignRegionToUninitializedRegionServer.class); + HBaseClassTestRule.forClass(TestAssignRegionToUninitializedRegionServer.class); private static CountDownLatch ARRIVE; @@ -72,7 +72,7 @@ public RSRpcServicesForTest(HRegionServer rs) throws IOException { @Override public ExecuteProceduresResponse executeProcedures(RpcController controller, - ExecuteProceduresRequest request) throws ServiceException { + ExecuteProceduresRequest request) throws ServiceException { if (request.getOpenRegionCount() > 0) { ASSIGN_CALLED.set(true); } @@ -88,7 +88,7 @@ public RegionServerForTest(Configuration conf) throws IOException { @Override protected void tryRegionServerReport(long reportStartTime, long reportEndTime) - throws IOException { + throws IOException { if (ARRIVE != null) { ARRIVE.countDown(); ARRIVE = null; @@ -134,7 +134,7 @@ public void testMove() throws Exception { // restart a new region server, and wait until it finish initialization and want to call // regionServerReport, so it will load the peer state to peer cache. 
Future regionServerFuture = ForkJoinPool.commonPool() - .submit(() -> UTIL.getMiniHBaseCluster().startRegionServer().getRegionServer()); + .submit(() -> UTIL.getMiniHBaseCluster().startRegionServer().getRegionServer()); ARRIVE.await(); // try move region to the new region server, it will fail, but we need to make sure that we do // not try to assign it to the new server. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java index be3bb24d2df0..19fc93614b53 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestAssignmentManager extends TestAssignmentManagerBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAssignmentManager.class); + HBaseClassTestRule.forClass(TestAssignmentManager.class); private static final Logger LOG = LoggerFactory.getLogger(TestAssignmentManager.class); @@ -280,8 +280,8 @@ public void testReopen() throws Exception { // collect AM metrics before test collectAssignmentManagerMetrics(); - TransitRegionStateProcedure proc = - TransitRegionStateProcedure.reopen(master.getMasterProcedureExecutor().getEnvironment(), hri); + TransitRegionStateProcedure proc = TransitRegionStateProcedure + .reopen(master.getMasterProcedureExecutor().getEnvironment(), hri); am.getRegionStates().getRegionStateNode(hri).setProcedure(proc); waitOnFuture(submitProcedure(proc)); @@ -295,20 +295,22 @@ public void testLoadRegionFromMetaAfterRegionManuallyAdded() throws Exception { try { this.util.startMiniCluster(); final AssignmentManager am = this.util.getHBaseCluster().getMaster().getAssignmentManager(); - final TableName tableName = TableName. 
- valueOf("testLoadRegionFromMetaAfterRegionManuallyAdded"); + final TableName tableName = + TableName.valueOf("testLoadRegionFromMetaAfterRegionManuallyAdded"); this.util.createTable(tableName, "f"); RegionInfo hri = createRegionInfo(tableName, 1); assertNull("RegionInfo was just instantiated by the test, but " - + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri)); + + "shouldn't be in AM regionStates yet.", + am.getRegionStates().getRegionState(hri)); MetaTableAccessor.addRegionsToMeta(this.util.getConnection(), Collections.singletonList(hri), 1); - assertNull("RegionInfo was manually added in META, but " - + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri)); + assertNull( + "RegionInfo was manually added in META, but " + "shouldn't be in AM regionStates yet.", + am.getRegionStates().getRegionState(hri)); hri = am.loadRegionFromMeta(hri.getEncodedName()); assertEquals(hri.getEncodedName(), am.getRegionStates().getRegionState(hri).getRegion().getEncodedName()); - }finally { + } finally { this.util.killMiniHBaseCluster(); } } @@ -322,10 +324,11 @@ public void testLoadRegionFromMetaRegionNotInMeta() throws Exception { this.util.createTable(tableName, "f"); final RegionInfo hri = createRegionInfo(tableName, 1); assertNull("RegionInfo was just instantiated by the test, but " - + "shouldn't be in AM regionStates yet.", am.getRegionStates().getRegionState(hri)); + + "shouldn't be in AM regionStates yet.", + am.getRegionStates().getRegionState(hri)); assertNull("RegionInfo was never added in META, should had returned null.", am.loadRegionFromMeta(hri.getEncodedName())); - }finally { + } finally { this.util.killMiniHBaseCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java index 696e71f0a2c0..de7839aabdb1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -108,7 +108,7 @@ public abstract class TestAssignmentManagerBase { protected MockMasterServices master; protected AssignmentManager am; protected NavigableMap> regionsToRegionServers = - new ConcurrentSkipListMap>(); + new ConcurrentSkipListMap>(); // Simple executor to run some simple tasks. 
protected ScheduledExecutorService executor; @@ -155,7 +155,7 @@ protected void setupConfiguration(Configuration conf) throws Exception { public void setUp() throws Exception { util = new HBaseTestingUtil(); this.executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder() - .setUncaughtExceptionHandler((t, e) -> LOG.warn("Uncaught: ", e)).build()); + .setUncaughtExceptionHandler((t, e) -> LOG.warn("Uncaught: ", e)).build()); setupConfiguration(util.getConfiguration()); master = new MockMasterServices(util.getConfiguration(), this.regionsToRegionServers); rsDispatcher = new MockRSProcedureDispatcher(master); @@ -270,7 +270,7 @@ protected TransitRegionStateProcedure createAndSubmitAssign(TableName tableName, protected RegionInfo createRegionInfo(final TableName tableName, final long regionId) { return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(regionId)) - .setEndKey(Bytes.toBytes(regionId + 1)).setSplit(false).setRegionId(0).build(); + .setEndKey(Bytes.toBytes(regionId + 1)).setSplit(false).setRegionId(0).build(); } protected TransitRegionStateProcedure createAssignProcedure(RegionInfo hri) { @@ -284,7 +284,7 @@ protected TransitRegionStateProcedure createUnassignProcedure(RegionInfo hri) { try { assertFalse(regionNode.isInTransition()); proc = TransitRegionStateProcedure - .unassign(master.getMasterProcedureExecutor().getEnvironment(), hri); + .unassign(master.getMasterProcedureExecutor().getEnvironment(), hri); regionNode.setProcedure(proc); } finally { regionNode.unlock(); @@ -296,10 +296,10 @@ protected void sendTransitionReport(final ServerName serverName, final org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo, final TransitionCode state, long seqId) throws IOException { ReportRegionStateTransitionRequest.Builder req = - ReportRegionStateTransitionRequest.newBuilder(); + ReportRegionStateTransitionRequest.newBuilder(); req.setServer(ProtobufUtil.toServerName(serverName)); req.addTransition(RegionStateTransition.newBuilder().addRegionInfo(regionInfo) - .setTransitionCode(state).setOpenSeqNum(seqId).build()); + .setTransitionCode(state).setOpenSeqNum(seqId).build()); am.reportRegionStateTransition(req.build()); } @@ -311,7 +311,7 @@ protected void doCrash(final ServerName serverName) { newRsAdded++; try { this.master.getServerManager().regionServerReport(newSn, ServerMetricsBuilder - .newBuilder(newSn).setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); + .newBuilder(newSn).setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); } catch (YouAreDeadException e) { // should not happen throw new UncheckedIOException(e); @@ -332,7 +332,7 @@ protected RegionOpeningState execOpenRegion(ServerName server, RegionOpenInfo op throws IOException { RegionInfo hri = ProtobufUtil.toRegionInfo(openReq.getRegion()); long previousOpenSeqNum = - am.getRegionStates().getOrCreateRegionStateNode(hri).getOpenSeqNum(); + am.getRegionStates().getOrCreateRegionStateNode(hri).getOpenSeqNum(); sendTransitionReport(server, openReq.getRegion(), TransitionCode.OPENED, previousOpenSeqNum + 2); // Concurrency? 
@@ -603,7 +603,7 @@ protected RegionOpeningState execOpenRegion(final ServerName server, RegionOpenI throws IOException { RegionInfo hri = ProtobufUtil.toRegionInfo(openReq.getRegion()); long previousOpenSeqNum = - am.getRegionStates().getOrCreateRegionStateNode(hri).getOpenSeqNum(); + am.getRegionStates().getOrCreateRegionStateNode(hri).getOpenSeqNum(); switch (ThreadLocalRandom.current().nextInt(3)) { case 0: LOG.info("Return OPENED response"); @@ -619,8 +619,8 @@ protected RegionOpeningState execOpenRegion(final ServerName server, RegionOpenI } // The procedure on master will just hang forever because nothing comes back // from the RS in this case. - LOG.info("Return null as response; means proc stuck so we send in a crash report after" + - " a few seconds..."); + LOG.info("Return null as response; means proc stuck so we send in a crash report after" + + " a few seconds..."); executor.schedule(new Runnable() { @Override public void run() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerLoadMetaRegionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerLoadMetaRegionState.java index 5bb9f8557516..08d9a50cc2c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerLoadMetaRegionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerLoadMetaRegionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestAssignmentManagerLoadMetaRegionState { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAssignmentManagerLoadMetaRegionState.class); + HBaseClassTestRule.forClass(TestAssignmentManagerLoadMetaRegionState.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerUtil.java index f5216d45d3bd..8c9420f1a3fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestAssignmentManagerUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAssignmentManagerUtil.class); + HBaseClassTestRule.forClass(TestAssignmentManagerUtil.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -67,8 +67,9 @@ public static void setUp() throws Exception { UTIL.startMiniCluster(1); UTIL.getAdmin().balancerSwitch(false, true); UTIL.createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) - .setRegionReplication(REGION_REPLICATION).build(), new byte[][] { Bytes.toBytes(0) }); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) + .setRegionReplication(REGION_REPLICATION).build(), + new byte[][] { Bytes.toBytes(0) }); UTIL.waitTableAvailable(TABLE_NAME); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); ENV = master.getMasterProcedureExecutor().getEnvironment(); @@ -95,14 +96,14 @@ public static void tearDown() throws Exception { private List getPrimaryRegions() throws IOException { return UTIL.getAdmin().getRegions(TABLE_NAME).stream() - .filter(r -> RegionReplicaUtil.isDefaultReplica(r)).collect(Collectors.toList()); + .filter(r -> RegionReplicaUtil.isDefaultReplica(r)).collect(Collectors.toList()); } @Test public void testCreateUnassignProcedureForSplitFail() throws IOException { RegionInfo region = getPrimaryRegions().get(0); AM.getRegionStates().getRegionStateNode(region) - .setProcedure(TransitRegionStateProcedure.unassign(ENV, region)); + .setProcedure(TransitRegionStateProcedure.unassign(ENV, region)); try { AssignmentManagerUtil.createUnassignProceduresForSplitOrMerge(ENV, Stream.of(region), REGION_REPLICATION); @@ -118,7 +119,7 @@ public void testCreateUnassignProceduresForMergeFail() throws IOException { RegionInfo regionA = regions.get(0); RegionInfo regionB = regions.get(1); AM.getRegionStates().getRegionStateNode(regionB) - .setProcedure(TransitRegionStateProcedure.unassign(ENV, regionB)); + .setProcedure(TransitRegionStateProcedure.unassign(ENV, regionB)); try { AssignmentManagerUtil.createUnassignProceduresForSplitOrMerge(ENV, Stream.of(regionA, regionB), REGION_REPLICATION); @@ -127,8 +128,8 @@ public void testCreateUnassignProceduresForMergeFail() throws IOException { // expected } IntStream.range(0, REGION_REPLICATION) - .mapToObj(i -> RegionReplicaUtil.getRegionInfoForReplica(regionA, i)) - .map(AM.getRegionStates()::getRegionStateNode).forEachOrdered( - rn -> assertFalse("Should have unset the proc for " + rn, rn.isInTransition())); + .mapToObj(i -> RegionReplicaUtil.getRegionInfoForReplica(regionA, i)) + .map(AM.getRegionStates()::getRegionStateNode).forEachOrdered( + rn -> assertFalse("Should have unset the proc for " + rn, rn.isInTransition())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java index 9ec5110df910..31a9512f595c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestAssignmentOnRSCrash { @ClassRule @@ -72,9 +72,8 @@ public void setup() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); - UTIL.createTable(TEST_TABLE, new byte[][] { FAMILY }, new byte[][] { - Bytes.toBytes("B"), Bytes.toBytes("D"), Bytes.toBytes("F"), Bytes.toBytes("L") - }); + UTIL.createTable(TEST_TABLE, new byte[][] { FAMILY }, new byte[][] { Bytes.toBytes("B"), + Bytes.toBytes("D"), Bytes.toBytes("F"), Bytes.toBytes("L") }); } @After @@ -106,7 +105,7 @@ private void testCrashRsWithUserRegion(final boolean kill, final boolean withDat throws Exception { final int NROWS = 100; int nkilled = 0; - for (RegionInfo hri: UTIL.getAdmin().getRegions(TEST_TABLE)) { + for (RegionInfo hri : UTIL.getAdmin().getRegions(TEST_TABLE)) { ServerName serverName = AssignmentTestingUtil.getServerHoldingRegion(UTIL, hri); if (AssignmentTestingUtil.isServerHoldingMeta(UTIL, serverName)) continue; @@ -145,7 +144,7 @@ public void testStopRsWithMetaRegion() throws Exception { private void testCrashRsWithMetaRegion(final boolean kill) throws Exception { int nkilled = 0; - for (RegionInfo hri: AssignmentTestingUtil.getMetaRegions(UTIL)) { + for (RegionInfo hri : AssignmentTestingUtil.getMetaRegions(UTIL)) { ServerName serverName = AssignmentTestingUtil.crashRsWithRegion(UTIL, hri, kill); // wait for region to enter in transition and then to get out of transition @@ -179,8 +178,7 @@ public int testGet(final RegionInfo hri, final int nrows) throws IOException { for (int i = 0; i < nrows; ++i) { final byte[] row = Bytes.add(hri.getStartKey(), Bytes.toBytes(i)); final Result result = table.get(new Get(row)); - if (result != null && !result.isEmpty() && - Bytes.equals(row, result.getValue(FAMILY, null))) { + if (result != null && !result.isEmpty() && Bytes.equals(row, result.getValue(FAMILY, null))) { nresults++; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java index f159926e06f9..85ae940db6b6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public class TestCloseRegionWhileRSCrash { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCloseRegionWhileRSCrash.class); + HBaseClassTestRule.forClass(TestCloseRegionWhileRSCrash.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -169,7 +169,7 @@ public void testRetryBackoff() throws IOException, InterruptedException { RegionInfo region = srcRs.getRegions(TABLE_NAME).get(0).getRegionInfo(); HRegionServer dstRs = UTIL.getOtherRegionServer(srcRs); ProcedureExecutor<MasterProcedureEnv> procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); procExec.submitProcedure(new DummyServerProcedure(srcRs.getServerName())); ARRIVE.await(); UTIL.getMiniHBaseCluster().killRegionServer(srcRs.getServerName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestDeadServerMetricRegionChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestDeadServerMetricRegionChore.java index 3034d8b778e0..1922c2c581f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestDeadServerMetricRegionChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestDeadServerMetricRegionChore.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.assignment; import static org.junit.Assert.fail; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -36,7 +37,7 @@ public class TestDeadServerMetricRegionChore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDeadServerMetricRegionChore.class); + HBaseClassTestRule.forClass(TestDeadServerMetricRegionChore.class); protected HBaseTestingUtil util; @@ -45,7 +46,7 @@ public void setUp() throws Exception { util = new HBaseTestingUtil(); // Disable DeadServerMetricRegionChore util.getConfiguration() - .setInt(AssignmentManager.DEAD_REGION_METRIC_CHORE_INTERVAL_MSEC_CONF_KEY, -1); + .setInt(AssignmentManager.DEAD_REGION_METRIC_CHORE_INTERVAL_MSEC_CONF_KEY, -1); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestExceptionInAssignRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestExceptionInAssignRegion.java index ad9c50ea72c5..8afca4ea7321 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestExceptionInAssignRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestExceptionInAssignRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestExceptionInAssignRegion { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExceptionInAssignRegion.class); + HBaseClassTestRule.forClass(TestExceptionInAssignRegion.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -75,11 +75,11 @@ public static void tearDown() throws Exception { @Test public void testExceptionInAssignRegion() { ProcedureExecutor procedureExecutor = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); JVMClusterUtil.RegionServerThread rsThread = null; for (JVMClusterUtil.RegionServerThread t : UTIL.getMiniHBaseCluster() - .getRegionServerThreads()) { + .getRegionServerThreads()) { if (!t.getRegionServer().getRegions(TABLE_NAME).isEmpty()) { rsThread = t; break; @@ -89,10 +89,10 @@ public void testExceptionInAssignRegion() { HRegionServer rs = rsThread.getRegionServer(); RegionInfo hri = rs.getRegions(TABLE_NAME).get(0).getRegionInfo(); TransitRegionStateProcedure assignRegionProcedure = TransitRegionStateProcedure.move( - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(), - hri, null); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(), hri, + null); RegionStateNode regionNode = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getOrCreateRegionStateNode(hri); + .getRegionStates().getOrCreateRegionStateNode(hri); regionNode.setProcedure(assignRegionProcedure); countDownLatch.countDown(); long prodId = procedureExecutor.submitProcedure(assignRegionProcedure); @@ -113,7 +113,8 @@ private HRegionServer getRegionServer(int index) { } public static class ThrowInOpenCP implements RegionCoprocessor, RegionObserver { - @Override public void preOpen(ObserverContext c) { + @Override + public void preOpen(ObserverContext c) { if (countDownLatch.getCount() == 1) { // We want to throw exception only first time in move region call // After that RS aborts and we don't want to throw in any other open region diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestExceptionInUnassignedRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestExceptionInUnassignedRegion.java index c7aab794570b..0c30974c23d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestExceptionInUnassignedRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestExceptionInUnassignedRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class TestExceptionInUnassignedRegion { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExceptionInUnassignedRegion.class); + HBaseClassTestRule.forClass(TestExceptionInUnassignedRegion.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -72,11 +72,11 @@ public static void tearDown() throws Exception { @Test public void testExceptionInUnassignRegion() { ProcedureExecutor procedureExecutor = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); JVMClusterUtil.RegionServerThread rsThread = null; for (JVMClusterUtil.RegionServerThread t : UTIL.getMiniHBaseCluster() - .getRegionServerThreads()) { + .getRegionServerThreads()) { if (!t.getRegionServer().getRegions(TABLE_NAME).isEmpty()) { rsThread = t; break; @@ -88,7 +88,7 @@ public void testExceptionInUnassignRegion() { TransitRegionStateProcedure moveRegionProcedure = TransitRegionStateProcedure.reopen( UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(), hri); RegionStateNode regionNode = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getOrCreateRegionStateNode(hri); + .getRegionStates().getOrCreateRegionStateNode(hri); regionNode.setProcedure(moveRegionProcedure); long prodId = procedureExecutor.submitProcedure(moveRegionProcedure); ProcedureTestingUtility.waitProcedure(procedureExecutor, prodId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java index 6c5a81177833..2a8aec1d1b5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestHbckChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -170,8 +170,8 @@ public void testForDisabledTable() throws Exception { // Set table state to disabled, then not in inconsistent regions. TableStateManager tableStateManager = master.getTableStateManager(); - Mockito.when(tableStateManager.isTableState(tableName, TableState.State.DISABLED)). 
- thenReturn(true); + Mockito.when(tableStateManager.isTableState(tableName, TableState.State.DISABLED)) + .thenReturn(true); hbckChore.choreForTesting(); inconsistentRegions = hbckChore.getInconsistentRegions(); assertFalse(inconsistentRegions.containsKey(regionName)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMasterAbortWhileMergingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMasterAbortWhileMergingTable.java index c119ba8e66a6..8b30555c1e01 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMasterAbortWhileMergingTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMasterAbortWhileMergingTable.java @@ -20,7 +20,6 @@ import java.util.List; import java.util.Optional; import java.util.concurrent.CountDownLatch; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -44,15 +43,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterAbortWhileMergingTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMasterAbortWhileMergingTable.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestMasterAbortWhileMergingTable.class); + private static final Logger LOG = LoggerFactory.getLogger(TestMasterAbortWhileMergingTable.class); protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("test"); @@ -61,12 +59,10 @@ public class TestMasterAbortWhileMergingTable { private static byte[] SPLITKEY = Bytes.toBytes("bbbbbbb"); private static CountDownLatch mergeCommitArrive = new CountDownLatch(1); - - @BeforeClass public static void setupCluster() throws Exception { UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - MergeRegionObserver.class.getName()); + MergeRegionObserver.class.getName()); UTIL.startMiniCluster(3); admin = UTIL.getAdmin(); byte[][] splitKeys = new byte[1][]; @@ -88,27 +84,26 @@ public static void cleanupTest() throws Exception { public void test() throws Exception { List regionInfos = admin.getRegions(TABLE_NAME); MergeTableRegionsProcedure mergeTableRegionsProcedure = new MergeTableRegionsProcedure( - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor() - .getEnvironment(), new RegionInfo [] {regionInfos.get(0), regionInfos.get(1)}, false); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(), + new RegionInfo[] { regionInfos.get(0), regionInfos.get(1) }, false); long procID = UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor() .submitProcedure(mergeTableRegionsProcedure); mergeCommitArrive.await(); UTIL.getMiniHBaseCluster().stopMaster(0); UTIL.getMiniHBaseCluster().startMaster(); - //wait until master initialized + // wait until master initialized + UTIL.waitFor(30000, () -> UTIL.getMiniHBaseCluster().getMaster() != null + && UTIL.getMiniHBaseCluster().getMaster().isInitialized()); UTIL.waitFor(30000, - () -> UTIL.getMiniHBaseCluster().getMaster() != null && UTIL - .getMiniHBaseCluster().getMaster().isInitialized()); - UTIL.waitFor(30000, () -> UTIL.getMiniHBaseCluster().getMaster() - .getMasterProcedureExecutor().isFinished(procID)); - Assert.assertTrue("Found region RIT, that's 
impossible! " + - UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionsInTransition(), - UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() - .getRegionsInTransition().size() == 0); + () -> UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().isFinished(procID)); + Assert.assertTrue( + "Found region RIT, that's impossible! " + + UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionsInTransition(), + UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionsInTransition() + .size() == 0); } - public static class MergeRegionObserver implements MasterCoprocessor, - MasterObserver { + public static class MergeRegionObserver implements MasterCoprocessor, MasterObserver { @Override public Optional getMasterObserver() { @@ -116,8 +111,7 @@ public Optional getMasterObserver() { } @Override - public void preMergeRegionsCommitAction( - ObserverContext ctx, + public void preMergeRegionsCommitAction(ObserverContext ctx, RegionInfo[] regionsToMerge, List metaEntries) { mergeCommitArrive.countDown(); LOG.error("mergeCommitArrive countdown"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java index 471f0def9b47..095b356af5bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.java @@ -59,7 +59,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestMergeTableRegionsProcedure { @ClassRule @@ -125,7 +125,7 @@ public void setup() throws Exception { @After public void tearDown() throws Exception { resetProcExecutorTestingKillFlag(); - for (TableDescriptor htd: admin.listTableDescriptors()) { + for (TableDescriptor htd : admin.listTableDescriptors()) { LOG.info("Tear down, remove table=" + htd.getTableName()); UTIL.deleteTable(htd.getTableName()); } @@ -137,29 +137,28 @@ private void resetProcExecutorTestingKillFlag() { assertTrue("expected executor to be running", procExec.isRunning()); } - private int loadARowPerRegion(final Table t, List ris) - throws IOException { + private int loadARowPerRegion(final Table t, List ris) throws IOException { List puts = new ArrayList<>(); - for (RegionInfo ri: ris) { - Put put = new Put(ri.getStartKey() == null || ri.getStartKey().length == 0? - new byte [] {'a'}: ri.getStartKey()); + for (RegionInfo ri : ris) { + Put put = + new Put(ri.getStartKey() == null || ri.getStartKey().length == 0 ? 
new byte[] { 'a' } + : ri.getStartKey()); put.addColumn(HConstants.CATALOG_FAMILY, HConstants.CATALOG_FAMILY, - HConstants.CATALOG_FAMILY); + HConstants.CATALOG_FAMILY); puts.add(put); } t.put(puts); return puts.size(); } - /** * This tests two region merges */ @Test public void testMergeTwoRegions() throws Exception { final TableName tableName = TableName.valueOf(this.name.getMethodName()); - UTIL.createTable(tableName, new byte[][]{HConstants.CATALOG_FAMILY}, - new byte[][]{new byte[]{'b'}, new byte[]{'c'}, new byte[]{'d'}, new byte[]{'e'}}); + UTIL.createTable(tableName, new byte[][] { HConstants.CATALOG_FAMILY }, new byte[][] { + new byte[] { 'b' }, new byte[] { 'c' }, new byte[] { 'd' }, new byte[] { 'e' } }); testMerge(tableName, 2); } @@ -167,7 +166,7 @@ private void testMerge(TableName tableName, int mergeCount) throws IOException { List ris = MetaTableAccessor.getTableRegions(UTIL.getConnection(), tableName); int originalRegionCount = ris.size(); assertTrue(originalRegionCount > mergeCount); - RegionInfo[] regionsToMerge = ris.subList(0, mergeCount).toArray(new RegionInfo [] {}); + RegionInfo[] regionsToMerge = ris.subList(0, mergeCount).toArray(new RegionInfo[] {}); int countOfRowsLoaded = 0; try (Table table = UTIL.getConnection().getTable(tableName)) { countOfRowsLoaded = loadARowPerRegion(table, ris); @@ -184,14 +183,14 @@ private void testMerge(TableName tableName, int mergeCount) throws IOException { ProcedureTestingUtility.assertProcNotFailed(procExec, procId); MetaTableAccessor.fullScanMetaAndPrint(UTIL.getConnection()); assertEquals(originalRegionCount - mergeCount + 1, - MetaTableAccessor.getTableRegions(UTIL.getConnection(), tableName).size()); + MetaTableAccessor.getTableRegions(UTIL.getConnection(), tableName).size()); assertEquals(mergeSubmittedCount + 1, mergeProcMetrics.getSubmittedCounter().getCount()); assertEquals(mergeFailedCount, mergeProcMetrics.getFailedCounter().getCount()); assertEquals(assignSubmittedCount + 1, assignProcMetrics.getSubmittedCounter().getCount()); assertEquals(assignFailedCount, assignProcMetrics.getFailedCounter().getCount()); assertEquals(unassignSubmittedCount + mergeCount, - unassignProcMetrics.getSubmittedCounter().getCount()); + unassignProcMetrics.getSubmittedCounter().getCount()); assertEquals(unassignFailedCount, unassignProcMetrics.getFailedCounter().getCount()); // Need to get the references cleaned out. 
Close of region will move them @@ -208,7 +207,7 @@ private void testMerge(TableName tableName, int mergeCount) throws IOException { UTIL.getHBaseCluster().getMaster().getCatalogJanitor().triggerNow(); RegionInfo mergedRegion = proc.getMergedRegion(); RegionStateStore regionStateStore = - UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); while (ris != null && ris.get(0) != null && ris.get(1) != null) { ris = regionStateStore.getMergeRegions(mergedRegion); LOG.info("{} {}", Bytes.toStringBinary(mergedRegion.getRegionName()), ris); @@ -248,10 +247,10 @@ public void testMergeRegionsConcurrently() throws Exception { // collect AM metrics before test collectAssignmentManagerMetrics(); - long procId1 = procExec.submitProcedure(new MergeTableRegionsProcedure( - procExec.getEnvironment(), regionsToMerge1, true)); - long procId2 = procExec.submitProcedure(new MergeTableRegionsProcedure( - procExec.getEnvironment(), regionsToMerge2, true)); + long procId1 = procExec.submitProcedure( + new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge1, true)); + long procId2 = procExec.submitProcedure( + new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge2, true)); ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.waitProcedure(procExec, procId2); ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); @@ -350,7 +349,7 @@ public void testMergeWithoutPONR() throws Exception { private List createTable(final TableName tableName) throws Exception { TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); byte[][] splitRows = new byte[initialRegionCount - 1][]; for (int i = 0; i < splitRows.length; ++i) { splitRows[i] = Bytes.toBytes(String.format("%d", i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestModifyTableWhileMerging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestModifyTableWhileMerging.java index 29964464e117..81f443737f69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestModifyTableWhileMerging.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestModifyTableWhileMerging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.master.assignment; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -44,16 +43,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; - -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestModifyTableWhileMerging { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestModifyTableWhileMerging.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestModifyTableWhileMerging.class); + private static final Logger LOG = LoggerFactory.getLogger(TestModifyTableWhileMerging.class); protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("test"); @@ -64,7 +61,7 @@ public class TestModifyTableWhileMerging { @BeforeClass public static void setupCluster() throws Exception { - //Set procedure executor thread to 1, making reproducing this issue of HBASE-20921 easier + // Set procedure executor thread to 1, making reproducing this issue of HBASE-20921 easier UTIL.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); UTIL.startMiniCluster(1); admin = UTIL.getAdmin(); @@ -86,24 +83,23 @@ public static void cleanupTest() throws Exception { @Test public void test() throws Exception { TableDescriptor tableDescriptor = client.getDescriptor(); - ProcedureExecutor executor = UTIL.getMiniHBaseCluster().getMaster() - .getMasterProcedureExecutor(); + ProcedureExecutor executor = + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); MasterProcedureEnv env = executor.getEnvironment(); List regionInfos = admin.getRegions(TABLE_NAME); MergeTableRegionsProcedure mergeTableRegionsProcedure = new MergeTableRegionsProcedure( - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor() - .getEnvironment(), new RegionInfo [] {regionInfos.get(0), regionInfos.get(1)}, false); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(), + new RegionInfo[] { regionInfos.get(0), regionInfos.get(1) }, false); ModifyTableProcedure modifyTableProcedure = new ModifyTableProcedure(env, tableDescriptor); long procModify = executor.submitProcedure(modifyTableProcedure); - UTIL.waitFor(30000, () -> executor.getProcedures().stream() - .filter(p -> p instanceof ModifyTableProcedure) - .map(p -> (ModifyTableProcedure) p) - .anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); + UTIL.waitFor(30000, + () -> executor.getProcedures().stream().filter(p -> p instanceof ModifyTableProcedure) + .map(p -> (ModifyTableProcedure) p).anyMatch(p -> TABLE_NAME.equals(p.getTableName()))); long proc = executor.submitProcedure(mergeTableRegionsProcedure); - UTIL.waitFor(3000000, () -> UTIL.getMiniHBaseCluster().getMaster() - .getMasterProcedureExecutor().isFinished(procModify)); + UTIL.waitFor(3000000, () -> UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor() + .isFinished(procModify)); Assert.assertEquals("Modify Table procedure should success!", - ProcedureProtos.ProcedureState.SUCCESS, modifyTableProcedure.getState()); + ProcedureProtos.ProcedureState.SUCCESS, modifyTableProcedure.getState()); } } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java index 4c0eac0b2bbf..f966aa64d1ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestOpenRegionProcedureBackoff { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestOpenRegionProcedureBackoff.class); + HBaseClassTestRule.forClass(TestOpenRegionProcedureBackoff.class); private static volatile boolean FAIL = false; @@ -77,7 +77,7 @@ public HMasterForTest(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } } @@ -110,9 +110,9 @@ private void assertBackoffIncrease() throws IOException, InterruptedException { public void testBackoff() throws IOException, InterruptedException, ExecutionException { FAIL = true; AsyncAdmin admin = UTIL.getAsyncConnection().getAdminBuilder() - .setRpcTimeout(5, TimeUnit.MINUTES).setOperationTimeout(10, TimeUnit.MINUTES).build(); + .setRpcTimeout(5, TimeUnit.MINUTES).setOperationTimeout(10, TimeUnit.MINUTES).build(); CompletableFuture future = admin.createTable(TableDescriptorBuilder.newBuilder(NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build()); assertBackoffIncrease(); FAIL = false; future.get(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java index 93711ba98369..375246c7611e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureHang.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -62,7 +62,7 @@ public class TestOpenRegionProcedureHang { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestOpenRegionProcedureHang.class); + HBaseClassTestRule.forClass(TestOpenRegionProcedureHang.class); private static final Logger LOG = LoggerFactory.getLogger(TestOpenRegionProcedureHang.class); @@ -75,7 +75,7 @@ public class TestOpenRegionProcedureHang { private static final class AssignmentManagerForTest extends AssignmentManager { - public AssignmentManagerForTest(MasterServices master,MasterRegion masterRegion) { + public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) { super(master, masterRegion); } @@ -83,9 +83,9 @@ public AssignmentManagerForTest(MasterServices master,MasterRegion masterRegion) public ReportRegionStateTransitionResponse reportRegionStateTransition( ReportRegionStateTransitionRequest req) throws PleaseHoldException { RegionStateTransition transition = req.getTransition(0); - if (transition.getTransitionCode() == TransitionCode.OPENED && - ProtobufUtil.toTableName(transition.getRegionInfo(0).getTableName()).equals(NAME) && - ARRIVE != null) { + if (transition.getTransitionCode() == TransitionCode.OPENED + && ProtobufUtil.toTableName(transition.getRegionInfo(0).getTableName()).equals(NAME) + && ARRIVE != null) { ARRIVE.countDown(); try { RESUME.await(); @@ -112,7 +112,7 @@ public HMasterForTest(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } @@ -185,20 +185,20 @@ public void test() throws InterruptedException, KeeperException, IOException { return false; }); ProcedureExecutor procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); UTIL.waitFor(30000, () -> procExec.getProcedures().stream().filter(p -> p instanceof OpenRegionProcedure) - .map(p -> (OpenRegionProcedure) p).anyMatch(p -> p.region.getTable().equals(NAME))); + .map(p -> (OpenRegionProcedure) p).anyMatch(p -> p.region.getTable().equals(NAME))); OpenRegionProcedure proc = procExec.getProcedures().stream() - .filter(p -> p instanceof OpenRegionProcedure).map(p -> (OpenRegionProcedure) p) - .filter(p -> p.region.getTable().equals(NAME)).findFirst().get(); + .filter(p -> p instanceof OpenRegionProcedure).map(p -> (OpenRegionProcedure) p) + .filter(p -> p.region.getTable().equals(NAME)).findFirst().get(); // wait a bit to let the OpenRegionProcedure send out the request Thread.sleep(2000); RESUME.countDown(); if (!FINISH.await(15, TimeUnit.SECONDS)) { - LOG.info("Wait reportRegionStateTransition to finish timed out, this is possible if" + - " we update the procedure store, as the WALProcedureStore" + - " will retry forever to roll the writer if it is not closed"); + LOG.info("Wait reportRegionStateTransition to finish timed out, this is possible if" + + " we update the procedure store, as the WALProcedureStore" + + " will retry forever to roll the writer if it is not closed"); } FINISH = null; // if the reportRegionTransition is finished, wait a bit to let it return the data to RS diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java index 21fb63e08962..a82f477c6013 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndDTP.java @@ -56,7 +56,7 @@ public class TestRaceBetweenSCPAndDTP { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRaceBetweenSCPAndDTP.class); + HBaseClassTestRule.forClass(TestRaceBetweenSCPAndDTP.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -71,7 +71,7 @@ public class TestRaceBetweenSCPAndDTP { private static final class AssignmentManagerForTest extends AssignmentManager { public AssignmentManagerForTest(MasterServices master, MasterRegion masterRegion) { - super(master,masterRegion); + super(master, masterRegion); } @Override @@ -97,7 +97,7 @@ public HMasterForTest(Configuration conf) throws IOException, KeeperException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } } @@ -132,7 +132,7 @@ public void test() throws Exception { cdl.await(); ProcedureExecutor<MasterProcedureEnv> procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); UTIL.getMiniHBaseCluster().stopRegionServer(sn); long pid = Procedure.NO_PROC_ID; do { @@ -153,8 +153,9 @@ public void test() throws Exception { * @return Returns {@link Procedure#NO_PROC_ID} if no SCP found else actual pid. */ private long getSCPPID(ProcedureExecutor e) { - Optional<ServerCrashProcedure> optional = e.getProcedures().stream(). - filter(p -> p instanceof ServerCrashProcedure).map(p -> (ServerCrashProcedure) p).findAny(); - return optional.isPresent()? optional.get().getProcId(): Procedure.NO_PROC_ID; + Optional<ServerCrashProcedure> optional = + e.getProcedures().stream().filter(p -> p instanceof ServerCrashProcedure) + .map(p -> (ServerCrashProcedure) p).findAny(); + return optional.isPresent() ? optional.get().getProcId() : Procedure.NO_PROC_ID; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java index ae21c75715bc..a56f5ead39c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRaceBetweenSCPAndTRSP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ public class TestRaceBetweenSCPAndTRSP { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRaceBetweenSCPAndTRSP.class); + HBaseClassTestRule.forClass(TestRaceBetweenSCPAndTRSP.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -110,7 +110,7 @@ public HMasterForTest(Configuration conf) throws IOException, KeeperException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } } @@ -153,10 +153,10 @@ public void test() throws Exception { moveFuture.get(); ProcedureExecutor procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); long scpProcId = - procExec.getProcedures().stream().filter(p -> p instanceof ServerCrashProcedure) - .map(p -> (ServerCrashProcedure) p).findAny().get().getProcId(); + procExec.getProcedures().stream().filter(p -> p instanceof ServerCrashProcedure) + .map(p -> (ServerCrashProcedure) p).findAny().get().getProcId(); RESUME_GET_REGIONS_ON_SERVER.countDown(); UTIL.waitFor(60000, () -> procExec.isFinished(scpProcId)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java index 3bcf285e20f4..6abbc35d6153 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ public class TestRegionAssignedToMultipleRegionServers { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionAssignedToMultipleRegionServers.class); + HBaseClassTestRule.forClass(TestRegionAssignedToMultipleRegionServers.class); private static final List<ServerName> EXCLUDE_SERVERS = new ArrayList<>(); @@ -117,13 +117,13 @@ public HMasterForTest(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } @Override - protected ServerManager createServerManager(MasterServices master, - RegionServerList storage) throws IOException { + protected ServerManager createServerManager(MasterServices master, RegionServerList storage) + throws IOException { setupClusterConnection(); return new ServerManagerForTest(master, storage); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java index a1465593d166..318efc6d79bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,11 +51,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionStateTransitionState; - /** * Tests bypass on a region assign/unassign */ -@Category({LargeTests.class}) +@Category({ LargeTests.class }) public class TestRegionBypass { private final static Logger LOG = LoggerFactory.getLogger(TestRegionBypass.class); @@ -93,34 +92,34 @@ public void testBypass() throws IOException, InterruptedException { MasterProcedureEnv env = TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); List<RegionInfo> regions = admin.getRegions(this.tableName); - for (RegionInfo ri: regions) { + for (RegionInfo ri : regions) { admin.unassign(ri.getRegionName(), false); } List<Long> pids = new ArrayList<>(regions.size()); - for (RegionInfo ri: regions) { - Procedure<MasterProcedureEnv> p = new StallingAssignProcedure(env, ri, null, false, - TransitionType.ASSIGN); - pids.add(TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(). - submitProcedure(p)); + for (RegionInfo ri : regions) { + Procedure<MasterProcedureEnv> p = + new StallingAssignProcedure(env, ri, null, false, TransitionType.ASSIGN); + pids.add( + TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().submitProcedure(p)); } - for (Long pid: pids) { + for (Long pid : pids) { while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().isStarted(pid)) { Thread.sleep(100); } } List<Procedure<MasterProcedureEnv>> ps = TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().getProcedures(); - for (Procedure<MasterProcedureEnv> p: ps) { + for (Procedure<MasterProcedureEnv> p : ps) { if (p instanceof StallingAssignProcedure) { - List<Boolean> bs = TEST_UTIL.getHbck().
- bypassProcedure(Arrays.asList(p.getProcId()), 1000, true, false); - for (Boolean b: bs) { + List<Boolean> bs = + TEST_UTIL.getHbck().bypassProcedure(Arrays.asList(p.getProcId()), 1000, true, false); + for (Boolean b : bs) { LOG.info("BYPASSED {} {}", p.getProcId(), b); } } } // Try and assign WITHOUT override flag. Should fail!. - for (RegionInfo ri: regions) { + for (RegionInfo ri : regions) { try { admin.assign(ri.getRegionName()); } catch (Throwable dnrioe) { @@ -128,31 +127,32 @@ public void testBypass() throws IOException, InterruptedException { LOG.info("Expected {}", dnrioe); } } - while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(). - getActiveProcIds().isEmpty()) { + while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().getActiveProcIds() + .isEmpty()) { Thread.sleep(100); } // Now assign with the override flag. - for (RegionInfo ri: regions) { - TEST_UTIL.getHbck().assigns(Arrays.<String>asList(ri.getEncodedName()), true); + for (RegionInfo ri : regions) { + TEST_UTIL.getHbck().assigns(Arrays.<String> asList(ri.getEncodedName()), true); } - while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(). - getActiveProcIds().isEmpty()) { + while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().getActiveProcIds() + .isEmpty()) { Thread.sleep(100); } - for (RegionInfo ri: regions) { - assertTrue(ri.toString(), TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(). - getRegionStates().isRegionOnline(ri)); + for (RegionInfo ri : regions) { + assertTrue(ri.toString(), TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().isRegionOnline(ri)); } } /** * An AssignProcedure that Stalls just before the finish. */ - public static class StallingAssignProcedure extends TransitRegionStateProcedure{ + public static class StallingAssignProcedure extends TransitRegionStateProcedure { public final CountDownLatch latch = new CountDownLatch(2); - public StallingAssignProcedure(){} + public StallingAssignProcedure() { + } public StallingAssignProcedure(MasterProcedureEnv env, RegionInfo hri, ServerName assignCandidate, boolean forceNewPlan, TransitionType type) { @@ -160,13 +160,12 @@ public StallingAssignProcedure(MasterProcedureEnv env, RegionInfo hri, init(env); } - private void init(MasterProcedureEnv env){ + private void init(MasterProcedureEnv env) { RegionStateNode regionNode = env.getAssignmentManager().getRegionStates().getOrCreateRegionStateNode(getRegion()); regionNode.setProcedure(this); } - @Override protected Flow executeFromState(MasterProcedureEnv env, RegionStateTransitionState state) throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java index 46e91da943bc..a43f088555fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.assignment; import static org.junit.Assert.assertEquals; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; @@ -41,6 +42,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import
org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; /** @@ -52,7 +54,7 @@ public class TestRegionMoveAndAbandon { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionMoveAndAbandon.class); + HBaseClassTestRule.forClass(TestRegionMoveAndAbandon.class); @Rule public TestName name = new TestName(); @@ -70,7 +72,7 @@ public void setup() throws Exception { UTIL = new HBaseTestingUtil(); zkCluster = UTIL.startMiniZKCluster(); StartTestingClusterOption option = - StartTestingClusterOption.builder().numRegionServers(2).build(); + StartTestingClusterOption.builder().numRegionServers(2).build(); cluster = UTIL.startMiniHBaseCluster(option); rs1 = cluster.getRegionServer(0); rs2 = cluster.getRegionServer(1); @@ -116,8 +118,7 @@ public void test() throws Exception { UTIL.waitFor(30_000, () -> rs2.isStopped() && !rs2.isAlive()); UTIL.waitFor(30_000, () -> rs1.isStopped() && !rs1.isAlive()); // make sure none of regionserver threads is alive. - UTIL.waitFor(30_000, () -> - UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().isEmpty()); + UTIL.waitFor(30_000, () -> UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().isEmpty()); // Start up everything again LOG.info("Starting cluster"); UTIL.getMiniHBaseCluster().startMaster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java index f308a71299d3..fa6f971d42a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +19,10 @@ import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -120,8 +119,7 @@ public void testRegionReplicaSplitRegionAssignment() throws Exception { for (Region r : rs.getRegionServer().getRegions(table.getName())) { // Make sure that every region has some data (even for split daughter regions). if (RegionReplicaUtil.isDefaultReplica(r.getRegionInfo())) { - assertTrue(r.getStore(f).hasReferences() || - r.getStore(f).getStorefiles().size() > 0); + assertTrue(r.getStore(f).hasReferences() || r.getStore(f).getStorefiles().size() > 0); } count++; } @@ -145,9 +143,8 @@ public void testAssignFakeReplicaRegion() throws Exception { Table table = null; try { table = createTableAndLoadData(tn); - final RegionInfo fakeHri = - RegionInfoBuilder.newBuilder(table.getName()).setStartKey(Bytes.toBytes("a")) - .setEndKey(Bytes.toBytes("b")).setReplicaId(1) + final RegionInfo fakeHri = RegionInfoBuilder.newBuilder(table.getName()) + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).setReplicaId(1) .setRegionId(EnvironmentEdgeManager.currentTime()).build(); // To test AssignProcedure can defend this case. @@ -155,7 +152,7 @@ public void testAssignFakeReplicaRegion() throws Exception { // Wait until all assigns are done. 
HBaseTestingUtil.await(50, () -> { return HTU.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getActiveProcIds() - .isEmpty(); + .isEmpty(); }); // Make sure the region is not online. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplit.java index 105f72f006e8..efeb8bb28277 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplit.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; - import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -54,7 +53,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestRegionSplit { @ClassRule @@ -79,8 +78,8 @@ private static void setupConf(Configuration conf) { @BeforeClass public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); - StartTestingClusterOption option = - StartTestingClusterOption.builder().numMasters(1).numRegionServers(3).numDataNodes(3).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(1) + .numRegionServers(3).numDataNodes(3).build(); UTIL.startMiniCluster(option); } @@ -99,8 +98,7 @@ public void setup() throws Exception { UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false); // Disable compaction. 
for (int i = 0; i < UTIL.getHBaseCluster().getLiveRegionServerThreads().size(); i++) { - UTIL.getHBaseCluster().getRegionServer(i).getCompactSplitThread().switchCompaction( - false); + UTIL.getHBaseCluster().getRegionServer(i).getCompactSplitThread().switchCompaction(false); } } @@ -117,7 +115,7 @@ public void testSplitTableRegion() throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); RegionInfo[] regions = - MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName); + MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName); int splitRowNum = startRowNum + rowCount / 2; byte[] splitKey = Bytes.toBytes("" + splitRowNum); @@ -134,16 +132,16 @@ public void testSplitTableRegion() throws Exception { assertTrue("not able to split table", UTIL.getHBaseCluster().getRegions(tableName).size() == 2); - //disable table + // disable table UTIL.getAdmin().disableTable(tableName); Thread.sleep(500); - //stop master + // stop master UTIL.getHBaseCluster().stopMaster(0); UTIL.getHBaseCluster().waitOnMaster(0); Thread.sleep(500); - //restart master + // restart master JVMClusterUtil.MasterThread t = UTIL.getHBaseCluster().startMaster(); Thread.sleep(500); @@ -155,7 +153,7 @@ public void testSplitTableRegion() throws Exception { List tableRegions = UTIL.getHBaseCluster().getRegions(tableName); assertEquals("Table region not correct.", 2, tableRegions.size()); Map regionInfoMap = UTIL.getHBaseCluster().getMaster() - .getAssignmentManager().getRegionStates().getRegionAssignments(); + .getAssignmentManager().getRegionStates().getRegionAssignments(); assertEquals(regionInfoMap.get(tableRegions.get(0).getRegionInfo()), regionInfoMap.get(tableRegions.get(1).getRegionInfo())); } @@ -165,14 +163,14 @@ public void testSplitStoreFiles() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, - null, columnFamilyName); + RegionInfo[] regions = + MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName); // flush the memstore insertData(UTIL, tableName, rowCount, startRowNum, true, columnFamilyName); // assert the hfile count of the table int storeFilesCountSum = 0; - for(HRegion region : UTIL.getHBaseCluster().getRegions(tableName)){ + for (HRegion region : UTIL.getHBaseCluster().getRegions(tableName)) { storeFilesCountSum += region.getStore(Bytes.toBytes(columnFamilyName)).getStorefiles().size(); } assertEquals(1, storeFilesCountSum); @@ -190,12 +188,11 @@ public void testSplitStoreFiles() throws Exception { ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); - assertEquals("Not able to split table", - 2, UTIL.getHBaseCluster().getRegions(tableName).size()); + assertEquals("Not able to split table", 2, UTIL.getHBaseCluster().getRegions(tableName).size()); // assert sum of the hfiles of all regions int childStoreFilesSum = 0; - for(HRegion region : UTIL.getHBaseCluster().getRegions(tableName)){ + for (HRegion region : UTIL.getHBaseCluster().getRegions(tableName)) { childStoreFilesSum += region.getStore(Bytes.toBytes(columnFamilyName)).getStorefiles().size(); } assertEquals(1, childStoreFilesSum); @@ -203,7 +200,7 @@ public void testSplitStoreFiles() throws Exception { List 
tableRegions = UTIL.getHBaseCluster().getRegions(tableName); assertEquals("Table region not correct.", 2, tableRegions.size()); Map regionInfoMap = UTIL.getHBaseCluster().getMaster() - .getAssignmentManager().getRegionStates().getRegionAssignments(); + .getAssignmentManager().getRegionStates().getRegionAssignments(); assertEquals(regionInfoMap.get(tableRegions.get(0).getRegionInfo()), regionInfoMap.get(tableRegions.get(1).getRegionInfo())); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplitAndSeparateChildren.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplitAndSeparateChildren.java index 317782fcc00e..c00d369cb5c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplitAndSeparateChildren.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionSplitAndSeparateChildren.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hbase.master.assignment; +import static org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil.insertData; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -48,20 +53,16 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil.insertData; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; -@Category({ MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestRegionSplitAndSeparateChildren { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionSplitAndSeparateChildren.class); + HBaseClassTestRule.forClass(TestRegionSplitAndSeparateChildren.class); - private static final Logger LOG = LoggerFactory.getLogger( - TestRegionSplitAndSeparateChildren.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestRegionSplitAndSeparateChildren.class); protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -81,8 +82,8 @@ private static void setupConf(Configuration conf) { @BeforeClass public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); - StartTestingClusterOption option = - StartTestingClusterOption.builder().numMasters(1).numRegionServers(3).numDataNodes(3).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(1) + .numRegionServers(3).numDataNodes(3).build(); UTIL.startMiniCluster(option); } @@ -101,8 +102,7 @@ public void setup() throws Exception { UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false); // Disable compaction. 
for (int i = 0; i < UTIL.getHBaseCluster().getLiveRegionServerThreads().size(); i++) { - UTIL.getHBaseCluster().getRegionServer(i).getCompactSplitThread().switchCompaction( - false); + UTIL.getHBaseCluster().getRegionServer(i).getCompactSplitThread().switchCompaction(false); } } @@ -119,7 +119,7 @@ public void testSplitTableRegionAndSeparateChildRegions() throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); RegionInfo[] regions = - MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName); + MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName); int splitRowNum = startRowNum + rowCount / 2; byte[] splitKey = Bytes.toBytes("" + splitRowNum); @@ -134,19 +134,18 @@ public void testSplitTableRegionAndSeparateChildRegions() throws Exception { ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); - assertTrue("not able to split table", - UTIL.getHBaseCluster().getRegions(tableName).size() == 2); + assertTrue("not able to split table", UTIL.getHBaseCluster().getRegions(tableName).size() == 2); - //disable table + // disable table UTIL.getAdmin().disableTable(tableName); Thread.sleep(500); - //stop master + // stop master UTIL.getHBaseCluster().stopMaster(0); UTIL.getHBaseCluster().waitOnMaster(0); Thread.sleep(500); - //restart master + // restart master JVMClusterUtil.MasterThread t = UTIL.getHBaseCluster().startMaster(); Thread.sleep(500); @@ -158,7 +157,7 @@ public void testSplitTableRegionAndSeparateChildRegions() throws Exception { List tableRegions = UTIL.getHBaseCluster().getRegions(tableName); assertEquals("Table region not correct.", 2, tableRegions.size()); Map regionInfoMap = UTIL.getHBaseCluster().getMaster() - .getAssignmentManager().getRegionStates().getRegionAssignments(); + .getAssignmentManager().getRegionStates().getRegionAssignments(); assertNotEquals(regionInfoMap.get(tableRegions.get(0).getRegionInfo()), regionInfoMap.get(tableRegions.get(1).getRegionInfo())); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java index 83e5431a8181..fcd268189e97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java @@ -64,13 +64,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; - @Category({ MasterTests.class, MediumTests.class }) public class TestRegionStateStore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionStateStore.class); + HBaseClassTestRule.forClass(TestRegionStateStore.class); private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -93,13 +92,13 @@ public void testVisitMetaForRegionExistingRegion() throws Exception { UTIL.createTable(tableName, "cf"); final List regions = UTIL.getHBaseCluster().getRegions(tableName); final String encodedName = regions.get(0).getRegionInfo().getEncodedName(); - final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster(). 
- getAssignmentManager().getRegionStateStore(); + final RegionStateStore regionStateStore = + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); final AtomicBoolean visitorCalled = new AtomicBoolean(false); regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() { @Override public void visitRegionState(Result result, RegionInfo regionInfo, RegionState.State state, - ServerName regionLocation, ServerName lastHost, long openSeqNum) { + ServerName regionLocation, ServerName lastHost, long openSeqNum) { assertEquals(encodedName, regionInfo.getEncodedName()); visitorCalled.set(true); } @@ -113,14 +112,14 @@ public void testVisitMetaForBadRegionState() throws Exception { UTIL.createTable(tableName, "cf"); final List regions = UTIL.getHBaseCluster().getRegions(tableName); final String encodedName = regions.get(0).getRegionInfo().getEncodedName(); - final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster(). - getAssignmentManager().getRegionStateStore(); + final RegionStateStore regionStateStore = + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); // add the BAD_STATE which does not exist in enum RegionState.State Put put = new Put(regions.get(0).getRegionInfo().getRegionName(), EnvironmentEdgeManager.currentTime()); put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER, - Bytes.toBytes("BAD_STATE")); + Bytes.toBytes("BAD_STATE")); try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { table.put(put); @@ -129,9 +128,8 @@ public void testVisitMetaForBadRegionState() throws Exception { final AtomicBoolean visitorCalled = new AtomicBoolean(false); regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() { @Override - public void visitRegionState(Result result, RegionInfo regionInfo, - RegionState.State state, ServerName regionLocation, - ServerName lastHost, long openSeqNum) { + public void visitRegionState(Result result, RegionInfo regionInfo, RegionState.State state, + ServerName regionLocation, ServerName lastHost, long openSeqNum) { assertEquals(encodedName, regionInfo.getEncodedName()); assertNull(state); visitorCalled.set(true); @@ -143,13 +141,13 @@ public void visitRegionState(Result result, RegionInfo regionInfo, @Test public void testVisitMetaForRegionNonExistingRegion() throws Exception { final String encodedName = "fakeencodedregionname"; - final RegionStateStore regionStateStore = UTIL.getHBaseCluster().getMaster(). 
- getAssignmentManager().getRegionStateStore(); + final RegionStateStore regionStateStore = + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); final AtomicBoolean visitorCalled = new AtomicBoolean(false); regionStateStore.visitMetaForRegion(encodedName, new RegionStateStore.RegionStateVisitor() { @Override public void visitRegionState(Result result, RegionInfo regionInfo, RegionState.State state, - ServerName regionLocation, ServerName lastHost, long openSeqNum) { + ServerName regionLocation, ServerName lastHost, long openSeqNum) { visitorCalled.set(true); } }); @@ -160,22 +158,22 @@ public void visitRegionState(Result result, RegionInfo regionInfo, RegionState.S public void testMetaLocationForRegionReplicasIsAddedAtRegionSplit() throws IOException { long regionId = EnvironmentEdgeManager.currentTime(); ServerName serverName0 = - ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong()); + ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong()); TableName tableName = name.getTableName(); RegionInfo parent = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) - .setRegionId(regionId).setReplicaId(0).build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) + .setRegionId(regionId).setReplicaId(0).build(); RegionInfo splitA = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false) - .setRegionId(regionId + 1).setReplicaId(0).build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false) + .setRegionId(regionId + 1).setReplicaId(0).build(); RegionInfo splitB = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("a")) - .setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId + 1).setReplicaId(0) - .build(); + .setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId + 1) + .setReplicaId(0).build(); List regionInfos = Lists.newArrayList(parent); MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3); final RegionStateStore regionStateStore = - UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); regionStateStore.splitRegion(parent, splitA, splitB, serverName0, TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3).build()); try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) { @@ -191,20 +189,20 @@ public void testEmptyMetaDaughterLocationDuringSplit() throws IOException { TableName tableName = name.getTableName(); long regionId = EnvironmentEdgeManager.currentTime(); ServerName serverName0 = - ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong()); + ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong()); RegionInfo parent = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) - .setRegionId(regionId).setReplicaId(0).build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) + .setRegionId(regionId).setReplicaId(0).build(); RegionInfo splitA = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false) - .setRegionId(regionId + 1).setReplicaId(0).build(); + 
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false) + .setRegionId(regionId + 1).setReplicaId(0).build(); RegionInfo splitB = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("a")) - .setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId + 1).setReplicaId(0) - .build(); + .setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId + 1) + .setReplicaId(0).build(); List regionInfos = Lists.newArrayList(parent); MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3); final RegionStateStore regionStateStore = - UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); regionStateStore.splitRegion(parent, splitA, splitB, serverName0, TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3).build()); try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) { @@ -232,22 +230,22 @@ public void testEmptyMetaDaughterLocationDuringSplit() throws IOException { public void testMetaLocationForRegionReplicasIsAddedAtRegionMerge() throws IOException { long regionId = EnvironmentEdgeManager.currentTime(); ServerName serverName0 = - ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong()); + ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong()); TableName tableName = name.getTableName(); RegionInfo parentA = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("a")) - .setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId).setReplicaId(0) - .build(); + .setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId).setReplicaId(0) + .build(); RegionInfo parentB = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false) - .setRegionId(regionId).setReplicaId(0).build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false) + .setRegionId(regionId).setReplicaId(0).build(); RegionInfo merged = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) - .setRegionId(regionId + 1).setReplicaId(0).build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) + .setRegionId(regionId + 1).setReplicaId(0).build(); final RegionStateStore regionStateStore = - UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) { List regionInfos = Lists.newArrayList(parentA, parentB); @@ -265,15 +263,15 @@ public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException { TableName tableName = name.getTableName(); RegionInfo regionInfoA = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(new byte[] { 'a' }).setSplit(false) - .setRegionId(regionId).setReplicaId(0).build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(new byte[] { 'a' }).setSplit(false) + .setRegionId(regionId).setReplicaId(0).build(); RegionInfo regionInfoB = RegionInfoBuilder.newBuilder(tableName).setStartKey(new byte[] { 'a' }) - .setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId).setReplicaId(0) - .build(); + 
.setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId).setReplicaId(0) + .build(); RegionInfo mergedRegionInfo = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) - .setRegionId(regionId).setReplicaId(0).build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) + .setRegionId(regionId).setReplicaId(0).build(); ServerName sn = ServerName.valueOf("bar", 0, 0); try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) { @@ -299,7 +297,7 @@ public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException { assertEquals(serverNameTime, serverCell.getTimestamp()); final RegionStateStore regionStateStore = - UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); ManualEnvironmentEdge edge = new ManualEnvironmentEdge(); edge.setValue(masterSystemTime); @@ -338,13 +336,13 @@ public void testGetMergeRegions() throws Exception { List regions = admin.getRegions(tn); assertEquals(4, regions.size()); admin - .mergeRegionsAsync( - new byte[][] { regions.get(0).getRegionName(), regions.get(1).getRegionName() }, false) - .get(60, TimeUnit.SECONDS); + .mergeRegionsAsync( + new byte[][] { regions.get(0).getRegionName(), regions.get(1).getRegionName() }, false) + .get(60, TimeUnit.SECONDS); admin - .mergeRegionsAsync( - new byte[][] { regions.get(2).getRegionName(), regions.get(3).getRegionName() }, false) - .get(60, TimeUnit.SECONDS); + .mergeRegionsAsync( + new byte[][] { regions.get(2).getRegionName(), regions.get(3).getRegionName() }, false) + .get(60, TimeUnit.SECONDS); List mergedRegions = admin.getRegions(tn); assertEquals(2, mergedRegions.size()); @@ -352,7 +350,7 @@ public void testGetMergeRegions() throws Exception { RegionInfo mergedRegion1 = mergedRegions.get(1); final RegionStateStore regionStateStore = - UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); List mergeParents = regionStateStore.getMergeRegions(mergedRegion0); assertTrue(mergeParents.contains(regions.get(0))); @@ -379,8 +377,8 @@ public void testAddMergeRegions() throws IOException { int limit = 10; byte[] previous = HConstants.EMPTY_START_ROW; for (int i = 0; i < limit; i++) { - RegionInfo ri = - RegionInfoBuilder.newBuilder(tn).setStartKey(previous).setEndKey(Bytes.toBytes(i)).build(); + RegionInfo ri = RegionInfoBuilder.newBuilder(tn).setStartKey(previous) + .setEndKey(Bytes.toBytes(i)).build(); ris.add(ri); } put = RegionStateStore.addMergeRegions(put, ris); @@ -400,14 +398,14 @@ public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws I long regionId = EnvironmentEdgeManager.currentTime(); TableName tableName = name.getTableName(); RegionInfo primary = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) - .setRegionId(regionId).setReplicaId(0).build(); + .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) + .setRegionId(regionId).setReplicaId(0).build(); try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) { List regionInfos = Lists.newArrayList(primary); MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3); final RegionStateStore regionStateStore = - 
UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); regionStateStore.removeRegionReplicas(tableName, 3, 1); Get get = new Get(primary.getRegionName()); Result result = meta.get(get); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java index 75bd310138ae..e06ff4142087 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -47,7 +46,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +@Category({ MasterTests.class, MediumTests.class }) public class TestRegionStates { @ClassRule @@ -65,8 +66,8 @@ public class TestRegionStates { public static void setUp() throws Exception { threadPool = Threads.getBoundedCachedThreadPool(32, 60L, TimeUnit.SECONDS, new ThreadFactoryBuilder().setNameFormat("ProcedureDispatcher-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler((t, e) -> LOG.warn("Failed thread " + t.getName(), e)) - .build()); + .setUncaughtExceptionHandler((t, e) -> LOG.warn("Failed thread " + t.getName(), e)) + .build()); executorService = new ExecutorCompletionService<>(threadPool); } @@ -95,7 +96,7 @@ private static void waitExecutorService(final int count) throws Exception { } // ========================================================================== - // Regions related + // Regions related // ========================================================================== @Test @@ -131,7 +132,7 @@ public void testRegionDoubleCreation() throws Exception { } private void checkTableRegions(final RegionStates stateMap, final TableName tableName, - final int nregions) { + final int nregions) { List rns = stateMap.getTableRegionStateNodes(tableName); assertEquals(nregions, rns.size()); for (int i = 1; i < rns.size(); ++i) { @@ -141,28 +142,21 @@ private void checkTableRegions(final RegionStates stateMap, final TableName tabl } } - private void addRegionNode(final RegionStates stateMap, - final TableName tableName, final long regionId) { + private void addRegionNode(final RegionStates stateMap, final TableName tableName, + final long regionId) { executorService.submit(new Callable() { @Override public Object call() { - return stateMap.getOrCreateRegionStateNode(RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes(regionId)) - .setEndKey(Bytes.toBytes(regionId + 1)) - .setSplit(false) - .setRegionId(0) - .build()); + return stateMap.getOrCreateRegionStateNode( + RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(regionId)) + .setEndKey(Bytes.toBytes(regionId + 
1)).setSplit(false).setRegionId(0).build()); } }); } private RegionInfo createRegionInfo(final TableName tableName, final long regionId) { - return RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes(regionId)) - .setEndKey(Bytes.toBytes(regionId + 1)) - .setSplit(false) - .setRegionId(0) - .build(); + return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(regionId)) + .setEndKey(Bytes.toBytes(regionId + 1)).setSplit(false).setRegionId(0).build(); } @Test @@ -184,8 +178,7 @@ public Object call() { } waitExecutorService(NRUNS); long et = EnvironmentEdgeManager.currentTime(); - LOG.info(String.format("PERF STATEMAP INSERT: %s %s/sec", - StringUtils.humanTimeDiff(et - st), + LOG.info(String.format("PERF STATEMAP INSERT: %s %s/sec", StringUtils.humanTimeDiff(et - st), StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f)))); st = EnvironmentEdgeManager.currentTime(); @@ -202,8 +195,7 @@ public Object call() { waitExecutorService(NRUNS); et = EnvironmentEdgeManager.currentTime(); - LOG.info(String.format("PERF STATEMAP GET: %s %s/sec", - StringUtils.humanTimeDiff(et - st), + LOG.info(String.format("PERF STATEMAP GET: %s %s/sec", StringUtils.humanTimeDiff(et - st), StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f)))); } @@ -218,8 +210,7 @@ public void testPerfSingleThread() { stateMap.createRegionStateNode(createRegionInfo(TABLE_NAME, i)); } long et = EnvironmentEdgeManager.currentTime(); - LOG.info(String.format("PERF SingleThread: %s %s/sec", - StringUtils.humanTimeDiff(et - st), + LOG.info(String.format("PERF SingleThread: %s %s/sec", StringUtils.humanTimeDiff(et - st), StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f)))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java index f17c09dafcb2..48b7edc35355 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportOnlineRegionsRace.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public class TestReportOnlineRegionsRace { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReportOnlineRegionsRace.class); + HBaseClassTestRule.forClass(TestReportOnlineRegionsRace.class); private static volatile CountDownLatch ARRIVE_RS_REPORT; private static volatile CountDownLatch RESUME_RS_REPORT; @@ -112,7 +112,7 @@ public HMasterForTest(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } } @@ -128,7 +128,7 @@ public static void setUp() throws Exception { UTIL.getConfiguration().setClass(HConstants.MASTER_IMPL, HMasterForTest.class, HMaster.class); UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 1000); UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT); + HConstants.DEFAULT_REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT); UTIL.startMiniCluster(1); UTIL.createTable(NAME, CF); UTIL.waitTableAvailable(NAME); @@ -143,7 +143,7 @@ public static void tearDown() throws Exception { public void testRace() throws Exception { RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(NAME).get(0).getRegionInfo(); ProcedureExecutor procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); RegionStateNode rsn = am.getRegionStates().getRegionStateNode(region); @@ -157,10 +157,10 @@ public void testRace() throws Exception { // schedule a TRSP to REOPEN the region RESUME_REPORT_STATE = new CountDownLatch(1); Future future = - am.moveAsync(new RegionPlan(region, rsn.getRegionLocation(), rsn.getRegionLocation())); + am.moveAsync(new RegionPlan(region, rsn.getRegionLocation(), rsn.getRegionLocation())); TransitRegionStateProcedure proc = - procExec.getProcedures().stream().filter(p -> p instanceof TransitRegionStateProcedure) - .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p).findAny().get(); + procExec.getProcedures().stream().filter(p -> p instanceof TransitRegionStateProcedure) + .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p).findAny().get(); IdLock procExecLock = procExec.getProcExecutionLock(); // a CloseRegionProcedure and then the OpenRegionProcedure we want to block IdLock.Entry lockEntry = procExecLock.getLockEntry(proc.getProcId() + 2); @@ -183,7 +183,7 @@ public void testRace() throws Exception { // confirm that the region can still be write, i.e, the regionServerReport method should not // change the region state to OPEN try (Table table = UTIL.getConnection().getTableBuilder(NAME, null).setWriteRpcTimeout(1000) - .setOperationTimeout(2000).build()) { + .setOperationTimeout(2000).build()) { table.put( new Put(Bytes.toBytes("key")).addColumn(CF, Bytes.toBytes("cq"), Bytes.toBytes("val"))); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java index 8dd51d528776..df1a96279484 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public class TestReportRegionStateTransitionFromDeadServer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReportRegionStateTransitionFromDeadServer.class); + HBaseClassTestRule.forClass(TestReportRegionStateTransitionFromDeadServer.class); private static final List EXCLUDE_SERVERS = new ArrayList<>(); @@ -105,7 +105,7 @@ public List getRegionsOnServer(ServerName serverName) { public ReportRegionStateTransitionResponse reportRegionStateTransition( ReportRegionStateTransitionRequest req) throws PleaseHoldException { if (ARRIVE_REPORT != null && req.getTransitionList().stream() - .allMatch(t -> !ProtobufUtil.toRegionInfo(t.getRegionInfo(0)).isMetaRegion())) { + .allMatch(t -> !ProtobufUtil.toRegionInfo(t.getRegionInfo(0)).isMetaRegion())) { ARRIVE_REPORT.countDown(); try { RESUME_REPORT.await(); @@ -124,13 +124,13 @@ public HMasterForTest(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } @Override - protected ServerManager createServerManager(MasterServices master, - RegionServerList storage) throws IOException { + protected ServerManager createServerManager(MasterServices master, RegionServerList storage) + throws IOException { setupClusterConnection(); return new ServerManagerForTest(master, storage); } @@ -168,12 +168,12 @@ public void test() throws HBaseIOException, InterruptedException, ExecutionExcep HRegionServer rs0 = UTIL.getMiniHBaseCluster().getRegionServer(rsn.getRegionLocation()); HRegionServer rs1 = UTIL.getOtherRegionServer(rs0); HRegionServer rs2 = UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer()).filter(rs -> rs != rs0 && rs != rs1).findAny().get(); + .map(t -> t.getRegionServer()).filter(rs -> rs != rs0 && rs != rs1).findAny().get(); RESUME_REPORT = new CountDownLatch(1); ARRIVE_REPORT = new CountDownLatch(1); Future future = - am.moveAsync(new RegionPlan(region, rs0.getServerName(), rs1.getServerName())); + am.moveAsync(new RegionPlan(region, rs0.getServerName(), rs1.getServerName())); ARRIVE_REPORT.await(); RESUME_GET_REGIONS = new CountDownLatch(1); @@ -194,7 +194,7 @@ public void test() throws HBaseIOException, InterruptedException, ExecutionExcep RESUME_GET_REGIONS.countDown(); // wait until there are no running procedures, no SCP and no TRSP UTIL.waitFor(30000, () -> UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor() - .getActiveProcIds().isEmpty()); + .getActiveProcIds().isEmpty()); boolean onRS1 = !rs1.getRegions(NAME).isEmpty(); boolean onRS2 = !rs2.getRegions(NAME).isEmpty(); assertNotEquals( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java index 1aa0f3448dcd..5a19855a4915 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionRetry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ public class TestReportRegionStateTransitionRetry { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReportRegionStateTransitionRetry.class); + HBaseClassTestRule.forClass(TestReportRegionStateTransitionRetry.class); private static final AtomicReference RESUME_AND_FAIL = new AtomicReference<>(); @@ -90,7 +90,7 @@ public HMasterForTest(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } } @@ -118,17 +118,17 @@ public static void tearDown() throws Exception { public void testRetryOnClose() throws Exception { RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(NAME).get(0).getRegionInfo(); ProcedureExecutor procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); RegionStateNode rsn = am.getRegionStates().getRegionStateNode(region); CountDownLatch latch = new CountDownLatch(1); RESUME_AND_FAIL.set(latch); Future future = - am.moveAsync(new RegionPlan(region, rsn.getRegionLocation(), rsn.getRegionLocation())); + am.moveAsync(new RegionPlan(region, rsn.getRegionLocation(), rsn.getRegionLocation())); TransitRegionStateProcedure proc = - procExec.getProcedures().stream().filter(p -> p instanceof TransitRegionStateProcedure) - .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p).findAny().get(); + procExec.getProcedures().stream().filter(p -> p instanceof TransitRegionStateProcedure) + .filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p).findAny().get(); // wait until we schedule the OpenRegionProcedure UTIL.waitFor(10000, @@ -139,7 +139,7 @@ public void testRetryOnClose() throws Exception { // confirm that the region can still be write try (Table table = UTIL.getConnection().getTableBuilder(NAME, null).setWriteRpcTimeout(1000) - .setOperationTimeout(2000).build()) { + .setOperationTimeout(2000).build()) { table.put( new Put(Bytes.toBytes("key")).addColumn(CF, Bytes.toBytes("cq"), Bytes.toBytes("val"))); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java index ead36a1ff3d9..42003918517b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -66,7 +66,7 @@ /** * Tests to verify master/ assignment manager functionality against rogue RS */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestRogueRSAssignment { @ClassRule @@ -131,7 +131,7 @@ public void setup() throws IOException { @After public void tearDown() throws Exception { - for (TableDescriptor td: UTIL.getAdmin().listTableDescriptors()) { + for (TableDescriptor td : UTIL.getAdmin().listTableDescriptors()) { LOG.info("Tear down, remove table=" + td.getTableName()); UTIL.deleteTable(td.getTableName()); } @@ -149,9 +149,8 @@ public void testReportRSWithWrongRegion() throws Exception { List tableRegions = createTable(tableName); - final ServerName sn = ServerName.parseVersionedServerName( - ServerName.valueOf("1.example.org", 1, EnvironmentEdgeManager.currentTime()) - .getVersionedBytes()); + final ServerName sn = ServerName.parseVersionedServerName(ServerName + .valueOf("1.example.org", 1, EnvironmentEdgeManager.currentTime()).getVersionedBytes()); // make fake request with a region assigned to different RS RegionServerStatusProtos.RegionServerReportRequest.Builder request = @@ -173,15 +172,14 @@ public void testReportRSWithWrongRegion() throws Exception { rs.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME); rs.setValue(UnsafeByteOperations.unsafeWrap(regions[i].getRegionName())); - ClusterStatusProtos.RegionLoad.Builder rl = ClusterStatusProtos.RegionLoad.newBuilder() - .setRegionSpecifier(rs.build()); + ClusterStatusProtos.RegionLoad.Builder rl = + ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rs.build()); sl.addRegionLoads(i, rl.build()); } return RegionServerStatusProtos.RegionServerReportRequest.newBuilder() - .setServer(ProtobufUtil.toServerName(sn)) - .setLoad(sl); + .setServer(ProtobufUtil.toServerName(sn)).setLoad(sl); } private List createTable(final TableName tableName) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java index f1f6e719820c..b102016a5150 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSCPGetRegionsRace.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ public class TestSCPGetRegionsRace { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPGetRegionsRace.class); + HBaseClassTestRule.forClass(TestSCPGetRegionsRace.class); private static final List EXCLUDE_SERVERS = new ArrayList<>(); @@ -137,13 +137,13 @@ public HMasterForTest(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AssignmentManagerForTest(master, masterRegion); } @Override - protected ServerManager createServerManager(MasterServices master, - RegionServerList storage) throws IOException { + protected ServerManager createServerManager(MasterServices master, RegionServerList storage) + throws IOException { setupClusterConnection(); return new ServerManagerForTest(master, storage); } @@ -158,7 +158,7 @@ protected ServerManager createServerManager(MasterServices master, @BeforeClass public static void setUp() throws Exception { UTIL.startMiniCluster(StartTestingClusterOption.builder().masterClass(HMasterForTest.class) - .numMasters(1).numRegionServers(3).build()); + .numMasters(1).numRegionServers(3).build()); UTIL.createTable(NAME, CF); UTIL.waitTableAvailable(NAME); UTIL.getAdmin().balancerSwitch(false, true); @@ -172,13 +172,13 @@ public static void tearDown() throws Exception { @Test public void test() throws Exception { RegionInfo region = - Iterables.getOnlyElement(UTIL.getMiniHBaseCluster().getRegions(NAME)).getRegionInfo(); + Iterables.getOnlyElement(UTIL.getMiniHBaseCluster().getRegions(NAME)).getRegionInfo(); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); AssignmentManager am = master.getAssignmentManager(); RegionStateNode rsn = am.getRegionStates().getRegionStateNode(region); ServerName source = rsn.getRegionLocation(); - ServerName dest = - UTIL.getAdmin().getRegionServers().stream().filter(sn -> !sn.equals(source)).findAny().get(); + ServerName dest = UTIL.getAdmin().getRegionServers().stream().filter(sn -> !sn.equals(source)) + .findAny().get(); ARRIVE_REPORT = new CountDownLatch(1); RESUME_REPORT = new CountDownLatch(1); @@ -190,7 +190,7 @@ public void test() throws Exception { // let's get procedure lock to stop the TRSP IdLock procExecutionLock = master.getMasterProcedureExecutor().getProcExecutionLock(); long procId = master.getProcedures().stream() - .filter(p -> p instanceof RegionRemoteProcedureBase).findAny().get().getProcId(); + .filter(p -> p instanceof RegionRemoteProcedureBase).findAny().get().getProcId(); IdLock.Entry lockEntry = procExecutionLock.getLockEntry(procId); RESUME_REPORT.countDown(); @@ -210,8 +210,8 @@ public void test() throws Exception { EXCLUDE_SERVERS.add(dest); RESUME_GET.countDown(); // wait until there are no SCPs and TRSPs - UTIL.waitFor(60000, () -> master.getProcedures().stream().allMatch(p -> p.isFinished() || - (!(p instanceof ServerCrashProcedure) && !(p instanceof TransitRegionStateProcedure)))); + UTIL.waitFor(60000, () -> master.getProcedures().stream().allMatch(p -> p.isFinished() + || (!(p instanceof ServerCrashProcedure) && !(p instanceof TransitRegionStateProcedure)))); // assert the region is only on the dest server. 
HRegionServer rs = UTIL.getMiniHBaseCluster().getRegionServer(dest); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java index 9cb6fd8490e7..48d339198cfb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +19,8 @@ import static org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil.insertData; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; @@ -69,7 +69,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestSplitTableRegionProcedure { @ClassRule @@ -114,7 +114,7 @@ private static void setupConf(Configuration conf) { * This copro is used to slow down opening of the replica regions. */ public static class RegionServerHostingReplicaSlowOpenCopro - implements RegionCoprocessor, RegionObserver { + implements RegionCoprocessor, RegionObserver { static int countForReplica = 0; static boolean slowDownReplicaOpen = false; @@ -127,7 +127,7 @@ public Optional getRegionObserver() { public void preOpen(ObserverContext c) throws IOException { int replicaId = c.getEnvironment().getRegion().getRegionInfo().getReplicaId(); if ((replicaId != RegionInfo.DEFAULT_REPLICA_ID) && (countForReplica == 0)) { - countForReplica ++; + countForReplica++; while (slowDownReplicaOpen) { LOG.info("Slow down replica region open a bit"); try { @@ -177,16 +177,14 @@ public void tearDown() throws Exception { } } - @Test public void testRollbackForSplitTableRegionWithReplica() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionServerHostingReplicaSlowOpenCopro.slowDownReplicaOpen = true; - RegionInfo [] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1); + RegionInfo[] regions = + MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName1); try { HBaseTestingUtil.setReplicas(UTIL.getAdmin(), tableName, 2); @@ -210,8 +208,8 @@ public void testRollbackForSplitTableRegionWithReplica() throws Exception { // Split region of the table, it will fail and rollback as replica parent region // is still at OPENING state. - long procId = procExec.submitProcedure(new SplitTableRegionProcedure( - procExec.getEnvironment(), regions[0], HConstants.CATALOG_FAMILY)); + long procId = procExec.submitProcedure(new SplitTableRegionProcedure(procExec.getEnvironment(), + regions[0], HConstants.CATALOG_FAMILY)); // Wait for the completion. 
ProcedureTestingUtility.waitProcedure(procExec, procId); @@ -235,7 +233,8 @@ public void testRollbackForSplitTableRegionWithReplica() throws Exception { ProcedureTestingUtility.assertProcFailed(procExec, procId); // There should not be any active OpenRegionProcedure - procExec.getActiveProceduresNoCopy().forEach(p -> assertFalse(p instanceof OpenRegionProcedure)); + procExec.getActiveProceduresNoCopy() + .forEach(p -> assertFalse(p instanceof OpenRegionProcedure)); } @Test @@ -243,8 +242,8 @@ public void testSplitTableRegion() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo [] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1, columnFamilyName2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2); int splitRowNum = startRowNum + rowCount / 2; byte[] splitKey = Bytes.toBytes("" + splitRowNum); @@ -270,15 +269,15 @@ public void testSplitTableRegion() throws Exception { assertEquals(assignFailedCount, assignProcMetrics.getFailedCounter().getCount()); assertEquals(unassignSubmittedCount + 1, unassignProcMetrics.getSubmittedCounter().getCount()); assertEquals(unassignFailedCount, unassignProcMetrics.getFailedCounter().getCount()); -} + } @Test public void testSplitTableRegionNoStoreFile() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo [] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1, columnFamilyName2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); int splitRowNum = startRowNum + rowCount / 2; byte[] splitKey = Bytes.toBytes("" + splitRowNum); @@ -307,8 +306,8 @@ public void testSplitTableRegionUnevenDaughter() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo [] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1, columnFamilyName2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2); // Split to two daughters with one of them only has 1 row int splitRowNum = startRowNum + rowCount / 4; @@ -338,8 +337,8 @@ public void testSplitTableRegionEmptyDaughter() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo [] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1, columnFamilyName2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2); // Split to two daughters with one of them only has 1 row int splitRowNum = startRowNum + rowCount; @@ -364,8 +363,7 @@ public void testSplitTableRegionEmptyDaughter() throws Exception { 
assertTrue(UTIL.countRows(tableName) == rowCount); assertTrue(UTIL.countRows(daughters.get(0)) == 0 || UTIL.countRows(daughters.get(1)) == 0); - assertEquals(splitSubmittedCount + 1, - splitProcMetrics.getSubmittedCounter().getCount()); + assertEquals(splitSubmittedCount + 1, splitProcMetrics.getSubmittedCounter().getCount()); assertEquals(splitFailedCount, splitProcMetrics.getFailedCounter().getCount()); } @@ -374,8 +372,8 @@ public void testSplitTableRegionDeletedRowsDaughter() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo [] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1, columnFamilyName2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2); // Split to two daughters with one of them only has 1 row int splitRowNum = rowCount; @@ -420,8 +418,8 @@ public void testInvalidSplitKey() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo [] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1, columnFamilyName2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2); assertTrue("not able to find a splittable region", regions != null); @@ -449,8 +447,8 @@ public void testRollbackAndDoubleExecution() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1, columnFamilyName2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2); int splitRowNum = startRowNum + rowCount / 2; byte[] splitKey = Bytes.toBytes("" + splitRowNum); @@ -472,15 +470,14 @@ public void testRollbackAndDoubleExecution() throws Exception { // NOTE: the 7 (number of SPLIT_TABLE_REGION_UPDATE_META step) is // hardcoded, so you have to look at this test at least once when you add a new step. 
int lastStep = 7; - MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep, - true); + MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep, true); // check that we have only 1 region assertEquals(1, UTIL.getAdmin().getRegions(tableName).size()); UTIL.waitUntilAllRegionsAssigned(tableName); List newRegions = UTIL.getMiniHBaseCluster().getRegions(tableName); assertEquals(1, newRegions.size()); - verifyData(newRegions.get(0), startRowNum, rowCount, - Bytes.toBytes(columnFamilyName1), Bytes.toBytes(columnFamilyName2)); + verifyData(newRegions.get(0), startRowNum, rowCount, Bytes.toBytes(columnFamilyName1), + Bytes.toBytes(columnFamilyName2)); assertEquals(splitSubmittedCount + 1, splitProcMetrics.getSubmittedCounter().getCount()); assertEquals(splitFailedCount + 1, splitProcMetrics.getFailedCounter().getCount()); @@ -491,8 +488,8 @@ public void testRecoveryAndDoubleExecution() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo [] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1, columnFamilyName2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2); int splitRowNum = startRowNum + rowCount / 2; byte[] splitKey = Bytes.toBytes("" + splitRowNum); @@ -525,8 +522,8 @@ public void testSplitWithoutPONR() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo [] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, columnFamilyName1, columnFamilyName2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, + columnFamilyName1, columnFamilyName2); insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2); int splitRowNum = startRowNum + rowCount / 2; byte[] splitKey = Bytes.toBytes("" + splitRowNum); @@ -538,7 +535,7 @@ public void testSplitWithoutPONR() throws Exception { // Split region of the table long procId = procExec.submitProcedure( - new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey)); + new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey)); // Execute until step 7 of split procedure // NOTE: the 7 (number after SPLIT_TABLE_REGION_UPDATE_META step) @@ -553,13 +550,12 @@ public void testSplitWithoutPONR() throws Exception { verify(tableName, splitRowNum); } - private void deleteData( - final TableName tableName, - final int startDeleteRowNum) throws IOException, InterruptedException { + private void deleteData(final TableName tableName, final int startDeleteRowNum) + throws IOException, InterruptedException { Table t = UTIL.getConnection().getTable(tableName); final int numRows = rowCount + startRowNum - startDeleteRowNum; Delete d; - for (int i= startDeleteRowNum; i <= numRows + startDeleteRowNum; i++) { + for (int i = startDeleteRowNum; i <= numRows + startDeleteRowNum; i++) { d = new Delete(Bytes.toBytes("" + i)); t.delete(d); if (i % 5 == 0) { @@ -576,29 +572,21 @@ private void verify(final TableName tableName, final int splitRowNum) throws IOE int startRow; int numRows; for (int i = 0; i < daughters.size(); i++) { - if 
(Bytes.compareTo( - daughters.get(i).getRegionInfo().getStartKey(), HConstants.EMPTY_BYTE_ARRAY) == 0) { + if (Bytes.compareTo(daughters.get(i).getRegionInfo().getStartKey(), + HConstants.EMPTY_BYTE_ARRAY) == 0) { startRow = startRowNum; // first region numRows = splitRowNum - startRowNum; } else { startRow = splitRowNum; numRows = rowCount + startRowNum - splitRowNum; } - verifyData( - daughters.get(i), - startRow, - numRows, - Bytes.toBytes(columnFamilyName1), + verifyData(daughters.get(i), startRow, numRows, Bytes.toBytes(columnFamilyName1), Bytes.toBytes(columnFamilyName2)); } } - private void verifyData( - final HRegion newReg, - final int startRow, - final int numRows, - final byte[]... families) - throws IOException { + private void verifyData(final HRegion newReg, final int startRow, final int numRows, + final byte[]... families) throws IOException { for (int i = startRow; i < startRow + numRows; i++) { byte[] row = Bytes.toBytes("" + i); Get get = new Get(row); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java index 0da36a4a98a2..5f3100607068 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestTransitRegionStateProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ public class TestTransitRegionStateProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTransitRegionStateProcedure.class); + HBaseClassTestRule.forClass(TestTransitRegionStateProcedure.class); private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -84,7 +84,7 @@ public void setUp() throws IOException, InterruptedException { private void resetProcExecutorTestingKillFlag() { ProcedureExecutor procExec = - UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); assertTrue("expected executor to be running", procExec.isRunning()); } @@ -113,11 +113,11 @@ private void testRecoveryAndDoubleExcution(TransitRegionStateProcedure proc) thr @Test public void testRecoveryAndDoubleExecutionMove() throws Exception { MasterProcedureEnv env = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); HRegion region = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0); long openSeqNum = region.getOpenSeqNum(); TransitRegionStateProcedure proc = - TransitRegionStateProcedure.move(env, region.getRegionInfo(), null); + TransitRegionStateProcedure.move(env, region.getRegionInfo(), null); testRecoveryAndDoubleExcution(proc); HRegion region2 = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0); long openSeqNum2 = region2.getOpenSeqNum(); @@ -128,14 +128,14 @@ public void testRecoveryAndDoubleExecutionMove() throws Exception { @Test public void testRecoveryAndDoubleExecutionReopen() throws Exception { MasterProcedureEnv env = - 
UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); HRegionServer rs = UTIL.getRSForFirstRegionInTable(tableName); HRegion region = rs.getRegions(tableName).get(0); region.addReadRequestsCount(1); region.addWriteRequestsCount(2); long openSeqNum = region.getOpenSeqNum(); TransitRegionStateProcedure proc = - TransitRegionStateProcedure.reopen(env, region.getRegionInfo()); + TransitRegionStateProcedure.reopen(env, region.getRegionInfo()); testRecoveryAndDoubleExcution(proc); // should still be on the same RS HRegion region2 = rs.getRegions(tableName).get(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java index 59c381de0424..b6f1a68d54c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,7 +73,7 @@ public class TestWakeUpUnexpectedProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWakeUpUnexpectedProcedure.class); + HBaseClassTestRule.forClass(TestWakeUpUnexpectedProcedure.class); private static final Logger LOG = LoggerFactory.getLogger(TestWakeUpUnexpectedProcedure.class); @@ -145,8 +145,8 @@ public AMForTest(MasterServices master, MasterRegion masterRegion) { public ReportRegionStateTransitionResponse reportRegionStateTransition( ReportRegionStateTransitionRequest req) throws PleaseHoldException { RegionStateTransition rst = req.getTransition(0); - if (rst.getTransitionCode() == TransitionCode.OPENED && - ProtobufUtil.toTableName(rst.getRegionInfo(0).getTableName()).equals(NAME)) { + if (rst.getTransitionCode() == TransitionCode.OPENED + && ProtobufUtil.toTableName(rst.getRegionInfo(0).getTableName()).equals(NAME)) { CountDownLatch arrive = ARRIVE_REPORT; if (ARRIVE_REPORT != null) { ARRIVE_REPORT = null; @@ -205,13 +205,13 @@ public HMasterForTest(Configuration conf) throws IOException { @Override protected AssignmentManager createAssignmentManager(MasterServices master, - MasterRegion masterRegion) { + MasterRegion masterRegion) { return new AMForTest(master, masterRegion); } @Override - protected ServerManager createServerManager(MasterServices master, - RegionServerList storage) throws IOException { + protected ServerManager createServerManager(MasterServices master, RegionServerList storage) + throws IOException { setupClusterConnection(); return new SMForTest(master, storage); } @@ -220,7 +220,7 @@ protected ServerManager createServerManager(MasterServices master, @BeforeClass public static void setUp() throws Exception { UTIL.startMiniCluster(StartTestingClusterOption.builder().numMasters(1) - .masterClass(HMasterForTest.class).numRegionServers(3).rsClass(RSForTest.class).build()); + .masterClass(HMasterForTest.class).numRegionServers(3).rsClass(RSForTest.class).build()); UTIL.createTable(NAME, CF); // Here the test region must not be hosted on the same rs with meta region. 
// We have 3 RSes and only two regions(meta and the test region), so they will not likely to be diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadOnlyFavoredStochasticBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadOnlyFavoredStochasticBalancer.java index f7521c9250ac..ba6ee4af843f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadOnlyFavoredStochasticBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/LoadOnlyFavoredStochasticBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java index 964528d2810a..a925f860e009 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/RSGroupableBalancerTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,6 +54,7 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -74,11 +75,10 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase { static Configuration conf = HBaseConfiguration.create(); /** - * Invariant is that all servers of a group have load between floor(avg) and - * ceiling(avg) number of regions. + * Invariant is that all servers of a group have load between floor(avg) and ceiling(avg) number + * of regions. */ - protected void assertClusterAsBalanced( - ArrayListMultimap groupLoadMap) { + protected void assertClusterAsBalanced(ArrayListMultimap groupLoadMap) { for (String gName : groupLoadMap.keySet()) { List groupLoad = groupLoadMap.get(gName); int numServers = groupLoad.size(); @@ -134,21 +134,19 @@ protected void assertImmediateAssignment(List regions, List *
    <li>Every input region has an assignment, and to an online server - *
    <li>If a region had an existing assignment to a server with the same - * address a a currently online server, it will be assigned to it + *
    • If a region had an existing assignment to a server with the same address a a currently + * online server, it will be assigned to it * */ - protected void assertRetainedAssignment( - Map existing, List servers, - Map> assignment) + protected void assertRetainedAssignment(Map existing, + List servers, Map> assignment) throws FileNotFoundException, IOException { // Verify condition 1, every region assigned, and to online server Set onlineServerSet = new TreeSet<>(servers); Set assignedRegions = new TreeSet<>(RegionInfo.COMPARATOR); for (Map.Entry> a : assignment.entrySet()) { - assertTrue( - "Region assigned to server that was not listed as online", - onlineServerSet.contains(a.getKey())); + assertTrue("Region assigned to server that was not listed as online", + onlineServerSet.contains(a.getKey())); for (RegionInfo r : a.getValue()) { assignedRegions.add(r); } @@ -172,8 +170,8 @@ protected void assertRetainedAssignment( RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName); assertTrue("Region is not correctly assigned to group servers.", gInfo.containsServer(currentServer.getAddress())); - if (oldAssignedServer != null && - onlineHostNames.contains(oldAssignedServer.getHostname())) { + if (oldAssignedServer != null + && onlineHostNames.contains(oldAssignedServer.getHostname())) { // this region was previously assigned somewhere, and that // host is still around, then the host must have been is a // different group. @@ -185,8 +183,7 @@ protected void assertRetainedAssignment( } } - protected String printStats( - ArrayListMultimap groupBasedLoad) { + protected String printStats(ArrayListMultimap groupBasedLoad) { StringBuilder sb = new StringBuilder(); sb.append("\n"); for (String groupName : groupBasedLoad.keySet()) { @@ -199,16 +196,15 @@ protected String printStats( int totalRegions = 0; sb.append("Per Server Load: \n"); for (ServerAndLoad sLoad : groupLoad) { - sb.append("Server :" + sLoad.getServerName() + " Load : " - + sLoad.getLoad() + "\n"); + sb.append("Server :" + sLoad.getServerName() + " Load : " + sLoad.getLoad() + "\n"); totalRegions += sLoad.getLoad(); } sb.append(" Group Statistics : \n"); float average = (float) totalRegions / numServers; int max = (int) Math.ceil(average); int min = (int) Math.floor(average); - sb.append("[srvr=" + numServers + " rgns=" + totalRegions + " avg=" - + average + " max=" + max + " min=" + min + "]"); + sb.append("[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + average + " max=" + max + + " min=" + min + "]"); sb.append("\n"); sb.append("==============================="); sb.append("\n"); @@ -218,32 +214,28 @@ protected String printStats( protected ArrayListMultimap convertToGroupBasedMap( final Map> serversMap) throws IOException { - ArrayListMultimap loadMap = ArrayListMultimap - .create(); + ArrayListMultimap loadMap = ArrayListMultimap.create(); for (RSGroupInfo gInfo : getMockedGroupInfoManager().listRSGroups()) { Set
      groupServers = gInfo.getServers(); for (Address hostPort : groupServers) { ServerName actual = null; - for(ServerName entry: servers) { - if(entry.getAddress().equals(hostPort)) { + for (ServerName entry : servers) { + if (entry.getAddress().equals(hostPort)) { actual = entry; break; } } List regions = serversMap.get(actual); assertTrue("No load for " + actual, regions != null); - loadMap.put(gInfo.getName(), - new ServerAndLoad(actual, regions.size())); + loadMap.put(gInfo.getName(), new ServerAndLoad(actual, regions.size())); } } return loadMap; } - protected ArrayListMultimap reconcile( - ArrayListMultimap previousLoad, - List plans) { - ArrayListMultimap result = ArrayListMultimap - .create(); + protected ArrayListMultimap + reconcile(ArrayListMultimap previousLoad, List plans) { + ArrayListMultimap result = ArrayListMultimap.create(); result.putAll(previousLoad); if (plans != null) { for (RegionPlan plan : plans) { @@ -256,8 +248,7 @@ protected ArrayListMultimap reconcile( return result; } - protected void updateLoad( - ArrayListMultimap previousLoad, + protected void updateLoad(ArrayListMultimap previousLoad, final ServerName sn, final int diff) { for (String groupName : previousLoad.keySet()) { ServerAndLoad newSAL = null; @@ -290,7 +281,6 @@ protected Map> mockClusterServers() throws IOExcept /** * Generate a list of regions evenly distributed between the tables. - * * @param numRegions The number of regions to be generated. * @return List of RegionInfo. */ @@ -305,19 +295,14 @@ protected List randomRegions(int numRegions) { Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(end, 0, (numRegions << 1) + 1); int tableIndex = (i + regionIdx) % tables.length; - regions.add(RegionInfoBuilder.newBuilder(tables[tableIndex]) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .setRegionId(regionId++) - .build()); + regions.add(RegionInfoBuilder.newBuilder(tables[tableIndex]).setStartKey(start).setEndKey(end) + .setSplit(false).setRegionId(regionId++).build()); } return regions; } /** * Generate assigned regions to a given server using group information. 
- * * @param numRegions the num regions to generate * @param sn the servername * @return the list of regions @@ -331,12 +316,8 @@ protected List assignedRegions(int numRegions, ServerName sn) throws Bytes.putInt(end, 0, (numRegions << 1) + 1); for (int i = 0; i < numRegions; i++) { TableName tableName = getTableName(sn); - regions.add(RegionInfoBuilder.newBuilder(tableName) - .setStartKey(start) - .setEndKey(end) - .setSplit(false) - .setRegionId(regionId++) - .build()); + regions.add(RegionInfoBuilder.newBuilder(tableName).setStartKey(start).setEndKey(end) + .setSplit(false).setRegionId(regionId++).build()); } return regions; } @@ -425,8 +406,7 @@ public RSGroupInfo answer(InvocationOnMock invocation) throws Throwable { return groupMap.get(invocation.getArgument(0)); } }); - Mockito.when(gm.listRSGroups()).thenReturn( - Lists.newLinkedList(groupMap.values())); + Mockito.when(gm.listRSGroups()).thenReturn(Lists.newLinkedList(groupMap.values())); Mockito.when(gm.isOnline()).thenReturn(true); return gm; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerDecision.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerDecision.java index f7e1110283d1..ba7f9afb537c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerDecision.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerDecision.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import static org.mockito.Mockito.mock; @@ -54,7 +53,7 @@ public class TestBalancerDecision extends StochasticBalancerTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBalancerDecision.class); + HBaseClassTestRule.forClass(TestBalancerDecision.class); @Test public void testBalancerDecisions() { @@ -68,14 +67,14 @@ public void testBalancerDecisions() { conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f); try { // Test with/without per table balancer. 
- boolean[] perTableBalancerConfigs = {true, false}; + boolean[] perTableBalancerConfigs = { true, false }; for (boolean isByTable : perTableBalancerConfigs) { conf.setBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, isByTable); loadBalancer.onConfigurationChange(conf); for (int[] mockCluster : clusterStateMocks) { Map> servers = mockClusterServers(mockCluster); Map>> LoadOfAllTable = - (Map) mockClusterServersWithTables(servers); + (Map) mockClusterServersWithTables(servers); List plans = loadBalancer.balanceCluster(LoadOfAllTable); boolean emptyPlans = plans == null || plans.isEmpty(); Assert.assertTrue(emptyPlans || needsBalanceIdleRegion(mockCluster)); @@ -84,17 +83,14 @@ public void testBalancerDecisions() { final NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest(); namedQueueGetRequest.setNamedQueueEvent(BalancerDecisionDetails.BALANCER_DECISION_EVENT); namedQueueGetRequest - .setBalancerDecisionsRequest(MasterProtos.BalancerDecisionsRequest.getDefaultInstance()); + .setBalancerDecisionsRequest(MasterProtos.BalancerDecisionsRequest.getDefaultInstance()); NamedQueueGetResponse namedQueueGetResponse = - provider.getNamedQueueRecorder().getNamedQueueRecords(namedQueueGetRequest); + provider.getNamedQueueRecorder().getNamedQueueRecords(namedQueueGetRequest); List balancerDecisions = - namedQueueGetResponse.getBalancerDecisions(); - MasterProtos.BalancerDecisionsResponse response = - MasterProtos.BalancerDecisionsResponse.newBuilder() - .addAllBalancerDecision(balancerDecisions) - .build(); - List balancerDecisionRecords = - ProtobufUtil.getBalancerDecisionEntries(response); + namedQueueGetResponse.getBalancerDecisions(); + MasterProtos.BalancerDecisionsResponse response = MasterProtos.BalancerDecisionsResponse + .newBuilder().addAllBalancerDecision(balancerDecisions).build(); + List balancerDecisionRecords = ProtobufUtil.getBalancerDecisionEntries(response); Assert.assertTrue(balancerDecisionRecords.size() > 160); } finally { // reset config @@ -105,7 +101,7 @@ public void testBalancerDecisions() { } private static boolean needsBalanceIdleRegion(int[] cluster) { - return (Arrays.stream(cluster).anyMatch(x -> x > 1)) && (Arrays.stream(cluster) - .anyMatch(x -> x < 1)); + return (Arrays.stream(cluster).anyMatch(x -> x > 1)) + && (Arrays.stream(cluster).anyMatch(x -> x < 1)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerRejection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerRejection.java index 98ab3eda6f5c..64bac25da486 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerRejection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerRejection.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.balancer; import static org.mockito.Mockito.mock; @@ -52,9 +51,9 @@ public class TestBalancerRejection extends StochasticBalancerTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBalancerRejection.class); + HBaseClassTestRule.forClass(TestBalancerRejection.class); - static class MockCostFunction extends CostFunction{ + static class MockCostFunction extends CostFunction { public static double mockCost; public MockCostFunction(Configuration c) { @@ -72,44 +71,44 @@ float getMultiplier() { } @Test - public void testBalancerRejections() throws Exception{ + public void testBalancerRejections() throws Exception { try { - //enabled balancer rejection recording + // enabled balancer rejection recording conf.setBoolean(BaseLoadBalancer.BALANCER_REJECTION_BUFFER_ENABLED, true); - conf.set(StochasticLoadBalancer.COST_FUNCTIONS_COST_FUNCTIONS_KEY, MockCostFunction.class.getName()); + conf.set(StochasticLoadBalancer.COST_FUNCTIONS_COST_FUNCTIONS_KEY, + MockCostFunction.class.getName()); MasterServices services = mock(MasterServices.class); when(services.getConfiguration()).thenReturn(conf); MasterClusterInfoProvider provider = new MasterClusterInfoProvider(services); loadBalancer.setClusterInfoProvider(provider); loadBalancer.onConfigurationChange(conf); - //Simulate 2 servers with 5 regions. + // Simulate 2 servers with 5 regions. Map> servers = mockClusterServers(new int[] { 5, 5 }); - Map>> LoadOfAllTable = (Map) mockClusterServersWithTables(servers); + Map>> LoadOfAllTable = + (Map) mockClusterServersWithTables(servers); - //Reject case 1: Total cost < 0 + // Reject case 1: Total cost < 0 MockCostFunction.mockCost = -Double.MAX_VALUE; - //Since the Balancer was rejected, there should not be any plans + // Since the Balancer was rejected, there should not be any plans Assert.assertNull(loadBalancer.balanceCluster(LoadOfAllTable)); - //Reject case 2: Cost < minCostNeedBalance + // Reject case 2: Cost < minCostNeedBalance MockCostFunction.mockCost = 1; conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", Float.MAX_VALUE); loadBalancer.onConfigurationChange(conf); Assert.assertNull(loadBalancer.balanceCluster(LoadOfAllTable)); - //NamedQueue is an async Producer-consumer Pattern, waiting here until it completed + // NamedQueue is an async Producer-consumer Pattern, waiting here until it completed int maxWaitingCount = 10; while (maxWaitingCount-- > 0 && getBalancerRejectionLogEntries(provider).size() != 2) { Thread.sleep(1000); } - //There are two cases, should be 2 logEntries + // There are two cases, should be 2 logEntries List logEntries = getBalancerRejectionLogEntries(provider); Assert.assertEquals(2, logEntries.size()); - Assert.assertTrue( - logEntries.get(0).toJsonPrettyPrint().contains("minCostNeedBalance")); - Assert.assertTrue( - logEntries.get(1).toJsonPrettyPrint().contains("cost1*multiplier1")); - }finally { + Assert.assertTrue(logEntries.get(0).toJsonPrettyPrint().contains("minCostNeedBalance")); + Assert.assertTrue(logEntries.get(1).toJsonPrettyPrint().contains("cost1*multiplier1")); + } finally { conf.unset(StochasticLoadBalancer.COST_FUNCTIONS_COST_FUNCTIONS_KEY); conf.unset(BaseLoadBalancer.BALANCER_REJECTION_BUFFER_ENABLED); loadBalancer.onConfigurationChange(conf); @@ -120,13 +119,13 @@ private List getBalancerRejectionLogEntries(MasterClusterInfoProvider NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest(); 
namedQueueGetRequest.setNamedQueueEvent(BalancerRejectionDetails.BALANCER_REJECTION_EVENT); namedQueueGetRequest - .setBalancerRejectionsRequest(MasterProtos.BalancerRejectionsRequest.getDefaultInstance()); + .setBalancerRejectionsRequest(MasterProtos.BalancerRejectionsRequest.getDefaultInstance()); NamedQueueGetResponse namedQueueGetResponse = - provider.getNamedQueueRecorder().getNamedQueueRecords(namedQueueGetRequest); + provider.getNamedQueueRecorder().getNamedQueueRecords(namedQueueGetRequest); List balancerRejections = - namedQueueGetResponse.getBalancerRejections(); + namedQueueGetResponse.getBalancerRejections(); MasterProtos.BalancerRejectionsResponse response = MasterProtos.BalancerRejectionsResponse - .newBuilder().addAllBalancerRejection(balancerRejections).build(); + .newBuilder().addAllBalancerRejection(balancerRejections).build(); List balancerRejectionRecords = ProtobufUtil.getBalancerRejectionEntries(response); return balancerRejectionRecords; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerStatusTagInJMXMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerStatusTagInJMXMetrics.java index a3c566a75c2e..717e342fd92b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerStatusTagInJMXMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBalancerStatusTagInJMXMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -46,7 +45,8 @@ public class TestBalancerStatusTagInJMXMetrics extends BalancerTestBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestBalancerStatusTagInJMXMetrics.class); - private static final Logger LOG = LoggerFactory.getLogger(TestBalancerStatusTagInJMXMetrics.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestBalancerStatusTagInJMXMetrics.class); private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static int connectorPort = 61120; private static HMaster master; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredNodeTableImport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredNodeTableImport.java index 4489c421d6f5..d980a46c88ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredNodeTableImport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredNodeTableImport.java @@ -50,8 +50,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /* - * This case tests a scenario when a cluster with tables is moved from Stochastic Load Balancer - * to FavoredStochasticLoadBalancer and the generation of favored nodes after switch. + * This case tests a scenario when a cluster with tables is moved from Stochastic Load Balancer to + * FavoredStochasticLoadBalancer and the generation of favored nodes after switch. 
*/ @Category(MediumTests.class) public class TestFavoredNodeTableImport { @@ -86,8 +86,8 @@ public void testTableCreation() throws Exception { String tableName = "testFNImport"; TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, Bytes.toBytes("a"), Bytes.toBytes("z"), REGION_NUM); UTIL.waitTableAvailable(tableDescriptor.getTableName()); admin.balancerSwitch(true, true); @@ -98,7 +98,7 @@ public void testTableCreation() throws Exception { Thread.sleep(2000); LOG.info("Starting cluster again with FN Balancer"); UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - FavoredStochasticBalancer.class.getName()); + FavoredStochasticBalancer.class.getName()); UTIL.restartHBaseCluster(SLAVES); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); while (!master.isInitialized()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java index 1a447d2a98fa..39920015cc93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -94,7 +94,7 @@ public static void setupBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); // Enable favored nodes based load balancer conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - LoadOnlyFavoredStochasticBalancer.class, LoadBalancer.class); + LoadOnlyFavoredStochasticBalancer.class, LoadBalancer.class); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000); conf.setInt("hbase.master.balancer.stochastic.moveCost", 0); conf.setBoolean("hbase.master.balancer.stochastic.execute.maxSteps", true); @@ -104,7 +104,7 @@ public static void setupBeforeClass() throws Exception { public void startCluster() throws Exception { TEST_UTIL.startMiniCluster(SLAVES); TEST_UTIL.getDFSCluster().waitClusterUp(); - TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(120*1000); + TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(120 * 1000); cluster = TEST_UTIL.getMiniHBaseCluster(); admin = TEST_UTIL.getAdmin(); admin.balancerSwitch(false, true); @@ -116,16 +116,13 @@ public void stopCluster() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test public void testPickers() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY).build(); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(tableName) - .setColumnFamily(columnFamilyDescriptor) - .build(); + TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(columnFamilyDescriptor).build(); admin.createTable(desc, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGIONS); 
TEST_UTIL.waitUntilAllRegionsAssigned(tableName); TEST_UTIL.loadTable(admin.getConnection().getTable(tableName), HConstants.CATALOG_FAMILY); @@ -142,7 +139,7 @@ public void testPickers() throws Exception { // Lets find another server with more regions to calculate number of regions to move ServerName source = getRSWithMaxRegions(tableName, excludedServers); assertNotNull(source); - int regionsToMove = getTableRegionsFromServer(tableName, source).size()/2; + int regionsToMove = getTableRegionsFromServer(tableName, source).size() / 2; // Since move only works if the target is part of favored nodes of the region, lets get all // regions that are movable to mostLoadedServer @@ -155,8 +152,8 @@ public void testPickers() throws Exception { TEST_UTIL.waitFor(60000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return ServerName.isSameAddress( - rst.getRegionServerOfRegion(regionInfo), mostLoadedServer); + return ServerName.isSameAddress(rst.getRegionServerOfRegion(regionInfo), + mostLoadedServer); } }); } @@ -184,21 +181,21 @@ public boolean evaluate() throws Exception { regionFinder.setClusterInfoProvider( new MasterClusterInfoProvider(TEST_UTIL.getMiniHBaseCluster().getMaster())); BalancerClusterState cluster = - new BalancerClusterState(serverAssignments, null, regionFinder, new RackManager(conf)); + new BalancerClusterState(serverAssignments, null, regionFinder, new RackManager(conf)); LoadOnlyFavoredStochasticBalancer balancer = (LoadOnlyFavoredStochasticBalancer) TEST_UTIL - .getMiniHBaseCluster().getMaster().getLoadBalancer().getInternalBalancer(); + .getMiniHBaseCluster().getMaster().getLoadBalancer().getInternalBalancer(); cluster.sortServersByRegionCount(); Integer[] servers = cluster.serverIndicesSortedByRegionCount; LOG.info("Servers sorted by region count:" + Arrays.toString(servers)); LOG.info("Cluster dump: " + cluster); - if (!mostLoadedServer.equals(cluster.servers[servers[servers.length -1]])) { + if (!mostLoadedServer.equals(cluster.servers[servers[servers.length - 1]])) { LOG.error("Most loaded server: " + mostLoadedServer + " does not match: " - + cluster.servers[servers[servers.length -1]]); + + cluster.servers[servers[servers.length - 1]]); } assertEquals(mostLoadedServer, cluster.servers[servers[servers.length - 1]]); FavoredStochasticBalancer.FavoredNodeLoadPicker loadPicker = - balancer.new FavoredNodeLoadPicker(); + balancer.new FavoredNodeLoadPicker(); boolean userRegionPicked = false; for (int i = 0; i < 100; i++) { if (userRegionPicked) { @@ -213,8 +210,7 @@ public boolean evaluate() throws Exception { assertEquals(cluster.servers[moveRegionAction.getFromServer()], mostLoadedServer); if (!region.getTable().isSystemTable()) { List favNodes = fnm.getFavoredNodes(region); - assertTrue(favNodes.contains( - ServerName.valueOf(destinationServer.getAddress(), -1))); + assertTrue(favNodes.contains(ServerName.valueOf(destinationServer.getAddress(), -1))); userRegionPicked = true; } } @@ -224,17 +220,16 @@ public boolean evaluate() throws Exception { } /* - * A region can only be moved to one of its favored node. Hence this method helps us to - * get that list which makes it easy to write non-flaky tests. + * A region can only be moved to one of its favored node. Hence this method helps us to get that + * list which makes it easy to write non-flaky tests. 
*/ - private List getRegionsThatCanBeMoved(TableName tableName, - ServerName serverName) { + private List getRegionsThatCanBeMoved(TableName tableName, ServerName serverName) { List regions = Lists.newArrayList(); RegionStates rst = cluster.getMaster().getAssignmentManager().getRegionStates(); FavoredNodesManager fnm = cluster.getMaster().getFavoredNodesManager(); for (RegionInfo regionInfo : fnm.getRegionsOfFavoredNode(serverName)) { - if (regionInfo.getTable().equals(tableName) && - !ServerName.isSameAddress(rst.getRegionServerOfRegion(regionInfo), serverName)) { + if (regionInfo.getTable().equals(tableName) + && !ServerName.isSameAddress(rst.getRegionServerOfRegion(regionInfo), serverName)) { regions.add(regionInfo); } } @@ -261,8 +256,8 @@ private ServerName getRSWithMaxRegions(TableName tableName, List exc List regions = rst.getRegionServer().getRegions(tableName); LOG.debug("Server: " + rst.getRegionServer().getServerName() + " regions: " + regions.size()); if (regions.size() > maxRegions) { - if (excludeNodes == null || - !doesMatchExcludeNodes(excludeNodes, rst.getRegionServer().getServerName())) { + if (excludeNodes == null + || !doesMatchExcludeNodes(excludeNodes, rst.getRegionServer().getServerName())) { maxRegions = regions.size(); maxLoadedServer = rst.getRegionServer().getServerName(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java index 02a3e65c631a..1a73628ab380 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java @@ -95,7 +95,7 @@ public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // Enable the favored nodes based load balancer conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - LoadOnlyFavoredStochasticBalancer.class, LoadBalancer.class); + LoadOnlyFavoredStochasticBalancer.class, LoadBalancer.class); } @Before @@ -119,7 +119,7 @@ public void testBasicBalance() throws Exception { TableName tableName = TableName.valueOf("testBasicBalance"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM); TEST_UTIL.waitTableAvailable(tableName); TEST_UTIL.loadTable(admin.getConnection().getTable(tableName), HConstants.CATALOG_FAMILY); @@ -152,7 +152,7 @@ public void testRoundRobinAssignment() throws Exception { TableName tableName = TableName.valueOf("testRoundRobinAssignment"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM); TEST_UTIL.waitTableAvailable(tableName); TEST_UTIL.loadTable(admin.getConnection().getTable(tableName), HConstants.CATALOG_FAMILY); @@ -170,13 +170,12 @@ public void testRoundRobinAssignment() throws Exception { assertEquals("No region should be missed by 
balancer", 0, regions.size()); } - @Test public void testBasicRegionPlacementAndReplicaLoad() throws Exception { String tableName = "testBasicRegionPlacement"; TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM); TEST_UTIL.waitTableAvailable(tableDescriptor.getTableName()); @@ -188,12 +187,11 @@ public void testBasicRegionPlacementAndReplicaLoad() throws Exception { assertEquals(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, favNodes.size()); } - Map> replicaLoadMap = fnm.getReplicaLoad( - Lists.newArrayList(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet())); + Map> replicaLoadMap = fnm.getReplicaLoad(Lists.newArrayList( + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet())); assertTrue("Not all replica load collected.", - admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().size() == replicaLoadMap.size()); + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics() + .size() == replicaLoadMap.size()); for (Entry> entry : replicaLoadMap.entrySet()) { assertTrue(entry.getValue().size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); assertTrue(entry.getValue().get(0) >= 0); @@ -206,9 +204,8 @@ public void testBasicRegionPlacementAndReplicaLoad() throws Exception { replicaLoadMap = fnm.getReplicaLoad(Lists.newArrayList( admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet())); assertTrue("replica load found " + replicaLoadMap.size() + " instead of 0.", - replicaLoadMap.size() == admin - .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics() - .size()); + replicaLoadMap.size() == admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) + .getLiveServerMetrics().size()); } @Test @@ -216,8 +213,8 @@ public void testRandomAssignmentWithNoFavNodes() throws Exception { final String tableName = "testRandomAssignmentWithNoFavNodes"; TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor); TEST_UTIL.waitTableAvailable(tableDescriptor.getTableName()); @@ -228,9 +225,9 @@ public void testRandomAssignmentWithNoFavNodes() throws Exception { assertNull("Favored nodes not found null after delete", fnm.getFavoredNodes(hri)); LoadBalancer balancer = master.getLoadBalancer(); - ServerName destination = balancer.randomAssignment(hri, Lists.newArrayList(admin - .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics() - .keySet().stream().collect(Collectors.toList()))); + ServerName destination = balancer.randomAssignment(hri, + Lists.newArrayList(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) + .getLiveServerMetrics().keySet().stream().collect(Collectors.toList()))); assertNotNull(destination); List favoredNodes = fnm.getFavoredNodes(hri); assertNotNull(favoredNodes); @@ -248,7 +245,7 @@ public void 
testBalancerWithoutFavoredNodes() throws Exception { TableName tableName = TableName.valueOf("testBalancerWithoutFavoredNodes"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM); TEST_UTIL.waitTableAvailable(tableName); @@ -272,8 +269,8 @@ public void testBalancerWithoutFavoredNodes() throws Exception { currentFN = fnm.getFavoredNodes(region); assertNotNull(currentFN); - assertEquals("Expected number of FN not present", - FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, currentFN.size()); + assertEquals("Expected number of FN not present", FavoredNodeAssignmentHelper.FAVORED_NODES_NUM, + currentFN.size()); assertTrue("Balancer did not run", admin.balance()); TEST_UTIL.waitUntilNoRegionsInTransition(60000); @@ -281,11 +278,12 @@ public void testBalancerWithoutFavoredNodes() throws Exception { checkFavoredNodeAssignments(tableName, fnm, regionStates); } - @Ignore @Test + @Ignore + @Test public void testMisplacedRegions() throws Exception { TableName tableName = TableName.valueOf("testMisplacedRegions"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM); TEST_UTIL.waitTableAvailable(tableName); @@ -296,7 +294,7 @@ public void testMisplacedRegions() throws Exception { List serversForNewFN = Lists.newArrayList(); for (ServerName sn : admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet()) { + .getLiveServerMetrics().keySet()) { serversForNewFN.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE)); } for (ServerName sn : currentFN) { @@ -314,7 +312,7 @@ public void testMisplacedRegions() throws Exception { final RegionStates regionStates = master.getAssignmentManager().getRegionStates(); final ServerName current = regionStates.getRegionServerOfRegion(misplacedRegion); assertNull("Misplaced region is still hosted on favored node, not expected.", - FavoredNodesPlan.getFavoredServerPosition(fnm.getFavoredNodes(misplacedRegion), current)); + FavoredNodesPlan.getFavoredServerPosition(fnm.getFavoredNodes(misplacedRegion), current)); admin.balancerSwitch(true, true); assertTrue("Balancer did not run", admin.balance()); TEST_UTIL.waitFor(120000, 30000, new Waiter.Predicate() { @@ -331,7 +329,7 @@ public boolean evaluate() throws Exception { public void test2FavoredNodesDead() throws Exception { TableName tableName = TableName.valueOf("testAllFavoredNodesDead"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM); TEST_UTIL.waitTableAvailable(tableName); @@ -364,11 +362,12 @@ public boolean evaluate() throws Exception { checkFavoredNodeAssignments(tableName, fnm, regionStates); } - @Ignore @Test + @Ignore + @Test public void testAllFavoredNodesDead() throws Exception { 
TableName tableName = TableName.valueOf("testAllFavoredNodesDead"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM); TEST_UTIL.waitTableAvailable(tableName); @@ -390,19 +389,19 @@ public boolean evaluate() throws Exception { }); assertTrue("Region: " + region + " should be RIT", - regionStates.getRegionState(region).isFailedOpen()); + regionStates.getRegionState(region).isFailedOpen()); // Regenerate FN and assign, everything else should be fine List serversForNewFN = Lists.newArrayList(); for (ServerName sn : admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet()) { + .getLiveServerMetrics().keySet()) { serversForNewFN.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE)); } FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(serversForNewFN, conf); helper.initialize(); - for (RegionStateNode regionState: regionStates.getRegionsInTransition()) { + for (RegionStateNode regionState : regionStates.getRegionsInTransition()) { RegionInfo regionInfo = regionState.getRegionInfo(); List newFavoredNodes = helper.generateFavoredNodes(regionInfo); assertNotNull(newFavoredNodes); @@ -425,11 +424,12 @@ public boolean evaluate() throws Exception { checkFavoredNodeAssignments(tableName, fnm, regionStates); } - @Ignore @Test + @Ignore + @Test public void testAllFavoredNodesDeadMasterRestarted() throws Exception { TableName tableName = TableName.valueOf("testAllFavoredNodesDeadMasterRestarted"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); admin.createTable(tableDescriptor, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), REGION_NUM); TEST_UTIL.waitTableAvailable(tableName); @@ -442,8 +442,7 @@ public void testAllFavoredNodesDeadMasterRestarted() throws Exception { // Lets kill all the RS that are favored nodes for this region. 
stopServersAndWaitUntilProcessed(currentFN); - final RegionStates regionStatesBeforeMaster = - master.getAssignmentManager().getRegionStates(); + final RegionStates regionStatesBeforeMaster = master.getAssignmentManager().getRegionStates(); TEST_UTIL.waitFor(10000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { @@ -452,22 +451,22 @@ public boolean evaluate() throws Exception { }); assertTrue("Region: " + region + " should be RIT", - regionStatesBeforeMaster.getRegionState(region).isFailedOpen()); + regionStatesBeforeMaster.getRegionState(region).isFailedOpen()); List rit = Lists.newArrayList(); - for (RegionStateNode regionState: regionStatesBeforeMaster.getRegionsInTransition()) { + for (RegionStateNode regionState : regionStatesBeforeMaster.getRegionsInTransition()) { RegionInfo regionInfo = regionState.getRegionInfo(); LOG.debug("Region in transition after stopping FN's: " + regionInfo); rit.add(regionInfo); assertTrue("Region: " + regionInfo + " should be RIT", - regionStatesBeforeMaster.getRegionState(regionInfo).isFailedOpen()); - assertEquals("Region: " + regionInfo + " does not belong to table: " + tableName, - tableName, regionInfo.getTable()); + regionStatesBeforeMaster.getRegionState(regionInfo).isFailedOpen()); + assertEquals("Region: " + regionInfo + " does not belong to table: " + tableName, tableName, + regionInfo.getTable()); } Configuration conf = cluster.getConf(); conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, - SLAVES - FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); + SLAVES - FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); cluster.stopMaster(master.getServerName()); cluster.waitForMasterToStop(master.getServerName(), 60000); @@ -479,17 +478,17 @@ public boolean evaluate() throws Exception { RegionStates regionStates = master.getAssignmentManager().getRegionStates(); assertTrue("Region: " + region + " should be RIT", - regionStates.getRegionState(region).isFailedOpen()); + regionStates.getRegionState(region).isFailedOpen()); for (RegionInfo regionInfo : rit) { assertTrue("Region: " + regionInfo + " should be RIT", - regionStates.getRegionState(regionInfo).isFailedOpen()); + regionStates.getRegionState(regionInfo).isFailedOpen()); } // Regenerate FN and assign, everything else should be fine List serversForNewFN = Lists.newArrayList(); for (ServerName sn : admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet()) { + .getLiveServerMetrics().keySet()) { serversForNewFN.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), NON_STARTCODE)); } @@ -522,9 +521,10 @@ private void checkFavoredNodeAssignments(TableName tableName, FavoredNodesManage RegionStates regionStates) throws IOException { for (RegionInfo hri : admin.getRegions(tableName)) { ServerName host = regionStates.getRegionServerOfRegion(hri); - assertNotNull("Region: " + hri.getEncodedName() + " not on FN, current: " + host - + " FN list: " + fnm.getFavoredNodes(hri), - FavoredNodesPlan.getFavoredServerPosition(fnm.getFavoredNodes(hri), host)); + assertNotNull( + "Region: " + hri.getEncodedName() + " not on FN, current: " + host + " FN list: " + + fnm.getFavoredNodes(hri), + FavoredNodesPlan.getFavoredServerPosition(fnm.getFavoredNodes(hri), host)); } } @@ -547,13 +547,13 @@ public boolean evaluate() throws Exception { } }); - assertEquals("Not all servers killed", - SLAVES - currentFN.size(), cluster.getLiveRegionServerThreads().size()); + assertEquals("Not all servers killed", SLAVES - currentFN.size(), + 
cluster.getLiveRegionServerThreads().size()); } private void compactTable(TableName tableName) throws IOException { - for(JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) { - for(HRegion region : t.getRegionServer().getRegions(tableName)) { + for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) { + for (HRegion region : t.getRegionServer().getRegions(tableName)) { region.compact(true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index be19f2fca4fa..6e0d2b32703b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -76,10 +76,8 @@ public static void beforeAllTests() throws Exception { } /** - * Test the load balancing algorithm. - * - * Invariant is that all servers of the group should be hosting either floor(average) or - * ceiling(average) + * Test the load balancing algorithm. Invariant is that all servers of the group should be hosting + * either floor(average) or ceiling(average) */ @Test public void testBalanceCluster() throws Exception { @@ -145,10 +143,10 @@ public void testRetainAssignment() throws Exception { inputForTest.put(region, sn); } } - //verify region->null server assignment is handled + // verify region->null server assignment is handled inputForTest.put(randomRegions(1).get(0), null); - Map> newAssignment = loadBalancer - .retainAssignment(inputForTest, servers); + Map> newAssignment = + loadBalancer.retainAssignment(inputForTest, servers); assertRetainedAssignment(inputForTest, servers, newAssignment); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java index 7cde724f362a..9fee59c6a93c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,8 +55,8 @@ public class TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal extends RSGroupableBalancerTestBase { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass( - TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule + .forClass(TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.class); private static RSGroupBasedLoadBalancer loadBalancer; @BeforeClass @@ -79,7 +79,7 @@ private ServerMetrics mockServerMetricsWithReadRequests(ServerName server, List<RegionInfo> regionsOnServer, long readRequestCount) { ServerMetrics serverMetrics = mock(ServerMetrics.class); Map<byte[], RegionMetrics> regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(RegionInfo info : regionsOnServer){ + for (RegionInfo info : regionsOnServer) { RegionMetrics rl = mock(RegionMetrics.class); when(rl.getReadRequestCount()).thenReturn(readRequestCount); when(rl.getCpRequestCount()).thenReturn(0L); @@ -124,8 +124,8 @@ public void testBalanceCluster() throws IOException { // serverC : 0,0,0 // so should move two regions from serverA to serverB & serverC serverMetricsMap = new TreeMap<>(); - serverMetricsMap.put(serverA, mockServerMetricsWithReadRequests(serverA, - regionsOnServerA, 1000)); + serverMetricsMap.put(serverA, + mockServerMetricsWithReadRequests(serverA, regionsOnServerA, 1000)); serverMetricsMap.put(serverB, mockServerMetricsWithReadRequests(serverB, regionsOnServerB, 0)); serverMetricsMap.put(serverC, mockServerMetricsWithReadRequests(serverC, regionsOnServerC, 0)); clusterStatus = mock(ClusterMetrics.class); @@ -137,8 +137,8 @@ public void testBalanceCluster() throws IOException { List<RegionPlan> plans = loadBalancer.balanceCluster(LoadOfAllTable); Set<RegionInfo> regionsMoveFromServerA = new HashSet<>(); Set<ServerName> targetServers = new HashSet<>(); - for(RegionPlan plan : plans) { - if(plan.getSource().equals(serverA)) { + for (RegionPlan plan : plans) { + if (plan.getSource().equals(serverA)) { regionsMoveFromServerA.add(plan.getRegionInfo()); targetServers.add(plan.getDestination()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java index ca9133961299..36899e22fa7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticBalancerJmxMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import java.util.Random; import java.util.Set; import java.util.concurrent.ThreadLocalRandom; - import javax.management.MBeanAttributeInfo; import javax.management.MBeanInfo; import javax.management.MBeanServerConnection; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRulesLoadFromHDFS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRulesLoadFromHDFS.java index b0e8cf3ab79d..72c7423fcea7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRulesLoadFromHDFS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancerHeterogeneousCostRulesLoadFromHDFS.java @@ -38,11 +38,11 @@ @Category({ MasterTests.class, MediumTests.class }) public class TestStochasticLoadBalancerHeterogeneousCostRulesLoadFromHDFS - extends StochasticBalancerTestBase { + extends StochasticBalancerTestBase { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStochasticLoadBalancerHeterogeneousCostRulesLoadFromHDFS.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule + .forClass(TestStochasticLoadBalancerHeterogeneousCostRulesLoadFromHDFS.class); private HeterogeneousRegionCountCostFunction costFunction; private static final HBaseTestingUtil HTU = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java index 06ad2f043e21..86bc3de49331 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,12 +49,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestCleanerChore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCleanerChore.class); + HBaseClassTestRule.forClass(TestCleanerChore.class); private static final Logger LOG = LoggerFactory.getLogger(TestCleanerChore.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -82,7 +82,7 @@ public void testSavesFilesOnRequest() throws Exception { conf.set(confKey, NeverDelete.class.getName()); AllValidPaths chore = - new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); // create the directory layout in the directory to clean Path parent = new Path(testDir, "parent"); @@ -125,7 +125,7 @@ public FileStatus[] listStatus(Path f) throws IOException { }; AllValidPaths chore = - new AllValidPaths("test-retry-ioe", stop, conf, filtered, testDir, confKey, POOL); + new AllValidPaths("test-retry-ioe", stop, conf, filtered, testDir, confKey, POOL); // trouble talking to the filesystem Boolean result = chore.runCleaner(); @@ -157,7 +157,7 @@ public void testDeletesEmptyDirectories() throws Exception { conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = - new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); // create the directory layout in the directory to clean Path parent = new Path(testDir, "parent"); @@ -199,7 +199,7 @@ public void testDoesNotCheckDirectories() throws Exception { conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = - new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); // spy on the delegate to ensure that we don't check for directories AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0); AlwaysDelete spy = Mockito.spy(delegate); @@ -231,7 +231,7 @@ public void testStoppedCleanerDoesNotDeleteFiles() throws Exception { conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = - new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); // also create a file in the top level directory Path topFile = new Path(testDir, "topFile"); @@ -263,7 +263,7 @@ public void testCleanerDoesNotDeleteDirectoryWithLateAddedFiles() throws IOExcep conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = - new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); // spy on the delegate to ensure that we don't check for directories AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0); AlwaysDelete spy = Mockito.spy(delegate); @@ -323,7 +323,7 @@ public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception { conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = - new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); // spy 
on the delegate to ensure that we don't check for directories AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0); AlwaysDelete spy = Mockito.spy(delegate); @@ -368,7 +368,7 @@ public void testDeleteFileWithCleanerEnabled() throws Exception { conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = - new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); // Enable cleaner chore.setEnabled(true); @@ -402,7 +402,7 @@ public void testDeleteFileWithCleanerDisabled() throws Exception { conf.set(confKey, AlwaysDelete.class.getName()); AllValidPaths chore = - new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); // Disable cleaner chore.setEnabled(false); @@ -445,7 +445,7 @@ public void testOnConfigurationChange() throws Exception { conf.set(confKey, AlwaysDelete.class.getName()); conf.set(CleanerChore.CHORE_POOL_SIZE, String.valueOf(initPoolSize)); AllValidPaths chore = - new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); + new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey, POOL); chore.setEnabled(true); // Create subdirs under testDir int dirNums = 6; @@ -552,7 +552,7 @@ private void createFiles(FileSystem fs, Path parentDir, int numOfFiles) throws I private static class AllValidPaths extends CleanerChore { public AllValidPaths(String name, Stoppable s, Configuration conf, FileSystem fs, - Path oldFileDir, String confkey, DirScanPool pool) { + Path oldFileDir, String confkey, DirScanPool pool) { super(name, Integer.MAX_VALUE, s, conf, fs, oldFileDir, confkey, pool); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index ca41c559f5a6..5a9f3aaf4c56 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -55,7 +54,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestHFileCleaner { @ClassRule @@ -96,7 +95,8 @@ public void testTTLCleaner() throws IOException { conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100); cleaner.setConf(conf); assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs) - + " with create time:" + createTime, cleaner.isFileDeletable(fs.getFileStatus(file))); + + " with create time:" + createTime, + cleaner.isFileDeletable(fs.getFileStatus(file))); } @Test @@ -105,8 +105,8 @@ public void testManualMobCleanerStopsMobRemoval() throws IOException { Path root = UTIL.getDataTestDirOnTestFS(); TableName table = TableName.valueOf("testManualMobCleanerStopsMobRemoval"); Path mob = HFileArchiveUtil.getRegionArchiveDir(root, table, - MobUtils.getMobRegionInfo(table).getEncodedName()); - Path family= new Path(mob, "family"); + MobUtils.getMobRegionInfo(table).getEncodedName()); + Path family = new Path(mob, "family"); Path file = new Path(family, "someHFileThatWouldBeAUUID"); fs.createNewFile(file); @@ -114,8 +114,8 @@ public void testManualMobCleanerStopsMobRemoval() throws IOException { ManualMobMaintHFileCleaner cleaner = new ManualMobMaintHFileCleaner(); - assertFalse("Mob File shouldn't have been deletable. check path. '"+file+"'", - cleaner.isFileDeletable(fs.getFileStatus(file))); + assertFalse("Mob File shouldn't have been deletable. check path. '" + file + "'", + cleaner.isFileDeletable(fs.getFileStatus(file))); } @Test @@ -125,7 +125,7 @@ public void testManualMobCleanerLetsNonMobGo() throws IOException { TableName table = TableName.valueOf("testManualMobCleanerLetsNonMobGo"); Path nonmob = HFileArchiveUtil.getRegionArchiveDir(root, table, RegionInfoBuilder.newBuilder(table).build().getEncodedName()); - Path family= new Path(nonmob, "family"); + Path family = new Path(nonmob, "family"); Path file = new Path(family, "someHFileThatWouldBeAUUID"); fs.createNewFile(file); @@ -133,8 +133,8 @@ public void testManualMobCleanerLetsNonMobGo() throws IOException { ManualMobMaintHFileCleaner cleaner = new ManualMobMaintHFileCleaner(); - assertTrue("Non-Mob File should have been deletable. check path. '"+file+"'", - cleaner.isFileDeletable(fs.getFileStatus(file))); + assertTrue("Non-Mob File should have been deletable. check path. 
'" + file + "'", + cleaner.isFileDeletable(fs.getFileStatus(file))); } /** @@ -155,12 +155,12 @@ public void testHFileCleaning() throws Exception { // set TTL long ttl = 2000; conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, - "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner," + - "org.apache.hadoop.hbase.mob.ManualMobMaintHFileCleaner"); + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner," + + "org.apache.hadoop.hbase.mob.ManualMobMaintHFileCleaner"); conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl); Server server = new DummyServer(); Path archivedHfileDir = - new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY); + new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY); FileSystem fs = FileSystem.get(conf); HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL); @@ -226,7 +226,7 @@ public void testRemovesEmptyDirectories() throws Exception { conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, ""); Server server = new DummyServer(); Path archivedHfileDir = - new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY); + new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY); // setup the cleaner FileSystem fs = UTIL.getDFSCluster().getFileSystem(); @@ -354,9 +354,9 @@ public void testOnConfigurationChange() throws Exception { Assert.assertEquals(ORIGINAL_QUEUE_INIT_SIZE, cleaner.getLargeQueueInitSize()); Assert.assertEquals(ORIGINAL_QUEUE_INIT_SIZE, cleaner.getSmallQueueInitSize()); Assert.assertEquals(HFileCleaner.DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC, - cleaner.getCleanerThreadTimeoutMsec()); + cleaner.getCleanerThreadTimeoutMsec()); Assert.assertEquals(HFileCleaner.DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC, - cleaner.getCleanerThreadCheckIntervalMsec()); + cleaner.getCleanerThreadCheckIntervalMsec()); // clean up archive directory and create files for testing fs.delete(archivedHfileDir, true); @@ -386,7 +386,7 @@ public void run() { newConf.setInt(HFileCleaner.SMALL_HFILE_DELETE_THREAD_NUMBER, SMALL_THREAD_NUM); newConf.setLong(HFileCleaner.HFILE_DELETE_THREAD_TIMEOUT_MSEC, THREAD_TIMEOUT_MSEC); newConf.setLong(HFileCleaner.HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC, - THREAD_CHECK_INTERVAL_MSEC); + THREAD_CHECK_INTERVAL_MSEC); LOG.debug("File deleted from large queue: " + cleaner.getNumOfDeletedLargeFiles() + "; from small queue: " + cleaner.getNumOfDeletedSmallFiles()); @@ -410,11 +410,13 @@ public void run() { t.join(); LOG.debug("File deleted from large queue: " + cleaner.getNumOfDeletedLargeFiles() + "; from small queue: " + cleaner.getNumOfDeletedSmallFiles()); - Assert.assertTrue("Should delete more than " + LARGE_FILE_NUM - + " files from large queue but actually " + cleaner.getNumOfDeletedLargeFiles(), + Assert.assertTrue( + "Should delete more than " + LARGE_FILE_NUM + " files from large queue but actually " + + cleaner.getNumOfDeletedLargeFiles(), cleaner.getNumOfDeletedLargeFiles() > LARGE_FILE_NUM); - Assert.assertTrue("Should delete less than " + SMALL_FILE_NUM - + " files from small queue but actually " + cleaner.getNumOfDeletedSmallFiles(), + Assert.assertTrue( + "Should delete less than " + SMALL_FILE_NUM + " files from small queue but actually " + + cleaner.getNumOfDeletedSmallFiles(), cleaner.getNumOfDeletedSmallFiles() < SMALL_FILE_NUM); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java index 32ffaeca2372..63ed4c8830bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -56,7 +55,7 @@ public class TestHFileLinkCleaner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileLinkCleaner.class); + HBaseClassTestRule.forClass(TestHFileLinkCleaner.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -92,8 +91,8 @@ public void testHFileLinkCleaning() throws Exception { RegionInfo hriLink = RegionInfoBuilder.newBuilder(tableLinkName).build(); Path archiveDir = HFileArchiveUtil.getArchivePath(conf); - Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, - tableName, hri.getEncodedName(), familyName); + Path archiveStoreDir = + HFileArchiveUtil.getStoreArchivePath(conf, tableName, hri.getEncodedName(), familyName); // Create hfile /hbase/table-link/region/cf/getEncodedName.HFILE(conf); Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName); @@ -103,7 +102,7 @@ public void testHFileLinkCleaning() throws Exception { // Create link to hfile Path familyLinkPath = - getFamilyDirPath(rootDir, tableLinkName, hriLink.getEncodedName(), familyName); + getFamilyDirPath(rootDir, tableLinkName, hriLink.getEncodedName(), familyName); fs.mkdirs(familyLinkPath); HFileLink.create(conf, fs, familyLinkPath, hri, hfileName); Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName); @@ -146,7 +145,7 @@ public void testHFileLinkCleaning() throws Exception { } private static Path getFamilyDirPath(final Path rootDir, final TableName table, - final String region, final String family) { + final String region, final String family) { return new Path(new Path(CommonFSUtils.getTableDir(rootDir, table), region), family); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index ff76e2379240..500adbc7856f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,7 +69,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestLogsCleaner { @ClassRule @@ -82,8 +82,7 @@ public class TestLogsCleaner { private static final Path OLD_WALS_DIR = new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME); - private static final Path OLD_PROCEDURE_WALS_DIR = - new Path(OLD_WALS_DIR, "masterProcedureWALs"); + private static final Path OLD_PROCEDURE_WALS_DIR = new Path(OLD_WALS_DIR, "masterProcedureWALs"); private static Configuration conf; @@ -116,21 +115,11 @@ public void beforeTest() throws IOException { } /** - * This tests verifies LogCleaner works correctly with WALs and Procedure WALs located - * in the same oldWALs directory. - * Created files: - * - 2 invalid files - * - 5 old Procedure WALs - * - 30 old WALs from which 3 are in replication - * - 5 recent Procedure WALs - * - 1 recent WAL - * - 1 very new WAL (timestamp in future) - * - masterProcedureWALs subdirectory - * Files which should stay: - * - 3 replication WALs - * - 2 new WALs - * - 5 latest Procedure WALs - * - masterProcedureWALs subdirectory + * This tests verifies LogCleaner works correctly with WALs and Procedure WALs located in the same + * oldWALs directory. Created files: - 2 invalid files - 5 old Procedure WALs - 30 old WALs from + * which 3 are in replication - 5 recent Procedure WALs - 1 recent WAL - 1 very new WAL (timestamp + * in future) - masterProcedureWALs subdirectory Files which should stay: - 3 replication WALs - 2 + * new WALs - 5 latest Procedure WALs - masterProcedureWALs subdirectory */ @Test public void testLogCleaning() throws Exception { @@ -145,8 +134,8 @@ public void testLogCleaning() throws Exception { ReplicationQueueStorage queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(server.getZooKeeper(), conf); - String fakeMachineName = URLEncoder.encode( - server.getServerName().toString(), StandardCharsets.UTF_8.name()); + String fakeMachineName = + URLEncoder.encode(server.getServerName().toString(), StandardCharsets.UTF_8.name()); final FileSystem fs = FileSystem.get(conf); fs.mkdirs(OLD_PROCEDURE_WALS_DIR); @@ -159,8 +148,7 @@ public void testLogCleaning() throws Exception { // Case 2: 5 Procedure WALs that are old which would be deleted for (int i = 1; i <= 5; i++) { - final Path fileName = - new Path(OLD_PROCEDURE_WALS_DIR, String.format("pv2-%020d.log", i)); + final Path fileName = new Path(OLD_PROCEDURE_WALS_DIR, String.format("pv2-%020d.log", i)); fs.createNewFile(fileName); } @@ -181,8 +169,7 @@ public void testLogCleaning() throws Exception { // Case 5: 5 Procedure WALs that are new, will stay for (int i = 6; i <= 10; i++) { - Path fileName = - new Path(OLD_PROCEDURE_WALS_DIR, String.format("pv2-%020d.log", i)); + Path fileName = new Path(OLD_PROCEDURE_WALS_DIR, String.format("pv2-%020d.log", i)); fs.createNewFile(fileName); } @@ -207,18 +194,17 @@ public void testLogCleaning() throws Exception { // In oldWALs we end up with the current WAL, a newer WAL, the 3 old WALs which // are scheduled for replication and masterProcedureWALs directory - TEST_UTIL.waitFor(1000, (Waiter.Predicate) () -> 6 == fs - .listStatus(OLD_WALS_DIR).length); + TEST_UTIL.waitFor(1000, + (Waiter.Predicate) () -> 6 == fs.listStatus(OLD_WALS_DIR).length); // In masterProcedureWALs we end up with 5 newer Procedure WALs - TEST_UTIL.waitFor(1000, 
(Waiter.Predicate) () -> 5 == fs - .listStatus(OLD_PROCEDURE_WALS_DIR).length); + TEST_UTIL.waitFor(1000, + (Waiter.Predicate) () -> 5 == fs.listStatus(OLD_PROCEDURE_WALS_DIR).length); if (LOG.isDebugEnabled()) { FileStatus[] statusOldWALs = fs.listStatus(OLD_WALS_DIR); FileStatus[] statusProcedureWALs = fs.listStatus(OLD_PROCEDURE_WALS_DIR); LOG.debug("Kept log file for oldWALs: {}", Arrays.toString(statusOldWALs)); - LOG.debug("Kept log file for masterProcedureWALs: {}", - Arrays.toString(statusProcedureWALs)); + LOG.debug("Kept log file for masterProcedureWALs: {}", Arrays.toString(statusProcedureWALs)); } } @@ -228,8 +214,7 @@ public void testZooKeeperRecoveryDuringGetListOfReplicators() throws Exception { List dummyFiles = Arrays.asList( new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log1")), - new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log2")) - ); + new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log2"))); FaultyZooKeeperWatcher faultyZK = new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null); @@ -237,8 +222,8 @@ public void testZooKeeperRecoveryDuringGetListOfReplicators() throws Exception { try { faultyZK.init(false); - ReplicationQueueStorage queueStorage = spy(ReplicationStorageFactory - .getReplicationQueueStorage(faultyZK, conf)); + ReplicationQueueStorage queueStorage = + spy(ReplicationStorageFactory.getReplicationQueueStorage(faultyZK, conf)); doAnswer(new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { @@ -261,7 +246,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable { assertFalse(toDelete.iterator().hasNext()); assertFalse(cleaner.isStopped()); - //zk recovery. + // zk recovery. faultyZK.init(true); cleaner.preClean(); Iterable filesToDelete = cleaner.getDeletableFiles(dummyFiles); @@ -288,10 +273,9 @@ public void testZooKeeperNormal() throws Exception { // Subtract 1000 from current time so modtime is for sure older // than 'now'. 
long modTime = EnvironmentEdgeManager.currentTime() - 1000; - List dummyFiles = Arrays.asList( - new FileStatus(100, false, 3, 100, modTime, new Path("log1")), - new FileStatus(100, false, 3, 100, modTime, new Path("log2")) - ); + List dummyFiles = + Arrays.asList(new FileStatus(100, false, 3, 100, modTime, new Path("log1")), + new FileStatus(100, false, 3, 100, modTime, new Path("log2"))); ZKWatcher zkw = new ZKWatcher(conf, "testZooKeeperAbort-normal", null); try { @@ -318,7 +302,7 @@ public void testOnConfigurationChange() throws Exception { LogCleaner cleaner = new LogCleaner(3000, server, conf, fs, OLD_WALS_DIR, POOL, null); int size = cleaner.getSizeOfCleaners(); assertEquals(LogCleaner.DEFAULT_OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC, - cleaner.getCleanerThreadTimeoutMsec()); + cleaner.getCleanerThreadTimeoutMsec()); // Create dir and files for test int numOfFiles = 10; createFiles(fs, OLD_WALS_DIR, numOfFiles); @@ -383,8 +367,8 @@ public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable a public void init(boolean autoRecovery) throws Exception { this.zk = spy(super.getRecoverableZooKeeper()); if (!autoRecovery) { - doThrow(new KeeperException.ConnectionLossException()) - .when(zk).getChildren("/hbase/replication/rs", null); + doThrow(new KeeperException.ConnectionLossException()).when(zk) + .getChildren("/hbase/replication/rs", null); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java index 9acba4c4a984..1ca0b586d899 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,7 +73,7 @@ public class TestReplicationBarrierCleaner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationBarrierCleaner.class); + HBaseClassTestRule.forClass(TestReplicationBarrierCleaner.class); private static final Logger LOG = LoggerFactory.getLogger(TestHFileCleaner.class); @@ -95,8 +95,9 @@ public static void tearDownAfterClass() throws Exception { @After public void tearDown() throws IOException { try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - ResultScanner scanner = table.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY) - .addFamily(HConstants.REPLICATION_BARRIER_FAMILY).setFilter(new FirstKeyOnlyFilter()))) { + ResultScanner scanner = table.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY) + .addFamily(HConstants.REPLICATION_BARRIER_FAMILY) + .setFilter(new FirstKeyOnlyFilter()))) { for (;;) { Result result = scanner.next(); if (result == null) { @@ -139,7 +140,7 @@ private ReplicationQueueStorage create(Long lastPushedSeqId, Long... lastPushedS private ReplicationBarrierCleaner create(ReplicationPeerManager peerManager) throws IOException { return new ReplicationBarrierCleaner(UTIL.getConfiguration(), new WarnOnlyStoppable(), - UTIL.getConnection(), peerManager); + UTIL.getConnection(), peerManager); } private void addBarrier(RegionInfo region, long... 
barriers) throws IOException { @@ -179,27 +180,27 @@ public void testNothing() throws IOException { public void testCleanNoPeers() throws IOException { TableName tableName1 = TableName.valueOf(name.getMethodName() + "_1"); RegionInfo region11 = - RegionInfoBuilder.newBuilder(tableName1).setEndKey(Bytes.toBytes(1)).build(); + RegionInfoBuilder.newBuilder(tableName1).setEndKey(Bytes.toBytes(1)).build(); addBarrier(region11, 10, 20, 30, 40, 50, 60); fillCatalogFamily(region11); RegionInfo region12 = - RegionInfoBuilder.newBuilder(tableName1).setStartKey(Bytes.toBytes(1)).build(); + RegionInfoBuilder.newBuilder(tableName1).setStartKey(Bytes.toBytes(1)).build(); addBarrier(region12, 20, 30, 40, 50, 60, 70); fillCatalogFamily(region12); TableName tableName2 = TableName.valueOf(name.getMethodName() + "_2"); RegionInfo region21 = - RegionInfoBuilder.newBuilder(tableName2).setEndKey(Bytes.toBytes(1)).build(); + RegionInfoBuilder.newBuilder(tableName2).setEndKey(Bytes.toBytes(1)).build(); addBarrier(region21, 100, 200, 300, 400); fillCatalogFamily(region21); RegionInfo region22 = - RegionInfoBuilder.newBuilder(tableName2).setStartKey(Bytes.toBytes(1)).build(); + RegionInfoBuilder.newBuilder(tableName2).setStartKey(Bytes.toBytes(1)).build(); addBarrier(region22, 200, 300, 400, 500, 600); fillCatalogFamily(region22); @SuppressWarnings("unchecked") ReplicationPeerManager peerManager = - create(null, Collections.emptyList(), Collections.emptyList()); + create(null, Collections.emptyList(), Collections.emptyList()); ReplicationBarrierCleaner cleaner = create(peerManager); cleaner.chore(); @@ -209,14 +210,14 @@ public void testCleanNoPeers() throws IOException { verify(peerManager, times(2)).getSerialPeerIdsBelongsTo(any(TableName.class)); assertArrayEquals(new long[] { 60 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region11.getRegionName())); + .getReplicationBarriers(UTIL.getConnection(), region11.getRegionName())); assertArrayEquals(new long[] { 70 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region12.getRegionName())); + .getReplicationBarriers(UTIL.getConnection(), region12.getRegionName())); assertArrayEquals(new long[] { 400 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region21.getRegionName())); + .getReplicationBarriers(UTIL.getConnection(), region21.getRegionName())); assertArrayEquals(new long[] { 600 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region22.getRegionName())); + .getReplicationBarriers(UTIL.getConnection(), region22.getRegionName())); } @Test @@ -230,33 +231,33 @@ public void testDeleteBarriers() throws IOException, ReplicationException { @SuppressWarnings("unchecked") ReplicationPeerManager peerManager = - create(queueStorage, peerIds, peerIds, peerIds, peerIds, peerIds); + create(queueStorage, peerIds, peerIds, peerIds, peerIds, peerIds); ReplicationBarrierCleaner cleaner = create(peerManager); // beyond the first barrier, no deletion cleaner.chore(); assertArrayEquals(new long[] { 10, 20, 30, 40, 50, 60 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); + .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); // in the first range, still no deletion cleaner.chore(); assertArrayEquals(new long[] { 10, 20, 30, 40, 50, 60 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); + 
.getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); // in the second range, 10 is deleted cleaner.chore(); assertArrayEquals(new long[] { 20, 30, 40, 50, 60 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); + .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); // between 50 and 60, so the barriers before 50 will be deleted cleaner.chore(); assertArrayEquals(new long[] { 50, 60 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); + .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); // in the last open range, 50 is deleted cleaner.chore(); assertArrayEquals(new long[] { 60 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); + .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); } @Test @@ -275,15 +276,15 @@ public void testDeleteRowForDeletedRegion() throws IOException, ReplicationExcep // we have something in catalog family, so only delete 40 cleaner.chore(); assertArrayEquals(new long[] { 50, 60 }, ReplicationBarrierFamilyFormat - .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); + .getReplicationBarriers(UTIL.getConnection(), region.getRegionName())); verify(queueStorage, never()).removeLastSequenceIds(anyString(), anyList()); // No catalog family, then we should remove the whole row clearCatalogFamily(region); cleaner.chore(); try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { - assertFalse(table - .exists(new Get(region.getRegionName()).addFamily(HConstants.REPLICATION_BARRIER_FAMILY))); + assertFalse(table.exists( + new Get(region.getRegionName()).addFamily(HConstants.REPLICATION_BARRIER_FAMILY))); } verify(queueStorage, times(1)).removeLastSequenceIds(peerId, Arrays.asList(region.getEncodedName())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index cc903e8c240a..4df6f55f82f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -193,11 +192,9 @@ public void testGetDeletableFiles() throws Exception { public void testZooKeeperAbort() throws Exception { ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner(); - List<FileStatus> dummyFiles = - Lists.newArrayList(new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), - new Path("hfile1")), - new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), - new Path("hfile2"))); + List<FileStatus> dummyFiles = Lists.newArrayList( + new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("hfile1")), + new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("hfile2"))); FaultyZooKeeperWatcher faultyZK = new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null); @@ -249,6 +246,7 @@ public ZKWatcher getZooKeeper() { static class FaultyZooKeeperWatcher extends ZKWatcher { private RecoverableZooKeeper zk; + public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable) throws ZooKeeperConnectionException, IOException { super(conf, identifier, abortable); @@ -256,8 +254,8 @@ public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable a public void init() throws Exception { this.zk = spy(super.getRecoverableZooKeeper()); - doThrow(new KeeperException.ConnectionLossException()) - .when(zk).getData("/hbase/replication/hfile-refs", null, new Stat()); + doThrow(new KeeperException.ConnectionLossException()).when(zk) + .getData("/hbase/replication/hfile-refs", null, new Stat()); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java index 56e62b6d1e54..44590e69eb49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotCleanerChore.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - package org.apache.hadoop.hbase.master.cleaner; import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -39,16 +37,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; - /** * Tests for SnapshotsCleanerChore */ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestSnapshotCleanerChore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotCleanerChore.class); + HBaseClassTestRule.forClass(TestSnapshotCleanerChore.class); private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotCleanerChore.class); @@ -62,14 +59,13 @@ private Configuration getSnapshotCleanerConf() { return conf; } - @Test public void testSnapshotCleanerWithoutAnyCompletedSnapshot() throws IOException { snapshotManager = Mockito.mock(SnapshotManager.class); Stoppable stopper = new StoppableImplementation(); Configuration conf = getSnapshotCleanerConf(); SnapshotCleanerChore snapshotCleanerChore = - new SnapshotCleanerChore(stopper, conf, snapshotManager); + new SnapshotCleanerChore(stopper, conf, snapshotManager); try { snapshotCleanerChore.chore(); } finally { @@ -84,12 +80,12 @@ public void testSnapshotCleanerWithNoTtlExpired() throws IOException { Stoppable stopper = new StoppableImplementation(); Configuration conf = getSnapshotCleanerConf(); SnapshotCleanerChore snapshotCleanerChore = - new SnapshotCleanerChore(stopper, conf, snapshotManager); + new SnapshotCleanerChore(stopper, conf, snapshotManager); List snapshotDescriptionList = new ArrayList<>(); snapshotDescriptionList.add(getSnapshotDescription(-2, "snapshot01", "table01", - EnvironmentEdgeManager.currentTime() - 100000)); - snapshotDescriptionList.add(getSnapshotDescription(10, "snapshot02", "table02", - EnvironmentEdgeManager.currentTime())); + EnvironmentEdgeManager.currentTime() - 100000)); + snapshotDescriptionList.add( + getSnapshotDescription(10, "snapshot02", "table02", EnvironmentEdgeManager.currentTime())); Mockito.when(snapshotManager.getCompletedSnapshots()).thenReturn(snapshotDescriptionList); try { LOG.info("2 Snapshots are completed but TTL is not expired for any of them"); @@ -106,16 +102,16 @@ public void testSnapshotCleanerWithSomeTtlExpired() throws IOException { Stoppable stopper = new StoppableImplementation(); Configuration conf = getSnapshotCleanerConf(); SnapshotCleanerChore snapshotCleanerChore = - new SnapshotCleanerChore(stopper, conf, snapshotManager); + new SnapshotCleanerChore(stopper, conf, snapshotManager); List snapshotDescriptionList = new ArrayList<>(); snapshotDescriptionList.add(getSnapshotDescription(10, "snapshot01", "table01", 1)); snapshotDescriptionList.add(getSnapshotDescription(5, "snapshot02", "table02", 2)); - snapshotDescriptionList.add(getSnapshotDescription(30, "snapshot01", "table01", - EnvironmentEdgeManager.currentTime())); - snapshotDescriptionList.add(getSnapshotDescription(0, "snapshot02", "table02", - EnvironmentEdgeManager.currentTime())); - snapshotDescriptionList.add(getSnapshotDescription(40, "snapshot03", "table03", - EnvironmentEdgeManager.currentTime())); + snapshotDescriptionList.add( + getSnapshotDescription(30, "snapshot01", "table01", EnvironmentEdgeManager.currentTime())); + snapshotDescriptionList.add( + getSnapshotDescription(0, "snapshot02", "table02", 
EnvironmentEdgeManager.currentTime())); + snapshotDescriptionList.add( + getSnapshotDescription(40, "snapshot03", "table03", EnvironmentEdgeManager.currentTime())); Mockito.when(snapshotManager.getCompletedSnapshots()).thenReturn(snapshotDescriptionList); try { LOG.info("5 Snapshots are completed. TTL is expired for 2 them. Going to delete them"); @@ -132,11 +128,11 @@ public void testSnapshotCleanerWithReadIOE() throws IOException { Stoppable stopper = new StoppableImplementation(); Configuration conf = new HBaseTestingUtil().getConfiguration(); SnapshotCleanerChore snapshotCleanerChore = - new SnapshotCleanerChore(stopper, conf, snapshotManager); + new SnapshotCleanerChore(stopper, conf, snapshotManager); Mockito.when(snapshotManager.getCompletedSnapshots()).thenThrow(IOException.class); try { LOG.info("While getting completed Snapshots, IOException would occur. Hence, No Snapshot" - + " should be deleted"); + + " should be deleted"); snapshotCleanerChore.chore(); } finally { stopper.stop("Stopping Test Stopper"); @@ -154,7 +150,7 @@ public void testSnapshotChoreWithTtlOutOfRange() throws IOException { snapshotDescriptionList.add(getSnapshotDescription(5, "snapshot02", "table02", 2)); Mockito.when(snapshotManager.getCompletedSnapshots()).thenReturn(snapshotDescriptionList); SnapshotCleanerChore snapshotCleanerChore = - new SnapshotCleanerChore(stopper, conf, snapshotManager); + new SnapshotCleanerChore(stopper, conf, snapshotManager); try { LOG.info("Snapshot Chore is disabled. No cleanup performed for Expired Snapshots"); snapshotCleanerChore.chore(); @@ -165,9 +161,9 @@ public void testSnapshotChoreWithTtlOutOfRange() throws IOException { } private SnapshotProtos.SnapshotDescription getSnapshotDescription(final long ttl, - final String snapshotName, final String tableName, final long snapshotCreationTime) { + final String snapshotName, final String tableName, final long snapshotCreationTime) { SnapshotProtos.SnapshotDescription.Builder snapshotDescriptionBuilder = - SnapshotProtos.SnapshotDescription.newBuilder(); + SnapshotProtos.SnapshotDescription.newBuilder(); snapshotDescriptionBuilder.setTtl(ttl); snapshotDescriptionBuilder.setName(snapshotName); snapshotDescriptionBuilder.setTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java index 5fc60a7c0908..5eb381812b51 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -80,20 +80,17 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .IsSnapshotCleanupEnabledRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .IsSnapshotCleanupEnabledResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .SetSnapshotCleanupRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** * Test the master-related aspects of a snapshot */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestSnapshotFromMaster { @ClassRule @@ -110,8 +107,7 @@ public class TestSnapshotFromMaster { // for hfile archiving test. private static Path archiveDir; private static final byte[] TEST_FAM = Bytes.toBytes("fam"); - private static final TableName TABLE_NAME = - TableName.valueOf("test"); + private static final TableName TABLE_NAME = TableName.valueOf("test"); // refresh the cache every 1/2 second private static final long cacheRefreshPeriod = 500; private static final int blockingStoreFiles = 12; @@ -196,8 +192,8 @@ public void testIsDoneContract() throws Exception { UnknownSnapshotException.class); // and that we get the same issue, even if we specify a name - SnapshotDescription desc = SnapshotDescription.newBuilder() - .setName(snapshotName).setTable(TABLE_NAME.getNameAsString()).build(); + SnapshotDescription desc = SnapshotDescription.newBuilder().setName(snapshotName) + .setTable(TABLE_NAME.getNameAsString()).build(); builder.setSnapshot(desc); SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(), UnknownSnapshotException.class); @@ -208,10 +204,9 @@ public void testIsDoneContract() throws Exception { Mockito.when(mockHandler.getSnapshot()).thenReturn(desc); Mockito.when(mockHandler.isFinished()).thenReturn(Boolean.TRUE); Mockito.when(mockHandler.getCompletionTimestamp()) - .thenReturn(EnvironmentEdgeManager.currentTime()); + .thenReturn(EnvironmentEdgeManager.currentTime()); - master.getSnapshotManager() - .setSnapshotHandlerForTesting(TABLE_NAME, mockHandler); + master.getSnapshotManager().setSnapshotHandlerForTesting(TABLE_NAME, mockHandler); // if we do a lookup without a snapshot name, we should fail - you should always know your name builder = IsSnapshotDoneRequest.newBuilder(); @@ -221,7 +216,7 @@ public void testIsDoneContract() throws Exception { // then do the lookup for the snapshot that it is done builder.setSnapshot(desc); IsSnapshotDoneResponse response = - master.getMasterRpcServices().isSnapshotDone(null, builder.build()); + master.getMasterRpcServices().isSnapshotDone(null, builder.build()); 
assertTrue("Snapshot didn't complete when it should have.", response.getDone()); // now try the case where we are looking for a snapshot we didn't take @@ -243,7 +238,7 @@ public void testGetCompletedSnapshots() throws Exception { // first check when there are no snapshots GetCompletedSnapshotsRequest request = GetCompletedSnapshotsRequest.newBuilder().build(); GetCompletedSnapshotsResponse response = - master.getMasterRpcServices().getCompletedSnapshots(null, request); + master.getMasterRpcServices().getCompletedSnapshots(null, request); assertEquals("Found unexpected number of snapshots", 0, response.getSnapshotsCount()); // write one snapshot to the fs @@ -275,8 +270,8 @@ public void testDeleteSnapshot() throws Exception { String snapshotName = "completed"; SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName(snapshotName).build(); - DeleteSnapshotRequest request = DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot) - .build(); + DeleteSnapshotRequest request = + DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot).build(); try { master.getMasterRpcServices().deleteSnapshot(null, request); fail("Master didn't throw exception when attempting to delete snapshot that doesn't exist"); @@ -359,22 +354,18 @@ public void testSnapshotCleanupStatus() throws Exception { // Check if auto snapshot cleanup is enabled IsSnapshotCleanupEnabledRequest isSnapshotCleanupEnabledRequest = IsSnapshotCleanupEnabledRequest.newBuilder().build(); - IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabledResponse = - master.getMasterRpcServices().isSnapshotCleanupEnabled(null, - isSnapshotCleanupEnabledRequest); + IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabledResponse = master + .getMasterRpcServices().isSnapshotCleanupEnabled(null, isSnapshotCleanupEnabledRequest); Assert.assertTrue(isSnapshotCleanupEnabledResponse.getEnabled()); // Disable auto snapshot cleanup for the cluster - setSnapshotCleanupRequest = SetSnapshotCleanupRequest.newBuilder() - .setEnabled(false).build(); + setSnapshotCleanupRequest = SetSnapshotCleanupRequest.newBuilder().setEnabled(false).build(); master.getMasterRpcServices().switchSnapshotCleanup(null, setSnapshotCleanupRequest); // Check if auto snapshot cleanup is disabled - isSnapshotCleanupEnabledRequest = IsSnapshotCleanupEnabledRequest - .newBuilder().build(); - isSnapshotCleanupEnabledResponse = - master.getMasterRpcServices().isSnapshotCleanupEnabled(null, - isSnapshotCleanupEnabledRequest); + isSnapshotCleanupEnabledRequest = IsSnapshotCleanupEnabledRequest.newBuilder().build(); + isSnapshotCleanupEnabledResponse = master.getMasterRpcServices().isSnapshotCleanupEnabled(null, + isSnapshotCleanupEnabledRequest); Assert.assertFalse(isSnapshotCleanupEnabledResponse.getEnabled()); } @@ -393,13 +384,12 @@ public void testSnapshotHFileArchiving() throws Exception { // snapshot, the call after snapshot will be a no-op and checks will fail UTIL.deleteTable(TABLE_NAME); TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAM)) - .setCompactionEnabled(false) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAM)).setCompactionEnabled(false) + .build(); UTIL.getAdmin().createTable(td); // load the table - for (int i = 0; i < blockingStoreFiles / 2; i ++) { + for (int i = 0; i < blockingStoreFiles / 2; i++) { UTIL.loadTable(UTIL.getConnection().getTable(TABLE_NAME), TEST_FAM); UTIL.flush(TABLE_NAME); } @@ -418,9 +408,7 @@ public void testSnapshotHFileArchiving() 
throws Exception { // ensure we only have one snapshot SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshotNameBytes, TABLE_NAME); - td = TableDescriptorBuilder.newBuilder(td) - .setCompactionEnabled(true) - .build(); + td = TableDescriptorBuilder.newBuilder(td).setCompactionEnabled(true).build(); // enable compactions now admin.modifyTable(td); @@ -433,8 +421,8 @@ public void testSnapshotHFileArchiving() throws Exception { region.waitForFlushesAndCompactions(); // enable can trigger a compaction, wait for it. region.compactStores(); // min is 2 so will compact and archive } - List regionServerThreads = UTIL.getMiniHBaseCluster() - .getRegionServerThreads(); + List regionServerThreads = + UTIL.getMiniHBaseCluster().getRegionServerThreads(); HRegionServer hrs = null; for (RegionServerThread rs : regionServerThreads) { if (!rs.getRegionServer().getRegions(TABLE_NAME).isEmpty()) { @@ -455,8 +443,8 @@ public void testSnapshotHFileArchiving() throws Exception { // get the snapshot files for the table Path snapshotTable = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); - Set snapshotHFiles = SnapshotReferenceUtil.getHFileNames( - UTIL.getConfiguration(), fs, snapshotTable); + Set snapshotHFiles = + SnapshotReferenceUtil.getHFileNames(UTIL.getConfiguration(), fs, snapshotTable); // check that the files in the archive contain the ones that we need for the snapshot LOG.debug("Have snapshot hfiles:"); for (String fileName : snapshotHFiles) { @@ -471,8 +459,9 @@ public void testSnapshotHFileArchiving() throws Exception { // and make sure that there is a proper subset for (String fileName : snapshotHFiles) { boolean exist = archives.contains(fileName) || hfiles.contains(fileName); - assertTrue("Archived hfiles " + archives - + " and table hfiles " + hfiles + " is missing snapshot file:" + fileName, exist); + assertTrue("Archived hfiles " + archives + " and table hfiles " + hfiles + + " is missing snapshot file:" + fileName, + exist); } // delete the existing snapshot @@ -481,11 +470,11 @@ public void testSnapshotHFileArchiving() throws Exception { // make sure that we don't keep around the hfiles that aren't in a snapshot // make sure we wait long enough to refresh the snapshot hfile - List delegates = UTIL.getMiniHBaseCluster().getMaster() - .getHFileCleaner().cleanersChain; - for (BaseHFileCleanerDelegate delegate: delegates) { + List delegates = + UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().cleanersChain; + for (BaseHFileCleanerDelegate delegate : delegates) { if (delegate instanceof SnapshotHFileCleaner) { - ((SnapshotHFileCleaner)delegate).getFileCacheForTesting().triggerCacheRefreshForTesting(); + ((SnapshotHFileCleaner) delegate).getFileCacheForTesting().triggerCacheRefreshForTesting(); } } // run the cleaner again @@ -507,7 +496,7 @@ public boolean evaluate() throws Exception { @Override public String explainFailure() throws Exception { return "Still have some hfiles in the archive, when their snapshot has been deleted: " - + getHFiles(archiveDir, fs, TABLE_NAME); + + getHFiles(archiveDir, fs, TABLE_NAME); } }); } @@ -516,7 +505,8 @@ public String explainFailure() throws Exception { * @return all the HFiles for a given table in the specified dir * @throws IOException on expected failure */ - private final Collection getHFiles(Path dir, FileSystem fs, TableName tableName) throws IOException { + private final Collection getHFiles(Path dir, FileSystem fs, TableName tableName) + throws IOException { Path tableDir = CommonFSUtils.getTableDir(dir, 
tableName); return SnapshotTestingUtils.listHFileNames(fs, tableDir); } @@ -530,9 +520,9 @@ private static void ensureHFileCleanersRun() { private SnapshotDescription createSnapshot(final String snapshotName) throws IOException { SnapshotTestingUtils.SnapshotMock snapshotMock = - new SnapshotTestingUtils.SnapshotMock(UTIL.getConfiguration(), fs, rootDir); + new SnapshotTestingUtils.SnapshotMock(UTIL.getConfiguration(), fs, rootDir); SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = - snapshotMock.createSnapshotV2(snapshotName, "test", 0); + snapshotMock.createSnapshotV2(snapshotName, "test", 0); builder.commit(); return builder.getSnapshotDescription(); } @@ -557,8 +547,8 @@ public void testAsyncSnapshotWillNotBlockSnapshotHFileCleaner() throws Exception } String snapshotName = "testAsyncSnapshotWillNotBlockSnapshotHFileCleaner01"; Future<Void> future = - UTIL.getAdmin().snapshotAsync(new org.apache.hadoop.hbase.client.SnapshotDescription( - snapshotName, TABLE_NAME, SnapshotType.FLUSH)); + UTIL.getAdmin().snapshotAsync(new org.apache.hadoop.hbase.client.SnapshotDescription( + snapshotName, TABLE_NAME, SnapshotType.FLUSH)); Waiter.waitFor(UTIL.getConfiguration(), 10 * 1000L, 200L, () -> UTIL.getAdmin().listSnapshots(Pattern.compile(snapshotName)).size() == 1); UTIL.waitFor(30000, () -> !master.getSnapshotManager().isTakingAnySnapshot()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestApiV1ClusterMetricsResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestApiV1ClusterMetricsResource.java index 1426b3a0d96b..e7da30f0c6da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestApiV1ClusterMetricsResource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestApiV1ClusterMetricsResource.java @@ -23,6 +23,7 @@ import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertThrows; + import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; @@ -49,6 +50,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.ExternalResource; import org.junit.rules.RuleChain; + import org.apache.hbase.thirdparty.javax.ws.rs.NotAcceptableException; import org.apache.hbase.thirdparty.javax.ws.rs.client.Client; import org.apache.hbase.thirdparty.javax.ws.rs.client.ClientBuilder; @@ -58,29 +60,25 @@ /** * Tests for the master api_v1 {@link ClusterMetricsResource}.
*/ -@Category({ MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestApiV1ClusterMetricsResource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestApiV1ClusterMetricsResource.class); + HBaseClassTestRule.forClass(TestApiV1ClusterMetricsResource.class); private static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder() - .setMiniClusterOption(StartTestingClusterOption.builder() - .numZkServers(3) - .numMasters(3) - .numDataNodes(3) - .build()) - .setConfiguration(() -> { - // enable Master InfoServer and random port selection - final Configuration conf = HBaseConfiguration.create(); - conf.setInt(HConstants.MASTER_INFO_PORT, 0); - conf.set("hbase.http.jersey.tracing.type", "ON_DEMAND"); - return conf; - }) - .build(); + .setMiniClusterOption( + StartTestingClusterOption.builder().numZkServers(3).numMasters(3).numDataNodes(3).build()) + .setConfiguration(() -> { + // enable Master InfoServer and random port selection + final Configuration conf = HBaseConfiguration.create(); + conf.setInt(HConstants.MASTER_INFO_PORT, 0); + conf.set("hbase.http.jersey.tracing.type", "ON_DEMAND"); + return conf; + }).build(); private static final ConnectionRule connectionRule = - ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection); + ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection); private static final ClassSetup classRule = new ClassSetup(connectionRule::getAsyncConnection); private static final class ClassSetup extends ExternalResource { @@ -103,20 +101,15 @@ public WebTarget getTarget() { protected void before() throws Throwable { final AsyncConnection conn = connectionSupplier.get(); admin = conn.getAdmin(); - final TableDescriptor tableDescriptor = TableDescriptorBuilder - .newBuilder(tableName) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("c")).build()) - .setDurability(Durability.SKIP_WAL) - .build(); + final TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("c")).build()) + .setDurability(Durability.SKIP_WAL).build(); admin.createTable(tableDescriptor).get(); - final String baseUrl = admin.getMaster() - .thenApply(ServerName::getHostname) - .thenCombine( - admin.getMasterInfoPort(), - (hostName, infoPort) -> "http://" + hostName + ":" + infoPort) - .get(); + final String baseUrl = admin.getMaster().thenApply(ServerName::getHostname) + .thenCombine(admin.getMasterInfoPort(), + (hostName, infoPort) -> "http://" + hostName + ":" + infoPort) + .get(); final Client client = ClientBuilder.newClient(); target = client.target(baseUrl).path("api/v1/admin/cluster_metrics"); } @@ -125,16 +118,14 @@ protected void before() throws Throwable { protected void after() { final TableName tableName = TableName.valueOf("test"); try { - admin.tableExists(tableName) - .thenCompose(val -> { - if (val) { - return admin.disableTable(tableName) + admin.tableExists(tableName).thenCompose(val -> { + if (val) { + return admin.disableTable(tableName) .thenCompose(ignored -> admin.deleteTable(tableName)); - } else { - return CompletableFuture.completedFuture(null); - } - }) - .get(); + } else { + return CompletableFuture.completedFuture(null); + } + }).get(); } catch (Exception e) { throw new RuntimeException(e); } @@ -142,70 +133,52 @@ protected void after() { } @ClassRule - public static RuleChain ruleChain = 
RuleChain.outerRule(miniClusterRule) - .around(connectionRule) - .around(classRule); + public static RuleChain ruleChain = + RuleChain.outerRule(miniClusterRule).around(connectionRule).around(classRule); @Test public void testGetRoot() { - final String response = classRule.getTarget() - .request(MediaType.APPLICATION_JSON_TYPE) - .header("X-Jersey-Tracing-Accept", true) - .get(String.class); - assertThat(response, allOf( - containsString("\"hbase_version\":"), - containsString("\"cluster_id\":"), - containsString("\"master_name\":"), - containsString("\"backup_master_names\":"))); + final String response = classRule.getTarget().request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + assertThat(response, + allOf(containsString("\"hbase_version\":"), containsString("\"cluster_id\":"), + containsString("\"master_name\":"), containsString("\"backup_master_names\":"))); } @Test public void testGetRootHtml() { - assertThrows(NotAcceptableException.class, () -> classRule.getTarget() - .request(MediaType.TEXT_HTML_TYPE) - .header("X-Jersey-Tracing-Accept", true) - .get(String.class)); + assertThrows(NotAcceptableException.class, + () -> classRule.getTarget().request(MediaType.TEXT_HTML_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class)); } @Test public void testGetLiveServers() { - final String response = classRule.getTarget() - .path("live_servers") - .request(MediaType.APPLICATION_JSON_TYPE) - .header("X-Jersey-Tracing-Accept", true) - .get(String.class); - assertThat(response, allOf( - startsWith("{\"data\":["), - endsWith("]}"))); + final String response = + classRule.getTarget().path("live_servers").request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + assertThat(response, allOf(startsWith("{\"data\":["), endsWith("]}"))); } @Test public void testGetLiveServersHtml() { - assertThrows(NotAcceptableException.class, () -> classRule.getTarget() - .path("live_servers") - .request(MediaType.TEXT_HTML_TYPE) - .header("X-Jersey-Tracing-Accept", true) - .get(String.class)); + assertThrows(NotAcceptableException.class, + () -> classRule.getTarget().path("live_servers").request(MediaType.TEXT_HTML_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class)); } @Test public void testGetDeadServers() { - final String response = classRule.getTarget() - .path("dead_servers") - .request(MediaType.APPLICATION_JSON_TYPE) - .header("X-Jersey-Tracing-Accept", true) - .get(String.class); - assertThat(response, allOf( - startsWith("{\"data\":["), - endsWith("]}"))); + final String response = + classRule.getTarget().path("dead_servers").request(MediaType.APPLICATION_JSON_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class); + assertThat(response, allOf(startsWith("{\"data\":["), endsWith("]}"))); } @Test public void testGetDeadServersHtml() { - assertThrows(NotAcceptableException.class, () -> classRule.getTarget() - .path("dead_servers") - .request(MediaType.TEXT_HTML_TYPE) - .header("X-Jersey-Tracing-Accept", true) - .get(String.class)); + assertThrows(NotAcceptableException.class, + () -> classRule.getTarget().path("dead_servers").request(MediaType.TEXT_HTML_TYPE) + .header("X-Jersey-Tracing-Accept", true).get(String.class)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusServlet.java index d46ece428493..1bda533e5120 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ /** * Tests for the master status page and its template. */ -@Category({MasterTests.class,MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterStatusServlet { @ClassRule @@ -69,13 +69,12 @@ public class TestMasterStatusServlet { private Configuration conf; private Admin admin; - static final ServerName FAKE_HOST = - ServerName.valueOf("fakehost", 12345, 1234567890); + static final ServerName FAKE_HOST = ServerName.valueOf("fakehost", 12345, 1234567890); static final TableDescriptor FAKE_TABLE = - TableDescriptorBuilder.newBuilder(TableName.valueOf("mytable")).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf("mytable")).build(); static final RegionInfo FAKE_HRI = RegionInfoBuilder.newBuilder(FAKE_TABLE.getTableName()) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); @Before public void setupBasicMocks() { @@ -85,7 +84,7 @@ public void setupBasicMocks() { Mockito.doReturn(FAKE_HOST).when(master).getServerName(); Mockito.doReturn(conf).when(master).getConfiguration(); - //Fake DeadServer + // Fake DeadServer DeadServer deadServer = Mockito.mock(DeadServer.class); // Fake serverManager ServerManager serverManager = Mockito.mock(ServerManager.class); @@ -97,7 +96,8 @@ public void setupBasicMocks() { AssignmentManager am = Mockito.mock(AssignmentManager.class); RegionStates rs = Mockito.mock(RegionStates.class); List regionsInTransition = new ArrayList<>(); - regionsInTransition.add(new RegionState(FAKE_HRI, RegionState.State.CLOSING, 12345L, FAKE_HOST)); + regionsInTransition + .add(new RegionState(FAKE_HRI, RegionState.State.CLOSING, 12345L, FAKE_HOST)); Mockito.doReturn(rs).when(am).getRegionStates(); Mockito.doReturn(regionsInTransition).when(rs).getRegionsInTransition(); Mockito.doReturn(am).when(master).getAssignmentManager(); @@ -118,8 +118,8 @@ public void setupBasicMocks() { private void setupMockTables() throws IOException { List tables = - Arrays.asList(TableDescriptorBuilder.newBuilder(TableName.valueOf("foo")).build(), - TableDescriptorBuilder.newBuilder(TableName.valueOf("bar")).build()); + Arrays.asList(TableDescriptorBuilder.newBuilder(TableName.valueOf("foo")).build(), + TableDescriptorBuilder.newBuilder(TableName.valueOf("bar")).build()); Mockito.doReturn(tables).when(admin).listTableDescriptors(); } @@ -132,28 +132,21 @@ public void testStatusTemplateNoTables() throws IOException { public void testStatusTemplateMetaAvailable() throws IOException { setupMockTables(); - new MasterStatusTmpl() - .setMetaLocation(ServerName.valueOf("metaserver,123,12345")) - .render(new StringWriter(), master); + new MasterStatusTmpl().setMetaLocation(ServerName.valueOf("metaserver,123,12345")) + .render(new StringWriter(), master); } @Test public void testStatusTemplateWithServers() throws IOException { setupMockTables(); - List servers = Lists.newArrayList( - ServerName.valueOf("rootserver,123,12345"), - ServerName.valueOf("metaserver,123,12345")); - Set deadServers = new HashSet<>( - Lists.newArrayList( - 
ServerName.valueOf("badserver,123,12345"), - ServerName.valueOf("uglyserver,123,12345")) - ); - - new MasterStatusTmpl() - .setMetaLocation(ServerName.valueOf("metaserver,123,12345")) - .setServers(servers) - .setDeadServers(deadServers) - .render(new StringWriter(), master); + List servers = Lists.newArrayList(ServerName.valueOf("rootserver,123,12345"), + ServerName.valueOf("metaserver,123,12345")); + Set deadServers = + new HashSet<>(Lists.newArrayList(ServerName.valueOf("badserver,123,12345"), + ServerName.valueOf("uglyserver,123,12345"))); + + new MasterStatusTmpl().setMetaLocation(ServerName.valueOf("metaserver,123,12345")) + .setServers(servers).setDeadServers(deadServers).render(new StringWriter(), master); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowser.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowser.java index dac3c727a46a..6238e962f1d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowser.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowser.java @@ -26,6 +26,7 @@ import static org.hamcrest.Matchers.startsWith; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.util.List; import java.util.concurrent.CompletableFuture; import javax.servlet.http.HttpServletRequest; @@ -53,28 +54,29 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestName; import org.junit.rules.TestRule; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils; /** * Cluster-backed correctness tests for the functionality provided by {@link MetaBrowser}. */ -@Category({ MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMetaBrowser { @ClassRule public static final HBaseClassTestRule testRule = - HBaseClassTestRule.forClass(TestMetaBrowser.class); + HBaseClassTestRule.forClass(TestMetaBrowser.class); @ClassRule public static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build(); private final ConnectionRule connectionRule = - ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection); + ConnectionRule.createAsyncConnectionRule(miniClusterRule::createAsyncConnection); private final ClearUserNamespacesAndTablesRule clearUserNamespacesAndTablesRule = - new ClearUserNamespacesAndTablesRule(connectionRule::getAsyncConnection); + new ClearUserNamespacesAndTablesRule(connectionRule::getAsyncConnection); @Rule - public TestRule rule = RuleChain.outerRule(connectionRule) - .around(clearUserNamespacesAndTablesRule); + public TestRule rule = + RuleChain.outerRule(connectionRule).around(clearUserNamespacesAndTablesRule); @Rule public TestName testNameRule = new TestName(); @@ -94,20 +96,18 @@ public void noFilters() { final TableName a = TableName.valueOf("a"); final TableName b = TableName.valueOf(namespaceName, "b"); - CompletableFuture.allOf( - createTable(a), - createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2))) - .join(); + CompletableFuture.allOf(createTable(a), + createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2))).join(); final HttpServletRequest request = new MockRequestBuilder().build(); final List rows; try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) { rows = IterableUtils.toList(results); } - assertThat(rows, contains( - hasProperty("row", bytesAsStringBinary(startsWith(a + ",,"))), - 
hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), - hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000"))))); + assertThat(rows, + contains(hasProperty("row", bytesAsStringBinary(startsWith(a + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000"))))); } @Test @@ -115,19 +115,17 @@ public void limit() { final String tableName = testNameRule.getMethodName(); createTable(TableName.valueOf(tableName), 8).join(); - final HttpServletRequest request = new MockRequestBuilder() - .setLimit(5) - .build(); + final HttpServletRequest request = new MockRequestBuilder().setLimit(5).build(); final List rows; try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) { rows = IterableUtils.toList(results); } - assertThat(rows, contains( - hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",,"))), - hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",20000000"))), - hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",40000000"))), - hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",60000000"))), - hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",80000000"))))); + assertThat(rows, + contains(hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",20000000"))), + hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",40000000"))), + hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",60000000"))), + hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",80000000"))))); } @Test @@ -137,23 +135,19 @@ public void regionStateFilter() { final TableName bar = TableName.valueOf(namespaceName, "bar"); createNamespace(namespaceName) - .thenCompose(_void1 -> CompletableFuture.allOf( - createTable(foo, 2).thenCompose(_void2 -> admin.disableTable(foo)), - createTable(bar, 2))) - .join(); - - final HttpServletRequest request = new MockRequestBuilder() - .setLimit(10_000) - .setRegionState(RegionState.State.OPEN) - .setTable(namespaceName) - .build(); + .thenCompose(_void1 -> CompletableFuture.allOf( + createTable(foo, 2).thenCompose(_void2 -> admin.disableTable(foo)), createTable(bar, 2))) + .join(); + + final HttpServletRequest request = new MockRequestBuilder().setLimit(10_000) + .setRegionState(RegionState.State.OPEN).setTable(namespaceName).build(); final List rows; try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) { rows = IterableUtils.toList(results); } - assertThat(rows, contains( - hasProperty("row", bytesAsStringBinary(startsWith(bar.toString() + ",,"))), - hasProperty("row", bytesAsStringBinary(startsWith(bar.toString() + ",80000000"))))); + assertThat(rows, + contains(hasProperty("row", bytesAsStringBinary(startsWith(bar.toString() + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(bar.toString() + ",80000000"))))); } @Test @@ -162,20 +156,15 @@ public void scanTableFilter() { final TableName a = TableName.valueOf("a"); final TableName b = TableName.valueOf(namespaceName, "b"); - CompletableFuture.allOf( - createTable(a), - createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2))) - .join(); + CompletableFuture.allOf(createTable(a), + createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2))).join(); - final HttpServletRequest request = new MockRequestBuilder() - .setTable(namespaceName) - .build(); + final 
HttpServletRequest request = new MockRequestBuilder().setTable(namespaceName).build(); final List rows; try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) { rows = IterableUtils.toList(results); } - assertThat(rows, contains( - hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), + assertThat(rows, contains(hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000"))))); } @@ -185,36 +174,28 @@ public void paginateWithReplicas() { final TableName a = TableName.valueOf("a"); final TableName b = TableName.valueOf(namespaceName, "b"); - CompletableFuture.allOf( - createTableWithReplicas(a, 2), - createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2))) - .join(); + CompletableFuture.allOf(createTableWithReplicas(a, 2), + createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2))).join(); - final HttpServletRequest request1 = new MockRequestBuilder() - .setLimit(2) - .build(); + final HttpServletRequest request1 = new MockRequestBuilder().setLimit(2).build(); final List rows1; try (final MetaBrowser.Results results = new MetaBrowser(connection, request1).getResults()) { rows1 = IterableUtils.toList(results); } - assertThat(rows1, contains( - allOf( - hasProperty("regionName", bytesAsStringBinary(startsWith(a + ",,"))), - hasProperty("replicaId", equalTo(0))), - allOf( - hasProperty("regionName", bytesAsStringBinary(startsWith(a + ",,"))), - hasProperty("replicaId", equalTo(1))))); - - final HttpServletRequest request2 = new MockRequestBuilder() - .setLimit(2) - .setStart(MetaBrowser.buildStartParamFrom(rows1.get(rows1.size() - 1).getRow())) - .build(); + assertThat(rows1, + contains( + allOf(hasProperty("regionName", bytesAsStringBinary(startsWith(a + ",,"))), + hasProperty("replicaId", equalTo(0))), + allOf(hasProperty("regionName", bytesAsStringBinary(startsWith(a + ",,"))), + hasProperty("replicaId", equalTo(1))))); + + final HttpServletRequest request2 = new MockRequestBuilder().setLimit(2) + .setStart(MetaBrowser.buildStartParamFrom(rows1.get(rows1.size() - 1).getRow())).build(); final List rows2; try (final MetaBrowser.Results results = new MetaBrowser(connection, request2).getResults()) { rows2 = IterableUtils.toList(results); } - assertThat(rows2, contains( - hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), + assertThat(rows2, contains(hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000"))))); } @@ -224,47 +205,35 @@ public void paginateWithTableFilter() { final TableName a = TableName.valueOf("a"); final TableName b = TableName.valueOf(namespaceName, "b"); - CompletableFuture.allOf( - createTable(a), - createNamespace(namespaceName).thenCompose(_void -> createTable(b, 5))) - .join(); + CompletableFuture.allOf(createTable(a), + createNamespace(namespaceName).thenCompose(_void -> createTable(b, 5))).join(); - final HttpServletRequest request1 = new MockRequestBuilder() - .setLimit(2) - .setTable(namespaceName) - .build(); + final HttpServletRequest request1 = + new MockRequestBuilder().setLimit(2).setTable(namespaceName).build(); final List rows1; try (final MetaBrowser.Results results = new MetaBrowser(connection, request1).getResults()) { rows1 = IterableUtils.toList(results); } - assertThat(rows1, contains( - hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), + assertThat(rows1, contains(hasProperty("row", bytesAsStringBinary(startsWith(b 
+ ",,"))), hasProperty("row", bytesAsStringBinary(startsWith(b + ",33333333"))))); - final HttpServletRequest request2 = new MockRequestBuilder() - .setLimit(2) - .setTable(namespaceName) - .setStart(MetaBrowser.buildStartParamFrom(rows1.get(rows1.size() - 1).getRow())) - .build(); + final HttpServletRequest request2 = new MockRequestBuilder().setLimit(2).setTable(namespaceName) + .setStart(MetaBrowser.buildStartParamFrom(rows1.get(rows1.size() - 1).getRow())).build(); final List rows2; try (final MetaBrowser.Results results = new MetaBrowser(connection, request2).getResults()) { rows2 = IterableUtils.toList(results); } - assertThat(rows2, contains( - hasProperty("row", bytesAsStringBinary(startsWith(b + ",66666666"))), + assertThat(rows2, contains(hasProperty("row", bytesAsStringBinary(startsWith(b + ",66666666"))), hasProperty("row", bytesAsStringBinary(startsWith(b + ",99999999"))))); - final HttpServletRequest request3 = new MockRequestBuilder() - .setLimit(2) - .setTable(namespaceName) - .setStart(MetaBrowser.buildStartParamFrom(rows2.get(rows2.size() - 1).getRow())) - .build(); + final HttpServletRequest request3 = new MockRequestBuilder().setLimit(2).setTable(namespaceName) + .setStart(MetaBrowser.buildStartParamFrom(rows2.get(rows2.size() - 1).getRow())).build(); final List rows3; try (final MetaBrowser.Results results = new MetaBrowser(connection, request3).getResults()) { rows3 = IterableUtils.toList(results); } - assertThat(rows3, contains( - hasProperty("row", bytesAsStringBinary(startsWith(b + ",cccccccc"))))); + assertThat(rows3, + contains(hasProperty("row", bytesAsStringBinary(startsWith(b + ",cccccccc"))))); } private ColumnFamilyDescriptor columnFamilyDescriptor() { @@ -272,16 +241,13 @@ private ColumnFamilyDescriptor columnFamilyDescriptor() { } private TableDescriptor tableDescriptor(final TableName tableName) { - return TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(columnFamilyDescriptor()) - .build(); + return TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(columnFamilyDescriptor()) + .build(); } private TableDescriptor tableDescriptor(final TableName tableName, final int replicaCount) { - return TableDescriptorBuilder.newBuilder(tableName) - .setRegionReplication(replicaCount) - .setColumnFamily(columnFamilyDescriptor()) - .build(); + return TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(replicaCount) + .setColumnFamily(columnFamilyDescriptor()).build(); } private CompletableFuture createTable(final TableName tableName) { @@ -289,13 +255,12 @@ private CompletableFuture createTable(final TableName tableName) { } private CompletableFuture createTable(final TableName tableName, final int splitCount) { - return admin.createTable( - tableDescriptor(tableName), + return admin.createTable(tableDescriptor(tableName), new RegionSplitter.HexStringSplit().split(splitCount)); } private CompletableFuture createTableWithReplicas(final TableName tableName, - final int replicaCount) { + final int replicaCount) { return admin.createTable(tableDescriptor(tableName, replicaCount)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowserNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowserNoCluster.java index 5fbbfe27698c..578d5d9fd234 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowserNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowserNoCluster.java @@ -22,6 +22,7 @@ 
import static org.hamcrest.Matchers.containsInAnyOrder; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; + import javax.servlet.http.HttpServletRequest; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; @@ -40,12 +41,12 @@ /** * Cluster-backed correctness tests for the functionality provided by {@link MetaBrowser}. */ -@Category({ MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestMetaBrowserNoCluster { @ClassRule public static final HBaseClassTestRule testRule = - HBaseClassTestRule.forClass(TestMetaBrowserNoCluster.class); + HBaseClassTestRule.forClass(TestMetaBrowserNoCluster.class); @Mock private AsyncConnection connection; @@ -70,21 +71,15 @@ public void buildFirstPageQueryStringNoParams() { @Test public void buildFirstPageQueryStringNonNullParams() { - final HttpServletRequest request = new MockRequestBuilder() - .setLimit(50) - .setRegionState(RegionState.State.ABNORMALLY_CLOSED) - .setTable("foo%3Abar") - .build(); + final HttpServletRequest request = new MockRequestBuilder().setLimit(50) + .setRegionState(RegionState.State.ABNORMALLY_CLOSED).setTable("foo%3Abar").build(); final MetaBrowser metaBrowser = new MetaBrowser(connection, request); assertEquals(50, metaBrowser.getScanLimit().intValue()); assertEquals(RegionState.State.ABNORMALLY_CLOSED, metaBrowser.getScanRegionState()); assertEquals(TableName.valueOf("foo", "bar"), metaBrowser.getScanTable()); - assertEquals( - "/table.jsp?name=hbase%3Ameta" - + "&scan_limit=50" - + "&scan_region_state=ABNORMALLY_CLOSED" - + "&scan_table=foo%3Abar", + assertEquals("/table.jsp?name=hbase%3Ameta" + "&scan_limit=50" + + "&scan_region_state=ABNORMALLY_CLOSED" + "&scan_table=foo%3Abar", metaBrowser.buildNextPageUrl(null)); } @@ -93,76 +88,60 @@ public void buildNextPageQueryString() { final HttpServletRequest request = new MockRequestBuilder().build(); final MetaBrowser metaBrowser = new MetaBrowser(connection, request); - assertEquals( - "/table.jsp?name=hbase%3Ameta&scan_start=%255Cx80%255Cx00%255Cx7F", + assertEquals("/table.jsp?name=hbase%3Ameta&scan_start=%255Cx80%255Cx00%255Cx7F", metaBrowser.buildNextPageUrl(new byte[] { Byte.MIN_VALUE, (byte) 0, Byte.MAX_VALUE })); } @Test public void unparseableLimitParam() { - final HttpServletRequest request = new MockRequestBuilder() - .setLimit("foo") - .build(); + final HttpServletRequest request = new MockRequestBuilder().setLimit("foo").build(); final MetaBrowser metaBrowser = new MetaBrowser(connection, request); assertNull(metaBrowser.getScanLimit()); - assertThat(metaBrowser.getErrorMessages(), contains( - "Requested SCAN_LIMIT value 'foo' cannot be parsed as an integer.")); + assertThat(metaBrowser.getErrorMessages(), + contains("Requested SCAN_LIMIT value 'foo' cannot be parsed as an integer.")); } @Test public void zeroLimitParam() { - final HttpServletRequest request = new MockRequestBuilder() - .setLimit(0) - .build(); + final HttpServletRequest request = new MockRequestBuilder().setLimit(0).build(); final MetaBrowser metaBrowser = new MetaBrowser(connection, request); assertEquals(MetaBrowser.SCAN_LIMIT_DEFAULT, metaBrowser.getScanLimit().intValue()); - assertThat(metaBrowser.getErrorMessages(), contains( - "Requested SCAN_LIMIT value 0 is <= 0.")); + assertThat(metaBrowser.getErrorMessages(), contains("Requested SCAN_LIMIT value 0 is <= 0.")); } @Test public void negativeLimitParam() { - final HttpServletRequest request = new MockRequestBuilder() - 
.setLimit(-10) - .build(); + final HttpServletRequest request = new MockRequestBuilder().setLimit(-10).build(); final MetaBrowser metaBrowser = new MetaBrowser(connection, request); assertEquals(MetaBrowser.SCAN_LIMIT_DEFAULT, metaBrowser.getScanLimit().intValue()); - assertThat(metaBrowser.getErrorMessages(), contains( - "Requested SCAN_LIMIT value -10 is <= 0.")); + assertThat(metaBrowser.getErrorMessages(), contains("Requested SCAN_LIMIT value -10 is <= 0.")); } @Test public void excessiveLimitParam() { - final HttpServletRequest request = new MockRequestBuilder() - .setLimit(10_001) - .build(); + final HttpServletRequest request = new MockRequestBuilder().setLimit(10_001).build(); final MetaBrowser metaBrowser = new MetaBrowser(connection, request); assertEquals(MetaBrowser.SCAN_LIMIT_MAX, metaBrowser.getScanLimit().intValue()); - assertThat(metaBrowser.getErrorMessages(), contains( - "Requested SCAN_LIMIT value 10001 exceeds maximum value 10000.")); + assertThat(metaBrowser.getErrorMessages(), + contains("Requested SCAN_LIMIT value 10001 exceeds maximum value 10000.")); } @Test public void invalidRegionStateParam() { - final HttpServletRequest request = new MockRequestBuilder() - .setRegionState("foo") - .build(); + final HttpServletRequest request = new MockRequestBuilder().setRegionState("foo").build(); final MetaBrowser metaBrowser = new MetaBrowser(connection, request); assertNull(metaBrowser.getScanRegionState()); - assertThat(metaBrowser.getErrorMessages(), contains( - "Requested SCAN_REGION_STATE value 'foo' cannot be parsed as a RegionState.")); + assertThat(metaBrowser.getErrorMessages(), + contains("Requested SCAN_REGION_STATE value 'foo' cannot be parsed as a RegionState.")); } @Test public void multipleErrorMessages() { - final HttpServletRequest request = new MockRequestBuilder() - .setLimit("foo") - .setRegionState("bar") - .build(); + final HttpServletRequest request = + new MockRequestBuilder().setLimit("foo").setRegionState("bar").build(); final MetaBrowser metaBrowser = new MetaBrowser(connection, request); - assertThat(metaBrowser.getErrorMessages(), containsInAnyOrder( - "Requested SCAN_LIMIT value 'foo' cannot be parsed as an integer.", - "Requested SCAN_REGION_STATE value 'bar' cannot be parsed as a RegionState." 
- )); + assertThat(metaBrowser.getErrorMessages(), + containsInAnyOrder("Requested SCAN_LIMIT value 'foo' cannot be parsed as an integer.", + "Requested SCAN_REGION_STATE value 'bar' cannot be parsed as a RegionState.")); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestRegionVisualizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestRegionVisualizer.java index b6b098c14732..1cab3367d541 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestRegionVisualizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestRegionVisualizer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.http; import static org.junit.Assert.assertEquals; + import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; @@ -41,15 +42,16 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.com.google.gson.JsonObject; -@Category({ MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionVisualizer { @ClassRule public static final HBaseClassTestRule testRule = - HBaseClassTestRule.forClass(TestRegionVisualizer.class); + HBaseClassTestRule.forClass(TestRegionVisualizer.class); private static final Random rand = new Random(); private static List regionMetricsBuilderLongValueSetters; @@ -57,20 +59,20 @@ public class TestRegionVisualizer { @BeforeClass public static void beforeClass() { regionMetricsBuilderLongValueSetters = - Arrays.stream(RegionMetricsBuilder.class.getDeclaredMethods()) - .filter(method -> method.getName().startsWith("set")) - .filter(method -> method.getParameterTypes().length == 1) - .filter(method -> Objects.equals(method.getParameterTypes()[0], long.class)) - .collect(Collectors.toList()); + Arrays.stream(RegionMetricsBuilder.class.getDeclaredMethods()) + .filter(method -> method.getName().startsWith("set")) + .filter(method -> method.getParameterTypes().length == 1) + .filter(method -> Objects.equals(method.getParameterTypes()[0], long.class)) + .collect(Collectors.toList()); } @Test public void testRegionDetailsJsonSerialization() throws Exception { final ServerName serverName = - ServerName.valueOf("example.org", 1234, System.currentTimeMillis()); + ServerName.valueOf("example.org", 1234, System.currentTimeMillis()); final TableName tableName = TableName.valueOf("foo", "bar"); final RegionDetails regionDetails = - new RegionDetails(serverName, tableName, buildRegionMetrics(tableName)); + new RegionDetails(serverName, tableName, buildRegionMetrics(tableName)); final Gson gson = RegionVisualizer.buildGson(); final JsonObject result = gson.fromJson(gson.toJson(regionDetails), JsonObject.class); @@ -89,7 +91,7 @@ final RegionMetrics buildRegionMetrics(final TableName tableName) throws Excepti final RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build(); final RegionMetricsBuilder builder = - RegionMetricsBuilder.newBuilder(regionInfo.getRegionName()); + RegionMetricsBuilder.newBuilder(regionInfo.getRegionName()); for (final Method setter : setters.subList(0, 3)) { setter.invoke(builder, rand.nextLong()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/gson/GsonFactoryTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/gson/GsonFactoryTest.java index 3089f66fa0b5..0bcb357c9a73 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/gson/GsonFactoryTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/gson/GsonFactoryTest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.http.gson; import static org.junit.Assert.assertEquals; + import java.util.Map; import java.util.TreeMap; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -29,14 +30,15 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.gson.Gson; -@Category({ MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class GsonFactoryTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(GsonFactoryTest.class); + HBaseClassTestRule.forClass(GsonFactoryTest.class); private static Gson gson; @@ -69,9 +71,9 @@ public void testSerializeNonPrintableByteArrays() { input.put(Bytes.toBytes("this is printable"), new byte[] { 0, 1, 2, 3, 4, 5 }); input.put(new byte[] { -127, -63, 0, 63, 127 }, Bytes.toBytes("test")); final String actual = gson.toJson(input); - final String expected = "{" + - "\"this is printable\":\"\\u0000\\u0001\\u0002\\u0003\\u0004\\u0005\"," + - "\"��\\u0000?\u007F\":\"test\"}"; + final String expected = + "{" + "\"this is printable\":\"\\u0000\\u0001\\u0002\\u0003\\u0004\\u0005\"," + + "\"��\\u0000?\u007F\":\"test\"}"; assertEquals(expected, actual); } @@ -80,11 +82,7 @@ private static final class SomeBean { private final int anInt; private final String aString; - public SomeBean( - final boolean aBoolean, - final int anInt, - final String aString - ) { + public SomeBean(final boolean aBoolean, final int anInt, final String aString) { this.aBoolean = aBoolean; this.anInt = anInt; this.aString = aString; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java index bc1dea93736d..ec6df390c3ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java @@ -82,7 +82,7 @@ public class TestCatalogJanitor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCatalogJanitor.class); + HBaseClassTestRule.forClass(TestCatalogJanitor.class); private static final Logger LOG = LoggerFactory.getLogger(TestCatalogJanitor.class); @@ -104,7 +104,7 @@ public static void beforeClass() throws Exception { public void setup() throws IOException, KeeperException { setRootDirAndCleanIt(HTU, this.name.getMethodName()); NavigableMap> regionsToRegionServers = - new ConcurrentSkipListMap>(); + new ConcurrentSkipListMap>(); this.masterServices = new MockMasterServices(HTU.getConfiguration(), regionsToRegionServers); this.masterServices.start(10, null); this.janitor = new CatalogJanitor(masterServices); @@ -121,9 +121,9 @@ private RegionInfo createRegionInfo(TableName tableName, byte[] startKey, byte[] } private RegionInfo createRegionInfo(TableName tableName, byte[] startKey, byte[] endKey, - boolean split) { + boolean split) { return RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey) - .setSplit(split).build(); + .setSplit(split).build(); } /** @@ -134,19 +134,19 @@ public void testCleanParent() throws IOException, InterruptedException { 
TableDescriptor td = createTableDescriptorForCurrentMethod(); // Create regions. RegionInfo parent = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); RegionInfo splita = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); RegionInfo splitb = - createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); + createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); // Test that when both daughter regions are in place, that we do not remove the parent. Result r = createResult(parent, splita, splitb); // Add a reference under splitA directory so we don't clear out the parent. Path rootdir = this.masterServices.getMasterFileSystem().getRootDir(); Path tabledir = CommonFSUtils.getTableDir(rootdir, td.getTableName()); Path parentdir = new Path(tabledir, parent.getEncodedName()); - Path storedir = HRegionFileSystem.getStoreHomedir(tabledir, splita, - td.getColumnFamilies()[0].getName()); + Path storedir = + HRegionFileSystem.getStoreHomedir(tabledir, splita, td.getColumnFamilies()[0].getName()); Reference ref = Reference.createTopReference(Bytes.toBytes("ccc")); long now = EnvironmentEdgeManager.currentTime(); // Reference name has this format: StoreFile#REF_NAME_PARSER @@ -183,7 +183,7 @@ public void testParentCleanedEvenIfDaughterGoneFirst() throws IOException, Inter */ @Test public void testLastParentCleanedEvenIfDaughterGoneFirst() - throws IOException, InterruptedException { + throws IOException, InterruptedException { parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(this.name.getMethodName(), new byte[0]); } @@ -193,9 +193,9 @@ public void testLastParentCleanedEvenIfDaughterGoneFirst() */ private TableDescriptor createTableDescriptorForCurrentMethod() { ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(MockMasterServices.DEFAULT_COLUMN_FAMILY_NAME)).build(); + .newBuilder(Bytes.toBytes(MockMasterServices.DEFAULT_COLUMN_FAMILY_NAME)).build(); return TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName())) - .setColumnFamily(columnFamilyDescriptor).build(); + .setColumnFamily(columnFamilyDescriptor).build(); } /** @@ -205,7 +205,7 @@ private TableDescriptor createTableDescriptorForCurrentMethod() { * @param lastEndKey the end key of the split parent */ private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(final String rootDir, - final byte[] lastEndKey) throws IOException, InterruptedException { + final byte[] lastEndKey) throws IOException, InterruptedException { TableDescriptor td = createTableDescriptorForCurrentMethod(); // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc. RegionInfo parent = createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), lastEndKey); @@ -215,25 +215,25 @@ private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(final Strin // Daughter a RegionInfo splita = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); Thread.sleep(1001); // Make daughters of daughter a; splitaa and splitab. 
RegionInfo splitaa = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); RegionInfo splitab = - createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); + createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); // Daughter b RegionInfo splitb = createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), lastEndKey); Thread.sleep(1001); // Make Daughters of daughterb; splitba and splitbb. RegionInfo splitba = - createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd")); + createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd")); RegionInfo splitbb = createRegionInfo(td.getTableName(), Bytes.toBytes("ddd"), lastEndKey); // First test that our Comparator works right up in CatalogJanitor. SortedMap regions = - new TreeMap<>(new CatalogJanitor.SplitParentFirstComparator()); + new TreeMap<>(new CatalogJanitor.SplitParentFirstComparator()); // Now make sure that this regions map sorts as we expect it to. regions.put(parent, createResult(parent, splita, splitb)); regions.put(splitb, createResult(splitb, splitba, splitbb)); @@ -253,7 +253,7 @@ private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(final Strin // Now play around with the cleanParent function. Create a ref from splita up to the parent. Path splitaRef = - createReferences(this.masterServices, td, parent, splita, Bytes.toBytes("ccc"), false); + createReferences(this.masterServices, td, parent, splita, Bytes.toBytes("ccc"), false); // Make sure actual super parent sticks around because splita has a ref. assertFalse(CatalogJanitor.cleanParent(masterServices, parent, regions.get(parent))); @@ -269,9 +269,9 @@ private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(final Strin assertTrue(fs.delete(splitaRef, true)); // Create the refs from daughters of splita. Path splitaaRef = - createReferences(this.masterServices, td, splita, splitaa, Bytes.toBytes("bbb"), false); + createReferences(this.masterServices, td, splita, splitaa, Bytes.toBytes("bbb"), false); Path splitabRef = - createReferences(this.masterServices, td, splita, splitab, Bytes.toBytes("bbb"), true); + createReferences(this.masterServices, td, splita, splitab, Bytes.toBytes("bbb"), true); // Test splita. It should stick around because references from splitab, etc. assertFalse(CatalogJanitor.cleanParent(masterServices, splita, regions.get(splita))); @@ -295,26 +295,26 @@ public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception { // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc. // Parent - RegionInfo parent = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), HConstants.EMPTY_BYTE_ARRAY, true); + RegionInfo parent = createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), + HConstants.EMPTY_BYTE_ARRAY, true); // Sleep a second else the encoded name on these regions comes out // same for all with same start key and made in same second. Thread.sleep(1001); // Daughter a RegionInfo splita = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"), true); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"), true); Thread.sleep(1001); // Make daughters of daughter a; splitaa and splitab. 
RegionInfo splitaa = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), false); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), false); RegionInfo splitab = - createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), false); + createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), false); // Daughter b RegionInfo splitb = - createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), HConstants.EMPTY_BYTE_ARRAY); + createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), HConstants.EMPTY_BYTE_ARRAY); Thread.sleep(1001); // Parent has daughters splita and splitb. Splita has daughters splitaa and splitab. @@ -337,7 +337,7 @@ public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception { // Create ref from splita to parent LOG.info("parent=" + parent.getShortNameToLog() + ", splita=" + splita.getShortNameToLog()); Path splitaRef = - createReferences(this.masterServices, td, parent, splita, Bytes.toBytes("ccc"), false); + createReferences(this.masterServices, td, parent, splita, Bytes.toBytes("ccc"), false); LOG.info("Created reference " + splitaRef); // Parent and splita should not be removed because a reference from splita to parent. @@ -373,9 +373,9 @@ public void testSplitParentFirstComparator() { RegionInfo rootRegion = createRegionInfo(td.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, true); RegionInfo firstRegion = - createRegionInfo(td.getTableName(), HConstants.EMPTY_START_ROW, Bytes.toBytes("bbb"), true); + createRegionInfo(td.getTableName(), HConstants.EMPTY_START_ROW, Bytes.toBytes("bbb"), true); RegionInfo lastRegion = - createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), HConstants.EMPTY_END_ROW, true); + createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), HConstants.EMPTY_END_ROW, true); assertTrue(comp.compare(rootRegion, rootRegion) == 0); assertTrue(comp.compare(firstRegion, firstRegion) == 0); @@ -386,14 +386,14 @@ public void testSplitParentFirstComparator() { // first region split into a, b RegionInfo firstRegiona = - createRegionInfo(td.getTableName(), HConstants.EMPTY_START_ROW, Bytes.toBytes("aaa"), true); + createRegionInfo(td.getTableName(), HConstants.EMPTY_START_ROW, Bytes.toBytes("aaa"), true); RegionInfo firstRegionb = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), true); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), true); // last region split into a, b RegionInfo lastRegiona = - createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ddd"), true); + createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ddd"), true); RegionInfo lastRegionb = - createRegionInfo(td.getTableName(), Bytes.toBytes("ddd"), HConstants.EMPTY_END_ROW, true); + createRegionInfo(td.getTableName(), Bytes.toBytes("ddd"), HConstants.EMPTY_END_ROW, true); assertTrue(comp.compare(firstRegiona, firstRegiona) == 0); assertTrue(comp.compare(firstRegionb, firstRegionb) == 0); @@ -417,9 +417,9 @@ public void testSplitParentFirstComparator() { assertTrue(comp.compare(firstRegionb, lastRegionb) < 0); RegionInfo lastRegionaa = - createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), false); + createRegionInfo(td.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), false); RegionInfo lastRegionab = - createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), false); + 
createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), false); assertTrue(comp.compare(lastRegiona, lastRegionaa) < 0); assertTrue(comp.compare(lastRegiona, lastRegionab) < 0); @@ -431,11 +431,11 @@ public void testArchiveOldRegion() throws Exception { // Create regions. TableDescriptor td = createTableDescriptorForCurrentMethod(); RegionInfo parent = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); RegionInfo splita = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); RegionInfo splitb = - createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); + createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); // Test that when both daughter regions are in place, that we do not // remove the parent. @@ -447,8 +447,8 @@ public void testArchiveOldRegion() throws Exception { // the single test passes, but when the full suite is run, things get borked). CommonFSUtils.setRootDir(fs.getConf(), rootdir); Path tabledir = CommonFSUtils.getTableDir(rootdir, td.getTableName()); - Path storedir = HRegionFileSystem.getStoreHomedir(tabledir, parent, - td.getColumnFamilies()[0].getName()); + Path storedir = + HRegionFileSystem.getStoreHomedir(tabledir, parent, td.getColumnFamilies()[0].getName()); Path storeArchive = HFileArchiveUtil.getStoreArchivePath(this.masterServices.getConfiguration(), parent, tabledir, td.getColumnFamilies()[0].getName()); LOG.debug("Table dir:" + tabledir); @@ -507,11 +507,11 @@ public void testDuplicateHFileResolution() throws Exception { // Create regions. RegionInfo parent = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); RegionInfo splita = - createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); + createRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); RegionInfo splitb = - createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); + createRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); // Test that when both daughter regions are in place, that we do not // remove the parent. Result r = createResult(parent, splita, splitb); @@ -522,8 +522,8 @@ public void testDuplicateHFileResolution() throws Exception { // the single test passes, but when the full suite is run, things get borked). 
CommonFSUtils.setRootDir(fs.getConf(), rootdir); Path tabledir = CommonFSUtils.getTableDir(rootdir, parent.getTable()); - Path storedir = HRegionFileSystem.getStoreHomedir(tabledir, parent, - td.getColumnFamilies()[0].getName()); + Path storedir = + HRegionFileSystem.getStoreHomedir(tabledir, parent, td.getColumnFamilies()[0].getName()); LOG.info("Old root:" + rootdir); LOG.info("Old table:" + tabledir); LOG.info("Old store:" + storedir); @@ -585,7 +585,7 @@ public void testAlreadyRunningStatus() throws Exception { } private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir) - throws IOException { + throws IOException { // get the existing store files FileSystem fs = services.getMasterFileSystem().getFileSystem(); fs.mkdirs(storedir); @@ -604,7 +604,7 @@ private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path } private String setRootDirAndCleanIt(final HBaseTestingUtil htu, final String subdir) - throws IOException { + throws IOException { Path testdir = htu.getDataTestDir(subdir); FileSystem fs = FileSystem.get(htu.getConfiguration()); if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true)); @@ -613,14 +613,14 @@ private String setRootDirAndCleanIt(final HBaseTestingUtil htu, final String sub } private Path createReferences(final MasterServices services, final TableDescriptor td, - final RegionInfo parent, final RegionInfo daughter, final byte[] midkey, final boolean top) - throws IOException { + final RegionInfo parent, final RegionInfo daughter, final byte[] midkey, final boolean top) + throws IOException { Path rootdir = services.getMasterFileSystem().getRootDir(); Path tabledir = CommonFSUtils.getTableDir(rootdir, parent.getTable()); - Path storedir = HRegionFileSystem.getStoreHomedir(tabledir, daughter, - td.getColumnFamilies()[0].getName()); + Path storedir = + HRegionFileSystem.getStoreHomedir(tabledir, daughter, td.getColumnFamilies()[0].getName()); Reference ref = - top ? Reference.createTopReference(midkey) : Reference.createBottomReference(midkey); + top ? Reference.createTopReference(midkey) : Reference.createBottomReference(midkey); long now = EnvironmentEdgeManager.currentTime(); // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storedir, Long.toString(now) + "." 
+ parent.getEncodedName()); @@ -630,7 +630,7 @@ private Path createReferences(final MasterServices services, final TableDescript } private Result createResult(final RegionInfo parent, final RegionInfo a, final RegionInfo b) - throws IOException { + throws IOException { return MetaMockingUtil.getMetaTableRowResult(parent, null, a, b); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java index f1ef9377a6b8..5e27776ebee4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java @@ -57,7 +57,7 @@ public class TestCatalogJanitorCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCatalogJanitorCluster.class); + HBaseClassTestRule.forClass(TestCatalogJanitorCluster.class); @Rule public final TestName name = new TestName(); @@ -77,7 +77,7 @@ public void before() throws Exception { TEST_UTIL.createMultiRegionTable(T3, new byte[][] { HConstants.CATALOG_FAMILY }); final byte[][] keysForT4 = - { Bytes.toBytes("aa"), Bytes.toBytes("bb"), Bytes.toBytes("cc"), Bytes.toBytes("dd") }; + { Bytes.toBytes("aa"), Bytes.toBytes("bb"), Bytes.toBytes("cc"), Bytes.toBytes("dd") }; TEST_UTIL.createTable(T4, HConstants.CATALOG_FAMILY, keysForT4); @@ -102,7 +102,7 @@ public void after() throws Exception { public void testConsistency() throws IOException { CatalogJanitor janitor = TEST_UTIL.getHBaseCluster().getMaster().getCatalogJanitor(); RegionStateStore regionStateStore = - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); janitor.scan(); Report report = janitor.getLastReport(); // Assert no problems. 
@@ -115,18 +115,19 @@ public void testConsistency() throws IOException { assertFalse(report.isEmpty()); assertEquals(1, report.getHoles().size()); assertTrue(report.getHoles().get(0).getFirst().getTable() - .equals(RegionInfoBuilder.UNDEFINED.getTable())); + .equals(RegionInfoBuilder.UNDEFINED.getTable())); assertTrue(report.getHoles().get(0).getSecond().getTable().equals(T2)); assertEquals(0, report.getOverlaps().size()); // Next, add overlaps to first row in t3 List<RegionInfo> t3Ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), T3); RegionInfo ri = t3Ris.get(0); - RegionInfo newRi1 = RegionInfoBuilder.newBuilder(ri.getTable()) - .setStartKey(incrementRow(ri.getStartKey())).setEndKey(incrementRow(ri.getEndKey())).build(); + RegionInfo newRi1 = + RegionInfoBuilder.newBuilder(ri.getTable()).setStartKey(incrementRow(ri.getStartKey())) + .setEndKey(incrementRow(ri.getEndKey())).build(); Put p1 = MetaTableAccessor.makePutFromRegionInfo(newRi1, EnvironmentEdgeManager.currentTime()); RegionInfo newRi2 = RegionInfoBuilder.newBuilder(newRi1.getTable()) - .setStartKey(incrementRow(newRi1.getStartKey())).setEndKey(incrementRow(newRi1.getEndKey())) - .build(); + .setStartKey(incrementRow(newRi1.getStartKey())).setEndKey(incrementRow(newRi1.getEndKey())) + .build(); Put p2 = MetaTableAccessor.makePutFromRegionInfo(newRi2, EnvironmentEdgeManager.currentTime()); MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(p1, p2)); janitor.scan(); @@ -183,9 +184,9 @@ public void testConsistency() throws IOException { // add a new region [a, cc) RegionInfo newRiT4 = RegionInfoBuilder.newBuilder(T4).setStartKey("a".getBytes()) - .setEndKey("cc".getBytes()).build(); - Put putForT4 = MetaTableAccessor.makePutFromRegionInfo(newRiT4, - EnvironmentEdgeManager.currentTime()); + .setEndKey("cc".getBytes()).build(); + Put putForT4 = + MetaTableAccessor.makePutFromRegionInfo(newRiT4, EnvironmentEdgeManager.currentTime()); MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(putForT4)); janitor.scan(); @@ -206,9 +207,9 @@ public void testConsistency() throws IOException { // add a new region [a, g) RegionInfo newRiT5 = RegionInfoBuilder.newBuilder(T5).setStartKey("a".getBytes()) - .setEndKey("g".getBytes()).build(); - Put putForT5 = MetaTableAccessor.makePutFromRegionInfo(newRiT5, - EnvironmentEdgeManager.currentTime()); + .setEndKey("g".getBytes()).build(); + Put putForT5 = + MetaTableAccessor.makePutFromRegionInfo(newRiT5, EnvironmentEdgeManager.currentTime()); MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(putForT5)); janitor.scan(); @@ -264,7 +265,7 @@ private void verifyMiddleHole(CatalogJanitor janitor) throws IOException { RegionInfo secondRegion = getRegionInfo(T3, "bbb".getBytes()); RegionInfo thirdRegion = getRegionInfo(T3, "ccc".getBytes()); TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore() - .deleteRegion(secondRegion); + .deleteRegion(secondRegion); LinkedList<Pair<RegionInfo, RegionInfo>> holes = getHoles(janitor, T3); Pair<RegionInfo, RegionInfo> regionInfoRegionInfoPair = holes.getFirst(); assertTrue(regionInfoRegionInfoPair.getFirst().getTable().equals(T3)); @@ -277,7 +278,7 @@ private void verifyMiddleHole(CatalogJanitor janitor) throws IOException { private void verifyCornerHoles(CatalogJanitor janitor, TableName tableName) throws IOException { RegionStateStore regionStateStore = -
TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); + TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore(); RegionInfo firstRegion = getRegionInfo(tableName, "".getBytes()); RegionInfo secondRegion = getRegionInfo(tableName, "bbb".getBytes()); regionStateStore.deleteRegion(firstRegion); @@ -286,7 +287,7 @@ private void verifyCornerHoles(CatalogJanitor janitor, TableName tableName) thro assertEquals(1, holes.size()); Pair<RegionInfo, RegionInfo> regionInfoRegionInfoPair = holes.get(0); assertTrue(regionInfoRegionInfoPair.getFirst().getTable() - .equals(RegionInfoBuilder.UNDEFINED.getTable())); + .equals(RegionInfoBuilder.UNDEFINED.getTable())); assertTrue(regionInfoRegionInfoPair.getSecond().getTable().equals(tableName)); assertTrue( regionInfoRegionInfoPair.getSecond().getEncodedName().equals(secondRegion.getEncodedName())); @@ -298,21 +299,21 @@ private void verifyCornerHoles(CatalogJanitor janitor, TableName tableName) thro assertEquals(2, holes.size()); regionInfoRegionInfoPair = holes.get(1); assertTrue(regionInfoRegionInfoPair.getFirst().getEncodedName() - .equals(secondLastRegion.getEncodedName())); + .equals(secondLastRegion.getEncodedName())); assertTrue(regionInfoRegionInfoPair.getSecond().getTable() - .equals(RegionInfoBuilder.UNDEFINED.getTable())); + .equals(RegionInfoBuilder.UNDEFINED.getTable())); } // Get Holes filter by table private LinkedList<Pair<RegionInfo, RegionInfo>> getHoles(CatalogJanitor janitor, - TableName tableName) throws IOException { + TableName tableName) throws IOException { janitor.scan(); Report lastReport = janitor.getLastReport(); assertFalse(lastReport.isEmpty()); LinkedList<Pair<RegionInfo, RegionInfo>> holes = new LinkedList<>(); for (Pair<RegionInfo, RegionInfo> hole : lastReport.getHoles()) { - if (hole.getFirst().getTable().equals(tableName) || - hole.getSecond().getTable().equals(tableName)) { + if (hole.getFirst().getTable().equals(tableName) + || hole.getSecond().getTable().equals(tableName)) { holes.add(hole); } } @@ -321,7 +322,7 @@ private LinkedList<Pair<RegionInfo, RegionInfo>> getHoles(CatalogJanitor janitor private RegionInfo getRegionInfo(TableName tableName, byte[] row) throws IOException { RegionInfo regionInfo = - TEST_UTIL.getConnection().getRegionLocator(tableName).getRegionLocation(row).getRegion(); + TEST_UTIL.getConnection().getRegionLocator(tableName).getRegionLocation(row).getRegion(); assertNotNull(regionInfo); return regionInfo; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java index 623bf9d8e7c7..27dca43fb926 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -69,7 +69,7 @@ public class TestCatalogJanitorInMemoryStates { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCatalogJanitorInMemoryStates.class); + HBaseClassTestRule.forClass(TestCatalogJanitorInMemoryStates.class); private static final Logger LOG = LoggerFactory.getLogger(TestCatalogJanitorInMemoryStates.class); @@ -95,7 +95,7 @@ public static void tearDownAfterClass() throws Exception { */ @Test public void testInMemoryParentCleanup() - throws IOException, InterruptedException, ExecutionException { + throws IOException, InterruptedException, ExecutionException { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); final AssignmentManager am = master.getAssignmentManager(); final ServerManager sm = master.getServerManager(); @@ -132,11 +132,10 @@ public void testInMemoryParentCleanup() @Override public boolean evaluate() throws Exception { ProcedureExecutor pe = master.getMasterProcedureExecutor(); - for (Procedure proc: pe.getProcedures()) { - if (proc.getClass().isAssignableFrom(GCRegionProcedure.class) && - proc.isFinished()) { + for (Procedure proc : pe.getProcedures()) { + if (proc.getClass().isAssignableFrom(GCRegionProcedure.class) && proc.isFinished()) { return true; - } + } } return false; } @@ -155,7 +154,7 @@ public boolean evaluate() throws Exception { * @return List of region locations */ private List splitRegion(final RegionInfo r) - throws IOException, InterruptedException, ExecutionException { + throws IOException, InterruptedException, ExecutionException { List locations = new ArrayList<>(); // Split this table in two. Admin admin = TEST_UTIL.getAdmin(); @@ -183,7 +182,7 @@ private PairOfSameType waitOnDaughters(final RegionInfo r) throws IO long start = EnvironmentEdgeManager.currentTime(); PairOfSameType pair = null; try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { + Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { Result result = null; RegionInfo region = null; while ((EnvironmentEdgeManager.currentTime() - start) < 60000) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java index 6a0740d7c755..b333c4841c88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java @@ -64,7 +64,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestMetaFixer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -91,7 +91,7 @@ private void deleteRegion(MasterServices services, RegionInfo ri) throws IOExcep } private void testPlugsHolesWithReadReplicaInternal(final TableName tn, final int replicaCount) - throws Exception { + throws Exception { TEST_UTIL.createMultiRegionTable(tn, replicaCount, new byte[][] { HConstants.CATALOG_FAMILY }); List ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn); MasterServices services = TEST_UTIL.getHBaseCluster().getMaster(); @@ -100,8 +100,9 @@ private void testPlugsHolesWithReadReplicaInternal(final TableName tn, final int Report report = 
services.getCatalogJanitor().getLastReport(); assertTrue(report.isEmpty()); int originalCount = ris.size(); - // Remove first, last and middle region. See if hole gets plugged. Table has 26 * replicaCount regions. - for (int i = 0; i < replicaCount; i ++) { + // Remove first, last and middle region. See if hole gets plugged. Table has 26 * replicaCount + // regions. + for (int i = 0; i < replicaCount; i++) { deleteRegion(services, ris.get(3 * replicaCount + i)); deleteRegion(services, ris.get(i)); deleteRegion(services, ris.get(ris.size() - 1 - i)); @@ -141,10 +142,9 @@ public void testPlugsHolesWithReadReplica() throws Exception { } /** - * Just make sure running fixMeta does right thing for the case - * of a single-region Table where the region gets dropped. - * There is nothing much we can do. We can't restore what - * we don't know about (at least from a read of hbase:meta). + * Just make sure running fixMeta does right thing for the case of a single-region Table where the + * region gets dropped. There is nothing much we can do. We can't restore what we don't know about + * (at least from a read of hbase:meta). */ @Test public void testOneRegionTable() throws IOException { @@ -168,10 +168,8 @@ public void testOneRegionTable() throws IOException { private static RegionInfo makeOverlap(MasterServices services, RegionInfo a, RegionInfo b) throws IOException { - RegionInfo overlapRegion = RegionInfoBuilder.newBuilder(a.getTable()). - setStartKey(a.getStartKey()). - setEndKey(b.getEndKey()). - build(); + RegionInfo overlapRegion = RegionInfoBuilder.newBuilder(a.getTable()) + .setStartKey(a.getStartKey()).setEndKey(b.getEndKey()).build(); MetaTableAccessor.putsToMetaTable(services.getConnection(), Collections.singletonList(MetaTableAccessor.makePutFromRegionInfo(overlapRegion, EnvironmentEdgeManager.currentTime()))); @@ -207,12 +205,11 @@ public void testOverlap() throws Exception { cj.scan(); Report report = cj.getLastReport(); assertEquals(6, report.getOverlaps().size()); - assertEquals(1, - MetaFixer.calculateMerges(10, report.getOverlaps()).size()); + assertEquals(1, MetaFixer.calculateMerges(10, report.getOverlaps()).size()); MetaFixer fixer = new MetaFixer(services); fixer.fixOverlaps(report); - HBaseTestingUtil. await(10, () -> { + HBaseTestingUtil.await(10, () -> { try { if (cj.scan() > 0) { // It submits GC once, then it will immediately kick off another GC to test if @@ -223,8 +220,8 @@ HBaseTestingUtil. await(10, () -> { List parents = CatalogFamilyFormat.getMergeRegions(e.getValue().rawCells()); if (parents != null) { ProcedureExecutor pe = services.getMasterProcedureExecutor(); - pe.submitProcedure(new GCMultipleMergedRegionsProcedure(pe.getEnvironment(), - e.getKey(), parents)); + pe.submitProcedure( + new GCMultipleMergedRegionsProcedure(pe.getEnvironment(), e.getKey(), parents)); } } return true; @@ -295,13 +292,12 @@ public void testOverlapWithSmallMergeCount() throws Exception { cj.scan(); Report report = cj.getLastReport(); assertEquals(6, report.getOverlaps().size()); - assertEquals(2, - MetaFixer.calculateMerges(5, report.getOverlaps()).size()); + assertEquals(2, MetaFixer.calculateMerges(5, report.getOverlaps()).size()); // The max merge count is set to 5 so overlap regions are divided into // two merge requests. 
- TEST_UTIL.getHBaseCluster().getMaster().getConfiguration().setInt( - "hbase.master.metafixer.max.merge.count", 5); + TEST_UTIL.getHBaseCluster().getMaster().getConfiguration() + .setInt("hbase.master.metafixer.max.merge.count", 5); // Get overlap regions HashSet overlapRegions = new HashSet<>(); @@ -323,16 +319,16 @@ public void testOverlapWithSmallMergeCount() throws Exception { // Make sure that two merged regions are opened and GCs are done. if (postReport.getOverlaps().size() == 1) { Pair pair = postReport.getOverlaps().get(0); - if ((!overlapRegions.contains(pair.getFirst().getRegionNameAsString()) && - regionStates.getRegionState(pair.getFirst()).isOpened()) && - (!overlapRegions.contains(pair.getSecond().getRegionNameAsString()) && - regionStates.getRegionState(pair.getSecond()).isOpened())) { + if ((!overlapRegions.contains(pair.getFirst().getRegionNameAsString()) + && regionStates.getRegionState(pair.getFirst()).isOpened()) + && (!overlapRegions.contains(pair.getSecond().getRegionNameAsString()) + && regionStates.getRegionState(pair.getSecond()).isOpened())) { // Make sure GC is done. List firstParents = regionStateStore.getMergeRegions(pair.getFirst()); List secondParents = regionStateStore.getMergeRegions(pair.getSecond()); - return (firstParents == null || firstParents.isEmpty()) && - (secondParents == null || secondParents.isEmpty()); + return (firstParents == null || firstParents.isEmpty()) + && (secondParents == null || secondParents.isEmpty()); } } return false; @@ -360,17 +356,17 @@ public void testOverlapWithSmallMergeCount() throws Exception { assertTrue(postReport.isEmpty()); } finally { - TEST_UTIL.getHBaseCluster().getMaster().getConfiguration().unset( - "hbase.master.metafixer.max.merge.count"); + TEST_UTIL.getHBaseCluster().getMaster().getConfiguration() + .unset("hbase.master.metafixer.max.merge.count"); TEST_UTIL.deleteTable(tn); } } /** - * This test covers the case that one of merged parent regions is a merged child region that - * has not been GCed but there is no reference files anymore. In this case, it will kick off - * a GC procedure, but no merge will happen. + * This test covers the case that one of merged parent regions is a merged child region that has + * not been GCed but there is no reference files anymore. In this case, it will kick off a GC + * procedure, but no merge will happen. */ @Test public void testMergeWithMergedChildRegion() throws Exception { @@ -390,22 +386,17 @@ public void testMergeWithMergedChildRegion() throws Exception { assertEquals(2, report.getOverlaps().size()); // Mark it as a merged child region. - RegionInfo fakedParentRegion = RegionInfoBuilder.newBuilder(tn). - setStartKey(overlapRegion.getStartKey()). - build(); + RegionInfo fakedParentRegion = + RegionInfoBuilder.newBuilder(tn).setStartKey(overlapRegion.getStartKey()).build(); Table meta = MetaTableAccessor.getMetaHTable(TEST_UTIL.getConnection()); - Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(overlapRegion, - HConstants.LATEST_TIMESTAMP); + Put putOfMerged = + MetaTableAccessor.makePutFromRegionInfo(overlapRegion, HConstants.LATEST_TIMESTAMP); String qualifier = String.format(HConstants.MERGE_QUALIFIER_PREFIX_STR + "%04d", 0); - putOfMerged.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow( - putOfMerged.getRow()). - setFamily(HConstants.CATALOG_FAMILY). - setQualifier(Bytes.toBytes(qualifier)). - setTimestamp(putOfMerged.getTimestamp()). - setType(Cell.Type.Put). - setValue(RegionInfo.toByteArray(fakedParentRegion)). 
- build()); + putOfMerged.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setRow(putOfMerged.getRow()).setFamily(HConstants.CATALOG_FAMILY) + .setQualifier(Bytes.toBytes(qualifier)).setTimestamp(putOfMerged.getTimestamp()) + .setType(Cell.Type.Put).setValue(RegionInfo.toByteArray(fakedParentRegion)).build()); meta.put(putOfMerged); @@ -436,8 +427,8 @@ public void testMergeWithMergedChildRegion() throws Exception { } /** - * Make it so a big overlap spans many Regions, some of which are non-contiguous. Make it so - * we can fix this condition. HBASE-24247 + * Make it so a big overlap spans many Regions, some of which are non-contiguous. Make it so we + * can fix this condition. HBASE-24247 */ @Test public void testOverlapWithMergeOfNonContiguous() throws Exception { @@ -458,7 +449,7 @@ public void testOverlapWithMergeOfNonContiguous() throws Exception { Threads.sleep(100); } GCRegionProcedure procedure = - new GCRegionProcedure(services.getMasterProcedureExecutor().getEnvironment(), ris.get(3)); + new GCRegionProcedure(services.getMasterProcedureExecutor().getEnvironment(), ris.get(3)); pid = services.getMasterProcedureExecutor().submitProcedure(procedure); while (!services.getMasterProcedureExecutor().isFinished(pid)) { Threads.sleep(100); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java index 614385ec04d6..d8ed4ec4ffad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java @@ -43,28 +43,28 @@ public class TestMetaFixerNoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetaFixerNoCluster.class); + HBaseClassTestRule.forClass(TestMetaFixerNoCluster.class); private static byte[] A = Bytes.toBytes("a"); private static byte[] B = Bytes.toBytes("b"); private static byte[] C = Bytes.toBytes("c"); private static byte[] D = Bytes.toBytes("d"); private static RegionInfo ALL = RegionInfoBuilder.FIRST_META_REGIONINFO; private static RegionInfo _ARI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(A).build(); + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(A).build(); private static RegionInfo _BRI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(B).build(); + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(B).build(); private static RegionInfo ABRI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(B).build(); + RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(B).build(); private static RegionInfo ACRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(C).build(); + .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(C).build(); private static RegionInfo CDRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).setEndKey(D).build(); + .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).setEndKey(D).build(); private static RegionInfo ADRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(D).build(); + .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(D).build(); private 
static RegionInfo D_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(D).build(); + .newBuilder(TableName.META_TABLE_NAME).setStartKey(D).build(); private static RegionInfo C_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).build(); + .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).build(); @Test public void testGetRegionInfoWithLargestEndKey() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java index 5b6082ee3e9f..598447bb874b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,7 +71,7 @@ public class TestLockManager { private static void setupConf(Configuration conf) { conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); - conf.setBoolean("hbase.procedure.check.owner.set", false); // since rpc user will be null + conf.setBoolean("hbase.procedure.check.owner.set", false); // since rpc user will be null conf.setInt(LockProcedure.LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF, LOCAL_LOCKS_TIMEOUT); } @@ -81,8 +81,8 @@ public static void setupCluster() throws Exception { UTIL.startMiniCluster(1); masterServices = UTIL.getMiniHBaseCluster().getMaster(); UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build()); - UTIL.createTable(tableName, new byte[][]{Bytes.toBytes("fam")}, - new byte[][] {Bytes.toBytes("1")}); + UTIL.createTable(tableName, new byte[][] { Bytes.toBytes("fam") }, + new byte[][] { Bytes.toBytes("1") }); List regions = UTIL.getAdmin().getRegions(tableName); assert regions.size() > 0; tableRegions = new RegionInfo[regions.size()]; @@ -118,8 +118,8 @@ private ProcedureExecutor getMasterProcedureExecutor() { */ @Test public void testMasterLockAcquire() throws Exception { - LockManager.MasterLock lock = masterServices.getLockManager().createMasterLock(namespace, - LockType.EXCLUSIVE, "desc"); + LockManager.MasterLock lock = + masterServices.getLockManager().createMasterLock(namespace, LockType.EXCLUSIVE, "desc"); assertTrue(lock.tryAcquire(2000)); assertTrue(lock.getProc().isLocked()); lock.release(); @@ -131,12 +131,12 @@ public void testMasterLockAcquire() throws Exception { */ @Test public void testMasterLockAcquireTimeout() throws Exception { - LockManager.MasterLock lock = masterServices.getLockManager().createMasterLock( - tableName, LockType.EXCLUSIVE, "desc"); - LockManager.MasterLock lock2 = masterServices.getLockManager().createMasterLock( - tableName, LockType.EXCLUSIVE, "desc"); + LockManager.MasterLock lock = + masterServices.getLockManager().createMasterLock(tableName, LockType.EXCLUSIVE, "desc"); + LockManager.MasterLock lock2 = + masterServices.getLockManager().createMasterLock(tableName, LockType.EXCLUSIVE, "desc"); assertTrue(lock.tryAcquire(2000)); - assertFalse(lock2.tryAcquire(LOCAL_LOCKS_TIMEOUT/2)); // wait less than other lock's timeout + assertFalse(lock2.tryAcquire(LOCAL_LOCKS_TIMEOUT / 2)); // wait less than other lock's timeout assertEquals(null, lock2.getProc()); lock.release(); assertTrue(lock2.tryAcquire(2000)); @@ -149,12 
+149,12 @@ public void testMasterLockAcquireTimeout() throws Exception { */ @Test public void testMasterLockAcquireTimeoutRegionVsTableExclusive() throws Exception { - LockManager.MasterLock lock = masterServices.getLockManager().createMasterLock( - tableRegions, "desc"); - LockManager.MasterLock lock2 = masterServices.getLockManager().createMasterLock( - tableName, LockType.EXCLUSIVE, "desc"); + LockManager.MasterLock lock = + masterServices.getLockManager().createMasterLock(tableRegions, "desc"); + LockManager.MasterLock lock2 = + masterServices.getLockManager().createMasterLock(tableName, LockType.EXCLUSIVE, "desc"); assertTrue(lock.tryAcquire(2000)); - assertFalse(lock2.tryAcquire(LOCAL_LOCKS_TIMEOUT/2)); // wait less than other lock's timeout + assertFalse(lock2.tryAcquire(LOCAL_LOCKS_TIMEOUT / 2)); // wait less than other lock's timeout assertEquals(null, lock2.getProc()); lock.release(); assertTrue(lock2.tryAcquire(2000)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java index 822ae9bcd4c9..2d2c24239a13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.util.ArrayList; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -59,14 +60,16 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestLockProcedure { @ClassRule @@ -96,7 +99,7 @@ public class TestLockProcedure { private static void setupConf(Configuration conf) { conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); - conf.setBoolean("hbase.procedure.check.owner.set", false); // since rpc user will be null + conf.setBoolean("hbase.procedure.check.owner.set", false); // since rpc user will be null conf.setInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, HEARTBEAT_TIMEOUT); conf.setInt(LockProcedure.LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF, LOCAL_LOCKS_TIMEOUT); } @@ -106,10 +109,10 @@ public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(1); UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build()); - UTIL.createTable(tableName1, - new byte[][]{ Bytes.toBytes("fam")}, new byte[][] {Bytes.toBytes("1")}); - UTIL.createTable(tableName2, - new byte[][]{Bytes.toBytes("fam")}, new byte[][] {Bytes.toBytes("1")}); + 
UTIL.createTable(tableName1, new byte[][] { Bytes.toBytes("fam") }, + new byte[][] { Bytes.toBytes("1") }); + UTIL.createTable(tableName2, new byte[][] { Bytes.toBytes("fam") }, + new byte[][] { Bytes.toBytes("1") }); masterRpcService = UTIL.getHBaseCluster().getMaster().getMasterRpcServices(); procExec = UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); tableRegions1 = UTIL.getAdmin().getRegions(tableName1); @@ -145,26 +148,26 @@ public void tearDown() throws Exception { } private LockRequest getNamespaceLock(String namespace, String description) { - return LockServiceClient.buildLockRequest(LockServiceProtos.LockType.EXCLUSIVE, - namespace, null, null, description, HConstants.NO_NONCE, HConstants.NO_NONCE); + return LockServiceClient.buildLockRequest(LockServiceProtos.LockType.EXCLUSIVE, namespace, null, + null, description, HConstants.NO_NONCE, HConstants.NO_NONCE); } private LockRequest getTableExclusiveLock(TableName tableName, String description) { - return LockServiceClient.buildLockRequest(LockServiceProtos.LockType.EXCLUSIVE, - null, tableName, null, description, HConstants.NO_NONCE, HConstants.NO_NONCE); + return LockServiceClient.buildLockRequest(LockServiceProtos.LockType.EXCLUSIVE, null, tableName, + null, description, HConstants.NO_NONCE, HConstants.NO_NONCE); } private LockRequest getRegionLock(List regionInfos, String description) { - return LockServiceClient.buildLockRequest(LockServiceProtos.LockType.EXCLUSIVE, - null, null, regionInfos, description, HConstants.NO_NONCE, HConstants.NO_NONCE); + return LockServiceClient.buildLockRequest(LockServiceProtos.LockType.EXCLUSIVE, null, null, + regionInfos, description, HConstants.NO_NONCE, HConstants.NO_NONCE); } private void validateLockRequestException(LockRequest lockRequest, String message) throws Exception { exception.expect(ServiceException.class); exception.expectCause(IsInstanceOf.instanceOf(DoNotRetryIOException.class)); - exception.expectMessage( - StringStartsWith.startsWith("org.apache.hadoop.hbase.DoNotRetryIOException: " + exception + .expectMessage(StringStartsWith.startsWith("org.apache.hadoop.hbase.DoNotRetryIOException: " + "java.lang.IllegalArgumentException: " + message)); masterRpcService.requestLock(null, lockRequest); } @@ -185,7 +188,7 @@ public void testLockRequestValidationRegionsFromDifferentTable() throws Exceptio regions.addAll(tableRegions1); regions.addAll(tableRegions2); validateLockRequestException(getRegionLock(regions, "desc"), - "All regions should be from same table"); + "All regions should be from same table"); } /** @@ -196,7 +199,7 @@ private boolean awaitForLocked(long procId, long timeoutInMs) throws Exception { long deadline = EnvironmentEdgeManager.currentTime() + timeoutInMs; while (EnvironmentEdgeManager.currentTime() < deadline) { LockHeartbeatResponse response = masterRpcService.lockHeartbeat(null, - LockHeartbeatRequest.newBuilder().setProcId(procId).build()); + LockHeartbeatRequest.newBuilder().setProcId(procId).build()); if (response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) { assertEquals(HEARTBEAT_TIMEOUT, response.getTimeoutMs()); LOG.debug(String.format("Proc id %s acquired lock.", procId)); @@ -214,7 +217,7 @@ private long queueLock(LockRequest lockRequest) throws ServiceException { private void sendHeartbeatAndCheckLocked(long procId, boolean isLocked) throws ServiceException { LockHeartbeatResponse response = masterRpcService.lockHeartbeat(null, - LockHeartbeatRequest.newBuilder().setProcId(procId).build()); + 
LockHeartbeatRequest.newBuilder().setProcId(procId).build()); if (isLocked) { assertEquals(LockHeartbeatResponse.LockStatus.LOCKED, response.getLockStatus()); } else { @@ -225,7 +228,7 @@ private void sendHeartbeatAndCheckLocked(long procId, boolean isLocked) throws S private void releaseLock(long procId) throws ServiceException { masterRpcService.lockHeartbeat(null, - LockHeartbeatRequest.newBuilder().setProcId(procId).setKeepAlive(false).build()); + LockHeartbeatRequest.newBuilder().setProcId(procId).setKeepAlive(false).build()); } @Test @@ -233,11 +236,11 @@ public void testUpdateHeartbeatAndUnlockForTable() throws Exception { LockRequest lock = getTableExclusiveLock(tableName1, testMethodName); final long procId = queueLock(lock); assertTrue(awaitForLocked(procId, 2000)); - Thread.sleep(HEARTBEAT_TIMEOUT /2); + Thread.sleep(HEARTBEAT_TIMEOUT / 2); sendHeartbeatAndCheckLocked(procId, true); - Thread.sleep(HEARTBEAT_TIMEOUT /2); + Thread.sleep(HEARTBEAT_TIMEOUT / 2); sendHeartbeatAndCheckLocked(procId, true); - Thread.sleep(HEARTBEAT_TIMEOUT /2); + Thread.sleep(HEARTBEAT_TIMEOUT / 2); sendHeartbeatAndCheckLocked(procId, true); releaseLock(procId); sendHeartbeatAndCheckLocked(procId, false); @@ -261,11 +264,11 @@ public void testUpdateHeartbeatAndUnlockForNamespace() throws Exception { LockRequest lock = getNamespaceLock(namespace, testMethodName); final long procId = queueLock(lock); assertTrue(awaitForLocked(procId, 2000)); - Thread.sleep(HEARTBEAT_TIMEOUT /2); + Thread.sleep(HEARTBEAT_TIMEOUT / 2); sendHeartbeatAndCheckLocked(procId, true); - Thread.sleep(HEARTBEAT_TIMEOUT /2); + Thread.sleep(HEARTBEAT_TIMEOUT / 2); sendHeartbeatAndCheckLocked(procId, true); - Thread.sleep(HEARTBEAT_TIMEOUT /2); + Thread.sleep(HEARTBEAT_TIMEOUT / 2); sendHeartbeatAndCheckLocked(procId, true); releaseLock(procId); sendHeartbeatAndCheckLocked(procId, false); @@ -292,7 +295,7 @@ public void testTimeout() throws Exception { public void testMultipleLocks() throws Exception { LockRequest nsLock = getNamespaceLock(namespace, testMethodName); LockRequest tableLock1 = getTableExclusiveLock(tableName1, testMethodName); - LockRequest tableLock2 = getTableExclusiveLock(tableName2, testMethodName); + LockRequest tableLock2 = getTableExclusiveLock(tableName2, testMethodName); LockRequest regionsLock1 = getRegionLock(tableRegions1, testMethodName); LockRequest regionsLock2 = getRegionLock(tableRegions2, testMethodName); // Acquire namespace lock, then queue other locks. @@ -308,7 +311,8 @@ public void testMultipleLocks() throws Exception { // Assert tables & region locks are waiting because of namespace lock. long now = EnvironmentEdgeManager.currentTime(); // leave extra 10 msec in case more than half the HEARTBEAT_TIMEOUT has passed - Thread.sleep(Math.min(HEARTBEAT_TIMEOUT / 2, Math.max(HEARTBEAT_TIMEOUT-(now-start)-10, 0))); + Thread.sleep( + Math.min(HEARTBEAT_TIMEOUT / 2, Math.max(HEARTBEAT_TIMEOUT - (now - start) - 10, 0))); sendHeartbeatAndCheckLocked(nsProcId, true); sendHeartbeatAndCheckLocked(table1ProcId, false); sendHeartbeatAndCheckLocked(table2ProcId, false); @@ -354,8 +358,7 @@ public void testMultipleLocks() throws Exception { public void testLatch() throws Exception { CountDownLatch latch = new CountDownLatch(1); // MasterRpcServices don't set latch with LockProcedure, so create one and submit it directly. 
- LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(), - TableName.valueOf("table"), + LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(), TableName.valueOf("table"), org.apache.hadoop.hbase.procedure2.LockType.EXCLUSIVE, "desc", latch); procExec.submitProcedure(lockProc); assertTrue(latch.await(2000, TimeUnit.MILLISECONDS)); @@ -369,8 +372,8 @@ public void testLatch() throws Exception { public void testLocalLockTimeout() throws Exception { CountDownLatch latch = new CountDownLatch(1); // MasterRpcServices don't set latch with LockProcedure, so create one and submit it directly. - LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(), - TableName.valueOf("table"), LockType.EXCLUSIVE, "desc", latch); + LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(), TableName.valueOf("table"), + LockType.EXCLUSIVE, "desc", latch); procExec.submitProcedure(lockProc); assertTrue(awaitForLocked(lockProc.getProcId(), 2000)); Thread.sleep(LOCAL_LOCKS_TIMEOUT / 2); @@ -399,11 +402,11 @@ private void testRemoteLockRecovery(LockRequest lock) throws Exception { // After recovery, remote locks should reacquire locks and function normally. assertTrue(awaitForLocked(procId, 2000)); - Thread.sleep(HEARTBEAT_TIMEOUT/2); + Thread.sleep(HEARTBEAT_TIMEOUT / 2); sendHeartbeatAndCheckLocked(procId, true); - Thread.sleep(HEARTBEAT_TIMEOUT/2); + Thread.sleep(HEARTBEAT_TIMEOUT / 2); sendHeartbeatAndCheckLocked(procId, true); - Thread.sleep(2 * HEARTBEAT_TIMEOUT + HEARTBEAT_TIMEOUT/2); + Thread.sleep(2 * HEARTBEAT_TIMEOUT + HEARTBEAT_TIMEOUT / 2); sendHeartbeatAndCheckLocked(procId, false); ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); @@ -431,8 +434,8 @@ public void testRemoteRegionLockRecovery() throws Exception { public void testLocalMasterLockRecovery() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); CountDownLatch latch = new CountDownLatch(1); - LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(), - TableName.valueOf("table"), LockType.EXCLUSIVE, "desc", latch); + LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(), TableName.valueOf("table"), + LockType.EXCLUSIVE, "desc", latch); procExec.submitProcedure(lockProc); assertTrue(latch.await(2000, TimeUnit.MILLISECONDS)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/migrate/TestInitializeStoreFileTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/migrate/TestInitializeStoreFileTracker.java index 10b0adb69cf4..bd31da84d1f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/migrate/TestInitializeStoreFileTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/migrate/TestInitializeStoreFileTracker.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +45,7 @@ public class TestInitializeStoreFileTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestInitializeStoreFileTracker.class); + HBaseClassTestRule.forClass(TestInitializeStoreFileTracker.class); private final static String[] tables = new String[] { "t1", "t2", "t3", "t4", "t5", "t6" }; private final static String famStr = "f1"; private final static byte[] fam = Bytes.toBytes(famStr); @@ -75,20 +74,20 @@ public void tearDown() throws Exception { @Test public void testMigrateStoreFileTracker() throws IOException, InterruptedException { - //create tables to test + // create tables to test for (int i = 0; i < tables.length; i++) { tableDescriptor = HTU.createModifyableTableDescriptor(tables[i]) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam).build()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam).build()).build(); HTU.createTable(tableDescriptor, null); } TableDescriptors tableDescriptors = HTU.getMiniHBaseCluster().getMaster().getTableDescriptors(); for (int i = 0; i < tables.length; i++) { TableDescriptor tdAfterCreated = tableDescriptors.get(TableName.valueOf(tables[i])); - //make sure that TRACKER_IMPL was set by default after tables have been created. + // make sure that TRACKER_IMPL was set by default after tables have been created. Assert.assertNotNull(tdAfterCreated.getValue(StoreFileTrackerFactory.TRACKER_IMPL)); - //Remove StoreFileTracker impl from tableDescriptor + // Remove StoreFileTracker impl from tableDescriptor TableDescriptor tdRemovedSFT = TableDescriptorBuilder.newBuilder(tdAfterCreated) - .removeValue(StoreFileTrackerFactory.TRACKER_IMPL).build(); + .removeValue(StoreFileTrackerFactory.TRACKER_IMPL).build(); tableDescriptors.update(tdRemovedSFT); } HTU.getMiniHBaseCluster().stopMaster(0).join(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java index 3e96f4b3ea97..5be9e6110b57 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerManagerConfigurationObserver.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; @@ -37,28 +38,33 @@ import org.junit.experimental.categories.Category; import org.mockito.Mock; import org.mockito.MockitoAnnotations; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.RateLimiter; /** * Test that configuration changes are propagated to all children. 
*/ -@Category({ MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionNormalizerManagerConfigurationObserver { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionNormalizerManagerConfigurationObserver.class); + HBaseClassTestRule.forClass(TestRegionNormalizerManagerConfigurationObserver.class); private static final HBaseTestingUtil testUtil = new HBaseTestingUtil(); private static final Pattern rateLimitPattern = - Pattern.compile("RateLimiter\\[stableRate=(?.+)qps]"); + Pattern.compile("RateLimiter\\[stableRate=(?.+)qps]"); private Configuration conf; private SimpleRegionNormalizer normalizer; - @Mock private MasterServices masterServices; - @Mock private RegionNormalizerTracker tracker; - @Mock private RegionNormalizerChore chore; - @Mock private RegionNormalizerWorkQueue queue; + @Mock + private MasterServices masterServices; + @Mock + private RegionNormalizerTracker tracker; + @Mock + private RegionNormalizerChore chore; + @Mock + private RegionNormalizerWorkQueue queue; private RegionNormalizerWorker worker; private ConfigurationManager configurationManager; @@ -69,7 +75,7 @@ public void before() { normalizer = new SimpleRegionNormalizer(); worker = new RegionNormalizerWorker(conf, masterServices, normalizer, queue); final RegionNormalizerManager normalizerManager = - new RegionNormalizerManager(tracker, chore, queue, worker); + new RegionNormalizerManager(tracker, chore, queue, worker); configurationManager = new ConfigurationManager(); configurationManager.registerObserver(normalizerManager); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java index 7e6c74910edf..f70aaca213a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java @@ -24,6 +24,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -51,12 +52,12 @@ /** * Tests that {@link RegionNormalizerWorkQueue} implements the contract described in its docstring. 
*/ -@Category({ MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionNormalizerWorkQueue { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionNormalizerWorkQueue.class); + HBaseClassTestRule.forClass(TestRegionNormalizerWorkQueue.class); @Rule public TestName testName = new TestName(); @@ -65,9 +66,7 @@ public class TestRegionNormalizerWorkQueue { public void testElementUniquenessAndFIFO() throws Exception { final RegionNormalizerWorkQueue<Integer> queue = new RegionNormalizerWorkQueue<>(); final List<Integer> content = new LinkedList<>(); - IntStream.of(4, 3, 2, 1, 4, 3, 2, 1) - .boxed() - .forEach(queue::put); + IntStream.of(4, 3, 2, 1, 4, 3, 2, 1).boxed().forEach(queue::put); assertEquals(4, queue.size()); while (queue.size() > 0) { content.add(queue.take()); @@ -94,8 +93,8 @@ public void testPriorityAndFIFO() throws Exception { queue.putFirst(0); assertEquals(5, queue.size()); drainTo(queue, content); - assertThat("putFirst items should jump the queue, preserving existing order", - content, contains(0, 4, 3, 2, 1)); + assertThat("putFirst items should jump the queue, preserving existing order", content, + contains(0, 4, 3, 2, 1)); queue.clear(); content.clear(); @@ -103,8 +102,8 @@ public void testPriorityAndFIFO() throws Exception { queue.putFirst(1); assertEquals(4, queue.size()); drainTo(queue, content); - assertThat("existing items re-added with putFirst should jump the queue", - content, contains(1, 4, 3, 2)); + assertThat("existing items re-added with putFirst should jump the queue", content, + contains(1, 4, 3, 2)); queue.clear(); content.clear(); @@ -118,10 +117,7 @@ public void testPriorityAndFIFO() throws Exception { } private enum Action { - PUT, - PUT_FIRST, - PUT_ALL, - PUT_ALL_FIRST, + PUT, PUT_FIRST, PUT_ALL, PUT_ALL_FIRST, } /** @@ -147,16 +143,14 @@ public void testConcurrentPut() throws Exception { break; } case PUT_ALL: { - final List<Integer> vals = rand.ints(5, 0, maxValue) - .boxed() - .collect(Collectors.toList()); + final List<Integer> vals = + rand.ints(5, 0, maxValue).boxed().collect(Collectors.toList()); queue.putAll(vals); break; } case PUT_ALL_FIRST: { - final List<Integer> vals = rand.ints(5, 0, maxValue) - .boxed() - .collect(Collectors.toList()); + final List<Integer> vals = + rand.ints(5, 0, maxValue).boxed().collect(Collectors.toList()); queue.putAllFirst(vals); break; } @@ -168,14 +162,13 @@ public void testConcurrentPut() throws Exception { final int numThreads = 5; final CompletableFuture[] futures = IntStream.range(0, numThreads) - .mapToObj(val -> CompletableFuture.runAsync(producer)) - .toArray(CompletableFuture[]::new); + .mapToObj(val -> CompletableFuture.runAsync(producer)).toArray(CompletableFuture[]::new); CompletableFuture.allOf(futures).join(); final List<Integer> content = new ArrayList<>(queue.size()); drainTo(queue, content); - assertThat("at most `maxValue` items should be present.", - content.size(), lessThanOrEqualTo(maxValue)); + assertThat("at most `maxValue` items should be present.", content.size(), + lessThanOrEqualTo(maxValue)); assertEquals("all items should be unique.", content.size(), new HashSet<>(content).size()); } @@ -225,7 +218,7 @@ public void testTake() throws Exception { } private static void drainTo(final RegionNormalizerWorkQueue queue, Collection dest) - throws InterruptedException { + throws InterruptedException { assertThat(queue.size(), greaterThan(0)); while (queue.size() > 0) { dest.add(queue.take()); diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java index 53f10b73b305..7786924c9303 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java @@ -28,6 +28,7 @@ import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.when; + import java.time.Duration; import java.util.Arrays; import java.util.concurrent.ExecutorService; @@ -64,21 +65,22 @@ import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * A test over {@link RegionNormalizerWorker}. Being a background thread, the only points of - * interaction we have to this class are its input source ({@link RegionNormalizerWorkQueue} and - * its callbacks invoked against {@link RegionNormalizer} and {@link MasterServices}. The work - * queue is simple enough to use directly; for {@link MasterServices}, use a mock because, as of - * now, the worker only invokes 4 methods. + * interaction we have to this class are its input source ({@link RegionNormalizerWorkQueue} and its + * callbacks invoked against {@link RegionNormalizer} and {@link MasterServices}. The work queue is + * simple enough to use directly; for {@link MasterServices}, use a mock because, as of now, the + * worker only invokes 4 methods. */ -@Category({ MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionNormalizerWorker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionNormalizerWorker.class); + HBaseClassTestRule.forClass(TestRegionNormalizerWorker.class); @Rule public TestName testName = new TestName(); @@ -108,12 +110,10 @@ public void before() throws Exception { workerThreadThrowable.set(null); final String threadNameFmt = - TestRegionNormalizerWorker.class.getSimpleName() + "-" + testName.getMethodName() + "-%d"; - final ThreadFactory threadFactory = new ThreadFactoryBuilder() - .setNameFormat(threadNameFmt) - .setDaemon(true) - .setUncaughtExceptionHandler((t, e) -> workerThreadThrowable.set(e)) - .build(); + TestRegionNormalizerWorker.class.getSimpleName() + "-" + testName.getMethodName() + "-%d"; + final ThreadFactory threadFactory = + new ThreadFactoryBuilder().setNameFormat(threadNameFmt).setDaemon(true) + .setUncaughtExceptionHandler((t, e) -> workerThreadThrowable.set(e)).build(); workerPool = Executors.newSingleThreadExecutor(threadFactory); } @@ -129,49 +129,42 @@ public void after() throws Exception { @Test public void testMergeCounter() throws Exception { final TableName tn = tableName.getTableName(); - final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn) - .setNormalizationEnabled(true) - .build(); + final TableDescriptor tnDescriptor = + TableDescriptorBuilder.newBuilder(tn).setNormalizationEnabled(true).build(); when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor); - when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong())) - .thenReturn(1L); - when(regionNormalizer.computePlansForTable(tnDescriptor)) - .thenReturn(singletonList(new 
MergeNormalizationPlan.Builder() - .addTarget(RegionInfoBuilder.newBuilder(tn).build(), 10) - .addTarget(RegionInfoBuilder.newBuilder(tn).build(), 20) - .build())); + when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong())).thenReturn(1L); + when(regionNormalizer.computePlansForTable(tnDescriptor)).thenReturn(singletonList( + new MergeNormalizationPlan.Builder().addTarget(RegionInfoBuilder.newBuilder(tn).build(), 10) + .addTarget(RegionInfoBuilder.newBuilder(tn).build(), 20).build())); final RegionNormalizerWorker worker = new RegionNormalizerWorker( - testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); + testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); final long beforeMergePlanCount = worker.getMergePlanCount(); workerPool.submit(worker); queue.put(tn); - assertThatEventually("executing work should see plan count increase", - worker::getMergePlanCount, greaterThan(beforeMergePlanCount)); + assertThatEventually("executing work should see plan count increase", worker::getMergePlanCount, + greaterThan(beforeMergePlanCount)); } @Test public void testSplitCounter() throws Exception { final TableName tn = tableName.getTableName(); - final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn) - .setNormalizationEnabled(true) - .build(); + final TableDescriptor tnDescriptor = + TableDescriptorBuilder.newBuilder(tn).setNormalizationEnabled(true).build(); when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor); - when(masterServices.splitRegion(any(), any(), anyLong(), anyLong())) - .thenReturn(1L); - when(regionNormalizer.computePlansForTable(tnDescriptor)) - .thenReturn(singletonList( - new SplitNormalizationPlan(RegionInfoBuilder.newBuilder(tn).build(), 10))); + when(masterServices.splitRegion(any(), any(), anyLong(), anyLong())).thenReturn(1L); + when(regionNormalizer.computePlansForTable(tnDescriptor)).thenReturn( + singletonList(new SplitNormalizationPlan(RegionInfoBuilder.newBuilder(tn).build(), 10))); final RegionNormalizerWorker worker = new RegionNormalizerWorker( - testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); + testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); final long beforeSplitPlanCount = worker.getSplitPlanCount(); workerPool.submit(worker); queue.put(tn); - assertThatEventually("executing work should see plan count increase", - worker::getSplitPlanCount, greaterThan(beforeSplitPlanCount)); + assertThatEventually("executing work should see plan count increase", worker::getSplitPlanCount, + greaterThan(beforeSplitPlanCount)); } /** @@ -181,30 +174,23 @@ public void testSplitCounter() throws Exception { @Test public void testRateLimit() throws Exception { final TableName tn = tableName.getTableName(); - final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn) - .setNormalizationEnabled(true) - .build(); + final TableDescriptor tnDescriptor = + TableDescriptorBuilder.newBuilder(tn).setNormalizationEnabled(true).build(); final RegionInfo splitRegionInfo = RegionInfoBuilder.newBuilder(tn).build(); final RegionInfo mergeRegionInfo1 = RegionInfoBuilder.newBuilder(tn).build(); final RegionInfo mergeRegionInfo2 = RegionInfoBuilder.newBuilder(tn).build(); when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor); - when(masterServices.splitRegion(any(), any(), anyLong(), anyLong())) - .thenReturn(1L); - when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong())) - 
.thenReturn(1L); - when(regionNormalizer.computePlansForTable(tnDescriptor)) - .thenReturn(Arrays.asList( - new SplitNormalizationPlan(splitRegionInfo, 2), - new MergeNormalizationPlan.Builder() - .addTarget(mergeRegionInfo1, 1) - .addTarget(mergeRegionInfo2, 2) - .build(), - new SplitNormalizationPlan(splitRegionInfo, 1))); + when(masterServices.splitRegion(any(), any(), anyLong(), anyLong())).thenReturn(1L); + when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong())).thenReturn(1L); + when(regionNormalizer.computePlansForTable(tnDescriptor)).thenReturn(Arrays.asList( + new SplitNormalizationPlan(splitRegionInfo, 2), new MergeNormalizationPlan.Builder() + .addTarget(mergeRegionInfo1, 1).addTarget(mergeRegionInfo2, 2).build(), + new SplitNormalizationPlan(splitRegionInfo, 1))); final Configuration conf = testingUtility.getConfiguration(); conf.set("hbase.normalizer.throughput.max_bytes_per_sec", "1m"); final RegionNormalizerWorker worker = new RegionNormalizerWorker( - testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); + testingUtility.getConfiguration(), masterServices, regionNormalizer, queue); workerPool.submit(worker); final long startTime = System.nanoTime(); queue.put(tn); @@ -220,30 +206,26 @@ public void testRateLimit() throws Exception { } /** - * Repeatedly evaluates {@code matcher} against the result of calling {@code actualSupplier} - * until the matcher succeeds or the timeout period of 30 seconds is exhausted. + * Repeatedly evaluates {@code matcher} against the result of calling {@code actualSupplier} until + * the matcher succeeds or the timeout period of 30 seconds is exhausted. */ - private void assertThatEventually( - final String reason, - final Supplier actualSupplier, - final Matcher matcher - ) throws Exception { + private void assertThatEventually(final String reason, + final Supplier actualSupplier, final Matcher matcher) + throws Exception { testingUtility.waitFor(TimeUnit.SECONDS.toMillis(30), new Waiter.ExplainingPredicate() { private T lastValue = null; @Override public String explainFailure() { - final Description description = new StringDescription() - .appendText(reason) - .appendText("\nExpected: ") - .appendDescriptionOf(matcher) - .appendText("\n but: "); + final Description description = new StringDescription().appendText(reason) + .appendText("\nExpected: ").appendDescriptionOf(matcher).appendText("\n but: "); matcher.describeMismatch(lastValue, description); return description.toString(); } - @Override public boolean evaluate() { + @Override + public boolean evaluate() { lastValue = actualSupplier.get(); return matcher.matches(lastValue); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index 7cbfba7a1d5c..4608bb4d315d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -38,6 +38,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.RETURNS_DEEP_STUBS; import static org.mockito.Mockito.when; + import java.time.Instant; import java.time.Period; import java.util.ArrayList; @@ -73,7 +74,7 @@ /** * Tests logic of {@link SimpleRegionNormalizer}. 
*/ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestSimpleRegionNormalizer { @ClassRule @@ -121,8 +122,7 @@ public void testNoNormalizationIfTooFewRegions() { public void testNoNormalizationOnNormalizedCluster() { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 4); - final Map regionSizes = - createRegionSizesMap(regionInfos, 10, 15, 8, 10); + final Map regionSizes = createRegionSizesMap(regionInfos, 10, 15, 8, 10); setupMocksForNormalizer(regionSizes, regionInfos); List plans = normalizer.computePlansForTable(tableDescriptor); @@ -135,9 +135,9 @@ private void noNormalizationOnTransitioningRegions(final RegionState.State state final Map regionSizes = createRegionSizesMap(regionInfos, 10, 1, 100); setupMocksForNormalizer(regionSizes, regionInfos); - when(masterServices.getAssignmentManager().getRegionStates() - .getRegionState(any(RegionInfo.class))) - .thenReturn(RegionState.createForTesting(null, state)); + when( + masterServices.getAssignmentManager().getRegionStates().getRegionState(any(RegionInfo.class))) + .thenReturn(RegionState.createForTesting(null, state)); assertThat(normalizer.getMergeMinRegionCount(), greaterThanOrEqualTo(regionInfos.size())); List plans = normalizer.computePlansForTable(tableDescriptor); @@ -178,16 +178,12 @@ public void testNoNormalizationOnSplitRegions() { public void testMergeOfSmallRegions() { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 5); - final Map regionSizes = - createRegionSizesMap(regionInfos, 15, 5, 5, 15, 16); + final Map regionSizes = createRegionSizesMap(regionInfos, 15, 5, 5, 15, 16); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat( - normalizer.computePlansForTable(tableDescriptor), - contains(new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(1), 5) - .addTarget(regionInfos.get(2), 5) - .build())); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(1), 5) + .addTarget(regionInfos.get(2), 5).build())); } // Test for situation illustrated in HBASE-14867 @@ -196,23 +192,19 @@ public void testMergeOfSecondSmallestRegions() { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 6); final Map regionSizes = - createRegionSizesMap(regionInfos, 1, 10000, 10000, 10000, 2700, 2700); + createRegionSizesMap(regionInfos, 1, 10000, 10000, 10000, 2700, 2700); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat( - normalizer.computePlansForTable(tableDescriptor), - contains(new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(4), 2700) - .addTarget(regionInfos.get(5), 2700) - .build())); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(4), 2700) + .addTarget(regionInfos.get(5), 2700).build())); } @Test public void testMergeOfSmallNonAdjacentRegions() { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 5); - final Map regionSizes = - createRegionSizesMap(regionInfos, 15, 5, 16, 15, 5); + final Map regionSizes = createRegionSizesMap(regionInfos, 15, 5, 16, 15, 5); setupMocksForNormalizer(regionSizes, regionInfos); List plans = normalizer.computePlansForTable(tableDescriptor); @@ -223,12 +215,11 @@ public void testMergeOfSmallNonAdjacentRegions() { 
public void testSplitOfLargeRegion() { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 4); - final Map regionSizes = - createRegionSizesMap(regionInfos, 8, 6, 10, 30); + final Map regionSizes = createRegionSizesMap(regionInfos, 8, 6, 10, 30); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat(normalizer.computePlansForTable(tableDescriptor), contains( - new SplitNormalizationPlan(regionInfos.get(3), 30))); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new SplitNormalizationPlan(regionInfos.get(3), 30))); } @Test @@ -236,53 +227,43 @@ public void testWithTargetRegionSize() throws Exception { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 6); final Map regionSizes = - createRegionSizesMap(regionInfos, 20, 40, 60, 80, 100, 120); + createRegionSizesMap(regionInfos, 20, 40, 60, 80, 100, 120); setupMocksForNormalizer(regionSizes, regionInfos); // test when target region size is 20 when(tableDescriptor.getNormalizerTargetRegionSize()).thenReturn(20L); - assertThat(normalizer.computePlansForTable(tableDescriptor), contains( - new SplitNormalizationPlan(regionInfos.get(2), 60), - new SplitNormalizationPlan(regionInfos.get(3), 80), - new SplitNormalizationPlan(regionInfos.get(4), 100), - new SplitNormalizationPlan(regionInfos.get(5), 120) - )); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new SplitNormalizationPlan(regionInfos.get(2), 60), + new SplitNormalizationPlan(regionInfos.get(3), 80), + new SplitNormalizationPlan(regionInfos.get(4), 100), + new SplitNormalizationPlan(regionInfos.get(5), 120))); // test when target region size is 200 when(tableDescriptor.getNormalizerTargetRegionSize()).thenReturn(200L); - assertThat( - normalizer.computePlansForTable(tableDescriptor), - contains( - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 20) - .addTarget(regionInfos.get(1), 40) - .addTarget(regionInfos.get(2), 60) - .addTarget(regionInfos.get(3), 80) - .build())); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 20) + .addTarget(regionInfos.get(1), 40).addTarget(regionInfos.get(2), 60) + .addTarget(regionInfos.get(3), 80).build())); } @Test public void testSplitWithTargetRegionCount() throws Exception { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 4); - final Map regionSizes = - createRegionSizesMap(regionInfos, 20, 40, 60, 80); + final Map regionSizes = createRegionSizesMap(regionInfos, 20, 40, 60, 80); setupMocksForNormalizer(regionSizes, regionInfos); // test when target region count is 8 when(tableDescriptor.getNormalizerTargetRegionCount()).thenReturn(8); - assertThat(normalizer.computePlansForTable(tableDescriptor), contains( - new SplitNormalizationPlan(regionInfos.get(2), 60), - new SplitNormalizationPlan(regionInfos.get(3), 80))); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new SplitNormalizationPlan(regionInfos.get(2), 60), + new SplitNormalizationPlan(regionInfos.get(3), 80))); // test when target region count is 3 when(tableDescriptor.getNormalizerTargetRegionCount()).thenReturn(3); - assertThat( - normalizer.computePlansForTable(tableDescriptor), - contains(new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 20) - .addTarget(regionInfos.get(1), 40) - .build())); + 
assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 20) + .addTarget(regionInfos.get(1), 40).build())); } @Test @@ -290,11 +271,9 @@ public void testHonorsSplitEnabled() { conf.setBoolean(SPLIT_ENABLED_KEY, true); final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 5); - final Map regionSizes = - createRegionSizesMap(regionInfos, 5, 5, 20, 5, 5); + final Map regionSizes = createRegionSizesMap(regionInfos, 5, 5, 20, 5, 5); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat( - normalizer.computePlansForTable(tableDescriptor), + assertThat(normalizer.computePlansForTable(tableDescriptor), contains(instanceOf(SplitNormalizationPlan.class))); conf.setBoolean(SPLIT_ENABLED_KEY, false); @@ -307,11 +286,9 @@ public void testHonorsSplitEnabledInTD() { conf.setBoolean(SPLIT_ENABLED_KEY, true); final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 5); - final Map regionSizes = - createRegionSizesMap(regionInfos, 5, 5, 20, 5, 5); + final Map regionSizes = createRegionSizesMap(regionInfos, 5, 5, 20, 5, 5); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat( - normalizer.computePlansForTable(tableDescriptor), + assertThat(normalizer.computePlansForTable(tableDescriptor), contains(instanceOf(SplitNormalizationPlan.class))); // When hbase.normalizer.split.enabled is true in configuration, but false in table descriptor @@ -322,8 +299,7 @@ public void testHonorsSplitEnabledInTD() { conf.setBoolean(SPLIT_ENABLED_KEY, false); setupMocksForNormalizer(regionSizes, regionInfos); when(tableDescriptor.getValue(SPLIT_ENABLED_KEY)).thenReturn("true"); - assertThat( - normalizer.computePlansForTable(tableDescriptor), + assertThat(normalizer.computePlansForTable(tableDescriptor), contains(instanceOf(SplitNormalizationPlan.class))); } @@ -332,11 +308,9 @@ public void testHonorsMergeEnabled() { conf.setBoolean(MERGE_ENABLED_KEY, true); final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 5); - final Map regionSizes = - createRegionSizesMap(regionInfos, 20, 5, 5, 20, 20); + final Map regionSizes = createRegionSizesMap(regionInfos, 20, 5, 5, 20, 20); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat( - normalizer.computePlansForTable(tableDescriptor), + assertThat(normalizer.computePlansForTable(tableDescriptor), contains(instanceOf(MergeNormalizationPlan.class))); conf.setBoolean(MERGE_ENABLED_KEY, false); @@ -349,11 +323,9 @@ public void testHonorsMergeEnabledInTD() { conf.setBoolean(MERGE_ENABLED_KEY, true); final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 5); - final Map regionSizes = - createRegionSizesMap(regionInfos, 20, 5, 5, 20, 20); + final Map regionSizes = createRegionSizesMap(regionInfos, 20, 5, 5, 20, 20); setupMocksForNormalizer(regionSizes, regionInfos); - assertThat( - normalizer.computePlansForTable(tableDescriptor), + assertThat(normalizer.computePlansForTable(tableDescriptor), contains(instanceOf(MergeNormalizationPlan.class))); // When hbase.normalizer.merge.enabled is true in configuration, but false in table descriptor @@ -364,8 +336,7 @@ public void testHonorsMergeEnabledInTD() { conf.setBoolean(MERGE_ENABLED_KEY, false); setupMocksForNormalizer(regionSizes, regionInfos); when(tableDescriptor.getValue(MERGE_ENABLED_KEY)).thenReturn("true"); - assertThat( - 
normalizer.computePlansForTable(tableDescriptor), + assertThat(normalizer.computePlansForTable(tableDescriptor), contains(instanceOf(MergeNormalizationPlan.class))); } @@ -394,19 +365,17 @@ private void honorsMinimumRegionCount(String confKey) { assertEquals(1, normalizer.getMergeMinRegionCount()); List plans = normalizer.computePlansForTable(tableDescriptor); - assertThat(plans, contains( - new SplitNormalizationPlan(regionInfos.get(2), 10), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 1) - .addTarget(regionInfos.get(1), 1) - .build())); + assertThat(plans, + contains(new SplitNormalizationPlan(regionInfos.get(2), 10), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 1) + .addTarget(regionInfos.get(1), 1).build())); // have to call setupMocks again because we don't have dynamic config update on normalizer. conf.setInt(confKey, 4); setupMocksForNormalizer(regionSizes, regionInfos); assertEquals(4, normalizer.getMergeMinRegionCount()); - assertThat(normalizer.computePlansForTable(tableDescriptor), contains( - new SplitNormalizationPlan(regionInfos.get(2), 10))); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new SplitNormalizationPlan(regionInfos.get(2), 10))); } @Test @@ -435,16 +404,14 @@ private void honorsOldMinimumRegionCountInTD(String confKey) { assertEquals(1, normalizer.getMergeMinRegionCount()); List plans = normalizer.computePlansForTable(tableDescriptor); - assertThat(plans, contains( - new SplitNormalizationPlan(regionInfos.get(2), 10), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 1) - .addTarget(regionInfos.get(1), 1) - .build())); + assertThat(plans, + contains(new SplitNormalizationPlan(regionInfos.get(2), 10), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 1) + .addTarget(regionInfos.get(1), 1).build())); when(tableDescriptor.getValue(confKey)).thenReturn("4"); - assertThat(normalizer.computePlansForTable(tableDescriptor), contains( - new SplitNormalizationPlan(regionInfos.get(2), 10))); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new SplitNormalizationPlan(regionInfos.get(2), 10))); } @Test @@ -452,19 +419,17 @@ public void testHonorsMergeMinRegionAge() { conf.setInt(MERGE_MIN_REGION_AGE_DAYS_KEY, 7); final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 4); - final Map regionSizes = - createRegionSizesMap(regionInfos, 1, 1, 10, 10); + final Map regionSizes = createRegionSizesMap(regionInfos, 1, 1, 10, 10); setupMocksForNormalizer(regionSizes, regionInfos); assertEquals(Period.ofDays(7), normalizer.getMergeMinRegionAge()); - assertThat( - normalizer.computePlansForTable(tableDescriptor), + assertThat(normalizer.computePlansForTable(tableDescriptor), everyItem(not(instanceOf(MergeNormalizationPlan.class)))); // have to call setupMocks again because we don't have dynamic config update on normalizer. 
conf.unset(MERGE_MIN_REGION_AGE_DAYS_KEY); setupMocksForNormalizer(regionSizes, regionInfos); - assertEquals( - Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS), normalizer.getMergeMinRegionAge()); + assertEquals(Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS), + normalizer.getMergeMinRegionAge()); final List plans = normalizer.computePlansForTable(tableDescriptor); assertThat(plans, not(empty())); assertThat(plans, everyItem(instanceOf(MergeNormalizationPlan.class))); @@ -475,12 +440,10 @@ public void testHonorsMergeMinRegionAgeInTD() { conf.setInt(MERGE_MIN_REGION_AGE_DAYS_KEY, 7); final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 4); - final Map regionSizes = - createRegionSizesMap(regionInfos, 1, 1, 10, 10); + final Map regionSizes = createRegionSizesMap(regionInfos, 1, 1, 10, 10); setupMocksForNormalizer(regionSizes, regionInfos); assertEquals(Period.ofDays(7), normalizer.getMergeMinRegionAge()); - assertThat( - normalizer.computePlansForTable(tableDescriptor), + assertThat(normalizer.computePlansForTable(tableDescriptor), everyItem(not(instanceOf(MergeNormalizationPlan.class)))); conf.unset(MERGE_MIN_REGION_AGE_DAYS_KEY); @@ -501,18 +464,14 @@ public void testHonorsMergeMinRegionSize() { conf.setBoolean(SPLIT_ENABLED_KEY, false); final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 5); - final Map regionSizes = - createRegionSizesMap(regionInfos, 1, 2, 0, 10, 10); + final Map regionSizes = createRegionSizesMap(regionInfos, 1, 2, 0, 10, 10); setupMocksForNormalizer(regionSizes, regionInfos); assertFalse(normalizer.isSplitEnabled()); assertEquals(1, normalizer.getMergeMinRegionSizeMb()); - assertThat( - normalizer.computePlansForTable(tableDescriptor), - contains(new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 1) - .addTarget(regionInfos.get(1), 2) - .build())); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 1) + .addTarget(regionInfos.get(1), 2).build())); conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 3); setupMocksForNormalizer(regionSizes, regionInfos); @@ -525,18 +484,14 @@ public void testHonorsMergeMinRegionSizeInTD() { conf.setBoolean(SPLIT_ENABLED_KEY, false); final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 5); - final Map regionSizes = - createRegionSizesMap(regionInfos, 1, 2, 0, 10, 10); + final Map regionSizes = createRegionSizesMap(regionInfos, 1, 2, 0, 10, 10); setupMocksForNormalizer(regionSizes, regionInfos); assertFalse(normalizer.isSplitEnabled()); assertEquals(1, normalizer.getMergeMinRegionSizeMb()); - assertThat( - normalizer.computePlansForTable(tableDescriptor), - contains(new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 1) - .addTarget(regionInfos.get(1), 2) - .build())); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 1) + .addTarget(regionInfos.get(1), 2).build())); when(tableDescriptor.getValue(MERGE_MIN_REGION_SIZE_MB_KEY)).thenReturn("3"); assertThat(normalizer.computePlansForTable(tableDescriptor), empty()); @@ -549,24 +504,19 @@ public void testMergeEmptyRegions0() { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 7); final Map regionSizes = - createRegionSizesMap(regionInfos, 0, 1, 10, 0, 9, 10, 0); + 
createRegionSizesMap(regionInfos, 0, 1, 10, 0, 9, 10, 0); setupMocksForNormalizer(regionSizes, regionInfos); assertFalse(normalizer.isSplitEnabled()); assertEquals(0, normalizer.getMergeMinRegionSizeMb()); - assertThat(normalizer.computePlansForTable(tableDescriptor), contains( - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 0) - .addTarget(regionInfos.get(1), 1) - .build(), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(2), 10) - .addTarget(regionInfos.get(3), 0) - .build(), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(5), 10) - .addTarget(regionInfos.get(6), 0) - .build())); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains( + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 1).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(2), 10) + .addTarget(regionInfos.get(3), 0).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(5), 10) + .addTarget(regionInfos.get(6), 0).build())); } @Test @@ -576,28 +526,21 @@ public void testMergeEmptyRegions1() { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 8); final Map regionSizes = - createRegionSizesMap(regionInfos, 0, 1, 10, 0, 9, 0, 10, 0); + createRegionSizesMap(regionInfos, 0, 1, 10, 0, 9, 0, 10, 0); setupMocksForNormalizer(regionSizes, regionInfos); assertFalse(normalizer.isSplitEnabled()); assertEquals(0, normalizer.getMergeMinRegionSizeMb()); - assertThat(normalizer.computePlansForTable(tableDescriptor), contains( - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 0) - .addTarget(regionInfos.get(1), 1) - .build(), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(2), 10) - .addTarget(regionInfos.get(3), 0) - .build(), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(4), 9) - .addTarget(regionInfos.get(5), 0) - .build(), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(6), 10) - .addTarget(regionInfos.get(7), 0) - .build())); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains( + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 1).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(2), 10) + .addTarget(regionInfos.get(3), 0).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(4), 9) + .addTarget(regionInfos.get(5), 0).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(6), 10) + .addTarget(regionInfos.get(7), 0).build())); } @Test @@ -607,29 +550,22 @@ public void testMergeEmptyRegions2() { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 8); final Map regionSizes = - createRegionSizesMap(regionInfos, 0, 10, 1, 0, 9, 0, 10, 0); + createRegionSizesMap(regionInfos, 0, 10, 1, 0, 9, 0, 10, 0); setupMocksForNormalizer(regionSizes, regionInfos); assertFalse(normalizer.isSplitEnabled()); assertEquals(0, normalizer.getMergeMinRegionSizeMb()); List plans = normalizer.computePlansForTable(tableDescriptor); - assertThat(plans, contains( - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 0) - .addTarget(regionInfos.get(1), 10) - .build(), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(2), 1) - .addTarget(regionInfos.get(3), 0) - .build(), - new MergeNormalizationPlan.Builder() - 
.addTarget(regionInfos.get(4), 9) - .addTarget(regionInfos.get(5), 0) - .build(), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(6), 10) - .addTarget(regionInfos.get(7), 0) - .build())); + assertThat(plans, + contains( + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 0) + .addTarget(regionInfos.get(1), 10).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(2), 1) + .addTarget(regionInfos.get(3), 0).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(4), 9) + .addTarget(regionInfos.get(5), 0).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(6), 10) + .addTarget(regionInfos.get(7), 0).build())); } @Test @@ -638,24 +574,18 @@ public void testSplitAndMultiMerge() { final TableName tableName = name.getTableName(); final List regionInfos = createRegionInfos(tableName, 8); final Map regionSizes = - createRegionSizesMap(regionInfos, 3, 1, 1, 30, 9, 3, 1, 0); + createRegionSizesMap(regionInfos, 3, 1, 1, 30, 9, 3, 1, 0); setupMocksForNormalizer(regionSizes, regionInfos); assertTrue(normalizer.isMergeEnabled()); assertTrue(normalizer.isSplitEnabled()); assertEquals(0, normalizer.getMergeMinRegionSizeMb()); - assertThat(normalizer.computePlansForTable(tableDescriptor), contains( - new SplitNormalizationPlan(regionInfos.get(3), 30), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(0), 3) - .addTarget(regionInfos.get(1), 1) - .addTarget(regionInfos.get(2), 1) - .build(), - new MergeNormalizationPlan.Builder() - .addTarget(regionInfos.get(5), 3) - .addTarget(regionInfos.get(6), 1) - .addTarget(regionInfos.get(7), 0) - .build())); + assertThat(normalizer.computePlansForTable(tableDescriptor), + contains(new SplitNormalizationPlan(regionInfos.get(3), 30), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(0), 3) + .addTarget(regionInfos.get(1), 1).addTarget(regionInfos.get(2), 1).build(), + new MergeNormalizationPlan.Builder().addTarget(regionInfos.get(5), 3) + .addTarget(regionInfos.get(6), 1).addTarget(regionInfos.get(7), 0).build())); } // This test is to make sure that normalizer is only going to merge adjacent regions. @@ -666,17 +596,10 @@ public void testNormalizerCannotMergeNonAdjacentRegions() { // [, "aa"), ["aa", "aa1"), ["aa1", "aa1!"), ["aa1!", "aa2"), ["aa2", ) // Region ["aa", "aa1") and ["aa1!", "aa2") are not adjacent, they are not supposed to // merged. - final byte[][] keys = { - null, - Bytes.toBytes("aa"), - Bytes.toBytes("aa1!"), - Bytes.toBytes("aa1"), - Bytes.toBytes("aa2"), - null, - }; + final byte[][] keys = { null, Bytes.toBytes("aa"), Bytes.toBytes("aa1!"), Bytes.toBytes("aa1"), + Bytes.toBytes("aa2"), null, }; final List regionInfos = createRegionInfos(tableName, keys); - final Map regionSizes = - createRegionSizesMap(regionInfos, 3, 1, 1, 3, 5); + final Map regionSizes = createRegionSizesMap(regionInfos, 3, 1, 1, 3, 5); setupMocksForNormalizer(regionSizes, regionInfos); // Compute the plan, no merge plan returned as they are not adjacent. 
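// Editorial aside, not part of the patch: the testNormalizerCannotMergeNonAdjacentRegions hunk
// above relies on the normalizer only merging regions that are byte-wise adjacent. A minimal
// sketch of that adjacency rule, using the existing org.apache.hadoop.hbase.client.RegionInfo
// and org.apache.hadoop.hbase.util.Bytes types (the helper name is hypothetical):
private static boolean areAdjacent(RegionInfo left, RegionInfo right) {
  // e.g. ["aa", "aa1") and ["aa1", "aa1!") touch, while ["aa", "aa1") and ["aa1!", "aa2") do not,
  // which is why the test expects no merge plan for the non-adjacent pair.
  return Bytes.equals(left.getEndKey(), right.getStartKey());
}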
@@ -686,31 +609,31 @@ public void testNormalizerCannotMergeNonAdjacentRegions() { @SuppressWarnings("MockitoCast") private void setupMocksForNormalizer(Map regionSizes, - List regionInfoList) { + List regionInfoList) { masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS); tableDescriptor = Mockito.mock(TableDescriptor.class, RETURNS_DEEP_STUBS); // for simplicity all regions are assumed to be on one server; doesn't matter to us ServerName sn = ServerName.valueOf("localhost", 0, 0L); - when(masterServices.getAssignmentManager().getRegionStates() - .getRegionsOfTable(any())).thenReturn(regionInfoList); - when(masterServices.getAssignmentManager().getRegionStates() - .getRegionServerOfRegion(any())).thenReturn(sn); - when(masterServices.getAssignmentManager().getRegionStates() - .getRegionState(any(RegionInfo.class))).thenReturn( - RegionState.createForTesting(null, RegionState.State.OPEN)); + when(masterServices.getAssignmentManager().getRegionStates().getRegionsOfTable(any())) + .thenReturn(regionInfoList); + when(masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(any())) + .thenReturn(sn); + when( + masterServices.getAssignmentManager().getRegionStates().getRegionState(any(RegionInfo.class))) + .thenReturn(RegionState.createForTesting(null, RegionState.State.OPEN)); for (Map.Entry region : regionSizes.entrySet()) { RegionMetrics regionLoad = Mockito.mock(RegionMetrics.class); when(regionLoad.getRegionName()).thenReturn(region.getKey()); when(regionLoad.getStoreFileSize()) - .thenReturn(new Size(region.getValue(), Size.Unit.MEGABYTE)); + .thenReturn(new Size(region.getValue(), Size.Unit.MEGABYTE)); // this is possibly broken with jdk9, unclear if false positive or not // suppress it for now, fix it when we get to running tests on 9 // see: http://errorprone.info/bugpattern/MockitoCast - when((Object) masterServices.getServerManager().getLoad(sn) - .getRegionMetrics().get(region.getKey())).thenReturn(regionLoad); + when((Object) masterServices.getServerManager().getLoad(sn).getRegionMetrics() + .get(region.getKey())).thenReturn(regionLoad); } when(masterServices.isSplitOrMergeEnabled(any())).thenReturn(true); @@ -738,37 +661,33 @@ private static List createRegionInfos(final TableName tableName, fin final byte[][] splitKeys = Bytes.split(startKey, endKey, length - 1); final List ret = new ArrayList<>(length); for (int i = 0; i < splitKeys.length - 1; i++) { - ret.add(createRegionInfo(tableName, splitKeys[i], splitKeys[i+1])); + ret.add(createRegionInfo(tableName, splitKeys[i], splitKeys[i + 1])); } return ret; } private static RegionInfo createRegionInfo(final TableName tableName, final byte[] startKey, - final byte[] endKey) { - return RegionInfoBuilder.newBuilder(tableName) - .setStartKey(startKey) - .setEndKey(endKey) - .setRegionId(generateRegionId()) - .build(); + final byte[] endKey) { + return RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey) + .setRegionId(generateRegionId()).build(); } private static long generateRegionId() { return Instant.ofEpochMilli(EnvironmentEdgeManager.currentTime()) - .minus(Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS + 1)) - .toEpochMilli(); + .minus(Period.ofDays(DEFAULT_MERGE_MIN_REGION_AGE_DAYS + 1)).toEpochMilli(); } private static List createRegionInfos(final TableName tableName, - final byte[][] splitKeys) { + final byte[][] splitKeys) { final List ret = new ArrayList<>(splitKeys.length); for (int i = 0; i < splitKeys.length - 1; i++) { - 
ret.add(createRegionInfo(tableName, splitKeys[i], splitKeys[i+1])); + ret.add(createRegionInfo(tableName, splitKeys[i], splitKeys[i + 1])); } return ret; } private static Map createRegionSizesMap(final List regionInfos, - int... sizes) { + int... sizes) { if (regionInfos.size() != sizes.length) { throw new IllegalStateException("Parameter lengths must match."); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java index 4abafe4a525b..ba9c0af86e2d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java @@ -25,6 +25,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -77,10 +78,10 @@ /** * Testing {@link SimpleRegionNormalizer} on minicluster. */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestSimpleRegionNormalizerOnCluster { private static final Logger LOG = - LoggerFactory.getLogger(TestSimpleRegionNormalizerOnCluster.class); + LoggerFactory.getLogger(TestSimpleRegionNormalizerOnCluster.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -134,10 +135,9 @@ public void testHonorsNormalizerSwitch() throws Exception { } /** - * Test that disabling normalizer via table configuration is honored. There's - * no side-effect to look for (other than a log message), so normalize two - * tables, one with the disabled setting, and look for change in one and no - * change in the other. + * Test that disabling normalizer via table configuration is honored. There's no side-effect to + * look for (other than a log message), so normalize two tables, one with the disabled setting, + * and look for change in one and no change in the other. */ @Test public void testHonorsNormalizerTableSetting() throws Exception { @@ -157,17 +157,11 @@ public void testHonorsNormalizerTableSetting() throws Exception { // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn2RegionCount number of regions because normalizer has not been enabled on it. // tn3 has tn3RegionCount number of regions because two plans are run: - // 1. split one region to two - // 2. merge two regions into one + // 1. split one region to two + // 2. 
merge two regions into one // and hence, total number of regions for tn3 remains same - assertEquals( - tn1 + " should have split.", - tn1RegionCount + 1, - getRegionCount(tn1)); - assertEquals( - tn2 + " should not have split.", - tn2RegionCount, - getRegionCount(tn2)); + assertEquals(tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); + assertEquals(tn2 + " should not have split.", tn2RegionCount, getRegionCount(tn2)); LOG.debug("waiting for t3 to settle..."); waitForTableRegionCount(tn3, comparesEqualTo(tn3RegionCount)); } finally { @@ -183,33 +177,28 @@ public void testRegionNormalizationSplitWithoutQuotaLimit() throws Exception { } @Test - public void testRegionNormalizationSplitWithQuotaLimit() throws Exception { + public void testRegionNormalizationSplitWithQuotaLimit() throws Exception { testRegionNormalizationSplit(true); } void testRegionNormalizationSplit(boolean limitedByQuota) throws Exception { TableName tableName = null; try { - tableName = limitedByQuota - ? buildTableNameForQuotaTest(name.getMethodName()) - : TableName.valueOf(name.getMethodName()); + tableName = limitedByQuota ? buildTableNameForQuotaTest(name.getMethodName()) + : TableName.valueOf(name.getMethodName()); final int currentRegionCount = createTableBegsSplit(tableName, true, false); - final long existingSkippedSplitCount = master.getRegionNormalizerManager() - .getSkippedCount(PlanType.SPLIT); + final long existingSkippedSplitCount = + master.getRegionNormalizerManager().getSkippedCount(PlanType.SPLIT); assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize().get()); if (limitedByQuota) { waitForSkippedSplits(master, existingSkippedSplitCount); - assertEquals( - tableName + " should not have split.", - currentRegionCount, + assertEquals(tableName + " should not have split.", currentRegionCount, getRegionCount(tableName)); } else { waitForTableRegionCount(tableName, greaterThanOrEqualTo(currentRegionCount + 1)); - assertEquals( - tableName + " should have split.", - currentRegionCount + 1, + assertEquals(tableName + " should have split.", currentRegionCount + 1, getRegionCount(tableName)); } } finally { @@ -225,9 +214,7 @@ public void testRegionNormalizationMerge() throws Exception { assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize().get()); waitForTableRegionCount(tableName, lessThanOrEqualTo(currentRegionCount - 1)); - assertEquals( - tableName + " should have merged.", - currentRegionCount - 1, + assertEquals(tableName + " should have merged.", currentRegionCount - 1, getRegionCount(tableName)); } finally { dropIfExists(tableName); @@ -244,9 +231,8 @@ public void testHonorsNamespaceFilter() throws Exception { admin.createNamespace(namespaceDescriptor).get(); final int tn1RegionCount = createTableBegsSplit(tn1, true, false); final int tn2RegionCount = createTableBegsSplit(tn2, true, false); - final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder() - .namespace("ns") - .build(); + final NormalizeTableFilterParams ntfp = + new NormalizeTableFilterParams.Builder().namespace("ns").build(); assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize(ntfp).get()); @@ -254,10 +240,7 @@ public void testHonorsNamespaceFilter() throws Exception { // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn2RegionCount number of regions because it's not a member of the target namespace. 
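// Editorial aside, not part of the patch: a minimal sketch of the namespace-scoped normalize
// request built in testHonorsNamespaceFilter; NormalizeTableFilterParams and the async
// admin.normalize(...) call are the APIs already shown in the surrounding hunk, and "ns" is the
// namespace the test creates.
NormalizeTableFilterParams onlyNs =
  new NormalizeTableFilterParams.Builder().namespace("ns").build();
admin.normalize(onlyNs).get(); // restricts the normalizer run to tables in namespace "ns"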
- assertEquals( - tn1 + " should have split.", - tn1RegionCount + 1, - getRegionCount(tn1)); + assertEquals(tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount)); } finally { dropIfExists(tn1); @@ -273,9 +256,8 @@ public void testHonorsPatternFilter() throws Exception { try { final int tn1RegionCount = createTableBegsSplit(tn1, true, false); final int tn2RegionCount = createTableBegsSplit(tn2, true, false); - final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder() - .regex(".*[1]") - .build(); + final NormalizeTableFilterParams ntfp = + new NormalizeTableFilterParams.Builder().regex(".*[1]").build(); assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize(ntfp).get()); @@ -283,10 +265,7 @@ public void testHonorsPatternFilter() throws Exception { // confirm that tn1 has (tn1RegionCount + 1) number of regions. // tn2 has tn2RegionCount number of regions because it fails filter. - assertEquals( - tn1 + " should have split.", - tn1RegionCount + 1, - getRegionCount(tn1)); + assertEquals(tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount)); } finally { dropIfExists(tn1); @@ -303,8 +282,7 @@ public void testHonorsNameFilter() throws Exception { final int tn1RegionCount = createTableBegsSplit(tn1, true, false); final int tn2RegionCount = createTableBegsSplit(tn2, true, false); final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder() - .tableNames(Collections.singletonList(tn1)) - .build(); + .tableNames(Collections.singletonList(tn1)).build(); assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize(ntfp).get()); @@ -312,10 +290,7 @@ public void testHonorsNameFilter() throws Exception { // confirm that tn1 has (tn1RegionCount + 1) number of regions. 
// tn2 has tn3RegionCount number of regions because it fails filter: - assertEquals( - tn1 + " should have split.", - tn1RegionCount + 1, - getRegionCount(tn1)); + assertEquals(tn1 + " should have split.", tn1RegionCount + 1, getRegionCount(tn1)); waitForTableRegionCount(tn2, comparesEqualTo(tn2RegionCount)); } finally { dropIfExists(tn1); @@ -334,10 +309,9 @@ public void testTargetOfSplitAndMerge() throws Exception { final int tnRegionCount = createTableTargetOfSplitAndMerge(tn); assertFalse(admin.normalizerSwitch(true).get()); assertTrue(admin.normalize().get()); - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>( - "expected " + tn + " to split or merge (probably split)", - () -> getRegionCountUnchecked(tn), - not(comparesEqualTo(tnRegionCount)))); + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), + new MatcherPredicate<>("expected " + tn + " to split or merge (probably split)", + () -> getRegionCountUnchecked(tn), not(comparesEqualTo(tnRegionCount)))); } finally { dropIfExists(tn); } @@ -346,31 +320,29 @@ public void testTargetOfSplitAndMerge() throws Exception { private static TableName buildTableNameForQuotaTest(final String methodName) throws Exception { String nsp = "np2"; NamespaceDescriptor nspDesc = - NamespaceDescriptor.create(nsp) - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5") - .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); + NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5") + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); admin.createNamespace(nspDesc).get(); return TableName.valueOf(nsp, methodName); } private static void waitForSkippedSplits(final HMaster master, - final long existingSkippedSplitCount) { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>( - "waiting to observe split attempt and skipped.", - () -> master.getRegionNormalizerManager().getSkippedCount(PlanType.SPLIT), - Matchers.greaterThan(existingSkippedSplitCount))); + final long existingSkippedSplitCount) { + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), + new MatcherPredicate<>("waiting to observe split attempt and skipped.", + () -> master.getRegionNormalizerManager().getSkippedCount(PlanType.SPLIT), + Matchers.greaterThan(existingSkippedSplitCount))); } private static void waitForTableRegionCount(final TableName tableName, - Matcher matcher) { - TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new MatcherPredicate<>( - "region count for table " + tableName + " does not match expected", - () -> getRegionCountUnchecked(tableName), - matcher)); + Matcher matcher) { + TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), + new MatcherPredicate<>("region count for table " + tableName + " does not match expected", + () -> getRegionCountUnchecked(tableName), matcher)); } private static List generateTestData(final TableName tableName, - final int... regionSizesMb) throws IOException { + final int... 
regionSizesMb) throws IOException { final List generatedRegions; final int numRegions = regionSizesMb.length; LOG.debug("generating test data into {}, {} regions of sizes (mb) {}", tableName, numRegions, @@ -406,14 +378,11 @@ private static void generateTestData(Region region, int numRows) throws IOExcept } private static double getRegionSizeMB(final MasterServices masterServices, - final RegionInfo regionInfo) { - final ServerName sn = masterServices.getAssignmentManager() - .getRegionStates() - .getRegionServerOfRegion(regionInfo); - final RegionMetrics regionLoad = masterServices.getServerManager() - .getLoad(sn) - .getRegionMetrics() - .get(regionInfo.getRegionName()); + final RegionInfo regionInfo) { + final ServerName sn = + masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(regionInfo); + final RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn) + .getRegionMetrics().get(regionInfo.getRegionName()); if (regionLoad == null) { LOG.debug("{} was not found in RegionsLoad", regionInfo.getRegionNameAsString()); return -1; @@ -422,51 +391,51 @@ private static double getRegionSizeMB(final MasterServices masterServices, } /** - * create a table with 5 regions, having region sizes so as to provoke a split - * of the largest region. + * create a table with 5 regions, having region sizes so as to provoke a split of the largest + * region. *
<ul>
- *   <li>total table size: 12</li>
- *   <li>average region size: 2.4</li>
- *   <li>split threshold: 2.4 * 2 = 4.8</li>
+ * <li>total table size: 12</li>
+ * <li>average region size: 2.4</li>
+ * <li>split threshold: 2.4 * 2 = 4.8</li>
 * </ul>
      */ private static int createTableBegsSplit(final TableName tableName, - final boolean normalizerEnabled, final boolean isMergeEnabled) - throws Exception { + final boolean normalizerEnabled, final boolean isMergeEnabled) throws Exception { final List generatedRegions = generateTestData(tableName, 1, 1, 2, 3, 5); assertEquals(5, getRegionCount(tableName)); admin.flush(tableName).get(); - final TableDescriptor td = TableDescriptorBuilder - .newBuilder(admin.getDescriptor(tableName).get()) - .setNormalizationEnabled(normalizerEnabled) - .setMergeEnabled(isMergeEnabled) - .build(); + final TableDescriptor td = + TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName).get()) + .setNormalizationEnabled(normalizerEnabled).setMergeEnabled(isMergeEnabled).build(); admin.modifyTable(td).get(); // make sure relatively accurate region statistics are available for the test table. use // the last/largest region as clue. TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(1), new ExplainingPredicate() { - @Override public String explainFailure() { + @Override + public String explainFailure() { return "expected largest region to be >= 4mb."; } - @Override public boolean evaluate() { + + @Override + public boolean evaluate() { return generatedRegions.stream() - .mapToDouble(val -> getRegionSizeMB(master, val.getRegionInfo())) - .allMatch(val -> val > 0) - && getRegionSizeMB(master, generatedRegions.get(4).getRegionInfo()) >= 4.0; + .mapToDouble(val -> getRegionSizeMB(master, val.getRegionInfo())) + .allMatch(val -> val > 0) + && getRegionSizeMB(master, generatedRegions.get(4).getRegionInfo()) >= 4.0; } }); return 5; } /** - * create a table with 5 regions, having region sizes so as to provoke a merge - * of the smallest regions. + * create a table with 5 regions, having region sizes so as to provoke a merge of the smallest + * regions. *
<ul>
- *   <li>total table size: 13</li>
- *   <li>average region size: 2.6</li>
- *   <li>sum of sizes of first two regions < average</li>
+ * <li>total table size: 13</li>
+ * <li>average region size: 2.6</li>
+ * <li>sum of sizes of first two regions < average</li>
 * </ul>
      */ private static int createTableBegsMerge(final TableName tableName) throws Exception { @@ -476,23 +445,24 @@ private static int createTableBegsMerge(final TableName tableName) throws Except admin.flush(tableName).get(); final TableDescriptor td = TableDescriptorBuilder - .newBuilder(admin.getDescriptor(tableName).get()) - .setNormalizationEnabled(true) - .build(); + .newBuilder(admin.getDescriptor(tableName).get()).setNormalizationEnabled(true).build(); admin.modifyTable(td).get(); // make sure relatively accurate region statistics are available for the test table. use // the last/largest region as clue. LOG.debug("waiting for region statistics to settle."); TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(1), new ExplainingPredicate() { - @Override public String explainFailure() { + @Override + public String explainFailure() { return "expected largest region to be >= 4mb."; } - @Override public boolean evaluate() { + + @Override + public boolean evaluate() { return generatedRegions.stream() - .mapToDouble(val -> getRegionSizeMB(master, val.getRegionInfo())) - .allMatch(val -> val > 0) - && getRegionSizeMB(master, generatedRegions.get(4).getRegionInfo()) >= 4.0; + .mapToDouble(val -> getRegionSizeMB(master, val.getRegionInfo())) + .allMatch(val -> val > 0) + && getRegionSizeMB(master, generatedRegions.get(4).getRegionInfo()) >= 4.0; } }); return 5; @@ -502,8 +472,8 @@ private static int createTableBegsMerge(final TableName tableName) throws Except * Create a table with 4 regions, having region sizes so as to provoke a split of the largest * region and a merge of an empty region into the largest. *
<ul>
- *   <li>total table size: 14</li>
- *   <li>average region size: 3.5</li>
+ * <li>total table size: 14</li>
+ * <li>average region size: 3.5</li>
 * </ul>
      */ private static int createTableTargetOfSplitAndMerge(final TableName tableName) throws Exception { @@ -513,19 +483,20 @@ private static int createTableTargetOfSplitAndMerge(final TableName tableName) t admin.flush(tableName).get(); final TableDescriptor td = TableDescriptorBuilder - .newBuilder(admin.getDescriptor(tableName).get()) - .setNormalizationEnabled(true) - .build(); + .newBuilder(admin.getDescriptor(tableName).get()).setNormalizationEnabled(true).build(); admin.modifyTable(td).get(); // make sure relatively accurate region statistics are available for the test table. use // the last/largest region as clue. LOG.debug("waiting for region statistics to settle."); TEST_UTIL.waitFor(TimeUnit.MINUTES.toMillis(5), new ExplainingPredicate() { - @Override public String explainFailure() { + @Override + public String explainFailure() { return "expected largest region to be >= 10mb."; } - @Override public boolean evaluate() { + + @Override + public boolean evaluate() { for (int i = 0; i < generatedRegions.size(); i++) { final RegionInfo regionInfo = generatedRegions.get(i).getRegionInfo(); if (!(getRegionSizeMB(master, regionInfo) >= regionSizesMb[i])) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterFailoverWithProceduresTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterFailoverWithProceduresTestBase.java index 2814ba4cce6c..a515b11a14eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterFailoverWithProceduresTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterFailoverWithProceduresTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ public abstract class MasterFailoverWithProceduresTestBase { private static final Logger LOG = - LoggerFactory.getLogger(MasterFailoverWithProceduresTestBase.class); + LoggerFactory.getLogger(MasterFailoverWithProceduresTestBase.class); protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -63,9 +63,9 @@ protected static Path getRootDir() { } protected static void testRecoveryAndDoubleExecution(final HBaseTestingUtil testUtil, - final long procId, final int lastStepBeforeFailover) throws Exception { + final long procId, final int lastStepBeforeFailover) throws Exception { ProcedureExecutor procExec = - testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor(); ProcedureTestingUtility.waitProcedure(procExec, procId); final Procedure proc = procExec.getProcedure(procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java index ae0c4c623125..1d90a0d472fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; @@ -32,14 +31,14 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; /** * Tool to test performance of locks and queues in procedure scheduler independently from other - * framework components. - * Inserts table and region operations in the scheduler, then polls them and exercises their locks - * Number of tables, regions and operations can be set using cli args. + * framework components. Inserts table and region operations in the scheduler, then polls them and + * exercises their locks Number of tables, regions and operations can be set using cli args. */ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBaseTool { protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -51,7 +50,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase public static final int DEFAULT_REGIONS_PER_TABLE = 10; public static final Option REGIONS_PER_TABLE_OPTION = new Option("regions_per_table", true, "Total number of regions per table. Default: " + DEFAULT_REGIONS_PER_TABLE); - public static final int DEFAULT_NUM_OPERATIONS = 10000000; // 10M + public static final int DEFAULT_NUM_OPERATIONS = 10000000; // 10M public static final Option NUM_OPERATIONS_OPTION = new Option("num_ops", true, "Total number of operations to schedule. Default: " + DEFAULT_NUM_OPERATIONS); public static final int DEFAULT_NUM_THREADS = 10; @@ -60,8 +59,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase public static final String DEFAULT_OPS_TYPE = "both"; public static final Option OPS_TYPE_OPTION = new Option("ops_type", true, "Type of operations to run. Value can be table/region/both. In case of 'both', " - + "proportion of table:region ops is 1:regions_per_table. Default: " - + DEFAULT_OPS_TYPE); + + "proportion of table:region ops is 1:regions_per_table. Default: " + DEFAULT_OPS_TYPE); private int numTables = DEFAULT_NUM_TABLES; private int regionsPerTable = DEFAULT_REGIONS_PER_TABLE; @@ -86,8 +84,9 @@ private class RegionProcedure extends TestMasterProcedureScheduler.TestRegionPro @Override public LockState acquireLock(Void env) { - return procedureScheduler.waitRegions(this, getTableName(), getRegionInfo())? - LockState.LOCK_EVENT_WAIT: LockState.LOCK_ACQUIRED; + return procedureScheduler.waitRegions(this, getTableName(), getRegionInfo()) + ? LockState.LOCK_EVENT_WAIT + : LockState.LOCK_ACQUIRED; } @Override @@ -117,8 +116,9 @@ private class TableProcedure extends TestMasterProcedureScheduler.TestTableProce @Override public LockState acquireLock(Void env) { - return procedureScheduler.waitTableExclusiveLock(this, getTableName())? - LockState.LOCK_EVENT_WAIT: LockState.LOCK_ACQUIRED; + return procedureScheduler.waitTableExclusiveLock(this, getTableName()) + ? 
LockState.LOCK_EVENT_WAIT + : LockState.LOCK_ACQUIRED; } @Override @@ -151,8 +151,8 @@ private void setupOperations() throws Exception { for (int i = 0; i < numTables; ++i) { for (int j = 0; j < regionsPerTable; ++j) { regionOps[i * regionsPerTable + j] = new RegionProcedureFactory( - RegionInfoBuilder.newBuilder(((TableProcedureFactory) tableOps[i]).tableName) - .setStartKey(Bytes.toBytes(j)).setEndKey(Bytes.toBytes(j + 1)).build()); + RegionInfoBuilder.newBuilder(((TableProcedureFactory) tableOps[i]).tableName) + .setStartKey(Bytes.toBytes(j)).setEndKey(Bytes.toBytes(j + 1)).build()); } } @@ -164,7 +164,7 @@ private void setupOperations() throws Exception { ops = regionOps; } else if (opsType.equals("both")) { System.out.println("Operations: both (table + region)"); - ops = (ProcedureFactory[])ArrayUtils.addAll(tableOps, regionOps); + ops = (ProcedureFactory[]) ArrayUtils.addAll(tableOps, regionOps); } else { throw new Exception("-ops_type should be one of table/region/both."); } @@ -182,10 +182,9 @@ protected void addOptions() { @Override protected void processOptions(CommandLine cmd) { numTables = getOptionAsInt(cmd, NUM_TABLES_OPTION.getOpt(), DEFAULT_NUM_TABLES); - regionsPerTable = getOptionAsInt(cmd, REGIONS_PER_TABLE_OPTION.getOpt(), - DEFAULT_REGIONS_PER_TABLE); - numOps = getOptionAsInt(cmd, NUM_OPERATIONS_OPTION.getOpt(), - DEFAULT_NUM_OPERATIONS); + regionsPerTable = + getOptionAsInt(cmd, REGIONS_PER_TABLE_OPTION.getOpt(), DEFAULT_REGIONS_PER_TABLE); + numOps = getOptionAsInt(cmd, NUM_OPERATIONS_OPTION.getOpt(), DEFAULT_NUM_OPERATIONS); numThreads = getOptionAsInt(cmd, NUM_THREADS_OPTION.getOpt(), DEFAULT_NUM_THREADS); opsType = cmd.getOptionValue(OPS_TYPE_OPTION.getOpt(), DEFAULT_OPS_TYPE); } @@ -216,7 +215,7 @@ private class PollAndLockWorker extends Thread { public void run() { while (completed.get() < numOps) { // With lock/unlock being ~100ns, and no other workload, 1000ns wait seams reasonable. 
- TestProcedure proc = (TestProcedure)procedureScheduler.poll(1000); + TestProcedure proc = (TestProcedure) procedureScheduler.poll(1000); if (proc == null) { yield.incrementAndGet(); continue; @@ -291,11 +290,12 @@ protected int doWork() throws Exception { System.out.println("Threads : " + numThreads); System.out.println("******************************************"); System.out.println("Raw format for scripts"); - System.out.println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " - + "num_yield=%s, time_addback_ms=%s, time_poll_ms=%s]", - NUM_OPERATIONS_OPTION.getOpt(), numOps, OPS_TYPE_OPTION.getOpt(), opsType, - NUM_TABLES_OPTION.getOpt(), numTables, REGIONS_PER_TABLE_OPTION.getOpt(), regionsPerTable, - NUM_THREADS_OPTION.getOpt(), numThreads, yield.get(), addBackTime, pollTime)); + System.out.println(String.format( + "RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " + + "num_yield=%s, time_addback_ms=%s, time_poll_ms=%s]", + NUM_OPERATIONS_OPTION.getOpt(), numOps, OPS_TYPE_OPTION.getOpt(), opsType, + NUM_TABLES_OPTION.getOpt(), numTables, REGIONS_PER_TABLE_OPTION.getOpt(), regionsPerTable, + NUM_THREADS_OPTION.getOpt(), numThreads, yield.get(), addBackTime, pollTime)); return 0; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index 726e117ef1ac..75410870854f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.procedure; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; @@ -76,7 +75,8 @@ public class MasterProcedureTestingUtility { private static final Logger LOG = LoggerFactory.getLogger(MasterProcedureTestingUtility.class); - private MasterProcedureTestingUtility() { } + private MasterProcedureTestingUtility() { + } public static void restartMasterProcedureExecutor(ProcedureExecutor procExec) throws Exception { @@ -89,7 +89,7 @@ public static void restartMasterProcedureExecutor(ProcedureExecutor !p.isSuccess()) - .filter(p -> p instanceof TransitRegionStateProcedure) - .map(p -> (TransitRegionStateProcedure) p).collect(Collectors.toList())); + .filter(p -> p instanceof TransitRegionStateProcedure) + .map(p -> (TransitRegionStateProcedure) p).collect(Collectors.toList())); return null; } }, @@ -130,10 +130,9 @@ public Void call() throws Exception { } // ========================================================================== - // Master failover utils + // Master failover utils // ========================================================================== - public static void masterFailover(final HBaseTestingUtil testUtil) - throws Exception { + public static void masterFailover(final HBaseTestingUtil testUtil) throws Exception { SingleProcessHBaseCluster cluster = testUtil.getMiniHBaseCluster(); // Kill the master @@ -144,8 +143,8 @@ public static void masterFailover(final HBaseTestingUtil testUtil) waitBackupMaster(testUtil, oldMaster); } - public static void waitBackupMaster(final HBaseTestingUtil testUtil, - final HMaster oldMaster) throws Exception { + public static void waitBackupMaster(final HBaseTestingUtil testUtil, final HMaster oldMaster) + throws Exception { SingleProcessHBaseCluster cluster = testUtil.getMiniHBaseCluster(); HMaster newMaster = cluster.getMaster(); @@ -160,7 +159,7 @@ public static void waitBackupMaster(final HBaseTestingUtil testUtil, } // ========================================================================== - // Table Helpers + // Table Helpers // ========================================================================== public static TableDescriptor createHTD(final TableName tableName, final String... 
family) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); @@ -190,7 +189,7 @@ public static void validateTableCreation(final HMaster master, final TableName t // check filesystem final FileSystem fs = master.getMasterFileSystem().getFileSystem(); final Path tableDir = - CommonFSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); + CommonFSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); assertTrue(fs.exists(tableDir)); CommonFSUtils.logFileSystemState(fs, tableDir, LOG); List unwantedRegionDirs = FSUtils.getRegionDirs(fs, tableDir); @@ -225,22 +224,23 @@ public static void validateTableCreation(final HMaster master, final TableName t TableDescriptor htd = master.getTableDescriptors().get(tableName); assertTrue("table descriptor not found", htd != null); for (int i = 0; i < family.length; ++i) { - assertTrue("family not found " + family[i], htd.getColumnFamily(Bytes.toBytes(family[i])) != null); + assertTrue("family not found " + family[i], + htd.getColumnFamily(Bytes.toBytes(family[i])) != null); } assertEquals(family.length, htd.getColumnFamilyCount()); // checks store file tracker impl has been properly set in htd String storeFileTrackerImpl = - StoreFileTrackerFactory.getStoreFileTrackerName(master.getConfiguration()); + StoreFileTrackerFactory.getStoreFileTrackerName(master.getConfiguration()); assertEquals(storeFileTrackerImpl, htd.getValue(TRACKER_IMPL)); } - public static void validateTableDeletion( - final HMaster master, final TableName tableName) throws IOException { + public static void validateTableDeletion(final HMaster master, final TableName tableName) + throws IOException { // check filesystem final FileSystem fs = master.getMasterFileSystem().getFileSystem(); final Path tableDir = - CommonFSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); + CommonFSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); assertFalse(fs.exists(tableDir)); // check meta @@ -248,8 +248,7 @@ public static void validateTableDeletion( assertEquals(0, countMetaRegions(master, tableName)); // check htd - assertTrue("found htd of deleted table", - master.getTableDescriptors().get(tableName) == null); + assertTrue("found htd of deleted table", master.getTableDescriptors().get(tableName) == null); } private static int countMetaRegions(final HMaster master, final TableName tableName) @@ -320,7 +319,7 @@ public static void validateColumnFamilyDeletion(final HMaster master, final Tabl // verify fs final FileSystem fs = master.getMasterFileSystem().getFileSystem(); final Path tableDir = - CommonFSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); + CommonFSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) { final Path familyDir = new Path(regionDir, family); assertFalse(family + " family dir should not exist", fs.exists(familyDir)); @@ -337,8 +336,8 @@ public static void validateColumnFamilyModification(final HMaster master, assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(hcfd, columnDescriptor)); } - public static void loadData(final Connection connection, final TableName tableName, - int rows, final byte[][] splitKeys, final String... sfamilies) throws IOException { + public static void loadData(final Connection connection, final TableName tableName, int rows, + final byte[][] splitKeys, final String... 
sfamilies) throws IOException { byte[][] families = new byte[sfamilies.length][]; for (int i = 0; i < families.length; ++i) { families[i] = Bytes.toBytes(sfamilies[i]); @@ -348,7 +347,7 @@ public static void loadData(final Connection connection, final TableName tableNa // Ensure one row per region assertTrue(rows >= splitKeys.length); - for (byte[] k: splitKeys) { + for (byte[] k : splitKeys) { byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), k); byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value))); mutator.mutate(createPut(families, key, value)); @@ -357,8 +356,8 @@ public static void loadData(final Connection connection, final TableName tableNa // Add other extra rows. more rows, more files while (rows-- > 0) { - byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), - Bytes.toBytes(rows)); + byte[] value = + Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), Bytes.toBytes(rows)); byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value)); mutator.mutate(createPut(families, key, value)); } @@ -369,14 +368,14 @@ private static Put createPut(final byte[][] families, final byte[] key, final by byte[] q = Bytes.toBytes("q"); Put put = new Put(key); put.setDurability(Durability.SKIP_WAL); - for (byte[] family: families) { + for (byte[] family : families) { put.addColumn(family, q, value); } return put; } // ========================================================================== - // Procedure Helpers + // Procedure Helpers // ========================================================================== public static long generateNonceGroup(final HMaster master) { return master.getAsyncClusterConnection().getNonceGenerator().getNonceGroup(); @@ -389,28 +388,28 @@ public static long generateNonce(final HMaster master) { /** * Run through all procedure flow states TWICE while also restarting procedure executor at each * step; i.e force a reread of procedure store. - * - *

<p>It does
- * <ol><li>Execute step N - kill the executor before store update
+ * <p>
+ * It does
+ * <ol>
+ * <li>Execute step N - kill the executor before store update
 * <li>Restart executor/store
 * <li>Execute step N - and then save to store
 * </ol>
- *
- * <p>This is a good test for finding state that needs persisting and steps that are not
- * idempotent. Use this version of the test when a procedure executes all flow steps from start to
- * finish.
+ * <p>

        + * This is a good test for finding state that needs persisting and steps that are not idempotent. + * Use this version of the test when a procedure executes all flow steps from start to finish. * @see #testRecoveryAndDoubleExecution(ProcedureExecutor, long) */ public static void testRecoveryAndDoubleExecution( - final ProcedureExecutor procExec, final long procId, - final int lastStep, final boolean expectExecRunning) throws Exception { + final ProcedureExecutor procExec, final long procId, final int lastStep, + final boolean expectExecRunning) throws Exception { ProcedureTestingUtility.waitProcedure(procExec, procId); assertEquals(false, procExec.isRunning()); // Restart the executor and execute the step twice - // execute step N - kill before store update - // restart executor/store - // execute step N - save on store + // execute step N - kill before store update + // restart executor/store + // execute step N - save on store // NOTE: currently we make assumption that states/ steps are sequential. There are already // instances of a procedures which skip (don't use) intermediate states/ steps. In future, // intermediate states/ steps can be added with ordinal greater than lastStep. If and when @@ -422,8 +421,9 @@ public static void testRecoveryAndDoubleExecution( // state is in that list. Current assumption of sequential proregression of steps/ states is // made at multiple places so we can keep while condition below for simplicity. Procedure proc = procExec.getProcedure(procId); - int stepNum = proc instanceof StateMachineProcedure ? - ((StateMachineProcedure) proc).getCurrentStateId() : 0; + int stepNum = + proc instanceof StateMachineProcedure ? ((StateMachineProcedure) proc).getCurrentStateId() + : 0; for (;;) { if (stepNum == lastStep) { break; @@ -434,28 +434,29 @@ public static void testRecoveryAndDoubleExecution( ProcedureTestingUtility.waitProcedure(procExec, procId); // Old proc object is stale, need to get the new one after ProcedureExecutor restart proc = procExec.getProcedure(procId); - stepNum = proc instanceof StateMachineProcedure ? - ((StateMachineProcedure) proc).getCurrentStateId() : stepNum + 1; + stepNum = + proc instanceof StateMachineProcedure ? ((StateMachineProcedure) proc).getCurrentStateId() + : stepNum + 1; } assertEquals(expectExecRunning, procExec.isRunning()); } /** - * Run through all procedure flow states TWICE while also restarting - * procedure executor at each step; i.e force a reread of procedure store. - * - *

<p>It does
- * <ol><li>Execute step N - kill the executor before store update
+ * Run through all procedure flow states TWICE while also restarting procedure executor at each
+ * step; i.e force a reread of procedure store.
+ * <p>
+ * It does
+ * <ol>
+ * <li>Execute step N - kill the executor before store update
 * <li>Restart executor/store
 * <li>Executes hook for each step twice
 * <li>Execute step N - and then save to store
 * </ol>
- *
- * <p>This is a good test for finding state that needs persisting and steps that are not
- * idempotent. Use this version of the test when the order in which flow steps are executed is
- * not start to finish; where the procedure may vary the flow steps dependent on circumstance
- * found.
+ * <p>

          + * This is a good test for finding state that needs persisting and steps that are not idempotent. + * Use this version of the test when the order in which flow steps are executed is not start to + * finish; where the procedure may vary the flow steps dependent on circumstance found. * @see #testRecoveryAndDoubleExecution(ProcedureExecutor, long, int, boolean) */ public static void testRecoveryAndDoubleExecution( @@ -483,7 +484,7 @@ public static void testRecoveryAndDoubleExecution( /** * Hook which will be executed on each step */ - public interface StepHook{ + public interface StepHook { /** * @param step Step no. at which this will be executed * @return false if test should fail otherwise true @@ -493,29 +494,28 @@ public interface StepHook{ } /** - * Execute the procedure up to "lastStep" and then the ProcedureExecutor - * is restarted and an abort() is injected. - * If the procedure implement abort() this should result in rollback being triggered. - * Each rollback step is called twice, by restarting the executor after every step. - * At the end of this call the procedure should be finished and rolledback. - * This method assert on the procedure being terminated with an AbortException. + * Execute the procedure up to "lastStep" and then the ProcedureExecutor is restarted and an + * abort() is injected. If the procedure implement abort() this should result in rollback being + * triggered. Each rollback step is called twice, by restarting the executor after every step. At + * the end of this call the procedure should be finished and rolledback. This method assert on the + * procedure being terminated with an AbortException. */ public static void testRollbackAndDoubleExecution( - final ProcedureExecutor procExec, final long procId, - final int lastStep) throws Exception { + final ProcedureExecutor procExec, final long procId, final int lastStep) + throws Exception { testRollbackAndDoubleExecution(procExec, procId, lastStep, false); } public static void testRollbackAndDoubleExecution( - final ProcedureExecutor procExec, final long procId, - final int lastStep, boolean waitForAsyncProcs) throws Exception { + final ProcedureExecutor procExec, final long procId, final int lastStep, + boolean waitForAsyncProcs) throws Exception { // Execute up to last step testRecoveryAndDoubleExecution(procExec, procId, lastStep, false); // Restart the executor and rollback the step twice - // rollback step N - kill before store update - // restart executor/store - // rollback step N - save on store + // rollback step N - kill before store update + // restart executor/store + // rollback step N - save on store InjectAbortOnLoadListener abortListener = new InjectAbortOnLoadListener(procExec); abortListener.addProcId(procId); procExec.registerListener(abortListener); @@ -538,8 +538,8 @@ public static void testRollbackAndDoubleExecution( // check 3 times to confirm that the procedure executor has not been killed for (int i = 0; i < 3; i++) { if (!procExec.isRunning()) { - LOG.warn("ProcedureExecutor not running, may have been stopped by pending procedure due" + - " to KillAndToggleBeforeStoreUpdate flag."); + LOG.warn("ProcedureExecutor not running, may have been stopped by pending procedure due" + + " to KillAndToggleBeforeStoreUpdate flag."); restartMasterProcedureExecutor(procExec); break; } @@ -553,15 +553,14 @@ public static void testRollbackAndDoubleExecution( } /** - * Execute the procedure up to "lastStep" and then the ProcedureExecutor - * is restarted and an abort() is injected. 
- * If the procedure implement abort() this should result in rollback being triggered. - * At the end of this call the procedure should be finished and rolledback. - * This method assert on the procedure being terminated with an AbortException. + * Execute the procedure up to "lastStep" and then the ProcedureExecutor is restarted and an + * abort() is injected. If the procedure implement abort() this should result in rollback being + * triggered. At the end of this call the procedure should be finished and rolledback. This method + * assert on the procedure being terminated with an AbortException. */ public static void testRollbackRetriableFailure( - final ProcedureExecutor procExec, final long procId, - final int lastStep) throws Exception { + final ProcedureExecutor procExec, final long procId, final int lastStep) + throws Exception { // Execute up to last step testRecoveryAndDoubleExecution(procExec, procId, lastStep, false); @@ -573,9 +572,9 @@ public static void testRollbackRetriableFailure( } /** - * Restart the ProcedureExecutor and inject an abort to the specified procedure. - * If the procedure implement abort() this should result in rollback being triggered. - * At the end of this call the procedure should be finished and rolledback, if abort is implemnted + * Restart the ProcedureExecutor and inject an abort to the specified procedure. If the procedure + * implement abort() this should result in rollback being triggered. At the end of this call the + * procedure should be finished and rolledback, if abort is implemnted */ public static void testRestartWithAbort(ProcedureExecutor procExec, long procId) throws Exception { @@ -624,9 +623,11 @@ public void procedureLoaded(long procId) { } @Override - public void procedureAdded(long procId) { /* no-op */ } + public void procedureAdded(long procId) { + /* no-op */ } @Override - public void procedureFinished(long procId) { /* no-op */ } + public void procedureFinished(long procId) { + /* no-op */ } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java index 5363b7afabbe..cba5fa563ba5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase { @ClassRule @@ -98,11 +98,11 @@ private int getNumReplicas() { private static TableDescriptor createTableDescriptor(TableName tableName, byte[]... 
family) { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(tableName).setValue(StoreFileTrackerFactory.TRACKER_IMPL, - UTIL.getConfiguration().get(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.DEFAULT.name())); + TableDescriptorBuilder.newBuilder(tableName).setValue(StoreFileTrackerFactory.TRACKER_IMPL, + UTIL.getConfiguration().get(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name())); Stream.of(family).map(ColumnFamilyDescriptorBuilder::of) - .forEachOrdered(builder::setColumnFamily); + .forEachOrdered(builder::setColumnFamily); return builder.build(); } @@ -115,11 +115,10 @@ public void testCloneSnapshot() throws Exception { // take the snapshot SnapshotProtos.SnapshotDescription snapshotDesc = getSnapshot(); - long procId = ProcedureTestingUtility.submitAndWait( - procExec, new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc)); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc)); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId)); - MasterProcedureTestingUtility.validateTableIsEnabled( - UTIL.getHBaseCluster().getMaster(), + MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(), clonedTableName); } @@ -132,13 +131,12 @@ public void testCloneSnapshotToSameTable() throws Exception { final TableName clonedTableName = TableName.valueOf(snapshotDesc.getTable()); final TableDescriptor htd = createTableDescriptor(clonedTableName, CF); - long procId = ProcedureTestingUtility.submitAndWait( - procExec, new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc)); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc)); Procedure result = procExec.getResult(procId); assertTrue(result.isFailed()); LOG.debug("Clone snapshot failed with exception: " + result.getException()); - assertTrue( - ProcedureTestingUtility.getExceptionCause(result) instanceof TableExistsException); + assertTrue(ProcedureTestingUtility.getExceptionCause(result) instanceof TableExistsException); } @Test @@ -156,14 +154,13 @@ public void testRecoveryAndDoubleExecution() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Start the Clone snapshot procedure && kill the executor - long procId = procExec.submitProcedure( - new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc)); + long procId = procExec + .submitProcedure(new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc)); // Restart the executor and execute the step twice MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); - MasterProcedureTestingUtility.validateTableIsEnabled( - UTIL.getHBaseCluster().getMaster(), + MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(), clonedTableName); } @@ -187,7 +184,7 @@ public void testRecoverWithRestoreAclFlag() throws Exception { MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); - CloneSnapshotProcedure result = (CloneSnapshotProcedure)procExec.getResult(procId); + CloneSnapshotProcedure result = (CloneSnapshotProcedure) procExec.getResult(procId); // check whether the 'restoreAcl' flag is true after deserialization from Pb. 
assertEquals(true, result.getRestoreAcl()); } @@ -205,13 +202,13 @@ public void testRollbackAndDoubleExecution() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Start the Clone snapshot procedure && kill the executor - long procId = procExec.submitProcedure( - new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc)); + long procId = procExec + .submitProcedure(new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc)); int lastStep = 2; // failing before CLONE_SNAPSHOT_WRITE_FS_LAYOUT MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep); - MasterProcedureTestingUtility.validateTableDeletion( - UTIL.getHBaseCluster().getMaster(), clonedTableName); + MasterProcedureTestingUtility.validateTableDeletion(UTIL.getHBaseCluster().getMaster(), + clonedTableName); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedureFileBasedSFT.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedureFileBasedSFT.java index f3ae1283b48a..d5903fa9ddd8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedureFileBasedSFT.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedureFileBasedSFT.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.Trackers.FILE; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -26,7 +27,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({ MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestCloneSnapshotProcedureFileBasedSFT extends TestCloneSnapshotProcedure { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java index 5bffea7ca52d..9f4a413e5811 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestCreateNamespaceProcedure { @ClassRule @@ -89,8 +89,8 @@ public void testCreateNamespace() throws Exception { final NamespaceDescriptor nsd = NamespaceDescriptor.create("testCreateNamespace").build(); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - long procId = procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); @@ -104,15 +104,15 @@ public void testCreateSameNamespaceTwice() throws Exception { NamespaceDescriptor.create("testCreateSameNamespaceTwice").build(); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - long procId1 = procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId1 = + procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); // Create the namespace that exists - long procId2 = procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId2 = + procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId2); @@ -130,8 +130,8 @@ public void testCreateSystemNamespace() throws Exception { UTIL.getAdmin().getNamespaceDescriptor(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - long procId = procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); Procedure result = procExec.getResult(procId); @@ -151,8 +151,8 @@ public void testCreateNamespaceWithInvalidRegionCount() throws Exception { nsd.setConfiguration(nsKey, nsValue); - long procId = procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); Procedure result = procExec.getResult(procId); @@ -171,8 +171,8 @@ public void testCreateNamespaceWithInvalidTableCount() throws Exception { nsd.setConfiguration(nsKey, nsValue); - long procId = procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); Procedure result = procExec.getResult(procId); @@ -191,8 +191,8 @@ public void testRecoveryAndDoubleExecution() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Start the CreateNamespace procedure && kill the executor - long procId = 
procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); // Restart the executor and execute the step twice MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); @@ -212,8 +212,8 @@ public void testRollbackAndDoubleExecution() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Start the CreateNamespace procedure && kill the executor - long procId = procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); int lastStep = 2; // failing before CREATE_NAMESPACE_CREATE_DIRECTORY MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep); @@ -233,8 +233,7 @@ private ProcedureExecutor getMasterProcedureExecutor() { } private void validateNamespaceCreated(NamespaceDescriptor nsd) throws IOException { - NamespaceDescriptor createdNsDescriptor = - UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); + NamespaceDescriptor createdNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); assertNotNull(createdNsDescriptor); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableNoRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableNoRegionServer.java index eff1cb5c8207..8f7a733d3aba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableNoRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableNoRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,14 +55,14 @@ public class TestCreateTableNoRegionServer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCreateTableNoRegionServer.class); + HBaseClassTestRule.forClass(TestCreateTableNoRegionServer.class); private static final Logger LOG = LoggerFactory.getLogger(TestCreateTableNoRegionServer.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = TableName.valueOf("test"); - + private static byte[] FAMILY = Bytes.toBytes("f1"); private static CountDownLatch ARRIVE; @@ -78,12 +78,12 @@ public HMasterForTest(Configuration conf) throws IOException { private boolean isInAssignRegionsState() { try { for (StackTraceElement e : Thread.currentThread().getStackTrace()) { - if (e.getClassName().equals(CreateTableProcedure.class.getName()) && - e.getMethodName().equals("executeFromState")) { + if (e.getClassName().equals(CreateTableProcedure.class.getName()) + && e.getMethodName().equals("executeFromState")) { for (Procedure proc : getProcedures()) { - if (proc instanceof CreateTableProcedure && !proc.isFinished() && - ((CreateTableProcedure) proc) - .getCurrentStateId() == CreateTableState.CREATE_TABLE_ASSIGN_REGIONS_VALUE) { + if (proc instanceof CreateTableProcedure && !proc.isFinished() + && ((CreateTableProcedure) proc) + .getCurrentStateId() == CreateTableState.CREATE_TABLE_ASSIGN_REGIONS_VALUE) { return true; } } @@ -130,7 +130,7 @@ public static void tearDown() throws Exception { @Test public void testCreate() throws Exception { TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); Admin admin = UTIL.getAdmin(); ARRIVE = new CountDownLatch(1); RESUME = new CountDownLatch(1); @@ -148,9 +148,9 @@ public void testCreate() throws Exception { // the procedure should still be in the CREATE_TABLE_ASSIGN_REGIONS state, but here, we just // warn it as it may cause more serious problem later. 
for (Procedure proc : UTIL.getMiniHBaseCluster().getMaster().getProcedures()) { - if (proc instanceof CreateTableProcedure && !proc.isFinished() && - ((CreateTableProcedure) proc) - .getCurrentStateId() != CreateTableState.CREATE_TABLE_ASSIGN_REGIONS_VALUE) { + if (proc instanceof CreateTableProcedure && !proc.isFinished() + && ((CreateTableProcedure) proc) + .getCurrentStateId() != CreateTableState.CREATE_TABLE_ASSIGN_REGIONS_VALUE) { LOG.warn("Create table procedure {} assigned regions without a region server!", proc); } } @@ -158,10 +158,9 @@ public void testCreate() throws Exception { // the creation should finally be done future.get(30, TimeUnit.SECONDS); // make sure we could put to the table - try (Table table = UTIL.getConnection().getTableBuilder(TABLE_NAME, null) - .setOperationTimeout(5000).build()) { - table.put(new Put(Bytes.toBytes(0)).addColumn(FAMILY, - Bytes.toBytes("q"), Bytes.toBytes(0))); + try (Table table = + UTIL.getConnection().getTableBuilder(TABLE_NAME, null).setOperationTimeout(5000).build()) { + table.put(new Put(Bytes.toBytes(0)).addColumn(FAMILY, Bytes.toBytes("q"), Bytes.toBytes(0))); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java index bb9985e53143..dfc93c8dd629 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,12 +56,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestCreateTableProcedure extends TestTableDDLProcedureBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCreateTableProcedure.class); + HBaseClassTestRule.forClass(TestCreateTableProcedure.class); private static final String F1 = "f1"; private static final String F2 = "f2"; @@ -79,15 +79,14 @@ public void testSimpleCreate() throws Exception { @Test public void testSimpleCreateWithSplits() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") - }; + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; testSimpleCreate(tableName, splitKeys); } private void testSimpleCreate(final TableName tableName, byte[][] splitKeys) throws Exception { - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - getMasterProcedureExecutor(), tableName, splitKeys, F1, F2); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), + tableName, splitKeys, F1, F2); MasterProcedureTestingUtility.validateTableCreation(getMaster(), tableName, regions, F1, F2); } @@ -127,24 +126,23 @@ public void testCreateWithoutColumnFamily() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); // create table with 0 families will fail final TableDescriptorBuilder builder = - 
TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName)); + TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName)); // disable sanity check builder.setValue(TableDescriptorChecker.TABLE_SANITY_CHECKS, Boolean.FALSE.toString()); TableDescriptor htd = builder.build(); final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null); - long procId = - ProcedureTestingUtility.submitAndWait(procExec, - new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); final Procedure result = procExec.getResult(procId); assertEquals(true, result.isFailed()); Throwable cause = ProcedureTestingUtility.getExceptionCause(result); assertTrue("expected DoNotRetryIOException, got " + cause, - cause instanceof DoNotRetryIOException); + cause instanceof DoNotRetryIOException); } - @Test(expected=TableExistsException.class) + @Test(expected = TableExistsException.class) public void testCreateExisting() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); @@ -152,13 +150,13 @@ public void testCreateExisting() throws Exception { final RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, null); // create the table - long procId1 = procExec.submitProcedure( - new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + long procId1 = + procExec.submitProcedure(new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); // create another with the same name ProcedurePrepareLatch latch2 = new ProcedurePrepareLatch.CompatibilityLatch(); - long procId2 = procExec.submitProcedure( - new CreateTableProcedure(procExec.getEnvironment(), htd, regions, latch2)); + long procId2 = procExec + .submitProcedure(new CreateTableProcedure(procExec.getEnvironment(), htd, regions, latch2)); ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1)); @@ -179,8 +177,8 @@ public void testRecoveryAndDoubleExecution() throws Exception { byte[][] splitKeys = null; TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2"); RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys); - long procId = procExec.submitProcedure( - new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + long procId = + procExec.submitProcedure(new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); // Restart the executor and execute the step twice MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); @@ -191,18 +189,16 @@ public void testRecoveryAndDoubleExecution() throws Exception { public void testRollbackAndDoubleExecution() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); testRollbackAndDoubleExecution(TableDescriptorBuilder - .newBuilder(MasterProcedureTestingUtility.createHTD(tableName, F1, F2))); + .newBuilder(MasterProcedureTestingUtility.createHTD(tableName, F1, F2))); } @Test public void testRollbackAndDoubleExecutionOnMobTable() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, F1, F2); - TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd) - 
.modifyColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(htd.getColumnFamily(Bytes.toBytes(F1))) - .setMobEnabled(true) - .build()); + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(htd).modifyColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(htd.getColumnFamily(Bytes.toBytes(F1))).setMobEnabled(true).build()); testRollbackAndDoubleExecution(builder); } @@ -212,14 +208,13 @@ private void testRollbackAndDoubleExecution(TableDescriptorBuilder builder) thro ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Start the Create procedure && kill the executor - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") - }; + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; builder.setRegionReplication(3); TableDescriptor htd = builder.build(); RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys); - long procId = procExec.submitProcedure( - new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + long procId = + procExec.submitProcedure(new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); int lastStep = 2; // failing before CREATE_TABLE_WRITE_FS_LAYOUT MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep); @@ -241,17 +236,17 @@ public CreateTableProcedureOnHDFSFailure() { } public CreateTableProcedureOnHDFSFailure(final MasterProcedureEnv env, - final TableDescriptor tableDescriptor, final RegionInfo[] newRegions) - throws HBaseIOException { + final TableDescriptor tableDescriptor, final RegionInfo[] newRegions) + throws HBaseIOException { super(env, tableDescriptor, newRegions); } @Override protected Flow executeFromState(MasterProcedureEnv env, - MasterProcedureProtos.CreateTableState state) throws InterruptedException { + MasterProcedureProtos.CreateTableState state) throws InterruptedException { - if (!failOnce && - state == MasterProcedureProtos.CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT) { + if (!failOnce + && state == MasterProcedureProtos.CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT) { try { // To emulate an HDFS failure, create only the first region directory RegionInfo regionInfo = getFirstRegionInfo(); @@ -280,9 +275,8 @@ public void testOnHDFSFailure() throws Exception { // create the table final ProcedureExecutor procExec = getMasterProcedureExecutor(); - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") - }; + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2"); RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys); long procId = ProcedureTestingUtility.submitAndWait(procExec, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedureMuitipleRegions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedureMuitipleRegions.java index 10761e5c144d..bd66dced29d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedureMuitipleRegions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedureMuitipleRegions.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestCreateTableProcedureMuitipleRegions { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCreateTableProcedureMuitipleRegions.class); + HBaseClassTestRule.forClass(TestCreateTableProcedureMuitipleRegions.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -60,7 +60,7 @@ public void testMRegions() throws Exception { } TableDescriptor htd = - MasterProcedureTestingUtility.createHTD(TableName.valueOf("TestMRegions"), F1, F2); + MasterProcedureTestingUtility.createHTD(TableName.valueOf("TestMRegions"), F1, F2); UTIL.getAdmin().createTableAsync(htd, splitKeys).get(10, java.util.concurrent.TimeUnit.HOURS); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableWithMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableWithMasterFailover.java index 3d4740a397b6..78eb3af7246f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableWithMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableWithMasterFailover.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestCreateTableWithMasterFailover extends MasterFailoverWithProcedu @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCreateTableWithMasterFailover.class); + HBaseClassTestRule.forClass(TestCreateTableWithMasterFailover.class); // ========================================================================== // Test Create Table @@ -65,7 +65,7 @@ private void testCreateWithFailoverAtStep(final int step) throws Exception { TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2"); RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys); long procId = - procExec.submitProcedure(new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + procExec.submitProcedure(new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); testRecoveryAndDoubleExecution(UTIL, procId, step); MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java index adb528aaec7c..1cb2d0281ec7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestDeleteColumnFamilyProcedureFromClient { @ClassRule @@ -58,10 +58,9 @@ public class TestDeleteColumnFamilyProcedureFromClient { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final TableName TABLENAME = - TableName.valueOf("column_family_handlers"); - private static final byte[][] FAMILIES = new byte[][] { Bytes.toBytes("cf1"), - Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; + private static final TableName TABLENAME = TableName.valueOf("column_family_handlers"); + private static final byte[][] FAMILIES = + new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; /** * Start up a mini cluster and put a small table of empty regions into it. @@ -134,8 +133,7 @@ public boolean accept(Path p) { }); int k = 1; for (int j = 0; j < cf.length; j++) { - if (cf[j].isDirectory() == true - && cf[j].getPath().getName().startsWith(".") == false) { + if (cf[j].isDirectory() == true && cf[j].getPath().getName().startsWith(".") == false) { assertEquals(cf[j].getPath().getName(), "cf" + k); k++; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java index 4a5f225c10ef..60ccade65f06 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestDeleteNamespaceProcedure { @ClassRule @@ -88,7 +88,7 @@ public void setup() throws Exception { @After public void tearDown() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); - for (TableDescriptor htd: UTIL.getAdmin().listTableDescriptors()) { + for (TableDescriptor htd : UTIL.getAdmin().listTableDescriptors()) { LOG.info("Tear down, remove table=" + htd.getTableName()); UTIL.deleteTable(htd.getTableName()); } @@ -101,8 +101,8 @@ public void testDeleteNamespace() throws Exception { createNamespaceForTesting(namespaceName); - long procId = procExec.submitProcedure( - new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); + long procId = procExec + .submitProcedure(new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); @@ -117,8 +117,8 @@ public void testDeleteNonExistNamespace() throws Exception { validateNamespaceNotExist(namespaceName); - long procId = procExec.submitProcedure( - new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); + long procId = procExec + .submitProcedure(new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); // Expect fail with NamespaceNotFoundException @@ -134,8 +134,8 @@ public void testDeleteSystemNamespace() throws Exception { final String namespaceName = NamespaceDescriptor.SYSTEM_NAMESPACE.getName(); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - long procId = procExec.submitProcedure( - new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); + long procId = procExec + .submitProcedure(new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); Procedure result = procExec.getResult(procId); @@ -147,15 +147,16 @@ public void testDeleteSystemNamespace() throws Exception { @Test public void testDeleteNonEmptyNamespace() throws Exception { final String namespaceName = "testDeleteNonExistNamespace"; - final TableName tableName = TableName.valueOf("testDeleteNonExistNamespace:" + name.getMethodName()); + final TableName tableName = + TableName.valueOf("testDeleteNonExistNamespace:" + name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); // create namespace createNamespaceForTesting(namespaceName); // create the table under the new namespace MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1"); - long procId = procExec.submitProcedure( - new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); + long procId = procExec + .submitProcedure(new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); Procedure result = procExec.getResult(procId); @@ -175,8 +176,8 @@ public void testRecoveryAndDoubleExecution() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Start the DeleteNamespace procedure && kill the executor 
- long procId = procExec.submitProcedure( - new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); + long procId = procExec + .submitProcedure(new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); // Restart the executor and execute the step twice MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); @@ -197,15 +198,14 @@ public void testRollbackAndDoubleExecution() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Start the DeleteNamespace procedure && kill the executor - long procId = procExec.submitProcedure( - new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); + long procId = procExec + .submitProcedure(new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); int lastStep = 2; // failing before DELETE_NAMESPACE_DELETE_FROM_NS_TABLE MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep); // Validate the namespace still exists - NamespaceDescriptor createdNsDescriptor= - UTIL.getAdmin().getNamespaceDescriptor(namespaceName); + NamespaceDescriptor createdNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(namespaceName); assertNotNull(createdNsDescriptor); } @@ -217,8 +217,8 @@ private void createNamespaceForTesting(final String namespaceName) throws Except final NamespaceDescriptor nsd = NamespaceDescriptor.create(namespaceName).build(); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - long procId = procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java index 9367a575958b..4a9283425ac5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,8 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestDeleteTableProcedure extends TestTableDDLProcedureBase { @ClassRule @@ -51,20 +50,21 @@ public class TestDeleteTableProcedure extends TestTableDDLProcedureBase { HBaseClassTestRule.forClass(TestDeleteTableProcedure.class); private static final Logger LOG = LoggerFactory.getLogger(TestDeleteTableProcedure.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); - @Test(expected=TableNotFoundException.class) + @Test(expected = TableNotFoundException.class) public void testDeleteNotExistentTable() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); ProcedurePrepareLatch latch = new ProcedurePrepareLatch.CompatibilityLatch(); long procId = ProcedureTestingUtility.submitAndWait(procExec, - new DeleteTableProcedure(procExec.getEnvironment(), tableName, latch)); + new DeleteTableProcedure(procExec.getEnvironment(), tableName, latch)); latch.await(); } - @Test(expected=TableNotDisabledException.class) + @Test(expected = TableNotDisabledException.class) public void testDeleteNotDisabledTable() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); @@ -73,7 +73,7 @@ public void testDeleteNotDisabledTable() throws Exception { ProcedurePrepareLatch latch = new ProcedurePrepareLatch.CompatibilityLatch(); long procId = ProcedureTestingUtility.submitAndWait(procExec, - new DeleteTableProcedure(procExec.getEnvironment(), tableName, latch)); + new DeleteTableProcedure(procExec.getEnvironment(), tableName, latch)); latch.await(); } @@ -82,16 +82,16 @@ public void testDeleteDeletedTable() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, "f"); + RegionInfo[] regions = + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); UTIL.getAdmin().disableTable(tableName); // delete the table (that exists) - long procId1 = procExec.submitProcedure( - new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + long procId1 = + procExec.submitProcedure(new DeleteTableProcedure(procExec.getEnvironment(), tableName)); // delete the table (that will no longer exist) - long procId2 = procExec.submitProcedure( - new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + long procId2 = + procExec.submitProcedure(new DeleteTableProcedure(procExec.getEnvironment(), tableName)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId1); @@ -118,17 +118,16 @@ public void testSimpleDelete() throws Exception { @Test public void testSimpleDeleteWithSplits() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") - }; + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; testSimpleDelete(tableName, splitKeys); } @Test public void testDeleteFromMeta() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - 
RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - getMasterProcedureExecutor(), tableName, null, "f1", "f2"); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), + tableName, null, "f1", "f2"); List regionsList = new ArrayList<>(); UTIL.getAdmin().disableTable(tableName); MasterProcedureEnv procedureEnv = getMasterProcedureExecutor().getEnvironment(); @@ -140,8 +139,8 @@ public void testDeleteFromMeta() throws Exception { } private void testSimpleDelete(final TableName tableName, byte[][] splitKeys) throws Exception { - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), + tableName, splitKeys, "f1", "f2"); UTIL.getAdmin().disableTable(tableName); // delete the table @@ -158,8 +157,8 @@ public void testRecoveryAndDoubleExecution() throws Exception { // create the table byte[][] splitKeys = null; - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), + tableName, splitKeys, "f1", "f2"); UTIL.getAdmin().disableTable(tableName); final ProcedureExecutor procExec = getMasterProcedureExecutor(); @@ -167,8 +166,8 @@ public void testRecoveryAndDoubleExecution() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Start the Delete procedure && kill the executor - long procId = procExec.submitProcedure( - new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + long procId = + procExec.submitProcedure(new DeleteTableProcedure(procExec.getEnvironment(), tableName)); // Restart the executor and execute the step twice MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableWithMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableWithMasterFailover.java index 66990da40fa0..a7bf58e42f7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableWithMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableWithMasterFailover.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestDeleteTableWithMasterFailover extends MasterFailoverWithProcedu @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDeleteTableWithMasterFailover.class); + HBaseClassTestRule.forClass(TestDeleteTableWithMasterFailover.class); // ========================================================================== // Test Delete Table @@ -67,7 +67,7 @@ private void testDeleteWithFailoverAtStep(final int step) throws Exception { // Start the Delete procedure && kill the executor long procId = - procExec.submitProcedure(new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + procExec.submitProcedure(new DeleteTableProcedure(procExec.getEnvironment(), tableName)); testRecoveryAndDoubleExecution(UTIL, procId, step); MasterProcedureTestingUtility.validateTableDeletion(UTIL.getHBaseCluster().getMaster(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java index 2694d35970ee..016d03bd2e1d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestDisableTableProcedure extends TestTableDDLProcedureBase { @ClassRule @@ -46,7 +46,8 @@ public class TestDisableTableProcedure extends TestTableDDLProcedureBase { private static final Logger LOG = LoggerFactory.getLogger(TestDisableTableProcedure.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); @Test public void testDisableTable() throws Exception { @@ -56,8 +57,8 @@ public void testDisableTable() throws Exception { MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); // Disable the table - long procId = procExec.submitProcedure( - new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); + long procId = procExec + .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); @@ -72,8 +73,8 @@ public void testDisableTableMultipleTimes() throws Exception { MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); // Disable the table - long procId1 = procExec.submitProcedure(new DisableTableProcedure( - procExec.getEnvironment(), tableName, false)); + long procId1 = procExec + .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); @@ -84,8 +85,8 @@ public void testDisableTableMultipleTimes() throws Exception { Throwable e = null; Throwable cause = null; try { - long procId2 = procExec.submitProcedure(new DisableTableProcedure( - 
procExec.getEnvironment(), tableName, false)); + long procId2 = procExec + .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId2); Procedure result = procExec.getResult(procId2); @@ -104,8 +105,8 @@ public void testDisableTableMultipleTimes() throws Exception { try { final ProcedurePrepareLatch prepareLatch = new ProcedurePrepareLatch.CompatibilityLatch(); - long procId3 = procExec.submitProcedure(new DisableTableProcedure( - procExec.getEnvironment(), tableName, false, prepareLatch)); + long procId3 = procExec.submitProcedure( + new DisableTableProcedure(procExec.getEnvironment(), tableName, false, prepareLatch)); prepareLatch.await(); Assert.fail("Disable should throw exception through latch."); } catch (TableNotEnabledException tnee) { @@ -115,8 +116,8 @@ public void testDisableTableMultipleTimes() throws Exception { // Disable the table again with skipping table state check flag (simulate recovery scenario) try { - long procId4 = procExec.submitProcedure(new DisableTableProcedure( - procExec.getEnvironment(), tableName, true)); + long procId4 = procExec + .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, true)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId4); ProcedureTestingUtility.assertProcNotFailed(procExec, procId4); @@ -132,16 +133,15 @@ public void testRecoveryAndDoubleExecution() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final ProcedureExecutor procExec = getMasterProcedureExecutor(); - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") - }; + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2"); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Start the Disable procedure && kill the executor - long procId = procExec.submitProcedure( - new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); + long procId = procExec + .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); // Restart the executor and execute the step twice MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableWithMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableWithMasterFailover.java index 4a54b1c84988..3370780ba16c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableWithMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableWithMasterFailover.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestDisableTableWithMasterFailover extends MasterFailoverWithProced @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDisableTableWithMasterFailover.class); + HBaseClassTestRule.forClass(TestDisableTableWithMasterFailover.class); // ========================================================================== // Test Disable Table @@ -56,7 +56,7 @@ private void testDisableTableWithFailoverAtStep(final int step) throws Exception // create the table final byte[][] splitKeys = - new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); @@ -65,7 +65,7 @@ private void testDisableTableWithFailoverAtStep(final int step) throws Exception // Start the Delete procedure && kill the executor long procId = procExec - .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); + .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); testRecoveryAndDoubleExecution(UTIL, procId, step); MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java index 3a14bae112c8..690fe8ae7bb2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertTrue; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; @@ -40,7 +41,7 @@ public class TestEnableTableProcedure extends TestTableDDLProcedureBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEnableTableProcedure.class); + HBaseClassTestRule.forClass(TestEnableTableProcedure.class); private static final Logger LOG = LoggerFactory.getLogger(TestEnableTableProcedure.class); @@ -57,7 +58,7 @@ public void testEnableTable() throws Exception { // Enable the table long procId = - procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); + procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); @@ -73,7 +74,7 @@ public void testEnableNonDisabledTable() throws Exception { // Enable the table - expect failure long procId1 = - procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); + procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); ProcedureTestingUtility.waitProcedure(procExec, procId1); Procedure result = procExec.getResult(procId1); @@ -95,7 +96,7 @@ public void testRecoveryAndDoubleExecution() throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); final byte[][] splitKeys = - new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2"); UTIL.getAdmin().disableTable(tableName); ProcedureTestingUtility.waitNoProcedureRunning(procExec); @@ -104,7 +105,7 @@ public void testRecoveryAndDoubleExecution() throws Exception { // Start the Enable procedure && kill the executor long procId = - procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); + procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); // Restart the executor and execute the step twice MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); @@ -118,7 +119,7 @@ public void testRollbackAndDoubleExecution() throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); final byte[][] splitKeys = - new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2"); UTIL.getAdmin().disableTable(tableName); ProcedureTestingUtility.waitNoProcedureRunning(procExec); @@ -126,7 +127,7 @@ public void testRollbackAndDoubleExecution() throws Exception { // Start the Enable procedure && kill the executor long procId = - procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); + procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); int lastStep = 3; // fail before ENABLE_TABLE_SET_ENABLING_TABLE_STATE MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, 
lastStep); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableWithMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableWithMasterFailover.java index 0e47cf4148eb..33b8161bd4cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableWithMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableWithMasterFailover.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestEnableTableWithMasterFailover extends MasterFailoverWithProcedu @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEnableTableWithMasterFailover.class); + HBaseClassTestRule.forClass(TestEnableTableWithMasterFailover.class); // ========================================================================== // Test Enable Table @@ -55,7 +55,7 @@ private void testEnableTableWithFailoverAtStep(final int step) throws Exception // create the table final byte[][] splitKeys = - new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); UTIL.getAdmin().disableTable(tableName); @@ -65,7 +65,7 @@ private void testEnableTableWithFailoverAtStep(final int step) throws Exception // Start the Delete procedure && kill the executor long procId = - procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); + procExec.submitProcedure(new EnableTableProcedure(procExec.getEnvironment(), tableName)); testRecoveryAndDoubleExecution(UTIL, procId, step); MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFastFailOnProcedureNotRegistered.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFastFailOnProcedureNotRegistered.java index 48602dc412f9..0a2e3130e5a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFastFailOnProcedureNotRegistered.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestFastFailOnProcedureNotRegistered.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,18 +29,18 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestFastFailOnProcedureNotRegistered extends TestTableDDLProcedureBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestFastFailOnProcedureNotRegistered.class); - @Test(expected=DoNotRetryIOException.class) + @Test(expected = DoNotRetryIOException.class) public void testFastFailOnProcedureNotRegistered() throws IOException { Admin admin = UTIL.getAdmin(); Map props = new HashMap(); - admin.execProcedure("fake1","fake2", props); + admin.execProcedure("fake1", "fake2", props); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java index ff84afdabab4..e23e86051f74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java @@ -54,14 +54,13 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; - /** - * Test of the HBCK-version of SCP. - * The HBCKSCP is an SCP only it reads hbase:meta for list of Regions that were - * on the server-to-process rather than consult Master in-memory-state. + * Test of the HBCK-version of SCP. The HBCKSCP is an SCP only it reads hbase:meta for list of + * Regions that were on the server-to-process rather than consult Master in-memory-state. */ @Category({ MasterTests.class, LargeTests.class }) public class TestHBCKSCP extends TestSCPBase { @@ -102,7 +101,7 @@ public void test() throws Exception { Result r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName()); // Assert region is OPEN. assertEquals(RegionState.State.OPEN.toString(), - Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER))); + Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER))); ServerName serverName = CatalogFamilyFormat.getServerName(r, 0); assertEquals(rsServerName, serverName); // moveFrom adds to dead servers and adds it to processing list only we will @@ -128,7 +127,7 @@ public void test() throws Exception { // Assert region is OPEN on dead server still. r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName()); assertEquals(RegionState.State.OPEN.toString(), - Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER))); + Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER))); serverName = CatalogFamilyFormat.getServerName(r, 0); assertNotNull(cluster.getRegionServer(serverName)); assertEquals(rsServerName, serverName); @@ -143,7 +142,7 @@ public void test() throws Exception { // After SCP, assert region is OPEN on new server. 
r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName()); assertEquals(RegionState.State.OPEN.toString(), - Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER))); + Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER))); serverName = CatalogFamilyFormat.getServerName(r, 0); assertNotNull(cluster.getRegionServer(serverName)); assertNotEquals(rsServerName, serverName); @@ -152,10 +151,9 @@ public void test() throws Exception { } protected long scheduleHBCKSCP(ServerName rsServerName, HMaster master) throws ServiceException { - MasterProtos.ScheduleServerCrashProcedureResponse response = - master.getMasterRpcServices().scheduleServerCrashProcedure(null, - MasterProtos.ScheduleServerCrashProcedureRequest.newBuilder(). - addServerName(ProtobufUtil.toServerName(rsServerName)).build()); + MasterProtos.ScheduleServerCrashProcedureResponse response = master.getMasterRpcServices() + .scheduleServerCrashProcedure(null, MasterProtos.ScheduleServerCrashProcedureRequest + .newBuilder().addServerName(ProtobufUtil.toServerName(rsServerName)).build()); assertEquals(1, response.getPidCount()); long pid = response.getPid(0); return pid; @@ -166,8 +164,8 @@ protected long scheduleHBCKSCP(ServerName rsServerName, HMaster master) throws S */ private boolean searchMeta(HMaster master, ServerName sn) throws IOException { List> ps = - MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), null); - for (Pair p: ps) { + MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), null); + for (Pair p : ps) { if (p.getSecond().equals(sn)) { return true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCPUnknown.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCPUnknown.java index 6702f4023398..18418e00d961 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCPUnknown.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCPUnknown.java @@ -24,17 +24,16 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; - import org.junit.ClassRule; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; /** - * Test of the HBCK-version of SCP. - * The HBCKSCP is an SCP only it reads hbase:meta for list of Regions that were - * on the server-to-process rather than consult Master in-memory-state. + * Test of the HBCK-version of SCP. The HBCKSCP is an SCP only it reads hbase:meta for list of + * Regions that were on the server-to-process rather than consult Master in-memory-state. 
*/ @Category({ MasterTests.class, LargeTests.class }) public class TestHBCKSCPUnknown extends TestHBCKSCP { @@ -47,7 +46,7 @@ public class TestHBCKSCPUnknown extends TestHBCKSCP { protected long scheduleHBCKSCP(ServerName rsServerName, HMaster master) throws ServiceException { MasterProtos.ScheduleSCPsForUnknownServersResponse response = master.getMasterRpcServices().scheduleSCPsForUnknownServers(null, - MasterProtos.ScheduleSCPsForUnknownServersRequest.newBuilder().build()); + MasterProtos.ScheduleSCPsForUnknownServersRequest.newBuilder().build()); assertEquals(1, response.getPidCount()); long pid = response.getPid(0); return pid; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestIgnoreUnknownFamily.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestIgnoreUnknownFamily.java index 06bacdb8c460..10058fe1bcdd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestIgnoreUnknownFamily.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestIgnoreUnknownFamily.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.master.procedure; @@ -81,7 +88,7 @@ public void tearDownAfterTest() throws IOException { private void addStoreFileToKnownFamily(RegionInfo region) throws IOException { MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path regionDir = - FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(mfs.getConfiguration()), region); + FSUtils.getRegionDirFromRootDir(CommonFSUtils.getRootDir(mfs.getConfiguration()), region); Path familyDir = new Path(regionDir, Bytes.toString(UNKNOWN_FAMILY)); StoreFileWriter writer = new StoreFileWriter.Builder(mfs.getConfiguration(), mfs.getFileSystem()) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterObserverPostCalls.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterObserverPostCalls.java index 2d29aefc7535..ab64c9c07be3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterObserverPostCalls.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterObserverPostCalls.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,9 +52,10 @@ import org.slf4j.LoggerFactory; /** - * Tests class that validates that "post" observer hook methods are only invoked when the operation was successful. + * Tests class that validates that "post" observer hook methods are only invoked when the operation + * was successful. */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterObserverPostCalls { @ClassRule @@ -73,7 +74,7 @@ public static void setupCluster() throws Exception { private static void setupConf(Configuration conf) { conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); conf.set(MasterCoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - MasterObserverForTest.class.getName()); + MasterObserverForTest.class.getName()); } @AfterClass @@ -105,28 +106,25 @@ public void postDeleteNamespace(ObserverContext ct } @Override - public void postModifyNamespace( - ObserverContext ctx, NamespaceDescriptor oldNsDesc, - NamespaceDescriptor currentNsDesc) { + public void postModifyNamespace(ObserverContext ctx, + NamespaceDescriptor oldNsDesc, NamespaceDescriptor currentNsDesc) { postHookCalls.incrementAndGet(); } @Override - public void postCreateNamespace( - ObserverContext ctx, NamespaceDescriptor desc) { + public void postCreateNamespace(ObserverContext ctx, + NamespaceDescriptor desc) { postHookCalls.incrementAndGet(); } @Override - public void postCreateTable( - ObserverContext ctx, TableDescriptor td, - RegionInfo[] regions) { + public void postCreateTable(ObserverContext ctx, + TableDescriptor td, RegionInfo[] regions) { postHookCalls.incrementAndGet(); } @Override - public void postModifyTable( - ObserverContext ctx, TableName tn, + public void postModifyTable(ObserverContext ctx, TableName tn, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor) { postHookCalls.incrementAndGet(); } @@ -154,8 +152,8 @@ public void testPostDeleteNamespace() throws IOException { .build()); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); - MasterObserverForTest observer = master.getMasterCoprocessorHost().findCoprocessor( - MasterObserverForTest.class); + MasterObserverForTest observer = + 
master.getMasterCoprocessorHost().findCoprocessor(MasterObserverForTest.class); int preCount = observer.postHookCalls.get(); try { admin.deleteNamespace(ns); @@ -165,7 +163,7 @@ public void testPostDeleteNamespace() throws IOException { } int postCount = observer.postHookCalls.get(); assertEquals("Expected no invocations of postDeleteNamespace when the operation fails", - preCount, postCount); + preCount, postCount); // Disable and delete the table so that we can delete the NS. admin.disableTable(tn1); @@ -187,8 +185,8 @@ public void testPostModifyNamespace() throws IOException { admin.createNamespace(nsDesc); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); - MasterObserverForTest observer = master.getMasterCoprocessorHost().findCoprocessor( - MasterObserverForTest.class); + MasterObserverForTest observer = + master.getMasterCoprocessorHost().findCoprocessor(MasterObserverForTest.class); int preCount = observer.postHookCalls.get(); try { admin.modifyNamespace(NamespaceDescriptor.create("nonexistent").build()); @@ -198,12 +196,12 @@ public void testPostModifyNamespace() throws IOException { } int postCount = observer.postHookCalls.get(); assertEquals("Expected no invocations of postModifyNamespace when the operation fails", - preCount, postCount); + preCount, postCount); // Validate that the postDeletNS hook is invoked preCount = observer.postHookCalls.get(); - admin.modifyNamespace( - NamespaceDescriptor.create(nsDesc).addConfiguration("foo", "bar").build()); + admin + .modifyNamespace(NamespaceDescriptor.create(nsDesc).addConfiguration("foo", "bar").build()); postCount = observer.postHookCalls.get(); assertEquals("Expected 1 invocation of postModifyNamespace", preCount + 1, postCount); } @@ -214,8 +212,8 @@ public void testPostCreateNamespace() throws IOException { final String ns = "postcreatens"; HMaster master = UTIL.getMiniHBaseCluster().getMaster(); - MasterObserverForTest observer = master.getMasterCoprocessorHost().findCoprocessor( - MasterObserverForTest.class); + MasterObserverForTest observer = + master.getMasterCoprocessorHost().findCoprocessor(MasterObserverForTest.class); // Validate that the post hook is called int preCount = observer.postHookCalls.get(); @@ -234,19 +232,20 @@ public void testPostCreateNamespace() throws IOException { } postCount = observer.postHookCalls.get(); assertEquals("Expected no invocations of postModifyNamespace when the operation fails", - preCount, postCount); + preCount, postCount); } @Test public void testPostCreateTable() throws IOException { final Admin admin = UTIL.getAdmin(); final TableName tn = TableName.valueOf("postcreatetable"); - final TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()).build(); + final TableDescriptor td = TableDescriptorBuilder.newBuilder(tn) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()) + .build(); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); - MasterObserverForTest observer = master.getMasterCoprocessorHost().findCoprocessor( - MasterObserverForTest.class); + MasterObserverForTest observer = + master.getMasterCoprocessorHost().findCoprocessor(MasterObserverForTest.class); // Validate that the post hook is called int preCount = observer.postHookCalls.get(); @@ -263,20 +262,21 @@ public void testPostCreateTable() throws IOException { // Pass } postCount = observer.postHookCalls.get(); - assertEquals("Expected no invocations of postCreateTable when the 
operation fails", - preCount, postCount); + assertEquals("Expected no invocations of postCreateTable when the operation fails", preCount, + postCount); } @Test public void testPostModifyTable() throws IOException { final Admin admin = UTIL.getAdmin(); final TableName tn = TableName.valueOf("postmodifytable"); - final TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()).build(); + final TableDescriptor td = TableDescriptorBuilder.newBuilder(tn) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()) + .build(); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); - MasterObserverForTest observer = master.getMasterCoprocessorHost().findCoprocessor( - MasterObserverForTest.class); + MasterObserverForTest observer = + master.getMasterCoprocessorHost().findCoprocessor(MasterObserverForTest.class); // Create the table admin.createTable(td); @@ -297,20 +297,21 @@ public void testPostModifyTable() throws IOException { // Pass } postCount = observer.postHookCalls.get(); - assertEquals("Expected no invocations of postModifyTable when the operation fails", - preCount, postCount); + assertEquals("Expected no invocations of postModifyTable when the operation fails", preCount, + postCount); } @Test public void testPostDisableTable() throws IOException { final Admin admin = UTIL.getAdmin(); final TableName tn = TableName.valueOf("postdisabletable"); - final TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()).build(); + final TableDescriptor td = TableDescriptorBuilder.newBuilder(tn) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()) + .build(); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); - MasterObserverForTest observer = master.getMasterCoprocessorHost().findCoprocessor( - MasterObserverForTest.class); + MasterObserverForTest observer = + master.getMasterCoprocessorHost().findCoprocessor(MasterObserverForTest.class); // Create the table and disable it admin.createTable(td); @@ -330,20 +331,21 @@ public void testPostDisableTable() throws IOException { // Pass } postCount = observer.postHookCalls.get(); - assertEquals("Expected no invocations of postDisableTable when the operation fails", - preCount, postCount); + assertEquals("Expected no invocations of postDisableTable when the operation fails", preCount, + postCount); } @Test public void testPostDeleteTable() throws IOException { final Admin admin = UTIL.getAdmin(); final TableName tn = TableName.valueOf("postdeletetable"); - final TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()).build(); + final TableDescriptor td = TableDescriptorBuilder.newBuilder(tn) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()) + .build(); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); - MasterObserverForTest observer = master.getMasterCoprocessorHost().findCoprocessor( - MasterObserverForTest.class); + MasterObserverForTest observer = + master.getMasterCoprocessorHost().findCoprocessor(MasterObserverForTest.class); // Create the table and disable it admin.createTable(td); @@ -364,7 +366,7 @@ public void testPostDeleteTable() throws IOException { // Pass } postCount = observer.postHookCalls.get(); - assertEquals("Expected no invocations of 
postDeleteTable when the operation fails", - preCount, postCount); + assertEquals("Expected no invocations of postDeleteTable when the operation fails", preCount, + postCount); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java index a86b13c2a4b1..85b6b59d588e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertEquals; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -51,7 +50,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterProcedureEvents { @ClassRule @@ -91,7 +90,7 @@ public static void cleanupTest() throws Exception { @After public void tearDown() throws Exception { - for (TableDescriptor htd: UTIL.getAdmin().listTableDescriptors()) { + for (TableDescriptor htd : UTIL.getAdmin().listTableDescriptors()) { LOG.info("Tear down, remove table=" + htd.getTableName()); UTIL.deleteTable(htd.getTableName()); } @@ -105,7 +104,7 @@ public void testMasterInitializedEvent() throws Exception { RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); while (!master.isInitialized()) { Thread.sleep(250); @@ -136,7 +135,8 @@ private void testProcedureEventWaitWake(final HMaster master, final ProcedureEve // wait until the event is in the queue (proc executed and got into suspended state) LOG.debug("wait procedure suspended on " + event); - while (event.getSuspendedProcedures().size() < 1) Thread.sleep(25); + while (event.getSuspendedProcedures().size() < 1) + Thread.sleep(25); // check that the proc is in the event queue LOG.debug("checking " + event + " size=" + event.getSuspendedProcedures().size()); @@ -155,8 +155,8 @@ private void testProcedureEventWaitWake(final HMaster master, final ProcedureEve // check that nothing is in the event queue and the event is not suspended assertEquals(true, event.isReady()); assertEquals(0, event.getSuspendedProcedures().size()); - LOG.debug("completed execution of " + proc + - " pollCalls=" + (procSched.getPollCalls() - startPollCalls) + - " nullPollCalls=" + (procSched.getNullPollCalls() - startNullPollCalls)); + LOG.debug( + "completed execution of " + proc + " pollCalls=" + (procSched.getPollCalls() - startPollCalls) + + " nullPollCalls=" + (procSched.getNullPollCalls() - startNullPollCalls)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java index 03c6dfb5cd27..0f428fccd8cc 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public class TestMasterProcedureScheduler { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterProcedureScheduler.class); + HBaseClassTestRule.forClass(TestMasterProcedureScheduler.class); private static final Logger LOG = LoggerFactory.getLogger(TestMasterProcedureScheduler.class); @@ -95,7 +95,7 @@ public void testSimpleTableOpsQueues() throws Exception { // insert items for (int j = 1; j <= NUM_ITEMS; ++j) { queue.addBack(new TestTableProcedure(i * 1000 + j, tableName, - TableProcedureInterface.TableOperationType.EDIT)); + TableProcedureInterface.TableOperationType.EDIT)); assertEquals(++count, queue.size()); } } @@ -118,7 +118,7 @@ public void testSimpleTableOpsQueues() throws Exception { for (int i = 1; i <= NUM_TABLES; ++i) { final TableName tableName = TableName.valueOf(String.format("test-%04d", i)); final TestTableProcedure dummyProc = - new TestTableProcedure(100, tableName, TableProcedureInterface.TableOperationType.DELETE); + new TestTableProcedure(100, tableName, TableProcedureInterface.TableOperationType.DELETE); // complete the table deletion assertTrue(queue.markTableAsDeleted(tableName, dummyProc)); } @@ -133,7 +133,7 @@ public void testCreateDeleteTableOperationsWithWriteLock() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final TestTableProcedure dummyProc = - new TestTableProcedure(100, tableName, TableProcedureInterface.TableOperationType.DELETE); + new TestTableProcedure(100, tableName, TableProcedureInterface.TableOperationType.DELETE); queue.addBack( new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.EDIT)); @@ -165,7 +165,7 @@ public void testCreateDeleteTableOperationsWithReadLock() throws Exception { final int nitems = 2; final TestTableProcedure dummyProc = - new TestTableProcedure(100, tableName, TableProcedureInterface.TableOperationType.DELETE); + new TestTableProcedure(100, tableName, TableProcedureInterface.TableOperationType.DELETE); for (int i = 1; i <= nitems; ++i) { queue.addBack( @@ -351,10 +351,10 @@ public void testVerifyNamespaceXLock() throws Exception { public void testXLockWaitingForExecutingSharedLockToRelease() { final TableName tableName = TableName.valueOf(name.getMethodName()); final RegionInfo regionA = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); queue.addBack(new TestRegionProcedure(1, tableName, - TableProcedureInterface.TableOperationType.REGION_ASSIGN, regionA)); + TableProcedureInterface.TableOperationType.REGION_ASSIGN, regionA)); queue.addBack( new TestTableProcedure(2, tableName, TableProcedureInterface.TableOperationType.EDIT)); @@ -375,7 +375,7 @@ public void testXLockWaitingForExecutingSharedLockToRelease() { assertEquals(false, queue.waitTableExclusiveLock(proc, tableName)); queue.addBack(new TestRegionProcedure(3, tableName, - TableProcedureInterface.TableOperationType.REGION_UNASSIGN, regionA)); + 
TableProcedureInterface.TableOperationType.REGION_UNASSIGN, regionA)); // everything is locked by the table operation assertEquals(null, queue.poll(0)); @@ -397,22 +397,22 @@ public void testXLockWaitingForExecutingSharedLockToRelease() { public void testVerifyRegionLocks() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final RegionInfo regionA = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); final RegionInfo regionB = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes("b")).setEndKey(Bytes.toBytes("c")).build(); + .setStartKey(Bytes.toBytes("b")).setEndKey(Bytes.toBytes("c")).build(); final RegionInfo regionC = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes("c")).setEndKey(Bytes.toBytes("d")).build(); + .setStartKey(Bytes.toBytes("c")).setEndKey(Bytes.toBytes("d")).build(); queue.addBack( new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.EDIT)); queue.addBack(new TestRegionProcedure(2, tableName, - TableProcedureInterface.TableOperationType.REGION_MERGE, regionA, regionB)); + TableProcedureInterface.TableOperationType.REGION_MERGE, regionA, regionB)); queue.addBack(new TestRegionProcedure(3, tableName, - TableProcedureInterface.TableOperationType.REGION_SPLIT, regionA)); + TableProcedureInterface.TableOperationType.REGION_SPLIT, regionA)); queue.addBack(new TestRegionProcedure(4, tableName, - TableProcedureInterface.TableOperationType.REGION_SPLIT, regionB)); + TableProcedureInterface.TableOperationType.REGION_SPLIT, regionB)); queue.addBack(new TestRegionProcedure(5, tableName, - TableProcedureInterface.TableOperationType.REGION_UNASSIGN, regionC)); + TableProcedureInterface.TableOperationType.REGION_UNASSIGN, regionC)); // Fetch the 1st item and take the write lock Procedure proc = queue.poll(); @@ -473,11 +473,11 @@ public void testVerifyRegionLocks() throws Exception { public void testVerifySubProcRegionLocks() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); final RegionInfo regionA = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); final RegionInfo regionB = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes("b")).setEndKey(Bytes.toBytes("c")).build(); + .setStartKey(Bytes.toBytes("b")).setEndKey(Bytes.toBytes("c")).build(); final RegionInfo regionC = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes("c")).setEndKey(Bytes.toBytes("d")).build(); + .setStartKey(Bytes.toBytes("c")).setEndKey(Bytes.toBytes("d")).build(); queue.addBack( new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.ENABLE)); @@ -492,12 +492,12 @@ public void testVerifySubProcRegionLocks() throws Exception { // we should get 3 sub-proc back, one for each region. 
// (this step is done by the executor/rootProc, we are simulating it) Procedure[] subProcs = new Procedure[] { - new TestRegionProcedure(1, 2, tableName, - TableProcedureInterface.TableOperationType.REGION_EDIT, regionA), - new TestRegionProcedure(1, 3, tableName, - TableProcedureInterface.TableOperationType.REGION_EDIT, regionB), - new TestRegionProcedure(1, 4, tableName, - TableProcedureInterface.TableOperationType.REGION_EDIT, regionC), }; + new TestRegionProcedure(1, 2, tableName, + TableProcedureInterface.TableOperationType.REGION_EDIT, regionA), + new TestRegionProcedure(1, 3, tableName, + TableProcedureInterface.TableOperationType.REGION_EDIT, regionB), + new TestRegionProcedure(1, 4, tableName, + TableProcedureInterface.TableOperationType.REGION_EDIT, regionC), }; // at this point the rootProc is going in a waiting state // and the sub-procedures will be added in the queue. @@ -535,14 +535,14 @@ public void testVerifySubProcRegionLocks() throws Exception { public void testInheritedRegionXLock() { final TableName tableName = TableName.valueOf(name.getMethodName()); final RegionInfo region = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); queue.addBack(new TestRegionProcedure(1, tableName, - TableProcedureInterface.TableOperationType.REGION_SPLIT, region)); + TableProcedureInterface.TableOperationType.REGION_SPLIT, region)); queue.addBack(new TestRegionProcedure(1, 2, tableName, - TableProcedureInterface.TableOperationType.REGION_UNASSIGN, region)); + TableProcedureInterface.TableOperationType.REGION_UNASSIGN, region)); queue.addBack(new TestRegionProcedure(3, tableName, - TableProcedureInterface.TableOperationType.REGION_EDIT, region)); + TableProcedureInterface.TableOperationType.REGION_EDIT, region)); // fetch the root proc and take the lock on the region Procedure rootProc = queue.poll(); @@ -605,12 +605,12 @@ public void testSuspendedProcedure() throws Exception { private static RegionInfo[] generateRegionInfo(final TableName tableName) { return new RegionInfo[] { - RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("a")) - .setEndKey(Bytes.toBytes("b")).build(), - RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("b")) - .setEndKey(Bytes.toBytes("c")).build(), - RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("c")) - .setEndKey(Bytes.toBytes("d")).build() }; + RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("a")) + .setEndKey(Bytes.toBytes("b")).build(), + RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("b")) + .setEndKey(Bytes.toBytes("c")).build(), + RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("c")) + .setEndKey(Bytes.toBytes("d")).build() }; } @Test @@ -620,7 +620,7 @@ public void testParentXLockAndChildrenSharedLock() throws Exception { final TestRegionProcedure[] childProcs = new TestRegionProcedure[regions.length]; for (int i = 0; i < regions.length; ++i) { childProcs[i] = new TestRegionProcedure(1, 2 + i, tableName, - TableProcedureInterface.TableOperationType.REGION_ASSIGN, regions[i]); + TableProcedureInterface.TableOperationType.REGION_ASSIGN, regions[i]); } testInheritedXLockAndChildrenSharedLock(tableName, new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.CREATE), @@ -634,7 +634,7 @@ public void testRootXLockAndChildrenSharedLock() throws Exception { final TestRegionProcedure[] childProcs = new 
TestRegionProcedure[regions.length]; for (int i = 0; i < regions.length; ++i) { childProcs[i] = new TestRegionProcedure(1, 2, 3 + i, tableName, - TableProcedureInterface.TableOperationType.REGION_ASSIGN, regions[i]); + TableProcedureInterface.TableOperationType.REGION_ASSIGN, regions[i]); } testInheritedXLockAndChildrenSharedLock(tableName, new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.CREATE), @@ -1037,7 +1037,7 @@ public void testListLocksTable() throws Exception { public void testListLocksRegion() throws Exception { LockProcedure procedure = createExclusiveLockProcedure(3); RegionInfo regionInfo = - RegionInfoBuilder.newBuilder(TableName.valueOf("ns3", "table3")).build(); + RegionInfoBuilder.newBuilder(TableName.valueOf("ns3", "table3")).build(); queue.waitRegion(procedure, regionInfo); @@ -1135,7 +1135,7 @@ public void testAcquireSharedLockWhileParentHoldingExclusiveLock() { TestTableProcedure parentProc = new TestTableProcedure(1, tableName, TableOperationType.EDIT); TestRegionProcedure proc = - new TestRegionProcedure(1, 2, tableName, TableOperationType.REGION_EDIT, regionInfo); + new TestRegionProcedure(1, 2, tableName, TableOperationType.REGION_EDIT, regionInfo); queue.addBack(parentProc); assertSame(parentProc, queue.poll()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java index 7a43f755734c..ec14dd8fb5f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestMasterProcedureSchedulerConcurrency { @ClassRule @@ -109,12 +109,14 @@ public void run() { try { long procId = proc.getProcId(); int concurrent = concurrentCount.incrementAndGet(); - assertTrue("inc-concurrent="+ concurrent +" 1 <= concurrent <= "+ NUM_PEERS, + assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_PEERS, concurrent >= 1 && concurrent <= NUM_PEERS); - LOG.debug("[S] peerId="+ peerId +" procId="+ procId +" concurrent="+ concurrent); + LOG.debug( + "[S] peerId=" + peerId + " procId=" + procId + " concurrent=" + concurrent); Thread.sleep(2000); concurrent = concurrentCount.decrementAndGet(); - LOG.debug("[E] peerId="+ peerId +" procId="+ procId +" concurrent="+ concurrent); + LOG.debug( + "[E] peerId=" + peerId + " procId=" + procId + " concurrent=" + concurrent); assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_PEERS); } finally { synchronized (concurrentPeers) { @@ -145,8 +147,8 @@ public void run() { } /** - * Verify that "write" operations for a single table are serialized, - * but different tables can be executed in parallel. + * Verify that "write" operations for a single table are serialized, but different tables can be + * executed in parallel. 
*/ @Test public void testConcurrentWriteOps() throws Exception { @@ -159,7 +161,7 @@ public void testConcurrentWriteOps() throws Exception { TableName tableName = TableName.valueOf(String.format("testtb-%04d", i)); for (int j = 1; j < NUM_ITEMS; ++j) { procSet.addBack(new TestTableProcedure(i * 100 + j, tableName, - TableProcedureInterface.TableOperationType.EDIT)); + TableProcedureInterface.TableOperationType.EDIT)); opsCount.incrementAndGet(); } } @@ -192,12 +194,14 @@ public void run() { try { long procId = proc.getProcId(); int concurrent = concurrentCount.incrementAndGet(); - assertTrue("inc-concurrent="+ concurrent +" 1 <= concurrent <= "+ NUM_TABLES, + assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_TABLES, concurrent >= 1 && concurrent <= NUM_TABLES); - LOG.debug("[S] tableId="+ tableId +" procId="+ procId +" concurrent="+ concurrent); + LOG.debug( + "[S] tableId=" + tableId + " procId=" + procId + " concurrent=" + concurrent); Thread.sleep(2000); concurrent = concurrentCount.decrementAndGet(); - LOG.debug("[E] tableId="+ tableId +" procId="+ procId +" concurrent="+ concurrent); + LOG.debug( + "[E] tableId=" + tableId + " procId=" + procId + " concurrent=" + concurrent); assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES); } finally { synchronized (concurrentTables) { @@ -227,8 +231,8 @@ public void run() { for (int i = 1; i <= NUM_TABLES; ++i) { final TableName table = TableName.valueOf(String.format("testtb-%04d", i)); - final TestTableProcedure dummyProc = new TestTableProcedure(100, table, - TableProcedureInterface.TableOperationType.DELETE); + final TestTableProcedure dummyProc = + new TestTableProcedure(100, table, TableProcedureInterface.TableOperationType.DELETE); assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table, dummyProc)); } @@ -237,9 +241,7 @@ public void run() { @Test public void testMasterProcedureSchedulerPerformanceEvaluation() throws Exception { // Make sure the tool does not get stuck - MasterProcedureSchedulerPerformanceEvaluation.main(new String[] { - "-num_ops", "1000" - }); + MasterProcedureSchedulerPerformanceEvaluation.main(new String[] { "-num_ops", "1000" }); } public static class TestTableProcSet { @@ -295,11 +297,11 @@ public void release(Procedure proc) { } public TableName getTableName(Procedure proc) { - return ((TableProcedureInterface)proc).getTableName(); + return ((TableProcedureInterface) proc).getTableName(); } public TableProcedureInterface.TableOperationType getTableOperationType(Procedure proc) { - return ((TableProcedureInterface)proc).getTableOperationType(); + return ((TableProcedureInterface) proc).getTableOperationType(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java index 79ec0eb2b533..e26ce1021dbd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestModifyNamespaceProcedure { @ClassRule @@ -81,13 +81,12 @@ public void setup() throws Exception { @After public void tearDown() throws Exception { ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); - for (TableDescriptor htd: UTIL.getAdmin().listTableDescriptors()) { + for (TableDescriptor htd : UTIL.getAdmin().listTableDescriptors()) { LOG.info("Tear down, remove table=" + htd.getTableName()); UTIL.deleteTable(htd.getTableName()); } } - @Test public void testModifyNamespace() throws Exception { final NamespaceDescriptor nsd = NamespaceDescriptor.create("testModifyNamespace").build(); @@ -102,8 +101,7 @@ public void testModifyNamespace() throws Exception { createNamespaceForTesting(nsd); // Before modify - NamespaceDescriptor currentNsDescriptor = - UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); + NamespaceDescriptor currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); assertEquals(nsValue1before, currentNsDescriptor.getConfigurationValue(nsKey1)); assertNull(currentNsDescriptor.getConfigurationValue(nsKey2)); @@ -111,15 +109,14 @@ public void testModifyNamespace() throws Exception { nsd.setConfiguration(nsKey1, nsValue1after); nsd.setConfiguration(nsKey2, nsValue2); - long procId1 = procExec.submitProcedure( - new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId1 = + procExec.submitProcedure(new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); // Verify the namespace is updated. 
- currentNsDescriptor = - UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); + currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); assertEquals(nsValue1after, nsd.getConfigurationValue(nsKey1)); assertEquals(nsValue2, currentNsDescriptor.getConfigurationValue(nsKey2)); } @@ -139,8 +136,8 @@ public void testModifyNonExistNamespace() throws Exception { final NamespaceDescriptor nsd = NamespaceDescriptor.create(namespaceName).build(); - long procId = procExec.submitProcedure( - new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); @@ -165,8 +162,8 @@ public void testModifyNamespaceWithInvalidRegionCount() throws Exception { // Modify nsd.setConfiguration(nsKey, nsValue); - long procId = procExec.submitProcedure( - new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); Procedure result = procExec.getResult(procId); @@ -188,8 +185,8 @@ public void testModifyNamespaceWithInvalidTableCount() throws Exception { // Modify nsd.setConfiguration(nsKey, nsValue); - long procId = procExec.submitProcedure( - new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); Procedure result = procExec.getResult(procId); @@ -214,16 +211,15 @@ public void testRecoveryAndDoubleExecution() throws Exception { nsd.setConfiguration(nsKey, nsValue); // Start the Modify procedure && kill the executor - long procId = procExec.submitProcedure( - new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); + long procId = + procExec.submitProcedure(new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); // Restart the executor and execute the step twice MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); // Validate - NamespaceDescriptor currentNsDescriptor = - UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); + NamespaceDescriptor currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); assertEquals(nsValue, currentNsDescriptor.getConfigurationValue(nsKey)); } @@ -242,14 +238,13 @@ public void testRollbackAndDoubleExecution() throws Exception { // Modify // Start the Modify procedure && kill the executor long procId = procExec.submitProcedure(new ModifyNamespaceProcedure(procExec.getEnvironment(), - NamespaceDescriptor.create(nsd).addConfiguration(nsKey, nsValue).build())); + NamespaceDescriptor.create(nsd).addConfiguration(nsKey, nsValue).build())); int lastStep = 2; // failing before MODIFY_NAMESPACE_UPDATE_NS_TABLE MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep); // Validate - NamespaceDescriptor currentNsDescriptor = - UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); + NamespaceDescriptor currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); assertNull(currentNsDescriptor.getConfigurationValue(nsKey)); } @@ -260,8 +255,8 @@ private ProcedureExecutor getMasterProcedureExecutor() { private void 
createNamespaceForTesting(NamespaceDescriptor nsDescriptor) throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); - long procId = procExec.submitProcedure( - new CreateNamespaceProcedure(procExec.getEnvironment(), nsDescriptor)); + long procId = procExec + .submitProcedure(new CreateNamespaceProcedure(procExec.getEnvironment(), nsDescriptor)); // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java index f5cc543e86d4..3effb09b027f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,14 +49,15 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestModifyTableProcedure extends TestTableDDLProcedureBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestModifyTableProcedure.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final String column_Family1 = "cf1"; private static final String column_Family2 = "cf2"; @@ -76,7 +77,7 @@ public void testModifyTable() throws Exception { // Test 1: Modify 1 property long newMaxFileSize = htd.getMaxFileSize() * 2; htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize(newMaxFileSize) - .setRegionReplication(3).build(); + .setRegionReplication(3).build(); long procId1 = ProcedureTestingUtility.submitAndWait(procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd)); @@ -89,10 +90,10 @@ public void testModifyTable() throws Exception { boolean newReadOnlyOption = htd.isReadOnly() ? 
false : true; long newMemStoreFlushSize = htd.getMemStoreFlushSize() * 2; htd = TableDescriptorBuilder.newBuilder(htd).setReadOnly(newReadOnlyOption) - .setMemStoreFlushSize(newMemStoreFlushSize).build(); + .setMemStoreFlushSize(newMemStoreFlushSize).build(); - long procId2 = ProcedureTestingUtility.submitAndWait( - procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd)); + long procId2 = ProcedureTestingUtility.submitAndWait(procExec, + new ModifyTableProcedure(procExec.getEnvironment(), htd)); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2)); currentHtd = UTIL.getAdmin().getDescriptor(tableName); @@ -112,14 +113,13 @@ public void testModifyTableAddCF() throws Exception { // Test 1: Modify the table descriptor online String cf2 = "cf2"; TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(UTIL.getAdmin().getDescriptor(tableName)); + TableDescriptorBuilder.newBuilder(UTIL.getAdmin().getDescriptor(tableName)); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf2)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf2)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - long procId = ProcedureTestingUtility.submitAndWait( - procExec, new ModifyTableProcedure( - procExec.getEnvironment(), tableDescriptorBuilder.build())); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new ModifyTableProcedure(procExec.getEnvironment(), tableDescriptorBuilder.build())); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId)); currentHtd = UTIL.getAdmin().getDescriptor(tableName); @@ -131,14 +131,12 @@ procExec, new ModifyTableProcedure( ProcedureTestingUtility.waitNoProcedureRunning(procExec); String cf3 = "cf3"; tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(UTIL.getAdmin().getDescriptor(tableName)); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf3)).build(); + TableDescriptorBuilder.newBuilder(UTIL.getAdmin().getDescriptor(tableName)); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf3)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - long procId2 = - ProcedureTestingUtility.submitAndWait(procExec, - new ModifyTableProcedure(procExec.getEnvironment(), tableDescriptorBuilder.build())); + long procId2 = ProcedureTestingUtility.submitAndWait(procExec, + new ModifyTableProcedure(procExec.getEnvironment(), tableDescriptorBuilder.build())); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2)); currentHtd = UTIL.getAdmin().getDescriptor(tableName); @@ -162,8 +160,8 @@ public void testModifyTableDeleteCF() throws Exception { TableDescriptor htd = UTIL.getAdmin().getDescriptor(tableName); htd = TableDescriptorBuilder.newBuilder(htd).removeColumnFamily(Bytes.toBytes(cf2)).build(); - long procId = ProcedureTestingUtility.submitAndWait( - procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd)); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new ModifyTableProcedure(procExec.getEnvironment(), htd)); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId)); currentHtd = UTIL.getAdmin().getDescriptor(tableName); @@ -177,12 +175,10 @@ public void testModifyTableDeleteCF() throws Exception { TableDescriptor htd2 = UTIL.getAdmin().getDescriptor(tableName); // Disable Sanity check htd2 = 
TableDescriptorBuilder.newBuilder(htd2).removeColumnFamily(Bytes.toBytes(cf3)) - .setValue(TableDescriptorChecker.TABLE_SANITY_CHECKS, Boolean.FALSE.toString()) - .build(); + .setValue(TableDescriptorChecker.TABLE_SANITY_CHECKS, Boolean.FALSE.toString()).build(); - long procId2 = - ProcedureTestingUtility.submitAndWait(procExec, - new ModifyTableProcedure(procExec.getEnvironment(), htd2)); + long procId2 = ProcedureTestingUtility.submitAndWait(procExec, + new ModifyTableProcedure(procExec.getEnvironment(), htd2)); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2)); currentHtd = UTIL.getAdmin().getDescriptor(tableName); @@ -192,14 +188,13 @@ public void testModifyTableDeleteCF() throws Exception { // Removing the last family will fail TableDescriptor htd3 = UTIL.getAdmin().getDescriptor(tableName); htd3 = TableDescriptorBuilder.newBuilder(htd3).removeColumnFamily(Bytes.toBytes(cf1)).build(); - long procId3 = - ProcedureTestingUtility.submitAndWait(procExec, - new ModifyTableProcedure(procExec.getEnvironment(), htd3)); + long procId3 = ProcedureTestingUtility.submitAndWait(procExec, + new ModifyTableProcedure(procExec.getEnvironment(), htd3)); final Procedure result = procExec.getResult(procId3); assertEquals(true, result.isFailed()); Throwable cause = ProcedureTestingUtility.getExceptionCause(result); assertTrue("expected DoNotRetryIOException, got " + cause, - cause instanceof DoNotRetryIOException); + cause instanceof DoNotRetryIOException); assertEquals(1, currentHtd.getColumnFamilyNames().size()); assertTrue(currentHtd.hasColumnFamily(Bytes.toBytes(cf1))); } @@ -212,8 +207,8 @@ public void testRecoveryAndDoubleExecutionOffline() throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); // create the table - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, "cf1", cf3); + RegionInfo[] regions = + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1", cf3); UTIL.getAdmin().disableTable(tableName); ProcedureTestingUtility.waitNoProcedureRunning(procExec); @@ -224,13 +219,11 @@ public void testRecoveryAndDoubleExecutionOffline() throws Exception { TableDescriptor newDescriptor = TableDescriptorBuilder.newBuilder(oldDescriptor) .setCompactionEnabled(!oldDescriptor.isCompactionEnabled()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)) - .removeColumnFamily(Bytes.toBytes(cf3)) - .setRegionReplication(3) - .build(); + .removeColumnFamily(Bytes.toBytes(cf3)).setRegionReplication(3).build(); // Start the Modify procedure && kill the executor - long procId = procExec.submitProcedure( - new ModifyTableProcedure(procExec.getEnvironment(), newDescriptor)); + long procId = procExec + .submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), newDescriptor)); // Restart the executor and execute the step twice MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); @@ -253,16 +246,16 @@ public void testRecoveryAndDoubleExecutionOnline() throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); // create the table - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, "cf1", cf3); + RegionInfo[] regions = + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1", cf3); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Modify multiple properties of the table. 
TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(UTIL.getAdmin().getDescriptor(tableName)); + TableDescriptorBuilder.newBuilder(UTIL.getAdmin().getDescriptor(tableName)); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf2)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf2)).build(); boolean newCompactionEnableOption = !tableDescriptorBuilder.build().isCompactionEnabled(); tableDescriptorBuilder.setCompactionEnabled(newCompactionEnableOption); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); @@ -303,7 +296,7 @@ public void testColumnFamilyAdditionTwiceWithNonce() throws Exception { TableDescriptor td = UTIL.getAdmin().getDescriptor(tableName); TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td).setCompactionEnabled(!td.isCompactionEnabled()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build(); PerClientRandomNonceGenerator nonceGenerator = PerClientRandomNonceGenerator.get(); long nonceGroup = nonceGenerator.getNonceGroup(); @@ -329,7 +322,7 @@ public boolean execute(int step) throws IOException { } }); - //Try with different nonce, now it should fail the checks + // Try with different nonce, now it should fail the checks try { UTIL.getHBaseCluster().getMaster().addColumn(tableName, ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf2)).build(), nonceGroup, @@ -357,19 +350,19 @@ public void testRollbackAndDoubleExecutionOnline() throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); // create the table - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, "cf1"); + RegionInfo[] regions = + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1"); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); TableDescriptor td = UTIL.getAdmin().getDescriptor(tableName); TableDescriptor newTd = - TableDescriptorBuilder.newBuilder(td).setCompactionEnabled(!td.isCompactionEnabled()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName)).build(); + TableDescriptorBuilder.newBuilder(td).setCompactionEnabled(!td.isCompactionEnabled()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName)).build(); // Start the Modify procedure && kill the executor long procId = - procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), newTd)); + procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), newTd)); int lastStep = 8; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep); @@ -386,8 +379,8 @@ public void testRollbackAndDoubleExecutionOffline() throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); // create the table - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - procExec, tableName, null, "cf1"); + RegionInfo[] regions = + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1"); UTIL.getAdmin().disableTable(tableName); ProcedureTestingUtility.waitNoProcedureRunning(procExec); @@ -395,13 +388,13 @@ public void testRollbackAndDoubleExecutionOffline() throws Exception { TableDescriptor td = UTIL.getAdmin().getDescriptor(tableName); TableDescriptor newTd = - TableDescriptorBuilder.newBuilder(td).setCompactionEnabled(!td.isCompactionEnabled()) - 
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName)).setRegionReplication(3) - .build(); + TableDescriptorBuilder.newBuilder(td).setCompactionEnabled(!td.isCompactionEnabled()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName)).setRegionReplication(3) + .build(); // Start the Modify procedure && kill the executor - long procId = procExec.submitProcedure( - new ModifyTableProcedure(procExec.getEnvironment(), newTd)); + long procId = + procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), newTd)); // Restart the executor and rollback the step twice int lastStep = 8; // failing before MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR @@ -440,13 +433,11 @@ public void run() { } } ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family2)).build(); - ConcurrentAddColumnFamily t1 = - new ConcurrentAddColumnFamily(tableName, columnFamilyDescriptor); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family2)).build(); + ConcurrentAddColumnFamily t1 = new ConcurrentAddColumnFamily(tableName, columnFamilyDescriptor); columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family3)).build(); - ConcurrentAddColumnFamily t2 = - new ConcurrentAddColumnFamily(tableName, columnFamilyDescriptor); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family3)).build(); + ConcurrentAddColumnFamily t2 = new ConcurrentAddColumnFamily(tableName, columnFamilyDescriptor); t1.start(); t2.start(); @@ -461,16 +452,15 @@ public void run() { @Test public void testConcurrentDeleteColumnFamily() throws IOException, InterruptedException { final TableName tableName = TableName.valueOf(name.getMethodName()); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family1)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family1)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family2)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family2)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family3)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column_Family3)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); UTIL.getAdmin().createTable(tableDescriptorBuilder.build()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java index 5c225cca0ca4..0d4baaf44161 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.List; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -46,7 +45,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureAdmin { @ClassRule @@ -54,7 +53,8 @@ public class TestProcedureAdmin { HBaseClassTestRule.forClass(TestProcedureAdmin.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureAdmin.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -88,7 +88,7 @@ public void setup() throws Exception { public void tearDown() throws Exception { assertTrue("expected executor to be running", getMasterProcedureExecutor().isRunning()); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); - for (TableDescriptor htd: UTIL.getAdmin().listTableDescriptors()) { + for (TableDescriptor htd : UTIL.getAdmin().listTableDescriptors()) { LOG.info("Tear down, remove table=" + htd.getTableName()); UTIL.deleteTable(htd.getTableName()); } @@ -103,8 +103,8 @@ public void testAbortProcedureSuccess() throws Exception { ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Submit an abortable procedure - long procId = procExec.submitProcedure( - new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); + long procId = procExec + .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); // Wait for one step to complete ProcedureTestingUtility.waitProcedure(procExec, procId); @@ -114,8 +114,7 @@ public void testAbortProcedureSuccess() throws Exception { MasterProcedureTestingUtility.testRestartWithAbort(procExec, procId); ProcedureTestingUtility.waitNoProcedureRunning(procExec); // Validate the disable table procedure was aborted successfully - MasterProcedureTestingUtility.validateTableIsEnabled( - UTIL.getHBaseCluster().getMaster(), + MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(), tableName); } @@ -130,8 +129,8 @@ public void testAbortProcedureFailure() throws Exception { ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Submit an un-abortable procedure - long procId = procExec.submitProcedure( - new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + long procId = + procExec.submitProcedure(new DeleteTableProcedure(procExec.getEnvironment(), tableName)); // Wait for a couple of steps to complete (first step "prepare" is abortable) ProcedureTestingUtility.waitProcedure(procExec, procId); for (int i = 0; i < 2; ++i) { @@ -147,8 +146,8 @@ public void testAbortProcedureFailure() throws Exception { ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); // Validate the delete table procedure was not aborted - MasterProcedureTestingUtility.validateTableDeletion( - UTIL.getHBaseCluster().getMaster(), tableName); + MasterProcedureTestingUtility.validateTableDeletion(UTIL.getHBaseCluster().getMaster(), + tableName); } 
@Test @@ -161,8 +160,8 @@ public void testAbortProcedureInterruptedNotAllowed() throws Exception { ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); // Submit a procedure - long procId = procExec.submitProcedure( - new DisableTableProcedure(procExec.getEnvironment(), tableName, true)); + long procId = procExec + .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, true)); // Wait for one step to complete ProcedureTestingUtility.waitProcedure(procExec, procId); @@ -175,8 +174,8 @@ public void testAbortProcedureInterruptedNotAllowed() throws Exception { ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); // Validate the delete table procedure was not aborted - MasterProcedureTestingUtility.validateTableIsDisabled( - UTIL.getHBaseCluster().getMaster(), tableName); + MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), + tableName); } @Test @@ -201,15 +200,15 @@ public void testGetProcedure() throws Exception { ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); - long procId = procExec.submitProcedure( - new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); + long procId = procExec + .submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); // Wait for one step to complete ProcedureTestingUtility.waitProcedure(procExec, procId); List> procedures = procExec.getProcedures(); assertTrue(procedures.size() >= 1); boolean found = false; - for (Procedure proc: procedures) { + for (Procedure proc : procedures) { if (proc.getProcId() == procId) { assertTrue(proc.isRunnable()); found = true; @@ -224,7 +223,7 @@ public void testGetProcedure() throws Exception { ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); procedures = procExec.getProcedures(); - for (Procedure proc: procedures) { + for (Procedure proc : procedures) { assertTrue(proc.isSuccess()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java index 49c7d282f2a0..22d6841e2398 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ public class TestProcedurePriority { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedurePriority.class); + HBaseClassTestRule.forClass(TestProcedurePriority.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -112,7 +112,7 @@ public static void setUp() throws Exception { UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MyCP.class.getName()); UTIL.startMiniCluster(3); CORE_POOL_SIZE = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getCorePoolSize(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getCorePoolSize(); TABLE_COUNT = 50 * CORE_POOL_SIZE; List> futures = new ArrayList<>(); AsyncAdmin admin = UTIL.getAsyncConnection().getAdmin(); @@ -120,9 +120,9 @@ public static void setUp() throws Exception { for (int i = 0; i < TABLE_COUNT; i++) { concurrency.acquire(); futures.add(admin - .createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE_NAME_PREFIX + i)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build()) - .whenComplete((r, e) -> concurrency.release())); + .createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE_NAME_PREFIX + i)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build()) + .whenComplete((r, e) -> concurrency.release())); } for (Future future : futures) { future.get(3, TimeUnit.MINUTES); @@ -139,15 +139,15 @@ public static void tearDown() throws Exception { @Test public void test() throws Exception { RegionServerThread rsWithMetaThread = UTIL.getMiniHBaseCluster().getRegionServerThreads() - .stream().filter(t -> !t.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) - .findAny().get(); + .stream().filter(t -> !t.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) + .findAny().get(); HRegionServer rsNoMeta = UTIL.getOtherRegionServer(rsWithMetaThread.getRegionServer()); FAIL = true; UTIL.getMiniHBaseCluster().killRegionServer(rsNoMeta.getServerName()); // wait until all the worker thread are stuck, which means that the stuck checker will start to // add new worker thread. ProcedureExecutor executor = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); UTIL.waitFor(60000, new ExplainingPredicate() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBackoff.java index 45d840b548de..f0d25760ce58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBackoff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBackoff.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,10 +52,10 @@ public class TestReopenTableRegionsProcedureBackoff { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReopenTableRegionsProcedureBackoff.class); + HBaseClassTestRule.forClass(TestReopenTableRegionsProcedureBackoff.class); private static final Logger LOG = - LoggerFactory.getLogger(TestReopenTableRegionsProcedureBackoff.class); + LoggerFactory.getLogger(TestReopenTableRegionsProcedureBackoff.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -79,12 +79,12 @@ public static void tearDown() throws Exception { public void testRetryBackoff() throws IOException, InterruptedException { AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); ProcedureExecutor procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); RegionInfo regionInfo = UTIL.getAdmin().getRegions(TABLE_NAME).get(0); RegionStateNode regionNode = am.getRegionStates().getRegionStateNode(regionInfo); // just a dummy one TransitRegionStateProcedure trsp = - TransitRegionStateProcedure.unassign(procExec.getEnvironment(), regionInfo); + TransitRegionStateProcedure.unassign(procExec.getEnvironment(), regionInfo); long openSeqNum; regionNode.lock(); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureInfiniteLoop.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureInfiniteLoop.java index 6686a66ab817..7a6fd0d42c18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureInfiniteLoop.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureInfiniteLoop.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public class TestReopenTableRegionsProcedureInfiniteLoop { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReopenTableRegionsProcedureInfiniteLoop.class); + HBaseClassTestRule.forClass(TestReopenTableRegionsProcedureInfiniteLoop.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -79,7 +79,7 @@ public void testInfiniteLoop() throws IOException { procId = exec.submitProcedure(proc); UTIL.waitFor(30000, () -> proc.hasLock()); TransitRegionStateProcedure trsp = - TransitRegionStateProcedure.reopen(exec.getEnvironment(), regionInfo); + TransitRegionStateProcedure.reopen(exec.getEnvironment(), regionInfo); regionNode.setProcedure(trsp); exec.submitProcedure(trsp); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java index e753ca6da6d6..aba496a357f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java @@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestRestoreSnapshotProcedure extends TestTableDDLProcedureBase { @ClassRule @@ -121,10 +121,8 @@ private void setupSnapshotAndUpdateTable() throws Exception { snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0)); // modify the table - ColumnFamilyDescriptor columnFamilyDescriptor3 = - ColumnFamilyDescriptorBuilder.of(CF3); - ColumnFamilyDescriptor columnFamilyDescriptor4 = - ColumnFamilyDescriptorBuilder.of(CF4); + ColumnFamilyDescriptor columnFamilyDescriptor3 = ColumnFamilyDescriptorBuilder.of(CF3); + ColumnFamilyDescriptor columnFamilyDescriptor4 = ColumnFamilyDescriptorBuilder.of(CF4); admin.addColumnFamily(snapshotTableName, columnFamilyDescriptor3); admin.addColumnFamily(snapshotTableName, columnFamilyDescriptor4); admin.deleteColumnFamily(snapshotTableName, CF2); @@ -146,7 +144,7 @@ private void setupSnapshotAndUpdateTable() throws Exception { } private static TableDescriptor createHTableDescriptor(final TableName tableName, - final byte[]... family) { + final byte[]... 
family) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (int i = 0; i < family.length; ++i) { @@ -159,8 +157,7 @@ private static TableDescriptor createHTableDescriptor(final TableName tableName, public void testRestoreSnapshot() throws Exception { final ProcedureExecutor procExec = getMasterProcedureExecutor(); - long procId = ProcedureTestingUtility.submitAndWait( - procExec, + long procId = ProcedureTestingUtility.submitAndWait(procExec, new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot)); ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId)); @@ -173,14 +170,12 @@ public void testRestoreSnapshotToDifferentTable() throws Exception { final TableName restoredTableName = TableName.valueOf(name.getMethodName()); final TableDescriptor tableDescriptor = createHTableDescriptor(restoredTableName, CF1, CF2); - long procId = ProcedureTestingUtility.submitAndWait( - procExec, new RestoreSnapshotProcedure(procExec.getEnvironment(), tableDescriptor, - snapshot)); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new RestoreSnapshotProcedure(procExec.getEnvironment(), tableDescriptor, snapshot)); Procedure result = procExec.getResult(procId); assertTrue(result.isFailed()); LOG.debug("Restore snapshot failed with exception: " + result.getException()); - assertTrue( - ProcedureTestingUtility.getExceptionCause(result) instanceof TableNotFoundException); + assertTrue(ProcedureTestingUtility.getExceptionCause(result) instanceof TableNotFoundException); } @Test @@ -190,8 +185,7 @@ public void testRestoreSnapshotToEnabledTable() throws Exception { try { UTIL.getAdmin().enableTable(snapshotTableName); - long procId = ProcedureTestingUtility.submitAndWait( - procExec, + long procId = ProcedureTestingUtility.submitAndWait(procExec, new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot)); Procedure result = procExec.getResult(procId); assertTrue(result.isFailed()); @@ -234,7 +228,7 @@ public void testRecoverWithRestoreAclFlag() throws Exception { new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot, true)); MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); - RestoreSnapshotProcedure result = (RestoreSnapshotProcedure)procExec.getResult(procId); + RestoreSnapshotProcedure result = (RestoreSnapshotProcedure) procExec.getResult(procId); // check whether the restoreAcl flag is true after deserialization from Pb. assertEquals(true, result.getRestoreAcl()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCP.java index c7505c39da1d..a08611f6d147 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -71,7 +71,7 @@ public void testConcurrentSCPForSameServer() throws Exception { HMaster master = util.getHBaseCluster().getMaster(); final ProcedureExecutor pExecutor = master.getMasterProcedureExecutor(); ServerCrashProcedure procB = - new ServerCrashProcedure(pExecutor.getEnvironment(), rsToKill, false, false); + new ServerCrashProcedure(pExecutor.getEnvironment(), rsToKill, false, false); AssignmentTestingUtil.killRs(util, rsToKill); long procId = getSCPProcId(pExecutor); Procedure procA = pExecutor.getProcedure(procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPBase.java index 996e1f3b40cb..722e704f83ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPBase.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -77,8 +78,8 @@ public void tearDown() throws Exception { */ protected void testRecoveryAndDoubleExecution(boolean carryingMeta, boolean doubleExecution) throws Exception { - final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution-carryingMeta-" + - carryingMeta + "-doubleExecution-" + doubleExecution); + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution-carryingMeta-" + + carryingMeta + "-doubleExecution-" + doubleExecution); try (Table t = createTable(tableName)) { // Load the table with a bit of data so some logs to split and some edits in each region. this.util.loadTable(t, HBaseTestingUtil.COLUMNS[0]); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMeta.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMeta.java index 09d9d87a6d97..ebd588dcc428 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMeta.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMeta.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ public class TestSCPWithMeta extends TestSCPBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithMeta.class); + HBaseClassTestRule.forClass(TestSCPWithMeta.class); @Test public void testRecoveryAndDoubleExecutionOnRsWithMeta() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithReplicas.java index fd6471bd2c1d..9167beee7315 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ public class TestSCPWithMetaWithReplicas extends TestSCPWithMeta { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithMetaWithReplicas.class); + HBaseClassTestRule.forClass(TestSCPWithMetaWithReplicas.class); @Override protected void startMiniCluster() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithReplicasWithoutZKCoordinated.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithReplicasWithoutZKCoordinated.java index 76b8c8f2edfd..4737fb257d90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithReplicasWithoutZKCoordinated.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithReplicasWithoutZKCoordinated.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestSCPWithMetaWithReplicasWithoutZKCoordinated extends TestSCPWith @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithMetaWithReplicasWithoutZKCoordinated.class); + HBaseClassTestRule.forClass(TestSCPWithMetaWithReplicasWithoutZKCoordinated.class); @Override protected void setupConf(Configuration conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithoutZKCoordinated.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithoutZKCoordinated.java index b38eec26100d..dbabe714f924 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithoutZKCoordinated.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithMetaWithoutZKCoordinated.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestSCPWithMetaWithoutZKCoordinated extends TestSCPWithMeta { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithMetaWithoutZKCoordinated.class); + HBaseClassTestRule.forClass(TestSCPWithMetaWithoutZKCoordinated.class); @Override protected void setupConf(Configuration conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicas.java index 247905332e71..336e852cf2a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicas.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.master.procedure; @@ -21,7 +28,7 @@ public class TestSCPWithReplicas extends TestSCP { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithReplicas.class); + HBaseClassTestRule.forClass(TestSCPWithReplicas.class); @Override protected void startMiniCluster() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java index 0acc603b503b..e148238d3129 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithRSGroup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public class TestSCPWithReplicasWithRSGroup extends TestSCPBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithReplicasWithRSGroup.class); + HBaseClassTestRule.forClass(TestSCPWithReplicasWithRSGroup.class); @Override protected void setupConf(Configuration conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithoutZKCoordinated.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithoutZKCoordinated.java index ead65729e07c..13c476fc072f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithoutZKCoordinated.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithReplicasWithoutZKCoordinated.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMeta.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMeta.java index 33477253c04a..cd5cba8a1b68 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMeta.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMeta.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ public class TestSCPWithoutMeta extends TestSCPBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithoutMeta.class); + HBaseClassTestRule.forClass(TestSCPWithoutMeta.class); @Test public void testRecoveryAndDoubleExecutionOnRsWithoutMeta() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithReplicas.java index 44f98f2c7d0b..8c08fabdd183 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ public class TestSCPWithoutMetaWithReplicas extends TestSCPWithoutMeta { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithoutMetaWithReplicas.class); + HBaseClassTestRule.forClass(TestSCPWithoutMetaWithReplicas.class); @Override protected void startMiniCluster() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithReplicasWithoutZKCoordinated.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithReplicasWithoutZKCoordinated.java index bf093db0d902..a353486a9c0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithReplicasWithoutZKCoordinated.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithReplicasWithoutZKCoordinated.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ public class TestSCPWithoutMetaWithReplicasWithoutZKCoordinated @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithoutMetaWithReplicasWithoutZKCoordinated.class); + HBaseClassTestRule.forClass(TestSCPWithoutMetaWithReplicasWithoutZKCoordinated.class); @Override protected void setupConf(Configuration conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithoutZKCoordinated.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithoutZKCoordinated.java index 83f42860a495..a1094ad9afd6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithoutZKCoordinated.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutMetaWithoutZKCoordinated.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestSCPWithoutMetaWithoutZKCoordinated extends TestSCPWithoutMeta { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSCPWithoutMetaWithoutZKCoordinated.class); + HBaseClassTestRule.forClass(TestSCPWithoutMetaWithoutZKCoordinated.class); @Override protected void setupConf(Configuration conf) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutZKCoordinated.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutZKCoordinated.java index 059ece2e31aa..f8721cf44b56 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutZKCoordinated.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPWithoutZKCoordinated.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestSCPWithoutZKCoordinated extends TestSCP { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java index da8103154f7d..c873776265a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -83,6 +83,7 @@ public void setup() throws Exception { private ProcedureExecutor getMasterProcedureExecutor() { return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); } + private void resetProcExecutorTestingKillFlag() { final ProcedureExecutor procExec = getMasterProcedureExecutor(); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); @@ -96,11 +97,10 @@ public void tearDown() throws Exception { @Test public void testSafemodeBringsDownMaster() throws Exception { final TableName tableName = TableName.valueOf("testSafemodeBringsDownMaster"); - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") - }; - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), + tableName, splitKeys, "f1", "f2"); MiniDFSCluster dfsCluster = UTIL.getDFSCluster(); DistributedFileSystem dfs = (DistributedFileSystem) dfsCluster.getFileSystem(); dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); @@ -109,17 +109,17 @@ public void testSafemodeBringsDownMaster() throws Exception { int index = -1; do { index = UTIL.getMiniHBaseCluster().getServerWithMeta(); - } while (index == -1 && - startTime + timeOut < EnvironmentEdgeManager.currentTime()); + } while (index == -1 && startTime + timeOut < EnvironmentEdgeManager.currentTime()); - if (index != -1){ + if (index != -1) { UTIL.getMiniHBaseCluster().abortRegionServer(index); UTIL.getMiniHBaseCluster().waitOnRegionServer(index); } UTIL.waitFor(timeOut, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - List threads = UTIL.getMiniHBaseCluster().getLiveMasterThreads(); + List threads = + UTIL.getMiniHBaseCluster().getLiveMasterThreads(); return threads == null || threads.isEmpty(); } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSchedulerQueueDeadLock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSchedulerQueueDeadLock.java index 20653cfc6174..a41bc10f8ca1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSchedulerQueueDeadLock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSchedulerQueueDeadLock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public class TestSchedulerQueueDeadLock { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSchedulerQueueDeadLock.class); + HBaseClassTestRule.forClass(TestSchedulerQueueDeadLock.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -165,7 +165,7 @@ public void setUp() throws IOException { procStore.start(1); MasterProcedureScheduler scheduler = new MasterProcedureScheduler(pid -> null); procExec = new ProcedureExecutor<>(UTIL.getConfiguration(), new TestEnv(scheduler), procStore, - scheduler); + scheduler); procExec.init(1, false); } @@ -267,7 +267,7 @@ public void testTableProcedureSubProcedureDeadLock() throws Exception { UTIL.waitFor(10000, () -> procExec.getProcedures().stream().anyMatch(p -> p instanceof TableSharedProcedure)); procExec.getProcedures().stream().filter(p -> p instanceof TableSharedProcedure) - .map(p -> (TableSharedProcedure) p).forEach(p -> p.latch.release()); + .map(p -> (TableSharedProcedure) p).forEach(p -> p.latch.release()); ((TableExclusiveProcedure) procExec.getProcedure(procId2)).latch.release(); UTIL.waitFor(10000, () -> procExec.isFinished(procId1)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java index 82e57fcd865d..63b2acef6e90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerRemoteProcedure.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -140,7 +139,7 @@ public void testRemoteCompleteAndFailedAtTheSameTime() throws Exception { public void testRegionOpenProcedureIsNotHandledByDispatcher() throws Exception { TableName tableName = TableName.valueOf("testRegionOpenProcedureIsNotHandledByDisPatcher"); RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(1)) - .setEndKey(Bytes.toBytes(2)).setSplit(false).setRegionId(0).build(); + .setEndKey(Bytes.toBytes(2)).setSplit(false).setRegionId(0).build(); MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment(); env.getAssignmentManager().getRegionStates().getOrCreateRegionStateNode(hri); TransitRegionStateProcedure proc = TransitRegionStateProcedure.assign(env, hri, null); @@ -190,8 +189,8 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws } @Override - public Optional remoteCallBuild( - MasterProcedureEnv env, ServerName serverName) { + public Optional + remoteCallBuild(MasterProcedureEnv env, ServerName serverName) { return Optional .of(new RSProcedureDispatcher.ServerOperation(null, 0L, this.getClass(), new byte[0])); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedure.java index c74617b4c49f..9da7677a283e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedure.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Optional; import org.apache.hadoop.conf.Configuration; @@ -49,6 +49,7 @@ import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SnapshotState; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; @@ -59,7 +60,7 @@ public class TestSnapshotProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotProcedure.class); + HBaseClassTestRule.forClass(TestSnapshotProcedure.class); protected static HBaseTestingUtil TEST_UTIL; protected HMaster master; @@ -95,19 +96,18 @@ public void setup() throws Exception { TEST_UTIL.loadTable(table, CF, false); } - public > T waitProcedureRunnableAndGetFirst( - Class clazz, long timeout) throws IOException { - TEST_UTIL.waitFor(timeout, () -> master.getProcedures().stream() - .anyMatch(clazz::isInstance)); - Optional procOpt = master.getMasterProcedureExecutor().getProcedures().stream() - .filter(clazz::isInstance).map(clazz::cast).findFirst(); + public > T + waitProcedureRunnableAndGetFirst(Class clazz, long timeout) throws IOException { + TEST_UTIL.waitFor(timeout, () -> master.getProcedures().stream().anyMatch(clazz::isInstance)); + Optional procOpt = master.getMasterProcedureExecutor().getProcedures().stream() + .filter(clazz::isInstance).map(clazz::cast).findFirst(); assertTrue(procOpt.isPresent()); return procOpt.get(); } - protected SnapshotProcedure getDelayedOnSpecificStateSnapshotProcedure( - SnapshotProcedure sp, 
MasterProcedureEnv env, SnapshotState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + protected SnapshotProcedure getDelayedOnSpecificStateSnapshotProcedure(SnapshotProcedure sp, + MasterProcedureEnv env, SnapshotState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { SnapshotProcedure spySp = Mockito.spy(sp); Mockito.doAnswer(new AnswersWithDelay(60000, new Answer() { @Override @@ -121,8 +121,8 @@ public Object answer(InvocationOnMock invocation) throws Throwable { @After public void teardown() throws Exception { if (this.master != null) { - ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate( - master.getMasterProcedureExecutor(), false); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(master.getMasterProcedureExecutor(), + false); } TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureBasicSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureBasicSnapshot.java index 8e5c0f8e5c25..eb66298777af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureBasicSnapshot.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureBasicSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.fail; @@ -36,7 +35,7 @@ public class TestSnapshotProcedureBasicSnapshot extends TestSnapshotProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotProcedureBasicSnapshot.class); + HBaseClassTestRule.forClass(TestSnapshotProcedureBasicSnapshot.class); @Test public void testSimpleSnapshotTable() throws Exception { @@ -62,7 +61,7 @@ public void run() { Thread.sleep(1000); // we don't allow different snapshot with same name SnapshotDescription snapshotWithSameName = - new SnapshotDescription(SNAPSHOT_NAME, TABLE_NAME, SnapshotType.SKIPFLUSH); + new SnapshotDescription(SNAPSHOT_NAME, TABLE_NAME, SnapshotType.SKIPFLUSH); TEST_UTIL.getAdmin().snapshot(snapshotWithSameName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureConcurrently.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureConcurrently.java index 4f2be1f116fc..371f2ec94d9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureConcurrently.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureConcurrently.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertEquals; @@ -50,29 +49,31 @@ public class TestSnapshotProcedureConcurrently extends TestSnapshotProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotProcedureConcurrently.class); + HBaseClassTestRule.forClass(TestSnapshotProcedureConcurrently.class); @Test public void testRunningTwoSnapshotProcedureOnSameTable() throws Exception { String newSnapshotName = SNAPSHOT_NAME + "_2"; SnapshotProtos.SnapshotDescription snapshotProto2 = SnapshotProtos.SnapshotDescription - .newBuilder(snapshotProto).setName(newSnapshotName).build(); + .newBuilder(snapshotProto).setName(newSnapshotName).build(); ProcedureExecutor procExec = master.getMasterProcedureExecutor(); MasterProcedureEnv env = procExec.getEnvironment(); SnapshotProcedure sp1 = new SnapshotProcedure(env, snapshotProto); SnapshotProcedure sp2 = new SnapshotProcedure(env, snapshotProto2); - SnapshotProcedure spySp1 = getDelayedOnSpecificStateSnapshotProcedure(sp1, - procExec.getEnvironment(), MasterProcedureProtos.SnapshotState.SNAPSHOT_SNAPSHOT_ONLINE_REGIONS); - SnapshotProcedure spySp2 = getDelayedOnSpecificStateSnapshotProcedure(sp2, - procExec.getEnvironment(), MasterProcedureProtos.SnapshotState.SNAPSHOT_SNAPSHOT_ONLINE_REGIONS); + SnapshotProcedure spySp1 = + getDelayedOnSpecificStateSnapshotProcedure(sp1, procExec.getEnvironment(), + MasterProcedureProtos.SnapshotState.SNAPSHOT_SNAPSHOT_ONLINE_REGIONS); + SnapshotProcedure spySp2 = + getDelayedOnSpecificStateSnapshotProcedure(sp2, procExec.getEnvironment(), + MasterProcedureProtos.SnapshotState.SNAPSHOT_SNAPSHOT_ONLINE_REGIONS); long procId1 = procExec.submitProcedure(spySp1); long procId2 = procExec.submitProcedure(spySp2); - TEST_UTIL.waitFor(2000, () -> env.getMasterServices().getProcedures() - .stream().map(Procedure::getProcId).collect(Collectors.toList()) - .containsAll(Arrays.asList(procId1, procId2))); + TEST_UTIL.waitFor(2000, + () -> env.getMasterServices().getProcedures().stream().map(Procedure::getProcId) + .collect(Collectors.toList()).containsAll(Arrays.asList(procId1, procId2))); assertFalse(procExec.isFinished(procId1)); assertFalse(procExec.isFinished(procId2)); @@ -81,7 +82,7 @@ public void testRunningTwoSnapshotProcedureOnSameTable() throws Exception { ProcedureTestingUtility.waitProcedure(master.getMasterProcedureExecutor(), procId2); List snapshots = - master.getSnapshotManager().getCompletedSnapshots(); + master.getSnapshotManager().getCompletedSnapshots(); assertEquals(2, snapshots.size()); snapshots.sort(Comparator.comparing(SnapshotProtos.SnapshotDescription::getName)); assertEquals(SNAPSHOT_NAME, snapshots.get(0).getName()); @@ -108,14 +109,14 @@ public void run() { Thread.sleep(1000); SnapshotManager sm = master.getSnapshotManager(); - TEST_UTIL.waitFor(2000, 50, () -> !sm.isTakingSnapshot(TABLE_NAME) - && sm.isTableTakingAnySnapshot(TABLE_NAME)); + TEST_UTIL.waitFor(2000, 50, + () -> !sm.isTakingSnapshot(TABLE_NAME) && sm.isTableTakingAnySnapshot(TABLE_NAME)); TEST_UTIL.getConfiguration().setBoolean("hbase.snapshot.zk.coordinated", true); SnapshotDescription snapshotOnSameTable = - new SnapshotDescription(newSnapshotName, TABLE_NAME, SnapshotType.SKIPFLUSH); - SnapshotProtos.SnapshotDescription snapshotOnSameTableProto = ProtobufUtil - .createHBaseProtosSnapshotDesc(snapshotOnSameTable); + new SnapshotDescription(newSnapshotName, TABLE_NAME, SnapshotType.SKIPFLUSH); + SnapshotProtos.SnapshotDescription 
snapshotOnSameTableProto = + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotOnSameTable); Thread second = new Thread("zk-snapshot") { @Override public void run() { @@ -130,8 +131,8 @@ public void run() { second.start(); TEST_UTIL.waitFor(2000, () -> sm.isTakingSnapshot(TABLE_NAME)); - TEST_UTIL.waitFor(60000, () -> sm.isSnapshotDone(snapshotOnSameTableProto) - && !sm.isTakingAnySnapshot()); + TEST_UTIL.waitFor(60000, + () -> sm.isSnapshotDone(snapshotOnSameTableProto) && !sm.isTakingAnySnapshot()); SnapshotTestingUtils.confirmSnapshotValid(TEST_UTIL, snapshotProto, TABLE_NAME, CF); SnapshotTestingUtils.confirmSnapshotValid(TEST_UTIL, snapshotOnSameTableProto, TABLE_NAME, CF); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureMasterRestarts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureMasterRestarts.java index b1551fedeb70..1ca866d19716 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureMasterRestarts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureMasterRestarts.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertEquals; @@ -44,7 +43,7 @@ public class TestSnapshotProcedureMasterRestarts extends TestSnapshotProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotProcedureMasterRestarts.class); + HBaseClassTestRule.forClass(TestSnapshotProcedureMasterRestarts.class); @Test public void testMasterRestarts() throws Exception { @@ -56,8 +55,8 @@ public void testMasterRestarts() throws Exception { long procId = procExec.submitProcedure(spySp); - TEST_UTIL.waitFor(2000, () -> env.getMasterServices().getProcedures() - .stream().map(Procedure::getProcId).collect(Collectors.toList()).contains(procId)); + TEST_UTIL.waitFor(2000, () -> env.getMasterServices().getProcedures().stream() + .map(Procedure::getProcId).collect(Collectors.toList()).contains(procId)); TEST_UTIL.getHBaseCluster().killMaster(master.getServerName()); TEST_UTIL.getHBaseCluster().waitForMasterToStop(master.getServerName(), 30000); TEST_UTIL.getHBaseCluster().startMaster(); @@ -67,11 +66,9 @@ public void testMasterRestarts() throws Exception { assertTrue(master.getSnapshotManager().isTakingAnySnapshot()); assertTrue(master.getSnapshotManager().isTableTakingAnySnapshot(TABLE_NAME)); - List unfinishedProcedures = master - .getMasterProcedureExecutor().getProcedures().stream() - .filter(p -> p instanceof SnapshotProcedure) - .filter(p -> !p.isFinished()).map(p -> (SnapshotProcedure) p) - .collect(Collectors.toList()); + List unfinishedProcedures = master.getMasterProcedureExecutor() + .getProcedures().stream().filter(p -> p instanceof SnapshotProcedure) + .filter(p -> !p.isFinished()).map(p -> (SnapshotProcedure) p).collect(Collectors.toList()); assertEquals(unfinishedProcedures.size(), 1); long newProcId = unfinishedProcedures.get(0).getProcId(); assertEquals(procId, newProcId); @@ -79,8 +76,8 @@ public void testMasterRestarts() throws Exception { 
ProcedureTestingUtility.waitProcedure(master.getMasterProcedureExecutor(), newProcId); assertFalse(master.getSnapshotManager().isTableTakingAnySnapshot(TABLE_NAME)); - List snapshots - = master.getSnapshotManager().getCompletedSnapshots(); + List snapshots = + master.getSnapshotManager().getCompletedSnapshots(); assertEquals(1, snapshots.size()); assertEquals(SNAPSHOT_NAME, snapshots.get(0).getName()); assertEquals(TABLE_NAME, TableName.valueOf(snapshots.get(0).getTable())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureRIT.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureRIT.java index 73f913e791f9..e94fb6bf0676 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureRIT.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureRIT.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import java.util.List; @@ -39,23 +38,23 @@ public class TestSnapshotProcedureRIT extends TestSnapshotProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotProcedureRIT.class); + HBaseClassTestRule.forClass(TestSnapshotProcedureRIT.class); @Test public void testTableInMergeWhileTakingSnapshot() throws Exception { ProcedureExecutor procExec = master.getMasterProcedureExecutor(); List regions = master.getAssignmentManager().getTableRegions(TABLE_NAME, true) - .stream().sorted(RegionInfo.COMPARATOR).collect(Collectors.toList()); - MergeTableRegionsProcedure mergeProc = new MergeTableRegionsProcedure( - procExec.getEnvironment(), new RegionInfo[] {regions.get(0), regions.get(1)}, false); + .stream().sorted(RegionInfo.COMPARATOR).collect(Collectors.toList()); + MergeTableRegionsProcedure mergeProc = new MergeTableRegionsProcedure(procExec.getEnvironment(), + new RegionInfo[] { regions.get(0), regions.get(1) }, false); long mergeProcId = procExec.submitProcedure(mergeProc); // wait until merge region procedure running - TEST_UTIL.waitFor(10000, () -> - procExec.getProcedure(mergeProcId).getState() == ProcedureState.RUNNABLE); + TEST_UTIL.waitFor(10000, + () -> procExec.getProcedure(mergeProcId).getState() == ProcedureState.RUNNABLE); SnapshotProcedure sp = new SnapshotProcedure(procExec.getEnvironment(), snapshotProto); long snapshotProcId = procExec.submitProcedure(sp); - TEST_UTIL.waitFor(2000, 1000, () -> procExec.getProcedure(snapshotProcId) != null && - procExec.getProcedure(snapshotProcId).getState() == ProcedureState.WAITING_TIMEOUT); + TEST_UTIL.waitFor(2000, 1000, () -> procExec.getProcedure(snapshotProcId) != null + && procExec.getProcedure(snapshotProcId).getState() == ProcedureState.WAITING_TIMEOUT); ProcedureTestingUtility.waitProcedure(procExec, snapshotProcId); SnapshotTestingUtils.confirmSnapshotValid(TEST_UTIL, snapshotProto, TABLE_NAME, CF); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureRSCrashes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureRSCrashes.java index 084c007c8447..5751230258e3 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureRSCrashes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureRSCrashes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -35,7 +34,7 @@ public class TestSnapshotProcedureRSCrashes extends TestSnapshotProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotProcedureRSCrashes.class); + HBaseClassTestRule.forClass(TestSnapshotProcedureRSCrashes.class); @Test public void testRegionServerCrashWhileTakingSnapshot() throws Exception { @@ -44,10 +43,10 @@ public void testRegionServerCrashWhileTakingSnapshot() throws Exception { SnapshotProcedure sp = new SnapshotProcedure(env, snapshotProto); long procId = procExec.submitProcedure(sp); - SnapshotRegionProcedure snp = waitProcedureRunnableAndGetFirst( - SnapshotRegionProcedure.class, 60000); + SnapshotRegionProcedure snp = + waitProcedureRunnableAndGetFirst(SnapshotRegionProcedure.class, 60000); ServerName targetServer = env.getAssignmentManager().getRegionStates() - .getRegionStateNode(snp.getRegion()).getRegionLocation(); + .getRegionStateNode(snp.getRegion()).getRegionLocation(); TEST_UTIL.getHBaseCluster().killRegionServer(targetServer); TEST_UTIL.waitFor(60000, () -> snp.inRetrying()); @@ -64,15 +63,15 @@ public void testRegionServerCrashWhileVerifyingSnapshot() throws Exception { SnapshotProcedure sp = new SnapshotProcedure(env, snapshotProto); long procId = procExec.submitProcedure(sp); - SnapshotVerifyProcedure svp = waitProcedureRunnableAndGetFirst( - SnapshotVerifyProcedure.class, 60000); + SnapshotVerifyProcedure svp = + waitProcedureRunnableAndGetFirst(SnapshotVerifyProcedure.class, 60000); TEST_UTIL.waitFor(10000, () -> svp.getServerName() != null); ServerName previousTargetServer = svp.getServerName(); HRegionServer rs = TEST_UTIL.getHBaseCluster().getRegionServer(previousTargetServer); TEST_UTIL.getHBaseCluster().killRegionServer(rs.getServerName()); - TEST_UTIL.waitFor(60000, () -> svp.getServerName() != null - && !svp.getServerName().equals(previousTargetServer)); + TEST_UTIL.waitFor(60000, + () -> svp.getServerName() != null && !svp.getServerName().equals(previousTargetServer)); ProcedureTestingUtility.waitProcedure(procExec, procId); SnapshotTestingUtils.assertOneSnapshotThatMatches(TEST_UTIL.getAdmin(), snapshotProto); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureSnapshotCorrupted.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureSnapshotCorrupted.java index f177a44e99d9..26ae113ad1fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureSnapshotCorrupted.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureSnapshotCorrupted.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertFalse; @@ -43,34 +42,32 @@ public class TestSnapshotProcedureSnapshotCorrupted extends TestSnapshotProcedur @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotProcedureSnapshotCorrupted.class); + HBaseClassTestRule.forClass(TestSnapshotProcedureSnapshotCorrupted.class); @Test public void testSnapshotCorruptedAndRollback() throws Exception { ProcedureExecutor procExec = master.getMasterProcedureExecutor(); SnapshotProcedure sp = new SnapshotProcedure(procExec.getEnvironment(), snapshotProto); procExec.submitProcedure(sp); - TEST_UTIL.waitFor(60000, 500, () -> sp.getCurrentStateId() > - MasterProcedureProtos.SnapshotState.SNAPSHOT_CONSOLIDATE_SNAPSHOT_VALUE); + TEST_UTIL.waitFor(60000, 500, () -> sp + .getCurrentStateId() > MasterProcedureProtos.SnapshotState.SNAPSHOT_CONSOLIDATE_SNAPSHOT_VALUE); DistributedFileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); Optional region = TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).stream() - .filter(r -> !r.getStoreFileList(new byte[][] { CF }).isEmpty()) - .findFirst(); + .filter(r -> !r.getStoreFileList(new byte[][] { CF }).isEmpty()).findFirst(); assertTrue(region.isPresent()); region.get().getStoreFileList(new byte[][] { CF }).forEach(s -> { - try { - // delete real data files to trigger the CorruptedSnapshotException - dfs.delete(new Path(s), true); - LOG.info("delete {} to make snapshot corrupt", s); - } catch (Exception e) { - LOG.warn("Failed delete {} to make snapshot corrupt", s, e); - } + try { + // delete real data files to trigger the CorruptedSnapshotException + dfs.delete(new Path(s), true); + LOG.info("delete {} to make snapshot corrupt", s); + } catch (Exception e) { + LOG.warn("Failed delete {} to make snapshot corrupt", s, e); } - ); + }); TEST_UTIL.waitFor(60000, () -> sp.isFailed() && sp.isFinished()); Configuration conf = master.getConfiguration(); - Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir( - snapshotProto, CommonFSUtils.getRootDir(conf), conf); + Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshotProto, + CommonFSUtils.getRootDir(conf), conf); assertFalse(dfs.exists(workingDir)); assertFalse(master.getSnapshotManager().isTakingSnapshot(TABLE_NAME)); assertFalse(master.getSnapshotManager().isTakingAnySnapshot()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotRegionProcedure.java index a6f6ed756513..8ac8b9552658 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotRegionProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotRegionProcedure.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -51,6 +51,7 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; @@ -60,7 +61,7 @@ public class TestSnapshotRegionProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotRegionProcedure.class); + HBaseClassTestRule.forClass(TestSnapshotRegionProcedure.class); private static HBaseTestingUtil TEST_UTIL; private HMaster master; @@ -86,7 +87,7 @@ public void setup() throws Exception { byte[] cf = Bytes.toBytes("cf"); String SNAPSHOT_NAME = "SnapshotRegionProcedureTest"; SnapshotDescription snapshot = - new SnapshotDescription(SNAPSHOT_NAME, tableName, SnapshotType.FLUSH); + new SnapshotDescription(SNAPSHOT_NAME, tableName, SnapshotType.FLUSH); snapshotProto = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); snapshotProto = SnapshotDescriptionUtils.validate(snapshotProto, master.getConfiguration()); final byte[][] splitKeys = new RegionSplitter.HexStringSplit().split(10); @@ -111,7 +112,7 @@ private boolean assertRegionManifestGenerated(RegionInfo region) throws Exceptio public void testSimpleSnapshotRegion() throws Exception { ProcedureExecutor procExec = master.getMasterProcedureExecutor(); List> regions = - master.getAssignmentManager().getTableRegionsAndLocations(tableName, true); + master.getAssignmentManager().getTableRegionsAndLocations(tableName, true); assertEquals(10, regions.size()); Pair region = regions.get(0); SnapshotRegionProcedure srp = new SnapshotRegionProcedure(snapshotProto, region.getFirst()); @@ -124,14 +125,14 @@ public void testSimpleSnapshotRegion() throws Exception { public void testRegionServerCrashWhileTakingSnapshotRegion() throws Exception { ProcedureExecutor procExec = master.getMasterProcedureExecutor(); List> regions = - master.getAssignmentManager().getTableRegionsAndLocations(tableName, true); + master.getAssignmentManager().getTableRegionsAndLocations(tableName, true); assertEquals(10, regions.size()); Pair pair = regions.get(0); SnapshotRegionProcedure srp = new SnapshotRegionProcedure(snapshotProto, pair.getFirst()); long procId = procExec.submitProcedure(srp); TEST_UTIL.getHBaseCluster().killRegionServer(pair.getSecond()); TEST_UTIL.waitFor(60000, () -> !pair.getSecond().equals(master.getAssignmentManager() - .getRegionStates().getRegionStateNode(pair.getFirst()).getRegionLocation())); + .getRegionStates().getRegionStateNode(pair.getFirst()).getRegionLocation())); TEST_UTIL.waitFor(60000, () -> srp.inRetrying()); ProcedureTestingUtility.waitProcedure(procExec, procId); assertTrue(assertRegionManifestGenerated(pair.getFirst())); @@ -140,8 +141,8 @@ public void testRegionServerCrashWhileTakingSnapshotRegion() throws Exception { @After public void teardown() throws Exception { if (this.master != null) { - ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate( - master.getMasterProcedureExecutor(), false); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(master.getMasterProcedureExecutor(), + false); } TEST_UTIL.shutdownMiniCluster(); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotVerifyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotVerifyProcedure.java index 0e2bdcc88f2b..5cb757d0ff11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotVerifyProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotVerifyProcedure.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; @@ -53,6 +52,7 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; @@ -62,15 +62,15 @@ public class TestSnapshotVerifyProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotVerifyProcedure.class); + HBaseClassTestRule.forClass(TestSnapshotVerifyProcedure.class); private HBaseTestingUtil TEST_UTIL; private final TableName tableName = TableName.valueOf("TestRSSnapshotVerifier"); private final byte[] cf = Bytes.toBytes("cf"); private final SnapshotDescription snapshot = - new SnapshotDescription("test-snapshot", tableName, SnapshotType.FLUSH); + new SnapshotDescription("test-snapshot", tableName, SnapshotType.FLUSH); private SnapshotProtos.SnapshotDescription snapshotProto = - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); @Before public void setup() throws Exception { @@ -94,31 +94,30 @@ public void setup() throws Exception { workingDirFs.mkdirs(workingDir); } ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(snapshot.getName()); - SnapshotManifest manifest = SnapshotManifest - .create(conf, workingDirFs, workingDir, snapshotProto, monitor); - manifest.addTableDescriptor(TEST_UTIL.getHBaseCluster() - .getMaster().getTableDescriptors().get(tableName)); + SnapshotManifest manifest = + SnapshotManifest.create(conf, workingDirFs, workingDir, snapshotProto, monitor); + manifest.addTableDescriptor( + TEST_UTIL.getHBaseCluster().getMaster().getTableDescriptors().get(tableName)); SnapshotDescriptionUtils.writeSnapshotInfo(snapshotProto, workingDir, workingDirFs); - TEST_UTIL.getHBaseCluster() - .getRegions(tableName).forEach(r -> { - try { - r.addRegionToSnapshot(snapshotProto, monitor); - } catch (IOException e) { - LOG.warn("Failed snapshot region {}", r.getRegionInfo()); - } - }); + TEST_UTIL.getHBaseCluster().getRegions(tableName).forEach(r -> { + try { + r.addRegionToSnapshot(snapshotProto, monitor); + } catch (IOException e) { + LOG.warn("Failed snapshot region {}", r.getRegionInfo()); + } + }); manifest.consolidate(); } @Test public void testSimpleVerify() throws Exception { - Optional regionOpt = TEST_UTIL.getHBaseCluster().getRegions(tableName) - .stream().filter(r -> !r.getStore(cf).getStorefiles().isEmpty()).findFirst(); + Optional regionOpt = TEST_UTIL.getHBaseCluster().getRegions(tableName).stream() + .filter(r -> !r.getStore(cf).getStorefiles().isEmpty()).findFirst(); Assert.assertTrue(regionOpt.isPresent()); HRegion region = regionOpt.get(); SnapshotVerifyProcedure p1 = new SnapshotVerifyProcedure(snapshotProto, region.getRegionInfo()); ProcedureExecutor procExec = - 
TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); long procId = procExec.submitProcedure(p1); ProcedureTestingUtility.waitProcedure(procExec, procId); Assert.assertTrue(p1.isSuccess()); @@ -152,8 +151,8 @@ public void testRestartMaster() throws Exception { // restore used worker master = TEST_UTIL.getHBaseCluster().getMaster(); - SnapshotVerifyProcedure svp2 = master.getMasterProcedureExecutor() - .getProcedure(SnapshotVerifyProcedure.class, procId); + SnapshotVerifyProcedure svp2 = + master.getMasterProcedureExecutor().getProcedure(SnapshotVerifyProcedure.class, procId); Assert.assertNotNull(svp2); Assert.assertFalse(svp2.isFinished()); Assert.assertNotNull(svp2.getServerName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java index efdce89b7d20..eea61a020086 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.List; import java.util.Optional; - import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -72,8 +71,8 @@ public void setup() throws Exception { @After public void teardown() throws Exception { if (this.master != null) { - ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate( - master.getMasterProcedureExecutor(), false); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(master.getMasterProcedureExecutor(), + false); } TEST_UTIL.shutdownMiniCluster(); } @@ -114,7 +113,7 @@ public void testMasterRestart() throws Exception { SplitWALProcedure splitWALProcedure = new SplitWALProcedure(wals.get(0).getPath().toString(), testServer.getServerName()); long pid = ProcedureTestingUtility.submitProcedure(master.getMasterProcedureExecutor(), - splitWALProcedure, HConstants.NO_NONCE, HConstants.NO_NONCE); + splitWALProcedure, HConstants.NO_NONCE, HConstants.NO_NONCE); TEST_UTIL.waitFor(5000, () -> splitWALProcedure.getWorker() != null); // Kill master TEST_UTIL.getHBaseCluster().killMaster(master.getServerName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java index 8327e7dd8daa..b474a3df6047 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertTrue; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java index b6fe4374a518..900bb95be16f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,17 +49,18 @@ import org.junit.rules.TestName; /** - * Verify that the HTableDescriptor is updated after - * addColumn(), deleteColumn() and modifyTable() operations. + * Verify that the HTableDescriptor is updated after addColumn(), deleteColumn() and modifyTable() + * operations. */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestTableDescriptorModificationFromClient { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestTableDescriptorModificationFromClient.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static TableName TABLE_NAME = null; private static final byte[] FAMILY_0 = Bytes.toBytes("cf0"); @@ -67,7 +68,6 @@ public class TestTableDescriptorModificationFromClient { /** * Start up a mini cluster and put a small table of empty regions into it. 
- * * @throws Exception */ @BeforeClass @@ -91,7 +91,7 @@ public void testModifyTable() throws IOException { Admin admin = TEST_UTIL.getAdmin(); // Create a table with one family TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)).build(); admin.createTable(tableDescriptor); admin.disableTable(TABLE_NAME); try { @@ -100,8 +100,8 @@ public void testModifyTable() throws IOException { // Modify the table adding another family and verify the descriptor TableDescriptor modifiedtableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1)).build(); admin.modifyTable(modifiedtableDescriptor); verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); } finally { @@ -114,7 +114,7 @@ public void testAddColumn() throws IOException { Admin admin = TEST_UTIL.getAdmin(); // Create a table with two families TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)).build(); admin.createTable(tableDescriptor); admin.disableTable(TABLE_NAME); try { @@ -134,7 +134,7 @@ public void testAddSameColumnFamilyTwice() throws IOException { Admin admin = TEST_UTIL.getAdmin(); // Create a table with one families TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)).build(); admin.createTable(tableDescriptor); admin.disableTable(TABLE_NAME); try { @@ -166,7 +166,7 @@ public void testModifyColumnFamily() throws IOException { int blockSize = cfDescriptor.getBlocksize(); // Create a table with one families TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(cfDescriptor).build(); + TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(cfDescriptor).build(); admin.createTable(tableDescriptor); admin.disableTable(TABLE_NAME); try { @@ -175,7 +175,7 @@ public void testModifyColumnFamily() throws IOException { int newBlockSize = 2 * blockSize; cfDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(cfDescriptor).setBlocksize(newBlockSize).build(); + ColumnFamilyDescriptorBuilder.newBuilder(cfDescriptor).setBlocksize(newBlockSize).build(); // Modify colymn family admin.modifyColumnFamily(TABLE_NAME, cfDescriptor); @@ -196,7 +196,7 @@ public void testModifyNonExistingColumnFamily() throws IOException { int blockSize = cfDescriptor.getBlocksize(); // Create a table with one families TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)).build(); admin.createTable(tableDescriptor); admin.disableTable(TABLE_NAME); try { @@ -205,7 +205,7 @@ public void testModifyNonExistingColumnFamily() throws IOException { int newBlockSize = 2 * blockSize; cfDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(cfDescriptor).setBlocksize(newBlockSize).build(); + 
ColumnFamilyDescriptorBuilder.newBuilder(cfDescriptor).setBlocksize(newBlockSize).build(); // Modify a column family that is not in the table. try { @@ -225,8 +225,8 @@ public void testDeleteColumn() throws IOException { Admin admin = TEST_UTIL.getAdmin(); // Create a table with two families TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1)).build(); admin.createTable(tableDescriptor); admin.disableTable(TABLE_NAME); try { @@ -246,8 +246,8 @@ public void testDeleteSameColumnFamilyTwice() throws IOException { Admin admin = TEST_UTIL.getAdmin(); // Create a table with two families TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1)).build(); admin.createTable(tableDescriptor); admin.disableTable(TABLE_NAME); try { @@ -281,17 +281,16 @@ private void verifyTableDescriptor(final TableName tableName, final byte[]... fa // Verify descriptor from HDFS MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), tableName); - TableDescriptor td = - FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); + TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); verifyTableDescriptor(td, tableName, families); } - private void verifyTableDescriptor(final TableDescriptor htd, - final TableName tableName, final byte[]... families) { + private void verifyTableDescriptor(final TableDescriptor htd, final TableName tableName, + final byte[]... families) { Set htdFamilies = htd.getColumnFamilyNames(); assertEquals(tableName, htd.getTableName()); assertEquals(families.length, htdFamilies.size()); - for (byte[] familyName: families) { + for (byte[] familyName : families) { assertTrue("Expected family " + Bytes.toString(familyName), htdFamilies.contains(familyName)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java index 95468d413750..99a3f797eca7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public class TestTruncateTableProcedure extends TestTableDDLProcedureBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTruncateTableProcedure.class); + HBaseClassTestRule.forClass(TestTruncateTableProcedure.class); private static final Logger LOG = LoggerFactory.getLogger(TestTruncateTableProcedure.class); @@ -78,7 +78,7 @@ public void testTruncateNotExistentTable() throws Exception { Throwable cause = null; try { long procId = ProcedureTestingUtility.submitAndWait(procExec, - new TruncateTableProcedure(procExec.getEnvironment(), tableName, true)); + new TruncateTableProcedure(procExec.getEnvironment(), tableName, true)); // Second delete should fail with TableNotFound Procedure result = procExec.getResult(procId); @@ -103,7 +103,7 @@ public void testTruncateNotDisabledTable() throws Exception { Throwable cause = null; try { long procId = ProcedureTestingUtility.submitAndWait(procExec, - new TruncateTableProcedure(procExec.getEnvironment(), tableName, false)); + new TruncateTableProcedure(procExec.getEnvironment(), tableName, false)); // Second delete should fail with TableNotDisabled Procedure result = procExec.getResult(procId); @@ -131,15 +131,14 @@ public void testSimpleTruncateNoPreserveSplits() throws Exception { private void testSimpleTruncate(final TableName tableName, final boolean preserveSplits) throws Exception { final String[] families = new String[] { "f1", "f2" }; - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") - }; + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - getMasterProcedureExecutor(), tableName, splitKeys, families); + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), + tableName, splitKeys, families); // load and verify that there are rows in the table - MasterProcedureTestingUtility.loadData( - UTIL.getConnection(), tableName, 100, splitKeys, families); + MasterProcedureTestingUtility.loadData(UTIL.getConnection(), tableName, 100, splitKeys, + families); assertEquals(100, UTIL.countRows(tableName)); // disable the table UTIL.getAdmin().disableTable(tableName); @@ -161,15 +160,15 @@ private void testSimpleTruncate(final TableName tableName, final boolean preserv } else { assertEquals(1, regions.length); } - MasterProcedureTestingUtility.validateTableCreation( - UTIL.getHBaseCluster().getMaster(), tableName, regions, families); + MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), + tableName, regions, families); // verify that there are no rows in the table assertEquals(0, UTIL.countRows(tableName)); // verify that the table is read/writable - MasterProcedureTestingUtility.loadData( - UTIL.getConnection(), tableName, 50, splitKeys, families); + MasterProcedureTestingUtility.loadData(UTIL.getConnection(), tableName, 50, splitKeys, + families); assertEquals(50, UTIL.countRows(tableName)); } @@ -190,14 +189,13 @@ private void testRecoveryAndDoubleExecution(final TableName tableName, final String[] families = new String[] { "f1", "f2" }; // create the table - final byte[][] splitKeys = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") - }; - RegionInfo[] regions = MasterProcedureTestingUtility.createTable( - 
getMasterProcedureExecutor(), tableName, splitKeys, families); + final byte[][] splitKeys = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; + RegionInfo[] regions = MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), + tableName, splitKeys, families); // load and verify that there are rows in the table - MasterProcedureTestingUtility.loadData( - UTIL.getConnection(), tableName, 100, splitKeys, families); + MasterProcedureTestingUtility.loadData(UTIL.getConnection(), tableName, 100, splitKeys, + families); assertEquals(100, UTIL.countRows(tableName)); // disable the table UTIL.getAdmin().disableTable(tableName); @@ -224,15 +222,15 @@ private void testRecoveryAndDoubleExecution(final TableName tableName, } else { assertEquals(1, regions.length); } - MasterProcedureTestingUtility.validateTableCreation( - UTIL.getHBaseCluster().getMaster(), tableName, regions, families); + MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), + tableName, regions, families); // verify that there are no rows in the table assertEquals(0, UTIL.countRows(tableName)); // verify that the table is read/writable - MasterProcedureTestingUtility.loadData( - UTIL.getConnection(), tableName, 50, splitKeys, families); + MasterProcedureTestingUtility.loadData(UTIL.getConnection(), tableName, 50, splitKeys, + families); assertEquals(50, UTIL.countRows(tableName)); } @@ -258,17 +256,16 @@ public TruncateTableProcedureOnHDFSFailure() { } public TruncateTableProcedureOnHDFSFailure(final MasterProcedureEnv env, TableName tableName, - boolean preserveSplits) - throws HBaseIOException { + boolean preserveSplits) throws HBaseIOException { super(env, tableName, preserveSplits); } @Override protected Flow executeFromState(MasterProcedureEnv env, - MasterProcedureProtos.TruncateTableState state) throws InterruptedException { + MasterProcedureProtos.TruncateTableState state) throws InterruptedException { - if (!failOnce && - state == MasterProcedureProtos.TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT) { + if (!failOnce + && state == MasterProcedureProtos.TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT) { try { // To emulate an HDFS failure, create only the first region directory RegionInfo regionInfo = getFirstRegionInfo(); @@ -293,17 +290,16 @@ protected Flow executeFromState(MasterProcedureEnv env, private void testOnHDFSFailure(TableName tableName, boolean preserveSplits) throws Exception { String[] families = new String[] { "f1", "f2" }; - byte[][] splitKeys = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") - }; + byte[][] splitKeys = + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; // create a table - MasterProcedureTestingUtility.createTable( - getMasterProcedureExecutor(), tableName, splitKeys, families); + MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), tableName, splitKeys, + families); // load and verify that there are rows in the table - MasterProcedureTestingUtility.loadData( - UTIL.getConnection(), tableName, 100, splitKeys, families); + MasterProcedureTestingUtility.loadData(UTIL.getConnection(), tableName, 100, splitKeys, + families); assertEquals(100, UTIL.countRows(tableName)); // disable the table @@ -311,9 +307,9 @@ private void testOnHDFSFailure(TableName tableName, boolean preserveSplits) thro // truncate the table final ProcedureExecutor procExec = getMasterProcedureExecutor(); - long procId = ProcedureTestingUtility.submitAndWait(procExec, - 
new TruncateTableProcedureOnHDFSFailure(procExec.getEnvironment(), tableName, - preserveSplits)); + long procId = + ProcedureTestingUtility.submitAndWait(procExec, new TruncateTableProcedureOnHDFSFailure( + procExec.getEnvironment(), tableName, preserveSplits)); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); } @@ -321,7 +317,7 @@ private void testOnHDFSFailure(TableName tableName, boolean preserveSplits) thro public void testTruncateWithPreserveAfterSplit() throws Exception { String[] families = new String[] { "f1", "f2" }; byte[][] splitKeys = - new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; TableName tableName = TableName.valueOf(name.getMethodName()); RegionInfo[] regions = MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), tableName, splitKeys, families); @@ -332,15 +328,15 @@ public void testTruncateWithPreserveAfterSplit() throws Exception { public void testTruncatePreserveWithReplicaRegionAfterSplit() throws Exception { String[] families = new String[] { "f1", "f2" }; byte[][] splitKeys = - new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; TableName tableName = TableName.valueOf(name.getMethodName()); // create a table with region replications TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3) - .setColumnFamilies(Arrays.stream(families) - .map(fam -> ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(fam)).build()) - .collect(Collectors.toList())) - .build(); + .setColumnFamilies(Arrays.stream(families) + .map(fam -> ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(fam)).build()) + .collect(Collectors.toList())) + .build(); RegionInfo[] regions = ModifyRegionUtils.createRegionInfos(htd, splitKeys); ProcedureExecutor procExec = getMasterProcedureExecutor(); long procId = ProcedureTestingUtility.submitAndWait(procExec, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableWithMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableWithMasterFailover.java index 4ee39c9f6641..11157f8b567b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableWithMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableWithMasterFailover.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ public class TestTruncateTableWithMasterFailover extends MasterFailoverWithProce @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTruncateTableWithMasterFailover.class); + HBaseClassTestRule.forClass(TestTruncateTableWithMasterFailover.class); // ========================================================================== // Test Truncate Table @@ -54,13 +54,13 @@ public void testTruncateWithFailover() throws Exception { } private void testTruncateWithFailoverAtStep(final boolean preserveSplits, final int step) - throws Exception { + throws Exception { final TableName tableName = TableName.valueOf("testTruncateWithFailoverAtStep" + step); // create the table final String[] families = new String[] { "f1", "f2" }; final byte[][] splitKeys = - new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; + new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") }; RegionInfo[] regions = MasterProcedureTestingUtility.createTable(getMasterProcedureExecutor(), tableName, splitKeys, families); // load and verify that there are rows in the table diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestWALProcedureStoreOnHDFS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestWALProcedureStoreOnHDFS.java index 816bdcdbd366..80607ce6948a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestWALProcedureStoreOnHDFS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestWALProcedureStoreOnHDFS.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestWALProcedureStoreOnHDFS { @ClassRule @@ -57,16 +57,18 @@ public class TestWALProcedureStoreOnHDFS { private WALProcedureStore store; - private ProcedureStore.ProcedureStoreListener stopProcedureListener = new ProcedureStore.ProcedureStoreListener() { - @Override - public void postSync() {} + private ProcedureStore.ProcedureStoreListener stopProcedureListener = + new ProcedureStore.ProcedureStoreListener() { + @Override + public void postSync() { + } - @Override - public void abortProcess() { - LOG.error(HBaseMarkers.FATAL, "Abort the Procedure Store"); - store.stop(true); - } - }; + @Override + public void abortProcess() { + LOG.error(HBaseMarkers.FATAL, "Abort the Procedure Store"); + store.stop(true); + } + }; @Before public void initConfig() { @@ -107,7 +109,7 @@ public void tearDown() throws Exception { } } - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testWalAbortOnLowReplication() throws Exception { setupDFS(); @@ -133,10 +135,13 @@ public void testWalAbortOnLowReplicationWithQueuedWriters() throws Exception { assertEquals(3, UTIL.getDFSCluster().getDataNodes().size()); store.registerListener(new ProcedureStore.ProcedureStoreListener() { @Override - public void postSync() { Threads.sleepWithoutInterrupt(2000); } + public void postSync() { + Threads.sleepWithoutInterrupt(2000); + } @Override - public void abortProcess() {} + public void abortProcess() { + } }); final AtomicInteger reCount = new AtomicInteger(0); @@ -166,8 +171,8 @@ public void abortProcess() {} } assertFalse(store.isRunning()); - assertTrue(reCount.toString(), reCount.get() >= store.getNumThreads() && - reCount.get() < thread.length); + assertTrue(reCount.toString(), + reCount.get() >= store.getNumThreads() && reCount.get() < thread.length); } @Test @@ -196,7 +201,7 @@ public void waitForNumReplicas(int numReplicas) throws Exception { } for (int i = 0; i < numReplicas; ++i) { - for (DataNode dn: UTIL.getDFSCluster().getDataNodes()) { + for (DataNode dn : UTIL.getDFSCluster().getDataNodes()) { while (!dn.isDatanodeFullyStarted()) { Thread.sleep(100); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java index 2c58c58a4505..963824ac5cc3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -62,9 +62,9 @@ public class MasterRegionTestBase { protected static String REGION_DIR_NAME = "local"; protected static TableDescriptor TD = - TableDescriptorBuilder.newBuilder(TableName.valueOf("test:local")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF2)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf("test:local")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF2)).build(); protected void configure(Configuration conf) throws IOException { } @@ -97,21 +97,21 @@ protected final void createMasterRegion() throws IOException { Server server = mock(Server.class); when(server.getConfiguration()).thenReturn(conf); when(server.getServerName()) - .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); + .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); when(server.getChoreService()).thenReturn(choreService); Path testDir = htu.getDataTestDir(); CommonFSUtils.setRootDir(conf, testDir); MasterRegionParams params = new MasterRegionParams(); - TableDescriptor td = TableDescriptorBuilder - .newBuilder(TD).setValue(StoreFileTrackerFactory.TRACKER_IMPL, conf - .get(StoreFileTrackerFactory.TRACKER_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name())) - .build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TD) + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, conf.get( + StoreFileTrackerFactory.TRACKER_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name())) + .build(); params.server(server).regionDirName(REGION_DIR_NAME).tableDescriptor(td) - .flushSize(TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE).flushPerChanges(1_000_000) - .flushIntervalMs(TimeUnit.MINUTES.toMillis(15)).compactMin(4).maxWals(32).useHsync(false) - .ringBufferSlotCount(16).rollPeriodMs(TimeUnit.MINUTES.toMillis(15)) - .archivedWalSuffix(MasterRegionFactory.ARCHIVED_WAL_SUFFIX) - .archivedHFileSuffix(MasterRegionFactory.ARCHIVED_HFILE_SUFFIX); + .flushSize(TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE).flushPerChanges(1_000_000) + .flushIntervalMs(TimeUnit.MINUTES.toMillis(15)).compactMin(4).maxWals(32).useHsync(false) + .ringBufferSlotCount(16).rollPeriodMs(TimeUnit.MINUTES.toMillis(15)) + .archivedWalSuffix(MasterRegionFactory.ARCHIVED_WAL_SUFFIX) + .archivedHFileSuffix(MasterRegionFactory.ARCHIVED_HFILE_SUFFIX); configure(params); region = MasterRegion.create(params); postSetUp(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestChangeSFTForMasterRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestChangeSFTForMasterRegion.java index 0b0eb47ea480..d042d60a3d34 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestChangeSFTForMasterRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestChangeSFTForMasterRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public class TestChangeSFTForMasterRegion { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestChangeSFTForMasterRegion.class); + HBaseClassTestRule.forClass(TestChangeSFTForMasterRegion.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -79,7 +79,7 @@ public void test() throws Exception { UTIL.waitTableAvailable(NAME); // confirm that we have changed the SFT to FILE TableDescriptor td = - UTIL.getMiniHBaseCluster().getMaster().getMasterRegion().region.getTableDescriptor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterRegion().region.getTableDescriptor(); assertEquals(StoreFileTrackerFactory.Trackers.FILE.name(), td.getValue(StoreFileTrackerFactory.TRACKER_IMPL)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java index 6759903608e8..c94ba5d8f038 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class TestMasterRegionCompaction extends MasterRegionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterRegionCompaction.class); + HBaseClassTestRule.forClass(TestMasterRegionCompaction.class); private int compactMin = 4; @@ -86,7 +86,7 @@ private int getStorefilesCount() { } private void assertFileCount(FileSystem fs, Path storeArchiveDir, int expected) - throws IOException { + throws IOException { FileStatus[] compactedHFiles = fs.listStatus(storeArchiveDir); assertEquals(expected, compactedHFiles.length); } @@ -97,7 +97,7 @@ public void test() throws IOException, InterruptedException { final int index = i; region.update( r -> r.put(new Put(Bytes.toBytes(index)).addColumn(CF1, QUALIFIER, Bytes.toBytes(index)) - .addColumn(CF2, QUALIFIER, Bytes.toBytes(index)))); + .addColumn(CF2, QUALIFIER, Bytes.toBytes(index)))); region.flush(true); } assertEquals(2 * (compactMin - 1), getStorefilesCount()); @@ -115,8 +115,8 @@ public void test() throws IOException, InterruptedException { try { FileStatus[] fses1 = fs.listStatus(store1ArchiveDir); FileStatus[] fses2 = fs.listStatus(store2ArchiveDir); - return fses1 != null && fses1.length == compactMin && fses2 != null && - fses2.length == compactMin - 1; + return fses1 != null && fses1.length == compactMin && fses2 != null + && fses2.length == compactMin - 1; } catch (FileNotFoundException e) { return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionFlush.java index 93cbf2c55210..4c95c8098b08 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionFlush.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ public class TestMasterRegionFlush { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterRegionFlush.class); + HBaseClassTestRule.forClass(TestMasterRegionFlush.class); private Configuration conf; @@ -73,7 +73,7 @@ public void setUp() throws IOException { when(store.getStorefilesCount()).thenReturn(1); when(region.getStores()).thenReturn(Collections.singletonList(store)); when(region.getRegionInfo()) - .thenReturn(RegionInfoBuilder.newBuilder(TableName.valueOf("hbase:local")).build()); + .thenReturn(RegionInfoBuilder.newBuilder(TableName.valueOf("hbase:local")).build()); flushCalled = new AtomicInteger(0); memstoreHeapSize = new AtomicLong(0); memstoreOffHeapSize = new AtomicLong(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionInitialize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionInitialize.java index 5ebde72db5d0..5f0f2a534687 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionInitialize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionInitialize.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,13 +46,13 @@ public class TestMasterRegionInitialize extends MasterRegionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterRegionInitialize.class); + HBaseClassTestRule.forClass(TestMasterRegionInitialize.class); @Test public void testUpgrade() throws IOException { Path rootDir = new Path(htu.getDataTestDir(), REGION_DIR_NAME); Path tableDir = - CommonFSUtils.getTableDir(rootDir, region.region.getTableDescriptor().getTableName()); + CommonFSUtils.getTableDir(rootDir, region.region.getTableDescriptor().getTableName()); Path initializingFlag = new Path(tableDir, MasterRegion.INITIALIZING_FLAG); Path initializedFlag = new Path(tableDir, MasterRegion.INITIALIZED_FLAG); HRegionFileSystem hfs = region.region.getRegionFileSystem(); @@ -88,7 +88,7 @@ public void testUpgrade() throws IOException { public void testInitializingCleanup() throws IOException { Path rootDir = new Path(htu.getDataTestDir(), REGION_DIR_NAME); Path tableDir = - CommonFSUtils.getTableDir(rootDir, region.region.getTableDescriptor().getTableName()); + CommonFSUtils.getTableDir(rootDir, region.region.getTableDescriptor().getTableName()); Path initializingFlag = new Path(tableDir, MasterRegion.INITIALIZING_FLAG); Path initializedFlag = new Path(tableDir, MasterRegion.INITIALIZED_FLAG); HRegionFileSystem hfs = region.region.getRegionFileSystem(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java index ed4784276777..55c01e87c1a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java @@ -77,7 +77,7 @@ public class TestMasterRegionOnTwoFileSystems { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - 
HBaseClassTestRule.forClass(TestMasterRegionOnTwoFileSystems.class); + HBaseClassTestRule.forClass(TestMasterRegionOnTwoFileSystems.class); private static final HBaseCommonTestingUtil HFILE_UTIL = new HBaseCommonTestingUtil(); @@ -87,11 +87,12 @@ public class TestMasterRegionOnTwoFileSystems { private static byte[] CQ = Bytes.toBytes("q"); - private static TableDescriptor TD = TableDescriptorBuilder - .newBuilder(TableName.valueOf("test:local")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .build(); + private static TableDescriptor TD = + TableDescriptorBuilder.newBuilder(TableName.valueOf("test:local")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)) + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .build(); private static int COMPACT_MIN = 4; @@ -124,11 +125,11 @@ private MasterRegion createMasterRegion(ServerName serverName) throws IOExceptio when(server.getServerName()).thenReturn(serverName); MasterRegionParams params = new MasterRegionParams(); params.server(server).regionDirName("local").tableDescriptor(TD) - .flushSize(TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE).flushPerChanges(1_000_000) - .flushIntervalMs(TimeUnit.MINUTES.toMillis(15)).compactMin(COMPACT_MIN).maxWals(32) - .useHsync(false).ringBufferSlotCount(16).rollPeriodMs(TimeUnit.MINUTES.toMillis(15)) - .archivedWalSuffix(MasterRegionFactory.ARCHIVED_WAL_SUFFIX) - .archivedHFileSuffix(MasterRegionFactory.ARCHIVED_HFILE_SUFFIX); + .flushSize(TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE).flushPerChanges(1_000_000) + .flushIntervalMs(TimeUnit.MINUTES.toMillis(15)).compactMin(COMPACT_MIN).maxWals(32) + .useHsync(false).ringBufferSlotCount(16).rollPeriodMs(TimeUnit.MINUTES.toMillis(15)) + .archivedWalSuffix(MasterRegionFactory.ARCHIVED_WAL_SUFFIX) + .archivedHFileSuffix(MasterRegionFactory.ARCHIVED_HFILE_SUFFIX); return MasterRegion.create(params); } @@ -140,8 +141,8 @@ public void setUpBeforeTest() throws IOException { Path walRootDir = WAL_UTIL.getDataTestDirOnTestFS(); FileSystem walFs = WAL_UTIL.getTestFileSystem(); walFs.delete(walRootDir, true); - region = createMasterRegion(ServerName.valueOf("localhost", 12345, - EnvironmentEdgeManager.currentTime())); + region = createMasterRegion( + ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); } @After @@ -158,8 +159,8 @@ public void testFlushAndCompact() throws Exception { int compactMinMinusOne = COMPACT_MIN - 1; for (int i = 0; i < compactMinMinusOne; i++) { final int index = i; - region - .update(r -> r.put(new Put(Bytes.toBytes(index)).addColumn(CF, CQ, Bytes.toBytes(index)))); + region.update( + r -> r.put(new Put(Bytes.toBytes(index)).addColumn(CF, CQ, Bytes.toBytes(index)))); region.flush(true); } byte[] bytes = Bytes.toBytes(compactMinMinusOne); @@ -181,15 +182,15 @@ public void testFlushAndCompact() throws Exception { } }); LOG.info("hfile archive content {}", Arrays.stream(rootFs.listStatus(storeArchiveDir)) - .map(f -> f.getPath().toString()).collect(Collectors.joining(","))); + .map(f -> f.getPath().toString()).collect(Collectors.joining(","))); // make sure the archived wal files are on the wal fs Path walArchiveDir = new Path(CommonFSUtils.getWALRootDir(HFILE_UTIL.getConfiguration()), - HConstants.HREGION_OLDLOGDIR_NAME); + HConstants.HREGION_OLDLOGDIR_NAME); LOG.info("wal archive dir {}", walArchiveDir); AbstractFSWAL wal = (AbstractFSWAL) 
region.region.getWAL(); Path currentWALFile = wal.getCurrentFileName(); - for (int i = 0; ; i++) { + for (int i = 0;; i++) { region.requestRollAll(); region.waitUntilWalRollFinished(); Path newWALFile = wal.getCurrentFileName(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java index f936e9e4f592..55551d0b8f72 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestMasterRegionWALCleaner extends MasterRegionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterRegionWALCleaner.class); + HBaseClassTestRule.forClass(TestMasterRegionWALCleaner.class); private static long TTL_MS = 5000; @@ -79,7 +79,7 @@ public boolean isStopped() { @Test public void test() throws IOException, InterruptedException { region - .update(r -> r.put(new Put(Bytes.toBytes(1)).addColumn(CF1, QUALIFIER, Bytes.toBytes(1)))); + .update(r -> r.put(new Put(Bytes.toBytes(1)).addColumn(CF1, QUALIFIER, Bytes.toBytes(1)))); region.flush(true); Path testDir = htu.getDataTestDir(); FileSystem fs = testDir.getFileSystem(htu.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALRecovery.java index ad04f16ad800..4c0331826779 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionWALRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.Arrays; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -50,7 +49,7 @@ public class TestMasterRegionWALRecovery extends MasterRegionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterRegionWALRecovery.class); + HBaseClassTestRule.forClass(TestMasterRegionWALRecovery.class); private Path masterRegionDir; @@ -65,7 +64,7 @@ protected void postSetUp() throws IOException { @Test public void test() throws IOException, InterruptedException { region - .update(r -> r.put(new Put(Bytes.toBytes(1)).addColumn(CF1, QUALIFIER, Bytes.toBytes(1)))); + .update(r -> r.put(new Put(Bytes.toBytes(1)).addColumn(CF1, QUALIFIER, Bytes.toBytes(1)))); region.flush(true); Path testDir = htu.getDataTestDir(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestModifyPeerProcedureRetryBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestModifyPeerProcedureRetryBackoff.java index 78bdde33a3b2..9c858a6de916 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestModifyPeerProcedureRetryBackoff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestModifyPeerProcedureRetryBackoff.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public class TestModifyPeerProcedureRetryBackoff { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestModifyPeerProcedureRetryBackoff.class); + HBaseClassTestRule.forClass(TestModifyPeerProcedureRetryBackoff.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -70,8 +70,8 @@ private void tryFail() throws ReplicationException { } @Override - protected > void addChildProcedure( - @SuppressWarnings("unchecked") T... subProcedure) { + protected > void + addChildProcedure(@SuppressWarnings("unchecked") T... subProcedure) { // Make it a no-op } @@ -145,7 +145,7 @@ private void assertBackoffIncrease() throws IOException, InterruptedException { @Test public void test() throws IOException, InterruptedException { ProcedureExecutor procExec = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); long procId = procExec.submitProcedure(new TestModifyPeerProcedure("1")); // PRE_PEER_MODIFICATION assertBackoffIncrease(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java index 4dff86d6ce1e..a9f8694d2a11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class TestRegisterPeerWorkerWhenRestarting extends SyncReplicationTestBas @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegisterPeerWorkerWhenRestarting.class); + HBaseClassTestRule.forClass(TestRegisterPeerWorkerWhenRestarting.class); private static volatile boolean FAIL = false; @@ -59,7 +59,7 @@ public HMasterForTest(Configuration conf) throws IOException { @Override public void remoteProcedureCompleted(long procId) { if (FAIL && getMasterProcedureExecutor() - .getProcedure(procId) instanceof SyncReplicationReplayWALRemoteProcedure) { + .getProcedure(procId) instanceof SyncReplicationReplayWALRemoteProcedure) { throw new RuntimeException("Inject error"); } super.remoteProcedureCompleted(procId); @@ -108,9 +108,9 @@ public void run() { // wait until we are in the states where we need to register peer worker when restarting UTIL2.waitFor(60000, () -> procExec.getProcedures().stream().filter(p -> p instanceof RecoverStandbyProcedure) - .map(p -> (RecoverStandbyProcedure) p) - .anyMatch(p -> p.getCurrentStateId() == DISPATCH_WALS_VALUE || - p.getCurrentStateId() == UNREGISTER_PEER_FROM_WORKER_STORAGE_VALUE)); + .map(p -> (RecoverStandbyProcedure) p) + .anyMatch(p -> p.getCurrentStateId() == DISPATCH_WALS_VALUE + || p.getCurrentStateId() == UNREGISTER_PEER_FROM_WORKER_STORAGE_VALUE)); // failover to another master MasterThread mt = UTIL2.getMiniHBaseCluster().getMasterThread(); mt.getMaster().abort("for testing"); @@ -119,7 +119,7 @@ public void run() { t.join(); // make sure the new master can finish the transition UTIL2.waitFor(60000, () -> UTIL2.getAdmin() - .getReplicationPeerSyncReplicationState(PEER_ID) == SyncReplicationState.DOWNGRADE_ACTIVE); + .getReplicationPeerSyncReplicationState(PEER_ID) == SyncReplicationState.DOWNGRADE_ACTIVE); verify(UTIL2, 0, 100); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestSyncReplicationReplayWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestSyncReplicationReplayWALManager.java index f7f4efcd20f7..9d0b8913e756 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestSyncReplicationReplayWALManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestSyncReplicationReplayWALManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -66,7 +66,7 @@ public class TestSyncReplicationReplayWALManager { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSyncReplicationReplayWALManager.class); + HBaseClassTestRule.forClass(TestSyncReplicationReplayWALManager.class); private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -87,11 +87,11 @@ public void setUp() throws IOException, ReplicationException { listeners = new ArrayList<>(); ServerManager serverManager = mock(ServerManager.class); doAnswer(inv -> listeners.add(inv.getArgument(0))).when(serverManager) - .registerListener(any(ServerListener.class)); + .registerListener(any(ServerListener.class)); ServerMetrics serverMetrics = mock(ServerMetrics.class); doAnswer(inv -> onlineServers.stream() - .collect(Collectors.toMap(Function.identity(), k -> serverMetrics))).when(serverManager) - .getOnlineServers(); + .collect(Collectors.toMap(Function.identity(), k -> serverMetrics))).when(serverManager) + .getOnlineServers(); MasterFileSystem mfs = mock(MasterFileSystem.class); when(mfs.getFileSystem()).thenReturn(UTIL.getTestFileSystem()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureBackoff.java index e03b71365bcc..49de0aa6329a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureBackoff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureBackoff.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.master.replication; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ProcedureTestUtil; @@ -40,7 +39,7 @@ public class TestTransitPeerSyncReplicationStateProcedureBackoff { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTransitPeerSyncReplicationStateProcedureBackoff.class); + HBaseClassTestRule.forClass(TestTransitPeerSyncReplicationStateProcedureBackoff.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -66,8 +65,8 @@ private void tryFail() throws ReplicationException { } @Override - protected > void addChildProcedure( - @SuppressWarnings("unchecked") T... subProcedure) { + protected > void + addChildProcedure(@SuppressWarnings("unchecked") T... 
subProcedure) { // Make it a no-op } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureRetry.java index 9b73039c180c..da73c708d497 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureRetry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureRetry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public class TestTransitPeerSyncReplicationStateProcedureRetry extends SyncRepli @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTransitPeerSyncReplicationStateProcedureRetry.class); + HBaseClassTestRule.forClass(TestTransitPeerSyncReplicationStateProcedureRetry.class); @BeforeClass public static void setUp() throws Exception { @@ -86,10 +86,10 @@ public void run() { }; t.start(); UTIL2.waitFor(30000, () -> procExec.getProcedures().stream() - .anyMatch(p -> p instanceof TransitPeerSyncReplicationStateProcedure && !p.isFinished())); + .anyMatch(p -> p instanceof TransitPeerSyncReplicationStateProcedure && !p.isFinished())); long procId = procExec.getProcedures().stream() - .filter(p -> p instanceof TransitPeerSyncReplicationStateProcedure && !p.isFinished()) - .mapToLong(Procedure::getProcId).min().getAsLong(); + .filter(p -> p instanceof TransitPeerSyncReplicationStateProcedure && !p.isFinished()) + .mapToLong(Procedure::getProcId).min().getAsLong(); MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); assertEquals(SyncReplicationState.DOWNGRADE_ACTIVE, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java index 35cf935e06a1..f97743549cef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ /** * Test that we correctly reload the cache, filter directories, etc. 
*/ -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestSnapshotFileCache { @ClassRule @@ -107,7 +107,7 @@ public void cleanupFiles() throws Exception { @Test public void testLoadAndDelete() throws IOException { SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, workingFs, workingDir, PERIOD, - 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()); + 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()); createAndTestSnapshotV1(cache, "snapshot1a", false, true, false); createAndTestSnapshotV1(cache, "snapshot1b", true, true, false); @@ -119,7 +119,7 @@ public void testLoadAndDelete() throws IOException { @Test public void testReloadModifiedDirectory() throws IOException { SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, workingFs, workingDir, PERIOD, - 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()); + 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()); createAndTestSnapshotV1(cache, "snapshot1", false, true, false); // now delete the snapshot and add a file with a different name @@ -133,7 +133,7 @@ public void testReloadModifiedDirectory() throws IOException { @Test public void testSnapshotTempDirReload() throws IOException { SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, workingFs, workingDir, PERIOD, - 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()); + 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()); // Add a new non-tmp snapshot createAndTestSnapshotV1(cache, "snapshot0v1", false, false, false); @@ -143,7 +143,7 @@ public void testSnapshotTempDirReload() throws IOException { @Test public void testCacheUpdatedWhenLastModifiedOfSnapDirNotUpdated() throws IOException { SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, workingFs, workingDir, PERIOD, - 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()); + 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()); // Add a new non-tmp snapshot createAndTestSnapshotV1(cache, "snapshot1v1", false, false, true); @@ -163,16 +163,16 @@ public void testWeNeverCacheTmpDirAndLoadIt() throws Exception { // don't refresh the cache unless we tell it to long period = Long.MAX_VALUE; SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, workingFs, workingDir, period, - 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()) { + 10000000, "test-snapshot-file-cache-refresh", new SnapshotFiles()) { @Override - List getSnapshotsInProgress() - throws IOException { + List getSnapshotsInProgress() throws IOException { List result = super.getSnapshotsInProgress(); count.incrementAndGet(); return result; } - @Override public void triggerCacheRefreshForTesting() { + @Override + public void triggerCacheRefreshForTesting() { super.triggerCacheRefreshForTesting(); } }; @@ -201,15 +201,15 @@ List getSnapshotsInProgress() private List getStoreFilesForSnapshot(SnapshotMock.SnapshotBuilder builder) throws IOException { final List allStoreFiles = Lists.newArrayList(); - SnapshotReferenceUtil - .visitReferencedFiles(conf, fs, builder.getSnapshotsDir(), - new SnapshotReferenceUtil.SnapshotVisitor() { - @Override public void storeFile(RegionInfo regionInfo, String familyName, - SnapshotProtos.SnapshotRegionManifest.StoreFile storeFile) throws IOException { - FileStatus status = mockStoreFile(storeFile.getName()); - allStoreFiles.add(status); - } - }); + SnapshotReferenceUtil.visitReferencedFiles(conf, fs, 
builder.getSnapshotsDir(), + new SnapshotReferenceUtil.SnapshotVisitor() { + @Override + public void storeFile(RegionInfo regionInfo, String familyName, + SnapshotProtos.SnapshotRegionManifest.StoreFile storeFile) throws IOException { + FileStatus status = mockStoreFile(storeFile.getName()); + allStoreFiles.add(status); + } + }); return allStoreFiles; } @@ -223,9 +223,9 @@ private FileStatus mockStoreFile(String storeFileName) { class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector { @Override - public Collection filesUnderSnapshot(final FileSystem workingFs, - final Path snapshotDir) throws IOException { - Collection files = new HashSet<>(); + public Collection filesUnderSnapshot(final FileSystem workingFs, final Path snapshotDir) + throws IOException { + Collection files = new HashSet<>(); files.addAll(SnapshotReferenceUtil.getHFileNames(conf, workingFs, snapshotDir)); return files; } @@ -248,11 +248,11 @@ private void createAndTestSnapshotV2(final SnapshotFileCache cache, final String } private void createAndTestSnapshot(final SnapshotFileCache cache, - final SnapshotMock.SnapshotBuilder builder, - final boolean tmp, final boolean removeOnExit, boolean setFolderTime) throws IOException { + final SnapshotMock.SnapshotBuilder builder, final boolean tmp, final boolean removeOnExit, + boolean setFolderTime) throws IOException { List files = new ArrayList<>(); for (int i = 0; i < 3; ++i) { - for (Path filePath: builder.addRegion()) { + for (Path filePath : builder.addRegion()) { if (tmp) { // We should be able to find all the files while the snapshot creation is in-progress CommonFSUtils.logFileSystemState(fs, rootDir, LOG); @@ -273,7 +273,7 @@ private void createAndTestSnapshot(final SnapshotFileCache cache, } // Make sure that all files are still present - for (Path path: files) { + for (Path path : files) { assertFalse("Cache didn't find " + path, contains(getNonSnapshotFiles(cache, path), path)); } @@ -295,7 +295,7 @@ private void createAndTestSnapshot(final SnapshotFileCache cache, } private static boolean contains(Iterable files, Path filePath) { - for (FileStatus status: files) { + for (FileStatus status : files) { LOG.debug("debug in contains, 3.1: " + status.getPath() + " filePath:" + filePath); if (filePath.equals(status.getPath())) { return true; @@ -305,7 +305,7 @@ private static boolean contains(Iterable files, Path filePath) { } private static Iterable getNonSnapshotFiles(SnapshotFileCache cache, Path storeFile) - throws IOException { + throws IOException { return cache.getUnreferencedFiles( Arrays.asList(CommonFSUtils.listStatus(fs, storeFile.getParent())), null); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCacheWithDifferentWorkingDir.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCacheWithDifferentWorkingDir.java index 59bf46d32f73..f3e709bd4557 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCacheWithDifferentWorkingDir.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCacheWithDifferentWorkingDir.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,17 +32,18 @@ import org.junit.experimental.categories.Category; /** - * Test that we correctly reload the cache, filter directories, etc. 
- * while the temporary directory is on a different file system than the root directory + * Test that we correctly reload the cache, filter directories, etc. while the temporary directory + * is on a different file system than the root directory */ -@Category({MasterTests.class, LargeTests.class}) +@Category({ MasterTests.class, LargeTests.class }) public class TestSnapshotFileCacheWithDifferentWorkingDir extends TestSnapshotFileCache { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSnapshotFileCacheWithDifferentWorkingDir.class); + HBaseClassTestRule.forClass(TestSnapshotFileCacheWithDifferentWorkingDir.class); - protected static String TEMP_DIR = Paths.get(".", UUID.randomUUID().toString()).toAbsolutePath().toString(); + protected static String TEMP_DIR = + Paths.get(".", UUID.randomUUID().toString()).toAbsolutePath().toString(); @BeforeClass public static void startCluster() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java index 2c5d64f7c400..3a3cf58025e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ /** * Test that the snapshot hfile cleaner finds hfiles referenced in a snapshot */ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestSnapshotHFileCleaner { @ClassRule @@ -76,7 +76,6 @@ public static void setup() throws Exception { fs = FileSystem.get(conf); } - @AfterClass public static void cleanup() throws IOException { // cleanup @@ -87,7 +86,8 @@ public static void cleanup() throws IOException { public void testFindsSnapshotFilesWhenCleaning() throws IOException { CommonFSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir()); Path rootDir = CommonFSUtils.getRootDir(conf); - Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY); + Path archivedHfileDir = + new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY); FileSystem fs = FileSystem.get(conf); SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner(); @@ -117,9 +117,9 @@ public void testFindsSnapshotFilesWhenCleaning() throws IOException { static class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector { @Override - public Collection filesUnderSnapshot(final FileSystem workingFs, - final Path snapshotDir) throws IOException { - Collection files = new HashSet<>(); + public Collection filesUnderSnapshot(final FileSystem workingFs, final Path snapshotDir) + throws IOException { + Collection files = new HashSet<>(); files.addAll(SnapshotReferenceUtil.getHFileNames(conf, workingFs, snapshotDir)); return files; } @@ -131,10 +131,10 @@ public Collection filesUnderSnapshot(final FileSystem workingFs, */ @Test public void testCorruptedRegionManifest() throws IOException { - SnapshotTestingUtils.SnapshotMock - snapshotMock = new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir); - SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2( - SNAPSHOT_NAME_STR, 
TABLE_NAME_STR); + SnapshotTestingUtils.SnapshotMock snapshotMock = + new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir); + SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV2(SNAPSHOT_NAME_STR, TABLE_NAME_STR); builder.addRegionV2(); builder.corruptOneRegionManifest(); @@ -149,15 +149,15 @@ public void testCorruptedRegionManifest() throws IOException { } /** - * If there is a corrupted data manifest, it should throw out CorruptedSnapshotException, - * instead of an IOException + * If there is a corrupted data manifest, it should throw out CorruptedSnapshotException, instead + * of an IOException */ @Test public void testCorruptedDataManifest() throws IOException { - SnapshotTestingUtils.SnapshotMock - snapshotMock = new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir); - SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2( - SNAPSHOT_NAME_STR, TABLE_NAME_STR); + SnapshotTestingUtils.SnapshotMock snapshotMock = + new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir); + SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV2(SNAPSHOT_NAME_STR, TABLE_NAME_STR); builder.addRegionV2(); // consolidate to generate a data.manifest file builder.consolidate(); @@ -169,8 +169,9 @@ public void testCorruptedDataManifest() throws IOException { try { cache.getSnapshotsInProgress(); } finally { - fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir, - TEST_UTIL.getConfiguration()), true); + fs.delete( + SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir, TEST_UTIL.getConfiguration()), + true); } } @@ -178,8 +179,8 @@ public void testCorruptedDataManifest() throws IOException { public void testMissedTmpSnapshot() throws IOException { SnapshotTestingUtils.SnapshotMock snapshotMock = new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir); - SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2( - SNAPSHOT_NAME_STR, TABLE_NAME_STR); + SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV2(SNAPSHOT_NAME_STR, TABLE_NAME_STR); builder.addRegionV2(); builder.missOneRegionSnapshotFile(); long period = Long.MAX_VALUE; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java index c435cee7d1e2..bba197eb0567 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,7 @@ /** * Test basic snapshot manager functionality */ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestSnapshotManager { @ClassRule @@ -82,7 +82,7 @@ public class TestSnapshotManager { } } - private SnapshotManager getNewManager() throws IOException, KeeperException { + private SnapshotManager getNewManager() throws IOException, KeeperException { return getNewManager(UTIL.getConfiguration()); } @@ -132,14 +132,14 @@ public void testInProcess() throws KeeperException, IOException { SnapshotManager manager = getNewManager(); TakeSnapshotHandler handler = Mockito.mock(TakeSnapshotHandler.class); assertFalse("Manager is in process when there is no current handler", - manager.isTakingSnapshot(tableName)); + manager.isTakingSnapshot(tableName)); manager.setSnapshotHandlerForTesting(tableName, handler); Mockito.when(handler.isFinished()).thenReturn(false); assertTrue("Manager isn't in process when handler is running", - manager.isTakingSnapshot(tableName)); + manager.isTakingSnapshot(tableName)); Mockito.when(handler.isFinished()).thenReturn(true); assertFalse("Manager is process when handler isn't running", - manager.isTakingSnapshot(tableName)); + manager.isTakingSnapshot(tableName)); } /** @@ -166,24 +166,24 @@ public void testSnapshotSupportConfiguration() throws Exception { // force snapshot feature to be disabled, even if cleaners are present conf = new Configuration(); - conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, - SnapshotHFileCleaner.class.getName(), HFileLinkCleaner.class.getName()); + conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, SnapshotHFileCleaner.class.getName(), + HFileLinkCleaner.class.getName()); conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, false); manager = getNewManager(conf); assertFalse("Snapshot should be disabled", isSnapshotSupported(manager)); // cleaners are present, but missing snapshot enabled property conf = new Configuration(); - conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, - SnapshotHFileCleaner.class.getName(), HFileLinkCleaner.class.getName()); + conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, SnapshotHFileCleaner.class.getName(), + HFileLinkCleaner.class.getName()); manager = getNewManager(conf); assertTrue("Snapshot should be enabled, because cleaners are present", isSnapshotSupported(manager)); // Create a "test snapshot" Path rootDir = UTIL.getDataTestDir(); - Path testSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir( - "testSnapshotSupportConfiguration", rootDir); + Path testSnapshotDir = SnapshotDescriptionUtils + .getCompletedSnapshotDir("testSnapshotSupportConfiguration", rootDir); fs.mkdirs(testSnapshotDir); try { // force snapshot feature to be disabled, but snapshots are present @@ -215,8 +215,8 @@ public void testDisableSnapshotAndNotDeleteBackReference() throws Exception { RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build(); RegionInfo hriLink = RegionInfoBuilder.newBuilder(tableLinkName).build(); Path archiveDir = HFileArchiveUtil.getArchivePath(conf); - Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, - tableName, hri.getEncodedName(), familyName); + Path archiveStoreDir = + HFileArchiveUtil.getStoreArchivePath(conf, tableName, hri.getEncodedName(), familyName); // Create hfile /hbase/table-link/region/cf/getEncodedName.HFILE(conf); Path familyPath = getFamilyDirPath(archiveDir, 
tableName, hri.getEncodedName(), familyName); @@ -224,7 +224,7 @@ public void testDisableSnapshotAndNotDeleteBackReference() throws Exception { fs.createNewFile(hfilePath); // Create link to hfile Path familyLinkPath = - getFamilyDirPath(rootDir, tableLinkName, hriLink.getEncodedName(), familyName); + getFamilyDirPath(rootDir, tableLinkName, hriLink.getEncodedName(), familyName); HFileLink.create(conf, fs, familyLinkPath, hri, hfileName); Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName); assertTrue(fs.exists(linkBackRefDir)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java index ab062683e585..0dfa08ac2f0f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,43 +39,41 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; - /** * Unfortunately, couldn't test TakeSnapshotHandler using mocks, because it relies on TableLock, * which is tightly coupled to LockManager and LockProcedure classes, which are both final and - * prevents us from mocking its behaviour. Looks like an overkill having to emulate a - * whole cluster run for such a small optional property behaviour. + * prevents us from mocking its behaviour. Looks like an overkill having to emulate a whole cluster + * run for such a small optional property behaviour. 
*/ -@Category({ MediumTests.class}) +@Category({ MediumTests.class }) public class TestTakeSnapshotHandler { private static HBaseTestingUtil UTIL; @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTakeSnapshotHandler.class); + HBaseClassTestRule.forClass(TestTakeSnapshotHandler.class); @Rule public TestName name = new TestName(); - @Before - public void setup() { + public void setup() { UTIL = new HBaseTestingUtil(); } public TableDescriptor createTableInsertDataAndTakeSnapshot(Map snapshotProps) throws Exception { TableDescriptor descriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).build()).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).build()) + .build(); UTIL.getConnection().getAdmin().createTable(descriptor); Table table = UTIL.getConnection().getTable(descriptor.getTableName()); Put put = new Put(Bytes.toBytes("1")); put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("v1")); table.put(put); - String snapName = "snap"+name.getMethodName(); + String snapName = "snap" + name.getMethodName(); UTIL.getAdmin().snapshot(snapName, descriptor.getTableName(), snapshotProps); TableName cloned = TableName.valueOf(name.getMethodName() + "clone"); UTIL.getAdmin().cloneSnapshot(snapName, cloned); @@ -89,8 +87,7 @@ public void testPreparePreserveMaxFileSizeEnabled() throws Exception { snapshotProps.put(TableDescriptorBuilder.MAX_FILESIZE, Long.parseLong("21474836480")); TableDescriptor descriptor = createTableInsertDataAndTakeSnapshot(snapshotProps); TableName cloned = TableName.valueOf(name.getMethodName() + "clone"); - assertEquals(-1, - UTIL.getAdmin().getDescriptor(descriptor.getTableName()).getMaxFileSize()); + assertEquals(-1, UTIL.getAdmin().getDescriptor(descriptor.getTableName()).getMaxFileSize()); assertEquals(21474836480L, UTIL.getAdmin().getDescriptor(cloned).getMaxFileSize()); } @@ -99,8 +96,7 @@ public void testPreparePreserveMaxFileSizeDisabled() throws Exception { UTIL.startMiniCluster(); TableDescriptor descriptor = createTableInsertDataAndTakeSnapshot(null); TableName cloned = TableName.valueOf(name.getMethodName() + "clone"); - assertEquals(-1, - UTIL.getAdmin().getDescriptor(descriptor.getTableName()).getMaxFileSize()); + assertEquals(-1, UTIL.getAdmin().getDescriptor(descriptor.getTableName()).getMaxFileSize()); assertEquals(-1, UTIL.getAdmin().getDescriptor(cloned).getMaxFileSize()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java index 813c288c61dc..88eaef5c035e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/FaultyMobStoreCompactor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,7 +26,6 @@ import java.util.Optional; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -53,22 +51,24 @@ import org.slf4j.LoggerFactory; /** - * This class is used for testing only. The main purpose is to emulate - * random failures during MOB compaction process. - * Example of usage: - *
<pre>{@code
          - * public class SomeTest {
          + * This class is used for testing only. The main purpose is to emulate random failures during MOB
          + * compaction process. Example of usage:
          + * 
+ * <pre>
          + * {
+ *   &#64;code
          + *   public class SomeTest {
            *
          - *   public void initConfiguration(Configuration conf){
          - *     conf.set(MobStoreEngine.DEFAULT_MOB_COMPACTOR_CLASS_KEY,
          -         FaultyMobStoreCompactor.class.getName());
          -       conf.setDouble("hbase.mob.compaction.fault.probability", 0.1);
          + *     public void initConfiguration(Configuration conf) {
          + *       conf.set(MobStoreEngine.DEFAULT_MOB_COMPACTOR_CLASS_KEY,
          + *         FaultyMobStoreCompactor.class.getName());
          + *       conf.setDouble("hbase.mob.compaction.fault.probability", 0.1);
          + *     }
            *   }
            * }
- * }</pre>
          - * @see org.apache.hadoop.hbase.mob.MobStressToolRunner on how to use and configure - * this class. - * + *
          + * + * @see org.apache.hadoop.hbase.mob.MobStressToolRunner on how to use and configure this class. */ @InterfaceAudience.Private public class FaultyMobStoreCompactor extends DefaultMobStoreCompactor { @@ -194,7 +194,7 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, mobCell = mobStore.resolve(c, true, false).getCell(); } catch (DoNotRetryIOException e) { if (discardMobMiss && e.getCause() != null - && e.getCause() instanceof FileNotFoundException) { + && e.getCause() instanceof FileNotFoundException) { LOG.error("Missing MOB cell: file={} not found cell={}", fName, c); continue; } else { @@ -252,9 +252,10 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, mobRefSet.get().put(refTable.get(), MobUtils.getMobFileName(c)); writer.append(c); } else { - throw new IOException(String.format("MOB cell did not contain a tablename " + - "tag. should not be possible. see ref guide on mob troubleshooting. " + - "store=%s cell=%s", getStoreInfo(), c)); + throw new IOException(String.format("MOB cell did not contain a tablename " + + "tag. should not be possible. see ref guide on mob troubleshooting. " + + "store=%s cell=%s", + getStoreInfo(), c)); } } else { // If the value is not larger than the threshold, it's not regarded a mob. Retrieve @@ -276,9 +277,10 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, mobRefSet.get().put(refTable.get(), MobUtils.getMobFileName(c)); writer.append(c); } else { - throw new IOException(String.format("MOB cell did not contain a tablename " + - "tag. should not be possible. see ref guide on mob troubleshooting. " + - "store=%s cell=%s", getStoreInfo(), c)); + throw new IOException(String.format("MOB cell did not contain a tablename " + + "tag. should not be possible. see ref guide on mob troubleshooting. " + + "store=%s cell=%s", + getStoreInfo(), c)); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressTool.java index e9c6969d9697..060ae24c1a49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.mob; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.util.AbstractHBaseTool; @@ -30,7 +29,6 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; - @InterfaceAudience.Private @InterfaceStability.Evolving public class MobStressTool extends AbstractHBaseTool { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressToolRunner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressToolRunner.java index 585bdf7d7d16..90b1775d6db3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressToolRunner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobStressToolRunner.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.mob; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -43,24 +43,17 @@ import org.apache.hadoop.hbase.util.Bytes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - Reproduction for MOB data loss - - 1. Settings: Region Size 200 MB, Flush threshold 800 KB. - 2. Insert 10 Million records - 3. MOB Compaction and Archiver - a) Trigger MOB Compaction (every 2 minutes) - b) Trigger major compaction (every 2 minutes) - c) Trigger archive cleaner (every 3 minutes) - 4. Validate MOB data after complete data load. - - This class is used by MobStressTool only. This is not a unit test +/** + * Reproduction for MOB data loss 1. Settings: Region Size 200 MB, Flush threshold 800 KB. 2. Insert + * 10 Million records 3. MOB Compaction and Archiver a) Trigger MOB Compaction (every 2 minutes) b) + * Trigger major compaction (every 2 minutes) c) Trigger archive cleaner (every 3 minutes) 4. + * Validate MOB data after complete data load. This class is used by MobStressTool only. This is not + * a unit test */ public class MobStressToolRunner { private static final Logger LOG = LoggerFactory.getLogger(MobStressToolRunner.class); - private HBaseTestingUtil HTU; private final static String famStr = "f1"; @@ -92,10 +85,10 @@ public void init(Configuration conf, long numRows) throws IOException { Connection conn = ConnectionFactory.createConnection(this.conf); this.admin = conn.getAdmin(); this.familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(fam).setMobEnabled(true) - .setMobThreshold(mobLen).setMaxVersions(1).build(); + .setMobThreshold(mobLen).setMaxVersions(1).build(); this.tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf("testMobCompactTable")) - .setColumnFamily(familyDescriptor).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf("testMobCompactTable")) + .setColumnFamily(familyDescriptor).build(); if (admin.tableExists(tableDescriptor.getTableName())) { admin.disableTable(tableDescriptor.getTableName()); admin.deleteTable(tableDescriptor.getTableName()); @@ -131,13 +124,12 @@ private void initConf() { conf.setInt("hbase.hstore.compaction.throughput.lower.bound", 52428800); conf.setInt("hbase.hstore.compaction.throughput.higher.bound", 2 * 52428800); conf.setDouble("hbase.mob.compaction.fault.probability", failureProb); -// conf.set(MobStoreEngine.DEFAULT_MOB_COMPACTOR_CLASS_KEY, -// FaultyMobStoreCompactor.class.getName()); + // conf.set(MobStoreEngine.DEFAULT_MOB_COMPACTOR_CLASS_KEY, + // FaultyMobStoreCompactor.class.getName()); conf.setLong(MobConstants.MOB_COMPACTION_CHORE_PERIOD, 0); conf.setLong(MobConstants.MOB_CLEANER_PERIOD, 0); conf.setLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, 120000); - conf.set(MobConstants.MOB_COMPACTION_TYPE_KEY, - MobConstants.OPTIMIZED_MOB_COMPACTION_TYPE); + conf.set(MobConstants.MOB_COMPACTION_TYPE_KEY, MobConstants.OPTIMIZED_MOB_COMPACTION_TYPE); conf.setLong(MobConstants.MOB_COMPACTION_MAX_FILE_SIZE_KEY, 1000000); } @@ -192,7 +184,7 @@ public void run() { for (int i = 0; i < rows; i++) { byte[] key = Bytes.toBytes(i); Put p = new Put(key); - p.addColumn(fam, qualifier, Bytes.add(key,mobVal)); + p.addColumn(fam, qualifier, Bytes.add(key, mobVal)); table.put(p); if (i % 10000 == 0) { LOG.info("LOADED=" + i); @@ -257,7 +249,7 @@ public void runStressTest() throws InterruptedException, IOException { } - private 
long getNumberOfMobFiles(Configuration conf, TableName tableName, String family) + private long getNumberOfMobFiles(Configuration conf, TableName tableName, String family) throws IOException { FileSystem fs = FileSystem.get(conf); Path dir = MobUtils.getMobFamilyPath(conf, tableName, family); @@ -265,7 +257,7 @@ private long getNumberOfMobFiles(Configuration conf, TableName tableName, Strin long size = 0; for (FileStatus st : stat) { LOG.debug("MOB Directory content: {} len={}", st.getPath(), st.getLen()); - size+= st.getLen(); + size += st.getLen(); } LOG.debug("MOB Directory content total files: {}, total size={}", stat.length, size); @@ -288,10 +280,9 @@ private void scanTable() { int counter = 0; while ((result = scanner.next()) != null) { byte[] key = result.getRow(); - assertTrue(Arrays.equals(result.getValue(fam, qualifier), - Bytes.add(key,mobVal))); + assertTrue(Arrays.equals(result.getValue(fam, qualifier), Bytes.add(key, mobVal))); if (counter % 10000 == 0) { - LOG.info("GET=" + counter+" key=" + Bytes.toInt(key)); + LOG.info("GET=" + counter + " key=" + Bytes.toInt(key)); } counter++; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java index 4fe33d86ee02..495ca96e13af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -50,21 +48,19 @@ protected static String generateRandomString(int demoLength) { } return sb.toString(); } + protected static void writeStoreFile(final StoreFileWriter writer, String caseName) throws IOException { writeStoreFile(writer, Bytes.toBytes(caseName), Bytes.toBytes(caseName)); } /* - * Writes HStoreKey and ImmutableBytes data to passed writer and then closes - * it. - * + * Writes HStoreKey and ImmutableBytes data to passed writer and then closes it. 
* @param writer - * * @throws IOException */ - private static void writeStoreFile(final StoreFileWriter writer, byte[] fam, - byte[] qualifier) throws IOException { + private static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier) + throws IOException { long now = EnvironmentEdgeManager.currentTime(); try { for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) { @@ -82,23 +78,22 @@ private static void writeStoreFile(final StoreFileWriter writer, byte[] fam, * Compare two Cells only for their row family qualifier value */ public static void assertCellEquals(Cell firstKeyValue, Cell secondKeyValue) { - Assert.assertArrayEquals(CellUtil.cloneRow(firstKeyValue), - CellUtil.cloneRow(secondKeyValue)); + Assert.assertArrayEquals(CellUtil.cloneRow(firstKeyValue), CellUtil.cloneRow(secondKeyValue)); Assert.assertArrayEquals(CellUtil.cloneFamily(firstKeyValue), - CellUtil.cloneFamily(secondKeyValue)); + CellUtil.cloneFamily(secondKeyValue)); Assert.assertArrayEquals(CellUtil.cloneQualifier(firstKeyValue), - CellUtil.cloneQualifier(secondKeyValue)); + CellUtil.cloneQualifier(secondKeyValue)); Assert.assertArrayEquals(CellUtil.cloneValue(firstKeyValue), - CellUtil.cloneValue(secondKeyValue)); + CellUtil.cloneValue(secondKeyValue)); } - public static void assertCellsValue(Table table, Scan scan, - byte[] expectedValue, int expectedCount) throws IOException { + public static void assertCellsValue(Table table, Scan scan, byte[] expectedValue, + int expectedCount) throws IOException { ResultScanner results = table.getScanner(scan); int count = 0; for (Result res : results) { List cells = res.listCells(); - for(Cell cell : cells) { + for (Cell cell : cells) { // Verify the value Assert.assertArrayEquals(expectedValue, CellUtil.cloneValue(cell)); count++; @@ -110,7 +105,7 @@ public static void assertCellsValue(Table table, Scan scan, /** * Gets the number of rows in the given table. - * @param table to get the scanner + * @param table to get the scanner * @return the number of rows */ public static int countMobRows(final Table table) throws IOException { @@ -118,5 +113,5 @@ public static int countMobRows(final Table table) throws IOException { // Do not retrieve the mob data when scanning scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE)); return HBaseTestingUtil.countRows(table, scan); - } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java index 99f7484d0c93..08695f86bcd2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -66,9 +66,9 @@ public void testOpenClose() throws Exception { String caseName = testName.getMethodName(); Path testDir = TEST_UTIL.getDataTestDir(); FileSystem fs = testDir.getFileSystem(conf); - HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build(); - StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(testDir).withFileContext(meta).build(); + HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withOutputDir(testDir) + .withFileContext(meta).build(); MobTestUtil.writeStoreFile(writer, caseName); CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf); assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount()); @@ -96,9 +96,7 @@ public void testCompare() throws Exception { CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, writer1.getPath(), conf, cacheConf); Path outputDir2 = new Path(testDir, FAMILY2); StoreFileWriter writer2 = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(outputDir2) - .withFileContext(meta) - .build(); + .withOutputDir(outputDir2).withFileContext(meta).build(); MobTestUtil.writeStoreFile(writer2, caseName); CachedMobFile cachedMobFile2 = CachedMobFile.create(fs, writer2.getPath(), conf, cacheConf); cachedMobFile1.access(1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java index 70e7bbf4cb4a..fa2a37e9131d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java @@ -47,7 +47,7 @@ public class TestDefaultMobStoreFlusher { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDefaultMobStoreFlusher.class); + HBaseClassTestRule.forClass(TestDefaultMobStoreFlusher.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private final static byte[] row1 = Bytes.toBytes("row1"); @@ -75,8 +75,8 @@ public static void tearDownAfterClass() throws Exception { public void testFlushNonMobFile() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(4).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(4).build()) + .build(); testFlushFile(tableDescriptor); } @@ -84,9 +84,9 @@ public void testFlushNonMobFile() throws Exception { public void testFlushMobFile() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true) - .setMobThreshold(3L).setMaxVersions(4).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true) + .setMobThreshold(3L).setMaxVersions(4).build()) + .build(); testFlushFile(tableDescriptor); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java index 36acf60dba5f..3e8a300e1a58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -85,29 +85,21 @@ public void tearDown() throws Exception { } private void init() throws Exception { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family)) - .setMobEnabled(true) - .setMobThreshold(3L) - .setMaxVersions(4) - .build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).setMobEnabled(true) + .setMobThreshold(3L).setMaxVersions(4).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptorBuilder.build()); table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()) - .getBufferedMutator(tableName); + .getBufferedMutator(tableName); } private void modifyColumnExpiryDays(int expireDays) throws Exception { - ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family)) - .setMobEnabled(true) - .setMobThreshold(3L); + ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes(family)).setMobEnabled(true).setMobThreshold(3L); // change ttl as expire days to make some row expired int timeToLive = expireDays * secondsOfDay(); columnFamilyDescriptorBuilder.setTimeToLive(timeToLive); @@ -127,9 +119,9 @@ private void putKVAndFlush(BufferedMutator table, byte[] row, byte[] value, long } /** - * Creates a 3 day old hfile and an 1 day old hfile then sets expiry to 2 days. - * Verifies that the 3 day old hfile is removed but the 1 day one is still present - * after the expiry based cleaner is run. + * Creates a 3 day old hfile and an 1 day old hfile then sets expiry to 2 days. Verifies that the + * 3 day old hfile is removed but the 1 day one is still present after the expiry based cleaner is + * run. 
*/ @Test public void testCleaner() throws Exception { @@ -141,14 +133,14 @@ public void testCleaner() throws Exception { long ts = EnvironmentEdgeManager.currentTime() - 3 * secondsOfDay() * 1000; // 3 days before putKVAndFlush(table, row1, dummyData, ts); FileStatus[] firstFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); - //the first mob file + // the first mob file assertEquals("Before cleanup without delay 1", 1, firstFiles.length); String firstFile = firstFiles[0].getPath().getName(); ts = EnvironmentEdgeManager.currentTime() - 1 * secondsOfDay() * 1000; // 1 day before putKVAndFlush(table, row2, dummyData, ts); FileStatus[] secondFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); - //now there are 2 mob files + // now there are 2 mob files assertEquals("Before cleanup without delay 2", 2, secondFiles.length); String f1 = secondFiles[0].getPath().getName(); String f2 = secondFiles[1].getPath().getName(); @@ -156,7 +148,7 @@ public void testCleaner() throws Exception { modifyColumnExpiryDays(2); // ttl = 2, make the first row expired - //run the cleaner + // run the cleaner String[] args = new String[2]; args[0] = tableName.getNameAsString(); args[1] = family; @@ -164,7 +156,7 @@ public void testCleaner() throws Exception { FileStatus[] filesAfterClean = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); String lastFile = filesAfterClean[0].getPath().getName(); - //the first mob fie is removed + // the first mob fie is removed assertEquals("After cleanup without delay 1", 1, filesAfterClean.length); assertEquals("After cleanup without delay 2", secondFile, lastFile); } @@ -174,7 +166,7 @@ private int secondsOfDay() { } private byte[] makeDummyData(int size) { - byte [] dummyData = new byte[size]; + byte[] dummyData = new byte[size]; Bytes.random(dummyData); return dummyData; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionOptMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionOptMode.java index cacc90e660eb..131c97299870 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionOptMode.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionOptMode.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.mob; + import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -25,19 +25,14 @@ import org.junit.experimental.categories.Category; /** - * Mob file compaction chore in a generational non-batch mode test. - * 1. Uses default (non-batch) mode for regular MOB compaction, sets generational mode ON - * 2. Disables periodic MOB compactions, sets minimum age to archive to 10 sec - * 3. Creates MOB table with 20 regions - * 4. Loads MOB data (randomized keys, 1000 rows), flushes data. - * 5. Repeats 4. two more times - * 6. Verifies that we have 20 *3 = 60 mob files (equals to number of regions x 3) - * 7. Runs major MOB compaction. - * 8. Verifies that number of MOB files in a mob directory is 20 x4 = 80 - * 9. Waits for a period of time larger than minimum age to archive - * 10. Runs Mob cleaner chore - * 11 Verifies that number of MOB files in a mob directory is 20. - * 12 Runs scanner and checks all 3 * 1000 rows. 
+ * Mob file compaction chore in a generational non-batch mode test. 1. Uses default (non-batch) mode + * for regular MOB compaction, sets generational mode ON 2. Disables periodic MOB compactions, sets + * minimum age to archive to 10 sec 3. Creates MOB table with 20 regions 4. Loads MOB data + * (randomized keys, 1000 rows), flushes data. 5. Repeats 4. two more times 6. Verifies that we have + * 20 *3 = 60 mob files (equals to number of regions x 3) 7. Runs major MOB compaction. 8. Verifies + * that number of MOB files in a mob directory is 20 x4 = 80 9. Waits for a period of time larger + * than minimum age to archive 10. Runs Mob cleaner chore 11 Verifies that number of MOB files in a + * mob directory is 20. 12 Runs scanner and checks all 3 * 1000 rows. */ @Category(LargeTests.class) public class TestMobCompactionOptMode extends TestMobCompactionWithDefaults { @@ -49,8 +44,7 @@ public class TestMobCompactionOptMode extends TestMobCompactionWithDefaults { @BeforeClass public static void configureOptimizedCompaction() throws InterruptedException, IOException { HTU.shutdownMiniHBaseCluster(); - conf.set(MobConstants.MOB_COMPACTION_TYPE_KEY, - MobConstants.OPTIMIZED_MOB_COMPACTION_TYPE); + conf.set(MobConstants.MOB_COMPACTION_TYPE_KEY, MobConstants.OPTIMIZED_MOB_COMPACTION_TYPE); conf.setLong(MobConstants.MOB_COMPACTION_MAX_FILE_SIZE_KEY, 1000000); HTU.startMiniHBaseCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionOptRegionBatchMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionOptRegionBatchMode.java index 46ef3b040880..071ec8216b20 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionOptRegionBatchMode.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionOptRegionBatchMode.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.hbase.mob; -import java.io.IOException; +import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -31,20 +30,14 @@ import org.slf4j.LoggerFactory; /** - * Mob file compaction chore in a generational batch mode test. - * 1. Enables batch mode for regular MOB compaction, - * Sets batch size to 7 regions. Enables generational mode. - * 2. Disables periodic MOB compactions, sets minimum age to archive to 10 sec - * 3. Creates MOB table with 20 regions - * 4. Loads MOB data (randomized keys, 1000 rows), flushes data. - * 5. Repeats 4. two more times - * 6. Verifies that we have 20 *3 = 60 mob files (equals to number of regions x 3) - * 7. Runs major MOB compaction. - * 8. Verifies that number of MOB files in a mob directory is 20 x4 = 80 - * 9. Waits for a period of time larger than minimum age to archive - * 10. Runs Mob cleaner chore - * 11 Verifies that number of MOB files in a mob directory is 20. - * 12 Runs scanner and checks all 3 * 1000 rows. + * Mob file compaction chore in a generational batch mode test. 1. Enables batch mode for regular + * MOB compaction, Sets batch size to 7 regions. Enables generational mode. 2. Disables periodic MOB + * compactions, sets minimum age to archive to 10 sec 3. Creates MOB table with 20 regions 4. 
Loads + * MOB data (randomized keys, 1000 rows), flushes data. 5. Repeats 4. two more times 6. Verifies + * that we have 20 *3 = 60 mob files (equals to number of regions x 3) 7. Runs major MOB compaction. + * 8. Verifies that number of MOB files in a mob directory is 20 x4 = 80 9. Waits for a period of + * time larger than minimum age to archive 10. Runs Mob cleaner chore 11 Verifies that number of MOB + * files in a mob directory is 20. 12 Runs scanner and checks all 3 * 1000 rows. */ @Category(LargeTests.class) public class TestMobCompactionOptRegionBatchMode extends TestMobCompactionWithDefaults { @@ -68,8 +61,7 @@ public static void configureOptimizedCompactionAndBatches() throws InterruptedException, IOException { HTU.shutdownMiniHBaseCluster(); conf.setInt(MobConstants.MOB_MAJOR_COMPACTION_REGION_BATCH_SIZE, batchSize); - conf.set(MobConstants.MOB_COMPACTION_TYPE_KEY, - MobConstants.OPTIMIZED_MOB_COMPACTION_TYPE); + conf.set(MobConstants.MOB_COMPACTION_TYPE_KEY, MobConstants.OPTIMIZED_MOB_COMPACTION_TYPE); conf.setLong(MobConstants.MOB_COMPACTION_MAX_FILE_SIZE_KEY, 1000000); HTU.startMiniHBaseCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionRegularRegionBatchMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionRegularRegionBatchMode.java index bc7042529c9e..fe0c0280db80 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionRegularRegionBatchMode.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionRegularRegionBatchMode.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.hbase.mob; -import java.io.IOException; +import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -31,20 +30,14 @@ import org.slf4j.LoggerFactory; /** - * Mob file compaction chore in a regular batch mode test. - * 1. Enables batch mode for regular MOB compaction, - * Sets batch size to 7 regions. - * 2. Disables periodic MOB compactions, sets minimum age to archive to 10 sec - * 3. Creates MOB table with 20 regions - * 4. Loads MOB data (randomized keys, 1000 rows), flushes data. - * 5. Repeats 4. two more times - * 6. Verifies that we have 20 *3 = 60 mob files (equals to number of regions x 3) - * 7. Runs major MOB compaction. - * 8. Verifies that number of MOB files in a mob directory is 20 x4 = 80 - * 9. Waits for a period of time larger than minimum age to archive - * 10. Runs Mob cleaner chore - * 11 Verifies that number of MOB files in a mob directory is 20. - * 12 Runs scanner and checks all 3 * 1000 rows. + * Mob file compaction chore in a regular batch mode test. 1. Enables batch mode for regular MOB + * compaction, Sets batch size to 7 regions. 2. Disables periodic MOB compactions, sets minimum age + * to archive to 10 sec 3. Creates MOB table with 20 regions 4. Loads MOB data (randomized keys, + * 1000 rows), flushes data. 5. Repeats 4. two more times 6. Verifies that we have 20 *3 = 60 mob + * files (equals to number of regions x 3) 7. Runs major MOB compaction. 8. Verifies that number of + * MOB files in a mob directory is 20 x4 = 80 9. 
Waits for a period of time larger than minimum age + * to archive 10. Runs Mob cleaner chore 11 Verifies that number of MOB files in a mob directory is + * 20. 12 Runs scanner and checks all 3 * 1000 rows. */ @Category(LargeTests.class) public class TestMobCompactionRegularRegionBatchMode extends TestMobCompactionWithDefaults { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithDefaults.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithDefaults.java index fc96f87cfd83..4a02f46c0db5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithDefaults.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobCompactionWithDefaults.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.mob; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -57,25 +57,18 @@ import org.slf4j.LoggerFactory; /** - * Mob file compaction base test. - * 1. Enables batch mode for regular MOB compaction, - * Sets batch size to 7 regions. (Optional) - * 2. Disables periodic MOB compactions, sets minimum age to archive to 10 sec - * 3. Creates MOB table with 20 regions - * 4. Loads MOB data (randomized keys, 1000 rows), flushes data. - * 5. Repeats 4. two more times - * 6. Verifies that we have 20 *3 = 60 mob files (equals to number of regions x 3) - * 7. Runs major MOB compaction. - * 8. Verifies that number of MOB files in a mob directory is 20 x4 = 80 - * 9. Waits for a period of time larger than minimum age to archive - * 10. Runs Mob cleaner chore - * 11 Verifies that number of MOB files in a mob directory is 20. - * 12 Runs scanner and checks all 3 * 1000 rows. + * Mob file compaction base test. 1. Enables batch mode for regular MOB compaction, Sets batch size + * to 7 regions. (Optional) 2. Disables periodic MOB compactions, sets minimum age to archive to 10 + * sec 3. Creates MOB table with 20 regions 4. Loads MOB data (randomized keys, 1000 rows), flushes + * data. 5. Repeats 4. two more times 6. Verifies that we have 20 *3 = 60 mob files (equals to + * number of regions x 3) 7. Runs major MOB compaction. 8. Verifies that number of MOB files in a + * mob directory is 20 x4 = 80 9. Waits for a period of time larger than minimum age to archive 10. + * Runs Mob cleaner chore 11 Verifies that number of MOB files in a mob directory is 20. 12 Runs + * scanner and checks all 3 * 1000 rows. 
*/ @Category(LargeTests.class) public class TestMobCompactionWithDefaults { - private static final Logger LOG = - LoggerFactory.getLogger(TestMobCompactionWithDefaults.class); + private static final Logger LOG = LoggerFactory.getLogger(TestMobCompactionWithDefaults.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMobCompactionWithDefaults.class); @@ -114,7 +107,7 @@ public static void htuStart() throws Exception { // Set minimum age to archive to 10 sec conf.setLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, minAgeToArchive); // Set compacted file discharger interval to a half minAgeToArchive - conf.setLong("hbase.hfile.compaction.discharger.interval", minAgeToArchive/2); + conf.setLong("hbase.hfile.compaction.discharger.interval", minAgeToArchive / 2); conf.setBoolean("hbase.regionserver.compaction.enabled", false); HTU.startMiniCluster(); } @@ -129,9 +122,9 @@ public void setUp() throws Exception { admin = HTU.getAdmin(); cleanerChore = new MobFileCleanerChore(); familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(fam).setMobEnabled(true) - .setMobThreshold(mobLen).setMaxVersions(1).build(); + .setMobThreshold(mobLen).setMaxVersions(1).build(); tableDescriptor = HTU.createModifyableTableDescriptor(test.getMethodName()) - .setColumnFamily(familyDescriptor).build(); + .setColumnFamily(familyDescriptor).build(); RegionSplitter.UniformSplit splitAlgo = new RegionSplitter.UniformSplit(); byte[][] splitKeys = splitAlgo.split(numRegions); table = HTU.createTable(tableDescriptor, splitKeys).getName(); @@ -167,8 +160,8 @@ public void baseTestMobFileCompaction() throws InterruptedException, IOException loadAndFlushThreeTimes(rows, table, famStr); mobCompact(tableDescriptor, familyDescriptor); assertEquals("Should have 4 MOB files per region due to 3xflush + compaction.", numRegions * 4, - getNumberOfMobFiles(table, famStr)); - cleanupAndVerifyCounts(table, famStr, 3*rows); + getNumberOfMobFiles(table, famStr)); + cleanupAndVerifyCounts(table, famStr, 3 * rows); LOG.info("MOB compaction " + description() + " finished OK"); } @@ -181,17 +174,17 @@ public void testMobFileCompactionAfterSnapshotClone() throws InterruptedExceptio admin.snapshot(test.getMethodName(), table); admin.cloneSnapshot(test.getMethodName(), clone); assertEquals("Should have 3 hlinks per region in MOB area from snapshot clone", 3 * numRegions, - getNumberOfMobFiles(clone, famStr)); + getNumberOfMobFiles(clone, famStr)); mobCompact(admin.getDescriptor(clone), familyDescriptor); assertEquals("Should have 3 hlinks + 1 MOB file per region due to clone + compact", - 4 * numRegions, getNumberOfMobFiles(clone, famStr)); - cleanupAndVerifyCounts(clone, famStr, 3*rows); + 4 * numRegions, getNumberOfMobFiles(clone, famStr)); + cleanupAndVerifyCounts(clone, famStr, 3 * rows); LOG.info("MOB compaction of cloned snapshot, " + description() + " finished OK"); } @Test - public void testMobFileCompactionAfterSnapshotCloneAndFlush() throws InterruptedException, - IOException { + public void testMobFileCompactionAfterSnapshotCloneAndFlush() + throws InterruptedException, IOException { final TableName clone = TableName.valueOf(test.getMethodName() + "-clone"); LOG.info("MOB compaction of cloned snapshot after flush, " + description() + " started"); loadAndFlushThreeTimes(rows, table, famStr); @@ -199,12 +192,12 @@ public void testMobFileCompactionAfterSnapshotCloneAndFlush() throws Interrupted admin.snapshot(test.getMethodName(), table); admin.cloneSnapshot(test.getMethodName(), clone); 
assertEquals("Should have 3 hlinks per region in MOB area from snapshot clone", 3 * numRegions, - getNumberOfMobFiles(clone, famStr)); + getNumberOfMobFiles(clone, famStr)); loadAndFlushThreeTimes(rows, clone, famStr); mobCompact(admin.getDescriptor(clone), familyDescriptor); assertEquals("Should have 7 MOB file per region due to clone + 3xflush + compact", - 7 * numRegions, getNumberOfMobFiles(clone, famStr)); - cleanupAndVerifyCounts(clone, famStr, 6*rows); + 7 * numRegions, getNumberOfMobFiles(clone, famStr)); + cleanupAndVerifyCounts(clone, famStr, 6 * rows); LOG.info("MOB compaction of cloned snapshot w flush, " + description() + " finished OK"); } @@ -215,8 +208,8 @@ protected void loadAndFlushThreeTimes(int rows, TableName table, String family) loadData(table, rows); loadData(table, rows); loadData(table, rows); - assertEquals("Should have 3 more mob files per region from flushing.", start + numRegions * 3, - getNumberOfMobFiles(table, family)); + assertEquals("Should have 3 more mob files per region from flushing.", start + numRegions * 3, + getNumberOfMobFiles(table, family)); } protected String description() { @@ -225,20 +218,19 @@ protected String description() { protected void enableCompactions() throws IOException { final List serverList = admin.getRegionServers().stream().map(sn -> sn.getServerName()) - .collect(Collectors.toList()); + .collect(Collectors.toList()); admin.compactionSwitch(true, serverList); } protected void disableCompactions() throws IOException { final List serverList = admin.getRegionServers().stream().map(sn -> sn.getServerName()) - .collect(Collectors.toList()); + .collect(Collectors.toList()); admin.compactionSwitch(false, serverList); } /** - * compact the given table and return once it is done. - * should presume compactions are disabled when called. - * should ensure compactions are disabled before returning. + * compact the given table and return once it is done. should presume compactions are disabled + * when called. should ensure compactions are disabled before returning. */ protected void mobCompact(TableDescriptor tableDescriptor, ColumnFamilyDescriptor familyDescriptor) throws IOException, InterruptedException { @@ -250,9 +242,8 @@ protected void mobCompact(TableDescriptor tableDescriptor, } /** - * Call the API for compaction specific to the test set. - * should not wait for compactions to finish. - * may assume compactions are enabled when called. + * Call the API for compaction specific to the test set. should not wait for compactions to + * finish. may assume compactions are enabled when called. 
*/ protected void mobCompactImpl(TableDescriptor tableDescriptor, ColumnFamilyDescriptor familyDescriptor) throws IOException, InterruptedException { @@ -282,7 +273,7 @@ protected void cleanupAndVerifyCounts(TableName table, String family, int rows) cleanerChore.cleanupObsoleteMobFiles(conf, table); assertEquals("After cleaning, we should have 1 MOB file per region based on size.", numRegions, - getNumberOfMobFiles(table, family)); + getNumberOfMobFiles(table, family)); LOG.debug("checking count of rows"); long scanned = scanTable(table); @@ -290,8 +281,7 @@ protected void cleanupAndVerifyCounts(TableName table, String family, int rows) } - protected long getNumberOfMobFiles(TableName tableName, String family) - throws IOException { + protected long getNumberOfMobFiles(TableName tableName, String family) throws IOException { FileSystem fs = FileSystem.get(conf); Path dir = MobUtils.getMobFamilyPath(conf, tableName, family); FileStatus[] stat = fs.listStatus(dir); @@ -303,10 +293,9 @@ protected long getNumberOfMobFiles(TableName tableName, String family) return stat.length; } - protected long scanTable(TableName tableName) { try (final Table table = HTU.getConnection().getTable(tableName); - final ResultScanner scanner = table.getScanner(fam)) { + final ResultScanner scanner = table.getScanner(fam)) { Result result; long counter = 0; while ((result = scanner.next()) != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java index 81376e440b4e..411f45930492 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java @@ -47,10 +47,10 @@ public class TestMobDataBlockEncoding { HBaseClassTestRule.forClass(TestMobDataBlockEncoding.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private final static byte [] row1 = Bytes.toBytes("row1"); - private final static byte [] family = Bytes.toBytes("family"); - private final static byte [] qf1 = Bytes.toBytes("qualifier1"); - private final static byte [] qf2 = Bytes.toBytes("qualifier2"); + private final static byte[] row1 = Bytes.toBytes("row1"); + private final static byte[] family = Bytes.toBytes("family"); + private final static byte[] qf1 = Bytes.toBytes("qualifier1"); + private final static byte[] qf2 = Bytes.toBytes("qualifier2"); protected final byte[] qf3 = Bytes.toBytes("qualifier3"); private static Table table; private static Admin admin; @@ -68,21 +68,19 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - public void setUp(long threshold, String TN, DataBlockEncoding encoding) - throws Exception { + public void setUp(long threshold, String TN, DataBlockEncoding encoding) throws Exception { columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true) - .setMobThreshold(threshold).setMaxVersions(4).setDataBlockEncoding(encoding).build(); + .setMobThreshold(threshold).setMaxVersions(4).setDataBlockEncoding(encoding).build(); tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(TN)) - .setColumnFamily(columnFamilyDescriptor).build(); + .setColumnFamily(columnFamilyDescriptor).build(); admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptor); table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()) - .getTable(TableName.valueOf(TN)); 
+ .getTable(TableName.valueOf(TN)); } /** * Generate the mob value. - * * @param size the size of the value * @return the mob value generated */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java index e7e6d7889838..7f55b23ff244 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ public class TestMobFile { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private Configuration conf = TEST_UTIL.getConfiguration(); - private CacheConfig cacheConf = new CacheConfig(conf); + private CacheConfig cacheConf = new CacheConfig(conf); @Rule public TestName testName = new TestName(); @@ -111,11 +111,9 @@ public void testReadKeyValue() throws Exception { public void testGetScanner() throws Exception { Path testDir = TEST_UTIL.getDataTestDir(); FileSystem fs = testDir.getFileSystem(conf); - HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build(); - StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(testDir) - .withFileContext(meta) - .build(); + HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withOutputDir(testDir) + .withFileContext(meta).build(); MobTestUtil.writeStoreFile(writer, testName.getMethodName()); MobFile mobFile = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java index 80aa5f405ce0..f78a879337b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -88,32 +88,23 @@ public void setUp() throws Exception { conf = UTIL.getConfiguration(); conf.set(MobConstants.MOB_FILE_CACHE_SIZE_KEY, TEST_CACHE_SIZE); TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(UTIL.createTableDescriptor( - TableName.valueOf("testMobFileCache"), ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, - 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED)); - ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY1)) - .setMobEnabled(true) - .setMobThreshold(0) - .build(); + TableDescriptorBuilder.newBuilder(UTIL.createTableDescriptor( + TableName.valueOf("testMobFileCache"), ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, + 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED)); + ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes(FAMILY1)).setMobEnabled(true).setMobThreshold(0).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY2)) - .setMobEnabled(true) - .setMobThreshold(0) - .build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY2)) + .setMobEnabled(true).setMobThreshold(0).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY3)) - .setMobEnabled(true) - .setMobThreshold(0) - .build(); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY3)) + .setMobEnabled(true).setMobThreshold(0).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); mobFileCache = new MobFileCache(conf); - region = HBaseTestingUtil - .createRegionAndWAL(regionInfo, UTIL.getDataTestDir(), conf, tableDescriptor, mobFileCache); + region = HBaseTestingUtil.createRegionAndWAL(regionInfo, UTIL.getDataTestDir(), conf, + tableDescriptor, mobFileCache); } @After @@ -133,11 +124,8 @@ private Path createMobStoreFile(String family) throws IOException { * Create the mob store file */ private Path createMobStoreFile(Configuration conf, String family) throws IOException { - ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family)) - .setMaxVersions(4) - .setMobEnabled(true).build(); + ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes(family)).setMaxVersions(4).setMobEnabled(true).build(); return createMobStoreFile(columnFamilyDescriptor); } @@ -148,8 +136,7 @@ private Path createMobStoreFile(ColumnFamilyDescriptor columnFamilyDescriptor) throws IOException { // Setting up a Store TableName tn = TableName.valueOf(TABLE); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tn); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tn); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); HMobStore mobStore = (HMobStore) region.getStore(columnFamilyDescriptor.getName()); KeyValue key1 = new KeyValue(ROW, columnFamilyDescriptor.getName(), QF1, 1, VALUE); @@ -158,9 +145,8 @@ private Path 
createMobStoreFile(ColumnFamilyDescriptor columnFamilyDescriptor) KeyValue[] keys = new KeyValue[] { key1, key2, key3 }; int maxKeyCount = keys.length; RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tn).build(); - StoreFileWriter mobWriter = mobStore.createWriterInTmp(currentDate, - maxKeyCount, columnFamilyDescriptor.getCompactionCompressionType(), - regionInfo.getStartKey(), false); + StoreFileWriter mobWriter = mobStore.createWriterInTmp(currentDate, maxKeyCount, + columnFamilyDescriptor.getCompactionCompressionType(), regionInfo.getStartKey(), false); Path mobFilePath = mobWriter.getPath(); String fileName = mobFilePath.getName(); mobWriter.append(key1); @@ -184,8 +170,7 @@ public void testMobFileCache() throws Exception { // Before open one file by the MobFileCache assertEquals(EXPECTED_CACHE_SIZE_ZERO, mobFileCache.getCacheSize()); // Open one file by the MobFileCache - CachedMobFile cachedMobFile1 = (CachedMobFile) mobFileCache.openFile( - fs, file1Path, cacheConf); + CachedMobFile cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(fs, file1Path, cacheConf); assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize()); assertNotNull(cachedMobFile1); assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount()); @@ -193,25 +178,22 @@ public void testMobFileCache() throws Exception { // The evict is also managed by a schedule thread pool. // And its check period is set as 3600 seconds by default. // This evict should get the lock at the most time - mobFileCache.evict(); // Cache not full, evict it + mobFileCache.evict(); // Cache not full, evict it assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize()); assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount()); - mobFileCache.evictFile(file1Path.getName()); // Evict one file + mobFileCache.evictFile(file1Path.getName()); // Evict one file assertEquals(EXPECTED_CACHE_SIZE_ZERO, mobFileCache.getCacheSize()); assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile1.getReferenceCount()); - cachedMobFile1.close(); // Close the cached mob file + cachedMobFile1.close(); // Close the cached mob file // Reopen three cached file - cachedMobFile1 = (CachedMobFile) mobFileCache.openFile( - fs, file1Path, cacheConf); + cachedMobFile1 = (CachedMobFile) mobFileCache.openFile(fs, file1Path, cacheConf); assertEquals(EXPECTED_CACHE_SIZE_ONE, mobFileCache.getCacheSize()); - CachedMobFile cachedMobFile2 = (CachedMobFile) mobFileCache.openFile( - fs, file2Path, cacheConf); + CachedMobFile cachedMobFile2 = (CachedMobFile) mobFileCache.openFile(fs, file2Path, cacheConf); assertEquals(EXPECTED_CACHE_SIZE_TWO, mobFileCache.getCacheSize()); - CachedMobFile cachedMobFile3 = (CachedMobFile) mobFileCache.openFile( - fs, file3Path, cacheConf); + CachedMobFile cachedMobFile3 = (CachedMobFile) mobFileCache.openFile(fs, file3Path, cacheConf); // Before the evict // Evict the cache, should close the first file 1 assertEquals(EXPECTED_CACHE_SIZE_THREE, mobFileCache.getCacheSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanerChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanerChore.java index f70c2539af6b..90d08e6b2007 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanerChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCleanerChore.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.mob; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -50,14 +50,10 @@ import org.slf4j.LoggerFactory; /** - * Mob file cleaner chore test. - * 1. Creates MOB table - * 2. Load MOB data and flushes it N times - * 3. Runs major MOB compaction (N MOB files go to archive) - * 4. Verifies that number of MOB files in a mob directory is N+1 - * 5. Waits for a period of time larger than minimum age to archive - * 6. Runs Mob cleaner chore - * 7 Verifies that number of MOB files in a mob directory is 1. + * Mob file cleaner chore test. 1. Creates MOB table 2. Load MOB data and flushes it N times 3. Runs + * major MOB compaction (N MOB files go to archive) 4. Verifies that number of MOB files in a mob + * directory is N+1 5. Waits for a period of time larger than minimum age to archive 6. Runs Mob + * cleaner chore 7 Verifies that number of MOB files in a mob directory is 1. */ @Category(MediumTests.class) public class TestMobFileCleanerChore { @@ -97,9 +93,9 @@ public void setUp() throws Exception { admin = HTU.getAdmin(); chore = new MobFileCleanerChore(); familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(fam).setMobEnabled(true) - .setMobThreshold(mobLen).setMaxVersions(1).build(); + .setMobThreshold(mobLen).setMaxVersions(1).build(); tableDescriptor = HTU.createModifyableTableDescriptor("testMobCompactTable") - .setColumnFamily(familyDescriptor).build(); + .setColumnFamily(familyDescriptor).build(); table = HTU.createTable(tableDescriptor, null); } @@ -113,8 +109,8 @@ private void initConf() { conf.setInt("hbase.hstore.blockingStoreFiles", 150); conf.setInt("hbase.hstore.compaction.throughput.lower.bound", 52428800); conf.setInt("hbase.hstore.compaction.throughput.higher.bound", 2 * 52428800); - //conf.set(MobStoreEngine.DEFAULT_MOB_COMPACTOR_CLASS_KEY, - // FaultyMobStoreCompactor.class.getName()); + // conf.set(MobStoreEngine.DEFAULT_MOB_COMPACTOR_CLASS_KEY, + // FaultyMobStoreCompactor.class.getName()); // Disable automatic MOB compaction conf.setLong(MobConstants.MOB_COMPACTION_CHORE_PERIOD, 0); // Disable automatic MOB file cleaner chore @@ -122,7 +118,7 @@ private void initConf() { // Set minimum age to archive to 10 sec conf.setLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, minAgeToArchive); // Set compacted file discharger interval to a half minAgeToArchive - conf.setLong("hbase.hfile.compaction.discharger.interval", minAgeToArchive/2); + conf.setLong("hbase.hfile.compaction.discharger.interval", minAgeToArchive / 2); } private void loadData(int start, int num) { @@ -180,7 +176,7 @@ public void testMobFileCleanerChore() throws InterruptedException, IOException { assertEquals(30, scanned); } - private long getNumberOfMobFiles(Configuration conf, TableName tableName, String family) + private long getNumberOfMobFiles(Configuration conf, TableName tableName, String family) throws IOException { FileSystem fs = FileSystem.get(conf); Path dir = MobUtils.getMobFamilyPath(conf, tableName, family); @@ -193,7 +189,6 @@ private long getNumberOfMobFiles(Configuration conf, TableName tableName, Strin return stat.length; } - private long scanTable() { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileLink.java index a60153e305e9..0c9b4c7de1cb 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileLink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileLink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,12 +58,13 @@ public void testMobFilePath() throws IOException { String columnFamily = "columnFamily"; Path regionDir = new Path(tableDir, encodedRegionName); Path archivedRegionDir = new Path(archiveDir, encodedRegionName); - Path expectedMobFilePath = new Path(MobUtils.getMobFamilyPath(conf, tableName, columnFamily), - fileName).makeQualified(fs.getUri(), fs.getWorkingDirectory()); - Path expectedOriginPath = new Path(new Path(regionDir, columnFamily), fileName).makeQualified( - fs.getUri(), fs.getWorkingDirectory()); + Path expectedMobFilePath = + new Path(MobUtils.getMobFamilyPath(conf, tableName, columnFamily), fileName) + .makeQualified(fs.getUri(), fs.getWorkingDirectory()); + Path expectedOriginPath = new Path(new Path(regionDir, columnFamily), fileName) + .makeQualified(fs.getUri(), fs.getWorkingDirectory()); Path expectedArchivePath = new Path(new Path(archivedRegionDir, columnFamily), fileName) - .makeQualified(fs.getUri(), fs.getWorkingDirectory()); + .makeQualified(fs.getUri(), fs.getWorkingDirectory()); String hfileLinkName = tableName.getNameAsString() + "=" + encodedRegionName + "-" + fileName; Path hfileLinkPath = new Path(columnFamily, hfileLinkName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java index 426e2d49a314..ba592db87aec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Date; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -59,9 +58,9 @@ public void setUp() { @Test public void testHashCode() { assertEquals(MobFileName.create(startKey, dateStr, uuid, regionName).hashCode(), - MobFileName.create(startKey, dateStr, uuid, regionName).hashCode()); + MobFileName.create(startKey, dateStr, uuid, regionName).hashCode()); assertNotSame(MobFileName.create(startKey, dateStr, uuid, regionName), - MobFileName.create(startKey, dateStr, uuid, regionName)); + MobFileName.create(startKey, dateStr, uuid, regionName)); } @Test @@ -75,8 +74,8 @@ public void testGet() { MobFileName mobFileName = MobFileName.create(startKey, dateStr, uuid, regionName); assertEquals(MD5Hash.getMD5AsHex(startKey, 0, startKey.length), mobFileName.getStartKey()); assertEquals(dateStr, mobFileName.getDate()); - assertEquals(mobFileName.getFileName(), MD5Hash.getMD5AsHex(startKey, 0, startKey.length) - + dateStr + uuid+"_"+regionName); + assertEquals(mobFileName.getFileName(), + MD5Hash.getMD5AsHex(startKey, 0, startKey.length) + dateStr + uuid + "_" + regionName); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java index e1a64aa2b1b3..a05740fbf8c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java @@ -31,7 +31,6 @@ import java.util.Map; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -114,13 +113,13 @@ private void init(Configuration conf, long mobThreshold) throws Exception { compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_FAMILY).setMobEnabled(true) - .setMobThreshold(mobThreshold).setMaxVersions(1).build(); + .setMobThreshold(mobThreshold).setMaxVersions(1).build(); tableDescriptor = UTIL.createModifyableTableDescriptor(name.getMethodName()) - .modifyColumnFamily(familyDescriptor).build(); + .modifyColumnFamily(familyDescriptor).build(); RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); - region = HBaseTestingUtil.createRegionAndWAL(regionInfo, - UTIL.getDataTestDir(), conf, tableDescriptor, new MobFileCache(conf)); + region = HBaseTestingUtil.createRegionAndWAL(regionInfo, UTIL.getDataTestDir(), conf, + tableDescriptor, new MobFileCache(conf)); fs = FileSystem.get(conf); } @@ -176,23 +175,21 @@ public void testLargerValue() throws Exception { assertEquals("Before compaction: rows", compactionThreshold, UTIL.countRows(region)); assertEquals("Before compaction: mob rows", compactionThreshold, countMobRows()); assertEquals("Before compaction: number of mob cells", compactionThreshold, - countMobCellsInMetadata()); + countMobCellsInMetadata()); // Change the threshold larger than the data size setMobThreshold(region, COLUMN_FAMILY, 500); region.initialize(); List stores = region.getStores(); - for (HStore store: stores) { + for (HStore store : stores) { // Force major compaction 
store.triggerMajorCompaction(); - Optional context = - store.requestCompaction(HStore.PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, - User.getCurrent()); + Optional context = store.requestCompaction(HStore.PRIORITY_USER, + CompactionLifeCycleTracker.DUMMY, User.getCurrent()); if (!context.isPresent()) { continue; } - region.compact(context.get(), store, - NoLimitThroughputController.INSTANCE, User.getCurrent()); + region.compact(context.get(), store, NoLimitThroughputController.INSTANCE, User.getCurrent()); } assertEquals("After compaction: store files", 1, countStoreFiles()); @@ -204,21 +201,17 @@ public void testLargerValue() throws Exception { private static HRegion setMobThreshold(HRegion region, byte[] cfName, long modThreshold) { ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder - .newBuilder(region.getTableDescriptor().getColumnFamily(cfName)) - .setMobThreshold(modThreshold) - .build(); - TableDescriptor td = TableDescriptorBuilder - .newBuilder(region.getTableDescriptor()) - .removeColumnFamily(cfName) - .setColumnFamily(cfd) - .build(); + .newBuilder(region.getTableDescriptor().getColumnFamily(cfName)) + .setMobThreshold(modThreshold).build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(region.getTableDescriptor()) + .removeColumnFamily(cfName).setColumnFamily(cfd).build(); region.setTableDescriptor(td); return region; } /** - * This test will first generate store files, then bulk load them and trigger the compaction. - * When compaction, the cell value will be larger than the threshold. + * This test will first generate store files, then bulk load them and trigger the compaction. When + * compaction, the cell value will be larger than the threshold. */ @Test public void testMobCompactionWithBulkload() throws Exception { @@ -253,7 +246,7 @@ public void testMobCompactionWithBulkload() throws Exception { assertEquals("After compaction: mob rows", compactionThreshold, countMobRows()); assertEquals("After compaction: referenced mob file count", 1, countReferencedMobFiles()); assertEquals("After compaction: number of mob cells", compactionThreshold, - countMobCellsInMetadata()); + countMobCellsInMetadata()); } @Test @@ -407,7 +400,7 @@ private int countReferencedMobFiles() throws IOException { } files.add(fileName); Path familyPath = MobUtils.getMobFamilyPath(conf, tableDescriptor.getTableName(), - familyDescriptor.getNameAsString()); + familyDescriptor.getNameAsString()); assertTrue(fs.exists(new Path(familyPath, fileName))); } } while (hasMore); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreScanner.java index e9514350b640..b4c2a112fab2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreScanner.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -68,11 +67,11 @@ public class TestMobStoreScanner { HBaseClassTestRule.forClass(TestMobStoreScanner.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private final static byte [] row1 = Bytes.toBytes("row1"); - private final static byte [] row2 = Bytes.toBytes("row2"); - private final static byte [] family = Bytes.toBytes("family"); - private final static byte [] qf1 = Bytes.toBytes("qualifier1"); - private 
final static byte [] qf2 = Bytes.toBytes("qualifier2"); + private final static byte[] row1 = Bytes.toBytes("row1"); + private final static byte[] row2 = Bytes.toBytes("row2"); + private final static byte[] family = Bytes.toBytes("family"); + private final static byte[] qf1 = Bytes.toBytes("qualifier1"); + private final static byte[] qf2 = Bytes.toBytes("qualifier2"); protected final byte[] qf3 = Bytes.toBytes("qualifier3"); private static Table table; private static Admin admin; @@ -88,7 +87,7 @@ public class TestMobStoreScanner { @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt(ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, - 100 * 1024 * 1024); + 100 * 1024 * 1024); TEST_UTIL.getConfiguration().setInt(HRegion.HBASE_MAX_CELL_SIZE_KEY, 100 * 1024 * 1024); TEST_UTIL.startMiniCluster(1); } @@ -102,9 +101,9 @@ public void setUp(long threshold, TableName tn) throws Exception { conf = TEST_UTIL.getConfiguration(); fs = FileSystem.get(conf); familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true) - .setMobThreshold(threshold).setMaxVersions(4).build(); + .setMobThreshold(threshold).setMaxVersions(4).build(); tableDescriptor = - TableDescriptorBuilder.newBuilder(tn).setColumnFamily(familyDescriptor).build(); + TableDescriptorBuilder.newBuilder(tn).setColumnFamily(familyDescriptor).build(); admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptor); table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()).getTable(tn); @@ -112,7 +111,6 @@ public void setUp(long threshold, TableName tn) throws Exception { /** * Generate the mob value. - * * @param size the size of the value * @return the mob value generated */ @@ -124,14 +122,13 @@ private static byte[] generateMobValue(int size) { /** * Set the scan attribute - * * @param reversed if true, scan will be backward order * @param mobScanRaw if true, scan will get the mob reference */ public void setScan(Scan scan, boolean reversed, boolean mobScanRaw) { scan.setReversed(reversed); scan.readVersions(4); - if(mobScanRaw) { + if (mobScanRaw) { scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE)); } } @@ -158,10 +155,10 @@ public void testReversedMobStoreScanner() throws Exception { public void testGetMassive() throws Exception { setUp(defaultThreshold, TableName.valueOf(name.getMethodName())); - // Put some data 5 10, 15, 20 mb ok (this would be right below protobuf + // Put some data 5 10, 15, 20 mb ok (this would be right below protobuf // default max size of 64MB. - // 25, 30, 40 fail. these is above protobuf max size of 64MB - byte[] bigValue = new byte[25*1024*1024]; + // 25, 30, 40 fail. 
these is above protobuf max size of 64MB + byte[] bigValue = new byte[25 * 1024 * 1024]; Put put = new Put(row1); Bytes.random(bigValue); @@ -246,7 +243,7 @@ public void testReadFromCorruptMobFiles() throws Exception { } private void createRecordAndCorruptMobFile(TableName tn, byte[] row, byte[] family, byte[] qf, - byte[] value) throws IOException { + byte[] value) throws IOException { Put put1 = new Put(row); put1.addColumn(family, qf, value); table.put(put1); @@ -261,7 +258,7 @@ private void createRecordAndCorruptMobFile(TableName tn, byte[] row, byte[] fami } private Path getFlushedMobFile(Configuration conf, FileSystem fs, TableName table, String family) - throws IOException { + throws IOException { Path famDir = MobUtils.getMobFamilyPath(conf, table, family); FileStatus[] hfFss = fs.listStatus(famDir); for (FileStatus hfs : hfFss) { @@ -282,13 +279,12 @@ private void testGetFromMemStore(boolean reversed) throws Exception { testGet(tn, reversed, false); } - private void testGet(TableName tableName, boolean reversed, boolean doFlush) - throws Exception { + private void testGet(TableName tableName, boolean reversed, boolean doFlush) throws Exception { setUp(defaultThreshold, tableName); long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; - byte [] value = generateMobValue((int)defaultThreshold+1); + byte[] value = generateMobValue((int) defaultThreshold + 1); Put put1 = new Put(row1); put1.addColumn(family, qf1, ts3, value); @@ -311,7 +307,7 @@ private void testGetReferences(boolean reversed) throws Exception { long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; - byte [] value = generateMobValue((int)defaultThreshold+1); + byte[] value = generateMobValue((int) defaultThreshold + 1); Put put1 = new Put(row1); put1.addColumn(family, qf1, ts3, value); @@ -328,7 +324,7 @@ private void testGetReferences(boolean reversed) throws Exception { int count = 0; for (Result res : results) { List cells = res.listCells(); - for(Cell cell : cells) { + for (Cell cell : cells) { // Verify the value assertIsMobReference(cell, row1, family, value, tn); count++; @@ -341,9 +337,9 @@ private void testGetReferences(boolean reversed) throws Exception { private void testMobThreshold(boolean reversed) throws Exception { TableName tn = TableName.valueOf("testMobThreshold" + reversed); setUp(defaultThreshold, tn); - byte [] valueLess = generateMobValue((int)defaultThreshold-1); - byte [] valueEqual = generateMobValue((int)defaultThreshold); - byte [] valueGreater = generateMobValue((int)defaultThreshold+1); + byte[] valueLess = generateMobValue((int) defaultThreshold - 1); + byte[] valueEqual = generateMobValue((int) defaultThreshold); + byte[] valueGreater = generateMobValue((int) defaultThreshold + 1); long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; @@ -359,23 +355,23 @@ private void testMobThreshold(boolean reversed) throws Exception { Scan scan = new Scan(); setScan(scan, reversed, true); - Cell cellLess= null; + Cell cellLess = null; Cell cellEqual = null; Cell cellGreater = null; ResultScanner results = table.getScanner(scan); int count = 0; for (Result res : results) { List cells = res.listCells(); - for(Cell cell : cells) { + for (Cell cell : cells) { // Verify the value String qf = Bytes.toString(CellUtil.cloneQualifier(cell)); - if(qf.equals(Bytes.toString(qf1))) { + if (qf.equals(Bytes.toString(qf1))) { cellLess = cell; } - if(qf.equals(Bytes.toString(qf2))) { + if 
(qf.equals(Bytes.toString(qf2))) { cellEqual = cell; } - if(qf.equals(Bytes.toString(qf3))) { + if (qf.equals(Bytes.toString(qf3))) { cellGreater = cell; } count++; @@ -394,7 +390,7 @@ private void testGetFromArchive(boolean reversed) throws Exception { long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; - byte [] value = generateMobValue((int)defaultThreshold+1); + byte[] value = generateMobValue((int) defaultThreshold + 1); // Put some data Put put1 = new Put(row1); put1.addColumn(family, qf1, ts3, value); @@ -406,8 +402,8 @@ private void testGetFromArchive(boolean reversed) throws Exception { // Get the files in the mob path Path mobFamilyPath; - mobFamilyPath = MobUtils.getMobFamilyPath( - TEST_UTIL.getConfiguration(), tn, familyDescriptor.getNameAsString()); + mobFamilyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, + familyDescriptor.getNameAsString()); FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); FileStatus[] files = fs.listStatus(mobFamilyPath); @@ -416,12 +412,12 @@ private void testGetFromArchive(boolean reversed) throws Exception { Path tableDir = CommonFSUtils.getTableDir(rootDir, tn); RegionInfo regionInfo = MobUtils.getMobRegionInfo(tn); Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), - regionInfo, tableDir, family); + regionInfo, tableDir, family); // Move the files from mob path to archive path fs.mkdirs(storeArchiveDir); int fileCount = 0; - for(FileStatus file : files) { + for (FileStatus file : files) { fileCount++; Path filePath = file.getPath(); Path src = new Path(mobFamilyPath, filePath.getName()); @@ -444,8 +440,8 @@ private void testGetFromArchive(boolean reversed) throws Exception { /** * Assert the value is not store in mob. */ - private static void assertNotMobReference(Cell cell, byte[] row, byte[] family, - byte[] value) throws IOException { + private static void assertNotMobReference(Cell cell, byte[] row, byte[] family, byte[] value) + throws IOException { Assert.assertArrayEquals(row, CellUtil.cloneRow(cell)); Assert.assertArrayEquals(family, CellUtil.cloneFamily(cell)); Assert.assertArrayEquals(value, CellUtil.cloneValue(cell)); @@ -454,8 +450,8 @@ private static void assertNotMobReference(Cell cell, byte[] row, byte[] family, /** * Assert the value is store in mob. 
*/ - private static void assertIsMobReference(Cell cell, byte[] row, byte[] family, - byte[] value, TableName tn) throws IOException { + private static void assertIsMobReference(Cell cell, byte[] row, byte[] family, byte[] value, + TableName tn) throws IOException { Assert.assertArrayEquals(row, CellUtil.cloneRow(cell)); Assert.assertArrayEquals(family, CellUtil.cloneFamily(cell)); Assert.assertFalse(Bytes.equals(value, CellUtil.cloneValue(cell))); @@ -463,8 +459,8 @@ private static void assertIsMobReference(Cell cell, byte[] row, byte[] family, String fileName = MobUtils.getMobFileName(cell); int valLen = Bytes.toInt(referenceValue, 0, Bytes.SIZEOF_INT); Assert.assertEquals(value.length, valLen); - Path mobFamilyPath = MobUtils.getMobFamilyPath( - TEST_UTIL.getConfiguration(), tn, familyDescriptor.getNameAsString()); + Path mobFamilyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, + familyDescriptor.getNameAsString()); Path targetPath = new Path(mobFamilyPath, fileName); FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); Assert.assertTrue(fs.exists(targetPath)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobUtils.java index 9bb003cb95a9..a4718cb76ac4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,6 +27,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSetMultimap; @@ -35,7 +36,7 @@ public class TestMobUtils { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMobUtils.class); + HBaseClassTestRule.forClass(TestMobUtils.class); public static final TableName TEST_TABLE_1 = TableName.valueOf("testTable1"); public static final TableName TEST_TABLE_2 = TableName.valueOf("testTable2"); public static final TableName TEST_TABLE_3 = TableName.valueOf("testTable3"); @@ -43,9 +44,7 @@ public class TestMobUtils { @Test public void serializeSingleMobFileRefs() { ImmutableSetMultimap mobRefSet = - ImmutableSetMultimap.builder() - .putAll(TEST_TABLE_1, "file1a") - .build(); + ImmutableSetMultimap. builder().putAll(TEST_TABLE_1, "file1a").build(); byte[] result = MobUtils.serializeMobFileRefs(mobRefSet); assertEquals("testTable1/file1a", Bytes.toString(result)); } @@ -53,11 +52,8 @@ public void serializeSingleMobFileRefs() { @Test public void serializeMultipleMobFileRefs() { ImmutableSetMultimap mobRefSet = - ImmutableSetMultimap.builder() - .putAll(TEST_TABLE_1, "file1a", "file1b") - .putAll(TEST_TABLE_2, "file2a") - .putAll(TEST_TABLE_3, "file3a", "file3b") - .build(); + ImmutableSetMultimap. 
builder().putAll(TEST_TABLE_1, "file1a", "file1b") + .putAll(TEST_TABLE_2, "file2a").putAll(TEST_TABLE_3, "file3a", "file3b").build(); byte[] result = MobUtils.serializeMobFileRefs(mobRefSet); assertEquals("testTable1/file1a,file1b//testTable2/file2a//testTable3/file3a,file3b", Bytes.toString(result)); @@ -66,7 +62,7 @@ public void serializeMultipleMobFileRefs() { @Test public void deserializeSingleMobFileRefs() { ImmutableSetMultimap mobRefSet = - MobUtils.deserializeMobFileRefs(Bytes.toBytes("testTable1/file1a")).build(); + MobUtils.deserializeMobFileRefs(Bytes.toBytes("testTable1/file1a")).build(); assertEquals(1, mobRefSet.size()); ImmutableSet testTable1Refs = mobRefSet.get(TEST_TABLE_1); assertEquals(1, testTable1Refs.size()); @@ -75,9 +71,10 @@ public void deserializeSingleMobFileRefs() { @Test public void deserializeMultipleMobFileRefs() { - ImmutableSetMultimap mobRefSet = - MobUtils.deserializeMobFileRefs(Bytes.toBytes( - "testTable1/file1a,file1b//testTable2/file2a//testTable3/file3a,file3b")).build(); + ImmutableSetMultimap mobRefSet = MobUtils + .deserializeMobFileRefs( + Bytes.toBytes("testTable1/file1a,file1b//testTable2/file2a//testTable3/file3a,file3b")) + .build(); assertEquals(5, mobRefSet.size()); ImmutableSet testTable1Refs = mobRefSet.get(TEST_TABLE_1); ImmutableSet testTable2Refs = mobRefSet.get(TEST_TABLE_2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobWithByteBuffAllocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobWithByteBuffAllocator.java index e80a2396c066..852ba541b6df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobWithByteBuffAllocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobWithByteBuffAllocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mob; import org.apache.hadoop.conf.Configuration; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestMemoryBoundedLogMessageBuffer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestMemoryBoundedLogMessageBuffer.java index 57c86e92eca0..30f30915fd38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestMemoryBoundedLogMessageBuffer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestMemoryBoundedLogMessageBuffer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,11 +30,10 @@ import org.junit.experimental.categories.Category; /** - * Test case for the MemoryBoundedLogMessageBuffer utility. - * Ensures that it uses no more memory than it's supposed to, - * and that it properly deals with multibyte encodings. + * Test case for the MemoryBoundedLogMessageBuffer utility. Ensures that it uses no more memory than + * it's supposed to, and that it properly deals with multibyte encodings. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestMemoryBoundedLogMessageBuffer { @ClassRule @@ -46,30 +45,24 @@ public class TestMemoryBoundedLogMessageBuffer { @Test public void testBuffer() { - MemoryBoundedLogMessageBuffer buf = - new MemoryBoundedLogMessageBuffer(TEN_KB); + MemoryBoundedLogMessageBuffer buf = new MemoryBoundedLogMessageBuffer(TEN_KB); for (int i = 0; i < 1000; i++) { buf.add("hello " + i); } - assertTrue("Usage too big: " + buf.estimateHeapUsage(), - buf.estimateHeapUsage() < TEN_KB); - assertTrue("Too many retained: " + buf.getMessages().size(), - buf.getMessages().size() < 100); + assertTrue("Usage too big: " + buf.estimateHeapUsage(), buf.estimateHeapUsage() < TEN_KB); + assertTrue("Too many retained: " + buf.getMessages().size(), buf.getMessages().size() < 100); StringWriter sw = new StringWriter(); buf.dumpTo(new PrintWriter(sw)); String dump = sw.toString(); String eol = System.getProperty("line.separator"); - assertFalse("The early log messages should be evicted", - dump.contains("hello 1" + eol)); - assertTrue("The late log messages should be retained", - dump.contains("hello 999" + eol)); + assertFalse("The early log messages should be evicted", dump.contains("hello 1" + eol)); + assertTrue("The late log messages should be retained", dump.contains("hello 999" + eol)); } @Test public void testNonAsciiEncoding() { - MemoryBoundedLogMessageBuffer buf = - new MemoryBoundedLogMessageBuffer(TEN_KB); + MemoryBoundedLogMessageBuffer buf = new MemoryBoundedLogMessageBuffer(TEN_KB); buf.add(JP_TEXT); StringWriter sw = new StringWriter(); @@ -79,4 +72,3 @@ public void testNonAsciiEncoding() { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java index c9a9fc98069a..25a19a4492ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -40,7 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestTaskMonitor { private static final Logger LOG = LoggerFactory.getLogger(TestTaskMonitor.class); @@ -51,8 +52,7 @@ public class TestTaskMonitor { @Test public void testTaskMonitorBasics() { TaskMonitor tm = new TaskMonitor(new Configuration()); - assertTrue("Task monitor should start empty", - tm.getTasks().isEmpty()); + assertTrue("Task monitor should start empty", tm.getTasks().isEmpty()); // Make a task and fetch it back out MonitoredTask task = tm.createStatus("Test task"); @@ -80,8 +80,7 @@ public void testTaskMonitorBasics() { @Test public void testTasksGetAbortedOnLeak() throws InterruptedException { final TaskMonitor tm = new TaskMonitor(new Configuration()); - assertTrue("Task monitor should start empty", - tm.getTasks().isEmpty()); + assertTrue("Task monitor should start empty", tm.getTasks().isEmpty()); final AtomicBoolean threadSuccess = new AtomicBoolean(false); // Make a task in some other thread and leak it @@ -128,15 +127,15 @@ public void testTaskLimit() throws Exception { public void testDoNotPurgeRPCTask() throws Exception { int RPCTaskNums = 10; TaskMonitor tm = TaskMonitor.get(); - for(int i = 0; i < RPCTaskNums; i++) { + for (int i = 0; i < RPCTaskNums; i++) { tm.createRPCStatus("PRCTask" + i); } - for(int i = 0; i < TaskMonitor.DEFAULT_MAX_TASKS; i++) { + for (int i = 0; i < TaskMonitor.DEFAULT_MAX_TASKS; i++) { tm.createStatus("otherTask" + i); } int remainRPCTask = 0; - for(MonitoredTask task: tm.getTasks()) { - if(task instanceof MonitoredRPCHandler) { + for (MonitoredTask task : tm.getTasks()) { + if (task instanceof MonitoredRPCHandler) { remainRPCTask++; } } @@ -187,11 +186,11 @@ public void testGetTasksWithFilter() throws Exception { Mutation m = new Put(row); Query q = new Scan(); String notOperation = "for test"; - rpcHandlers.get(0).setRPC("operations", new Object[]{ m, q }, 3000); - rpcHandlers.get(1).setRPC("operations", new Object[]{ m, q }, 3000); - rpcHandlers.get(2).setRPC("operations", new Object[]{ m, q }, 3000); - rpcHandlers.get(3).setRPC("operations", new Object[]{ notOperation }, 3000); - rpcHandlers.get(4).setRPC("operations", new Object[]{ m, q }, 3000); + rpcHandlers.get(0).setRPC("operations", new Object[] { m, q }, 3000); + rpcHandlers.get(1).setRPC("operations", new Object[] { m, q }, 3000); + rpcHandlers.get(2).setRPC("operations", new Object[] { m, q }, 3000); + rpcHandlers.get(3).setRPC("operations", new Object[] { notOperation }, 3000); + rpcHandlers.get(4).setRPC("operations", new Object[] { m, q }, 3000); MonitoredRPCHandler completed = rpcHandlers.get(4); completed.markComplete("Completed!"); // Test get tasks with filter @@ -238,7 +237,7 @@ public void testClone() throws Exception { MonitoredRPCHandlerImpl monitor = new MonitoredRPCHandlerImpl(); monitor.abort("abort RPC"); TestParam testParam = new TestParam("param1"); - monitor.setRPC("method1", new Object[]{ testParam }, 0); + monitor.setRPC("method1", new Object[] { testParam }, 0); MonitoredRPCHandlerImpl clone = monitor.clone(); assertEquals(clone.getDescription(), monitor.getDescription()); 
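[Reviewer note, not part of the patch] The testDoNotPurgeRPCTask hunk in this file only reflows whitespace, but the behavior it guards is easy to miss when skimming the diff. The sketch below is a hypothetical standalone class (not in the patch) that exercises the same retention check with the TaskMonitor API as it is used in the test: RPC-handler tasks are expected to survive even after enough ordinary tasks have been created to reach DEFAULT_MAX_TASKS.

```java
// Hypothetical sketch: RPC tasks should not be purged when ordinary tasks hit the cap.
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;

public final class RpcTaskRetentionSketch {
  public static void main(String[] args) {
    TaskMonitor tm = TaskMonitor.get();
    for (int i = 0; i < 10; i++) {
      tm.createRPCStatus("rpcTask" + i);          // RPC tasks, expected to be retained
    }
    for (int i = 0; i < TaskMonitor.DEFAULT_MAX_TASKS; i++) {
      tm.createStatus("otherTask" + i);           // ordinary tasks, eligible for purging
    }
    int retainedRpcTasks = 0;
    for (MonitoredTask task : tm.getTasks()) {
      if (task instanceof MonitoredRPCHandler) {
        retainedRpcTasks++;
      }
    }
    System.out.println("RPC tasks still tracked: " + retainedRpcTasks); // expected: 10
  }
}
```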
assertEquals(clone.getState(), monitor.getState()); @@ -265,10 +264,9 @@ public void testClone() throws Exception { String.valueOf(((Map) clone.toMap().get("rpcCall")).get("params"))); monitor.resume("resume"); - monitor.setRPC("method2", new Object[]{new TestParam("param2")}, 1); + monitor.setRPC("method2", new Object[] { new TestParam("param2") }, 1); assertNotEquals(((Map) clone.toMap().get("rpcCall")).get("params"), - ((Map) monitor.toMap().get("rpcCall")).get( - "params")); + ((Map) monitor.toMap().get("rpcCall")).get("params")); LOG.info(String.valueOf(clone.toMap())); LOG.info(String.valueOf(monitor.toMap())); assertNotEquals(clone.toString(), monitor.toString()); @@ -294,4 +292,3 @@ public String toString() { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java index 58fe93803a64..1e1f3eb4ba02 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestNamedQueueRecorder.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.namequeues; import java.io.IOException; @@ -29,7 +27,6 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -64,12 +61,12 @@ /** * Tests for Online SlowLog Provider Service */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestNamedQueueRecorder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamedQueueRecorder.class); + HBaseClassTestRule.forClass(TestNamedQueueRecorder.class); private static final Logger LOG = LoggerFactory.getLogger(TestNamedQueueRecorder.class); @@ -87,9 +84,8 @@ private static Configuration applySlowLogRecorderConf(int eventSize) { } /** - * confirm that for a ringbuffer of slow logs, payload on given index of buffer - * has expected elements - * + * confirm that for a ringbuffer of slow logs, payload on given index of buffer has expected + * elements * @param i index of ringbuffer logs * @param j data value that was put on index i * @param slowLogPayloads list of payload retrieved from {@link NamedQueueRecorder} @@ -103,15 +99,15 @@ private boolean confirmPayloadParams(int i, int j, List slowLogP } @Test - public void testOnlieSlowLogConsumption() throws Exception{ + public void testOnlieSlowLogConsumption() throws Exception { Configuration conf = applySlowLogRecorderConf(8); Constructor constructor = - NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); constructor.setAccessible(true); namedQueueRecorder = constructor.newInstance(conf); AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(15).build(); + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(15).build(); 
namedQueueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG); Assert.assertEquals(getSlowLogPayloads(request).size(), 0); @@ -122,7 +118,7 @@ public void testOnlieSlowLogConsumption() throws Exception{ // add 5 records initially for (; i < 5; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } @@ -138,97 +134,81 @@ public void testOnlieSlowLogConsumption() throws Exception{ // add 2 more records for (; i < 7; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(request).size() == 7)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(request).size() == 7)); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - List slowLogPayloadsList = getSlowLogPayloads(request); - return slowLogPayloadsList.size() == 7 - && confirmPayloadParams(0, 7, slowLogPayloadsList) + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + List slowLogPayloadsList = getSlowLogPayloads(request); + return slowLogPayloadsList.size() == 7 && confirmPayloadParams(0, 7, slowLogPayloadsList) && confirmPayloadParams(5, 2, slowLogPayloadsList) && confirmPayloadParams(6, 1, slowLogPayloadsList); - }) - ); + })); // add 3 more records for (; i < 10; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(request).size() == 8)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(request).size() == 8)); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - List slowLogPayloadsList = getSlowLogPayloads(request); - // confirm ringbuffer is full - return slowLogPayloadsList.size() == 8 - && confirmPayloadParams(7, 3, slowLogPayloadsList) + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + List slowLogPayloadsList = getSlowLogPayloads(request); + // confirm ringbuffer is full + return slowLogPayloadsList.size() == 8 && confirmPayloadParams(7, 3, slowLogPayloadsList) && confirmPayloadParams(0, 10, slowLogPayloadsList) && confirmPayloadParams(1, 9, slowLogPayloadsList); - }) - ); + })); // add 4 more records for (; i < 14; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(request).size() == 8)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(request).size() == 8)); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - List slowLogPayloadsList = getSlowLogPayloads(request); - // confirm ringbuffer is full - // and ordered events - 
return slowLogPayloadsList.size() == 8 - && confirmPayloadParams(0, 14, slowLogPayloadsList) + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + List slowLogPayloadsList = getSlowLogPayloads(request); + // confirm ringbuffer is full + // and ordered events + return slowLogPayloadsList.size() == 8 && confirmPayloadParams(0, 14, slowLogPayloadsList) && confirmPayloadParams(1, 13, slowLogPayloadsList) && confirmPayloadParams(2, 12, slowLogPayloadsList) && confirmPayloadParams(3, 11, slowLogPayloadsList); - }) - ); + })); AdminProtos.SlowLogResponseRequest largeLogRequest = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG) - .build(); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - List slowLogPayloadsList = getSlowLogPayloads(largeLogRequest); - // confirm ringbuffer is full - // and ordered events - return slowLogPayloadsList.size() == 8 - && confirmPayloadParams(0, 14, slowLogPayloadsList) + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(15) + .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG).build(); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + List slowLogPayloadsList = getSlowLogPayloads(largeLogRequest); + // confirm ringbuffer is full + // and ordered events + return slowLogPayloadsList.size() == 8 && confirmPayloadParams(0, 14, slowLogPayloadsList) && confirmPayloadParams(1, 13, slowLogPayloadsList) && confirmPayloadParams(2, 12, slowLogPayloadsList) && confirmPayloadParams(3, 11, slowLogPayloadsList); - }) - ); + })); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - boolean isRingBufferCleaned = namedQueueRecorder.clearNamedQueue( - NamedQueuePayload.NamedQueueEvent.SLOW_LOG); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + boolean isRingBufferCleaned = + namedQueueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG); - LOG.debug("cleared the ringbuffer of Online Slow Log records"); + LOG.debug("cleared the ringbuffer of Online Slow Log records"); - List slowLogPayloadsList = getSlowLogPayloads(request); - // confirm ringbuffer is empty - return slowLogPayloadsList.size() == 0 && isRingBufferCleaned; - }) - ); + List slowLogPayloadsList = getSlowLogPayloads(request); + // confirm ringbuffer is empty + return slowLogPayloadsList.size() == 0 && isRingBufferCleaned; + })); } @@ -237,9 +217,9 @@ private List getSlowLogPayloads(AdminProtos.SlowLogResponseReque namedQueueGetRequest.setNamedQueueEvent(RpcLogDetails.SLOW_LOG_EVENT); namedQueueGetRequest.setSlowLogResponseRequest(request); NamedQueueGetResponse namedQueueGetResponse = - namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); - return namedQueueGetResponse == null ? - Collections.emptyList() : namedQueueGetResponse.getSlowLogPayloads(); + namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); + return namedQueueGetResponse == null ? 
Collections.emptyList() + : namedQueueGetResponse.getSlowLogPayloads(); } @Test @@ -247,32 +227,30 @@ public void testOnlineSlowLogWithHighRecords() throws Exception { Configuration conf = applySlowLogRecorderConf(14); Constructor constructor = - NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); constructor.setAccessible(true); namedQueueRecorder = constructor.newInstance(conf); AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(14 * 11).build(); + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(14 * 11).build(); Assert.assertEquals(getSlowLogPayloads(request).size(), 0); LOG.debug("Initially ringbuffer of Slow Log records is empty"); for (int i = 0; i < 14 * 11; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } LOG.debug("Added 14 * 11 records, ringbuffer should only provide latest 14 records"); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(request).size() == 14)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(request).size() == 14)); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - List slowLogPayloads = getSlowLogPayloads(request); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + List slowLogPayloads = getSlowLogPayloads(request); - // confirm strict order of slow log payloads - return slowLogPayloads.size() == 14 - && confirmPayloadParams(0, 154, slowLogPayloads) + // confirm strict order of slow log payloads + return slowLogPayloads.size() == 14 && confirmPayloadParams(0, 154, slowLogPayloads) && confirmPayloadParams(1, 153, slowLogPayloads) && confirmPayloadParams(2, 152, slowLogPayloads) && confirmPayloadParams(3, 151, slowLogPayloads) @@ -286,11 +264,10 @@ && confirmPayloadParams(10, 144, slowLogPayloads) && confirmPayloadParams(11, 143, slowLogPayloads) && confirmPayloadParams(12, 142, slowLogPayloads) && confirmPayloadParams(13, 141, slowLogPayloads); - }) - ); + })); - boolean isRingBufferCleaned = namedQueueRecorder.clearNamedQueue( - NamedQueuePayload.NamedQueueEvent.SLOW_LOG); + boolean isRingBufferCleaned = + namedQueueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG); Assert.assertTrue(isRingBufferCleaned); LOG.debug("cleared the ringbuffer of Online Slow Log records"); List slowLogPayloads = getSlowLogPayloads(request); @@ -305,24 +282,22 @@ public void testOnlineSlowLogWithDefaultDisableConfig() throws Exception { conf.unset(HConstants.SLOW_LOG_BUFFER_ENABLED_KEY); Constructor constructor = - NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); constructor.setAccessible(true); namedQueueRecorder = constructor.newInstance(conf); AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder().build(); + AdminProtos.SlowLogResponseRequest.newBuilder().build(); Assert.assertEquals(getSlowLogPayloads(request).size(), 0); LOG.debug("Initially ringbuffer of Slow Log records is empty"); for (int i = 0; i < 300; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + 
getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - List slowLogPayloads = getSlowLogPayloads(request); - return slowLogPayloads.size() == 0; - }) - ); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + List slowLogPayloads = getSlowLogPayloads(request); + return slowLogPayloads.size() == 0; + })); } @@ -331,25 +306,23 @@ public void testOnlineSlowLogWithDisableConfig() throws Exception { Configuration conf = HBASE_TESTING_UTILITY.getConfiguration(); conf.setBoolean(HConstants.SLOW_LOG_BUFFER_ENABLED_KEY, false); Constructor constructor = - NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); constructor.setAccessible(true); namedQueueRecorder = constructor.newInstance(conf); AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder().build(); + AdminProtos.SlowLogResponseRequest.newBuilder().build(); Assert.assertEquals(getSlowLogPayloads(request).size(), 0); LOG.debug("Initially ringbuffer of Slow Log records is empty"); for (int i = 0; i < 300; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - List slowLogPayloads = getSlowLogPayloads(request); - return slowLogPayloads.size() == 0; - }) - ); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + List slowLogPayloads = getSlowLogPayloads(request); + return slowLogPayloads.size() == 0; + })); conf.setBoolean(HConstants.SLOW_LOG_BUFFER_ENABLED_KEY, true); } @@ -358,14 +331,11 @@ public void testSlowLogFilters() throws Exception { Configuration conf = applySlowLogRecorderConf(30); Constructor constructor = - NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); constructor.setAccessible(true); namedQueueRecorder = constructor.newInstance(conf); - AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .setUserName("userName_87") - .build(); + AdminProtos.SlowLogResponseRequest request = AdminProtos.SlowLogResponseRequest.newBuilder() + .setLimit(15).setUserName("userName_87").build(); Assert.assertEquals(getSlowLogPayloads(request).size(), 0); @@ -373,28 +343,23 @@ public void testSlowLogFilters() throws Exception { for (int i = 0; i < 100; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } LOG.debug("Added 100 records, ringbuffer should only 1 record with matching filter"); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(request).size() == 1)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(request).size() == 1)); - AdminProtos.SlowLogResponseRequest requestClient = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .setClientAddress("client_85") - .build(); - Assert.assertNotEquals(-1, 
HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(requestClient).size() == 1)); + AdminProtos.SlowLogResponseRequest requestClient = AdminProtos.SlowLogResponseRequest + .newBuilder().setLimit(15).setClientAddress("client_85").build(); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(requestClient).size() == 1)); AdminProtos.SlowLogResponseRequest requestSlowLog = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .build(); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(requestSlowLog).size() == 15)); + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(15).build(); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(requestSlowLog).size() == 15)); } @Test @@ -402,16 +367,14 @@ public void testConcurrentSlowLogEvents() throws Exception { Configuration conf = applySlowLogRecorderConf(50000); Constructor constructor = - NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); constructor.setAccessible(true); namedQueueRecorder = constructor.newInstance(conf); AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(500000).build(); + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(500000).build(); AdminProtos.SlowLogResponseRequest largeLogRequest = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(500000) - .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG) - .build(); + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(500000) + .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG).build(); Assert.assertEquals(getSlowLogPayloads(request).size(), 0); LOG.debug("Initially ringbuffer of Slow Log records is empty"); @@ -420,7 +383,7 @@ public void testConcurrentSlowLogEvents() throws Exception { CompletableFuture.runAsync(() -> { for (int i = 0; i < 3500; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } }); @@ -429,22 +392,22 @@ public void testConcurrentSlowLogEvents() throws Exception { Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor( - 5000, () -> getSlowLogPayloads(request).size() > 10000)); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor( - 5000, () -> getSlowLogPayloads(largeLogRequest).size() > 10000)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(5000, () -> getSlowLogPayloads(request).size() > 10000)); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(5000, + () -> getSlowLogPayloads(largeLogRequest).size() > 10000)); } @Test public void testSlowLargeLogEvents() throws Exception { Configuration conf = applySlowLogRecorderConf(28); Constructor constructor = - NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); constructor.setAccessible(true); namedQueueRecorder = constructor.newInstance(conf); AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(14 * 11).build(); + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(14 * 11).build(); 
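[Reviewer note, not part of the patch] The reflowed builder chains in testSlowLogFilters and testSlowLogMixedFilters are dense after formatting; the sketch below (hypothetical class, builder calls taken verbatim from the test code in this diff) pulls the three request shapes apart so the filter semantics stay visible.

```java
// Hypothetical sketch of the SlowLogResponseRequest variants built in these tests.
// Import path assumed to be the shaded protobuf classes referenced elsewhere in the tests.
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;

public final class SlowLogRequestSketch {
  public static void main(String[] args) {
    // Single-attribute filter: only payloads whose user name matches.
    AdminProtos.SlowLogResponseRequest byUser = AdminProtos.SlowLogResponseRequest.newBuilder()
      .setLimit(15).setUserName("userName_87").build();

    // Combined filter: with the AND operator both attributes must match the same payload.
    AdminProtos.SlowLogResponseRequest byUserAndClient = AdminProtos.SlowLogResponseRequest
      .newBuilder().setLimit(15).setUserName("userName_87").setClientAddress("client_87")
      .setFilterByOperator(AdminProtos.SlowLogResponseRequest.FilterByOperator.AND).build();

    // LARGE_LOG selects the large-response ring buffer instead of the slow-response one.
    AdminProtos.SlowLogResponseRequest largeLogs = AdminProtos.SlowLogResponseRequest.newBuilder()
      .setLimit(15).setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG).build();

    System.out.println(byUser.getUserName() + ", " + byUserAndClient.getClientAddress() + ", "
      + largeLogs.getLogType());
  }
}
```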
Assert.assertEquals(getSlowLogPayloads(request).size(), 0); LOG.debug("Initially ringbuffer of Slow Log records is empty"); @@ -459,23 +422,20 @@ public void testSlowLargeLogEvents() throws Exception { isSlowLog = false; isLargeLog = true; } - RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1), - isSlowLog, isLargeLog); + RpcLogDetails rpcLogDetails = getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), + "class_" + (i + 1), isSlowLog, isLargeLog); namedQueueRecorder.addRecord(rpcLogDetails); } LOG.debug("Added 14 * 11 records, ringbuffer should only provide latest 14 records"); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(request).size() == 14)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(request).size() == 14)); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - List slowLogPayloads = getSlowLogPayloads(request); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + List slowLogPayloads = getSlowLogPayloads(request); - // confirm strict order of slow log payloads - return slowLogPayloads.size() == 14 - && confirmPayloadParams(0, 153, slowLogPayloads) + // confirm strict order of slow log payloads + return slowLogPayloads.size() == 14 && confirmPayloadParams(0, 153, slowLogPayloads) && confirmPayloadParams(1, 151, slowLogPayloads) && confirmPayloadParams(2, 149, slowLogPayloads) && confirmPayloadParams(3, 147, slowLogPayloads) @@ -489,25 +449,20 @@ && confirmPayloadParams(10, 133, slowLogPayloads) && confirmPayloadParams(11, 131, slowLogPayloads) && confirmPayloadParams(12, 129, slowLogPayloads) && confirmPayloadParams(13, 127, slowLogPayloads); - }) - ); + })); AdminProtos.SlowLogResponseRequest largeLogRequest = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(14 * 11) - .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG) - .build(); + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(14 * 11) + .setLogType(AdminProtos.SlowLogResponseRequest.LogType.LARGE_LOG).build(); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(largeLogRequest).size() == 14)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(largeLogRequest).size() == 14)); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> { - List largeLogPayloads = getSlowLogPayloads(largeLogRequest); + Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> { + List largeLogPayloads = getSlowLogPayloads(largeLogRequest); - // confirm strict order of slow log payloads - return largeLogPayloads.size() == 14 - && confirmPayloadParams(0, 154, largeLogPayloads) + // confirm strict order of slow log payloads + return largeLogPayloads.size() == 14 && confirmPayloadParams(0, 154, largeLogPayloads) && confirmPayloadParams(1, 152, largeLogPayloads) && confirmPayloadParams(2, 150, largeLogPayloads) && confirmPayloadParams(3, 148, largeLogPayloads) @@ -521,8 +476,7 @@ && confirmPayloadParams(10, 134, largeLogPayloads) && confirmPayloadParams(11, 132, largeLogPayloads) && confirmPayloadParams(12, 130, largeLogPayloads) && confirmPayloadParams(13, 128, largeLogPayloads); - }) - ); + })); } @Test @@ -530,67 +484,46 @@ public void testSlowLogMixedFilters() throws Exception { Configuration conf = applySlowLogRecorderConf(30); Constructor constructor = - 
NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); + NamedQueueRecorder.class.getDeclaredConstructor(Configuration.class); constructor.setAccessible(true); namedQueueRecorder = constructor.newInstance(conf); - AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .setUserName("userName_87") - .setClientAddress("client_88") - .build(); + AdminProtos.SlowLogResponseRequest request = AdminProtos.SlowLogResponseRequest.newBuilder() + .setLimit(15).setUserName("userName_87").setClientAddress("client_88").build(); Assert.assertEquals(getSlowLogPayloads(request).size(), 0); for (int i = 0; i < 100; i++) { RpcLogDetails rpcLogDetails = - getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(request).size() == 2)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(request).size() == 2)); AdminProtos.SlowLogResponseRequest request2 = AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .setUserName("userName_1") - .setClientAddress("client_2") - .build(); + .setLimit(15).setUserName("userName_1").setClientAddress("client_2").build(); Assert.assertEquals(0, getSlowLogPayloads(request2).size()); - AdminProtos.SlowLogResponseRequest request3 = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .setUserName("userName_87") - .setClientAddress("client_88") - .setFilterByOperator(AdminProtos.SlowLogResponseRequest.FilterByOperator.AND) - .build(); + AdminProtos.SlowLogResponseRequest request3 = AdminProtos.SlowLogResponseRequest.newBuilder() + .setLimit(15).setUserName("userName_87").setClientAddress("client_88") + .setFilterByOperator(AdminProtos.SlowLogResponseRequest.FilterByOperator.AND).build(); Assert.assertEquals(0, getSlowLogPayloads(request3).size()); - AdminProtos.SlowLogResponseRequest request4 = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .setUserName("userName_87") - .setClientAddress("client_87") - .setFilterByOperator(AdminProtos.SlowLogResponseRequest.FilterByOperator.AND) - .build(); + AdminProtos.SlowLogResponseRequest request4 = AdminProtos.SlowLogResponseRequest.newBuilder() + .setLimit(15).setUserName("userName_87").setClientAddress("client_87") + .setFilterByOperator(AdminProtos.SlowLogResponseRequest.FilterByOperator.AND).build(); Assert.assertEquals(1, getSlowLogPayloads(request4).size()); - AdminProtos.SlowLogResponseRequest request5 = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .setUserName("userName_88") - .setClientAddress("client_89") - .setFilterByOperator(AdminProtos.SlowLogResponseRequest.FilterByOperator.OR) - .build(); + AdminProtos.SlowLogResponseRequest request5 = AdminProtos.SlowLogResponseRequest.newBuilder() + .setLimit(15).setUserName("userName_88").setClientAddress("client_89") + .setFilterByOperator(AdminProtos.SlowLogResponseRequest.FilterByOperator.OR).build(); Assert.assertEquals(2, getSlowLogPayloads(request5).size()); AdminProtos.SlowLogResponseRequest requestSlowLog = - AdminProtos.SlowLogResponseRequest.newBuilder() - .setLimit(15) - .build(); - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, - () -> getSlowLogPayloads(requestSlowLog).size() == 15)); + 
AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(15).build(); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(requestSlowLog).size() == 15)); } static RpcLogDetails getRpcLogDetails(String userName, String clientAddress, String className) { @@ -598,11 +531,11 @@ static RpcLogDetails getRpcLogDetails(String userName, String clientAddress, Str return new RpcLogDetails(rpcCall, rpcCall.getParam(), clientAddress, 0, className, true, true); } - private RpcLogDetails getRpcLogDetails(String userName, String clientAddress, - String className, boolean isSlowLog, boolean isLargeLog) { + private RpcLogDetails getRpcLogDetails(String userName, String clientAddress, String className, + boolean isSlowLog, boolean isLargeLog) { RpcCall rpcCall = getRpcCall(userName); return new RpcLogDetails(rpcCall, rpcCall.getParam(), clientAddress, 0, className, isSlowLog, - isLargeLog); + isLargeLog); } private static RpcCall getRpcCall(String userName) { @@ -673,8 +606,8 @@ public int getRemotePort() { } @Override - public void setResponse(Message param, CellScanner cells, - Throwable errorThrowable, String error) { + public void setResponse(Message param, CellScanner cells, Throwable errorThrowable, + String error) { } @Override @@ -763,34 +696,32 @@ private static Message getMessage() { switch (i) { case 0: { - message = ClientProtos.ScanRequest.newBuilder() - .setRegion(HBaseProtos.RegionSpecifier.newBuilder() - .setValue(ByteString.copyFromUtf8("region1")) - .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) - .build()) - .build(); + message = + ClientProtos.ScanRequest.newBuilder() + .setRegion(HBaseProtos.RegionSpecifier.newBuilder() + .setValue(ByteString.copyFromUtf8("region1")) + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME).build()) + .build(); break; } case 1: { - message = ClientProtos.MutateRequest.newBuilder() - .setRegion(HBaseProtos.RegionSpecifier.newBuilder() - .setValue(ByteString.copyFromUtf8("region2")) - .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)) - .setMutation(ClientProtos.MutationProto.newBuilder() - .setRow(ByteString.copyFromUtf8("row123")) - .build()) - .build(); + message = + ClientProtos.MutateRequest.newBuilder() + .setRegion(HBaseProtos.RegionSpecifier.newBuilder() + .setValue(ByteString.copyFromUtf8("region2")) + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)) + .setMutation(ClientProtos.MutationProto.newBuilder() + .setRow(ByteString.copyFromUtf8("row123")).build()) + .build(); break; } case 2: { message = ClientProtos.GetRequest.newBuilder() - .setRegion(HBaseProtos.RegionSpecifier.newBuilder() - .setValue(ByteString.copyFromUtf8("region2")) - .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)) - .setGet(ClientProtos.Get.newBuilder() - .setRow(ByteString.copyFromUtf8("row123")) - .build()) - .build(); + .setRegion( + HBaseProtos.RegionSpecifier.newBuilder().setValue(ByteString.copyFromUtf8("region2")) + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)) + .setGet(ClientProtos.Get.newBuilder().setRow(ByteString.copyFromUtf8("row123")).build()) + .build(); break; } default: diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestSlowLogAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestSlowLogAccessor.java index c94d8e45154c..ef7045c5f504 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestSlowLogAccessor.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestSlowLogAccessor.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.namequeues; import java.io.IOException; @@ -61,7 +59,7 @@ public class TestSlowLogAccessor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSlowLogAccessor.class); + HBaseClassTestRule.forClass(TestSlowLogAccessor.class); private static final Logger LOG = LoggerFactory.getLogger(TestNamedQueueRecorder.class); @@ -95,13 +93,13 @@ public void setUp() throws Exception { this.namedQueueRecorder = hRegionServer.getNamedQueueRecorder(); } - private List getSlowLogPayloads( - AdminProtos.SlowLogResponseRequest request) { + private List + getSlowLogPayloads(AdminProtos.SlowLogResponseRequest request) { NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest(); namedQueueGetRequest.setNamedQueueEvent(RpcLogDetails.SLOW_LOG_EVENT); namedQueueGetRequest.setSlowLogResponseRequest(request); NamedQueueGetResponse namedQueueGetResponse = - namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); + namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); return namedQueueGetResponse.getSlowLogPayloads(); } @@ -109,7 +107,7 @@ private List getSlowLogPayloads( public void testSlowLogRecords() throws Exception { AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(15).build(); + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(15).build(); namedQueueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG); Assert.assertEquals(getSlowLogPayloads(request).size(), 0); @@ -119,34 +117,34 @@ public void testSlowLogRecords() throws Exception { Connection connection = waitForSlowLogTableCreation(); // add 5 records initially for (; i < 5; i++) { - RpcLogDetails rpcLogDetails = TestNamedQueueRecorder - .getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + RpcLogDetails rpcLogDetails = TestNamedQueueRecorder.getRpcLogDetails("userName_" + (i + 1), + "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } // add 2 more records for (; i < 7; i++) { - RpcLogDetails rpcLogDetails = TestNamedQueueRecorder - .getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + RpcLogDetails rpcLogDetails = TestNamedQueueRecorder.getRpcLogDetails("userName_" + (i + 1), + "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } // add 3 more records for (; i < 10; i++) { - RpcLogDetails rpcLogDetails = TestNamedQueueRecorder - .getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + RpcLogDetails rpcLogDetails = TestNamedQueueRecorder.getRpcLogDetails("userName_" + (i + 1), + "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } // add 4 more records for (; i < 14; i++) { - RpcLogDetails rpcLogDetails = TestNamedQueueRecorder - .getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + RpcLogDetails rpcLogDetails = TestNamedQueueRecorder.getRpcLogDetails("userName_" + (i + 1), + "client_" + (i + 1), "class_" + (i + 1)); 
namedQueueRecorder.addRecord(rpcLogDetails); } - Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY - .waitFor(3000, () -> getSlowLogPayloads(request).size() == 14)); + Assert.assertNotEquals(-1, + HBASE_TESTING_UTILITY.waitFor(3000, () -> getSlowLogPayloads(request).size() == 14)); Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(3000, () -> getTableCount(connection) == 14)); @@ -169,7 +167,7 @@ private Connection waitForSlowLogTableCreation() throws IOException { Assert.assertNotEquals(-1, HBASE_TESTING_UTILITY.waitFor(2000, () -> { try { return HBASE_TESTING_UTILITY.getAdmin() - .tableExists(SlowLogTableAccessor.SLOW_LOG_TABLE_NAME); + .tableExists(SlowLogTableAccessor.SLOW_LOG_TABLE_NAME); } catch (IOException e) { return false; } @@ -183,7 +181,7 @@ public void testHigherSlowLogs() throws Exception { namedQueueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG); AdminProtos.SlowLogResponseRequest request = - AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(500000).build(); + AdminProtos.SlowLogResponseRequest.newBuilder().setLimit(500000).build(); Assert.assertEquals(getSlowLogPayloads(request).size(), 0); for (int j = 0; j < 100; j++) { @@ -193,7 +191,7 @@ public void testHigherSlowLogs() throws Exception { Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS); } RpcLogDetails rpcLogDetails = TestNamedQueueRecorder - .getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); + .getRpcLogDetails("userName_" + (i + 1), "client_" + (i + 1), "class_" + (i + 1)); namedQueueRecorder.addRecord(rpcLogDetails); } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java index 928b54676c27..2bc6b168dbd9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java @@ -108,9 +108,8 @@ public class TestNamespaceAuditor { public static void before() throws Exception { Configuration conf = UTIL.getConfiguration(); conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName()); - conf.setStrings( - CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - MasterSyncObserver.class.getName(), CPMasterObserver.class.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MasterSyncObserver.class.getName(), + CPMasterObserver.class.getName()); conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5); conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); conf.setClass("hbase.coprocessor.regionserver.classes", CPRegionServerObserver.class, @@ -137,8 +136,8 @@ public void cleanup() throws Exception, KeeperException { ADMIN.deleteNamespace(ns.getName()); } } - assertTrue("Quota manager not initialized", UTIL.getHBaseCluster().getMaster() - .getMasterQuotaManager().isQuotaInitialized()); + assertTrue("Quota manager not initialized", + UTIL.getHBaseCluster().getMaster().getMasterQuotaManager().isQuotaInitialized()); } @Test @@ -151,16 +150,16 @@ public void testTableOperations() throws Exception { assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp)); assertEquals(3, ADMIN.listNamespaceDescriptors().length); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); TableDescriptorBuilder 
tableDescOne = TableDescriptorBuilder - .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1")); + .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1")); tableDescOne.setColumnFamily(columnFamilyDescriptor); TableDescriptorBuilder tableDescTwo = TableDescriptorBuilder - .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2")); + .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2")); tableDescTwo.setColumnFamily(columnFamilyDescriptor); TableDescriptorBuilder tableDescThree = TableDescriptorBuilder - .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table3")); + .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table3")); tableDescThree.setColumnFamily(columnFamilyDescriptor); ADMIN.createTable(tableDescOne.build()); boolean constraintViolated = false; @@ -195,10 +194,9 @@ public void testValidQuotas() throws Exception { boolean exceptionCaught = false; FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - NamespaceDescriptor nspDesc = - NamespaceDescriptor.create(prefix + "vq1") - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "hihdufh") - .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(prefix + "vq1") + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "hihdufh") + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); try { ADMIN.createNamespace(nspDesc); } catch (Exception exp) { @@ -208,10 +206,9 @@ public void testValidQuotas() throws Exception { assertTrue(exceptionCaught); assertFalse(fs.exists(CommonFSUtils.getNamespaceDir(rootDir, nspDesc.getName()))); } - nspDesc = - NamespaceDescriptor.create(prefix + "vq2") - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "-456") - .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); + nspDesc = NamespaceDescriptor.create(prefix + "vq2") + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "-456") + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); try { ADMIN.createNamespace(nspDesc); } catch (Exception exp) { @@ -221,10 +218,9 @@ public void testValidQuotas() throws Exception { assertTrue(exceptionCaught); assertFalse(fs.exists(CommonFSUtils.getNamespaceDir(rootDir, nspDesc.getName()))); } - nspDesc = - NamespaceDescriptor.create(prefix + "vq3") - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10") - .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "sciigd").build(); + nspDesc = NamespaceDescriptor.create(prefix + "vq3") + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10") + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "sciigd").build(); try { ADMIN.createNamespace(nspDesc); } catch (Exception exp) { @@ -234,10 +230,9 @@ public void testValidQuotas() throws Exception { assertTrue(exceptionCaught); assertFalse(fs.exists(CommonFSUtils.getNamespaceDir(rootDir, nspDesc.getName()))); } - nspDesc = - NamespaceDescriptor.create(prefix + "vq4") - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10") - .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "-1500").build(); + nspDesc = NamespaceDescriptor.create(prefix + "vq4") + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10") + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "-1500").build(); try { ADMIN.createNamespace(nspDesc); } 
catch (Exception exp) { @@ -252,21 +247,20 @@ public void testValidQuotas() throws Exception { @Test public void testDeleteTable() throws Exception { String namespace = prefix + "_dummy"; - NamespaceDescriptor nspDesc = - NamespaceDescriptor.create(namespace) - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "100") - .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "3").build(); + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(namespace) + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "100") + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "3").build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(namespace)); NamespaceTableAndRegionInfo stateInfo = getNamespaceState(nspDesc.getName()); assertNotNull("Namespace state found null for " + namespace, stateInfo); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); TableDescriptorBuilder tableDescOne = TableDescriptorBuilder - .newBuilder(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table1")); + .newBuilder(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table1")); tableDescOne.setColumnFamily(columnFamilyDescriptor); TableDescriptorBuilder tableDescTwo = TableDescriptorBuilder - .newBuilder(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table2")); + .newBuilder(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table2")); tableDescTwo.setColumnFamily(columnFamilyDescriptor); ADMIN.createTable(tableDescOne.build()); ADMIN.createTable(tableDescTwo.build(), Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5); @@ -324,8 +318,8 @@ public Optional getMasterObserver() { @Override public synchronized void preMergeRegionsAction( - final ObserverContext ctx, - final RegionInfo[] regionsToMerge) throws IOException { + final ObserverContext ctx, final RegionInfo[] regionsToMerge) + throws IOException { notifyAll(); if (shouldFailMerge) { throw new IOException("fail merge"); @@ -337,15 +331,14 @@ public synchronized void preMergeRegionsAction( public void testRegionMerge() throws Exception { String nsp1 = prefix + "_regiontest"; final int initialRegions = 3; - NamespaceDescriptor nspDesc = - NamespaceDescriptor.create(nsp1) - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "" + initialRegions) - .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1) + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "" + initialRegions) + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); ADMIN.createNamespace(nspDesc); final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2"); byte[] columnFamily = Bytes.toBytes("info"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableTwo) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnFamily)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnFamily)).build(); ADMIN.createTable(tableDescriptor, Bytes.toBytes("0"), Bytes.toBytes("9"), initialRegions); Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); try (Table table = connection.getTable(tableTwo)) { @@ -355,10 +348,8 @@ public void testRegionMerge() throws Exception { List hris = ADMIN.getRegions(tableTwo); assertEquals(initialRegions, hris.size()); 
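[Reviewer note, not part of the patch] TestNamespaceAuditor repeats the same namespace-quota setup many times, and the reformatting makes the builder chains harder to scan. The sketch below is a hypothetical helper (class name and the availability of an Admin handle are assumptions; import paths follow the classes referenced in the test) showing the quota configuration that the auditor enforces.

```java
// Hypothetical sketch: create a namespace capped at 3 regions / 2 tables, then a table in it.
import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.TableNamespaceManager;

public final class NamespaceQuotaSketch {
  static void createQuotedNamespace(Admin admin) throws IOException {
    // Namespace limited to 3 regions and 2 tables; exceeding either limit is rejected.
    NamespaceDescriptor nspDesc = NamespaceDescriptor.create("quota_demo")
      .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3")
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
    admin.createNamespace(nspDesc);

    // A table inside the namespace counts against both the table and region quotas.
    TableDescriptor table = TableDescriptorBuilder
      .newBuilder(TableName.valueOf("quota_demo" + TableName.NAMESPACE_DELIM + "table1"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info")).build();
    admin.createTable(table);
  }
}
```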
Collections.sort(hris, RegionInfo.COMPARATOR); - Future f = ADMIN.mergeRegionsAsync( - hris.get(0).getEncodedNameAsBytes(), - hris.get(1).getEncodedNameAsBytes(), - false); + Future f = ADMIN.mergeRegionsAsync(hris.get(0).getEncodedNameAsBytes(), + hris.get(1).getEncodedNameAsBytes(), false); f.get(10, TimeUnit.SECONDS); hris = ADMIN.getRegions(tableTwo); @@ -366,7 +357,7 @@ public void testRegionMerge() throws Exception { Collections.sort(hris, RegionInfo.COMPARATOR); byte[] splitKey = Bytes.toBytes("3"); HRegion regionToSplit = UTIL.getMiniHBaseCluster().getRegions(tableTwo).stream() - .filter(r -> r.getRegionInfo().containsRow(splitKey)).findFirst().get(); + .filter(r -> r.getRegionInfo().containsRow(splitKey)).findFirst().get(); regionToSplit.compact(true); // Waiting for compaction to finish UTIL.waitFor(30000, new Waiter.Predicate() { @@ -401,10 +392,8 @@ public boolean evaluate() throws Exception { CPMasterObserver masterObserver = (CPMasterObserver) coprocessor; masterObserver.failMerge(true); - f = ADMIN.mergeRegionsAsync( - hris.get(1).getEncodedNameAsBytes(), - hris.get(2).getEncodedNameAsBytes(), - false); + f = ADMIN.mergeRegionsAsync(hris.get(1).getEncodedNameAsBytes(), + hris.get(2).getEncodedNameAsBytes(), false); try { f.get(10, TimeUnit.SECONDS); fail("Merge was supposed to fail!"); @@ -433,15 +422,14 @@ public boolean evaluate() throws Exception { @Test public void testRecreateTableWithSameNameAfterFirstTimeFailure() throws Exception { String nsp1 = prefix + "_testRecreateTable"; - NamespaceDescriptor nspDesc = - NamespaceDescriptor.create(nsp1) - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20") - .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1").build(); + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1) + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20") + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1").build(); ADMIN.createNamespace(nspDesc); final TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1"); byte[] columnFamily = Bytes.toBytes("info"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableOne) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnFamily)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnFamily)).build(); MasterSyncObserver.throwExceptionInPreCreateTableAction = true; try { try { @@ -454,7 +442,8 @@ public void testRecreateTableWithSameNameAfterFirstTimeFailure() throws Exceptio NamespaceTableAndRegionInfo nstate = getNamespaceState(nsp1); assertEquals("First table creation failed in namespace so number of tables in namespace " - + "should be 0.", 0, nstate.getTables().size()); + + "should be 0.", + 0, nstate.getTables().size()); MasterSyncObserver.throwExceptionInPreCreateTableAction = false; try { @@ -465,8 +454,9 @@ public void testRecreateTableWithSameNameAfterFirstTimeFailure() throws Exceptio } assertTrue(ADMIN.tableExists(tableOne)); nstate = getNamespaceState(nsp1); - assertEquals("First table was created successfully so table size in namespace should " - + "be one now.", 1, nstate.getTables().size()); + assertEquals( + "First table was created successfully so table size in namespace should " + "be one now.", + 1, nstate.getTables().size()); } finally { MasterSyncObserver.throwExceptionInPreCreateTableAction = false; if (ADMIN.tableExists(tableOne)) { @@ -477,8 +467,8 @@ public void testRecreateTableWithSameNameAfterFirstTimeFailure() throws Exceptio } } - private NamespaceTableAndRegionInfo 
getNamespaceState(String namespace) throws KeeperException, - IOException { + private NamespaceTableAndRegionInfo getNamespaceState(String namespace) + throws KeeperException, IOException { return getQuotaManager().getState(namespace); } @@ -514,15 +504,12 @@ public void testStatePreserve() throws Exception { TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2"); TableName tableThree = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table3"); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); - TableDescriptorBuilder tableDescOne = TableDescriptorBuilder - .newBuilder(tableOne); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); + TableDescriptorBuilder tableDescOne = TableDescriptorBuilder.newBuilder(tableOne); tableDescOne.setColumnFamily(columnFamilyDescriptor); - TableDescriptorBuilder tableDescTwo = TableDescriptorBuilder - .newBuilder(tableTwo); + TableDescriptorBuilder tableDescTwo = TableDescriptorBuilder.newBuilder(tableTwo); tableDescTwo.setColumnFamily(columnFamilyDescriptor); - TableDescriptorBuilder tableDescThree = TableDescriptorBuilder - .newBuilder(tableThree); + TableDescriptorBuilder tableDescThree = TableDescriptorBuilder.newBuilder(tableThree); tableDescThree.setColumnFamily(columnFamilyDescriptor); ADMIN.createTable(tableDescOne.build(), Bytes.toBytes("1"), Bytes.toBytes("1000"), 3); @@ -540,8 +527,8 @@ public boolean evaluate() throws Exception { NamespaceTableAndRegionInfo before = getNamespaceState(nsp1); killActiveMaster(); NamespaceTableAndRegionInfo after = getNamespaceState(nsp1); - assertEquals("Expected: " + before.getTables() + " Found: " + after.getTables(), before - .getTables().size(), after.getTables().size()); + assertEquals("Expected: " + before.getTables() + " Found: " + after.getTables(), + before.getTables().size(), after.getTables().size()); } public static void waitForQuotaInitialize(final HBaseTestingUtil util) throws Exception { @@ -565,8 +552,7 @@ private void killActiveMaster() throws Exception { } private NamespaceAuditor getQuotaManager() { - return UTIL.getHBaseCluster().getMaster() - .getMasterQuotaManager().getNamespaceQuotaManager(); + return UTIL.getHBaseCluster().getMaster().getMasterQuotaManager().getNamespaceQuotaManager(); } public static class MasterSyncObserver implements MasterCoprocessor, MasterObserver { @@ -586,8 +572,8 @@ public void preDeleteTable(ObserverContext ctx, @Override public void postCompletedDeleteTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException { + final ObserverContext ctx, final TableName tableName) + throws IOException { tableDeletionLatch.countDown(); } @@ -603,8 +589,8 @@ public void preCreateTableAction(ObserverContext c private void deleteTable(final TableName tableName) throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. 
- MasterSyncObserver observer = UTIL.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class); + MasterSyncObserver observer = UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost() + .findCoprocessor(MasterSyncObserver.class); ADMIN.deleteTable(tableName); observer.tableDeletionLatch.await(); } @@ -612,19 +598,18 @@ private void deleteTable(final TableName tableName) throws Exception { @Test(expected = QuotaExceededException.class) public void testExceedTableQuotaInNamespace() throws Exception { String nsp = prefix + "_testExceedTableQuotaInNamespace"; - NamespaceDescriptor nspDesc = - NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1") - .build(); + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp) + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1").build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp)); assertEquals(3, ADMIN.listNamespaceDescriptors().length); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); TableDescriptorBuilder tableDescOne = TableDescriptorBuilder - .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1")); + .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1")); tableDescOne.setColumnFamily(columnFamilyDescriptor); TableDescriptorBuilder tableDescTwo = TableDescriptorBuilder - .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2")); + .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2")); tableDescTwo.setColumnFamily(columnFamilyDescriptor); ADMIN.createTable(tableDescOne.build()); ADMIN.createTable(tableDescTwo.build(), Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); @@ -633,17 +618,15 @@ public void testExceedTableQuotaInNamespace() throws Exception { @Test(expected = QuotaExceededException.class) public void testCloneSnapshotQuotaExceed() throws Exception { String nsp = prefix + "_testTableQuotaExceedWithCloneSnapshot"; - NamespaceDescriptor nspDesc = - NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1") - .build(); + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp) + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1").build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp)); TableName tableName = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"); TableName cloneTableName = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); - TableDescriptorBuilder tableDescOne = TableDescriptorBuilder - .newBuilder(tableName); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); + TableDescriptorBuilder tableDescOne = TableDescriptorBuilder.newBuilder(tableName); tableDescOne.setColumnFamily(columnFamilyDescriptor); ADMIN.createTable(tableDescOne.build()); String snapshot = "snapshot_testTableQuotaExceedWithCloneSnapshot"; @@ -664,9 +647,8 @@ public void testCloneSnapshot() throws Exception { TableName cloneTableName = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"); ColumnFamilyDescriptor columnFamilyDescriptor = - 
ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); - TableDescriptorBuilder tableDescOne = TableDescriptorBuilder - .newBuilder(tableName); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); + TableDescriptorBuilder tableDescOne = TableDescriptorBuilder.newBuilder(tableName); tableDescOne.setColumnFamily(columnFamilyDescriptor); ADMIN.createTable(tableDescOne.build(), Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); @@ -695,16 +677,14 @@ public void testCloneSnapshot() throws Exception { @Test public void testRestoreSnapshot() throws Exception { String nsp = prefix + "_testRestoreSnapshot"; - NamespaceDescriptor nspDesc = - NamespaceDescriptor.create(nsp) - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10").build(); + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp) + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10").build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp)); TableName tableName1 = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); - TableDescriptorBuilder tableDescOne = TableDescriptorBuilder - .newBuilder(tableName1); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); + TableDescriptorBuilder tableDescOne = TableDescriptorBuilder.newBuilder(tableName1); tableDescOne.setColumnFamily(columnFamilyDescriptor); ADMIN.createTable(tableDescOne.build(), Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); @@ -733,17 +713,15 @@ public void testRestoreSnapshot() throws Exception { @Test public void testRestoreSnapshotQuotaExceed() throws Exception { String nsp = prefix + "_testRestoreSnapshotQuotaExceed"; - NamespaceDescriptor nspDesc = - NamespaceDescriptor.create(nsp) - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10").build(); + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp) + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10").build(); ADMIN.createNamespace(nspDesc); NamespaceDescriptor ndesc = ADMIN.getNamespaceDescriptor(nsp); assertNotNull("Namespace descriptor found null.", ndesc); TableName tableName1 = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); - TableDescriptorBuilder tableDescOne = TableDescriptorBuilder - .newBuilder(tableName1); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam1")).build(); + TableDescriptorBuilder tableDescOne = TableDescriptorBuilder.newBuilder(tableName1); tableDescOne.setColumnFamily(columnFamilyDescriptor); ADMIN.createTable(tableDescOne.build(), Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java index e1d46ef92eef..cbf3150ea1bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,19 +22,19 @@ import java.util.HashMap; import java.util.List; import java.util.concurrent.ThreadPoolExecutor; - import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MetricsMaster; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; + public class SimpleMasterProcedureManager extends MasterProcedureManager { public static final String SIMPLE_SIGNATURE = "simple_test"; @@ -66,8 +66,8 @@ public void initialize(MasterServices master, MetricsMaster metricsMaster) // setup the default procedure coordinator String name = master.getServerName().toString(); ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, 1); - ProcedureCoordinatorRpcs comms = new ZKProcedureCoordinator( - master.getZooKeeper(), getProcedureSignature(), name); + ProcedureCoordinatorRpcs comms = + new ZKProcedureCoordinator(master.getZooKeeper(), getProcedureSignature(), name); this.coordinator = new ProcedureCoordinator(comms, tpool); } @@ -90,15 +90,14 @@ public byte[] execProcedureWithRet(ProcedureDescription desc) throws IOException } Procedure proc = coordinator.startProcedure(monitor, desc.getInstance(), new byte[0], servers); if (proc == null) { - String msg = "Failed to submit distributed procedure for '" - + getProcedureSignature() + "'"; + String msg = "Failed to submit distributed procedure for '" + getProcedureSignature() + "'"; LOG.error(msg); throw new IOException(msg); } HashMap returnData = null; try { - // wait for the procedure to complete. A timer thread is kicked off that should cancel this + // wait for the procedure to complete. A timer thread is kicked off that should cancel this // if it takes too long. returnData = proc.waitForCompletedWithRet(); LOG.info("Done waiting - exec procedure for " + desc.getInstance()); @@ -117,7 +116,8 @@ public byte[] execProcedureWithRet(ProcedureDescription desc) throws IOException @Override public void checkPermissions(ProcedureDescription desc, AccessChecker accessChecker, User user) - throws IOException {} + throws IOException { + } @Override public boolean isProcedureDone(ProcedureDescription desc) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java index 9ccee661586a..bbb268ed74cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,20 +28,20 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.errorhandling.ForeignException; +import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.hadoop.hbase.errorhandling.ForeignException; -import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + public class SimpleRSProcedureManager extends RegionServerProcedureManager { private static final Logger LOG = LoggerFactory.getLogger(SimpleRSProcedureManager.class); @@ -56,8 +56,7 @@ public void initialize(RegionServerServices rss) throws KeeperException { ZKWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, getProcedureSignature()); - ThreadPoolExecutor pool = - ProcedureMember.defaultPool(rss.getServerName().toString(), 1); + ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(), 1); this.member = new ProcedureMember(memberRpcs, pool, new SimleSubprocedureBuilder()); LOG.info("Initialized: " + rss.getServerName().toString()); } @@ -91,8 +90,8 @@ public Subprocedure buildSubprocedure(String name) { // don't run a procedure if the parent is stop(ping) if (rss.isStopping() || rss.isStopped()) { - throw new IllegalStateException("Can't start procedure on RS: " + rss.getServerName() - + ", because stopping/stopped!"); + throw new IllegalStateException( + "Can't start procedure on RS: " + rss.getServerName() + ", because stopping/stopped!"); } LOG.info("Attempting to run a procedure."); @@ -126,9 +125,9 @@ public static class SimpleSubprocedurePool implements Closeable, Abortable { public SimpleSubprocedurePool(String name, Configuration conf) { this.name = name; - executor = Executors.newSingleThreadExecutor( - new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-procedure-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + executor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder() + .setNameFormat("rs(" + name + ")-procedure-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); taskPool = new ExecutorCompletionService<>(executor); } @@ -142,7 +141,6 @@ public void submitTask(final Callable task) { /** * Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)} - * * @return true on success, false otherwise * @throws ForeignException */ @@ -150,13 +148,14 @@ public boolean waitForOutstandingTasks() throws ForeignException { LOG.debug("Waiting for procedure to finish."); try { - for (Future f: futures) { + for (Future f : futures) { f.get(); } return true; } catch (InterruptedException e) { - if (aborted) throw new ForeignException( - "Interrupted and found to be aborted while waiting for tasks!", e); + if (aborted) + 
throw new ForeignException("Interrupted and found to be aborted while waiting for tasks!", + e); Thread.currentThread().interrupt(); } catch (ExecutionException e) { if (e.getCause() instanceof ForeignException) { @@ -165,7 +164,7 @@ public boolean waitForOutstandingTasks() throws ForeignException { throw new ForeignException(name, e.getCause()); } finally { // close off remaining tasks - for (Future f: futures) { + for (Future f : futures) { if (!f.isDone()) { f.cancel(true); } @@ -211,12 +210,12 @@ public SimpleSubprocedure(RegionServerServices rss, ProcedureMember member, } /** - * Callable task. - * TODO. We don't need a thread pool to execute roll log. This can be simplified + * Callable task. TODO. We don't need a thread pool to execute roll log. This can be simplified * with no use of subprocedurepool. */ class RSSimpleTask implements Callable { - RSSimpleTask() {} + RSSimpleTask() { + } @Override public Void call() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestFailedProcCleanup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestFailedProcCleanup.java index e05c0c4f81a8..42cfb792e22b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestFailedProcCleanup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestFailedProcCleanup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,6 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -140,9 +139,8 @@ public Optional getMasterObserver() { public static class CreateFailObserverHandler implements MasterCoprocessor, MasterObserver { @Override - public void preCreateTableAction( - final ObserverContext ctx, final TableDescriptor desc, - final RegionInfo[] regions) throws IOException { + public void preCreateTableAction(final ObserverContext ctx, + final TableDescriptor desc, final RegionInfo[] regions) throws IOException { if (desc.getTableName().equals(TABLE)) { throw new AccessDeniedException("Don't allow creation of table"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java index def375d12f54..296e3aa02b7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ /** * Demonstrate how Procedure handles single members, multiple members, and errors semantics */ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedure { @ClassRule @@ -63,8 +63,7 @@ static class LatchedProcedure extends Procedure { CountDownLatch completedProcedure = new CountDownLatch(1); public LatchedProcedure(ProcedureCoordinator coord, ForeignExceptionDispatcher monitor, - long wakeFreq, long timeout, String opName, byte[] data, - List expectedMembers) { + long wakeFreq, long timeout, String opName, byte[] data, List expectedMembers) { super(coord, monitor, wakeFreq, timeout, opName, data, expectedMembers); } @@ -85,13 +84,13 @@ public void sendGlobalBarrierComplete() { } /** - * With a single member, verify ordered execution. The Coordinator side is run in a separate + * With a single member, verify ordered execution. The Coordinator side is run in a separate * thread so we can only trigger from members and wait for particular state latches. */ @Test public void testSingleMember() throws Exception { // The member - List members = new ArrayList<>(); + List members = new ArrayList<>(); members.add("member"); LatchedProcedure proc = new LatchedProcedure(coord, new ForeignExceptionDispatcher(), 100, Integer.MAX_VALUE, "op", null, members); @@ -136,7 +135,7 @@ public void run() { @Test public void testMultipleMember() throws Exception { // 2 members - List members = new ArrayList<>(); + List members = new ArrayList<>(); members.add("member1"); members.add("member2"); @@ -188,10 +187,10 @@ public void run() { @Test public void testErrorPropagation() throws Exception { - List members = new ArrayList<>(); + List members = new ArrayList<>(); members.add("member"); - Procedure proc = new Procedure(coord, new ForeignExceptionDispatcher(), 100, - Integer.MAX_VALUE, "op", null, members); + Procedure proc = new Procedure(coord, new ForeignExceptionDispatcher(), 100, Integer.MAX_VALUE, + "op", null, members); final Procedure procspy = spy(proc); ForeignException cause = new ForeignException("SRC", "External Exception"); @@ -214,7 +213,7 @@ public void run() { @Test public void testBarrieredErrorPropagation() throws Exception { - List members = new ArrayList<>(); + List members = new ArrayList<>(); members.add("member"); LatchedProcedure proc = new LatchedProcedure(coord, new ForeignExceptionDispatcher(), 100, Integer.MAX_VALUE, "op", null, members); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java index 6bc35d68a9d2..41311f0e2292 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ * This only works correctly when we do class level parallelization of tests. If we do method * level serialization this class will likely throw all kinds of errors. 
*/ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureCoordinator { @ClassRule @@ -98,21 +98,21 @@ private ProcedureCoordinator buildNewCoordinator() { } /** - * Currently we can only handle one procedure at a time. This makes sure we handle that and - * reject submitting more. + * Currently we can only handle one procedure at a time. This makes sure we handle that and reject + * submitting more. */ @Test public void testThreadPoolSize() throws Exception { ProcedureCoordinator coordinator = buildNewCoordinator(); - Procedure proc = new Procedure(coordinator, monitor, - WAKE_FREQUENCY, TIMEOUT, procName, procData, expected); + Procedure proc = + new Procedure(coordinator, monitor, WAKE_FREQUENCY, TIMEOUT, procName, procData, expected); Procedure procSpy = spy(proc); - Procedure proc2 = new Procedure(coordinator, monitor, - WAKE_FREQUENCY, TIMEOUT, procName +"2", procData, expected); + Procedure proc2 = new Procedure(coordinator, monitor, WAKE_FREQUENCY, TIMEOUT, procName + "2", + procData, expected); Procedure procSpy2 = spy(proc2); when(coordinator.createProcedure(any(), eq(procName), eq(procData), anyListOf(String.class))) - .thenReturn(procSpy, procSpy2); + .thenReturn(procSpy, procSpy2); coordinator.startProcedure(procSpy.getErrorMonitor(), procName, procData, expected); // null here means second procedure failed to start. @@ -128,8 +128,8 @@ public void testUnreachableControllerDuringPrepare() throws Exception { coordinator = buildNewCoordinator(); // setup the proc List expected = Arrays.asList("cohort"); - Procedure proc = new Procedure(coordinator, WAKE_FREQUENCY, - TIMEOUT, procName, procData, expected); + Procedure proc = + new Procedure(coordinator, WAKE_FREQUENCY, TIMEOUT, procName, procData, expected); final Procedure procSpy = spy(proc); when(coordinator.createProcedure(any(), eq(procName), eq(procData), anyListOf(String.class))) @@ -137,18 +137,18 @@ public void testUnreachableControllerDuringPrepare() throws Exception { // use the passed controller responses IOException cause = new IOException("Failed to reach comms during acquire"); - doThrow(cause).when(controller) - .sendGlobalBarrierAcquire(eq(procSpy), eq(procData), anyListOf(String.class)); + doThrow(cause).when(controller).sendGlobalBarrierAcquire(eq(procSpy), eq(procData), + anyListOf(String.class)); // run the operation proc = coordinator.startProcedure(proc.getErrorMonitor(), procName, procData, expected); // and wait for it to finish - while(!proc.completedLatch.await(WAKE_FREQUENCY, TimeUnit.MILLISECONDS)); + while (!proc.completedLatch.await(WAKE_FREQUENCY, TimeUnit.MILLISECONDS)) + ; verify(procSpy, atLeastOnce()).receive(any()); verify(coordinator, times(1)).rpcConnectionFailure(anyString(), eq(cause)); verify(controller, times(1)).sendGlobalBarrierAcquire(procSpy, procData, expected); - verify(controller, never()).sendGlobalBarrierReached(any(), - anyListOf(String.class)); + verify(controller, never()).sendGlobalBarrierReached(any(), anyListOf(String.class)); } /** @@ -160,28 +160,29 @@ public void testUnreachableControllerDuringCommit() throws Exception { // setup the task and spy on it List expected = Arrays.asList("cohort"); - final Procedure spy = spy(new Procedure(coordinator, - WAKE_FREQUENCY, TIMEOUT, procName, procData, expected)); + final Procedure spy = + spy(new Procedure(coordinator, WAKE_FREQUENCY, TIMEOUT, procName, procData, expected)); when(coordinator.createProcedure(any(), eq(procName), eq(procData), 
anyListOf(String.class))) - .thenReturn(spy); + .thenReturn(spy); // use the passed controller responses IOException cause = new IOException("Failed to reach controller during prepare"); - doAnswer(new AcquireBarrierAnswer(procName, new String[] { "cohort" })) - .when(controller).sendGlobalBarrierAcquire(eq(spy), eq(procData), anyListOf(String.class)); + doAnswer(new AcquireBarrierAnswer(procName, new String[] { "cohort" })).when(controller) + .sendGlobalBarrierAcquire(eq(spy), eq(procData), anyListOf(String.class)); doThrow(cause).when(controller).sendGlobalBarrierReached(eq(spy), anyListOf(String.class)); // run the operation - Procedure task = coordinator.startProcedure(spy.getErrorMonitor(), procName, procData, expected); + Procedure task = + coordinator.startProcedure(spy.getErrorMonitor(), procName, procData, expected); // and wait for it to finish - while(!task.completedLatch.await(WAKE_FREQUENCY, TimeUnit.MILLISECONDS)); + while (!task.completedLatch.await(WAKE_FREQUENCY, TimeUnit.MILLISECONDS)) + ; verify(spy, atLeastOnce()).receive(any()); verify(coordinator, times(1)).rpcConnectionFailure(anyString(), eq(cause)); - verify(controller, times(1)).sendGlobalBarrierAcquire(eq(spy), - eq(procData), anyListOf(String.class)); - verify(controller, times(1)).sendGlobalBarrierReached(any(), - anyListOf(String.class)); + verify(controller, times(1)).sendGlobalBarrierAcquire(eq(spy), eq(procData), + anyListOf(String.class)); + verify(controller, times(1)).sendGlobalBarrierReached(any(), anyListOf(String.class)); } @Test @@ -201,8 +202,8 @@ public void testMultipleCohortOrchestration() throws Exception { public void runSimpleProcedure(String... members) throws Exception { coordinator = buildNewCoordinator(); - Procedure task = new Procedure(coordinator, monitor, WAKE_FREQUENCY, - TIMEOUT, procName, procData, Arrays.asList(members)); + Procedure task = new Procedure(coordinator, monitor, WAKE_FREQUENCY, TIMEOUT, procName, + procData, Arrays.asList(members)); final Procedure spy = spy(task); runCoordinatedProcedure(spy, members); } @@ -215,8 +216,8 @@ public void testEarlyJoiningBarrier() throws Exception { final String[] cohort = new String[] { "one", "two", "three", "four" }; coordinator = buildNewCoordinator(); final ProcedureCoordinator ref = coordinator; - Procedure task = new Procedure(coordinator, monitor, WAKE_FREQUENCY, - TIMEOUT, procName, procData, Arrays.asList(cohort)); + Procedure task = new Procedure(coordinator, monitor, WAKE_FREQUENCY, TIMEOUT, procName, + procData, Arrays.asList(cohort)); final Procedure spy = spy(task); AcquireBarrierAnswer prepare = new AcquireBarrierAnswer(procName, cohort) { @@ -249,8 +250,7 @@ public void doWork() { /** * Just run a procedure with the standard name and data, with not special task for the mock * coordinator (it works just like a regular coordinator). For custom behavior see - * {@link #runCoordinatedOperation(Procedure, AcquireBarrierAnswer, BarrierAnswer, String[])} - * . + * {@link #runCoordinatedOperation(Procedure, AcquireBarrierAnswer, BarrierAnswer, String[])} . * @param spy Spy on a real {@link Procedure} * @param cohort expected cohort members * @throws Exception on failure @@ -260,13 +260,13 @@ public void runCoordinatedProcedure(Procedure spy, String... cohort) throws Exce new BarrierAnswer(procName, cohort), cohort); } - public void runCoordinatedOperation(Procedure spy, AcquireBarrierAnswer prepare, - String... 
cohort) throws Exception { + public void runCoordinatedOperation(Procedure spy, AcquireBarrierAnswer prepare, String... cohort) + throws Exception { runCoordinatedOperation(spy, prepare, new BarrierAnswer(procName, cohort), cohort); } - public void runCoordinatedOperation(Procedure spy, BarrierAnswer commit, - String... cohort) throws Exception { + public void runCoordinatedOperation(Procedure spy, BarrierAnswer commit, String... cohort) + throws Exception { runCoordinatedOperation(spy, new AcquireBarrierAnswer(procName, cohort), commit, cohort); } @@ -274,15 +274,16 @@ public void runCoordinatedOperation(Procedure spy, AcquireBarrierAnswer prepareO BarrierAnswer commitOperation, String... cohort) throws Exception { List expected = Arrays.asList(cohort); when(coordinator.createProcedure(any(), eq(procName), eq(procData), anyListOf(String.class))) - .thenReturn(spy); + .thenReturn(spy); // use the passed controller responses doAnswer(prepareOperation).when(controller).sendGlobalBarrierAcquire(spy, procData, expected); - doAnswer(commitOperation).when(controller) - .sendGlobalBarrierReached(eq(spy), anyListOf(String.class)); + doAnswer(commitOperation).when(controller).sendGlobalBarrierReached(eq(spy), + anyListOf(String.class)); // run the operation - Procedure task = coordinator.startProcedure(spy.getErrorMonitor(), procName, procData, expected); + Procedure task = + coordinator.startProcedure(spy.getErrorMonitor(), procName, procData, expected); // and wait for it to finish task.waitForCompleted(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureDescriber.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureDescriber.java index 836c54d31c2a..993c37a1a094 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureDescriber.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureDescriber.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.BytesValue; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureDescriber { @ClassRule @@ -45,14 +45,13 @@ public class TestProcedureDescriber { public static class TestProcedure extends Procedure { @Override - protected Procedure[] execute(Object env) throws ProcedureYieldException, - ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(Object env) + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { return null; } @Override - protected void rollback(Object env) - throws IOException, InterruptedException { + protected void rollback(Object env) throws IOException, InterruptedException { } @Override @@ -61,16 +60,14 @@ protected boolean abort(Object env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { ByteString byteString = ByteString.copyFrom(new byte[] { 'A' }); BytesValue state = BytesValue.newBuilder().setValue(byteString).build(); serializer.serialize(state); } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } @@ -84,6 +81,7 @@ public void test() { assertEquals("{ ID => '-1', PARENT_ID => '-1', STATE => 'INITIALIZING', OWNER => '', " + "TYPE => 'org.apache.hadoop.hbase.procedure.TestProcedureDescriber$TestProcedure', " + "START_TIME => '" + epoch + "', LAST_UPDATE => '" + epoch + "', PARAMETERS => [ " - + "{ value => 'QQ==' } ] }", result); + + "{ value => 'QQ==' } ] }", + result); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureManager.java index 6aa4c51324b2..b2104a923e7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureManager { @ClassRule @@ -50,9 +50,9 @@ public static void setupBeforeClass() throws Exception { Configuration conf = util.getConfiguration(); conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, - SimpleMasterProcedureManager.class.getName()); + SimpleMasterProcedureManager.class.getName()); conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, - SimpleRSProcedureManager.class.getName()); + SimpleRSProcedureManager.class.getName()); util.startMiniCluster(NUM_RS); } @@ -67,8 +67,8 @@ public void testSimpleProcedureManager() throws IOException { Admin admin = util.getAdmin(); byte[] result = admin.execProcedureWithReturn(SimpleMasterProcedureManager.SIMPLE_SIGNATURE, - "mytest", new HashMap<>()); + "mytest", new HashMap<>()); assertArrayEquals("Incorrect return data from execProcedure", - Bytes.toBytes(SimpleMasterProcedureManager.SIMPLE_DATA), result); + Bytes.toBytes(SimpleMasterProcedureManager.SIMPLE_DATA), result); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java index 61146a6c7070..1e25c19bbdda 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ /** * Test the procedure member, and it's error handling mechanisms. 
*/ -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureMember { @ClassRule @@ -64,11 +64,10 @@ public class TestProcedureMember { private final String op = "some op"; private final byte[] data = new byte[0]; - private final ForeignExceptionDispatcher mockListener = Mockito - .spy(new ForeignExceptionDispatcher()); + private final ForeignExceptionDispatcher mockListener = + Mockito.spy(new ForeignExceptionDispatcher()); private final SubprocedureFactory mockBuilder = mock(SubprocedureFactory.class); - private final ProcedureMemberRpcs mockMemberComms = Mockito - .mock(ProcedureMemberRpcs.class); + private final ProcedureMemberRpcs mockMemberComms = Mockito.mock(ProcedureMemberRpcs.class); private ProcedureMember member; private ForeignExceptionDispatcher dispatcher; Subprocedure spySub; @@ -79,12 +78,11 @@ public class TestProcedureMember { @After public void resetTest() { reset(mockListener, mockBuilder, mockMemberComms); - if (member != null) - try { - member.close(); - } catch (IOException e) { - e.printStackTrace(); - } + if (member != null) try { + member.close(); + } catch (IOException e) { + e.printStackTrace(); + } } /** @@ -105,14 +103,14 @@ private void buildCohortMemberPair() throws IOException { String name = "node"; ThreadPoolExecutor pool = ProcedureMember.defaultPool(name, 1, POOL_KEEP_ALIVE); member = new ProcedureMember(mockMemberComms, pool, mockBuilder); - when(mockMemberComms.getMemberName()).thenReturn("membername"); // needed for generating exception + when(mockMemberComms.getMemberName()).thenReturn("membername"); // needed for generating + // exception Subprocedure subproc = new EmptySubprocedure(member, dispatcher); spySub = spy(subproc); when(mockBuilder.buildSubprocedure(op, data)).thenReturn(spySub); addCommitAnswer(); } - /** * Add a 'in barrier phase' response to the mock controller when it gets a acquired notification */ @@ -152,8 +150,7 @@ public void testSimpleRun() throws Exception { order.verify(mockMemberComms).sendMemberAcquired(eq(spy)); order.verify(spy).insideBarrier(); order.verify(mockMemberComms).sendMemberCompleted(eq(spy), eq(data)); - order.verify(mockMemberComms, never()).sendMemberAborted(eq(spy), - any()); + order.verify(mockMemberComms, never()).sendMemberAborted(eq(spy), any()); } /** @@ -165,13 +162,12 @@ public void testMemberPrepareException() throws Exception { buildCohortMemberPair(); // mock an exception on Subprocedure's prepare - doAnswer( - new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - throw new IOException("Forced IOException in member acquireBarrier"); - } - }).when(spySub).acquireBarrier(); + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + throw new IOException("Forced IOException in member acquireBarrier"); + } + }).when(spySub).acquireBarrier(); // run the operation // build a new operation @@ -200,13 +196,12 @@ public void testSendMemberAcquiredCommsFailure() throws Exception { buildCohortMemberPair(); // mock an exception on Subprocedure's prepare - doAnswer( - new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - throw new IOException("Forced IOException in member prepare"); - } - }).when(mockMemberComms).sendMemberAcquired(any()); + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + throw new IOException("Forced IOException in member 
prepare"); + } + }).when(mockMemberComms).sendMemberAcquired(any()); // run the operation // build a new operation @@ -229,28 +224,27 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } /** - * Fail correctly if coordinator aborts the procedure. The subprocedure will not interrupt a - * running {@link Subprocedure#acquireBarrier()} -- prepare needs to finish first, and the the abort - * is checked. Thus, the {@link Subprocedure#acquireBarrier()} should succeed but later get rolled back - * via {@link Subprocedure#cleanup}. + * Fail correctly if coordinator aborts the procedure. The subprocedure will not interrupt a + * running {@link Subprocedure#acquireBarrier()} -- prepare needs to finish first, and the the + * abort is checked. Thus, the {@link Subprocedure#acquireBarrier()} should succeed but later get + * rolled back via {@link Subprocedure#cleanup}. */ @Test public void testCoordinatorAbort() throws Exception { buildCohortMemberPair(); // mock that another node timed out or failed to prepare - final TimeoutException oate = new TimeoutException("bogus timeout", 1,2,0); - doAnswer( - new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - // inject a remote error (this would have come from an external thread) - spySub.cancel("bogus message", oate); - // sleep the wake frequency since that is what we promised - Thread.sleep(WAKE_FREQUENCY); - return null; - } - }).when(spySub).waitForReachedGlobalBarrier(); + final TimeoutException oate = new TimeoutException("bogus timeout", 1, 2, 0); + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + // inject a remote error (this would have come from an external thread) + spySub.cancel("bogus message", oate); + // sleep the wake frequency since that is what we promised + Thread.sleep(WAKE_FREQUENCY); + return null; + } + }).when(spySub).waitForReachedGlobalBarrier(); // run the operation // build a new operation @@ -272,25 +266,22 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } /** - * Handle failures if a member's commit phase fails. - * - * NOTE: This is the core difference that makes this different from traditional 2PC. In true - * 2PC the transaction is committed just before the coordinator sends commit messages to the - * member. Members are then responsible for reading its TX log. This implementation actually - * rolls back, and thus breaks the normal TX guarantees. - */ + * Handle failures if a member's commit phase fails. NOTE: This is the core difference that makes + * this different from traditional 2PC. In true 2PC the transaction is committed just before the + * coordinator sends commit messages to the member. Members are then responsible for reading its + * TX log. This implementation actually rolls back, and thus breaks the normal TX guarantees. 
+ */ @Test public void testMemberCommitException() throws Exception { buildCohortMemberPair(); // mock an exception on Subprocedure's prepare - doAnswer( - new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - throw new IOException("Forced IOException in member prepare"); - } - }).when(spySub).insideBarrier(); + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + throw new IOException("Forced IOException in member prepare"); + } + }).when(spySub).insideBarrier(); // run the operation // build a new operation @@ -313,28 +304,26 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } /** - * Handle Failures if a member's commit phase succeeds but notification to coordinator fails - * - * NOTE: This is the core difference that makes this different from traditional 2PC. In true - * 2PC the transaction is committed just before the coordinator sends commit messages to the - * member. Members are then responsible for reading its TX log. This implementation actually - * rolls back, and thus breaks the normal TX guarantees. - */ + * Handle Failures if a member's commit phase succeeds but notification to coordinator fails NOTE: + * This is the core difference that makes this different from traditional 2PC. In true 2PC the + * transaction is committed just before the coordinator sends commit messages to the member. + * Members are then responsible for reading its TX log. This implementation actually rolls back, + * and thus breaks the normal TX guarantees. + */ @Test public void testMemberCommitCommsFailure() throws Exception { buildCohortMemberPair(); - final TimeoutException oate = new TimeoutException("bogus timeout",1,2,0); - doAnswer( - new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - // inject a remote error (this would have come from an external thread) - spySub.cancel("commit comms fail", oate); - // sleep the wake frequency since that is what we promised - Thread.sleep(WAKE_FREQUENCY); - return null; - } - }).when(mockMemberComms).sendMemberCompleted(any(), eq(data)); + final TimeoutException oate = new TimeoutException("bogus timeout", 1, 2, 0); + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + // inject a remote error (this would have come from an external thread) + spySub.cancel("commit comms fail", oate); + // sleep the wake frequency since that is what we promised + Thread.sleep(WAKE_FREQUENCY); + return null; + } + }).when(mockMemberComms).sendMemberCompleted(any(), eq(data)); // run the operation // build a new operation @@ -374,9 +363,8 @@ public void testPropagateConnectionErrorBackToManager() throws Exception { // fail during the prepare phase doThrow(new ForeignException("SRC", "prepare exception")).when(spy).acquireBarrier(); // and throw a connection error when we try to tell the controller about it - doThrow(new IOException("Controller is down!")).when(mockMemberComms) - .sendMemberAborted(eq(spy), any()); - + doThrow(new IOException("Controller is down!")).when(mockMemberComms).sendMemberAborted(eq(spy), + any()); // run the operation // build a new operation @@ -393,10 +381,10 @@ public void testPropagateConnectionErrorBackToManager() throws Exception { // TODO Need to do another refactor to get this to propagate to the coordinator. 
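The NOTE in the two javadocs above spells out how this protocol differs from textbook two-phase commit: a member performs its "commit" work inside the barrier before the coordinator-wide completion is acknowledged, and on a later error it rolls back locally rather than replaying a transaction log. A minimal sketch of that member-side flow; the method names mirror Subprocedure's acquireBarrier/insideBarrier/cleanup, but the interface itself is a simplified, hypothetical stand-in, not the HBase API.

    // Hypothetical member-side flow: commit work happens inside the barrier, and any
    // failure afterwards is handled by a best-effort local rollback (cleanup), which is
    // what breaks the usual transactional guarantees of classic 2PC.
    interface BarrierMember {
      void acquireBarrier() throws Exception; // prepare phase
      void insideBarrier() throws Exception;  // "commit" work
      void cleanup(Exception cause);          // local rollback on error
    }

    final class MemberRunner {
      static void run(BarrierMember m) {
        try {
          m.acquireBarrier();
          m.insideBarrier();
        } catch (Exception e) {
          m.cleanup(e); // roll back locally; the coordinator is notified of the error separately
        }
      }
    }
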
// make sure we pass a remote exception back the controller -// order.verify(mockMemberComms).sendMemberAborted(eq(spy), -// any()); -// order.verify(dispSpy).receiveError(anyString(), -// any(), any()); + // order.verify(mockMemberComms).sendMemberAborted(eq(spy), + // any()); + // order.verify(dispSpy).receiveError(anyString(), + // any(), any()); } /** @@ -407,8 +395,9 @@ public void testPropagateConnectionErrorBackToManager() throws Exception { @Test public void testNoTaskToBeRunFromRequest() throws Exception { ThreadPoolExecutor pool = mock(ThreadPoolExecutor.class); - when(mockBuilder.buildSubprocedure(op, data)).thenReturn(null) - .thenThrow(new IllegalStateException("Wrong state!"), new IllegalArgumentException("can't understand the args")); + when(mockBuilder.buildSubprocedure(op, data)).thenReturn(null).thenThrow( + new IllegalStateException("Wrong state!"), + new IllegalArgumentException("can't understand the args")); member = new ProcedureMember(mockMemberComms, pool, mockBuilder); // builder returns null // build a new operation @@ -441,8 +430,8 @@ public void testNoTaskToBeRunFromRequest() throws Exception { */ public class EmptySubprocedure extends SubprocedureImpl { public EmptySubprocedure(ProcedureMember member, ForeignExceptionDispatcher dispatcher) { - super( member, op, dispatcher, - // TODO 1000000 is an arbitrary number that I picked. + super(member, op, dispatcher, + // TODO 1000000 is an arbitrary number that I picked. WAKE_FREQUENCY, TIMEOUT); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java index b68c6fdc8de1..d5ed077948b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,7 +63,7 @@ /** * Cluster-wide testing of a distributed three-phase commit using a 'real' zookeeper cluster */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestZKProcedure { @ClassRule @@ -95,8 +95,8 @@ private static ZKWatcher newZooKeeperWatcher() throws IOException { return new ZKWatcher(UTIL.getConfiguration(), "testing utility", new Abortable() { @Override public void abort(String why, Throwable e) { - throw new RuntimeException( - "Unexpected abort in distributed three phase commit test:" + why, e); + throw new RuntimeException("Unexpected abort in distributed three phase commit test:" + why, + e); } @Override @@ -118,7 +118,7 @@ public void testSingleMember() throws Exception { @Test public void testMultipleMembers() throws Exception { - runCommit("one", "two", "three", "four" ); + runCommit("one", "two", "three", "four"); } private void runCommit(String... members) throws Exception { @@ -133,13 +133,14 @@ private void runCommit(String... 
members) throws Exception { String opDescription = "coordination test - " + members.length + " cohort members"; // start running the controller - ZKProcedureCoordinator coordinatorComms = new ZKProcedureCoordinator( - coordZkw, opDescription, COORDINATOR_NODE_NAME); - ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE); + ZKProcedureCoordinator coordinatorComms = + new ZKProcedureCoordinator(coordZkw, opDescription, COORDINATOR_NODE_NAME); + ThreadPoolExecutor pool = + ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE); ProcedureCoordinator coordinator = new ProcedureCoordinator(coordinatorComms, pool) { @Override - public Procedure createProcedure(ForeignExceptionDispatcher fed, String procName, byte[] procArgs, - List expectedMembers) { + public Procedure createProcedure(ForeignExceptionDispatcher fed, String procName, + byte[] procArgs, List expectedMembers) { return Mockito.spy(super.createProcedure(fed, procName, procArgs, expectedMembers)); } }; @@ -147,7 +148,8 @@ public Procedure createProcedure(ForeignExceptionDispatcher fed, String procName // build and start members // NOTE: There is a single subprocedure builder for all members here. SubprocedureFactory subprocFactory = Mockito.mock(SubprocedureFactory.class); - List> procMembers = new ArrayList<>(members.length); + List> procMembers = + new ArrayList<>(members.length); // start each member for (String member : members) { ZKWatcher watcher = newZooKeeperWatcher(); @@ -162,17 +164,15 @@ public Procedure createProcedure(ForeignExceptionDispatcher fed, String procName final List subprocs = new ArrayList<>(); for (int i = 0; i < procMembers.size(); i++) { ForeignExceptionDispatcher cohortMonitor = new ForeignExceptionDispatcher(); - Subprocedure commit = Mockito - .spy(new SubprocedureImpl(procMembers.get(i).getFirst(), opName, cohortMonitor, - WAKE_FREQUENCY, TIMEOUT)); + Subprocedure commit = Mockito.spy(new SubprocedureImpl(procMembers.get(i).getFirst(), opName, + cohortMonitor, WAKE_FREQUENCY, TIMEOUT)); subprocs.add(commit); } // link subprocedure to buildNewOperation invocation. 
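The comment above wires the mocked SubprocedureFactory so that each buildSubprocedure call hands back the next pre-built spy, and the AtomicInteger in the code below keeps that hand-out safe when members run concurrently. The same idea in isolation, with hypothetical names:

    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical factory: hands out one pre-built task per call, safely across threads.
    // Assumes it is called at most prebuilt.size() times.
    final class RoundRobinFactory<T> {
      private final List<T> prebuilt;
      private final AtomicInteger next = new AtomicInteger(0); // a plain int would be racy here

      RoundRobinFactory(List<T> prebuilt) {
        this.prebuilt = prebuilt;
      }

      T build() {
        return prebuilt.get(next.getAndIncrement());
      }
    }
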
final AtomicInteger i = new AtomicInteger(0); // NOTE: would be racy if not an AtomicInteger Mockito.when(subprocFactory.buildSubprocedure(Mockito.eq(opName), - (byte[]) Mockito.argThat(new ArrayEquals(data)))).thenAnswer( - new Answer() { + (byte[]) Mockito.argThat(new ArrayEquals(data)))).thenAnswer(new Answer() { @Override public Subprocedure answer(InvocationOnMock invocation) throws Throwable { int index = i.getAndIncrement(); @@ -183,15 +183,17 @@ public Subprocedure answer(InvocationOnMock invocation) throws Throwable { }); // setup spying on the coordinator -// Procedure proc = Mockito.spy(procBuilder.createProcedure(coordinator, opName, data, expected)); -// Mockito.when(procBuilder.build(coordinator, opName, data, expected)).thenReturn(proc); + // Procedure proc = Mockito.spy(procBuilder.createProcedure(coordinator, opName, data, + // expected)); + // Mockito.when(procBuilder.build(coordinator, opName, data, expected)).thenReturn(proc); // start running the operation - Procedure task = coordinator.startProcedure(new ForeignExceptionDispatcher(), opName, data, expected); -// assertEquals("Didn't mock coordinator task", proc, task); + Procedure task = + coordinator.startProcedure(new ForeignExceptionDispatcher(), opName, data, expected); + // assertEquals("Didn't mock coordinator task", proc, task); // verify all things ran as expected -// waitAndVerifyProc(proc, once, once, never(), once, false); + // waitAndVerifyProc(proc, once, once, never(), once, false); waitAndVerifyProc(task, once, once, never(), once, false); verifyCohortSuccessful(expected, subprocFactory, subprocs, once, once, never(), once, false); @@ -214,9 +216,10 @@ public void testMultiCohortWithMemberTimeoutDuringPrepare() throws Exception { // start running the coordinator and its controller ZKWatcher coordinatorWatcher = newZooKeeperWatcher(); - ZKProcedureCoordinator coordinatorController = new ZKProcedureCoordinator( - coordinatorWatcher, opDescription, COORDINATOR_NODE_NAME); - ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE); + ZKProcedureCoordinator coordinatorController = + new ZKProcedureCoordinator(coordinatorWatcher, opDescription, COORDINATOR_NODE_NAME); + ThreadPoolExecutor pool = + ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE); ProcedureCoordinator coordinator = spy(new ProcedureCoordinator(coordinatorController, pool)); // start a member for each node @@ -237,8 +240,8 @@ public void testMultiCohortWithMemberTimeoutDuringPrepare() throws Exception { for (int i = 0; i < members.size(); i++) { ForeignExceptionDispatcher cohortMonitor = new ForeignExceptionDispatcher(); final ProcedureMember comms = members.get(i).getFirst(); - Subprocedure commit = Mockito - .spy(new SubprocedureImpl(comms, opName, cohortMonitor, WAKE_FREQUENCY, TIMEOUT)); + Subprocedure commit = + Mockito.spy(new SubprocedureImpl(comms, opName, cohortMonitor, WAKE_FREQUENCY, TIMEOUT)); // This nasty bit has one of the impls throw a TimeoutException Mockito.doAnswer(new Answer() { @Override @@ -246,8 +249,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { int index = elem[0]; if (index == memberErrorIndex) { LOG.debug("Sending error to coordinator"); - ForeignException remoteCause = new ForeignException("TIMER", - new TimeoutException("subprocTimeout" , 1, 2, 0)); + ForeignException remoteCause = + new ForeignException("TIMER", new TimeoutException("subprocTimeout", 1, 2, 0)); Subprocedure r = ((Subprocedure) 
invocation.getMock()); LOG.error("Remote commit failure, not propagating error:" + remoteCause); comms.receiveAbortProcedure(r.getName(), remoteCause); @@ -255,10 +258,11 @@ public Void answer(InvocationOnMock invocation) throws Throwable { // don't complete the error phase until the coordinator has gotten the error // notification (which ensures that we never progress past prepare) try { - Procedure.waitForLatch(coordinatorReceivedErrorLatch, new ForeignExceptionDispatcher(), - WAKE_FREQUENCY, "coordinator received error"); + Procedure.waitForLatch(coordinatorReceivedErrorLatch, + new ForeignExceptionDispatcher(), WAKE_FREQUENCY, "coordinator received error"); } catch (InterruptedException e) { - LOG.debug("Wait for latch interrupted, done:" + (coordinatorReceivedErrorLatch.getCount() == 0)); + LOG.debug("Wait for latch interrupted, done:" + + (coordinatorReceivedErrorLatch.getCount() == 0)); // reset the interrupt status on the thread Thread.currentThread().interrupt(); } @@ -272,10 +276,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { // pass out a task per member final AtomicInteger taskIndex = new AtomicInteger(); - Mockito.when( - subprocFactory.buildSubprocedure(Mockito.eq(opName), - (byte[]) Mockito.argThat(new ArrayEquals(data)))).thenAnswer( - new Answer() { + Mockito.when(subprocFactory.buildSubprocedure(Mockito.eq(opName), + (byte[]) Mockito.argThat(new ArrayEquals(data)))).thenAnswer(new Answer() { @Override public Subprocedure answer(InvocationOnMock invocation) throws Throwable { int index = taskIndex.getAndIncrement(); @@ -285,13 +287,12 @@ public Subprocedure answer(InvocationOnMock invocation) throws Throwable { }); // setup spying on the coordinator - ForeignExceptionDispatcher coordinatorTaskErrorMonitor = Mockito - .spy(new ForeignExceptionDispatcher()); - Procedure coordinatorTask = Mockito.spy(new Procedure(coordinator, - coordinatorTaskErrorMonitor, WAKE_FREQUENCY, TIMEOUT, - opName, data, expected)); + ForeignExceptionDispatcher coordinatorTaskErrorMonitor = + Mockito.spy(new ForeignExceptionDispatcher()); + Procedure coordinatorTask = Mockito.spy(new Procedure(coordinator, coordinatorTaskErrorMonitor, + WAKE_FREQUENCY, TIMEOUT, opName, data, expected)); when(coordinator.createProcedure(any(), eq(opName), eq(data), anyListOf(String.class))) - .thenReturn(coordinatorTask); + .thenReturn(coordinatorTask); // count down the error latch when we get the remote error Mockito.doAnswer(new Answer() { @Override @@ -308,7 +309,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { // start running the operation // ---------------------------- - Procedure task = coordinator.startProcedure(coordinatorTaskErrorMonitor, opName, data, expected); + Procedure task = + coordinator.startProcedure(coordinatorTaskErrorMonitor, opName, data, expected); assertEquals("Didn't mock coordinator task", coordinatorTask, task); // wait for the task to complete @@ -325,8 +327,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { // always expect prepared, never committed, and possible to have cleanup and finish (racy since // error case) waitAndVerifyProc(coordinatorTask, once, never(), once, atMost(1), true); - verifyCohortSuccessful(expected, subprocFactory, cohortTasks, once, never(), once, - once, true); + verifyCohortSuccessful(expected, subprocFactory, cohortTasks, once, never(), once, once, true); // close all the open things closeAll(coordinator, coordinatorController, members); @@ -342,9 +343,8 @@ public Void 
answer(InvocationOnMock invocation) throws Throwable { * @param opHasError the operation error state * @throws Exception on unexpected failure */ - private void waitAndVerifyProc(Procedure proc, VerificationMode prepare, - VerificationMode commit, VerificationMode cleanup, VerificationMode finish, boolean opHasError) - throws Exception { + private void waitAndVerifyProc(Procedure proc, VerificationMode prepare, VerificationMode commit, + VerificationMode cleanup, VerificationMode finish, boolean opHasError) throws Exception { boolean caughtError = false; try { proc.waitForCompleted(); @@ -355,8 +355,8 @@ private void waitAndVerifyProc(Procedure proc, VerificationMode prepare, Mockito.verify(proc, prepare).sendGlobalBarrierStart(); Mockito.verify(proc, commit).sendGlobalBarrierReached(); Mockito.verify(proc, finish).sendGlobalBarrierComplete(); - assertEquals("Operation error state was unexpected", opHasError, proc.getErrorMonitor() - .hasException()); + assertEquals("Operation error state was unexpected", opHasError, + proc.getErrorMonitor().hasException()); assertEquals("Operation error state was unexpected", opHasError, caughtError); } @@ -372,8 +372,8 @@ private void waitAndVerifyProc(Procedure proc, VerificationMode prepare, * @throws Exception on unexpected failure */ private void waitAndVerifySubproc(Subprocedure op, VerificationMode prepare, - VerificationMode commit, VerificationMode cleanup, VerificationMode finish, boolean opHasError) - throws Exception { + VerificationMode commit, VerificationMode cleanup, VerificationMode finish, + boolean opHasError) throws Exception { boolean caughtError = false; try { op.waitForLocallyCompleted(); @@ -385,20 +385,19 @@ private void waitAndVerifySubproc(Subprocedure op, VerificationMode prepare, Mockito.verify(op, commit).insideBarrier(); // We cannot guarantee that cleanup has run so we don't check it. 
- assertEquals("Operation error state was unexpected", opHasError, op.getErrorCheckable() - .hasException()); + assertEquals("Operation error state was unexpected", opHasError, + op.getErrorCheckable().hasException()); assertEquals("Operation error state was unexpected", opHasError, caughtError); } - private void verifyCohortSuccessful(List cohortNames, - SubprocedureFactory subprocFactory, Iterable cohortTasks, - VerificationMode prepare, VerificationMode commit, VerificationMode cleanup, - VerificationMode finish, boolean opHasError) throws Exception { + private void verifyCohortSuccessful(List cohortNames, SubprocedureFactory subprocFactory, + Iterable cohortTasks, VerificationMode prepare, VerificationMode commit, + VerificationMode cleanup, VerificationMode finish, boolean opHasError) throws Exception { // make sure we build the correct number of cohort members - Mockito.verify(subprocFactory, Mockito.times(cohortNames.size())).buildSubprocedure( - Mockito.eq(opName), (byte[]) Mockito.argThat(new ArrayEquals(data))); + Mockito.verify(subprocFactory, Mockito.times(cohortNames.size())) + .buildSubprocedure(Mockito.eq(opName), (byte[]) Mockito.argThat(new ArrayEquals(data))); // verify that we ran each of the operations cleanly int j = 0; for (Subprocedure op : cohortTasks) { @@ -407,11 +406,9 @@ private void verifyCohortSuccessful(List cohortNames, } } - private void closeAll( - ProcedureCoordinator coordinator, + private void closeAll(ProcedureCoordinator coordinator, ZKProcedureCoordinator coordinatorController, - List> cohort) - throws IOException { + List> cohort) throws IOException { // make sure we close all the resources for (Pair member : cohort) { member.getFirst().close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java index 9d1c4a614545..83ee49705a2e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ /** * Test zookeeper-based, procedure controllers */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestZKProcedureControllers { @ClassRule @@ -97,12 +97,10 @@ public void testSimpleZKCohortMemberController() throws Exception { final CountDownLatch committed = new CountDownLatch(1); final ForeignExceptionDispatcher monitor = spy(new ForeignExceptionDispatcher()); - final ZKProcedureMemberRpcs controller = new ZKProcedureMemberRpcs( - watcher, "testSimple"); + final ZKProcedureMemberRpcs controller = new ZKProcedureMemberRpcs(watcher, "testSimple"); // mock out cohort member callbacks - final ProcedureMember member = Mockito - .mock(ProcedureMember.class); + final ProcedureMember member = Mockito.mock(ProcedureMember.class); Mockito.doReturn(sub).when(member).createSubprocedure(operationName, data); Mockito.doAnswer(new Answer() { @Override @@ -125,13 +123,15 @@ public Void answer(InvocationOnMock invocation) throws Throwable { controller.start(COHORT_NODE_NAME, member); // set a prepare node from a 'coordinator' - String prepare = ZKProcedureUtil.getAcquireBarrierNode(controller.getZkController(), operationName); + String prepare = + ZKProcedureUtil.getAcquireBarrierNode(controller.getZkController(), operationName); ZKUtil.createSetData(watcher, prepare, ProtobufUtil.prependPBMagic(data)); // wait for the operation to be prepared prepared.await(); // create the commit node so we update the operation to enter the commit phase - String commit = ZKProcedureUtil.getReachedBarrierNode(controller.getZkController(), operationName); + String commit = + ZKProcedureUtil.getReachedBarrierNode(controller.getZkController(), operationName); LOG.debug("Found prepared, posting commit node:" + commit); ZKUtil.createAndFailSilent(watcher, commit); LOG.debug("Commit node:" + commit + ", exists:" + ZKUtil.checkExists(watcher, commit)); @@ -139,8 +139,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { verify(monitor, never()).receive(Mockito.any()); // XXX: broken due to composition. 
-// verify(member, never()).getManager().controllerConnectionFailure(Mockito.anyString(), -// Mockito.any()); + // verify(member, never()).getManager().controllerConnectionFailure(Mockito.anyString(), + // Mockito.any()); // cleanup after the test ZKUtil.deleteNodeRecursively(watcher, controller.getZkController().getBaseZnode()); assertEquals("Didn't delete prepare node", -1, ZKUtil.checkExists(watcher, prepare)); @@ -189,13 +189,13 @@ private void runMockCommitWithOrchestratedControllers(StartControllers controlle ArrayList dataFromMembers = new ArrayList<>(); // mock out coordinator so we can keep track of zk progress - ProcedureCoordinator coordinator = setupMockCoordinator(operationName, - prepared, committed, dataFromMembers); + ProcedureCoordinator coordinator = + setupMockCoordinator(operationName, prepared, committed, dataFromMembers); ProcedureMember member = Mockito.mock(ProcedureMember.class); - Pair> pair = controllers - .start(watcher, operationName, coordinator, CONTROLLER_NODE_NAME, member, expected); + Pair> pair = controllers.start(watcher, + operationName, coordinator, CONTROLLER_NODE_NAME, member, expected); ZKProcedureCoordinator controller = pair.getFirst(); List cohortControllers = pair.getSecond(); // start the operation @@ -212,8 +212,8 @@ private void runMockCommitWithOrchestratedControllers(StartControllers controlle // wait for all the notifications to reach the coordinator prepared.await(); // make sure we got the all the nodes and no more - Mockito.verify(coordinator, times(expected.size())).memberAcquiredBarrier(Mockito.eq(operationName), - Mockito.anyString()); + Mockito.verify(coordinator, times(expected.size())) + .memberAcquiredBarrier(Mockito.eq(operationName), Mockito.anyString()); // kick off the commit phase controller.sendGlobalBarrierReached(p, expected); @@ -226,8 +226,8 @@ private void runMockCommitWithOrchestratedControllers(StartControllers controlle // wait for all commit notifications to reach the coordinator committed.await(); // make sure we got the all the nodes and no more - Mockito.verify(coordinator, times(expected.size())).memberFinishedBarrier(Mockito.eq(operationName), - Mockito.anyString(), Mockito.eq(memberData)); + Mockito.verify(coordinator, times(expected.size())).memberFinishedBarrier( + Mockito.eq(operationName), Mockito.anyString(), Mockito.eq(memberData)); assertEquals("Incorrect number of members returnd data", expected.size(), dataFromMembers.size()); @@ -244,13 +244,13 @@ private void runMockCommitWithOrchestratedControllers(StartControllers controlle } // TODO Broken by composition. -// @Test -// public void testCoordinatorControllerHandlesEarlyPrepareNodes() throws Exception { -// runEarlyPrepareNodes(startCoordinatorFirst, "testEarlyPreparenodes", new byte[] { 1, 2, 3 }, -// "cohort1", "cohort2"); -// runEarlyPrepareNodes(startCohortFirst, "testEarlyPreparenodes", new byte[] { 1, 2, 3 }, -// "cohort1", "cohort2"); -// } + // @Test + // public void testCoordinatorControllerHandlesEarlyPrepareNodes() throws Exception { + // runEarlyPrepareNodes(startCoordinatorFirst, "testEarlyPreparenodes", new byte[] { 1, 2, 3 }, + // "cohort1", "cohort2"); + // runEarlyPrepareNodes(startCohortFirst, "testEarlyPreparenodes", new byte[] { 1, 2, 3 }, + // "cohort1", "cohort2"); + // } public void runEarlyPrepareNodes(StartControllers controllers, String operationName, byte[] data, String... 
cohort) throws Exception { @@ -265,15 +265,15 @@ public void runEarlyPrepareNodes(StartControllers controllers, String operationN ArrayList dataFromMembers = new ArrayList<>(); // mock out coordinator so we can keep track of zk progress - ProcedureCoordinator coordinator = setupMockCoordinator(operationName, - prepared, committed, dataFromMembers); + ProcedureCoordinator coordinator = + setupMockCoordinator(operationName, prepared, committed, dataFromMembers); ProcedureMember member = Mockito.mock(ProcedureMember.class); Procedure p = Mockito.mock(Procedure.class); Mockito.when(p.getName()).thenReturn(operationName); - Pair> pair = controllers - .start(watcher, operationName, coordinator, CONTROLLER_NODE_NAME, member, expected); + Pair> pair = controllers.start(watcher, + operationName, coordinator, CONTROLLER_NODE_NAME, member, expected); ZKProcedureCoordinator controller = pair.getFirst(); List cohortControllers = pair.getSecond(); @@ -293,8 +293,8 @@ public void runEarlyPrepareNodes(StartControllers controllers, String operationN // wait for all the notifications to reach the coordinator prepared.await(); // make sure we got the all the nodes and no more - Mockito.verify(coordinator, times(expected.size())).memberAcquiredBarrier(Mockito.eq(operationName), - Mockito.anyString()); + Mockito.verify(coordinator, times(expected.size())) + .memberAcquiredBarrier(Mockito.eq(operationName), Mockito.anyString()); // kick off the commit phase controller.sendGlobalBarrierReached(p, expected); @@ -307,8 +307,8 @@ public void runEarlyPrepareNodes(StartControllers controllers, String operationN // wait for all commit notifications to reach the coordiantor committed.await(); // make sure we got the all the nodes and no more - Mockito.verify(coordinator, times(expected.size())).memberFinishedBarrier(Mockito.eq(operationName), - Mockito.anyString(), Mockito.eq(memberData)); + Mockito.verify(coordinator, times(expected.size())).memberFinishedBarrier( + Mockito.eq(operationName), Mockito.anyString(), Mockito.eq(memberData)); controller.resetMembers(p); @@ -320,14 +320,13 @@ public void runEarlyPrepareNodes(StartControllers controllers, String operationN /** * @param dataFromMembers - * @return a mock {@link ProcedureCoordinator} that just counts down the - * prepared and committed latch for called to the respective method + * @return a mock {@link ProcedureCoordinator} that just counts down the prepared and committed + * latch for called to the respective method */ private ProcedureCoordinator setupMockCoordinator(String operationName, final CountDownLatch prepared, final CountDownLatch committed, final ArrayList dataFromMembers) { - ProcedureCoordinator coordinator = Mockito - .mock(ProcedureCoordinator.class); + ProcedureCoordinator coordinator = Mockito.mock(ProcedureCoordinator.class); Mockito.doAnswer(new Answer() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { @@ -363,20 +362,19 @@ private void verifyZooKeeperClean(String operationName, ZKWatcher watcher, /** * Verify the cohort controller got called once per expected node to start the operation */ - private void verifyCohort(ProcedureMember member, int cohortSize, - String operationName, byte[] data) { -// verify(member, Mockito.times(cohortSize)).submitSubprocedure(Mockito.eq(operationName), -// (byte[]) Mockito.argThat(new ArrayEquals(data))); - Mockito.verify(member, - Mockito.atLeast(cohortSize)).submitSubprocedure(Mockito.any()); + private void verifyCohort(ProcedureMember member, int cohortSize, String 
operationName, + byte[] data) { + // verify(member, Mockito.times(cohortSize)).submitSubprocedure(Mockito.eq(operationName), + // (byte[]) Mockito.argThat(new ArrayEquals(data))); + Mockito.verify(member, Mockito.atLeast(cohortSize)).submitSubprocedure(Mockito.any()); } /** * Verify that the coordinator only got called once for each expected node */ - private void verifyCoordinator(String operationName, - ProcedureCoordinator coordinator, List expected) { + private void verifyCoordinator(String operationName, ProcedureCoordinator coordinator, + List expected) { // verify that we got all the expected nodes for (String node : expected) { verify(coordinator, once).memberAcquiredBarrier(operationName, node); @@ -389,21 +387,19 @@ private void verifyCoordinator(String operationName, */ private abstract class StartControllers { public abstract Pair> start( - ZKWatcher watcher, String operationName, - ProcedureCoordinator coordinator, String controllerName, - ProcedureMember member, List cohortNames) throws Exception; + ZKWatcher watcher, String operationName, ProcedureCoordinator coordinator, + String controllerName, ProcedureMember member, List cohortNames) throws Exception; } private final StartControllers startCoordinatorFirst = new StartControllers() { @Override - public Pair> start( - ZKWatcher watcher, String operationName, - ProcedureCoordinator coordinator, String controllerName, - ProcedureMember member, List expected) throws Exception { + public Pair> start(ZKWatcher watcher, + String operationName, ProcedureCoordinator coordinator, String controllerName, + ProcedureMember member, List expected) throws Exception { // start the controller - ZKProcedureCoordinator controller = new ZKProcedureCoordinator( - watcher, operationName, CONTROLLER_NODE_NAME); + ZKProcedureCoordinator controller = + new ZKProcedureCoordinator(watcher, operationName, CONTROLLER_NODE_NAME); controller.start(coordinator); // make a cohort controller for each expected node @@ -425,10 +421,9 @@ public Pair> start( private final StartControllers startCohortFirst = new StartControllers() { @Override - public Pair> start( - ZKWatcher watcher, String operationName, - ProcedureCoordinator coordinator, String controllerName, - ProcedureMember member, List expected) throws Exception { + public Pair> start(ZKWatcher watcher, + String operationName, ProcedureCoordinator coordinator, String controllerName, + ProcedureMember member, List expected) throws Exception { // make a cohort controller for each expected node List cohortControllers = new ArrayList<>(); @@ -439,8 +434,8 @@ public Pair> start( } // start the controller - ZKProcedureCoordinator controller = new ZKProcedureCoordinator( - watcher, operationName, CONTROLLER_NODE_NAME); + ZKProcedureCoordinator controller = + new ZKProcedureCoordinator(watcher, operationName, CONTROLLER_NODE_NAME); controller.start(coordinator); return new Pair<>(controller, cohortControllers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java index 1a9fee8bca75..4aaf19793213 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache 
Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.lang.management.MemoryType; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -36,14 +35,14 @@ import org.apache.hadoop.hbase.util.Pair; public class RegionProcedureStorePerformanceEvaluation - extends ProcedureStorePerformanceEvaluation { + extends ProcedureStorePerformanceEvaluation { private static final class DummyServer extends MockServer { private final Configuration conf; private final ServerName serverName = - ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()); + ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()); public DummyServer(Configuration conf) { this.conf = conf; @@ -67,10 +66,10 @@ protected RegionProcedureStore createProcedureStore(Path storeDir) throws IOExce Pair pair = MemorySizeUtil.getGlobalMemStoreSize(conf); long globalMemStoreSize = pair.getFirst(); boolean offheap = pair.getSecond() == MemoryType.NON_HEAP; - float poolSizePercentage = offheap ? 1.0F : - conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT); - float initialCountPercentage = - conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT); + float poolSizePercentage = offheap ? 1.0F + : conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT); + float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY, + MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT); int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT); float indexChunkSizePercent = conf.getFloat(MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_KEY, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); @@ -102,7 +101,7 @@ protected void postStop(RegionProcedureStore store) throws IOException { public static void main(String[] args) throws IOException { RegionProcedureStorePerformanceEvaluation tool = - new RegionProcedureStorePerformanceEvaluation(); + new RegionProcedureStorePerformanceEvaluation(); tool.setConf(HBaseConfiguration.create()); tool.run(args); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java index 9825828bff04..c05eb9a8ce3e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java index 324616940d6a..fe8b9c17f94f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,12 +40,12 @@ static Server mockServer(Configuration conf) { Server server = mock(Server.class); when(server.getConfiguration()).thenReturn(conf); when(server.getServerName()) - .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); + .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); return server; } static RegionProcedureStore createStore(Server server, MasterRegion region, - ProcedureLoader loader) throws IOException { + ProcedureLoader loader) throws IOException { RegionProcedureStore store = new RegionProcedureStore(server, region, new LeaseRecovery() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestProcedure.java index f81d19380d54..96517e4ba78a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestHFileProcedurePrettyPrinter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestHFileProcedurePrettyPrinter.java index d9a123663773..7a902820b404 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestHFileProcedurePrettyPrinter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestHFileProcedurePrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,12 +53,12 @@ public class TestHFileProcedurePrettyPrinter extends RegionProcedureStoreTestBas @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHFileProcedurePrettyPrinter.class); + HBaseClassTestRule.forClass(TestHFileProcedurePrettyPrinter.class); private static final Logger LOG = LoggerFactory.getLogger(TestHFileProcedurePrettyPrinter.class); private List checkOutput(BufferedReader reader, MutableLong putCount, - MutableLong deleteCount, MutableLong markDeletedCount) throws IOException { + MutableLong deleteCount, MutableLong markDeletedCount) throws IOException { putCount.setValue(0); deleteCount.setValue(0); markDeletedCount.setValue(0); @@ -102,11 +102,12 @@ public void test() throws Exception { store.cleanup(); store.region.flush(true); Path tableDir = CommonFSUtils.getTableDir( - new Path(htu.getDataTestDir(), MasterRegionFactory.MASTER_STORE_DIR), MasterRegionFactory.TABLE_NAME); + new Path(htu.getDataTestDir(), MasterRegionFactory.MASTER_STORE_DIR), + MasterRegionFactory.TABLE_NAME); FileSystem fs = tableDir.getFileSystem(htu.getConfiguration()); Path regionDir = - fs.listStatus(tableDir, p -> RegionInfo.isEncodedRegionName(Bytes.toBytes(p.getName())))[0] - .getPath(); + fs.listStatus(tableDir, p -> RegionInfo.isEncodedRegionName(Bytes.toBytes(p.getName())))[0] + .getPath(); List storefiles = HFile.getStoreFiles(fs, regionDir); ByteArrayOutputStream bos = new ByteArrayOutputStream(); PrintStream out = new PrintStream(bos); @@ -119,8 +120,8 @@ public void test() throws Exception { assertEquals(0, ToolRunner.run(htu.getConfiguration(), printer, new String[] { "-f", file.toString() })); try (BufferedReader reader = - new BufferedReader(new InputStreamReader(new ByteArrayInputStream(bos.toByteArray()), - StandardCharsets.UTF_8))) { + new BufferedReader(new InputStreamReader(new ByteArrayInputStream(bos.toByteArray()), + StandardCharsets.UTF_8))) { List fileScanned = checkOutput(reader, putCount, deleteCount, markDeletedCount); assertEquals(1, fileScanned.size()); assertEquals(file.toString(), fileScanned.get(0)); @@ -141,8 +142,9 @@ public void test() throws Exception { bos.reset(); printer = new HFileProcedurePrettyPrinter(out); assertEquals(0, ToolRunner.run(htu.getConfiguration(), printer, new String[] { "-a" })); - try (BufferedReader reader = new BufferedReader( - new InputStreamReader(new ByteArrayInputStream(bos.toByteArray()), StandardCharsets.UTF_8))) { + try (BufferedReader reader = + new BufferedReader(new InputStreamReader(new ByteArrayInputStream(bos.toByteArray()), + StandardCharsets.UTF_8))) { List fileScanned = checkOutput(reader, putCount, deleteCount, markDeletedCount); assertEquals(3, fileScanned.size()); assertEquals(10, putCount.longValue()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java index b05cc679d278..5abd06c803e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.HashSet; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.client.Get; @@ -59,7 +58,7 @@ public class TestRegionProcedureStore extends RegionProcedureStoreTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionProcedureStore.class); + HBaseClassTestRule.forClass(TestRegionProcedureStore.class); private static final Logger LOG = LoggerFactory.getLogger(TestRegionProcedureStore.class); @@ -128,24 +127,24 @@ public void testCleanup() throws Exception { // the row should still be there assertTrue(store.region - .get(new Get(Bytes.toBytes(proc3.getProcId())).setCheckExistenceOnly(true)).getExists()); + .get(new Get(Bytes.toBytes(proc3.getProcId())).setCheckExistenceOnly(true)).getExists()); assertTrue(store.region - .get(new Get(Bytes.toBytes(proc2.getProcId())).setCheckExistenceOnly(true)).getExists()); + .get(new Get(Bytes.toBytes(proc2.getProcId())).setCheckExistenceOnly(true)).getExists()); // proc2 will be deleted after cleanup, but proc3 should still be there as it holds the max proc // id store.cleanup(); assertTrue(store.region - .get(new Get(Bytes.toBytes(proc3.getProcId())).setCheckExistenceOnly(true)).getExists()); + .get(new Get(Bytes.toBytes(proc3.getProcId())).setCheckExistenceOnly(true)).getExists()); assertFalse(store.region - .get(new Get(Bytes.toBytes(proc2.getProcId())).setCheckExistenceOnly(true)).getExists()); + .get(new Get(Bytes.toBytes(proc2.getProcId())).setCheckExistenceOnly(true)).getExists()); RegionProcedureStoreTestProcedure proc4 = new RegionProcedureStoreTestProcedure(); store.insert(proc4, null); store.cleanup(); // proc3 should also be deleted as now proc4 holds the max proc id assertFalse(store.region - .get(new Get(Bytes.toBytes(proc3.getProcId())).setCheckExistenceOnly(true)).getExists()); + .get(new Get(Bytes.toBytes(proc3.getProcId())).setCheckExistenceOnly(true)).getExists()); } /** @@ -228,7 +227,7 @@ public int getRemotePort() { @Override public void setResponse(Message param, CellScanner cells, Throwable errorThrowable, - String error) { + String error) { } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java index cd39e996bcdb..68adc0162858 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -62,7 +62,7 @@ public class TestRegionProcedureStoreMigration { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionProcedureStoreMigration.class); + HBaseClassTestRule.forClass(TestRegionProcedureStoreMigration.class); private HBaseCommonTestingUtil htu; @@ -119,7 +119,7 @@ public void test() throws IOException { } walStore.stop(true); SortedSet loadedProcs = - new TreeSet<>((p1, p2) -> Long.compare(p1.getProcId(), p2.getProcId())); + new TreeSet<>((p1, p2) -> Long.compare(p1.getProcId(), p2.getProcId())); MutableLong maxProcIdSet = new MutableLong(0); store = RegionProcedureStoreTestHelper.createStore(server, region, new ProcedureLoader() { @@ -132,7 +132,7 @@ public void setMaxProcId(long maxProcId) { public void load(ProcedureIterator procIter) throws IOException { while (procIter.hasNext()) { RegionProcedureStoreTestProcedure proc = - (RegionProcedureStoreTestProcedure) procIter.next(); + (RegionProcedureStoreTestProcedure) procIter.next(); loadedProcs.add(proc); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestWALProcedurePrettyPrinter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestWALProcedurePrettyPrinter.java index 62e9575e1105..14dd1bff4387 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestWALProcedurePrettyPrinter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestWALProcedurePrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class TestWALProcedurePrettyPrinter extends RegionProcedureStoreTestBase @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALProcedurePrettyPrinter.class); + HBaseClassTestRule.forClass(TestWALProcedurePrettyPrinter.class); private static final Logger LOG = LoggerFactory.getLogger(TestWALProcedurePrettyPrinter.class); @@ -64,7 +64,7 @@ public void test() throws Exception { } store.cleanup(); Path walParentDir = new Path(htu.getDataTestDir(), - MasterRegionFactory.MASTER_STORE_DIR + "/" + HConstants.HREGION_LOGDIR_NAME); + MasterRegionFactory.MASTER_STORE_DIR + "/" + HConstants.HREGION_LOGDIR_NAME); FileSystem fs = walParentDir.getFileSystem(htu.getConfiguration()); Path walDir = fs.listStatus(walParentDir)[0].getPath(); Path walFile = fs.listStatus(walDir)[0].getPath(); @@ -75,8 +75,9 @@ public void test() throws Exception { WALProcedurePrettyPrinter printer = new WALProcedurePrettyPrinter(out); assertEquals(0, ToolRunner.run(htu.getConfiguration(), printer, new String[] { fs.makeQualified(walFile).toString() })); - try (BufferedReader reader = new BufferedReader( - new InputStreamReader(new ByteArrayInputStream(bos.toByteArray()), StandardCharsets.UTF_8))) { + try (BufferedReader reader = + new BufferedReader(new InputStreamReader(new ByteArrayInputStream(bos.toByteArray()), + StandardCharsets.UTF_8))) { long inserted = 0; long markedDeleted = 0; long deleted = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java index 615fa6445227..2b90b2b08959 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestReplicationProtobuf { @ClassRule @@ -77,7 +77,6 @@ public void testGetCellScanner() throws IOException { private void testAdvancetHasSameRow(CellScanner scanner, final KeyValue kv) throws IOException { scanner.advance(); assertTrue(Bytes.equals(scanner.current().getRowArray(), scanner.current().getRowOffset(), - scanner.current().getRowLength(), - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); + scanner.current().getRowLength(), kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java index eada244cf845..2f3dcd55de7b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -83,8 +84,8 @@ public class SpaceQuotaHelperForTests { private final AtomicLong counter; private static final int NUM_RETRIES = 10; - public SpaceQuotaHelperForTests( - HBaseTestingUtil testUtil, TestName testName, AtomicLong counter) { + public SpaceQuotaHelperForTests(HBaseTestingUtil testUtil, TestName testName, + AtomicLong counter) { this.testUtil = Objects.requireNonNull(testUtil); this.testName = Objects.requireNonNull(testName); this.counter = Objects.requireNonNull(counter); @@ -130,20 +131,19 @@ long listNumDefinedQuotas(Connection conn) throws IOException { } /** - * Writes the given mutation into a table until it violates the given policy. - * Verifies that the policy has been violated & then returns the name of - * the table created & written into. + * Writes the given mutation into a table until it violates the given policy. 
Verifies that the + * policy has been violated & then returns the name of the table created & written into. */ - TableName writeUntilViolationAndVerifyViolation( - SpaceViolationPolicy policyToViolate, Mutation m) throws Exception { + TableName writeUntilViolationAndVerifyViolation(SpaceViolationPolicy policyToViolate, Mutation m) + throws Exception { final TableName tn = writeUntilViolation(policyToViolate); verifyViolation(policyToViolate, tn, m); return tn; } /** - * Writes the given mutation into a table until it violates the given policy. - * Returns the name of the table created & written into. + * Writes the given mutation into a table until it violates the given policy. Returns the name of + * the table created & written into. */ TableName writeUntilViolation(SpaceViolationPolicy policyToViolate) throws Exception { TableName tn = createTableWithRegions(10); @@ -157,16 +157,16 @@ TableName writeUntilViolation(SpaceViolationPolicy policyToViolate) throws Excep return tn; } - - TableName writeUntilViolationAndVerifyViolationInNamespace( - String ns, SpaceViolationPolicy policyToViolate, Mutation m) throws Exception { + TableName writeUntilViolationAndVerifyViolationInNamespace(String ns, + SpaceViolationPolicy policyToViolate, Mutation m) throws Exception { final TableName tn = writeUntilViolationInNamespace(ns, policyToViolate); verifyViolation(policyToViolate, tn, m); return tn; } - TableName writeUntilViolationInNamespace(String ns, SpaceViolationPolicy policyToViolate) throws Exception { - TableName tn = createTableWithRegions(ns,10); + TableName writeUntilViolationInNamespace(String ns, SpaceViolationPolicy policyToViolate) + throws Exception { + TableName tn = createTableWithRegions(ns, 10); setQuotaLimit(ns, policyToViolate, 4L); @@ -199,8 +199,7 @@ void verifyViolation(SpaceViolationPolicy policyToViolate, TableName tn, Mutatio table.increment((Increment) m); } else { fail( - "Failed to apply " + m.getClass().getSimpleName() + - " to the table. Programming error"); + "Failed to apply " + m.getClass().getSimpleName() + " to the table. 
Programming error"); } LOG.info("Did not reject the " + m.getClass().getSimpleName() + ", will sleep and retry"); Thread.sleep(2000); @@ -230,15 +229,15 @@ void verifyViolation(SpaceViolationPolicy policyToViolate, TableName tn, Mutatio } else { if (policyToViolate.equals(SpaceViolationPolicy.DISABLE)) { assertTrue( - msg.contains("TableNotEnabledException") || msg.contains(policyToViolate.name())); + msg.contains("TableNotEnabledException") || msg.contains(policyToViolate.name())); } else { assertTrue("Expected exception message to contain the word '" + policyToViolate.name() - + "', but was " + msg, - msg.contains(policyToViolate.name())); + + "', but was " + msg, + msg.contains(policyToViolate.name())); } } - assertTrue( - "Expected to see an exception writing data to a table exceeding its quota", sawError); + assertTrue("Expected to see an exception writing data to a table exceeding its quota", + sawError); } /** @@ -291,7 +290,8 @@ void verifyTableUsageSnapshotForSpaceQuotaExist(TableName tn) throws Exception { ResultScanner rs = quotaTable.getScanner(s); sawUsageSnapshot = (rs.next() != null); } - assertTrue("Expected to succeed in getting table usage snapshots for space quota", sawUsageSnapshot); + assertTrue("Expected to succeed in getting table usage snapshots for space quota", + sawUsageSnapshot); } /** @@ -308,8 +308,7 @@ void setQuotaLimit(final TableName tn, SpaceViolationPolicy policy, long sizeInM /** * Sets the given quota (policy & limit) on the passed namespace. */ - void setQuotaLimit(String ns, SpaceViolationPolicy policy, long sizeInMBs) - throws Exception { + void setQuotaLimit(String ns, SpaceViolationPolicy policy, long sizeInMBs) throws Exception { final long sizeLimit = sizeInMBs * SpaceQuotaHelperForTests.ONE_MEGABYTE; QuotaSettings settings = QuotaSettingsFactory.limitNamespaceSpace(ns, sizeLimit, policy); testUtil.getAdmin().setQuota(settings); @@ -379,8 +378,8 @@ void removeAllQuotas(Connection conn) throws IOException { } QuotaSettings getTableSpaceQuota(Connection conn, TableName tn) throws IOException { - try (QuotaRetriever scanner = QuotaRetriever.open( - conn.getConfiguration(), new QuotaFilter().setTableFilter(tn.getNameAsString()))) { + try (QuotaRetriever scanner = QuotaRetriever.open(conn.getConfiguration(), + new QuotaFilter().setTableFilter(tn.getNameAsString()))) { for (QuotaSettings setting : scanner) { if (setting.getTableName().equals(tn) && setting.getQuotaType() == QuotaType.SPACE) { return setting; @@ -502,8 +501,8 @@ Multimap createTablesWithSpaceQuotas() throws Exceptio final long sizeLimit3 = 1024L * 1024L * 1024L * 1024L * 100L; // 100TB final SpaceViolationPolicy violationPolicy3 = SpaceViolationPolicy.NO_INSERTS; - QuotaSettings qs3 = QuotaSettingsFactory.limitNamespaceSpace( - nd.getName(), sizeLimit3, violationPolicy3); + QuotaSettings qs3 = + QuotaSettingsFactory.limitNamespaceSpace(nd.getName(), sizeLimit3, violationPolicy3); tablesWithQuotas.put(tn3, qs3); tablesWithQuotas.put(tn4, qs3); tablesWithQuotas.put(tn5, qs3); @@ -538,7 +537,7 @@ TableName createTableWithRegions(int numRegions) throws Exception { TableName createTableWithRegions(Admin admin, int numRegions) throws Exception { return createTableWithRegions(admin, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR, numRegions, - 0); + 0); } TableName createTableWithRegions(String namespace, int numRegions) throws Exception { @@ -574,8 +573,8 @@ TableName createTableWithRegions(Admin admin, String namespace, int numRegions, TableName createTableInNamespace(NamespaceDescriptor 
nd) throws Exception { final Admin admin = testUtil.getAdmin(); - final TableName tn = TableName.valueOf(nd.getName(), - testName.getMethodName() + counter.getAndIncrement()); + final TableName tn = + TableName.valueOf(nd.getName(), testName.getMethodName() + counter.getAndIncrement()); // Delete the old table if (admin.tableExists(tn)) { @@ -591,7 +590,7 @@ TableName createTableInNamespace(NamespaceDescriptor nd) throws Exception { return tn; } - void partitionTablesByQuotaTarget(Multimap quotas, + void partitionTablesByQuotaTarget(Multimap quotas, Set tablesWithTableQuota, Set tablesWithNamespaceQuota) { // Partition the tables with quotas by table and ns quota for (Entry entry : quotas.entries()) { @@ -631,10 +630,9 @@ Map> generateFileToLoad(TableName tn, int numFiles, int numRo } /** - * Abstraction to simplify the case where a test needs to verify a certain state - * on a {@code SpaceQuotaSnapshot}. This class fails-fast when there is no such - * snapshot obtained from the Master. As such, it is not useful to verify the - * lack of a snapshot. + * Abstraction to simplify the case where a test needs to verify a certain state on a + * {@code SpaceQuotaSnapshot}. This class fails-fast when there is no such snapshot obtained from + * the Master. As such, it is not useful to verify the lack of a snapshot. */ static abstract class SpaceQuotaSnapshotPredicate implements Predicate { private final Connection conn; @@ -677,7 +675,6 @@ public boolean evaluate() throws Exception { /** * Must determine if the given {@code SpaceQuotaSnapshot} meets some criteria. - * * @param snapshot a non-null snapshot obtained from the HBase Master * @return true if the criteria is met, false otherwise */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierForTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierForTest.java index 46fb1e890860..f1ec089f3625 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierForTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierForTest.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +19,11 @@ import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.Connection; /** * A SpaceQuotaSnapshotNotifier implementation for testing. @@ -33,10 +33,11 @@ public class SpaceQuotaSnapshotNotifierForTest implements SpaceQuotaSnapshotNoti private static final Logger LOG = LoggerFactory.getLogger(SpaceQuotaSnapshotNotifierForTest.class); - private final Map tableQuotaSnapshots = new HashMap<>(); + private final Map tableQuotaSnapshots = new HashMap<>(); @Override - public void initialize(Connection conn) {} + public void initialize(Connection conn) { + } @Override public synchronized void transitionTable(TableName tableName, SpaceQuotaSnapshot snapshot) { @@ -46,7 +47,7 @@ public synchronized void transitionTable(TableName tableName, SpaceQuotaSnapshot tableQuotaSnapshots.put(tableName, snapshot); } - public synchronized Map copySnapshots() { + public synchronized Map copySnapshots() { return new HashMap<>(this.tableQuotaSnapshots); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestActivePolicyEnforcement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestActivePolicyEnforcement.java index b4711998d06f..79981b817679 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestActivePolicyEnforcement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestActivePolicyEnforcement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,56 +68,54 @@ public void testGetter() { @Test public void testNoPolicyReturnsNoopEnforcement() { - ActivePolicyEnforcement ape = new ActivePolicyEnforcement( - new HashMap<>(), Collections.emptyMap(), mock(RegionServerServices.class)); - SpaceViolationPolicyEnforcement enforcement = ape.getPolicyEnforcement( - TableName.valueOf("nonexistent")); + ActivePolicyEnforcement ape = new ActivePolicyEnforcement(new HashMap<>(), + Collections.emptyMap(), mock(RegionServerServices.class)); + SpaceViolationPolicyEnforcement enforcement = + ape.getPolicyEnforcement(TableName.valueOf("nonexistent")); assertNotNull(enforcement); assertTrue( - "Expected an instance of MissingSnapshotViolationPolicyEnforcement, but got " - + enforcement.getClass(), - enforcement instanceof MissingSnapshotViolationPolicyEnforcement); + "Expected an instance of MissingSnapshotViolationPolicyEnforcement, but got " + + enforcement.getClass(), + enforcement instanceof MissingSnapshotViolationPolicyEnforcement); } @Test public void testNoBulkLoadChecksOnNoSnapshot() { ActivePolicyEnforcement ape = new ActivePolicyEnforcement( new HashMap(), - Collections. emptyMap(), - mock(RegionServerServices.class)); - SpaceViolationPolicyEnforcement enforcement = ape.getPolicyEnforcement( - TableName.valueOf("nonexistent")); + Collections. 
emptyMap(), mock(RegionServerServices.class)); + SpaceViolationPolicyEnforcement enforcement = + ape.getPolicyEnforcement(TableName.valueOf("nonexistent")); assertFalse("Should not check bulkloads", enforcement.shouldCheckBulkLoads()); } @Test public void testNoQuotaReturnsSingletonPolicyEnforcement() { - final ActivePolicyEnforcement ape = new ActivePolicyEnforcement( - Collections.emptyMap(), Collections.emptyMap(), rss); + final ActivePolicyEnforcement ape = + new ActivePolicyEnforcement(Collections.emptyMap(), Collections.emptyMap(), rss); final TableName tableName = TableName.valueOf("my_table"); SpaceViolationPolicyEnforcement policyEnforcement = ape.getPolicyEnforcement(tableName); // This should be the same exact instance, the singleton assertTrue(policyEnforcement == MissingSnapshotViolationPolicyEnforcement.getInstance()); assertEquals(1, ape.getLocallyCachedPolicies().size()); - Entry entry = + Entry entry = ape.getLocallyCachedPolicies().entrySet().iterator().next(); assertTrue(policyEnforcement == entry.getValue()); } @Test public void testNonViolatingQuotaCachesPolicyEnforcment() { - final Map snapshots = new HashMap<>(); + final Map snapshots = new HashMap<>(); final TableName tableName = TableName.valueOf("my_table"); snapshots.put(tableName, new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 0, 1024)); - final ActivePolicyEnforcement ape = new ActivePolicyEnforcement( - Collections.emptyMap(), snapshots, rss); + final ActivePolicyEnforcement ape = + new ActivePolicyEnforcement(Collections.emptyMap(), snapshots, rss); SpaceViolationPolicyEnforcement policyEnforcement = ape.getPolicyEnforcement(tableName); - assertTrue( - "Found the wrong class: " + policyEnforcement.getClass(), - policyEnforcement instanceof DefaultViolationPolicyEnforcement); + assertTrue("Found the wrong class: " + policyEnforcement.getClass(), + policyEnforcement instanceof DefaultViolationPolicyEnforcement); SpaceViolationPolicyEnforcement copy = ape.getPolicyEnforcement(tableName); assertTrue("Expected the instance to be cached", policyEnforcement == copy); - Entry entry = + Entry entry = ape.getLocallyCachedPolicies().entrySet().iterator().next(); assertTrue(policyEnforcement == entry.getValue()); } @@ -126,10 +124,10 @@ public void testNonViolatingQuotaCachesPolicyEnforcment() { public void testViolatingQuotaCachesNothing() { final TableName tableName = TableName.valueOf("my_table"); SpaceViolationPolicyEnforcement policyEnforcement = mock(SpaceViolationPolicyEnforcement.class); - final Map activePolicies = new HashMap<>(); + final Map activePolicies = new HashMap<>(); activePolicies.put(tableName, policyEnforcement); - final ActivePolicyEnforcement ape = new ActivePolicyEnforcement( - activePolicies, Collections.emptyMap(), rss); + final ActivePolicyEnforcement ape = + new ActivePolicyEnforcement(activePolicies, Collections.emptyMap(), rss); assertTrue(ape.getPolicyEnforcement(tableName) == policyEnforcement); assertEquals(0, ape.getLocallyCachedPolicies().size()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java index 955918dd420c..079695c9cf68 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestClusterScopeQuotaThrottle.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more 
contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import static org.junit.Assert.assertTrue; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -181,7 +180,8 @@ public void testUserClusterScopeQuota() throws Exception { triggerUserCacheRefresh(TEST_UTIL, true, TABLE_NAMES); } - @org.junit.Ignore @Test // Spews the log w/ triggering of scheduler? HBASE-24035 + @org.junit.Ignore + @Test // Spews the log w/ triggering of scheduler? HBASE-24035 public void testUserNamespaceClusterScopeQuota() throws Exception { final Admin admin = TEST_UTIL.getAdmin(); final String userName = User.getCurrent().getShortName(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileArchiverNotifierImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileArchiverNotifierImpl.java index 5ce888a885df..1e9b297b7ccb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileArchiverNotifierImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileArchiverNotifierImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -128,8 +129,9 @@ public void testSnapshotSizePersistence() throws IOException { admin.disableTable(tn); admin.deleteTable(tn); } - TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn).setColumnFamily( - ColumnFamilyDescriptorBuilder.of(QuotaTableUtil.QUOTA_FAMILY_USAGE)).build(); + TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(QuotaTableUtil.QUOTA_FAMILY_USAGE)) + .build(); admin.createTable(desc); FileArchiverNotifierImpl notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn); @@ -162,8 +164,8 @@ public void testIncrementalFileArchiving() throws Exception { } final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME); final TableName tn1 = helper.createTableWithRegions(1); - admin.setQuota(QuotaSettingsFactory.limitTableSpace( - tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS)); + admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, + SpaceViolationPolicy.NO_INSERTS)); // Write some data and flush it helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE); @@ -178,7 +180,7 @@ public void testIncrementalFileArchiving() throws Exception { long snapshotSize = notifier.computeAndStoreSnapshotSizes(Arrays.asList(snapshotName1)); assertEquals("The size of the snapshots should be zero", 0, snapshotSize); assertTrue("Last compute time was not less than current compute time", - t1 < notifier.getLastFullCompute()); + t1 < notifier.getLastFullCompute()); // No recently archived files and the snapshot should have no size assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1)); @@ -216,7 +218,7 @@ public void testIncrementalFileArchiving() throws Exception { assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1)); // We should also have no recently archived files after a re-computation assertTrue("Last compute time was not less than current compute time", - t2 < notifier.getLastFullCompute()); + t2 < notifier.getLastFullCompute()); } @Test @@ -228,8 +230,8 @@ public void testParseOldNamespaceSnapshotSize() throws Exception { admin.disableTable(fakeQuotaTableName); admin.deleteTable(fakeQuotaTableName); } - TableDescriptor desc = TableDescriptorBuilder.newBuilder(fakeQuotaTableName).setColumnFamily( - ColumnFamilyDescriptorBuilder.of(QuotaTableUtil.QUOTA_FAMILY_USAGE)) + TableDescriptor desc = TableDescriptorBuilder.newBuilder(fakeQuotaTableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(QuotaTableUtil.QUOTA_FAMILY_USAGE)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(QuotaUtil.QUOTA_FAMILY_INFO)).build(); admin.createTable(desc); @@ -261,8 +263,8 @@ private long count(Table t) throws IOException { } } - private long extractSnapshotSize( - Table quotaTable, TableName tn, String snapshot) throws IOException { + private long extractSnapshotSize(Table quotaTable, TableName tn, String snapshot) + throws IOException { Get g = QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot); Result r = quotaTable.get(g); assertNotNull(r); @@ -270,8 +272,8 @@ private long extractSnapshotSize( assertTrue(cs.advance()); Cell c = cs.current(); assertNotNull(c); - return QuotaTableUtil.extractSnapshotSize( - c.getValueArray(), c.getValueOffset(), 
c.getValueLength()); + return QuotaTableUtil.extractSnapshotSize(c.getValueArray(), c.getValueOffset(), + c.getValueLength()); } private void verify(Table t, IOThrowingRunnable test) throws IOException { @@ -287,10 +289,10 @@ private interface IOThrowingRunnable { private Set getFilesReferencedBySnapshot(String snapshotName) throws IOException { HashSet files = new HashSet<>(); - Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir( - snapshotName, CommonFSUtils.getRootDir(conf)); - SnapshotProtos.SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo( - fs, snapshotDir); + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, + CommonFSUtils.getRootDir(conf)); + SnapshotProtos.SnapshotDescription sd = + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd); // For each region referenced by the snapshot for (SnapshotRegionManifest rm : manifest.getRegionManifests()) { @@ -305,7 +307,7 @@ private Set getFilesReferencedBySnapshot(String snapshotName) throws IOE return files; } - private Entry entry(K k, V v) { + private Entry entry(K k, V v) { return Maps.immutableEntry(k, v); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java index 38d98e4454e7..c5fde09f2bfc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,7 @@ public void testNoOnlineRegions() { final Configuration conf = getDefaultHBaseConfiguration(); final HRegionServer rs = mockRegionServer(conf); final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); - doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes))) - .when(rs) + doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes))).when(rs) .reportRegionSizesForQuotas(any(RegionSizeStore.class)); final Region region = mockRegionWithSize(regionSizes); @@ -80,8 +79,7 @@ public void testRegionSizes() { final Configuration conf = getDefaultHBaseConfiguration(); final HRegionServer rs = mockRegionServer(conf); final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); - doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes))) - .when(rs) + doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes))).when(rs) .reportRegionSizesForQuotas(any(RegionSizeStore.class)); final Region region = mockRegionWithSize(regionSizes); @@ -104,8 +102,7 @@ public void testMultipleRegionSizes() { final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum, r2Sum, r3Sum)))) - .when(rs) - .reportRegionSizesForQuotas(any(RegionSizeStore.class)); + .when(rs).reportRegionSizesForQuotas(any(RegionSizeStore.class)); final Region r1 = mockRegionWithSize(r1Sizes); final Region r2 = mockRegionWithSize(r2Sizes); @@ -120,13 +117,11 @@ public void testDefaultConfigurationProperties() { final HRegionServer rs = mockRegionServer(conf); final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); // Verify that the expected default values are actually represented. 
- assertEquals( - FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_DEFAULT, chore.getPeriod()); - assertEquals( - FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_DEFAULT, chore.getInitialDelay()); - assertEquals( - TimeUnit.valueOf(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT), - chore.getTimeUnit()); + assertEquals(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_DEFAULT, chore.getPeriod()); + assertEquals(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_DEFAULT, + chore.getInitialDelay()); + assertEquals(TimeUnit.valueOf(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT), + chore.getTimeUnit()); } @Test @@ -168,8 +163,7 @@ Iterator getLeftoverRegions() { } }; doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum, leftover2Sum)))) - .when(rs) - .reportRegionSizesForQuotas(any(RegionSizeStore.class)); + .when(rs).reportRegionSizesForQuotas(any(RegionSizeStore.class)); // We shouldn't compute all of these region sizes, just the leftovers final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L)); @@ -198,8 +192,7 @@ Iterator getLeftoverRegions() { return Arrays.asList(lr1, lr2).iterator(); } }; - doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum)))) - .when(rs) + doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum)))).when(rs) .reportRegionSizesForQuotas(any(RegionSizeStore.class)); // We shouldn't compute all of these region sizes, just the leftovers @@ -223,8 +216,7 @@ public void testIgnoreSplitParents() { final List r2Sizes = Arrays.asList(1024L * 1024L); final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); - doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum)))) - .when(rs) + doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum)))).when(rs) .reportRegionSizesForQuotas(any(RegionSizeStore.class)); final Region r1 = mockRegionWithSize(r1Sizes); @@ -244,8 +236,7 @@ public void testIgnoreRegionReplicas() { final List r2Sizes = Arrays.asList(1024L * 1024L); final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); - doAnswer(new ExpectedRegionSizeSummationAnswer(r1Sum)) - .when(rs) + doAnswer(new ExpectedRegionSizeSummationAnswer(r1Sum)).when(rs) .reportRegionSizesForQuotas(any(RegionSizeStore.class)); final Region r1 = mockRegionWithSize(r1Sizes); @@ -270,9 +261,9 @@ public void testNonHFilesAreIgnored() { // We expect that only the hfiles would be counted (hfile links are ignored) final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs); - doAnswer(new ExpectedRegionSizeSummationAnswer( - sum(Arrays.asList(r1HFileSizeSum, r2HFileSizeSum)))) - .when(rs).reportRegionSizesForQuotas(any(RegionSizeStore.class)); + doAnswer( + new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1HFileSizeSum, r2HFileSizeSum)))) + .when(rs).reportRegionSizesForQuotas(any(RegionSizeStore.class)); final Region r1 = mockRegionWithHFileLinks(r1StoreFileSizes, r1HFileSizes); final Region r2 = mockRegionWithHFileLinks(r2StoreFileSizes, r2HFileSizes); @@ -316,7 +307,6 @@ private long sum(Collection values) { /** * Creates a region with a number of Stores equal to the length of {@code storeSizes}. Each * {@link Store} will have a reported size corresponding to the element in {@code storeSizes}. - * * @param storeSizes A list of sizes for each Store. * @return A mocked Region. 
*/ @@ -334,15 +324,15 @@ private Region mockRegionWithSize(Collection storeSizes) { return r; } - private Region mockRegionWithHFileLinks(Collection storeSizes, Collection hfileSizes) { + private Region mockRegionWithHFileLinks(Collection storeSizes, + Collection hfileSizes) { final Region r = mock(Region.class); final RegionInfo info = mock(RegionInfo.class); when(r.getRegionInfo()).thenReturn(info); List stores = new ArrayList<>(); when(r.getStores()).thenReturn((List) stores); - assertEquals( - "Logic error, storeSizes and linkSizes must be equal in size", storeSizes.size(), - hfileSizes.size()); + assertEquals("Logic error, storeSizes and linkSizes must be equal in size", storeSizes.size(), + hfileSizes.size()); Iterator storeSizeIter = storeSizes.iterator(); Iterator hfileSizeIter = hfileSizes.iterator(); while (storeSizeIter.hasNext() && hfileSizeIter.hasNext()) { @@ -358,7 +348,6 @@ private Region mockRegionWithHFileLinks(Collection storeSizes, Collection< /** * Creates a region which is the parent of a split. - * * @param storeSizes A list of sizes for each Store. * @return A mocked Region. */ @@ -371,7 +360,6 @@ private Region mockSplitParentRegionWithSize(Collection storeSizes) { /** * Creates a region who has a replicaId of 1. - * * @param storeSizes A list of sizes for each Store. * @return A mocked Region. */ @@ -397,7 +385,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { Object[] args = invocation.getArguments(); assertEquals(1, args.length); @SuppressWarnings("unchecked") - Map regionSizes = (Map) args[0]; + Map regionSizes = (Map) args[0]; long sum = 0L; for (Long regionSize : regionSizes.values()) { sum += regionSize; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestGlobalQuotaSettingsImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestGlobalQuotaSettingsImpl.java index 55938dc5f5d5..66e422f4ad93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestGlobalQuotaSettingsImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestGlobalQuotaSettingsImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,29 +38,27 @@ public class TestGlobalQuotaSettingsImpl { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGlobalQuotaSettingsImpl.class); - QuotaProtos.TimedQuota REQUEST_THROTTLE = QuotaProtos.TimedQuota.newBuilder() - .setScope(QuotaProtos.QuotaScope.MACHINE).setSoftLimit(100) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); - QuotaProtos.Throttle THROTTLE = QuotaProtos.Throttle.newBuilder() - .setReqNum(REQUEST_THROTTLE).build(); + QuotaProtos.TimedQuota REQUEST_THROTTLE = + QuotaProtos.TimedQuota.newBuilder().setScope(QuotaProtos.QuotaScope.MACHINE).setSoftLimit(100) + .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + QuotaProtos.Throttle THROTTLE = + QuotaProtos.Throttle.newBuilder().setReqNum(REQUEST_THROTTLE).build(); - QuotaProtos.SpaceQuota SPACE_QUOTA = QuotaProtos.SpaceQuota.newBuilder() - .setSoftLimit(1024L * 1024L).setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_WRITES) - .build(); + QuotaProtos.SpaceQuota SPACE_QUOTA = + QuotaProtos.SpaceQuota.newBuilder().setSoftLimit(1024L * 1024L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_WRITES).build(); @Test public void testMergeThrottle() throws IOException { - QuotaProtos.Quotas quota = QuotaProtos.Quotas.newBuilder() - .setThrottle(THROTTLE).build(); - QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder() - .setSoftLimit(500).build(); + QuotaProtos.Quotas quota = QuotaProtos.Quotas.newBuilder().setThrottle(THROTTLE).build(); + QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder().setSoftLimit(500).build(); // Unset the req throttle, set a write throttle QuotaProtos.ThrottleRequest writeThrottle = QuotaProtos.ThrottleRequest.newBuilder() .setTimedQuota(writeQuota).setType(QuotaProtos.ThrottleType.WRITE_NUMBER).build(); GlobalQuotaSettingsImpl settings = new GlobalQuotaSettingsImpl("joe", null, null, null, quota); - GlobalQuotaSettingsImpl merged = settings.merge( - new ThrottleSettings("joe", null, null, null, writeThrottle)); + GlobalQuotaSettingsImpl merged = + settings.merge(new ThrottleSettings("joe", null, null, null, writeThrottle)); QuotaProtos.Throttle mergedThrottle = merged.getThrottleProto(); // Verify the request throttle is in place @@ -77,37 +75,34 @@ public void testMergeThrottle() throws IOException { @Test public void testMergeSpace() throws IOException { TableName tn = TableName.valueOf("foo"); - QuotaProtos.Quotas quota = QuotaProtos.Quotas.newBuilder() - .setSpace(SPACE_QUOTA).build(); + QuotaProtos.Quotas quota = QuotaProtos.Quotas.newBuilder().setSpace(SPACE_QUOTA).build(); GlobalQuotaSettingsImpl settings = new GlobalQuotaSettingsImpl(null, tn, null, null, quota); // Switch the violation policy to DISABLE GlobalQuotaSettingsImpl merged = settings.merge( - new SpaceLimitSettings(tn, SPACE_QUOTA.getSoftLimit(), SpaceViolationPolicy.DISABLE)); + new SpaceLimitSettings(tn, SPACE_QUOTA.getSoftLimit(), SpaceViolationPolicy.DISABLE)); QuotaProtos.SpaceQuota mergedSpaceQuota = merged.getSpaceProto(); assertEquals(SPACE_QUOTA.getSoftLimit(), mergedSpaceQuota.getSoftLimit()); - assertEquals( - QuotaProtos.SpaceViolationPolicy.DISABLE, mergedSpaceQuota.getViolationPolicy()); + assertEquals(QuotaProtos.SpaceViolationPolicy.DISABLE, mergedSpaceQuota.getViolationPolicy()); } @Test public void testMergeThrottleAndSpace() throws IOException { final String ns = "org1"; - QuotaProtos.Quotas quota = QuotaProtos.Quotas.newBuilder() - 
.setThrottle(THROTTLE).setSpace(SPACE_QUOTA).build(); + QuotaProtos.Quotas quota = + QuotaProtos.Quotas.newBuilder().setThrottle(THROTTLE).setSpace(SPACE_QUOTA).build(); GlobalQuotaSettingsImpl settings = new GlobalQuotaSettingsImpl(null, null, ns, null, quota); - QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder() - .setSoftLimit(500).build(); + QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder().setSoftLimit(500).build(); // Add a write throttle QuotaProtos.ThrottleRequest writeThrottle = QuotaProtos.ThrottleRequest.newBuilder() .setTimedQuota(writeQuota).setType(QuotaProtos.ThrottleType.WRITE_NUMBER).build(); - GlobalQuotaSettingsImpl merged = settings.merge( - new ThrottleSettings(null, null, ns, null, writeThrottle)); - GlobalQuotaSettingsImpl finalQuota = merged.merge(new SpaceLimitSettings( - ns, SPACE_QUOTA.getSoftLimit(), SpaceViolationPolicy.NO_WRITES_COMPACTIONS)); + GlobalQuotaSettingsImpl merged = + settings.merge(new ThrottleSettings(null, null, ns, null, writeThrottle)); + GlobalQuotaSettingsImpl finalQuota = merged.merge(new SpaceLimitSettings(ns, + SPACE_QUOTA.getSoftLimit(), SpaceViolationPolicy.NO_WRITES_COMPACTIONS)); // Verify both throttle quotas QuotaProtos.Throttle throttle = finalQuota.getThrottleProto(); @@ -122,8 +117,7 @@ public void testMergeThrottleAndSpace() throws IOException { // Verify space quota QuotaProtos.SpaceQuota finalSpaceQuota = finalQuota.getSpaceProto(); assertEquals(SPACE_QUOTA.getSoftLimit(), finalSpaceQuota.getSoftLimit()); - assertEquals( - QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS, - finalSpaceQuota.getViolationPolicy()); + assertEquals(QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS, + finalSpaceQuota.getViolationPolicy()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java index bd4a29a3d624..f5b520d61661 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestLowLatencySpaceQuotas.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -55,7 +56,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestLowLatencySpaceQuotas { @ClassRule @@ -106,8 +107,8 @@ public void removeAllQuotas() throws Exception { public void testFlushes() throws Exception { TableName tn = helper.createTableWithRegions(1); // Set a quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); // Write some data @@ -120,7 +121,8 @@ public void testFlushes() throws Exception { // We should be able to observe the system recording an increase in size (even // though we know the filesystem scanning did not happen). TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { return snapshot.getUsage() >= initialSize; } }); @@ -130,8 +132,8 @@ public void testFlushes() throws Exception { public void testMajorCompaction() throws Exception { TableName tn = helper.createTableWithRegions(1); // Set a quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); // Write some data and flush it to disk. @@ -145,7 +147,8 @@ public void testMajorCompaction() throws Exception { // After two flushes, both hfiles would contain similar data. We should see 2x the data. TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { return snapshot.getUsage() >= 2L * sizePerBatch; } }); @@ -156,7 +159,8 @@ public void testMajorCompaction() throws Exception { // After we major compact the table, we should notice quickly that the amount of data in the // table is much closer to reality (the duplicate entries across the two files are removed). 
TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { return snapshot.getUsage() >= sizePerBatch && snapshot.getUsage() <= 2L * sizePerBatch; } }); @@ -166,8 +170,8 @@ public void testMajorCompaction() throws Exception { public void testMinorCompaction() throws Exception { TableName tn = helper.createTableWithRegions(1); // Set a quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); // Write some data and flush it to disk. @@ -184,7 +188,8 @@ public void testMinorCompaction() throws Exception { // After two flushes, both hfiles would contain similar data. We should see 2x the data. TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { return snapshot.getUsage() >= numFiles * sizePerBatch; } }); @@ -196,9 +201,10 @@ public void testMinorCompaction() throws Exception { // After we major compact the table, we should notice quickly that the amount of data in the // table is much closer to reality (the duplicate entries across the two files are removed). TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { - return snapshot.getUsage() >= numFilesAfterMinorCompaction * sizePerBatch && - snapshot.getUsage() <= (numFilesAfterMinorCompaction + 1) * sizePerBatch; + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + return snapshot.getUsage() >= numFilesAfterMinorCompaction * sizePerBatch + && snapshot.getUsage() <= (numFilesAfterMinorCompaction + 1) * sizePerBatch; } }); } @@ -211,22 +217,21 @@ private long getNumHFilesForRegion(HRegion region) { public void testBulkLoading() throws Exception { TableName tn = helper.createTableWithRegions(1); // Set a quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); admin.compactionSwitch(false, admin.getRegionServers().stream().map(ServerName::toString).collect(Collectors.toList())); Map> family2Files = helper.generateFileToLoad(tn, 3, 550); // Make sure the files are about as long as we expect FileSystem fs = TEST_UTIL.getTestFileSystem(); - FileStatus[] files = fs.listStatus( - new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files")); + FileStatus[] files = + fs.listStatus(new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files")); long totalSize = 0; for (FileStatus file : files) { - assertTrue( - "Expected the file, " + file.getPath() + ", length to be larger than 25KB, but was " - + file.getLen(), - file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE); + assertTrue("Expected the file, " + file.getPath() + + ", length to be larger than 25KB, but was " + file.getLen(), + file.getLen() > 25 * 
SpaceQuotaHelperForTests.ONE_KILOBYTE); totalSize += file.getLen(); } @@ -251,8 +256,8 @@ boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { public void testSnapshotSizes() throws Exception { TableName tn = helper.createTableWithRegions(1); // Set a quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); // Write some data and flush it to disk. @@ -274,7 +279,8 @@ public void testSnapshotSizes() throws Exception { // Wait for the table to show the usage TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { return snapshot.getUsage() == storeFileSize; } }); @@ -292,7 +298,8 @@ public void testSnapshotSizes() throws Exception { // We have a new file created by the majc referenced by the table and the snapshot still // referencing the old file. TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { return snapshot.getUsage() >= 2 * storeFileSize; } }); @@ -301,14 +308,14 @@ public void testSnapshotSizes() throws Exception { Result r = quotaTable.get(QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot1)); assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty()); assertTrue(r.advance()); - assertEquals("The snapshot's size should be the same as the origin store file", - storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current())); + assertEquals("The snapshot's size should be the same as the origin store file", storeFileSize, + QuotaTableUtil.parseSnapshotSize(r.current())); r = quotaTable.get(QuotaTableUtil.createGetNamespaceSnapshotSize(tn.getNamespaceAsString())); assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty()); assertTrue(r.advance()); - assertEquals("The snapshot's size should be the same as the origin store file", - storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current())); + assertEquals("The snapshot's size should be the same as the origin store file", storeFileSize, + QuotaTableUtil.parseSnapshotSize(r.current())); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotaManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotaManager.java index cf879cea57a8..a76c7ee546ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotaManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotaManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java index f74003f730ee..c4ada8da5ea4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -104,8 +104,8 @@ public void testTableSpaceQuotaRemoved() throws Exception { assertEquals(0, getNumSpaceQuotas()); // Set space quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, 1024L, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = + QuotaSettingsFactory.limitTableSpace(tn, 1024L, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); assertEquals(1, getNumSpaceQuotas()); @@ -210,8 +210,8 @@ public void testNamespaceSpaceQuotaRemoved() throws Exception { assertEquals(0, getNumSpaceQuotas()); // Set a quota - QuotaSettings settings = QuotaSettingsFactory.limitNamespaceSpace( - ns, 1024L, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = + QuotaSettingsFactory.limitNamespaceSpace(ns, 1024L, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); assertEquals(1, getNumSpaceQuotas()); @@ -312,9 +312,8 @@ public void testObserverAddedByDefault() throws Exception { final HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); final MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost(); Set coprocessorNames = cpHost.getCoprocessors(); - assertTrue( - "Did not find MasterQuotasObserver in list of CPs: " + coprocessorNames, - coprocessorNames.contains(MasterQuotasObserver.class.getSimpleName())); + assertTrue("Did not find MasterQuotasObserver in list of CPs: " + coprocessorNames, + coprocessorNames.contains(MasterQuotasObserver.class.getSimpleName())); } public boolean namespaceExists(String ns) throws IOException { @@ -351,15 +350,14 @@ public int getThrottleQuotas() throws Exception { private void createTable(Admin admin, TableName tn) throws Exception { // Create a table - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tn); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tn); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("F1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("F1")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); } - private void dropTable(Admin admin, TableName tn) throws Exception { + private void dropTable(Admin admin, TableName tn) throws Exception { admin.disableTable(tn); admin.deleteTable(tn); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserverWithMocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserverWithMocks.java index abb0d8b01d08..be60e9e9986c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserverWithMocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserverWithMocks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the 
Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,8 +56,7 @@ public class TestMasterQuotasObserverWithMocks { public void setup() { conf = HBaseConfiguration.create(); master = mock(HMaster.class); - doCallRealMethod().when(master).updateConfigurationForQuotasObserver( - any()); + doCallRealMethod().when(master).updateConfigurationForQuotasObserver(any()); } @Test @@ -80,11 +79,9 @@ public void testAppendsObserver() { master.updateConfigurationForQuotasObserver(conf); Set coprocs = new HashSet<>(conf.getStringCollection(MASTER_COPROCESSOR_CONF_KEY)); assertEquals(2, coprocs.size()); - assertTrue( - "Observed coprocessors were: " + coprocs, - coprocs.contains(AccessController.class.getName())); - assertTrue( - "Observed coprocessors were: " + coprocs, - coprocs.contains(MasterQuotasObserver.class.getName())); + assertTrue("Observed coprocessors were: " + coprocs, + coprocs.contains(AccessController.class.getName())); + assertTrue("Observed coprocessors were: " + coprocs, + coprocs.contains(MasterQuotasObserver.class.getName())); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java index 99a0f3b98b41..f4a98375390a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -81,12 +81,8 @@ public void testGetSpaceQuota() throws Exception { NamespaceQuotaSnapshotStore mockStore = mock(NamespaceQuotaSnapshotStore.class); when(mockStore.getSpaceQuota(any())).thenCallRealMethod(); - Quotas quotaWithSpace = Quotas.newBuilder().setSpace( - SpaceQuota.newBuilder() - .setSoftLimit(1024L) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build()) - .build(); + Quotas quotaWithSpace = Quotas.newBuilder().setSpace(SpaceQuota.newBuilder().setSoftLimit(1024L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build()).build(); Quotas quotaWithoutSpace = Quotas.newBuilder().build(); AtomicReference quotaRef = new AtomicReference<>(); @@ -110,8 +106,7 @@ public void testTargetViolationState() throws IOException { TableName tn1 = TableName.valueOf(NS, "tn1"); TableName tn2 = TableName.valueOf(NS, "tn2"); TableName tn3 = TableName.valueOf("tn3"); - SpaceQuota quota = SpaceQuota.newBuilder() - .setSoftLimit(ONE_MEGABYTE) + SpaceQuota quota = SpaceQuota.newBuilder().setSoftLimit(ONE_MEGABYTE) .setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(SpaceViolationPolicy.DISABLE)) .build(); @@ -119,37 +114,31 @@ public void testTargetViolationState() throws IOException { // immediately violate the quota. 
for (int i = 0; i < 3; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn3) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), - 5L * ONE_MEGABYTE); + regionReports.put(RegionInfoBuilder.newBuilder(tn3).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 5L * ONE_MEGABYTE); } - regionReports.put(RegionInfoBuilder.newBuilder(tn1) - .setStartKey(Bytes.toBytes(0)) - .setEndKey(Bytes.toBytes(1)) - .build(), 1024L * 512L); - regionReports.put(RegionInfoBuilder.newBuilder(tn1) - .setStartKey(Bytes.toBytes(1)) - .setEndKey(Bytes.toBytes(2)) - .build(), 1024L * 256L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(0)) + .setEndKey(Bytes.toBytes(1)).build(), + 1024L * 512L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(1)) + .setEndKey(Bytes.toBytes(2)).build(), + 1024L * 256L); // Below the quota assertEquals(false, store.getTargetState(NS, quota).getQuotaStatus().isInViolation()); - regionReports.put(RegionInfoBuilder.newBuilder(tn2) - .setStartKey(Bytes.toBytes(2)) - .setEndKey(Bytes.toBytes(3)) - .build(), 1024L * 256L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2).setStartKey(Bytes.toBytes(2)) + .setEndKey(Bytes.toBytes(3)).build(), + 1024L * 256L); // Equal to the quota is still in observance assertEquals(false, store.getTargetState(NS, quota).getQuotaStatus().isInViolation()); - regionReports.put(RegionInfoBuilder.newBuilder(tn2) - .setStartKey(Bytes.toBytes(3)) - .setEndKey(Bytes.toBytes(4)) - .build(), 1024L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2).setStartKey(Bytes.toBytes(3)) + .setEndKey(Bytes.toBytes(4)).build(), + 1024L); // Exceeds the quota, should be in violation assertEquals(true, store.getTargetState(NS, quota).getQuotaStatus().isInViolation()); @@ -167,28 +156,24 @@ public void testFilterRegionsByNamespace() { assertEquals(0, size(store.filterBySubject("asdf"))); for (int i = 0; i < 5; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn1) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } for (int i = 0; i < 3; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn2) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } for (int i = 0; i < 10; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn3) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn3).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } for (int i = 0; i < 8; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn4) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn4).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } assertEquals(26, regionReports.size()); assertEquals(5, size(store.filterBySubject(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java index 50ee386542e0..f43268e03ce1 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import java.util.List; import java.util.Objects; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -67,9 +66,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest; /** - * minicluster tests that validate that quota entries are properly set in the quota table + * minicluster tests that validate that quota entries are properly set in the quota table */ -@Category({ClientTests.class, LargeTests.class}) +@Category({ ClientTests.class, LargeTests.class }) public class TestQuotaAdmin { @ClassRule @@ -119,18 +118,18 @@ public void testThrottleType() throws Exception { String userName = User.getCurrent().getShortName(); admin.setQuota( - QuotaSettingsFactory.throttleUser(userName, ThrottleType.READ_NUMBER, 6, TimeUnit.MINUTES)); - admin.setQuota(QuotaSettingsFactory - .throttleUser(userName, ThrottleType.WRITE_NUMBER, 12, TimeUnit.MINUTES)); + QuotaSettingsFactory.throttleUser(userName, ThrottleType.READ_NUMBER, 6, TimeUnit.MINUTES)); + admin.setQuota( + QuotaSettingsFactory.throttleUser(userName, ThrottleType.WRITE_NUMBER, 12, TimeUnit.MINUTES)); admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName, true)); try (QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration())) { int countThrottle = 0; int countGlobalBypass = 0; - for (QuotaSettings settings: scanner) { + for (QuotaSettings settings : scanner) { switch (settings.getQuotaType()) { case THROTTLE: - ThrottleSettings throttle = (ThrottleSettings)settings; + ThrottleSettings throttle = (ThrottleSettings) settings; if (throttle.getSoftLimit() == 6) { assertEquals(ThrottleType.READ_NUMBER, throttle.getThrottleType()); } else if (throttle.getSoftLimit() == 12) { @@ -166,18 +165,18 @@ public void testSimpleScan() throws Exception { Admin admin = TEST_UTIL.getAdmin(); String userName = User.getCurrent().getShortName(); - admin.setQuota(QuotaSettingsFactory - .throttleUser(userName, ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES)); + admin.setQuota(QuotaSettingsFactory.throttleUser(userName, ThrottleType.REQUEST_NUMBER, 6, + TimeUnit.MINUTES)); admin.setQuota(QuotaSettingsFactory.bypassGlobals(userName, true)); try (QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration())) { int countThrottle = 0; int countGlobalBypass = 0; - for (QuotaSettings settings: scanner) { + for (QuotaSettings settings : scanner) { LOG.debug(Objects.toString(settings)); switch (settings.getQuotaType()) { case THROTTLE: - ThrottleSettings throttle = (ThrottleSettings)settings; + ThrottleSettings throttle = (ThrottleSettings) settings; assertEquals(userName, throttle.getUserName()); assertEquals(null, throttle.getTableName()); assertEquals(null, throttle.getNamespace()); @@ -218,14 +217,14 @@ public void testMultiQuotaThrottling() throws Exception { // Set up the quota. 
admin.setQuota(QuotaSettingsFactory.throttleTable(tableName, ThrottleType.WRITE_NUMBER, 6, - TimeUnit.SECONDS)); + TimeUnit.SECONDS)); Thread.sleep(1000); - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegionServerRpcQuotaManager(). - getQuotaCache().triggerCacheRefresh(); + TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegionServerRpcQuotaManager().getQuotaCache() + .triggerCacheRefresh(); Thread.sleep(1000); - Table t = TEST_UTIL.getConnection().getTable(tableName); + Table t = TEST_UTIL.getConnection().getTable(tableName); try { int size = 5; List actions = new ArrayList(); @@ -245,41 +244,39 @@ public void testMultiQuotaThrottling() throws Exception { } } - @Test public void testQuotaRetrieverFilter() throws Exception { Admin admin = TEST_UTIL.getAdmin(); - TableName[] tables = new TableName[] { - TableName.valueOf("T0"), TableName.valueOf("T01"), TableName.valueOf("NS0:T2"), - }; + TableName[] tables = new TableName[] { TableName.valueOf("T0"), TableName.valueOf("T01"), + TableName.valueOf("NS0:T2"), }; String[] namespaces = new String[] { "NS0", "NS01", "NS2" }; String[] users = new String[] { "User0", "User01", "User2" }; - for (String user: users) { - admin.setQuota(QuotaSettingsFactory - .throttleUser(user, ThrottleType.REQUEST_NUMBER, 1, TimeUnit.MINUTES)); + for (String user : users) { + admin.setQuota( + QuotaSettingsFactory.throttleUser(user, ThrottleType.REQUEST_NUMBER, 1, TimeUnit.MINUTES)); - for (TableName table: tables) { - admin.setQuota(QuotaSettingsFactory - .throttleUser(user, table, ThrottleType.REQUEST_NUMBER, 2, TimeUnit.MINUTES)); + for (TableName table : tables) { + admin.setQuota(QuotaSettingsFactory.throttleUser(user, table, ThrottleType.REQUEST_NUMBER, + 2, TimeUnit.MINUTES)); } - for (String ns: namespaces) { - admin.setQuota(QuotaSettingsFactory - .throttleUser(user, ns, ThrottleType.REQUEST_NUMBER, 3, TimeUnit.MINUTES)); + for (String ns : namespaces) { + admin.setQuota(QuotaSettingsFactory.throttleUser(user, ns, ThrottleType.REQUEST_NUMBER, 3, + TimeUnit.MINUTES)); } } assertNumResults(21, null); - for (TableName table: tables) { - admin.setQuota(QuotaSettingsFactory - .throttleTable(table, ThrottleType.REQUEST_NUMBER, 4, TimeUnit.MINUTES)); + for (TableName table : tables) { + admin.setQuota(QuotaSettingsFactory.throttleTable(table, ThrottleType.REQUEST_NUMBER, 4, + TimeUnit.MINUTES)); } assertNumResults(24, null); - for (String ns: namespaces) { - admin.setQuota(QuotaSettingsFactory - .throttleNamespace(ns, ThrottleType.REQUEST_NUMBER, 5, TimeUnit.MINUTES)); + for (String ns : namespaces) { + admin.setQuota(QuotaSettingsFactory.throttleNamespace(ns, ThrottleType.REQUEST_NUMBER, 5, + TimeUnit.MINUTES)); } assertNumResults(27, null); @@ -293,8 +290,8 @@ public void testQuotaRetrieverFilter() throws Exception { assertNumResults(3, new QuotaFilter().setUserFilter("User.*").setNamespaceFilter("NS0")); assertNumResults(0, new QuotaFilter().setUserFilter("User.*").setNamespaceFilter("NS")); assertNumResults(9, new QuotaFilter().setUserFilter("User.*").setNamespaceFilter("NS.*")); - assertNumResults(6, new QuotaFilter().setUserFilter("User.*") - .setTableFilter("T0").setNamespaceFilter("NS0")); + assertNumResults(6, + new QuotaFilter().setUserFilter("User.*").setTableFilter("T0").setNamespaceFilter("NS0")); assertNumResults(1, new QuotaFilter().setTableFilter("T0")); assertNumResults(0, new QuotaFilter().setTableFilter("T")); assertNumResults(2, new QuotaFilter().setTableFilter("T.*")); @@ -303,23 +300,23 @@ public void testQuotaRetrieverFilter() 
throws Exception { assertNumResults(0, new QuotaFilter().setNamespaceFilter("NS")); assertNumResults(3, new QuotaFilter().setNamespaceFilter("NS.*")); - for (String user: users) { + for (String user : users) { admin.setQuota(QuotaSettingsFactory.unthrottleUser(user)); - for (TableName table: tables) { + for (TableName table : tables) { admin.setQuota(QuotaSettingsFactory.unthrottleUser(user, table)); } - for (String ns: namespaces) { + for (String ns : namespaces) { admin.setQuota(QuotaSettingsFactory.unthrottleUser(user, ns)); } } assertNumResults(6, null); - for (TableName table: tables) { + for (TableName table : tables) { admin.setQuota(QuotaSettingsFactory.unthrottleTable(table)); } assertNumResults(3, null); - for (String ns: namespaces) { + for (String ns : namespaces) { admin.setQuota(QuotaSettingsFactory.unthrottleNamespace(ns)); } assertNumResults(0, null); @@ -384,8 +381,8 @@ public void testSetModifyRemoveSpaceQuota() throws Exception { final TableName tn = TableName.valueOf("sq_table2"); final long originalSizeLimit = 1024L * 1024L * 1024L * 1024L * 5L; // 5TB final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_WRITES; - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, originalSizeLimit, - violationPolicy); + QuotaSettings settings = + QuotaSettingsFactory.limitTableSpace(tn, originalSizeLimit, violationPolicy); admin.setQuota(settings); // Verify the Quotas in the table @@ -412,8 +409,8 @@ public void testSetModifyRemoveSpaceQuota() throws Exception { // Setting a new size and policy should be reflected final long newSizeLimit = 1024L * 1024L * 1024L * 1024L; // 1TB final SpaceViolationPolicy newViolationPolicy = SpaceViolationPolicy.NO_WRITES_COMPACTIONS; - QuotaSettings newSettings = QuotaSettingsFactory.limitTableSpace(tn, newSizeLimit, - newViolationPolicy); + QuotaSettings newSettings = + QuotaSettingsFactory.limitTableSpace(tn, newSizeLimit, newViolationPolicy); admin.setQuota(newSettings); // Verify the new Quotas in the table @@ -683,14 +680,14 @@ public void testQuotaScope() throws Exception { QuotaScope.CLUSTER); admin.setQuota(QuotaSettingsFactory.unthrottleUser(user)); - // set CLUSTER quota scope for user and table + // set CLUSTER quota scope for user and table admin.setQuota(QuotaSettingsFactory.throttleUser(user, tableName, ThrottleType.REQUEST_NUMBER, 10, TimeUnit.MINUTES, QuotaScope.CLUSTER)); verifyRecordPresentInQuotaTable(ThrottleType.REQUEST_NUMBER, 10, TimeUnit.MINUTES, QuotaScope.CLUSTER); admin.setQuota(QuotaSettingsFactory.unthrottleUser(user)); - // set CLUSTER quota scope for user and namespace + // set CLUSTER quota scope for user and namespace admin.setQuota(QuotaSettingsFactory.throttleUser(user, namespace, ThrottleType.REQUEST_NUMBER, 10, TimeUnit.MINUTES, QuotaScope.CLUSTER)); verifyRecordPresentInQuotaTable(ThrottleType.REQUEST_NUMBER, 10, TimeUnit.MINUTES, @@ -750,8 +747,8 @@ private void verifyNotFetchableViaAPI(Admin admin) throws Exception { private void assertRPCQuota(ThrottleType type, long limit, TimeUnit tu, QuotaScope scope, Cell cell) throws Exception { - Quotas q = QuotaTableUtil - .quotasFromData(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + Quotas q = QuotaTableUtil.quotasFromData(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength()); assertTrue("Quota should have rpc quota defined", q.hasThrottle()); QuotaProtos.Throttle rpcQuota = q.getThrottle(); @@ -804,40 +801,41 @@ private void assertRPCQuota(ThrottleType type, long limit, TimeUnit tu, QuotaSco 
private void assertRPCQuota(ThrottleType type, long limit, TimeUnit tu, QuotaSettings actualSettings) throws Exception { - assertTrue( - "The actual QuotaSettings was not an instance of " + ThrottleSettings.class + " but of " - + actualSettings.getClass(), actualSettings instanceof ThrottleSettings); + assertTrue("The actual QuotaSettings was not an instance of " + ThrottleSettings.class + + " but of " + actualSettings.getClass(), + actualSettings instanceof ThrottleSettings); QuotaProtos.ThrottleRequest throttleRequest = ((ThrottleSettings) actualSettings).getProto(); assertEquals(limit, throttleRequest.getTimedQuota().getSoftLimit()); assertEquals(ProtobufUtil.toProtoTimeUnit(tu), throttleRequest.getTimedQuota().getTimeUnit()); assertEquals(ProtobufUtil.toProtoThrottleType(type), throttleRequest.getType()); } - private void assertSpaceQuota( - long sizeLimit, SpaceViolationPolicy violationPolicy, Cell cell) throws Exception { - Quotas q = QuotaTableUtil.quotasFromData( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + private void assertSpaceQuota(long sizeLimit, SpaceViolationPolicy violationPolicy, Cell cell) + throws Exception { + Quotas q = QuotaTableUtil.quotasFromData(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength()); assertTrue("Quota should have space quota defined", q.hasSpace()); QuotaProtos.SpaceQuota spaceQuota = q.getSpace(); assertEquals(sizeLimit, spaceQuota.getSoftLimit()); assertEquals(violationPolicy, ProtobufUtil.toViolationPolicy(spaceQuota.getViolationPolicy())); } - private void assertSpaceQuota( - long sizeLimit, SpaceViolationPolicy violationPolicy, QuotaSettings actualSettings) { + private void assertSpaceQuota(long sizeLimit, SpaceViolationPolicy violationPolicy, + QuotaSettings actualSettings) { assertTrue("The actual QuotaSettings was not an instance of " + SpaceLimitSettings.class - + " but of " + actualSettings.getClass(), actualSettings instanceof SpaceLimitSettings); + + " but of " + actualSettings.getClass(), + actualSettings instanceof SpaceLimitSettings); SpaceLimitRequest spaceLimitRequest = ((SpaceLimitSettings) actualSettings).getProto(); assertEquals(sizeLimit, spaceLimitRequest.getQuota().getSoftLimit()); assertEquals(violationPolicy, - ProtobufUtil.toViolationPolicy(spaceLimitRequest.getQuota().getViolationPolicy())); + ProtobufUtil.toViolationPolicy(spaceLimitRequest.getQuota().getViolationPolicy())); } private int countResults(final QuotaFilter filter) throws Exception { QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration(), filter); try { int count = 0; - for (QuotaSettings settings: scanner) { + for (QuotaSettings settings : scanner) { LOG.debug(Objects.toString(settings)); count++; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChore.java index 035b87573bc2..f6dd47a7f6e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -65,26 +65,23 @@ public void testNumRegionsForTable() { final int numTable1Regions = 10; final int numTable2Regions = 15; final int numTable3Regions = 8; - Map regionReports = new HashMap<>(); + Map regionReports = new HashMap<>(); for (int i = 0; i < numTable1Regions; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn1) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } for (int i = 0; i < numTable2Regions; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn2) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } for (int i = 0; i < numTable3Regions; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn3) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn3).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } TableQuotaSnapshotStore store = new TableQuotaSnapshotStore(conn, chore, regionReports); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java index 28529acd7d9f..bcd65fac3948 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java @@ -111,8 +111,8 @@ public boolean evaluate() throws Exception { // Create a table final TableName tn = TableName.valueOf("reportExpiration"); - TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tn).setColumnFamily( - ColumnFamilyDescriptorBuilder.of(FAM1)).build(); + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tn) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM1)).build(); TEST_UTIL.getAdmin().createTable(tableDesc); // No reports right after we created this table. 
@@ -163,8 +163,8 @@ public boolean evaluate() throws Exception { // Create a table final TableName tn = TableName.valueOf("quotaAcceptanceWithoutReports"); - TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tn).setColumnFamily( - ColumnFamilyDescriptorBuilder.of(FAM1)).build(); + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tn) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM1)).build(); TEST_UTIL.getAdmin().createTable(tableDesc); // Set a quota @@ -224,11 +224,10 @@ public boolean evaluate() throws Exception { assertFalse("Quota should not be in violation", snapshot.getQuotaStatus().isInViolation()); } - private SpaceQuotaSnapshot getSnapshotForTable( - Connection conn, TableName tn) throws IOException { + private SpaceQuotaSnapshot getSnapshotForTable(Connection conn, TableName tn) throws IOException { try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME); ResultScanner scanner = quotaTable.getScanner(QuotaTableUtil.makeQuotaSnapshotScan())) { - Map activeViolations = new HashMap<>(); + Map activeViolations = new HashMap<>(); for (Result result : scanner) { try { QuotaTableUtil.extractQuotaSnapshot(result, activeViolations); @@ -242,9 +241,9 @@ private SpaceQuotaSnapshot getSnapshotForTable( } } - private int getRegionReportsForTable(Map reports, TableName tn) { + private int getRegionReportsForTable(Map reports, TableName tn) { int numReports = 0; - for (Entry entry : reports.entrySet()) { + for (Entry entry : reports.entrySet()) { if (tn.equals(entry.getKey().getTable())) { numReports++; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java index 41db656042b4..f8ee82b188c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -88,7 +88,7 @@ public static void setUp() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); SpaceQuotaHelperForTests.updateConfigForQuotas(conf); conf.setClass(SpaceQuotaSnapshotNotifierFactory.SNAPSHOT_NOTIFIER_KEY, - SpaceQuotaSnapshotNotifierForTest.class, SpaceQuotaSnapshotNotifier.class); + SpaceQuotaSnapshotNotifierForTest.class, SpaceQuotaSnapshotNotifier.class); TEST_UTIL.startMiniCluster(1); } @@ -113,8 +113,7 @@ public void removeAllQuotas() throws Exception { } master = TEST_UTIL.getMiniHBaseCluster().getMaster(); - snapshotNotifier = - (SpaceQuotaSnapshotNotifierForTest) master.getSpaceQuotaSnapshotNotifier(); + snapshotNotifier = (SpaceQuotaSnapshotNotifierForTest) master.getSpaceQuotaSnapshotNotifier(); assertNotNull(snapshotNotifier); snapshotNotifier.clearSnapshots(); chore = master.getQuotaObserverChore(); @@ -132,7 +131,7 @@ public void testTableViolatesQuota() throws Exception { // Write more data than should be allowed helper.writeData(tn, 3L * SpaceQuotaHelperForTests.ONE_MEGABYTE); - Map quotaSnapshots = snapshotNotifier.copySnapshots(); + Map quotaSnapshots = snapshotNotifier.copySnapshots(); boolean foundSnapshot = false; while (!foundSnapshot) { if (quotaSnapshots.isEmpty()) { @@ -141,7 +140,8 @@ public void testTableViolatesQuota() throws Exception { sleepWithInterrupt(DEFAULT_WAIT_MILLIS); quotaSnapshots = snapshotNotifier.copySnapshots(); } else { - Entry entry = Iterables.getOnlyElement(quotaSnapshots.entrySet()); + Entry entry = + Iterables.getOnlyElement(quotaSnapshots.entrySet()); assertEquals(tn, entry.getKey()); final SpaceQuotaSnapshot snapshot = entry.getValue(); if (!snapshot.getQuotaStatus().isInViolation()) { @@ -155,14 +155,15 @@ public void testTableViolatesQuota() throws Exception { } Entry entry = - Iterables.getOnlyElement(quotaSnapshots.entrySet()); + Iterables.getOnlyElement(quotaSnapshots.entrySet()); assertEquals(tn, entry.getKey()); final SpaceQuotaSnapshot snapshot = entry.getValue(); assertEquals("Snapshot was " + snapshot, violationPolicy, snapshot.getQuotaStatus().getPolicy().get()); assertEquals(sizeLimit, snapshot.getLimit()); - assertTrue("The usage should be greater than the limit, but were " + snapshot.getUsage() + - " and " + snapshot.getLimit() + ", respectively", snapshot.getUsage() > snapshot.getLimit()); + assertTrue("The usage should be greater than the limit, but were " + snapshot.getUsage() + + " and " + snapshot.getLimit() + ", respectively", + snapshot.getUsage() > snapshot.getLimit()); } @Test @@ -183,21 +184,21 @@ public void testNamespaceViolatesQuota() throws Exception { final long sizeLimit = 5L * SpaceQuotaHelperForTests.ONE_MEGABYTE; final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.DISABLE; - QuotaSettings settings = QuotaSettingsFactory.limitNamespaceSpace(namespace, sizeLimit, violationPolicy); + QuotaSettings settings = + QuotaSettingsFactory.limitNamespaceSpace(namespace, sizeLimit, violationPolicy); admin.setQuota(settings); helper.writeData(tn1, 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE); admin.flush(tn1); - Map snapshots = snapshotNotifier.copySnapshots(); + Map snapshots = snapshotNotifier.copySnapshots(); for (int i = 0; i < 5; i++) { // Check a few times to make sure we don't prematurely move to violation - assertEquals( - "Should not see any quota violations after writing 2MB of data", 0, - numSnapshotsInViolation(snapshots)); + assertEquals("Should not see any quota violations after 
writing 2MB of data", 0, + numSnapshotsInViolation(snapshots)); try { Thread.sleep(DEFAULT_WAIT_MILLIS); } catch (InterruptedException e) { - LOG.debug("Interrupted while sleeping." , e); + LOG.debug("Interrupted while sleeping.", e); } snapshots = snapshotNotifier.copySnapshots(); } @@ -208,11 +209,11 @@ public void testNamespaceViolatesQuota() throws Exception { for (int i = 0; i < 5; i++) { // Check a few times to make sure we don't prematurely move to violation assertEquals("Should not see any quota violations after writing 4MB of data", 0, - numSnapshotsInViolation(snapshots)); + numSnapshotsInViolation(snapshots)); try { Thread.sleep(DEFAULT_WAIT_MILLIS); } catch (InterruptedException e) { - LOG.debug("Interrupted while sleeping." , e); + LOG.debug("Interrupted while sleeping.", e); } snapshots = snapshotNotifier.copySnapshots(); } @@ -264,20 +265,20 @@ public void testTableQuotaOverridesNamespaceQuota() throws Exception { final long namespaceSizeLimit = 3L * SpaceQuotaHelperForTests.ONE_MEGABYTE; final SpaceViolationPolicy namespaceViolationPolicy = SpaceViolationPolicy.DISABLE; QuotaSettings namespaceSettings = QuotaSettingsFactory.limitNamespaceSpace(namespace, - namespaceSizeLimit, namespaceViolationPolicy); + namespaceSizeLimit, namespaceViolationPolicy); admin.setQuota(namespaceSettings); helper.writeData(tn1, 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE); admin.flush(tn1); - Map snapshots = snapshotNotifier.copySnapshots(); + Map snapshots = snapshotNotifier.copySnapshots(); for (int i = 0; i < 5; i++) { // Check a few times to make sure we don't prematurely move to violation assertEquals("Should not see any quota violations after writing 2MB of data: " + snapshots, 0, - numSnapshotsInViolation(snapshots)); + numSnapshotsInViolation(snapshots)); try { Thread.sleep(DEFAULT_WAIT_MILLIS); } catch (InterruptedException e) { - LOG.debug("Interrupted while sleeping." 
, e); + LOG.debug("Interrupted while sleeping.", e); } snapshots = snapshotNotifier.copySnapshots(); } @@ -307,8 +308,8 @@ public void testTableQuotaOverridesNamespaceQuota() throws Exception { // Override the namespace quota with a table quota final long tableSizeLimit = SpaceQuotaHelperForTests.ONE_MEGABYTE; final SpaceViolationPolicy tableViolationPolicy = SpaceViolationPolicy.NO_INSERTS; - QuotaSettings tableSettings = QuotaSettingsFactory.limitTableSpace(tn1, tableSizeLimit, - tableViolationPolicy); + QuotaSettings tableSettings = + QuotaSettingsFactory.limitTableSpace(tn1, tableSizeLimit, tableViolationPolicy); admin.setQuota(tableSettings); // Keep checking for the table quota policy to override the namespace quota @@ -346,7 +347,8 @@ public void testGetAllTablesWithQuotas() throws Exception { TablesWithQuotas tables = chore.fetchAllTablesWithQuotasDefined(); assertEquals("Found tables: " + tables, tablesWithQuotas, tables.getTableQuotaTables()); - assertEquals("Found tables: " + tables, namespaceTablesWithQuotas, tables.getNamespaceQuotaTables()); + assertEquals("Found tables: " + tables, namespaceTablesWithQuotas, + tables.getNamespaceQuotaTables()); } @Test @@ -358,31 +360,32 @@ public void testRpcQuotaTablesAreFiltered() throws Exception { helper.partitionTablesByQuotaTarget(quotas, tablesWithQuotas, namespaceTablesWithQuotas); TableName rpcQuotaTable = helper.createTable(); - TEST_UTIL.getAdmin().setQuota(QuotaSettingsFactory - .throttleTable(rpcQuotaTable, ThrottleType.READ_NUMBER, 6, TimeUnit.MINUTES)); + TEST_UTIL.getAdmin().setQuota(QuotaSettingsFactory.throttleTable(rpcQuotaTable, + ThrottleType.READ_NUMBER, 6, TimeUnit.MINUTES)); // The `rpcQuotaTable` should not be included in this Set TablesWithQuotas tables = chore.fetchAllTablesWithQuotasDefined(); assertEquals("Found tables: " + tables, tablesWithQuotas, tables.getTableQuotaTables()); - assertEquals("Found tables: " + tables, namespaceTablesWithQuotas, tables.getNamespaceQuotaTables()); + assertEquals("Found tables: " + tables, namespaceTablesWithQuotas, + tables.getNamespaceQuotaTables()); } @Test public void testFilterRegions() throws Exception { - Map mockReportedRegions = new HashMap<>(); + Map mockReportedRegions = new HashMap<>(); // Can't mock because of primitive int as a return type -- Mockito // can only handle an Integer. - TablesWithQuotas tables = new TablesWithQuotas(TEST_UTIL.getConnection(), - TEST_UTIL.getConfiguration()) { - @Override - int getNumReportedRegions(TableName table, QuotaSnapshotStore tableStore) { - Integer i = mockReportedRegions.get(table); - if (i == null) { - return 0; - } - return i; - } - }; + TablesWithQuotas tables = + new TablesWithQuotas(TEST_UTIL.getConnection(), TEST_UTIL.getConfiguration()) { + @Override + int getNumReportedRegions(TableName table, QuotaSnapshotStore tableStore) { + Integer i = mockReportedRegions.get(table); + if (i == null) { + return 0; + } + return i; + } + }; // Create the tables TableName tn1 = helper.createTableWithRegions(20); @@ -407,16 +410,16 @@ int getNumReportedRegions(TableName table, QuotaSnapshotStore tableSt @Test public void testFetchSpaceQuota() throws Exception { - Multimap tables = helper.createTablesWithSpaceQuotas(); + Multimap tables = helper.createTablesWithSpaceQuotas(); // Can pass in an empty map, we're not consulting it. chore.initializeSnapshotStores(Collections.emptyMap()); // All tables that were created should have a quota defined. 
- for (Entry<TableName,QuotaSettings> entry : tables.entries()) { + for (Entry<TableName, QuotaSettings> entry : tables.entries()) { final TableName table = entry.getKey(); final QuotaSettings qs = entry.getValue(); assertTrue("QuotaSettings was an instance of " + qs.getClass(), - qs instanceof SpaceLimitSettings); + qs instanceof SpaceLimitSettings); SpaceQuota spaceQuota = null; if (qs.getTableName() != null) { @@ -424,7 +427,8 @@ public void testFetchSpaceQuota() throws Exception { assertNotNull("Could not find table space quota for " + table, spaceQuota); } else if (qs.getNamespace() != null) { spaceQuota = chore.getNamespaceSnapshotStore().getSpaceQuota(table.getNamespaceAsString()); - assertNotNull("Could not find namespace space quota for " + table.getNamespaceAsString(), spaceQuota); + assertNotNull("Could not find namespace space quota for " + table.getNamespaceAsString(), + spaceQuota); } else { fail("Expected table or namespace space quota"); } @@ -437,7 +441,7 @@ public void testFetchSpaceQuota() throws Exception { assertNull(chore.getTableSnapshotStore().getSpaceQuota(tableWithoutQuota)); } - private int numSnapshotsInViolation(Map<TableName,SpaceQuotaSnapshot> snapshots) { + private int numSnapshotsInViolation(Map<TableName, SpaceQuotaSnapshot> snapshots) { int sum = 0; for (SpaceQuotaSnapshot snapshot : snapshots.values()) { if (snapshot.getQuotaStatus().isInViolation()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java index 4c359f0d1bc8..1a18cb1798c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestQuotaState { @ClassRule @@ -133,7 +133,7 @@ public void testQuotaStateUpdateGlobalThrottle() { assertEquals(LAST_UPDATE_2, quotaInfo.getLastUpdate()); assertFalse(quotaInfo.isBypass()); assertThrottleException(quotaInfo.getGlobalLimiter(), - NUM_GLOBAL_THROTTLE_2 - NUM_GLOBAL_THROTTLE_1); + NUM_GLOBAL_THROTTLE_2 - NUM_GLOBAL_THROTTLE_1); // Remove global throttle otherQuotaState = new QuotaState(LAST_UPDATE_3); @@ -188,7 +188,7 @@ public void testQuotaStateUpdateTableThrottle() { assertEquals(LAST_UPDATE_2, quotaInfo.getLastUpdate()); assertFalse(quotaInfo.isBypass()); assertThrottleException(quotaInfo.getTableLimiter(tableNameA), - TABLE_A_THROTTLE_2 - TABLE_A_THROTTLE_1); + TABLE_A_THROTTLE_2 - TABLE_A_THROTTLE_1); assertThrottleException(quotaInfo.getTableLimiter(tableNameC), TABLE_C_THROTTLE); assertNoopLimiter(quotaInfo.getTableLimiter(tableNameB)); @@ -232,11 +232,9 @@ public void testTableThrottleWithBatch() { } private Quotas buildReqNumThrottle(final long limit) { - return Quotas.newBuilder() - .setThrottle(Throttle.newBuilder() - .setReqNum(ProtobufUtil.toTimedQuota(limit, TimeUnit.MINUTES, QuotaScope.MACHINE)) - .build()) - .build(); + return Quotas.newBuilder().setThrottle(Throttle.newBuilder() + .setReqNum(ProtobufUtil.toTimedQuota(limit, TimeUnit.MINUTES, QuotaScope.MACHINE)).build()) + .build(); } private void assertThrottleException(final QuotaLimiter limiter, final int availReqs) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java index 167e7dc97a64..d8ffee4d0f05 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ /** * Test class for the quota status RPCs in the master and regionserver. 
*/ -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestQuotaStatusRPCs { @ClassRule @@ -101,10 +101,10 @@ public void testRegionSizesFromMaster() throws Exception { Waiter.waitFor(TEST_UTIL.getConfiguration(), 30 * 1000, new Predicate() { @Override public boolean evaluate() throws Exception { - Map regionSizes = quotaManager.snapshotRegionSizes(); + Map regionSizes = quotaManager.snapshotRegionSizes(); LOG.trace("Region sizes=" + regionSizes); - return numRegions == countRegionsForTable(tn, regionSizes) && - tableSize <= getTableSize(tn, regionSizes); + return numRegions == countRegionsForTable(tn, regionSizes) + && tableSize <= getTableSize(tn, regionSizes); } }); @@ -122,8 +122,8 @@ public void testQuotaSnapshotsFromRS() throws Exception { final TableName tn = helper.createTableWithRegions(numRegions); // Define the quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = + QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS); TEST_UTIL.getAdmin().setQuota(settings); // Write at least `tableSize` data @@ -144,12 +144,10 @@ public boolean evaluate() throws Exception { @SuppressWarnings("unchecked") Map snapshots = (Map) TEST_UTIL - .getAdmin().getRegionServerSpaceQuotaSnapshots(rs.getServerName()); + .getAdmin().getRegionServerSpaceQuotaSnapshots(rs.getServerName()); SpaceQuotaSnapshot snapshot = snapshots.get(tn); assertNotNull("Did not find snapshot for " + tn, snapshot); - assertTrue( - "Observed table usage was " + snapshot.getUsage(), - snapshot.getUsage() >= tableSize); + assertTrue("Observed table usage was " + snapshot.getUsage(), snapshot.getUsage() >= tableSize); assertEquals(sizeLimit, snapshot.getLimit()); SpaceQuotaStatus pbStatus = snapshot.getQuotaStatus(); assertFalse(pbStatus.isInViolation()); @@ -163,8 +161,8 @@ public void testQuotaEnforcementsFromRS() throws Exception { final TableName tn = helper.createTableWithRegions(numRegions); // Define the quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = + QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS); TEST_UTIL.getAdmin().setQuota(settings); // Write at least `tableSize` data @@ -192,7 +190,7 @@ public boolean evaluate() throws Exception { // We obtain the violations for a RegionServer by observing the snapshots @SuppressWarnings("unchecked") Map snapshots = (Map) TEST_UTIL - .getAdmin().getRegionServerSpaceQuotaSnapshots(rs.getServerName()); + .getAdmin().getRegionServerSpaceQuotaSnapshots(rs.getServerName()); SpaceQuotaSnapshot snapshot = snapshots.get(tn); assertNotNull("Did not find snapshot for " + tn, snapshot); assertTrue(snapshot.getQuotaStatus().isInViolation()); @@ -211,11 +209,11 @@ public void testQuotaStatusFromMaster() throws Exception { final TableName tn = helper.createTableWithRegions(numRegions); // Define the quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = + QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS); TEST_UTIL.getAdmin().setQuota(settings); - QuotaSettings nsSettings = QuotaSettingsFactory.limitNamespaceSpace( - tn.getNamespaceAsString(), nsLimit, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings nsSettings = QuotaSettingsFactory.limitNamespaceSpace(tn.getNamespaceAsString(), + 
nsLimit, SpaceViolationPolicy.NO_INSERTS); TEST_UTIL.getAdmin().setQuota(nsSettings); // Write at least `tableSize` data @@ -227,7 +225,7 @@ public void testQuotaStatusFromMaster() throws Exception { @Override public boolean evaluate() throws Exception { SpaceQuotaSnapshot snapshot = - (SpaceQuotaSnapshot) conn.getAdmin().getCurrentSpaceQuotaSnapshot(tn); + (SpaceQuotaSnapshot) conn.getAdmin().getCurrentSpaceQuotaSnapshot(tn); LOG.info("Table snapshot after initial ingest: " + snapshot); if (snapshot == null) { return false; @@ -241,7 +239,7 @@ public boolean evaluate() throws Exception { @Override public boolean evaluate() throws Exception { SpaceQuotaSnapshot snapshot = (SpaceQuotaSnapshot) conn.getAdmin() - .getCurrentSpaceQuotaSnapshot(tn.getNamespaceAsString()); + .getCurrentSpaceQuotaSnapshot(tn.getNamespaceAsString()); LOG.debug("Namespace snapshot after initial ingest: " + snapshot); if (snapshot == null) { return false; @@ -254,9 +252,9 @@ public boolean evaluate() throws Exception { // Sanity check: the below assertions will fail if we somehow write too much data // and force the table to move into violation before we write the second bit of data. SpaceQuotaSnapshot snapshot = - (SpaceQuotaSnapshot) conn.getAdmin().getCurrentSpaceQuotaSnapshot(tn); + (SpaceQuotaSnapshot) conn.getAdmin().getCurrentSpaceQuotaSnapshot(tn); assertTrue("QuotaSnapshot for " + tn + " should be non-null and not in violation", - snapshot != null && !snapshot.getQuotaStatus().isInViolation()); + snapshot != null && !snapshot.getQuotaStatus().isInViolation()); try { helper.writeData(tn, tableSize * 2L); @@ -269,7 +267,7 @@ public boolean evaluate() throws Exception { @Override public boolean evaluate() throws Exception { SpaceQuotaSnapshot snapshot = - (SpaceQuotaSnapshot) conn.getAdmin().getCurrentSpaceQuotaSnapshot(tn); + (SpaceQuotaSnapshot) conn.getAdmin().getCurrentSpaceQuotaSnapshot(tn); LOG.info("Table snapshot after second ingest: " + snapshot); if (snapshot == null) { return false; @@ -282,7 +280,7 @@ public boolean evaluate() throws Exception { @Override public boolean evaluate() throws Exception { SpaceQuotaSnapshot snapshot = (SpaceQuotaSnapshot) conn.getAdmin() - .getCurrentSpaceQuotaSnapshot(tn.getNamespaceAsString()); + .getCurrentSpaceQuotaSnapshot(tn.getNamespaceAsString()); LOG.debug("Namespace snapshot after second ingest: " + snapshot); if (snapshot == null) { return false; @@ -292,7 +290,7 @@ public boolean evaluate() throws Exception { }); } - private int countRegionsForTable(TableName tn, Map regionSizes) { + private int countRegionsForTable(TableName tn, Map regionSizes) { int size = 0; for (RegionInfo regionInfo : regionSizes.keySet()) { if (tn.equals(regionInfo.getTable())) { @@ -302,9 +300,9 @@ private int countRegionsForTable(TableName tn, Map regionSizes) return size; } - private int getTableSize(TableName tn, Map regionSizes) { + private int getTableSize(TableName tn, Map regionSizes) { int tableSize = 0; - for (Entry entry : regionSizes.entrySet()) { + for (Entry entry : regionSizes.entrySet()) { RegionInfo regionInfo = entry.getKey(); long regionSize = entry.getValue(); if (tn.equals(regionInfo.getTable())) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java index faaac4df3e51..cf443cb38882 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -69,7 +68,7 @@ /** * Test the quota table helpers (e.g. CRUD operations) */ -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestQuotaTableUtil { @ClassRule @@ -119,8 +118,8 @@ public void after() throws IOException { public void testDeleteSnapshots() throws Exception { TableName tn = TableName.valueOf(name.getMethodName()); try (Table t = connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { - Quotas quota = Quotas.newBuilder().setSpace( - QuotaProtos.SpaceQuota.newBuilder().setSoftLimit(7L) + Quotas quota = + Quotas.newBuilder().setSpace(QuotaProtos.SpaceQuota.newBuilder().setSoftLimit(7L) .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_WRITES).build()).build(); QuotaUtil.addTableQuota(connection, tn, quota); @@ -156,8 +155,8 @@ public void testDeleteSnapshots() throws Exception { t.put(QuotaTableUtil.createPutForNamespaceSnapshotSize("ns2", 3L)); t.put(QuotaTableUtil.createPutForNamespaceSnapshotSize("ns3", 3L)); - assertEquals(5,QuotaTableUtil.getTableSnapshots(connection).size()); - assertEquals(3,QuotaTableUtil.getNamespaceSnapshots(connection).size()); + assertEquals(5, QuotaTableUtil.getTableSnapshots(connection).size()); + assertEquals(3, QuotaTableUtil.getNamespaceSnapshots(connection).size()); Multimap tableSnapshotEntriesToRemove = HashMultimap.create(); tableSnapshotEntriesToRemove.put(TableName.valueOf("t1"), "s1"); @@ -181,13 +180,11 @@ public void testDeleteSnapshots() throws Exception { public void testTableQuotaUtil() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - Quotas quota = Quotas.newBuilder() - .setThrottle(Throttle.newBuilder() - .setReqNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .setWriteNum(ProtobufUtil.toTimedQuota(600, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .setReadSize(ProtobufUtil.toTimedQuota(8192, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .build()) - .build(); + Quotas quota = Quotas.newBuilder().setThrottle(Throttle.newBuilder() + .setReqNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE)) + .setWriteNum(ProtobufUtil.toTimedQuota(600, TimeUnit.SECONDS, QuotaScope.MACHINE)) + .setReadSize(ProtobufUtil.toTimedQuota(8192, TimeUnit.SECONDS, QuotaScope.MACHINE)).build()) + .build(); // Add user quota and verify it QuotaUtil.addTableQuota(this.connection, tableName, quota); @@ -204,13 +201,11 @@ public void testTableQuotaUtil() throws Exception { public void testNamespaceQuotaUtil() throws Exception { final String namespace = "testNamespaceQuotaUtilNS"; - Quotas quota = Quotas.newBuilder() - .setThrottle(Throttle.newBuilder() - .setReqNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .setWriteNum(ProtobufUtil.toTimedQuota(600, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .setReadSize(ProtobufUtil.toTimedQuota(8192, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .build()) - .build(); + Quotas quota = Quotas.newBuilder().setThrottle(Throttle.newBuilder() + 
.setReqNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE)) + .setWriteNum(ProtobufUtil.toTimedQuota(600, TimeUnit.SECONDS, QuotaScope.MACHINE)) + .setReadSize(ProtobufUtil.toTimedQuota(8192, TimeUnit.SECONDS, QuotaScope.MACHINE)).build()) + .build(); // Add user quota and verify it QuotaUtil.addNamespaceQuota(this.connection, namespace, quota); @@ -229,25 +224,21 @@ public void testUserQuotaUtil() throws Exception { final String namespace = "testNS"; final String user = "testUser"; - Quotas quotaNamespace = Quotas.newBuilder() - .setThrottle(Throttle.newBuilder() - .setReqNum(ProtobufUtil.toTimedQuota(50000, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .build()) - .build(); + Quotas quotaNamespace = Quotas.newBuilder().setThrottle(Throttle.newBuilder() + .setReqNum(ProtobufUtil.toTimedQuota(50000, TimeUnit.SECONDS, QuotaScope.MACHINE)).build()) + .build(); Quotas quotaTable = Quotas.newBuilder() - .setThrottle(Throttle.newBuilder() - .setReqNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .setWriteNum(ProtobufUtil.toTimedQuota(600, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .setReadSize(ProtobufUtil.toTimedQuota(10000, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .build()) - .build(); - Quotas quota = Quotas.newBuilder() - .setThrottle(Throttle.newBuilder() - .setReqSize(ProtobufUtil.toTimedQuota(8192, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .setWriteSize(ProtobufUtil.toTimedQuota(4096, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .setReadNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE)) - .build()) - .build(); + .setThrottle(Throttle.newBuilder() + .setReqNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE)) + .setWriteNum(ProtobufUtil.toTimedQuota(600, TimeUnit.SECONDS, QuotaScope.MACHINE)) + .setReadSize(ProtobufUtil.toTimedQuota(10000, TimeUnit.SECONDS, QuotaScope.MACHINE)) + .build()) + .build(); + Quotas quota = Quotas.newBuilder().setThrottle(Throttle.newBuilder() + .setReqSize(ProtobufUtil.toTimedQuota(8192, TimeUnit.SECONDS, QuotaScope.MACHINE)) + .setWriteSize(ProtobufUtil.toTimedQuota(4096, TimeUnit.SECONDS, QuotaScope.MACHINE)) + .setReadNum(ProtobufUtil.toTimedQuota(1000, TimeUnit.SECONDS, QuotaScope.MACHINE)).build()) + .build(); // Add user global quota QuotaUtil.addUserQuota(this.connection, user, quota); @@ -283,24 +274,24 @@ public void testUserQuotaUtil() throws Exception { @Test public void testSerDeViolationPolicies() throws Exception { final TableName tn1 = getUniqueTableName(); - final SpaceQuotaSnapshot snapshot1 = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 512L, 1024L); + final SpaceQuotaSnapshot snapshot1 = + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 512L, 1024L); final TableName tn2 = getUniqueTableName(); - final SpaceQuotaSnapshot snapshot2 = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 512L, 1024L); + final SpaceQuotaSnapshot snapshot2 = + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 512L, 1024L); final TableName tn3 = getUniqueTableName(); - final SpaceQuotaSnapshot snapshot3 = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 512L, 1024L); + final SpaceQuotaSnapshot snapshot3 = + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 512L, 1024L); List puts = new ArrayList<>(); puts.add(QuotaTableUtil.createPutForSpaceSnapshot(tn1, snapshot1)); 
puts.add(QuotaTableUtil.createPutForSpaceSnapshot(tn2, snapshot2)); puts.add(QuotaTableUtil.createPutForSpaceSnapshot(tn3, snapshot3)); - final Map expectedPolicies = new HashMap<>(); + final Map expectedPolicies = new HashMap<>(); expectedPolicies.put(tn1, snapshot1); expectedPolicies.put(tn2, snapshot2); expectedPolicies.put(tn3, snapshot3); - final Map actualPolicies = new HashMap<>(); + final Map actualPolicies = new HashMap<>(); try (Table quotaTable = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) { quotaTable.put(puts); ResultScanner scanner = quotaTable.getScanner(QuotaTableUtil.makeQuotaSnapshotScan()); @@ -319,11 +310,11 @@ public void testSerdeTableSnapshotSizes() throws Exception { TableName tn2 = TableName.valueOf("tn2"); try (Table quotaTable = connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { for (int i = 0; i < 3; i++) { - Put p = QuotaTableUtil.createPutForSnapshotSize(tn1, "tn1snap" + i, 1024L * (1+i)); + Put p = QuotaTableUtil.createPutForSnapshotSize(tn1, "tn1snap" + i, 1024L * (1 + i)); quotaTable.put(p); } for (int i = 0; i < 3; i++) { - Put p = QuotaTableUtil.createPutForSnapshotSize(tn2, "tn2snap" + i, 2048L * (1+i)); + Put p = QuotaTableUtil.createPutForSnapshotSize(tn2, "tn2snap" + i, 2048L * (1 + i)); quotaTable.put(p); } @@ -361,15 +352,16 @@ private TableName getUniqueTableName() { return TableName.valueOf(testName.getMethodName() + "_" + tableNameCounter++); } - private void verifyTableSnapshotSize( - Table quotaTable, TableName tn, String snapshotName, long expectedSize) throws IOException { + private void verifyTableSnapshotSize(Table quotaTable, TableName tn, String snapshotName, + long expectedSize) throws IOException { Result r = quotaTable.get(QuotaTableUtil.makeGetForSnapshotSize(tn, snapshotName)); CellScanner cs = r.cellScanner(); assertTrue(cs.advance()); Cell c = cs.current(); - assertEquals(expectedSize, QuotaProtos.SpaceQuotaSnapshot.parseFrom( - UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength())).getQuotaUsage()); + assertEquals(expectedSize, + QuotaProtos.SpaceQuotaSnapshot.parseFrom( + UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength())) + .getQuotaUsage()); assertFalse(cs.advance()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java index 3d063d607101..34d8335c15a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import static org.junit.Assert.assertEquals; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -52,7 +51,7 @@ import org.slf4j.LoggerFactory; @Ignore // Disabled because flakey. Fails ~30% on a resource constrained GCE though not on Apache. 
-@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestQuotaThrottle { @ClassRule @@ -67,11 +66,9 @@ public class TestQuotaThrottle { private final static byte[] FAMILY = Bytes.toBytes("cf"); private final static byte[] QUALIFIER = Bytes.toBytes("q"); - private final static TableName[] TABLE_NAMES = new TableName[] { - TableName.valueOf("TestQuotaAdmin0"), - TableName.valueOf("TestQuotaAdmin1"), - TableName.valueOf("TestQuotaAdmin2") - }; + private final static TableName[] TABLE_NAMES = + new TableName[] { TableName.valueOf("TestQuotaAdmin0"), TableName.valueOf("TestQuotaAdmin1"), + TableName.valueOf("TestQuotaAdmin2") }; private static Table[] tables; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java index 41fcf923e6d6..38f352c845ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ /** * Verify the behaviour of the Rate Limiter. */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestRateLimiter { @ClassRule @@ -95,7 +95,7 @@ private void testWaitInterval(final TimeUnit timeUnit, final long limit, long temp = nowTs + 500; limiter.setNextRefillTime(limiter.getNextRefillTime() + temp); assertFalse(limiter.canExecute()); - //Roll back the nextRefillTime set to continue further testing + // Roll back the nextRefillTime set to continue further testing limiter.setNextRefillTime(limiter.getNextRefillTime() - temp); } } @@ -193,7 +193,8 @@ public void testLimiterBySmallerRate() throws InterruptedException { @Test public void testCanExecuteOfAverageIntervalRateLimiter() throws InterruptedException { RateLimiter limiter = new AverageIntervalRateLimiter(); - // when set limit is 100 per sec, this AverageIntervalRateLimiter will support at max 200 per sec + // when set limit is 100 per sec, this AverageIntervalRateLimiter will support at max 200 per + // sec limiter.set(100, TimeUnit.SECONDS); limiter.setNextRefillTime(EnvironmentEdgeManager.currentTime()); assertEquals(50, testCanExecuteByRate(limiter, 50)); @@ -397,12 +398,9 @@ public void testExtremeLimiters() throws InterruptedException { } /* - * This test case is tricky. Basically, it simulates the following events: - * Thread-1 Thread-2 - * t0: canExecute(100) and consume(100) - * t1: canExecute(100), avail may be increased by 80 - * t2: consume(-80) as actual size is 20 - * It will check if consume(-80) can handle overflow correctly. + * This test case is tricky. Basically, it simulates the following events: Thread-1 Thread-2 t0: + * canExecute(100) and consume(100) t1: canExecute(100), avail may be increased by 80 t2: + * consume(-80) as actual size is 20 It will check if consume(-80) can handle overflow correctly. 
*/ @Test public void testLimiterCompensationOverflow() throws InterruptedException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionServerSpaceQuotaManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionServerSpaceQuotaManager.java index d47efeba8dd5..49520db8b669 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionServerSpaceQuotaManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionServerSpaceQuotaManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -69,15 +69,15 @@ public void testSpacePoliciesFromEnforcements() { when(quotaManager.getActivePoliciesAsMap()).thenCallRealMethod(); NoInsertsViolationPolicyEnforcement noInsertsPolicy = new NoInsertsViolationPolicyEnforcement(); - SpaceQuotaSnapshot noInsertsSnapshot = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 256L, 1024L); + SpaceQuotaSnapshot noInsertsSnapshot = + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 256L, 1024L); noInsertsPolicy.initialize(rss, TableName.valueOf("no_inserts"), noInsertsSnapshot); enforcements.put(noInsertsPolicy.getTableName(), noInsertsPolicy); expectedPolicies.put(noInsertsPolicy.getTableName(), noInsertsSnapshot); NoWritesViolationPolicyEnforcement noWritesPolicy = new NoWritesViolationPolicyEnforcement(); - SpaceQuotaSnapshot noWritesSnapshot = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 512L, 2048L); + SpaceQuotaSnapshot noWritesSnapshot = + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 512L, 2048L); noWritesPolicy.initialize(rss, TableName.valueOf("no_writes"), noWritesSnapshot); enforcements.put(noWritesPolicy.getTableName(), noWritesPolicy); expectedPolicies.put(noWritesPolicy.getTableName(), noWritesSnapshot); @@ -86,22 +86,20 @@ public void testSpacePoliciesFromEnforcements() { new NoWritesCompactionsViolationPolicyEnforcement(); SpaceQuotaSnapshot noWritesCompactionsSnapshot = new SpaceQuotaSnapshot( new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES_COMPACTIONS), 1024L, 4096L); - noWritesCompactionsPolicy.initialize( - rss, TableName.valueOf("no_writes_compactions"), noWritesCompactionsSnapshot); + noWritesCompactionsPolicy.initialize(rss, TableName.valueOf("no_writes_compactions"), + noWritesCompactionsSnapshot); enforcements.put(noWritesCompactionsPolicy.getTableName(), noWritesCompactionsPolicy); - expectedPolicies.put(noWritesCompactionsPolicy.getTableName(), - noWritesCompactionsSnapshot); + expectedPolicies.put(noWritesCompactionsPolicy.getTableName(), noWritesCompactionsSnapshot); DisableTableViolationPolicyEnforcement disablePolicy = new DisableTableViolationPolicyEnforcement(); - SpaceQuotaSnapshot disableSnapshot = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 2048L, 8192L); + SpaceQuotaSnapshot disableSnapshot = + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 2048L, 8192L); disablePolicy.initialize(rss, TableName.valueOf("disable"), disableSnapshot); enforcements.put(disablePolicy.getTableName(), disablePolicy); expectedPolicies.put(disablePolicy.getTableName(), disableSnapshot); - enforcements.put( - TableName.valueOf("no_policy"), new DefaultViolationPolicyEnforcement()); + 
enforcements.put(TableName.valueOf("no_policy"), new DefaultViolationPolicyEnforcement()); Map actualPolicies = quotaManager.getActivePoliciesAsMap(); assertEquals(expectedPolicies, actualPolicies); @@ -110,11 +108,11 @@ public void testSpacePoliciesFromEnforcements() { @Test public void testExceptionOnPolicyEnforcementEnable() throws Exception { final TableName tableName = TableName.valueOf("foo"); - final SpaceQuotaSnapshot snapshot = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 2048L); + final SpaceQuotaSnapshot snapshot = + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 2048L); RegionServerServices rss = mock(RegionServerServices.class); - SpaceViolationPolicyEnforcementFactory factory = mock( - SpaceViolationPolicyEnforcementFactory.class); + SpaceViolationPolicyEnforcementFactory factory = + mock(SpaceViolationPolicyEnforcementFactory.class); SpaceViolationPolicyEnforcement enforcement = mock(SpaceViolationPolicyEnforcement.class); RegionServerSpaceQuotaManager realManager = new RegionServerSpaceQuotaManager(rss, factory); @@ -125,17 +123,17 @@ public void testExceptionOnPolicyEnforcementEnable() throws Exception { Map enforcements = realManager.copyActiveEnforcements(); assertTrue("Expected active enforcements to be empty, but were " + enforcements, - enforcements.isEmpty()); + enforcements.isEmpty()); } @Test public void testExceptionOnPolicyEnforcementDisable() throws Exception { final TableName tableName = TableName.valueOf("foo"); - final SpaceQuotaSnapshot snapshot = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 2048L); + final SpaceQuotaSnapshot snapshot = + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 2048L); RegionServerServices rss = mock(RegionServerServices.class); - SpaceViolationPolicyEnforcementFactory factory = mock( - SpaceViolationPolicyEnforcementFactory.class); + SpaceViolationPolicyEnforcementFactory factory = + mock(SpaceViolationPolicyEnforcementFactory.class); SpaceViolationPolicyEnforcement enforcement = mock(SpaceViolationPolicyEnforcement.class); RegionServerSpaceQuotaManager realManager = new RegionServerSpaceQuotaManager(rss, factory); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeImpl.java index 921776245827..1eca9aabc832 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -24,7 +25,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRegionSizeImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeReportingChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeReportingChore.java index 6541cdc64b0c..c754c759a56e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeReportingChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeReportingChore.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +27,6 @@ import java.util.Collections; import java.util.HashSet; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -40,7 +40,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRegionSizeReportingChore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -51,14 +51,13 @@ public void testDefaultConfigurationProperties() { final Configuration conf = getDefaultHBaseConfiguration(); final HRegionServer rs = mockRegionServer(conf); RegionSizeReportingChore chore = new RegionSizeReportingChore(rs); + assertEquals(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT, + chore.getInitialDelay()); + assertEquals(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT, + chore.getPeriod()); assertEquals( - RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT, - chore.getInitialDelay()); - assertEquals( - RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT, chore.getPeriod()); - assertEquals( - TimeUnit.valueOf(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_TIMEUNIT_DEFAULT), - chore.getTimeUnit()); + TimeUnit.valueOf(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_TIMEUNIT_DEFAULT), + chore.getTimeUnit()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeStoreImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeStoreImpl.java index 688fde07850f..49e0185cbb7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeStoreImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeStoreImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +26,6 @@ import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -36,7 +36,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRegionSizeStoreImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -75,8 +75,8 @@ public void testSizeUpdates() { store.put(INFOB, 128L); assertEquals(2, store.size()); - Map records = new HashMap<>(); - for (Entry entry : store) { + Map records = new HashMap<>(); + for (Entry entry : store) { records.put(entry.getKey(), entry.getValue()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java index 5b475ae43431..9e6ddc3e882a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -99,7 +98,7 @@ public void testBasicRegionSizeReports() throws Exception { HMaster master = cluster.getMaster(); MasterQuotaManager quotaManager = master.getMasterQuotaManager(); - Map regionSizes = quotaManager.snapshotRegionSizes(); + Map regionSizes = quotaManager.snapshotRegionSizes(); // Wait until we get all of the region reports for our table // The table may split, so make sure we have at least as many as expected right after we // finished writing the data. @@ -118,12 +117,12 @@ public void testBasicRegionSizeReports() throws Exception { totalRegionSize += regionSize; } assertTrue("Expected region size report to exceed " + bytesWritten + ", but was " - + totalRegionSize + ". RegionSizes=" + regionSizes, bytesWritten < totalRegionSize); + + totalRegionSize + ". RegionSizes=" + regionSizes, + bytesWritten < totalRegionSize); } /** * Writes at least {@code sizeInBytes} bytes of data to HBase and returns the TableName used. - * * @param sizeInBytes The amount of data to write in bytes. 
* @return The table the data was written to */ @@ -139,14 +138,13 @@ private TableName writeData(long sizeInBytes) throws IOException { } // Create the table - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tn); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tn); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(F1)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(F1)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - admin.createTable(tableDescriptorBuilder.build(), Bytes.toBytes("1"), - Bytes.toBytes("9"), NUM_SPLITS); + admin.createTable(tableDescriptorBuilder.build(), Bytes.toBytes("1"), Bytes.toBytes("9"), + NUM_SPLITS); final Table table = conn.getTable(tn); try { @@ -188,14 +186,13 @@ private TableName writeData(long sizeInBytes) throws IOException { /** * Computes the number of regions for the given table that have a positive size. - * * @param tn The TableName in question * @param regions A collection of region sizes * @return The number of regions for the given table. */ - private int numRegionsForTable(TableName tn, Map regions) { + private int numRegionsForTable(TableName tn, Map regions) { int sum = 0; - for (Entry entry : regions.entrySet()) { + for (Entry entry : regions.entrySet()) { if (tn.equals(entry.getKey().getTable()) && 0 < entry.getValue()) { sum++; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java index b6b650728549..c8c706b54bfd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.Cell; @@ -114,9 +113,8 @@ public void setup() throws Exception { helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, COUNTER); master = TEST_UTIL.getHBaseCluster().getMaster(); helper.removeAllQuotas(conn); - testChore = new SnapshotQuotaObserverChore( - TEST_UTIL.getConnection(), TEST_UTIL.getConfiguration(), master.getFileSystem(), master, - null); + testChore = new SnapshotQuotaObserverChore(TEST_UTIL.getConnection(), + TEST_UTIL.getConfiguration(), master.getFileSystem(), master, null); } @Test @@ -126,17 +124,17 @@ public void testSnapshotsFromTables() throws Exception { TableName tn3 = helper.createTableWithRegions(1); // Set a space quota on table 1 and 2 (but not 3) - admin.setQuota(QuotaSettingsFactory.limitTableSpace( - tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS)); - admin.setQuota(QuotaSettingsFactory.limitTableSpace( - tn2, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS)); + admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, + SpaceViolationPolicy.NO_INSERTS)); + admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn2, SpaceQuotaHelperForTests.ONE_GIGABYTE, + SpaceViolationPolicy.NO_INSERTS)); // Create snapshots on each table (we didn't write any data, so just skipflush) admin.snapshot(new SnapshotDescription(tn1 + "snapshot", tn1, SnapshotType.SKIPFLUSH)); admin.snapshot(new SnapshotDescription(tn2 + "snapshot", tn2, SnapshotType.SKIPFLUSH)); admin.snapshot(new SnapshotDescription(tn3 + "snapshot", tn3, SnapshotType.SKIPFLUSH)); - Multimap mapping = testChore.getSnapshotsToComputeSize(); + Multimap mapping = testChore.getSnapshotsToComputeSize(); assertEquals(2, mapping.size()); assertEquals(1, mapping.get(tn1).size()); assertEquals(tn1 + "snapshot", mapping.get(tn1).iterator().next()); @@ -151,8 +149,8 @@ public void testSnapshotsFromTables() throws Exception { assertEquals(1, mapping.get(tn1).size()); assertEquals(tn1 + "snapshot", mapping.get(tn1).iterator().next()); assertEquals(2, mapping.get(tn2).size()); - assertEquals( - new HashSet(Arrays.asList(tn2 + "snapshot", tn2 + "snapshot1")), mapping.get(tn2)); + assertEquals(new HashSet(Arrays.asList(tn2 + "snapshot", tn2 + "snapshot1")), + mapping.get(tn2)); } @Test @@ -172,45 +170,44 @@ public void testSnapshotsFromNamespaces() throws Exception { QuotaSettingsFactory.throttleUser("user", ThrottleType.WRITE_NUMBER, 100, TimeUnit.MINUTES)); // Set a space quota on the namespace - admin.setQuota(QuotaSettingsFactory.limitNamespaceSpace( - ns.getName(), SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS)); + admin.setQuota(QuotaSettingsFactory.limitNamespaceSpace(ns.getName(), + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS)); // Create snapshots on each table (we didn't write any data, so just skipflush) - admin.snapshot(new SnapshotDescription( - tn1.getQualifierAsString() + "snapshot", tn1, SnapshotType.SKIPFLUSH)); - admin.snapshot(new SnapshotDescription( - tn2.getQualifierAsString() + "snapshot", tn2, SnapshotType.SKIPFLUSH)); - admin.snapshot(new SnapshotDescription( - tn3.getQualifierAsString() + "snapshot", tn3, 
SnapshotType.SKIPFLUSH)); - - Multimap mapping = testChore.getSnapshotsToComputeSize(); + admin.snapshot(new SnapshotDescription(tn1.getQualifierAsString() + "snapshot", tn1, + SnapshotType.SKIPFLUSH)); + admin.snapshot(new SnapshotDescription(tn2.getQualifierAsString() + "snapshot", tn2, + SnapshotType.SKIPFLUSH)); + admin.snapshot(new SnapshotDescription(tn3.getQualifierAsString() + "snapshot", tn3, + SnapshotType.SKIPFLUSH)); + + Multimap mapping = testChore.getSnapshotsToComputeSize(); assertEquals(2, mapping.size()); assertEquals(1, mapping.get(tn1).size()); assertEquals(tn1.getQualifierAsString() + "snapshot", mapping.get(tn1).iterator().next()); assertEquals(1, mapping.get(tn2).size()); assertEquals(tn2.getQualifierAsString() + "snapshot", mapping.get(tn2).iterator().next()); - admin.snapshot(new SnapshotDescription( - tn2.getQualifierAsString() + "snapshot1", tn2, SnapshotType.SKIPFLUSH)); - admin.snapshot(new SnapshotDescription( - tn3.getQualifierAsString() + "snapshot2", tn3, SnapshotType.SKIPFLUSH)); + admin.snapshot(new SnapshotDescription(tn2.getQualifierAsString() + "snapshot1", tn2, + SnapshotType.SKIPFLUSH)); + admin.snapshot(new SnapshotDescription(tn3.getQualifierAsString() + "snapshot2", tn3, + SnapshotType.SKIPFLUSH)); mapping = testChore.getSnapshotsToComputeSize(); assertEquals(3, mapping.size()); assertEquals(1, mapping.get(tn1).size()); assertEquals(tn1.getQualifierAsString() + "snapshot", mapping.get(tn1).iterator().next()); assertEquals(2, mapping.get(tn2).size()); - assertEquals( - new HashSet(Arrays.asList(tn2.getQualifierAsString() + "snapshot", - tn2.getQualifierAsString() + "snapshot1")), mapping.get(tn2)); + assertEquals(new HashSet(Arrays.asList(tn2.getQualifierAsString() + "snapshot", + tn2.getQualifierAsString() + "snapshot1")), mapping.get(tn2)); } @Test public void testSnapshotSize() throws Exception { // Create a table and set a quota TableName tn1 = helper.createTableWithRegions(5); - admin.setQuota(QuotaSettingsFactory.limitTableSpace( - tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS)); + admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, + SpaceViolationPolicy.NO_INSERTS)); // Write some data and flush it helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE); @@ -232,13 +229,12 @@ boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { admin.snapshot(new SnapshotDescription(snapshotName, tn1, SnapshotType.SKIPFLUSH)); // Get the snapshots - Multimap snapshotsToCompute = testChore.getSnapshotsToComputeSize(); - assertEquals( - "Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size()); + Multimap snapshotsToCompute = testChore.getSnapshotsToComputeSize(); + assertEquals("Expected to see the single snapshot: " + snapshotsToCompute, 1, + snapshotsToCompute.size()); // Get the size of our snapshot - Map namespaceSnapshotSizes = testChore.computeSnapshotSizes( - snapshotsToCompute); + Map namespaceSnapshotSizes = testChore.computeSnapshotSizes(snapshotsToCompute); assertEquals(1, namespaceSnapshotSizes.size()); Long size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString()); assertNotNull(size); @@ -260,7 +256,7 @@ boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { LOG.debug("Current usage=" + snapshot.getUsage() + " snapshotSize=" + snapshotSize); // The usage of table space consists of region size and snapshot size return closeInSize(snapshot.getUsage(), snapshotSize + regionSize, - 
SpaceQuotaHelperForTests.ONE_KILOBYTE); + SpaceQuotaHelperForTests.ONE_KILOBYTE); } }); @@ -269,10 +265,9 @@ boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { // Still should see only one snapshot snapshotsToCompute = testChore.getSnapshotsToComputeSize(); - assertEquals( - "Expected to see the single snapshot: " + snapshotsToCompute, 1, snapshotsToCompute.size()); - namespaceSnapshotSizes = testChore.computeSnapshotSizes( - snapshotsToCompute); + assertEquals("Expected to see the single snapshot: " + snapshotsToCompute, 1, + snapshotsToCompute.size()); + namespaceSnapshotSizes = testChore.computeSnapshotSizes(snapshotsToCompute); assertEquals(1, namespaceSnapshotSizes.size()); size = namespaceSnapshotSizes.get(tn1.getNamespaceAsString()); assertNotNull(size); @@ -289,14 +284,16 @@ public void testPersistingSnapshotsForNamespaces() throws Exception { TableName tn5 = TableName.valueOf("tn1"); // Shim in a custom factory to avoid computing snapshot sizes. FileArchiverNotifierFactory test = new FileArchiverNotifierFactory() { - Map tableToSize = ImmutableMap.of( - tn1, 1024L, tn2, 1024L, tn3, 512L, tn4, 1024L, tn5, 3072L); + Map tableToSize = + ImmutableMap.of(tn1, 1024L, tn2, 1024L, tn3, 512L, tn4, 1024L, tn5, 3072L); + @Override - public FileArchiverNotifier get( - Connection conn, Configuration conf, FileSystem fs, TableName tn) { + public FileArchiverNotifier get(Connection conn, Configuration conf, FileSystem fs, + TableName tn) { return new FileArchiverNotifier() { - @Override public void addArchivedFiles(Set> fileSizes) - throws IOException {} + @Override + public void addArchivedFiles(Set> fileSizes) throws IOException { + } @Override public long computeAndStoreSnapshotSizes(Collection currentSnapshots) @@ -309,13 +306,13 @@ public long computeAndStoreSnapshotSizes(Collection currentSnapshots) try { FileArchiverNotifierFactoryImpl.setInstance(test); - Multimap snapshotsToCompute = HashMultimap.create(); + Multimap snapshotsToCompute = HashMultimap.create(); snapshotsToCompute.put(tn1, ""); snapshotsToCompute.put(tn2, ""); snapshotsToCompute.put(tn3, ""); snapshotsToCompute.put(tn4, ""); snapshotsToCompute.put(tn5, ""); - Map nsSizes = testChore.computeSnapshotSizes(snapshotsToCompute); + Map nsSizes = testChore.computeSnapshotSizes(snapshotsToCompute); assertEquals(3, nsSizes.size()); assertEquals(2048L, (long) nsSizes.get("ns1")); assertEquals(1536L, (long) nsSizes.get("ns2")); @@ -330,7 +327,7 @@ public void testRemovedSnapshots() throws Exception { // Create a table and set a quota TableName tn1 = helper.createTableWithRegions(1); admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, - SpaceViolationPolicy.NO_INSERTS)); + SpaceViolationPolicy.NO_INSERTS)); // Write some data and flush it helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE); // 256 KB @@ -415,8 +412,8 @@ boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { public void testBucketingFilesToSnapshots() throws Exception { // Create a table and set a quota TableName tn1 = helper.createTableWithRegions(1); - admin.setQuota(QuotaSettingsFactory.limitTableSpace( - tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS)); + admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, + SpaceViolationPolicy.NO_INSERTS)); // Write some data and flush it helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaBasicFunctioning.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaBasicFunctioning.java index 020e8a33793c..46c5efca3764 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaBasicFunctioning.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaBasicFunctioning.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -22,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -91,7 +92,7 @@ public void removeAllQuotas() throws Exception { public void testNoInsertsWithPut() throws Exception { Put p = new Put(Bytes.toBytes("to_reject")); p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_INSERTS, p); } @@ -99,7 +100,7 @@ public void testNoInsertsWithPut() throws Exception { public void testNoInsertsWithAppend() throws Exception { Append a = new Append(Bytes.toBytes("to_reject")); a.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_INSERTS, a); } @@ -127,7 +128,7 @@ public void testDeletesAfterNoInserts() throws Exception { public void testNoWritesWithPut() throws Exception { Put p = new Put(Bytes.toBytes("to_reject")); p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, p); } @@ -135,7 +136,7 @@ public void testNoWritesWithPut() throws Exception { public void testNoWritesWithAppend() throws Exception { Append a = new Append(Bytes.toBytes("to_reject")); a.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, a); } @@ -156,7 +157,7 @@ public void testNoWritesWithDelete() throws Exception { public void testNoCompactions() throws Exception { Put p = new Put(Bytes.toBytes("to_reject")); p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); final TableName tn = helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES_COMPACTIONS, p); // We know the policy is active at this point @@ -181,7 +182,7 @@ public void testNoCompactions() throws Exception { public void testNoEnableAfterDisablePolicy() 
throws Exception { Put p = new Put(Bytes.toBytes("to_reject")); p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); final TableName tn = helper.writeUntilViolation(SpaceViolationPolicy.DISABLE); final Admin admin = TEST_UTIL.getAdmin(); // Disabling a table relies on some external action (over the other policies), so wait a bit @@ -199,8 +200,8 @@ public void testNoEnableAfterDisablePolicy() throws Exception { String exceptionContents = StringUtils.stringifyException(e); final String expectedText = "violated space quota"; assertTrue( - "Expected the exception to contain " + expectedText + ", but was: " + exceptionContents, - exceptionContents.contains(expectedText)); + "Expected the exception to contain " + expectedText + ", but was: " + exceptionContents, + exceptionContents.contains(expectedText)); } } @@ -213,8 +214,8 @@ public void testTableQuotaOverridesNamespaceQuota() throws Exception { final long tableLimit = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE; final long namespaceLimit = 1024L * SpaceQuotaHelperForTests.ONE_MEGABYTE; TEST_UTIL.getAdmin().setQuota(QuotaSettingsFactory.limitTableSpace(tn, tableLimit, policy)); - TEST_UTIL.getAdmin().setQuota(QuotaSettingsFactory - .limitNamespaceSpace(tn.getNamespaceAsString(), namespaceLimit, policy)); + TEST_UTIL.getAdmin().setQuota( + QuotaSettingsFactory.limitNamespaceSpace(tn.getNamespaceAsString(), namespaceLimit, policy)); // Write more data than should be allowed and flush it to disk helper.writeData(tn, 3L * SpaceQuotaHelperForTests.ONE_MEGABYTE); @@ -225,7 +226,7 @@ public void testTableQuotaOverridesNamespaceQuota() throws Exception { // The write should be rejected because the table quota takes priority over the namespace Put p = new Put(Bytes.toBytes("to_reject")); p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); helper.verifyViolation(policy, tn, p); } @@ -234,8 +235,7 @@ public void testDisablePolicyQuotaAndViolate() throws Exception { TableName tableName = helper.createTable(); helper.setQuotaLimit(tableName, SpaceViolationPolicy.DISABLE, 1L); helper.writeData(tableName, SpaceQuotaHelperForTests.ONE_MEGABYTE * 2L); - TEST_UTIL.getConfiguration() - .setLong("hbase.master.quotas.region.report.retention.millis", 100); + TEST_UTIL.getConfiguration().setLong("hbase.master.quotas.region.report.retention.millis", 100); HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); MasterQuotaManager quotaManager = master.getMasterQuotaManager(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaDropTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaDropTable.java index 2b262817d553..36cf3bd7804a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaDropTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaDropTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,7 +20,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -142,7 +143,7 @@ public boolean evaluate() throws Exception { private void setQuotaAndThenDropTable(SpaceViolationPolicy policy) throws Exception { Put put = new Put(Bytes.toBytes("to_reject")); put.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); // Do puts until we violate space policy final TableName tn = helper.writeUntilViolationAndVerifyViolation(policy, put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaIncrease.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaIncrease.java index 731e5ef2c151..aff4e55cd49d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaIncrease.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaIncrease.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16,7 +18,6 @@ package org.apache.hadoop.hbase.quotas; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -87,7 +88,7 @@ public void testSetQuotaAndThenIncreaseQuotaWithDisable() throws Exception { private void setQuotaAndThenIncreaseQuota(SpaceViolationPolicy policy) throws Exception { Put put = new Put(Bytes.toBytes("to_reject")); put.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); // Do puts until we violate space policy final TableName tn = helper.writeUntilViolationAndVerifyViolation(policy, put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java index cb05f3dcc851..c8d25b356034 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnBulkLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -92,7 +94,7 @@ public void removeAllQuotas() throws Exception { public void testNoBulkLoadsWithNoWrites() throws Exception { Put p = new Put(Bytes.toBytes("to_reject")); p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); TableName tableName = helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, p); @@ -142,7 +144,7 @@ public void testAtomicBulkLoadUnderQuota() throws Exception { ActivePolicyEnforcement activePolicies = spaceQuotaManager.getActiveEnforcements(); SpaceViolationPolicyEnforcement enforcement = activePolicies.getPolicyEnforcement(tn); assertTrue("Expected to find Noop policy, but got " + enforcement.getClass().getSimpleName(), - enforcement instanceof DefaultViolationPolicyEnforcement); + enforcement instanceof DefaultViolationPolicyEnforcement); // Should generate two files, each of which is over 25KB each Map> family2Files = helper.generateFileToLoad(tn, 2, 525); @@ -150,9 +152,9 @@ public void testAtomicBulkLoadUnderQuota() throws Exception { FileStatus[] files = fs.listStatus(new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files")); for (FileStatus file : files) { - assertTrue( - "Expected the file, " + file.getPath() + ", length to be larger than 25KB, but was " - + file.getLen(), file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE); + assertTrue("Expected the file, " + file.getPath() + + ", length to be larger than 25KB, but was " + file.getLen(), + file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE); LOG.debug(file.getPath() + " -> " + file.getLen() + "B"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnNonExistingTables.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnNonExistingTables.java index 21417217b3a3..f48552bfadd0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnNonExistingTables.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaOnNonExistingTables.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
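Aside on the bulk-load and basic-functioning hunks above: the same small idiom is rewrapped over and over in these files. As a minimal sketch, assuming the test class's helper field and the SpaceQuotaHelperForTests constants (all visible in the patch), the idiom is:

Put p = new Put(Bytes.toBytes("to_reject"));
p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"),
  Bytes.toBytes("reject"));
// Keep writing until the chosen policy trips, then assert that this put is rejected.
TableName tableName =
  helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, p);

The only change these hunks make is the indentation of the wrapped addColumn argument, so the behavior of the tests is unchanged.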
@@ -16,7 +18,6 @@ package org.apache.hadoop.hbase.quotas; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaRemoval.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaRemoval.java index d94b0d5517a9..10dfe839c578 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaRemoval.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaRemoval.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16,7 +18,6 @@ package org.apache.hadoop.hbase.quotas; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -128,7 +129,7 @@ public void testSetQuotaAndThenDisableIncrEnableWithDisable() throws Exception { private void setQuotaAndThenRemove(SpaceViolationPolicy policy) throws Exception { Put put = new Put(Bytes.toBytes("to_reject")); put.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); // Do puts until we violate space policy final TableName tn = helper.writeUntilViolationAndVerifyViolation(policy, put); @@ -148,11 +149,11 @@ public void testDeleteTableUsageSnapshotsForNamespace() throws Exception { SpaceViolationPolicy policy = SpaceViolationPolicy.NO_INSERTS; - //Create a namespace + // Create a namespace String ns1 = "nsnew"; NamespaceDescriptor nsd = helper.createNamespace(ns1); - //Create 2nd namespace with name similar to ns1 + // Create 2nd namespace with name similar to ns1 String ns2 = ns1 + "test"; NamespaceDescriptor nsd2 = helper.createNamespace(ns2); @@ -179,11 +180,11 @@ public void testDeleteTableUsageSnapshotsForNamespace() throws Exception { public void testSetNamespaceSizeQuotaAndThenRemove() throws Exception { Put put = new Put(Bytes.toBytes("to_reject")); put.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); SpaceViolationPolicy policy = SpaceViolationPolicy.NO_INSERTS; - //Create namespace + // Create namespace NamespaceDescriptor nsd = helper.createNamespace(); String ns = nsd.getName(); @@ -201,7 +202,7 @@ private void setQuotaAndThenRemoveInOneAmongTwoTables(SpaceViolationPolicy polic throws Exception { Put put = new Put(Bytes.toBytes("to_reject")); put.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); // Do puts until we violate space policy on table tn1 final TableName tn1 = helper.writeUntilViolationAndVerifyViolation(policy, put); @@ 
-222,7 +223,7 @@ private void setQuotaNextDisableThenIncreaseFinallyEnable(SpaceViolationPolicy p throws Exception { Put put = new Put(Bytes.toBytes("to_reject")); put.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); // Do puts until we violate space policy final TableName tn = helper.writeUntilViolationAndVerifyViolation(policy, put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaSwitchPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaSwitchPolicies.java index f5df8ff24359..1b050de05a9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaSwitchPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaSwitchPolicies.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16,7 +18,6 @@ package org.apache.hadoop.hbase.quotas; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -67,38 +68,38 @@ public void removeAllQuotas() throws Exception { @Test public void testSetQuotaFirstWithDisableNextNoWrites() throws Exception { setQuotaAndViolateNextSwitchPoliciesAndValidate(SpaceViolationPolicy.DISABLE, - SpaceViolationPolicy.NO_WRITES); + SpaceViolationPolicy.NO_WRITES); } @Test public void testSetQuotaFirstWithDisableNextAgainDisable() throws Exception { setQuotaAndViolateNextSwitchPoliciesAndValidate(SpaceViolationPolicy.DISABLE, - SpaceViolationPolicy.DISABLE); + SpaceViolationPolicy.DISABLE); } @Test public void testSetQuotaFirstWithDisableNextNoInserts() throws Exception { setQuotaAndViolateNextSwitchPoliciesAndValidate(SpaceViolationPolicy.DISABLE, - SpaceViolationPolicy.NO_INSERTS); + SpaceViolationPolicy.NO_INSERTS); } @Test public void testSetQuotaFirstWithDisableNextNoWritesCompaction() throws Exception { setQuotaAndViolateNextSwitchPoliciesAndValidate(SpaceViolationPolicy.DISABLE, - SpaceViolationPolicy.NO_WRITES_COMPACTIONS); + SpaceViolationPolicy.NO_WRITES_COMPACTIONS); } @Test public void testSetQuotaFirstWithNoWritesNextWithDisable() throws Exception { setQuotaAndViolateNextSwitchPoliciesAndValidate(SpaceViolationPolicy.NO_WRITES, - SpaceViolationPolicy.DISABLE); + SpaceViolationPolicy.DISABLE); } private void setQuotaAndViolateNextSwitchPoliciesAndValidate(SpaceViolationPolicy policy1, SpaceViolationPolicy policy2) throws Exception { Put put = new Put(Bytes.toBytes("to_reject")); put.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); // Do puts until we violate space violation policy1 final TableName tn = helper.writeUntilViolationAndVerifyViolation(policy1, put); @@ -114,4 +115,3 @@ 
private void setQuotaAndViolateNextSwitchPoliciesAndValidate(SpaceViolationPolic helper.verifyViolation(policy2, tn, put); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaViolationPolicyRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaViolationPolicyRefresherChore.java index aa871f12394f..7110cde9c59b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaViolationPolicyRefresherChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotaViolationPolicyRefresherChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -78,7 +78,6 @@ public void setup() throws IOException { when(manager.getRegionServerServices()).thenReturn(rss); when(rss.getConfiguration()).thenReturn(conf); - chore = mock(SpaceQuotaRefresherChore.class); when(chore.getConnection()).thenReturn(conn); when(chore.getManager()).thenReturn(manager); @@ -91,20 +90,15 @@ public void setup() throws IOException { @Test public void testPoliciesAreEnforced() throws IOException { // Create a number of policies that should be enforced (usage > limit) - final Map policiesToEnforce = new HashMap<>(); - policiesToEnforce.put( - TableName.valueOf("table1"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 512L)); - policiesToEnforce.put( - TableName.valueOf("table2"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 2048L, 512L)); - policiesToEnforce.put( - TableName.valueOf("table3"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 4096L, 512L)); - policiesToEnforce.put( - TableName.valueOf("table4"), - new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES_COMPACTIONS), 8192L, 512L)); + final Map policiesToEnforce = new HashMap<>(); + policiesToEnforce.put(TableName.valueOf("table1"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 512L)); + policiesToEnforce.put(TableName.valueOf("table2"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 2048L, 512L)); + policiesToEnforce.put(TableName.valueOf("table3"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 4096L, 512L)); + policiesToEnforce.put(TableName.valueOf("table4"), new SpaceQuotaSnapshot( + new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES_COMPACTIONS), 8192L, 512L)); // No active enforcements when(manager.copyQuotaSnapshots()).thenReturn(Collections.emptyMap()); @@ -113,7 +107,7 @@ public void testPoliciesAreEnforced() throws IOException { chore.chore(); - for (Entry entry : policiesToEnforce.entrySet()) { + for (Entry entry : policiesToEnforce.entrySet()) { // Ensure we enforce the policy verify(manager).enforceViolationPolicy(entry.getKey(), entry.getValue()); // Don't disable any policies @@ -123,27 +117,21 @@ public void testPoliciesAreEnforced() throws IOException { @Test public void testOldPoliciesAreRemoved() throws IOException { - final Map previousPolicies = new HashMap<>(); - previousPolicies.put( - TableName.valueOf("table3"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 4096L, 512L)); - previousPolicies.put( - TableName.valueOf("table4"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 
8192L, 512L)); - - final Map policiesToEnforce = new HashMap<>(); - policiesToEnforce.put( - TableName.valueOf("table1"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 512L)); - policiesToEnforce.put( - TableName.valueOf("table2"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 2048L, 512L)); - policiesToEnforce.put( - TableName.valueOf("table3"), - new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 256L, 512L)); - policiesToEnforce.put( - TableName.valueOf("table4"), - new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 128L, 512L)); + final Map previousPolicies = new HashMap<>(); + previousPolicies.put(TableName.valueOf("table3"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 4096L, 512L)); + previousPolicies.put(TableName.valueOf("table4"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 8192L, 512L)); + + final Map policiesToEnforce = new HashMap<>(); + policiesToEnforce.put(TableName.valueOf("table1"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 512L)); + policiesToEnforce.put(TableName.valueOf("table2"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 2048L, 512L)); + policiesToEnforce.put(TableName.valueOf("table3"), + new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 256L, 512L)); + policiesToEnforce.put(TableName.valueOf("table4"), + new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 128L, 512L)); // No active enforcements when(manager.copyQuotaSnapshots()).thenReturn(previousPolicies); @@ -152,10 +140,10 @@ public void testOldPoliciesAreRemoved() throws IOException { chore.chore(); - verify(manager).enforceViolationPolicy( - TableName.valueOf("table1"), policiesToEnforce.get(TableName.valueOf("table1"))); - verify(manager).enforceViolationPolicy( - TableName.valueOf("table2"), policiesToEnforce.get(TableName.valueOf("table2"))); + verify(manager).enforceViolationPolicy(TableName.valueOf("table1"), + policiesToEnforce.get(TableName.valueOf("table1"))); + verify(manager).enforceViolationPolicy(TableName.valueOf("table2"), + policiesToEnforce.get(TableName.valueOf("table2"))); verify(manager).disableViolationPolicyEnforcement(TableName.valueOf("table3")); verify(manager).disableViolationPolicyEnforcement(TableName.valueOf("table4")); @@ -163,21 +151,17 @@ public void testOldPoliciesAreRemoved() throws IOException { @Test public void testNewPolicyOverridesOld() throws IOException { - final Map policiesToEnforce = new HashMap<>(); - policiesToEnforce.put( - TableName.valueOf("table1"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 512L)); - policiesToEnforce.put( - TableName.valueOf("table2"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 2048L, 512L)); - policiesToEnforce.put( - TableName.valueOf("table3"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 4096L, 512L)); - - final Map previousPolicies = new HashMap<>(); - previousPolicies.put( - TableName.valueOf("table1"), - new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 8192L, 512L)); + final Map policiesToEnforce = new HashMap<>(); + policiesToEnforce.put(TableName.valueOf("table1"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 512L)); + policiesToEnforce.put(TableName.valueOf("table2"), + new SpaceQuotaSnapshot(new 
SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 2048L, 512L)); + policiesToEnforce.put(TableName.valueOf("table3"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 4096L, 512L)); + + final Map previousPolicies = new HashMap<>(); + previousPolicies.put(TableName.valueOf("table1"), + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 8192L, 512L)); // No active enforcements when(manager.getActivePoliciesAsMap()).thenReturn(previousPolicies); @@ -186,7 +170,7 @@ public void testNewPolicyOverridesOld() throws IOException { chore.chore(); - for (Entry entry : policiesToEnforce.entrySet()) { + for (Entry entry : policiesToEnforce.entrySet()) { verify(manager).enforceViolationPolicy(entry.getKey(), entry.getValue()); } verify(manager, never()).disableViolationPolicyEnforcement(TableName.valueOf("table1")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithRegionReplicas.java index 9f08e1140167..ef3fb360e90c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithRegionReplicas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
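Aside on the TestSpaceQuotaViolationPolicyRefresherChore hunks just above: the rewrapped puts and verifies all express the same mock-driven check. A condensed sketch follows, assuming the test's manager and chore mocks, with the generic parameters (dropped in this rendering of the patch) restored as Map<TableName, SpaceQuotaSnapshot> as an assumption; the stubbing that hands the freshly read snapshots to the chore is elided:

Map<TableName, SpaceQuotaSnapshot> previousPolicies = new HashMap<>();
// table3 is currently being enforced from a previous run of the chore (usage 4096L > limit 512L).
previousPolicies.put(TableName.valueOf("table3"),
  new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES), 4096L, 512L));

Map<TableName, SpaceQuotaSnapshot> policiesToEnforce = new HashMap<>();
// Usage 1024L against a 512L limit: table1 should have its DISABLE policy enforced.
policiesToEnforce.put(TableName.valueOf("table1"),
  new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L, 512L));
// Usage 256L against a 512L limit: table3 has dropped back under its quota.
policiesToEnforce.put(TableName.valueOf("table3"),
  new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 256L, 512L));

when(manager.copyQuotaSnapshots()).thenReturn(previousPolicies);
// ... stub the chore so policiesToEnforce is what it reads from the quota table ...
chore.chore();

// The still-violating table is enforced; the recovered one has enforcement lifted.
verify(manager).enforceViolationPolicy(TableName.valueOf("table1"),
  policiesToEnforce.get(TableName.valueOf("table1")));
verify(manager).disableViolationPolicyEnforcement(TableName.valueOf("table3"));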
@@ -16,7 +18,6 @@ package org.apache.hadoop.hbase.quotas; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -36,7 +37,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - @Category(LargeTests.class) public class TestSpaceQuotasWithRegionReplicas { @@ -101,12 +101,12 @@ public void testSetQuotaWithMultipleRegionZeroRegionReplicas() throws Exception private void setQuotaAndVerifyForRegionReplication(int region, int replicatedRegion, SpaceViolationPolicy policy) throws Exception { TableName tn = helper.createTableWithRegions(TEST_UTIL.getAdmin(), - NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR, region, replicatedRegion); + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR, region, replicatedRegion); helper.setQuotaLimit(tn, policy, 5L); helper.writeData(tn, 5L * SpaceQuotaHelperForTests.ONE_MEGABYTE); Put p = new Put(Bytes.toBytes("to_reject")); p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"), - Bytes.toBytes("reject")); + Bytes.toBytes("reject")); helper.verifyViolation(policy, tn, p); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithSnapshots.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithSnapshots.java index 6c489da6320b..9232882eb7d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithSnapshots.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithSnapshots.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ /** * Test class to exercise the inclusion of snapshots in space quotas */ -@Category({LargeTests.class}) +@Category({ LargeTests.class }) public class TestSpaceQuotasWithSnapshots { @ClassRule @@ -111,8 +111,8 @@ public void testTablesInheritSnapshotSize() throws Exception { TableName tn = helper.createTableWithRegions(1); LOG.info("Writing data"); // Set a quota - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); // Write some data final long initialSize = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE; @@ -121,7 +121,8 @@ public void testTablesInheritSnapshotSize() throws Exception { LOG.info("Waiting until table size reflects written data"); // Wait until that data is seen by the master TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { return snapshot.getUsage() >= initialSize; } }); @@ -150,8 +151,8 @@ public void testTablesInheritSnapshotSize() throws Exception { final long lowerBound = initialSize - FUDGE_FOR_TABLE_SIZE; // Store the actual size after writing more data and then compacting it down to one file - LOG.info("Waiting for the region reports to reflect the correct size, between (" - + lowerBound + ", " + upperBound + ")"); + LOG.info("Waiting for the region reports to reflect the correct size, between (" + lowerBound + + ", " + 
upperBound + ")"); TEST_UTIL.waitFor(30 * 1000, 500, new Predicate() { @Override public boolean evaluate() throws Exception { @@ -179,23 +180,22 @@ public boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { // The final usage should be the sum of the initial size (referenced by the snapshot) and the // new size we just wrote above. long expectedFinalSize = actualInitialSize + finalSize; - LOG.info( - "Expecting table usage to be " + actualInitialSize + " + " + finalSize - + " = " + expectedFinalSize); + LOG.info("Expecting table usage to be " + actualInitialSize + " + " + finalSize + " = " + + expectedFinalSize); // The size of the table (WRT quotas) should now be approximately double what it was previously TEST_UTIL.waitFor(30 * 1000, 1000, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { LOG.debug("Checking for " + expectedFinalSize + " == " + snapshot.getUsage()); return expectedFinalSize == snapshot.getUsage(); } }); - Map snapshotSizes = QuotaTableUtil.getObservedSnapshotSizes(conn); + Map snapshotSizes = QuotaTableUtil.getObservedSnapshotSizes(conn); Long size = snapshotSizes.get(snapshot1); assertNotNull("Did not observe the size of the snapshot", size); - assertEquals( - "The recorded size of the HBase snapshot was not the size we expected", actualInitialSize, - size.longValue()); + assertEquals("The recorded size of the HBase snapshot was not the size we expected", + actualInitialSize, size.longValue()); } @Test @@ -204,8 +204,8 @@ public void testNamespacesInheritSnapshotSize() throws Exception { TableName tn = helper.createTableWithRegions(ns, 1); LOG.info("Writing data"); // Set a quota - QuotaSettings settings = QuotaSettingsFactory.limitNamespaceSpace( - ns, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = QuotaSettingsFactory.limitNamespaceSpace(ns, + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); // Write some data and flush it to disk @@ -216,7 +216,8 @@ public void testNamespacesInheritSnapshotSize() throws Exception { LOG.info("Waiting until namespace size reflects written data"); // Wait until that data is seen by the master TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, ns) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { return snapshot.getUsage() >= initialSize; } }); @@ -244,8 +245,8 @@ public void testNamespacesInheritSnapshotSize() throws Exception { final long upperBound = initialSize + FUDGE_FOR_TABLE_SIZE; final long lowerBound = initialSize - FUDGE_FOR_TABLE_SIZE; - LOG.info("Waiting for the region reports to reflect the correct size, between (" - + lowerBound + ", " + upperBound + ")"); + LOG.info("Waiting for the region reports to reflect the correct size, between (" + lowerBound + + ", " + upperBound + ")"); TEST_UTIL.waitFor(30 * 1000, 500, new Predicate() { @Override public boolean evaluate() throws Exception { @@ -278,23 +279,22 @@ public boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { // The final usage should be the sum of the initial size (referenced by the snapshot) and the // new size we just wrote above. 
long expectedFinalSize = actualInitialSize + finalSize; - LOG.info( - "Expecting namespace usage to be " + actualInitialSize + " + " + finalSize - + " = " + expectedFinalSize); + LOG.info("Expecting namespace usage to be " + actualInitialSize + " + " + finalSize + " = " + + expectedFinalSize); // The size of the table (WRT quotas) should now be approximately double what it was previously TEST_UTIL.waitFor(30 * 1000, 1000, new SpaceQuotaSnapshotPredicate(conn, ns) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { LOG.debug("Checking for " + expectedFinalSize + " == " + snapshot.getUsage()); return expectedFinalSize == snapshot.getUsage(); } }); - Map snapshotSizes = QuotaTableUtil.getObservedSnapshotSizes(conn); + Map snapshotSizes = QuotaTableUtil.getObservedSnapshotSizes(conn); Long size = snapshotSizes.get(snapshot1); assertNotNull("Did not observe the size of the snapshot", size); - assertEquals( - "The recorded size of the HBase snapshot was not the size we expected", actualInitialSize, - size.longValue()); + assertEquals("The recorded size of the HBase snapshot was not the size we expected", + actualInitialSize, size.longValue()); } @Test @@ -336,12 +336,11 @@ public boolean evaluate() throws Exception { CellScanner cs = r.cellScanner(); assertTrue(cs.advance()); Cell c = cs.current(); - SpaceQuotaSnapshot snapshot = SpaceQuotaSnapshot.toSpaceQuotaSnapshot( - QuotaProtos.SpaceQuotaSnapshot.parseFrom( - UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength()))); + SpaceQuotaSnapshot snapshot = SpaceQuotaSnapshot + .toSpaceQuotaSnapshot(QuotaProtos.SpaceQuotaSnapshot.parseFrom(UnsafeByteOperations + .unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()))); LOG.info( - snapshot.getUsage() + "/" + snapshot.getLimit() + " " + snapshot.getQuotaStatus()); + snapshot.getUsage() + "/" + snapshot.getLimit() + " " + snapshot.getQuotaStatus()); // We expect to see the table move to violation return snapshot.getQuotaStatus().isInViolation(); } finally { @@ -360,11 +359,11 @@ public void testRematerializedTablesDoNoInheritSpace() throws Exception { TableName tn2 = helper.getNextTableName(); LOG.info("Writing data"); // Set a quota on both tables - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); - QuotaSettings settings2 = QuotaSettingsFactory.limitTableSpace( - tn2, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); + QuotaSettings settings2 = QuotaSettingsFactory.limitTableSpace(tn2, + SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings2); // Write some data final long initialSize = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE; @@ -373,7 +372,8 @@ public void testRematerializedTablesDoNoInheritSpace() throws Exception { LOG.info("Waiting until table size reflects written data"); // Wait until that data is seen by the master TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { return snapshot.getUsage() >= initialSize; } }); @@ -406,7 +406,7 
@@ boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { // We know that reports were sent by our RS, verify that they take up zero size. SpaceQuotaSnapshot snapshot = - (SpaceQuotaSnapshot) conn.getAdmin().getCurrentSpaceQuotaSnapshot(tn2); + (SpaceQuotaSnapshot) conn.getAdmin().getCurrentSpaceQuotaSnapshot(tn2); assertNotNull(snapshot); assertEquals(0, snapshot.getUsage()); @@ -428,7 +428,8 @@ void waitForStableQuotaSize(Connection conn, TableName tn, String ns) throws Exc AtomicLong lastValue = new AtomicLong(-1); AtomicInteger counter = new AtomicInteger(0); TEST_UTIL.waitFor(15_000, 500, new SpaceQuotaSnapshotPredicate(conn, tn, ns) { - @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + @Override + boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { LOG.debug("Last observed size=" + lastValue.get()); if (snapshot.getUsage() == lastValue.get()) { int numMatches = counter.incrementAndGet(); @@ -460,7 +461,8 @@ void waitForStableRegionSizeReport(Connection conn, TableName tn) throws Excepti AtomicLong lastValue = new AtomicLong(-1); AtomicInteger counter = new AtomicInteger(0); TEST_UTIL.waitFor(15_000, 500, new Predicate() { - @Override public boolean evaluate() throws Exception { + @Override + public boolean evaluate() throws Exception { LOG.debug("Last observed size=" + lastValue.get()); long actual = getRegionSizeReportForTable(conn, tn); if (actual == lastValue.get()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java index d5449b1efab7..943dd66222dd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -128,8 +128,8 @@ public TableName call() throws Exception { final TableName tn = helper.createTableWithRegions(admin, 5); // Grant the normal user permissions try { - AccessControlClient.grant( - conn, tn, REGULARUSER_NAME, null, null, Action.READ, Action.WRITE); + AccessControlClient.grant(conn, tn, REGULARUSER_NAME, null, null, Action.READ, + Action.WRITE); } catch (Throwable t) { if (t instanceof Exception) { throw (Exception) t; @@ -156,8 +156,8 @@ public Void call() throws Exception { }); final long sizeLimit = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE; - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, sizeLimit, SpaceViolationPolicy.NO_WRITES_COMPACTIONS); + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, + SpaceViolationPolicy.NO_WRITES_COMPACTIONS); try (Connection conn = getConnection()) { conn.getAdmin().setQuota(settings); @@ -182,7 +182,7 @@ public Void call() throws Exception { LOG.debug("message", e); } - try{ + try { // Should not throw an exception (superuser can do anything) doAsSuperUser(new Callable() { @Override @@ -213,13 +213,13 @@ public TableName call() throws Exception { final Admin admin = conn.getAdmin(); final TableName tn = helper.createTableWithRegions(admin, 5); final long sizeLimit = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE; - QuotaSettings settings = QuotaSettingsFactory.limitTableSpace( - tn, sizeLimit, SpaceViolationPolicy.NO_WRITES_COMPACTIONS); + QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, + SpaceViolationPolicy.NO_WRITES_COMPACTIONS); admin.setQuota(settings); // Grant the normal user permission to create a table and set a quota try { - AccessControlClient.grant( - conn, tn, REGULARUSER_NAME, null, null, Action.READ, Action.WRITE); + AccessControlClient.grant(conn, tn, REGULARUSER_NAME, null, null, Action.READ, + Action.WRITE); } catch (Throwable t) { if (t instanceof Exception) { throw (Exception) t; @@ -305,7 +305,7 @@ private void waitForTableToEnterQuotaViolation(TableName tn) throws Exception { Waiter.waitFor(TEST_UTIL.getConfiguration(), 30 * 1000, 1000, new Predicate() { @Override public boolean evaluate() throws Exception { - Map snapshots = + Map snapshots = rs.getRegionServerSpaceQuotaManager().copyQuotaSnapshots(); SpaceQuotaSnapshot snapshot = snapshots.get(tn); if (snapshot == null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java index 6803ee51267d..36956bc98bdf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -87,22 +87,19 @@ public void testFilterRegionsByTable() throws Exception { assertEquals(0, size(store.filterBySubject(tn1))); for (int i = 0; i < 5; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn1) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } for (int i = 0; i < 3; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn2) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn2).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } for (int i = 0; i < 10; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn3) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 0L); + regionReports.put(RegionInfoBuilder.newBuilder(tn3).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 0L); } assertEquals(18, regionReports.size()); assertEquals(5, size(store.filterBySubject(tn1))); @@ -116,54 +113,47 @@ public void testTargetViolationState() throws IOException { TableName tn1 = TableName.valueOf("violation1"); TableName tn2 = TableName.valueOf("observance1"); TableName tn3 = TableName.valueOf("observance2"); - SpaceQuota quota = SpaceQuota.newBuilder() - .setSoftLimit(1024L * 1024L) + SpaceQuota quota = SpaceQuota.newBuilder().setSoftLimit(1024L * 1024L) .setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(SpaceViolationPolicy.DISABLE)) .build(); // Create some junk data to filter. Makes sure it's so large that it would // immediately violate the quota. 
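Aside on the TestTableQuotaViolationStore hunks in this file: behind the rewrapped builders is straightforward threshold arithmetic. A sketch, assuming the test's regionReports map, store, and tn1 (all present in the patch); the 1 MB soft limit mirrors the original:

SpaceQuota quota = SpaceQuota.newBuilder().setSoftLimit(1024L * 1024L)
  .setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(SpaceViolationPolicy.DISABLE)).build();
// Two regions report 512 KB + 256 KB = 768 KB, which is under the 1 MB limit.
regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(0))
  .setEndKey(Bytes.toBytes(1)).build(), 1024L * 512L);
regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(1))
  .setEndKey(Bytes.toBytes(2)).build(), 1024L * 256L);
assertEquals(new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 1024L * 768L, 1024L * 1024L),
  store.getTargetState(tn1, quota));
// Reaching exactly 1 MB is still observance; only once usage exceeds the limit does
// getTargetState return a snapshot carrying the DISABLE policy, as the later asserts verify.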
for (int i = 0; i < 3; i++) { - regionReports.put(RegionInfoBuilder.newBuilder(tn2) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 5L * ONE_MEGABYTE); - regionReports.put(RegionInfoBuilder.newBuilder(tn3) - .setStartKey(Bytes.toBytes(i)) - .setEndKey(Bytes.toBytes(i + 1)) - .build(), 5L * ONE_MEGABYTE); + regionReports.put(RegionInfoBuilder.newBuilder(tn2).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 5L * ONE_MEGABYTE); + regionReports.put(RegionInfoBuilder.newBuilder(tn3).setStartKey(Bytes.toBytes(i)) + .setEndKey(Bytes.toBytes(i + 1)).build(), + 5L * ONE_MEGABYTE); } - regionReports.put(RegionInfoBuilder.newBuilder(tn1) - .setStartKey(Bytes.toBytes(0)) - .setEndKey(Bytes.toBytes(1)) - .build(), 1024L * 512L); - regionReports.put(RegionInfoBuilder.newBuilder(tn1) - .setStartKey(Bytes.toBytes(1)) - .setEndKey(Bytes.toBytes(2)) - .build(), 1024L * 256L); - - SpaceQuotaSnapshot tn1Snapshot = new SpaceQuotaSnapshot( - SpaceQuotaStatus.notInViolation(), 1024L * 768L, 1024L * 1024L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(0)) + .setEndKey(Bytes.toBytes(1)).build(), + 1024L * 512L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(1)) + .setEndKey(Bytes.toBytes(2)).build(), + 1024L * 256L); + + SpaceQuotaSnapshot tn1Snapshot = + new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 1024L * 768L, 1024L * 1024L); // Below the quota assertEquals(tn1Snapshot, store.getTargetState(tn1, quota)); - - regionReports.put(RegionInfoBuilder.newBuilder(tn1) - .setStartKey(Bytes.toBytes(2)) - .setEndKey(Bytes.toBytes(3)) - .build(), 1024L * 256L); - tn1Snapshot = new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 1024L * 1024L, 1024L * 1024L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(2)) + .setEndKey(Bytes.toBytes(3)).build(), + 1024L * 256L); + tn1Snapshot = + new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 1024L * 1024L, 1024L * 1024L); // Equal to the quota is still in observance assertEquals(tn1Snapshot, store.getTargetState(tn1, quota)); - regionReports.put(RegionInfoBuilder.newBuilder(tn1) - .setStartKey(Bytes.toBytes(3)) - .setEndKey(Bytes.toBytes(4)) - .build(), 1024L); - tn1Snapshot = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), 1024L * 1024L + 1024L, 1024L * 1024L); + regionReports.put(RegionInfoBuilder.newBuilder(tn1).setStartKey(Bytes.toBytes(3)) + .setEndKey(Bytes.toBytes(4)).build(), + 1024L); + tn1Snapshot = new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.DISABLE), + 1024L * 1024L + 1024L, 1024L * 1024L); // Exceeds the quota, should be in violation assertEquals(tn1Snapshot, store.getTargetState(tn1, quota)); @@ -174,12 +164,8 @@ public void testGetSpaceQuota() throws Exception { TableQuotaSnapshotStore mockStore = mock(TableQuotaSnapshotStore.class); when(mockStore.getSpaceQuota(any())).thenCallRealMethod(); - Quotas quotaWithSpace = Quotas.newBuilder().setSpace( - SpaceQuota.newBuilder() - .setSoftLimit(1024L) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build()) - .build(); + Quotas quotaWithSpace = Quotas.newBuilder().setSpace(SpaceQuota.newBuilder().setSoftLimit(1024L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build()).build(); Quotas quotaWithoutSpace = Quotas.newBuilder().build(); AtomicReference quotaRef = new AtomicReference<>(); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java index 8f8db51c2100..0d9ad5076708 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,18 +68,16 @@ public void setup() throws Exception { @Test public void testToViolation() throws Exception { final TableName tn = TableName.valueOf("inviolation"); - final SpaceQuotaSnapshot snapshot = new SpaceQuotaSnapshot( - new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 1024L, 512L); + final SpaceQuotaSnapshot snapshot = + new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS), 1024L, 512L); final Table quotaTable = mock(Table.class); when(conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)).thenReturn(quotaTable); final Put expectedPut = new Put(Bytes.toBytes("t." + tn.getNameAsString())); final QuotaProtos.SpaceQuotaSnapshot protoQuota = QuotaProtos.SpaceQuotaSnapshot.newBuilder() .setQuotaStatus(QuotaProtos.SpaceQuotaStatus.newBuilder().setInViolation(true) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_INSERTS)) - .setQuotaLimit(512L) - .setQuotaUsage(1024L) - .build(); + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_INSERTS)) + .setQuotaLimit(512L).setQuotaUsage(1024L).build(); expectedPut.addColumn(Bytes.toBytes("u"), Bytes.toBytes("p"), protoQuota.toByteArray()); notifier.transitionTable(tn, snapshot); @@ -109,10 +107,10 @@ public boolean matches(T argument) { if (expected.size() != actual.size()) { return false; } - NavigableMap> expectedCells = expected.getFamilyCellMap(); - NavigableMap> actualCells = actual.getFamilyCellMap(); - Entry> expectedEntry = expectedCells.entrySet().iterator().next(); - Entry> actualEntry = actualCells.entrySet().iterator().next(); + NavigableMap> expectedCells = expected.getFamilyCellMap(); + NavigableMap> actualCells = actual.getFamilyCellMap(); + Entry> expectedEntry = expectedCells.entrySet().iterator().next(); + Entry> actualEntry = actualCells.entrySet().iterator().next(); if (!Arrays.equals(expectedEntry.getKey(), actualEntry.getKey())) { return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java index 450c554e6b95..a115a2b13f0e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -100,8 +100,8 @@ public void testImmutableGetters() { @Test public void testInsufficientlyReportedTableFiltering() throws Exception { - final Map reportedRegions = new HashMap<>(); - final Map actualRegions = new HashMap<>(); + final Map reportedRegions = new HashMap<>(); + final Map actualRegions = new HashMap<>(); final Configuration conf = HBaseConfiguration.create(); conf.setDouble(QuotaObserverChore.QUOTA_OBSERVER_CHORE_REPORT_PERCENT_KEY, 0.95); @@ -144,7 +144,8 @@ int getNumReportedRegions(TableName table, QuotaSnapshotStore tableSt Set filteredTablesWithTableQuotas = tables.getTableQuotaTables(); assertEquals(Collections.singleton(sufficientRegionsTable), filteredTablesWithTableQuotas); Set filteredTablesWithNamespaceQutoas = tables.getNamespaceQuotaTables(); - assertEquals(Collections.singleton(sufficientRegionsNamespaceTable), filteredTablesWithNamespaceQutoas); + assertEquals(Collections.singleton(sufficientRegionsNamespaceTable), + filteredTablesWithNamespaceQutoas); } @Test @@ -158,20 +159,16 @@ public void testGetTablesByNamespace() { tables.addNamespaceQuotaTable(TableName.valueOf("ns2", "t1")); tables.addNamespaceQuotaTable(TableName.valueOf("ns2", "t2")); - Multimap tablesByNamespace = tables.getTablesByNamespace(); + Multimap tablesByNamespace = tables.getTablesByNamespace(); Collection tablesInNs = tablesByNamespace.get("ns1"); assertEquals(3, tablesInNs.size()); assertTrue("Unexpected results for ns1: " + tablesInNs, - tablesInNs.containsAll(Arrays.asList( - TableName.valueOf("ns1", "t1"), - TableName.valueOf("ns1", "t2"), - TableName.valueOf("ns1", "t3")))); + tablesInNs.containsAll(Arrays.asList(TableName.valueOf("ns1", "t1"), + TableName.valueOf("ns1", "t2"), TableName.valueOf("ns1", "t3")))); tablesInNs = tablesByNamespace.get("ns2"); assertEquals(2, tablesInNs.size()); - assertTrue("Unexpected results for ns2: " + tablesInNs, - tablesInNs.containsAll(Arrays.asList( - TableName.valueOf("ns2", "t1"), - TableName.valueOf("ns2", "t2")))); + assertTrue("Unexpected results for ns2: " + tablesInNs, tablesInNs.containsAll( + Arrays.asList(TableName.valueOf("ns2", "t1"), TableName.valueOf("ns2", "t2")))); } @Test @@ -183,7 +180,7 @@ public void testFilteringMissingTables() throws Exception { when(admin.getRegions(missingTable)).thenReturn(null); QuotaObserverChore chore = mock(QuotaObserverChore.class); - Map regionUsage = new HashMap<>(); + Map regionUsage = new HashMap<>(); TableQuotaSnapshotStore store = new TableQuotaSnapshotStore(conn, chore, regionUsage); // A super dirty hack to verify that, after getting no regions for our table, @@ -199,7 +196,7 @@ int getNumReportedRegions(TableName table, QuotaSnapshotStore tableSt tables.filterInsufficientlyReportedTables(store); final Set tablesWithQuotas = tables.getTableQuotaTables(); - assertTrue( - "Expected to find no tables, but found " + tablesWithQuotas, tablesWithQuotas.isEmpty()); + assertTrue("Expected to find no tables, but found " + tablesWithQuotas, + tablesWithQuotas.isEmpty()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java index 3b98cba05030..b6d65eff3d8e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/ThrottleQuotaTestUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache 
Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java index ec8f1bf3d5b2..14701e0dbc02 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java index 4995de7024e3..2c35bc40a154 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -74,7 +74,8 @@ public void testFilesUnderLimit() throws Exception { } // Quota is not in violation now - SpaceQuotaSnapshot snapshot = new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 0, length * 6); + SpaceQuotaSnapshot snapshot = + new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 0, length * 6); policy.initialize(rss, tableName, snapshot); @@ -92,7 +93,8 @@ public void testFileIsNotAFile() throws Exception { paths.add(path); // Quota is not in violation now - SpaceQuotaSnapshot snapshot = new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 0, Long.MAX_VALUE); + SpaceQuotaSnapshot snapshot = + new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 0, Long.MAX_VALUE); policy.initialize(rss, tableName, snapshot); @@ -116,7 +118,8 @@ public void testOneFileInBatchOverLimit() throws Exception { } // Quota is not in violation now - SpaceQuotaSnapshot snapshot = new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 0, 1024L); + SpaceQuotaSnapshot snapshot = + new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 0, 1024L); policy.initialize(rss, tableName, snapshot); @@ -139,7 +142,8 @@ public void testSumOfFilesOverLimit() throws Exception { } // Quota is not in violation now, but 5*1024 files would push us to violation - SpaceQuotaSnapshot snapshot = new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 0, 5000L); + SpaceQuotaSnapshot snapshot = + new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 0, 5000L); policy.initialize(rss, tableName, snapshot); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestDisableTableViolationPolicyEnforcement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestDisableTableViolationPolicyEnforcement.java index bef60dac49f3..3b7f0c572a10 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestDisableTableViolationPolicyEnforcement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestDisableTableViolationPolicyEnforcement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoInsertsViolationPolicyEnforcement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoInsertsViolationPolicyEnforcement.java index 66c308939926..00cec42bb6f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoInsertsViolationPolicyEnforcement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoInsertsViolationPolicyEnforcement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoWritesCompactionsViolationPolicyEnforcement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoWritesCompactionsViolationPolicyEnforcement.java index f4d0c4d61831..ae72e83ae68d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoWritesCompactionsViolationPolicyEnforcement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoWritesCompactionsViolationPolicyEnforcement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoWritesViolationPolicyEnforcement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoWritesViolationPolicyEnforcement.java index 116814596c1d..e1f27f67dc7f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoWritesViolationPolicyEnforcement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestNoWritesViolationPolicyEnforcement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java index 15a59ab62faa..7130e1a32fc6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.Arrays; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -50,7 +49,8 @@ protected ArrayList sfCreate(long[] minTimestamps, long[] maxTimesta for (int i = 0; i < sizes.length; i++) { MockHStoreFile msf = new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes[i], ageInDisk.get(i), false, i); - msf.setTimeRangeTracker(TimeRangeTracker.create(TimeRangeTracker.Type.SYNC, minTimestamps[i], maxTimestamps[i])); + msf.setTimeRangeTracker( + TimeRangeTracker.create(TimeRangeTracker.Type.SYNC, minTimestamps[i], maxTimestamps[i])); ret.add(msf); } return ret; @@ -72,7 +72,7 @@ private DateTieredCompactionRequest getRequest(long now, ArrayList c timeMachine.setValue(now); DateTieredCompactionRequest request; DateTieredCompactionPolicy policy = - (DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy(); + (DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy(); if (isMajor) { for (HStoreFile file : candidates) { ((MockHStoreFile) file).setIsMajor(true); @@ -82,7 +82,7 @@ private DateTieredCompactionRequest getRequest(long now, ArrayList c } else { assertEquals(toCompact, policy.needsCompaction(candidates, ImmutableList.of())); request = - (DateTieredCompactionRequest) policy.selectMinorCompaction(candidates, false, false); + (DateTieredCompactionRequest) policy.selectMinorCompaction(candidates, false, false); } return request; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java index 3b27971f878a..2d496ef92cc2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.util.Arrays; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -39,6 +37,7 @@ import org.apache.hadoop.io.BytesWritable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; @@ -52,13 +51,11 @@ public class CreateRandomStoreFile { /** - * As much as this number of bytes can be added or subtracted from key/value - * lengths. + * As much as this number of bytes can be added or subtracted from key/value lengths. 
*/ private static final int LEN_VARIATION = 5; - private static final Logger LOG = - LoggerFactory.getLogger(CreateRandomStoreFile.class); + private static final Logger LOG = LoggerFactory.getLogger(CreateRandomStoreFile.class); private static final String OUTPUT_DIR_OPTION = "o"; private static final String NUM_KV_OPTION = "n"; private static final String HFILE_VERSION_OPTION = "h"; @@ -75,8 +72,7 @@ public class CreateRandomStoreFile { private static final int EXIT_FAILURE = 1; /** The number of valid key types in a store file */ - private static final int NUM_VALID_KEY_TYPES = - KeyValue.Type.values().length - 2; + private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2; private Options options = new Options(); @@ -84,40 +80,30 @@ public class CreateRandomStoreFile { /** * Runs the tools. - * * @param args command-line arguments * @return true in case of success * @throws IOException */ public boolean run(String[] args) throws IOException { - options.addOption(OUTPUT_DIR_OPTION, "output_dir", true, - "Output directory"); - options.addOption(NUM_KV_OPTION, "num_kv", true, - "Number of key/value pairs"); + options.addOption(OUTPUT_DIR_OPTION, "output_dir", true, "Output directory"); + options.addOption(NUM_KV_OPTION, "num_kv", true, "Number of key/value pairs"); options.addOption(KEY_SIZE_OPTION, "key_size", true, "Average key size"); - options.addOption(VALUE_SIZE_OPTION, "value_size", true, - "Average value size"); - options.addOption(HFILE_VERSION_OPTION, "hfile_version", true, - "HFile version to create"); + options.addOption(VALUE_SIZE_OPTION, "value_size", true, "Average value size"); + options.addOption(HFILE_VERSION_OPTION, "hfile_version", true, "HFile version to create"); options.addOption(COMPRESSION_OPTION, "compression", true, - " Compression type, one of " - + Arrays.toString(Compression.Algorithm.values())); + " Compression type, one of " + Arrays.toString(Compression.Algorithm.values())); options.addOption(BLOOM_FILTER_OPTION, "bloom_filter", true, - "Bloom filter type, one of " - + Arrays.toString(BloomType.values())); + "Bloom filter type, one of " + Arrays.toString(BloomType.values())); options.addOption(BLOOM_FILTER_PARAM_OPTION, "bloom_param", true, - "the parameter of the bloom filter"); - options.addOption(BLOCK_SIZE_OPTION, "block_size", true, - "HFile block size"); + "the parameter of the bloom filter"); + options.addOption(BLOCK_SIZE_OPTION, "block_size", true, "HFile block size"); options.addOption(BLOOM_BLOCK_SIZE_OPTION, "bloom_block_size", true, - "Compound Bloom filters block size"); - options.addOption(INDEX_BLOCK_SIZE_OPTION, "index_block_size", true, - "Index block size"); + "Compound Bloom filters block size"); + options.addOption(INDEX_BLOCK_SIZE_OPTION, "index_block_size", true, "Index block size"); if (args.length == 0) { HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp(CreateRandomStoreFile.class.getSimpleName(), options, - true); + formatter.printHelp(CreateRandomStoreFile.class.getSimpleName(), options, true); return false; } @@ -155,22 +141,19 @@ public boolean run(String[] args) throws IOException { Path outputDir = new Path(cmdLine.getOptionValue(OUTPUT_DIR_OPTION)); long numKV = Long.parseLong(cmdLine.getOptionValue(NUM_KV_OPTION)); - configureKeyValue(numKV, - Integer.parseInt(cmdLine.getOptionValue(KEY_SIZE_OPTION)), - Integer.parseInt(cmdLine.getOptionValue(VALUE_SIZE_OPTION))); + configureKeyValue(numKV, Integer.parseInt(cmdLine.getOptionValue(KEY_SIZE_OPTION)), + 
Integer.parseInt(cmdLine.getOptionValue(VALUE_SIZE_OPTION))); FileSystem fs = FileSystem.get(conf); Compression.Algorithm compr = Compression.Algorithm.NONE; if (cmdLine.hasOption(COMPRESSION_OPTION)) { - compr = Compression.Algorithm.valueOf( - cmdLine.getOptionValue(COMPRESSION_OPTION)); + compr = Compression.Algorithm.valueOf(cmdLine.getOptionValue(COMPRESSION_OPTION)); } BloomType bloomType = BloomType.NONE; if (cmdLine.hasOption(BLOOM_FILTER_OPTION)) { - bloomType = BloomType.valueOf(cmdLine.getOptionValue( - BLOOM_FILTER_OPTION)); + bloomType = BloomType.valueOf(cmdLine.getOptionValue(BLOOM_FILTER_OPTION)); } if (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH) { @@ -179,7 +162,7 @@ public boolean run(String[] args) throws IOException { return false; } else { conf.set(BloomFilterUtil.PREFIX_LENGTH_KEY, - cmdLine.getOptionValue(BLOOM_FILTER_PARAM_OPTION)); + cmdLine.getOptionValue(BLOOM_FILTER_PARAM_OPTION)); } } @@ -189,23 +172,19 @@ public boolean run(String[] args) throws IOException { if (cmdLine.hasOption(BLOOM_BLOCK_SIZE_OPTION)) { conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, - Integer.valueOf(cmdLine.getOptionValue(BLOOM_BLOCK_SIZE_OPTION))); + Integer.valueOf(cmdLine.getOptionValue(BLOOM_BLOCK_SIZE_OPTION))); } if (cmdLine.hasOption(INDEX_BLOCK_SIZE_OPTION)) { conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, - Integer.valueOf(cmdLine.getOptionValue(INDEX_BLOCK_SIZE_OPTION))); + Integer.valueOf(cmdLine.getOptionValue(INDEX_BLOCK_SIZE_OPTION))); } - HFileContext meta = new HFileContextBuilder().withCompression(compr) - .withBlockSize(blockSize).build(); - StoreFileWriter sfw = new StoreFileWriter.Builder(conf, - new CacheConfig(conf), fs) - .withOutputDir(outputDir) - .withBloomType(bloomType) - .withMaxKeyCount(numKV) - .withFileContext(meta) - .build(); + HFileContext meta = + new HFileContextBuilder().withCompression(compr).withBlockSize(blockSize).build(); + StoreFileWriter sfw = + new StoreFileWriter.Builder(conf, new CacheConfig(conf), fs).withOutputDir(outputDir) + .withBloomType(bloomType).withMaxKeyCount(numKV).withFileContext(meta).build(); LOG.info("Writing " + numKV + " key/value pairs"); for (long i = 0; i < numKV; ++i) { @@ -215,8 +194,7 @@ public boolean run(String[] args) throws IOException { int numMetaBlocks = ThreadLocalRandom.current().nextInt(10) + 1; LOG.info("Writing " + numMetaBlocks + " meta blocks"); for (int metaI = 0; metaI < numMetaBlocks; ++metaI) { - sfw.getHFileWriter().appendMetaBlock(generateString(), - new BytesWritable(generateValue())); + sfw.getHFileWriter().appendMetaBlock(generateString(), new BytesWritable(generateValue())); } sfw.close(); @@ -252,13 +230,8 @@ public KeyValue generateKeyValue(long i) { byte[] k = generateKey(i); byte[] v = generateValue(); Random rand = ThreadLocalRandom.current(); - return new KeyValue( - k, 0, rowLen, - k, rowLen, cfLen, - k, rowLen + cfLen, k.length - rowLen - cfLen, - rand.nextLong(), - generateKeyType(rand), - v, 0, v.length); + return new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen, + k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length); } public static KeyValue.Type generateKeyType(Random rand) { @@ -266,12 +239,10 @@ public static KeyValue.Type generateKeyType(Random rand) { // Let's make half of KVs puts. 
return KeyValue.Type.Put; } else { - KeyValue.Type keyType = - KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)]; - if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) - { - throw new RuntimeException("Generated an invalid key type: " + keyType - + ". " + "Probably the layout of KeyValue.Type has changed."); + KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)]; + if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) { + throw new RuntimeException("Generated an invalid key type: " + keyType + ". " + + "Probably the layout of KeyValue.Type has changed."); } return keyType; } @@ -288,8 +259,7 @@ private String generateString() { private byte[] generateKey(long i) { Random rand = ThreadLocalRandom.current(); - byte[] k = new byte[Math.max(keyPrefixLen, keyLen - + nextInRange(LEN_VARIATION))]; + byte[] k = new byte[Math.max(keyPrefixLen, keyLen + nextInRange(LEN_VARIATION))]; for (int pos = keyPrefixLen - 1; pos >= 0; --pos) { k[pos] = (byte) (i & 0xFF); i >>>= 8; @@ -312,8 +282,7 @@ private byte[] generateValue() { public static void main(String[] args) { CreateRandomStoreFile app = new CreateRandomStoreFile(); try { - if (!app.run(args)) - System.exit(EXIT_FAILURE); + if (!app.run(args)) System.exit(EXIT_FAILURE); } catch (IOException ex) { LOG.error(ex.toString(), ex); System.exit(EXIT_FAILURE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index 01f40be93f51..97a1724df4fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.regionserver; @@ -26,7 +27,6 @@ import java.util.Iterator; import java.util.List; import java.util.Locale; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -51,6 +51,7 @@ import org.apache.hadoop.io.compress.Decompressor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -59,25 +60,23 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.PosixParser; /** - * Tests various algorithms for key compression on an existing HFile. Useful - * for testing, debugging and benchmarking. + * Tests various algorithms for key compression on an existing HFile. Useful for testing, debugging + * and benchmarking. */ public class DataBlockEncodingTool { - private static final Logger LOG = LoggerFactory.getLogger( - DataBlockEncodingTool.class); + private static final Logger LOG = LoggerFactory.getLogger(DataBlockEncodingTool.class); private static final boolean includesMemstoreTS = true; /** - * How many times to run the benchmark. More times means better data in terms - * of statistics but slower execution. Has to be strictly larger than - * {@link #DEFAULT_BENCHMARK_N_OMIT}. + * How many times to run the benchmark. More times means better data in terms of statistics but + * slower execution. Has to be strictly larger than {@link #DEFAULT_BENCHMARK_N_OMIT}. */ private static final int DEFAULT_BENCHMARK_N_TIMES = 12; /** - * How many first runs should not be included in the benchmark. Done in order - * to exclude setup cost. + * How many first runs should not be included in the benchmark. Done in order to exclude setup + * cost. */ private static final int DEFAULT_BENCHMARK_N_OMIT = 2; @@ -103,11 +102,9 @@ public class DataBlockEncodingTool { private static final String OPT_BENCHMARK_N_OMIT = "omit"; /** Compression algorithm to use if not specified on the command line */ - private static final Algorithm DEFAULT_COMPRESSION = - Compression.Algorithm.GZ; + private static final Algorithm DEFAULT_COMPRESSION = Compression.Algorithm.GZ; - private static final DecimalFormat DELIMITED_DECIMAL_FORMAT = - new DecimalFormat(); + private static final DecimalFormat DELIMITED_DECIMAL_FORMAT = new DecimalFormat(); static { DELIMITED_DECIMAL_FORMAT.setGroupingSize(3); @@ -139,10 +136,7 @@ public class DataBlockEncodingTool { private static boolean USE_TAG = false; private enum Manipulation { - ENCODING, - DECODING, - COMPRESSION, - DECOMPRESSION; + ENCODING, DECODING, COMPRESSION, DECOMPRESSION; @Override public String toString() { @@ -155,14 +149,13 @@ public String toString() { } /** - * @param compressionAlgorithmName What kind of algorithm should be used - * as baseline for comparison (e.g. lzo, gz). + * @param compressionAlgorithmName What kind of algorithm should be used as baseline for + * comparison (e.g. lzo, gz). 
*/ public DataBlockEncodingTool(Configuration conf, String compressionAlgorithmName) { this.conf = conf; this.compressionAlgorithmName = compressionAlgorithmName; - this.compressionAlgorithm = Compression.getCompressionAlgorithmByName( - compressionAlgorithmName); + this.compressionAlgorithm = Compression.getCompressionAlgorithmByName(compressionAlgorithmName); this.compressor = this.compressionAlgorithm.getCompressor(); this.decompressor = this.compressionAlgorithm.getDecompressor(); } @@ -173,8 +166,7 @@ public DataBlockEncodingTool(Configuration conf, String compressionAlgorithmName * @param kvLimit Maximal count of KeyValue which will be processed. * @throws IOException thrown if scanner is invalid */ - public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) - throws IOException { + public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) throws IOException { scanner.seek(KeyValue.LOWESTKEY); KeyValue currentKV; @@ -184,8 +176,7 @@ public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) DataBlockEncoding[] encodings = DataBlockEncoding.values(); - ByteArrayOutputStream uncompressedOutputStream = - new ByteArrayOutputStream(); + ByteArrayOutputStream uncompressedOutputStream = new ByteArrayOutputStream(); int j = 0; while ((currentKV = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) { @@ -193,8 +184,8 @@ public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) j++; currentKey = currentKV.getKey(); if (previousKey != null) { - for (int i = 0; i < previousKey.length && i < currentKey.length && - previousKey[i] == currentKey[i]; ++i) { + for (int i = 0; i < previousKey.length && i < currentKey.length + && previousKey[i] == currentKey[i]; ++i) { totalKeyRedundancyLength++; } } @@ -205,18 +196,18 @@ public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) // include tags. If USE_TAG is true, HFile contains cells with tags, // if the cell tagsLen equals 0, it means other cells may have tags. if (USE_TAG && currentKV.getTagsLength() == 0) { - uncompressedOutputStream.write(currentKV.getBuffer(), - currentKV.getOffset(), currentKV.getLength()); + uncompressedOutputStream.write(currentKV.getBuffer(), currentKV.getOffset(), + currentKV.getLength()); // write tagsLen = 0. 
uncompressedOutputStream.write(Bytes.toBytes((short) 0)); } else { - uncompressedOutputStream.write(currentKV.getBuffer(), - currentKV.getOffset(), currentKV.getLength()); + uncompressedOutputStream.write(currentKV.getBuffer(), currentKV.getOffset(), + currentKV.getLength()); } - if(includesMemstoreTS) { - WritableUtils.writeVLong( - new DataOutputStream(uncompressedOutputStream), currentKV.getSequenceId()); + if (includesMemstoreTS) { + WritableUtils.writeVLong(new DataOutputStream(uncompressedOutputStream), + currentKV.getSequenceId()); } previousKey = currentKey; @@ -239,10 +230,8 @@ public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) continue; } DataBlockEncoder d = encoding.getEncoder(); - HFileContext meta = new HFileContextBuilder() - .withDataBlockEncoding(encoding) - .withCompression(Compression.Algorithm.NONE) - .withIncludesMvcc(includesMemstoreTS) + HFileContext meta = new HFileContextBuilder().withDataBlockEncoding(encoding) + .withCompression(Compression.Algorithm.NONE).withIncludesMvcc(includesMemstoreTS) .withIncludesTags(USE_TAG).build(); codecs.add(new EncodedDataBlock(conf, d, encoding, rawKVs, meta)); } @@ -250,19 +239,17 @@ public void checkStatistics(final KeyValueScanner scanner, final int kvLimit) /** * Verify if all data block encoders are working properly. - * * @param scanner Of file which was compressed. * @param kvLimit Maximal count of KeyValue which will be processed. * @return true if all data block encoders compressed/decompressed correctly. * @throws IOException thrown if scanner is invalid */ - public boolean verifyCodecs(final KeyValueScanner scanner, final int kvLimit) - throws IOException { + public boolean verifyCodecs(final KeyValueScanner scanner, final int kvLimit) throws IOException { KeyValue currentKv; scanner.seek(KeyValue.LOWESTKEY); List> codecIterators = new ArrayList<>(); - for(EncodedDataBlock codec : codecs) { + for (EncodedDataBlock codec : codecs) { codecIterators.add(codec.getIterator(HFileBlock.headerSize(useHBaseChecksum))); } @@ -273,47 +260,38 @@ public boolean verifyCodecs(final KeyValueScanner scanner, final int kvLimit) for (Iterator it : codecIterators) { Cell c = it.next(); KeyValue codecKv = KeyValueUtil.ensureKeyValue(c); - if (codecKv == null || 0 != Bytes.compareTo( - codecKv.getBuffer(), codecKv.getOffset(), codecKv.getLength(), - currentKv.getBuffer(), currentKv.getOffset(), - currentKv.getLength())) { + if (codecKv == null + || 0 != Bytes.compareTo(codecKv.getBuffer(), codecKv.getOffset(), codecKv.getLength(), + currentKv.getBuffer(), currentKv.getOffset(), currentKv.getLength())) { if (codecKv == null) { - LOG.error("There is a bug in codec " + it + - " it returned null KeyValue,"); + LOG.error("There is a bug in codec " + it + " it returned null KeyValue,"); } else { int prefix = 0; - int limitLength = 2 * Bytes.SIZEOF_INT + - Math.min(codecKv.getLength(), currentKv.getLength()); - while (prefix < limitLength && - codecKv.getBuffer()[prefix + codecKv.getOffset()] == - currentKv.getBuffer()[prefix + currentKv.getOffset()]) { + int limitLength = + 2 * Bytes.SIZEOF_INT + Math.min(codecKv.getLength(), currentKv.getLength()); + while (prefix < limitLength && codecKv.getBuffer()[prefix + + codecKv.getOffset()] == currentKv.getBuffer()[prefix + currentKv.getOffset()]) { prefix++; } - LOG.error("There is bug in codec " + it.toString() + - "\n on element " + j + - "\n codecKv.getKeyLength() " + codecKv.getKeyLength() + - "\n codecKv.getValueLength() " + codecKv.getValueLength() + - "\n 
codecKv.getLength() " + codecKv.getLength() + - "\n currentKv.getKeyLength() " + currentKv.getKeyLength() + - "\n currentKv.getValueLength() " + currentKv.getValueLength() + - "\n codecKv.getLength() " + currentKv.getLength() + - "\n currentKV rowLength " + currentKv.getRowLength() + - " familyName " + currentKv.getFamilyLength() + - " qualifier " + currentKv.getQualifierLength() + - "\n prefix " + prefix + - "\n codecKv '" + Bytes.toStringBinary(codecKv.getBuffer(), - codecKv.getOffset(), prefix) + "' diff '" + - Bytes.toStringBinary(codecKv.getBuffer(), - codecKv.getOffset() + prefix, codecKv.getLength() - - prefix) + "'" + - "\n currentKv '" + Bytes.toStringBinary( - currentKv.getBuffer(), - currentKv.getOffset(), prefix) + "' diff '" + - Bytes.toStringBinary(currentKv.getBuffer(), - currentKv.getOffset() + prefix, currentKv.getLength() - - prefix) + "'" - ); + LOG.error("There is bug in codec " + it.toString() + "\n on element " + j + + "\n codecKv.getKeyLength() " + codecKv.getKeyLength() + + "\n codecKv.getValueLength() " + codecKv.getValueLength() + + "\n codecKv.getLength() " + codecKv.getLength() + "\n currentKv.getKeyLength() " + + currentKv.getKeyLength() + "\n currentKv.getValueLength() " + + currentKv.getValueLength() + "\n codecKv.getLength() " + currentKv.getLength() + + "\n currentKV rowLength " + currentKv.getRowLength() + " familyName " + + currentKv.getFamilyLength() + " qualifier " + currentKv.getQualifierLength() + + "\n prefix " + prefix + "\n codecKv '" + + Bytes.toStringBinary(codecKv.getBuffer(), codecKv.getOffset(), prefix) + + "' diff '" + + Bytes.toStringBinary(codecKv.getBuffer(), codecKv.getOffset() + prefix, + codecKv.getLength() - prefix) + + "'" + "\n currentKv '" + + Bytes.toStringBinary(currentKv.getBuffer(), currentKv.getOffset(), prefix) + + "' diff '" + Bytes.toStringBinary(currentKv.getBuffer(), + currentKv.getOffset() + prefix, currentKv.getLength() - prefix) + + "'"); } return false; } @@ -340,8 +318,7 @@ public void benchmarkCodecs() throws IOException { /** * Benchmark compression/decompression throughput. - * @param previousTotalSize Total size used for verification. Use -1 if - * unknown. + * @param previousTotalSize Total size used for verification. Use -1 if unknown. * @param codec Tested encoder. * @return Size of uncompressed data. */ @@ -370,8 +347,8 @@ private int benchmarkEncoder(int previousTotalSize, EncodedDataBlock codec) { } if (prevTotalSize != -1 && prevTotalSize != totalSize) { - throw new IllegalStateException(String.format( - "Algorithm '%s' decoded data to different size", codec.toString())); + throw new IllegalStateException( + String.format("Algorithm '%s' decoded data to different size", codec.toString())); } prevTotalSize = totalSize; } @@ -394,10 +371,9 @@ private int benchmarkEncoder(int previousTotalSize, EncodedDataBlock codec) { return prevTotalSize; } - private void benchmarkDefaultCompression(int totalSize, byte[] rawBuffer) - throws IOException { - benchmarkAlgorithm(compressionAlgorithm, - compressionAlgorithmName.toUpperCase(Locale.ROOT), rawBuffer, 0, totalSize); + private void benchmarkDefaultCompression(int totalSize, byte[] rawBuffer) throws IOException { + benchmarkAlgorithm(compressionAlgorithm, compressionAlgorithmName.toUpperCase(Locale.ROOT), + rawBuffer, 0, totalSize); } /** @@ -409,8 +385,8 @@ private void benchmarkDefaultCompression(int totalSize, byte[] rawBuffer) * @param length Length of data in buffer. 
* @throws IOException */ - public void benchmarkAlgorithm(Compression.Algorithm algorithm, String name, - byte[] buffer, int offset, int length) throws IOException { + public void benchmarkAlgorithm(Compression.Algorithm algorithm, String name, byte[] buffer, + int offset, int length) throws IOException { System.out.println(name + ":"); // compress it @@ -437,9 +413,9 @@ public void benchmarkAlgorithm(Compression.Algorithm algorithm, String name, } } } catch (IOException e) { - throw new RuntimeException(String.format( - "Benchmark, or encoding algorithm '%s' cause some stream problems", - name), e); + throw new RuntimeException( + String.format("Benchmark, or encoding algorithm '%s' cause some stream problems", name), + e); } compressingStream.close(); printBenchmarkResult(length, compressDurations, Manipulation.COMPRESSION); @@ -453,10 +429,10 @@ public void benchmarkAlgorithm(Compression.Algorithm algorithm, String name, byte[] newBuf = new byte[length + 1]; try { - ByteArrayInputStream downStream = new ByteArrayInputStream(compBuffer, - 0, compBuffer.length); - InputStream decompressedStream = algorithm.createDecompressionStream( - downStream, decompressor, 0); + ByteArrayInputStream downStream = + new ByteArrayInputStream(compBuffer, 0, compBuffer.length); + InputStream decompressedStream = + algorithm.createDecompressionStream(downStream, decompressor, 0); int destOffset = 0; int nextChunk; @@ -466,8 +442,8 @@ public void benchmarkAlgorithm(Compression.Algorithm algorithm, String name, decompressedStream.close(); } catch (IOException e) { - throw new RuntimeException(String.format( - "Decoding path in '%s' algorithm cause exception ", name), e); + throw new RuntimeException( + String.format("Decoding path in '%s' algorithm cause exception ", name), e); } final long finishTime = System.nanoTime(); @@ -475,13 +451,12 @@ public void benchmarkAlgorithm(Compression.Algorithm algorithm, String name, // check correctness if (0 != Bytes.compareTo(buffer, 0, length, newBuf, 0, length)) { int prefix = 0; - for(; prefix < buffer.length && prefix < newBuf.length; ++prefix) { + for (; prefix < buffer.length && prefix < newBuf.length; ++prefix) { if (buffer[prefix] != newBuf[prefix]) { break; } } - throw new RuntimeException(String.format( - "Algorithm '%s' is corrupting the data", name)); + throw new RuntimeException(String.format("Algorithm '%s' is corrupting the data", name)); } // add time record @@ -497,8 +472,8 @@ public void benchmarkAlgorithm(Compression.Algorithm algorithm, String name, private static final double NS_IN_SEC = 1000.0 * 1000.0 * 1000.0; private static final double MB_SEC_COEF = NS_IN_SEC / BYTES_IN_MB; - private static void printBenchmarkResult(int totalSize, - List durationsInNanoSec, Manipulation manipulation) { + private static void printBenchmarkResult(int totalSize, List durationsInNanoSec, + Manipulation manipulation) { final int n = durationsInNanoSec.size(); long meanTime = 0; for (long time : durationsInNanoSec) { @@ -517,12 +492,11 @@ private static void printBenchmarkResult(int totalSize, mbPerSecSTD = Math.sqrt(mbPerSecSTD / n); } - outputTuple(manipulation + " performance", "%6.2f MB/s (+/- %.2f MB/s)", - meanMBPerSec, mbPerSecSTD); + outputTuple(manipulation + " performance", "%6.2f MB/s (+/- %.2f MB/s)", meanMBPerSec, + mbPerSecSTD); } - private static void outputTuple(String caption, String format, - Object... values) { + private static void outputTuple(String caption, String format, Object... 
values) { if (format.startsWith(INT_FORMAT)) { format = "%s" + format.substring(INT_FORMAT.length()); values[0] = DELIMITED_DECIMAL_FORMAT.format(values[0]); @@ -558,10 +532,9 @@ public void displayStatistics() throws IOException { outputTuplePct("CF overhead", totalCFLength); outputTuplePct("Total key redundancy", totalKeyRedundancyLength); - int compressedSize = EncodedDataBlock.getCompressedSize( - compressionAlgorithm, compressor, rawKVs, 0, rawKVs.length); - outputTuple(comprAlgo + " only size", INT_FORMAT, - compressedSize); + int compressedSize = EncodedDataBlock.getCompressedSize(compressionAlgorithm, compressor, + rawKVs, 0, rawKVs.length); + outputTuple(comprAlgo + " only size", INT_FORMAT, compressedSize); outputSavings(comprAlgo + " only", compressedSize, rawBytes); System.out.println(); @@ -569,32 +542,26 @@ public void displayStatistics() throws IOException { System.out.println(codec.toString()); long encodedBytes = codec.getSize(); outputTuple("Encoded bytes", INT_FORMAT, encodedBytes); - outputSavings("Key encoding", encodedBytes - totalValueLength, - rawBytes - totalValueLength); + outputSavings("Key encoding", encodedBytes - totalValueLength, rawBytes - totalValueLength); outputSavings("Total encoding", encodedBytes, rawBytes); - int encodedCompressedSize = codec.getEncodedCompressedSize( - compressionAlgorithm, compressor); - outputTuple("Encoding + " + comprAlgo + " size", INT_FORMAT, - encodedCompressedSize); + int encodedCompressedSize = codec.getEncodedCompressedSize(compressionAlgorithm, compressor); + outputTuple("Encoding + " + comprAlgo + " size", INT_FORMAT, encodedCompressedSize); outputSavings("Encoding + " + comprAlgo, encodedCompressedSize, rawBytes); - outputSavings("Encoding with " + comprAlgo, encodedCompressedSize, - compressedSize); + outputSavings("Encoding with " + comprAlgo, encodedCompressedSize, compressedSize); System.out.println(); } } private void outputTuplePct(String caption, long size) { - outputTuple(caption, INT_FORMAT + " (" + PCT_FORMAT + ")", - size, size * 100.0 / rawKVs.length); + outputTuple(caption, INT_FORMAT + " (" + PCT_FORMAT + ")", size, size * 100.0 / rawKVs.length); } private void outputSavings(String caption, long part, long whole) { double pct = 100.0 * (1 - 1.0 * part / whole); double times = whole * 1.0 / part; - outputTuple(caption + " savings", PCT_FORMAT + " (%.2f x)", - pct, times); + outputTuple(caption + " savings", PCT_FORMAT + " (%.2f x)", pct, times); } /** @@ -606,9 +573,8 @@ private void outputSavings(String caption, long part, long whole) { * @param doVerify Verify correctness. * @throws IOException When pathName is incorrect. 
*/ - public static void testCodecs(Configuration conf, int kvLimit, - String hfilePath, String compressionName, boolean doBenchmark, - boolean doVerify) throws IOException { + public static void testCodecs(Configuration conf, int kvLimit, String hfilePath, + String compressionName, boolean doBenchmark, boolean doVerify) throws IOException { // create environment Path path = new Path(hfilePath); CacheConfig cacheConf = new CacheConfig(conf); @@ -617,15 +583,14 @@ public static void testCodecs(Configuration conf, int kvLimit, hsf.initReader(); StoreFileReader reader = hsf.getReader(); reader.loadFileInfo(); - KeyValueScanner scanner = reader.getStoreFileScanner(true, true, - false, hsf.getMaxMemStoreTS(), 0, false); + KeyValueScanner scanner = + reader.getStoreFileScanner(true, true, false, hsf.getMaxMemStoreTS(), 0, false); USE_TAG = reader.getHFileReader().getFileContext().isIncludesTags(); // run the utilities DataBlockEncodingTool comp = new DataBlockEncodingTool(conf, compressionName); int majorVersion = reader.getHFileVersion(); - comp.useHBaseChecksum = majorVersion > 2 || - (majorVersion == 2 && - reader.getHFileMinorVersion() >= HFileReaderImpl.MINOR_VERSION_WITH_CHECKSUM); + comp.useHBaseChecksum = majorVersion > 2 || (majorVersion == 2 + && reader.getHFileMinorVersion() >= HFileReaderImpl.MINOR_VERSION_WITH_CHECKSUM); comp.checkStatistics(scanner, kvLimit); if (doVerify) { comp.verifyCodecs(scanner, kvLimit); @@ -642,24 +607,23 @@ public static void testCodecs(Configuration conf, int kvLimit, private static void printUsage(Options options) { System.err.println("Usage:"); - System.err.println(String.format("./hbase %s ", - DataBlockEncodingTool.class.getName())); + System.err + .println(String.format("./hbase %s ", DataBlockEncodingTool.class.getName())); System.err.println("Options:"); for (Object it : options.getOptions()) { Option opt = (Option) it; if (opt.hasArg()) { - System.err.println(String.format("-%s %s: %s", opt.getOpt(), - opt.getArgName(), opt.getDescription())); + System.err.println( + String.format("-%s %s: %s", opt.getOpt(), opt.getArgName(), opt.getDescription())); } else { - System.err.println(String.format("-%s: %s", opt.getOpt(), - opt.getDescription())); + System.err.println(String.format("-%s: %s", opt.getOpt(), opt.getDescription())); } } } /** - * A command line interface to benchmarks. Parses command-line arguments and - * runs the appropriate benchmarks. + * A command line interface to benchmarks. Parses command-line arguments and runs the appropriate + * benchmarks. * @param args Should have length at least 1 and holds the file path to HFile. * @throws IOException If you specified the wrong file. */ @@ -669,24 +633,20 @@ public static void main(final String[] args) throws IOException { options.addOption(OPT_HFILE_NAME, true, "HFile to analyse (REQUIRED)"); options.getOption(OPT_HFILE_NAME).setArgName("FILENAME"); options.addOption(OPT_KV_LIMIT, true, - "Maximum number of KeyValues to process. A benchmark stops running " + - "after iterating over this many KV pairs."); + "Maximum number of KeyValues to process. 
A benchmark stops running " + + "after iterating over this many KV pairs."); options.getOption(OPT_KV_LIMIT).setArgName("NUMBER"); - options.addOption(OPT_MEASURE_THROUGHPUT, false, - "Measure read throughput"); - options.addOption(OPT_OMIT_CORRECTNESS_TEST, false, - "Omit corectness tests."); + options.addOption(OPT_MEASURE_THROUGHPUT, false, "Measure read throughput"); + options.addOption(OPT_OMIT_CORRECTNESS_TEST, false, "Omit corectness tests."); options.addOption(OPT_COMPRESSION_ALGORITHM, true, - "What kind of compression algorithm use for comparison."); - options.addOption(OPT_BENCHMARK_N_TIMES, - true, "Number of times to run each benchmark. Default value: " + - DEFAULT_BENCHMARK_N_TIMES); + "What kind of compression algorithm use for comparison."); + options.addOption(OPT_BENCHMARK_N_TIMES, true, + "Number of times to run each benchmark. Default value: " + DEFAULT_BENCHMARK_N_TIMES); options.addOption(OPT_BENCHMARK_N_OMIT, true, - "Number of first runs of every benchmark to exclude from " - + "statistics (" + DEFAULT_BENCHMARK_N_OMIT - + " by default, so that " + "only the last " - + (DEFAULT_BENCHMARK_N_TIMES - DEFAULT_BENCHMARK_N_OMIT) - + " times are included in statistics.)"); + "Number of first runs of every benchmark to exclude from " + "statistics (" + + DEFAULT_BENCHMARK_N_OMIT + " by default, so that " + "only the last " + + (DEFAULT_BENCHMARK_N_TIMES - DEFAULT_BENCHMARK_N_OMIT) + + " times are included in statistics.)"); // parse arguments CommandLineParser parser = new PosixParser(); @@ -709,8 +669,7 @@ public static void main(final String[] args) throws IOException { // basic argument sanity checks if (!cmd.hasOption(OPT_HFILE_NAME)) { - LOG.error("Please specify HFile name using the " + OPT_HFILE_NAME - + " option"); + LOG.error("Please specify HFile name using the " + OPT_HFILE_NAME + " option"); printUsage(options); System.exit(-1); } @@ -718,29 +677,25 @@ public static void main(final String[] args) throws IOException { String pathName = cmd.getOptionValue(OPT_HFILE_NAME); String compressionName = DEFAULT_COMPRESSION.getName(); if (cmd.hasOption(OPT_COMPRESSION_ALGORITHM)) { - compressionName = - cmd.getOptionValue(OPT_COMPRESSION_ALGORITHM).toLowerCase(Locale.ROOT); + compressionName = cmd.getOptionValue(OPT_COMPRESSION_ALGORITHM).toLowerCase(Locale.ROOT); } boolean doBenchmark = cmd.hasOption(OPT_MEASURE_THROUGHPUT); boolean doVerify = !cmd.hasOption(OPT_OMIT_CORRECTNESS_TEST); if (cmd.hasOption(OPT_BENCHMARK_N_TIMES)) { - benchmarkNTimes = Integer.valueOf(cmd.getOptionValue( - OPT_BENCHMARK_N_TIMES)); + benchmarkNTimes = Integer.valueOf(cmd.getOptionValue(OPT_BENCHMARK_N_TIMES)); } if (cmd.hasOption(OPT_BENCHMARK_N_OMIT)) { - benchmarkNOmit = - Integer.valueOf(cmd.getOptionValue(OPT_BENCHMARK_N_OMIT)); + benchmarkNOmit = Integer.valueOf(cmd.getOptionValue(OPT_BENCHMARK_N_OMIT)); } if (benchmarkNTimes < benchmarkNOmit) { - LOG.error("The number of times to run each benchmark (" - + benchmarkNTimes - + ") must be greater than the number of benchmark runs to exclude " - + "from statistics (" + benchmarkNOmit + ")"); + LOG.error("The number of times to run each benchmark (" + benchmarkNTimes + + ") must be greater than the number of benchmark runs to exclude " + "from statistics (" + + benchmarkNOmit + ")"); System.exit(1); } - LOG.info("Running benchmark " + benchmarkNTimes + " times. " + - "Excluding the first " + benchmarkNOmit + " times from statistics."); + LOG.info("Running benchmark " + benchmarkNTimes + " times. 
" + "Excluding the first " + + benchmarkNOmit + " times from statistics."); final Configuration conf = HBaseConfiguration.create(); testCodecs(conf, kvLimit, pathName, compressionName, doBenchmark, doVerify); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingInternalScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingInternalScanner.java index ad733d11c792..b7ba8086280c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingInternalScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingInternalScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java index 568ab7226e93..373e138a764b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.Scan; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java index 0b86e0af0e3d..2ae321b0bd36 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; @@ -20,7 +21,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Random; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -33,8 +33,7 @@ import org.apache.hadoop.hbase.io.hfile.LruBlockCache; /** - * Test seek performance for encoded data blocks. Read an HFile and do several - * random seeks. + * Test seek performance for encoded data blocks. Read an HFile and do several random seeks. */ public class EncodedSeekPerformanceTest { private static final double NANOSEC_IN_SEC = 1000.0 * 1000.0 * 1000.0; @@ -59,8 +58,8 @@ private List prepareListOfTestSeeks(Path path) throws IOException { List allKeyValues = new ArrayList<>(); // read all of the key values - HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(), - path, configuration, cacheConf, BloomType.NONE, true); + HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(), path, configuration, + cacheConf, BloomType.NONE, true); storeFile.initReader(); StoreFileReader reader = storeFile.getReader(); StoreFileScanner scanner = reader.getStoreFileScanner(true, false, false, 0, 0, false); @@ -76,8 +75,7 @@ private List prepareListOfTestSeeks(Path path) throws IOException { // pick seeks by random List seeks = new ArrayList<>(); for (int i = 0; i < numberOfSeeks; ++i) { - Cell keyValue = allKeyValues.get( - randomizer.nextInt(allKeyValues.size())); + Cell keyValue = allKeyValues.get(randomizer.nextInt(allKeyValues.size())); seeks.add(keyValue); } @@ -86,11 +84,11 @@ private List prepareListOfTestSeeks(Path path) throws IOException { return seeks; } - private void runTest(Path path, DataBlockEncoding blockEncoding, - List seeks) throws IOException { + private void runTest(Path path, DataBlockEncoding blockEncoding, List seeks) + throws IOException { // read all of the key values - HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(), - path, configuration, cacheConf, BloomType.NONE, true); + HStoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(), path, configuration, + cacheConf, BloomType.NONE, true); storeFile.initReader(); long totalSize = 0; @@ -114,23 +112,23 @@ private void runTest(Path path, DataBlockEncoding blockEncoding, scanner.seek(keyValue); Cell toVerify = scanner.next(); if (!keyValue.equals(toVerify)) { - System.out.println(String.format("KeyValue doesn't match:\n" + "Orig key: %s\n" - + "Ret key: %s", KeyValueUtil.ensureKeyValue(keyValue).getKeyString(), KeyValueUtil - .ensureKeyValue(toVerify).getKeyString())); + System.out + .println(String.format("KeyValue doesn't match:\n" + "Orig key: %s\n" + "Ret key: %s", + KeyValueUtil.ensureKeyValue(keyValue).getKeyString(), + KeyValueUtil.ensureKeyValue(toVerify).getKeyString())); break; } } long finishSeeksTime = System.nanoTime(); if (finishSeeksTime < startSeeksTime) { - throw new AssertionError("Finish time " + finishSeeksTime + - " is earlier than start time " + startSeeksTime); + throw new AssertionError( + "Finish time " + finishSeeksTime + " is earlier than start time " + startSeeksTime); } // write some stats - 
double readInMbPerSec = (totalSize * NANOSEC_IN_SEC) / - (BYTES_IN_MEGABYTES * (finishReadingTime - startReadingTime)); - double seeksPerSec = (seeks.size() * NANOSEC_IN_SEC) / - (finishSeeksTime - startSeeksTime); + double readInMbPerSec = (totalSize * NANOSEC_IN_SEC) + / (BYTES_IN_MEGABYTES * (finishReadingTime - startReadingTime)); + double seeksPerSec = (seeks.size() * NANOSEC_IN_SEC) / (finishSeeksTime - startSeeksTime); storeFile.closeStoreFile(cacheConf.shouldEvictOnClose()); clearBlockCache(); @@ -146,8 +144,7 @@ private void runTest(Path path, DataBlockEncoding blockEncoding, * @param encodings the data block encoding algorithms to use * @throws IOException if there is a bug while reading from disk */ - public void runTests(Path path, DataBlockEncoding[] encodings) - throws IOException { + public void runTests(Path path, DataBlockEncoding[] encodings) throws IOException { List seeks = prepareListOfTestSeeks(path); for (DataBlockEncoding blockEncoding : encodings) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java index f15e4323c848..9a3c5d2e218b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,31 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.util.CollectionBackedScanner; /** - * A fixture that implements and presents a KeyValueScanner. - * It takes a list of key/values which is then sorted according - * to the provided comparator, and then the whole thing pretends - * to be a store file scanner. + * A fixture that implements and presents a KeyValueScanner. It takes a list of key/values which is + * then sorted according to the provided comparator, and then the whole thing pretends to be a store + * file scanner. */ public class KeyValueScanFixture extends CollectionBackedScanner { public KeyValueScanFixture(CellComparator comparator, Cell... cells) { super(comparator, cells); } - public static List scanFixture(KeyValue[] ... kvArrays) { + public static List scanFixture(KeyValue[]... 
kvArrays) { ArrayList scanners = new ArrayList<>(); - for (KeyValue [] kvs : kvArrays) { + for (KeyValue[] kvs : kvArrays) { scanners.add(new KeyValueScanFixture(CellComparator.getInstance(), kvs)); } return scanners; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java index a874cb17181a..3133224f6aa1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; @@ -304,7 +303,7 @@ public long getBlockCacheEvictedCount() { } @Override - public long getBlockCachePrimaryEvictedCount() { + public long getBlockCachePrimaryEvictedCount() { return 420; } @@ -370,7 +369,7 @@ public long getUpdatesBlockedTime() { @Override public void forceRecompute() { - //IGNORED. + // IGNORED. } @Override @@ -644,7 +643,7 @@ public long getAverageRegionSize() { } @Override - public long getRpcFullScanRequestsCount() { + public long getRpcFullScanRequestsCount() { return 10; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java index 4f40f6289cb3..01187475944d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.HashMap; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java index af24c2798531..bedd24e93587 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.Optional; import java.util.OptionalLong; import java.util.TreeMap; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderFactory; @@ -52,8 +51,8 @@ public class MockHStoreFile extends HStoreFile { long modificationTime; boolean compactedAway; - MockHStoreFile(HBaseTestingUtil testUtil, Path testPath, - long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException { + MockHStoreFile(HBaseTestingUtil testUtil, Path testPath, long length, long ageInDisk, + boolean isRef, long sequenceid) throws IOException { super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(), new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true); this.length = length; @@ -62,8 +61,9 @@ public class MockHStoreFile extends HStoreFile { this.sequenceid = sequenceid; this.isMajor = false; hdfsBlocksDistribution = new HDFSBlocksDistribution(); - hdfsBlocksDistribution.addHostsAndBlockWeight(new String[] - { DNS.getHostname(testUtil.getConfiguration(), DNS.ServerType.REGIONSERVER) }, 1); + hdfsBlocksDistribution.addHostsAndBlockWeight( + new String[] { DNS.getHostname(testUtil.getConfiguration(), DNS.ServerType.REGIONSERVER) }, + 1); modificationTime = EnvironmentEdgeManager.currentTime(); } @@ -176,7 +176,7 @@ public long length() { @Override public long getMaxTimestamp() { - return timeRange == null? Long.MAX_VALUE: timeRangeTracker.getMax(); + return timeRange == null ? Long.MAX_VALUE : timeRangeTracker.getMax(); } @Override @@ -192,9 +192,9 @@ public void close(boolean evictOnClose) throws IOException { @Override public Optional getLastKey() { if (splitPoint != null) { - return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setType(Cell.Type.Put) - .setRow(Arrays.copyOf(splitPoint, splitPoint.length + 1)).build()); + return Optional + .of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setType(Cell.Type.Put) + .setRow(Arrays.copyOf(splitPoint, splitPoint.length + 1)).build()); } else { return Optional.empty(); } @@ -214,8 +214,7 @@ public Optional midKey() throws IOException { public Optional getFirstKey() { if (splitPoint != null) { return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setType(Cell.Type.Put).setRow(splitPoint, 0, splitPoint.length - 1) - .build()); + .setType(Cell.Type.Put).setRow(splitPoint, 0, splitPoint.length - 1).build()); } else { return Optional.empty(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java index 67d765227157..ae19cb633f81 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.Optional; - import org.apache.hadoop.hbase.client.TestFromClientSideWithCoprocessor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java index a5277166f6f7..2339e1fd2fd5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,18 +20,17 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.client.Put; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; /** - * A region server that will OOME. - * Everytime {@link #put(byte[], Put)} is called, we add - * keep around a reference to the batch. Use this class to test OOME extremes. - * Needs to be started manually as in + * A region server that will OOME. Everytime {@link #put(byte[], Put)} is called, we add keep around + * a reference to the batch. Use this class to test OOME extremes. Needs to be started manually as + * in * ${HBASE_HOME}/bin/hbase ./bin/hbase org.apache.hadoop.hbase.OOMERegionServer start. */ public class OOMERegionServer extends HRegionServer { @@ -42,11 +40,9 @@ public OOMERegionServer(HBaseConfiguration conf) throws IOException, Interrupted super(conf); } - public void put(byte [] regionName, Put put) - throws IOException { + public void put(byte[] regionName, Put put) throws IOException { try { - MutateRequest request = - RequestConverter.buildMutateRequest(regionName, put); + MutateRequest request = RequestConverter.buildMutateRequest(regionName, put); rpcServices.mutate(null, request); for (int i = 0; i < 30; i++) { // Add the batch update 30 times to bring on the OOME faster. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java index 969d01afa424..1b1e008a38ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,12 +54,11 @@ /** * An implementation of {@link Table} that sits directly on a Region; it decorates the passed in * Region instance with the Table API. Some API is not implemented yet (throws - * {@link UnsupportedOperationException}) mostly because no need as yet or it necessitates copying - * a load of code local from RegionServer. - * - *
          Use as an instance of a {@link Table} in-the-small -- no networking or servers - * necessary -- or to write a test that can run directly against the datastore and then - * over the network. + * {@link UnsupportedOperationException}) mostly because no need as yet or it necessitates copying a + * load of code local from RegionServer. + *
          + * Use as an instance of a {@link Table} in-the-small -- no networking or servers necessary -- or to + * write a test that can run directly against the datastore and then over the network. */ public class RegionAsTable implements Table { private final Region region; @@ -94,9 +93,9 @@ public boolean exists(Get get) throws IOException { @Override public boolean[] exists(List gets) throws IOException { - boolean [] results = new boolean[gets.size()]; + boolean[] results = new boolean[gets.size()]; int index = 0; - for (Get get: gets) { + for (Get get : gets) { results[index++] = exists(get); } return results; @@ -104,14 +103,13 @@ public boolean[] exists(List gets) throws IOException { @Override public void batch(List actions, Object[] results) - throws IOException, InterruptedException { + throws IOException, InterruptedException { throw new UnsupportedOperationException(); } @Override - public void batchCallback(List actions, Object[] results, - Callback callback) - throws IOException, InterruptedException { + public void batchCallback(List actions, Object[] results, Callback callback) + throws IOException, InterruptedException { throw new UnsupportedOperationException(); } @@ -122,16 +120,16 @@ public Result get(Get get) throws IOException { @Override public Result[] get(List gets) throws IOException { - Result [] results = new Result[gets.size()]; + Result[] results = new Result[gets.size()]; int index = 0; - for (Get get: gets) { + for (Get get : gets) { results[index++] = get(get); } return results; } static class RegionScannerToResultScannerAdaptor implements ResultScanner { - private static final Result [] EMPTY_RESULT_ARRAY = new Result[0]; + private static final Result[] EMPTY_RESULT_ARRAY = new Result[0]; private final RegionScanner regionScanner; RegionScannerToResultScannerAdaptor(final RegionScanner regionScanner) { @@ -146,7 +144,7 @@ public Iterator iterator() { @Override public Result next() throws IOException { List cells = new ArrayList<>(); - return regionScanner.next(cells)? Result.create(cells): null; + return regionScanner.next(cells) ? 
Result.create(cells) : null; } @Override @@ -202,7 +200,8 @@ public void put(Put put) throws IOException { @Override public void put(List puts) throws IOException { - for (Put put: puts) put(put); + for (Put put : puts) + put(put); } @Override @@ -212,7 +211,8 @@ public void delete(Delete delete) throws IOException { @Override public void delete(List deletes) throws IOException { - for(Delete delete: deletes) delete(delete); + for (Delete delete : deletes) + delete(delete); } @Override @@ -242,14 +242,13 @@ public Result increment(Increment increment) throws IOException { @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) - throws IOException { + throws IOException { throw new UnsupportedOperationException(); } @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, - Durability durability) - throws IOException { + Durability durability) throws IOException { throw new UnsupportedOperationException(); } @@ -267,30 +266,27 @@ public CoprocessorRpcChannel coprocessorService(byte[] row) { @Override public Map coprocessorService(Class service, byte[] startKey, - byte[] endKey, Call callable) - throws ServiceException, Throwable { + byte[] endKey, Call callable) throws ServiceException, Throwable { throw new UnsupportedOperationException(); } @Override public void coprocessorService(Class service, byte[] startKey, - byte[] endKey, Call callable, Callback callback) - throws ServiceException, Throwable { + byte[] endKey, Call callable, Callback callback) throws ServiceException, Throwable { throw new UnsupportedOperationException(); } @Override - public Map batchCoprocessorService(MethodDescriptor - methodDescriptor, Message request, - byte[] startKey, byte[] endKey, R responsePrototype) - throws ServiceException, Throwable { + public Map batchCoprocessorService( + MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, + R responsePrototype) throws ServiceException, Throwable { throw new UnsupportedOperationException(); } @Override public void batchCoprocessorService(MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) - throws ServiceException, Throwable { + throws ServiceException, Throwable { throw new UnsupportedOperationException(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java index f89be4325d8d..89439a546bff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,14 +24,13 @@ import static org.mockito.Mockito.when; import java.util.Optional; - import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; /** - * This class is a helper that allows to create a partially-implemented, stateful mocks of - * Store. It contains a bunch of blank methods, and answers redirecting to these. + * This class is a helper that allows to create a partially-implemented, stateful mocks of Store. 
It + * contains a bunch of blank methods, and answers redirecting to these. */ public class StatefulStoreMockMaker { // Add and expand the methods and answers as needed. @@ -40,11 +38,13 @@ public Optional selectCompaction() { return Optional.empty(); } - public void cancelCompaction(Object originalContext) {} + public void cancelCompaction(Object originalContext) { + } public int getPriority() { return 0; } + private class CancelAnswer implements Answer { @Override public CompactionContext answer(InvocationOnMock invocation) throws Throwable { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java index 49aeae0c1051..7f317fc347c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java @@ -81,10 +81,9 @@ import org.slf4j.LoggerFactory; /** - * Testing of HRegion.incrementColumnValue, HRegion.increment, - * and HRegion.append + * Testing of HRegion.incrementColumnValue, HRegion.increment, and HRegion.append */ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) // Starts 100 threads +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) // Starts 100 threads public class TestAtomicOperation { @ClassRule @@ -92,20 +91,21 @@ public class TestAtomicOperation { HBaseClassTestRule.forClass(TestAtomicOperation.class); private static final Logger LOG = LoggerFactory.getLogger(TestAtomicOperation.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); HRegion region = null; private HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); // Test names - static byte[] tableName; + static byte[] tableName; static final byte[] qual1 = Bytes.toBytes("qual1"); static final byte[] qual2 = Bytes.toBytes("qual2"); static final byte[] qual3 = Bytes.toBytes("qual3"); static final byte[] value1 = Bytes.toBytes("value1"); static final byte[] value2 = Bytes.toBytes("value2"); - static final byte [] row = Bytes.toBytes("rowA"); - static final byte [] row2 = Bytes.toBytes("rowB"); + static final byte[] row = Bytes.toBytes("rowA"); + static final byte[] row2 = Bytes.toBytes("rowB"); @Before public void setup() { @@ -132,15 +132,14 @@ public void teardown() throws IOException { ////////////////////////////////////////////////////////////////////////////// /** - * Test basic append operation. - * More tests in + * Test basic append operation. More tests in * @see org.apache.hadoop.hbase.client.TestFromClientSide#testAppend() */ @Test public void testAppend() throws IOException { initHRegion(tableName, name.getMethodName(), fam1); - String v1 = "Ultimate Answer to the Ultimate Question of Life,"+ - " The Universe, and Everything"; + String v1 = + "Ultimate Answer to the Ultimate Question of Life," + " The Universe, and Everything"; String v2 = " is... 
42."; Append a = new Append(row); a.setReturnResults(false); @@ -151,8 +150,8 @@ public void testAppend() throws IOException { a.addColumn(fam1, qual1, Bytes.toBytes(v2)); a.addColumn(fam1, qual2, Bytes.toBytes(v1)); Result result = region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE); - assertEquals(0, Bytes.compareTo(Bytes.toBytes(v1+v2), result.getValue(fam1, qual1))); - assertEquals(0, Bytes.compareTo(Bytes.toBytes(v2+v1), result.getValue(fam1, qual2))); + assertEquals(0, Bytes.compareTo(Bytes.toBytes(v1 + v2), result.getValue(fam1, qual1))); + assertEquals(0, Bytes.compareTo(Bytes.toBytes(v2 + v1), result.getValue(fam1, qual2))); } @Test @@ -238,7 +237,7 @@ public void testIncrementMultiThreads() throws IOException { boolean fast = true; LOG.info("Starting test testIncrementMultiThreads"); // run a with mixed column families (1 and 3 versions) - initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2); + initHRegion(tableName, name.getMethodName(), new int[] { 1, 3 }, fam1, fam2); // Create 100 threads, each will increment by its own quantity. All 100 threads update the // same row over two column families. @@ -266,17 +265,13 @@ public void testIncrementMultiThreads() throws IOException { } } assertICV(row, fam1, qual1, expectedTotal, fast); - assertICV(row, fam1, qual2, expectedTotal*2, fast); - assertICV(row, fam2, qual3, expectedTotal*3, fast); + assertICV(row, fam1, qual2, expectedTotal * 2, fast); + assertICV(row, fam2, qual3, expectedTotal * 3, fast); LOG.info("testIncrementMultiThreads successfully verified that total is " + expectedTotal); } - - private void assertICV(byte [] row, - byte [] familiy, - byte[] qualifier, - long amount, - boolean fast) throws IOException { + private void assertICV(byte[] row, byte[] familiy, byte[] qualifier, long amount, boolean fast) + throws IOException { // run a get and see? Get get = new Get(row); if (fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); @@ -289,21 +284,20 @@ private void assertICV(byte [] row, assertEquals(amount, r); } - private void initHRegion (byte [] tableName, String callingMethod, - byte[] ... families) - throws IOException { + private void initHRegion(byte[] tableName, String callingMethod, byte[]... families) + throws IOException { initHRegion(tableName, callingMethod, null, families); } private void initHRegion(byte[] tableName, String callingMethod, int[] maxVersions, - byte[]... families) throws IOException { + byte[]... families) throws IOException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); int i = 0; for (byte[] family : families) { ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(family) - .setMaxVersions(maxVersions != null ? maxVersions[i++] : 1).build(); + .setMaxVersions(maxVersions != null ? maxVersions[i++] : 1).build(); builder.setColumnFamily(familyDescriptor); } TableDescriptor tableDescriptor = builder.build(); @@ -321,7 +315,6 @@ public static class Incrementer extends Thread { private final int numIncrements; private final int amount; - public Incrementer(Region region, int threadNumber, int amount, int numIncrements) { super("Incrementer." 
+ threadNumber); this.region = region; @@ -336,22 +329,22 @@ public void run() { try { Increment inc = new Increment(row); inc.addColumn(fam1, qual1, amount); - inc.addColumn(fam1, qual2, amount*2); - inc.addColumn(fam2, qual3, amount*3); + inc.addColumn(fam1, qual2, amount * 2); + inc.addColumn(fam2, qual3, amount * 3); inc.setDurability(Durability.ASYNC_WAL); Result result = region.increment(inc); if (result != null) { - assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2, + assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 2, Bytes.toLong(result.getValue(fam1, qual2))); assertTrue(result.getValue(fam2, qual3) != null); - assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3, + assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 3, Bytes.toLong(result.getValue(fam2, qual3))); - assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2, - Bytes.toLong(result.getValue(fam1, qual2))); - long fam1Increment = Bytes.toLong(result.getValue(fam1, qual1))*3; + assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 2, + Bytes.toLong(result.getValue(fam1, qual2))); + long fam1Increment = Bytes.toLong(result.getValue(fam1, qual1)) * 3; long fam2Increment = Bytes.toLong(result.getValue(fam2, qual3)); - assertEquals("fam1=" + fam1Increment + ", fam2=" + fam2Increment, - fam1Increment, fam2Increment); + assertEquals("fam1=" + fam1Increment + ", fam2=" + fam2Increment, fam1Increment, + fam2Increment); } } catch (IOException e) { e.printStackTrace(); @@ -364,12 +357,12 @@ public void run() { public void testAppendMultiThreads() throws IOException { LOG.info("Starting test testAppendMultiThreads"); // run a with mixed column families (1 and 3 versions) - initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2); + initHRegion(tableName, name.getMethodName(), new int[] { 1, 3 }, fam1, fam2); int numThreads = 100; int opsPerThread = 100; AtomicOperation[] all = new AtomicOperation[numThreads]; - final byte[] val = new byte[]{1}; + final byte[] val = new byte[] { 1 }; AtomicInteger failures = new AtomicInteger(0); // create all threads @@ -377,7 +370,7 @@ public void testAppendMultiThreads() throws IOException { all[i] = new AtomicOperation(region, opsPerThread, null, failures) { @Override public void run() { - for (int i=0; i 11 - CHECKANDPUT_COMPLETED // completed checkAndPut + INIT, // initial put of 10 to set value of the cell + PUT_STARTED, // began doing a put of 50 to cell + PUT_COMPLETED, // put complete (released RowLock, but may not have advanced MVCC). + CHECKANDPUT_STARTED, // began checkAndPut: if 10 -> 11 + CHECKANDPUT_COMPLETED // completed checkAndPut // NOTE: at the end of these steps, the value of the cell should be 50, not 11! } + private static volatile TestStep testStep = TestStep.INIT; private final String family = "f1"; /** - * Test written as a verifier for HBASE-7051, CheckAndPut should properly read - * MVCC. - * - * Moved into TestAtomicOperation from its original location, TestHBase7051 + * Test written as a verifier for HBASE-7051, CheckAndPut should properly read MVCC. 
Moved into + * TestAtomicOperation from its original location, TestHBase7051 */ @Test public void testPutAndCheckAndPutInParallel() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class); TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); this.region = TEST_UTIL.createLocalHRegion(tableDescriptorBuilder.build(), null, null); Put[] puts = new Put[1]; @@ -655,8 +650,7 @@ public void testPutAndCheckAndPutInParallel() throws Exception { puts[0] = put; region.batchMutate(puts); - MultithreadedTestUtil.TestContext ctx = - new MultithreadedTestUtil.TestContext(conf); + MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf); ctx.addThread(new PutThread(ctx, region)); ctx.addThread(new CheckAndPutThread(ctx, region)); ctx.startThreads(); @@ -670,12 +664,13 @@ public void testPutAndCheckAndPutInParallel() throws Exception { ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build(); scanner.next(results, scannerContext); for (Cell keyValue : results) { - assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue))); + assertEquals("50", Bytes.toString(CellUtil.cloneValue(keyValue))); } } private class PutThread extends TestThread { private Region region; + PutThread(TestContext ctx, Region region) { super(ctx); this.region = region; @@ -694,10 +689,11 @@ public void doWork() throws Exception { private class CheckAndPutThread extends TestThread { private Region region; + CheckAndPutThread(TestContext ctx, Region region) { super(ctx); this.region = region; - } + } @Override public void doWork() throws Exception { @@ -739,7 +735,6 @@ private WrappedRowLock(RowLock rowLock) { this.rowLock = rowLock; } - @Override public void release() { if (testStep == TestStep.INIT) { @@ -752,11 +747,11 @@ public void release() { testStep = TestStep.PUT_COMPLETED; this.rowLock.release(); // put has been written to the memstore and the row lock has been released, but the - // MVCC has not been advanced. Prior to fixing HBASE-7051, the following order of + // MVCC has not been advanced. 
Prior to fixing HBASE-7051, the following order of // operations would cause the non-atomicity to show up: // 1) Put releases row lock (where we are now) // 2) CheckAndPut grabs row lock and reads the value prior to the put (10) - // because the MVCC has not advanced + // because the MVCC has not advanced // 3) Put advances MVCC // So, in order to recreate this order, we wait for the checkAndPut to grab the rowLock // (see below), and then wait some more to give the checkAndPut time to read the old @@ -766,8 +761,7 @@ public void release() { } catch (InterruptedException e) { Thread.currentThread().interrupt(); } - } - else if (testStep == TestStep.CHECKANDPUT_STARTED) { + } else if (testStep == TestStep.CHECKANDPUT_STARTED) { this.rowLock.release(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index 0e62b503896a..1e6ad14cfbfd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static junit.framework.TestCase.assertTrue; import static org.junit.Assert.assertEquals; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -56,8 +57,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RegionServerTests.class, SmallTests.class}) -public class TestBlocksRead { +@Category({ RegionServerTests.class, SmallTests.class }) +public class TestBlocksRead { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -67,8 +68,8 @@ public class TestBlocksRead { @Rule public TestName testName = new TestName(); - static final BloomType[] BLOOM_TYPE = new BloomType[] { BloomType.ROWCOL, - BloomType.ROW, BloomType.NONE }; + static final BloomType[] BLOOM_TYPE = + new BloomType[] { BloomType.ROWCOL, BloomType.ROW, BloomType.NONE }; HRegion region = null; private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -105,8 +106,8 @@ private HRegion initHRegion(byte[] tableName, String callingMethod, Configuratio for (int i = 0; i < BLOOM_TYPE.length; i++) { BloomType bloomType = BLOOM_TYPE[i]; builder.setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family + "_" + bloomType)) - .setBlocksize(1).setBloomFilterType(bloomType).build()); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family + "_" + bloomType)) + .setBlocksize(1).setBloomFilterType(bloomType).build()); } RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build(); Path path = new Path(DIR + callingMethod); @@ -117,11 +118,9 @@ private HRegion initHRegion(byte[] tableName, String callingMethod, Configuratio } } - private void putData(String family, String row, String col, long version) - throws IOException { + private void putData(String family, String row, String col, long version) throws IOException { for (int i = 0; i < BLOOM_TYPE.length; i++) { - putData(Bytes.toBytes(family + "_" + BLOOM_TYPE[i]), row, col, version, - version); + putData(Bytes.toBytes(family + "_" + BLOOM_TYPE[i]), row, col, version, version); } } @@ -130,9 +129,9 @@ private static byte[] genValue(String row, String col, long version) { return 
Bytes.toBytes("Value:" + row + "#" + col + "#" + version); } - private void putData(byte[] cf, String row, String col, long versionStart, - long versionEnd) throws IOException { - byte [] columnBytes = Bytes.toBytes(col); + private void putData(byte[] cf, String row, String col, long versionStart, long versionEnd) + throws IOException { + byte[] columnBytes = Bytes.toBytes(col); Put put = new Put(Bytes.toBytes(row)); put.setDurability(Durability.SKIP_WAL); @@ -142,14 +141,13 @@ private void putData(byte[] cf, String row, String col, long versionStart, region.put(put); } - private Cell[] getData(String family, String row, List columns, - int expBlocks) throws IOException { + private Cell[] getData(String family, String row, List columns, int expBlocks) + throws IOException { return getData(family, row, columns, expBlocks, expBlocks, expBlocks); } - private Cell[] getData(String family, String row, List columns, - int expBlocksRowCol, int expBlocksRow, int expBlocksNone) - throws IOException { + private Cell[] getData(String family, String row, List columns, int expBlocksRowCol, + int expBlocksRow, int expBlocksNone) throws IOException { int[] expBlocks = new int[] { expBlocksRowCol, expBlocksRow, expBlocksNone }; Cell[] kvs = null; @@ -167,29 +165,26 @@ private Cell[] getData(String family, String row, List columns, long blocksEnd = getBlkAccessCount(cf); if (expBlocks[i] != -1) { assertEquals("Blocks Read Check for Bloom: " + bloomType, expBlocks[i], - blocksEnd - blocksStart); + blocksEnd - blocksStart); } - System.out.println("Blocks Read for Bloom: " + bloomType + " = " - + (blocksEnd - blocksStart) + "Expected = " + expBlocks[i]); + System.out.println("Blocks Read for Bloom: " + bloomType + " = " + (blocksEnd - blocksStart) + + "Expected = " + expBlocks[i]); } return kvs; } - private Cell[] getData(String family, String row, String column, - int expBlocks) throws IOException { - return getData(family, row, Arrays.asList(column), expBlocks, expBlocks, - expBlocks); + private Cell[] getData(String family, String row, String column, int expBlocks) + throws IOException { + return getData(family, row, Arrays.asList(column), expBlocks, expBlocks, expBlocks); } - private Cell[] getData(String family, String row, String column, - int expBlocksRowCol, int expBlocksRow, int expBlocksNone) - throws IOException { - return getData(family, row, Arrays.asList(column), expBlocksRowCol, - expBlocksRow, expBlocksNone); + private Cell[] getData(String family, String row, String column, int expBlocksRowCol, + int expBlocksRow, int expBlocksNone) throws IOException { + return getData(family, row, Arrays.asList(column), expBlocksRowCol, expBlocksRow, + expBlocksNone); } - private void deleteFamily(String family, String row, long version) - throws IOException { + private void deleteFamily(String family, String row, long version) throws IOException { Delete del = new Delete(Bytes.toBytes(row)); del.addFamily(Bytes.toBytes(family + "_ROWCOL"), version); del.addFamily(Bytes.toBytes(family + "_ROW"), version); @@ -197,13 +192,13 @@ private void deleteFamily(String family, String row, long version) region.delete(del); } - private static void verifyData(Cell kv, String expectedRow, - String expectedCol, long expectedVersion) { - assertTrue("RowCheck", CellUtil.matchingRows(kv, Bytes.toBytes(expectedRow))); + private static void verifyData(Cell kv, String expectedRow, String expectedCol, + long expectedVersion) { + assertTrue("RowCheck", CellUtil.matchingRows(kv, Bytes.toBytes(expectedRow))); 
assertTrue("ColumnCheck", CellUtil.matchingQualifier(kv, Bytes.toBytes(expectedCol))); assertEquals("TSCheck", expectedVersion, kv.getTimestamp()); - assertTrue("ValueCheck", CellUtil.matchingValue(kv, genValue(expectedRow, expectedCol, - expectedVersion))); + assertTrue("ValueCheck", + CellUtil.matchingValue(kv, genValue(expectedRow, expectedCol, expectedVersion))); } private static long getBlkAccessCount(byte[] cf) { @@ -217,7 +212,7 @@ private static long getBlkAccessCount(byte[] cf) { public void testBlocksRead() throws Exception { byte[] TABLE = Bytes.toBytes("testBlocksRead"); String FAMILY = "cf1"; - Cell [] kvs; + Cell[] kvs; this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY); try { @@ -271,7 +266,7 @@ public void testBlocksRead() throws Exception { public void testLazySeekBlocksRead() throws Exception { byte[] TABLE = Bytes.toBytes("testLazySeekBlocksRead"); String FAMILY = "cf1"; - Cell [] kvs; + Cell[] kvs; this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY); try { @@ -357,7 +352,6 @@ public void testLazySeekBlocksRead() throws Exception { putData(FAMILY, "row", "col3", 13); region.flush(true); - // Expected blocks read: 8. [HBASE-4585, HBASE-13109] kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 8, 9, 9); assertEquals(3, kvs.length); @@ -375,7 +369,7 @@ public void testLazySeekBlocksRead() throws Exception { */ @Test public void testBlocksStoredWhenCachingDisabled() throws Exception { - byte [] TABLE = Bytes.toBytes("testBlocksReadWhenCachingDisabled"); + byte[] TABLE = Bytes.toBytes("testBlocksReadWhenCachingDisabled"); String FAMILY = "cf1"; BlockCache blockCache = BlockCacheFactory.createBlockCache(conf); @@ -422,7 +416,7 @@ public void testBlocksStoredWhenCachingDisabled() throws Exception { public void testLazySeekBlocksReadWithDelete() throws Exception { byte[] TABLE = Bytes.toBytes("testLazySeekBlocksReadWithDelete"); String FAMILY = "cf1"; - Cell [] kvs; + Cell[] kvs; this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY); try { deleteFamily(FAMILY, "row", 200); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java index f18acdc7a8eb..37d12d0fad7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java @@ -49,17 +49,17 @@ import org.junit.experimental.categories.Category; @SuppressWarnings("deprecation") -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestBlocksScanned { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestBlocksScanned.class); - private static byte [] FAMILY = Bytes.toBytes("family"); - private static byte [] COL = Bytes.toBytes("col"); - private static byte [] START_KEY = Bytes.toBytes("aaa"); - private static byte [] END_KEY = Bytes.toBytes("zzz"); + private static byte[] FAMILY = Bytes.toBytes("family"); + private static byte[] COL = Bytes.toBytes("col"); + private static byte[] START_KEY = Bytes.toBytes("aaa"); + private static byte[] END_KEY = Bytes.toBytes("zzz"); private static int BLOCK_SIZE = 70; private static HBaseTestingUtil TEST_UTIL = null; @@ -77,11 +77,11 @@ public void setUp() throws Exception { public void testBlocksScanned() throws Exception { byte[] tableName = 
Bytes.toBytes("TestBlocksScanned"); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(10) - .setBlockCacheEnabled(true).setBlocksize(BLOCK_SIZE) - .setCompressionType(Compression.Algorithm.NONE).build()) - .build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(10) + .setBlockCacheEnabled(true).setBlocksize(BLOCK_SIZE) + .setCompressionType(Compression.Algorithm.NONE).build()) + .build(); _testBlocksScanned(tableDescriptor); } @@ -89,19 +89,18 @@ public void testBlocksScanned() throws Exception { public void testBlocksScannedWithEncoding() throws Exception { byte[] tableName = Bytes.toBytes("TestBlocksScannedWithEncoding"); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(10) - .setBlockCacheEnabled(true).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF) - .setBlocksize(BLOCK_SIZE).setCompressionType(Compression.Algorithm.NONE).build()) - .build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(10) + .setBlockCacheEnabled(true).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF) + .setBlocksize(BLOCK_SIZE).setCompressionType(Compression.Algorithm.NONE).build()) + .build(); _testBlocksScanned(tableDescriptor); } private void _testBlocksScanned(TableDescriptor td) throws Exception { BlockCache blockCache = BlockCacheFactory.createBlockCache(conf); - RegionInfo regionInfo = - RegionInfoBuilder.newBuilder(td.getTableName()).setStartKey(START_KEY).setEndKey(END_KEY) - .build(); + RegionInfo regionInfo = RegionInfoBuilder.newBuilder(td.getTableName()).setStartKey(START_KEY) + .setEndKey(END_KEY).build(); HRegion r = HBaseTestingUtil.createRegionAndWAL(regionInfo, testDir, conf, td, blockCache); addContent(r, FAMILY, COL); r.flush(true); @@ -116,20 +115,21 @@ private void _testBlocksScanned(TableDescriptor td) throws Exception { InternalScanner s = r.getScanner(scan); List results = new ArrayList<>(); - while (s.next(results)); + while (s.next(results)) + ; s.close(); int expectResultSize = 'z' - 'a'; assertEquals(expectResultSize, results.size()); - int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / - (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength()); + int kvPerBlock = (int) Math + .ceil(BLOCK_SIZE / (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength()); assertEquals(2, kvPerBlock); long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock); long expectIndexBlockRead = expectDataBlockRead; assertEquals(expectIndexBlockRead + expectDataBlockRead, - stats.getHitCount() + stats.getMissCount() - before); + stats.getHitCount() + stats.getMissCount() - before); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBootstrapNodeManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBootstrapNodeManager.java index ba97a04f326b..2aac49988675 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBootstrapNodeManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBootstrapNodeManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or 
more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ public class TestBootstrapNodeManager { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBootstrapNodeManager.class); + HBaseClassTestRule.forClass(TestBootstrapNodeManager.class); private Configuration conf; @@ -95,14 +95,14 @@ private void assertListEquals(List expected, List actual @Test public void testNormal() throws Exception { List regionServers = - Arrays.asList(ServerName.valueOf("server1", 12345, EnvironmentEdgeManager.currentTime()), - ServerName.valueOf("server2", 12345, EnvironmentEdgeManager.currentTime()), - ServerName.valueOf("server3", 12345, EnvironmentEdgeManager.currentTime()), - ServerName.valueOf("server4", 12345, EnvironmentEdgeManager.currentTime())); + Arrays.asList(ServerName.valueOf("server1", 12345, EnvironmentEdgeManager.currentTime()), + ServerName.valueOf("server2", 12345, EnvironmentEdgeManager.currentTime()), + ServerName.valueOf("server3", 12345, EnvironmentEdgeManager.currentTime()), + ServerName.valueOf("server4", 12345, EnvironmentEdgeManager.currentTime())); when(conn.getLiveRegionServers(any(), anyInt())) - .thenReturn(CompletableFuture.completedFuture(regionServers)); + .thenReturn(CompletableFuture.completedFuture(regionServers)); when(conn.getAllBootstrapNodes(any())) - .thenReturn(CompletableFuture.completedFuture(regionServers)); + .thenReturn(CompletableFuture.completedFuture(regionServers)); manager = new BootstrapNodeManager(conn, tracker); Thread.sleep(3000); verify(conn, times(1)).getLiveRegionServers(any(), anyInt()); @@ -114,11 +114,11 @@ public void testNormal() throws Exception { @Test public void testOnlyMaster() throws Exception { List regionServers = - Arrays.asList(ServerName.valueOf("server1", 12345, EnvironmentEdgeManager.currentTime())); + Arrays.asList(ServerName.valueOf("server1", 12345, EnvironmentEdgeManager.currentTime())); when(conn.getLiveRegionServers(any(), anyInt())) - .thenReturn(CompletableFuture.completedFuture(regionServers)); + .thenReturn(CompletableFuture.completedFuture(regionServers)); when(conn.getAllBootstrapNodes(any())) - .thenReturn(CompletableFuture.completedFuture(regionServers)); + .thenReturn(CompletableFuture.completedFuture(regionServers)); manager = new BootstrapNodeManager(conn, tracker); Thread.sleep(3000); verify(conn, atLeast(2)).getLiveRegionServers(any(), anyInt()); @@ -129,15 +129,15 @@ public void testOnlyMaster() throws Exception { @Test public void testRegionServerError() throws Exception { List regionServers = - Arrays.asList(ServerName.valueOf("server1", 12345, EnvironmentEdgeManager.currentTime()), - ServerName.valueOf("server2", 12345, EnvironmentEdgeManager.currentTime()), - ServerName.valueOf("server3", 12345, EnvironmentEdgeManager.currentTime()), - ServerName.valueOf("server4", 12345, EnvironmentEdgeManager.currentTime())); + Arrays.asList(ServerName.valueOf("server1", 12345, EnvironmentEdgeManager.currentTime()), + ServerName.valueOf("server2", 12345, EnvironmentEdgeManager.currentTime()), + ServerName.valueOf("server3", 12345, EnvironmentEdgeManager.currentTime()), + ServerName.valueOf("server4", 12345, EnvironmentEdgeManager.currentTime())); List newRegionServers = - Arrays.asList(ServerName.valueOf("server5", 12345, EnvironmentEdgeManager.currentTime()), - ServerName.valueOf("server6", 12345, EnvironmentEdgeManager.currentTime())); + Arrays.asList(ServerName.valueOf("server5", 12345, 
EnvironmentEdgeManager.currentTime()), + ServerName.valueOf("server6", 12345, EnvironmentEdgeManager.currentTime())); when(conn.getLiveRegionServers(any(), anyInt())) - .thenReturn(CompletableFuture.completedFuture(regionServers)); + .thenReturn(CompletableFuture.completedFuture(regionServers)); when(conn.getAllBootstrapNodes(any())).thenAnswer(invocation -> { if (invocation.getArgument(0, ServerName.class).getHostname().equals("server4")) { return FutureUtils.failedFuture(new IOException("Inject error")); @@ -150,7 +150,7 @@ public void testRegionServerError() throws Exception { Waiter.waitFor(conf, 30000, () -> manager.getBootstrapNodes().size() == 3); assertListEquals(regionServers.subList(0, 3), manager.getBootstrapNodes()); when(conn.getLiveRegionServers(any(), anyInt())) - .thenReturn(CompletableFuture.completedFuture(newRegionServers)); + .thenReturn(CompletableFuture.completedFuture(newRegionServers)); doAnswer(invocation -> { String hostname = invocation.getArgument(0, ServerName.class).getHostname(); switch (hostname) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBrokenStoreFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBrokenStoreFileCleaner.java index 78755a4fe772..858bffa60753 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBrokenStoreFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBrokenStoreFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.IOException; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -50,19 +51,18 @@ public class TestBrokenStoreFileCleaner { private final static byte[] fam = Bytes.toBytes("cf_1"); private final static byte[] qual1 = Bytes.toBytes("qf_1"); private final static byte[] val = Bytes.toBytes("val"); - private final static String junkFileName = "409fad9a751c4e8c86d7f32581bdc156"; + private final static String junkFileName = "409fad9a751c4e8c86d7f32581bdc156"; TableName tableName; - @Before public void setUp() throws Exception { testUtil.getConfiguration().set(StoreFileTrackerFactory.TRACKER_IMPL, "org.apache.hadoop.hbase.regionserver.storefiletracker.FileBasedStoreFileTracker"); - testUtil.getConfiguration() - .set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_ENABLED, "true"); + testUtil.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_ENABLED, + "true"); testUtil.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_TTL, "0"); - testUtil.getConfiguration() - .set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_PERIOD, "15000000"); + testUtil.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_PERIOD, + "15000000"); testUtil.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY, "0"); testUtil.startMiniCluster(1); } @@ -79,12 +79,12 @@ public void testDeletingJunkFile() throws Exception { createTableWithData(tableName); HRegion region = testUtil.getMiniHBaseCluster().getRegions(tableName).get(0); - ServerName sn = testUtil.getMiniHBaseCluster() - .getServerHoldingRegion(tableName, region.getRegionInfo().getRegionName()); + ServerName sn = 
testUtil.getMiniHBaseCluster().getServerHoldingRegion(tableName, + region.getRegionInfo().getRegionName()); HRegionServer rs = testUtil.getMiniHBaseCluster().getRegionServer(sn); BrokenStoreFileCleaner cleaner = rs.getBrokenStoreFileCleaner(); - //create junk file + // create junk file HStore store = region.getStore(fam); Path cfPath = store.getRegionFileSystem().getStoreDir(store.getColumnFamilyName()); Path junkFilePath = new Path(cfPath, junkFileName); @@ -93,16 +93,16 @@ public void testDeletingJunkFile() throws Exception { junkFileOS.writeUTF("hello"); junkFileOS.close(); - int storeFiles = store.getStorefilesCount(); + int storeFiles = store.getStorefilesCount(); assertTrue(storeFiles > 0); - //verify the file exist before the chore and missing afterwards + // verify the file exist before the chore and missing afterwards assertTrue(store.getFileSystem().exists(junkFilePath)); cleaner.chore(); assertFalse(store.getFileSystem().exists(junkFilePath)); - //verify no storefile got deleted - int currentStoreFiles = store.getStorefilesCount(); + // verify no storefile got deleted + int currentStoreFiles = store.getStorefilesCount(); assertEquals(currentStoreFiles, storeFiles); } @@ -114,26 +114,26 @@ public void testSkippingCompactedFiles() throws Exception { HRegion region = testUtil.getMiniHBaseCluster().getRegions(tableName).get(0); - ServerName sn = testUtil.getMiniHBaseCluster() - .getServerHoldingRegion(tableName, region.getRegionInfo().getRegionName()); + ServerName sn = testUtil.getMiniHBaseCluster().getServerHoldingRegion(tableName, + region.getRegionInfo().getRegionName()); HRegionServer rs = testUtil.getMiniHBaseCluster().getRegionServer(sn); BrokenStoreFileCleaner cleaner = rs.getBrokenStoreFileCleaner(); - //run major compaction to generate compaced files + // run major compaction to generate compaced files region.compact(true); - //make sure there are compacted files + // make sure there are compacted files HStore store = region.getStore(fam); - int compactedFiles = store.getCompactedFilesCount(); + int compactedFiles = store.getCompactedFilesCount(); assertTrue(compactedFiles > 0); cleaner.chore(); - //verify none of the compacted files were deleted - int existingCompactedFiles = store.getCompactedFilesCount(); + // verify none of the compacted files were deleted + int existingCompactedFiles = store.getCompactedFilesCount(); assertEquals(compactedFiles, existingCompactedFiles); - //verify adding a junk file does not break anything + // verify adding a junk file does not break anything Path cfPath = store.getRegionFileSystem().getStoreDir(store.getColumnFamilyName()); Path junkFilePath = new Path(cfPath, junkFileName); @@ -146,8 +146,8 @@ public void testSkippingCompactedFiles() throws Exception { cleaner.chore(); assertFalse(store.getFileSystem().exists(junkFilePath)); - //verify compacted files are still intact - existingCompactedFiles = store.getCompactedFilesCount(); + // verify compacted files are still intact + existingCompactedFiles = store.getCompactedFilesCount(); assertEquals(compactedFiles, existingCompactedFiles); } @@ -157,11 +157,11 @@ public void testJunkFileTTL() throws Exception { createTableWithData(tableName); HRegion region = testUtil.getMiniHBaseCluster().getRegions(tableName).get(0); - ServerName sn = testUtil.getMiniHBaseCluster() - .getServerHoldingRegion(tableName, region.getRegionInfo().getRegionName()); + ServerName sn = testUtil.getMiniHBaseCluster().getServerHoldingRegion(tableName, + region.getRegionInfo().getRegionName()); HRegionServer rs = 
testUtil.getMiniHBaseCluster().getRegionServer(sn); - //create junk file + // create junk file HStore store = region.getStore(fam); Path cfPath = store.getRegionFileSystem().getStoreDir(store.getColumnFamilyName()); Path junkFilePath = new Path(cfPath, junkFileName); @@ -170,25 +170,25 @@ public void testJunkFileTTL() throws Exception { junkFileOS.writeUTF("hello"); junkFileOS.close(); - int storeFiles = store.getStorefilesCount(); + int storeFiles = store.getStorefilesCount(); assertTrue(storeFiles > 0); - //verify the file exist before the chore + // verify the file exist before the chore assertTrue(store.getFileSystem().exists(junkFilePath)); - //set a 5 sec ttl + // set a 5 sec ttl rs.getConfiguration().set(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_TTL, "5000"); - BrokenStoreFileCleaner cleaner = new BrokenStoreFileCleaner(15000000, - 0, rs, rs.getConfiguration(), rs); + BrokenStoreFileCleaner cleaner = + new BrokenStoreFileCleaner(15000000, 0, rs, rs.getConfiguration(), rs); cleaner.chore(); - //file is still present after chore run + // file is still present after chore run assertTrue(store.getFileSystem().exists(junkFilePath)); Thread.sleep(5000); cleaner.chore(); assertFalse(store.getFileSystem().exists(junkFilePath)); - //verify no storefile got deleted - int currentStoreFiles = store.getStorefilesCount(); + // verify no storefile got deleted + int currentStoreFiles = store.getStorefilesCount(); assertEquals(currentStoreFiles, storeFiles); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java index 012d1664e2ef..96bff143dfec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java @@ -28,7 +28,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -43,7 +42,6 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; - /** * This class attempts to unit test bulk HLog loading. 
*/ @@ -69,26 +67,26 @@ public void verifyBulkLoadEvent() throws IOException { storeFileNames.add(storeFileName); when(log.appendMarker(any(), any(), argThat(bulkLogWalEdit(WALEdit.BULK_LOAD, tableName.toBytes(), familyName, storeFileNames)))) - .thenAnswer(new Answer() { - @Override - public Object answer(InvocationOnMock invocation) { - WALKeyImpl walKey = invocation.getArgument(1); - MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); - if (mvcc != null) { - MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin(); - walKey.setWriteEntry(we); + .thenAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) { + WALKeyImpl walKey = invocation.getArgument(1); + MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); + if (mvcc != null) { + MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin(); + walKey.setWriteEntry(we); + } + return 01L; } - return 01L; - } - }); - testRegionWithFamiliesAndSpecifiedTableName(tableName, family1) - .bulkLoadHFiles(familyPaths, false, null); + }); + testRegionWithFamiliesAndSpecifiedTableName(tableName, family1).bulkLoadHFiles(familyPaths, + false, null); verify(log).sync(anyLong()); } @Test public void bulkHLogShouldThrowNoErrorAndWriteMarkerWithBlankInput() throws IOException { - testRegionWithFamilies(family1).bulkLoadHFiles(new ArrayList<>(),false, null); + testRegionWithFamilies(family1).bulkLoadHFiles(new ArrayList<>(), false, null); } @Test @@ -112,21 +110,21 @@ public Object answer(InvocationOnMock invocation) { @Test public void shouldBulkLoadManyFamilyHLog() throws IOException { - when(log.appendMarker(any(), - any(), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD)))).thenAnswer(new Answer() { - @Override - public Object answer(InvocationOnMock invocation) { - WALKeyImpl walKey = invocation.getArgument(1); - MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); - if (mvcc != null) { - MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin(); - walKey.setWriteEntry(we); - } - return 01L; - } - }); + when(log.appendMarker(any(), any(), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD)))) + .thenAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) { + WALKeyImpl walKey = invocation.getArgument(1); + MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); + if (mvcc != null) { + MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin(); + walKey.setWriteEntry(we); + } + return 01L; + } + }); testRegionWithFamilies(family1, family2).bulkLoadHFiles(withFamilyPathsFor(family1, family2), - false, null); + false, null); verify(log).sync(anyLong()); } @@ -172,9 +170,8 @@ public void bulkHLogShouldThrowErrorWhenFamilySpecifiedAndHFileExistsButNotInTab @Test(expected = DoNotRetryIOException.class) public void shouldThrowErrorIfBadFamilySpecifiedAsFamilyPath() throws IOException { - testRegionWithFamilies() - .bulkLoadHFiles(asList(withInvalidColumnFamilyButProperHFileLocation(family1)), - false, null); + testRegionWithFamilies().bulkLoadHFiles( + asList(withInvalidColumnFamilyButProperHFileLocation(family1)), false, null); } @Test(expected = FileNotFoundException.class) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java index c2333920350b..a6715d6b5245 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -81,25 +80,22 @@ /** * Integration test for bulk load replication. Defines three clusters, with the following - * replication topology: "1 <-> 2 <-> 3" (active-active between 1 and 2, and active-active between - * 2 and 3). - * - * For each of defined test clusters, it performs a bulk load, asserting values on bulk loaded file - * gets replicated to other two peers. Since we are doing 3 bulk loads, with the given replication - * topology all these bulk loads should get replicated only once on each peer. To assert this, - * this test defines a preBulkLoad coprocessor and adds it to all test table regions, on each of the - * clusters. This CP counts the amount of times bulk load actually gets invoked, certifying + * replication topology: "1 <-> 2 <-> 3" (active-active between 1 and 2, and active-active between 2 + * and 3). For each of defined test clusters, it performs a bulk load, asserting values on bulk + * loaded file gets replicated to other two peers. Since we are doing 3 bulk loads, with the given + * replication topology all these bulk loads should get replicated only once on each peer. To assert + * this, this test defines a preBulkLoad coprocessor and adds it to all test table regions, on each + * of the clusters. This CP counts the amount of times bulk load actually gets invoked, certifying * we are not entering the infinite loop condition addressed by HBASE-22380. 
*/ -@Category({ ReplicationTests.class, MediumTests.class}) +@Category({ ReplicationTests.class, MediumTests.class }) public class TestBulkLoadReplication extends TestReplicationBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBulkLoadReplication.class); + HBaseClassTestRule.forClass(TestBulkLoadReplication.class); - protected static final Logger LOG = - LoggerFactory.getLogger(TestBulkLoadReplication.class); + protected static final Logger LOG = LoggerFactory.getLogger(TestBulkLoadReplication.class); private static final String PEER1_CLUSTER_ID = "peer1"; private static final String PEER2_CLUSTER_ID = "peer2"; @@ -140,11 +136,9 @@ private static void startThirdCluster() throws Exception { UTIL3.startMiniCluster(NUM_SLAVES1); TableDescriptor table = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) - .setMobEnabled(true) - .setMobThreshold(4000) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName).setMobEnabled(true) + .setMobThreshold(4000).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); Connection connection3 = ConnectionFactory.createConnection(CONF3); try (Admin admin3 = connection3.getAdmin()) { @@ -157,18 +151,18 @@ private static void startThirdCluster() throws Exception { @Before @Override public void setUpBase() throws Exception { - //"super.setUpBase()" already sets replication from 1->2, - //then on the subsequent lines, sets 2->1, 2->3 and 3->2. - //So we have following topology: "1 <-> 2 <->3" + // "super.setUpBase()" already sets replication from 1->2, + // then on the subsequent lines, sets 2->1, 2->3 and 3->2. + // So we have following topology: "1 <-> 2 <->3" super.setUpBase(); ReplicationPeerConfig peer1Config = getPeerConfigForCluster(UTIL1); ReplicationPeerConfig peer2Config = getPeerConfigForCluster(UTIL2); ReplicationPeerConfig peer3Config = getPeerConfigForCluster(UTIL3); - //adds cluster1 as a remote peer on cluster2 + // adds cluster1 as a remote peer on cluster2 UTIL2.getAdmin().addReplicationPeer(PEER_ID1, peer1Config); - //adds cluster3 as a remote peer on cluster2 + // adds cluster3 as a remote peer on cluster2 UTIL2.getAdmin().addReplicationPeer(PEER_ID3, peer3Config); - //adds cluster2 as a remote peer on cluster3 + // adds cluster2 as a remote peer on cluster3 UTIL3.getAdmin().addReplicationPeer(PEER_ID2, peer2Config); setupCoprocessor(UTIL1); setupCoprocessor(UTIL2); @@ -177,24 +171,23 @@ public void setUpBase() throws Exception { } private ReplicationPeerConfig getPeerConfigForCluster(HBaseTestingUtil util) { - return ReplicationPeerConfig.newBuilder() - .setClusterKey(util.getClusterKey()).setSerial(isSerialPeer()).build(); + return ReplicationPeerConfig.newBuilder().setClusterKey(util.getClusterKey()) + .setSerial(isSerialPeer()).build(); } - private void setupCoprocessor(HBaseTestingUtil cluster){ + private void setupCoprocessor(HBaseTestingUtil cluster) { cluster.getHBaseCluster().getRegions(tableName).forEach(r -> { try { - TestBulkLoadReplication.BulkReplicationTestObserver cp = r.getCoprocessorHost(). - findCoprocessor(TestBulkLoadReplication.BulkReplicationTestObserver.class); - if(cp == null) { - r.getCoprocessorHost(). 
- load(TestBulkLoadReplication.BulkReplicationTestObserver.class, 0, - cluster.getConfiguration()); - cp = r.getCoprocessorHost(). - findCoprocessor(TestBulkLoadReplication.BulkReplicationTestObserver.class); + TestBulkLoadReplication.BulkReplicationTestObserver cp = r.getCoprocessorHost() + .findCoprocessor(TestBulkLoadReplication.BulkReplicationTestObserver.class); + if (cp == null) { + r.getCoprocessorHost().load(TestBulkLoadReplication.BulkReplicationTestObserver.class, 0, + cluster.getConfiguration()); + cp = r.getCoprocessorHost() + .findCoprocessor(TestBulkLoadReplication.BulkReplicationTestObserver.class); cp.clusterName = cluster.getClusterKey(); } - } catch (Exception e){ + } catch (Exception e) { LOG.error(e.getMessage(), e); } }); @@ -210,12 +203,11 @@ public void tearDownBase() throws Exception { } protected static void setupBulkLoadConfigsForCluster(Configuration config, - String clusterReplicationId) throws Exception { + String clusterReplicationId) throws Exception { config.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); config.set(REPLICATION_CLUSTER_ID, clusterReplicationId); File sourceConfigFolder = testFolder.newFolder(clusterReplicationId); - File sourceConfigFile = new File(sourceConfigFolder.getAbsolutePath() - + "/hbase-site.xml"); + File sourceConfigFile = new File(sourceConfigFolder.getAbsolutePath() + "/hbase-site.xml"); config.writeXml(new FileOutputStream(sourceConfigFile)); config.set(REPLICATION_CONF_DIR, testFolder.getRoot().getAbsolutePath()); } @@ -227,27 +219,26 @@ public void testBulkLoadReplicationActiveActive() throws Exception { Table peer3TestTable = UTIL3.getConnection().getTable(TestReplicationBase.tableName); byte[] row = Bytes.toBytes("001"); byte[] value = Bytes.toBytes("v1"); - assertBulkLoadConditions(tableName, row, value, UTIL1, peer1TestTable, - peer2TestTable, peer3TestTable); + assertBulkLoadConditions(tableName, row, value, UTIL1, peer1TestTable, peer2TestTable, + peer3TestTable); row = Bytes.toBytes("002"); value = Bytes.toBytes("v2"); - assertBulkLoadConditions(tableName, row, value, UTIL2, peer1TestTable, - peer2TestTable, peer3TestTable); + assertBulkLoadConditions(tableName, row, value, UTIL2, peer1TestTable, peer2TestTable, + peer3TestTable); row = Bytes.toBytes("003"); value = Bytes.toBytes("v3"); - assertBulkLoadConditions(tableName, row, value, UTIL3, peer1TestTable, - peer2TestTable, peer3TestTable); - //Additional wait to make sure no extra bulk load happens + assertBulkLoadConditions(tableName, row, value, UTIL3, peer1TestTable, peer2TestTable, + peer3TestTable); + // Additional wait to make sure no extra bulk load happens Thread.sleep(400); - //We have 3 bulk load events (1 initiated on each cluster). - //Each event gets 3 counts (the originator cluster, plus the two peers), - //so BULK_LOADS_COUNT expected value is 3 * 3 = 9. + // We have 3 bulk load events (1 initiated on each cluster). + // Each event gets 3 counts (the originator cluster, plus the two peers), + // so BULK_LOADS_COUNT expected value is 3 * 3 = 9. assertEquals(9, BULK_LOADS_COUNT.get()); } - protected void assertBulkLoadConditions(TableName tableName, byte[] row, byte[] value, - HBaseTestingUtil utility, Table...tables) throws Exception { + HBaseTestingUtil utility, Table... 
tables) throws Exception { BULK_LOAD_LATCH = new CountDownLatch(3); bulkLoadOnCluster(tableName, row, value, utility); assertTrue(BULK_LOAD_LATCH.await(1, TimeUnit.MINUTES)); @@ -257,7 +248,7 @@ protected void assertBulkLoadConditions(TableName tableName, byte[] row, byte[] } protected void bulkLoadOnCluster(TableName tableName, byte[] row, byte[] value, - HBaseTestingUtil cluster) throws Exception { + HBaseTestingUtil cluster) throws Exception { String bulkLoadFilePath = createHFileForFamilies(row, value, cluster.getConfiguration()); copyToHdfs(bulkLoadFilePath, cluster.getDFSCluster()); BulkLoadHFilesTool bulkLoadHFilesTool = new BulkLoadHFilesTool(cluster.getConfiguration()); @@ -283,20 +274,16 @@ protected void assertTableNoValue(Table table, byte[] row, byte[] value) throws assertTrue(result.isEmpty()); } - private String createHFileForFamilies(byte[] row, byte[] value, - Configuration clusterConfig) throws IOException { + private String createHFileForFamilies(byte[] row, byte[] value, Configuration clusterConfig) + throws IOException { CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY); - cellBuilder.setRow(row) - .setFamily(TestReplicationBase.famName) - .setQualifier(Bytes.toBytes("1")) - .setValue(value) - .setType(Cell.Type.Put); + cellBuilder.setRow(row).setFamily(TestReplicationBase.famName).setQualifier(Bytes.toBytes("1")) + .setValue(value).setType(Cell.Type.Put); HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(clusterConfig); // TODO We need a way to do this without creating files File hFileLocation = testFolder.newFile(); - FSDataOutputStream out = - new FSDataOutputStream(new FileOutputStream(hFileLocation), null); + FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null); try { hFileFactory.withOutputStream(out); hFileFactory.withFileContext(new HFileContextBuilder().build()); @@ -323,7 +310,7 @@ public Optional getRegionObserver() { @Override public void postBulkLoadHFile(ObserverContext ctx, - List> stagingFamilyPaths, Map> finalPaths) + List> stagingFamilyPaths, Map> finalPaths) throws IOException { BULK_LOAD_LATCH.countDown(); BULK_LOADS_COUNT.incrementAndGet(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java index 134ea4771263..7ee527b1d3aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplicationHFileRefs.java @@ -1,19 +1,19 @@ /* - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; @@ -28,7 +28,6 @@ Licensed to the Apache Software Foundation (ASF) under one import java.io.IOException; import java.util.List; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -74,12 +73,12 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({ ReplicationTests.class, SmallTests.class}) +@Category({ ReplicationTests.class, SmallTests.class }) public class TestBulkLoadReplicationHFileRefs extends TestReplicationBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBulkLoadReplicationHFileRefs.class); + HBaseClassTestRule.forClass(TestBulkLoadReplicationHFileRefs.class); private static final String PEER1_CLUSTER_ID = "peer1"; private static final String PEER2_CLUSTER_ID = "peer2"; @@ -87,9 +86,9 @@ public class TestBulkLoadReplicationHFileRefs extends TestReplicationBase { private static final String REPLICATE_NAMESPACE = "replicate_ns"; private static final String NO_REPLICATE_NAMESPACE = "no_replicate_ns"; private static final TableName REPLICATE_TABLE = - TableName.valueOf(REPLICATE_NAMESPACE, "replicate_table"); + TableName.valueOf(REPLICATE_NAMESPACE, "replicate_table"); private static final TableName NO_REPLICATE_TABLE = - TableName.valueOf(NO_REPLICATE_NAMESPACE, "no_replicate_table"); + TableName.valueOf(NO_REPLICATE_NAMESPACE, "no_replicate_table"); private static final byte[] CF_A = Bytes.toBytes("cfa"); private static final byte[] CF_B = Bytes.toBytes("cfb"); @@ -125,7 +124,7 @@ public static void setUpBeforeClass() throws Exception { } protected static void setupBulkLoadConfigsForCluster(Configuration config, - String clusterReplicationId) throws Exception { + String clusterReplicationId) throws Exception { config.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); config.set(REPLICATION_CLUSTER_ID, clusterReplicationId); File sourceConfigFolder = testFolder.newFolder(clusterReplicationId); @@ -161,11 +160,9 @@ public void testWhenExcludeCF() throws Exception { // Add peer, setReplicateAllUserTables true, but exclude CF_B. 
Map> excludeTableCFs = Maps.newHashMap(); excludeTableCFs.put(REPLICATE_TABLE, Lists.newArrayList(Bytes.toString(CF_B))); - ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() - .setClusterKey(UTIL2.getClusterKey()) - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCFs) - .build(); + ReplicationPeerConfig peerConfig = + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCFs).build(); admin1.addReplicationPeer(PEER_ID2, peerConfig); Assert.assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE)); Assert.assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE, CF_A)); @@ -194,11 +191,9 @@ public void testWhenExcludeTable() throws Exception { // Add peer, setReplicateAllUserTables true, but exclude one table. Map> excludeTableCFs = Maps.newHashMap(); excludeTableCFs.put(NO_REPLICATE_TABLE, null); - ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() - .setClusterKey(UTIL2.getClusterKey()) - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCFs) - .build(); + ReplicationPeerConfig peerConfig = + ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCFs).build(); admin1.addReplicationPeer(PEER_ID2, peerConfig); assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE)); assertFalse(peerConfig.needToReplicate(NO_REPLICATE_TABLE)); @@ -228,10 +223,8 @@ public void testWhenExcludeNamespace() throws Exception { // Add peer, setReplicateAllUserTables true, but exclude one namespace. ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() - .setClusterKey(UTIL2.getClusterKey()) - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NO_REPLICATE_NAMESPACE)) - .build(); + .setClusterKey(UTIL2.getClusterKey()).setReplicateAllUserTables(true) + .setExcludeNamespaces(Sets.newHashSet(NO_REPLICATE_NAMESPACE)).build(); admin1.addReplicationPeer(PEER_ID2, peerConfig); assertTrue(peerConfig.needToReplicate(REPLICATE_TABLE)); assertFalse(peerConfig.needToReplicate(NO_REPLICATE_TABLE)); @@ -255,8 +248,7 @@ public void testWhenExcludeNamespace() throws Exception { assertEquals(0, queueStorage.getAllHFileRefs().size()); } - protected void bulkLoadOnCluster(TableName tableName, byte[] family) - throws Exception { + protected void bulkLoadOnCluster(TableName tableName, byte[] family) throws Exception { String bulkLoadFilePath = createHFileForFamilies(family); copyToHdfs(family, bulkLoadFilePath, UTIL1.getDFSCluster()); BulkLoadHFilesTool bulkLoadHFilesTool = new BulkLoadHFilesTool(UTIL1.getConfiguration()); @@ -265,16 +257,12 @@ protected void bulkLoadOnCluster(TableName tableName, byte[] family) private String createHFileForFamilies(byte[] family) throws IOException { CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY); - cellBuilder.setRow(row) - .setFamily(family) - .setQualifier(qualifier) - .setValue(value) - .setType(Cell.Type.Put); + cellBuilder.setRow(row).setFamily(family).setQualifier(qualifier).setValue(value) + .setType(Cell.Type.Put); HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(UTIL1.getConfiguration()); File hFileLocation = testFolder.newFile(); - FSDataOutputStream out = - new FSDataOutputStream(new FileOutputStream(hFileLocation), null); + FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null); try { hFileFactory.withOutputStream(out); 
hFileFactory.withFileContext(new HFileContextBuilder().build()); @@ -291,7 +279,7 @@ private String createHFileForFamilies(byte[] family) throws IOException { } private void copyToHdfs(byte[] family, String bulkLoadFilePath, MiniDFSCluster cluster) - throws Exception { + throws Exception { Path bulkLoadDir = new Path(BULK_LOAD_BASE_DIR, Bytes.toString(family)); cluster.getFileSystem().mkdirs(bulkLoadDir); cluster.getFileSystem().copyFromLocalFile(new Path(bulkLoadFilePath), bulkLoadDir); @@ -301,7 +289,7 @@ private void createTableOnClusters(TableName tableName, byte[]... cfs) throws IO TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] cf : cfs) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cf) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); } TableDescriptor td = builder.build(); admin1.createTable(td); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkloadBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkloadBase.java index ffc13c677ddb..fc3051d06a28 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkloadBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkloadBase.java @@ -30,7 +30,6 @@ import java.util.Collection; import java.util.List; import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -55,17 +54,16 @@ import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeMatcher; - import org.junit.Before; import org.junit.ClassRule; import org.junit.Rule; import org.junit.rules.TemporaryFolder; import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestBulkloadBase { @@ -90,18 +88,17 @@ public TestBulkloadBase(boolean useFileBasedSFT) { @Parameterized.Parameters public static Collection data() { - Boolean[] data = {false, true}; + Boolean[] data = { false, true }; return Arrays.asList(data); } @Before public void before() throws IOException { Bytes.random(randomBytes); - if(useFileBasedSFT) { + if (useFileBasedSFT) { conf.set(StoreFileTrackerFactory.TRACKER_IMPL, "org.apache.hadoop.hbase.regionserver.storefiletracker.FileBasedStoreFileTracker"); - } - else { + } else { conf.unset(StoreFileTrackerFactory.TRACKER_IMPL); } } @@ -138,7 +135,8 @@ protected HRegion testRegionWithFamiliesAndSpecifiedTableName(TableName tableNam } protected HRegion testRegionWithFamilies(byte[]... 
families) throws IOException { - TableName tableName = TableName.valueOf(name.getMethodName().substring(0, name.getMethodName().indexOf("["))); + TableName tableName = + TableName.valueOf(name.getMethodName().substring(0, name.getMethodName().indexOf("["))); return testRegionWithFamiliesAndSpecifiedTableName(tableName, families); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 7dbb68090946..d4d970c2ac6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,11 +68,11 @@ import org.slf4j.LoggerFactory; /** - * Tests {@link HFile} cache-on-write functionality for data blocks, non-root - * index blocks, and Bloom filter blocks, as specified by the column family. + * Tests {@link HFile} cache-on-write functionality for data blocks, non-root index blocks, and + * Bloom filter blocks, as specified by the column family. */ @RunWith(Parameterized.class) -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestCacheOnWriteInSchema { @ClassRule @@ -80,21 +80,20 @@ public class TestCacheOnWriteInSchema { HBaseClassTestRule.forClass(TestCacheOnWriteInSchema.class); private static final Logger LOG = LoggerFactory.getLogger(TestCacheOnWriteInSchema.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final String DIR = TEST_UTIL.getDataTestDir("TestCacheOnWriteInSchema").toString(); - private static byte [] table; - private static byte [] family = Bytes.toBytes("family"); + private static byte[] table; + private static byte[] family = Bytes.toBytes("family"); private static final int NUM_KV = 25000; private static final Random rand = new Random(12983177L); /** The number of valid key types possible in a store file */ - private static final int NUM_VALID_KEY_TYPES = - KeyValue.Type.values().length - 2; + private static final int NUM_VALID_KEY_TYPES = KeyValue.Type.values().length - 2; private static enum CacheOnWriteType { - DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA), - BLOOM_BLOCKS(BlockType.BLOOM_CHUNK), + DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA), BLOOM_BLOCKS(BlockType.BLOOM_CHUNK), INDEX_BLOCKS(BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX); private final BlockType blockType1; @@ -175,7 +174,7 @@ public void setUp() throws IOException { // Create a store based on the schema String id = TestCacheOnWriteInSchema.class.getName(); Path logdir = - new Path(CommonFSUtils.getRootDir(conf), AbstractFSWALProvider.getWALDirectoryName(id)); + new Path(CommonFSUtils.getRootDir(conf), AbstractFSWALProvider.getWALDirectoryName(id)); fs.delete(logdir, true); RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); @@ -216,9 +215,9 @@ public void tearDown() throws IOException { public void testCacheOnWriteInSchema() throws IOException { // Write some random data into the store StoreFileWriter writer = store.getStoreEngine() - 
.createWriter(CreateStoreFileWriterParams.create().maxKeyCount(Integer.MAX_VALUE) - .compression(HFile.DEFAULT_COMPRESSION_ALGORITHM).isCompaction(false) - .includeMVCCReadpoint(true).includesTag(false).shouldDropBehind(false)); + .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(Integer.MAX_VALUE) + .compression(HFile.DEFAULT_COMPRESSION_ALGORITHM).isCompaction(false) + .includeMVCCReadpoint(true).includesTag(false).shouldDropBehind(false)); writeStoreFile(writer); writer.close(); // Verify the block types of interest were cached on write @@ -240,22 +239,18 @@ private void readStoreFile(Path path) throws IOException { while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { // Flags: don't cache the block, use pread, this is not a compaction. // Also, pass null for expected block type to avoid checking it. - HFileBlock block = reader.readBlock(offset, -1, false, true, - false, true, null, DataBlockEncoding.NONE); - BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), - offset); + HFileBlock block = + reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE); + BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null; boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType()); final BlockType blockType = block.getBlockType(); - if (shouldBeCached != isCached && - (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))) { - throw new AssertionError( - "shouldBeCached: " + shouldBeCached+ "\n" + - "isCached: " + isCached + "\n" + - "Test description: " + testDescription + "\n" + - "block: " + block + "\n" + - "blockCacheKey: " + blockCacheKey); + if (shouldBeCached != isCached + && (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))) { + throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n" + "isCached: " + + isCached + "\n" + "Test description: " + testDescription + "\n" + "block: " + block + + "\n" + "blockCacheKey: " + blockCacheKey); } offset += block.getOnDiskSizeWithHeader(); } @@ -269,11 +264,10 @@ private static KeyValue.Type generateKeyType(Random rand) { // Let's make half of KVs puts. return KeyValue.Type.Put; } else { - KeyValue.Type keyType = - KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)]; + KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)]; if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) { - throw new RuntimeException("Generated an invalid key type: " + keyType - + ". " + "Probably the layout of KeyValue.Type has changed."); + throw new RuntimeException("Generated an invalid key type: " + keyType + ". 
" + + "Probably the layout of KeyValue.Type has changed."); } return keyType; } @@ -285,16 +279,10 @@ private void writeStoreFile(StoreFileWriter writer) throws IOException { byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i); byte[] v = RandomKeyValueUtil.randomValue(rand); int cfLen = rand.nextInt(k.length - rowLen + 1); - KeyValue kv = new KeyValue( - k, 0, rowLen, - k, rowLen, cfLen, - k, rowLen + cfLen, k.length - rowLen - cfLen, - rand.nextLong(), - generateKeyType(rand), - v, 0, v.length); + KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen, + k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length); writer.append(kv); } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java index 143d7fb67a9c..d24eb4a6065e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) @RunWith(Parameterized.class) public class TestCellFlatSet { @@ -61,6 +61,7 @@ public class TestCellFlatSet { public static Object[] data() { return new Object[] { "SMALL_CHUNKS", "NORMAL_CHUNKS" }; // test with different chunk sizes } + private static final int NUM_OF_CELLS = 4; private static final int SMALL_CHUNK_SIZE = 64; private Cell ascCells[]; @@ -71,27 +72,27 @@ public static Object[] data() { private KeyValue lowerOuterCell; private KeyValue upperOuterCell; - - private CellChunkMap ascCCM; // for testing ascending CellChunkMap with one chunk in array - private CellChunkMap descCCM; // for testing descending CellChunkMap with one chunk in array + private CellChunkMap ascCCM; // for testing ascending CellChunkMap with one chunk in array + private CellChunkMap descCCM; // for testing descending CellChunkMap with one chunk in array private final boolean smallChunks; private static ChunkCreator chunkCreator; - - public TestCellFlatSet(String chunkType){ - long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage() - .getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(CONF, false)); + public TestCellFlatSet(String chunkType) { + long globalMemStoreLimit = + (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax() + * MemorySizeUtil.getGlobalMemStoreHeapPercent(CONF, false)); if (chunkType.equals("NORMAL_CHUNKS")) { chunkCreator = ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, - globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, - null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); assertNotNull(chunkCreator); smallChunks = false; } else { - // chunkCreator with smaller chunk size, so only 3 cell-representations can accommodate a chunk - chunkCreator = ChunkCreator.initialize(SMALL_CHUNK_SIZE, false, - globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, - null, 
MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + // chunkCreator with smaller chunk size, so only 3 cell-representations can accommodate a + // chunk + chunkCreator = ChunkCreator.initialize(SMALL_CHUNK_SIZE, false, globalMemStoreLimit, 0.2f, + MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); assertNotNull(chunkCreator); smallChunks = true; } @@ -115,10 +116,10 @@ public void setUp() throws Exception { final KeyValue kv4 = new KeyValue(four, f, q, 40, v); lowerOuterCell = new KeyValue(Bytes.toBytes(10), f, q, 10, v); upperOuterCell = new KeyValue(Bytes.toBytes(50), f, q, 10, v); - ascCells = new Cell[] {kv1,kv2,kv3,kv4}; - ascCbOnHeap = new CellArrayMap(CellComparator.getInstance(), ascCells,0, NUM_OF_CELLS,false); - descCells = new Cell[] {kv4,kv3,kv2,kv1}; - descCbOnHeap = new CellArrayMap(CellComparator.getInstance(), descCells,0, NUM_OF_CELLS,true); + ascCells = new Cell[] { kv1, kv2, kv3, kv4 }; + ascCbOnHeap = new CellArrayMap(CellComparator.getInstance(), ascCells, 0, NUM_OF_CELLS, false); + descCells = new Cell[] { kv4, kv3, kv2, kv1 }; + descCbOnHeap = new CellArrayMap(CellComparator.getInstance(), descCells, 0, NUM_OF_CELLS, true); CONF.setBoolean(MemStoreLAB.USEMSLAB_KEY, true); CONF.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f); @@ -129,7 +130,7 @@ public void setUp() throws Exception { ascCCM = setUpCellChunkMap(true); descCCM = setUpCellChunkMap(false); - if (smallChunks) { // check jumbo chunks as well + if (smallChunks) { // check jumbo chunks as well ascCCM = setUpJumboCellChunkMap(true); } } @@ -151,11 +152,11 @@ public void testCellChunkMap() throws Exception { testSubSet(cs); cs = new CellSet(descCCM); testSubSet(cs); -// cs = new CellSet(ascMultCCM); -// testCellBlocks(cs); -// testSubSet(cs); -// cs = new CellSet(descMultCCM); -// testSubSet(cs); + // cs = new CellSet(ascMultCCM); + // testCellBlocks(cs); + // testSubSet(cs); + // cs = new CellSet(descMultCCM); + // testSubSet(cs); } @Test @@ -164,6 +165,7 @@ public void testAsc() throws Exception { assertEquals(NUM_OF_CELLS, ascCs.size()); testSubSet(ascCs); } + @Test public void testDesc() throws Exception { CellSet descCs = new CellSet(descCbOnHeap); @@ -205,7 +207,8 @@ private void testSubSet(CellSet cs) throws Exception { assertEquals(0, cs.headSet(lowerOuterCell, false).size()); assertEquals(NUM_OF_CELLS, cs.headSet(upperOuterCell, false).size()); - NavigableMap sub = cs.getDelegatee().subMap(lowerOuterCell, true, upperOuterCell, true); + NavigableMap sub = + cs.getDelegatee().subMap(lowerOuterCell, true, upperOuterCell, true); assertEquals(NUM_OF_CELLS, sub.size()); Iterator iter = sub.values().iterator(); for (int i = 0; i != ascCells.length; ++i) { @@ -221,23 +224,23 @@ private void testCellBlocks(CellSet cs) throws Exception { final byte[] v = Bytes.toBytes(4); final KeyValue outerCell = new KeyValue(oneAndHalf, f, q, 10, v); - assertEquals(NUM_OF_CELLS, cs.size()); // check size - assertFalse(cs.contains(outerCell)); // check outer cell + assertEquals(NUM_OF_CELLS, cs.size()); // check size + assertFalse(cs.contains(outerCell)); // check outer cell - assertTrue(cs.contains(ascCells[0])); // check existence of the first + assertTrue(cs.contains(ascCells[0])); // check existence of the first Cell first = cs.first(); assertTrue(ascCells[0].equals(first)); - assertTrue(cs.contains(ascCells[NUM_OF_CELLS - 1])); // check last + assertTrue(cs.contains(ascCells[NUM_OF_CELLS - 1])); // check last Cell last = cs.last(); assertTrue(ascCells[NUM_OF_CELLS - 
1].equals(last)); - SortedSet tail = cs.tailSet(ascCells[1]); // check tail abd head sizes + SortedSet tail = cs.tailSet(ascCells[1]); // check tail abd head sizes assertEquals(NUM_OF_CELLS - 1, tail.size()); SortedSet head = cs.headSet(ascCells[1]); assertEquals(1, head.size()); - SortedSet tailOuter = cs.tailSet(outerCell); // check tail starting from outer cell + SortedSet tailOuter = cs.tailSet(outerCell); // check tail starting from outer cell assertEquals(NUM_OF_CELLS - 1, tailOuter.size()); Cell tailFirst = tail.first(); @@ -256,13 +259,14 @@ private void testIterators(CellSet cs) throws Exception { // Assert that we have NUM_OF_CELLS values and that they are in order int count = 0; - for (Cell kv: cs) { - assertEquals("\n\n-------------------------------------------------------------------\n" - + "Comparing iteration number " + (count + 1) + " the returned cell: " + kv - + ", the first Cell in the CellBlocksMap: " + ascCells[count] - + ", and the same transformed to String: " + ascCells[count].toString() - + "\n-------------------------------------------------------------------\n", - ascCells[count], kv); + for (Cell kv : cs) { + assertEquals( + "\n\n-------------------------------------------------------------------\n" + + "Comparing iteration number " + (count + 1) + " the returned cell: " + kv + + ", the first Cell in the CellBlocksMap: " + ascCells[count] + + ", and the same transformed to String: " + ascCells[count].toString() + + "\n-------------------------------------------------------------------\n", + ascCells[count], kv); count++; } assertEquals(NUM_OF_CELLS, count); @@ -285,18 +289,18 @@ private CellChunkMap setUpCellChunkMap(boolean asc) { Chunk dataChunk = chunkCreator.getChunk(); Chunk idxChunk = chunkCreator.getChunk(); // the array of index chunks to be used as a basis for CellChunkMap - Chunk chunkArray[] = new Chunk[8]; // according to test currently written 8 is way enough + Chunk chunkArray[] = new Chunk[8]; // according to test currently written 8 is way enough int chunkArrayIdx = 0; chunkArray[chunkArrayIdx++] = idxChunk; - ByteBuffer idxBuffer = idxChunk.getData(); // the buffers of the chunks + ByteBuffer idxBuffer = idxChunk.getData(); // the buffers of the chunks ByteBuffer dataBuffer = dataChunk.getData(); - int dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // offset inside data buffer - int idxOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // skip the space for chunk ID + int dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // offset inside data buffer + int idxOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // skip the space for chunk ID Cell[] cellArray = asc ? ascCells : descCells; - for (Cell kv: cellArray) { + for (Cell kv : cellArray) { // do we have enough space to write the cell data on the data chunk? 
if (dataOffset + kv.getSerializedSize() > chunkCreator.getChunkSize()) { // allocate more data chunks if needed @@ -315,39 +319,40 @@ private CellChunkMap setUpCellChunkMap(boolean asc) { idxOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; chunkArray[chunkArrayIdx++] = idxChunk; } - idxOffset = ByteBufferUtils.putInt(idxBuffer, idxOffset, dataChunk.getId()); // write data chunk id - idxOffset = ByteBufferUtils.putInt(idxBuffer, idxOffset, dataStartOfset); // offset + idxOffset = ByteBufferUtils.putInt(idxBuffer, idxOffset, dataChunk.getId()); // write data + // chunk id + idxOffset = ByteBufferUtils.putInt(idxBuffer, idxOffset, dataStartOfset); // offset idxOffset = ByteBufferUtils.putInt(idxBuffer, idxOffset, kv.getSerializedSize()); // length - idxOffset = ByteBufferUtils.putLong(idxBuffer, idxOffset, kv.getSequenceId()); // seqId + idxOffset = ByteBufferUtils.putLong(idxBuffer, idxOffset, kv.getSequenceId()); // seqId } - return new CellChunkMap(CellComparator.getInstance(),chunkArray,0,NUM_OF_CELLS,!asc); + return new CellChunkMap(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc); } - /* Create CellChunkMap with four cells inside the data jumbo chunk. This test is working only - ** with small chunks sized SMALL_CHUNK_SIZE (64) bytes */ + /* + * Create CellChunkMap with four cells inside the data jumbo chunk. This test is working only with + * small chunks sized SMALL_CHUNK_SIZE (64) bytes + */ private CellChunkMap setUpJumboCellChunkMap(boolean asc) { - int smallChunkSize = SMALL_CHUNK_SIZE+8; + int smallChunkSize = SMALL_CHUNK_SIZE + 8; // allocate new chunks and use the data JUMBO chunk to hold the full data of the cells // and the normal index chunk to hold the cell-representations - Chunk dataJumboChunk = - chunkCreator.getChunk(ChunkType.JUMBO_CHUNK, - smallChunkSize); + Chunk dataJumboChunk = chunkCreator.getChunk(ChunkType.JUMBO_CHUNK, smallChunkSize); assertTrue(dataJumboChunk.isJumbo()); Chunk idxChunk = chunkCreator.getChunk(); // the array of index chunks to be used as a basis for CellChunkMap - Chunk[] chunkArray = new Chunk[8]; // according to test currently written 8 is way enough + Chunk[] chunkArray = new Chunk[8]; // according to test currently written 8 is way enough int chunkArrayIdx = 0; chunkArray[chunkArrayIdx++] = idxChunk; - ByteBuffer idxBuffer = idxChunk.getData(); // the buffers of the chunks + ByteBuffer idxBuffer = idxChunk.getData(); // the buffers of the chunks ByteBuffer dataBuffer = dataJumboChunk.getData(); - int dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // offset inside data buffer - int idxOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // skip the space for chunk ID + int dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // offset inside data buffer + int idxOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; // skip the space for chunk ID Cell[] cellArray = asc ? 
ascCells : descCells; - for (Cell kv: cellArray) { + for (Cell kv : cellArray) { int dataStartOfset = dataOffset; dataOffset = KeyValueUtil.appendTo(kv, dataBuffer, dataOffset, false); // write deep cell data @@ -361,20 +366,18 @@ private CellChunkMap setUpJumboCellChunkMap(boolean asc) { } // write data chunk id idxOffset = ByteBufferUtils.putInt(idxBuffer, idxOffset, dataJumboChunk.getId()); - idxOffset = ByteBufferUtils.putInt(idxBuffer, idxOffset, dataStartOfset); // offset + idxOffset = ByteBufferUtils.putInt(idxBuffer, idxOffset, dataStartOfset); // offset idxOffset = ByteBufferUtils.putInt(idxBuffer, idxOffset, kv.getSerializedSize()); // length - idxOffset = ByteBufferUtils.putLong(idxBuffer, idxOffset, kv.getSequenceId()); // seqId + idxOffset = ByteBufferUtils.putLong(idxBuffer, idxOffset, kv.getSequenceId()); // seqId // Jumbo chunks are working only with one cell per chunk, thus always allocate a new jumbo // data chunk for next cell - dataJumboChunk = - chunkCreator.getChunk(ChunkType.JUMBO_CHUNK, - smallChunkSize); + dataJumboChunk = chunkCreator.getChunk(ChunkType.JUMBO_CHUNK, smallChunkSize); assertTrue(dataJumboChunk.isJumbo()); dataBuffer = dataJumboChunk.getData(); dataOffset = ChunkCreator.SIZEOF_CHUNK_HEADER; } - return new CellChunkMap(CellComparator.getInstance(),chunkArray,0,NUM_OF_CELLS,!asc); + return new CellChunkMap(CellComparator.getInstance(), chunkArray, 0, NUM_OF_CELLS, !asc); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java index 227236865fd3..f8c387fbc465 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,8 +45,7 @@ public class TestCellSkipListSet { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCellSkipListSet.class); - private final CellSet csls = - new CellSet(CellComparatorImpl.COMPARATOR); + private final CellSet csls = new CellSet(CellComparatorImpl.COMPARATOR); @Rule public TestName name = new TestName(); @@ -81,16 +80,16 @@ public void testAdd() throws Exception { @Test public void testIterator() throws Exception { - byte [] bytes = Bytes.toBytes(name.getMethodName()); - byte [] value1 = Bytes.toBytes("1"); - byte [] value2 = Bytes.toBytes("2"); + byte[] bytes = Bytes.toBytes(name.getMethodName()); + byte[] value1 = Bytes.toBytes("1"); + byte[] value2 = Bytes.toBytes("2"); final int total = 3; for (int i = 0; i < total; i++) { this.csls.add(new KeyValue(bytes, bytes, Bytes.toBytes("" + i), value1)); } // Assert that we added 'total' values and that they are in order int count = 0; - for (Cell kv: this.csls) { + for (Cell kv : this.csls) { assertEquals("" + count, Bytes.toString(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength())); assertTrue(Bytes.equals(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength(), value1, @@ -117,9 +116,9 @@ public void testIterator() throws Exception { @Test public void testDescendingIterator() throws Exception { - byte [] bytes = Bytes.toBytes(name.getMethodName()); - byte [] value1 = Bytes.toBytes("1"); - byte [] value2 = Bytes.toBytes("2"); + byte[] bytes = Bytes.toBytes(name.getMethodName()); + byte[] value1 = Bytes.toBytes("1"); + byte[] value2 = Bytes.toBytes("2"); final int total = 3; for (int i = 0; i < total; i++) { this.csls.add(new KeyValue(bytes, bytes, Bytes.toBytes("" + i), value1)); @@ -155,9 +154,9 @@ public void testDescendingIterator() throws Exception { @Test public void testHeadTail() throws Exception { - byte [] bytes = Bytes.toBytes(name.getMethodName()); - byte [] value1 = Bytes.toBytes("1"); - byte [] value2 = Bytes.toBytes("2"); + byte[] bytes = Bytes.toBytes(name.getMethodName()); + byte[] value1 = Bytes.toBytes("1"); + byte[] value2 = Bytes.toBytes("2"); final int total = 3; KeyValue splitter = null; for (int i = 0; i < total; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileAfterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileAfterFailover.java index b3c12fa36839..f20a136d49ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileAfterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileAfterFailover.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -48,7 +47,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({LargeTests.class}) +@Category({ LargeTests.class }) public class TestCleanupCompactedFileAfterFailover { private static final Logger LOG = @@ -74,8 +73,8 @@ public static void beforeClass() throws Exception { TEST_UTIL = new HBaseTestingUtil(); // Set the scanner lease to 20min, so the scanner can't be closed by RegionServer TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 1200000); - TEST_UTIL.getConfiguration() - .setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100); + TEST_UTIL.getConfiguration().setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, + 100); TEST_UTIL.getConfiguration().set("dfs.blocksize", "64000"); TEST_UTIL.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024"); TEST_UTIL.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY, "0"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.java index f43b0f579dd9..6b6d158c1ee3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +44,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestCleanupCompactedFileOnRegionClose { @ClassRule @@ -57,10 +56,10 @@ public class TestCleanupCompactedFileOnRegionClose { @BeforeClass public static void beforeClass() throws Exception { util = new HBaseTestingUtil(); - util.getConfiguration().setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY,100); + util.getConfiguration().setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100); util.getConfiguration().set("dfs.blocksize", "64000"); util.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024"); - util.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY,"0"); + util.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY, "0"); util.startMiniCluster(2); } @@ -91,27 +90,27 @@ public void testCleanupOnClose() throws Exception { } util.flush(tableName); } - assertEquals(refSFCount, region.getStoreFileList(new byte[][]{familyNameBytes}).size()); + assertEquals(refSFCount, region.getStoreFileList(new byte[][] { familyNameBytes }).size()); - //add a delete, to test wether we end up with an inconsistency post region close - Delete delete = new Delete(Bytes.toBytes(refSFCount-1)); + // add a delete, to test wether we end up with an inconsistency post region close + Delete delete = new Delete(Bytes.toBytes(refSFCount - 1)); table.delete(delete); util.flush(tableName); - assertFalse(table.exists(new Get(Bytes.toBytes(refSFCount-1)))); + assertFalse(table.exists(new Get(Bytes.toBytes(refSFCount - 1)))); - //Create a scanner and keep it open to add references to StoreFileReaders + // Create a scanner and keep it open to add references to StoreFileReaders Scan scan = new Scan(); - scan.withStopRow(Bytes.toBytes(refSFCount-2)); + scan.withStopRow(Bytes.toBytes(refSFCount - 2)); scan.setCaching(1); ResultScanner scanner = table.getScanner(scan); Result res = scanner.next(); assertNotNull(res); assertEquals(refSFCount, res.getFamilyMap(familyNameBytes).size()); - - //Verify the references + // Verify the references int count = 0; - for (HStoreFile sf : (Collection)region.getStore(familyNameBytes).getStorefiles()) { + for (HStoreFile sf : (Collection) region.getStore(familyNameBytes) + .getStorefiles()) { synchronized (sf) { if (count < refSFCount) { assertTrue(sf.isReferencedInReads()); @@ -122,27 +121,24 @@ public void testCleanupOnClose() throws Exception { count++; } - //Major compact to produce compacted storefiles that need to be cleaned up + // Major compact to produce compacted storefiles that need to be cleaned up util.compact(tableName, true); - assertEquals(1, region.getStoreFileList(new byte[][]{familyNameBytes}).size()); - assertEquals(refSFCount+1, - ((HStore)region.getStore(familyNameBytes)).getStoreEngine().getStoreFileManager() - .getCompactedfiles().size()); + assertEquals(1, region.getStoreFileList(new byte[][] { familyNameBytes }).size()); + assertEquals(refSFCount + 1, ((HStore) region.getStore(familyNameBytes)).getStoreEngine() + .getStoreFileManager().getCompactedfiles().size()); - //close then open the region to determine wether compacted storefiles get cleaned up on close + // close then open the region to determine wether compacted storefiles get cleaned up on close hBaseAdmin.unassign(region.getRegionInfo().getRegionName(), false); 
hBaseAdmin.assign(region.getRegionInfo().getRegionName()); util.waitUntilNoRegionsInTransition(10000); - assertFalse("Deleted row should not exist", - table.exists(new Get(Bytes.toBytes(refSFCount-1)))); + table.exists(new Get(Bytes.toBytes(refSFCount - 1)))); rs = util.getRSForFirstRegionInTable(tableName); region = rs.getRegions(tableName).get(0); - assertEquals(1, region.getStoreFileList(new byte[][]{familyNameBytes}).size()); - assertEquals(0, - ((HStore)region.getStore(familyNameBytes)).getStoreEngine().getStoreFileManager() - .getCompactedfiles().size()); + assertEquals(1, region.getStoreFileList(new byte[][] { familyNameBytes }).size()); + assertEquals(0, ((HStore) region.getStore(familyNameBytes)).getStoreEngine() + .getStoreFileManager().getCompactedfiles().size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java index f8d59dec3903..586d78f37aff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,14 +64,12 @@ public void testCleanupMetaWAL() throws Exception { TEST_UTIL.createTable(TableName.valueOf("test"), "cf"); HRegionServer serverWithMeta = TEST_UTIL.getMiniHBaseCluster() .getRegionServer(TEST_UTIL.getMiniHBaseCluster().getServerWithMeta()); - TEST_UTIL.getAdmin() - .move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); + TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); LOG.info("KILL"); TEST_UTIL.getMiniHBaseCluster().killRegionServer(serverWithMeta.getServerName()); LOG.info("WAIT"); - TEST_UTIL.waitFor(30000, () -> - TEST_UTIL.getMiniHBaseCluster().getMaster().getProcedures().stream() - .filter(p -> p instanceof ServerCrashProcedure && p.isFinished()).count() > 0); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster().getProcedures() + .stream().filter(p -> p instanceof ServerCrashProcedure && p.isFinished()).count() > 0); LOG.info("DONE WAITING"); MasterFileSystem fs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path walPath = new Path(fs.getWALRootDir(), HConstants.HREGION_LOGDIR_NAME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java index 8f869e1bbeae..8419134b90e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,7 +69,8 @@ public class TestClearRegionBlockCache { private HRegionServer rs1, rs2; private SingleProcessHBaseCluster cluster; - @Parameterized.Parameter public String cacheType; + @Parameterized.Parameter + public String cacheType; @Parameterized.Parameters(name = "{index}: {0}") public static Object[] data() { @@ -118,10 +119,8 @@ public void testClearBlockCache() throws Exception { HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); clearRegionBlockCache(rs2); - assertEquals("" + blockCache1.getBlockCount(), - initialBlockCount1, blockCache1.getBlockCount()); - assertEquals("" + blockCache2.getBlockCount(), - initialBlockCount2, blockCache2.getBlockCount()); + assertEquals("" + blockCache1.getBlockCount(), initialBlockCount1, blockCache1.getBlockCount()); + assertEquals("" + blockCache2.getBlockCount(), initialBlockCount2, blockCache2.getBlockCount()); } @Test @@ -136,10 +135,10 @@ public void testClearBlockCacheFromAdmin() throws Exception { // scan will cause blocks to be added in BlockCache scanAllRegionsForRS(rs1); assertEquals(blockCache1.getBlockCount() - initialBlockCount1, - HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); + HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)); scanAllRegionsForRS(rs2); assertEquals(blockCache2.getBlockCount() - initialBlockCount2, - HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME); assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) @@ -150,8 +149,8 @@ public void testClearBlockCacheFromAdmin() throws Exception { @Test public void testClearBlockCacheFromAsyncAdmin() throws Exception { - try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(HTU.getConfiguration()) - .get()) { + try (AsyncConnection conn = + ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get()) { AsyncAdmin admin = conn.getAdmin(); BlockCache blockCache1 = rs1.getBlockCache().get(); @@ -168,8 +167,8 @@ public void testClearBlockCacheFromAsyncAdmin() throws Exception { HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get(); - assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU - .getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); + assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY)); assertEquals(initialBlockCount1, blockCache1.getBlockCount()); assertEquals(initialBlockCount2, blockCache2.getBlockCount()); } @@ -178,7 +177,8 @@ public void testClearBlockCacheFromAsyncAdmin() throws Exception { private void scanAllRegionsForRS(HRegionServer rs) throws IOException { for (Region region : rs.getRegions(TABLE_NAME)) { RegionScanner scanner = region.getScanner(new Scan()); - while (scanner.next(new ArrayList())); + while (scanner.next(new ArrayList())) + ; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java index 4d8092c3f854..25ae1545bc6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor 
license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,15 +44,14 @@ /** * Test metrics incremented on region server operations. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestClusterId { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestClusterId.class); - private final HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private JVMClusterUtil.RegionServerThread rst; @@ -64,27 +63,27 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); - if(rst != null && rst.getRegionServer() != null) { + if (rst != null && rst.getRegionServer() != null) { rst.getRegionServer().stop("end of test"); rst.join(); } } @Test - public void testClusterId() throws Exception { + public void testClusterId() throws Exception { TEST_UTIL.startMiniZKCluster(); TEST_UTIL.startMiniDFSCluster(1); Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - //start region server, needs to be separate - //so we get an unset clusterId + // start region server, needs to be separate + // so we get an unset clusterId rst = JVMClusterUtil.createRegionServerThread(conf, HRegionServer.class, 0); rst.start(); - //Make sure RS is in blocking state + // Make sure RS is in blocking state Thread.sleep(10000); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(1).numRegionServers(0).build(); + StartTestingClusterOption option = + StartTestingClusterOption.builder().numMasters(1).numRegionServers(0).build(); TEST_UTIL.startMiniHBaseCluster(option); rst.waitForServerOnline(); @@ -117,4 +116,3 @@ public void testRewritingClusterIdToPB() throws Exception { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java index 5c12617656f9..dd88a16c7833 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java @@ -53,14 +53,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestColumnSeeking { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestColumnSeeking.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Logger LOG = LoggerFactory.getLogger(TestColumnSeeking.class); @@ -73,9 +74,9 @@ public void testDuplicateVersions() throws IOException { TableName table = TableName.valueOf(name.getMethodName()); ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(familyBytes) - .setMaxVersions(1000).setMaxVersions(3).build(); + .setMaxVersions(1000).setMaxVersions(3).build(); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(table).setColumnFamily(familyDescriptor).build(); + TableDescriptorBuilder.newBuilder(table).setColumnFamily(familyDescriptor).build(); RegionInfo info = RegionInfoBuilder.newBuilder(table).build(); // Set this so that the 
archiver writes to the temp dir as well. HRegion region = TEST_UTIL.createLocalHRegion(info, tableDescriptor); @@ -113,8 +114,7 @@ public void testDuplicateVersions() throws IOException { p.setDurability(Durability.SKIP_WAL); for (String column : allColumns) { for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) { - KeyValue kv = - KeyValueTestUtil.create(row, family, column, timestamp, value); + KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, value); if (Math.random() < putPercentage) { p.add(kv); allKVMap.put(kv.getKeyString(), kv); @@ -155,8 +155,7 @@ public void testDuplicateVersions() throws IOException { scan.addColumn(familyBytes, Bytes.toBytes(column)); } LOG.info("ExplicitColumns scanner"); - LOG.info("Columns: " + columnLists[i].size() + " Keys: " - + kvSet.size()); + LOG.info("Columns: " + columnLists[i].size() + " Keys: " + kvSet.size()); } else { kvSet = allKVMap.values(); LOG.info("Wildcard scanner"); @@ -184,12 +183,9 @@ public void testReseeking() throws IOException { byte[] familyBytes = Bytes.toBytes("Family"); TableName table = TableName.valueOf(name.getMethodName()); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(table); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(table); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(family)) - .setMaxVersions(3).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).setMaxVersions(3).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); RegionInfo info = RegionInfoBuilder.newBuilder(table).build(); @@ -227,9 +223,7 @@ public void testReseeking() throws IOException { p.setDurability(Durability.SKIP_WAL); for (String column : allColumns) { for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) { - KeyValue kv = - KeyValueTestUtil.create(row, family, column, timestamp, - valueString); + KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, valueString); if (Math.random() < putPercentage) { p.add(kv); allKVMap.put(kv.getKeyString(), kv); @@ -270,8 +264,7 @@ public void testReseeking() throws IOException { scan.addColumn(familyBytes, Bytes.toBytes(column)); } LOG.info("ExplicitColumns scanner"); - LOG.info("Columns: " + columnLists[i].size() + " Keys: " - + kvSet.size()); + LOG.info("Columns: " + columnLists[i].size() + " Keys: " + kvSet.size()); } else { kvSet = allKVMap.values(); LOG.info("Wildcard scanner"); @@ -310,4 +303,3 @@ List generateRandomWords(int numberOfWords, String suffix) { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java index ba9f70c0ffbc..cc81c72c3a42 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,8 +61,6 @@ public class TestCompactSplitThread { private static Path rootDir; private static FileSystem fs; - - /** * Setup the config for the cluster */ @@ -113,8 +111,8 @@ public void testThreadPoolSizeTuning() throws Exception { Connection conn = ConnectionFactory.createConnection(conf); try { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCompactionEnabled(false) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCompactionEnabled(false) + .build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); TEST_UTIL.waitTableAvailable(tableName); HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName); @@ -161,18 +159,18 @@ public void testThreadPoolSizeTuning() throws Exception { @Test public void testFlushWithTableCompactionDisabled() throws Exception { TableDescriptor htd = - TableDescriptorBuilder.newBuilder(tableName).setCompactionEnabled(false).build(); + TableDescriptorBuilder.newBuilder(tableName).setCompactionEnabled(false).build(); TEST_UTIL.createTable(htd, new byte[][] { family }, null); // load the table - for (int i = 0; i < blockingStoreFiles + 1; i ++) { + for (int i = 0; i < blockingStoreFiles + 1; i++) { TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(tableName), family); TEST_UTIL.flush(tableName); } // Make sure that store file number is greater than blockingStoreFiles + 1 Path tableDir = CommonFSUtils.getTableDir(rootDir, tableName); - Collection hfiles = SnapshotTestingUtils.listHFileNames(fs, tableDir); - assert(hfiles.size() > blockingStoreFiles + 1); + Collection hfiles = SnapshotTestingUtils.listHFileNames(fs, tableDir); + assert (hfiles.size() > blockingStoreFiles + 1); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index d0e03d7f01fe..2cc0359ea01f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -70,7 +70,7 @@ /** * compacted memstore test case */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestCompactingMemStore extends TestDefaultMemStore { @ClassRule @@ -87,8 +87,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { // Helpers ////////////////////////////////////////////////////////////////////////////// protected static byte[] makeQualifier(final int i1, final int i2) { - return Bytes.toBytes(Integer.toString(i1) + ";" + - Integer.toString(i2)); + return Bytes.toBytes(Integer.toString(i1) + ";" + Integer.toString(i2)); } @After @@ -100,9 +99,9 @@ public void tearDown() throws Exception { @Before public void setUp() throws Exception { compactingSetUp(); - this.memstore = new MyCompactingMemStore(HBaseConfiguration.create(), CellComparator.getInstance(), - store, regionServicesForStores, MemoryCompactionPolicy.EAGER); - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); + this.memstore = new MyCompactingMemStore(HBaseConfiguration.create(), + CellComparator.getInstance(), store, regionServicesForStores, MemoryCompactionPolicy.EAGER); + ((CompactingMemStore) 
memstore).setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); } protected void compactingSetUp() throws Exception { @@ -114,21 +113,22 @@ protected void compactingSetUp() throws Exception { HBaseTestingUtil hbaseUtility = new HBaseTestingUtil(conf); ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of(FAMILY); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf("foobar")) - .setColumnFamily(familyDescriptor).build(); + .setColumnFamily(familyDescriptor).build(); RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf("foobar")).build(); WAL wal = HBaseTestingUtil.createWal(conf, hbaseUtility.getDataTestDir(), info); - this.region = HRegion.createHRegion(info, hbaseUtility.getDataTestDir(), conf, - tableDescriptor, wal, true); + this.region = HRegion.createHRegion(info, hbaseUtility.getDataTestDir(), conf, tableDescriptor, + wal, true); this.regionServicesForStores = Mockito.spy(region.getRegionServicesForStores()); ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(1); Mockito.when(regionServicesForStores.getInMemoryCompactionPool()).thenReturn(pool); this.store = new HStore(region, familyDescriptor, conf, false); - long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage() - .getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false)); + long globalMemStoreLimit = + (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax() + * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false)); chunkCreator = ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, - globalMemStoreLimit, 0.4f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, - null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + globalMemStoreLimit, 0.4f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); assertNotNull(chunkCreator); } @@ -137,7 +137,7 @@ protected void compactingSetUp() throws Exception { */ @Test public void testTimeOfOldestEdit() { - assertEquals(Long.MAX_VALUE, memstore.timeOfOldestEdit()); + assertEquals(Long.MAX_VALUE, memstore.timeOfOldestEdit()); final byte[] r = Bytes.toBytes("r"); final byte[] f = Bytes.toBytes("f"); final byte[] q = Bytes.toBytes("q"); @@ -147,7 +147,7 @@ public void testTimeOfOldestEdit() { long timeOfOldestEdit = memstore.timeOfOldestEdit(); assertNotEquals(Long.MAX_VALUE, timeOfOldestEdit); - ((CompactingMemStore)memstore).flushInMemory(); + ((CompactingMemStore) memstore).flushInMemory(); assertEquals(timeOfOldestEdit, memstore.timeOfOldestEdit()); memstore.add(kv, null); assertEquals(timeOfOldestEdit, memstore.timeOfOldestEdit()); @@ -157,7 +157,6 @@ public void testTimeOfOldestEdit() { /** * A simple test which verifies the 3 possible states when scanning across snapshot. - * * @throws IOException * @throws CloneNotSupportedException */ @@ -190,15 +189,14 @@ public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedExcep // use case 3: first in snapshot second in kvset this.memstore = new CompactingMemStore(HBaseConfiguration.create(), - CellComparator.getInstance(), store, regionServicesForStores, - MemoryCompactionPolicy.EAGER); + CellComparator.getInstance(), store, regionServicesForStores, MemoryCompactionPolicy.EAGER); this.memstore.add(kv1.clone(), null); // As compaction is starting in the background the repetition // of the k1 might be removed BUT the scanners created earlier // should look on the OLD MutableCellSetSegment, so this should be OK... 
this.memstore.snapshot(); this.memstore.add(kv2.clone(), null); - verifyScanAcrossSnapshot2(kv1,kv2); + verifyScanAcrossSnapshot2(kv1, kv2); } /** @@ -217,12 +215,12 @@ public void testSnapshotting() throws IOException { } } - ////////////////////////////////////////////////////////////////////////////// // Get tests ////////////////////////////////////////////////////////////////////////////// - /** Test getNextRow from memstore + /** + * Test getNextRow from memstore * @throws InterruptedException */ @Override @@ -232,24 +230,25 @@ public void testGetNextRow() throws Exception { // Add more versions to make it a little more interesting. Thread.sleep(1); addRows(this.memstore); - Cell closestToEmpty = ((CompactingMemStore)this.memstore).getNextRow(KeyValue.LOWESTKEY); + Cell closestToEmpty = ((CompactingMemStore) this.memstore).getNextRow(KeyValue.LOWESTKEY); assertTrue(CellComparator.getInstance().compareRows(closestToEmpty, - new KeyValue(Bytes.toBytes(0), EnvironmentEdgeManager.currentTime())) == 0); + new KeyValue(Bytes.toBytes(0), EnvironmentEdgeManager.currentTime())) == 0); for (int i = 0; i < ROW_COUNT; i++) { - Cell nr = ((CompactingMemStore)this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i), - EnvironmentEdgeManager.currentTime())); + Cell nr = ((CompactingMemStore) this.memstore) + .getNextRow(new KeyValue(Bytes.toBytes(i), EnvironmentEdgeManager.currentTime())); if (i + 1 == ROW_COUNT) { assertNull(nr); } else { assertTrue(CellComparator.getInstance().compareRows(nr, - new KeyValue(Bytes.toBytes(i + 1), EnvironmentEdgeManager.currentTime())) == 0); + new KeyValue(Bytes.toBytes(i + 1), EnvironmentEdgeManager.currentTime())) == 0); } } - //starting from each row, validate results should contain the starting row + // starting from each row, validate results should contain the starting row Configuration conf = HBaseConfiguration.create(); for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) { - ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, - KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); + ScanInfo scanInfo = + new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, KeepDeletedCells.FALSE, + HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); try (InternalScanner scanner = new StoreScanner(new Scan().withStartRow(Bytes.toBytes(startRowId)), scanInfo, null, memstore.getScanners(0))) { @@ -285,17 +284,17 @@ public void testGet_memstoreAndSnapShot() throws IOException { byte[] qf5 = Bytes.toBytes("testqualifier5"); byte[] val = Bytes.toBytes("testval"); - //Setting up memstore + // Setting up memstore memstore.add(new KeyValue(row, fam, qf1, val), null); memstore.add(new KeyValue(row, fam, qf2, val), null); memstore.add(new KeyValue(row, fam, qf3, val), null); - //Pushing to pipeline - ((CompactingMemStore)memstore).flushInMemory(); + // Pushing to pipeline + ((CompactingMemStore) memstore).flushInMemory(); assertEquals(0, memstore.getSnapshot().getCellsCount()); - //Creating a snapshot + // Creating a snapshot memstore.snapshot(); assertEquals(3, memstore.getSnapshot().getCellsCount()); - //Adding value to "new" memstore + // Adding value to "new" memstore assertEquals(0, memstore.getActive().getCellsCount()); memstore.add(new KeyValue(row, fam, qf4, val), null); memstore.add(new KeyValue(row, fam, qf5, val), null); @@ -308,9 +307,8 @@ public void testGet_memstoreAndSnapShot() throws IOException { //////////////////////////////////// /** - * Add keyvalues with a fixed 
memstoreTs, and checks that memstore size is decreased - * as older keyvalues are deleted from the memstore. - * + * Add keyvalues with a fixed memstoreTs, and checks that memstore size is decreased as older + * keyvalues are deleted from the memstore. * @throws Exception */ @Override @@ -333,7 +331,7 @@ public void testUpsertMemstoreSize() throws Exception { this.memstore.upsert(l, 2, null);// readpoint is 2 MemStoreSize newSize = this.memstore.size(); assert (newSize.getDataSize() > oldSize.getDataSize()); - //The kv1 should be removed. + // The kv1 should be removed. assert (memstore.getActive().getCellsCount() == 2); KeyValue kv4 = KeyValueTestUtil.create("r", "f", "q", 104, "v"); @@ -342,14 +340,14 @@ public void testUpsertMemstoreSize() throws Exception { l.add(kv4); this.memstore.upsert(l, 3, null); assertEquals(newSize, this.memstore.size()); - //The kv2 should be removed. + // The kv2 should be removed. assert (memstore.getActive().getCellsCount() == 2); - //this.memstore = null; + // this.memstore = null; } /** - * Tests that the timeOfOldestEdit is updated correctly for the - * various edit operations in memstore. + * Tests that the timeOfOldestEdit is updated correctly for the various edit operations in + * memstore. */ @Override @Test @@ -372,7 +370,7 @@ public void testUpdateToTimeOfOldestEdit() throws Exception { memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, KeyValue.Type.Delete, "v"), null); t = memstore.timeOfOldestEdit(); assertTrue(t == 1234); - t = runSnapshot(memstore, true); + t = runSnapshot(memstore, true); // test the case that the timeOfOldestEdit is updated after a KV upsert List l = new ArrayList<>(); @@ -387,8 +385,7 @@ public void testUpdateToTimeOfOldestEdit() throws Exception { } } - private long runSnapshot(final AbstractMemStore hmc, boolean useForce) - throws IOException { + private long runSnapshot(final AbstractMemStore hmc, boolean useForce) throws IOException { // Save off old state. long oldHistorySize = hmc.getSnapshot().getDataSize(); long prevTimeStamp = hmc.timeOfOldestEdit(); @@ -408,15 +405,14 @@ private long runSnapshot(final AbstractMemStore hmc, boolean useForce) return prevTimeStamp; } - private void isExpectedRowWithoutTimestamps(final int rowIndex, - List kvs) { + private void isExpectedRowWithoutTimestamps(final int rowIndex, List kvs) { int i = 0; for (Cell kv : kvs) { byte[] expectedColname = makeQualifier(rowIndex, i++); assertTrue("Column name", CellUtil.matchingQualifier(kv, expectedColname)); - // Value is column name as bytes. Usually result is + // Value is column name as bytes. Usually result is // 100 bytes in size at least. This is the default size - // for BytesWriteable. For comparison, convert bytes to + // for BytesWriteable. For comparison, convert bytes to // String and trim to remove trailing null bytes. 
assertTrue("Content", CellUtil.matchingValue(kv, expectedColname)); } @@ -448,7 +444,7 @@ public void testPuttingBackChunksAfterFlushing() throws IOException { memstore.add(new KeyValue(row, fam, qf5, val), null); assertEquals(2, memstore.getActive().getCellsCount()); // close the scanners - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -459,8 +455,7 @@ public void testPuttingBackChunksAfterFlushing() throws IOException { } @Test - public void testPuttingBackChunksWithOpeningScanner() - throws IOException { + public void testPuttingBackChunksWithOpeningScanner() throws IOException { byte[] row = Bytes.toBytes("testrow"); byte[] fam = Bytes.toBytes("testfamily"); byte[] qf1 = Bytes.toBytes("testqualifier1"); @@ -492,7 +487,7 @@ public void testPuttingBackChunksWithOpeningScanner() // Shouldn't putting back the chunks to pool,since some scanners are opening // based on their data // close the scanners - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -523,7 +518,7 @@ public void testPuttingBackChunksWithOpeningScanner() // Since no opening scanner, the chunks of snapshot should be put back to // pool // close the scanners - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -531,14 +526,13 @@ public void testPuttingBackChunksWithOpeningScanner() } @Test - public void testPuttingBackChunksWithOpeningPipelineScanner() - throws IOException { + public void testPuttingBackChunksWithOpeningPipelineScanner() throws IOException { // set memstore to do data compaction and not to use the speculative scan MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); - ((MyCompactingMemStore)memstore).initiateType(compactionType, memstore.getConfiguration()); + String.valueOf(compactionType)); + ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); byte[] row = Bytes.toBytes("testrow"); byte[] fam = Bytes.toBytes("testfamily"); @@ -553,8 +547,8 @@ public void testPuttingBackChunksWithOpeningPipelineScanner() memstore.add(new KeyValue(row, fam, qf3, 1, val), null); // Creating a pipeline - ((MyCompactingMemStore)memstore).disableCompaction(); - ((CompactingMemStore)memstore).flushInMemory(); + ((MyCompactingMemStore) memstore).disableCompaction(); + ((CompactingMemStore) memstore).flushInMemory(); // Adding value to "new" memstore assertEquals(0, memstore.getActive().getCellsCount()); @@ -563,14 +557,14 @@ public void testPuttingBackChunksWithOpeningPipelineScanner() assertEquals(2, memstore.getActive().getCellsCount()); // pipeline bucket 2 - ((CompactingMemStore)memstore).flushInMemory(); + ((CompactingMemStore) memstore).flushInMemory(); // opening scanner before force flushing List scanners = memstore.getScanners(0); // Shouldn't putting back the chunks to pool,since some scanners are opening // based on their data - ((MyCompactingMemStore)memstore).enableCompaction(); + ((MyCompactingMemStore) memstore).enableCompaction(); // trigger compaction - ((CompactingMemStore)memstore).flushInMemory(); + ((CompactingMemStore) memstore).flushInMemory(); // Adding 
value to "new" memstore assertEquals(0, memstore.getActive().getCellsCount()); @@ -594,7 +588,7 @@ public void testPuttingBackChunksWithOpeningPipelineScanner() MemStoreSnapshot snapshot = memstore.snapshot(); // close the scanners - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -612,7 +606,7 @@ public void testPuttingBackChunksWithOpeningPipelineScanner() // Since no opening scanner, the chunks of snapshot should be put back to // pool // close the scanners - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -628,11 +622,11 @@ public void testCompaction1Bucket() throws IOException { // set memstore to do basic structure flattening, the "eager" option is tested in // TestCompactingToCellFlatMapMemStore MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; - memstore.getConfiguration() - .set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType)); - ((MyCompactingMemStore)memstore).initiateType(compactionType, memstore.getConfiguration()); + memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(compactionType)); + ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); - String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4 + String[] keys1 = { "A", "A", "B", "C" }; // A1, A2, B3, C4 // test 1 bucket int totalCellsLen = addRowsByKeys(memstore, keys1); @@ -640,16 +634,16 @@ public void testCompaction1Bucket() throws IOException { int oneCellOnCAHeapSize = 88; long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize; assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize()); - assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); + assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact assertEquals(0, memstore.getSnapshot().getCellsCount()); // There is no compaction, as the compacting memstore type is basic. 
// totalCellsLen remains the same totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 4 * oneCellOnCAHeapSize; assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize()); - assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); + assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); MemStoreSize mss = memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot @@ -669,10 +663,10 @@ public void testCompaction2Buckets() throws IOException { // TestCompactingToCellFlatMapMemStore MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); + String.valueOf(compactionType)); memstore.getConfiguration().set(MemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, - String.valueOf(1)); - ((MyCompactingMemStore)memstore).initiateType(compactionType, memstore.getConfiguration()); + String.valueOf(1)); + ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); String[] keys1 = { "A", "A", "B", "C" }; String[] keys2 = { "A", "B", "D" }; @@ -682,11 +676,11 @@ public void testCompaction2Buckets() throws IOException { long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize; assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize()); - assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); + assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact int counter = 0; - for ( Segment s : memstore.getSegments()) { + for (Segment s : memstore.getSegments()) { counter += s.getCellsCount(); } assertEquals(4, counter); @@ -696,7 +690,7 @@ public void testCompaction2Buckets() throws IOException { assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize()); totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 4 * oneCellOnCAHeapSize; - assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); + assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); int totalCellsLen2 = addRowsByKeys(memstore, keys2); totalHeapSize += 3 * oneCellOnCSLMHeapSize; @@ -704,12 +698,12 @@ public void testCompaction2Buckets() throws IOException { assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); MemStoreSize mss = memstore.getFlushableSize(); - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact assertEquals(0, memstore.getSnapshot().getCellsCount()); assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize()); totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 7 * oneCellOnCAHeapSize; - assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); + assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); mss = memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot @@ -728,8 +722,8 @@ public void testCompaction3Buckets() throws IOException { // set memstore to do data compaction and not to use the speculative scan MemoryCompactionPolicy 
compactionType = MemoryCompactionPolicy.EAGER; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); - ((MyCompactingMemStore)memstore).initiateType(compactionType, memstore.getConfiguration()); + String.valueOf(compactionType)); + ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); String[] keys1 = { "A", "A", "B", "C" }; String[] keys2 = { "A", "B", "D" }; String[] keys3 = { "D", "B", "B" }; @@ -739,8 +733,8 @@ public void testCompaction3Buckets() throws IOException { int oneCellOnCAHeapSize = 88; assertEquals(totalCellsLen1, region.getMemStoreDataSize()); long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize; - assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact assertEquals(0, memstore.getSnapshot().getCellsCount()); // One cell is duplicated and the compaction will remove it. All cells of same time so adjusting @@ -750,7 +744,7 @@ public void testCompaction3Buckets() throws IOException { // In memory flush to make a CellArrayMap instead of CSLM. See the overhead diff. totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 3 * oneCellOnCAHeapSize; - assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); + assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); int totalCellsLen2 = addRowsByKeys(memstore, keys2);// Adding 3 more cells. long totalHeapSize2 = totalHeapSize + 3 * oneCellOnCSLMHeapSize; @@ -760,33 +754,35 @@ public void testCompaction3Buckets() throws IOException { ((MyCompactingMemStore) memstore).disableCompaction(); MemStoreSize mss = memstore.getFlushableSize(); - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline without compaction + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction assertEquals(0, memstore.getSnapshot().getCellsCount()); // No change in the cells data size. ie. memstore size. as there is no compaction. assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize()); assertEquals(totalHeapSize2 + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM, - ((CompactingMemStore) memstore).heapSize()); + ((CompactingMemStore) memstore).heapSize()); int totalCellsLen3 = addRowsByKeys(memstore, keys3);// 3 more cells added assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3, - regionServicesForStores.getMemStoreSize()); - long totalHeapSize3 = totalHeapSize2 + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM - + 3 * oneCellOnCSLMHeapSize; + regionServicesForStores.getMemStoreSize()); + long totalHeapSize3 = + totalHeapSize2 + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM + 3 * oneCellOnCSLMHeapSize; assertEquals(totalHeapSize3, ((CompactingMemStore) memstore).heapSize()); - ((MyCompactingMemStore)memstore).enableCompaction(); + ((MyCompactingMemStore) memstore).enableCompaction(); mss = memstore.getFlushableSize(); - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact assertEquals(0, memstore.getSnapshot().getCellsCount()); // active flushed to pipeline and all 3 segments compacted. 
Will get rid of duplicated cells. // Out of total 10, only 4 cells are unique totalCellsLen2 = totalCellsLen2 / 3;// 2 out of 3 cells are duplicated totalCellsLen3 = 0;// All duplicated cells. assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3, - regionServicesForStores.getMemStoreSize()); + regionServicesForStores.getMemStoreSize()); // Only 4 unique cells left - assertEquals(4 * oneCellOnCAHeapSize + MutableSegment.DEEP_OVERHEAD - + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM, ((CompactingMemStore) memstore).heapSize()); + assertEquals( + 4 * oneCellOnCAHeapSize + MutableSegment.DEEP_OVERHEAD + + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM, + ((CompactingMemStore) memstore).heapSize()); mss = memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot @@ -804,11 +800,11 @@ public void testMagicCompaction3Buckets() throws IOException { MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.ADAPTIVE; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); - memstore.getConfiguration().setDouble( - AdaptiveMemStoreCompactionStrategy.ADAPTIVE_COMPACTION_THRESHOLD_KEY, 0.45); - memstore.getConfiguration().setInt( - AdaptiveMemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, 2); + String.valueOf(compactionType)); + memstore.getConfiguration() + .setDouble(AdaptiveMemStoreCompactionStrategy.ADAPTIVE_COMPACTION_THRESHOLD_KEY, 0.45); + memstore.getConfiguration() + .setInt(AdaptiveMemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, 2); memstore.getConfiguration().setInt(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 1); ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); @@ -823,28 +819,28 @@ public void testMagicCompaction3Buckets() throws IOException { long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 3 * oneCellOnCSLMHeapSize; assertEquals(totalHeapSize, memstore.heapSize()); - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline - flatten + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline - flatten assertEquals(3, ((CompactingMemStore) memstore).getImmutableSegments().getNumOfCells()); assertEquals(1.0, - ((CompactingMemStore) memstore).getImmutableSegments().getEstimatedUniquesFrac(), 0); + ((CompactingMemStore) memstore).getImmutableSegments().getEstimatedUniquesFrac(), 0); assertEquals(0, memstore.getSnapshot().getCellsCount()); addRowsByKeys(memstore, keys2);// Adding 1 more cell - flatten. - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline without compaction + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction assertEquals(4, ((CompactingMemStore) memstore).getImmutableSegments().getNumOfCells()); assertEquals(1.0, - ((CompactingMemStore) memstore).getImmutableSegments().getEstimatedUniquesFrac(), 0); + ((CompactingMemStore) memstore).getImmutableSegments().getEstimatedUniquesFrac(), 0); assertEquals(0, memstore.getSnapshot().getCellsCount()); addRowsByKeys(memstore, keys3);// Adding 4 more cells - merge. 
- ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline without compaction + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction assertEquals(8, ((CompactingMemStore) memstore).getImmutableSegments().getNumOfCells()); assertEquals((4.0 / 8.0), - ((CompactingMemStore) memstore).getImmutableSegments().getEstimatedUniquesFrac(), 0); + ((CompactingMemStore) memstore).getImmutableSegments().getEstimatedUniquesFrac(), 0); assertEquals(0, memstore.getSnapshot().getCellsCount()); addRowsByKeys(memstore, keys4);// 3 more cells added - compact (or not) - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact int numCells = ((CompactingMemStore) memstore).getImmutableSegments().getNumOfCells(); assertTrue(4 == numCells || 11 == numCells); assertEquals(0, memstore.getSnapshot().getCellsCount()); @@ -930,6 +926,7 @@ void disableCompaction() { void enableCompaction() { allowCompaction.set(true); } + void initiateType(MemoryCompactionPolicy compactionType, Configuration conf) throws IllegalArgumentIOException { compactor.initiateCompactionStrategy(compactionType, conf, "CF_TEST"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java index 0cb4116682b9..ec193f10f593 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ /** * compacted memstore test case */ -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) @RunWith(Parameterized.class) public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore { @@ -59,14 +59,16 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore public static Object[] data() { return new Object[] { "CHUNK_MAP", "ARRAY_MAP" }; // test different immutable indexes } + private static final Logger LOG = LoggerFactory.getLogger(TestCompactingToCellFlatMapMemStore.class); public final boolean toCellChunkMap; Configuration conf; + ////////////////////////////////////////////////////////////////////////////// // Helpers ////////////////////////////////////////////////////////////////////////////// - public TestCompactingToCellFlatMapMemStore(String type){ + public TestCompactingToCellFlatMapMemStore(String type) { if (type == "CHUNK_MAP") { toCellChunkMap = true; } else { @@ -74,22 +76,23 @@ public TestCompactingToCellFlatMapMemStore(String type){ } } - @Override public void tearDown() throws Exception { + @Override + public void tearDown() throws Exception { chunkCreator.clearChunksInPool(); } - @Override public void setUp() throws Exception { + @Override + public void setUp() throws Exception { compactingSetUp(); this.conf = HBaseConfiguration.create(); // set memstore to do data compaction conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(MemoryCompactionPolicy.EAGER)); + String.valueOf(MemoryCompactionPolicy.EAGER)); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.02); - this.memstore = - new MyCompactingMemStore(conf, CellComparatorImpl.COMPARATOR, store, - regionServicesForStores, MemoryCompactionPolicy.EAGER); + this.memstore = new MyCompactingMemStore(conf, CellComparatorImpl.COMPARATOR, store, + regionServicesForStores, MemoryCompactionPolicy.EAGER); } ////////////////////////////////////////////////////////////////////////////// @@ -98,44 +101,42 @@ public TestCompactingToCellFlatMapMemStore(String type){ @Override public void testCompaction1Bucket() throws IOException { int counter = 0; - String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4 + String[] keys1 = { "A", "A", "B", "C" }; // A1, A2, B3, C4 if (toCellChunkMap) { // set memstore to flat into CellChunkMap - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); } else { - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); } // test 1 bucket long totalCellsLen = addRowsByKeysDataSize(memstore, keys1); long cellBeforeFlushSize = cellBeforeFlushSize(); - long cellAfterFlushSize = cellAfterFlushSize(); + long cellAfterFlushSize = cellAfterFlushSize(); long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize; assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize()); - assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); + assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); assertEquals(4, memstore.getActive().getCellsCount()); - ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact + ((CompactingMemStore) memstore).flushInMemory(); 
// push keys to pipeline and compact assertEquals(0, memstore.getSnapshot().getCellsCount()); // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting // totalCellsLen totalCellsLen = (totalCellsLen * 3) / 4; assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize()); - totalHeapSize = - 3 * cellAfterFlushSize + MutableSegment.DEEP_OVERHEAD - + (toCellChunkMap ? - CellChunkImmutableSegment.DEEP_OVERHEAD_CCM : - CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); - assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); - for ( Segment s : memstore.getSegments()) { + totalHeapSize = 3 * cellAfterFlushSize + MutableSegment.DEEP_OVERHEAD + + (toCellChunkMap ? CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + : CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); + assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); + for (Segment s : memstore.getSegments()) { counter += s.getCellsCount(); } assertEquals(3, counter); MemStoreSize mss = memstore.getFlushableSize(); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot - region.decrMemStoreSize(mss); // simulate flusher + region.decrMemStoreSize(mss); // simulate flusher ImmutableSegment s = memstore.getSnapshot(); assertEquals(3, s.getCellsCount()); assertEquals(0, regionServicesForStores.getMemStoreSize()); @@ -147,14 +148,14 @@ public void testCompaction1Bucket() throws IOException { public void testCompaction2Buckets() throws IOException { if (toCellChunkMap) { // set memstore to flat into CellChunkMap - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); } else { - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); } String[] keys1 = { "A", "A", "B", "C" }; String[] keys2 = { "A", "B", "D" }; - long totalCellsLen1 = addRowsByKeysDataSize(memstore, keys1); // INSERT 4 + long totalCellsLen1 = addRowsByKeysDataSize(memstore, keys1); // INSERT 4 long cellBeforeFlushSize = cellBeforeFlushSize(); long cellAfterFlushSize = cellAfterFlushSize(); long totalHeapSize1 = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize; @@ -162,23 +163,22 @@ public void testCompaction2Buckets() throws IOException { assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize()); ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact - int counter = 0; // COMPACT 4->3 - for ( Segment s : memstore.getSegments()) { + int counter = 0; // COMPACT 4->3 + for (Segment s : memstore.getSegments()) { counter += s.getCellsCount(); } - assertEquals(3,counter); + assertEquals(3, counter); assertEquals(0, memstore.getSnapshot().getCellsCount()); // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting // totalCellsLen totalCellsLen1 = (totalCellsLen1 * 3) / 4; totalHeapSize1 = 3 * cellAfterFlushSize + MutableSegment.DEEP_OVERHEAD - + (toCellChunkMap ? - CellChunkImmutableSegment.DEEP_OVERHEAD_CCM : - CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); + + (toCellChunkMap ? 
CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + : CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize()); assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize()); - long totalCellsLen2 = addRowsByKeysDataSize(memstore, keys2); // INSERT 3 (3+3=6) + long totalCellsLen2 = addRowsByKeysDataSize(memstore, keys2); // INSERT 3 (3+3=6) long totalHeapSize2 = 3 * cellBeforeFlushSize; assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize()); assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize()); @@ -186,10 +186,10 @@ public void testCompaction2Buckets() throws IOException { ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact assertEquals(0, memstore.getSnapshot().getCellsCount());// COMPACT 6->4 counter = 0; - for ( Segment s : memstore.getSegments()) { + for (Segment s : memstore.getSegments()) { counter += s.getCellsCount(); } - assertEquals(4,counter); + assertEquals(4, counter); totalCellsLen2 = totalCellsLen2 / 3;// 2 cells duplicated in set 2 assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize()); totalHeapSize2 = 1 * cellAfterFlushSize; @@ -210,10 +210,10 @@ public void testCompaction2Buckets() throws IOException { public void testCompaction3Buckets() throws IOException { if (toCellChunkMap) { // set memstore to flat into CellChunkMap - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); } else { // set to CellArrayMap as CCM is configured by default due to MSLAB usage - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); } String[] keys1 = { "A", "A", "B", "C" }; String[] keys2 = { "A", "B", "D" }; @@ -234,9 +234,8 @@ public void testCompaction3Buckets() throws IOException { // totalCellsLen totalCellsLen1 = (totalCellsLen1 * 3) / 4; totalHeapSize1 = 3 * cellAfterFlushSize + MutableSegment.DEEP_OVERHEAD - + (toCellChunkMap ? - CellChunkImmutableSegment.DEEP_OVERHEAD_CCM : - CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); + + (toCellChunkMap ? CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + : CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize()); assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize()); @@ -257,9 +256,9 @@ public void testCompaction3Buckets() throws IOException { long totalCellsLen3 = addRowsByKeysDataSize(memstore, keys3); long totalHeapSize3 = 3 * cellBeforeFlushSize; assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3, - regionServicesForStores.getMemStoreSize()); + regionServicesForStores.getMemStoreSize()); assertEquals(totalHeapSize1 + totalHeapSize2 + totalHeapSize3, - ((CompactingMemStore) memstore).heapSize()); + ((CompactingMemStore) memstore).heapSize()); ((MyCompactingMemStore) memstore).enableCompaction(); mss = memstore.getFlushableSize(); @@ -273,12 +272,11 @@ public void testCompaction3Buckets() throws IOException { totalCellsLen2 = totalCellsLen2 / 3;// 2 out of 3 cells are duplicated totalCellsLen3 = 0;// All duplicated cells. 
assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3, - regionServicesForStores.getMemStoreSize()); + regionServicesForStores.getMemStoreSize()); // Only 4 unique cells left long totalHeapSize4 = 4 * cellAfterFlushSize + MutableSegment.DEEP_OVERHEAD - + (toCellChunkMap ? - CellChunkImmutableSegment.DEEP_OVERHEAD_CCM : - CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); + + (toCellChunkMap ? CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + : CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); assertEquals(totalHeapSize4, ((CompactingMemStore) memstore).heapSize()); mss = memstore.getFlushableSize(); @@ -299,16 +297,16 @@ public void testCompaction3Buckets() throws IOException { public void testMerging() throws IOException { if (toCellChunkMap) { // set memstore to flat into CellChunkMap - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); } - String[] keys1 = { "A", "A", "B", "C", "F", "H"}; - String[] keys2 = { "A", "B", "D", "G", "I", "J"}; + String[] keys1 = { "A", "A", "B", "C", "F", "H" }; + String[] keys2 = { "A", "B", "D", "G", "I", "J" }; String[] keys3 = { "D", "B", "B", "E" }; MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); - ((MyCompactingMemStore)memstore).initiateType(compactionType, memstore.getConfiguration()); + String.valueOf(compactionType)); + ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); addRowsByKeysDataSize(memstore, keys1); ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline should not compact @@ -321,7 +319,7 @@ public void testMerging() throws IOException { addRowsByKeysDataSize(memstore, keys2); // also should only flatten int counter2 = 0; - for ( Segment s : memstore.getSegments()) { + for (Segment s : memstore.getSegments()) { counter2 += s.getCellsCount(); } assertEquals(12, counter2); @@ -332,7 +330,7 @@ public void testMerging() throws IOException { assertEquals(0, memstore.getSnapshot().getCellsCount()); int counter3 = 0; - for ( Segment s : memstore.getSegments()) { + for (Segment s : memstore.getSegments()) { counter3 += s.getCellsCount(); } assertEquals(12, counter3); @@ -340,14 +338,13 @@ public void testMerging() throws IOException { addRowsByKeysDataSize(memstore, keys3); int counter4 = 0; - for ( Segment s : memstore.getSegments()) { + for (Segment s : memstore.getSegments()) { counter4 += s.getCellsCount(); } assertEquals(16, counter4); ((MyCompactingMemStore) memstore).enableCompaction(); - ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { Threads.sleep(10); @@ -355,10 +352,10 @@ public void testMerging() throws IOException { assertEquals(0, memstore.getSnapshot().getCellsCount()); int counter = 0; - for ( Segment s : memstore.getSegments()) { + for (Segment s : memstore.getSegments()) { counter += s.getCellsCount(); } - assertEquals(16,counter); + assertEquals(16, counter); MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot ImmutableSegment s = memstore.getSnapshot(); @@ -369,7 +366,7 @@ public void testMerging() throws IOException { public void testTimeRangeAfterCompaction() throws IOException { if (toCellChunkMap) { // set memstore to flat into CellChunkMap - 
((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); } testTimeRange(true); } @@ -378,12 +375,12 @@ public void testTimeRangeAfterCompaction() throws IOException { public void testTimeRangeAfterMerge() throws IOException { if (toCellChunkMap) { // set memstore to flat into CellChunkMap - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); } MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); - ((MyCompactingMemStore)memstore).initiateType(compactionType, memstore.getConfiguration()); + String.valueOf(compactionType)); + ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); testTimeRange(false); } @@ -395,16 +392,17 @@ private void testTimeRange(boolean isCompaction) throws IOException { byte[] qf1 = Bytes.toBytes("qf1"); // first segment in pipeline - this.memstore.add(new KeyValue(row, family, qf1, ++currentTs, (byte[])null), null); + this.memstore.add(new KeyValue(row, family, qf1, ++currentTs, (byte[]) null), null); long minTs = currentTs; - this.memstore.add(new KeyValue(row, family, qf1, ++currentTs, (byte[])null), null); + this.memstore.add(new KeyValue(row, family, qf1, ++currentTs, (byte[]) null), null); long numberOfCell = 2; - assertEquals(numberOfCell, memstore.getSegments().stream().mapToInt(Segment::getCellsCount).sum()); - assertEquals(minTs, memstore.getSegments().stream().mapToLong( - m -> m.getTimeRangeTracker().getMin()).min().getAsLong()); - assertEquals(currentTs, memstore.getSegments().stream().mapToLong( - m -> m.getTimeRangeTracker().getMax()).max().getAsLong()); + assertEquals(numberOfCell, + memstore.getSegments().stream().mapToInt(Segment::getCellsCount).sum()); + assertEquals(minTs, memstore.getSegments().stream() + .mapToLong(m -> m.getTimeRangeTracker().getMin()).min().getAsLong()); + assertEquals(currentTs, memstore.getSegments().stream() + .mapToLong(m -> m.getTimeRangeTracker().getMax()).max().getAsLong()); ((CompactingMemStore) memstore).flushInMemory(); @@ -417,14 +415,15 @@ private void testTimeRange(boolean isCompaction) throws IOException { minTs = currentTs; } // second segment in pipeline - this.memstore.add(new KeyValue(row, family, qf1, ++currentTs, (byte[])null), null); - this.memstore.add(new KeyValue(row, family, qf1, ++currentTs, (byte[])null), null); + this.memstore.add(new KeyValue(row, family, qf1, ++currentTs, (byte[]) null), null); + this.memstore.add(new KeyValue(row, family, qf1, ++currentTs, (byte[]) null), null); numberOfCell += 2; - assertEquals(numberOfCell, memstore.getSegments().stream().mapToInt(Segment::getCellsCount).sum()); - assertEquals(minTs, memstore.getSegments().stream().mapToLong( - m -> m.getTimeRangeTracker().getMin()).min().getAsLong()); - assertEquals(currentTs, memstore.getSegments().stream().mapToLong( - m -> m.getTimeRangeTracker().getMax()).max().getAsLong()); + assertEquals(numberOfCell, + memstore.getSegments().stream().mapToInt(Segment::getCellsCount).sum()); + assertEquals(minTs, memstore.getSegments().stream() + .mapToLong(m -> m.getTimeRangeTracker().getMin()).min().getAsLong()); + assertEquals(currentTs, memstore.getSegments().stream() + .mapToLong(m -> 
m.getTimeRangeTracker().getMax()).max().getAsLong()); ((CompactingMemStore) memstore).flushInMemory(); // trigger the merge @@ -437,11 +436,12 @@ private void testTimeRange(boolean isCompaction) throws IOException { minTs = currentTs; } - assertEquals(numberOfCell, memstore.getSegments().stream().mapToInt(Segment::getCellsCount).sum()); - assertEquals(minTs, memstore.getSegments().stream().mapToLong( - m -> m.getTimeRangeTracker().getMin()).min().getAsLong()); - assertEquals(currentTs, memstore.getSegments().stream().mapToLong( - m -> m.getTimeRangeTracker().getMax()).max().getAsLong()); + assertEquals(numberOfCell, + memstore.getSegments().stream().mapToInt(Segment::getCellsCount).sum()); + assertEquals(minTs, memstore.getSegments().stream() + .mapToLong(m -> m.getTimeRangeTracker().getMin()).min().getAsLong()); + assertEquals(currentTs, memstore.getSegments().stream() + .mapToLong(m -> m.getTimeRangeTracker().getMax()).max().getAsLong()); } @Test @@ -456,14 +456,14 @@ public void testCountOfCellsAfterFlatteningByScan() throws IOException { List scanners = memstore.getScanners(Long.MAX_VALUE); // seek int count = 0; - for(int i = 0; i < scanners.size(); i++) { + for (int i = 0; i < scanners.size(); i++) { scanners.get(i).seek(KeyValue.LOWESTKEY); while (scanners.get(i).next() != null) { count++; } } assertEquals("the count should be ", 150, count); - for(int i = 0; i < scanners.size(); i++) { + for (int i = 0; i < scanners.size(); i++) { scanners.get(i).close(); } } @@ -498,8 +498,8 @@ private void addRowsByKeysWith50Cols(AbstractMemStore hmc, String[] keys) { long timestamp = EnvironmentEdgeManager.currentTime(); Threads.sleep(1); // to make sure each kv gets a different ts byte[] row = Bytes.toBytes(keys[i]); - for(int j =0 ;j < 50; j++) { - byte[] qf = Bytes.toBytes("testqualifier"+j); + for (int j = 0; j < 50; j++) { + byte[] qf = Bytes.toBytes("testqualifier" + j); byte[] val = Bytes.toBytes(keys[i] + j); KeyValue kv = new KeyValue(row, fam, qf, timestamp, val); hmc.add(kv, null); @@ -541,7 +541,7 @@ public void testPuttingBackChunksWithOpeningScanner() throws IOException { // Shouldn't putting back the chunks to pool,since some scanners are opening // based on their data // close the scanners - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -572,7 +572,7 @@ public void testPuttingBackChunksWithOpeningScanner() throws IOException { // Since no opening scanner, the chunks of snapshot should be put back to // pool // close the scanners - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -606,7 +606,7 @@ public void testPuttingBackChunksAfterFlushing() throws IOException { memstore.add(new KeyValue(row, fam, qf5, val), null); assertEquals(2, memstore.getActive().getCellsCount()); // close the scanners - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -621,17 +621,16 @@ public void testFlatteningToCellChunkMap() throws IOException { // set memstore to flat into CellChunkMap MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); - 
((MyCompactingMemStore)memstore).initiateType(compactionType, memstore.getConfiguration()); - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); + String.valueOf(compactionType)); + ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); int numOfCells = 8; - String[] keys1 = { "A", "A", "B", "C", "D", "D", "E", "F" }; //A1, A2, B3, C4, D5, D6, E7, F8 + String[] keys1 = { "A", "A", "B", "C", "D", "D", "E", "F" }; // A1, A2, B3, C4, D5, D6, E7, F8 // make one cell byte[] row = Bytes.toBytes(keys1[0]); byte[] val = Bytes.toBytes(keys1[0] + 0); - KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + KeyValue kv = new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val); // test 1 bucket @@ -643,7 +642,7 @@ public void testFlatteningToCellChunkMap() throws IOException { assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize()); assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and flatten + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and flatten assertEquals(0, memstore.getSnapshot().getCellsCount()); long oneCellOnCCMHeapSize = ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize()); @@ -665,13 +664,12 @@ public void testFlatteningToCellChunkMap() throws IOException { } /** - * CellChunkMap Segment index requires all cell data to be written in the MSLAB Chunks. - * Even though MSLAB is enabled, cells bigger than maxAlloc - * (even if smaller than the size of a chunk) are not written in the MSLAB Chunks. - * If such cells are found in the process of flattening into CellChunkMap - * (in-memory-flush) they need to be copied into MSLAB. - * testFlatteningToBigCellChunkMap checks that the process of flattening into - * CellChunkMap succeeds, even when such big cells are allocated. + * CellChunkMap Segment index requires all cell data to be written in the MSLAB Chunks. Even + * though MSLAB is enabled, cells bigger than maxAlloc (even if smaller than the size of a chunk) + * are not written in the MSLAB Chunks. If such cells are found in the process of flattening into + * CellChunkMap (in-memory-flush) they need to be copied into MSLAB. + * testFlatteningToBigCellChunkMap checks that the process of flattening into CellChunkMap + * succeeds, even when such big cells are allocated. 
*/ @Test public void testFlatteningToBigCellChunkMap() throws IOException { @@ -682,36 +680,34 @@ public void testFlatteningToBigCellChunkMap() throws IOException { // set memstore to flat into CellChunkMap MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); - ((MyCompactingMemStore)memstore).initiateType(compactionType, memstore.getConfiguration()); - ((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); + String.valueOf(compactionType)); + ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); + ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); int numOfCells = 4; char[] chars = new char[MemStoreLAB.MAX_ALLOC_DEFAULT]; for (int i = 0; i < chars.length; i++) { chars[i] = 'A'; } String bigVal = new String(chars); - String[] keys1 = { "A", "B", "C", "D"}; + String[] keys1 = { "A", "B", "C", "D" }; // make one cell byte[] row = Bytes.toBytes(keys1[0]); byte[] val = Bytes.toBytes(bigVal); - KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + KeyValue kv = new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val); // test 1 bucket int totalCellsLen = addRowsByKeys(memstore, keys1, val); long oneCellOnCSLMHeapSize = - ClassSize.align( - ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize()); + ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize()); long totalHeapSize = numOfCells * oneCellOnCSLMHeapSize + MutableSegment.DEEP_OVERHEAD; assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize()); assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); - ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and flatten + ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and flatten while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { Threads.sleep(10); } @@ -719,9 +715,9 @@ public void testFlatteningToBigCellChunkMap() throws IOException { // One cell is duplicated, but it shouldn't be compacted because we are in BASIC mode. // totalCellsLen should remain the same long oneCellOnCCMHeapSize = - ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize()); + ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize()); totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM - + numOfCells * oneCellOnCCMHeapSize; + + numOfCells * oneCellOnCCMHeapSize; assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize()); assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); @@ -738,13 +734,12 @@ public void testFlatteningToBigCellChunkMap() throws IOException { } /** - * CellChunkMap Segment index requires all cell data to be written in the MSLAB Chunks. - * Even though MSLAB is enabled, cells bigger than the size of a chunk are not - * written in the MSLAB Chunks. - * If such cells are found in the process of flattening into CellChunkMap - * (in-memory-flush) they need to be copied into MSLAB. - * testFlatteningToJumboCellChunkMap checks that the process of flattening - * into CellChunkMap succeeds, even when such big cells are allocated. + * CellChunkMap Segment index requires all cell data to be written in the MSLAB Chunks. 
Even + * though MSLAB is enabled, cells bigger than the size of a chunk are not written in the MSLAB + * Chunks. If such cells are found in the process of flattening into CellChunkMap + * (in-memory-flush) they need to be copied into MSLAB. testFlatteningToJumboCellChunkMap checks + * that the process of flattening into CellChunkMap succeeds, even when such big cells are + * allocated. */ @Test public void testFlatteningToJumboCellChunkMap() throws IOException { @@ -755,7 +750,7 @@ public void testFlatteningToJumboCellChunkMap() throws IOException { // set memstore to flat into CellChunkMap MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); + String.valueOf(compactionType)); ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); @@ -765,21 +760,19 @@ public void testFlatteningToJumboCellChunkMap() throws IOException { chars[i] = 'A'; } String bigVal = new String(chars); - String[] keys1 = {"A"}; + String[] keys1 = { "A" }; // make one cell byte[] row = Bytes.toBytes(keys1[0]); byte[] val = Bytes.toBytes(bigVal); - KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + KeyValue kv = new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val); // test 1 bucket int totalCellsLen = addRowsByKeys(memstore, keys1, val); long oneCellOnCSLMHeapSize = - ClassSize.align( - ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize()); + ClassSize.align(ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + kv.heapSize()); long totalHeapSize = numOfCells * oneCellOnCSLMHeapSize + MutableSegment.DEEP_OVERHEAD; assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize()); @@ -796,10 +789,9 @@ public void testFlatteningToJumboCellChunkMap() throws IOException { long oneCellOnCCMHeapSize = (long) ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize()); totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM - + numOfCells * oneCellOnCCMHeapSize; + + numOfCells * oneCellOnCCMHeapSize; - assertEquals(totalCellsLen, regionServicesForStores - .getMemStoreSize()); + assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize()); assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); @@ -814,7 +806,7 @@ public void testFlatteningToJumboCellChunkMap() throws IOException { memstore.clearSnapshot(snapshot.getId()); // Allocating two big cells (too big for being copied into a regular chunk). - String[] keys2 = {"C", "D"}; + String[] keys2 = { "C", "D" }; addRowsByKeys(memstore, keys2, val); while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) { Threads.sleep(10); @@ -824,23 +816,22 @@ public void testFlatteningToJumboCellChunkMap() throws IOException { // but smaller than the size of two cells. // Therefore, the two created cells are flushed together as a single CSLMImmutableSegment and // flattened. 
- totalHeapSize = MutableSegment.DEEP_OVERHEAD - + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + 2 * oneCellOnCCMHeapSize; assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize()); } /** - * CellChunkMap Segment index requires all cell data to be written in the MSLAB Chunks. - * Even though MSLAB is enabled, cells bigger than the size of a chunk are not - * written in the MSLAB Chunks. - * If such cells are found in the process of a merge they need to be copied into MSLAB. - * testForceCopyOfBigCellIntoImmutableSegment checks that the - * ImmutableMemStoreLAB's forceCopyOfBigCellInto does what it's supposed to do. + * CellChunkMap Segment index requires all cell data to be written in the MSLAB Chunks. Even + * though MSLAB is enabled, cells bigger than the size of a chunk are not written in the MSLAB + * Chunks. If such cells are found in the process of a merge they need to be copied into MSLAB. + * testForceCopyOfBigCellIntoImmutableSegment checks that the ImmutableMemStoreLAB's + * forceCopyOfBigCellInto does what it's supposed to do. */ - @org.junit.Ignore @Test // Flakey. Disabled by HBASE-24128. HBASE-24129 is for reenable. + @org.junit.Ignore + @Test // Flakey. Disabled by HBASE-24128. HBASE-24129 is for reenable. // TestCompactingToCellFlatMapMemStore.testForceCopyOfBigCellIntoImmutableSegment:902 i=1 - // expected:<8389924> but was:<8389992> + // expected:<8389924> but was:<8389992> public void testForceCopyOfBigCellIntoImmutableSegment() throws IOException { if (toCellChunkMap == false) { @@ -849,12 +840,12 @@ public void testForceCopyOfBigCellIntoImmutableSegment() throws IOException { // set memstore to flat into CellChunkMap MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; - memstore.getConfiguration().setInt(MemStoreCompactionStrategy - .COMPACTING_MEMSTORE_THRESHOLD_KEY, 4); - memstore.getConfiguration() - .setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.014); + memstore.getConfiguration().setInt(MemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, + 4); + memstore.getConfiguration().setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, + 0.014); memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(compactionType)); + String.valueOf(compactionType)); ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); ((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP); @@ -867,14 +858,14 @@ public void testForceCopyOfBigCellIntoImmutableSegment() throws IOException { // We need to add two cells, three times, in order to guarantee a merge List keysList = new ArrayList<>(); - keysList.add(new String[]{"A", "B"}); - keysList.add(new String[]{"C", "D"}); - keysList.add(new String[]{"E", "F"}); - keysList.add(new String[]{"G", "H"}); + keysList.add(new String[] { "A", "B" }); + keysList.add(new String[] { "C", "D" }); + keysList.add(new String[] { "E", "F" }); + keysList.add(new String[] { "G", "H" }); // Measuring the size of a single kv KeyValue kv = new KeyValue(Bytes.toBytes("A"), Bytes.toBytes("testfamily"), - Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val); + Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val); long oneCellOnCCMHeapSize = (long) ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize()); long oneCellOnCSLMHeapSize = @@ -886,9 
+877,9 @@ public void testForceCopyOfBigCellIntoImmutableSegment() throws IOException { Threads.sleep(10); } - if(i==0) { - totalHeapSize += CellChunkImmutableSegment.DEEP_OVERHEAD_CCM - + oneCellOnCCMHeapSize + oneCellOnCSLMHeapSize; + if (i == 0) { + totalHeapSize += CellChunkImmutableSegment.DEEP_OVERHEAD_CCM + oneCellOnCCMHeapSize + + oneCellOnCSLMHeapSize; } else { // The in-memory flush size is bigger than the size of a single cell, // but smaller than the size of two cells. @@ -900,7 +891,7 @@ public void testForceCopyOfBigCellIntoImmutableSegment() throws IOException { totalHeapSize -= (4 * CellChunkImmutableSegment.DEEP_OVERHEAD_CCM); totalHeapSize = ClassSize.align(totalHeapSize); } - assertEquals("i="+i, totalHeapSize, ((CompactingMemStore) memstore).heapSize()); + assertEquals("i=" + i, totalHeapSize, ((CompactingMemStore) memstore).heapSize()); } } @@ -910,18 +901,18 @@ public void testForceCopyOfBigCellIntoImmutableSegment() throws IOException { @Test public void testBigCellSizeAfterInMemoryCompaction() throws IOException { MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; - memstore.getConfiguration().setInt(MemStoreCompactionStrategy - .COMPACTING_MEMSTORE_THRESHOLD_KEY, 1); + memstore.getConfiguration().setInt(MemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, + 1); memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType)); ((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration()); byte[] val = new byte[MemStoreLAB.CHUNK_SIZE_DEFAULT]; - long size = addRowsByKeys(memstore, new String[]{"A"}, val); + long size = addRowsByKeys(memstore, new String[] { "A" }, val); ((MyCompactingMemStore) memstore).flushInMemory(); - for(KeyValueScanner scanner : memstore.getScanners(Long.MAX_VALUE)) { + for (KeyValueScanner scanner : memstore.getScanners(Long.MAX_VALUE)) { Cell cell; while ((cell = scanner.next()) != null) { assertEquals(size, cell.getSerializedSize()); @@ -929,7 +920,6 @@ public void testBigCellSizeAfterInMemoryCompaction() throws IOException { } } - private long addRowsByKeysDataSize(final AbstractMemStore hmc, String[] keys) { byte[] fam = Bytes.toBytes("testfamily"); byte[] qf = Bytes.toBytes("testqualifier"); @@ -953,25 +943,21 @@ private long cellBeforeFlushSize() { // make one cell byte[] row = Bytes.toBytes("A"); byte[] val = Bytes.toBytes("A" + 0); - KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + KeyValue kv = new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val); return ClassSize.align( - ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + KeyValue.FIXED_OVERHEAD + kv.getSerializedSize()); + ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + KeyValue.FIXED_OVERHEAD + kv.getSerializedSize()); } private long cellAfterFlushSize() { // make one cell byte[] row = Bytes.toBytes("A"); byte[] val = Bytes.toBytes("A" + 0); - KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + KeyValue kv = new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val); - return toCellChunkMap ? - ClassSize.align( - ClassSize.CELL_CHUNK_MAP_ENTRY + kv.getSerializedSize()) : - ClassSize.align( - ClassSize.CELL_ARRAY_MAP_ENTRY + KeyValue.FIXED_OVERHEAD + kv.getSerializedSize()); + return toCellChunkMap ? 
ClassSize.align(ClassSize.CELL_CHUNK_MAP_ENTRY + kv.getSerializedSize()) + : ClassSize.align( + ClassSize.CELL_ARRAY_MAP_ENTRY + KeyValue.FIXED_OVERHEAD + kv.getSerializedSize()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index 3c3b162097f0..050425d7f6e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -92,12 +92,12 @@ /** * Test compaction framework and common functions */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestCompaction { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCompaction.class); + HBaseClassTestRule.forClass(TestCompaction.class); @Rule public TestName name = new TestName(); @@ -106,9 +106,9 @@ public class TestCompaction { private HRegion r = null; private TableDescriptor tableDescriptor = null; - private static final byte [] COLUMN_FAMILY = fam1; - private final byte [] STARTROW = Bytes.toBytes(START_KEY); - private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY; + private static final byte[] COLUMN_FAMILY = fam1; + private final byte[] STARTROW = Bytes.toBytes(START_KEY); + private static final byte[] COLUMN_FAMILY_TEXT = COLUMN_FAMILY; private int compactionThreshold; private byte[] secondRowBytes, thirdRowBytes; private static final long MAX_FILES_TO_COMPACT = 10; @@ -122,7 +122,7 @@ public TestCompaction() { conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024); conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100); conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY, - NoLimitThroughputController.class.getName()); + NoLimitThroughputController.class.getName()); compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); secondRowBytes = START_KEY_BYTES.clone(); @@ -141,7 +141,7 @@ public void setUp() throws Exception { UTIL.getConfiguration().set(DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY, DummyCompactor.class.getName()); ColumnFamilyDescriptor familyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(65536).build(); + ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(65536).build(); builder.setColumnFamily(familyDescriptor); } this.tableDescriptor = builder.build(); @@ -156,8 +156,7 @@ public void tearDown() throws Exception { } /** - * Verify that you can stop a long-running compaction - * (used during RS shutdown) + * Verify that you can stop a long-running compaction (used during RS shutdown) */ @Test public void testInterruptCompactionBySize() throws Exception { @@ -168,8 +167,8 @@ public void testInterruptCompactionBySize() throws Exception { try { // Create a couple store files w/ 15KB (over 10KB interval) - int jmax = (int) Math.ceil(15.0/compactionThreshold); - byte [] pad = new byte[1000]; // 1 KB chunk + int jmax = (int) Math.ceil(15.0 / compactionThreshold); + byte[] pad = new byte[1000]; // 1 KB chunk for (int i = 0; i < compactionThreshold; i++) { Table loader = new RegionAsTable(r); Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i))); @@ -197,12 +196,13 @@ public Object answer(InvocationOnMock invocation) throws Throwable { // ensure that the compaction stopped, all old files are intact, HStore s = 
r.getStore(COLUMN_FAMILY); assertEquals(compactionThreshold, s.getStorefilesCount()); - assertTrue(s.getStorefilesSize() > 15*1000); + assertTrue(s.getStorefilesSize() > 15 * 1000); // and no new store files persisted past compactStores() // only one empty dir exists in temp dir FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir()); assertEquals(1, ls.length); - Path storeTempDir = new Path(r.getRegionFileSystem().getTempDir(), Bytes.toString(COLUMN_FAMILY)); + Path storeTempDir = + new Path(r.getRegionFileSystem().getTempDir(), Bytes.toString(COLUMN_FAMILY)); assertTrue(r.getFilesystem().exists(storeTempDir)); ls = r.getFilesystem().listStatus(storeTempDir); assertEquals(0, ls.length); @@ -214,7 +214,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable { // Delete all Store information once done using for (int i = 0; i < compactionThreshold; i++) { Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i))); - byte [][] famAndQf = {COLUMN_FAMILY, null}; + byte[][] famAndQf = { COLUMN_FAMILY, null }; delete.addFamily(famAndQf[0]); r.delete(delete); } @@ -244,8 +244,8 @@ public void testInterruptCompactionByTime() throws Exception { try { // Create a couple store files w/ 15KB (over 10KB interval) - int jmax = (int) Math.ceil(15.0/compactionThreshold); - byte [] pad = new byte[1000]; // 1 KB chunk + int jmax = (int) Math.ceil(15.0 / compactionThreshold); + byte[] pad = new byte[1000]; // 1 KB chunk for (int i = 0; i < compactionThreshold; i++) { Table loader = new RegionAsTable(r); Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i))); @@ -273,13 +273,13 @@ public Object answer(InvocationOnMock invocation) throws Throwable { // ensure that the compaction stopped, all old files are intact, HStore s = r.getStore(COLUMN_FAMILY); assertEquals(compactionThreshold, s.getStorefilesCount()); - assertTrue(s.getStorefilesSize() > 15*1000); + assertTrue(s.getStorefilesSize() > 15 * 1000); // and no new store files persisted past compactStores() // only one empty dir exists in temp dir FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir()); assertEquals(1, ls.length); Path storeTempDir = - new Path(r.getRegionFileSystem().getTempDir(), Bytes.toString(COLUMN_FAMILY)); + new Path(r.getRegionFileSystem().getTempDir(), Bytes.toString(COLUMN_FAMILY)); assertTrue(r.getFilesystem().exists(storeTempDir)); ls = r.getFilesystem().listStatus(storeTempDir); assertEquals(0, ls.length); @@ -291,7 +291,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable { // Delete all Store information once done using for (int i = 0; i < compactionThreshold; i++) { Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i))); - byte [][] famAndQf = {COLUMN_FAMILY, null}; + byte[][] famAndQf = { COLUMN_FAMILY, null }; delete.addFamily(famAndQf[0]); r.delete(delete); } @@ -384,8 +384,7 @@ public void testTrackingCompactionRequest() throws Exception { CountDownLatch latch = new CountDownLatch(1); Tracker tracker = new Tracker(latch); - thread.requestCompaction(r, store, "test custom comapction", PRIORITY_USER, tracker, - null); + thread.requestCompaction(r, store, "test custom comapction", PRIORITY_USER, tracker, null); // wait for the latch to complete. latch.await(); @@ -408,8 +407,8 @@ public void testCompactionFailure() throws Exception { } HRegion mockRegion = Mockito.spy(r); - Mockito.when(mockRegion.checkSplit()). 
- thenThrow(new RuntimeException("Thrown intentionally by test!")); + Mockito.when(mockRegion.checkSplit()) + .thenThrow(new RuntimeException("Thrown intentionally by test!")); try (MetricsRegionWrapperImpl metricsWrapper = new MetricsRegionWrapperImpl(r)) { @@ -427,10 +426,12 @@ public void testCompactionFailure() throws Exception { long postCompletedCount = metricsWrapper.getNumCompactionsCompleted(); long postFailedCount = metricsWrapper.getNumCompactionsFailed(); - assertTrue("Completed count should have increased (pre=" + preCompletedCount + ", post=" + - postCompletedCount + ")", postCompletedCount > preCompletedCount); - assertTrue("Failed count should have increased (pre=" + preFailedCount + ", post=" + - postFailedCount + ")", postFailedCount > preFailedCount); + assertTrue("Completed count should have increased (pre=" + preCompletedCount + ", post=" + + postCompletedCount + ")", + postCompletedCount > preCompletedCount); + assertTrue("Failed count should have increased (pre=" + preFailedCount + ", post=" + + postFailedCount + ")", + postFailedCount > preFailedCount); } } @@ -456,29 +457,31 @@ public void testStopStartCompaction() throws IOException { assertFalse(thread.isCompactionsEnabled()); int longCompactions = thread.getLongCompactions().getActiveCount(); int shortCompactions = thread.getShortCompactions().getActiveCount(); - assertEquals("longCompactions=" + longCompactions + "," + - "shortCompactions=" + shortCompactions, 0, longCompactions + shortCompactions); + assertEquals( + "longCompactions=" + longCompactions + "," + "shortCompactions=" + shortCompactions, 0, + longCompactions + shortCompactions); thread.switchCompaction(true); assertTrue(thread.isCompactionsEnabled()); // Make sure no compactions have run. - assertEquals(0, thread.getLongCompactions().getCompletedTaskCount() + - thread.getShortCompactions().getCompletedTaskCount()); + assertEquals(0, thread.getLongCompactions().getCompletedTaskCount() + + thread.getShortCompactions().getCompletedTaskCount()); // Request a compaction and make sure it is submitted successfully. thread.requestCompaction(r, store, "test", Store.PRIORITY_USER, - CompactionLifeCycleTracker.DUMMY, null); + CompactionLifeCycleTracker.DUMMY, null); // Wait until the compaction finishes. Waiter.waitFor(UTIL.getConfiguration(), 5000, - (Waiter.Predicate) () -> thread.getLongCompactions().getCompletedTaskCount() + - thread.getShortCompactions().getCompletedTaskCount() == 1); + (Waiter.Predicate) () -> thread.getLongCompactions().getCompletedTaskCount() + + thread.getShortCompactions().getCompletedTaskCount() == 1); // Make sure there are no compactions running. 
- assertEquals(0, thread.getLongCompactions().getActiveCount() - + thread.getShortCompactions().getActiveCount()); + assertEquals(0, + thread.getLongCompactions().getActiveCount() + thread.getShortCompactions().getActiveCount()); } - @Test public void testInterruptingRunningCompactions() throws Exception { + @Test + public void testInterruptingRunningCompactions() throws Exception { // setup a compact/split thread on a mock server conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY, - WaitThroughPutController.class.getName()); + WaitThroughPutController.class.getName()); HRegionServer mockServer = Mockito.mock(HRegionServer.class); Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf()); CompactSplit thread = new CompactSplit(mockServer); @@ -504,18 +507,17 @@ public void testStopStartCompaction() throws IOException { int initialFiles = s.getStorefilesCount(); thread.requestCompaction(r, store, "test custom comapction", PRIORITY_USER, - CompactionLifeCycleTracker.DUMMY, null); + CompactionLifeCycleTracker.DUMMY, null); Thread.sleep(3000); thread.switchCompaction(false); assertEquals(initialFiles, s.getStorefilesCount()); - //don't mess up future tests + // don't mess up future tests thread.switchCompaction(true); } /** - * HBASE-7947: Regression test to ensure adding to the correct list in the - * {@link CompactSplit} + * HBASE-7947: Regression test to ensure adding to the correct list in the {@link CompactSplit} * @throws Exception on failure */ @Test @@ -536,8 +538,8 @@ public void testMultipleCustomCompactionRequests() throws Exception { createStoreFile(r, store.getColumnFamilyName()); createStoreFile(r, store.getColumnFamilyName()); createStoreFile(r, store.getColumnFamilyName()); - thread.requestCompaction(r, store, "test mulitple custom comapctions", PRIORITY_USER, - tracker, null); + thread.requestCompaction(r, store, "test mulitple custom comapctions", PRIORITY_USER, tracker, + null); } // wait for the latch to complete. latch.await(); @@ -599,7 +601,7 @@ public synchronized Optional selectCompaction() { @Override public synchronized void cancelCompaction(Object object) { - TestCompactionContext ctx = (TestCompactionContext)object; + TestCompactionContext ctx = (TestCompactionContext) object; compacting.removeAll(ctx.selectedFiles); notCompacting.addAll(ctx.selectedFiles); } @@ -669,7 +671,8 @@ public Optional selectCompaction() { } @Override - public void cancelCompaction(Object object) {} + public void cancelCompaction(Object object) { + } @Override public int getPriority() { @@ -709,17 +712,16 @@ public void testCompactionQueuePriorities() throws Exception { when(mockServer.getChoreService()).thenReturn(new ChoreService("test")); CompactSplit cst = new CompactSplit(mockServer); when(mockServer.getCompactSplitThread()).thenReturn(cst); - //prevent large compaction thread pool stealing job from small compaction queue. + // prevent large compaction thread pool stealing job from small compaction queue. cst.shutdownLongCompactions(); // Set up the region mock that redirects compactions. HRegion r = mock(HRegion.class); - when( - r.compact(any(), any(), any(), any())).then(new Answer() { - @Override - public Boolean answer(InvocationOnMock invocation) throws Throwable { - invocation.getArgument(0).compact(invocation.getArgument(2), null); - return true; - } + when(r.compact(any(), any(), any(), any())).then(new Answer() { + @Override + public Boolean answer(InvocationOnMock invocation) throws Throwable { + invocation. 
getArgument(0).compact(invocation.getArgument(2), null); + return true; + } }); // Set up store mocks for 2 "real" stores and the one we use for blocking CST. @@ -774,20 +776,16 @@ public Boolean answer(InvocationOnMock invocation) throws Throwable { } /** - * Firstly write 10 cells (with different time stamp) to a qualifier and flush - * to hfile1, then write 10 cells (with different time stamp) to the same - * qualifier and flush to hfile2. The latest cell (cell-A) in hfile1 and the - * oldest cell (cell-B) in hfile2 are with the same time stamp but different - * sequence id, and will get scanned successively during compaction. + * Firstly write 10 cells (with different time stamp) to a qualifier and flush to hfile1, then + * write 10 cells (with different time stamp) to the same qualifier and flush to hfile2. The + * latest cell (cell-A) in hfile1 and the oldest cell (cell-B) in hfile2 are with the same time + * stamp but different sequence id, and will get scanned successively during compaction. *

          - * We set compaction.kv.max to 10 so compaction will scan 10 versions each - * round, meanwhile we set keepSeqIdPeriod=0 in {@link DummyCompactor} so all - * 10 versions of hfile2 will be written out with seqId cleaned (set to 0) - * including cell-B, then when scanner goes to cell-A it will cause a scan - * out-of-order assertion error before HBASE-16931 - * - * @throws Exception - * if error occurs during the test + * We set compaction.kv.max to 10 so compaction will scan 10 versions each round, meanwhile we set + * keepSeqIdPeriod=0 in {@link DummyCompactor} so all 10 versions of hfile2 will be written out + * with seqId cleaned (set to 0) including cell-B, then when scanner goes to cell-A it will cause + * a scan out-of-order assertion error before HBASE-16931 + * @throws Exception if error occurs during the test */ @Test public void testCompactionSeqId() throws Exception { @@ -870,7 +868,7 @@ public void afterExecution(Store store) { * Simple {@link CompactionLifeCycleTracker} on which you can wait until the requested compaction * finishes. */ - public static class WaitThroughPutController extends NoLimitThroughputController{ + public static class WaitThroughPutController extends NoLimitThroughputController { public WaitThroughPutController() { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionAfterBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionAfterBulkLoad.java index 70b81a3bc7df..26bb689678eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionAfterBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionAfterBulkLoad.java @@ -18,17 +18,16 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.regionserver.HRegion.COMPACTION_AFTER_BULKLOAD_ENABLE; - import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.hamcrest.MockitoHamcrest.argThat; + import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -56,7 +55,7 @@ public class TestCompactionAfterBulkLoad extends TestBulkloadBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCompactionAfterBulkLoad.class); + HBaseClassTestRule.forClass(TestCompactionAfterBulkLoad.class); private final RegionServerServices regionServerServices = mock(RegionServerServices.class); public static AtomicInteger called = new AtomicInteger(0); @@ -131,18 +130,18 @@ public void testAvoidRepeatedlyRequestCompactAfterBulkLoad() throws IOException when(regionServerServices.getConfiguration()).thenReturn(conf); when(regionServerServices.getCompactionRequestor()).thenReturn(compactSplit); when(log.appendMarker(any(), any(), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD)))) - .thenAnswer(new Answer() { - @Override - public Object answer(InvocationOnMock invocation) { - WALKeyImpl walKey = invocation.getArgument(1); - MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); - if (mvcc != null) { - MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin(); - walKey.setWriteEntry(we); + .thenAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock 
invocation) { + WALKeyImpl walKey = invocation.getArgument(1); + MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); + if (mvcc != null) { + MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin(); + walKey.setWriteEntry(we); + } + return 01L; } - return 01L; - } - }); + }); HRegion region = testRegionWithFamilies(family1, family2, family3); region.bulkLoadHFiles(familyPaths, false, null); @@ -161,8 +160,8 @@ private class TestCompactSplit extends CompactSplit { @Override protected void requestCompactionInternal(HRegion region, HStore store, String why, int priority, - boolean selectNow, CompactionLifeCycleTracker tracker, - CompactionCompleteTracker completeTracker, User user) throws IOException { + boolean selectNow, CompactionLifeCycleTracker tracker, + CompactionCompleteTracker completeTracker, User user) throws IOException { called.addAndGet(1); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java index 1837bfae4d9f..21fb5b8d3838 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ * Tests a race condition between archiving of compacted files in CompactedHFilesDischarger chore * and HRegion.close(); */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestCompactionArchiveConcurrentClose { @ClassRule @@ -172,8 +172,8 @@ private HRegion initHRegion(TableDescriptor htd, RegionInfo info) throws IOExcep HRegionFileSystem fs = new WaitingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); final Configuration walConf = new Configuration(conf); CommonFSUtils.setRootDir(walConf, tableDir); final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java index c5af9a452280..e3742dbaae73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -156,8 +156,7 @@ public void testRemoveCompactedFilesWithException() throws Exception { out.close(); HStoreFile errStoreFile = new MockHStoreFile(testUtil, errFile, 1, 0, false, 1); - fileManager.addCompactionResults( - ImmutableList.of(errStoreFile), ImmutableList.of()); + fileManager.addCompactionResults(ImmutableList.of(errStoreFile), ImmutableList.of()); // cleanup compacted files cleaner.chore(); @@ -181,8 +180,8 @@ public void testRemoveCompactedFilesWithException() throws Exception { private HRegion initHRegion(TableDescriptor htd, RegionInfo info) throws IOException { Configuration conf = testUtil.getConfiguration(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); Path tableDir = CommonFSUtils.getTableDir(testDir, htd.getTableName()); Path regionDir = new Path(tableDir, info.getEncodedName()); Path storeDir = new Path(regionDir, htd.getColumnFamilies()[0].getNameAsString()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java index c5cc9ca20c84..14cc50528d45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,8 +77,7 @@ public class TestCompactionFileNotFound { @BeforeClass public static void setupBeforeClass() throws Exception { Configuration conf = util.getConfiguration(); - conf.setInt("hbase.hfile.compaction.discharger.interval", - Integer.MAX_VALUE); + conf.setInt("hbase.hfile.compaction.discharger.interval", Integer.MAX_VALUE); util.startMiniCluster(3); } @@ -132,7 +131,7 @@ public void testSplitAfterRefresh() throws Exception { int numRegionsBeforeSplit = admin.getRegions(TEST_TABLE).size(); // Check if we can successfully split after compaction admin.splitRegionAsync(admin.getRegions(TEST_TABLE).get(0).getEncodedNameAsBytes(), ROW_C) - .get(); + .get(); util.waitFor(20000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java index 32c602d45525..4cc3e58f47b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -100,7 +100,7 @@ protected void tryRegionServerReport(long reportStartTime, long reportEndTime) @Parameters(name = "{index}: wal={0}") public static List params() { return Arrays.asList(new Object[] { FSHLogProvider.class }, - new Object[] { AsyncFSWALProvider.class }); + new Object[] { AsyncFSWALProvider.class }); } @Before @@ -169,8 +169,8 @@ public String explainFailure() throws Exception { }); try { region.compact(true); - fail("Should fail as our wal file has already been closed, " + - "and walDir has also been renamed"); + fail("Should fail as our wal file has already been closed, " + + "and walDir has also been renamed"); } catch (Exception e) { LOG.debug("expected exception: ", e); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java index a4a3f865b202..c1addfa5366d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -155,28 +155,18 @@ public void setUp() throws IOException { try (Table table = UTIL.getConnection().getTable(NAME)) { for (int i = 0; i < 100; i++) { byte[] row = Bytes.toBytes(i); - table.put(new Put(row) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(CF1) - .setQualifier(QUALIFIER) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(i)) - .build())); + table.put( + new Put(row).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row) + .setFamily(CF1).setQualifier(QUALIFIER).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(Cell.Type.Put).setValue(Bytes.toBytes(i)).build())); } UTIL.getAdmin().flush(NAME); for (int i = 100; i < 200; i++) { byte[] row = Bytes.toBytes(i); - table.put(new Put(row) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(CF1) - .setQualifier(QUALIFIER) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Type.Put) - .setValue(Bytes.toBytes(i)) - .build())); + table.put( + new Put(row).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row) + .setFamily(CF1).setQualifier(QUALIFIER).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(Type.Put).setValue(Bytes.toBytes(i)).build())); } UTIL.getAdmin().flush(NAME); } @@ -277,7 +267,8 @@ public void testRequestOnStore() throws IOException, InterruptedException { // This test assumes that compaction wouldn't happen with null user. // But null user means system generated compaction so compaction should happen // even if the space quota is violated. So this test should be removed/ignored. 
- @Ignore @Test + @Ignore + @Test public void testSpaceQuotaViolation() throws IOException, InterruptedException { region.getRegionServerServices().getRegionServerSpaceQuotaManager().enforceViolationPolicy(NAME, new SpaceQuotaSnapshot(new SpaceQuotaStatus(SpaceViolationPolicy.NO_WRITES_COMPACTIONS), 10L, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java index bbf7250dd042..1be0ef40822c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.io.IOException; @@ -54,8 +53,8 @@ public class TestCompactionPolicy { protected Configuration conf; protected HStore store; - private static final String DIR = TEST_UTIL.getDataTestDir( - TestCompactionPolicy.class.getSimpleName()).toString(); + private static final String DIR = + TEST_UTIL.getDataTestDir(TestCompactionPolicy.class.getSimpleName()).toString(); protected static Path TEST_FILE; protected static final int minFiles = 3; protected static final int maxFiles = 5; @@ -94,20 +93,20 @@ protected void initialize() throws IOException { String logName = "logs"; Path logdir = new Path(DIR, logName); ColumnFamilyDescriptor familyDescriptor = - ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family")); + ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family")); FileSystem fs = FileSystem.get(conf); fs.delete(logdir, true); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes("table"))) - .setColumnFamily(familyDescriptor).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes("table"))) + .setColumnFamily(familyDescriptor).build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); hlog = new FSHLog(fs, basedir, logName, conf); hlog.init(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); region = HRegion.createHRegion(info, basedir, conf, tableDescriptor, hlog); region.close(); Path tableDir = CommonFSUtils.getTableDir(basedir, tableDescriptor.getTableName()); @@ -171,8 +170,8 @@ List sfCreate(boolean isReference, ArrayList sizes, ArrayList< throws IOException { List ret = Lists.newArrayList(); for (int i = 0; i < sizes.size(); i++) { - ret.add(new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference, - i)); + ret.add( + new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference, i)); } return ret; } @@ -199,8 +198,8 @@ void compactEquals(List candidates, boolean forcemajor, boolean isOf store.forceMajor = forcemajor; // Test Default compactions CompactionRequestImpl result = - ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy()).selectCompaction( - candidates, new ArrayList<>(), false, isOffPeak, forcemajor); + ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy()) + .selectCompaction(candidates, new ArrayList<>(), false, isOffPeak, forcemajor); List actual = new 
ArrayList<>(result.getFiles()); if (isOffPeak && !forcemajor) { Assert.assertTrue(result.isOffPeak()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java index de6c54628f56..9b8c861d5304 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -46,8 +45,8 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -/** Unit tests to test retrieving table/region compaction state*/ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +/** Unit tests to test retrieving table/region compaction state */ +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestCompactionState { @ClassRule @@ -118,8 +117,8 @@ public void testMinorCompactionOnFamilyStateFromMaster() @Test public void testInvalidColumnFamily() throws IOException, InterruptedException { final TableName tableName = TableName.valueOf(name.getMethodName()); - byte [] family = Bytes.toBytes("family"); - byte [] fakecf = Bytes.toBytes("fakecf"); + byte[] family = Bytes.toBytes("family"); + byte[] fakecf = Bytes.toBytes("fakecf"); boolean caughtMinorCompact = false; boolean caughtMajorCompact = false; Table ht = null; @@ -146,9 +145,8 @@ public void testInvalidColumnFamily() throws IOException, InterruptedException { } /** - * Load data to a table, flush it to disk, trigger compaction, - * confirm the compaction state is right and wait till it is done. - * + * Load data to a table, flush it to disk, trigger compaction, confirm the compaction state is + * right and wait till it is done. 
* @param tableName * @param flushes * @param expectedState @@ -162,9 +160,9 @@ private void compaction(final String tableName, final int flushes, throws IOException, InterruptedException { // Create a table with regions TableName table = TableName.valueOf(tableName); - byte [] family = Bytes.toBytes("family"); - byte [][] families = - {family, Bytes.add(family, Bytes.toBytes("2")), Bytes.add(family, Bytes.toBytes("3"))}; + byte[] family = Bytes.toBytes("family"); + byte[][] families = + { family, Bytes.add(family, Bytes.toBytes("2")), Bytes.add(family, Bytes.toBytes("3")) }; Table ht = null; try { ht = TEST_UTIL.createTable(table, families); @@ -201,7 +199,7 @@ private void compaction(final String tableName, final int flushes, // Now, should have the right compaction state, // otherwise, the compaction should have already been done if (expectedState != state) { - for (Region region: regions) { + for (Region region : regions) { state = CompactionState.valueOf(region.getCompactionState().toString()); assertEquals(CompactionState.NONE, state); } @@ -240,27 +238,25 @@ private void compaction(final String tableName, final int flushes, private static CompactionState getCompactionState(StateSource stateSource, HMaster master, Admin admin, TableName table) throws IOException { - CompactionState state = stateSource == StateSource.ADMIN ? - admin.getCompactionState(table) : - master.getCompactionState(table); + CompactionState state = stateSource == StateSource.ADMIN ? admin.getCompactionState(table) + : master.getCompactionState(table); return state; } - private static int countStoreFilesInFamily( - List regions, final byte[] family) { - return countStoreFilesInFamilies(regions, new byte[][]{family}); + private static int countStoreFilesInFamily(List regions, final byte[] family) { + return countStoreFilesInFamilies(regions, new byte[][] { family }); } private static int countStoreFilesInFamilies(List regions, final byte[][] families) { int count = 0; - for (HRegion region: regions) { + for (HRegion region : regions) { count += region.getStoreFileList(families).size(); } return count; } - private static void loadData(final Table ht, final byte[][] families, - final int rows, final int flushes) throws IOException { + private static void loadData(final Table ht, final byte[][] families, final int rows, + final int flushes) throws IOException { List puts = new ArrayList<>(rows); byte[] qualifier = Bytes.toBytes("val"); Random rand = ThreadLocalRandom.current(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithByteBuff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithByteBuff.java index adc4f6dc1881..ba637ba7ad60 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithByteBuff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithByteBuff.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -109,8 +108,7 @@ public void testCompaction() throws Exception { } } - private Table createTable(HBaseTestingUtil util, TableName tableName) - throws IOException { + private Table createTable(HBaseTestingUtil util, TableName tableName) throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily( diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithCoprocessor.java index 572a0baad121..619256a7298a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionWithCoprocessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,10 +25,10 @@ import org.junit.experimental.categories.Category; /** - * Make sure compaction tests still pass with the preFlush and preCompact - * overridden to implement the default behavior + * Make sure compaction tests still pass with the preFlush and preCompact overridden to implement + * the default behavior */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestCompactionWithCoprocessor extends TestCompaction { @ClassRule @@ -39,6 +39,6 @@ public class TestCompactionWithCoprocessor extends TestCompaction { public TestCompactionWithCoprocessor() throws Exception { super(); conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - NoOpScanPolicyObserver.class.getName()); + NoOpScanPolicyObserver.class.getName()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactorMemLeak.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactorMemLeak.java index 6a0a8baa9ded..07ef746af110 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactorMemLeak.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactorMemLeak.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -128,8 +127,8 @@ public MyCompactor(Configuration conf, HStore store) { } @Override - protected List commitWriter(FileDetails fd, - CompactionRequestImpl request) throws IOException { + protected List commitWriter(FileDetails fd, CompactionRequestImpl request) + throws IOException { HFileWriterImpl writerImpl = (HFileWriterImpl) writer.writer; Cell cell = writerImpl.getLastCell(); // The cell should be backend with an KeyOnlyKeyValue. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index ac4dd965f4be..c9bc5b738dad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,54 +61,53 @@ import org.slf4j.LoggerFactory; /** - * Tests writing Bloom filter blocks in the same part of the file as data - * blocks. + * Tests writing Bloom filter blocks in the same part of the file as data blocks. */ -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestCompoundBloomFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCompoundBloomFilter.class); - private static final HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final Logger LOG = LoggerFactory.getLogger( - TestCompoundBloomFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(TestCompoundBloomFilter.class); private static final int NUM_TESTS = 9; - private static final BloomType BLOOM_TYPES[] = { BloomType.ROW, - BloomType.ROW, BloomType.ROWCOL, BloomType.ROWCOL, BloomType.ROW, - BloomType.ROWCOL, BloomType.ROWCOL, BloomType.ROWCOL, BloomType.ROW }; + private static final BloomType BLOOM_TYPES[] = + { BloomType.ROW, BloomType.ROW, BloomType.ROWCOL, BloomType.ROWCOL, BloomType.ROW, + BloomType.ROWCOL, BloomType.ROWCOL, BloomType.ROWCOL, BloomType.ROW }; private static final int NUM_KV[]; static { final int N = 10000; // Only used in initialization. - NUM_KV = new int[] { 21870, N, N, N, N, 1000, N, 7500, 7500}; + NUM_KV = new int[] { 21870, N, N, N, N, 1000, N, 7500, 7500 }; assert NUM_KV.length == NUM_TESTS; } private static final int BLOCK_SIZES[]; static { final int blkSize = 65536; - BLOCK_SIZES = new int[] { 512, 1000, blkSize, blkSize, blkSize, 128, 300, - blkSize, blkSize }; + BLOCK_SIZES = new int[] { 512, 1000, blkSize, blkSize, blkSize, 128, 300, blkSize, blkSize }; assert BLOCK_SIZES.length == NUM_TESTS; } /** - * Be careful not to specify too high a Bloom filter block size, otherwise - * there will only be one oversized chunk and the observed false positive - * rate will be too low. + * Be careful not to specify too high a Bloom filter block size, otherwise there will only be one + * oversized chunk and the observed false positive rate will be too low. */ - private static final int BLOOM_BLOCK_SIZES[] = { 1000, 4096, 4096, 4096, - 8192, 128, 1024, 600, 600 }; - static { assert BLOOM_BLOCK_SIZES.length == NUM_TESTS; } + private static final int BLOOM_BLOCK_SIZES[] = + { 1000, 4096, 4096, 4096, 8192, 128, 1024, 600, 600 }; + static { + assert BLOOM_BLOCK_SIZES.length == NUM_TESTS; + } - private static final double TARGET_ERROR_RATES[] = { 0.025, 0.01, 0.015, - 0.01, 0.03, 0.01, 0.01, 0.07, 0.07 }; - static { assert TARGET_ERROR_RATES.length == NUM_TESTS; } + private static final double TARGET_ERROR_RATES[] = + { 0.025, 0.01, 0.015, 0.01, 0.03, 0.01, 0.01, 0.07, 0.07 }; + static { + assert TARGET_ERROR_RATES.length == NUM_TESTS; + } /** A false positive rate that is obviously too high. 
*/ private static final double TOO_HIGH_ERROR_RATE; @@ -156,7 +155,7 @@ public void testCompoundBloomFilter() throws IOException { conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true); for (int t = 0; t < NUM_TESTS; ++t) { conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, - (float) TARGET_ERROR_RATES[t]); + (float) TARGET_ERROR_RATES[t]); testIdMsg = "in test #" + t + ":"; Random generationRand = new Random(GENERATION_SEED); @@ -168,40 +167,36 @@ public void testCompoundBloomFilter() throws IOException { } /** - * Validates the false positive ratio by computing its z-value and comparing - * it to the provided threshold. - * + * Validates the false positive ratio by computing its z-value and comparing it to the provided + * threshold. * @param falsePosRate experimental positive rate * @param nTrials the number of Bloom filter checks - * @param zValueBoundary z-value boundary, positive for an upper bound and - * negative for a lower bound + * @param zValueBoundary z-value boundary, positive for an upper bound and negative for a lower + * bound * @param cbf the compound Bloom filter we are using - * @param additionalMsg additional message to include in log output and - * assertion failures + * @param additionalMsg additional message to include in log output and assertion failures */ - private void validateFalsePosRate(double falsePosRate, int nTrials, - double zValueBoundary, CompoundBloomFilter cbf, String additionalMsg) { + private void validateFalsePosRate(double falsePosRate, int nTrials, double zValueBoundary, + CompoundBloomFilter cbf, String additionalMsg) { double p = BloomFilterFactory.getErrorRate(conf); double zValue = (falsePosRate - p) / Math.sqrt(p * (1 - p) / nTrials); - String assortedStatsStr = " (targetErrorRate=" + p + ", falsePosRate=" - + falsePosRate + ", nTrials=" + nTrials + ")"; + String assortedStatsStr = + " (targetErrorRate=" + p + ", falsePosRate=" + falsePosRate + ", nTrials=" + nTrials + ")"; LOG.info("z-value is " + zValue + assortedStatsStr); boolean isUpperBound = zValueBoundary > 0; - if (isUpperBound && zValue > zValueBoundary || - !isUpperBound && zValue < zValueBoundary) { + if (isUpperBound && zValue > zValueBoundary || !isUpperBound && zValue < zValueBoundary) { String errorMsg = "False positive rate z-value " + zValue + " is " - + (isUpperBound ? "higher" : "lower") + " than " + zValueBoundary - + assortedStatsStr + ". Per-chunk stats:\n" - + cbf.formatTestingStats(); + + (isUpperBound ? "higher" : "lower") + " than " + zValueBoundary + assortedStatsStr + + ". 
Per-chunk stats:\n" + cbf.formatTestingStats(); fail(errorMsg + additionalMsg); } } - private void readStoreFile(int t, BloomType bt, List kvs, - Path sfPath) throws IOException { + private void readStoreFile(int t, BloomType bt, List kvs, Path sfPath) + throws IOException { HStoreFile sf = new HStoreFile(fs, sfPath, conf, cacheConf, bt, true); sf.initReader(); StoreFileReader r = sf.getReader(); @@ -214,9 +209,9 @@ private void readStoreFile(int t, BloomType bt, List kvs, for (KeyValue kv : kvs) { byte[] row = CellUtil.cloneRow(kv); boolean present = isInBloom(scanner, row, CellUtil.cloneQualifier(kv)); - assertTrue(testIdMsg + " Bloom filter false negative on row " - + Bytes.toStringBinary(row) + " after " + numChecked - + " successful checks", present); + assertTrue(testIdMsg + " Bloom filter false negative on row " + Bytes.toStringBinary(row) + + " after " + numChecked + " successful checks", + present); ++numChecked; } } @@ -228,8 +223,8 @@ private void readStoreFile(int t, BloomType bt, List kvs, BloomFilterUtil.setRandomGeneratorForTest(new Random(283742987L)); } try { - String fakeLookupModeStr = ", fake lookup is " + (fakeLookupEnabled ? - "enabled" : "disabled"); + String fakeLookupModeStr = + ", fake lookup is " + (fakeLookupEnabled ? "enabled" : "disabled"); CompoundBloomFilter cbf = (CompoundBloomFilter) r.getGeneralBloomFilter(); cbf.enableTestingStats(); int numFalsePos = 0; @@ -242,14 +237,13 @@ private void readStoreFile(int t, BloomType bt, List kvs, } } double falsePosRate = numFalsePos * 1.0 / nTrials; - LOG.debug(String.format(testIdMsg - + " False positives: %d out of %d (%f)", - numFalsePos, nTrials, falsePosRate) + fakeLookupModeStr); + LOG.debug(String.format(testIdMsg + " False positives: %d out of %d (%f)", numFalsePos, + nTrials, falsePosRate) + fakeLookupModeStr); // Check for obvious Bloom filter crashes. - assertTrue("False positive is too high: " + falsePosRate + " (greater " - + "than " + TOO_HIGH_ERROR_RATE + ")" + fakeLookupModeStr, - falsePosRate < TOO_HIGH_ERROR_RATE); + assertTrue("False positive is too high: " + falsePosRate + " (greater " + "than " + + TOO_HIGH_ERROR_RATE + ")" + fakeLookupModeStr, + falsePosRate < TOO_HIGH_ERROR_RATE); // Now a more precise check to see if the false positive rate is not // too high. The reason we use a relaxed restriction for the real-world @@ -257,8 +251,7 @@ private void readStoreFile(int t, BloomType bt, List kvs, // are not completely independent. double maxZValue = fakeLookupEnabled ? 
1.96 : 2.5; - validateFalsePosRate(falsePosRate, nTrials, maxZValue, cbf, - fakeLookupModeStr); + validateFalsePosRate(falsePosRate, nTrials, maxZValue, cbf, fakeLookupModeStr); // For checking the lower bound we need to eliminate the last chunk, // because it is frequently smaller and the false positive rate in it @@ -269,12 +262,11 @@ private void readStoreFile(int t, BloomType bt, List kvs, numFalsePos -= cbf.getNumPositivesForTesting(nChunks - 1); nTrials -= cbf.getNumQueriesForTesting(nChunks - 1); falsePosRate = numFalsePos * 1.0 / nTrials; - LOG.info(testIdMsg + " False positive rate without last chunk is " + - falsePosRate + fakeLookupModeStr); + LOG.info(testIdMsg + " False positive rate without last chunk is " + falsePosRate + + fakeLookupModeStr); } - validateFalsePosRate(falsePosRate, nTrials, -2.58, cbf, - fakeLookupModeStr); + validateFalsePosRate(falsePosRate, nTrials, -2.58, cbf, fakeLookupModeStr); } finally { BloomFilterUtil.setRandomGeneratorForTest(null); } @@ -283,13 +275,11 @@ private void readStoreFile(int t, BloomType bt, List kvs, r.close(true); // end of test so evictOnClose } - private boolean isInBloom(StoreFileScanner scanner, byte[] row, BloomType bt, - Random rand) { + private boolean isInBloom(StoreFileScanner scanner, byte[] row, BloomType bt, Random rand) { return isInBloom(scanner, row, RandomKeyValueUtil.randomRowOrQualifier(rand)); } - private boolean isInBloom(StoreFileScanner scanner, byte[] row, - byte[] qualifier) { + private boolean isInBloom(StoreFileScanner scanner, byte[] row, byte[] qualifier) { Scan scan = new Scan().withStartRow(row).withStopRow(row, true); scan.addColumn(Bytes.toBytes(RandomKeyValueUtil.COLUMN_FAMILY_NAME), qualifier); HStore store = mock(HStore.class); @@ -298,23 +288,17 @@ private boolean isInBloom(StoreFileScanner scanner, byte[] row, return scanner.shouldUseScanner(scan, store, Long.MIN_VALUE); } - private Path writeStoreFile(int t, BloomType bt, List kvs) - throws IOException { - conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, - BLOOM_BLOCK_SIZES[t]); + private Path writeStoreFile(int t, BloomType bt, List kvs) throws IOException { + conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZES[t]); conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true); cacheConf = new CacheConfig(conf, blockCache); HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build(); StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(TEST_UTIL.getDataTestDir()) - .withBloomType(bt) - .withFileContext(meta) - .build(); + .withOutputDir(TEST_UTIL.getDataTestDir()).withBloomType(bt).withFileContext(meta).build(); assertTrue(w.hasGeneralBloom()); assertTrue(w.getGeneralBloomWriter() instanceof CompoundBloomFilterWriter); - CompoundBloomFilterWriter cbbf = - (CompoundBloomFilterWriter) w.getGeneralBloomWriter(); + CompoundBloomFilterWriter cbbf = (CompoundBloomFilterWriter) w.getGeneralBloomWriter(); int keyCount = 0; KeyValue prev = null; @@ -325,11 +309,10 @@ private Path writeStoreFile(int t, BloomType bt, List kvs) // Validate the key count in the Bloom filter. boolean newKey = true; if (prev != null) { - newKey = !(bt == BloomType.ROW ? CellUtil.matchingRows(kv, - prev) : CellUtil.matchingRowColumn(kv, prev)); + newKey = !(bt == BloomType.ROW ? 
CellUtil.matchingRows(kv, prev) + : CellUtil.matchingRowColumn(kv, prev)); } - if (newKey) - ++keyCount; + if (newKey) ++keyCount; assertEquals(keyCount, cbbf.getKeyCount()); prev = kv; @@ -344,12 +327,10 @@ public void testCompoundBloomSizing() { int bloomBlockByteSize = 4096; int bloomBlockBitSize = bloomBlockByteSize * 8; double targetErrorRate = 0.01; - long maxKeysPerChunk = BloomFilterUtil.idealMaxKeys(bloomBlockBitSize, - targetErrorRate); + long maxKeysPerChunk = BloomFilterUtil.idealMaxKeys(bloomBlockBitSize, targetErrorRate); long bloomSize1 = bloomBlockByteSize * 8; - long bloomSize2 = BloomFilterUtil.computeBitSize(maxKeysPerChunk, - targetErrorRate); + long bloomSize2 = BloomFilterUtil.computeBitSize(maxKeysPerChunk, targetErrorRate); double bloomSizeRatio = (bloomSize2 * 1.0 / bloomSize1); assertTrue(Math.abs(bloomSizeRatio - 0.9999) < 0.0001); @@ -360,16 +341,18 @@ public void testCreateKey() { byte[] row = Bytes.toBytes("myRow"); byte[] qualifier = Bytes.toBytes("myQualifier"); // Mimic what Storefile.createBloomKeyValue() does - byte[] rowKey = KeyValueUtil.createFirstOnRow(row, 0, row.length, new byte[0], 0, 0, row, 0, 0).getKey(); - byte[] rowColKey = KeyValueUtil.createFirstOnRow(row, 0, row.length, - new byte[0], 0, 0, qualifier, 0, qualifier.length).getKey(); + byte[] rowKey = + KeyValueUtil.createFirstOnRow(row, 0, row.length, new byte[0], 0, 0, row, 0, 0).getKey(); + byte[] rowColKey = KeyValueUtil + .createFirstOnRow(row, 0, row.length, new byte[0], 0, 0, qualifier, 0, qualifier.length) + .getKey(); KeyValue rowKV = KeyValueUtil.createKeyValueFromKey(rowKey); KeyValue rowColKV = KeyValueUtil.createKeyValueFromKey(rowColKey); assertEquals(rowKV.getTimestamp(), rowColKV.getTimestamp()); - assertEquals(Bytes.toStringBinary(rowKV.getRowArray(), rowKV.getRowOffset(), - rowKV.getRowLength()), Bytes.toStringBinary(rowColKV.getRowArray(), rowColKV.getRowOffset(), - rowColKV.getRowLength())); + assertEquals( + Bytes.toStringBinary(rowKV.getRowArray(), rowKV.getRowOffset(), rowKV.getRowLength()), + Bytes.toStringBinary(rowColKV.getRowArray(), rowColKV.getRowOffset(), + rowColKV.getRowLength())); assertEquals(0, rowKV.getQualifierLength()); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java index 93424b6353af..ea9f76918eae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -41,9 +40,9 @@ import org.junit.experimental.categories.Category; /** - * Test DataBlockEncodingTool. + * Test DataBlockEncodingTool. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestDataBlockEncodingTool { @ClassRule @@ -75,13 +74,9 @@ private void testHFile(String fileName, boolean useTags, boolean allTags) throws } private void createHFileWithTags(Path path, boolean useTags, boolean allTags) throws IOException { - HFileContext meta = new HFileContextBuilder() - .withBlockSize(64 * 1024) - .withIncludesTags(useTags).build(); - sfw = - new StoreFileWriter.Builder(conf, fs) - .withFilePath(path) - .withFileContext(meta).build(); + HFileContext meta = + new HFileContextBuilder().withBlockSize(64 * 1024).withIncludesTags(useTags).build(); + sfw = new StoreFileWriter.Builder(conf, fs).withFilePath(path).withFileContext(meta).build(); long now = EnvironmentEdgeManager.currentTime(); byte[] FAMILY = Bytes.toBytes("cf"); byte[] QUALIFIER = Bytes.toBytes("q"); @@ -93,18 +88,16 @@ private void createHFileWithTags(Path path, boolean useTags, boolean allTags) th if (useTags) { if (allTags) { // Write cells with tags to HFile. - Tag[] tags = new Tag[]{ - new ArrayBackedTag((byte) 0, Bytes.toString(b)), - new ArrayBackedTag((byte) 0, Bytes.toString(b))}; + Tag[] tags = new Tag[] { new ArrayBackedTag((byte) 0, Bytes.toString(b)), + new ArrayBackedTag((byte) 0, Bytes.toString(b)) }; kv = new KeyValue(b, FAMILY, QUALIFIER, now, b, tags); } else { // Write half cells with tags and half without tags to HFile. if ((e - 'a') % 2 == 0) { kv = new KeyValue(b, FAMILY, QUALIFIER, now, b); } else { - Tag[] tags = new Tag[]{ - new ArrayBackedTag((byte) 0, Bytes.toString(b)), - new ArrayBackedTag((byte) 0, Bytes.toString(b))}; + Tag[] tags = new Tag[] { new ArrayBackedTag((byte) 0, Bytes.toString(b)), + new ArrayBackedTag((byte) 0, Bytes.toString(b)) }; kv = new KeyValue(b, FAMILY, QUALIFIER, now, b, tags); } } @@ -127,8 +120,8 @@ private static void testDataBlockingTool(Path path) throws IOException { boolean doVerify = true; boolean doBenchmark = true; String testHFilePath = path.toString(); - DataBlockEncodingTool.testCodecs(conf, maxKV, testHFilePath, - Compression.Algorithm.GZ.getName(), doBenchmark, doVerify); + DataBlockEncodingTool.testCodecs(conf, maxKV, testHFilePath, Compression.Algorithm.GZ.getName(), + doBenchmark, doVerify); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java index 37635295f255..20aade111acd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -82,8 +82,8 @@ public void NotIncomingWindow() throws IOException { long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }; long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11 }; - compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), new long[] { 20, 21, 22, 23, - 24, 25 }, new long[] { Long.MIN_VALUE, 6}, false, true); + compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), + new long[] { 20, 21, 22, 23, 24, 25 }, new long[] { Long.MIN_VALUE, 6 }, false, true); } /** @@ -111,7 +111,7 @@ public void NewerThanIncomingWindow() throws IOException { long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11, 12, 13 }; compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), new long[] { 10, 11, 12, 13 }, - new long[] { Long.MIN_VALUE, 12}, false, true); + new long[] { Long.MIN_VALUE, 12 }, false, true); } /** @@ -125,7 +125,7 @@ public void NoT2() throws IOException { long[] sizes = new long[] { 0, 20, 21, 22, 23, 1 }; compactEquals(194, sfCreate(minTimestamps, maxTimestamps, sizes), new long[] { 22, 23 }, - new long[] { Long.MIN_VALUE, 96}, false, true); + new long[] { Long.MIN_VALUE, 96 }, false, true); } @Test @@ -190,8 +190,8 @@ public void olderThanMaxAge() throws IOException { long[] maxTimestamps = new long[] { 44, 60, 61, 96, 100, 104, 105, 106, 113, 145, 157 }; long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 33, 30, 31, 2, 1 }; - compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), new long[] { 40, 41, 42, 33, - 30, 31 }, new long[] { Long.MIN_VALUE, 96 }, false, true); + compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), + new long[] { 40, 41, 42, 33, 30, 31 }, new long[] { Long.MIN_VALUE, 96 }, false, true); } /** @@ -204,8 +204,9 @@ public void outOfOrder() throws IOException { long[] maxTimestamps = new long[] { 0, 13, 3, 10, 11, 1, 2, 12, 14, 15 }; long[] sizes = new long[] { 30, 31, 32, 33, 34, 22, 28, 23, 24, 1 }; - compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), new long[] { 31, 32, 33, 34, - 22, 28, 23, 24, 1 }, new long[] { Long.MIN_VALUE, 12 }, false, true); + compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), + new long[] { 31, 32, 33, 34, 22, 28, 23, 24, 1 }, new long[] { Long.MIN_VALUE, 12 }, false, + true); } /** @@ -220,8 +221,8 @@ public void negativeEpochtime() throws IOException { long[] sizes = new long[] { 30, 31, 32, 33, 34, 22, 25, 23, 24, 1 }; compactEquals(1, sfCreate(minTimestamps, maxTimestamps, sizes), - new long[] { 31, 32, 33, 34, 22, 25, 23, 24, 1 }, - new long[] { Long.MIN_VALUE, -24 }, false, true); + new long[] { 31, 32, 33, 34, 22, 25, 23, 24, 1 }, new long[] { Long.MIN_VALUE, -24 }, false, + true); } /** @@ -266,8 +267,8 @@ public void negativeForMajor() throws IOException { long[] maxTimestamps = new long[] { -8, -7, -6, -5, -4, -3, -2, -1, 0, 6, 13 }; long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 33, 30, 31, 2, 1 }; - compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), new long[] { 0, 50, 51, 40, - 41, 42, 33, 30, 31, 2, 1 }, + compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), + new long[] { 0, 50, 51, 40, 41, 42, 33, 30, 31, 2, 1 }, new long[] { Long.MIN_VALUE, -144, -120, -96, -72, -48, -24, 0, 6, 12 }, true, true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicyHeterogeneousStorage.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicyHeterogeneousStorage.java index 74210e620fdd..2629d31e73c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicyHeterogeneousStorage.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicyHeterogeneousStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,8 +72,8 @@ protected void config() { } /** - * Test for minor compaction of incoming window. - * Incoming window start ts >= now - hot age. So it is HOT window, will use HOT_WINDOW_SP. + * Test for minor compaction of incoming window. Incoming window start ts >= now - hot age. So it + * is HOT window, will use HOT_WINDOW_SP. * @throws IOException with error */ @Test @@ -85,14 +85,13 @@ public void testIncomingWindowHot() throws IOException { // expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE, 12 } // test whether DateTieredCompactionRequest boundariesPolicies matches expected expected.put(12L, HOT_WINDOW_SP); - compactEqualsStoragePolicy(16, sfCreate(minTimestamps, maxTimestamps, sizes), - expected, false, true); + compactEqualsStoragePolicy(16, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false, + true); } /** - * Test for not incoming window. - * now - hot age > window start >= now - warm age, - * so this window and is WARM window, will use WARM_WINDOW_SP + * Test for not incoming window. now - hot age > window start >= now - warm age, so this window + * and is WARM window, will use WARM_WINDOW_SP * @throws IOException with error */ @Test @@ -103,14 +102,13 @@ public void testNotIncomingWindowWarm() throws IOException { Map expected = new HashMap<>(); // expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE, 6 } expected.put(6L, WARM_WINDOW_SP); - compactEqualsStoragePolicy(16, sfCreate(minTimestamps, maxTimestamps, sizes), - expected, false, true); + compactEqualsStoragePolicy(16, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false, + true); } /** - * Test for not incoming window. - * this window start ts >= ow - hot age, - * So this incoming window and is HOT window. Use HOT_WINDOW_SP + * Test for not incoming window. this window start ts >= ow - hot age, So this incoming window and + * is HOT window. Use HOT_WINDOW_SP * @throws IOException with error */ @Test @@ -121,13 +119,13 @@ public void testNotIncomingWindowAndIsHot() throws IOException { Map expected = new HashMap<>(); // expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE, 6 } expected.put(6L, HOT_WINDOW_SP); - compactEqualsStoragePolicy(12, sfCreate(minTimestamps, maxTimestamps, sizes), - expected, false, true); + compactEqualsStoragePolicy(12, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false, + true); } /** - * Test for not incoming window. - * COLD window start timestamp < now - warm age, so use COLD_WINDOW_SP + * Test for not incoming window. 
COLD window start timestamp < now - warm age, so use + * COLD_WINDOW_SP * @throws IOException with error */ @Test @@ -138,14 +136,14 @@ public void testColdWindow() throws IOException { Map expected = new HashMap<>(); // expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE, 6 } expected.put(6L, COLD_WINDOW_SP); - compactEqualsStoragePolicy(22, sfCreate(minTimestamps, maxTimestamps, sizes), - expected, false, true); + compactEqualsStoragePolicy(22, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false, + true); } /** - * Test for not incoming window. but not all hfiles will be selected to compact. - * Apply exploring logic on non-incoming window. More than one hfile left in this window. - * this means minor compact single out is true. boundaries only contains Long.MIN_VALUE + * Test for not incoming window. but not all hfiles will be selected to compact. Apply exploring + * logic on non-incoming window. More than one hfile left in this window. this means minor compact + * single out is true. boundaries only contains Long.MIN_VALUE * @throws IOException with error */ @Test @@ -156,13 +154,13 @@ public void testRatioT0() throws IOException { Map expected = new HashMap<>(); // window start = 6, expected DateTieredCompactionRequest boundaries = { Long.MIN_VALUE } expected.put(Long.MIN_VALUE, WARM_WINDOW_SP); - compactEqualsStoragePolicy(16, sfCreate(minTimestamps, maxTimestamps, sizes), - expected, false, true); + compactEqualsStoragePolicy(16, sfCreate(minTimestamps, maxTimestamps, sizes), expected, false, + true); } /** - * Test for Major compaction. It will compact all files and create multi output files - * with different window storage policy. + * Test for Major compaction. It will compact all files and create multi output files with + * different window storage policy. * @throws IOException with error */ @Test @@ -183,7 +181,7 @@ public void testMajorCompation() throws IOException { compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), new long[] { 0, 50, 51, 40, 41, 42, 33, 30, 31, 2, 1 }, new long[] { Long.MIN_VALUE, 24, 48, 72, 96, 120, 144, 150, 156 }, true, true); - compactEqualsStoragePolicy(161, sfCreate(minTimestamps, maxTimestamps, sizes), - expected,true, true); + compactEqualsStoragePolicy(161, sfCreate(minTimestamps, maxTimestamps, sizes), expected, true, + true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicyOverflow.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicyOverflow.java index 29f9ea74414a..af94baa68eee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicyOverflow.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicyOverflow.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -70,6 +70,7 @@ public void maxValuesForMajor() throws IOException { compactEquals(Long.MAX_VALUE, sfCreate(minTimestamps, maxTimestamps, sizes), new long[] { 0, 1 }, new long[] { Long.MIN_VALUE, -4611686018427387903L, 0, - 4611686018427387903L, 9223372036854775806L }, true, true); + 4611686018427387903L, 9223372036854775806L }, + true, true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java index dd0ed42a4a6d..542f47fdb2f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,49 +51,48 @@ public void testCompactionRatio() throws IOException { TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge(); EnvironmentEdgeManager.injectEdge(edge); /** - * NOTE: these tests are specific to describe the implementation of the - * current compaction algorithm. Developed to ensure that refactoring - * doesn't implicitly alter this. + * NOTE: these tests are specific to describe the implementation of the current compaction + * algorithm. Developed to ensure that refactoring doesn't implicitly alter this. */ long tooBig = maxSize + 1; // default case. preserve user ratio on size - compactEquals(sfCreate(100,50,23,12,12), 23, 12, 12); + compactEquals(sfCreate(100, 50, 23, 12, 12), 23, 12, 12); // less than compact threshold = don't compact - compactEquals(sfCreate(100,50,25,12,12) /* empty */); + compactEquals(sfCreate(100, 50, 25, 12, 12) /* empty */); // greater than compact size = skip those compactEquals(sfCreate(tooBig, tooBig, 700, 700, 700), 700, 700, 700); // big size + threshold - compactEquals(sfCreate(tooBig, tooBig, 700,700) /* empty */); + compactEquals(sfCreate(tooBig, tooBig, 700, 700) /* empty */); // small files = don't care about ratio - compactEquals(sfCreate(7,1,1), 7,1,1); + compactEquals(sfCreate(7, 1, 1), 7, 1, 1); // don't exceed max file compact threshold - // note: file selection starts with largest to smallest. + // note: file selection starts with largest to smallest. 
compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1); - compactEquals(sfCreate(50, 10, 10 ,10, 10), 10, 10, 10, 10); + compactEquals(sfCreate(50, 10, 10, 10, 10), 10, 10, 10, 10); compactEquals(sfCreate(10, 10, 10, 10, 50), 10, 10, 10, 10); - compactEquals(sfCreate(251, 253, 251, maxSize -1), 251, 253, 251); + compactEquals(sfCreate(251, 253, 251, maxSize - 1), 251, 253, 251); - compactEquals(sfCreate(maxSize -1,maxSize -1,maxSize -1) /* empty */); + compactEquals(sfCreate(maxSize - 1, maxSize - 1, maxSize - 1) /* empty */); // Always try and compact something to get below blocking storefile count this.conf.setLong("hbase.hstore.compaction.min.size", 1); store.storeEngine.getCompactionPolicy().setConf(conf); - compactEquals(sfCreate(512,256,128,64,32,16,8,4,2,1), 4,2,1); + compactEquals(sfCreate(512, 256, 128, 64, 32, 16, 8, 4, 2, 1), 4, 2, 1); this.conf.setLong("hbase.hstore.compaction.min.size", minSize); store.storeEngine.getCompactionPolicy().setConf(conf); /* MAJOR COMPACTION */ // if a major compaction has been forced, then compact everything - compactEquals(sfCreate(50,25,12,12), true, 50, 25, 12, 12); + compactEquals(sfCreate(50, 25, 12, 12), true, 50, 25, 12, 12); // also choose files < threshold on major compaction - compactEquals(sfCreate(12,12), true, 12, 12); + compactEquals(sfCreate(12, 12), true, 12, 12); // even if one of those files is too big - compactEquals(sfCreate(tooBig, 12,12), true, tooBig, 12, 12); + compactEquals(sfCreate(tooBig, 12, 12), true, tooBig, 12, 12); // don't exceed max file compact threshold, even with major compaction store.forceMajor = true; compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1); @@ -101,7 +100,7 @@ public void testCompactionRatio() throws IOException { // if we exceed maxCompactSize, downgrade to minor // if not, it creates a 'snowball effect' when files >> maxCompactSize: // the last file in compaction is the aggregate of all previous compactions - compactEquals(sfCreate(100,50,23,12,12), true, 23, 12, 12); + compactEquals(sfCreate(100, 50, 23, 12, 12), true, 23, 12, 12); conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1); conf.setFloat("hbase.hregion.majorcompaction.jitter", 0); store.storeEngine.getCompactionPolicy().setConf(conf); @@ -118,15 +117,15 @@ public void testCompactionRatio() throws IOException { edge.increment(2); compactEquals(candidates, 23, 12, 12); } finally { - conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24); + conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000 * 60 * 60 * 24); conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F); } /* REFERENCES == file is from a region that was split */ // treat storefiles that have references like a major compaction - compactEquals(sfCreate(true, 100,50,25,12,12), 100, 50, 25, 12, 12); + compactEquals(sfCreate(true, 100, 50, 25, 12, 12), 100, 50, 25, 12, 12); // reference files shouldn't obey max threshold - compactEquals(sfCreate(true, tooBig, 12,12), tooBig, 12, 12); + compactEquals(sfCreate(true, tooBig, 12, 12), tooBig, 12, 12); // reference files should obey max file compact to avoid OOM compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3); @@ -139,9 +138,8 @@ public void testCompactionRatio() throws IOException { @Test public void testOffPeakCompactionRatio() throws IOException { /* - * NOTE: these tests are specific to describe the implementation of the - * current compaction algorithm. Developed to ensure that refactoring - * doesn't implicitly alter this. 
+ * NOTE: these tests are specific to describe the implementation of the current compaction + * algorithm. Developed to ensure that refactoring doesn't implicitly alter this. */ // set an off-peak compaction threshold this.conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F); @@ -154,24 +152,24 @@ public void testOffPeakCompactionRatio() throws IOException { @Test public void testStuckStoreCompaction() throws IOException { // Select the smallest compaction if the store is stuck. - compactEquals(sfCreate(99,99,99,99,99,99, 30,30,30,30), 30, 30, 30); + compactEquals(sfCreate(99, 99, 99, 99, 99, 99, 30, 30, 30, 30), 30, 30, 30); // If not stuck, standard policy applies. - compactEquals(sfCreate(99,99,99,99,99, 30,30,30,30), 99, 30, 30, 30, 30); + compactEquals(sfCreate(99, 99, 99, 99, 99, 30, 30, 30, 30), 99, 30, 30, 30, 30); // Add sufficiently small files to compaction, though - compactEquals(sfCreate(99,99,99,99,99,99, 30,30,30,15), 30, 30, 30, 15); + compactEquals(sfCreate(99, 99, 99, 99, 99, 99, 30, 30, 30, 15), 30, 30, 30, 15); // Prefer earlier compaction to latter if the benefit is not significant - compactEquals(sfCreate(99,99,99,99, 30,26,26,29,25,25), 30, 26, 26); + compactEquals(sfCreate(99, 99, 99, 99, 30, 26, 26, 29, 25, 25), 30, 26, 26); // Prefer later compaction if the benefit is significant. - compactEquals(sfCreate(99,99,99,99, 27,27,27,20,20,20), 20, 20, 20); + compactEquals(sfCreate(99, 99, 99, 99, 27, 27, 27, 20, 20, 20), 20, 20, 20); } @Test public void testCompactionEmptyHFile() throws IOException { // Set TTL ScanInfo oldScanInfo = store.getScanInfo(); - ScanInfo newScanInfo = oldScanInfo.customize(oldScanInfo.getMaxVersions(), 600, - oldScanInfo.getKeepDeletedCells()); + ScanInfo newScanInfo = + oldScanInfo.customize(oldScanInfo.getMaxVersions(), 600, oldScanInfo.getKeepDeletedCells()); store.setScanInfo(newScanInfo); // Do not compact empty store file List candidates = sfCreate(0); @@ -183,9 +181,9 @@ public void testCompactionEmptyHFile() throws IOException { } } // Test Default compactions - CompactionRequestImpl result = ((RatioBasedCompactionPolicy) store.storeEngine - .getCompactionPolicy()).selectCompaction(candidates, - new ArrayList<>(), false, false, false); + CompactionRequestImpl result = + ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy()) + .selectCompaction(candidates, new ArrayList<>(), false, false, false); Assert.assertTrue(result.getFiles().isEmpty()); store.setScanInfo(oldScanInfo); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 4800786244c5..539fd1024e2a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -76,7 +76,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** memstore test case */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestDefaultMemStore { @ClassRule @@ -84,7 +84,8 @@ public class TestDefaultMemStore { HBaseClassTestRule.forClass(TestDefaultMemStore.class); private static final Logger LOG = LoggerFactory.getLogger(TestDefaultMemStore.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); protected AbstractMemStore memstore; protected static final int ROW_COUNT = 10; protected static final int QUALIFIER_COUNT = ROW_COUNT; @@ -101,9 +102,8 @@ private String getName() { public void setUp() throws Exception { internalSetUp(); // no pool - this.chunkCreator = - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + this.chunkCreator = ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, + null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); this.memstore = new DefaultMemStore(); } @@ -144,7 +144,7 @@ public void testPutSameCell() { if (msLab != null) { // make sure memstore size increased even when writing the same cell, if using MSLAB assertEquals(Segment.getCellLength(kv), - sizeChangeForSecondCell.getMemStoreSize().getDataSize()); + sizeChangeForSecondCell.getMemStoreSize().getDataSize()); // make sure chunk size increased even when writing the same cell, if using MSLAB if (msLab instanceof MemStoreLABImpl) { // since we add the chunkID at the 0th offset of the chunk and the @@ -170,8 +170,9 @@ public void testScanAcrossSnapshot() throws IOException { Scan scan = new Scan(); List result = new ArrayList<>(); Configuration conf = HBaseConfiguration.create(); - ScanInfo scanInfo = new ScanInfo(conf, null, 0, 1, HConstants.LATEST_TIMESTAMP, - KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); + ScanInfo scanInfo = + new ScanInfo(conf, null, 0, 1, HConstants.LATEST_TIMESTAMP, KeepDeletedCells.FALSE, + HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); int count = 0; try (StoreScanner s = new StoreScanner(scan, scanInfo, null, memstorescanners)) { while (s.next(result)) { @@ -275,8 +276,7 @@ public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedExcep verifyScanAcrossSnapshot2(kv1, kv2); } - protected void verifyScanAcrossSnapshot2(KeyValue kv1, KeyValue kv2) - throws IOException { + protected void verifyScanAcrossSnapshot2(KeyValue kv1, KeyValue kv2) throws IOException { List memstorescanners = this.memstore.getScanners(mvcc.getReadPoint()); assertEquals(2, memstorescanners.size()); final KeyValueScanner scanner0 = memstorescanners.get(0); @@ -286,9 +286,7 @@ protected void verifyScanAcrossSnapshot2(KeyValue kv1, KeyValue kv2) Cell n0 = scanner0.next(); Cell n1 = scanner1.next(); assertTrue(kv1.equals(n0) || kv1.equals(n1)); - assertTrue(kv2.equals(n0) - || kv2.equals(n1) - || kv2.equals(scanner0.next()) + assertTrue(kv2.equals(n0) || kv2.equals(n1) || kv2.equals(scanner0.next()) || kv2.equals(scanner1.next())); assertNull(scanner0.next()); assertNull(scanner1.next()); @@ -308,7 +306,7 @@ protected void verifyOneScanAcrossSnapshot2(KeyValue kv1, KeyValue kv2) throws I protected void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected) throws IOException { 
- scanner.seek(KeyValueUtil.createFirstOnRow(new byte[]{})); + scanner.seek(KeyValueUtil.createFirstOnRow(new byte[] {})); List returned = Lists.newArrayList(); while (true) { @@ -318,9 +316,8 @@ protected void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected } assertTrue( - "Got:\n" + Joiner.on("\n").join(returned) + - "\nExpected:\n" + Joiner.on("\n").join(expected), - Iterables.elementsEqual(Arrays.asList(expected), returned)); + "Got:\n" + Joiner.on("\n").join(returned) + "\nExpected:\n" + Joiner.on("\n").join(expected), + Iterables.elementsEqual(Arrays.asList(expected), returned)); assertNull(scanner.peek()); } @@ -332,20 +329,19 @@ public void testMemstoreConcurrentControl() throws IOException { final byte[] q2 = Bytes.toBytes("q2"); final byte[] v = Bytes.toBytes("value"); - MultiVersionConcurrencyControl.WriteEntry w = - mvcc.begin(); + MultiVersionConcurrencyControl.WriteEntry w = mvcc.begin(); KeyValue kv1 = new KeyValue(row, f, q1, v); kv1.setSequenceId(w.getWriteNumber()); memstore.add(kv1, null); KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{}); + assertScannerResults(s, new KeyValue[] {}); mvcc.completeAndWait(w); s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{kv1}); + assertScannerResults(s, new KeyValue[] { kv1 }); w = mvcc.begin(); KeyValue kv2 = new KeyValue(row, f, q2, v); @@ -353,19 +349,18 @@ public void testMemstoreConcurrentControl() throws IOException { memstore.add(kv2, null); s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{kv1}); + assertScannerResults(s, new KeyValue[] { kv1 }); mvcc.completeAndWait(w); s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{kv1, kv2}); + assertScannerResults(s, new KeyValue[] { kv1, kv2 }); } /** - * Regression test for HBASE-2616, HBASE-2670. - * When we insert a higher-memstoreTS version of a cell but with - * the same timestamp, we still need to provide consistent reads - * for the same scanner. + * Regression test for HBASE-2616, HBASE-2670. When we insert a higher-memstoreTS version of a + * cell but with the same timestamp, we still need to provide consistent reads for the same + * scanner. 
*/ @Test public void testMemstoreEditsVisibilityWithSameKey() throws IOException { @@ -377,8 +372,7 @@ public void testMemstoreEditsVisibilityWithSameKey() throws IOException { final byte[] v2 = Bytes.toBytes("value2"); // INSERT 1: Write both columns val1 - MultiVersionConcurrencyControl.WriteEntry w = - mvcc.begin(); + MultiVersionConcurrencyControl.WriteEntry w = mvcc.begin(); KeyValue kv11 = new KeyValue(row, f, q1, v1); kv11.setSequenceId(w.getWriteNumber()); @@ -391,7 +385,7 @@ public void testMemstoreEditsVisibilityWithSameKey() throws IOException { // BEFORE STARTING INSERT 2, SEE FIRST KVS KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{kv11, kv12}); + assertScannerResults(s, new KeyValue[] { kv11, kv12 }); // START INSERT 2: Write both columns val2 w = mvcc.begin(); @@ -405,7 +399,7 @@ public void testMemstoreEditsVisibilityWithSameKey() throws IOException { // BEFORE COMPLETING INSERT 2, SEE FIRST KVS s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{kv11, kv12}); + assertScannerResults(s, new KeyValue[] { kv11, kv12 }); // COMPLETE INSERT 2 mvcc.completeAndWait(w); @@ -414,13 +408,12 @@ public void testMemstoreEditsVisibilityWithSameKey() throws IOException { // See HBASE-1485 for discussion about what we should do with // the duplicate-TS inserts s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{kv21, kv11, kv22, kv12}); + assertScannerResults(s, new KeyValue[] { kv21, kv11, kv22, kv12 }); } /** - * When we insert a higher-memstoreTS deletion of a cell but with - * the same timestamp, we still need to provide consistent reads - * for the same scanner. + * When we insert a higher-memstoreTS deletion of a cell but with the same timestamp, we still + * need to provide consistent reads for the same scanner. 
*/ @Test public void testMemstoreDeletesVisibilityWithSameKey() throws IOException { @@ -430,8 +423,7 @@ public void testMemstoreDeletesVisibilityWithSameKey() throws IOException { final byte[] q2 = Bytes.toBytes("q2"); final byte[] v1 = Bytes.toBytes("value1"); // INSERT 1: Write both columns val1 - MultiVersionConcurrencyControl.WriteEntry w = - mvcc.begin(); + MultiVersionConcurrencyControl.WriteEntry w = mvcc.begin(); KeyValue kv11 = new KeyValue(row, f, q1, v1); kv11.setSequenceId(w.getWriteNumber()); @@ -444,28 +436,26 @@ public void testMemstoreDeletesVisibilityWithSameKey() throws IOException { // BEFORE STARTING INSERT 2, SEE FIRST KVS KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{kv11, kv12}); + assertScannerResults(s, new KeyValue[] { kv11, kv12 }); // START DELETE: Insert delete for one of the columns w = mvcc.begin(); - KeyValue kvDel = new KeyValue(row, f, q2, kv11.getTimestamp(), - KeyValue.Type.DeleteColumn); + KeyValue kvDel = new KeyValue(row, f, q2, kv11.getTimestamp(), KeyValue.Type.DeleteColumn); kvDel.setSequenceId(w.getWriteNumber()); memstore.add(kvDel, null); // BEFORE COMPLETING DELETE, SEE FIRST KVS s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{kv11, kv12}); + assertScannerResults(s, new KeyValue[] { kv11, kv12 }); // COMPLETE DELETE mvcc.completeAndWait(w); // NOW WE SHOULD SEE DELETE s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); - assertScannerResults(s, new KeyValue[]{kv11, kvDel, kv12}); + assertScannerResults(s, new KeyValue[] { kv11, kvDel, kv12 }); } - private static class ReadOwnWritesTester extends Thread { static final int NUM_TRIES = 1000; @@ -479,7 +469,6 @@ private static class ReadOwnWritesTester extends Thread { AtomicReference caughtException; - public ReadOwnWritesTester(int id, MemStore memstore, MultiVersionConcurrencyControl mvcc, AtomicReference caughtException) { this.mvcc = mvcc; @@ -499,8 +488,7 @@ public void run() { private void internalRun() throws IOException { for (long i = 0; i < NUM_TRIES && caughtException.get() == null; i++) { - MultiVersionConcurrencyControl.WriteEntry w = - mvcc.begin(); + MultiVersionConcurrencyControl.WriteEntry w = mvcc.begin(); // Insert the sequence value (i) byte[] v = Bytes.toBytes(i); @@ -516,8 +504,7 @@ private void internalRun() throws IOException { Cell ret = s.next(); assertNotNull("Didnt find own write at all", ret); - assertEquals("Didnt read own writes", - kv.getTimestamp(), ret.getTimestamp()); + assertEquals("Didnt read own writes", kv.getTimestamp(), ret.getTimestamp()); } } } @@ -561,12 +548,11 @@ public void testSnapshotting() throws IOException { @Test public void testMultipleVersionsSimple() throws Exception { DefaultMemStore m = new DefaultMemStore(new Configuration(), CellComparatorImpl.COMPARATOR); - byte [] row = Bytes.toBytes("testRow"); - byte [] family = Bytes.toBytes("testFamily"); - byte [] qf = Bytes.toBytes("testQualifier"); - long [] stamps = {1,2,3}; - byte [][] values = {Bytes.toBytes("value0"), Bytes.toBytes("value1"), - Bytes.toBytes("value2")}; + byte[] row = Bytes.toBytes("testRow"); + byte[] family = Bytes.toBytes("testFamily"); + byte[] qf = Bytes.toBytes("testQualifier"); + long[] stamps = { 1, 2, 3 }; + byte[][] values = { Bytes.toBytes("value0"), Bytes.toBytes("value1"), Bytes.toBytes("value2") }; KeyValue key0 = new KeyValue(row, family, qf, stamps[0], values[0]); KeyValue key1 = new KeyValue(row, family, qf, stamps[1], values[1]); 
KeyValue key2 = new KeyValue(row, family, qf, stamps[2], values[2]); @@ -575,15 +561,16 @@ public void testMultipleVersionsSimple() throws Exception { m.add(key1, null); m.add(key2, null); - assertTrue("Expected memstore to hold 3 values, actually has " + - m.getActive().getCellsCount(), m.getActive().getCellsCount() == 3); + assertTrue("Expected memstore to hold 3 values, actually has " + m.getActive().getCellsCount(), + m.getActive().getCellsCount() == 3); } ////////////////////////////////////////////////////////////////////////////// // Get tests ////////////////////////////////////////////////////////////////////////////// - /** Test getNextRow from memstore + /** + * Test getNextRow from memstore * @throws InterruptedException */ @Test @@ -594,18 +581,18 @@ public void testGetNextRow() throws Exception { addRows(this.memstore); Cell closestToEmpty = ((DefaultMemStore) this.memstore).getNextRow(KeyValue.LOWESTKEY); assertTrue(CellComparatorImpl.COMPARATOR.compareRows(closestToEmpty, - new KeyValue(Bytes.toBytes(0), EnvironmentEdgeManager.currentTime())) == 0); + new KeyValue(Bytes.toBytes(0), EnvironmentEdgeManager.currentTime())) == 0); for (int i = 0; i < ROW_COUNT; i++) { - Cell nr = ((DefaultMemStore) this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i), - EnvironmentEdgeManager.currentTime())); + Cell nr = ((DefaultMemStore) this.memstore) + .getNextRow(new KeyValue(Bytes.toBytes(i), EnvironmentEdgeManager.currentTime())); if (i + 1 == ROW_COUNT) { assertNull(nr); } else { assertTrue(CellComparatorImpl.COMPARATOR.compareRows(nr, - new KeyValue(Bytes.toBytes(i + 1), EnvironmentEdgeManager.currentTime())) == 0); + new KeyValue(Bytes.toBytes(i + 1), EnvironmentEdgeManager.currentTime())) == 0); } } - //starting from each row, validate results should contain the starting row + // starting from each row, validate results should contain the starting row Configuration conf = HBaseConfiguration.create(); for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) { ScanInfo scanInfo = @@ -636,26 +623,26 @@ public void testGetNextRow() throws Exception { @Test public void testGet_memstoreAndSnapShot() throws IOException { - byte [] row = Bytes.toBytes("testrow"); - byte [] fam = Bytes.toBytes("testfamily"); - byte [] qf1 = Bytes.toBytes("testqualifier1"); - byte [] qf2 = Bytes.toBytes("testqualifier2"); - byte [] qf3 = Bytes.toBytes("testqualifier3"); - byte [] qf4 = Bytes.toBytes("testqualifier4"); - byte [] qf5 = Bytes.toBytes("testqualifier5"); - byte [] val = Bytes.toBytes("testval"); - - //Setting up memstore + byte[] row = Bytes.toBytes("testrow"); + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf1 = Bytes.toBytes("testqualifier1"); + byte[] qf2 = Bytes.toBytes("testqualifier2"); + byte[] qf3 = Bytes.toBytes("testqualifier3"); + byte[] qf4 = Bytes.toBytes("testqualifier4"); + byte[] qf5 = Bytes.toBytes("testqualifier5"); + byte[] val = Bytes.toBytes("testval"); + + // Setting up memstore memstore.add(new KeyValue(row, fam, qf1, val), null); memstore.add(new KeyValue(row, fam, qf2, val), null); memstore.add(new KeyValue(row, fam, qf3, val), null); - //Creating a snapshot + // Creating a snapshot memstore.snapshot(); assertEquals(3, memstore.getSnapshot().getCellsCount()); - //Adding value to "new" memstore + // Adding value to "new" memstore assertEquals(0, memstore.getActive().getCellsCount()); - memstore.add(new KeyValue(row, fam ,qf4, val), null); - memstore.add(new KeyValue(row, fam ,qf5, val), null); + memstore.add(new KeyValue(row, fam, qf4, val), null); + 
memstore.add(new KeyValue(row, fam, qf5, val), null); assertEquals(2, memstore.getActive().getCellsCount()); } @@ -664,10 +651,10 @@ public void testGet_memstoreAndSnapShot() throws IOException { ////////////////////////////////////////////////////////////////////////////// @Test public void testGetWithDelete() throws IOException { - byte [] row = Bytes.toBytes("testrow"); - byte [] fam = Bytes.toBytes("testfamily"); - byte [] qf1 = Bytes.toBytes("testqualifier"); - byte [] val = Bytes.toBytes("testval"); + byte[] row = Bytes.toBytes("testrow"); + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf1 = Bytes.toBytes("testqualifier"); + byte[] val = Bytes.toBytes("testval"); long ts1 = System.nanoTime(); KeyValue put1 = new KeyValue(row, fam, qf1, ts1, val); @@ -692,17 +679,17 @@ public void testGetWithDelete() throws IOException { assertEquals(4, memstore.getActive().getCellsCount()); int i = 0; - for(Cell cell : memstore.getActive().getCellSet()) { + for (Cell cell : memstore.getActive().getCellSet()) { assertEquals(expected.get(i++), cell); } } @Test public void testGetWithDeleteColumn() throws IOException { - byte [] row = Bytes.toBytes("testrow"); - byte [] fam = Bytes.toBytes("testfamily"); - byte [] qf1 = Bytes.toBytes("testqualifier"); - byte [] val = Bytes.toBytes("testval"); + byte[] row = Bytes.toBytes("testrow"); + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf1 = Bytes.toBytes("testqualifier"); + byte[] val = Bytes.toBytes("testval"); long ts1 = System.nanoTime(); KeyValue put1 = new KeyValue(row, fam, qf1, ts1, val); @@ -716,8 +703,7 @@ public void testGetWithDeleteColumn() throws IOException { assertEquals(3, memstore.getActive().getCellsCount()); - KeyValue del2 = - new KeyValue(row, fam, qf1, ts2, KeyValue.Type.DeleteColumn, val); + KeyValue del2 = new KeyValue(row, fam, qf1, ts2, KeyValue.Type.DeleteColumn, val); memstore.add(del2, null); List expected = new ArrayList<>(); @@ -735,26 +721,25 @@ public void testGetWithDeleteColumn() throws IOException { @Test public void testGetWithDeleteFamily() throws IOException { - byte [] row = Bytes.toBytes("testrow"); - byte [] fam = Bytes.toBytes("testfamily"); - byte [] qf1 = Bytes.toBytes("testqualifier1"); - byte [] qf2 = Bytes.toBytes("testqualifier2"); - byte [] qf3 = Bytes.toBytes("testqualifier3"); - byte [] val = Bytes.toBytes("testval"); + byte[] row = Bytes.toBytes("testrow"); + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf1 = Bytes.toBytes("testqualifier1"); + byte[] qf2 = Bytes.toBytes("testqualifier2"); + byte[] qf3 = Bytes.toBytes("testqualifier3"); + byte[] val = Bytes.toBytes("testval"); long ts = System.nanoTime(); KeyValue put1 = new KeyValue(row, fam, qf1, ts, val); KeyValue put2 = new KeyValue(row, fam, qf2, ts, val); KeyValue put3 = new KeyValue(row, fam, qf3, ts, val); - KeyValue put4 = new KeyValue(row, fam, qf3, ts+1, val); + KeyValue put4 = new KeyValue(row, fam, qf3, ts + 1, val); memstore.add(put1, null); memstore.add(put2, null); memstore.add(put3, null); memstore.add(put4, null); - KeyValue del = - new KeyValue(row, fam, null, ts, KeyValue.Type.DeleteFamily, val); + KeyValue del = new KeyValue(row, fam, null, ts, KeyValue.Type.DeleteFamily, val); memstore.add(del, null); List expected = new ArrayList<>(); @@ -773,10 +758,10 @@ public void testGetWithDeleteFamily() throws IOException { @Test public void testKeepDeleteInmemstore() { - byte [] row = Bytes.toBytes("testrow"); - byte [] fam = Bytes.toBytes("testfamily"); - byte [] qf = Bytes.toBytes("testqualifier"); - byte [] val = 
Bytes.toBytes("testval"); + byte[] row = Bytes.toBytes("testrow"); + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf = Bytes.toBytes("testqualifier"); + byte[] val = Bytes.toBytes("testval"); long ts = System.nanoTime(); memstore.add(new KeyValue(row, fam, qf, ts, val), null); KeyValue delete = new KeyValue(row, fam, qf, ts, KeyValue.Type.Delete, val); @@ -791,8 +776,8 @@ public void testRetainsDeleteVersion() throws IOException { memstore.add(KeyValueTestUtil.create("row1", "fam", "a", 100, "dont-care"), null); // now process a specific delete: - KeyValue delete = KeyValueTestUtil.create( - "row1", "fam", "a", 100, KeyValue.Type.Delete, "dont-care"); + KeyValue delete = + KeyValueTestUtil.create("row1", "fam", "a", 100, KeyValue.Type.Delete, "dont-care"); memstore.add(delete, null); assertEquals(2, memstore.getActive().getCellsCount()); @@ -805,8 +790,8 @@ public void testRetainsDeleteColumn() throws IOException { memstore.add(KeyValueTestUtil.create("row1", "fam", "a", 100, "dont-care"), null); // now process a specific delete: - KeyValue delete = KeyValueTestUtil.create("row1", "fam", "a", 100, - KeyValue.Type.DeleteColumn, "dont-care"); + KeyValue delete = + KeyValueTestUtil.create("row1", "fam", "a", 100, KeyValue.Type.DeleteColumn, "dont-care"); memstore.add(delete, null); assertEquals(2, memstore.getActive().getCellsCount()); @@ -819,8 +804,8 @@ public void testRetainsDeleteFamily() throws IOException { memstore.add(KeyValueTestUtil.create("row1", "fam", "a", 100, "dont-care"), null); // now process a specific delete: - KeyValue delete = KeyValueTestUtil.create("row1", "fam", "a", 100, - KeyValue.Type.DeleteFamily, "dont-care"); + KeyValue delete = + KeyValueTestUtil.create("row1", "fam", "a", 100, KeyValue.Type.DeleteFamily, "dont-care"); memstore.add(delete, null); assertEquals(2, memstore.getActive().getCellsCount()); @@ -830,14 +815,13 @@ public void testRetainsDeleteFamily() throws IOException { ////////////////////////////////////////////////////////////////////////////// // Helpers ////////////////////////////////////////////////////////////////////////////// - private static byte [] makeQualifier(final int i1, final int i2){ - return Bytes.toBytes(Integer.toString(i1) + ";" + - Integer.toString(i2)); + private static byte[] makeQualifier(final int i1, final int i2) { + return Bytes.toBytes(Integer.toString(i1) + ";" + Integer.toString(i2)); } /** - * Add keyvalues with a fixed memstoreTs, and checks that memstore size is decreased - * as older keyvalues are deleted from the memstore. + * Add keyvalues with a fixed memstoreTs, and checks that memstore size is decreased as older + * keyvalues are deleted from the memstore. * @throws Exception */ @Test @@ -851,23 +835,28 @@ public void testUpsertMemstoreSize() throws Exception { KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v"); KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v"); - kv1.setSequenceId(1); kv2.setSequenceId(1);kv3.setSequenceId(1); - l.add(kv1); l.add(kv2); l.add(kv3); + kv1.setSequenceId(1); + kv2.setSequenceId(1); + kv3.setSequenceId(1); + l.add(kv1); + l.add(kv2); + l.add(kv3); this.memstore.upsert(l, 2, null);// readpoint is 2 MemStoreSize newSize = this.memstore.size(); assert (newSize.getDataSize() > oldSize.getDataSize()); - //The kv1 should be removed. - assert(memstore.getActive().getCellsCount() == 2); + // The kv1 should be removed. 
+ assert (memstore.getActive().getCellsCount() == 2); KeyValue kv4 = KeyValueTestUtil.create("r", "f", "q", 104, "v"); kv4.setSequenceId(1); - l.clear(); l.add(kv4); + l.clear(); + l.add(kv4); this.memstore.upsert(l, 3, null); assertEquals(newSize, this.memstore.size()); - //The kv2 should be removed. - assert(memstore.getActive().getCellsCount() == 2); - //this.memstore = null; + // The kv2 should be removed. + assert (memstore.getActive().getCellsCount() == 2); + // this.memstore = null; } //////////////////////////////////// @@ -876,8 +865,8 @@ public void testUpsertMemstoreSize() throws Exception { //////////////////////////////////// /** - * Tests that the timeOfOldestEdit is updated correctly for the - * various edit operations in memstore. + * Tests that the timeOfOldestEdit is updated correctly for the various edit operations in + * memstore. * @throws Exception */ @Test @@ -917,10 +906,9 @@ public void testUpdateToTimeOfOldestEdit() throws Exception { } /** - * Tests the HRegion.shouldFlush method - adds an edit in the memstore - * and checks that shouldFlush returns true, and another where it disables - * the periodic flush functionality and tests whether shouldFlush returns - * false. + * Tests the HRegion.shouldFlush method - adds an edit in the memstore and checks that shouldFlush + * returns true, and another where it disables the periodic flush functionality and tests whether + * shouldFlush returns false. * @throws Exception */ @Test @@ -971,9 +959,8 @@ public void testShouldFlushMeta() throws Exception { WALFactory wFactory = new WALFactory(conf, "1234"); TableDescriptors tds = new FSTableDescriptors(conf); FSTableDescriptors.tryUpdateMetaTableDescriptor(conf); - HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir, - conf, tds.get(TableName.META_TABLE_NAME), - wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO)); + HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir, conf, + tds.get(TableName.META_TABLE_NAME), wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO)); // parameterized tests add [#] suffix get rid of [ and ]. TableDescriptor desc = TableDescriptorBuilder .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_"))) @@ -1000,10 +987,10 @@ private static void addRegionToMETA(final HRegion meta, final HRegion r) throws final long now = EnvironmentEdgeManager.currentTime(); final List cells = new ArrayList<>(2); cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, now, - RegionInfo.toByteArray(r.getRegionInfo()))); + RegionInfo.toByteArray(r.getRegionInfo()))); // Set into the root table the version of the meta table. 
cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER, now, - Bytes.toBytes(HConstants.META_VERSION))); + Bytes.toBytes(HConstants.META_VERSION))); NavigableMap> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); familyMap.put(HConstants.CATALOG_FAMILY, cells); meta.put(new Put(row, HConstants.LATEST_TIMESTAMP, familyMap)); @@ -1011,10 +998,12 @@ private static void addRegionToMETA(final HRegion meta, final HRegion r) throws private class EnvironmentEdgeForMemstoreTest implements EnvironmentEdge { long t = 1234; + @Override public long currentTime() { return t; } + public void setCurrentTimeMillis(long t) { this.t = t; } @@ -1038,11 +1027,11 @@ protected int addRows(final AbstractMemStore hmc) { */ protected int addRows(final MemStore hmc, final long ts) { for (int i = 0; i < ROW_COUNT; i++) { - long timestamp = ts == HConstants.LATEST_TIMESTAMP ? - EnvironmentEdgeManager.currentTime() : ts; + long timestamp = + ts == HConstants.LATEST_TIMESTAMP ? EnvironmentEdgeManager.currentTime() : ts; for (int ii = 0; ii < QUALIFIER_COUNT; ii++) { - byte [] row = Bytes.toBytes(i); - byte [] qf = makeQualifier(i, ii); + byte[] row = Bytes.toBytes(i); + byte[] qf = makeQualifier(i, ii); hmc.add(new KeyValue(row, FAMILY, qf, timestamp, qf), null); } } @@ -1054,23 +1043,22 @@ private long runSnapshot(final AbstractMemStore hmc) throws UnexpectedStateExcep int oldHistorySize = hmc.getSnapshot().getCellsCount(); MemStoreSnapshot snapshot = hmc.snapshot(); // Make some assertions about what just happened. - assertTrue("History size has not increased", oldHistorySize < hmc.getSnapshot().getCellsCount - ()); + assertTrue("History size has not increased", + oldHistorySize < hmc.getSnapshot().getCellsCount()); long t = memstore.timeOfOldestEdit(); assertTrue("Time of oldest edit is not Long.MAX_VALUE", t == Long.MAX_VALUE); hmc.clearSnapshot(snapshot.getId()); return t; } - private void isExpectedRowWithoutTimestamps(final int rowIndex, - List kvs) { + private void isExpectedRowWithoutTimestamps(final int rowIndex, List kvs) { int i = 0; for (Cell kv : kvs) { byte[] expectedColname = makeQualifier(rowIndex, i++); assertTrue("Column name", CellUtil.matchingQualifier(kv, expectedColname)); - // Value is column name as bytes. Usually result is + // Value is column name as bytes. Usually result is // 100 bytes in size at least. This is the default size - // for BytesWriteable. For comparison, convert bytes to + // for BytesWriteable. For comparison, convert bytes to // String and trim to remove trailing null bytes. 
assertTrue("Content", CellUtil.matchingValue(kv, expectedColname)); } @@ -1079,17 +1067,17 @@ private void isExpectedRowWithoutTimestamps(final int rowIndex, private static void addRows(int count, final MemStore mem) { long nanos = System.nanoTime(); - for (int i = 0 ; i < count ; i++) { + for (int i = 0; i < count; i++) { if (i % 1000 == 0) { - System.out.println(i + " Took for 1k usec: " + (System.nanoTime() - nanos)/1000); + System.out.println(i + " Took for 1k usec: " + (System.nanoTime() - nanos) / 1000); nanos = System.nanoTime(); } long timestamp = System.currentTimeMillis(); - for (int ii = 0; ii < QUALIFIER_COUNT ; ii++) { - byte [] row = Bytes.toBytes(i); - byte [] qf = makeQualifier(i, ii); + for (int ii = 0; ii < QUALIFIER_COUNT; ii++) { + byte[] row = Bytes.toBytes(i); + byte[] qf = makeQualifier(i, ii); mem.add(new KeyValue(row, FAMILY, qf, timestamp, qf), null); } } @@ -1098,28 +1086,28 @@ private static void addRows(int count, final MemStore mem) { static void doScan(MemStore ms, int iteration) throws IOException { long nanos = System.nanoTime(); KeyValueScanner s = ms.getScanners(0).get(0); - s.seek(KeyValueUtil.createFirstOnRow(new byte[]{})); + s.seek(KeyValueUtil.createFirstOnRow(new byte[] {})); - System.out.println(iteration + " create/seek took: " + (System.nanoTime() - nanos)/1000); - int cnt=0; - while(s.next() != null) ++cnt; + System.out.println(iteration + " create/seek took: " + (System.nanoTime() - nanos) / 1000); + int cnt = 0; + while (s.next() != null) + ++cnt; - System.out.println(iteration + " took usec: " + (System.nanoTime() - nanos) / 1000 + " for: " - + cnt); + System.out + .println(iteration + " took usec: " + (System.nanoTime() - nanos) / 1000 + " for: " + cnt); } - public static void main(String [] args) throws IOException { + public static void main(String[] args) throws IOException { MemStore ms = new DefaultMemStore(); long n1 = System.nanoTime(); addRows(25000, ms); - System.out.println("Took for insert: " + (System.nanoTime()-n1)/1000); + System.out.println("Took for insert: " + (System.nanoTime() - n1) / 1000); System.out.println("foo"); - for (int i = 0 ; i < 50 ; i++) + for (int i = 0; i < 50; i++) doScan(ms, i); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java index 523f27782362..47d3e20cb3b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultStoreEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestDefaultStoreEngine { @ClassRule @@ -62,16 +62,14 @@ public void testCustomParts() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.set(DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY, DummyCompactor.class.getName()); conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, - DummyCompactionPolicy.class.getName()); - conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, - DummyStoreFlusher.class.getName()); + DummyCompactionPolicy.class.getName()); + conf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, DummyStoreFlusher.class.getName()); HRegion mockRegion = Mockito.mock(HRegion.class); HStore mockStore = Mockito.mock(HStore.class); mockStore.conf = conf; Mockito.when(mockStore.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); Mockito.when(mockStore.getHRegion()).thenReturn(mockRegion); - StoreEngine se = - StoreEngine.create(mockStore, conf, CellComparatorImpl.COMPARATOR); + StoreEngine se = StoreEngine.create(mockStore, conf, CellComparatorImpl.COMPARATOR); Assert.assertTrue(se instanceof DefaultStoreEngine); Assert.assertTrue(se.getCompactionPolicy() instanceof DummyCompactionPolicy); Assert.assertTrue(se.getStoreFlusher() instanceof DummyStoreFlusher); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java index d685ad5bb9c6..9377e5dd1360 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -75,9 +74,7 @@ public static void tearDownAfterClass() throws Exception { /** * Generate the mob value. 
- * - * @param size - * the size of the value + * @param size the size of the value * @return the mob value generated */ private static byte[] generateMobValue(int size) { @@ -170,7 +167,7 @@ public void testMobFamilyDelete() throws Exception { TableDescriptor tableDescriptor = createTableDescriptor(tableName, true); ColumnFamilyDescriptor familyDescriptor = tableDescriptor.getColumnFamily(FAMILY); tableDescriptor = TableDescriptorBuilder.newBuilder(tableDescriptor) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family2"))).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family2"))).build(); Table table = createTableWithOneFile(tableDescriptor); try { @@ -202,11 +199,10 @@ private int countMobFiles(TableName tn, String familyName) throws IOException { return 0; } - private int countArchiveMobFiles(TableName tn, String familyName) - throws IOException { + private int countArchiveMobFiles(TableName tn, String familyName) throws IOException { FileSystem fs = TEST_UTIL.getTestFileSystem(); Path storePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn, - MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName); + MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName); if (fs.exists(storePath)) { return fs.listStatus(storePath).length; } @@ -216,7 +212,7 @@ private int countArchiveMobFiles(TableName tn, String familyName) private boolean mobTableDirExist(TableName tn) throws IOException { FileSystem fs = TEST_UTIL.getTestFileSystem(); Path tableDir = - CommonFSUtils.getTableDir(MobUtils.getMobHome(TEST_UTIL.getConfiguration()), tn); + CommonFSUtils.getTableDir(MobUtils.getMobHome(TEST_UTIL.getConfiguration()), tn); return fs.exists(tableDir); } @@ -230,7 +226,7 @@ private boolean mobArchiveExist(TableName tn, String familyName, String fileName throws IOException { FileSystem fs = TEST_UTIL.getTestFileSystem(); Path storePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn, - MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName); + MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName); return fs.exists(new Path(storePath, fileName)); } @@ -242,8 +238,8 @@ private String assertHasOneMobRow(Table table, TableName tn, String familyName) Result r = rs.next(); Assert.assertNotNull(r); String fileName = MobUtils.getMobFileName(r.getColumnLatestCell(FAMILY, QF)); - Path filePath = new Path( - MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName), fileName); + Path filePath = + new Path(MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName), fileName); FileSystem fs = TEST_UTIL.getTestFileSystem(); Assert.assertTrue(fs.exists(filePath)); r = rs.next(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDirectStoreSplitsMerges.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDirectStoreSplitsMerges.java index 0eba8aa541ce..d1289de2bb3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDirectStoreSplitsMerges.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDirectStoreSplitsMerges.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -47,8 +46,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; - -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestDirectStoreSplitsMerges { @ClassRule @@ -72,27 +70,25 @@ public static void after() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test public void testSplitStoreDir() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); TEST_UTIL.createTable(table, FAMILY_NAME); - //first put some data in order to have a store file created + // first put some data in order to have a store file created putThreeRowsAndFlush(table); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0); HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem(); - RegionInfo daughterA = - RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo().getStartKey()). - setEndKey(Bytes.toBytes("002")).setSplit(false) - .setRegionId(region.getRegionInfo().getRegionId() + - EnvironmentEdgeManager.currentTime()).build(); + RegionInfo daughterA = RegionInfoBuilder.newBuilder(table) + .setStartKey(region.getRegionInfo().getStartKey()).setEndKey(Bytes.toBytes("002")) + .setSplit(false) + .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) + .build(); HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; - Path result = regionFS - .splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), false, region.getSplitPolicy()); - //asserts the reference file naming is correct + Path result = regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file, + Bytes.toBytes("002"), false, region.getSplitPolicy()); + // asserts the reference file naming is correct validateResultingFile(region.getRegionInfo().getEncodedName(), result); - //Additionally check if split region dir was created directly under table dir, not on .tmp + // Additionally check if split region dir was created directly under table dir, not on .tmp Path resultGreatGrandParent = result.getParent().getParent().getParent(); assertEquals(regionFS.getTableDir().getName(), resultGreatGrandParent.getName()); } @@ -101,10 +97,10 @@ public void testSplitStoreDir() throws Exception { public void testMergeStoreFile() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); TEST_UTIL.createTable(table, FAMILY_NAME); - //splitting the table first + // splitting the table first TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002")); waitForSplitProcComplete(1000, 10); - //Add data and flush to create files in the two different regions + // Add data and flush to create files in the two different regions putThreeRowsAndFlush(table); List regions = TEST_UTIL.getHBaseCluster().getRegions(table); HRegion first = regions.get(0); @@ -112,19 +108,19 @@ public void testMergeStoreFile() throws Exception { HRegionFileSystem regionFS = first.getRegionFileSystem(); RegionInfo mergeResult = - RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey()) - .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false) - 
.setRegionId(first.getRegionInfo().getRegionId() + - EnvironmentEdgeManager.currentTime()).build(); + RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey()) + .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false) + .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) + .build(); - HRegionFileSystem mergeRegionFs = HRegionFileSystem - .createRegionOnFileSystem(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), - regionFS.getFileSystem(), regionFS.getTableDir(), mergeResult); + HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem( + TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), regionFS.getFileSystem(), + regionFS.getTableDir(), mergeResult); - //merge file from first region + // merge file from first region HStoreFile file = (HStoreFile) first.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; mergeFileFromRegion(mergeRegionFs, first, file); - //merge file from second region + // merge file from second region file = (HStoreFile) second.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; mergeFileFromRegion(mergeRegionFs, second, file); } @@ -135,14 +131,14 @@ public void testCommitDaughterRegionNoFiles() throws Exception { TEST_UTIL.createTable(table, FAMILY_NAME); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0); HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem(); - RegionInfo daughterA = - RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo(). - getStartKey()).setEndKey(Bytes.toBytes("002")).setSplit(false). - setRegionId(region.getRegionInfo().getRegionId() + - EnvironmentEdgeManager.currentTime()).build(); + RegionInfo daughterA = RegionInfoBuilder.newBuilder(table) + .setStartKey(region.getRegionInfo().getStartKey()).setEndKey(Bytes.toBytes("002")) + .setSplit(false) + .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) + .build(); Path splitDir = regionFS.getSplitsDir(daughterA); - MasterProcedureEnv env = TEST_UTIL.getMiniHBaseCluster().getMaster(). - getMasterProcedureExecutor().getEnvironment(); + MasterProcedureEnv env = + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); Path result = regionFS.commitDaughterRegion(daughterA, new ArrayList<>(), env); assertEquals(splitDir, result); } @@ -151,31 +147,29 @@ public void testCommitDaughterRegionNoFiles() throws Exception { public void testCommitDaughterRegionWithFiles() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); TEST_UTIL.createTable(table, FAMILY_NAME); - //first put some data in order to have a store file created + // first put some data in order to have a store file created putThreeRowsAndFlush(table); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0); HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem(); - RegionInfo daughterA = - RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo().getStartKey()). - setEndKey(Bytes.toBytes("002")).setSplit(false). 
- setRegionId(region.getRegionInfo().getRegionId() + - EnvironmentEdgeManager.currentTime()).build(); + RegionInfo daughterA = RegionInfoBuilder.newBuilder(table) + .setStartKey(region.getRegionInfo().getStartKey()).setEndKey(Bytes.toBytes("002")) + .setSplit(false) + .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) + .build(); RegionInfo daughterB = RegionInfoBuilder.newBuilder(table).setStartKey(Bytes.toBytes("002")) - .setEndKey(region.getRegionInfo().getEndKey()).setSplit(false) - .setRegionId(region.getRegionInfo().getRegionId()).build(); + .setEndKey(region.getRegionInfo().getEndKey()).setSplit(false) + .setRegionId(region.getRegionInfo().getRegionId()).build(); Path splitDirA = regionFS.getSplitsDir(daughterA); Path splitDirB = regionFS.getSplitsDir(daughterB); HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; List filesA = new ArrayList<>(); - filesA.add(regionFS - .splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), false, region.getSplitPolicy())); + filesA.add(regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file, + Bytes.toBytes("002"), false, region.getSplitPolicy())); List filesB = new ArrayList<>(); - filesB.add(regionFS - .splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), true, region.getSplitPolicy())); - MasterProcedureEnv env = TEST_UTIL.getMiniHBaseCluster().getMaster(). - getMasterProcedureExecutor().getEnvironment(); + filesB.add(regionFS.splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file, + Bytes.toBytes("002"), true, region.getSplitPolicy())); + MasterProcedureEnv env = + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); Path resultA = regionFS.commitDaughterRegion(daughterA, filesA, env); Path resultB = regionFS.commitDaughterRegion(daughterB, filesB, env); assertEquals(splitDirA, resultA); @@ -186,10 +180,10 @@ public void testCommitDaughterRegionWithFiles() throws Exception { public void testCommitMergedRegion() throws Exception { TableName table = TableName.valueOf(name.getMethodName()); TEST_UTIL.createTable(table, FAMILY_NAME); - //splitting the table first + // splitting the table first TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002")); waitForSplitProcComplete(1000, 10); - //Add data and flush to create files in the two different regions + // Add data and flush to create files in the two different regions putThreeRowsAndFlush(table); List regions = TEST_UTIL.getHBaseCluster().getRegions(table); HRegion first = regions.get(0); @@ -197,32 +191,32 @@ public void testCommitMergedRegion() throws Exception { HRegionFileSystem regionFS = first.getRegionFileSystem(); RegionInfo mergeResult = - RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey()) - .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false) - .setRegionId(first.getRegionInfo().getRegionId() + - EnvironmentEdgeManager.currentTime()).build(); + RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey()) + .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false) + .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) + .build(); - HRegionFileSystem mergeRegionFs = HRegionFileSystem - .createRegionOnFileSystem(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), - regionFS.getFileSystem(), regionFS.getTableDir(), mergeResult); + HRegionFileSystem mergeRegionFs = 
HRegionFileSystem.createRegionOnFileSystem( + TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), regionFS.getFileSystem(), + regionFS.getTableDir(), mergeResult); - //merge file from first region + // merge file from first region HStoreFile file = (HStoreFile) first.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; mergeFileFromRegion(mergeRegionFs, first, file); - //merge file from second region + // merge file from second region file = (HStoreFile) second.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; List mergedFiles = new ArrayList<>(); mergedFiles.add(mergeFileFromRegion(mergeRegionFs, second, file)); - MasterProcedureEnv env = TEST_UTIL.getMiniHBaseCluster().getMaster(). - getMasterProcedureExecutor().getEnvironment(); + MasterProcedureEnv env = + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); mergeRegionFs.commitMergedRegion(mergedFiles, env); } private void waitForSplitProcComplete(int attempts, int waitTime) throws Exception { List> procedures = TEST_UTIL.getHBaseCluster().getMaster().getProcedures(); - if(procedures.size()>0) { - Procedure splitProc = procedures.stream(). - filter(p -> p instanceof SplitTableRegionProcedure).findFirst().get(); + if (procedures.size() > 0) { + Procedure splitProc = + procedures.stream().filter(p -> p instanceof SplitTableRegionProcedure).findFirst().get(); int count = 0; while ((splitProc.isWaiting() || splitProc.isRunnable()) && count < attempts) { synchronized (splitProc) { @@ -236,15 +230,15 @@ private void waitForSplitProcComplete(int attempts, int waitTime) throws Excepti private Path mergeFileFromRegion(HRegionFileSystem regionFS, HRegion regionToMerge, HStoreFile file) throws IOException { - Path mergedFile = regionFS.mergeStoreFile(regionToMerge.getRegionInfo(), - Bytes.toString(FAMILY_NAME), file); + Path mergedFile = + regionFS.mergeStoreFile(regionToMerge.getRegionInfo(), Bytes.toString(FAMILY_NAME), file); validateResultingFile(regionToMerge.getRegionInfo().getEncodedName(), mergedFile); return mergedFile; } - private void validateResultingFile(String originalRegion, Path result){ + private void validateResultingFile(String originalRegion, Path result) { assertEquals(originalRegion, result.getName().split("\\.")[1]); - //asserts we are under the cf directory + // asserts we are under the cf directory Path resultParent = result.getParent(); assertEquals(Bytes.toString(FAMILY_NAME), resultParent.getName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionDisabled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionDisabled.java index 87ceca7518c5..4e42bdfabf90 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionDisabled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionDisabled.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
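Aside on the TestDirectStoreSplitsMerges hunks above: the daughter and merged regions are described entirely with RegionInfoBuilder before any store files are moved. A rough sketch of that builder pattern, under the same assumptions the test makes (split point "002", a region id derived from the parent's):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class DaughterRegionSketch {
  // Describes daughter region "A" of a split at key "002": it keeps the parent's start key
  // and gets a region id that differs from the parent's.
  static RegionInfo daughterA(TableName table, RegionInfo parent) {
    return RegionInfoBuilder.newBuilder(table)
      .setStartKey(parent.getStartKey())
      .setEndKey(Bytes.toBytes("002"))
      .setSplit(false)
      .setRegionId(parent.getRegionId() + EnvironmentEdgeManager.currentTime())
      .build();
  }
}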
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.ExpectedException; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestEncryptionDisabled { @ClassRule @@ -53,7 +53,6 @@ public class TestEncryptionDisabled { private static Configuration conf = TEST_UTIL.getConfiguration(); private static TableDescriptorBuilder tdb; - @BeforeClass public static void setUp() throws Exception { conf.setInt("hfile.format.version", 3); @@ -75,10 +74,10 @@ public static void tearDown() throws Exception { public void testEncryptedTableShouldNotBeCreatedWhenEncryptionDisabled() throws Exception { // Create the table schema // Specify an encryption algorithm without a key (normally HBase would generate a random key) - tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf("default", - "TestEncryptionDisabledFail")); + tdb = TableDescriptorBuilder + .newBuilder(TableName.valueOf("default", "TestEncryptionDisabledFail")); ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); columnFamilyDescriptorBuilder.setEncryptionType(algorithm); tdb.setColumnFamily(columnFamilyDescriptorBuilder.build()); @@ -92,10 +91,10 @@ public void testEncryptedTableShouldNotBeCreatedWhenEncryptionDisabled() throws @Test public void testNonEncryptedTableShouldBeCreatedWhenEncryptionDisabled() throws Exception { // Create the table schema - tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf("default", - "TestEncryptionDisabledSuccess")); + tdb = TableDescriptorBuilder + .newBuilder(TableName.valueOf("default", "TestEncryptionDisabledSuccess")); ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); tdb.setColumnFamily(columnFamilyDescriptorBuilder.build()); // Create the test table, this should succeed, as we don't use encryption diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java index fcda02eb0ae0..e6615f1bb326 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; - import javax.crypto.spec.SecretKeySpec; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -59,7 +58,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestEncryptionKeyRotation { @ClassRule @@ -78,8 +77,7 @@ public class TestEncryptionKeyRotation { // Create the test encryption keys byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); initialCFKey = new SecretKeySpec(keyBytes, algorithm); Bytes.secureRandom(keyBytes); secondCFKey = new SecretKeySpec(keyBytes, algorithm); @@ -104,14 +102,13 @@ public static void tearDown() throws Exception { public void testCFKeyRotation() throws Exception { // Create the table schema TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf("default", name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf("default", name.getMethodName())); ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); columnFamilyDescriptorBuilder.setEncryptionType(algorithm); - columnFamilyDescriptorBuilder.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", - initialCFKey)); + columnFamilyDescriptorBuilder + .setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey)); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build()); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); @@ -121,7 +118,7 @@ public void testCFKeyRotation() throws Exception { // Verify we have store file(s) with the initial key final List initialPaths = findStorefilePaths(tableDescriptor.getTableName()); assertTrue(initialPaths.size() > 0); - for (Path path: initialPaths) { + for (Path path : initialPaths) { assertTrue("Store file " + path + " has incorrect key", Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path))); } @@ -140,19 +137,19 @@ public void testCFKeyRotation() throws Exception { TEST_UTIL.waitFor(30000, new Waiter.Predicate() { @Override public boolean evaluate() throws IOException { - return TEST_UTIL.getAdmin().getCompactionState(tableDescriptor - .getTableName()) == CompactionState.NONE; + return TEST_UTIL.getAdmin() + .getCompactionState(tableDescriptor.getTableName()) == CompactionState.NONE; } }); List pathsAfterCompaction = findStorefilePaths(tableDescriptor.getTableName()); assertTrue(pathsAfterCompaction.size() > 0); - for (Path path: pathsAfterCompaction) { + for (Path path : pathsAfterCompaction) { assertTrue("Store file " + path + " has incorrect key", Bytes.equals(secondCFKey.getEncoded(), extractHFileKey(path))); } List compactedPaths = findCompactedStorefilePaths(tableDescriptor.getTableName()); assertTrue(compactedPaths.size() > 0); - for (Path path: 
compactedPaths) { + for (Path path : compactedPaths) { assertTrue("Store file " + path + " retains initial key", Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path))); } @@ -162,14 +159,13 @@ public boolean evaluate() throws IOException { public void testMasterKeyRotation() throws Exception { // Create the table schema TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf("default", name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf("default", name.getMethodName())); ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); columnFamilyDescriptorBuilder.setEncryptionType(algorithm); - columnFamilyDescriptorBuilder.setEncryptionKey( - EncryptionUtil.wrapKey(conf, "hbase", initialCFKey)); + columnFamilyDescriptorBuilder + .setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey)); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build()); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); @@ -179,7 +175,7 @@ public void testMasterKeyRotation() throws Exception { // Verify we have store file(s) with the initial key List storeFilePaths = findStorefilePaths(tableDescriptor.getTableName()); assertTrue(storeFilePaths.size() > 0); - for (Path path: storeFilePaths) { + for (Path path : storeFilePaths) { assertTrue("Store file " + path + " has incorrect key", Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path))); } @@ -198,7 +194,7 @@ public void testMasterKeyRotation() throws Exception { // Double check that the store file keys can be unwrapped storeFilePaths = findStorefilePaths(tableDescriptor.getTableName()); assertTrue(storeFilePaths.size() > 0); - for (Path path: storeFilePaths) { + for (Path path : storeFilePaths) { assertTrue("Store file " + path + " has incorrect key", Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path))); } @@ -206,8 +202,7 @@ public void testMasterKeyRotation() throws Exception { private static List findStorefilePaths(TableName tableName) throws Exception { List paths = new ArrayList<>(); - for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName) - .getRegions(tableName)) { + for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegions(tableName)) { for (HStore store : ((HRegion) region).getStores()) { for (HStoreFile storefile : store.getStorefiles()) { paths.add(storefile.getPath()); @@ -219,8 +214,7 @@ private static List findStorefilePaths(TableName tableName) throws Excepti private static List findCompactedStorefilePaths(TableName tableName) throws Exception { List paths = new ArrayList<>(); - for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName) - .getRegions(tableName)) { + for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegions(tableName)) { for (HStore store : ((HRegion) region).getStores()) { Collection compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles(); @@ -242,8 +236,8 @@ private void createTableAndFlush(TableDescriptor tableDescriptor) throws Excepti // Create a store file Table table = TEST_UTIL.getConnection().getTable(tableDescriptor.getTableName()); try { - table.put(new 
Put(Bytes.toBytes("testrow")) - .addColumn(cfd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value"))); + table.put(new Put(Bytes.toBytes("testrow")).addColumn(cfd.getName(), Bytes.toBytes("q"), + Bytes.toBytes("value"))); } finally { table.close(); } @@ -251,8 +245,8 @@ private void createTableAndFlush(TableDescriptor tableDescriptor) throws Excepti } private static byte[] extractHFileKey(Path path) throws Exception { - HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path, - new CacheConfig(conf), true, conf); + HFile.Reader reader = + HFile.createReader(TEST_UTIL.getTestFileSystem(), path, new CacheConfig(conf), true, conf); try { Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext(); assertNotNull("Reader has a null crypto context", cryptoContext); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java index 94901bca9bd4..b9d2c947cd98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestEncryptionRandomKeying { @ClassRule @@ -59,8 +59,8 @@ public class TestEncryptionRandomKeying { private static List findStorefilePaths(TableName tableName) throws Exception { List paths = new ArrayList<>(); - for (Region region: - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegions(tdb.build().getTableName())) { + for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName) + .getRegions(tdb.build().getTableName())) { for (HStore store : ((HRegion) region).getStores()) { for (HStoreFile storefile : store.getStorefiles()) { paths.add(storefile.getPath()); @@ -71,8 +71,8 @@ private static List findStorefilePaths(TableName tableName) throws Excepti } private static byte[] extractHFileKey(Path path) throws Exception { - HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path, - new CacheConfig(conf), true, conf); + HFile.Reader reader = + HFile.createReader(TEST_UTIL.getTestFileSystem(), path, new CacheConfig(conf), true, conf); try { Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext(); assertNotNull("Reader has a null crypto context", cryptoContext); @@ -94,10 +94,10 @@ public static void setUp() throws Exception { // Create the table schema // Specify an encryption algorithm without a key - tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf("default", - "TestEncryptionRandomKeying")); + tdb = TableDescriptorBuilder + .newBuilder(TableName.valueOf("default", "TestEncryptionRandomKeying")); ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); columnFamilyDescriptorBuilder.setEncryptionType(algorithm); 
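Aside on the encryption hunks above (TestEncryptionKeyRotation, TestEncryptionRandomKeying): a column family is encrypted by setting an encryption type and, optionally, an explicitly wrapped data key on its descriptor. A rough sketch of the explicit-key variant, mirroring only calls that appear in these tests:

import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class EncryptedFamilySketch {
  // Builds a "cf" column family with an explicit AES data key, wrapped the same way the
  // key-rotation test wraps its initial key ("hbase" is the subject string the tests use).
  static ColumnFamilyDescriptor encryptedFamily(Configuration conf) throws Exception {
    byte[] keyBytes = new byte[AES.KEY_LENGTH];
    Bytes.secureRandom(keyBytes);
    String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
    SecretKeySpec cfKey = new SecretKeySpec(keyBytes, algorithm);
    ColumnFamilyDescriptorBuilder builder =
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"));
    builder.setEncryptionType(algorithm);
    builder.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", cfKey));
    return builder.build();
  }
}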
tdb.setColumnFamily(columnFamilyDescriptorBuilder.build()); @@ -112,8 +112,8 @@ public static void setUp() throws Exception { // Create a store file Table table = TEST_UTIL.getConnection().getTable(tdb.build().getTableName()); try { - table.put(new Put(Bytes.toBytes("testrow")) - .addColumn(columnFamilyDescriptorBuilder.build().getName(), + table.put( + new Put(Bytes.toBytes("testrow")).addColumn(columnFamilyDescriptorBuilder.build().getName(), Bytes.toBytes("q"), Bytes.toBytes("value"))); } finally { table.close(); @@ -131,7 +131,7 @@ public void testRandomKeying() throws Exception { // Verify we have store file(s) with a random key final List initialPaths = findStorefilePaths(tdb.build().getTableName()); assertTrue(initialPaths.size() > 0); - for (Path path: initialPaths) { + for (Path path : initialPaths) { assertNotNull("Store file " + path + " is not encrypted", extractHFileKey(path)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index e5722d53bcb1..3f982fecce76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -82,7 +82,7 @@ public class TestEndToEndSplitTransaction { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestEndToEndSplitTransaction.class); + HBaseClassTestRule.forClass(TestEndToEndSplitTransaction.class); private static final Logger LOG = LoggerFactory.getLogger(TestEndToEndSplitTransaction.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -112,13 +112,13 @@ public void testCanSplitJustAfterASplit() throws Exception { byte[] fam = Bytes.toBytes("cf_split"); CompactSplit compactSplit = - TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getCompactSplitThread(); + TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getCompactSplitThread(); TableName tableName = TableName.valueOf("CanSplitTable"); Table source = TEST_UTIL.getConnection().getTable(tableName); Admin admin = TEST_UTIL.getAdmin(); // set a large min compaction file count to avoid compaction just after splitting. 
TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); Map scanner = Maps.newHashMap(); try { admin.createTable(htd); @@ -129,15 +129,16 @@ public void testCanSplitJustAfterASplit() throws Exception { List regions = TEST_UTIL.getHBaseCluster().getRegions(tableName); regions.stream() - .forEach(r -> r.getStores().get(0).getStorefiles().stream() - .filter(s -> s.isReference() && !scanner.containsKey(r.getRegionInfo().getEncodedName())) - .forEach(sf -> { - StoreFileReader reader = ((HStoreFile) sf).getReader(); - reader.getStoreFileScanner(true, false, false, 0, 0, false); - scanner.put(r.getRegionInfo().getEncodedName(), reader); - LOG.info("Got reference to file = " + sf.getPath() + ",for region = " + - r.getRegionInfo().getEncodedName()); - })); + .forEach(r -> r.getStores().get(0).getStorefiles().stream() + .filter( + s -> s.isReference() && !scanner.containsKey(r.getRegionInfo().getEncodedName())) + .forEach(sf -> { + StoreFileReader reader = ((HStoreFile) sf).getReader(); + reader.getStoreFileScanner(true, false, false, 0, 0, false); + scanner.put(r.getRegionInfo().getEncodedName(), reader); + LOG.info("Got reference to file = " + sf.getPath() + ",for region = " + + r.getRegionInfo().getEncodedName()); + })); assertTrue("Regions did not split properly", regions.size() > 1); assertTrue("Could not get reference any of the store file", scanner.size() > 1); compactSplit.setCompactionsEnabled(true); @@ -146,9 +147,9 @@ public void testCanSplitJustAfterASplit() throws Exception { } regions.stream() - .filter(region -> scanner.containsKey(region.getRegionInfo().getEncodedName())) - .forEach(r -> assertFalse("Contains an open file reference which can be split", - r.getStores().get(0).canSplit())); + .filter(region -> scanner.containsKey(region.getRegionInfo().getEncodedName())) + .forEach(r -> assertFalse("Contains an open file reference which can be split", + r.getStores().get(0).canSplit())); } finally { scanner.values().forEach(s -> { try { @@ -299,10 +300,10 @@ static class RegionChecker extends ScheduledChore { void verifyRegionsUsingMetaTableAccessor() throws Exception { List regionList = MetaTableAccessor.getTableRegions(connection, tableName, true); verifyTableRegions(regionList.stream() - .collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR)))); + .collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR)))); regionList = MetaTableAccessor.getAllRegions(connection, true); verifyTableRegions(regionList.stream() - .collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR)))); + .collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR)))); } /** verify region boundaries obtained from HTable.getStartEndKeys() */ @@ -354,8 +355,8 @@ void verifyStartEndKeys(Pair keys) { // ensure that we do not have any gaps for (int i = 0; i < startKeys.length; i++) { assertArrayEquals( - "Hole in hbase:meta is detected. prevEndKey=" + Bytes.toStringBinary(prevEndKey) + - " ,regionStartKey=" + Bytes.toStringBinary(startKeys[i]), + "Hole in hbase:meta is detected. 
prevEndKey=" + Bytes.toStringBinary(prevEndKey) + + " ,regionStartKey=" + Bytes.toStringBinary(startKeys[i]), prevEndKey, startKeys[i]); prevEndKey = endKeys[i]; } @@ -397,9 +398,10 @@ public static void compactAndBlockUntilDone(Admin admin, HRegionServer rs, byte[ // Wait till its online before we do compact else it comes back with NoServerForRegionException try { TEST_UTIL.waitFor(10000, new Waiter.Predicate() { - @Override public boolean evaluate() throws Exception { - return rs.getServerName().equals(MetaTableAccessor. - getRegionLocation(admin.getConnection(), regionName).getServerName()); + @Override + public boolean evaluate() throws Exception { + return rs.getServerName().equals( + MetaTableAccessor.getRegionLocation(admin.getConnection(), regionName).getServerName()); } }); } catch (Exception e) { @@ -448,9 +450,9 @@ public static void blockUntilRegionSplit(Configuration conf, long timeout, Threads.sleep(100); } if (daughterA == null || daughterB == null) { - throw new IOException("Failed to get daughters, daughterA=" + daughterA + ", daughterB=" + - daughterB + ", timeout=" + timeout + ", result=" + result + ", regionName=" + - Bytes.toString(regionName) + ", region=" + region); + throw new IOException("Failed to get daughters, daughterA=" + daughterA + ", daughterB=" + + daughterB + ", timeout=" + timeout + ", result=" + result + ", regionName=" + + Bytes.toString(regionName) + ", region=" + region); } // if we are here, this means the region split is complete or timed out diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index 707462f05f2a..a8e2dbdf39fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,10 +61,10 @@ import org.slf4j.LoggerFactory; /** - * Test cases that ensure that file system level errors are bubbled up - * appropriately to clients, rather than swallowed. + * Test cases that ensure that file system level errors are bubbled up appropriately to clients, + * rather than swallowed. 
*/ -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestFSErrorsExposed { @ClassRule @@ -79,26 +79,21 @@ public class TestFSErrorsExposed { public TestName name = new TestName(); /** - * Injects errors into the pread calls of an on-disk file, and makes - * sure those bubble up to the HFile scanner + * Injects errors into the pread calls of an on-disk file, and makes sure those bubble up to the + * HFile scanner */ @Test public void testHFileScannerThrowsErrors() throws IOException { - Path hfilePath = new Path(new Path( - util.getDataTestDir("internalScannerExposesErrors"), - "regionname"), "familyname"); - HFileSystem hfs = (HFileSystem)util.getTestFileSystem(); + Path hfilePath = new Path( + new Path(util.getDataTestDir("internalScannerExposesErrors"), "regionname"), "familyname"); + HFileSystem hfs = (HFileSystem) util.getTestFileSystem(); FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs()); FileSystem fs = new HFileSystem(faultyfs); CacheConfig cacheConf = new CacheConfig(util.getConfiguration()); HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build(); - StoreFileWriter writer = new StoreFileWriter.Builder( - util.getConfiguration(), cacheConf, hfs) - .withOutputDir(hfilePath) - .withFileContext(meta) - .build(); - TestHStoreFile.writeStoreFile( - writer, Bytes.toBytes("cf"), Bytes.toBytes("qual")); + StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs) + .withOutputDir(hfilePath).withFileContext(meta).build(); + TestHStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual")); HStoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf, BloomType.NONE, true); @@ -116,7 +111,7 @@ public void testHFileScannerThrowsErrors() throws IOException { faultyfs.startFaults(); try { - int scanned=0; + int scanned = 0; while (scanner.next()) { scanned++; } @@ -129,34 +124,29 @@ public void testHFileScannerThrowsErrors() throws IOException { } /** - * Injects errors into the pread calls of an on-disk file, and makes - * sure those bubble up to the StoreFileScanner + * Injects errors into the pread calls of an on-disk file, and makes sure those bubble up to the + * StoreFileScanner */ @Test public void testStoreFileScannerThrowsErrors() throws IOException { - Path hfilePath = new Path(new Path( - util.getDataTestDir("internalScannerExposesErrors"), - "regionname"), "familyname"); - HFileSystem hfs = (HFileSystem)util.getTestFileSystem(); + Path hfilePath = new Path( + new Path(util.getDataTestDir("internalScannerExposesErrors"), "regionname"), "familyname"); + HFileSystem hfs = (HFileSystem) util.getTestFileSystem(); FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs()); HFileSystem fs = new HFileSystem(faultyfs); CacheConfig cacheConf = new CacheConfig(util.getConfiguration()); HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build(); - StoreFileWriter writer = new StoreFileWriter.Builder( - util.getConfiguration(), cacheConf, hfs) - .withOutputDir(hfilePath) - .withFileContext(meta) - .build(); - TestHStoreFile.writeStoreFile( - writer, Bytes.toBytes("cf"), Bytes.toBytes("qual")); + StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs) + .withOutputDir(hfilePath).withFileContext(meta).build(); + TestHStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual")); HStoreFile sf = new HStoreFile(fs, 
writer.getPath(), util.getConfiguration(), cacheConf, BloomType.NONE, true); List scanners = StoreFileScanner.getScannersForStoreFiles( - Collections.singletonList(sf), false, true, false, false, - // 0 is passed as readpoint because this test operates on HStoreFile directly - 0); + Collections.singletonList(sf), false, true, false, false, + // 0 is passed as readpoint because this test operates on HStoreFile directly + 0); KeyValueScanner scanner = scanners.get(0); FaultyInputStream inStream = faultyfs.inStreams.get(0).get(); @@ -168,7 +158,7 @@ public void testStoreFileScannerThrowsErrors() throws IOException { faultyfs.startFaults(); try { - int scanned=0; + int scanned = 0; while (scanner.next() != null) { scanned++; } @@ -181,14 +171,13 @@ public void testStoreFileScannerThrowsErrors() throws IOException { } /** - * Cluster test which starts a region server with a region, then - * removes the data from HDFS underneath it, and ensures that - * errors are bubbled to the client. + * Cluster test which starts a region server with a region, then removes the data from HDFS + * underneath it, and ensures that errors are bubbled to the client. */ @Test public void testFullSystemBubblesFSErrors() throws Exception { // We won't have an error if the datanode is not there if we use short circuit - // it's a known 'feature'. + // it's a known 'feature'. Assume.assumeTrue(!util.isReadShortCircuitOn()); try { @@ -203,8 +192,8 @@ public void testFullSystemBubblesFSErrors() throws Exception { Admin admin = util.getAdmin(); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(fam).setMaxVersions(1).setBlockCacheEnabled(false).build()).build(); + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(fam).setMaxVersions(1).setBlockCacheEnabled(false).build()).build(); admin.createTable(tableDescriptor); // Make a new Configuration so it makes a new connection that has the @@ -245,7 +234,7 @@ public FaultyFileSystem(FileSystem testFileSystem) { } @Override - public FSDataInputStream open(Path p, int bufferSize) throws IOException { + public FSDataInputStream open(Path p, int bufferSize) throws IOException { FSDataInputStream orig = fs.open(p, bufferSize); FaultyInputStream faulty = new FaultyInputStream(orig); inStreams.add(new SoftReference<>(faulty)); @@ -256,7 +245,7 @@ public FSDataInputStream open(Path p, int bufferSize) throws IOException { * Starts to simulate faults on all streams opened so far */ public void startFaults() { - for (SoftReference is: inStreams) { + for (SoftReference is : inStreams) { is.get().startFaults(); } } @@ -274,10 +263,9 @@ public void startFaults() { } @Override - public int read(long position, byte[] buffer, int offset, int length) - throws IOException { + public int read(long position, byte[] buffer, int offset, int length) throws IOException { injectFault(); - return ((PositionedReadable)in).read(position, buffer, offset, length); + return ((PositionedReadable) in).read(position, buffer, offset, length); } private void injectFault() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java index 05e0f1f42eec..21b61f90a603 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -60,10 +59,9 @@ import org.slf4j.LoggerFactory; /** - * Testing sync/append failures. - * Copied from TestHRegion. + * Testing sync/append failures. Copied from TestHRegion. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestFailedAppendAndSync { @ClassRule @@ -71,15 +69,16 @@ public class TestFailedAppendAndSync { HBaseClassTestRule.forClass(TestFailedAppendAndSync.class); private static final Logger LOG = LoggerFactory.getLogger(TestFailedAppendAndSync.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final String COLUMN_FAMILY = "MyCF"; - private static final byte [] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY); + private static final byte[] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY); HRegion region = null; - // Do not run unit tests in parallel (? Why not? It don't work? Why not? St.Ack) + // Do not run unit tests in parallel (? Why not? It don't work? Why not? St.Ack) private static HBaseTestingUtil TEST_UTIL; - public static Configuration CONF ; + public static Configuration CONF; private String dir; // Test names @@ -176,11 +175,11 @@ public long getSyncedLength() { }; } } + /** - * Reproduce locking up that happens when we get an exceptions appending and syncing. - * See HBASE-14317. - * First I need to set up some mocks for Server and RegionServerServices. I also need to - * set up a dodgy WAL that will throw an exception when we go to append to it. + * Reproduce locking up that happens when we get an exceptions appending and syncing. See + * HBASE-14317. First I need to set up some mocks for Server and RegionServerServices. I also need + * to set up a dodgy WAL that will throw an exception when we go to append to it. */ @Test public void testLockupAroundBadAssignSync() throws IOException { @@ -193,7 +192,7 @@ public void testLockupAroundBadAssignSync() throws IOException { // the test. FileSystem fs = FileSystem.get(CONF); Path rootDir = new Path(dir + getName()); - DodgyFSLog dodgyWAL = new DodgyFSLog(fs, (Server)services, rootDir, getName(), CONF); + DodgyFSLog dodgyWAL = new DodgyFSLog(fs, (Server) services, rootDir, getName(), CONF); dodgyWAL.init(); LogRoller logRoller = new LogRoller(services); logRoller.addWAL(dodgyWAL); @@ -313,13 +312,13 @@ public void testLockupAroundBadAssignSync() throws IOException { } /** - * @return A region on which you must call - * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when done. + * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} + * when done. 
*/ public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, Configuration conf, WAL wal) throws IOException { - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushLifeCycleTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushLifeCycleTracker.java index 0b1a41e4fd30..eb851d5014b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushLifeCycleTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushLifeCycleTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -199,15 +199,10 @@ public void test() throws IOException, InterruptedException { try (Table table = UTIL.getConnection().getTable(NAME)) { for (int i = 0; i < 100; i++) { byte[] row = Bytes.toBytes(i); - table.put(new Put(row, true) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(CF) - .setQualifier(QUALIFIER) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Type.Put) - .setValue(Bytes.toBytes(i)) - .build())); + table.put( + new Put(row, true).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row) + .setFamily(CF).setQualifier(QUALIFIER).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(Type.Put).setValue(Bytes.toBytes(i)).build())); } } Tracker tracker = new Tracker(); @@ -233,15 +228,10 @@ public void testNotExecuted() throws IOException, InterruptedException { try (Table table = UTIL.getConnection().getTable(NAME)) { for (int i = 0; i < 100; i++) { byte[] row = Bytes.toBytes(i); - table.put(new Put(row, true) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(CF) - .setQualifier(QUALIFIER) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Type.Put) - .setValue(Bytes.toBytes(i)) - .build())); + table.put( + new Put(row, true).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row) + .setFamily(CF).setQualifier(QUALIFIER).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(Type.Put).setValue(Bytes.toBytes(i)).build())); } } // here we may have overlap when calling the CP hooks so we do not assert on TRACKER diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushRegionEntry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushRegionEntry.java index d273501d8eff..a0eccf3cf7f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushRegionEntry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushRegionEntry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
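Aside on the TestFlushLifeCycleTracker hunks above: each row is written with a Put whose cell is assembled through the CellBuilder API rather than addColumn. A rough sketch of that pattern, with hypothetical family and qualifier arguments:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell.Type;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CellBuilderPutSketch {
  // The 'true' flag marks the row array as immutable so Put does not copy it; the
  // SHALLOW_COPY builder likewise reuses the supplied byte arrays for the cell.
  static Put putFor(int i, byte[] family, byte[] qualifier) throws IOException {
    byte[] row = Bytes.toBytes(i);
    return new Put(row, true)
      .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(row)
        .setFamily(family)
        .setQualifier(qualifier)
        .setTimestamp(HConstants.LATEST_TIMESTAMP)
        .setType(Type.Put)
        .setValue(Bytes.toBytes(i))
        .build());
  }
}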
See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index a435b9d9b239..53390a1ba2f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -67,7 +67,7 @@ public class TestGetClosestAtOrBefore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGetClosestAtOrBefore.class); + HBaseClassTestRule.forClass(TestGetClosestAtOrBefore.class); @Rule public TestName testName = new TestName(); @@ -104,11 +104,12 @@ public void testUsingMetaAndBinary() throws IOException { final int interval = 2; for (int i = 0; i <= last; i += interval) { RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(i == 0 ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i)) - .setEndKey(i == last ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i + interval)) - .build(); + .setStartKey(i == 0 ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i)) + .setEndKey( + i == last ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i + interval)) + .build(); Put put = - MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime()); + MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime()); put.setDurability(Durability.SKIP_WAL); LOG.info("Put {}", put); mr.put(put); @@ -168,7 +169,7 @@ public void testUsingMetaAndBinary() throws IOException { * @return Row found. */ private byte[] findRow(final Region mr, final char table, final int rowToFind, final int answer) - throws IOException { + throws IOException { TableName tableb = TableName.valueOf("" + table); // Find the row. byte[] tofindBytes = Bytes.toBytes((short) rowToFind); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index fb560d114af5..90a3921f749a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import java.util.NavigableSet; import java.util.Optional; import java.util.concurrent.ConcurrentSkipListSet; - import javax.crypto.spec.SecretKeySpec; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -91,21 +90,22 @@ public class TestHMobStore { HBaseClassTestRule.forClass(TestHMobStore.class); public static final Logger LOG = LoggerFactory.getLogger(TestHMobStore.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private HMobStore store; private HRegion region; private FileSystem fs; - private byte [] table = Bytes.toBytes("table"); - private byte [] family = Bytes.toBytes("family"); - private byte [] row = Bytes.toBytes("row"); - private byte [] row2 = Bytes.toBytes("row2"); - private byte [] qf1 = Bytes.toBytes("qf1"); - private byte [] qf2 = Bytes.toBytes("qf2"); - private byte [] qf3 = Bytes.toBytes("qf3"); - private byte [] qf4 = Bytes.toBytes("qf4"); - private byte [] qf5 = Bytes.toBytes("qf5"); - private byte [] qf6 = Bytes.toBytes("qf6"); + private byte[] table = Bytes.toBytes("table"); + private byte[] family = Bytes.toBytes("family"); + private byte[] row = Bytes.toBytes("row"); + private byte[] row2 = Bytes.toBytes("row2"); + private byte[] qf1 = Bytes.toBytes("qf1"); + private byte[] qf2 = Bytes.toBytes("qf2"); + private byte[] qf3 = Bytes.toBytes("qf3"); + private byte[] qf4 = Bytes.toBytes("qf4"); + private byte[] qf5 = Bytes.toBytes("qf5"); + private byte[] qf6 = Bytes.toBytes("qf6"); private byte[] value = Bytes.toBytes("value"); private byte[] value2 = Bytes.toBytes("value2"); private Path mobFilePath; @@ -131,8 +131,8 @@ public void setUp() throws Exception { qualifiers.add(qf5); Iterator iter = qualifiers.iterator(); - while(iter.hasNext()){ - byte [] next = iter.next(); + while (iter.hasNext()) { + byte[] next = iter.next(); expected.add(new KeyValue(row, family, next, 1, value)); get.addColumn(family, next); get.readAllVersions(); @@ -140,9 +140,8 @@ public void setUp() throws Exception { } private void init(String methodName, Configuration conf, boolean testStore) throws IOException { - ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true).setMobThreshold(3L) - .setMaxVersions(4).build(); + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(family) + .setMobEnabled(true).setMobThreshold(3L).setMaxVersions(4).build(); init(methodName, conf, cfd, testStore); } @@ -151,7 +150,7 @@ private void init(String methodName, Configuration conf, ColumnFamilyDescriptor TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).setColumnFamily(cfd).build(); - //Setting up tje Region and Store + // Setting up tje Region and Store Path basedir = new Path(DIR + methodName); Path tableDir = CommonFSUtils.getTableDir(basedir, td.getTableName()); String logName = "logs"; @@ -160,8 +159,8 @@ private void init(String methodName, Configuration conf, ColumnFamilyDescriptor fs.delete(logdir, true); RegionInfo info = RegionInfoBuilder.newBuilder(td.getTableName()).build(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); final Configuration walConf = new Configuration(conf); CommonFSUtils.setRootDir(walConf, 
basedir); final WALFactory wals = new WALFactory(walConf, methodName); @@ -173,12 +172,11 @@ private void init(String methodName, Configuration conf, ColumnFamilyDescriptor } } - private void init(Configuration conf, ColumnFamilyDescriptor cfd) - throws IOException { + private void init(Configuration conf, ColumnFamilyDescriptor cfd) throws IOException { Path basedir = CommonFSUtils.getRootDir(conf); fs = FileSystem.get(conf); - Path homePath = new Path(basedir, Bytes.toString(family) + Path.SEPARATOR - + Bytes.toString(family)); + Path homePath = + new Path(basedir, Bytes.toString(family) + Path.SEPARATOR + Bytes.toString(family)); fs.mkdirs(homePath); KeyValue key1 = new KeyValue(row, family, qf1, 1, value); @@ -187,7 +185,7 @@ private void init(Configuration conf, ColumnFamilyDescriptor cfd) KeyValue[] keys = new KeyValue[] { key1, key2, key3 }; int maxKeyCount = keys.length; StoreFileWriter mobWriter = store.createWriterInTmp(currentDate, maxKeyCount, - cfd.getCompactionCompressionType(), region.getRegionInfo().getStartKey(), false); + cfd.getCompactionCompressionType(), region.getRegionInfo().getStartKey(), false); mobFilePath = mobWriter.getPath(); mobWriter.append(key1); @@ -197,8 +195,8 @@ private void init(Configuration conf, ColumnFamilyDescriptor cfd) String targetPathName = MobUtils.formatDate(currentDate); byte[] referenceValue = Bytes.toBytes(targetPathName + Path.SEPARATOR + mobFilePath.getName()); - Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, - store.getTableName().getName()); + Tag tableNameTag = + new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, store.getTableName().getName()); KeyValue kv1 = new KeyValue(row, family, qf1, Long.MAX_VALUE, referenceValue); KeyValue kv2 = new KeyValue(row, family, qf2, Long.MAX_VALUE, referenceValue); KeyValue kv3 = new KeyValue(row2, family, qf3, Long.MAX_VALUE, referenceValue); @@ -216,7 +214,7 @@ public void testGetFromMemStore() throws IOException { final Configuration conf = HBaseConfiguration.create(); init(name.getMethodName(), conf, false); - //Put data in memstore + // Put data in memstore this.store.add(new KeyValue(row, family, qf1, 1, value), null); this.store.add(new KeyValue(row, family, qf2, 1, value), null); this.store.add(new KeyValue(row, family, qf3, 1, value), null); @@ -226,17 +224,16 @@ public void testGetFromMemStore() throws IOException { Scan scan = new Scan(get); InternalScanner scanner = (InternalScanner) store.getScanner(scan, - scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), - 0); + scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), 0); List results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); - //Compare + // Compare Assert.assertEquals(expected.size(), results.size()); - for(int i=0; i results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); - //Compare + // Compare Assert.assertEquals(expected.size(), results.size()); - for(int i=0; i results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); - //Compare + // Compare Assert.assertEquals(expected.size(), results.size()); - for(int i=0; i results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); - //Compare + // Compare Assert.assertEquals(expected.size(), results.size()); - for(int i=0; i results = new 
ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); - //Compare + // Compare Assert.assertEquals(expected.size(), results.size()); - for(int i=0; i results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparatorImpl.COMPARATOR); scanner.close(); Assert.assertEquals(expected.size(), results.size()); - for(int i=0; i 0); RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class); - doThrow(new IOException()) - .when(mockedCPHost).postBatchMutate(Mockito.>any()); + doThrow(new IOException()).when(mockedCPHost) + .postBatchMutate(Mockito.> any()); region.setCoprocessorHost(mockedCPHost); put = new Put(value); @@ -433,10 +430,9 @@ public void testMemstoreSizeAccountingWithFailedPostBatchMutate() throws IOExcep } catch (IOException expected) { } long expectedSize = onePutSize * 2; - assertEquals("memstoreSize should be incremented", - expectedSize, region.getMemStoreDataSize()); - assertEquals("flushable size should be incremented", - expectedSize, store.getFlushableSize().getDataSize()); + assertEquals("memstoreSize should be incremented", expectedSize, region.getMemStoreDataSize()); + assertEquals("flushable size should be incremented", expectedSize, + store.getFlushableSize().getDataSize()); region.setCoprocessorHost(null); } @@ -472,18 +468,17 @@ public void testFlushAndMemstoreSizeCounting() throws Exception { } /** - * Test we do not lose data if we fail a flush and then close. - * Part of HBase-10466. Tests the following from the issue description: - * "Bug 1: Wrong calculation of HRegion.memstoreSize: When a flush fails, data to be flushed is - * kept in each MemStore's snapshot and wait for next flush attempt to continue on it. But when - * the next flush succeeds, the counter of total memstore size in HRegion is always deduced by - * the sum of current memstore sizes instead of snapshots left from previous failed flush. This - * calculation is problematic that almost every time there is failed flush, HRegion.memstoreSize - * gets reduced by a wrong value. If region flush could not proceed for a couple cycles, the size - * in current memstore could be much larger than the snapshot. It's likely to drift memstoreSize - * much smaller than expected. In extreme case, if the error accumulates to even bigger than - * HRegion's memstore size limit, any further flush is skipped because flush does not do anything - * if memstoreSize is not larger than 0." + * Test we do not lose data if we fail a flush and then close. Part of HBase-10466. Tests the + * following from the issue description: "Bug 1: Wrong calculation of HRegion.memstoreSize: When a + * flush fails, data to be flushed is kept in each MemStore's snapshot and wait for next flush + * attempt to continue on it. But when the next flush succeeds, the counter of total memstore size + * in HRegion is always deduced by the sum of current memstore sizes instead of snapshots left + * from previous failed flush. This calculation is problematic that almost every time there is + * failed flush, HRegion.memstoreSize gets reduced by a wrong value. If region flush could not + * proceed for a couple cycles, the size in current memstore could be much larger than the + * snapshot. It's likely to drift memstoreSize much smaller than expected. In extreme case, if the + * error accumulates to even bigger than HRegion's memstore size limit, any further flush is + * skipped because flush does not do anything if memstoreSize is not larger than 0." 
* @throws Exception */ @Test @@ -492,8 +487,7 @@ public void testFlushSizeAccounting() throws Exception { final WAL wal = createWALCompatibleWithFaultyFileSystem(method, conf, tableName); // Only retry once. conf.setInt("hbase.hstore.flush.retries.number", 1); - final User user = - User.createUserForTesting(conf, method, new String[]{"foo"}); + final User user = User.createUserForTesting(conf, method, new String[] { "foo" }); // Inject our faulty LocalFileSystem conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class); user.runAs(new PrivilegedExceptionAction() { @@ -502,15 +496,15 @@ public Object run() throws Exception { // Make sure it worked (above is sensitive to caching details in hadoop core) FileSystem fs = FileSystem.get(conf); Assert.assertEquals(FaultyFileSystem.class, fs.getClass()); - FaultyFileSystem ffs = (FaultyFileSystem)fs; + FaultyFileSystem ffs = (FaultyFileSystem) fs; HRegion region = null; try { // Initialize region region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, wal, - COLUMN_FAMILY_BYTES); + COLUMN_FAMILY_BYTES); long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); - // Put one item into memstore. Measure the size of one item in memstore. + // Put one item into memstore. Measure the size of one item in memstore. Put p1 = new Put(row); p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null)); region.put(p1); @@ -526,17 +520,17 @@ public Object run() throws Exception { } // Make it so all writes succeed from here on out ffs.fault.set(false); - // Check sizes. Should still be the one entry. + // Check sizes. Should still be the one entry. Assert.assertEquals(sizeOfOnePut, region.getMemStoreDataSize()); // Now add two entries so that on this next flush that fails, we can see if we // subtract the right amount, the snapshot size only. Put p2 = new Put(row); - p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[])null)); - p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null)); + p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[]) null)); + p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[]) null)); region.put(p2); long expectedSize = sizeOfOnePut * 3; Assert.assertEquals(expectedSize, region.getMemStoreDataSize()); - // Do a successful flush. It will clear the snapshot only. Thats how flushes work. + // Do a successful flush. It will clear the snapshot only. Thats how flushes work. // If already a snapshot, we clear it else we move the memstore to be snapshot and flush // it region.flush(true); @@ -557,8 +551,7 @@ public void testCloseWithFailingFlush() throws Exception { final WAL wal = createWALCompatibleWithFaultyFileSystem(method, conf, tableName); // Only retry once. 
conf.setInt("hbase.hstore.flush.retries.number", 1); - final User user = - User.createUserForTesting(conf, this.method, new String[]{"foo"}); + final User user = User.createUserForTesting(conf, this.method, new String[] { "foo" }); // Inject our faulty LocalFileSystem conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class); user.runAs(new PrivilegedExceptionAction() { @@ -567,17 +560,17 @@ public Object run() throws Exception { // Make sure it worked (above is sensitive to caching details in hadoop core) FileSystem fs = FileSystem.get(conf); Assert.assertEquals(FaultyFileSystem.class, fs.getClass()); - FaultyFileSystem ffs = (FaultyFileSystem)fs; + FaultyFileSystem ffs = (FaultyFileSystem) fs; HRegion region = null; try { // Initialize region - region = initHRegion(tableName, null, null, CONF, false, - Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); + region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, wal, + COLUMN_FAMILY_BYTES); long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); - // Put one item into memstore. Measure the size of one item in memstore. + // Put one item into memstore. Measure the size of one item in memstore. Put p1 = new Put(row); - p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[])null)); + p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null)); region.put(p1); // Manufacture an outstanding snapshot -- fake a failed flush by doing prepare step only. HStore store = region.getStore(COLUMN_FAMILY_BYTES); @@ -586,8 +579,8 @@ public Object run() throws Exception { storeFlushCtx.prepare(); // Now add two entries to the foreground memstore. Put p2 = new Put(row); - p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[])null)); - p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null)); + p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[]) null)); + p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[]) null)); region.put(p2); // Now try close on top of a failing flush. 
HBaseTestingUtil.closeRegionAndWAL(region); @@ -707,10 +700,10 @@ public void testArchiveRecoveredEditsReplay() throws Exception { long time = System.nanoTime(); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes - .toBytes(i))); - writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, i, time, - HConstants.DEFAULT_CLUSTER_ID), edit)); + edit.add( + new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); + writer.append(new WAL.Entry( + new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); writer.close(); } @@ -761,10 +754,10 @@ public void testSkipRecoveredEditsReplay() throws Exception { long time = System.nanoTime(); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes - .toBytes(i))); - writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, i, time, - HConstants.DEFAULT_CLUSTER_ID), edit)); + edit.add( + new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); + writer.append(new WAL.Entry( + new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); writer.close(); } @@ -812,10 +805,10 @@ public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception { long time = System.nanoTime(); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes - .toBytes(i))); - writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, i, time, - HConstants.DEFAULT_CLUSTER_ID), edit)); + edit.add( + new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); + writer.append(new WAL.Entry( + new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); writer.close(); } @@ -900,20 +893,19 @@ public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception { WALEdit edit = null; if (i == maxSeqId) { edit = WALEdit.createCompaction(region.getRegionInfo(), - CompactionDescriptor.newBuilder() - .setTableName(ByteString.copyFrom(tableName.getName())) - .setFamilyName(ByteString.copyFrom(regionName)) - .setEncodedRegionName(ByteString.copyFrom(regionName)) - .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString()))) - .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName())) - .build()); + CompactionDescriptor.newBuilder().setTableName(ByteString.copyFrom(tableName.getName())) + .setFamilyName(ByteString.copyFrom(regionName)) + .setEncodedRegionName(ByteString.copyFrom(regionName)) + .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString()))) + .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName())) + .build()); } else { edit = new WALEdit(); - edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes - .toBytes(i))); + edit.add( + new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i))); } - writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, i, time, - HConstants.DEFAULT_CLUSTER_ID), edit)); + writer.append(new WAL.Entry( + new WALKeyImpl(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit)); writer.close(); } @@ -984,23 +976,23 @@ public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) thr assertNotNull(errorMsg, files); assertEquals(errorMsg, 1, files.length); // move the file inside region dir - Path newFile = 
region.getRegionFileSystem().commitStoreFile(Bytes.toString(family), - files[0].getPath()); + Path newFile = + region.getRegionFileSystem().commitStoreFile(Bytes.toString(family), files[0].getPath()); byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes(); - byte[] fakeEncodedNameAsBytes = new byte [encodedNameAsBytes.length]; - for (int i=0; i < encodedNameAsBytes.length; i++) { + byte[] fakeEncodedNameAsBytes = new byte[encodedNameAsBytes.length]; + for (int i = 0; i < encodedNameAsBytes.length; i++) { // Mix the byte array to have a new encodedName fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1); } - CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(this.region - .getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family, - storeFiles, Lists.newArrayList(newFile), - region.getRegionFileSystem().getStoreDir(Bytes.toString(family))); + CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor( + this.region.getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family, + storeFiles, Lists.newArrayList(newFile), + region.getRegionFileSystem().getStoreDir(Bytes.toString(family))); WALUtil.writeCompactionMarker(region.getWAL(), this.region.getReplicationScope(), - this.region.getRegionInfo(), compactionDescriptor, region.getMVCC(), null); + this.region.getRegionInfo(), compactionDescriptor, region.getMVCC(), null); Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir); @@ -1010,9 +1002,9 @@ public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) thr long time = System.nanoTime(); - writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, 10, time, - HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(region.getRegionInfo(), - compactionDescriptor))); + writer.append(new WAL.Entry( + new WALKeyImpl(regionName, tableName, 10, time, HConstants.DEFAULT_CLUSTER_ID), + WALEdit.createCompaction(region.getRegionInfo(), compactionDescriptor))); writer.close(); // close the region now, and reopen again @@ -1060,8 +1052,8 @@ public void testFlushMarkers() throws Exception { final WALFactory wals = new WALFactory(walConf, method); final WAL wal = wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build()); - this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family); + this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF, + false, Durability.USE_DEFAULT, wal, family); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); @@ -1108,14 +1100,14 @@ public void testFlushMarkers() throws Exception { } lastFlushSeqId = flushDesc.getFlushSequenceNumber(); assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray()); - assertEquals(1, flushDesc.getStoreFlushesCount()); //only one store + assertEquals(1, flushDesc.getStoreFlushesCount()); // only one store StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0); assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray()); assertEquals("family", storeFlushDesc.getStoreHomeDir()); if (flushDesc.getAction() == FlushAction.START_FLUSH) { assertEquals(0, storeFlushDesc.getFlushOutputCount()); } else { - assertEquals(1, storeFlushDesc.getFlushOutputCount()); //only one file from flush + assertEquals(1, storeFlushDesc.getFlushOutputCount()); // 
only one file from flush assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0))); } @@ -1168,9 +1160,11 @@ public void testFlushMarkers() throws Exception { static class IsFlushWALMarker implements ArgumentMatcher { volatile FlushAction[] actions; + public IsFlushWALMarker(FlushAction... actions) { this.actions = actions; } + @Override public boolean matches(WALEdit edit) { List cells = edit.getCells(); @@ -1195,6 +1189,7 @@ public boolean matches(WALEdit edit) { } return false; } + public IsFlushWALMarker set(FlushAction... actions) { this.actions = actions; return this; @@ -1213,10 +1208,10 @@ public void testFlushMarkersWALFail() throws Exception { CommonFSUtils.setRootDir(walConf, logDir); // Make up a WAL that we can manipulate at append time. class FailAppendFlushMarkerWAL extends FSHLog { - volatile FlushAction [] flushActions = null; + volatile FlushAction[] flushActions = null; public FailAppendFlushMarkerWAL(FileSystem fs, Path root, String logDir, Configuration conf) - throws IOException { + throws IOException { super(fs, root, logDir, conf); } @@ -1240,7 +1235,7 @@ public void append(Entry entry) throws IOException { if (WALEdit.isMetaEditFamily(cells.get(0))) { FlushDescriptor desc = WALEdit.getFlushDescriptor(cells.get(0)); if (desc != null) { - for (FlushAction flushAction: flushActions) { + for (FlushAction flushAction : flushActions) { if (desc.getAction().equals(flushAction)) { throw new IOException("Failed to append flush marker! " + flushAction); } @@ -1263,10 +1258,10 @@ public long getSyncedLength() { } } FailAppendFlushMarkerWAL wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf), - CommonFSUtils.getRootDir(walConf), method, walConf); + CommonFSUtils.getRootDir(walConf), method, walConf); wal.init(); - this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family); + this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF, + false, Durability.USE_DEFAULT, wal, family); int i = 0; Put put = new Put(Bytes.toBytes(i)); put.setDurability(Durability.SKIP_WAL); // have to skip mocked wal @@ -1274,7 +1269,7 @@ public long getSyncedLength() { region.put(put); // 1. Test case where START_FLUSH throws exception - wal.flushActions = new FlushAction [] {FlushAction.START_FLUSH}; + wal.flushActions = new FlushAction[] { FlushAction.START_FLUSH }; // start cache flush will throw exception try { @@ -1294,15 +1289,15 @@ public long getSyncedLength() { // 2. Test case where START_FLUSH succeeds but COMMIT_FLUSH will throw exception wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH }; wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf), CommonFSUtils.getRootDir(walConf), - method, walConf); + method, walConf); wal.init(); - this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, CONF, false, Durability.USE_DEFAULT, wal, family); + this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF, + false, Durability.USE_DEFAULT, wal, family); region.put(put); // 3. Test case where ABORT_FLUSH will throw exception. // Even if ABORT_FLUSH throws exception, we should not fail with IOE, but continue with // DroppedSnapshotException. 
Below COMMIT_FLUSH will cause flush to abort - wal.flushActions = new FlushAction [] {FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH}; + wal.flushActions = new FlushAction[] { FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH }; try { region.flush(true); @@ -1367,8 +1362,8 @@ public void testGetWhileRegionClose() throws IOException { } /* - * Thread that does get on single row until 'done' flag is flipped. If an - * exception causes us to fail, it records it. + * Thread that does get on single row until 'done' flag is flipped. If an exception causes us to + * fail, it records it. */ class GetTillDoneOrException extends Thread { private final Get g; @@ -1420,23 +1415,23 @@ public void testWeirdCacheBehaviour() throws Exception { putRows(this.region, 3, value2, keyPrefix3); System.out.println("Checking values for key: " + keyPrefix1); assertEquals("Got back incorrect number of rows from scan", 3, - getNumberOfRows(keyPrefix1, value2, this.region)); + getNumberOfRows(keyPrefix1, value2, this.region)); System.out.println("Checking values for key: " + keyPrefix2); assertEquals("Got back incorrect number of rows from scan", 3, - getNumberOfRows(keyPrefix2, value2, this.region)); + getNumberOfRows(keyPrefix2, value2, this.region)); System.out.println("Checking values for key: " + keyPrefix3); assertEquals("Got back incorrect number of rows from scan", 3, - getNumberOfRows(keyPrefix3, value2, this.region)); + getNumberOfRows(keyPrefix3, value2, this.region)); deleteColumns(this.region, value2, keyPrefix1); deleteColumns(this.region, value2, keyPrefix2); deleteColumns(this.region, value2, keyPrefix3); System.out.println("Starting important checks....."); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix1, 0, - getNumberOfRows(keyPrefix1, value2, this.region)); + getNumberOfRows(keyPrefix1, value2, this.region)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix2, 0, - getNumberOfRows(keyPrefix2, value2, this.region)); + getNumberOfRows(keyPrefix2, value2, this.region)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3, 0, - getNumberOfRows(keyPrefix3, value2, this.region)); + getNumberOfRows(keyPrefix3, value2, this.region)); } @Test @@ -1447,7 +1442,7 @@ public void testAppendWithReadOnlyTable() throws Exception { Append append = new Append(Bytes.toBytes("somerow")); append.setDurability(Durability.SKIP_WAL); append.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"), - Bytes.toBytes("somevalue")); + Bytes.toBytes("somevalue")); try { region.append(append); } catch (IOException e) { @@ -1479,10 +1474,8 @@ private void deleteColumns(HRegion r, String value, String keyPrefix) throws IOE List results = new ArrayList<>(); do { more = scanner.next(results); - if (results != null && !results.isEmpty()) - count++; - else - break; + if (results != null && !results.isEmpty()) count++; + else break; Delete delete = new Delete(CellUtil.cloneRow(results.get(0))); delete.addColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2")); r.delete(delete); @@ -1498,10 +1491,8 @@ private int getNumberOfRows(String keyPrefix, String value, HRegion r) throws Ex boolean more = false; do { more = resultScanner.next(results); - if (results != null && !results.isEmpty()) - numberOfResults++; - else - break; + if (results != null && !results.isEmpty()) numberOfResults++; + else break; for (Cell kv : results) { System.out.println("kv=" + kv.toString() + ", " + Bytes.toString(CellUtil.cloneValue(kv))); } @@ -1580,7 +1571,7 @@ public 
void testBatchPut_whileNoRowLocksHeld() throws IOException { assertEquals(10, codes.length); for (int i = 0; i < 10; i++) { assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS, - codes[i].getOperationStatusCode()); + codes[i].getOperationStatusCode()); } metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 2, source); @@ -1615,10 +1606,10 @@ public void doWork() throws IOException { ctx.addThread(putter); ctx.startThreads(); - // Now attempt to close the region from another thread. Prior to HBASE-12565 + // Now attempt to close the region from another thread. Prior to HBASE-12565 // this would cause the in-progress batchMutate operation to to fail with // exception because it use to release and re-acquire the close-guard lock - // between batches. Caller then didn't get status indicating which writes succeeded. + // between batches. Caller then didn't get status indicating which writes succeeded. // We now expect this thread to block until the batchMutate call finishes. Thread regionCloseThread = new TestThread(ctx) { @Override @@ -1656,7 +1647,7 @@ public void doWork() { OperationStatus[] codes = retFromThread.get(); for (int i = 0; i < codes.length; i++) { assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS, - codes[i].getOperationStatusCode()); + codes[i].getOperationStatusCode()); } rowLock4.release(); } @@ -1669,7 +1660,7 @@ private void waitForCounter(MetricsWALSource source, String metricName, long exp Thread.sleep(100); if (EnvironmentEdgeManager.currentTime() - startWait > 10000) { fail(String.format("Timed out waiting for '%s' >= '%s', currentCount=%s", metricName, - expectedCount, currentCount)); + expectedCount, currentCount)); } } } @@ -1756,8 +1747,8 @@ private long prepareRegionForBachPut(final Put[] puts, final MetricsWALSource so LOG.info("First a batch put with all valid puts"); for (int i = 0; i < puts.length; i++) { - puts[i] = slop ? new Put(Bytes.toBytes("row_" + i), Long.MAX_VALUE - 100) : - new Put(Bytes.toBytes("row_" + i)); + puts[i] = slop ? 
new Put(Bytes.toBytes("row_" + i), Long.MAX_VALUE - 100) + : new Put(Bytes.toBytes("row_" + i)); puts[i].addColumn(COLUMN_FAMILY_BYTES, qual, value); } @@ -1787,7 +1778,7 @@ public void testCheckAndMutate_WithEmptyRowValue() throws IOException { // checkAndPut with empty value boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(emptyVal), put); + new BinaryComparator(emptyVal), put); assertTrue(res); // Putting data in key @@ -1796,46 +1787,45 @@ public void testCheckAndMutate_WithEmptyRowValue() throws IOException { // checkAndPut with correct value res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(emptyVal), put); + new BinaryComparator(emptyVal), put); assertTrue(res); // not empty anymore res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(emptyVal), put); + new BinaryComparator(emptyVal), put); assertFalse(res); Delete delete = new Delete(row1); delete.addColumn(fam1, qf1); res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(emptyVal), delete); + new BinaryComparator(emptyVal), delete); assertFalse(res); put = new Put(row1); put.addColumn(fam1, qf1, val2); // checkAndPut with correct value - res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(val1), put); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BinaryComparator(val1), + put); assertTrue(res); // checkAndDelete with correct value delete = new Delete(row1); delete.addColumn(fam1, qf1); delete.addColumn(fam1, qf1); - res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(val2), delete); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BinaryComparator(val2), + delete); assertTrue(res); delete = new Delete(row1); res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(emptyVal), delete); + new BinaryComparator(emptyVal), delete); assertTrue(res); // checkAndPut looking for a null value put = new Put(row1); put.addColumn(fam1, qf1, val1); - res = region - .checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new NullComparator(), put); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new NullComparator(), put); assertTrue(res); } @@ -1859,14 +1849,14 @@ public void testCheckAndMutate_WithWrongValue() throws IOException { // checkAndPut with wrong value boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(val2), put); + new BinaryComparator(val2), put); assertEquals(false, res); // checkAndDelete with wrong value Delete delete = new Delete(row1); delete.addFamily(fam1); - res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(val2), put); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BinaryComparator(val2), + put); assertEquals(false, res); // Putting data in key @@ -1875,17 +1865,15 @@ public void testCheckAndMutate_WithWrongValue() throws IOException { region.put(put); // checkAndPut with wrong value - res = - region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BigDecimalComparator(bd2), put); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, + new BigDecimalComparator(bd2), put); assertEquals(false, res); // checkAndDelete with wrong value delete = new Delete(row1); delete.addFamily(fam1); - res = - region.checkAndMutate(row1, fam1, qf1, 
CompareOperator.EQUAL, - new BigDecimalComparator(bd2), put); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, + new BigDecimalComparator(bd2), put); assertEquals(false, res); } @@ -1908,14 +1896,14 @@ public void testCheckAndMutate_WithCorrectValue() throws IOException { // checkAndPut with correct value boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(val1), put); + new BinaryComparator(val1), put); assertEquals("First", true, res); // checkAndDelete with correct value Delete delete = new Delete(row1, now + 1); delete.addColumn(fam1, qf1); res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BinaryComparator(val1), - delete); + delete); assertEquals("Delete", true, res); // Putting data in key @@ -1924,17 +1912,15 @@ public void testCheckAndMutate_WithCorrectValue() throws IOException { region.put(put); // checkAndPut with correct value - res = - region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BigDecimalComparator( - bd1), put); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, + new BigDecimalComparator(bd1), put); assertEquals("Second put", true, res); // checkAndDelete with correct value delete = new Delete(row1, now + 3); delete.addColumn(fam1, qf1); - res = - region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BigDecimalComparator( - bd1), delete); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, + new BigDecimalComparator(bd1), delete); assertEquals("Second delete", true, res); } @@ -1958,31 +1944,31 @@ public void testCheckAndMutate_WithNonEqualCompareOp() throws IOException { // Test CompareOp.LESS: original = val3, compare with val3, fail boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS, - new BinaryComparator(val3), put); + new BinaryComparator(val3), put); assertEquals(false, res); // Test CompareOp.LESS: original = val3, compare with val4, fail - res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS, - new BinaryComparator(val4), put); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS, new BinaryComparator(val4), + put); assertEquals(false, res); // Test CompareOp.LESS: original = val3, compare with val2, // succeed (now value = val2) put = new Put(row1); put.addColumn(fam1, qf1, val2); - res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS, - new BinaryComparator(val2), put); + res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS, new BinaryComparator(val2), + put); assertEquals(true, res); // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val3, fail res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(val3), put); + new BinaryComparator(val3), put); assertEquals(false, res); // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val2, // succeed (value still = val2) res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(val2), put); + new BinaryComparator(val2), put); assertEquals(true, res); // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val1, @@ -1990,17 +1976,17 @@ public void testCheckAndMutate_WithNonEqualCompareOp() throws IOException { put = new Put(row1); put.addColumn(fam1, qf1, val3); res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS_OR_EQUAL, - new BinaryComparator(val1), put); + new BinaryComparator(val1), put); assertEquals(true, res); // Test CompareOp.GREATER: original = val3, 
compare with val3, fail res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER, - new BinaryComparator(val3), put); + new BinaryComparator(val3), put); assertEquals(false, res); // Test CompareOp.GREATER: original = val3, compare with val2, fail res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER, - new BinaryComparator(val2), put); + new BinaryComparator(val2), put); assertEquals(false, res); // Test CompareOp.GREATER: original = val3, compare with val4, @@ -2008,23 +1994,23 @@ public void testCheckAndMutate_WithNonEqualCompareOp() throws IOException { put = new Put(row1); put.addColumn(fam1, qf1, val2); res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER, - new BinaryComparator(val4), put); + new BinaryComparator(val4), put); assertEquals(true, res); // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val1, fail res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(val1), put); + new BinaryComparator(val1), put); assertEquals(false, res); // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val2, // succeed (value still = val2) res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(val2), put); + new BinaryComparator(val2), put); assertEquals(true, res); // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val3, succeed res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(val3), put); + new BinaryComparator(val3), put); assertEquals(true, res); } @@ -2055,7 +2041,7 @@ public void testCheckAndPut_ThatPutWasWritten() throws IOException { // checkAndPut with wrong value boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(val1), put); + new BinaryComparator(val1), put); assertEquals(true, res); Get get = new Get(row1); @@ -2077,8 +2063,8 @@ public void testCheckAndPut_wrongRowInPut() throws IOException { Put put = new Put(row2); put.addColumn(fam1, qual1, value1); try { - region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL, - new BinaryComparator(value2), put); + region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL, new BinaryComparator(value2), + put); fail(); } catch (org.apache.hadoop.hbase.DoNotRetryIOException expected) { // expected exception. 
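For reference, the two checkAndMutate call shapes that the hunks above keep reflowing look roughly like this when written out flat. This is a minimal illustrative sketch, not part of the patch: it assumes an already-initialized HRegion and byte[] row/family/qualifier fixtures like those in the surrounding tests, and uses only calls that appear verbatim in the diff.

import java.io.IOException;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the two HRegion.checkAndMutate forms exercised by these tests.
final class CheckAndMutateSketch {
  static void demo(HRegion region, byte[] row, byte[] fam, byte[] qual) throws IOException {
    byte[] oldVal = Bytes.toBytes("old"); // hypothetical fixture values
    byte[] newVal = Bytes.toBytes("new");

    // Comparator-based form: apply the Put only if the stored cell EQUALs oldVal.
    Put put = new Put(row).addColumn(fam, qual, newVal);
    boolean applied = region.checkAndMutate(row, fam, qual, CompareOperator.EQUAL,
        new BinaryComparator(oldVal), put);

    // Builder-based form: the same condition expressed via CheckAndMutate,
    // returning a CheckAndMutateResult rather than a bare boolean.
    Delete delete = new Delete(row).addColumn(fam, qual);
    CheckAndMutateResult result = region.checkAndMutate(CheckAndMutate.newBuilder(row)
        .ifMatches(fam, qual, CompareOperator.EQUAL, newVal).build(delete));
    boolean deleted = result.isSuccess(); // getResult() stays null for plain Put/Delete conditions
  }
}

The non-EQUAL CompareOperator cases above (LESS, LESS_OR_EQUAL, GREATER, GREATER_OR_EQUAL) plug into the same two shapes; only the operator and the expected success/failure of the condition change.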
@@ -2125,7 +2111,7 @@ public void testCheckAndDelete_ThatDeleteWasWritten() throws IOException { delete.addColumn(fam2, qf1); delete.addColumn(fam1, qf3); boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, - new BinaryComparator(val2), delete); + new BinaryComparator(val2), delete); assertEquals(true, res); Get get = new Get(row1); @@ -2141,7 +2127,7 @@ public void testCheckAndDelete_ThatDeleteWasWritten() throws IOException { delete = new Delete(row1); delete.addFamily(fam2); res = region.checkAndMutate(row1, fam2, qf1, CompareOperator.EQUAL, - new BinaryComparator(emptyVal), delete); + new BinaryComparator(emptyVal), delete); assertEquals(true, res); get = new Get(row1); @@ -2152,7 +2138,7 @@ public void testCheckAndDelete_ThatDeleteWasWritten() throws IOException { // Row delete delete = new Delete(row1); res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new BinaryComparator(val1), - delete); + delete); assertEquals(true, res); get = new Get(row1); r = region.get(get); @@ -2177,11 +2163,10 @@ public void testCheckAndMutate_WithFilters() throws Throwable { // Put with success boolean ok = region.checkAndMutate(row, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")) - ), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b"))), new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))); assertTrue(ok); @@ -2191,11 +2176,10 @@ public void testCheckAndMutate_WithFilters() throws Throwable { // Put with failure ok = region.checkAndMutate(row, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("c")) - ), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("c"))), new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))); assertFalse(ok); @@ -2204,11 +2188,10 @@ public void testCheckAndMutate_WithFilters() throws Throwable { // Delete with success ok = region.checkAndMutate(row, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")) - ), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b"))), new Delete(row).addColumns(FAMILY, Bytes.toBytes("D"))); assertTrue(ok); @@ -2217,15 +2200,13 @@ public void testCheckAndMutate_WithFilters() throws Throwable { // Mutate with success ok = region.checkAndRowMutate(row, new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")) - ), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, 
Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b"))), new RowMutations(row) - .add((Mutation) new Put(row) - .addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))) - .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))); + .add((Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))) + .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))); assertTrue(ok); result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("E"))); @@ -2248,7 +2229,7 @@ public void testCheckAndMutate_WithFiltersAndTimeRange() throws Throwable { // Put with success boolean ok = region.checkAndMutate(row, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), + Bytes.toBytes("a")), TimeRange.between(0, 101), new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))); assertTrue(ok); @@ -2259,7 +2240,7 @@ public void testCheckAndMutate_WithFiltersAndTimeRange() throws Throwable { // Put with failure ok = region.checkAndMutate(row, new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), + Bytes.toBytes("a")), TimeRange.between(0, 100), new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c"))); assertFalse(ok); @@ -2268,13 +2249,12 @@ public void testCheckAndMutate_WithFiltersAndTimeRange() throws Throwable { // Mutate with success ok = region.checkAndRowMutate(row, - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), + new SingleColumnValueFilter( + FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("a")), TimeRange.between(0, 101), new RowMutations(row) - .add((Mutation) new Put(row) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))); + .add((Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) + .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A")))); assertTrue(ok); result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("D"))); @@ -2337,10 +2317,8 @@ public void testCheckAndMutate_wrongRow() throws Throwable { try { region.checkAndRowMutate(row, fam1, qual1, CompareOperator.EQUAL, new BinaryComparator(value1), - new RowMutations(wrongRow) - .add((Mutation) new Put(wrongRow) - .addColumn(fam1, qual1, value1)) - .add((Mutation) new Delete(wrongRow).addColumns(fam1, qual2))); + new RowMutations(wrongRow).add((Mutation) new Put(wrongRow).addColumn(fam1, qual1, value1)) + .add((Mutation) new Delete(wrongRow).addColumns(fam1, qual2))); fail("should throw DoNotRetryIOException"); } catch (DoNotRetryIOException e) { assertEquals("The row of the action doesn't match the original one ", @@ -2350,10 +2328,8 @@ public void testCheckAndMutate_wrongRow() throws Throwable { try { region.checkAndRowMutate(row, new SingleColumnValueFilter(fam1, qual1, CompareOperator.EQUAL, value1), - new RowMutations(wrongRow) - .add((Mutation) new Put(wrongRow) - .addColumn(fam1, qual1, value1)) - .add((Mutation) new Delete(wrongRow).addColumns(fam1, qual2))); + new RowMutations(wrongRow).add((Mutation) new Put(wrongRow).addColumn(fam1, qual1, value1)) + .add((Mutation) new Delete(wrongRow).addColumns(fam1, qual2))); fail("should throw DoNotRetryIOException"); } catch (DoNotRetryIOException e) { assertEquals("The row of the action doesn't match the original one ", @@ -2378,7 +2354,7 @@ public void testCheckAndMutateWithEmptyRowValue() throws 
IOException { // checkAndPut with empty value CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(put)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2388,28 +2364,28 @@ public void testCheckAndMutateWithEmptyRowValue() throws IOException { // checkAndPut with correct value res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(put)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); // not empty anymore res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(put)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(put)); assertFalse(res.isSuccess()); assertNull(res.getResult()); Delete delete = new Delete(row1); delete.addColumn(fam1, qf1); res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(delete)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(delete)); assertFalse(res.isSuccess()); assertNull(res.getResult()); put = new Put(row1); put.addColumn(fam1, qf1, val2); // checkAndPut with correct value - res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(put)); + res = region.checkAndMutate( + CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2418,13 +2394,13 @@ public void testCheckAndMutateWithEmptyRowValue() throws IOException { delete.addColumn(fam1, qf1); delete.addColumn(fam1, qf1); res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(delete)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(delete)); assertTrue(res.isSuccess()); assertNull(res.getResult()); delete = new Delete(row1); res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(delete)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, emptyVal).build(delete)); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2432,8 +2408,7 @@ public void testCheckAndMutateWithEmptyRowValue() throws IOException { put = new Put(row1); put.addColumn(fam1, qf1, val1); - res = region.checkAndMutate(CheckAndMutate.newBuilder(row1).ifNotExists(fam1, qf1) - .build(put)); + res = region.checkAndMutate(CheckAndMutate.newBuilder(row1).ifNotExists(fam1, qf1).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); } @@ -2456,16 +2431,16 @@ public void testCheckAndMutateWithWrongValue() throws IOException { region.put(put); // checkAndPut with wrong value - CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(put)); + CheckAndMutateResult res = region.checkAndMutate( + CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(put)); assertFalse(res.isSuccess()); assertNull(res.getResult()); // checkAndDelete with wrong value Delete delete = new Delete(row1); delete.addFamily(fam1); - res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(put)); + res = 
region.checkAndMutate( + CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(put)); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -2476,7 +2451,7 @@ public void testCheckAndMutateWithWrongValue() throws IOException { // checkAndPut with wrong value res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, Bytes.toBytes(bd2)).build(put)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, Bytes.toBytes(bd2)).build(put)); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -2484,7 +2459,7 @@ public void testCheckAndMutateWithWrongValue() throws IOException { delete = new Delete(row1); delete.addFamily(fam1); res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, Bytes.toBytes(bd2)).build(delete)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, Bytes.toBytes(bd2)).build(delete)); assertFalse(res.isSuccess()); assertNull(res.getResult()); } @@ -2506,15 +2481,15 @@ public void testCheckAndMutateWithCorrectValue() throws IOException { region.put(put); // checkAndPut with correct value - CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(put)); + CheckAndMutateResult res = region.checkAndMutate( + CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(put)); assertTrue("First", res.isSuccess()); // checkAndDelete with correct value Delete delete = new Delete(row1, now + 1); delete.addColumn(fam1, qf1); res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(delete)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(delete)); assertTrue("Delete", res.isSuccess()); assertNull(res.getResult()); @@ -2556,14 +2531,14 @@ public void testCheckAndMutateWithNonEqualCompareOp() throws IOException { region.put(put); // Test CompareOp.LESS: original = val3, compare with val3, fail - CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.LESS, val3).build(put)); + CheckAndMutateResult res = region.checkAndMutate( + CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.LESS, val3).build(put)); assertFalse(res.isSuccess()); assertNull(res.getResult()); // Test CompareOp.LESS: original = val3, compare with val4, fail - res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.LESS, val4).build(put)); + res = region.checkAndMutate( + CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.LESS, val4).build(put)); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -2571,21 +2546,21 @@ public void testCheckAndMutateWithNonEqualCompareOp() throws IOException { // succeed (now value = val2) put = new Put(row1); put.addColumn(fam1, qf1, val2); - res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.LESS, val2).build(put)); + res = region.checkAndMutate( + CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.LESS, val2).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val3, fail res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.LESS_OR_EQUAL, val3).build(put)); + .ifMatches(fam1, qf1, CompareOperator.LESS_OR_EQUAL, val3).build(put)); 
assertFalse(res.isSuccess()); assertNull(res.getResult()); // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val2, // succeed (value still = val2) res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.LESS_OR_EQUAL, val2).build(put)); + .ifMatches(fam1, qf1, CompareOperator.LESS_OR_EQUAL, val2).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2594,19 +2569,19 @@ public void testCheckAndMutateWithNonEqualCompareOp() throws IOException { put = new Put(row1); put.addColumn(fam1, qf1, val3); res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.LESS_OR_EQUAL, val1).build(put)); + .ifMatches(fam1, qf1, CompareOperator.LESS_OR_EQUAL, val1).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); // Test CompareOp.GREATER: original = val3, compare with val3, fail res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.GREATER, val3).build(put)); + .ifMatches(fam1, qf1, CompareOperator.GREATER, val3).build(put)); assertFalse(res.isSuccess()); assertNull(res.getResult()); // Test CompareOp.GREATER: original = val3, compare with val2, fail res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.GREATER, val2).build(put)); + .ifMatches(fam1, qf1, CompareOperator.GREATER, val2).build(put)); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -2615,26 +2590,26 @@ public void testCheckAndMutateWithNonEqualCompareOp() throws IOException { put = new Put(row1); put.addColumn(fam1, qf1, val2); res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.GREATER, val4).build(put)); + .ifMatches(fam1, qf1, CompareOperator.GREATER, val4).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val1, fail res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.GREATER_OR_EQUAL, val1).build(put)); + .ifMatches(fam1, qf1, CompareOperator.GREATER_OR_EQUAL, val1).build(put)); assertFalse(res.isSuccess()); assertNull(res.getResult()); // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val2, // succeed (value still = val2) res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.GREATER_OR_EQUAL, val2).build(put)); + .ifMatches(fam1, qf1, CompareOperator.GREATER_OR_EQUAL, val2).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val3, succeed res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.GREATER_OR_EQUAL, val3).build(put)); + .ifMatches(fam1, qf1, CompareOperator.GREATER_OR_EQUAL, val3).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); } @@ -2664,8 +2639,8 @@ public void testCheckAndPutThatPutWasWritten() throws IOException { put.add(kv); // checkAndPut with wrong value - CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(put)); + CheckAndMutateResult res = region.checkAndMutate( + CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(put)); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2720,7 +2695,7 @@ public void testCheckAndDeleteThatDeleteWasWritten() throws 
IOException { delete.addColumn(fam2, qf1); delete.addColumn(fam1, qf3); CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(delete)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(delete)); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2737,7 +2712,7 @@ public void testCheckAndDeleteThatDeleteWasWritten() throws IOException { delete = new Delete(row1); delete.addFamily(fam2); res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam2, qf1, CompareOperator.EQUAL, emptyVal).build(delete)); + .ifMatches(fam2, qf1, CompareOperator.EQUAL, emptyVal).build(delete)); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2749,7 +2724,7 @@ public void testCheckAndDeleteThatDeleteWasWritten() throws IOException { // Row delete delete = new Delete(row1); res = region.checkAndMutate(CheckAndMutate.newBuilder(row1) - .ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(delete)); + .ifMatches(fam1, qf1, CompareOperator.EQUAL, val1).build(delete)); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2774,12 +2749,12 @@ public void testCheckAndMutateWithFilters() throws Throwable { // Put with success CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2788,12 +2763,12 @@ public void testCheckAndMutateWithFilters() throws Throwable { // Put with failure res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("c")))) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -2801,12 +2776,12 @@ public void testCheckAndMutateWithFilters() throws Throwable { // Delete with success res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, 
Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new Delete(row).addColumns(FAMILY, Bytes.toBytes("D")))); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2814,15 +2789,14 @@ public void testCheckAndMutateWithFilters() throws Throwable { // Mutate with success res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, - Bytes.toBytes("b")))) - .build(new RowMutations(row) - .add((Mutation) new Put(row) - .addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))) - .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A"))))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("B"), CompareOperator.EQUAL, + Bytes.toBytes("b")))) + .build(new RowMutations(row) + .add((Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e"))) + .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A"))))); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2844,10 +2818,10 @@ public void testCheckAndMutateWithFiltersAndTimeRange() throws Throwable { // Put with success CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 101)) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 101)) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2856,10 +2830,10 @@ public void testCheckAndMutateWithFiltersAndTimeRange() throws Throwable { // Put with failure res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 100)) - .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 100)) + .build(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -2867,13 +2841,12 @@ public void testCheckAndMutateWithFiltersAndTimeRange() throws Throwable { // RowMutations with success res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a"))) - .timeRange(TimeRange.between(0, 101)) - .build(new RowMutations(row) - .add((Mutation) new Put(row) - .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d"))) - .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A"))))); + .ifMatches(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a"))) + .timeRange(TimeRange.between(0, 101)) + .build(new RowMutations(row) + .add((Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), 
Bytes.toBytes("d"))) + .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A"))))); assertTrue(res.isSuccess()); assertNull(res.getResult()); @@ -2893,9 +2866,9 @@ public void testCheckAndIncrement() throws Throwable { region.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))); // CheckAndIncrement with correct value - CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1))); + CheckAndMutateResult res = region.checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1))); assertTrue(res.isSuccess()); assertEquals(1, Bytes.toLong(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -2903,9 +2876,9 @@ public void testCheckAndIncrement() throws Throwable { assertEquals(1, Bytes.toLong(result.getValue(FAMILY, Bytes.toBytes("B")))); // CheckAndIncrement with wrong value - res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) - .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1))); + res = region.checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) + .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 1))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -2916,12 +2889,12 @@ public void testCheckAndIncrement() throws Throwable { // CheckAndIncrement with a filter and correct value res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("c")))) - .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 2))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 2))); assertTrue(res.isSuccess()); assertEquals(3, Bytes.toLong(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -2930,12 +2903,12 @@ public void testCheckAndIncrement() throws Throwable { // CheckAndIncrement with a filter and correct value res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("b")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("d")))) - .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 2))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("b")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("d")))) + .build(new Increment(row).addColumn(FAMILY, Bytes.toBytes("B"), 2))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -2953,10 +2926,9 @@ public void testCheckAndAppend() throws Throwable { region.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))); // CheckAndAppend with correct value - CheckAndMutateResult res = 
- region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) - .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); + CheckAndMutateResult res = region.checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")) + .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); assertTrue(res.isSuccess()); assertEquals("b", Bytes.toString(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -2964,9 +2936,9 @@ public void testCheckAndAppend() throws Throwable { assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B")))); // CheckAndAppend with wrong value - res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) - .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); + res = region.checkAndMutate( + CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")) + .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -2977,12 +2949,12 @@ public void testCheckAndAppend() throws Throwable { // CheckAndAppend with a filter and correct value res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("a")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("c")))) - .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("a")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("c")))) + .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))); assertTrue(res.isSuccess()); assertEquals("bbb", Bytes.toString(res.getResult().getValue(FAMILY, Bytes.toBytes("B")))); @@ -2991,12 +2963,12 @@ public void testCheckAndAppend() throws Throwable { // CheckAndAppend with a filter and wrong value res = region.checkAndMutate(CheckAndMutate.newBuilder(row) - .ifMatches(new FilterList( - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, - Bytes.toBytes("b")), - new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, - Bytes.toBytes("d")))) - .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))); + .ifMatches(new FilterList( + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, + Bytes.toBytes("b")), + new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, + Bytes.toBytes("d")))) + .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb")))); assertFalse(res.isSuccess()); assertNull(res.getResult()); @@ -3010,12 +2982,9 @@ public void testCheckAndIncrementAndAppend() throws Throwable { this.region = initHRegion(tableName, method, CONF, fam1); // CheckAndMutate with Increment and Append - CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row) - .ifNotExists(fam1, qual) - .build(new RowMutations(row) - .add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L)) - .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a"))) - ); + CheckAndMutate checkAndMutate = 
CheckAndMutate.newBuilder(row).ifNotExists(fam1, qual) + .build(new RowMutations(row).add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L)) + .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")))); CheckAndMutateResult result = region.checkAndMutate(checkAndMutate); assertTrue(result.isSuccess()); @@ -3027,13 +2996,11 @@ public void testCheckAndIncrementAndAppend() throws Throwable { assertEquals("a", Bytes.toString(r.getValue(fam1, qual2))); // Set return results to false - checkAndMutate = CheckAndMutate.newBuilder(row) - .ifNotExists(fam1, qual) - .build(new RowMutations(row) - .add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L).setReturnResults(false)) - .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")) - .setReturnResults(false)) - ); + checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, qual) + .build(new RowMutations(row) + .add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L).setReturnResults(false)) + .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")) + .setReturnResults(false))); result = region.checkAndMutate(checkAndMutate); assertTrue(result.isSuccess()); @@ -3044,13 +3011,10 @@ public void testCheckAndIncrementAndAppend() throws Throwable { assertEquals(2L, Bytes.toLong(r.getValue(fam1, qual1))); assertEquals("aa", Bytes.toString(r.getValue(fam1, qual2))); - checkAndMutate = CheckAndMutate.newBuilder(row) - .ifNotExists(fam1, qual) - .build(new RowMutations(row) - .add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L)) - .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")) - .setReturnResults(false)) - ); + checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, qual) + .build(new RowMutations(row).add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L)) + .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")) + .setReturnResults(false))); result = region.checkAndMutate(checkAndMutate); assertTrue(result.isSuccess()); @@ -3074,21 +3038,16 @@ public void testCheckAndRowMutations() throws Throwable { region = initHRegion(tableName, method, CONF, fam1); // Initial values - region.batchMutate(new Mutation[] { - new Put(row).addColumn(fam1, q2, Bytes.toBytes("toBeDeleted")), - new Put(row).addColumn(fam1, q3, Bytes.toBytes(5L)), - new Put(row).addColumn(fam1, q4, Bytes.toBytes("a")), - }); + region.batchMutate( + new Mutation[] { new Put(row).addColumn(fam1, q2, Bytes.toBytes("toBeDeleted")), + new Put(row).addColumn(fam1, q3, Bytes.toBytes(5L)), + new Put(row).addColumn(fam1, q4, Bytes.toBytes("a")), }); // Do CheckAndRowMutations - CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row) - .ifNotExists(fam1, q1) - .build(new RowMutations(row).add(Arrays.asList( - new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)), - new Delete(row).addColumns(fam1, q2), - new Increment(row).addColumn(fam1, q3, 1), - new Append(row).addColumn(fam1, q4, Bytes.toBytes("b")))) - ); + CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, q1).build( + new RowMutations(row).add(Arrays.asList(new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)), + new Delete(row).addColumns(fam1, q2), new Increment(row).addColumn(fam1, q3, 1), + new Append(row).addColumn(fam1, q4, Bytes.toBytes("b"))))); CheckAndMutateResult result = region.checkAndMutate(checkAndMutate); assertTrue(result.isSuccess()); @@ -3103,14 +3062,11 @@ public void testCheckAndRowMutations() throws Throwable { assertEquals("ab", 
Bytes.toString(r.getValue(fam1, q4))); // Do CheckAndRowMutations again - checkAndMutate = CheckAndMutate.newBuilder(row) - .ifNotExists(fam1, q1) - .build(new RowMutations(row).add(Arrays.asList( - new Delete(row).addColumns(fam1, q1), - new Put(row).addColumn(fam1, q2, Bytes.toBytes(v1)), - new Increment(row).addColumn(fam1, q3, 1), - new Append(row).addColumn(fam1, q4, Bytes.toBytes("b")))) - ); + checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, q1) + .build(new RowMutations(row).add(Arrays.asList(new Delete(row).addColumns(fam1, q1), + new Put(row).addColumn(fam1, q2, Bytes.toBytes(v1)), + new Increment(row).addColumn(fam1, q3, 1), + new Append(row).addColumn(fam1, q4, Bytes.toBytes("b"))))); result = region.checkAndMutate(checkAndMutate); assertFalse(result.isSuccess()); @@ -3277,8 +3233,7 @@ public void testDeleteRowWithFutureTs() throws IOException { } /** - * Tests that the special LATEST_TIMESTAMP option for puts gets replaced by - * the actual timestamp + * Tests that the special LATEST_TIMESTAMP option for puts gets replaced by the actual timestamp */ @Test public void testPutWithLatestTS() throws IOException { @@ -3301,7 +3256,7 @@ public void testPutWithLatestTS() throws IOException { Cell kv = result.rawCells()[0]; LOG.info("Got: " + kv); assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp", - kv.getTimestamp() != HConstants.LATEST_TIMESTAMP); + kv.getTimestamp() != HConstants.LATEST_TIMESTAMP); // Check same with WAL enabled (historically these took different // code paths, so check both) @@ -3317,13 +3272,12 @@ public void testPutWithLatestTS() throws IOException { kv = result.rawCells()[0]; LOG.info("Got: " + kv); assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp", - kv.getTimestamp() != HConstants.LATEST_TIMESTAMP); + kv.getTimestamp() != HConstants.LATEST_TIMESTAMP); } /** - * Tests that there is server-side filtering for invalid timestamp upper - * bound. Note that the timestamp lower bound is automatically handled for us - * by the TTL field. + * Tests that there is server-side filtering for invalid timestamp upper bound. Note that the + * timestamp lower bound is automatically handled for us by the TTL field. */ @Test public void testPutWithTsSlop() throws IOException { @@ -3394,60 +3348,47 @@ public void testDataInMemoryWithoutWAL() throws IOException { // This chunk creation is done throughout the code base. Do we want to move it into core? // It is missing from this test. W/o it we NPE. 
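For readers who want the pattern in isolation rather than spread across the reformatted checkAndMutate hunks above, here is a minimal sketch of the builder usage those tests exercise. It uses only calls that appear in the patch (CheckAndMutate.newBuilder, ifMatches with a CompareOperator, build(Put), HRegion.checkAndMutate, CheckAndMutateResult); the class name, helper name, and arguments are illustrative, not part of the change.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;

public final class CheckAndMutateSketch {

  /**
   * Issues a conditional put: the put is applied only if the current value of
   * fam:qual compares as LESS_OR_EQUAL to expected, one of the operators the
   * tests above exercise. Returns true if the region reports success.
   */
  static boolean putIfLessOrEqual(HRegion region, byte[] row, byte[] fam, byte[] qual,
      byte[] expected, byte[] newValue) throws IOException {
    Put put = new Put(row).addColumn(fam, qual, newValue);
    CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row)
      .ifMatches(fam, qual, CompareOperator.LESS_OR_EQUAL, expected).build(put));
    // For a plain Put the result payload is null; only success/failure is reported,
    // which is why the tests pair assertTrue/assertFalse with assertNull(res.getResult()).
    return res.isSuccess();
  }

  private CheckAndMutateSketch() {
  }
}
```

The same builder accepts ifEquals, ifNotExists, a Filter via ifMatches, and an optional timeRange, and can wrap a Delete, Increment, Append, or RowMutations instead of a Put, as the surrounding hunks show.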
region = initHRegion(tableName, null, null, CONF, false, Durability.SYNC_WAL, hLog, - COLUMN_FAMILY_BYTES); - - Cell originalCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(row) - .setFamily(COLUMN_FAMILY_BYTES) - .setQualifier(qual1) - .setTimestamp(EnvironmentEdgeManager.currentTime()) - .setType(KeyValue.Type.Put.getCode()) - .setValue(value1) - .build(); + COLUMN_FAMILY_BYTES); + + Cell originalCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row) + .setFamily(COLUMN_FAMILY_BYTES).setQualifier(qual1) + .setTimestamp(EnvironmentEdgeManager.currentTime()).setType(KeyValue.Type.Put.getCode()) + .setValue(value1).build(); final long originalSize = originalCell.getSerializedSize(); - Cell addCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(row) - .setFamily(COLUMN_FAMILY_BYTES) - .setQualifier(qual1) - .setTimestamp(EnvironmentEdgeManager.currentTime()) - .setType(KeyValue.Type.Put.getCode()) - .setValue(Bytes.toBytes("xxxxxxxxxx")) - .build(); + Cell addCell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row) + .setFamily(COLUMN_FAMILY_BYTES).setQualifier(qual1) + .setTimestamp(EnvironmentEdgeManager.currentTime()).setType(KeyValue.Type.Put.getCode()) + .setValue(Bytes.toBytes("xxxxxxxxxx")).build(); final long addSize = addCell.getSerializedSize(); - LOG.info("originalSize:" + originalSize - + ", addSize:" + addSize); + LOG.info("originalSize:" + originalSize + ", addSize:" + addSize); // start test. We expect that the addPut's durability will be replaced // by originalPut's durability. // case 1: testDataInMemoryWithoutWAL(region, - new Put(row).add(originalCell).setDurability(Durability.SKIP_WAL), - new Put(row).add(addCell).setDurability(Durability.SKIP_WAL), - originalSize + addSize); + new Put(row).add(originalCell).setDurability(Durability.SKIP_WAL), + new Put(row).add(addCell).setDurability(Durability.SKIP_WAL), originalSize + addSize); // case 2: testDataInMemoryWithoutWAL(region, - new Put(row).add(originalCell).setDurability(Durability.SKIP_WAL), - new Put(row).add(addCell).setDurability(Durability.SYNC_WAL), - originalSize + addSize); + new Put(row).add(originalCell).setDurability(Durability.SKIP_WAL), + new Put(row).add(addCell).setDurability(Durability.SYNC_WAL), originalSize + addSize); // case 3: testDataInMemoryWithoutWAL(region, - new Put(row).add(originalCell).setDurability(Durability.SYNC_WAL), - new Put(row).add(addCell).setDurability(Durability.SKIP_WAL), - 0); + new Put(row).add(originalCell).setDurability(Durability.SYNC_WAL), + new Put(row).add(addCell).setDurability(Durability.SKIP_WAL), 0); // case 4: testDataInMemoryWithoutWAL(region, - new Put(row).add(originalCell).setDurability(Durability.SYNC_WAL), - new Put(row).add(addCell).setDurability(Durability.SYNC_WAL), - 0); + new Put(row).add(originalCell).setDurability(Durability.SYNC_WAL), + new Put(row).add(addCell).setDurability(Durability.SYNC_WAL), 0); } - private static void testDataInMemoryWithoutWAL(HRegion region, Put originalPut, - final Put addPut, long delta) throws IOException { + private static void testDataInMemoryWithoutWAL(HRegion region, Put originalPut, final Put addPut, + long delta) throws IOException { final long initSize = region.getDataInMemoryWithoutWAL(); // save normalCPHost and replaced by mockedCPHost RegionCoprocessorHost normalCPHost = region.getCoprocessorHost(); @@ -3458,25 +3399,26 @@ private static void testDataInMemoryWithoutWAL(HRegion region, Put originalPut, @Override 
public Void answer(InvocationOnMock invocation) throws Throwable { MiniBatchOperationInProgress mb = invocation.getArgument(0); - mb.addOperationsFromCP(0, new Mutation[]{addPut}); + mb.addOperationsFromCP(0, new Mutation[] { addPut }); return null; } }).when(mockedCPHost).preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class)); - ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder. - newBuilder(COLUMN_FAMILY_BYTES); - ScanInfo info = new ScanInfo(CONF, builder.build(), Long.MAX_VALUE, - Long.MAX_VALUE, region.getCellComparator()); - Mockito.when(mockedCPHost.preFlushScannerOpen(Mockito.any(HStore.class), - Mockito.any())).thenReturn(info); - Mockito.when(mockedCPHost.preFlush(Mockito.any(), Mockito.any(StoreScanner.class), - Mockito.any())).thenAnswer(i -> i.getArgument(1)); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_FAMILY_BYTES); + ScanInfo info = new ScanInfo(CONF, builder.build(), Long.MAX_VALUE, Long.MAX_VALUE, + region.getCellComparator()); + Mockito.when(mockedCPHost.preFlushScannerOpen(Mockito.any(HStore.class), Mockito.any())) + .thenReturn(info); + Mockito + .when(mockedCPHost.preFlush(Mockito.any(), Mockito.any(StoreScanner.class), Mockito.any())) + .thenAnswer(i -> i.getArgument(1)); region.setCoprocessorHost(mockedCPHost); region.put(originalPut); region.setCoprocessorHost(normalCPHost); final long finalSize = region.getDataInMemoryWithoutWAL(); - assertEquals("finalSize:" + finalSize + ", initSize:" - + initSize + ", delta:" + delta,finalSize, initSize + delta); + assertEquals("finalSize:" + finalSize + ", initSize:" + initSize + ", delta:" + delta, + finalSize, initSize + delta); } @Test @@ -3659,12 +3601,12 @@ public void testGetWithFilter() throws IOException, InterruptedException { final int maxVersions = 3; TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf("testFilterAndColumnTracker")) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(fam1).setMaxVersions(maxVersions).build()) - .build(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + TableDescriptorBuilder.newBuilder(TableName.valueOf("testFilterAndColumnTracker")) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(fam1).setMaxVersions(maxVersions).build()) + .build(); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log"); final WAL wal = HBaseTestingUtil.createWal(TEST_UTIL.getConfiguration(), logDir, info); @@ -3789,7 +3731,6 @@ public void testGetScanner_WithNoFamilies() throws IOException { /** * This method tests https://issues.apache.org/jira/browse/HBASE-2516. 
- * * @throws IOException */ @Test @@ -3813,9 +3754,8 @@ public void testGetScanner_WithRegionClosed() throws IOException { } catch (NotServingRegionException e) { // this is the correct exception that is expected } catch (IOException e) { - fail("Got wrong type of exception - should be a NotServingRegionException, " + - "but was an IOException: " - + e.getMessage()); + fail("Got wrong type of exception - should be a NotServingRegionException, " + + "but was an IOException: " + e.getMessage()); } } @@ -3990,8 +3930,8 @@ public void testScanner_ExplicitColumns_FromFilesOnly_EnforceVersions() throws I } @Test - public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() throws - IOException { + public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() + throws IOException { byte[] row1 = Bytes.toBytes("row1"); byte[] fam1 = Bytes.toBytes("fam1"); byte[][] families = { fam1 }; @@ -4293,9 +4233,7 @@ public void testScanner_Wildcard_FromMemStoreAndFiles_EnforceVersions() throws I } /** - * Added for HBASE-5416 - * - * Here we test scan optimization when only subset of CFs are used in filter + * Added for HBASE-5416 Here we test scan optimization when only subset of CFs are used in filter * conditions. */ @Test @@ -4335,7 +4273,7 @@ public void testScanner_JoinedScanners() throws IOException { Scan scan = new Scan(); Filter filter = new SingleColumnValueExcludeFilter(cf_essential, col_normal, - CompareOperator.NOT_EQUAL, filtered_val); + CompareOperator.NOT_EQUAL, filtered_val); scan.setFilter(filter); scan.setLoadColumnFamiliesOnDemand(true); InternalScanner s = region.getScanner(scan); @@ -4357,9 +4295,7 @@ public void testScanner_JoinedScanners() throws IOException { } /** - * HBASE-5416 - * - * Test case when scan limits amount of KVs returned on each next() call. + * HBASE-5416 Test case when scan limits amount of KVs returned on each next() call. */ @Test public void testScanner_JoinedScannersWithLimits() throws IOException { @@ -4390,6 +4326,7 @@ public void testScanner_JoinedScannersWithLimits() throws IOException { public ReturnCode filterCell(final Cell ignored) throws IOException { return ReturnCode.INCLUDE; } + @Override public boolean isFamilyEssential(byte[] name) { return Bytes.equals(name, cf_first); @@ -4468,20 +4405,20 @@ public void testScannerOperationId() throws IOException { } /** - * Write an HFile block full with Cells whose qualifier that are identical between - * 0 and Short.MAX_VALUE. See HBASE-13329. + * Write an HFile block full with Cells whose qualifier that are identical between 0 and + * Short.MAX_VALUE. See HBASE-13329. * @throws Exception */ @Test public void testLongQualifier() throws Exception { byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - byte[] q = new byte[Short.MAX_VALUE+2]; - Arrays.fill(q, 0, q.length-1, (byte)42); - for (byte i=0; i<10; i++) { + byte[] q = new byte[Short.MAX_VALUE + 2]; + Arrays.fill(q, 0, q.length - 1, (byte) 42); + for (byte i = 0; i < 10; i++) { Put p = new Put(Bytes.toBytes("row")); // qualifiers that differ past Short.MAX_VALUE - q[q.length-1]=i; + q[q.length - 1] = i; p.addColumn(family, q, q); region.put(p); } @@ -4489,14 +4426,10 @@ public void testLongQualifier() throws Exception { } /** - * Flushes the cache in a thread while scanning. The tests verify that the - * scan is coherent - e.g. the returned results are always of the same or - * later update as the previous results. 
- * - * @throws IOException - * scan / compact - * @throws InterruptedException - * thread join + * Flushes the cache in a thread while scanning. The tests verify that the scan is coherent - e.g. + * the returned results are always of the same or later update as the previous results. + * @throws IOException scan / compact + * @throws InterruptedException thread join */ @Test public void testFlushCacheWhileScanning() throws IOException, InterruptedException { @@ -4526,7 +4459,7 @@ public void testFlushCacheWhileScanning() throws IOException, InterruptedExcepti region.put(put); if (i != 0 && i % compactInterval == 0) { - LOG.debug("iteration = " + i+ " ts=" + EnvironmentEdgeManager.currentTime()); + LOG.debug("iteration = " + i + " ts=" + EnvironmentEdgeManager.currentTime()); region.compact(true); } @@ -4545,8 +4478,9 @@ public void testFlushCacheWhileScanning() throws IOException, InterruptedExcepti if (!toggle) { flushThread.flush(); } - assertEquals("toggle="+toggle+"i=" + i + " ts=" + EnvironmentEdgeManager.currentTime(), - expectedCount, res.size()); + assertEquals( + "toggle=" + toggle + "i=" + i + " ts=" + EnvironmentEdgeManager.currentTime(), + expectedCount, res.size()); toggle = !toggle; } } @@ -4635,13 +4569,10 @@ int getTestCountForTestWritesWhileScanning() { } /** - * Writes very wide records and scans for the latest every time.. Flushes and - * compacts the region every now and then to keep things realistic. - * - * @throws IOException - * by flush / scan / compaction - * @throws InterruptedException - * when joining threads + * Writes very wide records and scans for the latest every time.. Flushes and compacts the region + * every now and then to keep things realistic. + * @throws IOException by flush / scan / compaction + * @throws InterruptedException when joining threads */ @Test public void testWritesWhileScanning() throws IOException, InterruptedException { @@ -4669,8 +4600,7 @@ public void testWritesWhileScanning() throws IOException, InterruptedException { flushThread.start(); - Scan scan = new Scan().withStartRow(Bytes.toBytes("row0")) - .withStopRow(Bytes.toBytes("row1")); + Scan scan = new Scan().withStartRow(Bytes.toBytes("row0")).withStopRow(Bytes.toBytes("row1")); int expectedCount = numFamilies * numQualifiers; List res = new ArrayList<>(); @@ -4701,7 +4631,7 @@ public void testWritesWhileScanning() throws IOException, InterruptedException { assertEquals("i=" + i, expectedCount, res.size()); long timestamp = res.get(0).getTimestamp(); assertTrue("Timestamps were broke: " + timestamp + " prev: " + prevTimestamp, - timestamp >= prevTimestamp); + timestamp >= prevTimestamp); prevTimestamp = timestamp; } } @@ -4813,13 +4743,10 @@ public void run() { } /** - * Writes very wide records and gets the latest row every time.. Flushes and - * compacts the region aggressivly to catch issues. - * - * @throws IOException - * by flush / scan / compaction - * @throws InterruptedException - * when joining threads + * Writes very wide records and gets the latest row every time.. Flushes and compacts the region + * aggressivly to catch issues. + * @throws IOException by flush / scan / compaction + * @throws InterruptedException when joining threads */ @Test public void testWritesWhileGetting() throws Exception { @@ -4837,7 +4764,6 @@ public void testWritesWhileGetting() throws Exception { qualifiers[i] = Bytes.toBytes("qual" + i); } - // This test flushes constantly and can cause many files to be created, // possibly // extending over the ulimit. 
Make sure compactions are aggressive in @@ -4910,8 +4836,7 @@ public void doAnAction() throws Exception { } } } finally { - if (putThread != null) - putThread.done(); + if (putThread != null) putThread.done(); region.flush(true); @@ -4929,8 +4854,8 @@ public void doAnAction() throws Exception { @Test public void testHolesInMeta() throws Exception { byte[] family = Bytes.toBytes("family"); - this.region = initHRegion(tableName, Bytes.toBytes("x"), Bytes.toBytes("z"), method, CONF, - false, family); + this.region = + initHRegion(tableName, Bytes.toBytes("x"), Bytes.toBytes("z"), method, CONF, false, family); byte[] rowNotServed = Bytes.toBytes("a"); Get g = new Get(rowNotServed); try { @@ -4965,10 +4890,12 @@ public void testIndexesScanWithOneDeletedRow() throws IOException { Scan idxScan = new Scan(); idxScan.addFamily(family); - idxScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays. asList( - new SingleColumnValueFilter(family, qual1, CompareOperator.GREATER_OR_EQUAL, - new BinaryComparator(Bytes.toBytes(0L))), new SingleColumnValueFilter(family, qual1, - CompareOperator.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(3L)))))); + idxScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, + Arrays. asList( + new SingleColumnValueFilter(family, qual1, CompareOperator.GREATER_OR_EQUAL, + new BinaryComparator(Bytes.toBytes(0L))), + new SingleColumnValueFilter(family, qual1, CompareOperator.LESS_OR_EQUAL, + new BinaryComparator(Bytes.toBytes(3L)))))); InternalScanner scanner = region.getScanner(idxScan); List res = new ArrayList<>(); @@ -4988,9 +4915,9 @@ public void testBloomFilterSize() throws IOException { byte[] val1 = Bytes.toBytes("value1"); // Create Table TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1) - .setMaxVersions(Integer.MAX_VALUE).setBloomFilterType(BloomType.ROWCOL).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1) + .setMaxVersions(Integer.MAX_VALUE).setBloomFilterType(BloomType.ROWCOL).build()) + .build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); this.region = TEST_UTIL.createLocalHRegion(info, tableDescriptor); int num_unique_rows = 10; @@ -5041,9 +4968,9 @@ public void testAllColumnsWithBloomFilter() throws IOException { // Create table TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY) - .setMaxVersions(Integer.MAX_VALUE).setBloomFilterType(BloomType.ROWCOL).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY) + .setMaxVersions(Integer.MAX_VALUE).setBloomFilterType(BloomType.ROWCOL).build()) + .build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); this.region = TEST_UTIL.createLocalHRegion(info, tableDescriptor); // For row:0, col:0: insert versions 1 through 5. 
@@ -5073,9 +5000,8 @@ public void testAllColumnsWithBloomFilter() throws IOException { } /** - * Testcase to cover bug-fix for HBASE-2823 Ensures correct delete when - * issuing delete row on columns with bloom filter set to row+col - * (BloomType.ROWCOL) + * Testcase to cover bug-fix for HBASE-2823 Ensures correct delete when issuing delete row on + * columns with bloom filter set to row+col (BloomType.ROWCOL) */ @Test public void testDeleteRowWithBloomFilter() throws IOException { @@ -5083,9 +5009,9 @@ public void testDeleteRowWithBloomFilter() throws IOException { // Create Table TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setMaxVersions(Integer.MAX_VALUE).setBloomFilterType(BloomType.ROWCOL).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(familyName) + .setMaxVersions(Integer.MAX_VALUE).setBloomFilterType(BloomType.ROWCOL).build()) + .build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); this.region = TEST_UTIL.createLocalHRegion(info, tableDescriptor); // Insert some data @@ -5112,11 +5038,11 @@ public void testDeleteRowWithBloomFilter() throws IOException { @Test public void testgetHDFSBlocksDistribution() throws Exception { HBaseTestingUtil htu = new HBaseTestingUtil(); - // Why do we set the block size in this test? If we set it smaller than the kvs, then we'll + // Why do we set the block size in this test? If we set it smaller than the kvs, then we'll // break up the file in to more pieces that can be distributed across the three nodes and we // won't be able to have the condition this test asserts; that at least one node has // a copy of all replicas -- if small block size, then blocks are spread evenly across the - // the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack. + // the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack. // final int DEFAULT_BLOCK_SIZE = 1024; // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE); htu.getConfiguration().setInt("dfs.replication", 2); @@ -5152,7 +5078,7 @@ public void testgetHDFSBlocksDistribution() throws Exception { // weight will be equal to the unique block weight. long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight(); StringBuilder sb = new StringBuilder(); - for (String host: blocksDistribution1.getTopHosts()) { + for (String host : blocksDistribution1.getTopHosts()) { if (sb.length() > 0) sb.append(", "); sb.append(host); sb.append("="); @@ -5161,15 +5087,15 @@ public void testgetHDFSBlocksDistribution() throws Exception { String topHost = blocksDistribution1.getTopHosts().get(0); long topHostWeight = blocksDistribution1.getWeight(topHost); - String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + - topHostWeight + ", topHost=" + topHost + "; " + sb.toString(); + String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + topHostWeight + + ", topHost=" + topHost + "; " + sb.toString(); LOG.info(msg); assertTrue(msg, uniqueBlocksWeight1 == topHostWeight); // use the static method to compute the value, it should be the same. 
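Since the comment notes that the static computation path is what the load balancer and similar components use, here is that call isolated from the test scaffolding. This is a hedged sketch: the helper name and the choice to return only the top host's weight are mine; the HRegion.computeHDFSBlocksDistribution signature and the HDFSBlocksDistribution accessors are the ones visible in the patch.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;

public final class BlocksDistributionSketch {

  /**
   * Computes the HDFS block locality picture for a region without opening it and
   * returns the weight held by the first of the top hosts, mirroring how the test
   * above picks its topHost before comparing against the unique-blocks weight.
   */
  static long topHostWeight(Configuration conf, TableDescriptor htd, RegionInfo hri)
      throws IOException {
    HDFSBlocksDistribution dist = HRegion.computeHDFSBlocksDistribution(conf, htd, hri);
    String topHost = dist.getTopHosts().get(0);
    return dist.getWeight(topHost);
  }

  private BlocksDistributionSketch() {
  }
}
```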
// static method is used by load balancer or other components HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution( - htu.getConfiguration(), firstRegion.getTableDescriptor(), firstRegion.getRegionInfo()); + htu.getConfiguration(), firstRegion.getTableDescriptor(), firstRegion.getRegionInfo()); long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight(); assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2); @@ -5183,9 +5109,8 @@ public void testgetHDFSBlocksDistribution() throws Exception { } /** - * Testcase to check state of region initialization task set to ABORTED or not - * if any exceptions during initialization - * + * Testcase to check state of region initialization task set to ABORTED or not if any exceptions + * during initialization * @throws Exception */ @Test @@ -5194,15 +5119,13 @@ public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() th try { FileSystem fs = Mockito.mock(FileSystem.class); Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException()); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); info = RegionInfoBuilder.newBuilder(tableName).build(); Path path = new Path(dir + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization"); - region = HRegion.newHRegion(path, null, fs, CONF, info, - tableDescriptorBuilder.build(), null); + region = HRegion.newHRegion(path, null, fs, CONF, info, tableDescriptorBuilder.build(), null); // region initialization throws IOException and set task state to ABORTED. region.initialize(); fail("Region initialization should fail due to IOException"); @@ -5212,7 +5135,7 @@ public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() th if (!(monitoredTask instanceof MonitoredRPCHandler) && monitoredTask.getDescription().contains(region.toString())) { assertTrue("Region state should be ABORTED.", - monitoredTask.getState().equals(MonitoredTask.State.ABORTED)); + monitoredTask.getState().equals(MonitoredTask.State.ABORTED)); break; } } @@ -5220,25 +5143,24 @@ public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() th } /** - * Verifies that the .regioninfo file is written on region creation and that - * is recreated if missing during region opening. + * Verifies that the .regioninfo file is written on region creation and that is recreated if + * missing during region opening. 
*/ @Test public void testRegionInfoFileCreation() throws IOException { Path rootDir = new Path(dir + "testRegionInfoFileCreation"); TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); // Create a region and skip the initialization (like CreateTableHandler) - region = HBaseTestingUtil.createRegionAndWAL(hri, rootDir, CONF, - tableDescriptor, false); + region = HBaseTestingUtil.createRegionAndWAL(hri, rootDir, CONF, tableDescriptor, false); Path regionDir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); HBaseTestingUtil.closeRegionAndWAL(region); @@ -5247,7 +5169,7 @@ public void testRegionInfoFileCreation() throws IOException { // Verify that the .regioninfo file is present assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir", - fs.exists(regionInfoFile)); + fs.exists(regionInfoFile)); // Try to open the region region = HRegion.openHRegion(rootDir, hri, tableDescriptor, null, CONF); @@ -5256,21 +5178,21 @@ public void testRegionInfoFileCreation() throws IOException { // Verify that the .regioninfo file is still there assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir", - fs.exists(regionInfoFile)); + fs.exists(regionInfoFile)); // Remove the .regioninfo file and verify is recreated on region open fs.delete(regionInfoFile, true); assertFalse(HRegionFileSystem.REGION_INFO_FILE + " should be removed from the region dir", - fs.exists(regionInfoFile)); + fs.exists(regionInfoFile)); region = HRegion.openHRegion(rootDir, hri, tableDescriptor, null, CONF); -// region = TEST_UTIL.openHRegion(hri, htd); + // region = TEST_UTIL.openHRegion(hri, htd); assertEquals(regionDir, region.getRegionFileSystem().getRegionDir()); HBaseTestingUtil.closeRegionAndWAL(region); // Verify that the .regioninfo file is still there assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir", - fs.exists(new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE))); + fs.exists(new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE))); region = null; } @@ -5576,9 +5498,8 @@ private void durabilityTest(String method, Durability tableDurability, walConf.set(WALFactory.WAL_PROVIDER, "filesystem"); final WALFactory wals = new WALFactory(walConf, HBaseTestingUtil.getRandomUUID().toString()); final WAL wal = spy(wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build())); - this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, CONF, false, tableDurability, wal, - new byte[][] { family }); + this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, CONF, + false, tableDurability, wal, new byte[][] { family }); Put put = new Put(Bytes.toBytes("r1")); put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1")); @@ -5606,7 +5527,7 @@ public boolean evaluate() throws Exception { } }); } else { - //verify(wal, never()).sync(anyLong()); + // verify(wal, 
never()).sync(anyLong()); verify(wal, never()).sync(); } @@ -5622,28 +5543,26 @@ public void testRegionReplicaSecondary() throws IOException { Path rootDir = new Path(dir + name.getMethodName()); CommonFSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir); - byte[][] families = new byte[][] { - Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") - }; + byte[][] families = + new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; byte[] cq = Bytes.toBytes("cq"); TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder( - TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); for (byte[] family : families) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } TableDescriptor tableDescriptor = builder.build(); long time = EnvironmentEdgeManager.currentTime(); RegionInfo primaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setRegionId(time).setReplicaId(0).build(); + .setRegionId(time).setReplicaId(0).build(); RegionInfo secondaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setRegionId(time).setReplicaId(1).build(); + .setRegionId(time).setReplicaId(1).build(); HRegion primaryRegion = null, secondaryRegion = null; try { - primaryRegion = HBaseTestingUtil.createRegionAndWAL(primaryHri, - rootDir, TEST_UTIL.getConfiguration(), tableDescriptor); + primaryRegion = HBaseTestingUtil.createRegionAndWAL(primaryHri, rootDir, + TEST_UTIL.getConfiguration(), tableDescriptor); // load some data putData(primaryRegion, 0, 1000, cq, families); @@ -5672,27 +5591,26 @@ public void testRegionReplicaSecondaryIsReadOnly() throws IOException { Path rootDir = new Path(dir + name.getMethodName()); CommonFSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir); - byte[][] families = new byte[][] { - Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") - }; + byte[][] families = + new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; byte[] cq = Bytes.toBytes("cq"); TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); for (byte[] family : families) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } TableDescriptor tableDescriptor = builder.build(); long time = EnvironmentEdgeManager.currentTime(); RegionInfo primaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setRegionId(time).setReplicaId(0).build(); + .setRegionId(time).setReplicaId(0).build(); RegionInfo secondaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setRegionId(time).setReplicaId(1).build(); + .setRegionId(time).setReplicaId(1).build(); HRegion primaryRegion = null, secondaryRegion = null; try { - primaryRegion = HBaseTestingUtil.createRegionAndWAL(primaryHri, - rootDir, TEST_UTIL.getConfiguration(), tableDescriptor); + primaryRegion = HBaseTestingUtil.createRegionAndWAL(primaryHri, rootDir, + TEST_UTIL.getConfiguration(), tableDescriptor); // load some data putData(primaryRegion, 0, 1000, cq, families); @@ -5730,27 +5648,26 @@ public void testCompactionFromPrimary() throws IOException { Path rootDir = new Path(dir + name.getMethodName()); CommonFSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir); - byte[][] families = new byte[][] { - Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") - }; + byte[][] families 
= + new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; byte[] cq = Bytes.toBytes("cq"); TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); for (byte[] family : families) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } TableDescriptor tableDescriptor = builder.build(); long time = EnvironmentEdgeManager.currentTime(); RegionInfo primaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setRegionId(time).setReplicaId(0).build(); + .setRegionId(time).setReplicaId(0).build(); RegionInfo secondaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()) - .setRegionId(time).setReplicaId(1).build(); + .setRegionId(time).setReplicaId(1).build(); HRegion primaryRegion = null, secondaryRegion = null; try { - primaryRegion = HBaseTestingUtil.createRegionAndWAL(primaryHri, - rootDir, TEST_UTIL.getConfiguration(), tableDescriptor); + primaryRegion = HBaseTestingUtil.createRegionAndWAL(primaryHri, rootDir, + TEST_UTIL.getConfiguration(), tableDescriptor); // load some data putData(primaryRegion, 0, 1000, cq, families); @@ -5764,8 +5681,8 @@ public void testCompactionFromPrimary() throws IOException { // move the file of the primary region to the archive, simulating a compaction Collection storeFiles = primaryRegion.getStore(families[0]).getStorefiles(); primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles); - Collection storeFileInfos = primaryRegion.getRegionFileSystem() - .getStoreFiles(Bytes.toString(families[0])); + Collection storeFileInfos = + primaryRegion.getRegionFileSystem().getStoreFiles(Bytes.toString(families[0])); Assert.assertTrue(storeFileInfos == null || storeFileInfos.isEmpty()); verifyData(secondaryRegion, 0, 1000, cq, families); @@ -5779,18 +5696,18 @@ public void testCompactionFromPrimary() throws IOException { } } - private void putData(int startRow, int numRows, byte[] qf, byte[]... families) throws - IOException { + private void putData(int startRow, int numRows, byte[] qf, byte[]... families) + throws IOException { putData(this.region, startRow, numRows, qf, families); } - private void putData(HRegion region, - int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { + private void putData(HRegion region, int startRow, int numRows, byte[] qf, byte[]... families) + throws IOException { putData(region, Durability.SKIP_WAL, startRow, numRows, qf, families); } - static void putData(HRegion region, Durability durability, - int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { + static void putData(HRegion region, Durability durability, int startRow, int numRows, byte[] qf, + byte[]... families) throws IOException { for (int i = startRow; i < startRow + numRows; i++) { Put put = new Put(Bytes.toBytes("" + i)); put.setDurability(durability); @@ -5834,13 +5751,9 @@ static void assertGet(final HRegion r, final byte[] family, final byte[] k) thro /* * Assert first value in the passed region is firstValue. - * * @param r - * * @param fs - * * @param firstValue - * * @throws IOException */ protected void assertScan(final HRegion r, final byte[] fs, final byte[] firstValue) @@ -5920,8 +5833,8 @@ protected Configuration initSplit() { } /** - * @return A region on which you must call - * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when done. 
+ * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} + * when done. */ protected HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, byte[]... families) throws IOException { @@ -5929,8 +5842,8 @@ protected HRegion initHRegion(TableName tableName, String callingMethod, Configu } /** - * @return A region on which you must call - * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when done. + * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} + * when done. */ protected HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) throws IOException { @@ -5938,49 +5851,47 @@ protected HRegion initHRegion(TableName tableName, String callingMethod, Configu } protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) - throws IOException { + String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) + throws IOException { Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log"); RegionInfo hri = - RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build(); + RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build(); final WAL wal = HBaseTestingUtil.createWal(conf, logDir, hri); return initHRegion(tableName, startKey, stopKey, conf, isReadOnly, Durability.SYNC_WAL, wal, families); } /** - * @return A region on which you must call - * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when done. + * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} + * when done. */ public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, - Configuration conf, boolean isReadOnly, Durability durability, WAL wal, - byte[]... families) throws IOException { - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, - conf, isReadOnly, durability, wal, families); + Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families) + throws IOException { + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, isReadOnly, durability, + wal, families); } /** - * Assert that the passed in Cell has expected contents for the specified row, - * column & timestamp. + * Assert that the passed in Cell has expected contents for the specified row, column & timestamp. 
*/ private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) { String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts; assertEquals("Row mismatch which checking: " + ctx, "row:" + rowIdx, - Bytes.toString(CellUtil.cloneRow(kv))); + Bytes.toString(CellUtil.cloneRow(kv))); assertEquals("ColumnFamily mismatch while checking: " + ctx, Bytes.toString(cf), - Bytes.toString(CellUtil.cloneFamily(kv))); + Bytes.toString(CellUtil.cloneFamily(kv))); assertEquals("Column qualifier mismatch while checking: " + ctx, "column:" + colIdx, - Bytes.toString(CellUtil.cloneQualifier(kv))); + Bytes.toString(CellUtil.cloneQualifier(kv))); assertEquals("Timestamp mismatch while checking: " + ctx, ts, kv.getTimestamp()); assertEquals("Value mismatch while checking: " + ctx, "value-version-" + ts, - Bytes.toString(CellUtil.cloneValue(kv))); + Bytes.toString(CellUtil.cloneValue(kv))); } @Test - public void testReverseScanner_FromMemStore_SingleCF_Normal() - throws IOException { + public void testReverseScanner_FromMemStore_SingleCF_Normal() throws IOException { byte[] rowC = Bytes.toBytes("rowC"); byte[] rowA = Bytes.toBytes("rowA"); byte[] rowB = Bytes.toBytes("rowB"); @@ -5990,8 +5901,7 @@ public void testReverseScanner_FromMemStore_SingleCF_Normal() long ts = 1; this.region = initHRegion(tableName, method, families); KeyValue kv1 = new KeyValue(rowC, cf, col, ts, KeyValue.Type.Put, null); - KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, - null); + KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, null); KeyValue kv2 = new KeyValue(rowA, cf, col, ts, KeyValue.Type.Put, null); KeyValue kv3 = new KeyValue(rowB, cf, col, ts, KeyValue.Type.Put, null); Put put = null; @@ -6013,27 +5923,26 @@ public void testReverseScanner_FromMemStore_SingleCF_Normal() List currRow = new ArrayList<>(); boolean hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowA, 0, rowA.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowA, 0, rowA.length)); assertFalse(hasNext); scanner.close(); } @Test - public void testReverseScanner_FromMemStore_SingleCF_LargerKey() - throws IOException { + public void testReverseScanner_FromMemStore_SingleCF_LargerKey() throws IOException { byte[] rowC = Bytes.toBytes("rowC"); byte[] rowA = Bytes.toBytes("rowA"); byte[] rowB = Bytes.toBytes("rowB"); @@ -6044,8 +5953,7 @@ public void testReverseScanner_FromMemStore_SingleCF_LargerKey() long ts = 1; this.region = initHRegion(tableName, method, families); KeyValue kv1 = new KeyValue(rowC, cf, col, 
ts, KeyValue.Type.Put, null); - KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, - null); + KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, null); KeyValue kv2 = new KeyValue(rowA, cf, col, ts, KeyValue.Type.Put, null); KeyValue kv3 = new KeyValue(rowB, cf, col, ts, KeyValue.Type.Put, null); Put put = null; @@ -6067,27 +5975,26 @@ public void testReverseScanner_FromMemStore_SingleCF_LargerKey() InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowA, 0, rowA.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowA, 0, rowA.length)); assertFalse(hasNext); scanner.close(); } @Test - public void testReverseScanner_FromMemStore_SingleCF_FullScan() - throws IOException { + public void testReverseScanner_FromMemStore_SingleCF_FullScan() throws IOException { byte[] rowC = Bytes.toBytes("rowC"); byte[] rowA = Bytes.toBytes("rowA"); byte[] rowB = Bytes.toBytes("rowB"); @@ -6097,8 +6004,7 @@ public void testReverseScanner_FromMemStore_SingleCF_FullScan() long ts = 1; this.region = initHRegion(tableName, method, families); KeyValue kv1 = new KeyValue(rowC, cf, col, ts, KeyValue.Type.Put, null); - KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, - null); + KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put, null); KeyValue kv2 = new KeyValue(rowA, cf, col, ts, KeyValue.Type.Put, null); KeyValue kv3 = new KeyValue(rowB, cf, col, ts, KeyValue.Type.Put, null); Put put = null; @@ -6118,20 +6024,20 @@ public void testReverseScanner_FromMemStore_SingleCF_FullScan() InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - 
assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowA, 0, rowA.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowA, 0, rowA.length)); assertFalse(hasNext); scanner.close(); } @@ -6183,20 +6089,20 @@ public void testReverseScanner_moreRowsMayExistAfter() throws IOException { InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertFalse(hasNext); scanner.close(); @@ -6207,8 +6113,8 @@ public void testReverseScanner_moreRowsMayExistAfter() throws IOException { scanner = region.getScanner(scan); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); scanner.close(); } @@ -6261,20 +6167,20 @@ public void testReverseScanner_smaller_blocksize() throws IOException { InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowC, 0, rowC.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowC, 0, rowC.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowB, 0, rowB.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowB, 0, rowB.length)); assertFalse(hasNext); scanner.close(); @@ -6285,14 +6191,13 @@ public void 
testReverseScanner_smaller_blocksize() throws IOException { scanner = region.getScanner(scan); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), rowD, 0, rowD.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), rowD, 0, rowD.length)); scanner.close(); } @Test - public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() - throws IOException { + public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() throws IOException { byte[] row0 = Bytes.toBytes("row0"); // 1 kv byte[] row1 = Bytes.toBytes("row1"); // 2 kv byte[] row2 = Bytes.toBytes("row2"); // 4 kv @@ -6310,38 +6215,22 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() conf.setInt("hbase.hstore.compactionThreshold", 10000); this.region = initHRegion(tableName, method, conf, families); // kv naming style: kv(row number) totalKvCountInThisRow seq no - KeyValue kv0_1_1 = new KeyValue(row0, cf1, col, ts, KeyValue.Type.Put, - null); - KeyValue kv1_2_1 = new KeyValue(row1, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv1_2_2 = new KeyValue(row1, cf1, col, ts + 1, - KeyValue.Type.Put, null); - KeyValue kv2_4_1 = new KeyValue(row2, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv2_4_2 = new KeyValue(row2, cf1, col, ts, KeyValue.Type.Put, - null); - KeyValue kv2_4_3 = new KeyValue(row2, cf3, col, ts, KeyValue.Type.Put, - null); - KeyValue kv2_4_4 = new KeyValue(row2, cf1, col, ts + 4, - KeyValue.Type.Put, null); - KeyValue kv3_2_1 = new KeyValue(row3, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv3_2_2 = new KeyValue(row3, cf1, col, ts + 4, - KeyValue.Type.Put, null); - KeyValue kv4_5_1 = new KeyValue(row4, cf1, col, ts, KeyValue.Type.Put, - null); - KeyValue kv4_5_2 = new KeyValue(row4, cf3, col, ts, KeyValue.Type.Put, - null); - KeyValue kv4_5_3 = new KeyValue(row4, cf3, col, ts + 5, - KeyValue.Type.Put, null); - KeyValue kv4_5_4 = new KeyValue(row4, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv4_5_5 = new KeyValue(row4, cf1, col, ts + 3, - KeyValue.Type.Put, null); - KeyValue kv5_2_1 = new KeyValue(row5, cf2, col, ts, KeyValue.Type.Put, - null); - KeyValue kv5_2_2 = new KeyValue(row5, cf3, col, ts, KeyValue.Type.Put, - null); + KeyValue kv0_1_1 = new KeyValue(row0, cf1, col, ts, KeyValue.Type.Put, null); + KeyValue kv1_2_1 = new KeyValue(row1, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv1_2_2 = new KeyValue(row1, cf1, col, ts + 1, KeyValue.Type.Put, null); + KeyValue kv2_4_1 = new KeyValue(row2, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv2_4_2 = new KeyValue(row2, cf1, col, ts, KeyValue.Type.Put, null); + KeyValue kv2_4_3 = new KeyValue(row2, cf3, col, ts, KeyValue.Type.Put, null); + KeyValue kv2_4_4 = new KeyValue(row2, cf1, col, ts + 4, KeyValue.Type.Put, null); + KeyValue kv3_2_1 = new KeyValue(row3, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv3_2_2 = new KeyValue(row3, cf1, col, ts + 4, KeyValue.Type.Put, null); + KeyValue kv4_5_1 = new KeyValue(row4, cf1, col, ts, KeyValue.Type.Put, null); + KeyValue kv4_5_2 = new KeyValue(row4, cf3, col, ts, KeyValue.Type.Put, null); + KeyValue kv4_5_3 = new KeyValue(row4, cf3, col, ts + 5, KeyValue.Type.Put, null); + KeyValue kv4_5_4 = new KeyValue(row4, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv4_5_5 = new KeyValue(row4, cf1, col, ts + 3, KeyValue.Type.Put, null); + KeyValue kv5_2_1 = 
new KeyValue(row5, cf2, col, ts, KeyValue.Type.Put, null); + KeyValue kv5_2_2 = new KeyValue(row5, cf3, col, ts, KeyValue.Type.Put, null); // hfiles(cf1/cf2) :"row1"(1 kv) / "row2"(1 kv) / "row4"(2 kv) Put put = null; put = new Put(row1); @@ -6404,58 +6293,56 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() // "row4" takes 2 next() calls since batch=3 hasNext = scanner.next(currRow); assertEquals(3, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row4, 0, row4.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row4, 0, row4.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), - currRow.get(0).getRowLength(), row4, 0, - row4.length)); + currRow.get(0).getRowLength(), row4, 0, row4.length)); assertTrue(hasNext); // 2. scan out "row3" (2 kv) currRow.clear(); hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row3, 0, row3.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row3, 0, row3.length)); assertTrue(hasNext); // 3. scan out "row2" (4 kvs) // "row2" takes 2 next() calls since batch=3 currRow.clear(); hasNext = scanner.next(currRow); assertEquals(3, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row2, 0, row2.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row2, 0, row2.length)); assertTrue(hasNext); // 4. scan out "row1" (2 kv) currRow.clear(); hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row1, 0, row1.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row1, 0, row1.length)); assertTrue(hasNext); // 5. 
scan out "row0" (1 kv) currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row0, 0, row0.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row0, 0, row0.length)); assertFalse(hasNext); scanner.close(); } @Test - public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() - throws IOException { + public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() throws IOException { byte[] row1 = Bytes.toBytes("row1"); byte[] row2 = Bytes.toBytes("row2"); byte[] row3 = Bytes.toBytes("row3"); @@ -6502,26 +6389,26 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() List currRow = new ArrayList<>(); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row4, 0, row4.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row4, 0, row4.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row3, 0, row3.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row3, 0, row3.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row2, 0, row2.length)); assertTrue(hasNext); currRow.clear(); hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); - assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow - .get(0).getRowLength(), row1, 0, row1.length)); + assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), + currRow.get(0).getRowLength(), row1, 0, row1.length)); assertFalse(hasNext); } @@ -6531,7 +6418,7 @@ public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() @Test public void testReverseScanner_StackOverflow() throws IOException { byte[] cf1 = Bytes.toBytes("CF1"); - byte[][] families = {cf1}; + byte[][] families = { cf1 }; byte[] col = Bytes.toBytes("C"); Configuration conf = new Configuration(CONF); this.region = initHRegion(tableName, method, conf, families); @@ -6551,7 +6438,7 @@ public void testReverseScanner_StackOverflow() throws IOException { // create one storefile contains many rows will be skipped // to check StoreFileScanner.seekToPreviousRow for (int i = 10000; i < 20000; i++) { - Put p = new Put(Bytes.toBytes(""+i)); + Put p = new Put(Bytes.toBytes("" + i)); p.addColumn(cf1, col, Bytes.toBytes("" + i)); region.put(p); } @@ -6560,7 +6447,7 @@ public void testReverseScanner_StackOverflow() throws IOException { // create one memstore contains many rows will be skipped // to check MemStoreScanner.seekToPreviousRow for (int i = 10000; i < 20000; i++) { - Put p = new Put(Bytes.toBytes(""+i)); + Put p = new Put(Bytes.toBytes("" + i)); p.addColumn(cf1, col, Bytes.toBytes("" + 
i)); region.put(p); } @@ -6613,11 +6500,9 @@ public void testReverseScanShouldNotScanMemstoreIfReadPtLesser() throws Exceptio // With HBASE-15871, after the scanner is reset the memstore scanner should not be // added here if (!assertDone) { - StoreScanner current = - (StoreScanner) (scanner.storeHeap).getCurrentForTesting(); + StoreScanner current = (StoreScanner) (scanner.storeHeap).getCurrentForTesting(); List scanners = current.getAllScannersForTesting(); - assertEquals("There should be only one scanner the store file scanner", 1, - scanners.size()); + assertEquals("There should be only one scanner the store file scanner", 1, scanners.size()); assertDone = true; } } while (hasNext); @@ -6696,14 +6581,13 @@ public void testOpenRegionWrittenToWAL() throws Exception { final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName)); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2)).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); // open the region w/o rss and wal and flush some files - region = - HBaseTestingUtil.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL - .getConfiguration(), htd); + region = HBaseTestingUtil.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), + TEST_UTIL.getConfiguration(), htd); assertNotNull(region); // create a file in fam1 for the region before opening in OpenRegionHandler @@ -6717,8 +6601,8 @@ public void testOpenRegionWrittenToWAL() throws Exception { WAL wal = mockWAL(); when(rss.getWAL(any(RegionInfo.class))).thenReturn(wal); - region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), - TEST_UTIL.getConfiguration(), rss, null); + region = + HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null); verify(wal, times(1)).appendMarker(any(RegionInfo.class), any(WALKeyImpl.class), editCaptor.capture()); @@ -6734,8 +6618,8 @@ public void testOpenRegionWrittenToWAL() throws Exception { assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType()); assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes())); - assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), - hri.getEncodedNameAsBytes())); + assertTrue( + Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes())); assertTrue(desc.getLogSequenceNumber() > 0); assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer())); assertEquals(2, desc.getStoresCount()); @@ -6755,10 +6639,11 @@ public void testOpenRegionWrittenToWAL() throws Exception { // Helper for test testOpenRegionWrittenToWALForLogReplay static class HRegionWithSeqId extends HRegion { public HRegionWithSeqId(final Path tableDir, final WAL wal, final FileSystem fs, - final Configuration confParam, final RegionInfo regionInfo, - final TableDescriptor htd, final RegionServerServices rsServices) { + final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd, + final RegionServerServices rsServices) { super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices); } + @Override protected long getNextSequenceId(WAL wal) throws IOException { return 42; @@ -6769,11 +6654,11 @@ protected long getNextSequenceId(WAL wal) throws IOException { 
public void testFlushedFileWithNoTags() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); Path path = TEST_UTIL.getDataTestDir(getClass().getSimpleName()); - region = HBaseTestingUtil.createRegionAndWAL(info, path, - TEST_UTIL.getConfiguration(), tableDescriptor); + region = HBaseTestingUtil.createRegionAndWAL(info, path, TEST_UTIL.getConfiguration(), + tableDescriptor); Put put = new Put(Bytes.toBytes("a-b-0-0")); put.addColumn(fam1, qual1, Bytes.toBytes("c1-value")); region.put(put); @@ -6781,8 +6666,8 @@ public void testFlushedFileWithNoTags() throws Exception { HStore store = region.getStore(fam1); Collection storefiles = store.getStorefiles(); for (HStoreFile sf : storefiles) { - assertFalse("Tags should not be present " - ,sf.getReader().getHFileReader().getFileContext().isIncludesTags()); + assertFalse("Tags should not be present ", + sf.getReader().getHFileReader().getFileContext().isIncludesTags()); } } @@ -6795,17 +6680,17 @@ public void testFlushedFileWithNoTags() throws Exception { private WAL mockWAL() throws IOException { WAL wal = mock(WAL.class); when(wal.appendData(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class))) - .thenAnswer(new Answer() { - @Override - public Long answer(InvocationOnMock invocation) throws Throwable { - WALKeyImpl key = invocation.getArgument(1); - MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(); - key.setWriteEntry(we); - return 1L; - } - }); - when(wal.appendMarker(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class))). 
- thenAnswer(new Answer() { + .thenAnswer(new Answer() { + @Override + public Long answer(InvocationOnMock invocation) throws Throwable { + WALKeyImpl key = invocation.getArgument(1); + MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(); + key.setWriteEntry(we); + return 1L; + } + }); + when(wal.appendMarker(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class))) + .thenAnswer(new Answer() { @Override public Long answer(InvocationOnMock invocation) throws Throwable { WALKeyImpl key = invocation.getArgument(1); @@ -6826,8 +6711,8 @@ public void testCloseRegionWrittenToWAL() throws Exception { final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName)); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2)).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); ArgumentCaptor editCaptor = ArgumentCaptor.forClass(WALEdit.class); @@ -6836,18 +6721,18 @@ public void testCloseRegionWrittenToWAL() throws Exception { WAL wal = mockWAL(); when(rss.getWAL(any(RegionInfo.class))).thenReturn(wal); - // create and then open a region first so that it can be closed later - region = HRegion.createHRegion(hri, rootDir, TEST_UTIL.getConfiguration(), htd, rss.getWAL(hri)); - region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), - TEST_UTIL.getConfiguration(), rss, null); + region = + HRegion.createHRegion(hri, rootDir, TEST_UTIL.getConfiguration(), htd, rss.getWAL(hri)); + region = + HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null); // close the region region.close(false); // 2 times, one for region open, the other close region - verify(wal, times(2)).appendMarker(any(RegionInfo.class), - (WALKeyImpl) any(WALKeyImpl.class), editCaptor.capture()); + verify(wal, times(2)).appendMarker(any(RegionInfo.class), (WALKeyImpl) any(WALKeyImpl.class), + editCaptor.capture()); WALEdit edit = editCaptor.getAllValues().get(1); assertNotNull(edit); @@ -6860,8 +6745,8 @@ public void testCloseRegionWrittenToWAL() throws Exception { assertEquals(RegionEventDescriptor.EventType.REGION_CLOSE, desc.getEventType()); assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes())); - assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), - hri.getEncodedNameAsBytes())); + assertTrue( + Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes())); assertTrue(desc.getLogSequenceNumber() > 0); assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer())); assertEquals(2, desc.getStoresCount()); @@ -6883,8 +6768,8 @@ public void testCloseRegionWrittenToWAL() throws Exception { @Test public void testRegionTooBusy() throws IOException { byte[] family = Bytes.toBytes("family"); - long defaultBusyWaitDuration = CONF.getLong("hbase.busy.wait.duration", - HRegion.DEFAULT_BUSY_WAIT_DURATION); + long defaultBusyWaitDuration = + CONF.getLong("hbase.busy.wait.duration", HRegion.DEFAULT_BUSY_WAIT_DURATION); CONF.setLong("hbase.busy.wait.duration", 1000); region = initHRegion(tableName, method, CONF, family); final AtomicBoolean stopped = new AtomicBoolean(true); @@ -6940,8 +6825,8 @@ public void testCellTTLs() throws IOException { final byte[] q4 = Bytes.toBytes("q4"); 
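The mockWAL() helper reformatted above stubs the WAL with a Mockito Answer so that every appended key still has an MVCC write entry begun and a fake sequence id returned; without that, the region under test would stall waiting on the WAL. A minimal standalone sketch of the same pattern follows. It mirrors the appendData stub from this diff; the import locations reflect the 3.x package layout and should be treated as assumptions.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKeyImpl;

final class WalMockingSketch {
  /**
   * Stub appendData so the MVCC write entry carried by the WALKeyImpl is begun,
   * then report a placeholder sequence id, as mockWAL() does in the test above.
   */
  static WAL newMockedWal() throws IOException {
    WAL wal = mock(WAL.class);
    when(wal.appendData(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class)))
      .thenAnswer(invocation -> {
        WALKeyImpl key = invocation.getArgument(1);
        MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
        key.setWriteEntry(we);
        return 1L; // pretend the edit landed at sequence id 1
      });
    return wal;
  }
}

The appendMarker stub in the test follows the identical shape, which is why the reformatted version collapses both anonymous Answer classes into the same indentation style.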
// 10 seconds - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + TableDescriptor tableDescriptor = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).setTimeToLive(10).build()) .build(); @@ -6954,17 +6839,17 @@ public void testCellTTLs() throws IOException { assertNotNull(region); long now = EnvironmentEdgeManager.currentTime(); // Add a cell that will expire in 5 seconds via cell TTL - region.put(new Put(row).add(new KeyValue(row, fam1, q1, now, - HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] { - // TTL tags specify ts in milliseconds - new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) }))); + region.put(new Put(row).add(new KeyValue(row, fam1, q1, now, HConstants.EMPTY_BYTE_ARRAY, + new ArrayBackedTag[] { + // TTL tags specify ts in milliseconds + new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) }))); // Add a cell that will expire after 10 seconds via family setting region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY)); // Add a cell that will expire in 15 seconds via cell TTL region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1, - HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] { - // TTL tags specify ts in milliseconds - new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) }))); + HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] { + // TTL tags specify ts in milliseconds + new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) }))); // Add a cell that will expire in 20 seconds via family setting region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY)); @@ -7069,17 +6954,19 @@ public void testTTLsUsingSmallHeartBeatCells() throws IOException { // 10 seconds int ttlSecs = 10; TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(fam1).setTimeToLive(ttlSecs).build()).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(fam1).setTimeToLive(ttlSecs).build()) + .build(); Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); // using small heart beat cells conf.setLong(StoreScanner.HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK, 2); - region = HBaseTestingUtil - .createRegionAndWAL(RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(), - TEST_UTIL.getDataTestDir(), conf, tableDescriptor); + region = HBaseTestingUtil.createRegionAndWAL( + RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(), + TEST_UTIL.getDataTestDir(), conf, tableDescriptor); assertNotNull(region); long now = EnvironmentEdgeManager.currentTime(); // Add a cell that will expire in 5 seconds via cell TTL @@ -7087,15 +6974,15 @@ public void testTTLsUsingSmallHeartBeatCells() throws IOException { region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY)); region.put(new Put(row).addColumn(fam1, q3, now, HConstants.EMPTY_BYTE_ARRAY)); // Add a cell that will expire after 10 seconds via family setting - region - .put(new Put(row).addColumn(fam1, q4, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY)); - region - .put(new Put(row).addColumn(fam1, q5, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY)); 
+ region.put( + new Put(row).addColumn(fam1, q4, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY)); + region.put( + new Put(row).addColumn(fam1, q5, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY)); region.put(new Put(row).addColumn(fam1, q6, now, HConstants.EMPTY_BYTE_ARRAY)); region.put(new Put(row).addColumn(fam1, q7, now, HConstants.EMPTY_BYTE_ARRAY)); - region - .put(new Put(row).addColumn(fam1, q8, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY)); + region.put( + new Put(row).addColumn(fam1, q8, now + ttlSecs * 1000 + 1, HConstants.EMPTY_BYTE_ARRAY)); // Flush so we are sure store scanning gets this right region.flush(true); @@ -7108,7 +6995,7 @@ public void testTTLsUsingSmallHeartBeatCells() throws IOException { checkScan(3); } - private void checkScan(int expectCellSize) throws IOException{ + private void checkScan(int expectCellSize) throws IOException { Scan s = new Scan().withStartRow(row); ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(true); ScannerContext scannerContext = contextBuilder.build(); @@ -7167,12 +7054,12 @@ public void testAppendTimestampsAreMonotonic() throws IOException { c = result.getColumnLatestCell(fam1, qual1); assertEquals(11L, c.getTimestamp()); - byte[] expected = new byte[qual1.length*2]; + byte[] expected = new byte[qual1.length * 2]; System.arraycopy(qual1, 0, expected, 0, qual1.length); System.arraycopy(qual1, 0, expected, qual1.length, qual1.length); - assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), - expected, 0, expected.length)); + assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), expected, 0, + expected.length)); } @Test @@ -7201,8 +7088,8 @@ public void testCheckAndMutateTimestampsAreMonotonic() throws IOException { c = result.getColumnLatestCell(fam1, qual1); assertEquals(10L, c.getTimestamp()); - assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), - qual2, 0, qual2.length)); + assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), qual2, 0, + qual2.length)); } @Test @@ -7217,33 +7104,20 @@ public void testBatchMutateWithWrongRegionException() throws Exception { Mutation[] mutations = new Mutation[] { new Put(a) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(a) - .setFamily(fam1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Cell.Type.Put) - .build()), + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(a).setFamily(fam1) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(Cell.Type.Put).build()), // this is outside the region boundary - new Put(c).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(c) - .setFamily(fam1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Type.Put) - .build()), - new Put(b).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(b) - .setFamily(fam1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Cell.Type.Put) - .build()) - }; + new Put(c).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(c) + .setFamily(fam1).setTimestamp(HConstants.LATEST_TIMESTAMP).setType(Type.Put).build()), + new Put(b) + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(b).setFamily(fam1) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(Cell.Type.Put).build()) }; OperationStatus[] status = region.batchMutate(mutations); assertEquals(OperationStatusCode.SUCCESS, status[0].getOperationStatusCode()); 
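testBatchMutateWithWrongRegionException, visible just above, assembles each Put from a cell built with CellBuilderFactory and then reads the per-mutation outcome from the OperationStatus array returned by batchMutate. A condensed sketch of that idiom is below; the row and family constants are hypothetical stand-ins for the test's fixtures, and only the builder calls that appear in this diff are used.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

final class BatchMutateSketch {
  // Hypothetical fixtures; the real test derives its rows from the region's start/end keys.
  static final byte[] ROW_A = Bytes.toBytes("a");
  static final byte[] ROW_B = Bytes.toBytes("b");
  static final byte[] FAMILY = Bytes.toBytes("cf");

  /** Build a Put whose single cell comes from CellBuilderFactory (SHALLOW_COPY). */
  static Mutation putFor(byte[] row) throws IOException {
    return new Put(row)
      .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(row).setFamily(FAMILY)
        .setTimestamp(HConstants.LATEST_TIMESTAMP)
        .setType(Cell.Type.Put)
        .build());
  }
}

Passing new Mutation[] { putFor(ROW_A), putFor(ROW_B) } to region.batchMutate yields one OperationStatus per slot, which is how the test distinguishes SUCCESS for in-range rows from SANITY_CHECK_FAILURE for the row lying outside the region boundary.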
assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, status[1].getOperationStatusCode()); assertEquals(OperationStatusCode.SUCCESS, status[2].getOperationStatusCode()); - // test with a row lock held for a long time final CountDownLatch obtainedRowLock = new CountDownLatch(1); ExecutorService exec = Executors.newFixedThreadPool(2); @@ -7266,19 +7140,12 @@ public Void call() throws Exception { @Override public Void call() throws Exception { Mutation[] mutations = new Mutation[] { - new Put(a).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(a) - .setFamily(fam1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Cell.Type.Put) - .build()), - new Put(b).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(b) - .setFamily(fam1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Cell.Type.Put) - .build()), - }; + new Put(a).add( + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(a).setFamily(fam1) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(Cell.Type.Put).build()), + new Put(b).add( + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(b).setFamily(fam1) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(Cell.Type.Put).build()), }; // this will wait for the row lock, and it will eventually succeed OperationStatus[] status = region.batchMutate(mutations); @@ -7303,33 +7170,24 @@ public void testBatchMutateWithZeroRowLockWait() throws Exception { Configuration conf = new Configuration(CONF); conf.setInt("hbase.rowlock.wait.duration", 0); final RegionInfo hri = - RegionInfoBuilder.newBuilder(tableName).setStartKey(a).setEndKey(c).build(); + RegionInfoBuilder.newBuilder(tableName).setStartKey(a).setEndKey(c).build(); final TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); region = HRegion.createHRegion(hri, TEST_UTIL.getDataTestDir(), conf, htd, HBaseTestingUtil.createWal(conf, TEST_UTIL.getDataTestDirOnTestFS(method + ".log"), hri)); Mutation[] mutations = new Mutation[] { new Put(a) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(a) - .setFamily(fam1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Cell.Type.Put) - .build()), - new Put(b).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(b) - .setFamily(fam1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Cell.Type.Put) - .build()) - }; + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(a).setFamily(fam1) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(Cell.Type.Put).build()), + new Put(b) + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(b).setFamily(fam1) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(Cell.Type.Put).build()) }; OperationStatus[] status = region.batchMutate(mutations); assertEquals(OperationStatusCode.SUCCESS, status[0].getOperationStatusCode()); assertEquals(OperationStatusCode.SUCCESS, status[1].getOperationStatusCode()); - // test with a row lock held for a long time final CountDownLatch obtainedRowLock = new CountDownLatch(1); ExecutorService exec = Executors.newFixedThreadPool(2); @@ -7352,19 +7210,12 @@ public Void call() throws Exception { @Override public Void call() throws Exception { Mutation[] mutations = new Mutation[] { - new Put(a).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(a) - .setFamily(fam1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - 
.setType(Cell.Type.Put) - .build()), - new Put(b).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(b) - .setFamily(fam1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(Cell.Type.Put) - .build()), - }; + new Put(a).add( + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(a).setFamily(fam1) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(Cell.Type.Put).build()), + new Put(b).add( + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(b).setFamily(fam1) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(Cell.Type.Put).build()), }; // when handling row b we are going to spin on the failure to get the row lock // until the lock above is released, but we will still succeed so long as that // takes less time then the test time out. @@ -7403,21 +7254,20 @@ public void testCheckAndRowMutateTimestampsAreMonotonic() throws IOException { RowMutations rm = new RowMutations(row); rm.add(p); assertTrue(region.checkAndRowMutate(row, fam1, qual1, CompareOperator.EQUAL, - new BinaryComparator(qual1), rm)); + new BinaryComparator(qual1), rm)); result = region.get(new Get(row)); c = result.getColumnLatestCell(fam1, qual1); assertEquals(10L, c.getTimestamp()); - LOG.info("c value " + - Bytes.toStringBinary(c.getValueArray(), c.getValueOffset(), c.getValueLength())); + LOG.info( + "c value " + Bytes.toStringBinary(c.getValueArray(), c.getValueOffset(), c.getValueLength())); - assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), - qual2, 0, qual2.length)); + assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), qual2, 0, + qual2.length)); } - HRegion initHRegion(TableName tableName, String callingMethod, - byte[]... families) throws IOException { - return initHRegion(tableName, callingMethod, HBaseConfiguration.create(), - families); + HRegion initHRegion(TableName tableName, String callingMethod, byte[]... 
families) + throws IOException { + return initHRegion(tableName, callingMethod, HBaseConfiguration.create(), families); } /** @@ -7501,18 +7351,16 @@ public void testMutateRow() throws Exception { region = initHRegion(tableName, method, CONF, fam1); // Initial values - region.batchMutate(new Mutation[] { - new Put(row).addColumn(fam1, q2, Bytes.toBytes("toBeDeleted")), - new Put(row).addColumn(fam1, q3, Bytes.toBytes(5L)), - new Put(row).addColumn(fam1, q4, Bytes.toBytes("a")), - }); + region.batchMutate( + new Mutation[] { new Put(row).addColumn(fam1, q2, Bytes.toBytes("toBeDeleted")), + new Put(row).addColumn(fam1, q3, Bytes.toBytes(5L)), + new Put(row).addColumn(fam1, q4, Bytes.toBytes("a")), }); // Do mutateRow - Result result = region.mutateRow(new RowMutations(row).add(Arrays.asList( - new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)), - new Delete(row).addColumns(fam1, q2), - new Increment(row).addColumn(fam1, q3, 1), - new Append(row).addColumn(fam1, q4, Bytes.toBytes("b"))))); + Result result = region.mutateRow( + new RowMutations(row).add(Arrays.asList(new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)), + new Delete(row).addColumns(fam1, q2), new Increment(row).addColumn(fam1, q3, 1), + new Append(row).addColumn(fam1, q4, Bytes.toBytes("b"))))); assertNotNull(result); assertEquals(6L, Bytes.toLong(result.getValue(fam1, q3))); @@ -7545,12 +7393,9 @@ public void testMutateRowInParallel() throws Exception { region = initHRegion(tableName, method, CONF, fam1); // Initial values - region.batchMutate(new Mutation[] { - new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)) + region.batchMutate(new Mutation[] { new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)) .addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2)) - .addColumn(fam1, q3, Bytes.toBytes(1L)) - .addColumn(fam1, q4, Bytes.toBytes("a")) - }); + .addColumn(fam1, q3, Bytes.toBytes(1L)).addColumn(fam1, q4, Bytes.toBytes("a")) }); final AtomicReference assertionError = new AtomicReference<>(); @@ -7564,11 +7409,11 @@ public void testMutateRowInParallel() throws Exception { } // Execute the mutations. 
This should be done atomically - region.mutateRow(new RowMutations(row).add(Arrays.asList( - new Put(row).addColumn(fam1, q1, Bytes.toBytes(v2)), - new Delete(row).addColumns(fam1, q2, deleteTimestamp.getAndIncrement()), - new Increment(row).addColumn(fam1, q3, 1L), - new Append(row).addColumn(fam1, q4, Bytes.toBytes("b"))))); + region.mutateRow(new RowMutations(row) + .add(Arrays.asList(new Put(row).addColumn(fam1, q1, Bytes.toBytes(v2)), + new Delete(row).addColumns(fam1, q2, deleteTimestamp.getAndIncrement()), + new Increment(row).addColumn(fam1, q3, 1L), + new Append(row).addColumn(fam1, q4, Bytes.toBytes("b"))))); // We need to ensure the timestamps of the Increment/Append operations are more than the // previous ones @@ -7577,12 +7422,10 @@ public void testMutateRowInParallel() throws Exception { long tsAppend = result.getColumnLatestCell(fam1, q4).getTimestamp(); // Put the initial values - region.batchMutate(new Mutation[] { - new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)) + region.batchMutate(new Mutation[] { new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)) .addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2)) .addColumn(fam1, q3, tsIncrement + 1, Bytes.toBytes(1L)) - .addColumn(fam1, q4, tsAppend + 1, Bytes.toBytes("a")) - }); + .addColumn(fam1, q4, tsAppend + 1, Bytes.toBytes("a")) }); } } catch (Exception e) { assertionError.set(new AssertionError(e)); @@ -7610,8 +7453,8 @@ public void testMutateRowInParallel() throws Exception { assertEquals(2L, Bytes.toLong(result.getValue(fam1, q3))); assertEquals("ab", Bytes.toString(result.getValue(fam1, q4))); } else { - fail("the qualifier " + Bytes.toString(q1) + " should be " + v1 + " or " + v2 + - ", but " + q1Value); + fail("the qualifier " + Bytes.toString(q1) + " should be " + v1 + " or " + v2 + + ", but " + q1Value); } } } catch (Exception e) { @@ -7657,8 +7500,8 @@ public void testBulkLoadReplicationEnabled() throws IOException { final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName)); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); region = HRegion.openHRegion(hri, tableDescriptor, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null); @@ -7667,26 +7510,25 @@ public void testBulkLoadReplicationEnabled() throws IOException { String plugins = region.conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, ""); String replicationCoprocessorClass = ReplicationObserver.class.getCanonicalName(); assertTrue(plugins.contains(replicationCoprocessorClass)); - assertTrue(region.getCoprocessorHost(). - getCoprocessors().contains(ReplicationObserver.class.getSimpleName())); + assertTrue(region.getCoprocessorHost().getCoprocessors() + .contains(ReplicationObserver.class.getSimpleName())); } /** - * The same as HRegion class, the only difference is that instantiateHStore will - * create a different HStore - HStoreForTesting. [HBASE-8518] + * The same as HRegion class, the only difference is that instantiateHStore will create a + * different HStore - HStoreForTesting. 
[HBASE-8518] */ public static class HRegionForTesting extends HRegion { public HRegionForTesting(final Path tableDir, final WAL wal, final FileSystem fs, - final Configuration confParam, final RegionInfo regionInfo, - final TableDescriptor htd, final RegionServerServices rsServices) { - this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo), - wal, confParam, htd, rsServices); + final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd, + final RegionServerServices rsServices) { + this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo), wal, confParam, htd, + rsServices); } - public HRegionForTesting(HRegionFileSystem fs, WAL wal, - Configuration confParam, TableDescriptor htd, - RegionServerServices rsServices) { + public HRegionForTesting(HRegionFileSystem fs, WAL wal, Configuration confParam, + TableDescriptor htd, RegionServerServices rsServices) { super(fs, wal, confParam, htd, rsServices); } @@ -7699,9 +7541,9 @@ protected HStore instantiateHStore(final ColumnFamilyDescriptor family, boolean throws IOException { if (family.isMobEnabled()) { if (HFile.getFormatVersion(this.conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) { - throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS + - " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY + - " accordingly."); + throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS + + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY + + " accordingly."); } return new HMobStore(this, family, this.conf, warmup); } @@ -7710,17 +7552,16 @@ protected HStore instantiateHStore(final ColumnFamilyDescriptor family, boolean } /** - * HStoreForTesting is merely the same as HStore, the difference is in the doCompaction method - * of HStoreForTesting there is a checkpoint "hbase.hstore.compaction.complete" which - * doesn't let hstore compaction complete. In the former edition, this config is set in - * HStore class inside compact method, though this is just for testing, otherwise it - * doesn't do any help. In HBASE-8518, we try to get rid of all "hbase.hstore.compaction.complete" - * config (except for testing code). + * HStoreForTesting is merely the same as HStore, the difference is in the doCompaction method of + * HStoreForTesting there is a checkpoint "hbase.hstore.compaction.complete" which doesn't let + * hstore compaction complete. In the former edition, this config is set in HStore class inside + * compact method, though this is just for testing, otherwise it doesn't do any help. In + * HBASE-8518, we try to get rid of all "hbase.hstore.compaction.complete" config (except for + * testing code). */ public static class HStoreForTesting extends HStore { - protected HStoreForTesting(final HRegion region, - final ColumnFamilyDescriptor family, + protected HStoreForTesting(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam, boolean warmup) throws IOException { super(region, family, confParam, warmup); } @@ -7734,7 +7575,7 @@ protected List doCompaction(CompactionRequestImpl cr, LOG.warn("hbase.hstore.compaction.complete is set to false"); List sfs = new ArrayList<>(newFiles.size()); final boolean evictOnClose = - getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true; + getCacheConfig() != null ? getCacheConfig().shouldEvictOnClose() : true; for (Path newFile : newFiles) { // Create storefile around what we wrote with a reader on it. 
HStoreFile sf = storeEngine.createStoreFileAndReader(newFile); @@ -7911,7 +7752,7 @@ public void run() { holder.join(); // Verify the region tried to abort the server - verify(rsServices, atLeast(1)).abort(anyString(),any()); + verify(rsServices, atLeast(1)).abort(anyString(), any()); } @Test @@ -7979,8 +7820,7 @@ public void testRegionOnCoprocessorsChange() throws IOException { // set and verify the system coprocessors for region and user region Configuration newConf = new Configuration(conf); - newConf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - MetaTableMetrics.class.getName()); + newConf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MetaTableMetrics.class.getName()); newConf.set(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, NoOpRegionCoprocessor.class.getName()); // trigger configuration change @@ -7989,9 +7829,9 @@ public void testRegionOnCoprocessorsChange() throws IOException { Set coprocessors = region.getCoprocessorHost().getCoprocessors(); assertTrue(coprocessors.size() == 2); assertTrue(region.getCoprocessorHost().getCoprocessors() - .contains(MetaTableMetrics.class.getSimpleName())); + .contains(MetaTableMetrics.class.getSimpleName())); assertTrue(region.getCoprocessorHost().getCoprocessors() - .contains(NoOpRegionCoprocessor.class.getSimpleName())); + .contains(NoOpRegionCoprocessor.class.getSimpleName())); // remove region coprocessor and keep only user region coprocessor newConf.unset(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY); @@ -8000,7 +7840,7 @@ public void testRegionOnCoprocessorsChange() throws IOException { coprocessors = region.getCoprocessorHost().getCoprocessors(); assertTrue(coprocessors.size() == 1); assertTrue(region.getCoprocessorHost().getCoprocessors() - .contains(NoOpRegionCoprocessor.class.getSimpleName())); + .contains(NoOpRegionCoprocessor.class.getSimpleName())); } @Test @@ -8014,13 +7854,13 @@ public void testRegionOnCoprocessorsWithoutChange() throws IOException { region = initHRegion(tableName, method, conf, families); // region service is null in unit test, we need to load the coprocessor once region.setCoprocessorHost(new RegionCoprocessorHost(region, null, conf)); - RegionCoprocessor regionCoprocessor = region.getCoprocessorHost() - .findCoprocessor(MetaTableMetrics.class.getName()); + RegionCoprocessor regionCoprocessor = + region.getCoprocessorHost().findCoprocessor(MetaTableMetrics.class.getName()); // simulate when other configuration may have changed and onConfigurationChange execute once region.onConfigurationChange(conf); - RegionCoprocessor regionCoprocessorAfterOnConfigurationChange = region.getCoprocessorHost() - .findCoprocessor(MetaTableMetrics.class.getName()); + RegionCoprocessor regionCoprocessorAfterOnConfigurationChange = + region.getCoprocessorHost().findCoprocessor(MetaTableMetrics.class.getName()); assertEquals(regionCoprocessor, regionCoprocessorAfterOnConfigurationChange); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java index 80d62b88299e..6d9f809b09a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestHRegionFileSystem { @ClassRule @@ -70,9 +70,8 @@ public class TestHRegionFileSystem { private static final Logger LOG = LoggerFactory.getLogger(TestHRegionFileSystem.class); public static final byte[] FAMILY_NAME = Bytes.toBytes("info"); - private static final byte[][] FAMILIES = { - Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), - Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; + private static final byte[][] FAMILIES = + { Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; private static final TableName TABLE_NAME = TableName.valueOf("TestTable"); @Rule @@ -111,19 +110,17 @@ public void testBlockStoragePolicy() throws Exception { // alter table cf schema to change storage policies // and make sure it could override settings in conf - ColumnFamilyDescriptorBuilder cfdA = - ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]); + ColumnFamilyDescriptorBuilder cfdA = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]); // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor cfdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD"); admin.modifyColumnFamily(TABLE_NAME, cfdA.build()); - while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(). - getRegionStates().hasRegionsInTransition()) { + while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates() + .hasRegionsInTransition()) { Thread.sleep(200); LOG.debug("Waiting on table to finish schema altering"); } // alter through HColumnDescriptor#setStoragePolicy - ColumnFamilyDescriptorBuilder cfdB = - ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[1]); + ColumnFamilyDescriptorBuilder cfdB = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[1]); cfdB.setStoragePolicy("ALL_SSD"); admin.modifyColumnFamily(TABLE_NAME, cfdB.build()); while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates() @@ -193,7 +190,7 @@ private HRegionFileSystem getHRegionFS(Connection conn, Table table, Configurati List familyDirs = FSUtils.getFamilyDirs(fs, regionDirs.get(0)); assertEquals(2, familyDirs.size()); RegionInfo hri = - conn.getRegionLocator(table.getName()).getAllRegionLocations().get(0).getRegion(); + conn.getRegionLocator(table.getName()).getAllRegionLocations().get(0).getRegion(); HRegionFileSystem regionFs = new HRegionFileSystem(conf, new HFileSystem(fs), tableDir, hri); return regionFs; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java index 6633081a4c8f..64c94184d183 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,11 +52,10 @@ import org.slf4j.LoggerFactory; /** - * Tests that need to spin up a cluster testing an {@link HRegion}. 
Use - * {@link TestHRegion} if you don't need a cluster, if you can test w/ a - * standalone {@link HRegion}. + * Tests that need to spin up a cluster testing an {@link HRegion}. Use {@link TestHRegion} if you + * don't need a cluster, if you can test w/ a standalone {@link HRegion}. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestHRegionOnCluster { @ClassRule @@ -83,7 +82,7 @@ public void testDataCorrectnessReplayingRecoveredEdits() throws Exception { // Create table TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); hbaseAdmin = master.getConnection().getAdmin(); hbaseAdmin.createTable(tableDescriptor); @@ -154,8 +153,8 @@ public void testDataCorrectnessReplayingRecoveredEdits() throws Exception { } } - private void putDataAndVerify(Table table, String row, byte[] family, - String value, int verifyNum) throws IOException { + private void putDataAndVerify(Table table, String row, byte[] family, String value, int verifyNum) + throws IOException { System.out.println("=========Putting data :" + row); Put put = new Put(Bytes.toBytes(row)); put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes(value)); @@ -164,8 +163,7 @@ private void putDataAndVerify(Table table, String row, byte[] family, List results = new ArrayList<>(); while (true) { Result r = resultScanner.next(); - if (r == null) - break; + if (r == null) break; results.add(r); } resultScanner.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index 2b4dca8bb1d5..abc04f2859f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -120,15 +120,16 @@ public class TestHRegionReplayEvents { HBaseClassTestRule.forClass(TestHRegionReplayEvents.class); private static final Logger LOG = LoggerFactory.getLogger(TestHRegion.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static HBaseTestingUtil TEST_UTIL; public static Configuration CONF; private String dir; - private byte[][] families = new byte[][] { - Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")}; + private byte[][] families = + new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; // Test names protected byte[] tableName; @@ -175,8 +176,8 @@ public void setUp() throws Exception { htd = builder.build(); long time = EnvironmentEdgeManager.currentTime(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); primaryHri = RegionInfoBuilder.newBuilder(htd.getTableName()).setRegionId(time).setReplicaId(0).build(); secondaryHri = @@ -190,11 +191,11 @@ public void setUp() throws Exception { when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1)); when(rss.getConfiguration()).thenReturn(CONF); when(rss.getRegionServerAccounting()).thenReturn(new RegionServerAccounting(CONF)); - String string = org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER - .toString(); + String string = + org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER.toString(); ExecutorService es = new ExecutorService(string); - es.startExecutorService(es.new ExecutorConfig().setCorePoolSize(1).setExecutorType( - ExecutorType.RS_COMPACTED_FILES_DISCHARGER)); + es.startExecutorService(es.new ExecutorConfig().setCorePoolSize(1) + .setExecutorType(ExecutorType.RS_COMPACTED_FILES_DISCHARGER)); when(rss.getExecutorService()).thenReturn(es); primaryRegion = HRegion.createHRegion(primaryHri, rootDir, CONF, htd, walPrimary); primaryRegion.close(); @@ -249,7 +250,7 @@ public void testRegionReplicaSecondaryCannotFlush() throws IOException { verifyData(secondaryRegion, 0, 1000, cq, families); // flush region - FlushResultImpl flush = (FlushResultImpl)secondaryRegion.flush(true); + FlushResultImpl flush = (FlushResultImpl) secondaryRegion.flush(true); assertEquals(FlushResultImpl.Result.CANNOT_FLUSH, flush.result); verifyData(secondaryRegion, 0, 1000, cq, families); @@ -270,7 +271,7 @@ public void testRegionReplicaSecondaryCannotFlush() throws IOException { public void testOnlyReplayingFlushStartDoesNotHoldUpRegionClose() throws IOException { // load some data to primary and flush int start = 0; - LOG.info("-- Writing some data to primary from " + start + " to " + (start+100)); + LOG.info("-- Writing some data to primary from " + start + " to " + (start + 100)); putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families); LOG.info("-- Flushing primary, creating 3 files for 3 stores"); primaryRegion.flush(true); @@ -284,8 +285,7 @@ public void testOnlyReplayingFlushStartDoesNotHoldUpRegionClose() throws IOExcep if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { if 
(flushDesc.getAction() == FlushAction.START_FLUSH) { LOG.info("-- Replaying flush start in secondary"); @@ -311,18 +311,17 @@ static int replayEdit(HRegion region, WAL.Entry entry) throws IOException { return 0; // handled elsewhere } Put put = new Put(CellUtil.cloneRow(entry.getEdit().getCells().get(0))); - for (Cell cell : entry.getEdit().getCells()) put.add(cell); + for (Cell cell : entry.getEdit().getCells()) + put.add(cell); put.setDurability(Durability.SKIP_WAL); MutationReplay mutation = new MutationReplay(MutationType.PUT, put, 0, 0); - region.batchReplay(new MutationReplay[] {mutation}, - entry.getKey().getSequenceId()); + region.batchReplay(new MutationReplay[] { mutation }, entry.getKey().getSequenceId()); return Integer.parseInt(Bytes.toString(put.getRow())); } WAL.Reader createWALReaderForPrimary() throws FileNotFoundException, IOException { return WALFactory.createReader(TEST_UTIL.getTestFileSystem(), - AbstractFSWALProvider.getCurrentFileName(walPrimary), - TEST_UTIL.getConfiguration()); + AbstractFSWALProvider.getCurrentFileName(walPrimary), TEST_UTIL.getConfiguration()); } @Test @@ -355,8 +354,7 @@ public void testReplayFlushesAndCompactions() throws IOException { // compaction from primary LOG.info("-- Compacting primary, only 1 store"); - primaryRegion.compactStore(Bytes.toBytes("cf1"), - NoLimitThroughputController.INSTANCE); + primaryRegion.compactStore(Bytes.toBytes("cf1"), NoLimitThroughputController.INSTANCE); // now replay the edits and the flush marker reader = createWALReaderForPrimary(); @@ -369,10 +367,9 @@ public void testReplayFlushesAndCompactions() throws IOException { if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); - CompactionDescriptor compactionDesc - = WALEdit.getCompaction(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + CompactionDescriptor compactionDesc = + WALEdit.getCompaction(entry.getEdit().getCells().get(0)); if (flushDesc != null) { // first verify that everything is replayed and visible before flush event replay verifyData(secondaryRegion, 0, lastReplayed, cq, families); @@ -416,7 +413,7 @@ public void testReplayFlushesAndCompactions() throws IOException { assertEquals(store.getSize(), store.getStorefilesSize()); } // after replay verify that everything is still visible - verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families); } else if (compactionDesc != null) { secondaryRegion.replayWALCompactionMarker(compactionDesc, true, false, Long.MAX_VALUE); @@ -433,7 +430,7 @@ public void testReplayFlushesAndCompactions() throws IOException { } } - assertEquals(400-1, lastReplayed); + assertEquals(400 - 1, lastReplayed); LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion, 0, 400, cq, families); @@ -459,7 +456,7 @@ public void testReplayFlushStartMarkers() throws IOException { int numRows = 200; // now replay the edits and the flush marker - reader = createWALReaderForPrimary(); + reader = createWALReaderForPrimary(); LOG.info("-- Replaying edits and flush events in secondary"); @@ -471,8 +468,7 @@ public void testReplayFlushStartMarkers() throws IOException { if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { // 
first verify that everything is replayed and visible before flush event replay HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1")); @@ -494,11 +490,11 @@ public void testReplayFlushStartMarkers() throws IOException { LOG.info("Memstore size reduced by:" + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize)); assertTrue(storeMemstoreSize > newStoreMemstoreSize); - verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families); } // after replay verify that everything is still visible - verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families); } else { lastReplayed = replayEdit(secondaryRegion, entry); } @@ -521,8 +517,8 @@ public void testReplayFlushStartMarkers() throws IOException { verifyData(secondaryRegion, 0, numRows, cq, families); // Test case 2: replay a flush start marker with a smaller seqId - FlushDescriptor startFlushDescSmallerSeqId - = clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() - 50); + FlushDescriptor startFlushDescSmallerSeqId = + clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() - 50); LOG.info("-- Replaying same flush start in secondary again " + startFlushDescSmallerSeqId); result = secondaryRegion.replayWALFlushStartMarker(startFlushDescSmallerSeqId); assertNull(result); // this should return null. Ignoring the flush start marker @@ -534,8 +530,8 @@ public void testReplayFlushStartMarkers() throws IOException { verifyData(secondaryRegion, 0, numRows, cq, families); // Test case 3: replay a flush start marker with a larger seqId - FlushDescriptor startFlushDescLargerSeqId - = clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() + 50); + FlushDescriptor startFlushDescLargerSeqId = + clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() + 50); LOG.info("-- Replaying same flush start in secondary again " + startFlushDescLargerSeqId); result = secondaryRegion.replayWALFlushStartMarker(startFlushDescLargerSeqId); assertNull(result); // this should return null. 
Ignoring the flush start marker @@ -564,7 +560,7 @@ public void testReplayFlushCommitMarkerSmallerThanFlushStartMarker() throws IOEx int numRows = 300; // now replay the edits and the flush marker - reader = createWALReaderForPrimary(); + reader = createWALReaderForPrimary(); LOG.info("-- Replaying edits and flush events in secondary"); FlushDescriptor startFlushDesc = null; @@ -577,8 +573,7 @@ public void testReplayFlushCommitMarkerSmallerThanFlushStartMarker() throws IOEx if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { if (flushDesc.getAction() == FlushAction.START_FLUSH) { // don't replay the first flush start marker, hold on to it, replay the second one @@ -597,7 +592,7 @@ public void testReplayFlushCommitMarkerSmallerThanFlushStartMarker() throws IOEx } } // after replay verify that everything is still visible - verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families); } else { lastReplayed = replayEdit(secondaryRegion, entry); } @@ -655,7 +650,7 @@ public void testReplayFlushCommitMarkerLargerThanFlushStartMarker() throws IOExc int numRows = 200; // now replay the edits and the flush marker - reader = createWALReaderForPrimary(); + reader = createWALReaderForPrimary(); LOG.info("-- Replaying edits and flush events in secondary"); FlushDescriptor startFlushDesc = null; @@ -667,8 +662,7 @@ public void testReplayFlushCommitMarkerLargerThanFlushStartMarker() throws IOExc if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { if (flushDesc.getAction() == FlushAction.START_FLUSH) { if (startFlushDesc == null) { @@ -681,13 +675,11 @@ public void testReplayFlushCommitMarkerLargerThanFlushStartMarker() throws IOExc // do not replay any flush commit yet // hold on to the flush commit marker but simulate a larger // flush commit seqId - commitFlushDesc = - FlushDescriptor.newBuilder(flushDesc) - .setFlushSequenceNumber(flushDesc.getFlushSequenceNumber() + 50) - .build(); + commitFlushDesc = FlushDescriptor.newBuilder(flushDesc) + .setFlushSequenceNumber(flushDesc.getFlushSequenceNumber() + 50).build(); } // after replay verify that everything is still visible - verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families); } else { lastReplayed = replayEdit(secondaryRegion, entry); } @@ -736,9 +728,9 @@ public void testReplayFlushCommitMarkerLargerThanFlushStartMarker() throws IOExc } /** - * Tests the case where we receive a flush commit before receiving any flush prepare markers. - * The memstore edits should be dropped after the flush commit replay since they should be in - * flushed files + * Tests the case where we receive a flush commit before receiving any flush prepare markers. The + * memstore edits should be dropped after the flush commit replay since they should be in flushed + * files */ @Test public void testReplayFlushCommitMarkerWithoutFlushStartMarkerDroppableMemstore() @@ -747,9 +739,9 @@ public void testReplayFlushCommitMarkerWithoutFlushStartMarkerDroppableMemstore( } /** - * Tests the case where we receive a flush commit before receiving any flush prepare markers. 
- * The memstore edits should be not dropped after the flush commit replay since not every edit - * will be in flushed files (based on seqId) + * Tests the case where we receive a flush commit before receiving any flush prepare markers. The + * memstore edits should be not dropped after the flush commit replay since not every edit will be + * in flushed files (based on seqId) */ @Test public void testReplayFlushCommitMarkerWithoutFlushStartMarkerNonDroppableMemstore() @@ -768,7 +760,7 @@ public void testReplayFlushCommitMarkerWithoutFlushStartMarker(boolean droppable int numRows = droppableMemstore ? 100 : 200; // now replay the edits and the flush marker - reader = createWALReaderForPrimary(); + reader = createWALReaderForPrimary(); LOG.info("-- Replaying edits and flush events in secondary"); FlushDescriptor commitFlushDesc = null; @@ -779,8 +771,7 @@ public void testReplayFlushCommitMarkerWithoutFlushStartMarker(boolean droppable if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { if (flushDesc.getAction() == FlushAction.START_FLUSH) { // do not replay flush start marker @@ -788,7 +779,7 @@ public void testReplayFlushCommitMarkerWithoutFlushStartMarker(boolean droppable commitFlushDesc = flushDesc; // hold on to the flush commit marker } // after replay verify that everything is still visible - verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + verifyData(secondaryRegion, 0, lastReplayed + 1, cq, families); } else { lastReplayed = replayEdit(secondaryRegion, entry); } @@ -847,9 +838,7 @@ public void testReplayFlushCommitMarkerWithoutFlushStartMarker(boolean droppable } private FlushDescriptor clone(FlushDescriptor flush, long flushSeqId) { - return FlushDescriptor.newBuilder(flush) - .setFlushSequenceNumber(flushSeqId) - .build(); + return FlushDescriptor.newBuilder(flush).setFlushSequenceNumber(flushSeqId).build(); } /** @@ -865,7 +854,7 @@ public void testReplayRegionOpenEvent() throws IOException { primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null); // now replay the edits and the flush marker - reader = createWALReaderForPrimary(); + reader = createWALReaderForPrimary(); List regionEvents = Lists.newArrayList(); LOG.info("-- Replaying edits and region events in secondary"); @@ -874,10 +863,9 @@ public void testReplayRegionOpenEvent() throws IOException { if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); - RegionEventDescriptor regionEventDesc - = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + RegionEventDescriptor regionEventDesc = + WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { // don't replay flush events @@ -922,7 +910,8 @@ public void testReplayRegionOpenEvent() throws IOException { long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize == 0); - assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any + assertNull(secondaryRegion.getPrepareFlushResult()); // prepare snapshot should be dropped if + // any LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion, 0, numRows, cq, families); @@ -945,7 
+934,7 @@ public void testReplayRegionOpenEventAfterFlushStart() throws IOException { primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null); // now replay the edits and the flush marker - reader = createWALReaderForPrimary(); + reader = createWALReaderForPrimary(); List regionEvents = Lists.newArrayList(); LOG.info("-- Replaying edits and region events in secondary"); @@ -954,10 +943,9 @@ public void testReplayRegionOpenEventAfterFlushStart() throws IOException { if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); - RegionEventDescriptor regionEventDesc - = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + RegionEventDescriptor regionEventDesc = + WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { // only replay flush start @@ -1001,7 +989,8 @@ public void testReplayRegionOpenEventAfterFlushStart() throws IOException { long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize == 0); - assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any + assertNull(secondaryRegion.getPrepareFlushResult()); // prepare snapshot should be dropped if + // any LOG.info("-- Verifying edits from secondary"); verifyData(secondaryRegion, 0, numRows, cq, families); @@ -1024,7 +1013,7 @@ public void testSkippingEditsWithSmallerSeqIdAfterRegionOpenEvent() throws IOExc primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null); // now replay the edits and the flush marker - reader = createWALReaderForPrimary(); + reader = createWALReaderForPrimary(); List regionEvents = Lists.newArrayList(); List edits = Lists.newArrayList(); @@ -1034,10 +1023,9 @@ public void testSkippingEditsWithSmallerSeqIdAfterRegionOpenEvent() throws IOExc if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); - RegionEventDescriptor regionEventDesc - = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + RegionEventDescriptor regionEventDesc = + WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { // don't replay flushes @@ -1050,14 +1038,12 @@ public void testSkippingEditsWithSmallerSeqIdAfterRegionOpenEvent() throws IOExc // replay the region open of first open, but with the seqid of the second open // this way non of the flush files will be picked up. - secondaryRegion.replayWALRegionEventMarker( - RegionEventDescriptor.newBuilder(regionEvents.get(0)).setLogSequenceNumber( - regionEvents.get(2).getLogSequenceNumber()).build()); - + secondaryRegion.replayWALRegionEventMarker(RegionEventDescriptor.newBuilder(regionEvents.get(0)) + .setLogSequenceNumber(regionEvents.get(2).getLogSequenceNumber()).build()); // replay edits from the before region close. If replay does not // skip these the following verification will NOT fail. 
- for (WAL.Entry entry: edits) { + for (WAL.Entry entry : edits) { replayEdit(secondaryRegion, entry); } @@ -1076,13 +1062,13 @@ public void testSkippingEditsWithSmallerSeqIdAfterRegionOpenEvent() throws IOExc public void testReplayFlushSeqIds() throws IOException { // load some data to primary and flush int start = 0; - LOG.info("-- Writing some data to primary from " + start + " to " + (start+100)); + LOG.info("-- Writing some data to primary from " + start + " to " + (start + 100)); putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families); LOG.info("-- Flushing primary, creating 3 files for 3 stores"); primaryRegion.flush(true); // now replay the flush marker - reader = createWALReaderForPrimary(); + reader = createWALReaderForPrimary(); long flushSeqId = -1; LOG.info("-- Replaying flush events in secondary"); @@ -1091,8 +1077,7 @@ public void testReplayFlushSeqIds() throws IOException { if (entry == null) { break; } - FlushDescriptor flushDesc - = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); if (flushDesc != null) { if (flushDesc.getAction() == FlushAction.START_FLUSH) { LOG.info("-- Replaying flush start in secondary"); @@ -1154,8 +1139,8 @@ public void testSeqIdsFromReplay() throws IOException { } /** - * Tests that a region opened in secondary mode would not write region open / close - * events to its WAL. + * Tests that a region opened in secondary mode would not write region open / close events to its + * WAL. * @throws IOException */ @Test @@ -1170,16 +1155,16 @@ public void testSecondaryRegionDoesNotWriteRegionEventsToWAL() throws IOExceptio // test for replay prepare flush putDataByReplay(secondaryRegion, 0, 10, cq, families); - secondaryRegion.replayWALFlushStartMarker(FlushDescriptor.newBuilder(). - setFlushSequenceNumber(10) - .setTableName(UnsafeByteOperations.unsafeWrap( - primaryRegion.getTableDescriptor().getTableName().getName())) - .setAction(FlushAction.START_FLUSH) - .setEncodedRegionName( + secondaryRegion.replayWALFlushStartMarker(FlushDescriptor.newBuilder() + .setFlushSequenceNumber(10) + .setTableName(UnsafeByteOperations + .unsafeWrap(primaryRegion.getTableDescriptor().getTableName().getName())) + .setAction(FlushAction.START_FLUSH) + .setEncodedRegionName( UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getEncodedNameAsBytes())) - .setRegionName(UnsafeByteOperations.unsafeWrap( - primaryRegion.getRegionInfo().getRegionName())) - .build()); + .setRegionName( + UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getRegionName())) + .build()); verify(walSecondary, times(0)).appendData(any(RegionInfo.class), any(WALKeyImpl.class), any(WALEdit.class)); @@ -1204,7 +1189,7 @@ public void testRegionReadsEnabledFlag() throws IOException { try { verifyData(secondaryRegion, 0, 100, cq, families); fail("Should have failed with IOException"); - } catch(IOException ex) { + } catch (IOException ex) { // expected } @@ -1254,9 +1239,9 @@ public void testWriteFlushRequestMarker() throws IOException { /** * Test the case where the secondary region replica is not in reads enabled state because it is - * waiting for a flush or region open marker from primary region. Replaying CANNOT_FLUSH - * flush marker entry should restore the reads enabled status in the region and allow the reads - * to continue. + * waiting for a flush or region open marker from primary region. 
Replaying CANNOT_FLUSH flush + * marker entry should restore the reads enabled status in the region and allow the reads to + * continue. */ @Test public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOException { @@ -1284,8 +1269,7 @@ public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOExcept /** * Test the case where the secondary region replica is not in reads enabled state because it is * waiting for a flush or region open marker from primary region. Replaying flush start and commit - * entries should restore the reads enabled status in the region and allow the reads - * to continue. + * entries should restore the reads enabled status in the region and allow the reads to continue. */ @Test public void testReplayingFlushRestoresReadsEnabledState() throws IOException { @@ -1326,8 +1310,7 @@ public void testReplayingFlushRestoresReadsEnabledState() throws IOException { /** * Test the case where the secondary region replica is not in reads enabled state because it is * waiting for a flush or region open marker from primary region. Replaying flush start and commit - * entries should restore the reads enabled status in the region and allow the reads - * to continue. + * entries should restore the reads enabled status in the region and allow the reads to continue. */ @Test public void testReplayingFlushWithEmptyMemstoreRestoresReadsEnabledState() throws IOException { @@ -1358,8 +1341,8 @@ public void testReplayingFlushWithEmptyMemstoreRestoresReadsEnabledState() throw /** * Test the case where the secondary region replica is not in reads enabled state because it is * waiting for a flush or region open marker from primary region. Replaying region open event - * entry from primary should restore the reads enabled status in the region and allow the reads - * to continue. + * entry from primary should restore the reads enabled status in the region and allow the reads to + * continue. 
*/ @Test public void testReplayingRegionOpenEventRestoresReadsEnabledState() throws IOException { @@ -1376,8 +1359,8 @@ public void testReplayingRegionOpenEventRestoresReadsEnabledState() throws IOExc break; } - RegionEventDescriptor regionEventDesc - = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); + RegionEventDescriptor regionEventDesc = + WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); if (regionEventDesc != null) { secondaryRegion.replayWALRegionEventMarker(regionEventDesc); @@ -1450,7 +1433,7 @@ public void testRefresStoreFiles() throws IOException { putDataWithFlushes(primaryRegion, 400, 400, 0); numRows = 400; - reader = createWALReaderForPrimary(); + reader = createWALReaderForPrimary(); while (true) { WAL.Entry entry = reader.next(); if (entry == null) { @@ -1496,7 +1479,7 @@ private void disableReads(HRegion region) { try { verifyData(region, 0, 1, cq, families); fail("Should have failed with IOException"); - } catch(IOException ex) { + } catch (IOException ex) { // expected } } @@ -1504,7 +1487,7 @@ private void disableReads(HRegion region) { private void replay(HRegion region, Put put, long replaySeqId) throws IOException { put.setDurability(Durability.SKIP_WAL); MutationReplay mutation = new MutationReplay(MutationType.PUT, put, 0, 0); - region.batchReplay(new MutationReplay[] {mutation}, replaySeqId); + region.batchReplay(new MutationReplay[] { mutation }, replaySeqId); } /** @@ -1555,7 +1538,6 @@ public void testReplayBulkLoadEvent() throws IOException { // replay the bulk load event secondaryRegion.replayWALBulkLoadEventMarker(bulkloadEvent); - List storeFileName = new ArrayList<>(); for (StoreDescriptor storeDesc : bulkloadEvent.getStoresList()) { storeFileName.addAll(storeDesc.getStoreFileList()); @@ -1578,38 +1560,37 @@ public void testReplayBulkLoadEvent() throws IOException { public void testReplayingFlushCommitWithFileAlreadyDeleted() throws IOException { // tests replaying flush commit marker, but the flush file has already been compacted // from primary and also deleted from the archive directory - secondaryRegion.replayWALFlushCommitMarker(FlushDescriptor.newBuilder(). 
- setFlushSequenceNumber(Long.MAX_VALUE) - .setTableName(UnsafeByteOperations.unsafeWrap(primaryRegion.getTableDescriptor().getTableName().getName())) - .setAction(FlushAction.COMMIT_FLUSH) - .setEncodedRegionName( - UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getEncodedNameAsBytes())) - .setRegionName(UnsafeByteOperations.unsafeWrap( - primaryRegion.getRegionInfo().getRegionName())) - .addStoreFlushes(StoreFlushDescriptor.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(families[0])) - .setStoreHomeDir("/store_home_dir") - .addFlushOutput("/foo/baz/123") - .build()) - .build()); + secondaryRegion.replayWALFlushCommitMarker( + FlushDescriptor.newBuilder().setFlushSequenceNumber(Long.MAX_VALUE) + .setTableName(UnsafeByteOperations + .unsafeWrap(primaryRegion.getTableDescriptor().getTableName().getName())) + .setAction(FlushAction.COMMIT_FLUSH) + .setEncodedRegionName( + UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getEncodedNameAsBytes())) + .setRegionName( + UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getRegionName())) + .addStoreFlushes(StoreFlushDescriptor.newBuilder() + .setFamilyName(UnsafeByteOperations.unsafeWrap(families[0])) + .setStoreHomeDir("/store_home_dir").addFlushOutput("/foo/baz/123").build()) + .build()); } @Test public void testReplayingCompactionWithFileAlreadyDeleted() throws IOException { // tests replaying compaction marker, but the compaction output file has already been compacted // from primary and also deleted from the archive directory - secondaryRegion.replayWALCompactionMarker(CompactionDescriptor.newBuilder() - .setTableName(UnsafeByteOperations.unsafeWrap( - primaryRegion.getTableDescriptor().getTableName().getName())) - .setEncodedRegionName( - UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getEncodedNameAsBytes())) - .setFamilyName(UnsafeByteOperations.unsafeWrap(families[0])) - .addCompactionInput("/123") - .addCompactionOutput("/456") - .setStoreHomeDir("/store_home_dir") - .setRegionName(UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getRegionName())) - .build() - , true, true, Long.MAX_VALUE); + secondaryRegion.replayWALCompactionMarker( + CompactionDescriptor.newBuilder() + .setTableName(UnsafeByteOperations + .unsafeWrap(primaryRegion.getTableDescriptor().getTableName().getName())) + .setEncodedRegionName( + UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getEncodedNameAsBytes())) + .setFamilyName(UnsafeByteOperations.unsafeWrap(families[0])).addCompactionInput("/123") + .addCompactionOutput("/456").setStoreHomeDir("/store_home_dir") + .setRegionName( + UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getRegionName())) + .build(), + true, true, Long.MAX_VALUE); } @Test @@ -1617,20 +1598,19 @@ public void testReplayingRegionOpenEventWithFileAlreadyDeleted() throws IOExcept // tests replaying region open event marker, but the region files have already been compacted // from primary and also deleted from the archive directory secondaryRegion.replayWALRegionEventMarker(RegionEventDescriptor.newBuilder() - .setTableName(UnsafeByteOperations.unsafeWrap( - primaryRegion.getTableDescriptor().getTableName().getName())) - .setEncodedRegionName( + .setTableName(UnsafeByteOperations + .unsafeWrap(primaryRegion.getTableDescriptor().getTableName().getName())) + .setEncodedRegionName( UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getEncodedNameAsBytes())) - 
.setRegionName(UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getRegionName())) - .setEventType(EventType.REGION_OPEN) - .setServer(ProtobufUtil.toServerName(ServerName.valueOf("foo", 1, 1))) - .setLogSequenceNumber(Long.MAX_VALUE) - .addStores(StoreDescriptor.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(families[0])) - .setStoreHomeDir("/store_home_dir") - .addStoreFile("/123") - .build()) - .build()); + .setRegionName( + UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getRegionName())) + .setEventType(EventType.REGION_OPEN) + .setServer(ProtobufUtil.toServerName(ServerName.valueOf("foo", 1, 1))) + .setLogSequenceNumber(Long.MAX_VALUE) + .addStores( + StoreDescriptor.newBuilder().setFamilyName(UnsafeByteOperations.unsafeWrap(families[0])) + .setStoreHomeDir("/store_home_dir").addStoreFile("/123").build()) + .build()); } @Test @@ -1638,20 +1618,19 @@ public void testReplayingBulkLoadEventWithFileAlreadyDeleted() throws IOExceptio // tests replaying bulk load event marker, but the bulk load files have already been compacted // from primary and also deleted from the archive directory secondaryRegion.replayWALBulkLoadEventMarker(BulkLoadDescriptor.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(primaryRegion.getTableDescriptor().getTableName())) - .setEncodedRegionName( + .setTableName( + ProtobufUtil.toProtoTableName(primaryRegion.getTableDescriptor().getTableName())) + .setEncodedRegionName( UnsafeByteOperations.unsafeWrap(primaryRegion.getRegionInfo().getEncodedNameAsBytes())) - .setBulkloadSeqNum(Long.MAX_VALUE) - .addStores(StoreDescriptor.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(families[0])) - .setStoreHomeDir("/store_home_dir") - .addStoreFile("/123") - .build()) - .build()); + .setBulkloadSeqNum(Long.MAX_VALUE) + .addStores( + StoreDescriptor.newBuilder().setFamilyName(UnsafeByteOperations.unsafeWrap(families[0])) + .setStoreHomeDir("/store_home_dir").addStoreFile("/123").build()) + .build()); } - private String createHFileForFamilies(Path testPath, byte[] family, - byte[] valueBytes) throws IOException { + private String createHFileForFamilies(Path testPath, byte[] family, byte[] valueBytes) + throws IOException { HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration()); // TODO We need a way to do this without creating files Path testFile = new Path(testPath, TEST_UTIL.getRandomUUID().toString()); @@ -1662,13 +1641,8 @@ private String createHFileForFamilies(Path testPath, byte[] family, HFile.Writer writer = hFileFactory.create(); try { writer.append(new KeyValue(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(valueBytes) - .setFamily(family) - .setQualifier(valueBytes) - .setTimestamp(0L) - .setType(KeyValue.Type.Put.getCode()) - .setValue(valueBytes) - .build())); + .setRow(valueBytes).setFamily(family).setQualifier(valueBytes).setTimestamp(0L) + .setType(KeyValue.Type.Put.getCode()).setValue(valueBytes).build())); } finally { writer.close(); } @@ -1678,15 +1652,17 @@ private String createHFileForFamilies(Path testPath, byte[] family, return testFile.toString(); } - /** Puts a total of numRows + numRowsAfterFlush records indexed with numeric row keys. Does - * a flush every flushInterval number of records. 
Then it puts numRowsAfterFlush number of - * more rows but does not execute flush after - * @throws IOException */ - private void putDataWithFlushes(HRegion region, int flushInterval, - int numRows, int numRowsAfterFlush) throws IOException { + /** + * Puts a total of numRows + numRowsAfterFlush records indexed with numeric row keys. Does a flush + * every flushInterval number of records. Then it puts numRowsAfterFlush number of more rows but + * does not execute flush after + * @throws IOException + */ + private void putDataWithFlushes(HRegion region, int flushInterval, int numRows, + int numRowsAfterFlush) throws IOException { int start = 0; for (; start < numRows; start += flushInterval) { - LOG.info("-- Writing some data to primary from " + start + " to " + (start+flushInterval)); + LOG.info("-- Writing some data to primary from " + start + " to " + (start + flushInterval)); putData(region, Durability.SYNC_WAL, start, flushInterval, cq, families); LOG.info("-- Flushing primary, creating 3 files for 3 stores"); region.flush(true); @@ -1695,15 +1671,15 @@ private void putDataWithFlushes(HRegion region, int flushInterval, putData(region, Durability.SYNC_WAL, start, numRowsAfterFlush, cq, families); } - private void putDataByReplay(HRegion region, - int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { + private void putDataByReplay(HRegion region, int startRow, int numRows, byte[] qf, + byte[]... families) throws IOException { for (int i = startRow; i < startRow + numRows; i++) { Put put = new Put(Bytes.toBytes("" + i)); put.setDurability(Durability.SKIP_WAL); for (byte[] family : families) { put.addColumn(family, qf, EnvironmentEdgeManager.currentTime(), null); } - replay(region, put, i+1); + replay(region, put, i + 1); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index a21c1e1c21b9..11138ac42e2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -90,11 +90,11 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of - * the region server's bullkLoad functionality. + * Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of the region server's + * bullkLoad functionality. */ @RunWith(Parameterized.class) -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestHRegionServerBulkLoad { @ClassRule @@ -116,6 +116,7 @@ public class TestHRegionServerBulkLoad { families[i] = Bytes.toBytes(family(i)); } } + @Parameters public static final Collection parameters() { int[] sleepDurations = new int[] { 0, 30000 }; @@ -150,16 +151,12 @@ static String family(int i) { /** * Create an HFile with the given number of rows with a specified value. 
*/ - public static void createHFile(FileSystem fs, Path path, byte[] family, - byte[] qualifier, byte[] value, int numRows) throws IOException { - HFileContext context = new HFileContextBuilder().withBlockSize(BLOCKSIZE) - .withCompression(COMPRESSION) - .build(); - HFile.Writer writer = HFile - .getWriterFactory(conf, new CacheConfig(conf)) - .withPath(fs, path) - .withFileContext(context) - .create(); + public static void createHFile(FileSystem fs, Path path, byte[] family, byte[] qualifier, + byte[] value, int numRows) throws IOException { + HFileContext context = + new HFileContextBuilder().withBlockSize(BLOCKSIZE).withCompression(COMPRESSION).build(); + HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path) + .withFileContext(context).create(); long now = EnvironmentEdgeManager.currentTime(); try { // subtract 2 since iterateOnSplits doesn't include boundary keys @@ -174,20 +171,18 @@ public static void createHFile(FileSystem fs, Path path, byte[] family, } /** - * Thread that does full scans of the table looking for any partially - * completed rows. - * - * Each iteration of this loads 10 hdfs files, which occupies 5 file open file - * handles. So every 10 iterations (500 file handles) it does a region - * compaction to reduce the number of open file handles. + * Thread that does full scans of the table looking for any partially completed rows. Each + * iteration of this loads 10 hdfs files, which occupies 5 file open file handles. So every 10 + * iterations (500 file handles) it does a region compaction to reduce the number of open file + * handles. */ public static class AtomicHFileLoader extends RepeatingTestThread { final AtomicLong numBulkLoads = new AtomicLong(); final AtomicLong numCompactions = new AtomicLong(); private TableName tableName; - public AtomicHFileLoader(TableName tableName, TestContext ctx, - byte targetFamilies[][]) throws IOException { + public AtomicHFileLoader(TableName tableName, TestContext ctx, byte targetFamilies[][]) + throws IOException { super(ctx); this.tableName = tableName; } @@ -195,8 +190,7 @@ public AtomicHFileLoader(TableName tableName, TestContext ctx, @Override public void doAnAction() throws Exception { long iteration = numBulkLoads.getAndIncrement(); - Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d", - iteration)); + Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d", iteration)); // create HFiles for different column families FileSystem fs = UTIL.getTestFileSystem(); @@ -234,8 +228,7 @@ public Optional getRegionObserver() { @Override public InternalScanner preCompact(ObserverContext e, Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker, - CompactionRequest request) - throws IOException { + CompactionRequest request) throws IOException { try { Thread.sleep(sleepDuration); } catch (InterruptedException ie) { @@ -248,8 +241,7 @@ public InternalScanner preCompact(ObserverContext } /** - * Thread that does full scans of the table looking for any partially - * completed rows. + * Thread that does full scans of the table looking for any partially completed rows. 
*/ public static class AtomicScanReader extends RepeatingTestThread { byte targetFamilies[][]; @@ -258,8 +250,8 @@ public static class AtomicScanReader extends RepeatingTestThread { AtomicLong numRowsScanned = new AtomicLong(); TableName TABLE_NAME; - public AtomicScanReader(TableName TABLE_NAME, TestContext ctx, - byte targetFamilies[][]) throws IOException { + public AtomicScanReader(TableName TABLE_NAME, TestContext ctx, byte targetFamilies[][]) + throws IOException { super(ctx); this.TABLE_NAME = TABLE_NAME; this.targetFamilies = targetFamilies; @@ -280,19 +272,15 @@ public void doAnAction() throws Exception { for (byte[] family : targetFamilies) { byte qualifier[] = QUAL; byte thisValue[] = res.getValue(family, qualifier); - if (gotValue != null && thisValue != null - && !Bytes.equals(gotValue, thisValue)) { + if (gotValue != null && thisValue != null && !Bytes.equals(gotValue, thisValue)) { StringBuilder msg = new StringBuilder(); - msg.append("Failed on scan ").append(numScans) - .append(" after scanning ").append(numRowsScanned) - .append(" rows!\n"); - msg.append("Current was " + Bytes.toString(res.getRow()) + "/" - + Bytes.toString(family) + ":" + Bytes.toString(qualifier) - + " = " + Bytes.toString(thisValue) + "\n"); - msg.append("Previous was " + Bytes.toString(lastRow) + "/" - + Bytes.toString(lastFam) + ":" + Bytes.toString(lastQual) - + " = " + Bytes.toString(gotValue)); + msg.append("Failed on scan ").append(numScans).append(" after scanning ") + .append(numRowsScanned).append(" rows!\n"); + msg.append("Current was " + Bytes.toString(res.getRow()) + "/" + Bytes.toString(family) + + ":" + Bytes.toString(qualifier) + " = " + Bytes.toString(thisValue) + "\n"); + msg.append("Previous was " + Bytes.toString(lastRow) + "/" + Bytes.toString(lastFam) + + ":" + Bytes.toString(lastQual) + " = " + Bytes.toString(gotValue)); throw new RuntimeException(msg.toString()); } @@ -308,20 +296,19 @@ public void doAnAction() throws Exception { } /** - * Creates a table with given table name and specified number of column - * families if the table does not already exist. + * Creates a table with given table name and specified number of column families if the table does + * not already exist. */ public void setupTable(TableName table, int cfs) throws IOException { try { LOG.info("Creating table " + table); - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(table); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(table); tableDescriptorBuilder.setCoprocessor(MyObserver.class.getName()); MyObserver.sleepDuration = this.sleepDuration; for (int i = 0; i < 10; i++) { ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family(i))).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family(i))).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); } @@ -386,8 +373,8 @@ void runAtomicBulkloadTest(TableName tableName, int millisToRun, int numScanners } /** - * Run test on an HBase instance for 5 minutes. This assumes that the table - * under test only has a single region. + * Run test on an HBase instance for 5 minutes. This assumes that the table under test only has a + * single region. 
*/ public static void main(String args[]) throws Exception { try { @@ -424,5 +411,3 @@ public boolean isFound() { } } } - - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionTracing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionTracing.java index 62f8a150bcf4..60f83b33268a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionTracing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionTracing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ public class TestHRegionTracing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHRegionTracing.class); + HBaseClassTestRule.forClass(TestHRegionTracing.class); private static HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -92,7 +92,7 @@ public static void tearDownAfterClass() throws IOException { public void setUp() throws IOException { TableName tableName = tableNameRule.getTableName(); TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); @@ -116,8 +116,8 @@ private void assertSpan(String spanName) { return false; } List regionNames = span.getAttributes().get(HBaseSemanticAttributes.REGION_NAMES_KEY); - return regionNames != null && regionNames.size() == 1 && - regionNames.get(0).equals(region.getRegionInfo().getRegionNameAsString()); + return regionNames != null && regionNames.size() == 1 + && regionNames.get(0).equals(region.getRegionInfo().getRegionNameAsString()); })); } @@ -165,7 +165,7 @@ public void testBatchMutate() throws IOException { @Test public void testCheckAndMutate() throws IOException { region.checkAndMutate(CheckAndMutate.newBuilder(ROW).ifNotExists(FAMILY, QUALIFIER) - .build(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE))); + .build(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE))); assertSpan("Region.checkAndMutate"); assertSpan("Region.getRowLock"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java index ad6f1596ded0..03bdd7450f83 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -35,12 +34,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - /** - * A test similar to TestHRegion, but with in-memory flush families. 
- * Also checks wal truncation after in-memory compaction. + * A test similar to TestHRegion, but with in-memory flush families. Also checks wal truncation + * after in-memory compaction. */ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestHRegionWithInMemoryFlush extends TestHRegion { @ClassRule @@ -48,32 +46,34 @@ public class TestHRegionWithInMemoryFlush extends TestHRegion { HBaseClassTestRule.forClass(TestHRegionWithInMemoryFlush.class); /** - * @return A region on which you must call - * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when done. + * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} + * when done. */ @Override public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException { boolean[] inMemory = new boolean[families.length]; - for(int i = 0; i < inMemory.length; i++) { + for (int i = 0; i < inMemory.length; i++) { inMemory[i] = true; } - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - return TEST_UTIL.createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, - conf, isReadOnly, durability, wal, inMemory, families); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + return TEST_UTIL.createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, + isReadOnly, durability, wal, inMemory, families); } - @Override int getTestCountForTestWritesWhileScanning() { + @Override + int getTestCountForTestWritesWhileScanning() { return 10; } /** - * testWritesWhileScanning is flakey when called out of this class. Need to dig in. Meantime - * go easy on it. See if that helps. + * testWritesWhileScanning is flakey when called out of this class. Need to dig in. Meantime go + * easy on it. See if that helps. */ - @Override int getNumQualifiersForTestWritesWhileScanning() { + @Override + int getNumQualifiersForTestWritesWhileScanning() { return 10; } @@ -93,10 +93,9 @@ public void testFlushAndMemstoreSizeCounting() throws Exception { Put put = new Put(row); put.addColumn(family, family, row); region.put(put); - //In memory flush every 1000 puts + // In memory flush every 1000 puts if (count++ % 1000 == 0) { - ((CompactingMemStore) (region.getStore(family).memstore)) - .flushInMemory(); + ((CompactingMemStore) (region.getStore(family).memstore)).flushInMemory(); } } region.flush(true); @@ -115,4 +114,3 @@ public void testFlushAndMemstoreSizeCounting() throws Exception { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index 5543c03b6505..bb63bec32551 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -136,8 +136,7 @@ public class TestHStore { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHStore.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHStore.class); private static final Logger LOG = LoggerFactory.getLogger(TestHStore.class); @Rule @@ -145,17 +144,17 @@ public class TestHStore { HRegion region; HStore store; - byte [] table = Bytes.toBytes("table"); - byte [] family = Bytes.toBytes("family"); - - byte [] row = Bytes.toBytes("row"); - byte [] row2 = Bytes.toBytes("row2"); - byte [] qf1 = Bytes.toBytes("qf1"); - byte [] qf2 = Bytes.toBytes("qf2"); - byte [] qf3 = Bytes.toBytes("qf3"); - byte [] qf4 = Bytes.toBytes("qf4"); - byte [] qf5 = Bytes.toBytes("qf5"); - byte [] qf6 = Bytes.toBytes("qf6"); + byte[] table = Bytes.toBytes("table"); + byte[] family = Bytes.toBytes("family"); + + byte[] row = Bytes.toBytes("row"); + byte[] row2 = Bytes.toBytes("row2"); + byte[] qf1 = Bytes.toBytes("qf1"); + byte[] qf2 = Bytes.toBytes("qf2"); + byte[] qf3 = Bytes.toBytes("qf3"); + byte[] qf4 = Bytes.toBytes("qf4"); + byte[] qf5 = Bytes.toBytes("qf5"); + byte[] qf6 = Bytes.toBytes("qf6"); NavigableSet qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR); @@ -176,9 +175,9 @@ public void setUp() throws IOException { qualifiers.add(qf5); Iterator iter = qualifiers.iterator(); - while(iter.hasNext()){ - byte [] next = iter.next(); - expected.add(new KeyValue(row, family, next, 1, (byte[])null)); + while (iter.hasNext()) { + byte[] next = iter.next(); + expected.add(new KeyValue(row, family, next, 1, (byte[]) null)); get.addColumn(family, next); } } @@ -220,8 +219,8 @@ private void initHRegion(String methodName, Configuration conf, TableDescriptorB fs.delete(logdir, true); ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, - MemStoreLABImpl.CHUNK_SIZE_DEFAULT, 1, 0, - null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + MemStoreLABImpl.CHUNK_SIZE_DEFAULT, 1, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); Configuration walConf = new Configuration(conf); CommonFSUtils.setRootDir(walConf, basedir); @@ -246,8 +245,7 @@ private HStore init(String methodName, Configuration conf, TableDescriptorBuilde } /** - * Test we do not lose data if we fail a flush and then close. - * Part of HBase-10466 + * Test we do not lose data if we fail a flush and then close. Part of HBase-10466 */ @Test public void testFlushSizeSizing() throws Exception { @@ -255,8 +253,7 @@ public void testFlushSizeSizing() throws Exception { final Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); // Only retry once. 
conf.setInt("hbase.hstore.flush.retries.number", 1); - User user = User.createUserForTesting(conf, this.name.getMethodName(), - new String[]{"foo"}); + User user = User.createUserForTesting(conf, this.name.getMethodName(), new String[] { "foo" }); // Inject our faulty LocalFileSystem conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class); user.runAs(new PrivilegedExceptionAction() { @@ -265,7 +262,7 @@ public Object run() throws Exception { // Make sure it worked (above is sensitive to caching details in hadoop core) FileSystem fs = FileSystem.get(conf); assertEquals(FaultyFileSystem.class, fs.getClass()); - FaultyFileSystem ffs = (FaultyFileSystem)fs; + FaultyFileSystem ffs = (FaultyFileSystem) fs; // Initialize region init(name.getMethodName(), conf); @@ -279,7 +276,7 @@ public Object run() throws Exception { kvSize.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD, 0, 0); mss = store.memstore.getFlushableSize(); assertEquals(kvSize.getMemStoreSize(), mss); - // Flush. Bug #1 from HBASE-10466. Make sure size calculation on failed flush is right. + // Flush. Bug #1 from HBASE-10466. Make sure size calculation on failed flush is right. try { LOG.info("Flushing"); flushStore(store, id++); @@ -313,8 +310,8 @@ public Object run() throws Exception { } /** - * Verify that compression and data block encoding are respected by the - * createWriter method, used on store flush. + * Verify that compression and data block encoding are respected by the createWriter method, used + * on store flush. */ @Test public void testCreateWriter() throws Exception { @@ -328,9 +325,9 @@ public void testCreateWriter() throws Exception { // Test createWriter StoreFileWriter writer = store.getStoreEngine() - .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(4) - .compression(hcd.getCompressionType()).isCompaction(false).includeMVCCReadpoint(true) - .includesTag(false).shouldDropBehind(false)); + .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(4) + .compression(hcd.getCompressionType()).isCompaction(false).includeMVCCReadpoint(true) + .includesTag(false).shouldDropBehind(false)); Path path = writer.getPath(); writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1))); writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2))); @@ -387,8 +384,8 @@ public void testDeleteExpiredStoreFiles(int minVersions) throws Exception { // Verify the total number of store files assertEquals(storeFileNum, this.store.getStorefiles().size()); - // Each call will find one expired store file and delete it before compaction happens. - // There will be no compaction due to threshold above. Last file will not be replaced. + // Each call will find one expired store file and delete it before compaction happens. + // There will be no compaction due to threshold above. Last file will not be replaced. for (int i = 1; i <= storeFileNum - 1; i++) { // verify the expired store file. 
assertFalse(this.store.requestCompaction().isPresent()); @@ -430,16 +427,16 @@ public void testLowestModificationTime() throws Exception { int storeFileNum = 4; for (int i = 1; i <= storeFileNum; i++) { - LOG.info("Adding some data for the store file #"+i); - this.store.add(new KeyValue(row, family, qf1, i, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf2, i, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf3, i, (byte[])null), null); + LOG.info("Adding some data for the store file #" + i); + this.store.add(new KeyValue(row, family, qf1, i, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf2, i, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf3, i, (byte[]) null), null); flush(i); } // after flush; check the lowest time stamp long lowestTimeStampFromManager = StoreUtils.getLowestTimestamp(store.getStorefiles()); long lowestTimeStampFromFS = getLowestTimeStampFromFS(fs, store.getStorefiles()); - assertEquals(lowestTimeStampFromManager,lowestTimeStampFromFS); + assertEquals(lowestTimeStampFromManager, lowestTimeStampFromFS); // after compact; check the lowest time stamp store.compact(store.requestCompaction().get(), NoLimitThroughputController.INSTANCE, null); @@ -484,10 +481,10 @@ private static long getLowestTimeStampFromFS(FileSystem fs, public void testEmptyStoreFile() throws IOException { init(this.name.getMethodName()); // Write a store file. - this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null); + this.store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null); flush(1); - // Now put in place an empty store file. Its a little tricky. Have to + // Now put in place an empty store file. Its a little tricky. Have to // do manually with hacked in sequence id. 
HStoreFile f = this.store.getStorefiles().iterator().next(); Path storedir = f.getPath().getParent(); @@ -495,11 +492,8 @@ public void testEmptyStoreFile() throws IOException { Configuration c = HBaseConfiguration.create(); FileSystem fs = FileSystem.get(c); HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build(); - StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), - fs) - .withOutputDir(storedir) - .withFileContext(meta) - .build(); + StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs) + .withOutputDir(storedir).withFileContext(meta).build(); w.appendMetadata(seqid + 1, false); w.close(); this.store.close(); @@ -508,9 +502,7 @@ public void testEmptyStoreFile() throws IOException { new HStore(this.store.getHRegion(), this.store.getColumnFamilyDescriptor(), c, false); assertEquals(2, this.store.getStorefilesCount()); - result = HBaseTestingUtil.getFromStoreFile(store, - get.getRow(), - qualifiers); + result = HBaseTestingUtil.getFromStoreFile(store, get.getRow(), qualifiers); assertEquals(1, result.size()); } @@ -521,19 +513,18 @@ public void testEmptyStoreFile() throws IOException { public void testGet_FromMemStoreOnly() throws IOException { init(this.name.getMethodName()); - //Put data in memstore - this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf4, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf5, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf6, 1, (byte[])null), null); + // Put data in memstore + this.store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf3, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf4, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf5, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf6, 1, (byte[]) null), null); - //Get - result = HBaseTestingUtil.getFromStoreFile(store, - get.getRow(), qualifiers); + // Get + result = HBaseTestingUtil.getFromStoreFile(store, get.getRow(), qualifiers); - //Compare + // Compare assertCheck(); } @@ -546,13 +537,13 @@ public void testTimeRangeIfSomeCellsAreDroppedInFlush() throws IOException { private void testTimeRangeIfSomeCellsAreDroppedInFlush(int maxVersion) throws IOException { init(this.name.getMethodName(), TEST_UTIL.getConfiguration(), - ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(maxVersion).build()); + ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(maxVersion).build()); long currentTs = 100; long minTs = currentTs; // the extra cell won't be flushed to disk, // so the min of timerange will be different between memStore and hfile. 
for (int i = 0; i != (maxVersion + 1); ++i) { - this.store.add(new KeyValue(row, family, qf1, ++currentTs, (byte[])null), null); + this.store.add(new KeyValue(row, family, qf1, ++currentTs, (byte[]) null), null); if (i == 1) { minTs = currentTs; } @@ -575,34 +566,32 @@ private void testTimeRangeIfSomeCellsAreDroppedInFlush(int maxVersion) throws IO public void testGet_FromFilesOnly() throws IOException { init(this.name.getMethodName()); - //Put data in memstore - this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null); - //flush + // Put data in memstore + this.store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null); + // flush flush(1); - //Add more data - this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf4, 1, (byte[])null), null); - //flush + // Add more data + this.store.add(new KeyValue(row, family, qf3, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf4, 1, (byte[]) null), null); + // flush flush(2); - //Add more data - this.store.add(new KeyValue(row, family, qf5, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf6, 1, (byte[])null), null); - //flush + // Add more data + this.store.add(new KeyValue(row, family, qf5, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf6, 1, (byte[]) null), null); + // flush flush(3); - //Get - result = HBaseTestingUtil.getFromStoreFile(store, - get.getRow(), - qualifiers); - //this.store.get(get, qualifiers, result); + // Get + result = HBaseTestingUtil.getFromStoreFile(store, get.getRow(), qualifiers); + // this.store.get(get, qualifiers, result); - //Need to sort the result since multiple files + // Need to sort the result since multiple files Collections.sort(result, CellComparatorImpl.COMPARATOR); - //Compare + // Compare assertCheck(); } @@ -613,42 +602,41 @@ public void testGet_FromFilesOnly() throws IOException { public void testGet_FromMemStoreAndFiles() throws IOException { init(this.name.getMethodName()); - //Put data in memstore - this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null); - //flush + // Put data in memstore + this.store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null); + // flush flush(1); - //Add more data - this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf4, 1, (byte[])null), null); - //flush + // Add more data + this.store.add(new KeyValue(row, family, qf3, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf4, 1, (byte[]) null), null); + // flush flush(2); - //Add more data - this.store.add(new KeyValue(row, family, qf5, 1, (byte[])null), null); - this.store.add(new KeyValue(row, family, qf6, 1, (byte[])null), null); + // Add more data + this.store.add(new KeyValue(row, family, qf5, 1, (byte[]) null), null); + this.store.add(new KeyValue(row, family, qf6, 1, (byte[]) null), null); - //Get - result = HBaseTestingUtil.getFromStoreFile(store, - get.getRow(), qualifiers); + // Get + result = HBaseTestingUtil.getFromStoreFile(store, get.getRow(), qualifiers); - //Need to sort the result since multiple files + // Need to sort the result since multiple files Collections.sort(result, 
CellComparatorImpl.COMPARATOR); - //Compare + // Compare assertCheck(); } private void flush(int storeFilessize) throws IOException { flushStore(store, id++); assertEquals(storeFilessize, this.store.getStorefiles().size()); - assertEquals(0, ((AbstractMemStore)this.store.memstore).getActive().getCellsCount()); + assertEquals(0, ((AbstractMemStore) this.store.memstore).getActive().getCellsCount()); } private void assertCheck() { assertEquals(expected.size(), result.size()); - for(int i=0; i() { @Override public Object run() throws Exception { @@ -695,17 +681,17 @@ public Object run() throws Exception { init(name.getMethodName(), conf); LOG.info("Adding some data"); - store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null); - store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null); - store.add(new KeyValue(row, family, qf3, 1, (byte[])null), null); + store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null); + store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null); + store.add(new KeyValue(row, family, qf3, 1, (byte[]) null), null); LOG.info("Before flush, we should have no files"); Collection files = - store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName()); + store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName()); assertEquals(0, files != null ? files.size() : 0); - //flush + // flush try { LOG.info("Flushing"); flush(1); @@ -725,8 +711,8 @@ public Object run() throws Exception { } /** - * Faulty file system that will fail if you write past its fault position the FIRST TIME - * only; thereafter it will succeed. Used by {@link TestHRegion} too. + * Faulty file system that will fail if you write past its fault position the FIRST TIME only; + * thereafter it will succeed. Used by {@link TestHRegion} too. */ static class FaultyFileSystem extends FilterFileSystem { List> outStreams = new ArrayList<>(); @@ -744,18 +730,18 @@ public FSDataOutputStream create(Path p) throws IOException { } @Override - public FSDataOutputStream create(Path f, FsPermission permission, - boolean overwrite, int bufferSize, short replication, long blockSize, - Progressable progress) throws IOException { - return new FaultyOutputStream(super.create(f, permission, - overwrite, bufferSize, replication, blockSize, progress), faultPos, fault); + public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, + int bufferSize, short replication, long blockSize, Progressable progress) + throws IOException { + return new FaultyOutputStream( + super.create(f, permission, overwrite, bufferSize, replication, blockSize, progress), + faultPos, fault); } @Override - public FSDataOutputStream createNonRecursive(Path f, boolean overwrite, - int bufferSize, short replication, long blockSize, Progressable progress) - throws IOException { - // Fake it. Call create instead. The default implementation throws an IOE + public FSDataOutputStream createNonRecursive(Path f, boolean overwrite, int bufferSize, + short replication, long blockSize, Progressable progress) throws IOException { + // Fake it. Call create instead. The default implementation throws an IOE // that this is not supported. 
return create(f, overwrite, bufferSize, replication, blockSize, progress); } @@ -766,7 +752,7 @@ static class FaultyOutputStream extends FSDataOutputStream { private final AtomicBoolean fault; public FaultyOutputStream(FSDataOutputStream out, long faultPos, final AtomicBoolean fault) - throws IOException { + throws IOException { super(out, null); this.faultPos = faultPos; this.fault = fault; @@ -798,12 +784,12 @@ private static StoreFlushContext flushStore(HStore store, long id) throws IOExce * Generate a list of KeyValues for testing based on given parameters * @return the rows key-value list */ - private List getKeyValueSet(long[] timestamps, int numRows, - byte[] qualifier, byte[] family) { + private List getKeyValueSet(long[] timestamps, int numRows, byte[] qualifier, + byte[] family) { List kvList = new ArrayList<>(); - for (int i=1;i<=numRows;i++) { + for (int i = 1; i <= numRows; i++) { byte[] b = Bytes.toBytes(i); - for (long timestamp: timestamps) { + for (long timestamp : timestamps) { kvList.add(new KeyValue(b, family, qualifier, timestamp, b)); } } @@ -816,55 +802,54 @@ private List getKeyValueSet(long[] timestamps, int numRows, @Test public void testMultipleTimestamps() throws IOException { int numRows = 1; - long[] timestamps1 = new long[] {1,5,10,20}; - long[] timestamps2 = new long[] {30,80}; + long[] timestamps1 = new long[] { 1, 5, 10, 20 }; + long[] timestamps2 = new long[] { 30, 80 }; init(this.name.getMethodName()); - List kvList1 = getKeyValueSet(timestamps1,numRows, qf1, family); + List kvList1 = getKeyValueSet(timestamps1, numRows, qf1, family); for (Cell kv : kvList1) { this.store.add(kv, null); } flushStore(store, id++); - List kvList2 = getKeyValueSet(timestamps2,numRows, qf1, family); - for(Cell kv : kvList2) { + List kvList2 = getKeyValueSet(timestamps2, numRows, qf1, family); + for (Cell kv : kvList2) { this.store.add(kv, null); } List result; Get get = new Get(Bytes.toBytes(1)); - get.addColumn(family,qf1); + get.addColumn(family, qf1); - get.setTimeRange(0,15); + get.setTimeRange(0, 15); result = HBaseTestingUtil.getFromStoreFile(store, get); - assertTrue(result.size()>0); + assertTrue(result.size() > 0); - get.setTimeRange(40,90); + get.setTimeRange(40, 90); result = HBaseTestingUtil.getFromStoreFile(store, get); - assertTrue(result.size()>0); + assertTrue(result.size() > 0); - get.setTimeRange(10,45); + get.setTimeRange(10, 45); result = HBaseTestingUtil.getFromStoreFile(store, get); - assertTrue(result.size()>0); + assertTrue(result.size() > 0); - get.setTimeRange(80,145); + get.setTimeRange(80, 145); result = HBaseTestingUtil.getFromStoreFile(store, get); - assertTrue(result.size()>0); + assertTrue(result.size() > 0); - get.setTimeRange(1,2); + get.setTimeRange(1, 2); result = HBaseTestingUtil.getFromStoreFile(store, get); - assertTrue(result.size()>0); + assertTrue(result.size() > 0); - get.setTimeRange(90,200); + get.setTimeRange(90, 200); result = HBaseTestingUtil.getFromStoreFile(store, get); - assertTrue(result.size()==0); + assertTrue(result.size() == 0); } /** * Test for HBASE-3492 - Test split on empty colfam (no store files). - * * @throws IOException When the IO operations fail. */ @Test @@ -889,8 +874,9 @@ public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception { // HTD overrides XML. 
--anyValue; - init(name.getMethodName() + "-htd", conf, TableDescriptorBuilder - .newBuilder(TableName.valueOf(table)).setValue(CONFIG_KEY, Long.toString(anyValue)), + init( + name.getMethodName() + "-htd", conf, TableDescriptorBuilder + .newBuilder(TableName.valueOf(table)).setValue(CONFIG_KEY, Long.toString(anyValue)), ColumnFamilyDescriptorBuilder.of(family)); assertTrue(store.throttleCompaction(anyValue + 1)); assertFalse(store.throttleCompaction(anyValue)); @@ -922,8 +908,7 @@ public void testStoreUsesSearchEngineOverride() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DummyStoreEngine.class.getName()); init(this.name.getMethodName(), conf); - assertEquals(DummyStoreEngine.lastCreatedCompactor, - this.store.storeEngine.getCompactor()); + assertEquals(DummyStoreEngine.lastCreatedCompactor, this.store.storeEngine.getCompactor()); } private void addStoreFile() throws IOException { @@ -933,11 +918,8 @@ private void addStoreFile() throws IOException { Configuration c = TEST_UTIL.getConfiguration(); FileSystem fs = FileSystem.get(c); HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build(); - StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), - fs) - .withOutputDir(storedir) - .withFileContext(fileContext) - .build(); + StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs) + .withOutputDir(storedir).withFileContext(fileContext).build(); w.appendMetadata(seqid + 1, false); w.close(); LOG.info("Added store file:" + w.getPath()); @@ -950,7 +932,8 @@ private void archiveStoreFile(int index) throws IOException { for (int i = 0; i <= index; i++) { sf = it.next(); } - store.getRegionFileSystem().removeStoreFiles(store.getColumnFamilyName(), Lists.newArrayList(sf)); + store.getRegionFileSystem().removeStoreFiles(store.getColumnFamilyName(), + Lists.newArrayList(sf)); } private void closeCompactedFile(int index) throws IOException { @@ -976,7 +959,7 @@ public void testRefreshStoreFiles() throws Exception { assertEquals(0, this.store.getStorefilesCount()); // add some data, flush - this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null); + this.store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null); flush(1); assertEquals(1, this.store.getStorefilesCount()); @@ -1055,22 +1038,19 @@ public void testNumberOfMemStoreScannersAfterFlush() throws IOException { long seqId = 100; long timestamp = EnvironmentEdgeManager.currentTime(); Cell cell0 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) - .setQualifier(qf1).setTimestamp(timestamp).setType(Cell.Type.Put) - .setValue(qf1).build(); + .setQualifier(qf1).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build(); PrivateCellUtil.setSequenceId(cell0, seqId); testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Collections.emptyList()); Cell cell1 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) - .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put) - .setValue(qf1).build(); + .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build(); PrivateCellUtil.setSequenceId(cell1, seqId); testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1)); seqId = 101; timestamp = EnvironmentEdgeManager.currentTime(); Cell cell2 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row2).setFamily(family) - 
.setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put) - .setValue(qf1).build(); + .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put).setValue(qf1).build(); PrivateCellUtil.setSequenceId(cell2, seqId); testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1, cell2)); } @@ -1108,9 +1088,10 @@ private void testNumberOfMemStoreScannersAfterFlush(List inputCellsBeforeS cellCount += cells.size(); assertEquals(more ? numberOfMemScannersAfterFlush : 0, countMemStoreScanner(s)); } while (more); - assertEquals("The number of cells added before snapshot is " + inputCellsBeforeSnapshot.size() - + ", The number of cells added after snapshot is " + inputCellsAfterSnapshot.size(), - inputCellsBeforeSnapshot.size() + inputCellsAfterSnapshot.size(), cellCount); + assertEquals( + "The number of cells added before snapshot is " + inputCellsBeforeSnapshot.size() + + ", The number of cells added after snapshot is " + inputCellsAfterSnapshot.size(), + inputCellsBeforeSnapshot.size() + inputCellsAfterSnapshot.size(), cellCount); // the current scanners is cleared assertEquals(0, countMemStoreScanner(s)); } @@ -1124,8 +1105,7 @@ private Cell createCell(byte[] qualifier, long ts, long sequenceId, byte[] value private Cell createCell(byte[] row, byte[] qualifier, long ts, long sequenceId, byte[] value) throws IOException { Cell c = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) - .setQualifier(qualifier).setTimestamp(ts).setType(Cell.Type.Put) - .setValue(value).build(); + .setQualifier(qualifier).setTimestamp(ts).setType(Cell.Type.Put).setValue(value).build(); PrivateCellUtil.setSequenceId(c, sequenceId); return c; } @@ -1184,8 +1164,8 @@ public Filter.ReturnCode filterCell(final Cell c) throws IOException { } @Test - public void testFlushBeforeCompletingScanWithFilterHint() throws IOException, - InterruptedException { + public void testFlushBeforeCompletingScanWithFilterHint() + throws IOException, InterruptedException { final AtomicBoolean timeToGetHint = new AtomicBoolean(false); final int expectedSize = 2; testFlushBeforeCompletingScan(new MyListHook() { @@ -1210,6 +1190,7 @@ public Filter.ReturnCode filterCell(final Cell c) throws IOException { return Filter.ReturnCode.INCLUDE; } } + @Override public Cell getNextCellHint(Cell currentCell) throws IOException { return currentCell; @@ -1218,7 +1199,7 @@ public Cell getNextCellHint(Cell currentCell) throws IOException { } private void testFlushBeforeCompletingScan(MyListHook hook, Filter filter, int expectedSize) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Configuration conf = HBaseConfiguration.create(); byte[] r0 = Bytes.toBytes("row0"); byte[] r1 = Bytes.toBytes("row1"); @@ -1251,19 +1232,16 @@ public long getSmallestReadPoint(HStore store) { store.add(createCell(r1, qf2, ts + 3, seqId + 3, value1), memStoreSizing); store.add(createCell(r1, qf3, ts + 3, seqId + 3, value1), memStoreSizing); List myList = new MyList<>(hook); - Scan scan = new Scan() - .withStartRow(r1) - .setFilter(filter); - try (InternalScanner scanner = (InternalScanner) store.getScanner( - scan, null, seqId + 3)){ + Scan scan = new Scan().withStartRow(r1).setFilter(filter); + try (InternalScanner scanner = (InternalScanner) store.getScanner(scan, null, seqId + 3)) { // r1 scanner.next(myList); assertEquals(expectedSize, myList.size()); for (Cell c : myList) { byte[] actualValue = CellUtil.cloneValue(c); - assertTrue("expected:" + Bytes.toStringBinary(value1) - 
+ ", actual:" + Bytes.toStringBinary(actualValue) - , Bytes.equals(actualValue, value1)); + assertTrue("expected:" + Bytes.toStringBinary(value1) + ", actual:" + + Bytes.toStringBinary(actualValue), + Bytes.equals(actualValue, value1)); } List normalList = new ArrayList<>(3); // r2 @@ -1271,9 +1249,9 @@ public long getSmallestReadPoint(HStore store) { assertEquals(3, normalList.size()); for (Cell c : normalList) { byte[] actualValue = CellUtil.cloneValue(c); - assertTrue("expected:" + Bytes.toStringBinary(value2) - + ", actual:" + Bytes.toStringBinary(actualValue) - , Bytes.equals(actualValue, value2)); + assertTrue("expected:" + Bytes.toStringBinary(value2) + ", actual:" + + Bytes.toStringBinary(actualValue), + Bytes.equals(actualValue, value2)); } } } @@ -1295,7 +1273,8 @@ public void testPreventLoopRead() throws Exception { init(name.getMethodName(), conf, TableDescriptorBuilder.newBuilder(TableName.valueOf(table)), ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(10).build(), new MyStoreHook() { - @Override public long getSmallestReadPoint(HStore store) { + @Override + public long getSmallestReadPoint(HStore store) { return seqId + 3; } }); @@ -1312,8 +1291,7 @@ public void testPreventLoopRead() throws Exception { ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(false); // test normal scan, should return all the cells ScannerContext scannerContext = contextBuilder.build(); - try (InternalScanner scanner = (InternalScanner) store.getScanner(scan, null, - seqId + 3)) { + try (InternalScanner scanner = (InternalScanner) store.getScanner(scan, null, seqId + 3)) { scanner.next(myList, scannerContext); assertEquals(6, myList.size()); } @@ -1322,8 +1300,7 @@ public void testPreventLoopRead() throws Exception { edge.incrementTime(10 * 1000); scannerContext = contextBuilder.build(); myList.clear(); - try (InternalScanner scanner = (InternalScanner) store.getScanner(scan, null, - seqId + 3)) { + try (InternalScanner scanner = (InternalScanner) store.getScanner(scan, null, seqId + 3)) { // r0 scanner.next(myList, scannerContext); assertEquals(0, myList.size()); @@ -1331,8 +1308,7 @@ public void testPreventLoopRead() throws Exception { // should scan all non-ttl expired cells by iterative next int resultCells = 0; - try (InternalScanner scanner = (InternalScanner) store.getScanner(scan, null, - seqId + 3)) { + try (InternalScanner scanner = (InternalScanner) store.getScanner(scan, null, seqId + 3)) { boolean hasMore = true; while (hasMore) { myList.clear(); @@ -1342,8 +1318,9 @@ public void testPreventLoopRead() throws Exception { } for (Cell c : myList) { byte[] actualValue = CellUtil.cloneValue(c); - assertTrue("expected:" + Bytes.toStringBinary(value1) + ", actual:" + Bytes - .toStringBinary(actualValue), Bytes.equals(actualValue, value1)); + assertTrue("expected:" + Bytes.toStringBinary(value1) + ", actual:" + + Bytes.toStringBinary(actualValue), + Bytes.equals(actualValue, value1)); } } assertEquals(2, resultCells); @@ -1379,8 +1356,8 @@ public void testCreateScannerAndSnapshotConcurrently() throws IOException, Inter // we get scanner from pipeline and snapshot but they are empty. 
-- phase (2/5) // this is blocked until we recreate the active memstore -- phase (3/5) // we get scanner from active memstore but it is empty -- phase (5/5) - InternalScanner scanner = (InternalScanner) store.getScanner( - new Scan(new Get(row)), quals, seqId + 1); + InternalScanner scanner = + (InternalScanner) store.getScanner(new Scan(new Get(row)), quals, seqId + 1); service.shutdown(); service.awaitTermination(20, TimeUnit.SECONDS); try { @@ -1390,9 +1367,9 @@ public void testCreateScannerAndSnapshotConcurrently() throws IOException, Inter assertEquals(3, results.size()); for (Cell c : results) { byte[] actualValue = CellUtil.cloneValue(c); - assertTrue("expected:" + Bytes.toStringBinary(value) - + ", actual:" + Bytes.toStringBinary(actualValue) - , Bytes.equals(actualValue, value)); + assertTrue("expected:" + Bytes.toStringBinary(value) + ", actual:" + + Bytes.toStringBinary(actualValue), + Bytes.equals(actualValue, value)); } } finally { scanner.close(); @@ -1408,7 +1385,7 @@ public void testCreateScannerAndSnapshotConcurrently() throws IOException, Inter public void testScanWithDoubleFlush() throws IOException { Configuration conf = HBaseConfiguration.create(); // Initialize region - MyStore myStore = initMyStore(name.getMethodName(), conf, new MyStoreHook(){ + MyStore myStore = initMyStore(name.getMethodName(), conf, new MyStoreHook() { @Override public void getScanners(MyStore store) throws IOException { final long tmpId = id++; @@ -1420,7 +1397,7 @@ public void getScanners(MyStore store) throws IOException { // be clear. // -- phase (4/4) flushStore(store, tmpId); - }catch (IOException ex) { + } catch (IOException ex) { throw new RuntimeException(ex); } }); @@ -1443,8 +1420,8 @@ public void getScanners(MyStore store) throws IOException { myStore.add(createCell(qf3, ts, seqId, oldValue), memStoreSizing); long snapshotId = id++; // push older data into snapshot -- phase (1/4) - StoreFlushContext storeFlushCtx = store.createFlushContext(snapshotId, FlushLifeCycleTracker - .DUMMY); + StoreFlushContext storeFlushCtx = + store.createFlushContext(snapshotId, FlushLifeCycleTracker.DUMMY); storeFlushCtx.prepare(); // insert current data into active -- phase (2/4) @@ -1455,8 +1432,8 @@ public void getScanners(MyStore store) throws IOException { quals.add(qf1); quals.add(qf2); quals.add(qf3); - try (InternalScanner scanner = (InternalScanner) myStore.getScanner( - new Scan(new Get(row)), quals, seqId + 1)) { + try (InternalScanner scanner = + (InternalScanner) myStore.getScanner(new Scan(new Get(row)), quals, seqId + 1)) { // complete the flush -- phase (3/4) storeFlushCtx.flushCache(Mockito.mock(MonitoredTask.class)); storeFlushCtx.commit(Mockito.mock(MonitoredTask.class)); @@ -1466,9 +1443,9 @@ public void getScanners(MyStore store) throws IOException { assertEquals(3, results.size()); for (Cell c : results) { byte[] actualValue = CellUtil.cloneValue(c); - assertTrue("expected:" + Bytes.toStringBinary(currentValue) - + ", actual:" + Bytes.toStringBinary(actualValue) - , Bytes.equals(actualValue, currentValue)); + assertTrue("expected:" + Bytes.toStringBinary(currentValue) + ", actual:" + + Bytes.toStringBinary(actualValue), + Bytes.equals(actualValue, currentValue)); } } } @@ -1487,8 +1464,8 @@ public void testReclaimChunkWhenScaning() throws IOException { quals.add(qf1); quals.add(qf2); quals.add(qf3); - try (InternalScanner scanner = (InternalScanner) store.getScanner( - new Scan(new Get(row)), quals, seqId)) { + try (InternalScanner scanner = + (InternalScanner) store.getScanner(new 
Scan(new Get(row)), quals, seqId)) { List results = new MyList<>(size -> { switch (size) { // 1) we get the first cell (qf1) @@ -1522,18 +1499,18 @@ public void testReclaimChunkWhenScaning() throws IOException { assertEquals(3, results.size()); for (Cell c : results) { byte[] actualValue = CellUtil.cloneValue(c); - assertTrue("expected:" + Bytes.toStringBinary(value) - + ", actual:" + Bytes.toStringBinary(actualValue) - , Bytes.equals(actualValue, value)); + assertTrue("expected:" + Bytes.toStringBinary(value) + ", actual:" + + Bytes.toStringBinary(actualValue), + Bytes.equals(actualValue, value)); } } } /** - * If there are two running InMemoryFlushRunnable, the later InMemoryFlushRunnable - * may change the versionedList. And the first InMemoryFlushRunnable will use the chagned - * versionedList to remove the corresponding segments. - * In short, there will be some segements which isn't in merge are removed. + * If there are two running InMemoryFlushRunnable, the later InMemoryFlushRunnable may change the + * versionedList. And the first InMemoryFlushRunnable will use the chagned versionedList to remove + * the corresponding segments. In short, there will be some segements which isn't in merge are + * removed. */ @Test public void testRunDoubleMemStoreCompactors() throws IOException, InterruptedException { @@ -1564,9 +1541,9 @@ public void testRunDoubleMemStoreCompactors() throws IOException, InterruptedExc store.add(createCell(qf1, ts + 1, seqId + 1, value), memStoreSizing); store.add(createCell(qf1, ts + 1, seqId + 1, value), memStoreSizing); assertEquals(1, MyCompactingMemStoreWithCustomCompactor.RUNNER_COUNT.get()); - //okay. Let the compaction be completed + // okay. Let the compaction be completed MyMemStoreCompactor.START_COMPACTOR_LATCH.countDown(); - CompactingMemStore mem = (CompactingMemStore) ((HStore)store).memstore; + CompactingMemStore mem = (CompactingMemStore) ((HStore) store).memstore; while (mem.isMemStoreFlushingInMemory()) { TimeUnit.SECONDS.sleep(1); } @@ -1631,8 +1608,8 @@ private MyStore initMyStore(String methodName, Configuration conf, MyStoreHook h private static class MyStore extends HStore { private final MyStoreHook hook; - MyStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration - confParam, MyStoreHook hook, boolean switchToPread) throws IOException { + MyStore(final HRegion region, final ColumnFamilyDescriptor family, + final Configuration confParam, MyStoreHook hook, boolean switchToPread) throws IOException { super(region, family, confParam, false); this.hook = hook; } @@ -1669,7 +1646,8 @@ public void testSwitchingPreadtoStreamParallelyWithCompactionDischarger() throws conf.set("hbase.hstore.engine.class", DummyStoreEngine.class.getName()); conf.setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 0); // Set the lower threshold to invoke the "MERGE" policy - MyStore store = initMyStore(name.getMethodName(), conf, new MyStoreHook() {}); + MyStore store = initMyStore(name.getMethodName(), conf, new MyStoreHook() { + }); MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing(); long ts = EnvironmentEdgeManager.currentTime(); long seqID = 1L; @@ -1734,13 +1712,14 @@ public void testMaxPreadBytesConfiguredToBeLessThanZero() throws Exception { conf.set("hbase.hstore.engine.class", DummyStoreEngine.class.getName()); // Set 'hbase.storescanner.pread.max.bytes' < 0, so that StoreScanner will be a STREAM type. 
conf.setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, -1); - MyStore store = initMyStore(name.getMethodName(), conf, new MyStoreHook() {}); + MyStore store = initMyStore(name.getMethodName(), conf, new MyStoreHook() { + }); Scan scan = new Scan(); scan.addFamily(family); // ReadType on Scan is still DEFAULT only. assertEquals(ReadType.DEFAULT, scan.getReadType()); - StoreScanner storeScanner = (StoreScanner) store.getScanner(scan, - scan.getFamilyMap().get(family), Long.MAX_VALUE); + StoreScanner storeScanner = + (StoreScanner) store.getScanner(scan, scan.getFamilyMap().get(family), Long.MAX_VALUE); assertFalse(storeScanner.isScanUsePread()); } @@ -1761,20 +1740,20 @@ public void testSpaceQuotaChangeAfterReplacement() throws IOException { // Compacting two files down to one, reducing size sizeStore.put(regionInfo, 1024L + 4096L); - store.updateSpaceQuotaAfterFileReplacement( - sizeStore, regionInfo, Arrays.asList(sf1, sf3), Arrays.asList(sf2)); + store.updateSpaceQuotaAfterFileReplacement(sizeStore, regionInfo, Arrays.asList(sf1, sf3), + Arrays.asList(sf2)); assertEquals(2048L, sizeStore.getRegionSize(regionInfo).getSize()); // The same file length in and out should have no change - store.updateSpaceQuotaAfterFileReplacement( - sizeStore, regionInfo, Arrays.asList(sf2), Arrays.asList(sf2)); + store.updateSpaceQuotaAfterFileReplacement(sizeStore, regionInfo, Arrays.asList(sf2), + Arrays.asList(sf2)); assertEquals(2048L, sizeStore.getRegionSize(regionInfo).getSize()); // Increase the total size used - store.updateSpaceQuotaAfterFileReplacement( - sizeStore, regionInfo, Arrays.asList(sf2), Arrays.asList(sf3)); + store.updateSpaceQuotaAfterFileReplacement(sizeStore, regionInfo, Arrays.asList(sf2), + Arrays.asList(sf3)); assertEquals(4096L, sizeStore.getRegionSize(regionInfo).getSize()); @@ -1789,9 +1768,9 @@ public void testSpaceQuotaChangeAfterReplacement() throws IOException { public void testHFileContextSetWithCFAndTable() throws Exception { init(this.name.getMethodName()); StoreFileWriter writer = store.getStoreEngine() - .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(10000L) - .compression(Compression.Algorithm.NONE).isCompaction(true).includeMVCCReadpoint(true) - .includesTag(false).shouldDropBehind(true)); + .createWriter(CreateStoreFileWriterParams.create().maxKeyCount(10000L) + .compression(Compression.Algorithm.NONE).isCompaction(true).includeMVCCReadpoint(true) + .includesTag(false).shouldDropBehind(true)); HFileContext hFileContext = writer.getHFileWriter().getFileContext(); assertArrayEquals(family, hFileContext.getColumnFamily()); assertArrayEquals(table, hFileContext.getTableName()); @@ -1869,8 +1848,7 @@ public void testCompactingMemStoreNoCellButDataSizeExceedsInmemoryFlushSize() // This test is for HBASE-26210, HBase Write be stuck when there is cell which size exceeds // InmemoryFlushSize @Test(timeout = 60000) - public void testCompactingMemStoreCellExceedInmemoryFlushSize() - throws Exception { + public void testCompactingMemStoreCellExceedInmemoryFlushSize() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.set(HStore.MEMSTORE_CLASS_NAME, MyCompactingMemStore6.class.getName()); @@ -1911,8 +1889,7 @@ public void testCompactingMemStoreWriteLargeCellAndSmallCellConcurrently() (smallCellByteSize, largeCellByteSize) -> smallCellByteSize + largeCellByteSize + 1); } - private void doWriteTestLargeCellAndSmallCellConcurrently( - IntBinaryOperator getFlushByteSize) + private void 
doWriteTestLargeCellAndSmallCellConcurrently(IntBinaryOperator getFlushByteSize) throws IOException, InterruptedException { Configuration conf = HBaseConfiguration.create(); @@ -1933,7 +1910,6 @@ private void doWriteTestLargeCellAndSmallCellConcurrently( conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.005); conf.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(flushByteSize * 200)); - init(name.getMethodName(), conf, ColumnFamilyDescriptorBuilder.newBuilder(family) .setInMemoryCompaction(MemoryCompactionPolicy.BASIC).build()); @@ -1946,7 +1922,6 @@ private void doWriteTestLargeCellAndSmallCellConcurrently( myCompactingMemStore.flushByteSizeLessThanSmallAndLargeCellSize = false; } - final ThreadSafeMemStoreSizing memStoreSizing = new ThreadSafeMemStoreSizing(); final AtomicLong totalCellByteSize = new AtomicLong(0); final AtomicReference exceptionRef = new AtomicReference(); @@ -2338,22 +2313,23 @@ private T getTypeKeyValueScanner(StoreScanner storeScanner, Class keyValu return resultScanners.get(0); } - @Test + @Test public void testOnConfigurationChange() throws IOException { final int COMMON_MAX_FILES_TO_COMPACT = 10; final int NEW_COMMON_MAX_FILES_TO_COMPACT = 8; final int STORE_MAX_FILES_TO_COMPACT = 6; - //Build a table that its maxFileToCompact different from common configuration. + // Build a table that its maxFileToCompact different from common configuration. Configuration conf = HBaseConfiguration.create(); conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, COMMON_MAX_FILES_TO_COMPACT); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(family) - .setConfiguration(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, - String.valueOf(STORE_MAX_FILES_TO_COMPACT)).build(); + .setConfiguration(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, + String.valueOf(STORE_MAX_FILES_TO_COMPACT)) + .build(); init(this.name.getMethodName(), conf, hcd); - //After updating common configuration, the conf in HStore itself must not be changed. + // After updating common configuration, the conf in HStore itself must not be changed. 
conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, NEW_COMMON_MAX_FILES_TO_COMPACT); this.store.onConfigurationChange(conf); @@ -2427,8 +2403,7 @@ public void testMemoryLeakWhenFlushMemStoreRetrying() throws Exception { assertTrue(memStoreLAB.chunks.isEmpty()); StoreScanner storeScanner = null; try { - storeScanner = - (StoreScanner) store.getScanner(new Scan(new Get(row)), quals, seqId + 1); + storeScanner = (StoreScanner) store.getScanner(new Scan(new Get(row)), quals, seqId + 1); assertTrue(store.storeEngine.getStoreFileManager().getStorefileCount() == 1); assertTrue(store.memstore.size().getCellsCount() == 0); assertTrue(store.memstore.getSnapshotSize().getCellsCount() == 0); @@ -2447,7 +2422,6 @@ public void testMemoryLeakWhenFlushMemStoreRetrying() throws Exception { } } - static class MyDefaultMemStore1 extends DefaultMemStore { private ImmutableSegment snapshotImmutableSegment; @@ -2510,10 +2484,10 @@ public void testImmutableMemStoreLABRefCnt() throws Exception { final long seqId = 100; final Cell smallCell1 = createCell(qf1, timestamp, seqId, smallValue); final Cell largeCell1 = createCell(qf2, timestamp, seqId, largeValue); - final Cell smallCell2 = createCell(qf3, timestamp, seqId+1, smallValue); - final Cell largeCell2 = createCell(qf4, timestamp, seqId+1, largeValue); - final Cell smallCell3 = createCell(qf5, timestamp, seqId+2, smallValue); - final Cell largeCell3 = createCell(qf6, timestamp, seqId+2, largeValue); + final Cell smallCell2 = createCell(qf3, timestamp, seqId + 1, smallValue); + final Cell largeCell2 = createCell(qf4, timestamp, seqId + 1, largeValue); + final Cell smallCell3 = createCell(qf5, timestamp, seqId + 2, smallValue); + final Cell largeCell3 = createCell(qf6, timestamp, seqId + 2, largeValue); int smallCellByteSize = MutableSegment.getCellLength(smallCell1); int largeCellByteSize = MutableSegment.getCellLength(largeCell1); @@ -2625,8 +2599,9 @@ public void run() { private static class MyMemStoreCompactor extends MemStoreCompactor { private static final AtomicInteger RUNNER_COUNT = new AtomicInteger(0); private static final CountDownLatch START_COMPACTOR_LATCH = new CountDownLatch(1); - public MyMemStoreCompactor(CompactingMemStore compactingMemStore, MemoryCompactionPolicy - compactionPolicy) throws IllegalArgumentIOException { + + public MyMemStoreCompactor(CompactingMemStore compactingMemStore, + MemoryCompactionPolicy compactionPolicy) throws IllegalArgumentIOException { super(compactingMemStore, compactionPolicy); } @@ -2647,6 +2622,7 @@ public boolean start() throws IOException { public static class MyCompactingMemStoreWithCustomCompactor extends CompactingMemStore { private static final AtomicInteger RUNNER_COUNT = new AtomicInteger(0); + public MyCompactingMemStoreWithCustomCompactor(Configuration conf, CellComparatorImpl c, HStore store, RegionServicesForStores regionServices, MemoryCompactionPolicy compactionPolicy) throws IOException { @@ -2676,9 +2652,10 @@ public static class MyCompactingMemStore extends CompactingMemStore { private static final AtomicBoolean START_TEST = new AtomicBoolean(false); private final CountDownLatch getScannerLatch = new CountDownLatch(1); private final CountDownLatch snapshotLatch = new CountDownLatch(1); - public MyCompactingMemStore(Configuration conf, CellComparatorImpl c, - HStore store, RegionServicesForStores regionServices, - MemoryCompactionPolicy compactionPolicy) throws IOException { + + public MyCompactingMemStore(Configuration conf, CellComparatorImpl c, HStore store, + 
RegionServicesForStores regionServices, MemoryCompactionPolicy compactionPolicy) + throws IOException { super(conf, c, store, regionServices, compactionPolicy); } @@ -2694,6 +2671,7 @@ protected List createList(int capacity) { } return new ArrayList<>(capacity); } + @Override protected void pushActiveToPipeline(MutableSegment active, boolean checkEmpty) { if (START_TEST.get()) { @@ -2718,26 +2696,40 @@ interface MyListHook { private static class MyList implements List { private final List delegatee = new ArrayList<>(); private final MyListHook hookAtAdd; + MyList(final MyListHook hookAtAdd) { this.hookAtAdd = hookAtAdd; } + @Override - public int size() {return delegatee.size();} + public int size() { + return delegatee.size(); + } @Override - public boolean isEmpty() {return delegatee.isEmpty();} + public boolean isEmpty() { + return delegatee.isEmpty(); + } @Override - public boolean contains(Object o) {return delegatee.contains(o);} + public boolean contains(Object o) { + return delegatee.contains(o); + } @Override - public Iterator iterator() {return delegatee.iterator();} + public Iterator iterator() { + return delegatee.iterator(); + } @Override - public Object[] toArray() {return delegatee.toArray();} + public Object[] toArray() { + return delegatee.toArray(); + } @Override - public R[] toArray(R[] a) {return delegatee.toArray(a);} + public R[] toArray(R[] a) { + return delegatee.toArray(a); + } @Override public boolean add(T e) { @@ -2746,52 +2738,84 @@ public boolean add(T e) { } @Override - public boolean remove(Object o) {return delegatee.remove(o);} + public boolean remove(Object o) { + return delegatee.remove(o); + } @Override - public boolean containsAll(Collection c) {return delegatee.containsAll(c);} + public boolean containsAll(Collection c) { + return delegatee.containsAll(c); + } @Override - public boolean addAll(Collection c) {return delegatee.addAll(c);} + public boolean addAll(Collection c) { + return delegatee.addAll(c); + } @Override - public boolean addAll(int index, Collection c) {return delegatee.addAll(index, c);} + public boolean addAll(int index, Collection c) { + return delegatee.addAll(index, c); + } @Override - public boolean removeAll(Collection c) {return delegatee.removeAll(c);} + public boolean removeAll(Collection c) { + return delegatee.removeAll(c); + } @Override - public boolean retainAll(Collection c) {return delegatee.retainAll(c);} + public boolean retainAll(Collection c) { + return delegatee.retainAll(c); + } @Override - public void clear() {delegatee.clear();} + public void clear() { + delegatee.clear(); + } @Override - public T get(int index) {return delegatee.get(index);} + public T get(int index) { + return delegatee.get(index); + } @Override - public T set(int index, T element) {return delegatee.set(index, element);} + public T set(int index, T element) { + return delegatee.set(index, element); + } @Override - public void add(int index, T element) {delegatee.add(index, element);} + public void add(int index, T element) { + delegatee.add(index, element); + } @Override - public T remove(int index) {return delegatee.remove(index);} + public T remove(int index) { + return delegatee.remove(index); + } @Override - public int indexOf(Object o) {return delegatee.indexOf(o);} + public int indexOf(Object o) { + return delegatee.indexOf(o); + } @Override - public int lastIndexOf(Object o) {return delegatee.lastIndexOf(o);} + public int lastIndexOf(Object o) { + return delegatee.lastIndexOf(o); + } @Override - public ListIterator listIterator() {return 
delegatee.listIterator();} + public ListIterator listIterator() { + return delegatee.listIterator(); + } @Override - public ListIterator listIterator(int index) {return delegatee.listIterator(index);} + public ListIterator listIterator(int index) { + return delegatee.listIterator(index); + } @Override - public List subList(int fromIndex, int toIndex) {return delegatee.subList(fromIndex, toIndex);} + public List subList(int fromIndex, int toIndex) { + return delegatee.subList(fromIndex, toIndex); + } } public static class MyCompactingMemStore2 extends CompactingMemStore { @@ -3297,8 +3321,7 @@ public static class MyDefaultMemStore extends DefaultMemStore { private volatile HStore store = null; public MyDefaultMemStore(Configuration conf, CellComparator cellComparator, - RegionServicesForStores regionServices) - throws IOException { + RegionServicesForStores regionServices) throws IOException { super(conf, cellComparator, regionServices); } @@ -3331,7 +3354,6 @@ protected List getSnapshotSegments() { return result; } - @Override protected void doClearSnapShot() { if (Thread.currentThread().getName().equals(FLUSH_THREAD_NAME)) { @@ -3339,7 +3361,7 @@ protected void doClearSnapShot() { if (currentCount == 1) { try { if (((ReentrantReadWriteLock) store.getStoreEngine().getLock()) - .isWriteLockedByCurrentThread()) { + .isWriteLockedByCurrentThread()) { shouldWait = false; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java index 2433af42b5c0..6a15ffa1c0b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -105,7 +105,7 @@ public class TestHStoreFile { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHStoreFile.class); + HBaseClassTestRule.forClass(TestHStoreFile.class); private static final Logger LOG = LoggerFactory.getLogger(TestHStoreFile.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -143,7 +143,7 @@ public static void tearDownAfterClass() { @Test public void testBasicHalfAndHFileLinkMapFile() throws Exception { final RegionInfo hri = - RegionInfoBuilder.newBuilder(TableName.valueOf("testBasicHalfAndHFileLinkMapFile")).build(); + RegionInfoBuilder.newBuilder(TableName.valueOf("testBasicHalfAndHFileLinkMapFile")).build(); // The locations of HFileLink refers hfiles only should be consistent with the table dir // create by CommonFSUtils directory, so we should make the region directory under // the mode of CommonFSUtils.getTableDir here. 
@@ -152,7 +152,7 @@ public void testBasicHalfAndHFileLinkMapFile() throws Exception { HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build(); StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) - .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); + .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); writeStoreFile(writer); Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); @@ -174,7 +174,7 @@ private void writeStoreFile(final StoreFileWriter writer) throws IOException { * @throws IOException */ public static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier) - throws IOException { + throws IOException { long now = EnvironmentEdgeManager.currentTime(); try { for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) { @@ -195,14 +195,14 @@ public static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte @Test public void testReference() throws IOException { final RegionInfo hri = - RegionInfoBuilder.newBuilder(TableName.valueOf("testReferenceTb")).build(); + RegionInfoBuilder.newBuilder(TableName.valueOf("testReferenceTb")).build(); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, new Path(testDir, hri.getTable().getNameAsString()), hri); HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) - .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); + .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); writeStoreFile(writer); Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); @@ -241,14 +241,14 @@ public void testReference() throws IOException { @Test public void testStoreFileReference() throws Exception { final RegionInfo hri = - RegionInfoBuilder.newBuilder(TableName.valueOf("testStoreFileReference")).build(); + RegionInfoBuilder.newBuilder(TableName.valueOf("testStoreFileReference")).build(); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, new Path(testDir, hri.getTable().getNameAsString()), hri); HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. 
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) - .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); + .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); writeStoreFile(writer); Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); writer.close(); @@ -258,7 +258,7 @@ public void testStoreFileReference() throws Exception { StoreFileReader r = file.getReader(); assertNotNull(r); StoreFileScanner scanner = - new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false); + new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false); // Verify after instantiating scanner refCount is increased assertTrue("Verify file is being referenced", file.isReferencedInReads()); @@ -275,7 +275,7 @@ public void testEmptyStoreFileRestrictKeyRanges() throws Exception { ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(cf); when(store.getColumnFamilyDescriptor()).thenReturn(cfd); try (StoreFileScanner scanner = - new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true)) { + new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true)) { Scan scan = new Scan(); scan.setColumnFamilyTimeRange(cf, 0, 1); assertFalse(scanner.shouldUseScanner(scan, store, 0)); @@ -285,7 +285,7 @@ public void testEmptyStoreFileRestrictKeyRanges() throws Exception { @Test public void testHFileLink() throws IOException { final RegionInfo hri = - RegionInfoBuilder.newBuilder(TableName.valueOf("testHFileLinkTb")).build(); + RegionInfoBuilder.newBuilder(TableName.valueOf("testHFileLinkTb")).build(); // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/ Configuration testConf = new Configuration(this.conf); CommonFSUtils.setRootDir(testConf, testDir); @@ -295,14 +295,14 @@ public void testHFileLink() throws IOException { // Make a store file and write data to it. StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) - .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); + .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); writeStoreFile(writer); Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY)); HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName()); Path linkFilePath = - new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName())); + new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName())); // Try to open store file from link StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath, true); @@ -338,7 +338,7 @@ public void testReferenceToHFileLink() throws IOException { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. 
//// StoreFileWriter writer = new StoreFileWriter.Builder(testConf, cacheConf, this.fs) - .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); + .withFilePath(regionFs.createTempName()).withFileContext(meta).build(); writeStoreFile(writer); Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); @@ -349,14 +349,14 @@ public void testReferenceToHFileLink() throws IOException { Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY); HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName()); Path linkFilePath = - new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName())); + new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName())); // create splits of the link. // /clone/splitA//, // /clone/splitB// RegionInfo splitHriA = RegionInfoBuilder.newBuilder(hri.getTable()).setEndKey(SPLITKEY).build(); RegionInfo splitHriB = - RegionInfoBuilder.newBuilder(hri.getTable()).setStartKey(SPLITKEY).build(); + RegionInfoBuilder.newBuilder(hri.getTable()).setStartKey(SPLITKEY).build(); HStoreFile f = new HStoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE, true); f.initReader(); Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top @@ -400,7 +400,7 @@ public void testReferenceToHFileLink() throws IOException { } private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f) - throws IOException { + throws IOException { f.initReader(); Cell midkey = f.getReader().midKey().get(); KeyValue midKV = (KeyValue) midkey; @@ -408,12 +408,12 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f // in the children byte[] midRow = CellUtil.cloneRow(midKV); // Create top split. - RegionInfo topHri = - RegionInfoBuilder.newBuilder(regionFs.getRegionInfo().getTable()).setEndKey(SPLITKEY).build(); + RegionInfo topHri = RegionInfoBuilder.newBuilder(regionFs.getRegionInfo().getTable()) + .setEndKey(SPLITKEY).build(); Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true); // Create bottom split. RegionInfo bottomHri = RegionInfoBuilder.newBuilder(regionFs.getRegionInfo().getTable()) - .setStartKey(SPLITKEY).build(); + .setStartKey(SPLITKEY).build(); Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false); // Make readers on top and bottom. 
HStoreFile topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true); @@ -433,8 +433,8 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f boolean first = true; ByteBuffer key = null; HFileScanner topScanner = top.getScanner(false, false); - while ((!topScanner.isSeeked() && topScanner.seekTo()) || - (topScanner.isSeeked() && topScanner.next())) { + while ((!topScanner.isSeeked() && topScanner.seekTo()) + || (topScanner.isSeeked() && topScanner.next())) { key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey()); if ((PrivateCellUtil.compare(topScanner.getReader().getComparator(), midKV, key.array(), @@ -493,7 +493,7 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key); LOG.info("First top when key < bottom: " + keyKV); String tmp = - Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()); + Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()); for (int i = 0; i < tmp.length(); i++) { assertTrue(tmp.charAt(i) == 'a'); } @@ -536,7 +536,7 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f LOG.info("Last bottom when key > top: " + keyKV); for (int i = 0; i < tmp.length(); i++) { assertTrue(Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength()) - .charAt(i) == 'z'); + .charAt(i) == 'z'); } } finally { if (top != null) { @@ -550,7 +550,7 @@ private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f } private static StoreFileScanner getStoreFileScanner(StoreFileReader reader, boolean cacheBlocks, - boolean pread) { + boolean pread) { return reader.getStoreFileScanner(cacheBlocks, pread, false, 0, 0, false); } @@ -563,7 +563,7 @@ private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Except for (int i = 0; i < 2000; i += 2) { String row = String.format(localFormatter, i); KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"), - now, Bytes.toBytes("value")); + now, Bytes.toBytes("value")); writer.append(kv); } writer.close(); @@ -571,7 +571,7 @@ private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Except ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); HFileInfo fileInfo = new HFileInfo(context, conf); StoreFileReader reader = - new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf); + new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf); fileInfo.initMetaAndIndex(reader.getHFileReader()); reader.loadFileInfo(); reader.loadBloomfilter(); @@ -589,7 +589,7 @@ private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Except scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes("family:col")); HStore store = mock(HStore.class); when(store.getColumnFamilyDescriptor()) - .thenReturn(ColumnFamilyDescriptorBuilder.of("family")); + .thenReturn(ColumnFamilyDescriptorBuilder.of("family")); boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE); if (i % 2 == 0) { if (!exists) { @@ -605,8 +605,9 @@ private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Except fs.delete(f, true); assertEquals("False negatives: " + falseNeg, 0, falseNeg); int maxFalsePos = (int) (2 * 2000 * err); - assertTrue("Too many false positives: " + falsePos + " (err=" + err + - ", expected no more than " + maxFalsePos + ")", falsePos <= 
maxFalsePos); + assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than " + + maxFalsePos + ")", + falsePos <= maxFalsePos); } private static final int BLOCKSIZE_SMALL = 8192; @@ -619,10 +620,10 @@ public void testBloomFilter() throws Exception { // write the file Path f = new Path(ROOT_DIR, name.getMethodName()); HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL) - .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build(); + .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f) - .withBloomType(BloomType.ROW).withMaxKeyCount(2000).withFileContext(meta).build(); + .withBloomType(BloomType.ROW).withMaxKeyCount(2000).withFileContext(meta).build(); bloomWriteRead(writer, fs); } @@ -636,17 +637,17 @@ public void testDeleteFamilyBloomFilter() throws Exception { Path f = new Path(ROOT_DIR, name.getMethodName()); HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL) - .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build(); + .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f) - .withMaxKeyCount(2000).withFileContext(meta).build(); + .withMaxKeyCount(2000).withFileContext(meta).build(); // add delete family long now = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < 2000; i += 2) { String row = String.format(localFormatter, i); KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"), - now, KeyValue.Type.DeleteFamily, Bytes.toBytes("value")); + now, KeyValue.Type.DeleteFamily, Bytes.toBytes("value")); writer.append(kv); } writer.close(); @@ -654,7 +655,7 @@ public void testDeleteFamilyBloomFilter() throws Exception { ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); HFileInfo fileInfo = new HFileInfo(context, conf); StoreFileReader reader = - new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf); + new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf); fileInfo.initMetaAndIndex(reader.getHFileReader()); reader.loadFileInfo(); reader.loadBloomfilter(); @@ -681,8 +682,9 @@ public void testDeleteFamilyBloomFilter() throws Exception { fs.delete(f, true); assertEquals("False negatives: " + falseNeg, 0, falseNeg); int maxFalsePos = (int) (2 * 2000 * err); - assertTrue("Too many false positives: " + falsePos + " (err=" + err + - ", expected no more than " + maxFalsePos, falsePos <= maxFalsePos); + assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than " + + maxFalsePos, + falsePos <= maxFalsePos); } /** @@ -695,7 +697,7 @@ public void testReseek() throws Exception { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. 
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f) - .withFileContext(meta).build(); + .withFileContext(meta).build(); writeStoreFile(writer); writer.close(); @@ -703,7 +705,7 @@ public void testReseek() throws Exception { ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build(); HFileInfo fileInfo = new HFileInfo(context, conf); StoreFileReader reader = - new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf); + new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf); fileInfo.initMetaAndIndex(reader.getHFileReader()); // Now do reseek with empty KV to position to the beginning of the file @@ -739,10 +741,10 @@ public void testBloomTypes() throws Exception { // write the file Path f = new Path(ROOT_DIR, name.getMethodName() + x); HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL) - .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build(); + .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f) - .withBloomType(bt[x]).withMaxKeyCount(expKeys[x]).withFileContext(meta).build(); + .withBloomType(bt[x]).withMaxKeyCount(expKeys[x]).withFileContext(meta).build(); long now = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < rowCount * 2; i += 2) { // rows @@ -751,19 +753,19 @@ public void testBloomTypes() throws Exception { String col = String.format(localFormatter, j); for (int k = 0; k < versions; ++k) { // versions KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), - Bytes.toBytes("col" + col), now - k, Bytes.toBytes(-1L)); + Bytes.toBytes("col" + col), now - k, Bytes.toBytes(-1L)); writer.append(kv); } } } writer.close(); - ReaderContext context = - new ReaderContextBuilder().withFilePath(f).withFileSize(fs.getFileStatus(f).getLen()) - .withFileSystem(fs).withInputStreamWrapper(new FSDataInputStreamWrapper(fs, f)).build(); + ReaderContext context = new ReaderContextBuilder().withFilePath(f) + .withFileSize(fs.getFileStatus(f).getLen()).withFileSystem(fs) + .withInputStreamWrapper(new FSDataInputStreamWrapper(fs, f)).build(); HFileInfo fileInfo = new HFileInfo(context, conf); StoreFileReader reader = - new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf); + new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf); fileInfo.initMetaAndIndex(reader.getHFileReader()); reader.loadFileInfo(); reader.loadBloomfilter(); @@ -772,7 +774,7 @@ public void testBloomTypes() throws Exception { HStore store = mock(HStore.class); when(store.getColumnFamilyDescriptor()) - .thenReturn(ColumnFamilyDescriptorBuilder.of("family")); + .thenReturn(ColumnFamilyDescriptorBuilder.of("family")); // check false positives rate int falsePos = 0; int falseNeg = 0; @@ -784,7 +786,7 @@ public void testBloomTypes() throws Exception { columns.add(Bytes.toBytes("col" + col)); Scan scan = - new Scan().withStartRow(Bytes.toBytes(row)).withStopRow(Bytes.toBytes(row), true); + new Scan().withStartRow(Bytes.toBytes(row)).withStopRow(Bytes.toBytes(row), true); scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes(("col" + col))); boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE); @@ -838,7 +840,7 @@ private void assertOrdering(Comparator comparator, HStoreFil * Create a mock StoreFile with the given attributes. 
*/ private HStoreFile mockStoreFile(boolean bulkLoad, long size, long bulkTimestamp, long seqId, - String path) { + String path) { HStoreFile mock = Mockito.mock(HStoreFile.class); StoreFileReader reader = Mockito.mock(StoreFileReader.class); @@ -849,8 +851,8 @@ private HStoreFile mockStoreFile(boolean bulkLoad, long size, long bulkTimestamp Mockito.doReturn(OptionalLong.of(bulkTimestamp)).when(mock).getBulkLoadTimestamp(); Mockito.doReturn(seqId).when(mock).getMaxSequenceId(); Mockito.doReturn(new Path(path)).when(mock).getPath(); - String name = "mock storefile, bulkLoad=" + bulkLoad + " bulkTimestamp=" + bulkTimestamp + - " seqId=" + seqId + " path=" + path; + String name = "mock storefile, bulkLoad=" + bulkLoad + " bulkTimestamp=" + bulkTimestamp + + " seqId=" + seqId + " path=" + path; Mockito.doReturn(name).when(mock).toString(); return mock; } @@ -889,7 +891,7 @@ public void testMultipleTimestamps() throws IOException { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) - .withOutputDir(dir).withFileContext(meta).build(); + .withOutputDir(dir).withFileContext(meta).build(); List kvList = getKeyValueSet(timestamps, numRows, qualifier, family); @@ -900,7 +902,7 @@ public void testMultipleTimestamps() throws IOException { writer.close(); HStoreFile hsf = - new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); + new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); HStore store = mock(HStore.class); when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of(family)); hsf.initReader(); @@ -955,7 +957,7 @@ public void testCacheOnWriteEvictOnClose() throws Exception { Path pathCowOff = new Path(baseDir, "123456789"); StoreFileWriter writer = writeStoreFile(conf, cacheConf, pathCowOff, 3); HStoreFile hsf = - new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); + new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); LOG.debug(hsf.getPath().toString()); // Read this file, we should see 3 misses @@ -1060,8 +1062,8 @@ public void testCacheOnWriteEvictOnClose() throws Exception { } private Path splitStoreFile(final HRegionFileSystem regionFs, final RegionInfo hri, - final String family, final HStoreFile sf, final byte[] splitKey, boolean isTopRef) - throws IOException { + final String family, final HStoreFile sf, final byte[] splitKey, boolean isTopRef) + throws IOException { Path path = regionFs.splitStoreFile(hri, family, sf, splitKey, isTopRef, null); if (null == path) { return null; @@ -1075,14 +1077,14 @@ private Path splitStoreFile(final HRegionFileSystem regionFs, final RegionInfo h TableDescriptors mockTblDescs = mock(TableDescriptors.class); when(mockServices.getTableDescriptors()).thenReturn(mockTblDescs); TableDescriptor mockTblDesc = TableDescriptorBuilder.newBuilder(hri.getTable()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); when(mockTblDescs.get(any())).thenReturn(mockTblDesc); Path regionDir = regionFs.commitDaughterRegion(hri, splitFiles, mockEnv); return new Path(new Path(regionDir, family), path.getName()); } private StoreFileWriter writeStoreFile(Configuration conf, CacheConfig cacheConf, Path path, - int numBlocks) throws IOException { + int numBlocks) throws IOException { // Let's put ~5 small KVs 
in each block, so let's make 5*numBlocks KVs int numKVs = 5 * numBlocks; List kvs = new ArrayList<>(numKVs); @@ -1096,10 +1098,10 @@ private StoreFileWriter writeStoreFile(Configuration conf, CacheConfig cacheConf } int blockSize = totalSize / numBlocks; HFileContext meta = new HFileContextBuilder().withBlockSize(blockSize).withChecksumType(CKTYPE) - .withBytesPerCheckSum(CKBYTES).build(); + .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) - .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build(); + .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build(); // We'll write N-1 KVs to ensure we don't write an extra block kvs.remove(kvs.size() - 1); for (KeyValue kv : kvs) { @@ -1122,15 +1124,15 @@ public void testDataBlockEncodingMetaData() throws IOException { DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF; cacheConf = new CacheConfig(conf); HFileContext meta = - new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).withChecksumType(CKTYPE) - .withBytesPerCheckSum(CKBYTES).withDataBlockEncoding(dataBlockEncoderAlgo).build(); + new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).withChecksumType(CKTYPE) + .withBytesPerCheckSum(CKBYTES).withDataBlockEncoding(dataBlockEncoderAlgo).build(); // Make a store file and write data to it. StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) - .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build(); + .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build(); writer.close(); HStoreFile storeFile = - new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); + new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true); storeFile.initReader(); StoreFileReader reader = storeFile.getReader(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java index d17e30107f52..c3f001990e3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestHdfsSnapshotHRegion { @ClassRule @@ -53,7 +53,6 @@ public class TestHdfsSnapshotHRegion { private DFSClient client; private String baseDir; - @Before public void setUp() throws Exception { Configuration c = TEST_UTIL.getConfiguration(); @@ -79,8 +78,8 @@ public void tearDown() throws Exception { @Test public void testOpeningReadOnlyRegionBasic() throws Exception { String snapshotDir = client.createSnapshot(baseDir, SNAPSHOT_NAME); - RegionInfo firstRegion = TEST_UTIL.getConnection().getRegionLocator( - table.getName()).getAllRegionLocations().stream().findFirst().get().getRegion(); + RegionInfo firstRegion = TEST_UTIL.getConnection().getRegionLocator(table.getName()) + .getAllRegionLocations().stream().findFirst().get().getRegion(); Path tableDir = CommonFSUtils.getTableDir(new Path(snapshotDir), TABLE_NAME); HRegion snapshottedRegion = openSnapshotRegion(firstRegion, tableDir); Assert.assertNotNull(snapshottedRegion); @@ -90,17 +89,17 @@ public void testOpeningReadOnlyRegionBasic() throws Exception { @Test public void testSnapshottingWithTmpSplitsAndMergeDirectoriesPresent() throws Exception { // lets get a region and create those directories and make sure we ignore them - RegionInfo firstRegion = TEST_UTIL.getConnection().getRegionLocator( - table.getName()).getAllRegionLocations().stream().findFirst().get().getRegion(); + RegionInfo firstRegion = TEST_UTIL.getConnection().getRegionLocator(table.getName()) + .getAllRegionLocations().stream().findFirst().get().getRegion(); String encodedName = firstRegion.getEncodedName(); Path tableDir = CommonFSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLE_NAME); Path regionDirectoryPath = new Path(tableDir, encodedName); - TEST_UTIL.getTestFileSystem().create( - new Path(regionDirectoryPath, HRegionFileSystem.REGION_TEMP_DIR)); - TEST_UTIL.getTestFileSystem().create( - new Path(regionDirectoryPath, HRegionFileSystem.REGION_SPLITS_DIR)); - TEST_UTIL.getTestFileSystem().create( - new Path(regionDirectoryPath, HRegionFileSystem.REGION_MERGES_DIR)); + TEST_UTIL.getTestFileSystem() + .create(new Path(regionDirectoryPath, HRegionFileSystem.REGION_TEMP_DIR)); + TEST_UTIL.getTestFileSystem() + .create(new Path(regionDirectoryPath, HRegionFileSystem.REGION_SPLITS_DIR)); + TEST_UTIL.getTestFileSystem() + .create(new Path(regionDirectoryPath, HRegionFileSystem.REGION_MERGES_DIR)); // now snapshot String snapshotDir = client.createSnapshot(baseDir, "foo_snapshot"); // everything should still open just fine @@ -111,12 +110,7 @@ public void testSnapshottingWithTmpSplitsAndMergeDirectoriesPresent() throws Exc } private HRegion openSnapshotRegion(RegionInfo firstRegion, Path tableDir) throws IOException { - return HRegion.openReadOnlyFileSystemHRegion( - TEST_UTIL.getConfiguration(), - TEST_UTIL.getTestFileSystem(), - tableDir, - firstRegion, - table.getDescriptor() - ); + return HRegion.openReadOnlyFileSystemHRegion(TEST_UTIL.getConfiguration(), + TEST_UTIL.getTestFileSystem(), tableDir, firstRegion, table.getDescriptor()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index 626048b487d6..2fa32692b37e 
100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,11 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.ChoreService; @@ -55,7 +55,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestHeapMemoryManager { @ClassRule @@ -74,8 +74,7 @@ public void testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven() thro conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.03f); RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf); HeapMemoryManager manager = new HeapMemoryManager(new BlockCacheStub(0), - new MemstoreFlusherStub(0), new RegionServerStub(conf), - regionServerAccounting); + new MemstoreFlusherStub(0), new RegionServerStub(conf), regionServerAccounting); assertFalse(manager.isTunerOn()); } @@ -87,8 +86,7 @@ public void testAutoTunerShouldBeOffWhenMaxMinRangesForBlockCacheIsNotGiven() th conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.03f); RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf); HeapMemoryManager manager = new HeapMemoryManager(new BlockCacheStub(0), - new MemstoreFlusherStub(0), new RegionServerStub(conf), - regionServerAccounting); + new MemstoreFlusherStub(0), new RegionServerStub(conf), regionServerAccounting); assertFalse(manager.isTunerOn()); } @@ -101,8 +99,8 @@ public void testWhenMemstoreAndBlockCacheMaxMinChecksFails() throws Exception { RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf); MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub(0); try { - new HeapMemoryManager(blockCache, memStoreFlusher, - new RegionServerStub(conf), regionServerAccounting); + new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf), + regionServerAccounting); fail(); } catch (RuntimeException e) { } @@ -110,8 +108,8 @@ public void testWhenMemstoreAndBlockCacheMaxMinChecksFails() throws Exception { conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.2f); conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f); try { - new HeapMemoryManager(blockCache, memStoreFlusher, - new RegionServerStub(conf), regionServerAccounting); + new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf), + regionServerAccounting); fail(); } catch (RuntimeException e) { } @@ -127,8 +125,7 @@ public void testWhenClusterIsWriteHeavyWithEmptyMemstore() throws Exception { conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000); BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4)); RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf); - MemstoreFlusherStub memStoreFlusher = - new 
MemstoreFlusherStub((long) (maxHeapSize * 0.4)); + MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); // Empty block cache and memstore blockCache.setTestBlockSize(0); regionServerAccounting.setTestMemstoreSize(0); @@ -165,8 +162,7 @@ public void testHeapMemoryManagerWhenOffheapFlushesHappenUnderReadHeavyCase() th conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0); RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf, true); - MemstoreFlusherStub memStoreFlusher = - new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); + MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); // Empty memstore and but nearly filled block cache blockCache.setTestBlockSize((long) (maxHeapSize * 0.4 * 0.8)); regionServerAccounting.setTestMemstoreSize(0); @@ -301,8 +297,7 @@ public void testWhenClusterIsWriteHeavy() throws Exception { conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0); RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf); - MemstoreFlusherStub memStoreFlusher = - new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); + MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); // Empty block cache and but nearly filled memstore blockCache.setTestBlockSize(0); regionServerAccounting.setTestMemstoreSize((long) (maxHeapSize * 0.4 * 0.8)); @@ -321,9 +316,9 @@ public void testWhenClusterIsWriteHeavy() throws Exception { // Allow the tuner to run once and do necessary memory up waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize); assertHeapSpaceDelta(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE, oldMemstoreHeapSize, - memStoreFlusher.memstoreSize); + memStoreFlusher.memstoreSize); assertHeapSpaceDelta(-(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE), oldBlockCacheSize, - blockCache.maxSize); + blockCache.maxSize); oldMemstoreHeapSize = memStoreFlusher.memstoreSize; oldBlockCacheSize = blockCache.maxSize; // Do some more flushes before the next run of HeapMemoryTuner @@ -333,9 +328,9 @@ public void testWhenClusterIsWriteHeavy() throws Exception { // Allow the tuner to run once and do necessary memory up waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize); assertHeapSpaceDelta(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE, oldMemstoreHeapSize, - memStoreFlusher.memstoreSize); + memStoreFlusher.memstoreSize); assertHeapSpaceDelta(-(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE), oldBlockCacheSize, - blockCache.maxSize); + blockCache.maxSize); } @Test @@ -349,8 +344,7 @@ public void testWhenClusterIsWriteHeavyWithOffheapMemstore() throws Exception { conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0); RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf); - MemstoreFlusherStub memStoreFlusher = - new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); + MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); // Empty block cache and but nearly filled memstore blockCache.setTestBlockSize(0); regionServerAccounting.setTestMemstoreSize((long) (maxHeapSize * 0.4 * 0.8)); @@ -386,8 +380,7 @@ public void testWhenClusterIsReadHeavy() throws Exception { 
conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0); RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf); - MemstoreFlusherStub memStoreFlusher = - new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); + MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); // Empty memstore and but nearly filled block cache blockCache.setTestBlockSize((long) (maxHeapSize * 0.4 * 0.8)); regionServerAccounting.setTestMemstoreSize(0); @@ -397,10 +390,11 @@ public void testWhenClusterIsReadHeavy() throws Exception { long oldMemstoreHeapSize = memStoreFlusher.memstoreSize; long oldBlockCacheSize = blockCache.maxSize; long oldMemstoreLowerMarkSize = 7 * oldMemstoreHeapSize / 10; - long maxTuneSize = oldMemstoreHeapSize - (oldMemstoreLowerMarkSize + oldMemstoreHeapSize) / 2; + long maxTuneSize = oldMemstoreHeapSize - (oldMemstoreLowerMarkSize + oldMemstoreHeapSize) / 2; float maxStepValue = (maxTuneSize * 1.0f) / oldMemstoreHeapSize; - maxStepValue = maxStepValue > DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE ? - DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE:maxStepValue; + maxStepValue = maxStepValue > DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE + ? DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE + : maxStepValue; final ChoreService choreService = new ChoreService("TEST_SERVER_NAME"); heapMemoryManager.start(choreService); blockCache.evictBlock(null); @@ -413,10 +407,11 @@ public void testWhenClusterIsReadHeavy() throws Exception { oldMemstoreHeapSize = memStoreFlusher.memstoreSize; oldBlockCacheSize = blockCache.maxSize; oldMemstoreLowerMarkSize = 7 * oldMemstoreHeapSize / 10; - maxTuneSize = oldMemstoreHeapSize - (oldMemstoreLowerMarkSize + oldMemstoreHeapSize) / 2; + maxTuneSize = oldMemstoreHeapSize - (oldMemstoreLowerMarkSize + oldMemstoreHeapSize) / 2; maxStepValue = (maxTuneSize * 1.0f) / oldMemstoreHeapSize; - maxStepValue = maxStepValue > DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE ? - DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE:maxStepValue; + maxStepValue = maxStepValue > DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE + ? 
DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE + : maxStepValue; // Do some more evictions before the next run of HeapMemoryTuner blockCache.evictBlock(null); // Allow the tuner to run once and do necessary memory up @@ -436,8 +431,7 @@ public void testWhenClusterIsHavingMoreWritesThanReads() throws Exception { conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0); RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf); - MemstoreFlusherStub memStoreFlusher = - new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); + MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4)); // Both memstore and block cache are nearly filled blockCache.setTestBlockSize(0); regionServerAccounting.setTestMemstoreSize((long) (maxHeapSize * 0.4 * 0.8)); @@ -467,9 +461,9 @@ public void testWhenClusterIsHavingMoreWritesThanReads() throws Exception { // Allow the tuner to run once and do necessary memory up waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize); assertHeapSpaceDelta(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE, oldMemstoreHeapSize, - memStoreFlusher.memstoreSize); + memStoreFlusher.memstoreSize); assertHeapSpaceDelta(-(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE), oldBlockCacheSize, - blockCache.maxSize); + blockCache.maxSize); } @Test @@ -516,9 +510,9 @@ public void testBlockedFlushesIncreaseMemstoreInSteadyState() throws Exception { // Allow the tuner to run once and do necessary memory up Thread.sleep(1500); assertHeapSpaceDelta(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE, oldMemstoreHeapSize, - memStoreFlusher.memstoreSize); + memStoreFlusher.memstoreSize); assertHeapSpaceDelta(-(DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE), oldBlockCacheSize, - blockCache.maxSize); + blockCache.maxSize); } @Test @@ -533,7 +527,7 @@ public void testPluggingInHeapMemoryTuner() throws Exception { conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0); conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS, CustomHeapMemoryTuner.class, - HeapMemoryTuner.class); + HeapMemoryTuner.class); // Let the system start with default values for memstore heap and block cache size. 
HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf), new RegionServerAccountingStub(conf)); @@ -567,7 +561,7 @@ public void testWhenSizeGivenByHeapTunerGoesOutsideRange() throws Exception { conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0); conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS, CustomHeapMemoryTuner.class, - HeapMemoryTuner.class); + HeapMemoryTuner.class); HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf), new RegionServerAccountingStub(conf)); final ChoreService choreService = new ChoreService("TEST_SERVER_NAME"); @@ -593,7 +587,7 @@ public void testWhenCombinedHeapSizesFromTunerGoesOutSideMaxLimit() throws Excep conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000); conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0); conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS, CustomHeapMemoryTuner.class, - HeapMemoryTuner.class); + HeapMemoryTuner.class); HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf), new RegionServerAccountingStub(conf)); long oldMemstoreSize = memStoreFlusher.memstoreSize; @@ -619,17 +613,16 @@ private void assertHeapSpaceDelta(double expectedDeltaPercent, long oldHeapSpace // Tolerable error double error = 0.95; if (expectedDeltaPercent > 0) { - assertTrue(expctedMinDelta*error <= (double)(newHeapSpace - oldHeapSpace)); - assertTrue(expctedMinDelta/error >= (double)(newHeapSpace - oldHeapSpace)); + assertTrue(expctedMinDelta * error <= (double) (newHeapSpace - oldHeapSpace)); + assertTrue(expctedMinDelta / error >= (double) (newHeapSpace - oldHeapSpace)); } else { - assertTrue(-expctedMinDelta*error <= (double)(oldHeapSpace - newHeapSpace)); - assertTrue(-expctedMinDelta/error >= (double)(oldHeapSpace - newHeapSpace)); + assertTrue(-expctedMinDelta * error <= (double) (oldHeapSpace - newHeapSpace)); + assertTrue(-expctedMinDelta / error >= (double) (oldHeapSpace - newHeapSpace)); } } - private void waitForTune(final MemstoreFlusherStub memStoreFlusher, - final long oldMemstoreHeapSize) throws Exception { + final long oldMemstoreHeapSize) throws Exception { // Allow the tuner to run once and do necessary memory up UTIL.waitFor(10000, new Waiter.Predicate() { @Override @@ -644,7 +637,7 @@ private static class BlockCacheStub implements ResizableBlockCache { long maxSize = 0; private long testBlockSize = 0; - public BlockCacheStub(long size){ + public BlockCacheStub(long size) { this.maxSize = size; } @@ -836,7 +829,7 @@ public Connection getConnection() { @Override public ServerName getServerName() { - return ServerName.valueOf("server1",4000,12345); + return ServerName.valueOf("server1", 4000, 12345); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestInputStreamBlockDistribution.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestInputStreamBlockDistribution.java index 2c7872ad89d0..0836b79e138c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestInputStreamBlockDistribution.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestInputStreamBlockDistribution.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -44,12 +46,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestInputStreamBlockDistribution { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestInputStreamBlockDistribution.class); + HBaseClassTestRule.forClass(TestInputStreamBlockDistribution.class); private Configuration conf; private FileSystem fs; @@ -68,7 +70,7 @@ public void setUp() throws Exception { testPath = new Path(testUtil.getDefaultRootDirPath(), "test.file"); - writeSomeData(fs, testPath, 256 << 20, (byte)2); + writeSomeData(fs, testPath, 256 << 20, (byte) 2); } @After @@ -82,7 +84,7 @@ public void itDerivesLocalityFromHFileInputStream() throws Exception { try (FSDataInputStream stream = fs.open(testPath)) { HDFSBlocksDistribution initial = new HDFSBlocksDistribution(); InputStreamBlockDistribution test = - new InputStreamBlockDistribution(stream, getMockedStoreFileInfo(initial, false)); + new InputStreamBlockDistribution(stream, getMockedStoreFileInfo(initial, false)); assertSame(initial, test.getHDFSBlockDistribution()); @@ -103,8 +105,8 @@ public void itDerivesLocalityFromFileLinkInputStream() throws Exception { HDFSBlocksDistribution initial = new HDFSBlocksDistribution(); - InputStreamBlockDistribution test = new InputStreamBlockDistribution(stream, - getMockedStoreFileInfo(initial, true)); + InputStreamBlockDistribution test = + new InputStreamBlockDistribution(stream, getMockedStoreFileInfo(initial, true)); assertSame(initial, test.getHDFSBlockDistribution()); @@ -119,8 +121,8 @@ public void itFallsBackOnLastKnownValueWhenUnsupported() { FSDataInputStream fakeStream = mock(FSDataInputStream.class); HDFSBlocksDistribution initial = new HDFSBlocksDistribution(); - InputStreamBlockDistribution test = new InputStreamBlockDistribution(fakeStream, - getMockedStoreFileInfo(initial, false)); + InputStreamBlockDistribution test = + new InputStreamBlockDistribution(fakeStream, getMockedStoreFileInfo(initial, false)); assertSame(initial, test.getHDFSBlockDistribution()); test.setLastCachedAt(test.getCachePeriodMs() + 1); @@ -137,8 +139,8 @@ public void itFallsBackOnLastKnownValueOnException() throws IOException { HDFSBlocksDistribution initial = new HDFSBlocksDistribution(); - InputStreamBlockDistribution test = new InputStreamBlockDistribution(fakeStream, - getMockedStoreFileInfo(initial, false)); + InputStreamBlockDistribution test = + new InputStreamBlockDistribution(fakeStream, getMockedStoreFileInfo(initial, false)); assertSame(initial, test.getHDFSBlockDistribution()); test.setLastCachedAt(test.getCachePeriodMs() + 1); @@ -171,10 +173,9 @@ private void writeSomeData(FileSystem fs, Path path, long size, byte v) throws I } private StoreFileInfo getMockedStoreFileInfo(HDFSBlocksDistribution distribution, - boolean isFileLink) { + boolean isFileLink) 
{ StoreFileInfo mock = mock(StoreFileInfo.class); - when(mock.getHDFSBlockDistribution()) - .thenReturn(distribution); + when(mock.getHDFSBlockDistribution()).thenReturn(distribution); when(mock.getConf()).thenReturn(conf); when(mock.isLink()).thenReturn(isFileLink); return mock; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIsDeleteFailure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIsDeleteFailure.java index 75af6bf2434b..17fd03176f51 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIsDeleteFailure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIsDeleteFailure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -45,8 +47,8 @@ import org.junit.rules.TestName; /** - * Test failure in ScanDeleteTracker.isDeleted when ROWCOL bloom filter - * is used during a scan with a filter. + * Test failure in ScanDeleteTracker.isDeleted when ROWCOL bloom filter is used during a scan with a + * filter. */ @Category({ RegionServerTests.class, FilterTests.class, MediumTests.class }) public class TestIsDeleteFailure { @@ -56,7 +58,8 @@ public class TestIsDeleteFailure { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestIsDeleteFailure.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -75,7 +78,7 @@ public static void tearDownAfterClass() throws Exception { @Test public void testIsDeleteFailure() throws Exception { final TableDescriptor table = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); final byte[] family = Bytes.toBytes("0"); final byte[] c1 = Bytes.toBytes("C01"); final byte[] c2 = Bytes.toBytes("C02"); @@ -96,9 +99,8 @@ public void testIsDeleteFailure() throws Exception { final byte[] val = Bytes.toBytes("foo"); List fams = new ArrayList<>(1); fams.add(family); - Table ht = TEST_UTIL - .createTable(table, fams.toArray(new byte[0][]), null, BloomType.ROWCOL, 10000, - new Configuration(TEST_UTIL.getConfiguration())); + Table ht = TEST_UTIL.createTable(table, fams.toArray(new byte[0][]), null, BloomType.ROWCOL, + 10000, new Configuration(TEST_UTIL.getConfiguration())); List pending = new ArrayList(); for (int i = 0; i < 1000; i++) { byte[] row = Bytes.toBytes("key" + Integer.toString(i)); @@ -150,10 +152,9 @@ public void testIsDeleteFailure() throws Exception { scan.addColumn(family, c9); scan.addColumn(family, c15); SingleColumnValueFilter filter = - new SingleColumnValueFilter(family, c15, CompareOperator.EQUAL, - new BinaryComparator(c15)); + new SingleColumnValueFilter(family, c15, CompareOperator.EQUAL, new BinaryComparator(c15)); 
scan.setFilter(filter); - //Trigger the scan for not existing row, so it will scan over all rows + // Trigger the scan for not existing row, so it will scan over all rows for (Result result : ht.getScanner(scan)) { result.advance(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java index 997410cc03d6..cb386bba1db7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -66,7 +65,7 @@ * Test performance improvement of joined scanners optimization: * https://issues.apache.org/jira/browse/HBASE-5416 */ -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestJoinedScanners { @ClassRule @@ -97,7 +96,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt("dfs.replication", 1); TEST_UTIL.getConfiguration().setLong("hbase.hregion.max.filesize", 322122547200L); - String[] dataNodeHosts = new String[] {"host1", "host2", "host3"}; + String[] dataNodeHosts = new String[] { "host1", "host2", "host3" }; int regionServersCount = 3; StartTestingClusterOption option = StartTestingClusterOption.builder() .numRegionServers(regionServersCount).dataNodeHosts(dataNodeHosts).build(); @@ -111,14 +110,13 @@ public static void tearDownAfterClass() throws Exception { @Test public void testJoinedScanners() throws Exception { - byte[][] families = {cf_essential, cf_joined}; + byte[][] families = { cf_essential, cf_joined }; final TableName tableName = TableName.valueOf(name.getMethodName()); - TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] family : families) { ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(family) - .setDataBlockEncoding(blockEncoding).build(); + .setDataBlockEncoding(blockEncoding).build(); builder.setColumnFamily(familyDescriptor); } TableDescriptor tableDescriptor = builder.build(); @@ -128,8 +126,8 @@ public void testJoinedScanners() throws Exception { long rows_to_insert = 1000; int insert_batch = 20; - LOG.info("Make " + Long.toString(rows_to_insert) + " rows, total size = " + Float - .toString(rows_to_insert * valueWidth / 1024 / 1024) + " MB"); + LOG.info("Make " + Long.toString(rows_to_insert) + " rows, total size = " + + Float.toString(rows_to_insert * valueWidth / 1024 / 1024) + " MB"); long time = System.nanoTime(); Random rand = ThreadLocalRandom.current(); @@ -154,8 +152,8 @@ public void testJoinedScanners() throws Exception { puts.clear(); } - LOG.info("Data generated in " - + Double.toString((System.nanoTime() - time) / 1000000000.0) + " seconds"); + LOG.info("Data generated in " + Double.toString((System.nanoTime() - time) / 1000000000.0) + + " seconds"); boolean 
slow = true; for (int i = 0; i < 10; ++i) { @@ -172,8 +170,8 @@ private void runScanner(Table table, boolean slow) throws Exception { scan.addColumn(cf_essential, col_name); scan.addColumn(cf_joined, col_name); - SingleColumnValueFilter filter = new SingleColumnValueFilter( - cf_essential, col_name, CompareOperator.EQUAL, flag_yes); + SingleColumnValueFilter filter = + new SingleColumnValueFilter(cf_essential, col_name, CompareOperator.EQUAL, flag_yes); filter.setFilterIfMissing(true); scan.setFilter(filter); scan.setLoadColumnFamiliesOnDemand(!slow); @@ -188,7 +186,7 @@ private void runScanner(Table table, boolean slow) throws Exception { double timeSec = (System.nanoTime() - time) / 1000000000.0; result_scanner.close(); LOG.info((slow ? "Slow" : "Joined") + " scanner finished in " + Double.toString(timeSec) - + " seconds, got " + Long.toString(rows_count/2) + " rows"); + + " seconds, got " + Long.toString(rows_count / 2) + " rows"); } private static Options options = new Options(); @@ -199,18 +197,18 @@ private void runScanner(Table table, boolean slow) throws Exception { * @throws IOException if there is a bug while reading from disk */ public static void main(final String[] args) throws Exception { - Option encodingOption = new Option("e", "blockEncoding", true, - "Data block encoding; Default: FAST_DIFF"); + Option encodingOption = + new Option("e", "blockEncoding", true, "Data block encoding; Default: FAST_DIFF"); encodingOption.setRequired(false); options.addOption(encodingOption); Option ratioOption = new Option("r", "selectionRatio", true, - "Ratio of selected rows using essential column family"); + "Ratio of selected rows using essential column family"); ratioOption.setRequired(false); options.addOption(ratioOption); - Option widthOption = new Option("w", "valueWidth", true, - "Width of value for non-essential column family"); + Option widthOption = + new Option("w", "valueWidth", true, "Width of value for non-essential column family"); widthOption.setRequired(false); options.addOption(widthOption); @@ -242,13 +240,12 @@ public void testWithReverseScan() throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2")) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2")).build(); admin.createTable(tableDescriptor); try (Table table = con.getTable(tableName)) { SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("cf1"), - Bytes.toBytes("col"), CompareOperator.EQUAL, Bytes.toBytes("val")); + Bytes.toBytes("col"), CompareOperator.EQUAL, Bytes.toBytes("val")); filter.setFilterIfMissing(true); // Reverse scan with loading CFs on demand diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java index 522487fb9da5..1286ce628b96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestKeepDeletes { @ClassRule @@ -74,19 +74,18 @@ public class TestKeepDeletes { private final byte[] c0 = COLUMNS[0]; private final byte[] c1 = COLUMNS[1]; - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); @Before public void setUp() throws Exception { - /* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on - * implicit RS timing. - * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete - * compact timestamps are tracked. Otherwise, forced major compaction will not purge - * Delete's having the same timestamp. see ScanQueryMatcher.match(): - * if (retainDeletesInOutput - * || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp) - * <= timeToPurgeDeletes) ... ) - * + /* + * HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on implicit + * RS timing. Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete + * compact timestamps are tracked. Otherwise, forced major compaction will not purge Delete's + * having the same timestamp. see ScanQueryMatcher.match(): if (retainDeletesInOutput || + * (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp) <= timeToPurgeDeletes) ... + * ) */ EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); } @@ -97,34 +96,32 @@ public void tearDown() throws Exception { } /** - * Make sure that deleted rows are retained. - * Family delete markers are deleted. - * Column Delete markers are versioned - * Time range scan of deleted rows are possible + * Make sure that deleted rows are retained. Family delete markers are deleted. 
Column Delete + * markers are versioned Time range scan of deleted rows are possible */ @Test public void testBasicScenario() throws Exception { // keep 3 versions, rows do not expire TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 3, - HConstants.FOREVER, KeepDeletedCells.TRUE); + HConstants.FOREVER, KeepDeletedCells.TRUE); HRegion region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.addColumn(c0, c0, T1); region.put(p); - p = new Put(T1, ts+1); + p = new Put(T1, ts + 1); p.addColumn(c0, c0, T2); region.put(p); - p = new Put(T1, ts+2); + p = new Put(T1, ts + 2); p.addColumn(c0, c0, T3); region.put(p); - p = new Put(T1, ts+4); + p = new Put(T1, ts + 4); p.addColumn(c0, c0, T4); region.put(p); // now place a delete marker at ts+2 - Delete d = new Delete(T1, ts+2); + Delete d = new Delete(T1, ts + 2); region.delete(d); // a raw scan can see the delete markers @@ -134,9 +131,9 @@ public void testBasicScenario() throws Exception { // get something *before* the delete marker Get g = new Get(T1); g.readAllVersions(); - g.setTimeRange(0L, ts+2); + g.setTimeRange(0L, ts + 2); Result r = region.get(g); - checkResult(r, c0, c0, T2,T1); + checkResult(r, c0, c0, T2, T1); // flush region.flush(true); @@ -158,15 +155,15 @@ public void testBasicScenario() throws Exception { checkResult(r, c0, c0, T2); // a timerange that includes the delete marker won't see past rows - g.setTimeRange(0L, ts+4); + g.setTimeRange(0L, ts + 4); r = region.get(g); assertTrue(r.isEmpty()); // two more puts, this will expire the older puts. - p = new Put(T1, ts+5); + p = new Put(T1, ts + 5); p.addColumn(c0, c0, T5); region.put(p); - p = new Put(T1, ts+6); + p = new Put(T1, ts + 6); p.addColumn(c0, c0, T6); region.put(p); @@ -192,18 +189,16 @@ public void testBasicScenario() throws Exception { } /** - * Even when the store does not keep deletes a "raw" scan will - * return everything it can find (unless discarding cells is guaranteed - * to have no effect). - * Assuming this the desired behavior. Could also disallow "raw" scanning - * if the store does not have KEEP_DELETED_CELLS enabled. - * (can be changed easily) + * Even when the store does not keep deletes a "raw" scan will return everything it can find + * (unless discarding cells is guaranteed to have no effect). Assuming this the desired behavior. + * Could also disallow "raw" scanning if the store does not have KEEP_DELETED_CELLS enabled. 
(can + * be changed easily) */ @Test public void testRawScanWithoutKeepingDeletes() throws Exception { // KEEP_DELETED_CELLS is NOT enabled TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 3, - HConstants.FOREVER, KeepDeletedCells.FALSE); + HConstants.FOREVER, KeepDeletedCells.FALSE); HRegion region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -229,7 +224,7 @@ public void testRawScanWithoutKeepingDeletes() throws Exception { // after compaction they are gone // (note that this a test with a Store without - // KEEP_DELETED_CELLS) + // KEEP_DELETED_CELLS) s = new Scan(); s.setRaw(true); s.readAllVersions(); @@ -248,7 +243,7 @@ public void testRawScanWithoutKeepingDeletes() throws Exception { public void testWithoutKeepingDeletes() throws Exception { // KEEP_DELETED_CELLS is NOT enabled TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 3, - HConstants.FOREVER, KeepDeletedCells.FALSE); + HConstants.FOREVER, KeepDeletedCells.FALSE); HRegion region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -262,22 +257,21 @@ public void testWithoutKeepingDeletes() throws Exception { Result rOne = region.get(gOne); assertFalse(rOne.isEmpty()); - - Delete d = new Delete(T1, ts+2); + Delete d = new Delete(T1, ts + 2); d.addColumn(c0, c0, ts); region.delete(d); // "past" get does not see rows behind delete marker Get g = new Get(T1); g.readAllVersions(); - g.setTimeRange(0L, ts+1); + g.setTimeRange(0L, ts + 1); Result r = region.get(g); assertTrue(r.isEmpty()); // "past" scan does not see rows behind delete marker Scan s = new Scan(); s.readAllVersions(); - s.setTimeRange(0L, ts+1); + s.setTimeRange(0L, ts + 1); InternalScanner scanner = region.getScanner(s); List kvs = new ArrayList<>(); while (scanner.next(kvs)) { @@ -302,7 +296,7 @@ public void testWithoutKeepingDeletes() throws Exception { @Test public void testRawScanWithColumns() throws Exception { TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 3, - HConstants.FOREVER, KeepDeletedCells.TRUE); + HConstants.FOREVER, KeepDeletedCells.TRUE); Region region = hbu.createLocalHRegion(htd, null, null); Scan s = new Scan(); @@ -326,29 +320,29 @@ public void testRawScanWithColumns() throws Exception { @Test public void testRawScan() throws Exception { TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 3, - HConstants.FOREVER, KeepDeletedCells.TRUE); + HConstants.FOREVER, KeepDeletedCells.TRUE); Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.addColumn(c0, c0, T1); region.put(p); - p = new Put(T1, ts+2); + p = new Put(T1, ts + 2); p.addColumn(c0, c0, T2); region.put(p); - p = new Put(T1, ts+4); + p = new Put(T1, ts + 4); p.addColumn(c0, c0, T3); region.put(p); - Delete d = new Delete(T1, ts+1); + Delete d = new Delete(T1, ts + 1); region.delete(d); - d = new Delete(T1, ts+2); - d.addColumn(c0, c0, ts+2); + d = new Delete(T1, ts + 2); + d.addColumn(c0, c0, ts + 2); region.delete(d); - d = new Delete(T1, ts+3); - d.addColumns(c0, c0, ts+3); + d = new Delete(T1, ts + 3); + d.addColumns(c0, c0, ts + 3); region.delete(d); Scan s = new Scan(); @@ -383,7 +377,7 @@ public void testRawScan() throws Exception { s = new Scan(); s.setRaw(true); s.readAllVersions(); - s.setTimeRange(0, ts+2); + s.setTimeRange(0, ts + 2); scan = 
region.getScanner(s); kvs = new ArrayList<>(); scan.next(kvs); @@ -398,7 +392,7 @@ public void testRawScan() throws Exception { s = new Scan(); s.setRaw(true); s.readAllVersions(); - s.setTimeRange(ts+3, ts+5); + s.setTimeRange(ts + 3, ts + 5); scan = region.getScanner(s); kvs = new ArrayList<>(); scan.next(kvs); @@ -406,7 +400,6 @@ public void testRawScan() throws Exception { assertArrayEquals(CellUtil.cloneValue(kvs.get(0)), T3); assertTrue(CellUtil.isDelete(kvs.get(1))); - HBaseTestingUtil.closeRegionAndWAL(region); } @@ -416,7 +409,7 @@ public void testRawScan() throws Exception { @Test public void testDeleteMarkerExpirationEmptyStore() throws Exception { TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 1, - HConstants.FOREVER, KeepDeletedCells.TRUE); + HConstants.FOREVER, KeepDeletedCells.TRUE); HRegion region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -430,11 +423,11 @@ public void testDeleteMarkerExpirationEmptyStore() throws Exception { region.delete(d); d = new Delete(T1, ts); - d.addColumn(c0, c0, ts+1); + d.addColumn(c0, c0, ts + 1); region.delete(d); d = new Delete(T1, ts); - d.addColumn(c0, c0, ts+2); + d.addColumn(c0, c0, ts + 2); region.delete(d); // 1 family marker, 1 column marker, 2 version markers @@ -459,7 +452,7 @@ public void testDeleteMarkerExpirationEmptyStore() throws Exception { @Test public void testDeleteMarkerExpiration() throws Exception { TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 1, - HConstants.FOREVER, KeepDeletedCells.TRUE); + HConstants.FOREVER, KeepDeletedCells.TRUE); HRegion region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -469,7 +462,7 @@ public void testDeleteMarkerExpiration() throws Exception { region.put(p); // a put into another store (CF) should have no effect - p = new Put(T1, ts-10); + p = new Put(T1, ts - 10); p.addColumn(c1, c0, T1); region.put(p); @@ -483,11 +476,11 @@ public void testDeleteMarkerExpiration() throws Exception { region.delete(d); d = new Delete(T1, ts); - d.addColumn(c0, c0, ts+1); + d.addColumn(c0, c0, ts + 1); region.delete(d); d = new Delete(T1, ts); - d.addColumn(c0, c0, ts+2); + d.addColumn(c0, c0, ts + 2); region.delete(d); // 1 family marker, 1 column marker, 2 version markers @@ -499,7 +492,7 @@ public void testDeleteMarkerExpiration() throws Exception { assertEquals(4, countDeleteMarkers(region)); // another put will push out the earlier put... 
- p = new Put(T1, ts+3); + p = new Put(T1, ts + 3); p.addColumn(c0, c0, T1); region.put(p); @@ -522,7 +515,7 @@ public void testDeleteMarkerExpiration() throws Exception { @Test public void testWithOldRow() throws Exception { TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 1, - HConstants.FOREVER, KeepDeletedCells.TRUE); + HConstants.FOREVER, KeepDeletedCells.TRUE); HRegion region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -532,7 +525,7 @@ public void testWithOldRow() throws Exception { region.put(p); // a put another (older) row in the same store - p = new Put(T2, ts-10); + p = new Put(T2, ts - 10); p.addColumn(c0, c0, T1); region.put(p); @@ -546,11 +539,11 @@ public void testWithOldRow() throws Exception { region.delete(d); d = new Delete(T1, ts); - d.addColumn(c0, c0, ts+1); + d.addColumn(c0, c0, ts + 1); region.delete(d); d = new Delete(T1, ts); - d.addColumn(c0, c0, ts+2); + d.addColumn(c0, c0, ts + 2); region.delete(d); // 1 family marker, 1 column marker, 2 version markers @@ -562,7 +555,7 @@ public void testWithOldRow() throws Exception { assertEquals(4, countDeleteMarkers(region)); // another put will push out the earlier put... - p = new Put(T1, ts+3); + p = new Put(T1, ts + 3); p.addColumn(c0, c0, T1); region.put(p); @@ -577,7 +570,7 @@ public void testWithOldRow() throws Exception { assertEquals(4, countDeleteMarkers(region)); // another put will push out the earlier put... - p = new Put(T1, ts+4); + p = new Put(T1, ts + 4); p.addColumn(c0, c0, T1); region.put(p); @@ -600,7 +593,7 @@ public void testWithOldRow() throws Exception { @Test public void testRanges() throws Exception { TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 3, - HConstants.FOREVER, KeepDeletedCells.TRUE); + HConstants.FOREVER, KeepDeletedCells.TRUE); Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -618,71 +611,70 @@ public void testRanges() throws Exception { p.addColumn(c1, c1, T1); region.put(p); - p = new Put(T1, ts+1); + p = new Put(T1, ts + 1); p.addColumn(c0, c0, T2); p.addColumn(c0, c1, T2); p.addColumn(c1, c0, T2); p.addColumn(c1, c1, T2); region.put(p); - p = new Put(T2, ts+1); + p = new Put(T2, ts + 1); p.addColumn(c0, c0, T2); p.addColumn(c0, c1, T2); p.addColumn(c1, c0, T2); p.addColumn(c1, c1, T2); region.put(p); - Delete d = new Delete(T1, ts+2); - d.addColumns(c0, c0, ts+2); + Delete d = new Delete(T1, ts + 2); + d.addColumns(c0, c0, ts + 2); region.delete(d); - d = new Delete(T1, ts+2); - d.addFamily(c1, ts+2); + d = new Delete(T1, ts + 2); + d.addFamily(c1, ts + 2); region.delete(d); - d = new Delete(T2, ts+2); - d.addFamily(c0, ts+2); + d = new Delete(T2, ts + 2); + d.addFamily(c0, ts + 2); region.delete(d); // add an older delete, to make sure it is filtered - d = new Delete(T1, ts-10); - d.addFamily(c1, ts-10); + d = new Delete(T1, ts - 10); + d.addFamily(c1, ts - 10); region.delete(d); // ts + 2 does NOT include the delete at ts+2 - checkGet(region, T1, c0, c0, ts+2, T2, T1); - checkGet(region, T1, c0, c1, ts+2, T2, T1); - checkGet(region, T1, c1, c0, ts+2, T2, T1); - checkGet(region, T1, c1, c1, ts+2, T2, T1); + checkGet(region, T1, c0, c0, ts + 2, T2, T1); + checkGet(region, T1, c0, c1, ts + 2, T2, T1); + checkGet(region, T1, c1, c0, ts + 2, T2, T1); + checkGet(region, T1, c1, c1, ts + 2, T2, T1); - checkGet(region, T2, c0, c0, ts+2, T2, T1); - checkGet(region, T2, c0, c1, ts+2, T2, T1); - 
checkGet(region, T2, c1, c0, ts+2, T2, T1); - checkGet(region, T2, c1, c1, ts+2, T2, T1); + checkGet(region, T2, c0, c0, ts + 2, T2, T1); + checkGet(region, T2, c0, c1, ts + 2, T2, T1); + checkGet(region, T2, c1, c0, ts + 2, T2, T1); + checkGet(region, T2, c1, c1, ts + 2, T2, T1); // ts + 3 does - checkGet(region, T1, c0, c0, ts+3); - checkGet(region, T1, c0, c1, ts+3, T2, T1); - checkGet(region, T1, c1, c0, ts+3); - checkGet(region, T1, c1, c1, ts+3); + checkGet(region, T1, c0, c0, ts + 3); + checkGet(region, T1, c0, c1, ts + 3, T2, T1); + checkGet(region, T1, c1, c0, ts + 3); + checkGet(region, T1, c1, c1, ts + 3); - checkGet(region, T2, c0, c0, ts+3); - checkGet(region, T2, c0, c1, ts+3); - checkGet(region, T2, c1, c0, ts+3, T2, T1); - checkGet(region, T2, c1, c1, ts+3, T2, T1); + checkGet(region, T2, c0, c0, ts + 3); + checkGet(region, T2, c0, c1, ts + 3); + checkGet(region, T2, c1, c0, ts + 3, T2, T1); + checkGet(region, T2, c1, c1, ts + 3, T2, T1); HBaseTestingUtil.closeRegionAndWAL(region); } /** - * Verify that column/version delete makers are sorted - * with their respective puts and removed correctly by - * versioning (i.e. not relying on the store earliestPutTS). + * Verify that column/version delete makers are sorted with their respective puts and removed + * correctly by versioning (i.e. not relying on the store earliestPutTS). */ @Test public void testDeleteMarkerVersioning() throws Exception { TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 1, - HConstants.FOREVER, KeepDeletedCells.TRUE); + HConstants.FOREVER, KeepDeletedCells.TRUE); HRegion region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -692,7 +684,7 @@ public void testDeleteMarkerVersioning() throws Exception { // this prevents marker collection based on earliestPut // (cannot keep earliest put per column in the store file) - p = new Put(T1, ts-10); + p = new Put(T1, ts - 10); p.addColumn(c0, c1, T1); region.put(p); @@ -701,12 +693,12 @@ public void testDeleteMarkerVersioning() throws Exception { d.addColumns(c0, c0, ts); region.delete(d); - d = new Delete(T1, ts+1); - d.addColumn(c0, c0, ts+1); + d = new Delete(T1, ts + 1); + d.addColumn(c0, c0, ts + 1); region.delete(d); - d = new Delete(T1, ts+3); - d.addColumn(c0, c0, ts+3); + d = new Delete(T1, ts + 3); + d.addColumn(c0, c0, ts + 3); region.delete(d); region.flush(true); @@ -717,14 +709,14 @@ public void testDeleteMarkerVersioning() throws Exception { // add two more puts, since max version is 1 // the 2nd put (and all delete markers following) // will be removed. 
- p = new Put(T1, ts+2); + p = new Put(T1, ts + 2); p.addColumn(c0, c0, T2); region.put(p); // delete, put, delete, delete, put assertEquals(3, countDeleteMarkers(region)); - p = new Put(T1, ts+3); + p = new Put(T1, ts + 3); p.addColumn(c0, c0, T3); region.put(p); @@ -754,7 +746,7 @@ public void testDeleteMarkerVersioning() throws Exception { assertEquals(3, countDeleteMarkers(region)); // add one more put - p = new Put(T1, ts+4); + p = new Put(T1, ts + 4); p.addColumn(c0, c0, T4); region.put(p); @@ -775,7 +767,7 @@ public void testDeleteMarkerVersioning() throws Exception { @Test public void testWithMixedCFs() throws Exception { TableDescriptor htd = hbu.createTableDescriptor(TableName.valueOf(name.getMethodName()), 0, 1, - HConstants.FOREVER, KeepDeletedCells.TRUE); + HConstants.FOREVER, KeepDeletedCells.TRUE); Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -787,7 +779,7 @@ public void testWithMixedCFs() throws Exception { p.addColumn(c1, c1, T1); region.put(p); - p = new Put(T2, ts+1); + p = new Put(T2, ts + 1); p.addColumn(c0, c0, T2); p.addColumn(c0, c1, T2); p.addColumn(c1, c0, T2); @@ -795,14 +787,14 @@ public void testWithMixedCFs() throws Exception { region.put(p); // family markers are each family - Delete d = new Delete(T1, ts+1); + Delete d = new Delete(T1, ts + 1); region.delete(d); - d = new Delete(T2, ts+2); + d = new Delete(T2, ts + 2); region.delete(d); Scan s = new Scan().withStartRow(T1); - s.setTimeRange(0, ts+1); + s.setTimeRange(0, ts + 1); InternalScanner scanner = region.getScanner(s); List kvs = new ArrayList<>(); scanner.next(kvs); @@ -810,7 +802,7 @@ public void testWithMixedCFs() throws Exception { scanner.close(); s = new Scan().withStartRow(T2); - s.setTimeRange(0, ts+2); + s.setTimeRange(0, ts + 2); scanner = region.getScanner(s); kvs = new ArrayList<>(); scanner.next(kvs); @@ -834,31 +826,31 @@ public void testWithMinVersions() throws Exception { Put p = new Put(T1, ts); p.addColumn(c0, c0, T3); region.put(p); - p = new Put(T1, ts-1); + p = new Put(T1, ts - 1); p.addColumn(c0, c0, T2); region.put(p); - p = new Put(T1, ts-3); + p = new Put(T1, ts - 3); p.addColumn(c0, c0, T1); region.put(p); - p = new Put(T1, ts-4); + p = new Put(T1, ts - 4); p.addColumn(c0, c0, T0); region.put(p); // all puts now are just retained because of min versions = 3 // place a family delete marker - Delete d = new Delete(T1, ts-1); + Delete d = new Delete(T1, ts - 1); region.delete(d); // and a column delete marker - d = new Delete(T1, ts-2); - d.addColumns(c0, c0, ts-1); + d = new Delete(T1, ts - 2); + d.addColumns(c0, c0, ts - 1); region.delete(d); Get g = new Get(T1); g.readAllVersions(); - g.setTimeRange(0L, ts-2); + g.setTimeRange(0L, ts - 2); Result r = region.get(g); - checkResult(r, c0, c0, T1,T0); + checkResult(r, c0, c0, T1, T0); // 3 families, one column delete marker assertEquals(4, countDeleteMarkers(region)); @@ -869,7 +861,7 @@ public void testWithMinVersions() throws Exception { r = region.get(g); checkResult(r, c0, c0, T1); - p = new Put(T1, ts+1); + p = new Put(T1, ts + 1); p.addColumn(c0, c0, T4); region.put(p); region.flush(true); @@ -881,7 +873,7 @@ public void testWithMinVersions() throws Exception { // this will push out the last put before // family delete marker - p = new Put(T1, ts+2); + p = new Put(T1, ts + 2); p.addColumn(c0, c0, T5); region.put(p); @@ -914,16 +906,16 @@ public void testWithTTL() throws Exception { region.put(p); // place an old row, to make the family marker expires anyway - p 
= new Put(T2, ts-10); + p = new Put(T2, ts - 10); p.addColumn(c0, c0, T1); region.put(p); - checkGet(region, T1, c0, c0, ts+1, T3); + checkGet(region, T1, c0, c0, ts + 1, T3); // place a family delete marker - Delete d = new Delete(T1, ts+2); + Delete d = new Delete(T1, ts + 2); region.delete(d); - checkGet(region, T1, c0, c0, ts+1, T3); + checkGet(region, T1, c0, c0, ts + 1, T3); // 3 families, one column delete marker assertEquals(3, countDeleteMarkers(region)); @@ -933,7 +925,7 @@ public void testWithTTL() throws Exception { assertEquals(3, countDeleteMarkers(region)); // but the Put is gone - checkGet(region, T1, c0, c0, ts+1); + checkGet(region, T1, c0, c0, ts + 1); region.compact(true); // all delete marker gone @@ -942,8 +934,8 @@ public void testWithTTL() throws Exception { HBaseTestingUtil.closeRegionAndWAL(region); } - private void checkGet(Region region, byte[] row, byte[] fam, byte[] col, - long time, byte[]... vals) throws IOException { + private void checkGet(Region region, byte[] row, byte[] fam, byte[] col, long time, + byte[]... vals) throws IOException { Get g = new Get(row); g.addColumn(fam, col); g.readAllVersions(); @@ -965,7 +957,7 @@ private int countDeleteMarkers(HRegion region) throws IOException { do { hasMore = scan.next(kvs); for (Cell kv : kvs) { - if(CellUtil.isDelete(kv)) { + if (CellUtil.isDelete(kv)) { res++; } } @@ -975,15 +967,13 @@ private int countDeleteMarkers(HRegion region) throws IOException { return res; } - private void checkResult(Result r, byte[] fam, byte[] col, byte[] ... vals) { + private void checkResult(Result r, byte[] fam, byte[] col, byte[]... vals) { assertEquals(r.size(), vals.length); List kvs = r.getColumnCells(fam, col); assertEquals(kvs.size(), vals.length); - for (int i=0;i assertCells(List expected, List scanners) - throws IOException { + throws IOException { // Creating KeyValueHeap try (KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR)) { List actual = new ArrayList<>(); @@ -101,7 +101,7 @@ public void testSorted() throws IOException { // 2. Current scanner gets empty List expected = - Arrays.asList(kv111, kv112, kv113, kv114, kv115, kv121, kv122, kv211, kv212, kv213); + Arrays.asList(kv111, kv112, kv113, kv114, kv115, kv121, kv122, kv211, kv212, kv213); List actual = assertCells(expected, scanners); @@ -126,8 +126,9 @@ public void testSeek() throws IOException { List actual = Arrays.asList(kvh.peek()); - assertEquals("Expected = " + Arrays.toString(expected.toArray()) + "\n Actual = " + - Arrays.toString(actual.toArray()), expected, actual); + assertEquals("Expected = " + Arrays.toString(expected.toArray()) + "\n Actual = " + + Arrays.toString(actual.toArray()), + expected, actual); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java index 4f8ee55fa849..9f7ebf5c50bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueScanFixture.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,14 +43,10 @@ public class TestKeyValueScanFixture { @Test public void testKeyValueScanFixture() throws IOException { - KeyValue kvs[] = new KeyValue[]{ - KeyValueTestUtil.create("RowA", "family", "qf1", - 1, KeyValue.Type.Put, "value-1"), - KeyValueTestUtil.create("RowA", "family", "qf2", - 1, KeyValue.Type.Put, "value-2"), - KeyValueTestUtil.create("RowB", "family", "qf1", - 10, KeyValue.Type.Put, "value-10") - }; + KeyValue kvs[] = new KeyValue[] { + KeyValueTestUtil.create("RowA", "family", "qf1", 1, KeyValue.Type.Put, "value-1"), + KeyValueTestUtil.create("RowA", "family", "qf2", 1, KeyValue.Type.Put, "value-2"), + KeyValueTestUtil.create("RowB", "family", "qf1", 10, KeyValue.Type.Put, "value-10") }; KeyValueScanner scan = new KeyValueScanFixture(CellComparator.getInstance(), kvs); KeyValue kv = KeyValueUtil.createFirstOnRow(Bytes.toBytes("RowA")); @@ -78,4 +74,3 @@ public void testKeyValueScanFixture() throws IOException { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestLogRoller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestLogRoller.java index 1405e40a55ea..d584d5e85bdc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestLogRoller.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestLogRoller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,6 +22,9 @@ import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -38,11 +41,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestLogRoller { @ClassRule @@ -84,8 +84,8 @@ public void tearDown() throws Exception { public void testRemoveClosedWAL() throws Exception { assertEquals(0, ROLLER.getWalNeedsRoll().size()); for (int i = 1; i <= 3; i++) { - FSHLog wal = new FSHLog(FS, ROOT_DIR, LOG_DIR, ARCHIVE_DIR, CONF, null, - true, WAL_PREFIX, getWALSuffix(i)); + FSHLog wal = new FSHLog(FS, ROOT_DIR, LOG_DIR, ARCHIVE_DIR, CONF, null, true, WAL_PREFIX, + getWALSuffix(i)); ROLLER.addWAL(wal); } @@ -117,8 +117,8 @@ public void testRequestRollWithMultiWal() throws Exception { // add multiple wal Map wals = new HashMap<>(); for (int i = 1; i <= 3; i++) { - FSHLog wal = new FSHLog(FS, ROOT_DIR, LOG_DIR, ARCHIVE_DIR, CONF, null, - true, WAL_PREFIX, getWALSuffix(i)); + FSHLog wal = new FSHLog(FS, ROOT_DIR, LOG_DIR, ARCHIVE_DIR, CONF, null, true, WAL_PREFIX, + getWALSuffix(i)); wal.init(); wals.put(wal, wal.getCurrentFileName()); ROLLER.addWAL(wal); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index 85fdf0871f7f..f27c8f78a0d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -83,7 +83,7 @@ public class TestMajorCompaction { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMajorCompaction.class); + HBaseClassTestRule.forClass(TestMajorCompaction.class); @Parameterized.Parameters public static Object[] data() { @@ -120,7 +120,7 @@ public TestMajorCompaction(String compType) { secondRowBytes[START_KEY_BYTES.length - 1]++; thirdRowBytes = START_KEY_BYTES.clone(); thirdRowBytes[START_KEY_BYTES.length - 1] = - (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2); + (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2); } @Before @@ -319,7 +319,7 @@ public void testTimeBasedMajorCompaction() throws Exception { // ensure that major compaction time is deterministic RatioBasedCompactionPolicy c = - (RatioBasedCompactionPolicy) s.storeEngine.getCompactionPolicy(); + (RatioBasedCompactionPolicy) s.storeEngine.getCompactionPolicy(); Collection storeFiles = s.getStorefiles(); long mcTime = c.getNextMajorCompactTime(storeFiles); for (int i = 0; i < 10; ++i) { @@ -426,8 +426,9 @@ public void testUserMajorCompactionRequest() throws IOException { createStoreFile(r); } store.triggerMajorCompaction(); - CompactionRequestImpl request = store - .requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get().getRequest(); + CompactionRequestImpl request = + store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get() + .getRequest(); assertNotNull("Expected to receive a compaction request", request); assertEquals( "User-requested major compaction should always occur, even if there are too many store files", @@ -481,7 +482,7 @@ public void testMajorCompactingToNoOutputWithReverseScan() throws IOException { } private void testMajorCompactingWithDeletes(KeepDeletedCells keepDeletedCells) - throws IOException { + throws IOException { createStoreFile(r); for (int i = 0; i < compactionThreshold; i++) { createStoreFile(r); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMaxResultsPerColumnFamily.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMaxResultsPerColumnFamily.java index d8a10bfca5f3..281d035072fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMaxResultsPerColumnFamily.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMaxResultsPerColumnFamily.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -45,15 +46,12 @@ public class TestMaxResultsPerColumnFamily { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMaxResultsPerColumnFamily.class); + HBaseClassTestRule.forClass(TestMaxResultsPerColumnFamily.class); - private static final byte [][] FAMILIES = { - Bytes.toBytes("1"), Bytes.toBytes("2") - }; + private static final byte[][] FAMILIES = { Bytes.toBytes("1"), Bytes.toBytes("2") }; - private static final byte [][] VALUES = { - Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") - }; + private static final byte[][] VALUES = + { Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") }; private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -75,10 +73,10 @@ public void testSetMaxResultsPerColumnFamilySimple() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); Admin admin = UTIL.getAdmin(); ColumnFamilyDescriptorBuilder cfBuilder0 = - ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]); + ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cfBuilder0.build()).build(); + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cfBuilder0.build()).build(); admin.createTable(tableDescriptor); try (Table table = UTIL.getConnection().getTable(tableName)) { @@ -107,9 +105,9 @@ public void testSetMaxResultsPerColumnFamilySimple() throws Exception { static int countScanRows(Table t, Scan scan) throws Exception { int count = 0; - try(ResultScanner scanner = t.getScanner(scan)) { - for(Result r:scanner) { - count ++; + try (ResultScanner scanner = t.getScanner(scan)) { + for (Result r : scanner) { + count++; } } return count; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java index db708c599626..c6491df28ab8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,6 @@ import java.util.Random; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -51,7 +50,7 @@ /** * Test the {@link org.apache.hadoop.hbase.regionserver.ChunkCreator.MemStoreChunkPool} class */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestMemStoreChunkPool { @ClassRule @@ -68,11 +67,12 @@ public static void setUpBeforeClass() throws Exception { conf.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f); chunkPoolDisabledBeforeTest = ChunkCreator.chunkPoolDisabled; ChunkCreator.chunkPoolDisabled = false; - long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage() - .getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false)); + long globalMemStoreLimit = + (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax() + * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false)); chunkCreator = ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, - globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, - null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); assertNotNull(chunkCreator); } @@ -107,7 +107,7 @@ public void testReusingChunks() { } assertEquals(expectedOff, newKv.getOffset()); assertTrue("Allocation overruns buffer", - newKv.getOffset() + size <= newKv.getBuffer().capacity()); + newKv.getOffset() + size <= newKv.getBuffer().capacity()); expectedOff += size; } // chunks will be put back to pool after close @@ -150,7 +150,7 @@ public void testPuttingBackChunksAfterFlushing() throws UnexpectedStateException memstore.add(new KeyValue(row, fam, qf5, val), null); assertEquals(2, memstore.getActive().getCellsCount()); // close the scanner - this is how the snapshot will be used - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -161,8 +161,7 @@ public void testPuttingBackChunksAfterFlushing() throws UnexpectedStateException } @Test - public void testPuttingBackChunksWithOpeningScanner() - throws IOException { + public void testPuttingBackChunksWithOpeningScanner() throws IOException { byte[] row = Bytes.toBytes("testrow"); byte[] fam = Bytes.toBytes("testfamily"); byte[] qf1 = Bytes.toBytes("testqualifier1"); @@ -196,7 +195,7 @@ public void testPuttingBackChunksWithOpeningScanner() // Shouldn't putting back the chunks to pool,since some scanners are opening // based on their data // close the snapshot scanner - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -226,7 +225,7 @@ public void testPuttingBackChunksWithOpeningScanner() // Since no opening scanner, the chunks of snapshot should be put back to // pool // close the snapshot scanners - for(KeyValueScanner scanner : snapshot.getScanners()) { + for (KeyValueScanner scanner : snapshot.getScanners()) { scanner.close(); } memstore.clearSnapshot(snapshot.getId()); @@ -248,8 +247,8 @@ public void 
testPutbackChunksMultiThreaded() throws Exception { // back the original ChunkCreator.instance = newCreator; - final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), - new byte[valSize]); + final KeyValue kv = + new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), new byte[valSize]); final AtomicReference exceptionRef = new AtomicReference(); try { Runnable r = new Runnable() { @@ -394,8 +393,7 @@ public void testNoIndexChunksPoolOrNoDataChunksPool() throws Exception { assertEquals(0, newCreator.getPoolSize()); assertEquals(initialCount, newCreator.getPoolSize(ChunkType.INDEX_CHUNK)); - Chunk indexChunk = - newCreator.getChunk(ChunkType.INDEX_CHUNK); + Chunk indexChunk = newCreator.getChunk(ChunkType.INDEX_CHUNK); assertEquals(0, newCreator.getPoolSize()); assertEquals(initialCount - 1, newCreator.getPoolSize(ChunkType.INDEX_CHUNK)); assertTrue(indexChunk.isIndexChunk()); @@ -419,8 +417,7 @@ public void testNoIndexChunksPoolOrNoDataChunksPool() throws Exception { Chunk dataChunk = ChunkCreator.getInstance().getChunk(); assertTrue(dataChunk.isDataChunk()); assertTrue(dataChunk.isFromPool()); - Chunk indexChunk = ChunkCreator.getInstance().getChunk( - ChunkType.INDEX_CHUNK); + Chunk indexChunk = ChunkCreator.getInstance().getChunk(ChunkType.INDEX_CHUNK); assertTrue(indexChunk.isIndexChunk()); assertTrue(indexChunk.isFromPool()); Chunk jumboChunk = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreFlusher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreFlusher.java index bc3df0ab8051..4c6f81358bd6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreFlusher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreFlusher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; @@ -58,9 +59,8 @@ public void setUp() throws Exception { @Test public void testReplaceDelayedFlushEntry() { - RegionInfo hri = - RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setRegionId(1) - .setReplicaId(0).build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setRegionId(1).setReplicaId(0).build(); HRegion r = mock(HRegion.class); doReturn(hri).when(r).getRegionInfo(); @@ -77,9 +77,8 @@ public void testReplaceDelayedFlushEntry() { @Test public void testNotReplaceDelayedFlushEntryWhichExpired() { - RegionInfo hri = - RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setRegionId(1) - .setReplicaId(0).build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setRegionId(1).setReplicaId(0).build(); HRegion r = mock(HRegion.class); doReturn(hri).when(r).getRegionInfo(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java index 032ea4eef608..6c5599a8d9bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +16,13 @@ * limitations under the License. 
*/ package org.apache.hadoop.hbase.regionserver; + import static org.apache.hadoop.hbase.regionserver.MemStoreLAB.CHUNK_SIZE_KEY; import static org.apache.hadoop.hbase.regionserver.MemStoreLAB.MAX_ALLOC_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.lang.management.ManagementFactory; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -50,12 +52,13 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.primitives.Ints; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestMemStoreLAB { @ClassRule @@ -112,7 +115,7 @@ public void testLABRandomAllocation() { } assertEquals(expectedOff, newKv.getOffset()); assertTrue("Allocation overruns buffer", - newKv.getOffset() + size <= newKv.getBuffer().capacity()); + newKv.getOffset() + size <= newKv.getBuffer().capacity()); expectedOff += size; } } @@ -126,14 +129,12 @@ public void testLABLargeAllocation() { } /** - * Test allocation from lots of threads, making sure the results don't - * overlap in any way + * Test allocation from lots of threads, making sure the results don't overlap in any way */ @Test public void testLABThreading() throws Exception { Configuration conf = new Configuration(); - MultithreadedTestUtil.TestContext ctx = - new MultithreadedTestUtil.TestContext(conf); + MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf); final AtomicInteger totalAllocated = new AtomicInteger(); @@ -159,14 +160,13 @@ public void doAnAction() throws Exception { } ctx.startThreads(); - while (totalAllocated.get() < 50*1024*1000 && ctx.shouldRun()) { + while (totalAllocated.get() < 50 * 1024 * 1000 && ctx.shouldRun()) { Thread.sleep(10); } ctx.stop(); // Partition the allocations by the actual byte[] they point into, // make sure offsets are unique for each chunk - Map> mapsByChunk = - Maps.newHashMap(); + Map> mapsByChunk = Maps.newHashMap(); int sizeCounted = 0; for (AllocRecord rec : Iterables.concat(allocations)) { @@ -174,15 +174,13 @@ public void doAnAction() throws Exception { if (rec.size == 0) { continue; } - Map mapForThisByteArray = - mapsByChunk.get(rec.alloc); + Map mapForThisByteArray = mapsByChunk.get(rec.alloc); if (mapForThisByteArray == null) { mapForThisByteArray = Maps.newTreeMap(); mapsByChunk.put(rec.alloc, mapForThisByteArray); } AllocRecord oldVal = mapForThisByteArray.put(rec.offset, rec); - assertNull("Already had an entry " + oldVal + " for allocation " + rec, - oldVal); + assertNull("Already had an entry " + oldVal + " for allocation " + rec, oldVal); } assertEquals("Sanity check test", sizeCounted, totalAllocated.get()); @@ -194,7 +192,7 @@ public void doAnAction() throws Exception { for (AllocRecord alloc : allocsInChunk.values()) { assertEquals(expectedOff, alloc.offset); assertTrue("Allocation overruns buffer", - alloc.offset + alloc.size <= alloc.alloc.capacity()); + alloc.offset + alloc.size <= alloc.alloc.capacity()); expectedOff += alloc.size; } } @@ -220,11 +218,12 @@ public void testLABChunkQueue() throws Exception { // set chunk size to default max alloc size, so we could easily trigger chunk 
retirement conf.setLong(CHUNK_SIZE_KEY, MemStoreLABImpl.MAX_ALLOC_DEFAULT); // reconstruct mslab - long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage() - .getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false)); - ChunkCreator.initialize(MemStoreLABImpl.MAX_ALLOC_DEFAULT, false, - globalMemStoreLimit, 0.1f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, - null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + long globalMemStoreLimit = + (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax() + * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false)); + ChunkCreator.initialize(MemStoreLABImpl.MAX_ALLOC_DEFAULT, false, globalMemStoreLimit, 0.1f, + MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); ChunkCreator.clearDisableFlag(); mslab = new MemStoreLABImpl(conf); // launch multiple threads to trigger frequent chunk retirement @@ -258,16 +257,17 @@ public void testLABChunkQueue() throws Exception { } // none of the chunkIds would have been returned back assertTrue("All the chunks must have been cleared", - ChunkCreator.instance.numberOfMappedChunks() != 0); + ChunkCreator.instance.numberOfMappedChunks() != 0); Set chunkIds = new HashSet(mslab.chunks); int pooledChunksNum = mslab.getPooledChunks().size(); // close the mslab mslab.close(); // make sure all chunks where reclaimed back to pool int queueLength = mslab.getNumOfChunksReturnedToPool(chunkIds); - assertTrue("All chunks in chunk queue should be reclaimed or removed" - + " after mslab closed but actually: " + (pooledChunksNum-queueLength), - pooledChunksNum-queueLength == 0); + assertTrue( + "All chunks in chunk queue should be reclaimed or removed" + + " after mslab closed but actually: " + (pooledChunksNum - queueLength), + pooledChunksNum - queueLength == 0); } finally { ChunkCreator.instance = oldInstance; } @@ -289,14 +289,14 @@ public void testForceCopyOfBigCellInto() { byte[] qualify = Bytes.toBytes("qualify"); byte[] smallValue = new byte[chunkSize / 2]; byte[] bigValue = new byte[chunkSize]; - KeyValue smallKV = new KeyValue(row, columnFamily, qualify, EnvironmentEdgeManager - .currentTime(), smallValue); + KeyValue smallKV = + new KeyValue(row, columnFamily, qualify, EnvironmentEdgeManager.currentTime(), smallValue); assertEquals(smallKV.getSerializedSize(), mslab.forceCopyOfBigCellInto(smallKV).getSerializedSize()); - KeyValue bigKV = new KeyValue(row, columnFamily, qualify, EnvironmentEdgeManager - .currentTime(), bigValue); + KeyValue bigKV = + new KeyValue(row, columnFamily, qualify, EnvironmentEdgeManager.currentTime(), bigValue); assertEquals(bigKV.getSerializedSize(), mslab.forceCopyOfBigCellInto(bigKV).getSerializedSize()); @@ -355,7 +355,7 @@ public void interrupt() { return thread; } - private static class AllocRecord implements Comparable{ + private static class AllocRecord implements Comparable { private final ByteBuffer alloc; private final int offset; private final int size; @@ -381,4 +381,3 @@ public String toString() { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java index d6d9baf7a481..b2ab7e8b53f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java @@ -1,4 +1,4 @@ -/** 
+/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -78,16 +78,16 @@ public void setup() throws IOException { Configuration conf = new Configuration(); HBaseTestingUtil hbaseUtility = new HBaseTestingUtil(conf); TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE)); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY)).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(TABLE)).build(); Path rootPath = hbaseUtility.getDataTestDir(ROOT_SUB_PATH); this.wal = HBaseTestingUtil.createWal(conf, rootPath, info); - this.region = HRegion.createHRegion(info, rootPath, conf, - tableDescriptorBuilder.build(), this.wal, true); + this.region = + HRegion.createHRegion(info, rootPath, conf, tableDescriptorBuilder.build(), this.wal, true); this.store = new HStore(this.region, columnFamilyDescriptor, conf, false); this.comparator = CellComparator.getInstance(); this.compactionKVMax = HConstants.COMPACTION_KV_MAX_DEFAULT; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java index 1dae13322c3f..84ce4b5dfd8c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstoreLABWithoutPool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ByteBufferKeyValue; import org.apache.hadoop.hbase.Cell; @@ -43,7 +42,7 @@ import org.junit.experimental.categories.Category; @Ignore // See HBASE-19742 for issue on reenabling. 
-@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestMemstoreLABWithoutPool { @ClassRule @@ -58,12 +57,12 @@ public class TestMemstoreLABWithoutPool { @BeforeClass public static void setUpBeforeClass() throws Exception { - long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage() - .getMax() * 0.8); + long globalMemStoreLimit = + (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax() * 0.8); // disable pool - ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT + Bytes.SIZEOF_LONG, - false, globalMemStoreLimit, 0.0f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, - null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT + Bytes.SIZEOF_LONG, false, + globalMemStoreLimit, 0.0f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); } /** @@ -95,7 +94,7 @@ public void testLABRandomAllocation() { } assertEquals(expectedOff, newKv.getOffset()); assertTrue("Allocation overruns buffer", - newKv.getOffset() + size <= newKv.getBuffer().capacity()); + newKv.getOffset() + size <= newKv.getBuffer().capacity()); expectedOff += size; } } @@ -115,8 +114,8 @@ public void testLABChunkQueueWithMultipleMSLABs() throws Exception { // launch multiple threads to trigger frequent chunk retirement List threads = new ArrayList<>(); // create smaller sized kvs - final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), - new byte[0]); + final KeyValue kv = + new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), new byte[0]); for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { threads.add(getChunkQueueTestThread(mslab[i], "testLABChunkQueue-" + j, kv)); @@ -150,7 +149,7 @@ public void testLABChunkQueueWithMultipleMSLABs() throws Exception { } // all of the chunkIds would have been returned back assertTrue("All the chunks must have been cleared", - ChunkCreator.instance.numberOfMappedChunks() == 0); + ChunkCreator.instance.numberOfMappedChunks() == 0); } private Thread getChunkQueueTestThread(final MemStoreLABImpl mslab, String threadName, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java index 306b771341bd..979628687f60 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMergesSplitsAddToTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,6 +25,7 @@ import static org.hamcrest.Matchers.hasProperty; import static org.hamcrest.Matchers.not; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -60,13 +61,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestMergesSplitsAddToTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMergesSplitsAddToTracker.class); + HBaseClassTestRule.forClass(TestMergesSplitsAddToTracker.class); private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -88,14 +88,14 @@ public static void afterClass() throws Exception { } @Before - public void setup(){ + public void setup() { StoreFileTrackerForTest.clear(); } private TableName createTable(byte[] splitKey) throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(name.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)) - .setValue(TRACKER_IMPL, StoreFileTrackerForTest.class.getName()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)) + .setValue(TRACKER_IMPL, StoreFileTrackerForTest.class.getName()).build(); if (splitKey != null) { TEST_UTIL.getAdmin().createTable(td, new byte[][] { splitKey }); } else { @@ -107,30 +107,27 @@ private TableName createTable(byte[] splitKey) throws IOException { @Test public void testCommitDaughterRegion() throws Exception { TableName table = createTable(null); - //first put some data in order to have a store file created + // first put some data in order to have a store file created putThreeRowsAndFlush(table); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0); HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem(); - RegionInfo daughterA = - RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo().getStartKey()). - setEndKey(Bytes.toBytes("002")).setSplit(false). - setRegionId(region.getRegionInfo().getRegionId() + - EnvironmentEdgeManager.currentTime()). 
- build(); + RegionInfo daughterA = RegionInfoBuilder.newBuilder(table) + .setStartKey(region.getRegionInfo().getStartKey()).setEndKey(Bytes.toBytes("002")) + .setSplit(false) + .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) + .build(); RegionInfo daughterB = RegionInfoBuilder.newBuilder(table).setStartKey(Bytes.toBytes("002")) - .setEndKey(region.getRegionInfo().getEndKey()).setSplit(false) - .setRegionId(region.getRegionInfo().getRegionId()).build(); + .setEndKey(region.getRegionInfo().getEndKey()).setSplit(false) + .setRegionId(region.getRegionInfo().getRegionId()).build(); HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0]; List splitFilesA = new ArrayList<>(); - splitFilesA.add(regionFS - .splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), false, region.getSplitPolicy())); + splitFilesA.add(regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file, + Bytes.toBytes("002"), false, region.getSplitPolicy())); List splitFilesB = new ArrayList<>(); - splitFilesB.add(regionFS - .splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file, - Bytes.toBytes("002"), true, region.getSplitPolicy())); - MasterProcedureEnv env = TEST_UTIL.getMiniHBaseCluster().getMaster(). - getMasterProcedureExecutor().getEnvironment(); + splitFilesB.add(regionFS.splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file, + Bytes.toBytes("002"), true, region.getSplitPolicy())); + MasterProcedureEnv env = + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); Path resultA = regionFS.commitDaughterRegion(daughterA, splitFilesA, env); Path resultB = regionFS.commitDaughterRegion(daughterB, splitFilesB, env); FileSystem fs = regionFS.getFileSystem(); @@ -141,9 +138,9 @@ public void testCommitDaughterRegion() throws Exception { @Test public void testCommitMergedRegion() throws Exception { TableName table = createTable(null); - //splitting the table first + // splitting the table first TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002")); - //Add data and flush to create files in the two different regions + // Add data and flush to create files in the two different regions putThreeRowsAndFlush(table); List regions = TEST_UTIL.getHBaseCluster().getRegions(table); HRegion first = regions.get(0); @@ -151,40 +148,40 @@ public void testCommitMergedRegion() throws Exception { HRegionFileSystem regionFS = first.getRegionFileSystem(); RegionInfo mergeResult = - RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey()) - .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false) - .setRegionId(first.getRegionInfo().getRegionId() + - EnvironmentEdgeManager.currentTime()).build(); + RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey()) + .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false) + .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime()) + .build(); HRegionFileSystem mergeFS = HRegionFileSystem.createRegionOnFileSystem( - TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), - regionFS.getFileSystem(), regionFS.getTableDir(), mergeResult); + TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), regionFS.getFileSystem(), + regionFS.getTableDir(), mergeResult); List mergedFiles = new ArrayList<>(); - //merge file from first region + // merge file from first region mergedFiles.add(mergeFileFromRegion(first, mergeFS)); - //merge file from second 
region + // merge file from second region mergedFiles.add(mergeFileFromRegion(second, mergeFS)); - MasterProcedureEnv env = TEST_UTIL.getMiniHBaseCluster().getMaster(). - getMasterProcedureExecutor().getEnvironment(); + MasterProcedureEnv env = + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); mergeFS.commitMergedRegion(mergedFiles, env); - //validate + // validate FileSystem fs = first.getRegionFileSystem().getFileSystem(); - Path finalMergeDir = new Path(first.getRegionFileSystem().getTableDir(), - mergeResult.getEncodedName()); + Path finalMergeDir = + new Path(first.getRegionFileSystem().getTableDir(), mergeResult.getEncodedName()); verifyFilesAreTracked(finalMergeDir, fs); } @Test public void testSplitLoadsFromTracker() throws Exception { TableName table = createTable(null); - //Add data and flush to create files in the two different regions + // Add data and flush to create files in the two different regions putThreeRowsAndFlush(table); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0); Pair copyResult = copyFileInTheStoreDir(region); StoreFileInfo fileInfo = copyResult.getFirst(); String copyName = copyResult.getSecond(); - //Now splits the region + // Now splits the region TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002")); List regions = TEST_UTIL.getHBaseCluster().getRegions(table); HRegion first = regions.get(0); @@ -196,38 +193,39 @@ public void testSplitLoadsFromTracker() throws Exception { @Test public void testMergeLoadsFromTracker() throws Exception { TableName table = createTable(Bytes.toBytes("002")); - //Add data and flush to create files in the two different regions + // Add data and flush to create files in the two different regions putThreeRowsAndFlush(table); List regions = TEST_UTIL.getHBaseCluster().getRegions(table); HRegion first = regions.get(0); Pair copyResult = copyFileInTheStoreDir(first); StoreFileInfo fileInfo = copyResult.getFirst(); String copyName = copyResult.getSecond(); - //Now merges the first two regions - TEST_UTIL.getAdmin().mergeRegionsAsync(new byte[][]{ - first.getRegionInfo().getEncodedNameAsBytes(), - regions.get(1).getRegionInfo().getEncodedNameAsBytes() - }, true).get(10, TimeUnit.SECONDS); + // Now merges the first two regions + TEST_UTIL.getAdmin() + .mergeRegionsAsync(new byte[][] { first.getRegionInfo().getEncodedNameAsBytes(), + regions.get(1).getRegionInfo().getEncodedNameAsBytes() }, + true) + .get(10, TimeUnit.SECONDS); regions = TEST_UTIL.getHBaseCluster().getRegions(table); HRegion merged = regions.get(0); validateDaughterRegionsFiles(merged, fileInfo.getActiveFileName(), copyName); } - private Pair copyFileInTheStoreDir(HRegion region) throws IOException { + private Pair copyFileInTheStoreDir(HRegion region) throws IOException { Path storeDir = region.getRegionFileSystem().getStoreDir("info"); - //gets the single file + // gets the single file StoreFileInfo fileInfo = region.getRegionFileSystem().getStoreFiles("info").get(0); - //make a copy of the valid file staight into the store dir, so that it's not tracked. + // make a copy of the valid file staight into the store dir, so that it's not tracked. 
String copyName = UUID.randomUUID().toString().replaceAll("-", ""); Path copy = new Path(storeDir, copyName); - FileUtil.copy(region.getFilesystem(), fileInfo.getFileStatus(), region.getFilesystem(), - copy , false, false, TEST_UTIL.getConfiguration()); + FileUtil.copy(region.getFilesystem(), fileInfo.getFileStatus(), region.getFilesystem(), copy, + false, false, TEST_UTIL.getConfiguration()); return new Pair<>(fileInfo, copyName); } private void validateDaughterRegionsFiles(HRegion region, String originalFileName, String untrackedFile) throws IOException { - //verify there's no link for the untracked, copied file in first region + // verify there's no link for the untracked, copied file in first region List infos = region.getRegionFileSystem().getStoreFiles("info"); assertThat(infos, everyItem(hasProperty("activeFileName", not(containsString(untrackedFile))))); assertThat(infos, hasItem(hasProperty("activeFileName", containsString(originalFileName)))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsHeapMemoryManager.java index 9958ca664bd4..2b17711d44af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsHeapMemoryManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,8 +39,8 @@ public class TestMetricsHeapMemoryManager { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsHeapMemoryManager.class); - public static MetricsAssertHelper HELPER = CompatibilitySingletonFactory - .getInstance(MetricsAssertHelper.class); + public static MetricsAssertHelper HELPER = + CompatibilitySingletonFactory.getInstance(MetricsAssertHelper.class); private MetricsHeapMemoryManager hmm; private MetricsHeapMemoryManagerSource source; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java index 00031130604c..d4b4aaa66844 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,14 +27,13 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestMetricsRegion { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsRegion.class); - public MetricsAssertHelper HELPER = CompatibilityFactory.getInstance(MetricsAssertHelper.class); @Test @@ -43,8 +42,8 @@ public void testRegionWrapperMetrics() { MetricsRegionAggregateSource agg = mr.getSource().getAggregateSource(); HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeCount", - 101, agg); + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeCount", 101, + agg); HELPER.assertGauge( "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeFileCount", 102, agg); @@ -61,42 +60,42 @@ public void testRegionWrapperMetrics() { "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_numReferenceFiles", 2, agg); HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize", - 103, agg); + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize", 103, + agg); HELPER.assertCounter( "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_cpRequestCount", 108, agg); + HELPER + .assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" + + "filteredReadRequestCount", + 107, agg); HELPER.assertCounter( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" + - "filteredReadRequestCount", - 107, agg); - HELPER.assertCounter( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid", - 0, agg); + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid", 0, + agg); mr.close(); // test region with replica id > 0 mr = new MetricsRegion(new MetricsRegionWrapperStub(1), new Configuration()); agg = mr.getSource().getAggregateSource(); HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeCount", - 101, agg); + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeCount", 101, + agg); HELPER.assertGauge( "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_storeFileCount", 102, agg); HELPER.assertGauge( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize", - 103, agg); + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize", 103, + agg); HELPER.assertCounter( "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_cpRequestCount", 108, agg); + HELPER + .assertCounter("namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" + + "filteredReadRequestCount", + 107, agg); HELPER.assertCounter( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" + - "filteredReadRequestCount", - 107, agg); - HELPER.assertCounter( - "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid", - 1, agg); + "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid", 1, + agg); HELPER.assertCounter( 
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_compactionsQueuedCount", 4, agg); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java index f2e485442bd0..8fa4a6344c92 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ /** * Unit test version of rs metrics tests. */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestMetricsRegionServer { @ClassRule @@ -124,34 +124,35 @@ public void testWrapperSource() { @Test public void testConstuctor() { - assertNotNull("There should be a hadoop1/hadoop2 metrics source", rsm.getMetricsSource() ); - assertNotNull("The RegionServerMetricsWrapper should be accessable", rsm.getRegionServerWrapper()); + assertNotNull("There should be a hadoop1/hadoop2 metrics source", rsm.getMetricsSource()); + assertNotNull("The RegionServerMetricsWrapper should be accessable", + rsm.getRegionServerWrapper()); } @Test public void testSlowCount() { - for (int i=0; i < 12; i ++) { + for (int i = 0; i < 12; i++) { rsm.updateAppend(null, 12); rsm.updateAppend(null, 1002); } - for (int i=0; i < 13; i ++) { + for (int i = 0; i < 13; i++) { rsm.updateDeleteBatch(null, 13); rsm.updateDeleteBatch(null, 1003); } - for (int i=0; i < 14; i ++) { + for (int i = 0; i < 14; i++) { rsm.updateGet(null, 14); rsm.updateGet(null, 1004); } - for (int i=0; i < 15; i ++) { + for (int i = 0; i < 15; i++) { rsm.updateIncrement(null, 15); rsm.updateIncrement(null, 1005); } - for (int i=0; i < 16; i ++) { + for (int i = 0; i < 16; i++) { rsm.updatePutBatch(null, 16); rsm.updatePutBatch(null, 1006); } - for (int i=0; i < 17; i ++) { + for (int i = 0; i < 17; i++) { rsm.updatePut(null, 17); rsm.updateDelete(null, 17); rsm.updatePut(null, 1006); @@ -280,4 +281,3 @@ public void testTableQueryMeterSwitch() { HELPER.assertGauge("ServerWriteQueryPerSecond_count", 500L, serverSource); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java index 98a412cf59bf..9e108422a32f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; @@ -43,12 +44,12 @@ public class TestMetricsTableAggregate { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMetricsTableAggregate.class); + HBaseClassTestRule.forClass(TestMetricsTableAggregate.class); private static final Logger LOG = LoggerFactory.getLogger(TestMetricsTableAggregate.class); private static MetricsAssertHelper HELPER = - CompatibilityFactory.getInstance(MetricsAssertHelper.class); + CompatibilityFactory.getInstance(MetricsAssertHelper.class); private String tableName = "testTableMetrics"; private String pre = "Namespace_default_table_" + tableName + "_metric_"; @@ -170,8 +171,8 @@ public void testConcurrentUpdate() throws InterruptedException { AtomicBoolean succ = new AtomicBoolean(true); CyclicBarrier barrier = new CyclicBarrier(threadNumber); Thread[] threads = IntStream.range(0, threadNumber) - .mapToObj(i -> new Thread(() -> update(succ, round, barrier), "Update-Worker-" + i)) - .toArray(Thread[]::new); + .mapToObj(i -> new Thread(() -> update(succ, round, barrier), "Update-Worker-" + i)) + .toArray(Thread[]::new); Stream.of(threads).forEach(Thread::start); for (Thread t : threads) { t.join(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableLatencies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableLatencies.java index 4ee847a36ae7..69fc9ee8ab81 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableLatencies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableLatencies.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; @@ -34,7 +33,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestMetricsTableLatencies { @ClassRule @@ -48,78 +47,76 @@ public class TestMetricsTableLatencies { public void testTableWrapperAggregateMetrics() throws IOException { TableName tn1 = TableName.valueOf("table1"); TableName tn2 = TableName.valueOf("table2"); - MetricsTableLatencies latencies = CompatibilitySingletonFactory.getInstance( - MetricsTableLatencies.class); + MetricsTableLatencies latencies = + CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class); assertTrue("'latencies' is actually " + latencies.getClass(), - latencies instanceof MetricsTableLatenciesImpl); + latencies instanceof MetricsTableLatenciesImpl); MetricsTableLatenciesImpl latenciesImpl = (MetricsTableLatenciesImpl) latencies; RegionServerTableMetrics tableMetrics = new RegionServerTableMetrics(false); // Metrics to each table should be disjoint // N.B. 
each call to assertGauge removes all previously acquired metrics so we have to - // make the metrics call and then immediately verify it. Trying to do multiple metrics - // updates followed by multiple verifications will fail on the 2nd verification (as the - // first verification cleaned the data structures in MetricsAssertHelperImpl). + // make the metrics call and then immediately verify it. Trying to do multiple metrics + // updates followed by multiple verifications will fail on the 2nd verification (as the + // first verification cleaned the data structures in MetricsAssertHelperImpl). tableMetrics.updateGet(tn1, 500L); - HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName( - tn1, MetricsTableLatencies.GET_TIME + "_" + "999th_percentile"), 500L, latenciesImpl); + HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(tn1, + MetricsTableLatencies.GET_TIME + "_" + "999th_percentile"), 500L, latenciesImpl); tableMetrics.updatePut(tn1, 50L); - HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName( - tn1, MetricsTableLatencies.PUT_TIME + "_" + "99th_percentile"), 50L, latenciesImpl); + HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(tn1, + MetricsTableLatencies.PUT_TIME + "_" + "99th_percentile"), 50L, latenciesImpl); tableMetrics.updateGet(tn2, 300L); - HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName( - tn2, MetricsTableLatencies.GET_TIME + "_" + "999th_percentile"), 300L, latenciesImpl); + HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(tn2, + MetricsTableLatencies.GET_TIME + "_" + "999th_percentile"), 300L, latenciesImpl); tableMetrics.updatePut(tn2, 75L); - HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName( - tn2, MetricsTableLatencies.PUT_TIME + "_" + "99th_percentile"), 75L, latenciesImpl); + HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName(tn2, + MetricsTableLatencies.PUT_TIME + "_" + "99th_percentile"), 75L, latenciesImpl); } @Test public void testTableQueryMeterSwitch() { TableName tn1 = TableName.valueOf("table1"); - MetricsTableLatencies latencies = CompatibilitySingletonFactory.getInstance( - MetricsTableLatencies.class); + MetricsTableLatencies latencies = + CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class); assertTrue("'latencies' is actually " + latencies.getClass(), latencies instanceof MetricsTableLatenciesImpl); MetricsTableLatenciesImpl latenciesImpl = (MetricsTableLatenciesImpl) latencies; Configuration conf = new Configuration(); - boolean enableTableQueryMeter = conf.getBoolean( - MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY, - MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT); + boolean enableTableQueryMeter = + conf.getBoolean(MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY, + MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT); // disable assertFalse(enableTableQueryMeter); RegionServerTableMetrics tableMetrics = new RegionServerTableMetrics(enableTableQueryMeter); tableMetrics.updateTableReadQueryMeter(tn1, 500L); - assertFalse(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName( - tn1, MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"), - latenciesImpl)); + assertFalse(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(tn1, + MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"), latenciesImpl)); tableMetrics.updateTableWriteQueryMeter(tn1, 500L); - 
assertFalse(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName( - tn1, MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"), - latenciesImpl)); + assertFalse(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(tn1, + MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"), latenciesImpl)); // enable conf.setBoolean(MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY, true); - enableTableQueryMeter = conf.getBoolean( - MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY, - MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT); + enableTableQueryMeter = + conf.getBoolean(MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY, + MetricsRegionServer.RS_ENABLE_TABLE_QUERY_METER_METRICS_KEY_DEFAULT); assertTrue(enableTableQueryMeter); tableMetrics = new RegionServerTableMetrics(true); tableMetrics.updateTableReadQueryMeter(tn1, 500L); - assertTrue(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName( - tn1, MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"), - latenciesImpl)); - HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName( - tn1, MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"), + assertTrue(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(tn1, + MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"), latenciesImpl)); + HELPER.assertGauge( + MetricsTableLatenciesImpl.qualifyMetricsName(tn1, + MetricsTableQueryMeterImpl.TABLE_READ_QUERY_PER_SECOND + "_" + "count"), 500L, latenciesImpl); tableMetrics.updateTableWriteQueryMeter(tn1, 500L); - assertTrue(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName( - tn1, MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"), - latenciesImpl)); - HELPER.assertGauge(MetricsTableLatenciesImpl.qualifyMetricsName( - tn1, MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"), + assertTrue(HELPER.checkGaugeExists(MetricsTableLatenciesImpl.qualifyMetricsName(tn1, + MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"), latenciesImpl)); + HELPER.assertGauge( + MetricsTableLatenciesImpl.qualifyMetricsName(tn1, + MetricsTableQueryMeterImpl.TABLE_WRITE_QUERY_PER_SECOND + "_" + "count"), 500L, latenciesImpl); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserAggregate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserAggregate.java index 501ca726cd31..27ff1940ea1d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserAggregate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserAggregate.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.security.PrivilegedAction; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompatibilityFactory; @@ -36,10 +36,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestMetricsUserAggregate { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsUserAggregate.class); private static MetricsAssertHelper HELPER = @@ -59,30 +60,30 @@ public static void classSetUp() { public void setUp() { wrapper = new MetricsRegionServerWrapperStub(); Configuration conf = HBaseConfiguration.create(); - rsm = new MetricsRegionServer(wrapper,conf , null); - userAgg = (MetricsUserAggregate)rsm.getMetricsUserAggregate(); + rsm = new MetricsRegionServer(wrapper, conf, null); + userAgg = (MetricsUserAggregate) rsm.getMetricsUserAggregate(); } private void doOperations() { - for (int i=0; i < 10; i ++) { - rsm.updateGet(tableName,10); + for (int i = 0; i < 10; i++) { + rsm.updateGet(tableName, 10); } - for (int i=0; i < 11; i ++) { - rsm.updateScanTime(tableName,11); + for (int i = 0; i < 11; i++) { + rsm.updateScanTime(tableName, 11); } - for (int i=0; i < 12; i ++) { - rsm.updatePut(tableName,12); + for (int i = 0; i < 12; i++) { + rsm.updatePut(tableName, 12); } - for (int i=0; i < 13; i ++) { - rsm.updateDelete(tableName,13); + for (int i = 0; i < 13; i++) { + rsm.updateDelete(tableName, 13); } - for (int i=0; i < 14; i ++) { - rsm.updateIncrement(tableName,14); + for (int i = 0; i < 14; i++) { + rsm.updateIncrement(tableName, 14); } - for (int i=0; i < 15; i ++) { - rsm.updateAppend(tableName,15); + for (int i = 0; i < 15; i++) { + rsm.updateAppend(tableName, 15); } - for (int i=0; i < 16; i ++) { + for (int i = 0; i < 16; i++) { rsm.updateReplay(16); } } @@ -90,7 +91,7 @@ private void doOperations() { @Test public void testPerUserOperations() { Configuration conf = HBaseConfiguration.create(); - // If metrics for users is not enabled, this test doesn't make sense. + // If metrics for users is not enabled, this test doesn't make sense. if (!conf.getBoolean(MetricsUserAggregateFactory.METRIC_USER_ENABLED_CONF, MetricsUserAggregateFactory.DEFAULT_METRIC_USER_ENABLED_CONF)) { return; @@ -131,9 +132,10 @@ public Void run() { HELPER.assertCounter("userbarmetricreplaynumops", 16, userAgg.getSource()); } - @Test public void testLossyCountingOfUserMetrics() { + @Test + public void testLossyCountingOfUserMetrics() { Configuration conf = HBaseConfiguration.create(); - // If metrics for users is not enabled, this test doesn't make sense. + // If metrics for users is not enabled, this test doesn't make sense. 
if (!conf.getBoolean(MetricsUserAggregateFactory.METRIC_USER_ENABLED_CONF, MetricsUserAggregateFactory.DEFAULT_METRIC_USER_ENABLED_CONF)) { return; @@ -142,18 +144,18 @@ public Void run() { for (int i = 1; i <= noOfUsers; i++) { User.createUserForTesting(conf, "FOO" + i, new String[0]).getUGI() .doAs(new PrivilegedAction() { - @Override public Void run() { + @Override + public Void run() { rsm.updateGet(tableName, 10); return null; } }); } - assertTrue( - ((MetricsUserAggregateSourceImpl) userAgg.getSource()).getUserSources().size() <= (noOfUsers - / 10)); + assertTrue(((MetricsUserAggregateSourceImpl) userAgg.getSource()).getUserSources() + .size() <= (noOfUsers / 10)); for (int i = 1; i <= noOfUsers / 10; i++) { assertFalse( - HELPER.checkCounterExists("userfoo" + i + "metricgetnumops", userAgg.getSource())); + HELPER.checkCounterExists("userfoo" + i + "metricgetnumops", userAgg.getSource())); } HELPER.assertCounter("userfoo" + noOfUsers + "metricgetnumops", 1, userAgg.getSource()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java index 089397313fec..c2832c5d28cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ /** * Test Minimum Versions feature (HBASE-4071). */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestMinVersions { @ClassRule @@ -71,7 +71,8 @@ public class TestMinVersions { private final byte[] c0 = COLUMNS[0]; - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); /** * Verify behavior of getClosestBefore(...) @@ -79,13 +80,11 @@ public class TestMinVersions { @Test public void testGetClosestBefore() throws Exception { - ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(c0) - .setMinVersions(1).setMaxVersions(1000).setTimeToLive(1). - setKeepDeletedCells(KeepDeletedCells.FALSE).build(); + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(c0).setMinVersions(1) + .setMaxVersions(1000).setTimeToLive(1).setKeepDeletedCells(KeepDeletedCells.FALSE).build(); - TableDescriptor htd = TableDescriptorBuilder. - newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(cfd).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(cfd).build(); HRegion region = hbu.createLocalHRegion(htd, null, null); try { @@ -96,7 +95,7 @@ public void testGetClosestBefore() throws Exception { p.addColumn(c0, c0, T1); region.put(p); - p = new Put(T1, ts+1); + p = new Put(T1, ts + 1); p.addColumn(c0, c0, T4); region.put(p); @@ -128,31 +127,28 @@ public void testGetClosestBefore() throws Exception { } /** - * Test mixed memstore and storefile scanning - * with minimum versions. + * Test mixed memstore and storefile scanning with minimum versions. */ @Test public void testStoreMemStore() throws Exception { // keep 3 versions minimum - ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(c0) - .setMinVersions(3).setMaxVersions(1000).setTimeToLive(1). 
- setKeepDeletedCells(KeepDeletedCells.FALSE).build(); + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(c0).setMinVersions(3) + .setMaxVersions(1000).setTimeToLive(1).setKeepDeletedCells(KeepDeletedCells.FALSE).build(); - TableDescriptor htd = TableDescriptorBuilder. - newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(cfd).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(cfd).build(); HRegion region = hbu.createLocalHRegion(htd, null, null); // 2s in the past long ts = EnvironmentEdgeManager.currentTime() - 2000; try { - Put p = new Put(T1, ts-1); + Put p = new Put(T1, ts - 1); p.addColumn(c0, c0, T2); region.put(p); - p = new Put(T1, ts-3); + p = new Put(T1, ts - 3); p.addColumn(c0, c0, T0); region.put(p); @@ -164,11 +160,11 @@ public void testStoreMemStore() throws Exception { p.addColumn(c0, c0, T3); region.put(p); - p = new Put(T1, ts-2); + p = new Put(T1, ts - 2); p.addColumn(c0, c0, T1); region.put(p); - p = new Put(T1, ts-3); + p = new Put(T1, ts - 3); p.addColumn(c0, c0, T0); region.put(p); @@ -179,13 +175,13 @@ public void testStoreMemStore() throws Exception { Get g = new Get(T1); g.readAllVersions(); Result r = region.get(g); // this'll use ScanWildcardColumnTracker - checkResult(r, c0, T3,T2,T1); + checkResult(r, c0, T3, T2, T1); g = new Get(T1); g.readAllVersions(); g.addColumn(c0, c0); - r = region.get(g); // this'll use ExplicitColumnTracker - checkResult(r, c0, T3,T2,T1); + r = region.get(g); // this'll use ExplicitColumnTracker + checkResult(r, c0, T3, T2, T1); } finally { HBaseTestingUtil.closeRegionAndWAL(region); } @@ -196,13 +192,11 @@ public void testStoreMemStore() throws Exception { */ @Test public void testDelete() throws Exception { - ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(c0) - .setMinVersions(3).setMaxVersions(1000).setTimeToLive(1). - setKeepDeletedCells(KeepDeletedCells.FALSE).build(); + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(c0).setMinVersions(3) + .setMaxVersions(1000).setTimeToLive(1).setKeepDeletedCells(KeepDeletedCells.FALSE).build(); - TableDescriptor htd = TableDescriptorBuilder. 
- newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(cfd).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(cfd).build(); HRegion region = hbu.createLocalHRegion(htd, null, null); @@ -210,11 +204,11 @@ public void testDelete() throws Exception { long ts = EnvironmentEdgeManager.currentTime() - 2000; try { - Put p = new Put(T1, ts-2); + Put p = new Put(T1, ts - 2); p.addColumn(c0, c0, T1); region.put(p); - p = new Put(T1, ts-1); + p = new Put(T1, ts - 1); p.addColumn(c0, c0, T2); region.put(p); @@ -222,18 +216,18 @@ public void testDelete() throws Exception { p.addColumn(c0, c0, T3); region.put(p); - Delete d = new Delete(T1, ts-1); + Delete d = new Delete(T1, ts - 1); region.delete(d); Get g = new Get(T1); g.readAllVersions(); - Result r = region.get(g); // this'll use ScanWildcardColumnTracker + Result r = region.get(g); // this'll use ScanWildcardColumnTracker checkResult(r, c0, T3); g = new Get(T1); g.readAllVersions(); g.addColumn(c0, c0); - r = region.get(g); // this'll use ExplicitColumnTracker + r = region.get(g); // this'll use ExplicitColumnTracker checkResult(r, c0, T3); // now flush/compact @@ -243,13 +237,13 @@ public void testDelete() throws Exception { // try again g = new Get(T1); g.readAllVersions(); - r = region.get(g); // this'll use ScanWildcardColumnTracker + r = region.get(g); // this'll use ScanWildcardColumnTracker checkResult(r, c0, T3); g = new Get(T1); g.readAllVersions(); g.addColumn(c0, c0); - r = region.get(g); // this'll use ExplicitColumnTracker + r = region.get(g); // this'll use ExplicitColumnTracker checkResult(r, c0, T3); } finally { HBaseTestingUtil.closeRegionAndWAL(region); @@ -261,13 +255,11 @@ public void testDelete() throws Exception { */ @Test public void testMemStore() throws Exception { - ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(c0) - .setMinVersions(2).setMaxVersions(1000).setTimeToLive(1). - setKeepDeletedCells(KeepDeletedCells.FALSE).build(); + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(c0).setMinVersions(2) + .setMaxVersions(1000).setTimeToLive(1).setKeepDeletedCells(KeepDeletedCells.FALSE).build(); - TableDescriptor htd = TableDescriptorBuilder. 
- newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(cfd).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(cfd).build(); HRegion region = hbu.createLocalHRegion(htd, null, null); // 2s in the past @@ -275,12 +267,12 @@ public void testMemStore() throws Exception { try { // 2nd version - Put p = new Put(T1, ts-2); + Put p = new Put(T1, ts - 2); p.addColumn(c0, c0, T2); region.put(p); // 3rd version - p = new Put(T1, ts-1); + p = new Put(T1, ts - 1); p.addColumn(c0, c0, T3); region.put(p); @@ -294,7 +286,7 @@ public void testMemStore() throws Exception { region.compact(true); // now put the first version (backdated) - p = new Put(T1, ts-3); + p = new Put(T1, ts - 3); p.addColumn(c0, c0, T1); region.put(p); @@ -307,15 +299,15 @@ public void testMemStore() throws Exception { Get g = new Get(T1); g.readAllVersions(); r = region.get(g); // this'll use ScanWildcardColumnTracker - checkResult(r, c0, T4,T3); + checkResult(r, c0, T4, T3); g = new Get(T1); g.readAllVersions(); g.addColumn(c0, c0); - r = region.get(g); // this'll use ExplicitColumnTracker - checkResult(r, c0, T4,T3); + r = region.get(g); // this'll use ExplicitColumnTracker + checkResult(r, c0, T4, T3); - p = new Put(T1, ts+1); + p = new Put(T1, ts + 1); p.addColumn(c0, c0, T5); region.put(p); @@ -323,14 +315,14 @@ public void testMemStore() throws Exception { g = new Get(T1); g.readAllVersions(); - r = region.get(g); // this'll use ScanWildcardColumnTracker - checkResult(r, c0, T5,T4); + r = region.get(g); // this'll use ScanWildcardColumnTracker + checkResult(r, c0, T5, T4); g = new Get(T1); g.readAllVersions(); g.addColumn(c0, c0); - r = region.get(g); // this'll use ExplicitColumnTracker - checkResult(r, c0, T5,T4); + r = region.get(g); // this'll use ExplicitColumnTracker + checkResult(r, c0, T5, T4); } finally { HBaseTestingUtil.closeRegionAndWAL(region); } @@ -342,31 +334,29 @@ public void testMemStore() throws Exception { @Test public void testBaseCase() throws Exception { // 2 version minimum, 1000 versions maximum, ttl = 1s - ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(c0) - .setMinVersions(2).setMaxVersions(1000).setTimeToLive(1). - setKeepDeletedCells(KeepDeletedCells.FALSE).build(); + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(c0).setMinVersions(2) + .setMaxVersions(1000).setTimeToLive(1).setKeepDeletedCells(KeepDeletedCells.FALSE).build(); - TableDescriptor htd = TableDescriptorBuilder. 
- newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(cfd).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(cfd).build(); HRegion region = hbu.createLocalHRegion(htd, null, null); try { // 2s in the past long ts = EnvironmentEdgeManager.currentTime() - 2000; - // 1st version - Put p = new Put(T1, ts-3); + // 1st version + Put p = new Put(T1, ts - 3); p.addColumn(c0, c0, T1); region.put(p); // 2nd version - p = new Put(T1, ts-2); + p = new Put(T1, ts - 2); p.addColumn(c0, c0, T2); region.put(p); // 3rd version - p = new Put(T1, ts-1); + p = new Put(T1, ts - 1); p.addColumn(c0, c0, T3); region.put(p); @@ -379,12 +369,12 @@ public void testBaseCase() throws Exception { checkResult(r, c0, T4); Get g = new Get(T1); - g.setTimeRange(0L, ts+1); + g.setTimeRange(0L, ts + 1); r = region.get(g); checkResult(r, c0, T4); - // oldest version still exists - g.setTimeRange(0L, ts-2); + // oldest version still exists + g.setTimeRange(0L, ts - 2); r = region.get(g); checkResult(r, c0, T1); @@ -393,20 +383,20 @@ public void testBaseCase() throws Exception { g = new Get(T1); g.readAllVersions(); r = region.get(g); // this'll use ScanWildcardColumnTracker - checkResult(r, c0, T4,T3); + checkResult(r, c0, T4, T3); g = new Get(T1); g.readAllVersions(); g.addColumn(c0, c0); - r = region.get(g); // this'll use ExplicitColumnTracker - checkResult(r, c0, T4,T3); + r = region.get(g); // this'll use ExplicitColumnTracker + checkResult(r, c0, T4, T3); // now flush region.flush(true); // with HBASE-4241 a flush will eliminate the expired rows g = new Get(T1); - g.setTimeRange(0L, ts-2); + g.setTimeRange(0L, ts - 2); r = region.get(g); assertTrue(r.isEmpty()); @@ -415,7 +405,7 @@ public void testBaseCase() throws Exception { // after compaction the 4th version is still available g = new Get(T1); - g.setTimeRange(0L, ts+1); + g.setTimeRange(0L, ts + 1); r = region.get(g); checkResult(r, c0, T4); @@ -425,7 +415,7 @@ public void testBaseCase() throws Exception { checkResult(r, c0, T3); // but the 2nd and earlier versions are gone - g.setTimeRange(0L, ts-1); + g.setTimeRange(0L, ts - 1); r = region.get(g); assertTrue(r.isEmpty()); } finally { @@ -434,44 +424,39 @@ public void testBaseCase() throws Exception { } /** - * Verify that basic filters still behave correctly with - * minimum versions enabled. + * Verify that basic filters still behave correctly with minimum versions enabled. */ @Test public void testFilters() throws Exception { - final byte [] c1 = COLUMNS[1]; - ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(c0) - .setMinVersions(2).setMaxVersions(1000).setTimeToLive(1). - setKeepDeletedCells(KeepDeletedCells.FALSE).build(); - - ColumnFamilyDescriptor cfd2 = - ColumnFamilyDescriptorBuilder.newBuilder(c1) - .setMinVersions(2).setMaxVersions(1000).setTimeToLive(1). - setKeepDeletedCells(KeepDeletedCells.FALSE).build(); + final byte[] c1 = COLUMNS[1]; + ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(c0).setMinVersions(2) + .setMaxVersions(1000).setTimeToLive(1).setKeepDeletedCells(KeepDeletedCells.FALSE).build(); + + ColumnFamilyDescriptor cfd2 = ColumnFamilyDescriptorBuilder.newBuilder(c1).setMinVersions(2) + .setMaxVersions(1000).setTimeToLive(1).setKeepDeletedCells(KeepDeletedCells.FALSE).build(); List cfdList = new ArrayList(); cfdList.add(cfd); cfdList.add(cfd2); - TableDescriptor htd = TableDescriptorBuilder. 
- newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamilies(cfdList).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamilies(cfdList).build(); HRegion region = hbu.createLocalHRegion(htd, null, null); // 2s in the past long ts = EnvironmentEdgeManager.currentTime() - 2000; try { - Put p = new Put(T1, ts-3); + Put p = new Put(T1, ts - 3); p.addColumn(c0, c0, T0); p.addColumn(c1, c1, T0); region.put(p); - p = new Put(T1, ts-2); + p = new Put(T1, ts - 2); p.addColumn(c0, c0, T1); p.addColumn(c1, c1, T1); region.put(p); - p = new Put(T1, ts-1); + p = new Put(T1, ts - 1); p.addColumn(c0, c0, T2); p.addColumn(c1, c1, T2); region.put(p); @@ -482,12 +467,12 @@ public void testFilters() throws Exception { region.put(p); List tss = new ArrayList<>(); - tss.add(ts-1); - tss.add(ts-2); + tss.add(ts - 1); + tss.add(ts - 2); // Sholud only get T2, versions is 2, so T1 is gone from user view. Get g = new Get(T1); - g.addColumn(c1,c1); + g.addColumn(c1, c1); g.setFilter(new TimestampsFilter(tss)); g.readAllVersions(); Result r = region.get(g); @@ -495,7 +480,7 @@ public void testFilters() throws Exception { // Sholud only get T2, versions is 2, so T1 is gone from user view. g = new Get(T1); - g.addColumn(c0,c0); + g.addColumn(c0, c0); g.setFilter(new TimestampsFilter(tss)); g.readAllVersions(); r = region.get(g); @@ -507,7 +492,7 @@ public void testFilters() throws Exception { // After flush/compact, the result should be consistent with previous result g = new Get(T1); - g.addColumn(c1,c1); + g.addColumn(c1, c1); g.setFilter(new TimestampsFilter(tss)); g.readAllVersions(); r = region.get(g); @@ -515,7 +500,7 @@ public void testFilters() throws Exception { // After flush/compact, the result should be consistent with previous result g = new Get(T1); - g.addColumn(c0,c0); + g.addColumn(c0, c0); g.setFilter(new TimestampsFilter(tss)); g.readAllVersions(); r = region.get(g); @@ -529,23 +514,19 @@ public void testFilters() throws Exception { public void testMinVersionsWithKeepDeletedCellsTTL() throws Exception { int ttl = 4; ColumnFamilyDescriptor cfd = - ColumnFamilyDescriptorBuilder.newBuilder(c0) - .setVersionsWithTimeToLive(ttl, 2).build(); + ColumnFamilyDescriptorBuilder.newBuilder(c0).setVersionsWithTimeToLive(ttl, 2).build(); verifyVersionedCellKeyValues(ttl, cfd); - cfd = ColumnFamilyDescriptorBuilder.newBuilder(c0) - .setMinVersions(2) - .setMaxVersions(Integer.MAX_VALUE) - .setTimeToLive(ttl) - .setKeepDeletedCells(KeepDeletedCells.TTL) - .build(); + cfd = ColumnFamilyDescriptorBuilder.newBuilder(c0).setMinVersions(2) + .setMaxVersions(Integer.MAX_VALUE).setTimeToLive(ttl) + .setKeepDeletedCells(KeepDeletedCells.TTL).build(); verifyVersionedCellKeyValues(ttl, cfd); } private void verifyVersionedCellKeyValues(int ttl, ColumnFamilyDescriptor cfd) throws IOException { - TableDescriptor htd = TableDescriptorBuilder. 
- newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(cfd).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(cfd).build(); HRegion region = hbu.createLocalHRegion(htd, null, null); @@ -561,7 +542,7 @@ private void verifyVersionedCellKeyValues(int ttl, ColumnFamilyDescriptor cfd) Get get; Result result; - //check we can still see all versions before compaction + // check we can still see all versions before compaction get = new Get(T1); get.readAllVersions(); get.setTimeRange(0, ts); @@ -573,7 +554,7 @@ private void verifyVersionedCellKeyValues(int ttl, ColumnFamilyDescriptor cfd) Assert.assertEquals(startTS, EnvironmentEdgeManager.currentTime()); long expiredTime = EnvironmentEdgeManager.currentTime() - ts - 4; Assert.assertTrue("TTL for T1 has expired", expiredTime < (ttl * 1000)); - //check that nothing was purged yet + // check that nothing was purged yet verifyBeforeCompaction(region, ts); injectEdge.incValue(ttl * 1000); @@ -589,7 +570,7 @@ private void verifyVersionedCellKeyValues(int ttl, ColumnFamilyDescriptor cfd) private void verifyAfterTtl(HRegion region, long ts) throws IOException { Get get; Result result; - //check that after compaction (which is after TTL) that only T1 && T2 were purged + // check that after compaction (which is after TTL) that only T1 && T2 were purged get = new Get(T1); get.readAllVersions(); get.setTimeRange(0, ts); @@ -671,17 +652,16 @@ private void putFourVersions(HRegion region, long ts) throws IOException { region.put(put); } - private void checkResult(Result r, byte[] col, byte[] ... vals) { + private void checkResult(Result r, byte[] col, byte[]... vals) { assertEquals(vals.length, r.size()); List kvs = r.getColumnCells(col, col); assertEquals(kvs.size(), vals.length); - for (int i=0;i(new Put(Bytes.toBytes(i)), null); } MiniBatchOperationInProgress> miniBatch = - new MiniBatchOperationInProgress<>(operations, retCodeDetails, - walEditsFromCoprocessors, 0, 5, 5); + new MiniBatchOperationInProgress<>(operations, retCodeDetails, walEditsFromCoprocessors, 0, + 5, 5); assertEquals(5, miniBatch.size()); assertTrue(Bytes.equals(Bytes.toBytes(0), miniBatch.getOperation(0).getFirst().getRow())); @@ -74,8 +74,8 @@ public void testMiniBatchOperationInProgressMethods() { } catch (ArrayIndexOutOfBoundsException e) { } - miniBatch = new MiniBatchOperationInProgress<>(operations, - retCodeDetails, walEditsFromCoprocessors, 7, 10, 3); + miniBatch = new MiniBatchOperationInProgress<>(operations, retCodeDetails, + walEditsFromCoprocessors, 7, 10, 3); try { miniBatch.setWalEdit(-1, new WALEdit()); fail("Should throw Exception while accessing out of range"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java index 56e7cb9432bf..84880133bc6c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ /** * Test minor compactions */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestMinorCompaction { @ClassRule @@ -86,11 +86,11 @@ public MyCompactionPolicy(Configuration conf, StoreConfigInformation storeConfig @Override public CompactionRequestImpl selectCompaction(Collection candidateFiles, - List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, - boolean forceMajor) throws IOException { + List filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak, + boolean forceMajor) throws IOException { return new CompactionRequestImpl( - candidateFiles.stream().filter(f -> !filesCompacting.contains(f)) - .limit(COMPACTION_THRESHOLD).collect(Collectors.toList())); + candidateFiles.stream().filter(f -> !filesCompacting.contains(f)) + .limit(COMPACTION_THRESHOLD).collect(Collectors.toList())); } } @@ -109,7 +109,7 @@ public static void setUpBeforeClass() { SECOND_ROW_BYTES[START_KEY_BYTES.length - 1]++; THIRD_ROW_BYTES = START_KEY_BYTES.clone(); THIRD_ROW_BYTES[START_KEY_BYTES.length - 1] = - (byte) (THIRD_ROW_BYTES[START_KEY_BYTES.length - 1] + 2); + (byte) (THIRD_ROW_BYTES[START_KEY_BYTES.length - 1] + 2); COL1 = Bytes.toBytes("column1"); COL2 = Bytes.toBytes("column2"); } @@ -147,11 +147,11 @@ public void testMinorCompactionWithDeleteColumn1() throws Exception { public void testMinorCompactionWithDeleteColumn2() throws Exception { Delete dc = new Delete(SECOND_ROW_BYTES); dc.addColumn(fam2, COL2); - /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3. - * we only delete the latest version. One might expect to see only - * versions 1 and 2. HBase differs, and gives us 0, 1 and 2. - * This is okay as well. Since there was no compaction done before the - * delete, version 0 seems to stay on. + /* + * compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3. we only delete the latest + * version. One might expect to see only versions 1 and 2. HBase differs, and gives us 0, 1 and + * 2. This is okay as well. Since there was no compaction done before the delete, version 0 + * seems to stay on. */ testMinorCompactionWithDelete(dc, 3); } @@ -167,8 +167,9 @@ public void testMinorCompactionWithDeleteColumnFamily() throws Exception { public void testMinorCompactionWithDeleteVersion1() throws Exception { Delete deleteVersion = new Delete(SECOND_ROW_BYTES); deleteVersion.addColumns(fam2, COL2, 2); - /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3. - * We delete versions 0 ... 2. So, we still have one remaining. + /* + * compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3. We delete versions 0 ... + * 2. So, we still have one remaining. */ testMinorCompactionWithDelete(deleteVersion, 1); } @@ -178,18 +179,15 @@ public void testMinorCompactionWithDeleteVersion2() throws Exception { Delete deleteVersion = new Delete(SECOND_ROW_BYTES); deleteVersion.addColumn(fam2, COL2, 1); /* - * the table has 4 versions: 0, 1, 2, and 3. - * We delete 1. - * Should have 3 remaining. + * the table has 4 versions: 0, 1, 2, and 3. We delete 1. Should have 3 remaining. */ testMinorCompactionWithDelete(deleteVersion, 3); } /* - * A helper function to test the minor compaction algorithm. We check that - * the delete markers are left behind. 
Takes delete as an argument, which - * can be any delete (row, column, columnfamliy etc), that essentially - * deletes row2 and column2. row1 and column1 should be undeleted + * A helper function to test the minor compaction algorithm. We check that the delete markers are + * left behind. Takes delete as an argument, which can be any delete (row, column, columnfamliy + * etc), that essentially deletes row2 and column2. row1 and column1 should be undeleted */ private void testMinorCompactionWithDelete(Delete delete) throws Exception { testMinorCompactionWithDelete(delete, 0); @@ -215,8 +213,8 @@ private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAft result = r.get(new Get(SECOND_ROW_BYTES).addColumn(fam2, COL2).readVersions(100)); assertEquals(COMPACTION_THRESHOLD, result.size()); - // Now add deletes to memstore and then flush it. That will put us over - // the compaction threshold of 3 store files. Compacting these store files + // Now add deletes to memstore and then flush it. That will put us over + // the compaction threshold of 3 store files. Compacting these store files // should result in a compacted store file that has no references to the // deleted row. r.delete(delete); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java index f059884b28d2..e29d334fea8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,6 @@ import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellUtil; @@ -72,8 +71,7 @@ public abstract class TestMultiColumnScanner { private static final Logger LOG = LoggerFactory.getLogger(TestMultiColumnScanner.class); - private static final String TABLE_NAME = - TestMultiColumnScanner.class.getSimpleName(); + private static final String TABLE_NAME = TestMultiColumnScanner.class.getSimpleName(); static final int MAX_VERSIONS = 50; @@ -81,8 +79,8 @@ public abstract class TestMultiColumnScanner { private static final byte[] FAMILY_BYTES = Bytes.toBytes(FAMILY); /** - * The size of the column qualifier set used. Increasing this parameter - * exponentially increases test time. + * The size of the column qualifier set used. Increasing this parameter exponentially increases + * test time. */ private static final int NUM_COLUMNS = 8; @@ -94,11 +92,11 @@ public abstract class TestMultiColumnScanner { private static final long BIG_LONG = 9111222333444555666L; /** - * Timestamps to test with. Cannot use {@link Long#MAX_VALUE} here, because - * it will be replaced by an timestamp auto-generated based on the time. + * Timestamps to test with. Cannot use {@link Long#MAX_VALUE} here, because it will be replaced by + * an timestamp auto-generated based on the time. 
*/ - private static final long[] TIMESTAMPS = new long[] { 1, 3, 5, - Integer.MAX_VALUE, BIG_LONG, Long.MAX_VALUE - 1 }; + private static final long[] TIMESTAMPS = + new long[] { 1, 3, 5, Integer.MAX_VALUE, BIG_LONG, Long.MAX_VALUE - 1 }; /** The probability that a column is skipped in a store file. */ private static final double COLUMN_SKIP_IN_STORE_FILE_PROB = 0.7; @@ -141,10 +139,10 @@ public static Collection generateParams(Compression.Algorithm algo, public void testMultiColumnScanner() throws IOException { TEST_UTIL.getConfiguration().setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, 10); HRegion region = TEST_UTIL.createTestRegion(TABLE_NAME, - ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_BYTES).setCompressionType(comprAlgo) - .setBloomFilterType(bloomType).setMaxVersions(MAX_VERSIONS) - .setDataBlockEncoding(dataBlockEncoding).build(), - BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration())); + ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_BYTES).setCompressionType(comprAlgo) + .setBloomFilterType(bloomType).setMaxVersions(MAX_VERSIONS) + .setDataBlockEncoding(dataBlockEncoding).build(), + BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration())); List rows = sequentialStrings("row", NUM_ROWS); List qualifiers = sequentialStrings("qual", NUM_COLUMNS); List kvs = new ArrayList<>(); @@ -159,16 +157,14 @@ public void testMultiColumnScanner() throws IOException { for (String qual : qualifiers) { // This is where we decide to include or not include this column into // this store file, regardless of row and timestamp. - if (rand.nextDouble() < COLUMN_SKIP_IN_STORE_FILE_PROB) - continue; + if (rand.nextDouble() < COLUMN_SKIP_IN_STORE_FILE_PROB) continue; byte[] qualBytes = Bytes.toBytes(qual); for (String row : rows) { Put p = new Put(Bytes.toBytes(row)); for (long ts : TIMESTAMPS) { String value = createValue(row, qual, ts); - KeyValue kv = KeyValueTestUtil.create(row, FAMILY, qual, ts, - value); + KeyValue kv = KeyValueTestUtil.create(row, FAMILY, qual, ts, value); assertEquals(kv.getTimestamp(), ts); p.add(kv); String keyAsString = kv.toString(); @@ -186,12 +182,10 @@ public void testMultiColumnScanner() throws IOException { d.addColumns(FAMILY_BYTES, qualBytes, ts); String rowAndQual = row + "_" + qual; Long whenDeleted = lastDelTimeMap.get(rowAndQual); - lastDelTimeMap.put(rowAndQual, whenDeleted == null ? ts - : Math.max(ts, whenDeleted)); + lastDelTimeMap.put(rowAndQual, whenDeleted == null ? 
ts : Math.max(ts, whenDeleted)); deletedSomething = true; } - if (deletedSomething) - region.delete(d); + if (deletedSomething) region.delete(d); } } region.flush(true); @@ -220,25 +214,25 @@ public void testMultiColumnScanner() throws IOException { int kvPos = 0; int numResults = 0; - String queryInfo = "columns queried: " + qualSet + " (columnBitMask=" - + columnBitMask + "), maxVersions=" + maxVersions; + String queryInfo = "columns queried: " + qualSet + " (columnBitMask=" + columnBitMask + + "), maxVersions=" + maxVersions; while (scanner.next(results) || results.size() > 0) { for (Cell kv : results) { while (kvPos < kvs.size() - && !matchesQuery(kvs.get(kvPos), qualSet, maxVersions, - lastDelTimeMap)) { + && !matchesQuery(kvs.get(kvPos), qualSet, maxVersions, lastDelTimeMap)) { ++kvPos; } String rowQual = getRowQualStr(kv); String deleteInfo = ""; Long lastDelTS = lastDelTimeMap.get(rowQual); if (lastDelTS != null) { - deleteInfo = "; last timestamp when row/column " + rowQual - + " was deleted: " + lastDelTS; + deleteInfo = + "; last timestamp when row/column " + rowQual + " was deleted: " + lastDelTS; } - assertTrue("Scanner returned additional key/value: " + kv + ", " - + queryInfo + deleteInfo + ";", kvPos < kvs.size()); + assertTrue( + "Scanner returned additional key/value: " + kv + ", " + queryInfo + deleteInfo + ";", + kvPos < kvs.size()); assertTrue("Scanner returned wrong key/value; " + queryInfo + deleteInfo + ";", PrivateCellUtil.equalsIgnoreMvccVersion(kvs.get(kvPos), (kv))); ++kvPos; @@ -248,17 +242,16 @@ public void testMultiColumnScanner() throws IOException { } for (; kvPos < kvs.size(); ++kvPos) { KeyValue remainingKV = kvs.get(kvPos); - assertFalse("Matching column not returned by scanner: " - + remainingKV + ", " + queryInfo + ", results returned: " - + numResults, matchesQuery(remainingKV, qualSet, maxVersions, - lastDelTimeMap)); + assertFalse( + "Matching column not returned by scanner: " + remainingKV + ", " + queryInfo + + ", results returned: " + numResults, + matchesQuery(remainingKV, qualSet, maxVersions, lastDelTimeMap)); } } } - assertTrue("This test is supposed to delete at least some row/column " + - "pairs", lastDelTimeMap.size() > 0); - LOG.info("Number of row/col pairs deleted at least once: " + - lastDelTimeMap.size()); + assertTrue("This test is supposed to delete at least some row/column " + "pairs", + lastDelTimeMap.size() > 0); + LOG.info("Number of row/col pairs deleted at least once: " + lastDelTimeMap.size()); HBaseTestingUtil.closeRegionAndWAL(region); } @@ -268,18 +261,16 @@ private static String getRowQualStr(Cell kv) { return rowStr + "_" + qualStr; } - private static boolean matchesQuery(KeyValue kv, Set qualSet, - int maxVersions, Map lastDelTimeMap) { + private static boolean matchesQuery(KeyValue kv, Set qualSet, int maxVersions, + Map lastDelTimeMap) { Long lastDelTS = lastDelTimeMap.get(getRowQualStr(kv)); long ts = kv.getTimestamp(); - return qualSet.contains(qualStr(kv)) - && ts >= TIMESTAMPS[TIMESTAMPS.length - maxVersions] + return qualSet.contains(qualStr(kv)) && ts >= TIMESTAMPS[TIMESTAMPS.length - maxVersions] && (lastDelTS == null || ts > lastDelTS); } private static String qualStr(KeyValue kv) { - return Bytes.toString(kv.getQualifierArray(), kv.getQualifierOffset(), - kv.getQualifierLength()); + return Bytes.toString(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); } static String createValue(String row, String qual, long ts) { @@ -304,4 +295,3 @@ private static List sequentialStrings(String 
prefix, int n) { return lst; } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithAlgoGZAndNoDataEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithAlgoGZAndNoDataEncoding.java index cc68c1153903..61d79907d538 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithAlgoGZAndNoDataEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithAlgoGZAndNoDataEncoding.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.util.Collection; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.testclassification.LargeTests; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithAlgoGZAndUseDataEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithAlgoGZAndUseDataEncoding.java index c817da257ecd..940d1c1c429b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithAlgoGZAndUseDataEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithAlgoGZAndUseDataEncoding.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.util.Collection; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.testclassification.LargeTests; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithNoneAndNoDataEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithNoneAndNoDataEncoding.java index 4f6aa90b8c5c..f0a21f4de5fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithNoneAndNoDataEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithNoneAndNoDataEncoding.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.util.Collection; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.testclassification.LargeTests; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithNoneAndUseDataEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithNoneAndUseDataEncoding.java index f1fd30d41cfc..fa66bb9710c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithNoneAndUseDataEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScannerWithNoneAndUseDataEncoding.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.util.Collection; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.testclassification.LargeTests; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java index defea10ebd9f..98c1f7601e63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java @@ -68,7 +68,7 @@ public class TestMultiLogThreshold { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMultiLogThreshold.class); + HBaseClassTestRule.forClass(TestMultiLogThreshold.class); private static final TableName NAME = TableName.valueOf("tableName"); private static final byte[] TEST_FAM = Bytes.toBytes("fam"); @@ -109,7 +109,7 @@ public void setupTest() throws Exception { util = new HBaseTestingUtil(); conf = util.getConfiguration(); threshold = - conf.getInt(HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); + conf.getInt(HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); conf.setBoolean("hbase.rpc.rows.size.threshold.reject", rejectLargeBatchOp); util.startMiniCluster(); util.createTable(NAME, TEST_FAM); @@ -122,20 +122,20 @@ public void setupTest() throws Exception { @Override public Void answer(InvocationOnMock invocation) throws Throwable { org.apache.logging.log4j.core.LogEvent logEvent = - invocation.getArgument(0, org.apache.logging.log4j.core.LogEvent.class); + invocation.getArgument(0, org.apache.logging.log4j.core.LogEvent.class); logs.add( new LevelAndMessage(logEvent.getLevel(), logEvent.getMessage().getFormattedMessage())); return null; } }).when(appender).append(any(org.apache.logging.log4j.core.LogEvent.class)); ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger(RSRpcServices.class)).addAppender(appender); + .getLogger(RSRpcServices.class)).addAppender(appender); } @After public void tearDown() throws Exception { ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger(RSRpcServices.class)).removeAppender(appender); + 
.getLogger(RSRpcServices.class)).removeAppender(appender); util.shutdownMiniCluster(); } @@ -149,7 +149,7 @@ private enum ActionType { * Actions */ private void sendMultiRequest(int rows, ActionType actionType) - throws ServiceException, IOException { + throws ServiceException, IOException { RpcController rpcc = Mockito.mock(HBaseRpcController.class); MultiRequest.Builder builder = MultiRequest.newBuilder(); int numRAs = 1; @@ -180,8 +180,8 @@ private void sendMultiRequest(int rows, ActionType actionType) private void assertLogBatchWarnings(boolean expected) { boolean actual = false; for (LevelAndMessage event : logs) { - if (event.level == org.apache.logging.log4j.Level.WARN && - event.msg.contains("Large batch operation detected")) { + if (event.level == org.apache.logging.log4j.Level.WARN + && event.msg.contains("Large batch operation detected")) { actual = true; break; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControl.java index ca5e83b26eab..19296ac4ffad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,10 +29,10 @@ import org.junit.experimental.categories.Category; /** - * This is a hammer test that verifies MultiVersionConcurrencyControl in a - * multiple writer single reader scenario. + * This is a hammer test that verifies MultiVersionConcurrencyControl in a multiple writer single + * reader scenario. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestMultiVersionConcurrencyControl { @ClassRule @@ -55,8 +55,7 @@ static class Writer implements Runnable { @Override public void run() { while (!finished.get()) { - MultiVersionConcurrencyControl.WriteEntry e = - mvcc.begin(); + MultiVersionConcurrencyControl.WriteEntry e = mvcc.begin(); // System.out.println("Begin write: " + e.getWriteNumber()); // 10 usec - 500usec (including 0) int sleepTime = ThreadLocalRandom.current().nextInt(500); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControlBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControlBasic.java index 6ac8aaf814e0..084c76e1fcd2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControlBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControlBasic.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ * Very basic tests. * @see TestMultiVersionConcurrencyControl for more. 
*/ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestMultiVersionConcurrencyControlBasic { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMutateRowsRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMutateRowsRecovery.java index 5a2ce0b46b59..2ad93a21cce8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMutateRowsRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMutateRowsRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -103,7 +103,7 @@ public void MutateRowsAndCheckPostKill() throws IOException, InterruptedExceptio admin = connection.getAdmin(); hTable = connection.getTable(tableName); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); admin.createTable(tableDescriptor); // Add a multi diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java index 582f6c9542e0..91ca11767364 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -79,8 +79,8 @@ public static void setDownAfterClass() throws Exception { private Table createTable() throws IOException { TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(FAMILY).setNewVersionBehavior(true).setMaxVersions(3).build()).build(); + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILY).setNewVersionBehavior(true).setMaxVersions(3).build()).build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); return TEST_UTIL.getConnection().getTable(tableName); } @@ -312,8 +312,8 @@ public void testExplicitColum() throws IOException { public void testGetColumnHint() throws IOException { createTable(); try (Table t = - TEST_UTIL.getConnection().getTableBuilder(TableName.valueOf(name.getMethodName()), null) - .setOperationTimeout(10000).setRpcTimeout(10000).build()) { + TEST_UTIL.getConnection().getTableBuilder(TableName.valueOf(name.getMethodName()), null) + .setOperationTimeout(10000).setRpcTimeout(10000).build()) { t.put(new Put(ROW).addColumn(FAMILY, col1, 100, value)); t.put(new Put(ROW).addColumn(FAMILY, col1, 101, value)); t.put(new Put(ROW).addColumn(FAMILY, col1, 102, value)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNotCleanupCompactedFileWhenRegionWarmup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNotCleanupCompactedFileWhenRegionWarmup.java index 5a96152d9236..ddbf1009abd1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNotCleanupCompactedFileWhenRegionWarmup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNotCleanupCompactedFileWhenRegionWarmup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.Assert.assertEquals; + +import java.util.ArrayList; +import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -43,11 +47,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; - @Category({ LargeTests.class, RegionServerTests.class }) public class TestNotCleanupCompactedFileWhenRegionWarmup { private static final Logger LOG = @@ -72,8 +71,8 @@ public static void beforeClass() throws Exception { TEST_UTIL = new HBaseTestingUtil(); // Set the scanner lease to 20min, so the scanner can't be closed by RegionServer TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 1200000); - TEST_UTIL.getConfiguration() - .setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100); + TEST_UTIL.getConfiguration().setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, + 100); TEST_UTIL.getConfiguration().set("dfs.blocksize", "64000"); TEST_UTIL.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024"); TEST_UTIL.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY, "0"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestObservedExceptionsInBatch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestObservedExceptionsInBatch.java index a84a9a9c8f54..8e30d071eb8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestObservedExceptionsInBatch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestObservedExceptionsInBatch.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenRegionFailedMemoryLeak.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenRegionFailedMemoryLeak.java index 05246c9dcfd0..1c75bef87671 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenRegionFailedMemoryLeak.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenRegionFailedMemoryLeak.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public class TestOpenRegionFailedMemoryLeak { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestOpenRegionFailedMemoryLeak.class); + HBaseClassTestRule.forClass(TestOpenRegionFailedMemoryLeak.class); private static final Logger LOG = LoggerFactory.getLogger(TestOpenRegionFailedMemoryLeak.class); @@ -74,13 +74,13 @@ public void testOpenRegionFailedMemoryLeak() throws Exception { final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName)); TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.valueOf("testOpenRegionFailed")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)) - .setValue("COPROCESSOR$1", "hdfs://test/test.jar|test||").build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf("testOpenRegionFailed")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)) + .setValue("COPROCESSOR$1", "hdfs://test/test.jar|test||").build(); RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); ScheduledExecutorService executor = - CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor(); + CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor(); for (int i = 0; i < 20; i++) { try { HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null); @@ -97,8 +97,9 @@ public void testOpenRegionFailedMemoryLeak() throws Exception { field.setAccessible(true); BlockingQueue workQueue = (BlockingQueue) field.get(executor); // there are still two task not cancel, can not cause to memory lack - Assert.assertTrue("ScheduledExecutor#workQueue should equals 2, now is " + - workQueue.size() + ", please check region is close", 2 == workQueue.size()); + Assert.assertTrue("ScheduledExecutor#workQueue should equals 2, now is " + workQueue.size() + + ", please check region is close", + 2 == workQueue.size()); found = true; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenSeqNumUnexpectedIncrease.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenSeqNumUnexpectedIncrease.java index 1a4eb1f31e38..461fcf1a4d05 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenSeqNumUnexpectedIncrease.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenSeqNumUnexpectedIncrease.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestOpenSeqNumUnexpectedIncrease { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestOpenSeqNumUnexpectedIncrease.class); + HBaseClassTestRule.forClass(TestOpenSeqNumUnexpectedIncrease.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -78,7 +78,7 @@ protected void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException } public Map> close() throws IOException { - //skip close + // skip close return null; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java index 75b4ad875914..6db8a4850213 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,9 +54,8 @@ /** * Testing of multiPut in parallel. - * */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestParallelPut { @ClassRule @@ -64,7 +63,8 @@ public class TestParallelPut { HBaseClassTestRule.forClass(TestParallelPut.class); private static final Logger LOG = LoggerFactory.getLogger(TestParallelPut.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private HRegion region = null; private static HBaseTestingUtil HBTU = new HBaseTestingUtil(); @@ -77,8 +77,8 @@ public class TestParallelPut { static final byte[] qual3 = Bytes.toBytes("qual3"); static final byte[] value1 = Bytes.toBytes("value1"); static final byte[] value2 = Bytes.toBytes("value2"); - static final byte [] row = Bytes.toBytes("rowA"); - static final byte [] row2 = Bytes.toBytes("rowB"); + static final byte[] row = Bytes.toBytes("rowA"); + static final byte[] row2 = Bytes.toBytes("rowB"); @BeforeClass public static void beforeClass() { @@ -86,7 +86,6 @@ public static void beforeClass() { HBTU.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, THREADS100); } - /** * @see org.apache.hadoop.hbase.HBaseTestCase#setUp() */ @@ -158,17 +157,15 @@ public void testParallelPuts() throws IOException { try { all[i].join(); } catch (InterruptedException e) { - LOG.warn("testParallelPuts encountered InterruptedException." + - " Ignoring....", e); + LOG.warn("testParallelPuts encountered InterruptedException." 
+ " Ignoring....", e); } } - LOG.info("testParallelPuts successfully verified " + - (numOps * THREADS100) + " put operations."); + LOG.info( + "testParallelPuts successfully verified " + (numOps * THREADS100) + " put operations."); } - - private static void assertGet(final HRegion region, byte [] row, byte [] familiy, - byte[] qualifier, byte[] value) throws IOException { + private static void assertGet(final HRegion region, byte[] row, byte[] familiy, byte[] qualifier, + byte[] value) throws IOException { // run a get and see if the value matches Get get = new Get(row); get.addColumn(familiy, qualifier); @@ -180,13 +177,12 @@ private static void assertGet(final HRegion region, byte [] row, byte [] familiy assertTrue(Bytes.compareTo(r, value) == 0); } - private HRegion initHRegion(byte [] tableName, String callingMethod, byte[] ... families) + private HRegion initHRegion(byte[] tableName, String callingMethod, byte[]... families) throws IOException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); - for(byte [] family : families) { - builder.setColumnFamily( - ColumnFamilyDescriptorBuilder.of(family)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)); + for (byte[] family : families) { + builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } TableDescriptor tableDescriptor = builder.build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); @@ -201,23 +197,23 @@ public static class Putter extends Thread { private final HRegion region; private final int threadNumber; private final int numOps; - byte [] rowkey = null; + byte[] rowkey = null; public Putter(HRegion region, int threadNumber, int numOps) { this.region = region; this.threadNumber = threadNumber; this.numOps = numOps; - this.rowkey = Bytes.toBytes((long)threadNumber); // unique rowid per thread + this.rowkey = Bytes.toBytes((long) threadNumber); // unique rowid per thread setDaemon(true); } @Override public void run() { byte[] value = new byte[100]; - Put[] in = new Put[1]; + Put[] in = new Put[1]; // iterate for the specified number of operations - for (int i=0; i cfFlushSizeLowerBound); - assertTrue(desiredRegion.getStore(FAMILY2).getMemStoreSize().getHeapSize() < cfFlushSizeLowerBound); - assertTrue(desiredRegion.getStore(FAMILY3).getMemStoreSize().getHeapSize() < cfFlushSizeLowerBound); + assertTrue( + desiredRegion.getStore(FAMILY1).getMemStoreSize().getHeapSize() > cfFlushSizeLowerBound); + assertTrue( + desiredRegion.getStore(FAMILY2).getMemStoreSize().getHeapSize() < cfFlushSizeLowerBound); + assertTrue( + desiredRegion.getStore(FAMILY3).getMemStoreSize().getHeapSize() < cfFlushSizeLowerBound); table.put(createPut(1, 12345678)); // Make numRolledLogFiles greater than maxLogs desiredRegionAndServer.getSecond().getWalRoller().requestRollAll(); @@ -557,9 +560,9 @@ public void testCompareStoreFileCount() throws Exception { ConstantSizeRegionSplitPolicy.class.getName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLENAME) - .setCompactionEnabled(false).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY2)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY3)).build(); + .setCompactionEnabled(false).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY2)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY3)).build(); 
LOG.info("==============Test with selective flush disabled==============="); int cf1StoreFileCount = -1; @@ -570,8 +573,8 @@ public void testCompareStoreFileCount() throws Exception { int cf3StoreFileCount1 = -1; try { TEST_UTIL.startMiniCluster(1); - TEST_UTIL.getAdmin().createNamespace( - NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); + TEST_UTIL.getAdmin() + .createNamespace(NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); TEST_UTIL.getAdmin().createTable(tableDescriptor); TEST_UTIL.waitTableAvailable(TABLENAME); Connection conn = ConnectionFactory.createConnection(conf); @@ -594,8 +597,8 @@ public void testCompareStoreFileCount() throws Exception { conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 0); try { TEST_UTIL.startMiniCluster(1); - TEST_UTIL.getAdmin().createNamespace( - NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); + TEST_UTIL.getAdmin() + .createNamespace(NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); TEST_UTIL.getAdmin().createTable(tableDescriptor); Connection conn = ConnectionFactory.createConnection(conf); Table table = conn.getTable(TABLENAME); @@ -611,12 +614,12 @@ public void testCompareStoreFileCount() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - LOG.info("disable selective flush: " + Bytes.toString(FAMILY1) + "=>" + cf1StoreFileCount - + ", " + Bytes.toString(FAMILY2) + "=>" + cf2StoreFileCount + ", " - + Bytes.toString(FAMILY3) + "=>" + cf3StoreFileCount); - LOG.info("enable selective flush: " + Bytes.toString(FAMILY1) + "=>" + cf1StoreFileCount1 - + ", " + Bytes.toString(FAMILY2) + "=>" + cf2StoreFileCount1 + ", " - + Bytes.toString(FAMILY3) + "=>" + cf3StoreFileCount1); + LOG.info("disable selective flush: " + Bytes.toString(FAMILY1) + "=>" + cf1StoreFileCount + ", " + + Bytes.toString(FAMILY2) + "=>" + cf2StoreFileCount + ", " + Bytes.toString(FAMILY3) + "=>" + + cf3StoreFileCount); + LOG.info("enable selective flush: " + Bytes.toString(FAMILY1) + "=>" + cf1StoreFileCount1 + ", " + + Bytes.toString(FAMILY2) + "=>" + cf2StoreFileCount1 + ", " + Bytes.toString(FAMILY3) + + "=>" + cf3StoreFileCount1); // small CF will have less store files. 
assertTrue(cf1StoreFileCount1 < cf1StoreFileCount); assertTrue(cf2StoreFileCount1 < cf2StoreFileCount); @@ -626,12 +629,13 @@ public static void main(String[] args) throws Exception { int numRegions = Integer.parseInt(args[0]); long numRows = Long.parseLong(args[1]); - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLENAME) - .setMaxFileSize(10L * 1024 * 1024 * 1024) - .setValue(TableDescriptorBuilder.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY2)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY3)).build(); + TableDescriptor tableDescriptor = + TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(10L * 1024 * 1024 * 1024) + .setValue(TableDescriptorBuilder.SPLIT_POLICY, + ConstantSizeRegionSplitPolicy.class.getName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY2)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY3)).build(); Configuration conf = HBaseConfiguration.create(); Connection conn = ConnectionFactory.createConnection(conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java index a6767429f374..744b0d4432c1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,7 +56,7 @@ public class TestPriorityRpc { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPriorityRpc.class); + HBaseClassTestRule.forClass(TestPriorityRpc.class); private static Configuration CONF = HBaseConfiguration.create(); @@ -72,7 +72,7 @@ public void testQosFunctionForMeta() throws IOException { RegionSpecifier.Builder regionSpecifierBuilder = RegionSpecifier.newBuilder(); regionSpecifierBuilder.setType(RegionSpecifierType.REGION_NAME); ByteString name = - UnsafeByteOperations.unsafeWrap(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); + UnsafeByteOperations.unsafeWrap(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); regionSpecifierBuilder.setValue(name); RegionSpecifier regionSpecifier = regionSpecifierBuilder.build(); getRequestBuilder.setRegion(regionSpecifier); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java index 633adb0c404a..db12bce2bead 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSChoresScheduled.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.lang.reflect.Field; - import org.apache.hadoop.hbase.ExecutorStatusChore; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -37,12 +35,12 @@ /** * Tests to validate if HRegionServer default chores are scheduled */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRSChoresScheduled { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSChoresScheduled.class); + HBaseClassTestRule.forClass(TestRSChoresScheduled.class); private static HRegionServer hRegionServer; @@ -61,8 +59,7 @@ public static void tearDown() throws Exception { private static class TestChoreField { - private E getChoreObj(String fieldName) throws NoSuchFieldException, - IllegalAccessException { + private E getChoreObj(String fieldName) throws NoSuchFieldException, IllegalAccessException { Field hRegionServerField = HRegionServer.class.getDeclaredField(fieldName); hRegionServerField.setAccessible(true); E choreFieldVal = (E) hRegionServerField.get(hRegionServer); @@ -80,35 +77,32 @@ private void testIfChoreScheduled(E choreObj) { public void testDefaultScheduledChores() throws Exception { // test if compactedHFilesDischarger chore is scheduled by default in HRegionServer init TestChoreField compactedHFilesDischargerTestChoreField = - new TestChoreField<>(); + new TestChoreField<>(); CompactedHFilesDischarger compactedHFilesDischarger = - compactedHFilesDischargerTestChoreField.getChoreObj("compactedFileDischarger"); + compactedHFilesDischargerTestChoreField.getChoreObj("compactedFileDischarger"); compactedHFilesDischargerTestChoreField.testIfChoreScheduled(compactedHFilesDischarger); // test if compactionChecker chore is scheduled by default in HRegionServer init TestChoreField compactionCheckerTestChoreField = new TestChoreField<>(); ScheduledChore compactionChecker = - compactionCheckerTestChoreField.getChoreObj("compactionChecker"); + compactionCheckerTestChoreField.getChoreObj("compactionChecker"); compactionCheckerTestChoreField.testIfChoreScheduled(compactionChecker); // test if periodicFlusher chore is scheduled by default in HRegionServer init - TestChoreField periodicMemstoreFlusherTestChoreField = - new TestChoreField<>(); + TestChoreField periodicMemstoreFlusherTestChoreField = new TestChoreField<>(); ScheduledChore periodicFlusher = - periodicMemstoreFlusherTestChoreField.getChoreObj("periodicFlusher"); + periodicMemstoreFlusherTestChoreField.getChoreObj("periodicFlusher"); periodicMemstoreFlusherTestChoreField.testIfChoreScheduled(periodicFlusher); // test if nonceManager chore is scheduled by default in HRegionServer init TestChoreField nonceManagerTestChoreField = new TestChoreField<>(); - ScheduledChore nonceManagerChore = - nonceManagerTestChoreField.getChoreObj("nonceManagerChore"); + ScheduledChore nonceManagerChore = nonceManagerTestChoreField.getChoreObj("nonceManagerChore"); nonceManagerTestChoreField.testIfChoreScheduled(nonceManagerChore); // test if executorStatusChore chore is scheduled by default in HRegionServer init - TestChoreField executorStatusChoreTestChoreField = - new TestChoreField<>(); + TestChoreField executorStatusChoreTestChoreField = new TestChoreField<>(); ExecutorStatusChore executorStatusChore = - executorStatusChoreTestChoreField.getChoreObj("executorStatusChore"); + executorStatusChoreTestChoreField.getChoreObj("executorStatusChore"); 
executorStatusChoreTestChoreField.testIfChoreScheduled(executorStatusChore); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java index 7cffc39c165a..f4213cd1a074 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,10 +53,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; /** - * Tests that a regionserver that dies after reporting for duty gets removed - * from list of online regions. See HBASE-9593. + * Tests that a regionserver that dies after reporting for duty gets removed from list of online + * regions. See HBASE-9593. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) @Ignore("See HBASE-19515") public class TestRSKilledWhenInitializing { @@ -85,7 +85,7 @@ public class TestRSKilledWhenInitializing { */ @Test public void testRSTerminationAfterRegisteringToMasterBeforeCreatingEphemeralNode() - throws Exception { + throws Exception { // Create config to use for this cluster Configuration conf = HBaseConfiguration.create(); conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1); @@ -128,10 +128,11 @@ public void testRSTerminationAfterRegisteringToMasterBeforeCreatingEphemeralNode // Find non-meta region (namespace?) and assign to the killed server. That'll trigger cleanup. Map assignments = null; do { - assignments = master.getMaster().getAssignmentManager().getRegionStates().getRegionAssignments(); + assignments = + master.getMaster().getAssignmentManager().getRegionStates().getRegionAssignments(); } while (assignments == null || assignments.size() < 2); RegionInfo hri = null; - for (Map.Entry e: assignments.entrySet()) { + for (Map.Entry e : assignments.entrySet()) { if (e.getKey().isMetaRegion()) continue; hri = e.getKey(); break; @@ -142,7 +143,7 @@ public void testRSTerminationAfterRegisteringToMasterBeforeCreatingEphemeralNode master.getMaster().getServerManager().getOnlineServersList().size()); LOG.info("Move " + hri.getEncodedName() + " to " + killedRS.get()); master.getMaster().move(hri.getEncodedNameAsBytes(), - Bytes.toBytes(killedRS.get().toString())); + Bytes.toBytes(killedRS.get().toString())); // TODO: This test could do more to verify fix. It could create a table // and do round-robin assign. It should fail if zombie RS. HBASE-19515. @@ -163,8 +164,8 @@ public void testRSTerminationAfterRegisteringToMasterBeforeCreatingEphemeralNode } /** - * Start Master. Get as far as the state where Master is waiting on - * RegionServers to check in, then return. + * Start Master. Get as far as the state where Master is waiting on RegionServers to check in, + * then return. */ private MasterThread startMaster(MasterThread master) { master.start(); @@ -197,14 +198,13 @@ public void waiting() { * notices and so removes the region from its set of online regionservers. 
*/ static class RegisterAndDieRegionServer - extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer { + extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer { public RegisterAndDieRegionServer(Configuration conf) throws IOException, InterruptedException { super(conf); } @Override - protected void handleReportForDutyResponse(RegionServerStartupResponse c) - throws IOException { + protected void handleReportForDutyResponse(RegionServerStartupResponse c) throws IOException { if (killedRS.compareAndSet(null, getServerName())) { // Make sure Master is up so it will see the removal of the ephemeral znode for this RS. while (!masterActive.get()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSQosFunction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSQosFunction.java index ba64a29cfc61..fad7d0414706 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSQosFunction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSQosFunction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,10 +35,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; /** - * Basic test that qos function is sort of working; i.e. a change in method naming style - * over in pb doesn't break it. + * Basic test that qos function is sort of working; i.e. a change in method naming style over in pb + * doesn't break it. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRSQosFunction extends QosTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -64,7 +64,7 @@ public void testPriority() { checkMethod(conf, "OpenRegion", HConstants.ADMIN_QOS, qosFunction); // Check multi works. checkMethod(conf, "Multi", HConstants.NORMAL_QOS, qosFunction, - MultiRequest.getDefaultInstance()); + MultiRequest.getDefaultInstance()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java index 9a2456d207d8..7a23710c05b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; + import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Optional; @@ -37,17 +38,17 @@ /** * Test parts of {@link RSRpcServices} */ -@Category({ RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRSRpcServices { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSRpcServices.class); + HBaseClassTestRule.forClass(TestRSRpcServices.class); private static final Logger LOG = LoggerFactory.getLogger(TestRSRpcServices.class); /** - * Simple test of the toString on RegionScannerHolder works. - * Just creates one and calls #toString on it. + * Simple test of the toString on RegionScannerHolder works. Just creates one and calls #toString + * on it. 
*/ @Test public void testRegionScannerHolderToString() throws UnknownHostException { @@ -65,8 +66,7 @@ public void testRegionScannerHolderToString() throws UnknownHostException { HRegion region = Mockito.mock(HRegion.class); Mockito.when(region.getRegionInfo()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO); RSRpcServices.RegionScannerHolder rsh = new RSRpcServices.RegionScannerHolder(null, region, - null, null, false, false, clientIpAndPort, - userNameTest); + null, null, false, false, clientIpAndPort, userNameTest); LOG.info("rsh: {}", rsh); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSSnapshotVerifier.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSSnapshotVerifier.java index 39b1418d1502..f5f5bf770591 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSSnapshotVerifier.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSSnapshotVerifier.java @@ -43,6 +43,7 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; @@ -52,15 +53,15 @@ public class TestRSSnapshotVerifier { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSSnapshotVerifier.class); + HBaseClassTestRule.forClass(TestRSSnapshotVerifier.class); private HBaseTestingUtil TEST_UTIL; private final TableName tableName = TableName.valueOf("TestRSSnapshotVerifier"); private final byte[] cf = Bytes.toBytes("cf"); private final SnapshotDescription snapshot = - new SnapshotDescription("test-snapshot", tableName, SnapshotType.FLUSH); + new SnapshotDescription("test-snapshot", tableName, SnapshotType.FLUSH); private SnapshotProtos.SnapshotDescription snapshotProto = - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); @Before public void setup() throws Exception { @@ -81,28 +82,27 @@ public void setup() throws Exception { workingDirFs.mkdirs(workingDir); } ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(snapshot.getName()); - SnapshotManifest manifest = SnapshotManifest - .create(conf, workingDirFs, workingDir, snapshotProto, monitor); - manifest.addTableDescriptor(TEST_UTIL.getHBaseCluster() - .getMaster().getTableDescriptors().get(tableName)); + SnapshotManifest manifest = + SnapshotManifest.create(conf, workingDirFs, workingDir, snapshotProto, monitor); + manifest.addTableDescriptor( + TEST_UTIL.getHBaseCluster().getMaster().getTableDescriptors().get(tableName)); SnapshotDescriptionUtils.writeSnapshotInfo(snapshotProto, workingDir, workingDirFs); - TEST_UTIL.getHBaseCluster() - .getRegions(tableName).forEach(r -> { - try { - r.addRegionToSnapshot(snapshotProto, monitor); - } catch (IOException e) { - LOG.warn("Failed snapshot region {}", r.getRegionInfo()); - } - }); + TEST_UTIL.getHBaseCluster().getRegions(tableName).forEach(r -> { + try { + r.addRegionToSnapshot(snapshotProto, monitor); + } catch (IOException e) { + LOG.warn("Failed snapshot region {}", r.getRegionInfo()); + } + }); manifest.consolidate(); } @Test(expected = org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException.class) public void testVerifyStoreFile() throws Exception { - RSSnapshotVerifier verifier = TEST_UTIL - .getHBaseCluster().getRegionServer(0).getRsSnapshotVerifier(); + RSSnapshotVerifier verifier = + 
TEST_UTIL.getHBaseCluster().getRegionServer(0).getRsSnapshotVerifier(); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(tableName).stream() - .filter(r -> !r.getStore(cf).getStorefiles().isEmpty()).findFirst().get(); + .filter(r -> !r.getStore(cf).getStorefiles().isEmpty()).findFirst().get(); Path filePath = new ArrayList<>(region.getStore(cf).getStorefiles()).get(0).getPath(); TEST_UTIL.getDFSCluster().getFileSystem().delete(filePath, true); LOG.info("delete store file {}", filePath); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java index c0e72cbf94e9..6b5c8b9c9efa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public class TestReadAndWriteRegionInfoFile { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReadAndWriteRegionInfoFile.class); + HBaseClassTestRule.forClass(TestReadAndWriteRegionInfoFile.class); private static final HBaseCommonTestingUtil UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java index cb8ec4fcf146..9f76788434a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -64,7 +65,7 @@ /** * Tests around replay of recovered.edits content. */ -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestRecoveredEdits { @ClassRule @@ -76,16 +77,16 @@ public class TestRecoveredEdits { private static BlockCache blockCache; - @Rule public TestName testName = new TestName(); + @Rule + public TestName testName = new TestName(); /** - * Path to a recovered.edits file in hbase-server test resources folder. - * This is a little fragile getting this path to a file of 10M of edits. + * Path to a recovered.edits file in hbase-server test resources folder. This is a little fragile + * getting this path to a file of 10M of edits. */ @SuppressWarnings("checkstyle:VisibilityModifier") public static final Path RECOVEREDEDITS_PATH = new Path( - System.getProperty("test.build.classes", "target/test-classes"), - "0000000000000016310"); + System.getProperty("test.build.classes", "target/test-classes"), "0000000000000016310"); /** * Name of table referenced by edits in the recovered.edits file. @@ -95,11 +96,11 @@ public class TestRecoveredEdits { /** * Column family referenced by edits in the recovered.edits file. 
*/ - public static final byte [] RECOVEREDEDITS_COLUMNFAMILY = Bytes.toBytes("meta"); + public static final byte[] RECOVEREDEDITS_COLUMNFAMILY = Bytes.toBytes("meta"); public static final byte[][] RECOVEREDITS_COLUMNFAMILY_ARRAY = - new byte[][] {RECOVEREDEDITS_COLUMNFAMILY}; + new byte[][] { RECOVEREDEDITS_COLUMNFAMILY }; public static final ColumnFamilyDescriptor RECOVEREDEDITS_CFD = - ColumnFamilyDescriptorBuilder.newBuilder(RECOVEREDEDITS_COLUMNFAMILY).build(); + ColumnFamilyDescriptorBuilder.newBuilder(RECOVEREDEDITS_COLUMNFAMILY).build(); /** * Name of table mentioned edits from recovered.edits @@ -110,10 +111,9 @@ public static void setUpBeforeClass() throws Exception { } /** - * HBASE-12782 ITBLL fails for me if generator does anything but 5M per maptask. - * Create a region. Close it. Then copy into place a file to replay, one that is bigger than - * configured flush size so we bring on lots of flushes. Then reopen and confirm all edits - * made it in. + * HBASE-12782 ITBLL fails for me if generator does anything but 5M per maptask. Create a region. + * Close it. Then copy into place a file to replay, one that is bigger than configured flush size + * so we bring on lots of flushes. Then reopen and confirm all edits made it in. */ @Test public void testReplayWorksThoughLotsOfFlushing() throws IOException { @@ -122,28 +122,27 @@ public void testReplayWorksThoughLotsOfFlushing() throws IOException { } } - private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy policy) throws - IOException { + private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy policy) + throws IOException { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - // Set it so we flush every 1M or so. Thats a lot. - conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024); + // Set it so we flush every 1M or so. Thats a lot. + conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(policy).toLowerCase()); - TableDescriptor tableDescriptor = TableDescriptorBuilder. - newBuilder(TableName.valueOf(testName.getMethodName())). - setColumnFamily(RECOVEREDEDITS_CFD) .build(); + TableDescriptor tableDescriptor = + TableDescriptorBuilder.newBuilder(TableName.valueOf(testName.getMethodName())) + .setColumnFamily(RECOVEREDEDITS_CFD).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); final String encodedRegionName = hri.getEncodedName(); Path hbaseRootDir = TEST_UTIL.getDataTestDir(); FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); Path tableDir = CommonFSUtils.getTableDir(hbaseRootDir, tableDescriptor.getTableName()); - HRegionFileSystem hrfs = - new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri); + HRegionFileSystem hrfs = new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri); if (fs.exists(hrfs.getRegionDir())) { LOG.info("Region directory already exists. Deleting."); fs.delete(hrfs.getRegionDir(), true); } - HRegion region = HBaseTestingUtil - .createRegionAndWAL(hri, hbaseRootDir, conf, tableDescriptor, blockCache); + HRegion region = + HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, conf, tableDescriptor, blockCache); assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName()); List storeFiles = region.getStoreFileList(RECOVEREDITS_COLUMNFAMILY_ARRAY); // There should be no store files. 
@@ -162,7 +161,7 @@ private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy po // Our 0000000000000016310 is 10MB. Most of the edits are for one region. Lets assume that if // we flush at 1MB, that there are at least 3 flushed files that are there because of the // replay of edits. - if(policy == MemoryCompactionPolicy.EAGER || policy == MemoryCompactionPolicy.ADAPTIVE) { + if (policy == MemoryCompactionPolicy.EAGER || policy == MemoryCompactionPolicy.ADAPTIVE) { assertTrue("Files count=" + storeFiles.size(), storeFiles.size() >= 1); } else { assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10); @@ -190,7 +189,7 @@ public static int verifyAllEditsMadeItIn(final FileSystem fs, final Configuratio count++; // Check this edit is for this region. if (!Bytes.equals(key.getEncodedRegionName(), - region.getRegionInfo().getEncodedNameAsBytes())) { + region.getRegionInfo().getEncodedNameAsBytes())) { continue; } Cell previous = null; @@ -220,10 +219,9 @@ public static int verifyAllEditsMadeItIn(final FileSystem fs, final Configuratio Collections.sort(walCells, CellComparatorImpl.COMPARATOR); int found = 0; - for (int i = 0, j = 0; i < walCells.size() && j < regionCells.size(); ) { - int compareResult = PrivateCellUtil - .compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, walCells.get(i), - regionCells.get(j)); + for (int i = 0, j = 0; i < walCells.size() && j < regionCells.size();) { + int compareResult = PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, + walCells.get(i), regionCells.get(j)); if (compareResult == 0) { i++; j++; @@ -234,8 +232,9 @@ public static int verifyAllEditsMadeItIn(final FileSystem fs, final Configuratio i++; } } - assertEquals("Only found " + found + " cells in region, but there are " + walCells.size() + - " cells in recover edits", found, walCells.size()); + assertEquals("Only found " + found + " cells in region, but there are " + walCells.size() + + " cells in recover edits", + found, walCells.size()); return count; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java index 933cf1d2a1f8..91f396acf2c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,22 +57,20 @@ import org.slf4j.LoggerFactory; /** - * HBASE-21031 - * If replay edits fails, we need to make sure memstore is rollbacked - * And if MSLAB is used, all chunk is released too. + * HBASE-21031 If replay edits fails, we need to make sure memstore is rollbacked And if MSLAB is + * used, all chunk is released too. 
*/ -@Category({RegionServerTests.class, SmallTests.class }) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestRecoveredEditsReplayAndAbort { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRecoveredEditsReplayAndAbort.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestRecoveredEditsReplayAndAbort.class); + private static final Logger LOG = LoggerFactory.getLogger(TestRecoveredEditsReplayAndAbort.class); protected final byte[] row = Bytes.toBytes("rowA"); - protected final static byte [] fam1 = Bytes.toBytes("colfamily11"); + protected final static byte[] fam1 = Bytes.toBytes("colfamily11"); @Rule public TestName name = new TestName(); @@ -83,7 +80,7 @@ public class TestRecoveredEditsReplayAndAbort { protected String method; protected static HBaseTestingUtil TEST_UTIL; - public static Configuration CONF ; + public static Configuration CONF; private HRegion region = null; @Before @@ -102,73 +99,62 @@ public void tearDown() throws Exception { @Test public void test() throws Exception { - //set flush size to 10MB + // set flush size to 10MB CONF.setInt("hbase.hregion.memstore.flush.size", 1024 * 1024 * 10); - //set the report interval to a very small value + // set the report interval to a very small value CONF.setInt("hbase.hstore.report.interval.edits", 1); CONF.setInt("hbase.hstore.report.period", 0); - //mock a RegionServerServices + // mock a RegionServerServices final RegionServerAccounting rsAccounting = new RegionServerAccounting(CONF); RegionServerServices rs = Mockito.mock(RegionServerServices.class); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); Mockito.when(rs.getRegionServerAccounting()).thenReturn(rsAccounting); Mockito.when(rs.isAborted()).thenReturn(false); Mockito.when(rs.getNonceManager()).thenReturn(null); - Mockito.when(rs.getServerName()).thenReturn(ServerName - .valueOf("test", 0, 111)); + Mockito.when(rs.getServerName()).thenReturn(ServerName.valueOf("test", 0, 111)); Mockito.when(rs.getConfiguration()).thenReturn(CONF); - //create a region + // create a region TableName testTable = TableName.valueOf("testRecoveredEidtsReplayAndAbort"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(testTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).build()).build(); RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); - Path logDir = TEST_UTIL - .getDataTestDirOnTestFS("TestRecoveredEidtsReplayAndAbort.log"); + Path logDir = TEST_UTIL.getDataTestDirOnTestFS("TestRecoveredEidtsReplayAndAbort.log"); final WAL wal = HBaseTestingUtil.createWal(CONF, logDir, info); Path rootDir = TEST_UTIL.getDataTestDir(); Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable()); - HRegionFileSystem - .createRegionOnFileSystem(CONF, TEST_UTIL.getTestFileSystem(), tableDir, info); - region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, - htd, rs); - //create some recovered.edits + HRegionFileSystem.createRegionOnFileSystem(CONF, TEST_UTIL.getTestFileSystem(), tableDir, info); + region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs); + // create some recovered.edits final 
WALFactory wals = new WALFactory(CONF, method); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes(); - Path recoveredEditsDir = WALSplitUtil - .getRegionDirRecoveredEditsDir(regiondir); + Path recoveredEditsDir = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir); long maxSeqId = 1200; long minSeqId = 1000; long totalEdits = maxSeqId - minSeqId; for (long i = minSeqId; i <= maxSeqId; i += 100) { - Path recoveredEdits = new Path(recoveredEditsDir, - String.format("%019d", i)); + Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i)); LOG.info("Begin to write recovered.edits : " + recoveredEdits); fs.create(recoveredEdits); - WALProvider.Writer writer = wals - .createRecoveredEditsWriter(fs, recoveredEdits); + WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits); for (long j = i; j < i + 100; j++) { long time = System.nanoTime(); WALEdit edit = new WALEdit(); // 200KB kv byte[] value = new byte[200 * 1024]; Bytes.random(value); - edit.add( - new KeyValue(row, fam1, Bytes.toBytes(j), time, KeyValue.Type.Put, - value)); + edit.add(new KeyValue(row, fam1, Bytes.toBytes(j), time, KeyValue.Type.Put, value)); writer.append(new WAL.Entry( - new WALKeyImpl(regionName, tableName, j, time, - HConstants.DEFAULT_CLUSTER_ID), edit)); + new WALKeyImpl(regionName, tableName, j, time, HConstants.DEFAULT_CLUSTER_ID), edit)); } writer.close(); } TaskMonitor.get().createStatus(method); - //try to replay the edits + // try to replay the edits try { region.initialize(new CancelableProgressable() { private long replayedEdits = 0; @@ -176,29 +162,28 @@ public void test() throws Exception { @Override public boolean progress() { replayedEdits++; - //during replay, rsAccounting should align with global memstore, because - //there is only one memstore here + // during replay, rsAccounting should align with global memstore, because + // there is only one memstore here Assert.assertEquals(rsAccounting.getGlobalMemStoreDataSize(), - region.getMemStoreDataSize()); + region.getMemStoreDataSize()); Assert.assertEquals(rsAccounting.getGlobalMemStoreHeapSize(), - region.getMemStoreHeapSize()); + region.getMemStoreHeapSize()); Assert.assertEquals(rsAccounting.getGlobalMemStoreOffHeapSize(), - region.getMemStoreOffHeapSize()); + region.getMemStoreOffHeapSize()); // abort the replay before finishing, leaving some edits in the memory return replayedEdits < totalEdits - 10; } }); Assert.fail("Should not reach here"); } catch (IOException t) { - LOG.info("Current memstore: " + region.getMemStoreDataSize() + ", " + region - .getMemStoreHeapSize() + ", " + region - .getMemStoreOffHeapSize()); + LOG.info("Current memstore: " + region.getMemStoreDataSize() + ", " + + region.getMemStoreHeapSize() + ", " + region.getMemStoreOffHeapSize()); } - //After aborting replay, there should be no data in the memory + // After aborting replay, there should be no data in the memory Assert.assertEquals(0, rsAccounting.getGlobalMemStoreDataSize()); Assert.assertEquals(0, region.getMemStoreDataSize()); - //All the chunk in the MSLAB should be recycled, otherwise, there might be - //a memory leak. + // All the chunk in the MSLAB should be recycled, otherwise, there might be + // a memory leak. 
Assert.assertEquals(0, ChunkCreator.getInstance().numberOfMappedChunks()); } finally { HBaseTestingUtil.closeRegionAndWAL(this.region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java index 803f497b89de..43f7f1bfad26 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ /** * Tests the ability to specify favored nodes for a region. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRegionFavoredNodes { @ClassRule @@ -57,8 +57,7 @@ public class TestRegionFavoredNodes { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Table table; - private static final TableName TABLE_NAME = - TableName.valueOf("table"); + private static final TableName TABLE_NAME = TableName.valueOf("table"); private static final byte[] COLUMN_FAMILY = Bytes.toBytes("family"); private static final int FAVORED_NODES_NUM = 3; private static final int REGION_SERVERS = 6; @@ -69,8 +68,8 @@ public class TestRegionFavoredNodes { public static void setUpBeforeClass() throws Exception { try { createWithFavoredNode = DistributedFileSystem.class.getDeclaredMethod("create", Path.class, - FsPermission.class, boolean.class, int.class, short.class, long.class, - Progressable.class, InetSocketAddress[].class); + FsPermission.class, boolean.class, int.class, short.class, long.class, Progressable.class, + InetSocketAddress[].class); } catch (NoSuchMethodException nm) { return; } @@ -104,13 +103,12 @@ public void testFavoredNodes() throws Exception { selfAddress = DataNode.class.getMethod("getXferAddress"); } for (int i = 0; i < REGION_SERVERS; i++) { - nodes[i] = (InetSocketAddress)selfAddress.invoke(datanodes.get(i)); + nodes[i] = (InetSocketAddress) selfAddress.invoke(datanodes.get(i)); } String[] nodeNames = new String[REGION_SERVERS]; for (int i = 0; i < REGION_SERVERS; i++) { - nodeNames[i] = nodes[i].getAddress().getHostAddress() + ":" + - nodes[i].getPort(); + nodeNames[i] = nodes[i].getAddress().getHostAddress() + ":" + nodes[i].getPort(); } // For each region, choose some datanodes as the favored nodes then assign @@ -119,7 +117,7 @@ public void testFavoredNodes() throws Exception { HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i); List regions = server.getRegions(TABLE_NAME); for (HRegion region : regions) { - ListfavoredNodes = + List favoredNodes = new ArrayList<>(3); String encodedRegionName = region.getRegionInfo().getEncodedName(); for (int j = 0; j < FAVORED_NODES_NUM; j++) { @@ -147,24 +145,22 @@ public void testFavoredNodes() throws Exception { HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i); List regions = server.getRegions(TABLE_NAME); for (HRegion region : regions) { - List files = region.getStoreFileList(new byte[][]{COLUMN_FAMILY}); + List files = region.getStoreFileList(new byte[][] { COLUMN_FAMILY }); for (String file : files) { - FileStatus status = TEST_UTIL.getDFSCluster().getFileSystem(). 
- getFileStatus(new Path(new URI(file).getPath())); - BlockLocation[] lbks = - ((DistributedFileSystem)TEST_UTIL.getDFSCluster().getFileSystem()) + FileStatus status = TEST_UTIL.getDFSCluster().getFileSystem() + .getFileStatus(new Path(new URI(file).getPath())); + BlockLocation[] lbks = ((DistributedFileSystem) TEST_UTIL.getDFSCluster().getFileSystem()) .getFileBlockLocations(status, 0, Long.MAX_VALUE); for (BlockLocation lbk : lbks) { - locations: - for (String info : lbk.getNames()) { - for (int j = 0; j < FAVORED_NODES_NUM; j++) { - if (info.equals(nodeNames[(i + j) % REGION_SERVERS])) { - continue locations; - } + locations: for (String info : lbk.getNames()) { + for (int j = 0; j < FAVORED_NODES_NUM; j++) { + if (info.equals(nodeNames[(i + j) % REGION_SERVERS])) { + continue locations; } - // This block was at a location that was not a favored location. - fail("Block location " + info + " not a favored node"); } + // This block was at a location that was not a favored location. + fail("Block location " + info + " not a favored node"); + } } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java index 8a937a3bff28..78747fe68040 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,13 +49,13 @@ import org.slf4j.LoggerFactory; /** - * Increments with some concurrency against a region to ensure we get the right answer. - * Test is parameterized to run the fast and slow path increments; if fast, + * Increments with some concurrency against a region to ensure we get the right answer. Test is + * parameterized to run the fast and slow path increments; if fast, * HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY is true. - * - *
<p>There is similar test up in TestAtomicOperation. It does a test where it has 100 threads - * doing increments across two column families all on one row and the increments are connected to - * prove atomicity on row. + *
<p>
          + * There is similar test up in TestAtomicOperation. It does a test where it has 100 threads doing + * increments across two column families all on one row and the increments are connected to prove + * atomicity on row. */ @Category(MediumTests.class) public class TestRegionIncrement { @@ -65,9 +65,10 @@ public class TestRegionIncrement { HBaseClassTestRule.forClass(TestRegionIncrement.class); private static final Logger LOG = LoggerFactory.getLogger(TestRegionIncrement.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static HBaseTestingUtil TEST_UTIL; - private final static byte [] INCREMENT_BYTES = Bytes.toBytes("increment"); + private final static byte[] INCREMENT_BYTES = Bytes.toBytes("increment"); private static final int THREAD_COUNT = 10; private static final int INCREMENT_COUNT = 10000; @@ -85,8 +86,8 @@ private HRegion getRegion(final Configuration conf, final String tableName) thro FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDataTestDir(), TEST_UTIL.getDataTestDir().toString(), conf); wal.init(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegion(TableName.valueOf(tableName), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, conf, false, Durability.SKIP_WAL, wal, INCREMENT_BYTES); } @@ -142,7 +143,7 @@ public void run() { private static class CrossRowCellIncrementer extends Thread { private final int count; private final HRegion region; - private final Increment [] increments; + private final Increment[] increments; CrossRowCellIncrementer(final int i, final int count, final HRegion region, final int range) { super("" + i); @@ -174,15 +175,14 @@ public void run() { * Have each thread update its own Cell. Avoid contention with another thread. 
*/ @Test - public void testUnContendedSingleCellIncrement() - throws IOException, InterruptedException { + public void testUnContendedSingleCellIncrement() throws IOException, InterruptedException { final HRegion region = getRegion(TEST_UTIL.getConfiguration(), - TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName())); + TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName())); long startTime = EnvironmentEdgeManager.currentTime(); try { - SingleCellIncrementer [] threads = new SingleCellIncrementer[THREAD_COUNT]; + SingleCellIncrementer[] threads = new SingleCellIncrementer[THREAD_COUNT]; for (int i = 0; i < threads.length; i++) { - byte [] rowBytes = Bytes.toBytes(i); + byte[] rowBytes = Bytes.toBytes(i); Increment increment = new Increment(rowBytes); increment.addColumn(INCREMENT_BYTES, INCREMENT_BYTES, 1); threads[i] = new SingleCellIncrementer(i, INCREMENT_COUNT, region, increment); @@ -195,16 +195,17 @@ public void testUnContendedSingleCellIncrement() } RegionScanner regionScanner = region.getScanner(new Scan()); List cells = new ArrayList<>(THREAD_COUNT); - while(regionScanner.next(cells)) continue; + while (regionScanner.next(cells)) + continue; assertEquals(THREAD_COUNT, cells.size()); long total = 0; - for (Cell cell: cells) total += - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + for (Cell cell : cells) + total += Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); assertEquals(INCREMENT_COUNT * THREAD_COUNT, total); } finally { closeRegion(region); - LOG.info(this.name.getMethodName() + " " + - (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); + LOG.info(this.name.getMethodName() + " " + (EnvironmentEdgeManager.currentTime() - startTime) + + "ms"); } } @@ -214,10 +215,10 @@ public void testUnContendedSingleCellIncrement() @Test public void testContendedAcrossCellsIncrement() throws IOException, InterruptedException { final HRegion region = getRegion(TEST_UTIL.getConfiguration(), - TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName())); + TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName())); long startTime = EnvironmentEdgeManager.currentTime(); try { - CrossRowCellIncrementer [] threads = new CrossRowCellIncrementer[THREAD_COUNT]; + CrossRowCellIncrementer[] threads = new CrossRowCellIncrementer[THREAD_COUNT]; for (int i = 0; i < threads.length; i++) { threads[i] = new CrossRowCellIncrementer(i, INCREMENT_COUNT, region, THREAD_COUNT); } @@ -229,16 +230,17 @@ public void testContendedAcrossCellsIncrement() throws IOException, InterruptedE } RegionScanner regionScanner = region.getScanner(new Scan()); List cells = new ArrayList<>(100); - while(regionScanner.next(cells)) continue; + while (regionScanner.next(cells)) + continue; assertEquals(THREAD_COUNT, cells.size()); long total = 0; - for (Cell cell: cells) total += - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + for (Cell cell : cells) + total += Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); assertEquals(INCREMENT_COUNT * THREAD_COUNT, total); } finally { closeRegion(region); - LOG.info(this.name.getMethodName() + " " + - (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); + LOG.info(this.name.getMethodName() + " " + (EnvironmentEdgeManager.currentTime() - startTime) + + "ms"); } } } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java index 56a8ea061e0a..94f51fd809d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java @@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestRegionInfo { @ClassRule @@ -70,57 +70,50 @@ public class TestRegionInfo { @Test public void testIsStart() { assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst()); - org.apache.hadoop.hbase.client.RegionInfo ri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(Bytes.toBytes("not_start")).build(); + org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(TableName.META_TABLE_NAME).setStartKey(Bytes.toBytes("not_start")).build(); assertFalse(ri.isFirst()); } @Test public void testIsEnd() { assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst()); - org.apache.hadoop.hbase.client.RegionInfo ri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setEndKey(Bytes.toBytes("not_end")).build(); + org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(TableName.META_TABLE_NAME).setEndKey(Bytes.toBytes("not_end")).build(); assertFalse(ri.isLast()); } @Test public void testIsNext() { - byte [] bytes = Bytes.toBytes("row"); - org.apache.hadoop.hbase.client.RegionInfo ri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setEndKey(bytes).build(); - org.apache.hadoop.hbase.client.RegionInfo ri2 = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(bytes).build(); + byte[] bytes = Bytes.toBytes("row"); + org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(TableName.META_TABLE_NAME).setEndKey(bytes).build(); + org.apache.hadoop.hbase.client.RegionInfo ri2 = org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(TableName.META_TABLE_NAME).setStartKey(bytes).build(); assertFalse(ri.isNext(RegionInfoBuilder.FIRST_META_REGIONINFO)); assertTrue(ri.isNext(ri2)); } @Test public void testIsOverlap() { - byte [] a = Bytes.toBytes("a"); - byte [] b = Bytes.toBytes("b"); - byte [] c = Bytes.toBytes("c"); - byte [] d = Bytes.toBytes("d"); - org.apache.hadoop.hbase.client.RegionInfo all = - RegionInfoBuilder.FIRST_META_REGIONINFO; - org.apache.hadoop.hbase.client.RegionInfo ari = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). 
- setEndKey(a).build(); + byte[] a = Bytes.toBytes("a"); + byte[] b = Bytes.toBytes("b"); + byte[] c = Bytes.toBytes("c"); + byte[] d = Bytes.toBytes("d"); + org.apache.hadoop.hbase.client.RegionInfo all = RegionInfoBuilder.FIRST_META_REGIONINFO; + org.apache.hadoop.hbase.client.RegionInfo ari = org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build(); org.apache.hadoop.hbase.client.RegionInfo abri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(a).setEndKey(b).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + .setStartKey(a).setEndKey(b).build(); org.apache.hadoop.hbase.client.RegionInfo adri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(a).setEndKey(d).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + .setStartKey(a).setEndKey(d).build(); org.apache.hadoop.hbase.client.RegionInfo cdri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(c).setEndKey(d).build(); - org.apache.hadoop.hbase.client.RegionInfo dri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(d).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + .setStartKey(c).setEndKey(d).build(); + org.apache.hadoop.hbase.client.RegionInfo dri = org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(TableName.META_TABLE_NAME).setStartKey(d).build(); assertTrue(all.isOverlap(all)); assertTrue(all.isOverlap(abri)); assertFalse(abri.isOverlap(cdri)); @@ -146,21 +139,19 @@ public void testIsOverlaps() { byte[] d = Bytes.toBytes("d"); byte[] e = Bytes.toBytes("e"); byte[] f = Bytes.toBytes("f"); - org.apache.hadoop.hbase.client.RegionInfo ari = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setEndKey(a).build(); + org.apache.hadoop.hbase.client.RegionInfo ari = org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build(); org.apache.hadoop.hbase.client.RegionInfo abri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(a).setEndKey(b).build(); - org.apache.hadoop.hbase.client.RegionInfo eri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setEndKey(e).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + .setStartKey(a).setEndKey(b).build(); + org.apache.hadoop.hbase.client.RegionInfo eri = org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(TableName.META_TABLE_NAME).setEndKey(e).build(); org.apache.hadoop.hbase.client.RegionInfo cdri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). - setStartKey(c).setEndKey(d).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + .setStartKey(c).setEndKey(d).build(); org.apache.hadoop.hbase.client.RegionInfo efri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME). 
- setStartKey(e).setEndKey(f).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + .setStartKey(e).setEndKey(f).build(); assertFalse(ari.isOverlap(abri)); assertTrue(abri.isOverlap(eri)); assertFalse(cdri.isOverlap(efri)); @@ -170,7 +161,7 @@ public void testIsOverlaps() { @Test public void testPb() throws DeserializationException { RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO; - byte [] bytes = RegionInfo.toByteArray(hri); + byte[] bytes = RegionInfo.toByteArray(hri); RegionInfo pbhri = RegionInfo.parseFrom(bytes); assertTrue(hri.equals(pbhri)); } @@ -180,24 +171,24 @@ public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedExc HBaseTestingUtil htu = new HBaseTestingUtil(); RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO; Path basedir = htu.getDataTestDir(); - // Create a region. That'll write the .regioninfo file. + // Create a region. That'll write the .regioninfo file. FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); FSTableDescriptors.tryUpdateMetaTableDescriptor(htu.getConfiguration()); HRegion r = HBaseTestingUtil.createRegionAndWAL(hri, basedir, htu.getConfiguration(), - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + fsTableDescriptors.get(TableName.META_TABLE_NAME)); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtil.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), - null, htu.getConfiguration()); + r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null, + htu.getConfiguration()); // Ensure the file is not written for a second time. long modtime2 = getModTime(r); assertEquals(modtime, modtime2); // Now load the file. org.apache.hadoop.hbase.client.RegionInfo deserializedHri = - HRegionFileSystem.loadRegionInfoFileContent( - r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir()); + HRegionFileSystem.loadRegionInfoFileContent(r.getRegionFileSystem().getFileSystem(), + r.getRegionFileSystem().getRegionDir()); assertEquals(0, org.apache.hadoop.hbase.client.RegionInfo.COMPARATOR.compare(hri, deserializedHri)); HBaseTestingUtil.closeRegionAndWAL(r); @@ -219,27 +210,24 @@ public void testCreateHRegionInfoName() throws Exception { String id = "id"; // old format region name - byte [] name = RegionInfo.createRegionName(tn, sk, id, false); + byte[] name = RegionInfo.createRegionName(tn, sk, id, false); String nameStr = Bytes.toString(name); assertEquals(tableName + "," + startKey + "," + id, nameStr); - // new format region name. String md5HashInHex = MD5Hash.getMD5AsHex(name); assertEquals(RegionInfo.MD5_HEX_LENGTH, md5HashInHex.length()); name = RegionInfo.createRegionName(tn, sk, id, true); nameStr = Bytes.toString(name); - assertEquals(tableName + "," + startKey + "," - + id + "." + md5HashInHex + ".", - nameStr); + assertEquals(tableName + "," + startKey + "," + id + "." 
+ md5HashInHex + ".", nameStr); } @Test public void testContainsRange() { TableDescriptor tableDesc = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("g")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("g")).build(); // Single row range at start of region assertTrue(hri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("a"))); // Fully contained range @@ -299,11 +287,11 @@ public void testContainsRangeForMetaTable() { @Test public void testLastRegionCompare() { TableDescriptor tableDesc = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); RegionInfo hrip = RegionInfoBuilder.newBuilder(tableDesc.getTableName()) - .setStartKey(Bytes.toBytes("a")).build(); + .setStartKey(Bytes.toBytes("a")).build(); RegionInfo hric = RegionInfoBuilder.newBuilder(tableDesc.getTableName()) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); assertTrue(hrip.compareTo(hric) > 0); } @@ -327,7 +315,7 @@ public void testComparator() { RegionInfo b = RegionInfoBuilder.newBuilder(TableName.valueOf("b")).build(); assertNotEquals(0, a.compareTo(b)); TableName t = TableName.valueOf("t"); - byte [] midway = Bytes.toBytes("midway"); + byte[] midway = Bytes.toBytes("midway"); a = RegionInfoBuilder.newBuilder(t).setEndKey(midway).build(); b = RegionInfoBuilder.newBuilder(t).setStartKey(midway).build(); assertTrue(a.compareTo(b) < 0); @@ -335,19 +323,19 @@ public void testComparator() { assertEquals(a, a); assertEquals(0, a.compareTo(a)); a = RegionInfoBuilder.newBuilder(t).setStartKey(Bytes.toBytes("a")) - .setEndKey(Bytes.toBytes("d")).build(); + .setEndKey(Bytes.toBytes("d")).build(); b = RegionInfoBuilder.newBuilder(t).setStartKey(Bytes.toBytes("e")) - .setEndKey(Bytes.toBytes("g")).build(); + .setEndKey(Bytes.toBytes("g")).build(); assertTrue(a.compareTo(b) < 0); a = RegionInfoBuilder.newBuilder(t).setStartKey(Bytes.toBytes("aaaa")) - .setEndKey(Bytes.toBytes("dddd")).build(); + .setEndKey(Bytes.toBytes("dddd")).build(); b = RegionInfoBuilder.newBuilder(t).setStartKey(Bytes.toBytes("e")) - .setEndKey(Bytes.toBytes("g")).build(); + .setEndKey(Bytes.toBytes("g")).build(); assertTrue(a.compareTo(b) < 0); a = RegionInfoBuilder.newBuilder(t).setStartKey(Bytes.toBytes("aaaa")) - .setEndKey(Bytes.toBytes("dddd")).build(); + .setEndKey(Bytes.toBytes("dddd")).build(); b = RegionInfoBuilder.newBuilder(t).setStartKey(Bytes.toBytes("aaaa")) - .setEndKey(Bytes.toBytes("eeee")).build(); + .setEndKey(Bytes.toBytes("eeee")).build(); assertTrue(a.compareTo(b) < 0); } @@ -363,21 +351,23 @@ public void testRegionNameForRegionReplicas() throws Exception { // assert with only the region name without encoding // primary, replicaId = 0 - byte [] name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0, false); + byte[] name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0, false); String nameStr = Bytes.toString(name); assertEquals(tableName + "," + startKey + "," + id, nameStr); // replicaId = 1 name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 1, false); nameStr = Bytes.toString(name); - assertEquals(tableName + "," + startKey + 
"," + id + "_" + - String.format(RegionInfo.REPLICA_ID_FORMAT, 1), nameStr); + assertEquals( + tableName + "," + startKey + "," + id + "_" + String.format(RegionInfo.REPLICA_ID_FORMAT, 1), + nameStr); // replicaId = max name = RegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0xFFFF, false); nameStr = Bytes.toString(name); - assertEquals(tableName + "," + startKey + "," + id + "_" + - String.format(RegionInfo.REPLICA_ID_FORMAT, 0xFFFF), nameStr); + assertEquals(tableName + "," + startKey + "," + id + "_" + + String.format(RegionInfo.REPLICA_ID_FORMAT, 0xFFFF), + nameStr); } @Test @@ -391,21 +381,20 @@ public void testParseName() throws IOException { byte[] regionName = RegionInfo.createRegionName(tableName, startKey, regionId, false); byte[][] fields = RegionInfo.parseRegionName(regionName); - assertArrayEquals(Bytes.toString(fields[0]),tableName.getName(), fields[0]); - assertArrayEquals(Bytes.toString(fields[1]),startKey, fields[1]); - assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)),fields[2]); + assertArrayEquals(Bytes.toString(fields[0]), tableName.getName(), fields[0]); + assertArrayEquals(Bytes.toString(fields[1]), startKey, fields[1]); + assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)), fields[2]); assertEquals(3, fields.length); // test with replicaId - regionName = RegionInfo.createRegionName(tableName, startKey, regionId, - replicaId, false); + regionName = RegionInfo.createRegionName(tableName, startKey, regionId, replicaId, false); fields = RegionInfo.parseRegionName(regionName); - assertArrayEquals(Bytes.toString(fields[0]),tableName.getName(), fields[0]); - assertArrayEquals(Bytes.toString(fields[1]),startKey, fields[1]); - assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)),fields[2]); - assertArrayEquals(Bytes.toString(fields[3]), Bytes.toBytes( - String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)), fields[3]); + assertArrayEquals(Bytes.toString(fields[0]), tableName.getName(), fields[0]); + assertArrayEquals(Bytes.toString(fields[1]), startKey, fields[1]); + assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)), fields[2]); + assertArrayEquals(Bytes.toString(fields[3]), + Bytes.toBytes(String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)), fields[3]); } @Test @@ -418,7 +407,7 @@ public void testConvert() { int replicaId = 42; RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey) - .setSplit(split).setRegionId(regionId).setReplicaId(replicaId).build(); + .setSplit(split).setRegionId(regionId).setReplicaId(replicaId).build(); // convert two times, compare RegionInfo convertedHri = ProtobufUtil.toRegionInfo(ProtobufUtil.toRegionInfo(hri)); @@ -427,32 +416,34 @@ public void testConvert() { // test convert RegionInfo without replicaId HBaseProtos.RegionInfo info = HBaseProtos.RegionInfo.newBuilder() - .setTableName(HBaseProtos.TableName.newBuilder() - .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())) - .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())).build()) - .setStartKey(UnsafeByteOperations.unsafeWrap(startKey)) - .setEndKey(UnsafeByteOperations.unsafeWrap(endKey)).setSplit(split).setRegionId(regionId) - .build(); + .setTableName(HBaseProtos.TableName.newBuilder() + .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())) + .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())).build()) + 
.setStartKey(UnsafeByteOperations.unsafeWrap(startKey)) + .setEndKey(UnsafeByteOperations.unsafeWrap(endKey)).setSplit(split).setRegionId(regionId) + .build(); convertedHri = ProtobufUtil.toRegionInfo(info); // expecting default replicaId RegionInfo expectedHri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey) - .setEndKey(endKey).setSplit(split).setRegionId(regionId).setReplicaId(0).build(); + .setEndKey(endKey).setSplit(split).setRegionId(regionId).setReplicaId(0).build(); assertEquals(expectedHri, convertedHri); } + @Test public void testRegionDetailsForDisplay() throws IOException { - byte[] startKey = new byte[] {0x01, 0x01, 0x02, 0x03}; - byte[] endKey = new byte[] {0x01, 0x01, 0x02, 0x04}; + byte[] startKey = new byte[] { 0x01, 0x01, 0x02, 0x03 }; + byte[] endKey = new byte[] { 0x01, 0x01, 0x02, 0x04 }; Configuration conf = new Configuration(); conf.setBoolean("hbase.display.keys", false); RegionInfo h = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(startKey).setEndKey(endKey).build(); + .setStartKey(startKey).setEndKey(endKey).build(); checkEquality(h, conf); // check HRIs with non-default replicaId h = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setStartKey(startKey) - .setEndKey(endKey).setRegionId(EnvironmentEdgeManager.currentTime()).setReplicaId(1).build(); + .setEndKey(endKey).setRegionId(EnvironmentEdgeManager.currentTime()).setReplicaId(1) + .build(); checkEquality(h, conf); assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY, RegionInfoDisplay.getEndKeyForDisplay(h, conf)); @@ -461,7 +452,7 @@ public void testRegionDetailsForDisplay() throws IOException { RegionState state = RegionState.createForTesting(h, RegionState.State.OPEN); String descriptiveNameForDisplay = - RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf); + RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf); checkDescriptiveNameEquality(descriptiveNameForDisplay, state.toDescriptiveString(), startKey); conf.setBoolean("hbase.display.keys", true); @@ -472,13 +463,13 @@ public void testRegionDetailsForDisplay() throws IOException { } private void checkDescriptiveNameEquality(String descriptiveNameForDisplay, String origDesc, - byte[] startKey) { + byte[] startKey) { // except for the "hidden-start-key" substring everything else should exactly match String firstPart = descriptiveNameForDisplay.substring(0, descriptiveNameForDisplay.indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY))); String secondPart = descriptiveNameForDisplay - .substring(descriptiveNameForDisplay.indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY)) + - RegionInfoDisplay.HIDDEN_START_KEY.length); + .substring(descriptiveNameForDisplay.indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY)) + + RegionInfoDisplay.HIDDEN_START_KEY.length); String firstPartOrig = origDesc.substring(0, origDesc.indexOf(Bytes.toStringBinary(startKey))); String secondPartOrig = origDesc.substring( origDesc.indexOf(Bytes.toStringBinary(startKey)) + Bytes.toStringBinary(startKey).length()); @@ -507,4 +498,3 @@ private void checkEquality(RegionInfo h, Configuration conf) throws IOException } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java index 48729faae3ef..e12e435f6660 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoStaticInitialization.java @@ -34,15 +34,15 @@ * Test for the tangled mess that is static initialization of our our {@link RegionInfo} and * {@link RegionInfoBuilder}, as reported on HBASE-24896. The condition being tested can only be * reproduced the first time a JVM loads the classes under test. Thus, this test is marked as a - * {@link LargeTests} because, under their current configuration, tests in that category are run - * in their own JVM instances. + * {@link LargeTests} because, under their current configuration, tests in that category are run in + * their own JVM instances. */ @SuppressWarnings("deprecation") -@Category({ RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestRegionInfoStaticInitialization { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionInfoStaticInitialization.class); + HBaseClassTestRule.forClass(TestRegionInfoStaticInitialization.class); @Test public void testParallelStaticInitialization() throws Exception { @@ -51,16 +51,15 @@ public void testParallelStaticInitialization() throws Exception { // RegionInfoBuilder. final Supplier retrieveUNDEFINED = () -> RegionInfo.UNDEFINED; final Supplier retrieveMetaRegionInfo = - () -> RegionInfoBuilder.FIRST_META_REGIONINFO; + () -> RegionInfoBuilder.FIRST_META_REGIONINFO; // The test runs multiple threads that reference these mutually dependent symbols. In order to // express this bug, these threads need to access these symbols at roughly the same time, so // that the classloader is asked to materialize these symbols concurrently. These Suppliers are // run on threads that have already been allocated, managed by the system's ForkJoin pool. - final CompletableFuture[] futures = Stream.of( - retrieveUNDEFINED, retrieveMetaRegionInfo, retrieveUNDEFINED, retrieveMetaRegionInfo) - .map(CompletableFuture::supplyAsync) - .toArray(CompletableFuture[]::new); + final CompletableFuture[] futures = Stream + .of(retrieveUNDEFINED, retrieveMetaRegionInfo, retrieveUNDEFINED, retrieveMetaRegionInfo) + .map(CompletableFuture::supplyAsync).toArray(CompletableFuture[]::new); // Loading classes should be relatively fast. 5 seconds is an arbitrary choice of timeout. It // was chosen under the assumption that loading these symbols should complete much faster than diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java index 1e3ce753037b..b38325601209 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInterrupt.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertTrue; @@ -24,7 +23,6 @@ import java.io.InterruptedIOException; import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -72,12 +70,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestRegionInterrupt { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionInterrupt.class); + HBaseClassTestRule.forClass(TestRegionInterrupt.class); private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Logger LOG = LoggerFactory.getLogger(TestRegionInterrupt.class); @@ -96,8 +94,8 @@ public static void setUpBeforeClass() throws Exception { conf.setClass(HConstants.REGION_IMPL, InterruptInterceptingHRegion.class, Region.class); conf.setBoolean(HRegion.CLOSE_WAIT_ABORT, true); // Ensure the sleep interval is long enough for interrupts to occur. - long waitInterval = conf.getLong(HRegion.CLOSE_WAIT_INTERVAL, - HRegion.DEFAULT_CLOSE_WAIT_INTERVAL); + long waitInterval = + conf.getLong(HRegion.CLOSE_WAIT_INTERVAL, HRegion.DEFAULT_CLOSE_WAIT_INTERVAL); sleepTime = waitInterval * 2; // Try to bound the running time of this unit if expected actions do not take place. conf.setLong(HRegion.CLOSE_WAIT_TIME, sleepTime * 2); @@ -152,7 +150,7 @@ public void run() { // Wait for the filter to begin sleeping LOG.info("Waiting for scanner to start"); - Waiter.waitFor(TEST_UTIL.getConfiguration(), 10*1000, new Waiter.Predicate() { + Waiter.waitFor(TEST_UTIL.getConfiguration(), 10 * 1000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { return DelayingFilter.isSleeping(); @@ -180,8 +178,7 @@ public void testCloseInterruptMutation() throws Exception { // Create the test table TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .setCoprocessor(MutationDelayingCoprocessor.class.getName()) - .build(); + .setCoprocessor(MutationDelayingCoprocessor.class.getName()).build(); LOG.info("Creating table " + tableName); admin.createTable(htd); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); @@ -211,7 +208,7 @@ public void run() { // Wait for delayed insertion to begin LOG.info("Waiting for mutations to start"); - Waiter.waitFor(TEST_UTIL.getConfiguration(), 10*1000, new Waiter.Predicate() { + Waiter.waitFor(TEST_UTIL.getConfiguration(), 10 * 1000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { return MutationDelayingCoprocessor.isSleeping(); @@ -240,9 +237,8 @@ public static boolean wasInterrupted() { return interrupted; } - public InterruptInterceptingHRegion(Path tableDir, WAL wal, FileSystem fs, - Configuration conf, RegionInfo regionInfo, TableDescriptor htd, - RegionServerServices rsServices) { + public InterruptInterceptingHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf, + RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices) { super(tableDir, wal, fs, conf, regionInfo, htd, rsServices); } @@ -294,8 +290,7 @@ public ReturnCode filterCell(Cell v) throws IOException { return ReturnCode.INCLUDE; } - public static DelayingFilter parseFrom(final byte [] pbBytes) - throws DeserializationException { + public 
static DelayingFilter parseFrom(final byte[] pbBytes) throws DeserializationException { // Just return a new instance. return new DelayingFilter(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 0434caebd2f3..f72a1b277918 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -81,14 +81,16 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestRegionMergeTransactionOnCluster { @ClassRule @@ -98,7 +100,8 @@ public class TestRegionMergeTransactionOnCluster { private static final Logger LOG = LoggerFactory.getLogger(TestRegionMergeTransactionOnCluster.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final int NB_SERVERS = 3; @@ -150,13 +153,13 @@ public void testWholesomeMerge() throws Exception { // Merge 2nd and 3th region PairOfSameType mergedRegions = - mergeRegionsAndVerifyRegionNum(MASTER, tableName, 1, 2, INITIAL_REGION_NUM - 2); + mergeRegionsAndVerifyRegionNum(MASTER, tableName, 1, 2, INITIAL_REGION_NUM - 2); verifyRowCount(table, ROWSIZE); // Randomly choose one of the two merged regions - RegionInfo hri = ThreadLocalRandom.current().nextBoolean() ? mergedRegions.getFirst() : - mergedRegions.getSecond(); + RegionInfo hri = ThreadLocalRandom.current().nextBoolean() ? mergedRegions.getFirst() + : mergedRegions.getSecond(); SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); AssignmentManager am = cluster.getMaster().getAssignmentManager(); RegionStates regionStates = am.getRegionStates(); @@ -176,8 +179,8 @@ public void testWholesomeMerge() throws Exception { } /** - * Not really restarting the master. Simulate it by clear of new region - * state since it is not persisted, will be lost after master restarts. + * Not really restarting the master. Simulate it by clear of new region state since it is not + * persisted, will be lost after master restarts. 
*/ @Test public void testMergeAndRestartingMaster() throws Exception { @@ -215,13 +218,12 @@ public void testCleanMergeReference() throws Exception { verifyRowCount(table, ROWSIZE); table.close(); - List> tableRegions = MetaTableAccessor - .getTableRegionsAndLocations(MASTER.getConnection(), tableName); + List> tableRegions = + MetaTableAccessor.getTableRegionsAndLocations(MASTER.getConnection(), tableName); RegionInfo mergedRegionInfo = tableRegions.get(0).getFirst(); - TableDescriptor tableDescriptor = MASTER.getTableDescriptors().get( - tableName); - Result mergedRegionResult = MetaTableAccessor.getRegionResult( - MASTER.getConnection(), mergedRegionInfo.getRegionName()); + TableDescriptor tableDescriptor = MASTER.getTableDescriptors().get(tableName); + Result mergedRegionResult = MetaTableAccessor.getRegionResult(MASTER.getConnection(), + mergedRegionInfo.getRegionName()); // contains merge reference in META assertTrue(CatalogFamilyFormat.hasMergeRegions(mergedRegionResult.rawCells())); @@ -240,10 +242,10 @@ public void testCleanMergeReference() throws Exception { assertTrue(fs.exists(regionBdir)); ColumnFamilyDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies(); - HRegionFileSystem hrfs = new HRegionFileSystem( - TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo); + HRegionFileSystem hrfs = + new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo); int count = 0; - for(ColumnFamilyDescriptor colFamily : columnFamilies) { + for (ColumnFamilyDescriptor colFamily : columnFamilies) { count += hrfs.getStoreFiles(colFamily.getNameAsString()).size(); } ADMIN.compactRegion(mergedRegionInfo.getRegionName()); @@ -252,29 +254,29 @@ public void testCleanMergeReference() throws Exception { long timeout = EnvironmentEdgeManager.currentTime() + waitTime; int newcount = 0; while (EnvironmentEdgeManager.currentTime() < timeout) { - for(ColumnFamilyDescriptor colFamily : columnFamilies) { + for (ColumnFamilyDescriptor colFamily : columnFamilies) { newcount += hrfs.getStoreFiles(colFamily.getNameAsString()).size(); } - if(newcount > count) { + if (newcount > count) { break; } Thread.sleep(50); } assertTrue(newcount > count); - List regionServerThreads = TEST_UTIL.getHBaseCluster() - .getRegionServerThreads(); + List regionServerThreads = + TEST_UTIL.getHBaseCluster().getRegionServerThreads(); for (RegionServerThread rs : regionServerThreads) { - CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(100, null, - rs.getRegionServer(), false); + CompactedHFilesDischarger cleaner = + new CompactedHFilesDischarger(100, null, rs.getRegionServer(), false); cleaner.chore(); Thread.sleep(1000); } while (EnvironmentEdgeManager.currentTime() < timeout) { int newcount1 = 0; - for(ColumnFamilyDescriptor colFamily : columnFamilies) { + for (ColumnFamilyDescriptor colFamily : columnFamilies) { newcount1 += hrfs.getStoreFiles(colFamily.getNameAsString()).size(); } - if(newcount1 <= 1) { + if (newcount1 <= 1) { break; } Thread.sleep(50); @@ -288,7 +290,7 @@ public void testCleanMergeReference() throws Exception { Thread.sleep(50); // Cleanup is async so wait till all procedures are done running. ProcedureTestingUtility.waitNoProcedureRunning( - TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()); } // We used to check for existence of region in fs but sometimes the region dir was // cleaned up by the time we got here making the test sometimes flakey. 
@@ -296,12 +298,12 @@ public void testCleanMergeReference() throws Exception { // Wait around a bit to give stuff a chance to complete. while (true) { - mergedRegionResult = MetaTableAccessor - .getRegionResult(TEST_UTIL.getConnection(), mergedRegionInfo.getRegionName()); + mergedRegionResult = MetaTableAccessor.getRegionResult(TEST_UTIL.getConnection(), + mergedRegionInfo.getRegionName()); if (CatalogFamilyFormat.hasMergeRegions(mergedRegionResult.rawCells())) { LOG.info("Waiting on cleanup of merge columns {}", - Arrays.asList(mergedRegionResult.rawCells()).stream(). - map(c -> c.toString()).collect(Collectors.joining(","))); + Arrays.asList(mergedRegionResult.rawCells()).stream().map(c -> c.toString()) + .collect(Collectors.joining(","))); Threads.sleep(50); } else { break; @@ -315,10 +317,9 @@ public void testCleanMergeReference() throws Exception { } /** - * This test tests 1, merging region not online; - * 2, merging same two regions; 3, merging unknown regions. - * They are in one test case so that we don't have to create - * many tables, and these tests are simple. + * This test tests 1, merging region not online; 2, merging same two regions; 3, merging unknown + * regions. They are in one test case so that we don't have to create many tables, and these tests + * are simple. */ @Test public void testMerge() throws Exception { @@ -348,13 +349,13 @@ public void testMerge() throws Exception { try { // Merge the same region: b and b. - FutureUtils - .get(admin.mergeRegionsAsync(b.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), true)); + FutureUtils.get( + admin.mergeRegionsAsync(b.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), true)); fail("A region should not be able to merge with itself, even forcfully"); } catch (IOException ie) { assertTrue("Exception should mention regions not online", - StringUtils.stringifyException(ie).contains("region to itself") && - ie instanceof MergeRegionException); + StringUtils.stringifyException(ie).contains("region to itself") + && ie instanceof MergeRegionException); } try { @@ -377,12 +378,12 @@ public void testMergeWithReplicas() throws Exception { // Create table and load data. 
Table table = createTableAndLoadData(MASTER, tableName, 5, 2); List> initialRegionToServers = - MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName); + MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName); // Merge 1st and 2nd region PairOfSameType mergedRegions = - mergeRegionsAndVerifyRegionNum(MASTER, tableName, 0, 2, 5 * 2 - 2); + mergeRegionsAndVerifyRegionNum(MASTER, tableName, 0, 2, 5 * 2 - 2); List> currentRegionToServers = - MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName); + MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tableName); List initialRegions = new ArrayList<>(); for (Pair p : initialRegionToServers) { initialRegions.add(p.getFirst()); @@ -391,48 +392,76 @@ public void testMergeWithReplicas() throws Exception { for (Pair p : currentRegionToServers) { currentRegions.add(p.getFirst()); } - assertTrue(initialRegions.contains(mergedRegions.getFirst())); //this is the first region - assertTrue(initialRegions.contains(RegionReplicaUtil - .getRegionInfoForReplica(mergedRegions.getFirst(), 1))); //this is the replica of the first region - assertTrue(initialRegions.contains(mergedRegions.getSecond())); //this is the second region - assertTrue(initialRegions.contains(RegionReplicaUtil - .getRegionInfoForReplica(mergedRegions.getSecond(), 1))); //this is the replica of the second region - assertTrue(!initialRegions.contains(currentRegions.get(0))); //this is the new region - assertTrue(!initialRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1))); //replica of the new region - assertTrue(currentRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1))); //replica of the new region - assertTrue(!currentRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1))); //replica of the merged region - assertTrue(!currentRegions.contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1))); //replica of the merged region + assertTrue(initialRegions.contains(mergedRegions.getFirst())); // this is the first region + assertTrue(initialRegions + .contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1))); // this + // is + // the + // replica + // of + // the + // first + // region + assertTrue(initialRegions.contains(mergedRegions.getSecond())); // this is the second region + assertTrue(initialRegions + .contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1))); // this + // is + // the + // replica + // of + // the + // second + // region + assertTrue(!initialRegions.contains(currentRegions.get(0))); // this is the new region + assertTrue(!initialRegions + .contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1))); // replica + // of the + // new + // region + assertTrue(currentRegions + .contains(RegionReplicaUtil.getRegionInfoForReplica(currentRegions.get(0), 1))); // replica + // of the + // new + // region + assertTrue(!currentRegions + .contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getFirst(), 1))); // replica + // of + // the + // merged + // region + assertTrue(!currentRegions + .contains(RegionReplicaUtil.getRegionInfoForReplica(mergedRegions.getSecond(), 1))); // replica + // of + // the + // merged + // region table.close(); } finally { TEST_UTIL.deleteTable(tableName); } } - private PairOfSameType mergeRegionsAndVerifyRegionNum( - HMaster master, TableName 
tablename, - int regionAnum, int regionBnum, int expectedRegionNum) throws Exception { + private PairOfSameType mergeRegionsAndVerifyRegionNum(HMaster master, + TableName tablename, int regionAnum, int regionBnum, int expectedRegionNum) throws Exception { PairOfSameType mergedRegions = - requestMergeRegion(master, tablename, regionAnum, regionBnum); + requestMergeRegion(master, tablename, regionAnum, regionBnum); waitAndVerifyRegionNum(master, tablename, expectedRegionNum); return mergedRegions; } - private PairOfSameType requestMergeRegion( - HMaster master, TableName tablename, + private PairOfSameType requestMergeRegion(HMaster master, TableName tablename, int regionAnum, int regionBnum) throws Exception { - List> tableRegions = MetaTableAccessor - .getTableRegionsAndLocations( - TEST_UTIL.getConnection(), tablename); + List> tableRegions = + MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tablename); RegionInfo regionA = tableRegions.get(regionAnum).getFirst(); RegionInfo regionB = tableRegions.get(regionBnum).getFirst(); - ADMIN.mergeRegionsAsync( - regionA.getEncodedNameAsBytes(), - regionB.getEncodedNameAsBytes(), false); + ADMIN.mergeRegionsAsync(regionA.getEncodedNameAsBytes(), regionB.getEncodedNameAsBytes(), + false); return new PairOfSameType<>(regionA, regionB); } - private void waitAndVerifyRegionNum(HMaster master, TableName tablename, - int expectedRegionNum) throws Exception { + private void waitAndVerifyRegionNum(HMaster master, TableName tablename, int expectedRegionNum) + throws Exception { List> tableRegionsInMeta; List tableRegionsInMaster; long timeout = EnvironmentEdgeManager.currentTime() + waitTime; @@ -452,19 +481,18 @@ private void waitAndVerifyRegionNum(HMaster master, TableName tablename, Thread.sleep(250); } - tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations( - TEST_UTIL.getConnection(), tablename); + tableRegionsInMeta = + MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tablename); LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta)); assertEquals(expectedRegionNum, tableRegionsInMeta.size()); } - private Table createTableAndLoadData(HMaster master, TableName tablename) - throws Exception { + private Table createTableAndLoadData(HMaster master, TableName tablename) throws Exception { return createTableAndLoadData(master, tablename, INITIAL_REGION_NUM, 1); } - private Table createTableAndLoadData(HMaster master, TableName tablename, - int numRegions, int replication) throws Exception { + private Table createTableAndLoadData(HMaster master, TableName tablename, int numRegions, + int replication) throws Exception { assertTrue("ROWSIZE must > numregions:" + numRegions, ROWSIZE > numRegions); byte[][] splitRows = new byte[numRegions - 1][]; for (int i = 0; i < splitRows.length; i++) { @@ -485,10 +513,10 @@ private Table createTableAndLoadData(HMaster master, TableName tablename, List> tableRegions; TEST_UTIL.waitUntilAllRegionsAssigned(tablename); LOG.info("All regions assigned for table - " + table.getName()); - tableRegions = MetaTableAccessor.getTableRegionsAndLocations( - TEST_UTIL.getConnection(), tablename); - assertEquals("Wrong number of regions in table " + tablename, - numRegions * replication, tableRegions.size()); + tableRegions = + MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tablename); + assertEquals("Wrong number of regions in table " + tablename, numRegions * replication, + tableRegions.size()); LOG.info(tableRegions.size() + 
"Regions after load: " + Joiner.on(',').join(tableRegions)); assertEquals(numRegions * replication, tableRegions.size()); return table; @@ -510,8 +538,7 @@ private void loadData(Table table) throws IOException { } } - private void verifyRowCount(Table table, int expectedRegionNum) - throws IOException { + private void verifyRowCount(Table table, int expectedRegionNum) throws IOException { ResultScanner scanner = table.getScanner(new Scan()); int rowCount = 0; while (scanner.next() != null) { @@ -537,6 +564,7 @@ static class MyMasterRpcServices extends MasterRpcServices { static AtomicBoolean enabled = new AtomicBoolean(false); private HMaster myMaster; + public MyMasterRpcServices(HMaster master) throws IOException { super(master); myMaster = master; @@ -546,10 +574,10 @@ public MyMasterRpcServices(HMaster master) throws IOException { public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c, ReportRegionStateTransitionRequest req) throws ServiceException { ReportRegionStateTransitionResponse resp = super.reportRegionStateTransition(c, req); - if (enabled.get() && req.getTransition(0).getTransitionCode() - == TransitionCode.READY_TO_MERGE && !resp.hasErrorMessage()) { + if (enabled.get() && req.getTransition(0).getTransitionCode() == TransitionCode.READY_TO_MERGE + && !resp.hasErrorMessage()) { RegionStates regionStates = myMaster.getAssignmentManager().getRegionStates(); - for (RegionState regionState: regionStates.getRegionsStateInTransition()) { + for (RegionState regionState : regionStates.getRegionsStateInTransition()) { // Find the merging_new region and remove it if (regionState.isMergingNew()) { regionStates.deleteRegion(regionState.getRegion()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMove.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMove.java index 9f5ed94b9819..c649cfe3577f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMove.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMove.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ /** * Test move fails when table disabled */ -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestRegionMove { @ClassRule @@ -61,7 +61,7 @@ public class TestRegionMove { @Rule public TestName name = new TestName(); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - public static Configuration CONF ; + public static Configuration CONF; protected static final String F1 = "f1"; // Test names @@ -98,8 +98,8 @@ public void testDisableAndMove() throws Exception { byte[] startKey = regionInfo.getStartKey(); // The startKey of the first region is "empty", which would throw an error if we try to // Put that. 
- byte[] rowKey = org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.concat( - startKey, Bytes.toBytes("1")); + byte[] rowKey = org.apache.hbase.thirdparty.com.google.common.primitives.Bytes + .concat(startKey, Bytes.toBytes("1")); Put p = new Put(rowKey); p.addColumn(Bytes.toBytes(F1), Bytes.toBytes("q1"), Bytes.toBytes("value")); t.put(p); @@ -111,9 +111,9 @@ public void testDisableAndMove() throws Exception { List regionsOnRS1ForTable = admin.getRegions(rs1.getServerName()).stream() .filter((regionInfo) -> regionInfo.getTable().equals(tableName)) .collect(Collectors.toList()); - assertTrue( - "Expected to find at least one region for " + tableName + " on " + rs1.getServerName() - + ", but found none", !regionsOnRS1ForTable.isEmpty()); + assertTrue("Expected to find at least one region for " + tableName + " on " + + rs1.getServerName() + ", but found none", + !regionsOnRS1ForTable.isEmpty()); final RegionInfo regionToMove = regionsOnRS1ForTable.get(0); // Offline the region and then try to move it. Should fail. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java index 781f4d52a40a..bf0573bc1231 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MediumTests.class, RegionServerTests.class}) +@Category({ MediumTests.class, RegionServerTests.class }) public class TestRegionOpen { @ClassRule @@ -86,13 +86,13 @@ private static HRegionServer getRS() { @Test public void testPriorityRegionIsOpenedWithSeparateThreadPool() throws Exception { final TableName tableName = TableName.valueOf(TestRegionOpen.class.getSimpleName()); - ThreadPoolExecutor exec = getRS().getExecutorService() - .getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION); + ThreadPoolExecutor exec = + getRS().getExecutorService().getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION); long completed = exec.getCompletedTaskCount(); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(tableName).setPriority(HConstants.HIGH_QOS) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(tableName).setPriority(HConstants.HIGH_QOS) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build(); try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); Admin admin = connection.getAdmin()) { admin.createTable(tableDescriptor); @@ -111,21 +111,22 @@ public void testNonExistentRegionReplica() throws Exception { Path rootDir = HTU.getDataTestDirOnTestFS(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME)).build(); admin.createTable(htd); HTU.waitUntilNoRegionsInTransition(60000); // Create new HRI with non-default region replica id RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(Bytes.toBytes("A")).setEndKey(Bytes.toBytes("B")) - 
.setRegionId(EnvironmentEdgeManager.currentTime()).setReplicaId(2).build(); + .setStartKey(Bytes.toBytes("A")).setEndKey(Bytes.toBytes("B")) + .setRegionId(EnvironmentEdgeManager.currentTime()).setReplicaId(2).build(); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri); Path regionDir = regionFs.getRegionDir(); try { HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); } catch (IOException e) { - LOG.info("Caught expected IOE due missing .regioninfo file, due: " + e.getMessage() + " skipping region open."); + LOG.info("Caught expected IOE due missing .regioninfo file, due: " + e.getMessage() + + " skipping region open."); // We should only have 1 region online List regions = admin.getRegions(tableName); LOG.info("Regions: " + regions); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java index ba7e9d154e29..c6aa66c4f57d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,15 +63,14 @@ public class TestRegionReplicaFailover { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRegionReplicaFailover.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestRegionReplicaReplication.class); + private static final Logger LOG = LoggerFactory.getLogger(TestRegionReplicaReplication.class); private static final HBaseTestingUtil HTU = new HBaseTestingUtil(); private static final int NB_SERVERS = 3; protected final byte[][] families = - new byte[][] { HBaseTestingUtil.fam1, HBaseTestingUtil.fam2, HBaseTestingUtil.fam3 }; + new byte[][] { HBaseTestingUtil.fam1, HBaseTestingUtil.fam2, HBaseTestingUtil.fam3 }; protected final byte[] fam = HBaseTestingUtil.fam1; protected final byte[] qual1 = Bytes.toBytes("qual1"); protected final byte[] value1 = Bytes.toBytes("value1"); @@ -86,7 +85,7 @@ public class TestRegionReplicaFailover { @Before public void before() throws Exception { Configuration conf = HTU.getConfiguration(); - // Up the handlers; this test needs more than usual. + // Up the handlers; this test needs more than usual. 
conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true); conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, true); @@ -186,7 +185,8 @@ public void testPrimaryRegionKill() throws Exception { HTU.getMiniHBaseCluster().startRegionServer(); } - /** wal replication is async, we have to wait until the replication catches up, or we timeout + /** + * wal replication is async, we have to wait until the replication catches up, or we timeout */ private void verifyNumericRowsWithTimeout(final Table table, final byte[] f, final int startRow, final int endRow, final int replicaId, final long timeout) throws Exception { @@ -250,9 +250,9 @@ public void testSecondaryRegionKill() throws Exception { } /** - * Tests the case where there are 3 region replicas and the primary is continuously accepting - * new writes while one of the secondaries is killed. Verification is done for both of the - * secondary replicas. + * Tests the case where there are 3 region replicas and the primary is continuously accepting new + * writes while one of the secondaries is killed. Verification is done for both of the secondary + * replicas. */ @Test public void testSecondaryRegionKillWhilePrimaryIsAcceptingWrites() throws Exception { @@ -273,7 +273,7 @@ public void testSecondaryRegionKillWhilePrimaryIsAcceptingWrites() throws Except public void run() { while (!done.get()) { try { - HTU.loadNumericRows(table, fam, key.get(), key.get()+1000); + HTU.loadNumericRows(table, fam, key.get(), key.get() + 1000); key.addAndGet(1000); } catch (Throwable e) { ex.compareAndSet(null, e); @@ -332,10 +332,10 @@ public void testLotsOfRegionReplicas() throws IOException { int regionReplication = 10; String tableName = htd.getTableName().getNameAsString() + "2"; htd = HTU - .createModifyableTableDescriptor(TableName.valueOf(tableName), - ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, - ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED) - .setRegionReplication(regionReplication).build(); + .createModifyableTableDescriptor(TableName.valueOf(tableName), + ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, + ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED) + .setRegionReplication(regionReplication).build(); // dont care about splits themselves too much byte[] startKey = Bytes.toBytes("aaa"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaReplicationError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaReplicationError.java index 6f8fac8d6380..3b9baa2a63cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaReplicationError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaReplicationError.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ public class TestRegionReplicaReplicationError { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionReplicaReplicationError.class); + HBaseClassTestRule.forClass(TestRegionReplicaReplicationError.class); public static final class ErrorReplayRSRpcServices extends RSRpcServices { @@ -98,7 +98,7 @@ public ReplicateWALEntryResponse replicateToReplica(RpcController controller, } public static final class RSForTest - extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer { + extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer { public RSForTest(Configuration conf) throws IOException, InterruptedException { super(conf); @@ -125,7 +125,7 @@ public static void setUp() throws Exception { HTU.startMiniCluster( StartTestingClusterOption.builder().rsClass(RSForTest.class).numRegionServers(3).build()); TableDescriptor td = TableDescriptorBuilder.newBuilder(TN).setRegionReplication(3) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build(); HTU.getAdmin().createTable(td); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaWaitForPrimaryFlushConf.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaWaitForPrimaryFlushConf.java index efbd73aed43b..6646407b13a3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaWaitForPrimaryFlushConf.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaWaitForPrimaryFlushConf.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index be3f901a2a60..0cae00a2e341 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -68,10 +68,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** - * Tests for region replicas. Sad that we cannot isolate these without bringing up a whole - * cluster. See {@link TestRegionServerNoMaster}. + * Tests for region replicas. Sad that we cannot isolate these without bringing up a whole cluster. + * See {@link TestRegionServerNoMaster}. 
*/ -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestRegionReplicas { @ClassRule @@ -130,7 +130,7 @@ private HRegionServer getRS() { public void testOpenRegionReplica() throws Exception { openRegion(HTU, getRS(), hriSecondary); try { - //load some data to primary + // load some data to primary HTU.loadNumericRows(table, f, 0, 1000); // assert that we can read back from primary @@ -148,8 +148,8 @@ public void testRegionReplicaUpdatesMetaLocation() throws Exception { Table meta = null; try { meta = HTU.getConnection().getTable(TableName.META_TABLE_NAME); - TestMetaTableAccessor.assertMetaLocation(meta, hriPrimary.getRegionName() - , getRS().getServerName(), -1, 1, false); + TestMetaTableAccessor.assertMetaLocation(meta, hriPrimary.getRegionName(), + getRS().getServerName(), -1, 1, false); } finally { if (meta != null) { meta.close(); @@ -161,7 +161,7 @@ public void testRegionReplicaUpdatesMetaLocation() throws Exception { @Test public void testRegionReplicaGets() throws Exception { try { - //load some data to primary + // load some data to primary HTU.loadNumericRows(table, f, 0, 1000); // assert that we can read back from primary Assert.assertEquals(1000, HBaseTestingUtil.countRows(table)); @@ -185,7 +185,7 @@ public void testRegionReplicaGets() throws Exception { @Test public void testGetOnTargetRegionReplica() throws Exception { try { - //load some data to primary + // load some data to primary HTU.loadNumericRows(table, f, 0, 1000); // assert that we can read back from primary Assert.assertEquals(1000, HBaseTestingUtil.countRows(table)); @@ -225,7 +225,7 @@ private void assertGetRpc(RegionInfo info, int value, boolean expect) byte[] row = Bytes.toBytes(String.valueOf(value)); Get get = new Get(row); ClientProtos.GetRequest getReq = RequestConverter.buildGetRequest(info.getRegionName(), get); - ClientProtos.GetResponse getResp = getRS().getRSRpcServices().get(null, getReq); + ClientProtos.GetResponse getResp = getRS().getRSRpcServices().get(null, getReq); Result result = ProtobufUtil.toResult(getResp.getResult()); if (expect) { Assert.assertArrayEquals(row, result.getValue(f, null)); @@ -253,7 +253,7 @@ public void testRefresStoreFiles() throws Exception { LOG.info("Opening the secondary region " + hriSecondary.getEncodedName()); openRegion(HTU, getRS(), hriSecondary); - //load some data to primary + // load some data to primary LOG.info("Loading data to primary region"); HTU.loadNumericRows(table, f, 0, 1000); // assert that we can read back from primary @@ -275,7 +275,7 @@ public void testRefresStoreFiles() throws Exception { assertGetRpc(hriSecondary, 42, true); assertGetRpc(hriSecondary, 1042, false); - //load some data to primary + // load some data to primary HTU.loadNumericRows(table, f, 1000, 1100); region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName()); region.flush(true); @@ -331,7 +331,7 @@ public void testFlushAndCompactionsInPrimary() throws Exception { try { openRegion(HTU, getRS(), hriSecondary); - //load some data to primary so that reader won't fail + // load some data to primary so that reader won't fail HTU.loadNumericRows(table, f, startKey, endKey); TestRegionServerNoMaster.flushRegion(HTU, hriPrimary); // ensure that chore is run @@ -340,12 +340,13 @@ public void testFlushAndCompactionsInPrimary() throws Exception { final AtomicBoolean running = new AtomicBoolean(true); @SuppressWarnings("unchecked") final AtomicReference[] exceptions = new AtomicReference[3]; - for (int 
i=0; i < exceptions.length; i++) { + for (int i = 0; i < exceptions.length; i++) { exceptions[i] = new AtomicReference<>(); } Runnable writer = new Runnable() { int key = startKey; + @Override public void run() { try { @@ -368,6 +369,7 @@ public void run() { Runnable flusherCompactor = new Runnable() { Random random = ThreadLocalRandom.current(); + public void run() { try { while (running.get()) { @@ -396,15 +398,15 @@ public void run() { try { closeRegion(HTU, getRS(), hriSecondary); } catch (Exception ex) { - LOG.warn("Failed closing the region " + hriSecondary + " " + - StringUtils.stringifyException(ex)); + LOG.warn("Failed closing the region " + hriSecondary + " " + + StringUtils.stringifyException(ex)); exceptions[2].compareAndSet(null, ex); } try { openRegion(HTU, getRS(), hriSecondary); } catch (Exception ex) { - LOG.warn("Failed opening the region " + hriSecondary + " " + - StringUtils.stringifyException(ex)); + LOG.warn("Failed opening the region " + hriSecondary + " " + + StringUtils.stringifyException(ex)); exceptions[2].compareAndSet(null, ex); } } @@ -413,8 +415,8 @@ public void run() { assertGetRpc(hriSecondary, key, true); } } catch (Exception ex) { - LOG.warn("Failed getting the value in the region " + hriSecondary + " " + - StringUtils.stringifyException(ex)); + LOG.warn("Failed getting the value in the region " + hriSecondary + " " + + StringUtils.stringifyException(ex)); exceptions[2].compareAndSet(null, ex); } } @@ -475,8 +477,8 @@ public void testVerifySecondaryAbilityToReadWithOnFiles() throws Exception { LOG.info("Force Major compaction on primary region " + hriPrimary); primaryRegion.compact(true); Assert.assertEquals(1, primaryRegion.getStore(f).getStorefilesCount()); - List regionServerThreads = HTU.getMiniHBaseCluster() - .getRegionServerThreads(); + List regionServerThreads = + HTU.getMiniHBaseCluster().getRegionServerThreads(); HRegionServer hrs = null; for (RegionServerThread rs : regionServerThreads) { if (rs.getRegionServer() @@ -485,8 +487,7 @@ public void testVerifySecondaryAbilityToReadWithOnFiles() throws Exception { break; } } - CompactedHFilesDischarger cleaner = - new CompactedHFilesDischarger(100, null, hrs, false); + CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(100, null, hrs, false); cleaner.chore(); // scan all the hfiles on the secondary. // since there are no read on the secondary when we ask locations to @@ -505,8 +506,8 @@ public void testVerifySecondaryAbilityToReadWithOnFiles() throws Exception { keys++; Cell cell = scanner.getCell(); - sum += Integer.parseInt(Bytes.toString(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength())); + sum += Integer.parseInt( + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } while (scanner.next()); } Assert.assertEquals(3000, keys); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasAreDistributed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasAreDistributed.java index e98b78f1bb49..67a81280b8da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasAreDistributed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasAreDistributed.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -80,7 +80,7 @@ public static void before() throws Exception { private static void createTableDirectlyFromHTD(final TableName tableName) throws IOException { TableDescriptor htd = - TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3).build(); + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3).build(); // create a table with 3 replication table = HTU.createTable(htd, new byte[][] { f }, getSplits(20), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java index bd0bbd77c814..c8db66d1b8fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ public class TestRegionReplicasWithModifyTable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionReplicasWithModifyTable.class); + HBaseClassTestRule.forClass(TestRegionReplicasWithModifyTable.class); private static final int NB_SERVERS = 3; @@ -76,7 +76,7 @@ public static void before() throws Exception { } private void enableReplicationByModification(boolean withReplica, int initialReplicaCount, - int enableReplicaCount, int splitCount) throws IOException, InterruptedException { + int enableReplicaCount, int splitCount) throws IOException, InterruptedException { TableName tableName = name.getTableName(); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); if (withReplica) { @@ -152,13 +152,13 @@ public void testRegionReplicasByEnableTableWhenReplicaCountIsDecreased() throws @Test public void testRegionReplicasByEnableTableWhenReplicaCountIsDecreasedWithMultipleRegions() - throws Exception { + throws Exception { enableReplicationByModification(true, 3, 2, 20); } @Test public void testRegionReplicasByEnableTableWhenReplicaCountIsIncreasedWithmultipleRegions() - throws Exception { + throws Exception { enableReplicationByModification(true, 2, 3, 15); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java index a0104baf4892..86074aed6339 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,10 +17,11 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.Assert.*; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -47,9 +48,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.junit.Assert.*; - -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRegionReplicasWithRestartScenarios { @ClassRule @@ -59,7 +58,8 @@ public class TestRegionReplicasWithRestartScenarios { private static final Logger LOG = LoggerFactory.getLogger(TestRegionReplicasWithRestartScenarios.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final int NB_SERVERS = 3; private Table table; @@ -132,7 +132,7 @@ public void testWhenRestart() throws Exception { ServerName serverName = stopRegionServer.getServerName(); // Make a copy because this is actual instance from HRegionServer Collection regionsOnStoppedServer = - new ArrayList(stopRegionServer.getOnlineRegionsLocalContext()); + new ArrayList(stopRegionServer.getOnlineRegionsLocalContext()); HTU.getHBaseCluster().stopRegionServer(serverName); HTU.getHBaseCluster().waitForRegionServerToStop(serverName, 60000); HTU.waitTableAvailable(this.tableName); @@ -153,8 +153,8 @@ private void assertReplicaDistributed(Collection onlineRegions) throws Collection onlineRegions3 = getTertiaryRS().getOnlineRegionsLocalContext(); checkDuplicates(onlineRegions3); assertFalse(res); - int totalRegions = HTU.getMiniHBaseCluster().getLiveRegionServerThreads().stream(). - mapToInt(l -> l.getRegionServer().getOnlineRegions().size()).sum(); + int totalRegions = HTU.getMiniHBaseCluster().getLiveRegionServerThreads().stream() + .mapToInt(l -> l.getRegionServer().getOnlineRegions().size()).sum(); assertEquals(61, totalRegions); } @@ -171,7 +171,7 @@ private boolean checkDuplicates(Collection onlineRegions3) throws Excep i++; if (i > 1) { LOG.warn("Duplicate found {} and {}", actualRegion.getRegionInfo(), - region.getRegionInfo()); + region.getRegionInfo()); assertTrue(Bytes.equals(region.getRegionInfo().getStartKey(), actualRegion.getRegionInfo().getStartKey())); assertTrue(Bytes.equals(region.getRegionInfo().getEndKey(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java index 0dd16de9641b..97254f0e171c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -74,7 +74,7 @@ /** * Tests around regionserver shutdown and abort */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRegionServerAbort { @ClassRule @@ -95,9 +95,9 @@ public void setup() throws Exception { testUtil = new HBaseTestingUtil(); conf = testUtil.getConfiguration(); conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - StopBlockingRegionObserver.class.getName()); + StopBlockingRegionObserver.class.getName()); conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - StopBlockingRegionObserver.class.getName()); + StopBlockingRegionObserver.class.getName()); // make sure we have multiple blocks so that the client does not prefetch all block locations conf.set("dfs.blocksize", Long.toString(100 * 1024)); // prefetch the first block @@ -107,7 +107,7 @@ public void setup() throws Exception { testUtil.startMiniZKCluster(); dfsCluster = testUtil.startMiniDFSCluster(2); StartTestingClusterOption option = - StartTestingClusterOption.builder().numRegionServers(2).build(); + StartTestingClusterOption.builder().numRegionServers(2).build(); cluster = testUtil.startMiniHBaseCluster(option); } @@ -117,15 +117,16 @@ public void tearDown() throws Exception { for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) { HRegionServer rs = t.getRegionServer(); RegionServerCoprocessorHost cpHost = rs.getRegionServerCoprocessorHost(); - StopBlockingRegionObserver cp = (StopBlockingRegionObserver)cpHost.findCoprocessor(className); + StopBlockingRegionObserver cp = + (StopBlockingRegionObserver) cpHost.findCoprocessor(className); cp.setStopAllowed(true); } testUtil.shutdownMiniCluster(); } /** - * Test that a regionserver is able to abort properly, even when a coprocessor - * throws an exception in preStopRegionServer(). + * Test that a regionserver is able to abort properly, even when a coprocessor throws an exception + * in preStopRegionServer(). */ @Test public void testAbortFromRPC() throws Exception { @@ -141,9 +142,9 @@ public void testAbortFromRPC() throws Exception { LOG.info("Flushed table"); // Send a poisoned put to trigger the abort - Put put = new Put(new byte[]{0, 0, 0, 0}); - put.addColumn(FAMILY_BYTES, Bytes.toBytes("c"), new byte[]{}); - put.setAttribute(StopBlockingRegionObserver.DO_ABORT, new byte[]{1}); + Put put = new Put(new byte[] { 0, 0, 0, 0 }); + put.addColumn(FAMILY_BYTES, Bytes.toBytes("c"), new byte[] {}); + put.setAttribute(StopBlockingRegionObserver.DO_ABORT, new byte[] { 1 }); List regions = cluster.findRegionsForTable(tableName); HRegion firstRegion = cluster.findRegionsForTable(tableName).get(0); @@ -182,8 +183,8 @@ public void testMultiAbort() { HRegionServer rs = t.getRegionServer(); assertFalse(rs.isAborted()); RegionServerCoprocessorHost cpHost = rs.getRegionServerCoprocessorHost(); - StopBlockingRegionObserver cp = (StopBlockingRegionObserver)cpHost.findCoprocessor( - StopBlockingRegionObserver.class.getName()); + StopBlockingRegionObserver cp = (StopBlockingRegionObserver) cpHost + .findCoprocessor(StopBlockingRegionObserver.class.getName()); // Enable clean abort. cp.setStopAllowed(true); // Issue two aborts in quick succession. 
@@ -221,13 +222,13 @@ public Optional getRegionServerObserver() { @Override public void prePut(ObserverContext c, Put put, WALEdit edit, - Durability durability) throws IOException { + Durability durability) throws IOException { if (put.getAttribute(DO_ABORT) != null) { // TODO: Change this so it throws a CP Abort Exception instead. RegionServerServices rss = - ((HasRegionServerServices)c.getEnvironment()).getRegionServerServices(); + ((HasRegionServerServices) c.getEnvironment()).getRegionServerServices(); String str = "Aborting for test"; - LOG.info(str + " " + rss.getServerName()); + LOG.info(str + " " + rss.getServerName()); rss.abort(str, new Throwable(str)); } } @@ -255,13 +256,12 @@ public void setStopAllowed(boolean allowed) { */ public static class ErrorThrowingHRegion extends HRegion { public ErrorThrowingHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration confParam, - RegionInfo regionInfo, TableDescriptor htd, - RegionServerServices rsServices) { + RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices) { super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices); } public ErrorThrowingHRegion(HRegionFileSystem fs, WAL wal, Configuration confParam, - TableDescriptor htd, RegionServerServices rsServices) { + TableDescriptor htd, RegionServerServices rsServices) { super(fs, wal, confParam, htd, rsServices); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java index 2448098179f6..1f1c7814cf6c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.Optional; import java.util.TimerTask; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -81,11 +80,11 @@ public static void setUp() throws Exception { conf.setLong(HRegionServer.ABORT_TIMEOUT, SLEEP_TIME_WHEN_CLOSE_REGION); conf.set(HRegionServer.ABORT_TIMEOUT_TASK, TestAbortTimeoutTask.class.getName()); StartTestingClusterOption option = - StartTestingClusterOption.builder().numRegionServers(2).build(); + StartTestingClusterOption.builder().numRegionServers(2).build(); UTIL.startMiniCluster(option); TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setCoprocessor(SleepWhenCloseCoprocessor.class.getName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF).build()).build(); + .setCoprocessor(SleepWhenCloseCoprocessor.class.getName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF).build()).build(); UTIL.getAdmin().createTable(td, Bytes.toBytes("0"), Bytes.toBytes("9"), REGIONS_NUM); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java index bf4bca01ca2e..52f58de95cb0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java index 18c1ef886843..31302058b6a3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestRegionServerCrashDisableWAL { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionServerCrashDisableWAL.class); + HBaseClassTestRule.forClass(TestRegionServerCrashDisableWAL.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -99,7 +99,7 @@ public void test() throws InterruptedException, IOException { // make sure that we can schedule a SCP for the crashed server which WAL is disabled and bring // the region online. try (Table table = - UTIL.getConnection().getTableBuilder(TABLE_NAME, null).setOperationTimeout(30000).build()) { + UTIL.getConnection().getTableBuilder(TABLE_NAME, null).setOperationTimeout(30000).build()) { table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(1))); assertEquals(1, Bytes.toInt(table.get(new Get(Bytes.toBytes(1))).getValue(CF, CQ))); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java index 8ea72f760045..bf0cd2ed0b31 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ /** * Tests for the hostname specification by region server */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRegionServerHostname { @ClassRule @@ -81,9 +81,8 @@ public void testInvalidRegionServerHostnameAbortsServer() throws Exception { try { hrs = new HRegionServer(TEST_UTIL.getConfiguration()); } catch (IllegalArgumentException iae) { - assertTrue(iae.getMessage(), - iae.getMessage().contains("Failed resolve of " + invalidHostname) || - iae.getMessage().contains("Problem binding to " + invalidHostname)); + assertTrue(iae.getMessage(), iae.getMessage().contains("Failed resolve of " + invalidHostname) + || iae.getMessage().contains("Problem binding to " + invalidHostname)); } assertNull("Failed to validate against invalid hostname", hrs); } @@ -97,8 +96,8 @@ public void testRegionServerHostname() throws Exception { // iterate through host addresses and use each as hostname while (addrList.hasMoreElements()) { InetAddress addr = addrList.nextElement(); - if (addr.isLoopbackAddress() || addr.isLinkLocalAddress() || addr.isMulticastAddress() || - !addr.isSiteLocalAddress()) { + if (addr.isLoopbackAddress() || addr.isLinkLocalAddress() || addr.isMulticastAddress() + || !addr.isSiteLocalAddress()) { continue; } String hostName = addr.getHostName(); @@ -115,7 +114,7 @@ public void testRegionServerHostname() throws Exception { assertEquals(NUM_RS, servers.size()); for (String server : servers) { assertTrue("From zookeeper: " + server + " hostname: " + hostName, - server.startsWith(hostName.toLowerCase(Locale.ROOT)+",")); + server.startsWith(hostName.toLowerCase(Locale.ROOT) + ",")); } zkw.close(); } finally { @@ -130,9 +129,11 @@ public void testDeprecatedConfigs() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); new HRegionServer(conf); conf.setBoolean(HRegionServer.RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false); - assertFalse(conf.getBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true)); + assertFalse( + conf.getBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true)); conf.setBoolean(HRegionServer.RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true); - assertTrue(conf.getBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)); + assertTrue( + conf.getBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)); conf.setBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true); assertTrue(conf.getBoolean(HRegionServer.RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)); conf.setBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false); @@ -164,10 +165,12 @@ public void testConflictRegionServerHostnameConfigurationsAbortServer() throws E LOG.info("Found " + hostName + " on " + ni); TEST_UTIL.getConfiguration().set(DNS.MASTER_HOSTNAME_KEY, hostName); - // "hbase.unsafe.regionserver.hostname" and "hbase.unsafe.regionserver.hostname.disable.master.reversedns" + // "hbase.unsafe.regionserver.hostname" and + // "hbase.unsafe.regionserver.hostname.disable.master.reversedns" // are mutually exclusive. Exception should be thrown if both are used. 
TEST_UTIL.getConfiguration().set(DNS.UNSAFE_RS_HOSTNAME_KEY, hostName); - TEST_UTIL.getConfiguration().setBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true); + TEST_UTIL.getConfiguration() + .setBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true); try { StartTestingClusterOption option = StartTestingClusterOption.builder() .numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build(); @@ -175,9 +178,9 @@ public void testConflictRegionServerHostnameConfigurationsAbortServer() throws E } catch (Exception e) { Throwable t1 = e.getCause(); Throwable t2 = t1.getCause(); - assertTrue(t1.getMessage()+" - "+t2.getMessage(), t2.getMessage().contains( - HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and " + - DNS.UNSAFE_RS_HOSTNAME_KEY + " are mutually exclusive")); + assertTrue(t1.getMessage() + " - " + t2.getMessage(), + t2.getMessage().contains(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + + " and " + DNS.UNSAFE_RS_HOSTNAME_KEY + " are mutually exclusive")); return; } finally { TEST_UTIL.shutdownMiniCluster(); @@ -189,10 +192,10 @@ public void testConflictRegionServerHostnameConfigurationsAbortServer() throws E @Test public void testRegionServerHostnameReportedToMaster() throws Exception { - TEST_UTIL.getConfiguration().setBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, - true); - StartTestingClusterOption option = StartTestingClusterOption.builder() - .numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build(); + TEST_UTIL.getConfiguration() + .setBoolean(HRegionServer.UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS) + .numRegionServers(NUM_RS).numDataNodes(NUM_RS).build(); TEST_UTIL.startMiniCluster(option); int expectedRS = NUM_RS; try (ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index d0657a2f125f..32df56764405 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -75,7 +75,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestRegionServerMetrics { @ClassRule @@ -114,7 +114,7 @@ public static void startCluster() throws Exception { // testMobMetrics creates few hfiles and manages compaction manually. 
conf.setInt("hbase.hstore.compactionThreshold", 100); conf.setInt("hbase.hstore.compaction.max", 100); - conf.setInt("hbase.regionserver.periodicmemstoreflusher.rangeofdelayseconds", 4*60); + conf.setInt("hbase.regionserver.periodicmemstoreflusher.rangeofdelayseconds", 4 * 60); conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1); TEST_UTIL.startMiniCluster(); @@ -123,9 +123,8 @@ public static void startCluster() throws Exception { admin = TEST_UTIL.getAdmin(); connection = TEST_UTIL.getConnection(); - while (cluster.getLiveRegionServerThreads().isEmpty() && - cluster.getRegionServer(0) == null && - rs.getMetrics() == null) { + while (cluster.getLiveRegionServerThreads().isEmpty() && cluster.getRegionServer(0) == null + && rs.getMetrics() == null) { Threads.sleep(100); } rs = cluster.getRegionServer(0); @@ -167,14 +166,12 @@ private void assertGauge(String metric, long expectedValue) { // Aggregates metrics from regions and assert given list of metrics and expected values. private void assertRegionMetrics(String metric, long expectedValue) throws Exception { try (RegionLocator locator = connection.getRegionLocator(tableName)) { - for ( HRegionLocation location: locator.getAllRegionLocations()) { + for (HRegionLocation location : locator.getAllRegionLocations()) { RegionInfo hri = location.getRegion(); MetricsRegionAggregateSource agg = rs.getRegion(hri.getRegionName()).getMetrics().getSource().getAggregateSource(); - String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + - "_table_" + tableName.getNameAsString() + - "_region_" + hri.getEncodedName()+ - "_metric_"; + String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + "_table_" + + tableName.getNameAsString() + "_region_" + hri.getEncodedName() + "_metric_"; metricsHelper.assertCounter(prefix + metric, expectedValue, agg); } } @@ -220,8 +217,8 @@ private void doScan(int n, boolean caching) throws IOException { ResultScanner scanner = table.getScanner(scan); for (int i = 0; i < n; i++) { Result res = scanner.next(); - LOG.debug("Result row: " + Bytes.toString(res.getRow()) + ", value: " + - Bytes.toString(res.getValue(cf, qualifier))); + LOG.debug("Result row: " + Bytes.toString(res.getRow()) + ", value: " + + Bytes.toString(res.getValue(cf, qualifier))); } } @@ -266,11 +263,10 @@ public void testRequestCount() throws Exception { assertRegionMetrics("getCount", 10); assertRegionMetrics("putCount", 31); - doNGets(10, true); // true = batch + doNGets(10, true); // true = batch metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("writeRequestCount", writeRequests + 30); doNPuts(30, true); @@ -300,8 +296,7 @@ public void testGet() throws Exception { @Test public void testMutationsWithoutWal() throws Exception { - Put p = new Put(row).addColumn(cf, qualifier, val) - .setDurability(Durability.SKIP_WAL); + Put p = new Put(row).addColumn(cf, qualifier, val).setDurability(Durability.SKIP_WAL); table.put(p); metricsRegionServer.getRegionServerWrapper().forceRecompute(); @@ -323,7 +318,7 @@ public void testStoreCount() throws Exception { @Test public void testStoreFileAge() throws Exception { - //Force a hfile. + // Force a hfile. 
doNPuts(1, false); TEST_UTIL.getAdmin().flush(tableName); @@ -361,7 +356,7 @@ public void testIncrement() throws Exception { Put p = new Put(row).addColumn(cf, qualifier, Bytes.toBytes(0L)); table.put(p); - for(int count = 0; count < 13; count++) { + for (int count = 0; count < 13; count++) { Increment inc = new Increment(row); inc.addColumn(cf, qualifier, 100); table.increment(inc); @@ -375,7 +370,7 @@ public void testIncrement() throws Exception { public void testAppend() throws Exception { doNPuts(1, false); - for(int count = 0; count< 73; count++) { + for (int count = 0; count < 73; count++) { Append append = new Append(row); append.addColumn(cf, qualifier, Bytes.toBytes(",Test")); table.append(append); @@ -437,10 +432,9 @@ public void testScanSizeForSmallScan() throws Exception { public void testMobMetrics() throws IOException, InterruptedException { TableName tableName = TableName.valueOf("testMobMetricsLocal"); int numHfiles = 5; - TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(cf).setMobEnabled(true).setMobThreshold(0).build()) - .build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(cf).setMobEnabled(true).setMobThreshold(0).build()) + .build(); byte[] val = Bytes.toBytes("mobdata"); try { Table table = TEST_UTIL.createTable(htd, new byte[0][0], conf); @@ -456,7 +450,7 @@ public void testMobMetrics() throws IOException, InterruptedException { Scan scan = new Scan().withStartRow(Bytes.toBytes(0)).withStopRow(Bytes.toBytes(numHfiles)); ResultScanner scanner = table.getScanner(scan); scanner.next(100); - numScanNext++; // this is an ugly construct + numScanNext++; // this is an ugly construct scanner.close(); metricsRegionServer.getRegionServerWrapper().forceRecompute(); assertCounter("mobScanCellsCount", numHfiles); @@ -466,17 +460,16 @@ public void testMobMetrics() throws IOException, InterruptedException { region.initialize(); // This is how we MOB compact region List stores = region.getStores(); - for (HStore store: stores) { + for (HStore store : stores) { // Force major compaction store.triggerMajorCompaction(); - Optional context = - store.requestCompaction(HStore.PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, - User.getCurrent()); + Optional context = store.requestCompaction(HStore.PRIORITY_USER, + CompactionLifeCycleTracker.DUMMY, User.getCurrent()); if (!context.isPresent()) { continue; } - region.compact(context.get(), store, - NoLimitThroughputController.INSTANCE, User.getCurrent()); + region.compact(context.get(), store, NoLimitThroughputController.INSTANCE, + User.getCurrent()); } metricsRegionServer.getRegionServerWrapper().forceRecompute(); assertCounter("cellsCountCompactedFromMob", numHfiles); @@ -484,7 +477,7 @@ public void testMobMetrics() throws IOException, InterruptedException { scanner = table.getScanner(scan); scanner.next(100); - numScanNext++; // this is an ugly construct + numScanNext++; // this is an ugly construct metricsRegionServer.getRegionServerWrapper().forceRecompute(); assertCounter("mobScanCellsCount", 0); @@ -514,15 +507,11 @@ public void testMobMetrics() throws IOException, InterruptedException { private static Region setMobThreshold(Region region, byte[] cfName, long modThreshold) { ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder - .newBuilder(region.getTableDescriptor().getColumnFamily(cfName)) - .setMobThreshold(modThreshold) - .build(); - TableDescriptor td = 
TableDescriptorBuilder - .newBuilder(region.getTableDescriptor()) - .removeColumnFamily(cfName) - .setColumnFamily(cfd) - .build(); - ((HRegion)region).setTableDescriptor(td); + .newBuilder(region.getTableDescriptor().getColumnFamily(cfName)) + .setMobThreshold(modThreshold).build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(region.getTableDescriptor()) + .removeColumnFamily(cfName).setColumnFamily(cfd).build(); + ((HRegion) region).setTableDescriptor(td); return region; } @@ -568,8 +557,8 @@ public void testRangeCountMetrics() throws Exception { } prior = timeranges[i]; } - dynamicMetricName = - timeRangeMetricName + "_" + timeRangeType + "_" + timeranges[timeranges.length - 1] + "-inf"; + dynamicMetricName = timeRangeMetricName + "_" + timeRangeType + "_" + + timeranges[timeranges.length - 1] + "-inf"; if (metricsHelper.checkCounterExists(dynamicMetricName, serverSource)) { long count = metricsHelper.getCounter(dynamicMetricName, serverSource); if (count > 0) { @@ -581,7 +570,7 @@ public void testRangeCountMetrics() throws Exception { @Test public void testAverageRegionSize() throws Exception { - //Force a hfile. + // Force a hfile. doNPuts(1, false); TEST_UTIL.getAdmin().flush(tableName); @@ -598,12 +587,12 @@ public void testReadBytes() throws Exception { metricsRegionServer.getRegionServerWrapper().forceRecompute(); assertTrue("Total read bytes should be larger than 0", - metricsRegionServer.getRegionServerWrapper().getTotalBytesRead() > 0); + metricsRegionServer.getRegionServerWrapper().getTotalBytesRead() > 0); assertTrue("Total local read bytes should be larger than 0", - metricsRegionServer.getRegionServerWrapper().getLocalBytesRead() > 0); + metricsRegionServer.getRegionServerWrapper().getLocalBytesRead() > 0); assertEquals("Total short circuit read bytes should be equal to 0", 0, - metricsRegionServer.getRegionServerWrapper().getShortCircuitBytesRead()); + metricsRegionServer.getRegionServerWrapper().getShortCircuitBytesRead()); assertEquals("Total zero-byte read bytes should be equal to 0", 0, - metricsRegionServer.getRegionServerWrapper().getZeroCopyBytesRead()); + metricsRegionServer.getRegionServerWrapper().getZeroCopyBytesRead()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index f97aa49f9a24..0da1cd332cf4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ /** * Tests on the region server, without the master. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRegionServerNoMaster { @ClassRule @@ -71,14 +71,13 @@ public class TestRegionServerNoMaster { private static byte[] regionName; private static final HBaseTestingUtil HTU = new HBaseTestingUtil(); - @BeforeClass public static void before() throws Exception { HTU.startMiniCluster(NB_SERVERS); final TableName tableName = TableName.valueOf(TestRegionServerNoMaster.class.getSimpleName()); // Create table then get the single region for our new table. 
- table = HTU.createTable(tableName,HConstants.CATALOG_FAMILY); + table = HTU.createTable(tableName, HConstants.CATALOG_FAMILY); Put p = new Put(row); p.addColumn(HConstants.CATALOG_FAMILY, row, row); table.put(p); @@ -92,11 +91,11 @@ public static void before() throws Exception { } public static void stopMasterAndCacheMetaLocation(HBaseTestingUtil HTU) - throws IOException, InterruptedException { + throws IOException, InterruptedException { // cache meta location, so we will not go to master to lookup meta region location for (JVMClusterUtil.RegionServerThread t : HTU.getMiniHBaseCluster().getRegionServerThreads()) { try (RegionLocator locator = - t.getRegionServer().getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + t.getRegionServer().getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { locator.getAllRegionLocations(); } } @@ -116,9 +115,10 @@ public static void stopMasterAndCacheMetaLocation(HBaseTestingUtil HTU) HRegionServer.TEST_SKIP_REPORTING_TRANSITION = true; } - /** Flush the given region in the mini cluster. Since no master, we cannot use HBaseAdmin.flush() */ - public static void flushRegion(HBaseTestingUtil HTU, RegionInfo regionInfo) - throws IOException { + /** + * Flush the given region in the mini cluster. Since no master, we cannot use HBaseAdmin.flush() + */ + public static void flushRegion(HBaseTestingUtil HTU, RegionInfo regionInfo) throws IOException { for (RegionServerThread rst : HTU.getMiniHBaseCluster().getRegionServerThreads()) { HRegion region = rst.getRegionServer().getRegionByEncodedName(regionInfo.getEncodedName()); if (region != null) { @@ -142,23 +142,21 @@ private static HRegionServer getRS() { return HTU.getHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer(); } - public static void openRegion(HBaseTestingUtil HTU, HRegionServer rs, RegionInfo hri) throws Exception { AdminProtos.OpenRegionRequest orr = - RequestConverter.buildOpenRegionRequest(rs.getServerName(), hri, null); + RequestConverter.buildOpenRegionRequest(rs.getServerName(), hri, null); AdminProtos.OpenRegionResponse responseOpen = rs.getRpcServices().openRegion(null, orr); Assert.assertTrue(responseOpen.getOpeningStateCount() == 1); - Assert.assertTrue(responseOpen.getOpeningState(0). 
- equals(AdminProtos.OpenRegionResponse.RegionOpeningState.OPENED)); - + Assert.assertTrue(responseOpen.getOpeningState(0) + .equals(AdminProtos.OpenRegionResponse.RegionOpeningState.OPENED)); checkRegionIsOpened(HTU, rs, hri); } - public static void checkRegionIsOpened(HBaseTestingUtil HTU, HRegionServer rs, - RegionInfo hri) throws Exception { + public static void checkRegionIsOpened(HBaseTestingUtil HTU, HRegionServer rs, RegionInfo hri) + throws Exception { while (!rs.getRegionsInTransitionInRS().isEmpty()) { Thread.sleep(1); } @@ -168,15 +166,15 @@ public static void checkRegionIsOpened(HBaseTestingUtil HTU, HRegionServer rs, public static void closeRegion(HBaseTestingUtil HTU, HRegionServer rs, RegionInfo hri) throws Exception { - AdminProtos.CloseRegionRequest crr = ProtobufUtil.buildCloseRegionRequest( - rs.getServerName(), hri.getRegionName()); + AdminProtos.CloseRegionRequest crr = + ProtobufUtil.buildCloseRegionRequest(rs.getServerName(), hri.getRegionName()); AdminProtos.CloseRegionResponse responseClose = rs.getRpcServices().closeRegion(null, crr); Assert.assertTrue(responseClose.getClosed()); checkRegionIsClosed(HTU, rs, hri); } - public static void checkRegionIsClosed(HBaseTestingUtil HTU, HRegionServer rs, - RegionInfo hri) throws Exception { + public static void checkRegionIsClosed(HBaseTestingUtil HTU, HRegionServer rs, RegionInfo hri) + throws Exception { while (!rs.getRegionsInTransitionInRS().isEmpty()) { Thread.sleep(1); } @@ -202,7 +200,6 @@ private void closeRegionNoZK() throws Exception { checkRegionIsClosed(HTU, getRS(), hri); } - @Test public void testCloseByRegionServer() throws Exception { closeRegionNoZK(); @@ -213,10 +210,10 @@ public void testCloseByRegionServer() throws Exception { public void testMultipleCloseFromMaster() throws Exception { for (int i = 0; i < 10; i++) { AdminProtos.CloseRegionRequest crr = - ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), regionName, null); + ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), regionName, null); try { AdminProtos.CloseRegionResponse responseClose = - getRS().getRpcServices().closeRegion(null, crr); + getRS().getRpcServices().closeRegion(null, crr); Assert.assertTrue("request " + i + " failed", responseClose.getClosed() || responseClose.hasClosed()); } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException se) { @@ -251,8 +248,8 @@ public void testCancelOpeningWithoutZK() throws Exception { } // The state in RIT should have changed to close - Assert.assertEquals(Boolean.FALSE, getRS().getRegionsInTransitionInRS().get( - hri.getEncodedNameAsBytes())); + Assert.assertEquals(Boolean.FALSE, + getRS().getRegionsInTransitionInRS().get(hri.getEncodedNameAsBytes())); // Let's start the open handler TableDescriptor htd = getRS().getTableDescriptors().get(hri.getTable()); @@ -266,8 +263,8 @@ public void testCancelOpeningWithoutZK() throws Exception { } /** - * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port - * for openRegion. The region server should reject this RPC. (HBASE-9721) + * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port for openRegion. + * The region server should reject this RPC. 
(HBASE-9721) */ @Test public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception { @@ -277,24 +274,27 @@ public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception { ServerName earlierServerName = ServerName.valueOf(sn.getHostname(), sn.getPort(), 1); try { - CloseRegionRequest request = ProtobufUtil.buildCloseRegionRequest(earlierServerName, regionName); + CloseRegionRequest request = + ProtobufUtil.buildCloseRegionRequest(earlierServerName, regionName); getRS().getRSRpcServices().closeRegion(null, request); Assert.fail("The closeRegion should have been rejected"); } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException se) { Assert.assertTrue(se.getCause() instanceof IOException); - Assert.assertTrue(se.getCause().getMessage().contains("This RPC was intended for a different server")); + Assert.assertTrue( + se.getCause().getMessage().contains("This RPC was intended for a different server")); } - //actual close + // actual close closeRegionNoZK(); try { - AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest( - earlierServerName, hri, null); + AdminProtos.OpenRegionRequest orr = + RequestConverter.buildOpenRegionRequest(earlierServerName, hri, null); getRS().getRSRpcServices().openRegion(null, orr); Assert.fail("The openRegion should have been rejected"); } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException se) { Assert.assertTrue(se.getCause() instanceof IOException); - Assert.assertTrue(se.getCause().getMessage().contains("This RPC was intended for a different server")); + Assert.assertTrue( + se.getCause().getMessage().contains("This RPC was intended for a different server")); } finally { openRegion(HTU, getRS(), hri); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java index 37b7f645db24..5f9bfdcbdbf9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,12 +54,11 @@ import org.slf4j.LoggerFactory; /** - * Verify that the Online config Changes on the HRegionServer side are actually - * happening. We should add tests for important configurations which will be - * changed online. + * Verify that the Online config Changes on the HRegionServer side are actually happening. We should + * add tests for important configurations which will be changed online. 
*/ -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestRegionServerOnlineConfigChange { @ClassRule @@ -67,7 +66,7 @@ public class TestRegionServerOnlineConfigChange { HBaseClassTestRule.forClass(TestRegionServerOnlineConfigChange.class); private static final Logger LOG = - LoggerFactory.getLogger(TestRegionServerOnlineConfigChange.class.getName()); + LoggerFactory.getLogger(TestRegionServerOnlineConfigChange.class.getName()); private static final long WAIT_TIMEOUT = TimeUnit.MINUTES.toMillis(2); private static HBaseTestingUtil hbaseTestingUtility = new HBaseTestingUtil(); private static Configuration conf = null; @@ -84,7 +83,6 @@ public class TestRegionServerOnlineConfigChange { private final static byte[] COLUMN_FAMILY1 = Bytes.toBytes(columnFamily1Str); private final static long MAX_FILE_SIZE = 20 * 1024 * 1024L; - @BeforeClass public static void setUpBeforeClass() throws Exception { conf = hbaseTestingUtility.getConfiguration(); @@ -104,8 +102,8 @@ public void setUp() throws Exception { try (RegionLocator locator = hbaseTestingUtility.getConnection().getRegionLocator(TABLE1)) { RegionInfo firstHRI = locator.getAllRegionLocations().get(0).getRegion(); r1name = firstHRI.getRegionName(); - rs1 = hbaseTestingUtility.getHBaseCluster().getRegionServer( - hbaseTestingUtility.getHBaseCluster().getServerWith(r1name)); + rs1 = hbaseTestingUtility.getHBaseCluster() + .getRegionServer(hbaseTestingUtility.getHBaseCluster().getServerWith(r1name)); r1 = rs1.getRegion(r1name); hMaster = hbaseTestingUtility.getHBaseCluster().getMaster(); } @@ -117,27 +115,19 @@ public void setUp() throws Exception { @Test public void testNumCompactionThreadsOnlineChange() { assertNotNull(rs1.getCompactSplitThread()); - int newNumSmallThreads = - rs1.getCompactSplitThread().getSmallCompactionThreadNum() + 1; - int newNumLargeThreads = - rs1.getCompactSplitThread().getLargeCompactionThreadNum() + 1; - - conf.setInt("hbase.regionserver.thread.compaction.small", - newNumSmallThreads); - conf.setInt("hbase.regionserver.thread.compaction.large", - newNumLargeThreads); + int newNumSmallThreads = rs1.getCompactSplitThread().getSmallCompactionThreadNum() + 1; + int newNumLargeThreads = rs1.getCompactSplitThread().getLargeCompactionThreadNum() + 1; + + conf.setInt("hbase.regionserver.thread.compaction.small", newNumSmallThreads); + conf.setInt("hbase.regionserver.thread.compaction.large", newNumLargeThreads); rs1.getConfigurationManager().notifyAllObservers(conf); - assertEquals(newNumSmallThreads, - rs1.getCompactSplitThread().getSmallCompactionThreadNum()); - assertEquals(newNumLargeThreads, - rs1.getCompactSplitThread().getLargeCompactionThreadNum()); + assertEquals(newNumSmallThreads, rs1.getCompactSplitThread().getSmallCompactionThreadNum()); + assertEquals(newNumLargeThreads, rs1.getCompactSplitThread().getLargeCompactionThreadNum()); } /** - * Test that the configurations in the CompactionConfiguration class change - * properly. - * + * Test that the configurations in the CompactionConfiguration class change properly. * @throws IOException */ @Test @@ -149,55 +139,51 @@ public void testCompactionConfigurationOnlineChange() throws IOException { + "Got a different implementation other than HStore"); return; } - HStore hstore = (HStore)s; + HStore hstore = (HStore) s; // Set the new compaction ratio to a different value. 
double newCompactionRatio = - hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatio() + 0.1; - conf.setFloat(strPrefix + "ratio", (float)newCompactionRatio); + hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatio() + 0.1; + conf.setFloat(strPrefix + "ratio", (float) newCompactionRatio); // Notify all the observers, which includes the Store object. rs1.getConfigurationManager().notifyAllObservers(conf); // Check if the compaction ratio got updated in the Compaction Configuration assertEquals(newCompactionRatio, - hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatio(), - 0.00001); + hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatio(), 0.00001); // Check if the off peak compaction ratio gets updated. double newOffPeakCompactionRatio = hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatioOffPeak() + 0.1; - conf.setFloat(strPrefix + "ratio.offpeak", - (float)newOffPeakCompactionRatio); + conf.setFloat(strPrefix + "ratio.offpeak", (float) newOffPeakCompactionRatio); rs1.getConfigurationManager().notifyAllObservers(conf); assertEquals(newOffPeakCompactionRatio, - hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatioOffPeak(), - 0.00001); + hstore.getStoreEngine().getCompactionPolicy().getConf().getCompactionRatioOffPeak(), 0.00001); // Check if the throttle point gets updated. long newThrottlePoint = hstore.getStoreEngine().getCompactionPolicy().getConf().getThrottlePoint() + 10; - conf.setLong("hbase.regionserver.thread.compaction.throttle", - newThrottlePoint); + conf.setLong("hbase.regionserver.thread.compaction.throttle", newThrottlePoint); rs1.getConfigurationManager().notifyAllObservers(conf); assertEquals(newThrottlePoint, - hstore.getStoreEngine().getCompactionPolicy().getConf().getThrottlePoint()); + hstore.getStoreEngine().getCompactionPolicy().getConf().getThrottlePoint()); // Check if the minFilesToCompact gets updated. int newMinFilesToCompact = - hstore.getStoreEngine().getCompactionPolicy().getConf().getMinFilesToCompact() + 1; + hstore.getStoreEngine().getCompactionPolicy().getConf().getMinFilesToCompact() + 1; conf.setLong(strPrefix + "min", newMinFilesToCompact); rs1.getConfigurationManager().notifyAllObservers(conf); assertEquals(newMinFilesToCompact, - hstore.getStoreEngine().getCompactionPolicy().getConf().getMinFilesToCompact()); + hstore.getStoreEngine().getCompactionPolicy().getConf().getMinFilesToCompact()); // Check if the maxFilesToCompact gets updated. int newMaxFilesToCompact = - hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxFilesToCompact() + 1; + hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxFilesToCompact() + 1; conf.setLong(strPrefix + "max", newMaxFilesToCompact); rs1.getConfigurationManager().notifyAllObservers(conf); assertEquals(newMaxFilesToCompact, - hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxFilesToCompact()); + hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxFilesToCompact()); // Check OffPeak hours is updated in an online fashion. conf.setLong(CompactionConfiguration.HBASE_HSTORE_OFFPEAK_START_HOUR, 6); @@ -207,41 +193,40 @@ public void testCompactionConfigurationOnlineChange() throws IOException { // Check if the minCompactSize gets updated. 
long newMinCompactSize = - hstore.getStoreEngine().getCompactionPolicy().getConf().getMinCompactSize() + 1; + hstore.getStoreEngine().getCompactionPolicy().getConf().getMinCompactSize() + 1; conf.setLong(strPrefix + "min.size", newMinCompactSize); rs1.getConfigurationManager().notifyAllObservers(conf); assertEquals(newMinCompactSize, - hstore.getStoreEngine().getCompactionPolicy().getConf().getMinCompactSize()); + hstore.getStoreEngine().getCompactionPolicy().getConf().getMinCompactSize()); // Check if the maxCompactSize gets updated. long newMaxCompactSize = - hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxCompactSize() - 1; + hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxCompactSize() - 1; conf.setLong(strPrefix + "max.size", newMaxCompactSize); rs1.getConfigurationManager().notifyAllObservers(conf); assertEquals(newMaxCompactSize, - hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxCompactSize()); + hstore.getStoreEngine().getCompactionPolicy().getConf().getMaxCompactSize()); // Check if the offPeakMaxCompactSize gets updated. long newOffpeakMaxCompactSize = - hstore.getStoreEngine().getCompactionPolicy().getConf().getOffPeakMaxCompactSize() - 1; + hstore.getStoreEngine().getCompactionPolicy().getConf().getOffPeakMaxCompactSize() - 1; conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, newOffpeakMaxCompactSize); rs1.getConfigurationManager().notifyAllObservers(conf); assertEquals(newOffpeakMaxCompactSize, - hstore.getStoreEngine().getCompactionPolicy().getConf().getOffPeakMaxCompactSize()); + hstore.getStoreEngine().getCompactionPolicy().getConf().getOffPeakMaxCompactSize()); // Check if majorCompactionPeriod gets updated. long newMajorCompactionPeriod = - hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionPeriod() + 10; + hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionPeriod() + 10; conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, newMajorCompactionPeriod); rs1.getConfigurationManager().notifyAllObservers(conf); assertEquals(newMajorCompactionPeriod, - hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionPeriod()); + hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionPeriod()); // Check if majorCompactionJitter gets updated. 
float newMajorCompactionJitter = hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionJitter() + 0.02F; - conf.setFloat("hbase.hregion.majorcompaction.jitter", - newMajorCompactionJitter); + conf.setFloat("hbase.hregion.majorcompaction.jitter", newMajorCompactionJitter); rs1.getConfigurationManager().notifyAllObservers(conf); assertEquals(newMajorCompactionJitter, hstore.getStoreEngine().getCompactionPolicy().getConf().getMajorCompactionJitter(), 0.00001); @@ -255,7 +240,8 @@ public void removeClosedRegionFromConfigurationManager() throws Exception { rs1.getConfigurationManager().containsObserver(r1)); admin.move(r1name); hbaseTestingUtility.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { - @Override public boolean evaluate() throws Exception { + @Override + public boolean evaluate() throws Exception { return rs1.getOnlineRegion(r1name) == null; } }); @@ -263,7 +249,8 @@ public void removeClosedRegionFromConfigurationManager() throws Exception { rs1.getConfigurationManager().containsObserver(r1)); admin.move(r1name, rs1.getServerName()); hbaseTestingUtility.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { - @Override public boolean evaluate() throws Exception { + @Override + public boolean evaluate() throws Exception { return rs1.getOnlineRegion(r1name) != null; } }); @@ -293,8 +280,7 @@ public void testCoprocessorConfigurationOnlineChangeOnMaster() { conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, JMXListener.class.getName()); assertFalse(hMaster.isInMaintenanceMode()); hMaster.getConfigurationManager().notifyAllObservers(conf); - assertNotNull( - hMaster.getMasterCoprocessorHost().findCoprocessor(JMXListener.class.getName())); + assertNotNull(hMaster.getMasterCoprocessorHost().findCoprocessor(JMXListener.class.getName())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java index 35cb81d23146..c881d3cbf193 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -110,8 +110,8 @@ public class TestRegionServerReadRequestMetrics { public static void setUpOnce() throws Exception { TEST_UTIL.startMiniCluster(); admin = TEST_UTIL.getAdmin(); - serverNames = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().keySet(); + serverNames = + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(); table = createTable(); putData(); List regions = admin.getRegions(TABLE_NAME); @@ -127,26 +127,23 @@ public static void setUpOnce() throws Exception { private static Table createTable() throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME); builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF1)); - builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF2).setTimeToLive(TTL) - .build()); + builder + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF2).setTimeToLive(TTL).build()); admin.createTable(builder.build()); return TEST_UTIL.getConnection().getTable(TABLE_NAME); } - private static void testReadRequests(long resultCount, - long expectedReadRequests, long expectedFilteredReadRequests) - throws IOException, InterruptedException { + private static void testReadRequests(long resultCount, long expectedReadRequests, + long expectedFilteredReadRequests) throws IOException, InterruptedException { updateMetricsMap(); System.out.println("requestsMapPrev = " + requestsMapPrev); System.out.println("requestsMap = " + requestsMap); assertEquals(expectedReadRequests, requestsMap.get(Metric.REGION_READ) - requestsMapPrev.get(Metric.REGION_READ)); - assertEquals(expectedFilteredReadRequests, - requestsMap.get(Metric.FILTERED_REGION_READ) + assertEquals(expectedFilteredReadRequests, requestsMap.get(Metric.FILTERED_REGION_READ) - requestsMapPrev.get(Metric.FILTERED_REGION_READ)); - assertEquals(expectedFilteredReadRequests, - requestsMap.get(Metric.FILTERED_SERVER_READ) + assertEquals(expectedFilteredReadRequests, requestsMap.get(Metric.FILTERED_SERVER_READ) - requestsMapPrev.get(Metric.FILTERED_SERVER_READ)); assertEquals(expectedReadRequests, resultCount); } @@ -162,7 +159,7 @@ private static void updateMetricsMap() throws IOException, InterruptedException for (int i = 0; i < MAX_TRY; i++) { for (ServerName serverName : serverNames) { serverMetrics = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().get(serverName); + .getLiveServerMetrics().get(serverName); Map regionMetrics = serverMetrics.getRegionMetrics(); RegionMetrics regionMetric = regionMetrics.get(regionInfo.getRegionName()); @@ -171,8 +168,8 @@ private static void updateMetricsMap() throws IOException, InterruptedException for (Metric metric : Metric.values()) { if (getReadRequest(serverMetrics, regionMetric, metric) > requestsMapPrev.get(metric)) { for (Metric metricInner : Metric.values()) { - requestsMap.put(metricInner, getReadRequest(serverMetrics, regionMetric, - metricInner)); + requestsMap.put(metricInner, + getReadRequest(serverMetrics, regionMetric, metricInner)); } metricsUpdated = true; break; @@ -219,7 +216,7 @@ private static void putData() throws IOException { put.addColumn(CF1, COL3, VAL3); table.put(put); put = new Put(ROW2); - put.addColumn(CF1, COL1, VAL2); // put val2 instead of val1 + put.addColumn(CF1, COL1, VAL2); // put val2 instead of val1 put.addColumn(CF1, COL2, VAL2); table.put(put); put = new Put(ROW3); @@ -362,11 +359,11 @@ public void 
testReadRequestsCountWithFilter() throws Exception { } // fixme filtered get should not increase readRequestsCount -// Get get = new Get(ROW2); -// get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1)); -// Result result = table.get(get); -// resultCount = result.isEmpty() ? 0 : 1; -// testReadRequests(resultCount, 0, 1); + // Get get = new Get(ROW2); + // get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1)); + // Result result = table.get(get); + // resultCount = result.isEmpty() ? 0 : 1; + // testReadRequests(resultCount, 0, 1); } @Ignore // HBASE-19785 @@ -445,8 +442,8 @@ public void testReadRequestsWithCoprocessor() throws Exception { private void testReadRequests(byte[] regionName, int expectedReadRequests) throws Exception { for (ServerName serverName : serverNames) { - ServerMetrics serverMetrics = admin.getClusterMetrics( - EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().get(serverName); + ServerMetrics serverMetrics = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) + .getLiveServerMetrics().get(serverName); Map regionMetrics = serverMetrics.getRegionMetrics(); RegionMetrics regionMetric = regionMetrics.get(regionName); if (regionMetric != null) { @@ -494,5 +491,7 @@ private void putData(Region region) throws Exception { } } - private enum Metric {REGION_READ, SERVER_READ, FILTERED_REGION_READ, FILTERED_SERVER_READ} + private enum Metric { + REGION_READ, SERVER_READ, FILTERED_REGION_READ, FILTERED_SERVER_READ + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java index 3cac439f88ee..723e62de01ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,8 +43,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest; /** - * Test class for isolated (non-cluster) tests surrounding the report - * of Region space use to the Master by RegionServers. + * Test class for isolated (non-cluster) tests surrounding the report of Region space use to the + * Master by RegionServers. 
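The testConversion hunks that follow build RegionInfo instances and load them into a RegionSizeStore before converting them to a space-use report. A minimal sketch of that setup, using only the builder calls shown in the diff; the import locations are assumed from the quotas package and the table name and size are illustrative:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.RegionInfo;
  import org.apache.hadoop.hbase.client.RegionInfoBuilder;
  import org.apache.hadoop.hbase.quotas.RegionSizeStore;
  import org.apache.hadoop.hbase.quotas.RegionSizeStoreFactory;
  import org.apache.hadoop.hbase.util.Bytes;

  public final class RegionSizeStoreSketch {
    public static void main(String[] args) {
      TableName tn = TableName.valueOf("table1");
      RegionInfo hri = RegionInfoBuilder.newBuilder(tn)
          .setStartKey(Bytes.toBytes("a"))
          .setEndKey(Bytes.toBytes("b"))
          .build();
      RegionSizeStore sizes = RegionSizeStoreFactory.getInstance().createStore();
      sizes.put(hri, 1024L * 1024L); // one region reported at 1 MB of store files
    }
  }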
*/ @Category(SmallTests.class) public class TestRegionServerRegionSpaceUseReport { @@ -57,18 +57,12 @@ public class TestRegionServerRegionSpaceUseReport { public void testConversion() { TableName tn = TableName.valueOf("table1"); - RegionInfo hri1 = RegionInfoBuilder.newBuilder(tn) - .setStartKey(Bytes.toBytes("a")) - .setEndKey(Bytes.toBytes("b")) - .build(); - RegionInfo hri2 = RegionInfoBuilder.newBuilder(tn) - .setStartKey(Bytes.toBytes("b")) - .setEndKey(Bytes.toBytes("c")) - .build(); - RegionInfo hri3 = RegionInfoBuilder.newBuilder(tn) - .setStartKey(Bytes.toBytes("c")) - .setEndKey(Bytes.toBytes("d")) - .build(); + RegionInfo hri1 = RegionInfoBuilder.newBuilder(tn).setStartKey(Bytes.toBytes("a")) + .setEndKey(Bytes.toBytes("b")).build(); + RegionInfo hri2 = RegionInfoBuilder.newBuilder(tn).setStartKey(Bytes.toBytes("b")) + .setEndKey(Bytes.toBytes("c")).build(); + RegionInfo hri3 = RegionInfoBuilder.newBuilder(tn).setStartKey(Bytes.toBytes("c")) + .setEndKey(Bytes.toBytes("d")).build(); RegionSizeStore store = RegionSizeStoreFactory.getInstance().createStore(); store.put(hri1, 1024L * 1024L); store.put(hri2, 1024L * 1024L * 8L); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java index da49b8710c52..27b87f46b7bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -95,7 +95,7 @@ private static class LogCapturer { LogCapturer(org.apache.logging.log4j.core.Logger logger) { this.logger = logger; this.appender = org.apache.logging.log4j.core.appender.WriterAppender.newBuilder() - .setName("test").setTarget(sw).build(); + .setName("test").setTarget(sw).build(); this.logger.addAppender(this.appender); } @@ -132,8 +132,8 @@ public void testReportForDutyBackoff() throws IOException, InterruptedException master.start(); LogCapturer capturer = - new LogCapturer((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger(HRegionServer.class)); + new LogCapturer((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager + .getLogger(HRegionServer.class)); // Set sleep interval relatively low so that exponential backoff is more demanding. int msginterval = 100; cluster.getConfiguration().setInt("hbase.regionserver.msginterval", msginterval); @@ -150,16 +150,16 @@ public void testReportForDutyBackoff() throws IOException, InterruptedException // Following asserts the actual retry number is in range (expectedRetry/2, expectedRetry*2). // Ideally we can assert the exact retry count. We relax here to tolerate contention error. 
- int expectedRetry = (int)Math.ceil(Math.log(interval - msginterval)); - assertTrue(String.format("reportForDuty retries %d times, less than expected min %d", - count, expectedRetry / 2), count > expectedRetry / 2); - assertTrue(String.format("reportForDuty retries %d times, more than expected max %d", - count, expectedRetry * 2), count < expectedRetry * 2); + int expectedRetry = (int) Math.ceil(Math.log(interval - msginterval)); + assertTrue(String.format("reportForDuty retries %d times, less than expected min %d", count, + expectedRetry / 2), count > expectedRetry / 2); + assertTrue(String.format("reportForDuty retries %d times, more than expected max %d", count, + expectedRetry * 2), count < expectedRetry * 2); } /** - * Tests region sever reportForDuty with backup master becomes primary master after - * the first master goes away. + * Tests region sever reportForDuty with backup master becomes primary master after the first + * master goes away. */ @Test public void testReportForDutyWithMasterChange() throws Exception { @@ -209,15 +209,15 @@ public void testReportForDutyWithMasterChange() throws Exception { assertEquals(backupMaster.getMaster().getServerManager().getOnlineServersList().size(), 2); } - + /** * Tests region sever reportForDuty with RS RPC retry */ @Test public void testReportForDutyWithRSRpcRetry() throws Exception { ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(1, - new ThreadFactoryBuilder().setNameFormat("RSDelayedStart-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadFactoryBuilder().setNameFormat("RSDelayedStart-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); // Start a master and wait for it to become the active/primary master. // Use a random unique port @@ -301,8 +301,8 @@ public static class MyRegionServer extends MiniHBaseClusterRegionServer { private boolean rpcStubCreatedFlag = false; private boolean masterChanged = false; - public MyRegionServer(Configuration conf) throws IOException, KeeperException, - InterruptedException { + public MyRegionServer(Configuration conf) + throws IOException, KeeperException, InterruptedException { super(conf); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerScan.java index 43576696a62c..2f6c11614529 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
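The assertion above only brackets the reportForDuty retry count because exponential backoff makes the exact number timing-dependent: sleeps grow geometrically, so the retries that fit into a fixed wait grow roughly logarithmically. A standalone illustration of the bound the test computes; the interval value here is made up, the real test derives it from its own configuration:

  public final class BackoffRetryEstimate {
    public static void main(String[] args) {
      int msginterval = 100;  // hbase.regionserver.msginterval, as in the test
      int interval = 1100;    // illustrative total wait
      int expectedRetry = (int) Math.ceil(Math.log(interval - msginterval));
      System.out.println("expected ~" + expectedRetry + " retries, accepted range ("
          + expectedRetry / 2 + ", " + expectedRetry * 2 + ")");
    }
  }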
See the NOTICE file * distributed with this work for additional information @@ -114,8 +114,7 @@ public void testScannWhenRpcCallContextNull() throws Exception { ResultScanner resultScanner = null; Table table = null; try { - table = - TEST_UTIL.createTable(tableName, new byte[][] { CF }, 1, 1024, null); + table = TEST_UTIL.createTable(tableName, new byte[][] { CF }, 1, 1024, null); putToTable(table, r0); putToTable(table, r1); putToTable(table, r2); @@ -190,13 +189,12 @@ public ScanResponse scan(RpcController controller, ScanRequest request) region = this.getRegion(request.getRegion()); } - if (region != null - && !tableName.equals(region.getTableDescriptor().getTableName())) { + if (region != null && !tableName.equals(region.getTableDescriptor().getTableName())) { return super.scan(controller, request); } ScanResponse result = null; - //Simulate RpcCallContext is null for test. + // Simulate RpcCallContext is null for test. Optional rpcCall = RpcServer.unsetCurrentCall(); try { result = super.scan(controller, request); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java index 48a9995a2844..3e9632a3f03f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ public class TestRegionSplitPolicy { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionSplitPolicy.class); + HBaseClassTestRule.forClass(TestRegionSplitPolicy.class); private Configuration conf; private HRegion mockRegion; @@ -85,7 +85,7 @@ public void testForceSplitRegionWithReference() throws IOException { conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); ConstantSizeRegionSplitPolicy policy = - (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); assertFalse(policy.shouldSplit()); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, @@ -112,13 +112,13 @@ public void testIncreasingToUpperBoundRegionSplitPolicy() throws IOException { long flushSize = maxSplitSize / 8; conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSize); TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(maxSplitSize) - .setMemStoreFlushSize(flushSize).build(); + .setMemStoreFlushSize(flushSize).build(); doReturn(td).when(mockRegion).getTableDescriptor(); // If RegionServerService with no regions in it -- 'online regions' == 0 -- // then IncreasingToUpperBoundRegionSplitPolicy should act like a // ConstantSizePolicy IncreasingToUpperBoundRegionSplitPolicy policy = - (IncreasingToUpperBoundRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + (IncreasingToUpperBoundRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); doConstantSizePolicyTests(policy); // Add a store in excess of split size. 
Because there are "no regions" @@ -169,7 +169,7 @@ public void testIsExceedSize() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).build(); doReturn(td).when(mockRegion).getTableDescriptor(); ConstantSizeRegionSplitPolicy policy = - (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); regions.add(mockRegion); HStore mockStore1 = mock(HStore.class); @@ -194,7 +194,7 @@ public void testIsExceedSize() throws IOException { @Test public void testBusyRegionSplitPolicy() throws Exception { doReturn(TableDescriptorBuilder.newBuilder(TABLENAME).build()).when(mockRegion) - .getTableDescriptor(); + .getTableDescriptor(); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, BusyRegionSplitPolicy.class.getName()); conf.setLong("hbase.busy.policy.minAge", 1000000L); conf.setFloat("hbase.busy.policy.blockedRequests", 0.1f); @@ -207,7 +207,7 @@ public void testBusyRegionSplitPolicy() throws Exception { when(mockRegion.getWriteRequestsCount()).thenReturn(0L); BusyRegionSplitPolicy policy = - (BusyRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + (BusyRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); when(mockRegion.getBlockedRequestsCount()).thenReturn(10L); when(mockRegion.getWriteRequestsCount()).thenReturn(10L); @@ -253,7 +253,7 @@ public void testCreateDefault() throws IOException { // Using a default HTD, should pick up the file size from // configuration. ConstantSizeRegionSplitPolicy policy = - (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); assertWithinJitter(1234L, policy.getDesiredMaxFileSize()); // If specified in HTD, should use that @@ -269,8 +269,8 @@ public void testCreateDefault() throws IOException { @Test public void testCustomPolicy() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME) - .setRegionSplitPolicyClassName(KeyPrefixRegionSplitPolicy.class.getName()) - .setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, "2").build(); + .setRegionSplitPolicyClassName(KeyPrefixRegionSplitPolicy.class.getName()) + .setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, "2").build(); doReturn(td).when(mockRegion).getTableDescriptor(); @@ -281,7 +281,7 @@ public void testCustomPolicy() throws IOException { stores.add(mockStore); KeyPrefixRegionSplitPolicy policy = - (KeyPrefixRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + (KeyPrefixRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); assertEquals("ab", Bytes.toString(policy.getSplitPoint())); } @@ -291,7 +291,7 @@ public void testConstantSizePolicy() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(1024L).build(); doReturn(td).when(mockRegion).getTableDescriptor(); ConstantSizeRegionSplitPolicy policy = - (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); doConstantSizePolicyTests(policy); } @@ -332,7 +332,7 @@ public void testGetSplitPoint() throws IOException { doReturn(td).when(mockRegion).getTableDescriptor(); ConstantSizeRegionSplitPolicy policy = - (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); // For no stores, should not split 
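testCustomPolicy above wires a split policy through the table descriptor rather than the server configuration. A minimal descriptor sketch using the same builder calls that appear in the diff; the table and family names are illustrative:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy;

  public final class KeyPrefixSplitPolicyTable {
    static TableDescriptor descriptor() {
      return TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
          .setRegionSplitPolicyClassName(KeyPrefixRegionSplitPolicy.class.getName())
          .setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, "2") // split points trimmed to a 2-byte prefix
          .build();
    }
  }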
assertFalse(policy.shouldSplit()); @@ -360,8 +360,8 @@ public void testGetSplitPoint() throws IOException { @Test public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME) - .setRegionSplitPolicyClassName(DelimitedKeyPrefixRegionSplitPolicy.class.getName()) - .setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ",").build(); + .setRegionSplitPolicyClassName(DelimitedKeyPrefixRegionSplitPolicy.class.getName()) + .setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ",").build(); doReturn(td).when(mockRegion).getTableDescriptor(); doReturn(stores).when(mockRegion).getStores(); @@ -373,7 +373,7 @@ public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException { stores.add(mockStore); DelimitedKeyPrefixRegionSplitPolicy policy = - (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); + (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf); assertEquals("ab", Bytes.toString(policy.getSplitPoint())); @@ -386,7 +386,7 @@ public void testConstantSizePolicyWithJitter() throws IOException { conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); TableDescriptor td = - TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(Long.MAX_VALUE).build(); + TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(Long.MAX_VALUE).build(); doReturn(td).when(mockRegion).getTableDescriptor(); boolean positiveJitter = false; ConstantSizeRegionSplitPolicy policy = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitRestriction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitRestriction.java index 329a7afa5ac0..3e8a5b844430 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitRestriction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitRestriction.java @@ -40,10 +40,11 @@ public class TestRegionSplitRestriction { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionSplitRestriction.class); + HBaseClassTestRule.forClass(TestRegionSplitRestriction.class); Configuration conf; - @Mock TableDescriptor tableDescriptor; + @Mock + TableDescriptor tableDescriptor; @Before public void setup() { @@ -55,30 +56,27 @@ public void setup() { @Test public void testWhenTableDescriptorReturnsNoneType() throws IOException { when(tableDescriptor.getValue(RegionSplitRestriction.RESTRICTION_TYPE_KEY)) - .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_NONE); + .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_NONE); - RegionSplitRestriction splitRestriction = - RegionSplitRestriction.create(tableDescriptor, conf); + RegionSplitRestriction splitRestriction = RegionSplitRestriction.create(tableDescriptor, conf); assertTrue(splitRestriction instanceof NoRegionSplitRestriction); } @Test public void testWhenTableDescriptorReturnsKeyPrefixType() throws IOException { when(tableDescriptor.getValue(RegionSplitRestriction.RESTRICTION_TYPE_KEY)) - .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_KEY_PREFIX); + .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_KEY_PREFIX); - RegionSplitRestriction splitRestriction = - RegionSplitRestriction.create(tableDescriptor, conf); + RegionSplitRestriction splitRestriction = RegionSplitRestriction.create(tableDescriptor, conf); assertTrue(splitRestriction instanceof 
KeyPrefixRegionSplitRestriction); } @Test public void testWhenTableDescriptorReturnsDelimitedKeyPrefixType() throws IOException { when(tableDescriptor.getValue(RegionSplitRestriction.RESTRICTION_TYPE_KEY)) - .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_DELIMITED_KEY_PREFIX); + .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_DELIMITED_KEY_PREFIX); - RegionSplitRestriction splitRestriction = - RegionSplitRestriction.create(tableDescriptor, conf); + RegionSplitRestriction splitRestriction = RegionSplitRestriction.create(tableDescriptor, conf); assertTrue(splitRestriction instanceof DelimitedKeyPrefixRegionSplitRestriction); } @@ -87,8 +85,7 @@ public void testWhenConfigurationReturnsNoneType() throws IOException { conf.set(RegionSplitRestriction.RESTRICTION_TYPE_KEY, RegionSplitRestriction.RESTRICTION_TYPE_NONE); - RegionSplitRestriction splitRestriction = - RegionSplitRestriction.create(tableDescriptor, conf); + RegionSplitRestriction splitRestriction = RegionSplitRestriction.create(tableDescriptor, conf); assertTrue(splitRestriction instanceof NoRegionSplitRestriction); } @@ -97,8 +94,7 @@ public void testWhenConfigurationReturnsKeyPrefixType() throws IOException { conf.set(RegionSplitRestriction.RESTRICTION_TYPE_KEY, RegionSplitRestriction.RESTRICTION_TYPE_KEY_PREFIX); - RegionSplitRestriction splitRestriction = - RegionSplitRestriction.create(tableDescriptor, conf); + RegionSplitRestriction splitRestriction = RegionSplitRestriction.create(tableDescriptor, conf); assertTrue(splitRestriction instanceof KeyPrefixRegionSplitRestriction); } @@ -107,78 +103,73 @@ public void testWhenConfigurationReturnsDelimitedKeyPrefixType() throws IOExcept conf.set(RegionSplitRestriction.RESTRICTION_TYPE_KEY, RegionSplitRestriction.RESTRICTION_TYPE_DELIMITED_KEY_PREFIX); - RegionSplitRestriction splitRestriction = - RegionSplitRestriction.create(tableDescriptor, conf); + RegionSplitRestriction splitRestriction = RegionSplitRestriction.create(tableDescriptor, conf); assertTrue(splitRestriction instanceof DelimitedKeyPrefixRegionSplitRestriction); } @Test public void testWhenTableDescriptorAndConfigurationReturnNull() throws IOException { - RegionSplitRestriction splitRestriction = - RegionSplitRestriction.create(tableDescriptor, conf); + RegionSplitRestriction splitRestriction = RegionSplitRestriction.create(tableDescriptor, conf); assertTrue(splitRestriction instanceof NoRegionSplitRestriction); } @Test public void testWhenTableDescriptorReturnsInvalidType() throws IOException { when(tableDescriptor.getValue(RegionSplitRestriction.RESTRICTION_TYPE_KEY)) - .thenReturn("Invalid"); + .thenReturn("Invalid"); - RegionSplitRestriction splitRestriction = - RegionSplitRestriction.create(tableDescriptor, conf); + RegionSplitRestriction splitRestriction = RegionSplitRestriction.create(tableDescriptor, conf); assertTrue(splitRestriction instanceof NoRegionSplitRestriction); } @Test public void testNoneRegionSplitRestriction() throws IOException { when(tableDescriptor.getValue(RegionSplitRestriction.RESTRICTION_TYPE_KEY)) - .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_NONE); + .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_NONE); NoRegionSplitRestriction noRegionSplitRestriction = - (NoRegionSplitRestriction) RegionSplitRestriction.create(tableDescriptor, conf); + (NoRegionSplitRestriction) RegionSplitRestriction.create(tableDescriptor, conf); byte[] restrictedSplit = - noRegionSplitRestriction.getRestrictedSplitPoint(Bytes.toBytes("abcd")); + 
noRegionSplitRestriction.getRestrictedSplitPoint(Bytes.toBytes("abcd")); assertEquals("abcd", Bytes.toString(restrictedSplit)); } @Test public void testKeyPrefixRegionSplitRestriction() throws IOException { when(tableDescriptor.getValue(RegionSplitRestriction.RESTRICTION_TYPE_KEY)) - .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_KEY_PREFIX); + .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_KEY_PREFIX); when(tableDescriptor.getValue(KeyPrefixRegionSplitRestriction.PREFIX_LENGTH_KEY)) - .thenReturn("2"); + .thenReturn("2"); KeyPrefixRegionSplitRestriction keyPrefixRegionSplitRestriction = - (KeyPrefixRegionSplitRestriction) RegionSplitRestriction.create( - tableDescriptor, conf); + (KeyPrefixRegionSplitRestriction) RegionSplitRestriction.create(tableDescriptor, conf); byte[] restrictedSplit = - keyPrefixRegionSplitRestriction.getRestrictedSplitPoint(Bytes.toBytes("abcd")); + keyPrefixRegionSplitRestriction.getRestrictedSplitPoint(Bytes.toBytes("abcd")); assertEquals("ab", Bytes.toString(restrictedSplit)); - restrictedSplit = - keyPrefixRegionSplitRestriction.getRestrictedSplitPoint(Bytes.toBytes("a")); + restrictedSplit = keyPrefixRegionSplitRestriction.getRestrictedSplitPoint(Bytes.toBytes("a")); assertEquals("a", Bytes.toString(restrictedSplit)); } @Test public void testDelimitedKeyPrefixRegionSplitRestriction() throws IOException { when(tableDescriptor.getValue(RegionSplitRestriction.RESTRICTION_TYPE_KEY)) - .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_DELIMITED_KEY_PREFIX); + .thenReturn(RegionSplitRestriction.RESTRICTION_TYPE_DELIMITED_KEY_PREFIX); when(tableDescriptor.getValue(DelimitedKeyPrefixRegionSplitRestriction.DELIMITER_KEY)) - .thenReturn(","); + .thenReturn(","); DelimitedKeyPrefixRegionSplitRestriction delimitedKeyPrefixRegionSplitRestriction = - (DelimitedKeyPrefixRegionSplitRestriction) RegionSplitRestriction.create( - tableDescriptor, conf); + (DelimitedKeyPrefixRegionSplitRestriction) RegionSplitRestriction.create(tableDescriptor, + conf); - byte[] restrictedSplit = delimitedKeyPrefixRegionSplitRestriction - .getRestrictedSplitPoint(Bytes.toBytes("ab,cd")); + byte[] restrictedSplit = + delimitedKeyPrefixRegionSplitRestriction.getRestrictedSplitPoint(Bytes.toBytes("ab,cd")); assertEquals("ab", Bytes.toString(restrictedSplit)); - restrictedSplit = delimitedKeyPrefixRegionSplitRestriction - .getRestrictedSplitPoint(Bytes.toBytes("ijk")); + restrictedSplit = + delimitedKeyPrefixRegionSplitRestriction.getRestrictedSplitPoint(Bytes.toBytes("ijk")); assertEquals("ijk", Bytes.toString(restrictedSplit)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRemoveRegionMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRemoveRegionMetrics.java index 91a80aba0283..af28e45ac629 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRemoveRegionMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRemoveRegionMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
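The restriction tests above resolve the strategy from either the table descriptor or the configuration. A table-level sketch built from the constants in the diff; the trimmed split point mirrors the "abcd" to "ab" expectation, and the table and family names are illustrative:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitRestriction;
  import org.apache.hadoop.hbase.regionserver.RegionSplitRestriction;
  import org.apache.hadoop.hbase.util.Bytes;

  public final class SplitRestrictionSketch {
    static byte[] restrictedSplit(Configuration conf) throws IOException {
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
          .setValue(RegionSplitRestriction.RESTRICTION_TYPE_KEY,
            RegionSplitRestriction.RESTRICTION_TYPE_KEY_PREFIX)
          .setValue(KeyPrefixRegionSplitRestriction.PREFIX_LENGTH_KEY, "2")
          .build();
      RegionSplitRestriction restriction = RegionSplitRestriction.create(td, conf);
      return restriction.getRestrictedSplitPoint(Bytes.toBytes("abcd")); // yields "ab"
    }
  }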
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestRemoveRegionMetrics { @ClassRule @@ -78,7 +78,6 @@ public static void startCluster() throws Exception { } } - @Test public void testMoveRegion() throws IOException, InterruptedException { String tableNameString = name.getMethodName(); @@ -87,8 +86,7 @@ public void testMoveRegion() throws IOException, InterruptedException { TEST_UTIL.waitUntilAllRegionsAssigned(t.getName()); Admin admin = TEST_UTIL.getAdmin(); RegionInfo regionInfo; - byte[] row = Bytes.toBytes("r1"); - + byte[] row = Bytes.toBytes("r1"); for (int i = 0; i < 30; i++) { boolean moved = false; @@ -97,30 +95,23 @@ public void testMoveRegion() throws IOException, InterruptedException { } int currentServerIdx = cluster.getServerWith(regionInfo.getRegionName()); - int destServerIdx = (currentServerIdx +1)% cluster.getLiveRegionServerThreads().size(); + int destServerIdx = (currentServerIdx + 1) % cluster.getLiveRegionServerThreads().size(); HRegionServer currentServer = cluster.getRegionServer(currentServerIdx); HRegionServer destServer = cluster.getRegionServer(destServerIdx); - // Do a put. The counters should be non-zero now Put p = new Put(row); p.addColumn(Bytes.toBytes("D"), Bytes.toBytes("Zero"), Bytes.toBytes("VALUE")); t.put(p); - MetricsRegionAggregateSource currentAgg = currentServer.getRegion(regionInfo.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); + .getMetrics().getSource().getAggregateSource(); - String prefix = "namespace_"+ NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + regionInfo.getEncodedName()+ - "_metric"; + String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + "_table_" + + tableNameString + "_region_" + regionInfo.getEncodedName() + "_metric"; metricsHelper.assertCounter(prefix + "_putCount", 1, currentAgg); - try { TEST_UTIL.moveRegionAndWait(regionInfo, destServer.getServerName()); moved = true; @@ -130,9 +121,7 @@ public void testMoveRegion() throws IOException, InterruptedException { if (moved) { MetricsRegionAggregateSource destAgg = destServer.getRegion(regionInfo.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); + .getMetrics().getSource().getAggregateSource(); metricsHelper.assertCounter(prefix + "_putCount", 0, destAgg); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReplicateToReplica.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReplicateToReplica.java index d9f846d789ab..e125b9fe9340 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReplicateToReplica.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReplicateToReplica.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
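TestRemoveRegionMetrics above asserts against per-region metric names assembled from namespace, table, and encoded region name. A small sketch of the naming scheme the assertions rely on; the encoded region name is a placeholder:

  import org.apache.hadoop.hbase.NamespaceDescriptor;

  public final class RegionMetricName {
    static String putCountMetric(String table, String encodedRegionName) {
      String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR
          + "_table_" + table
          + "_region_" + encodedRegionName
          + "_metric";
      return prefix + "_putCount"; // e.g. namespace_default_table_t1_region_<encoded>_metric_putCount
    }
  }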
See the NOTICE file * distributed with this work for additional information @@ -86,7 +86,7 @@ public class TestReplicateToReplica { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicateToReplica.class); + HBaseClassTestRule.forClass(TestReplicateToReplica.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -128,20 +128,20 @@ public class TestReplicateToReplica { public static final class HRegionForTest extends HRegion { public HRegionForTest(HRegionFileSystem fs, WAL wal, Configuration confParam, - TableDescriptor htd, RegionServerServices rsServices) { + TableDescriptor htd, RegionServerServices rsServices) { super(fs, wal, confParam, htd, rsServices); } @SuppressWarnings("deprecation") public HRegionForTest(Path tableDir, WAL wal, FileSystem fs, Configuration confParam, - RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices) { + RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices) { super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices); } @Override protected PrepareFlushResult internalPrepareFlushCache(WAL wal, long myseqid, - Collection storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, - FlushLifeCycleTracker tracker) throws IOException { + Collection storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, + FlushLifeCycleTracker tracker) throws IOException { PrepareFlushResult result = super.internalPrepareFlushCache(wal, myseqid, storesToFlush, status, writeFlushWalMarker, tracker); for (Put put : TO_ADD_AFTER_PREPARE_FLUSH) { @@ -162,7 +162,7 @@ public static void setUpBeforeClass() { conf.setClass(HConstants.REGION_IMPL, HRegionForTest.class, HRegion.class); EXEC = new ExecutorService("test"); EXEC.startExecutorService(EXEC.new ExecutorConfig().setCorePoolSize(1) - .setExecutorType(ExecutorType.RS_COMPACTED_FILES_DISCHARGER)); + .setExecutorType(ExecutorType.RS_COMPACTED_FILES_DISCHARGER)); ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); } @@ -182,8 +182,8 @@ public void setUp() throws IOException { conf.set(HConstants.HBASE_DIR, testDir.toString()); td = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setRegionReplication(2) - .setRegionMemStoreReplication(true).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setRegionReplication(2) + .setRegionMemStoreReplication(true).build(); reqAndResps = new ArrayDeque<>(); queueReqAndResps = true; @@ -251,8 +251,8 @@ private FlushResult flushPrimary() throws IOException { private void replicate(Pair, CompletableFuture> pair) throws IOException { Pair params = ReplicationProtobufUtil - .buildReplicateWALEntryRequest(pair.getFirst().toArray(new WAL.Entry[0]), - secondary.getRegionInfo().getEncodedNameAsBytes(), null, null, null); + .buildReplicateWALEntryRequest(pair.getFirst().toArray(new WAL.Entry[0]), + secondary.getRegionInfo().getEncodedNameAsBytes(), null, null, null); for (WALEntry entry : params.getFirst().getEntryList()) { secondary.replayWALEntry(entry, params.getSecond()); } @@ -328,7 +328,7 @@ public void testErrorAfterFlushStartBeforeFlushCommit() throws IOException { primary.put(new Put(Bytes.toBytes(0)).addColumn(FAMILY, QUAL, Bytes.toBytes(1))); replicateAll(); TO_ADD_AFTER_PREPARE_FLUSH - .add(new Put(Bytes.toBytes(1)).addColumn(FAMILY, QUAL, Bytes.toBytes(2))); + .add(new 
Put(Bytes.toBytes(1)).addColumn(FAMILY, QUAL, Bytes.toBytes(2))); flushPrimary(); // replicate the start flush edit replicateOne(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRequestsPerSecondMetric.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRequestsPerSecondMetric.java index bd166722118d..d655abdc10f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRequestsPerSecondMetric.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRequestsPerSecondMetric.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -15,7 +17,6 @@ */ package org.apache.hadoop.hbase.regionserver; - import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -48,7 +49,6 @@ public class TestRequestsPerSecondMetric { private static final long METRICS_PERIOD = 2000L; private static Configuration conf; - @BeforeClass public static void setup() throws Exception { conf = UTIL.getConfiguration(); @@ -61,18 +61,15 @@ public static void teardown() throws Exception { UTIL.shutdownMiniCluster(); } - @Test /** * This test will confirm no negative value in requestsPerSecond metric during any region - * transition(close region/remove region/move region). - * Firstly, load 2000 random rows for 25 regions and will trigger a metric. - * Now, metricCache will have a current read and write requests count. - * Next, we disable a table and all of its 25 regions will be closed. - * As part of region close, his metric will also be removed from metricCache. - * prior to HBASE-23237, we do not remove/reset his metric so we incorrectly compute - * (currentRequestCount - lastRequestCount) which result into negative value. - * + * transition(close region/remove region/move region). Firstly, load 2000 random rows for 25 + * regions and will trigger a metric. Now, metricCache will have a current read and write requests + * count. Next, we disable a table and all of its 25 regions will be closed. As part of region + * close, his metric will also be removed from metricCache. prior to HBASE-23237, we do not + * remove/reset his metric so we incorrectly compute (currentRequestCount - lastRequestCount) + * which result into negative value. 
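The javadoc above describes HBASE-23237: requestsPerSecond is a delta between the current total and the previously cached total, so a per-region counter that silently disappears when its region closes can drive the delta negative. A plain-arithmetic illustration of that failure mode; all numbers are made up:

  public final class NegativeRateIllustration {
    public static void main(String[] args) {
      long lastTotal = 2000;    // cached total that still includes a region closed since the last run
      long currentTotal = 500;  // total recomputed after the region (and its counts) went away
      long periodSeconds = 2;
      long requestsPerSecond = (currentTotal - lastTotal) / periodSeconds;
      System.out.println(requestsPerSecond); // -750: the sign the test asserts can no longer appear
    }
  }

Removing the closed region's entry from the metrics cache, which the test verifies, keeps the delta non-negative.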
* @throws IOException * @throws InterruptedException */ @@ -80,14 +77,14 @@ public void testNoNegativeSignAtRequestsPerSecond() throws IOException, Interrup final TableName TABLENAME = TableName.valueOf("t"); final String FAMILY = "f"; Admin admin = UTIL.getAdmin(); - UTIL.createMultiRegionTable(TABLENAME, FAMILY.getBytes(),25); + UTIL.createMultiRegionTable(TABLENAME, FAMILY.getBytes(), 25); Table table = admin.getConnection().getTable(TABLENAME); ServerName serverName = admin.getRegionServers().iterator().next(); HRegionServer regionServer = UTIL.getMiniHBaseCluster().getRegionServer(serverName); - MetricsRegionServerWrapperImpl metricsWrapper = + MetricsRegionServerWrapperImpl metricsWrapper = new MetricsRegionServerWrapperImpl(regionServer); - MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable metricsServer - = metricsWrapper.new RegionServerMetricsWrapperRunnable(); + MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable metricsServer = + metricsWrapper.new RegionServerMetricsWrapperRunnable(); metricsServer.run(); UTIL.loadRandomRows(table, FAMILY.getBytes(), 1, 2000); Thread.sleep(METRICS_PERIOD); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java index 85045ea79a90..1bbe53e13400 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestResettingCounters { @ClassRule @@ -62,24 +62,21 @@ public void testResettingCounters() throws Exception { HBaseTestingUtil htu = new HBaseTestingUtil(); Configuration conf = htu.getConfiguration(); FileSystem fs = FileSystem.get(conf); - byte [] table = Bytes.toBytes(name.getMethodName()); - byte [][] families = new byte [][] { - Bytes.toBytes("family1"), - Bytes.toBytes("family2"), - Bytes.toBytes("family3") - }; + byte[] table = Bytes.toBytes(name.getMethodName()); + byte[][] families = new byte[][] { Bytes.toBytes("family1"), Bytes.toBytes("family2"), + Bytes.toBytes("family3") }; int numQualifiers = 10; - byte [][] qualifiers = new byte [numQualifiers][]; - for (int i=0; i scanners = StoreFileScanner - .getScannersForStoreFiles(Collections.singletonList(sf), - false, true, false, false, Long.MAX_VALUE); + List scanners = StoreFileScanner.getScannersForStoreFiles( + Collections.singletonList(sf), false, true, false, false, Long.MAX_VALUE); StoreFileScanner scanner = scanners.get(0); seekTestOfReversibleKeyValueScanner(scanner); for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) { LOG.info("Setting read point to " + readPoint); - scanners = StoreFileScanner.getScannersForStoreFiles( - Collections.singletonList(sf), false, true, false, false, readPoint); + scanners = StoreFileScanner.getScannersForStoreFiles(Collections.singletonList(sf), false, + true, false, false, readPoint); seekTestOfReversibleKeyValueScannerWithMVCC(scanners, readPoint); } } @@ -157,23 +157,22 @@ public void 
testReversibleMemstoreScanner() throws IOException { public void testReversibleKeyValueHeap() throws IOException { // write data to one memstore and two store files FileSystem fs = TEST_UTIL.getTestFileSystem(); - Path hfilePath = new Path(new Path( - TEST_UTIL.getDataTestDir("testReversibleKeyValueHeap"), "regionname"), - "familyname"); + Path hfilePath = + new Path(new Path(TEST_UTIL.getDataTestDir("testReversibleKeyValueHeap"), "regionname"), + "familyname"); CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); HFileContextBuilder hcBuilder = new HFileContextBuilder(); hcBuilder.withBlockSize(2 * 1024); HFileContext hFileContext = hcBuilder.build(); - StoreFileWriter writer1 = new StoreFileWriter.Builder( - TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir( - hfilePath).withFileContext(hFileContext).build(); - StoreFileWriter writer2 = new StoreFileWriter.Builder( - TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir( - hfilePath).withFileContext(hFileContext).build(); + StoreFileWriter writer1 = + new StoreFileWriter.Builder(TEST_UTIL.getConfiguration(), cacheConf, fs) + .withOutputDir(hfilePath).withFileContext(hFileContext).build(); + StoreFileWriter writer2 = + new StoreFileWriter.Builder(TEST_UTIL.getConfiguration(), cacheConf, fs) + .withOutputDir(hfilePath).withFileContext(hFileContext).build(); MemStore memstore = new DefaultMemStore(); - writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1, - writer2 }); + writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1, writer2 }); HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf, BloomType.NONE, true); @@ -184,13 +183,12 @@ public void testReversibleKeyValueHeap() throws IOException { * Test without MVCC */ int startRowNum = ROWSIZE / 2; - ReversedKeyValueHeap kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2, - ROWS[startRowNum], MAXMVCC); + ReversedKeyValueHeap kvHeap = + getReversibleKeyValueHeap(memstore, sf1, sf2, ROWS[startRowNum], MAXMVCC); internalTestSeekAndNextForReversibleKeyValueHeap(kvHeap, startRowNum); startRowNum = ROWSIZE - 1; - kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2, - HConstants.EMPTY_START_ROW, MAXMVCC); + kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2, HConstants.EMPTY_START_ROW, MAXMVCC); internalTestSeekAndNextForReversibleKeyValueHeap(kvHeap, startRowNum); /** @@ -199,27 +197,23 @@ public void testReversibleKeyValueHeap() throws IOException { for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) { LOG.info("Setting read point to " + readPoint); startRowNum = ROWSIZE - 1; - kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2, - HConstants.EMPTY_START_ROW, readPoint); + kvHeap = getReversibleKeyValueHeap(memstore, sf1, sf2, HConstants.EMPTY_START_ROW, readPoint); for (int i = startRowNum; i >= 0; i--) { if (i - 2 < 0) break; i = i - 2; kvHeap.seekToPreviousRow(KeyValueUtil.createFirstOnRow(ROWS[i + 1])); - Pair nextReadableNum = getNextReadableNumWithBackwardScan( - i, 0, readPoint); + Pair nextReadableNum = + getNextReadableNumWithBackwardScan(i, 0, readPoint); if (nextReadableNum == null) break; - KeyValue expecedKey = makeKV(nextReadableNum.getFirst(), - nextReadableNum.getSecond()); + KeyValue expecedKey = makeKV(nextReadableNum.getFirst(), nextReadableNum.getSecond()); assertEquals(expecedKey, kvHeap.peek()); i = nextReadableNum.getFirst(); int qualNum = nextReadableNum.getSecond(); if (qualNum + 1 < QUALSIZE) { kvHeap.backwardSeek(makeKV(i, qualNum + 1)); - 
nextReadableNum = getNextReadableNumWithBackwardScan(i, qualNum + 1, - readPoint); + nextReadableNum = getNextReadableNumWithBackwardScan(i, qualNum + 1, readPoint); if (nextReadableNum == null) break; - expecedKey = makeKV(nextReadableNum.getFirst(), - nextReadableNum.getSecond()); + expecedKey = makeKV(nextReadableNum.getFirst(), nextReadableNum.getSecond()); assertEquals(expecedKey, kvHeap.peek()); i = nextReadableNum.getFirst(); qualNum = nextReadableNum.getSecond(); @@ -228,15 +222,12 @@ public void testReversibleKeyValueHeap() throws IOException { kvHeap.next(); if (qualNum + 1 >= QUALSIZE) { - nextReadableNum = getNextReadableNumWithBackwardScan(i - 1, 0, - readPoint); + nextReadableNum = getNextReadableNumWithBackwardScan(i - 1, 0, readPoint); } else { - nextReadableNum = getNextReadableNumWithBackwardScan(i, qualNum + 1, - readPoint); + nextReadableNum = getNextReadableNumWithBackwardScan(i, qualNum + 1, readPoint); } if (nextReadableNum == null) break; - expecedKey = makeKV(nextReadableNum.getFirst(), - nextReadableNum.getSecond()); + expecedKey = makeKV(nextReadableNum.getFirst(), nextReadableNum.getSecond()); assertEquals(expecedKey, kvHeap.peek()); i = nextReadableNum.getFirst(); } @@ -247,23 +238,22 @@ public void testReversibleKeyValueHeap() throws IOException { public void testReversibleStoreScanner() throws IOException { // write data to one memstore and two store files FileSystem fs = TEST_UTIL.getTestFileSystem(); - Path hfilePath = new Path(new Path( - TEST_UTIL.getDataTestDir("testReversibleStoreScanner"), "regionname"), - "familyname"); + Path hfilePath = + new Path(new Path(TEST_UTIL.getDataTestDir("testReversibleStoreScanner"), "regionname"), + "familyname"); CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); HFileContextBuilder hcBuilder = new HFileContextBuilder(); hcBuilder.withBlockSize(2 * 1024); HFileContext hFileContext = hcBuilder.build(); - StoreFileWriter writer1 = new StoreFileWriter.Builder( - TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir( - hfilePath).withFileContext(hFileContext).build(); - StoreFileWriter writer2 = new StoreFileWriter.Builder( - TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir( - hfilePath).withFileContext(hFileContext).build(); + StoreFileWriter writer1 = + new StoreFileWriter.Builder(TEST_UTIL.getConfiguration(), cacheConf, fs) + .withOutputDir(hfilePath).withFileContext(hFileContext).build(); + StoreFileWriter writer2 = + new StoreFileWriter.Builder(TEST_UTIL.getConfiguration(), cacheConf, fs) + .withOutputDir(hfilePath).withFileContext(hFileContext).build(); MemStore memstore = new DefaultMemStore(); - writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1, - writer2 }); + writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1, writer2 }); HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf, BloomType.NONE, true); @@ -271,9 +261,9 @@ public void testReversibleStoreScanner() throws IOException { HStoreFile sf2 = new HStoreFile(fs, writer2.getPath(), TEST_UTIL.getConfiguration(), cacheConf, BloomType.NONE, true); - ScanInfo scanInfo = - new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE, Long.MAX_VALUE, - KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false); + ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE, + Long.MAX_VALUE, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, + 
CellComparatorImpl.COMPARATOR, false); // Case 1.Test a full reversed scan Scan scan = new Scan(); @@ -287,8 +277,7 @@ public void testReversibleStoreScanner() throws IOException { byte[] startRow = ROWS[startRowNum]; scan.withStartRow(startRow); storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC); - verifyCountAndOrder(storeScanner, QUALSIZE * (startRowNum + 1), - startRowNum + 1, false); + verifyCountAndOrder(storeScanner, QUALSIZE * (startRowNum + 1), startRowNum + 1, false); // Case 3.Test reversed scan with a specified start row and specified // qualifiers @@ -296,8 +285,7 @@ public void testReversibleStoreScanner() throws IOException { scan.addColumn(FAMILYNAME, QUALS[0]); scan.addColumn(FAMILYNAME, QUALS[2]); storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC); - verifyCountAndOrder(storeScanner, 2 * (startRowNum + 1), startRowNum + 1, - false); + verifyCountAndOrder(storeScanner, 2 * (startRowNum + 1), startRowNum + 1, false); // Case 4.Test reversed scan with mvcc based on case 3 for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) { @@ -318,8 +306,7 @@ public void testReversibleStoreScanner() throws IOException { expectedKVCount += kvCount; } } - verifyCountAndOrder(storeScanner, expectedKVCount, expectedRowCount, - false); + verifyCountAndOrder(storeScanner, expectedKVCount, expectedRowCount, false); } } @@ -327,8 +314,8 @@ public void testReversibleStoreScanner() throws IOException { public void testReversibleRegionScanner() throws IOException { byte[] FAMILYNAME2 = Bytes.toBytes("testCf2"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME2)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYNAME2)).build(); HRegion region = TEST_UTIL.createLocalHRegion(htd, null, null); loadDataToRegion(region, FAMILYNAME2); @@ -366,33 +353,31 @@ public void testReversibleRegionScanner() throws IOException { int startRowNum = ROWSIZE * 3 / 4; scan.withStartRow(ROWS[startRowNum]); scanner = region.getScanner(scan); - verifyCountAndOrder(scanner, (startRowNum + 1) * 2 * 2, (startRowNum + 1), - false); + verifyCountAndOrder(scanner, (startRowNum + 1) * 2 * 2, (startRowNum + 1), false); // Case6: Case4 + specify stop row int stopRowNum = ROWSIZE / 4; scan.withStartRow(HConstants.EMPTY_BYTE_ARRAY); scan.withStopRow(ROWS[stopRowNum]); scanner = region.getScanner(scan); - verifyCountAndOrder(scanner, (ROWSIZE - stopRowNum - 1) * 2 * 2, (ROWSIZE - - stopRowNum - 1), false); + verifyCountAndOrder(scanner, (ROWSIZE - stopRowNum - 1) * 2 * 2, (ROWSIZE - stopRowNum - 1), + false); // Case7: Case4 + specify start row + specify stop row scan.withStartRow(ROWS[startRowNum]); scanner = region.getScanner(scan); - verifyCountAndOrder(scanner, (startRowNum - stopRowNum) * 2 * 2, - (startRowNum - stopRowNum), false); + verifyCountAndOrder(scanner, (startRowNum - stopRowNum) * 2 * 2, (startRowNum - stopRowNum), + false); // Case8: Case7 + SingleColumnValueFilter int valueNum = startRowNum % VALUESIZE; - Filter filter = new SingleColumnValueFilter(FAMILYNAME, - specifiedQualifiers[0], CompareOperator.EQUAL, VALUES[valueNum]); + Filter filter = new SingleColumnValueFilter(FAMILYNAME, specifiedQualifiers[0], + CompareOperator.EQUAL, VALUES[valueNum]); scan.setFilter(filter); scanner = 
region.getScanner(scan); - int unfilteredRowNum = (startRowNum - stopRowNum) / VALUESIZE - + (stopRowNum / VALUESIZE == valueNum ? 0 : 1); - verifyCountAndOrder(scanner, unfilteredRowNum * 2 * 2, unfilteredRowNum, - false); + int unfilteredRowNum = + (startRowNum - stopRowNum) / VALUESIZE + (stopRowNum / VALUESIZE == valueNum ? 0 : 1); + verifyCountAndOrder(scanner, unfilteredRowNum * 2 * 2, unfilteredRowNum, false); // Case9: Case7 + PageFilter int pageSize = 10; @@ -403,10 +388,10 @@ public void testReversibleRegionScanner() throws IOException { verifyCountAndOrder(scanner, expectedRowNum * 2 * 2, expectedRowNum, false); // Case10: Case7 + FilterList+MUST_PASS_ONE - SingleColumnValueFilter scvFilter1 = new SingleColumnValueFilter( - FAMILYNAME, specifiedQualifiers[0], CompareOperator.EQUAL, VALUES[0]); - SingleColumnValueFilter scvFilter2 = new SingleColumnValueFilter( - FAMILYNAME, specifiedQualifiers[0], CompareOperator.EQUAL, VALUES[1]); + SingleColumnValueFilter scvFilter1 = new SingleColumnValueFilter(FAMILYNAME, + specifiedQualifiers[0], CompareOperator.EQUAL, VALUES[0]); + SingleColumnValueFilter scvFilter2 = new SingleColumnValueFilter(FAMILYNAME, + specifiedQualifiers[0], CompareOperator.EQUAL, VALUES[1]); expectedRowNum = 0; for (int i = startRowNum; i > stopRowNum; i--) { if (i % VALUESIZE == 0 || i % VALUESIZE == 1) { @@ -438,9 +423,8 @@ private StoreScanner getReversibleStoreScanner(MemStore memstore, HStoreFile sf1 return storeScanner; } - private void verifyCountAndOrder(InternalScanner scanner, - int expectedKVCount, int expectedRowCount, boolean forward) - throws IOException { + private void verifyCountAndOrder(InternalScanner scanner, int expectedKVCount, + int expectedRowCount, boolean forward) throws IOException { List kvList = new ArrayList<>(); Result lastResult = null; int rowCount = 0; @@ -452,9 +436,8 @@ private void verifyCountAndOrder(InternalScanner scanner, kvCount += kvList.size(); if (lastResult != null) { Result curResult = Result.create(kvList); - assertEquals("LastResult:" + lastResult + "CurResult:" + curResult, - forward, - Bytes.compareTo(curResult.getRow(), lastResult.getRow()) > 0); + assertEquals("LastResult:" + lastResult + "CurResult:" + curResult, forward, + Bytes.compareTo(curResult.getRow(), lastResult.getRow()) > 0); } lastResult = Result.create(kvList); kvList.clear(); @@ -471,8 +454,8 @@ private void verifyCountAndOrder(InternalScanner scanner, assertEquals(expectedRowCount, rowCount); } - private void internalTestSeekAndNextForReversibleKeyValueHeap( - ReversedKeyValueHeap kvHeap, int startRowNum) throws IOException { + private void internalTestSeekAndNextForReversibleKeyValueHeap(ReversedKeyValueHeap kvHeap, + int startRowNum) throws IOException { // Test next and seek for (int i = startRowNum; i >= 0; i--) { if (i % 2 == 1 && i - 2 >= 0) { @@ -522,8 +505,7 @@ private List getScanners(MemStore memstore, HStoreFile sf1, HSt return scanners; } - private void seekTestOfReversibleKeyValueScanner(KeyValueScanner scanner) - throws IOException { + private void seekTestOfReversibleKeyValueScanner(KeyValueScanner scanner) throws IOException { /** * Test without MVCC */ @@ -549,8 +531,7 @@ private void seekTestOfReversibleKeyValueScanner(KeyValueScanner scanner) // Test seek to previous row seekRowNum = ROWSIZE - 4; - assertTrue(scanner.seekToPreviousRow(KeyValueUtil - .createFirstOnRow(ROWS[seekRowNum]))); + assertTrue(scanner.seekToPreviousRow(KeyValueUtil.createFirstOnRow(ROWS[seekRowNum]))); expectedKey = makeKV(seekRowNum - 1, 0); 
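The reversible scanner cases above are the server side of a client-visible feature: a Scan marked reversed walks rows in descending order, and its start row must be the larger key. A client-side sketch, assuming an existing Table handle; the row keys are illustrative:

  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public final class ReversedScanSketch {
    static void scanBackwards(Table table) throws Exception {
      Scan scan = new Scan()
          .setReversed(true)                        // iterate rows in descending order
          .withStartRow(Bytes.toBytes("row-0075"))  // start from the larger key...
          .withStopRow(Bytes.toBytes("row-0025"));  // ...and stop at the smaller one
      try (ResultScanner rs = table.getScanner(scan)) {
        for (Result r : rs) {
          // rows arrive from row-0075 down toward (but excluding) row-0025
        }
      }
    }
  }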
assertEquals(expectedKey, scanner.peek()); @@ -560,14 +541,13 @@ private void seekTestOfReversibleKeyValueScanner(KeyValueScanner scanner) } - private void seekTestOfReversibleKeyValueScannerWithMVCC( - List scanners, int readPoint) throws IOException { - /** - * Test with MVCC - */ + private void seekTestOfReversibleKeyValueScannerWithMVCC(List scanners, + int readPoint) throws IOException { + /** + * Test with MVCC + */ // Test seek to last row - KeyValue expectedKey = getNextReadableKeyValueWithBackwardScan( - ROWSIZE - 1, 0, readPoint); + KeyValue expectedKey = getNextReadableKeyValueWithBackwardScan(ROWSIZE - 1, 0, readPoint); boolean res = false; for (KeyValueScanner scanner : scanners) { res |= scanner.seekToLastRow(); @@ -579,10 +559,9 @@ private void seekTestOfReversibleKeyValueScannerWithMVCC( } assertTrue(res); - // Test backward seek in two cases - // Case1: seek in the same row in backwardSeek - expectedKey = getNextReadableKeyValueWithBackwardScan(ROWSIZE - 2, - QUALSIZE - 2, readPoint); + // Test backward seek in two cases + // Case1: seek in the same row in backwardSeek + expectedKey = getNextReadableKeyValueWithBackwardScan(ROWSIZE - 2, QUALSIZE - 2, readPoint); res = false; for (KeyValueScanner scanner : scanners) { res |= scanner.backwardSeek(expectedKey); @@ -594,7 +573,7 @@ private void seekTestOfReversibleKeyValueScannerWithMVCC( } assertTrue(res); - // Case2: seek to the previous row in backwardSeek + // Case2: seek to the previous row in backwardSeek int seekRowNum = ROWSIZE - 3; res = false; for (KeyValueScanner scanner : scanners) { @@ -606,10 +585,9 @@ private void seekTestOfReversibleKeyValueScannerWithMVCC( } assertTrue(res); - // Test seek to previous row - seekRowNum = ROWSIZE - 4; - expectedKey = getNextReadableKeyValueWithBackwardScan(seekRowNum - 1, 0, - readPoint); + // Test seek to previous row + seekRowNum = ROWSIZE - 4; + expectedKey = getNextReadableKeyValueWithBackwardScan(seekRowNum - 1, 0, readPoint); res = false; for (KeyValueScanner scanner : scanners) { res |= scanner.seekToPreviousRow(KeyValueUtil.createFirstOnRow(ROWS[seekRowNum])); @@ -622,17 +600,16 @@ private void seekTestOfReversibleKeyValueScannerWithMVCC( assertTrue(res); } - private KeyValue getNextReadableKeyValueWithBackwardScan(int startRowNum, - int startQualNum, int readPoint) { - Pair nextReadableNum = getNextReadableNumWithBackwardScan( - startRowNum, startQualNum, readPoint); - if (nextReadableNum == null) - return null; + private KeyValue getNextReadableKeyValueWithBackwardScan(int startRowNum, int startQualNum, + int readPoint) { + Pair nextReadableNum = + getNextReadableNumWithBackwardScan(startRowNum, startQualNum, readPoint); + if (nextReadableNum == null) return null; return makeKV(nextReadableNum.getFirst(), nextReadableNum.getSecond()); } - private Pair getNextReadableNumWithBackwardScan( - int startRowNum, int startQualNum, int readPoint) { + private Pair getNextReadableNumWithBackwardScan(int startRowNum, + int startQualNum, int readPoint) { Pair nextReadableNum = null; boolean findExpected = false; for (int i = startRowNum; i >= 0; i--) { @@ -643,14 +620,12 @@ private Pair getNextReadableNumWithBackwardScan( break; } } - if (findExpected) - break; + if (findExpected) break; } return nextReadableNum; } - private static void loadDataToRegion(HRegion region, byte[] additionalFamily) - throws IOException { + private static void loadDataToRegion(HRegion region, byte[] additionalFamily) throws IOException { for (int i = 0; i < ROWSIZE; i++) { Put put = new Put(ROWS[i]); for 
(int j = 0; j < QUALSIZE; j++) { @@ -665,8 +640,8 @@ private static void loadDataToRegion(HRegion region, byte[] additionalFamily) } } - private static void writeMemstoreAndStoreFiles(MemStore memstore, - final StoreFileWriter[] writers) throws IOException { + private static void writeMemstoreAndStoreFiles(MemStore memstore, final StoreFileWriter[] writers) + throws IOException { try { for (int i = 0; i < ROWSIZE; i++) { for (int j = 0; j < QUALSIZE; j++) { @@ -684,8 +659,7 @@ private static void writeMemstoreAndStoreFiles(MemStore memstore, } } - private static void writeStoreFile(final StoreFileWriter writer) - throws IOException { + private static void writeStoreFile(final StoreFileWriter writer) throws IOException { try { for (int i = 0; i < ROWSIZE; i++) { for (int j = 0; j < QUALSIZE; j++) { @@ -722,8 +696,8 @@ private static KeyValue makeKV(int rowNum, int cqNum) { } private static KeyValue makeKV(int rowNum, int cqNum, byte[] familyName) { - KeyValue kv = new KeyValue(ROWS[rowNum], familyName, QUALS[cqNum], TS, - VALUES[rowNum % VALUESIZE]); + KeyValue kv = + new KeyValue(ROWS[rowNum], familyName, QUALS[cqNum], TS, VALUES[rowNum % VALUESIZE]); kv.setSequenceId(makeMVCC(rowNum, cqNum)); return kv; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java index b6a4ceb8d1e1..c0afbf36207f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -62,7 +62,7 @@ /** * Test TestRowPrefixBloomFilter */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestRowPrefixBloomFilter { @ClassRule @@ -99,8 +99,7 @@ public void setUp() throws Exception { conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true); conf.setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, prefixLength); - localfs = - (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0); + localfs = (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0); if (fs == null) { fs = FileSystem.get(conf); @@ -138,38 +137,29 @@ private static StoreFileScanner getStoreFileScanner(StoreFileReader reader) { } private void writeStoreFile(final Path f, BloomType bt, int expKeys) throws IOException { - HFileContext meta = new HFileContextBuilder() - .withBlockSize(BLOCKSIZE_SMALL) - .withChecksumType(CKTYPE) - .withBytesPerCheckSum(CKBYTES) - .build(); + HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL) + .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. 
- StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withFilePath(f) - .withBloomType(bt) - .withMaxKeyCount(expKeys) - .withFileContext(meta) - .build(); + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(f) + .withBloomType(bt).withMaxKeyCount(expKeys).withFileContext(meta).build(); long now = EnvironmentEdgeManager.currentTime(); try { - //Put with valid row style + // Put with valid row style for (int i = 0; i < prefixRowCount; i += 2) { // prefix rows String prefixRow = String.format(prefixFormatter, i); - for (int j = 0; j < suffixRowCount; j++) { // suffix rows + for (int j = 0; j < suffixRowCount; j++) { // suffix rows String row = generateRowWithSuffix(prefixRow, j); - KeyValue kv = - new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"), now, - Bytes.toBytes("value")); + KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), + Bytes.toBytes("col"), now, Bytes.toBytes("value")); writer.append(kv); } } - //Put with invalid row style + // Put with invalid row style for (int i = prefixRowCount; i < prefixRowCount * 2; i += 2) { // prefix rows String row = String.format(invalidFormatter, i); - KeyValue kv = - new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"), now, - Bytes.toBytes("value")); + KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), + Bytes.toBytes("col"), now, Bytes.toBytes("value")); writer.append(kv); } } finally { @@ -202,7 +192,7 @@ public void testRowPrefixBloomFilter() throws Exception { reader.loadFileInfo(); reader.loadBloomfilter(); - //check basic param + // check basic param assertEquals(bt, reader.getBloomFilterType()); assertEquals(prefixLength, reader.getPrefixLength()); assertEquals(expKeys, reader.getGeneralBloomFilter().getKeyCount()); @@ -214,7 +204,7 @@ public void testRowPrefixBloomFilter() throws Exception { int falseNeg = 0; for (int i = 0; i < prefixRowCount; i++) { // prefix rows String prefixRow = String.format(prefixFormatter, i); - for (int j = 0; j < suffixRowCount; j++) { // suffix rows + for (int j = 0; j < suffixRowCount; j++) { // suffix rows String startRow = generateRowWithSuffix(prefixRow, j); String stopRow = generateRowWithSuffix(prefixRow, j + 1); Scan scan = @@ -252,9 +242,9 @@ public void testRowPrefixBloomFilter() throws Exception { fs.delete(f, true); assertEquals("False negatives: " + falseNeg, 0, falseNeg); int maxFalsePos = (int) (2 * expErr); - assertTrue( - "Too many false positives: " + falsePos + " (err=" + err + ", expected no more than " + - maxFalsePos + ")", falsePos <= maxFalsePos); + assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than " + + maxFalsePos + ")", + falsePos <= maxFalsePos); } @Test @@ -277,8 +267,8 @@ public void testRowPrefixBloomFilterWithGet() throws Exception { HStore store = mock(HStore.class); when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of("family")); - //Get with valid row style - //prefix row in bloom + // Get with valid row style + // prefix row in bloom String prefixRow = String.format(prefixFormatter, prefixRowCount - 2); String row = generateRowWithSuffix(prefixRow, 0); Scan scan = new Scan(new Get(Bytes.toBytes(row))); @@ -330,9 +320,9 @@ public void testRowPrefixBloomFilterWithScan() throws Exception { HStore store = mock(HStore.class); when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of("family")); - //Scan with valid row style. 
startRow and stopRow have a common prefix. - //And the length of the common prefix is no less than prefixLength. - //prefix row in bloom + // Scan with valid row style. startRow and stopRow have a common prefix. + // And the length of the common prefix is no less than prefixLength. + // prefix row in bloom String prefixRow = String.format(prefixFormatter, prefixRowCount - 2); String startRow = generateRowWithSuffix(prefixRow, 0); String stopRow = generateRowWithSuffix(prefixRow, 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java index 0f07959ae00a..7c9499b0c17f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,10 +41,10 @@ import org.junit.experimental.categories.Category; /** - * Test case to check HRS throws {@link org.apache.hadoop.hbase.client.RowTooBigException} - * when row size exceeds configured limits. + * Test case to check HRS throws {@link org.apache.hadoop.hbase.client.RowTooBigException} when row + * size exceeds configured limits. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRowTooBig { @ClassRule @@ -54,13 +54,12 @@ public class TestRowTooBig { private final static HBaseTestingUtil HTU = new HBaseTestingUtil(); private static Path rootRegionDir; private static final TableDescriptor TEST_TD = TableDescriptorBuilder - .newBuilder(TableName.valueOf(TestRowTooBig.class.getSimpleName())).build(); + .newBuilder(TableName.valueOf(TestRowTooBig.class.getSimpleName())).build(); @BeforeClass public static void before() throws Exception { HTU.startMiniCluster(); - HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY, - 10 * 1024 * 1024L); + HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY, 10 * 1024 * 1024L); rootRegionDir = HTU.getDataTestDirOnTestFS("TestRowTooBig"); } @@ -70,14 +69,10 @@ public static void after() throws Exception { } /** - * Usecase: - * - create a row with 5 large cells (5 Mb each) - * - flush memstore but don't compact storefiles. - * - try to Get whole row. - * - * OOME happened before we actually get to reading results, but - * during seeking, as each StoreFile gets it's own scanner, - * and each scanner seeks after the first KV. + * Usecase: - create a row with 5 large cells (5 Mb each) - flush memstore but don't compact + * storefiles. - try to Get whole row. OOME happened before we actually get to reading results, + * but during seeking, as each StoreFile gets it's own scanner, and each scanner seeks after the + * first KV. 
*/ @Test(expected = RowTooBigException.class) public void testScannersSeekOnFewLargeCells() throws IOException { @@ -85,14 +80,14 @@ public void testScannersSeekOnFewLargeCells() throws IOException { byte[] fam1 = Bytes.toBytes("fam1"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TD) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); final RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); - HRegion region = HBaseTestingUtil.createRegionAndWAL(hri, rootRegionDir, - HTU.getConfiguration(), tableDescriptor); + HRegion region = HBaseTestingUtil.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), + tableDescriptor); try { // Add 5 cells to memstore - for (int i = 0; i < 5 ; i++) { + for (int i = 0; i < 5; i++) { Put put = new Put(row1); byte[] value = new byte[5 * 1024 * 1024]; @@ -109,14 +104,8 @@ public void testScannersSeekOnFewLargeCells() throws IOException { } /** - * Usecase: - * - * - create a row with 1M cells, 10 bytes in each - * - flush & run major compaction - * - try to Get whole row. - * - * OOME happened in StoreScanner.next(..). - * + * Usecase: - create a row with 1M cells, 10 bytes in each - flush & run major compaction - try to + * Get whole row. OOME happened in StoreScanner.next(..). * @throws IOException */ @Test(expected = RowTooBigException.class) @@ -125,11 +114,11 @@ public void testScanAcrossManySmallColumns() throws IOException { byte[] fam1 = Bytes.toBytes("fam1"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TD) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); final RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); - HRegion region = HBaseTestingUtil.createRegionAndWAL(hri, rootRegionDir, - HTU.getConfiguration(), tableDescriptor); + HRegion region = HBaseTestingUtil.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), + tableDescriptor); try { // Add to memstore for (int i = 0; i < 10; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java index 879d459822fe..34788519e813 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +36,8 @@ import org.junit.rules.TestName; /** - * A silly test that does nothing but make sure an rpcscheduler factory makes what it says - * it is going to make. + * A silly test that does nothing but make sure an rpcscheduler factory makes what it says it is + * going to make. 
*/ @Category(SmallTests.class) public class TestRpcSchedulerFactory { @@ -46,7 +46,8 @@ public class TestRpcSchedulerFactory { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRpcSchedulerFactory.class); - @Rule public TestName testName = new TestName(); + @Rule + public TestName testName = new TestName(); private Configuration conf; @Before diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java index 2688cbfc54a5..a986e579ac7b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,10 +52,10 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) /* - * This test verifies that the scenarios illustrated by HBASE-10850 work - * w.r.t. essential column family optimization + * This test verifies that the scenarios illustrated by HBASE-10850 work w.r.t. essential column + * family optimization */ public class TestSCVFWithMiniCluster { @@ -124,7 +124,7 @@ public static void setUp() throws Exception { * 'false'. Only row with key '1' should be returned in the scan. */ scanFilter = new SingleColumnValueFilter(FAMILY_A, QUALIFIER_FOO, CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("false"))); + new BinaryComparator(Bytes.toBytes("false"))); ((SingleColumnValueFilter) scanFilter).setFilterIfMissing(true); } @@ -149,6 +149,7 @@ private void verify(Scan scan) throws IOException { } assertEquals(expected, count); } + /** * Test the filter by adding all columns of family A in the scan. (OK) */ @@ -222,11 +223,11 @@ public void scanWithSpecificQualifiers3() throws IOException { } private static void create(Admin admin, TableName tableName, byte[]... families) - throws IOException { + throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] family : families) { ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(family) - .setMaxVersions(1).setCompressionType(Algorithm.GZ).build(); + .setMaxVersions(1).setCompressionType(Algorithm.GZ).build(); builder.setColumnFamily(familyDescriptor); } try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java index 31637f4e6ec3..97a81be10a09 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,19 +59,18 @@ import org.slf4j.LoggerFactory; /** - * Test a multi-column scanner when there is a Bloom filter false-positive. - * This is needed for the multi-column Bloom filter optimization. 
+ * Test a multi-column scanner when there is a Bloom filter false-positive. This is needed for the + * multi-column Bloom filter optimization. */ @RunWith(Parameterized.class) -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestScanWithBloomError { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestScanWithBloomError.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestScanWithBloomError.class); + private static final Logger LOG = LoggerFactory.getLogger(TestScanWithBloomError.class); private static final String TABLE_NAME = "ScanWithBloomError"; private static final String FAMILY = "myCF"; @@ -101,7 +100,7 @@ public TestScanWithBloomError(BloomType bloomType) { } @Before - public void setUp() throws IOException{ + public void setUp() throws IOException { conf = TEST_UTIL.getConfiguration(); fs = FileSystem.get(conf); conf.setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, 10); @@ -109,34 +108,29 @@ public void setUp() throws IOException{ @Test public void testThreeStoreFiles() throws IOException { - ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(FAMILY)) - .setCompressionType(Compression.Algorithm.GZ) - .setBloomFilterType(bloomType) - .setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS).build(); + ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes(FAMILY)).setCompressionType(Compression.Algorithm.GZ) + .setBloomFilterType(bloomType).setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS).build(); region = TEST_UTIL.createTestRegion(TABLE_NAME, columnFamilyDescriptor); - createStoreFile(new int[] {1, 2, 6}); - createStoreFile(new int[] {1, 2, 3, 7}); - createStoreFile(new int[] {1, 9}); - scanColSet(new int[]{1, 4, 6, 7}, new int[]{1, 6, 7}); + createStoreFile(new int[] { 1, 2, 6 }); + createStoreFile(new int[] { 1, 2, 3, 7 }); + createStoreFile(new int[] { 1, 9 }); + scanColSet(new int[] { 1, 4, 6, 7 }, new int[] { 1, 6, 7 }); HBaseTestingUtil.closeRegionAndWAL(region); } - private void scanColSet(int[] colSet, int[] expectedResultCols) - throws IOException { + private void scanColSet(int[] colSet, int[] expectedResultCols) throws IOException { LOG.info("Scanning column set: " + Arrays.toString(colSet)); Scan scan = new Scan().withStartRow(ROW_BYTES).withStopRow(ROW_BYTES, true); addColumnSetToScan(scan, colSet); RegionScannerImpl scanner = region.getScanner(scan); KeyValueHeap storeHeap = scanner.storeHeap; assertEquals(0, storeHeap.getHeap().size()); - StoreScanner storeScanner = - (StoreScanner) storeHeap.getCurrentForTesting(); + StoreScanner storeScanner = (StoreScanner) storeHeap.getCurrentForTesting(); @SuppressWarnings({ "unchecked", "rawtypes" }) - List scanners = (List) - (List) storeScanner.getAllScannersForTesting(); + List scanners = + (List) (List) storeScanner.getAllScannersForTesting(); // Sort scanners by their HFile's modification time. 
Collections.sort(scanners, new Comparator() { @@ -159,13 +153,12 @@ public int compare(StoreFileScanner s1, StoreFileScanner s2) { for (StoreFileScanner sfScanner : scanners) lastStoreFileReader = sfScanner.getReader(); - new HFilePrettyPrinter(conf).run(new String[]{ "-m", "-p", "-f", - lastStoreFileReader.getHFileReader().getPath().toString()}); + new HFilePrettyPrinter(conf).run( + new String[] { "-m", "-p", "-f", lastStoreFileReader.getHFileReader().getPath().toString() }); // Disable Bloom filter for the last store file. The disabled Bloom filter // will always return "true". - LOG.info("Disabling Bloom filter for: " - + lastStoreFileReader.getHFileReader().getName()); + LOG.info("Disabling Bloom filter for: " + lastStoreFileReader.getHFileReader().getName()); lastStoreFileReader.disableBloomFilterForTesting(); List allResults = new ArrayList<>(); @@ -182,22 +175,19 @@ public int compare(StoreFileScanner s1, StoreFileScanner s2) { for (Cell kv : allResults) { String qual = Bytes.toString(CellUtil.cloneQualifier(kv)); assertTrue(qual.startsWith(QUALIFIER_PREFIX)); - actualIds.add(Integer.valueOf(qual.substring( - QUALIFIER_PREFIX.length()))); + actualIds.add(Integer.valueOf(qual.substring(QUALIFIER_PREFIX.length()))); } List expectedIds = new ArrayList<>(); for (int expectedId : expectedResultCols) expectedIds.add(expectedId); - LOG.info("Column ids returned: " + actualIds + ", expected: " - + expectedIds); + LOG.info("Column ids returned: " + actualIds + ", expected: " + expectedIds); assertEquals(expectedIds.toString(), actualIds.toString()); } private void addColumnSetToScan(Scan scan, int[] colIds) { for (int colId : colIds) { - scan.addColumn(FAMILY_BYTES, - Bytes.toBytes(qualFromId(colId))); + scan.addColumn(FAMILY_BYTES, Bytes.toBytes(qualFromId(colId))); } } @@ -205,21 +195,18 @@ private String qualFromId(int colId) { return QUALIFIER_PREFIX + colId; } - private void createStoreFile(int[] colIds) - throws IOException { + private void createStoreFile(int[] colIds) throws IOException { Put p = new Put(ROW_BYTES); for (int colId : colIds) { long ts = Long.MAX_VALUE; String qual = qualFromId(colId); allColIds.add(colId); - KeyValue kv = KeyValueTestUtil.create(ROW, FAMILY, - qual, ts, TestMultiColumnScanner.createValue(ROW, qual, ts)); + KeyValue kv = KeyValueTestUtil.create(ROW, FAMILY, qual, ts, + TestMultiColumnScanner.createValue(ROW, qual, ts)); p.add(kv); } region.put(p); region.flush(true); } - } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index 029aa56bcbb9..c66bd5d0a799 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,36 +68,37 @@ /** * Test of a long-lived scanner validating as we go. 
*/ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestScanner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestScanner.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final Logger LOG = LoggerFactory.getLogger(TestScanner.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final byte [] FIRST_ROW = HConstants.EMPTY_START_ROW; - private static final byte [][] COLS = { HConstants.CATALOG_FAMILY }; - private static final byte [][] EXPLICIT_COLS = { - HConstants.REGIONINFO_QUALIFIER, HConstants.SERVER_QUALIFIER, + private static final byte[] FIRST_ROW = HConstants.EMPTY_START_ROW; + private static final byte[][] COLS = { HConstants.CATALOG_FAMILY }; + private static final byte[][] EXPLICIT_COLS = + { HConstants.REGIONINFO_QUALIFIER, HConstants.SERVER_QUALIFIER, // TODO ryan - //HConstants.STARTCODE_QUALIFIER - }; + // HConstants.STARTCODE_QUALIFIER + }; - static final TableDescriptor TESTTABLEDESC = - TableDescriptorBuilder.newBuilder(TableName.valueOf("testscanner")) + static final TableDescriptor TESTTABLEDESC = TableDescriptorBuilder + .newBuilder(TableName.valueOf("testscanner")) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) - // Ten is an arbitrary number. Keep versions to help debugging. - .setMaxVersions(10).setBlockCacheEnabled(false).setBlocksize(8 * 1024).build()) + // Ten is an arbitrary number. Keep versions to help debugging. + .setMaxVersions(10).setBlockCacheEnabled(false).setBlocksize(8 * 1024).build()) .build(); /** HRegionInfo for root region */ public static final RegionInfo REGION_INFO = - RegionInfoBuilder.newBuilder(TESTTABLEDESC.getTableName()).build(); + RegionInfoBuilder.newBuilder(TESTTABLEDESC.getTableName()).build(); private static final byte[] ROW_KEY = REGION_INFO.getRegionName(); @@ -126,15 +127,14 @@ public TestScanner() { */ @Test public void testStopRow() throws Exception { - byte [] startrow = Bytes.toBytes("bbb"); - byte [] stoprow = Bytes.toBytes("ccc"); + byte[] startrow = Bytes.toBytes("bbb"); + byte[] stoprow = Bytes.toBytes("ccc"); try { this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null); HTestConst.addContent(this.region, HConstants.CATALOG_FAMILY); List results = new ArrayList<>(); // Do simple test of getting one row only first. 
- Scan scan = new Scan().withStartRow(Bytes.toBytes("abc")) - .withStopRow(Bytes.toBytes("abd")); + Scan scan = new Scan().withStartRow(Bytes.toBytes("abc")).withStopRow(Bytes.toBytes("abd")); scan.addFamily(HConstants.CATALOG_FAMILY); InternalScanner s = region.getScanner(scan); @@ -155,7 +155,7 @@ public void testStopRow() throws Exception { for (boolean first = true; s.next(results);) { kv = results.get(0); if (first) { - assertTrue(CellUtil.matchingRows(kv, startrow)); + assertTrue(CellUtil.matchingRows(kv, startrow)); first = false; } count++; @@ -177,8 +177,8 @@ void rowPrefixFilter(Scan scan) throws IOException { while (hasMore) { hasMore = s.next(results); for (Cell kv : results) { - assertEquals((byte)'a', CellUtil.cloneRow(kv)[0]); - assertEquals((byte)'b', CellUtil.cloneRow(kv)[1]); + assertEquals((byte) 'a', CellUtil.cloneRow(kv)[0]); + assertEquals((byte) 'b', CellUtil.cloneRow(kv)[1]); } results.clear(); } @@ -205,7 +205,7 @@ public void testFilters() throws IOException { try { this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null); HTestConst.addContent(this.region, HConstants.CATALOG_FAMILY); - byte [] prefix = Bytes.toBytes("ab"); + byte[] prefix = Bytes.toBytes("ab"); Filter newFilter = new PrefixFilter(prefix); Scan scan = new Scan(); scan.setFilter(newFilter); @@ -223,8 +223,8 @@ public void testFilters() throws IOException { } /** - * Test that closing a scanner while a client is using it doesn't throw - * NPEs but instead a UnknownScannerException. HBASE-2503 + * Test that closing a scanner while a client is using it doesn't throw NPEs but instead a + * UnknownScannerException. HBASE-2503 */ @Test public void testRaceBetweenClientAndTimeout() throws Exception { @@ -248,7 +248,8 @@ public void testRaceBetweenClientAndTimeout() throws Exception { } } - /** The test! + /** + * The test! */ @Test public void testScanner() throws IOException { @@ -261,7 +262,7 @@ public void testScanner() throws IOException { Put put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTime()); put.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - RegionInfo.toByteArray(REGION_INFO)); + RegionInfo.toByteArray(REGION_INFO)); table.put(put); // What we just committed is in the memstore. 
Verify that we can get @@ -272,7 +273,7 @@ public void testScanner() throws IOException { // Close and re-open - ((HRegion)region).close(); + ((HRegion) region).close(); region = HRegion.openHRegion(region, null); table = new RegionAsTable(region); @@ -286,10 +287,9 @@ public void testScanner() throws IOException { String address = HConstants.LOCALHOST_IP + ":" + HBaseTestingUtil.randomFreePort(); put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTime()); - put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(address)); + put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(address)); -// put.add(HConstants.COL_STARTCODE, Bytes.toBytes(START_CODE)); + // put.add(HConstants.COL_STARTCODE, Bytes.toBytes(START_CODE)); table.put(put); @@ -309,8 +309,8 @@ public void testScanner() throws IOException { // Close and reopen - ((HRegion)region).close(); - region = HRegion.openHRegion(region,null); + ((HRegion) region).close(); + region = HRegion.openHRegion(region, null); table = new RegionAsTable(region); // Validate again @@ -343,7 +343,7 @@ public void testScanner() throws IOException { // Close and reopen - ((HRegion)this.region).close(); + ((HRegion) this.region).close(); this.region = HRegion.openHRegion(region, null); table = new RegionAsTable(this.region); @@ -359,52 +359,50 @@ public void testScanner() throws IOException { } /** Compare the HRegionInfo we read from HBase to what we stored */ - private void validateRegionInfo(byte [] regionBytes) throws IOException { + private void validateRegionInfo(byte[] regionBytes) throws IOException { RegionInfo info = RegionInfo.parseFromOrNull(regionBytes); assertEquals(REGION_INFO.getRegionId(), info.getRegionId()); assertEquals(0, info.getStartKey().length); assertEquals(0, info.getEndKey().length); assertEquals(0, Bytes.compareTo(info.getRegionName(), REGION_INFO.getRegionName())); - //assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc())); + // assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc())); } /** Use a scanner to get the region info and then validate the results */ - private void scan(boolean validateStartcode, String serverName) - throws IOException { + private void scan(boolean validateStartcode, String serverName) throws IOException { InternalScanner scanner = null; Scan scan = null; List results = new ArrayList<>(); - byte [][][] scanColumns = {COLS, EXPLICIT_COLS}; - for(int i = 0; i < scanColumns.length; i++) { + byte[][][] scanColumns = { COLS, EXPLICIT_COLS }; + for (int i = 0; i < scanColumns.length; i++) { try { scan = new Scan().withStartRow(FIRST_ROW); for (int ii = 0; ii < EXPLICIT_COLS.length; ii++) { - scan.addColumn(COLS[0], EXPLICIT_COLS[ii]); + scan.addColumn(COLS[0], EXPLICIT_COLS[ii]); } scanner = region.getScanner(scan); while (scanner.next(results)) { - assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER)); - byte [] val = CellUtil.cloneValue(getColumn(results, HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER)); + assertTrue( + hasColumn(results, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); + byte[] val = CellUtil.cloneValue( + getColumn(results, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); validateRegionInfo(val); - if(validateStartcode) { -// assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, -// HConstants.STARTCODE_QUALIFIER)); -// val = getColumn(results, HConstants.CATALOG_FAMILY, -// 
HConstants.STARTCODE_QUALIFIER).getValue(); + if (validateStartcode) { + // assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, + // HConstants.STARTCODE_QUALIFIER)); + // val = getColumn(results, HConstants.CATALOG_FAMILY, + // HConstants.STARTCODE_QUALIFIER).getValue(); assertNotNull(val); assertFalse(val.length == 0); long startCode = Bytes.toLong(val); assertEquals(START_CODE, startCode); } - if(serverName != null) { - assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER)); - val = CellUtil.cloneValue(getColumn(results, HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER)); + if (serverName != null) { + assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER)); + val = CellUtil.cloneValue( + getColumn(results, HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER)); assertNotNull(val); assertFalse(val.length == 0); String server = Bytes.toString(val); @@ -414,16 +412,15 @@ private void scan(boolean validateStartcode, String serverName) } finally { InternalScanner s = scanner; scanner = null; - if(s != null) { + if (s != null) { s.close(); } } } } - private boolean hasColumn(final List kvs, final byte [] family, - final byte [] qualifier) { - for (Cell kv: kvs) { + private boolean hasColumn(final List kvs, final byte[] family, final byte[] qualifier) { + for (Cell kv : kvs) { if (CellUtil.matchingFamily(kv, family) && CellUtil.matchingQualifier(kv, qualifier)) { return true; } @@ -431,9 +428,8 @@ private boolean hasColumn(final List kvs, final byte [] family, return false; } - private Cell getColumn(final List kvs, final byte [] family, - final byte [] qualifier) { - for (Cell kv: kvs) { + private Cell getColumn(final List kvs, final byte[] family, final byte[] qualifier) { + for (Cell kv : kvs) { if (CellUtil.matchingFamily(kv, family) && CellUtil.matchingQualifier(kv, qualifier)) { return kv; } @@ -441,29 +437,26 @@ private Cell getColumn(final List kvs, final byte [] family, return null; } - /** Use get to retrieve the HRegionInfo and validate it */ private void getRegionInfo(Table table) throws IOException { Get get = new Get(ROW_KEY); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); Result result = table.get(get); - byte [] bytes = result.value(); + byte[] bytes = result.value(); validateRegionInfo(bytes); } /** - * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner - * update readers code essentially. This is not highly concurrent, since its all 1 thread. - * HBase-910. + * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner update + * readers code essentially. This is not highly concurrent, since its all 1 thread. HBase-910. */ @Test public void testScanAndSyncFlush() throws Exception { this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null); Table hri = new RegionAsTable(region); try { - LOG.info("Added: " + - HTestConst.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY), - Bytes.toString(HConstants.REGIONINFO_QUALIFIER))); + LOG.info("Added: " + HTestConst.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY), + Bytes.toString(HConstants.REGIONINFO_QUALIFIER))); int count = count(hri, -1, false); assertEquals(count, count(hri, 100, false)); // do a sync flush. } catch (Exception e) { @@ -475,17 +468,16 @@ public void testScanAndSyncFlush() throws Exception { } /** - * Tests to do a concurrent flush (using a 2nd thread) while scanning. 
This tests both - * the StoreScanner update readers and the transition from memstore -> snapshot -> store file. + * Tests to do a concurrent flush (using a 2nd thread) while scanning. This tests both the + * StoreScanner update readers and the transition from memstore -> snapshot -> store file. */ @Test public void testScanAndRealConcurrentFlush() throws Exception { this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null); Table hri = new RegionAsTable(region); try { - LOG.info("Added: " + - HTestConst.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY), - Bytes.toString(HConstants.REGIONINFO_QUALIFIER))); + LOG.info("Added: " + HTestConst.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY), + Bytes.toString(HConstants.REGIONINFO_QUALIFIER))); int count = count(hri, -1, false); assertEquals(count, count(hri, 100, true)); // do a true concurrent background thread flush } catch (Exception e) { @@ -497,8 +489,7 @@ public void testScanAndRealConcurrentFlush() throws Exception { } /** - * Make sure scanner returns correct result when we run a major compaction - * with deletes. + * Make sure scanner returns correct result when we run a major compaction with deletes. */ @Test public void testScanAndConcurrentMajorCompact() throws Exception { @@ -509,10 +500,10 @@ public void testScanAndConcurrentMajorCompact() throws Exception { Table hri = new RegionAsTable(region); try { - HTestConst.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1), - firstRowBytes, secondRowBytes); - HTestConst.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1), - firstRowBytes, secondRowBytes); + HTestConst.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes, + secondRowBytes); + HTestConst.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes, + secondRowBytes); Delete dc = new Delete(firstRowBytes); /* delete column1 of firstRow */ @@ -520,10 +511,10 @@ public void testScanAndConcurrentMajorCompact() throws Exception { region.delete(dc); region.flush(true); - HTestConst.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1), - secondRowBytes, thirdRowBytes); - HTestConst.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1), - secondRowBytes, thirdRowBytes); + HTestConst.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1), secondRowBytes, + thirdRowBytes); + HTestConst.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1), secondRowBytes, + thirdRowBytes); region.flush(true); InternalScanner s = region.getScanner(new Scan()); @@ -534,8 +525,7 @@ public void testScanAndConcurrentMajorCompact() throws Exception { s.next(results); // make sure returns column2 of firstRow - assertTrue("result is not correct, keyValues : " + results, - results.size() == 1); + assertTrue("result is not correct, keyValues : " + results, results.size() == 1); assertTrue(CellUtil.matchingRows(results.get(0), firstRowBytes)); assertTrue(CellUtil.matchingFamily(results.get(0), fam2)); @@ -552,7 +542,6 @@ public void testScanAndConcurrentMajorCompact() throws Exception { } } - /* * @param hri Region * @param flushIndex At what row we start the flush. 
@@ -564,7 +553,7 @@ private int count(final Table countTable, final int flushIndex, boolean concurre throws IOException { LOG.info("Taking out counting scan"); Scan scan = new Scan(); - for (byte [] qualifier: EXPLICIT_COLS) { + for (byte[] qualifier : EXPLICIT_COLS) { scan.addColumn(HConstants.CATALOG_FAMILY, qualifier); } ResultScanner s = countTable.getScanner(scan); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java index f678f2bcfab4..6f89654eb67e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -316,7 +316,7 @@ public Void call() throws Exception { scan.setCaching(Integer.MAX_VALUE); scan.setFilter(new SparseCellFilter()); try (ScanPerNextResultScanner scanner = - new ScanPerNextResultScanner(CONN.getTable(TABLE_NAME), scan)) { + new ScanPerNextResultScanner(CONN.getTable(TABLE_NAME), scan)) { int num = 0; while (scanner.next() != null) { num++; @@ -330,7 +330,7 @@ public Void call() throws Exception { scan.setFilter(new SparseCellFilter()); scan.setAllowPartialResults(true); try (ScanPerNextResultScanner scanner = - new ScanPerNextResultScanner(CONN.getTable(TABLE_NAME), scan)) { + new ScanPerNextResultScanner(CONN.getTable(TABLE_NAME), scan)) { int num = 0; while (scanner.next() != null) { num++; @@ -356,7 +356,7 @@ public Void call() throws Exception { scan.setCaching(Integer.MAX_VALUE); scan.setFilter(new SparseRowFilter()); try (ScanPerNextResultScanner scanner = - new ScanPerNextResultScanner(CONN.getTable(TABLE_NAME), scan)) { + new ScanPerNextResultScanner(CONN.getTable(TABLE_NAME), scan)) { int num = 0; while (scanner.next() != null) { num++; @@ -566,11 +566,9 @@ public boolean nextRaw(List outResults, ScannerContext context) throws IOE @Override protected void initializeKVHeap(List scanners, List joinedScanners, HRegion region) throws IOException { - this.storeHeap = - new HeartbeatKVHeap(scanners, region.getCellComparator()); + this.storeHeap = new HeartbeatKVHeap(scanners, region.getCellComparator()); if (!joinedScanners.isEmpty()) { - this.joinedHeap = - new HeartbeatKVHeap(joinedScanners, region.getCellComparator()); + this.joinedHeap = new HeartbeatKVHeap(joinedScanners, region.getCellComparator()); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java index 2d47ff630435..dee06e3c0821 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java @@ -44,12 +44,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestScannerRPCScanMetrics { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScannerRPCScanMetrics.class); + 
HBaseClassTestRule.forClass(TestScannerRPCScanMetrics.class); private static final Logger LOG = LoggerFactory.getLogger(TestScannerRPCScanMetrics.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -57,7 +57,6 @@ public class TestScannerRPCScanMetrics { private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier"); private static final byte[] VALUE = Bytes.toBytes("testValue"); - @Rule public TestName name = new TestName(); @@ -78,7 +77,7 @@ public void testScannerRPCScanMetrics() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); byte[][] splits = new byte[1][]; splits[0] = Bytes.toBytes("row-4"); - Table ht = TEST_UTIL.createTable(tableName, FAMILY,splits); + Table ht = TEST_UTIL.createTable(tableName, FAMILY, splits); byte[] r0 = Bytes.toBytes("row-0"); byte[] r1 = Bytes.toBytes("row-1"); byte[] r2 = Bytes.toBytes("row-2"); @@ -110,8 +109,8 @@ public void testScannerRPCScanMetrics() throws Exception { // This scan should increment rpc full scan count by 2 (both regions - no stop/start row) scanNextIterate(ht, dummyScan); - RSRpcServices testClusterRSRPCServices = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) - .getRpcServices(); + RSRpcServices testClusterRSRPCServices = + TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRpcServices(); assertEquals(4, testClusterRSRPCServices.rpcFullScanRequestCount.intValue()); } @@ -121,10 +120,9 @@ private void putToTable(Table ht, byte[] rowkey) throws IOException { ht.put(put); } - private void scanNextIterate(Table ht, Scan scan) throws Exception{ + private void scanNextIterate(Table ht, Scan scan) throws Exception { ResultScanner scanner = ht.getScanner(scan); - for (Result result = scanner.next(); result != null; result = scanner.next()) - { + for (Result result = scanner.next(); result != null; result = scanner.next()) { // Use the result object } scanner.close(); @@ -132,7 +130,7 @@ private void scanNextIterate(Table ht, Scan scan) throws Exception{ private static class RegionServerWithScanMetrics extends MiniHBaseClusterRegionServer { public RegionServerWithScanMetrics(Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { super(conf); } @@ -140,15 +138,17 @@ protected RSRpcServices createRPCServices() throws IOException { return new RSRPCServicesWithScanMetrics(this); } } + private static class RSRPCServicesWithScanMetrics extends RSRpcServices { public long getScanRequestCount() { return super.rpcScanRequestCount.longValue(); } + public long getFullScanRequestCount() { return super.rpcFullScanRequestCount.longValue(); } - public RSRPCServicesWithScanMetrics(HRegionServer rs) - throws IOException { + + public RSRPCServicesWithScanMetrics(HRegionServer rs) throws IOException { super(rs); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java index 16fe4e30dbfb..6a1a4b174992 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestScannerRetriableFailure { @ClassRule @@ -68,7 +68,8 @@ public class TestScannerRetriableFailure { private static final String FAMILY_NAME_STR = "f"; private static final byte[] FAMILY_NAME = Bytes.toBytes(FAMILY_NAME_STR); - @Rule public TableNameTestRule testTable = new TableNameTestRule(); + @Rule + public TableNameTestRule testTable = new TableNameTestRule(); public static class FaultyScannerObserver implements RegionCoprocessor, RegionObserver { private int faults = 0; @@ -80,8 +81,8 @@ public Optional getRegionObserver() { @Override public boolean preScannerNext(final ObserverContext e, - final InternalScanner s, final List results, - final int limit, final boolean hasMore) throws IOException { + final InternalScanner s, final List results, final int limit, final boolean hasMore) + throws IOException { final TableName tableName = e.getEnvironment().getRegionInfo().getTable(); if (!tableName.isSystemTable() && (faults++ % 2) == 0) { LOG.debug(" Injecting fault in table=" + tableName + " scanner"); @@ -128,7 +129,7 @@ public void testFaultyScanner() throws Exception { } // ========================================================================== - // Helpers + // Helpers // ========================================================================== private FileSystem getFileSystem() { return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java index 7be349e88afc..eb6dbe0747a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestScannerWithBulkload { @ClassRule @@ -76,12 +76,9 @@ public static void setUpBeforeClass() throws Exception { } private static void createTable(Admin admin, TableName tableName) throws IOException { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tableName); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("col")) - .setMaxVersions(3).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("col")).setMaxVersions(3).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.createTable(tableDescriptorBuilder.build()); } @@ -95,8 +92,8 @@ public void testBulkLoad() throws Exception { Scan scan = createScan(); final Table table = init(admin, l, scan, tableName); // use bulkload - final Path hfilePath = writeToHFile(l, "/temp/testBulkLoad/", "/temp/testBulkLoad/col/file", - false); + final Path hfilePath = + writeToHFile(l, "/temp/testBulkLoad/", "/temp/testBulkLoad/col/file", false); Configuration conf = TEST_UTIL.getConfiguration(); conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true); BulkLoadHFiles.create(conf).bulkLoad(tableName, hfilePath); @@ -104,8 +101,8 @@ public void testBulkLoad() throws Exception { Result result = scanner.next(); result = scanAfterBulkLoad(scanner, result, "version2"); Put put0 = new Put(Bytes.toBytes("row1")); - put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes - .toBytes("version3"))); + put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, + Bytes.toBytes("version3"))); table.put(put0); admin.flush(tableName); scanner = table.getScanner(scan); @@ -113,8 +110,7 @@ public void testBulkLoad() throws Exception { while (result != null) { List cells = result.getColumnCells(Bytes.toBytes("col"), Bytes.toBytes("q")); for (Cell _c : cells) { - if (Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength()) - .equals("row1")) { + if (Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength()).equals("row1")) { System.out .println(Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength())); System.out.println(Bytes.toString(_c.getQualifierArray(), _c.getQualifierOffset(), @@ -136,8 +132,7 @@ private Result scanAfterBulkLoad(ResultScanner scanner, Result result, String ex while (result != null) { List cells = result.getColumnCells(Bytes.toBytes("col"), Bytes.toBytes("q")); for (Cell _c : cells) { - if (Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength()) - .equals("row1")) { + if (Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength()).equals("row1")) { System.out .println(Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength())); System.out.println(Bytes.toString(_c.getQualifierArray(), _c.getQualifierOffset(), @@ -183,10 +178,8 @@ private Path writeToHFile(long l, String hFilePath, String pathStr, boolean nati // Scan should only look at the seq id appended at the bulk load time, and not skip its // kv. 
writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(new Long(9999999))); - } - else { - writer.appendFileInfo(BULKLOAD_TIME_KEY, - Bytes.toBytes(EnvironmentEdgeManager.currentTime())); + } else { + writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime())); } writer.close(); return hfilePath; @@ -200,13 +193,13 @@ private Table init(Admin admin, long l, Scan scan, TableName tableName) throws E table.put(put0); admin.flush(tableName); Put put1 = new Put(Bytes.toBytes("row2")); - put1.add(new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes - .toBytes("version0"))); + put1.add(new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, + Bytes.toBytes("version0"))); table.put(put1); admin.flush(tableName); put0 = new Put(Bytes.toBytes("row1")); - put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes - .toBytes("version1"))); + put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, + Bytes.toBytes("version1"))); table.put(put0); admin.flush(tableName); admin.compact(tableName); @@ -233,7 +226,7 @@ public void testBulkLoadWithParallelScan() throws Exception { final Table table = init(admin, l, scan, tableName); // use bulkload final Path hfilePath = writeToHFile(l, "/temp/testBulkLoadWithParallelScan/", - "/temp/testBulkLoadWithParallelScan/col/file", false); + "/temp/testBulkLoadWithParallelScan/col/file", false); Configuration conf = TEST_UTIL.getConfiguration(); conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true); final BulkLoadHFiles bulkload = BulkLoadHFiles.create(conf); @@ -281,11 +274,11 @@ public void testBulkLoadNativeHFile() throws Exception { ResultScanner scanner = table.getScanner(scan); Result result = scanner.next(); // We had 'version0', 'version1' for 'row1,col:q' in the table. 
- // Bulk load added 'version2' scanner should be able to see 'version2' + // Bulk load added 'version2' scanner should be able to see 'version2' result = scanAfterBulkLoad(scanner, result, "version2"); Put put0 = new Put(Bytes.toBytes("row1")); - put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes - .toBytes("version3"))); + put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, + Bytes.toBytes("version3"))); table.put(put0); admin.flush(tableName); scanner = table.getScanner(scan); @@ -293,8 +286,7 @@ public void testBulkLoadNativeHFile() throws Exception { while (result != null) { List cells = result.getColumnCells(Bytes.toBytes("col"), Bytes.toBytes("q")); for (Cell _c : cells) { - if (Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength()) - .equals("row1")) { + if (Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength()).equals("row1")) { System.out .println(Bytes.toString(_c.getRowArray(), _c.getRowOffset(), _c.getRowLength())); System.out.println(Bytes.toString(_c.getQualifierArray(), _c.getQualifierOffset(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java index 1788e4720262..121f541cc4b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,11 +58,11 @@ public class TestScannerWithCorruptHFile { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestScannerWithCorruptHFile.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final byte[] FAMILY_NAME = Bytes.toBytes("f"); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @BeforeClass public static void setup() throws Exception { TEST_UTIL.startMiniCluster(1); @@ -81,7 +81,7 @@ public Optional getRegionObserver() { @Override public boolean preScannerNext(ObserverContext e, - InternalScanner s, List results, int limit, boolean hasMore) throws IOException { + InternalScanner s, List results, int limit, boolean hasMore) throws IOException { throw new CorruptHFileException("For test"); } } @@ -90,8 +90,8 @@ public boolean preScannerNext(ObserverContext e, public void testScanOnCorruptHFile() throws IOException { TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setCoprocessor(CorruptHFileCoprocessor.class.getName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)).build(); + .setCoprocessor(CorruptHFileCoprocessor.class.getName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME)).build(); Table table = TEST_UTIL.createTable(tableDescriptor, null); try { loadTable(table, 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java index 22434a2dbe37..f72eb1a18b9a 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,17 +68,15 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; - @RunWith(Parameterized.class) -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestSecureBulkLoadManager { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSecureBulkLoadManager.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestSecureBulkLoadManager.class); + private static final Logger LOG = LoggerFactory.getLogger(TestSecureBulkLoadManager.class); private static TableName TABLE = TableName.valueOf(Bytes.toBytes("TestSecureBulkLoadManager")); private static byte[] FAMILY = Bytes.toBytes("family"); @@ -104,7 +102,7 @@ public TestSecureBulkLoadManager(Boolean useFileBasedSFT) { @Parameterized.Parameters public static Collection data() { - Boolean[] data = {false, true}; + Boolean[] data = { false, true }; return Arrays.asList(data); } @@ -113,8 +111,7 @@ public void setUp() throws Exception { if (useFileBasedSFT) { conf.set(StoreFileTrackerFactory.TRACKER_IMPL, "org.apache.hadoop.hbase.regionserver.storefiletracker.FileBasedStoreFileTracker"); - } - else{ + } else { conf.unset(StoreFileTrackerFactory.TRACKER_IMPL); } testUtil.startMiniCluster(); @@ -130,12 +127,11 @@ public void tearDown() throws Exception { * After a secure bulkload finished , there is a clean-up for FileSystems used in the bulkload. * Sometimes, FileSystems used in the finished bulkload might also be used in other bulkload * calls, or there are other FileSystems created by the same user, they could be closed by a - * FileSystem.closeAllForUGI call. So during the clean-up, those FileSystems need to be used - * later can not get closed ,or else a race condition occurs. - * - * testForRaceCondition tests the case that two secure bulkload calls from the same UGI go - * into two different regions and one bulkload finishes earlier when the other bulkload still - * needs its FileSystems, checks that both bulkloads succeed. + * FileSystem.closeAllForUGI call. So during the clean-up, those FileSystems need to be used later + * can not get closed ,or else a race condition occurs. testForRaceCondition tests the case that + * two secure bulkload calls from the same UGI go into two different regions and one bulkload + * finishes earlier when the other bulkload still needs its FileSystems, checks that both + * bulkloads succeed. 
*/ @Test public void testForRaceCondition() throws Exception { @@ -146,15 +142,15 @@ public void accept(HRegion hRegion) { Threads.shutdown(ealierBulkload);/// wait util the other bulkload finished } } - } ; + }; testUtil.getMiniHBaseCluster().getRegionServerThreads().get(0).getRegionServer() .getSecureBulkLoadManager().setFsCreatedListener(fsCreatedListener); /// create table - testUtil.createTable(TABLE,FAMILY,Bytes.toByteArrays(SPLIT_ROWKEY)); + testUtil.createTable(TABLE, FAMILY, Bytes.toByteArrays(SPLIT_ROWKEY)); /// prepare files - Path rootdir = testUtil.getMiniHBaseCluster().getRegionServerThreads().get(0) - .getRegionServer().getDataRootDir(); + Path rootdir = testUtil.getMiniHBaseCluster().getRegionServerThreads().get(0).getRegionServer() + .getDataRootDir(); Path dir1 = new Path(rootdir, "dir1"); prepareHFile(dir1, key1, value1); Path dir2 = new Path(rootdir, "dir2"); @@ -169,7 +165,7 @@ public void run() { try { doBulkloadWithoutRetry(dir1); } catch (Exception e) { - LOG.error("bulk load failed .",e); + LOG.error("bulk load failed .", e); t1Exception.set(e); } } @@ -180,7 +176,7 @@ public void run() { try { doBulkloadWithoutRetry(dir2); } catch (Exception e) { - LOG.error("bulk load failed .",e); + LOG.error("bulk load failed .", e); t2Exception.set(e); } } @@ -205,13 +201,13 @@ public void run() { /** * A trick is used to make sure server-side failures( if any ) not being covered up by a client - * retry. Since BulkLoadHFilesTool.bulkLoad keeps performing bulkload calls as long as the - * HFile queue is not empty, while server-side exceptions in the doAs block do not lead - * to a client exception, a bulkload will always succeed in this case by default, thus client - * will never be aware that failures have ever happened . To avoid this kind of retry , - * a MyExceptionToAvoidRetry exception is thrown after bulkLoadPhase finished and caught - * silently outside the doBulkLoad call, so that the bulkLoadPhase would be called exactly - * once, and server-side failures, if any ,can be checked via data. + * retry. Since BulkLoadHFilesTool.bulkLoad keeps performing bulkload calls as long as the HFile + * queue is not empty, while server-side exceptions in the doAs block do not lead to a client + * exception, a bulkload will always succeed in this case by default, thus client will never be + * aware that failures have ever happened . To avoid this kind of retry , a + * MyExceptionToAvoidRetry exception is thrown after bulkLoadPhase finished and caught silently + * outside the doBulkLoad call, so that the bulkLoadPhase would be called exactly once, and + * server-side failures, if any ,can be checked via data. 
*/ class MyExceptionToAvoidRetry extends DoNotRetryIOException { @@ -232,7 +228,7 @@ protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, try { h.bulkLoad(TABLE, dir); Assert.fail("MyExceptionToAvoidRetry is expected"); - } catch (MyExceptionToAvoidRetry e) { //expected + } catch (MyExceptionToAvoidRetry e) { // expected } } @@ -243,25 +239,20 @@ private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception { CacheConfig writerCacheConf = new CacheConfig(conf, family, null, ByteBuffAllocator.HEAP); writerCacheConf.setCacheDataOnWrite(false); - HFileContext hFileContext = new HFileContextBuilder() - .withIncludesMvcc(false) - .withIncludesTags(true) - .withCompression(compression) - .withCompressTags(family.isCompressTags()) - .withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) - .withBlockSize(family.getBlocksize()) - .withHBaseCheckSum(true) - .withDataBlockEncoding(family.getDataBlockEncoding()) - .withEncryptionContext(Encryption.Context.NONE) - .withCreateTime(EnvironmentEdgeManager.currentTime()) - .build(); + HFileContext hFileContext = + new HFileContextBuilder().withIncludesMvcc(false).withIncludesTags(true) + .withCompression(compression).withCompressTags(family.isCompressTags()) + .withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)) + .withBlockSize(family.getBlocksize()).withHBaseCheckSum(true) + .withDataBlockEncoding(family.getDataBlockEncoding()) + .withEncryptionContext(Encryption.Context.NONE) + .withCreateTime(EnvironmentEdgeManager.currentTime()).build(); StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, dir.getFileSystem(conf)) - .withOutputDir(new Path(dir, family.getNameAsString())) - .withBloomType(family.getBloomFilterType()) - .withMaxKeyCount(Integer.MAX_VALUE) - .withFileContext(hFileContext); + .withOutputDir(new Path(dir, family.getNameAsString())) + .withBloomType(family.getBloomFilterType()).withMaxKeyCount(Integer.MAX_VALUE) + .withFileContext(hFileContext); StoreFileWriter writer = builder.build(); Put put = new Put(key); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkloadListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkloadListener.java index 93b3f00b34f4..4097203f412c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkloadListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkloadListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ * Tests for failedBulkLoad logic to make sure staged files are returned to their original location * if the bulkload have failed. 
*/ -@Category({MiscTests.class, LargeTests.class}) +@Category({ MiscTests.class, LargeTests.class }) public class TestSecureBulkloadListener { @ClassRule @@ -79,8 +79,8 @@ public void setUp() throws Exception { htu = new HBaseTestingUtil(); htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks htu.getConfiguration().setInt("dfs.replication", 3); - htu.startMiniDFSCluster(3, - new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3}); + htu.startMiniDFSCluster(3, new String[] { "/r1", "/r2", "/r3" }, + new String[] { host1, host2, host3 }); conf = htu.getConfiguration(); cluster = htu.getDFSCluster(); @@ -95,14 +95,14 @@ public void tearDownAfterClass() throws Exception { @Test public void testMovingStagedFile() throws Exception { Path stagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); + new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); if (!dfs.exists(stagingDirPath)) { dfs.mkdirs(stagingDirPath); } SecureBulkLoadManager.SecureBulkLoadListener listener = - new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf); + new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf); - //creating file to load + // creating file to load String srcFile = createHFileForFamilies(FAMILY); Path srcPath = new Path(srcFile); Assert.assertTrue(dfs.exists(srcPath)); @@ -112,13 +112,13 @@ public void testMovingStagedFile() throws Exception { dfs.mkdirs(stagedFamily); } - //moving file to staging + // moving file to staging String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, false, null); Path stagedPath = new Path(stagedFile); Assert.assertTrue(dfs.exists(stagedPath)); Assert.assertFalse(dfs.exists(srcPath)); - //moving files back to original location after a failed bulkload + // moving files back to original location after a failed bulkload listener.failedBulkLoad(FAMILY, stagedFile); Assert.assertFalse(dfs.exists(stagedPath)); Assert.assertTrue(dfs.exists(srcPath)); @@ -127,14 +127,14 @@ public void testMovingStagedFile() throws Exception { @Test public void testMovingStagedFileWithCustomStageDir() throws Exception { Path stagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); + new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); if (!dfs.exists(stagingDirPath)) { dfs.mkdirs(stagingDirPath); } SecureBulkLoadManager.SecureBulkLoadListener listener = - new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf); + new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf); - //creating file to load + // creating file to load String srcFile = createHFileForFamilies(FAMILY); Path srcPath = new Path(srcFile); Assert.assertTrue(dfs.exists(srcPath)); @@ -145,20 +145,20 @@ public void testMovingStagedFileWithCustomStageDir() throws Exception { } Path customStagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), CUSTOM_STAGING_DIR)); + new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), CUSTOM_STAGING_DIR)); Path customStagedFamily = new Path(customStagingDirPath, new Path(Bytes.toString(FAMILY))); if (!dfs.exists(customStagedFamily)) { dfs.mkdirs(customStagedFamily); } - //moving file to staging using a custom staging dir + // moving file to staging using a custom staging dir String stagedFile = - listener.prepareBulkLoad(FAMILY, srcFile, false, 
customStagingDirPath.toString()); + listener.prepareBulkLoad(FAMILY, srcFile, false, customStagingDirPath.toString()); Path stagedPath = new Path(stagedFile); Assert.assertTrue(dfs.exists(stagedPath)); Assert.assertFalse(dfs.exists(srcPath)); - //moving files back to original location after a failed bulkload + // moving files back to original location after a failed bulkload listener.failedBulkLoad(FAMILY, stagedFile); Assert.assertFalse(dfs.exists(stagedPath)); Assert.assertTrue(dfs.exists(srcPath)); @@ -167,14 +167,14 @@ public void testMovingStagedFileWithCustomStageDir() throws Exception { @Test public void testCopiedStagedFile() throws Exception { Path stagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); + new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); if (!dfs.exists(stagingDirPath)) { dfs.mkdirs(stagingDirPath); } SecureBulkLoadManager.SecureBulkLoadListener listener = - new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf); + new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf); - //creating file to load + // creating file to load String srcFile = createHFileForFamilies(FAMILY); Path srcPath = new Path(srcFile); Assert.assertTrue(dfs.exists(srcPath)); @@ -184,13 +184,13 @@ public void testCopiedStagedFile() throws Exception { dfs.mkdirs(stagedFamily); } - //copying file to staging + // copying file to staging String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, true, null); Path stagedPath = new Path(stagedFile); Assert.assertTrue(dfs.exists(stagedPath)); Assert.assertTrue(dfs.exists(srcPath)); - //should do nothing because the original file was copied to staging + // should do nothing because the original file was copied to staging listener.failedBulkLoad(FAMILY, stagedFile); Assert.assertTrue(dfs.exists(stagedPath)); Assert.assertTrue(dfs.exists(srcPath)); @@ -199,14 +199,14 @@ public void testCopiedStagedFile() throws Exception { @Test(expected = IOException.class) public void testDeletedStagedFile() throws Exception { Path stagingDirPath = - new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); + new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR)); if (!dfs.exists(stagingDirPath)) { dfs.mkdirs(stagingDirPath); } SecureBulkLoadManager.SecureBulkLoadListener listener = - new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf); + new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf); - //creating file to load + // creating file to load String srcFile = createHFileForFamilies(FAMILY); Path srcPath = new Path(srcFile); Assert.assertTrue(dfs.exists(srcPath)); @@ -216,7 +216,7 @@ public void testDeletedStagedFile() throws Exception { dfs.mkdirs(stagedFamily); } - //moving file to staging + // moving file to staging String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, false, null); Path stagedPath = new Path(stagedFile); Assert.assertTrue(dfs.exists(stagedPath)); @@ -224,14 +224,15 @@ public void testDeletedStagedFile() throws Exception { dfs.delete(stagedPath, false); - //moving files back to original location after a failed bulkload + // moving files back to original location after a failed bulkload listener.failedBulkLoad(FAMILY, stagedFile); } private String createHFileForFamilies(byte[] family) throws IOException { HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf); - Path 
testDir = new Path(dfs.getWorkingDirectory() , new Path(name.getMethodName(), Bytes.toString(family))); - if(!dfs.exists(testDir)){ + Path testDir = + new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), Bytes.toString(family))); + if (!dfs.exists(testDir)) { dfs.mkdirs(testDir); } Path hfilePath = new Path(testDir, generateUniqueName(null)); @@ -242,8 +243,8 @@ private String createHFileForFamilies(byte[] family) throws IOException { HFile.Writer writer = hFileFactory.create(); try { writer.append(new KeyValue(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(randomBytes).setFamily(family).setQualifier(randomBytes).setTimestamp(0L) - .setType(KeyValue.Type.Put.getCode()).setValue(randomBytes).build())); + .setRow(randomBytes).setFamily(family).setQualifier(randomBytes).setTimestamp(0L) + .setType(KeyValue.Type.Put.getCode()).setValue(randomBytes).build())); } finally { writer.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java index d6bb999bbe8b..087115ac9e21 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,19 +60,18 @@ import org.slf4j.LoggerFactory; /** - * Test various seek optimizations for correctness and check if they are - * actually saving I/O operations. + * Test various seek optimizations for correctness and check if they are actually saving I/O + * operations. */ @RunWith(Parameterized.class) -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestSeekOptimizations { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSeekOptimizations.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestSeekOptimizations.class); + private static final Logger LOG = LoggerFactory.getLogger(TestSeekOptimizations.class); // Constants private static final String FAMILY = "myCF"; @@ -87,29 +86,17 @@ public class TestSeekOptimizations { private static final boolean VERBOSE = false; /** - * Disable this when this test fails hopelessly and you need to debug a - * simpler case. + * Disable this when this test fails hopelessly and you need to debug a simpler case. */ private static final boolean USE_MANY_STORE_FILES = true; - private static final int[][] COLUMN_SETS = new int[][] { - {}, // All columns - {0}, - {1}, - {0, 2}, - {1, 2}, - {0, 1, 2}, - }; + private static final int[][] COLUMN_SETS = new int[][] { {}, // All columns + { 0 }, { 1 }, { 0, 2 }, { 1, 2 }, { 0, 1, 2 }, }; // Both start row and end row are inclusive here for the purposes of this // test. 
- private static final int[][] ROW_RANGES = new int[][] { - {-1, -1}, - {0, 1}, - {1, 1}, - {1, 2}, - {0, 2} - }; + private static final int[][] ROW_RANGES = + new int[][] { { -1, -1 }, { 0, 1 }, { 1, 1 }, { 1, 2 }, { 0, 2 } }; private static final int[] MAX_VERSIONS_VALUES = new int[] { 1, 2 }; @@ -134,8 +121,7 @@ public static final Collection parameters() { return HBaseTestingUtil.BLOOM_AND_COMPRESSION_COMBINATIONS; } - public TestSeekOptimizations(Compression.Algorithm comprAlgo, - BloomType bloomType) { + public TestSeekOptimizations(Compression.Algorithm comprAlgo, BloomType bloomType) { this.comprAlgo = comprAlgo; this.bloomType = bloomType; } @@ -152,11 +138,8 @@ public void testMultipleTimestampRanges() throws IOException { // enable seek counting StoreFileScanner.instrument(); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY)) - .setCompressionType(comprAlgo) - .setBloomFilterType(bloomType) - .setMaxVersions(3) - .build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(FAMILY)) + .setCompressionType(comprAlgo).setBloomFilterType(bloomType).setMaxVersions(3).build(); region = TEST_UTIL.createTestRegion("testMultipleTimestampRanges", columnFamilyDescriptor); @@ -179,33 +162,29 @@ public void testMultipleTimestampRanges() throws IOException { for (int[] rowRange : ROW_RANGES) { for (int maxVersions : MAX_VERSIONS_VALUES) { for (boolean lazySeekEnabled : new boolean[] { false, true }) { - testScan(columnArr, lazySeekEnabled, rowRange[0], rowRange[1], - maxVersions); + testScan(columnArr, lazySeekEnabled, rowRange[0], rowRange[1], maxVersions); } } } } final double seekSavings = 1 - totalSeekLazy * 1.0 / totalSeekDiligent; - System.err.println("For bloom=" + bloomType + ", compr=" + comprAlgo + - " total seeks without optimization: " + totalSeekDiligent - + ", with optimization: " + totalSeekLazy + " (" + - String.format("%.2f%%", totalSeekLazy * 100.0 / totalSeekDiligent) + - "), savings: " + String.format("%.2f%%", - 100.0 * seekSavings) + "\n"); + System.err.println("For bloom=" + bloomType + ", compr=" + comprAlgo + + " total seeks without optimization: " + totalSeekDiligent + ", with optimization: " + + totalSeekLazy + " (" + String.format("%.2f%%", totalSeekLazy * 100.0 / totalSeekDiligent) + + "), savings: " + String.format("%.2f%%", 100.0 * seekSavings) + "\n"); // Test that lazy seeks are buying us something. Without the actual // implementation of the lazy seek optimization this will be 0. 
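A quick worked example of the seek-savings metric computed and asserted here; the counts are hypothetical, only the formula is taken from the test.

// Hypothetical counts purely to illustrate: savings = 1 - totalSeekLazy / totalSeekDiligent.
long totalSeekDiligent = 200; // seeks observed with lazy seek disabled
long totalSeekLazy = 150;     // seeks observed with lazy seek enabled
double seekSavings = 1 - totalSeekLazy * 1.0 / totalSeekDiligent;          // 0.25
System.err.println(String.format("savings: %.2f%%", 100.0 * seekSavings)); // prints "savings: 25.00%"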
final double expectedSeekSavings = 0.0; - assertTrue("Lazy seek is only saving " + - String.format("%.2f%%", seekSavings * 100) + " seeks but should " + - "save at least " + String.format("%.2f%%", expectedSeekSavings * 100), - seekSavings >= expectedSeekSavings); + assertTrue("Lazy seek is only saving " + String.format("%.2f%%", seekSavings * 100) + + " seeks but should " + "save at least " + + String.format("%.2f%%", expectedSeekSavings * 100), + seekSavings >= expectedSeekSavings); } - private void testScan(final int[] columnArr, final boolean lazySeekEnabled, - final int startRow, final int endRow, int maxVersions) - throws IOException { + private void testScan(final int[] columnArr, final boolean lazySeekEnabled, final int startRow, + final int endRow, int maxVersions) throws IOException { StoreScanner.enableLazySeekGlobally(lazySeekEnabled); final Scan scan = new Scan(); final Set qualSet = new HashSet<>(); @@ -239,25 +218,20 @@ private void testScan(final int[] columnArr, final boolean lazySeekEnabled, results.clear(); } while (hasNext); - List filteredKVs = filterExpectedResults(qualSet, - rowBytes(startRow), rowBytes(endRow), maxVersions); - final String rowRestrictionStr = - (startRow == -1 && endRow == -1) ? "all rows" : ( - startRow == endRow ? ("row=" + startRow) : ("startRow=" - + startRow + ", " + "endRow=" + endRow)); + List filteredKVs = + filterExpectedResults(qualSet, rowBytes(startRow), rowBytes(endRow), maxVersions); + final String rowRestrictionStr = (startRow == -1 && endRow == -1) ? "all rows" + : (startRow == endRow ? ("row=" + startRow) + : ("startRow=" + startRow + ", " + "endRow=" + endRow)); final String columnRestrictionStr = - columnArr.length == 0 ? "all columns" - : ("columns=" + Arrays.toString(columnArr)); - final String testDesc = - "Bloom=" + bloomType + ", compr=" + comprAlgo + ", " - + (scan.isGetScan() ? "Get" : "Scan") + ": " - + columnRestrictionStr + ", " + rowRestrictionStr - + ", maxVersions=" + maxVersions + ", lazySeek=" + lazySeekEnabled; + columnArr.length == 0 ? "all columns" : ("columns=" + Arrays.toString(columnArr)); + final String testDesc = "Bloom=" + bloomType + ", compr=" + comprAlgo + ", " + + (scan.isGetScan() ? "Get" : "Scan") + ": " + columnRestrictionStr + ", " + + rowRestrictionStr + ", maxVersions=" + maxVersions + ", lazySeek=" + lazySeekEnabled; long seekCount = StoreFileScanner.getSeekCount() - initialSeekCount; if (VERBOSE) { - System.err.println("Seek count: " + seekCount + ", KVs returned: " - + actualKVs.size() + ". " + testDesc + - (lazySeekEnabled ? "\n" : "")); + System.err.println("Seek count: " + seekCount + ", KVs returned: " + actualKVs.size() + ". " + + testDesc + (lazySeekEnabled ? 
"\n" : "")); } if (lazySeekEnabled) { totalSeekLazy += seekCount; @@ -267,21 +241,19 @@ private void testScan(final int[] columnArr, final boolean lazySeekEnabled, assertKVListsEqual(testDesc, filteredKVs, actualKVs); } - private List filterExpectedResults(Set qualSet, - byte[] startRow, byte[] endRow, int maxVersions) { + private List filterExpectedResults(Set qualSet, byte[] startRow, byte[] endRow, + int maxVersions) { final List filteredKVs = new ArrayList<>(); final Map verCount = new HashMap<>(); for (Cell kv : expectedKVs) { - if (startRow.length > 0 && - Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - startRow, 0, startRow.length) < 0) { + if (startRow.length > 0 && Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), + kv.getRowLength(), startRow, 0, startRow.length) < 0) { continue; } // In this unit test the end row is always inclusive. - if (endRow.length > 0 && - Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), - endRow, 0, endRow.length) > 0) { + if (endRow.length > 0 && Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), + kv.getRowLength(), endRow, 0, endRow.length) > 0) { continue; } @@ -290,10 +262,9 @@ private List filterExpectedResults(Set qualSet, continue; } - final String rowColStr = - Bytes.toStringBinary(CellUtil.cloneRow(kv)) + "/" - + Bytes.toStringBinary(CellUtil.cloneFamily(kv)) + ":" - + Bytes.toStringBinary(CellUtil.cloneQualifier(kv)); + final String rowColStr = Bytes.toStringBinary(CellUtil.cloneRow(kv)) + "/" + + Bytes.toStringBinary(CellUtil.cloneFamily(kv)) + ":" + + Bytes.toStringBinary(CellUtil.cloneQualifier(kv)); final Integer curNumVer = verCount.get(rowColStr); final int newNumVer = curNumVer != null ? (curNumVer + 1) : 1; if (newNumVer <= maxVersions) { @@ -322,8 +293,8 @@ public void put(String qual, long ts) { putTimestamps.add(ts); } if (VERBOSE) { - LOG.info("put: row " + Bytes.toStringBinary(put.getRow()) - + ", cf " + FAMILY + ", qualifier " + qual + ", ts " + ts); + LOG.info("put: row " + Bytes.toStringBinary(put.getRow()) + ", cf " + FAMILY + ", qualifier " + + qual + ", ts " + ts); } } @@ -338,8 +309,7 @@ public void delAtTimestamp(String qual, long ts) { private void logDelete(String qual, long ts, String delType) { if (VERBOSE) { - LOG.info("del " + delType + ": row " - + Bytes.toStringBinary(put.getRow()) + ", cf " + FAMILY + LOG.info("del " + delType + ": row " + Bytes.toStringBinary(put.getRow()) + ", cf " + FAMILY + ", qualifier " + qual + ", ts " + ts); } } @@ -351,8 +321,7 @@ private void delUpToTimestamp(String qual, long upToTS) { private long randLong(long n) { long l = RNG.nextLong(); - if (l == Long.MIN_VALUE) - l = Long.MAX_VALUE; + if (l == Long.MIN_VALUE) l = Long.MAX_VALUE; return Math.abs(l) % n; } @@ -377,11 +346,9 @@ private final String getQualStr(int i) { return ("qual" + i).intern(); } - public void createTimestampRange(long minTS, long maxTS, - long deleteUpToTS) throws IOException { + public void createTimestampRange(long minTS, long maxTS, long deleteUpToTS) throws IOException { assertTrue(minTS < maxTS); - assertTrue(deleteUpToTS == -1 - || (minTS <= deleteUpToTS && deleteUpToTS <= maxTS)); + assertTrue(deleteUpToTS == -1 || (minTS <= deleteUpToTS && deleteUpToTS <= maxTS)); for (int iRow = 0; iRow < NUM_ROWS; ++iRow) { final String row = rowStr(iRow); @@ -437,8 +404,7 @@ public void createTimestampRange(long minTS, long maxTS, // Add remaining timestamps (those we have not deleted) to expected // results for (long ts : putTimestamps) { - expectedKVs.add(new 
KeyValue(rowBytes, FAMILY_BYTES, qualBytes, ts, - KeyValue.Type.Put)); + expectedKVs.add(new KeyValue(rowBytes, FAMILY_BYTES, qualBytes, ts, KeyValue.Type.Put)); } } } @@ -454,22 +420,18 @@ public void tearDown() throws IOException { // We have to re-set the lazy seek flag back to the default so that other // unit tests are not affected. - StoreScanner.enableLazySeekGlobally( - StoreScanner.LAZY_SEEK_ENABLED_BY_DEFAULT); + StoreScanner.enableLazySeekGlobally(StoreScanner.LAZY_SEEK_ENABLED_BY_DEFAULT); } - - public void assertKVListsEqual(String additionalMsg, - final List expected, + public void assertKVListsEqual(String additionalMsg, final List expected, final List actual) { final int eLen = expected.size(); final int aLen = actual.size(); final int minLen = Math.min(eLen, aLen); int i; - for (i = 0; i < minLen - && PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, expected.get(i), - actual.get(i)) == 0; ++i) { + for (i = 0; i < minLen && PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, + expected.get(i), actual.get(i)) == 0; ++i) { } if (additionalMsg == null) { @@ -480,11 +442,9 @@ public void assertKVListsEqual(String additionalMsg, } if (eLen != aLen || i != minLen) { - throw new AssertionError( - "Expected and actual KV arrays differ at position " + i + ": " + - HBaseTestingUtil.safeGetAsStr(expected, i) + " (length " + eLen +") vs. " + - HBaseTestingUtil.safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg); + throw new AssertionError("Expected and actual KV arrays differ at position " + i + ": " + + HBaseTestingUtil.safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " + + HBaseTestingUtil.safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg); } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java index e2525db73f0e..3520cf0e33de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerNonceManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestServerNonceManager { @ClassRule @@ -95,7 +95,7 @@ public void testNormalStartEnd() throws Exception { for (int j = 0; j < numbers.length; ++j) { nm.endOperation(numbers[i], numbers[j], true); assertEquals(numbers[j] == NO_NONCE, - nm.startOperation(numbers[i], numbers[j], createStoppable())); + nm.startOperation(numbers[i], numbers[j], createStoppable())); } } } @@ -106,7 +106,8 @@ public void testNoEndWithoutStart() { try { nm.endOperation(NO_NONCE, 1, true); throw new Error("Should have thrown"); - } catch (AssertionError err) {} + } catch (AssertionError err) { + } } @Test @@ -197,7 +198,7 @@ public void testConcurrentAttempts() throws Exception { nm.startOperation(NO_NONCE, 3, createStoppable()); tr = new TestRunnable(nm, 4, true, createStoppable()); - tr.start().join(); // nonce 3 must have no bearing on nonce 4 + tr.start().join(); // nonce 3 must have no bearing on nonce 4 tr.propagateError(); } @@ -208,6 +209,7 @@ public void testStopWaiting() throws Exception { Stoppable stoppingStoppable = createStoppable(); Mockito.when(stoppingStoppable.isStopped()).thenAnswer(new Answer() { AtomicInteger answer = new AtomicInteger(3); + @Override public Boolean answer(InvocationOnMock invocation) throws Throwable { return 0 < answer.decrementAndGet(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java index 5043f94dfa2a..4e5c4f55b8eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestSettingTimeoutOnBlockingPoint { @ClassRule @@ -93,12 +93,12 @@ public Result preIncrementAfterRowLock(final ObserverContext { try { - try( Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { table.incrementColumnValue(ROW1, FAM, FAM, 1); } } catch (IOException e) { @@ -107,7 +107,7 @@ public void testRowLock() throws IOException { }); Thread getThread = new Thread(() -> { try (Table table = - TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) { + TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) { Delete delete = new Delete(ROW1); table.delete(delete); } catch (IOException e) { @@ -120,7 +120,7 @@ public void testRowLock() throws IOException { getThread.start(); Threads.sleep(2000); try (Table table = - TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) { + TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) { // We have only two handlers. 
The first thread will get a write lock for row1 and occupy // the first handler. The second thread need a read lock for row1, it should quit after 1000 // ms and give back the handler because it can not get the lock in time. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java index 00ecf9847a06..18843d3e1d8b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java index 4a2cd34f6065..9920c259fb09 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestSimpleTimeRangeTracker { @ClassRule @@ -101,7 +101,7 @@ public void testAlwaysDecrementingSetsMaximum() { trr.includeTimestamp(2); trr.includeTimestamp(1); assertTrue(trr.getMin() != TimeRangeTracker.INITIAL_MIN_TIMESTAMP); - assertTrue(trr.getMax() != -1 /*The initial max value*/); + assertTrue(trr.getMax() != -1 /* The initial max value */); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index c96a5c98fdc8..9a9997c1fd43 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,6 @@ import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -60,7 +59,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestSplitLogWorker { @ClassRule @@ -70,8 +69,7 @@ public class TestSplitLogWorker { private static final Logger LOG = LoggerFactory.getLogger(TestSplitLogWorker.class); private static final int WAIT_TIME = 15000; private final ServerName MANAGER = ServerName.valueOf("manager,1,1"); - private final static HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private DummyServer ds; private ZKWatcher zkw; private SplitLogWorker slw; @@ -119,15 +117,15 @@ private boolean waitForCounterBoolean(final LongAdder ctr, final long oldval, lo private boolean waitForCounterBoolean(final LongAdder ctr, final long oldval, final long newval, long timems, boolean failIfTimeout) throws Exception { - long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout, - new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { + long timeWaited = + TEST_UTIL.waitFor(timems, 10, failIfTimeout, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { return (ctr.sum() >= newval); - } - }); + } + }); - if( timeWaited > 0) { + if (timeWaited > 0) { // when not timed out assertEquals(newval, ctr.sum()); } @@ -138,8 +136,7 @@ public boolean evaluate() throws Exception { public void setup() throws Exception { TEST_UTIL.startMiniZKCluster(); Configuration conf = TEST_UTIL.getConfiguration(); - zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), - "split-log-worker-tests", null); + zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), "split-log-worker-tests", null); ds = new DummyServer(zkw, conf); ZKUtil.deleteChildrenRecursively(zkw, zkw.getZNodePaths().baseZNode); ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().baseZNode); @@ -154,8 +151,8 @@ public void setup() throws Exception { SplitLogCounters.resetCounters(); executorService = new ExecutorService("TestSplitLogWorker"); - executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType( - ExecutorType.RS_LOG_REPLAY_OPS).setCorePoolSize(10)); + executorService.startExecutorService(executorService.new ExecutorConfig() + .setExecutorType(ExecutorType.RS_LOG_REPLAY_OPS).setCorePoolSize(10)); } @After @@ -166,22 +163,21 @@ public void teardown() throws Exception { TEST_UTIL.shutdownMiniZKCluster(); } - SplitLogWorker.TaskExecutor neverEndingTask = - new SplitLogWorker.TaskExecutor() { + SplitLogWorker.TaskExecutor neverEndingTask = new SplitLogWorker.TaskExecutor() { - @Override - public Status exec(String name, CancelableProgressable p) { - while (true) { - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - return Status.PREEMPTED; - } - if (!p.progress()) { - return Status.PREEMPTED; - } + @Override + public Status exec(String name, CancelableProgressable p) { + while (true) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + return Status.PREEMPTED; + } + if (!p.progress()) { + return Status.PREEMPTED; } } + } }; @@ -193,25 +189,23 @@ public 
void testAcquireTaskAtStartup() throws Exception { final ServerName RS = ServerName.valueOf("rs,1,1"); RegionServerServices mockedRS = getRegionServer(RS); zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS), - new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(), - Ids.OPEN_ACL_UNSAFE, - CreateMode.PERSISTENT); + new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(), Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); SplitLogWorker slw = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask); slw.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); - byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS)); + byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(RS)); } finally { - stopSplitLogWorker(slw); + stopSplitLogWorker(slw); } } - private void stopSplitLogWorker(final SplitLogWorker slw) - throws InterruptedException { + private void stopSplitLogWorker(final SplitLogWorker slw) throws InterruptedException { if (slw != null) { slw.stop(); slw.worker.join(WAIT_TIME); @@ -229,8 +223,8 @@ public void testRaceForTask() throws Exception { final ServerName SVR1 = ServerName.valueOf("svr1,1,1"); final ServerName SVR2 = ServerName.valueOf("svr2,1,1"); zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TRFT), - new SplitLogTask.Unassigned(MANAGER).toByteArray(), - Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + new SplitLogTask.Unassigned(MANAGER).toByteArray(), Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); RegionServerServices mockedRS1 = getRegionServer(SVR1); RegionServerServices mockedRS2 = getRegionServer(SVR2); SplitLogWorker slw1 = @@ -244,9 +238,8 @@ public void testRaceForTask() throws Exception { // Assert that either the tot_wkr_failed_to_grab_task_owned count was set of if // not it, that we fell through to the next counter in line and it was set. 
assertTrue(waitForCounterBoolean(SplitLogCounters.tot_wkr_failed_to_grab_task_owned, 0, 1, - WAIT_TIME, false) || - SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.sum() == 1); - byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TRFT)); + WAIT_TIME, false) || SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.sum() == 1); + byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TRFT)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(SVR1) || slt.isOwned(SVR2)); } finally { @@ -271,13 +264,12 @@ public void testPreemptTask() throws Exception { waitForCounter(SplitLogCounters.tot_wkr_task_grabing, 0, 1, WAIT_TIME); // this time create a task node after starting the splitLogWorker - zkw.getRecoverableZooKeeper().create(PATH, - new SplitLogTask.Unassigned(MANAGER).toByteArray(), + zkw.getRecoverableZooKeeper().create(PATH, new SplitLogTask.Unassigned(MANAGER).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); assertEquals(1, slw.getTaskReadySeq()); - byte [] bytes = ZKUtil.getData(zkw, PATH); + byte[] bytes = ZKUtil.getData(zkw, PATH); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(SRV)); slt = new SplitLogTask.Owned(MANAGER); @@ -303,8 +295,7 @@ public void testMultipleTasks() throws Exception { Thread.sleep(100); waitForCounter(SplitLogCounters.tot_wkr_task_grabing, 0, 1, WAIT_TIME); - SplitLogTask unassignedManager = - new SplitLogTask.Unassigned(MANAGER); + SplitLogTask unassignedManager = new SplitLogTask.Unassigned(MANAGER); zkw.getRecoverableZooKeeper().create(PATH1, unassignedManager.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); @@ -324,7 +315,7 @@ public void testMultipleTasks() throws Exception { waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 1, 2, WAIT_TIME); assertEquals(2, slw.getTaskReadySeq()); - byte [] bytes = ZKUtil.getData(zkw, PATH2); + byte[] bytes = ZKUtil.getData(zkw, PATH2); slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(SRV)); } finally { @@ -345,7 +336,7 @@ public void testRescan() throws Exception { String task = ZKSplitLog.getEncodedNodeName(zkw, "task"); SplitLogTask slt = new SplitLogTask.Unassigned(MANAGER); - zkw.getRecoverableZooKeeper().create(task,slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, + zkw.getRecoverableZooKeeper().create(task, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); @@ -376,8 +367,8 @@ public void testRescan() throws Exception { if (node.startsWith("RESCAN")) { String name = ZKSplitLog.getEncodedNodeName(zkw, node); String fn = ZKSplitLog.getFileName(name); - byte [] data = ZKUtil.getData(zkw, - ZNodePaths.joinZNode(zkw.getZNodePaths().splitLogZNode, fn)); + byte[] data = + ZKUtil.getData(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().splitLogZNode, fn)); slt = SplitLogTask.parseFrom(data); assertTrue(slt.toString(), slt.isDone(SRV)); } @@ -398,7 +389,7 @@ public void testAcquireMultiTasks() throws Exception { for (int i = 0; i < maxTasks; i++) { zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i), new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(), - Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } SplitLogWorker slw = new SplitLogWorker(ds, testConf, mockedRS, neverEndingTask); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 652c019ff044..21cae6c8f1c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -130,7 +130,7 @@ /** * The below tests are testing split region against a running cluster */ -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestSplitTransactionOnCluster { @ClassRule @@ -151,8 +151,7 @@ public class TestSplitTransactionOnCluster { public static void before() throws Exception { TESTING_UTIL.getConfiguration().setInt(HConstants.HBASE_BALANCER_PERIOD, 60000); StartTestingClusterOption option = StartTestingClusterOption.builder() - .masterClass(MyMaster.class).numRegionServers(NB_SERVERS). - numDataNodes(NB_SERVERS).build(); + .masterClass(MyMaster.class).numRegionServers(NB_SERVERS).numDataNodes(NB_SERVERS).build(); TESTING_UTIL.startMiniCluster(option); } @@ -171,7 +170,7 @@ public void setup() throws IOException { @After public void tearDown() throws Exception { this.admin.close(); - for (TableDescriptor htd: this.admin.listTableDescriptors()) { + for (TableDescriptor htd : this.admin.listTableDescriptors()) { LOG.info("Tear down, remove table=" + htd.getTableName()); TESTING_UTIL.deleteTable(htd.getTableName()); } @@ -185,12 +184,10 @@ private RegionInfo getAndCheckSingleTableRegion(final List regions) return hri; } - private void requestSplitRegion( - final HRegionServer rsServer, - final Region region, + private void requestSplitRegion(final HRegionServer rsServer, final Region region, final byte[] midKey) throws IOException { long procId = cluster.getMaster().splitRegion(region.getRegionInfo(), midKey, 0, 0); - // wait for the split to complete or get interrupted. If the split completes successfully, + // wait for the split to complete or get interrupted. If the split completes successfully, // the procedure will return true; if the split fails, the procedure would throw exception. ProcedureTestingUtility.waitProcedure(cluster.getMaster().getMasterProcedureExecutor(), procId); } @@ -217,10 +214,8 @@ public void testRITStateForRollback() throws Exception { assertTrue("not able to find a splittable region", region != null); // install master co-processor to fail splits - master.getMasterCoprocessorHost().load( - FailingSplitMasterObserver.class, - Coprocessor.PRIORITY_USER, - master.getConfiguration()); + master.getMasterCoprocessorHost().load(FailingSplitMasterObserver.class, + Coprocessor.PRIORITY_USER, master.getConfiguration()); // split async this.admin.splitRegionAsync(region.getRegionInfo().getRegionName(), new byte[] { 42 }); @@ -250,7 +245,7 @@ public void testSplitFailedCompactionAndSplit() throws Exception { // Create table then get the single region for our new table. byte[] cf = Bytes.toBytes("cf"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); admin.createTable(htd); for (int i = 0; cluster.getRegions(tableName).isEmpty() && i < 100; i++) { @@ -294,7 +289,7 @@ public void testSplitCompactWithPriority() throws Exception { // Create table then get the single region for our new table. 
byte[] cf = Bytes.toBytes("cf"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); admin.createTable(htd); assertNotEquals("Unable to retrieve regions of the table", -1, @@ -320,16 +315,14 @@ public void testSplitCompactWithPriority() throws Exception { // Split long procId = - cluster.getMaster().splitRegion(region.getRegionInfo(), Bytes.toBytes("row4"), 0, 0); + cluster.getMaster().splitRegion(region.getRegionInfo(), Bytes.toBytes("row4"), 0, 0); - // wait for the split to complete or get interrupted. If the split completes successfully, + // wait for the split to complete or get interrupted. If the split completes successfully, // the procedure will return true; if the split fails, the procedure would throw exception. - ProcedureTestingUtility.waitProcedure(cluster.getMaster().getMasterProcedureExecutor(), - procId); + ProcedureTestingUtility.waitProcedure(cluster.getMaster().getMasterProcedureExecutor(), procId); Thread.sleep(3000); assertNotEquals("Table is not split properly?", -1, - TESTING_UTIL.waitFor(3000, - () -> cluster.getRegions(tableName).size() == 2)); + TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 2)); // we have 2 daughter regions HRegion hRegion1 = cluster.getRegions(tableName).get(0); HRegion hRegion2 = cluster.getRegions(tableName).get(1); @@ -354,7 +347,7 @@ public void testSplitCompactWithPriority() throws Exception { assertEquals(compactionContext.get().getRequest().getPriority(), Integer.MIN_VALUE + 1000); compactionContext = - hStore2.requestCompaction(Integer.MIN_VALUE + 10, CompactionLifeCycleTracker.DUMMY, null); + hStore2.requestCompaction(Integer.MIN_VALUE + 10, CompactionLifeCycleTracker.DUMMY, null); assertTrue(compactionContext.isPresent()); // compaction request contains higher priority than default priority of daughter region // compaction (Integer.MIN_VALUE + 1000), hence we are expecting request priority to @@ -371,7 +364,7 @@ public void testContinuousSplitUsingLinkFile() throws Exception { // Create table then get the single region for our new table. byte[] cf = Bytes.toBytes("cf"); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)); String splitPolicy = ConstantSizeRegionSplitPolicy.class.getName(); builder.setValue(SPLIT_POLICY, splitPolicy); @@ -392,7 +385,7 @@ public void testContinuousSplitUsingLinkFile() throws Exception { // Split admin.splitRegionAsync(cluster.getRegions(tableName).get(0).getRegionInfo().getRegionName(), Bytes.toBytes("row14")); - // wait for the split to complete or get interrupted. If the split completes successfully, + // wait for the split to complete or get interrupted. If the split completes successfully, // the procedure will return true; if the split fails, the procedure would throw exception. 
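A side note on the priority values exercised by testSplitCompactWithPriority above: as the test's own comments put it, the explicit request at Integer.MIN_VALUE + 10 carries a higher priority than the daughter regions' default split-compaction priority of Integer.MIN_VALUE + 1000, i.e. the numerically smaller value wins. A minimal illustration using the same constants:

// Values from testSplitCompactWithPriority; the numerically smaller value is the higher priority.
int daughterSplitCompactionPriority = Integer.MIN_VALUE + 1000; // default for post-split daughters
int explicitRequestPriority = Integer.MIN_VALUE + 10;           // passed to requestCompaction(...)
System.out.println(explicitRequestPriority < daughterSplitCompactionPriority); // true: outranks the default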
Thread.sleep(3000); assertNotEquals("Table is not split properly?", -1, @@ -415,7 +408,7 @@ public void testContinuousSplitUsingLinkFile() throws Exception { scan = new Scan(); scanValidate(scan, rowCount, table); - //Continuous Split + // Continuous Split findRegionToSplit(tableName, "row24"); Thread.sleep(3000); assertNotEquals("Table is not split properly?", -1, @@ -430,8 +423,8 @@ public void testContinuousSplitUsingLinkFile() throws Exception { scan = new Scan(); scanValidate(scan, rowCount, table); - //Continuous Split, random split HFileLink, generate Reference files. - //After this, can not continuous split, because there are reference files. + // Continuous Split, random split HFileLink, generate Reference files. + // After this, can not continuous split, because there are reference files. findRegionToSplit(tableName, "row11"); Thread.sleep(3000); assertNotEquals("Table is not split properly?", -1, @@ -444,18 +437,16 @@ public void testContinuousSplitUsingLinkFile() throws Exception { private void findRegionToSplit(TableName tableName, String splitRowKey) throws Exception { HRegion toSplit = null; byte[] toSplitKey = Bytes.toBytes(splitRowKey); - for(HRegion rg : cluster.getRegions(tableName)) { - LOG.debug("startKey=" + - Bytes.toStringBinary(rg.getRegionInfo().getStartKey()) + ", getEndKey()=" + - Bytes.toStringBinary(rg.getRegionInfo().getEndKey()) + ", row=" + splitRowKey); - if((rg.getRegionInfo().getStartKey().length==0|| - CellComparator.getInstance().compare( - PrivateCellUtil.createFirstOnRow(rg.getRegionInfo().getStartKey()), - PrivateCellUtil.createFirstOnRow(toSplitKey)) <= 0) &&( - rg.getRegionInfo().getEndKey().length==0|| - CellComparator.getInstance().compare( + for (HRegion rg : cluster.getRegions(tableName)) { + LOG.debug( + "startKey=" + Bytes.toStringBinary(rg.getRegionInfo().getStartKey()) + ", getEndKey()=" + + Bytes.toStringBinary(rg.getRegionInfo().getEndKey()) + ", row=" + splitRowKey); + if ((rg.getRegionInfo().getStartKey().length == 0 || CellComparator.getInstance().compare( + PrivateCellUtil.createFirstOnRow(rg.getRegionInfo().getStartKey()), + PrivateCellUtil.createFirstOnRow(toSplitKey)) <= 0) + && (rg.getRegionInfo().getEndKey().length == 0 || CellComparator.getInstance().compare( PrivateCellUtil.createFirstOnRow(rg.getRegionInfo().getEndKey()), - PrivateCellUtil.createFirstOnRow(toSplitKey)) >= 0)){ + PrivateCellUtil.createFirstOnRow(toSplitKey)) >= 0)) { toSplit = rg; } } @@ -463,7 +454,8 @@ private void findRegionToSplit(TableName tableName, String splitRowKey) throws E admin.splitRegionAsync(toSplit.getRegionInfo().getRegionName(), toSplitKey); } - private static void scanValidate(Scan scan, int expectedRowCount, Table table) throws IOException{ + private static void scanValidate(Scan scan, int expectedRowCount, Table table) + throws IOException { ResultScanner scanner = table.getScanner(scan); int rows = 0; for (Result result : scanner) { @@ -488,8 +480,7 @@ public Optional getMasterObserver() { @Override public void preSplitRegionBeforeMETAAction( - final ObserverContext ctx, - final byte[] splitKey, + final ObserverContext ctx, final byte[] splitKey, final List metaEntries) throws IOException { latch.countDown(); throw new IOException("Causing rollback of region split"); @@ -522,7 +513,7 @@ public void testSplitRollbackOnRegionClosing() throws Exception { int regionCount = cluster.getRegions(hri.getTable()).size(); regionStates.updateRegionState(hri, RegionState.State.CLOSING); - // Now try splitting.... should fail. 
And each should successfully + // Now try splitting.... should fail. And each should successfully // rollback. // We don't roll back here anymore. Instead we fail-fast on construction of the // split transaction. Catch the exception instead. @@ -559,8 +550,9 @@ public void testShutdownFixupWhenDaughterHasSplit() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); // Create table then get the single region for our new table. - Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY); List regions = - cluster.getRegions(tableName); RegionInfo hri = getAndCheckSingleTableRegion(regions); + Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY); + List regions = cluster.getRegions(tableName); + RegionInfo hri = getAndCheckSingleTableRegion(regions); int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri); // Turn off balancer so it doesn't cut in and mess up our placements. @@ -586,7 +578,7 @@ public void testShutdownFixupWhenDaughterHasSplit() throws Exception { admin.splitRegionAsync(daughter.getRegionName()).get(2, TimeUnit.MINUTES); // Get list of daughters daughters = cluster.getRegions(tableName); - for (HRegion d: daughters) { + for (HRegion d : daughters) { LOG.info("Regions before crash: " + d); } // Now crash the server @@ -596,14 +588,14 @@ public void testShutdownFixupWhenDaughterHasSplit() throws Exception { // Assert daughters are online and ONLY the original daughters -- that // fixup didn't insert one during server shutdown recover. regions = cluster.getRegions(tableName); - for (HRegion d: daughters) { + for (HRegion d : daughters) { LOG.info("Regions after crash: " + d); } if (daughters.size() != regions.size()) { LOG.info("Daughters=" + daughters.size() + ", regions=" + regions.size()); } assertEquals(daughters.size(), regions.size()); - for (HRegion r: regions) { + for (HRegion r : regions) { LOG.info("Regions post crash " + r + ", contains=" + daughters.contains(r)); assertTrue("Missing region post crash " + r, daughters.contains(r)); } @@ -639,7 +631,7 @@ private void clearReferences(HRegion region) throws IOException { public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exception { TableName userTableName = TableName.valueOf(name.getMethodName()); TableDescriptor htd = TableDescriptorBuilder.newBuilder(userTableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("col")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("col")).build(); admin.createTable(htd); Table table = TESTING_UTIL.getConnection().getTable(userTableName); try { @@ -656,8 +648,7 @@ public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exc admin.flush(userTableName); } admin.majorCompact(userTableName); - List regionsOfTable = - cluster.getMaster().getAssignmentManager().getRegionStates() + List regionsOfTable = cluster.getMaster().getAssignmentManager().getRegionStates() .getRegionsOfTable(userTableName); assertEquals(1, regionsOfTable.size()); RegionInfo hRegionInfo = regionsOfTable.get(0); @@ -672,17 +663,15 @@ public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exc table.put(p); admin.flush(userTableName); admin.splitRegionAsync(hRegionInfo.getRegionName(), Bytes.toBytes("row7")); - regionsOfTable = cluster.getMaster() - .getAssignmentManager().getRegionStates() + regionsOfTable = cluster.getMaster().getAssignmentManager().getRegionStates() .getRegionsOfTable(userTableName); while (regionsOfTable.size() != 2) { Thread.sleep(1000); - 
regionsOfTable = cluster.getMaster() - .getAssignmentManager().getRegionStates() + regionsOfTable = cluster.getMaster().getAssignmentManager().getRegionStates() .getRegionsOfTable(userTableName); - LOG.debug("waiting 2 regions to be available, got " + regionsOfTable.size() + - ": " + regionsOfTable); + LOG.debug("waiting 2 regions to be available, got " + regionsOfTable.size() + ": " + + regionsOfTable); } Assert.assertEquals(2, regionsOfTable.size()); @@ -763,21 +752,22 @@ public void testMasterRestartAtRegionSplitPendingCatalogJanitor() public void testSplitWithRegionReplicas() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor htd = TESTING_UTIL - .createModifyableTableDescriptor(TableName.valueOf(name.getMethodName()), - ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, - ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED) - .setRegionReplication(2).setCoprocessor(SlowMeCopro.class.getName()).build(); + .createModifyableTableDescriptor(TableName.valueOf(name.getMethodName()), + ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, + ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED) + .setRegionReplication(2).setCoprocessor(SlowMeCopro.class.getName()).build(); // Create table then get the single region for our new table. - Table t = TESTING_UTIL.createTable(htd, new byte[][]{Bytes.toBytes("cf")}, null); + Table t = TESTING_UTIL.createTable(htd, new byte[][] { Bytes.toBytes("cf") }, null); List oldRegions; do { oldRegions = cluster.getRegions(tableName); Thread.sleep(10); } while (oldRegions.size() != 2); - for (HRegion h : oldRegions) LOG.debug("OLDREGION " + h.getRegionInfo()); + for (HRegion h : oldRegions) + LOG.debug("OLDREGION " + h.getRegionInfo()); try { - int regionServerIndex = cluster.getServerWith(oldRegions.get(0).getRegionInfo() - .getRegionName()); + int regionServerIndex = + cluster.getServerWith(oldRegions.get(0).getRegionInfo().getRegionName()); HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); insertData(tableName, admin, t); // Turn off balancer so it doesn't cut in and mess up our placements. @@ -796,11 +786,12 @@ public void testSplitWithRegionReplicas() throws Exception { e.printStackTrace(); fail("Split execution should have succeeded with no exceptions thrown " + e); } - //TESTING_UTIL.waitUntilAllRegionsAssigned(tableName); + // TESTING_UTIL.waitUntilAllRegionsAssigned(tableName); List newRegions; do { newRegions = cluster.getRegions(tableName); - for (HRegion h : newRegions) LOG.debug("NEWREGION " + h.getRegionInfo()); + for (HRegion h : newRegions) + LOG.debug("NEWREGION " + h.getRegionInfo()); Thread.sleep(1000); } while ((newRegions.contains(oldRegions.get(0)) || newRegions.contains(oldRegions.get(1))) || newRegions.size() != 4); @@ -864,8 +855,7 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { List regions = cluster.getRegions(tableName); RegionInfo hri = getAndCheckSingleTableRegion(regions); ensureTableRegionNotOnSameServerAsMeta(admin, hri); - int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo() - .getRegionName()); + int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName()); HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); // Turn off balancer so it doesn't cut in and mess up our placements. 
this.admin.balancerSwitch(false, true); @@ -878,11 +868,10 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { HBaseFsck.debugLsr(conf, new Path("/")); Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = TESTING_UTIL.getDFSCluster().getFileSystem(); - Map storefiles = - FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName); + Map storefiles = FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName); assertEquals("Expected nothing but found " + storefiles.toString(), 0, storefiles.size()); - // find a splittable region. Refresh the regions list + // find a splittable region. Refresh the regions list regions = cluster.getRegions(tableName); final HRegion region = findSplittableRegion(regions); assertTrue("not able to find a splittable region", region != null); @@ -904,7 +893,7 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { Map storefilesAfter = FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName); assertEquals("Expected nothing but found " + storefilesAfter.toString(), 0, - storefilesAfter.size()); + storefilesAfter.size()); hri = region.getRegionInfo(); // split parent AssignmentManager am = cluster.getMaster().getAssignmentManager(); @@ -944,21 +933,20 @@ public void testSplitRegionWithNoStoreFiles() throws Exception { } @Test - public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck() - throws Exception { + public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { byte[] cf = Bytes.toBytes("f"); byte[] cf1 = Bytes.toBytes("i_f"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)) - .setRegionSplitPolicyClassName(CustomSplitPolicy.class.getName()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)) + .setRegionSplitPolicyClassName(CustomSplitPolicy.class.getName()).build(); admin.createTable(htd); List regions = awaitTableRegions(tableName); HRegion region = regions.get(0); - for(int i = 3;i<9;i++) { - Put p = new Put(Bytes.toBytes("row"+i)); + for (int i = 3; i < 9; i++) { + Put p = new Put(Bytes.toBytes("row" + i)); p.addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("value" + i)); p.addColumn(cf1, Bytes.toBytes("q"), Bytes.toBytes("value" + i)); region.put(p); @@ -968,13 +956,11 @@ public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck() Collection storefiles = store.getStorefiles(); assertEquals(1, storefiles.size()); assertFalse(region.hasReferences()); - Path referencePath = - region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "f", - storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy()); + Path referencePath = region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "f", + storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy()); assertNull(referencePath); - referencePath = - region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "i_f", - storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy()); + referencePath = region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "i_f", + storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy()); assertNotNull(referencePath); } finally { 
TESTING_UTIL.deleteTable(tableName); @@ -983,9 +969,9 @@ public void testStoreFileReferenceCreationWhenSplitPolicySaysToSkipRangeCheck() private HRegion findSplittableRegion(final List regions) throws InterruptedException { for (int i = 0; i < 5; ++i) { - for (HRegion r: regions) { + for (HRegion r : regions) { if (r.isSplittable() && r.getRegionInfo().getReplicaId() == 0) { - return(r); + return (r); } } Thread.sleep(100); @@ -1020,43 +1006,40 @@ private HMaster abortAndWaitForMaster() throws IOException, InterruptedException } /** - * Ensure single table region is not on same server as the single hbase:meta table - * region. + * Ensure single table region is not on same server as the single hbase:meta table region. * @return Index of the server hosting the single table region * @throws UnknownRegionException * @throws MasterNotRunningException * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException * @throws InterruptedException */ - private int ensureTableRegionNotOnSameServerAsMeta(final Admin admin, - final RegionInfo hri) - throws IOException, MasterNotRunningException, - ZooKeeperConnectionException, InterruptedException { + private int ensureTableRegionNotOnSameServerAsMeta(final Admin admin, final RegionInfo hri) + throws IOException, MasterNotRunningException, ZooKeeperConnectionException, + InterruptedException { // Now make sure that the table region is not on same server as that hosting - // hbase:meta We don't want hbase:meta replay polluting our test when we later crash + // hbase:meta We don't want hbase:meta replay polluting our test when we later crash // the table region serving server. int metaServerIndex = cluster.getServerWithMeta(); HRegionServer metaRegionServer = cluster.getRegionServer(metaServerIndex); int tableRegionIndex = cluster.getServerWith(hri.getRegionName()); assertTrue(tableRegionIndex != -1); HRegionServer tableRegionServer = cluster.getRegionServer(tableRegionIndex); - LOG.info("MetaRegionServer=" + metaRegionServer.getServerName() + - ", other=" + tableRegionServer.getServerName()); + LOG.info("MetaRegionServer=" + metaRegionServer.getServerName() + ", other=" + + tableRegionServer.getServerName()); if (metaRegionServer.getServerName().equals(tableRegionServer.getServerName())) { HRegionServer hrs = getOtherRegionServer(cluster, metaRegionServer); assertNotNull(hrs); assertNotNull(hri); - LOG.info("Moving " + hri.getRegionNameAsString() + " from " + - metaRegionServer.getServerName() + " to " + - hrs.getServerName() + "; metaServerIndex=" + metaServerIndex); + LOG.info("Moving " + hri.getRegionNameAsString() + " from " + metaRegionServer.getServerName() + + " to " + hrs.getServerName() + "; metaServerIndex=" + metaServerIndex); admin.move(hri.getEncodedNameAsBytes(), hrs.getServerName()); } // Wait till table region is up on the server that is NOT carrying hbase:meta. 
for (int i = 0; i < 100; i++) { tableRegionIndex = cluster.getServerWith(hri.getRegionName()); if (tableRegionIndex != -1 && tableRegionIndex != metaServerIndex) break; - LOG.debug("Waiting on region move off the hbase:meta server; current index " + - tableRegionIndex + " and metaServerIndex=" + metaServerIndex); + LOG.debug("Waiting on region move off the hbase:meta server; current index " + + tableRegionIndex + " and metaServerIndex=" + metaServerIndex); Thread.sleep(100); } assertTrue("Region not moved off hbase:meta server, tableRegionIndex=" + tableRegionIndex, @@ -1069,17 +1052,15 @@ private int ensureTableRegionNotOnSameServerAsMeta(final Admin admin, } /** - * Find regionserver other than the one passed. - * Can't rely on indexes into list of regionservers since crashed servers - * occupy an index. + * Find regionserver other than the one passed. Can't rely on indexes into list of regionservers + * since crashed servers occupy an index. * @param cluster * @param notThisOne - * @return A regionserver that is not notThisOne or null if none - * found + * @return A regionserver that is not notThisOne or null if none found */ private HRegionServer getOtherRegionServer(final SingleProcessHBaseCluster cluster, final HRegionServer notThisOne) { - for (RegionServerThread rst: cluster.getRegionServerThreads()) { + for (RegionServerThread rst : cluster.getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); if (hrs.getServerName().equals(notThisOne.getServerName())) continue; if (hrs.isStopping() || hrs.isStopped()) continue; @@ -1088,25 +1069,24 @@ private HRegionServer getOtherRegionServer(final SingleProcessHBaseCluster clust return null; } - private void printOutRegions(final HRegionServer hrs, final String prefix) - throws IOException { + private void printOutRegions(final HRegionServer hrs, final String prefix) throws IOException { List regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices()); - for (RegionInfo region: regions) { + for (RegionInfo region : regions) { LOG.info(prefix + region.getRegionNameAsString()); } } private void waitUntilRegionServerDead() throws InterruptedException, IOException { // Wait until the master processes the RS shutdown - for (int i=0; (cluster.getMaster().getClusterMetrics() - .getLiveServerMetrics().size() > NB_SERVERS - || cluster.getLiveRegionServerThreads().size() > NB_SERVERS) && i<100; i++) { + for (int i = + 0; (cluster.getMaster().getClusterMetrics().getLiveServerMetrics().size() > NB_SERVERS + || cluster.getLiveRegionServerThreads().size() > NB_SERVERS) && i < 100; i++) { LOG.info("Waiting on server to go down"); Thread.sleep(100); } assertFalse("Waited too long for RS to die", - cluster.getMaster().getClusterMetrics(). 
getLiveServerMetrics().size() > NB_SERVERS - || cluster.getLiveRegionServerThreads().size() > NB_SERVERS); + cluster.getMaster().getClusterMetrics().getLiveServerMetrics().size() > NB_SERVERS + || cluster.getLiveRegionServerThreads().size() > NB_SERVERS); } private void awaitDaughters(TableName tableName, int numDaughters) throws InterruptedException { @@ -1130,12 +1110,11 @@ private List awaitTableRegions(final TableName tableName) throws Interr return regions; } - private Table createTableAndWait(TableName tableName, byte[] cf) throws IOException, - InterruptedException { + private Table createTableAndWait(TableName tableName, byte[] cf) + throws IOException, InterruptedException { Table t = TESTING_UTIL.createTable(tableName, cf); awaitTableRegions(tableName); - assertTrue("Table not online: " + tableName, - cluster.getRegions(tableName).size() != 0); + assertTrue("Table not online: " + tableName, cluster.getRegions(tableName).size() != 0); return t; } @@ -1155,6 +1134,7 @@ static class MyMasterRpcServices extends MasterRpcServices { static AtomicBoolean enabled = new AtomicBoolean(false); private HMaster myMaster; + public MyMasterRpcServices(HMaster master) throws IOException { super(master); myMaster = master; @@ -1164,17 +1144,15 @@ public MyMasterRpcServices(HMaster master) throws IOException { public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c, ReportRegionStateTransitionRequest req) throws ServiceException { ReportRegionStateTransitionResponse resp = super.reportRegionStateTransition(c, req); - if (enabled.get() && req.getTransition(0).getTransitionCode().equals( - TransitionCode.READY_TO_SPLIT) && !resp.hasErrorMessage()) { + if (enabled.get() + && req.getTransition(0).getTransitionCode().equals(TransitionCode.READY_TO_SPLIT) + && !resp.hasErrorMessage()) { RegionStates regionStates = myMaster.getAssignmentManager().getRegionStates(); - for (RegionStateNode regionState: - regionStates.getRegionsInTransition()) { - /* TODO!!!! - // Find the merging_new region and remove it - if (regionState.isSplittingNew()) { - regionStates.deleteRegion(regionState.getRegion()); - } - */ + for (RegionStateNode regionState : regionStates.getRegionsInTransition()) { + /* + * TODO!!!! // Find the merging_new region and remove it if (regionState.isSplittingNew()) + * { regionStates.deleteRegion(regionState.getRegion()); } + */ } } return resp; @@ -1190,7 +1168,7 @@ protected boolean shouldSplit() { @Override public boolean skipStoreFileRangeCheck(String familyName) { - if(familyName.startsWith("i_")) { + if (familyName.startsWith("i_")) { return true; } else { return false; @@ -1198,4 +1176,3 @@ public boolean skipStoreFileRangeCheck(String familyName) { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java index 550b6d4ac096..4c6759243f3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWalDataLoss.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -72,8 +72,8 @@ public class TestSplitWalDataLoss { private final HBaseTestingUtil testUtil = new HBaseTestingUtil(); - private NamespaceDescriptor namespace = NamespaceDescriptor.create(getClass().getSimpleName()) - .build(); + private NamespaceDescriptor namespace = + NamespaceDescriptor.create(getClass().getSimpleName()).build(); private TableName tableName = TableName.valueOf(namespace.getName(), "dataloss"); @@ -116,8 +116,8 @@ public FlushResult answer(InvocationOnMock invocation) throws Throwable { reported.wait(); } } - rs.getWAL(region.getRegionInfo()).abortCacheFlush( - region.getRegionInfo().getEncodedNameAsBytes()); + rs.getWAL(region.getRegionInfo()) + .abortCacheFlush(region.getRegionInfo().getEncodedNameAsBytes()); throw new DroppedSnapshotException("testcase"); } }).when(spiedRegion).internalFlushCacheAndCommit(Matchers. any(), @@ -125,7 +125,7 @@ Matchers. any(), Matchers. any(), Matchers.> any()); // Find region key; don't pick up key for hbase:meta by mistake. String key = null; - for (Map.Entry entry: rs.getOnlineRegions().entrySet()) { + for (Map.Entry entry : rs.getOnlineRegions().entrySet()) { if (entry.getValue().getRegionInfo().getTable().equals(this.tableName)) { key = entry.getKey(); break; @@ -135,8 +135,7 @@ Matchers. any(), Matchers. any(), Connection conn = testUtil.getConnection(); try (Table table = conn.getTable(tableName)) { - table.put(new Put(Bytes.toBytes("row0")) - .addColumn(family, qualifier, Bytes.toBytes("val0"))); + table.put(new Put(Bytes.toBytes("row0")).addColumn(family, qualifier, Bytes.toBytes("val0"))); } long oldestSeqIdOfStore = region.getOldestSeqIdOfStore(family); LOG.info("CHANGE OLDEST " + oldestSeqIdOfStore); @@ -148,8 +147,7 @@ Matchers. any(), Matchers. 
any(), } } try (Table table = conn.getTable(tableName)) { - table.put(new Put(Bytes.toBytes("row1")) - .addColumn(family, qualifier, Bytes.toBytes("val1"))); + table.put(new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, Bytes.toBytes("val1"))); } long now = EnvironmentEdgeManager.currentTime(); rs.tryRegionServerReport(now - 500, now); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWithBlockingFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWithBlockingFiles.java index cf5ea97c9cf7..a974e0e1cc85 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWithBlockingFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitWithBlockingFiles.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; + import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -50,14 +51,15 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; -@Category({ MediumTests.class}) +@Category({ MediumTests.class }) public class TestSplitWithBlockingFiles { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSplitWithBlockingFiles.class); + HBaseClassTestRule.forClass(TestSplitWithBlockingFiles.class); private static final Logger LOG = LoggerFactory.getLogger(TestSplitWithBlockingFiles.class); @@ -67,7 +69,6 @@ public class TestSplitWithBlockingFiles { private static byte[] CF = Bytes.toBytes("cf"); private static Table TABLE; - @BeforeClass public static void setupCluster() throws Exception { UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 8 * 2 * 10240L); @@ -78,8 +79,8 @@ public static void setupCluster() throws Exception { UTIL.startMiniCluster(1); ADMIN = UTIL.getAdmin(); TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(CF).setBlocksize(1000).build()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF).setBlocksize(1000).build()) + .build(); TABLE = UTIL.createTable(td, null); UTIL.waitTableAvailable(TABLE_NAME); } @@ -117,16 +118,16 @@ public void testSplitIgnoreBlockingFiles() throws Exception { assertNotNull(regions.get(0).getSplitPolicy().getSplitPoint()); assertTrue(regions.get(0).getCompactPriority() >= PRIORITY_USER); assertTrue(UTIL.getMiniHBaseCluster().getRegionServer(0).getCompactSplitThread() - .requestSplit(regions.get(0))); + .requestSplit(regions.get(0))); // split region ADMIN.splitSwitch(true, true); MasterProcedureEnv env = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment(); final ProcedureExecutor executor = - UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); + UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); SplitTableRegionProcedure splitProcedure = - new SplitTableRegionProcedure(env, regions.get(0).getRegionInfo(), Bytes.toBytes("row5")); + new SplitTableRegionProcedure(env, regions.get(0).getRegionInfo(), Bytes.toBytes("row5")); executor.submitProcedure(splitProcedure); ProcedureTestingUtility.waitProcedure(executor, 
splitProcedure.getProcId()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java index 6c76553a02a8..861c5d2cbd96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java @@ -23,7 +23,6 @@ import java.io.FileNotFoundException; import java.io.IOException; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -42,7 +41,7 @@ /** * Test HStoreFile */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestStoreFileInfo { @ClassRule @@ -57,23 +56,23 @@ public class TestStoreFileInfo { @Test public void testStoreFileNames() { String[] legalHFileLink = { "MyTable_02=abc012-def345", "MyTable_02.300=abc012-def345", - "MyTable_02-400=abc012-def345", "MyTable_02-400.200=abc012-def345", - "MyTable_02=abc012-def345_SeqId_1_", "MyTable_02=abc012-def345_SeqId_20_" }; - for (String name: legalHFileLink) { + "MyTable_02-400=abc012-def345", "MyTable_02-400.200=abc012-def345", + "MyTable_02=abc012-def345_SeqId_1_", "MyTable_02=abc012-def345_SeqId_20_" }; + for (String name : legalHFileLink) { assertTrue("should be a valid link: " + name, HFileLink.isHFileLink(name)); assertTrue("should be a valid StoreFile" + name, StoreFileInfo.validateStoreFileName(name)); assertFalse("should not be a valid reference: " + name, StoreFileInfo.isReference(name)); String refName = name + ".6789"; assertTrue("should be a valid link reference: " + refName, - StoreFileInfo.isReference(refName)); + StoreFileInfo.isReference(refName)); assertTrue("should be a valid StoreFile" + refName, - StoreFileInfo.validateStoreFileName(refName)); + StoreFileInfo.validateStoreFileName(refName)); } String[] illegalHFileLink = { ".MyTable_02=abc012-def345", "-MyTable_02.300=abc012-def345", - "MyTable_02-400=abc0_12-def345", "MyTable_02-400.200=abc012-def345...." }; - for (String name: illegalHFileLink) { + "MyTable_02-400=abc0_12-def345", "MyTable_02-400.200=abc012-def345...." 
}; + for (String name : illegalHFileLink) { assertFalse("should not be a valid link: " + name, HFileLink.isHFileLink(name)); } } @@ -85,14 +84,14 @@ public void testEqualsWithLink() throws IOException { Path mob = new Path("/mob"); Path archive = new Path("/archive"); HFileLink link1 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"), - new Path(mob, "f1"), new Path(archive, "f1")); + new Path(mob, "f1"), new Path(archive, "f1")); HFileLink link2 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"), - new Path(mob, "f1"), new Path(archive, "f1")); + new Path(mob, "f1"), new Path(archive, "f1")); - StoreFileInfo info1 = new StoreFileInfo(TEST_UTIL.getConfiguration(), - TEST_UTIL.getTestFileSystem(), null, link1); - StoreFileInfo info2 = new StoreFileInfo(TEST_UTIL.getConfiguration(), - TEST_UTIL.getTestFileSystem(), null, link2); + StoreFileInfo info1 = + new StoreFileInfo(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(), null, link1); + StoreFileInfo info2 = + new StoreFileInfo(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(), null, link2); assertEquals(info1, info2); assertEquals(info1.hashCode(), info2.hashCode()); @@ -119,7 +118,7 @@ public void testOpenErrorMessageHFileLink() throws IOException, IllegalStateExce public void testOpenErrorMessageReference() throws IOException { // Test file link exception // Try to open nonsense hfilelink. Make sure exception is from HFileLink. - Path p = new Path(TEST_UTIL.getDataTestDirOnTestFS(),"4567.abcd"); + Path p = new Path(TEST_UTIL.getDataTestDirOnTestFS(), "4567.abcd"); FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); fs.mkdirs(p.getParent()); Reference r = Reference.createBottomReference(HConstants.EMPTY_START_ROW); @@ -134,4 +133,3 @@ public void testOpenErrorMessageReference() throws IOException { } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java index 2fab050446ec..90e09ef80719 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestStoreFileRefresherChore { @ClassRule @@ -117,11 +117,9 @@ private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey final Configuration walConf = new Configuration(conf); CommonFSUtils.setRootDir(walConf, tableDir); final WALFactory wals = new WALFactory(walConf, "log_" + replicaId); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - HRegion region = - new HRegion(fs, wals.getWAL(info), - conf, htd, null); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null); region.initialize(); @@ -174,10 +172,12 @@ private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byt static class StaleStorefileRefresherChore extends StorefileRefresherChore { boolean isStale = false; + public StaleStorefileRefresherChore(int period, HRegionServer regionServer, Stoppable stoppable) { super(period, false, regionServer, stoppable); } + @Override protected boolean isRegionStale(String encodedName, long time) { return isStale; @@ -187,7 +187,7 @@ protected boolean isRegionStale(String encodedName, long time) { @Test public void testIsStale() throws IOException { int period = 0; - byte[][] families = new byte[][] {Bytes.toBytes("cf")}; + byte[][] families = new byte[][] { Bytes.toBytes("cf") }; byte[] qf = Bytes.toBytes("cq"); HRegionServer regionServer = mock(HRegionServer.class); @@ -201,8 +201,8 @@ public void testIsStale() throws IOException { regions.add(primary); regions.add(replica1); - StaleStorefileRefresherChore chore = new StaleStorefileRefresherChore(period, regionServer, - new StoppableImplementation()); + StaleStorefileRefresherChore chore = + new StaleStorefileRefresherChore(period, regionServer, new StoppableImplementation()); // write some data to primary and flush putData(primary, 0, 100, qf, families); @@ -214,7 +214,7 @@ public void testIsStale() throws IOException { verifyData(replica1, 0, 100, qf, families); // simulate an fs failure where we cannot refresh the store files for the replica - ((FailingHRegionFileSystem)replica1.getRegionFileSystem()).fail = true; + ((FailingHRegionFileSystem) replica1.getRegionFileSystem()).fail = true; // write some more data to primary and flush putData(primary, 100, 100, qf, families); @@ -227,11 +227,11 @@ public void testIsStale() throws IOException { verifyDataExpectFail(replica1, 100, 100, qf, families); chore.isStale = true; - chore.chore(); //now after this, we cannot read back any value + chore.chore(); // now after this, we cannot read back any value try { verifyData(replica1, 0, 100, qf, families); fail("should have failed with IOException"); - } catch(IOException ex) { + } catch (IOException ex) { // expected } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java index 4e85c2619c0c..5aa8d36c528e 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestStoreFileScannerWithTagCompression { @ClassRule @@ -60,8 +60,8 @@ public class TestStoreFileScannerWithTagCompression { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Configuration conf = TEST_UTIL.getConfiguration(); private static CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration()); - private static String ROOT_DIR = TEST_UTIL.getDataTestDir( - "TestStoreFileScannerWithTagCompression").toString(); + private static String ROOT_DIR = + TEST_UTIL.getDataTestDir("TestStoreFileScannerWithTagCompression").toString(); private static FileSystem fs = null; @BeforeClass @@ -97,8 +97,8 @@ public void testReseek() throws Exception { kv = s.next(); kv = s.next(); byte[] key5 = Bytes.toBytes("k5"); - assertTrue(Bytes.equals(key5, 0, key5.length, kv.getRowArray(), kv.getRowOffset(), - kv.getRowLength())); + assertTrue( + Bytes.equals(key5, 0, key5.length, kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); List tags = PrivateCellUtil.getTags(kv); assertEquals(1, tags.size()); assertEquals("tag3", Bytes.toString(Tag.cloneValue(tags.get(0)))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index d12342f64a0f..627aa69a5493 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,6 @@ import java.util.NavigableSet; import java.util.TreeSet; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; @@ -71,7 +70,7 @@ import org.slf4j.LoggerFactory; // Can't be small as it plays with EnvironmentEdgeManager -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestStoreScanner { @ClassRule @@ -79,7 +78,8 @@ public class TestStoreScanner { HBaseClassTestRule.forClass(TestStoreScanner.class); private static final Logger LOG = LoggerFactory.getLogger(TestStoreScanner.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static final String CF_STR = "cf"; private static final byte[] CF = Bytes.toBytes(CF_STR); static Configuration CONF = HBaseConfiguration.create(); @@ -88,20 +88,19 @@ public class TestStoreScanner { KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false); /** - * From here on down, we have a bunch of defines and specific CELL_GRID of Cells. The - * CELL_GRID then has a Scanner that can fake out 'block' transitions. All this elaborate - * setup is for tests that ensure we don't overread, and that the {@link StoreScanner} is not - * overly enthusiastic. + * From here on down, we have a bunch of defines and specific CELL_GRID of Cells. The CELL_GRID + * then has a Scanner that can fake out 'block' transitions. All this elaborate setup is for tests + * that ensure we don't overread, and that the {@link StoreScanner} is not overly enthusiastic. */ - private static final byte[] ZERO = new byte[] {'0'}; - private static final byte[] ZERO_POINT_ZERO = new byte[] {'0', '.', '0'}; - private static final byte[] ONE = new byte[] {'1'}; - private static final byte[] TWO = new byte[] {'2'}; - private static final byte[] TWO_POINT_TWO = new byte[] {'2', '.', '2'}; - private static final byte[] THREE = new byte[] {'3'}; - private static final byte[] FOUR = new byte[] {'4'}; - private static final byte[] FIVE = new byte[] {'5'}; - private static final byte[] VALUE = new byte[] {'v'}; + private static final byte[] ZERO = new byte[] { '0' }; + private static final byte[] ZERO_POINT_ZERO = new byte[] { '0', '.', '0' }; + private static final byte[] ONE = new byte[] { '1' }; + private static final byte[] TWO = new byte[] { '2' }; + private static final byte[] TWO_POINT_TWO = new byte[] { '2', '.', '2' }; + private static final byte[] THREE = new byte[] { '3' }; + private static final byte[] FOUR = new byte[] { '4' }; + private static final byte[] FIVE = new byte[] { '5' }; + private static final byte[] VALUE = new byte[] { 'v' }; private static final int CELL_GRID_BLOCK2_BOUNDARY = 4; private static final int CELL_GRID_BLOCK3_BOUNDARY = 11; private static final int CELL_GRID_BLOCK4_BOUNDARY = 15; @@ -109,80 +108,79 @@ public class TestStoreScanner { /** * Five rows by four columns distinguished by column qualifier (column qualifier is one of the - * four rows... ONE, TWO, etc.). Exceptions are a weird row after TWO; it is TWO_POINT_TWO. - * And then row FOUR has five columns finishing w/ row FIVE having a single column. - * We will use this to test scan does the right thing as it - * we do Gets, StoreScanner#optimize, and what we do on (faked) block boundaries. + * four rows... ONE, TWO, etc.). 
Exceptions are a weird row after TWO; it is TWO_POINT_TWO. And + * then row FOUR has five columns finishing w/ row FIVE having a single column. We will use this + * to test scan does the right thing as it we do Gets, StoreScanner#optimize, and what we do on + * (faked) block boundaries. */ - private static final Cell[] CELL_GRID = new Cell [] { - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE) - .setFamily(CF).setQualifier(ONE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE) - .setFamily(CF).setQualifier(TWO).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE) - .setFamily(CF).setQualifier(THREE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE) - .setFamily(CF).setQualifier(FOUR).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - // Offset 4 CELL_GRID_BLOCK2_BOUNDARY - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO) - .setFamily(CF).setQualifier(ONE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO) - .setFamily(CF).setQualifier(TWO).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO) - .setFamily(CF).setQualifier(THREE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO) - .setFamily(CF).setQualifier(FOUR).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO_POINT_TWO) - .setFamily(CF).setQualifier(ZERO).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO_POINT_TWO) - .setFamily(CF).setQualifier(ZERO_POINT_ZERO).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO_POINT_TWO) - .setFamily(CF).setQualifier(FIVE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - // Offset 11! 
CELL_GRID_BLOCK3_BOUNDARY - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE) - .setFamily(CF).setQualifier(ONE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE) - .setFamily(CF).setQualifier(TWO).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE) - .setFamily(CF).setQualifier(THREE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE) - .setFamily(CF).setQualifier(FOUR).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - // Offset 15 CELL_GRID_BLOCK4_BOUNDARY - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR) - .setFamily(CF).setQualifier(ONE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR) - .setFamily(CF).setQualifier(TWO).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR) - .setFamily(CF).setQualifier(THREE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR) - .setFamily(CF).setQualifier(FOUR).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - // Offset 19 CELL_GRID_BLOCK5_BOUNDARY - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR) - .setFamily(CF).setQualifier(FIVE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FIVE) - .setFamily(CF).setQualifier(ZERO).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - }; + private static final Cell[] CELL_GRID = new Cell[] { + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) + .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) + .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) + .setQualifier(THREE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) + .setQualifier(FOUR).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + // Offset 4 CELL_GRID_BLOCK2_BOUNDARY + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF) + .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF) + .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF) + .setQualifier(THREE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + 
ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF) + .setQualifier(FOUR).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO_POINT_TWO) + .setFamily(CF).setQualifier(ZERO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()) + .setValue(VALUE).build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO_POINT_TWO) + .setFamily(CF).setQualifier(ZERO_POINT_ZERO).setTimestamp(1L) + .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO_POINT_TWO) + .setFamily(CF).setQualifier(FIVE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()) + .setValue(VALUE).build(), + // Offset 11! CELL_GRID_BLOCK3_BOUNDARY + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE).setFamily(CF) + .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE).setFamily(CF) + .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE).setFamily(CF) + .setQualifier(THREE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(THREE).setFamily(CF) + .setQualifier(FOUR).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + // Offset 15 CELL_GRID_BLOCK4_BOUNDARY + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF) + .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF) + .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF) + .setQualifier(THREE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF) + .setQualifier(FOUR).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + // Offset 19 CELL_GRID_BLOCK5_BOUNDARY + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FOUR).setFamily(CF) + .setQualifier(FIVE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(FIVE).setFamily(CF) + .setQualifier(ZERO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), }; private static class KeyValueHeapWithCount extends KeyValueHeap { @@ -202,8 +200,8 @@ public Cell peek() { } /** - * A StoreScanner for our CELL_GRID above. Fakes the block transitions. Does counts of - * calls to optimize and counts of when optimize actually did an optimize. + * A StoreScanner for our CELL_GRID above. Fakes the block transitions. Does counts of calls to + * optimize and counts of when optimize actually did an optimize. */ private static class CellGridStoreScanner extends StoreScanner { // Count of how often optimize is called and of how often it does an optimize. 
@@ -211,13 +209,14 @@ private static class CellGridStoreScanner extends StoreScanner { final AtomicInteger optimization = new AtomicInteger(0); CellGridStoreScanner(final Scan scan, ScanInfo scanInfo) throws IOException { - super(scan, scanInfo, scan.getFamilyMap().get(CF), Arrays. asList( - new KeyValueScanner[] {new KeyValueScanFixture(CellComparator.getInstance(), CELL_GRID)})); + super(scan, scanInfo, scan.getFamilyMap().get(CF), + Arrays. asList(new KeyValueScanner[] { + new KeyValueScanFixture(CellComparator.getInstance(), CELL_GRID) })); } @Override - protected void resetKVHeap(List scanners, - CellComparator comparator) throws IOException { + protected void resetKVHeap(List scanners, CellComparator comparator) + throws IOException { if (count == null) { count = new AtomicInteger(0); } @@ -255,39 +254,38 @@ protected boolean trySkipToNextColumn(Cell cell) throws IOException { @Override public Cell getNextIndexedKey() { // Fake block boundaries by having index of next block change as we go through scan. - return count.get() > CELL_GRID_BLOCK4_BOUNDARY? - PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK5_BOUNDARY]): - count.get() > CELL_GRID_BLOCK3_BOUNDARY? - PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK4_BOUNDARY]): - count.get() > CELL_GRID_BLOCK2_BOUNDARY? - PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK3_BOUNDARY]): - PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK2_BOUNDARY]); + return count.get() > CELL_GRID_BLOCK4_BOUNDARY + ? PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK5_BOUNDARY]) + : count.get() > CELL_GRID_BLOCK3_BOUNDARY + ? PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK4_BOUNDARY]) + : count.get() > CELL_GRID_BLOCK2_BOUNDARY + ? PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK3_BOUNDARY]) + : PrivateCellUtil.createFirstOnRow(CELL_GRID[CELL_GRID_BLOCK2_BOUNDARY]); } } private static final int CELL_WITH_VERSIONS_BLOCK2_BOUNDARY = 4; - private static final Cell[] CELL_WITH_VERSIONS = new Cell [] { - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE) - .setFamily(CF).setQualifier(ONE).setTimestamp(2L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE) - .setFamily(CF).setQualifier(ONE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE) - .setFamily(CF).setQualifier(TWO).setTimestamp(2L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE) - .setFamily(CF).setQualifier(TWO).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - // Offset 4 CELL_WITH_VERSIONS_BLOCK2_BOUNDARY - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO) - .setFamily(CF).setQualifier(ONE).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO) - .setFamily(CF).setQualifier(TWO).setTimestamp(1L) - .setType(KeyValue.Type.Put.getCode()).setValue(VALUE).build(), - }; + private static final Cell[] CELL_WITH_VERSIONS = new Cell[] { + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) + .setQualifier(ONE).setTimestamp(2L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + 
ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) + .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) + .setQualifier(TWO).setTimestamp(2L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(ONE).setFamily(CF) + .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + // Offset 4 CELL_WITH_VERSIONS_BLOCK2_BOUNDARY + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF) + .setQualifier(ONE).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(TWO).setFamily(CF) + .setQualifier(TWO).setTimestamp(1L).setType(KeyValue.Type.Put.getCode()).setValue(VALUE) + .build(), }; private static class CellWithVersionsStoreScanner extends StoreScanner { // Count of how often optimize is called and of how often it does an optimize. @@ -296,7 +294,7 @@ private static class CellWithVersionsStoreScanner extends StoreScanner { CellWithVersionsStoreScanner(final Scan scan, ScanInfo scanInfo) throws IOException { super(scan, scanInfo, scan.getFamilyMap().get(CF), Arrays. asList(new KeyValueScanner[] { - new KeyValueScanFixture(CellComparator.getInstance(), CELL_WITH_VERSIONS) })); + new KeyValueScanFixture(CellComparator.getInstance(), CELL_WITH_VERSIONS) })); } @Override @@ -324,8 +322,8 @@ private static class CellWithVersionsNoOptimizeStoreScanner extends StoreScanner CellWithVersionsNoOptimizeStoreScanner(Scan scan, ScanInfo scanInfo) throws IOException { super(scan, scanInfo, scan.getFamilyMap().get(CF), - Arrays. asList(new KeyValueScanner[] { - new KeyValueScanFixture(CellComparator.getInstance(), CELL_WITH_VERSIONS) })); + Arrays. asList(new KeyValueScanner[] { + new KeyValueScanFixture(CellComparator.getInstance(), CELL_WITH_VERSIONS) })); } @Override @@ -384,7 +382,7 @@ public void testWithColumnCountGetFilter() throws Exception { * @param strCols * @return */ - NavigableSet getCols(String ...strCols) { + NavigableSet getCols(String... strCols) { NavigableSet cols = new TreeSet<>(Bytes.BYTES_COMPARATOR); for (String col : strCols) { byte[] bytes = Bytes.toBytes(col); @@ -454,9 +452,9 @@ public void testOptimize() throws IOException { // Should be four results of column 1 (though there are 5 rows in the CELL_GRID -- the // TWO_POINT_TWO row does not have a a column ONE. assertEquals(4, results.size()); - for (Cell cell: results) { - assertTrue(Bytes.equals(ONE, 0, ONE.length, - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); + for (Cell cell : results) { + assertTrue(Bytes.equals(ONE, 0, ONE.length, cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength())); } assertTrue("Optimize should do some optimizations", scanner.optimization.get() > 0); } @@ -464,9 +462,9 @@ public void testOptimize() throws IOException { /** * Ensure the optimize Scan method in StoreScanner does not get in the way of a Get doing minimum - * work... seeking to start of block and then SKIPPING until we find the wanted Cell. - * This 'simple' scenario mimics case of all Cells fitting inside a single HFileBlock. - * See HBASE-15392. This test is a little cryptic. Takes a bit of staring to figure what it up to. + * work... 
seeking to start of block and then SKIPPING until we find the wanted Cell. This + * 'simple' scenario mimics case of all Cells fitting inside a single HFileBlock. See HBASE-15392. + * This test is a little cryptic. Takes a bit of staring to figure what it up to. */ @Test public void testOptimizeAndGet() throws IOException { @@ -485,18 +483,17 @@ public void testOptimizeAndGet() throws IOException { // And we should have gone through optimize twice only. assertEquals("First qcode is SEEK_NEXT_COL and second INCLUDE_AND_SEEK_NEXT_ROW", 3, scanner.count.get()); - assertEquals("Memstore Read count should be", 1, - scanner.memstoreOnlyReads); + assertEquals("Memstore Read count should be", 1, scanner.memstoreOnlyReads); } } /** - * Ensure that optimize does not cause the Get to do more seeking than required. Optimize - * (see HBASE-15392) was causing us to seek all Cells in a block when a Get Scan if the next block - * index/start key was a different row to the current one. A bug. We'd call next too often - * because we had to exhaust all Cells in the current row making us load the next block just to - * discard what we read there. This test is a little cryptic. Takes a bit of staring to figure - * what it up to. + * Ensure that optimize does not cause the Get to do more seeking than required. Optimize (see + * HBASE-15392) was causing us to seek all Cells in a block when a Get Scan if the next block + * index/start key was a different row to the current one. A bug. We'd call next too often because + * we had to exhaust all Cells in the current row making us load the next block just to discard + * what we read there. This test is a little cryptic. Takes a bit of staring to figure what it up + * to. */ @Test public void testOptimizeAndGetWithFakedNextBlockIndexStart() throws IOException { @@ -521,15 +518,13 @@ public void testOptimizeAndGetWithFakedNextBlockIndexStart() throws IOException public void testScanTimeRange() throws IOException { String r1 = "R1"; // returns only 1 of these 2 even though same timestamp - KeyValue [] kvs = new KeyValue[] { - create(r1, CF_STR, "a", 1, KeyValue.Type.Put, "dont-care"), + KeyValue[] kvs = new KeyValue[] { create(r1, CF_STR, "a", 1, KeyValue.Type.Put, "dont-care"), create(r1, CF_STR, "a", 2, KeyValue.Type.Put, "dont-care"), create(r1, CF_STR, "a", 3, KeyValue.Type.Put, "dont-care"), create(r1, CF_STR, "a", 4, KeyValue.Type.Put, "dont-care"), - create(r1, CF_STR, "a", 5, KeyValue.Type.Put, "dont-care"), - }; - List scanners = Arrays.asList( - new KeyValueScanner[] {new KeyValueScanFixture(CellComparator.getInstance(), kvs)}); + create(r1, CF_STR, "a", 5, KeyValue.Type.Put, "dont-care"), }; + List scanners = Arrays. 
asList( + new KeyValueScanner[] { new KeyValueScanFixture(CellComparator.getInstance(), kvs) }); Scan scanSpec = new Scan().withStartRow(Bytes.toBytes(r1)); scanSpec.setTimeRange(0, 6); scanSpec.readAllVersions(); @@ -573,12 +568,10 @@ public void testScanTimeRange() throws IOException { @Test public void testScanSameTimestamp() throws IOException { // returns only 1 of these 2 even though same timestamp - KeyValue [] kvs = new KeyValue[] { - create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), - }; + KeyValue[] kvs = new KeyValue[] { create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), }; List scanners = Arrays.asList( - new KeyValueScanner[] {new KeyValueScanFixture(CellComparator.getInstance(), kvs)}); + new KeyValueScanner[] { new KeyValueScanFixture(CellComparator.getInstance(), kvs) }); Scan scanSpec = new Scan().withStartRow(Bytes.toBytes("R1")); // this only uses maxVersions (default=1) and TimeRange (default=all) @@ -613,20 +606,17 @@ public void testNonUserScan() throws IOException { } /* - * Test test shows exactly how the matcher's return codes confuses the StoreScanner - * and prevent it from doing the right thing. Seeking once, then nexting twice - * should return R1, then R2, but in this case it doesnt. - * TODO this comment makes no sense above. Appears to do the right thing. + * Test test shows exactly how the matcher's return codes confuses the StoreScanner and prevent it + * from doing the right thing. Seeking once, then nexting twice should return R1, then R2, but in + * this case it doesnt. TODO this comment makes no sense above. Appears to do the right thing. * @throws IOException */ @Test public void testWontNextToNext() throws IOException { // build the scan file: - KeyValue [] kvs = new KeyValue[] { - create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), + KeyValue[] kvs = new KeyValue[] { create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), - create("R2", "cf", "a", 1, KeyValue.Type.Put, "dont-care") - }; + create("R2", "cf", "a", 1, KeyValue.Type.Put, "dont-care") }; List scanners = scanFixture(kvs); Scan scanSpec = new Scan().withStartRow(Bytes.toBytes("R1")); @@ -649,13 +639,10 @@ public void testWontNextToNext() throws IOException { } } - @Test public void testDeleteVersionSameTimestamp() throws IOException { - KeyValue [] kvs = new KeyValue [] { - create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care"), - }; + KeyValue[] kvs = new KeyValue[] { create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care"), }; List scanners = scanFixture(kvs); Scan scanSpec = new Scan().withStartRow(Bytes.toBytes("R1")); try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, getCols("a"), scanners)) { @@ -666,16 +653,14 @@ public void testDeleteVersionSameTimestamp() throws IOException { } /* - * Test the case where there is a delete row 'in front of' the next row, the scanner - * will move to the next row. + * Test the case where there is a delete row 'in front of' the next row, the scanner will move to + * the next row. 
*/ @Test public void testDeletedRowThenGoodRow() throws IOException { - KeyValue [] kvs = new KeyValue [] { - create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), + KeyValue[] kvs = new KeyValue[] { create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care"), - create("R2", "cf", "a", 20, KeyValue.Type.Put, "dont-care") - }; + create("R2", "cf", "a", 20, KeyValue.Type.Put, "dont-care") }; List scanners = scanFixture(kvs); Scan scanSpec = new Scan().withStartRow(Bytes.toBytes("R1")); try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, getCols("a"), scanners)) { @@ -694,15 +679,12 @@ public void testDeletedRowThenGoodRow() throws IOException { @Test public void testDeleteVersionMaskingMultiplePuts() throws IOException { long now = EnvironmentEdgeManager.currentTime(); - KeyValue [] kvs1 = new KeyValue[] { - create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care") - }; - KeyValue [] kvs2 = new KeyValue[] { - create("R1", "cf", "a", now-500, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", now-100, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care") - }; + KeyValue[] kvs1 = new KeyValue[] { create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care") }; + KeyValue[] kvs2 = + new KeyValue[] { create("R1", "cf", "a", now - 500, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", now - 100, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care") }; List scanners = scanFixture(kvs1, kvs2); try (StoreScanner scan = new StoreScanner(new Scan().withStartRow(Bytes.toBytes("R1")), @@ -720,16 +702,13 @@ scanInfo, getCols("a"), scanners)) { @Test public void testDeleteVersionsMixedAndMultipleVersionReturn() throws IOException { long now = EnvironmentEdgeManager.currentTime(); - KeyValue [] kvs1 = new KeyValue[] { - create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care") - }; - KeyValue [] kvs2 = new KeyValue[] { - create("R1", "cf", "a", now-500, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", now+500, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"), - create("R2", "cf", "z", now, KeyValue.Type.Put, "dont-care") - }; + KeyValue[] kvs1 = new KeyValue[] { create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care") }; + KeyValue[] kvs2 = + new KeyValue[] { create("R1", "cf", "a", now - 500, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", now + 500, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"), + create("R2", "cf", "z", now, KeyValue.Type.Put, "dont-care") }; List scanners = scanFixture(kvs1, kvs2); Scan scanSpec = new Scan().withStartRow(Bytes.toBytes("R1")).readVersions(2); @@ -744,11 +723,9 @@ public void testDeleteVersionsMixedAndMultipleVersionReturn() throws IOException @Test public void testWildCardOneVersionScan() throws IOException { - KeyValue [] kvs = new KeyValue [] { - create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), + KeyValue[] kvs = new KeyValue[] { create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), create("R1", "cf", "b", 1, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", 1, 
KeyValue.Type.DeleteColumn, "dont-care"), - }; + create("R1", "cf", "a", 1, KeyValue.Type.DeleteColumn, "dont-care"), }; List scanners = scanFixture(kvs); try (StoreScanner scan = new StoreScanner(new Scan().withStartRow(Bytes.toBytes("R1")), scanInfo, null, scanners)) { @@ -762,8 +739,7 @@ public void testWildCardOneVersionScan() throws IOException { @Test public void testWildCardScannerUnderDeletes() throws IOException { - KeyValue [] kvs = new KeyValue [] { - create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), // inc + KeyValue[] kvs = new KeyValue[] { create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), // inc // orphaned delete column. create("R1", "cf", "a", 1, KeyValue.Type.DeleteColumn, "dont-care"), // column b @@ -772,12 +748,12 @@ public void testWildCardScannerUnderDeletes() throws IOException { // column c create("R1", "cf", "c", 10, KeyValue.Type.Delete, "dont-care"), create("R1", "cf", "c", 10, KeyValue.Type.Put, "dont-care"), // no - create("R1", "cf", "c", 9, KeyValue.Type.Put, "dont-care"), // inc + create("R1", "cf", "c", 9, KeyValue.Type.Put, "dont-care"), // inc // column d create("R1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"), // inc create("R1", "cf", "d", 10, KeyValue.Type.DeleteColumn, "dont-care"), - create("R1", "cf", "d", 9, KeyValue.Type.Put, "dont-care"), // no - create("R1", "cf", "d", 8, KeyValue.Type.Put, "dont-care"), // no + create("R1", "cf", "d", 9, KeyValue.Type.Put, "dont-care"), // no + create("R1", "cf", "d", 8, KeyValue.Type.Put, "dont-care"), // no }; List scanners = scanFixture(kvs); @@ -796,20 +772,19 @@ public void testWildCardScannerUnderDeletes() throws IOException { @Test public void testDeleteFamily() throws IOException { - KeyValue[] kvs = new KeyValue[] { - create("R1", "cf", "a", 100, KeyValue.Type.DeleteFamily, "dont-care"), - create("R1", "cf", "b", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "c", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "e", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "e", 11, KeyValue.Type.DeleteColumn, "dont-care"), - create("R1", "cf", "f", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "g", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "g", 11, KeyValue.Type.Delete, "dont-care"), - create("R1", "cf", "h", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "i", 11, KeyValue.Type.Put, "dont-care"), - create("R2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"), - }; + KeyValue[] kvs = + new KeyValue[] { create("R1", "cf", "a", 100, KeyValue.Type.DeleteFamily, "dont-care"), + create("R1", "cf", "b", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "c", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "e", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "e", 11, KeyValue.Type.DeleteColumn, "dont-care"), + create("R1", "cf", "f", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "g", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "g", 11, KeyValue.Type.Delete, "dont-care"), + create("R1", "cf", "h", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "i", 11, KeyValue.Type.Put, "dont-care"), + create("R2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"), }; List scanners = scanFixture(kvs); try (StoreScanner scan = new StoreScanner(new Scan().readAllVersions(), scanInfo, null, scanners)) { @@ -826,12 +801,11 @@ public void testDeleteFamily() throws 
IOException { @Test public void testDeleteColumn() throws IOException { - KeyValue [] kvs = new KeyValue[] { - create("R1", "cf", "a", 10, KeyValue.Type.DeleteColumn, "dont-care"), - create("R1", "cf", "a", 9, KeyValue.Type.Delete, "dont-care"), - create("R1", "cf", "a", 8, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "b", 5, KeyValue.Type.Put, "dont-care") - }; + KeyValue[] kvs = + new KeyValue[] { create("R1", "cf", "a", 10, KeyValue.Type.DeleteColumn, "dont-care"), + create("R1", "cf", "a", 9, KeyValue.Type.Delete, "dont-care"), + create("R1", "cf", "a", 8, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "b", 5, KeyValue.Type.Put, "dont-care") }; List scanners = scanFixture(kvs); try (StoreScanner scan = new StoreScanner(new Scan(), scanInfo, null, scanners)) { List results = new ArrayList<>(); @@ -841,18 +815,17 @@ public void testDeleteColumn() throws IOException { } } - private static final KeyValue[] kvs = new KeyValue[] { - create("R1", "cf", "a", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "b", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "c", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "e", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "f", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "g", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "h", 11, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "i", 11, KeyValue.Type.Put, "dont-care"), - create("R2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"), - }; + private static final KeyValue[] kvs = + new KeyValue[] { create("R1", "cf", "a", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "b", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "c", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "d", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "e", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "f", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "g", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "h", 11, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "i", 11, KeyValue.Type.Put, "dont-care"), + create("R2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"), }; @Test public void testSkipColumn() throws IOException { @@ -875,22 +848,21 @@ public void testSkipColumn() throws IOException { } /* - * Test expiration of KeyValues in combination with a configured TTL for - * a column family (as should be triggered in a major compaction). + * Test expiration of KeyValues in combination with a configured TTL for a column family (as + * should be triggered in a major compaction). 
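As a rough sketch of the rule this test exercises (illustrative only, not the actual StoreScanner or compaction code; isExpired is a hypothetical helper), a cell is treated as expired once its timestamp falls outside the family's TTL window measured from the current time:

  // Hedged sketch: a cell older than (now - ttl) is filtered by the scan,
  // and a major compaction drops it from the store files entirely.
  static boolean isExpired(long cellTimestampMs, long ttlMs, long nowMs) {
    return cellTimestampMs < nowMs - ttlMs;
  }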
*/ @Test public void testWildCardTtlScan() throws IOException { long now = EnvironmentEdgeManager.currentTime(); - KeyValue [] kvs = new KeyValue[] { - create("R1", "cf", "a", now-1000, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "c", now-200, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "d", now-10000, KeyValue.Type.Put, "dont-care"), - create("R2", "cf", "a", now, KeyValue.Type.Put, "dont-care"), - create("R2", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"), - create("R2", "cf", "c", now-200, KeyValue.Type.Put, "dont-care"), - create("R2", "cf", "c", now-1000, KeyValue.Type.Put, "dont-care") - }; + KeyValue[] kvs = + new KeyValue[] { create("R1", "cf", "a", now - 1000, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "b", now - 10, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "c", now - 200, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "d", now - 10000, KeyValue.Type.Put, "dont-care"), + create("R2", "cf", "a", now, KeyValue.Type.Put, "dont-care"), + create("R2", "cf", "b", now - 10, KeyValue.Type.Put, "dont-care"), + create("R2", "cf", "c", now - 200, KeyValue.Type.Put, "dont-care"), + create("R2", "cf", "c", now - 1000, KeyValue.Type.Put, "dont-care") }; List scanners = scanFixture(kvs); Scan scan = new Scan(); scan.readVersions(1); @@ -928,12 +900,11 @@ public void testScannerReseekDoesntNPE() throws Exception { } } - @Test @Ignore("this fails, since we don't handle deletions, etc, in peek") + @Test + @Ignore("this fails, since we don't handle deletions, etc, in peek") public void testPeek() throws Exception { - KeyValue[] kvs = new KeyValue [] { - create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care"), - }; + KeyValue[] kvs = new KeyValue[] { create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care"), }; List scanners = scanFixture(kvs); Scan scanSpec = new Scan().withStartRow(Bytes.toBytes("R1")); try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, getCols("a"), scanners)) { @@ -948,11 +919,9 @@ public void testPeek() throws Exception { public void testExpiredDeleteFamily() throws Exception { long now = EnvironmentEdgeManager.currentTime(); KeyValue[] kvs = new KeyValue[] { - new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now-1000, - KeyValue.Type.DeleteFamily), - create("R1", "cf", "a", now-10, KeyValue.Type.Put, - "dont-care"), - }; + new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now - 1000, + KeyValue.Type.DeleteFamily), + create("R1", "cf", "a", now - 10, KeyValue.Type.Put, "dont-care"), }; List scanners = scanFixture(kvs); Scan scan = new Scan(); scan.readVersions(1); @@ -980,48 +949,31 @@ public long currentTime() { return now; } }); - KeyValue[] kvs = new KeyValue[]{ - /*0*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, - now - 100, KeyValue.Type.DeleteFamily), // live - /*1*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, - now - 1000, KeyValue.Type.DeleteFamily), // expired - /*2*/ create("R1", "cf", "a", now - 50, - KeyValue.Type.Put, "v3"), // live - /*3*/ create("R1", "cf", "a", now - 55, - KeyValue.Type.Delete, "dontcare"), // live - /*4*/ create("R1", "cf", "a", now - 55, - KeyValue.Type.Put, "deleted-version v2"), // deleted - /*5*/ create("R1", "cf", "a", now - 60, - KeyValue.Type.Put, "v1"), // live - /*6*/ create("R1", "cf", "a", now - 65, - 
KeyValue.Type.Put, "v0"), // max-version reached - /*7*/ create("R1", "cf", "a", - now - 100, KeyValue.Type.DeleteColumn, "dont-care"), // max-version - /*8*/ create("R1", "cf", "b", now - 600, - KeyValue.Type.DeleteColumn, "dont-care"), //expired - /*9*/ create("R1", "cf", "b", now - 70, - KeyValue.Type.Put, "v2"), //live - /*10*/ create("R1", "cf", "b", now - 750, - KeyValue.Type.Put, "v1"), //expired - /*11*/ create("R1", "cf", "c", now - 500, - KeyValue.Type.Delete, "dontcare"), //expired - /*12*/ create("R1", "cf", "c", now - 600, - KeyValue.Type.Put, "v1"), //expired - /*13*/ create("R1", "cf", "c", now - 1000, - KeyValue.Type.Delete, "dontcare"), //expired - /*14*/ create("R1", "cf", "d", now - 60, - KeyValue.Type.Put, "expired put"), //live - /*15*/ create("R1", "cf", "d", now - 100, - KeyValue.Type.Delete, "not-expired delete"), //live + KeyValue[] kvs = new KeyValue[] { + /* 0 */ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now - 100, + KeyValue.Type.DeleteFamily), // live + /* 1 */ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now - 1000, + KeyValue.Type.DeleteFamily), // expired + /* 2 */ create("R1", "cf", "a", now - 50, KeyValue.Type.Put, "v3"), // live + /* 3 */ create("R1", "cf", "a", now - 55, KeyValue.Type.Delete, "dontcare"), // live + /* 4 */ create("R1", "cf", "a", now - 55, KeyValue.Type.Put, "deleted-version v2"), // deleted + /* 5 */ create("R1", "cf", "a", now - 60, KeyValue.Type.Put, "v1"), // live + /* 6 */ create("R1", "cf", "a", now - 65, KeyValue.Type.Put, "v0"), // max-version reached + /* 7 */ create("R1", "cf", "a", now - 100, KeyValue.Type.DeleteColumn, "dont-care"), // max-version + /* 8 */ create("R1", "cf", "b", now - 600, KeyValue.Type.DeleteColumn, "dont-care"), // expired + /* 9 */ create("R1", "cf", "b", now - 70, KeyValue.Type.Put, "v2"), // live + /* 10 */ create("R1", "cf", "b", now - 750, KeyValue.Type.Put, "v1"), // expired + /* 11 */ create("R1", "cf", "c", now - 500, KeyValue.Type.Delete, "dontcare"), // expired + /* 12 */ create("R1", "cf", "c", now - 600, KeyValue.Type.Put, "v1"), // expired + /* 13 */ create("R1", "cf", "c", now - 1000, KeyValue.Type.Delete, "dontcare"), // expired + /* 14 */ create("R1", "cf", "d", now - 60, KeyValue.Type.Put, "expired put"), // live + /* 15 */ create("R1", "cf", "d", now - 100, KeyValue.Type.Delete, "not-expired delete"), // live }; List scanners = scanFixture(kvs); - ScanInfo scanInfo = new ScanInfo(CONF, Bytes.toBytes("cf"), - 0 /* minVersions */, - 2 /* maxVersions */, 500 /* ttl */, - KeepDeletedCells.FALSE /* keepDeletedCells */, - HConstants.DEFAULT_BLOCKSIZE /* block size */, - 200, /* timeToPurgeDeletes */ - CellComparator.getInstance(), false); + ScanInfo scanInfo = new ScanInfo(CONF, Bytes.toBytes("cf"), 0 /* minVersions */, + 2 /* maxVersions */, 500 /* ttl */, KeepDeletedCells.FALSE /* keepDeletedCells */, + HConstants.DEFAULT_BLOCKSIZE /* block size */, 200, /* timeToPurgeDeletes */ + CellComparator.getInstance(), false); try (StoreScanner scanner = new StoreScanner(scanInfo, 2, ScanType.COMPACT_DROP_DELETES, scanners)) { List results = new ArrayList<>(); @@ -1045,32 +997,27 @@ public long currentTime() { public void testPreadNotEnabledForCompactionStoreScanners() throws Exception { long now = EnvironmentEdgeManager.currentTime(); KeyValue[] kvs = new KeyValue[] { - new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now - 1000, - KeyValue.Type.DeleteFamily), - create("R1", "cf", "a", now - 10, KeyValue.Type.Put, "dont-care"), }; + new 
KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now - 1000, + KeyValue.Type.DeleteFamily), + create("R1", "cf", "a", now - 10, KeyValue.Type.Put, "dont-care"), }; List scanners = scanFixture(kvs); ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false); - try (StoreScanner storeScanner = new StoreScanner(scanInfo, -1, - ScanType.COMPACT_RETAIN_DELETES, scanners)) { + try (StoreScanner storeScanner = + new StoreScanner(scanInfo, -1, ScanType.COMPACT_RETAIN_DELETES, scanners)) { assertFalse(storeScanner.isScanUsePread()); } } @Test public void testReadVersionWithRawAndFilter() throws IOException { - ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, Long.MAX_VALUE, - KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0 - , CellComparator.getInstance(), false); - KeyValue [] kvs = new KeyValue[] { - create("R1", "cf", "a", 3, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), - create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care") - }; + ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, Long.MAX_VALUE, KeepDeletedCells.FALSE, + HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false); + KeyValue[] kvs = new KeyValue[] { create("R1", "cf", "a", 3, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), + create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care") }; List scanners = Arrays.asList( - new KeyValueScanner[]{ - new KeyValueScanFixture(CellComparator.getInstance(), kvs) - }); + new KeyValueScanner[] { new KeyValueScanFixture(CellComparator.getInstance(), kvs) }); BinaryComparator comp = new BinaryComparator(Bytes.toBytes("a")); Filter filter = new QualifierFilter(CompareOperator.EQUAL, comp); @@ -1106,9 +1053,8 @@ public void close() { } } - ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, Long.MAX_VALUE, - KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0 - , CellComparator.getInstance(), false); + ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, Long.MAX_VALUE, KeepDeletedCells.FALSE, + HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false); InternalScan scan = new InternalScan(new Scan()); scan.checkOnlyMemStore(); MyCollectionBackedScanner fileScanner = new MyCollectionBackedScanner(true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java index 3b893a87cb32..c6195338f5ee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,10 +63,11 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + /** * This test tests whether parallel {@link StoreScanner#close()} and - * {@link StoreScanner#updateReaders(List, List)} works perfectly ensuring - * that there are no references on the existing Storescanner readers. + * {@link StoreScanner#updateReaders(List, List)} works perfectly ensuring that there are no + * references on the existing Storescanner readers. 
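The usual shape of such a race test is to release both operations from a shared latch so that close() and updateReaders() overlap as often as possible. A minimal sketch under that assumption (raceCloseAndUpdate and awaitQuietly are hypothetical names, not part of this test or the HBase API):

  // Hedged sketch: race two operations by releasing both threads from one latch.
  static void raceCloseAndUpdate(Runnable close, Runnable updateReaders) throws InterruptedException {
    java.util.concurrent.CountDownLatch start = new java.util.concurrent.CountDownLatch(1);
    Thread a = new Thread(() -> { awaitQuietly(start); close.run(); });
    Thread b = new Thread(() -> { awaitQuietly(start); updateReaders.run(); });
    a.start();
    b.start();
    start.countDown(); // let both proceed at (nearly) the same instant
    a.join();
    b.join();
  }

  static void awaitQuietly(java.util.concurrent.CountDownLatch latch) {
    try {
      latch.await();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }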
*/ @Category({ RegionServerTests.class, SmallTests.class }) public class TestStoreScannerClosure { @@ -108,11 +109,11 @@ public static void setUp() throws Exception { fs = TEST_UTIL.getTestFileSystem(); TableName tableName = TableName.valueOf("test"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); Path path = TEST_UTIL.getDataTestDir("test"); - region = HBaseTestingUtil.createRegionAndWAL(info, path, - TEST_UTIL.getConfiguration(), tableDescriptor); + region = HBaseTestingUtil.createRegionAndWAL(info, path, TEST_UTIL.getConfiguration(), + tableDescriptor); } @Test @@ -130,7 +131,7 @@ public void testScannerCloseAndUpdateReadersWithMemstoreScanner() throws Excepti HStore store = region.getStore(fam); // use the lock to manually get a new memstore scanner. this is what // HStore#notifyChangedReadersObservers does under the lock.(lock is not needed here - //since it is just a testcase). + // since it is just a testcase). store.getStoreEngine().readLock(); final List memScanners = store.memstore.getScanners(Long.MAX_VALUE); store.getStoreEngine().readUnlock(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java index 80012dfcd461..404234beed86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestStripeStoreEngine { @ClassRule @@ -80,10 +80,8 @@ public void testCompactionContextForceSelect() throws Exception { StripeCompactor mockCompactor = mock(StripeCompactor.class); se.setCompactorOverride(mockCompactor); when( - mockCompactor.compact(any(), anyInt(), anyLong(), any(), - any(), any(), any(), - any(), any())) - .thenReturn(new ArrayList<>()); + mockCompactor.compact(any(), anyInt(), anyLong(), any(), any(), any(), any(), any(), any())) + .thenReturn(new ArrayList<>()); // Produce 3 L0 files. 
HStoreFile sf = createFile(); @@ -109,8 +107,7 @@ public void testCompactionContextForceSelect() throws Exception { private static HStoreFile createFile() throws Exception { HStoreFile sf = mock(HStoreFile.class); - when(sf.getMetadataValue(any())) - .thenReturn(StripeStoreFileManager.INVALID_KEY); + when(sf.getMetadataValue(any())).thenReturn(StripeStoreFileManager.INVALID_KEY); when(sf.getReader()).thenReturn(mock(StoreFileReader.class)); when(sf.getPath()).thenReturn(new Path("moo")); when(sf.getBulkLoadTimestamp()).thenReturn(OptionalLong.empty()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java index 63db911f8b5d..2c5b76eb7504 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestStripeStoreFileManager { @ClassRule @@ -60,8 +60,8 @@ public class TestStripeStoreFileManager { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Path BASEDIR = TEST_UTIL.getDataTestDir(TestStripeStoreFileManager.class.getSimpleName()); - private static final Path CFDIR = HRegionFileSystem.getStoreHomedir(BASEDIR, "region", - Bytes.toBytes("cf")); + private static final Path CFDIR = + HRegionFileSystem.getStoreHomedir(BASEDIR, "region", Bytes.toBytes("cf")); private static final byte[] KEY_A = Bytes.toBytes("aaa"); private static final byte[] KEY_B = Bytes.toBytes("aab"); @@ -100,8 +100,8 @@ public void testInsertFilesIntoL0() throws Exception { assertTrue(filesForGet.contains(sf)); // Add some stripes and make sure we get this file for every stripe. 
- manager.addCompactionResults(al(), al(createFile(OPEN_KEY, KEY_B), - createFile(KEY_B, OPEN_KEY))); + manager.addCompactionResults(al(), + al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, OPEN_KEY))); assertTrue(manager.getFilesForScan(KEY_A, true, KEY_A, true).contains(sf)); assertTrue(manager.getFilesForScan(KEY_C, true, KEY_C, true).contains(sf)); } @@ -111,8 +111,8 @@ public void testClearFiles() throws Exception { StripeStoreFileManager manager = createManager(); manager.insertNewFiles(al(createFile())); manager.insertNewFiles(al(createFile())); - manager.addCompactionResults(al(), al(createFile(OPEN_KEY, KEY_B), - createFile(KEY_B, OPEN_KEY))); + manager.addCompactionResults(al(), + al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, OPEN_KEY))); assertEquals(4, manager.getStorefileCount()); Collection allFiles = manager.clearFiles(); assertEquals(4, allFiles.size()); @@ -206,35 +206,34 @@ public void testGetSplitPointEdgeCases() throws Exception { @Test public void testGetStripeBoundarySplits() throws Exception { /* First number - split must be after this stripe; further numbers - stripes */ - verifySplitPointScenario(5, false, 0f, 2, 1, 1, 1, 1, 1, 10); - verifySplitPointScenario(0, false, 0f, 6, 3, 1, 1, 2); - verifySplitPointScenario(2, false, 0f, 1, 1, 1, 1, 2); - verifySplitPointScenario(0, false, 0f, 5, 4); - verifySplitPointScenario(2, false, 0f, 5, 2, 5, 5, 5); + verifySplitPointScenario(5, false, 0f, 2, 1, 1, 1, 1, 1, 10); + verifySplitPointScenario(0, false, 0f, 6, 3, 1, 1, 2); + verifySplitPointScenario(2, false, 0f, 1, 1, 1, 1, 2); + verifySplitPointScenario(0, false, 0f, 5, 4); + verifySplitPointScenario(2, false, 0f, 5, 2, 5, 5, 5); } @Test public void testGetUnbalancedSplits() throws Exception { /* First number - split must be inside/after this stripe; further numbers - stripes */ - verifySplitPointScenario(0, false, 2.1f, 4, 4, 4); // 8/4 is less than 2.1f - verifySplitPointScenario(1, true, 1.5f, 4, 4, 4); // 8/4 > 6/6 - verifySplitPointScenario(1, false, 1.1f, 3, 4, 1, 1, 2, 2); // 7/6 < 8/5 - verifySplitPointScenario(1, false, 1.1f, 3, 6, 1, 1, 2, 2); // 9/6 == 9/6 - verifySplitPointScenario(1, true, 1.1f, 3, 8, 1, 1, 2, 2); // 11/6 > 10/7 - verifySplitPointScenario(3, false, 1.1f, 2, 2, 1, 1, 4, 3); // reverse order - verifySplitPointScenario(4, true, 1.1f, 2, 2, 1, 1, 8, 3); // reverse order - verifySplitPointScenario(0, true, 1.5f, 10, 4); // 10/4 > 9/5 - verifySplitPointScenario(0, false, 1.4f, 6, 4); // 6/4 == 6/4 - verifySplitPointScenario(1, true, 1.5f, 4, 10); // reverse just in case - verifySplitPointScenario(0, false, 1.4f, 4, 6); // reverse just in case + verifySplitPointScenario(0, false, 2.1f, 4, 4, 4); // 8/4 is less than 2.1f + verifySplitPointScenario(1, true, 1.5f, 4, 4, 4); // 8/4 > 6/6 + verifySplitPointScenario(1, false, 1.1f, 3, 4, 1, 1, 2, 2); // 7/6 < 8/5 + verifySplitPointScenario(1, false, 1.1f, 3, 6, 1, 1, 2, 2); // 9/6 == 9/6 + verifySplitPointScenario(1, true, 1.1f, 3, 8, 1, 1, 2, 2); // 11/6 > 10/7 + verifySplitPointScenario(3, false, 1.1f, 2, 2, 1, 1, 4, 3); // reverse order + verifySplitPointScenario(4, true, 1.1f, 2, 2, 1, 1, 8, 3); // reverse order + verifySplitPointScenario(0, true, 1.5f, 10, 4); // 10/4 > 9/5 + verifySplitPointScenario(0, false, 1.4f, 6, 4); // 6/4 == 6/4 + verifySplitPointScenario(1, true, 1.5f, 4, 10); // reverse just in case + verifySplitPointScenario(0, false, 1.4f, 4, 6); // reverse just in case } - /** * Verifies scenario for finding a split point. 
* @param splitPointAfter Stripe to expect the split point at/after. * @param shouldSplitStripe If true, the split point is expected in the middle of the above - * stripe; if false, should be at the end. + * stripe; if false, should be at the end. * @param splitRatioToVerify Maximum split imbalance ratio. * @param sizes Stripe sizes. */ @@ -274,9 +273,9 @@ public void testGetFilesForGetAndScan() throws Exception { // Populate one L0 file. MockHStoreFile sf0 = createFile(); manager.insertNewFiles(al(sf0)); - verifyGetAndScanScenario(manager, null, null, sf0); - verifyGetAndScanScenario(manager, null, KEY_C, sf0); - verifyGetAndScanScenario(manager, KEY_B, null, sf0); + verifyGetAndScanScenario(manager, null, null, sf0); + verifyGetAndScanScenario(manager, null, KEY_C, sf0); + verifyGetAndScanScenario(manager, KEY_B, null, sf0); verifyGetAndScanScenario(manager, KEY_B, KEY_C, sf0); // Populate a bunch of files for stripes, keep L0. @@ -287,14 +286,14 @@ public void testGetFilesForGetAndScan() throws Exception { MockHStoreFile sfE = createFile(KEY_D, OPEN_KEY); manager.addCompactionResults(al(), al(sfA, sfB, sfC, sfD, sfE)); - verifyGetAndScanScenario(manager, null, null, sf0, sfA, sfB, sfC, sfD, sfE); - verifyGetAndScanScenario(manager, keyAfter(KEY_A), null, sf0, sfB, sfC, sfD, sfE); - verifyGetAndScanScenario(manager, null, keyAfter(KEY_C), sf0, sfA, sfB, sfC, sfD); - verifyGetAndScanScenario(manager, KEY_B, null, sf0, sfC, sfD, sfE); - verifyGetAndScanScenario(manager, null, KEY_C, sf0, sfA, sfB, sfC, sfD); - verifyGetAndScanScenario(manager, KEY_B, keyAfter(KEY_B), sf0, sfC); - verifyGetAndScanScenario(manager, keyAfter(KEY_A), KEY_B, sf0, sfB, sfC); - verifyGetAndScanScenario(manager, KEY_D, KEY_D, sf0, sfE); + verifyGetAndScanScenario(manager, null, null, sf0, sfA, sfB, sfC, sfD, sfE); + verifyGetAndScanScenario(manager, keyAfter(KEY_A), null, sf0, sfB, sfC, sfD, sfE); + verifyGetAndScanScenario(manager, null, keyAfter(KEY_C), sf0, sfA, sfB, sfC, sfD); + verifyGetAndScanScenario(manager, KEY_B, null, sf0, sfC, sfD, sfE); + verifyGetAndScanScenario(manager, null, KEY_C, sf0, sfA, sfB, sfC, sfD); + verifyGetAndScanScenario(manager, KEY_B, keyAfter(KEY_B), sf0, sfC); + verifyGetAndScanScenario(manager, keyAfter(KEY_A), KEY_B, sf0, sfB, sfC); + verifyGetAndScanScenario(manager, KEY_D, KEY_D, sf0, sfE); verifyGetAndScanScenario(manager, keyAfter(KEY_B), keyAfter(KEY_C), sf0, sfC, sfD); } @@ -310,11 +309,10 @@ public void testLoadFilesWithRecoverableBadFiles() throws Exception { // files that overlap valid stripes in various ways). Note that the 4th way to overlap the // stripes will cause the structure to be mostly scraped, and is tested separately. 
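Loosely, a file is kept inside a stripe only when its boundary keys line up with that stripe's start and end keys; files that overlap stripes, invert their keys, or carry no boundary metadata are demoted to L0. A hedged illustration of that check (matchesStripe is a hypothetical helper, and the real StripeStoreFileManager logic additionally handles the open-ended OPEN_KEY boundaries):

  // Hedged sketch: a stripe file's key range must coincide with the stripe's boundaries.
  static boolean matchesStripe(byte[] fileStart, byte[] fileEnd, byte[] stripeStart, byte[] stripeEnd) {
    return java.util.Arrays.equals(fileStart, stripeStart)
        && java.util.Arrays.equals(fileEnd, stripeEnd);
  }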
ArrayList validStripeFiles = al(createFile(OPEN_KEY, KEY_B), - createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY), - createFile(KEY_C, OPEN_KEY)); + createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY), createFile(KEY_C, OPEN_KEY)); ArrayList filesToGoToL0 = al(createFile(), createFile(null, KEY_A), - createFile(KEY_D, null), createFile(KEY_D, KEY_A), createFile(keyAfter(KEY_A), KEY_C), - createFile(OPEN_KEY, KEY_D), createFile(KEY_D, keyAfter(KEY_D))); + createFile(KEY_D, null), createFile(KEY_D, KEY_A), createFile(keyAfter(KEY_A), KEY_C), + createFile(OPEN_KEY, KEY_D), createFile(KEY_D, keyAfter(KEY_D))); ArrayList allFilesToGo = flattenLists(validStripeFiles, filesToGoToL0); Collections.shuffle(allFilesToGo); StripeStoreFileManager manager = createManager(allFilesToGo); @@ -330,9 +328,8 @@ public void testLoadFilesWithRecoverableBadFiles() throws Exception { public void testLoadFilesWithBadStripe() throws Exception { // Current "algorithm" will see the after-B key before C key, add it as valid stripe, // and then fail all other stripes. So everything would end up in L0. - ArrayList allFilesToGo = al(createFile(OPEN_KEY, KEY_B), - createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY), - createFile(KEY_B, keyAfter(KEY_B))); + ArrayList allFilesToGo = al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, KEY_C), + createFile(KEY_C, OPEN_KEY), createFile(KEY_B, keyAfter(KEY_B))); Collections.shuffle(allFilesToGo); StripeStoreFileManager manager = createManager(allFilesToGo); assertEquals(allFilesToGo.size(), manager.getLevel0Files().size()); @@ -342,7 +339,7 @@ public void testLoadFilesWithBadStripe() throws Exception { public void testLoadFilesWithGaps() throws Exception { // Stripes must not have gaps. If they do, everything goes to L0. StripeStoreFileManager manager = - createManager(al(createFile(OPEN_KEY, KEY_B), createFile(KEY_C, OPEN_KEY))); + createManager(al(createFile(OPEN_KEY, KEY_B), createFile(KEY_C, OPEN_KEY))); assertEquals(2, manager.getLevel0Files().size()); // Just one open stripe should be ok. manager = createManager(al(createFile(OPEN_KEY, OPEN_KEY))); @@ -379,12 +376,12 @@ public void testAddingCompactionResults() throws Exception { // Try compacting with invalid new branches (gaps, overlaps) - no effect. 
verifyInvalidCompactionScenario(manager, al(sf_L0_0a), al(createFile(OPEN_KEY, KEY_B))); - verifyInvalidCompactionScenario(manager, al(sf_L0_0a), al(createFile(OPEN_KEY, KEY_B), - createFile(KEY_C, OPEN_KEY))); - verifyInvalidCompactionScenario(manager, al(sf_L0_0a), al(createFile(OPEN_KEY, KEY_B), - createFile(KEY_B, OPEN_KEY), createFile(KEY_A, KEY_D))); - verifyInvalidCompactionScenario(manager, al(sf_L0_0a), al(createFile(OPEN_KEY, KEY_B), - createFile(KEY_A, KEY_B), createFile(KEY_B, OPEN_KEY))); + verifyInvalidCompactionScenario(manager, al(sf_L0_0a), + al(createFile(OPEN_KEY, KEY_B), createFile(KEY_C, OPEN_KEY))); + verifyInvalidCompactionScenario(manager, al(sf_L0_0a), + al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, OPEN_KEY), createFile(KEY_A, KEY_D))); + verifyInvalidCompactionScenario(manager, al(sf_L0_0a), + al(createFile(OPEN_KEY, KEY_B), createFile(KEY_A, KEY_B), createFile(KEY_B, OPEN_KEY))); HStoreFile sf_i2B_0 = createFile(OPEN_KEY, KEY_B); HStoreFile sf_B2C_0 = createFile(KEY_B, KEY_C); @@ -493,8 +490,8 @@ public void testEmptyResultsForStripes() throws Exception { HStoreFile sf0b = createFile(); manager.insertNewFiles(al(sf0a)); manager.insertNewFiles(al(sf0b)); - ArrayList compacted = al(createFile(OPEN_KEY, KEY_B), - createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY)); + ArrayList compacted = + al(createFile(OPEN_KEY, KEY_B), createFile(KEY_B, KEY_C), createFile(KEY_C, OPEN_KEY)); manager.addCompactionResults(al(sf0a), compacted); manager.removeCompactedFiles(al(sf0a)); // Next L0 compaction only produces file for the first and last stripe. @@ -508,21 +505,21 @@ public void testEmptyResultsForStripes() throws Exception { @Test public void testPriority() throws Exception { // Expected priority, file limit, stripe count, files per stripe, l0 files. - testPriorityScenario(5, 5, 0, 0, 0); - testPriorityScenario(2, 5, 0, 0, 3); - testPriorityScenario(4, 25, 5, 1, 0); // example case. - testPriorityScenario(3, 25, 5, 1, 1); // L0 files counts for all stripes. - testPriorityScenario(3, 25, 5, 2, 0); // file to each stripe - same as one L0 file. - testPriorityScenario(2, 25, 5, 4, 0); // 1 is priority user, so 2 is returned. - testPriorityScenario(2, 25, 5, 4, 4); // don't return higher than user unless over limit. - testPriorityScenario(2, 25, 5, 1, 10); // same. - testPriorityScenario(0, 25, 5, 4, 5); // at limit. - testPriorityScenario(-5, 25, 5, 6, 0); // over limit! - testPriorityScenario(-1, 25, 0, 0, 26); // over limit with just L0 - } - - private void testPriorityScenario(int expectedPriority, - int limit, int stripes, int filesInStripe, int l0Files) throws Exception { + testPriorityScenario(5, 5, 0, 0, 0); + testPriorityScenario(2, 5, 0, 0, 3); + testPriorityScenario(4, 25, 5, 1, 0); // example case. + testPriorityScenario(3, 25, 5, 1, 1); // L0 files counts for all stripes. + testPriorityScenario(3, 25, 5, 2, 0); // file to each stripe - same as one L0 file. + testPriorityScenario(2, 25, 5, 4, 0); // 1 is priority user, so 2 is returned. + testPriorityScenario(2, 25, 5, 4, 4); // don't return higher than user unless over limit. + testPriorityScenario(2, 25, 5, 1, 10); // same. + testPriorityScenario(0, 25, 5, 4, 5); // at limit. + testPriorityScenario(-5, 25, 5, 6, 0); // over limit! 
+ testPriorityScenario(-1, 25, 0, 0, 26); // over limit with just L0 + } + + private void testPriorityScenario(int expectedPriority, int limit, int stripes, int filesInStripe, + int l0Files) throws Exception { final byte[][] keys = { KEY_A, KEY_B, KEY_C, KEY_D }; assertTrue(stripes <= keys.length + 1); Configuration conf = TEST_UTIL.getConfiguration(); @@ -534,8 +531,8 @@ private void testPriorityScenario(int expectedPriority, for (int i = 0; i < filesInStripe; ++i) { ArrayList stripe = new ArrayList<>(); for (int j = 0; j < stripes; ++j) { - stripe.add(createFile( - (j == 0) ? OPEN_KEY : keys[j - 1], (j == stripes - 1) ? OPEN_KEY : keys[j])); + stripe.add( + createFile((j == 0) ? OPEN_KEY : keys[j - 1], (j == stripes - 1) ? OPEN_KEY : keys[j])); } sfm.addCompactionResults(al(), stripe); } @@ -543,7 +540,7 @@ private void testPriorityScenario(int expectedPriority, } private void verifyInvalidCompactionScenario(StripeStoreFileManager manager, - ArrayList filesToCompact, ArrayList filesToInsert) throws Exception { + ArrayList filesToCompact, ArrayList filesToInsert) throws Exception { Collection allFiles = manager.getStorefiles(); assertThrows(IllegalStateException.class, () -> manager.addCompactionResults(filesToCompact, filesToInsert)); @@ -566,14 +563,14 @@ private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] star } } - private void verifyAllFiles( - StripeStoreFileManager manager, Collection results) throws Exception { + private void verifyAllFiles(StripeStoreFileManager manager, Collection results) + throws Exception { verifyGetOrScanScenario(manager, null, null, results); } // TODO: replace with Mockito? - private static MockHStoreFile createFile( - long size, long seqNum, byte[] startKey, byte[] endKey) throws Exception { + private static MockHStoreFile createFile(long size, long seqNum, byte[] startKey, byte[] endKey) + throws Exception { FileSystem fs = TEST_UTIL.getTestFileSystem(); Path testFilePath = StoreFileWriter.getUniqueFile(fs, CFDIR); fs.create(testFilePath).close(); @@ -607,12 +604,12 @@ private static StripeStoreFileManager createManager(ArrayList sfs) t return createManager(sfs, TEST_UTIL.getConfiguration()); } - private static StripeStoreFileManager createManager( - ArrayList sfs, Configuration conf) throws Exception { - StripeStoreConfig config = new StripeStoreConfig( - conf, Mockito.mock(StoreConfigInformation.class)); - StripeStoreFileManager result = new StripeStoreFileManager(CellComparatorImpl.COMPARATOR, conf, - config); + private static StripeStoreFileManager createManager(ArrayList sfs, Configuration conf) + throws Exception { + StripeStoreConfig config = + new StripeStoreConfig(conf, Mockito.mock(StoreConfigInformation.class)); + StripeStoreFileManager result = + new StripeStoreFileManager(CellComparatorImpl.COMPARATOR, conf, config); result.loadFiles(sfs); return result; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java index d826397c6b7f..916b25889ca6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -52,7 +53,7 @@ public class TestSwitchToStreamRead { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSwitchToStreamRead.class); + HBaseClassTestRule.forClass(TestSwitchToStreamRead.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -76,18 +77,18 @@ public void setUp() throws IOException { VALUE_PREFIX = sb.append("-").toString(); REGION = UTIL.createLocalHRegion( TableDescriptorBuilder.newBuilder(TABLE_NAME) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBlocksize(1024).build()) - .build(), + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBlocksize(1024).build()) + .build(), null, null); for (int i = 0; i < 900; i++) { REGION - .put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + i))); + .put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + i))); } REGION.flush(true); for (int i = 900; i < 1000; i++) { REGION - .put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + i))); + .put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + i))); } } @@ -100,14 +101,12 @@ public void tearDown() throws IOException { @Test public void test() throws IOException { try (RegionScannerImpl scanner = REGION.getScanner(new Scan())) { - StoreScanner storeScanner = - (StoreScanner) scanner.storeHeap.getCurrentForTesting(); + StoreScanner storeScanner = (StoreScanner) scanner.storeHeap.getCurrentForTesting(); for (KeyValueScanner kvs : storeScanner.getAllScannersForTesting()) { if (kvs instanceof StoreFileScanner) { StoreFileScanner sfScanner = (StoreFileScanner) kvs; // starting from pread so we use shared reader here. - assertTrue(sfScanner.getReader().getReaderContext() - .getReaderType() == ReaderType.PREAD); + assertTrue(sfScanner.getReader().getReaderContext().getReaderType() == ReaderType.PREAD); } } List cells = new ArrayList<>(); @@ -122,8 +121,7 @@ public void test() throws IOException { if (kvs instanceof StoreFileScanner) { StoreFileScanner sfScanner = (StoreFileScanner) kvs; // we should have convert to use stream read now. - assertFalse(sfScanner.getReader().getReaderContext() - .getReaderType() == ReaderType.PREAD); + assertFalse(sfScanner.getReader().getReaderContext().getReaderType() == ReaderType.PREAD); } } for (int i = 500; i < 1000; i++) { @@ -155,8 +153,7 @@ private void testFilter(Filter filter) throws IOException { if (kvs instanceof StoreFileScanner) { StoreFileScanner sfScanner = (StoreFileScanner) kvs; // starting from pread so we use shared reader here. - assertTrue(sfScanner.getReader().getReaderContext() - .getReaderType() == ReaderType.PREAD); + assertTrue(sfScanner.getReader().getReaderContext().getReaderType() == ReaderType.PREAD); } } List cells = new ArrayList<>(); @@ -170,8 +167,7 @@ private void testFilter(Filter filter) throws IOException { if (kvs instanceof StoreFileScanner) { StoreFileScanner sfScanner = (StoreFileScanner) kvs; // we should have convert to use stream read now. 
- assertFalse(sfScanner.getReader().getReaderContext() - .getReaderType() == ReaderType.PREAD); + assertFalse(sfScanner.getReader().getReaderContext().getReaderType() == ReaderType.PREAD); } } assertFalse(scanner.next(cells, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java index d9f661abc32e..97c22fd93e8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertTrue; + import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -26,7 +27,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestSyncTimeRangeTracker extends TestSimpleTimeRangeTracker { @ClassRule @@ -47,16 +48,16 @@ protected TimeRangeTracker getTimeRangeTracker(long min, long max) { } /** - * Run a bunch of threads against a single TimeRangeTracker and ensure we arrive - * at right range. Here we do ten threads each incrementing over 100k at an offset - * of the thread index; max is 10 * 10k and min is 0. + * Run a bunch of threads against a single TimeRangeTracker and ensure we arrive at right range. + * Here we do ten threads each incrementing over 100k at an offset of the thread index; max is 10 + * * 10k and min is 0. */ @Test public void testArriveAtRightAnswer() throws InterruptedException { final TimeRangeTracker trr = getTimeRangeTracker(); final int threadCount = 10; final int calls = 1000 * 1000; - Thread [] threads = new Thread[threadCount]; + Thread[] threads = new Thread[threadCount]; for (int i = 0; i < threads.length; i++) { Thread t = new Thread("" + i) { @Override @@ -128,6 +129,7 @@ static class TrtUpdateRunnable implements Runnable { private TimeRangeTracker trt; private RandomTestData data; + public TrtUpdateRunnable(final TimeRangeTracker trt, final RandomTestData data) { this.trt = trt; this.data = data; @@ -142,15 +144,15 @@ public void run() { } /** - * Run a bunch of threads against a single TimeRangeTracker and ensure we arrive - * at right range. The data chosen is going to ensure that there are lots collisions, i.e, - * some other threads may already update the value while one tries to update min/max value. + * Run a bunch of threads against a single TimeRangeTracker and ensure we arrive at right range. + * The data chosen is going to ensure that there are lots collisions, i.e, some other threads may + * already update the value while one tries to update min/max value. 
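A tracker like this typically survives those collisions with a compare-and-set retry loop instead of a lock. The following is a minimal sketch of the minimum side only (the maximum side is symmetric); it illustrates the idea and is not the TimeRangeTracker source:

  // Hedged sketch: retry the CAS while our timestamp is still smaller than the recorded minimum.
  static void includeTimestamp(java.util.concurrent.atomic.AtomicLong min, long ts) {
    long cur = min.get();
    while (ts < cur && !min.compareAndSet(cur, ts)) {
      cur = min.get(); // another thread won the race; re-read and retry if still smaller
    }
  }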
*/ @Test public void testConcurrentIncludeTimestampCorrectness() { RandomTestData[] testData = new RandomTestData[NUM_OF_THREADS]; long min = Long.MAX_VALUE, max = 0; - for (int i = 0; i < NUM_OF_THREADS; i ++) { + for (int i = 0; i < NUM_OF_THREADS; i++) { testData[i] = new RandomTestData(); if (testData[i].getMin() < min) { min = testData[i].getMin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java index 26e294b7f4cc..521db44c112a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,12 +73,11 @@ /** * Class that test tags */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestTags { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTags.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestTags.class); static boolean useFilter = false; @@ -92,7 +91,7 @@ public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt("hfile.format.version", 3); conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - TestCoprocessorForTags.class.getName()); + TestCoprocessorForTags.class.getName()); TEST_UTIL.startMiniCluster(2); } @@ -120,10 +119,9 @@ public void testTags() throws Exception { byte[] row2 = Bytes.toBytes("rowc"); - TableDescriptor tableDescriptor = - TableDescriptorBuilder + TableDescriptor tableDescriptor = TableDescriptorBuilder .newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam) - .setBlockCacheEnabled(true).setDataBlockEncoding(DataBlockEncoding.NONE).build()) + .setBlockCacheEnabled(true).setDataBlockEncoding(DataBlockEncoding.NONE).build()) .build(); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptor); @@ -185,8 +183,7 @@ public void testFlushAndCompactionWithoutTags() throws Exception { byte[] row2 = Bytes.toBytes("rowc"); - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(tableName) + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily( ColumnFamilyDescriptorBuilder.newBuilder(fam).setBlockCacheEnabled(true).build()) .build(); @@ -231,8 +228,7 @@ public void testFlushAndCompactionWithoutTags() throws Exception { assertEquals(0, current.getTagsLength()); } } finally { - if (scanner != null) - scanner.close(); + if (scanner != null) scanner.close(); } admin.compact(tableName); while (admin.getCompactionState(tableName) != CompactionState.NONE) { @@ -276,9 +272,9 @@ public void testFlushAndCompactionwithCombinations() throws Exception { Table table = null; for (DataBlockEncoding encoding : DataBlockEncoding.values()) { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam).setBlockCacheEnabled(true) - .setDataBlockEncoding(encoding).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam).setBlockCacheEnabled(true) + .setDataBlockEncoding(encoding).build()) + .build(); 
Admin admin = TEST_UTIL.getAdmin(); admin.createTable(tableDescriptor); try { @@ -389,7 +385,7 @@ public void testTagsWithAppendAndIncrement() throws Exception { byte[] row2 = Bytes.toBytes("r2"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f)).build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); Table table = null; @@ -427,7 +423,7 @@ public void testTagsWithAppendAndIncrement() throws Exception { assertEquals(2, tags.size()); // We cannot assume the ordering of tags List tagValues = new ArrayList<>(); - for (Tag tag: tags) { + for (Tag tag : tags) { tagValues.add(Bytes.toString(Tag.cloneValue(tag))); } assertTrue(tagValues.contains("tag1")); @@ -485,7 +481,7 @@ public void testTagsWithAppendAndIncrement() throws Exception { assertEquals(2, tags.size()); // We cannot assume the ordering of tags tagValues.clear(); - for (Tag tag: tags) { + for (Tag tag : tags) { tagValues.add(Bytes.toString(Tag.cloneValue(tag))); } assertTrue(tagValues.contains("tag1")); @@ -543,8 +539,7 @@ private void result(byte[] fam, byte[] row, byte[] qual, byte[] row2, Table tabl assertTrue(Bytes.equals(next2.getValue(fam, qual), value2)); } finally { - if (scanner != null) - scanner.close(); + if (scanner != null) scanner.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimestampFilterSeekHint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimestampFilterSeekHint.java index e370d6853d0d..0418838788a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimestampFilterSeekHint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimestampFilterSeekHint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestTimestampFilterSeekHint { @ClassRule @@ -72,10 +72,9 @@ public void testGetSeek() throws IOException { final long finalSeekCount = StoreFileScanner.getSeekCount(); /* - Make sure there's more than one. - Aka one seek to get to the row, and one to get to the time. - */ - assertTrue(finalSeekCount >= initialSeekCount + 3 ); + * Make sure there's more than one. Aka one seek to get to the row, and one to get to the time. + */ + assertTrue(finalSeekCount >= initialSeekCount + 3); } @Test @@ -89,22 +88,18 @@ public void testGetDoesntSeekWithNoHint() throws IOException { region.get(g); final long finalSeekCount = StoreFileScanner.getSeekCount(); - assertTrue(finalSeekCount >= initialSeekCount ); + assertTrue(finalSeekCount >= initialSeekCount); assertTrue(finalSeekCount < initialSeekCount + 3); } @Before public void prepareRegion() throws IOException { - ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(FAMILY)) - .setBlocksize(1024) - .setMaxVersions(MAX_VERSIONS) - .build(); - region = TEST_UTIL - .createTestRegion("TestTimestampFilterSeekHint" + regionCount++, columnFamilyDescriptor); - - for (long i = 0; i First I need to set up some mocks for Server and RegionServerServices. 
I also need to - * set up a dodgy WAL that will throw an exception when we go to append to it. + * Reproduce locking up that happens when we get an inopportune sync during setup for zigzaglatch + * wait. See HBASE-14317. If below is broken, we will see this test timeout because it is locked + * up. + * <p>
          + * First I need to set up some mocks for Server and RegionServerServices. I also need to set up a + * dodgy WAL that will throw an exception when we go to append to it. */ @Test public void testLockupWhenSyncInMiddleOfZigZagSetup() throws IOException { @@ -218,9 +218,8 @@ public void testLockupWhenSyncInMiddleOfZigZagSetup() throws IOException { logRoller.start(); // Now get a region and start adding in edits. final HRegion region = initHRegion(tableName, null, null, CONF, dodgyWAL); - byte [] bytes = Bytes.toBytes(getName()); - NavigableMap scopes = new TreeMap<>( - Bytes.BYTES_COMPARATOR); + byte[] bytes = Bytes.toBytes(getName()); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(COLUMN_FAMILY_BYTES, 0); MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); try { @@ -230,7 +229,7 @@ public void testLockupWhenSyncInMiddleOfZigZagSetup() throws IOException { Put put = new Put(bytes); put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes); WALKeyImpl key = new WALKeyImpl(region.getRegionInfo().getEncodedNameAsBytes(), - TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime(), mvcc, scopes); + TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime(), mvcc, scopes); WALEdit edit = new WALEdit(); CellScanner CellScanner = put.cellScanner(); assertTrue(CellScanner.advance()); @@ -256,7 +255,7 @@ public void testLockupWhenSyncInMiddleOfZigZagSetup() throws IOException { // Get a memstore flush going too so we have same hung profile as up in the issue over // in HBASE-14317. Flush hangs trying to get sequenceid because the ringbuffer is held up // by the zigzaglatch waiting on syncs to come home. - Thread t = new Thread ("Flusher") { + Thread t = new Thread("Flusher") { @Override public void run() { try { @@ -305,13 +304,12 @@ public void run() { } /** - * * If below is broken, we will see this test timeout because RingBufferEventHandler was stuck in * attainSafePoint. Everyone will wait for sync to finish forever. See HBASE-14317. */ @Test public void testRingBufferEventHandlerStuckWhenSyncFailed() - throws IOException, InterruptedException { + throws IOException, InterruptedException { // A WAL that we can have throw exceptions and slow FSHLog.replaceWriter down class DodgyFSLog extends FSHLog { @@ -319,7 +317,7 @@ class DodgyFSLog extends FSHLog { private volatile boolean zigZagCreated = false; public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf) - throws IOException { + throws IOException { super(fs, root, logDir, conf); } @@ -424,7 +422,8 @@ public void run() { try { LOG.info("Call sync for testing whether RingBufferEventHandler is hanging."); - dodgyWAL.sync(false); // Should not get a hang here, otherwise we will see timeout in this test. + dodgyWAL.sync(false); // Should not get a hang here, otherwise we will see timeout in this + // test. 
Assert.fail("Expect an IOException here."); } catch (IOException ignore) { } @@ -447,8 +446,8 @@ public void run() { */ private static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, Configuration conf, WAL wal) throws IOException { - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java index d29c09675563..d11254a6a0a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -122,8 +122,8 @@ private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey CommonFSUtils.setRootDir(walConf, tableDir); this.walConf = walConf; wals = new WALFactory(walConf, "log_" + replicaId); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDefaultRootDirPath(), conf, htd, wals.getWAL(info)); return region; @@ -236,8 +236,8 @@ public void testWALMonotonicallyIncreasingSeqId() throws Exception { if (currentSeqid > currentMaxSeqid) { currentMaxSeqid = currentSeqid; } else { - fail("Current max Seqid is " + currentMaxSeqid + - ", but the next seqid in wal is smaller:" + currentSeqid); + fail("Current max Seqid is " + currentMaxSeqid + + ", but the next seqid in wal is smaller:" + currentSeqid); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 006be2124da5..97deba0051c1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,8 +48,8 @@ import org.mockito.Mockito; /** - * This test verifies the correctness of the Per Column Family flushing strategy - * when part of the memstores are compacted memstores + * This test verifies the correctness of the Per Column Family flushing strategy when part of the + * memstores are compacted memstores */ @Category({ RegionServerTests.class, LargeTests.class }) public class TestWalAndCompactingMemStoreFlush { @@ -60,8 +60,8 @@ public class TestWalAndCompactingMemStoreFlush { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Path DIR = TEST_UTIL.getDataTestDir("TestHRegion"); - public static final TableName TABLENAME = TableName.valueOf("TestWalAndCompactingMemStoreFlush", - "t1"); + public static final TableName TABLENAME = + TableName.valueOf("TestWalAndCompactingMemStoreFlush", "t1"); public static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2"), Bytes.toBytes("f3"), Bytes.toBytes("f4"), Bytes.toBytes("f5") }; @@ -80,7 +80,7 @@ private HRegion initHRegion(String callingMethod, Configuration conf) throws IOE // even column families are going to have compacted memstore if (i % 2 == 0) { cfBuilder.setInMemoryCompaction(MemoryCompactionPolicy - .valueOf(conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY))); + .valueOf(conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY))); } else { cfBuilder.setInMemoryCompaction(MemoryCompactionPolicy.NONE); } @@ -90,8 +90,7 @@ private HRegion initHRegion(String callingMethod, Configuration conf) throws IOE RegionInfo info = RegionInfoBuilder.newBuilder(TABLENAME).build(); Path path = new Path(DIR, callingMethod); - HRegion region = - HBaseTestingUtil.createRegionAndWAL(info, path, conf, builder.build(), false); + HRegion region = HBaseTestingUtil.createRegionAndWAL(info, path, conf, builder.build(), false); region.regionServicesForStores = Mockito.spy(region.regionServicesForStores); ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(1); Mockito.when(region.regionServicesForStores.getInMemoryCompactionPool()).thenReturn(pool); @@ -101,7 +100,7 @@ private HRegion initHRegion(String callingMethod, Configuration conf) throws IOE // A helper function to create puts. private Put createPut(int familyNum, int putNum) { - byte[] qf = Bytes.toBytes("q" + familyNum); + byte[] qf = Bytes.toBytes("q" + familyNum); byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum); byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum); Put p = new Put(row); @@ -111,7 +110,7 @@ private Put createPut(int familyNum, int putNum) { // A helper function to create double puts, so something can be compacted later. 
private Put createDoublePut(int familyNum, int putNum) { - byte[] qf = Bytes.toBytes("q" + familyNum); + byte[] qf = Bytes.toBytes("q" + familyNum); byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum); byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum); Put p = new Put(row); @@ -123,15 +122,15 @@ private Put createDoublePut(int familyNum, int putNum) { private void verifyInMemoryFlushSize(Region region) { assertEquals( - ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).getInmemoryFlushSize(), - ((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore).getInmemoryFlushSize()); + ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).getInmemoryFlushSize(), + ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore).getInmemoryFlushSize()); } @Before public void setup() { conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, - FlushNonSloppyStoresFirstPolicy.class.getName()); + FlushNonSloppyStoresFirstPolicy.class.getName()); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); } @@ -142,14 +141,14 @@ public void testSelectiveFlushWithEager() throws IOException { conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024); // set memstore to do data compaction conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(MemoryCompactionPolicy.EAGER)); + String.valueOf(MemoryCompactionPolicy.EAGER)); // Intialize the region HRegion region = initHRegion("testSelectiveFlushWithEager", conf); verifyInMemoryFlushSize(region); // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 for (int i = 1; i <= 1200; i++) { - region.put(createPut(1, i)); // compacted memstore, all the keys are unique + region.put(createPut(1, i)); // compacted memstore, all the keys are unique if (i <= 100) { region.put(createPut(2, i)); @@ -178,17 +177,15 @@ public void testSelectiveFlushWithEager() throws IOException { MemStoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize(); // Get the overall smallest LSN in the region's memstores. - long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region) - .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseI = + getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); String s = "\n\n----------------------------------\n" - + "Upon initial insert and before any flush, size of CF1 is:" - + cf1MemstoreSizePhaseI + ", is CF1 compacted memstore?:" - + region.getStore(FAMILY1).isSloppyMemStore() + ". Size of CF2 is:" - + cf2MemstoreSizePhaseI + ", is CF2 compacted memstore?:" - + region.getStore(FAMILY2).isSloppyMemStore() + ". Size of CF3 is:" - + cf3MemstoreSizePhaseI + ", is CF3 compacted memstore?:" - + region.getStore(FAMILY3).isSloppyMemStore() + "\n"; + + "Upon initial insert and before any flush, size of CF1 is:" + cf1MemstoreSizePhaseI + + ", is CF1 compacted memstore?:" + region.getStore(FAMILY1).isSloppyMemStore() + + ". Size of CF2 is:" + cf2MemstoreSizePhaseI + ", is CF2 compacted memstore?:" + + region.getStore(FAMILY2).isSloppyMemStore() + ". 
Size of CF3 is:" + cf3MemstoreSizePhaseI + + ", is CF3 compacted memstore?:" + region.getStore(FAMILY3).isSloppyMemStore() + "\n"; // The overall smallest LSN in the region's memstores should be the same as // the LSN of the smallest edit in CF1 @@ -203,10 +200,9 @@ public void testSelectiveFlushWithEager() throws IOException { // The total memstore size should be the same as the sum of the sizes of // memstores of CF1, CF2 and CF3. - String msg = "totalMemstoreSize="+totalMemstoreSize + - " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI + - " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI + - " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ; + String msg = "totalMemstoreSize=" + totalMemstoreSize + " cf1MemstoreSizePhaseI=" + + cf1MemstoreSizePhaseI + " cf2MemstoreSizePhaseI=" + cf2MemstoreSizePhaseI + + " cf3MemstoreSizePhaseI=" + cf3MemstoreSizePhaseI; assertEquals(msg, totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize()); @@ -228,8 +224,8 @@ public void testSelectiveFlushWithEager() throws IOException { MemStoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region) - .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseII = + getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2); @@ -249,8 +245,7 @@ public void testSelectiveFlushWithEager() throws IOException { // verify that CF3 was flushed to memory and was compacted (this is approximation check) assertTrue(cf3MemstoreSizePhaseI.getDataSize() > cf3MemstoreSizePhaseII.getDataSize()); - assertTrue( - cf3MemstoreSizePhaseI.getHeapSize() / 2 > cf3MemstoreSizePhaseII.getHeapSize()); + assertTrue(cf3MemstoreSizePhaseI.getHeapSize() / 2 > cf3MemstoreSizePhaseII.getHeapSize()); // Now the smallest LSN in the region should be the same as the smallest // LSN in the memstore of CF1. @@ -263,17 +258,16 @@ public void testSelectiveFlushWithEager() throws IOException { } s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseII - + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseII + ", " + - "the smallest sequence in CF2:" - + smallestSeqCF2PhaseII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseII + "\n"; + + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseII + ", " + + "the smallest sequence in CF2:" + smallestSeqCF2PhaseII + + ", the smallest sequence in CF3:" + smallestSeqCF3PhaseII + "\n"; // How much does the CF1 memstore occupy? Will be used later. MemStoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getMemStoreSize(); long smallestSeqCF1PhaseIII = region.getOldestSeqIdOfStore(FAMILY1); s = s + "----After more puts into CF1 its size is:" + cf1MemstoreSizePhaseIII - + ", and its sequence is:" + smallestSeqCF1PhaseIII + " ----\n" ; - + + ", and its sequence is:" + smallestSeqCF1PhaseIII + " ----\n"; // Flush!!!!!!!!!!!!!!!!!!!!!! 
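// For reference, the configuration exercised above (setup() plus testSelectiveFlushWithEager)
// boils down to a handful of knobs. A minimal sketch; the family names are illustrative and the
// values mirror the ones used in this test rather than recommended defaults:
Configuration conf = HBaseConfiguration.create();
conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
  FlushNonSloppyStoresFirstPolicy.class.getName());
conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024);
conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
  String.valueOf(MemoryCompactionPolicy.EAGER));
// Per-family policy, as done in initHRegion(): a compacting (sloppy) memstore for one family,
// a plain DefaultMemStore for another.
ColumnFamilyDescriptor compacting = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
  .setInMemoryCompaction(MemoryCompactionPolicy.EAGER).build();
ColumnFamilyDescriptor plain = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f2"))
  .setInMemoryCompaction(MemoryCompactionPolicy.NONE).build();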
// Flush again, CF1 is flushed to disk @@ -286,21 +280,19 @@ public void testSelectiveFlushWithEager() throws IOException { MemStoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseIV = getWAL(region) - .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseIV = + getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); long smallestSeqCF1PhaseIV = region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseIV = region.getOldestSeqIdOfStore(FAMILY2); long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3); s = s + "----After SECOND FLUSH, CF1 size is:" + cf1MemstoreSizePhaseIV + ", CF2 size is:" - + cf2MemstoreSizePhaseIV + " and CF3 size is:" + cf3MemstoreSizePhaseIV - + "\n"; + + cf2MemstoreSizePhaseIV + " and CF3 size is:" + cf3MemstoreSizePhaseIV + "\n"; s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIV - + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " + - "the smallest sequence in CF2:" - + smallestSeqCF2PhaseIV +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV - + "\n"; + + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " + + "the smallest sequence in CF2:" + smallestSeqCF2PhaseIV + + ", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV + "\n"; // CF1's pipeline component (inserted before first flush) should be flushed to disk // CF2 should be flushed to disk @@ -326,8 +318,8 @@ public void testSelectiveFlushWithEager() throws IOException { MemStoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getMemStoreSize(); MemStoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region) - .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseV = + getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); assertEquals(0, cf1MemstoreSizePhaseV.getDataSize()); assertEquals(MutableSegment.DEEP_OVERHEAD, cf1MemstoreSizePhaseV.getHeapSize()); @@ -355,9 +347,8 @@ public void testSelectiveFlushWithEager() throws IOException { s = s + "----AFTER THIRD AND FORTH FLUSH, The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseV - + ". After additional inserts and last flush, the entire region size is:" + region - .getMemStoreDataSize() - + "\n----------------------------------\n"; + + ". 
After additional inserts and last flush, the entire region size is:" + + region.getMemStoreDataSize() + "\n----------------------------------\n"; // Since we won't find any CF above the threshold, and hence no specific // store to flush, we should flush all the memstores @@ -379,7 +370,7 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // set memstore to index-compaction conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(MemoryCompactionPolicy.BASIC)); + String.valueOf(MemoryCompactionPolicy.BASIC)); // Initialize the region HRegion region = initHRegion("testSelectiveFlushWithIndexCompaction", conf); @@ -388,7 +379,7 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { /* PHASE I - insertions */ // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 for (int i = 1; i <= 1200; i++) { - region.put(createPut(1, i)); // compacted memstore + region.put(createPut(1, i)); // compacted memstore if (i <= 100) { region.put(createPut(2, i)); if (i <= 50) { @@ -414,8 +405,8 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { MemStoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize(); // Get the overall smallest LSN in the region's memstores. - long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region) - .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseI = + getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); /*------------------------------------------------------------------------------*/ /* PHASE I - validation */ @@ -463,8 +454,8 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { MemStoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getMemStoreSize(); MemStoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region) - .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseII = + getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); // Find the smallest LSNs for edits wrt to each CF. 
long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3); long totalMemstoreSizePhaseII = region.getMemStoreDataSize(); @@ -472,14 +463,14 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { /*------------------------------------------------------------------------------*/ /* PHASE II - validation */ // CF1 was flushed to memory, should be flattened and take less space - assertEquals(cf1MemstoreSizePhaseII.getDataSize() , cf1MemstoreSizePhaseI.getDataSize()); + assertEquals(cf1MemstoreSizePhaseII.getDataSize(), cf1MemstoreSizePhaseI.getDataSize()); assertTrue(cf1MemstoreSizePhaseII.getHeapSize() < cf1MemstoreSizePhaseI.getHeapSize()); // CF2 should become empty assertEquals(0, cf2MemstoreSizePhaseII.getDataSize()); assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSizePhaseII.getHeapSize()); // verify that CF3 was flushed to memory and was not compacted (this is an approximation check) // if compacted CF# should be at least twice less because its every key was duplicated - assertEquals(cf3MemstoreSizePhaseII.getDataSize() , cf3MemstoreSizePhaseI.getDataSize()); + assertEquals(cf3MemstoreSizePhaseII.getDataSize(), cf3MemstoreSizePhaseI.getDataSize()); assertTrue(cf3MemstoreSizePhaseI.getHeapSize() / 2 < cf3MemstoreSizePhaseII.getHeapSize()); // Now the smallest LSN in the region should be the same as the smallest @@ -534,8 +525,8 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { MemStoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getMemStoreSize(); MemStoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseIV = getWAL(region) - .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseIV = + getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3); /*------------------------------------------------------------------------------*/ @@ -566,8 +557,8 @@ public void testSelectiveFlushWithIndexCompaction() throws IOException { MemStoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getMemStoreSize(); MemStoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getMemStoreSize(); MemStoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize(); - long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region) - .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstorePhaseV = + getWAL(region).getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); long totalMemstoreSizePhaseV = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ @@ -632,7 +623,7 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024); // set memstore to do data compaction and not to use the speculative scan conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(MemoryCompactionPolicy.EAGER)); + String.valueOf(MemoryCompactionPolicy.EAGER)); // Intialize the HRegion HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); @@ -670,11 +661,10 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { // The total 
memstore size should be the same as the sum of the sizes of // memstores of CF1, CF2 and CF3. - String msg = "totalMemstoreSize="+totalMemstoreSize + - " DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD + - " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI + - " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI + - " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ; + String msg = "totalMemstoreSize=" + totalMemstoreSize + " DefaultMemStore.DEEP_OVERHEAD=" + + DefaultMemStore.DEEP_OVERHEAD + " cf1MemstoreSizePhaseI=" + cf1MemstoreSizePhaseI + + " cf2MemstoreSizePhaseI=" + cf2MemstoreSizePhaseI + " cf3MemstoreSizePhaseI=" + + cf3MemstoreSizePhaseI; assertEquals(msg, totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize()); @@ -697,12 +687,11 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { assertEquals(0, cf2MemstoreSizePhaseII.getDataSize()); assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSizePhaseII.getHeapSize()); - String s = "\n\n----------------------------------\n" - + "Upon initial insert and flush, LSN of CF1 is:" - + smallestSeqCF1PhaseII + ". LSN of CF2 is:" - + smallestSeqCF2PhaseII + ". LSN of CF3 is:" - + smallestSeqCF3PhaseII + ", smallestSeqInRegionCurrentMemstore:" - + smallestSeqInRegionCurrentMemstorePhaseII + "\n"; + String s = + "\n\n----------------------------------\n" + "Upon initial insert and flush, LSN of CF1 is:" + + smallestSeqCF1PhaseII + ". LSN of CF2 is:" + smallestSeqCF2PhaseII + + ". LSN of CF3 is:" + smallestSeqCF3PhaseII + ", smallestSeqInRegionCurrentMemstore:" + + smallestSeqInRegionCurrentMemstorePhaseII + "\n"; // Add same entries to compact them later for (int i = 1; i <= 1200; i++) { @@ -726,9 +715,9 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { long smallestSeqCF3PhaseIII = region.getOldestSeqIdOfStore(FAMILY3); s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIII - + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIII + ", " + - "the smallest sequence in CF2:" - + smallestSeqCF2PhaseIII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIII + "\n"; + + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIII + ", " + + "the smallest sequence in CF2:" + smallestSeqCF2PhaseIII + + ", the smallest sequence in CF3:" + smallestSeqCF3PhaseIII + "\n"; // Flush! 
cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore; @@ -744,13 +733,13 @@ public void testSelectiveFlushAndWALinDataCompaction() throws IOException { long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3); s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIV - + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " + - "the smallest sequence in CF2:" - + smallestSeqCF2PhaseIV +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV + "\n"; + + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " + + "the smallest sequence in CF2:" + smallestSeqCF2PhaseIV + + ", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV + "\n"; // now check that the LSN of the entire WAL, of CF1 and of CF3 has progressed due to compaction - assertTrue(s, smallestSeqInRegionCurrentMemstorePhaseIV > - smallestSeqInRegionCurrentMemstorePhaseIII); + assertTrue(s, + smallestSeqInRegionCurrentMemstorePhaseIV > smallestSeqInRegionCurrentMemstorePhaseIII); assertTrue(smallestSeqCF1PhaseIV > smallestSeqCF1PhaseIII); assertTrue(smallestSeqCF3PhaseIV > smallestSeqCF3PhaseIII); @@ -765,7 +754,7 @@ public void testSelectiveFlushWithBasicAndMerge() throws IOException { conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.8); // set memstore to do index compaction with merge conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(MemoryCompactionPolicy.BASIC)); + String.valueOf(MemoryCompactionPolicy.BASIC)); // length of pipeline that requires merge conf.setInt(MemStoreCompactionStrategy.COMPACTING_MEMSTORE_THRESHOLD_KEY, 1); @@ -790,10 +779,10 @@ public void testSelectiveFlushWithBasicAndMerge() throws IOException { long totalMemstoreSize = region.getMemStoreDataSize(); // test in-memory flashing into CAM here - ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).setIndexType( - CompactingMemStore.IndexType.ARRAY_MAP); - ((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore).setIndexType( - CompactingMemStore.IndexType.ARRAY_MAP); + ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore) + .setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); + ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore) + .setIndexType(CompactingMemStore.IndexType.ARRAY_MAP); // Find the sizes of the memstores of each CF. MemStoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize(); @@ -807,19 +796,18 @@ public void testSelectiveFlushWithBasicAndMerge() throws IOException { // The total memstore size should be the same as the sum of the sizes of // memstores of CF1, CF2 and CF3. - assertEquals(totalMemstoreSize, - cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize() - + cf3MemstoreSizePhaseI.getDataSize()); + assertEquals(totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize() + + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize()); // Initiate in-memory Flush! 
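// The trigger-and-wait sequence that follows (and recurs several times in this class), i.e.
// flushInMemory() followed by polling isMemStoreFlushingInMemory(), could be captured in one
// helper. A sketch only; the helper name is hypothetical and it relies on the same test-only
// access to HStore.memstore that the test already uses:
private static void flushInMemoryAndWait(HRegion region, byte[] family) throws IOException {
  CompactingMemStore ms = (CompactingMemStore) ((HStore) region.getStore(family)).memstore;
  ms.flushInMemory();                        // kick off the in-memory flush/flatten
  while (ms.isMemStoreFlushingInMemory()) {  // wait for the background work to finish
    Threads.sleep(10);
  }
}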
- ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).flushInMemory(); - ((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore).flushInMemory(); + ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).flushInMemory(); + ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore).flushInMemory(); // CF1 and CF3 should be flatten and merged so wait here to be sure the merge is done - while (((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore) + while (((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore) .isMemStoreFlushingInMemory()) { Threads.sleep(10); } - while (((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore) + while (((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore) .isMemStoreFlushingInMemory()) { Threads.sleep(10); } @@ -856,14 +844,14 @@ public void testSelectiveFlushWithBasicAndMerge() throws IOException { MemStoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getMemStoreSize(); // Flush in memory! - ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).flushInMemory(); - ((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore).flushInMemory(); + ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).flushInMemory(); + ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore).flushInMemory(); // CF1 and CF3 should be merged so wait here to be sure the merge is done - while (((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore) + while (((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore) .isMemStoreFlushingInMemory()) { Threads.sleep(10); } - while (((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore) + while (((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore) .isMemStoreFlushingInMemory()) { Threads.sleep(10); } @@ -872,29 +860,29 @@ public void testSelectiveFlushWithBasicAndMerge() throws IOException { MemStoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getMemStoreSize(); MemStoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getMemStoreSize(); - assertEquals(2*cf1MemstoreSizePhaseI.getDataSize(), cf1MemstoreSizePhaseIV.getDataSize()); + assertEquals(2 * cf1MemstoreSizePhaseI.getDataSize(), cf1MemstoreSizePhaseIV.getDataSize()); // the decrease in the heap size due to usage of CellArrayMap instead of CSLM // should be the same in flattening and in merge (first and second in-memory-flush) // but in phase 1 we do not yet have immutable segment - assertEquals( - cf1MemstoreSizePhaseI.getHeapSize() - cf1MemstoreSizePhaseII.getHeapSize(), - cf1MemstoreSizePhaseIII.getHeapSize() - cf1MemstoreSizePhaseIV.getHeapSize() - - CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); + assertEquals(cf1MemstoreSizePhaseI.getHeapSize() - cf1MemstoreSizePhaseII.getHeapSize(), + cf1MemstoreSizePhaseIII.getHeapSize() - cf1MemstoreSizePhaseIV.getHeapSize() + - CellArrayImmutableSegment.DEEP_OVERHEAD_CAM); assertEquals(3, // active, one in pipeline, snapshot - ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).getSegments().size()); + ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).getSegments().size()); // CF2 should have been cleared - assertEquals("\n<<< DEBUG: The data--heap sizes of stores before/after first flushes," - + " CF1: " + cf1MemstoreSizePhaseI.getDataSize() + "/" + cf1MemstoreSizePhaseII - .getDataSize() + "--" + cf1MemstoreSizePhaseI.getHeapSize() + "/" + cf1MemstoreSizePhaseII - .getHeapSize() + ", CF2: " + 
cf2MemstoreSizePhaseI.getDataSize() + "/" - + cf2MemstoreSizePhaseII.getDataSize() + "--" + cf2MemstoreSizePhaseI.getHeapSize() + "/" - + cf2MemstoreSizePhaseII.getHeapSize() + ", CF3: " + cf3MemstoreSizePhaseI.getDataSize() - + "/" + cf3MemstoreSizePhaseII.getDataSize() + "--" + cf3MemstoreSizePhaseI.getHeapSize() - + "/" + cf3MemstoreSizePhaseII.getHeapSize() + "\n<<< AND before/after second flushes " - + " CF1: " + cf1MemstoreSizePhaseIII.getDataSize() + "/" + cf1MemstoreSizePhaseIV - .getDataSize() + "--" + cf1MemstoreSizePhaseIII.getHeapSize() + "/" + cf1MemstoreSizePhaseIV - .getHeapSize() + "\n", - 0, cf2MemstoreSizePhaseIV.getDataSize()); + assertEquals( + "\n<<< DEBUG: The data--heap sizes of stores before/after first flushes," + " CF1: " + + cf1MemstoreSizePhaseI.getDataSize() + "/" + cf1MemstoreSizePhaseII.getDataSize() + "--" + + cf1MemstoreSizePhaseI.getHeapSize() + "/" + cf1MemstoreSizePhaseII.getHeapSize() + + ", CF2: " + cf2MemstoreSizePhaseI.getDataSize() + "/" + + cf2MemstoreSizePhaseII.getDataSize() + "--" + cf2MemstoreSizePhaseI.getHeapSize() + "/" + + cf2MemstoreSizePhaseII.getHeapSize() + ", CF3: " + cf3MemstoreSizePhaseI.getDataSize() + + "/" + cf3MemstoreSizePhaseII.getDataSize() + "--" + cf3MemstoreSizePhaseI.getHeapSize() + + "/" + cf3MemstoreSizePhaseII.getHeapSize() + "\n<<< AND before/after second flushes " + + " CF1: " + cf1MemstoreSizePhaseIII.getDataSize() + "/" + + cf1MemstoreSizePhaseIV.getDataSize() + "--" + cf1MemstoreSizePhaseIII.getHeapSize() + + "/" + cf1MemstoreSizePhaseIV.getHeapSize() + "\n", + 0, cf2MemstoreSizePhaseIV.getDataSize()); HBaseTestingUtil.closeRegionAndWAL(region); } @@ -905,10 +893,10 @@ public void testStressFlushAndWALinIndexCompaction() throws IOException { // Set up the configuration conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 600 * 1024); conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, - 200 * 1024); + 200 * 1024); // set memstore to do data compaction and not to use the speculative scan conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(MemoryCompactionPolicy.BASIC)); + String.valueOf(MemoryCompactionPolicy.BASIC)); // Successfully initialize the HRegion HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); @@ -925,13 +913,13 @@ public void testStressFlushAndWALinIndexCompaction() throws IOException { Threads.sleep(10000); // let other threads continue region.flush(true); // enforce flush of everything TO DISK while there are still ongoing puts - ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).flushInMemory(); - ((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore).flushInMemory(); - while (((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore) + ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).flushInMemory(); + ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore).flushInMemory(); + while (((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore) .isMemStoreFlushingInMemory()) { Threads.sleep(10); } - while (((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore) + while (((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore) .isMemStoreFlushingInMemory()) { Threads.sleep(10); } @@ -996,14 +984,16 @@ public void run() { } } if (startNumber == 10000) { - ((CompactingMemStore) ((HStore) stressedRegion.getStore(FAMILY2)).memstore).flushInMemory(); + ((CompactingMemStore) ((HStore) stressedRegion.getStore(FAMILY2)).memstore) + 
.flushInMemory(); while (((CompactingMemStore) ((HStore) stressedRegion.getStore(FAMILY2)).memstore) .isMemStoreFlushingInMemory()) { Threads.sleep(10); } } if (startNumber == 20000) { - ((CompactingMemStore) ((HStore) stressedRegion.getStore(FAMILY3)).memstore).flushInMemory(); + ((CompactingMemStore) ((HStore) stressedRegion.getStore(FAMILY3)).memstore) + .flushInMemory(); while (((CompactingMemStore) ((HStore) stressedRegion.getStore(FAMILY3)).memstore) .isMemStoreFlushingInMemory()) { Threads.sleep(10); @@ -1017,6 +1007,6 @@ public void run() { } private WAL getWAL(Region region) { - return ((HRegion)region).getWAL(); + return ((HRegion) region).getWAL(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index 5ba1035cf826..7be907ececa3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,7 @@ public class TestWideScanner { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWideScanner.class); + HBaseClassTestRule.forClass(TestWideScanner.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -71,11 +71,11 @@ public class TestWideScanner { private static final TableDescriptor TESTTABLEDESC; static { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf("testwidescan")); + TableDescriptorBuilder.newBuilder(TableName.valueOf("testwidescan")); for (byte[] cfName : new byte[][] { A, B, C }) { // Keep versions to help debugging. 
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName).setMaxVersions(100) - .setBlocksize(8 * 1024).build()); + .setBlocksize(8 * 1024).build()); } TESTTABLEDESC = builder.build(); } @@ -88,7 +88,7 @@ public static void setUp() throws IOException { Path testDir = UTIL.getDataTestDir(); RegionInfo hri = RegionInfoBuilder.newBuilder(TESTTABLEDESC.getTableName()).build(); REGION = - HBaseTestingUtil.createRegionAndWAL(hri, testDir, UTIL.getConfiguration(), TESTTABLEDESC); + HBaseTestingUtil.createRegionAndWAL(hri, testDir, UTIL.getConfiguration(), TESTTABLEDESC); } @AfterClass @@ -157,8 +157,7 @@ public void testWideScanBatching() throws IOException { results.clear(); // trigger ChangedReadersObservers - Iterator scanners = - ((RegionScannerImpl) s).storeHeap.getHeap().iterator(); + Iterator scanners = ((RegionScannerImpl) s).storeHeap.getHeap().iterator(); while (scanners.hasNext()) { StoreScanner ss = (StoreScanner) scanners.next(); ss.updateReaders(Collections.emptyList(), Collections.emptyList()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java index 124d8f10a9f6..f5362c38d2b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.hbase.regionserver.HStoreFile; /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java index ca65bf1178ba..3dddd78adca0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,10 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; - import java.io.IOException; import java.util.ArrayList; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; @@ -31,8 +29,7 @@ public class EverythingPolicy extends RatioBasedCompactionPolicy { /** * Constructor. - * - * @param conf The Conf. + * @param conf The Conf. * @param storeConfigInfo Info about the store. 
*/ public EverythingPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ExplicitFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ExplicitFileListGenerator.java index a76a8a510b69..141b2cc2cc12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ExplicitFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ExplicitFileListGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,35 +22,27 @@ import org.apache.hadoop.hbase.regionserver.HStoreFile; /** - * Class to create list of mock storefiles of specified length. - * This is great for testing edge cases. + * Class to create list of mock storefiles of specified length. This is great for testing edge + * cases. */ class ExplicitFileListGenerator extends StoreFileListGenerator { /** The explicit files size lists to return. */ - private int[][] fileSizes = new int[][]{ - {1000, 350, 200, 100, 20, 10, 10}, - {1000, 450, 200, 100, 20, 10, 10}, - {1000, 550, 200, 100, 20, 10, 10}, - {1000, 650, 200, 100, 20, 10, 10}, - {1, 1, 600, 1, 1, 1, 1}, - {1, 1, 600, 600, 600, 600, 600, 1, 1, 1, 1}, - {1, 1, 600, 600, 600, 1, 1, 1, 1}, - {1000, 250, 25, 25, 25, 25, 25, 25}, - {25, 25, 25, 25, 25, 25, 500}, - {1000, 1000, 1000, 1000, 900}, - {107, 50, 10, 10, 10, 10}, - {2000, 107, 50, 10, 10, 10, 10}, - {9, 8, 7, 6, 5, 4, 3, 2, 1}, - {11, 18, 9, 8, 7, 6, 5, 4, 3, 2, 1}, - {110, 18, 18, 18, 18, 9, 8, 7, 6, 5, 4, 3, 2, 1}, - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15} - }; + private int[][] fileSizes = new int[][] { { 1000, 350, 200, 100, 20, 10, 10 }, + { 1000, 450, 200, 100, 20, 10, 10 }, { 1000, 550, 200, 100, 20, 10, 10 }, + { 1000, 650, 200, 100, 20, 10, 10 }, { 1, 1, 600, 1, 1, 1, 1 }, + { 1, 1, 600, 600, 600, 600, 600, 1, 1, 1, 1 }, { 1, 1, 600, 600, 600, 1, 1, 1, 1 }, + { 1000, 250, 25, 25, 25, 25, 25, 25 }, { 25, 25, 25, 25, 25, 25, 500 }, + { 1000, 1000, 1000, 1000, 900 }, { 107, 50, 10, 10, 10, 10 }, + { 2000, 107, 50, 10, 10, 10, 10 }, { 9, 8, 7, 6, 5, 4, 3, 2, 1 }, + { 11, 18, 9, 8, 7, 6, 5, 4, 3, 2, 1 }, { 110, 18, 18, 18, 18, 9, 8, 7, 6, 5, 4, 3, 2, 1 }, + { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15 } }; @Override public final Iterator> iterator() { return new Iterator>() { private int nextIndex = 0; + @Override public boolean hasNext() { return nextIndex < fileSizes.length; @@ -58,7 +50,7 @@ public boolean hasNext() { @Override public List next() { - List files = createStoreFileList(fileSizes[nextIndex]); + List files = createStoreFileList(fileSizes[nextIndex]); nextIndex += 1; return files; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java index 51930301a8aa..362f201036a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,8 +43,7 @@ public List next() { ArrayList files = new ArrayList<>(NUM_FILES_GEN); for (int i = 0; i < NUM_FILES_GEN; i++) { files.add(createMockStoreFile( - (int) Math.ceil(Math.max(0, gen.nextNormalizedDouble() * 32 + 32))) - ); + (int) Math.ceil(Math.max(0, gen.nextNormalizedDouble() * 32 + 32)))); } return files; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java index d6d84ddbf9c8..7068bc87e230 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.compactions; import static org.mockito.Mockito.mock; @@ -58,9 +57,8 @@ protected HStoreFile createMockStoreFileBytes(final long size) { protected HStoreFile createMockStoreFile(final long sizeInBytes, final long seqId) { HStoreFile mockSf = mock(HStoreFile.class); StoreFileReader reader = mock(StoreFileReader.class); - String stringPath = "/hbase/testTable/regionA/" + - RandomStringUtils.random(FILENAME_LENGTH, 0, 0, true, true, null, - ThreadLocalRandom.current()); + String stringPath = "/hbase/testTable/regionA/" + RandomStringUtils.random(FILENAME_LENGTH, 0, + 0, true, true, null, ThreadLocalRandom.current()); Path path = new Path(stringPath); when(reader.getSequenceID()).thenReturn(seqId); @@ -72,10 +70,8 @@ protected HStoreFile createMockStoreFile(final long sizeInBytes, final long seqI when(mockSf.isReference()).thenReturn(false); // TODO come back to // this when selection takes this into account when(mockSf.getReader()).thenReturn(reader); - String toString = MoreObjects.toStringHelper("MockStoreFile") - .add("isReference", false) - .add("fileSize", StringUtils.humanReadableInt(sizeInBytes)) - .add("seqId", seqId) + String toString = MoreObjects.toStringHelper("MockStoreFile").add("isReference", false) + .add("fileSize", StringUtils.humanReadableInt(sizeInBytes)).add("seqId", seqId) .add("path", stringPath).toString(); when(mockSf.toString()).thenReturn(toString); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index 73204475b42a..83096dd51b15 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.compactions; import static org.mockito.Mockito.mock; @@ -45,16 +44,16 @@ /** * This is not a unit test. It is not run as part of the general unit test suite. It is for - * comparing compaction policies. You must run it explicitly; - * e.g. mvn test -Dtest=PerfTestCompactionPolicies + * comparing compaction policies. You must run it explicitly; e.g. mvn test + * -Dtest=PerfTestCompactionPolicies */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class PerfTestCompactionPolicies extends MockStoreFileGenerator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(PerfTestCompactionPolicies.class); + HBaseClassTestRule.forClass(PerfTestCompactionPolicies.class); private final RatioBasedCompactionPolicy cp; private final StoreFileListGenerator generator; @@ -68,33 +67,20 @@ public class PerfTestCompactionPolicies extends MockStoreFileGenerator { @Parameterized.Parameters public static Collection data() { + Class[] policyClasses = new Class[] { EverythingPolicy.class, + RatioBasedCompactionPolicy.class, ExploringCompactionPolicy.class, }; + Class[] fileListGenClasses = + new Class[] { ExplicitFileListGenerator.class, ConstantSizeFileListGenerator.class, + SemiConstantSizeFileListGenerator.class, GaussianFileListGenerator.class, + SinusoidalFileListGenerator.class, SpikyFileListGenerator.class }; - Class[] policyClasses = new Class[]{ - EverythingPolicy.class, - RatioBasedCompactionPolicy.class, - ExploringCompactionPolicy.class, - }; - - Class[] fileListGenClasses = new Class[]{ - ExplicitFileListGenerator.class, - ConstantSizeFileListGenerator.class, - SemiConstantSizeFileListGenerator.class, - GaussianFileListGenerator.class, - SinusoidalFileListGenerator.class, - SpikyFileListGenerator.class - }; - - int[] maxFileValues = new int[] {10}; - int[] minFilesValues = new int[] {3}; - float[] ratioValues = new float[] {1.2f}; - - List params = new ArrayList<>( - maxFileValues.length - * minFilesValues.length - * fileListGenClasses.length - * policyClasses.length); + int[] maxFileValues = new int[] { 10 }; + int[] minFilesValues = new int[] { 3 }; + float[] ratioValues = new float[] { 1.2f }; + List params = new ArrayList<>(maxFileValues.length * minFilesValues.length + * fileListGenClasses.length * policyClasses.length); for (Class policyClass : policyClasses) { for (Class genClass : fileListGenClasses) { @@ -118,11 +104,8 @@ public static Collection data() { * @param inMin The min number of files to compact * @param inRatio The ratio that files must be under to be compacted. */ - public PerfTestCompactionPolicies( - final Class cpClass, - final Class fileGenClass, - final int inMmax, - final int inMin, + public PerfTestCompactionPolicies(final Class cpClass, + final Class fileGenClass, final int inMmax, final int inMin, final float inRatio) throws IllegalAccessException, InstantiationException, NoSuchMethodException, InvocationTargetException { this.fileGenClass = fileGenClass; @@ -135,7 +118,6 @@ public PerfTestCompactionPolicies( Log4jUtils.setLogLevel(RatioBasedCompactionPolicy.class.getName(), "ERROR"); Log4jUtils.setLogLevel(cpClass.getName(), "ERROR"); - Configuration configuration = HBaseConfiguration.create(); // Make sure that this doesn't include every file. 
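// PerfTestCompactionPolicies runs every (policy, generator) pair produced by data() and prints
// one tab-delimited line per combination. Another file-size distribution can be compared by
// adding a StoreFileListGenerator subclass to the fileListGenClasses array. A sketch of such a
// generator; the class is hypothetical and only illustrates the extension point used by the
// generators in this package:
package org.apache.hadoop.hbase.regionserver.compactions;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.regionserver.HStoreFile;

class UniformFileListGenerator extends StoreFileListGenerator {
  @Override
  public Iterator<List<HStoreFile>> iterator() {
    return new Iterator<List<HStoreFile>>() {
      private int count = 0;

      @Override
      public boolean hasNext() {
        return count < MAX_FILE_GEN_ITERS;
      }

      @Override
      public List<HStoreFile> next() {
        count += 1;
        List<HStoreFile> files = new ArrayList<>(NUM_FILES_GEN);
        for (int i = 0; i < NUM_FILES_GEN; i++) {
          // sizes drawn uniformly from 1..50, an arbitrary illustrative range
          files.add(createMockStoreFile(ThreadLocalRandom.current().nextInt(50) + 1));
        }
        return files;
      }
    };
  }
}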
@@ -145,8 +127,8 @@ public PerfTestCompactionPolicies( store = createMockStore(); this.cp = ReflectionUtils.instantiateWithCustomCtor(cpClass.getName(), - new Class[] {Configuration.class, StoreConfigInformation.class }, - new Object[] {configuration, store }); + new Class[] { Configuration.class, StoreConfigInformation.class }, + new Object[] { configuration, store }); this.generator = fileGenClass.getDeclaredConstructor().newInstance(); // Used for making paths @@ -165,22 +147,14 @@ public final void testSelection() throws Exception { } // print out tab delimited so that it can be used in excel/gdocs. - System.out.println( - cp.getClass().getSimpleName() - + "\t" + fileGenClass.getSimpleName() - + "\t" + max - + "\t" + min - + "\t" + ratio - + "\t" + written - + "\t" + fileDiff - ); + System.out.println(cp.getClass().getSimpleName() + "\t" + fileGenClass.getSimpleName() + "\t" + + max + "\t" + min + "\t" + ratio + "\t" + written + "\t" + fileDiff); } - private List runIteration(List startingStoreFiles) throws IOException { List storeFiles = new ArrayList<>(startingStoreFiles); - CompactionRequestImpl req = cp.selectCompaction( - storeFiles, new ArrayList<>(), false, false, false); + CompactionRequestImpl req = + cp.selectCompaction(storeFiles, new ArrayList<>(), false, false, false); long newFileSize = 0; Collection filesToCompact = req.getFiles(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java index 8c3db2002ecb..e3d8410238cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java index 75779c182440..b5a85ea28c88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.hbase.regionserver.HStoreFile; class SinusoidalFileListGenerator extends StoreFileListGenerator { @@ -29,6 +28,7 @@ class SinusoidalFileListGenerator extends StoreFileListGenerator { public Iterator> iterator() { return new Iterator>() { private int count = 0; + @Override public boolean hasNext() { return count < MAX_FILE_GEN_ITERS; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java index 98c1469d512f..ed74332b5db0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public List next() { Random rand = ThreadLocalRandom.current(); for (int x = 0; x < NUM_FILES_GEN; x++) { int fileSize = rand.nextInt(5) + 1; - if ( x % 10 == 0) { + if (x % 10 == 0) { fileSize = rand.nextInt(5) + 50; } files.add(createMockStoreFile(fileSize)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/StoreFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/StoreFileListGenerator.java index 7828a4a97b95..686696876dc8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/StoreFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/StoreFileListGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java index baa226781ca2..8a160e169d59 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.regionserver.Store; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java index 9d119d393812..4355c9aaee05 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -79,11 +79,11 @@ public class TestCompactedHFilesDischarger { public void setUp() throws Exception { TableName tableName = TableName.valueOf(getClass().getSimpleName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); Path path = testUtil.getDataTestDir(getClass().getSimpleName()); - region = HBaseTestingUtil.createRegionAndWAL(info, path, - testUtil.getConfiguration(), tableDescriptor); + region = HBaseTestingUtil.createRegionAndWAL(info, path, testUtil.getConfiguration(), + tableDescriptor); rss = mock(RegionServerServices.class); List regions = new ArrayList<>(1); regions.add(region); @@ -216,7 +216,7 @@ public void testCleanerWithParallelScannersAfterCompaction() throws Exception { } } compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles(); - for(HStoreFile file : compactedfiles) { + for (HStoreFile file : compactedfiles) { assertEquals("Refcount should be 3", 0, ((HStoreFile) file).getRefCount()); unusedReaderCount++; } @@ -287,7 +287,7 @@ public void testCleanerWithParallelScanners() throws Exception { } } compactedfiles = store.getStoreEngine().getStoreFileManager().getCompactedfiles(); - for(HStoreFile file : compactedfiles) { + for (HStoreFile file : compactedfiles) { assertEquals("Refcount should be 3", 3, ((HStoreFile) file).getRefCount()); usedReaderCount++; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java index 2dbf00c4447a..fb072dba224b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java 
@@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,6 @@ import java.util.Arrays; import java.util.List; import java.util.TreeMap; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCurrentHourProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCurrentHourProvider.java index 4a0e1d0fbcba..393e83d94bb9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCurrentHourProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCurrentHourProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ public class TestCurrentHourProvider { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCurrentHourProvider.class); + HBaseClassTestRule.forClass(TestCurrentHourProvider.class); private static final List ZONE_IDS = Lists.newArrayList("UTC", "US/Pacific", "Etc/GMT+8"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java index 0ea82c57be1a..1804a732adc3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -100,8 +100,8 @@ private DateTieredCompactor createCompactor(StoreFileWritersCapture writers, final Scanner scanner = new Scanner(input); // Create store mock that is satisfactory for compactor. 
ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of(NAME_OF_THINGS); - ScanInfo si = new ScanInfo(conf, familyDescriptor, Long.MAX_VALUE, 0, - CellComparatorImpl.COMPARATOR); + ScanInfo si = + new ScanInfo(conf, familyDescriptor, Long.MAX_VALUE, 0, CellComparatorImpl.COMPARATOR); HStore store = mock(HStore.class); when(store.getStorefiles()).thenReturn(storefiles); when(store.getColumnFamilyDescriptor()).thenReturn(familyDescriptor); @@ -167,8 +167,8 @@ public void test() throws Exception { public void testEmptyOutputFile() throws Exception { StoreFileWritersCapture writers = new StoreFileWritersCapture(); CompactionRequestImpl request = createDummyRequest(); - DateTieredCompactor dtc = createCompactor(writers, new KeyValue[0], - new ArrayList<>(request.getFiles())); + DateTieredCompactor dtc = + createCompactor(writers, new KeyValue[0], new ArrayList<>(request.getFiles())); List paths = dtc.compact(request, Arrays.asList(Long.MIN_VALUE, Long.MAX_VALUE), new HashMap(), NoLimitThroughputController.INSTANCE, null); assertEquals(1, paths.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java index 552d3f360654..ed5bd2f10d6e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -57,6 +57,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.ExpectedException; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @Category({ RegionServerTests.class, MediumTests.class }) @@ -229,7 +230,7 @@ public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception { TEST_UTIL.getAdmin().majorCompact(tableName); TEST_UTIL.waitFor(testWaitTimeoutMs, - (Waiter.Predicate) () -> store.getStorefilesCount() == 1); + (Waiter.Predicate) () -> store.getStorefilesCount() == 1); Assert.assertEquals(1, store.getStorefilesCount()); HStoreFile sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next()); @@ -242,7 +243,7 @@ public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception { TEST_UTIL.getAdmin().majorCompact(tableName); TEST_UTIL.waitFor(testWaitTimeoutMs, - (Waiter.Predicate) () -> store.getStorefilesCount() == 1); + (Waiter.Predicate) () -> store.getStorefilesCount() == 1); Assert.assertEquals(1, store.getStorefilesCount()); sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestOffPeakHours.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestOffPeakHours.java index cd7630ca7c82..f2228d859e78 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestOffPeakHours.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestOffPeakHours.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestOffPeakHours { @ClassRule @@ -54,9 +54,9 @@ public static void setUpClass() { @Before public void setUp() { hourOfDay = 15; - hourPlusOne = ((hourOfDay+1)%24); - hourMinusOne = ((hourOfDay-1+24)%24); - hourMinusTwo = ((hourOfDay-2+24)%24); + hourPlusOne = ((hourOfDay + 1) % 24); + hourMinusOne = ((hourOfDay - 1 + 24) % 24); + hourMinusTwo = ((hourOfDay - 2 + 24) % 24); conf = testUtil.getConfiguration(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java index 47cdecf2abe4..1d454bcdb6cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,6 @@ import java.util.Iterator; import java.util.List; import java.util.OptionalLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -99,7 +98,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @RunWith(Parameterized.class) -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestStripeCompactionPolicy { @ClassRule @@ -117,7 +116,6 @@ public class TestStripeCompactionPolicy { private static final KeyValue KV_D = new KeyValue(KEY_D, 0L); private static final KeyValue KV_E = new KeyValue(KEY_E, 0L); - private static long defaultSplitSize = 18; private static float defaultSplitCount = 1.8F; private final static int defaultInitialCount = 1; @@ -150,7 +148,7 @@ public void testOldStripesFromFlush() throws Exception { KeyValue[] input = new KeyValue[] { KV_B, KV_C, KV_C, KV_D, KV_E }; KeyValue[][] expected = new KeyValue[][] { new KeyValue[] { KV_B }, - new KeyValue[] { KV_C, KV_C }, new KeyValue[] { KV_D, KV_E } }; + new KeyValue[] { KV_C, KV_C }, new KeyValue[] { KV_D, KV_E } }; verifyFlush(policy, si, input, expected, new byte[][] { OPEN_KEY, KEY_C, KEY_D, OPEN_KEY }); } @@ -168,7 +166,7 @@ public void testNewStripesFromFlush() throws Exception { public void testSingleStripeCompaction() throws Exception { // Create a special policy that only compacts single stripes, using standard methods. Configuration conf = HBaseConfiguration.create(); - // Test depends on this not being set to pass. Default breaks test. TODO: Revisit. + // Test depends on this not being set to pass. Default breaks test. TODO: Revisit. 
conf.unset("hbase.hstore.compaction.min.size"); conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F); conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 3); @@ -188,8 +186,8 @@ public StripeCompactionRequest selectCompaction(StripeInformationProvider si, } @Override - public boolean needsCompactions( - StripeInformationProvider si, List filesCompacting) { + public boolean needsCompactions(StripeInformationProvider si, + List filesCompacting) { if (!filesCompacting.isEmpty()) { return false; } @@ -198,28 +196,28 @@ public boolean needsCompactions( }; // No compaction due to min files or ratio - StripeInformationProvider si = createStripesWithSizes(0, 0, - new Long[] { 2L }, new Long[] { 3L, 3L }, new Long[] { 5L, 1L }); + StripeInformationProvider si = createStripesWithSizes(0, 0, new Long[] { 2L }, + new Long[] { 3L, 3L }, new Long[] { 5L, 1L }); verifyNoCompaction(policy, si); // No compaction due to min files or ratio - will report needed, but not do any. - si = createStripesWithSizes(0, 0, - new Long[] { 2L }, new Long[] { 3L, 3L }, new Long[] { 5L, 1L, 1L }); + si = createStripesWithSizes(0, 0, new Long[] { 2L }, new Long[] { 3L, 3L }, + new Long[] { 5L, 1L, 1L }); assertNull(policy.selectCompaction(si, al(), false)); assertTrue(policy.needsCompactions(si, al())); // One stripe has possible compaction - si = createStripesWithSizes(0, 0, - new Long[] { 2L }, new Long[] { 3L, 3L }, new Long[] { 5L, 4L, 3L }); + si = createStripesWithSizes(0, 0, new Long[] { 2L }, new Long[] { 3L, 3L }, + new Long[] { 5L, 4L, 3L }); verifySingleStripeCompaction(policy, si, 2, null); // Several stripes have possible compactions; choose best quality (removes most files) - si = createStripesWithSizes(0, 0, - new Long[] { 3L, 2L, 2L }, new Long[] { 2L, 2L, 1L }, new Long[] { 3L, 2L, 2L, 1L }); + si = createStripesWithSizes(0, 0, new Long[] { 3L, 2L, 2L }, new Long[] { 2L, 2L, 1L }, + new Long[] { 3L, 2L, 2L, 1L }); verifySingleStripeCompaction(policy, si, 2, null); - si = createStripesWithSizes(0, 0, - new Long[] { 5L }, new Long[] { 3L, 2L, 2L, 1L }, new Long[] { 3L, 2L, 2L }); + si = createStripesWithSizes(0, 0, new Long[] { 5L }, new Long[] { 3L, 2L, 2L, 1L }, + new Long[] { 3L, 2L, 2L }); verifySingleStripeCompaction(policy, si, 1, null); // Or with smallest files, if the count is the same - si = createStripesWithSizes(0, 0, - new Long[] { 3L, 3L, 3L }, new Long[] { 3L, 1L, 2L }, new Long[] { 3L, 2L, 2L }); + si = createStripesWithSizes(0, 0, new Long[] { 3L, 3L, 3L }, new Long[] { 3L, 1L, 2L }, + new Long[] { 3L, 2L, 2L }); verifySingleStripeCompaction(policy, si, 1, null); // Verify max count is respected. si = createStripesWithSizes(0, 0, new Long[] { 5L }, new Long[] { 5L, 4L, 4L, 4L, 4L }); @@ -234,9 +232,9 @@ public boolean needsCompactions( @Test public void testWithParallelCompaction() throws Exception { // TODO: currently only one compaction at a time per store is allowed. If this changes, - // the appropriate file exclusion testing would need to be done in respective tests. - assertNull(createPolicy(HBaseConfiguration.create()).selectCompaction( - mock(StripeInformationProvider.class), al(createFile()), false)); + // the appropriate file exclusion testing would need to be done in respective tests. 
+ assertNull(createPolicy(HBaseConfiguration.create()) + .selectCompaction(mock(StripeInformationProvider.class), al(createFile()), false)); } @Test @@ -256,16 +254,15 @@ public void testWithReferences() throws Exception { assertEquals(si.getStorefiles(), new ArrayList<>(scr.getRequest().getFiles())); scr.execute(sc, NoLimitThroughputController.INSTANCE, null); verify(sc, only()).compact(eq(scr.getRequest()), anyInt(), anyLong(), aryEq(OPEN_KEY), - aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY), - any(), any()); + aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY), any(), any()); } @Test public void testInitialCountFromL0() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 2); - StripeCompactionPolicy policy = createPolicy( - conf, defaultSplitSize, defaultSplitCount, 2, false); + StripeCompactionPolicy policy = + createPolicy(conf, defaultSplitSize, defaultSplitCount, 2, false); StripeCompactionPolicy.StripeInformationProvider si = createStripesL0Only(3, 8); verifyCompaction(policy, si, si.getStorefiles(), true, 2, 12L, OPEN_KEY, OPEN_KEY, true); si = createStripesL0Only(3, 10); // If result would be too large, split into smaller parts. @@ -276,7 +273,7 @@ public void testInitialCountFromL0() throws Exception { @Test public void testSelectL0Compaction() throws Exception { - //test select ALL L0 files when L0 files count > MIN_FILES_L0_KEY + // test select ALL L0 files when L0 files count > MIN_FILES_L0_KEY Configuration conf = HBaseConfiguration.create(); conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 4); StripeCompactionPolicy policy = createPolicy(conf); @@ -290,8 +287,8 @@ public void testSelectL0Compaction() throws Exception { // test select partial L0 files when size of L0 files > HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY conf.setLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, 100L); policy = createPolicy(conf); - si = createStripesWithSizes(5, 50L, - new Long[] { 5L, 1L, 1L }, new Long[] { 3L, 2L, 2L }, new Long[] { 3L, 2L, 2L }); + si = createStripesWithSizes(5, 50L, new Long[] { 5L, 1L, 1L }, new Long[] { 3L, 2L, 2L }, + new Long[] { 3L, 2L, 2L }); cr = policy.selectCompaction(si, al(), false); assertNotNull(cr); assertEquals(2, cr.getRequest().getFiles().size()); @@ -301,8 +298,8 @@ public void testSelectL0Compaction() throws Exception { conf.setInt(MAX_FILES_KEY, 6); conf.setLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, 1000L); policy = createPolicy(conf); - si = createStripesWithSizes(10, 10L, - new Long[] { 5L, 1L, 1L }, new Long[] { 3L, 2L, 2L }, new Long[] { 3L, 2L, 2L }); + si = createStripesWithSizes(10, 10L, new Long[] { 5L, 1L, 1L }, new Long[] { 3L, 2L, 2L }, + new Long[] { 3L, 2L, 2L }); cr = policy.selectCompaction(si, al(), false); assertNotNull(cr); assertEquals(6, cr.getRequest().getFiles().size()); @@ -314,8 +311,8 @@ public void testExistingStripesFromL0() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 3); StripeCompactionPolicy.StripeInformationProvider si = createStripes(3, KEY_A); - verifyCompaction( - createPolicy(conf), si, si.getLevel0Files(), null, null, si.getStripeBoundaries()); + verifyCompaction(createPolicy(conf), si, si.getLevel0Files(), null, null, + si.getStripeBoundaries()); } @Test @@ -347,10 +344,10 @@ public void testCheckExpiredStripeCompaction() throws Exception { List mixed = Lists.newArrayList(expiredFile, notExpiredFile); StripeCompactionPolicy policy = - createPolicy(conf, defaultSplitSize, defaultSplitCount, 
defaultInitialCount, true); + createPolicy(conf, defaultSplitSize, defaultSplitCount, defaultInitialCount, true); // Merge expired if there are eligible stripes. StripeCompactionPolicy.StripeInformationProvider si = - createStripesWithFiles(mixed, mixed, mixed); + createStripesWithFiles(mixed, mixed, mixed); assertFalse(policy.needsCompactions(si, al())); si = createStripesWithFiles(mixed, mixed, mixed, expired); @@ -362,13 +359,13 @@ public void testCheckExpiredStripeCompaction() throws Exception { @Test public void testSplitOffStripe() throws Exception { Configuration conf = HBaseConfiguration.create(); - // Test depends on this not being set to pass. Default breaks test. TODO: Revisit. + // Test depends on this not being set to pass. Default breaks test. TODO: Revisit. conf.unset("hbase.hstore.compaction.min.size"); // First test everything with default split count of 2, then split into more. conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2); Long[] toSplit = new Long[] { defaultSplitSize - 2, 1L, 1L }; Long[] noSplit = new Long[] { defaultSplitSize - 2, 1L }; - long splitTargetSize = (long)(defaultSplitSize / defaultSplitCount); + long splitTargetSize = (long) (defaultSplitSize / defaultSplitCount); // Don't split if not eligible for compaction. StripeCompactionPolicy.StripeInformationProvider si = createStripesWithSizes(0, 0, new Long[] { defaultSplitSize - 2, 2L }); @@ -385,8 +382,8 @@ public void testSplitOffStripe() throws Exception { verifyWholeStripesCompaction(policy, si, 1, 1, null, 2, splitTargetSize); // No split-off with different config (larger split size). // However, in this case some eligible stripe will just be compacted alone. - StripeCompactionPolicy specPolicy = createPolicy( - conf, defaultSplitSize + 1, defaultSplitCount, defaultInitialCount, false); + StripeCompactionPolicy specPolicy = + createPolicy(conf, defaultSplitSize + 1, defaultSplitCount, defaultInitialCount, false); verifySingleStripeCompaction(specPolicy, si, 1, null); } @@ -395,19 +392,19 @@ public void testSplitOffStripeOffPeak() throws Exception { // for HBASE-11439 Configuration conf = HBaseConfiguration.create(); - // Test depends on this not being set to pass. Default breaks test. TODO: Revisit. + // Test depends on this not being set to pass. Default breaks test. TODO: Revisit. conf.unset("hbase.hstore.compaction.min.size"); conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2); // Select the last 2 files. StripeCompactionPolicy.StripeInformationProvider si = createStripesWithSizes(0, 0, new Long[] { defaultSplitSize - 2, 1L, 1L }); - assertEquals(2, createPolicy(conf).selectCompaction(si, al(), false).getRequest().getFiles() - .size()); + assertEquals(2, + createPolicy(conf).selectCompaction(si, al(), false).getRequest().getFiles().size()); // Make sure everything is eligible in offpeak. 
conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 500f); - assertEquals(3, createPolicy(conf).selectCompaction(si, al(), true).getRequest().getFiles() - .size()); + assertEquals(3, + createPolicy(conf).selectCompaction(si, al(), true).getRequest().getFiles().size()); } @Test @@ -417,15 +414,15 @@ public void testSplitOffStripeDropDeletes() throws Exception { StripeCompactionPolicy policy = createPolicy(conf); Long[] toSplit = new Long[] { defaultSplitSize / 2, defaultSplitSize / 2 }; Long[] noSplit = new Long[] { 1L }; - long splitTargetSize = (long)(defaultSplitSize / defaultSplitCount); + long splitTargetSize = (long) (defaultSplitSize / defaultSplitCount); // Verify the deletes can be dropped if there are no L0 files. StripeCompactionPolicy.StripeInformationProvider si = createStripesWithSizes(0, 0, noSplit, toSplit); - verifyWholeStripesCompaction(policy, si, 1, 1, true, null, splitTargetSize); + verifyWholeStripesCompaction(policy, si, 1, 1, true, null, splitTargetSize); // But cannot be dropped if there are. si = createStripesWithSizes(2, 2, noSplit, toSplit); - verifyWholeStripesCompaction(policy, si, 1, 1, false, null, splitTargetSize); + verifyWholeStripesCompaction(policy, si, 1, 1, false, null, splitTargetSize); } @SuppressWarnings("unchecked") @@ -443,8 +440,8 @@ public void testMergeExpiredFiles() throws Exception { List notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile); List mixed = Lists.newArrayList(expiredFile, notExpiredFile); - StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create(), - defaultSplitSize, defaultSplitCount, defaultInitialCount, true); + StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create(), defaultSplitSize, + defaultSplitCount, defaultInitialCount, true); // Merge expired if there are eligible stripes. StripeCompactionPolicy.StripeInformationProvider si = createStripesWithFiles(expired, expired, expired); @@ -482,9 +479,8 @@ public void testMergeExpiredStripes() throws Exception { List expired = Lists.newArrayList(expiredFile, expiredFile); List notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile); - StripeCompactionPolicy policy = - createPolicy(HBaseConfiguration.create(), defaultSplitSize, defaultSplitCount, - defaultInitialCount, true); + StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create(), defaultSplitSize, + defaultSplitCount, defaultInitialCount, true); // Merge all three expired stripes into one. StripeCompactionPolicy.StripeInformationProvider si = @@ -500,16 +496,16 @@ public void testMergeExpiredStripes() throws Exception { } @SuppressWarnings("unchecked") - private static StripeCompactionPolicy.StripeInformationProvider createStripesWithFiles( - List... stripeFiles) throws Exception { + private static StripeCompactionPolicy.StripeInformationProvider + createStripesWithFiles(List... stripeFiles) throws Exception { return createStripesWithFiles(createBoundaries(stripeFiles.length), - Lists.newArrayList(stripeFiles), new ArrayList<>()); + Lists.newArrayList(stripeFiles), new ArrayList<>()); } @Test public void testSingleStripeDropDeletes() throws Exception { Configuration conf = HBaseConfiguration.create(); - // Test depends on this not being set to pass. Default breaks test. TODO: Revisit. + // Test depends on this not being set to pass. Default breaks test. TODO: Revisit. conf.unset("hbase.hstore.compaction.min.size"); StripeCompactionPolicy policy = createPolicy(conf); // Verify the deletes can be dropped if there are no L0 files. 
@@ -524,11 +520,10 @@ public void testSingleStripeDropDeletes() throws Exception { ConcatenatedLists sfs = new ConcatenatedLists<>(); sfs.addSublist(si.getLevel0Files()); sfs.addSublist(si.getStripes().get(0)); - verifyCompaction( - policy, si, sfs, si.getStartRow(0), si.getEndRow(0), si.getStripeBoundaries()); + verifyCompaction(policy, si, sfs, si.getStartRow(0), si.getEndRow(0), si.getStripeBoundaries()); // If we cannot actually compact all files in some stripe, L0 is chosen. si = createStripesWithSizes(6, 2, - new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } }); + new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } }); verifyCompaction(policy, si, si.getLevel0Files(), null, null, si.getStripeBoundaries()); // even if L0 has no file // if all files of stripe aren't selected, delete must not be dropped. @@ -582,13 +577,12 @@ public void testCheckExpiredL0Compaction() throws Exception { } /********* HELPER METHODS ************/ - private static StripeCompactionPolicy createPolicy( - Configuration conf) throws Exception { + private static StripeCompactionPolicy createPolicy(Configuration conf) throws Exception { return createPolicy(conf, defaultSplitSize, defaultSplitCount, defaultInitialCount, false); } - private static StripeCompactionPolicy createPolicy(Configuration conf, - long splitSize, float splitCount, int initialCount, boolean hasTtl) throws Exception { + private static StripeCompactionPolicy createPolicy(Configuration conf, long splitSize, + float splitCount, int initialCount, boolean hasTtl) throws Exception { conf.setLong(StripeStoreConfig.SIZE_TO_SPLIT_KEY, splitSize); conf.setFloat(StripeStoreConfig.SPLIT_PARTS_KEY, splitCount); conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, initialCount); @@ -627,15 +621,15 @@ private void verifyMergeCompatcion(StripeCompactionPolicy policy, StripeInformat * @param size Expected target stripe size, null if not checked. */ private void verifyWholeStripesCompaction(StripeCompactionPolicy policy, - StripeInformationProvider si, int from, int to, Boolean dropDeletes, - Integer count, Long size, boolean needsCompaction) throws IOException { - verifyCompaction(policy, si, getAllFiles(si, from, to), dropDeletes, - count, size, si.getStartRow(from), si.getEndRow(to), needsCompaction); + StripeInformationProvider si, int from, int to, Boolean dropDeletes, Integer count, Long size, + boolean needsCompaction) throws IOException { + verifyCompaction(policy, si, getAllFiles(si, from, to), dropDeletes, count, size, + si.getStartRow(from), si.getEndRow(to), needsCompaction); } private void verifyWholeStripesCompaction(StripeCompactionPolicy policy, - StripeInformationProvider si, int from, int to, Boolean dropDeletes, - Integer count, Long size) throws IOException { + StripeInformationProvider si, int from, int to, Boolean dropDeletes, Integer count, Long size) + throws IOException { verifyWholeStripesCompaction(policy, si, from, to, dropDeletes, count, size, true); } @@ -649,8 +643,8 @@ private void verifySingleStripeCompaction(StripeCompactionPolicy policy, * @param policy Policy to test. * @param si Stripe information pre-set with stripes to test. 
*/ - private void verifyNoCompaction( - StripeCompactionPolicy policy, StripeInformationProvider si) throws IOException { + private void verifyNoCompaction(StripeCompactionPolicy policy, StripeInformationProvider si) + throws IOException { assertNull(policy.selectCompaction(si, al(), false)); assertFalse(policy.needsCompactions(si, al())); } @@ -687,8 +681,7 @@ public boolean matches(List argument) { return true; } }), dropDeletesFrom == null ? isNull(byte[].class) : aryEq(dropDeletesFrom), - dropDeletesTo == null ? isNull(byte[].class) : aryEq(dropDeletesTo), - any(), any()); + dropDeletesTo == null ? isNull(byte[].class) : aryEq(dropDeletesTo), any(), any()); } /** @@ -703,8 +696,8 @@ public boolean matches(List argument) { * @param end Right boundary of the compaction. */ private void verifyCompaction(StripeCompactionPolicy policy, StripeInformationProvider si, - Collection sfs, Boolean dropDeletes, Integer count, Long size, - byte[] start, byte[] end, boolean needsCompaction) throws IOException { + Collection sfs, Boolean dropDeletes, Integer count, Long size, byte[] start, + byte[] end, boolean needsCompaction) throws IOException { StripeCompactor sc = mock(StripeCompactor.class); assertTrue(!needsCompaction || policy.needsCompactions(si, al())); StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false); @@ -713,8 +706,7 @@ private void verifyCompaction(StripeCompactionPolicy policy, StripeInformationPr verify(sc, times(1)).compact(eq(scr.getRequest()), count == null ? anyInt() : eq(count.intValue()), size == null ? anyLong() : eq(size.longValue()), aryEq(start), aryEq(end), - dropDeletesMatcher(dropDeletes, start), dropDeletesMatcher(dropDeletes, end), - any(), any()); + dropDeletesMatcher(dropDeletes, start), dropDeletesMatcher(dropDeletes, end), any(), any()); } /** Verify arbitrary flush. */ @@ -722,7 +714,7 @@ protected void verifyFlush(StripeCompactionPolicy policy, StripeInformationProvi KeyValue[] input, KeyValue[][] expected, byte[][] boundaries) throws IOException { StoreFileWritersCapture writers = new StoreFileWritersCapture(); StripeStoreFlusher.StripeFlushRequest req = - policy.selectFlush(CellComparatorImpl.COMPARATOR, si, input.length); + policy.selectFlush(CellComparatorImpl.COMPARATOR, si, input.length); StripeMultiFileWriter mw = req.createWriter(); mw.init(null, writers); for (KeyValue kv : input) { @@ -736,10 +728,9 @@ protected void verifyFlush(StripeCompactionPolicy policy, StripeInformationProvi } } - private byte[] dropDeletesMatcher(Boolean dropDeletes, byte[] value) { return dropDeletes == null ? any() - : (dropDeletes.booleanValue() ? aryEq(value) : isNull(byte[].class)); + : (dropDeletes.booleanValue() ? aryEq(value) : isNull(byte[].class)); } private void verifyCollectionsEqual(Collection sfs, Collection scr) { @@ -748,8 +739,8 @@ private void verifyCollectionsEqual(Collection sfs, Collection getAllFiles( - StripeInformationProvider si, int fromStripe, int toStripe) { + private static List getAllFiles(StripeInformationProvider si, int fromStripe, + int toStripe) { ArrayList expected = new ArrayList<>(); for (int i = fromStripe; i <= toStripe; ++i) { expected.addAll(si.getStripes().get(i)); @@ -762,8 +753,8 @@ private static List getAllFiles( * @param boundaries Target boundaries. * @return Mock stripes. */ - private static StripeInformationProvider createStripes( - int l0Count, byte[]... boundaries) throws Exception { + private static StripeInformationProvider createStripes(int l0Count, byte[]... 
boundaries) + throws Exception { List l0Sizes = new ArrayList<>(); for (int i = 0; i < l0Count; ++i) { l0Sizes.add(5L); @@ -780,8 +771,8 @@ private static StripeInformationProvider createStripes( * @param l0Size Size of each file. * @return Mock stripes. */ - private static StripeInformationProvider createStripesL0Only( - int l0Count, long l0Size) throws Exception { + private static StripeInformationProvider createStripesL0Only(int l0Count, long l0Size) + throws Exception { List l0Sizes = new ArrayList<>(); for (int i = 0; i < l0Count; ++i) { l0Sizes.add(l0Size); @@ -795,8 +786,8 @@ private static StripeInformationProvider createStripesL0Only( * @param sizes Sizes of the files; each sub-array representing a stripe. * @return Mock stripes. */ - private static StripeInformationProvider createStripesWithSizes( - int l0Count, long l0Size, Long[]... sizes) throws Exception { + private static StripeInformationProvider createStripesWithSizes(int l0Count, long l0Size, + Long[]... sizes) throws Exception { ArrayList> sizeList = new ArrayList<>(sizes.length); for (Long[] size : sizes) { sizeList.add(Arrays.asList(size)); @@ -804,8 +795,8 @@ private static StripeInformationProvider createStripesWithSizes( return createStripesWithSizes(l0Count, l0Size, sizeList); } - private static StripeInformationProvider createStripesWithSizes( - int l0Count, long l0Size, List> sizes) throws Exception { + private static StripeInformationProvider createStripesWithSizes(int l0Count, long l0Size, + List> sizes) throws Exception { List boundaries = createBoundaries(sizes.size()); List l0Sizes = new ArrayList<>(); for (int i = 0; i < l0Count; ++i) { @@ -900,7 +891,7 @@ private static void setFileStripe(HStoreFile sf, byte[] startKey, byte[] endKey) private StripeCompactor createCompactor() throws Exception { ColumnFamilyDescriptor familyDescriptor = - ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("foo")); + ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("foo")); StoreFileWritersCapture writers = new StoreFileWritersCapture(); HStore store = mock(HStore.class); RegionInfo info = mock(RegionInfo.class); @@ -939,8 +930,7 @@ public Scanner(KeyValue... kvs) { } @Override - public boolean next(List result, ScannerContext scannerContext) - throws IOException { + public boolean next(List result, ScannerContext scannerContext) throws IOException { if (kvs.isEmpty()) { return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java index ae59c74bad95..d8ac93ca851f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -200,8 +200,8 @@ private StripeCompactor createCompactor(StoreFileWritersCapture writers, KeyValu // Create store mock that is satisfactory for compactor. 
ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of(NAME_OF_THINGS); - ScanInfo si = new ScanInfo(conf, familyDescriptor, Long.MAX_VALUE, 0, - CellComparatorImpl.COMPARATOR); + ScanInfo si = + new ScanInfo(conf, familyDescriptor, Long.MAX_VALUE, 0, CellComparatorImpl.COMPARATOR); HStore store = mock(HStore.class); when(store.getColumnFamilyDescriptor()).thenReturn(familyDescriptor); when(store.getScanInfo()).thenReturn(si); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/http/TestRSStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/http/TestRSStatusServlet.java index f4e29ccd1585..4ca6e8ed90f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/http/TestRSStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/http/TestRSStatusServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,7 +63,7 @@ /** * Tests for the region server status page and its template. */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestRSStatusServlet { @ClassRule @@ -78,13 +78,11 @@ public class TestRSStatusServlet { static final int FAKE_IPC_PORT = 1585; static final int FAKE_WEB_PORT = 1586; - private final ServerName fakeServerName = - ServerName.valueOf("localhost", FAKE_IPC_PORT, 11111); + private final ServerName fakeServerName = ServerName.valueOf("localhost", FAKE_IPC_PORT, 11111); private final GetServerInfoResponse fakeResponse = - ResponseConverter.buildGetServerInfoResponse(fakeServerName, FAKE_WEB_PORT); + ResponseConverter.buildGetServerInfoResponse(fakeServerName, FAKE_WEB_PORT); - private final ServerName fakeMasterAddress = - ServerName.valueOf("localhost", 60010, 1212121212); + private final ServerName fakeMasterAddress = ServerName.valueOf("localhost", 60010, 1212121212); @Rule public TestName name = new TestName(); @@ -130,14 +128,14 @@ public void testBasic() throws IOException, ServiceException { @Test public void testWithRegions() throws IOException, ServiceException { TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); List regions = Lists.newArrayList( RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(Bytes.toBytes("a")) - .setEndKey(Bytes.toBytes("d")).build(), + .setEndKey(Bytes.toBytes("d")).build(), RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(Bytes.toBytes("d")) - .setEndKey(Bytes.toBytes("z")).build()); - Mockito.doReturn(ResponseConverter.buildGetOnlineRegionResponse(regions)) - .when(rpcServices) .getOnlineRegion(Mockito.any(), Mockito.any()); + .setEndKey(Bytes.toBytes("z")).build()); + Mockito.doReturn(ResponseConverter.buildGetOnlineRegionResponse(regions)).when(rpcServices) + .getOnlineRegion(Mockito.any(), Mockito.any()); new RSStatusTmpl().render(new StringWriter(), rs); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/AbstractTestScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/AbstractTestScanQueryMatcher.java index af63de9695ff..a707d79415e9 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/AbstractTestScanQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/AbstractTestScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java index b4f421c4a981..06740233bed4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java index e87dfd813a95..7e6dc92f80ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -179,9 +179,9 @@ public void testInfiniteLoop() throws IOException { TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); columns.addAll(Arrays.asList(new byte[][] { col2, col3, col5 })); List scanner = Arrays. asList(new byte[][] { col1, col4 }); - List expected = Arrays. asList( - new ScanQueryMatcher.MatchCode[] { ScanQueryMatcher.MatchCode.SEEK_NEXT_COL, - ScanQueryMatcher.MatchCode.SEEK_NEXT_COL }); + List expected = + Arrays. asList(new ScanQueryMatcher.MatchCode[] { + ScanQueryMatcher.MatchCode.SEEK_NEXT_COL, ScanQueryMatcher.MatchCode.SEEK_NEXT_COL }); runTest(1, columns, scanner, expected); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestNewVersionBehaviorTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestNewVersionBehaviorTracker.java index d3542ebbee54..da6375840b1c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestNewVersionBehaviorTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestNewVersionBehaviorTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hbase.regionserver.querymatcher; -import java.util.TreeSet; import static org.junit.Assert.assertEquals; import java.io.IOException; +import java.util.TreeSet; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -34,7 +34,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category({ RegionServerTests.class, SmallTests.class }) public class TestNewVersionBehaviorTracker { @@ -218,8 +217,8 @@ public void testFamilyVersionDelete() { NewVersionBehaviorTracker tracker = new NewVersionBehaviorTracker(null, comparator, 1, 3, 3, 10000); - KeyValue delete = new KeyValue(row, family, null, 20000, KeyValue.Type.DeleteFamilyVersion, - value); + KeyValue delete = + new KeyValue(row, family, null, 20000, KeyValue.Type.DeleteFamilyVersion, value); delete.setSequenceId(1000); delete.setTimestamp(20000); tracker.add(delete); @@ -257,54 +256,53 @@ public void testMinVersionsAndTTL() throws IOException { keyValue.setSequenceId(1000); assertEquals(DeleteResult.NOT_DELETED, tracker.isDeleted(keyValue)); assertEquals(MatchCode.INCLUDE_AND_SEEK_NEXT_COL, - tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); keyValue.setTimestamp(19999); keyValue.setSequenceId(999); assertEquals(DeleteResult.NOT_DELETED, tracker.isDeleted(keyValue)); - assertEquals( - MatchCode.SEEK_NEXT_COL, - tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + assertEquals(MatchCode.SEEK_NEXT_COL, + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); keyValue.setTimestamp(19999); keyValue.setSequenceId(998); assertEquals(DeleteResult.VERSION_MASKED, tracker.isDeleted(keyValue)); assertEquals(MatchCode.SEEK_NEXT_COL, - tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); keyValue.setTimestamp(19998); keyValue.setSequenceId(997); assertEquals(DeleteResult.NOT_DELETED, tracker.isDeleted(keyValue)); assertEquals(MatchCode.SEEK_NEXT_COL, - tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); keyValue.setTimestamp(19997); keyValue.setSequenceId(996); assertEquals(DeleteResult.VERSION_MASKED, tracker.isDeleted(keyValue)); assertEquals(MatchCode.SEEK_NEXT_COL, - tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); keyValue = new KeyValue(row, family, col2, 20000, KeyValue.Type.Put, value); keyValue.setTimestamp(20000); keyValue.setSequenceId(1000); assertEquals(DeleteResult.NOT_DELETED, tracker.isDeleted(keyValue)); assertEquals(MatchCode.INCLUDE_AND_SEEK_NEXT_COL, - tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); keyValue.setTimestamp(19999); keyValue.setSequenceId(1002); assertEquals(DeleteResult.NOT_DELETED, tracker.isDeleted(keyValue)); assertEquals(MatchCode.SEEK_NEXT_COL, - 
tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); keyValue.setTimestamp(19999); keyValue.setSequenceId(1001); assertEquals(DeleteResult.VERSION_MASKED, tracker.isDeleted(keyValue)); assertEquals(MatchCode.SEEK_NEXT_COL, - tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); keyValue.setTimestamp(19998); keyValue.setSequenceId(1003); assertEquals(DeleteResult.NOT_DELETED, tracker.isDeleted(keyValue)); assertEquals(MatchCode.SEEK_NEXT_COL, - tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); keyValue.setTimestamp(19997); keyValue.setSequenceId(1004); assertEquals(DeleteResult.VERSION_MASKED, tracker.isDeleted(keyValue)); assertEquals(MatchCode.SEEK_NEXT_COL, - tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); + tracker.checkVersions(keyValue, keyValue.getTimestamp(), keyValue.getTypeByte(), false)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanDeleteTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanDeleteTracker.java index 7cfc323eadf2..ae0488626df9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanDeleteTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanDeleteTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java index b8130c3587b1..709a8ebfcb45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,8 +45,8 @@ public class TestScanWildcardColumnTracker { @Test public void testCheckColumnOk() throws IOException { - ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker( - 0, VERSIONS, Long.MIN_VALUE, CellComparatorImpl.COMPARATOR); + ScanWildcardColumnTracker tracker = + new ScanWildcardColumnTracker(0, VERSIONS, Long.MIN_VALUE, CellComparatorImpl.COMPARATOR); // Create list of qualifiers List qualifiers = new ArrayList<>(4); @@ -78,8 +78,8 @@ public void testCheckColumnOk() throws IOException { @Test public void testCheckColumnEnforceVersions() throws IOException { - ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker( - 0, VERSIONS, Long.MIN_VALUE, CellComparatorImpl.COMPARATOR); + ScanWildcardColumnTracker tracker = + new ScanWildcardColumnTracker(0, VERSIONS, Long.MIN_VALUE, CellComparatorImpl.COMPARATOR); // Create list of qualifiers List qualifiers = new ArrayList<>(4); @@ -112,8 +112,8 @@ public void testCheckColumnEnforceVersions() throws IOException { @Test public void DisabledTestCheckColumnWrongOrder() { - ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker( - 0, VERSIONS, Long.MIN_VALUE, CellComparatorImpl.COMPARATOR); + ScanWildcardColumnTracker tracker = + new ScanWildcardColumnTracker(0, VERSIONS, Long.MIN_VALUE, CellComparatorImpl.COMPARATOR); // Create list of qualifiers List qualifiers = new ArrayList<>(2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java index 651567deb9c1..59937df4103d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -215,8 +215,8 @@ public void testMatch_ExpiredWildcard() throws IOException { long testTTL = 1000; MatchCode[] expected = new MatchCode[] { ScanQueryMatcher.MatchCode.INCLUDE, ScanQueryMatcher.MatchCode.INCLUDE, - ScanQueryMatcher.MatchCode.SEEK_NEXT_COL, ScanQueryMatcher.MatchCode.INCLUDE, - ScanQueryMatcher.MatchCode.SEEK_NEXT_COL, ScanQueryMatcher.MatchCode.DONE }; + ScanQueryMatcher.MatchCode.SEEK_NEXT_COL, ScanQueryMatcher.MatchCode.INCLUDE, + ScanQueryMatcher.MatchCode.SEEK_NEXT_COL, ScanQueryMatcher.MatchCode.DONE }; long now = EnvironmentEdgeManager.currentTime(); UserScanQueryMatcher qm = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationBufferManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationBufferManager.java index 8b56d09de18a..b2888b745c6a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationBufferManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationBufferManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ public class TestRegionReplicationBufferManager { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionReplicationBufferManager.class); + HBaseClassTestRule.forClass(TestRegionReplicationBufferManager.class); private Configuration conf; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationFlushRequester.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationFlushRequester.java index abe5aa1cdcc9..3d71d8a21215 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationFlushRequester.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationFlushRequester.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestRegionReplicationFlushRequester { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionReplicationFlushRequester.class); + HBaseClassTestRule.forClass(TestRegionReplicationFlushRequester.class); private Configuration conf; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationSink.java index e065709c1b57..21375635f358 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationSink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationSink.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -72,7 +72,7 @@ public class TestRegionReplicationSink { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionReplicationSink.class); + HBaseClassTestRule.forClass(TestRegionReplicationSink.class); private Configuration conf; @@ -97,7 +97,7 @@ public void setUp() { conf.setLong(RegionReplicationSink.BATCH_COUNT_CAPACITY, 5); conf.setLong(RegionReplicationSink.BATCH_SIZE_CAPACITY, 1024 * 1024); td = TableDescriptorBuilder.newBuilder(name.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).setRegionReplication(3).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).setRegionReplication(3).build(); primary = RegionInfoBuilder.newBuilder(name.getTableName()).build(); flushRequester = mock(Runnable.class); conn = mock(AsyncClusterConnection.class); @@ -115,9 +115,9 @@ public void tearDown() throws InterruptedException { public void testNormal() { MutableInt next = new MutableInt(0); List> futures = - Arrays.asList(new CompletableFuture<>(), new CompletableFuture<>()); + Arrays.asList(new CompletableFuture<>(), new CompletableFuture<>()); when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())) - .then(i -> futures.get(next.getAndIncrement())); + .then(i -> futures.get(next.getAndIncrement())); ServerCall rpcCall = mock(ServerCall.class); WALKeyImpl key = mock(WALKeyImpl.class); when(key.estimatedSerializedSizeOf()).thenReturn(100L); @@ -151,9 +151,9 @@ public void testNormal() { public void testDropEdits() { MutableInt next = new MutableInt(0); List> futures = - Arrays.asList(new CompletableFuture<>(), new CompletableFuture<>()); + Arrays.asList(new CompletableFuture<>(), new CompletableFuture<>()); when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())) - .then(i -> futures.get(next.getAndIncrement())); + .then(i -> futures.get(next.getAndIncrement())); ServerCall rpcCall1 = mock(ServerCall.class); WALKeyImpl key1 = mock(WALKeyImpl.class); when(key1.estimatedSerializedSizeOf()).thenReturn(100L); @@ -215,9 +215,9 @@ public void testDropEdits() { public void testNotAddToFailedReplicas() { MutableInt next = new MutableInt(0); List> futures = - Stream.generate(() -> new CompletableFuture()).limit(4).collect(Collectors.toList()); + Stream.generate(() -> new CompletableFuture()).limit(4).collect(Collectors.toList()); when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())) - .then(i -> futures.get(next.getAndIncrement())); + .then(i -> futures.get(next.getAndIncrement())); ServerCall rpcCall1 = mock(ServerCall.class); WALKeyImpl key1 = mock(WALKeyImpl.class); @@ -234,11 +234,11 @@ public void testNotAddToFailedReplicas() { when(key2.getSequenceId()).thenReturn(3L); Map> committedFiles = td.getColumnFamilyNames().stream() - .collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> { - throw new IllegalStateException(); - }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); + .collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> { + throw new IllegalStateException(); + }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); FlushDescriptor fd = - ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles); + ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles); WALEdit edit2 = WALEdit.createFlushWALEdit(primary, fd); sink.add(key2, edit2, rpcCall2); @@ -261,9 +261,9 @@ public void 
testNotAddToFailedReplicas() { public void testAddToFailedReplica() { MutableInt next = new MutableInt(0); List> futures = - Stream.generate(() -> new CompletableFuture()).limit(5).collect(Collectors.toList()); + Stream.generate(() -> new CompletableFuture()).limit(5).collect(Collectors.toList()); when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())) - .then(i -> futures.get(next.getAndIncrement())); + .then(i -> futures.get(next.getAndIncrement())); ServerCall rpcCall1 = mock(ServerCall.class); WALKeyImpl key1 = mock(WALKeyImpl.class); @@ -298,11 +298,11 @@ public void testAddToFailedReplica() { when(key3.estimatedSerializedSizeOf()).thenReturn(200L); when(key3.getSequenceId()).thenReturn(3L); Map> committedFiles = td.getColumnFamilyNames().stream() - .collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> { - throw new IllegalStateException(); - }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); + .collect(Collectors.toMap(Function.identity(), k -> Collections.emptyList(), (u, v) -> { + throw new IllegalStateException(); + }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); FlushDescriptor fd = - ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles); + ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, primary, 2L, committedFiles); WALEdit edit3 = WALEdit.createFlushWALEdit(primary, fd); sink.add(key3, edit3, rpcCall3); @@ -320,9 +320,9 @@ public void testAddToFailedReplica() { public void testSizeCapacity() { MutableInt next = new MutableInt(0); List> futures = - Stream.generate(() -> new CompletableFuture()).limit(6).collect(Collectors.toList()); + Stream.generate(() -> new CompletableFuture()).limit(6).collect(Collectors.toList()); when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())) - .then(i -> futures.get(next.getAndIncrement())); + .then(i -> futures.get(next.getAndIncrement())); for (int i = 0; i < 3; i++) { ServerCall rpcCall = mock(ServerCall.class); WALKeyImpl key = mock(WALKeyImpl.class); @@ -362,9 +362,9 @@ public void testSizeCapacity() { public void testCountCapacity() { MutableInt next = new MutableInt(0); List> futures = - Stream.generate(() -> new CompletableFuture()).limit(6).collect(Collectors.toList()); + Stream.generate(() -> new CompletableFuture()).limit(6).collect(Collectors.toList()); when(conn.replicate(any(), anyList(), anyInt(), anyLong(), anyLong())) - .then(i -> futures.get(next.getAndIncrement())); + .then(i -> futures.get(next.getAndIncrement())); for (int i = 0; i < 7; i++) { ServerCall rpcCall = mock(ServerCall.class); WALKeyImpl key = mock(WALKeyImpl.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationSinkCallbackAndFlushConcurrently.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationSinkCallbackAndFlushConcurrently.java index d6432a696e52..d3c2975986a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationSinkCallbackAndFlushConcurrently.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestRegionReplicationSinkCallbackAndFlushConcurrently.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -121,13 +121,11 @@ public static void tearDown() throws Exception { public void test() throws Exception { final HRegionForTest[] regions = this.createTable(); final AtomicBoolean completedRef = new AtomicBoolean(false); - RegionReplicationSink regionReplicationSink = - regions[0].getRegionReplicationSink().get(); + RegionReplicationSink regionReplicationSink = regions[0].getRegionReplicationSink().get(); assertTrue(regionReplicationSink != null); - RegionReplicationSink spiedRegionReplicationSink = this.setUpSpiedRegionReplicationSink( - regionReplicationSink, regions[0], - completedRef); + RegionReplicationSink spiedRegionReplicationSink = + this.setUpSpiedRegionReplicationSink(regionReplicationSink, regions[0], completedRef); String oldThreadName = Thread.currentThread().getName(); Thread.currentThread().setName(HRegionForTest.USER_THREAD_NAME); @@ -179,8 +177,8 @@ private RegionReplicationSink setUpSpiedRegionReplicationSink( if (primaryRegion.prepareFlush && Thread.currentThread().getName().equals(HRegionForTest.USER_THREAD_NAME)) { int count = getStartFlushAllDescriptorCounter.incrementAndGet(); - if(count == 1) { - //onComplete could execute + if (count == 1) { + // onComplete could execute primaryRegion.cyclicBarrier.await(); return invocationOnMock.callRealMethod(); } @@ -193,9 +191,9 @@ private RegionReplicationSink setUpSpiedRegionReplicationSink( } private HRegionForTest[] createTable() throws Exception { - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setRegionReplication(NB_SERVERS).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) - .build(); + TableDescriptor tableDescriptor = + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(NB_SERVERS) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); HTU.getAdmin().createTable(tableDescriptor); final HRegionForTest[] regions = new HRegionForTest[NB_SERVERS]; for (int i = 0; i < NB_SERVERS; i++) { @@ -247,13 +245,11 @@ protected PrepareFlushResult internalPrepareFlushCache(WAL wal, long myseqid, this.prepareFlush = true; } try { - PrepareFlushResult result = - super.internalPrepareFlushCache(wal, myseqid, storesToFlush, status, - writeFlushWalMarker, tracker); + PrepareFlushResult result = super.internalPrepareFlushCache(wal, myseqid, storesToFlush, + status, writeFlushWalMarker, tracker); return result; - } - finally { + } finally { if (this.getRegionInfo().getReplicaId() == 0 && Thread.currentThread().getName().equals(USER_THREAD_NAME)) { this.prepareFlush = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestStartupWithLegacyRegionReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestStartupWithLegacyRegionReplicationEndpoint.java index a550ecd1291e..e500a0243630 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestStartupWithLegacyRegionReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/regionreplication/TestStartupWithLegacyRegionReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestStartupWithLegacyRegionReplicationEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStartupWithLegacyRegionReplicationEndpoint.class); + HBaseClassTestRule.forClass(TestStartupWithLegacyRegionReplicationEndpoint.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -66,9 +66,10 @@ public static void tearDown() throws IOException { @Test public void test() throws Exception { - ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() - .setClusterKey("127.0.0.1:2181:/hbase") - .setReplicationEndpointImpl(ReplicationUtils.LEGACY_REGION_REPLICATION_ENDPOINT_NAME).build(); + ReplicationPeerConfig peerConfig = + ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase") + .setReplicationEndpointImpl(ReplicationUtils.LEGACY_REGION_REPLICATION_ENDPOINT_NAME) + .build(); SingleProcessHBaseCluster cluster = UTIL.getMiniHBaseCluster(); HMaster master = cluster.getMaster(); // can not use Admin.addPeer as it will fail with ClassNotFound @@ -83,7 +84,7 @@ public void test() throws Exception { assertNotNull(UTIL.getAdmin().getReplicationPeerConfig("legacy")); // but at RS side, we should not have this peer loaded as replication source assertTrue(rst.getRegionServer().getReplicationSourceService().getReplicationManager() - .getSources().isEmpty()); + .getSources().isEmpty()); UTIL.shutdownMiniHBaseCluster(); UTIL.restartHBaseCluster(1); @@ -92,14 +93,14 @@ public void test() throws Exception { () -> UTIL.getAdmin().getReplicationPeerConfig("legacy")); // at rs side, we should not have the peer this time, not only for not having replication source assertTrue(UTIL.getMiniHBaseCluster().getRegionServer(0).getReplicationSourceService() - .getReplicationManager().getReplicationPeers().getAllPeerIds().isEmpty()); + .getReplicationManager().getReplicationPeers().getAllPeerIds().isEmpty()); // make sure that we can finish the SCP and delete the test-wal-file UTIL.waitFor(15000, () -> UTIL.getMiniHBaseCluster().getMaster().getProcedures().stream() - .filter(p -> p instanceof ServerCrashProcedure).map(p -> (ServerCrashProcedure) p) - .allMatch(Procedure::isSuccess)); + .filter(p -> p instanceof ServerCrashProcedure).map(p -> (ServerCrashProcedure) p) + .allMatch(Procedure::isSuccess)); assertTrue(UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage() - .getAllQueues(rsName).isEmpty()); + .getAllQueues(rsName).isEmpty()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerForTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerForTest.java index 4a90beeb5249..4b6b8f8eb6de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerForTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerForTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class StoreFileTrackerForTest extends DefaultStoreFileTracker { private static final Logger LOG = LoggerFactory.getLogger(StoreFileTrackerForTest.class); private static ConcurrentMap> trackedFiles = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private String storeId; public StoreFileTrackerForTest(Configuration conf, boolean isPrimaryReplica, StoreContext ctx) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestChangeStoreFileTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestChangeStoreFileTracker.java index afce4ed4e636..5c6b05eb3656 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestChangeStoreFileTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestChangeStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public class TestChangeStoreFileTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestChangeStoreFileTracker.class); + HBaseClassTestRule.forClass(TestChangeStoreFileTracker.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -74,65 +74,70 @@ public static void tearDown() throws IOException { @Test(expected = DoNotRetryIOException.class) public void testCreateError() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")) + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(MigrationStoreFileTracker.SRC_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .build(); UTIL.getAdmin().createTable(td); } @Test(expected = DoNotRetryIOException.class) public void testModifyError1() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); UTIL.getAdmin().createTable(td); - TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .build(); + TableDescriptor newTd = + TableDescriptorBuilder.newBuilder(td).setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.FILE.name()).build(); UTIL.getAdmin().modifyTable(newTd); } @Test(expected = DoNotRetryIOException.class) public void testModifyError2() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + 
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); UTIL.getAdmin().createTable(td); TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .build(); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setValue(MigrationStoreFileTracker.DST_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .build(); UTIL.getAdmin().modifyTable(newTd); } @Test(expected = DoNotRetryIOException.class) public void testModifyError3() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); UTIL.getAdmin().createTable(td); TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .build(); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(MigrationStoreFileTracker.SRC_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .setValue(MigrationStoreFileTracker.DST_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .build(); UTIL.getAdmin().modifyTable(newTd); } // return the TableDescriptor for creating table private TableDescriptor createTableAndChangeToMigrationTracker() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); UTIL.getAdmin().createTable(td); TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .build(); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(MigrationStoreFileTracker.SRC_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .build(); UTIL.getAdmin().modifyTable(newTd); return td; } @@ -141,11 +146,12 @@ private TableDescriptor createTableAndChangeToMigrationTracker() throws IOExcept public void testModifyError4() throws IOException { TableDescriptor td = createTableAndChangeToMigrationTracker(); TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .setValue(MigrationStoreFileTracker.DST_IMPL, 
StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .build(); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .setValue(MigrationStoreFileTracker.DST_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .build(); UTIL.getAdmin().modifyTable(newTd); } @@ -153,11 +159,13 @@ public void testModifyError4() throws IOException { public void testModifyError5() throws IOException { TableDescriptor td = createTableAndChangeToMigrationTracker(); TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .build(); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(MigrationStoreFileTracker.SRC_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .setValue(MigrationStoreFileTracker.DST_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .build(); UTIL.getAdmin().modifyTable(newTd); } @@ -165,23 +173,23 @@ public void testModifyError5() throws IOException { public void testModifyError6() throws IOException { TableDescriptor td = createTableAndChangeToMigrationTracker(); TableDescriptor newTd = - TableDescriptorBuilder.newBuilder(td).setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.DEFAULT.name()).build(); + TableDescriptorBuilder.newBuilder(td).setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()).build(); UTIL.getAdmin().modifyTable(newTd); } @Test(expected = DoNotRetryIOException.class) public void testModifyError7() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); UTIL.getAdmin().createTable(td); TableDescriptor newTd = TableDescriptorBuilder.newBuilder(tableName.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("family1")) - .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("family1")) + .setConfiguration(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .build()) + .build(); UTIL.getAdmin().modifyTable(newTd); } @@ -189,39 +197,40 @@ public void testModifyError7() throws IOException { @Test(expected = IOException.class) public void testModifyError8() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); UTIL.getAdmin().createTable(td); TableDescriptor newTd = - TableDescriptorBuilder.newBuilder(td).setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()).build(); + 
TableDescriptorBuilder.newBuilder(td).setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()).build(); UTIL.getAdmin().modifyTable(newTd); } @Test public void testModifyError9() throws IOException { TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build(); UTIL.getAdmin().createTable(td); UTIL.getAdmin().disableTable(td.getTableName()); TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .build(); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(MigrationStoreFileTracker.SRC_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .build(); UTIL.getAdmin().modifyTable(newTd); - TableDescriptor newTd2 = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .build(); + TableDescriptor newTd2 = + TableDescriptorBuilder.newBuilder(td).setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.FILE.name()).build(); // changing from MIGRATION while table is disabled is not allowed assertThrows(TableNotEnabledException.class, () -> UTIL.getAdmin().modifyTable(newTd2)); } private String getStoreFileName(TableName table, byte[] family) { return Iterables - .getOnlyElement(Iterables.getOnlyElement(UTIL.getMiniHBaseCluster().getRegions(table)) - .getStore(family).getStorefiles()) - .getPath().getName(); + .getOnlyElement(Iterables.getOnlyElement(UTIL.getMiniHBaseCluster().getRegions(table)) + .getStore(family).getStorefiles()) + .getPath().getName(); } @Test @@ -232,7 +241,7 @@ public void testModify() throws IOException { byte[] qualifier = Bytes.toBytes("qualifier"); byte[] value = Bytes.toBytes("value"); TableDescriptor td = TableDescriptorBuilder.newBuilder(tn) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); UTIL.getAdmin().createTable(td); try (Table table = UTIL.getConnection().getTable(tn)) { table.put(new Put(row).addColumn(family, qualifier, value)); @@ -241,20 +250,21 @@ public void testModify() throws IOException { String fileName = getStoreFileName(tn, family); TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, - StoreFileTrackerFactory.Trackers.MIGRATION.name()) - .setValue(MigrationStoreFileTracker.SRC_IMPL, StoreFileTrackerFactory.Trackers.DEFAULT.name()) - .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .build(); + .setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.MIGRATION.name()) + .setValue(MigrationStoreFileTracker.SRC_IMPL, + StoreFileTrackerFactory.Trackers.DEFAULT.name()) + .setValue(MigrationStoreFileTracker.DST_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) + .build(); UTIL.getAdmin().modifyTable(newTd); assertEquals(fileName, getStoreFileName(tn, family)); try (Table table 
= UTIL.getConnection().getTable(tn)) { assertArrayEquals(value, table.get(new Get(row)).getValue(family, qualifier)); } - TableDescriptor newTd2 = TableDescriptorBuilder.newBuilder(td) - .setValue(StoreFileTrackerFactory.TRACKER_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()) - .build(); + TableDescriptor newTd2 = + TableDescriptorBuilder.newBuilder(td).setValue(StoreFileTrackerFactory.TRACKER_IMPL, + StoreFileTrackerFactory.Trackers.FILE.name()).build(); UTIL.getAdmin().modifyTable(newTd2); assertEquals(fileName, getStoreFileName(tn, family)); try (Table table = UTIL.getConnection().getTable(tn)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestMigrationStoreFileTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestMigrationStoreFileTracker.java index 119f8fb553f8..769724e4bca1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestMigrationStoreFileTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestMigrationStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,7 +67,7 @@ public class TestMigrationStoreFileTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMigrationStoreFileTracker.class); + HBaseClassTestRule.forClass(TestMigrationStoreFileTracker.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -76,8 +76,8 @@ public class TestMigrationStoreFileTracker { private static final byte[] CQ = Bytes.toBytes("cq"); private static final TableDescriptor TD = - TableDescriptorBuilder.newBuilder(TableName.valueOf("file_based_tracker")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf("file_based_tracker")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build(); private static final RegionInfo RI = RegionInfoBuilder.newBuilder(TD.getTableName()).build(); @@ -102,7 +102,7 @@ public static List params() { for (StoreFileTrackerFactory.Trackers src : StoreFileTrackerFactory.Trackers.values()) { for (StoreFileTrackerFactory.Trackers dst : StoreFileTrackerFactory.Trackers.values()) { if (src == StoreFileTrackerFactory.Trackers.MIGRATION - || dst == StoreFileTrackerFactory.Trackers.MIGRATION) { + || dst == StoreFileTrackerFactory.Trackers.MIGRATION) { continue; } if (src.equals(dst)) { @@ -140,18 +140,18 @@ public void tearDown() throws IOException { private List getStoreFiles() { return Iterables.getOnlyElement(region.getStores()).getStorefiles().stream() - .map(s -> s.getFileInfo().getPath().getName()).collect(Collectors.toList()); + .map(s -> s.getFileInfo().getPath().getName()).collect(Collectors.toList()); } private HRegion createRegion(Class trackerImplClass) - throws IOException { + throws IOException { Configuration conf = new Configuration(UTIL.getConfiguration()); conf.setClass(StoreFileTrackerFactory.TRACKER_IMPL, trackerImplClass, StoreFileTracker.class); return HRegion.createHRegion(RI, rootDir, conf, TD, wal, true); } private void reopenRegion(Class trackerImplClass) - throws IOException { + throws IOException { region.flush(true); List before = getStoreFiles(); region.close(); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestRegionWithFileBasedStoreFileTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestRegionWithFileBasedStoreFileTracker.java index 77cd3c448fa0..d17aa40eaf8c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestRegionWithFileBasedStoreFileTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestRegionWithFileBasedStoreFileTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public class TestRegionWithFileBasedStoreFileTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionWithFileBasedStoreFileTracker.class); + HBaseClassTestRule.forClass(TestRegionWithFileBasedStoreFileTracker.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -58,8 +58,8 @@ public class TestRegionWithFileBasedStoreFileTracker { private static final byte[] CQ = Bytes.toBytes("cq"); private static final TableDescriptor TD = - TableDescriptorBuilder.newBuilder(TableName.valueOf("file_based_tracker")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf("file_based_tracker")) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build(); private static final RegionInfo RI = RegionInfoBuilder.newBuilder(TD.getTableName()).build(); @@ -72,8 +72,8 @@ public class TestRegionWithFileBasedStoreFileTracker { public void setUp() throws IOException { Configuration conf = new Configuration(UTIL.getConfiguration()); conf.set(StoreFileTrackerFactory.TRACKER_IMPL, StoreFileTrackerFactory.Trackers.FILE.name()); - region = - HBaseTestingUtil.createRegionAndWAL(RI, UTIL.getDataTestDir(name.getMethodName()), conf, TD); + region = HBaseTestingUtil.createRegionAndWAL(RI, UTIL.getDataTestDir(name.getMethodName()), + conf, TD); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileListFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileListFile.java index e50ca14cc881..658dd4984197 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileListFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileListFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,7 @@ public class TestStoreFileListFile { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreFileListFile.class); + HBaseClassTestRule.forClass(TestStoreFileListFile.class); private static final Logger LOG = LoggerFactory.getLogger(TestStoreFileListFile.class); @@ -74,7 +74,7 @@ private StoreFileListFile create() throws IOException { HRegionFileSystem hfs = mock(HRegionFileSystem.class); when(hfs.getFileSystem()).thenReturn(FileSystem.get(UTIL.getConfiguration())); StoreContext ctx = StoreContext.getBuilder().withFamilyStoreDirectoryPath(testDir) - .withRegionFileSystem(hfs).build(); + .withRegionFileSystem(hfs).build(); return new StoreFileListFile(ctx); } @@ -206,12 +206,12 @@ public void testConcurrentUpdate() throws IOException { StoreFileListFile storeFileListFile2 = create(); storeFileListFile2.update(StoreFileList.newBuilder() - .addStoreFile(StoreFileEntry.newBuilder().setName("hehe").setSize(10).build())); + .addStoreFile(StoreFileEntry.newBuilder().setName("hehe").setSize(10).build())); // let's update storeFileListFile several times for (int i = 0; i < 10; i++) { storeFileListFile.update(StoreFileList.newBuilder() - .addStoreFile(StoreFileEntry.newBuilder().setName("haha-" + i).setSize(100 + i).build())); + .addStoreFile(StoreFileEntry.newBuilder().setName("haha-" + i).setSize(100 + i).build())); } // create a new list file, make sure we load the list generate by storeFileListFile2. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerFactory.java index 41f2afdfa421..72780e8ae61c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public class TestStoreFileTrackerFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreFileTrackerFactory.class); + HBaseClassTestRule.forClass(TestStoreFileTrackerFactory.class); @Test public void testCreateForMigration() { @@ -53,6 +53,6 @@ public void testCreateForMigration() { // nested MigrationStoreFileTracker conf.setClass(configName, MigrationStoreFileTracker.class, StoreFileTrackerBase.class); assertThrows(IllegalArgumentException.class, () -> StoreFileTrackerFactory - .createForMigration(conf, configName, false, StoreContext.getBuilder().build())); + .createForMigration(conf, configName, false, StoreContext.getBuilder().build())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerValidationUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerValidationUtils.java index a686b559f9aa..e17b4fbebcda 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerValidationUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerValidationUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestStoreFileTrackerValidationUtils { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestStoreFileTrackerValidationUtils.class); + HBaseClassTestRule.forClass(TestStoreFileTrackerValidationUtils.class); @Test public void testCheckSFTCompatibility() throws Exception { @@ -56,10 +56,10 @@ public void testCheckSFTCompatibility() throws Exception { // creating a TD with matching ColumnFamilyDescriptor level setting TableDescriptorBuilder snapBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY")); + TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY")); snapBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE"); ColumnFamilyDescriptorBuilder snapCFBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); snapCFBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE"); snapBuilder.setColumnFamily(snapCFBuilder.build()); TableDescriptor snapTd = snapBuilder.build(); @@ -70,10 +70,10 @@ public void testCheckSFTCompatibility() throws Exception { StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(snapTd, td, conf); TableDescriptorBuilder defaultBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY")); + TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY")); defaultBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE"); ColumnFamilyDescriptorBuilder defaultCFBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")); defaultCFBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "DEFAULT"); defaultBuilder.setColumnFamily(defaultCFBuilder.build()); TableDescriptor defaultTd = defaultBuilder.build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java index 17cd05a81296..9fe550919635 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -112,12 +112,10 @@ private long testCompactionWithThroughputLimit() throws Exception { conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200); conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000); conf.setLong( - PressureAwareCompactionThroughputController - .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, + PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, throughputLimit); conf.setLong( - PressureAwareCompactionThroughputController - .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, + PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, throughputLimit); conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY, PressureAwareCompactionThroughputController.class.getName()); @@ -184,12 +182,10 @@ public void testThroughputTuning() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName()); conf.setLong( - PressureAwareCompactionThroughputController - .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, + PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND, 20L * 1024 * 1024); conf.setLong( - PressureAwareCompactionThroughputController - .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, + PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND, 10L * 1024 * 1024); conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 4); conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 6); @@ -236,8 +232,8 @@ public void testThroughputTuning() throws Exception { NoLimitThroughputController.class.getName()); regionServer.getCompactSplitThread().onConfigurationChange(conf); assertTrue(throughputController.isStopped()); - assertTrue(regionServer.getCompactSplitThread().getCompactionThroughputController() - instanceof NoLimitThroughputController); + assertTrue(regionServer.getCompactSplitThread() + .getCompactionThroughputController() instanceof NoLimitThroughputController); } finally { conn.close(); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java index b1de5fdfa396..acc3506d4afa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -67,7 +67,8 @@ public class TestFlushWithThroughputController { private static final double EPSILON = 1.3E-6; private HBaseTestingUtil hbtu; - @Rule public TestName testName = new TestName(); + @Rule + public TestName testName = new TestName(); private TableName tableName; private final byte[] family = Bytes.toBytes("f"); private final byte[] qualifier = Bytes.toBytes("q"); @@ -77,8 +78,8 @@ public void setUp() { hbtu = new HBaseTestingUtil(); tableName = TableName.valueOf("Table-" + testName.getMethodName()); hbtu.getConfiguration().set( - FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY, - PressureAwareFlushThroughputController.class.getName()); + FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY, + PressureAwareFlushThroughputController.class.getName()); } @After @@ -101,9 +102,9 @@ private HStore getStoreWithName(TableName tableName) { private void setMaxMinThroughputs(long max, long min) { Configuration conf = hbtu.getConfiguration(); conf.setLong( - PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, min); + PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, min); conf.setLong( - PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, max); + PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, max); } /** @@ -128,8 +129,8 @@ private Pair generateAndFlushData(Table table) throws IOException } HStore store = getStoreWithName(tableName); assertEquals(NUM_FLUSHES, store.getStorefilesCount()); - double throughput = (double)store.getStorefilesSize() - / TimeUnit.NANOSECONDS.toSeconds(duration); + double throughput = + (double) store.getStorefilesSize() / TimeUnit.NANOSECONDS.toSeconds(duration); return new Pair<>(throughput, duration); } @@ -147,7 +148,7 @@ private long testFlushWithThroughputLimit() throws Exception { LOG.debug("Throughput is: " + (result.getFirst() / 1024 / 1024) + " MB/s"); // confirm that the speed limit work properly(not too fast, and also not too slow) // 20% is the max acceptable error rate. 
- assertTrue(result.getFirst() < throughputLimit * 1.2); + assertTrue(result.getFirst() < throughputLimit * 1.2); assertTrue(result.getFirst() > throughputLimit * 0.8); return result.getSecond(); } @@ -169,9 +170,10 @@ public void testFlushThroughputTuning() throws Exception { 3000); hbtu.startMiniCluster(1); Connection conn = ConnectionFactory.createConnection(conf); - hbtu.getAdmin().createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCompactionEnabled(false) - .build()); + hbtu.getAdmin() + .createTable(TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCompactionEnabled(false) + .build()); hbtu.waitTableAvailable(tableName); HRegionServer regionServer = hbtu.getRSForFirstRegionInTable(tableName); double pressure = regionServer.getFlushPressure(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestStoreHotnessProtector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestStoreHotnessProtector.java index 218b41d9c61b..78f17a2abb33 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestStoreHotnessProtector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestStoreHotnessProtector.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
          + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -50,7 +50,8 @@ @Category(SmallTests.class) public class TestStoreHotnessProtector { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestStoreHotnessProtector.class); @Test @@ -83,8 +84,8 @@ public void testPreparePutCounter() throws Exception { // PreparePutCounter not access limit - int threadCount = conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT, 10) * conf - .getInt(PARALLEL_PREPARE_PUT_STORE_MULTIPLIER, 3); + int threadCount = conf.getInt(PARALLEL_PUT_STORE_THREADS_LIMIT, 10) + * conf.getInt(PARALLEL_PREPARE_PUT_STORE_MULTIPLIER, 3); CountDownLatch countDownLatch = new CountDownLatch(threadCount); for (int i = 0; i < threadCount; i++) { @@ -101,11 +102,11 @@ public void testPreparePutCounter() throws Exception { } countDownLatch.await(60, TimeUnit.SECONDS); - //no exception + // no exception Assert.assertEquals(exception.get(), null); Assert.assertEquals(storeHotnessProtector.getPreparePutToStoreMap().size(), 1); Assert.assertEquals(storeHotnessProtector.getPreparePutToStoreMap().get(family).get(), - threadCount); + threadCount); // access limit @@ -121,11 +122,11 @@ public void testPreparePutCounter() throws Exception { Assert.assertEquals(storeHotnessProtector.getPreparePutToStoreMap().size(), 1); // when access limit, counter will not changed. Assert.assertEquals(storeHotnessProtector.getPreparePutToStoreMap().get(family).get(), - threadCount + 1); + threadCount + 1); storeHotnessProtector.finish(familyMaps); Assert.assertEquals(storeHotnessProtector.getPreparePutToStoreMap().get(family).get(), - threadCount); + threadCount); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index 07f3625ac898..3d72e355caef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -165,7 +165,7 @@ public void testWALCoprocessorLoaded() throws Exception { AbstractFSWAL wal = null; try { wal = newWAL(FS, CommonFSUtils.getWALRootDir(CONF), DIR.toString(), - HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); + HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); WALCoprocessorHost host = wal.getCoprocessorHost(); Coprocessor c = host.findCoprocessor(SampleRegionWALCoprocessor.class); assertNotNull(c); @@ -211,7 +211,7 @@ public void testWALComparator() throws Exception { AbstractFSWAL walMeta = null; try { wal1 = newWAL(FS, CommonFSUtils.getWALRootDir(CONF), DIR.toString(), - HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); + HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); LOG.debug("Log obtained is: " + wal1); Comparator comp = wal1.LOG_NAME_COMPARATOR; Path p1 = wal1.computeFilename(11); @@ -221,8 +221,8 @@ public void testWALComparator() throws Exception { // comparing with different filenum. assertTrue(comp.compare(p1, p2) < 0); walMeta = newWAL(FS, CommonFSUtils.getWALRootDir(CONF), DIR.toString(), - HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, - AbstractFSWALProvider.META_WAL_PROVIDER_ID); + HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, + AbstractFSWALProvider.META_WAL_PROVIDER_ID); Comparator compMeta = walMeta.LOG_NAME_COMPARATOR; Path p1WithMeta = walMeta.computeFilename(11); @@ -274,17 +274,17 @@ public void testFindMemStoresEligibleForFlush() throws Exception { String cf2 = "cf2"; String cf3 = "cf3"; TableDescriptor t1 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)).build(); TableDescriptor t2 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t2")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)).build(); RegionInfo hri1 = RegionInfoBuilder.newBuilder(t1.getTableName()).build(); RegionInfo hri2 = RegionInfoBuilder.newBuilder(t2.getTableName()).build(); List cfs = new ArrayList(); cfs.add(ColumnFamilyDescriptorBuilder.of(cf1)); cfs.add(ColumnFamilyDescriptorBuilder.of(cf2)); - TableDescriptor t3 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t3")) - .setColumnFamilies(cfs).build(); + TableDescriptor t3 = + TableDescriptorBuilder.newBuilder(TableName.valueOf("t3")).setColumnFamilies(cfs).build(); RegionInfo hri3 = RegionInfoBuilder.newBuilder(t3.getTableName()).build(); // add edits and roll the wal @@ -314,13 +314,13 @@ public void testFindMemStoresEligibleForFlush() throws Exception { // return only one region. Map> regionsToFlush = wal.findRegionsToForceFlush(); assertEquals(1, regionsToFlush.size()); - assertEquals(hri1.getEncodedNameAsBytes(), (byte[])regionsToFlush.keySet().toArray()[0]); + assertEquals(hri1.getEncodedNameAsBytes(), (byte[]) regionsToFlush.keySet().toArray()[0]); // insert edits in second region addEdits(wal, hri2, t2, 2, mvcc, scopes2, cf1); // get the regions to flush, it should still read region1. regionsToFlush = wal.findRegionsToForceFlush(); assertEquals(1, regionsToFlush.size()); - assertEquals(hri1.getEncodedNameAsBytes(), (byte[])regionsToFlush.keySet().toArray()[0]); + assertEquals(hri1.getEncodedNameAsBytes(), (byte[]) regionsToFlush.keySet().toArray()[0]); // flush region 1, and roll the wal file. 
Only last wal which has entries for region1 should // remain. flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getColumnFamilyNames()); @@ -375,7 +375,7 @@ public void testFindMemStoresEligibleForFlush() throws Exception { regionsToFlush = wal.findRegionsToForceFlush(); // then only two family need to be flushed when archive oldest wal assertEquals(1, regionsToFlush.size()); - assertEquals(hri3.getEncodedNameAsBytes(), (byte[])regionsToFlush.keySet().toArray()[0]); + assertEquals(hri3.getEncodedNameAsBytes(), (byte[]) regionsToFlush.keySet().toArray()[0]); assertEquals(2, regionsToFlush.get(hri3.getEncodedNameAsBytes()).size()); } finally { if (wal != null) { @@ -385,8 +385,8 @@ public void testFindMemStoresEligibleForFlush() throws Exception { } @Test(expected = IOException.class) - public void testFailedToCreateWALIfParentRenamed() throws IOException, - CommonFSUtils.StreamLacksCapabilityException { + public void testFailedToCreateWALIfParentRenamed() + throws IOException, CommonFSUtils.StreamLacksCapabilityException { final String name = "testFailedToCreateWALIfParentRenamed"; AbstractFSWAL wal = newWAL(FS, CommonFSUtils.getWALRootDir(CONF), name, HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); @@ -416,7 +416,7 @@ public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException final RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build(); final byte[] rowName = tableName.getName(); final TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); HRegion r = HBaseTestingUtil.createRegionAndWAL(hri, TEST_UTIL.getDefaultRootDirPath(), TEST_UTIL.getConfiguration(), htd); HBaseTestingUtil.closeRegionAndWAL(r); @@ -428,24 +428,23 @@ public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException } // subclass and doctor a method. AbstractFSWAL wal = newSlowWAL(FS, CommonFSUtils.getWALRootDir(CONF), DIR.toString(), - testName, CONF, null, true, null, null, new Runnable() { - - @Override - public void run() { - if (goslow.get()) { - Threads.sleep(100); - LOG.debug("Sleeping before appending 100ms"); - } + testName, CONF, null, true, null, null, new Runnable() { + + @Override + public void run() { + if (goslow.get()) { + Threads.sleep(100); + LOG.debug("Sleeping before appending 100ms"); } - }); + } + }); HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal); EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate(); try { List puts = null; for (byte[] fam : htd.getColumnFamilyNames()) { - puts = - TestWALReplay.addRegionEdits(rowName, fam, countPerFamily, ee, region, "x"); + puts = TestWALReplay.addRegionEdits(rowName, fam, countPerFamily, ee, region, "x"); } // Now assert edits made it in. 
@@ -469,7 +468,7 @@ public void run() { for (int i = 0; i < countPerFamily; i++) { final RegionInfo info = region.getRegionInfo(); final WALKeyImpl logkey = new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), clusterIds, -1, -1, region.getMVCC(), scopes); + EnvironmentEdgeManager.currentTime(), clusterIds, -1, -1, region.getMVCC(), scopes); wal.append(info, logkey, edits, true); region.getMVCC().completeAndWait(logkey.getWriteEntry()); } @@ -489,7 +488,7 @@ public void run() { public void testSyncNoAppend() throws IOException { String testName = currentTest.getMethodName(); AbstractFSWAL wal = newWAL(FS, CommonFSUtils.getWALRootDir(CONF), DIR.toString(), testName, - CONF, null, true, null, null); + CONF, null, true, null, null); try { wal.sync(); } finally { @@ -504,7 +503,7 @@ public void testWriteEntryCanBeNull() throws IOException { CONF, null, true, null, null); wal.close(); TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("table")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build(); RegionInfo ri = RegionInfoBuilder.newBuilder(td.getTableName()).build(); MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); @@ -517,7 +516,7 @@ public void testWriteEntryCanBeNull() throws IOException { cols.add(new KeyValue(row, row, row, timestamp, row)); WALKeyImpl key = new WALKeyImpl(ri.getEncodedNameAsBytes(), td.getTableName(), SequenceId.NO_SEQUENCE_ID, - timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE, HConstants.NO_NONCE, mvcc, scopes); + timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE, HConstants.NO_NONCE, mvcc, scopes); try { wal.append(ri, key, cols, true); fail("Should fail since the wal has already been closed"); @@ -539,7 +538,7 @@ public void testRollWriterForClosedWAL() throws IOException { } private AbstractFSWAL createHoldingWAL(String testName, AtomicBoolean startHoldingForAppend, - CountDownLatch holdAppend) throws IOException { + CountDownLatch holdAppend) throws IOException { AbstractFSWAL wal = newWAL(FS, CommonFSUtils.getRootDir(CONF), testName, HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); wal.init(); @@ -559,10 +558,10 @@ public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit log } private HRegion createHoldingHRegion(Configuration conf, TableDescriptor htd, WAL wal) - throws IOException { + throws IOException { RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); TEST_UTIL.createLocalHRegion(hri, CONF, htd, wal).close(); RegionServerServices rsServices = mock(RegionServerServices.class); when(rsServices.getServerName()).thenReturn(ServerName.valueOf("localhost:12345", 123456)); @@ -571,9 +570,9 @@ private HRegion createHoldingHRegion(Configuration conf, TableDescriptor htd, WA } private void doPutWithAsyncWAL(ExecutorService exec, HRegion region, Put put, - Runnable flushOrCloseRegion, AtomicBoolean startHoldingForAppend, - CountDownLatch flushOrCloseFinished, CountDownLatch holdAppend) - throws InterruptedException, IOException { + Runnable flushOrCloseRegion, AtomicBoolean startHoldingForAppend, + CountDownLatch 
flushOrCloseFinished, CountDownLatch holdAppend) + throws InterruptedException, IOException { // do a regular write first because of memstore size calculation. region.put(put); @@ -600,7 +599,7 @@ public void testUnflushedSeqIdTrackingWithAsyncWal() throws IOException, Interru String testName = currentTest.getMethodName(); byte[] b = Bytes.toBytes("b"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("table")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(b)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(b)).build(); AtomicBoolean startHoldingForAppend = new AtomicBoolean(false); CountDownLatch holdAppend = new CountDownLatch(1); @@ -632,7 +631,7 @@ public void testUnflushedSeqIdTrackingWithAsyncWal() throws IOException, Interru } private static final Set STORES_TO_FLUSH = - Collections.newSetFromMap(new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR)); + Collections.newSetFromMap(new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR)); // Testcase for HBASE-23157 @Test @@ -641,8 +640,8 @@ public void testMaxFlushedSequenceIdGoBackwards() throws IOException, Interrupte byte[] a = Bytes.toBytes("a"); byte[] b = Bytes.toBytes("b"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("table")) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(a)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(b)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(a)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(b)).build(); AtomicBoolean startHoldingForAppend = new AtomicBoolean(false); CountDownLatch holdAppend = new CountDownLatch(1); @@ -679,8 +678,8 @@ public void testMaxFlushedSequenceIdGoBackwards() throws IOException, Interrupte long maxFlushedSeqId2 = region.getMaxFlushedSeqId(); // make sure that the maxFlushedSequenceId does not go backwards assertTrue( - "maxFlushedSeqId1(" + maxFlushedSeqId1 + - ") is not greater than or equal to maxFlushedSeqId2(" + maxFlushedSeqId2 + ")", + "maxFlushedSeqId1(" + maxFlushedSeqId1 + + ") is not greater than or equal to maxFlushedSeqId2(" + maxFlushedSeqId2 + ")", maxFlushedSeqId1 <= maxFlushedSeqId2); } finally { exec.shutdownNow(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java index b4a26e4ba8ed..f86c4ffa42c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -129,13 +128,12 @@ public void run() { } } - private void checkMinLogRolls(final WAL log, final int minRolls) - throws Exception { + private void checkMinLogRolls(final WAL log, final int minRolls) throws Exception { final List paths = new ArrayList<>(); log.registerWALActionsListener(new WALActionsListener() { @Override public void postLogRoll(Path oldFile, Path newFile) { - LOG.debug("postLogRoll: oldFile="+oldFile+" newFile="+newFile); + LOG.debug("postLogRoll: oldFile=" + oldFile + " newFile=" + newFile); paths.add(newFile); } }); @@ -151,7 +149,7 @@ public void postLogRoll(Path oldFile, Path newFile) { } wtime = EnvironmentEdgeManager.currentTime() - wtime; LOG.info(String.format("got %d rolls after %dms (%dms each) - expected at least %d rolls", - paths.size(), wtime, wtime / paths.size(), minRolls)); + paths.size(), wtime, wtime / paths.size(), minRolls)); assertFalse(paths.size() < minRolls); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java index bbff17d4a8fd..a798b1209575 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,7 +60,7 @@ /** * Test log deletion as logs are rolled. 
*/ -public abstract class AbstractTestLogRolling { +public abstract class AbstractTestLogRolling { private static final Logger LOG = LoggerFactory.getLogger(AbstractTestLogRolling.class); protected HRegionServer server; protected String tableName; @@ -71,9 +70,10 @@ public abstract class AbstractTestLogRolling { protected Admin admin; protected SingleProcessHBaseCluster cluster; protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - @Rule public final TestName name = new TestName(); + @Rule + public final TestName name = new TestName(); - public AbstractTestLogRolling() { + public AbstractTestLogRolling() { this.server = null; this.tableName = null; @@ -133,7 +133,7 @@ public void setUp() throws Exception { } @After - public void tearDown() throws Exception { + public void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -145,7 +145,7 @@ protected void startAndWriteData() throws IOException, InterruptedException { Table table = createTestTable(this.tableName); server = TEST_UTIL.getRSForFirstRegionInTable(table.getName()); - for (int i = 1; i <= 256; i++) { // 256 writes should cause 8 log rolls + for (int i = 1; i <= 256; i++) { // 256 writes should cause 8 log rolls doPut(table, i); if (i % 32 == 0) { // After every 32 writes sleep to let the log roller run @@ -165,7 +165,7 @@ protected void startAndWriteData() throws IOException, InterruptedException { public void testLogRollOnNothingWritten() throws Exception { final Configuration conf = TEST_UTIL.getConfiguration(); final WALFactory wals = - new WALFactory(conf, ServerName.valueOf("test.com", 8080, 1).toString()); + new WALFactory(conf, ServerName.valueOf("test.com", 8080, 1).toString()); final WAL newLog = wals.getWAL(null); try { // Now roll the log before we write anything. @@ -198,7 +198,8 @@ public void testLogRolling() throws Exception { startAndWriteData(); RegionInfo region = server.getRegions(TableName.valueOf(tableName)).get(0).getRegionInfo(); final WAL log = server.getWAL(region); - LOG.info("after writing there are " + AbstractFSWALProvider.getNumRolledLogFiles(log) + " log files"); + LOG.info( + "after writing there are " + AbstractFSWALProvider.getNumRolledLogFiles(log) + " log files"); assertLogFileSize(log); // flush all regions @@ -236,14 +237,13 @@ void validateData(Table table, int rownum) throws IOException { get.addFamily(HConstants.CATALOG_FAMILY); Result result = table.get(get); assertTrue(result.size() == 1); - assertTrue(Bytes.equals(value, - result.getValue(HConstants.CATALOG_FAMILY, null))); + assertTrue(Bytes.equals(value, result.getValue(HConstants.CATALOG_FAMILY, null))); LOG.info("Validated row " + row); } /** - * Tests that logs are deleted when some region has a compaction - * record in WAL and no other records. See HBASE-8597. + * Tests that logs are deleted when some region has a compaction record in WAL and no other + * records. See HBASE-8597. 
*/ @Test public void testCompactionRecordDoesntBlockRolling() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java index f9179cb15418..2a04e591f3f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index 4c454d96e89c..327bb5942c8c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -156,10 +155,8 @@ public void setUp() throws Exception { this.fs = TEST_UTIL.getDFSCluster().getFileSystem(); this.hbaseRootDir = CommonFSUtils.getRootDir(this.conf); this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME); - String serverName = - ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010, - EnvironmentEdgeManager.currentTime()) - .toString(); + String serverName = ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010, + EnvironmentEdgeManager.currentTime()).toString(); this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName); this.logDir = new Path(this.hbaseRootDir, logName); if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) { @@ -240,7 +237,7 @@ public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception { // flush region and make major compaction HRegion region = - (HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName()); + (HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName()); region.flush(true); // wait to complete major compaction for (HStore store : region.getStores()) { @@ -279,7 +276,7 @@ public void test2727() throws Exception { TableDescriptor tableDescriptor = createBasic3FamilyHTD(tableName); Region region2 = - HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, tableDescriptor); + HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, tableDescriptor); HBaseTestingUtil.closeRegionAndWAL(region2); final byte[] rowName = tableName.getName(); @@ -310,7 +307,7 @@ public void test2727() throws Exception { WAL wal3 = createWAL(this.conf, hbaseRootDir, logName); try { HRegion region = - HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, tableDescriptor, wal3); + HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, tableDescriptor, wal3); long seqid = region.getOpenSeqNum(); // The regions opens with sequenceId as 1. With 6k edits, its sequence number reaches 6k + 1. 
// When opened, this region would apply 6k edits, and increment the sequenceId by 1 @@ -331,7 +328,7 @@ public void test2727() throws Exception { */ @Test public void testRegionMadeOfBulkLoadedFilesOnly() throws IOException, SecurityException, - IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { + IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { final TableName tableName = TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly"); final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName); final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString()); @@ -368,7 +365,7 @@ public Object run() throws Exception { WAL wal2 = createWAL(newConf, hbaseRootDir, logName); HRegion region2 = - HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd, wal2); + HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd, wal2); long seqid2 = region2.getOpenSeqNum(); assertTrue(seqid2 > -1); assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan()))); @@ -390,7 +387,7 @@ public Object run() throws Exception { */ @Test public void testCompactedBulkLoadedFiles() throws IOException, SecurityException, - IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { + IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { final TableName tableName = TableName.valueOf("testCompactedBulkLoadedFiles"); final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName); final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString()); @@ -432,7 +429,7 @@ public Object run() throws Exception { WAL wal2 = createWAL(newConf, hbaseRootDir, logName); HRegion region2 = - HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd, wal2); + HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd, wal2); long seqid2 = region2.getOpenSeqNum(); assertTrue(seqid2 > -1); assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan()))); @@ -451,7 +448,7 @@ public Object run() throws Exception { */ @Test public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, - IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { + IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion"); final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName); final Path basedir = CommonFSUtils.getTableDir(this.hbaseRootDir, tableName); @@ -547,7 +544,7 @@ protected void restoreEdit(HStore s, Cell cell, MemStoreSizing memstoreSizing) { */ @Test public void testReplayEditsAfterPartialFlush() throws IOException, SecurityException, - IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { + IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion"); final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName); final Path basedir = CommonFSUtils.getTableDir(this.hbaseRootDir, tableName); @@ -612,8 +609,8 @@ public CustomStoreFlusher(Configuration conf, HStore store) { @Override public List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId, - MonitoredTask status, ThroughputController 
throughputController, - FlushLifeCycleTracker tracker) throws IOException { + MonitoredTask status, ThroughputController throughputController, + FlushLifeCycleTracker tracker) throws IOException { if (throwExceptionWhenFlushing.get()) { throw new IOException("Simulated exception by tests"); } @@ -646,7 +643,7 @@ public void testReplayEditsAfterAbortingFlush() throws IOException { customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, CustomStoreFlusher.class.getName()); HRegion region = - HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null); + HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null); int writtenRowCount = 10; List families = Arrays.asList((htd.getColumnFamilies())); for (int i = 0; i < writtenRowCount; i++) { @@ -698,7 +695,7 @@ public void testReplayEditsAfterAbortingFlush() throws IOException { WAL wal2 = createWAL(this.conf, hbaseRootDir, logName); Mockito.doReturn(false).when(rsServices).isAborted(); HRegion region2 = - HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal2, this.conf, rsServices, null); + HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal2, this.conf, rsServices, null); scanner = region2.getScanner(new Scan()); assertEquals(writtenRowCount, getScannedCount(scanner)); } @@ -787,8 +784,8 @@ public Void run() throws Exception { final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) { @Override protected FlushResultImpl internalFlushcache(final WAL wal, final long myseqid, - final Collection storesToFlush, MonitoredTask status, - boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException { + final Collection storesToFlush, MonitoredTask status, + boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException { LOG.info("InternalFlushCache Invoked"); FlushResultImpl fs = super.internalFlushcache(wal, myseqid, storesToFlush, Mockito.mock(MonitoredTask.class), writeFlushWalMarker, tracker); @@ -857,13 +854,14 @@ public void testSequentialEditLogSeqNum() throws IOException { WALSplitter.splitLogFile(hbaseRootDir, listStatus[0], this.fs, this.conf, null, null, null, wals, null); FileStatus[] listStatus1 = - this.fs.listStatus(new Path(CommonFSUtils.getWALTableDir(conf, tableName), - new Path(hri.getEncodedName(), "recovered.edits")), new PathFilter() { - @Override - public boolean accept(Path p) { - return !WALSplitUtil.isSequenceIdFile(p); - } - }); + this.fs.listStatus(new Path(CommonFSUtils.getWALTableDir(conf, tableName), + new Path(hri.getEncodedName(), "recovered.edits")), + new PathFilter() { + @Override + public boolean accept(Path p) { + return !WALSplitUtil.isSequenceIdFile(p); + } + }); int editCount = 0; for (FileStatus fileStatus : listStatus1) { editCount = Integer.parseInt(fileStatus.getPath().getName()); @@ -911,8 +909,8 @@ public void testDatalossWhenInputError() throws Exception { FSDataInputStream stream = fs.open(editFile); stream.seek(ProtobufLogReader.PB_WAL_MAGIC.length); Class logReaderClass = - conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class, - AbstractFSWALProvider.Reader.class); + conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class, + AbstractFSWALProvider.Reader.class); AbstractFSWALProvider.Reader reader = logReaderClass.getDeclaredConstructor().newInstance(); reader.init(this.fs, editFile, conf, stream); final long headerLength = stream.getPos(); @@ -974,7 +972,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { * testcase 
for https://issues.apache.org/jira/browse/HBASE-14949. */ private void testNameConflictWhenSplit(boolean largeFirst) - throws IOException, StreamLacksCapabilityException { + throws IOException, StreamLacksCapabilityException { final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL"); final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName); @@ -1027,7 +1025,7 @@ static class MockWAL extends FSHLog { boolean doCompleteCacheFlush = false; public MockWAL(FileSystem fs, Path rootDir, String logName, Configuration conf) - throws IOException { + throws IOException { super(fs, rootDir, logName, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null); } @@ -1098,12 +1096,12 @@ public void setGlobalMemStoreLimit(long globalMemStoreSize) { } private WALKeyImpl createWALKey(final TableName tableName, final RegionInfo hri, - final MultiVersionConcurrencyControl mvcc, NavigableMap scopes) { + final MultiVersionConcurrencyControl mvcc, NavigableMap scopes) { return new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, 999, mvcc, scopes); } private WALEdit createWALEdit(final byte[] rowName, final byte[] family, EnvironmentEdge ee, - int index) { + int index) { byte[] qualifierBytes = Bytes.toBytes(Integer.toString(index)); byte[] columnBytes = Bytes.toBytes(Bytes.toString(family) + ":" + Integer.toString(index)); WALEdit edit = new WALEdit(); @@ -1112,18 +1110,18 @@ private WALEdit createWALEdit(final byte[] rowName, final byte[] family, Environ } private FSWALEntry createFSWALEntry(TableDescriptor htd, RegionInfo hri, long sequence, - byte[] rowName, byte[] family, EnvironmentEdge ee, MultiVersionConcurrencyControl mvcc, - int index, NavigableMap scopes) throws IOException { + byte[] rowName, byte[] family, EnvironmentEdge ee, MultiVersionConcurrencyControl mvcc, + int index, NavigableMap scopes) throws IOException { FSWALEntry entry = new FSWALEntry(sequence, createWALKey(htd.getTableName(), hri, mvcc, scopes), - createWALEdit(rowName, family, ee, index), hri, true, null); + createWALEdit(rowName, family, ee, index), hri, true, null); entry.stampRegionSequenceId(mvcc.begin()); return entry; } private void addWALEdits(final TableName tableName, final RegionInfo hri, final byte[] rowName, - final byte[] family, final int count, EnvironmentEdge ee, final WAL wal, - final MultiVersionConcurrencyControl mvcc, NavigableMap scopes) - throws IOException { + final byte[] family, final int count, EnvironmentEdge ee, final WAL wal, + final MultiVersionConcurrencyControl mvcc, NavigableMap scopes) + throws IOException { for (int j = 0; j < count; j++) { wal.appendData(hri, createWALKey(tableName, hri, mvcc, scopes), createWALEdit(rowName, family, ee, j)); @@ -1132,7 +1130,7 @@ private void addWALEdits(final TableName tableName, final RegionInfo hri, final } public static List addRegionEdits(final byte[] rowName, final byte[] family, final int count, - EnvironmentEdge ee, final Region r, final String qualifierPrefix) throws IOException { + EnvironmentEdge ee, final Region r, final String qualifierPrefix) throws IOException { List puts = new ArrayList<>(); for (int j = 0; j < count; j++) { byte[] qualifier = Bytes.toBytes(qualifierPrefix + Integer.toString(j)); @@ -1159,7 +1157,7 @@ private RegionInfo createBasic3FamilyHRegionInfo(final TableName tableName) { */ private Path runWALSplit(final Configuration c) throws IOException { List splits = - WALSplitter.split(hbaseRootDir, logDir, oldLogDir, 
FileSystem.get(c), c, wals); + WALSplitter.split(hbaseRootDir, logDir, oldLogDir, FileSystem.get(c), c, wals); // Split should generate only 1 file since there's only 1 region assertEquals("splits=" + splits, 1, splits.size()); // Make sure the file exists @@ -1177,7 +1175,7 @@ private TableDescriptor createBasic3FamilyHTD(final TableName tableName) { } private void writerWALFile(Path file, List entries) - throws IOException, StreamLacksCapabilityException { + throws IOException, StreamLacksCapabilityException { fs.mkdirs(file.getParent()); ProtobufLogWriter writer = new ProtobufLogWriter(); writer.init(fs, file, conf, true, WALUtil.getWALBlockSize(conf, fs, file), @@ -1190,5 +1188,5 @@ private void writerWALFile(Path file, List entries) } protected abstract WAL createWAL(Configuration c, Path hbaseRootDir, String logName) - throws IOException; + throws IOException; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java index 2b195c4a1eb4..e71d89067a7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.LinkedList; import java.util.Queue; - import org.apache.hadoop.hbase.wal.WAL.Entry; public class FaultyProtobufLogReader extends ProtobufLogReader { @@ -53,10 +52,11 @@ public Entry next(Entry reuse) throws IOException { throw new IOException("fake Exception"); } else if (nextQueue.size() == this.numberOfFileEntries / 2 && getFailureType() == FailureType.MIDDLE) { - throw new IOException("fake Exception"); - } else if (nextQueue.size() == 1 && getFailureType() == FailureType.END) { - throw new IOException("fake Exception"); - } + throw new IOException("fake Exception"); + } else + if (nextQueue.size() == 1 && getFailureType() == FailureType.END) { + throw new IOException("fake Exception"); + } if (nextQueue.peek() != null) { edit++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedLogWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedLogWriter.java index 5a6137056ed6..0a0f869865c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedLogWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedLogWriter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -35,14 +33,15 @@ protected String getWriterClassName() { } public static boolean activateFailure = false; + @Override - public void append(Entry entry) throws IOException { - super.append(entry); - if (activateFailure && - Bytes.equals(entry.getKey().getEncodedRegionName(), Bytes.toBytes("break"))) { - System.out.println(getClass().getName() + ": I will throw an exception now..."); - throw(new IOException("This exception is instrumented and should only be thrown for testing" - )); - } + public void append(Entry entry) throws IOException { + super.append(entry); + if (activateFailure + && Bytes.equals(entry.getKey().getEncodedRegionName(), Bytes.toBytes("break"))) { + System.out.println(getClass().getName() + ": I will throw an exception now..."); + throw (new IOException( + "This exception is instrumented and should only be thrown for testing")); } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java index cf4862b2c335..ccc568e7770c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogTestHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java index 7b342a1e879a..07070411de7b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java @@ -56,13 +56,13 @@ import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup; @@ -84,9 +84,9 @@ public class TestAsyncFSWAL extends AbstractTestFSWAL { @BeforeClass public static void setUpBeforeClass() throws Exception { - GROUP = - new NioEventLoopGroup(1, new ThreadFactoryBuilder().setNameFormat("TestAsyncFSWAL-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + GROUP = new NioEventLoopGroup(1, + new ThreadFactoryBuilder().setNameFormat("TestAsyncFSWAL-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); CHANNEL_CLASS = NioSocketChannel.class; AbstractTestFSWAL.setUpBeforeClass(); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALDurability.java index a2ac3375ab6e..491807f692e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALDurability.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestAsyncFSWALDurability extends WALDurabilityTestBase channelClass) - throws FailedLogCloseException, IOException { + EventLoopGroup eventLoopGroup, Class channelClass) + throws FailedLogCloseException, IOException { super(fs, rootDir, logDir, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null, - eventLoopGroup, channelClass); + eventLoopGroup, channelClass); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java index 858fc3156641..83b15da9ad22 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWALRollStuck.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,7 +70,7 @@ public class TestAsyncFSWALRollStuck { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncFSWALRollStuck.class); + HBaseClassTestRule.forClass(TestAsyncFSWALRollStuck.class); private static final Logger LOG = LoggerFactory.getLogger(TestAsyncFSWALRollStuck.class); @@ -143,11 +143,11 @@ public static void setUp() throws Exception { MVCC = new MultiVersionConcurrencyControl(); EXECUTOR = - Executors.newScheduledThreadPool(2, new ThreadFactoryBuilder().setDaemon(true).build()); + Executors.newScheduledThreadPool(2, new ThreadFactoryBuilder().setDaemon(true).build()); Path rootDir = UTIL.getDataTestDir(); ROLL_EXEC = - Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).build()); + Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).build()); WALActionsListener listener = new WALActionsListener() { @Override @@ -163,7 +163,7 @@ public void logRollRequested(RollRequestReason reason) { }; WAL = new AsyncFSWAL(UTIL.getTestFileSystem(), rootDir, "log", "oldlog", conf, - Arrays.asList(listener), true, null, null, EVENT_LOOP_GROUP, CHANNEL_CLASS); + Arrays.asList(listener), true, null, null, EVENT_LOOP_GROUP, CHANNEL_CLASS); WAL.init(); } @@ -180,10 +180,10 @@ public void testRoll() throws Exception { byte[] row = Bytes.toBytes("family"); WALEdit edit = new WALEdit(); edit.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setFamily(row) - .setQualifier(row).setRow(row).setValue(row) - .setTimestamp(EnvironmentEdgeManager.currentTime()).setType(Type.Put).build()); + .setQualifier(row).setRow(row).setValue(row) + .setTimestamp(EnvironmentEdgeManager.currentTime()).setType(Type.Put).build()); WALKeyImpl key1 = - new WALKeyImpl(RI.getEncodedNameAsBytes(), TN, 
EnvironmentEdgeManager.currentTime(), MVCC); + new WALKeyImpl(RI.getEncodedNameAsBytes(), TN, EnvironmentEdgeManager.currentTime(), MVCC); WAL.appendData(RI, key1, edit); WALKeyImpl key2 = new WALKeyImpl(RI.getEncodedNameAsBytes(), TN, key1.getWriteTime() + 1, MVCC); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRollPeriod.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRollPeriod.java index 981e871532c6..0a4c4da3af5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRollPeriod.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRollPeriod.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java index 8afae061be43..ee9e7969a309 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java index ef04548a68b0..6bcead90cb03 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,6 +28,7 @@ import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java index c14e8da9122a..eee7f08d0215 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,12 +28,12 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup; @@ -53,8 +53,8 @@ public class TestAsyncWALReplay extends AbstractTestWALReplay { @BeforeClass public static void setUpBeforeClass() throws Exception { GROUP = new NioEventLoopGroup(1, - new ThreadFactoryBuilder().setNameFormat("TestAsyncWALReplay-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadFactoryBuilder().setNameFormat("TestAsyncWALReplay-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); CHANNEL_CLASS = NioSocketChannel.class; Configuration conf = AbstractTestWALReplay.TEST_UTIL.getConfiguration(); conf.set(WALFactory.WAL_PROVIDER, "asyncfs"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplayCompressed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplayCompressed.java index 347b81258c01..3de5ceced9df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplayCompressed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplayCompressed.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplayValueCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplayValueCompression.java index cbe1faa65d48..e6f9ce4062cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplayValueCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplayValueCompression.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java index 359add7fd14f..799876af9c73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public class TestCombinedAsyncWriter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCombinedAsyncWriter.class); + HBaseClassTestRule.forClass(TestCombinedAsyncWriter.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -106,11 +106,11 @@ private void doTest(boolean withTrailer) throws IOException { FileSystem fs = UTIL.getTestFileSystem(); Configuration conf = UTIL.getConfiguration(); try ( - AsyncWriter writer1 = AsyncFSWALProvider.createAsyncWriter(conf, fs, path1, false, - EVENT_LOOP_GROUP.next(), CHANNEL_CLASS); - AsyncWriter writer2 = AsyncFSWALProvider.createAsyncWriter(conf, fs, path2, false, - EVENT_LOOP_GROUP.next(), CHANNEL_CLASS); - CombinedAsyncWriter writer = CombinedAsyncWriter.create(writer1, writer2)) { + AsyncWriter writer1 = AsyncFSWALProvider.createAsyncWriter(conf, fs, path1, false, + EVENT_LOOP_GROUP.next(), CHANNEL_CLASS); + AsyncWriter writer2 = AsyncFSWALProvider.createAsyncWriter(conf, fs, path2, false, + EVENT_LOOP_GROUP.next(), CHANNEL_CLASS); + CombinedAsyncWriter writer = CombinedAsyncWriter.create(writer1, writer2)) { ProtobufLogTestHelper.doWrite(new WriterOverAsyncWriter(writer), withTrailer, tableName, columnCount, recordCount, row, timestamp); try (ProtobufLogReader reader = (ProtobufLogReader) WALS.createReader(fs, path1)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCompressor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCompressor.java index d8a5099f84c6..62d796093818 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCompressor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCompressor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ /** * Test our compressor class. 
*/ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestCompressor { @ClassRule @@ -53,27 +53,26 @@ public static void setUpBeforeClass() throws Exception { @Test public void testToShort() { short s = 1; - assertEquals(s, Compressor.toShort((byte)0, (byte)1)); + assertEquals(s, Compressor.toShort((byte) 0, (byte) 1)); s <<= 8; - assertEquals(s, Compressor.toShort((byte)1, (byte)0)); + assertEquals(s, Compressor.toShort((byte) 1, (byte) 0)); } - @Test (expected = IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testNegativeToShort() { - Compressor.toShort((byte)0xff, (byte)0xff); + Compressor.toShort((byte) 0xff, (byte) 0xff); } @Test public void testCompressingWithNullDictionaries() throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); - byte [] blahBytes = Bytes.toBytes("blah"); + byte[] blahBytes = Bytes.toBytes("blah"); Compressor.writeCompressed(blahBytes, 0, blahBytes.length, dos, null); dos.close(); - byte [] dosbytes = baos.toByteArray(); - DataInputStream dis = - new DataInputStream(new ByteArrayInputStream(dosbytes)); - byte [] product = Compressor.readCompressed(dis, null); + byte[] dosbytes = baos.toByteArray(); + DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dosbytes)); + byte[] product = Compressor.readCompressed(dis, null); assertTrue(Bytes.equals(blahBytes, product)); } @@ -83,15 +82,14 @@ public void testCompressingWithClearDictionaries() throws IOException { DataOutputStream dos = new DataOutputStream(baos); Dictionary dictionary = new LRUDictionary(); dictionary.init(Short.MAX_VALUE); - byte [] blahBytes = Bytes.toBytes("blah"); + byte[] blahBytes = Bytes.toBytes("blah"); Compressor.writeCompressed(blahBytes, 0, blahBytes.length, dos, dictionary); dos.close(); - byte [] dosbytes = baos.toByteArray(); - DataInputStream dis = - new DataInputStream(new ByteArrayInputStream(dosbytes)); + byte[] dosbytes = baos.toByteArray(); + DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dosbytes)); dictionary = new LRUDictionary(); dictionary.init(Short.MAX_VALUE); - byte [] product = Compressor.readCompressed(dis, dictionary); + byte[] product = Compressor.readCompressed(dis, dictionary); assertTrue(Bytes.equals(blahBytes, product)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCustomWALCellCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCustomWALCellCodec.java index 6add84fa7a3f..33f872178da0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCustomWALCellCodec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCustomWALCellCodec.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ /** * Test that we can create, load, setup our own custom codec */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestCustomWALCellCodec { @ClassRule @@ -66,8 +66,7 @@ public void testCreatePreparesCodec() throws Exception { } /** - * Test that a custom {@link WALCellCodec} will fail if provided an invalid - * code class. 
+ * Test that a custom {@link WALCellCodec} will fail if provided an invalid code class. */ @Test(expected = RuntimeException.class) public void testCreatePreparesCodecInvalidClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java index de09d5b2b41e..0113d92f8bec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -124,9 +124,8 @@ public void tearDown() throws IOException { @Test public void testDurability() throws Exception { - WALFactory wals = new WALFactory(CONF, - ServerName.valueOf("TestDurability", 16010, EnvironmentEdgeManager.currentTime()) - .toString()); + WALFactory wals = new WALFactory(CONF, ServerName + .valueOf("TestDurability", 16010, EnvironmentEdgeManager.currentTime()).toString()); HRegion region = createHRegion(wals, Durability.USE_DEFAULT); WAL wal = region.getWAL(); HRegion deferredRegion = createHRegion(region.getTableDescriptor(), region.getRegionInfo(), @@ -189,9 +188,8 @@ public void testIncrement() throws Exception { byte[] col3 = Bytes.toBytes("col3"); // Setting up region - WALFactory wals = new WALFactory(CONF, - ServerName.valueOf("TestIncrement", 16010, EnvironmentEdgeManager.currentTime()) - .toString()); + WALFactory wals = new WALFactory(CONF, ServerName + .valueOf("TestIncrement", 16010, EnvironmentEdgeManager.currentTime()).toString()); HRegion region = createHRegion(wals, Durability.USE_DEFAULT); WAL wal = region.getWAL(); @@ -255,10 +253,9 @@ public void testIncrementWithReturnResultsSetToFalse() throws Exception { byte[] col1 = Bytes.toBytes("col1"); // Setting up region - WALFactory wals = new WALFactory(CONF, - ServerName.valueOf("testIncrementWithReturnResultsSetToFalse", - 16010, EnvironmentEdgeManager.currentTime()) - .toString()); + WALFactory wals = + new WALFactory(CONF, ServerName.valueOf("testIncrementWithReturnResultsSetToFalse", 16010, + EnvironmentEdgeManager.currentTime()).toString()); HRegion region = createHRegion(wals, Durability.USE_DEFAULT); Increment inc1 = new Increment(row1); @@ -301,8 +298,8 @@ private HRegion createHRegion(WALFactory wals, Durability durability) throws IOE throw new IOException("Failed delete of " + path); } } - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return HRegion.createHRegion(info, path, CONF, htd, wals.getWAL(info)); } @@ -314,8 +311,8 @@ private HRegion createHRegion(TableDescriptor td, RegionInfo info, String dir, W throw new IOException("Failed delete of " + path); } } - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return HRegion.createHRegion(info, path, CONF, td, wal); } } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java index c930d1099c1c..75c98d277e2e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,8 +70,7 @@ public class TestFSHLog extends AbstractTestFSWAL { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFSHLog.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestFSHLog.class); private static final long TEST_TIMEOUT_MS = 10000; @@ -82,8 +81,8 @@ public class TestFSHLog extends AbstractTestFSWAL { protected AbstractFSWAL newWAL(FileSystem fs, Path rootDir, String walDir, String archiveDir, Configuration conf, List listeners, boolean failIfWALExists, String prefix, String suffix) throws IOException { - FSHLog wal = - new FSHLog(fs, rootDir, walDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); + FSHLog wal = new FSHLog(fs, rootDir, walDir, archiveDir, conf, listeners, failIfWALExists, + prefix, suffix); wal.init(); return wal; } @@ -111,7 +110,7 @@ public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldExcepti SecurityException, IllegalArgumentException, IllegalAccessException { final String name = this.name.getMethodName(); FSHLog log = new FSHLog(FS, CommonFSUtils.getRootDir(CONF), name, - HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); + HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); log.init(); try { Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler"); @@ -124,7 +123,7 @@ public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldExcepti syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build(); NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] fam : htd.getColumnFamilyNames()) { scopes.put(fam, 0); @@ -147,14 +146,17 @@ public void testDeadlockWithSyncOverwrites() throws Exception { final CountDownLatch blockBeforeSafePoint = new CountDownLatch(1); class FailingWriter implements WALProvider.Writer { - @Override public void sync(boolean forceSync) throws IOException { + @Override + public void sync(boolean forceSync) throws IOException { throw new IOException("Injected failure.."); } - @Override public void append(WAL.Entry entry) throws IOException { + @Override + public void append(WAL.Entry entry) throws IOException { } - @Override public long getLength() { + @Override + public long getLength() { return 0; } @@ -163,7 +165,8 @@ public long getSyncedLength() { return 0; } - @Override public void close() throws IOException { + @Override + public void close() throws IOException { } } @@ -172,8 +175,8 @@ public long getSyncedLength() { */ class CustomFSHLog extends FSHLog { public CustomFSHLog(FileSystem fs, Path rootDir, String logDir, String archiveDir, - Configuration conf, List listeners, boolean 
failIfWALExists, - String prefix, String suffix) throws IOException { + Configuration conf, List listeners, boolean failIfWALExists, + String prefix, String suffix) throws IOException { super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); } @@ -196,8 +199,7 @@ public SyncFuture publishSyncOnRingBuffer() { try (CustomFSHLog log = new CustomFSHLog(FS, CommonFSUtils.getRootDir(CONF), name, HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null)) { log.setWriter(new FailingWriter()); - Field ringBufferEventHandlerField = - FSHLog.class.getDeclaredField("ringBufferEventHandler"); + Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler"); ringBufferEventHandlerField.setAccessible(true); FSHLog.RingBufferEventHandler ringBufferEventHandler = (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log); @@ -213,7 +215,8 @@ public SyncFuture publishSyncOnRingBuffer() { // Unblock the safe point trigger.. blockBeforeSafePoint.countDown(); // Wait for the safe point to be reached. - // With the deadlock in HBASE-25984, this is never possible, thus blocking the sync pipeline. + // With the deadlock in HBASE-25984, this is never possible, thus blocking the sync + // pipeline. Waiter.waitFor(CONF, TEST_TIMEOUT_MS, latch::isSafePointAttained); } finally { // Force release the safe point, for the clean up. @@ -236,7 +239,7 @@ public void testUnflushedSeqIdTracking() throws IOException, InterruptedExceptio final CountDownLatch putFinished = new CountDownLatch(1); try (FSHLog log = new FSHLog(FS, CommonFSUtils.getRootDir(CONF), name, - HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null)) { + HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null)) { log.init(); log.registerWALActionsListener(new WALActionsListener() { @Override @@ -254,22 +257,22 @@ public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit log // open a new region which uses this WAL TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(b)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(b)).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); final HRegion region = TEST_UTIL.createLocalHRegion(hri, CONF, htd, log); ExecutorService exec = Executors.newFixedThreadPool(2); // do a regular write first because of memstore size calculation. 
- region.put(new Put(b).addColumn(b, b,b)); + region.put(new Put(b).addColumn(b, b, b)); startHoldingForAppend.set(true); exec.submit(new Runnable() { @Override public void run() { try { - region.put(new Put(b).addColumn(b, b,b)); + region.put(new Put(b).addColumn(b, b, b)); putFinished.countDown(); } catch (IOException e) { LOG.error(e.toString(), e); @@ -285,8 +288,8 @@ public void run() { public void run() { try { HRegion.FlushResult flushResult = region.flush(true); - LOG.info("Flush result:" + flushResult.getResult()); - LOG.info("Flush succeeded:" + flushResult.isFlushSucceeded()); + LOG.info("Flush result:" + flushResult.getResult()); + LOG.info("Flush succeeded:" + flushResult.isFlushSucceeded()); flushFinished.countDown(); } catch (IOException e) { LOG.error(e.toString(), e); @@ -304,12 +307,12 @@ public void run() { flushFinished.await(); // check whether flush went through - assertEquals("Region did not flush?", 1, region.getStoreFileList(new byte[][]{b}).size()); + assertEquals("Region did not flush?", 1, region.getStoreFileList(new byte[][] { b }).size()); // now check the region's unflushed seqIds. long seqId = log.getEarliestMemStoreSeqNum(hri.getEncodedNameAsBytes()); - assertEquals("Found seqId for the region which is already flushed", - HConstants.NO_SEQNUM, seqId); + assertEquals("Found seqId for the region which is already flushed", HConstants.NO_SEQNUM, + seqId); region.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLogDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLogDurability.java index 78531f350c15..1d186f699312 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLogDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLogDurability.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,11 +33,11 @@ public class TestFSHLogDurability extends WALDurabilityTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFSHLogDurability.class); + HBaseClassTestRule.forClass(TestFSHLogDurability.class); @Override protected CustomFSHLog getWAL(FileSystem fs, Path root, String logDir, Configuration conf) - throws IOException { + throws IOException { CustomFSHLog wal = new CustomFSHLog(fs, root, logDir, conf); wal.init(); return wal; @@ -65,7 +65,7 @@ class CustomFSHLog extends FSHLog { private Boolean writerSyncFlag; public CustomFSHLog(FileSystem fs, Path root, String logDir, Configuration conf) - throws IOException { + throws IOException { super(fs, root, logDir, conf); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java index 6c45bd4f2dc3..f18386d2a062 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,52 +51,33 @@ public void testCollectFamilies() { List cells = new ArrayList<>(); assertEquals(0, FSWALEntry.collectFamilies(cells).size()); - cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(family0).setFamily(family0).setQualifier(family0) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build()); + cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(family0) + .setFamily(family0).setQualifier(family0).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build()); assertEquals(1, FSWALEntry.collectFamilies(cells).size()); - cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(family1).setFamily(family1).setQualifier(family1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build()); + cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(family1) + .setFamily(family1).setQualifier(family1).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build()); assertEquals(2, FSWALEntry.collectFamilies(cells).size()); - cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(family0).setFamily(family0).setQualifier(family0) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build()); - cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(family1).setFamily(family1).setQualifier(family1) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build()); + cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(family0) + .setFamily(family0).setQualifier(family0).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build()); + cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(family1) + .setFamily(family1).setQualifier(family1).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build()); assertEquals(2, FSWALEntry.collectFamilies(cells).size()); - cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(family2).setFamily(family2).setQualifier(family2) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build()); + cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(family2) + .setFamily(family2).setQualifier(family2).setTimestamp(HConstants.LATEST_TIMESTAMP) + .setType(KeyValue.Type.Maximum.getCode()).setValue(HConstants.EMPTY_BYTE_ARRAY).build()); assertEquals(3, FSWALEntry.collectFamilies(cells).size()); cells.add(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) - .setRow(WALEdit.METAFAMILY).setFamily(WALEdit.METAFAMILY) - .setQualifier(WALEdit.METAFAMILY) - .setTimestamp(HConstants.LATEST_TIMESTAMP) - .setType(KeyValue.Type.Maximum.getCode()) - .setValue(HConstants.EMPTY_BYTE_ARRAY) - .build()); + 
.setRow(WALEdit.METAFAMILY).setFamily(WALEdit.METAFAMILY).setQualifier(WALEdit.METAFAMILY) + .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode()) + .setValue(HConstants.EMPTY_BYTE_ARRAY).build()); assertEquals(3, FSWALEntry.collectFamilies(cells).size()); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHBaseWalOnEC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHBaseWalOnEC.java index 6dccb7853660..937b01a42851 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHBaseWalOnEC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHBaseWalOnEC.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver.wal; import static org.junit.Assert.assertArrayEquals; @@ -62,7 +61,7 @@ public class TestHBaseWalOnEC { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHBaseWalOnEC.class); + HBaseClassTestRule.forClass(TestHBaseWalOnEC.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java index 179b0fb2a006..24d3991aa9c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,10 +65,9 @@ import org.slf4j.LoggerFactory; /** - * Tests for conditions that should trigger RegionServer aborts when - * rolling the current WAL fails. + * Tests for conditions that should trigger RegionServer aborts when rolling the current WAL fails. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestLogRollAbort { @ClassRule @@ -91,8 +90,7 @@ public class TestLogRollAbort { @BeforeClass public static void setUpBeforeClass() throws Exception { // Tweak default timeout values down for faster recovery - TEST_UTIL.getConfiguration().setInt( - "hbase.regionserver.logroll.errors.tolerated", 2); + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.logroll.errors.tolerated", 2); TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 10 * 1000); // Increase the amount of time between client retries @@ -133,8 +131,8 @@ public void tearDown() throws Exception { } /** - * Tests that RegionServer aborts if we hit an error closing the WAL when - * there are unsynced WAL edits. See HBASE-4282. + * Tests that RegionServer aborts if we hit an error closing the WAL when there are unsynced WAL + * edits. See HBASE-4282. */ @Test public void testRSAbortWithUnflushedEdits() throws Exception { @@ -170,8 +168,8 @@ public void testRSAbortWithUnflushedEdits() throws Exception { try { log.rollWriter(true); } catch (FailedLogCloseException flce) { - // Expected exception. We used to expect that there would be unsynced appends but this - // not reliable now that sync plays a roll in wall rolling. The above puts also now call + // Expected exception. 
We used to expect that there would be unsynced appends but this + // not reliable now that sync plays a roll in wall rolling. The above puts also now call // sync. } catch (Throwable t) { LOG.error(HBaseMarkers.FATAL, "FAILED TEST: Got wrong exception", t); @@ -182,22 +180,22 @@ public void testRSAbortWithUnflushedEdits() throws Exception { } /** - * Tests the case where a RegionServer enters a GC pause, - * comes back online after the master declared it dead and started to split. - * Want log rolling after a master split to fail. See HBASE-2312. + * Tests the case where a RegionServer enters a GC pause, comes back online after the master + * declared it dead and started to split. Want log rolling after a master split to fail. See + * HBASE-2312. */ @Test public void testLogRollAfterSplitStart() throws IOException { LOG.info("Verify wal roll after split starts will fail."); - String logName = ServerName.valueOf("testLogRollAfterSplitStart", - 16010, EnvironmentEdgeManager.currentTime()).toString(); + String logName = ServerName + .valueOf("testLogRollAfterSplitStart", 16010, EnvironmentEdgeManager.currentTime()) + .toString(); Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName)); final WALFactory wals = new WALFactory(conf, logName); try { // put some entries in an WAL - TableName tableName = - TableName.valueOf(this.getClass().getName()); + TableName tableName = TableName.valueOf(this.getClass().getName()); RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build(); WAL log = wals.getWAL(regionInfo); MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1); @@ -209,11 +207,12 @@ public void testLogRollAfterSplitStart() throws IOException { NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(Bytes.toBytes("column"), 0); log.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), + kvs); } // Send the data to HDFS datanodes and close the HDFS writer log.sync(); - ((AbstractFSWAL) log).replaceWriter(((FSHLog)log).getOldPath(), null, null); + ((AbstractFSWAL) log).replaceWriter(((FSHLog) log).getOldPath(), null, null); // code taken from MasterFileSystem.getLogDirs(), which is called from // MasterFileSystem.splitLog() handles RS shutdowns (as observed by the splitting process) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollPeriod.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollPeriod.java index 4b42c768956b..12db2efd0e24 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollPeriod.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollPeriod.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index 8b4b7103e92a..5b24fe01e3d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -87,7 +87,7 @@ public static void setUpBeforeClass() throws Exception { /**** configuration for testLogRollOnDatanodeDeath ****/ // lower the namenode & datanode heartbeat so the namenode // quickly detects datanode failures - Configuration conf= TEST_UTIL.getConfiguration(); + Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt("dfs.namenode.heartbeat.recheck-interval", 5000); conf.setInt("dfs.heartbeat.interval", 1); // the namenode might still try to choose the recently-dead datanode @@ -141,8 +141,7 @@ public void logRollRequested(WALActionsListener.RollRequestReason reason) { writeData(table, row++); } - assertFalse("Should not have triggered log roll due to SLOW_SYNC", - slowSyncHookCalled.get()); + assertFalse("Should not have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); // Set up for test slowSyncHookCalled.set(false); @@ -156,6 +155,7 @@ public void logRollRequested(WALActionsListener.RollRequestReason reason) { public void close() throws IOException { oldWriter1.close(); } + @Override public void sync(boolean forceSync) throws IOException { try { @@ -167,10 +167,12 @@ public void sync(boolean forceSync) throws IOException { } oldWriter1.sync(forceSync); } + @Override public void append(Entry entry) throws IOException { oldWriter1.append(entry); } + @Override public long getLength() { return oldWriter1.getLength(); @@ -197,14 +199,14 @@ public long getSyncedLength() { public boolean evaluate() throws Exception { return log.getWriter() != newWriter1; } + @Override public String explainFailure() throws Exception { return "Waited too long for our test writer to get rolled out"; } }); - assertTrue("Should have triggered log roll due to SLOW_SYNC", - slowSyncHookCalled.get()); + assertTrue("Should have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); // Set up for test slowSyncHookCalled.set(false); @@ -212,12 +214,13 @@ public String explainFailure() throws Exception { // Wrap the current writer with the anonymous class below that adds 5000 ms of // latency to any sync on the hlog. // This will trip the other threshold. 
- final Writer oldWriter2 = (Writer)log.getWriter(); + final Writer oldWriter2 = (Writer) log.getWriter(); final Writer newWriter2 = new Writer() { @Override public void close() throws IOException { oldWriter2.close(); } + @Override public void sync(boolean forceSync) throws IOException { try { @@ -229,10 +232,12 @@ public void sync(boolean forceSync) throws IOException { } oldWriter2.sync(forceSync); } + @Override public void append(Entry entry) throws IOException { oldWriter2.append(entry); } + @Override public long getLength() { return oldWriter2.getLength(); @@ -256,14 +261,14 @@ public long getSyncedLength() { public boolean evaluate() throws Exception { return log.getWriter() != newWriter2; } + @Override public String explainFailure() throws Exception { return "Waited too long for our test writer to get rolled out"; } }); - assertTrue("Should have triggered log roll due to SLOW_SYNC", - slowSyncHookCalled.get()); + assertTrue("Should have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); // Set up for test slowSyncHookCalled.set(false); @@ -273,8 +278,7 @@ public String explainFailure() throws Exception { writeData(table, row++); } - assertFalse("Should not have triggered log roll due to SLOW_SYNC", - slowSyncHookCalled.get()); + assertFalse("Should not have triggered log roll due to SLOW_SYNC", slowSyncHookCalled.get()); } finally { table.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index c2206399872d..bfd317172da3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ /** * Test many concurrent appenders to an WAL while rolling the log. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestLogRollingNoCluster { @ClassRule @@ -63,12 +63,12 @@ public class TestLogRollingNoCluster { HBaseClassTestRule.forClass(TestLogRollingNoCluster.class); private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private final static byte [] EMPTY_1K_ARRAY = new byte[1024]; + private final static byte[] EMPTY_1K_ARRAY = new byte[1024]; private static final int NUM_THREADS = 100; // Spin up this many threads private static final int NUM_ENTRIES = 100; // How many entries to write /** ProtobufLogWriter that simulates higher latencies in sync() call */ - public static class HighLatencySyncWriter extends ProtobufLogWriter { + public static class HighLatencySyncWriter extends ProtobufLogWriter { @Override public void sync(boolean forceSync) throws IOException { Threads.sleep(ThreadLocalRandom.current().nextInt(10)); @@ -78,8 +78,8 @@ public void sync(boolean forceSync) throws IOException { } /** - * Spin up a bunch of threads and have them all append to a WAL. Roll the - * WAL frequently to try and trigger NPE. + * Spin up a bunch of threads and have them all append to a WAL. Roll the WAL frequently to try + * and trigger NPE. 
* @throws IOException * @throws InterruptedException */ @@ -100,7 +100,7 @@ public void testContendedLogRolling() throws Exception { final WALFactory wals = new WALFactory(conf, TestLogRollingNoCluster.class.getName()); final WAL wal = wals.getWAL(null); - Appender [] appenders = null; + Appender[] appenders = null; final int numThreads = NUM_THREADS; appenders = new Appender[numThreads]; @@ -113,7 +113,7 @@ public void testContendedLogRolling() throws Exception { appenders[i].start(); } for (int i = 0; i < numThreads; i++) { - //ensure that all threads are joined before closing the wal + // ensure that all threads are joined before closing the wal appenders[i].join(); } } finally { @@ -126,7 +126,7 @@ public void testContendedLogRolling() throws Exception { } /** - * Appender thread. Appends to passed wal file. + * Appender thread. Appends to passed wal file. */ static class Appender extends Thread { private final Logger log; @@ -156,7 +156,7 @@ Exception getException() { @Override public void run() { - this.log.info(getName() +" started"); + this.log.info(getName() + " started"); final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); try { TableDescriptors tds = new FSTableDescriptors(TEST_UTIL.getConfiguration()); @@ -173,19 +173,18 @@ public void run() { edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY)); RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO; NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(byte[] fam: this.metaTableDescriptor.getColumnFamilyNames()) { + for (byte[] fam : this.metaTableDescriptor.getColumnFamilyNames()) { scopes.put(fam, 0); } final long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), - TableName.META_TABLE_NAME, now, mvcc, scopes), edit); + TableName.META_TABLE_NAME, now, mvcc, scopes), + edit); Threads.sleep(ThreadLocalRandom.current().nextInt(5)); wal.sync(txid); } String msg = getName() + " finished"; - if (isException()) - this.log.info(msg, getException()); - else - this.log.info(msg); + if (isException()) this.log.info(msg, getException()); + else this.log.info(msg); } catch (Exception e) { this.e = e; log.info("Caught exception from Appender:" + getName(), e); @@ -200,7 +199,7 @@ public void run() { } } - //@org.junit.Rule - //public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = - // new org.apache.hadoop.hbase.ResourceCheckerJUnitRule(); + // @org.junit.Rule + // public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = + // new org.apache.hadoop.hbase.ResourceCheckerJUnitRule(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java index 574330241f1e..85615abd19fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java @@ -38,7 +38,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestMetricsWAL { @Rule public TestName name = new TestName(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java index d429a01fdb9a..9e18d5ebb9e6 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java index d63ac7716bb5..f94473cc6a10 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureWALReplay.java index 7583816aa903..6e1ac3e282f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureWALReplay.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java index 8eb99b3a4772..f4124dceaf0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,9 +42,9 @@ public class TestSequenceIdAccounting { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSequenceIdAccounting.class); - private static final byte [] ENCODED_REGION_NAME = Bytes.toBytes("r"); - private static final byte [] FAMILY_NAME = Bytes.toBytes("cf"); - private static final byte [] META_FAMILY = Bytes.toBytes("METAFAMILY"); + private static final byte[] ENCODED_REGION_NAME = Bytes.toBytes("r"); + private static final byte[] FAMILY_NAME = Bytes.toBytes("cf"); + private static final byte[] META_FAMILY = Bytes.toBytes("METAFAMILY"); private static final Set FAMILIES; private static final Set META_FAMILY_SET; static { @@ -60,12 +60,12 @@ public void testStartCacheFlush() { sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME); Map m = new HashMap<>(); m.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); - assertEquals(HConstants.NO_SEQNUM, (long)sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES)); + assertEquals(HConstants.NO_SEQNUM, (long) sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES)); sida.completeCacheFlush(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); long sequenceid = 1; sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true); // Only one family so should return NO_SEQNUM still. - assertEquals(HConstants.NO_SEQNUM, (long)sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES)); + assertEquals(HConstants.NO_SEQNUM, (long) sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES)); sida.completeCacheFlush(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); long currentSequenceId = sequenceid; sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true); @@ -73,7 +73,7 @@ public void testStartCacheFlush() { otherFamily.add(Bytes.toBytes("otherCf")); sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); // Should return oldest sequence id in the region. - assertEquals(currentSequenceId, (long)sida.startCacheFlush(ENCODED_REGION_NAME, otherFamily)); + assertEquals(currentSequenceId, (long) sida.startCacheFlush(ENCODED_REGION_NAME, otherFamily)); sida.completeCacheFlush(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSyncFuture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSyncFuture.java index 64956536dc02..d9a45405ba44 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSyncFuture.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSyncFuture.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSyncFutureCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSyncFutureCache.java index dd4590a6a597..bdf26342a02f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSyncFutureCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSyncFutureCache.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; + import java.util.concurrent.CompletableFuture; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -55,8 +56,8 @@ public void testSyncFutureCacheLifeCycle() throws Exception { assertEquals(future3, future0); final SyncFuture[] future4 = new SyncFuture[1]; // From a different thread - CompletableFuture.runAsync(() -> - future4[0] = cache.getIfPresentOrNew().reset(4, false)).get(); + CompletableFuture.runAsync(() -> future4[0] = cache.getIfPresentOrNew().reset(4, false)) + .get(); assertNotNull(future4[0]); assertNotSame(future3, future4[0]); // Clean up diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java index d2733375c856..4b8134ec701e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,17 +50,16 @@ /** * Test that the actions are called while playing with an WAL */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestWALActionsListener { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestWALActionsListener.class); - private final static HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private final static byte[] SOME_BYTES = Bytes.toBytes("t"); + private final static byte[] SOME_BYTES = Bytes.toBytes("t"); private static Configuration conf; private static Path rootDir; private static Path walRootDir; @@ -90,9 +89,9 @@ public void tearDown() throws Exception { } /** - * Add a bunch of dummy data and roll the logs every two insert. We - * should end up with 10 rolled files (plus the roll called in - * the constructor). Also test adding a listener while it's running. + * Add a bunch of dummy data and roll the logs every two insert. We should end up with 10 rolled + * files (plus the roll called in the constructor). Also test adding a listener while it's + * running. 
*/ @Test public void testActionListener() throws Exception { @@ -131,7 +130,6 @@ public void testActionListener() throws Exception { assertEquals(1, observer.closedCount); } - /** * Just counts when methods are called */ @@ -157,4 +155,3 @@ public void logCloseRequested() { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java index 0cfd75e382f6..76339482ac2b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ByteBufferKeyValue; @@ -50,7 +49,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) @RunWith(Parameterized.class) public class TestWALCellCodecWithCompression { @@ -108,8 +107,8 @@ public void testValueCompression() throws Exception { fillBytes(value_5, Bytes.toBytes("CAFEBABE")); Configuration conf = new Configuration(false); - WALCellCodec codec = new WALCellCodec(conf, new CompressionContext(LRUDictionary.class, - false, true, true, compression)); + WALCellCodec codec = new WALCellCodec(conf, + new CompressionContext(LRUDictionary.class, false, true, true, compression)); ByteArrayOutputStream bos = new ByteArrayOutputStream(); Encoder encoder = codec.getEncoder(bos); encoder.write(createKV(row_1, value_1, 0)); @@ -122,34 +121,34 @@ public void testValueCompression() throws Exception { Decoder decoder = codec.getDecoder(is); decoder.advance(); KeyValue kv = (KeyValue) decoder.current(); - assertTrue(Bytes.equals(row_1, 0, row_1.length, - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); - assertTrue(Bytes.equals(value_1, 0, value_1.length, - kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + assertTrue(Bytes.equals(row_1, 0, row_1.length, kv.getRowArray(), kv.getRowOffset(), + kv.getRowLength())); + assertTrue(Bytes.equals(value_1, 0, value_1.length, kv.getValueArray(), kv.getValueOffset(), + kv.getValueLength())); decoder.advance(); kv = (KeyValue) decoder.current(); - assertTrue(Bytes.equals(row_2, 0, row_2.length, - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); - assertTrue(Bytes.equals(value_2, 0, value_2.length, - kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + assertTrue(Bytes.equals(row_2, 0, row_2.length, kv.getRowArray(), kv.getRowOffset(), + kv.getRowLength())); + assertTrue(Bytes.equals(value_2, 0, value_2.length, kv.getValueArray(), kv.getValueOffset(), + kv.getValueLength())); decoder.advance(); kv = (KeyValue) decoder.current(); - assertTrue(Bytes.equals(row_3, 0, row_3.length, - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); - assertTrue(Bytes.equals(value_3, 0, value_3.length, - kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + assertTrue(Bytes.equals(row_3, 0, 
row_3.length, kv.getRowArray(), kv.getRowOffset(), + kv.getRowLength())); + assertTrue(Bytes.equals(value_3, 0, value_3.length, kv.getValueArray(), kv.getValueOffset(), + kv.getValueLength())); decoder.advance(); kv = (KeyValue) decoder.current(); - assertTrue(Bytes.equals(row_4, 0, row_4.length, - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); - assertTrue(Bytes.equals(value_4, 0, value_4.length, - kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + assertTrue(Bytes.equals(row_4, 0, row_4.length, kv.getRowArray(), kv.getRowOffset(), + kv.getRowLength())); + assertTrue(Bytes.equals(value_4, 0, value_4.length, kv.getValueArray(), kv.getValueOffset(), + kv.getValueLength())); decoder.advance(); kv = (KeyValue) decoder.current(); - assertTrue(Bytes.equals(row_5, 0, row_5.length, - kv.getRowArray(), kv.getRowOffset(), kv.getRowLength())); - assertTrue(Bytes.equals(value_5, 0, value_5.length, - kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); + assertTrue(Bytes.equals(row_5, 0, row_5.length, kv.getRowArray(), kv.getRowOffset(), + kv.getRowLength())); + assertTrue(Bytes.equals(value_5, 0, value_5.length, kv.getValueArray(), kv.getValueOffset(), + kv.getValueLength())); } } @@ -164,14 +163,13 @@ static void fillBytes(byte[] buffer, byte[] fill) { } } - private void doTest(boolean compressTags, boolean offheapKV) - throws Exception { + private void doTest(boolean compressTags, boolean offheapKV) throws Exception { final byte[] key = Bytes.toBytes("myRow"); final byte[] value = Bytes.toBytes("myValue"); Configuration conf = new Configuration(false); conf.setBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, compressTags); - WALCellCodec codec = new WALCellCodec(conf, new CompressionContext(LRUDictionary.class, - false, compressTags)); + WALCellCodec codec = + new WALCellCodec(conf, new CompressionContext(LRUDictionary.class, false, compressTags)); ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); Encoder encoder = codec.getEncoder(bos); if (offheapKV) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALConfiguration.java index 32a04d5e65ab..e9cc716c9b12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hbase.regionserver.wal; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.util.Arrays; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -38,15 +43,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Arrays; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - /** - * Ensure configuration changes are having an effect on WAL. - * There is a lot of reflection around WAL setup; could be skipping Configuration changes. + * Ensure configuration changes are having an effect on WAL. 
There is a lot of reflection around WAL + * setup; could be skipping Configuration changes. */ @RunWith(Parameterized.class) @Category({ RegionServerTests.class, SmallTests.class }) @@ -74,9 +73,9 @@ public void before() { } /** - * Test blocksize change from HBASE-20520 takes on both asycnfs and old wal provider. - * Hard to verify more than this given the blocksize is passed down to HDFS on create -- not - * kept local to the streams themselves. + * Test blocksize change from HBASE-20520 takes on both asycnfs and old wal provider. Hard to + * verify more than this given the blocksize is passed down to HDFS on create -- not kept local to + * the streams themselves. */ @Test public void testBlocksizeDefaultsToTwiceHDFSBlockSize() throws IOException { @@ -89,7 +88,7 @@ public void testBlocksizeDefaultsToTwiceHDFSBlockSize() throws IOException { if (wal instanceof AbstractFSWAL) { long expectedDefaultBlockSize = WALUtil.getWALBlockSize(conf, FileSystem.get(conf), TEST_UTIL.getDataTestDir()); - long blocksize = ((AbstractFSWAL)wal).blocksize; + long blocksize = ((AbstractFSWAL) wal).blocksize; assertEquals(expectedDefaultBlockSize, blocksize); LOG.info("Found blocksize of {} on {}", blocksize, wal); } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java index c1a53a098fb5..a4315157fca7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayBoundedLogWriterCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayBoundedLogWriterCreation.java index 53526e8814e1..df9455a5cb73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayBoundedLogWriterCreation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayBoundedLogWriterCreation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,4 +37,3 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setBoolean(WALSplitter.SPLIT_WRITER_CREATION_BOUNDED, true); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayCompressed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayCompressed.java index 7f7173eab44a..405c14d5c982 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayCompressed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayCompressed.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayValueCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayValueCompression.java index d10cc9c73540..336b0650447b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayValueCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplayValueCompression.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java index 1b1f4ecdd9d3..0b35d7af8cf4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WALDurabilityTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,7 +70,7 @@ public void tearDown() throws IOException { } protected abstract T getWAL(FileSystem fs, Path root, String logDir, Configuration conf) - throws IOException; + throws IOException; protected abstract void resetSyncFlag(T wal); @@ -158,8 +158,8 @@ private String getName() { */ public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, Configuration conf, WAL wal) throws IOException { - ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, - 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, conf, false, Durability.USE_DEFAULT, wal, COLUMN_FAMILY_BYTES); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WriterOverAsyncWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WriterOverAsyncWriter.java index 8ae74ad3f38d..1b4303fab2a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WriterOverAsyncWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/WriterOverAsyncWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DualAsyncFSWALForTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DualAsyncFSWALForTest.java index 19e51126e367..cf711e5be891 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DualAsyncFSWALForTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DualAsyncFSWALForTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -107,7 +107,7 @@ public DualAsyncFSWALForTest(FileSystem fs, FileSystem remoteFs, Path rootDir, P boolean failIfWALExists, String prefix, String suffix, EventLoopGroup eventLoopGroup, Class channelClass) throws FailedLogCloseException, IOException { super(fs, remoteFs, rootDir, remoteWALDir, logDir, archiveDir, conf, listeners, failIfWALExists, - prefix, suffix, eventLoopGroup, channelClass); + prefix, suffix, eventLoopGroup, channelClass); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DummyReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DummyReplicationEndpoint.java index 38f0dbe6103c..e6a39e7fede1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DummyReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/DummyReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java index cab01d6fa6e6..7bc0a98335c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java @@ -105,8 +105,7 @@ public String getQueueId() { @Override public String getPeerId() { String[] parts = peerClusterId.split("-", 2); - return parts.length != 1 ? - parts[0] : peerClusterId; + return parts.length != 1 ? parts[0] : peerClusterId; } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java index 2bcca44aeca0..fd458ad39656 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -175,9 +175,7 @@ protected static void rollAllWALs() throws Exception { @Override public boolean evaluate() throws Exception { - return UTIL.getMiniHBaseCluster() - .getLiveRegionServerThreads() - .stream() + return UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().stream() .map(RegionServerThread::getRegionServer) .allMatch(HRegionServer::walRollRequestFinished); } @@ -225,14 +223,14 @@ protected final void enablePeerAndWaitUntilReplicationDone(int expectedEntries) protected final void addPeer(boolean enabled) throws IOException { UTIL.getAdmin().addReplicationPeer(PEER_ID, ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase") - .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()).setSerial(true) - .build(), + .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()).setSerial(true) + .build(), enabled); } protected final void checkOrder(int expectedEntries) throws IOException { try (WAL.Reader reader = - WALFactory.createReader(UTIL.getTestFileSystem(), logPath, UTIL.getConfiguration())) { + WALFactory.createReader(UTIL.getTestFileSystem(), logPath, UTIL.getConfiguration())) { long seqId = -1L; int count = 0; for (Entry entry;;) { @@ -254,7 +252,7 @@ protected final TableName createTable() throws IOException, InterruptedException TableName tableName = TableName.valueOf(name.getMethodName()); UTIL.getAdmin().createTable( TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(CF).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build()); + .newBuilder(CF).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build()); UTIL.waitTableAvailable(tableName); return tableName; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java index e25aefa52b34..3335751d8023 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -106,33 +106,33 @@ public static void setUp() throws Exception { ZK_UTIL.startMiniZKCluster(); initTestingUtility(UTIL1, "/cluster1"); initTestingUtility(UTIL2, "/cluster2"); - StartTestingClusterOption option = - StartTestingClusterOption.builder().numMasters(2).numRegionServers(3).numDataNodes(3).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(2) + .numRegionServers(3).numDataNodes(3).build(); UTIL1.startMiniCluster(option); UTIL2.startMiniCluster(option); TableDescriptor td = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(CF).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build(); + TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(CF).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build(); UTIL1.getAdmin().createTable(td); UTIL2.getAdmin().createTable(td); FileSystem fs1 = UTIL1.getTestFileSystem(); FileSystem fs2 = UTIL2.getTestFileSystem(); REMOTE_WAL_DIR1 = - new Path(UTIL1.getMiniHBaseCluster().getMaster().getMasterFileSystem().getWALRootDir(), - "remoteWALs").makeQualified(fs1.getUri(), fs1.getWorkingDirectory()); + new Path(UTIL1.getMiniHBaseCluster().getMaster().getMasterFileSystem().getWALRootDir(), + "remoteWALs").makeQualified(fs1.getUri(), fs1.getWorkingDirectory()); REMOTE_WAL_DIR2 = - new Path(UTIL2.getMiniHBaseCluster().getMaster().getMasterFileSystem().getWALRootDir(), - "remoteWALs").makeQualified(fs2.getUri(), fs2.getWorkingDirectory()); + new Path(UTIL2.getMiniHBaseCluster().getMaster().getMasterFileSystem().getWALRootDir(), + "remoteWALs").makeQualified(fs2.getUri(), fs2.getWorkingDirectory()); UTIL1.getAdmin().addReplicationPeer(PEER_ID, ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()) - .setReplicateAllUserTables(false) - .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>())) - .setRemoteWALDir(REMOTE_WAL_DIR2.toUri().toString()).build()); + .setReplicateAllUserTables(false) + .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>())) + .setRemoteWALDir(REMOTE_WAL_DIR2.toUri().toString()).build()); UTIL2.getAdmin().addReplicationPeer(PEER_ID, ReplicationPeerConfig.newBuilder().setClusterKey(UTIL1.getClusterKey()) - .setReplicateAllUserTables(false) - .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>())) - .setRemoteWALDir(REMOTE_WAL_DIR1.toUri().toString()).build()); + .setReplicateAllUserTables(false) + .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>())) + .setRemoteWALDir(REMOTE_WAL_DIR1.toUri().toString()).build()); } private static void shutdown(HBaseTestingUtil util) throws Exception { @@ -141,8 +141,8 @@ private static void shutdown(HBaseTestingUtil util) throws Exception { } Admin admin = util.getAdmin(); if (!admin.listReplicationPeers(Pattern.compile(PEER_ID)).isEmpty()) { - if (admin - .getReplicationPeerSyncReplicationState(PEER_ID) != SyncReplicationState.DOWNGRADE_ACTIVE) { + if (admin.getReplicationPeerSyncReplicationState( + PEER_ID) != SyncReplicationState.DOWNGRADE_ACTIVE) { admin.transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.DOWNGRADE_ACTIVE); } @@ -182,16 +182,15 @@ protected final void verifyThroughRegion(HBaseTestingUtil util, int start, int e } } - protected final void verifyNotReplicatedThroughRegion(HBaseTestingUtil util, int start, - int end) throws IOException { + protected final void 
verifyNotReplicatedThroughRegion(HBaseTestingUtil util, int start, int end) + throws IOException { HRegion region = util.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0); for (int i = start; i < end; i++) { assertTrue(region.get(new Get(Bytes.toBytes(i))).isEmpty()); } } - protected final void waitUntilReplicationDone(HBaseTestingUtil util, int end) - throws Exception { + protected final void waitUntilReplicationDone(HBaseTestingUtil util, int end) throws Exception { // The reject check is in RSRpcService so we can still read through HRegion HRegion region = util.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0); util.waitFor(30000, new ExplainingPredicate() { @@ -208,8 +207,8 @@ public String explainFailure() throws Exception { }); } - protected final void writeAndVerifyReplication(HBaseTestingUtil util1, - HBaseTestingUtil util2, int start, int end) throws Exception { + protected final void writeAndVerifyReplication(HBaseTestingUtil util1, HBaseTestingUtil util2, + int start, int end) throws Exception { write(util1, start, end); waitUntilReplicationDone(util2, end); verifyThroughRegion(util2, start, end); @@ -228,10 +227,10 @@ protected final Path getReplayRemoteWALs(Path remoteWALDir, String peerId) { return new Path(remoteWALDir, peerId + "-replay"); } - protected final void verifyRemovedPeer(String peerId, Path remoteWALDir, - HBaseTestingUtil utility) throws Exception { + protected final void verifyRemovedPeer(String peerId, Path remoteWALDir, HBaseTestingUtil utility) + throws Exception { ReplicationPeerStorage rps = ReplicationStorageFactory - .getReplicationPeerStorage(utility.getZooKeeperWatcher(), utility.getConfiguration()); + .getReplicationPeerStorage(utility.getZooKeeperWatcher(), utility.getConfiguration()); try { rps.getPeerSyncReplicationState(peerId); fail("Should throw exception when get the sync replication state of a removed peer."); @@ -263,7 +262,7 @@ protected final void verifyReplicationRequestRejection(HBaseTestingUtil utility, Entry[] entries = new Entry[10]; for (int i = 0; i < entries.length; i++) { entries[i] = - new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit()); + new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit()); } if (!expectedRejection) { ReplicationProtobufUtil.replicateWALEntry( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestAddToSerialReplicationPeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestAddToSerialReplicationPeer.java index e5c25877eced..55c883086037 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestAddToSerialReplicationPeer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestAddToSerialReplicationPeer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public class TestAddToSerialReplicationPeer extends SerialReplicationTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAddToSerialReplicationPeer.class); + HBaseClassTestRule.forClass(TestAddToSerialReplicationPeer.class); @Before public void setUp() throws IOException, StreamLacksCapabilityException { @@ -69,7 +69,7 @@ private void moveRegionAndArchiveOldWals(RegionInfo region, HRegionServer rs) th } private void waitUntilReplicatedToTheCurrentWALFile(HRegionServer rs, final String oldWalName) - throws Exception { + throws Exception { Path path = ((AbstractFSWAL) rs.getWAL(null)).getCurrentFileName(); String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(path.getName()); UTIL.waitFor(30000, new ExplainingPredicate() { @@ -77,10 +77,10 @@ private void waitUntilReplicatedToTheCurrentWALFile(HRegionServer rs, final Stri @Override public boolean evaluate() throws Exception { ReplicationSourceManager manager = - ((Replication) rs.getReplicationSourceService()).getReplicationManager(); + ((Replication) rs.getReplicationSourceService()).getReplicationManager(); // Make sure replication moves to the new file. - return (manager.getWALs().get(PEER_ID).get(logPrefix).size() == 1) && - !oldWalName.equals(manager.getWALs().get(PEER_ID).get(logPrefix).first()); + return (manager.getWALs().get(PEER_ID).get(logPrefix).size() == 1) + && !oldWalName.equals(manager.getWALs().get(PEER_ID).get(logPrefix).first()); } @Override @@ -114,8 +114,8 @@ public void testAddPeer() throws Exception { @Test public void testChangeToSerial() throws Exception { ReplicationPeerConfig peerConfig = - ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase") - .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()).build(); + ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase") + .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()).build(); UTIL.getAdmin().addReplicationPeer(PEER_ID, peerConfig, true); TableName tableName = createTable(); @@ -129,7 +129,7 @@ public void testChangeToSerial() throws Exception { HRegionServer srcRs = UTIL.getRSForFirstRegionInTable(tableName); // Get the current wal file name String walFileNameBeforeRollover = - ((AbstractFSWAL) srcRs.getWAL(null)).getCurrentFileName().getName(); + ((AbstractFSWAL) srcRs.getWAL(null)).getCurrentFileName().getName(); HRegionServer rs = UTIL.getOtherRegionServer(srcRs); moveRegionAndArchiveOldWals(region, rs); @@ -153,9 +153,9 @@ public void testChangeToSerial() throws Exception { @Test public void testAddToSerialPeer() throws Exception { ReplicationPeerConfig peerConfig = - ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase") - .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()) - .setReplicateAllUserTables(false).setSerial(true).build(); + ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase") + .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()) + .setReplicateAllUserTables(false).setSerial(true).build(); UTIL.getAdmin().addReplicationPeer(PEER_ID, peerConfig, true); TableName tableName = createTable(); @@ -170,7 +170,7 @@ public void testAddToSerialPeer() throws Exception { // Get the current wal file name String walFileNameBeforeRollover = - ((AbstractFSWAL) srcRs.getWAL(null)).getCurrentFileName().getName(); + ((AbstractFSWAL) 
srcRs.getWAL(null)).getCurrentFileName().getName(); moveRegionAndArchiveOldWals(region, rs); @@ -180,7 +180,7 @@ public void testAddToSerialPeer() throws Exception { UTIL.getAdmin().disableReplicationPeer(PEER_ID); UTIL.getAdmin().updateReplicationPeerConfig(PEER_ID, ReplicationPeerConfig.newBuilder(peerConfig) - .setTableCFsMap(ImmutableMap.of(tableName, Collections.emptyList())).build()); + .setTableCFsMap(ImmutableMap.of(tableName, Collections.emptyList())).build()); UTIL.getAdmin().enableReplicationPeer(PEER_ID); try (Table table = UTIL.getConnection().getTable(tableName)) { for (int i = 0; i < 100; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestClaimReplicationQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestClaimReplicationQueue.java index 41f55a252237..f49f8d57fa70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestClaimReplicationQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestClaimReplicationQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ public class TestClaimReplicationQueue extends TestReplicationBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClaimReplicationQueue.class); + HBaseClassTestRule.forClass(TestClaimReplicationQueue.class); private static final TableName tableName3 = TableName.valueOf("test3"); @@ -93,8 +93,8 @@ public HMasterForTest(Configuration conf) throws IOException { } @Override - protected ServerManager createServerManager(MasterServices master, - RegionServerList storage) throws IOException { + protected ServerManager createServerManager(MasterServices master, RegionServerList storage) + throws IOException { setupClusterConnection(); return new ServerManagerForTest(master, storage); } @@ -149,8 +149,8 @@ public void testClaim() throws Exception { HMaster master = UTIL1.getMiniHBaseCluster().getMaster(); UTIL1.waitFor(30000, () -> master.getProcedures().stream() - .filter(p -> p instanceof ClaimReplicationQueuesProcedure) - .anyMatch(p -> p.getState() == ProcedureState.WAITING_TIMEOUT)); + .filter(p -> p instanceof ClaimReplicationQueuesProcedure) + .anyMatch(p -> p.getState() == ProcedureState.WAITING_TIMEOUT)); hbaseAdmin.enableReplicationPeer(PEER_ID2); hbaseAdmin.enableReplicationPeer(PEER_ID3); @@ -158,7 +158,7 @@ public void testClaim() throws Exception { EMPTY = false; // wait until the SCP finished, ClaimReplicationQueuesProcedure is a sub procedure of SCP UTIL1.waitFor(30000, () -> master.getProcedures().stream() - .filter(p -> p instanceof ServerCrashProcedure).allMatch(Procedure::isSuccess)); + .filter(p -> p instanceof ServerCrashProcedure).allMatch(Procedure::isSuccess)); // we should get all the data in the target cluster waitForReplication(htable2, count1, NB_RETRIES); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java index 6d4544fa363f..3e2887db5597 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestHBaseReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -41,12 +40,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({ReplicationTests.class, SmallTests.class}) +@Category({ ReplicationTests.class, SmallTests.class }) public class TestHBaseReplicationEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHBaseReplicationEndpoint.class); + HBaseClassTestRule.forClass(TestHBaseReplicationEndpoint.class); private static final Logger LOG = LoggerFactory.getLogger(TestHBaseReplicationEndpoint.class); @@ -58,8 +57,8 @@ public class TestHBaseReplicationEndpoint { public void setUp() throws Exception { try { ReplicationEndpoint.Context context = - new ReplicationEndpoint.Context(null, UTIL.getConfiguration(), UTIL.getConfiguration(), - null, null, null, null, null, null, null); + new ReplicationEndpoint.Context(null, UTIL.getConfiguration(), UTIL.getConfiguration(), + null, null, null, null, null, null, null); endpoint = new DummyHBaseReplicationEndpoint(); endpoint.init(context); } catch (Exception e) { @@ -82,8 +81,8 @@ public void testChooseSinks() { @Test public void testChooseSinksLessThanRatioAvailable() { - List serverNames = Lists.newArrayList(mock(ServerName.class), - mock(ServerName.class)); + List serverNames = + Lists.newArrayList(mock(ServerName.class), mock(ServerName.class)); ((DummyHBaseReplicationEndpoint) endpoint).setRegionServers(serverNames); endpoint.chooseSinks(); assertEquals(1, endpoint.getNumSinks()); @@ -94,7 +93,7 @@ public void testReportBadSink() { ServerName serverNameA = mock(ServerName.class); ServerName serverNameB = mock(ServerName.class); ((DummyHBaseReplicationEndpoint) endpoint) - .setRegionServers(Lists.newArrayList(serverNameA, serverNameB)); + .setRegionServers(Lists.newArrayList(serverNameA, serverNameB)); endpoint.chooseSinks(); // Sanity check assertEquals(1, endpoint.getNumSinks()); @@ -106,8 +105,8 @@ public void testReportBadSink() { } /** - * Once a SinkPeer has been reported as bad more than BAD_SINK_THRESHOLD times, it should not - * be replicated to anymore. + * Once a SinkPeer has been reported as bad more than BAD_SINK_THRESHOLD times, it should not be + * replicated to anymore. */ @Test public void testReportBadSinkPastThreshold() { @@ -180,7 +179,7 @@ public void testReportBadSinkDownToZeroSinks() { // should have been refreshed now, so out of 4 servers, 2 are not considered as they are // reported as bad. 
expected = - (int) ((totalServers - 2) * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO); + (int) ((totalServers - 2) * HBaseReplicationEndpoint.DEFAULT_REPLICATION_SOURCE_RATIO); assertEquals(expected, endpoint.getNumSinks()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index d614b79d0742..d7b7947dce6c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; + import java.io.Closeable; import java.io.IOException; import java.util.Arrays; @@ -28,7 +29,6 @@ import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -79,7 +79,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ReplicationTests.class, LargeTests.class}) +@Category({ ReplicationTests.class, LargeTests.class }) public class TestMasterReplication { @ClassRule @@ -128,9 +128,8 @@ public void setUp() throws Exception { TestSourceFSConfigurationProvider.class.getCanonicalName()); baseConfiguration.set(HConstants.REPLICATION_CLUSTER_ID, "12345"); baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100); - baseConfiguration.setStrings( - CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - CoprocessorCounter.class.getName()); + baseConfiguration.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, + CoprocessorCounter.class.getName()); table = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) @@ -140,10 +139,9 @@ public void setUp() throws Exception { } /** - * It tests the replication scenario involving 0 -> 1 -> 0. It does it by - * adding and deleting a row to a table in each cluster, checking if it's - * replicated. It also tests that the puts and deletes are not replicated back - * to the originating cluster. + * It tests the replication scenario involving 0 -> 1 -> 0. It does it by adding and deleting a + * row to a table in each cluster, checking if it's replicated. It also tests that the puts and + * deletes are not replicated back to the originating cluster. */ @Test public void testCyclicReplication1() throws Exception { @@ -176,8 +174,7 @@ public void testCyclicReplication1() throws Exception { * the replication peer should not be added. */ @Test(expected = DoNotRetryIOException.class) - public void testLoopedReplication() - throws Exception { + public void testLoopedReplication() throws Exception { LOG.info("testLoopedReplication"); startMiniClusters(1); createTableOnClusters(table); @@ -199,8 +196,8 @@ public void testHFileCyclicReplication() throws Exception { // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1'. 
byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; int numOfRows = 100; int[] expectedCounts = new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; @@ -211,10 +208,10 @@ public void testHFileCyclicReplication() throws Exception { // Load 200 rows for each hfile range in cluster '1' and validate whether its been replicated // to cluster '0'. hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, - new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; numOfRows = 200; int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], - hfileRanges.length * numOfRows + expectedCounts[1] }; + hfileRanges.length * numOfRows + expectedCounts[1] }; loadAndValidateHFileReplication("testHFileCyclicReplication_10", 1, new int[] { 0 }, row, famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); @@ -313,12 +310,12 @@ public void testHFileMultiSlaveReplication() throws Exception { // Load 100 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1'. byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") }, - new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") }, + new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, }; int numOfRows = 100; int[] expectedCounts = - new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; + new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; loadAndValidateHFileReplication("testHFileCyclicReplication_0", 0, new int[] { 1 }, row, famName, htables, hfileRanges, numOfRows, expectedCounts, true); @@ -334,11 +331,11 @@ public void testHFileMultiSlaveReplication() throws Exception { // Load 200 rows for each hfile range in cluster '0' and validate whether its been replicated // to cluster '1' and '2'. Previous data should be replicated to cluster '2'. 
hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("ssss"), Bytes.toBytes("uuuu") }, - new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, }; + new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, }; numOfRows = 200; int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0], - hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows }; + hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows }; loadAndValidateHFileReplication("testHFileCyclicReplication_1", 0, new int[] { 1, 2 }, row, famName, htables, hfileRanges, numOfRows, newExpectedCounts, true); @@ -369,8 +366,8 @@ public void testHFileReplicationForConfiguredTableCfs() throws Exception { // Load 100 rows for each hfile range in cluster '0' for table CF 'f' byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, }; int numOfRows = 100; int[] expectedCounts = new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows }; @@ -380,11 +377,11 @@ public void testHFileReplicationForConfiguredTableCfs() throws Exception { // Load 100 rows for each hfile range in cluster '0' for table CF 'f1' hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") }, - new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; + new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, }; numOfRows = 100; int[] newExpectedCounts = - new int[] { hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] }; + new int[] { hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] }; loadAndValidateHFileReplication("load_f1", 0, new int[] { 1 }, row, famName1, htables, hfileRanges, numOfRows, newExpectedCounts, false); @@ -443,13 +440,11 @@ public void testCyclicReplication3() throws Exception { } /** - * Tests that base replication peer configs are applied on peer creation - * and the configs are overriden if updated as part of updateReplicationPeerConfig() - * + * Tests that base replication peer configs are applied on peer creation and the configs are + * overriden if updated as part of updateReplicationPeerConfig() */ @Test - public void testBasePeerConfigsForReplicationPeer() - throws Exception { + public void testBasePeerConfigsForReplicationPeer() throws Exception { LOG.info("testBasePeerConfigsForPeerMutations"); String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; String firstCustomPeerConfigValue = "test"; @@ -467,50 +462,50 @@ public void testBasePeerConfigsForReplicationPeer() Admin admin = utilities[0].getAdmin(); // Validates base configs 1 is present for both peer. - Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). - getConfiguration().get(firstCustomPeerConfigKey)); - Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("2"). - getConfiguration().get(firstCustomPeerConfigKey)); + Assert.assertEquals(firstCustomPeerConfigValue, + admin.getReplicationPeerConfig("1").getConfiguration().get(firstCustomPeerConfigKey)); + Assert.assertEquals(firstCustomPeerConfigValue, + admin.getReplicationPeerConfig("2").getConfiguration().get(firstCustomPeerConfigKey)); // override value of configuration 1 for peer "1". 
- ReplicationPeerConfig updatedReplicationConfigForPeer1 = ReplicationPeerConfig. - newBuilder(admin.getReplicationPeerConfig("1")). - putConfiguration(firstCustomPeerConfigKey, firstCustomPeerConfigUpdatedValue).build(); + ReplicationPeerConfig updatedReplicationConfigForPeer1 = ReplicationPeerConfig + .newBuilder(admin.getReplicationPeerConfig("1")) + .putConfiguration(firstCustomPeerConfigKey, firstCustomPeerConfigUpdatedValue).build(); // add configuration 2 for peer "2". - ReplicationPeerConfig updatedReplicationConfigForPeer2 = ReplicationPeerConfig. - newBuilder(admin.getReplicationPeerConfig("2")). - putConfiguration(secondCustomPeerConfigKey, secondCustomPeerConfigUpdatedValue).build(); + ReplicationPeerConfig updatedReplicationConfigForPeer2 = ReplicationPeerConfig + .newBuilder(admin.getReplicationPeerConfig("2")) + .putConfiguration(secondCustomPeerConfigKey, secondCustomPeerConfigUpdatedValue).build(); admin.updateReplicationPeerConfig("1", updatedReplicationConfigForPeer1); admin.updateReplicationPeerConfig("2", updatedReplicationConfigForPeer2); // validates configuration is overridden by updateReplicationPeerConfig - Assert.assertEquals(firstCustomPeerConfigUpdatedValue, admin.getReplicationPeerConfig("1"). - getConfiguration().get(firstCustomPeerConfigKey)); - Assert.assertEquals(secondCustomPeerConfigUpdatedValue, admin.getReplicationPeerConfig("2"). - getConfiguration().get(secondCustomPeerConfigKey)); + Assert.assertEquals(firstCustomPeerConfigUpdatedValue, + admin.getReplicationPeerConfig("1").getConfiguration().get(firstCustomPeerConfigKey)); + Assert.assertEquals(secondCustomPeerConfigUpdatedValue, + admin.getReplicationPeerConfig("2").getConfiguration().get(secondCustomPeerConfigKey)); // Add second config to base config and perform restart. - utilities[0].getConfiguration().set(ReplicationPeerConfigUtil. - HBASE_REPLICATION_PEER_BASE_CONFIG, firstCustomPeerConfigKey.concat("="). - concat(firstCustomPeerConfigValue).concat(";").concat(secondCustomPeerConfigKey) - .concat("=").concat(secondCustomPeerConfigValue)); + utilities[0].getConfiguration().set( + ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + firstCustomPeerConfigKey.concat("=").concat(firstCustomPeerConfigValue).concat(";") + .concat(secondCustomPeerConfigKey).concat("=").concat(secondCustomPeerConfigValue)); utilities[0].shutdownMiniHBaseCluster(); utilities[0].restartHBaseCluster(1); admin = utilities[0].getAdmin(); // Configurations should be updated after restart again - Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). - getConfiguration().get(firstCustomPeerConfigKey)); - Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("2"). - getConfiguration().get(firstCustomPeerConfigKey)); - - Assert.assertEquals(secondCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). - getConfiguration().get(secondCustomPeerConfigKey)); - Assert.assertEquals(secondCustomPeerConfigValue, admin.getReplicationPeerConfig("2"). 
- getConfiguration().get(secondCustomPeerConfigKey)); + Assert.assertEquals(firstCustomPeerConfigValue, + admin.getReplicationPeerConfig("1").getConfiguration().get(firstCustomPeerConfigKey)); + Assert.assertEquals(firstCustomPeerConfigValue, + admin.getReplicationPeerConfig("2").getConfiguration().get(firstCustomPeerConfigKey)); + + Assert.assertEquals(secondCustomPeerConfigValue, + admin.getReplicationPeerConfig("1").getConfiguration().get(secondCustomPeerConfigKey)); + Assert.assertEquals(secondCustomPeerConfigValue, + admin.getReplicationPeerConfig("2").getConfiguration().get(secondCustomPeerConfigKey)); } finally { shutDownMiniClusters(); baseConfiguration.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); @@ -518,8 +513,7 @@ public void testBasePeerConfigsForReplicationPeer() } @Test - public void testBasePeerConfigsRemovalForReplicationPeer() - throws Exception { + public void testBasePeerConfigsRemovalForReplicationPeer() throws Exception { LOG.info("testBasePeerConfigsForPeerMutations"); String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; String firstCustomPeerConfigValue = "test"; @@ -532,22 +526,22 @@ public void testBasePeerConfigsRemovalForReplicationPeer() Admin admin = utilities[0].getAdmin(); // Validates base configs 1 is present for both peer. - Assert.assertEquals(firstCustomPeerConfigValue, admin.getReplicationPeerConfig("1"). - getConfiguration().get(firstCustomPeerConfigKey)); - - utilities[0].getConfiguration().unset(ReplicationPeerConfigUtil. - HBASE_REPLICATION_PEER_BASE_CONFIG); - utilities[0].getConfiguration().set(ReplicationPeerConfigUtil. - HBASE_REPLICATION_PEER_BASE_CONFIG, firstCustomPeerConfigKey.concat("=").concat("")); + Assert.assertEquals(firstCustomPeerConfigValue, + admin.getReplicationPeerConfig("1").getConfiguration().get(firstCustomPeerConfigKey)); + utilities[0].getConfiguration() + .unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); + utilities[0].getConfiguration().set( + ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, + firstCustomPeerConfigKey.concat("=").concat("")); utilities[0].shutdownMiniHBaseCluster(); utilities[0].restartHBaseCluster(1); admin = utilities[0].getAdmin(); // Configurations should be removed after restart again - Assert.assertNull(admin.getReplicationPeerConfig("1") - .getConfiguration().get(firstCustomPeerConfigKey)); + Assert.assertNull( + admin.getReplicationPeerConfig("1").getConfiguration().get(firstCustomPeerConfigKey)); } finally { shutDownMiniClusters(); baseConfiguration.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); @@ -555,8 +549,7 @@ public void testBasePeerConfigsRemovalForReplicationPeer() } @Test - public void testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer() - throws Exception { + public void testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer() throws Exception { LOG.info("testBasePeerConfigsForPeerMutations"); String firstCustomPeerConfigKey = "hbase.xxx.custom_config"; @@ -567,8 +560,8 @@ public void testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer() addPeer("1", 0, 1); Admin admin = utilities[0].getAdmin(); - Assert.assertNull("Config should not be there", admin.getReplicationPeerConfig("1"). 
- getConfiguration().get(firstCustomPeerConfigKey)); + Assert.assertNull("Config should not be there", + admin.getReplicationPeerConfig("1").getConfiguration().get(firstCustomPeerConfigKey)); } finally { shutDownMiniClusters(); baseConfiguration.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); @@ -587,8 +580,7 @@ private void startMiniClusters(int numClusters) throws Exception { configurations = new Configuration[numClusters]; for (int i = 0; i < numClusters; i++) { Configuration conf = new Configuration(baseConfiguration); - conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + - i + ThreadLocalRandom.current().nextInt()); + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + i + ThreadLocalRandom.current().nextInt()); HBaseTestingUtil utility = new HBaseTestingUtil(conf); if (i == 0) { utility.startMiniZKCluster(); @@ -619,39 +611,37 @@ private void createTableOnClusters(TableDescriptor table) throws Exception { } } - private void addPeer(String id, int masterClusterNumber, - int slaveClusterNumber) throws Exception { + private void addPeer(String id, int masterClusterNumber, int slaveClusterNumber) + throws Exception { try (Connection conn = ConnectionFactory.createConnection(configurations[masterClusterNumber]); - Admin admin = conn.getAdmin()) { - admin.addReplicationPeer(id, - ReplicationPeerConfig.newBuilder(). - setClusterKey(utilities[slaveClusterNumber].getClusterKey()).build()); + Admin admin = conn.getAdmin()) { + admin.addReplicationPeer(id, ReplicationPeerConfig.newBuilder() + .setClusterKey(utilities[slaveClusterNumber].getClusterKey()).build()); } } private void addPeer(String id, int masterClusterNumber, int slaveClusterNumber, String tableCfs) throws Exception { try (Connection conn = ConnectionFactory.createConnection(configurations[masterClusterNumber]); - Admin admin = conn.getAdmin()) { - admin.addReplicationPeer( - id, + Admin admin = conn.getAdmin()) { + admin.addReplicationPeer(id, ReplicationPeerConfig.newBuilder() - .setClusterKey(utilities[slaveClusterNumber].getClusterKey()) - .setReplicateAllUserTables(false) - .setTableCFsMap(ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCfs)).build()); + .setClusterKey(utilities[slaveClusterNumber].getClusterKey()) + .setReplicateAllUserTables(false) + .setTableCFsMap(ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableCfs)).build()); } } private void disablePeer(String id, int masterClusterNumber) throws Exception { try (Connection conn = ConnectionFactory.createConnection(configurations[masterClusterNumber]); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { admin.disableReplicationPeer(id); } } private void enablePeer(String id, int masterClusterNumber) throws Exception { try (Connection conn = ConnectionFactory.createConnection(configurations[masterClusterNumber]); - Admin admin = conn.getAdmin()) { + Admin admin = conn.getAdmin()) { admin.enableReplicationPeer(id); } } @@ -679,11 +669,11 @@ private Table[] getHTablesOnClusters(TableName tableName) throws Exception { return htables; } - private void validateCounts(Table[] htables, byte[] type, - int[] expectedCounts) throws IOException { + private void validateCounts(Table[] htables, byte[] type, int[] expectedCounts) + throws IOException { for (int i = 0; i < htables.length; i++) { - assertEquals(Bytes.toString(type) + " were replicated back ", - expectedCounts[i], getCount(htables[i], type)); + assertEquals(Bytes.toString(type) + " were replicated back ", expectedCounts[i], + getCount(htables[i], type)); } } @@ -694,15 
+684,13 @@ private int getCount(Table t, byte[] type) throws IOException { return Bytes.toInt(res.getValue(count, type)); } - private void deleteAndWait(byte[] row, Table source, Table target) - throws Exception { + private void deleteAndWait(byte[] row, Table source, Table target) throws Exception { Delete del = new Delete(row); source.delete(del); wait(row, target, true); } - private void putAndWait(byte[] row, byte[] fam, Table source, Table target) - throws Exception { + private void putAndWait(byte[] row, byte[] fam, Table source, Table target) throws Exception { Put put = new Put(row); put.addColumn(fam, row, row); source.put(put); @@ -766,15 +754,14 @@ private void wait(byte[] row, Table target, boolean isDeleted) throws Exception Result res = target.get(get); boolean sleep = isDeleted ? res.size() > 0 : res.isEmpty(); if (sleep) { - LOG.info("Waiting for more time for replication. Row:" - + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted); + LOG.info("Waiting for more time for replication. Row:" + Bytes.toString(row) + + ". IsDeleteReplication:" + isDeleted); Thread.sleep(SLEEP_TIME); } else { if (!isDeleted) { assertArrayEquals(res.value(), row); } - LOG.info("Obtained row:" - + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted); + LOG.info("Obtained row:" + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted); break; } } @@ -814,8 +801,8 @@ public void postLogRoll(final Path oldPath, final Path newPath) throws IOExcepti try { latch.await(); } catch (InterruptedException exception) { - LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later " + - "replication tests fail, it's probably because we should still be waiting."); + LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later " + + "replication tests fail, it's probably because we should still be waiting."); Thread.currentThread().interrupt(); } region.getWAL().unregisterWALActionsListener(listener); @@ -847,8 +834,8 @@ public void postDelete(final ObserverContext c, } @Override - public void preGetOp(final ObserverContext c, - final Get get, final List result) throws IOException { + public void preGetOp(final ObserverContext c, final Get get, + final List result) throws IOException { if (get.getAttribute("count") != null) { result.clear(); // order is important! diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java index bbfbdde15c20..6324e03694af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ReplicationTests.class, LargeTests.class}) +@Category({ ReplicationTests.class, LargeTests.class }) public class TestMultiSlaveReplication { @ClassRule @@ -94,14 +94,14 @@ public static void setUpBeforeClass() throws Exception { conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); // smaller block size and capacity to trigger more operations // and test them - conf1.setInt("hbase.regionserver.hlog.blocksize", 1024*20); + conf1.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20); conf1.setInt("replication.source.size.capacity", 1024); conf1.setLong("replication.source.sleepforretries", 100); conf1.setInt("hbase.regionserver.maxlogs", 10); conf1.setLong("hbase.master.logcleaner.ttl", 10); conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100); conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter"); + "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter"); conf1.setInt("hbase.master.cleaner.interval", 5 * 1000); utility1 = new HBaseTestingUtil(conf1); @@ -125,9 +125,9 @@ public static void setUpBeforeClass() throws Exception { new ZKWatcher(conf3, "cluster3", null, true); table = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); } @Test @@ -137,7 +137,7 @@ public void testMultiSlaveReplication() throws Exception { utility2.startMiniCluster(); utility3.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf1); - Admin admin1 = conn.getAdmin()) { + Admin admin1 = conn.getAdmin()) { utility1.getAdmin().createTable(table); utility2.getAdmin().createTable(table); utility3.getAdmin().createTable(table); @@ -146,7 +146,7 @@ public void testMultiSlaveReplication() throws Exception { Table htable3 = utility3.getConnection().getTable(tableName); ReplicationPeerConfigBuilder rpcBuilder = - ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()); + ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()); admin1.addReplicationPeer("1", rpcBuilder.build()); // put "row" and wait 'til it got around, then delete @@ -224,11 +224,11 @@ private void rollWALAndWait(final HBaseTestingUtil utility, final TableName tabl // listen for successful log rolls final WALActionsListener listener = new WALActionsListener() { - @Override - public void postLogRoll(final Path oldPath, final Path newPath) throws IOException { - latch.countDown(); - } - }; + @Override + public void postLogRoll(final Path oldPath, final Path newPath) throws IOException { + latch.countDown(); + } + }; region.getWAL().registerWALActionsListener(listener); // request a roll @@ -239,14 +239,13 @@ public void postLogRoll(final Path oldPath, final Path newPath) throws IOExcepti try { latch.await(); } catch (InterruptedException exception) { - LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. 
If later " + - "replication tests fail, it's probably because we should still be waiting."); + LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later " + + "replication tests fail, it's probably because we should still be waiting."); Thread.currentThread().interrupt(); } region.getWAL().unregisterWALActionsListener(listener); } - private void checkWithWait(byte[] row, int count, Table table) throws Exception { Get get = new Get(row); for (int i = 0; i < NB_RETRIES; i++) { @@ -258,8 +257,8 @@ private void checkWithWait(byte[] row, int count, Table table) throws Exception if (res.size() >= 1) { LOG.info("Row is replicated"); rowReplicated = true; - assertEquals("Table '" + table + "' did not have the expected number of results.", - count, res.size()); + assertEquals("Table '" + table + "' did not have the expected number of results.", count, + res.size()); break; } if (rowReplicated) { @@ -274,19 +273,18 @@ private void checkRow(byte[] row, int count, Table... tables) throws IOException Get get = new Get(row); for (Table table : tables) { Result res = table.get(get); - assertEquals("Table '" + table + "' did not have the expected number of results.", - count, res.size()); + assertEquals("Table '" + table + "' did not have the expected number of results.", count, + res.size()); } } - private void deleteAndWait(byte[] row, Table source, Table... targets) - throws Exception { + private void deleteAndWait(byte[] row, Table source, Table... targets) throws Exception { Delete del = new Delete(row); source.delete(del); Get get = new Get(row); for (int i = 0; i < NB_RETRIES; i++) { - if (i==NB_RETRIES-1) { + if (i == NB_RETRIES - 1) { fail("Waited too much time for del replication"); } boolean removedFromAll = true; @@ -306,15 +304,14 @@ private void deleteAndWait(byte[] row, Table source, Table... targets) } } - private void putAndWait(byte[] row, byte[] fam, Table source, Table... targets) - throws Exception { + private void putAndWait(byte[] row, byte[] fam, Table source, Table... targets) throws Exception { Put put = new Put(row); put.addColumn(fam, row, row); source.put(put); Get get = new Get(row); for (int i = 0; i < NB_RETRIES; i++) { - if (i==NB_RETRIES-1) { + if (i == NB_RETRIES - 1) { fail("Waited too much time for put replication"); } boolean replicatedToAll = true; @@ -337,4 +334,3 @@ private void putAndWait(byte[] row, byte[] fam, Table source, Table... targets) } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java index 6c0a76aaeeb3..2a82e0bbcd6d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -112,19 +112,19 @@ public static void setUpBeforeClass() throws Exception { admin2.createNamespace(NamespaceDescriptor.create(ns2).build()); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tabAName); - builder.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(f1Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); - builder.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(f2Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); + builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); + builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); TableDescriptor tabA = builder.build(); admin1.createTable(tabA); admin2.createTable(tabA); builder = TableDescriptorBuilder.newBuilder(tabBName); - builder.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(f1Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); - builder.setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(f2Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); + builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); + builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()); TableDescriptor tabB = builder.build(); admin1.createTable(tabB); admin2.createTable(tabB); @@ -209,8 +209,7 @@ public void testNamespaceReplication() throws Exception { admin1.removeReplicationPeer(peerId); } - private void put(Table source, byte[] row, byte[]... families) - throws Exception { + private void put(Table source, byte[] row, byte[]... families) throws Exception { for (byte[] fam : families) { Put put = new Put(row); put.addColumn(fam, row, val); @@ -218,8 +217,7 @@ private void put(Table source, byte[] row, byte[]... families) } } - private void delete(Table source, byte[] row, byte[]... families) - throws Exception { + private void delete(Table source, byte[] row, byte[]... families) throws Exception { for (byte[] fam : families) { Delete del = new Delete(row); del.addFamily(fam); @@ -227,8 +225,7 @@ private void delete(Table source, byte[] row, byte[]... families) } } - private void ensureRowExisted(Table target, byte[] row, byte[]... families) - throws Exception { + private void ensureRowExisted(Table target, byte[] row, byte[]... families) throws Exception { for (byte[] fam : families) { Get get = new Get(row); get.addFamily(fam); @@ -249,8 +246,7 @@ private void ensureRowExisted(Table target, byte[] row, byte[]... families) } } - private void ensureRowNotExisted(Table target, byte[] row, byte[]... families) - throws Exception { + private void ensureRowNotExisted(Table target, byte[] row, byte[]... 
families) throws Exception { for (byte[] fam : families) { Get get = new Get(row); get.addFamily(fam); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplicationWithBulkLoadedData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplicationWithBulkLoadedData.java index ec9c40d18177..d576035f357d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplicationWithBulkLoadedData.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplicationWithBulkLoadedData.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -45,7 +44,6 @@ import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; - import org.junit.After; import org.junit.Before; import org.junit.BeforeClass; @@ -108,40 +106,25 @@ private static void startFourthCluster() throws Exception { @Before @Override public void setUpBase() throws Exception { - /** "super.setUpBase()" already sets peer1 from 1 <-> 2 <-> 3 - * and this test add the fourth cluster. - * So we have following topology: - * 1 - * / \ - * 2 4 - * / - * 3 - * - * The 1 -> 4 has two peers, - * ns_peer1: ns1 -> ns1 (validate this peer hfile-refs) - * ns_peer1 configuration is NAMESPACES => ["ns1"] - * - * ns_peer2: ns2:t2_syncup -> ns2:t2_syncup, this peers is - * ns_peer2 configuration is NAMESPACES => ["ns2"], - * TABLE_CFS => { "ns2:t2_syncup" => []} - * - * The 1 -> 2 has one peer, this peer configuration is - * add_peer '2', CLUSTER_KEY => "server1.cie.com:2181:/hbase" - * + /** + * "super.setUpBase()" already sets peer1 from 1 <-> 2 <-> 3 and this test add the fourth + * cluster. 
So we have following topology: 1 / \ 2 4 / 3 The 1 -> 4 has two peers, ns_peer1: ns1 + * -> ns1 (validate this peer hfile-refs) ns_peer1 configuration is NAMESPACES => ["ns1"] + * ns_peer2: ns2:t2_syncup -> ns2:t2_syncup, this peers is ns_peer2 configuration is NAMESPACES + * => ["ns2"], TABLE_CFS => { "ns2:t2_syncup" => []} The 1 -> 2 has one peer, this peer + * configuration is add_peer '2', CLUSTER_KEY => "server1.cie.com:2181:/hbase" */ super.setUpBase(); // Create tables TableDescriptor table1 = TableDescriptorBuilder.newBuilder(NS1_TABLE) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(famName) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); TableDescriptor table2 = TableDescriptorBuilder.newBuilder(NS2_TABLE) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(famName) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); Admin admin1 = UTIL1.getAdmin(); @@ -169,10 +152,8 @@ public void setUpBase() throws Exception { admin4.createTable(table2); /** - * Set ns_peer1 1: ns1 -> 2: ns1 - * - * add_peer 'ns_peer1', CLUSTER_KEY => "zk1,zk2,zk3:2182:/hbase-prod", - * NAMESPACES => ["ns1"] + * Set ns_peer1 1: ns1 -> 2: ns1 add_peer 'ns_peer1', CLUSTER_KEY => + * "zk1,zk2,zk3:2182:/hbase-prod", NAMESPACES => ["ns1"] */ Set namespaces = new HashSet<>(); namespaces.add(NS1); @@ -182,10 +163,8 @@ public void setUpBase() throws Exception { admin1.addReplicationPeer(PEER4_NS, rpc4_ns); /** - * Set ns_peer2 1: ns2:t2_syncup -> 4: ns2:t2_syncup - * - * add_peer 'ns_peer2', CLUSTER_KEY => "zk1,zk2,zk3:2182:/hbase-prod", - * NAMESPACES => ["ns2"], TABLE_CFS => { "ns2:t2_syncup" => [] } + * Set ns_peer2 1: ns2:t2_syncup -> 4: ns2:t2_syncup add_peer 'ns_peer2', CLUSTER_KEY => + * "zk1,zk2,zk3:2182:/hbase-prod", NAMESPACES => ["ns2"], TABLE_CFS => { "ns2:t2_syncup" => [] } */ Map> tableCFsMap = new HashMap<>(); tableCFsMap.put(NS2_TABLE, null); @@ -200,15 +179,13 @@ public void setUpBase() throws Exception { public void tearDownBase() throws Exception { super.tearDownBase(); TableDescriptor table1 = TableDescriptorBuilder.newBuilder(NS1_TABLE) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(famName) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); TableDescriptor table2 = TableDescriptorBuilder.newBuilder(NS2_TABLE) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(famName) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); Admin admin1 = UTIL1.getAdmin(); admin1.disableTable(table1.getTableName()); @@ -271,11 +248,11 @@ public void testBulkLoadReplicationActiveActive() throws Exception { assertTableHasValue(ns2Table, row, value); // case3: The table test will be replicate to cluster1,cluster2,cluster3 - // not replicate to 
cluster4, because we not set other peer for that tables. + // not replicate to cluster4, because we not set other peer for that tables. row = Bytes.toBytes("001_nopeer"); value = Bytes.toBytes("v1"); - assertBulkLoadConditions(tableName, row, value, UTIL1, peer1TestTable, - peer2TestTable, peer3TestTable); + assertBulkLoadConditions(tableName, row, value, UTIL1, peer1TestTable, peer2TestTable, + peer3TestTable); assertTableNoValue(notPeerTable, row, value); // 1 -> 4, table is empty // Verify hfile-refs for 1:ns_peer1, expect is empty diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNonHBaseReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNonHBaseReplicationEndpoint.java index 7b395ad157c7..013b9af8923a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNonHBaseReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNonHBaseReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; @@ -51,7 +50,7 @@ public class TestNonHBaseReplicationEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNonHBaseReplicationEndpoint.class); + HBaseClassTestRule.forClass(TestNonHBaseReplicationEndpoint.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -80,19 +79,18 @@ public void setup() { @Test public void test() throws IOException { - TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .build(); + TableDescriptor td = + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(famName).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build(); Table table = UTIL.createTable(td, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() - .setReplicationEndpointImpl(NonHBaseReplicationEndpoint.class.getName()) - .setReplicateAllUserTables(false) - .setTableCFsMap(new HashMap>() {{ - put(tableName, new ArrayList<>()); - } - }).build(); + .setReplicationEndpointImpl(NonHBaseReplicationEndpoint.class.getName()) + .setReplicateAllUserTables(false).setTableCFsMap(new HashMap>() { + { + put(tableName, new ArrayList<>()); + } + }).build(); ADMIN.addReplicationPeer("1", peerConfig); loadData(table); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java index f27e8e9067d6..16ebf6cfd684 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -66,7 +66,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; -@Category({FlakeyTests.class, LargeTests.class}) +@Category({ FlakeyTests.class, LargeTests.class }) public class TestPerTableCFReplication { @ClassRule @@ -112,14 +112,14 @@ public static void setUpBeforeClass() throws Exception { conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); // smaller block size and capacity to trigger more operations // and test them - conf1.setInt("hbase.regionserver.hlog.blocksize", 1024*20); + conf1.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20); conf1.setInt("replication.source.size.capacity", 1024); conf1.setLong("replication.source.sleepforretries", 100); conf1.setInt("hbase.regionserver.maxlogs", 10); conf1.setLong("hbase.master.logcleaner.ttl", 10); conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100); conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter"); + "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter"); utility1 = new HBaseTestingUtil(conf1); utility1.startMiniZKCluster(); @@ -141,36 +141,36 @@ public static void setUpBeforeClass() throws Exception { new ZKWatcher(conf3, "cluster3", null, true); table = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); tabA = TableDescriptorBuilder.newBuilder(tabAName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f3Name) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f3Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .build(); tabB = TableDescriptorBuilder.newBuilder(tabBName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f3Name) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f3Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .build(); tabC = TableDescriptorBuilder.newBuilder(tabCName) - 
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f3Name) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f3Name) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .build(); utility1.startMiniCluster(); utility2.startMiniCluster(); @@ -205,28 +205,28 @@ public void testParseTableCFsFromConfig() { // 2. single table: "tableName1" / "tableName2:cf1" / "tableName3:cf1,cf3" tabCFsMap = ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableName1.getNameAsString()); assertEquals(1, tabCFsMap.size()); // only one table - assertTrue(tabCFsMap.containsKey(tableName1)); // its table name is "tableName1" - assertFalse(tabCFsMap.containsKey(tableName2)); // not other table - assertEquals(null, tabCFsMap.get(tableName1)); // null cf-list, + assertTrue(tabCFsMap.containsKey(tableName1)); // its table name is "tableName1" + assertFalse(tabCFsMap.containsKey(tableName2)); // not other table + assertEquals(null, tabCFsMap.get(tableName1)); // null cf-list, tabCFsMap = ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableName2 + ":cf1"); assertEquals(1, tabCFsMap.size()); // only one table - assertTrue(tabCFsMap.containsKey(tableName2)); // its table name is "tableName2" - assertFalse(tabCFsMap.containsKey(tableName1)); // not other table - assertEquals(1, tabCFsMap.get(tableName2).size()); // cf-list contains only 1 cf + assertTrue(tabCFsMap.containsKey(tableName2)); // its table name is "tableName2" + assertFalse(tabCFsMap.containsKey(tableName1)); // not other table + assertEquals(1, tabCFsMap.get(tableName2).size()); // cf-list contains only 1 cf assertEquals("cf1", tabCFsMap.get(tableName2).get(0));// the only cf is "cf1" tabCFsMap = ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableName3 + " : cf1 , cf3"); assertEquals(1, tabCFsMap.size()); // only one table - assertTrue(tabCFsMap.containsKey(tableName3)); // its table name is "tableName2" - assertFalse(tabCFsMap.containsKey(tableName1)); // not other table - assertEquals(2, tabCFsMap.get(tableName3).size()); // cf-list contains 2 cf + assertTrue(tabCFsMap.containsKey(tableName3)); // its table name is "tableName2" + assertFalse(tabCFsMap.containsKey(tableName1)); // not other table + assertEquals(2, tabCFsMap.get(tableName3).size()); // cf-list contains 2 cf assertTrue(tabCFsMap.get(tableName3).contains("cf1"));// contains "cf1" assertTrue(tabCFsMap.get(tableName3).contains("cf3"));// contains "cf3" // 3. 
multiple tables: "tableName1 ; tableName2:cf1 ; tableName3:cf1,cf3" - tabCFsMap = ReplicationPeerConfigUtil.parseTableCFsFromConfig(tableName1 + " ; " + tableName2 - + ":cf1 ; " + tableName3 + ":cf1,cf3"); + tabCFsMap = ReplicationPeerConfigUtil.parseTableCFsFromConfig( + tableName1 + " ; " + tableName2 + ":cf1 ; " + tableName3 + ":cf1,cf3"); // 3.1 contains 3 tables : "tableName1", "tableName2" and "tableName3" assertEquals(3, tabCFsMap.size()); assertTrue(tabCFsMap.containsKey(tableName1)); @@ -262,7 +262,7 @@ public void testParseTableCFsFromConfig() { assertTrue(tabCFsMap.get(tableName3).contains("cf3")); // 5. invalid format "tableName1:tt:cf1 ; tableName2::cf1 ; tableName3:cf1,cf3" - // "tableName1:tt:cf1" and "tableName2::cf1" are invalid and will be ignored totally + // "tableName1:tt:cf1" and "tableName2::cf1" are invalid and will be ignored totally tabCFsMap = ReplicationPeerConfigUtil.parseTableCFsFromConfig( tableName1 + ":tt:cf1 ; " + tableName2 + "::cf1 ; " + tableName3 + ":cf1,cf3"); // 5.1 no "tableName1" and "tableName2", only "tableName3" @@ -270,11 +270,11 @@ public void testParseTableCFsFromConfig() { assertFalse(tabCFsMap.containsKey(tableName1)); assertFalse(tabCFsMap.containsKey(tableName2)); assertTrue(tabCFsMap.containsKey(tableName3)); - // 5.2 table "tableName3" : cf-list contains "cf1" and "cf3" + // 5.2 table "tableName3" : cf-list contains "cf1" and "cf3" assertEquals(2, tabCFsMap.get(tableName3).size()); assertTrue(tabCFsMap.get(tableName3).contains("cf1")); assertTrue(tabCFsMap.get(tableName3).contains("cf3")); - } + } @Test public void testTableCFsHelperConverter() { @@ -298,8 +298,7 @@ public void testTableCFsHelperConverter() { tabCFsMap.put(tableName1, null); tableCFs = ReplicationPeerConfigUtil.convert(tabCFsMap); assertEquals(1, tableCFs.length); // only one table - assertEquals(tableName1.toString(), - tableCFs[0].getTableName().getQualifier().toStringUtf8()); + assertEquals(tableName1.toString(), tableCFs[0].getTableName().getQualifier().toStringUtf8()); assertEquals(0, tableCFs[0].getFamiliesCount()); tabCFsMap.clear(); @@ -307,8 +306,7 @@ public void testTableCFsHelperConverter() { tabCFsMap.get(tableName2).add("cf1"); tableCFs = ReplicationPeerConfigUtil.convert(tabCFsMap); assertEquals(1, tableCFs.length); // only one table - assertEquals(tableName2.toString(), - tableCFs[0].getTableName().getQualifier().toStringUtf8()); + assertEquals(tableName2.toString(), tableCFs[0].getTableName().getQualifier().toStringUtf8()); assertEquals(1, tableCFs[0].getFamiliesCount()); assertEquals("cf1", tableCFs[0].getFamilies(0).toStringUtf8()); @@ -318,8 +316,7 @@ public void testTableCFsHelperConverter() { tabCFsMap.get(tableName3).add("cf3"); tableCFs = ReplicationPeerConfigUtil.convert(tabCFsMap); assertEquals(1, tableCFs.length); - assertEquals(tableName3.toString(), - tableCFs[0].getTableName().getQualifier().toStringUtf8()); + assertEquals(tableName3.toString(), tableCFs[0].getTableName().getQualifier().toStringUtf8()); assertEquals(2, tableCFs[0].getFamiliesCount()); assertEquals("cf1", tableCFs[0].getFamilies(0).toStringUtf8()); assertEquals("cf3", tableCFs[0].getFamilies(1).toStringUtf8()); @@ -339,15 +336,15 @@ public void testTableCFsHelperConverter() { assertNotNull(ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName3.toString())); assertEquals(0, - ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName1.toString()).getFamiliesCount()); + ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName1.toString()).getFamiliesCount()); - assertEquals(1, 
ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName2.toString()) - .getFamiliesCount()); + assertEquals(1, + ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName2.toString()).getFamiliesCount()); assertEquals("cf1", ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName2.toString()) .getFamilies(0).toStringUtf8()); - assertEquals(2, ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName3.toString()) - .getFamiliesCount()); + assertEquals(2, + ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName3.toString()).getFamiliesCount()); assertEquals("cf1", ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName3.toString()) .getFamilies(0).toStringUtf8()); assertEquals("cf3", ReplicationPeerConfigUtil.getTableCF(tableCFs, tableName3.toString()) @@ -373,12 +370,12 @@ public void testTableCFsHelperConverter() { public void testPerTableCFReplication() throws Exception { LOG.info("testPerTableCFReplication"); try (Connection connection1 = ConnectionFactory.createConnection(conf1); - Connection connection2 = ConnectionFactory.createConnection(conf2); - Connection connection3 = ConnectionFactory.createConnection(conf3); - Admin admin1 = connection1.getAdmin(); - Admin admin2 = connection2.getAdmin(); - Admin admin3 = connection3.getAdmin(); - Admin replicationAdmin = connection1.getAdmin()) { + Connection connection2 = ConnectionFactory.createConnection(conf2); + Connection connection3 = ConnectionFactory.createConnection(conf3); + Admin admin1 = connection1.getAdmin(); + Admin admin2 = connection2.getAdmin(); + Admin admin3 = connection3.getAdmin(); + Admin replicationAdmin = connection1.getAdmin()) { admin1.createTable(tabA); admin1.createTable(tabB); @@ -408,9 +405,9 @@ public void testPerTableCFReplication() throws Exception { tableCFs.put(tabBName, new ArrayList<>()); tableCFs.get(tabBName).add("f1"); tableCFs.get(tabBName).add("f3"); - ReplicationPeerConfig rpc2 = ReplicationPeerConfig.newBuilder() - .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(false) - .setTableCFsMap(tableCFs).build(); + ReplicationPeerConfig rpc2 = + ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()) + .setReplicateAllUserTables(false).setTableCFsMap(tableCFs).build(); replicationAdmin.addReplicationPeer("2", rpc2); tableCFs.clear(); @@ -418,9 +415,9 @@ public void testPerTableCFReplication() throws Exception { tableCFs.put(tabBName, new ArrayList<>()); tableCFs.get(tabBName).add("f1"); tableCFs.get(tabBName).add("f2"); - ReplicationPeerConfig rpc3 = ReplicationPeerConfig.newBuilder() - .setClusterKey(utility3.getClusterKey()).setReplicateAllUserTables(false) - .setTableCFsMap(tableCFs).build(); + ReplicationPeerConfig rpc3 = + ReplicationPeerConfig.newBuilder().setClusterKey(utility3.getClusterKey()) + .setReplicateAllUserTables(false).setTableCFsMap(tableCFs).build(); replicationAdmin.addReplicationPeer("3", rpc3); // A1. 
tableA can only replicated to cluster3 @@ -440,12 +437,12 @@ public void testPerTableCFReplication() throws Exception { putAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B); deleteAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B); - // cf 'f2' of tableB can only replicated to cluster3 + // cf 'f2' of tableB can only replicated to cluster3 putAndWaitWithFamily(row1, f2Name, htab1B, htab3B); ensureRowNotReplicated(row1, f2Name, htab2B); deleteAndWaitWithFamily(row1, f2Name, htab1B, htab3B); - // cf 'f3' of tableB can only replicated to cluster2 + // cf 'f3' of tableB can only replicated to cluster2 putAndWaitWithFamily(row1, f3Name, htab1B, htab2B); ensureRowNotReplicated(row1, f3Name, htab3B); deleteAndWaitWithFamily(row1, f3Name, htab1B, htab2B); @@ -472,26 +469,26 @@ public void testPerTableCFReplication() throws Exception { tableCFs.get(tabCName).add("f2"); tableCFs.get(tabCName).add("f3"); replicationAdmin.updateReplicationPeerConfig("2", - ReplicationPeerConfig.newBuilder(replicationAdmin.getReplicationPeerConfig("2")) - .setTableCFsMap(tableCFs).build()); + ReplicationPeerConfig.newBuilder(replicationAdmin.getReplicationPeerConfig("2")) + .setTableCFsMap(tableCFs).build()); tableCFs.clear(); tableCFs.put(tabBName, null); tableCFs.put(tabCName, new ArrayList<>()); tableCFs.get(tabCName).add("f3"); replicationAdmin.updateReplicationPeerConfig("3", - ReplicationPeerConfig.newBuilder(replicationAdmin.getReplicationPeerConfig("3")) - .setTableCFsMap(tableCFs).build()); + ReplicationPeerConfig.newBuilder(replicationAdmin.getReplicationPeerConfig("3")) + .setTableCFsMap(tableCFs).build()); // B1. cf 'f1' of tableA can only replicated to cluster2 putAndWaitWithFamily(row2, f1Name, htab1A, htab2A); ensureRowNotReplicated(row2, f1Name, htab3A); deleteAndWaitWithFamily(row2, f1Name, htab1A, htab2A); - // cf 'f2' of tableA can only replicated to cluster2 + // cf 'f2' of tableA can only replicated to cluster2 putAndWaitWithFamily(row2, f2Name, htab1A, htab2A); ensureRowNotReplicated(row2, f2Name, htab3A); deleteAndWaitWithFamily(row2, f2Name, htab1A, htab2A); - // cf 'f3' of tableA isn't replicable to either cluster2 or cluster3 + // cf 'f3' of tableA isn't replicable to either cluster2 or cluster3 putAndWaitWithFamily(row2, f3Name, htab1A); ensureRowNotReplicated(row2, f3Name, htab2A, htab3A); deleteAndWaitWithFamily(row2, f3Name, htab1A); @@ -513,11 +510,11 @@ public void testPerTableCFReplication() throws Exception { putAndWaitWithFamily(row2, f1Name, htab1C); ensureRowNotReplicated(row2, f1Name, htab2C, htab3C); deleteAndWaitWithFamily(row2, f1Name, htab1C); - // cf 'f2' of tableC can only replicated to cluster2 + // cf 'f2' of tableC can only replicated to cluster2 putAndWaitWithFamily(row2, f2Name, htab1C, htab2C); ensureRowNotReplicated(row2, f2Name, htab3C); deleteAndWaitWithFamily(row2, f2Name, htab1C, htab2C); - // cf 'f3' of tableC can replicated to cluster2 and cluster3 + // cf 'f3' of tableC can replicated to cluster2 and cluster3 putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C); deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C); } @@ -532,9 +529,8 @@ private void ensureRowNotReplicated(byte[] row, byte[] fam, Table... tables) thr } } - private void deleteAndWaitWithFamily(byte[] row, byte[] fam, - Table source, Table... targets) - throws Exception { + private void deleteAndWaitWithFamily(byte[] row, byte[] fam, Table source, Table... 
targets) + throws Exception { Delete del = new Delete(row); del.addFamily(fam); source.delete(del); @@ -542,7 +538,7 @@ private void deleteAndWaitWithFamily(byte[] row, byte[] fam, Get get = new Get(row); get.addFamily(fam); for (int i = 0; i < NB_RETRIES; i++) { - if (i==NB_RETRIES-1) { + if (i == NB_RETRIES - 1) { fail("Waited too much time for del replication"); } boolean removedFromAll = true; @@ -562,9 +558,8 @@ private void deleteAndWaitWithFamily(byte[] row, byte[] fam, } } - private void putAndWaitWithFamily(byte[] row, byte[] fam, - Table source, Table... targets) - throws Exception { + private void putAndWaitWithFamily(byte[] row, byte[] fam, Table source, Table... targets) + throws Exception { Put put = new Put(row); put.addColumn(fam, row, val); source.put(put); @@ -572,7 +567,7 @@ private void putAndWaitWithFamily(byte[] row, byte[] fam, Get get = new Get(row); get.addFamily(fam); for (int i = 0; i < NB_RETRIES; i++) { - if (i==NB_RETRIES-1) { + if (i == NB_RETRIES - 1) { fail("Waited too much time for put replication"); } boolean replicatedToAll = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestRemoveFromSerialReplicationPeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestRemoveFromSerialReplicationPeer.java index eda15d815a84..34d1388e9131 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestRemoveFromSerialReplicationPeer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestRemoveFromSerialReplicationPeer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestRemoveFromSerialReplicationPeer extends SerialReplicationTestBa @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRemoveFromSerialReplicationPeer.class); + HBaseClassTestRule.forClass(TestRemoveFromSerialReplicationPeer.class); @Before public void setUp() throws IOException, StreamLacksCapabilityException { @@ -56,7 +56,7 @@ public void setUp() throws IOException, StreamLacksCapabilityException { private void waitUntilHasLastPushedSequenceId(RegionInfo region) throws Exception { ReplicationQueueStorage queueStorage = - UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage(); + UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage(); UTIL.waitFor(30000, new ExplainingPredicate() { @Override @@ -74,11 +74,12 @@ public String explainFailure() throws Exception { @Test public void testRemoveTable() throws Exception { TableName tableName = createTable(); - ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder() - .setClusterKey("127.0.0.1:2181:/hbase") - .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()) - .setReplicateAllUserTables(false) - .setTableCFsMap(ImmutableMap.of(tableName, Collections.emptyList())).setSerial(true).build(); + ReplicationPeerConfig peerConfig = + ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase") + .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()) + .setReplicateAllUserTables(false) + .setTableCFsMap(ImmutableMap.of(tableName, Collections.emptyList())).setSerial(true) + .build(); UTIL.getAdmin().addReplicationPeer(PEER_ID, peerConfig, true); try (Table table = UTIL.getConnection().getTable(tableName)) { for (int i = 0; i 
< 100; i++) { @@ -92,7 +93,7 @@ public void testRemoveTable() throws Exception { ReplicationPeerConfig.newBuilder(peerConfig).setTableCFsMap(Collections.emptyMap()).build()); ReplicationQueueStorage queueStorage = - UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage(); + UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage(); assertEquals(HConstants.NO_SEQNUM, queueStorage.getLastSequenceId(region.getEncodedName(), PEER_ID)); } @@ -109,11 +110,11 @@ public void testRemoveSerialFlag() throws Exception { RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getRegionInfo(); waitUntilHasLastPushedSequenceId(region); UTIL.getAdmin().updateReplicationPeerConfig(PEER_ID, ReplicationPeerConfig - .newBuilder(UTIL.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(false).build()); + .newBuilder(UTIL.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(false).build()); waitUntilReplicationDone(100); ReplicationQueueStorage queueStorage = - UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage(); + UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage(); assertEquals(HConstants.NO_SEQNUM, queueStorage.getLastSequenceId(region.getEncodedName(), PEER_ID)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index e1228990a0bd..1ff3363eb1f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -63,10 +63,9 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables; /** - * This class is only a base for other integration-level replication tests. - * Do not add tests here. - * TestReplicationSmallTests is where tests that don't require bring machines up/down should go - * All other tests should have their own classes and extend this one + * This class is only a base for other integration-level replication tests. Do not add tests here. 
+ * TestReplicationSmallTests is where tests that don't require bring machines up/down should go All + * other tests should have their own classes and extend this one */ public class TestReplicationBase { private static final Logger LOG = LoggerFactory.getLogger(TestReplicationBase.class); @@ -87,8 +86,7 @@ public class TestReplicationBase { protected static int NUM_SLAVES1 = 1; protected static int NUM_SLAVES2 = 1; protected static final int NB_ROWS_IN_BATCH = 100; - protected static final int NB_ROWS_IN_BIG_BATCH = - NB_ROWS_IN_BATCH * 10; + protected static final int NB_ROWS_IN_BIG_BATCH = NB_ROWS_IN_BATCH * 10; protected static final long SLEEP_TIME = 500; protected static final int NB_RETRIES = 50; protected static AtomicInteger replicateCount = new AtomicInteger(); @@ -111,8 +109,7 @@ protected boolean isSyncPeer() { protected final void cleanUp() throws IOException, InterruptedException { // Starting and stopping replication can make us miss new logs, // rolling like this makes sure the most recent one gets added to the queue - for (JVMClusterUtil.RegionServerThread r : UTIL1.getHBaseCluster() - .getRegionServerThreads()) { + for (JVMClusterUtil.RegionServerThread r : UTIL1.getHBaseCluster().getRegionServerThreads()) { UTIL1.getAdmin().rollWALWriter(r.getRegionServer().getServerName()); } int rowCount = UTIL1.countRows(tableName); @@ -150,7 +147,7 @@ protected static void waitForReplication(int expectedRows, int retries) } protected static void waitForReplication(Table table, int expectedRows, int retries) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Scan scan; for (int i = 0; i < retries; i++) { scan = new Scan(); @@ -209,8 +206,7 @@ protected static void setupConfig(HBaseTestingUtil util, String znodeParent) { conf.setLong("hbase.serial.replication.waiting.ms", 100); } - static void configureClusters(HBaseTestingUtil util1, - HBaseTestingUtil util2) { + static void configureClusters(HBaseTestingUtil util1, HBaseTestingUtil util2) { setupConfig(util1, "/1"); setupConfig(util2, "/2"); @@ -240,12 +236,11 @@ static void restartTargetHBaseCluster(int numSlaves) throws Exception { htable2 = UTIL2.getConnection().getTable(tableName); } - protected static void createTable(TableName tableName) - throws IOException { + protected static void createTable(TableName tableName) throws IOException { TableDescriptor table = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName).setMaxVersions(100) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName).setMaxVersions(100) + .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build(); UTIL1.createTable(table, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); UTIL2.createTable(table, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); UTIL1.waitUntilAllRegionsAssigned(tableName); @@ -288,17 +283,17 @@ private boolean peerExist(String peerId) throws IOException { protected final void addPeer(String peerId, TableName tableName) throws Exception { if (!peerExist(peerId)) { ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder() - .setClusterKey(UTIL2.getClusterKey()).setSerial(isSerialPeer()) - .setReplicationEndpointImpl(ReplicationEndpointTest.class.getName()); + .setClusterKey(UTIL2.getClusterKey()).setSerial(isSerialPeer()) + 
.setReplicationEndpointImpl(ReplicationEndpointTest.class.getName()); if (isSyncPeer()) { FileSystem fs2 = UTIL2.getTestFileSystem(); // The remote wal dir is not important as we do not use it in DA state, here we only need to // confirm that a sync peer in DA state can still replicate data to remote cluster // asynchronously. builder.setReplicateAllUserTables(false) - .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of())) - .setRemoteWALDir(new Path("/RemoteWAL") - .makeQualified(fs2.getUri(), fs2.getWorkingDirectory()).toUri().toString()); + .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of())) + .setRemoteWALDir(new Path("/RemoteWAL") + .makeQualified(fs2.getUri(), fs2.getWorkingDirectory()).toUri().toString()); } hbaseAdmin.addReplicationPeer(peerId, builder.build()); } @@ -404,7 +399,8 @@ public ReplicationEndpointTest() { replicateCount.set(0); } - @Override public boolean replicate(ReplicateContext replicateContext) { + @Override + public boolean replicate(ReplicateContext replicateContext) { replicateCount.incrementAndGet(); replicatedEntries.addAll(replicateContext.getEntries()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java index 269af5c1540e..c665e81caa83 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -44,6 +45,7 @@ import org.junit.runners.Parameterized.Parameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** @@ -60,7 +62,8 @@ public class TestReplicationChangingPeerRegionservers extends TestReplicationBas private static final Logger LOG = LoggerFactory.getLogger(TestReplicationChangingPeerRegionservers.class); - @SuppressWarnings("checkstyle:VisibilityModifier") @Parameter(0) + @SuppressWarnings("checkstyle:VisibilityModifier") + @Parameter(0) public boolean serialPeer; @Parameter(1) @@ -86,8 +89,7 @@ public static List parameters() { public void setUp() throws Exception { // Starting and stopping replication can make us miss new logs, // rolling like this makes sure the most recent one gets added to the queue - for (JVMClusterUtil.RegionServerThread r : UTIL1.getHBaseCluster() - .getRegionServerThreads()) { + for (JVMClusterUtil.RegionServerThread r : UTIL1.getHBaseCluster().getRegionServerThreads()) { UTIL1.getAdmin().rollWALWriter(r.getRegionServer().getServerName()); } UTIL1.deleteTableData(tableName); @@ -125,7 +127,8 @@ public void testChangingNumberOfPeerRegionServers() throws IOException, Interrup // This test wants two RS's up. We only run one generally so add one. 
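
Re-wrapped across hunks, the peer that TestReplicationBase.addPeer() builds above is hard to read in one piece. Written out flat (a sketch, not part of the patch, using the same base-class members such as UTIL2, hbaseAdmin, fs2, isSerialPeer() and isSyncPeer()):

    ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder()
      .setClusterKey(UTIL2.getClusterKey())           // destination cluster
      .setSerial(isSerialPeer())                      // push edits strictly in order per region
      .setReplicationEndpointImpl(ReplicationEndpointTest.class.getName());
    if (isSyncPeer()) {
      // In DA state the remote WAL dir is not actually used; the tests only need a sync peer
      // that still replicates to the remote cluster asynchronously.
      FileSystem fs2 = UTIL2.getTestFileSystem();
      builder.setReplicateAllUserTables(false)
        .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of()))  // empty list = every CF
        .setRemoteWALDir(new Path("/RemoteWAL")
          .makeQualified(fs2.getUri(), fs2.getWorkingDirectory()).toUri().toString());
    }
    hbaseAdmin.addReplicationPeer(peerId, builder.build());
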
peerCluster.startRegionServer(); Waiter.waitFor(peerCluster.getConfiguration(), 30000, new Waiter.Predicate() { - @Override public boolean evaluate() throws Exception { + @Override + public boolean evaluate() throws Exception { return peerCluster.getLiveRegionServerThreads().size() > 1; } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java index 4ea0bcf60733..471500d61f71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({ReplicationTests.class, LargeTests.class}) +@Category({ ReplicationTests.class, LargeTests.class }) public class TestReplicationDisableInactivePeer extends TestReplicationBase { @ClassRule @@ -44,11 +44,9 @@ public class TestReplicationDisableInactivePeer extends TestReplicationBase { LoggerFactory.getLogger(TestReplicationDisableInactivePeer.class); /** - * Test disabling an inactive peer. Add a peer which is inactive, trying to - * insert, disable the peer, then activate the peer and make sure nothing is - * replicated. In Addition, enable the peer and check the updates are - * replicated. - * + * Test disabling an inactive peer. Add a peer which is inactive, trying to insert, disable the + * peer, then activate the peer and make sure nothing is replicated. In Addition, enable the peer + * and check the updates are replicated. * @throws Exception */ @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java index 872aa0a204dd..8414c12e6f2e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDroppedTables.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static org.junit.Assert.fail; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -64,8 +63,7 @@ public class TestReplicationDroppedTables extends TestReplicationBase { public void setUpBase() throws Exception { // Starting and stopping replication can make us miss new logs, // rolling like this makes sure the most recent one gets added to the queue - for (JVMClusterUtil.RegionServerThread r : UTIL1.getHBaseCluster() - .getRegionServerThreads()) { + for (JVMClusterUtil.RegionServerThread r : UTIL1.getHBaseCluster().getRegionServerThreads()) { UTIL1.getAdmin().rollWALWriter(r.getRegionServer().getServerName()); } // Initialize the peer after wal rolling, so that we will abandon the stuck WALs. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java index b7906ad47a7d..e6d8d4a5723b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDeletedTableCFs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -123,8 +122,7 @@ public void setup() throws Exception { } // add peer ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(utility2.getClusterKey()) - .setReplicateAllUserTables(true).build(); + .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(true).build(); admin1.addReplicationPeer(PEER_ID, rpc); // create table createTable(); @@ -215,8 +213,8 @@ private void verifyReplicationStuck() throws Exception { for (int i = 0; i < NB_RETRIES; i++) { Result result = peerTable.get(new Get(ROW).addColumn(NORMAL_CF, QUALIFIER)); if (result != null && !result.isEmpty()) { - fail("Edit should have been stuck behind dropped tables, but value is " + Bytes - .toString(result.getValue(NORMAL_CF, QUALIFIER))); + fail("Edit should have been stuck behind dropped tables, but value is " + + Bytes.toString(result.getValue(NORMAL_CF, QUALIFIER))); } else { LOG.info("Row not replicated, let's wait a bit more..."); Thread.sleep(SLEEP_TIME); @@ -226,11 +224,9 @@ private void verifyReplicationStuck() throws Exception { } private TableDescriptor createTableDescriptor(byte[]... cfs) { - return TableDescriptorBuilder.newBuilder(TABLE) - .setColumnFamilies(Arrays.stream(cfs).map(cf -> - ColumnFamilyDescriptorBuilder.newBuilder(cf).setScope(REPLICATION_SCOPE_GLOBAL).build()) - .collect(Collectors.toList()) - ).build(); + return TableDescriptorBuilder.newBuilder(TABLE).setColumnFamilies(Arrays.stream(cfs).map( + cf -> ColumnFamilyDescriptorBuilder.newBuilder(cf).setScope(REPLICATION_SCOPE_GLOBAL).build()) + .collect(Collectors.toList())).build(); } private void deleteCf(Admin admin) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java index 98a52be4183b..18117410a2d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEditsDroppedWithDroppedTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -86,7 +86,7 @@ public class TestReplicationEditsDroppedWithDroppedTable { public static void setUpBeforeClass() throws Exception { // Set true to filter replication edits for dropped table conf1.setBoolean(HBaseInterClusterReplicationEndpoint.REPLICATION_DROP_ON_DELETED_TABLE_KEY, - true); + true); conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); conf1.setInt("replication.source.nb.capacity", 1); utility1 = new HBaseTestingUtil(conf1); @@ -125,8 +125,7 @@ public void setup() throws Exception { } // add peer ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(utility2.getClusterKey()) - .setReplicateAllUserTables(true).build(); + .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(true).build(); admin1.addReplicationPeer(PEER_ID, rpc); // create table createTable(NORMAL_TABLE); @@ -144,10 +143,9 @@ public void tearDown() throws Exception { } private void createTable(TableName tableName) throws Exception { - TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(FAMILY) - .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build() - ).build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(FAMILY).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build(); admin1.createTable(desc); admin2.createTable(desc); utility1.waitUntilAllRegionsAssigned(tableName); @@ -237,8 +235,8 @@ private void verifyReplicationStuck() throws Exception { for (int i = 0; i < NB_RETRIES; i++) { Result result = normalTable.get(new Get(ROW).addColumn(FAMILY, QUALIFIER)); if (result != null && !result.isEmpty()) { - fail("Edit should have been stuck behind dropped tables, but value is " + Bytes - .toString(result.getValue(FAMILY, QUALIFIER))); + fail("Edit should have been stuck behind dropped tables, but value is " + + Bytes.toString(result.getValue(FAMILY, QUALIFIER))); } else { LOG.info("Row not replicated, let's wait a bit more..."); Thread.sleep(SLEEP_TIME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java index da7f98824cfd..a198ce67324f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -55,7 +57,7 @@ public class TestReplicationEmptyWALRecovery extends TestReplicationBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationEmptyWALRecovery.class); + HBaseClassTestRule.forClass(TestReplicationEmptyWALRecovery.class); @Before public void setUp() throws IOException, InterruptedException { @@ -67,7 +69,6 @@ public void setUp() throws IOException, InterruptedException { /** * Waits until there is only one log(the current writing one) in the replication queue - * * @param numRs number of region servers */ private void waitForLogAdvance(int numRs) { @@ -77,20 +78,20 @@ public boolean evaluate() throws Exception { for (int i = 0; i < numRs; i++) { HRegionServer hrs = UTIL1.getHBaseCluster().getRegionServer(i); RegionInfo regionInfo = - UTIL1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo(); + UTIL1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo(); WAL wal = hrs.getWAL(regionInfo); Path currentFile = ((AbstractFSWAL) wal).getCurrentFileName(); - Replication replicationService = - (Replication) UTIL1.getHBaseCluster().getRegionServer(i).getReplicationSourceService(); + Replication replicationService = (Replication) UTIL1.getHBaseCluster().getRegionServer(i) + .getReplicationSourceService(); for (ReplicationSourceInterface rsi : replicationService.getReplicationManager() - .getSources()) { + .getSources()) { ReplicationSource source = (ReplicationSource) rsi; // We are making sure that there is only one log queue and that is for the // current WAL of region server String logPrefix = source.getQueues().keySet().stream().findFirst().get(); if (!currentFile.equals(source.getCurrentPath()) - || source.getQueues().keySet().size() != 1 - || source.getQueues().get(logPrefix).size() != 1) { + || source.getQueues().keySet().size() != 1 + || source.getQueues().get(logPrefix).size() != 1) { return false; } } @@ -105,10 +106,10 @@ private void verifyNumberOfLogsInQueue(int numQueues, int numRs) { @Override public boolean evaluate() { for (int i = 0; i < numRs; i++) { - Replication replicationService = - (Replication) UTIL1.getHBaseCluster().getRegionServer(i).getReplicationSourceService(); + Replication replicationService = (Replication) UTIL1.getHBaseCluster().getRegionServer(i) + .getReplicationSourceService(); for (ReplicationSourceInterface rsi : replicationService.getReplicationManager() - .getSources()) { + .getSources()) { ReplicationSource source = (ReplicationSource) rsi; String logPrefix = source.getQueues().keySet().stream().findFirst().get(); if (source.getQueues().get(logPrefix).size() != numQueues) { @@ -129,7 +130,7 @@ public void testEmptyWALRecovery() throws Exception { long ts = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < numRs; i++) { RegionInfo regionInfo = - UTIL1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo(); + UTIL1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo(); WAL wal = UTIL1.getHBaseCluster().getRegionServer(i).getWAL(regionInfo); Path currentWalPath = AbstractFSWALProvider.getCurrentFileName(wal); String walGroupId = AbstractFSWALProvider.getWALPrefixFromWALName(currentWalPath.getName()); @@ -168,7 +169,7 @@ public void testReplicationOfEmptyWALFollowingNonEmptyWAL() throws Exception { long ts = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < numRs; i++) { RegionInfo regionInfo = - UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo(); + 
UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo(); WAL wal = UTIL1.getHBaseCluster().getRegionServer(i).getWAL(regionInfo); Path currentWalPath = AbstractFSWALProvider.getCurrentFileName(wal); @@ -225,7 +226,7 @@ public void testReplicationOfEmptyWALFollowedByNonEmptyWAL() throws Exception { WAL wal = null; for (int i = 0; i < numRs; i++) { RegionInfo regionInfo = - UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo(); + UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo(); wal = UTIL1.getHBaseCluster().getRegionServer(i).getWAL(regionInfo); Path currentWalPath = AbstractFSWALProvider.getCurrentFileName(wal); appendEntriesToWal(numOfEntriesToReplicate, wal); @@ -279,7 +280,7 @@ public void testReplicationOfEmptyWALSurroundedNonEmptyWAL() throws Exception { WAL wal = null; for (int i = 0; i < numRs; i++) { RegionInfo regionInfo = - UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo(); + UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo(); wal = UTIL1.getHBaseCluster().getRegionServer(i).getWAL(regionInfo); Path currentWalPath = AbstractFSWALProvider.getCurrentFileName(wal); appendEntriesToWal(numOfEntriesToReplicate, wal); @@ -329,7 +330,7 @@ private void injectEmptyWAL(int numRs, List emptyWalPaths) throws IOExcept replicationService.getReplicationManager().preLogRoll(emptyWalPaths.get(i)); replicationService.getReplicationManager().postLogRoll(emptyWalPaths.get(i)); RegionInfo regionInfo = - UTIL1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo(); + UTIL1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo(); WAL wal = hrs.getWAL(regionInfo); wal.rollWriter(true); } @@ -342,7 +343,7 @@ protected WALKeyImpl getWalKeyImpl() { // Roll the WAL and wait for it to get deque from the log queue private void rollWalsAndWaitForDeque(int numRs) throws IOException { RegionInfo regionInfo = - UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo(); + UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo(); for (int i = 0; i < numRs; i++) { WAL wal = UTIL1.getHBaseCluster().getRegionServer(i).getWAL(regionInfo); wal.rollWriter(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java index 7d33ce683c04..1627be15911e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -109,12 +109,11 @@ public void setup() throws Exception { ReplicationEndpointForTest.replicateCount.set(0); ReplicationEndpointReturningFalse.replicated.set(false); ReplicationEndpointForTest.lastEntries = null; - final List rsThreads = - UTIL1.getMiniHBaseCluster().getRegionServerThreads(); + final List rsThreads = UTIL1.getMiniHBaseCluster().getRegionServerThreads(); for (RegionServerThread rs : rsThreads) { UTIL1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName()); } - // Wait for all log roll to finish + // Wait for all log roll to finish UTIL1.waitFor(3000, new Waiter.ExplainingPredicate() { @Override public boolean evaluate() throws Exception { @@ -143,10 +142,8 @@ public String explainFailure() throws Exception { public void testCustomReplicationEndpoint() throws Exception { // test installing a custom replication endpoint other than the default one. hbaseAdmin.addReplicationPeer("testCustomReplicationEndpoint", - ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()) - .build()); + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()).build()); // check whether the class has been constructed and started Waiter.waitFor(CONF1, 60000, new Waiter.Predicate() { @@ -187,12 +184,10 @@ public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception { int peerCount = hbaseAdmin.listReplicationPeers().size(); final String id = "testReplicationEndpointReturnsFalseOnReplicate"; hbaseAdmin.addReplicationPeer(id, - ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()) - .build()); + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointReturningFalse.class.getName()).build()); // This test is flakey and then there is so much stuff flying around in here its, hard to - // debug. Peer needs to be up for the edit to make it across. This wait on + // debug. Peer needs to be up for the edit to make it across. This wait on // peer count seems to be a hack that has us not progress till peer is up. if (hbaseAdmin.listReplicationPeers().size() <= peerCount) { LOG.info("Waiting on peercount to go up from " + peerCount); @@ -227,7 +222,7 @@ public void testInterClusterReplication() throws Exception { // Make sure edits are spread across regions because we do region based batching // before shipping edits. 
- for(HRegion region: regions) { + for (HRegion region : regions) { RegionInfo hri = region.getRegionInfo(); byte[] row = hri.getStartKey(); for (int i = 0; i < 100; i++) { @@ -241,10 +236,9 @@ public void testInterClusterReplication() throws Exception { } hbaseAdmin.addReplicationPeer(id, - ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2)) - .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()) - .build()); + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF2)) + .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()) + .build()); final int numEdits = totEdits; Waiter.waitFor(CONF1, 30000, new Waiter.ExplainingPredicate() { @@ -255,8 +249,8 @@ public boolean evaluate() throws Exception { @Override public String explainFailure() throws Exception { - String failure = "Failed to replicate all edits, expected = " + numEdits - + " replicated = " + InterClusterReplicationEndpointForTest.replicateCount.get(); + String failure = "Failed to replicate all edits, expected = " + numEdits + " replicated = " + + InterClusterReplicationEndpointForTest.replicateCount.get(); return failure; } }); @@ -267,14 +261,14 @@ public String explainFailure() throws Exception { @Test public void testWALEntryFilterFromReplicationEndpoint() throws Exception { - ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) - // test that we can create mutliple WALFilters reflectively - .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, - EverythingPassesWALEntryFilter.class.getName() + "," + - EverythingPassesWALEntryFilterSubclass.class.getName()) - .build(); + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) + // test that we can create mutliple WALFilters reflectively + .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, + EverythingPassesWALEntryFilter.class.getName() + "," + + EverythingPassesWALEntryFilterSubclass.class.getName()) + .build(); hbaseAdmin.addReplicationPeer("testWALEntryFilterFromReplicationEndpoint", rpc); // now replicate some data. 
@@ -292,32 +286,32 @@ public boolean evaluate() throws Exception { }); Assert.assertNull(ReplicationEndpointWithWALEntryFilter.ex.get()); - //make sure our reflectively created filter is in the filter chain + // make sure our reflectively created filter is in the filter chain Assert.assertTrue(EverythingPassesWALEntryFilter.hasPassedAnEntry()); hbaseAdmin.removeReplicationPeer("testWALEntryFilterFromReplicationEndpoint"); } @Test(expected = IOException.class) public void testWALEntryFilterAddValidation() throws Exception { - ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) - // test that we can create mutliple WALFilters reflectively - .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, - "IAmNotARealWalEntryFilter") - .build(); + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) + // test that we can create mutliple WALFilters reflectively + .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, + "IAmNotARealWalEntryFilter") + .build(); hbaseAdmin.addReplicationPeer("testWALEntryFilterAddValidation", rpc); } @Test(expected = IOException.class) public void testWALEntryFilterUpdateValidation() throws Exception { - ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) - .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) - // test that we can create mutliple WALFilters reflectively - .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, - "IAmNotARealWalEntryFilter") - .build(); + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(ZKConfig.getZooKeeperClusterKey(CONF1)) + .setReplicationEndpointImpl(ReplicationEndpointWithWALEntryFilter.class.getName()) + // test that we can create mutliple WALFilters reflectively + .putConfiguration(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY, + "IAmNotARealWalEntryFilter") + .build(); hbaseAdmin.updateReplicationPeerConfig("testWALEntryFilterUpdateValidation", rpc); } @@ -325,12 +319,11 @@ public void testWALEntryFilterUpdateValidation() throws Exception { public void testMetricsSourceBaseSourcePassThrough() { /* * The replication MetricsSource wraps a MetricsReplicationTableSourceImpl, - * MetricsReplicationSourceSourceImpl and a MetricsReplicationGlobalSourceSource, - * so that metrics get written to both namespaces. Both of those classes wrap a - * MetricsReplicationSourceImpl that implements BaseSource, which allows - * for custom JMX metrics. This test checks to make sure the BaseSource decorator logic on - * MetricsSource actually calls down through the two layers of wrapping to the actual - * BaseSource. + * MetricsReplicationSourceSourceImpl and a MetricsReplicationGlobalSourceSource, so that + * metrics get written to both namespaces. Both of those classes wrap a + * MetricsReplicationSourceImpl that implements BaseSource, which allows for custom JMX metrics. + * This test checks to make sure the BaseSource decorator logic on MetricsSource actually calls + * down through the two layers of wrapping to the actual BaseSource. 
*/ String id = "id"; DynamicMetricsRegistry mockRegistry = mock(DynamicMetricsRegistry.class); @@ -340,17 +333,15 @@ public void testMetricsSourceBaseSourcePassThrough() { when(globalRms.getMetricsRegistry()).thenReturn(mockRegistry); MetricsReplicationSourceSource singleSourceSource = - new MetricsReplicationSourceSourceImpl(singleRms, id); + new MetricsReplicationSourceSourceImpl(singleRms, id); MetricsReplicationGlobalSourceSource globalSourceSource = - new MetricsReplicationGlobalSourceSourceImpl(globalRms); + new MetricsReplicationGlobalSourceSourceImpl(globalRms); MetricsReplicationGlobalSourceSource spyglobalSourceSource = spy(globalSourceSource); doNothing().when(spyglobalSourceSource).incrFailedRecoveryQueue(); - Map singleSourceSourceByTable = - new HashMap<>(); - MetricsSource source = new MetricsSource(id, singleSourceSource, - spyglobalSourceSource, singleSourceSourceByTable); - + Map singleSourceSourceByTable = new HashMap<>(); + MetricsSource source = + new MetricsSource(id, singleSourceSource, spyglobalSourceSource, singleSourceSourceByTable); String gaugeName = "gauge"; String singleGaugeName = "source.id." + gaugeName; @@ -373,7 +364,6 @@ public void testMetricsSourceBaseSourcePassThrough() { source.updateHistogram(counterName, count); source.incrFailedRecoveryQueue(); - verify(singleRms).decGauge(singleGaugeName, delta); verify(globalRms).decGauge(globalGaugeName, delta); verify(globalRms).getMetricsContext(); @@ -392,18 +382,16 @@ public void testMetricsSourceBaseSourcePassThrough() { verify(globalRms).updateHistogram(globalCounterName, count); verify(spyglobalSourceSource).incrFailedRecoveryQueue(); - //check singleSourceSourceByTable metrics. + // check singleSourceSourceByTable metrics. // singleSourceSourceByTable map entry will be created only // after calling #setAgeOfLastShippedOpByTable - boolean containsRandomNewTable = source.getSingleSourceSourceByTable() - .containsKey("RandomNewTable"); + boolean containsRandomNewTable = + source.getSingleSourceSourceByTable().containsKey("RandomNewTable"); Assert.assertEquals(false, containsRandomNewTable); source.updateTableLevelMetrics(createWALEntriesWithSize("RandomNewTable")); - containsRandomNewTable = source.getSingleSourceSourceByTable() - .containsKey("RandomNewTable"); + containsRandomNewTable = source.getSingleSourceSourceByTable().containsKey("RandomNewTable"); Assert.assertEquals(true, containsRandomNewTable); - MetricsReplicationTableSource msr = source.getSingleSourceSourceByTable() - .get("RandomNewTable"); + MetricsReplicationTableSource msr = source.getSingleSourceSourceByTable().get("RandomNewTable"); // age should be greater than zero we created the entry with time in the past Assert.assertTrue(msr.getLastShippedAge() > 0); @@ -421,8 +409,7 @@ private List> createWALEntriesWithSize(String tableName) { private Entry createEntry(String tableName, TreeMap scopes, byte[]... 
kvs) { WALKeyImpl key1 = new WALKeyImpl(new byte[0], TableName.valueOf(tableName), - EnvironmentEdgeManager.currentTime() - 1L, - scopes); + EnvironmentEdgeManager.currentTime() - 1L, scopes); WALEdit edit1 = new WALEdit(); for (byte[] kv : kvs) { @@ -437,7 +424,7 @@ private void doPut(byte[] row) throws IOException { } } - private void doPut(final Connection connection, final byte [] row) throws IOException { + private void doPut(final Connection connection, final byte[] row) throws IOException { try (Table t = connection.getTable(tableName)) { Put put = new Put(row); put.addColumn(famName, row, row); @@ -513,6 +500,7 @@ public boolean canReplicateToSameCluster() { * Not used by unit tests, helpful for manual testing with replication. *

 * Snippet for `hbase shell`:
+ *
 * <pre>
              * create 't', 'f'
              * add_peer '1', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.replication.' + \
          @@ -522,6 +510,7 @@ public boolean canReplicateToSameCluster() {
              */
             public static class SleepingReplicationEndpointForTest extends ReplicationEndpointForTest {
               private long duration;
          +
               public SleepingReplicationEndpointForTest() {
                 super();
               }
          @@ -530,8 +519,8 @@ public SleepingReplicationEndpointForTest() {
               public void init(Context context) throws IOException {
                 super.init(context);
                 if (this.ctx != null) {
          -        duration = this.ctx.getConfiguration().getLong(
          -            "hbase.test.sleep.replication.endpoint.duration.millis", 5000L);
          +        duration = this.ctx.getConfiguration()
          +            .getLong("hbase.test.sleep.replication.endpoint.duration.millis", 5000L);
                 }
               }
           
          @@ -584,6 +573,7 @@ public static class ReplicationEndpointReturningFalse extends ReplicationEndpoin
               static int COUNT = 10;
static AtomicReference<Exception> ex = new AtomicReference<>(null);
               static AtomicBoolean replicated = new AtomicBoolean(false);
          +
               @Override
               public boolean replicate(ReplicateContext replicateContext) {
                 try {
          @@ -623,10 +613,10 @@ public WALEntryFilter getWALEntryfilter() {
                   public Entry filter(Entry entry) {
                     ArrayList cells = entry.getEdit().getCells();
                     int size = cells.size();
          -          for (int i = size-1; i >= 0; i--) {
          +          for (int i = size - 1; i >= 0; i--) {
                       Cell cell = cells.get(i);
          -            if (!Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
          -              row, 0, row.length)) {
          +            if (!Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0,
          +              row.length)) {
                         cells.remove(i);
                       }
                     }
          @@ -638,13 +628,14 @@ public Entry filter(Entry entry) {
           
             public static class EverythingPassesWALEntryFilter implements WALEntryFilter {
               private static boolean passedEntry = false;
          +
               @Override
               public Entry filter(Entry entry) {
                 passedEntry = true;
                 return entry;
               }
           
          -    public static boolean hasPassedAnEntry(){
          +    public static boolean hasPassedAnEntry() {
                 return passedEntry;
               }
             }
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java
          index b261b25bd811..740548bb78ac 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRS.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -34,7 +34,7 @@ public class TestReplicationKillMasterRS extends TestReplicationKillRS {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationKillMasterRS.class);
          +      HBaseClassTestRule.forClass(TestReplicationKillMasterRS.class);
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java
          index 8cfe8027340c..db582fd9b5d9 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSCompressed.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -26,8 +26,8 @@
           import org.junit.experimental.categories.Category;
           
           /**
          - * Run the same test as TestReplicationKillMasterRS but with WAL compression enabled
          - * Do not add other tests in this class.
          + * Run the same test as TestReplicationKillMasterRS but with WAL compression enabled Do not add
          + * other tests in this class.
            */
           @Category({ ReplicationTests.class, LargeTests.class })
           public class TestReplicationKillMasterRSCompressed extends TestReplicationKillMasterRS {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java
          index d0ceb6b3957f..d44c23caa60e 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -30,7 +30,7 @@ public class TestReplicationKillMasterRSWithSeparateOldWALs extends TestReplicat
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationKillMasterRSWithSeparateOldWALs.class);
          +      HBaseClassTestRule.forClass(TestReplicationKillMasterRSWithSeparateOldWALs.class);
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
          index 47903c17ccf4..b898c72e28f5 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java
          index 16663f0e77b2..b17ba5381bc0 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRS.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -34,7 +34,7 @@ public class TestReplicationKillSlaveRS extends TestReplicationKillRS {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationKillSlaveRS.class);
          +      HBaseClassTestRule.forClass(TestReplicationKillSlaveRS.class);
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java
          index 195ad89b3e30..3882655bd757 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillSlaveRSWithSeparateOldWALs.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.replication;
           
           import org.apache.hadoop.hbase.HBaseClassTestRule;
          @@ -31,7 +30,7 @@ public class TestReplicationKillSlaveRSWithSeparateOldWALs extends TestReplicati
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationKillSlaveRSWithSeparateOldWALs.class);
          +      HBaseClassTestRule.forClass(TestReplicationKillSlaveRSWithSeparateOldWALs.class);
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java
          index c646a9011c33..b1277f617bba 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationMetricsforUI.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
          index cd6a39596185..212d1a3ec5ea 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -25,6 +25,7 @@
           import static org.mockito.ArgumentMatchers.anyString;
           import static org.mockito.Mockito.doAnswer;
           import static org.mockito.Mockito.spy;
          +
           import java.io.IOException;
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.HBaseClassTestRule;
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
          index 625fb59cec22..3a739d6d14ae 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -337,8 +337,8 @@ public void testLoading() throws Exception {
                     }
                     LOG.error("Last row: " + lastRow);
                     fail("Waited too much time for normal batch replication, " + res.length + " instead of "
          -            + NB_ROWS_IN_BIG_BATCH + "; waited="
          -            + (EnvironmentEdgeManager.currentTime() - start) + "ms");
          +              + NB_ROWS_IN_BIG_BATCH + "; waited=" + (EnvironmentEdgeManager.currentTime() - start)
          +              + "ms");
                   } else {
                     LOG.info("Only got " + res.length + " rows... retrying");
                     Thread.sleep(SLEEP_TIME);
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java
          index 9ca0044d31be..110047154007 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTestsSync.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -31,7 +31,7 @@ public class TestReplicationSmallTestsSync extends TestReplicationSmallTests {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationSmallTestsSync.class);
          +      HBaseClassTestRule.forClass(TestReplicationSmallTestsSync.class);
           
             @Override
             protected boolean isSyncPeer() {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java
          index 56264104ff43..62f5868d682f 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java
          @@ -19,6 +19,7 @@
           
           import static org.junit.Assert.assertEquals;
           import static org.junit.Assert.assertTrue;
          +
           import java.io.IOException;
           import java.util.EnumSet;
           import java.util.List;
          @@ -48,7 +49,7 @@ public class TestReplicationStatus extends TestReplicationBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationStatus.class);
          +      HBaseClassTestRule.forClass(TestReplicationStatus.class);
           
             static void insertRowsOnSource() throws IOException {
               final byte[] qualName = Bytes.toBytes("q");
          @@ -72,7 +73,8 @@ public void testReplicationStatus() throws Exception {
               // This test wants two RS's up. We only run one generally so add one.
               UTIL1.getMiniHBaseCluster().startRegionServer();
Waiter.waitFor(UTIL1.getConfiguration(), 30000, new Waiter.Predicate<Exception>() {
          -      @Override public boolean evaluate() throws Exception {
          +      @Override
          +      public boolean evaluate() throws Exception {
                   return UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().size() > 1;
                 }
               });
          @@ -86,8 +88,8 @@ public void testReplicationStatus() throws Exception {
               // HACK! To address flakeyness.
               Threads.sleep(10000);
               ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
          -    for (JVMClusterUtil.RegionServerThread thread : UTIL1.getHBaseCluster().
          -        getRegionServerThreads()) {
          +    for (JVMClusterUtil.RegionServerThread thread : UTIL1.getHBaseCluster()
          +        .getRegionServerThreads()) {
                 ServerName server = thread.getRegionServer().getServerName();
                 assertTrue("" + server, metrics.getLiveServerMetrics().containsKey(server));
                 ServerMetrics sm = metrics.getLiveServerMetrics().get(server);
          @@ -95,8 +97,8 @@ public void testReplicationStatus() throws Exception {
                 ReplicationLoadSink rLoadSink = sm.getReplicationLoadSink();
           
                 // check SourceList only has one entry, because only has one peer
          -      assertEquals("Failed to get ReplicationLoadSourceList " +
          -        rLoadSourceList + ", " + server,1, rLoadSourceList.size());
          +      assertEquals("Failed to get ReplicationLoadSourceList " + rLoadSourceList + ", " + server, 1,
          +        rLoadSourceList.size());
                 assertEquals(PEER_ID2, rLoadSourceList.get(0).getPeerID());
           
                 // check Sink exist only as it is difficult to verify the value on the fly
          @@ -109,7 +111,7 @@ public void testReplicationStatus() throws Exception {
               // Stop rs1, then the queue of rs1 will be transfered to rs0
               HRegionServer hrs = UTIL1.getHBaseCluster().getRegionServer(1);
               hrs.stop("Stop RegionServer");
          -    while(hrs.isAlive()) {
          +    while (hrs.isAlive()) {
                 Threads.sleep(100);
               }
               // To be sure it dead and references cleaned up. TODO: Change this to a barrier.
          @@ -125,18 +127,17 @@ public void testReplicationStatus() throws Exception {
             }
           
             /**
          -   * Wait until Master shows metrics counts for ReplicationLoadSourceList that are
          -   * greater than greaterThan for serverName before
          -   * returning. We want to avoid case where RS hasn't yet updated Master before
          -   * allowing test proceed.
          +   * Wait until Master shows metrics counts for ReplicationLoadSourceList that are greater than
          +   * greaterThan for serverName before returning. We want to avoid case
          +   * where RS hasn't yet updated Master before allowing test proceed.
              * @param greaterThan size of replicationLoadSourceList must be greater before we proceed
              */
private List<ReplicationLoadSource> waitOnMetricsReport(int greaterThan, ServerName serverName)
                 throws IOException {
               ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
List<ReplicationLoadSource> list =
          -      metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
          -    while(list.size() <= greaterThan) {
          +        metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
          +    while (list.size() <= greaterThan) {
                 Threads.sleep(1000);
               }
               return list;
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java
          index 79520d5e4caf..a6ff3bb60eb1 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusAfterLagging.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -40,7 +40,7 @@ public class TestReplicationStatusAfterLagging extends TestReplicationBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationStatusAfterLagging.class);
          +      HBaseClassTestRule.forClass(TestReplicationStatusAfterLagging.class);
           
             @Test
             public void testReplicationStatusAfterLagging() throws Exception {
          @@ -58,7 +58,7 @@ public void testReplicationStatusAfterLagging() throws Exception {
               ServerName serverName = UTIL1.getHBaseCluster().getRegionServer(0).getServerName();
               ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
List<ReplicationLoadSource> loadSources =
          -      metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
          +        metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
               assertEquals(1, loadSources.size());
               ReplicationLoadSource loadSource = loadSources.get(0);
               assertTrue(loadSource.hasEditsSinceRestart());
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java
          index 26f836c7c796..a79e5f502ba2 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusBothNormalAndRecoveryLagging.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -40,7 +40,7 @@ public class TestReplicationStatusBothNormalAndRecoveryLagging extends TestRepli
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationStatusBothNormalAndRecoveryLagging.class);
          +      HBaseClassTestRule.forClass(TestReplicationStatusBothNormalAndRecoveryLagging.class);
           
             @Test
             public void testReplicationStatusBothNormalAndRecoveryLagging() throws Exception {
          @@ -65,7 +65,7 @@ public void testReplicationStatusBothNormalAndRecoveryLagging() throws Exception
               Thread.sleep(10000);
               ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
List<ReplicationLoadSource> loadSources =
          -      metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
          +        metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
               assertEquals(2, loadSources.size());
               boolean foundRecovery = false;
               boolean foundNormal = false;
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java
          index edc1817cce3e..0f8fd5e8ef30 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSink.java
          @@ -15,9 +15,10 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.replication;
           
          +import java.io.IOException;
          +import java.util.EnumSet;
           import org.apache.hadoop.hbase.ClusterMetrics;
           import org.apache.hadoop.hbase.HBaseClassTestRule;
           import org.apache.hadoop.hbase.ServerMetrics;
          @@ -30,31 +31,29 @@
           import org.junit.ClassRule;
           import org.junit.Test;
           import org.junit.experimental.categories.Category;
          -import java.io.IOException;
          -import java.util.EnumSet;
           
           @Category({ ReplicationTests.class, MediumTests.class })
           public class TestReplicationStatusSink extends TestReplicationBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationStatusSink.class);
          +      HBaseClassTestRule.forClass(TestReplicationStatusSink.class);
           
             @Test
             public void testReplicationStatusSink() throws Exception {
               try (Admin admin = UTIL2.getConnection().getAdmin()) {
                 ServerName server = UTIL2.getHBaseCluster().getRegionServer(0).getServerName();
                 ReplicationLoadSink loadSink = getLatestSinkMetric(admin, server);
          -      //First checks if status of timestamp of last applied op is same as RS start, since no edits
          -      //were replicated yet
          +      // First checks if status of timestamp of last applied op is same as RS start, since no edits
          +      // were replicated yet
                 Assert.assertEquals(loadSink.getTimestampStarted(), loadSink.getTimestampsOfLastAppliedOp());
          -      //now insert some rows on source, so that it gets delivered to target
          +      // now insert some rows on source, so that it gets delivered to target
                 TestReplicationStatus.insertRowsOnSource();
                 long wait =
-        Waiter.waitFor(UTIL2.getConfiguration(), 10000, (Waiter.Predicate<Exception>) () -> {
          -          ReplicationLoadSink loadSink1 = getLatestSinkMetric(admin, server);
          -          return loadSink1.getTimestampsOfLastAppliedOp() > loadSink1.getTimestampStarted();
          -        });
+          Waiter.waitFor(UTIL2.getConfiguration(), 10000, (Waiter.Predicate<Exception>) () -> {
          +            ReplicationLoadSink loadSink1 = getLatestSinkMetric(admin, server);
          +            return loadSink1.getTimestampsOfLastAppliedOp() > loadSink1.getTimestampStarted();
          +          });
                 Assert.assertNotEquals(-1, wait);
               }
             }
          @@ -62,7 +61,7 @@ public void testReplicationStatusSink() throws Exception {
             private ReplicationLoadSink getLatestSinkMetric(Admin admin, ServerName server)
                 throws IOException {
               ClusterMetrics metrics =
          -      admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
          +        admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
               ServerMetrics sm = metrics.getLiveServerMetrics().get(server);
               return sm.getReplicationLoadSink();
             }
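
           For reference (this sketch is not part of the patch): the Waiter.waitFor call re-indented above is HBase's stock polling helper, retrying a predicate until it holds or the timeout (10000 ms here) elapses. A minimal illustration of the same idiom in isolation follows; it assumes an already-running cluster reachable through an Admin handle, and the class/method names are made up for the sketch.

           import java.util.EnumSet;
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.ClusterMetrics;
           import org.apache.hadoop.hbase.ServerMetrics;
           import org.apache.hadoop.hbase.ServerName;
           import org.apache.hadoop.hbase.Waiter;
           import org.apache.hadoop.hbase.client.Admin;
           import org.apache.hadoop.hbase.replication.ReplicationLoadSink;

           // Hypothetical helper, not from the patch: polls sink metrics the same way the test does.
           public final class SinkMetricPollingSketch {
             private SinkMetricPollingSketch() {
             }

             /**
              * Blocks for at most 10 seconds until the replication sink on the given server has applied
              * an edit after its start time, mirroring the assertion pattern in TestReplicationStatusSink.
              */
             static long waitForAppliedOp(Configuration conf, Admin admin, ServerName server) throws Exception {
               return Waiter.waitFor(conf, 10000, (Waiter.Predicate<Exception>) () -> {
                 ClusterMetrics metrics =
                   admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
                 ServerMetrics sm = metrics.getLiveServerMetrics().get(server);
                 ReplicationLoadSink sink = sm.getReplicationLoadSink();
                 return sink.getTimestampsOfLastAppliedOp() > sink.getTimestampStarted();
               });
             }
           }
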
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java
          index 243e24541bed..0ff22f23efe3 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNewOp.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -41,7 +41,7 @@ public class TestReplicationStatusSourceStartedTargetStoppedNewOp extends TestRe
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedNewOp.class);
          +      HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedNewOp.class);
           
             @Test
             public void testReplicationStatusSourceStartedTargetStoppedNewOp() throws Exception {
          @@ -58,7 +58,7 @@ public void testReplicationStatusSourceStartedTargetStoppedNewOp() throws Except
               ServerName serverName = UTIL1.getHBaseCluster().getRegionServer(0).getServerName();
               ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
                List<ReplicationLoadSource> loadSources =
          -      metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
          +        metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
               assertEquals(1, loadSources.size());
               ReplicationLoadSource loadSource = loadSources.get(0);
               assertTrue(loadSource.hasEditsSinceRestart());
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java
          index 24c5051ee71d..9724d6731a77 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedNoOps.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -38,7 +38,7 @@ public class TestReplicationStatusSourceStartedTargetStoppedNoOps extends TestRe
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedNoOps.class);
          +      HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedNoOps.class);
           
             @Test
             public void testReplicationStatusSourceStartedTargetStoppedNoOps() throws Exception {
          @@ -49,7 +49,7 @@ public void testReplicationStatusSourceStartedTargetStoppedNoOps() throws Except
               Thread.sleep(10000);
               ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
                List<ReplicationLoadSource> loadSources =
          -      metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
          +        metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
               assertEquals(1, loadSources.size());
               ReplicationLoadSource loadSource = loadSources.get(0);
               assertFalse(loadSource.hasEditsSinceRestart());
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java
          index 0f3450e06aa1..81e868c4f486 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatusSourceStartedTargetStoppedWithRecovery.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -41,8 +41,8 @@ public class TestReplicationStatusSourceStartedTargetStoppedWithRecovery
               extends TestReplicationBase {
           
             @ClassRule
          -  public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationStatusSourceStartedTargetStoppedWithRecovery.class);
          +  public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule
          +      .forClass(TestReplicationStatusSourceStartedTargetStoppedWithRecovery.class);
           
             @Test
             public void testReplicationStatusSourceStartedTargetStoppedWithRecovery() throws Exception {
          @@ -60,7 +60,7 @@ public void testReplicationStatusSourceStartedTargetStoppedWithRecovery() throws
               Thread.sleep(10000);
               ClusterMetrics metrics = hbaseAdmin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
                List<ReplicationLoadSource> loadSources =
          -      metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
          +        metrics.getLiveServerMetrics().get(serverName).getReplicationLoadSourceList();
               assertEquals(2, loadSources.size());
               boolean foundRecovery = false;
               boolean foundNormal = false;
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java
          index f57306a1888c..65eaa3591f32 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDeletedTableCFs.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -22,7 +22,6 @@
           
           import java.util.Arrays;
           import java.util.stream.Collectors;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.HBaseClassTestRule;
           import org.apache.hadoop.hbase.HBaseConfiguration;
          @@ -49,8 +48,8 @@
           import org.slf4j.LoggerFactory;
           
           /**
          - * Replication with dropped table will stuck as the default REPLICATION_DROP_ON_DELETED_TABLE_KEY
          - * is false.
          + * Replication with dropped table will stuck as the default REPLICATION_DROP_ON_DELETED_TABLE_KEY is
          + * false.
            */
           @Category({ LargeTests.class })
           public class TestReplicationStuckWithDeletedTableCFs {
          @@ -163,8 +162,8 @@ private void verifyReplicationStuck() throws Exception {
                 for (int i = 0; i < NB_RETRIES; i++) {
                   Result result = normalTable.get(new Get(ROW).addColumn(NORMAL_FAMILY, QUALIFIER));
                   if (result != null && !result.isEmpty()) {
          -          fail("Edit should have been stuck behind dropped tables, but value is " + Bytes
          -              .toString(result.getValue(NORMAL_FAMILY, QUALIFIER)));
          +          fail("Edit should have been stuck behind dropped tables, but value is "
          +              + Bytes.toString(result.getValue(NORMAL_FAMILY, QUALIFIER)));
                   } else {
                     LOG.info("Row not replicated, let's wait a bit more...");
                     Thread.sleep(SLEEP_TIME);
          @@ -174,10 +173,8 @@ private void verifyReplicationStuck() throws Exception {
             }
           
             private TableDescriptor createTableDescriptor(byte[]... cfs) {
          -    return TableDescriptorBuilder.newBuilder(TABLE)
          -        .setColumnFamilies(Arrays.stream(cfs).map(cf ->
          -            ColumnFamilyDescriptorBuilder.newBuilder(cf).setScope(REPLICATION_SCOPE_GLOBAL).build())
          -            .collect(Collectors.toList())
          -        ).build();
          +    return TableDescriptorBuilder.newBuilder(TABLE).setColumnFamilies(Arrays.stream(cfs).map(
          +      cf -> ColumnFamilyDescriptorBuilder.newBuilder(cf).setScope(REPLICATION_SCOPE_GLOBAL).build())
          +        .collect(Collectors.toList())).build();
             }
           }
          \ No newline at end of file
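
           Aside (not part of the patch): the createTableDescriptor helper reflowed above shows the builder chain used to declare a table whose column families carry REPLICATION_SCOPE_GLOBAL. A standalone sketch of the same chain is below; the class name, method name, table name "example", and family names are placeholders invented for illustration.

           import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_GLOBAL;

           import java.util.Arrays;
           import java.util.stream.Collectors;
           import org.apache.hadoop.hbase.TableName;
           import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
           import org.apache.hadoop.hbase.client.TableDescriptor;
           import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
           import org.apache.hadoop.hbase.util.Bytes;

           // Hypothetical helper, not from the patch: builds a descriptor with every family replicated.
           public final class ReplicatedTableDescriptors {
             private ReplicatedTableDescriptors() {
             }

             /** Returns a descriptor whose every column family uses replication scope GLOBAL. */
             static TableDescriptor replicatedTable(TableName table, byte[]... families) {
               return TableDescriptorBuilder.newBuilder(table)
                 .setColumnFamilies(Arrays.stream(families)
                   .map(cf -> ColumnFamilyDescriptorBuilder.newBuilder(cf)
                     .setScope(REPLICATION_SCOPE_GLOBAL).build())
                   .collect(Collectors.toList()))
                 .build();
             }

             public static void main(String[] args) {
               TableDescriptor td =
                 replicatedTable(TableName.valueOf("example"), Bytes.toBytes("cf1"), Bytes.toBytes("cf2"));
               System.out.println(td);
             }
           }
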
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java
          index 512bcdd9a1fa..d4581dd38f03 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStuckWithDroppedTable.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -46,8 +46,8 @@
           import org.slf4j.LoggerFactory;
           
           /**
          - * Replication with dropped table will stuck as the default REPLICATION_DROP_ON_DELETED_TABLE_KEY
          - * is false.
          + * Replication with dropped table will stuck as the default REPLICATION_DROP_ON_DELETED_TABLE_KEY is
          + * false.
            */
           @Category({ LargeTests.class })
           public class TestReplicationStuckWithDroppedTable {
          @@ -108,8 +108,8 @@ public static void tearDownAfterClass() throws Exception {
           
             private void createTable(TableName tableName) throws Exception {
               TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(
          -        ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setScope(REPLICATION_SCOPE_GLOBAL).build()
          -    ).build();
          +      ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setScope(REPLICATION_SCOPE_GLOBAL).build())
          +        .build();
               admin1.createTable(desc);
               admin2.createTable(desc);
               utility1.waitUntilAllRegionsAssigned(tableName);
          @@ -120,8 +120,7 @@ private void createTable(TableName tableName) throws Exception {
             public void testEditsStuckBehindDroppedTable() throws Exception {
               // add peer
               ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
          -        .setClusterKey(utility2.getClusterKey())
          -        .setReplicateAllUserTables(true).build();
          +        .setClusterKey(utility2.getClusterKey()).setReplicateAllUserTables(true).build();
               admin1.addReplicationPeer(PEER_ID, rpc);
           
               // create table
          @@ -164,8 +163,8 @@ private void verifyReplicationStuck() throws Exception {
                 for (int i = 0; i < NB_RETRIES; i++) {
                   Result result = normalTable.get(new Get(ROW).addColumn(FAMILY, QUALIFIER));
                   if (result != null && !result.isEmpty()) {
          -          fail("Edit should have been stuck behind dropped tables, but value is " + Bytes
          -              .toString(result.getValue(FAMILY, QUALIFIER)));
          +          fail("Edit should have been stuck behind dropped tables, but value is "
          +              + Bytes.toString(result.getValue(FAMILY, QUALIFIER)));
                   } else {
                     LOG.info("Row not replicated, let's wait a bit more...");
                     Thread.sleep(SLEEP_TIME);
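
           Aside (not part of the patch): the ReplicationPeerConfig builder rewrapped in testEditsStuckBehindDroppedTable above is the client-side API for registering a peer cluster. A minimal sketch of that call, assuming an open Admin handle and a peer cluster key obtained elsewhere; the class, method, and parameter names are made up for the example.

           import java.io.IOException;
           import org.apache.hadoop.hbase.client.Admin;
           import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

           // Hypothetical helper, not from the patch: registers a replicate-all peer.
           public final class AddPeerSketch {
             private AddPeerSketch() {
             }

             /** Registers a peer that replicates all user tables to the cluster behind clusterKey. */
             static void addPeer(Admin admin, String peerId, String clusterKey) throws IOException {
               ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
                 .setClusterKey(clusterKey)
                 .setReplicateAllUserTables(true)
                 .build();
               admin.addReplicationPeer(peerId, rpc);
             }
           }
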
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
          index b4b087cf75b0..51ab23c908a6 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -42,7 +42,7 @@ public class TestReplicationSyncUpTool extends TestReplicationSyncUpToolBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationSyncUpTool.class);
          +      HBaseClassTestRule.forClass(TestReplicationSyncUpTool.class);
           
             private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSyncUpTool.class);
           
          @@ -204,8 +204,9 @@ private void mimicSyncUpAfterDelete() throws Exception {
                   LOG.info("SyncUpAfterDelete succeeded at retry = " + i);
                   break;
                 } else {
          -        LOG.debug("SyncUpAfterDelete failed at retry = " + i + ", with rowCount_ht1TargetPeer1 =" +
          -          rowCountHt1TargetAtPeer1 + " and rowCount_ht2TargetAtPeer1 =" + rowCountHt2TargetAtPeer1);
          +        LOG.debug("SyncUpAfterDelete failed at retry = " + i + ", with rowCount_ht1TargetPeer1 ="
          +            + rowCountHt1TargetAtPeer1 + " and rowCount_ht2TargetAtPeer1 ="
          +            + rowCountHt2TargetAtPeer1);
                 }
                 Thread.sleep(SLEEP_TIME);
               }
          @@ -280,8 +281,9 @@ private void mimicSyncUpAfterPut() throws Exception {
                   LOG.info("SyncUpAfterPut succeeded at retry = " + i);
                   break;
                 } else {
          -        LOG.debug("SyncUpAfterPut failed at retry = " + i + ", with rowCount_ht1TargetPeer1 =" +
          -          rowCountHt1TargetAtPeer1 + " and rowCount_ht2TargetAtPeer1 =" + rowCountHt2TargetAtPeer1);
          +        LOG.debug("SyncUpAfterPut failed at retry = " + i + ", with rowCount_ht1TargetPeer1 ="
          +            + rowCountHt1TargetAtPeer1 + " and rowCount_ht2TargetAtPeer1 ="
          +            + rowCountHt2TargetAtPeer1);
                 }
                 Thread.sleep(SLEEP_TIME);
               }
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java
          index a23aebb0311c..61d962edeb60 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolBase.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -78,22 +78,22 @@ public void setUp() throws Exception {
               UTIL2.startMiniCluster(4);
           
               t1SyncupSource = TableDescriptorBuilder.newBuilder(TN1)
          -      .setColumnFamily(
          -        ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setScope(REPLICATION_SCOPE_GLOBAL).build())
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build();
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY)
          +            .setScope(REPLICATION_SCOPE_GLOBAL).build())
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build();
           
               t1SyncupTarget = TableDescriptorBuilder.newBuilder(TN1)
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build();
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build();
           
               t2SyncupSource = TableDescriptorBuilder.newBuilder(TN2)
          -      .setColumnFamily(
          -        ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setScope(REPLICATION_SCOPE_GLOBAL).build())
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build();
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY)
          +            .setScope(REPLICATION_SCOPE_GLOBAL).build())
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build();
           
               t2SyncupTarget = TableDescriptorBuilder.newBuilder(TN2)
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build();
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(NO_REP_FAMILY)).build();
             }
           
             @After
          @@ -131,7 +131,7 @@ final void setupReplication() throws Exception {
                * set M-S : Master: utility1 Slave1: utility2
                */
               ReplicationPeerConfig rpc =
          -      ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()).build();
          +        ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey()).build();
               admin1.addReplicationPeer("1", rpc);
             }
           
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
          index 4e9f1ebc3115..a293d4c4ea4e 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -55,10 +55,10 @@ public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplication
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithBulkLoadedData.class);
          +      HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithBulkLoadedData.class);
           
             private static final Logger LOG =
          -    LoggerFactory.getLogger(TestReplicationSyncUpToolWithBulkLoadedData.class);
          +      LoggerFactory.getLogger(TestReplicationSyncUpToolWithBulkLoadedData.class);
           
             @Override
             protected void customizeClusterConf(Configuration conf) {
          @@ -90,9 +90,9 @@ public void testSyncUpTool() throws Exception {
               randomHFileRangeListIterator = randomHFileRangeList.iterator();
           
               /**
          -     * at Master: t1_syncup: Load 50 rows into cf1, and 50 rows from other hdfs into cf1, and 3
          -     * rows into norep t2_syncup: Load 100 rows into cf1, and 100 rows from other hdfs into cf1,
          -     * and 3 rows into norep verify correctly replicated to slave
          +     * at Master: t1_syncup: Load 50 rows into cf1, and 50 rows from other hdfs into cf1, and 3 rows
          +     * into norep t2_syncup: Load 100 rows into cf1, and 100 rows from other hdfs into cf1, and 3
          +     * rows into norep verify correctly replicated to slave
                */
               loadAndReplicateHFiles(true, randomHFileRangeListIterator);
           
          @@ -161,9 +161,9 @@ private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListItera
                   LOG.info("SyncUpAfterBulkLoad succeeded at retry = " + i);
                   break;
                 } else {
          -        LOG.debug("SyncUpAfterBulkLoad failed at retry = " + i +
          -          ", with rowCount_ht1TargetPeer1 =" + rowCountHt1TargetAtPeer1 +
          -          " and rowCount_ht2TargetAtPeer1 =" + rowCountHt2TargetAtPeer1);
          +        LOG.debug("SyncUpAfterBulkLoad failed at retry = " + i + ", with rowCount_ht1TargetPeer1 ="
          +            + rowCountHt1TargetAtPeer1 + " and rowCount_ht2TargetAtPeer1 ="
          +            + rowCountHt2TargetAtPeer1);
                 }
                 Thread.sleep(SLEEP_TIME);
               }
          @@ -175,37 +175,37 @@ private void loadAndReplicateHFiles(boolean verifyReplicationOnSlave,
           
               // Load 50 + 50 + 3 hfiles to t1_syncup.
               byte[][][] hfileRanges =
          -      new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
          -        Bytes.toBytes(randomHFileRangeListIterator.next()) } };
          +        new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
          +            Bytes.toBytes(randomHFileRangeListIterator.next()) } };
               loadAndValidateHFileReplication("HFileReplication_1", row, FAMILY, ht1Source, hfileRanges, 50);
           
               hfileRanges =
                   new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
                       Bytes.toBytes(randomHFileRangeListIterator.next()) } };
               loadFromOtherHDFSAndValidateHFileReplication("HFileReplication_1", row, FAMILY, ht1Source,
          -        hfileRanges, 50);
          +      hfileRanges, 50);
           
               hfileRanges =
          -      new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
          -        Bytes.toBytes(randomHFileRangeListIterator.next()) } };
          +        new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
          +            Bytes.toBytes(randomHFileRangeListIterator.next()) } };
               loadAndValidateHFileReplication("HFileReplication_1", row, NO_REP_FAMILY, ht1Source,
                 hfileRanges, 3);
           
               // Load 100 + 100 + 3 hfiles to t2_syncup.
               hfileRanges =
          -      new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
          -        Bytes.toBytes(randomHFileRangeListIterator.next()) } };
          +        new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
          +            Bytes.toBytes(randomHFileRangeListIterator.next()) } };
               loadAndValidateHFileReplication("HFileReplication_1", row, FAMILY, ht2Source, hfileRanges, 100);
           
               hfileRanges =
                   new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
                       Bytes.toBytes(randomHFileRangeListIterator.next()) } };
               loadFromOtherHDFSAndValidateHFileReplication("HFileReplication_1", row, FAMILY, ht2Source,
          -        hfileRanges, 100);
          +      hfileRanges, 100);
           
               hfileRanges =
          -      new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
          -        Bytes.toBytes(randomHFileRangeListIterator.next()) } };
          +        new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()),
          +            Bytes.toBytes(randomHFileRangeListIterator.next()) } };
               loadAndValidateHFileReplication("HFileReplication_1", row, NO_REP_FAMILY, ht2Source,
                 hfileRanges, 3);
           
          @@ -251,7 +251,7 @@ private void loadFromOtherHDFSAndValidateHFileReplication(String testName, byte[
                 byte[] from = range[0];
                 byte[] to = range[1];
                 HFileTestUtil.createHFile(UTIL2.getConfiguration(), fs,
          -          new Path(familyDir, "hfile_" + hfileIdx++), fam, row, from, to, numOfRows);
          +        new Path(familyDir, "hfile_" + hfileIdx++), fam, row, from, to, numOfRows);
               }
           
               final TableName tableName = source.getName();
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
          index a0d5cc961453..8f5aa5f011ee 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -57,10 +57,10 @@ public class TestReplicationWALEntryFilters {
             public static final HBaseClassTestRule CLASS_RULE =
                 HBaseClassTestRule.forClass(TestReplicationWALEntryFilters.class);
           
          -  static byte[] a = new byte[] {'a'};
          -  static byte[] b = new byte[] {'b'};
          -  static byte[] c = new byte[] {'c'};
          -  static byte[] d = new byte[] {'d'};
          +  static byte[] a = new byte[] { 'a' };
          +  static byte[] b = new byte[] { 'b' };
          +  static byte[] c = new byte[] { 'c' };
          +  static byte[] d = new byte[] { 'd' };
           
             @Test
             public void testSystemTableWALEntryFilter() {
          @@ -68,15 +68,15 @@ public void testSystemTableWALEntryFilter() {
           
               // meta
               WALKeyImpl key1 =
          -      new WALKeyImpl(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
          -        TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime());
          +        new WALKeyImpl(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
          +            TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime());
               Entry metaEntry = new Entry(key1, null);
           
               assertNull(filter.filter(metaEntry));
           
               // user table
          -    WALKeyImpl key3 = new WALKeyImpl(new byte[0], TableName.valueOf("foo"),
          -      EnvironmentEdgeManager.currentTime());
          +    WALKeyImpl key3 =
          +        new WALKeyImpl(new byte[0], TableName.valueOf("foo"), EnvironmentEdgeManager.currentTime());
               Entry userEntry = new Entry(key3, null);
           
               assertEquals(userEntry, filter.filter(userEntry));
          @@ -164,8 +164,8 @@ public Entry filter(Entry entry) {
           
               @Override
               public Cell filterCell(Entry entry, Cell cell) {
          -      if (Bytes.toString(
          -          cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()).equals("a")) {
          +      if (Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
          +          .equals("a")) {
                   return null;
                 } else {
                   return cell;
          @@ -191,15 +191,14 @@ public void testChainWALEntryWithCellFilter() {
               ChainWALEntryFilter filterSomeCells =
                   new ChainWALEntryFilter(new FilterSomeCellsWALCellFilter());
               // since WALCellFilter filter cells with rowkey 'a'
          -    assertEquals(createEntry(null, b,c), filterSomeCells.filter(userEntry));
          +    assertEquals(createEntry(null, b, c), filterSomeCells.filter(userEntry));
           
               Entry userEntry2 = createEntry(null, b, c, d);
               // since there is no cell to get filtered, nothing should get filtered
               assertEquals(userEntry2, filterSomeCells.filter(userEntry2));
           
               // since we filter all the cells, we should get empty entry
          -    ChainWALEntryFilter filterAllCells =
          -        new ChainWALEntryFilter(new FilterAllCellsWALCellFilter());
          +    ChainWALEntryFilter filterAllCells = new ChainWALEntryFilter(new FilterAllCellsWALCellFilter());
               assertEquals(createEntry(null), filterAllCells.filter(userEntry));
             }
           
          @@ -209,7 +208,7 @@ public void testChainWALEmptyEntryWithCellFilter() {
               ChainWALEmptyEntryFilter filterSomeCells =
                   new ChainWALEmptyEntryFilter(new FilterSomeCellsWALCellFilter());
               // since WALCellFilter filter cells with rowkey 'a'
          -    assertEquals(createEntry(null, b,c), filterSomeCells.filter(userEntry));
          +    assertEquals(createEntry(null, b, c), filterSomeCells.filter(userEntry));
           
               Entry userEntry2 = createEntry(null, b, c, d);
               // since there is no cell to get filtered, nothing should get filtered
          @@ -229,13 +228,13 @@ public void testChainWALEntryFilter() {
               Entry userEntry = createEntry(null, a, b, c);
           
               ChainWALEntryFilter filter = new ChainWALEntryFilter(passFilter);
          -    assertEquals(createEntry(null, a,b,c), filter.filter(userEntry));
          +    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
           
               filter = new ChainWALEntryFilter(passFilter, passFilter);
          -    assertEquals(createEntry(null, a,b,c), filter.filter(userEntry));
          +    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
           
               filter = new ChainWALEntryFilter(passFilter, passFilter, passFilter);
          -    assertEquals(createEntry(null, a,b,c), filter.filter(userEntry));
          +    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
           
               filter = new ChainWALEntryFilter(nullFilter);
               assertEquals(null, filter.filter(userEntry));
          @@ -253,22 +252,16 @@ public void testChainWALEntryFilter() {
               assertEquals(null, filter.filter(userEntry));
           
               // flatten
          -    filter =
          -        new ChainWALEntryFilter(
          -          new ChainWALEntryFilter(passFilter,
          -            new ChainWALEntryFilter(passFilter, passFilter),
          -          new ChainWALEntryFilter(passFilter),
          -          new ChainWALEntryFilter(passFilter)),
          -          new ChainWALEntryFilter(passFilter));
          -    assertEquals(createEntry(null, a,b,c), filter.filter(userEntry));
          -
          -
          -    filter =
          -        new ChainWALEntryFilter(
          -          new ChainWALEntryFilter(passFilter,
          -            new ChainWALEntryFilter(passFilter,
          -              new ChainWALEntryFilter(nullFilter))),
          -          new ChainWALEntryFilter(passFilter));
          +    filter = new ChainWALEntryFilter(
          +        new ChainWALEntryFilter(passFilter, new ChainWALEntryFilter(passFilter, passFilter),
          +            new ChainWALEntryFilter(passFilter), new ChainWALEntryFilter(passFilter)),
          +        new ChainWALEntryFilter(passFilter));
          +    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
          +
          +    filter = new ChainWALEntryFilter(
          +        new ChainWALEntryFilter(passFilter,
          +            new ChainWALEntryFilter(passFilter, new ChainWALEntryFilter(nullFilter))),
          +        new ChainWALEntryFilter(passFilter));
               assertEquals(null, filter.filter(userEntry));
             }
           
          @@ -281,8 +274,7 @@ public void testNamespaceTableCfWALEntryFilter() {
               peerConfigBuilder.setReplicateAllUserTables(false).setNamespaces(null).setTableCFsMap(null);
               when(peer.getPeerConfig()).thenReturn(peerConfigBuilder.build());
               Entry userEntry = createEntry(null, a, b, c);
          -    ChainWALEntryFilter filter =
          -        new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
          +    ChainWALEntryFilter filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
               assertEquals(null, filter.filter(userEntry));
           
               // 2. replicate_all flag is false, and only config table-cfs in peer
          @@ -319,14 +311,14 @@ public void testNamespaceTableCfWALEntryFilter() {
               peerConfigBuilder.setReplicateAllUserTables(false).setTableCFsMap(tableCfs);
               when(peer.getPeerConfig()).thenReturn(peerConfigBuilder.build());
               filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
          -    assertEquals(createEntry(null, a,c), filter.filter(userEntry));
          +    assertEquals(createEntry(null, a, c), filter.filter(userEntry));
           
               // 3. replicate_all flag is false, and only config namespaces in peer
               when(peer.getTableCFs()).thenReturn(null);
               // empty set
                Set<String> namespaces = new HashSet<>();
               peerConfigBuilder.setReplicateAllUserTables(false).setNamespaces(namespaces)
          -      .setTableCFsMap(null);
          +        .setTableCFsMap(null);
               when(peer.getPeerConfig()).thenReturn(peerConfigBuilder.build());
               userEntry = createEntry(null, a, b, c);
               filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
          @@ -338,7 +330,7 @@ public void testNamespaceTableCfWALEntryFilter() {
               when(peer.getPeerConfig()).thenReturn(peerConfigBuilder.build());
               userEntry = createEntry(null, a, b, c);
               filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
          -    assertEquals(createEntry(null, a,b,c), filter.filter(userEntry));
          +    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
           
               // namespace ns1
               namespaces = new HashSet<>();
          @@ -356,7 +348,7 @@ public void testNamespaceTableCfWALEntryFilter() {
               namespaces.add("ns1");
               tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a", "c"));
               peerConfigBuilder.setReplicateAllUserTables(false).setNamespaces(namespaces)
          -      .setTableCFsMap(tableCfs);
          +        .setTableCFsMap(tableCfs);
               when(peer.getPeerConfig()).thenReturn(peerConfigBuilder.build());
               userEntry = createEntry(null, a, b, c);
               filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
          @@ -367,7 +359,7 @@ public void testNamespaceTableCfWALEntryFilter() {
               namespaces.add("default");
               tableCfs.put(TableName.valueOf("ns1:foo"), Lists.newArrayList("a", "c"));
               peerConfigBuilder.setReplicateAllUserTables(false).setNamespaces(namespaces)
          -      .setTableCFsMap(tableCfs);
          +        .setTableCFsMap(tableCfs);
               when(peer.getPeerConfig()).thenReturn(peerConfigBuilder.build());
               userEntry = createEntry(null, a, b, c);
               filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
          @@ -378,7 +370,7 @@ public void testNamespaceTableCfWALEntryFilter() {
               namespaces.add("ns1");
               tableCfs.put(TableName.valueOf("bar"), null);
               peerConfigBuilder.setReplicateAllUserTables(false).setNamespaces(namespaces)
          -      .setTableCFsMap(tableCfs);
          +        .setTableCFsMap(tableCfs);
               when(peer.getPeerConfig()).thenReturn(peerConfigBuilder.build());
               userEntry = createEntry(null, a, b, c);
               filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
          @@ -392,13 +384,11 @@ public void testNamespaceTableCfWALEntryFilter2() {
           
               // 1. replicate_all flag is true
               // and no exclude namespaces and no exclude table-cfs config
          -    peerConfigBuilder.setReplicateAllUserTables(true)
          -      .setExcludeNamespaces(null)
          -      .setExcludeTableCFsMap(null);
          +    peerConfigBuilder.setReplicateAllUserTables(true).setExcludeNamespaces(null)
          +        .setExcludeTableCFsMap(null);
               when(peer.getPeerConfig()).thenReturn(peerConfigBuilder.build());
               Entry userEntry = createEntry(null, a, b, c);
          -    ChainWALEntryFilter filter =
          -        new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
          +    ChainWALEntryFilter filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
               assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
           
               // 2. replicate_all flag is true, and only config exclude namespaces
          @@ -480,7 +470,7 @@ public void testNamespaceTableCfWALEntryFilter2() {
           
              private Entry createEntry(TreeMap<byte[], Integer> scopes, byte[]... kvs) {
               WALKeyImpl key1 = new WALKeyImpl(new byte[0], TableName.valueOf("foo"),
          -      EnvironmentEdgeManager.currentTime(), scopes);
          +        EnvironmentEdgeManager.currentTime(), scopes);
               WALEdit edit1 = new WALEdit();
           
               for (byte[] kv : kvs) {
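
           Aside (not part of the patch): the nested ChainWALEntryFilter constructions reformatted above rely on a chain evaluating its member filters in order and dropping the entry as soon as any member (nested chains included) returns null. The sketch below illustrates that behaviour in isolation, assuming WALEntryFilter exposes the single filter(Entry) method used above so a lambda can stand in for it; the class name and pass/drop lambdas are invented for the example.

           import org.apache.hadoop.hbase.TableName;
           import org.apache.hadoop.hbase.replication.ChainWALEntryFilter;
           import org.apache.hadoop.hbase.replication.WALEntryFilter;
           import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
           import org.apache.hadoop.hbase.wal.WAL.Entry;
           import org.apache.hadoop.hbase.wal.WALEdit;
           import org.apache.hadoop.hbase.wal.WALKeyImpl;

           // Hypothetical example, not from the patch: shows chain pass-through vs. drop semantics.
           public final class ChainFilterSketch {
             private ChainFilterSketch() {
             }

             public static void main(String[] args) {
               WALEntryFilter pass = entry -> entry; // keeps every entry
               WALEntryFilter drop = entry -> null;  // drops every entry

               Entry entry = new Entry(
                 new WALKeyImpl(new byte[0], TableName.valueOf("foo"), EnvironmentEdgeManager.currentTime()),
                 new WALEdit());

               // A chain of pass-through filters leaves the entry in place.
               System.out.println(new ChainWALEntryFilter(pass, pass).filter(entry) != null); // true
               // One null-returning filter anywhere in the (possibly nested) chain drops it.
               System.out.println(
                 new ChainWALEntryFilter(new ChainWALEntryFilter(pass, drop), pass).filter(entry) == null); // true
             }
           }
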
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
          index efea969df16c..e34288ef7f81 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -70,7 +70,7 @@
           
           import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
           
          -@Category({ReplicationTests.class, MediumTests.class})
          +@Category({ ReplicationTests.class, MediumTests.class })
           public class TestReplicationWithTags {
           
             @ClassRule
          @@ -114,7 +114,7 @@ public static void setUpBeforeClass() throws Exception {
               conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
               conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
               conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
          -        TestCoprocessorForTagsAtSource.class.getName());
          +      TestCoprocessorForTagsAtSource.class.getName());
           
               utility1 = new HBaseTestingUtil(conf1);
               utility1.startMiniZKCluster();
          @@ -132,7 +132,7 @@ public static void setUpBeforeClass() throws Exception {
               conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
               conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
               conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
          -            TestCoprocessorForTagsAtSink.class.getName());
          +      TestCoprocessorForTagsAtSink.class.getName());
           
               utility2 = new HBaseTestingUtil(conf2);
               utility2.setZkCluster(miniZK);
          @@ -143,14 +143,14 @@ public static void setUpBeforeClass() throws Exception {
           
               connection1 = ConnectionFactory.createConnection(conf1);
               replicationAdmin = connection1.getAdmin();
          -    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
          -      .setClusterKey(utility2.getClusterKey()).build();
          +    ReplicationPeerConfig rpc =
          +        ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()).build();
               replicationAdmin.addReplicationPeer("2", rpc);
           
               TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAME)
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(3)
          -        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          -      .build();
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setMaxVersions(3)
          +            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          +        .build();
               try (Connection conn = ConnectionFactory.createConnection(conf1);
                   Admin admin = conn.getAdmin()) {
                 admin.createTable(tableDescriptor, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE);
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
          index 07e626b3c84c..b4e58713622d 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -54,7 +54,7 @@ public class TestSerialReplication extends SerialReplicationTestBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSerialReplication.class);
          +      HBaseClassTestRule.forClass(TestSerialReplication.class);
           
             @Before
             public void setUp() throws IOException, StreamLacksCapabilityException {
          @@ -108,7 +108,7 @@ public void testRegionSplit() throws Exception {
               regionsToSeqId.put(region.getEncodedName(), -1L);
               regions.stream().map(RegionInfo::getEncodedName).forEach(n -> regionsToSeqId.put(n, -1L));
               try (WAL.Reader reader =
          -      WALFactory.createReader(UTIL.getTestFileSystem(), logPath, UTIL.getConfiguration())) {
          +        WALFactory.createReader(UTIL.getTestFileSystem(), logPath, UTIL.getConfiguration())) {
                 int count = 0;
                 for (Entry entry;;) {
                   entry = reader.next();
          @@ -119,8 +119,8 @@ public void testRegionSplit() throws Exception {
                   Long seqId = regionsToSeqId.get(encodedName);
                   assertNotNull(
                     "Unexcepted entry " + entry + ", expected regions " + region + ", or " + regions, seqId);
          -        assertTrue("Sequence id go backwards from " + seqId + " to " +
          -          entry.getKey().getSequenceId() + " for " + encodedName,
          +        assertTrue("Sequence id go backwards from " + seqId + " to "
          +            + entry.getKey().getSequenceId() + " for " + encodedName,
                     entry.getKey().getSequenceId() >= seqId.longValue());
                   if (count < 100) {
                     assertEquals(encodedName + " is pushed before parent " + region.getEncodedName(),
          @@ -140,9 +140,9 @@ public void testRegionMerge() throws Exception {
               TableName tableName = TableName.valueOf(name.getMethodName());
               UTIL.getAdmin().createTable(
                 TableDescriptorBuilder.newBuilder(tableName)
          -        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF)
          -          .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          -        .build(),
          +          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF)
          +              .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          +          .build(),
                 new byte[][] { splitKey });
               UTIL.waitTableAvailable(tableName);
               try (Table table = UTIL.getConnection().getTable(tableName)) {
          @@ -152,9 +152,9 @@ public void testRegionMerge() throws Exception {
               }
                List<RegionInfo> regions = UTIL.getAdmin().getRegions(tableName);
               UTIL.getAdmin()
          -      .mergeRegionsAsync(
          -        regions.stream().map(RegionInfo::getEncodedNameAsBytes).toArray(byte[][]::new), false)
          -      .get(30, TimeUnit.SECONDS);
          +        .mergeRegionsAsync(
          +          regions.stream().map(RegionInfo::getEncodedNameAsBytes).toArray(byte[][]::new), false)
          +        .get(30, TimeUnit.SECONDS);
               UTIL.waitUntilNoRegionsInTransition(30000);
                List<RegionInfo> regionsAfterMerge = UTIL.getAdmin().getRegions(tableName);
               assertEquals(1, regionsAfterMerge.size());
          @@ -169,7 +169,7 @@ public void testRegionMerge() throws Exception {
               regionsToSeqId.put(region.getEncodedName(), -1L);
               regions.stream().map(RegionInfo::getEncodedName).forEach(n -> regionsToSeqId.put(n, -1L));
               try (WAL.Reader reader =
          -      WALFactory.createReader(UTIL.getTestFileSystem(), logPath, UTIL.getConfiguration())) {
          +        WALFactory.createReader(UTIL.getTestFileSystem(), logPath, UTIL.getConfiguration())) {
                 int count = 0;
                 for (Entry entry;;) {
                   entry = reader.next();
          @@ -180,13 +180,13 @@ public void testRegionMerge() throws Exception {
                   Long seqId = regionsToSeqId.get(encodedName);
                   assertNotNull(
                     "Unexcepted entry " + entry + ", expected regions " + region + ", or " + regions, seqId);
          -        assertTrue("Sequence id go backwards from " + seqId + " to " +
          -          entry.getKey().getSequenceId() + " for " + encodedName,
          +        assertTrue("Sequence id go backwards from " + seqId + " to "
          +            + entry.getKey().getSequenceId() + " for " + encodedName,
                     entry.getKey().getSequenceId() >= seqId.longValue());
                   if (count < 100) {
                     assertNotEquals(
          -            encodedName + " is pushed before parents " +
          -              regions.stream().map(RegionInfo::getEncodedName).collect(Collectors.joining(" and ")),
          +            encodedName + " is pushed before parents " + regions.stream()
          +                .map(RegionInfo::getEncodedName).collect(Collectors.joining(" and ")),
                       region.getEncodedName(), encodedName);
                   } else {
                     assertEquals(region.getEncodedName(), encodedName);
          @@ -201,9 +201,9 @@ public void testRegionMerge() throws Exception {
             public void testRemovePeerNothingReplicated() throws Exception {
               TableName tableName = createTable();
               String encodedRegionName =
          -      UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getRegionInfo().getEncodedName();
          +        UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getRegionInfo().getEncodedName();
               ReplicationQueueStorage queueStorage =
          -      UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage();
          +        UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage();
               assertEquals(HConstants.NO_SEQNUM, queueStorage.getLastSequenceId(encodedRegionName, PEER_ID));
               UTIL.getAdmin().removeReplicationPeer(PEER_ID);
               assertEquals(HConstants.NO_SEQNUM, queueStorage.getLastSequenceId(encodedRegionName, PEER_ID));
          @@ -220,9 +220,9 @@ public void testRemovePeer() throws Exception {
               enablePeerAndWaitUntilReplicationDone(100);
               checkOrder(100);
               String encodedRegionName =
          -      UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getRegionInfo().getEncodedName();
          +        UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getRegionInfo().getEncodedName();
               ReplicationQueueStorage queueStorage =
          -      UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage();
          +        UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage();
               assertTrue(queueStorage.getLastSequenceId(encodedRegionName, PEER_ID) > 0);
               UTIL.getAdmin().removeReplicationPeer(PEER_ID);
               // confirm that we delete the last pushed sequence id
          @@ -240,9 +240,9 @@ public void testRemoveSerialFlag() throws Exception {
               enablePeerAndWaitUntilReplicationDone(100);
               checkOrder(100);
               String encodedRegionName =
          -      UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getRegionInfo().getEncodedName();
          +        UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getRegionInfo().getEncodedName();
               ReplicationQueueStorage queueStorage =
          -      UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage();
          +        UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage();
               assertTrue(queueStorage.getLastSequenceId(encodedRegionName, PEER_ID) > 0);
               ReplicationPeerConfig peerConfig = UTIL.getAdmin().getReplicationPeerConfig(PEER_ID);
               UTIL.getAdmin().updateReplicationPeerConfig(PEER_ID,
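
           Aside (not part of the patch): testRegionSplit and testRegionMerge above read WAL files back through WALFactory.createReader to assert that sequence ids never go backwards. A bare-bones sketch of that reader loop, assuming walPath points at an existing WAL file; the class and method names are made up for the example.

           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.fs.FileSystem;
           import org.apache.hadoop.fs.Path;
           import org.apache.hadoop.hbase.HBaseConfiguration;
           import org.apache.hadoop.hbase.wal.WAL;
           import org.apache.hadoop.hbase.wal.WALFactory;

           // Hypothetical helper, not from the patch: prints each entry's key in a single WAL file.
           public final class WalDumpSketch {
             private WalDumpSketch() {
             }

             static void dump(Configuration conf, Path walPath) throws Exception {
               FileSystem fs = walPath.getFileSystem(conf);
               try (WAL.Reader reader = WALFactory.createReader(fs, walPath, conf)) {
                 for (WAL.Entry entry = reader.next(); entry != null; entry = reader.next()) {
                   System.out.println(entry.getKey() + " seqId=" + entry.getKey().getSequenceId());
                 }
               }
             }

             public static void main(String[] args) throws Exception {
               dump(HBaseConfiguration.create(), new Path(args[0]));
             }
           }
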
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplicationFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplicationFailover.java
          index 324a69fa8a8d..3e2f9a81d367 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplicationFailover.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplicationFailover.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -40,7 +40,7 @@ public class TestSerialReplicationFailover extends SerialReplicationTestBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSerialReplicationFailover.class);
          +      HBaseClassTestRule.forClass(TestSerialReplicationFailover.class);
           
             @Before
             public void setUp() throws IOException, StreamLacksCapabilityException {
          @@ -54,7 +54,7 @@ public void testKillRS() throws Exception {
               TableName tableName = TableName.valueOf(name.getMethodName());
               UTIL.getAdmin().createTable(
                 TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder
          -        .newBuilder(CF).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build());
          +          .newBuilder(CF).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build());
               UTIL.waitTableAvailable(tableName);
               try (Table table = UTIL.getConnection().getTable(tableName)) {
                 for (int i = 0; i < 100; i++) {
          @@ -62,7 +62,7 @@ public void testKillRS() throws Exception {
                 }
               }
               RegionServerThread thread = UTIL.getMiniHBaseCluster().getRegionServerThreads().stream()
          -      .filter(t -> !t.getRegionServer().getRegions(tableName).isEmpty()).findFirst().get();
          +        .filter(t -> !t.getRegionServer().getRegions(tableName).isEmpty()).findFirst().get();
               thread.getRegionServer().abort("for testing");
               thread.join();
               try (Table table = UTIL.getConnection().getTable(tableName)) {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java
          index 869d9890d11a..7a36e450e907 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -46,15 +46,15 @@ public class TestSerialSyncReplication extends SyncReplicationTestBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSerialSyncReplication.class);
          +      HBaseClassTestRule.forClass(TestSerialSyncReplication.class);
           
             @Test
             public void test() throws Exception {
               // change to serial
               UTIL1.getAdmin().updateReplicationPeerConfig(PEER_ID, ReplicationPeerConfig
          -      .newBuilder(UTIL1.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
          +        .newBuilder(UTIL1.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
               UTIL2.getAdmin().updateReplicationPeerConfig(PEER_ID, ReplicationPeerConfig
          -      .newBuilder(UTIL2.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
          +        .newBuilder(UTIL2.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
           
               UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
                 SyncReplicationState.STANDBY);
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
          index 51cfa2ee4d01..41d0fc0155fc 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -52,7 +52,7 @@ public class TestSyncReplicationActive extends SyncReplicationTestBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSyncReplicationActive.class);
          +      HBaseClassTestRule.forClass(TestSyncReplicationActive.class);
           
             @Test
             public void testActive() throws Exception {
          @@ -82,10 +82,10 @@ public void testActive() throws Exception {
               verify(UTIL2, 0, 100);
           
               try (AsyncConnection conn =
          -      ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
          +        ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
                 AsyncTable table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1).build();
                 CompletableFuture future =
          -        table.put(new Put(Bytes.toBytes(1000)).addColumn(CF, CQ, Bytes.toBytes(1000)));
          +          table.put(new Put(Bytes.toBytes(1000)).addColumn(CF, CQ, Bytes.toBytes(1000)));
                 Thread.sleep(2000);
                 // should hang on rolling
                 assertFalse(future.isDone());
          @@ -117,14 +117,14 @@ public void testActive() throws Exception {
               write(UTIL2, 200, 300);
             }
           
          -  private void verifyNoClusterIdInRemoteLog(HBaseTestingUtil utility, Path remoteDir,
          -      String peerId) throws Exception {
          +  private void verifyNoClusterIdInRemoteLog(HBaseTestingUtil utility, Path remoteDir, String peerId)
          +      throws Exception {
               FileSystem fs2 = utility.getTestFileSystem();
               FileStatus[] files = fs2.listStatus(new Path(remoteDir, peerId));
               Assert.assertTrue(files.length > 0);
               for (FileStatus file : files) {
          -      try (
          -        Reader reader = WALFactory.createReader(fs2, file.getPath(), utility.getConfiguration())) {
          +      try (Reader reader =
          +          WALFactory.createReader(fs2, file.getPath(), utility.getConfiguration())) {
                   Entry entry = reader.next();
                   Assert.assertTrue(entry != null);
                   while (entry != null) {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalCopyToRemote.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalCopyToRemote.java
          index cf8993b019f2..79e1179826c4 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalCopyToRemote.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalCopyToRemote.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -48,10 +48,10 @@ public class TestSyncReplicationMoreLogsInLocalCopyToRemote extends SyncReplicat
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSyncReplicationMoreLogsInLocalCopyToRemote.class);
          +      HBaseClassTestRule.forClass(TestSyncReplicationMoreLogsInLocalCopyToRemote.class);
           
             private static final Logger LOG =
          -    LoggerFactory.getLogger(TestSyncReplicationMoreLogsInLocalCopyToRemote.class);
          +      LoggerFactory.getLogger(TestSyncReplicationMoreLogsInLocalCopyToRemote.class);
           
             @BeforeClass
             public static void setUp() throws Exception {
          @@ -71,10 +71,10 @@ public void testSplitLog() throws Exception {
                 SyncReplicationState.ACTIVE);
               HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
               DualAsyncFSWALForTest wal =
          -      (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
          +        (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
               wal.setRemoteBroken();
               try (AsyncConnection conn =
          -      ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
          +        ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
                 AsyncTable table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1).build();
                 try {
                   table.put(new Put(Bytes.toBytes(0)).addColumn(CF, CQ, Bytes.toBytes(0))).get();
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java
          index 47f2b2cceb70..f266d48ff450 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationMoreLogsInLocalGiveUpSplitting.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -55,10 +55,10 @@ public class TestSyncReplicationMoreLogsInLocalGiveUpSplitting extends SyncRepli
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSyncReplicationMoreLogsInLocalGiveUpSplitting.class);
          +      HBaseClassTestRule.forClass(TestSyncReplicationMoreLogsInLocalGiveUpSplitting.class);
           
             private static final Logger LOG =
          -    LoggerFactory.getLogger(TestSyncReplicationMoreLogsInLocalGiveUpSplitting.class);
          +      LoggerFactory.getLogger(TestSyncReplicationMoreLogsInLocalGiveUpSplitting.class);
           
             @BeforeClass
             public static void setUp() throws Exception {
          @@ -82,13 +82,13 @@ public void testSplitLog() throws Exception {
               }
               HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
               DualAsyncFSWALForTest wal =
          -      (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
          +        (DualAsyncFSWALForTest) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
               wal.setRemoteBroken();
               wal.suspendLogRoll();
               try (AsyncConnection conn =
          -      ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
          +        ConnectionFactory.createAsyncConnection(UTIL1.getConfiguration()).get()) {
                 AsyncTable table = conn.getTableBuilder(TABLE_NAME).setMaxAttempts(1)
          -        .setWriteRpcTimeout(5, TimeUnit.SECONDS).build();
          +          .setWriteRpcTimeout(5, TimeUnit.SECONDS).build();
                 try {
                   table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(1))).get();
                   fail("Should fail since the rs will hang and we will get a rpc timeout");
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationNewRSJoinBetweenRefreshes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationNewRSJoinBetweenRefreshes.java
          index 86ad8c0c3f0b..9caa6db8e1a3 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationNewRSJoinBetweenRefreshes.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationNewRSJoinBetweenRefreshes.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -46,7 +46,7 @@ public class TestSyncReplicationNewRSJoinBetweenRefreshes extends SyncReplicatio
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSyncReplicationNewRSJoinBetweenRefreshes.class);
          +      HBaseClassTestRule.forClass(TestSyncReplicationNewRSJoinBetweenRefreshes.class);
           
             private static boolean HALT;
           
           @@ -69,22 +69,22 @@ public void postExecuteProcedures(ObserverContext
           -          .filter(p -> p instanceof TransitPeerSyncReplicationStateProcedure)
          -          .filter(p -> !p.isFinished()).map(p -> (TransitPeerSyncReplicationStateProcedure) p)
          -          .findFirst().ifPresent(proc -> {
          -            // this is the next state of REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN_VALUE
          -            if (proc.getCurrentStateId() == REOPEN_ALL_REGIONS_IN_PEER_VALUE) {
          -              // tell the main thread to start a new region server
          -              ARRIVE.countDown();
          -              try {
          -                // wait for the region server to online
          -                RESUME.await();
          -              } catch (InterruptedException e) {
          -                throw new RuntimeException(e);
          +            .filter(p -> p instanceof TransitPeerSyncReplicationStateProcedure)
          +            .filter(p -> !p.isFinished()).map(p -> (TransitPeerSyncReplicationStateProcedure) p)
          +            .findFirst().ifPresent(proc -> {
          +              // this is the next state of REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN_VALUE
          +              if (proc.getCurrentStateId() == REOPEN_ALL_REGIONS_IN_PEER_VALUE) {
          +                // tell the main thread to start a new region server
          +                ARRIVE.countDown();
          +                try {
          +                  // wait for the region server to online
          +                  RESUME.await();
          +                } catch (InterruptedException e) {
          +                  throw new RuntimeException(e);
          +                }
          +                HALT = false;
                         }
          -              HALT = false;
          -            }
          -          });
          +            });
                 }
               }
             }
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java
          index 04b5d65318a8..54a067ea9079 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -38,7 +38,7 @@ public class TestSyncReplicationRemoveRemoteWAL extends SyncReplicationTestBase
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSyncReplicationRemoveRemoteWAL.class);
          +      HBaseClassTestRule.forClass(TestSyncReplicationRemoveRemoteWAL.class);
           
             @Test
             public void testRemoveRemoteWAL() throws Exception {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
          index 0c1c350b2aae..81787408b7c3 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandBy.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -51,7 +51,7 @@ public class TestSyncReplicationStandBy extends SyncReplicationTestBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSyncReplicationStandBy.class);
          +      HBaseClassTestRule.forClass(TestSyncReplicationStandBy.class);
           
             @FunctionalInterface
             private interface TableAction {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillMaster.java
          index 6265f5cce7c5..592035147420 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillMaster.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillMaster.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -51,10 +51,10 @@ public void testStandbyKillMaster() throws Exception {
               Path remoteWALDir = getRemoteWALDir(mfs, PEER_ID);
               assertFalse(mfs.getWALFileSystem().exists(remoteWALDir));
               UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
          -        SyncReplicationState.STANDBY);
          +      SyncReplicationState.STANDBY);
               assertTrue(mfs.getWALFileSystem().exists(remoteWALDir));
               UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
          -        SyncReplicationState.ACTIVE);
          +      SyncReplicationState.ACTIVE);
           
               // Disable async replication and write data, then shutdown
               UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
          @@ -74,13 +74,13 @@ public void testStandbyKillMaster() throws Exception {
               // Transit standby to DA to replay logs
               try {
                 UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
          -          SyncReplicationState.DOWNGRADE_ACTIVE);
          +        SyncReplicationState.DOWNGRADE_ACTIVE);
               } catch (Exception e) {
                 LOG.error("Failed to transit standby cluster to " + SyncReplicationState.DOWNGRADE_ACTIVE);
               }
           
          -    while (UTIL2.getAdmin().getReplicationPeerSyncReplicationState(PEER_ID)
          -        != SyncReplicationState.DOWNGRADE_ACTIVE) {
          +    while (UTIL2.getAdmin()
          +        .getReplicationPeerSyncReplicationState(PEER_ID) != SyncReplicationState.DOWNGRADE_ACTIVE) {
                 Thread.sleep(SLEEP_TIME);
               }
               verify(UTIL2, 0, COUNT);
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillRS.java
          index 04360f81f3f4..e9a4de01888b 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillRS.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationStandbyKillRS.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -40,8 +40,7 @@
           @Category({ ReplicationTests.class, LargeTests.class })
           public class TestSyncReplicationStandbyKillRS extends SyncReplicationTestBase {
           
          -  private static final Logger LOG =
          -      LoggerFactory.getLogger(TestSyncReplicationStandbyKillRS.class);
          +  private static final Logger LOG = LoggerFactory.getLogger(TestSyncReplicationStandbyKillRS.class);
           
             private final long SLEEP_TIME = 1000;
           
          @@ -57,10 +56,10 @@ public void testStandbyKillRegionServer() throws Exception {
               Path remoteWALDir = getRemoteWALDir(mfs, PEER_ID);
               assertFalse(mfs.getWALFileSystem().exists(remoteWALDir));
               UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
          -        SyncReplicationState.STANDBY);
          +      SyncReplicationState.STANDBY);
               assertTrue(mfs.getWALFileSystem().exists(remoteWALDir));
               UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
          -        SyncReplicationState.ACTIVE);
          +      SyncReplicationState.ACTIVE);
           
               // Disable async replication and write data, then shutdown
               UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
          @@ -72,10 +71,10 @@ public void testStandbyKillRegionServer() throws Exception {
               Thread t = new Thread(() -> {
                 try {
                    List<JVMClusterUtil.RegionServerThread> regionServers =
          -          UTIL2.getMiniHBaseCluster().getLiveRegionServerThreads();
          +            UTIL2.getMiniHBaseCluster().getLiveRegionServerThreads();
                   LOG.debug("Going to stop {} RSes: [{}]", regionServers.size(),
                     regionServers.stream().map(rst -> rst.getRegionServer().getServerName().getServerName())
          -            .collect(Collectors.joining(", ")));
          +              .collect(Collectors.joining(", ")));
                   for (JVMClusterUtil.RegionServerThread rst : regionServers) {
                     ServerName serverName = rst.getRegionServer().getServerName();
                     LOG.debug("Going to RS stop [{}]", serverName);
          @@ -83,7 +82,7 @@ public void testStandbyKillRegionServer() throws Exception {
                     waitForRSShutdownToStartAndFinish(activeMaster, serverName);
                     LOG.debug("Going to start a new RS");
                     JVMClusterUtil.RegionServerThread restarted =
          -            UTIL2.getMiniHBaseCluster().startRegionServer();
          +              UTIL2.getMiniHBaseCluster().startRegionServer();
                     LOG.debug("Waiting RS [{}] to online", restarted.getRegionServer().getServerName());
                     restarted.waitForServerOnline();
                     LOG.debug("Waiting the old RS {} thread to quit", rst.getName());
          @@ -112,7 +111,7 @@ public void testStandbyKillRegionServer() throws Exception {
               t.join();
           
               while (UTIL2.getAdmin()
          -      .getReplicationPeerSyncReplicationState(PEER_ID) != SyncReplicationState.DOWNGRADE_ACTIVE) {
          +        .getReplicationPeerSyncReplicationState(PEER_ID) != SyncReplicationState.DOWNGRADE_ACTIVE) {
                 LOG.debug("Waiting for peer {} to be in {} state", PEER_ID,
                   SyncReplicationState.DOWNGRADE_ACTIVE);
                 Thread.sleep(SLEEP_TIME);
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyCellsReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyCellsReplicationEndpoint.java
          index 2f81d8e62b75..73637774cab0 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyCellsReplicationEndpoint.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyCellsReplicationEndpoint.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -51,10 +51,10 @@ public class TestVerifyCellsReplicationEndpoint {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestVerifyCellsReplicationEndpoint.class);
          +      HBaseClassTestRule.forClass(TestVerifyCellsReplicationEndpoint.class);
           
             private static final Logger LOG =
          -    LoggerFactory.getLogger(TestVerifyCellsReplicationEndpoint.class);
          +      LoggerFactory.getLogger(TestVerifyCellsReplicationEndpoint.class);
           
             private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
           
          @@ -74,7 +74,7 @@ public static final class EndpointForTest extends VerifyWALEntriesReplicationEnd
               public boolean replicate(ReplicateContext replicateContext) {
                 LOG.info(replicateContext.getEntries().toString());
                 replicateContext.entries.stream().map(WAL.Entry::getEdit).map(WALEdit::getCells)
          -        .forEachOrdered(CELLS::addAll);
          +          .forEachOrdered(CELLS::addAll);
                 return super.replicate(replicateContext);
               }
             }
          @@ -87,7 +87,7 @@ public static void setUp() throws Exception {
               UTIL.createTable(TABLE_NAME, CF);
               UTIL.getAdmin().addReplicationPeer(PEER_ID,
                 ReplicationPeerConfig.newBuilder().setClusterKey("zk1:8888:/hbase")
          -        .setReplicationEndpointImpl(EndpointForTest.class.getName()).build());
          +          .setReplicationEndpointImpl(EndpointForTest.class.getName()).build());
             }
           
             @AfterClass
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java
          index 9b803dc4f7a9..fdc0dc1f6a50 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -65,7 +65,7 @@
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
          -@Category({MasterTests.class, LargeTests.class})
          +@Category({ MasterTests.class, LargeTests.class })
           public class TestRecoverStandbyProcedure {
           
             @ClassRule
          @@ -155,16 +155,15 @@ public void testRecoverStandby() throws IOException, StreamLacksCapabilityExcept
           
             private void setupSyncReplicationWALs() throws IOException, StreamLacksCapabilityException {
               Path peerRemoteWALDir = ReplicationUtils
          -      .getPeerRemoteWALDir(syncReplicationReplayWALManager.getRemoteWALDir(), PEER_ID);
          +        .getPeerRemoteWALDir(syncReplicationReplayWALManager.getRemoteWALDir(), PEER_ID);
               if (!fs.exists(peerRemoteWALDir)) {
                 fs.mkdirs(peerRemoteWALDir);
               }
               for (int i = 0; i < WAL_NUMBER; i++) {
                 try (ProtobufLogWriter writer = new ProtobufLogWriter()) {
                   Path wal = new Path(peerRemoteWALDir, "srv1,8888." + i + ".syncrep");
          -        writer.init(fs, wal, conf, true,
          -            WALUtil.getWALBlockSize(conf, fs, peerRemoteWALDir),
          -            StreamSlowMonitor.create(conf, "defaultMonitor"));
          +        writer.init(fs, wal, conf, true, WALUtil.getWALBlockSize(conf, fs, peerRemoteWALDir),
          +          StreamSlowMonitor.create(conf, "defaultMonitor"));
                List<Entry> entries = setupWALEntries(i * ROW_COUNT, (i + 1) * ROW_COUNT);
                   for (Entry entry : entries) {
                     writer.append(entry);
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java
          index 2aa3ea4b0e51..f5670dbc6806 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java
          index 36c07fd014d1..fe5f87cb3f6f 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java
          index 0f7949283975..bd070eacecd9 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -27,9 +27,9 @@
           import org.junit.ClassRule;
           import org.junit.experimental.categories.Category;
           
          -@Category({ReplicationTests.class, LargeTests.class})
          -public class TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL extends
          -    TestReplicationKillMasterRSCompressed {
          +@Category({ ReplicationTests.class, LargeTests.class })
          +public class TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL
          +    extends TestReplicationKillMasterRSCompressed {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java
          index 21f325c1b16f..fbad17630ece 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -27,9 +27,9 @@
           import org.junit.ClassRule;
           import org.junit.experimental.categories.Category;
           
          -@Category({ReplicationTests.class, LargeTests.class})
          -public class TestReplicationKillMasterRSCompressedWithMultipleWAL extends
          -    TestReplicationKillMasterRSCompressed {
          +@Category({ ReplicationTests.class, LargeTests.class })
          +public class TestReplicationKillMasterRSCompressedWithMultipleWAL
          +    extends TestReplicationKillMasterRSCompressed {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java
          index b2835eee8766..5f71a0ddfe15 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java
          index a5dbaf3f1c5c..231276957830 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -32,7 +32,7 @@ public class TestReplicationSyncUpToolWithMultipleWAL extends TestReplicationSyn
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithMultipleWAL.class);
          +      HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithMultipleWAL.class);
           
             @Override
             protected void customizeClusterConf(Configuration conf) {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java
          index b07b5b42dc97..922dc3cb4abf 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStream.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -88,7 +88,7 @@ public void testAppendsWithRolls() throws Exception {
               appendToLogAndSync();
               long oldPos;
               try (WALEntryStream entryStream =
          -      new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
          +        new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
                 // There's one edit in the log, read it. Reading past it needs to throw exception
                 assertTrue(entryStream.hasNext());
                 WAL.Entry entry = entryStream.peek();
          @@ -103,7 +103,7 @@ public void testAppendsWithRolls() throws Exception {
               appendToLogAndSync();
           
               try (WALEntryStream entryStream = new WALEntryStreamWithRetries(logQueue, CONF, oldPos, log,
          -      null, new MetricsSource("1"), fakeWalGroupId)) {
          +        null, new MetricsSource("1"), fakeWalGroupId)) {
                 // Read the newly added entry, make sure we made progress
                 WAL.Entry entry = entryStream.next();
                 assertNotEquals(oldPos, entryStream.getPosition());
          @@ -117,7 +117,7 @@ null, new MetricsSource("1"), fakeWalGroupId)) {
               appendToLogAndSync();
           
               try (WALEntryStream entryStream = new WALEntryStreamWithRetries(logQueue, CONF, oldPos, log,
          -      null, new MetricsSource("1"), fakeWalGroupId)) {
          +        null, new MetricsSource("1"), fakeWalGroupId)) {
                 WAL.Entry entry = entryStream.next();
                 assertNotEquals(oldPos, entryStream.getPosition());
                 assertNotNull(entry);
          @@ -142,7 +142,7 @@ public void testLogrollWhileStreaming() throws Exception {
               appendToLog("1");
               appendToLog("2");// 2
               try (WALEntryStream entryStream = new WALEntryStreamWithRetries(logQueue, CONF, 0, log, null,
          -      new MetricsSource("1"), fakeWalGroupId)) {
          +        new MetricsSource("1"), fakeWalGroupId)) {
                 assertEquals("1", getRow(entryStream.next()));
           
                 appendToLog("3"); // 3 - comes in after reader opened
          @@ -168,7 +168,7 @@ public void testLogrollWhileStreaming() throws Exception {
             public void testNewEntriesWhileStreaming() throws Exception {
               appendToLog("1");
               try (WALEntryStream entryStream =
          -      new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
          +        new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
                 entryStream.next(); // we've hit the end of the stream at this point
           
                 // some new entries come in while we're streaming
          @@ -191,7 +191,7 @@ public void testResumeStreamingFromPosition() throws Exception {
               long lastPosition = 0;
               appendToLog("1");
               try (WALEntryStream entryStream =
          -      new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
          +        new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
                 entryStream.next(); // we've hit the end of the stream at this point
                 appendToLog("2");
                 appendToLog("3");
          @@ -199,7 +199,7 @@ public void testResumeStreamingFromPosition() throws Exception {
               }
               // next stream should picks up where we left off
               try (WALEntryStream entryStream = new WALEntryStream(logQueue, CONF, lastPosition, log, null,
          -      new MetricsSource("1"), fakeWalGroupId)) {
          +        new MetricsSource("1"), fakeWalGroupId)) {
                 assertEquals("2", getRow(entryStream.next()));
                 assertEquals("3", getRow(entryStream.next()));
                 assertFalse(entryStream.hasNext()); // done
          @@ -218,13 +218,13 @@ public void testPosition() throws Exception {
               appendEntriesToLogAndSync(3);
               // read only one element
               try (WALEntryStream entryStream = new WALEntryStream(logQueue, CONF, lastPosition, log, null,
          -      new MetricsSource("1"), fakeWalGroupId)) {
          +        new MetricsSource("1"), fakeWalGroupId)) {
                 entryStream.next();
                 lastPosition = entryStream.getPosition();
               }
               // there should still be two more entries from where we left off
               try (WALEntryStream entryStream = new WALEntryStream(logQueue, CONF, lastPosition, log, null,
          -      new MetricsSource("1"), fakeWalGroupId)) {
          +        new MetricsSource("1"), fakeWalGroupId)) {
                 assertNotNull(entryStream.next());
                 assertNotNull(entryStream.next());
                 assertFalse(entryStream.hasNext());
          @@ -234,7 +234,7 @@ public void testPosition() throws Exception {
             @Test
             public void testEmptyStream() throws Exception {
               try (WALEntryStream entryStream =
          -      new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
          +        new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
                 assertFalse(entryStream.hasNext());
               }
             }
          @@ -244,9 +244,9 @@ public void testWALKeySerialization() throws Exception {
               Map attributes = new HashMap();
               attributes.put("foo", Bytes.toBytes("foo-value"));
               attributes.put("bar", Bytes.toBytes("bar-value"));
          -    WALKeyImpl key =
          -      new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, EnvironmentEdgeManager.currentTime(),
          -        new ArrayList(), 0L, 0L, mvcc, scopes, attributes);
          +    WALKeyImpl key = new WALKeyImpl(info.getEncodedNameAsBytes(), tableName,
          +        EnvironmentEdgeManager.currentTime(), new ArrayList(), 0L, 0L, mvcc, scopes,
          +        attributes);
               Assert.assertEquals(attributes, key.getExtendedAttributes());
           
               WALProtos.WALKey.Builder builder = key.getBuilder(WALCellCodec.getNoneCompressor());
          @@ -270,7 +270,7 @@ private ReplicationSource mockReplicationSource(boolean recovered, Configuration
               ReplicationSourceManager mockSourceManager = Mockito.mock(ReplicationSourceManager.class);
               when(mockSourceManager.getTotalBufferUsed()).thenReturn(new AtomicLong(0));
               when(mockSourceManager.getTotalBufferLimit())
          -      .thenReturn((long) HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT);
          +        .thenReturn((long) HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT);
               Server mockServer = Mockito.mock(Server.class);
               ReplicationSource source = Mockito.mock(ReplicationSource.class);
               when(source.getSourceManager()).thenReturn(mockSourceManager);
          @@ -279,7 +279,7 @@ private ReplicationSource mockReplicationSource(boolean recovered, Configuration
               when(source.getServer()).thenReturn(mockServer);
               when(source.isRecovered()).thenReturn(recovered);
               MetricsReplicationGlobalSourceSource globalMetrics =
          -      Mockito.mock(MetricsReplicationGlobalSourceSource.class);
          +        Mockito.mock(MetricsReplicationGlobalSourceSource.class);
               when(mockSourceManager.getGlobalMetrics()).thenReturn(globalMetrics);
               return source;
             }
          @@ -288,17 +288,17 @@ private ReplicationSourceWALReader createReader(boolean recovered, Configuration
               ReplicationSource source = mockReplicationSource(recovered, conf);
               when(source.isPeerEnabled()).thenReturn(true);
               ReplicationSourceWALReader reader = new ReplicationSourceWALReader(fs, conf, logQueue, 0,
          -      getDummyFilter(), source, fakeWalGroupId);
          +        getDummyFilter(), source, fakeWalGroupId);
               reader.start();
               return reader;
             }
           
             private ReplicationSourceWALReader createReaderWithBadReplicationFilter(int numFailures,
          -    Configuration conf) {
          +      Configuration conf) {
               ReplicationSource source = mockReplicationSource(false, conf);
               when(source.isPeerEnabled()).thenReturn(true);
               ReplicationSourceWALReader reader = new ReplicationSourceWALReader(fs, conf, logQueue, 0,
          -      getIntermittentFailingFilter(numFailures), source, fakeWalGroupId);
          +        getIntermittentFailingFilter(numFailures), source, fakeWalGroupId);
               reader.start();
               return reader;
             }
          @@ -309,7 +309,7 @@ public void testReplicationSourceWALReader() throws Exception {
               // get ending position
               long position;
               try (WALEntryStream entryStream =
          -      new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
          +        new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
                 entryStream.next();
                 entryStream.next();
                 entryStream.next();
          @@ -340,7 +340,7 @@ public void testReplicationSourceWALReaderWithFailingFilter() throws Exception {
               // get ending position
               long position;
               try (WALEntryStream entryStream =
          -      new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
          +        new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
                 entryStream.next();
                 entryStream.next();
                 entryStream.next();
          @@ -351,7 +351,7 @@ public void testReplicationSourceWALReaderWithFailingFilter() throws Exception {
               Path walPath = getQueue().peek();
               int numFailuresInFilter = 5;
               ReplicationSourceWALReader reader =
          -      createReaderWithBadReplicationFilter(numFailuresInFilter, CONF);
          +        createReaderWithBadReplicationFilter(numFailuresInFilter, CONF);
               WALEntryBatch entryBatch = reader.take();
               assertEquals(numFailuresInFilter, FailingWALEntryFilter.numFailures());
           
          @@ -406,8 +406,8 @@ public void testReplicationSourceWALReaderWrongPosition() throws Exception {
           
                 @Override
                 public boolean evaluate() throws Exception {
          -        return fs.getFileStatus(walPath).getLen() > 0 &&
           -          ((AbstractFSWAL<?>) log).getInflightWALCloseCount() == 0;
          +        return fs.getFileStatus(walPath).getLen() > 0
           +            && ((AbstractFSWAL<?>) log).getInflightWALCloseCount() == 0;
                 }
           
                 @Override
          @@ -423,8 +423,9 @@ public String explainFailure() throws Exception {
               assertEquals(walPath, entryBatch.getLastWalPath());
           
               long walLength = fs.getFileStatus(walPath).getLen();
          -    assertTrue("Position " + entryBatch.getLastWalPosition() + " is out of range, file length is " +
          -      walLength, entryBatch.getLastWalPosition() <= walLength);
          +    assertTrue("Position " + entryBatch.getLastWalPosition() + " is out of range, file length is "
          +        + walLength,
          +      entryBatch.getLastWalPosition() <= walLength);
               assertEquals(1, entryBatch.getNbEntries());
               assertTrue(entryBatch.isEndOfFile());
           
          @@ -450,12 +451,12 @@ public String explainFailure() throws Exception {
           
             @Test
             public void testReplicationSourceWALReaderDisabled()
          -    throws IOException, InterruptedException, ExecutionException {
          +      throws IOException, InterruptedException, ExecutionException {
               appendEntriesToLogAndSync(3);
               // get ending position
               long position;
               try (WALEntryStream entryStream =
          -      new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
          +        new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
                 entryStream.next();
                 entryStream.next();
                 entryStream.next();
          @@ -473,7 +474,7 @@ public void testReplicationSourceWALReaderDisabled()
               });
           
               ReplicationSourceWALReader reader = new ReplicationSourceWALReader(fs, CONF, logQueue, 0,
          -      getDummyFilter(), source, fakeWalGroupId);
          +        getDummyFilter(), source, fakeWalGroupId);
               reader.start();
                Future<WALEntryBatch> future = ForkJoinPool.commonPool().submit(() -> {
                 return reader.take();
          @@ -501,7 +502,8 @@ private String getRow(WAL.Entry entry) {
           
             private void appendToLog(String key) throws IOException {
               final long txid = log.appendData(info, new WALKeyImpl(info.getEncodedNameAsBytes(), tableName,
          -      EnvironmentEdgeManager.currentTime(), mvcc, scopes), getWALEdit(key));
          +        EnvironmentEdgeManager.currentTime(), mvcc, scopes),
          +      getWALEdit(key));
               log.sync(txid);
             }
           
          @@ -516,7 +518,7 @@ private void appendEntriesToLogAndSync(int count) throws IOException {
             private WALEdit getWALEdit(String row) {
               WALEdit edit = new WALEdit();
               edit.add(new KeyValue(Bytes.toBytes(row), family, qualifier,
          -      EnvironmentEdgeManager.currentTime(), qualifier));
          +        EnvironmentEdgeManager.currentTime(), qualifier));
               return edit;
             }
           
          @@ -563,7 +565,7 @@ public void testReadBeyondCommittedLength() throws IOException, InterruptedExcep
               long size = log.getLogFileSizeIfBeingWritten(getQueue().peek()).getAsLong();
               AtomicLong fileLength = new AtomicLong(size - 1);
               try (WALEntryStream entryStream = new WALEntryStream(logQueue, CONF, 0,
          -      p -> OptionalLong.of(fileLength.get()), null, new MetricsSource("1"), fakeWalGroupId)) {
          +        p -> OptionalLong.of(fileLength.get()), null, new MetricsSource("1"), fakeWalGroupId)) {
                 assertTrue(entryStream.hasNext());
                 assertNotNull(entryStream.next());
                 // can not get log 2
          @@ -610,7 +612,7 @@ public void testEOFExceptionForRecoveredQueue() throws Exception {
               ReplicationSourceLogQueue localLogQueue = new ReplicationSourceLogQueue(conf, metrics, source);
               localLogQueue.enqueueLog(emptyLog, fakeWalGroupId);
               ReplicationSourceWALReader reader = new ReplicationSourceWALReader(fs, conf, localLogQueue, 0,
          -      getDummyFilter(), source, fakeWalGroupId);
          +        getDummyFilter(), source, fakeWalGroupId);
               reader.run();
               // ReplicationSourceWALReaderThread#handleEofException method will
               // remove empty log from logQueue.
          @@ -638,7 +640,7 @@ public void testEOFExceptionForRecoveredQueueWithMultipleLogs() throws Exception
               ReplicationSourceManager mockSourceManager = mock(ReplicationSourceManager.class);
               // Make it look like the source is from recovered source.
               when(mockSourceManager.getOldSources())
          -      .thenReturn(new ArrayList<>(Arrays.asList((ReplicationSourceInterface) source)));
          +        .thenReturn(new ArrayList<>(Arrays.asList((ReplicationSourceInterface) source)));
               when(source.isPeerEnabled()).thenReturn(true);
               when(mockSourceManager.getTotalBufferUsed()).thenReturn(new AtomicLong(0));
               // Override the max retries multiplier to fail fast.
          @@ -647,7 +649,7 @@ public void testEOFExceptionForRecoveredQueueWithMultipleLogs() throws Exception
               conf.setInt("replication.source.nb.batches", 10);
               // Create a reader thread.
               ReplicationSourceWALReader reader = new ReplicationSourceWALReader(fs, conf, localLogQueue, 0,
          -      getDummyFilter(), source, fakeWalGroupId);
          +        getDummyFilter(), source, fakeWalGroupId);
               assertEquals("Initial log queue size is not correct", 2,
                 localLogQueue.getQueueSize(fakeWalGroupId));
               reader.run();
          @@ -690,7 +692,7 @@ public void testSizeOfLogQueue() throws Exception {
               assertEquals(2, logQueue.getMetrics().getSizeOfLogQueue());
           
               try (WALEntryStream entryStream =
          -      new WALEntryStream(logQueue, CONF, 0, log, null, logQueue.getMetrics(), fakeWalGroupId)) {
          +        new WALEntryStream(logQueue, CONF, 0, log, null, logQueue.getMetrics(), fakeWalGroupId)) {
                 // There's one edit in the log, read it.
                 assertTrue(entryStream.hasNext());
                 WAL.Entry entry = entryStream.next();
          @@ -708,7 +710,7 @@ public void testSizeOfLogQueue() throws Exception {
             @Test
             public void testCleanClosedWALs() throws Exception {
               try (WALEntryStream entryStream = new WALEntryStreamWithRetries(logQueue, CONF, 0, log, null,
          -      logQueue.getMetrics(), fakeWalGroupId)) {
          +        logQueue.getMetrics(), fakeWalGroupId)) {
                 assertEquals(0, logQueue.getMetrics().getUncleanlyClosedWALs());
                 appendToLogAndSync();
                 assertNotNull(entryStream.next());
          @@ -726,7 +728,7 @@ public void testCleanClosedWALs() throws Exception {
             @Test
             public void testEOFExceptionInOldWALsDirectory() throws Exception {
               assertEquals(1, logQueue.getQueueSize(fakeWalGroupId));
           -    AbstractFSWAL<?>  abstractWAL = (AbstractFSWAL<?>)log;
           +    AbstractFSWAL<?> abstractWAL = (AbstractFSWAL<?>) log;
               Path emptyLogFile = abstractWAL.getCurrentFileName();
               log.rollWriter(true);
           
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStreamAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStreamAsyncFSWAL.java
          index 6ad0d152820a..d55a2e2513cf 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStreamAsyncFSWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStreamAsyncFSWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -35,7 +35,7 @@ public class TestBasicWALEntryStreamAsyncFSWAL extends TestBasicWALEntryStream {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestBasicWALEntryStreamAsyncFSWAL.class);
          +      HBaseClassTestRule.forClass(TestBasicWALEntryStreamAsyncFSWAL.class);
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStreamFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStreamFSHLog.java
          index 75e85b550deb..b8d5a165bcb2 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStreamFSHLog.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestBasicWALEntryStreamFSHLog.java
          @@ -7,7 +7,7 @@
            * "License"); you may not use this file except in compliance
            * with the License.  You may obtain a copy of the License at
            *
          - * http://www.apache.org/licenses/LICENSE-2.0
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
            * Unless required by applicable law or agreed to in writing, software
            * distributed under the License is distributed on an "AS IS" BASIS,
          @@ -35,7 +35,7 @@ public class TestBasicWALEntryStreamFSHLog extends TestBasicWALEntryStream {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestBasicWALEntryStreamFSHLog.class);
          +      HBaseClassTestRule.forClass(TestBasicWALEntryStreamFSHLog.class);
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDrainReplicationQueuesForStandBy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDrainReplicationQueuesForStandBy.java
          index 5da7870dc385..0ee9fa77886e 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDrainReplicationQueuesForStandBy.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDrainReplicationQueuesForStandBy.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -43,7 +43,7 @@ public class TestDrainReplicationQueuesForStandBy extends SyncReplicationTestBas
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestDrainReplicationQueuesForStandBy.class);
          +      HBaseClassTestRule.forClass(TestDrainReplicationQueuesForStandBy.class);
           
             @Test
             public void test() throws Exception {
          @@ -57,7 +57,7 @@ public void test() throws Exception {
               HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
               String walGroupId = AbstractFSWALProvider.getWALPrefixFromWALName(
                 ((AbstractFSWAL) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build()))
          -        .getCurrentFileName().getName());
          +          .getCurrentFileName().getName());
               UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
                 SyncReplicationState.DOWNGRADE_ACTIVE);
               // transit cluster2 to DA and cluster 1 to S
          @@ -97,7 +97,7 @@ public String explainFailure() throws Exception {
           
               // confirm that we will not replicate the old data which causes inconsistency
               ReplicationSource source = (ReplicationSource) ((Replication) rs.getReplicationSourceService())
          -      .getReplicationManager().getSource(PEER_ID);
          +        .getReplicationManager().getSource(PEER_ID);
     UTIL1.waitFor(30000, new ExplainingPredicate<Exception>() {
           
                 @Override
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
          index b216af1e2a87..4b2f8f0601c9 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -26,7 +26,6 @@
           import java.util.HashSet;
           import java.util.List;
           import java.util.Set;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.HBaseClassTestRule;
           import org.apache.hadoop.hbase.HBaseConfiguration;
          @@ -43,7 +42,7 @@
           /**
            * Tests for DumpReplicationQueues tool
            */
          -@Category({ ReplicationTests.class, SmallTests.class})
          +@Category({ ReplicationTests.class, SmallTests.class })
           public class TestDumpReplicationQueues {
           
             @ClassRule
          @@ -66,18 +65,17 @@ public void testDumpReplicationReturnsWalSorted() throws Exception {
               String server = "rs1,60030," + EnvironmentEdgeManager.currentTime();
               nodes.add(server);
               when(recoverableZooKeeperMock.getChildren("/hbase/rs", null)).thenReturn(nodes);
          -    when(recoverableZooKeeperMock.getChildren("/hbase/replication/rs", null)).
          -        thenReturn(nodes);
          +    when(recoverableZooKeeperMock.getChildren("/hbase/replication/rs", null)).thenReturn(nodes);
     List<String> queuesIds = new ArrayList<>();
               queuesIds.add("1");
          -    when(recoverableZooKeeperMock.getChildren("/hbase/replication/rs/"+server, null)).
          -        thenReturn(queuesIds);
          +    when(recoverableZooKeeperMock.getChildren("/hbase/replication/rs/" + server, null))
          +        .thenReturn(queuesIds);
     List<String> wals = new ArrayList<>();
               wals.add("rs1%2C60964%2C1549394085556.1549394101427");
               wals.add("rs1%2C60964%2C1549394085556.1549394101426");
               wals.add("rs1%2C60964%2C1549394085556.1549394101428");
          -    when(recoverableZooKeeperMock.getChildren("/hbase/replication/rs/"+server+"/1",
          -        null)).thenReturn(wals);
          +    when(recoverableZooKeeperMock.getChildren("/hbase/replication/rs/" + server + "/1", null))
          +        .thenReturn(wals);
               DumpReplicationQueues dumpQueues = new DumpReplicationQueues();
     Set<String> peerIds = new HashSet<>();
               peerIds.add("1");
          @@ -85,15 +83,15 @@ public void testDumpReplicationReturnsWalSorted() throws Exception {
               String dump = dumpQueues.dumpQueues(zkWatcherMock, peerIds, false);
               String[] parsedDump = dump.split("Replication position for");
               assertEquals("Parsed dump should have 4 parts.", 4, parsedDump.length);
          -    assertTrue("First wal should be rs1%2C60964%2C1549394085556.1549394101426, but got: "
          -        + parsedDump[1],
          -        parsedDump[1].indexOf("rs1%2C60964%2C1549394085556.1549394101426")>=0);
          -    assertTrue("Second wal should be rs1%2C60964%2C1549394085556.1549394101427, but got: "
          -            + parsedDump[2],
          -        parsedDump[2].indexOf("rs1%2C60964%2C1549394085556.1549394101427")>=0);
          -    assertTrue("Third wal should be rs1%2C60964%2C1549394085556.1549394101428, but got: "
          -            + parsedDump[3],
          -        parsedDump[3].indexOf("rs1%2C60964%2C1549394085556.1549394101428")>=0);
          +    assertTrue(
          +      "First wal should be rs1%2C60964%2C1549394085556.1549394101426, but got: " + parsedDump[1],
          +      parsedDump[1].indexOf("rs1%2C60964%2C1549394085556.1549394101426") >= 0);
          +    assertTrue(
          +      "Second wal should be rs1%2C60964%2C1549394085556.1549394101427, but got: " + parsedDump[2],
          +      parsedDump[2].indexOf("rs1%2C60964%2C1549394085556.1549394101427") >= 0);
          +    assertTrue(
          +      "Third wal should be rs1%2C60964%2C1549394085556.1549394101428, but got: " + parsedDump[3],
          +      parsedDump[3].indexOf("rs1%2C60964%2C1549394085556.1549394101428") >= 0);
             }
           
           }
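
The assertions above expect the dumpQueues() output to list WALs oldest first. As a minimal, self-contained sketch (plain Java with a hypothetical class name, not HBase code), the ordering being checked amounts to comparing the numeric timestamp that follows the final dot in each WAL name:

  import java.util.ArrayList;
  import java.util.Comparator;
  import java.util.List;

  public class WalNameSortSketch {
    // The WAL creation timestamp is the numeric suffix after the last '.'.
    static long walTimestamp(String walName) {
      return Long.parseLong(walName.substring(walName.lastIndexOf('.') + 1));
    }

    public static void main(String[] args) {
      List<String> wals = new ArrayList<>();
      wals.add("rs1%2C60964%2C1549394085556.1549394101427");
      wals.add("rs1%2C60964%2C1549394085556.1549394101426");
      wals.add("rs1%2C60964%2C1549394085556.1549394101428");

      // Oldest first, the order the dump output is expected to follow.
      wals.sort(Comparator.comparingLong(WalNameSortSketch::walTimestamp));

      // Prints ...101426, ...101427, ...101428 in that order.
      wals.forEach(System.out::println);
    }
  }
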
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
          index f6a31bba4d77..0cb4ec39539a 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -101,14 +101,14 @@ public static void setUpBeforeClass() throws Exception {
               utility2.setZkCluster(miniZK);
               new ZKWatcher(conf2, "cluster2", null, true);
           
          -    ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
          -      .setClusterKey(utility2.getClusterKey()).build();
          +    ReplicationPeerConfig rpc =
          +        ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()).build();
           
               utility1.startMiniCluster();
               utility2.startMiniCluster();
           
               try (Connection connection = ConnectionFactory.createConnection(utility1.getConfiguration());
          -      Admin admin1 = connection.getAdmin()) {
          +        Admin admin1 = connection.getAdmin()) {
                 admin1.addReplicationPeer("peer1", rpc);
                 admin1.addReplicationPeer("peer2", rpc);
                 admin1.addReplicationPeer("peer3", rpc);
          @@ -122,21 +122,21 @@ public static void tearDownAfterClass() throws Exception {
               utility1.shutdownMiniCluster();
             }
           
          -
             volatile private boolean testQuotaPass = false;
             volatile private boolean testQuotaNonZero = false;
          +
             @Test
             public void testQuota() throws IOException {
               final TableName tableName = TableName.valueOf(name.getMethodName());
               TableDescriptor tableDescriptor =
          -      TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder
          -        .newBuilder(famName).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build();
          +        TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder
          +            .newBuilder(famName).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build();
               utility1.getAdmin().createTable(tableDescriptor);
               utility2.getAdmin().createTable(tableDescriptor);
           
          -    Thread watcher = new Thread(()->{
          -      Replication replication = (Replication)utility1.getMiniHBaseCluster()
          -          .getRegionServer(0).getReplicationSourceService();
          +    Thread watcher = new Thread(() -> {
          +      Replication replication = (Replication) utility1.getMiniHBaseCluster().getRegionServer(0)
          +          .getReplicationSourceService();
                 AtomicLong bufferUsed = replication.getReplicationManager().getTotalBufferUsed();
                 testQuotaPass = true;
                 while (!Thread.interrupted()) {
          @@ -144,9 +144,12 @@ public void testQuota() throws IOException {
                   if (size > 0) {
                     testQuotaNonZero = true;
                   }
          -        //the reason here doing "numOfPeer + 1" is because by using method addEntryToBatch(), even the
          -        // batch size (after added last entry) exceeds quota, it still keeps the last one in the batch
          -        // so total used buffer size can be one "replication.total.buffer.quota" larger than expected
+        // The reason for "numOfPeer + 1" is that addEntryToBatch() keeps the last entry in the
+        // batch even when adding it pushes the batch size over the quota, so the total buffer
+        // used can be one "replication.total.buffer.quota" larger than expected.
                   if (size > REPLICATION_SOURCE_QUOTA * (numOfPeer + 1)) {
                     // We read logs first then check throttler, so if the buffer quota limiter doesn't
                     // take effect, it will push many logs and exceed the quota.
          @@ -157,7 +160,7 @@ public void testQuota() throws IOException {
               });
               watcher.start();
           
          -    try(Table t1 = utility1.getConnection().getTable(tableName);
          +    try (Table t1 = utility1.getConnection().getTable(tableName);
                   Table t2 = utility2.getConnection().getTable(tableName)) {
                 for (int i = 0; i < 50; i++) {
                   Put put = new Put(ROWS[i]);
          @@ -188,5 +191,4 @@ public void testQuota() throws IOException {
               Assert.assertTrue(testQuotaNonZero);
             }
           
          -
           }
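
The watcher in the hunk above tolerates up to REPLICATION_SOURCE_QUOTA * (numOfPeer + 1) of buffered data because, as the comment in testQuota() notes, addEntryToBatch() keeps the entry that pushes a batch over the quota. A minimal sketch of that check-after-add policy (plain Java, hypothetical names and sizes, not the HBase implementation):

  import java.util.ArrayList;
  import java.util.List;

  public class BatchQuotaSketch {
    // Hypothetical per-source quota, standing in for "replication.total.buffer.quota".
    static final long QUOTA = 100;

    // Mimics the policy described above: an entry is added first and the quota is checked
    // afterwards, so a batch can end up to one entry over the quota.
    static List<Long> buildBatch(List<Long> pendingEntrySizes) {
      List<Long> batch = new ArrayList<>();
      long used = 0;
      for (long entrySize : pendingEntrySizes) {
        batch.add(entrySize); // keep the entry unconditionally
        used += entrySize;
        if (used >= QUOTA) {  // only then check the quota
          break;
        }
      }
      return batch;
    }

    public static void main(String[] args) {
      List<Long> batch = buildBatch(List.of(40L, 40L, 40L, 40L));
      long used = batch.stream().mapToLong(Long::longValue).sum();
      // Three entries are kept and used == 120 > QUOTA: each source can overshoot by at most
      // one batch, hence the numOfPeer + 1 tolerance in the watcher above.
      System.out.println("entries=" + batch.size() + ", used=" + used);
    }
  }
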
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java
          index c55aee3c1c2a..d8dcf6799122 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -24,7 +24,6 @@
           
           import java.util.ArrayList;
           import java.util.List;
          -
           import org.apache.hadoop.hbase.Cell;
           import org.apache.hadoop.hbase.CellUtil;
           import org.apache.hadoop.hbase.HBaseClassTestRule;
          @@ -99,15 +98,15 @@ public void testFilterNotExistColumnFamilyEdits() {
     List<List<Entry>> entryList = new ArrayList<>();
               // should be filtered
               Cell c1 = new KeyValue(ROW, NON_EXISTING_FAMILY, QUALIFIER,
          -      EnvironmentEdgeManager.currentTime(), Type.Put, VALUE);
          -    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE1,
          -      EnvironmentEdgeManager.currentTime()), new WALEdit().add(c1));
          +        EnvironmentEdgeManager.currentTime(), Type.Put, VALUE);
          +    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE1, EnvironmentEdgeManager.currentTime()),
          +        new WALEdit().add(c1));
               entryList.add(Lists.newArrayList(e1));
               // should be kept
          -    Cell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(),
          -      Type.Put, VALUE);
          -    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1,
          -      EnvironmentEdgeManager.currentTime()), new WALEdit().add(c2));
          +    Cell c2 =
          +        new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(), Type.Put, VALUE);
          +    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1, EnvironmentEdgeManager.currentTime()),
          +        new WALEdit().add(c2));
               entryList.add(Lists.newArrayList(e2, e1));
     List<List<Entry>> filtered = endpoint.filterNotExistColumnFamilyEdits(entryList);
               assertEquals(1, filtered.size());
          @@ -120,16 +119,16 @@ public void testFilterNotExistColumnFamilyEdits() {
             public void testFilterNotExistTableEdits() {
     List<List<Entry>> entryList = new ArrayList<>();
               // should be filtered
          -    Cell c1 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(),
          -      Type.Put, VALUE);
          -    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE2,
          -      EnvironmentEdgeManager.currentTime()), new WALEdit().add(c1));
          +    Cell c1 =
          +        new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(), Type.Put, VALUE);
          +    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE2, EnvironmentEdgeManager.currentTime()),
          +        new WALEdit().add(c1));
               entryList.add(Lists.newArrayList(e1));
               // should be kept
          -    Cell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(),
          -      Type.Put, VALUE);
          -    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1,
          -      EnvironmentEdgeManager.currentTime()), new WALEdit().add(c2));
          +    Cell c2 =
          +        new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(), Type.Put, VALUE);
          +    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1, EnvironmentEdgeManager.currentTime()),
          +        new WALEdit().add(c2));
               entryList.add(Lists.newArrayList(e2));
     List<List<Entry>> filtered = endpoint.filterNotExistTableEdits(entryList);
               assertEquals(1, filtered.size());
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java
          index 2fcfc29a1636..a9b4ca025eb7 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java
          @@ -73,7 +73,7 @@ public class TestMetaRegionReplicaReplication {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestMetaRegionReplicaReplication.class);
          +      HBaseClassTestRule.forClass(TestMetaRegionReplicaReplication.class);
             private static final Logger LOG = LoggerFactory.getLogger(TestMetaRegionReplicaReplication.class);
             private static final int NB_SERVERS = 4;
             private final HBaseTestingUtil HTU = new HBaseTestingUtil();
          @@ -100,7 +100,7 @@ public void before() throws Exception {
               HBaseTestingUtil.setReplicas(HTU.getAdmin(), TableName.META_TABLE_NAME, numOfMetaReplica);
           
               HTU.waitFor(30000, () -> HTU.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME)
          -      .size() >= numOfMetaReplica);
          +        .size() >= numOfMetaReplica);
             }
           
             @After
          @@ -132,7 +132,7 @@ public void testHBaseMetaReplicates() throws Exception {
             @Test
             public void testCatalogReplicaReplicationWithFlushAndCompaction() throws Exception {
               try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
          -      Table table = connection.getTable(TableName.META_TABLE_NAME)) {
          +        Table table = connection.getTable(TableName.META_TABLE_NAME)) {
                 // load the data to the table
                 for (int i = 0; i < 5; i++) {
                   LOG.info("Writing data from " + i * 1000 + " to " + (i * 1000 + 1000));
          @@ -180,7 +180,7 @@ public void testCatalogReplicaReplicationWithReplicaMoved() throws Exception {
                 }
               }
               try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
          -      Table table = connection.getTable(TableName.META_TABLE_NAME)) {
          +        Table table = connection.getTable(TableName.META_TABLE_NAME)) {
                 // load the data to the table
                 for (int i = 0; i < 5; i++) {
                   LOG.info("Writing data from " + i * 1000 + " to " + (i * 1000 + 1000));
          @@ -196,12 +196,12 @@ public void testCatalogReplicaReplicationWithReplicaMoved() throws Exception {
             }
           
             protected void verifyReplication(TableName tableName, int regionReplication, final int startRow,
          -    final int endRow, final byte[] family) throws Exception {
          +      final int endRow, final byte[] family) throws Exception {
               verifyReplication(tableName, regionReplication, startRow, endRow, family, true);
             }
           
             private void verifyReplication(TableName tableName, int regionReplication, final int startRow,
          -    final int endRow, final byte[] family, final boolean present) throws Exception {
          +      final int endRow, final byte[] family, final boolean present) throws Exception {
               // find the regions
               final Region[] regions = new Region[regionReplication];
           
          @@ -276,7 +276,7 @@ private Region[] getAllRegions(TableName tableName, int replication) {
              * they get the delete of the table rows too).
              */
             private void verifyDeletedReplication(TableName tableName, int regionReplication,
          -    final TableName deletedTableName) {
          +      final TableName deletedTableName) {
               final Region[] regions = getAllRegions(tableName, regionReplication);
           
               // Start count at '1' so we skip default, primary replica and only look at secondaries.
@@ -322,7 +322,7 @@ private boolean doesNotContain(List<Cell> cells, TableName tableName) {
              * Verify Replicas have results (exactly).
              */
             private void verifyReplication(TableName tableName, int regionReplication,
          -    List contains) {
          +      List contains) {
               final Region[] regions = getAllRegions(tableName, regionReplication);
           
               // Start count at '1' so we skip default, primary replica and only look at secondaries.
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRaceWhenCreatingReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRaceWhenCreatingReplicationSource.java
          index b484b9db530a..898b90c6e0ad 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRaceWhenCreatingReplicationSource.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRaceWhenCreatingReplicationSource.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -60,7 +60,7 @@ public class TestRaceWhenCreatingReplicationSource {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestRaceWhenCreatingReplicationSource.class);
          +      HBaseClassTestRule.forClass(TestRaceWhenCreatingReplicationSource.class);
           
             private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
           
          @@ -146,7 +146,7 @@ public static void setUpBeforeClass() throws Exception {
               WRITER = WALFactory.createWALWriter(FS, LOG_PATH, UTIL.getConfiguration());
               UTIL.getAdmin().addReplicationPeer(PEER_ID,
                 ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase")
          -        .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()).build(),
          +          .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()).build(),
                 true);
             }
           
          @@ -163,8 +163,8 @@ public void testRace() throws Exception {
                 public boolean evaluate() throws Exception {
                   for (RegionServerThread t : UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
                     ReplicationSource source =
          -            (ReplicationSource) ((Replication) t.getRegionServer().getReplicationSourceService())
          -              .getReplicationManager().getSource(PEER_ID);
          +              (ReplicationSource) ((Replication) t.getRegionServer().getReplicationSourceService())
          +                  .getReplicationManager().getSource(PEER_ID);
                     if (source == null || source.getReplicationEndpoint() == null) {
                       return false;
                     }
          @@ -179,7 +179,7 @@ public String explainFailure() throws Exception {
               });
               UTIL.getAdmin().createTable(
                 TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder
          -        .newBuilder(CF).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build());
          +          .newBuilder(CF).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build());
               UTIL.waitTableAvailable(TABLE_NAME);
               try (Table table = UTIL.getConnection().getTable(TABLE_NAME)) {
                 table.put(new Put(Bytes.toBytes(1)).addColumn(CF, CQ, Bytes.toBytes(1)));
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java
          index 80d416c05058..eeb6540f26d9 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshPeerWhileRegionServerRestarts.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -49,7 +49,7 @@ public class TestRefreshPeerWhileRegionServerRestarts extends TestReplicationBas
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestRefreshPeerWhileRegionServerRestarts.class);
          +      HBaseClassTestRule.forClass(TestRefreshPeerWhileRegionServerRestarts.class);
           
             private static CountDownLatch ARRIVE;
           
          @@ -63,7 +63,7 @@ public RegionServerForTest(Configuration conf) throws IOException {
           
               @Override
               protected void tryRegionServerReport(long reportStartTime, long reportEndTime)
          -      throws IOException {
          +        throws IOException {
                 if (ARRIVE != null) {
                   ARRIVE.countDown();
                   ARRIVE = null;
          @@ -86,7 +86,7 @@ public void testRestart() throws Exception {
               // restart a new region server, and wait until it finish initialization and want to call
               // regionServerReport, so it will load the peer state to peer cache.
     Future<HRegionServer> regionServerFuture = ForkJoinPool.commonPool()
          -      .submit(() -> UTIL1.getMiniHBaseCluster().startRegionServer().getRegionServer());
          +        .submit(() -> UTIL1.getMiniHBaseCluster().startRegionServer().getRegionServer());
               ARRIVE.await();
               // change the peer state, wait until it reach the last state, where we have already get the
               // region server list for refreshing
          @@ -95,8 +95,8 @@ public void testRestart() throws Exception {
                 UTIL1.waitFor(30000, () -> {
         for (Procedure<?> proc : UTIL1.getMiniHBaseCluster().getMaster().getProcedures()) {
                     if (proc instanceof DisablePeerProcedure) {
          -            return ((DisablePeerProcedure) proc).getCurrentStateId() ==
          -              MasterProcedureProtos.PeerModificationState.POST_PEER_MODIFICATION_VALUE;
          +            return ((DisablePeerProcedure) proc)
          +                .getCurrentStateId() == MasterProcedureProtos.PeerModificationState.POST_PEER_MODIFICATION_VALUE;
                     }
                   }
                   return false;
          @@ -109,7 +109,7 @@ public void testRestart() throws Exception {
               future.get();
               // assert that the peer cache on the new region server has also been refreshed
               ReplicationPeer peer = regionServerFuture.get().getReplicationSourceService()
          -      .getReplicationPeers().getPeer(PEER_ID2);
          +        .getReplicationPeers().getPeer(PEER_ID2);
               assertEquals(PeerState.DISABLED, peer.getPeerState());
             }
           }
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java
          index cf4f7106f060..ff835bcf452c 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRefreshRecoveredReplication.java
          @@ -17,12 +17,13 @@
            */
           package org.apache.hadoop.hbase.replication.regionserver;
           
          +import static org.junit.Assert.assertEquals;
          +
           import java.io.IOException;
           import java.util.Collection;
           import java.util.List;
           import java.util.Optional;
           import java.util.stream.Collectors;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.HBaseClassTestRule;
           import org.apache.hadoop.hbase.HConstants;
          @@ -55,7 +56,6 @@
           import org.slf4j.LoggerFactory;
           
           import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
          -import static org.junit.Assert.assertEquals;
           
           /**
            * Testcase for HBASE-24871.
          @@ -98,10 +98,9 @@ public void setup() throws Exception {
               setUpBase();
           
               tablename = TableName.valueOf(name.getMethodName());
          -    TableDescriptor table = TableDescriptorBuilder.newBuilder(tablename)
          -        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName)
          -            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          -        .build();
          +    TableDescriptor table =
          +        TableDescriptorBuilder.newBuilder(tablename).setColumnFamily(ColumnFamilyDescriptorBuilder
          +            .newBuilder(famName).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build();
           
               UTIL1.getAdmin().createTable(table);
               UTIL2.getAdmin().createTable(table);
          @@ -134,16 +133,17 @@ public void testReplicationRefreshSource() throws Exception {
                   .filter(rst -> CollectionUtils.isNotEmpty(rst.getRegionServer().getRegions(tablename)))
                   .findAny();
               Assert.assertTrue(server.isPresent());
          -    HRegionServer otherServer = rss.get(0).getRegionServer() == server.get().getRegionServer()?
          -      rss.get(1).getRegionServer(): rss.get(0).getRegionServer();
          +    HRegionServer otherServer = rss.get(0).getRegionServer() == server.get().getRegionServer()
          +        ? rss.get(1).getRegionServer()
          +        : rss.get(0).getRegionServer();
               server.get().getRegionServer().abort("stopping for test");
               // waiting for recovered peer to appear.
          -    Replication replication = (Replication)otherServer.getReplicationSourceService();
          +    Replication replication = (Replication) otherServer.getReplicationSourceService();
               UTIL1.waitFor(60000, () -> !replication.getReplicationManager().getOldSources().isEmpty());
               // Wait on only one server being up.
               UTIL1.waitFor(60000, () ->
          -      // Have to go back to source here because getLiveRegionServerThreads makes new array each time
          -      UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().size() == NUM_SLAVES1 - 1);
          +    // Have to go back to source here because getLiveRegionServerThreads makes new array each time
          +    UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().size() == NUM_SLAVES1 - 1);
               UTIL1.waitTableAvailable(tablename);
               LOG.info("Available {}", tablename);
           
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplication.java
          index ac279ed6e5ea..53e464c660d8 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplication.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplication.java
          @@ -56,15 +56,14 @@
            * Tests region replication by setting up region replicas and verifying async wal replication
            * replays the edits to the secondary region in various scenarios.
            */
          -@Category({FlakeyTests.class, LargeTests.class})
          +@Category({ FlakeyTests.class, LargeTests.class })
           public class TestRegionReplicaReplication {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
                 HBaseClassTestRule.forClass(TestRegionReplicaReplication.class);
           
          -  private static final Logger LOG =
          -      LoggerFactory.getLogger(TestRegionReplicaReplication.class);
          +  private static final Logger LOG = LoggerFactory.getLogger(TestRegionReplicaReplication.class);
           
             private static final int NB_SERVERS = 2;
           
          @@ -100,13 +99,13 @@ public static void afterClass() throws Exception {
             private void testRegionReplicaReplication(int regionReplication) throws Exception {
               // test region replica replication. Create a table with single region, write some data
               // ensure that data is replicated to the secondary region
          -    TableName tableName = TableName.valueOf("testRegionReplicaReplicationWithReplicas_"
          -        + regionReplication);
          +    TableName tableName =
          +        TableName.valueOf("testRegionReplicaReplicationWithReplicas_" + regionReplication);
               TableDescriptor htd = HTU
          -      .createModifyableTableDescriptor(TableName.valueOf(tableName.toString()),
          -        ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
          -        ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED)
          -      .setRegionReplication(regionReplication).build();
          +        .createModifyableTableDescriptor(TableName.valueOf(tableName.toString()),
          +          ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
          +          ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED)
          +        .setRegionReplication(regionReplication).build();
               createOrEnableTableWithRetries(htd, true);
               TableName tableNameNoReplicas =
                   TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS");
          @@ -114,8 +113,8 @@ private void testRegionReplicaReplication(int regionReplication) throws Exceptio
               HTU.createTable(tableNameNoReplicas, HBaseTestingUtil.fam1);
           
               try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
          -      Table table = connection.getTable(tableName);
          -      Table tableNoReplicas = connection.getTable(tableNameNoReplicas)) {
          +        Table table = connection.getTable(tableName);
          +        Table tableNoReplicas = connection.getTable(tableNameNoReplicas)) {
                 // load some data to the non-replicated table
                 HTU.loadNumericRows(tableNoReplicas, HBaseTestingUtil.fam1, 6000, 7000);
           
          @@ -128,17 +127,17 @@ private void testRegionReplicaReplication(int regionReplication) throws Exceptio
               }
             }
           
          -  private void verifyReplication(TableName tableName, int regionReplication,
          -      final int startRow, final int endRow) throws Exception {
          +  private void verifyReplication(TableName tableName, int regionReplication, final int startRow,
          +      final int endRow) throws Exception {
               verifyReplication(tableName, regionReplication, startRow, endRow, true);
             }
           
          -  private void verifyReplication(TableName tableName, int regionReplication,
          -      final int startRow, final int endRow, final boolean present) throws Exception {
          +  private void verifyReplication(TableName tableName, int regionReplication, final int startRow,
          +      final int endRow, final boolean present) throws Exception {
               // find the regions
               final Region[] regions = new Region[regionReplication];
           
          -    for (int i=0; i < NB_SERVERS; i++) {
          +    for (int i = 0; i < NB_SERVERS; i++) {
                 HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(i);
       List<HRegion> onlineRegions = rs.getRegions(tableName);
                 for (HRegion region : onlineRegions) {
          @@ -159,7 +158,7 @@ public boolean evaluate() throws Exception {
                     LOG.info("verifying replication for region replica:" + region.getRegionInfo());
                     try {
                       HTU.verifyNumericRows(region, HBaseTestingUtil.fam1, startRow, endRow, present);
          -          } catch(Throwable ex) {
          +          } catch (Throwable ex) {
                       LOG.warn("Verification from secondary region is not complete yet", ex);
                       // still wait
                       return false;
          @@ -189,7 +188,7 @@ public void testRegionReplicaReplicationWith10Replicas() throws Exception {
             public void testRegionReplicaWithoutMemstoreReplication() throws Exception {
               int regionReplication = 3;
               TableDescriptor htd = HTU.createModifyableTableDescriptor(name.getMethodName())
          -      .setRegionReplication(regionReplication).setRegionMemStoreReplication(false).build();
          +        .setRegionReplication(regionReplication).setRegionMemStoreReplication(false).build();
               createOrEnableTableWithRetries(htd, true);
               final TableName tableName = htd.getTableName();
               Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
          @@ -223,7 +222,7 @@ public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception
               // to their stores
               int regionReplication = 3;
               TableDescriptor htd = HTU.createModifyableTableDescriptor(name.getMethodName())
          -      .setRegionReplication(regionReplication).build();
          +        .setRegionReplication(regionReplication).build();
               createOrEnableTableWithRetries(htd, true);
               final TableName tableName = htd.getTableName();
           
          @@ -233,8 +232,8 @@ public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception
                 // load the data to the table
           
                 for (int i = 0; i < 6000; i += 1000) {
          -        LOG.info("Writing data from " + i + " to " + (i+1000));
          -        HTU.loadNumericRows(table, HBaseTestingUtil.fam1, i, i+1000);
          +        LOG.info("Writing data from " + i + " to " + (i + 1000));
          +        HTU.loadNumericRows(table, HBaseTestingUtil.fam1, i, i + 1000);
                   LOG.info("flushing table");
                   HTU.flush(tableName);
                   LOG.info("compacting table");
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java
          index 62fc4a3a90e4..b9fa983474c8 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationCompressedWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -21,7 +21,6 @@
           import static org.junit.Assert.fail;
           
           import java.io.IOException;
          -
           import org.apache.hadoop.hbase.HBaseClassTestRule;
           import org.apache.hadoop.hbase.HConstants;
           import org.apache.hadoop.hbase.client.Get;
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
          index 10a5affcbce2..5ebe3a9f6bd1 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
          @@ -78,7 +78,7 @@
           import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
           import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey;
           
          -@Category({ReplicationTests.class, LargeTests.class})
          +@Category({ ReplicationTests.class, LargeTests.class })
           public class TestReplicationSink {
           
             @ClassRule
          @@ -120,7 +120,7 @@ public void stop(String why) {
             protected static String hfileArchiveDir;
             protected static String replicationClusterId;
           
          -   /**
          +  /**
              * @throws java.lang.Exception
              */
             @BeforeClass
          @@ -163,7 +163,7 @@ public void setUp() throws Exception {
             public void testBatchSink() throws Exception {
     List<WALEntry> entries = new ArrayList<>(BATCH_SIZE);
     List<Cell> cells = new ArrayList<>();
          -    for(int i = 0; i < BATCH_SIZE; i++) {
          +    for (int i = 0; i < BATCH_SIZE; i++) {
                 entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
               }
               SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
          @@ -179,9 +179,9 @@ public void testBatchSink() throws Exception {
              */
             @Test
             public void testMixedPutDelete() throws Exception {
-    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE/2);
+    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE / 2);
     List<Cell> cells = new ArrayList<>();
          -    for(int i = 0; i < BATCH_SIZE/2; i++) {
          +    for (int i = 0; i < BATCH_SIZE / 2; i++) {
                 entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
               }
               SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId,
          @@ -189,16 +189,16 @@ public void testMixedPutDelete() throws Exception {
           
               entries = new ArrayList<>(BATCH_SIZE);
               cells = new ArrayList<>();
          -    for(int i = 0; i < BATCH_SIZE; i++) {
          +    for (int i = 0; i < BATCH_SIZE; i++) {
                 entries.add(createEntry(TABLE_NAME1, i,
          -          i % 2 != 0 ? KeyValue.Type.Put: KeyValue.Type.DeleteColumn, cells));
          +        i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells));
               }
           
               SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
                 replicationClusterId, baseNamespaceDir, hfileArchiveDir);
               Scan scan = new Scan();
               ResultScanner scanRes = table1.getScanner(scan);
          -    assertEquals(BATCH_SIZE/2, scanRes.next(BATCH_SIZE).length);
          +    assertEquals(BATCH_SIZE / 2, scanRes.next(BATCH_SIZE).length);
             }
           
             @Test
          @@ -221,9 +221,8 @@ public void testLargeEditsPutDelete() throws Exception {
               entries = new ArrayList<>();
               cells = new ArrayList<>();
               for (int i = 0; i < 11000; i++) {
          -      entries.add(
          -        createEntry(TABLE_NAME1, i, i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn,
          -          cells));
          +      entries.add(createEntry(TABLE_NAME1, i,
          +        i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells));
               }
               SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId,
                 baseNamespaceDir, hfileArchiveDir);
          @@ -241,22 +240,21 @@ public void testLargeEditsPutDelete() throws Exception {
              */
             @Test
             public void testMixedPutTables() throws Exception {
-    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE/2);
+    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE / 2);
     List<Cell> cells = new ArrayList<>();
          -    for(int i = 0; i < BATCH_SIZE; i++) {
          -      entries.add(createEntry( i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1,
          -              i, KeyValue.Type.Put, cells));
          +    for (int i = 0; i < BATCH_SIZE; i++) {
          +      entries.add(createEntry(i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1, i, KeyValue.Type.Put, cells));
               }
           
               SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
                 replicationClusterId, baseNamespaceDir, hfileArchiveDir);
               Scan scan = new Scan();
               ResultScanner scanRes = table2.getScanner(scan);
          -    for(Result res : scanRes) {
          +    for (Result res : scanRes) {
                 assertEquals(0, Bytes.toInt(res.getRow()) % 2);
               }
               scanRes = table1.getScanner(scan);
          -    for(Result res : scanRes) {
          +    for (Result res : scanRes) {
                 assertEquals(1, Bytes.toInt(res.getRow()) % 2);
               }
             }
          @@ -269,7 +267,7 @@ public void testMixedPutTables() throws Exception {
             public void testMixedDeletes() throws Exception {
     List<WALEntry> entries = new ArrayList<>(3);
     List<Cell> cells = new ArrayList<>();
          -    for(int i = 0; i < 3; i++) {
          +    for (int i = 0; i < 3; i++) {
                 entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
               }
               SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
          @@ -289,19 +287,19 @@ public void testMixedDeletes() throws Exception {
             }
           
             /**
          -   * Puts are buffered, but this tests when a delete (not-buffered) is applied
          -   * before the actual Put that creates it.
          +   * Puts are buffered, but this tests when a delete (not-buffered) is applied before the actual Put
          +   * that creates it.
              * @throws Exception
              */
             @Test
             public void testApplyDeleteBeforePut() throws Exception {
     List<WALEntry> entries = new ArrayList<>(5);
     List<Cell> cells = new ArrayList<>();
          -    for(int i = 0; i < 2; i++) {
          +    for (int i = 0; i < 2; i++) {
                 entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
               }
               entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells));
          -    for(int i = 3; i < 5; i++) {
          +    for (int i = 3; i < 5; i++) {
                 entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
               }
               SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
          @@ -387,12 +385,10 @@ public void testReplicateEntriesForHFiles() throws Exception {
               try (Connection c = ConnectionFactory.createConnection(conf);
                   RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
                 RegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegion();
          -      loadDescriptor =
          -          ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1,
          -              UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()),
          -              storeFiles, storeFilesSize, 1);
          -      edit = org.apache.hadoop.hbase.wal.WALEdit.createBulkLoadEvent(regionInfo,
          -        loadDescriptor);
          +      loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1,
          +        UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()), storeFiles,
          +        storeFilesSize, 1);
          +      edit = org.apache.hadoop.hbase.wal.WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor);
               }
     List<WALEntry> entries = new ArrayList<>(1);
           
          @@ -427,7 +423,7 @@ public void testReplicateEntriesForHFiles() throws Exception {
               // Clean up the created hfiles or it will mess up subsequent tests
             }
           
-  private WALEntry createEntry(TableName table, int row,  KeyValue.Type type, List<Cell> cells) {
+  private WALEntry createEntry(TableName table, int row, KeyValue.Type type, List<Cell> cells) {
               byte[] fam = table.equals(TABLE_NAME1) ? FAM_NAME1 : FAM_NAME2;
               byte[] rowBytes = Bytes.toBytes(row);
               // Just make sure we don't get the same ts for two consecutive rows with
          @@ -439,15 +435,12 @@ private WALEntry createEntry(TableName table, int row,  KeyValue.Type type, List
               }
               final long now = EnvironmentEdgeManager.currentTime();
               KeyValue kv = null;
          -    if(type.getCode() == KeyValue.Type.Put.getCode()) {
          -      kv = new KeyValue(rowBytes, fam, fam, now,
          -          KeyValue.Type.Put, Bytes.toBytes(row));
          +    if (type.getCode() == KeyValue.Type.Put.getCode()) {
          +      kv = new KeyValue(rowBytes, fam, fam, now, KeyValue.Type.Put, Bytes.toBytes(row));
               } else if (type.getCode() == KeyValue.Type.DeleteColumn.getCode()) {
          -        kv = new KeyValue(rowBytes, fam, fam,
          -            now, KeyValue.Type.DeleteColumn);
          +      kv = new KeyValue(rowBytes, fam, fam, now, KeyValue.Type.DeleteColumn);
               } else if (type.getCode() == KeyValue.Type.DeleteFamily.getCode()) {
          -        kv = new KeyValue(rowBytes, fam, null,
          -            now, KeyValue.Type.DeleteFamily);
          +      kv = new KeyValue(rowBytes, fam, null, now, KeyValue.Type.DeleteFamily);
               }
               WALEntry.Builder builder = createWALEntryBuilder(table);
               cells.add(kv);
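
The testApplyDeleteBeforePut Javadoc above notes that Puts are buffered while Deletes are not, so their relative order matters when both target the same row. A minimal sketch of one way to preserve that order, draining buffered Puts before a Delete (hypothetical names, not the ReplicationSink implementation):

  import java.util.ArrayList;
  import java.util.List;

  public class DeleteBeforePutSketch {
    // Hypothetical sink model: Puts are buffered, Deletes are applied immediately, so the
    // buffered Puts must be drained before a Delete to keep the original WAL order.
    private final List<String> applied = new ArrayList<>();
    private final List<String> bufferedPuts = new ArrayList<>();

    void put(String row) {
      bufferedPuts.add("PUT " + row);
    }

    void delete(String row) {
      flush(); // drain buffered Puts first so the Delete cannot overtake them
      applied.add("DELETE " + row);
    }

    void flush() {
      applied.addAll(bufferedPuts);
      bufferedPuts.clear();
    }

    public static void main(String[] args) {
      DeleteBeforePutSketch sink = new DeleteBeforePutSketch();
      sink.put("row-0");
      sink.put("row-1");
      sink.delete("row-1"); // forces the two buffered Puts out before the Delete
      sink.put("row-3");
      sink.flush();
      sink.applied.forEach(System.out::println);
    }
  }
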
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
          index 6f32c044c538..058d78a08afa 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java
          @@ -16,6 +16,7 @@
            * limitations under the License.
            */
           package org.apache.hadoop.hbase.replication.regionserver;
          +
           import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.META_WAL_PROVIDER_ID;
           import static org.junit.Assert.assertEquals;
           import static org.junit.Assert.assertFalse;
          @@ -25,6 +26,7 @@
           import static org.mockito.Mockito.doNothing;
           import static org.mockito.Mockito.mock;
           import static org.mockito.Mockito.when;
          +
           import java.io.IOException;
           import java.util.ArrayList;
           import java.util.OptionalLong;
          @@ -77,19 +79,16 @@
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
          -@Category({ReplicationTests.class, MediumTests.class})
          +@Category({ ReplicationTests.class, MediumTests.class })
           public class TestReplicationSource {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
                 HBaseClassTestRule.forClass(TestReplicationSource.class);
           
          -  private static final Logger LOG =
          -      LoggerFactory.getLogger(TestReplicationSource.class);
          -  private final static HBaseTestingUtil TEST_UTIL =
          -      new HBaseTestingUtil();
          -  private final static HBaseTestingUtil TEST_UTIL_PEER =
          -      new HBaseTestingUtil();
          +  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSource.class);
          +  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
          +  private final static HBaseTestingUtil TEST_UTIL_PEER = new HBaseTestingUtil();
             private static FileSystem FS;
             private static Path oldLogDir;
             private static Path logDir;
          @@ -129,18 +128,18 @@ public void testDefaultSkipsMetaWAL() throws IOException {
               Mockito.when(mockPeer.getConfiguration()).thenReturn(conf);
               Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
               ReplicationPeerConfig peerConfig = Mockito.mock(ReplicationPeerConfig.class);
          -    Mockito.when(peerConfig.getReplicationEndpointImpl()).
          -      thenReturn(DoNothingReplicationEndpoint.class.getName());
          +    Mockito.when(peerConfig.getReplicationEndpointImpl())
          +        .thenReturn(DoNothingReplicationEndpoint.class.getName());
               Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig);
               ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
               Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
          -    Mockito.when(manager.getGlobalMetrics()).
          -      thenReturn(mock(MetricsReplicationGlobalSourceSource.class));
          +    Mockito.when(manager.getGlobalMetrics())
          +        .thenReturn(mock(MetricsReplicationGlobalSourceSource.class));
               String queueId = "qid";
               RegionServerServices rss =
          -      TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
          -    rs.init(conf, null, manager, null, mockPeer, rss, queueId, null,
          -      p -> OptionalLong.empty(), new MetricsSource(queueId));
          +        TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
          +    rs.init(conf, null, manager, null, mockPeer, rss, queueId, null, p -> OptionalLong.empty(),
          +      new MetricsSource(queueId));
               try {
                 rs.startup();
                 assertTrue(rs.isSourceActive());
          @@ -169,32 +168,30 @@ public void testWALEntryFilter() throws IOException {
               Mockito.when(mockPeer.getConfiguration()).thenReturn(conf);
               Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
               ReplicationPeerConfig peerConfig = Mockito.mock(ReplicationPeerConfig.class);
          -    Mockito.when(peerConfig.getReplicationEndpointImpl()).
          -      thenReturn(DoNothingReplicationEndpoint.class.getName());
          +    Mockito.when(peerConfig.getReplicationEndpointImpl())
          +        .thenReturn(DoNothingReplicationEndpoint.class.getName());
               Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig);
               ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
               Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
               String queueId = "qid";
               RegionServerServices rss =
          -      TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
          -    rs.init(conf, null, manager, null, mockPeer, rss, queueId,
          -      uuid, p -> OptionalLong.empty(), new MetricsSource(queueId));
          +        TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
          +    rs.init(conf, null, manager, null, mockPeer, rss, queueId, uuid, p -> OptionalLong.empty(),
          +      new MetricsSource(queueId));
               try {
                 rs.startup();
                 TEST_UTIL.waitFor(30000, () -> rs.getWalEntryFilter() != null);
                 WALEntryFilter wef = rs.getWalEntryFilter();
                 // Test non-system WAL edit.
          -      WALEdit we = new WALEdit().add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).
          -        setRow(HConstants.EMPTY_START_ROW).
          -        setFamily(HConstants.CATALOG_FAMILY).
          -        setType(Cell.Type.Put).build());
          -      WAL.Entry e = new WAL.Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY,
          -        TableName.valueOf("test"), -1, -1, uuid), we);
          +      WALEdit we = new WALEdit().add(
          +        CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(HConstants.EMPTY_START_ROW)
          +            .setFamily(HConstants.CATALOG_FAMILY).setType(Cell.Type.Put).build());
          +      WAL.Entry e = new WAL.Entry(
          +          new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TableName.valueOf("test"), -1, -1, uuid), we);
                 assertTrue(wef.filter(e) == e);
                 // Test system WAL edit.
                 e = new WAL.Entry(
          -        new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TableName.META_TABLE_NAME, -1, -1, uuid),
          -          we);
          +          new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TableName.META_TABLE_NAME, -1, -1, uuid), we);
                 assertNull(wef.filter(e));
               } finally {
                 rs.terminate("Done");
          @@ -203,13 +200,12 @@ public void testWALEntryFilter() throws IOException {
             }
           
             /**
          -   * Sanity check that we can move logs around while we are reading
          -   * from them. Should this test fail, ReplicationSource would have a hard
          -   * time reading logs that are being archived.
          +   * Sanity check that we can move logs around while we are reading from them. Should this test
          +   * fail, ReplicationSource would have a hard time reading logs that are being archived.
              */
             // This tests doesn't belong in here... it is not about ReplicationSource.
             @Test
          -  public void testLogMoving() throws Exception{
          +  public void testLogMoving() throws Exception {
               Path logPath = new Path(logDir, "log");
               if (!FS.exists(logDir)) {
                 FS.mkdirs(logDir);
          @@ -217,15 +213,14 @@ public void testLogMoving() throws Exception{
               if (!FS.exists(oldLogDir)) {
                 FS.mkdirs(oldLogDir);
               }
          -    WALProvider.Writer writer = WALFactory.createWALWriter(FS, logPath,
          -        TEST_UTIL.getConfiguration());
          -    for(int i = 0; i < 3; i++) {
          +    WALProvider.Writer writer =
          +        WALFactory.createWALWriter(FS, logPath, TEST_UTIL.getConfiguration());
          +    for (int i = 0; i < 3; i++) {
                 byte[] b = Bytes.toBytes(Integer.toString(i));
          -      KeyValue kv = new KeyValue(b,b,b);
          +      KeyValue kv = new KeyValue(b, b, b);
                 WALEdit edit = new WALEdit();
                 edit.add(kv);
          -      WALKeyImpl key = new WALKeyImpl(b, TableName.valueOf(b), 0, 0,
          -          HConstants.DEFAULT_CLUSTER_ID);
          +      WALKeyImpl key = new WALKeyImpl(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID);
                 writer.append(new WAL.Entry(key, edit));
                 writer.sync(false);
               }
          @@ -249,14 +244,13 @@ public void testLogMoving() throws Exception{
             }
           
             /**
          -   * Tests that {@link ReplicationSource#terminate(String)} will timeout properly
          -   * Moved here from TestReplicationSource because doesn't need cluster.
+   * Tests that {@link ReplicationSource#terminate(String)} will timeout properly. Moved here from
+   * TestReplicationSource because it doesn't need a cluster.
              */
             @Test
             public void testTerminateTimeout() throws Exception {
               ReplicationSource source = new ReplicationSource();
          -    ReplicationEndpoint
          -      replicationEndpoint = new DoNothingReplicationEndpoint();
          +    ReplicationEndpoint replicationEndpoint = new DoNothingReplicationEndpoint();
               try {
                 replicationEndpoint.start();
                 ReplicationPeer mockPeer = Mockito.mock(ReplicationPeer.class);
          @@ -265,11 +259,10 @@ public void testTerminateTimeout() throws Exception {
                 testConf.setInt("replication.source.maxretriesmultiplier", 1);
                 ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
                 Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
          -      source.init(testConf, null, manager, null, mockPeer, null, "testPeer",
          -        null, p -> OptionalLong.empty(), null);
          +      source.init(testConf, null, manager, null, mockPeer, null, "testPeer", null,
          +        p -> OptionalLong.empty(), null);
                 ExecutorService executor = Executors.newSingleThreadExecutor();
-      Future<?> future = executor.submit(
-        () -> source.terminate("testing source termination"));
+      Future<?> future = executor.submit(() -> source.terminate("testing source termination"));
                 long sleepForRetries = testConf.getLong("replication.source.sleepforretries", 1000);
       Waiter.waitFor(testConf, sleepForRetries * 2, (Waiter.Predicate<Exception>) future::isDone);
               } finally {
          @@ -282,7 +275,7 @@ public void testTerminateClearsBuffer() throws Exception {
               ReplicationSource source = new ReplicationSource();
               ReplicationSourceManager mockManager = mock(ReplicationSourceManager.class);
               MetricsReplicationGlobalSourceSource mockMetrics =
          -      mock(MetricsReplicationGlobalSourceSource.class);
          +        mock(MetricsReplicationGlobalSourceSource.class);
               AtomicLong buffer = new AtomicLong();
               Mockito.when(mockManager.getTotalBufferUsed()).thenReturn(buffer);
               Mockito.when(mockManager.getGlobalMetrics()).thenReturn(mockMetrics);
          @@ -291,10 +284,9 @@ public void testTerminateClearsBuffer() throws Exception {
               Configuration testConf = HBaseConfiguration.create();
               source.init(testConf, null, mockManager, null, mockPeer, Mockito.mock(Server.class), "testPeer",
                 null, p -> OptionalLong.empty(), mock(MetricsSource.class));
          -    ReplicationSourceWALReader reader = new ReplicationSourceWALReader(null,
          -      conf, null, 0, null, source, null);
          -    ReplicationSourceShipper shipper =
          -      new ReplicationSourceShipper(conf, null, null, source);
          +    ReplicationSourceWALReader reader =
          +        new ReplicationSourceWALReader(null, conf, null, 0, null, source, null);
          +    ReplicationSourceShipper shipper = new ReplicationSourceShipper(conf, null, null, source);
               shipper.entryReader = reader;
               source.workerThreads.put("testPeer", shipper);
               WALEntryBatch batch = new WALEntryBatch(10, logDir);
          @@ -308,8 +300,8 @@ public void testTerminateClearsBuffer() throws Exception {
               when(mockEdit.heapSize()).thenReturn(10000L);
               when(mockEdit.size()).thenReturn(0);
     ArrayList<Cell> cells = new ArrayList<>();
          -    KeyValue kv = new KeyValue(Bytes.toBytes("0001"), Bytes.toBytes("f"),
          -      Bytes.toBytes("1"), Bytes.toBytes("v1"));
          +    KeyValue kv = new KeyValue(Bytes.toBytes("0001"), Bytes.toBytes("f"), Bytes.toBytes("1"),
          +        Bytes.toBytes("v1"));
               cells.add(kv);
               when(mockEdit.getCells()).thenReturn(cells);
               reader.addEntryToBatch(batch, mockEntry);
          @@ -319,8 +311,7 @@ public void testTerminateClearsBuffer() throws Exception {
             }
           
             /**
          -   * Tests that recovered queues are preserved on a regionserver shutdown.
          -   * See HBASE-18192
          +   * Tests that recovered queues are preserved on a regionserver shutdown. See HBASE-18192
              */
             @Test
             public void testServerShutdownRecoveredQueue() throws Exception {
          @@ -346,7 +337,8 @@ public void testServerShutdownRecoveredQueue() throws Exception {
                   ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL_PEER.getClusterKey()).build());
                 // Wait for replication sources to come up
       Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
          -        @Override public boolean evaluate() {
          +        @Override
          +        public boolean evaluate() {
                     return !(managerA.getSources().isEmpty() || managerB.getSources().isEmpty());
                   }
                 });
          @@ -359,7 +351,8 @@ public void testServerShutdownRecoveredQueue() throws Exception {
                 // It's queues should be claimed by the only other alive server i.e. serverB
                 cluster.stopRegionServer(serverA.getServerName());
       Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
          -        @Override public boolean evaluate() throws Exception {
          +        @Override
          +        public boolean evaluate() throws Exception {
                     return managerB.getOldSources().size() == 1;
                   }
                 });
          @@ -367,7 +360,8 @@ public void testServerShutdownRecoveredQueue() throws Exception {
                 final HRegionServer serverC = cluster.startRegionServer().getRegionServer();
                 serverC.waitForServerOnline();
       Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
          -        @Override public boolean evaluate() throws Exception {
          +        @Override
          +        public boolean evaluate() throws Exception {
                     return serverC.getReplicationSourceService() != null;
                   }
                 });
          @@ -419,15 +413,18 @@ protected void stopServiceThreads() {
             public static class DoNothingReplicationEndpoint extends HBaseInterClusterReplicationEndpoint {
               private final UUID uuid = UUID.randomUUID();
           
          -    @Override public void init(Context context) throws IOException {
          +    @Override
          +    public void init(Context context) throws IOException {
                 this.ctx = context;
               }
           
          -    @Override public WALEntryFilter getWALEntryfilter() {
          +    @Override
          +    public WALEntryFilter getWALEntryfilter() {
                 return null;
               }
           
          -    @Override public synchronized UUID getPeerUUID() {
          +    @Override
          +    public synchronized UUID getPeerUUID() {
                 return this.uuid;
               }
           
          @@ -441,7 +438,8 @@ protected void doStop() {
                 notifyStopped();
               }
           
          -    @Override public boolean canReplicateToSameCluster() {
          +    @Override
          +    public boolean canReplicateToSameCluster() {
                 return true;
               }
             }
          @@ -455,7 +453,7 @@ public static class FlakyReplicationEndpoint extends DoNothingReplicationEndpoin
           
               @Override
               public synchronized UUID getPeerUUID() {
          -      if (count==0) {
          +      if (count == 0) {
                   count++;
                   throw new RuntimeException();
                 } else {
          @@ -489,8 +487,7 @@ public synchronized UUID getPeerUUID() {
             }
           
             /**
          -   * Test HBASE-20497
          -   * Moved here from TestReplicationSource because doesn't need cluster.
+   * Test HBASE-20497. Moved here from TestReplicationSource because it doesn't need a cluster.
              */
             @Test
             public void testRecoveredReplicationSourceShipperGetPosition() throws Exception {
          @@ -504,9 +501,9 @@ public void testRecoveredReplicationSourceShipperGetPosition() throws Exception
               Mockito.when(source.getServerWALsBelongTo()).thenReturn(deadServer);
               ReplicationQueueStorage storage = mock(ReplicationQueueStorage.class);
               Mockito.when(storage.getWALPosition(Mockito.eq(serverName), Mockito.any(), Mockito.any()))
          -      .thenReturn(1001L);
          +        .thenReturn(1001L);
               Mockito.when(storage.getWALPosition(Mockito.eq(deadServer), Mockito.any(), Mockito.any()))
          -      .thenReturn(-1L);
          +        .thenReturn(-1L);
               Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
               conf.setInt("replication.source.maxretriesmultiplier", -1);
               MetricsSource metricsSource = mock(MetricsSource.class);
          @@ -514,7 +511,7 @@ public void testRecoveredReplicationSourceShipperGetPosition() throws Exception
               ReplicationSourceLogQueue logQueue = new ReplicationSourceLogQueue(conf, metricsSource, source);
               logQueue.enqueueLog(new Path("/www/html/test"), walGroupId);
               RecoveredReplicationSourceShipper shipper =
          -      new RecoveredReplicationSourceShipper(conf, walGroupId, logQueue, source, storage);
          +        new RecoveredReplicationSourceShipper(conf, walGroupId, logQueue, source, storage);
               assertEquals(1001L, shipper.getStartPosition());
             }
           
          @@ -526,32 +523,31 @@ private RegionServerServices setupForAbortTests(ReplicationSource rs, Configurat
               Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
               ReplicationPeerConfig peerConfig = Mockito.mock(ReplicationPeerConfig.class);
               FaultyReplicationEndpoint.count = 0;
          -    Mockito.when(peerConfig.getReplicationEndpointImpl()).
          -      thenReturn(endpointName);
          +    Mockito.when(peerConfig.getReplicationEndpointImpl()).thenReturn(endpointName);
               Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig);
               ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
               Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
          -    Mockito.when(manager.getGlobalMetrics()).
          -      thenReturn(mock(MetricsReplicationGlobalSourceSource.class));
          +    Mockito.when(manager.getGlobalMetrics())
          +        .thenReturn(mock(MetricsReplicationGlobalSourceSource.class));
               String queueId = "qid";
               RegionServerServices rss =
          -      TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
          -    rs.init(conf, null, manager, null, mockPeer, rss, queueId, null,
          -      p -> OptionalLong.empty(), new MetricsSource(queueId));
          +        TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
          +    rs.init(conf, null, manager, null, mockPeer, rss, queueId, null, p -> OptionalLong.empty(),
          +      new MetricsSource(queueId));
               return rss;
             }
           
             /**
          -   * Test ReplicationSource retries startup once an uncaught exception happens
          -   * during initialization and eplication.source.regionserver.abort is set to false.
          +   * Test ReplicationSource retries startup once an uncaught exception happens during initialization
+   * and replication.source.regionserver.abort is set to false.
              */
             @Test
             public void testAbortFalseOnError() throws IOException {
               Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
               conf.setBoolean("replication.source.regionserver.abort", false);
               ReplicationSource rs = new ReplicationSource();
          -    RegionServerServices rss = setupForAbortTests(rs, conf,
          -      FlakyReplicationEndpoint.class.getName());
          +    RegionServerServices rss =
          +        setupForAbortTests(rs, conf, FlakyReplicationEndpoint.class.getName());
               try {
                 rs.startup();
                 assertTrue(rs.isSourceActive());
          @@ -571,8 +567,7 @@ public void testReplicationSourceInitializingMetric() throws IOException {
               Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
               conf.setBoolean("replication.source.regionserver.abort", false);
               ReplicationSource rs = new ReplicationSource();
          -    RegionServerServices rss = setupForAbortTests(rs, conf,
          -      BadReplicationEndpoint.class.getName());
          +    RegionServerServices rss = setupForAbortTests(rs, conf, BadReplicationEndpoint.class.getName());
               try {
                 rs.startup();
                 assertTrue(rs.isSourceActive());
          @@ -593,8 +588,8 @@ public void testReplicationSourceInitializingMetric() throws IOException {
             public void testAbortFalseOnErrorDoesntBlockMainThread() throws IOException {
               Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
               ReplicationSource rs = new ReplicationSource();
          -    RegionServerServices rss = setupForAbortTests(rs, conf,
          -      FaultyReplicationEndpoint.class.getName());
          +    RegionServerServices rss =
          +        setupForAbortTests(rs, conf, FaultyReplicationEndpoint.class.getName());
               try {
                 rs.startup();
                 assertTrue(true);
          @@ -605,15 +600,15 @@ public void testAbortFalseOnErrorDoesntBlockMainThread() throws IOException {
             }
           
             /**
          -   * Test ReplicationSource retries startup once an uncaught exception happens
          -   * during initialization and replication.source.regionserver.abort is set to true.
          +   * Test ReplicationSource retries startup once an uncaught exception happens during initialization
          +   * and replication.source.regionserver.abort is set to true.
              */
             @Test
             public void testAbortTrueOnError() throws IOException {
               Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
               ReplicationSource rs = new ReplicationSource();
          -    RegionServerServices rss = setupForAbortTests(rs, conf,
          -      FlakyReplicationEndpoint.class.getName());
          +    RegionServerServices rss =
          +        setupForAbortTests(rs, conf, FlakyReplicationEndpoint.class.getName());
               try {
                 rs.startup();
                 assertTrue(rs.isSourceActive());
          @@ -628,8 +623,8 @@ public void testAbortTrueOnError() throws IOException {
             }
           
             /*
          -    Test age of oldest wal metric.
          -  */
          +   * Test age of oldest wal metric.
          +   */
             @Test
             public void testAgeOfOldestWal() throws Exception {
               try {
          @@ -644,23 +639,23 @@ public void testAgeOfOldestWal() throws Exception {
                 Mockito.when(mockPeer.getConfiguration()).thenReturn(conf);
                 Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
                 ReplicationPeerConfig peerConfig = Mockito.mock(ReplicationPeerConfig.class);
          -      Mockito.when(peerConfig.getReplicationEndpointImpl()).
          -        thenReturn(DoNothingReplicationEndpoint.class.getName());
          +      Mockito.when(peerConfig.getReplicationEndpointImpl())
          +          .thenReturn(DoNothingReplicationEndpoint.class.getName());
                 Mockito.when(mockPeer.getPeerConfig()).thenReturn(peerConfig);
                 ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
                 Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
          -      Mockito.when(manager.getGlobalMetrics()).
          -        thenReturn(mock(MetricsReplicationGlobalSourceSource.class));
          +      Mockito.when(manager.getGlobalMetrics())
          +          .thenReturn(mock(MetricsReplicationGlobalSourceSource.class));
                 RegionServerServices rss =
          -        TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
          +          TEST_UTIL.createMockRegionServerService(ServerName.parseServerName("a.b.c,1,1"));
           
                 ReplicationSource source = new ReplicationSource();
          -      source.init(conf, null, manager, null, mockPeer, rss, id, null,
          -        p -> OptionalLong.empty(), metrics);
          +      source.init(conf, null, manager, null, mockPeer, rss, id, null, p -> OptionalLong.empty(),
          +        metrics);
           
                 final Path log1 = new Path(logDir, "log-walgroup-a.8");
                 manualEdge.setValue(10);
          -      // Diff of current time (10) and  log-walgroup-a.8 timestamp will be 2.
          +      // Diff of current time (10) and log-walgroup-a.8 timestamp will be 2.
                 source.enqueueLog(log1);
                 MetricsReplicationSourceSource metricsSource1 = getSourceMetrics(id);
                 assertEquals(2, metricsSource1.getOldestWalAge());
          @@ -678,8 +673,8 @@ public void testAgeOfOldestWal() throws Exception {
           
             private MetricsReplicationSourceSource getSourceMetrics(String sourceId) {
               MetricsReplicationSourceFactoryImpl factory =
          -      (MetricsReplicationSourceFactoryImpl) CompatibilitySingletonFactory.getInstance(
          -        MetricsReplicationSourceFactory.class);
          +        (MetricsReplicationSourceFactoryImpl) CompatibilitySingletonFactory
          +            .getInstance(MetricsReplicationSourceFactory.class);
               return factory.getSource(sourceId);
             }
           }
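
The hunks above are pure reformatting: wrapped chained calls now put the dot at the start of the continuation line, continuations are indented rather than left hanging after a trailing dot, and @Override moves onto its own line. A minimal, self-contained sketch of that wrapping convention (hypothetical Endpoint type, not taken from the patch) looks like this:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class WrappingStyleSketch {

  interface Endpoint {
    String name();
  }

  public Endpoint stubbedEndpoint() {
    Endpoint endpoint = mock(Endpoint.class);
    // The '.' leads the wrapped line, matching the style applied throughout this patch.
    when(endpoint.name())
      .thenReturn("do-nothing");
    return endpoint;
  }
}
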
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceLogQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceLogQueue.java
          index c28b18003c5b..177afe14aa96 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceLogQueue.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceLogQueue.java
          @@ -19,6 +19,7 @@
           
           import static org.junit.Assert.assertEquals;
           import static org.mockito.Mockito.mock;
          +
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.fs.Path;
           import org.apache.hadoop.hbase.HBaseClassTestRule;
          @@ -32,16 +33,16 @@
           import org.junit.experimental.categories.Category;
           import org.mockito.Mockito;
           
          -@Category({SmallTests.class,ReplicationTests.class})
          +@Category({ SmallTests.class, ReplicationTests.class })
           public class TestReplicationSourceLogQueue {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationSourceLogQueue.class);
          +      HBaseClassTestRule.forClass(TestReplicationSourceLogQueue.class);
           
             /*
          -    Testing enqueue and dequeuing of wal and check age of oldest wal.
          -  */
          +   * Testing enqueue and dequeuing of wal and check age of oldest wal.
          +   */
             @Test
             public void testEnqueueDequeue() {
               try {
          @@ -58,7 +59,7 @@ public void testEnqueueDequeue() {
                 ReplicationSourceLogQueue logQueue = new ReplicationSourceLogQueue(conf, metrics, source);
                 final Path log1 = new Path("log-walgroup-a.8");
                 manualEdge.setValue(10);
          -      // Diff of current time (10) and  log-walgroup-a.8 timestamp will be 2.
          +      // Diff of current time (10) and log-walgroup-a.8 timestamp will be 2.
                 logQueue.enqueueLog(log1, walGroupId1);
                 assertEquals(2, logQueue.getOldestWalAge());
           
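
For context, the age assertion in the hunk above is simple clock arithmetic: the test pins a manual clock at 10 and enqueues a WAL whose name ends in ".8", and the comment treats that suffix as the WAL's creation timestamp. A small sketch of that arithmetic (illustrative only, not HBase code):

public final class OldestWalAgeSketch {

  /** Age of the oldest WAL = current (manual) time minus the WAL's timestamp suffix. */
  static long oldestWalAge(long manualClockNow, long walTimestampSuffix) {
    return manualClockNow - walTimestampSuffix;
  }

  public static void main(String[] args) {
    // Clock pinned at 10, WAL named "log-walgroup-a.8" -> timestamp 8 -> age 2,
    // matching assertEquals(2, logQueue.getOldestWalAge()) above.
    System.out.println(oldestWalAge(10L, 8L)); // prints 2
  }
}
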
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
          index a52615465ed0..386d131baad4 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
          @@ -42,7 +42,6 @@
           import java.util.UUID;
           import java.util.concurrent.CountDownLatch;
           import java.util.stream.Collectors;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.fs.FileSystem;
           import org.apache.hadoop.fs.Path;
          @@ -109,19 +108,17 @@
           import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
           
           /**
          - * An abstract class that tests ReplicationSourceManager. Classes that extend this class should
          - * set up the proper config for this class and initialize the proper cluster using
          - * HBaseTestingUtility.
          + * An abstract class that tests ReplicationSourceManager. Classes that extend this class should set
          + * up the proper config for this class and initialize the proper cluster using HBaseTestingUtility.
            */
          -@Category({ReplicationTests.class, MediumTests.class})
          +@Category({ ReplicationTests.class, MediumTests.class })
           public abstract class TestReplicationSourceManager {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
                 HBaseClassTestRule.forClass(TestReplicationSourceManager.class);
           
          -  protected static final Logger LOG =
          -      LoggerFactory.getLogger(TestReplicationSourceManager.class);
          +  protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationSourceManager.class);
           
             protected static Configuration conf;
           
          @@ -147,8 +144,7 @@ public abstract class TestReplicationSourceManager {
           
             protected static final byte[] f2 = Bytes.toBytes("f2");
           
          -  protected static final TableName test =
          -      TableName.valueOf("test");
          +  protected static final TableName test = TableName.valueOf("test");
           
             protected static final String slaveId = "1";
           
          @@ -172,8 +168,8 @@ protected static void setupZkAndReplication() throws Exception {
               ZKUtil.createWithParents(zkw, "/hbase/replication");
               ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1");
               ZKUtil.setData(zkw, "/hbase/replication/peers/1",
          -        Bytes.toBytes(conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
          -            + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":/1"));
          +      Bytes.toBytes(conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
          +          + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":/1"));
               ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/peer-state");
               ZKUtil.setData(zkw, "/hbase/replication/peers/1/peer-state",
                 ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
          @@ -210,12 +206,12 @@ protected static void setupZkAndReplication() throws Exception {
               waitPeer(slaveId, manager, true);
           
               htd = TableDescriptorBuilder.newBuilder(test)
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1)
          -        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f2)).build();
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1)
          +            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f2)).build();
           
               scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
          -    for(byte[] fam : htd.getColumnFamilyNames()) {
          +    for (byte[] fam : htd.getColumnFamilyNames()) {
                 scopes.put(fam, 0);
               }
               hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(r1).setEndKey(r2).build();
          @@ -226,13 +222,10 @@ private static ReplicationSourceManager getManagerFromCluster() {
               if (utility.getMiniHBaseCluster() == null) {
                 return null;
               }
          -    return utility.getMiniHBaseCluster().getRegionServerThreads()
          -        .stream().map(JVMClusterUtil.RegionServerThread::getRegionServer)
          -        .findAny()
          -        .map(RegionServerServices::getReplicationSourceService)
          -        .map(r -> (Replication)r)
          -        .map(Replication::getReplicationManager)
          -        .get();
          +    return utility.getMiniHBaseCluster().getRegionServerThreads().stream()
          +        .map(JVMClusterUtil.RegionServerThread::getRegionServer).findAny()
          +        .map(RegionServerServices::getReplicationSourceService).map(r -> (Replication) r)
          +        .map(Replication::getReplicationManager).get();
             }
           
             @AfterClass
          @@ -262,8 +255,8 @@ public void setUp() throws Exception {
             public void tearDown() throws Exception {
               LOG.info("End " + testName.getMethodName());
               cleanLogDir();
-    List<String> ids = manager.getSources().stream()
-        .map(ReplicationSourceInterface::getPeerId).collect(Collectors.toList());
+    List<String> ids = manager.getSources().stream().map(ReplicationSourceInterface::getPeerId)
+        .collect(Collectors.toList());
               for (String id : ids) {
                 if (slaveId.equals(id)) {
                   continue;
          @@ -282,27 +275,26 @@ public void testLogRoll() throws Exception {
               edit.add(kv);
           
               WALFactory wals =
          -      new WALFactory(utility.getConfiguration(), URLEncoder.encode("regionserver:60020", "UTF8"));
          +        new WALFactory(utility.getConfiguration(), URLEncoder.encode("regionserver:60020", "UTF8"));
               ReplicationSourceManager replicationManager = replication.getReplicationManager();
               wals.getWALProvider()
          -      .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager));
          +        .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager));
               final WAL wal = wals.getWAL(hri);
               manager.init();
               TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("tableame"))
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f1)).build();
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(f1)).build();
     NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
          -    for(byte[] fam : htd.getColumnFamilyNames()) {
          +    for (byte[] fam : htd.getColumnFamilyNames()) {
                 scopes.put(fam, 0);
               }
               // Testing normal log rolling every 20
          -    for(long i = 1; i < 101; i++) {
          -      if(i > 1 && i % 20 == 0) {
          +    for (long i = 1; i < 101; i++) {
          +      if (i > 1 && i % 20 == 0) {
                   wal.rollWriter();
                 }
                 LOG.info(Long.toString(i));
          -      final long txid = wal.appendData(hri,
          -        new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(),
          -          mvcc, scopes),
          +      final long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test,
          +          EnvironmentEdgeManager.currentTime(), mvcc, scopes),
                   edit);
                 wal.sync(txid);
               }
          @@ -315,16 +307,15 @@ public void testLogRoll() throws Exception {
               LOG.info(baseline + " and " + time);
           
               for (int i = 0; i < 3; i++) {
          -      wal.appendData(hri,
          -        new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(),
          -          mvcc, scopes),
          +      wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test,
          +          EnvironmentEdgeManager.currentTime(), mvcc, scopes),
                   edit);
               }
               wal.sync();
           
               int logNumber = 0;
     for (Map.Entry<String, NavigableSet<String>> entry : manager.getWALs().get(slaveId)
          -      .entrySet()) {
          +        .entrySet()) {
                 logNumber += entry.getValue().size();
               }
               assertEquals(6, logNumber);
          @@ -338,15 +329,13 @@ public void testLogRoll() throws Exception {
               manager.logPositionAndCleanOldLogs(source,
                 new WALEntryBatch(0, manager.getSources().get(0).getCurrentPath()));
           
          -    wal.appendData(hri,
          -      new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(),
          -        mvcc, scopes),
          +    wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), test,
          +        EnvironmentEdgeManager.currentTime(), mvcc, scopes),
                 edit);
               wal.sync();
           
               assertEquals(1, manager.getWALs().size());
           
          -
               // TODO Need a case with only 2 WALs and we only want to delete the first one
             }
           
          @@ -380,8 +369,8 @@ public void testClaimQueues() throws Exception {
               int populatedMap = 0;
               // wait for result now... till all the workers are done.
               latch.await();
          -    populatedMap += w1.isLogZnodesMapPopulated() + w2.isLogZnodesMapPopulated()
          -        + w3.isLogZnodesMapPopulated();
          +    populatedMap +=
          +        w1.isLogZnodesMapPopulated() + w2.isLogZnodesMapPopulated() + w3.isLogZnodesMapPopulated();
               assertEquals(1, populatedMap);
               server.abort("", null);
             }
          @@ -445,7 +434,7 @@ public void testCleanupUnknownPeerZNode() throws Exception {
             public void testCompactionWALEdits() throws Exception {
               TableName tableName = TableName.valueOf("testCompactionWALEdits");
               WALProtos.CompactionDescriptor compactionDescriptor =
          -      WALProtos.CompactionDescriptor.getDefaultInstance();
          +        WALProtos.CompactionDescriptor.getDefaultInstance();
               RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(HConstants.EMPTY_START_ROW)
                   .setEndKey(HConstants.EMPTY_END_ROW).build();
               WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor);
          @@ -493,16 +482,15 @@ public void testBulkLoadWALEdits() throws Exception {
           
             /**
              * Test whether calling removePeer() on a ReplicationSourceManager that failed on initializing the
          -   * corresponding ReplicationSourceInterface correctly cleans up the corresponding
          -   * replication queue and ReplicationPeer.
          -   * See HBASE-16096.
          +   * corresponding ReplicationSourceInterface correctly cleans up the corresponding replication
          +   * queue and ReplicationPeer. See HBASE-16096.
              */
             @Test
          -  public void testPeerRemovalCleanup() throws Exception{
          +  public void testPeerRemovalCleanup() throws Exception {
               String replicationSourceImplName = conf.get("replication.replicationsource.implementation");
               final String peerId = "FakePeer";
               final ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          -      .setClusterKey(utility.getZkCluster().getAddress().toString() + ":/hbase").build();
          +        .setClusterKey(utility.getZkCluster().getAddress().toString() + ":/hbase").build();
               try {
                 DummyServer server = new DummyServer();
                 ReplicationQueueStorage rq = ReplicationStorageFactory
          @@ -510,7 +498,7 @@ public void testPeerRemovalCleanup() throws Exception{
                 // Purposely fail ReplicationSourceManager.addSource() by causing ReplicationSourceInterface
                 // initialization to throw an exception.
                 conf.set("replication.replicationsource.implementation",
          -          FailInitializeDummyReplicationSource.class.getName());
          +        FailInitializeDummyReplicationSource.class.getName());
                 manager.getReplicationPeers();
                 // Set up the znode and ReplicationPeer for the fake peer
                 // Don't wait for replication source to initialize, we know it won't.
          @@ -536,7 +524,7 @@ private static MetricsReplicationSourceSource getGlobalSource() throws Exception
               // Retrieve the global replication metrics source
               Field f = MetricsSource.class.getDeclaredField("globalSourceSource");
               f.setAccessible(true);
          -    return (MetricsReplicationSourceSource)f.get(source.getSourceMetrics());
          +    return (MetricsReplicationSourceSource) f.get(source.getSourceMetrics());
             }
           
             private static long getSizeOfLatestPath() {
          @@ -544,20 +532,18 @@ private static long getSizeOfLatestPath() {
               if (utility.getMiniHBaseCluster() == null) {
                 return 0;
               }
          -    return utility.getMiniHBaseCluster().getRegionServerThreads()
          -        .stream().map(JVMClusterUtil.RegionServerThread::getRegionServer)
          -        .map(RegionServerServices::getReplicationSourceService)
          -        .map(r -> (Replication)r)
          +    return utility.getMiniHBaseCluster().getRegionServerThreads().stream()
          +        .map(JVMClusterUtil.RegionServerThread::getRegionServer)
          +        .map(RegionServerServices::getReplicationSourceService).map(r -> (Replication) r)
                   .map(Replication::getReplicationManager)
          -        .mapToLong(ReplicationSourceManager::getSizeOfLatestPath)
          -        .sum();
          +        .mapToLong(ReplicationSourceManager::getSizeOfLatestPath).sum();
             }
           
             @Test
             public void testRemovePeerMetricsCleanup() throws Exception {
               final String peerId = "DummyPeer";
               final ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          -      .setClusterKey(utility.getZkCluster().getAddress().toString() + ":/hbase").build();
          +        .setClusterKey(utility.getZkCluster().getAddress().toString() + ":/hbase").build();
               try {
                 MetricsReplicationSourceSource globalSource = getGlobalSource();
                 final int globalLogQueueSizeInitial = globalSource.getSizeOfLogQueue();
          @@ -597,7 +583,7 @@ private ReplicationSourceInterface mockReplicationSource(String peerId) {
               when(source.isSyncReplication()).thenReturn(true);
               ReplicationPeerConfig config = mock(ReplicationPeerConfig.class);
               when(config.getRemoteWALDir())
          -      .thenReturn(remoteLogDir.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString());
          +        .thenReturn(remoteLogDir.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString());
               ReplicationPeer peer = mock(ReplicationPeer.class);
               when(peer.getPeerConfig()).thenReturn(config);
               when(source.getPeer()).thenReturn(peer);
          @@ -609,22 +595,21 @@ public void testRemoveRemoteWALs() throws Exception {
               String peerId2 = slaveId + "_2";
               addPeerAndWait(peerId2,
                 ReplicationPeerConfig.newBuilder()
          -        .setClusterKey("localhost:" + utility.getZkCluster().getClientPort() + ":/hbase").build(),
          +          .setClusterKey("localhost:" + utility.getZkCluster().getClientPort() + ":/hbase").build(),
                 true);
               try {
                 // make sure that we can deal with files which does not exist
                 String walNameNotExists =
          -        "remoteWAL-12345-" + slaveId + ".12345" + ReplicationUtils.SYNC_WAL_SUFFIX;
          +          "remoteWAL-12345-" + slaveId + ".12345" + ReplicationUtils.SYNC_WAL_SUFFIX;
                 Path wal = new Path(logDir, walNameNotExists);
                 manager.preLogRoll(wal);
                 manager.postLogRoll(wal);
           
                 Path remoteLogDirForPeer = new Path(remoteLogDir, slaveId);
                 fs.mkdirs(remoteLogDirForPeer);
          -      String walName =
          -        "remoteWAL-12345-" + slaveId + ".23456" + ReplicationUtils.SYNC_WAL_SUFFIX;
          -      Path remoteWAL =
          -        new Path(remoteLogDirForPeer, walName).makeQualified(fs.getUri(), fs.getWorkingDirectory());
          +      String walName = "remoteWAL-12345-" + slaveId + ".23456" + ReplicationUtils.SYNC_WAL_SUFFIX;
          +      Path remoteWAL = new Path(remoteLogDirForPeer, walName).makeQualified(fs.getUri(),
          +        fs.getWorkingDirectory());
                 fs.create(remoteWAL).close();
                 wal = new Path(logDir, walName);
                 manager.preLogRoll(wal);
          @@ -646,14 +631,14 @@ public void testRemoveRemoteWALs() throws Exception {
             @Test
             public void testSameWALPrefix() throws IOException {
     Set<String> latestWalsBefore =
          -      manager.getLastestPath().stream().map(Path::getName).collect(Collectors.toSet());
          +        manager.getLastestPath().stream().map(Path::getName).collect(Collectors.toSet());
               String walName1 = "localhost,8080,12345-45678-Peer.34567";
               String walName2 = "localhost,8080,12345.56789";
               manager.preLogRoll(new Path(walName1));
               manager.preLogRoll(new Path(walName2));
           
     Set<String> latestWals = manager.getLastestPath().stream().map(Path::getName)
          -      .filter(n -> !latestWalsBefore.contains(n)).collect(Collectors.toSet());
          +        .filter(n -> !latestWalsBefore.contains(n)).collect(Collectors.toSet());
               assertEquals(2, latestWals.size());
               assertTrue(latestWals.contains(walName1));
               assertTrue(latestWals.contains(walName2));
          @@ -679,8 +664,8 @@ private void addPeerAndWait(final String peerId, final ReplicationPeerConfig pee
               }
             }
           
          -  private static void waitPeer(final String peerId,
          -      ReplicationSourceManager manager, final boolean waitForSource) {
          +  private static void waitPeer(final String peerId, ReplicationSourceManager manager,
          +      final boolean waitForSource) {
               ReplicationPeers rp = manager.getReplicationPeers();
               Waiter.waitFor(conf, 20000, () -> {
                 if (waitForSource) {
          @@ -689,7 +674,7 @@ private static void waitPeer(final String peerId,
                     return false;
                   }
                   if (rs instanceof ReplicationSourceDummy) {
          -          return ((ReplicationSourceDummy)rs).isStartup();
          +          return ((ReplicationSourceDummy) rs).isStartup();
                   }
                   return true;
                 } else {
@@ -747,8 +732,7 @@ private WALEdit getBulkLoadWALEdit(NavigableMap<byte[], Integer> scope) {
               }
               storeFiles.put(f2, p);
               // 2. Create bulk load descriptor
          -    BulkLoadDescriptor desc =
          -        ProtobufUtil.toBulkLoadDescriptor(hri.getTable(),
          +    BulkLoadDescriptor desc = ProtobufUtil.toBulkLoadDescriptor(hri.getTable(),
                 UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes()), storeFiles, storeFilesSize, 1);
           
               // 3. create bulk load wal edit event
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java
          index 17955880c9c9..86b9738c6c12 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerJoin.java
          @@ -22,7 +22,6 @@
           
           import java.util.Optional;
           import java.util.stream.Stream;
          -
           import org.apache.hadoop.hbase.HBaseClassTestRule;
           import org.apache.hadoop.hbase.HConstants;
           import org.apache.hadoop.hbase.TableName;
          @@ -44,12 +43,12 @@
           import org.junit.experimental.categories.Category;
           import org.junit.rules.TestName;
           
          -@Category({ ReplicationTests.class, MediumTests.class})
          +@Category({ ReplicationTests.class, MediumTests.class })
           public class TestReplicationSourceManagerJoin extends TestReplicationBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestReplicationSourceManagerJoin.class);
          +      HBaseClassTestRule.forClass(TestReplicationSourceManagerJoin.class);
           
             @Rule
             public TestName testName = new TestName();
          @@ -67,9 +66,9 @@ public void testReplicationSourcesTerminate() throws Exception {
               // recovered source end.
               TableName tableName = TableName.valueOf(testName.getMethodName());
               TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName)
          -        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build();
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName)
          +            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName)).build();
               hbaseAdmin.createTable(td);
               assertFalse(UTIL2.getAdmin().tableExists(tableName));
               Table table = UTIL1.getConnection().getTable(tableName);
          @@ -78,10 +77,9 @@ public void testReplicationSourcesTerminate() throws Exception {
                 table.put(new Put(Bytes.toBytes(i)).addColumn(famName, row, row));
               }
               // Kill rs holding table region. There are only TWO servers. We depend on it.
-    Optional<HRegionServer> server =
          -      UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().stream()
          -      .map(JVMClusterUtil.RegionServerThread::getRegionServer)
          -      .filter(rs -> !rs.getRegions(tableName).isEmpty()).findAny();
+    Optional<HRegionServer> server = UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads()
          +        .stream().map(JVMClusterUtil.RegionServerThread::getRegionServer)
          +        .filter(rs -> !rs.getRegions(tableName).isEmpty()).findAny();
               assertTrue(server.isPresent());
               server.get().abort("stopping for test");
           
          @@ -89,7 +87,7 @@ public void testReplicationSourcesTerminate() throws Exception {
               UTIL1.waitTableAvailable(tableName);
               // Wait for recovered source running
               HRegionServer rs =
          -      UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
          +        UTIL1.getMiniHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
               ReplicationSourceManager manager = rs.getReplicationSourceService().getReplicationManager();
               UTIL1.waitFor(60000, () -> !manager.getOldSources().isEmpty());
           
          @@ -98,12 +96,12 @@ public void testReplicationSourcesTerminate() throws Exception {
           
               // Check all sources running before manager.join(), terminated after manager.join().
               Stream.concat(manager.getSources().stream(), manager.getOldSources().stream())
          -      .filter(src -> src instanceof ReplicationSource)
          -      .forEach(src -> assertTrue(((ReplicationSource) src).sourceRunning));
          +        .filter(src -> src instanceof ReplicationSource)
          +        .forEach(src -> assertTrue(((ReplicationSource) src).sourceRunning));
               manager.join();
               Stream.concat(manager.getSources().stream(), manager.getOldSources().stream())
          -      .filter(src -> src instanceof ReplicationSource)
          -      .forEach(src -> assertFalse(((ReplicationSource) src).sourceRunning));
          +        .filter(src -> src instanceof ReplicationSource)
          +        .forEach(src -> assertFalse(((ReplicationSource) src).sourceRunning));
             }
           
           }
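
The testReplicationSourcesTerminate hunk above checks the same flag over both the normal and the recovered sources by concatenating the two streams before and after manager.join(). A self-contained sketch of that pattern, with a hypothetical Worker type standing in for ReplicationSource:

import java.util.List;
import java.util.stream.Stream;

public final class ConcatCheckSketch {

  static final class Worker {
    final boolean running;

    Worker(boolean running) {
      this.running = running;
    }
  }

  /** True only if every worker in either list is running, mirroring the concat check above. */
  static boolean allRunning(List<Worker> normal, List<Worker> recovered) {
    return Stream.concat(normal.stream(), recovered.stream()).allMatch(w -> w.running);
  }
}
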
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerZkImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerZkImpl.java
          index 34ecf2217ee1..e87a1db3e163 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerZkImpl.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManagerZkImpl.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationThrottler.java
          index c4d529e23c15..4fe6ef163896 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationThrottler.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationThrottler.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -29,7 +29,7 @@
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
          -@Category({ReplicationTests.class, SmallTests.class})
          +@Category({ ReplicationTests.class, SmallTests.class })
           public class TestReplicationThrottler {
           
             @ClassRule
          @@ -53,8 +53,8 @@ public void testThrottling() {
               long ticks2 = throttler2.getNextSleepInterval(1000);
           
               // 1. the first push size is 1000, though 1000 bytes exceeds 100/10
          -    //    bandwidthes, but no sleep since it's the first push of current
          -    //    cycle, amortizing occurs when next push arrives
+    // bandwidths, but no sleep since it's the first push of current
          +    // cycle, amortizing occurs when next push arrives
               assertEquals(0, ticks1);
               assertEquals(0, ticks2);
           
          @@ -65,18 +65,18 @@ public void testThrottling() {
               ticks2 = throttler2.getNextSleepInterval(5);
           
               // 2. when the second push(5) arrives and throttling(5) is called, the
          -    //    current cyclePushSize is 1000 bytes, this should make throttler1
          -    //    sleep 1000/100 = 10 cycles = 1s and make throttler2 sleep 1000/10
          -    //    = 100 cycles = 10s before the second push occurs -- amortize case
          -    //    after amortizing, both cycleStartTick and cyclePushSize are reset
          +    // current cyclePushSize is 1000 bytes, this should make throttler1
          +    // sleep 1000/100 = 10 cycles = 1s and make throttler2 sleep 1000/10
          +    // = 100 cycles = 10s before the second push occurs -- amortize case
          +    // after amortizing, both cycleStartTick and cyclePushSize are reset
               //
               // Note: in a slow machine, the sleep interval might be less than ideal ticks.
               // If it is 75% of expected value, its is still acceptable.
               if (ticks1 != 1000 && ticks1 != 999) {
          -      assertTrue(ticks1 >= 750 && ticks1 <=1000);
          +      assertTrue(ticks1 >= 750 && ticks1 <= 1000);
               }
               if (ticks2 != 10000 && ticks2 != 9999) {
          -      assertTrue(ticks2 >= 7500 && ticks2 <=10000);
          +      assertTrue(ticks2 >= 7500 && ticks2 <= 10000);
               }
           
               throttler1.resetStartTick();
          @@ -89,13 +89,13 @@ public void testThrottling() {
               ticks2 = throttler2.getNextSleepInterval(45);
           
               // 3. when the third push(45) arrives and throttling(45) is called, the
          -    //    current cyclePushSize is 5 bytes, 50-byte makes throttler1 no
          -    //    sleep, but can make throttler2 delay to next cycle
          +    // current cyclePushSize is 5 bytes, 50-byte makes throttler1 no
          +    // sleep, but can make throttler2 delay to next cycle
               // note: in real case, sleep time should cover time elapses during push
          -    //       operation
          +    // operation
               assertTrue(ticks1 == 0);
               if (ticks2 != 100 && ticks2 != 99) {
          -      assertTrue(ticks1 >= 75 && ticks1 <=100);
+      assertTrue(ticks2 >= 75 && ticks2 <= 100);
               }
           
               throttler2.resetStartTick();
          @@ -107,15 +107,15 @@ public void testThrottling() {
               ticks2 = throttler2.getNextSleepInterval(60);
           
               // 4. when the fourth push(60) arrives and throttling(60) is called, throttler1
          -    //    delay to next cycle since 45+60 == 105; and throttler2 should firstly sleep
          -    //    ceiling(45/10)= 5 cycles = 500ms to amortize previous push
          +    // delay to next cycle since 45+60 == 105; and throttler2 should firstly sleep
          +    // ceiling(45/10)= 5 cycles = 500ms to amortize previous push
               //
               // Note: in real case, sleep time should cover time elapses during push operation
               if (ticks1 != 100 && ticks1 != 99) {
          -      assertTrue(ticks1 >= 75 && ticks1 <=100);
          +      assertTrue(ticks1 >= 75 && ticks1 <= 100);
               }
               if (ticks2 != 500 && ticks2 != 499) {
          -      assertTrue(ticks1 >= 375 && ticks1 <=500);
          +      assertTrue(ticks1 >= 375 && ticks1 <= 500);
               }
             }
           }
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java
          index 00bf7dc26f11..12256be9e3bd 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationValueCompressedWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
          index b32f43471fca..f93d8c5f84c1 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -121,8 +121,8 @@ public void testReplicatorWithErrors() throws Exception {
               // Replace the peer set up for us by the base class with a wrapper for this test
               hbaseAdmin.addReplicationPeer("testReplicatorWithErrors",
                 ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey())
          -        .setReplicationEndpointImpl(FailureInjectingReplicationEndpointForTest.class.getName())
          -        .build());
          +          .setReplicationEndpointImpl(FailureInjectingReplicationEndpointForTest.class.getName())
          +          .build());
           
               FailureInjectingReplicationEndpointForTest.setBatchCount(0);
               FailureInjectingReplicationEndpointForTest.setEntriesCount(0);
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java
          index 37e315035d1d..1878bc6f08b8 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -72,7 +72,7 @@ public class TestSerialReplicationChecker {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSerialReplicationChecker.class);
          +      HBaseClassTestRule.forClass(TestSerialReplicationChecker.class);
           
             private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
           
          @@ -139,7 +139,7 @@ private Entry createEntry(RegionInfo region, long seqId) {
           
             private Cell createCell(RegionInfo region) {
               return CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(region.getStartKey())
          -      .setType(Type.Put).build();
          +        .setType(Type.Put).build();
             }
           
             @Test
          @@ -245,9 +245,9 @@ public void testCanPushAfterMerge() throws IOException, ReplicationException {
               // 0xFF is the escape byte when storing region name so let's make sure it can work.
               byte[] endKey = new byte[] { (byte) 0xFF, 0x00, (byte) 0xFF, (byte) 0xFF, 0x01 };
               RegionInfo regionA =
          -      RegionInfoBuilder.newBuilder(tableName).setEndKey(endKey).setRegionId(1).build();
          +        RegionInfoBuilder.newBuilder(tableName).setEndKey(endKey).setRegionId(1).build();
               RegionInfo regionB =
          -      RegionInfoBuilder.newBuilder(tableName).setStartKey(endKey).setRegionId(2).build();
          +        RegionInfoBuilder.newBuilder(tableName).setStartKey(endKey).setRegionId(2).build();
               RegionInfo region = RegionInfoBuilder.newBuilder(tableName).setRegionId(3).build();
               addStateAndBarrier(regionA, null, 10, 100);
               addStateAndBarrier(regionB, null, 20, 200);
          @@ -270,9 +270,9 @@ public void testCanPushAfterSplit() throws IOException, ReplicationException {
               byte[] endKey = new byte[] { (byte) 0xFF, 0x00, (byte) 0xFF, (byte) 0xFF, 0x01 };
               RegionInfo region = RegionInfoBuilder.newBuilder(tableName).setRegionId(1).build();
               RegionInfo regionA =
          -      RegionInfoBuilder.newBuilder(tableName).setEndKey(endKey).setRegionId(2).build();
          +        RegionInfoBuilder.newBuilder(tableName).setEndKey(endKey).setRegionId(2).build();
               RegionInfo regionB =
          -      RegionInfoBuilder.newBuilder(tableName).setStartKey(endKey).setRegionId(3).build();
          +        RegionInfoBuilder.newBuilder(tableName).setStartKey(endKey).setRegionId(3).build();
               addStateAndBarrier(region, null, 10, 100);
               addStateAndBarrier(regionA, RegionState.State.OPEN, 100, 200);
               addStateAndBarrier(regionB, RegionState.State.OPEN, 100, 300);
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java
          index 865e1914a131..7d6eb989cf1c 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.replication.regionserver;
           
           import java.io.IOException;
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSourceFSConfigurationProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSourceFSConfigurationProvider.java
          index ca98d150eaf3..c2c4e436d63c 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSourceFSConfigurationProvider.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSourceFSConfigurationProvider.java
          @@ -1,17 +1,23 @@
           /*
          - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
          - * agreements. See the NOTICE file distributed with this work for additional information regarding
          - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
          - * "License"); you may not use this file except in compliance with the License. You may obtain a
          - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
          - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
          - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
          - * for the specific language governing permissions and limitations under the License.
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.replication.regionserver;
           
           import java.io.IOException;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.yetus.audience.InterfaceAudience;
           
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSyncReplicationShipperQuit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSyncReplicationShipperQuit.java
          index f6dc3d775894..86484d26d7b5 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSyncReplicationShipperQuit.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSyncReplicationShipperQuit.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -42,15 +42,15 @@ public class TestSyncReplicationShipperQuit extends SyncReplicationTestBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestSyncReplicationShipperQuit.class);
          +      HBaseClassTestRule.forClass(TestSyncReplicationShipperQuit.class);
           
             @Test
             public void testShipperQuitWhenDA() throws Exception {
               // set to serial replication
               UTIL1.getAdmin().updateReplicationPeerConfig(PEER_ID, ReplicationPeerConfig
          -      .newBuilder(UTIL1.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
          +        .newBuilder(UTIL1.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
               UTIL2.getAdmin().updateReplicationPeerConfig(PEER_ID, ReplicationPeerConfig
          -      .newBuilder(UTIL2.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
          +        .newBuilder(UTIL2.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
               UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
                 SyncReplicationState.STANDBY);
               UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
          @@ -59,12 +59,12 @@ public void testShipperQuitWhenDA() throws Exception {
               writeAndVerifyReplication(UTIL1, UTIL2, 0, 100);
               HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
               DualAsyncFSWAL wal =
          -      (DualAsyncFSWAL) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
          +        (DualAsyncFSWAL) rs.getWAL(RegionInfoBuilder.newBuilder(TABLE_NAME).build());
               String walGroupId =
          -      AbstractFSWALProvider.getWALPrefixFromWALName(wal.getCurrentFileName().getName());
          +        AbstractFSWALProvider.getWALPrefixFromWALName(wal.getCurrentFileName().getName());
               ReplicationSourceShipper shipper =
          -      ((ReplicationSource) ((Replication) rs.getReplicationSourceService()).getReplicationManager()
          -        .getSource(PEER_ID)).workerThreads.get(walGroupId);
          +        ((ReplicationSource) ((Replication) rs.getReplicationSourceService())
          +            .getReplicationManager().getSource(PEER_ID)).workerThreads.get(walGroupId);
               assertFalse(shipper.isFinished());
           
               UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
          @@ -72,7 +72,7 @@ public void testShipperQuitWhenDA() throws Exception {
               writeAndVerifyReplication(UTIL1, UTIL2, 100, 200);
           
               ReplicationSource source = (ReplicationSource) ((Replication) rs.getReplicationSourceService())
          -      .getReplicationManager().getSource(PEER_ID);
          +        .getReplicationManager().getSource(PEER_ID);
               // the peer is serial so here we can make sure that the previous wals have already been
               // replicated, and finally the shipper should be removed from the worker pool
               UTIL1.waitFor(10000, () -> !source.workerThreads.containsKey(walGroupId));
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
          index beaa78cd294a..fe1672e44c0a 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
          @@ -19,6 +19,7 @@
           
           import static org.junit.Assert.assertEquals;
           import static org.junit.Assert.assertTrue;
          +
           import java.io.IOException;
           import java.net.SocketAddress;
           import java.util.ArrayList;
          @@ -56,7 +57,9 @@
           import org.junit.rules.TestName;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
          +
           import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
          +
           import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
           
           /**
          @@ -67,7 +70,7 @@ public class TestWALEntrySinkFilter {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestWALEntrySinkFilter.class);
          +      HBaseClassTestRule.forClass(TestWALEntrySinkFilter.class);
           
             private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSink.class);
             @Rule
          @@ -106,18 +109,18 @@ public void testWALEntryFilter() throws IOException {
               Configuration conf = HBaseConfiguration.create();
               // Make it so our filter is instantiated on construction of ReplicationSink.
               conf.setClass(DummyConnectionRegistry.REGISTRY_IMPL_CONF_KEY, DevNullConnectionRegistry.class,
          -        DummyConnectionRegistry.class);
          +      DummyConnectionRegistry.class);
               conf.setClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY,
          -        IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl.class, WALEntrySinkFilter.class);
          +      IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl.class, WALEntrySinkFilter.class);
               conf.setClass(ClusterConnectionFactory.HBASE_SERVER_CLUSTER_CONNECTION_IMPL,
          -        DevNullAsyncClusterConnection.class, AsyncClusterConnection.class);
          +      DevNullAsyncClusterConnection.class, AsyncClusterConnection.class);
               ReplicationSink sink = new ReplicationSink(conf);
               // Create some dumb walentries.
               List entries = new ArrayList<>();
               AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder();
               // Need a tablename.
               ByteString tableName =
          -      ByteString.copyFromUtf8(TableName.valueOf(this.name.getMethodName()).toString());
          +        ByteString.copyFromUtf8(TableName.valueOf(this.name.getMethodName()).toString());
               // Add WALEdit Cells to Cells List. The way edits arrive at the sink is with protos
               // describing the edit with all Cells from all edits aggregated in a single CellScanner.
               final List cells = new ArrayList<>();
          @@ -127,8 +130,8 @@ public void testWALEntryFilter() throws IOException {
                 // Create a wal entry. Everything is set to the current index as bytes or int/long.
                 entryBuilder.clear();
                 entryBuilder.setKey(entryBuilder.getKeyBuilder().setLogSequenceNumber(i)
          -        .setEncodedRegionName(ByteString.copyFrom(bytes)).setWriteTime(i).setTableName(tableName)
          -        .build());
          +          .setEncodedRegionName(ByteString.copyFrom(bytes)).setWriteTime(i).setTableName(tableName)
          +          .build());
                 // Lets have one Cell associated with each WALEdit.
                 entryBuilder.setAssociatedCellCount(1);
                 entries.add(entryBuilder.build());
          @@ -136,7 +139,7 @@ public void testWALEntryFilter() throws IOException {
                 CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
                 // Make cells whose row, family, cell, value, and ts are == 'i'.
                 Cell cell = cellBuilder.setRow(bytes).setFamily(bytes).setQualifier(bytes)
          -        .setType(Cell.Type.Put).setTimestamp(i).setValue(bytes).build();
          +          .setType(Cell.Type.Put).setTimestamp(i).setValue(bytes).build();
                 cells.add(cell);
               }
               // Now wrap our cells array in a CellScanner that we can pass in to replicateEntries. It has
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCounts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCounts.java
          index bf4562014a0d..525b6fb7fb66 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCounts.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCounts.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -74,7 +74,7 @@ public void testDifferentCounts() throws Exception {
               log.rollWriter();
           
               try (WALEntryStream entryStream =
          -      new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
          +        new WALEntryStream(logQueue, CONF, 0, log, null, new MetricsSource("1"), fakeWalGroupId)) {
                 int i = 0;
                 while (entryStream.hasNext()) {
                   assertNotNull(entryStream.next());
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCountsAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCountsAsyncFSWAL.java
          index c734f7985ea7..d651727f489c 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCountsAsyncFSWAL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCountsAsyncFSWAL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -31,12 +31,11 @@
           
           @RunWith(Parameterized.class)
           @Category({ ReplicationTests.class, LargeTests.class })
          -public class TestWALEntryStreamDifferentCountsAsyncFSWAL
          -  extends TestWALEntryStreamDifferentCounts {
          +public class TestWALEntryStreamDifferentCountsAsyncFSWAL extends TestWALEntryStreamDifferentCounts {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestWALEntryStreamDifferentCountsAsyncFSWAL.class);
          +      HBaseClassTestRule.forClass(TestWALEntryStreamDifferentCountsAsyncFSWAL.class);
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCountsFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCountsFSHLog.java
          index 66dc00eaa41e..6efea1aad7ae 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCountsFSHLog.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStreamDifferentCountsFSHLog.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -35,7 +35,7 @@ public class TestWALEntryStreamDifferentCountsFSHLog extends TestWALEntryStreamD
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestWALEntryStreamDifferentCountsFSHLog.class);
          +      HBaseClassTestRule.forClass(TestWALEntryStreamDifferentCountsFSHLog.class);
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStreamTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStreamTestBase.java
          index dab425395b13..c5e184b321aa 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStreamTestBase.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStreamTestBase.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -65,7 +65,7 @@ public abstract class WALEntryStreamTestBase {
             protected static final byte[] family = Bytes.toBytes("column");
             protected static final byte[] qualifier = Bytes.toBytes("qualifier");
             protected static final RegionInfo info = RegionInfoBuilder.newBuilder(tableName)
          -    .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.LAST_ROW).build();
          +      .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.LAST_ROW).build();
             protected static final NavigableMap scopes = getScopes();
             protected final String fakeWalGroupId = "fake-wal-group-id";
           
          @@ -84,8 +84,8 @@ protected static class WALEntryStreamWithRetries extends WALEntryStream {
               private Entry result;
           
               public WALEntryStreamWithRetries(ReplicationSourceLogQueue logQueue, Configuration conf,
          -      long startPosition, WALFileLengthProvider walFileLengthProvider, ServerName serverName,
          -      MetricsSource metrics, String walGroupId) throws IOException {
          +        long startPosition, WALFileLengthProvider walFileLengthProvider, ServerName serverName,
          +        MetricsSource metrics, String walGroupId) throws IOException {
                 super(logQueue, conf, startPosition, walFileLengthProvider, serverName, metrics, walGroupId);
               }
           
          @@ -147,7 +147,7 @@ protected void initWAL() throws IOException {
               logQueue = new ReplicationSourceLogQueue(CONF, metricsSource, source);
               pathWatcher = new PathWatcher();
               final WALFactory wals =
          -      new WALFactory(CONF, TableNameTestRule.cleanUpTestName(tn.getMethodName()));
          +        new WALFactory(CONF, TableNameTestRule.cleanUpTestName(tn.getMethodName()));
               wals.getWALProvider().addWALActionsListener(pathWatcher);
               log = wals.getWAL(info);
             }
          @@ -168,14 +168,15 @@ protected void appendToLogAndSync(int count) throws IOException {
           
             protected long appendToLog(int count) throws IOException {
               return log.appendData(info, new WALKeyImpl(info.getEncodedNameAsBytes(), tableName,
          -      EnvironmentEdgeManager.currentTime(), mvcc, scopes), getWALEdits(count));
          +        EnvironmentEdgeManager.currentTime(), mvcc, scopes),
          +      getWALEdits(count));
             }
           
             protected WALEdit getWALEdits(int count) {
               WALEdit edit = new WALEdit();
               for (int i = 0; i < count; i++) {
                 edit.add(new KeyValue(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), family, qualifier,
          -        EnvironmentEdgeManager.currentTime(), qualifier));
          +          EnvironmentEdgeManager.currentTime(), qualifier));
               }
               return edit;
             }
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/EnableRSGroupsTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/EnableRSGroupsTestBase.java
          index 9611bafc2c9e..bc8a24cb977e 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/EnableRSGroupsTestBase.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/EnableRSGroupsTestBase.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -69,7 +69,7 @@ public void testEnableRSGroup() throws IOException, InterruptedException {
           
               // wait RSGroupBasedLoadBalancer online
               RSGroupBasedLoadBalancer loadBalancer =
          -      (RSGroupBasedLoadBalancer) TEST_UTIL.getMiniHBaseCluster().getMaster().getLoadBalancer();
          +        (RSGroupBasedLoadBalancer) TEST_UTIL.getMiniHBaseCluster().getMaster().getLoadBalancer();
               long start = EnvironmentEdgeManager.currentTime();
               while (EnvironmentEdgeManager.currentTime() - start <= 60000 && !loadBalancer.isOnline()) {
                 LOG.info("waiting for rsgroup load balancer onLine...");
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java
          index 6407799a1576..71aeb3ba12ad 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -29,7 +29,7 @@ public class TestEnableRSGroups extends EnableRSGroupsTestBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestEnableRSGroups.class);
          +      HBaseClassTestRule.forClass(TestEnableRSGroups.class);
           
             @Override
             protected void enableRSGroup(Configuration conf) {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroupsCompatibility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroupsCompatibility.java
          index 92faa5bd9d89..fe97b11ab1ef 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroupsCompatibility.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroupsCompatibility.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -35,7 +35,7 @@ public class TestEnableRSGroupsCompatibility extends EnableRSGroupsTestBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestEnableRSGroupsCompatibility.class);
          +      HBaseClassTestRule.forClass(TestEnableRSGroupsCompatibility.class);
           
             @Override
             protected void enableRSGroup(Configuration conf) {
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java
          index 8daddd5024ae..eb2eb66a010e 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestMigrateRSGroupInfo.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -58,7 +58,7 @@ public class TestMigrateRSGroupInfo extends TestRSGroupsBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestMigrateRSGroupInfo.class);
          +      HBaseClassTestRule.forClass(TestMigrateRSGroupInfo.class);
           
             private static String TABLE_NAME_PREFIX = "Table_";
           
          @@ -147,7 +147,7 @@ public void testMigrate() throws IOException, InterruptedException {
               // confirm that before migrating, we could still get the correct rs group for a table.
               for (int i = 0; i < NUM_TABLES; i++) {
                 RSGroupInfo info =
          -        RS_GROUP_ADMIN_CLIENT.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + i));
          +          RS_GROUP_ADMIN_CLIENT.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + i));
                 assertEquals(rsGroupInfo.getName(), info.getName());
                 assertEquals(NUM_TABLES, info.getTables().size());
               }
          @@ -172,18 +172,18 @@ public void testMigrate() throws IOException, InterruptedException {
                 try (Table table = TEST_UTIL.getConnection().getTable(RSGROUP_TABLE_NAME)) {
                   Result result = table.get(new Get(Bytes.toBytes(rsGroupInfo.getName())));
                   RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo
          -          .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES));
          +            .parseFrom(result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES));
                   RSGroupInfo gi = ProtobufUtil.toGroupInfo(proto);
                   return gi.getTables().isEmpty();
                 }
               });
               // make sure that the migrate thread has quit.
               TEST_UTIL.waitFor(30000, () -> Thread.getAllStackTraces().keySet().stream()
          -      .noneMatch(t -> t.getName().equals(RSGroupInfoManagerImpl.MIGRATE_THREAD_NAME)));
          +        .noneMatch(t -> t.getName().equals(RSGroupInfoManagerImpl.MIGRATE_THREAD_NAME)));
               // make sure we could still get the correct rs group info after migration
               for (int i = 0; i < NUM_TABLES; i++) {
                 RSGroupInfo info =
          -        RS_GROUP_ADMIN_CLIENT.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + i));
          +          RS_GROUP_ADMIN_CLIENT.getRSGroupInfoOfTable(TableName.valueOf(TABLE_NAME_PREFIX + i));
                 assertEquals(rsGroupInfo.getName(), info.getName());
                 assertEquals(NUM_TABLES, info.getTables().size());
               }
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupConfig.java
          index bfee68ed5867..df45f1fba2e6 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupConfig.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupConfig.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java
          index c267bdb55cfc..46db4757e664 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMajorCompactionTTL.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -60,12 +60,12 @@ public void setUp() throws Exception {
               SingleProcessHBaseCluster cluster = utility.getHBaseCluster();
               final HMaster master = cluster.getMaster();
           
          -    //wait for balancer to come online
          +    // wait for balancer to come online
               utility.waitFor(60000, new Waiter.Predicate() {
                 @Override
                 public boolean evaluate() {
          -        return master.isInitialized() &&
          -            ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline();
          +        return master.isInitialized()
          +            && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline();
                 }
               });
               admin = utility.getAdmin();
          @@ -94,8 +94,8 @@ public void testCompactingTables() throws Exception {
               }
           
               RSGroupMajorCompactionTTL compactor = new RSGroupMajorCompactionTTL();
          -    compactor.compactTTLRegionsOnGroup(utility.getConfiguration(),
          -        RSGroupInfo.DEFAULT_GROUP, 1, 200, -1, -1, false, false);
          +    compactor.compactTTLRegionsOnGroup(utility.getConfiguration(), RSGroupInfo.DEFAULT_GROUP, 1,
          +      200, -1, -1, false, false);
           
               for (TableName tableName : tableNames) {
                 int numberOfRegions = admin.getRegions(tableName).size();
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java
          index 974b567fc2fc..3fb765fd9fb2 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -17,7 +17,6 @@
            */
           package org.apache.hadoop.hbase.rsgroup;
           
          -
           import java.io.BufferedReader;
           import java.io.File;
           import java.io.FileOutputStream;
          @@ -45,7 +44,7 @@ public class TestRSGroupMappingScript {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestRSGroupMappingScript.class);
          +      HBaseClassTestRule.forClass(TestRSGroupMappingScript.class);
             private static final Logger LOG = LoggerFactory.getLogger(TestRSGroupMappingScript.class);
           
             private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
          @@ -54,10 +53,8 @@ public class TestRSGroupMappingScript {
             @BeforeClass
             public static void setupScript() throws Exception {
               String currentDir = new File("").getAbsolutePath();
          -    UTIL.getConfiguration().set(
          -      RSGroupInfoManagerImpl.RSGroupMappingScript.RS_GROUP_MAPPING_SCRIPT,
          -      currentDir + "/rsgroup_table_mapping.sh"
          -    );
          +    UTIL.getConfiguration().set(RSGroupInfoManagerImpl.RSGroupMappingScript.RS_GROUP_MAPPING_SCRIPT,
          +      currentDir + "/rsgroup_table_mapping.sh");
             }
           
             @Before
          @@ -84,7 +81,7 @@ public void setup() throws Exception {
                 pw.close();
               }
               boolean executable = script.setExecutable(true);
          -    LOG.info("Created " + script  + ", executable=" + executable);
          +    LOG.info("Created " + script + ", executable=" + executable);
               verifyScriptContent(script);
             }
           
          @@ -99,23 +96,18 @@ private void verifyScriptContent(File file) throws Exception {
             @Test
             public void testScript() throws Exception {
               RSGroupMappingScript script = new RSGroupMappingScript(UTIL.getConfiguration());
          -    TableName testNamespace =
          -      TableName.valueOf("test", "should_be_in_test");
          -    String rsgroup = script.getRSGroup(
          -      testNamespace.getNamespaceAsString(), testNamespace.getQualifierAsString()
          -    );
          +    TableName testNamespace = TableName.valueOf("test", "should_be_in_test");
          +    String rsgroup = script.getRSGroup(testNamespace.getNamespaceAsString(),
          +      testNamespace.getQualifierAsString());
               Assert.assertEquals("test", rsgroup);
           
          -    TableName otherName =
          -      TableName.valueOf("whatever", "oh_foo_should_be_in_other");
          +    TableName otherName = TableName.valueOf("whatever", "oh_foo_should_be_in_other");
               rsgroup = script.getRSGroup(otherName.getNamespaceAsString(), otherName.getQualifierAsString());
               Assert.assertEquals("other", rsgroup);
           
          -    TableName defaultName =
          -      TableName.valueOf("nono", "should_be_in_default");
          -    rsgroup = script.getRSGroup(
          -      defaultName.getNamespaceAsString(), defaultName.getQualifierAsString()
          -    );
          +    TableName defaultName = TableName.valueOf("nono", "should_be_in_default");
          +    rsgroup =
          +        script.getRSGroup(defaultName.getNamespaceAsString(), defaultName.getQualifierAsString());
               Assert.assertEquals("default", rsgroup);
             }
           
          diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
          index 87cb76e20a6d..794ca3221578 100644
          --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
          +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin1.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -62,7 +62,7 @@ public class TestRSGroupsAdmin1 extends TestRSGroupsBase {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestRSGroupsAdmin1.class);
          +      HBaseClassTestRule.forClass(TestRSGroupsAdmin1.class);
           
             private static final Logger LOG = LoggerFactory.getLogger(TestRSGroupsAdmin1.class);
           
          @@ -152,7 +152,7 @@ public void testNamespaceConstraint() throws Exception {
               assertTrue(OBSERVER.postAddRSGroupCalled);
           
               ADMIN.createNamespace(NamespaceDescriptor.create(nsName)
          -      .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, groupName).build());
          +        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, groupName).build());
               RSGroupInfo rsGroupInfo = ADMIN.getRSGroup(groupName);
               ADMIN.moveServersToRSGroup(rsGroupInfo.getServers(), RSGroupInfo.DEFAULT_GROUP);
               // test removing a referenced group
          @@ -164,7 +164,7 @@ public void testNamespaceConstraint() throws Exception {
               // test modify group
               // changing with the same name is fine
               ADMIN.modifyNamespace(NamespaceDescriptor.create(nsName)
          -      .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, groupName).build());
          +        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, groupName).build());
               String anotherGroup = TABLE_PREFIX + "_anotherGroup";
               ADMIN.addRSGroup(anotherGroup);
               // test add non-existent group
          @@ -174,7 +174,7 @@ public void testNamespaceConstraint() throws Exception {
               assertTrue(OBSERVER.postRemoveRSGroupCalled);
               try {
                 ADMIN.createNamespace(NamespaceDescriptor.create(nsName)
          -        .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "foo").build());
          +          .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "foo").build());
                 fail("Expected a constraint exception");
               } catch (IOException ex) {
               }
          @@ -238,9 +238,9 @@ public void testFailRemoveGroup() throws IOException, InterruptedException {
             @Test
             public void testMultiTableMove() throws Exception {
               final TableName tableNameA =
          -      TableName.valueOf(TABLE_PREFIX + getNameWithoutIndex(name.getMethodName()) + "A");
          +        TableName.valueOf(TABLE_PREFIX + getNameWithoutIndex(name.getMethodName()) + "A");
               final TableName tableNameB =
          -      TableName.valueOf(TABLE_PREFIX + getNameWithoutIndex(name.getMethodName()) + "B");
          +        TableName.valueOf(TABLE_PREFIX + getNameWithoutIndex(name.getMethodName()) + "B");
               final byte[] familyNameBytes = Bytes.toBytes("f");
               String newGroupName = getGroupName(getNameWithoutIndex(name.getMethodName()));
               final RSGroupInfo newGroup = addGroup(newGroupName, 1);
          @@ -258,8 +258,8 @@ public boolean evaluate() throws Exception {
                   if (regionsB == null) {
                     return false;
                   }
          -        return getTableRegionMap().get(tableNameA).size() >= 1 &&
          -          getTableRegionMap().get(tableNameB).size() >= 1;
          +        return getTableRegionMap().get(tableNameA).size() >= 1
          +            && getTableRegionMap().get(tableNameB).size() >= 1;
                 }
               });
           
          @@ -279,7 +279,7 @@ public boolean evaluate() throws Exception {
           
               // verify tables' not exist in old group
               Set defaultTables =
          -      Sets.newHashSet(ADMIN.listTablesInRSGroup(RSGroupInfo.DEFAULT_GROUP));
          +        Sets.newHashSet(ADMIN.listTablesInRSGroup(RSGroupInfo.DEFAULT_GROUP));
               assertFalse(defaultTables.contains(tableNameA));
               assertFalse(defaultTables.contains(tableNameB));
           
          @@ -382,7 +382,7 @@ public boolean evaluate() throws Exception {
             @Test
             public void testNonExistentTableMove() throws Exception {
               TableName tableName =
          -      TableName.valueOf(TABLE_PREFIX + getNameWithoutIndex(name.getMethodName()));
          +        TableName.valueOf(TABLE_PREFIX + getNameWithoutIndex(name.getMethodName()));
               RSGroupInfo tableGrp = ADMIN.getRSGroup(tableName);
               assertNull(tableGrp);
           
          @@ -415,19 +415,19 @@ public void testRSGroupListDoesNotContainFailedTableCreation() throws Exception
               toggleQuotaCheckAndRestartMiniCluster(true);
               String nsp = "np1";
               NamespaceDescriptor nspDesc =
          -      NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
          -        .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
          +        NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
          +            .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
               ADMIN.createNamespace(nspDesc);
               assertEquals(3, ADMIN.listNamespaceDescriptors().length);
               ColumnFamilyDescriptor fam1 = ColumnFamilyDescriptorBuilder.of("fam1");
               TableDescriptor tableDescOne = TableDescriptorBuilder
          -      .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"))
          -      .setColumnFamily(fam1).build();
          +        .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"))
          +        .setColumnFamily(fam1).build();
               ADMIN.createTable(tableDescOne);
           
               TableDescriptor tableDescTwo = TableDescriptorBuilder
          -      .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"))
          -      .setColumnFamily(fam1).build();
          +        .newBuilder(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"))
          +        .setColumnFamily(fam1).build();
               boolean constraintViolated = false;
           
               try {
          @@ -445,7 +445,7 @@ public void testRSGroupListDoesNotContainFailedTableCreation() throws Exception
               boolean foundTable1 = false;
               for (int i = 0; i < rsGroupInfoList.size(); i++) {
                 Set tables =
          -        Sets.newHashSet(ADMIN.listTablesInRSGroup(rsGroupInfoList.get(i).getName()));
          +          Sets.newHashSet(ADMIN.listTablesInRSGroup(rsGroupInfoList.get(i).getName()));
                 if (tables.contains(tableDescTwo.getTableName())) {
                   foundTable2 = true;
                 }
          @@ -500,8 +500,7 @@ public void testRenameRSGroup() throws Exception {
               TEST_UTIL.createTable(tb1, "tr");
               ADMIN.setRSGroup(Sets.newHashSet(tb1), oldgroup.getName());
               TEST_UTIL.waitFor(1000,
          -      (Waiter.Predicate) () ->
          -        ADMIN.getRSGroup(tb1).getServers().size() == 2);
          +      (Waiter.Predicate) () -> ADMIN.getRSGroup(tb1).getServers().size() == 2);
               oldgroup = ADMIN.getRSGroup(oldgroup.getName());
               assertEquals(2, oldgroup.getServers().size());
               assertEquals(oldgroup.getName(), ADMIN.getRSGroup(tb1).getName());
          @@ -513,13 +512,11 @@ public void testRenameRSGroup() throws Exception {
               TEST_UTIL.createTable(tb2, "ut");
               ADMIN.setRSGroup(Sets.newHashSet(tb2), normal.getName());
               TEST_UTIL.waitFor(1000,
          -      (Waiter.Predicate) () ->
          -        ADMIN.getRSGroup(tb2).getServers().size() == 1);
          +      (Waiter.Predicate) () -> ADMIN.getRSGroup(tb2).getServers().size() == 1);
               normal = ADMIN.getRSGroup(normal.getName());
               assertEquals(1, normal.getServers().size());
               assertEquals(normal.getName(), ADMIN.getRSGroup(tb2).getName());
           
          -
               // Rename rsgroup
               ADMIN.renameRSGroup(oldgroup.getName(), "newgroup");
     Set servers = oldgroup.getServers();
@@ -545,43 +542,42 @@ public void testRenameRSGroupConstraints() throws Exception {
     assertNotNull(oldGroup);
     assertEquals(2, oldGroup.getServers().size());
 
-    //Add another RSGroup
+    // Add another RSGroup
     String anotherRSGroupName = "anotherRSGroup";
     RSGroupInfo anotherGroup = addGroup(anotherRSGroupName, 1);
     anotherGroup = ADMIN.getRSGroup(anotherGroup.getName());
     assertNotNull(anotherGroup);
     assertEquals(1, anotherGroup.getServers().size());
 
-
-    //Rename a non existing RSGroup
+    // Rename a non existing RSGroup
     try {
       ADMIN.renameRSGroup("nonExistingRSGroup", "newRSGroup1");
       fail("ConstraintException was expected.");
-    } catch (ConstraintException e){
+    } catch (ConstraintException e) {
       assertTrue(e.getMessage().contains("does not exist"));
     }
 
-    //Rename to existing group
+    // Rename to existing group
     try {
       ADMIN.renameRSGroup(oldGroup.getName(), anotherRSGroupName);
       fail("ConstraintException was expected.");
-    } catch (ConstraintException e){
+    } catch (ConstraintException e) {
       assertTrue(e.getMessage().contains("Group already exists"));
     }
 
-    //Rename default RSGroup
+    // Rename default RSGroup
    try {
       ADMIN.renameRSGroup(RSGroupInfo.DEFAULT_GROUP, "newRSGroup2");
       fail("ConstraintException was expected.");
-    } catch (ConstraintException e){
-      //Do nothing
+    } catch (ConstraintException e) {
+      // Do nothing
     }
 
-    //Rename to default RSGroup
+    // Rename to default RSGroup
     try {
       ADMIN.renameRSGroup(oldGroup.getName(), RSGroupInfo.DEFAULT_GROUP);
       fail("ConstraintException was expected.");
-    } catch (ConstraintException e){
+    } catch (ConstraintException e) {
       assertTrue(e.getMessage().contains("Group already exists"));
     }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
index 16c27af94cc4..f14e59ff22a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -67,7 +67,7 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestRSGroupsAdmin2.class);
+      HBaseClassTestRule.forClass(TestRSGroupsAdmin2.class);
 
   private static final Logger LOG = LoggerFactory.getLogger(TestRSGroupsAdmin2.class);
 
@@ -121,7 +121,7 @@ public boolean evaluate() throws Exception {
     // get server which is not a member of new group
     ServerName tmpTargetServer = null;
     for (ServerName server : ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-      .getLiveServerMetrics().keySet()) {
+        .getLiveServerMetrics().keySet()) {
       if (!newGroup.containsServer(server.getAddress())) {
         tmpTargetServer = server;
         break;
@@ -138,15 +138,15 @@ public boolean evaluate() throws Exception {
     });
 
     // Lets move this region to the new group.
-    TEST_UTIL.getAdmin()
-      .move(Bytes.toBytes(RegionInfo.encodeRegionName(Bytes.toBytes(targetRegion))), targetServer);
+    TEST_UTIL.getAdmin().move(
+      Bytes.toBytes(RegionInfo.encodeRegionName(Bytes.toBytes(targetRegion))), targetServer);
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() {
       @Override
       public boolean evaluate() throws Exception {
-        return getTableRegionMap().get(tableName) != null &&
-          getTableRegionMap().get(tableName).size() == 6 &&
-          ADMIN.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION))
-            .getRegionStatesInTransition().size() < 1;
+        return getTableRegionMap().get(tableName) != null
+          && getTableRegionMap().get(tableName).size() == 6
+          && ADMIN.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION))
+            .getRegionStatesInTransition().size() < 1;
       }
     });
 
@@ -240,7 +240,7 @@ public void testRemoveServers() throws Exception {
       fail("Online servers shouldn't have been successfully removed.");
     } catch (IOException ex) {
       String exp =
-        "Server " + targetServer.getAddress() + " is an online server, not allowed to remove.";
+          "Server " + targetServer.getAddress() + " is an online server, not allowed to remove.";
       String msg = "Expected '" + exp + "' in exception message: ";
       assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp));
     }
@@ -261,8 +261,8 @@ public void testRemoveServers() throws Exception {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() {
       @Override
       public boolean evaluate() throws Exception {
-        return !MASTER.getServerManager().areDeadServersInProgress() &&
-          CLUSTER.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS;
+        return !MASTER.getServerManager().areDeadServersInProgress()
+          && CLUSTER.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS;
       }
     });
 
@@ -270,8 +270,8 @@ public boolean evaluate() throws Exception {
       ADMIN.removeServersFromRSGroup(Sets.newHashSet(targetServer.getAddress()));
       fail("Dead servers shouldn't have been successfully removed.");
     } catch (IOException ex) {
-      String exp = "Server " + targetServer.getAddress() + " is on the dead servers list," +
-        " Maybe it will come back again, not allowed to remove.";
+      String exp = "Server " + targetServer.getAddress() + " is on the dead servers list,"
+        + " Maybe it will come back again, not allowed to remove.";
       String msg = "Expected '" + exp + "' in exception message: ";
       assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp));
     }
@@ -318,9 +318,9 @@ public boolean evaluate() throws Exception {
     // get server which is not a member of new group
     ServerName targetServer = null;
     for (ServerName server : ADMIN.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-      .getLiveServerMetrics().keySet()) {
-      if (!newGroup.containsServer(server.getAddress()) &&
-        !ADMIN.getRSGroup("master").containsServer(server.getAddress())) {
+        .getLiveServerMetrics().keySet()) {
+      if (!newGroup.containsServer(server.getAddress())
+          && !ADMIN.getRSGroup("master").containsServer(server.getAddress())) {
         targetServer = server;
         break;
       }
@@ -373,11 +373,11 @@ public boolean evaluate() throws Exception {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() {
       @Override
       public boolean evaluate() throws Exception {
-        return getTableRegionMap().get(tableName) != null &&
-          getTableRegionMap().get(tableName).size() == 5 &&
-          getTableServerRegionMap().get(tableName).size() == 1 &&
-          ADMIN.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION))
-            .getRegionStatesInTransition().size() < 1;
+        return getTableRegionMap().get(tableName) != null
+          && getTableRegionMap().get(tableName).size() == 5
+          && getTableServerRegionMap().get(tableName).size() == 1
+          && ADMIN.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION))
+            .getRegionStatesInTransition().size() < 1;
       }
     });
 
@@ -402,12 +402,12 @@ public boolean evaluate() throws Exception {
 
     // verify tables' not exist in old group
     Set defaultTables =
-      Sets.newHashSet(ADMIN.listTablesInRSGroup(RSGroupInfo.DEFAULT_GROUP));
+        Sets.newHashSet(ADMIN.listTablesInRSGroup(RSGroupInfo.DEFAULT_GROUP));
     assertFalse(defaultTables.contains(tableName));
 
     // verify tables' exist in new group
-    Set newGroupTables = Sets
-      .newHashSet(ADMIN.getConfiguredNamespacesAndTablesInRSGroup(newGroup.getName()).getSecond());
+    Set newGroupTables = Sets.newHashSet(
+      ADMIN.getConfiguredNamespacesAndTablesInRSGroup(newGroup.getName()).getSecond());
     assertTrue(newGroupTables.contains(tableName));
 
     // verify that all region still assign on targetServer
@@ -440,8 +440,8 @@ public void testMoveServersFromDefaultGroup() throws Exception {
     // test success case, remove one server from default ,keep at least one server
     if (defaultGroup.getServers().size() > 1) {
       Address serverInDefaultGroup = defaultGroup.getServers().iterator().next();
-      LOG.info("moving server " + serverInDefaultGroup + " from group default to group " +
-        fooGroup.getName());
+      LOG.info("moving server " + serverInDefaultGroup + " from group default to group "
+        + fooGroup.getName());
       ADMIN.moveServersToRSGroup(Sets.newHashSet(serverInDefaultGroup), fooGroup.getName());
     }
 
@@ -498,8 +498,8 @@ public void testFailedMoveBeforeRetryExhaustedWhenMoveServer() throws Exception
       @Override
       public boolean evaluate() {
        if (changed.get()) {
-          return MASTER.getAssignmentManager().getRegionsOnServer(movedServer).size() == 0 &&
-            !rsn.getRegionLocation().equals(movedServer);
+          return MASTER.getAssignmentManager().getRegionsOnServer(movedServer).size() == 0
+            && !rsn.getRegionLocation().equals(movedServer);
        }
        return false;
       }
@@ -507,15 +507,15 @@ public boolean evaluate() {
   }
 
   private Thread recoverRegionStateThread(T owner, Function> getRegions,
-    RegionStateNode rsn, AtomicBoolean changed) {
+      RegionStateNode rsn, AtomicBoolean changed) {
     return new Thread(() -> {
       LOG.info("thread1 start running, will recover region state");
       long current = EnvironmentEdgeManager.currentTime();
       // wait until there is only left the region we changed state and recover its state.
       // wait time is set according to the number of max retries, all except failed regions will be
       // moved in one retry, and will sleep 1s until next retry.
-      while (EnvironmentEdgeManager.currentTime() -
-        current <= RSGroupInfoManagerImpl.DEFAULT_MAX_RETRY_VALUE * 1000) {
+      while (EnvironmentEdgeManager.currentTime()
+        - current <= RSGroupInfoManagerImpl.DEFAULT_MAX_RETRY_VALUE * 1000) {
        List regions = getRegions.apply(owner);
        LOG.debug("server table region size is:{}", regions.size());
        assert regions.size() >= 1;
@@ -535,7 +535,7 @@ private Thread recoverRegionStateThread(T owner, Function
 
   private Pair createTableWithRegionSplitting(RSGroupInfo rsGroupInfo,
-    int tableRegionCount) throws Exception {
+      int tableRegionCount) throws Exception {
     final byte[] familyNameBytes = Bytes.toBytes("f");
     // All the regions created below will be assigned to the default group.
     TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, tableRegionCount);
 
@@ -560,14 +560,14 @@ public boolean evaluate() throws Exception {
    * @throws IOException if methods called throw
    */
   private Pair
-    randomlySetOneRegionStateToSplitting(RSGroupInfo newGroup) throws IOException {
+      randomlySetOneRegionStateToSplitting(RSGroupInfo newGroup) throws IOException {
     // get target server to move, which should has more than one regions
     // randomly set a region state to SPLITTING to make move fail
     return randomlySetRegionState(newGroup, RegionState.State.SPLITTING, tableName);
   }
 
   private Pair randomlySetRegionState(RSGroupInfo groupInfo,
-    RegionState.State state, TableName... tableNames) throws IOException {
+      RegionState.State state, TableName... tableNames) throws IOException {
     Preconditions.checkArgument(tableNames.length == 1 || tableNames.length == 2,
       "only support one or two tables");
     Map>> tableServerRegionMap = getTableServerRegionMap();
@@ -584,9 +584,9 @@ private Pair randomlySetRegionState(RSGroupInfo gro
     ServerName srcServer = null;
     for (ServerName server : assignMap.keySet()) {
       toCorrectRegionName =
-        assignMap.get(server).size() >= 1 && !groupInfo.containsServer(server.getAddress())
-          ? assignMap.get(server).get(0)
-          : null;
+          assignMap.get(server).size() >= 1 && !groupInfo.containsServer(server.getAddress())
+            ? assignMap.get(server).get(0)
+            : null;
       if (toCorrectRegionName != null) {
         srcServer = server;
         break;
@@ -594,9 +594,9 @@ private Pair randomlySetRegionState(RSGroupInfo gro
     }
     assert srcServer != null;
     RegionInfo toCorrectRegionInfo = TEST_UTIL.getMiniHBaseCluster().getMaster()
-      .getAssignmentManager().getRegionInfo(Bytes.toBytesBinary(toCorrectRegionName));
+        .getAssignmentManager().getRegionInfo(Bytes.toBytesBinary(toCorrectRegionName));
     RegionStateNode rsn = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
-      .getRegionStates().getRegionStateNode(toCorrectRegionInfo);
+        .getRegionStates().getRegionStateNode(toCorrectRegionInfo);
     rsn.setState(state);
     return new Pair<>(srcServer, rsn);
   }
@@ -613,15 +613,15 @@ public void testFailedMoveServersAndRepair() throws Exception {
     // create table
     // randomly set a region state to SPLITTING to make move abort
     Pair gotPair =
-      createTableWithRegionSplitting(newGroup, ThreadLocalRandom.current().nextInt(8) + 4);
+        createTableWithRegionSplitting(newGroup, ThreadLocalRandom.current().nextInt(8) + 4);
     RegionStateNode rsn = gotPair.getSecond();
     ServerName srcServer = rsn.getRegionLocation();
 
     // move server to newGroup and check regions
     try {
       ADMIN.moveServersToRSGroup(Sets.newHashSet(srcServer.getAddress()), newGroup.getName());
-      fail("should get IOException when retry exhausted but there still exists failed moved " +
-        "regions");
+      fail("should get IOException when retry exhausted but there still exists failed moved "
+        + "regions");
     } catch (Exception e) {
       assertTrue(
         e.getMessage().contains(gotPair.getSecond().getRegionInfo().getRegionNameAsString()));
@@ -658,7 +658,7 @@ public void testFailedMoveServersTablesAndRepair() throws Exception {
 
     // randomly set a region state to SPLITTING to make move abort
     Pair gotPair =
-      randomlySetRegionState(newGroup, RegionState.State.SPLITTING, table1, table2);
+        randomlySetRegionState(newGroup, RegionState.State.SPLITTING, table1, table2);
     RegionStateNode rsn = gotPair.getSecond();
     ServerName srcServer = rsn.getRegionLocation();
 
@@ -666,8 +666,8 @@ public void testFailedMoveServersTablesAndRepair() throws Exception {
     try {
ADMIN.moveServersToRSGroup(Sets.newHashSet(srcServer.getAddress()), newGroup.getName()); ADMIN.setRSGroup(Sets.newHashSet(table2), newGroup.getName()); - fail("should get IOException when retry exhausted but there still exists failed moved " + - "regions"); + fail("should get IOException when retry exhausted but there still exists failed moved " + + "regions"); } catch (Exception e) { assertTrue( e.getMessage().contains(gotPair.getSecond().getRegionInfo().getRegionNameAsString())); @@ -699,7 +699,8 @@ public void testMoveServersToRSGroupPerformance() throws Exception { // All the regions created below will be assigned to the default group. TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, tableRegionCount); TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { - @Override public boolean evaluate() throws Exception { + @Override + public boolean evaluate() throws Exception { List regions = getTableRegionMap().get(tableName); if (regions == null) { return false; @@ -716,10 +717,10 @@ public void testMoveServersToRSGroupPerformance() throws Exception { ADMIN.moveServersToRSGroup(Sets.newHashSet(newGroup.getServers().first()), rsGroup2); long timeTaken = EnvironmentEdgeManager.currentTime() - startTime; String msg = - "Should not take mote than 15000 ms to move a table with 100 regions. Time taken =" - + timeTaken + " ms"; - //This test case is meant to be used for verifying the performance quickly by a developer. - //Moving 100 regions takes much less than 15000 ms. Given 15000 ms so test cases passes + "Should not take mote than 15000 ms to move a table with 100 regions. Time taken =" + + timeTaken + " ms"; + // This test case is meant to be used for verifying the performance quickly by a developer. + // Moving 100 regions takes much less than 15000 ms. Given 15000 ms so test cases passes // on all environment. assertTrue(msg, timeTaken < 15000); LOG.info("Time taken to move a table with 100 region is {} ms", timeTaken); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java index 12b83784b8ab..5a29eed7c586 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBalance.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ public class TestRSGroupsBalance extends TestRSGroupsBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupsBalance.class); + HBaseClassTestRule.forClass(TestRSGroupsBalance.class); protected static final Logger LOG = LoggerFactory.getLogger(TestRSGroupsBalance.class); @@ -126,10 +126,11 @@ public void testGroupDryRunBalance() throws Exception { ServerName first = setupBalanceTest(newGroupName, tableName); - // run the balancer in dry run mode. it should return true, but should not actually move any regions + // run the balancer in dry run mode. 
it should return true, but should not actually move any + // regions ADMIN.balancerSwitch(true, true); - BalanceResponse response = ADMIN.balanceRSGroup(newGroupName, - BalanceRequest.newBuilder().setDryRun(true).build()); + BalanceResponse response = + ADMIN.balanceRSGroup(newGroupName, BalanceRequest.newBuilder().setDryRun(true).build()); assertTrue(response.isBalancerRan()); assertTrue(response.getMovesCalculated() > 0); assertEquals(0, response.getMovesExecuted()); @@ -141,9 +142,9 @@ private ServerName setupBalanceTest(String newGroupName, TableName tableName) th addGroup(newGroupName, 3); ADMIN.createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()) - .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, newGroupName).build()); + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, newGroupName).build()); final TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); byte[] startKey = Bytes.toBytes("aaaaa"); byte[] endKey = Bytes.toBytes("zzzzz"); ADMIN.createTable(desc, startKey, endKey, 6); @@ -188,15 +189,15 @@ public boolean evaluate() throws Exception { public void testMisplacedRegions() throws Exception { String namespace = TABLE_PREFIX + "_" + getNameWithoutIndex(name.getMethodName()); TEST_UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build()); - final TableName tableName = - TableName.valueOf(namespace, TABLE_PREFIX + "_" + getNameWithoutIndex(name.getMethodName())); + final TableName tableName = TableName.valueOf(namespace, + TABLE_PREFIX + "_" + getNameWithoutIndex(name.getMethodName())); final RSGroupInfo rsGroupInfo = addGroup(getGroupName(name.getMethodName()), 1); TEST_UTIL.createMultiRegionTable(tableName, new byte[] { 'f' }, 15); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); TEST_UTIL.getAdmin().modifyNamespace(NamespaceDescriptor.create(namespace) - .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, rsGroupInfo.getName()).build()); + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, rsGroupInfo.getName()).build()); ADMIN.balancerSwitch(true, true); assertTrue(ADMIN.balanceRSGroup(rsGroupInfo.getName()).isBalancerRan()); @@ -208,13 +209,14 @@ public void testMisplacedRegions() throws Exception { @Override public boolean evaluate() throws Exception { ServerName serverName = - ServerName.valueOf(rsGroupInfo.getServers().iterator().next().toString(), 1); + ServerName.valueOf(rsGroupInfo.getServers().iterator().next().toString(), 1); return ADMIN.getConnection().getAdmin().getRegions(serverName).size() == 15; } }); } - @Test public void testGetRSGroupAssignmentsByTable() throws Exception { + @Test + public void testGetRSGroupAssignmentsByTable() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TEST_UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY, 10); // disable table @@ -225,7 +227,7 @@ public boolean evaluate() throws Exception { HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); RSGroupInfoManagerImpl gm = (RSGroupInfoManagerImpl) master.getRSGroupInfoManager(); Map>> assignments = - gm.getRSGroupAssignmentsByTable(master.getTableStateManager(), RSGroupInfo.DEFAULT_GROUP); + gm.getRSGroupAssignmentsByTable(master.getTableStateManager(), RSGroupInfo.DEFAULT_GROUP); assertFalse(assignments.containsKey(disableTableName)); assertTrue(assignments.containsKey(tableName)); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java index d0521c5ae4f4..7fe097e883c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -102,14 +102,13 @@ public static void setUpTestBeforeClass() throws Exception { } if (conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) != null) { conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) + "," + - CPMasterObserver.class.getName()); + conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) + "," + + CPMasterObserver.class.getName()); } else { conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, CPMasterObserver.class.getName()); } - conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, - NUM_SLAVES_BASE - 1); + conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, NUM_SLAVES_BASE - 1); conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); conf.setInt("hbase.rpc.timeout", 100000); @@ -126,8 +125,8 @@ protected static void initialize() throws Exception { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return MASTER.isInitialized() && - ((RSGroupBasedLoadBalancer) MASTER.getLoadBalancer()).isOnline(); + return MASTER.isInitialized() + && ((RSGroupBasedLoadBalancer) MASTER.getLoadBalancer()).isOnline(); } }); ADMIN.balancerSwitch(false, true); @@ -185,7 +184,7 @@ public boolean evaluate() throws Exception { } protected final RSGroupInfo addGroup(String groupName, int serverCount) - throws IOException, InterruptedException { + throws IOException, InterruptedException { RSGroupInfo defaultInfo = ADMIN.getRSGroup(RSGroupInfo.DEFAULT_GROUP); ADMIN.addRSGroup(groupName); Set
          set = new HashSet<>(); @@ -222,7 +221,7 @@ protected final void removeGroup(String groupName) throws IOException { protected final void deleteTableIfNecessary() throws IOException { for (TableDescriptor desc : TEST_UTIL.getAdmin() - .listTableDescriptors(Pattern.compile(TABLE_PREFIX + ".*"))) { + .listTableDescriptors(Pattern.compile(TABLE_PREFIX + ".*"))) { TEST_UTIL.deleteTable(desc.getTableName()); } } @@ -258,16 +257,17 @@ protected Map> getTableRegionMap() throws IOException { } protected Map>> getTableServerRegionMap() - throws IOException { + throws IOException { Map>> map = Maps.newTreeMap(); Admin admin = TEST_UTIL.getAdmin(); ClusterMetrics metrics = - admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.SERVERS_NAME)); + admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.SERVERS_NAME)); for (ServerName serverName : metrics.getServersName()) { for (RegionInfo region : admin.getRegions(serverName)) { TableName tableName = region.getTable(); map.computeIfAbsent(tableName, k -> new TreeMap<>()) - .computeIfAbsent(serverName, k -> new ArrayList<>()).add(region.getRegionNameAsString()); + .computeIfAbsent(serverName, k -> new ArrayList<>()) + .add(region.getRegionNameAsString()); } } return map; @@ -287,8 +287,8 @@ protected int getNumServers() throws IOException { } protected final String getGroupName(String baseName) { - return GROUP_PREFIX + "_" + getNameWithoutIndex(baseName) + "_" + - ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE); + return GROUP_PREFIX + "_" + getNameWithoutIndex(baseName) + "_" + + ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE); } /** @@ -297,8 +297,8 @@ protected final String getGroupName(String baseName) { */ protected final ServerName getServerName(Address addr) { return TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer().getServerName()).filter(sn -> sn.getAddress().equals(addr)) - .findFirst().get(); + .map(t -> t.getRegionServer().getServerName()).filter(sn -> sn.getAddress().equals(addr)) + .findFirst().get(); } protected final void toggleQuotaCheckAndRestartMiniCluster(boolean enable) throws Exception { @@ -387,73 +387,73 @@ public Optional getMasterObserver() { @Override public void preMoveServersAndTables(final ObserverContext ctx, - Set
          servers, Set tables, String targetGroup) throws IOException { + Set
          servers, Set tables, String targetGroup) throws IOException { preMoveServersAndTables = true; } @Override public void postMoveServersAndTables(final ObserverContext ctx, - Set
          servers, Set tables, String targetGroup) throws IOException { + Set
          servers, Set tables, String targetGroup) throws IOException { postMoveServersAndTables = true; } @Override public void preRemoveServers(final ObserverContext ctx, - Set
          servers) throws IOException { + Set
          servers) throws IOException { preRemoveServersCalled = true; } @Override public void postRemoveServers(final ObserverContext ctx, - Set
          servers) throws IOException { + Set
          servers) throws IOException { postRemoveServersCalled = true; } @Override public void preRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException { + String name) throws IOException { preRemoveRSGroupCalled = true; } @Override public void postRemoveRSGroup(final ObserverContext ctx, - String name) throws IOException { + String name) throws IOException { postRemoveRSGroupCalled = true; } @Override public void preAddRSGroup(final ObserverContext ctx, String name) - throws IOException { + throws IOException { preAddRSGroupCalled = true; } @Override public void postAddRSGroup(final ObserverContext ctx, String name) - throws IOException { + throws IOException { postAddRSGroupCalled = true; } @Override public void preMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException { + Set tables, String targetGroup) throws IOException { preMoveTablesCalled = true; } @Override public void postMoveTables(final ObserverContext ctx, - Set tables, String targetGroup) throws IOException { + Set tables, String targetGroup) throws IOException { postMoveTablesCalled = true; } @Override public void preMoveServers(final ObserverContext ctx, - Set
          servers, String targetGroup) throws IOException { + Set
          servers, String targetGroup) throws IOException { preMoveServersCalled = true; } @Override public void postMoveServers(final ObserverContext ctx, - Set
          servers, String targetGroup) throws IOException { + Set
          servers, String targetGroup) throws IOException { postMoveServersCalled = true; } @@ -471,97 +471,97 @@ public void postBalanceRSGroup(final ObserverContext ctx, - final String groupName) throws IOException { + final String groupName) throws IOException { preGetRSGroupInfoCalled = true; } @Override public void postGetRSGroupInfo(final ObserverContext ctx, - final String groupName) throws IOException { + final String groupName) throws IOException { postGetRSGroupInfoCalled = true; } @Override public void preGetRSGroupInfoOfTable(final ObserverContext ctx, - final TableName tableName) throws IOException { + final TableName tableName) throws IOException { preGetRSGroupInfoOfTableCalled = true; } @Override public void postGetRSGroupInfoOfTable(final ObserverContext ctx, - final TableName tableName) throws IOException { + final TableName tableName) throws IOException { postGetRSGroupInfoOfTableCalled = true; } @Override public void preListRSGroups(final ObserverContext ctx) - throws IOException { + throws IOException { preListRSGroupsCalled = true; } @Override public void postListRSGroups(final ObserverContext ctx) - throws IOException { + throws IOException { postListRSGroupsCalled = true; } @Override public void preGetRSGroupInfoOfServer(final ObserverContext ctx, - final Address server) throws IOException { + final Address server) throws IOException { preGetRSGroupInfoOfServerCalled = true; } @Override public void postGetRSGroupInfoOfServer(final ObserverContext ctx, - final Address server) throws IOException { + final Address server) throws IOException { postGetRSGroupInfoOfServerCalled = true; } @Override public void preListTablesInRSGroup(ObserverContext ctx, - String groupName) throws IOException { + String groupName) throws IOException { preListTablesInRSGroupCalled = true; } @Override public void postListTablesInRSGroup(ObserverContext ctx, - String groupName) throws IOException { + String groupName) throws IOException { postListTablesInRSGroupCalled = true; } @Override public void preGetConfiguredNamespacesAndTablesInRSGroup( - ObserverContext ctx, String groupName) throws IOException { + ObserverContext ctx, String groupName) throws IOException { preGetConfiguredNamespacesAndTablesInRSGroupCalled = true; } @Override public void postGetConfiguredNamespacesAndTablesInRSGroup( - ObserverContext ctx, String groupName) throws IOException { + ObserverContext ctx, String groupName) throws IOException { postGetConfiguredNamespacesAndTablesInRSGroupCalled = true; } @Override public void preRenameRSGroup(ObserverContext ctx, String oldName, - String newName) throws IOException { + String newName) throws IOException { preRenameRSGroup = true; } @Override public void postRenameRSGroup(ObserverContext ctx, String oldName, - String newName) throws IOException { + String newName) throws IOException { postRenameRSGroup = true; } @Override public void preUpdateRSGroupConfig(final ObserverContext ctx, - final String groupName, final Map configuration) throws IOException { + final String groupName, final Map configuration) throws IOException { preUpdateRSGroupConfig = true; } @Override public void postUpdateRSGroupConfig(final ObserverContext ctx, - final String groupName, final Map configuration) throws IOException { + final String groupName, final Map configuration) throws IOException { postUpdateRSGroupConfig = true; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java index 0d8d1193dabc..21b511b5505d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ public class TestRSGroupsBasics extends TestRSGroupsBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupsBasics.class); + HBaseClassTestRule.forClass(TestRSGroupsBasics.class); protected static final Logger LOG = LoggerFactory.getLogger(TestRSGroupsBasics.class); @@ -85,8 +85,8 @@ public void testBasicStartUp() throws IOException { assertEquals(NUM_SLAVES_BASE, defaultInfo.getServers().size()); // Assignment of meta and rsgroup regions. int count = MASTER.getAssignmentManager().getRegionStates().getRegionAssignments().size(); - LOG.info("regions assignments are" + - MASTER.getAssignmentManager().getRegionStates().getRegionAssignments().toString()); + LOG.info("regions assignments are" + + MASTER.getAssignmentManager().getRegionStates().getRegionAssignments().toString()); // 2 (meta and rsgroup) assertEquals(2, count); } @@ -119,9 +119,9 @@ public void testNamespaceCreateAndAssign() throws Exception { final TableName tableName = TableName.valueOf(nsName, TABLE_PREFIX + "_testCreateAndAssign"); RSGroupInfo appInfo = addGroup("appInfo", 1); ADMIN.createNamespace(NamespaceDescriptor.create(nsName) - .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "appInfo").build()); + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "appInfo").build()); final TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); ADMIN.createTable(desc); // wait for created table to be assigned TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @@ -140,9 +140,9 @@ public void testDefaultNamespaceCreateAndAssign() throws Exception { LOG.info("testDefaultNamespaceCreateAndAssign"); String tableName = TABLE_PREFIX + "_testCreateAndAssign"; ADMIN.modifyNamespace(NamespaceDescriptor.create("default") - .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "default").build()); + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "default").build()); final TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); ADMIN.createTable(desc); // wait for created table to be assigned TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @@ -177,7 +177,7 @@ public void testClearDeadServers() throws Exception { // move region servers from default group to new group final int serverCountToMoveToNewGroup = 3; final RSGroupInfo newGroup = - addGroup(getGroupName(name.getMethodName()), serverCountToMoveToNewGroup); + addGroup(getGroupName(name.getMethodName()), serverCountToMoveToNewGroup); // get the existing dead servers NUM_DEAD_SERVERS = CLUSTER.getClusterMetrics().getDeadServerNames().size(); @@ -196,8 +196,8 @@ public void testClearDeadServers() throws Exception { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { 
@Override public boolean evaluate() throws Exception { - return CLUSTER.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS && - !MASTER.getServerManager().areDeadServersInProgress(); + return CLUSTER.getClusterMetrics().getDeadServerNames().size() == NUM_DEAD_SERVERS + && !MASTER.getServerManager().areDeadServersInProgress(); } }); assertFalse(CLUSTER.getClusterMetrics().getLiveServerMetrics().containsKey(serverToStop)); @@ -242,7 +242,7 @@ public boolean evaluate() throws Exception { }); Set
          ServersInDeadServerGroup = - ADMIN.getRSGroup(deadServerGroup.getName()).getServers(); + ADMIN.getRSGroup(deadServerGroup.getName()).getServers(); assertEquals(serverCountToMoveToDeadServerGroup, ServersInDeadServerGroup.size()); assertTrue(ServersInDeadServerGroup.contains(serverToStop.getAddress())); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java index 403045f22a64..c72bedcb8be6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ public class TestRSGroupsCPHookCalled extends TestRSGroupsBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupsCPHookCalled.class); + HBaseClassTestRule.forClass(TestRSGroupsCPHookCalled.class); @BeforeClass public static void setUp() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java index 9b78d4c406f9..5368b6a9c197 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsFallback.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Collections; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -91,9 +90,8 @@ public void testFallback() throws Exception { String groupName = getGroupName(name.getMethodName()); addGroup(groupName, 1); TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).build()) - .setRegionServerGroup(groupName) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).build()) + .setRegionServerGroup(groupName).build(); ADMIN.createTable(desc, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); // server of test group crash, regions move to default group @@ -111,7 +109,7 @@ public void testFallback() throws Exception { // add a new server to test group, regions move back JVMClusterUtil.RegionServerThread t = - TEST_UTIL.getMiniHBaseCluster().startRegionServerAndWait(60000); + TEST_UTIL.getMiniHBaseCluster().startRegionServerAndWait(60000); ADMIN.moveServersToRSGroup( Collections.singleton(t.getRegionServer().getServerName().getAddress()), groupName); assertTrue(MASTER.balance().isBalancerRan()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java index 76c15692379b..dade4d67c45c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,7 +66,7 @@ public class TestRSGroupsKillRS extends TestRSGroupsBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupsKillRS.class); + HBaseClassTestRule.forClass(TestRSGroupsKillRS.class); private static final Logger LOG = LoggerFactory.getLogger(TestRSGroupsKillRS.class); @@ -76,7 +76,7 @@ public static void setUp() throws Exception { // processed which causes dead lock. 
TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); TEST_UTIL.getConfiguration() - .setFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_READ_SHARE_CONF_KEY, 0.5f); + .setFloat(MetaRWQueueRpcExecutor.META_CALL_QUEUE_READ_SHARE_CONF_KEY, 0.5f); setUpTestBeforeClass(); } @@ -99,11 +99,11 @@ public void afterMethod() throws Exception { public void testKillRS() throws Exception { RSGroupInfo appInfo = addGroup("appInfo", 1); final TableName tableName = - TableName.valueOf(TABLE_PREFIX + "_ns", getNameWithoutIndex(name.getMethodName())); + TableName.valueOf(TABLE_PREFIX + "_ns", getNameWithoutIndex(name.getMethodName())); ADMIN.createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()) - .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, appInfo.getName()).build()); + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, appInfo.getName()).build()); final TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build(); ADMIN.createTable(desc); // wait for created table to be assigned TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @@ -181,9 +181,9 @@ public void testKillAllRSInGroup() throws Exception { assertEquals(2, servers.size()); LOG.debug("group servers {}", servers); for (RegionInfo tr : MASTER.getAssignmentManager().getRegionStates() - .getRegionsOfTable(tableName)) { + .getRegionsOfTable(tableName)) { assertTrue(servers.contains(MASTER.getAssignmentManager().getRegionStates() - .getRegionAssignments().get(tr).getAddress())); + .getRegionAssignments().get(tr).getAddress())); } // Move a region, to ensure there exists a region whose 'lastHost' is in my_group @@ -195,7 +195,7 @@ public void testKillAllRSInGroup() throws Exception { } assertEquals(2, gsn.size()); for (Map.Entry entry : MASTER.getAssignmentManager().getRegionStates() - .getRegionAssignments().entrySet()) { + .getRegionAssignments().entrySet()) { if (entry.getKey().getTable().equals(tableName)) { LOG.debug("move region {} from {} to {}", entry.getKey().getRegionNameAsString(), entry.getValue(), gsn.get(1 - gsn.indexOf(entry.getValue()))); @@ -224,7 +224,7 @@ public void testKillAllRSInGroup() throws Exception { // regionserver(from the 'default' group) to my_group, // and then check if all table regions are online for (JVMClusterUtil.RegionServerThread rst : TEST_UTIL.getMiniHBaseCluster() - .getLiveRegionServerThreads()) { + .getLiveRegionServerThreads()) { if (rst.getRegionServer().getServerName().getAddress().equals(gsn.get(0).getAddress())) { TEST_UTIL.getMiniHBaseCluster().stopRegionServer(rst.getRegionServer().getServerName()); break; @@ -251,7 +251,7 @@ public void testLowerMetaGroupVersion() throws Exception { toAddTables.add(TableName.META_TABLE_NAME); ADMIN.setRSGroup(toAddTables, groupName); assertTrue(ADMIN.getConfiguredNamespacesAndTablesInRSGroup(groupName).getSecond() - .contains(TableName.META_TABLE_NAME)); + .contains(TableName.META_TABLE_NAME)); // restart the regionserver in meta_group, and lower its version String originVersion = ""; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java index 6ed2adecd14e..b87b4a99f30c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -62,7 +62,7 @@ public class TestRSGroupsOfflineMode extends TestRSGroupsBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRSGroupsOfflineMode.class); + HBaseClassTestRule.forClass(TestRSGroupsOfflineMode.class); private static final Logger LOG = LoggerFactory.getLogger(TestRSGroupsOfflineMode.class); private static HMaster master; @@ -79,8 +79,8 @@ public static void setUp() throws Exception { TEST_UTIL = new HBaseTestingUtil(); RSGroupUtil.enableRSGroup(TEST_UTIL.getConfiguration()); TEST_UTIL.getConfiguration().set(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, "1"); - StartTestingClusterOption option = - StartTestingClusterOption.builder().numMasters(2).numRegionServers(3).numDataNodes(3).build(); + StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(2) + .numRegionServers(3).numDataNodes(3).build(); TEST_UTIL.startMiniCluster(option); cluster = TEST_UTIL.getHBaseCluster(); master = ((SingleProcessHBaseCluster) cluster).getMaster(); @@ -90,9 +90,9 @@ public static void setUp() throws Exception { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return master.isInitialized() && - ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() && - master.getServerManager().getOnlineServersList().size() >= 3; + return master.isInitialized() + && ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() + && master.getServerManager().getOnlineServersList().size() >= 3; } }); } @@ -114,7 +114,7 @@ public void testOffline() throws Exception, InterruptedException { Admin admin = TEST_UTIL.getAdmin(); admin.addRSGroup(newGroup); if (master.getAssignmentManager().getRegionStates().getRegionAssignments() - .containsValue(failoverRS.getServerName())) { + .containsValue(failoverRS.getServerName())) { for (RegionInfo regionInfo : hbaseAdmin.getRegions(failoverRS.getServerName())) { hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), failoverRS.getServerName()); } @@ -123,7 +123,7 @@ public void testOffline() throws Exception, InterruptedException { @Override public boolean evaluate() throws Exception { return !master.getServerManager().getLoad(failoverRS.getServerName()).getRegionMetrics() - .isEmpty(); + .isEmpty(); } }); } @@ -133,8 +133,8 @@ public boolean evaluate() throws Exception { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return groupRS.getNumberOfOnlineRegions() < 1 && - master.getAssignmentManager().getRegionStates().getRegionsInTransitionCount() < 1; + return groupRS.getNumberOfOnlineRegions() < 1 + && master.getAssignmentManager().getRegionStates().getRegionsInTransitionCount() < 1; } }); // Move table to group and wait. 
@@ -154,16 +154,16 @@ public boolean evaluate() throws Exception { TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return TEST_UTIL.getHBaseCluster().getMaster() != null && - TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() && - TEST_UTIL.getHBaseCluster().getMaster().isInitialized() && - TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() <= 3; + return TEST_UTIL.getHBaseCluster().getMaster() != null + && TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() + && TEST_UTIL.getHBaseCluster().getMaster().isInitialized() && TEST_UTIL + .getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() <= 3; } }); // Get groupInfoManager from the new active master. RSGroupInfoManager groupMgr = - ((SingleProcessHBaseCluster) cluster).getMaster().getRSGroupInfoManager(); + ((SingleProcessHBaseCluster) cluster).getMaster().getRSGroupInfoManager(); // Make sure balancer is in offline mode, since this is what we're testing. assertFalse(groupMgr.isOnline()); // Kill final regionserver to see the failover happens for all tables except GROUP table since diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java index 5649242e3be7..67d46f6f224a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUpdateRSGroupConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUpdateRSGroupConfiguration.java index 3bc6c97aa68e..0569e2553eed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUpdateRSGroupConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUpdateRSGroupConfiguration.java @@ -21,7 +21,6 @@ import static org.junit.Assert.fail; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -36,13 +35,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestUpdateRSGroupConfiguration extends TestRSGroupsBase { protected static final Logger LOG = LoggerFactory.getLogger(TestUpdateRSGroupConfiguration.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestUpdateRSGroupConfiguration.class); + HBaseClassTestRule.forClass(TestUpdateRSGroupConfiguration.class); private static final String TEST_GROUP = "test"; private static final String TEST2_GROUP = "test2"; @@ -99,20 +98,19 @@ public void testCustomOnlineConfigChangeInRSGroup() throws Exception { // Check the configuration of the RegionServer in test rsgroup, should be update Configuration regionServerConfiguration = - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().stream() - .map(JVMClusterUtil.RegionServerThread::getRegionServer) - .filter(regionServer -> - 
(regionServer.getServerName().getAddress().equals(testRSGroup.getServers().first()))) - .collect(Collectors.toList()).get(0).getConfiguration(); + TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().stream() + .map(JVMClusterUtil.RegionServerThread::getRegionServer) + .filter(regionServer -> (regionServer.getServerName().getAddress() + .equals(testRSGroup.getServers().first()))) + .collect(Collectors.toList()).get(0).getConfiguration(); int custom = regionServerConfiguration.getInt("hbase.custom.config", 0); assertEquals(1000, custom); // Check the configuration of the RegionServer in test2 rsgroup, should not be update - regionServerConfiguration = - TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().stream() - .map(JVMClusterUtil.RegionServerThread::getRegionServer) - .filter(regionServer -> - (regionServer.getServerName().getAddress().equals(test2RSGroup.getServers().first()))) + regionServerConfiguration = TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads() + .stream().map(JVMClusterUtil.RegionServerThread::getRegionServer) + .filter(regionServer -> (regionServer.getServerName().getAddress() + .equals(test2RSGroup.getServers().first()))) .collect(Collectors.toList()).get(0).getConfiguration(); custom = regionServerConfiguration.getInt("hbase.custom.config", 0); assertEquals(0, custom); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 44a42f155176..2b9840c4e0ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -140,7 +140,7 @@ public List listTableDescriptors(boolean includeSysTables) thro } public List listTableDescriptors(Pattern pattern, boolean includeSysTables) - throws IOException { + throws IOException { return admin.listTableDescriptors(pattern, includeSysTables); } @@ -153,12 +153,12 @@ public TableName[] listTableNames(Pattern pattern, boolean includeSysTables) thr } public TableDescriptor getDescriptor(TableName tableName) - throws TableNotFoundException, IOException { + throws TableNotFoundException, IOException { return admin.getDescriptor(tableName); } public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) - throws IOException { + throws IOException { admin.createTable(desc, startKey, endKey, numRegions); } @@ -167,7 +167,7 @@ public Future createTableAsync(TableDescriptor desc) throws IOException { } public Future createTableAsync(TableDescriptor desc, byte[][] splitKeys) - throws IOException { + throws IOException { return admin.createTableAsync(desc, splitKeys); } @@ -176,7 +176,7 @@ public Future deleteTableAsync(TableName tableName) throws IOException { } public Future truncateTableAsync(TableName tableName, boolean preserveSplits) - throws IOException { + throws IOException { return admin.truncateTableAsync(tableName, preserveSplits); } @@ -201,17 +201,17 @@ public boolean isTableAvailable(TableName tableName) throws IOException { } public Future addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException { + throws IOException { return admin.addColumnFamilyAsync(tableName, columnFamily); } public Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) - throws IOException { + throws IOException { return admin.deleteColumnFamilyAsync(tableName, columnFamily); } 
public Future modifyColumnFamilyAsync(TableName tableName, - ColumnFamilyDescriptor columnFamily) throws IOException { + ColumnFamilyDescriptor columnFamily) throws IOException { return admin.modifyColumnFamilyAsync(tableName, columnFamily); } @@ -256,12 +256,12 @@ public void compactRegion(byte[] regionName, byte[] columnFamily) throws IOExcep } public void compact(TableName tableName, CompactType compactType) - throws IOException, InterruptedException { + throws IOException, InterruptedException { admin.compact(tableName, compactType); } public void compact(TableName tableName, byte[] columnFamily, CompactType compactType) - throws IOException, InterruptedException { + throws IOException, InterruptedException { admin.compact(tableName, columnFamily, compactType); } @@ -282,17 +282,17 @@ public void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IO } public void majorCompact(TableName tableName, CompactType compactType) - throws IOException, InterruptedException { + throws IOException, InterruptedException { admin.majorCompact(tableName, compactType); } public void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType) - throws IOException, InterruptedException { + throws IOException, InterruptedException { admin.majorCompact(tableName, columnFamily, compactType); } public Map compactionSwitch(boolean switchState, - List serverNamesList) throws IOException { + List serverNamesList) throws IOException { return admin.compactionSwitch(switchState, serverNamesList); } @@ -378,7 +378,7 @@ public boolean isCleanerChoreEnabled() throws IOException { } public Future mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible) - throws IOException { + throws IOException { return admin.mergeRegionsAsync(nameofRegionsToMerge, forcible); } @@ -427,7 +427,7 @@ public List getRegionMetrics(ServerName serverName) throws IOExce } public List getRegionMetrics(ServerName serverName, TableName tableName) - throws IOException { + throws IOException { return admin.getRegionMetrics(serverName, tableName); } @@ -448,7 +448,7 @@ public Future deleteNamespaceAsync(String name) throws IOException { } public NamespaceDescriptor getNamespaceDescriptor(String name) - throws NamespaceNotFoundException, IOException { + throws NamespaceNotFoundException, IOException { return admin.getNamespaceDescriptor(name); } @@ -481,7 +481,7 @@ public List listTableDescriptors(List tableNames) th } public Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) - throws IOException { + throws IOException { return admin.abortProcedureAsync(procId, mayInterruptIfRunning); } @@ -502,7 +502,7 @@ public CompactionState getCompactionState(TableName tableName) throws IOExceptio } public CompactionState getCompactionState(TableName tableName, CompactType compactType) - throws IOException { + throws IOException { return admin.getCompactionState(tableName, compactType); } @@ -519,17 +519,17 @@ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws I } public void snapshot(SnapshotDescription snapshot) - throws IOException, SnapshotCreationException, IllegalArgumentException { + throws IOException, SnapshotCreationException, IllegalArgumentException { admin.snapshot(snapshot); } public Future snapshotAsync(SnapshotDescription snapshot) - throws IOException, SnapshotCreationException { + throws IOException, SnapshotCreationException { return admin.snapshotAsync(snapshot); } public boolean isSnapshotFinished(SnapshotDescription snapshot) - throws IOException, 
HBaseSnapshotException, UnknownSnapshotException { + throws IOException, HBaseSnapshotException, UnknownSnapshotException { return admin.isSnapshotFinished(snapshot); } @@ -538,28 +538,28 @@ public void restoreSnapshot(String snapshotName) throws IOException, RestoreSnap } public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl) - throws IOException, RestoreSnapshotException { + throws IOException, RestoreSnapshotException { admin.restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl); } public Future cloneSnapshotAsync(String snapshotName, TableName tableName, - boolean restoreAcl, String customSFT) - throws IOException, TableExistsException, RestoreSnapshotException { + boolean restoreAcl, String customSFT) + throws IOException, TableExistsException, RestoreSnapshotException { return admin.cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT); } public void execProcedure(String signature, String instance, Map props) - throws IOException { + throws IOException { admin.execProcedure(signature, instance, props); } public byte[] execProcedureWithReturn(String signature, String instance, - Map props) throws IOException { + Map props) throws IOException { return admin.execProcedureWithReturn(signature, instance, props); } public boolean isProcedureFinished(String signature, String instance, Map props) - throws IOException { + throws IOException { return admin.isProcedureFinished(signature, instance, props); } @@ -572,7 +572,7 @@ public List listSnapshots(Pattern pattern) throws IOExcepti } public List listTableSnapshots(Pattern tableNamePattern, - Pattern snapshotNamePattern) throws IOException { + Pattern snapshotNamePattern) throws IOException { return admin.listTableSnapshots(tableNamePattern, snapshotNamePattern); } @@ -585,7 +585,7 @@ public void deleteSnapshots(Pattern pattern) throws IOException { } public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) - throws IOException { + throws IOException { admin.deleteTableSnapshots(tableNamePattern, snapshotNamePattern); } @@ -638,7 +638,7 @@ public boolean isMergeEnabled() throws IOException { } public Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig, - boolean enabled) throws IOException { + boolean enabled) throws IOException { return admin.addReplicationPeerAsync(peerId, peerConfig, enabled); } @@ -659,7 +659,7 @@ public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws IOEx } public Future updateReplicationPeerConfigAsync(String peerId, - ReplicationPeerConfig peerConfig) throws IOException { + ReplicationPeerConfig peerConfig) throws IOException { return admin.updateReplicationPeerConfigAsync(peerId, peerConfig); } @@ -672,12 +672,12 @@ public List listReplicationPeers(Pattern pattern) th } public Future transitReplicationPeerSyncReplicationStateAsync(String peerId, - SyncReplicationState state) throws IOException { + SyncReplicationState state) throws IOException { return admin.transitReplicationPeerSyncReplicationStateAsync(peerId, state); } public void decommissionRegionServers(List servers, boolean offload) - throws IOException { + throws IOException { admin.decommissionRegionServers(servers, offload); } @@ -686,7 +686,7 @@ public List listDecommissionedRegionServers() throws IOException { } public void recommissionRegionServer(ServerName server, List encodedRegionNames) - throws IOException { + throws IOException { admin.recommissionRegionServer(server, encodedRegionNames); } @@ -703,7 
+703,7 @@ public void disableTableReplication(TableName tableName) throws IOException { } public void clearCompactionQueues(ServerName serverName, Set queues) - throws IOException, InterruptedException { + throws IOException, InterruptedException { admin.clearCompactionQueues(serverName, queues); } @@ -712,7 +712,7 @@ public List clearDeadServers(List servers) throws IOExce } public void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits) - throws IOException { + throws IOException { admin.cloneTableSchema(tableName, newTableName, preserveSplits); } @@ -733,7 +733,7 @@ public Map getSpaceQuotaTableSizes() throws IOException { } public Map - getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException { + getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException { return admin.getRegionServerSpaceQuotaSnapshots(serverName); } @@ -742,12 +742,12 @@ public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(String namespace) thr } public SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(TableName tableName) - throws IOException { + throws IOException { return admin.getCurrentSpaceQuotaSnapshot(tableName); } public void grant(UserPermission userPermission, boolean mergeExistingPermissions) - throws IOException { + throws IOException { admin.grant(userPermission, mergeExistingPermissions); } @@ -756,12 +756,12 @@ public void revoke(UserPermission userPermission) throws IOException { } public List - getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { + getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) throws IOException { return admin.getUserPermissions(getUserPermissionsRequest); } public List hasUserPermissions(String userName, List permissions) - throws IOException { + throws IOException { return admin.hasUserPermissions(userName, permissions); } @@ -801,7 +801,7 @@ public List listTablesInRSGroup(String groupName) throws IOException @Override public Pair, List> - getConfiguredNamespacesAndTablesInRSGroup(String groupName) throws IOException { + getConfiguredNamespacesAndTablesInRSGroup(String groupName) throws IOException { return admin.getConfiguredNamespacesAndTablesInRSGroup(groupName); } @@ -825,7 +825,8 @@ public void setRSGroup(Set tables, String groupName) throws IOExcepti verify(); } - public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { + public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) + throws IOException { return admin.balanceRSGroup(groupName, request); } @@ -844,7 +845,7 @@ public void updateRSGroupConfig(String groupName, Map configurat @Override public List getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams) throws IOException { + ServerType serverType, int limit, Map filterParams) throws IOException { return admin.getLogEntries(serverNames, logType, serverType, limit, filterParams); } @@ -864,7 +865,7 @@ private void verify() throws IOException { lives.remove(sn.getAddress()); } try (Table table = conn.getTable(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME); - ResultScanner scanner = table.getScanner(new Scan())) { + ResultScanner scanner = table.getScanner(new Scan())) { for (;;) { Result result = scanner.next(); if (result == null) { @@ -897,9 +898,9 @@ private void verify() throws IOException { if (data.length > 0) { ProtobufUtil.expectPBMagicPrefix(data); ByteArrayInputStream bis = - new ByteArrayInputStream(data, 
ProtobufUtil.lengthOfPBMagic(), data.length); + new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length); RSGroupInfo rsGroupInfo = - ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)); + ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)); zList.add(RSGroupUtil.fillTables(rsGroupInfo, tds)); } } @@ -924,13 +925,13 @@ public List clearSlowLogResponses(Set serverNames) throws I @Override public Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] family, - String dstSFT) throws IOException { + String dstSFT) throws IOException { return admin.modifyColumnFamilyStoreFileTrackerAsync(tableName, family, dstSFT); } @Override public Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT) - throws IOException { + throws IOException { return admin.modifyTableStoreFileTrackerAsync(tableName, dstSFT); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HadoopSecurityEnabledUserProviderForTesting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HadoopSecurityEnabledUserProviderForTesting.java index ad5de4f72b95..2f61449ec7e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HadoopSecurityEnabledUserProviderForTesting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HadoopSecurityEnabledUserProviderForTesting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java index ddeea8c79592..ac7bbfc916bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -100,8 +100,8 @@ public class TestSecureIPC { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final File KEYTAB_FILE = new File( - TEST_UTIL.getDataTestDir("keytab").toUri().getPath()); + private static final File KEYTAB_FILE = + new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath()); private static MiniKdc KDC; private static String HOST = "localhost"; @@ -119,10 +119,10 @@ public class TestSecureIPC { @Parameters(name = "{index}: rpcClientImpl={0}, rpcServerImpl={1}") public static Collection parameters() { List params = new ArrayList<>(); - List rpcClientImpls = Arrays.asList( - BlockingRpcClient.class.getName(), NettyRpcClient.class.getName()); - List rpcServerImpls = Arrays.asList( - SimpleRpcServer.class.getName(), NettyRpcServer.class.getName()); + List rpcClientImpls = + Arrays.asList(BlockingRpcClient.class.getName(), NettyRpcClient.class.getName()); + List rpcServerImpls = + Arrays.asList(SimpleRpcServer.class.getName(), NettyRpcServer.class.getName()); for (String rpcClientImpl : rpcClientImpls) { for (String rpcServerImpl : rpcServerImpls) { params.add(new Object[] { rpcClientImpl, rpcServerImpl }); @@ -161,8 +161,7 @@ public void setUpTest() throws Exception { clientConf = getSecuredConfiguration(); clientConf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, rpcClientImpl); serverConf = getSecuredConfiguration(); - serverConf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, - rpcServerImpl); + serverConf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, rpcServerImpl); } @Test @@ -212,39 +211,38 @@ public void testRpcCallWithEnabledKerberosSaslAuth_NoCanonicalHostname() throws } private static void enableCanonicalHostnameTesting(Configuration conf, String canonicalHostname) { - conf.setClass(SELECTOR_KEY, - CanonicalHostnameTestingAuthenticationProviderSelector.class, + conf.setClass(SELECTOR_KEY, CanonicalHostnameTestingAuthenticationProviderSelector.class, AuthenticationProviderSelector.class); conf.set(CanonicalHostnameTestingAuthenticationProviderSelector.CANONICAL_HOST_NAME_KEY, canonicalHostname); } - public static class CanonicalHostnameTestingAuthenticationProviderSelector extends - BuiltInProviderSelector { + public static class CanonicalHostnameTestingAuthenticationProviderSelector + extends BuiltInProviderSelector { private static final String CANONICAL_HOST_NAME_KEY = - "CanonicalHostnameTestingAuthenticationProviderSelector.canonicalHostName"; + "CanonicalHostnameTestingAuthenticationProviderSelector.canonicalHostName"; @Override - public Pair> selectProvider( - String clusterId, User user) { + public Pair> + selectProvider(String clusterId, User user) { final Pair> pair = - super.selectProvider(clusterId, user); + super.selectProvider(clusterId, user); pair.setFirst(createCanonicalHostNameTestingProvider(pair.getFirst())); return pair; } - SaslClientAuthenticationProvider createCanonicalHostNameTestingProvider( - SaslClientAuthenticationProvider delegate) { + SaslClientAuthenticationProvider + createCanonicalHostNameTestingProvider(SaslClientAuthenticationProvider delegate) { return new SaslClientAuthenticationProvider() { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, - boolean fallbackAllowed, Map saslProps) throws IOException { - final String s = - conf.get(CANONICAL_HOST_NAME_KEY); + SecurityInfo securityInfo, Token token, + boolean 
fallbackAllowed, Map saslProps) throws IOException { + final String s = conf.get(CANONICAL_HOST_NAME_KEY); if (s != null) { try { - final Field canonicalHostName = InetAddress.class.getDeclaredField("canonicalHostName"); + final Field canonicalHostName = + InetAddress.class.getDeclaredField("canonicalHostName"); canonicalHostName.setAccessible(true); canonicalHostName.set(serverAddr, s); } catch (NoSuchFieldException | IllegalAccessException e) { @@ -252,7 +250,8 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddr, } } - return delegate.createClient(conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); + return delegate.createClient(conf, serverAddr, securityInfo, token, fallbackAllowed, + saslProps); } @Override @@ -291,8 +290,8 @@ public String getTokenKind() { @Test public void testRpcFallbackToSimpleAuth() throws Exception { String clientUsername = "testuser"; - UserGroupInformation clientUgi = UserGroupInformation.createUserForTesting(clientUsername, - new String[] { clientUsername }); + UserGroupInformation clientUgi = + UserGroupInformation.createUserForTesting(clientUsername, new String[] { clientUsername }); // check that the client user is insecure assertNotSame(ugi, clientUgi); @@ -397,13 +396,14 @@ private void callRpcService(User clientUser) throws Exception { InetSocketAddress isa = new InetSocketAddress(HOST, 0); RpcServerInterface rpcServer = RpcServerFactory.createRpcServer(null, "AbstractTestSecureIPC", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface((BlockingService) SERVICE, null)), isa, - serverConf, new FifoRpcScheduler(serverConf, 1)); + Lists + .newArrayList(new RpcServer.BlockingServiceAndInterface((BlockingService) SERVICE, null)), + isa, serverConf, new FifoRpcScheduler(serverConf, 1)); rpcServer.start(); - try (RpcClient rpcClient = RpcClientFactory.createClient(clientConf, - HConstants.DEFAULT_CLUSTER_ID.toString())) { - BlockingInterface stub = newBlockingStub(rpcClient, rpcServer.getListenerAddress(), - clientUser); + try (RpcClient rpcClient = + RpcClientFactory.createClient(clientConf, HConstants.DEFAULT_CLUSTER_ID.toString())) { + BlockingInterface stub = + newBlockingStub(rpcClient, rpcServer.getListenerAddress(), clientUser); TestThread th1 = new TestThread(stub); final Throwable exception[] = new Throwable[1]; Collections.synchronizedList(new ArrayList()); @@ -441,9 +441,9 @@ public void run() { int[] messageSize = new int[] { 100, 1000, 10000 }; for (int i = 0; i < messageSize.length; i++) { String input = RandomStringUtils.random(messageSize[i]); - String result = stub - .echo(null, TestProtos.EchoRequestProto.newBuilder().setMessage(input).build()) - .getMessage(); + String result = + stub.echo(null, TestProtos.EchoRequestProto.newBuilder().setMessage(input).build()) + .getMessage(); assertEquals(input, result); } } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUser.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUser.java index 28c13cfba3f7..e154b666b9d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUser.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUser.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
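Aside: the callRpcService() reflow above is where the parameterized client/server implementations actually meet. Condensed, the round trip looks roughly like the following; HOST, serverConf, clientConf, clientUser, SERVICE and newBlockingStub are fields and helpers of the surrounding test class, so this is a sketch of the flow rather than a standalone program.

// Start a server with the configured RpcServer implementation, connect with the
// configured RpcClient, and bounce a message off the echo endpoint.
InetSocketAddress isa = new InetSocketAddress(HOST, 0);
RpcServerInterface rpcServer = RpcServerFactory.createRpcServer(null, "AbstractTestSecureIPC",
  Lists.newArrayList(new RpcServer.BlockingServiceAndInterface((BlockingService) SERVICE, null)),
  isa, serverConf, new FifoRpcScheduler(serverConf, 1));
rpcServer.start();
try (RpcClient rpcClient =
  RpcClientFactory.createClient(clientConf, HConstants.DEFAULT_CLUSTER_ID.toString())) {
  BlockingInterface stub = newBlockingStub(rpcClient, rpcServer.getListenerAddress(), clientUser);
  String echoed =
    stub.echo(null, TestProtos.EchoRequestProto.newBuilder().setMessage("ping").build())
      .getMessage();
  assertEquals("ping", echoed);
} finally {
  rpcServer.stop();
}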
See the NOTICE file * distributed with this work for additional information @@ -38,12 +38,11 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestUser { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestUser.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestUser.class); private static final Logger LOG = LoggerFactory.getLogger(TestUser.class); @@ -75,7 +74,7 @@ public void testCacheGetGroups() throws Exception { User uTwo = up.create(ugiTwo); // Make sure that we didn't break groups and everything worked well. - assertArrayEquals(uOne.getGroupNames(),uTwo.getGroupNames()); + assertArrayEquals(uOne.getGroupNames(), uTwo.getGroupNames()); // Check that they are referentially equal. // Since getting a group for a users that doesn't exist creates a new string array @@ -93,7 +92,6 @@ public void testCacheGetGroupsRoot() throws Exception { Configuration conf = HBaseConfiguration.create(); UserProvider up = UserProvider.instantiate(conf); - String rootUserName = "root"; // Create two UGI's for this username @@ -105,17 +103,16 @@ public void testCacheGetGroupsRoot() throws Exception { User uTwo = up.create(ugiTwo); // Make sure that we didn't break groups and everything worked well. - assertArrayEquals(uOne.getGroupNames(),uTwo.getGroupNames()); + assertArrayEquals(uOne.getGroupNames(), uTwo.getGroupNames()); String[] groupNames = ugiOne.getGroupNames(); assertTrue(groupNames.length > 0); } } - @Test public void testBasicAttributes() throws Exception { Configuration conf = HBaseConfiguration.create(); - User user = User.createUserForTesting(conf, "simple", new String[]{"foo"}); + User user = User.createUserForTesting(conf, "simple", new String[] { "foo" }); assertEquals("Username should match", "simple", user.getName()); assertEquals("Short username should match", "simple", user.getShortName()); // don't test shortening of kerberos names because regular Hadoop doesn't support them @@ -124,27 +121,25 @@ public void testBasicAttributes() throws Exception { @Test public void testRunAs() throws Exception { Configuration conf = HBaseConfiguration.create(); - final User user = User.createUserForTesting(conf, "testuser", new String[]{"foo"}); - final PrivilegedExceptionAction action = new PrivilegedExceptionAction(){ + final User user = User.createUserForTesting(conf, "testuser", new String[] { "foo" }); + final PrivilegedExceptionAction action = new PrivilegedExceptionAction() { @Override public String run() throws IOException { - User u = User.getCurrent(); - return u.getName(); + User u = User.getCurrent(); + return u.getName(); } }; String username = user.runAs(action); - assertEquals("Current user within runAs() should match", - "testuser", username); + assertEquals("Current user within runAs() should match", "testuser", username); // ensure the next run is correctly set - User user2 = User.createUserForTesting(conf, "testuser2", new String[]{"foo"}); + User user2 = User.createUserForTesting(conf, "testuser2", new String[] { "foo" }); String username2 = user2.runAs(action); - assertEquals("Second username should match second user", - "testuser2", username2); + assertEquals("Second username should match second user", "testuser2", username2); // check the exception version - username = user.runAs(new PrivilegedExceptionAction(){ + username = user.runAs(new 
PrivilegedExceptionAction() { @Override public String run() throws Exception { return User.getCurrent().getName(); @@ -153,19 +148,20 @@ public String run() throws Exception { assertEquals("User name in runAs() should match", "testuser", username); // verify that nested contexts work - user2.runAs(new PrivilegedExceptionAction(){ + user2.runAs(new PrivilegedExceptionAction() { @Override - public Object run() throws IOException, InterruptedException{ + public Object run() throws IOException, InterruptedException { String nestedName = user.runAs(action); assertEquals("Nest name should match nested user", "testuser", nestedName); - assertEquals("Current name should match current user", - "testuser2", User.getCurrent().getName()); + assertEquals("Current name should match current user", "testuser2", + User.getCurrent().getName()); return null; } }); - username = user.runAs(new PrivilegedAction(){ + username = user.runAs(new PrivilegedAction() { String result = null; + @Override public String run() { try { @@ -177,22 +173,20 @@ public String run() { } }); - assertEquals("Current user within runAs() should match", - "testuser", username); + assertEquals("Current user within runAs() should match", "testuser", username); } /** - * Make sure that we're returning a result for the current user. - * Previously getCurrent() was returning null if not initialized on - * non-secure Hadoop variants. + * Make sure that we're returning a result for the current user. Previously getCurrent() was + * returning null if not initialized on non-secure Hadoop variants. */ @Test public void testGetCurrent() throws Exception { User user1 = User.getCurrent(); assertNotNull(user1.ugi); - LOG.debug("User1 is "+user1.getName()); + LOG.debug("User1 is " + user1.getName()); - for (int i =0 ; i< 100; i++) { + for (int i = 0; i < 100; i++) { User u = User.getCurrent(); assertNotNull(u); assertEquals(user1.getName(), u.getName()); @@ -207,19 +201,19 @@ public void testUserGroupNames() throws Exception { final ImmutableSet singleGroups = ImmutableSet.of("group"); final Configuration conf = HBaseConfiguration.create(); User user = User.createUserForTesting(conf, username, - singleGroups.toArray(new String[singleGroups.size()])); + singleGroups.toArray(new String[singleGroups.size()])); assertUserGroup(user, singleGroups); final ImmutableSet multiGroups = ImmutableSet.of("group", "group1", "group2"); user = User.createUserForTesting(conf, username, - multiGroups.toArray(new String[multiGroups.size()])); + multiGroups.toArray(new String[multiGroups.size()])); assertUserGroup(user, multiGroups); } private void assertUserGroup(User user, ImmutableSet groups) { assertNotNull("GroupNames should be not null", user.getGroupNames()); assertTrue("UserGroupNames length should be == " + groups.size(), - user.getGroupNames().length == groups.size()); + user.getGroupNames().length == groups.size()); for (String group : user.getGroupNames()) { assertTrue("groupName should be in set ", groups.contains(group)); @@ -228,8 +222,7 @@ private void assertUserGroup(User user, ImmutableSet groups) { @Test public void testSecurityForNonSecureHadoop() { - assertFalse("Security should be disable in non-secure Hadoop", - User.isSecurityEnabled()); + assertFalse("Security should be disable in non-secure Hadoop", User.isSecurityEnabled()); Configuration conf = HBaseConfiguration.create(); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); @@ -238,13 +231,13 @@ public void testSecurityForNonSecureHadoop() { conf = 
HBaseConfiguration.create(); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - assertFalse("HBase security should not be enabled if " - + User.HBASE_SECURITY_CONF_KEY + " is not set accordingly", - User.isHBaseSecurityEnabled(conf)); + assertFalse("HBase security should not be enabled if " + User.HBASE_SECURITY_CONF_KEY + + " is not set accordingly", + User.isHBaseSecurityEnabled(conf)); conf = HBaseConfiguration.create(); conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos"); - assertTrue("HBase security should be enabled regardless of underlying " - + "HDFS settings", User.isHBaseSecurityEnabled(conf)); + assertTrue("HBase security should be enabled regardless of underlying " + "HDFS settings", + User.isHBaseSecurityEnabled(conf)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java index 29b8c21a9059..7a79648d4fa0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,6 @@ import java.io.File; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -52,8 +51,8 @@ public class TestUsersOperationsWithSecureHadoop { HBaseClassTestRule.forClass(TestUsersOperationsWithSecureHadoop.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - private static final File KEYTAB_FILE = new File(TEST_UTIL.getDataTestDir("keytab").toUri() - .getPath()); + private static final File KEYTAB_FILE = + new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath()); private static MiniKdc KDC; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java index 02f23ed1cca9..c9a7e6645d52 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import static org.junit.Assert.assertEquals; @@ -85,7 +84,9 @@ public static void configureSuperuser(Configuration conf) throws IOException { // Assumes we won't ever have a minicluster with more than 5 slaves for (int i = 0; i < 5; i++) { sb.append(','); - sb.append(currentUser); sb.append(".hfs."); sb.append(i); + sb.append(currentUser); + sb.append(".hfs."); + sb.append(i); } // Add a supergroup for improving test coverage. 
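Aside: the TestUser hunks above are whitespace-only, but the pattern they keep exercising is worth stating in one piece: create a throwaway user with User.createUserForTesting and run an action under it with runAs(), where User.getCurrent() resolves to that user. Roughly, using the same names as the test:

Configuration conf = HBaseConfiguration.create();
User user = User.createUserForTesting(conf, "testuser", new String[] { "foo" });

// Inside runAs() the current user is the test user, not the process owner.
String name = user.runAs(new PrivilegedExceptionAction<String>() {
  @Override
  public String run() throws IOException {
    return User.getCurrent().getName();
  }
});
assertEquals("Current user within runAs() should match", "testuser", name);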
sb.append(',').append("@supergroup"); @@ -97,8 +98,8 @@ public static void configureSuperuser(Configuration conf) throws IOException { public static void enableSecurity(Configuration conf) throws IOException { conf.set("hadoop.security.authorization", "false"); conf.set("hadoop.security.authentication", "simple"); - conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName() + - "," + MasterSyncObserver.class.getName()); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + AccessController.class.getName() + "," + MasterSyncObserver.class.getName()); conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName()); conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName()); // Need HFile V3 for tags for security features @@ -117,11 +118,10 @@ public static void verifyConfiguration(Configuration conf) { } catch (ClassNotFoundException cnfe) { } } - if (!(conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY).contains( - AccessController.class.getName()) - && accessControllerLoaded && conf.get( - CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY).contains( - AccessController.class.getName()))) { + if (!(conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY) + .contains(AccessController.class.getName()) && accessControllerLoaded + && conf.get(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY) + .contains(AccessController.class.getName()))) { throw new RuntimeException("AccessController is missing from a system coprocessor list"); } if (conf.getInt(HFile.FORMAT_VERSION_KEY, 2) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) { @@ -135,16 +135,16 @@ public static void verifyConfiguration(Configuration conf) { } /** - * An AccessTestAction performs an action that will be examined to confirm - * the results conform to expected access rights. + * An AccessTestAction performs an action that will be examined to confirm the results conform to + * expected access rights. *
<p>
-   * To indicate an action was allowed, return null or a non empty list of
-   * KeyValues.
+   * To indicate an action was allowed, return null or a non empty list of KeyValues.
    * <p>
          - * To indicate the action was not allowed, either throw an AccessDeniedException - * or return an empty list of KeyValues. + * To indicate the action was not allowed, either throw an AccessDeniedException or return an + * empty list of KeyValues. */ - public interface AccessTestAction extends PrivilegedExceptionAction { } + public interface AccessTestAction extends PrivilegedExceptionAction { + } /** This fails only in case of ADE or empty list for any of the actions. */ public static void verifyAllowed(User user, AccessTestAction... actions) throws Exception { @@ -200,8 +200,8 @@ public static void verifyIfEmptyList(AccessTestAction action, User... users) thr if (obj != null && obj instanceof List) { List results = (List) obj; if (results != null && !results.isEmpty()) { - fail("Unexpected action results: " + results + " for user '" - + user.getShortName() + "'"); + fail( + "Unexpected action results: " + results + " for user '" + user.getShortName() + "'"); } } else { fail("Unexpected results for user '" + user.getShortName() + "'"); @@ -213,7 +213,7 @@ public static void verifyIfEmptyList(AccessTestAction action, User... users) thr } /** This passes only in case of null for all users. */ - public static void verifyIfNull(AccessTestAction action, User... users) throws Exception { + public static void verifyIfNull(AccessTestAction action, User... users) throws Exception { for (User user : users) { try { Object obj = user.runAs(action); @@ -234,18 +234,17 @@ public static void verifyDenied(User user, AccessTestAction... actions) throws E fail("Expected exception was not thrown for user '" + user.getShortName() + "'"); } catch (IOException e) { boolean isAccessDeniedException = false; - if(e instanceof RetriesExhaustedWithDetailsException) { + if (e instanceof RetriesExhaustedWithDetailsException) { // in case of batch operations, and put, the client assembles a // RetriesExhaustedWithDetailsException instead of throwing an // AccessDeniedException - for(Throwable ex : ((RetriesExhaustedWithDetailsException) e).getCauses()) { + for (Throwable ex : ((RetriesExhaustedWithDetailsException) e).getCauses()) { if (ex instanceof AccessDeniedException) { isAccessDeniedException = true; break; } } - } - else { + } else { // For doBulkLoad calls AccessDeniedException // is buried in the stack trace Throwable ex = e; @@ -257,7 +256,7 @@ public static void verifyDenied(User user, AccessTestAction... actions) throws E isAccessDeniedException = true; break; } - } while((ex = ex.getCause()) != null); + } while ((ex = ex.getCause()) != null); } if (!isAccessDeniedException) { fail("Expected exception was not thrown for user '" + user.getShortName() + "'"); @@ -269,7 +268,7 @@ public static void verifyDenied(User user, AccessTestAction... actions) throws E ex = ((PrivilegedActionException) ex).getException(); } if (ex instanceof ServiceException) { - ServiceException se = (ServiceException)ex; + ServiceException se = (ServiceException) ex; if (se.getCause() != null && se.getCause() instanceof AccessDeniedException) { // expected result return; @@ -282,11 +281,11 @@ public static void verifyDenied(User user, AccessTestAction... 
actions) throws E private static List getAccessControllers(SingleProcessHBaseCluster cluster) { List result = Lists.newArrayList(); - for (RegionServerThread t: cluster.getLiveRegionServerThreads()) { - for (HRegion region: t.getRegionServer().getOnlineRegionsLocalContext()) { + for (RegionServerThread t : cluster.getLiveRegionServerThreads()) { + for (HRegion region : t.getRegionServer().getOnlineRegionsLocalContext()) { Coprocessor cp = region.getCoprocessorHost().findCoprocessor(AccessController.class); if (cp != null) { - result.add((AccessController)cp); + result.add((AccessController) cp); } } } @@ -294,7 +293,7 @@ private static List getAccessControllers(SingleProcessHBaseClu } private static Map - getAuthManagerMTimes(SingleProcessHBaseCluster cluster) { + getAuthManagerMTimes(SingleProcessHBaseCluster cluster) { Map result = Maps.newHashMap(); for (AccessController ac : getAccessControllers(cluster)) { result.put(ac, ac.getAuthManager().getMTime()); @@ -305,7 +304,7 @@ private static List getAccessControllers(SingleProcessHBaseClu @SuppressWarnings("rawtypes") private static void updateACLs(final HBaseTestingUtil util, Callable c) throws Exception { // Get the current mtimes for all access controllers - final Map oldMTimes = getAuthManagerMTimes(util.getHBaseCluster()); + final Map oldMTimes = getAuthManagerMTimes(util.getHBaseCluster()); // Run the update action c.call(); @@ -314,20 +313,20 @@ private static void updateACLs(final HBaseTestingUtil util, Callable c) throws E util.waitFor(WAIT_TIME, 100, new Predicate() { @Override public boolean evaluate() throws IOException { - Map mtimes = getAuthManagerMTimes(util.getHBaseCluster()); - for (Map.Entry e: mtimes.entrySet()) { + Map mtimes = getAuthManagerMTimes(util.getHBaseCluster()); + for (Map.Entry e : mtimes.entrySet()) { if (!oldMTimes.containsKey(e.getKey())) { - LOG.error("Snapshot of AccessController state does not include instance on region " + - e.getKey().getRegion().getRegionInfo().getRegionNameAsString()); + LOG.error("Snapshot of AccessController state does not include instance on region " + + e.getKey().getRegion().getRegionInfo().getRegionNameAsString()); // Error out the predicate, we will try again return false; } long old = oldMTimes.get(e.getKey()); long now = e.getValue(); if (now <= old) { - LOG.info("AccessController on region " + - e.getKey().getRegion().getRegionInfo().getRegionNameAsString() + - " has not updated: mtime=" + now); + LOG.info("AccessController on region " + + e.getKey().getRegion().getRegionInfo().getRegionNameAsString() + + " has not updated: mtime=" + now); return false; } } @@ -337,9 +336,9 @@ public boolean evaluate() throws IOException { } /** - * Grant permissions globally to the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Grant permissions globally to the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). */ public static void grantGlobal(final HBaseTestingUtil util, final String user, final Permission.Action... actions) throws Exception { @@ -356,12 +355,12 @@ public Void call() throws Exception { } /** - * Grant permissions globally to the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). 
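Aside: the reflowed javadoc above states the AccessTestAction contract: returning null or a non-empty list means the action was allowed, while an AccessDeniedException or an empty list means it was denied. A small sketch of how the verify helpers consume such an action, reusing the TEST_TABLE/TEST_ROW/TEST_FAMILY fixtures that appear later in TestAccessController (the user names are illustrative):

// One read wrapped as an AccessTestAction; the verify helpers run it as each
// user and interpret the outcome per the contract described above.
AccessTestAction getAction = new AccessTestAction() {
  @Override
  public Object run() throws Exception {
    Get g = new Get(TEST_ROW);
    g.addFamily(TEST_FAMILY);
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Table t = conn.getTable(TEST_TABLE)) {
      return t.get(g).listCells();
    }
  }
};
verifyAllowed(getAction, SUPERUSER, USER_ADMIN, USER_RW, USER_RO);
verifyDenied(getAction, USER_NONE);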
+ * Grant permissions globally to the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). */ - public static void grantGlobal(final User caller, final HBaseTestingUtil util, - final String user, final Permission.Action... actions) throws Exception { + public static void grantGlobal(final User caller, final HBaseTestingUtil util, final String user, + final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { @@ -376,9 +375,9 @@ public Void call() throws Exception { } /** - * Revoke permissions globally from the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Revoke permissions globally from the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). */ public static void revokeGlobal(final HBaseTestingUtil util, final String user, final Permission.Action... actions) throws Exception { @@ -395,12 +394,12 @@ public Void call() throws Exception { } /** - * Revoke permissions globally from the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Revoke permissions globally from the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). */ - public static void revokeGlobal(final User caller, final HBaseTestingUtil util, - final String user, final Permission.Action... actions) throws Exception { + public static void revokeGlobal(final User caller, final HBaseTestingUtil util, final String user, + final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { @@ -415,9 +414,9 @@ public Void call() throws Exception { } /** - * Grant permissions on a namespace to the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Grant permissions on a namespace to the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). */ public static void grantOnNamespace(final HBaseTestingUtil util, final String user, final String namespace, final Permission.Action... actions) throws Exception { @@ -435,13 +434,13 @@ public Void call() throws Exception { } /** - * Grant permissions on a namespace to the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Grant permissions on a namespace to the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). */ public static void grantOnNamespace(final User caller, final HBaseTestingUtil util, - final String user, final String namespace, - final Permission.Action... actions) throws Exception { + final String user, final String namespace, final Permission.Action... 
actions) + throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { @@ -457,9 +456,9 @@ public Void call() throws Exception { } /** - * Grant permissions on a namespace to the given user using AccessControl Client. - * Will wait until all active AccessController instances have updated their permissions caches - * or will throw an exception upon timeout (10 seconds). + * Grant permissions on a namespace to the given user using AccessControl Client. Will wait until + * all active AccessController instances have updated their permissions caches or will throw an + * exception upon timeout (10 seconds). */ public static void grantOnNamespaceUsingAccessControlClient(final HBaseTestingUtil util, final Connection connection, final String user, final String namespace, @@ -478,9 +477,9 @@ public Void call() throws Exception { } /** - * Revoke permissions on a namespace from the given user using AccessControl Client. - * Will wait until all active AccessController instances have updated their permissions caches - * or will throw an exception upon timeout (10 seconds). + * Revoke permissions on a namespace from the given user using AccessControl Client. Will wait + * until all active AccessController instances have updated their permissions caches or will throw + * an exception upon timeout (10 seconds). */ public static void revokeFromNamespaceUsingAccessControlClient(final HBaseTestingUtil util, final Connection connection, final String user, final String namespace, @@ -500,8 +499,8 @@ public Void call() throws Exception { /** * Revoke permissions on a namespace from the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * AccessController instances have updated their permissions caches or will throw an exception + * upon timeout (10 seconds). */ public static void revokeFromNamespace(final HBaseTestingUtil util, final String user, final String namespace, final Permission.Action... actions) throws Exception { @@ -519,12 +518,12 @@ public Void call() throws Exception { /** * Revoke permissions on a namespace from the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * AccessController instances have updated their permissions caches or will throw an exception + * upon timeout (10 seconds). */ public static void revokeFromNamespace(final User caller, final HBaseTestingUtil util, - final String user, final String namespace, - final Permission.Action... actions) throws Exception { + final String user, final String namespace, final Permission.Action... actions) + throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { @@ -539,9 +538,9 @@ public Void call() throws Exception { } /** - * Grant permissions on a table to the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Grant permissions on a table to the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). 
*/ public static void grantOnTable(final HBaseTestingUtil util, final String user, final TableName table, final byte[] family, final byte[] qualifier, @@ -560,12 +559,12 @@ public Void call() throws Exception { } /** - * Grant permissions on a table to the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Grant permissions on a table to the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). */ - public static void grantOnTable(final User caller, final HBaseTestingUtil util, - final String user, final TableName table, final byte[] family, final byte[] qualifier, + public static void grantOnTable(final User caller, final HBaseTestingUtil util, final String user, + final TableName table, final byte[] family, final byte[] qualifier, final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override @@ -583,8 +582,8 @@ public Void call() throws Exception { /** * Grant permissions on a table to the given user using AccessControlClient. Will wait until all - * active AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * active AccessController instances have updated their permissions caches or will throw an + * exception upon timeout (10 seconds). */ public static void grantOnTableUsingAccessControlClient(final HBaseTestingUtil util, final Connection connection, final String user, final TableName table, final byte[] family, @@ -604,8 +603,8 @@ public Void call() throws Exception { /** * Grant global permissions to the given user using AccessControlClient. Will wait until all - * active AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * active AccessController instances have updated their permissions caches or will throw an + * exception upon timeout (10 seconds). */ public static void grantGlobalUsingAccessControlClient(final HBaseTestingUtil util, final Connection connection, final String user, final Permission.Action... actions) @@ -624,9 +623,9 @@ public Void call() throws Exception { } /** - * Revoke permissions on a table from the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Revoke permissions on a table from the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). */ public static void revokeFromTable(final HBaseTestingUtil util, final String user, final TableName table, final byte[] family, final byte[] qualifier, @@ -644,9 +643,9 @@ public Void call() throws Exception { } /** - * Revoke permissions on a table from the given user. Will wait until all active - * AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Revoke permissions on a table from the given user. Will wait until all active AccessController + * instances have updated their permissions caches or will throw an exception upon timeout (10 + * seconds). 
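Aside: all of these grant/revoke helpers go through updateACLs(), which only returns once every live AccessController has bumped its AuthManager mtime (or the 10-second wait times out), so a test can rely on the grant being visible as soon as the call returns. A typical use of the grantOnTable overload shown above, with a hypothetical user name:

// Grant 'bob' read/write on one family; the call blocks until all region
// servers' AccessController caches have picked up the change.
SecureTestUtil.grantOnTable(TEST_UTIL, "bob", TEST_TABLE, TEST_FAMILY, null,
  Permission.Action.READ, Permission.Action.WRITE);
try {
  // ... assertions that depend on the new grant ...
} finally {
  SecureTestUtil.revokeFromTable(TEST_UTIL, "bob", TEST_TABLE, TEST_FAMILY, null,
    Permission.Action.READ, Permission.Action.WRITE);
}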
*/ public static void revokeFromTable(final User caller, final HBaseTestingUtil util, final String user, final TableName table, final byte[] family, final byte[] qualifier, @@ -666,8 +665,8 @@ public Void call() throws Exception { /** * Revoke permissions on a table from the given user using AccessControlClient. Will wait until - * all active AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * all active AccessController instances have updated their permissions caches or will throw an + * exception upon timeout (10 seconds). */ public static void revokeFromTableUsingAccessControlClient(final HBaseTestingUtil util, final Connection connection, final String user, final TableName table, final byte[] family, @@ -686,12 +685,12 @@ public Void call() throws Exception { } /** - * Revoke global permissions from the given user using AccessControlClient. Will wait until - * all active AccessController instances have updated their permissions caches or will - * throw an exception upon timeout (10 seconds). + * Revoke global permissions from the given user using AccessControlClient. Will wait until all + * active AccessController instances have updated their permissions caches or will throw an + * exception upon timeout (10 seconds). */ public static void revokeGlobalUsingAccessControlClient(final HBaseTestingUtil util, - final Connection connection, final String user,final Permission.Action... actions) + final Connection connection, final String user, final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override @@ -717,8 +716,8 @@ public Optional getMasterObserver() { @Override public void postCompletedCreateTableAction( - final ObserverContext ctx, - TableDescriptor desc, RegionInfo[] regions) throws IOException { + final ObserverContext ctx, TableDescriptor desc, + RegionInfo[] regions) throws IOException { // the AccessController test, some times calls only and directly the // postCompletedCreateTableAction() if (tableCreationLatch != null) { @@ -728,8 +727,8 @@ public void postCompletedCreateTableAction( @Override public void postCompletedDeleteTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException { + final ObserverContext ctx, final TableName tableName) + throws IOException { // the AccessController test, some times calls only and directly the // postCompletedDeleteTableAction() if (tableDeletionLatch != null) { @@ -738,8 +737,8 @@ public void postCompletedDeleteTableAction( } } - public static Table createTable(HBaseTestingUtil testUtil, TableName tableName, - byte[][] families) throws Exception { + public static Table createTable(HBaseTestingUtil testUtil, TableName tableName, byte[][] families) + throws Exception { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] family : families) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); @@ -748,13 +747,12 @@ public static Table createTable(HBaseTestingUtil testUtil, TableName tableName, return testUtil.getConnection().getTable(tableName); } - public static void createTable(HBaseTestingUtil testUtil, TableDescriptor htd) - throws Exception { + public static void createTable(HBaseTestingUtil testUtil, TableDescriptor htd) throws Exception { createTable(testUtil, testUtil.getAdmin(), htd); } - public static void createTable(HBaseTestingUtil testUtil, TableDescriptor htd, - byte[][] splitKeys) throws Exception { + public static 
void createTable(HBaseTestingUtil testUtil, TableDescriptor htd, byte[][] splitKeys) + throws Exception { createTable(testUtil, testUtil.getAdmin(), htd, splitKeys); } @@ -767,8 +765,8 @@ public static void createTable(HBaseTestingUtil testUtil, Admin admin, TableDesc byte[][] splitKeys) throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. - MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class); + MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster().getMasterCoprocessorHost() + .findCoprocessor(MasterSyncObserver.class); observer.tableCreationLatch = new CountDownLatch(1); if (splitKeys != null) { admin.createTable(htd, splitKeys); @@ -781,19 +779,18 @@ public static void createTable(HBaseTestingUtil testUtil, Admin admin, TableDesc } public static void createTable(HBaseTestingUtil testUtil, User user, TableDescriptor htd) - throws Exception { + throws Exception { createTable(testUtil, user, htd, null); } public static void createTable(HBaseTestingUtil testUtil, User user, TableDescriptor htd, - byte[][] splitKeys) throws Exception { + byte[][] splitKeys) throws Exception { try (Connection con = testUtil.getConnection(user); Admin admin = con.getAdmin()) { createTable(testUtil, admin, htd, splitKeys); } } - public static void deleteTable(HBaseTestingUtil testUtil, TableName tableName) - throws Exception { + public static void deleteTable(HBaseTestingUtil testUtil, TableName tableName) throws Exception { deleteTable(testUtil, testUtil.getAdmin(), tableName); } @@ -802,8 +799,7 @@ public static void createNamespace(HBaseTestingUtil testUtil, NamespaceDescripto testUtil.getAdmin().createNamespace(nsDesc); } - public static void deleteNamespace(HBaseTestingUtil testUtil, String namespace) - throws Exception { + public static void deleteNamespace(HBaseTestingUtil testUtil, String namespace) throws Exception { testUtil.getAdmin().deleteNamespace(namespace); } @@ -811,8 +807,8 @@ public static void deleteTable(HBaseTestingUtil testUtil, Admin admin, TableName throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. - MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class); + MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster().getMasterCoprocessorHost() + .findCoprocessor(MasterSyncObserver.class); observer.tableDeletionLatch = new CountDownLatch(1); try { admin.disableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java index 5ab0c3a922e3..75c604eaecc5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
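Aside: the createTable/deleteTable helpers above exist because the Admin calls return before the master coprocessor post-hooks have run, so they park on a CountDownLatch that MasterSyncObserver counts down in postCompletedCreateTableAction/postCompletedDeleteTableAction. Condensed from the createTable helper (split-key handling omitted):

// Look up the sync observer on the active master, arm its latch, issue the
// admin call, then wait for the post-create hook before returning.
MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster().getMasterCoprocessorHost()
  .findCoprocessor(MasterSyncObserver.class);
observer.tableCreationLatch = new CountDownLatch(1);
admin.createTable(htd);
observer.tableCreationLatch.await(); // counted down in postCompletedCreateTableAction()
observer.tableCreationLatch = null;
testUtil.waitUntilAllRegionsAssigned(htd.getTableName());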
See the NOTICE file * distributed with this work for additional information @@ -49,14 +49,15 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestAccessControlFilter extends SecureTestUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAccessControlFilter.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); private static HBaseTestingUtil TEST_UTIL; private static User READER; @@ -69,7 +70,7 @@ public class TestAccessControlFilter extends SecureTestUtil { private static byte[] PUBLIC_COL = Bytes.toBytes("public"); @Before - public void setup () { + public void setup() { TABLE = TableName.valueOf(name.getMethodName()); } @@ -117,7 +118,7 @@ private void doQualifierAccess(final Table table) throws Exception { // put some test data List puts = new ArrayList<>(100); - for (int i=0; i<100; i++) { + for (int i = 0; i < 100; i++) { Put p = new Put(Bytes.toBytes(i)); p.addColumn(FAMILY, PRIVATE_COL, Bytes.toBytes("secret " + i)); p.addColumn(FAMILY, PUBLIC_COL, Bytes.toBytes("info " + i)); @@ -141,9 +142,9 @@ public Object run() throws Exception { rowcnt++; int rownum = Bytes.toInt(r.getRow()); assertTrue(r.containsColumn(FAMILY, PRIVATE_COL)); - assertEquals("secret "+rownum, Bytes.toString(r.getValue(FAMILY, PRIVATE_COL))); + assertEquals("secret " + rownum, Bytes.toString(r.getValue(FAMILY, PRIVATE_COL))); assertTrue(r.containsColumn(FAMILY, PUBLIC_COL)); - assertEquals("info "+rownum, Bytes.toString(r.getValue(FAMILY, PUBLIC_COL))); + assertEquals("info " + rownum, Bytes.toString(r.getValue(FAMILY, PUBLIC_COL))); } assertEquals("Expected 100 rows returned", 100, rowcnt); return null; @@ -183,7 +184,7 @@ public Object run() throws Exception { }); // test as user with no permission - DENIED.runAs(new PrivilegedExceptionAction(){ + DENIED.runAs(new PrivilegedExceptionAction() { @Override public Object run() throws Exception { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 721497afbf63..5addf63216d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,6 +24,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.security.PrivilegedAction; import java.util.ArrayList; @@ -129,11 +130,13 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.CountRequest; import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.CountResponse; import org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.PingProtos.HelloRequest; @@ -153,10 +156,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** - * Performs authorization checks for common operations, according to different - * levels of authorized users. + * Performs authorization checks for common operations, according to different levels of authorized + * users. */ -@Category({SecurityTests.class, LargeTests.class}) +@Category({ SecurityTests.class, LargeTests.class }) public class TestAccessController extends SecureTestUtil { @ClassRule @@ -169,12 +172,13 @@ public class TestAccessController extends SecureTestUtil { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Configuration conf; - /** The systemUserConnection created here is tied to the system user. In case, you are planning - * to create AccessTestAction, DON'T use this systemUserConnection as the 'doAs' user - * gets eclipsed by the system user. */ + /** + * The systemUserConnection created here is tied to the system user. In case, you are planning to + * create AccessTestAction, DON'T use this systemUserConnection as the 'doAs' user gets eclipsed + * by the system user. 
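Aside: the warning reflowed above matters in practice: actions built for doAs-style checks must not go through the shared systemUserConnection, because that connection is bound to the system user and silently eclipses the intended identity. The per-user pattern used elsewhere in these tests (for example in SecureTestUtil.createTable(HBaseTestingUtil, User, TableDescriptor)) is, roughly:

// Open a connection tied to the specific test user so privileged calls run as
// that user rather than as the system user behind systemUserConnection.
try (Connection con = TEST_UTIL.getConnection(user); Admin admin = con.getAdmin()) {
  admin.createTable(htd);
}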
+ */ private static Connection systemUserConnection; - // user with all permissions private static User SUPERUSER; // user granted with all global permission @@ -243,13 +247,13 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); MasterCoprocessorHost masterCpHost = - TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); masterCpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); ACCESS_CONTROLLER = masterCpHost.findCoprocessor(AccessController.class); - CP_ENV = masterCpHost.createEnvironment( - ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); - RegionServerCoprocessorHost rsCpHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) - .getRegionServerCoprocessorHost(); + CP_ENV = + masterCpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); + RegionServerCoprocessorHost rsCpHost = + TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost(); RSCP_ENV = rsCpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available @@ -289,8 +293,9 @@ public static void tearDownAfterClass() throws Exception { private static void setUpTableAndUserPermissions() throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()).build(); + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()) + .build(); createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE).get(0); @@ -299,31 +304,21 @@ private static void setUpTableAndUserPermissions() throws Exception { // Set up initial grants - grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), - Permission.Action.ADMIN, - Permission.Action.CREATE, - Permission.Action.READ, - Permission.Action.WRITE); + grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), Permission.Action.ADMIN, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); - grantOnTable(TEST_UTIL, USER_RW.getShortName(), - TEST_TABLE, TEST_FAMILY, null, - Permission.Action.READ, - Permission.Action.WRITE); + grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null, + Permission.Action.READ, Permission.Action.WRITE); // USER_CREATE is USER_RW plus CREATE permissions - grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), - TEST_TABLE, null, null, - Permission.Action.CREATE, - Permission.Action.READ, - Permission.Action.WRITE); - - grantOnTable(TEST_UTIL, USER_RO.getShortName(), - TEST_TABLE, TEST_FAMILY, null, + grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), TEST_TABLE, null, null, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); + + grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ); - grantOnTable(TEST_UTIL, USER_ADMIN_CF.getShortName(), - TEST_TABLE, TEST_FAMILY, - null, Permission.Action.ADMIN, Permission.Action.CREATE); + grantOnTable(TEST_UTIL, USER_ADMIN_CF.getShortName(), TEST_TABLE, TEST_FAMILY, null, + Permission.Action.ADMIN, Permission.Action.CREATE); grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN); grantGlobal(TEST_UTIL, toGroupEntry(GROUP_CREATE), Permission.Action.CREATE); @@ -359,20 
+354,22 @@ private static void cleanUp() throws Exception { @Test public void testUnauthorizedShutdown() throws Exception { AccessTestAction action = new AccessTestAction() { - @Override public Object run() throws Exception { + @Override + public Object run() throws Exception { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); master.shutdown(); return null; } }; verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, - USER_GROUP_WRITE, USER_GROUP_CREATE); + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testUnauthorizedStopMaster() throws Exception { AccessTestAction action = new AccessTestAction() { - @Override public Object run() throws Exception { + @Override + public Object run() throws Exception { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); master.stopMaster(); return null; @@ -380,21 +377,21 @@ public void testUnauthorizedStopMaster() throws Exception { }; verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, - USER_GROUP_WRITE, USER_GROUP_CREATE); + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testUnauthorizedSetTableStateInMeta() throws Exception { AccessTestAction action = () -> { - try(Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Hbck hbck = conn.getHbck()){ + try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Hbck hbck = conn.getHbck()) { hbck.setTableStateInMeta(new TableState(TEST_TABLE, TableState.State.DISABLED)); } return null; }; verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, - USER_GROUP_WRITE, USER_GROUP_CREATE); + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -405,35 +402,35 @@ public void testUnauthorizedSetRegionStateInMeta() throws Exception { Map newStates = new HashMap<>(); newStates.put(closeRegion.getEncodedName(), RegionState.State.CLOSED); AccessTestAction action = () -> { - try(Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Hbck hbck = conn.getHbck()){ + try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Hbck hbck = conn.getHbck()) { hbck.setRegionStateInMeta(newStates); } return null; }; verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, - USER_GROUP_WRITE, USER_GROUP_CREATE); + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testUnauthorizedFixMeta() throws Exception { AccessTestAction action = () -> { - try(Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Hbck hbck = conn.getHbck()){ + try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Hbck hbck = conn.getHbck()) { hbck.fixMeta(); } return null; }; verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, - USER_GROUP_WRITE, USER_GROUP_CREATE); + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testSecurityCapabilities() throws Exception { - List capabilities = TEST_UTIL.getConnection().getAdmin() - .getSecurityCapabilities(); + List capabilities = + TEST_UTIL.getConnection().getAdmin().getSecurityCapabilities(); assertTrue("AUTHORIZATION capability is missing", capabilities.contains(SecurityCapability.AUTHORIZATION)); assertTrue("CELL_AUTHORIZATION capability is missing", @@ -446,8 +443,8 @@ public void testTableCreate() throws Exception { @Override public Object run() throws Exception { TableDescriptor 
tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); ACCESS_CONTROLLER.preCreateTable(ObserverContextImpl.createAndPrepare(CP_ENV), tableDescriptor, null); return null; @@ -468,17 +465,16 @@ public void testTableModify() throws Exception { @Override public Object run() throws Exception { TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TEST_TABLE); + TableDescriptorBuilder.newBuilder(TEST_TABLE); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).build(); + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("fam_" + User.getCurrent().getShortName())).build(); + .newBuilder(Bytes.toBytes("fam_" + User.getCurrent().getShortName())).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - ACCESS_CONTROLLER.preModifyTable(ObserverContextImpl.createAndPrepare(CP_ENV), - TEST_TABLE, - null, // not needed by AccessController - tableDescriptorBuilder.build()); + ACCESS_CONTROLLER.preModifyTable(ObserverContextImpl.createAndPrepare(CP_ENV), TEST_TABLE, + null, // not needed by AccessController + tableDescriptorBuilder.build()); return null; } }; @@ -493,8 +489,7 @@ public void testTableDelete() throws Exception { AccessTestAction deleteTable = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER - .preDeleteTable(ObserverContextImpl.createAndPrepare(CP_ENV), TEST_TABLE); + ACCESS_CONTROLLER.preDeleteTable(ObserverContextImpl.createAndPrepare(CP_ENV), TEST_TABLE); return null; } }; @@ -509,9 +504,8 @@ public void testTableTruncate() throws Exception { AccessTestAction truncateTable = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER - .preTruncateTable(ObserverContextImpl.createAndPrepare(CP_ENV), - TEST_TABLE); + ACCESS_CONTROLLER.preTruncateTable(ObserverContextImpl.createAndPrepare(CP_ENV), + TEST_TABLE); return null; } }; @@ -526,8 +520,7 @@ public void testTableDisable() throws Exception { AccessTestAction disableTable = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preDisableTable(ObserverContextImpl.createAndPrepare(CP_ENV), - TEST_TABLE); + ACCESS_CONTROLLER.preDisableTable(ObserverContextImpl.createAndPrepare(CP_ENV), TEST_TABLE); return null; } }; @@ -555,8 +548,7 @@ public void testTableEnable() throws Exception { AccessTestAction enableTable = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER - .preEnableTable(ObserverContextImpl.createAndPrepare(CP_ENV), TEST_TABLE); + ACCESS_CONTROLLER.preEnableTable(ObserverContextImpl.createAndPrepare(CP_ENV), TEST_TABLE); return null; } }; @@ -567,7 +559,7 @@ public Object run() throws Exception { } public static class TestTableDDLProcedure extends Procedure - implements TableProcedureInterface { + implements TableProcedureInterface { private TableName tableName; public TestTableDDLProcedure() { @@ -596,25 +588,23 @@ protected boolean abort(MasterProcedureEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - 
throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { TestProcedureProtos.TestTableDDLStateData.Builder testTableDDLMsg = TestProcedureProtos.TestTableDDLStateData.newBuilder() - .setTableName(tableName.getNameAsString()); + .setTableName(tableName.getNameAsString()); serializer.serialize(testTableDDLMsg.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { TestProcedureProtos.TestTableDDLStateData testTableDDLMsg = serializer.deserialize(TestProcedureProtos.TestTableDDLStateData.class); tableName = TableName.valueOf(testTableDDLMsg.getTableName()); } @Override - protected Procedure[] execute(MasterProcedureEnv env) throws ProcedureYieldException, - InterruptedException { + protected Procedure[] execute(MasterProcedureEnv env) + throws ProcedureYieldException, InterruptedException { // Not letting the procedure to complete until timed out setState(ProcedureState.WAITING_TIMEOUT); return null; @@ -632,7 +622,7 @@ public void testAbortProcedure() throws Exception { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preAbortProcedure(ObserverContextImpl.createAndPrepare(CP_ENV), procId); - return null; + return null; } }; @@ -652,16 +642,15 @@ public void testGetProcedures() throws Exception { AccessTestAction getProceduresAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER - .postGetProcedures(ObserverContextImpl.createAndPrepare(CP_ENV)); - return null; + ACCESS_CONTROLLER.postGetProcedures(ObserverContextImpl.createAndPrepare(CP_ENV)); + return null; } }; verifyAllowed(getProceduresAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyAllowed(getProceduresAction, USER_OWNER); - verifyIfNull( - getProceduresAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); + verifyIfNull(getProceduresAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE); } @Test @@ -675,8 +664,8 @@ public Object run() throws Exception { }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, - USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -691,8 +680,8 @@ public void testMove() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preMove(ObserverContextImpl.createAndPrepare(CP_ENV), - hri, server, server); + ACCESS_CONTROLLER.preMove(ObserverContextImpl.createAndPrepare(CP_ENV), hri, server, + server); return null; } }; @@ -855,17 +844,15 @@ public void testSplitWithSplitRow() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preSplitRegion( - ObserverContextImpl.createAndPrepare(CP_ENV), - tableName, - TEST_ROW); + ACCESS_CONTROLLER.preSplitRegion(ObserverContextImpl.createAndPrepare(CP_ENV), tableName, + TEST_ROW); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, - USER_GROUP_WRITE, USER_GROUP_CREATE); + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -909,7 
+896,7 @@ private void verifyRead(AccessTestAction action) throws Exception { private void verifyReadWrite(AccessTestAction action) throws Exception { verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW); verifyDenied(action, USER_NONE, USER_RO, USER_GROUP_ADMIN, USER_GROUP_CREATE, USER_GROUP_READ, - USER_GROUP_WRITE); + USER_GROUP_WRITE); } @Test @@ -920,7 +907,7 @@ public void testRead() throws Exception { public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addFamily(TEST_FAMILY); - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { t.get(g); } @@ -935,7 +922,7 @@ public Object run() throws Exception { public Object run() throws Exception { Scan s = new Scan(); s.addFamily(TEST_FAMILY); - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table table = conn.getTable(TEST_TABLE)) { ResultScanner scanner = table.getScanner(s); try { @@ -961,7 +948,7 @@ public void testWrite() throws Exception { public Object run() throws Exception { Put p = new Put(TEST_ROW); p.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1)); - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { t.put(p); } @@ -976,7 +963,7 @@ public Object run() throws Exception { public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.addFamily(TEST_FAMILY); - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { t.delete(d); } @@ -991,7 +978,7 @@ public Object run() throws Exception { public Object run() throws Exception { Increment inc = new Increment(TEST_ROW); inc.addColumn(TEST_FAMILY, TEST_QUALIFIER, 1); - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { t.increment(inc); } @@ -1009,7 +996,7 @@ public void testReadWrite() throws Exception { public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.addFamily(TEST_FAMILY); - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { t.checkAndMutate(TEST_ROW, TEST_FAMILY).qualifier(TEST_QUALIFIER) .ifEquals(Bytes.toBytes("test_value")).thenDelete(d); @@ -1025,7 +1012,7 @@ public Object run() throws Exception { public Object run() throws Exception { Put p = new Put(TEST_ROW); p.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1)); - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { t.checkAndMutate(TEST_ROW, TEST_FAMILY).qualifier(TEST_QUALIFIER) .ifEquals(Bytes.toBytes("test_value")).thenPut(p); @@ -1055,8 +1042,9 @@ public Object run() throws Exception { byte[][][] hfileRanges = { { { (byte) 0 }, { (byte) 9 } } }; Path bulkLoadBasePath = new Path(dir, new Path(User.getCurrent().getName())); - new BulkLoadHelper(bulkLoadBasePath).initHFileData(TEST_FAMILY, TEST_QUALIFIER, - hfileRanges, numRows, FS_PERMISSION_ALL).bulkLoadHFile(TEST_TABLE); + new BulkLoadHelper(bulkLoadBasePath) + .initHFileData(TEST_FAMILY, TEST_QUALIFIER, 
hfileRanges, numRows, FS_PERMISSION_ALL) + .bulkLoadHFile(TEST_TABLE); return null; } }; @@ -1130,9 +1118,8 @@ public BulkLoadHelper(Path loadPath) throws IOException { this.loadPath = loadPath; } - private void createHFile(Path path, - byte[] family, byte[] qualifier, - byte[] startKey, byte[] endKey, int numRows) throws IOException { + private void createHFile(Path path, byte[] family, byte[] qualifier, byte[] startKey, + byte[] endKey, int numRows) throws IOException { HFile.Writer writer = null; long now = EnvironmentEdgeManager.currentTime(); try { @@ -1203,7 +1190,7 @@ public Object run() throws Exception { put.addColumn(TEST_FAMILY, qualifier, Bytes.toBytes(1)); Append append = new Append(row); append.addColumn(TEST_FAMILY, qualifier, Bytes.toBytes(2)); - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { t.put(put); t.append(append); @@ -1317,7 +1304,7 @@ public Object run() throws Exception { verifyAllowed(grantAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(grantAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, - USER_GROUP_WRITE, USER_GROUP_CREATE); + USER_GROUP_WRITE, USER_GROUP_CREATE); try { verifyAllowed(revokeAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(revokeAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, @@ -1328,8 +1315,8 @@ public Object run() throws Exception { USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(getGlobalPermissionsAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(getGlobalPermissionsAction, USER_CREATE, USER_OWNER, USER_RW, USER_RO, - USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(getGlobalPermissionsAction, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, + USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(preGrantAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(preGrantAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, @@ -1355,8 +1342,7 @@ public Object run() throws Exception { @Test public void testPostGrantRevoke() throws Exception { - final TableName tableName = - TableName.valueOf("TempTable"); + final TableName tableName = TableName.valueOf("TempTable"); final byte[] family1 = Bytes.toBytes("f1"); final byte[] family2 = Bytes.toBytes("f2"); final byte[] qualifier = Bytes.toBytes("q"); @@ -1367,8 +1353,8 @@ public void testPostGrantRevoke() throws Exception { deleteTable(TEST_UTIL, tableName); } TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family2)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family2)).build(); createTable(TEST_UTIL, tableDescriptor); try { // create temp users @@ -1511,7 +1497,8 @@ public Object run() throws Exception { // grant table read permission grantGlobal(TEST_UTIL, gblUser.getShortName(), Permission.Action.READ); - grantOnTable(TEST_UTIL, tblUser.getShortName(), tableName, null, null, Permission.Action.READ); + grantOnTable(TEST_UTIL, tblUser.getShortName(), tableName, null, null, + Permission.Action.READ); // check verifyAllowed(tblUser, getActionAll, getAction1, getAction2); @@ -1599,11 +1586,12 @@ public Object run() throws Exception { } private boolean 
hasFoundUserPermission(List userPermissions, - List perms) { + List perms) { return perms.containsAll(userPermissions); } - private boolean hasFoundUserPermission(UserPermission userPermission, List perms) { + private boolean hasFoundUserPermission(UserPermission userPermission, + List perms) { return perms.contains(userPermission); } @@ -1620,8 +1608,8 @@ public void testPostGrantRevokeAtQualifierLevel() throws Exception { deleteTable(TEST_UTIL, tableName); } TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family2)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family2)).build(); createTable(TEST_UTIL, tableDescriptor); try { @@ -1723,8 +1711,8 @@ public void testPermissionList() throws Exception { deleteTable(TEST_UTIL, tableName); } TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family2)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family2)).build(); createTable(TEST_UTIL, USER_OWNER, tableDescriptor); try { List perms = @@ -1779,8 +1767,8 @@ public void testPermissionList() throws Exception { hasFoundUserPermission(upToVerify, perms)); User newOwner = User.createUserForTesting(conf, "new_owner", new String[] {}); - grantOnTable(TEST_UTIL, newOwner.getShortName(), tableName, - null, null, Permission.Action.values()); + grantOnTable(TEST_UTIL, newOwner.getShortName(), tableName, null, null, + Permission.Action.values()); perms = admin.getUserPermissions(GetUserPermissionsRequest.newBuilder(tableName).build()); UserPermission newOwnerperm = new UserPermission(newOwner.getName(), @@ -1807,9 +1795,10 @@ public void testGlobalPermissionList() throws Exception { adminPerms.add( new UserPermission(user, Permission.newBuilder().withActions(Action.values()).build())); } - assertTrue("Only super users, global users and user admin has permission on table hbase:acl " + - "per setup", perms.size() == 6 + superUsers.size() && - hasFoundUserPermission(adminPerms, perms)); + assertTrue( + "Only super users, global users and user admin has permission on table hbase:acl " + + "per setup", + perms.size() == 6 + superUsers.size() && hasFoundUserPermission(adminPerms, perms)); } /** global operations */ @@ -1854,14 +1843,11 @@ public Void run() throws Exception { User userColumn = User.createUserForTesting(conf, "user_check_perms_family", new String[0]); User userQualifier = User.createUserForTesting(conf, "user_check_perms_q", new String[0]); - grantOnTable(TEST_UTIL, userTable.getShortName(), - TEST_TABLE, null, null, + grantOnTable(TEST_UTIL, userTable.getShortName(), TEST_TABLE, null, null, Permission.Action.READ); - grantOnTable(TEST_UTIL, userColumn.getShortName(), - TEST_TABLE, TEST_FAMILY, null, + grantOnTable(TEST_UTIL, userColumn.getShortName(), TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ); - grantOnTable(TEST_UTIL, userQualifier.getShortName(), - TEST_TABLE, TEST_FAMILY, TEST_Q1, + grantOnTable(TEST_UTIL, userQualifier.getShortName(), TEST_TABLE, TEST_FAMILY, TEST_Q1, Permission.Action.READ); try { @@ -1951,17 +1937,13 @@ public Void run() throws Exception { // -------------------------------------- // check for wrong table region - 
CheckPermissionsRequest checkRequest = - CheckPermissionsRequest - .newBuilder() - .addPermission( - AccessControlProtos.Permission - .newBuilder() - .setType(AccessControlProtos.Permission.Type.Table) - .setTablePermission( - AccessControlProtos.TablePermission.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(TEST_TABLE)) - .addAction(AccessControlProtos.Permission.Action.CREATE))).build(); + CheckPermissionsRequest checkRequest = CheckPermissionsRequest.newBuilder() + .addPermission(AccessControlProtos.Permission.newBuilder() + .setType(AccessControlProtos.Permission.Type.Table) + .setTablePermission(AccessControlProtos.TablePermission.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(TEST_TABLE)) + .addAction(AccessControlProtos.Permission.Action.CREATE))) + .build(); Table acl = systemUserConnection.getTable(PermissionStorage.ACL_TABLE_NAME); try { BlockingRpcChannel channel = acl.coprocessorService(new byte[0]); @@ -2052,13 +2034,12 @@ public Object run() throws Exception { public void testSnapshot() throws Exception { Admin admin = TEST_UTIL.getAdmin(); final TableDescriptor htd = admin.getDescriptor(TEST_TABLE); - final SnapshotDescription snapshot = new SnapshotDescription( - TEST_TABLE.getNameAsString() + "-snapshot", TEST_TABLE); + final SnapshotDescription snapshot = + new SnapshotDescription(TEST_TABLE.getNameAsString() + "-snapshot", TEST_TABLE); AccessTestAction snapshotAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot, htd); + ACCESS_CONTROLLER.preSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot, htd); return null; } }; @@ -2066,8 +2047,7 @@ public Object run() throws Exception { AccessTestAction deleteAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot); + ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot); return null; } }; @@ -2075,8 +2055,8 @@ public Object run() throws Exception { AccessTestAction restoreAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot, htd); + ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot, + htd); return null; } }; @@ -2084,8 +2064,8 @@ public Object run() throws Exception { AccessTestAction cloneAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preCloneSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot, null); + ACCESS_CONTROLLER.preCloneSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot, + null); return null; } }; @@ -2103,8 +2083,8 @@ public Object run() throws Exception { USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(deleteAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); - verifyDenied(cloneAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, - USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(cloneAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test @@ -2117,8 +2097,7 @@ public void testSnapshotWithOwner() throws Exception { AccessTestAction snapshotAction = new AccessTestAction() { @Override public Object run() throws Exception { - 
ACCESS_CONTROLLER.preSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot, htd); + ACCESS_CONTROLLER.preSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot, htd); return null; } }; @@ -2129,8 +2108,7 @@ public Object run() throws Exception { AccessTestAction deleteAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot); + ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot); return null; } }; @@ -2141,8 +2119,8 @@ public Object run() throws Exception { AccessTestAction restoreAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot, htd); + ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot, + htd); return null; } }; @@ -2153,8 +2131,8 @@ public Object run() throws Exception { AccessTestAction cloneAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preCloneSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot, htd); + ACCESS_CONTROLLER.preCloneSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot, + htd); return null; } }; @@ -2170,12 +2148,11 @@ public void testGlobalAuthorizationForNewRegisteredRS() throws Exception { final Admin admin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE2) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); createTable(TEST_UTIL, tableDescriptor); // Starting a new RegionServer. - JVMClusterUtil.RegionServerThread newRsThread = hbaseCluster - .startRegionServer(); + JVMClusterUtil.RegionServerThread newRsThread = hbaseCluster.startRegionServer(); final HRegionServer newRs = newRsThread.getRegionServer(); // Move region to the new RegionServer. @@ -2199,8 +2176,7 @@ public Object run() throws Exception { final int RETRIES_LIMIT = 10; int retries = 0; while (newRs.getRegions(TEST_TABLE2).size() < 1 && retries < RETRIES_LIMIT) { - LOG.debug("Waiting for region to be opened. Already retried " + retries - + " times."); + LOG.debug("Waiting for region to be opened. 
Already retried " + retries + " times."); try { Thread.sleep(1000); } catch (InterruptedException e) { @@ -2258,8 +2234,8 @@ public Object run() throws Exception { verifyIfEmptyList(listTablesAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); - verifyAllowed(getTableDescAction, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, - TABLE_ADMIN, USER_GROUP_CREATE, USER_GROUP_ADMIN); + verifyAllowed(getTableDescAction, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, TABLE_ADMIN, + USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(getTableDescAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } finally { @@ -2298,7 +2274,8 @@ public void testTableDeletion() throws Exception { createTestTable(tableName); // Grant TABLE ADMIN privs - grantOnTable(TEST_UTIL, TABLE_ADMIN.getShortName(), tableName, null, null, Permission.Action.ADMIN); + grantOnTable(TEST_UTIL, TABLE_ADMIN.getShortName(), tableName, null, null, + Permission.Action.ADMIN); AccessTestAction deleteTableAction = new AccessTestAction() { @Override @@ -2316,8 +2293,7 @@ public Object run() throws Exception { } }; - verifyDenied(deleteTableAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, - USER_GROUP_WRITE); + verifyDenied(deleteTableAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); verifyAllowed(deleteTableAction, TABLE_ADMIN); } @@ -2327,8 +2303,8 @@ private void createTestTable(TableName tname) throws Exception { private void createTestTable(TableName tname, byte[] cf) throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tname) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cf).setMaxVersions(100).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cf).setMaxVersions(100).build()) + .build(); createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); } @@ -2337,7 +2313,7 @@ public void testNamespaceUserGrant() throws Exception { AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { return t.get(new Get(TEST_ROW)); } @@ -2363,7 +2339,7 @@ public void testAccessControlClientGrantRevoke() throws Exception { AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { return t.get(new Get(TEST_ROW)); } @@ -2398,12 +2374,12 @@ public Object run() throws Exception { @Test public void testAccessControlClientGlobalGrantRevoke() throws Exception { // Create user for testing, who has no READ privileges by default. 
- User testGlobalGrantRevoke = User.createUserForTesting(conf, - "testGlobalGrantRevoke", new String[0]); + User testGlobalGrantRevoke = + User.createUserForTesting(conf, "testGlobalGrantRevoke", new String[0]); AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { return t.get(new Get(TEST_ROW)); } @@ -2443,12 +2419,11 @@ public Object run() throws Exception { @Test public void testAccessControlClientMultiGrantRevoke() throws Exception { - User testGrantRevoke = - User.createUserForTesting(conf, "testGrantRevoke", new String[0]); + User testGrantRevoke = User.createUserForTesting(conf, "testGrantRevoke", new String[0]); AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { return t.get(new Get(TEST_ROW)); } @@ -2460,7 +2435,7 @@ public Object run() throws Exception { public Object run() throws Exception { Put p = new Put(TEST_ROW); p.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1)); - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { t.put(p); return null; @@ -2485,7 +2460,7 @@ public Object run() throws Exception { // Grant global WRITE permissions to testGrantRevoke. try { grantGlobalUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, - Permission.Action.WRITE); + Permission.Action.WRITE); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.grant. ", e); } @@ -2495,7 +2470,7 @@ public Object run() throws Exception { // Revoke global READ permission to testGrantRevoke. try { revokeGlobalUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, - Permission.Action.READ, Permission.Action.WRITE); + Permission.Action.READ, Permission.Action.WRITE); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.revoke ", e); } @@ -2524,8 +2499,8 @@ public Object run() throws Exception { // Revoke table READ & WRITE permission to testGrantRevoke. try { - revokeFromTableUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, TEST_TABLE, null, null, - Permission.Action.READ, Permission.Action.WRITE); + revokeFromTableUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, TEST_TABLE, + null, null, Permission.Action.READ, Permission.Action.WRITE); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.revoke ", e); } @@ -2535,8 +2510,8 @@ public Object run() throws Exception { // Grant Namespace READ permissions to testGrantRevoke String namespace = TEST_TABLE.getNamespaceAsString(); try { - grantOnNamespaceUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, - namespace, Permission.Action.READ); + grantOnNamespaceUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, namespace, + Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.grant. 
", e); } @@ -2545,8 +2520,8 @@ public Object run() throws Exception { // Grant Namespace WRITE permissions to testGrantRevoke try { - grantOnNamespaceUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, - namespace, Permission.Action.WRITE); + grantOnNamespaceUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, namespace, + Permission.Action.WRITE); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.grant. ", e); } @@ -2556,7 +2531,7 @@ public Object run() throws Exception { // Revoke table READ & WRITE permission to testGrantRevoke. try { revokeFromNamespaceUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, - TEST_TABLE.getNamespaceAsString(), Permission.Action.READ, Permission.Action.WRITE); + TEST_TABLE.getNamespaceAsString(), Permission.Action.READ, Permission.Action.WRITE); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.revoke ", e); } @@ -2571,7 +2546,7 @@ public void testAccessControlClientGrantRevokeOnNamespace() throws Exception { AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection conn = ConnectionFactory.createConnection(conf); + try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { return t.get(new Get(TEST_ROW)); } @@ -2609,14 +2584,15 @@ public Object run() throws Exception { verifyDenied(getAction, testNS); } - public static class PingCoprocessor extends PingService implements RegionCoprocessor { @Override - public void start(CoprocessorEnvironment env) throws IOException { } + public void start(CoprocessorEnvironment env) throws IOException { + } @Override - public void stop(CoprocessorEnvironment env) throws IOException { } + public void stop(CoprocessorEnvironment env) throws IOException { + } @Override public Iterable getServices() { @@ -2657,12 +2633,11 @@ public void noop(RpcController controller, NoopRequest request, @Test public void testCoprocessorExec() throws Exception { // Set up our ping endpoint service on all regions of our test table - for (JVMClusterUtil.RegionServerThread thread: - TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) { + for (JVMClusterUtil.RegionServerThread thread : TEST_UTIL.getMiniHBaseCluster() + .getRegionServerThreads()) { HRegionServer rs = thread.getRegionServer(); - for (HRegion region: rs.getRegions(TEST_TABLE)) { - region.getCoprocessorHost().load(PingCoprocessor.class, - Coprocessor.PRIORITY_USER, conf); + for (HRegion region : rs.getRegions(TEST_TABLE)) { + region.getCoprocessorHost().load(PingCoprocessor.class, Coprocessor.PRIORITY_USER, conf); } } @@ -2671,9 +2646,7 @@ public void testCoprocessorExec() throws Exception { User userA = User.createUserForTesting(conf, "UserA", new String[0]); User userB = User.createUserForTesting(conf, "UserB", new String[0]); - grantOnTable(TEST_UTIL, userA.getShortName(), - TEST_TABLE, null, null, - Permission.Action.EXEC); + grantOnTable(TEST_UTIL, userA.getShortName(), TEST_TABLE, null, null, Permission.Action.EXEC); try { // Create an action for invoking our test endpoint AccessTestAction execEndpointAction = new AccessTestAction() { @@ -2710,8 +2683,7 @@ public void testSetQuota() throws Exception { AccessTestAction setUserQuotaAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preSetUserQuota(ObserverContextImpl.createAndPrepare(CP_ENV), - null, null); + 
ACCESS_CONTROLLER.preSetUserQuota(ObserverContextImpl.createAndPrepare(CP_ENV), null, null); return null; } }; @@ -2728,8 +2700,8 @@ public Object run() throws Exception { AccessTestAction setUserNamespaceQuotaAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preSetUserQuota(ObserverContextImpl.createAndPrepare(CP_ENV), - null, (String)null, null); + ACCESS_CONTROLLER.preSetUserQuota(ObserverContextImpl.createAndPrepare(CP_ENV), null, + (String) null, null); return null; } }; @@ -2737,8 +2709,8 @@ public Object run() throws Exception { AccessTestAction setTableQuotaAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preSetTableQuota(ObserverContextImpl.createAndPrepare(CP_ENV), - TEST_TABLE, null); + ACCESS_CONTROLLER.preSetTableQuota(ObserverContextImpl.createAndPrepare(CP_ENV), TEST_TABLE, + null); return null; } }; @@ -2746,8 +2718,8 @@ public Object run() throws Exception { AccessTestAction setNamespaceQuotaAction = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preSetNamespaceQuota(ObserverContextImpl.createAndPrepare(CP_ENV), - null, null); + ACCESS_CONTROLLER.preSetNamespaceQuota(ObserverContextImpl.createAndPrepare(CP_ENV), null, + null); return null; } }; @@ -2766,8 +2738,8 @@ public Object run() throws Exception { USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(setUserTableQuotaAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); - verifyDenied(setUserTableQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, - USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); + verifyDenied(setUserTableQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, + USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(setUserNamespaceQuotaAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(setUserNamespaceQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, @@ -2805,9 +2777,10 @@ public void testGetNamespacePermission() throws Exception { } /** - * List all user permissions match the given regular expression for namespace - * and verify each of them. - * @param namespaceRegexWithoutPrefix the regualar expression for namespace, without NAMESPACE_PREFIX + * List all user permissions match the given regular expression for namespace and verify each of + * them. 
+ * @param namespaceRegexWithoutPrefix the regualar expression for namespace, without + * NAMESPACE_PREFIX * @param expectedAmount the expected amount of user permissions returned * @param expectedNamespace the expected namespace of each user permission returned * @throws HBaseException in the case of any HBase exception when accessing hbase:acl table @@ -2834,15 +2807,15 @@ private void getNamespacePermissionsAndVerify(String namespaceRegexWithoutPrefix @Test public void testTruncatePerms() throws Exception { try { - List existingPerms = AccessControlClient.getUserPermissions( - systemUserConnection, TEST_TABLE.getNameAsString()); + List existingPerms = AccessControlClient + .getUserPermissions(systemUserConnection, TEST_TABLE.getNameAsString()); assertTrue(existingPerms != null); assertTrue(existingPerms.size() > 1); TEST_UTIL.getAdmin().disableTable(TEST_TABLE); TEST_UTIL.truncateTable(TEST_TABLE); TEST_UTIL.waitTableAvailable(TEST_TABLE); - List perms = AccessControlClient.getUserPermissions( - systemUserConnection, TEST_TABLE.getNameAsString()); + List perms = AccessControlClient.getUserPermissions(systemUserConnection, + TEST_TABLE.getNameAsString()); assertTrue(perms != null); assertEquals(existingPerms.size(), perms.size()); } catch (Throwable e) { @@ -2854,7 +2827,7 @@ private PrivilegedAction> getPrivilegedAction(final String return new PrivilegedAction>() { @Override public List run() { - try(Connection conn = ConnectionFactory.createConnection(conf)) { + try (Connection conn = ConnectionFactory.createConnection(conf)) { return AccessControlClient.getUserPermissions(conn, regex); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.getUserPermissions.", e); @@ -2894,7 +2867,7 @@ public void testAccessControllerUserPermsRegexHandling() throws Exception { // create table in default ns TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(table1) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); createTable(TEST_UTIL, tableDescriptor); // creating the ns and table in it @@ -2903,7 +2876,7 @@ public void testAccessControllerUserPermsRegexHandling() throws Exception { final TableName table2 = TableName.valueOf(ns, tableName); createNamespace(TEST_UTIL, desc); tableDescriptor = TableDescriptorBuilder.newBuilder(table2) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); createTable(TEST_UTIL, tableDescriptor); // Verify that we can read sys-tables @@ -2920,11 +2893,13 @@ public void testAccessControllerUserPermsRegexHandling() throws Exception { // USER_ADMIN, testUserPerms must have a row each. 
assertEquals(2, testRegexHandler.runAs(getPrivilegedAction(tableName)).size()); - assertEquals(2, testRegexHandler.runAs(getPrivilegedAction( - NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + TableName.NAMESPACE_DELIM + tableName) - ).size()); - assertEquals(2, testRegexHandler.runAs(getPrivilegedAction( - ns + TableName.NAMESPACE_DELIM + tableName)).size()); + assertEquals(2, + testRegexHandler + .runAs(getPrivilegedAction( + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + TableName.NAMESPACE_DELIM + tableName)) + .size()); + assertEquals(2, testRegexHandler + .runAs(getPrivilegedAction(ns + TableName.NAMESPACE_DELIM + tableName)).size()); assertEquals(0, testRegexHandler.runAs(getPrivilegedAction("notMatchingAny")).size()); deleteTable(TEST_UTIL, table1); @@ -3039,8 +3014,8 @@ public void testGetReplicationPeerConfig() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preGetReplicationPeerConfig( - ObserverContextImpl.createAndPrepare(CP_ENV), "test"); + ACCESS_CONTROLLER.preGetReplicationPeerConfig(ObserverContextImpl.createAndPrepare(CP_ENV), + "test"); return null; } }; @@ -3108,16 +3083,17 @@ public void testRemoteLocks() throws Exception { User tableACUser = User.createUserForTesting(conf, "qLTableACUser", new String[0]); // User will be granted READ, WRITE, EXECUTE on table. Should be denied. User tableRWXUser = User.createUserForTesting(conf, "qLTableRWXUser", new String[0]); - grantOnTable(TEST_UTIL, tableRWXUser.getShortName(), tableName, null, null, - Action.READ, Action.WRITE, Action.EXEC); + grantOnTable(TEST_UTIL, tableRWXUser.getShortName(), tableName, null, null, Action.READ, + Action.WRITE, Action.EXEC); // User with global READ, WRITE, EXECUTE should be denied lock access. User globalRWXUser = User.createUserForTesting(conf, "qLGlobalRWXUser", new String[0]); grantGlobal(TEST_UTIL, globalRWXUser.getShortName(), Action.READ, Action.WRITE, Action.EXEC); AccessTestAction namespaceLockAction = new AccessTestAction() { - @Override public Object run() throws Exception { + @Override + public Object run() throws Exception { ACCESS_CONTROLLER.preRequestLock(ObserverContextImpl.createAndPrepare(CP_ENV), namespace, - null, null, null); + null, null, null); return null; } }; @@ -3129,16 +3105,17 @@ public void testRemoteLocks() throws Exception { verifyAllowed(namespaceLockAction, namespaceUser); AccessTestAction tableLockAction = new AccessTestAction() { - @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preRequestLock(ObserverContextImpl.createAndPrepare(CP_ENV), - null, tableName, null, null); + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preRequestLock(ObserverContextImpl.createAndPrepare(CP_ENV), null, + tableName, null, null); return null; } }; verifyAllowed(tableLockAction, SUPERUSER, USER_ADMIN, namespaceUser); verifyDenied(tableLockAction, globalRWXUser, tableACUser, tableRWXUser); - grantOnTable(TEST_UTIL, tableACUser.getShortName(), tableName, null, null, - Action.ADMIN, Action.CREATE); + grantOnTable(TEST_UTIL, tableACUser.getShortName(), tableName, null, null, Action.ADMIN, + Action.CREATE); // See if this can fail (flakie) because grant hasn't propagated yet. 
for (int i = 0; i < 10; i++) { try { @@ -3151,9 +3128,10 @@ public void testRemoteLocks() throws Exception { } AccessTestAction regionsLockAction = new AccessTestAction() { - @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preRequestLock(ObserverContextImpl.createAndPrepare(CP_ENV), - null, null, regionInfos, null); + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preRequestLock(ObserverContextImpl.createAndPrepare(CP_ENV), null, null, + regionInfos, null); return null; } }; @@ -3165,9 +3143,10 @@ public void testRemoteLocks() throws Exception { // was created, we just need namespace from the lock's tablename. LockProcedure proc = new LockProcedure(conf, tableName, LockType.EXCLUSIVE, "test", null); AccessTestAction regionLockHeartbeatAction = new AccessTestAction() { - @Override public Object run() throws Exception { + @Override + public Object run() throws Exception { ACCESS_CONTROLLER.preLockHeartbeat(ObserverContextImpl.createAndPrepare(CP_ENV), - proc.getTableName(), proc.getDescription()); + proc.getTableName(), proc.getDescription()); return null; } }; @@ -3221,8 +3200,8 @@ public Object run() throws Exception { } }; - verifyAllowed( - action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO, USER_NONE, + USER_OWNER); } @Test @@ -3720,17 +3699,15 @@ private void verifyGetUserPermissionResult(List userPermissions, if (cq != null) { assertTrue(Bytes.equals(cq, tablePerm.getQualifier())); } - if (userName != null - && (superUsers == null || !superUsers.contains(perm.getUser()))) { - assertTrue(userName.equals(perm.getUser())); - } - } else if (perm.getPermission() instanceof NamespacePermission || - perm.getPermission() instanceof GlobalPermission) { - if (userName != null && - (superUsers == null || !superUsers.contains(perm.getUser()))) { + if (userName != null && (superUsers == null || !superUsers.contains(perm.getUser()))) { assertTrue(userName.equals(perm.getUser())); } - } + } else if (perm.getPermission() instanceof NamespacePermission + || perm.getPermission() instanceof GlobalPermission) { + if (userName != null && (superUsers == null || !superUsers.contains(perm.getUser()))) { + assertTrue(userName.equals(perm.getUser())); + } + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java index 9b474a774d0e..b1d06d57e770 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,7 +67,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestAccessController2 extends SecureTestUtil { @ClassRule @@ -84,9 +84,11 @@ public class TestAccessController2 extends SecureTestUtil { private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Configuration conf; - /** The systemUserConnection created here is tied to the system user. 
In case, you are planning - * to create AccessTestAction, DON'T use this systemUserConnection as the 'doAs' user - * gets eclipsed by the system user. */ + /** + * The systemUserConnection created here is tied to the system user. In case, you are planning to + * create AccessTestAction, DON'T use this systemUserConnection as the 'doAs' user gets eclipsed + * by the system user. + */ private static Connection systemUserConnection; private final static byte[] Q1 = Bytes.toBytes("q1"); @@ -137,20 +139,20 @@ public static void setupBeforeClass() throws Exception { @Before public void setUp() throws Exception { createNamespace(TEST_UTIL, NamespaceDescriptor.create(namespace).build()); - try (Table table = createTable(TEST_UTIL, tableName, - new byte[][] { TEST_FAMILY, TEST_FAMILY_2 })) { + try (Table table = + createTable(TEST_UTIL, tableName, new byte[][] { TEST_FAMILY, TEST_FAMILY_2 })) { TEST_UTIL.waitTableEnabled(tableName); // Ingesting test data. table.put(Arrays.asList(new Put(TEST_ROW).addColumn(TEST_FAMILY, Q1, value1), - new Put(TEST_ROW_2).addColumn(TEST_FAMILY, Q2, value2), - new Put(TEST_ROW_3).addColumn(TEST_FAMILY_2, Q1, value1))); + new Put(TEST_ROW_2).addColumn(TEST_FAMILY, Q2, value2), + new Put(TEST_ROW_3).addColumn(TEST_FAMILY_2, Q1, value1))); } assertEquals(1, PermissionStorage.getTablePermissions(conf, tableName).size()); try { - assertEquals(1, AccessControlClient.getUserPermissions(systemUserConnection, - tableName.toString()).size()); + assertEquals(1, + AccessControlClient.getUserPermissions(systemUserConnection, tableName.toString()).size()); } catch (Throwable e) { LOG.error("Error during call of AccessControlClient.getUserPermissions. ", e); } @@ -181,16 +183,16 @@ public void tearDown() throws Exception { @Test public void testCreateWithCorrectOwner() throws Exception { // Create a test user - final User testUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "TestUser", - new String[0]); + final User testUser = + User.createUserForTesting(TEST_UTIL.getConfiguration(), "TestUser", new String[0]); // Grant the test user the ability to create tables SecureTestUtil.grantGlobal(TEST_UTIL, testUser.getShortName(), Action.CREATE); verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(testTable.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(testTable.getTableName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(), testUser)) { try (Admin admin = connection.getAdmin()) { @@ -223,8 +225,8 @@ public void testCreateTableWithGroupPermissions() throws Exception { @Override public Object run() throws Exception { TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(testTable.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(testTable.getTableName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { try (Admin admin = connection.getAdmin()) { @@ -277,14 +279,14 @@ public void testACLTableAccess() throws Exception { User tableWrite = User.createUserForTesting(conf, "tableWrite", new String[0]); User tableCreate = User.createUserForTesting(conf, "tableCreate", 
new String[0]); User tableAdmin = User.createUserForTesting(conf, "tableAdmin", new String[0]); - SecureTestUtil.grantOnTable(TEST_UTIL, tableRead.getShortName(), - testTable.getTableName(), null, null, Action.READ); - SecureTestUtil.grantOnTable(TEST_UTIL, tableWrite.getShortName(), - testTable.getTableName(), null, null, Action.WRITE); - SecureTestUtil.grantOnTable(TEST_UTIL, tableCreate.getShortName(), - testTable.getTableName(), null, null, Action.CREATE); - SecureTestUtil.grantOnTable(TEST_UTIL, tableAdmin.getShortName(), - testTable.getTableName(), null, null, Action.ADMIN); + SecureTestUtil.grantOnTable(TEST_UTIL, tableRead.getShortName(), testTable.getTableName(), null, + null, Action.READ); + SecureTestUtil.grantOnTable(TEST_UTIL, tableWrite.getShortName(), testTable.getTableName(), + null, null, Action.WRITE); + SecureTestUtil.grantOnTable(TEST_UTIL, tableCreate.getShortName(), testTable.getTableName(), + null, null, Action.CREATE); + SecureTestUtil.grantOnTable(TEST_UTIL, tableAdmin.getShortName(), testTable.getTableName(), + null, null, Action.ADMIN); grantGlobal(TEST_UTIL, TESTGROUP_1_NAME, Action.WRITE); try { @@ -361,7 +363,8 @@ public Void run() throws Exception { try (ResultScanner scanner1 = table.getScanner(s1)) { Result[] next1 = scanner1.next(5); assertTrue("User having table level access should be able to scan all " - + "the data in the table.", next1.length == 3); + + "the data in the table.", + next1.length == 3); } } return null; @@ -377,7 +380,8 @@ public Void run() throws Exception { try (ResultScanner scanner1 = table.getScanner(s1)) { Result[] next1 = scanner1.next(5); assertTrue("User having column family level access should be able to scan all " - + "the data belonging to that family.", next1.length == 2); + + "the data belonging to that family.", + next1.length == 2); } } return null; @@ -408,7 +412,8 @@ public Void run() throws Exception { try (ResultScanner scanner1 = table.getScanner(s1)) { Result[] next1 = scanner1.next(5); assertTrue("User having column qualifier level access should be able to scan " - + "that column family qualifier data.", next1.length == 1); + + "that column family qualifier data.", + next1.length == 1); } } return null; @@ -457,8 +462,7 @@ public Void run() throws Exception { // Verify user from a group which has column family level access can read all the data // belonging to that family and group which has no access can't read any data. 
- grantOnTable(TEST_UTIL, TESTGROUP_1_NAME, tableName, TEST_FAMILY, null, - Permission.Action.READ); + grantOnTable(TEST_UTIL, TESTGROUP_1_NAME, tableName, TEST_FAMILY, null, Permission.Action.READ); verifyAllowed(TESTGROUP1_USER1, scanTableActionForGroupWithFamilyLevelAccess); verifyDenied(TESTGROUP1_USER1, scanFamilyActionForGroupWithFamilyLevelAccess); verifyDenied(TESTGROUP2_USER1, scanTableActionForGroupWithFamilyLevelAccess); @@ -494,12 +498,12 @@ public void testCoprocessorLoading() throws Exception { TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(MyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); AccessController ACCESS_CONTROLLER = cpHost.findCoprocessor(MyAccessController.class); - MasterCoprocessorEnvironment CP_ENV = cpHost.createEnvironment( - ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); - RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) - .getRegionServerCoprocessorHost(); - RegionServerCoprocessorEnvironment RSCP_ENV = rsHost.createEnvironment( - ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); + MasterCoprocessorEnvironment CP_ENV = + cpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); + RegionServerCoprocessorHost rsHost = + TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost(); + RegionServerCoprocessorEnvironment RSCP_ENV = + rsHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); } @Test @@ -512,24 +516,24 @@ public void testACLZNodeDeletion() throws Exception { final TableName table = TableName.valueOf(ns, "testACLZNodeDeletionTable"); final byte[] family = Bytes.toBytes("f1"); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(table) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); createTable(TEST_UTIL, tableDescriptor); // Namespace needs this, as they follow the lazy creation of ACL znode. 
grantOnNamespace(TEST_UTIL, TESTGROUP1_USER1.getShortName(), ns, Action.ADMIN); ZKWatcher zkw = TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper(); - assertTrue("The acl znode for table should exist", ZKUtil.checkExists(zkw, baseAclZNode + - table.getNameAsString()) != -1); - assertTrue("The acl znode for namespace should exist", ZKUtil.checkExists(zkw, baseAclZNode + - convertToNamespace(ns)) != -1); + assertTrue("The acl znode for table should exist", + ZKUtil.checkExists(zkw, baseAclZNode + table.getNameAsString()) != -1); + assertTrue("The acl znode for namespace should exist", + ZKUtil.checkExists(zkw, baseAclZNode + convertToNamespace(ns)) != -1); revokeFromNamespace(TEST_UTIL, TESTGROUP1_USER1.getShortName(), ns, Action.ADMIN); deleteTable(TEST_UTIL, table); deleteNamespace(TEST_UTIL, ns); assertTrue("The acl znode for table should have been deleted", - ZKUtil.checkExists(zkw, baseAclZNode + table.getNameAsString()) == -1); - assertTrue( "The acl znode for namespace should have been deleted", - ZKUtil.checkExists(zkw, baseAclZNode + convertToNamespace(ns)) == -1); + ZKUtil.checkExists(zkw, baseAclZNode + table.getNameAsString()) == -1); + assertTrue("The acl znode for namespace should have been deleted", + ZKUtil.checkExists(zkw, baseAclZNode + convertToNamespace(ns)) == -1); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java index 9f2202f4d201..40cdbb73e28f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -57,13 +57,11 @@ import org.slf4j.LoggerFactory; /** - * Performs checks for reference counting w.r.t. AuthManager which is used by - * AccessController. - * - * NOTE: Only one test in here. In AMv2, there is problem deleting because - * we are missing auth. For now disabled. See the cleanup method. + * Performs checks for reference counting w.r.t. AuthManager which is used by AccessController. + * NOTE: Only one test in here. In AMv2, there is problem deleting because we are missing auth. For + * now disabled. See the cleanup method. */ -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestAccessController3 extends SecureTestUtil { @ClassRule @@ -75,12 +73,13 @@ public class TestAccessController3 extends SecureTestUtil { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Configuration conf; - /** The systemUserConnection created here is tied to the system user. In case, you are planning - * to create AccessTestAction, DON'T use this systemUserConnection as the 'doAs' user - * gets eclipsed by the system user. */ + /** + * The systemUserConnection created here is tied to the system user. In case, you are planning to + * create AccessTestAction, DON'T use this systemUserConnection as the 'doAs' user gets eclipsed + * by the system user. 
+ */ private static Connection systemUserConnection; - // user with all permissions private static User SUPERUSER; // user granted with all global permission @@ -156,14 +155,13 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); MasterCoprocessorHost cpHost = - TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(FaultyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(accessControllerClassName); CP_ENV = cpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); RegionServerCoprocessorHost rsHost; do { - rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) - .getRegionServerCoprocessorHost(); + rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost(); } while (rsHost == null); RSCP_ENV = rsHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); @@ -199,8 +197,8 @@ public static void setupBeforeClass() throws Exception { @AfterClass public static void tearDownAfterClass() throws Exception { assertEquals(1, TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().size()); - HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().get(0). - getRegionServer(); + HRegionServer rs = + TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().get(0).getRegionServer(); // Strange place for an assert. assertFalse("RegionServer should have ABORTED (FaultyAccessController)", rs.isAborted()); cleanUp(); @@ -209,8 +207,9 @@ public static void tearDownAfterClass() throws Exception { private static void setUpTableAndUserPermissions() throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()).build(); + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()) + .build(); createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE).get(0); @@ -219,31 +218,21 @@ private static void setUpTableAndUserPermissions() throws Exception { // Set up initial grants - grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), - Permission.Action.ADMIN, - Permission.Action.CREATE, - Permission.Action.READ, - Permission.Action.WRITE); + grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), Permission.Action.ADMIN, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); - grantOnTable(TEST_UTIL, USER_RW.getShortName(), - TEST_TABLE, TEST_FAMILY, null, - Permission.Action.READ, - Permission.Action.WRITE); + grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null, + Permission.Action.READ, Permission.Action.WRITE); // USER_CREATE is USER_RW plus CREATE permissions - grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), - TEST_TABLE, null, null, - Permission.Action.CREATE, - Permission.Action.READ, - Permission.Action.WRITE); - - grantOnTable(TEST_UTIL, USER_RO.getShortName(), - TEST_TABLE, TEST_FAMILY, null, + grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), TEST_TABLE, null, null, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); + + grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ); - 
grantOnTable(TEST_UTIL, USER_ADMIN_CF.getShortName(), - TEST_TABLE, TEST_FAMILY, - null, Permission.Action.ADMIN, Permission.Action.CREATE); + grantOnTable(TEST_UTIL, USER_ADMIN_CF.getShortName(), TEST_TABLE, TEST_FAMILY, null, + Permission.Action.ADMIN, Permission.Action.CREATE); grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN); grantGlobal(TEST_UTIL, toGroupEntry(GROUP_CREATE), Permission.Action.CREATE); @@ -252,8 +241,8 @@ private static void setUpTableAndUserPermissions() throws Exception { assertEquals(5, PermissionStorage.getTablePermissions(conf, TEST_TABLE).size()); try { - assertEquals(5, AccessControlClient.getUserPermissions(systemUserConnection, - TEST_TABLE.toString()).size()); + assertEquals(5, + AccessControlClient.getUserPermissions(systemUserConnection, TEST_TABLE.toString()).size()); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.getUserPermissions. ", e); } @@ -282,8 +271,8 @@ public void testTableCreate() throws Exception { @Override public Object run() throws Exception { TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); ACCESS_CONTROLLER.preCreateTable(ObserverContextImpl.createAndPrepare(CP_ENV), tableDescriptor, null); return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java index 0af18a5b15e5..4240f3260024 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestCellACLWithMultipleVersions extends SecureTestUtil { @ClassRule @@ -105,13 +105,13 @@ public static void setupBeforeClass() throws Exception { conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false); TEST_UTIL.startMiniCluster(); - MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster() - .getMasterCoprocessorHost(); + MasterCoprocessorHost cpHost = + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); AccessController ac = cpHost.findCoprocessor(AccessController.class); cpHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); - RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) - .getRegionServerCoprocessorHost(); + RegionServerCoprocessorHost rsHost = + TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost(); rsHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available @@ -137,10 +137,11 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(testTable.getTableName()) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY1).setMaxVersions(4).build()) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY2).setMaxVersions(4).build()).build(); + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY1).setMaxVersions(4).build()) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY2).setMaxVersions(4).build()) + .build(); // Create the test table (owner added to the _acl_ table) createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitTableEnabled(testTable.getTableName()); @@ -157,7 +158,7 @@ public void testCellPermissionwithVersions() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { Put p; // with ro ACL @@ -190,7 +191,7 @@ public Object run() throws Exception { public Object run() throws Exception { Get get = new Get(TEST_ROW); get.readVersions(10); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { return t.get(get).listCells(); } @@ -202,7 +203,7 @@ public Object run() throws Exception { public Object run() throws Exception { Get get = new Get(TEST_ROW); get.readVersions(10); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { return t.get(get).listCells(); } @@ -218,7 +219,7 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection connection = 
ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { Put p; p = new Put(TEST_ROW).addColumn(TEST_FAMILY1, TEST_Q1, ZERO); @@ -270,15 +271,15 @@ public Object run() throws Exception { Put p = new Put(TEST_ROW1); p.addColumn(TEST_FAMILY1, TEST_Q1, ZERO); p.addColumn(TEST_FAMILY1, TEST_Q2, ZERO); - p.setACL(user1.getShortName(), new Permission(Permission.Action.READ, - Permission.Action.WRITE)); + p.setACL(user1.getShortName(), + new Permission(Permission.Action.READ, Permission.Action.WRITE)); t.put(p); // with rw ACL for "user1" p = new Put(TEST_ROW2); p.addColumn(TEST_FAMILY1, TEST_Q1, ZERO); p.addColumn(TEST_FAMILY1, TEST_Q2, ZERO); - p.setACL(user1.getShortName(), new Permission(Permission.Action.READ, - Permission.Action.WRITE)); + p.setACL(user1.getShortName(), + new Permission(Permission.Action.READ, Permission.Action.WRITE)); t.put(p); } } @@ -297,7 +298,8 @@ public Object run() throws Exception { p.addColumn(TEST_FAMILY1, TEST_Q2, ZERO); Map perms = prepareCellPermissions(new String[] { user1.getShortName(), user2.getShortName(), - AuthUtil.toGroupEntry(GROUP) }, Action.READ, Action.WRITE); + AuthUtil.toGroupEntry(GROUP) }, + Action.READ, Action.WRITE); p.setACL(perms); t.put(p); // with rw ACL for "user1", "user2" and "@group" @@ -373,7 +375,6 @@ public Void run() throws Exception { }); } - @Test public void testDeleteWithFutureTimestamp() throws Exception { // Store two values, one in the future @@ -395,9 +396,10 @@ public Object run() throws Exception { LOG.info("Stored at current time"); // Store read only ACL at a future time p = new Put(TEST_ROW).addColumn(TEST_FAMILY1, TEST_Q1, - EnvironmentEdgeManager.currentTime() + 1000000, ZERO); - p.setACL(prepareCellPermissions(new String[]{ USER_OTHER.getShortName(), - AuthUtil.toGroupEntry(GROUP)}, Action.READ)); + EnvironmentEdgeManager.currentTime() + 1000000, ZERO); + p.setACL(prepareCellPermissions( + new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) }, + Action.READ)); t.put(p); } } @@ -434,7 +436,6 @@ public Object run() throws Exception { verifyAllowed(getQ1, USER_OWNER, USER_OTHER, GROUP_USER); verifyAllowed(getQ2, USER_OWNER, USER_OTHER, GROUP_USER); - // Issue a DELETE for the family, should succeed because the future ACL is // not considered AccessTestAction deleteFamily1 = getDeleteFamilyAction(TEST_FAMILY1); @@ -445,7 +446,7 @@ public Object run() throws Exception { // The future put should still exist - verifyAllowed(getQ1, USER_OWNER, USER_OTHER,GROUP_USER); + verifyAllowed(getQ1, USER_OWNER, USER_OTHER, GROUP_USER); // The other put should be covered by the tombstone @@ -479,9 +480,9 @@ public Object run() throws Exception { Put p = new Put(TEST_ROW); p.addColumn(TEST_FAMILY1, TEST_Q1, 123L, ZERO); p.addColumn(TEST_FAMILY1, TEST_Q2, 123L, ZERO); - p.setACL(prepareCellPermissions( - new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP), - USER_OTHER2.getShortName() }, Permission.Action.READ, Permission.Action.WRITE)); + p.setACL(prepareCellPermissions(new String[] { USER_OTHER.getShortName(), + AuthUtil.toGroupEntry(GROUP), USER_OTHER2.getShortName() }, + Permission.Action.READ, Permission.Action.WRITE)); t.put(p); // This version (TS = 125) with rw ACL for USER_OTHER @@ -489,8 +490,8 @@ public Object run() throws Exception { p.addColumn(TEST_FAMILY1, TEST_Q1, 125L, ONE); p.addColumn(TEST_FAMILY1, TEST_Q2, 125L, ONE); p.setACL(prepareCellPermissions( - new 
String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) }, - Action.READ, Action.WRITE)); + new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) }, Action.READ, + Action.WRITE)); t.put(p); // This version (TS = 127) with rw ACL for USER_OTHER @@ -498,8 +499,8 @@ public Object run() throws Exception { p.addColumn(TEST_FAMILY1, TEST_Q1, 127L, TWO); p.addColumn(TEST_FAMILY1, TEST_Q2, 127L, TWO); p.setACL(prepareCellPermissions( - new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) }, - Action.READ, Action.WRITE)); + new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) }, Action.READ, + Action.WRITE)); t.put(p); return null; @@ -554,14 +555,12 @@ public void testCellPermissionsWithDeleteExactVersion() throws Exception { public Object run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf)) { try (Table t = connection.getTable(testTable.getTableName())) { - Map permsU1andOwner = - prepareCellPermissions( - new String[] { user1.getShortName(), USER_OWNER.getShortName() }, Action.READ, - Action.WRITE); - Map permsU2andGUandOwner = - prepareCellPermissions( - new String[] { user2.getShortName(), AuthUtil.toGroupEntry(GROUP), - USER_OWNER.getShortName() }, Action.READ, Action.WRITE); + Map permsU1andOwner = prepareCellPermissions( + new String[] { user1.getShortName(), USER_OWNER.getShortName() }, Action.READ, + Action.WRITE); + Map permsU2andGUandOwner = prepareCellPermissions(new String[] { + user2.getShortName(), AuthUtil.toGroupEntry(GROUP), USER_OWNER.getShortName() }, + Action.READ, Action.WRITE); Put p = new Put(TEST_ROW1); p.addColumn(TEST_FAMILY1, TEST_Q1, 123, ZERO); p.setACL(permsU1andOwner); @@ -660,14 +659,12 @@ public void testCellPermissionsForIncrementWithMultipleVersions() throws Excepti public Object run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf)) { try (Table t = connection.getTable(testTable.getTableName())) { - Map permsU1andOwner = - prepareCellPermissions( - new String[] { user1.getShortName(), USER_OWNER.getShortName() }, Action.READ, - Action.WRITE); - Map permsU2andGUandOwner = - prepareCellPermissions( - new String[] { user2.getShortName(), AuthUtil.toGroupEntry(GROUP), - USER_OWNER.getShortName() }, Action.READ, Action.WRITE); + Map permsU1andOwner = prepareCellPermissions( + new String[] { user1.getShortName(), USER_OWNER.getShortName() }, Action.READ, + Action.WRITE); + Map permsU2andGUandOwner = prepareCellPermissions(new String[] { + user2.getShortName(), AuthUtil.toGroupEntry(GROUP), USER_OWNER.getShortName() }, + Action.READ, Action.WRITE); Put p = new Put(TEST_ROW1); p.addColumn(TEST_FAMILY1, TEST_Q1, 123, ZERO); p.setACL(permsU1andOwner); @@ -748,18 +745,16 @@ public void testCellPermissionsForPutWithMultipleVersions() throws Exception { public Object run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf)) { try (Table t = connection.getTable(testTable.getTableName())) { - Map permsU1andOwner = - prepareCellPermissions( - new String[] { user1.getShortName(), USER_OWNER.getShortName() }, Action.READ, - Action.WRITE); - Map permsU2andGUandOwner = - prepareCellPermissions( - new String[] { user1.getShortName(), AuthUtil.toGroupEntry(GROUP), - USER_OWNER.getShortName() }, Action.READ, Action.WRITE); - permsU2andGUandOwner.put(user2.getShortName(), new Permission(Permission.Action.READ, - Permission.Action.WRITE)); - permsU2andGUandOwner.put(USER_OWNER.getShortName(), new 
Permission(Permission.Action.READ, - Permission.Action.WRITE)); + Map permsU1andOwner = prepareCellPermissions( + new String[] { user1.getShortName(), USER_OWNER.getShortName() }, Action.READ, + Action.WRITE); + Map permsU2andGUandOwner = prepareCellPermissions(new String[] { + user1.getShortName(), AuthUtil.toGroupEntry(GROUP), USER_OWNER.getShortName() }, + Action.READ, Action.WRITE); + permsU2andGUandOwner.put(user2.getShortName(), + new Permission(Permission.Action.READ, Permission.Action.WRITE)); + permsU2andGUandOwner.put(USER_OWNER.getShortName(), + new Permission(Permission.Action.READ, Permission.Action.WRITE)); Put p = new Put(TEST_ROW1); p.addColumn(TEST_FAMILY1, TEST_Q1, 123, ZERO); p.setACL(permsU1andOwner); @@ -794,8 +789,8 @@ public Void run() throws Exception { Put p = new Put(TEST_ROW1); p.addColumn(TEST_FAMILY1, TEST_Q1, 125, ZERO); p.addColumn(TEST_FAMILY1, TEST_Q2, ZERO); - p.setACL(user2.getShortName(), new Permission(Permission.Action.READ, - Permission.Action.WRITE)); + p.setACL(user2.getShortName(), + new Permission(Permission.Action.READ, Permission.Action.WRITE)); t.put(p); } } @@ -808,8 +803,8 @@ public Void run() throws Exception { } private void verifyUserDeniedForPutMultipleVersions(final User user, final byte[] row, - final byte[] q1, final byte[] q2, final byte[] value) throws IOException, - InterruptedException { + final byte[] q1, final byte[] q2, final byte[] value) + throws IOException, InterruptedException { user.runAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { @@ -844,17 +839,17 @@ public void testCellPermissionsForCheckAndDelete() throws Exception { public Object run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf)) { try (Table t = connection.getTable(testTable.getTableName())) { - Map permsU1andOwner = - prepareCellPermissions( - new String[] { user1.getShortName(), USER_OWNER.getShortName() }, Action.READ, - Action.WRITE); - Map permsU1andU2andGUandOwner = - prepareCellPermissions(new String[] { user1.getShortName(), user2.getShortName(), - AuthUtil.toGroupEntry(GROUP), USER_OWNER.getShortName() }, Action.READ, - Action.WRITE); + Map permsU1andOwner = prepareCellPermissions( + new String[] { user1.getShortName(), USER_OWNER.getShortName() }, Action.READ, + Action.WRITE); + Map permsU1andU2andGUandOwner = prepareCellPermissions( + new String[] { user1.getShortName(), user2.getShortName(), + AuthUtil.toGroupEntry(GROUP), USER_OWNER.getShortName() }, + Action.READ, Action.WRITE); Map permsU1_U2andGU = prepareCellPermissions(new String[] { user1.getShortName(), user2.getShortName(), - AuthUtil.toGroupEntry(GROUP) }, Action.READ, Action.WRITE); + AuthUtil.toGroupEntry(GROUP) }, + Action.READ, Action.WRITE); Put p = new Put(TEST_ROW1); p.addColumn(TEST_FAMILY1, TEST_Q1, 120, ZERO); @@ -899,8 +894,8 @@ public Void run() throws Exception { try (Table t = connection.getTable(testTable.getTableName())) { Delete d = new Delete(TEST_ROW1); d.addColumns(TEST_FAMILY1, TEST_Q1, 120); - t.checkAndMutate(TEST_ROW1, TEST_FAMILY1).qualifier(TEST_Q1) - .ifEquals(ZERO).thenDelete(d); + t.checkAndMutate(TEST_ROW1, TEST_FAMILY1).qualifier(TEST_Q1).ifEquals(ZERO) + .thenDelete(d); } } return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java index 5d9c31e5cb68..6c6b3c2104f7 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestCellACLs extends SecureTestUtil { @ClassRule @@ -108,13 +108,13 @@ public static void setupBeforeClass() throws Exception { conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false); TEST_UTIL.startMiniCluster(); - MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster() - .getMasterCoprocessorHost(); + MasterCoprocessorHost cpHost = + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); AccessController ac = cpHost.findCoprocessor(AccessController.class); cpHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); - RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) - .getRegionServerCoprocessorHost(); + RegionServerCoprocessorHost rsHost = + TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost(); rsHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available @@ -139,9 +139,11 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { // Create the test table (owner added to the _acl_ table) - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(testTable.getTableName()) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(4).build()).build(); + TableDescriptor tableDescriptor = + TableDescriptorBuilder.newBuilder(testTable.getTableName()) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(4).build()) + .build(); createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitTableEnabled(testTable.getTableName()); LOG.info("Sleeping a second because of HBASE-12581"); @@ -154,7 +156,7 @@ public void testCellPermissions() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { Put p; // with ro ACL @@ -166,9 +168,8 @@ public Object run() throws Exception { p.setACL(prepareCellPermissions(usersAndGroups, Action.READ, Action.WRITE)); t.put(p); // no ACL - p = new Put(TEST_ROW) - .addColumn(TEST_FAMILY, TEST_Q3, ZERO) - .addColumn(TEST_FAMILY, TEST_Q4, ZERO); + p = new Put(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q3, ZERO).addColumn(TEST_FAMILY, + TEST_Q4, ZERO); t.put(p); } return null; @@ -181,7 +182,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { Get get = new Get(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q1); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = 
connection.getTable(testTable.getTableName())) { return t.get(get).listCells(); } @@ -192,7 +193,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { Get get = new Get(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q2); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { return t.get(get).listCells(); } @@ -203,7 +204,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { Get get = new Get(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q3); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { return t.get(get).listCells(); } @@ -214,7 +215,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { Get get = new Get(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q4); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { return t.get(get).listCells(); } @@ -242,7 +243,7 @@ public Object run() throws Exception { public List run() throws Exception { Scan scan = new Scan(); scan.withStartRow(TEST_ROW); - scan.withStopRow(Bytes.add(TEST_ROW, new byte[]{ 0 })); + scan.withStopRow(Bytes.add(TEST_ROW, new byte[] { 0 })); scan.addFamily(TEST_FAMILY); Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName()); @@ -283,7 +284,7 @@ public List run() throws Exception { @Override public Object run() throws Exception { Increment i = new Increment(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q1, 1L); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { t.increment(i); } @@ -295,7 +296,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { Increment i = new Increment(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q2, 1L); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { t.increment(i); } @@ -309,7 +310,7 @@ public Object run() throws Exception { Increment i = new Increment(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q2, 1L); // Tag this increment with an ACL that denies write permissions to USER_OTHER and GROUP i.setACL(prepareCellPermissions(usersAndGroups, Action.READ)); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { t.increment(i); } @@ -321,7 +322,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { Increment i = new Increment(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q3, 1L); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { t.increment(i); } @@ -346,7 +347,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { Delete 
delete = new Delete(TEST_ROW).addFamily(TEST_FAMILY); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { t.delete(delete); } @@ -358,7 +359,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { Delete delete = new Delete(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q1); - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { t.delete(delete); } @@ -372,14 +373,14 @@ public Object run() throws Exception { } /** - * Insure we are not granting access in the absence of any cells found - * when scanning for covered cells. + * Insure we are not granting access in the absence of any cells found when scanning for covered + * cells. */ @Test public void testCoveringCheck() throws Exception { // Grant read access to USER_OTHER - grantOnTable(TEST_UTIL, USER_OTHER.getShortName(), testTable.getTableName(), TEST_FAMILY, - null, Action.READ); + grantOnTable(TEST_UTIL, USER_OTHER.getShortName(), testTable.getTableName(), TEST_FAMILY, null, + Action.READ); // Grant read access to GROUP grantOnTable(TEST_UTIL, AuthUtil.toGroupEntry(GROUP), testTable.getTableName(), TEST_FAMILY, null, Action.READ); @@ -395,7 +396,7 @@ public void testCoveringCheck() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { Put p; p = new Put(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q1, ZERO); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java index cb3c7d3ff657..9aadcb39c737 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ /** * Performs coprocessor loads for various paths and malformed strings */ -@Category({SecurityTests.class, LargeTests.class}) +@Category({ SecurityTests.class, LargeTests.class }) public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil { @ClassRule @@ -87,24 +87,19 @@ public void tearDownTestCoprocessorWhitelistMasterObserver() throws Exception { } /** - * Test a table modification adding a coprocessor path - * which is not whitelisted. - * @exception Exception should be thrown and caught - * to show coprocessor is working as desired - * @param whitelistedPaths A String array of paths to add in - * for the whitelisting configuration - * @param coprocessorPath A String to use as the - * path for a mock coprocessor + * Test a table modification adding a coprocessor path which is not whitelisted. 
+ * @exception Exception should be thrown and caught to show coprocessor is working as desired + * @param whitelistedPaths A String array of paths to add in for the whitelisting configuration + * @param coprocessorPath A String to use as the path for a mock coprocessor */ - private static void positiveTestCase(String[] whitelistedPaths, - String coprocessorPath) throws Exception { + private static void positiveTestCase(String[] whitelistedPaths, String coprocessorPath) + throws Exception { Configuration conf = UTIL.getConfiguration(); // load coprocessor under test conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - CoprocessorWhitelistMasterObserver.class.getName()); - conf.setStrings( - CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY, - whitelistedPaths); + CoprocessorWhitelistMasterObserver.class.getName()); + conf.setStrings(CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY, + whitelistedPaths); // set retries low to raise exception quickly conf.setInt("hbase.client.retries.number", 5); UTIL.startMiniCluster(); @@ -113,10 +108,10 @@ private static void positiveTestCase(String[] whitelistedPaths, Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(TEST_TABLE); TableDescriptor htd = TableDescriptorBuilder.newBuilder(t.getDescriptor()) - .setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder("net.clayb.hbase.coprocessor.NotWhitelisted") - .setJarPath(coprocessorPath).setPriority(Coprocessor.PRIORITY_USER).build()) - .build(); + .setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder("net.clayb.hbase.coprocessor.NotWhitelisted") + .setJarPath(coprocessorPath).setPriority(Coprocessor.PRIORITY_USER).build()) + .build(); LOG.info("Modifying Table"); try { connection.getAdmin().modifyTable(htd); @@ -129,26 +124,22 @@ private static void positiveTestCase(String[] whitelistedPaths, } /** - * Test a table modification adding a coprocessor path - * which is whitelisted. The coprocessor should be added to - * the table descriptor successfully. - * @param whitelistedPaths A String array of paths to add in - * for the whitelisting configuration - * @param coprocessorPath A String to use as the - * path for a mock coprocessor + * Test a table modification adding a coprocessor path which is whitelisted. The coprocessor + * should be added to the table descriptor successfully. 
+ * @param whitelistedPaths A String array of paths to add in for the whitelisting configuration + * @param coprocessorPath A String to use as the path for a mock coprocessor */ - private static void negativeTestCase(String[] whitelistedPaths, - String coprocessorPath) throws Exception { + private static void negativeTestCase(String[] whitelistedPaths, String coprocessorPath) + throws Exception { Configuration conf = UTIL.getConfiguration(); conf.setInt("hbase.client.retries.number", 5); // load coprocessor under test conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - CoprocessorWhitelistMasterObserver.class.getName()); + CoprocessorWhitelistMasterObserver.class.getName()); // set retries low to raise exception quickly // set a coprocessor whitelist path for test - conf.setStrings( - CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY, - whitelistedPaths); + conf.setStrings(CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY, + whitelistedPaths); UTIL.startMiniCluster(); UTIL.createTable(TEST_TABLE, new byte[][] { TEST_FAMILY }); UTIL.waitUntilAllRegionsAssigned(TEST_TABLE); @@ -159,10 +150,10 @@ private static void negativeTestCase(String[] whitelistedPaths, admin.disableTable(TEST_TABLE); Table t = connection.getTable(TEST_TABLE); TableDescriptor htd = TableDescriptorBuilder.newBuilder(t.getDescriptor()) - .setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder("net.clayb.hbase.coprocessor.Whitelisted") - .setJarPath(coprocessorPath).setPriority(Coprocessor.PRIORITY_USER).build()) - .build(); + .setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder("net.clayb.hbase.coprocessor.Whitelisted") + .setJarPath(coprocessorPath).setPriority(Coprocessor.PRIORITY_USER).build()) + .build(); LOG.info("Modifying Table"); admin.modifyTable(htd); assertEquals(1, t.getDescriptor().getCoprocessorDescriptors().size()); @@ -170,112 +161,101 @@ private static void negativeTestCase(String[] whitelistedPaths, } /** - * Test a table modification adding a coprocessor path - * which is not whitelisted. - * @exception Exception should be thrown and caught - * to show coprocessor is working as desired + * Test a table modification adding a coprocessor path which is not whitelisted. + * @exception Exception should be thrown and caught to show coprocessor is working as desired */ @Test public void testSubstringNonWhitelisted() throws Exception { - positiveTestCase(new String[]{"/permitted/*"}, - "file:///notpermitted/couldnotpossiblyexist.jar"); + positiveTestCase(new String[] { "/permitted/*" }, + "file:///notpermitted/couldnotpossiblyexist.jar"); } /** - * Test a table creation including a coprocessor path - * which is not whitelisted. Coprocessor should be added to - * table descriptor. Table is disabled to avoid an IOException due - * to the added coprocessor not actually existing on disk. + * Test a table creation including a coprocessor path which is not whitelisted. Coprocessor should + * be added to table descriptor. Table is disabled to avoid an IOException due to the added + * coprocessor not actually existing on disk. */ @Test public void testDifferentFileSystemNonWhitelisted() throws Exception { - positiveTestCase(new String[]{"hdfs://foo/bar"}, - "file:///notpermitted/couldnotpossiblyexist.jar"); + positiveTestCase(new String[] { "hdfs://foo/bar" }, + "file:///notpermitted/couldnotpossiblyexist.jar"); } /** - * Test a table modification adding a coprocessor path - * which is whitelisted. Coprocessor should be added to table - * descriptor. 
Table is disabled to avoid an IOException due to - * the added coprocessor not actually existing on disk. + * Test a table modification adding a coprocessor path which is whitelisted. Coprocessor should be + * added to table descriptor. Table is disabled to avoid an IOException due to the added + * coprocessor not actually existing on disk. */ @Test public void testSchemeAndDirectorywhitelisted() throws Exception { - negativeTestCase(new String[]{"/tmp","file:///permitted/*"}, - "file:///permitted/couldnotpossiblyexist.jar"); + negativeTestCase(new String[] { "/tmp", "file:///permitted/*" }, + "file:///permitted/couldnotpossiblyexist.jar"); } /** - * Test a table modification adding a coprocessor path - * which is whitelisted. Coprocessor should be added to table - * descriptor. Table is disabled to avoid an IOException due to - * the added coprocessor not actually existing on disk. + * Test a table modification adding a coprocessor path which is whitelisted. Coprocessor should be + * added to table descriptor. Table is disabled to avoid an IOException due to the added + * coprocessor not actually existing on disk. */ @Test public void testSchemeWhitelisted() throws Exception { - negativeTestCase(new String[]{"file:///"}, - "file:///permitted/couldnotpossiblyexist.jar"); + negativeTestCase(new String[] { "file:///" }, "file:///permitted/couldnotpossiblyexist.jar"); } /** - * Test a table modification adding a coprocessor path - * which is whitelisted. Coprocessor should be added to table - * descriptor. Table is disabled to avoid an IOException due to - * the added coprocessor not actually existing on disk. + * Test a table modification adding a coprocessor path which is whitelisted. Coprocessor should be + * added to table descriptor. Table is disabled to avoid an IOException due to the added + * coprocessor not actually existing on disk. */ @Test public void testDFSNameWhitelistedWorks() throws Exception { - negativeTestCase(new String[]{"hdfs://Your-FileSystem"}, - "hdfs://Your-FileSystem/permitted/couldnotpossiblyexist.jar"); + negativeTestCase(new String[] { "hdfs://Your-FileSystem" }, + "hdfs://Your-FileSystem/permitted/couldnotpossiblyexist.jar"); } /** - * Test a table modification adding a coprocessor path - * which is whitelisted. Coprocessor should be added to table - * descriptor. Table is disabled to avoid an IOException due to - * the added coprocessor not actually existing on disk. + * Test a table modification adding a coprocessor path which is whitelisted. Coprocessor should be + * added to table descriptor. Table is disabled to avoid an IOException due to the added + * coprocessor not actually existing on disk. */ @Test public void testDFSNameNotWhitelistedFails() throws Exception { - positiveTestCase(new String[]{"hdfs://Your-FileSystem"}, - "hdfs://My-FileSystem/permitted/couldnotpossiblyexist.jar"); + positiveTestCase(new String[] { "hdfs://Your-FileSystem" }, + "hdfs://My-FileSystem/permitted/couldnotpossiblyexist.jar"); } /** - * Test a table modification adding a coprocessor path - * which is whitelisted. Coprocessor should be added to table - * descriptor. Table is disabled to avoid an IOException due to - * the added coprocessor not actually existing on disk. + * Test a table modification adding a coprocessor path which is whitelisted. Coprocessor should be + * added to table descriptor. Table is disabled to avoid an IOException due to the added + * coprocessor not actually existing on disk. 
*/ @Test public void testBlanketWhitelist() throws Exception { - negativeTestCase(new String[]{"*"}, - "hdfs:///permitted/couldnotpossiblyexist.jar"); + negativeTestCase(new String[] { "*" }, "hdfs:///permitted/couldnotpossiblyexist.jar"); } /** - * Test a table creation including a coprocessor path - * which is not whitelisted. Table will not be created due to the - * offending coprocessor. + * Test a table creation including a coprocessor path which is not whitelisted. Table will not be + * created due to the offending coprocessor. */ @Test public void testCreationNonWhitelistedCoprocessorPath() throws Exception { Configuration conf = UTIL.getConfiguration(); // load coprocessor under test conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - CoprocessorWhitelistMasterObserver.class.getName()); + CoprocessorWhitelistMasterObserver.class.getName()); conf.setStrings(CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY, - new String[]{}); + new String[] {}); // set retries low to raise exception quickly conf.setInt("hbase.client.retries.number", 5); UTIL.startMiniCluster(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)) - .setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder("net.clayb.hbase.coprocessor.NotWhitelisted") - .setJarPath("file:///notpermitted/couldnotpossiblyexist.jar") - .setPriority(Coprocessor.PRIORITY_USER).setProperties(Collections.emptyMap()).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)) + .setCoprocessor( + CoprocessorDescriptorBuilder.newBuilder("net.clayb.hbase.coprocessor.NotWhitelisted") + .setJarPath("file:///notpermitted/couldnotpossiblyexist.jar") + .setPriority(Coprocessor.PRIORITY_USER).setProperties(Collections.emptyMap()).build()) + .build(); Connection connection = ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); LOG.info("Creating Table"); @@ -300,24 +280,23 @@ public Optional getRegionObserver() { } /** - * Test a table creation including a coprocessor path - * which is on the classpath. Table will be created with the - * coprocessor. + * Test a table creation including a coprocessor path which is on the classpath. Table will be + * created with the coprocessor. 
*/ @Test public void testCreationClasspathCoprocessor() throws Exception { Configuration conf = UTIL.getConfiguration(); // load coprocessor under test conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - CoprocessorWhitelistMasterObserver.class.getName()); + CoprocessorWhitelistMasterObserver.class.getName()); conf.setStrings(CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY, - new String[]{}); + new String[] {}); // set retries low to raise exception quickly conf.setInt("hbase.client.retries.number", 5); UTIL.startMiniCluster(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)) - .setCoprocessor(TestRegionObserver.class.getName()).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)) + .setCoprocessor(TestRegionObserver.class.getName()).build(); Connection connection = ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); LOG.info("Creating Table"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java index 340c1bde2f4d..ff8e51247d72 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestHDFSAclHelper.java @@ -17,6 +17,13 @@ */ package org.apache.hadoop.hbase.security.access; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -28,14 +35,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.junit.Assert.assertEquals; - final class TestHDFSAclHelper { private static final Logger LOG = LoggerFactory.getLogger(TestHDFSAclHelper.class); @@ -45,7 +44,7 @@ private TestHDFSAclHelper() { } static void grantOnTable(HBaseTestingUtil util, String user, TableName tableName, - Permission.Action... actions) throws Exception { + Permission.Action... 
actions) throws Exception { SecureTestUtil.grantOnTable(util, user, tableName, null, null, actions); } @@ -155,7 +154,7 @@ static void canUserScanSnapshot(HBaseTestingUtil util, User user, String snapsho } static PrivilegedExceptionAction getScanSnapshotAction(Configuration conf, - String snapshotName, long expectedRowCount) { + String snapshotName, long expectedRowCount) { return () -> { try { Path restoreDir = new Path(SnapshotScannerHDFSAclHelper.SNAPSHOT_RESTORE_TMP_DIR_DEFAULT); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java index bb2f4d422b7b..4200f7f7a60f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java @@ -62,7 +62,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.AccessControlService; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestNamespaceCommands extends SecureTestUtil { @ClassRule @@ -103,8 +103,8 @@ public class TestNamespaceCommands extends SecureTestUtil { private static User USER_NS_EXEC; // user with rw permissions - private static User USER_TABLE_WRITE; // TODO: WE DO NOT GIVE ANY PERMS TO THIS USER - //user with create table permissions alone + private static User USER_TABLE_WRITE; // TODO: WE DO NOT GIVE ANY PERMS TO THIS USER + // user with create table permissions alone private static User USER_TABLE_CREATE; // TODO: WE DO NOT GIVE ANY PERMS TO THIS USER private static final String GROUP_ADMIN = "group_admin"; @@ -163,10 +163,10 @@ public static void beforeClass() throws Exception { // Find the Access Controller CP. Could be on master or if master is not serving regions, is // on an arbitrary server. - for (JVMClusterUtil.RegionServerThread rst: - UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()) { - ACCESS_CONTROLLER = rst.getRegionServer().getRegionServerCoprocessorHost(). 
- findCoprocessor(AccessController.class); + for (JVMClusterUtil.RegionServerThread rst : UTIL.getMiniHBaseCluster() + .getLiveRegionServerThreads()) { + ACCESS_CONTROLLER = rst.getRegionServer().getRegionServerCoprocessorHost() + .findCoprocessor(AccessController.class); if (ACCESS_CONTROLLER != null) { break; } @@ -179,18 +179,18 @@ public static void beforeClass() throws Exception { UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(TEST_NAMESPACE2).build()); // grants on global - grantGlobal(UTIL, USER_GLOBAL_ADMIN.getShortName(), Permission.Action.ADMIN); + grantGlobal(UTIL, USER_GLOBAL_ADMIN.getShortName(), Permission.Action.ADMIN); grantGlobal(UTIL, USER_GLOBAL_CREATE.getShortName(), Permission.Action.CREATE); - grantGlobal(UTIL, USER_GLOBAL_WRITE.getShortName(), Permission.Action.WRITE); - grantGlobal(UTIL, USER_GLOBAL_READ.getShortName(), Permission.Action.READ); - grantGlobal(UTIL, USER_GLOBAL_EXEC.getShortName(), Permission.Action.EXEC); + grantGlobal(UTIL, USER_GLOBAL_WRITE.getShortName(), Permission.Action.WRITE); + grantGlobal(UTIL, USER_GLOBAL_READ.getShortName(), Permission.Action.READ); + grantGlobal(UTIL, USER_GLOBAL_EXEC.getShortName(), Permission.Action.EXEC); // grants on namespace - grantOnNamespace(UTIL, USER_NS_ADMIN.getShortName(), TEST_NAMESPACE, Permission.Action.ADMIN); + grantOnNamespace(UTIL, USER_NS_ADMIN.getShortName(), TEST_NAMESPACE, Permission.Action.ADMIN); grantOnNamespace(UTIL, USER_NS_CREATE.getShortName(), TEST_NAMESPACE, Permission.Action.CREATE); - grantOnNamespace(UTIL, USER_NS_WRITE.getShortName(), TEST_NAMESPACE, Permission.Action.WRITE); - grantOnNamespace(UTIL, USER_NS_READ.getShortName(), TEST_NAMESPACE, Permission.Action.READ); - grantOnNamespace(UTIL, USER_NS_EXEC.getShortName(), TEST_NAMESPACE, Permission.Action.EXEC); + grantOnNamespace(UTIL, USER_NS_WRITE.getShortName(), TEST_NAMESPACE, Permission.Action.WRITE); + grantOnNamespace(UTIL, USER_NS_READ.getShortName(), TEST_NAMESPACE, Permission.Action.READ); + grantOnNamespace(UTIL, USER_NS_EXEC.getShortName(), TEST_NAMESPACE, Permission.Action.EXEC); grantOnNamespace(UTIL, toGroupEntry(GROUP_NS_ADMIN), TEST_NAMESPACE, Permission.Action.ADMIN); grantOnNamespace(UTIL, USER_NS_ADMIN.getShortName(), TEST_NAMESPACE2, Permission.Action.ADMIN); @@ -221,8 +221,7 @@ public void testAclTableEntries() throws Exception { assertEquals(6, perms.size()); // Grant and check state in ACL table - grantOnNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, - Permission.Action.WRITE); + grantOnNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, Permission.Action.WRITE); Result result = acl.get(new Get(Bytes.toBytes(userTestNamespace))); assertTrue(result != null); @@ -237,8 +236,7 @@ public void testAclTableEntries() throws Exception { assertEquals(Permission.Action.WRITE, namespacePerms.get(0).getPermission().getActions()[0]); // Revoke and check state in ACL table - revokeFromNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, - Permission.Action.WRITE); + revokeFromNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, Permission.Action.WRITE); perms = PermissionStorage.getNamespacePermissions(conf, TEST_NAMESPACE); assertEquals(6, perms.size()); @@ -252,8 +250,10 @@ public void testModifyNamespace() throws Exception { AccessTestAction modifyNamespace = new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preModifyNamespace(ObserverContextImpl.createAndPrepare(CP_ENV), - null, // not needed by AccessController + 
ACCESS_CONTROLLER.preModifyNamespace(ObserverContextImpl.createAndPrepare(CP_ENV), null, // not + // needed + // by + // AccessController NamespaceDescriptor.create(TEST_NAMESPACE).addConfiguration("abc", "156").build()); return null; } @@ -335,34 +335,35 @@ public Object run() throws Exception { } }; - // listNamespaces : All access* + // listNamespaces : All access* // * Returned list will only show what you can call getNamespaceDescriptor() verifyAllowed(listAction, SUPERUSER, USER_GLOBAL_ADMIN, USER_NS_ADMIN, USER_GROUP_ADMIN); // we have 3 namespaces: [default, hbase, TEST_NAMESPACE, TEST_NAMESPACE2] - assertEquals(4, ((List)SUPERUSER.runAs(listAction)).size()); - assertEquals(4, ((List)USER_GLOBAL_ADMIN.runAs(listAction)).size()); - assertEquals(4, ((List)USER_GROUP_ADMIN.runAs(listAction)).size()); - - assertEquals(2, ((List)USER_NS_ADMIN.runAs(listAction)).size()); - - assertEquals(0, ((List)USER_GLOBAL_CREATE.runAs(listAction)).size()); - assertEquals(0, ((List)USER_GLOBAL_WRITE.runAs(listAction)).size()); - assertEquals(0, ((List)USER_GLOBAL_READ.runAs(listAction)).size()); - assertEquals(0, ((List)USER_GLOBAL_EXEC.runAs(listAction)).size()); - assertEquals(0, ((List)USER_NS_CREATE.runAs(listAction)).size()); - assertEquals(0, ((List)USER_NS_WRITE.runAs(listAction)).size()); - assertEquals(0, ((List)USER_NS_READ.runAs(listAction)).size()); - assertEquals(0, ((List)USER_NS_EXEC.runAs(listAction)).size()); - assertEquals(0, ((List)USER_TABLE_CREATE.runAs(listAction)).size()); - assertEquals(0, ((List)USER_TABLE_WRITE.runAs(listAction)).size()); - assertEquals(0, ((List)USER_GROUP_CREATE.runAs(listAction)).size()); - assertEquals(0, ((List)USER_GROUP_READ.runAs(listAction)).size()); - assertEquals(0, ((List)USER_GROUP_WRITE.runAs(listAction)).size()); + assertEquals(4, ((List) SUPERUSER.runAs(listAction)).size()); + assertEquals(4, ((List) USER_GLOBAL_ADMIN.runAs(listAction)).size()); + assertEquals(4, ((List) USER_GROUP_ADMIN.runAs(listAction)).size()); + + assertEquals(2, ((List) USER_NS_ADMIN.runAs(listAction)).size()); + + assertEquals(0, ((List) USER_GLOBAL_CREATE.runAs(listAction)).size()); + assertEquals(0, ((List) USER_GLOBAL_WRITE.runAs(listAction)).size()); + assertEquals(0, ((List) USER_GLOBAL_READ.runAs(listAction)).size()); + assertEquals(0, ((List) USER_GLOBAL_EXEC.runAs(listAction)).size()); + assertEquals(0, ((List) USER_NS_CREATE.runAs(listAction)).size()); + assertEquals(0, ((List) USER_NS_WRITE.runAs(listAction)).size()); + assertEquals(0, ((List) USER_NS_READ.runAs(listAction)).size()); + assertEquals(0, ((List) USER_NS_EXEC.runAs(listAction)).size()); + assertEquals(0, ((List) USER_TABLE_CREATE.runAs(listAction)).size()); + assertEquals(0, ((List) USER_TABLE_WRITE.runAs(listAction)).size()); + assertEquals(0, ((List) USER_GROUP_CREATE.runAs(listAction)).size()); + assertEquals(0, ((List) USER_GROUP_READ.runAs(listAction)).size()); + assertEquals(0, ((List) USER_GROUP_WRITE.runAs(listAction)).size()); } - @SuppressWarnings("checkstyle:MethodLength") @Test + @SuppressWarnings("checkstyle:MethodLength") + @Test public void testGrantRevoke() throws Exception { final String testUser = "testUser"; // Set this else in test context, with limit on the number of threads for @@ -522,19 +523,19 @@ public void testCreateTableWithNamespace() throws Exception { @Override public Object run() throws Exception { TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE)) - 
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); ACCESS_CONTROLLER.preCreateTable(ObserverContextImpl.createAndPrepare(CP_ENV), tableDescriptor, null); return null; } }; - //createTable : superuser | global(AC) | NS(AC) + // createTable : superuser | global(AC) | NS(AC) verifyAllowed(createTable, SUPERUSER, USER_GLOBAL_CREATE, USER_NS_CREATE, USER_GROUP_CREATE, USER_GLOBAL_ADMIN, USER_NS_ADMIN, USER_GROUP_ADMIN); - verifyDenied(createTable, USER_GLOBAL_WRITE, USER_GLOBAL_READ, USER_GLOBAL_EXEC, - USER_NS_WRITE, USER_NS_READ, USER_NS_EXEC, USER_TABLE_CREATE, USER_TABLE_WRITE, - USER_GROUP_READ, USER_GROUP_WRITE); + verifyDenied(createTable, USER_GLOBAL_WRITE, USER_GLOBAL_READ, USER_GLOBAL_EXEC, USER_NS_WRITE, + USER_NS_READ, USER_NS_EXEC, USER_TABLE_CREATE, USER_TABLE_WRITE, USER_GROUP_READ, + USER_GROUP_WRITE); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestPermissionBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestPermissionBuilder.java index ff002b15d2ad..65fcc733c750 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestPermissionBuilder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestPermissionBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java index cb7ea3125fdb..d7f383301924 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import static org.apache.hadoop.hbase.AuthUtil.toGroupEntry; @@ -69,28 +68,21 @@ import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos; /** - * This class tests operations in MasterRpcServices which require ADMIN access. - * It doesn't test all operations which require ADMIN access, only those which get vetted within - * MasterRpcServices at the point of entry itself (unlike old approach of using - * hooks in AccessController). - * - * Sidenote: + * This class tests operations in MasterRpcServices which require ADMIN access. It doesn't test all + * operations which require ADMIN access, only those which get vetted within MasterRpcServices at + * the point of entry itself (unlike old approach of using hooks in AccessController). Sidenote: * There is one big difference between how security tests for AccessController hooks work, and how - * the tests in this class for security in MasterRpcServices work. - * The difference arises because of the way AC & MasterRpcServices get the user. 
- * - * In AccessController, it first checks if there is an active rpc user in ObserverContext. If not, - * it uses UserProvider for current user. This *might* make sense in the context of coprocessors, - * because they can be called outside the context of RPCs. - * But in the context of MasterRpcServices, only one way makes sense - RPCServer.getRequestUser(). - * - * In AC tests, when we do FooUser.runAs on AccessController instance directly, it bypasses - * the rpc framework completely, but works because UserProvider provides the correct user, i.e. - * FooUser in this case. - * - * But this doesn't work for the tests here, so we go around by doing complete RPCs. + * the tests in this class for security in MasterRpcServices work. The difference arises because of + * the way AC & MasterRpcServices get the user. In AccessController, it first checks if there is an + * active rpc user in ObserverContext. If not, it uses UserProvider for current user. This *might* + * make sense in the context of coprocessors, because they can be called outside the context of + * RPCs. But in the context of MasterRpcServices, only one way makes sense - + * RPCServer.getRequestUser(). In AC tests, when we do FooUser.runAs on AccessController instance + * directly, it bypasses the rpc framework completely, but works because UserProvider provides the + * correct user, i.e. FooUser in this case. But this doesn't work for the tests here, so we go + * around by doing complete RPCs. */ -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestRpcAccessChecks { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -116,7 +108,8 @@ public class TestRpcAccessChecks { // Dummy service to test execService calls. Needs to be public so can be loaded as Coprocessor. 
public static class DummyCpService implements MasterCoprocessor, RegionServerCoprocessor { - public DummyCpService() {} + public DummyCpService() { + } @Override public Iterable getServices() { @@ -127,11 +120,11 @@ public Iterable getServices() { private static void enableSecurity(Configuration conf) throws IOException { conf.set("hadoop.security.authorization", "false"); conf.set("hadoop.security.authentication", "simple"); - conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName() + - "," + DummyCpService.class.getName()); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + AccessController.class.getName() + "," + DummyCpService.class.getName()); conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName()); - conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName() + - "," + DummyCpService.class.getName()); + conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, + AccessController.class.getName() + "," + DummyCpService.class.getName()); conf.set(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, "true"); SecureTestUtil.configureSuperuser(conf); } @@ -158,8 +151,8 @@ public static void setup() throws Exception { TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME); // Assign permissions to groups - SecureTestUtil.grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), - Permission.Action.ADMIN, Permission.Action.CREATE); + SecureTestUtil.grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN, + Permission.Action.CREATE); SecureTestUtil.grantGlobal(TEST_UTIL, USER_ADMIN_NOT_SUPER.getShortName(), Permission.Action.ADMIN); } @@ -243,7 +236,7 @@ public void testExecProcedure() throws Exception { verifyAdminCheckForAction((admin) -> { // Using existing table instead of creating a new one. admin.execProcedure("flush-table-proc", TableName.META_TABLE_NAME.getNameAsString(), - new HashMap<>()); + new HashMap<>()); }); } @@ -266,7 +259,7 @@ public void testExecProcedureWithRet() throws Exception { verifyAdminCheckForAction((admin) -> { // Using existing table instead of creating a new one. 
admin.execProcedureWithReturn("flush-table-proc", TableName.META_TABLE_NAME.getNameAsString(), - new HashMap<>()); + new HashMap<>()); }); } @@ -285,8 +278,8 @@ public void testExecRegionServerService() throws Exception { Action action = (admin) -> { ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service = - TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub( - admin.coprocessorService(serverName)); + TestRpcServiceProtos.TestProtobufRpcProto + .newBlockingStub(admin.coprocessorService(serverName)); service.ping(null, TestProtos.EmptyRequestProto.getDefaultInstance()); }; @@ -303,9 +296,8 @@ public void testTableFlush() throws Exception { Action adminAction = (admin) -> { admin.createTable(desc); // Avoid giving a global permission which may screw up other tests - SecureTestUtil.grantOnTable( - TEST_UTIL, USER_NON_ADMIN.getShortName(), tn, null, null, Permission.Action.READ, - Permission.Action.WRITE, Permission.Action.CREATE); + SecureTestUtil.grantOnTable(TEST_UTIL, USER_NON_ADMIN.getShortName(), tn, null, null, + Permission.Action.READ, Permission.Action.WRITE, Permission.Action.CREATE); }; verifyAllowed(USER_ADMIN, adminAction); @@ -322,8 +314,8 @@ public void testTableFlush() throws Exception { // Flush should not require ADMIN permission admin.flush(tn); // Nb: ideally, we would verify snapshot permission too (as that was fixed in the - // regression HBASE-20185) but taking a snapshot requires ADMIN permission which - // masks the root issue. + // regression HBASE-20185) but taking a snapshot requires ADMIN permission which + // masks the root issue. // Make sure we read the value Result result = table.get(new Get(rowKey)); assertFalse(result.isEmpty()); @@ -342,9 +334,9 @@ public void testTableFlushAndSnapshot() throws Exception { Action adminAction = (admin) -> { admin.createTable(desc); // Giving ADMIN here, but only on this table, *not* globally - SecureTestUtil.grantOnTable( - TEST_UTIL, USER_NON_ADMIN.getShortName(), tn, null, null, Permission.Action.READ, - Permission.Action.WRITE, Permission.Action.CREATE, Permission.Action.ADMIN); + SecureTestUtil.grantOnTable(TEST_UTIL, USER_NON_ADMIN.getShortName(), tn, null, null, + Permission.Action.READ, Permission.Action.WRITE, Permission.Action.CREATE, + Permission.Action.ADMIN); }; verifyAllowed(USER_ADMIN, adminAction); @@ -385,16 +377,15 @@ public void testGrantDeniedOnSuperUsersGroups() { try { // Namespace SecureTestUtil.grantOnNamespace(USER_ADMIN_NOT_SUPER, TEST_UTIL, USER_ADMIN.getShortName(), - TEST_NAME.getMethodName(), - Permission.Action.ADMIN, Permission.Action.CREATE); + TEST_NAME.getMethodName(), Permission.Action.ADMIN, Permission.Action.CREATE); fail("Granting superuser's namespace permissions is not allowed."); } catch (Exception e) { } try { // Table SecureTestUtil.grantOnTable(USER_ADMIN_NOT_SUPER, TEST_UTIL, USER_ADMIN.getName(), - TableName.valueOf(TEST_NAME.getMethodName()), null, null, - Permission.Action.ADMIN, Permission.Action.CREATE); + TableName.valueOf(TEST_NAME.getMethodName()), null, null, Permission.Action.ADMIN, + Permission.Action.CREATE); fail("Granting superuser's table permissions is not allowed."); } catch (Exception e) { } @@ -428,8 +419,7 @@ public void testRevokeDeniedOnSuperUsersGroups() { try { // Table SecureTestUtil.revokeFromTable(USER_ADMIN_NOT_SUPER, TEST_UTIL, USER_ADMIN.getName(), - TableName.valueOf(TEST_NAME.getMethodName()), null, null, - Permission.Action.ADMIN); + 
TableName.valueOf(TEST_NAME.getMethodName()), null, null, Permission.Action.ADMIN); fail("Revoking superuser's table permissions is not allowed."); } catch (Exception e) { } @@ -438,8 +428,7 @@ public void testRevokeDeniedOnSuperUsersGroups() { try { // Global revoke SecureTestUtil.revokeGlobal(USER_ADMIN_NOT_SUPER, TEST_UTIL, - AuthUtil.toGroupEntry("supergroup"), - Permission.Action.ADMIN, Permission.Action.CREATE); + AuthUtil.toGroupEntry("supergroup"), Permission.Action.ADMIN, Permission.Action.CREATE); fail("Revoking supergroup's permissions is not allowed."); } catch (Exception e) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java index c7c4aee37cc7..1bc4ecdbb4f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestScanEarlyTermination extends SecureTestUtil { @ClassRule @@ -90,14 +90,14 @@ public static void setupBeforeClass() throws Exception { verifyConfiguration(conf); TEST_UTIL.startMiniCluster(); - MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster() - .getMasterCoprocessorHost(); + MasterCoprocessorHost cpHost = + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); - AccessController ac = (AccessController) - cpHost.findCoprocessor(AccessController.class.getName()); + AccessController ac = + (AccessController) cpHost.findCoprocessor(AccessController.class.getName()); cpHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); - RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) - .getRegionServerCoprocessorHost(); + RegionServerCoprocessorHost rsHost = + TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost(); rsHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available @@ -118,8 +118,7 @@ public static void tearDownAfterClass() throws Exception { @Before public void setUp() throws Exception { - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(testTable.getTableName()) + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(testTable.getTableName()) .setColumnFamily( ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY1).setMaxVersions(10).build()) .setColumnFamily( @@ -147,8 +146,8 @@ public void tearDown() throws Exception { @Test public void testEarlyScanTermination() throws Exception { // Grant USER_OTHER access to TEST_FAMILY1 only - grantOnTable(TEST_UTIL, USER_OTHER.getShortName(), testTable.getTableName(), TEST_FAMILY1, - null, Action.READ); + grantOnTable(TEST_UTIL, USER_OTHER.getShortName(), testTable.getTableName(), TEST_FAMILY1, null, + Action.READ); // Set up test data verifyAllowed(new AccessTestAction() { diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java index ca2a39e8ebd8..dafe1e5344db 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import static org.apache.hadoop.hbase.security.access.Permission.Action.READ; @@ -26,6 +25,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.List; import org.apache.hadoop.conf.Configuration; @@ -99,7 +99,7 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); SnapshotScannerHDFSAclController coprocessor = TEST_UTIL.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(SnapshotScannerHDFSAclController.class); + .getMasterCoprocessorHost().findCoprocessor(SnapshotScannerHDFSAclController.class); TEST_UTIL.waitFor(30000, () -> coprocessor.checkInitialized("check initialized")); TEST_UTIL.waitTableAvailable(PermissionStorage.ACL_TABLE_NAME); @@ -141,7 +141,7 @@ public static void tearDownAfterClass() throws Exception { } private void snapshotAndWait(final String snapShotName, final TableName tableName) - throws Exception{ + throws Exception { admin.snapshot(snapShotName, tableName); LOG.info("Sleep for three seconds, waiting for HDFS Acl setup"); Threads.sleep(3000); @@ -524,8 +524,7 @@ public void testRevokeNamespace2() throws Exception { assertFalse(hasUserNamespaceHdfsAcl(aclTable, grantUserName, namespace)); checkUserAclEntry(FS, helper.getNamespaceRootPaths(namespace), grantUserName, true, false); assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table)); - checkUserAclEntry(FS, helper.getTableRootPaths(table, false), - grantUserName, true, true); + checkUserAclEntry(FS, helper.getTableRootPaths(table, false), grantUserName, true, true); deleteTable(table); } @@ -839,7 +838,7 @@ public void testModifyTable2() throws Exception { TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, globalUser2, snapshot, -1); // check access String[] users = new String[] { globalUserName, globalUserName2, nsUserName, tableUserName, - tableUserName2, tableUserName3 }; + tableUserName2, tableUserName3 }; for (Path path : helper.getTableRootPaths(table, false)) { for (String user : users) { checkUserAclEntry(FS, path, user, false, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController2.java index 9d29516c81e7..119c55aed6ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController2.java @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.access; import static org.apache.hadoop.hbase.security.access.Permission.Action.READ; import static org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController.SnapshotScannerHDFSAclStorage.hasUserTableHdfsAcl; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -86,7 +86,7 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(); SnapshotScannerHDFSAclController coprocessor = TEST_UTIL.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(SnapshotScannerHDFSAclController.class); + .getMasterCoprocessorHost().findCoprocessor(SnapshotScannerHDFSAclController.class); TEST_UTIL.waitFor(30000, () -> coprocessor.checkInitialized("check initialized")); TEST_UTIL.waitTableAvailable(PermissionStorage.ACL_TABLE_NAME); @@ -158,9 +158,8 @@ public void testRestoreSnapshot() throws Exception { TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot, 6); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, 10); assertTrue(hasUserTableHdfsAcl(aclTable, grantUserName, table)); - TestSnapshotScannerHDFSAclController. - checkUserAclEntry(FS, helper.getTableRootPaths(table, false), - grantUserName, true, true); + TestSnapshotScannerHDFSAclController.checkUserAclEntry(FS, + helper.getTableRootPaths(table, false), grantUserName, true, true); // delete admin.disableTable(table); @@ -174,12 +173,10 @@ public void testRestoreSnapshot() throws Exception { TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot2, -1); TestHDFSAclHelper.canUserScanSnapshot(TEST_UTIL, grantUser, snapshot3, -1); assertFalse(hasUserTableHdfsAcl(aclTable, grantUserName, table)); - TestSnapshotScannerHDFSAclController. - checkUserAclEntry(FS, helper.getPathHelper().getDataTableDir(table), - grantUserName, false, false); - TestSnapshotScannerHDFSAclController. - checkUserAclEntry(FS, helper.getPathHelper().getArchiveTableDir(table), - grantUserName, true, false); + TestSnapshotScannerHDFSAclController.checkUserAclEntry(FS, + helper.getPathHelper().getDataTableDir(table), grantUserName, false, false); + TestSnapshotScannerHDFSAclController.checkUserAclEntry(FS, + helper.getPathHelper().getArchiveTableDir(table), grantUserName, true, false); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java index 5261d862d79e..bba8a2f6678b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ /** * Test the reading and writing of access permissions on {@code _acl_} table. 
*/ -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestTablePermissions { @ClassRule @@ -85,10 +85,8 @@ public boolean isAborted() { private static String TEST_NAMESPACE = "perms_test_ns"; private static String TEST_NAMESPACE2 = "perms_test_ns2"; - private static TableName TEST_TABLE = - TableName.valueOf("perms_test"); - private static TableName TEST_TABLE2 = - TableName.valueOf("perms_test2"); + private static TableName TEST_TABLE = TableName.valueOf("perms_test"); + private static TableName TEST_TABLE2 = TableName.valueOf("perms_test2"); private static byte[] TEST_FAMILY = Bytes.toBytes("f1"); private static byte[] TEST_QUALIFIER = Bytes.toBytes("col1"); @@ -104,8 +102,7 @@ public static void beforeClass() throws Exception { UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME); UTIL.waitTableAvailable(TableName.valueOf("hbase:acl")); - ZKW = new ZKWatcher(UTIL.getConfiguration(), - "TestTablePermissions", ABORTABLE); + ZKW = new ZKWatcher(UTIL.getConfiguration(), "TestTablePermissions", ABORTABLE); UTIL.createTable(TEST_TABLE, TEST_FAMILY); UTIL.createTable(TEST_TABLE2, TEST_FAMILY); @@ -130,7 +127,8 @@ public void tearDown() throws Exception { /** * The PermissionStorage.addUserPermission may throw exception before closing the table. */ - private void addUserPermission(Configuration conf, UserPermission userPerm, Table t) throws IOException { + private void addUserPermission(Configuration conf, UserPermission userPerm, Table t) + throws IOException { try { PermissionStorage.addUserPermission(conf, userPerm, t); } finally { @@ -166,8 +164,7 @@ public void testBasicWrite() throws Exception { assertEquals("Should have 1 permission for george", 1, userPerms.size()); assertEquals(Permission.Scope.TABLE, userPerms.get(0).getAccessScope()); TablePermission permission = (TablePermission) userPerms.get(0).getPermission(); - assertEquals("Permission should be for " + TEST_TABLE, - TEST_TABLE, permission.getTableName()); + assertEquals("Permission should be for " + TEST_TABLE, TEST_TABLE, permission.getTableName()); assertNull("Column family should be empty", permission.getFamily()); // check actions @@ -182,8 +179,7 @@ public void testBasicWrite() throws Exception { assertEquals("Should have 1 permission for hubert", 1, userPerms.size()); assertEquals(Permission.Scope.TABLE, userPerms.get(0).getAccessScope()); permission = (TablePermission) userPerms.get(0).getPermission(); - assertEquals("Permission should be for " + TEST_TABLE, - TEST_TABLE, permission.getTableName()); + assertEquals("Permission should be for " + TEST_TABLE, TEST_TABLE, permission.getTableName()); assertNull("Column family should be empty", permission.getFamily()); // check actions @@ -198,12 +194,11 @@ public void testBasicWrite() throws Exception { assertEquals("Should have 1 permission for humphrey", 1, userPerms.size()); assertEquals(Permission.Scope.TABLE, userPerms.get(0).getAccessScope()); permission = (TablePermission) userPerms.get(0).getPermission(); - assertEquals("Permission should be for " + TEST_TABLE, - TEST_TABLE, permission.getTableName()); + assertEquals("Permission should be for " + TEST_TABLE, TEST_TABLE, permission.getTableName()); assertTrue("Permission should be for family " + Bytes.toString(TEST_FAMILY), - Bytes.equals(TEST_FAMILY, permission.getFamily())); + Bytes.equals(TEST_FAMILY, permission.getFamily())); assertTrue("Permission should be for qualifier " + Bytes.toString(TEST_QUALIFIER), - Bytes.equals(TEST_QUALIFIER, 
permission.getQualifier())); + Bytes.equals(TEST_QUALIFIER, permission.getQualifier())); // check actions assertNotNull(permission.getActions()); @@ -222,8 +217,8 @@ public void testBasicWrite() throws Exception { } // check full load Map> allPerms = PermissionStorage.loadAll(conf); - assertEquals("Full permission map should have entries for both test tables", - 2, allPerms.size()); + assertEquals("Full permission map should have entries for both test tables", 2, + allPerms.size()); userPerms = allPerms.get(TEST_TABLE.getName()).get("hubert"); assertNotNull(userPerms); @@ -282,12 +277,11 @@ public void testPersistence() throws Exception { Admin admin = UTIL.getAdmin(); try { admin.split(TEST_TABLE); - } - catch (IOException e) { - //although split fail, this may not affect following check - //In old Split API without AM2, if region's best split key is not found, - //there are not exception thrown. But in current API, exception - //will be thrown. + } catch (IOException e) { + // although split fail, this may not affect following check + // In old Split API without AM2, if region's best split key is not found, + // there are not exception thrown. But in current API, exception + // will be thrown. LOG.debug("region is not splittable, because " + e); } @@ -335,10 +329,10 @@ public void checkMultimapEqual(ListMultimap first, List secondPerms = second.get(key); assertNotNull(secondPerms); assertEquals(firstPerms.size(), secondPerms.size()); - LOG.info("First permissions: "+firstPerms.toString()); - LOG.info("Second permissions: "+secondPerms.toString()); + LOG.info("First permissions: " + firstPerms.toString()); + LOG.info("Second permissions: " + secondPerms.toString()); for (UserPermission p : firstPerms) { - assertTrue("Permission "+p.toString()+" not found", secondPerms.contains(p)); + assertTrue("Permission " + p.toString() + " not found", secondPerms.contains(p)); } } } @@ -427,30 +421,29 @@ public void testGlobalPermission() throws Exception { List user1Perms = perms.get("user1"); assertEquals("Should have 1 permission for user1", 1, user1Perms.size()); assertEquals("user1 should have WRITE permission", - new Permission.Action[] { Permission.Action.READ, Permission.Action.WRITE }, - user1Perms.get(0).getPermission().getActions()); + new Permission.Action[] { Permission.Action.READ, Permission.Action.WRITE }, + user1Perms.get(0).getPermission().getActions()); List user2Perms = perms.get("user2"); assertEquals("Should have 1 permission for user2", 1, user2Perms.size()); assertEquals("user2 should have CREATE permission", - new Permission.Action[] { Permission.Action.CREATE }, - user2Perms.get(0).getPermission().getActions()); + new Permission.Action[] { Permission.Action.CREATE }, + user2Perms.get(0).getPermission().getActions()); List user3Perms = perms.get("user3"); assertEquals("Should have 1 permission for user3", 1, user3Perms.size()); - assertEquals("user3 should have ADMIN, READ, CREATE permission", - new Permission.Action[] { - Permission.Action.READ, Permission.Action.CREATE, Permission.Action.ADMIN - }, - user3Perms.get(0).getPermission().getActions()); + assertEquals( + "user3 should have ADMIN, READ, CREATE permission", new Permission.Action[] { + Permission.Action.READ, Permission.Action.CREATE, Permission.Action.ADMIN }, + user3Perms.get(0).getPermission().getActions()); } @Test public void testAuthManager() throws Exception { Configuration conf = UTIL.getConfiguration(); /** - * test a race condition causing AuthManager to sometimes fail global permissions checks - * when 
the global cache is being updated + * test a race condition causing AuthManager to sometimes fail global permissions checks when + * the global cache is being updated */ AuthManager authManager = new AuthManager(conf); // currently running user is the system user and should have global admin perms @@ -466,7 +459,7 @@ public void testAuthManager() throws Exception { .build()), connection.getTable(PermissionStorage.ACL_TABLE_NAME)); // make sure the system user still shows as authorized - assertTrue("Failed current user auth check on iter "+i, + assertTrue("Failed current user auth check on iter " + i, authManager.authorizeUserGlobal(currentUser, Permission.Action.ADMIN)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestUnloadAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestUnloadAccessController.java index 1d34e517bbde..49646182ea9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestUnloadAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestUnloadAccessController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java index 749b3de6cd6f..699034a36a4d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.security.access; import static org.junit.Assert.assertEquals; + import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -78,9 +79,10 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({SecurityTests.class, LargeTests.class}) +@Category({ SecurityTests.class, LargeTests.class }) public class TestWithDisabledAuthorization extends SecureTestUtil { @ClassRule @@ -104,7 +106,8 @@ public class TestWithDisabledAuthorization extends SecureTestUtil { private static RegionServerCoprocessorEnvironment RSCP_ENV; private RegionCoprocessorEnvironment RCP_ENV; - @Rule public TableNameTestRule testTable = new TableNameTestRule(); + @Rule + public TableNameTestRule testTable = new TableNameTestRule(); // default users @@ -149,8 +152,8 @@ public static void setupBeforeClass() throws Exception { cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(AccessController.class.getName()); CP_ENV = cpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); - RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) - .getRegionServerCoprocessorHost(); + RegionServerCoprocessorHost rsHost = + TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost(); RSCP_ENV = rsHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available @@ -179,44 +182,34 @@ public static void tearDownAfterClass() throws Exception { public void setUp() throws Exception { // Create the test table (owner added to the _acl_ table) TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(testTable.getTableName()) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()).build(); + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()) + .build(); createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") }); TEST_UTIL.waitUntilAllRegionsAssigned(testTable.getTableName()); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(testTable.getTableName()).get(0); RegionCoprocessorHost rcpHost = region.getCoprocessorHost(); - RCP_ENV = rcpHost.createEnvironment(ACCESS_CONTROLLER, - Coprocessor.PRIORITY_HIGHEST, 1, TEST_UTIL.getConfiguration()); + RCP_ENV = rcpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, + TEST_UTIL.getConfiguration()); // Set up initial grants - grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), - Permission.Action.ADMIN, - Permission.Action.CREATE, - Permission.Action.READ, - Permission.Action.WRITE); + grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), Permission.Action.ADMIN, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); - grantOnTable(TEST_UTIL, USER_RW.getShortName(), - testTable.getTableName(), TEST_FAMILY, null, - Permission.Action.READ, - Permission.Action.WRITE); + grantOnTable(TEST_UTIL, USER_RW.getShortName(), testTable.getTableName(), TEST_FAMILY, null, + Permission.Action.READ, Permission.Action.WRITE); // USER_CREATE is USER_RW plus CREATE permissions - grantOnTable(TEST_UTIL, 
USER_CREATE.getShortName(), - testTable.getTableName(), null, null, - Permission.Action.CREATE, - Permission.Action.READ, - Permission.Action.WRITE); - - grantOnTable(TEST_UTIL, USER_RO.getShortName(), - testTable.getTableName(), TEST_FAMILY, null, + grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), testTable.getTableName(), null, null, + Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE); + + grantOnTable(TEST_UTIL, USER_RO.getShortName(), testTable.getTableName(), TEST_FAMILY, null, Permission.Action.READ); - grantOnTable(TEST_UTIL, USER_QUAL.getShortName(), - testTable.getTableName(), TEST_FAMILY, TEST_Q1, - Permission.Action.READ, - Permission.Action.WRITE); + grantOnTable(TEST_UTIL, USER_QUAL.getShortName(), testTable.getTableName(), TEST_FAMILY, + TEST_Q1, Permission.Action.READ, Permission.Action.WRITE); assertEquals(5, PermissionStorage .getTablePermissions(TEST_UTIL.getConfiguration(), testTable.getTableName()).size()); @@ -277,8 +270,7 @@ public Void run() throws Exception { AccessTestAction checkTableAdmin = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_UTIL, testTable.getTableName(), null, null, - Permission.Action.ADMIN); + checkTablePerms(TEST_UTIL, testTable.getTableName(), null, null, Permission.Action.ADMIN); return null; } }; @@ -289,8 +281,7 @@ public Void run() throws Exception { AccessTestAction checkTableCreate = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_UTIL, testTable.getTableName(), null, null, - Permission.Action.CREATE); + checkTablePerms(TEST_UTIL, testTable.getTableName(), null, null, Permission.Action.CREATE); return null; } }; @@ -301,8 +292,7 @@ public Void run() throws Exception { AccessTestAction checkTableRead = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_UTIL, testTable.getTableName(), null, null, - Permission.Action.READ); + checkTablePerms(TEST_UTIL, testTable.getTableName(), null, null, Permission.Action.READ); return null; } }; @@ -313,8 +303,8 @@ public Void run() throws Exception { AccessTestAction checkTableReadWrite = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_UTIL, testTable.getTableName(), null, null, - Permission.Action.READ, Permission.Action.WRITE); + checkTablePerms(TEST_UTIL, testTable.getTableName(), null, null, Permission.Action.READ, + Permission.Action.WRITE); return null; } }; @@ -411,8 +401,8 @@ public void testPassiveGrantRevoke() throws Exception { // Add a test user - User tblUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "tbluser", - new String[0]); + User tblUser = + User.createUserForTesting(TEST_UTIL.getConfiguration(), "tbluser", new String[0]); // If we check now, the test user have permissions because authorization is disabled @@ -433,7 +423,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table t = conn.getTable(testTable.getTableName())) { + Table t = conn.getTable(testTable.getTableName())) { t.get(new Get(TEST_ROW).addFamily(TEST_FAMILY)); } return null; @@ -444,8 +434,8 @@ public Void run() throws Exception { // Grant read perms to the test user - grantOnTable(TEST_UTIL, tblUser.getShortName(), testTable.getTableName(), TEST_FAMILY, - null, Permission.Action.READ); + grantOnTable(TEST_UTIL, tblUser.getShortName(), testTable.getTableName(), 
TEST_FAMILY, null, + Permission.Action.READ); // Now both the permission check and actual op will succeed @@ -454,8 +444,8 @@ public Void run() throws Exception { // Revoke read perms from the test user - revokeFromTable(TEST_UTIL, tblUser.getShortName(), testTable.getTableName(), TEST_FAMILY, - null, Permission.Action.READ); + revokeFromTable(TEST_UTIL, tblUser.getShortName(), testTable.getTableName(), TEST_FAMILY, null, + Permission.Action.READ); // Now the permission check will indicate revocation but the actual op will still succeed @@ -472,8 +462,8 @@ public void testPassiveMasterOperations() throws Exception { @Override public Object run() throws Exception { TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(testTable.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + TableDescriptorBuilder.newBuilder(testTable.getTableName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); ACCESS_CONTROLLER.preCreateTable(ObserverContextImpl.createAndPrepare(CP_ENV), tableDescriptor, null); return null; @@ -485,12 +475,11 @@ public Object run() throws Exception { @Override public Object run() throws Exception { TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(testTable.getTableName()) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY2)).build(); + TableDescriptorBuilder.newBuilder(testTable.getTableName()) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY2)).build(); ACCESS_CONTROLLER.preModifyTable(ObserverContextImpl.createAndPrepare(CP_ENV), - testTable.getTableName(), - null, // not needed by AccessController + testTable.getTableName(), null, // not needed by AccessController tableDescriptor); return null; } @@ -543,8 +532,8 @@ public Object run() throws Exception { RegionInfo region = RegionInfoBuilder.newBuilder(testTable.getTableName()).build(); ServerName srcServer = ServerName.valueOf("1.1.1.1", 1, 0); ServerName destServer = ServerName.valueOf("2.2.2.2", 2, 0); - ACCESS_CONTROLLER.preMove(ObserverContextImpl.createAndPrepare(CP_ENV), region, - srcServer, destServer); + ACCESS_CONTROLLER.preMove(ObserverContextImpl.createAndPrepare(CP_ENV), region, srcServer, + destServer); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -583,8 +572,7 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preBalanceSwitch(ObserverContextImpl.createAndPrepare(CP_ENV), - true); + ACCESS_CONTROLLER.preBalanceSwitch(ObserverContextImpl.createAndPrepare(CP_ENV), true); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -605,8 +593,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { SnapshotDescription snapshot = new SnapshotDescription("foo"); - ACCESS_CONTROLLER.preListSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot); + ACCESS_CONTROLLER.preListSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -617,8 +604,8 @@ public Object run() throws Exception { public Object run() throws Exception { SnapshotDescription snapshot = new SnapshotDescription("foo"); 
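Several of the re-wrapped hunks above assemble table layouts through the descriptor builders. A self-contained sketch of that builder chain (the table and family names here are made up; the API calls are the same ones these tests use) could read:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class DescriptorSketch {
  // "sketch_table", "f1" and "f2" are placeholders for illustration only.
  static TableDescriptor example() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("sketch_table"))
      .setColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).setMaxVersions(10).build())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f2")))
      .build();
  }
}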
TableDescriptor htd = TableDescriptorBuilder.newBuilder(testTable.getTableName()).build(); - ACCESS_CONTROLLER.preCloneSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot, htd); + ACCESS_CONTROLLER.preCloneSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot, + htd); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -629,8 +616,8 @@ public Object run() throws Exception { public Object run() throws Exception { SnapshotDescription snapshot = new SnapshotDescription("foo"); TableDescriptor htd = TableDescriptorBuilder.newBuilder(testTable.getTableName()).build(); - ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot, htd); + ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot, + htd); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -640,8 +627,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { SnapshotDescription snapshot = new SnapshotDescription("foo"); - ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), - snapshot); + ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContextImpl.createAndPrepare(CP_ENV), snapshot); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -675,8 +661,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { NamespaceDescriptor ns = NamespaceDescriptor.create("test").build(); - ACCESS_CONTROLLER.preCreateNamespace(ObserverContextImpl.createAndPrepare(CP_ENV), - ns); + ACCESS_CONTROLLER.preCreateNamespace(ObserverContextImpl.createAndPrepare(CP_ENV), ns); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -685,8 +670,7 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preDeleteNamespace(ObserverContextImpl.createAndPrepare(CP_ENV), - "test"); + ACCESS_CONTROLLER.preDeleteNamespace(ObserverContextImpl.createAndPrepare(CP_ENV), "test"); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -696,8 +680,10 @@ public Object run() throws Exception { @Override public Object run() throws Exception { NamespaceDescriptor ns = NamespaceDescriptor.create("test").build(); - ACCESS_CONTROLLER.preModifyNamespace(ObserverContextImpl.createAndPrepare(CP_ENV), - null, // not needed by AccessController + ACCESS_CONTROLLER.preModifyNamespace(ObserverContextImpl.createAndPrepare(CP_ENV), null, // not + // needed + // by + // AccessController ns); return null; } @@ -728,10 +714,8 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preSplitRegion( - ObserverContextImpl.createAndPrepare(CP_ENV), - testTable.getTableName(), - Bytes.toBytes("ss")); + ACCESS_CONTROLLER.preSplitRegion(ObserverContextImpl.createAndPrepare(CP_ENV), + testTable.getTableName(), Bytes.toBytes("ss")); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -740,8 +724,8 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - 
ACCESS_CONTROLLER.preSetUserQuota(ObserverContextImpl.createAndPrepare(CP_ENV), - "testuser", null); + ACCESS_CONTROLLER.preSetUserQuota(ObserverContextImpl.createAndPrepare(CP_ENV), "testuser", + null); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -760,8 +744,8 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preSetNamespaceQuota(ObserverContextImpl.createAndPrepare(CP_ENV), - "test", null); + ACCESS_CONTROLLER.preSetNamespaceQuota(ObserverContextImpl.createAndPrepare(CP_ENV), "test", + null); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -819,8 +803,8 @@ public Object run() throws Exception { @Override public Object run() throws Exception { List cells = Lists.newArrayList(); - ACCESS_CONTROLLER.preGetOp(ObserverContextImpl.createAndPrepare(RCP_ENV), - new Get(TEST_ROW), cells); + ACCESS_CONTROLLER.preGetOp(ObserverContextImpl.createAndPrepare(RCP_ENV), new Get(TEST_ROW), + cells); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -839,8 +823,8 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.prePut(ObserverContextImpl.createAndPrepare(RCP_ENV), - new Put(TEST_ROW), new WALEdit(), Durability.USE_DEFAULT); + ACCESS_CONTROLLER.prePut(ObserverContextImpl.createAndPrepare(RCP_ENV), new Put(TEST_ROW), + new WALEdit(), Durability.USE_DEFAULT); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -869,9 +853,9 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preCheckAndPut(ObserverContextImpl.createAndPrepare(RCP_ENV), - TEST_ROW, TEST_FAMILY, TEST_Q1, CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("foo")), new Put(TEST_ROW), true); + ACCESS_CONTROLLER.preCheckAndPut(ObserverContextImpl.createAndPrepare(RCP_ENV), TEST_ROW, + TEST_FAMILY, TEST_Q1, CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("foo")), + new Put(TEST_ROW), true); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -880,9 +864,9 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preCheckAndDelete(ObserverContextImpl.createAndPrepare(RCP_ENV), - TEST_ROW, TEST_FAMILY, TEST_Q1, CompareOperator.EQUAL, - new BinaryComparator(Bytes.toBytes("foo")), new Delete(TEST_ROW), true); + ACCESS_CONTROLLER.preCheckAndDelete(ObserverContextImpl.createAndPrepare(RCP_ENV), TEST_ROW, + TEST_FAMILY, TEST_Q1, CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("foo")), + new Delete(TEST_ROW), true); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -911,8 +895,7 @@ public Object run() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - ACCESS_CONTROLLER.preScannerOpen(ObserverContextImpl.createAndPrepare(RCP_ENV), - new Scan()); + ACCESS_CONTROLLER.preScannerOpen(ObserverContextImpl.createAndPrepare(RCP_ENV), new Scan()); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, 
USER_NONE); @@ -922,8 +905,7 @@ public Object run() throws Exception { @Override public Object run() throws Exception { List> paths = Lists.newArrayList(); - ACCESS_CONTROLLER.preBulkLoadHFile(ObserverContextImpl.createAndPrepare(RCP_ENV), - paths); + ACCESS_CONTROLLER.preBulkLoadHFile(ObserverContextImpl.createAndPrepare(RCP_ENV), paths); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); @@ -938,7 +920,7 @@ public void testPassiveCellPermissions() throws Exception { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - try(Connection connection = ConnectionFactory.createConnection(conf); + try (Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName())) { Put p; // with ro ACL @@ -950,9 +932,8 @@ public Object run() throws Exception { p.setACL(USER_NONE.getShortName(), new Permission(Action.READ, Action.WRITE)); t.put(p); // no ACL - p = new Put(TEST_ROW) - .addColumn(TEST_FAMILY, TEST_Q3, ZERO) - .addColumn(TEST_FAMILY, TEST_Q4, ZERO); + p = new Put(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q3, ZERO).addColumn(TEST_FAMILY, + TEST_Q4, ZERO); t.put(p); } return null; @@ -968,7 +949,7 @@ public Object run() throws Exception { public List run() throws Exception { Scan scan = new Scan(); scan.withStartRow(TEST_ROW); - scan.withStopRow(Bytes.add(TEST_ROW, new byte[]{ 0 })); + scan.withStopRow(Bytes.add(TEST_ROW, new byte[] { 0 })); scan.addFamily(TEST_FAMILY); Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(testTable.getTableName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java index 2019bb1ea376..62785f933c84 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ /** * Test the reading and writing of access permissions to and from zookeeper. 
*/ -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestZKPermissionWatcher { @ClassRule @@ -75,8 +75,7 @@ public boolean isAborted() { } }; - private static TableName TEST_TABLE = - TableName.valueOf("perms_test"); + private static TableName TEST_TABLE = TableName.valueOf("perms_test"); @BeforeClass public static void beforeClass() throws Exception { @@ -106,8 +105,8 @@ public static void afterClass() throws Exception { @Test public void testPermissionsWatcher() throws Exception { Configuration conf = UTIL.getConfiguration(); - User george = User.createUserForTesting(conf, "george", new String[] { }); - User hubert = User.createUserForTesting(conf, "hubert", new String[] { }); + User george = User.createUserForTesting(conf, "george", new String[] {}); + User hubert = User.createUserForTesting(conf, "hubert", new String[] {}); assertFalse(AUTH_A.authorizeUserTable(george, TEST_TABLE, Permission.Action.READ)); assertFalse(AUTH_A.authorizeUserTable(george, TEST_TABLE, Permission.Action.WRITE)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/CustomSaslAuthenticationProviderTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/CustomSaslAuthenticationProviderTestBase.java index 15f969ab9f9d..c7c15bbb82e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/CustomSaslAuthenticationProviderTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/CustomSaslAuthenticationProviderTestBase.java @@ -113,7 +113,7 @@ public abstract class CustomSaslAuthenticationProviderTestBase { private static final Logger LOG = - LoggerFactory.getLogger(CustomSaslAuthenticationProviderTestBase.class); + LoggerFactory.getLogger(CustomSaslAuthenticationProviderTestBase.class); private static final Map USER_DATABASE = createUserDatabase(); @@ -184,10 +184,10 @@ public UserGroupInformation getUser() { } public static Token createPasswordToken(String username, - String password, String clusterId) { + String password, String clusterId) { PasswordAuthTokenIdentifier id = new PasswordAuthTokenIdentifier(username); Token token = - new Token<>(id.getBytes(), Bytes.toBytes(password), id.getKind(), new Text(clusterId)); + new Token<>(id.getBytes(), Bytes.toBytes(password), id.getKind(), new Text(clusterId)); return token; } @@ -198,20 +198,21 @@ public static Token createPasswordToken(String userna public static class InMemoryClientProvider extends AbstractSaslClientAuthenticationProvider { public static final String MECHANISM = "DIGEST-MD5"; public static final SaslAuthMethod SASL_AUTH_METHOD = - new SaslAuthMethod("IN_MEMORY", (byte) 42, MECHANISM, AuthenticationMethod.TOKEN); + new SaslAuthMethod("IN_MEMORY", (byte) 42, MECHANISM, AuthenticationMethod.TOKEN); @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, - SecurityInfo securityInfo, Token token, boolean fallbackAllowed, - Map saslProps) throws IOException { + SecurityInfo securityInfo, Token token, boolean fallbackAllowed, + Map saslProps) throws IOException { return Sasl.createSaslClient(new String[] { MECHANISM }, null, null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new InMemoryClientProviderCallbackHandler(token)); } public Optional> findToken(User user) { List> tokens = user.getTokens().stream() - .filter((token) -> token.getKind().equals(PasswordAuthTokenIdentifier.PASSWORD_AUTH_TOKEN)) - .collect(Collectors.toList()); + .filter( + 
(token) -> token.getKind().equals(PasswordAuthTokenIdentifier.PASSWORD_AUTH_TOKEN)) + .collect(Collectors.toList()); if (tokens.isEmpty()) { return Optional.empty(); } @@ -277,16 +278,16 @@ public UserInformation getUserInfo(User user) { * Server provider which validates credentials from an in-memory database. */ public static class InMemoryServerProvider extends InMemoryClientProvider - implements SaslServerAuthenticationProvider { + implements SaslServerAuthenticationProvider { @Override public AttemptingUserProvidingSaslServer - createServer(SecretManager secretManager, Map saslProps) - throws IOException { + createServer(SecretManager secretManager, Map saslProps) + throws IOException { return new AttemptingUserProvidingSaslServer( - Sasl.createSaslServer(getSaslAuthMethod().getSaslMechanism(), null, - SaslUtil.SASL_DEFAULT_REALM, saslProps, new InMemoryServerProviderCallbackHandler()), - () -> null); + Sasl.createSaslServer(getSaslAuthMethod().getSaslMechanism(), null, + SaslUtil.SASL_DEFAULT_REALM, saslProps, new InMemoryServerProviderCallbackHandler()), + () -> null); } /** @@ -320,10 +321,10 @@ public void handle(Callback[] callbacks) throws InvalidToken, UnsupportedCallbac id.readFields(new DataInputStream(new ByteArrayInputStream(encodedId))); } catch (IOException e) { throw (InvalidToken) new InvalidToken("Can't de-serialize tokenIdentifier") - .initCause(e); + .initCause(e); } char[] actualPassword = - SaslUtil.encodePassword(Bytes.toBytes(getPassword(id.getUser().getUserName()))); + SaslUtil.encodePassword(Bytes.toBytes(getPassword(id.getUser().getUserName()))); pc.setPassword(actualPassword); } if (ac != null) { @@ -348,7 +349,7 @@ public boolean supportsProtocolAuthentication() { @Override public UserGroupInformation getAuthorizedUgi(String authzId, - SecretManager secretManager) throws IOException { + SecretManager secretManager) throws IOException { UserGroupInformation authorizedUgi; byte[] encodedId = SaslUtil.decodeIdentifier(authzId); PasswordAuthTokenIdentifier tokenId = new PasswordAuthTokenIdentifier(); @@ -376,20 +377,20 @@ public static class InMemoryProviderSelector extends BuiltInProviderSelector { @Override public void configure(Configuration conf, - Collection providers) { + Collection providers) { super.configure(conf, providers); Optional o = - providers.stream().filter((p) -> p instanceof InMemoryClientProvider).findAny(); + providers.stream().filter((p) -> p instanceof InMemoryClientProvider).findAny(); inMemoryProvider = (InMemoryClientProvider) o.orElseThrow(() -> new RuntimeException( - "InMemoryClientProvider not found in available providers: " + providers)); + "InMemoryClientProvider not found in available providers: " + providers)); } @Override public Pair> - selectProvider(String clusterId, User user) { + selectProvider(String clusterId, User user) { Pair> superPair = - super.selectProvider(clusterId, user); + super.selectProvider(clusterId, user); Optional> optional = inMemoryProvider.findToken(user); if (optional.isPresent()) { @@ -403,7 +404,7 @@ public void configure(Configuration conf, } private static void createBaseCluster(HBaseTestingUtil util, File keytabFile, MiniKdc kdc) - throws Exception { + throws Exception { String servicePrincipal = "hbase/localhost"; String spnegoPrincipal = "HTTP/localhost"; kdc.createPrincipal(keytabFile, servicePrincipal); @@ -476,14 +477,14 @@ private void createTable() throws Exception { // Create a table and write a record as the service user (hbase) UserGroupInformation serviceUgi = UserGroupInformation - 
.loginUserFromKeytabAndReturnUGI("hbase/localhost", KEYTAB_FILE.getAbsolutePath()); + .loginUserFromKeytabAndReturnUGI("hbase/localhost", KEYTAB_FILE.getAbsolutePath()); clusterId = serviceUgi.doAs(new PrivilegedExceptionAction() { @Override public String run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(CONF); - Admin admin = conn.getAdmin();) { + Admin admin = conn.getAdmin();) { admin.createTable(TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")).build()); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")).build()); UTIL.waitTableAvailable(tableName); @@ -515,7 +516,7 @@ public void testPositiveAuthentication() throws Exception { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(getClientConf()); - Table t = conn.getTable(tableName)) { + Table t = conn.getTable(tableName)) { Result r = t.get(new Get(Bytes.toBytes("r1"))); assertNotNull(r); assertFalse("Should have read a non-empty Result", r.isEmpty()); @@ -544,7 +545,7 @@ public Void run() throws Exception { // is no RPC to HBase services involved) but the subsequent get() fails. The root cause // should still be a SaslException in both the cases. try (Connection conn = ConnectionFactory.createConnection(clientConf); - Table t = conn.getTable(tableName)) { + Table t = conn.getTable(tableName)) { t.get(new Get(Bytes.toBytes("r1"))); fail("Should not successfully authenticate with HBase"); } catch (MasterRegistryFetchException mfe) { @@ -554,8 +555,8 @@ public Void run() throws Exception { assertTrue(re.getMessage(), re.getMessage().contains("SaslException")); } catch (Exception e) { // Any other exception is unexpected. - fail("Unexpected exception caught, was expecting a authentication error: " + - Throwables.getStackTraceAsString(e)); + fail("Unexpected exception caught, was expecting a authentication error: " + + Throwables.getStackTraceAsString(e)); } return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestCustomSaslAuthenticationProviderNettyRpcServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestCustomSaslAuthenticationProviderNettyRpcServer.java index 3fd8a5196282..e9ad1bed2d4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestCustomSaslAuthenticationProviderNettyRpcServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestCustomSaslAuthenticationProviderNettyRpcServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,11 +30,11 @@ @RunWith(Parameterized.class) @Category({ MediumTests.class, SecurityTests.class }) public class TestCustomSaslAuthenticationProviderNettyRpcServer - extends CustomSaslAuthenticationProviderTestBase { + extends CustomSaslAuthenticationProviderTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCustomSaslAuthenticationProviderNettyRpcServer.class); + HBaseClassTestRule.forClass(TestCustomSaslAuthenticationProviderNettyRpcServer.class); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestCustomSaslAuthenticationProviderSimpleRpcServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestCustomSaslAuthenticationProviderSimpleRpcServer.java index 58bca98db498..bfc375193dbb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestCustomSaslAuthenticationProviderSimpleRpcServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestCustomSaslAuthenticationProviderSimpleRpcServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,11 +30,11 @@ @RunWith(Parameterized.class) @Category({ MediumTests.class, SecurityTests.class }) public class TestCustomSaslAuthenticationProviderSimpleRpcServer - extends CustomSaslAuthenticationProviderTestBase { + extends CustomSaslAuthenticationProviderTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCustomSaslAuthenticationProviderSimpleRpcServer.class); + HBaseClassTestRule.forClass(TestCustomSaslAuthenticationProviderSimpleRpcServer.class); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslServerAuthenticationProviders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslServerAuthenticationProviders.java index aa7b834116a1..bfc1e813a6a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslServerAuthenticationProviders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslServerAuthenticationProviders.java @@ -25,7 +25,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -40,7 +39,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class, SecurityTests.class}) +@Category({ SmallTests.class, SecurityTests.class }) public class TestSaslServerAuthenticationProviders { @ClassRule @@ -55,7 +54,7 @@ public void reset() { @Test public void testCannotAddTheSameProviderTwice() { - HashMap registeredProviders = new HashMap<>(); + HashMap registeredProviders = new HashMap<>(); SimpleSaslServerAuthenticationProvider p1 = new SimpleSaslServerAuthenticationProvider(); SimpleSaslServerAuthenticationProvider p2 = new SimpleSaslServerAuthenticationProvider(); @@ -64,10 +63,11 @@ public void testCannotAddTheSameProviderTwice() { try { SaslServerAuthenticationProviders.addProviderIfNotExists(p2, 
registeredProviders); - } catch (RuntimeException e) {} + } catch (RuntimeException e) { + } assertSame("Expected the original provider to be present", p1, - registeredProviders.entrySet().iterator().next().getValue()); + registeredProviders.entrySet().iterator().next().getValue()); } @Test @@ -91,7 +91,7 @@ public void testInstanceIsCached() { public void instancesAreInitialized() { Configuration conf = HBaseConfiguration.create(); conf.set(SaslServerAuthenticationProviders.EXTRA_PROVIDERS_KEY, - InitCheckingSaslServerAuthenticationProvider.class.getName()); + InitCheckingSaslServerAuthenticationProvider.class.getName()); SaslServerAuthenticationProviders providers = SaslServerAuthenticationProviders.getInstance(conf); @@ -101,12 +101,12 @@ public void instancesAreInitialized() { assertEquals(InitCheckingSaslServerAuthenticationProvider.class, provider.getClass()); assertTrue("Provider was not inititalized", - ((InitCheckingSaslServerAuthenticationProvider) provider).isInitialized()); + ((InitCheckingSaslServerAuthenticationProvider) provider).isInitialized()); } public static class InitCheckingSaslServerAuthenticationProvider implements SaslServerAuthenticationProvider { - public static final byte ID = (byte)88; + public static final byte ID = (byte) 88; private boolean initialized = false; public synchronized void init(Configuration conf) { @@ -128,9 +128,9 @@ public String getTokenKind() { } @Override - public AttemptingUserProvidingSaslServer createServer( - SecretManager secretManager, - Map saslProps) throws IOException { + public AttemptingUserProvidingSaslServer + createServer(SecretManager secretManager, Map saslProps) + throws IOException { throw new UnsupportedOperationException(); } @@ -140,9 +140,8 @@ public boolean supportsProtocolAuthentication() { } @Override - public UserGroupInformation getAuthorizedUgi( - String authzId, SecretManager secretManager) - throws IOException { + public UserGroupInformation getAuthorizedUgi(String authzId, + SecretManager secretManager) throws IOException { throw new UnsupportedOperationException(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/SecureTestCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/SecureTestCluster.java index 29dad832972e..6300c3111ec5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/SecureTestCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/SecureTestCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.token; import java.io.File; @@ -41,8 +40,8 @@ public class SecureTestCluster { private static LocalHBaseCluster CLUSTER; - private static final File KEYTAB_FILE = new File(TEST_UTIL.getDataTestDir("keytab").toUri() - .getPath()); + private static final File KEYTAB_FILE = + new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath()); private static MiniKdc KDC; private static String HOST = "localhost"; @@ -51,20 +50,20 @@ public class SecureTestCluster { private static String HTTP_PRINCIPAL; - //When extending SecureTestCluster on downstream projects that refer SecureTestCluster via - //hbase-server jar, we need to provide a way for the implementation to refer to its own class - //definition, so that KeyStoreTestUtil.getClasspathDir can resolve a valid path in the local FS - //to place required SSL config files. + // When extending SecureTestCluster on downstream projects that refer SecureTestCluster via + // hbase-server jar, we need to provide a way for the implementation to refer to its own class + // definition, so that KeyStoreTestUtil.getClasspathDir can resolve a valid path in the local FS + // to place required SSL config files. private static Class testRunnerClass = SecureTestCluster.class; /** - * SecureTestCluster extending classes can set their own Class reference type - * to be used as the target resource to be looked for on the class loader by + * SecureTestCluster extending classes can set their own Class reference type to be + * used as the target resource to be looked for on the class loader by * KeyStoreTestUtil, when deciding where to place ssl related config files. - * @param testRunnerClass a Class reference from the - * SecureTestCluster extender. + * @param testRunnerClass a Class reference from the SecureTestCluster + * extender. */ - protected static void setTestRunner(Class testRunnerClass){ + protected static void setTestRunner(Class testRunnerClass) { SecureTestCluster.testRunnerClass = testRunnerClass; } @@ -84,11 +83,11 @@ public static void setUp() throws Exception { TEST_UTIL.startMiniZKCluster(); HBaseKerberosUtils.setSecuredConfiguration(TEST_UTIL.getConfiguration(), - PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); + PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); HBaseKerberosUtils.setSSLConfiguration(TEST_UTIL, testRunnerClass); TEST_UTIL.getConfiguration().setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - TokenProvider.class.getName()); + TokenProvider.class.getName()); TEST_UTIL.startMiniDFSCluster(1); Path rootdir = TEST_UTIL.getDataTestDirOnTestFS("TestGenerateDelegationToken"); CommonFSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootdir); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestAuthenticationKey.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestAuthenticationKey.java index 094452f2e6c2..4e2acb24a48f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestAuthenticationKey.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestAuthenticationKey.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestAuthenticationKey { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestDelegationTokenWithEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestDelegationTokenWithEncryption.java index 4d541183ab39..1837a2614a0e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestDelegationTokenWithEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestDelegationTokenWithEncryption.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -76,8 +76,7 @@ public static void setUp() throws Exception { public static Collection parameters() { // Client connection supports only non-blocking RPCs (due to master registry restriction), hence // we only test NettyRpcClient. - return Arrays.asList( - new Object[] { NettyRpcClient.class.getName() }); + return Arrays.asList(new Object[] { NettyRpcClient.class.getName() }); } @Parameter @@ -106,7 +105,7 @@ public void testPutGetWithDelegationToken() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { Admin admin = conn.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); admin.createTable(tableDescriptor); try (Table table = conn.getTable(tableName)) { table.put(new Put(row).addColumn(family, qualifier, value)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestFsDelegationToken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestFsDelegationToken.java index 81347c741153..d95f47eda48e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestFsDelegationToken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestFsDelegationToken.java @@ -42,7 +42,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestFsDelegationToken { private UserProvider userProvider = Mockito.mock(UserProvider.class); private User user = Mockito.mock(User.class); @@ -68,16 +68,13 @@ public void setup() throws IOException, URISyntaxException { when(webHdfsFileSystem.getUri()).thenReturn(new URI("webhdfs://someUri")); when(swebHdfsFileSystem.getCanonicalServiceName()).thenReturn("swebhdfs://"); when(swebHdfsFileSystem.getUri()).thenReturn(new URI("swebhdfs://someUri")); - when(user.getToken( - HDFS_DELEGATION_KIND.toString(), - fileSystem.getCanonicalServiceName())) + when(user.getToken(HDFS_DELEGATION_KIND.toString(), fileSystem.getCanonicalServiceName())) .thenReturn(hdfsToken); - when(user.getToken( - WEBHDFS_TOKEN_KIND.toString(), - webHdfsFileSystem.getCanonicalServiceName())).thenReturn(webhdfsToken); - when(user.getToken( - SWEBHDFS_TOKEN_KIND.toString(), - 
swebHdfsFileSystem.getCanonicalServiceName())).thenReturn(swebhdfsToken); + when(user.getToken(WEBHDFS_TOKEN_KIND.toString(), webHdfsFileSystem.getCanonicalServiceName())) + .thenReturn(webhdfsToken); + when( + user.getToken(SWEBHDFS_TOKEN_KIND.toString(), swebHdfsFileSystem.getCanonicalServiceName())) + .thenReturn(swebhdfsToken); when(hdfsToken.getKind()).thenReturn(new Text("HDFS_DELEGATION_TOKEN")); when(webhdfsToken.getKind()).thenReturn(WEBHDFS_TOKEN_KIND); when(swebhdfsToken.getKind()).thenReturn(SWEBHDFS_TOKEN_KIND); @@ -86,22 +83,19 @@ public void setup() throws IOException, URISyntaxException { @Test public void acquireDelegationToken_defaults_to_hdfsFileSystem() throws IOException { fsDelegationToken.acquireDelegationToken(fileSystem); - assertEquals( - fsDelegationToken.getUserToken().getKind(), HDFS_DELEGATION_KIND); + assertEquals(fsDelegationToken.getUserToken().getKind(), HDFS_DELEGATION_KIND); } @Test public void acquireDelegationToken_webhdfsFileSystem() throws IOException { fsDelegationToken.acquireDelegationToken(webHdfsFileSystem); - assertEquals( - fsDelegationToken.getUserToken().getKind(), WEBHDFS_TOKEN_KIND); + assertEquals(fsDelegationToken.getUserToken().getKind(), WEBHDFS_TOKEN_KIND); } @Test public void acquireDelegationToken_swebhdfsFileSystem() throws IOException { fsDelegationToken.acquireDelegationToken(swebHdfsFileSystem); - assertEquals( - fsDelegationToken.getUserToken().getKind(), SWEBHDFS_TOKEN_KIND); + assertEquals(fsDelegationToken.getUserToken().getKind(), SWEBHDFS_TOKEN_KIND); } @Test(expected = NullPointerException.class) @@ -111,16 +105,13 @@ public void acquireDelegationTokenByTokenKind_rejects_null_token_kind() throws I @Test public void acquireDelegationTokenByTokenKind_webhdfsFileSystem() throws IOException { - fsDelegationToken - .acquireDelegationToken(WEBHDFS_TOKEN_KIND.toString(), webHdfsFileSystem); + fsDelegationToken.acquireDelegationToken(WEBHDFS_TOKEN_KIND.toString(), webHdfsFileSystem); assertEquals(fsDelegationToken.getUserToken().getKind(), WEBHDFS_TOKEN_KIND); } @Test public void acquireDelegationTokenByTokenKind_swebhdfsFileSystem() throws IOException { - fsDelegationToken - .acquireDelegationToken( - SWEBHDFS_TOKEN_KIND.toString(), swebHdfsFileSystem); + fsDelegationToken.acquireDelegationToken(SWEBHDFS_TOKEN_KIND.toString(), swebHdfsFileSystem); assertEquals(fsDelegationToken.getUserToken().getKind(), SWEBHDFS_TOKEN_KIND); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java index dae3abc913ca..651f0b29f65b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -80,8 +80,7 @@ public static void setUp() throws Exception { public static Collection parameters() { // Client connection supports only non-blocking RPCs (due to master registry restriction), hence // we only test NettyRpcClient. 
- return Arrays.asList( - new Object[] { NettyRpcClient.class.getName() }); + return Arrays.asList(new Object[] { NettyRpcClient.class.getName() }); } @Parameter diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index f35c9bb8c479..8c4bd0c18a74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -103,10 +103,10 @@ // This test does a fancy trick where it uses RpcServer and plugs in the Token Service for RpcServer // to offer up. It worked find pre-hbase-2.0.0 but post the shading project, it fails because // RpcServer is all about shaded protobuf whereas the Token Service is a CPEP which does non-shaded -// protobufs. Since hbase-2.0.0, we added convertion from shaded to non-shaded so this test keeps +// protobufs. Since hbase-2.0.0, we added convertion from shaded to non-shaded so this test keeps // working. @RunWith(Parameterized.class) -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestTokenAuthentication { @ClassRule @@ -123,8 +123,8 @@ public class TestTokenAuthentication { /** * Basic server process for RPC authentication testing */ - private static class TokenServer extends TokenProvider implements - AuthenticationProtos.AuthenticationService.BlockingInterface, Runnable, Server { + private static class TokenServer extends TokenProvider + implements AuthenticationProtos.AuthenticationService.BlockingInterface, Runnable, Server { private static final Logger LOG = LoggerFactory.getLogger(TokenServer.class); private Configuration conf; private HBaseTestingUtil TEST_UTIL; @@ -143,7 +143,7 @@ public TokenServer(Configuration conf, HBaseTestingUtil TEST_UTIL) throws IOExce this.startcode = EnvironmentEdgeManager.currentTime(); // Server to handle client requests. String hostname = - Strings.domainNamePointerToHostName(DNS.getDefaultHost("default", "default")); + Strings.domainNamePointerToHostName(DNS.getDefaultHost("default", "default")); int port = 0; // Creation of an ISA will force a resolve. InetSocketAddress initialIsa = new InetSocketAddress(hostname, port); @@ -156,13 +156,13 @@ public TokenServer(Configuration conf, HBaseTestingUtil TEST_UTIL) throws IOExce // little trick of testing the CPEP Service by inserting it as RpcServer Service. This // worked fine before we shaded PB. Now we need these proxies. 
final BlockingService service = - AuthenticationProtos.AuthenticationService.newReflectiveBlockingService(this); + AuthenticationProtos.AuthenticationService.newReflectiveBlockingService(this); final BlockingService proxy = new BlockingService() { @Override public Message callBlockingMethod(MethodDescriptor md, RpcController controller, - Message param) throws ServiceException { + Message param) throws ServiceException { MethodDescriptor methodDescriptor = - service.getDescriptorForType().findMethodByName(md.getName()); + service.getDescriptorForType().findMethodByName(md.getName()); Message request = service.getRequestPrototype(methodDescriptor); // TODO: Convert rpcController Message response = null; @@ -192,9 +192,9 @@ public Message getResponsePrototype(MethodDescriptor arg0) { } }; sai.add(new BlockingServiceAndInterface(proxy, - AuthenticationProtos.AuthenticationService.BlockingInterface.class)); + AuthenticationProtos.AuthenticationService.BlockingInterface.class)); this.rpcServer = RpcServerFactory.createRpcServer(this, "tokenServer", sai, initialIsa, conf, - new FifoRpcScheduler(conf, 1)); + new FifoRpcScheduler(conf, 1)); InetSocketAddress address = rpcServer.getListenerAddress(); if (address == null) { throw new IOException("Listener channel is closed"); @@ -245,7 +245,7 @@ public boolean isStopping() { @Override public void abort(String reason, Throwable error) { - LOG.error(HBaseMarkers.FATAL, "Aborting on: "+reason, error); + LOG.error(HBaseMarkers.FATAL, "Aborting on: " + reason, error); this.aborted = true; this.stopped = true; sleeper.skipSleepCycle(); @@ -255,16 +255,15 @@ private void initialize() throws IOException { // ZK configuration must _not_ have hbase.security.authentication or it will require SASL auth Configuration zkConf = new Configuration(conf); zkConf.set(User.HBASE_SECURITY_CONF_KEY, "simple"); - this.zookeeper = new ZKWatcher(zkConf, TokenServer.class.getSimpleName(), - this, true); + this.zookeeper = new ZKWatcher(zkConf, TokenServer.class.getSimpleName(), this, true); this.rpcServer.start(); // Mock up region coprocessor environment RegionCoprocessorEnvironment mockRegionCpEnv = mock(RegionCoprocessorEnvironment.class, - Mockito.withSettings().extraInterfaces(HasRegionServerServices.class)); + Mockito.withSettings().extraInterfaces(HasRegionServerServices.class)); when(mockRegionCpEnv.getConfiguration()).thenReturn(conf); - when(mockRegionCpEnv.getClassLoader()).then( - (var1) -> Thread.currentThread().getContextClassLoader()); + when(mockRegionCpEnv.getClassLoader()) + .then((var1) -> Thread.currentThread().getContextClassLoader()); RegionServerServices mockRss = mock(RegionServerServices.class); when(mockRss.getRpcServer()).thenReturn(rpcServer); when(((HasRegionServerServices) mockRegionCpEnv).getRegionServerServices()) @@ -293,7 +292,7 @@ public boolean isStarted() { @Override public void stop(String reason) { - LOG.info("Stopping due to: "+reason); + LOG.info("Stopping due to: " + reason); this.stopped = true; sleeper.skipSleepCycle(); } @@ -308,18 +307,18 @@ public InetSocketAddress getAddress() { } public SecretManager getSecretManager() { - return ((RpcServer)rpcServer).getSecretManager(); + return ((RpcServer) rpcServer).getSecretManager(); } @Override public AuthenticationProtos.GetAuthenticationTokenResponse getAuthenticationToken( RpcController controller, AuthenticationProtos.GetAuthenticationTokenRequest request) - throws ServiceException { + throws ServiceException { LOG.debug("Authentication token request from " + 
RpcServer.getRequestUserName().orElse(null)); // Ignore above passed in controller -- it is always null ServerRpcController serverController = new ServerRpcController(); - final BlockingRpcCallback - callback = new BlockingRpcCallback<>(); + final BlockingRpcCallback callback = + new BlockingRpcCallback<>(); getAuthenticationToken(null, request, callback); try { serverController.checkFailed(); @@ -330,9 +329,8 @@ public AuthenticationProtos.GetAuthenticationTokenResponse getAuthenticationToke } @Override - public AuthenticationProtos.WhoAmIResponse whoAmI( - RpcController controller, AuthenticationProtos.WhoAmIRequest request) - throws ServiceException { + public AuthenticationProtos.WhoAmIResponse whoAmI(RpcController controller, + AuthenticationProtos.WhoAmIRequest request) throws ServiceException { LOG.debug("whoAmI() request from " + RpcServer.getRequestUserName().orElse(null)); // Ignore above passed in controller -- it is always null ServerRpcController serverController = new ServerRpcController(); @@ -366,7 +364,7 @@ public AsyncClusterConnection getAsyncClusterConnection() { @Parameters(name = "{index}: rpcServerImpl={0}") public static Collection parameters() { return Arrays.asList(new Object[] { SimpleRpcServer.class.getName() }, - new Object[] { NettyRpcServer.class.getName() }); + new Object[] { NettyRpcServer.class.getName() }); } @Parameter(0) @@ -384,12 +382,12 @@ public void setUp() throws Exception { // Override the connection registry to avoid spinning up a mini cluster for the connection below // to go through. TEST_UTIL.getConfiguration().set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - HConstants.ZK_CONNECTION_REGISTRY_CLASS); + HConstants.ZK_CONNECTION_REGISTRY_CLASS); TEST_UTIL.startMiniZKCluster(); // register token type for protocol SecurityInfo.addInfo(AuthenticationProtos.AuthenticationService.getDescriptor().getName(), new SecurityInfo("hbase.test.kerberos.principal", - AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN)); + AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN)); // security settings only added after startup so that ZK does not require SASL Configuration conf = TEST_UTIL.getConfiguration(); conf.set("hadoop.security.authentication", "kerberos"); @@ -398,7 +396,8 @@ public void setUp() throws Exception { conf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, rpcServerImpl); server = new TokenServer(conf, TEST_UTIL); serverThread = new Thread(server); - Threads.setDaemonThreadRunning(serverThread, "TokenServer:"+server.getServerName().toString()); + Threads.setDaemonThreadRunning(serverThread, + "TokenServer:" + server.getServerName().toString()); // wait for startup while (!server.isStarted() && !server.isStopped()) { Thread.sleep(10); @@ -406,14 +405,13 @@ public void setUp() throws Exception { server.rpcServer.refreshAuthManager(conf, new PolicyProvider() { @Override public Service[] getServices() { - return new Service [] { - new Service("security.client.protocol.acl", - AuthenticationProtos.AuthenticationService.BlockingInterface.class)}; + return new Service[] { new Service("security.client.protocol.acl", + AuthenticationProtos.AuthenticationService.BlockingInterface.class) }; } }); ZKClusterId.setClusterId(server.getZooKeeper(), clusterId); - secretManager = (AuthenticationTokenSecretManager)server.getSecretManager(); - while(secretManager.getCurrentKey() == null) { + secretManager = (AuthenticationTokenSecretManager) server.getSecretManager(); + while (secretManager.getCurrentKey() == null) { Thread.sleep(1); } 
} @@ -427,69 +425,69 @@ public void tearDown() throws Exception { @Test public void testTokenCreation() throws Exception { - Token token = - secretManager.generateToken("testuser"); + Token token = secretManager.generateToken("testuser"); AuthenticationTokenIdentifier ident = new AuthenticationTokenIdentifier(); Writables.getWritable(token.getIdentifier(), ident); - assertEquals("Token username should match", "testuser", - ident.getUsername()); + assertEquals("Token username should match", "testuser", ident.getUsername()); byte[] passwd = secretManager.retrievePassword(ident); assertTrue("Token password and password from secret manager should match", - Bytes.equals(token.getPassword(), passwd)); + Bytes.equals(token.getPassword(), passwd)); } -// This won't work any more now RpcServer takes Shaded Service. It depends on RPCServer being able to provide a -// non-shaded service. TODO: FIX. Tried to make RPC generic but then it ripples; have to make Connection generic. -// And Call generic, etc. -// -// @Test -// public void testTokenAuthentication() throws Exception { -// UserGroupInformation testuser = -// UserGroupInformation.createUserForTesting("testuser", new String[]{"testgroup"}); -// testuser.setAuthenticationMethod( -// UserGroupInformation.AuthenticationMethod.TOKEN); -// final Configuration conf = TEST_UTIL.getConfiguration(); -// UserGroupInformation.setConfiguration(conf); -// Token token = secretManager.generateToken("testuser"); -// LOG.debug("Got token: " + token.toString()); -// testuser.addToken(token); -// // Verify the server authenticates us as this token user -// testuser.doAs(new PrivilegedExceptionAction() { -// public Object run() throws Exception { -// Configuration c = server.getConfiguration(); -// final RpcClient rpcClient = RpcClientFactory.createClient(c, clusterId.toString()); -// ServerName sn = -// ServerName.valueOf(server.getAddress().getHostName(), server.getAddress().getPort(), -// EnvironmentEdgeManager.currentTime()); -// try { -// // Make a proxy to go between the shaded RpcController that rpc expects and the -// // non-shaded controller this CPEP is providing. This is because this test does a neat -// // little trick of testing the CPEP Service by inserting it as RpcServer Service. This -// // worked fine before we shaded PB. Now we need these proxies. -// final org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel channel = -// rpcClient.createBlockingRpcChannel(sn, User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT); -// AuthenticationProtos.AuthenticationService.BlockingInterface stub = -// AuthenticationProtos.AuthenticationService.newBlockingStub(channel); -// AuthenticationProtos.WhoAmIResponse response = -// stub.whoAmI(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance()); -// String myname = response.getUsername(); -// assertEquals("testuser", myname); -// String authMethod = response.getAuthMethod(); -// assertEquals("TOKEN", authMethod); -// } finally { -// rpcClient.close(); -// } -// return null; -// } -// }); -// } + // This won't work any more now RpcServer takes Shaded Service. It depends on RPCServer being able + // to provide a + // non-shaded service. TODO: FIX. Tried to make RPC generic but then it ripples; have to make + // Connection generic. + // And Call generic, etc. 
+ // + // @Test + // public void testTokenAuthentication() throws Exception { + // UserGroupInformation testuser = + // UserGroupInformation.createUserForTesting("testuser", new String[]{"testgroup"}); + // testuser.setAuthenticationMethod( + // UserGroupInformation.AuthenticationMethod.TOKEN); + // final Configuration conf = TEST_UTIL.getConfiguration(); + // UserGroupInformation.setConfiguration(conf); + // Token token = secretManager.generateToken("testuser"); + // LOG.debug("Got token: " + token.toString()); + // testuser.addToken(token); + // // Verify the server authenticates us as this token user + // testuser.doAs(new PrivilegedExceptionAction() { + // public Object run() throws Exception { + // Configuration c = server.getConfiguration(); + // final RpcClient rpcClient = RpcClientFactory.createClient(c, clusterId.toString()); + // ServerName sn = + // ServerName.valueOf(server.getAddress().getHostName(), server.getAddress().getPort(), + // EnvironmentEdgeManager.currentTime()); + // try { + // // Make a proxy to go between the shaded RpcController that rpc expects and the + // // non-shaded controller this CPEP is providing. This is because this test does a neat + // // little trick of testing the CPEP Service by inserting it as RpcServer Service. This + // // worked fine before we shaded PB. Now we need these proxies. + // final org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel channel = + // rpcClient.createBlockingRpcChannel(sn, User.getCurrent(), + // HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + // AuthenticationProtos.AuthenticationService.BlockingInterface stub = + // AuthenticationProtos.AuthenticationService.newBlockingStub(channel); + // AuthenticationProtos.WhoAmIResponse response = + // stub.whoAmI(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance()); + // String myname = response.getUsername(); + // assertEquals("testuser", myname); + // String authMethod = response.getAuthMethod(); + // assertEquals("TOKEN", authMethod); + // } finally { + // rpcClient.close(); + // } + // return null; + // } + // }); + // } @Test public void testUseExistingToken() throws Exception { User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "testuser2", - new String[]{"testgroup"}); - Token token = - secretManager.generateToken(user.getName()); + new String[] { "testgroup" }); + Token token = secretManager.generateToken(user.getName()); assertNotNull(token); user.addToken(token); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java index 6fab5e15f363..d9ee1b20eebb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java @@ -43,10 +43,9 @@ import org.slf4j.LoggerFactory; /** - * Test the synchronization of token authentication master keys through - * ZKSecretWatcher + * Test the synchronization of token authentication master keys through ZKSecretWatcher */ -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestZKSecretWatcher { @ClassRule @@ -62,9 +61,10 @@ public class TestZKSecretWatcher { private static class MockAbortable implements Abortable { private boolean abort; + @Override public void abort(String reason, Throwable e) { - LOG.info("Aborting: "+reason, e); + LOG.info("Aborting: " + reason, e); 
abort = true; } @@ -77,12 +77,11 @@ public boolean isAborted() { // We subclass AuthenticationTokenSecretManager so that testKeyUpdate can receive // notification on the removal of keyId private static class AuthenticationTokenSecretManagerForTest - extends AuthenticationTokenSecretManager { + extends AuthenticationTokenSecretManager { private CountDownLatch latch = new CountDownLatch(1); - public AuthenticationTokenSecretManagerForTest(Configuration conf, - ZKWatcher zk, String serverName, - long keyUpdateInterval, long tokenMaxLifetime) { + public AuthenticationTokenSecretManagerForTest(Configuration conf, ZKWatcher zk, + String serverName, long keyUpdateInterval, long tokenMaxLifetime) { super(conf, zk, serverName, keyUpdateInterval, tokenMaxLifetime); } @@ -108,27 +107,26 @@ public static void setupBeforeClass() throws Exception { ZKWatcher zk = newZK(conf, "server1", new MockAbortable()); AuthenticationTokenSecretManagerForTest[] tmp = new AuthenticationTokenSecretManagerForTest[2]; - tmp[0] = new AuthenticationTokenSecretManagerForTest( - conf, zk, "server1", 60*60*1000, 60*1000); + tmp[0] = + new AuthenticationTokenSecretManagerForTest(conf, zk, "server1", 60 * 60 * 1000, 60 * 1000); tmp[0].start(); zk = newZK(conf, "server2", new MockAbortable()); - tmp[1] = new AuthenticationTokenSecretManagerForTest( - conf, zk, "server2", 60*60*1000, 60*1000); + tmp[1] = + new AuthenticationTokenSecretManagerForTest(conf, zk, "server2", 60 * 60 * 1000, 60 * 1000); tmp[1].start(); while (KEY_MASTER == null) { - for (int i=0; i<2; i++) { + for (int i = 0; i < 2; i++) { if (tmp[i].isMaster()) { KEY_MASTER = tmp[i]; - KEY_SLAVE = tmp[ (i+1) % 2 ]; + KEY_SLAVE = tmp[(i + 1) % 2]; break; } } Thread.sleep(500); } - LOG.info("Master is "+KEY_MASTER.getName()+ - ", slave is "+KEY_SLAVE.getName()); + LOG.info("Master is " + KEY_MASTER.getName() + ", slave is " + KEY_SLAVE.getName()); } @AfterClass @@ -184,19 +182,18 @@ public void testKeyUpdate() throws Exception { LOG.debug("Slave current key (key3) {}", slaveCurrent); // verify that the expired key has been removed - Waiter.waitFor(TEST_UTIL.getConfiguration(), 30000, - () -> { + Waiter.waitFor(TEST_UTIL.getConfiguration(), 30000, () -> { AuthenticationKey k = KEY_SLAVE.getKey(key1.getKeyId()); LOG.info("AuthKey1={}", k); - return k == null;}); - assertNull("key1=" + KEY_SLAVE.getKey(key1.getKeyId()), - KEY_SLAVE.getKey(key1.getKeyId())); + return k == null; + }); + assertNull("key1=" + KEY_SLAVE.getKey(key1.getKeyId()), KEY_SLAVE.getKey(key1.getKeyId())); // bring up a new slave Configuration conf = TEST_UTIL.getConfiguration(); ZKWatcher zk = newZK(conf, "server3", new MockAbortable()); - KEY_SLAVE2 = new AuthenticationTokenSecretManager( - conf, zk, "server3", 60*60*1000, 60*1000); + KEY_SLAVE2 = + new AuthenticationTokenSecretManager(conf, zk, "server3", 60 * 60 * 1000, 60 * 1000); KEY_SLAVE2.start(); Thread.sleep(1000); @@ -220,7 +217,7 @@ public void testKeyUpdate() throws Exception { // check for a new master AuthenticationTokenSecretManager[] mgrs = - new AuthenticationTokenSecretManager[]{ KEY_SLAVE, KEY_SLAVE2 }; + new AuthenticationTokenSecretManager[] { KEY_SLAVE, KEY_SLAVE2 }; AuthenticationTokenSecretManager newMaster = null; int tries = 0; while (newMaster == null && tries++ < 5) { @@ -239,18 +236,18 @@ public void testKeyUpdate() throws Exception { AuthenticationKey current = newMaster.getCurrentKey(); // new master will immediately roll the current key, so it's current may be greater assertTrue(current.getKeyId() >= 
slaveCurrent.getKeyId()); - LOG.debug("New master, current key: "+current.getKeyId()); + LOG.debug("New master, current key: " + current.getKeyId()); // roll the current key again on new master and verify the key ID increments newMaster.rollCurrentKey(); AuthenticationKey newCurrent = newMaster.getCurrentKey(); - LOG.debug("New master, rolled new current key: "+newCurrent.getKeyId()); + LOG.debug("New master, rolled new current key: " + newCurrent.getKeyId()); assertTrue(newCurrent.getKeyId() > current.getKeyId()); // add another slave ZKWatcher zk3 = newZK(conf, "server4", new MockAbortable()); - KEY_SLAVE3 = new AuthenticationTokenSecretManager( - conf, zk3, "server4", 60*60*1000, 60*1000); + KEY_SLAVE3 = + new AuthenticationTokenSecretManager(conf, zk3, "server4", 60 * 60 * 1000, 60 * 1000); KEY_SLAVE3.start(); Thread.sleep(5000); @@ -262,7 +259,7 @@ public void testKeyUpdate() throws Exception { assertFalse(newMaster.isMaster()); // check for a new master - mgrs = new AuthenticationTokenSecretManager[]{ KEY_SLAVE, KEY_SLAVE2, KEY_SLAVE3 }; + mgrs = new AuthenticationTokenSecretManager[] { KEY_SLAVE, KEY_SLAVE2, KEY_SLAVE3 }; newMaster = null; tries = 0; while (newMaster == null && tries++ < 5) { @@ -281,17 +278,17 @@ public void testKeyUpdate() throws Exception { AuthenticationKey current2 = newMaster.getCurrentKey(); // new master will immediately roll the current key, so it's current may be greater assertTrue(current2.getKeyId() >= newCurrent.getKeyId()); - LOG.debug("New master 2, current key: "+current2.getKeyId()); + LOG.debug("New master 2, current key: " + current2.getKeyId()); // roll the current key again on new master and verify the key ID increments newMaster.rollCurrentKey(); AuthenticationKey newCurrent2 = newMaster.getCurrentKey(); - LOG.debug("New master 2, rolled new current key: "+newCurrent2.getKeyId()); + LOG.debug("New master 2, rolled new current key: " + newCurrent2.getKeyId()); assertTrue(newCurrent2.getKeyId() > current2.getKeyId()); } - private static ZKWatcher newZK(Configuration conf, String name, - Abortable abort) throws Exception { + private static ZKWatcher newZK(Configuration conf, String name, Abortable abort) + throws Exception { Configuration copy = HBaseConfiguration.create(conf); ZKWatcher zk = new ZKWatcher(copy, name, abort); return zk; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java index d7081e7604ef..52443dffce12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,9 +53,10 @@ public class TestZKSecretWatcherRefreshKeys { private static class MockAbortable implements Abortable { private boolean abort; + @Override public void abort(String reason, Throwable e) { - LOG.info("Aborting: "+reason, e); + LOG.info("Aborting: " + reason, e); abort = true; } @@ -76,8 +77,8 @@ public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniZKCluster(); } - private static ZKWatcher newZK(Configuration conf, String name, - Abortable abort) throws Exception { + private static ZKWatcher newZK(Configuration conf, String name, Abortable abort) + throws Exception { Configuration copy = HBaseConfiguration.create(conf); ZKWatcher zk = new ZKWatcher(copy, name, abort); return zk; @@ -88,17 +89,15 @@ public void testRefreshKeys() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); ZKWatcher zk = newZK(conf, "127.0.0.1", new MockAbortable()); AuthenticationTokenSecretManager keyManager = - new AuthenticationTokenSecretManager(conf, zk, "127.0.0.1", - 60 * 60 * 1000, 60 * 1000); + new AuthenticationTokenSecretManager(conf, zk, "127.0.0.1", 60 * 60 * 1000, 60 * 1000); ZKSecretWatcher watcher = new ZKSecretWatcher(conf, zk, keyManager); ZKUtil.deleteChildrenRecursively(zk, watcher.getKeysParentZNode()); Integer[] keys = { 1, 2, 3, 4, 5, 6 }; for (Integer key : keys) { - AuthenticationKey ak = new AuthenticationKey(key, - EnvironmentEdgeManager.currentTime() + 600 * 1000, null); + AuthenticationKey ak = + new AuthenticationKey(key, EnvironmentEdgeManager.currentTime() + 600 * 1000, null); ZKUtil.createWithParents(zk, - ZNodePaths.joinZNode(watcher.getKeysParentZNode(), key.toString()), - Writables.getBytes(ak)); + ZNodePaths.joinZNode(watcher.getKeysParentZNode(), key.toString()), Writables.getBytes(ak)); } Assert.assertNull(keyManager.getCurrentKey()); watcher.refreshKeys(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java index 05c65ab8c069..c9fe778954ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_FAMILY; import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME; import static org.apache.hadoop.hbase.security.visibility.VisibilityUtils.SYSTEM_LABEL; + import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; @@ -66,10 +67,9 @@ import org.slf4j.LoggerFactory; /** - * This is a VisibilityLabelService where labels in Mutation's visibility - * expression will be persisted as Strings itself rather than ordinals in - * 'labels' table. Also there is no need to add labels to the system, prior to - * using them in Mutations/Authorizations. 
+ * This is a VisibilityLabelService where labels in Mutation's visibility expression will be + * persisted as Strings itself rather than ordinals in 'labels' table. Also there is no need to add + * labels to the system, prior to using them in Mutations/Authorizations. */ @InterfaceAudience.Private public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelService { @@ -78,9 +78,9 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer private static final byte[] DUMMY_VALUE = new byte[0]; private static final byte STRING_SERIALIZATION_FORMAT = 2; - private static final Tag STRING_SERIALIZATION_FORMAT_TAG = new ArrayBackedTag( - TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE, - new byte[] { STRING_SERIALIZATION_FORMAT }); + private static final Tag STRING_SERIALIZATION_FORMAT_TAG = + new ArrayBackedTag(TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE, + new byte[] { STRING_SERIALIZATION_FORMAT }); private final ExpressionParser expressionParser = new ExpressionParser(); private final ExpressionExpander expressionExpander = new ExpressionExpander(); private Configuration conf; @@ -106,14 +106,8 @@ public OperationStatus[] setAuths(byte[] user, List authLabels) throws I Put p = new Put(user); CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); for (byte[] auth : authLabels) { - p.add(builder.clear() - .setRow(p.getRow()) - .setFamily(LABELS_TABLE_FAMILY) - .setQualifier(auth) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(DUMMY_VALUE) - .build()); + p.add(builder.clear().setRow(p.getRow()).setFamily(LABELS_TABLE_FAMILY).setQualifier(auth) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(DUMMY_VALUE).build()); } this.labelsRegion.put(p); // This is a testing impl and so not doing any caching @@ -130,9 +124,8 @@ public OperationStatus[] clearAuths(byte[] user, List authLabels) throws List currentAuths; if (AuthUtil.isGroupPrincipal(Bytes.toString(user))) { String group = AuthUtil.getGroupName(Bytes.toString(user)); - currentAuths = this.getGroupAuths(new String[]{group}, true); - } - else { + currentAuths = this.getGroupAuths(new String[] { group }, true); + } else { currentAuths = this.getUserAuths(user, true); } Delete d = new Delete(user); @@ -143,9 +136,9 @@ public OperationStatus[] clearAuths(byte[] user, List authLabels) throws d.addColumns(LABELS_TABLE_FAMILY, authLabel); } else { // This label is not set for the user. 
- finalOpStatus[i] = new OperationStatus(OperationStatusCode.FAILURE, - new InvalidLabelException("Label '" + authLabelStr + "' is not set for the user " - + Bytes.toString(user))); + finalOpStatus[i] = + new OperationStatus(OperationStatusCode.FAILURE, new InvalidLabelException( + "Label '" + authLabelStr + "' is not set for the user " + Bytes.toString(user))); } i++; } @@ -208,8 +201,8 @@ private void getAuths(Get get, List auths) throws IOException { scanner.next(cells); } for (Cell cell : cells) { - String auth = Bytes - .toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()); + String auth = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), + cell.getQualifierLength()); auths.add(auth); } } finally { @@ -360,8 +353,8 @@ private void extractLabels(ExpressionNode node, List labels, List p break; } } - if (!matchFound) - break; + if (!matchFound) break; } } return matchFound; @@ -442,8 +434,8 @@ private static boolean checkForMatchingVisibilityTagsWithSortedOrder(List p @Override public byte[] encodeVisibilityForReplication(final List tags, final Byte serializationFormat) throws IOException { - if (tags.size() > 0 && (serializationFormat == null - || serializationFormat == STRING_SERIALIZATION_FORMAT)) { + if (tags.size() > 0 + && (serializationFormat == null || serializationFormat == STRING_SERIALIZATION_FORMAT)) { return createModifiedVisExpression(tags); } return null; @@ -453,14 +445,13 @@ public byte[] encodeVisibilityForReplication(final List tags, final Byte se * @param tags - all the tags associated with the current Cell * @return - the modified visibility expression as byte[] */ - private byte[] createModifiedVisExpression(final List tags) - throws IOException { + private byte[] createModifiedVisExpression(final List tags) throws IOException { StringBuilder visibilityString = new StringBuilder(); for (Tag tag : tags) { if (tag.getType() == TagType.VISIBILITY_TAG_TYPE) { if (visibilityString.length() != 0) { - visibilityString.append(VisibilityConstants.CLOSED_PARAN - + VisibilityConstants.OR_OPERATOR); + visibilityString + .append(VisibilityConstants.CLOSED_PARAN + VisibilityConstants.OR_OPERATOR); } int offset = tag.getValueOffset(); int endOffset = offset + tag.getValueLength(); @@ -483,8 +474,8 @@ private byte[] createModifiedVisExpression(final List tags) if (expressionStart) { visibilityString.append(VisibilityConstants.OPEN_PARAN + CellVisibility.quote(label)); } else { - visibilityString.append(VisibilityConstants.AND_OPERATOR - + CellVisibility.quote(label)); + visibilityString + .append(VisibilityConstants.AND_OPERATOR + CellVisibility.quote(label)); } } expressionStart = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LabelFilteringScanLabelGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LabelFilteringScanLabelGenerator.java index 104cc499a768..59b0b091e04c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LabelFilteringScanLabelGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LabelFilteringScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,9 @@ import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; // Strictly removes a specified label @InterfaceAudience.Private diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LoadTestDataGeneratorWithVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LoadTestDataGeneratorWithVisibilityLabels.java index b6987f83b62f..a7d96163bbcf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LoadTestDataGeneratorWithVisibilityLabels.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LoadTestDataGeneratorWithVisibilityLabels.java @@ -1,27 +1,27 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.util.MultiThreadedAction.DefaultDataGenerator; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class LoadTestDataGeneratorWithVisibilityLabels extends DefaultDataGenerator { @@ -77,15 +77,15 @@ private static String[][] toAuthorizationsSet(String authorizationsStr) { @Override public Mutation beforeMutate(long rowkeyBase, Mutation m) throws IOException { - m.setCellVisibility(new CellVisibility(this.visibilityExps[(int) rowkeyBase - % this.visibilityExps.length])); + m.setCellVisibility( + new CellVisibility(this.visibilityExps[(int) rowkeyBase % this.visibilityExps.length])); return m; } @Override public Get beforeGet(long rowkeyBase, Get get) { - get.setAuthorizations(new Authorizations( - authorizations[(int) (rowkeyBase % authorizations.length)])); + get.setAuthorizations( + new Authorizations(authorizations[(int) (rowkeyBase % authorizations.length)])); return get; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java index 501fd5471bf8..f20eaf5131c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestDefaultScanLabelGeneratorStack { @ClassRule @@ -83,7 +83,7 @@ public static void setupBeforeClass() throws Exception { conf.set("hbase.superuser", "admin"); TEST_UTIL.startMiniCluster(1); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); - TESTUSER = User.createUserForTesting(conf, "test", new String[] { }); + TESTUSER = User.createUserForTesting(conf, "test", new String[] {}); // Wait for the labels table to become available TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); @@ -111,7 +111,7 @@ public void testDefaultScanLabelGeneratorStack() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = TEST_UTIL.createTable(tableName, CF)) { + Table table = TEST_UTIL.createTable(tableName, CF)) { Put put = new Put(ROW_1); put.addColumn(CF, Q1, HConstants.LATEST_TIMESTAMP, value1); put.setCellVisibility(new CellVisibility(SECRET)); @@ -133,7 +133,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Result[] next = getResult(table, new Scan()); // Test that super user can see all the cells. 
@@ -141,7 +141,7 @@ public Void run() throws Exception { cellScanner.advance(); Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), ROW_1, 0, ROW_1.length)); + current.getRowLength(), ROW_1, 0, ROW_1.length)); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), Q1, 0, Q1.length)); assertTrue(Bytes.equals(current.getValueArray(), current.getValueOffset(), @@ -172,7 +172,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { // Test scan with no auth attribute Result[] next = getResult(table, new Scan()); CellScanner cellScanner = next[0].cellScanner(); @@ -180,7 +180,7 @@ public Void run() throws Exception { Cell current = cellScanner.current(); // test user can see value2 (CONFIDENTIAL) and value3 (no label) assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), ROW_1, 0, ROW_1.length)); + current.getRowLength(), ROW_1, 0, ROW_1.length)); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), Q2, 0, Q2.length)); assertTrue(Bytes.equals(current.getValueArray(), current.getValueOffset(), @@ -189,7 +189,7 @@ public Void run() throws Exception { current = cellScanner.current(); // test user can see value2 (CONFIDENTIAL) and value3 (no label) assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), ROW_1, 0, ROW_1.length)); + current.getRowLength(), ROW_1, 0, ROW_1.length)); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), Q3, 0, Q3.length)); assertTrue(Bytes.equals(current.getValueArray(), current.getValueOffset(), @@ -251,7 +251,7 @@ public Void run() throws Exception { } - private static Result [] getResult(Table table, Scan scan) throws IOException { + private static Result[] getResult(Table table, Scan scan) throws IOException { ResultScanner scanner = table.getScanner(scan); Result[] next = scanner.next(1); assertTrue(next.length == 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java index cbbdd4fde459..fec59f36c6fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestEnforcingScanLabelGenerator { @ClassRule @@ -80,7 +80,7 @@ public static void setupBeforeClass() throws Exception { conf.set("hbase.superuser", "admin"); TEST_UTIL.startMiniCluster(1); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); - TESTUSER = User.createUserForTesting(conf, "test", new String[] { }); + TESTUSER = User.createUserForTesting(conf, "test", new String[] {}); // Wait for the labels table to become available TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); @@ -108,7 +108,7 @@ public void testEnforcingScanLabelGenerator() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = TEST_UTIL.createTable(tableName, CF)) { + Table table = TEST_UTIL.createTable(tableName, CF)) { Put put = new Put(ROW_1); put.addColumn(CF, Q1, HConstants.LATEST_TIMESTAMP, value); put.setCellVisibility(new CellVisibility(SECRET)); @@ -130,7 +130,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { // Test that super user can see all the cells. Get get = new Get(ROW_1); Result result = table.get(get); @@ -146,7 +146,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { // Test that we enforce the defined set Get get = new Get(ROW_1); get.setAuthorizations(new Authorizations(new String[] { SECRET, CONFIDENTIAL })); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestExpressionExpander.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestExpressionExpander.java index ba788b252674..c81898d590a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestExpressionExpander.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestExpressionExpander.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestExpressionExpander { @ClassRule @@ -43,8 +43,8 @@ public void testPositiveCases() throws Exception { ExpressionExpander expander = new ExpressionExpander(); // (!a) -> (!a) - NonLeafExpressionNode exp1 = new NonLeafExpressionNode(Operator.NOT, - new LeafExpressionNode("a")); + NonLeafExpressionNode exp1 = + new NonLeafExpressionNode(Operator.NOT, new LeafExpressionNode("a")); ExpressionNode result = expander.expand(exp1); assertTrue(result instanceof NonLeafExpressionNode); NonLeafExpressionNode nlResult = (NonLeafExpressionNode) result; @@ -52,8 +52,8 @@ public void testPositiveCases() throws Exception { assertEquals("a", ((LeafExpressionNode) nlResult.getChildExps().get(0)).getIdentifier()); // (a | b) -> (a | b) - NonLeafExpressionNode exp2 = new NonLeafExpressionNode(Operator.OR, - new LeafExpressionNode("a"), new LeafExpressionNode("b")); + NonLeafExpressionNode exp2 = new NonLeafExpressionNode(Operator.OR, new LeafExpressionNode("a"), + new LeafExpressionNode("b")); result = expander.expand(exp2); assertTrue(result instanceof NonLeafExpressionNode); nlResult = (NonLeafExpressionNode) result; @@ -74,9 +74,9 @@ public void testPositiveCases() throws Exception { assertEquals("b", ((LeafExpressionNode) nlResult.getChildExps().get(1)).getIdentifier()); // ((a | b) | c) -> (a | b | c) - NonLeafExpressionNode exp4 = new NonLeafExpressionNode(Operator.OR, new NonLeafExpressionNode( - Operator.OR, new LeafExpressionNode("a"), new LeafExpressionNode("b")), - new LeafExpressionNode("c")); + NonLeafExpressionNode exp4 = + new NonLeafExpressionNode(Operator.OR, new NonLeafExpressionNode(Operator.OR, + new LeafExpressionNode("a"), new LeafExpressionNode("b")), new LeafExpressionNode("c")); result = expander.expand(exp4); assertTrue(result instanceof NonLeafExpressionNode); nlResult = (NonLeafExpressionNode) result; @@ -87,9 +87,9 @@ Operator.OR, new LeafExpressionNode("a"), new LeafExpressionNode("b")), assertEquals("c", ((LeafExpressionNode) nlResult.getChildExps().get(2)).getIdentifier()); // ((a & b) & c) -> (a & b & c) - NonLeafExpressionNode exp5 = new NonLeafExpressionNode(Operator.AND, new NonLeafExpressionNode( - Operator.AND, new LeafExpressionNode("a"), new LeafExpressionNode("b")), - new LeafExpressionNode("c")); + NonLeafExpressionNode exp5 = + new NonLeafExpressionNode(Operator.AND, new NonLeafExpressionNode(Operator.AND, + new LeafExpressionNode("a"), new LeafExpressionNode("b")), new LeafExpressionNode("c")); result = expander.expand(exp5); assertTrue(result instanceof NonLeafExpressionNode); nlResult = (NonLeafExpressionNode) result; @@ -100,9 +100,9 @@ Operator.AND, new LeafExpressionNode("a"), new LeafExpressionNode("b")), assertEquals("c", ((LeafExpressionNode) nlResult.getChildExps().get(2)).getIdentifier()); // (a | b) & c -> ((a & c) | (b & c)) - NonLeafExpressionNode exp6 = new NonLeafExpressionNode(Operator.AND, new NonLeafExpressionNode( - Operator.OR, new LeafExpressionNode("a"), new LeafExpressionNode("b")), - new LeafExpressionNode("c")); + NonLeafExpressionNode exp6 = + new NonLeafExpressionNode(Operator.AND, new NonLeafExpressionNode(Operator.OR, + new LeafExpressionNode("a"), new LeafExpressionNode("b")), new LeafExpressionNode("c")); result = expander.expand(exp6); 
assertTrue(result instanceof NonLeafExpressionNode); nlResult = (NonLeafExpressionNode) result; @@ -120,9 +120,9 @@ Operator.OR, new LeafExpressionNode("a"), new LeafExpressionNode("b")), assertEquals("c", ((LeafExpressionNode) temp.getChildExps().get(1)).getIdentifier()); // (a & b) | c -> ((a & b) | c) - NonLeafExpressionNode exp7 = new NonLeafExpressionNode(Operator.OR, new NonLeafExpressionNode( - Operator.AND, new LeafExpressionNode("a"), new LeafExpressionNode("b")), - new LeafExpressionNode("c")); + NonLeafExpressionNode exp7 = + new NonLeafExpressionNode(Operator.OR, new NonLeafExpressionNode(Operator.AND, + new LeafExpressionNode("a"), new LeafExpressionNode("b")), new LeafExpressionNode("c")); result = expander.expand(exp7); assertTrue(result instanceof NonLeafExpressionNode); nlResult = (NonLeafExpressionNode) result; @@ -226,12 +226,12 @@ Operator.AND, new LeafExpressionNode("a"), new LeafExpressionNode("b")), // (((a | b) | c) | d) & e -> ((a & e) | (b & e) | (c & e) | (d & e)) NonLeafExpressionNode exp12 = new NonLeafExpressionNode(Operator.AND); - NonLeafExpressionNode tempExp1 = new NonLeafExpressionNode(Operator.OR, new LeafExpressionNode( - "a"), new LeafExpressionNode("b")); - NonLeafExpressionNode tempExp2 = new NonLeafExpressionNode(Operator.OR, tempExp1, - new LeafExpressionNode("c")); - NonLeafExpressionNode tempExp3 = new NonLeafExpressionNode(Operator.OR, tempExp2, - new LeafExpressionNode("d")); + NonLeafExpressionNode tempExp1 = new NonLeafExpressionNode(Operator.OR, + new LeafExpressionNode("a"), new LeafExpressionNode("b")); + NonLeafExpressionNode tempExp2 = + new NonLeafExpressionNode(Operator.OR, tempExp1, new LeafExpressionNode("c")); + NonLeafExpressionNode tempExp3 = + new NonLeafExpressionNode(Operator.OR, tempExp2, new LeafExpressionNode("d")); exp12.addChildExp(tempExp3); exp12.addChildExp(new LeafExpressionNode("e")); result = expander.expand(exp12); @@ -261,9 +261,11 @@ Operator.AND, new LeafExpressionNode("a"), new LeafExpressionNode("b")), assertEquals("e", ((LeafExpressionNode) temp.getChildExps().get(1)).getIdentifier()); // (a | b | c) & d -> ((a & d) | (b & d) | (c & d)) - NonLeafExpressionNode exp13 = new NonLeafExpressionNode(Operator.AND, - new NonLeafExpressionNode(Operator.OR, new LeafExpressionNode("a"), new LeafExpressionNode( - "b"), new LeafExpressionNode("c")), new LeafExpressionNode("d")); + NonLeafExpressionNode exp13 = + new NonLeafExpressionNode(Operator.AND, + new NonLeafExpressionNode(Operator.OR, new LeafExpressionNode("a"), + new LeafExpressionNode("b"), new LeafExpressionNode("c")), + new LeafExpressionNode("d")); result = expander.expand(exp13); assertTrue(result instanceof NonLeafExpressionNode); nlResult = (NonLeafExpressionNode) result; @@ -382,9 +384,9 @@ Operator.AND, new LeafExpressionNode("a"), new LeafExpressionNode("b")), assertEquals("d", ((LeafExpressionNode) temp.getChildExps().get(1)).getIdentifier()); // !(a | b) -> ((!a) & (!b)) - NonLeafExpressionNode exp16 = new NonLeafExpressionNode(Operator.NOT, - new NonLeafExpressionNode(Operator.OR, new LeafExpressionNode("a"), new LeafExpressionNode( - "b"))); + NonLeafExpressionNode exp16 = + new NonLeafExpressionNode(Operator.NOT, new NonLeafExpressionNode(Operator.OR, + new LeafExpressionNode("a"), new LeafExpressionNode("b"))); result = expander.expand(exp16); assertTrue(result instanceof NonLeafExpressionNode); nlResult = (NonLeafExpressionNode) result; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestExpressionParser.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestExpressionParser.java index 5a1e37a86d42..974c4a5e29f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestExpressionParser.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestExpressionParser.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestExpressionParser { @ClassRule @@ -317,9 +317,9 @@ public void testNegativeCases() throws Exception { @Test public void testNonAsciiCases() throws Exception { - ExpressionNode node = parser.parse(CellVisibility.quote("\u0027") + "&" - + CellVisibility.quote("\u002b") + "|" + CellVisibility.quote("\u002d") + "&" - + CellVisibility.quote("\u003f")); + ExpressionNode node = + parser.parse(CellVisibility.quote("\u0027") + "&" + CellVisibility.quote("\u002b") + "|" + + CellVisibility.quote("\u002d") + "&" + CellVisibility.quote("\u003f")); assertTrue(node instanceof NonLeafExpressionNode); NonLeafExpressionNode nlNode = (NonLeafExpressionNode) node; assertEquals(Operator.AND, nlNode.getOperator()); @@ -372,7 +372,7 @@ public void testCasesSeperatedByDoubleQuotes() throws Exception { assertEquals(Operator.OR, nlNode.getOperator()); assertEquals(2, nlNode.getChildExps().size()); assertEquals("\u002b" + "&" + "\u003f", - ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier()); + ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier()); assertTrue(nlNode.getChildExps().get(0) instanceof NonLeafExpressionNode); nlNode = (NonLeafExpressionNode) nlNode.getChildExps().get(0); assertEquals(Operator.AND, nlNode.getOperator()); @@ -397,12 +397,12 @@ public void testCasesSeperatedByDoubleQuotes() throws Exception { assertEquals(Operator.OR, nlNode.getOperator()); assertEquals(2, nlNode.getChildExps().size()); assertEquals("\u0027" + "&" + "\\", - ((LeafExpressionNode) nlNode.getChildExps().get(0)).getIdentifier()); + ((LeafExpressionNode) nlNode.getChildExps().get(0)).getIdentifier()); assertEquals("\u003f" + "&" + "\\", - ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier()); + ((LeafExpressionNode) nlNode.getChildExps().get(1)).getIdentifier()); try { - node = parser.parse(CellVisibility.quote("\u002b&\\") + "|" + CellVisibility.quote("\u0027&\\") + "&" - + "\"$$"); + node = parser.parse( + CellVisibility.quote("\u002b&\\") + "|" + CellVisibility.quote("\u0027&\\") + "&" + "\"$$"); fail("Excpetion must be thrown as there is no end quote"); } catch (ParseException e) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java index 06d79f868e7f..08bc3670b2c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more 
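The two expression tests above exercise ExpressionExpander, which rewrites a visibility expression toward disjunctive normal form (for example (a | b) & c becomes (a & c) | (b & c)), and CellVisibility.quote(), which wraps labels containing operators, quotes, or non-ASCII characters so that ExpressionParser treats them as a single leaf. A condensed sketch of that usage, built from the same constructors the hunks show; these are internal hbase-server classes, and the variable names here are illustrative only.

```java
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.security.visibility.ExpressionExpander;
import org.apache.hadoop.hbase.security.visibility.ExpressionParser;
import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode;
import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode;
import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode;
import org.apache.hadoop.hbase.security.visibility.expression.Operator;

public class ExpressionSketch {
  public static void main(String[] args) throws Exception {
    // Build (a | b) & c exactly the way the test does.
    ExpressionNode aOrB = new NonLeafExpressionNode(Operator.OR,
      new LeafExpressionNode("a"), new LeafExpressionNode("b"));
    ExpressionNode andExp = new NonLeafExpressionNode(Operator.AND, aOrB,
      new LeafExpressionNode("c"));

    // expand() distributes AND over OR: the result is (a & c) | (b & c).
    ExpressionNode expanded = new ExpressionExpander().expand(andExp);
    System.out.println(expanded);

    // Labels containing '&', '|', '!' or quotes must be quoted before parsing.
    ExpressionNode parsed =
      new ExpressionParser().parse(CellVisibility.quote("a&b") + "|" + "c");
    System.out.println(parsed);
  }
}
```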
contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestVisibilityLabelReplicationWithExpAsString.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestVisibilityLabelReplicationWithExpAsString.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestVisibilityLabelReplicationWithExpAsString.class); @Override @Before @@ -79,8 +79,8 @@ public void setup() throws Exception { expectedVisString[1] = "(\"private\"&\"public\")|(\"private\"&\"topsecret\")|" + "(\"confidential\"&\"public\")|(\"confidential\"&\"topsecret\")"; expectedVisString[2] = "(!\"topsecret\"&\"secret\")|(!\"topsecret\"&\"confidential\")"; - expectedVisString[3] = "(\"secret\"&\"" + COPYRIGHT + "\\\"" + ACCENT + "\\\\" + SECRET - + "\\\"" + "\u0027&\\\\" + "\")"; + expectedVisString[3] = "(\"secret\"&\"" + COPYRIGHT + "\\\"" + ACCENT + "\\\\" + SECRET + "\\\"" + + "\u0027&\\\\" + "\")"; // setup configuration conf = HBaseConfiguration.create(); conf.setInt("hfile.format.version", 3); @@ -98,19 +98,18 @@ public void setup() throws Exception { conf.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName()); VisibilityTestUtil.enableVisiblityLabels(conf); conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - VisibilityReplication.class.getName()); - conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - SimpleCP.class.getName()); + VisibilityReplication.class.getName()); + conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, SimpleCP.class.getName()); // Have to reset conf1 in case zk cluster location different // than default conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, - ScanLabelGenerator.class); + ScanLabelGenerator.class); conf.set("hbase.superuser", "admin"); conf.set("hbase.superuser", User.getCurrent().getShortName()); SUPERUSER = User.createUserForTesting(conf, User.getCurrent().getShortName(), - new String[] { "supergroup" }); - User.createUserForTesting(conf, - User.getCurrent().getShortName(), new String[] { "supergroup" }); + new String[] { "supergroup" }); + User.createUserForTesting(conf, User.getCurrent().getShortName(), + new String[] { "supergroup" }); USER1 = User.createUserForTesting(conf, "user1", new String[] {}); TEST_UTIL = new HBaseTestingUtil(conf); TEST_UTIL.startMiniZKCluster(); @@ -125,7 +124,7 @@ public void setup() throws Exception { conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false); conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName()); conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - TestCoprocessorForTagsAtSink.class.getName()); + TestCoprocessorForTagsAtSink.class.getName()); setVisibilityLabelServiceImpl(conf1, ExpAsStringVisibilityLabelServiceImpl.class); TEST_UTIL1 = new HBaseTestingUtil(conf1); TEST_UTIL1.setZkCluster(miniZK); @@ -137,18 +136,17 @@ public void setup() throws Exception { TEST_UTIL1.startMiniCluster(1); admin = TEST_UTIL.getAdmin(); - ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(TEST_UTIL1.getClusterKey()) - .build(); + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL1.getClusterKey()).build(); admin.addReplicationPeer("2", rpc); TableDescriptor 
tableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(fam).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build(); + TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(fam).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build(); try (Admin hBaseAdmin = TEST_UTIL.getAdmin()) { hBaseAdmin.createTable(tableDescriptor); } - try (Admin hBaseAdmin1 = TEST_UTIL1.getAdmin()){ + try (Admin hBaseAdmin1 = TEST_UTIL1.getAdmin()) { hBaseAdmin1.createTable(tableDescriptor); } addLabels(); @@ -157,20 +155,19 @@ public void setup() throws Exception { } protected static void setVisibilityLabelServiceImpl(Configuration conf, Class clazz) { - conf.setClass(VisibilityLabelServiceManager.VISIBILITY_LABEL_SERVICE_CLASS, - clazz, VisibilityLabelService.class); + conf.setClass(VisibilityLabelServiceManager.VISIBILITY_LABEL_SERVICE_CLASS, clazz, + VisibilityLabelService.class); } @Override protected void verifyGet(final byte[] row, final String visString, final int expected, - final boolean nullExpected, final String... auths) throws IOException, - InterruptedException { + final boolean nullExpected, final String... auths) throws IOException, InterruptedException { PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf1); - Table table2 = connection.getTable(TABLE_NAME)) { + Table table2 = connection.getTable(TABLE_NAME)) { CellScanner cellScanner; Cell current; Get get = new Get(row); @@ -186,8 +183,8 @@ public Void run() throws Exception { assertArrayEquals(CellUtil.cloneRow(current), row); assertEquals(expected, TestCoprocessorForTagsAtSink.tags.size()); boolean foundNonVisTag = false; - for(Tag t : TestCoprocessorForTagsAtSink.tags) { - if(t.getType() == NON_VIS_TAG_TYPE) { + for (Tag t : TestCoprocessorForTagsAtSink.tags) { + if (t.getType() == NON_VIS_TAG_TYPE) { assertEquals(TEMP, Bytes.toString(Tag.cloneValue(t))); foundNonVisTag = true; break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java index 3fcf7d53fbb9..fb9eed81cb15 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
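The setup() changes above are pure re-wrapping, but the replication wiring they touch reduces to two steps: register a peer that points at the sink cluster, and create the table with a column family whose replication scope is global. A minimal sketch of that wiring using the same builder calls visible in the diff; the peer id, table name, and family name here are placeholders.

```java
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicationWiringSketch {
  static void wire(Admin admin, String sinkClusterKey) throws Exception {
    // Register peer "2" pointing at the sink cluster (the test uses TEST_UTIL1.getClusterKey()).
    ReplicationPeerConfig rpc =
      ReplicationPeerConfig.newBuilder().setClusterKey(sinkClusterKey).build();
    admin.addReplicationPeer("2", rpc);

    // Only families with REPLICATION_SCOPE_GLOBAL are shipped to the peer cluster.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("labelsDemo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
      .build();
    admin.createTable(td);
  }
}
```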
See the NOTICE file * distributed with this work for additional information @@ -87,8 +87,8 @@ public abstract class TestVisibilityLabels { public static final String SECRET = "secret"; public static final String COPYRIGHT = "\u00A9ABC"; public static final String ACCENT = "\u0941"; - public static final String UNICODE_VIS_TAG = COPYRIGHT + "\"" + ACCENT + "\\" + SECRET + "\"" - + "\u0027&\\"; + public static final String UNICODE_VIS_TAG = + COPYRIGHT + "\"" + ACCENT + "\\" + SECRET + "\"" + "\u0027&\\"; public static final String UC1 = "\u0027\"\u002b"; public static final String UC2 = "\u002d\u003f"; public static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -118,8 +118,8 @@ public void tearDown() throws Exception { @Test public void testSecurityCapabilities() throws Exception { - List capabilities = TEST_UTIL.getConnection().getAdmin() - .getSecurityCapabilities(); + List capabilities = + TEST_UTIL.getConnection().getAdmin().getSecurityCapabilities(); assertTrue("CELL_VISIBILITY capability is missing", capabilities.contains(SecurityCapability.CELL_VISIBILITY)); } @@ -128,7 +128,7 @@ public void testSecurityCapabilities() throws Exception { public void testSimpleVisibilityLabels() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); try (Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "|" + CONFIDENTIAL, - PRIVATE + "|" + CONFIDENTIAL)) { + PRIVATE + "|" + CONFIDENTIAL)) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE)); ResultScanner scanner = table.getScanner(s); @@ -138,13 +138,13 @@ public void testSimpleVisibilityLabels() throws Exception { CellScanner cellScanner = next[0].cellScanner(); cellScanner.advance(); Cell current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row1, 0, row1.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row1, 0, row1.length)); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row2, 0, row2.length)); } } @@ -152,30 +152,30 @@ public void testSimpleVisibilityLabels() throws Exception { public void testSimpleVisibilityLabelsWithUniCodeCharacters() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); try (Table table = createTableAndWriteDataWithLabels(tableName, - SECRET + "|" + CellVisibility.quote(COPYRIGHT), "(" + CellVisibility.quote(COPYRIGHT) - + "&" + CellVisibility.quote(ACCENT) + ")|" + CONFIDENTIAL, - CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET)) { + SECRET + "|" + CellVisibility.quote(COPYRIGHT), "(" + CellVisibility.quote(COPYRIGHT) + "&" + + CellVisibility.quote(ACCENT) + ")|" + CONFIDENTIAL, + CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET)) { Scan s = new Scan(); - s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE, COPYRIGHT, ACCENT, - UNICODE_VIS_TAG)); + s.setAuthorizations( + new Authorizations(SECRET, CONFIDENTIAL, PRIVATE, COPYRIGHT, ACCENT, UNICODE_VIS_TAG)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); assertTrue(next.length == 3); CellScanner cellScanner = next[0].cellScanner(); cellScanner.advance(); Cell current = 
cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row1, 0, row1.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row1, 0, row1.length)); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row2, 0, row2.length)); cellScanner = next[2].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row3, 0, row3.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row3, 0, row3.length)); } } @@ -183,39 +183,39 @@ public void testSimpleVisibilityLabelsWithUniCodeCharacters() throws Exception { public void testAuthorizationsWithSpecialUnicodeCharacters() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); try (Table table = createTableAndWriteDataWithLabels(tableName, - CellVisibility.quote(UC1) + "|" + CellVisibility.quote(UC2), CellVisibility.quote(UC1), - CellVisibility.quote(UNICODE_VIS_TAG))) { + CellVisibility.quote(UC1) + "|" + CellVisibility.quote(UC2), CellVisibility.quote(UC1), + CellVisibility.quote(UNICODE_VIS_TAG))) { Scan s = new Scan(); - s.setAuthorizations(new Authorizations(UC1, UC2, ACCENT, - UNICODE_VIS_TAG)); + s.setAuthorizations(new Authorizations(UC1, UC2, ACCENT, UNICODE_VIS_TAG)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); assertTrue(next.length == 3); CellScanner cellScanner = next[0].cellScanner(); cellScanner.advance(); Cell current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row1, 0, row1.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row1, 0, row1.length)); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row2, 0, row2.length)); cellScanner = next[2].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row3, 0, row3.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row3, 0, row3.length)); } } @Test public void testVisibilityLabelsWithComplexLabels() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - try (Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" - + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET, "(" + PRIVATE + "&" + CONFIDENTIAL + "&" - + SECRET + ")", "(" + PRIVATE + "&" + CONFIDENTIAL + "&" + SECRET + ")", "(" + PRIVATE - + "&" + CONFIDENTIAL + "&" + SECRET + ")")) { + try (Table table = createTableAndWriteDataWithLabels(tableName, + "(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" 
+ TOPSECRET, + "(" + PRIVATE + "&" + CONFIDENTIAL + "&" + SECRET + ")", + "(" + PRIVATE + "&" + CONFIDENTIAL + "&" + SECRET + ")", + "(" + PRIVATE + "&" + CONFIDENTIAL + "&" + SECRET + ")")) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(TOPSECRET, CONFIDENTIAL, PRIVATE, PUBLIC, SECRET)); ResultScanner scanner = table.getScanner(s); @@ -224,18 +224,18 @@ public void testVisibilityLabelsWithComplexLabels() throws Exception { CellScanner cellScanner = next[0].cellScanner(); cellScanner.advance(); Cell current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row2, 0, row2.length)); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row3, 0, row3.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row3, 0, row3.length)); cellScanner = next[2].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row4, 0, row4.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row4, 0, row4.length)); } } @@ -243,7 +243,7 @@ public void testVisibilityLabelsWithComplexLabels() throws Exception { public void testVisibilityLabelsThatDoesNotPassTheCriteria() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); try (Table table = createTableAndWriteDataWithLabels(tableName, - "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)){ + "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(PUBLIC)); ResultScanner scanner = table.getScanner(s); @@ -265,8 +265,8 @@ public void testVisibilityLabelsInPutsThatDoesNotMatchAnyDefinedLabels() throws @Test public void testVisibilityLabelsInScanThatDoesNotMatchAnyDefinedLabels() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - try ( Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" - + CONFIDENTIAL + ")", PRIVATE)){ + try (Table table = createTableAndWriteDataWithLabels(tableName, + "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) { Scan s = new Scan(); s.setAuthorizations(new Authorizations("SAMPLE")); ResultScanner scanner = table.getScanner(s); @@ -278,22 +278,22 @@ public void testVisibilityLabelsInScanThatDoesNotMatchAnyDefinedLabels() throws @Test public void testVisibilityLabelsWithGet() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - try (Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL - + "&!" + PRIVATE, SECRET + "&" + CONFIDENTIAL + "&" + PRIVATE)) { + try (Table table = createTableAndWriteDataWithLabels(tableName, + SECRET + "&" + CONFIDENTIAL + "&!" 
+ PRIVATE, SECRET + "&" + CONFIDENTIAL + "&" + PRIVATE)) { Get get = new Get(row1); get.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); Result result = table.get(get); assertTrue(!result.isEmpty()); Cell cell = result.getColumnLatestCell(fam, qual); assertTrue(Bytes.equals(value, 0, value.length, cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); } } @Test public void testVisibilityLabelsOnKillingOfRSContainingLabelsTable() throws Exception { - List regionServerThreads = TEST_UTIL.getHBaseCluster() - .getRegionServerThreads(); + List regionServerThreads = + TEST_UTIL.getHBaseCluster().getRegionServerThreads(); int liveRS = 0; for (RegionServerThread rsThreads : regionServerThreads) { if (!rsThreads.getRegionServer().isAborted()) { @@ -306,11 +306,10 @@ public void testVisibilityLabelsOnKillingOfRSContainingLabelsTable() throws Exce Thread t1 = new Thread() { @Override public void run() { - List regionServerThreads = TEST_UTIL.getHBaseCluster() - .getRegionServerThreads(); + List regionServerThreads = + TEST_UTIL.getHBaseCluster().getRegionServerThreads(); for (RegionServerThread rsThread : regionServerThreads) { - List onlineRegions = rsThread.getRegionServer().getRegions( - LABELS_TABLE_NAME); + List onlineRegions = rsThread.getRegionServer().getRegions(LABELS_TABLE_NAME); if (onlineRegions.size() > 0) { rsThread.getRegionServer().abort("Aborting "); killedRS = true; @@ -330,7 +329,7 @@ public void run() { Thread.sleep(1); } createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL + ")", - PRIVATE); + PRIVATE); } catch (Exception e) { } } @@ -344,8 +343,7 @@ public void run() { for (RegionServerThread rsThread : regionServerThreads) { while (true) { if (!rsThread.getRegionServer().isAborted()) { - List onlineRegions = rsThread.getRegionServer().getRegions( - LABELS_TABLE_NAME); + List onlineRegions = rsThread.getRegionServer().getRegions(LABELS_TABLE_NAME); if (onlineRegions.size() > 0) { break; } else { @@ -370,16 +368,16 @@ public void run() { @Test public void testVisibilityLabelsOnRSRestart() throws Exception { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - List regionServerThreads = TEST_UTIL.getHBaseCluster() - .getRegionServerThreads(); + List regionServerThreads = + TEST_UTIL.getHBaseCluster().getRegionServerThreads(); for (RegionServerThread rsThread : regionServerThreads) { rsThread.getRegionServer().abort("Aborting "); } // Start one new RS RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer(); waitForLabelsRegionAvailability(rs.getRegionServer()); - try (Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL - + ")", PRIVATE)) { + try (Table table = createTableAndWriteDataWithLabels(tableName, + "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET)); ResultScanner scanner = table.getScanner(s); @@ -406,8 +404,8 @@ protected void waitForLabelsRegionAvailability(HRegionServer regionServer) { @Test public void testVisibilityLabelsInGetThatDoesNotMatchAnyDefinedLabels() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - try (Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL - + ")", PRIVATE)) { + try (Table table = createTableAndWriteDataWithLabels(tableName, + "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) { Get get = new Get(row1); get.setAuthorizations(new 
Authorizations("SAMPLE")); Result result = table.get(get); @@ -502,7 +500,7 @@ protected List extractAuths(String user, List results) { Cell userAuthCell = result.getColumnLatestCell(LABELS_TABLE_FAMILY, Bytes.toBytes(user)); if (userAuthCell != null) { auths.add(Bytes.toString(labelCell.getValueArray(), labelCell.getValueOffset(), - labelCell.getValueLength())); + labelCell.getValueLength())); } } return auths; @@ -533,14 +531,13 @@ public Void run() throws Exception { assertEquals(3, resultList.size()); assertTrue(resultList.get(0).getException().getValue().isEmpty()); assertEquals("org.apache.hadoop.hbase.DoNotRetryIOException", - resultList.get(1).getException().getName()); + resultList.get(1).getException().getName()); assertTrue(Bytes.toString(resultList.get(1).getException().getValue().toByteArray()) - .contains( - "org.apache.hadoop.hbase.security.visibility.InvalidLabelException: " - + "Label 'public' is not set for the user testUser")); + .contains("org.apache.hadoop.hbase.security.visibility.InvalidLabelException: " + + "Label 'public' is not set for the user testUser")); assertTrue(resultList.get(2).getException().getValue().isEmpty()); try (Connection connection = ConnectionFactory.createConnection(conf); - Table ht = connection.getTable(LABELS_TABLE_NAME)) { + Table ht = connection.getTable(LABELS_TABLE_NAME)) { ResultScanner scanner = ht.getScanner(new Scan()); Result result = null; List results = new ArrayList<>(); @@ -665,7 +662,7 @@ public void testUserShouldNotDoDDLOpOnLabelsTable() throws Exception { } try { ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("testFamily")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("testFamily")).build(); admin.addColumnFamily(LABELS_TABLE_NAME, columnFamilyDescriptor); fail("Lables table should not get altered by user."); } catch (Exception e) { @@ -677,20 +674,20 @@ public void testUserShouldNotDoDDLOpOnLabelsTable() throws Exception { } try { ColumnFamilyDescriptor familyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(VisibilityConstants.LABELS_TABLE_FAMILY) - .setBloomFilterType(BloomType.ROWCOL).build(); + ColumnFamilyDescriptorBuilder.newBuilder(VisibilityConstants.LABELS_TABLE_FAMILY) + .setBloomFilterType(BloomType.ROWCOL).build(); admin.modifyColumnFamily(LABELS_TABLE_NAME, familyDescriptor); fail("Lables table should not get altered by user."); } catch (Exception e) { } try { TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(LABELS_TABLE_NAME); + TableDescriptorBuilder.newBuilder(LABELS_TABLE_NAME); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); - columnFamilyDescriptor = ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("f2")).build(); + columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f2")).build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); admin.modifyTable(tableDescriptorBuilder.build()); fail("Lables table should not get altered by user."); @@ -709,9 +706,9 @@ public void testMultipleVersions() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); // Default max versions is 1. 
TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2).setMaxVersions(5).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2).setMaxVersions(5).build()) + .build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put put = new Put(r1); @@ -762,30 +759,30 @@ public void testMultipleVersions() throws Exception { Cell cell = result.getColumnLatestCell(fam2, qual); assertNotNull(cell); assertTrue(Bytes.equals(v1, 0, v1.length, cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); cell = result.getColumnLatestCell(fam2, qual2); assertNotNull(cell); assertTrue(Bytes.equals(v1, 0, v1.length, cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); result = scanner.next(); assertTrue(Bytes.equals(r2, result.getRow())); cell = result.getColumnLatestCell(fam, qual); assertNotNull(cell); assertTrue(Bytes.equals(v2, 0, v2.length, cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); cell = result.getColumnLatestCell(fam, qual2); assertNotNull(cell); assertTrue(Bytes.equals(v2, 0, v2.length, cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); cell = result.getColumnLatestCell(fam2, qual); assertNotNull(cell); assertTrue(Bytes.equals(v2, 0, v2.length, cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); cell = result.getColumnLatestCell(fam2, qual2); assertNotNull(cell); assertTrue(Bytes.equals(v2, 0, v2.length, cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + cell.getValueLength())); } } @@ -794,9 +791,9 @@ public void testMutateRow() throws Exception { final byte[] qual2 = Bytes.toBytes("qual2"); TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); - try (Table table = TEST_UTIL.getConnection().getTable(tableName)){ + try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put p1 = new Put(row1); p1.addColumn(fam, qual, value); p1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); @@ -829,7 +826,7 @@ public void testFlushedFileWithVisibilityTags() throws Exception { final byte[] qual2 = Bytes.toBytes("qual2"); TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put p1 = new Put(row1); @@ -860,7 +857,7 @@ static Table createTableAndWriteDataWithLabels(TableName tableName, String... 
la throws Exception { List puts = new ArrayList<>(labelExps.length); for (int i = 0; i < labelExps.length; i++) { - Put put = new Put(Bytes.toBytes("row" + (i+1))); + Put put = new Put(Bytes.toBytes("row" + (i + 1))); put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value); put.setCellVisibility(new CellVisibility(labelExps[i])); puts.add(put); @@ -873,18 +870,18 @@ static Table createTableAndWriteDataWithLabels(TableName tableName, String... la public static void addLabels() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, COPYRIGHT, ACCENT, - UNICODE_VIS_TAG, UC1, UC2 }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - } catch (Throwable t) { - throw new IOException(t); - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, COPYRIGHT, ACCENT, + UNICODE_VIS_TAG, UC1, UC2 }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + } catch (Throwable t) { + throw new IOException(t); + } + return null; + } + }; SUPERUSER.runAs(action); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java index 64905308019c..fa2f585fc229 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category({ SecurityTests.class, MediumTests.class }) public class TestVisibilityLabelsOnNewVersionBehaviorTable extends VisibilityLabelsWithDeletesTestBase { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java index 02393d7cbdeb..4a95a388f535 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestVisibilityLabelsOpWithDifferentUsersNoACL { @ClassRule @@ -72,7 +72,7 @@ public static void setupBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); VisibilityTestUtil.enableVisiblityLabels(conf); String currentUser = User.getCurrent().getName(); - conf.set("hbase.superuser", "admin,"+currentUser); + conf.set("hbase.superuser", "admin," + currentUser); TEST_UTIL.startMiniCluster(2); // Wait for the labels table to become available @@ -92,20 +92,21 @@ public static void tearDownAfterClass() throws Exception { public void testLabelsTableOpsWithDifferentUsers() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE }, "user1"); - } catch (Throwable e) { - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE }, + "user1"); + } catch (Throwable e) { + } + return null; + } + }; VisibilityLabelsResponse response = SUPERUSER.runAs(action); assertTrue(response.getResult(0).getException().getValue().isEmpty()); assertTrue(response.getResult(1).getException().getValue().isEmpty()); - // Ideally this should not be allowed. this operation should fail or do nothing. + // Ideally this should not be allowed. this operation should fail or do nothing. 
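The re-indented blocks above repeat one idiom: a VisibilityClient call wrapped in a PrivilegedExceptionAction and executed with User.runAs, with the tests asserting that the response carries AccessDeniedException when an ordinary user attempts it. The idiom in isolation, as a rough sketch (the user name and labels are illustrative):

```java
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.VisibilityClient;
import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;

public class RunAsAuthsSketch {
  static VisibilityLabelsResponse setAuthsAs(User user, Configuration conf) throws Exception {
    // For non-superusers the response carries AccessDeniedException, which the tests assert on.
    return user.runAs(new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
      @Override
      public VisibilityLabelsResponse run() throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          return VisibilityClient.setAuths(conn, new String[] { "confidential", "private" },
            "user1");
        } catch (Throwable t) {
          // setAuths declares Throwable, so rewrap it the same way the tests do.
          throw new IOException(t);
        }
      }
    });
  }
}
```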
action = new PrivilegedExceptionAction() { @Override public VisibilityLabelsResponse run() throws Exception { @@ -117,22 +118,22 @@ public VisibilityLabelsResponse run() throws Exception { } }; response = NORMAL_USER1.runAs(action); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response - .getResult(0).getException().getName()); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response - .getResult(1).getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + response.getResult(0).getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + response.getResult(1).getException().getName()); PrivilegedExceptionAction action1 = new PrivilegedExceptionAction() { - @Override - public GetAuthsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.getAuths(conn, "user1"); - } catch (Throwable e) { - } - return null; - } - }; + @Override + public GetAuthsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.getAuths(conn, "user1"); + } catch (Throwable e) { + } + return null; + } + }; GetAuthsResponse authsResponse = NORMAL_USER.runAs(action1); assertTrue(authsResponse.getAuthList().isEmpty()); authsResponse = NORMAL_USER1.runAs(action1); @@ -148,21 +149,21 @@ public GetAuthsResponse run() throws Exception { PrivilegedExceptionAction action2 = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.clearAuths(conn, new String[] { - CONFIDENTIAL, PRIVATE }, "user1"); - } catch (Throwable e) { - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.clearAuths(conn, new String[] { CONFIDENTIAL, PRIVATE }, + "user1"); + } catch (Throwable e) { + } + return null; + } + }; response = NORMAL_USER1.runAs(action2); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response - .getResult(0).getException().getName()); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response - .getResult(1).getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + response.getResult(0).getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + response.getResult(1).getException().getName()); response = SUPERUSER.runAs(action2); assertTrue(response.getResult(0).getException().getValue().isEmpty()); assertTrue(response.getResult(1).getException().getValue().isEmpty()); @@ -173,17 +174,17 @@ public VisibilityLabelsResponse run() throws Exception { private static void addLabels() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, CONFIDENTIAL, PRIVATE }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - } catch (Throwable t) { - throw new IOException(t); - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, CONFIDENTIAL, PRIVATE }; + try (Connection conn = 
ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + } catch (Throwable t) { + throw new IOException(t); + } + return null; + } + }; SUPERUSER.runAs(action); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index 4b31f836c55e..0e69801b8527 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -105,8 +105,8 @@ public class TestVisibilityLabelsReplication { public static final String COPYRIGHT = "\u00A9ABC"; public static final String ACCENT = "\u0941"; public static final String SECRET = "secret"; - public static final String UNICODE_VIS_TAG = COPYRIGHT + "\"" + ACCENT + "\\" + SECRET + "\"" - + "\u0027&\\"; + public static final String UNICODE_VIS_TAG = + COPYRIGHT + "\"" + ACCENT + "\\" + SECRET + "\"" + "\u0027&\\"; public static HBaseTestingUtil TEST_UTIL; public static HBaseTestingUtil TEST_UTIL1; public static final byte[] row1 = Bytes.toBytes("row1"); @@ -120,13 +120,12 @@ public class TestVisibilityLabelsReplication { protected static ZKWatcher zkw2; protected static int expected[] = { 4, 6, 4, 0, 3 }; private static final String NON_VISIBILITY = "non-visibility"; - protected static String[] expectedVisString = { - "(\"secret\"&\"topsecret\"&\"public\")|(\"topsecret\"&\"confidential\")", - "(\"public\"&\"private\")|(\"topsecret\"&\"private\")|" - + "(\"confidential\"&\"public\")|(\"topsecret\"&\"confidential\")", - "(!\"topsecret\"&\"secret\")|(!\"topsecret\"&\"confidential\")", - "(\"secret\"&\"" + COPYRIGHT + "\\\"" + ACCENT + "\\\\" + SECRET + "\\\"" + "\u0027&\\\\" - + "\")" }; + protected static String[] expectedVisString = + { "(\"secret\"&\"topsecret\"&\"public\")|(\"topsecret\"&\"confidential\")", + "(\"public\"&\"private\")|(\"topsecret\"&\"private\")|" + + "(\"confidential\"&\"public\")|(\"topsecret\"&\"confidential\")", + "(!\"topsecret\"&\"secret\")|(!\"topsecret\"&\"confidential\")", "(\"secret\"&\"" + + COPYRIGHT + "\\\"" + ACCENT + "\\\\" + SECRET + "\\\"" + "\u0027&\\\\" + "\")" }; @Rule public final TestName TEST_NAME = new TestName(); @@ -151,16 +150,15 @@ public void setup() throws Exception { conf.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName()); VisibilityTestUtil.enableVisiblityLabels(conf); conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - VisibilityReplication.class.getName()); - conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - SimpleCP.class.getName()); + VisibilityReplication.class.getName()); + conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, SimpleCP.class.getName()); // Have to reset conf1 in case zk cluster location different // than default conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, - ScanLabelGenerator.class); + ScanLabelGenerator.class); conf.set("hbase.superuser", User.getCurrent().getShortName()); SUPERUSER = User.createUserForTesting(conf, User.getCurrent().getShortName(), - new String[] { "supergroup" }); + 
new String[] { "supergroup" }); // User.createUserForTesting(conf, User.getCurrent().getShortName(), new // String[] { "supergroup" }); USER1 = User.createUserForTesting(conf, "user1", new String[] {}); @@ -177,7 +175,7 @@ public void setup() throws Exception { conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false); conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName()); conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - TestCoprocessorForTagsAtSink.class.getName()); + TestCoprocessorForTagsAtSink.class.getName()); // setVisibilityLabelServiceImpl(conf1); USER1 = User.createUserForTesting(conf1, "user1", new String[] {}); TEST_UTIL1 = new HBaseTestingUtil(conf1); @@ -190,14 +188,14 @@ public void setup() throws Exception { TEST_UTIL1.startMiniCluster(1); admin = TEST_UTIL.getAdmin(); - ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() - .setClusterKey(TEST_UTIL1.getClusterKey()).build(); + ReplicationPeerConfig rpc = + ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL1.getClusterKey()).build(); admin.addReplicationPeer("2", rpc); Admin hBaseAdmin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(fam).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build(); + TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(ColumnFamilyDescriptorBuilder + .newBuilder(fam).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build()).build(); try { hBaseAdmin.createTable(tableDescriptor); } finally { @@ -220,19 +218,20 @@ public void setup() throws Exception { protected static void setVisibilityLabelServiceImpl(Configuration conf) { conf.setClass(VisibilityLabelServiceManager.VISIBILITY_LABEL_SERVICE_CLASS, - DefaultVisibilityLabelServiceImpl.class, VisibilityLabelService.class); + DefaultVisibilityLabelServiceImpl.class, VisibilityLabelService.class); } @Test public void testVisibilityReplication() throws Exception { int retry = 0; - try (Table table = writeData(TABLE_NAME, "(" + SECRET + "&" + PUBLIC + ")" + "|(" + CONFIDENTIAL - + ")&(" + TOPSECRET + ")", "(" + PRIVATE + "|" + CONFIDENTIAL + ")&(" + PUBLIC + "|" - + TOPSECRET + ")", "(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET, - CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET)) { + try (Table table = writeData(TABLE_NAME, + "(" + SECRET + "&" + PUBLIC + ")" + "|(" + CONFIDENTIAL + ")&(" + TOPSECRET + ")", + "(" + PRIVATE + "|" + CONFIDENTIAL + ")&(" + PUBLIC + "|" + TOPSECRET + ")", + "(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" 
+ TOPSECRET, + CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET)) { Scan s = new Scan(); - s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE, TOPSECRET, - UNICODE_VIS_TAG)); + s.setAuthorizations( + new Authorizations(SECRET, CONFIDENTIAL, PRIVATE, TOPSECRET, UNICODE_VIS_TAG)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(4); @@ -240,23 +239,23 @@ public void testVisibilityReplication() throws Exception { CellScanner cellScanner = next[0].cellScanner(); cellScanner.advance(); Cell current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row1, 0, row1.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row1, 0, row1.length)); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row2, 0, row2.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row2, 0, row2.length)); cellScanner = next[2].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row3, 0, row3.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row3, 0, row3.length)); cellScanner = next[3].cellScanner(); cellScanner.advance(); current = cellScanner.current(); - assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), row4, 0, row4.length)); + assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), + row4, 0, row4.length)); try (Table table2 = TEST_UTIL1.getConnection().getTable(TABLE_NAME)) { s = new Scan(); // Ensure both rows are replicated @@ -290,7 +289,7 @@ protected static void doAssert(byte[] row, String visTag) throws Exception { boolean tagFound = false; for (Cell cell : cells) { if ((Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0, - row.length))) { + row.length))) { List tags = PrivateCellUtil.getTags(cell); for (Tag tag : tags) { if (tag.getType() == TagType.STRING_VIS_TAG_TYPE) { @@ -305,13 +304,12 @@ protected static void doAssert(byte[] row, String visTag) throws Exception { } protected void verifyGet(final byte[] row, final String visString, final int expected, - final boolean nullExpected, final String... auths) throws IOException, - InterruptedException { + final boolean nullExpected, final String... 
auths) throws IOException, InterruptedException { PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf1); - Table table2 = connection.getTable(TABLE_NAME)) { + Table table2 = connection.getTable(TABLE_NAME)) { CellScanner cellScanner; Cell current; Get get = new Get(row); @@ -354,33 +352,34 @@ public Void run() throws Exception { public static void addLabels() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, UNICODE_VIS_TAG }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - } catch (Throwable t) { - throw new IOException(t); - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, UNICODE_VIS_TAG }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + } catch (Throwable t) { + throw new IOException(t); + } + return null; + } + }; SUPERUSER.runAs(action); } public static void setAuths(final Configuration conf) throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.setAuths(conn, new String[] { SECRET, - CONFIDENTIAL, PRIVATE, TOPSECRET, UNICODE_VIS_TAG }, "user1"); - } catch (Throwable e) { - throw new Exception(e); - } - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.setAuths(conn, + new String[] { SECRET, CONFIDENTIAL, PRIVATE, TOPSECRET, UNICODE_VIS_TAG }, + "user1"); + } catch (Throwable e) { + throw new Exception(e); + } + } + }; VisibilityLabelsResponse response = SUPERUSER.runAs(action); } @@ -399,8 +398,9 @@ static Table writeData(TableName tableName, String... labelExps) throws Exceptio table.put(puts); return table; } + // A simple BaseRegionbserver impl that allows to add a non-visibility tag from the - // attributes of the Put mutation. The existing cells in the put mutation is overwritten + // attributes of the Put mutation. The existing cells in the put mutation is overwritten // with a new cell that has the visibility tags and the non visibility tag public static class SimpleCP implements RegionCoprocessor, RegionObserver { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java index ebecd8eaa7ca..d18d43719893 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
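SimpleCP and TestCoprocessorForTagsAtSink, whose configuration lines are re-wrapped above, are region coprocessors installed purely through configuration keys. A stripped-down sketch of that mechanism; the observer class below is a hypothetical no-op and, unlike SimpleCP, installs no hooks.

```java
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class ObserverWiringSketch {
  // A do-nothing observer; real ones such as SimpleCP override RegionObserver hooks
  // to rewrite cells or capture tags at the replication sink.
  public static class NoOpObserver implements RegionCoprocessor, RegionObserver {
    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }
  }

  static void register(Configuration conf) {
    // Every user region created from this configuration loads the observer.
    conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      NoOpObserver.class.getName());
  }
}
```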
See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestVisibilityLabelsWithACL { @ClassRule @@ -88,10 +88,10 @@ public static void setupBeforeClass() throws Exception { // setup configuration conf = TEST_UTIL.getConfiguration(); SecureTestUtil.enableSecurity(conf); - conf.set("hbase.coprocessor.master.classes", AccessController.class.getName() + "," - + VisibilityController.class.getName()); - conf.set("hbase.coprocessor.region.classes", AccessController.class.getName() + "," - + VisibilityController.class.getName()); + conf.set("hbase.coprocessor.master.classes", + AccessController.class.getName() + "," + VisibilityController.class.getName()); + conf.set("hbase.coprocessor.region.classes", + AccessController.class.getName() + "," + VisibilityController.class.getName()); TEST_UTIL.startMiniCluster(2); TEST_UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME.getName(), 50000); @@ -106,10 +106,10 @@ public static void setupBeforeClass() throws Exception { // Grant users EXEC privilege on the labels table. For the purposes of this // test, we want to insure that access is denied even with the ability to access // the endpoint. - SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER1.getShortName(), LABELS_TABLE_NAME, - null, null, Permission.Action.EXEC); - SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), LABELS_TABLE_NAME, - null, null, Permission.Action.EXEC); + SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER1.getShortName(), LABELS_TABLE_NAME, null, + null, Permission.Action.EXEC); + SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), LABELS_TABLE_NAME, null, + null, Permission.Action.EXEC); } @AfterClass @@ -123,17 +123,17 @@ public void testScanForUserWithFewerLabelAuthsThanLabelsInScanAuthorizations() t String user = "user2"; VisibilityClient.setAuths(TEST_UTIL.getConnection(), auths, user); TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL - + "&!" + PRIVATE, SECRET + "&!" + PRIVATE); - SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, - null, null, Permission.Action.READ); + final Table table = createTableAndWriteDataWithLabels(tableName, + SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" 
+ PRIVATE); + SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, null, null, + Permission.Action.READ); PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); try (Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(table.getName())) { + Table t = connection.getTable(table.getName())) { ResultScanner scanner = t.getScanner(s); Result result = scanner.next(); assertTrue(!result.isEmpty()); @@ -155,15 +155,15 @@ public void testScanForSuperUserWithFewerLabelAuths() throws Throwable { VisibilityClient.setAuths(conn, auths, user); } TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL - + "&!" + PRIVATE, SECRET + "&!" + PRIVATE); + final Table table = createTableAndWriteDataWithLabels(tableName, + SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" + PRIVATE); PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); try (Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(table.getName())) { + Table t = connection.getTable(table.getName())) { ResultScanner scanner = t.getScanner(s); Result[] result = scanner.next(5); assertTrue(result.length == 2); @@ -180,15 +180,15 @@ public void testGetForSuperUserWithFewerLabelAuths() throws Throwable { String user = "admin"; VisibilityClient.setAuths(TEST_UTIL.getConnection(), auths, user); TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL - + "&!" + PRIVATE, SECRET + "&!" + PRIVATE); + final Table table = createTableAndWriteDataWithLabels(tableName, + SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" 
+ PRIVATE); PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { Get g = new Get(row1); g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); try (Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(table.getName())) { + Table t = connection.getTable(table.getName())) { Result result = t.get(g); assertTrue(!result.isEmpty()); } @@ -208,17 +208,17 @@ public void testVisibilityLabelsForUserWithNoAuths() throws Throwable { } TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); final Table table = createTableAndWriteDataWithLabels(tableName, SECRET); - SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER1.getShortName(), tableName, - null, null, Permission.Action.READ); - SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, - null, null, Permission.Action.READ); + SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER1.getShortName(), tableName, null, null, + Permission.Action.READ); + SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, null, null, + Permission.Action.READ); PrivilegedExceptionAction getAction = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { Get g = new Get(row1); g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); try (Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(table.getName())) { + Table t = connection.getTable(table.getName())) { Result result = t.get(g); assertTrue(result.isEmpty()); } @@ -232,20 +232,20 @@ public Void run() throws Exception { public void testLabelsTableOpsWithDifferentUsers() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.addLabels(conn, new String[] { "l1", "l2" }); - } catch (Throwable e) { - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.addLabels(conn, new String[] { "l1", "l2" }); + } catch (Throwable e) { + } + return null; + } + }; VisibilityLabelsResponse response = NORMAL_USER1.runAs(action); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response - .getResult(0).getException().getName()); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response - .getResult(1).getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + response.getResult(0).getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + response.getResult(1).getException().getName()); action = new PrivilegedExceptionAction() { @Override @@ -258,10 +258,10 @@ public VisibilityLabelsResponse run() throws Exception { } }; response = NORMAL_USER1.runAs(action); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response - .getResult(0).getException().getName()); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response - .getResult(1).getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + response.getResult(0).getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + 
response.getResult(1).getException().getName()); action = new PrivilegedExceptionAction() { @Override @@ -281,21 +281,20 @@ public VisibilityLabelsResponse run() throws Exception { @Override public VisibilityLabelsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.clearAuths(conn, new String[] { - CONFIDENTIAL, PRIVATE }, "user1"); + return VisibilityClient.clearAuths(conn, new String[] { CONFIDENTIAL, PRIVATE }, "user1"); } catch (Throwable e) { } return null; } }; response = NORMAL_USER1.runAs(action); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response.getResult(0) - .getException().getName()); - assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", response.getResult(1) - .getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + response.getResult(0).getException().getName()); + assertEquals("org.apache.hadoop.hbase.security.AccessDeniedException", + response.getResult(1).getException().getName()); - response = VisibilityClient.clearAuths(TEST_UTIL.getConnection(), new String[] { CONFIDENTIAL, - PRIVATE }, "user1"); + response = VisibilityClient.clearAuths(TEST_UTIL.getConnection(), + new String[] { CONFIDENTIAL, PRIVATE }, "user1"); assertTrue(response.getResult(0).getException().getValue().isEmpty()); assertTrue(response.getResult(1).getException().getValue().isEmpty()); @@ -303,15 +302,15 @@ public VisibilityLabelsResponse run() throws Exception { "user3"); PrivilegedExceptionAction action1 = new PrivilegedExceptionAction() { - @Override - public GetAuthsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.getAuths(conn, "user3"); - } catch (Throwable e) { - } - return null; - } - }; + @Override + public GetAuthsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.getAuths(conn, "user3"); + } catch (Throwable e) { + } + return null; + } + }; GetAuthsResponse authsResponse = NORMAL_USER1.runAs(action1); assertNull(authsResponse); authsResponse = SUPERUSER.runAs(action1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java index 7233f9c658b1..3608e6a4945c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestVisibilityLabelsWithCustomVisLabService extends TestVisibilityLabels { @ClassRule @@ -48,9 +48,9 @@ public static void setupBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); VisibilityTestUtil.enableVisiblityLabels(conf); conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, - ScanLabelGenerator.class); + ScanLabelGenerator.class); conf.setClass(VisibilityLabelServiceManager.VISIBILITY_LABEL_SERVICE_CLASS, - ExpAsStringVisibilityLabelServiceImpl.class, VisibilityLabelService.class); + ExpAsStringVisibilityLabelServiceImpl.class, VisibilityLabelService.class); conf.set("hbase.superuser", "admin"); TEST_UTIL.startMiniCluster(2); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); @@ -60,7 +60,8 @@ public static void setupBeforeClass() throws Exception { addLabels(); } - // Extending this test from super as we don't verify predefined labels in ExpAsStringVisibilityLabelServiceImpl + // Extending this test from super as we don't verify predefined labels in + // ExpAsStringVisibilityLabelServiceImpl @Override @Test public void testVisibilityLabelsInPutsThatDoesNotMatchAnyDefinedLabels() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java index 6e824b5d70f7..a0ae1dd23d8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,15 +57,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.ListLabelsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibilityLabels { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestVisibilityLabelsWithDefaultVisLabelService.class); - private static final Logger LOG = LoggerFactory.getLogger( - TestVisibilityLabelsWithDefaultVisLabelService.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestVisibilityLabelsWithDefaultVisLabelService.class); @BeforeClass public static void setupBeforeClass() throws Exception { @@ -73,7 +73,7 @@ public static void setupBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); VisibilityTestUtil.enableVisiblityLabels(conf); conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, - ScanLabelGenerator.class); + ScanLabelGenerator.class); conf.set("hbase.superuser", "admin"); TEST_UTIL.startMiniCluster(2); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); @@ -88,37 +88,37 @@ public static void setupBeforeClass() throws Exception { public void testAddLabels() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { "L1", SECRET, "L2", "invalid~", "L3" }; - VisibilityLabelsResponse response = null; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - response = VisibilityClient.addLabels(conn, labels); - } catch (Throwable e) { - throw new IOException(e); - } - List resultList = response.getResultList(); - assertEquals(5, resultList.size()); - assertTrue(resultList.get(0).getException().getValue().isEmpty()); - assertEquals("org.apache.hadoop.hbase.DoNotRetryIOException", resultList.get(1) - .getException().getName()); - assertTrue(Bytes.toString(resultList.get(1).getException().getValue().toByteArray()) - .contains( + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { "L1", SECRET, "L2", "invalid~", "L3" }; + VisibilityLabelsResponse response = null; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + response = VisibilityClient.addLabels(conn, labels); + } catch (Throwable e) { + throw new IOException(e); + } + List resultList = response.getResultList(); + assertEquals(5, resultList.size()); + assertTrue(resultList.get(0).getException().getValue().isEmpty()); + assertEquals("org.apache.hadoop.hbase.DoNotRetryIOException", + resultList.get(1).getException().getName()); + assertTrue( + Bytes.toString(resultList.get(1).getException().getValue().toByteArray()).contains( "org.apache.hadoop.hbase.security.visibility.LabelAlreadyExistsException: " + "Label 'secret' already exists")); - assertTrue(resultList.get(2).getException().getValue().isEmpty()); - assertTrue(resultList.get(3).getException().getValue().isEmpty()); - assertTrue(resultList.get(4).getException().getValue().isEmpty()); - return null; - } - }; + assertTrue(resultList.get(2).getException().getValue().isEmpty()); + assertTrue(resultList.get(3).getException().getValue().isEmpty()); + 
assertTrue(resultList.get(4).getException().getValue().isEmpty()); + return null; + } + }; SUPERUSER.runAs(action); } @Test public void testAddVisibilityLabelsOnRSRestart() throws Exception { - List regionServerThreads = TEST_UTIL.getHBaseCluster() - .getRegionServerThreads(); + List regionServerThreads = + TEST_UTIL.getHBaseCluster().getRegionServerThreads(); for (RegionServerThread rsThread : regionServerThreads) { rsThread.getRegionServer().abort("Aborting "); } @@ -129,30 +129,30 @@ public void testAddVisibilityLabelsOnRSRestart() throws Exception { do { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, "ABC", "XYZ" }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityLabelsResponse resp = VisibilityClient.addLabels(conn, labels); - List results = resp.getResultList(); - if (results.get(0).hasException()) { - NameBytesPair pair = results.get(0).getException(); - Throwable t = ProtobufUtil.toException(pair); - LOG.debug("Got exception writing labels", t); - if (t instanceof VisibilityControllerNotReadyException) { - vcInitialized.set(false); - LOG.warn("VisibilityController was not yet initialized"); - Threads.sleep(10); - } else { - vcInitialized.set(true); + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, "ABC", "XYZ" }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityLabelsResponse resp = VisibilityClient.addLabels(conn, labels); + List results = resp.getResultList(); + if (results.get(0).hasException()) { + NameBytesPair pair = results.get(0).getException(); + Throwable t = ProtobufUtil.toException(pair); + LOG.debug("Got exception writing labels", t); + if (t instanceof VisibilityControllerNotReadyException) { + vcInitialized.set(false); + LOG.warn("VisibilityController was not yet initialized"); + Threads.sleep(10); + } else { + vcInitialized.set(true); + } + } else LOG.debug("new labels added: " + resp); + } catch (Throwable t) { + throw new IOException(t); } - } else LOG.debug("new labels added: " + resp); - } catch (Throwable t) { - throw new IOException(t); - } - return null; - } - }; + return null; + } + }; SUPERUSER.runAs(action); } while (!vcInitialized.get()); // Scan the visibility label @@ -161,7 +161,7 @@ public VisibilityLabelsResponse run() throws Exception { int i = 0; try (Table ht = TEST_UTIL.getConnection().getTable(LABELS_TABLE_NAME); - ResultScanner scanner = ht.getScanner(s)) { + ResultScanner scanner = ht.getScanner(s)) { while (true) { Result next = scanner.next(); if (next == null) { @@ -178,30 +178,30 @@ public VisibilityLabelsResponse run() throws Exception { public void testListLabels() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public ListLabelsResponse run() throws Exception { - ListLabelsResponse response = null; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - response = VisibilityClient.listLabels(conn, null); - } catch (Throwable e) { - throw new IOException(e); - } - // The addLabels() in setup added: - // { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, COPYRIGHT, ACCENT, - // UNICODE_VIS_TAG, UC1, UC2 }; - // The previous tests added 2 more labels: ABC, XYZ - // The 'system' label is excluded. 
- List labels = response.getLabelList(); - assertEquals(12, labels.size()); - assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(SECRET)))); - assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(TOPSECRET)))); - assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(CONFIDENTIAL)))); - assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes("ABC")))); - assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes("XYZ")))); - assertFalse(labels.contains(ByteString.copyFrom(Bytes.toBytes(SYSTEM_LABEL)))); - return null; - } - }; + @Override + public ListLabelsResponse run() throws Exception { + ListLabelsResponse response = null; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + response = VisibilityClient.listLabels(conn, null); + } catch (Throwable e) { + throw new IOException(e); + } + // The addLabels() in setup added: + // { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, COPYRIGHT, ACCENT, + // UNICODE_VIS_TAG, UC1, UC2 }; + // The previous tests added 2 more labels: ABC, XYZ + // The 'system' label is excluded. + List labels = response.getLabelList(); + assertEquals(12, labels.size()); + assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(SECRET)))); + assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(TOPSECRET)))); + assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(CONFIDENTIAL)))); + assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes("ABC")))); + assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes("XYZ")))); + assertFalse(labels.contains(ByteString.copyFrom(Bytes.toBytes(SYSTEM_LABEL)))); + return null; + } + }; SUPERUSER.runAs(action); } @@ -209,22 +209,22 @@ public ListLabelsResponse run() throws Exception { public void testListLabelsWithRegEx() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public ListLabelsResponse run() throws Exception { - ListLabelsResponse response = null; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - response = VisibilityClient.listLabels(conn, ".*secret"); - } catch (Throwable e) { - throw new IOException(e); - } - // Only return the labels that end with 'secret' - List labels = response.getLabelList(); - assertEquals(2, labels.size()); - assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(SECRET)))); - assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(TOPSECRET)))); - return null; - } - }; + @Override + public ListLabelsResponse run() throws Exception { + ListLabelsResponse response = null; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + response = VisibilityClient.listLabels(conn, ".*secret"); + } catch (Throwable e) { + throw new IOException(e); + } + // Only return the labels that end with 'secret' + List labels = response.getLabelList(); + assertEquals(2, labels.size()); + assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(SECRET)))); + assertTrue(labels.contains(ByteString.copyFrom(Bytes.toBytes(TOPSECRET)))); + return null; + } + }; SUPERUSER.runAs(action); } @@ -232,9 +232,9 @@ public ListLabelsResponse run() throws Exception { public void testVisibilityLabelsOnWALReplay() throws Exception { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); try (Table table = createTableAndWriteDataWithLabels(tableName, - "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) { - List regionServerThreads = TEST_UTIL.getHBaseCluster() - .getRegionServerThreads(); + "(" + SECRET + "|" + CONFIDENTIAL + ")", 
PRIVATE)) { + List regionServerThreads = + TEST_UTIL.getHBaseCluster().getRegionServerThreads(); for (RegionServerThread rsThread : regionServerThreads) { rsThread.getRegionServer().abort("Aborting "); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java index 425b26b6eca6..b5350d493e56 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public class TestVisibilityLabelsWithDeletes extends VisibilityLabelsWithDeletes @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVisibilityLabelsWithDeletes.class); + HBaseClassTestRule.forClass(TestVisibilityLabelsWithDeletes.class); @Override protected Table createTable(byte[] fam) throws IOException { @@ -98,7 +98,7 @@ public void testVisibilityLabelsWithDeleteColumnsWithMultipleVersions() throws E @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")")); @@ -154,7 +154,7 @@ public void testVisibilityLabelsWithDeleteColumnsWithMultipleVersionsNoTimestamp @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d1 = new Delete(row1); d1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d1.addColumns(fam, qual); @@ -203,7 +203,7 @@ public void testVisibilityLabelsWithDeleteColumnsNoMatchVisExpWithMultipleVersio @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumns(fam, qual); @@ -256,7 +256,7 @@ public void testVisibilityLabelsWithDeleteFamilyWithMultipleVersionsNoTimestamp( @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d1 = new Delete(row1); d1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d1.addFamily(fam); @@ -309,7 +309,7 @@ public void testDeleteColumnsWithoutAndWithVisibilityLabels() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); @@ -328,7 +328,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try 
(Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); @@ -358,7 +358,7 @@ public void testDeleteColumnsWithAndWithoutVisibilityLabels() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); @@ -376,7 +376,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); @@ -405,7 +405,7 @@ public void testDeleteFamiliesWithoutAndWithVisibilityLabels() throws Exception @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); @@ -424,7 +424,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); @@ -454,7 +454,7 @@ public void testDeleteFamiliesWithAndWithoutVisibilityLabels() throws Exception @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); @@ -472,7 +472,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); @@ -501,7 +501,7 @@ public void testDeletesWithoutAndWithVisibilityLabels() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); // The delete would not be able to apply it because of visibility mismatch @@ -521,7 +521,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); @@ -552,7 +552,7 @@ public void 
testVisibilityLabelsWithDeleteFamilyWithPutsReAppearing() throws Exc @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamily(fam); @@ -578,7 +578,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addFamily(fam); @@ -622,7 +622,7 @@ public void testVisibilityLabelsWithDeleteColumnsWithPutsReAppearing() throws Ex @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumns(fam, qual); @@ -648,7 +648,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumns(fam, qual); @@ -691,7 +691,7 @@ public void testVisibilityCombinations() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumns(fam, qual, 126L); @@ -701,7 +701,7 @@ public Void run() throws Exception { } try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumn(fam, qual, 123L); @@ -748,7 +748,7 @@ public void testVisibilityLabelsWithDeleteColumnWithSpecificVersionWithPutsReApp @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumn(fam, qual, 123L); @@ -756,7 +756,7 @@ public Void run() throws Exception { } try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumn(fam, qual, 123L); @@ -801,7 +801,7 @@ public Void run() throws Exception { d3.addFamily(fam); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { table.delete(createList(d1, d2, d3)); } catch (Throwable t) { throw new IOException(t); @@ -846,7 +846,7 @@ public Void run() throws Exception { d2.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); 
d2.addColumns(fam, qual); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { table.delete(createList(d1, d2)); } catch (Throwable t) { throw new IOException(t); @@ -1020,7 +1020,7 @@ public void testDeleteColumnWithSpecificTimeStampUsingMultipleVersionsUnMatching @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")")); @@ -1085,7 +1085,7 @@ public void testDeleteColumnWithLatestTimeStampUsingMultipleVersions() throws Ex @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual); @@ -1148,7 +1148,7 @@ public void testDeleteColumnWithLatestTimeStampWhenNoVersionMatches() throws Exc @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumn(fam, qual); @@ -1232,7 +1232,7 @@ public void testDeleteColumnWithLatestTimeStampUsingMultipleVersionsAfterCompact @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual); @@ -1298,7 +1298,7 @@ public void testDeleteFamilyLatestTimeStampWithMulipleVersions() throws Exceptio @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addFamily(fam); @@ -1350,7 +1350,7 @@ public Void run() throws Exception { d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumns(fam, qual, 125L); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -1420,7 +1420,7 @@ public Void run() throws Exception { d2.addColumns(fam, qual1, 125L); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { table.delete(createList(d1, d2)); } catch (Throwable t) { throw new IOException(t); @@ -1463,7 +1463,7 @@ public Void run() throws Exception { d2.addColumns(fam, qual1, 126L); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { table.delete(createList(d1, d2)); } catch (Throwable t) { 
throw new IOException(t); @@ -1491,7 +1491,7 @@ public void testDeleteFamilyWithoutCellVisibilityWithMulipleVersions() throws Ex @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addFamily(fam); table.delete(d); @@ -1529,7 +1529,7 @@ public void testDeleteFamilyLatestTimeStampWithMulipleVersionsWithoutCellVisibil @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addFamily(fam); @@ -1592,7 +1592,7 @@ public void testDeleteFamilySpecificTimeStampWithMulipleVersions() throws Except @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")")); @@ -1647,7 +1647,7 @@ public void testScanAfterCompaction() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")")); @@ -1700,7 +1700,7 @@ public void testDeleteFamilySpecificTimeStampWithMulipleVersionsDoneTwice() thro @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -1752,7 +1752,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -1794,17 +1794,17 @@ public Void run() throws Exception { @Test public void testMultipleDeleteFamilyVersionWithDiffLabels() throws Exception { PrivilegedExceptionAction action = - new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE, SECRET }, - SUPERUSER.getShortName()); - } catch (Throwable e) { + new PrivilegedExceptionAction() { + @Override + public VisibilityLabelsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE, SECRET }, + SUPERUSER.getShortName()); + } catch (Throwable e) { + } + return null; } - return null; - } - }; + }; SUPERUSER.runAs(action); 
final TableName tableName = TableName.valueOf(testName.getMethodName()); try (Table table = doPuts(tableName)) { @@ -1812,7 +1812,7 @@ public VisibilityLabelsResponse run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamilyVersion(fam, 123L); @@ -1864,7 +1864,7 @@ public void testSpecificDeletesFollowedByDeleteFamily() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -1910,7 +1910,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamily(fam); @@ -1945,17 +1945,17 @@ public Void run() throws Exception { @Test public void testSpecificDeletesFollowedByDeleteFamily1() throws Exception { PrivilegedExceptionAction action = - new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE, SECRET }, - SUPERUSER.getShortName()); - } catch (Throwable e) { + new PrivilegedExceptionAction() { + @Override + public VisibilityLabelsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE, SECRET }, + SUPERUSER.getShortName()); + } catch (Throwable e) { + } + return null; } - return null; - } - }; + }; SUPERUSER.runAs(action); final TableName tableName = TableName.valueOf(testName.getMethodName()); try (Table table = doPuts(tableName)) { @@ -1963,7 +1963,7 @@ public VisibilityLabelsResponse run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2010,7 +2010,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamily(fam); @@ -2051,7 +2051,7 @@ public void testDeleteColumnSpecificTimeStampWithMulipleVersionsDoneTwice() thro @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); 
d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual, 125L); @@ -2102,7 +2102,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2156,7 +2156,7 @@ public void testDeleteColumnSpecificTimeStampWithMulipleVersionsDoneTwice1() thr @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + CONFIDENTIAL + "&" + PRIVATE + ")" + "|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2208,7 +2208,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual, 127L); @@ -2267,7 +2267,7 @@ public void testDeleteColumnSpecificTimeStampWithMulipleVersionsDoneTwice2() thr @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2324,7 +2324,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2384,7 +2384,7 @@ public void testDeleteColumnAndDeleteFamilylSpecificTimeStampWithMulipleVersion( @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual, 125L); @@ -2435,7 +2435,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2484,7 +2484,7 @@ public void testDiffDeleteTypesForTheSameCellUsingMultipleVersions() throws Exce @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( 
"(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2536,7 +2536,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility( "(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2589,7 +2589,7 @@ public void testDeleteColumnLatestWithNoCellVisibility() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addColumn(fam, qual, 125L); table.delete(d); @@ -2613,7 +2613,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addColumns(fam, qual, 125L); table.delete(d); @@ -2638,7 +2638,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addFamily(fam, 125L); table.delete(d); @@ -2663,7 +2663,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addFamily(fam); table.delete(d); @@ -2688,7 +2688,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addColumns(fam, qual); table.delete(d); @@ -2713,7 +2713,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addFamilyVersion(fam, 126L); table.delete(d); @@ -2788,7 +2788,7 @@ public void testVisibilityExpressionWithNotEqualORCondition() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addColumn(fam, qual, 124L); d.setCellVisibility(new CellVisibility(PRIVATE)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java index a37e82c929ad..e1f9f4c2628c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestVisibilityLabelsWithSLGStack { @ClassRule @@ -113,17 +113,17 @@ public void testWithSAGStack() throws Exception { private static void addLabels() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, CONFIDENTIAL }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - } catch (Throwable t) { - throw new IOException(t); - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, CONFIDENTIAL }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + } catch (Throwable t) { + throw new IOException(t); + } + return null; + } + }; SUPERUSER.runAs(action); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java index 634b36dbc8eb..cc35666ade4e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) public class TestVisibilityLablesWithGroups { @ClassRule @@ -95,7 +95,7 @@ public static void setupBeforeClass() throws Exception { // 'admin' has super user permission because it is part of the 'supergroup' SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); // 'test' user will inherit 'testgroup' visibility labels - TESTUSER = User.createUserForTesting(conf, "test", new String[] {"testgroup" }); + TESTUSER = User.createUserForTesting(conf, "test", new String[] { "testgroup" }); // Wait for the labels table to become available TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); @@ -126,7 +126,7 @@ public void testGroupAuths() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Put put = new Put(ROW_1); put.addColumn(CF, Q1, HConstants.LATEST_TIMESTAMP, value1); put.setCellVisibility(new CellVisibility(SECRET)); @@ -148,7 +148,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(1); @@ -159,7 +159,7 @@ public Void run() throws Exception { cellScanner.advance(); Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), ROW_1, 0, ROW_1.length)); + current.getRowLength(), ROW_1, 0, ROW_1.length)); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), Q1, 0, Q1.length)); assertTrue(Bytes.equals(current.getValueArray(), current.getValueOffset(), @@ -167,7 +167,7 @@ public Void run() throws Exception { cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), ROW_1, 0, ROW_1.length)); + current.getRowLength(), ROW_1, 0, ROW_1.length)); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), Q2, 0, Q2.length)); assertTrue(Bytes.equals(current.getValueArray(), current.getValueOffset(), @@ -175,7 +175,7 @@ public Void run() throws Exception { cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), ROW_1, 0, ROW_1.length)); + current.getRowLength(), ROW_1, 0, ROW_1.length)); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), Q3, 0, Q3.length)); assertTrue(Bytes.equals(current.getValueArray(), current.getValueOffset(), @@ -210,7 +210,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + 
Table table = connection.getTable(tableName)) { // Test scan with no auth attribute Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); @@ -222,7 +222,7 @@ public Void run() throws Exception { Cell current = cellScanner.current(); // test user can see value2 (CONFIDENTIAL) and value3 (no label) assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), ROW_1, 0, ROW_1.length)); + current.getRowLength(), ROW_1, 0, ROW_1.length)); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), Q2, 0, Q2.length)); assertTrue(Bytes.equals(current.getValueArray(), current.getValueOffset(), @@ -231,7 +231,7 @@ public Void run() throws Exception { current = cellScanner.current(); // test user can see value2 (CONFIDENTIAL) and value3 (no label) assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), - current.getRowLength(), ROW_1, 0, ROW_1.length)); + current.getRowLength(), ROW_1, 0, ROW_1.length)); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), Q3, 0, Q3.length)); assertTrue(Bytes.equals(current.getValueArray(), current.getValueOffset(), @@ -296,8 +296,7 @@ public Void run() throws Exception { public Void run() throws Exception { VisibilityLabelsResponse response = null; try (Connection conn = ConnectionFactory.createConnection(conf)) { - response = VisibilityClient.clearAuths(conn, new String[] { - CONFIDENTIAL }, "@testgroup"); + response = VisibilityClient.clearAuths(conn, new String[] { CONFIDENTIAL }, "@testgroup"); } catch (Throwable e) { fail("Should not have failed"); } @@ -305,7 +304,7 @@ public Void run() throws Exception { } }); - // Get testgroup's labels. No label is returned. + // Get testgroup's labels. No label is returned. SUPERUSER.runAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { @@ -329,10 +328,10 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Scan s1 = new Scan(); // test user is not entitled to 'CONFIDENTIAL' anymore since we dropped - // testgroup's label. test user has no auth labels now. + // testgroup's label. test user has no auth labels now. // scan's labels will be dropped on the server side. s1.setAuthorizations(new Authorizations(new String[] { SECRET, CONFIDENTIAL })); ResultScanner scanner1 = table.getScanner(s1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java index 1d0c6e60a98e..bb1c9471cd4a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; -@Category({SecurityTests.class, MediumTests.class}) +@Category({ SecurityTests.class, MediumTests.class }) /** * Test visibility by setting 'hbase.security.visibility.mutations.checkauths' to true */ @@ -74,6 +74,7 @@ public class TestVisibilityWithCheckAuths { public final TestName TEST_NAME = new TestName(); public static User SUPERUSER; public static User USER; + @BeforeClass public static void setupBeforeClass() throws Exception { // setup configuration @@ -81,11 +82,11 @@ public static void setupBeforeClass() throws Exception { VisibilityTestUtil.enableVisiblityLabels(conf); conf.setBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, true); conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, - ScanLabelGenerator.class); + ScanLabelGenerator.class); conf.set("hbase.superuser", "admin"); TEST_UTIL.startMiniCluster(2); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); - USER = User.createUserForTesting(conf, "user", new String[]{}); + USER = User.createUserForTesting(conf, "user", new String[] {}); // Wait for the labels table to become available TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); addLabels(); @@ -99,17 +100,17 @@ public static void tearDownAfterClass() throws Exception { public static void addLabels() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { TOPSECRET }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - } catch (Throwable t) { - throw new IOException(t); - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { TOPSECRET }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + } catch (Throwable t) { + throw new IOException(t); + } + return null; + } + }; SUPERUSER.runAs(action); } @@ -117,22 +118,22 @@ public VisibilityLabelsResponse run() throws Exception { public void testVerifyAccessDeniedForInvalidUserAuths() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.setAuths(conn, new String[] { TOPSECRET }, - USER.getShortName()); - } catch (Throwable e) { - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.setAuths(conn, new String[] { TOPSECRET }, + USER.getShortName()); + } catch (Throwable e) { + } + return null; + } + }; SUPERUSER.runAs(action); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); Admin hBaseAdmin = TEST_UTIL.getAdmin(); TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam).setMaxVersions(5).build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam).setMaxVersions(5).build()) + .build(); hBaseAdmin.createTable(tableDescriptor); try { 
TEST_UTIL.getAdmin().flush(tableName); @@ -140,7 +141,7 @@ public VisibilityLabelsResponse run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Put p = new Put(row1); p.setCellVisibility(new CellVisibility(PUBLIC + "&" + TOPSECRET)); p.addColumn(fam, qual, 125L, value); @@ -162,16 +163,16 @@ public Void run() throws Exception { public void testLabelsWithAppend() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.setAuths(conn, new String[] { TOPSECRET }, - USER.getShortName()); - } catch (Throwable e) { - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.setAuths(conn, new String[] { TOPSECRET }, + USER.getShortName()); + } catch (Throwable e) { + } + return null; + } + }; SUPERUSER.runAs(action); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); try (Table table = TEST_UTIL.createTable(tableName, fam)) { @@ -181,7 +182,7 @@ public VisibilityLabelsResponse run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Put put = new Put(row1); put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, val); put.setCellVisibility(new CellVisibility(TOPSECRET)); @@ -195,7 +196,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Append append = new Append(row1); append.addColumn(fam, qual, Bytes.toBytes("b")); table.append(append); @@ -208,7 +209,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Append append = new Append(row1); append.addColumn(fam, qual, Bytes.toBytes("c")); append.setCellVisibility(new CellVisibility(PUBLIC)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java index 3a97a239b253..c8bd1492fb38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,7 +54,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse; -@Category({SecurityTests.class, LargeTests.class}) +@Category({ SecurityTests.class, LargeTests.class }) public class TestWithDisabledAuthorization { @ClassRule @@ -70,7 +70,6 @@ public class TestWithDisabledAuthorization { private static final byte[] TEST_QUALIFIER = Bytes.toBytes("q"); private static final byte[] ZERO = Bytes.toBytes(0L); - @Rule public final TestName TEST_NAME = new TestName(); @@ -106,10 +105,8 @@ public static void setUpBeforeClass() throws Exception { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, - new String[] { SECRET, CONFIDENTIAL, PRIVATE }); - VisibilityClient.setAuths(conn, - new String[] { SECRET, CONFIDENTIAL }, + VisibilityClient.addLabels(conn, new String[] { SECRET, CONFIDENTIAL, PRIVATE }); + VisibilityClient.setAuths(conn, new String[] { SECRET, CONFIDENTIAL }, USER_RW.getShortName()); } catch (Throwable t) { fail("Should not have failed"); @@ -132,8 +129,7 @@ public void testManageUserAuths() throws Throwable { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.setAuths(conn, - new String[] { SECRET, CONFIDENTIAL }, + VisibilityClient.setAuths(conn, new String[] { SECRET, CONFIDENTIAL }, USER_RW.getShortName()); } catch (Throwable t) { fail("Should not have failed"); @@ -143,23 +139,22 @@ public Void run() throws Exception { }); PrivilegedExceptionAction> getAuths = - new PrivilegedExceptionAction>() { - @Override - public List run() throws Exception { - GetAuthsResponse authsResponse = null; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - authsResponse = VisibilityClient.getAuths(conn, - USER_RW.getShortName()); - } catch (Throwable t) { - fail("Should not have failed"); - } - List authsList = new ArrayList<>(authsResponse.getAuthList().size()); - for (ByteString authBS : authsResponse.getAuthList()) { - authsList.add(Bytes.toString(authBS.toByteArray())); + new PrivilegedExceptionAction>() { + @Override + public List run() throws Exception { + GetAuthsResponse authsResponse = null; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + authsResponse = VisibilityClient.getAuths(conn, USER_RW.getShortName()); + } catch (Throwable t) { + fail("Should not have failed"); + } + List authsList = new ArrayList<>(authsResponse.getAuthList().size()); + for (ByteString authBS : authsResponse.getAuthList()) { + authsList.add(Bytes.toString(authBS.toByteArray())); + } + return authsList; } - return authsList; - } - }; + }; List authsList = SUPERUSER.runAs(getAuths); assertEquals(2, authsList.size()); @@ -170,9 +165,7 @@ public List run() throws Exception { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.clearAuths(conn, - new String[] { SECRET }, - USER_RW.getShortName()); + VisibilityClient.clearAuths(conn, new String[] { SECRET }, USER_RW.getShortName()); } catch (Throwable t) { fail("Should not have failed"); } @@ -188,9 +181,7 @@ public Void run() throws Exception { @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.clearAuths(conn, - new String[] { CONFIDENTIAL }, - 
USER_RW.getShortName()); + VisibilityClient.clearAuths(conn, new String[] { CONFIDENTIAL }, USER_RW.getShortName()); } catch (Throwable t) { fail("Should not have failed"); } @@ -205,12 +196,8 @@ public Void run() throws Exception { @Test public void testPassiveVisibility() throws Exception { // No values should be filtered regardless of authorization if we are passive - try (Table t = createTableAndWriteDataWithLabels( - TableName.valueOf(TEST_NAME.getMethodName()), - SECRET, - PRIVATE, - SECRET + "|" + CONFIDENTIAL, - PRIVATE + "|" + CONFIDENTIAL)) { + try (Table t = createTableAndWriteDataWithLabels(TableName.valueOf(TEST_NAME.getMethodName()), + SECRET, PRIVATE, SECRET + "|" + CONFIDENTIAL, PRIVATE + "|" + CONFIDENTIAL)) { Scan s = new Scan(); s.setAuthorizations(new Authorizations()); try (ResultScanner scanner = t.getScanner(s)) { @@ -242,7 +229,7 @@ static Table createTableAndWriteDataWithLabels(TableName tableName, String... la throws Exception { List puts = new ArrayList<>(labelExps.length + 1); for (int i = 0; i < labelExps.length; i++) { - Put put = new Put(Bytes.toBytes("row" + (i+1))); + Put put = new Put(Bytes.toBytes("row" + (i + 1))); put.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, ZERO); put.setCellVisibility(new CellVisibility(labelExps[i])); puts.add(put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsWithDeletesTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsWithDeletesTestBase.java index 387becbdae7d..c315b826efda 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsWithDeletesTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsWithDeletesTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -96,18 +96,18 @@ public static void tearDownAfterClass() throws Exception { public static void addLabels() throws Exception { PrivilegedExceptionAction action = - new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - } catch (Throwable t) { - throw new IOException(t); + new PrivilegedExceptionAction() { + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + } catch (Throwable t) { + throw new IOException(t); + } + return null; } - return null; - } - }; + }; SUPERUSER.runAs(action); } @@ -115,17 +115,18 @@ public VisibilityLabelsResponse run() throws Exception { protected final void setAuths() throws IOException, InterruptedException { PrivilegedExceptionAction action = - new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - return VisibilityClient.setAuths(conn, - new String[] { CONFIDENTIAL, PRIVATE, SECRET, TOPSECRET }, SUPERUSER.getShortName()); - } catch (Throwable e) { + new PrivilegedExceptionAction() { + @Override + public VisibilityLabelsResponse run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + return VisibilityClient.setAuths(conn, + new String[] { CONFIDENTIAL, PRIVATE, SECRET, TOPSECRET }, + SUPERUSER.getShortName()); + } catch (Throwable e) { + } + return null; } - return null; - } - }; + }; SUPERUSER.runAs(action); } @@ -172,7 +173,7 @@ public void testVisibilityLabelsWithDeleteColumns() throws Throwable { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "&" + SECRET)); d.addColumns(fam, qual); @@ -209,7 +210,7 @@ public void testVisibilityLabelsWithDeleteFamily() throws Exception { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row2); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); d.addFamily(fam); @@ -241,13 +242,13 @@ public void testVisibilityLabelsWithDeleteFamilyVersion() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(testName.getMethodName()); long[] ts = new long[] { 123L, 125L }; - try ( - Table table = createTableAndWriteDataWithLabels(ts, CONFIDENTIAL + "|" + TOPSECRET, SECRET)) { + try (Table table = + createTableAndWriteDataWithLabels(ts, CONFIDENTIAL + "|" + TOPSECRET, SECRET)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); 
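// A minimal, self-contained sketch of the runAs pattern these visibility tests repeat:
// table mutations are wrapped in a PrivilegedExceptionAction and executed through
// User.runAs so they are attributed to a specific test user. Names such as conf,
// tableName, row1, fam, qual, TOPSECRET, SECRET and SUPERUSER are assumed to be in
// scope, as in the surrounding test code.
PrivilegedExceptionAction<Void> deleteAsUser = new PrivilegedExceptionAction<Void>() {
  @Override
  public Void run() throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(conf);
      Table table = connection.getTable(tableName)) {
      Delete d = new Delete(row1);
      // The visibility expression on the delete selects which labelled cells it masks.
      d.setCellVisibility(new CellVisibility(TOPSECRET + "&" + SECRET));
      d.addColumns(fam, qual);
      table.delete(d);
    }
    return null;
  }
};
SUPERUSER.runAs(deleteAsUser);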
d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); d.addFamilyVersion(fam, 123L); @@ -279,13 +280,13 @@ public void testVisibilityLabelsWithDeleteColumnExactVersion() throws Exception setAuths(); final TableName tableName = TableName.valueOf(testName.getMethodName()); long[] ts = new long[] { 123L, 125L }; - try ( - Table table = createTableAndWriteDataWithLabels(ts, CONFIDENTIAL + "|" + TOPSECRET, SECRET)) { + try (Table table = + createTableAndWriteDataWithLabels(ts, CONFIDENTIAL + "|" + TOPSECRET, SECRET)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); d.addColumn(fam, qual, 123L); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityTestUtil.java index 4e2c4b7c303a..d15720c2a79a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityTestUtil.java @@ -1,17 +1,23 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.security.User; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java index 7523ae8fa0ba..ede1d64f400a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,44 +47,44 @@ public class MobSnapshotTestingUtils { * Create the Mob Table. */ public static void createMobTable(final HBaseTestingUtil util, final TableName tableName, - int regionReplication, final byte[]... families) throws IOException, InterruptedException { + int regionReplication, final byte[]... families) throws IOException, InterruptedException { createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(), regionReplication, StoreFileTrackerFactory.Trackers.DEFAULT.name(), families); } public static void createMobTable(final HBaseTestingUtil util, final TableName tableName, - int regionReplication, String storeFileTracker, final byte[]... families) - throws IOException, InterruptedException { + int regionReplication, String storeFileTracker, final byte[]... families) + throws IOException, InterruptedException { createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(), regionReplication, storeFileTracker, families); } public static void createPreSplitMobTable(final HBaseTestingUtil util, final TableName tableName, - int nRegions, final byte[]... families) throws IOException, InterruptedException { + int nRegions, final byte[]... families) throws IOException, InterruptedException { createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(nRegions), 1, families); } public static void createMobTable(final HBaseTestingUtil util, final TableName tableName, - final byte[][] splitKeys, int regionReplication, final byte[]... families) - throws IOException, InterruptedException { + final byte[][] splitKeys, int regionReplication, final byte[]... families) + throws IOException, InterruptedException { createMobTable(util, tableName, splitKeys, regionReplication, StoreFileTrackerFactory.Trackers.DEFAULT.name(), families); } public static void createMobTable(final HBaseTestingUtil util, final TableName tableName, - final byte[][] splitKeys, int regionReplication, String storeFileTracker, - final byte[]... families) throws IOException, InterruptedException { + final byte[][] splitKeys, int regionReplication, String storeFileTracker, + final byte[]... families) throws IOException, InterruptedException { createMobTable(util, tableName, splitKeys, regionReplication, storeFileTracker, null, families); } public static void createMobTable(HBaseTestingUtil util, TableName tableName, byte[][] splitKeys, - int regionReplication, String storeFileTracker, String cpClassName, byte[]... families) - throws IOException, InterruptedException { + int regionReplication, String storeFileTracker, String cpClassName, byte[]... 
families) + throws IOException, InterruptedException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication); + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication); for (byte[] family : families) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setMobEnabled(true) - .setMobThreshold(0L).build()); + .setMobThreshold(0L).build()); } if (!StringUtils.isBlank(cpClassName)) { builder.setCoprocessor(cpClassName); @@ -101,7 +101,7 @@ public static void createMobTable(HBaseTestingUtil util, TableName tableName, by * @return An Table instance for the created table. */ public static Table createMobTable(final HBaseTestingUtil util, final TableName tableName, - final byte[]... families) throws IOException { + final byte[]... families) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); for (byte[] family : families) { // Disable blooms (they are on by default as of 0.95) but we disable them @@ -110,7 +110,7 @@ public static Table createMobTable(final HBaseTestingUtil util, final TableName // and blooms being // on is interfering. builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family) - .setBloomFilterType(BloomType.NONE).setMobEnabled(true).setMobThreshold(0L).build()); + .setBloomFilterType(BloomType.NONE).setMobEnabled(true).setMobThreshold(0L).build()); } util.getAdmin().createTable(builder.build()); // HBaseAdmin only waits for regions to appear in hbase:meta we should wait @@ -141,7 +141,7 @@ public static int countMobRows(final Table table, final byte[]... families) thro } public static void verifyMobRowCount(final HBaseTestingUtil util, final TableName tableName, - long expectedRows) throws IOException { + long expectedRows) throws IOException { Table table = ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName); try { @@ -161,10 +161,10 @@ public SnapshotMock(final Configuration conf, final FileSystem fs, final Path ro @Override public TableDescriptor createHtd(final String tableName) { - return TableDescriptorBuilder - .newBuilder(TableName.valueOf(tableName)).setColumnFamily(ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes(TEST_FAMILY)).setMobEnabled(true).setMobThreshold(0L).build()) - .build(); + return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(TEST_FAMILY)) + .setMobEnabled(true).setMobThreshold(0L).build()) + .build(); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java index 00d2e84a464b..8b1e1e8f8ebe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -93,22 +93,18 @@ private SnapshotTestingUtils() { /** * Assert that we don't have any snapshots lists - * - * @throws IOException - * if the admin operation fails + * @throws IOException if the admin operation fails */ public static void assertNoSnapshots(Admin admin) throws IOException { - assertEquals("Have some previous snapshots", 0, admin.listSnapshots() - .size()); + assertEquals("Have some previous snapshots", 0, admin.listSnapshots().size()); } /** - * Make sure that there is only one snapshot returned from the master and its - * name and table match the passed in parameters. + * Make sure that there is only one snapshot returned from the master and its name and table match + * the passed in parameters. */ - public static List assertExistsMatchingSnapshot( - Admin admin, String snapshotName, TableName tableName) - throws IOException { + public static List assertExistsMatchingSnapshot(Admin admin, + String snapshotName, TableName tableName) throws IOException { // list the snapshot List snapshots = admin.listSnapshots(); @@ -119,7 +115,7 @@ public static List assertExistsMatchingSnapshot( } } - Assert.assertTrue("No matching snapshots found.", returnedSnapshots.size()>0); + Assert.assertTrue("No matching snapshots found.", returnedSnapshots.size() > 0); return returnedSnapshots; } @@ -132,12 +128,11 @@ public static void assertOneSnapshotThatMatches(Admin admin, } /** - * Make sure that there is only one snapshot returned from the master and its - * name and table match the passed in parameters. + * Make sure that there is only one snapshot returned from the master and its name and table match + * the passed in parameters. */ - public static List assertOneSnapshotThatMatches( - Admin admin, String snapshotName, TableName tableName) - throws IOException { + public static List assertOneSnapshotThatMatches(Admin admin, + String snapshotName, TableName tableName) throws IOException { // list the snapshot List snapshots = admin.listSnapshots(); @@ -149,13 +144,12 @@ public static List assertOneSnapshotThatMatches( } /** - * Make sure that there is only one snapshot returned from the master and its - * name and table match the passed in parameters. + * Make sure that there is only one snapshot returned from the master and its name and table match + * the passed in parameters. */ - public static List assertOneSnapshotThatMatches( - Admin admin, byte[] snapshot, TableName tableName) throws IOException { - return assertOneSnapshotThatMatches(admin, Bytes.toString(snapshot), - tableName); + public static List assertOneSnapshotThatMatches(Admin admin, byte[] snapshot, + TableName tableName) throws IOException { + return assertOneSnapshotThatMatches(admin, Bytes.toString(snapshot), tableName); } public static void confirmSnapshotValid(HBaseTestingUtil testUtil, @@ -167,76 +161,79 @@ public static void confirmSnapshotValid(HBaseTestingUtil testUtil, } /** - * Confirm that the snapshot contains references to all the files that should - * be in the snapshot. + * Confirm that the snapshot contains references to all the files that should be in the snapshot. 
*/ public static void confirmSnapshotValid(SnapshotProtos.SnapshotDescription snapshotDescriptor, TableName tableName, byte[] testFamily, Path rootDir, Admin admin, FileSystem fs) throws IOException { ArrayList nonEmptyTestFamilies = new ArrayList(1); nonEmptyTestFamilies.add(testFamily); - confirmSnapshotValid(snapshotDescriptor, tableName, - nonEmptyTestFamilies, null, rootDir, admin, fs); + confirmSnapshotValid(snapshotDescriptor, tableName, nonEmptyTestFamilies, null, rootDir, admin, + fs); } /** * Confirm that the snapshot has no references files but only metadata. */ public static void confirmEmptySnapshotValid( - SnapshotProtos.SnapshotDescription snapshotDescriptor, TableName tableName, - byte[] testFamily, Path rootDir, Admin admin, FileSystem fs) - throws IOException { + SnapshotProtos.SnapshotDescription snapshotDescriptor, TableName tableName, byte[] testFamily, + Path rootDir, Admin admin, FileSystem fs) throws IOException { ArrayList emptyTestFamilies = new ArrayList(1); emptyTestFamilies.add(testFamily); - confirmSnapshotValid(snapshotDescriptor, tableName, - null, emptyTestFamilies, rootDir, admin, fs); + confirmSnapshotValid(snapshotDescriptor, tableName, null, emptyTestFamilies, rootDir, admin, + fs); } /** - * Confirm that the snapshot contains references to all the files that should - * be in the snapshot. This method also perform some redundant check like - * the existence of the snapshotinfo or the regioninfo which are done always - * by the MasterSnapshotVerifier, at the end of the snapshot operation. + * Confirm that the snapshot contains references to all the files that should be in the snapshot. + * This method also perform some redundant check like the existence of the snapshotinfo or the + * regioninfo which are done always by the MasterSnapshotVerifier, at the end of the snapshot + * operation. 
*/ - public static void confirmSnapshotValid( - SnapshotProtos.SnapshotDescription snapshotDescriptor, TableName tableName, - List nonEmptyTestFamilies, List emptyTestFamilies, + public static void confirmSnapshotValid(SnapshotProtos.SnapshotDescription snapshotDescriptor, + TableName tableName, List nonEmptyTestFamilies, List emptyTestFamilies, Path rootDir, Admin admin, FileSystem fs) throws IOException { final Configuration conf = admin.getConfiguration(); // check snapshot dir - Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir( - snapshotDescriptor, rootDir); - assertTrue("target snapshot directory, '"+ snapshotDir +"', doesn't exist.", fs.exists(snapshotDir)); + Path snapshotDir = + SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDescriptor, rootDir); + assertTrue("target snapshot directory, '" + snapshotDir + "', doesn't exist.", + fs.exists(snapshotDir)); - SnapshotProtos.SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotProtos.SnapshotDescription desc = + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); // Extract regions and families with store files final Set snapshotFamilies = new TreeSet<>(Bytes.BYTES_COMPARATOR); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc); Map regionManifests = manifest.getRegionManifestsMap(); - for (SnapshotRegionManifest regionManifest: regionManifests.values()) { + for (SnapshotRegionManifest regionManifest : regionManifests.values()) { SnapshotReferenceUtil.visitRegionStoreFiles(regionManifest, - new SnapshotReferenceUtil.StoreFileVisitor() { - @Override - public void storeFile(final RegionInfo regionInfo, final String family, + new SnapshotReferenceUtil.StoreFileVisitor() { + @Override + public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { - snapshotFamilies.add(Bytes.toBytes(family)); - } - }); + snapshotFamilies.add(Bytes.toBytes(family)); + } + }); } // Verify that there are store files in the specified families if (nonEmptyTestFamilies != null) { - for (final byte[] familyName: nonEmptyTestFamilies) { - assertTrue("Expected snapshot to contain family '" + Bytes.toString(familyName) + "', but it does not.", snapshotFamilies.contains(familyName)); + for (final byte[] familyName : nonEmptyTestFamilies) { + assertTrue("Expected snapshot to contain family '" + Bytes.toString(familyName) + + "', but it does not.", + snapshotFamilies.contains(familyName)); } } // Verify that there are no store files in the specified families if (emptyTestFamilies != null) { - for (final byte[] familyName: emptyTestFamilies) { - assertFalse("Expected snapshot to skip empty family '" + Bytes.toString(familyName) + "', but it is present.", snapshotFamilies.contains(familyName)); + for (final byte[] familyName : emptyTestFamilies) { + assertFalse("Expected snapshot to skip empty family '" + Bytes.toString(familyName) + + "', but it is present.", + snapshotFamilies.contains(familyName)); } } @@ -244,8 +241,8 @@ public void storeFile(final RegionInfo regionInfo, final String family, List regions = admin.getRegions(tableName); // remove the non-default regions RegionReplicaUtil.removeNonDefaultRegions(regions); - boolean hasMob = regionManifests.containsKey(MobUtils.getMobRegionInfo(tableName) - .getEncodedName()); + boolean hasMob = + regionManifests.containsKey(MobUtils.getMobRegionInfo(tableName).getEncodedName()); if (hasMob) { assertEquals("Wrong number of regions.", 
regions.size(), regionManifests.size() - 1); } else { @@ -265,13 +262,14 @@ public void storeFile(final RegionInfo regionInfo, final String family, // Verify Regions (redundant check, see MasterSnapshotVerifier) for (RegionInfo info : regions) { String regionName = info.getEncodedName(); - assertTrue("Missing region name: '" + regionName + "'", regionManifests.containsKey(regionName)); + assertTrue("Missing region name: '" + regionName + "'", + regionManifests.containsKey(regionName)); } } /* - * Take snapshot with maximum of numTries attempts, ignoring CorruptedSnapshotException - * except for the last CorruptedSnapshotException + * Take snapshot with maximum of numTries attempts, ignoring CorruptedSnapshotException except for + * the last CorruptedSnapshotException */ public static void snapshot(Admin admin, final String snapshotName, final TableName tableName, final SnapshotType type, final int numTries) throws IOException { @@ -289,28 +287,23 @@ public static void snapshot(Admin admin, final String snapshotName, final TableN throw lastEx; } - public static void cleanupSnapshot(Admin admin, byte[] tableName) - throws IOException { + public static void cleanupSnapshot(Admin admin, byte[] tableName) throws IOException { SnapshotTestingUtils.cleanupSnapshot(admin, Bytes.toString(tableName)); } - public static void cleanupSnapshot(Admin admin, String snapshotName) - throws IOException { + public static void cleanupSnapshot(Admin admin, String snapshotName) throws IOException { // delete the taken snapshot admin.deleteSnapshot(snapshotName); assertNoSnapshots(admin); } /** - * Expect the snapshot to throw an error when checking if the snapshot is - * complete - * + * Expect the snapshot to throw an error when checking if the snapshot is complete * @param master master to check * @param snapshot the {@link SnapshotDescription} request to pass to the master * @param clazz expected exception from the master */ - public static void expectSnapshotDoneException(HMaster master, - IsSnapshotDoneRequest snapshot, + public static void expectSnapshotDoneException(HMaster master, IsSnapshotDoneRequest snapshot, Class clazz) { try { master.getMasterRpcServices().isSnapshotDone(null, snapshot); @@ -328,7 +321,6 @@ public static void expectSnapshotDoneException(HMaster master, /** * List all the HFiles in the given table - * * @param fs FileSystem where the table lives * @param tableDir directory of the table * @return array of the current HFiles in the table (could be a zero-length array) @@ -349,35 +341,32 @@ public void storeFile(final String region, final String family, final String hfi } /** - * Take a snapshot of the specified table and verify that the given family is - * not empty. Note that this will leave the table disabled - * in the case of an offline snapshot. + * Take a snapshot of the specified table and verify that the given family is not empty. Note that + * this will leave the table disabled in the case of an offline snapshot. 
*/ - public static void createSnapshotAndValidate(Admin admin, - TableName tableName, String familyName, String snapshotNameString, - Path rootDir, FileSystem fs, boolean onlineSnapshot) + public static void createSnapshotAndValidate(Admin admin, TableName tableName, String familyName, + String snapshotNameString, Path rootDir, FileSystem fs, boolean onlineSnapshot) throws Exception { ArrayList nonEmptyFamilyNames = new ArrayList<>(1); nonEmptyFamilyNames.add(Bytes.toBytes(familyName)); createSnapshotAndValidate(admin, tableName, nonEmptyFamilyNames, /* emptyFamilyNames= */ null, - snapshotNameString, rootDir, fs, onlineSnapshot); + snapshotNameString, rootDir, fs, onlineSnapshot); } /** - * Take a snapshot of the specified table and verify the given families. - * Note that this will leave the table disabled in the case of an offline snapshot. + * Take a snapshot of the specified table and verify the given families. Note that this will leave + * the table disabled in the case of an offline snapshot. */ - public static void createSnapshotAndValidate(Admin admin, - TableName tableName, List nonEmptyFamilyNames, List emptyFamilyNames, - String snapshotNameString, Path rootDir, FileSystem fs, boolean onlineSnapshot) - throws Exception { + public static void createSnapshotAndValidate(Admin admin, TableName tableName, + List nonEmptyFamilyNames, List emptyFamilyNames, String snapshotNameString, + Path rootDir, FileSystem fs, boolean onlineSnapshot) throws Exception { if (!onlineSnapshot) { try { LOG.info("prepping for offline snapshot."); admin.disableTable(tableName); } catch (TableNotEnabledException tne) { - LOG.info("In attempting to disable " + tableName + " it turns out that the this table is " + - "already disabled."); + LOG.info("In attempting to disable " + tableName + " it turns out that the this table is " + + "already disabled."); } } LOG.info("taking snapshot."); @@ -398,7 +387,6 @@ public static void createSnapshotAndValidate(Admin admin, /** * Corrupt the specified snapshot by deleting some files. 
- * * @param util {@link HBaseTestingUtil} * @param snapshotName name of the snapshot to corrupt * @return array of the corrupted HFiles @@ -409,8 +397,8 @@ public static ArrayList corruptSnapshot(final HBaseTestingUtil util, final Strin final MasterFileSystem mfs = util.getHBaseCluster().getMaster().getMasterFileSystem(); final FileSystem fs = mfs.getFileSystem(); - Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, - mfs.getRootDir()); + Path snapshotDir = + SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, mfs.getRootDir()); SnapshotProtos.SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); final TableName table = TableName.valueOf(snapshotDesc.getTable()); @@ -418,26 +406,26 @@ public static ArrayList corruptSnapshot(final HBaseTestingUtil util, final Strin final ArrayList corruptedFiles = new ArrayList(); final Configuration conf = util.getConfiguration(); SnapshotReferenceUtil.visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, - new SnapshotReferenceUtil.StoreFileVisitor() { - @Override - public void storeFile(final RegionInfo regionInfo, final String family, + new SnapshotReferenceUtil.StoreFileVisitor() { + @Override + public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { - String region = regionInfo.getEncodedName(); - String hfile = storeFile.getName(); - HFileLink link = HFileLink.build(conf, table, region, family, hfile); - if (corruptedFiles.size() % 2 == 0) { - fs.delete(link.getAvailablePath(fs), true); - corruptedFiles.add(hfile); + String region = regionInfo.getEncodedName(); + String hfile = storeFile.getName(); + HFileLink link = HFileLink.build(conf, table, region, family, hfile); + if (corruptedFiles.size() % 2 == 0) { + fs.delete(link.getAvailablePath(fs), true); + corruptedFiles.add(hfile); + } } - } - }); + }); assertTrue(corruptedFiles.size() > 0); return corruptedFiles; } // ========================================================================== - // Snapshot Mock + // Snapshot Mock // ========================================================================== public static class SnapshotMock { protected final static String TEST_FAMILY = "cf"; @@ -469,10 +457,9 @@ public static class SnapshotBuilder { private Path snapshotDir; private int snapshotted = 0; - public SnapshotBuilder(final Configuration conf, final FileSystem fs, - final Path rootDir, final TableDescriptor htd, - final SnapshotProtos.SnapshotDescription desc, final RegionData[] tableRegions) - throws IOException { + public SnapshotBuilder(final Configuration conf, final FileSystem fs, final Path rootDir, + final TableDescriptor htd, final SnapshotProtos.SnapshotDescription desc, + final RegionData[] tableRegions) throws IOException { this.fs = fs; this.conf = conf; this.rootDir = rootDir; @@ -533,8 +520,8 @@ private void corruptFile(Path p) throws IOException { // Create a new region-manifest file FSDataOutputStream out = fs.create(p); - //Copy the first 25 bytes of the original region-manifest into the new one, - //make it a corrupted region-manifest file. + // Copy the first 25 bytes of the original region-manifest into the new one, + // make it a corrupted region-manifest file. 
FSDataInputStream input = fs.open(newP); byte[] buffer = new byte[25]; int len = input.read(0, buffer, 0, 25); @@ -549,12 +536,12 @@ private void corruptFile(Path p) throws IOException { /** * Corrupt one region-manifest file - * * @throws IOException on unexecpted error from the FS */ public void corruptOneRegionManifest() throws IOException { FileStatus[] manifestFiles = CommonFSUtils.listStatus(fs, snapshotDir, new PathFilter() { - @Override public boolean accept(Path path) { + @Override + public boolean accept(Path path) { return path.getName().startsWith(SnapshotManifestV2.SNAPSHOT_MANIFEST_PREFIX); } }); @@ -571,16 +558,15 @@ public void missOneRegionSnapshotFile() throws IOException { for (FileStatus fileStatus : manifestFiles) { String fileName = fileStatus.getPath().getName(); if (fileName.endsWith(SnapshotDescriptionUtils.SNAPSHOTINFO_FILE) - || fileName.endsWith(".tabledesc") - || fileName.endsWith(SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME)) { - fs.delete(fileStatus.getPath(), true); + || fileName.endsWith(".tabledesc") + || fileName.endsWith(SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME)) { + fs.delete(fileStatus.getPath(), true); } } } /** * Corrupt data-manifest file - * * @throws IOException on unexecpted error from the FS */ public void corruptDataManifest() throws IOException { @@ -631,7 +617,8 @@ public SnapshotBuilder createSnapshotV1(final String snapshotName, final String public SnapshotBuilder createSnapshotV1(final String snapshotName, final String tableName, final int numRegions) throws IOException { - return createSnapshot(snapshotName, tableName, numRegions, SnapshotManifestV1.DESCRIPTOR_VERSION); + return createSnapshot(snapshotName, tableName, numRegions, + SnapshotManifestV1.DESCRIPTOR_VERSION); } public SnapshotBuilder createSnapshotV2(final String snapshotName, final String tableName) @@ -641,13 +628,14 @@ public SnapshotBuilder createSnapshotV2(final String snapshotName, final String public SnapshotBuilder createSnapshotV2(final String snapshotName, final String tableName, final int numRegions) throws IOException { - return createSnapshot(snapshotName, tableName, numRegions, SnapshotManifestV2.DESCRIPTOR_VERSION); + return createSnapshot(snapshotName, tableName, numRegions, + SnapshotManifestV2.DESCRIPTOR_VERSION); } public SnapshotBuilder createSnapshotV2(final String snapshotName, final String tableName, final int numRegions, final long ttl) throws IOException { return createSnapshot(snapshotName, tableName, numRegions, - SnapshotManifestV2.DESCRIPTOR_VERSION, ttl); + SnapshotManifestV2.DESCRIPTOR_VERSION, ttl); } private SnapshotBuilder createSnapshot(final String snapshotName, final String tableName, @@ -661,10 +649,8 @@ private SnapshotBuilder createSnapshot(final String snapshotName, final String t RegionData[] regions = createTable(htd, numRegions); SnapshotProtos.SnapshotDescription desc = SnapshotProtos.SnapshotDescription.newBuilder() - .setTable(htd.getTableName().getNameAsString()) - .setName(snapshotName) - .setVersion(version) - .build(); + .setTable(htd.getTableName().getNameAsString()).setName(snapshotName).setVersion(version) + .build(); Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf); FileSystem workingFs = workingDir.getFileSystem(conf); @@ -677,12 +663,8 @@ private SnapshotBuilder createSnapshot(final String snapshotName, final String t TableDescriptor htd = createHtd(tableName); RegionData[] regions = createTable(htd, numRegions); SnapshotProtos.SnapshotDescription desc = 
SnapshotProtos.SnapshotDescription.newBuilder() - .setTable(htd.getTableName().getNameAsString()) - .setName(snapshotName) - .setVersion(version) - .setCreationTime(EnvironmentEdgeManager.currentTime()) - .setTtl(ttl) - .build(); + .setTable(htd.getTableName().getNameAsString()).setName(snapshotName).setVersion(version) + .setCreationTime(EnvironmentEdgeManager.currentTime()).setTtl(ttl).build(); Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf); SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, fs); return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions); @@ -690,8 +672,7 @@ private SnapshotBuilder createSnapshot(final String snapshotName, final String t public TableDescriptor createHtd(final String tableName) { return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); } private RegionData[] createTable(final TableDescriptor htd, final int nregions) @@ -706,10 +687,8 @@ private RegionData[] createTable(final TableDescriptor htd, final int nregions) byte[] endKey = Bytes.toBytes(1 + i * 2); // First region, simple with one plain hfile. - RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()) - .setStartKey(startKey) - .setEndKey(endKey) - .build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKey) + .setEndKey(endKey).build(); HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri); regions[i] = new RegionData(tableDir, hri, 3); for (int j = 0; j < regions[i].files.length; ++j) { @@ -723,18 +702,17 @@ private RegionData[] createTable(final TableDescriptor htd, final int nregions) endKey = Bytes.toBytes(3 + i * 2); hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri); - regions[i+1] = new RegionData(tableDir, hri, regions[i].files.length); + regions[i + 1] = new RegionData(tableDir, hri, regions[i].files.length); for (int j = 0; j < regions[i].files.length; ++j) { String refName = regions[i].files[j].getName() + '.' 
+ regions[i].hri.getEncodedName(); Path refFile = createStoreFile(new Path(rootDir, refName)); - regions[i+1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile); + regions[i + 1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile); } } return regions; } - private Path createStoreFile(final Path storeFile) - throws IOException { + private Path createStoreFile(final Path storeFile) throws IOException { FSDataOutputStream out = fs.create(storeFile); try { out.write(Bytes.toBytes(storeFile.toString())); @@ -746,10 +724,9 @@ private Path createStoreFile(final Path storeFile) } // ========================================================================== - // Table Helpers + // Table Helpers // ========================================================================== - public static void waitForTableToBeOnline(final HBaseTestingUtil util, - final TableName tableName) + public static void waitForTableToBeOnline(final HBaseTestingUtil util, final TableName tableName) throws IOException, InterruptedException { HRegionServer rs = util.getRSForFirstRegionInTable(tableName); List onlineRegions = rs.getRegions(tableName); @@ -763,17 +740,15 @@ public static void waitForTableToBeOnline(final HBaseTestingUtil util, public static void createTable(final HBaseTestingUtil util, final TableName tableName, int regionReplication, int nRegions, final byte[]... families) throws IOException, InterruptedException { - TableDescriptorBuilder builder - = TableDescriptorBuilder - .newBuilder(tableName) - .setRegionReplication(regionReplication); + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication); for (byte[] family : families) { builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)); } byte[][] splitKeys = getSplitKeys(nRegions); util.createTable(builder.build(), splitKeys); assertEquals((splitKeys.length + 1) * regionReplication, - util.getAdmin().getRegions(tableName).size()); + util.getAdmin().getRegions(tableName).size()); } public static byte[][] getSplitKeys() { @@ -782,7 +757,7 @@ public static byte[][] getSplitKeys() { public static byte[][] getSplitKeys(int nRegions) { nRegions = nRegions < KEYS.length ? nRegions : (KEYS.length - 1); - final byte[][] splitKeys = new byte[nRegions-1][]; + final byte[][] splitKeys = new byte[nRegions - 1][]; final int step = KEYS.length / nRegions; int keyIndex = 1; for (int i = 0; i < splitKeys.length; ++i) { @@ -798,7 +773,8 @@ public static void createTable(final HBaseTestingUtil util, final TableName tabl } public static void createTable(final HBaseTestingUtil util, final TableName tableName, - final int regionReplication, final byte[]... families) throws IOException, InterruptedException { + final int regionReplication, final byte[]... families) + throws IOException, InterruptedException { createTable(util, tableName, regionReplication, KEYS.length, families); } @@ -813,11 +789,11 @@ public static void loadData(final HBaseTestingUtil util, final TableName tableNa loadData(util, mutator, rows, families); } - public static void loadData(final HBaseTestingUtil util, final BufferedMutator mutator, - int rows, byte[]... families) throws IOException, InterruptedException { + public static void loadData(final HBaseTestingUtil util, final BufferedMutator mutator, int rows, + byte[]... 
families) throws IOException, InterruptedException { // Ensure one row per region assertTrue(rows >= KEYS.length); - for (byte k0: KEYS) { + for (byte k0 : KEYS) { byte[] k = new byte[] { k0 }; byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), k); byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value))); @@ -830,8 +806,8 @@ public static void loadData(final HBaseTestingUtil util, final BufferedMutator m // Add other extra rows. more rows, more files while (rows-- > 0) { - byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), - Bytes.toBytes(rows)); + byte[] value = + Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), Bytes.toBytes(rows)); byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value)); final byte[][] families1 = families; final byte[] key1 = key; @@ -847,23 +823,21 @@ private static Put createPut(final byte[][] families, final byte[] key, final by byte[] q = Bytes.toBytes("q"); Put put = new Put(key); put.setDurability(Durability.SKIP_WAL); - for (byte[] family: families) { + for (byte[] family : families) { put.addColumn(family, q, value); } return put; } - public static void deleteAllSnapshots(final Admin admin) - throws IOException { + public static void deleteAllSnapshots(final Admin admin) throws IOException { // Delete all the snapshots - for (SnapshotDescription snapshot: admin.listSnapshots()) { + for (SnapshotDescription snapshot : admin.listSnapshots()) { admin.deleteSnapshot(snapshot.getName()); } SnapshotTestingUtils.assertNoSnapshots(admin); } - public static void deleteArchiveDirectory(final HBaseTestingUtil util) - throws IOException { + public static void deleteArchiveDirectory(final HBaseTestingUtil util) throws IOException { // Ensure the archiver to be empty MasterFileSystem mfs = util.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path archiveDir = new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestConcurrentFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestConcurrentFlushSnapshotFromClient.java index bdda640565ec..528b328222ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestConcurrentFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestConcurrentFlushSnapshotFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java index b7a99d40dbc8..bce62feb5c1b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java @@ -68,12 +68,11 @@ /** * Test creating/using/deleting snapshots from the client *
<p>
          - * This is an end-to-end test for the snapshot utility - * - * TODO This is essentially a clone of TestSnapshotFromClient. This is worth refactoring this - * because there will be a few more flavors of snapshots that need to run these tests. + * This is an end-to-end test for the snapshot utility TODO This is essentially a clone of + * TestSnapshotFromClient. This is worth refactoring this because there will be a few more flavors + * of snapshots that need to run these tests. */ -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestFlushSnapshotFromClient { @ClassRule @@ -109,7 +108,7 @@ protected static void setupConf(Configuration conf) { // Enable snapshot conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, - ConstantSizeRegionSplitPolicy.class.getName()); + ConstantSizeRegionSplitPolicy.class.getName()); } @Before @@ -171,7 +170,7 @@ public void testFlushTableSnapshot() throws Exception { ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM); } - /** + /** * Test snapshotting a table that is online without flushing */ @Test @@ -209,7 +208,6 @@ public void testSkipFlushTableSnapshot() throws Exception { SnapshotTestingUtils.assertNoSnapshots(admin); } - /** * Test simple flush snapshotting a table that is online */ @@ -229,15 +227,14 @@ public void testFlushTableSnapshotWithProcedure() throws Exception { byte[] snapshot = Bytes.toBytes(snapshotString); Map props = new HashMap<>(); props.put("table", TABLE_NAME.getNameAsString()); - admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION, - snapshotString, props); - + admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION, snapshotString, + props); LOG.debug("Snapshot completed."); // make sure we have the snapshot - List snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, - snapshot, TABLE_NAME); + List snapshots = + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); // make sure its a valid snapshot LOG.debug("FS state after snapshot:"); @@ -285,7 +282,7 @@ public void testSnapshotFailsOnNonExistantTable() throws Exception { private static void waitForSnapshotToComplete(HMaster master, SnapshotProtos.SnapshotDescription snapshot, long timeoutNanos) throws Exception { final IsSnapshotDoneRequest request = - IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build(); + IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build(); long start = System.nanoTime(); while (System.nanoTime() - start < timeoutNanos) { try { @@ -314,8 +311,7 @@ public void testAsyncFlushSnapshot() throws Exception { .setType(SnapshotProtos.SnapshotDescription.Type.FLUSH).build(); // take the snapshot async - admin.snapshotAsync( - new SnapshotDescription("asyncSnapshot", TABLE_NAME, SnapshotType.FLUSH)); + admin.snapshotAsync(new SnapshotDescription("asyncSnapshot", TABLE_NAME, SnapshotType.FLUSH)); // constantly loop, looking for the snapshot to complete HMaster master = UTIL.getMiniHBaseCluster().getMaster(); @@ -356,9 +352,9 @@ public int compare(RegionInfo r1, RegionInfo r2) { int numRegions = admin.getRegions(TABLE_NAME).size(); int numRegionsAfterMerge = numRegions - 2; admin.mergeRegionsAsync(regions.get(1).getEncodedNameAsBytes(), - regions.get(2).getEncodedNameAsBytes(), true); + regions.get(2).getEncodedNameAsBytes(), true); 
admin.mergeRegionsAsync(regions.get(4).getEncodedNameAsBytes(), - regions.get(5).getEncodedNameAsBytes(), true); + regions.get(5).getEncodedNameAsBytes(), true); // Verify that there's one region less waitRegionsAfterMerge(numRegionsAfterMerge); @@ -398,9 +394,9 @@ public int compare(RegionInfo r1, RegionInfo r2) { int numRegions = admin.getRegions(TABLE_NAME).size(); int numRegionsAfterMerge = numRegions - 2; admin.mergeRegionsAsync(regions.get(1).getEncodedNameAsBytes(), - regions.get(2).getEncodedNameAsBytes(), true); + regions.get(2).getEncodedNameAsBytes(), true); admin.mergeRegionsAsync(regions.get(4).getEncodedNameAsBytes(), - regions.get(5).getEncodedNameAsBytes(), true); + regions.get(5).getEncodedNameAsBytes(), true); waitRegionsAfterMerge(numRegionsAfterMerge); assertEquals(numRegionsAfterMerge, admin.getRegions(TABLE_NAME).size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java index 431672ae19d4..ac7e3186aa13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,11 @@ /** * Test creating/using/deleting snapshots from the client *
<p>
          - * This is an end-to-end test for the snapshot utility - * - * TODO This is essentially a clone of TestSnapshotFromClient. This is worth refactoring this - * because there will be a few more flavors of snapshots that need to run these tests. + * This is an end-to-end test for the snapshot utility TODO This is essentially a clone of + * TestSnapshotFromClient. This is worth refactoring this because there will be a few more flavors + * of snapshots that need to run these tests. */ -@Category({ClientTests.class, MediumTests.class}) +@Category({ ClientTests.class, MediumTests.class }) public class TestMobFlushSnapshotFromClient extends TestFlushSnapshotFromClient { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java index e9dced61aa64..e50cb0a1aca7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,12 +33,11 @@ import org.slf4j.LoggerFactory; /** - * Test clone/restore snapshots from the client - * - * TODO This is essentially a clone of TestRestoreSnapshotFromClient. This is worth refactoring - * this because there will be a few more flavors of snapshots that need to run these tests. + * Test clone/restore snapshots from the client TODO This is essentially a clone of + * TestRestoreSnapshotFromClient. This is worth refactoring this because there will be a few more + * flavors of snapshots that need to run these tests. */ -@Category({ClientTests.class,LargeTests.class}) +@Category({ ClientTests.class, LargeTests.class }) public class TestMobRestoreFlushSnapshotFromClient extends TestRestoreFlushSnapshotFromClient { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java index 10c67ada3207..a047a2f0a050 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java index fd1cc535e211..45dece9c0288 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ * Testing the region snapshot task on a cluster. 
* @see org.apache.hadoop.hbase.regionserver.snapshot.FlushSnapshotSubprocedure.RegionSnapshotTask */ -@Category({MediumTests.class, RegionServerTests.class}) +@Category({ MediumTests.class, RegionServerTests.class }) public class TestRegionSnapshotTask { @ClassRule @@ -98,13 +98,12 @@ public static void tearDown() throws Exception { } /** - * Tests adding a region to the snapshot manifest while compactions are running on the region. - * The idea is to slow down the process of adding a store file to the manifest while - * triggering compactions on the region, allowing the store files to be marked for archival while - * snapshot operation is running. - * This test checks for the correct behavior in such a case that the compacted files should - * not be moved around if a snapshot operation is in progress. - * See HBASE-18398 + * Tests adding a region to the snapshot manifest while compactions are running on the region. The + * idea is to slow down the process of adding a store file to the manifest while triggering + * compactions on the region, allowing the store files to be marked for archival while snapshot + * operation is running. This test checks for the correct behavior in such a case that the + * compacted files should not be moved around if a snapshot operation is in progress. See + * HBASE-18398 */ @Test public void testAddRegionWithCompactions() throws Exception { @@ -114,12 +113,9 @@ public void testAddRegionWithCompactions() throws Exception { List hRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName); final SnapshotProtos.SnapshotDescription snapshot = - SnapshotProtos.SnapshotDescription.newBuilder() - .setTable(tableName.getNameAsString()) - .setType(SnapshotProtos.SnapshotDescription.Type.FLUSH) - .setName("test_table_snapshot") - .setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION) - .build(); + SnapshotProtos.SnapshotDescription.newBuilder().setTable(tableName.getNameAsString()) + .setType(SnapshotProtos.SnapshotDescription.Type.FLUSH).setName("test_table_snapshot") + .setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION).build(); ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(snapshot.getName()); final HRegion region = spy(hRegions.get(0)); @@ -163,8 +159,8 @@ public void testAddRegionWithCompactions() throws Exception { SnapshotReferenceUtil.verifySnapshot(conf, fs, manifest); } - private void addRegionToSnapshot(SnapshotProtos.SnapshotDescription snapshot, - HRegion region, SnapshotManifest manifest) throws Exception { + private void addRegionToSnapshot(SnapshotProtos.SnapshotDescription snapshot, HRegion region, + SnapshotManifest manifest) throws Exception { LOG.info("Adding region to snapshot: " + region.getRegionInfo().getRegionNameAsString()); Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf); SnapshotManifest.RegionVisitor visitor = createRegionVisitorWithDelay(snapshot, workingDir); @@ -172,8 +168,8 @@ private void addRegionToSnapshot(SnapshotProtos.SnapshotDescription snapshot, LOG.info("Added the region to snapshot: " + region.getRegionInfo().getRegionNameAsString()); } - private SnapshotManifest.RegionVisitor createRegionVisitorWithDelay( - SnapshotProtos.SnapshotDescription desc, Path workingDir) { + private SnapshotManifest.RegionVisitor + createRegionVisitorWithDelay(SnapshotProtos.SnapshotDescription desc, Path workingDir) { return new SnapshotManifestV2.ManifestBuilder(conf, fs, workingDir) { @Override public void storeFile(final SnapshotProtos.SnapshotRegionManifest.Builder region, @@ -194,16 
+190,13 @@ private Table setupTable(TableName tableName) throws Exception { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); // Flush many files, but do not compact immediately // Make sure that the region does not split - builder - .setMemStoreFlushSize(5000) + builder.setMemStoreFlushSize(5000) .setRegionSplitPolicyClassName(ConstantSizeRegionSplitPolicy.class.getName()) - .setMaxFileSize(100 * 1024 * 1024) - .setValue("hbase.hstore.compactionThreshold", "250"); + .setMaxFileSize(100 * 1024 * 1024).setValue("hbase.hstore.compactionThreshold", "250"); TableDescriptor td = builder.build(); byte[] fam = Bytes.toBytes("fam"); - Table table = TEST_UTIL.createTable(td, new byte[][] {fam}, - TEST_UTIL.getConfiguration()); + Table table = TEST_UTIL.createTable(td, new byte[][] { fam }, TEST_UTIL.getConfiguration()); TEST_UTIL.loadTable(table, fam); return table; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java index 3e9e8462ad1e..6b8207be7af3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,12 +43,11 @@ import org.slf4j.LoggerFactory; /** - * Test clone/restore snapshots from the client - * - * TODO This is essentially a clone of TestRestoreSnapshotFromClient. This is worth refactoring - * this because there will be a few more flavors of snapshots that need to run these tests. + * Test clone/restore snapshots from the client TODO This is essentially a clone of + * TestRestoreSnapshotFromClient. This is worth refactoring this because there will be a few more + * flavors of snapshots that need to run these tests. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRestoreFlushSnapshotFromClient { @ClassRule @@ -80,8 +79,7 @@ protected static void setupConf(Configuration conf) { UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); UTIL.getConfiguration().setInt("hbase.client.pause", 250); UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); - UTIL.getConfiguration().setBoolean( - "hbase.master.enabletable.roundrobin", true); + UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true); // Enable snapshot UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); @@ -99,9 +97,9 @@ protected void createTable() throws Exception { } /** - * Initialize the tests with a table filled with some data - * and two snapshots (snapshotName0, snapshotName1) of different states. - * The tableName, snapshotNames and the number of rows in the snapshot are initialized. + * Initialize the tests with a table filled with some data and two snapshots (snapshotName0, + * snapshotName1) of different states. The tableName, snapshotNames and the number of rows in the + * snapshot are initialized. 
*/ @Before public void setup() throws Exception { @@ -171,18 +169,17 @@ public void testRestoreSnapshot() throws IOException { verifyRowCount(UTIL, tableName, snapshot1Rows); } - @Test(expected=SnapshotDoesNotExistException.class) + @Test(expected = SnapshotDoesNotExistException.class) public void testCloneNonExistentSnapshot() throws IOException, InterruptedException { String snapshotName = "random-snapshot-" + EnvironmentEdgeManager.currentTime(); - TableName tableName = TableName.valueOf("random-table-" + - EnvironmentEdgeManager.currentTime()); + TableName tableName = TableName.valueOf("random-table-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName, tableName); } @Test public void testCloneSnapshot() throws IOException, InterruptedException { - TableName clonedTableName = TableName.valueOf("clonedtb-" + - EnvironmentEdgeManager.currentTime()); + TableName clonedTableName = + TableName.valueOf("clonedtb-" + EnvironmentEdgeManager.currentTime()); testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows); testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows); } @@ -198,8 +195,8 @@ private void testCloneSnapshot(final TableName tableName, final String snapshotN @Test public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException { - TableName clonedTableName = TableName.valueOf("clonedtb-" + - EnvironmentEdgeManager.currentTime()); + TableName clonedTableName = + TableName.valueOf("clonedtb-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName0, clonedTableName); verifyRowCount(UTIL, clonedTableName, snapshot0Rows); admin.snapshot(snapshotName2, clonedTableName, SnapshotType.FLUSH); @@ -211,7 +208,7 @@ public void testRestoreSnapshotOfCloned() throws IOException, InterruptedExcepti } // ========================================================================== - // Helpers + // Helpers // ========================================================================== private void logFSTree() throws IOException { UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java index 58023e139c46..796a8474d255 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,7 +64,7 @@ /** * Test the restore/clone operation from a file-system point of view. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestRestoreSnapshotHelper { @ClassRule @@ -154,7 +154,7 @@ public void testSkipReplayAndUpdateSeqId() throws Exception { Path restoreDir = new Path("/hbase/.tmp-restore/testScannerWithRestoreScanner2"); // restore snapshot. 
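Not part of the patch: the flush-snapshot tests above drive the client Admin API end to end (snapshot, cloneSnapshot, restoreSnapshot). A minimal sketch of that flow under the same API, assuming a running cluster and an existing table; the table, clone, and snapshot names are illustrative only.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotCloneRestoreSketch {
  public static void main(String[] args) throws IOException {
    TableName table = TableName.valueOf("demo");        // assumed existing table
    TableName clone = TableName.valueOf("demo-clone");  // hypothetical clone target
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      // Take a flush snapshot: memstores are flushed, the table stays online.
      admin.snapshot("demo-snap", table, SnapshotType.FLUSH);
      // Clone the snapshot into a new table; the source table is untouched.
      admin.cloneSnapshot("demo-snap", clone);
      // Restoring in place requires the target table to be disabled first.
      admin.disableTable(table);
      admin.restoreSnapshot("demo-snap");
      admin.enableTable(table);
    }
  }
}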
final RestoreSnapshotHelper.RestoreMetaChanges meta = - RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName); + RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName); TableDescriptor htd = meta.getTableDescriptor(); final List restoredRegions = meta.getRegionsToAdd(); for (RegionInfo restoredRegion : restoredRegions) { @@ -165,7 +165,7 @@ public void testSkipReplayAndUpdateSeqId() throws Exception { region.setRestoredRegion(true); region.initialize(); Path recoveredEdit = - CommonFSUtils.getWALRegionDir(conf, tableName, region.getRegionInfo().getEncodedName()); + CommonFSUtils.getWALRegionDir(conf, tableName, region.getRegionInfo().getEncodedName()); long maxSeqId = WALSplitUtil.getMaxRegionSequenceId(fs, recoveredEdit); // open restored region without set restored flag @@ -207,7 +207,8 @@ private boolean hasHFileLink(Path tableDir) throws IOException { return false; } - private void restoreAndVerify(final String snapshotName, final String tableName) throws IOException { + private void restoreAndVerify(final String snapshotName, final String tableName) + throws IOException { // Test Rolling-Upgrade like Snapshot. // half machines writing using v1 and the others using v2 format. SnapshotMock snapshotMock = createSnapshotMock(); @@ -226,10 +227,8 @@ private void restoreAndVerify(final String snapshotName, final String tableName) verifyRestore(rootDir, htd, htdClone); // Test clone a clone ("link to link") - SnapshotDescription cloneDesc = SnapshotDescription.newBuilder() - .setName("cloneSnapshot") - .setTable("testtb-clone") - .build(); + SnapshotDescription cloneDesc = + SnapshotDescription.newBuilder().setName("cloneSnapshot").setTable("testtb-clone").build(); Path cloneDir = CommonFSUtils.getTableDir(rootDir, htdClone.getTableName()); TableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2"); testRestore(cloneDir, cloneDesc, htdClone2); @@ -243,13 +242,14 @@ private void verifyRestore(final Path rootDir, final TableDescriptor sourceHtd, assertEquals(12, files.size()); for (int i = 0; i < files.size(); i += 2) { String linkFile = files.get(i); - String refFile = files.get(i+1); + String refFile = files.get(i + 1); assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile)); assertTrue(refFile + " should be a Referene", StoreFileInfo.isReference(refFile)); assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile)); Path refPath = getReferredToFile(refFile); LOG.debug("get reference name for file " + refFile + " = " + refPath); - assertTrue(refPath.getName() + " should be a HFileLink", HFileLink.isHFileLink(refPath.getName())); + assertTrue(refPath.getName() + " should be a HFileLink", + HFileLink.isHFileLink(refPath.getName())); assertEquals(linkFile, refPath.getName()); } } @@ -282,8 +282,7 @@ private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path sn MonitoredTask status = Mockito.mock(MonitoredTask.class); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd); - return new RestoreSnapshotHelper(conf, fs, manifest, - htdClone, rootDir, monitor, status); + return new RestoreSnapshotHelper(conf, fs, manifest, htdClone, rootDir, monitor, status); } private Path getReferredToFile(final String referenceName) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java index 
a2d6501cbc3b..10dae3fcf9da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,8 @@ public class TestSnapshotClientRetries { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotClientRetries.class); - @Rule public TableNameTestRule testTable = new TableNameTestRule(); + @Rule + public TableNameTestRule testTable = new TableNameTestRule(); @Before public void setUp() throws Exception { @@ -67,7 +68,7 @@ public void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - @Test(expected=SnapshotExistsException.class) + @Test(expected = SnapshotExistsException.class) public void testSnapshotAlreadyExist() throws Exception { final String snapshotName = "testSnapshotAlreadyExist"; TEST_UTIL.createTable(testTable.getTableName(), "f"); @@ -75,7 +76,7 @@ public void testSnapshotAlreadyExist() throws Exception { snapshotAndAssertOneRetry(snapshotName, testTable.getTableName()); } - @Test(expected=SnapshotDoesNotExistException.class) + @Test(expected = SnapshotDoesNotExistException.class) public void testCloneNonExistentSnapshot() throws Exception { final String snapshotName = "testCloneNonExistentSnapshot"; cloneAndAssertOneRetry(snapshotName, testTable.getTableName()); @@ -126,7 +127,7 @@ public void cloneAndAssertOneRetry(final String snapshotName, final TableName ta } private MasterSyncObserver getMasterSyncObserver() { - return (MasterSyncObserver)TEST_UTIL.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + return (MasterSyncObserver) TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost() + .findCoprocessor(MasterSyncObserver.class.getName()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java index a7b8c8e7269f..e9dcb2cbcc2c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescriptionUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -38,12 +39,13 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; /** * Test that the {@link SnapshotDescription} helper is helping correctly. 
*/ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestSnapshotDescriptionUtils { @ClassRule @@ -99,8 +101,9 @@ public void testCompleteSnapshotWithNoSnapshotDirectoryFailure() throws Exceptio Path workingDir = new Path(tmpDir, "not_a_snapshot"); Configuration conf = new Configuration(); FileSystem workingFs = workingDir.getFileSystem(conf); - assertFalse("Already have working snapshot dir: " + workingDir - + " but shouldn't. Test file leak?", fs.exists(workingDir)); + assertFalse( + "Already have working snapshot dir: " + workingDir + " but shouldn't. Test file leak?", + fs.exists(workingDir)); SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build(); Path finishedDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, snapshotDir); @@ -117,25 +120,25 @@ public void testIsSubDirectoryWorks() { Path rootDir = new Path("hdfs://root/.hbase-snapshot/"); assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(rootDir, rootDir)); - assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf( - new Path("hdfs://root/.hbase-snapshotdir"), rootDir)); - assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf( - new Path("hdfs://root/.hbase-snapshot"), rootDir)); - assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf( - new Path("hdfs://.hbase-snapshot"), rootDir)); - assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf( - new Path("hdfs://.hbase-snapshot/.tmp"), rootDir)); + assertFalse(SnapshotDescriptionUtils + .isSubDirectoryOf(new Path("hdfs://root/.hbase-snapshotdir"), rootDir)); + assertFalse( + SnapshotDescriptionUtils.isSubDirectoryOf(new Path("hdfs://root/.hbase-snapshot"), rootDir)); + assertFalse( + SnapshotDescriptionUtils.isSubDirectoryOf(new Path("hdfs://.hbase-snapshot"), rootDir)); + assertFalse( + SnapshotDescriptionUtils.isSubDirectoryOf(new Path("hdfs://.hbase-snapshot/.tmp"), rootDir)); assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(new Path("hdfs://root"), rootDir)); - assertTrue(SnapshotDescriptionUtils.isSubDirectoryOf( - new Path("hdfs://root/.hbase-snapshot/.tmp"), rootDir)); - assertTrue(SnapshotDescriptionUtils.isSubDirectoryOf( - new Path("hdfs://root/.hbase-snapshot/.tmp/snapshot"), rootDir)); + assertTrue(SnapshotDescriptionUtils + .isSubDirectoryOf(new Path("hdfs://root/.hbase-snapshot/.tmp"), rootDir)); + assertTrue(SnapshotDescriptionUtils + .isSubDirectoryOf(new Path("hdfs://root/.hbase-snapshot/.tmp/snapshot"), rootDir)); - assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf( - new Path("s3://root/.hbase-snapshot/"), rootDir)); + assertFalse( + SnapshotDescriptionUtils.isSubDirectoryOf(new Path("s3://root/.hbase-snapshot/"), rootDir)); assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(new Path("s3://root"), rootDir)); - assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf( - new Path("s3://root/.hbase-snapshot/.tmp/snapshot"), rootDir)); + assertFalse(SnapshotDescriptionUtils + .isSubDirectoryOf(new Path("s3://root/.hbase-snapshot/.tmp/snapshot"), rootDir)); } @Test @@ -143,48 +146,48 @@ public void testIsWithinWorkingDir() throws IOException { Configuration conf = new Configuration(); conf.set(HConstants.HBASE_DIR, "hdfs://localhost/root/"); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("hdfs://localhost/root/"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("hdfs://localhost/root/.hbase-snapshotdir"), conf)); - 
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("hdfs://localhost/root/.hbase-snapshot"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("hdfs://localhost/.hbase-snapshot"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("hdfs://localhost/.hbase-snapshot/.tmp"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("hdfs://localhost/root"), conf)); - assertTrue(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("hdfs://localhost/root/.hbase-snapshot/.tmp"), conf)); + assertFalse( + SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("hdfs://localhost/root/"), conf)); + assertFalse(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("hdfs://localhost/root/.hbase-snapshotdir"), conf)); + assertFalse(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("hdfs://localhost/root/.hbase-snapshot"), conf)); + assertFalse(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("hdfs://localhost/.hbase-snapshot"), conf)); + assertFalse(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("hdfs://localhost/.hbase-snapshot/.tmp"), conf)); + assertFalse( + SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("hdfs://localhost/root"), conf)); + assertTrue(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("hdfs://localhost/root/.hbase-snapshot/.tmp"), conf)); assertTrue(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("hdfs://localhost/root/.hbase-snapshot/.tmp/snapshot"), conf)); + new Path("hdfs://localhost/root/.hbase-snapshot/.tmp/snapshot"), conf)); + assertFalse(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("s3://localhost/root/.hbase-snapshot/"), conf)); + assertFalse( + SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("s3://localhost/root"), conf)); assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("s3://localhost/root/.hbase-snapshot/"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("s3://localhost/root"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("s3://localhost/root/.hbase-snapshot/.tmp/snapshot"), conf)); + new Path("s3://localhost/root/.hbase-snapshot/.tmp/snapshot"), conf)); // for local mode conf = HBaseConfiguration.create(); String hbsaeDir = conf.get(HConstants.HBASE_DIR); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("file:" + hbsaeDir + "/"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("file:" + hbsaeDir + "/.hbase-snapshotdir"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("file:" + hbsaeDir + "/.hbase-snapshot"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("file:/.hbase-snapshot"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("file:/.hbase-snapshot/.tmp"), conf)); - assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("file:" + hbsaeDir), conf)); - assertTrue(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( - new Path("file:" + hbsaeDir + "/.hbase-snapshot/.tmp"), conf)); + assertFalse( + SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("file:" + hbsaeDir + "/"), conf)); + assertFalse(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("file:" + hbsaeDir + "/.hbase-snapshotdir"), conf)); + 
assertFalse(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("file:" + hbsaeDir + "/.hbase-snapshot"), conf)); + assertFalse( + SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("file:/.hbase-snapshot"), conf)); + assertFalse(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("file:/.hbase-snapshot/.tmp"), conf)); + assertFalse( + SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("file:" + hbsaeDir), conf)); + assertTrue(SnapshotDescriptionUtils + .isWithinDefaultWorkingDir(new Path("file:" + hbsaeDir + "/.hbase-snapshot/.tmp"), conf)); assertTrue(SnapshotDescriptionUtils.isWithinDefaultWorkingDir( new Path("file:" + hbsaeDir + "/.hbase-snapshot/.tmp/snapshot"), conf)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java index 775728f4d028..de2d4d674ee6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotManifest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestSnapshotManifest { @ClassRule @@ -79,7 +79,7 @@ public void setup() throws Exception { conf = TEST_UTIL.getConfiguration(); SnapshotTestingUtils.SnapshotMock snapshotMock = - new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir); + new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir); builder = snapshotMock.createSnapshotV2("snapshot", TABLE_NAME_STR, 0); snapshotDir = builder.commit(); snapshotDesc = builder.getSnapshotDescription(); @@ -87,7 +87,7 @@ public void setup() throws Exception { @After public void tearDown() throws Exception { - fs.delete(rootDir,true); + fs.delete(rootDir, true); } @Test @@ -128,8 +128,7 @@ public void testReadSnapshotRegionManifest() throws IOException { } private Path createDataManifest() throws IOException { - SnapshotDataManifest.Builder dataManifestBuilder = - SnapshotDataManifest.newBuilder(); + SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder(); byte[] startKey = null; byte[] stopKey = null; for (int i = 1; i <= TEST_NUM_REGIONS; i++) { @@ -138,7 +137,7 @@ private Path createDataManifest() throws IOException { SnapshotRegionManifest.Builder dataRegionManifestBuilder = SnapshotRegionManifest.newBuilder(); - for (ColumnFamilyDescriptor hcd: builder.getTableDescriptor().getColumnFamilies()) { + for (ColumnFamilyDescriptor hcd : builder.getTableDescriptor().getColumnFamilies()) { SnapshotRegionManifest.FamilyFiles.Builder family = SnapshotRegionManifest.FamilyFiles.newBuilder(); family.setFamilyName(UnsafeByteOperations.unsafeWrap(hcd.getName())); @@ -158,8 +157,7 @@ private Path createDataManifest() throws IOException { startKey = stopKey; } - dataManifestBuilder - .setTableSchema(ProtobufUtil.toTableSchema(builder.getTableDescriptor())); + dataManifestBuilder.setTableSchema(ProtobufUtil.toTableSchema(builder.getTableDescriptor())); SnapshotDataManifest dataManifest = 
dataManifestBuilder.build(); return writeDataManifest(dataManifest); @@ -172,13 +170,13 @@ private Path createRegionManifest() throws IOException { SnapshotRegionManifest.Builder dataRegionManifestBuilder = SnapshotRegionManifest.newBuilder(); dataRegionManifestBuilder.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo)); - for (ColumnFamilyDescriptor hcd: builder.getTableDescriptor().getColumnFamilies()) { + for (ColumnFamilyDescriptor hcd : builder.getTableDescriptor().getColumnFamilies()) { SnapshotRegionManifest.FamilyFiles.Builder family = SnapshotRegionManifest.FamilyFiles.newBuilder(); family.setFamilyName(UnsafeByteOperations.unsafeWrap(hcd.getName())); for (int j = 0; j < TEST_NUM_REGIONFILES; ++j) { SnapshotRegionManifest.StoreFile.Builder sfManifest = - SnapshotRegionManifest.StoreFile.newBuilder(); + SnapshotRegionManifest.StoreFile.newBuilder(); sfManifest.setName(String.format("%064d", j)); sfManifest.setFileSize(j * 1024); family.addStoreFiles(sfManifest.build()); @@ -200,8 +198,7 @@ private Path createRegionManifest() throws IOException { return regionPath; } - private Path writeDataManifest(final SnapshotDataManifest manifest) - throws IOException { + private Path writeDataManifest(final SnapshotDataManifest manifest) throws IOException { Path dataRegionPath = new Path(snapshotDir, SnapshotManifest.DATA_MANIFEST_NAME); FSDataOutputStream stream = fs.create(dataRegionPath); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotStoreFileSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotStoreFileSize.java index 8f8905fdf4a9..90c11c6d7e31 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotStoreFileSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotStoreFileSize.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -48,8 +50,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * Validate if storefile length match - * both snapshop manifest and filesystem. + * Validate if storefile length match both snapshop manifest and filesystem. 
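For context on the check this next test performs: the snapshot manifest records a size for every referenced store file, which can be compared against what the filesystem reports. A rough sketch of that comparison, not taken from the patch; it assumes a SnapshotManifest opened as in the test and a caller-supplied map of on-disk lengths (e.g. collected with FileSystem#getFileStatus(...).getLen()).

import java.util.Map;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;

public final class ManifestSizeCheck {
  // sizesOnDisk: hypothetical map of store file name -> length observed on the filesystem.
  public static void assertSizesMatch(SnapshotManifest manifest, Map<String, Long> sizesOnDisk) {
    for (SnapshotRegionManifest regionManifest : manifest.getRegionManifests()) {
      for (SnapshotRegionManifest.FamilyFiles family : regionManifest.getFamilyFilesList()) {
        for (SnapshotRegionManifest.StoreFile storeFile : family.getStoreFilesList()) {
          Long onDisk = sizesOnDisk.get(storeFile.getName());
          if (onDisk == null || onDisk.longValue() != storeFile.getFileSize()) {
            throw new AssertionError("Size mismatch for " + storeFile.getName());
          }
        }
      }
    }
  }
}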
*/ @Category({ MasterTests.class, MediumTests.class }) public class TestSnapshotStoreFileSize { @@ -91,8 +92,8 @@ public void testIsStoreFileSizeMatchFilesystemAndManifest() throws IOException { Map storeFileInfoFromFS = new HashMap(); String storeFileName = ""; long storeFilesize = 0L; - Path snapshotDir = SnapshotDescriptionUtils - .getCompletedSnapshotDir(SNAPSHOT_NAME, UTIL.getDefaultRootDirPath()); + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(SNAPSHOT_NAME, + UTIL.getDefaultRootDirPath()); SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest snaphotManifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); List regionManifest = snaphotManifest.getRegionManifests(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotWhenChoreCleaning.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotWhenChoreCleaning.java index 6414b9803feb..4f7ece2b5f3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotWhenChoreCleaning.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotWhenChoreCleaning.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.IOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/MapreduceTestingShim.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/MapreduceTestingShim.java index 54b64282f02e..838ce8d301de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/MapreduceTestingShim.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/MapreduceTestingShim.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MiniMRCluster; @@ -30,10 +29,8 @@ import org.apache.hadoop.mapreduce.JobID; /** - * This class provides shims for HBase to interact with the Hadoop 1.0.x and the - * Hadoop 0.23.x series. - * - * NOTE: No testing done against 0.22.x, or 0.21.x. + * This class provides shims for HBase to interact with the Hadoop 1.0.x and the Hadoop 0.23.x + * series. NOTE: No testing done against 0.22.x, or 0.21.x. 
*/ abstract public class MapreduceTestingShim { private static MapreduceTestingShim instance; @@ -42,16 +39,14 @@ abstract public class MapreduceTestingShim { static { try { // This class exists in hadoop 0.22+ but not in Hadoop 20.x/1.x - Class c = Class - .forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl"); + Class c = Class.forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl"); instance = new MapreduceV2Shim(); } catch (Exception e) { instance = new MapreduceV1Shim(); } } - abstract public JobContext newJobContext(Configuration jobConf) - throws IOException; + abstract public JobContext newJobContext(Configuration jobConf) throws IOException; abstract public Job newJob(Configuration conf) throws IOException; @@ -59,8 +54,7 @@ abstract public JobContext newJobContext(Configuration jobConf) abstract public String obtainMROutputDirProp(); - public static JobContext createJobContext(Configuration jobConf) - throws IOException { + public static JobContext createJobContext(Configuration jobConf) throws IOException { return instance.newJobContext(jobConf); } @@ -101,8 +95,7 @@ public Job newJob(Configuration conf) throws IOException { c = Job.class.getConstructor(Configuration.class); return c.newInstance(conf); } catch (Exception e) { - throw new IllegalStateException( - "Failed to instantiate new Job(conf)", e); + throw new IllegalStateException("Failed to instantiate new Job(conf)", e); } } @@ -112,9 +105,9 @@ public JobConf obtainJobConf(MiniMRCluster cluster) { try { Object runner = cluster.getJobTrackerRunner(); Method meth = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam); - Object tracker = meth.invoke(runner, new Object []{}); + Object tracker = meth.invoke(runner, new Object[] {}); Method m = tracker.getClass().getDeclaredMethod("getConf", emptyParam); - return (JobConf) m.invoke(tracker, new Object []{}); + return (JobConf) m.invoke(tracker, new Object[] {}); } catch (NoSuchMethodException nsme) { return null; } catch (InvocationTargetException ite) { @@ -145,8 +138,7 @@ public Job newJob(Configuration jobConf) { return (Job) m.invoke(null, jobConf); // static method, then arg } catch (Exception e) { e.printStackTrace(); - throw new IllegalStateException( - "Failed to return from Job.getInstance(jobConf)"); + throw new IllegalStateException("Failed to return from Job.getInstance(jobConf)"); } } @@ -154,7 +146,7 @@ public Job newJob(Configuration jobConf) { public JobConf obtainJobConf(MiniMRCluster cluster) { try { Method meth = MiniMRCluster.class.getMethod("getJobTrackerConf", emptyParam); - return (JobConf) meth.invoke(cluster, new Object []{}); + return (JobConf) meth.invoke(cluster, new Object[] {}); } catch (NoSuchMethodException nsme) { return null; } catch (InvocationTargetException ite) { @@ -167,7 +159,7 @@ public JobConf obtainJobConf(MiniMRCluster cluster) { @Override public String obtainMROutputDirProp() { // This is a copy of o.a.h.mapreduce.lib.output.FileOutputFormat.OUTDIR - // from Hadoop 0.23.x. If we use the source directly we break the hadoop 1.x compile. + // from Hadoop 0.23.x. If we use the source directly we break the hadoop 1.x compile. 
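The shim above picks its implementation by probing for a class that only exists in newer Hadoop releases. A self-contained sketch of that detection pattern follows; the probed class name matches the one used above, while the shim class names and the legacy property value are assumptions for illustration, not taken from the patch.

public abstract class VersionShim {
  private static final VersionShim INSTANCE;

  static {
    VersionShim chosen;
    try {
      // Present only in the newer API line; absence means we are on the legacy line.
      Class.forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl");
      chosen = new NewApiShim();
    } catch (ClassNotFoundException e) {
      chosen = new LegacyApiShim();
    }
    INSTANCE = chosen;
  }

  public static VersionShim instance() {
    return INSTANCE;
  }

  public abstract String outputDirProperty();

  private static final class NewApiShim extends VersionShim {
    @Override
    public String outputDirProperty() {
      return "mapreduce.output.fileoutputformat.outputdir";
    }
  }

  private static final class LegacyApiShim extends VersionShim {
    @Override
    public String outputDirProperty() {
      return "mapred.output.dir"; // assumed legacy key, shown for illustration only
    }
  }
}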
return "mapreduce.output.fileoutputformat.outputdir"; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java index ffc09bccd6be..fc5119a8ebb3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -81,7 +81,7 @@ public class TestBulkLoadHFiles { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBulkLoadHFiles.class); + HBaseClassTestRule.forClass(TestBulkLoadHFiles.class); @Rule public TestName tn = new TestName(); @@ -94,7 +94,7 @@ public class TestBulkLoadHFiles { static final int MAX_FILES_PER_REGION_PER_FAMILY = 4; private static final byte[][] SPLIT_KEYS = - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ppp") }; + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ppp") }; static HBaseTestingUtil util = new HBaseTestingUtil(); @@ -124,7 +124,7 @@ public static void tearDownAfterClass() throws Exception { public void testSimpleLoadWithMap() throws Exception { runTest("testSimpleLoadWithMap", BloomType.NONE, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }, true); } @@ -135,7 +135,7 @@ public void testSimpleLoadWithMap() throws Exception { public void testSimpleLoad() throws Exception { runTest("testSimpleLoad", BloomType.NONE, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }); + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }); } @Test @@ -144,7 +144,7 @@ public void testSimpleLoadWithFileCopy() throws Exception { final byte[] TABLE_NAME = Bytes.toBytes("mytable_" + testName); runTest(testName, buildHTD(TableName.valueOf(TABLE_NAME), BloomType.NONE), false, null, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }, false, true, 2); } @@ -155,7 +155,7 @@ public void testSimpleLoadWithFileCopy() throws Exception { public void testRegionCrossingLoad() throws Exception { runTest("testRegionCrossingLoad", BloomType.NONE, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") }, - new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, }); + new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, }); } /** @@ -165,7 +165,7 @@ public void testRegionCrossingLoad() throws Exception { public void testRegionCrossingRowBloom() throws Exception { runTest("testRegionCrossingLoadRowBloom", BloomType.ROW, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") }, - new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, }); + new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, }); } /** @@ -175,7 +175,7 @@ public void testRegionCrossingRowBloom() throws Exception { public void testRegionCrossingRowColBloom() throws Exception { runTest("testRegionCrossingLoadRowColBloom", BloomType.ROWCOL, new byte[][][] { new 
byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") }, - new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, }); + new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, }); } /** @@ -186,9 +186,9 @@ public void testRegionCrossingRowColBloom() throws Exception { public void testSimpleHFileSplit() throws Exception { runTest("testHFileSplit", BloomType.NONE, new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"), Bytes.toBytes("jjj"), - Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), }, + Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), }, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("lll") }, - new byte[][] { Bytes.toBytes("mmm"), Bytes.toBytes("zzz") }, }); + new byte[][] { Bytes.toBytes("mmm"), Bytes.toBytes("zzz") }, }); } /** @@ -222,27 +222,27 @@ public void testRegionCrossingHFileSplitRowColBloom() throws Exception { public void testSplitALot() throws Exception { runTest("testSplitALot", BloomType.NONE, new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), - Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), Bytes.toBytes("ggg"), - Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), - Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), - Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), - Bytes.toBytes("vvv"), Bytes.toBytes("zzz"), }, + Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), Bytes.toBytes("ggg"), + Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), + Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), + Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), + Bytes.toBytes("vvv"), Bytes.toBytes("zzz"), }, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("zzz") }, }); } private void testRegionCrossingHFileSplit(BloomType bloomType) throws Exception { runTest("testHFileSplit" + bloomType + "Bloom", bloomType, new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"), Bytes.toBytes("jjj"), - Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), }, + Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), }, new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") }, - new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, }); + new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, }); } private TableDescriptor buildHTD(TableName tableName, BloomType bloomType) { return TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBloomFilterType(bloomType).build()) - .build(); + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBloomFilterType(bloomType).build()) + .build(); } private void runTest(String testName, BloomType bloomType, byte[][][] hfileRanges) @@ -387,15 +387,15 @@ public static int loadHFiles(String testName, TableDescriptor htd, HBaseTestingU } private void runTest(String testName, TableDescriptor htd, boolean preCreateTable, - byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap, boolean copyFiles, int depth) - throws Exception { + byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap, boolean copyFiles, int depth) + throws Exception { loadHFiles(testName, htd, util, FAMILY, QUALIFIER, preCreateTable, tableSplitKeys, hfileRanges, useMap, 
true, copyFiles, 0, 1000, depth); final TableName tableName = htd.getTableName(); // verify staging folder has been cleaned up Path stagingBasePath = new Path(CommonFSUtils.getRootDir(util.getConfiguration()), - HConstants.BULKLOAD_STAGING_DIR_NAME); + HConstants.BULKLOAD_STAGING_DIR_NAME); FileSystem fs = util.getTestFileSystem(); if (fs.exists(stagingBasePath)) { FileStatus[] files = fs.listStatus(stagingBasePath); @@ -421,7 +421,7 @@ public void testTagsSurviveBulkLoadSplit() throws Exception { Path familyDir = new Path(dir, Bytes.toString(FAMILY)); // table has these split points byte[][] tableSplitKeys = new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"), - Bytes.toBytes("jjj"), Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), }; + Bytes.toBytes("jjj"), Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), }; // creating an hfile that has values that span the split points. byte[] from = Bytes.toBytes("ddd"); @@ -454,16 +454,16 @@ public void testTagsSurviveBulkLoadSplit() throws Exception { public void testNonexistentColumnFamilyLoad() throws Exception { String testName = tn.getMethodName(); byte[][][] hFileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("ccc") }, - new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }; + new byte[][][] { new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("ccc") }, + new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, }; byte[] TABLE = Bytes.toBytes("mytable_" + testName); // set real family name to upper case in purpose to simulate the case that // family name in HFiles is invalid TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder - .of(Bytes.toBytes(new String(FAMILY).toUpperCase(Locale.ROOT)))) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder + .of(Bytes.toBytes(new String(FAMILY).toUpperCase(Locale.ROOT)))) + .build(); try { runTest(testName, htd, true, SPLIT_KEYS, hFileRanges, false, false, 2); @@ -473,8 +473,8 @@ public void testNonexistentColumnFamilyLoad() throws Exception { // further check whether the exception message is correct String errMsg = e.getMessage(); assertTrue( - "Incorrect exception message, expected message: [" + EXPECTED_MSG_FOR_NON_EXISTING_FAMILY + - "], current message: [" + errMsg + "]", + "Incorrect exception message, expected message: [" + EXPECTED_MSG_FOR_NON_EXISTING_FAMILY + + "], current message: [" + errMsg + "]", errMsg.contains(EXPECTED_MSG_FOR_NON_EXISTING_FAMILY)); } } @@ -592,7 +592,7 @@ private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadE FileSystem fs = util.getTestFileSystem(); Path testIn = new Path(dir, "testhfile"); ColumnFamilyDescriptor familyDesc = - ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setDataBlockEncoding(cfEncoding).build(); + ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setDataBlockEncoding(cfEncoding).build(); HFileTestUtil.createHFileWithDataBlockEncoding(util.getConfiguration(), fs, testIn, bulkloadEncoding, FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000); @@ -610,7 +610,7 @@ private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadE private int verifyHFile(Path p) throws IOException { Configuration conf = util.getConfiguration(); HFile.Reader reader = - HFile.createReader(p.getFileSystem(conf), p, new CacheConfig(conf), true, conf); + HFile.createReader(p.getFileSystem(conf), p, new CacheConfig(conf), true, conf); HFileScanner scanner 
= reader.getScanner(conf, false, false); scanner.seekTo(); int count = 0; @@ -708,11 +708,11 @@ public void testLoadTooMayHFiles() throws Exception { try { BulkLoadHFiles.create(util.getConfiguration()) - .bulkLoad(TableName.valueOf("mytable_testLoadTooMayHFiles"), dir); + .bulkLoad(TableName.valueOf("mytable_testLoadTooMayHFiles"), dir); fail("Bulk loading too many files should fail"); } catch (IOException ie) { assertTrue(ie.getMessage() - .contains("Trying to load more than " + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles")); + .contains("Trying to load more than " + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles")); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java index 7c5f1139ff0d..59509c6188c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -86,7 +86,7 @@ public class TestBulkLoadHFilesSplitRecovery { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBulkLoadHFilesSplitRecovery.class); + HBaseClassTestRule.forClass(TestBulkLoadHFilesSplitRecovery.class); private static final Logger LOG = LoggerFactory.getLogger(TestHRegionServerBulkLoad.class); @@ -134,7 +134,7 @@ public static void buildHFiles(FileSystem fs, Path dir, int value) throws IOExce private TableDescriptor createTableDesc(TableName name, int cfs) { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name); IntStream.range(0, cfs).mapToObj(i -> ColumnFamilyDescriptorBuilder.of(family(i))) - .forEachOrdered(builder::setColumnFamily); + .forEachOrdered(builder::setColumnFamily); return builder.build(); } @@ -249,7 +249,7 @@ void assertExpectedTable(TableName table, int count, int value) throws IOExcepti int i = 0; for (Result r; (r = sr.next()) != null;) { r.getNoVersionMap().values().stream().flatMap(m -> m.values().stream()) - .forEach(v -> assertArrayEquals(value(value), v)); + .forEach(v -> assertArrayEquals(value(value), v)); i++; } assertEquals(count, i); @@ -266,9 +266,8 @@ private static CompletableFuture failedFuture(Throwable error) { private static AsyncClusterConnection mockAndInjectError(AsyncClusterConnection conn) { AsyncClusterConnection errConn = spy(conn); - doReturn(failedFuture(new IOException("injecting bulk load error"))).when(errConn) - .bulkLoad(any(), anyList(), any(), anyBoolean(), any(), any(), anyBoolean(), anyList(), - anyBoolean()); + doReturn(failedFuture(new IOException("injecting bulk load error"))).when(errConn).bulkLoad( + any(), anyList(), any(), anyBoolean(), any(), any(), anyBoolean(), anyList(), anyBoolean()); return errConn; } @@ -289,7 +288,7 @@ protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, Deque queue, Multimap regionGroups, boolean copyFiles, Map item2RegionMap) throws IOException { AsyncClusterConnection c = - attemptedCalls.incrementAndGet() == 1 ? mockAndInjectError(conn) : conn; + attemptedCalls.incrementAndGet() == 1 ? 
mockAndInjectError(conn) : conn; super.bulkLoadPhase(c, tableName, queue, regionGroups, copyFiles, item2RegionMap); } }; @@ -392,7 +391,7 @@ protected Pair, String> groupOrSplit(AsyncClusterConnection TableName tableName, Multimap regionGroups, LoadQueueItem item, List> startEndKeys) throws IOException { Pair, String> lqis = - super.groupOrSplit(conn, tableName, regionGroups, item, startEndKeys); + super.groupOrSplit(conn, tableName, regionGroups, item, startEndKeys); if (lqis != null && lqis.getFirst() != null) { countedLqis.addAndGet(lqis.getFirst().size()); } @@ -442,8 +441,8 @@ protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, public void testSplitTmpFileCleanUp() throws Exception { final TableName table = TableName.valueOf(name.getMethodName()); byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000010"), - Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"), Bytes.toBytes("row_00000040"), - Bytes.toBytes("row_00000050") }; + Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"), Bytes.toBytes("row_00000040"), + Bytes.toBytes("row_00000050") }; setupTableWithSplitkeys(table, 10, SPLIT_KEYS); BulkLoadHFiles loader = BulkLoadHFiles.create(util.getConfiguration()); @@ -494,10 +493,10 @@ protected Pair, String> groupOrSplit(AsyncClusterConnection } /** - * We are testing a split after initial validation but before the atomic bulk load call. - * We cannot use presplitting to test this path, so we actually inject a - * split just before the atomic region load. However, we will pass null item2RegionMap - * and that should not affect the bulk load behavior. + * We are testing a split after initial validation but before the atomic bulk load call. We cannot + * use presplitting to test this path, so we actually inject a split just before the atomic region + * load. However, we will pass null item2RegionMap and that should not affect the bulk load + * behavior. */ @Test public void testSplitWhileBulkLoadPhaseWithoutItemMap() throws Exception { @@ -513,9 +512,9 @@ public void testSplitWhileBulkLoadPhaseWithoutItemMap() throws Exception { @Override protected void bulkLoadPhase(final AsyncClusterConnection conn, final TableName tableName, - final Deque queue, final Multimap regionGroups, - final boolean copyFiles, - final Map item2RegionMap) throws IOException { + final Deque queue, final Multimap regionGroups, + final boolean copyFiles, final Map item2RegionMap) + throws IOException { int i = attemptedCalls.incrementAndGet(); if (i == 1) { @@ -541,7 +540,6 @@ protected void bulkLoadPhase(final AsyncClusterConnection conn, final TableName assertExpectedTable(table, ROWCOUNT, 2); } - /** * Checks that all columns have the expected value and that there is the expected number of rows. 
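The split-recovery tests above exercise BulkLoadHFiles through subclass hooks (bulkLoadPhase, groupOrSplit). The plain client-side entry point they build on looks roughly like the sketch below, assuming the HFiles have already been written under one subdirectory per column family; the directory path and table name are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Expected layout: hfileDir/<family>/<hfile>, as produced by HFileOutputFormat2
    // or by the test helpers above.
    Path hfileDir = new Path("/tmp/hfiles-to-load");
    BulkLoadHFiles.create(conf).bulkLoad(TableName.valueOf("mytable"), hfileDir);
  }
}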
*/ @@ -553,7 +551,7 @@ void assertExpectedTable(final Connection connection, TableName table, int count int i = 0; for (Result r; (r = sr.next()) != null;) { r.getNoVersionMap().values().stream().flatMap(m -> m.values().stream()) - .forEach(v -> assertArrayEquals(value(value), v)); + .forEach(v -> assertArrayEquals(value(value), v)); i++; } assertEquals(count, i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryStatusServlet.java index 56c02a52fad0..d8985c70256a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryStatusServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

          - * http://www.apache.org/licenses/LICENSE-2.0 - *
          + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -31,24 +31,21 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category({ SmallTests.class }) public class TestCanaryStatusServlet { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCanaryStatusServlet.class); + HBaseClassTestRule.forClass(TestCanaryStatusServlet.class); @Test public void testFailures() throws IOException { CanaryTool.RegionStdOutSink regionStdOutSink = new CanaryTool.RegionStdOutSink(); - ServerName serverName1 = ServerName.valueOf("staging-st04.server:22600", - 1584180761635L); + ServerName serverName1 = ServerName.valueOf("staging-st04.server:22600", 1584180761635L); TableName fakeTableName1 = TableName.valueOf("fakeTableName1"); RegionInfo regionInfo1 = RegionInfoBuilder.newBuilder(fakeTableName1).build(); - ServerName serverName2 = ServerName.valueOf("staging-st05.server:22600", - 1584180761636L); + ServerName serverName2 = ServerName.valueOf("staging-st05.server:22600", 1584180761636L); TableName fakeTableName2 = TableName.valueOf("fakeTableName2"); RegionInfo regionInfo2 = RegionInfoBuilder.newBuilder(fakeTableName2).build(); @@ -69,8 +66,7 @@ public void testFailures() throws IOException { public void testReadFailuresOnly() throws IOException { CanaryTool.RegionStdOutSink regionStdOutSink = new CanaryTool.RegionStdOutSink(); - ServerName serverName1 = ServerName.valueOf("staging-st04.server:22600", - 1584180761635L); + ServerName serverName1 = ServerName.valueOf("staging-st04.server:22600", 1584180761635L); TableName fakeTableName1 = TableName.valueOf("fakeTableName1"); RegionInfo regionInfo1 = RegionInfoBuilder.newBuilder(fakeTableName1).build(); @@ -87,8 +83,7 @@ public void testReadFailuresOnly() throws IOException { public void testWriteFailuresOnly() throws IOException { CanaryTool.RegionStdOutSink regionStdOutSink = new CanaryTool.RegionStdOutSink(); - ServerName serverName2 = ServerName.valueOf("staging-st05.server:22600", - 1584180761636L); + ServerName serverName2 = ServerName.valueOf("staging-st05.server:22600", 1584180761636L); TableName fakeTableName2 = TableName.valueOf("fakeTableName2"); RegionInfo regionInfo2 = RegionInfoBuilder.newBuilder(fakeTableName2).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java index 2de5575f8d2a..4fac7ad1449b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java @@ -66,7 +66,7 @@ public class TestCanaryTool { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCanaryTool.class); + HBaseClassTestRule.forClass(TestCanaryTool.class); private HBaseTestingUtil testingUtility; private static final byte[] FAMILY = Bytes.toBytes("f"); @@ -85,14 +85,14 @@ public void setUp() throws Exception { when(mockAppender.getName()).thenReturn("mockAppender"); when(mockAppender.isStarted()).thenReturn(true); ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger("org.apache.hadoop.hbase")).addAppender(mockAppender); + 
.getLogger("org.apache.hadoop.hbase")).addAppender(mockAppender); } @After public void tearDown() throws Exception { testingUtility.shutdownMiniCluster(); ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger("org.apache.hadoop.hbase")).removeAppender(mockAppender); + .getLogger("org.apache.hadoop.hbase")).removeAppender(mockAppender); } @Test @@ -104,7 +104,7 @@ public void testBasicZookeeperCanaryWorks() throws Exception { @Test public void testZookeeperCanaryPermittedFailuresArgumentWorks() throws Exception { final String[] args = - { "-t", "10000", "-zookeeper", "-treatFailureAsError", "-permittedZookeeperFailures", "1" }; + { "-t", "10000", "-zookeeper", "-treatFailureAsError", "-permittedZookeeperFailures", "1" }; testZookeeperCanaryWithArgs(args); } @@ -154,7 +154,7 @@ public void testCanaryRegionTaskReadAllCF() throws Exception { // the test table has two column family. If readAllCF set true, // we expect read count is double of region count int expectedReadCount = - readAllCF ? 2 * sink.getTotalExpectedRegions() : sink.getTotalExpectedRegions(); + readAllCF ? 2 * sink.getTotalExpectedRegions() : sink.getTotalExpectedRegions(); assertEquals("canary region success count should equal total expected read count", expectedReadCount, sink.getReadSuccessCount()); Map> regionMap = sink.getRegionMap(); @@ -252,7 +252,7 @@ public void testCanaryRegionTaskResult() throws Exception { @Test public void testReadTableTimeouts() throws Exception { final TableName[] tableNames = new TableName[] { TableName.valueOf(name.getMethodName() + "1"), - TableName.valueOf(name.getMethodName() + "2") }; + TableName.valueOf(name.getMethodName() + "2") }; // Create 2 test tables. for (int j = 0; j < 2; j++) { Table table = testingUtility.createTable(tableNames[j], new byte[][] { FAMILY }); @@ -267,10 +267,10 @@ public void testReadTableTimeouts() throws Exception { ExecutorService executor = new ScheduledThreadPoolExecutor(1); CanaryTool.RegionStdOutSink sink = spy(new CanaryTool.RegionStdOutSink()); CanaryTool canary = new CanaryTool(executor, sink); - String configuredTimeoutStr = tableNames[0].getNameAsString() + "=" + Long.MAX_VALUE + "," + - tableNames[1].getNameAsString() + "=0"; + String configuredTimeoutStr = tableNames[0].getNameAsString() + "=" + Long.MAX_VALUE + "," + + tableNames[1].getNameAsString() + "=0"; String[] args = { "-readTableTimeouts", configuredTimeoutStr, name.getMethodName() + "1", - name.getMethodName() + "2" }; + name.getMethodName() + "2" }; assertEquals(0, ToolRunner.run(testingUtility.getConfiguration(), canary, args)); verify(sink, times(tableNames.length)).initializeAndGetReadLatencyForTable(isA(String.class)); for (int i = 0; i < 2; i++) { @@ -281,20 +281,20 @@ public void testReadTableTimeouts() throws Exception { } // One table's timeout is set for 0 ms and thus, should lead to an error. 
verify(mockAppender, times(1)) - .append(argThat(new ArgumentMatcher() { - @Override - public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { - return argument.getMessage().getFormattedMessage() - .contains("exceeded the configured read timeout."); - } - })); + .append(argThat(new ArgumentMatcher() { + @Override + public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { + return argument.getMessage().getFormattedMessage() + .contains("exceeded the configured read timeout."); + } + })); verify(mockAppender, times(2)) - .append(argThat(new ArgumentMatcher() { - @Override - public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { - return argument.getMessage().getFormattedMessage().contains("Configured read timeout"); - } - })); + .append(argThat(new ArgumentMatcher() { + @Override + public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { + return argument.getMessage().getFormattedMessage().contains("Configured read timeout"); + } + })); } @Test @@ -307,12 +307,12 @@ public void testWriteTableTimeout() throws Exception { assertNotEquals("verify non-null write latency", null, sink.getWriteLatency()); assertNotEquals("verify non-zero write latency", 0L, sink.getWriteLatency()); verify(mockAppender, times(1)) - .append(argThat(new ArgumentMatcher() { - @Override - public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { - return argument.getMessage().getFormattedMessage().contains("Configured write timeout"); - } - })); + .append(argThat(new ArgumentMatcher() { + @Override + public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { + return argument.getMessage().getFormattedMessage().contains("Configured write timeout"); + } + })); } // no table created, so there should be no regions @@ -320,13 +320,13 @@ public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { public void testRegionserverNoRegions() throws Exception { runRegionserverCanary(); verify(mockAppender) - .append(argThat(new ArgumentMatcher() { - @Override - public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { - return argument.getMessage().getFormattedMessage() - .contains("Regionserver not serving any regions"); - } - })); + .append(argThat(new ArgumentMatcher() { + @Override + public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { + return argument.getMessage().getFormattedMessage() + .contains("Regionserver not serving any regions"); + } + })); } // by creating a table, there shouldn't be any region servers not serving any regions @@ -336,13 +336,13 @@ public void testRegionserverWithRegions() throws Exception { testingUtility.createTable(tableName, new byte[][] { FAMILY }); runRegionserverCanary(); verify(mockAppender, never()) - .append(argThat(new ArgumentMatcher() { - @Override - public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { - return argument.getMessage().getFormattedMessage() - .contains("Regionserver not serving any regions"); - } - })); + .append(argThat(new ArgumentMatcher() { + @Override + public boolean matches(org.apache.logging.log4j.core.LogEvent argument) { + return argument.getMessage().getFormattedMessage() + .contains("Regionserver not serving any regions"); + } + })); } @Test @@ -361,7 +361,7 @@ public void testRawScanConfig() throws Exception { CanaryTool canary = new CanaryTool(executor, sink); String[] args = { "-t", "10000", name.getMethodName() }; org.apache.hadoop.conf.Configuration conf = - new 
org.apache.hadoop.conf.Configuration(testingUtility.getConfiguration()); + new org.apache.hadoop.conf.Configuration(testingUtility.getConfiguration()); conf.setBoolean(HConstants.HBASE_CANARY_READ_RAW_SCAN_KEY, true); assertEquals(0, ToolRunner.run(conf, canary, args)); verify(sink, atLeastOnce()).publishReadTiming(isA(ServerName.class), isA(RegionInfo.class), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFiles.java index 05785b432617..06451fbe551c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,9 +31,9 @@ import org.junit.experimental.categories.Category; /** - * Reruns TestBulkLoadHFiles using BulkLoadHFiles in secure mode. This suite is unable - * to verify the security handoff/turnover as miniCluster is running as system user thus has root - * privileges and delegation tokens don't seem to work on miniDFS. + * Reruns TestBulkLoadHFiles using BulkLoadHFiles in secure mode. This suite is unable to verify the + * security handoff/turnover as miniCluster is running as system user thus has root privileges and + * delegation tokens don't seem to work on miniDFS. *

          * Thus SecureBulkload can only be completely verified by running integration tests against a secure * cluster. This suite is still invaluable as it verifies the other mechanisms that need to be @@ -44,7 +44,7 @@ public class TestSecureBulkLoadHFiles extends TestBulkLoadHFiles { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureBulkLoadHFiles.class); + HBaseClassTestRule.forClass(TestSecureBulkLoadHFiles.class); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFilesSplitRecovery.java index 94bb925563bd..2f19ede8fcdc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFilesSplitRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureBulkLoadHFilesSplitRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,9 +31,9 @@ import org.junit.experimental.categories.Category; /** - * Reruns TestBulkLoadHFilesSplitRecovery using BulkLoadHFiles in secure mode. - * This suite is unable to verify the security handoff/turnove as miniCluster is running as system - * user thus has root privileges and delegation tokens don't seem to work on miniDFS. + * Reruns TestBulkLoadHFilesSplitRecovery using BulkLoadHFiles in secure mode. This suite is unable + * to verify the security handoff/turnove as miniCluster is running as system user thus has root + * privileges and delegation tokens don't seem to work on miniDFS. *

          * Thus SecureBulkload can only be completely verified by running integration tests against a secure * cluster. This suite is still invaluable as it verifies the other mechanisms that need to be @@ -44,7 +44,7 @@ public class TestSecureBulkLoadHFilesSplitRecovery extends TestBulkLoadHFilesSpl @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSecureBulkLoadHFilesSplitRecovery.class); + HBaseClassTestRule.forClass(TestSecureBulkLoadHFilesSplitRecovery.class); // This "overrides" the parent static method // make sure they are in sync diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidatorTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidatorTest.java index b05e48f16391..e55c8f51c380 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidatorTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/coprocessor/CoprocessorValidatorTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.tool.coprocessor; import static org.junit.Assert.assertEquals; @@ -101,17 +100,16 @@ public void testNoSuchClass() throws IOException { assertEquals(Severity.ERROR, violation.getSeverity()); String stackTrace = Throwables.getStackTraceAsString(violation.getThrowable()); - assertTrue(stackTrace.contains("java.lang.ClassNotFoundException: " + - "org.apache.hadoop.hbase.tool.coprocessor.CoprocessorValidatorTest$NoSuchClass")); + assertTrue(stackTrace.contains("java.lang.ClassNotFoundException: " + + "org.apache.hadoop.hbase.tool.coprocessor.CoprocessorValidatorTest$NoSuchClass")); } /* - * In this test case, we are validating MissingClass coprocessor, which - * references a missing class. With a special classloader, we prevent that - * class to be loaded at runtime. It simulates similar cases where a class - * is no more on our classpath. - * E.g. org.apache.hadoop.hbase.regionserver.wal.WALEdit was moved to - * org.apache.hadoop.hbase.wal, so class loading will fail on 2.0. + * In this test case, we are validating MissingClass coprocessor, which references a missing + * class. With a special classloader, we prevent that class to be loaded at runtime. It simulates + * similar cases where a class is no more on our classpath. E.g. + * org.apache.hadoop.hbase.regionserver.wal.WALEdit was moved to org.apache.hadoop.hbase.wal, so + * class loading will fail on 2.0. 
*/ private static class MissingClass { } @@ -140,8 +138,8 @@ public Class loadClass(String name) throws ClassNotFoundException { @Test public void testMissingClass() throws IOException { MissingClassClassLoader missingClassClassLoader = new MissingClassClassLoader(); - List violations = validateClass(missingClassClassLoader, - "MissingClassObserver"); + List violations = + validateClass(missingClassClassLoader, "MissingClassObserver"); assertEquals(1, violations.size()); CoprocessorViolation violation = violations.get(0); @@ -149,8 +147,8 @@ public void testMissingClass() throws IOException { assertEquals(Severity.ERROR, violation.getSeverity()); String stackTrace = Throwables.getStackTraceAsString(violation.getThrowable()); - assertTrue(stackTrace.contains("java.lang.ClassNotFoundException: " + - "org.apache.hadoop.hbase.tool.coprocessor.CoprocessorValidatorTest$MissingClass")); + assertTrue(stackTrace.contains("java.lang.ClassNotFoundException: " + + "org.apache.hadoop.hbase.tool.coprocessor.CoprocessorValidatorTest$MissingClass")); } /** @@ -161,7 +159,7 @@ public void testMissingClass() throws IOException { @SuppressWarnings("unused") private static class ObsoleteMethodObserver /* implements MasterObserver */ { public void preEnableTableHandler(ObserverContext ctx, - TableName tablName) throws IOException { + TableName tablName) throws IOException { } } @@ -187,8 +185,7 @@ private List validateTable(String jarFile, String classNam doReturn(tableDescriptors).when(admin).listTableDescriptors(pattern); CoprocessorDescriptor coprocessorDescriptor = mock(CoprocessorDescriptor.class); - List coprocessorDescriptors = - Lists.newArrayList(coprocessorDescriptor); + List coprocessorDescriptors = Lists.newArrayList(coprocessorDescriptor); doReturn(coprocessorDescriptors).when(tableDescriptor).getCoprocessorDescriptors(); doReturn(getFullClassName(className)).when(coprocessorDescriptor).getClassName(); @@ -211,8 +208,8 @@ public void testTableNoSuchClass() throws IOException { assertEquals(Severity.ERROR, violation.getSeverity()); String stackTrace = Throwables.getStackTraceAsString(violation.getThrowable()); - assertTrue(stackTrace.contains("java.lang.ClassNotFoundException: " + - "org.apache.hadoop.hbase.tool.coprocessor.CoprocessorValidatorTest$NoSuchClass")); + assertTrue(stackTrace.contains("java.lang.ClassNotFoundException: " + + "org.apache.hadoop.hbase.tool.coprocessor.CoprocessorValidatorTest$NoSuchClass")); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/OpenTelemetryClassRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/OpenTelemetryClassRule.java index 3bbf2d445a81..0bebcb3c450b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/OpenTelemetryClassRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/OpenTelemetryClassRule.java @@ -31,57 +31,58 @@ import org.junit.rules.ExternalResource; /** - *

<p>Like {@link OpenTelemetryRule}, except modeled after the junit5 implementation
+ * <p>
          + * Like {@link OpenTelemetryRule}, except modeled after the junit5 implementation * {@code OpenTelemetryExtension}. Use this class when you need to make asserts on {@link SpanData} * created on a MiniCluster. Make sure this rule initialized before the MiniCluster so that it can * register its instance of {@link OpenTelemetry} as the global instance before any server-side - * component can call {@link TraceUtil#getGlobalTracer()}.

- * <p>For example:</p>
- * <pre>{@code
          - * public class TestMyClass {
          - *   private static final OpenTelemetryClassRule otelClassRule =
          - *     OpenTelemetryClassRule.create();
          - *   private static final MiniClusterRule miniClusterRule =
          - *     MiniClusterRule.newBuilder().build();
          - *   protected static final ConnectionRule connectionRule =
          - *     new ConnectionRule(miniClusterRule::createConnection);
          + * component can call {@link TraceUtil#getGlobalTracer()}.
+ * </p>
+ * <p>
+ * For example:
+ * </p>
+ *
+ * <pre>
          + * {
          + *   @code
          + *   public class TestMyClass {
          + *     private static final OpenTelemetryClassRule otelClassRule = OpenTelemetryClassRule.create();
          + *     private static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder().build();
          + *     protected static final ConnectionRule connectionRule =
          + *         new ConnectionRule(miniClusterRule::createConnection);
            *
          - *   @ClassRule
          - *   public static final TestRule classRule = RuleChain.outerRule(otelClassRule)
          - *     .around(miniClusterRule)
          - *     .around(connectionRule);
          + *     @ClassRule
          + *     public static final TestRule classRule =
          + *         RuleChain.outerRule(otelClassRule).around(miniClusterRule).around(connectionRule);
            *
          - *   @Rule
          - *   public final OpenTelemetryTestRule otelTestRule =
          - *     new OpenTelemetryTestRule(otelClassRule);
          + *     @Rule
          + *     public final OpenTelemetryTestRule otelTestRule = new OpenTelemetryTestRule(otelClassRule);
            *
          - *   @Test
          - *   public void myTest() {
          - *     // ...
          - *     // do something that makes spans
- *     final List<SpanData> spans = otelClassRule.getSpans();
          - *     // make assertions on them
          + *     @Test
          + *     public void myTest() {
          + *       // ...
          + *       // do something that makes spans
+ *       final List<SpanData> spans = otelClassRule.getSpans();
          + *       // make assertions on them
          + *     }
            *   }
            * }
- * }</pre>
+ * </pre>
          * - * @see junit5/OpenTelemetryExtension.java + * @see junit5/OpenTelemetryExtension.java */ public final class OpenTelemetryClassRule extends ExternalResource { public static OpenTelemetryClassRule create() { InMemorySpanExporter spanExporter = InMemorySpanExporter.create(); - SdkTracerProvider tracerProvider = - SdkTracerProvider.builder() - .addSpanProcessor(SimpleSpanProcessor.create(spanExporter)) - .build(); + SdkTracerProvider tracerProvider = SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(spanExporter)).build(); - OpenTelemetrySdk openTelemetry = - OpenTelemetrySdk.builder() + OpenTelemetrySdk openTelemetry = OpenTelemetrySdk.builder() .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) - .setTracerProvider(tracerProvider) - .build(); + .setTracerProvider(tracerProvider).build(); return new OpenTelemetryClassRule(openTelemetry, spanExporter); } @@ -89,10 +90,8 @@ public static OpenTelemetryClassRule create() { private final OpenTelemetrySdk openTelemetry; private final InMemorySpanExporter spanExporter; - private OpenTelemetryClassRule( - final OpenTelemetrySdk openTelemetry, - final InMemorySpanExporter spanExporter - ) { + private OpenTelemetryClassRule(final OpenTelemetrySdk openTelemetry, + final InMemorySpanExporter spanExporter) { this.openTelemetry = openTelemetry; this.spanExporter = spanExporter; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/OpenTelemetryTestRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/OpenTelemetryTestRule.java index a6b50ffca293..a51dc2eff450 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/OpenTelemetryTestRule.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/OpenTelemetryTestRule.java @@ -21,8 +21,8 @@ /** * Used alongside {@link OpenTelemetryClassRule}. See that class's javadoc for details on when to - * use these classes instead of {@link io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule} and - * an example of how to use these classes together. + * use these classes instead of {@link io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule} and an + * example of how to use these classes together. */ public final class OpenTelemetryTestRule extends ExternalResource { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index bedc104b6c26..408f0f4d79c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -73,13 +72,8 @@ import org.slf4j.LoggerFactory; /** - * This is the base class for HBaseFsck's ability to detect reasons for inconsistent tables. - * - * Actual tests are in : - * TestHBaseFsckTwoRS - * TestHBaseFsckOneRS - * TestHBaseFsckMOB - * TestHBaseFsckReplicas + * This is the base class for HBaseFsck's ability to detect reasons for inconsistent tables. 
Actual + * tests are in : TestHBaseFsckTwoRS TestHBaseFsckOneRS TestHBaseFsckMOB TestHBaseFsckReplicas */ public class BaseTestHBaseFsck { static final int POOL_SIZE = 7; @@ -98,70 +92,71 @@ public class BaseTestHBaseFsck { // for the instance, reset every test run protected Table tbl; - protected final static byte[][] SPLITS = new byte[][] { Bytes.toBytes("A"), - Bytes.toBytes("B"), Bytes.toBytes("C") }; + protected final static byte[][] SPLITS = + new byte[][] { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") }; // one row per region. - protected final static byte[][] ROWKEYS= new byte[][] { - Bytes.toBytes("00"), Bytes.toBytes("50"), Bytes.toBytes("A0"), Bytes.toBytes("A5"), - Bytes.toBytes("B0"), Bytes.toBytes("B5"), Bytes.toBytes("C0"), Bytes.toBytes("C5") }; + protected final static byte[][] ROWKEYS = new byte[][] { Bytes.toBytes("00"), Bytes.toBytes("50"), + Bytes.toBytes("A0"), Bytes.toBytes("A5"), Bytes.toBytes("B0"), Bytes.toBytes("B5"), + Bytes.toBytes("C0"), Bytes.toBytes("C5") }; /** * Debugging method to dump the contents of meta. */ protected void dumpMeta(TableName tableName) throws IOException { List regions = - MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tableName); + MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tableName); for (RegionInfo region : regions) { LOG.info(region.getRegionNameAsString()); } } /** - * This method is used to undeploy a region -- close it and attempt to - * remove its state from the Master. + * This method is used to undeploy a region -- close it and attempt to remove its state from the + * Master. */ - protected void undeployRegion(Connection conn, ServerName sn, - RegionInfo hri) throws IOException, InterruptedException { + protected void undeployRegion(Connection conn, ServerName sn, RegionInfo hri) + throws IOException, InterruptedException { try { HBaseFsckRepair.closeRegionSilentlyAndWait(conn, sn, hri); if (!hri.isMetaRegion()) { admin.offline(hri.getRegionName()); } } catch (IOException ioe) { - LOG.warn("Got exception when attempting to offline region " - + Bytes.toString(hri.getRegionName()), ioe); + LOG.warn( + "Got exception when attempting to offline region " + Bytes.toString(hri.getRegionName()), + ioe); } } + /** * Delete a region from assignments, meta, or completely from hdfs. * @param unassign if true unassign region if assigned - * @param metaRow if true remove region's row from META + * @param metaRow if true remove region's row from META * @param hdfs if true remove region's dir in HDFS */ - protected void deleteRegion(Configuration conf, final TableDescriptor htd, - byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow, - boolean hdfs) throws IOException, InterruptedException { + protected void deleteRegion(Configuration conf, final TableDescriptor htd, byte[] startKey, + byte[] endKey, boolean unassign, boolean metaRow, boolean hdfs) + throws IOException, InterruptedException { deleteRegion(conf, htd, startKey, endKey, unassign, metaRow, hdfs, false, - RegionInfo.DEFAULT_REPLICA_ID); + RegionInfo.DEFAULT_REPLICA_ID); } /** * Delete a region from assignments, meta, or completely from hdfs. 
* @param unassign if true unassign region if assigned - * @param metaRow if true remove region's row from META + * @param metaRow if true remove region's row from META * @param hdfs if true remove region's dir in HDFS * @param regionInfoOnly if true remove a region dir's .regioninfo file * @param replicaId replica id */ - protected void deleteRegion(Configuration conf, final TableDescriptor htd, - byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow, - boolean hdfs, boolean regionInfoOnly, int replicaId) - throws IOException, InterruptedException { + protected void deleteRegion(Configuration conf, final TableDescriptor htd, byte[] startKey, + byte[] endKey, boolean unassign, boolean metaRow, boolean hdfs, boolean regionInfoOnly, + int replicaId) throws IOException, InterruptedException { LOG.info("** Before delete:"); dumpMeta(htd.getTableName()); List locations; - try(RegionLocator rl = connection.getRegionLocator(tbl.getName())) { + try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) { locations = rl.getAllRegionLocations(); } @@ -169,10 +164,9 @@ protected void deleteRegion(Configuration conf, final TableDescriptor htd, RegionInfo hri = location.getRegion(); ServerName hsa = location.getServerName(); if (Bytes.compareTo(hri.getStartKey(), startKey) == 0 - && Bytes.compareTo(hri.getEndKey(), endKey) == 0 - && hri.getReplicaId() == replicaId) { + && Bytes.compareTo(hri.getEndKey(), endKey) == 0 && hri.getReplicaId() == replicaId) { - LOG.info("RegionName: " +hri.getRegionNameAsString()); + LOG.info("RegionName: " + hri.getRegionNameAsString()); byte[] deleteRow = hri.getRegionName(); if (unassign) { @@ -217,10 +211,8 @@ protected void deleteRegion(Configuration conf, final TableDescriptor htd, } /** - * Setup a clean table before we start mucking with it. - * - * It will set tbl which needs to be closed after test - * + * Setup a clean table before we start mucking with it. It will set tbl which needs to be closed + * after test * @throws IOException * @throws InterruptedException * @throws KeeperException @@ -230,17 +222,14 @@ void setupTable(TableName tablename) throws Exception { } /** - * Setup a clean table with a certain region_replica count - * - * It will set tbl which needs to be closed after test - * + * Setup a clean table with a certain region_replica count It will set tbl which needs to be + * closed after test * @throws Exception */ void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tablename); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tablename); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder.newBuilder(FAM).build(); + ColumnFamilyDescriptorBuilder.newBuilder(FAM).build(); tableDescriptorBuilder.setRegionReplication(replicaCount); // If a table has no CF's it doesn't get checked tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); @@ -258,18 +247,13 @@ void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws E /** * Setup a clean table with a mob-enabled column. - * * @param tablename The name of a table to be created. 
* @throws Exception */ void setupMobTable(TableName tablename) throws Exception { - TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(tablename); - ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(FAM) - .setMobEnabled(true) - .setMobThreshold(0).build(); + TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tablename); + ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(FAM) + .setMobEnabled(true).setMobThreshold(0).build(); // If a table has no CF's it doesn't get checked tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); createTable(TEST_UTIL, tableDescriptorBuilder.build(), SPLITS); @@ -288,7 +272,7 @@ void setupMobTable(TableName tablename) throws Exception { * Counts the number of rows to verify data loss or non-dataloss. */ int countRows() throws IOException { - return TEST_UTIL.countRows(tbl); + return TEST_UTIL.countRows(tbl); } /** @@ -333,7 +317,7 @@ Map> getDeployedHRIs(final Admin admin) throws IOExcept * Returns the HSI a region info is on. */ ServerName findDeployedHSI(Map> mm, RegionInfo hri) { - for (Map.Entry> e : mm.entrySet()) { + for (Map.Entry> e : mm.entrySet()) { if (e.getValue().contains(hri.getRegionNameAsString())) { return e.getKey(); } @@ -359,7 +343,7 @@ public void deleteTableDir(TableName table) throws IOException { * @throws IOException */ Path getFlushedHFile(FileSystem fs, TableName table) throws IOException { - Path tableDir= CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), table); + Path tableDir = CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), table); Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0); Path famDir = new Path(regionDir, FAM_STR); @@ -410,19 +394,16 @@ String createMobFileName(String oldFileName) { MobFileName mobFileName = MobFileName.create(oldFileName); String startKey = mobFileName.getStartKey(); String date = mobFileName.getDate(); - return MobFileName.create(startKey, date, - TEST_UTIL.getRandomUUID().toString().replaceAll("-", ""), "abcdef") - .getFileName(); + return MobFileName + .create(startKey, date, TEST_UTIL.getRandomUUID().toString().replaceAll("-", ""), "abcdef") + .getFileName(); } - - - /** * Test that use this should have a timeout, because this method could potentially wait forever. 
- */ - protected void doQuarantineTest(TableName table, HBaseFsck hbck, int check, - int corrupt, int fail, int quar, int missing) throws Exception { + */ + protected void doQuarantineTest(TableName table, HBaseFsck hbck, int check, int corrupt, int fail, + int quar, int missing) throws Exception { try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -431,8 +412,8 @@ protected void doQuarantineTest(TableName table, HBaseFsck hbck, int check, // Mess it up by leaving a hole in the assignment, meta, and hdfs data admin.disableTable(table); - String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission", - table.getNameAsString()}; + String[] args = { "-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission", + table.getNameAsString() }; HBaseFsck res = hbck.exec(hbfsckExecutorService, args); HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker(); @@ -457,7 +438,6 @@ protected void doQuarantineTest(TableName table, HBaseFsck hbck, int check, } } - static class MockErrorReporter implements HbckErrorReporter { static int calledCount = 0; @@ -487,14 +467,14 @@ public void reportError(ERROR_CODE errorCode, String message, HbckTableInfo tabl } @Override - public void reportError(ERROR_CODE errorCode, - String message, HbckTableInfo table, HbckRegionInfo info) { + public void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, + HbckRegionInfo info) { calledCount++; } @Override - public void reportError(ERROR_CODE errorCode, String message, - HbckTableInfo table, HbckRegionInfo info1, HbckRegionInfo info2) { + public void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, + HbckRegionInfo info1, HbckRegionInfo info2) { calledCount++; } @@ -536,9 +516,8 @@ public boolean tableHasErrors(HbckTableInfo table) { } } - protected void deleteMetaRegion(Configuration conf, boolean unassign, boolean hdfs, - boolean regionInfoOnly) throws IOException, InterruptedException { + boolean regionInfoOnly) throws IOException, InterruptedException { HRegionLocation metaLocation = connection.getRegionLocator(TableName.META_TABLE_NAME) .getRegionLocation(HConstants.EMPTY_START_ROW); ServerName hsa = metaLocation.getServerName(); @@ -587,8 +566,7 @@ public Optional getMasterObserver() { @Override public void postCompletedCreateTableAction( - final ObserverContext ctx, - final TableDescriptor desc, + final ObserverContext ctx, final TableDescriptor desc, final RegionInfo[] regions) throws IOException { // the AccessController test, some times calls only and directly the // postCompletedCreateTableAction() @@ -599,8 +577,8 @@ public void postCompletedCreateTableAction( @Override public void postCompletedDeleteTableAction( - final ObserverContext ctx, - final TableName tableName) throws IOException { + final ObserverContext ctx, final TableName tableName) + throws IOException { // the AccessController test, some times calls only and directly the // postCompletedDeleteTableAction() if (tableDeletionLatch != null) { @@ -613,8 +591,8 @@ public static void createTable(HBaseTestingUtil testUtil, TableDescriptor tableD byte[][] splitKeys) throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. 
- MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncCoprocessor.class); + MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster().getMasterCoprocessorHost() + .findCoprocessor(MasterSyncCoprocessor.class); coproc.tableCreationLatch = new CountDownLatch(1); if (splitKeys != null) { admin.createTable(tableDescriptor, splitKeys); @@ -626,12 +604,11 @@ public static void createTable(HBaseTestingUtil testUtil, TableDescriptor tableD testUtil.waitUntilAllRegionsAssigned(tableDescriptor.getTableName()); } - public static void deleteTable(HBaseTestingUtil testUtil, TableName tableName) - throws Exception { + public static void deleteTable(HBaseTestingUtil testUtil, TableName tableName) throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. - MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncCoprocessor.class); + MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster().getMasterCoprocessorHost() + .findCoprocessor(MasterSyncCoprocessor.class); coproc.tableDeletionLatch = new CountDownLatch(1); try { admin.disableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java index 2585b9f6b364..4aaaf250e455 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.util.Collection; @@ -24,7 +23,6 @@ import java.util.concurrent.DelayQueue; import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -37,6 +35,7 @@ public class ConstantDelayQueue implements BlockingQueue { private static final class DelayedElement implements Delayed { T element; long end; + public DelayedElement(T element, long delayMs) { this.element = element; this.end = EnvironmentEdgeManager.currentTime() + delayMs; @@ -45,7 +44,7 @@ public DelayedElement(T element, long delayMs) { @Override public int compareTo(Delayed o) { long cmp = getDelay(TimeUnit.MILLISECONDS) - o.getDelay(TimeUnit.MILLISECONDS); - return cmp == 0 ? 0 : ( cmp < 0 ? -1 : 1); + return cmp == 0 ? 0 : (cmp < 0 ? -1 : 1); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HBaseHomePath.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HBaseHomePath.java index 490640ea5fb6..5200608ef66a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HBaseHomePath.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HBaseHomePath.java @@ -1,23 +1,23 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; import java.net.URL; - import org.apache.hadoop.hbase.master.HMaster; /** Determines HBase home path from either class or jar directory */ @@ -31,7 +31,7 @@ private HBaseHomePath() { } public static String getHomePath() { - String className = HMaster.class.getName(); // This could have been any HBase class. + String className = HMaster.class.getName(); // This could have been any HBase class. String relPathForClass = className.replace(".", "/") + ".class"; URL url = ClassLoader.getSystemResource(relPathForClass); relPathForClass = "/" + relPathForClass; @@ -41,8 +41,8 @@ public static String getHomePath() { String path = url.getPath(); if (!path.endsWith(relPathForClass)) { - throw new RuntimeException("Got invalid path trying to look up class " + className + - ": " + path); + throw new RuntimeException( + "Got invalid path trying to look up class " + className + ": " + path); } path = path.substring(0, path.length() - relPathForClass.length()); @@ -59,8 +59,8 @@ public static String getHomePath() { } path = path.substring(0, slashIndex); } else { - throw new RuntimeException("Cannot identify HBase source directory or installation path " + - "from " + path); + throw new RuntimeException( + "Cannot identify HBase source directory or installation path " + "from " + path); } return path; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java index 4835d2f12b06..f24aff9a73ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -132,8 +132,8 @@ public static void assertArchiveEqualToOriginal(FileStatus[] expected, FileStatu private static String assertArchiveEquality(List expected, List archived) { String compare = compareFileLists(expected, archived); - if (!(expected.size() == archived.size())) return "Not the same number of current files\n" - + compare; + if (!(expected.size() == archived.size())) + return "Not the same number of current files\n" + compare; if (!expected.equals(archived)) return "Different backup files, but same amount\n" + compare; return null; } @@ -185,8 +185,8 @@ private static List convertToString(List files) { /* Get a pretty representation of the differences */ private static String compareFileLists(List expected, List gotten) { - StringBuilder sb = new StringBuilder("Expected (" + expected.size() + "): \t\t Gotten (" - + gotten.size() + "):\n"); + StringBuilder sb = new StringBuilder( + "Expected (" + expected.size() + "): \t\t Gotten (" + gotten.size() + "):\n"); List notFound = new ArrayList<>(); for (String s : expected) { if (gotten.contains(s)) sb.append(s + "\t\t" + s + "\n"); @@ -224,11 +224,11 @@ public static Path getRegionArchiveDir(Configuration conf, HRegion region) throw public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store) throws IOException { return HFileArchiveUtil.getStoreArchivePath(conf, region.getRegionInfo(), - region.getRegionFileSystem().getTableDir(), store.getColumnFamilyDescriptor().getName()); + region.getRegionFileSystem().getTableDir(), store.getColumnFamilyDescriptor().getName()); } - public static Path getStoreArchivePath(HBaseTestingUtil util, String tableName, - byte[] storeName) throws IOException { + public static Path getStoreArchivePath(HBaseTestingUtil util, String tableName, byte[] storeName) + throws IOException { byte[] table = Bytes.toBytes(tableName); // get the RS and region serving our table List servingRegions = util.getHBaseCluster().getRegions(table); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java index f086d06ce750..6a2407397bb8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -52,10 +51,9 @@ */ public class HFileTestUtil { - public static final String OPT_DATA_BLOCK_ENCODING_USAGE = - "Encoding algorithm (e.g. prefix " - + "compression) to use for data blocks in the test column family, " - + "one of " + Arrays.toString(DataBlockEncoding.values()) + "."; + public static final String OPT_DATA_BLOCK_ENCODING_USAGE = "Encoding algorithm (e.g. 
prefix " + + "compression) to use for data blocks in the test column family, " + "one of " + + Arrays.toString(DataBlockEncoding.values()) + "."; public static final String OPT_DATA_BLOCK_ENCODING = ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING.toLowerCase(Locale.ROOT); /** Column family used by the test */ @@ -64,74 +62,59 @@ public class HFileTestUtil { public static final byte[][] DEFAULT_COLUMN_FAMILIES = { DEFAULT_COLUMN_FAMILY }; /** - * Create an HFile with the given number of rows between a given - * start key and end key @ family:qualifier. The value will be the key value. - * This file will not have tags. + * Create an HFile with the given number of rows between a given start key and end key @ + * family:qualifier. The value will be the key value. This file will not have tags. */ - public static void createHFile( - Configuration configuration, - FileSystem fs, Path path, - byte[] family, byte[] qualifier, - byte[] startKey, byte[] endKey, int numRows) throws IOException { - createHFile(configuration, fs, path, DataBlockEncoding.NONE, family, qualifier, - startKey, endKey, numRows, false); + public static void createHFile(Configuration configuration, FileSystem fs, Path path, + byte[] family, byte[] qualifier, byte[] startKey, byte[] endKey, int numRows) + throws IOException { + createHFile(configuration, fs, path, DataBlockEncoding.NONE, family, qualifier, startKey, + endKey, numRows, false); } /** - * Create an HFile with the given number of rows between a given - * start key and end key @ family:qualifier. The value will be the key value. - * This file will use certain data block encoding algorithm. + * Create an HFile with the given number of rows between a given start key and end key @ + * family:qualifier. The value will be the key value. This file will use certain data block + * encoding algorithm. */ - public static void createHFileWithDataBlockEncoding( - Configuration configuration, - FileSystem fs, Path path, DataBlockEncoding encoding, - byte[] family, byte[] qualifier, - byte[] startKey, byte[] endKey, int numRows) throws IOException { - createHFile(configuration, fs, path, encoding, family, qualifier, startKey, endKey, - numRows, false); + public static void createHFileWithDataBlockEncoding(Configuration configuration, FileSystem fs, + Path path, DataBlockEncoding encoding, byte[] family, byte[] qualifier, byte[] startKey, + byte[] endKey, int numRows) throws IOException { + createHFile(configuration, fs, path, encoding, family, qualifier, startKey, endKey, numRows, + false); } /** - * Create an HFile with the given number of rows between a given - * start key and end key @ family:qualifier. The value will be the key value. - * This cells will also have a tag whose value is the key. + * Create an HFile with the given number of rows between a given start key and end key @ + * family:qualifier. The value will be the key value. This cells will also have a tag whose value + * is the key. 
*/ - public static void createHFileWithTags( - Configuration configuration, - FileSystem fs, Path path, - byte[] family, byte[] qualifier, - byte[] startKey, byte[] endKey, int numRows) throws IOException { - createHFile(configuration, fs, path, DataBlockEncoding.NONE, family, qualifier, - startKey, endKey, numRows, true); + public static void createHFileWithTags(Configuration configuration, FileSystem fs, Path path, + byte[] family, byte[] qualifier, byte[] startKey, byte[] endKey, int numRows) + throws IOException { + createHFile(configuration, fs, path, DataBlockEncoding.NONE, family, qualifier, startKey, + endKey, numRows, true); } /** - * Create an HFile with the given number of rows between a given - * start key and end key @ family:qualifier. - * If withTag is true, we add the rowKey as the tag value for - * tagtype MOB_TABLE_NAME_TAG_TYPE + * Create an HFile with the given number of rows between a given start key and end key @ + * family:qualifier. If withTag is true, we add the rowKey as the tag value for tagtype + * MOB_TABLE_NAME_TAG_TYPE */ - public static void createHFile( - Configuration configuration, - FileSystem fs, Path path, DataBlockEncoding encoding, - byte[] family, byte[] qualifier, - byte[] startKey, byte[] endKey, int numRows, boolean withTag) throws IOException { - HFileContext meta = new HFileContextBuilder() - .withIncludesTags(withTag) - .withDataBlockEncoding(encoding) - .withColumnFamily(family) - .build(); + public static void createHFile(Configuration configuration, FileSystem fs, Path path, + DataBlockEncoding encoding, byte[] family, byte[] qualifier, byte[] startKey, byte[] endKey, + int numRows, boolean withTag) throws IOException { + HFileContext meta = new HFileContextBuilder().withIncludesTags(withTag) + .withDataBlockEncoding(encoding).withColumnFamily(family).build(); HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration)) - .withPath(fs, path) - .withFileContext(meta) - .create(); + .withPath(fs, path).withFileContext(meta).create(); long now = EnvironmentEdgeManager.currentTime(); try { // subtract 2 since iterateOnSplits doesn't include boundary keys for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) { Cell kv = new KeyValue(key, family, qualifier, now, key); if (withTag) { - // add a tag. Arbitrarily chose mob tag since we have a helper already. + // add a tag. Arbitrarily chose mob tag since we have a helper already. Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, key); kv = MobUtils.createMobRefCell(kv, key, tableNameTag); @@ -144,15 +127,14 @@ public static void createHFile( writer.append(kv); } } finally { - writer.appendFileInfo(BULKLOAD_TIME_KEY, - Bytes.toBytes(EnvironmentEdgeManager.currentTime())); + writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime())); writer.close(); } } /** - * This verifies that each cell has a tag that is equal to its rowkey name. For this to work - * the hbase instance must have HConstants.RPC_CODEC_CONF_KEY set to + * This verifies that each cell has a tag that is equal to its rowkey name. 
For this to work the + * hbase instance must have HConstants.RPC_CODEC_CONF_KEY set to * KeyValueCodecWithTags.class.getCanonicalName()); * @param table table containing tagged cells * @throws IOException if problems reading table @@ -168,8 +150,7 @@ public static void verifyTags(Table table) throws IOException { } Tag t = tag.get(); byte[] tval = Tag.cloneValue(t); - assertArrayEquals(c.toString() + " has tag" + Bytes.toString(tval), - r.getRow(), tval); + assertArrayEquals(c.toString() + " has tag" + Bytes.toString(tval), r.getRow(), tval); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LauncherSecurityManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LauncherSecurityManager.java index 8be2bea40714..8b93c40e4c2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LauncherSecurityManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LauncherSecurityManager.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,22 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import java.security.Permission; /** - * class for masquerading System.exit(int). - * Use for test main method with System.exit(int ) - * usage: - * new LauncherSecurityManager(); - * try { - * CellCounter.main(args); - * fail("should be exception"); - * } catch (SecurityException e) { - * assert(.,e.getExitCode()); - * } + * class for masquerading System.exit(int). Use for test main method with System.exit(int ) usage: + * new LauncherSecurityManager(); try { CellCounter.main(args); fail("should be exception"); } catch + * (SecurityException e) { assert(.,e.getExitCode()); } */ public class LauncherSecurityManager extends SecurityManager { @@ -64,7 +55,7 @@ public void checkExit(int status) throws SecurityException { throw new SecurityException("Intercepted System.exit(" + status + ")"); } - public int getExitCode() { + public int getExitCode() { return exitCode; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java index 8aefd4247166..1839d55129a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,16 +22,14 @@ /** * A load test data generator for MOB */ -public class LoadTestDataGeneratorWithMOB - extends MultiThreadedAction.DefaultDataGenerator { +public class LoadTestDataGeneratorWithMOB extends MultiThreadedAction.DefaultDataGenerator { private byte[] mobColumnFamily; private LoadTestKVGenerator mobKvGenerator; - public LoadTestDataGeneratorWithMOB(int minValueSize, int maxValueSize, - int minColumnsPerKey, int maxColumnsPerKey, byte[]... columnFamilies) { - super(minValueSize, maxValueSize, minColumnsPerKey, maxColumnsPerKey, - columnFamilies); + public LoadTestDataGeneratorWithMOB(int minValueSize, int maxValueSize, int minColumnsPerKey, + int maxColumnsPerKey, byte[]... 
columnFamilies) { + super(minValueSize, maxValueSize, minColumnsPerKey, maxColumnsPerKey, columnFamilies); } public LoadTestDataGeneratorWithMOB(byte[]... columnFamilies) { @@ -42,11 +40,9 @@ public LoadTestDataGeneratorWithMOB(byte[]... columnFamilies) { public void initialize(String[] args) { super.initialize(args); if (args.length != 3) { - throw new IllegalArgumentException( - "LoadTestDataGeneratorWithMOB can have 3 arguments." - + "1st argument is a column family, the 2nd argument " - + "is the minimum mob data size and the 3rd argument " - + "is the maximum mob data size."); + throw new IllegalArgumentException("LoadTestDataGeneratorWithMOB can have 3 arguments." + + "1st argument is a column family, the 2nd argument " + + "is the minimum mob data size and the 3rd argument " + "is the maximum mob data size."); } String mobColumnFamily = args[0]; int minMobDataSize = Integer.parseInt(args[1]); @@ -54,16 +50,14 @@ public void initialize(String[] args) { configureMob(Bytes.toBytes(mobColumnFamily), minMobDataSize, maxMobDataSize); } - private void configureMob(byte[] mobColumnFamily, int minMobDataSize, - int maxMobDataSize) { + private void configureMob(byte[] mobColumnFamily, int minMobDataSize, int maxMobDataSize) { this.mobColumnFamily = mobColumnFamily; mobKvGenerator = new LoadTestKVGenerator(minMobDataSize, maxMobDataSize); } @Override - public byte[] generateValue(byte[] rowKey, byte[] cf, - byte[] column) { - if(Arrays.equals(cf, mobColumnFamily)) + public byte[] generateValue(byte[] rowKey, byte[] cf, byte[] column) { + if (Arrays.equals(cf, mobColumnFamily)) return mobKvGenerator.generateRandomSizeValue(rowKey, cf, column); return super.generateValue(rowKey, cf, column); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java index 81117b21f40f..e405fbca52f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java @@ -1,18 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
+ * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -21,18 +22,17 @@ import java.util.List; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.Tag; -import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.util.MultiThreadedAction.DefaultDataGenerator; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class LoadTestDataGeneratorWithTags extends DefaultDataGenerator { @@ -77,7 +77,7 @@ public Mutation beforeMutate(long rowkeyBase, Mutation m) throws IOException { for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); byte[] tag = LoadTestDataGenerator.generateData(rand, - minTagLength + rand.nextInt(maxTagLength - minTagLength)); + minTagLength + rand.nextInt(maxTagLength - minTagLength)); tags = new ArrayList<>(); for (int n = 0; n < numTags; n++) { tags.add(new ArrayBackedTag((byte) 127, tag)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java index 0e92fae0668b..90f4a7555b93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.ChoreService; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index 2e1483d4df13..5bd7502e38c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -48,8 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; /** - * Common base class for reader and writer parts of multi-thread HBase load - * test (See LoadTestTool). + * Common base class for reader and writer parts of multi-thread HBase load test (See LoadTestTool). */ public abstract class MultiThreadedAction { private static final Logger LOG = LoggerFactory.getLogger(MultiThreadedAction.class); @@ -75,20 +75,18 @@ public abstract class MultiThreadedAction { protected LoadTestDataGenerator dataGenerator = null; /** - * Default implementation of LoadTestDataGenerator that uses LoadTestKVGenerator, fixed - * set of column families, and random number of columns in range. The table for it can - * be created manually or, for example, via - * {@link org.apache.hadoop.hbase.HBaseTestingUtil#createPreSplitLoadTestTable(Configuration, - * TableName, byte[], org.apache.hadoop.hbase.io.compress.Compression.Algorithm, - * org.apache.hadoop.hbase.io.encoding.DataBlockEncoding)} + * Default implementation of LoadTestDataGenerator that uses LoadTestKVGenerator, fixed set of + * column families, and random number of columns in range. The table for it can be created + * manually or, for example, via + * {@link org.apache.hadoop.hbase.HBaseTestingUtil#createPreSplitLoadTestTable(Configuration, TableName, byte[], org.apache.hadoop.hbase.io.compress.Compression.Algorithm, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding)} */ public static class DefaultDataGenerator extends LoadTestDataGenerator { private byte[][] columnFamilies = null; private int minColumnsPerKey; private int maxColumnsPerKey; - public DefaultDataGenerator(int minValueSize, int maxValueSize, - int minColumnsPerKey, int maxColumnsPerKey, byte[]... columnFamilies) { + public DefaultDataGenerator(int minValueSize, int maxValueSize, int minColumnsPerKey, + int maxColumnsPerKey, byte[]... 
columnFamilies) { super(minValueSize, maxValueSize); this.columnFamilies = columnFamilies; this.minColumnsPerKey = minColumnsPerKey; @@ -112,8 +110,8 @@ public byte[][] getColumnFamilies() { @Override public byte[][] generateColumnsForCf(byte[] rowKey, byte[] cf) { - int numColumns = minColumnsPerKey + - ThreadLocalRandom.current().nextInt(maxColumnsPerKey - minColumnsPerKey + 1); + int numColumns = minColumnsPerKey + + ThreadLocalRandom.current().nextInt(maxColumnsPerKey - minColumnsPerKey + 1); byte[][] columns = new byte[numColumns][]; for (int i = 0; i < numColumns; ++i) { columns[i] = Bytes.toBytes(Integer.toString(i)); @@ -168,7 +166,7 @@ private static String formatTime(long elapsedTime) { String seconds = String.format(format, elapsedTime % 60); String minutes = String.format(format, (elapsedTime % 3600) / 60); String hours = String.format(format, elapsedTime / 3600); - String time = hours + ":" + minutes + ":" + seconds; + String time = hours + ":" + minutes + ":" + seconds; return time; } @@ -192,8 +190,7 @@ public void run() { Threads.sleep(REPORTING_INTERVAL_MS); while (numThreadsWorking.get() != 0) { - String threadsLeft = - "[" + reporterId + ":" + numThreadsWorking.get() + "] "; + String threadsLeft = "[" + reporterId + ":" + numThreadsWorking.get() + "] "; if (numKeys.get() == 0) { LOG.info(threadsLeft + "Number of keys = 0"); } else { @@ -204,29 +201,24 @@ public void run() { long numKeysDelta = numKeys - priorNumKeys; long totalOpTimeDelta = totalOpTime - priorCumulativeOpTime; - double averageKeysPerSecond = - (time > 0) ? (numKeys * 1000 / time) : 0; - - LOG.info(threadsLeft - + "Keys=" - + numKeys - + ", cols=" - + StringUtils.humanReadableInt(numCols.get()) - + ", time=" - + formatTime(time) - + ((numKeys > 0 && time > 0) ? (" Overall: [" + "keys/s= " - + numKeys * 1000 / time + ", latency=" - + String.format("%.2f", (double)totalOpTime / (double)numKeys) - + " ms]") : "") - + ((numKeysDelta > 0) ? (" Current: [" + "keys/s=" - + numKeysDelta * 1000 / REPORTING_INTERVAL_MS + ", latency=" - + String.format("%.2f", (double)totalOpTimeDelta / (double)numKeysDelta) - + " ms]") : "") + double averageKeysPerSecond = (time > 0) ? (numKeys * 1000 / time) : 0; + + LOG.info(threadsLeft + "Keys=" + numKeys + ", cols=" + + StringUtils.humanReadableInt(numCols.get()) + ", time=" + formatTime(time) + + ((numKeys > 0 && time > 0) + ? (" Overall: [" + "keys/s= " + numKeys * 1000 / time + ", latency=" + + String.format("%.2f", (double) totalOpTime / (double) numKeys) + " ms]") + : "") + + ((numKeysDelta > 0) + ? (" Current: [" + "keys/s=" + numKeysDelta * 1000 / REPORTING_INTERVAL_MS + + ", latency=" + + String.format("%.2f", (double) totalOpTimeDelta / (double) numKeysDelta) + + " ms]") + : "") + progressInfo()); if (streamingCounters) { - printStreamingCounters(numKeysDelta, - averageKeysPerSecond - priorAverageKeysPerSecond); + printStreamingCounters(numKeysDelta, averageKeysPerSecond - priorAverageKeysPerSecond); } priorNumKeys = numKeys; @@ -238,16 +230,13 @@ public void run() { } } - private void printStreamingCounters(long numKeysDelta, - double avgKeysPerSecondDelta) { + private void printStreamingCounters(long numKeysDelta, double avgKeysPerSecondDelta) { // Write stats in a format that can be interpreted as counters by // streaming map-reduce jobs. 
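// For reference, the lines below follow Hadoop Streaming's stderr counter protocol
// ("reporter:counter:<group>,<counter>,<amount>"), so an interval that verified 1024 new keys
// produces e.g. "reporter:counter:numKeys,<reporterId>,1024" (the count here is illustrative).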
- System.err.println("reporter:counter:numKeys," + reporterId + "," - + numKeysDelta); - System.err.println("reporter:counter:numCols," + reporterId + "," - + numCols.get()); - System.err.println("reporter:counter:avgKeysPerSecond," + reporterId - + "," + (long) (avgKeysPerSecondDelta)); + System.err.println("reporter:counter:numKeys," + reporterId + "," + numKeysDelta); + System.err.println("reporter:counter:numCols," + reporterId + "," + numCols.get()); + System.err.println( + "reporter:counter:avgKeysPerSecond," + reporterId + "," + (long) (avgKeysPerSecondDelta)); } } @@ -287,8 +276,7 @@ public long getEndKey() { /** Returns a task-specific progress string */ protected abstract String progressInfo(); - protected static void appendToStatus(StringBuilder sb, String desc, - long v) { + protected static void appendToStatus(StringBuilder sb, String desc, long v) { if (v == 0) { return; } @@ -298,8 +286,7 @@ protected static void appendToStatus(StringBuilder sb, String desc, sb.append(v); } - protected static void appendToStatus(StringBuilder sb, String desc, - String v) { + protected static void appendToStatus(StringBuilder sb, String desc, String v) { sb.append(", "); sb.append(desc); sb.append("="); @@ -307,20 +294,20 @@ protected static void appendToStatus(StringBuilder sb, String desc, } /** - * See {@link #verifyResultAgainstDataGenerator(Result, boolean, boolean)}. - * Does not verify cf/column integrity. + * See {@link #verifyResultAgainstDataGenerator(Result, boolean, boolean)}. Does not verify + * cf/column integrity. */ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyValues) { return verifyResultAgainstDataGenerator(result, verifyValues, false); } /** - * Verifies the result from get or scan using the dataGenerator (that was presumably - * also used to generate said result). + * Verifies the result from get or scan using the dataGenerator (that was presumably also used to + * generate said result). * @param verifyValues verify that values in the result make sense for row/cf/column combination * @param verifyCfAndColumnIntegrity verify that cf/column set in the result is complete. Note - * that to use this multiPut should be used, or verification - * has to happen after writes, otherwise there can be races. + * that to use this multiPut should be used, or verification has to happen after writes, + * otherwise there can be races. * @return true if the values of row result makes sense for row/cf/column combination and true if * the cf/column set in the result is complete, false otherwise. */ @@ -341,8 +328,8 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal // See if we have all the CFs. 
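// The generator declares every column family it writes (dataGenerator.getColumnFamilies()), so
// when cf/column integrity is being verified, a result whose family count differs from that set
// is reported as a verification failure rather than silently accepted.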
byte[][] expectedCfs = dataGenerator.getColumnFamilies(); if (verifyCfAndColumnIntegrity && (expectedCfs.length != result.getMap().size())) { - LOG.error("Error checking data for key [" + rowKeyStr - + "], bad family count: " + result.getMap().size()); + LOG.error("Error checking data for key [" + rowKeyStr + "], bad family count: " + + result.getMap().size()); printLocations(result); return false; } @@ -352,8 +339,8 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal String cfStr = Bytes.toString(cf); Map columnValues = result.getFamilyMap(cf); if (columnValues == null) { - LOG.error("Error checking data for key [" + rowKeyStr - + "], no data for family [" + cfStr + "]]"); + LOG.error( + "Error checking data for key [" + rowKeyStr + "], no data for family [" + cfStr + "]]"); printLocations(result); return false; } @@ -361,8 +348,8 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal Map mutateInfo = null; if (verifyCfAndColumnIntegrity || verifyValues) { if (!columnValues.containsKey(MUTATE_INFO)) { - LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" - + cfStr + "], column [" + Bytes.toString(MUTATE_INFO) + "]; value is not found"); + LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + + "], column [" + Bytes.toString(MUTATE_INFO) + "]; value is not found"); printLocations(result); return false; } @@ -371,15 +358,15 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal // Verify deleted columns, and make up column counts if deleted byte[] mutateInfoValue = columnValues.remove(MUTATE_INFO); mutateInfo = parseMutateInfo(mutateInfoValue); - for (Map.Entry mutate: mutateInfo.entrySet()) { + for (Map.Entry mutate : mutateInfo.entrySet()) { if (mutate.getValue() == MutationType.DELETE) { byte[] column = Bytes.toBytes(mutate.getKey()); long columnHash = Arrays.hashCode(column); long hashCode = cfHash + columnHash; if (hashCode % 2 == 0) { if (columnValues.containsKey(column)) { - LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" - + cfStr + "], column [" + mutate.getKey() + "]; should be deleted"); + LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + + "], column [" + mutate.getKey() + "]; should be deleted"); printLocations(result); return false; } @@ -391,8 +378,8 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal // Verify increment if (!columnValues.containsKey(INCREMENT)) { - LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" - + cfStr + "], column [" + Bytes.toString(INCREMENT) + "]; value is not found"); + LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + + "], column [" + Bytes.toString(INCREMENT) + "]; value is not found"); printLocations(result); return false; } @@ -402,14 +389,14 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal long originalValue = Arrays.hashCode(result.getRow()); long extra = currentValue - originalValue; if (extra != 0 && (amount == 0 || extra % amount != 0)) { - LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" - + cfStr + "], column [increment], extra [" + extra + "], amount [" + amount + "]"); + LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + + "], column [increment], extra [" + extra + "], amount [" + amount + "]"); printLocations(result); return false; 
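// Past the check above, any remaining extra is a clean multiple of amount; an extra larger than
// a single amount just means the increment was applied more than once (for example after a
// retried RPC), which the next check logs as a warning rather than a data error.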
} if (amount != 0 && extra != amount) { - LOG.warn("Warning checking data for key [" + rowKeyStr + "], column family [" - + cfStr + "], column [increment], incremented [" + (extra / amount) + "] times"); + LOG.warn("Warning checking data for key [" + rowKeyStr + "], column family [" + cfStr + + "], column [increment], incremented [" + (extra / amount) + "] times"); } } @@ -423,8 +410,8 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal } colsStr += "[" + Bytes.toString(col) + "]"; } - LOG.error("Error checking data for key [" + rowKeyStr - + "], bad columns for family [" + cfStr + "]: " + colsStr); + LOG.error("Error checking data for key [" + rowKeyStr + "], bad columns for family [" + + cfStr + "]: " + colsStr); printLocations(result); return false; } @@ -442,14 +429,14 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal byte[] hashCodeBytes = Bytes.toBytes(hashCode); if (mutation == MutationType.APPEND) { int offset = bytes.length - hashCodeBytes.length; - mutationVerified = offset > 0 && Bytes.equals(hashCodeBytes, - 0, hashCodeBytes.length, bytes, offset, hashCodeBytes.length); + mutationVerified = offset > 0 && Bytes.equals(hashCodeBytes, 0, + hashCodeBytes.length, bytes, offset, hashCodeBytes.length); if (mutationVerified) { int n = 1; while (true) { int newOffset = offset - hashCodeBytes.length; - if (newOffset < 0 || !Bytes.equals(hashCodeBytes, 0, - hashCodeBytes.length, bytes, newOffset, hashCodeBytes.length)) { + if (newOffset < 0 || !Bytes.equals(hashCodeBytes, 0, hashCodeBytes.length, + bytes, newOffset, hashCodeBytes.length)) { break; } offset = newOffset; @@ -457,7 +444,7 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal } if (n > 1) { LOG.warn("Warning checking data for key [" + rowKeyStr + "], column family [" - + cfStr + "], column [" + column + "], appended [" + n + "] times"); + + cfStr + "], column [" + column + "], appended [" + n + "] times"); } byte[] dest = new byte[offset]; System.arraycopy(bytes, 0, dest, 0, offset); @@ -469,19 +456,18 @@ public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyVal } if (!mutationVerified) { LOG.error("Error checking data for key [" + rowKeyStr - + "], mutation checking failed for column family [" + cfStr + "], column [" - + column + "]; mutation [" + mutation + "], hashCode [" - + hashCode + "], verificationNeeded [" - + verificationNeeded + "]"); + + "], mutation checking failed for column family [" + cfStr + "], column [" + + column + "]; mutation [" + mutation + "], hashCode [" + hashCode + + "], verificationNeeded [" + verificationNeeded + "]"); printLocations(result); return false; } } // end of mutation checking - if (verificationNeeded && - !dataGenerator.verify(result.getRow(), cf, kv.getKey(), bytes)) { - LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" - + cfStr + "], column [" + column + "], mutation [" + mutation - + "]; value of length " + bytes.length); + if (verificationNeeded + && !dataGenerator.verify(result.getRow(), cf, kv.getKey(), bytes)) { + LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + + "], column [" + column + "], mutation [" + mutation + "]; value of length " + + bytes.length); printLocations(result); return false; } @@ -514,14 +500,14 @@ private void printLocations(Result r) { private String resultToString(Result result) { StringBuilder sb = new StringBuilder(); sb.append("cells="); - if(result.isEmpty()) { + if 
(result.isEmpty()) { sb.append("NONE"); return sb.toString(); } sb.append("{"); boolean moreThanOne = false; - for(Cell cell : result.listCells()) { - if(moreThanOne) { + for (Cell cell : result.listCells()) { + if (moreThanOne) { sb.append(", "); } else { moreThanOne = true; @@ -538,14 +524,12 @@ private Map parseMutateInfo(byte[] mutateInfo) { if (mutateInfo != null) { String mutateInfoStr = Bytes.toString(mutateInfo); String[] mutations = mutateInfoStr.split("#"); - for (String mutation: mutations) { + for (String mutation : mutations) { if (mutation.isEmpty()) continue; - Preconditions.checkArgument(mutation.contains(":"), - "Invalid mutation info " + mutation); + Preconditions.checkArgument(mutation.contains(":"), "Invalid mutation info " + mutation); int p = mutation.indexOf(":"); String column = mutation.substring(0, p); - MutationType type = MutationType.valueOf( - Integer.parseInt(mutation.substring(p+1))); + MutationType type = MutationType.valueOf(Integer.parseInt(mutation.substring(p + 1))); mi.put(column, type); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java index 4c046c82870e..322f92dc47e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.util; @@ -36,8 +37,7 @@ import org.slf4j.LoggerFactory; /** Creates multiple threads that read and verify previously written data */ -public class MultiThreadedReader extends MultiThreadedAction -{ +public class MultiThreadedReader extends MultiThreadedAction { private static final Logger LOG = LoggerFactory.getLogger(MultiThreadedReader.class); protected Set readers = new HashSet<>(); @@ -47,30 +47,27 @@ public class MultiThreadedReader extends MultiThreadedAction protected MultiThreadedWriterBase writer = null; /** - * The number of keys verified in a sequence. This will never be larger than - * the total number of keys in the range. The reader might also verify - * random keys when it catches up with the writer. + * The number of keys verified in a sequence. This will never be larger than the total number of + * keys in the range. The reader might also verify random keys when it catches up with the writer. */ private final AtomicLong numUniqueKeysVerified = new AtomicLong(); /** - * Default maximum number of read errors to tolerate before shutting down all - * readers. + * Default maximum number of read errors to tolerate before shutting down all readers. */ public static final int DEFAULT_MAX_ERRORS = 10; /** - * Default "window" size between the last key written by the writer and the - * key that we attempt to read. The lower this number, the stricter our - * testing is. If this is zero, we always attempt to read the highest key - * in the contiguous sequence of keys written by the writers. + * Default "window" size between the last key written by the writer and the key that we attempt to + * read. The lower this number, the stricter our testing is. If this is zero, we always attempt to + * read the highest key in the contiguous sequence of keys written by the writers. 
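 * For example, if the writers have contiguously written keys 0..999, a key window of 0 lets the
 * reader attempt key 999, while a window of 100 keeps it at or below key 899.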
*/ public static final int DEFAULT_KEY_WINDOW = 0; /** * Default batch size for multigets */ - public static final int DEFAULT_BATCH_SIZE = 1; //translates to simple GET (no multi GET) + public static final int DEFAULT_BATCH_SIZE = 1; // translates to simple GET (no multi GET) protected AtomicLong numKeysVerified = new AtomicLong(0); protected AtomicLong numReadErrors = new AtomicLong(0); @@ -81,8 +78,8 @@ public class MultiThreadedReader extends MultiThreadedAction private int batchSize = DEFAULT_BATCH_SIZE; private int regionReplicaId = -1; // particular region replica id to do reads against if set - public MultiThreadedReader(LoadTestDataGenerator dataGen, Configuration conf, - TableName tableName, double verifyPercent) throws IOException { + public MultiThreadedReader(LoadTestDataGenerator dataGen, Configuration conf, TableName tableName, + double verifyPercent) throws IOException { super(dataGen, conf, tableName, "R"); this.verifyPercent = verifyPercent; } @@ -148,8 +145,8 @@ public class HBaseReaderThread extends Thread { private boolean printExceptionTrace = true; /** - * @param readerId only the keys with this remainder from division by - * {@link #numThreads} will be read by this thread + * @param readerId only the keys with this remainder from division by {@link #numThreads} will + * be read by this thread */ public HBaseReaderThread(int readerId) throws IOException { this.readerId = readerId; @@ -188,7 +185,7 @@ private void runReader() { startTimeMs = EnvironmentEdgeManager.currentTime(); curKey = startKey; - long [] keysForThisReader = new long[batchSize]; + long[] keysForThisReader = new long[batchSize]; while (curKey < endKey && !aborted) { int readingRandomKeyStartIndex = -1; int numKeys = 0; @@ -197,9 +194,8 @@ private void runReader() { long k = getNextKeyToRead(); if (k < startKey || k >= endKey) { numReadErrors.incrementAndGet(); - throw new AssertionError("Load tester logic error: proposed key " + - "to read " + k + " is out of range (startKey=" + startKey + - ", endKey=" + endKey + ")"); + throw new AssertionError("Load tester logic error: proposed key " + "to read " + k + + " is out of range (startKey=" + startKey + ", endKey=" + endKey + ")"); } if (k % numThreads != readerId || (writer != null && writer.failedToWriteKey(k))) { // Skip keys that this thread should not read, as well as the keys @@ -208,25 +204,24 @@ private void runReader() { } keysForThisReader[numKeys] = k; if (readingRandomKey && readingRandomKeyStartIndex == -1) { - //store the first index of a random read + // store the first index of a random read readingRandomKeyStartIndex = numKeys; } numKeys++; } while (numKeys < batchSize && curKey < endKey && !aborted); - if (numKeys > 0) { //meaning there is some key to read + if (numKeys > 0) { // meaning there is some key to read readKey(keysForThisReader); // We have verified some unique key(s). - numUniqueKeysVerified.getAndAdd(readingRandomKeyStartIndex == -1 ? - numKeys : readingRandomKeyStartIndex); + numUniqueKeysVerified + .getAndAdd(readingRandomKeyStartIndex == -1 ? numKeys : readingRandomKeyStartIndex); } } } /** - * Should only be used for the concurrent writer/reader workload. The - * maximum key we are allowed to read, subject to the "key window" - * constraint. + * Should only be used for the concurrent writer/reader workload. The maximum key we are allowed + * to read, subject to the "key window" constraint. 
*/ private long maxKeyWeCanRead() { long insertedUpToKey = writer.wroteUpToKey(); @@ -264,13 +259,13 @@ protected long getNextKeyToRead() { // later. Set a flag to make sure that we don't count this key towards // the set of unique keys we have verified. readingRandomKey = true; - return startKey + Math.abs(ThreadLocalRandom.current().nextLong()) - % (maxKeyToRead - startKey + 1); + return startKey + + Math.abs(ThreadLocalRandom.current().nextLong()) % (maxKeyToRead - startKey + 1); } private Get[] readKey(long[] keysToRead) { Random rand = ThreadLocalRandom.current(); - Get [] gets = new Get[keysToRead.length]; + Get[] gets = new Get[keysToRead.length]; int i = 0; for (long keyToRead : keysToRead) { try { @@ -281,9 +276,9 @@ private Get[] readKey(long[] keysToRead) { i++; } catch (IOException e) { numReadFailures.addAndGet(1); - LOG.debug("[" + readerId + "] FAILED read, key = " + (keyToRead + "") - + ", time from start: " - + (EnvironmentEdgeManager.currentTime() - startTimeMs) + " ms"); + LOG.debug( + "[" + readerId + "] FAILED read, key = " + (keyToRead + "") + ", time from start: " + + (EnvironmentEdgeManager.currentTime() - startTimeMs) + " ms"); if (printExceptionTrace) { LOG.warn(e.toString(), e); printExceptionTrace = false; @@ -296,9 +291,9 @@ private Get[] readKey(long[] keysToRead) { } catch (IOException e) { numReadFailures.addAndGet(gets.length); for (long keyToRead : keysToRead) { - LOG.debug("[" + readerId + "] FAILED read, key = " + (keyToRead + "") - + ", time from start: " - + (EnvironmentEdgeManager.currentTime() - startTimeMs) + " ms"); + LOG.debug( + "[" + readerId + "] FAILED read, key = " + (keyToRead + "") + ", time from start: " + + (EnvironmentEdgeManager.currentTime() - startTimeMs) + " ms"); } if (printExceptionTrace) { LOG.warn(e.toString(), e); @@ -353,26 +348,24 @@ public void queryKey(Get get, boolean verify, long keyToRead) throws IOException } protected void verifyResultsAndUpdateMetrics(boolean verify, Get[] gets, long elapsedNano, - Result[] results, Table table, boolean isNullExpected) - throws IOException { + Result[] results, Table table, boolean isNullExpected) throws IOException { totalOpTimeMs.addAndGet(elapsedNano / 1000000); numKeys.addAndGet(gets.length); int i = 0; for (Result result : results) { verifyResultsAndUpdateMetricsOnAPerGetBasis(verify, gets[i++], result, table, - isNullExpected); + isNullExpected); } } protected void verifyResultsAndUpdateMetrics(boolean verify, Get get, long elapsedNano, - Result result, Table table, boolean isNullExpected) - throws IOException { - verifyResultsAndUpdateMetrics(verify, new Get[]{get}, elapsedNano, - new Result[]{result}, table, isNullExpected); + Result result, Table table, boolean isNullExpected) throws IOException { + verifyResultsAndUpdateMetrics(verify, new Get[] { get }, elapsedNano, new Result[] { result }, + table, isNullExpected); } - private void verifyResultsAndUpdateMetricsOnAPerGetBasis(boolean verify, Get get, - Result result, Table table, boolean isNullExpected) throws IOException { + private void verifyResultsAndUpdateMetricsOnAPerGetBasis(boolean verify, Get get, Result result, + Table table, boolean isNullExpected) throws IOException { if (!result.isEmpty()) { if (verify) { numKeysVerified.incrementAndGet(); @@ -384,9 +377,9 @@ private void verifyResultsAndUpdateMetricsOnAPerGetBasis(boolean verify, Get get } String rowKey = Bytes.toString(get.getRow()); LOG.info("Key = " + rowKey + ", Region location: " + hloc); - if(isNullExpected) { + if (isNullExpected) { 
nullResult.incrementAndGet(); - LOG.debug("Null result obtained for the key ="+rowKey); + LOG.debug("Null result obtained for the key =" + rowKey); return; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java index f76042e9a7e4..7743bf966c7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -20,7 +21,6 @@ import java.security.PrivilegedExceptionAction; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; @@ -41,8 +41,8 @@ public class MultiThreadedReaderWithACL extends MultiThreadedReader { private static final String COMMA = ","; /** - * Maps user with Table instance. Because the table instance has to be created - * per user inorder to work in that user's context + * Maps user with Table instance. 
Because the table instance has to be created per user inorder to + * work in that user's context */ private Map userVsTable = new HashMap<>(); private Map users = new HashMap<>(); @@ -109,7 +109,8 @@ public Object run() throws Exception { } boolean isNullExpected = ((((int) keyToRead % specialPermCellInsertionFactor)) == 0); long end = System.nanoTime(); - verifyResultsAndUpdateMetrics(verify, get, end - start, result, localTable, isNullExpected); + verifyResultsAndUpdateMetrics(verify, get, end - start, result, localTable, + isNullExpected); } catch (IOException e) { recordFailure(keyToRead); } @@ -120,8 +121,8 @@ public Object run() throws Exception { int mod = ((int) keyToRead % userNames.length); User user; UserGroupInformation realUserUgi; - if(!users.containsKey(userNames[mod])) { - if(User.isHBaseSecurityEnabled(conf)) { + if (!users.containsKey(userNames[mod])) { + if (User.isHBaseSecurityEnabled(conf)) { realUserUgi = HBaseKerberosUtils.loginAndReturnUGI(conf, userNames[mod]); } else { realUserUgi = UserGroupInformation.createRemoteUser(userNames[mod]); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java index 78c698975a7f..88d7efb36f8e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.INCREMENT; @@ -30,7 +29,6 @@ import java.util.Random; import java.util.Set; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -45,13 +43,15 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; + /** Creates multiple threads that write key/values into the */ public class MultiThreadedUpdater extends MultiThreadedWriterBase { private static final Logger LOG = LoggerFactory.getLogger(MultiThreadedUpdater.class); @@ -168,12 +168,13 @@ public void run() { get = dataGenerator.beforeGet(rowKeyBase, get); } catch (Exception e) { // Ideally wont happen - LOG.warn("Failed to modify the get from the load generator = [" + Bytes.toString(get.getRow()) - + "], column family = [" + Bytes.toString(cf) + "]", e); + LOG.warn("Failed to modify the get from the load generator = [" + + Bytes.toString(get.getRow()) + "], column family = [" + Bytes.toString(cf) + + "]", + e); } Result result = getRow(get, rowKeyBase, cf); - Map columnValues = - result != null ? 
result.getFamilyMap(cf) : null; + Map columnValues = result != null ? result.getFamilyMap(cf) : null; if (columnValues == null) { int specialPermCellInsertionFactor = Integer.parseInt(dataGenerator.getArgs()[2]); if (((int) rowKeyBase % specialPermCellInsertionFactor == 0)) { @@ -184,13 +185,13 @@ public void run() { + "], since we could not get the original row"); } } - if(columnValues != null) { + if (columnValues != null) { for (byte[] column : columnValues.keySet()) { if (Bytes.equals(column, INCREMENT) || Bytes.equals(column, MUTATE_INFO)) { continue; } MutationType mt = - MutationType.values()[rand.nextInt(MutationType.values().length)]; + MutationType.values()[rand.nextInt(MutationType.values().length)]; long columnHash = Arrays.hashCode(column); long hashCode = cfHash + columnHash; byte[] hashCodeBytes = Bytes.toBytes(hashCode); @@ -199,29 +200,29 @@ public void run() { Cell kv = result.getColumnLatestCell(cf, column); checkedValue = kv != null ? CellUtil.cloneValue(kv) : null; Preconditions.checkNotNull(checkedValue, - "Column value to be checked should not be null"); + "Column value to be checked should not be null"); } buf.setLength(0); // Clear the buffer buf.append("#").append(Bytes.toString(column)).append(":"); ++columnCount; switch (mt) { - case PUT: - Put put = new Put(rowKey); - put.addColumn(cf, column, hashCodeBytes); - mutate(table, put, rowKeyBase, rowKey, cf, column, checkedValue); - buf.append(MutationType.PUT.getNumber()); - break; - case DELETE: - Delete delete = new Delete(rowKey); - // Delete all versions since a put - // could be called multiple times if CM is used - delete.addColumns(cf, column); - mutate(table, delete, rowKeyBase, rowKey, cf, column, checkedValue); - buf.append(MutationType.DELETE.getNumber()); - break; - default: - buf.append(MutationType.APPEND.getNumber()); - app.addColumn(cf, column, hashCodeBytes); + case PUT: + Put put = new Put(rowKey); + put.addColumn(cf, column, hashCodeBytes); + mutate(table, put, rowKeyBase, rowKey, cf, column, checkedValue); + buf.append(MutationType.PUT.getNumber()); + break; + case DELETE: + Delete delete = new Delete(rowKey); + // Delete all versions since a put + // could be called multiple times if CM is used + delete.addColumns(cf, column); + mutate(table, delete, rowKeyBase, rowKey, cf, column, checkedValue); + buf.append(MutationType.DELETE.getNumber()); + break; + default: + buf.append(MutationType.APPEND.getNumber()); + app.addColumn(cf, column, hashCodeBytes); } app.addColumn(cf, MUTATE_INFO, Bytes.toBytes(buf.toString())); if (!isBatchUpdate) { @@ -234,8 +235,8 @@ public void run() { } if (isBatchUpdate) { if (verbose) { - LOG.debug("Preparing increment and append for key = [" - + Bytes.toString(rowKey) + "], " + columnCount + " columns"); + LOG.debug("Preparing increment and append for key = [" + Bytes.toString(rowKey) + + "], " + columnCount + " columns"); } mutate(table, inc, rowKeyBase); mutate(table, app, rowKeyBase); @@ -267,9 +268,9 @@ protected Result getRow(Get get, long rowKeyBase, byte[] cf) { try { result = table.get(get); } catch (IOException ie) { - LOG.warn( - "Failed to get the row for key = [" + Bytes.toString(get.getRow()) + "], column family = [" - + Bytes.toString(cf) + "]", ie); + LOG.warn("Failed to get the row for key = [" + Bytes.toString(get.getRow()) + + "], column family = [" + Bytes.toString(cf) + "]", + ie); } return result; } @@ -278,22 +279,22 @@ public void mutate(Table table, Mutation m, long keyBase) { mutate(table, m, keyBase, null, null, null, null); } - public void 
mutate(Table table, Mutation m, - long keyBase, byte[] row, byte[] cf, byte[] q, byte[] v) { + public void mutate(Table table, Mutation m, long keyBase, byte[] row, byte[] cf, byte[] q, + byte[] v) { long start = EnvironmentEdgeManager.currentTime(); try { m = dataGenerator.beforeMutate(keyBase, m); if (m instanceof Increment) { - table.increment((Increment)m); + table.increment((Increment) m); } else if (m instanceof Append) { - table.append((Append)m); + table.append((Append) m); } else if (m instanceof Put) { - table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenPut((Put)m); + table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenPut((Put) m); } else if (m instanceof Delete) { - table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenDelete((Delete)m); + table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenDelete((Delete) m); } else { throw new IllegalArgumentException( - "unsupported mutation " + m.getClass().getSimpleName()); + "unsupported mutation " + m.getClass().getSimpleName()); } totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTime() - start); } catch (IOException e) { @@ -310,10 +311,9 @@ public void mutate(Table table, Mutation m, } else { exceptionInfo = StringUtils.stringifyException(e); } - LOG.error("Failed to mutate: " + keyBase + " after " + - (EnvironmentEdgeManager.currentTime() - start) + - "ms; region information: " + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: " - + exceptionInfo); + LOG.error("Failed to mutate: " + keyBase + " after " + + (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: " + exceptionInfo); } } } @@ -323,7 +323,7 @@ public void waitForFinish() { super.waitForFinish(); System.out.println("Failed to update keys: " + failedKeySet.size()); for (Long key : failedKeySet) { - System.out.println("Failed to update key: " + key); + System.out.println("Failed to update key: " + key); } } @@ -331,29 +331,28 @@ public void mutate(Table table, Mutation m, long keyBase) { mutate(table, m, keyBase, null, null, null, null); } - public void mutate(Table table, Mutation m, - long keyBase, byte[] row, byte[] cf, byte[] q, byte[] v) { + public void mutate(Table table, Mutation m, long keyBase, byte[] row, byte[] cf, byte[] q, + byte[] v) { long start = EnvironmentEdgeManager.currentTime(); try { m = dataGenerator.beforeMutate(keyBase, m); if (m instanceof Increment) { - table.increment((Increment)m); + table.increment((Increment) m); } else if (m instanceof Append) { - table.append((Append)m); + table.append((Append) m); } else if (m instanceof Put) { - table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenPut((Put)m); + table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenPut((Put) m); } else if (m instanceof Delete) { - table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenDelete((Delete)m); + table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenDelete((Delete) m); } else { - throw new IllegalArgumentException( - "unsupported mutation " + m.getClass().getSimpleName()); + throw new IllegalArgumentException("unsupported mutation " + m.getClass().getSimpleName()); } totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTime() - start); } catch (IOException e) { failedKeySet.add(keyBase); String exceptionInfo; if (e instanceof RetriesExhaustedWithDetailsException) { - RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException)e; + RetriesExhaustedWithDetailsException aggEx = 
(RetriesExhaustedWithDetailsException) e; exceptionInfo = aggEx.getExhaustiveDescription(); } else { StringWriter stackWriter = new StringWriter(); @@ -362,9 +361,9 @@ public void mutate(Table table, Mutation m, pw.flush(); exceptionInfo = StringUtils.stringifyException(e); } - LOG.error("Failed to mutate: " + keyBase + " after " + - (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + - getRegionDebugInfoSafe(table, m.getRow()) + "; errors: " + exceptionInfo); + LOG.error("Failed to mutate: " + keyBase + " after " + + (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: " + exceptionInfo); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java index 31337a787dcf..1a9ffa49db0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.security.PrivilegedExceptionAction; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; @@ -48,11 +47,11 @@ */ public class MultiThreadedUpdaterWithACL extends MultiThreadedUpdater { private static final Logger LOG = LoggerFactory.getLogger(MultiThreadedUpdaterWithACL.class); - private final static String COMMA= ","; + private final static String COMMA = ","; private User userOwner; /** - * Maps user with Table instance. Because the table instance has to be created - * per user inorder to work in that user's context + * Maps user with Table instance. 
Because the table instance has to be created per user inorder to + * work in that user's context */ private Map userVsTable = new HashMap<>(); private Map users = new HashMap<>(); @@ -60,7 +59,7 @@ public class MultiThreadedUpdaterWithACL extends MultiThreadedUpdater { public MultiThreadedUpdaterWithACL(LoadTestDataGenerator dataGen, Configuration conf, TableName tableName, double updatePercent, User userOwner, String userNames) - throws IOException { + throws IOException { super(dataGen, conf, tableName, updatePercent); this.userOwner = userOwner; this.userNames = userNames.split(COMMA); @@ -102,7 +101,7 @@ protected void closeHTable() { } } } catch (Exception e) { - LOG.error("Error while closing the HTable "+table.getName(), e); + LOG.error("Error while closing the HTable " + table.getName(), e); } } @@ -125,8 +124,9 @@ public Object run() throws Exception { res = localTable.get(get); } } catch (IOException ie) { - LOG.warn("Failed to get the row for key = [" + Bytes.toString(get.getRow()) + - "], column family = [" + Bytes.toString(cf) + "]", ie); + LOG.warn("Failed to get the row for key = [" + Bytes.toString(get.getRow()) + + "], column family = [" + Bytes.toString(cf) + "]", + ie); } return res; } @@ -151,8 +151,9 @@ public Object run() throws Exception { Result result = (Result) user.runAs(action); return result; } catch (Exception ie) { - LOG.warn("Failed to get the row for key = [" + Bytes.toString(get.getRow()) + - "], column family = [" + Bytes.toString(cf) + "]", ie); + LOG.warn("Failed to get the row for key = [" + Bytes.toString(get.getRow()) + + "], column family = [" + Bytes.toString(cf) + "]", + ie); } } // This means that no users were present @@ -237,8 +238,8 @@ public Object run() throws Exception { } else if (m instanceof Delete) { table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenDelete((Delete) m); } else { - throw new IllegalArgumentException("unsupported mutation " - + m.getClass().getSimpleName()); + throw new IllegalArgumentException( + "unsupported mutation " + m.getClass().getSimpleName()); } totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTime() - start); } catch (IOException e) { @@ -248,8 +249,8 @@ public Object run() throws Exception { } } - private void recordFailure(final Mutation m, final long keyBase, - final long start, IOException e) { + private void recordFailure(final Mutation m, final long keyBase, final long start, + IOException e) { failedKeySet.add(keyBase); String exceptionInfo; if (e instanceof RetriesExhaustedWithDetailsException) { @@ -262,9 +263,9 @@ private void recordFailure(final Mutation m, final long keyBase, pw.flush(); exceptionInfo = StringUtils.stringifyException(e); } - LOG.error("Failed to mutate: " + keyBase + " after " + - (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + - getRegionDebugInfoSafe(table, m.getRow()) + "; errors: " + exceptionInfo); + LOG.error("Failed to mutate: " + keyBase + " after " + + (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: " + exceptionInfo); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java index b255f5d3f149..026467e54eb5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to 
the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.INCREMENT; @@ -27,7 +26,6 @@ import java.util.Arrays; import java.util.HashSet; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -47,8 +45,8 @@ public class MultiThreadedWriter extends MultiThreadedWriterBase { protected boolean isMultiPut = false; - public MultiThreadedWriter(LoadTestDataGenerator dataGen, Configuration conf, - TableName tableName) throws IOException { + public MultiThreadedWriter(LoadTestDataGenerator dataGen, Configuration conf, TableName tableName) + throws IOException { super(dataGen, conf, tableName, "W"); } @@ -123,7 +121,8 @@ public void run() { } if (isMultiPut) { if (verbose) { - LOG.debug("Preparing put for key = [" + Bytes.toString(rowKey) + "], " + columnCount + " columns"); + LOG.debug("Preparing put for key = [" + Bytes.toString(rowKey) + "], " + columnCount + + " columns"); } insert(table, put, rowKeyBase); numCols.addAndGet(columnCount); @@ -148,7 +147,7 @@ public void insert(Table table, Put put, long keyBase) { failedKeySet.add(keyBase); String exceptionInfo; if (e instanceof RetriesExhaustedWithDetailsException) { - RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException)e; + RetriesExhaustedWithDetailsException aggEx = (RetriesExhaustedWithDetailsException) e; exceptionInfo = aggEx.getExhaustiveDescription(); } else { StringWriter stackWriter = new StringWriter(); @@ -157,11 +156,12 @@ public void insert(Table table, Put put, long keyBase) { pw.flush(); exceptionInfo = StringUtils.stringifyException(e); } - LOG.error("Failed to insert: " + keyBase + " after " + - (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + - getRegionDebugInfoSafe(table, put.getRow()) + "; errors: " + exceptionInfo); + LOG.error("Failed to insert: " + keyBase + " after " + + (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + + getRegionDebugInfoSafe(table, put.getRow()) + "; errors: " + exceptionInfo); } } + protected void closeHTable() { try { if (table != null) { @@ -178,7 +178,7 @@ public void waitForFinish() { super.waitForFinish(); System.out.println("Failed to write keys: " + failedKeySet.size()); for (Long key : failedKeySet) { - System.out.println("Failed to write key: " + key); + System.out.println("Failed to write key: " + key); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java index 6222eea8c5db..a73945fdb435 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util; import java.io.IOException; @@ -41,16 +40,15 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction { private static final Logger LOG = LoggerFactory.getLogger(MultiThreadedWriterBase.class); /** - * A temporary place to keep track of inserted/updated keys. This is written to by - * all writers and is drained on a separate thread that populates - * {@link #wroteUpToKey}, the maximum key in the contiguous range of keys - * being inserted/updated. This queue is supposed to stay small. + * A temporary place to keep track of inserted/updated keys. This is written to by all writers and + * is drained on a separate thread that populates {@link #wroteUpToKey}, the maximum key in the + * contiguous range of keys being inserted/updated. This queue is supposed to stay small. */ protected BlockingQueue wroteKeys; /** - * This is the current key to be inserted/updated by any thread. Each thread does an - * atomic get and increment operation and inserts the current value. + * This is the current key to be inserted/updated by any thread. Each thread does an atomic get + * and increment operation and inserts the current value. */ protected AtomicLong nextKeyToWrite = new AtomicLong(); @@ -63,9 +61,8 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction { protected Set failedKeySet = new ConcurrentSkipListSet<>(); /** - * The total size of the temporary inserted/updated key set that have not yet lined - * up in a our contiguous sequence starting from startKey. Supposed to stay - * small. + * The total size of the temporary inserted/updated key set that have not yet lined up in a our + * contiguous sequence starting from startKey. Supposed to stay small. */ protected AtomicLong wroteKeyQueueSize = new AtomicLong(); @@ -91,7 +88,7 @@ public void start(long startKey, long endKey, int numThreads) throws IOException if (trackWroteKeys) { new Thread(new WroteKeysTracker(), "MultiThreadedWriterBase-WroteKeysTracker-" + EnvironmentEdgeManager.currentTime()) - .start(); + .start(); numThreadsWorking.incrementAndGet(); } } @@ -120,8 +117,7 @@ protected String getRegionDebugInfoSafe(Table table, byte[] rowKey) { } /** - * A thread that keeps track of the highest key in the contiguous range of - * inserted/updated keys. + * A thread that keeps track of the highest key in the contiguous range of inserted/updated keys. */ private class WroteKeysTracker implements Runnable { @@ -152,8 +148,7 @@ public void run() { } // See if we have a sequence of contiguous keys lined up. - while (!sortedKeys.isEmpty() - && ((k = sortedKeys.peek()) == expectedKey)) { + while (!sortedKeys.isEmpty() && ((k = sortedKeys.peek()) == expectedKey)) { sortedKeys.poll(); wroteUpToKey.set(k); ++expectedKey; @@ -194,8 +189,8 @@ protected String progressInfo() { } /** - * Used for a joint write/read workload. Enables tracking the last inserted/updated - * key, which requires a blocking queue and a consumer thread. + * Used for a joint write/read workload. Enables tracking the last inserted/updated key, which + * requires a blocking queue and a consumer thread. 
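A rough sketch of the joint write/read workload this enables. The constructors, start(), setTrackWroteKeys() and waitForFinish() appear in or are implied by the surrounding hunks; the Configuration conf, table name, key range, thread counts and the linkToWriter() hook-up are illustrative assumptions only:

    LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(
        256, 1024, 1, 10, Bytes.toBytes("test_cf"));   // min/max value size, min/max columns per key
    MultiThreadedWriter writer =
        new MultiThreadedWriter(dataGen, conf, TableName.valueOf("load_test"));
    MultiThreadedReader reader =
        new MultiThreadedReader(dataGen, conf, TableName.valueOf("load_test"), 10.0 /* verify % */);
    writer.setTrackWroteKeys(true);   // starts the WroteKeysTracker so wroteUpToKey advances
    reader.linkToWriter(writer);      // assumed reader-to-writer hook-up, not shown in these hunks
    writer.start(0, 1_000_000, 20);   // startKey, endKey, numThreads
    reader.start(0, 1_000_000, 20);
    writer.waitForFinish();
    reader.waitForFinish();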
* @param enable whether to enable tracking the last inserted/updated key */ public void setTrackWroteKeys(boolean enable) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterWithACL.java index 1b2d40da827d..b7f48a378dc7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterWithACL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.PrintWriter; import java.io.StringWriter; import java.security.PrivilegedExceptionAction; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; @@ -80,7 +79,7 @@ protected void closeHTable() { try { table.close(); } catch (Exception e) { - LOG.error("Error in closing the table "+table.getName(), e); + LOG.error("Error in closing the table " + table.getName(), e); } } } @@ -136,8 +135,8 @@ public Object run() throws Exception { } } - private void recordFailure(final Table table, final Put put, final long keyBase, - final long start, IOException e) { + private void recordFailure(final Table table, final Put put, final long keyBase, final long start, + IOException e) { failedKeySet.add(keyBase); String exceptionInfo; if (e instanceof RetriesExhaustedWithDetailsException) { @@ -150,8 +149,8 @@ private void recordFailure(final Table table, final Put put, final long keyBase, pw.flush(); exceptionInfo = StringUtils.stringifyException(e); } - LOG.error("Failed to insert: " + keyBase + " after " + - (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + - getRegionDebugInfoSafe(table, put.getRow()) + "; errors: " + exceptionInfo); + LOG.error("Failed to insert: " + keyBase + " after " + + (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + + getRegionDebugInfoSafe(table, put.getRow()) + "; errors: " + exceptionInfo); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java index cef59dc3a09c..b98f72745208 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -49,9 +50,8 @@ import org.slf4j.LoggerFactory; /** - * A helper class for process-based mini-cluster tests. Unlike - * {@link SingleProcessHBaseCluster}, starts daemons as separate processes, allowing to - * do real kill testing. + * A helper class for process-based mini-cluster tests. Unlike {@link SingleProcessHBaseCluster}, + * starts daemons as separate processes, allowing to do real kill testing. */ public class ProcessBasedLocalHBaseCluster { @@ -64,11 +64,9 @@ public class ProcessBasedLocalHBaseCluster { private static final int MAX_FILE_SIZE_OVERRIDE = 10 * 1000 * 1000; - private static final Logger LOG = LoggerFactory.getLogger( - ProcessBasedLocalHBaseCluster.class); + private static final Logger LOG = LoggerFactory.getLogger(ProcessBasedLocalHBaseCluster.class); - private List daemonPidFiles = - Collections.synchronizedList(new ArrayList()); + private List daemonPidFiles = Collections.synchronizedList(new ArrayList()); private boolean shutdownHookInstalled; @@ -83,9 +81,7 @@ public class ProcessBasedLocalHBaseCluster { private List logTailDirs = Collections.synchronizedList(new ArrayList()); private static enum ServerType { - MASTER("master"), - RS("regionserver"), - ZK("zookeeper"); + MASTER("master"), RS("regionserver"), ZK("zookeeper"); private final String fullName; @@ -100,8 +96,7 @@ private ServerType(String fullName) { * @param numDataNodes the number of data nodes * @param numRegionServers the number of region servers */ - public ProcessBasedLocalHBaseCluster(Configuration conf, - int numDataNodes, int numRegionServers) { + public ProcessBasedLocalHBaseCluster(Configuration conf, int numDataNodes, int numRegionServers) { this.conf = conf; this.hbaseHome = HBaseHomePath.getHomePath(); this.numMasters = 1; @@ -132,9 +127,9 @@ public void startMiniDFS() throws Exception { } /** - * Generates a list of random port numbers in the sorted order. A sorted - * order makes sense if we ever want to refer to these servers by their index - * in the returned array, e.g. server #0, #1, etc. + * Generates a list of random port numbers in the sorted order. A sorted order makes sense if we + * ever want to refer to these servers by their index in the returned array, e.g. server #0, #1, + * etc. */ private static List sortedPorts(int n) { List ports = new ArrayList<>(n); @@ -171,14 +166,13 @@ public void startHBase() throws IOException { try { testUtil.getConnection().getTable(TableName.META_TABLE_NAME); } catch (Exception e) { - LOG.info("Waiting for HBase to startup. Retries left: " + attemptsLeft, - e); + LOG.info("Waiting for HBase to startup. Retries left: " + attemptsLeft, e); Threads.sleep(1000); } } - LOG.info("Process-based HBase Cluster with " + numRegionServers + - " region servers up and running... 
\n\n"); + LOG.info("Process-based HBase Cluster with " + numRegionServers + + " region servers up and running... \n\n"); } public void startRegionServer(int port) { @@ -205,29 +199,26 @@ private void executeCommand(String command) { executeCommand(command, null); } - private void executeCommand(String command, Map envOverrides) { + private void executeCommand(String command, Map envOverrides) { ensureShutdownHookInstalled(); LOG.debug("Command : " + command); try { - String [] envp = null; + String[] envp = null; if (envOverrides != null) { Map map = new HashMap<>(System.getenv()); map.putAll(envOverrides); envp = new String[map.size()]; int idx = 0; - for (Map.Entry e: map.entrySet()) { + for (Map.Entry e : map.entrySet()) { envp[idx++] = e.getKey() + "=" + e.getValue(); } } Process p = Runtime.getRuntime().exec(command, envp); - BufferedReader stdInput = new BufferedReader( - new InputStreamReader(p.getInputStream())); - BufferedReader stdError = new BufferedReader( - new InputStreamReader(p.getErrorStream())); + BufferedReader stdInput = new BufferedReader(new InputStreamReader(p.getInputStream())); + BufferedReader stdError = new BufferedReader(new InputStreamReader(p.getErrorStream())); // read the output from the command String s = null; @@ -312,8 +303,7 @@ private static int readPidFromFile(String pidFile) throws IOException { private String pidFilePath(ServerType serverType, int port) { String dir = serverWorkingDir(serverType, port); String user = System.getenv("USER"); - String pidFile = String.format("%s/hbase-%s-%s.pid", - dir, user, serverType.fullName); + String pidFile = String.format("%s/hbase-%s-%s.pid", dir, user, serverType.fullName); return pidFile; } @@ -343,37 +333,30 @@ private void startServer(ServerType serverType, int rsPort) { // using default ports. If we want to run remote debugging on process-based local cluster's // daemons, we can automatically choose non-conflicting JDWP and JMX ports for each daemon // and specify them here. 
- writeStringToFile( - "unset HBASE_MASTER_OPTS\n" + - "unset HBASE_REGIONSERVER_OPTS\n" + - "unset HBASE_ZOOKEEPER_OPTS\n" + - "HBASE_MASTER_DBG_OPTS=' '\n" + - "HBASE_REGIONSERVER_DBG_OPTS=' '\n" + - "HBASE_ZOOKEEPER_DBG_OPTS=' '\n" + - "HBASE_MASTER_JMX_OPTS=' '\n" + - "HBASE_REGIONSERVER_JMX_OPTS=' '\n" + - "HBASE_ZOOKEEPER_JMX_OPTS=' '\n", - dir + "/hbase-env.sh"); + writeStringToFile("unset HBASE_MASTER_OPTS\n" + "unset HBASE_REGIONSERVER_OPTS\n" + + "unset HBASE_ZOOKEEPER_OPTS\n" + "HBASE_MASTER_DBG_OPTS=' '\n" + + "HBASE_REGIONSERVER_DBG_OPTS=' '\n" + "HBASE_ZOOKEEPER_DBG_OPTS=' '\n" + + "HBASE_MASTER_JMX_OPTS=' '\n" + "HBASE_REGIONSERVER_JMX_OPTS=' '\n" + + "HBASE_ZOOKEEPER_JMX_OPTS=' '\n", + dir + "/hbase-env.sh"); Map envOverrides = new HashMap<>(); envOverrides.put("HBASE_LOG_DIR", dir); envOverrides.put("HBASE_PID_DIR", dir); try { - FileUtils.copyFile( - new File(hbaseHome, "conf/log4j.properties"), - new File(dir, "log4j.properties")); + FileUtils.copyFile(new File(hbaseHome, "conf/log4j.properties"), + new File(dir, "log4j.properties")); } catch (IOException ex) { LOG.error("Could not install log4j.properties into " + dir); } - executeCommand(hbaseDaemonScript + " --config " + dir + - " start " + serverType.fullName, envOverrides); + executeCommand(hbaseDaemonScript + " --config " + dir + " start " + serverType.fullName, + envOverrides); daemonPidFiles.add(pidFilePath(serverType, rsPort)); logTailDirs.add(dir); } - private final String generateConfig(ServerType serverType, int rpcPort, - String daemonDir) { + private final String generateConfig(ServerType serverType, int rpcPort, String daemonDir) { StringBuilder sb = new StringBuilder(); Map confMap = new TreeMap<>(); confMap.put(HConstants.CLUSTER_DISTRIBUTED, true); @@ -415,8 +398,7 @@ private final String generateConfig(ServerType serverType, int rpcPort, } private static void reportWebUIPort(String daemon, int port) { - LOG.info("Local " + daemon + " web UI is at http://" - + HConstants.LOCALHOST + ":" + port); + LOG.info("Local " + daemon + " web UI is at http://" + HConstants.LOCALHOST + ":" + port); } public Configuration getConf() { @@ -433,8 +415,7 @@ public void shutdown() { private static final Pattern TO_REMOVE_FROM_LOG_LINES_RE = Pattern.compile("org\\.apache\\.hadoop\\.hbase\\."); - private static final Pattern LOG_PATH_FORMAT_RE = - Pattern.compile("^.*/([A-Z]+)-(\\d+)/[^/]+$"); + private static final Pattern LOG_PATH_FORMAT_RE = Pattern.compile("^.*/([A-Z]+)-(\\d+)/[^/]+$"); private static String processLine(String line) { Matcher m = TO_REMOVE_FROM_LOG_LINES_RE.matcher(line); @@ -550,4 +531,3 @@ private void startDaemonLogTailer() { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java index ddd29fbc556d..f871db0e38d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -31,12 +32,12 @@ import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** - * A command-line tool that spins up a local process-based cluster, loads - * some data, restarts the regionserver holding hbase:meta, and verifies that the - * cluster recovers. + * A command-line tool that spins up a local process-based cluster, loads some data, restarts the + * regionserver holding hbase:meta, and verifies that the cluster recovers. */ public class RestartMetaTest extends AbstractHBaseTool { @@ -46,8 +47,7 @@ public class RestartMetaTest extends AbstractHBaseTool { private static final int DEFAULT_NUM_RS = 2; /** Table name for the test */ - private static TableName TABLE_NAME = - TableName.valueOf("load_test"); + private static TableName TABLE_NAME = TableName.valueOf("load_test"); /** The number of seconds to sleep after loading the data */ private static final int SLEEP_SEC_AFTER_DATA_LOAD = 5; @@ -71,16 +71,13 @@ private void loadData() throws IOException { // print out the arguments System.out.printf("Key range %d .. 
%d\n", startKey, endKey); - System.out.printf("Number of Columns/Key: %d..%d\n", minColsPerKey, - maxColsPerKey); - System.out.printf("Data Size/Column: %d..%d bytes\n", minColDataSize, - maxColDataSize); + System.out.printf("Number of Columns/Key: %d..%d\n", minColsPerKey, maxColsPerKey); + System.out.printf("Data Size/Column: %d..%d bytes\n", minColDataSize, maxColDataSize); System.out.printf("Client Threads: %d\n", numThreads); // start the writers - LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator( - minColDataSize, maxColDataSize, minColsPerKey, maxColsPerKey, - HFileTestUtil.DEFAULT_COLUMN_FAMILY); + LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(minColDataSize, + maxColDataSize, minColsPerKey, maxColsPerKey, HFileTestUtil.DEFAULT_COLUMN_FAMILY); MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, TABLE_NAME); writer.setMultiPut(true); writer.start(startKey, endKey, numThreads); @@ -100,14 +97,12 @@ protected int doWork() throws Exception { // create tables if needed HBaseTestingUtil.createPreSplitLoadTestTable(conf, TABLE_NAME, - HFileTestUtil.DEFAULT_COLUMN_FAMILY, Compression.Algorithm.NONE, - DataBlockEncoding.NONE); + HFileTestUtil.DEFAULT_COLUMN_FAMILY, Compression.Algorithm.NONE, DataBlockEncoding.NONE); LOG.debug("Loading data....\n\n"); loadData(); - LOG.debug("Sleeping for " + SLEEP_SEC_AFTER_DATA_LOAD + - " seconds....\n\n"); + LOG.debug("Sleeping for " + SLEEP_SEC_AFTER_DATA_LOAD + " seconds....\n\n"); Threads.sleep(5 * SLEEP_SEC_AFTER_DATA_LOAD); Connection connection = ConnectionFactory.createConnection(conf); @@ -128,11 +123,9 @@ protected int doWork() throws Exception { ResultScanner scanner = metaTable.getScanner(new Scan()); Result result; while ((result = scanner.next()) != null) { - LOG.info("Region assignment from META: " - + Bytes.toStringBinary(result.getRow()) - + " => " - + Bytes.toStringBinary(result.getFamilyMap(HConstants.CATALOG_FAMILY) - .get(HConstants.SERVER_QUALIFIER))); + LOG.info("Region assignment from META: " + Bytes.toStringBinary(result.getRow()) + " => " + + Bytes.toStringBinary( + result.getFamilyMap(HConstants.CATALOG_FAMILY).get(HConstants.SERVER_QUALIFIER))); } metaTable.close(); connection.close(); @@ -143,13 +136,13 @@ protected int doWork() throws Exception { protected void addOptions() { addOptWithArg(OPT_NUM_RS, "Number of Region Servers"); addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, - HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); + HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); } @Override protected void processOptions(CommandLine cmd) { - numRegionServers = Integer.parseInt(cmd.getOptionValue(OPT_NUM_RS, - String.valueOf(DEFAULT_NUM_RS))); + numRegionServers = + Integer.parseInt(cmd.getOptionValue(OPT_NUM_RS, String.valueOf(DEFAULT_NUM_RS))); } public static void main(String[] args) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/StoppableImplementation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/StoppableImplementation.java index 9971a7025780..1c955e20454f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/StoppableImplementation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/StoppableImplementation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Stoppable; +import org.apache.yetus.audience.InterfaceAudience; /** * A base implementation for a Stoppable service diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBloomFilterChunk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBloomFilterChunk.java index 610942226a82..9d62165b56db 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBloomFilterChunk.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBloomFilterChunk.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestBloomFilterChunk { @ClassRule @@ -41,69 +41,67 @@ public class TestBloomFilterChunk { @Test public void testBasicBloom() throws Exception { - BloomFilterChunk bf1 = new BloomFilterChunk(1000, (float)0.01, Hash.MURMUR_HASH, 0); - BloomFilterChunk bf2 = new BloomFilterChunk(1000, (float)0.01, Hash.MURMUR_HASH, 0); + BloomFilterChunk bf1 = new BloomFilterChunk(1000, (float) 0.01, Hash.MURMUR_HASH, 0); + BloomFilterChunk bf2 = new BloomFilterChunk(1000, (float) 0.01, Hash.MURMUR_HASH, 0); bf1.allocBloom(); bf2.allocBloom(); // test 1: verify no fundamental false negatives or positives - byte[] key1 = {1,2,3,4,5,6,7,8,9}; - byte[] key2 = {1,2,3,4,5,6,7,8,7}; + byte[] key1 = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + byte[] key2 = { 1, 2, 3, 4, 5, 6, 7, 8, 7 }; bf1.add(key1, 0, key1.length); bf2.add(key2, 0, key2.length); assertTrue(BloomFilterUtil.contains(key1, 0, key1.length, new MultiByteBuff(bf1.bloom), 0, - (int) bf1.byteSize, bf1.hash, bf1.hashCount)); + (int) bf1.byteSize, bf1.hash, bf1.hashCount)); assertFalse(BloomFilterUtil.contains(key2, 0, key2.length, new MultiByteBuff(bf1.bloom), 0, - (int) bf1.byteSize, bf1.hash, bf1.hashCount)); + (int) bf1.byteSize, bf1.hash, bf1.hashCount)); assertFalse(BloomFilterUtil.contains(key1, 0, key1.length, new MultiByteBuff(bf2.bloom), 0, - (int) bf2.byteSize, bf2.hash, bf2.hashCount)); + (int) bf2.byteSize, bf2.hash, bf2.hashCount)); assertTrue(BloomFilterUtil.contains(key2, 0, key2.length, new MultiByteBuff(bf2.bloom), 0, - (int) bf2.byteSize, bf2.hash, bf2.hashCount)); + (int) bf2.byteSize, bf2.hash, bf2.hashCount)); - byte [] bkey = {1,2,3,4}; - byte [] bval = Bytes.toBytes("this is a much larger byte array"); + byte[] bkey = { 1, 2, 3, 4 }; + byte[] bval = Bytes.toBytes("this is a much larger byte array"); bf1.add(bkey, 0, bkey.length); - bf1.add(bval, 1, bval.length-1); + bf1.add(bval, 1, bval.length - 1); assertTrue(BloomFilterUtil.contains(bkey, 0, bkey.length, new MultiByteBuff(bf1.bloom), 0, - (int) bf1.byteSize, bf1.hash, bf1.hashCount)); - assertTrue(BloomFilterUtil.contains(bval, 1, bval.length - 1, new MultiByteBuff(bf1.bloom), - 0, (int) bf1.byteSize, bf1.hash, bf1.hashCount)); + (int) bf1.byteSize, bf1.hash, bf1.hashCount)); + assertTrue(BloomFilterUtil.contains(bval, 1, bval.length - 1, new MultiByteBuff(bf1.bloom), 0, 
+ (int) bf1.byteSize, bf1.hash, bf1.hashCount)); assertFalse(BloomFilterUtil.contains(bval, 0, bval.length, new MultiByteBuff(bf1.bloom), 0, - (int) bf1.byteSize, bf1.hash, bf1.hashCount)); + (int) bf1.byteSize, bf1.hash, bf1.hashCount)); // test 2: serialization & deserialization. // (convert bloom to byte array & read byte array back in as input) ByteArrayOutputStream bOut = new ByteArrayOutputStream(); bf1.writeBloom(new DataOutputStream(bOut)); ByteBuffer bb = ByteBuffer.wrap(bOut.toByteArray()); - BloomFilterChunk newBf1 = new BloomFilterChunk(1000, (float)0.01, - Hash.MURMUR_HASH, 0); + BloomFilterChunk newBf1 = new BloomFilterChunk(1000, (float) 0.01, Hash.MURMUR_HASH, 0); assertTrue(BloomFilterUtil.contains(key1, 0, key1.length, new MultiByteBuff(bb), 0, - (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); + (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); assertFalse(BloomFilterUtil.contains(key2, 0, key2.length, new MultiByteBuff(bb), 0, - (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); + (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); assertTrue(BloomFilterUtil.contains(bkey, 0, bkey.length, new MultiByteBuff(bb), 0, - (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); + (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); assertTrue(BloomFilterUtil.contains(bval, 1, bval.length - 1, new MultiByteBuff(bb), 0, - (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); + (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); assertFalse(BloomFilterUtil.contains(bval, 0, bval.length, new MultiByteBuff(bb), 0, - (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); + (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); assertFalse(BloomFilterUtil.contains(bval, 0, bval.length, new MultiByteBuff(bb), 0, - (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); + (int) newBf1.byteSize, newBf1.hash, newBf1.hashCount)); System.out.println("Serialized as " + bOut.size() + " bytes"); - assertTrue(bOut.size() - bf1.byteSize < 10); //... allow small padding + assertTrue(bOut.size() - bf1.byteSize < 10); // ... 
allow small padding } @Test public void testBloomFold() throws Exception { // test: foldFactor < log(max/actual) - BloomFilterChunk b = new BloomFilterChunk(1003, (float) 0.01, - Hash.MURMUR_HASH, 2); + BloomFilterChunk b = new BloomFilterChunk(1003, (float) 0.01, Hash.MURMUR_HASH, 2); b.allocBloom(); long origSize = b.getByteSize(); assertEquals(1204, origSize); @@ -112,14 +110,13 @@ public void testBloomFold() throws Exception { b.add(ib, 0, ib.length); } b.compactBloom(); - assertEquals(origSize>>2, b.getByteSize()); + assertEquals(origSize >> 2, b.getByteSize()); int falsePositives = 0; for (int i = 0; i < 25; ++i) { byte[] bytes = Bytes.toBytes(i); if (BloomFilterUtil.contains(bytes, 0, bytes.length, new MultiByteBuff(b.bloom), 0, - (int) b.byteSize, b.hash, b.hashCount)) { - if (i >= 12) - falsePositives++; + (int) b.byteSize, b.hash, b.hashCount)) { + if (i >= 12) falsePositives++; } else { assertFalse(i < 12); } @@ -132,12 +129,12 @@ public void testBloomFold() throws Exception { @Test public void testBloomPerf() throws Exception { // add - float err = (float)0.01; - BloomFilterChunk b = new BloomFilterChunk(10*1000*1000, (float)err, Hash.MURMUR_HASH, 3); + float err = (float) 0.01; + BloomFilterChunk b = new BloomFilterChunk(10 * 1000 * 1000, (float) err, Hash.MURMUR_HASH, 3); b.allocBloom(); - long startTime = EnvironmentEdgeManager.currentTime(); + long startTime = EnvironmentEdgeManager.currentTime(); long origSize = b.getByteSize(); - for (int i = 0; i < 1*1000*1000; ++i) { + for (int i = 0; i < 1 * 1000 * 1000; ++i) { byte[] ib = Bytes.toBytes(i); b.add(ib, 0, ib.length); } @@ -149,26 +146,25 @@ public void testBloomPerf() throws Exception { b.compactBloom(); endTime = EnvironmentEdgeManager.currentTime(); System.out.println("Total Fold time = " + (endTime - startTime) + "ms"); - assertTrue(origSize >= b.getByteSize()<<3); + assertTrue(origSize >= b.getByteSize() << 3); // test startTime = EnvironmentEdgeManager.currentTime(); int falsePositives = 0; - for (int i = 0; i < 2*1000*1000; ++i) { + for (int i = 0; i < 2 * 1000 * 1000; ++i) { byte[] bytes = Bytes.toBytes(i); if (BloomFilterUtil.contains(bytes, 0, bytes.length, new MultiByteBuff(b.bloom), 0, - (int) b.byteSize, b.hash, b.hashCount)) { - if (i >= 1 * 1000 * 1000) - falsePositives++; + (int) b.byteSize, b.hash, b.hashCount)) { + if (i >= 1 * 1000 * 1000) falsePositives++; } else { - assertFalse(i < 1*1000*1000); + assertFalse(i < 1 * 1000 * 1000); } } endTime = EnvironmentEdgeManager.currentTime(); System.out.println("Total Contains time = " + (endTime - startTime) + "ms"); System.out.println("False Positive = " + falsePositives); - assertTrue(falsePositives <= (1*1000*1000)*err); + assertTrue(falsePositives <= (1 * 1000 * 1000) * err); // test: foldFactor > log(max/actual) } @@ -197,4 +193,3 @@ public void testFoldableByteSize() { assertEquals(640, BloomFilterUtil.computeFoldableByteSize(5001, 4)); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java index e8bbf7b14ad6..c4337758d800 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestBoundedPriorityBlockingQueue { @ClassRule @@ -66,7 +66,8 @@ public int getPriority() { } static class TestObjectComparator implements Comparator { - public TestObjectComparator() {} + public TestObjectComparator() { + } @Override public int compare(TestObject a, TestObject b) { @@ -230,8 +231,8 @@ public void run() { @Override public void run() { try { - threadsStarted.await(); - queue.offer(testObj); + threadsStarted.await(); + queue.offer(testObj); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java index 7ed8891e03a5..6f1ae827b54b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java index 86a350a011d7..3d809c274c5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestCompressionTest { @ClassRule @@ -89,7 +89,7 @@ public void testTestCompression() { // Hadoop nativelib is not available LOG.debug("Native code not loaded"); // This check is useless as it fails with - // ...DoNotRetryIOException: Compression algorithm 'lzo' previously failed test. + // ...DoNotRetryIOException: Compression algorithm 'lzo' previously failed test. // assertFalse("LZO", CompressionTest.testCompression("LZO")); // LZ4 requires that the native lib be present before 3.3.1. 
After 3.3.1, hadoop uses // lz4-java which will do java version of lz4 as last resort -- so the below fails before @@ -122,21 +122,22 @@ private void nativeCodecTest(String codecName, String libName, String codecClass } try { - Configuration conf = new Configuration(); - CompressionCodec codec = (CompressionCodec) - ReflectionUtils.newInstance(conf.getClassByName(codecClassName), conf); - - DataOutputBuffer compressedDataBuffer = new DataOutputBuffer(); - CompressionOutputStream deflateFilter = codec.createOutputStream(compressedDataBuffer); - - byte[] data = new byte[1024]; - DataOutputStream deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter)); - deflateOut.write(data, 0, data.length); - deflateOut.flush(); - deflateFilter.finish(); - - // Codec class, codec nativelib and Hadoop nativelib with codec JNIs are present - assertTrue(CompressionTest.testCompression(codecName)); + Configuration conf = new Configuration(); + CompressionCodec codec = (CompressionCodec) ReflectionUtils + .newInstance(conf.getClassByName(codecClassName), conf); + + DataOutputBuffer compressedDataBuffer = new DataOutputBuffer(); + CompressionOutputStream deflateFilter = codec.createOutputStream(compressedDataBuffer); + + byte[] data = new byte[1024]; + DataOutputStream deflateOut = + new DataOutputStream(new BufferedOutputStream(deflateFilter)); + deflateOut.write(data, 0, data.length); + deflateOut.flush(); + deflateFilter.finish(); + + // Codec class, codec nativelib and Hadoop nativelib with codec JNIs are present + assertTrue(CompressionTest.testCompression(codecName)); } catch (UnsatisfiedLinkError e) { // Hadoop nativelib does not have codec JNIs. // cannot assert the codec here because the current logic of @@ -158,4 +159,3 @@ private void nativeCodecTest(String codecName, String libName, String codecClass } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConfigurationUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConfigurationUtil.java index 583b6921f3e5..929ed3f57d08 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConfigurationUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConfigurationUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java index 1b4023f2829f..343aad117479 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ public class TestConnectionCache { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionCache.class); + HBaseClassTestRule.forClass(TestConnectionCache.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -57,7 +57,7 @@ public static void tearDown() throws IOException { public void testConnectionChore() throws Exception { // 1s for clean interval & 5s for maxIdleTime ConnectionCache cache = new ConnectionCache(UTIL.getConfiguration(), - UserProvider.instantiate(UTIL.getConfiguration()), 1000, 5000); + UserProvider.instantiate(UTIL.getConfiguration()), 1000, 5000); ConnectionCache.ConnectionInfo info = cache.getCurrentConnection(); assertEquals(false, info.connection.isClosed()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java index d5c31236d117..6ecaab53d748 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,6 @@ */ package org.apache.hadoop.hbase.util; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseCommonTestingUtil; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.junit.ClassRule; - -// this is deliberately not in the o.a.h.h.regionserver package - -// in order to make sure all required classes/method are available - import static org.junit.Assert.assertEquals; import java.io.IOException; @@ -35,10 +26,12 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.Predicate; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtil; +import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -66,6 +59,7 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.junit.AfterClass; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java index 7dd6a54be83f..d29bb6a95f9b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,10 +28,9 @@ import org.junit.experimental.categories.Category; /** - * Tests to make sure that the default environment edge conforms to appropriate - * behaviour. + * Tests to make sure that the default environment edge conforms to appropriate behaviour. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestDefaultEnvironmentEdge { @ClassRule @@ -51,7 +50,6 @@ public void testGetCurrentTimeUsesSystemClock() { fail(e.getMessage()); } long secondEdgeTime = edge.currentTime(); - assertTrue("Second time must be greater than the first", - secondEdgeTime > edgeTime); + assertTrue("Second time must be greater than the first", secondEdgeTime > edgeTime); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java index ffff31a83a51..e49018cf944d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestEncryptionTest { @ClassRule @@ -78,8 +78,7 @@ public void testBadCipherProvider() throws Exception { public void testAESCipher() { Configuration conf = HBaseConfiguration.create(); conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); try { EncryptionTest.testEncryption(conf, algorithm, null); } catch (Exception e) { @@ -99,13 +98,12 @@ public void testUnknownCipher() throws Exception { public void testTestEnabledWithDefaultConfig() { Configuration conf = HBaseConfiguration.create(); conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); try { EncryptionTest.testEncryption(conf, algorithm, null); } catch (Exception e) { - fail("Test for cipher " + algorithm + " should have succeeded, when " + - Encryption.CRYPTO_ENABLED_CONF_KEY + " is not set"); + fail("Test for cipher " + algorithm + " should have succeeded, when " + + Encryption.CRYPTO_ENABLED_CONF_KEY + " is not set"); } } @@ -113,14 +111,13 @@ public void testTestEnabledWithDefaultConfig() { public void testTestEnabledWhenCryptoIsExplicitlyEnabled() { Configuration conf = HBaseConfiguration.create(); conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, true); try { EncryptionTest.testEncryption(conf, algorithm, null); } catch (Exception 
e) { - fail("Test for cipher " + algorithm + " should have succeeded, when " + - Encryption.CRYPTO_ENABLED_CONF_KEY + " is set to true"); + fail("Test for cipher " + algorithm + " should have succeeded, when " + + Encryption.CRYPTO_ENABLED_CONF_KEY + " is set to true"); } } @@ -128,13 +125,11 @@ public void testTestEnabledWhenCryptoIsExplicitlyEnabled() { public void testTestEnabledWhenCryptoIsExplicitlyDisabled() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, false); EncryptionTest.testEncryption(conf, algorithm, null); } - public static class FailingKeyProvider implements KeyProvider { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index 78b0f012b336..435f25ed3052 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,7 @@ * Tests for {@link FSTableDescriptors}. */ // Do not support to be executed in he same JVM as other tests -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestFSTableDescriptors { @ClassRule @@ -95,13 +95,13 @@ public void testRegexAgainstOldStyleTableInfo() { @Test public void testCreateAndUpdate() throws IOException { TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testDir); assertTrue(fstd.createTableDescriptor(htd)); assertFalse(fstd.createTableDescriptor(htd)); Path tableInfoDir = new Path(CommonFSUtils.getTableDir(testDir, htd.getTableName()), - FSTableDescriptors.TABLEINFO_DIR); + FSTableDescriptors.TABLEINFO_DIR); FileStatus[] statuses = fs.listStatus(tableInfoDir); assertEquals("statuses.length=" + statuses.length, 1, statuses.length); for (int i = 0; i < 10; i++) { @@ -114,15 +114,14 @@ public void testCreateAndUpdate() throws IOException { @Test public void testSequenceIdAdvancesOnTableInfo() throws IOException { TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testDir); Path previousPath = null; int previousSeqId = -1; for (int i = 0; i < 10; i++) { Path path = fstd.updateTableDescriptor(htd); - int seqId = - FSTableDescriptors.getTableInfoSequenceIdAndFileLength(path).sequenceId; + int seqId = FSTableDescriptors.getTableInfoSequenceIdAndFileLength(path).sequenceId; if (previousPath != null) { // Assert we cleaned up the old file. 
assertTrue(!fs.exists(previousPath)); @@ -160,7 +159,7 @@ public void testFormatTableInfoSequenceId() { private Path assertWriteAndReadSequenceId(final int i) { Path p = - new Path(testDir, FSTableDescriptors.getTableInfoFileName(i, HConstants.EMPTY_BYTE_ARRAY)); + new Path(testDir, FSTableDescriptors.getTableInfoFileName(i, HConstants.EMPTY_BYTE_ARRAY)); int ii = FSTableDescriptors.getTableInfoSequenceIdAndFileLength(p).sequenceId; assertEquals(i, ii); return p; @@ -172,7 +171,7 @@ public void testRemoves() throws IOException { // Cleanup old tests if any detrius laying around. TableDescriptors htds = new FSTableDescriptors(fs, testDir); TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); htds.update(htd); assertNotNull(htds.remove(htd.getTableName())); assertNull(htds.remove(htd.getTableName())); @@ -182,11 +181,11 @@ public void testRemoves() throws IOException { public void testReadingHTDFromFS() throws IOException { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); FSTableDescriptors fstd = new FSTableDescriptors(fs, testDir); fstd.createTableDescriptor(htd); TableDescriptor td2 = - FSTableDescriptors.getTableDescriptorFromFs(fs, testDir, htd.getTableName()); + FSTableDescriptors.getTableDescriptorFromFs(fs, testDir, htd.getTableName()); assertTrue(htd.equals(td2)); } @@ -217,7 +216,7 @@ public TableDescriptor get(TableName tablename) { // Update the table infos for (int i = 0; i < count; i++) { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName() + i)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName() + i)); builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i)); htds.update(builder.build()); } @@ -253,7 +252,7 @@ public void testTableDescriptorsNoCache() throws IOException, InterruptedExcepti // Update the table infos for (int i = 0; i < count; i++) { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName() + i)); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName() + i)); builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i)); htds.update(builder.build()); } @@ -261,7 +260,7 @@ public void testTableDescriptorsNoCache() throws IOException, InterruptedExcepti assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name.getMethodName() + i))); assertTrue("Column Family " + i + " missing", htds - .get(TableName.valueOf(name.getMethodName() + i)).hasColumnFamily(Bytes.toBytes("" + i))); + .get(TableName.valueOf(name.getMethodName() + i)).hasColumnFamily(Bytes.toBytes("" + i))); } assertEquals(count * 4, htds.invocations); assertEquals("expected=0, actual=" + htds.cachehits, 0, htds.cachehits); @@ -280,10 +279,11 @@ public void testGetAll() throws IOException, InterruptedException { TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build()); } // add hbase:meta - htds - .createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build()); - assertEquals("getAll() didn't return all TableDescriptors, expected: " + (count + 1) + - " got: " + htds.getAll().size(), count + 1, htds.getAll().size()); + htds.createTableDescriptor( 
+ TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build()); + assertEquals("getAll() didn't return all TableDescriptors, expected: " + (count + 1) + " got: " + + htds.getAll().size(), + count + 1, htds.getAll().size()); } @Test @@ -305,7 +305,7 @@ public void testGetAllOrdering() throws Exception { assertEquals(4, tables.size()); String[] tableNamesOrdered = - new String[] { "bar:foo", "default:bar", "default:foo", "foo:bar" }; + new String[] { "bar:foo", "default:bar", "default:foo", "foo:bar" }; int i = 0; for (Map.Entry entry : tables.entrySet()) { assertEquals(tableNamesOrdered[i], entry.getKey()); @@ -373,7 +373,7 @@ public void testUpdates() throws IOException { // Cleanup old tests if any detrius laying around. TableDescriptors htds = new FSTableDescriptors(fs, testDir); TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); htds.update(htd); htds.update(htd); htds.update(htd); @@ -382,11 +382,11 @@ public void testUpdates() throws IOException { @Test public void testTableInfoFileStatusComparator() { FileStatus bare = new FileStatus(0, false, 0, 0, -1, - new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX)); + new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX)); FileStatus future = new FileStatus(0, false, 0, 0, -1, - new Path("/tmp/tablinfo." + EnvironmentEdgeManager.currentTime())); + new Path("/tmp/tablinfo." + EnvironmentEdgeManager.currentTime())); FileStatus farFuture = new FileStatus(0, false, 0, 0, -1, - new Path("/tmp/tablinfo." + EnvironmentEdgeManager.currentTime() + 1000)); + new Path("/tmp/tablinfo." + EnvironmentEdgeManager.currentTime() + 1000)); FileStatus[] alist = { bare, future, farFuture }; FileStatus[] blist = { bare, farFuture, future }; FileStatus[] clist = { farFuture, bare, future }; @@ -407,24 +407,24 @@ public void testReadingInvalidDirectoryFromFS() throws IOException { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); try { new FSTableDescriptors(fs, CommonFSUtils.getRootDir(UTIL.getConfiguration())) - .get(TableName.valueOf(HConstants.HBASE_TEMP_DIRECTORY)); + .get(TableName.valueOf(HConstants.HBASE_TEMP_DIRECTORY)); fail("Shouldn't be able to read a table descriptor for the archive directory."); } catch (Exception e) { - LOG.debug("Correctly got error when reading a table descriptor from the archive directory: " + - e.getMessage()); + LOG.debug("Correctly got error when reading a table descriptor from the archive directory: " + + e.getMessage()); } } @Test public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException { TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testDir); assertTrue(fstd.createTableDescriptor(htd)); assertFalse(fstd.createTableDescriptor(htd)); htd = TableDescriptorBuilder.newBuilder(htd) - .setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue")).build(); + .setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue")).build(); assertTrue(fstd.createTableDescriptor(htd)); // this will re-create Path tableDir = CommonFSUtils.getTableDir(testDir, htd.getTableName()); assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir)); @@ -433,10 +433,10 @@ public void 
testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException @Test public void testIgnoreBrokenTableDescriptorFiles() throws IOException { TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build(); TableDescriptor newHtd = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2")).build(); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2")).build(); assertNotEquals(newHtd, htd); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testDir, false, false); @@ -447,7 +447,8 @@ public void testIgnoreBrokenTableDescriptorFiles() throws IOException { FileStatus[] statuses = fs.listStatus(tableInfoDir); assertEquals(1, statuses.length); int seqId = - FSTableDescriptors.getTableInfoSequenceIdAndFileLength(statuses[0].getPath()).sequenceId + 1; + FSTableDescriptors.getTableInfoSequenceIdAndFileLength(statuses[0].getPath()).sequenceId + + 1; Path brokenFile = new Path(tableInfoDir, FSTableDescriptors.getTableInfoFileName(seqId, bytes)); try (FSDataOutputStream out = fs.create(brokenFile)) { out.write(bytes, 0, bytes.length / 2); @@ -470,10 +471,9 @@ public FSTableDescriptorsTest(FileSystem fs, Path rootdir, boolean usecache) { @Override public TableDescriptor get(TableName tablename) { - LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") + - " TableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits); + LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") + " TableDescriptor.get() on " + + tablename + ", cachehits=" + this.cachehits); return super.get(tablename); } } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java index ecdb4f675368..3745d68344b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import java.io.IOException; import java.util.List; import java.util.Random; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -64,7 +63,7 @@ /** * Test {@link FSUtils}. 
*/ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestFSUtils { @ClassRule @@ -98,10 +97,9 @@ public void testIsHDFS() throws Exception { } } - private void WriteDataToHDFS(FileSystem fs, Path file, int dataSize) - throws Exception { + private void WriteDataToHDFS(FileSystem fs, Path file, int dataSize) throws Exception { FSDataOutputStream out = fs.create(file); - byte [] data = new byte[dataSize]; + byte[] data = new byte[dataSize]; out.write(data, 0, dataSize); out.close(); } @@ -130,7 +128,7 @@ interface HDFSBlockDistributionFunction { } private void testComputeHDFSBlocksDistribution( - HDFSBlockDistributionFunction fileToBlockDistribution) throws Exception { + HDFSBlockDistributionFunction fileToBlockDistribution) throws Exception { final int DEFAULT_BLOCK_SIZE = 1024; conf.setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE); MiniDFSCluster cluster = null; @@ -145,7 +143,7 @@ private void testComputeHDFSBlocksDistribution( // create a file with two blocks testFile = new Path("/test1.txt"); - WriteDataToHDFS(fs, testFile, 2*DEFAULT_BLOCK_SIZE); + WriteDataToHDFS(fs, testFile, 2 * DEFAULT_BLOCK_SIZE); // given the default replication factor is 3, the same as the number of // datanodes; the locality index for each host should be 100%, @@ -156,21 +154,19 @@ private void testComputeHDFSBlocksDistribution( ok = true; HDFSBlocksDistribution blocksDistribution = - fileToBlockDistribution.getForPath(fs, testFile); + fileToBlockDistribution.getForPath(fs, testFile); - long uniqueBlocksTotalWeight = - blocksDistribution.getUniqueBlocksTotalWeight(); + long uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight(); for (String host : hosts) { long weight = blocksDistribution.getWeight(host); ok = (ok && uniqueBlocksTotalWeight == weight); } } while (!ok && EnvironmentEdgeManager.currentTime() < maxTime); assertTrue(ok); - } finally { + } finally { htu.shutdownMiniDFSCluster(); } - try { // set up a cluster with 4 nodes String hosts[] = new String[] { "host1", "host2", "host3", "host4" }; @@ -180,7 +176,7 @@ private void testComputeHDFSBlocksDistribution( // create a file with three blocks testFile = new Path("/test2.txt"); - WriteDataToHDFS(fs, testFile, 3*DEFAULT_BLOCK_SIZE); + WriteDataToHDFS(fs, testFile, 3 * DEFAULT_BLOCK_SIZE); // given the default replication factor is 3, we will have total of 9 // replica of blocks; thus the host with the highest weight should have @@ -190,22 +186,20 @@ private void testComputeHDFSBlocksDistribution( long uniqueBlocksTotalWeight; do { HDFSBlocksDistribution blocksDistribution = - fileToBlockDistribution.getForPath(fs, testFile); + fileToBlockDistribution.getForPath(fs, testFile); uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight(); String tophost = blocksDistribution.getTopHosts().get(0); weight = blocksDistribution.getWeight(tophost); // NameNode is informed asynchronously, so we may have a delay. 
See HBASE-6175 - } while (uniqueBlocksTotalWeight != weight && - EnvironmentEdgeManager.currentTime() < maxTime); + } while (uniqueBlocksTotalWeight != weight && EnvironmentEdgeManager.currentTime() < maxTime); assertTrue(uniqueBlocksTotalWeight == weight); } finally { htu.shutdownMiniDFSCluster(); } - try { // set up a cluster with 4 nodes String hosts[] = new String[] { "host1", "host2", "host3", "host4" }; @@ -224,9 +218,8 @@ private void testComputeHDFSBlocksDistribution( do { blocksDistribution = fileToBlockDistribution.getForPath(fs, testFile); // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175 - } - while (blocksDistribution.getTopHosts().size() != 3 && - EnvironmentEdgeManager.currentTime() < maxTime); + } while (blocksDistribution.getTopHosts().size() != 3 + && EnvironmentEdgeManager.currentTime() < maxTime); assertEquals("Wrong number of hosts distributing blocks.", 3, blocksDistribution.getTopHosts().size()); } finally { @@ -258,8 +251,8 @@ public void testVersion() throws DeserializationException, IOException { assertTrue(CommonFSUtils.delete(fs, versionFile, true)); Path metaRegionDir = FSUtils.getRegionDirFromRootDir(rootdir, RegionInfoBuilder.FIRST_META_REGIONINFO); - FsPermission defaultPerms = CommonFSUtils.getFilePermissions(fs, this.conf, - HConstants.DATA_FILE_UMASK_KEY); + FsPermission defaultPerms = + CommonFSUtils.getFilePermissions(fs, this.conf, HConstants.DATA_FILE_UMASK_KEY); CommonFSUtils.create(fs, metaRegionDir, defaultPerms, false); boolean thrown = false; try { @@ -268,10 +261,10 @@ public void testVersion() throws DeserializationException, IOException { thrown = true; } assertTrue("Expected FileSystemVersionException", thrown); - // Write out a good version file. See if we can read it in and convert. + // Write out a good version file. See if we can read it in and convert. String version = HConstants.FILE_SYSTEM_VERSION; writeVersionFile(versionFile, version); - FileStatus [] status = fs.listStatus(versionFile); + FileStatus[] status = fs.listStatus(versionFile); assertNotNull(status); assertTrue(status.length > 0); String newVersion = FSUtils.getVersion(fs, rootdir); @@ -299,15 +292,15 @@ public void testPermMask() throws Exception { final Path rootdir = htu.getDataTestDir(); final FileSystem fs = rootdir.getFileSystem(conf); // default fs permission - FsPermission defaultFsPerm = CommonFSUtils.getFilePermissions(fs, conf, - HConstants.DATA_FILE_UMASK_KEY); + FsPermission defaultFsPerm = + CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // 'hbase.data.umask.enable' is false. We will get default fs permission. assertEquals(FsPermission.getFileDefault(), defaultFsPerm); conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true); // first check that we don't crash if we don't have perms set - FsPermission defaultStartPerm = CommonFSUtils.getFilePermissions(fs, conf, - HConstants.DATA_FILE_UMASK_KEY); + FsPermission defaultStartPerm = + CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // default 'hbase.data.umask'is 000, and this umask will be used when // 'hbase.data.umask.enable' is true. // Therefore we will not get the real fs default in this case. 
@@ -316,8 +309,8 @@ public void testPermMask() throws Exception { conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077"); // now check that we get the right perms - FsPermission filePerm = CommonFSUtils.getFilePermissions(fs, conf, - HConstants.DATA_FILE_UMASK_KEY); + FsPermission filePerm = + CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); assertEquals(new FsPermission("700"), filePerm); // then that the correct file is created @@ -366,7 +359,8 @@ public void testDeleteAndExists() throws Exception { public void testFilteredStatusDoesNotThrowOnNotFound() throws Exception { MiniDFSCluster cluster = htu.startMiniDFSCluster(1); try { - assertNull(FSUtils.listStatusWithStatusFilter(cluster.getFileSystem(), new Path("definitely/doesn't/exist"), null)); + assertNull(FSUtils.listStatusWithStatusFilter(cluster.getFileSystem(), + new Path("definitely/doesn't/exist"), null)); } finally { cluster.shutdown(); } @@ -396,7 +390,7 @@ public void testRenameAndSetModifyTime() throws Exception { EnvironmentEdgeManager.injectEdge(mockEnv); try { String dstFile = HBaseTestingUtil.getRandomUUID().toString(); - Path dst = new Path(testDir , dstFile); + Path dst = new Path(testDir, dstFile); assertTrue(CommonFSUtils.renameAndSetModifyTime(fs, p, dst)); assertFalse("The moved file should not be present", CommonFSUtils.isExists(fs, p)); @@ -441,8 +435,7 @@ private void verifyNoHDFSApiInvocationForDefaultPolicy() { class AlwaysFailSetStoragePolicyFileSystem extends DistributedFileSystem { @Override - public void setStoragePolicy(final Path src, final String policyName) - throws IOException { + public void setStoragePolicy(final Path src, final String policyName) throws IOException { throw new IOException("The setStoragePolicy method is invoked"); } } @@ -484,10 +477,10 @@ private void verifyFileInDirWithStoragePolicy(final String policy) throws Except String policySet = hfs.getStoragePolicyName(p); LOG.debug("The storage policy of path " + p + " is " + policySet); if (policy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY) - || policy.equals(INVALID_STORAGE_POLICY)) { + || policy.equals(INVALID_STORAGE_POLICY)) { String hdfsDefaultPolicy = hfs.getStoragePolicyName(hfs.getHomeDirectory()); LOG.debug("The default hdfs storage policy (indicated by home path: " - + hfs.getHomeDirectory() + ") is " + hdfsDefaultPolicy); + + hfs.getHomeDirectory() + ") is " + hdfsDefaultPolicy); Assert.assertEquals(hdfsDefaultPolicy, policySet); } else { Assert.assertEquals(policy, policySet); @@ -500,11 +493,12 @@ private void verifyFileInDirWithStoragePolicy(final String policy) throws Except } /** - * Ugly test that ensures we can get at the hedged read counters in dfsclient. - * Does a bit of preading with hedged reads enabled using code taken from hdfs TestPread. + * Ugly test that ensures we can get at the hedged read counters in dfsclient. Does a bit of + * preading with hedged reads enabled using code taken from hdfs TestPread. * @throws Exception */ - @Test public void testDFSHedgedReadMetrics() throws Exception { + @Test + public void testDFSHedgedReadMetrics() throws Exception { // Enable hedged reads and set it so the threshold is really low. // Most of this test is taken from HDFS, from TestPread. 
conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE, 5); @@ -515,15 +509,15 @@ private void verifyFileInDirWithStoragePolicy(final String policy) throws Except conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0); conf.setBoolean("dfs.datanode.transferTo.allowed", false); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); - // Get the metrics. Should be empty. + // Get the metrics. Should be empty. DFSHedgedReadMetrics metrics = FSUtils.getDFSHedgedReadMetrics(conf); assertEquals(0, metrics.getHedgedReadOps()); FileSystem fileSys = cluster.getFileSystem(); try { Path p = new Path("preadtest.dat"); // We need > 1 blocks to test out the hedged reads. - DFSTestUtil.createFile(fileSys, p, 12 * blockSize, 12 * blockSize, - blockSize, (short) 3, seed); + DFSTestUtil.createFile(fileSys, p, 12 * blockSize, 12 * blockSize, blockSize, (short) 3, + seed); pReadFile(fileSys, p); cleanupFile(fileSys, p); assertTrue(metrics.getHedgedReadOps() > 0); @@ -533,7 +527,6 @@ private void verifyFileInDirWithStoragePolicy(final String policy) throws Except } } - @Test public void testCopyFilesParallel() throws Exception { MiniDFSCluster cluster = htu.startMiniDFSCluster(1); @@ -605,19 +598,19 @@ private void pReadFile(FileSystem fileSys, Path name) throws IOException { // check block location caching stm = fileSys.open(name); stm.readFully(1, actual, 0, 4096); - stm.readFully(4*blockSize, actual, 0, 4096); - stm.readFully(7*blockSize, actual, 0, 4096); - actual = new byte[3*4096]; - stm.readFully(0*blockSize, actual, 0, 3*4096); + stm.readFully(4 * blockSize, actual, 0, 4096); + stm.readFully(7 * blockSize, actual, 0, 4096); + actual = new byte[3 * 4096]; + stm.readFully(0 * blockSize, actual, 0, 3 * 4096); checkAndEraseData(actual, 0, expected, "Pread Test 7"); - actual = new byte[8*4096]; - stm.readFully(3*blockSize, actual, 0, 8*4096); - checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8"); + actual = new byte[8 * 4096]; + stm.readFully(3 * blockSize, actual, 0, 8 * 4096); + checkAndEraseData(actual, 3 * blockSize, expected, "Pread Test 8"); // read the tail - stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2); + stm.readFully(11 * blockSize + blockSize / 2, actual, 0, blockSize / 2); IOException res = null; try { // read beyond the end of the file - stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize); + stm.readFully(11 * blockSize + blockSize / 2, actual, 0, blockSize); } catch (IOException e) { // should throw an exception res = e; @@ -629,42 +622,38 @@ private void pReadFile(FileSystem fileSys, Path name) throws IOException { private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) { for (int idx = 0; idx < actual.length; idx++) { - assertEquals(message+" byte "+(from+idx)+" differs. expected "+ - expected[from+idx]+" actual "+actual[idx], - actual[idx], expected[from+idx]); + assertEquals(message + " byte " + (from + idx) + " differs. expected " + expected[from + idx] + + " actual " + actual[idx], + actual[idx], expected[from + idx]); actual[idx] = 0; } } - private void doPread(FSDataInputStream stm, long position, byte[] buffer, - int offset, int length) throws IOException { + private void doPread(FSDataInputStream stm, long position, byte[] buffer, int offset, int length) + throws IOException { int nread = 0; // long totalRead = 0; // DFSInputStream dfstm = null; - /* Disable. This counts do not add up. Some issue in original hdfs tests? 
- if (stm.getWrappedStream() instanceof DFSInputStream) { - dfstm = (DFSInputStream) (stm.getWrappedStream()); - totalRead = dfstm.getReadStatistics().getTotalBytesRead(); - } */ + /* + * Disable. This counts do not add up. Some issue in original hdfs tests? if + * (stm.getWrappedStream() instanceof DFSInputStream) { dfstm = (DFSInputStream) + * (stm.getWrappedStream()); totalRead = dfstm.getReadStatistics().getTotalBytesRead(); } + */ while (nread < length) { - int nbytes = - stm.read(position + nread, buffer, offset + nread, length - nread); + int nbytes = stm.read(position + nread, buffer, offset + nread, length - nread); assertTrue("Error in pread", nbytes > 0); nread += nbytes; } - /* Disable. This counts do not add up. Some issue in original hdfs tests? - if (dfstm != null) { - if (isHedgedRead) { - assertTrue("Expected read statistic to be incremented", - length <= dfstm.getReadStatistics().getTotalBytesRead() - totalRead); - } else { - assertEquals("Expected read statistic to be incremented", length, dfstm - .getReadStatistics().getTotalBytesRead() - totalRead); - } - }*/ + /* + * Disable. This counts do not add up. Some issue in original hdfs tests? if (dfstm != null) { + * if (isHedgedRead) { assertTrue("Expected read statistic to be incremented", length <= + * dfstm.getReadStatistics().getTotalBytesRead() - totalRead); } else { + * assertEquals("Expected read statistic to be incremented", length, dfstm + * .getReadStatistics().getTotalBytesRead() - totalRead); } } + */ } private void cleanupFile(FileSystem fileSys, Path name) throws IOException { @@ -673,7 +662,6 @@ private void cleanupFile(FileSystem fileSys, Path name) throws IOException { assertTrue(!fileSys.exists(name)); } - static { try { Class.forName("org.apache.hadoop.fs.StreamCapabilities"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java index a3c06259e155..f962efceacdb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ /** * Test {@link FSUtils}. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestFSVisitor { @ClassRule @@ -97,13 +97,9 @@ public void storeFile(final String region, final String family, final String hfi } /* - * |-testtb/ - * |----f1d3ff8443297732862df21dc4e57262/ - * |-------f1/ - * |----------d0be84935ba84b66b1e866752ec5d663 - * |----------9fc9d481718f4878b29aad0a597ecb94 - * |-------f2/ - * |----------4b0fe6068c564737946bcf4fd4ab8ae1 + * |-testtb/ |----f1d3ff8443297732862df21dc4e57262/ |-------f1/ + * |----------d0be84935ba84b66b1e866752ec5d663 |----------9fc9d481718f4878b29aad0a597ecb94 + * |-------f2/ |----------4b0fe6068c564737946bcf4fd4ab8ae1 */ private Path createTableFiles(final Path rootDir, final String tableName, final Set tableRegions, final Set tableFamilies, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java index 589435948238..91cc6d2ec70f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({LargeTests.class, ClientTests.class}) +@Category({ LargeTests.class, ClientTests.class }) public class TestFromClientSide3WoUnsafe extends TestFromClientSide3 { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java index c55b4db5076d..5eb6c79291ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java @@ -1,12 +1,19 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -114,10 +121,10 @@ public void testCleanReplicationBarrierWithDeletedTable() throws Exception { barrierScan.setCaching(100); barrierScan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY); barrierScan - .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(tableName, - ClientMetaTableAccessor.QueryType.REGION)) - .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(tableName, - ClientMetaTableAccessor.QueryType.REGION)); + .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(tableName, + ClientMetaTableAccessor.QueryType.REGION)) + .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(tableName, + ClientMetaTableAccessor.QueryType.REGION)); Result result; try (ResultScanner scanner = MetaTableAccessor.getMetaHTable(UTIL.getConnection()).getScanner(barrierScan)) { @@ -175,9 +182,8 @@ public void testCleanReplicationBarrierWithExistTable() throws Exception { } public static void createPeer() throws IOException { - ReplicationPeerConfig rpc = - ReplicationPeerConfig.newBuilder().setClusterKey(UTIL.getClusterKey() + "-test") - .setSerial(true).build(); + ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder() + .setClusterKey(UTIL.getClusterKey() + "-test").setSerial(true).build(); UTIL.getAdmin().addReplicationPeer(PEER_1, rpc); UTIL.getAdmin().addReplicationPeer(PEER_2, rpc); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java index c917de2bc6ed..e3c6877aaf19 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,17 +33,15 @@ /** * Test the comparator used by Hbck. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHBaseFsckComparator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHBaseFsckComparator.class); - TableName table = - TableName.valueOf("table1"); - TableName table2 = - TableName.valueOf("table2"); + TableName table = TableName.valueOf("table1"); + TableName table2 = TableName.valueOf("table2"); byte[] keyStart = Bytes.toBytes(""); byte[] keyA = Bytes.toBytes("A"); byte[] keyB = Bytes.toBytes("B"); @@ -52,7 +50,7 @@ public class TestHBaseFsckComparator { static HbckRegionInfo genHbckInfo(TableName table, byte[] start, byte[] end, int time) { return new HbckRegionInfo(new MetaEntry( - RegionInfoBuilder.newBuilder(table).setStartKey(start).setEndKey(end).build(), null, time)); + RegionInfoBuilder.newBuilder(table).setStartKey(start).setEndKey(end).build(), null, time)); } @Test @@ -103,4 +101,3 @@ public void testAbsEndKey() { assertTrue(HbckRegionInfo.COMPARATOR.compare(hi2, hi1) > 0); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java index 817e1f07193e..d4bd0181c7ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -58,7 +58,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestHBaseFsckEncryption { @ClassRule @@ -81,23 +81,21 @@ public void setUp() throws Exception { // Create the test encryption key byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); - cfKey = new SecretKeySpec(keyBytes,algorithm); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + cfKey = new SecretKeySpec(keyBytes, algorithm); // Start the minicluster TEST_UTIL.startMiniCluster(3); // Create the table TableDescriptorBuilder tableDescriptorBuilder = - TableDescriptorBuilder.newBuilder(TableName.valueOf("default", "TestHBaseFsckEncryption")); + TableDescriptorBuilder.newBuilder(TableName.valueOf("default", "TestHBaseFsckEncryption")); ColumnFamilyDescriptor columnFamilyDescriptor = - ColumnFamilyDescriptorBuilder - .newBuilder(Bytes.toBytes("cf")) - .setEncryptionType(algorithm) - .setEncryptionKey(EncryptionUtil.wrapKey(conf, - conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), - cfKey)).build(); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")).setEncryptionType(algorithm) + .setEncryptionKey(EncryptionUtil.wrapKey(conf, + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), + cfKey)) + .build(); tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); tableDescriptor = tableDescriptorBuilder.build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); @@ -118,8 +116,7 @@ public void testFsckWithEncryption() throws Exception { for (int i = 0; i < values.length; i++) { for (int j = 0; j 
< values.length; j++) { Put put = new Put(new byte[] { values[i], values[j] }); - put.addColumn(Bytes.toBytes("cf"), new byte[]{}, new byte[]{values[i], - values[j]}); + put.addColumn(Bytes.toBytes("cf"), new byte[] {}, new byte[] { values[i], values[j] }); table.put(put); } } @@ -132,7 +129,7 @@ public void testFsckWithEncryption() throws Exception { // Verify we have encrypted store files on disk final List paths = findStorefilePaths(tableDescriptor.getTableName()); assertTrue(paths.size() > 0); - for (Path path: paths) { + for (Path path : paths) { assertTrue("Store file " + path + " has incorrect key", Bytes.equals(cfKey.getEncoded(), extractHFileKey(path))); } @@ -161,8 +158,8 @@ private List findStorefilePaths(TableName tableName) throws Exception { } private byte[] extractHFileKey(Path path) throws Exception { - HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path, - new CacheConfig(conf), true, conf); + HFile.Reader reader = + HFile.createReader(TEST_UTIL.getTestFileSystem(), path, new CacheConfig(conf), true, conf); try { Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext(); assertNotNull("Reader has a null crypto context", cryptoContext); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index 71da6863e7dd..cb35af8b440b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -44,7 +43,9 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, MediumTests.class}) +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + +@Category({ MiscTests.class, MediumTests.class }) public class TestHBaseFsckMOB extends BaseTestHBaseFsck { @ClassRule @@ -54,7 +55,7 @@ public class TestHBaseFsckMOB extends BaseTestHBaseFsck { @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - MasterSyncCoprocessor.class.getName()); + MasterSyncCoprocessor.class.getName()); conf.setInt("hbase.regionserver.handler.count", 2); conf.setInt("hbase.regionserver.metahandler.count", 30); @@ -65,9 +66,10 @@ public static void setUpBeforeClass() throws Exception { conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT); TEST_UTIL.startMiniCluster(1); - tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("testhbck-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + tableExecutorService = + new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new 
ThreadFactoryBuilder().setNameFormat("testhbck-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE); @@ -96,9 +98,8 @@ public void setUp() { EnvironmentEdgeManager.reset(); } - /** - * This creates a table and then corrupts a mob file. Hbck should quarantine the file. + * This creates a table and then corrupts a mob file. Hbck should quarantine the file. */ @SuppressWarnings("deprecation") @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java index 193f9a248ff6..4bd3662599f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java index f8727fdbc37f..ef34f3753d71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ /** * Test that the utility works as expected */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHFileArchiveUtil { @ClassRule @@ -51,8 +51,8 @@ public class TestHFileArchiveUtil { @Test public void testGetTableArchivePath() { - assertNotNull(HFileArchiveUtil.getTableArchivePath(rootDir, - TableName.valueOf(name.getMethodName()))); + assertNotNull( + HFileArchiveUtil.getTableArchivePath(rootDir, TableName.valueOf(name.getMethodName()))); } @Test @@ -66,7 +66,7 @@ public void testGetArchivePath() throws Exception { public void testRegionArchiveDir() { Path regionDir = new Path("region"); assertNotNull(HFileArchiveUtil.getRegionArchiveDir(rootDir, - TableName.valueOf(name.getMethodName()), regionDir)); + TableName.valueOf(name.getMethodName()), regionDir)); } @Test @@ -74,7 +74,7 @@ public void testGetStoreArchivePath() throws IOException { byte[] family = Bytes.toBytes("Family"); Path tabledir = CommonFSUtils.getTableDir(rootDir, TableName.valueOf(name.getMethodName())); RegionInfo region = - RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); Configuration conf = new Configuration(); CommonFSUtils.setRootDir(conf, new Path("root")); assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java index 15871745b8b1..198ceee3de46 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to 
the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,13 +38,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) // Medium as it creates 100 threads; seems better to run it isolated public class TestIdLock { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIdLock.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestIdLock.class); private static final Logger LOG = LoggerFactory.getLogger(TestIdLock.class); @@ -77,8 +76,7 @@ public Boolean call() throws Exception { int sleepMs = 1 + rand.nextInt(4); String owner = idOwner.get(id); if (owner != null) { - LOG.error("Id " + id + " already taken by " + owner + ", " - + clientId + " failed"); + LOG.error("Id " + id + " already taken by " + owner + ", " + clientId + " failed"); return false; } @@ -113,6 +111,4 @@ public void testMultipleClients() throws Exception { } } - } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLockStrongRef.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLockStrongRef.java index 8768efb7e49e..cea88e2d279e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLockStrongRef.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLockStrongRef.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.util; import java.util.concurrent.locks.ReentrantReadWriteLock; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Assert; @@ -28,7 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - @Category({ SmallTests.class }) public class TestIdReadWriteLockStrongRef { @@ -46,10 +44,9 @@ public void testGetLock() throws Exception { Long offset_2 = 2L; ReentrantReadWriteLock offsetLock_1 = idLock.getLock(offset_1); ReentrantReadWriteLock offsetLock_2 = idLock.getLock(offset_1); - Assert.assertEquals(offsetLock_1,offsetLock_2); + Assert.assertEquals(offsetLock_1, offsetLock_2); ReentrantReadWriteLock offsetLock_3 = idLock.getLock(offset_2); - Assert.assertNotEquals(offsetLock_1,offsetLock_3); + Assert.assertNotEquals(offsetLock_1, offsetLock_3); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLockWithObjectPool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLockWithObjectPool.java index 47d6dfd627b7..821ea32d3a2c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLockWithObjectPool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLockWithObjectPool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ import org.slf4j.LoggerFactory; @RunWith(Parameterized.class) -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) // Medium as it creates 100 threads; seems better to run it isolated public class TestIdReadWriteLockWithObjectPool { @@ -66,9 +66,9 @@ public class TestIdReadWriteLockWithObjectPool { @Parameterized.Parameters public static Iterable data() { - return Arrays.asList(new Object[][] { - { new IdReadWriteLockWithObjectPool(ReferenceType.WEAK) }, - { new IdReadWriteLockWithObjectPool(ReferenceType.SOFT) } }); + return Arrays + .asList(new Object[][] { { new IdReadWriteLockWithObjectPool(ReferenceType.WEAK) }, + { new IdReadWriteLockWithObjectPool(ReferenceType.SOFT) } }); } private Map idOwner = new ConcurrentHashMap<>(); @@ -133,17 +133,17 @@ public void testMultipleClients() throws Exception { LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize); ReferenceType refType = idLock.getReferenceType(); switch (refType) { - case WEAK: - // make sure the entry pool will be cleared after GC and purge call - assertEquals(0, entryPoolSize); - break; - case SOFT: - // make sure the entry pool won't be cleared when JVM memory is enough - // even after GC and purge call - assertEquals(NUM_IDS, entryPoolSize); - break; - default: - break; + case WEAK: + // make sure the entry pool will be cleared after GC and purge call + assertEquals(0, entryPoolSize); + break; + case SOFT: + // make sure the entry pool won't be cleared when JVM memory is enough + // even after GC and purge call + assertEquals(NUM_IDS, entryPoolSize); + break; + default: + break; } } finally { exec.shutdown(); @@ -151,6 +151,4 @@ public void testMultipleClients() throws Exception { } } - } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java index 4cea2a639258..ec8c6d2cbb33 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,10 +27,9 @@ import org.junit.experimental.categories.Category; /** - * Tests that the incrementing environment edge increments time instead of using - * the default. + * Tests that the incrementing environment edge increments time instead of using the default. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestIncrementingEnvironmentEdge { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestJSONMetricUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestJSONMetricUtil.java index dbf54bbe1fb3..c75d0c4fb3b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestJSONMetricUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestJSONMetricUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public class TestJSONMetricUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestJSONMetricUtil.class); + HBaseClassTestRule.forClass(TestJSONMetricUtil.class); private static final Logger LOG = LoggerFactory.getLogger(TestJSONMetricUtil.class); @@ -70,7 +70,7 @@ public void testBuildObjectName() throws MalformedObjectNameException { String[] values = { "MemoryPool", "Par Eden Space" }; Hashtable properties = JSONMetricUtil.buldKeyValueTable(keys, values); ObjectName testObject = - JSONMetricUtil.buildObjectName(JSONMetricUtil.JAVA_LANG_DOMAIN, properties); + JSONMetricUtil.buildObjectName(JSONMetricUtil.JAVA_LANG_DOMAIN, properties); assertEquals(JSONMetricUtil.JAVA_LANG_DOMAIN, testObject.getDomain()); assertEquals(testObject.getKeyPropertyList(), properties); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestLossyCounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestLossyCounting.java index b6c0ddf0f120..c094536743c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestLossyCounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestLossyCounting.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -30,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestLossyCounting { @ClassRule @@ -64,20 +63,20 @@ public void testAddByOne() { @Test public void testSweep1() throws Exception { LossyCounting lossyCounting = new LossyCounting<>("testSweep1", 0.01); - for(int i = 0; i < 400; i++){ + for (int i = 0; i < 400; i++) { String key = "" + i; lossyCounting.add(key); } assertEquals(4L, lossyCounting.getCurrentTerm()); waitForSweep(lossyCounting); - //Do last one sweep as some sweep will be skipped when first one was running + // Do last one sweep as some sweep will be skipped when first one was running lossyCounting.sweep(); assertEquals(lossyCounting.getBucketSize() - 1, lossyCounting.getDataSize()); } private void waitForSweep(LossyCounting lossyCounting) throws InterruptedException { - //wait for sweep thread to complete + // wait for sweep thread to complete int retry = 0; while (!lossyCounting.getSweepFuture().isDone() && retry < 10) { Thread.sleep(100); @@ -94,7 +93,7 @@ public void testSweep2() throws Exception { } waitForSweep(lossyCounting); assertEquals(10L, lossyCounting.getDataSize()); - for(int i = 0; i < 10; i++){ + for (int i = 0; i < 10; i++) { String key = "1"; lossyCounting.add(key); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadEncoded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadEncoded.java index 159c2813d54a..a23b7330b33d 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadEncoded.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadEncoded.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,12 +29,11 @@ import org.junit.runners.Parameterized.Parameters; /** - * Runs a load test on a mini HBase cluster with data block encoding turned on. - * Compared to other load-test-style unit tests, this one writes a smaller - * amount of data, but goes through all available data block encoding - * algorithms. + * Runs a load test on a mini HBase cluster with data block encoding turned on. Compared to other + * load-test-style unit tests, this one writes a smaller amount of data, but goes through all + * available data block encoding algorithms. */ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestMiniClusterLoadEncoded extends TestMiniClusterLoadParallel { @ClassRule @@ -47,8 +46,8 @@ public class TestMiniClusterLoadEncoded extends TestMiniClusterLoadParallel { @Parameters public static Collection parameters() { List parameters = new ArrayList<>(); - for (DataBlockEncoding dataBlockEncoding : DataBlockEncoding.values() ) { - parameters.add(new Object[]{dataBlockEncoding}); + for (DataBlockEncoding dataBlockEncoding : DataBlockEncoding.values()) { + parameters.add(new Object[] { dataBlockEncoding }); } return parameters; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java index c150196e2ca9..9124c0c11ef2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,20 +30,18 @@ import org.junit.runners.Parameterized; /** - * A write/read/verify load test on a mini HBase cluster. Tests reading - * and writing at the same time. + * A write/read/verify load test on a mini HBase cluster. Tests reading and writing at the same + * time. 
*/ -@Category({MiscTests.class, LargeTests.class}) +@Category({ MiscTests.class, LargeTests.class }) @RunWith(Parameterized.class) -public class TestMiniClusterLoadParallel - extends TestMiniClusterLoadSequential { +public class TestMiniClusterLoadParallel extends TestMiniClusterLoadSequential { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMiniClusterLoadParallel.class); - public TestMiniClusterLoadParallel(boolean isMultiPut, - DataBlockEncoding encoding) { + public TestMiniClusterLoadParallel(boolean isMultiPut, DataBlockEncoding encoding) { super(isMultiPut, encoding); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java index f5669ba012ab..44112e4a5257 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,10 +53,9 @@ import org.slf4j.LoggerFactory; /** - * A write/read/verify load test on a mini HBase cluster. Tests reading - * and then writing. + * A write/read/verify load test on a mini HBase cluster. Tests reading and then writing. */ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) @RunWith(Parameterized.class) public class TestMiniClusterLoadSequential { @@ -64,17 +63,14 @@ public class TestMiniClusterLoadSequential { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMiniClusterLoadSequential.class); - private static final Logger LOG = LoggerFactory.getLogger( - TestMiniClusterLoadSequential.class); + private static final Logger LOG = LoggerFactory.getLogger(TestMiniClusterLoadSequential.class); - protected static final TableName TABLE = - TableName.valueOf("load_test_tbl"); + protected static final TableName TABLE = TableName.valueOf("load_test_tbl"); protected static final byte[] CF = Bytes.toBytes("load_test_cf"); protected static final int NUM_THREADS = 8; protected static final int NUM_RS = 2; protected static final int TIMEOUT_MS = 180000; - protected static final HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); protected final Configuration conf = TEST_UTIL.getConfiguration(); protected final boolean isMultiPut; @@ -86,8 +82,7 @@ public class TestMiniClusterLoadSequential { protected Compression.Algorithm compression = Compression.Algorithm.NONE; - public TestMiniClusterLoadSequential(boolean isMultiPut, - DataBlockEncoding dataBlockEncoding) { + public TestMiniClusterLoadSequential(boolean isMultiPut, DataBlockEncoding dataBlockEncoding) { this.isMultiPut = isMultiPut; this.dataBlockEncoding = dataBlockEncoding; conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024); @@ -99,10 +94,10 @@ public TestMiniClusterLoadSequential(boolean isMultiPut, @Parameters public static Collection parameters() { List parameters = new ArrayList<>(); - for (boolean multiPut : new boolean[]{false, true}) { - for (DataBlockEncoding dataBlockEncoding : new DataBlockEncoding[] { - DataBlockEncoding.NONE, DataBlockEncoding.PREFIX }) { - parameters.add(new Object[]{multiPut, 
dataBlockEncoding}); + for (boolean multiPut : new boolean[] { false, true }) { + for (DataBlockEncoding dataBlockEncoding : new DataBlockEncoding[] { DataBlockEncoding.NONE, + DataBlockEncoding.PREFIX }) { + parameters.add(new Object[] { multiPut, dataBlockEncoding }); } } return parameters; @@ -158,12 +153,12 @@ protected void createPreSplitLoadTestTable(TableDescriptor tableDescriptor, } protected void prepareForLoadTest() throws IOException { - LOG.info("Starting load test: dataBlockEncoding=" + dataBlockEncoding + - ", isMultiPut=" + isMultiPut); + LOG.info( + "Starting load test: dataBlockEncoding=" + dataBlockEncoding + ", isMultiPut=" + isMultiPut); numKeys = numKeys(); Admin admin = TEST_UTIL.getAdmin(); - while (admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) - .getLiveServerMetrics().size() < NUM_RS) { + while (admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics() + .size() < NUM_RS) { LOG.info("Sleeping until " + NUM_RS + " RSs are online"); Threads.sleepWithoutInterrupt(1000); } @@ -171,7 +166,7 @@ protected void prepareForLoadTest() throws IOException { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE).build(); ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(CF) - .setCompressionType(compression).setDataBlockEncoding(dataBlockEncoding).build(); + .setCompressionType(compression).setDataBlockEncoding(dataBlockEncoding).build(); createPreSplitLoadTestTable(tableDescriptor, familyDescriptor); LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(CF); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java index 165fc67b29e4..c6ca7394d2bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.io.IOException; import java.util.Collections; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -60,12 +59,12 @@ * Tests for Region Mover Load/Unload functionality with and without ack mode and also to test * exclude functionality useful for rack decommissioning */ -@Category({MiscTests.class, LargeTests.class}) +@Category({ MiscTests.class, LargeTests.class }) public class TestRegionMover1 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionMover1.class); + HBaseClassTestRule.forClass(TestRegionMover1.class); @Rule public TestName name = new TestName(); @@ -89,7 +88,7 @@ public static void tearDownAfterClass() throws Exception { public void setUp() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); String startKey = "a"; String endKey = "z"; TEST_UTIL.getAdmin().createTable(tableDesc, Bytes.toBytes(startKey), Bytes.toBytes(endKey), 9); @@ -109,7 +108,7 @@ public void testWithAck() throws Exception { String rsName = regionServer.getServerName().getAddress().toString(); int numRegions = regionServer.getNumberOfOnlineRegions(); RegionMoverBuilder rmBuilder = - new RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true).maxthreads(8); + new RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true).maxthreads(8); try (RegionMover rm = rmBuilder.build()) { LOG.info("Unloading " + regionServer.getServerName()); rm.unload(); @@ -132,7 +131,7 @@ public void testWithoutAck() throws Exception { String rsName = regionServer.getServerName().getAddress().toString(); int numRegions = regionServer.getNumberOfOnlineRegions(); RegionMoverBuilder rmBuilder = - new RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(false); + new RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(false); try (RegionMover rm = rmBuilder.build()) { LOG.info("Unloading " + regionServer.getServerName()); rm.unload(); @@ -176,24 +175,23 @@ public void testExclude() throws Exception { int port = regionServer.getServerName().getPort(); String rs = rsName + ":" + Integer.toString(port); RegionMoverBuilder rmBuilder = new RegionMoverBuilder(rs, TEST_UTIL.getConfiguration()) - .ack(true).excludeFile(excludeFile.getCanonicalPath()); + .ack(true).excludeFile(excludeFile.getCanonicalPath()); try (RegionMover rm = rmBuilder.build()) { rm.unload(); LOG.info("Unloading " + rs); assertEquals(0, regionServer.getNumberOfOnlineRegions()); assertEquals(regionsExcludeServer, cluster.getRegionServer(1).getNumberOfOnlineRegions()); - LOG.info("Before:" + regionsExcludeServer + " After:" + - cluster.getRegionServer(1).getNumberOfOnlineRegions()); + LOG.info("Before:" + regionsExcludeServer + " After:" + + cluster.getRegionServer(1).getNumberOfOnlineRegions()); } } @Test - public void testDesignatedFile() throws Exception{ + public void testDesignatedFile() throws Exception { SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - File designatedFile = new File(TEST_UTIL.getDataTestDir().toUri().getPath(), - "designated_file"); + File designatedFile = new 
File(TEST_UTIL.getDataTestDir().toUri().getPath(), "designated_file"); HRegionServer designatedServer = cluster.getRegionServer(0); - try(FileWriter fos = new FileWriter(designatedFile)) { + try (FileWriter fos = new FileWriter(designatedFile)) { String designatedHostname = designatedServer.getServerName().getHostname(); int designatedServerPort = designatedServer.getServerName().getPort(); String excludeServerName = designatedHostname + ":" + designatedServerPort; @@ -206,7 +204,7 @@ public void testDesignatedFile() throws Exception{ String rs = rsName + ":" + port; int regionsInRegionServer = regionServer.getNumberOfOnlineRegions(); RegionMoverBuilder rmBuilder = new RegionMoverBuilder(rs, TEST_UTIL.getConfiguration()) - .designatedFile(designatedFile.getCanonicalPath()); + .designatedFile(designatedFile.getCanonicalPath()); try (RegionMover rm = rmBuilder.build()) { LOG.debug("Unloading {} regions", rs); rm.unload(); @@ -219,13 +217,12 @@ public void testDesignatedFile() throws Exception{ } @Test - public void testExcludeAndDesignated() throws Exception{ + public void testExcludeAndDesignated() throws Exception { SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); // create designated file - File designatedFile = new File(TEST_UTIL.getDataTestDir().toUri().getPath(), - "designated_file"); + File designatedFile = new File(TEST_UTIL.getDataTestDir().toUri().getPath(), "designated_file"); HRegionServer designatedServer = cluster.getRegionServer(0); - try(FileWriter fos = new FileWriter(designatedFile)) { + try (FileWriter fos = new FileWriter(designatedFile)) { String designatedHostname = designatedServer.getServerName().getHostname(); int designatedServerPort = designatedServer.getServerName().getPort(); String excludeServerName = designatedHostname + ":" + designatedServerPort; @@ -235,7 +232,7 @@ public void testExcludeAndDesignated() throws Exception{ // create exclude file File excludeFile = new File(TEST_UTIL.getDataTestDir().toUri().getPath(), "exclude_file"); HRegionServer excludeServer = cluster.getRegionServer(1); - try(FileWriter fos = new FileWriter(excludeFile)) { + try (FileWriter fos = new FileWriter(excludeFile)) { String excludeHostname = excludeServer.getServerName().getHostname(); int excludeServerPort = excludeServer.getServerName().getPort(); String excludeServerName = excludeHostname + ":" + excludeServerPort; @@ -250,8 +247,8 @@ public void testExcludeAndDesignated() throws Exception{ int regionsInTargetRegionServer = targetRegionServer.getNumberOfOnlineRegions(); RegionMoverBuilder rmBuilder = new RegionMoverBuilder(rs, TEST_UTIL.getConfiguration()) - .designatedFile(designatedFile.getCanonicalPath()) - .excludeFile(excludeFile.getCanonicalPath()); + .designatedFile(designatedFile.getCanonicalPath()) + .excludeFile(excludeFile.getCanonicalPath()); try (RegionMover rm = rmBuilder.build()) { LOG.debug("Unloading {}", rs); rm.unload(); @@ -289,12 +286,12 @@ public void testRegionServerPort() throws Exception { @Test public void testLoadMetaRegion() throws Exception { HRegionServer rsWithMeta = TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer()) - .filter(rs -> rs.getRegions(TableName.META_TABLE_NAME).size() > 0).findFirst().get(); + .map(t -> t.getRegionServer()) + .filter(rs -> rs.getRegions(TableName.META_TABLE_NAME).size() > 0).findFirst().get(); int onlineRegions = rsWithMeta.getNumberOfOnlineRegions(); String rsName = rsWithMeta.getServerName().getAddress().toString(); try (RegionMover rm = - new 
RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true).build()) { + new RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true).build()) { LOG.info("Unloading " + rsWithMeta.getServerName()); rm.unload(); assertEquals(0, rsWithMeta.getNumberOfOnlineRegions()); @@ -315,17 +312,17 @@ public void testTargetServerDeadWhenLoading() throws Exception { // wait 5 seconds at most conf.setInt(RegionMover.SERVERSTART_WAIT_MAX_KEY, 5); String filename = - new Path(TEST_UTIL.getDataTestDir(), "testTargetServerDeadWhenLoading").toString(); + new Path(TEST_UTIL.getDataTestDir(), "testTargetServerDeadWhenLoading").toString(); // unload the region server try (RegionMover rm = - new RegionMoverBuilder(rsName, conf).filename(filename).ack(true).build()) { + new RegionMoverBuilder(rsName, conf).filename(filename).ack(true).build()) { LOG.info("Unloading " + rs.getServerName()); rm.unload(); assertEquals(0, rs.getNumberOfOnlineRegions()); } String inexistRsName = "whatever:123"; try (RegionMover rm = - new RegionMoverBuilder(inexistRsName, conf).filename(filename).ack(true).build()) { + new RegionMoverBuilder(inexistRsName, conf).filename(filename).ack(true).build()) { // load the regions to an inexist region server, which should fail and return false LOG.info("Loading to an inexist region server {}", inexistRsName); assertFalse(rm.load()); @@ -338,8 +335,8 @@ public void testDecomServerExclusionWithAck() throws Exception { HRegionServer excludeServer = cluster.getRegionServer(1); List regions = excludeServer.getRegions(); int regionsExcludeServer = excludeServer.getNumberOfOnlineRegions(); - TEST_UTIL.getAdmin().decommissionRegionServers( - Collections.singletonList(excludeServer.getServerName()), false); + TEST_UTIL.getAdmin() + .decommissionRegionServers(Collections.singletonList(excludeServer.getServerName()), false); waitForServerDecom(excludeServer); @@ -348,8 +345,7 @@ public void testDecomServerExclusionWithAck() throws Exception { int port = regionServer.getServerName().getPort(); String hostname = rsName + ":" + Integer.toString(port); RegionMoverBuilder rmBuilder = - new RegionMoverBuilder(hostname, TEST_UTIL.getConfiguration()) - .ack(true); + new RegionMoverBuilder(hostname, TEST_UTIL.getConfiguration()).ack(true); int targetServerRegions = cluster.getRegionServer(2).getRegions().size(); int sourceServerRegions = regionServer.getRegions().size(); @@ -359,8 +355,8 @@ public void testDecomServerExclusionWithAck() throws Exception { LOG.info("Unloading {}", hostname); assertEquals(0, regionServer.getNumberOfOnlineRegions()); assertEquals(regionsExcludeServer, cluster.getRegionServer(1).getNumberOfOnlineRegions()); - LOG.info("Before:" + regionsExcludeServer + " After:" + - cluster.getRegionServer(1).getNumberOfOnlineRegions()); + LOG.info("Before:" + regionsExcludeServer + " After:" + + cluster.getRegionServer(1).getNumberOfOnlineRegions()); List regionList = cluster.getRegionServer(1).getRegions(); int index = 0; for (HRegion hRegion : regionList) { @@ -381,7 +377,7 @@ private void waitForServerDecom(HRegionServer excludeServer) { try { List decomServers = TEST_UTIL.getAdmin().listDecommissionedRegionServers(); return decomServers.size() == 1 - && decomServers.get(0).equals(excludeServer.getServerName()); + && decomServers.get(0).equals(excludeServer.getServerName()); } catch (IOException e) { throw new RuntimeException(e); } @@ -394,8 +390,8 @@ public void testDecomServerExclusion() throws Exception { HRegionServer excludeServer = cluster.getRegionServer(0); List regions = 
excludeServer.getRegions(); int regionsExcludeServer = excludeServer.getNumberOfOnlineRegions(); - TEST_UTIL.getAdmin().decommissionRegionServers( - Collections.singletonList(excludeServer.getServerName()), false); + TEST_UTIL.getAdmin() + .decommissionRegionServers(Collections.singletonList(excludeServer.getServerName()), false); waitForServerDecom(excludeServer); @@ -404,7 +400,7 @@ public void testDecomServerExclusion() throws Exception { int port = sourceRegionServer.getServerName().getPort(); String hostname = rsName + ":" + Integer.toString(port); RegionMoverBuilder rmBuilder = - new RegionMoverBuilder(hostname, TEST_UTIL.getConfiguration()).ack(false); + new RegionMoverBuilder(hostname, TEST_UTIL.getConfiguration()).ack(false); int targetServerRegions = cluster.getRegionServer(2).getRegions().size(); int sourceServerRegions = sourceRegionServer.getRegions().size(); @@ -414,8 +410,8 @@ public void testDecomServerExclusion() throws Exception { LOG.info("Unloading {}", hostname); assertEquals(0, sourceRegionServer.getNumberOfOnlineRegions()); assertEquals(regionsExcludeServer, cluster.getRegionServer(0).getNumberOfOnlineRegions()); - LOG.info("Before:" + regionsExcludeServer + " After:" + - cluster.getRegionServer(1).getNumberOfOnlineRegions()); + LOG.info("Before:" + regionsExcludeServer + " After:" + + cluster.getRegionServer(1).getNumberOfOnlineRegions()); List regionList = cluster.getRegionServer(0).getRegions(); int index = 0; for (HRegion hRegion : regionList) { @@ -443,8 +439,8 @@ public void testExcludeAndDecomServers() throws Exception { fos.close(); HRegionServer decomServer = cluster.getRegionServer(2); - TEST_UTIL.getAdmin().decommissionRegionServers( - Collections.singletonList(decomServer.getServerName()), false); + TEST_UTIL.getAdmin() + .decommissionRegionServers(Collections.singletonList(decomServer.getServerName()), false); waitForServerDecom(decomServer); @@ -453,9 +449,8 @@ public void testExcludeAndDecomServers() throws Exception { int port = regionServer.getServerName().getPort(); String sourceServer = rsName + ":" + Integer.toString(port); RegionMoverBuilder rmBuilder = - new RegionMoverBuilder(sourceServer, TEST_UTIL.getConfiguration()) - .ack(true) - .excludeFile(excludeFile.getCanonicalPath()); + new RegionMoverBuilder(sourceServer, TEST_UTIL.getConfiguration()).ack(true) + .excludeFile(excludeFile.getCanonicalPath()); try (RegionMover regionMover = rmBuilder.build()) { Assert.assertFalse(regionMover.unload()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java index ecf3ef56cd82..4a90023c4096 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover2.java @@ -15,9 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; @@ -44,21 +47,17 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; /** * Tests for Region Mover Load/Unload functionality with and without ack mode and also to test * exclude functionality useful for rack decommissioning */ -@Category({ MiscTests.class, LargeTests.class}) +@Category({ MiscTests.class, LargeTests.class }) public class TestRegionMover2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionMover2.class); + HBaseClassTestRule.forClass(TestRegionMover2.class); @Rule public TestName name = new TestName(); @@ -82,7 +81,7 @@ public static void tearDownAfterClass() throws Exception { public void setUp() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); int startKey = 0; int endKey = 80000; TEST_UTIL.getAdmin().createTable(tableDesc, Bytes.toBytes(startKey), Bytes.toBytes(endKey), 9); @@ -103,8 +102,8 @@ public void testWithMergedRegions() throws Exception { Table table = TEST_UTIL.getConnection().getTable(tableName); List puts = new ArrayList<>(); for (int i = 0; i < 10000; i++) { - puts.add(new Put(Bytes.toBytes("rowkey_" + i)) - .addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))); + puts.add(new Put(Bytes.toBytes("rowkey_" + i)).addColumn(Bytes.toBytes("fam1"), + Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))); } table.put(puts); admin.flush(tableName); @@ -112,19 +111,19 @@ public void testWithMergedRegions() throws Exception { String rsName = regionServer.getServerName().getAddress().toString(); int numRegions = regionServer.getNumberOfOnlineRegions(); List hRegions = regionServer.getRegions().stream() - .filter(hRegion -> hRegion.getRegionInfo().getTable().equals(tableName)) - .collect(Collectors.toList()); + .filter(hRegion -> hRegion.getRegionInfo().getTable().equals(tableName)) + .collect(Collectors.toList()); RegionMover.RegionMoverBuilder rmBuilder = - new RegionMover.RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true) - .maxthreads(8); + new RegionMover.RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true) + .maxthreads(8); try (RegionMover rm = rmBuilder.build()) { LOG.debug("Unloading {}", regionServer.getServerName()); rm.unload(); Assert.assertEquals(0, regionServer.getNumberOfOnlineRegions()); LOG.debug("Successfully Unloaded, now Loading"); admin.mergeRegionsAsync(new byte[][] { hRegions.get(0).getRegionInfo().getRegionName(), - hRegions.get(1).getRegionInfo().getRegionName() }, true) - .get(5, TimeUnit.SECONDS); + hRegions.get(1).getRegionInfo().getRegionName() }, + true).get(5, TimeUnit.SECONDS); Assert.assertTrue(rm.load()); Assert.assertEquals(numRegions - 2, regionServer.getNumberOfOnlineRegions()); } @@ -138,8 +137,8 @@ public void testWithSplitRegions() throws Exception { Table table = 
TEST_UTIL.getConnection().getTable(tableName); List puts = new ArrayList<>(); for (int i = 10; i < 50000; i++) { - puts.add(new Put(Bytes.toBytes(i)) - .addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))); + puts.add(new Put(Bytes.toBytes(i)).addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("q1"), + Bytes.toBytes("val_" + i))); } table.put(puts); admin.flush(tableName); @@ -148,12 +147,12 @@ public void testWithSplitRegions() throws Exception { String rsName = regionServer.getServerName().getAddress().toString(); int numRegions = regionServer.getNumberOfOnlineRegions(); List hRegions = regionServer.getRegions().stream() - .filter(hRegion -> hRegion.getRegionInfo().getTable().equals(tableName)) - .collect(Collectors.toList()); + .filter(hRegion -> hRegion.getRegionInfo().getTable().equals(tableName)) + .collect(Collectors.toList()); RegionMover.RegionMoverBuilder rmBuilder = - new RegionMover.RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true) - .maxthreads(8); + new RegionMover.RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true) + .maxthreads(8); try (RegionMover rm = rmBuilder.build()) { LOG.debug("Unloading {}", regionServer.getServerName()); rm.unload(); @@ -172,8 +171,8 @@ public void testWithSplitRegions() throws Exception { endKey = Bytes.toInt(hRegion.getRegionInfo().getEndKey()); } int midKey = startKey + (endKey - startKey) / 2; - admin.splitRegionAsync(hRegion.getRegionInfo().getRegionName(), Bytes.toBytes(midKey)) - .get(5, TimeUnit.SECONDS); + admin.splitRegionAsync(hRegion.getRegionInfo().getRegionName(), Bytes.toBytes(midKey)).get(5, + TimeUnit.SECONDS); Assert.assertTrue(rm.load()); Assert.assertEquals(numRegions - 1, regionServer.getNumberOfOnlineRegions()); } @@ -187,8 +186,8 @@ public void testFailedRegionMove() throws Exception { Table table = TEST_UTIL.getConnection().getTable(tableName); List puts = new ArrayList<>(); for (int i = 0; i < 1000; i++) { - puts.add(new Put(Bytes.toBytes("rowkey_" + i)) - .addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))); + puts.add(new Put(Bytes.toBytes("rowkey_" + i)).addColumn(Bytes.toBytes("fam1"), + Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))); } table.put(puts); admin.flush(tableName); @@ -196,11 +195,11 @@ public void testFailedRegionMove() throws Exception { String rsName = regionServer.getServerName().getAddress().toString(); int numRegions = regionServer.getNumberOfOnlineRegions(); List hRegions = regionServer.getRegions().stream() - .filter(hRegion -> hRegion.getRegionInfo().getTable().equals(tableName)) - .collect(Collectors.toList()); + .filter(hRegion -> hRegion.getRegionInfo().getTable().equals(tableName)) + .collect(Collectors.toList()); RegionMover.RegionMoverBuilder rmBuilder = - new RegionMover.RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true) - .maxthreads(8); + new RegionMover.RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true) + .maxthreads(8); try (RegionMover rm = rmBuilder.build()) { LOG.debug("Unloading {}", regionServer.getServerName()); rm.unload(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java index aa9fbe49b852..ad4efdd05042 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover3.java @@ -15,9 +15,12 @@ * See the License for the specific language 
governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; @@ -43,18 +46,13 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -@Category({ MiscTests.class, LargeTests.class}) +@Category({ MiscTests.class, LargeTests.class }) public class TestRegionMover3 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionMover3.class); + HBaseClassTestRule.forClass(TestRegionMover3.class); @Rule public TestName name = new TestName(); @@ -83,7 +81,7 @@ public static void tearDownAfterClass() throws Exception { public void setUp() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build(); int startKey = 0; int endKey = 80000; TEST_UTIL.getAdmin().createTable(tableDesc, Bytes.toBytes(startKey), Bytes.toBytes(endKey), 9); @@ -95,10 +93,11 @@ public void testRegionUnloadWithRack() throws Exception { SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); Admin admin = TEST_UTIL.getAdmin(); Table table = TEST_UTIL.getConnection().getTable(tableName); - List puts = IntStream.range(10, 50000) - .mapToObj(i -> new Put(Bytes.toBytes(i)) - .addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))) - .collect(Collectors.toList()); + List puts = + IntStream + .range(10, 50000).mapToObj(i -> new Put(Bytes.toBytes(i)) + .addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("q1"), Bytes.toBytes("val_" + i))) + .collect(Collectors.toList()); table.put(puts); admin.flush(tableName); admin.compact(tableName); @@ -132,9 +131,8 @@ public void testRegionUnloadWithRack() throws Exception { // with default rackManager, which resolves "/default-rack" for each server, no region // is moved while using unloadFromRack() as all rs belong to same rack. RegionMover.RegionMoverBuilder rmBuilder = - new RegionMover.RegionMoverBuilder(sourceRSName, TEST_UTIL.getConfiguration()) - .ack(true) - .maxthreads(8); + new RegionMover.RegionMoverBuilder(sourceRSName, TEST_UTIL.getConfiguration()).ack(true) + .maxthreads(8); try (RegionMover regionMover = rmBuilder.build()) { regionMover.unloadFromRack(); int newNumRegions0 = hRegionServer0.getNumberOfOnlineRegions(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java index 50b96e82c696..1af4a5c5f976 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.util; +import static org.junit.Assert.assertEquals; + import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; @@ -45,24 +47,22 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.junit.Assert.assertEquals; /** - * Test for rsgroup enable, unloaded regions from decommissoned host of a rsgroup - * should be assigned to those regionservers belonging to the same rsgroup. + * Test for rsgroup enable, unloaded regions from decommissoned host of a rsgroup should be assigned + * to those regionservers belonging to the same rsgroup. */ @Category({ MiscTests.class, MediumTests.class }) public class TestRegionMoverWithRSGroupEnable { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionMoverWithRSGroupEnable.class); + HBaseClassTestRule.forClass(TestRegionMoverWithRSGroupEnable.class); private static final Logger LOG = LoggerFactory.getLogger(TestRegionMoverWithRSGroupEnable.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final String TEST_RSGROUP = "test"; - @BeforeClass public static void setUpBeforeClass() throws Exception { RSGroupUtil.enableRSGroup(TEST_UTIL.getConfiguration()); @@ -85,9 +85,9 @@ public void setUp() throws Exception { Collection allServers = admin.getRegionServers(); // Remove rs contains hbase:meta, otherwise test looks unstable and buggy in test env. ServerName rsContainMeta = TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer()) - .filter(rs -> rs.getRegions(TableName.META_TABLE_NAME).size() > 0).findFirst().get() - .getServerName(); + .map(t -> t.getRegionServer()) + .filter(rs -> rs.getRegions(TableName.META_TABLE_NAME).size() > 0).findFirst().get() + .getServerName(); LOG.info("{} contains hbase:meta", rsContainMeta); List modifiable = new ArrayList<>(allServers); modifiable.remove(rsContainMeta); @@ -108,9 +108,8 @@ public void setUp() throws Exception { TEST_UTIL.deleteTable(tableName); } TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")) - .setRegionServerGroup(TEST_RSGROUP) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).setRegionServerGroup(TEST_RSGROUP) + .build(); String startKey = "a"; String endKey = "z"; admin.createTable(tableDesc, Bytes.toBytes(startKey), Bytes.toBytes(endKey), 9); @@ -122,14 +121,14 @@ public void testUnloadRegions() throws Exception { Address online = rsservers.get(1); String filename = new Path(TEST_UTIL.getDataTestDir(), "testRSGroupUnload").toString(); RegionMoverBuilder builder = - new RegionMoverBuilder(decommission.toString(), TEST_UTIL.getConfiguration()); + new RegionMoverBuilder(decommission.toString(), TEST_UTIL.getConfiguration()); try (RegionMover rm = builder.filename(filename).ack(true).build()) { LOG.info("Unloading " + decommission.getHostname()); rm.unload(); } HRegionServer onlineRS = TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() - .map(JVMClusterUtil.RegionServerThread::getRegionServer) - .filter(rs -> rs.getServerName().getAddress().equals(online)).findFirst().get(); + .map(JVMClusterUtil.RegionServerThread::getRegionServer) + .filter(rs -> rs.getServerName().getAddress().equals(online)).findFirst().get(); assertEquals(9, 
onlineRS.getNumberOfOnlineRegions()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java index e9e51672997f..76b3e7540249 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ComparisonChain; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestRegionSplitCalculator { @ClassRule @@ -48,10 +48,10 @@ public class TestRegionSplitCalculator { private static final Logger LOG = LoggerFactory.getLogger(TestRegionSplitCalculator.class); public static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + /** - * This is range uses a user specified start and end keys. It also has an - * extra tiebreaker so that different ranges with the same start/end key pair - * count as different regions. + * This is range uses a user specified start and end keys. It also has an extra tiebreaker so that + * different ranges with the same start/end key pair count as different regions. */ static class SimpleRange implements KeyRange { byte[] start, end; @@ -83,21 +83,18 @@ public String toString() { @Override public int compare(SimpleRange sr1, SimpleRange sr2) { ComparisonChain cc = ComparisonChain.start(); - cc = cc.compare(sr1.getStartKey(), sr2.getStartKey(), - Bytes.BYTES_COMPARATOR); - cc = cc.compare(sr1.getEndKey(), sr2.getEndKey(), - RegionSplitCalculator.BYTES_COMPARATOR); + cc = cc.compare(sr1.getStartKey(), sr2.getStartKey(), Bytes.BYTES_COMPARATOR); + cc = cc.compare(sr1.getEndKey(), sr2.getEndKey(), RegionSplitCalculator.BYTES_COMPARATOR); cc = cc.compare(sr1.tiebreaker, sr2.tiebreaker); return cc.result(); } }; /** - * Check the "depth" (number of regions included at a split) of a generated - * split calculation + * Check the "depth" (number of regions included at a split) of a generated split calculation */ - void checkDepths(SortedSet splits, - Multimap regions, Integer... depths) { + void checkDepths(SortedSet splits, Multimap regions, + Integer... depths) { assertEquals(splits.size(), depths.length); int i = 0; for (byte[] k : splits) { @@ -109,8 +106,8 @@ void checkDepths(SortedSet splits, } /** - * This dumps data in a visually reasonable way for visual debugging. It has - * the basic iteration structure. + * This dumps data in a visually reasonable way for visual debugging. It has the basic iteration + * structure. */ String dump(SortedSet splits, Multimap regions) { // we display this way because the last end key should be displayed as well. 
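For reference, the comparator being reflowed in the hunk above orders ranges by start key and then by end key using Guava's ComparisonChain. A minimal standalone sketch of the same idiom follows (the class name is hypothetical; only the shaded ComparisonChain and the Bytes utility already imported by this test file are assumed):

import java.util.Comparator;

import org.apache.hadoop.hbase.util.Bytes;

import org.apache.hbase.thirdparty.com.google.common.collect.ComparisonChain;

public class RangeComparatorSketch {
  // Orders (start, end) byte[] pairs the way the SimpleRange comparator above does:
  // start key first, end key as the tie-breaker.
  static final Comparator<byte[][]> RANGE_CMP = (r1, r2) -> ComparisonChain.start()
    .compare(r1[0], r2[0], Bytes.BYTES_COMPARATOR)
    .compare(r1[1], r2[1], Bytes.BYTES_COMPARATOR)
    .result();

  public static void main(String[] args) {
    byte[][] ab = { Bytes.toBytes("A"), Bytes.toBytes("B") };
    byte[][] ac = { Bytes.toBytes("A"), Bytes.toBytes("C") };
    // Identical start keys, so ordering falls through to the end key: [A, B] sorts before [A, C].
    System.out.println(RANGE_CMP.compare(ab, ac) < 0); // prints true
  }
}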
@@ -195,8 +192,7 @@ public void testSplitCalculatorCoverSplit() { LOG.info("AC covers AB, BC"); String res = dump(sc.getSplits(), regions); checkDepths(sc.getSplits(), regions, 2, 2, 0); - assertEquals("A:\t[A, B]\t[A, C]\t\n" + "B:\t[A, C]\t[B, C]\t\n" - + "C:\t\n", res); + assertEquals("A:\t[A, B]\t[A, C]\t\n" + "B:\t[A, C]\t[B, C]\t\n" + "C:\t\n", res); } @Test @@ -213,8 +209,7 @@ public void testSplitCalculatorOverEndpoint() { LOG.info("AB, BD covers BC"); String res = dump(sc.getSplits(), regions); checkDepths(sc.getSplits(), regions, 1, 2, 1, 0); - assertEquals("A:\t[A, B]\t\n" + "B:\t[B, C]\t[B, D]\t\n" - + "C:\t[B, D]\t\n" + "D:\t\n", res); + assertEquals("A:\t[A, B]\t\n" + "B:\t[B, C]\t[B, D]\t\n" + "C:\t[B, D]\t\n" + "D:\t\n", res); } @Test @@ -231,8 +226,7 @@ public void testSplitCalculatorHoles() { LOG.info("Hole between C and E"); String res = dump(sc.getSplits(), regions); checkDepths(sc.getSplits(), regions, 1, 1, 0, 1, 0); - assertEquals("A:\t[A, B]\t\n" + "B:\t[B, C]\t\n" + "C:\t\n" - + "E:\t[E, F]\t\n" + "F:\t\n", res); + assertEquals("A:\t[A, B]\t\n" + "B:\t[B, C]\t\n" + "C:\t\n" + "E:\t[E, F]\t\n" + "F:\t\n", res); } @Test @@ -247,8 +241,7 @@ public void testSplitCalculatorOverreach() { LOG.info("AC and BD overlap but share no start/end keys"); String res = dump(sc.getSplits(), regions); checkDepths(sc.getSplits(), regions, 1, 2, 1, 0); - assertEquals("A:\t[A, C]\t\n" + "B:\t[A, C]\t[B, D]\t\n" - + "C:\t[B, D]\t\n" + "D:\t\n", res); + assertEquals("A:\t[A, C]\t\n" + "B:\t[A, C]\t[B, D]\t\n" + "C:\t[B, D]\t\n" + "D:\t\n", res); } @Test @@ -327,11 +320,10 @@ public void testComplex() { LOG.info("Something fairly complex"); String res = dump(sc.getSplits(), regions); checkDepths(sc.getSplits(), regions, 3, 3, 3, 1, 2, 0, 1, 0, 1, 0); - assertEquals("A:\t[A, Am]\t[A, B]\t[A, C]\t\n" - + "Am:\t[A, B]\t[A, C]\t[Am, C]\t\n" - + "B:\t[A, C]\t[Am, C]\t[B, E]\t\n" + "C:\t[B, E]\t\n" - + "D:\t[B, E]\t[D, E]\t\n" + "E:\t\n" + "F:\t[F, G]\t\n" + "G:\t\n" - + "H:\t[H, I]\t\n" + "I:\t\n", res); + assertEquals("A:\t[A, Am]\t[A, B]\t[A, C]\t\n" + "Am:\t[A, B]\t[A, C]\t[Am, C]\t\n" + + "B:\t[A, C]\t[Am, C]\t[B, E]\t\n" + "C:\t[B, E]\t\n" + "D:\t[B, E]\t[D, E]\t\n" + "E:\t\n" + + "F:\t[F, G]\t\n" + "G:\t\n" + "H:\t[H, I]\t\n" + "I:\t\n", + res); } @Test @@ -345,8 +337,7 @@ public void testBeginEndMarker() { LOG.info("Special cases -- empty"); String res = dump(sc.getSplits(), regions); checkDepths(sc.getSplits(), regions, 1, 1, 1, 0); - assertEquals(":\t[, A]\t\n" + "A:\t[A, B]\t\n" + "B:\t[B, ]\t\n" - + "null:\t\n", res); + assertEquals(":\t[, A]\t\n" + "A:\t[A, B]\t\n" + "B:\t[B, ]\t\n" + "null:\t\n", res); } @Test @@ -381,9 +372,7 @@ public void testBigRanges() { assertEquals("A", Bytes.toString(r2.start)); String r1e = Bytes.toString(r1.end); String r2e = Bytes.toString(r2.end); - assertTrue((r1e.equals("C") && r2e.equals("E")) - || (r1e.equals("E") && r2e.equals("C"))); + assertTrue((r1e.equals("C") && r2e.equals("E")) || (r1e.equals("E") && r2e.equals("C"))); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java index 72b2bd804df8..255cd6452595 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,15 +51,15 @@ import org.slf4j.LoggerFactory; /** - * Tests for {@link RegionSplitter}, which can create a pre-split table or do a - * rolling split of an existing table. + * Tests for {@link RegionSplitter}, which can create a pre-split table or do a rolling split of an + * existing table. */ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestRegionSplitter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionSplitter.class); + HBaseClassTestRule.forClass(TestRegionSplitter.class); private final static Logger LOG = LoggerFactory.getLogger(TestRegionSplitter.class); private final static HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -103,10 +103,9 @@ public void testCreatePresplitTableHex() throws Exception { expectedBounds.add(Bytes.toBytes("f0000000")); expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY); - // Do table creation/pre-splitting and verification of region boundaries - preSplitTableAndVerify(expectedBounds, - HexStringSplit.class.getSimpleName(), - TableName.valueOf(name.getMethodName())); + // Do table creation/pre-splitting and verification of region boundaries + preSplitTableAndVerify(expectedBounds, HexStringSplit.class.getSimpleName(), + TableName.valueOf(name.getMethodName())); } /** @@ -116,31 +115,31 @@ public void testCreatePresplitTableHex() throws Exception { public void testCreatePresplitTableUniform() throws Exception { List expectedBounds = new ArrayList<>(17); expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY); - expectedBounds.add(new byte[] { 0x10, 0, 0, 0, 0, 0, 0, 0}); - expectedBounds.add(new byte[] { 0x20, 0, 0, 0, 0, 0, 0, 0}); - expectedBounds.add(new byte[] { 0x30, 0, 0, 0, 0, 0, 0, 0}); - expectedBounds.add(new byte[] { 0x40, 0, 0, 0, 0, 0, 0, 0}); + expectedBounds.add(new byte[] { 0x10, 0, 0, 0, 0, 0, 0, 0 }); + expectedBounds.add(new byte[] { 0x20, 0, 0, 0, 0, 0, 0, 0 }); + expectedBounds.add(new byte[] { 0x30, 0, 0, 0, 0, 0, 0, 0 }); + expectedBounds.add(new byte[] { 0x40, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(new byte[] { 0x50, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(new byte[] { 0x60, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(new byte[] { 0x70, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(new byte[] { (byte) 0x80, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(new byte[] { (byte) 0x90, 0, 0, 0, 0, 0, 0, 0 }); - expectedBounds.add(new byte[] {(byte)0xa0, 0, 0, 0, 0, 0, 0, 0}); + expectedBounds.add(new byte[] { (byte) 0xa0, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(new byte[] { (byte) 0xb0, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(new byte[] { (byte) 0xc0, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(new byte[] { (byte) 0xd0, 0, 0, 0, 0, 0, 0, 0 }); - expectedBounds.add(new byte[] {(byte)0xe0, 0, 0, 0, 0, 0, 0, 0}); + expectedBounds.add(new byte[] { (byte) 0xe0, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(new byte[] { (byte) 0xf0, 0, 0, 0, 0, 0, 0, 0 }); expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY); // Do table creation/pre-splitting and verification of region boundaries preSplitTableAndVerify(expectedBounds, UniformSplit.class.getSimpleName(), - TableName.valueOf(name.getMethodName())); + TableName.valueOf(name.getMethodName())); } /** - * Unit tests for the HexStringSplit algorithm. Makes sure it divides up the - * space of keys in the way that we expect. + * Unit tests for the HexStringSplit algorithm. 
Makes sure it divides up the space of keys in the + * way that we expect. */ @Test public void unitTestHexStringSplit() { @@ -176,8 +175,8 @@ public void unitTestHexStringSplit() { assertArrayEquals(Bytes.toBytes("efffffff"), splitPoint); // Check splitting region with multiple mappers per region - byte[][] splits = splitter.split(Bytes.toBytes("00000000"), Bytes.toBytes("30000000"), - 3, false); + byte[][] splits = + splitter.split(Bytes.toBytes("00000000"), Bytes.toBytes("30000000"), 3, false); assertEquals(2, splits.length); assertArrayEquals(Bytes.toBytes("10000000"), splits[0]); assertArrayEquals(Bytes.toBytes("20000000"), splits[1]); @@ -188,8 +187,8 @@ public void unitTestHexStringSplit() { } /** - * Unit tests for the DecimalStringSplit algorithm. Makes sure it divides up the - * space of keys in the way that we expect. + * Unit tests for the DecimalStringSplit algorithm. Makes sure it divides up the space of keys in + * the way that we expect. */ @Test public void unitTestDecimalStringSplit() { @@ -229,8 +228,8 @@ public void unitTestDecimalStringSplit() { assertArrayEquals(Bytes.toBytes("89999999"), splitPoint); // Check splitting region with multiple mappers per region - byte[][] splits = splitter.split(Bytes.toBytes("00000000"), Bytes.toBytes("30000000"), - 3, false); + byte[][] splits = + splitter.split(Bytes.toBytes("00000000"), Bytes.toBytes("30000000"), 3, false); assertEquals(2, splits.length); assertArrayEquals(Bytes.toBytes("10000000"), splits[0]); assertArrayEquals(Bytes.toBytes("20000000"), splits[1]); @@ -241,8 +240,8 @@ public void unitTestDecimalStringSplit() { } /** - * Unit tests for the UniformSplit algorithm. Makes sure it divides up the space of - * keys in the way that we expect. + * Unit tests for the UniformSplit algorithm. Makes sure it divides up the space of keys in the + * way that we expect. 
*/ @Test public void unitTestUniformSplit() { @@ -252,7 +251,8 @@ public void unitTestUniformSplit() { try { splitter.split(1); throw new AssertionError("Splitting into <2 regions should have thrown exception"); - } catch (IllegalArgumentException e) { } + } catch (IllegalArgumentException e) { + } byte[][] twoRegionsSplits = splitter.split(2); assertEquals(1, twoRegionsSplits.length); @@ -260,41 +260,41 @@ public void unitTestUniformSplit() { byte[][] threeRegionsSplits = splitter.split(3); assertEquals(2, threeRegionsSplits.length); - byte[] expectedSplit0 = new byte[] {0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55}; + byte[] expectedSplit0 = new byte[] { 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55 }; assertArrayEquals(expectedSplit0, threeRegionsSplits[0]); - byte[] expectedSplit1 = new byte[] {(byte)0xAA, (byte)0xAA, (byte)0xAA, (byte)0xAA, - (byte)0xAA, (byte)0xAA, (byte)0xAA, (byte)0xAA}; + byte[] expectedSplit1 = new byte[] { (byte) 0xAA, (byte) 0xAA, (byte) 0xAA, (byte) 0xAA, + (byte) 0xAA, (byte) 0xAA, (byte) 0xAA, (byte) 0xAA }; assertArrayEquals(expectedSplit1, threeRegionsSplits[1]); // Check splitting existing regions that have start and end points - byte[] splitPoint = splitter.split(new byte[] {0x10}, new byte[] {0x30}); + byte[] splitPoint = splitter.split(new byte[] { 0x10 }, new byte[] { 0x30 }); assertArrayEquals(new byte[] { 0x20 }, splitPoint); - byte[] lastRow = new byte[] {xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF}; + byte[] lastRow = new byte[] { xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF }; assertArrayEquals(lastRow, splitter.lastRow()); byte[] firstRow = ArrayUtils.EMPTY_BYTE_ARRAY; assertArrayEquals(firstRow, splitter.firstRow()); - splitPoint = splitter.split(firstRow, new byte[] {0x20}); + splitPoint = splitter.split(firstRow, new byte[] { 0x20 }); assertArrayEquals(splitPoint, new byte[] { 0x10 }); - splitPoint = splitter.split(new byte[] {(byte)0xdf, xFF, xFF, xFF, xFF, - xFF, xFF, xFF}, lastRow); - assertArrayEquals(splitPoint, new byte[] { (byte) 0xef, xFF, xFF, xFF, xFF, xFF, xFF, xFF}); + splitPoint = + splitter.split(new byte[] { (byte) 0xdf, xFF, xFF, xFF, xFF, xFF, xFF, xFF }, lastRow); + assertArrayEquals(splitPoint, new byte[] { (byte) 0xef, xFF, xFF, xFF, xFF, xFF, xFF, xFF }); - splitPoint = splitter.split(new byte[] {'a', 'a', 'a'}, new byte[] {'a', 'a', 'b'}); + splitPoint = splitter.split(new byte[] { 'a', 'a', 'a' }, new byte[] { 'a', 'a', 'b' }); assertArrayEquals(splitPoint, new byte[] { 'a', 'a', 'a', (byte) 0x80 }); // Check splitting region with multiple mappers per region - byte[][] splits = splitter.split(new byte[] {'a', 'a', 'a'}, new byte[] {'a', 'a', 'd'}, - 3, false); + byte[][] splits = + splitter.split(new byte[] { 'a', 'a', 'a' }, new byte[] { 'a', 'a', 'd' }, 3, false); assertEquals(2, splits.length); - assertArrayEquals(splits[0], new byte[]{'a', 'a', 'b'}); - assertArrayEquals(splits[1], new byte[]{'a', 'a', 'c'}); + assertArrayEquals(splits[0], new byte[] { 'a', 'a', 'b' }); + assertArrayEquals(splits[1], new byte[] { 'a', 'a', 'c' }); - splits = splitter.split(new byte[] {'a', 'a', 'a'}, new byte[] {'a', 'a', 'e'}, 2, true); + splits = splitter.split(new byte[] { 'a', 'a', 'a' }, new byte[] { 'a', 'a', 'e' }, 2, true); assertEquals(3, splits.length); - assertArrayEquals(splits[1], new byte[] { 'a', 'a', 'c'}); + assertArrayEquals(splits[1], new byte[] { 'a', 'a', 'c' }); } @Test @@ -331,13 +331,12 @@ private boolean splitFailsPrecondition(SplitAlgorithm algo) { return splitFailsPrecondition(algo, 100); } - private boolean 
splitFailsPrecondition(SplitAlgorithm algo, String firstRow, - String lastRow) { + private boolean splitFailsPrecondition(SplitAlgorithm algo, String firstRow, String lastRow) { return splitFailsPrecondition(algo, firstRow, lastRow, 100); } - private boolean splitFailsPrecondition(SplitAlgorithm algo, String firstRow, - String lastRow, int numRegions) { + private boolean splitFailsPrecondition(SplitAlgorithm algo, String firstRow, String lastRow, + int numRegions) { algo.setFirstRow(firstRow); algo.setLastRow(lastRow); return splitFailsPrecondition(algo, numRegions); @@ -365,14 +364,13 @@ private boolean splitFailsPrecondition(SplitAlgorithm algo, int numRegions) { } /** - * Creates a pre-split table with expectedBounds.size()+1 regions, then - * verifies that the region boundaries are the same as the expected - * region boundaries in expectedBounds. + * Creates a pre-split table with expectedBounds.size()+1 regions, then verifies that the region + * boundaries are the same as the expected region boundaries in expectedBounds. * @throws Various junit assertions */ - private void preSplitTableAndVerify(List expectedBounds, - String splitClass, TableName tableName) throws Exception { - final int numRegions = expectedBounds.size()-1; + private void preSplitTableAndVerify(List expectedBounds, String splitClass, + TableName tableName) throws Exception { + final int numRegions = expectedBounds.size() - 1; final Configuration conf = UTIL.getConfiguration(); conf.setInt("split.count", numRegions); SplitAlgorithm splitAlgo = RegionSplitter.newSplitAlgoInstance(conf, splitClass); @@ -385,11 +383,11 @@ public void noopRollingSplit() throws Exception { final List expectedBounds = new ArrayList<>(1); expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY); rollingSplitAndVerify(TableName.valueOf(TestRegionSplitter.class.getSimpleName()), - "UniformSplit", expectedBounds); + "UniformSplit", expectedBounds); } private void rollingSplitAndVerify(TableName tableName, String splitClass, - List expectedBounds) throws Exception { + List expectedBounds) throws Exception { final Configuration conf = UTIL.getConfiguration(); // Set this larger than the number of splits so RegionSplitter won't block @@ -399,10 +397,9 @@ private void rollingSplitAndVerify(TableName tableName, String splitClass, verifyBounds(expectedBounds, tableName); } - private void verifyBounds(List expectedBounds, TableName tableName) - throws Exception { + private void verifyBounds(List expectedBounds, TableName tableName) throws Exception { // Get region boundaries from the cluster and verify their endpoints - final int numRegions = expectedBounds.size()-1; + final int numRegions = expectedBounds.size() - 1; try (Table table = UTIL.getConnection().getTable(tableName); RegionLocator locator = UTIL.getConnection().getRegionLocator(tableName)) { final List regionInfoMap = locator.getAllRegionLocations(); @@ -425,15 +422,15 @@ private void verifyBounds(List expectedBounds, TableName tableName) } /** - * List.indexOf() doesn't really work for a List<byte[]>, because byte[] - * doesn't override equals(). This method checks whether a list contains - * a given element by checking each element using the byte array comparator. + * List.indexOf() doesn't really work for a List<byte[]>, because byte[] doesn't override + * equals(). This method checks whether a list contains a given element by checking each element + * using the byte array comparator. * @return the index of the first element that equals compareTo, or -1 if no elements are equal. 
*/ - static private int indexOfBytes(List list, byte[] compareTo) { + static private int indexOfBytes(List list, byte[] compareTo) { int listIndex = 0; - for(byte[] elem: list) { - if(Bytes.BYTES_COMPARATOR.compare(elem, compareTo) == 0) { + for (byte[] elem : list) { + if (Bytes.BYTES_COMPARATOR.compare(elem, compareTo) == 0) { return listIndex; } listIndex++; @@ -442,4 +439,3 @@ static private int indexOfBytes(List list, byte[] compareTo) { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java index 226e0b5387d8..3f7171085b60 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRootPath.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ /** * Test requirement that root directory must be a URI */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestRootPath { @ClassRule @@ -71,4 +71,3 @@ public void testRootPath() { } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedList.java index 343c10bb8da1..2db10e186fda 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedList.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedList.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestSortedList { @ClassRule @@ -55,27 +55,28 @@ public void testSorting() throws Exception { list.add("b"); assertEquals(4, list.size()); - assertArrayEquals(new String[]{"a", "b", "c", "d"}, list.toArray(new String[4])); + assertArrayEquals(new String[] { "a", "b", "c", "d" }, list.toArray(new String[4])); list.add("c"); assertEquals(5, list.size()); - assertArrayEquals(new String[]{"a", "b", "c", "c", "d"}, list.toArray(new String[5])); + assertArrayEquals(new String[] { "a", "b", "c", "c", "d" }, list.toArray(new String[5])); // Test that removal from head or middle maintains sort list.remove("b"); assertEquals(4, list.size()); - assertArrayEquals(new String[]{"a", "c", "c", "d"}, list.toArray(new String[4])); + assertArrayEquals(new String[] { "a", "c", "c", "d" }, list.toArray(new String[4])); list.remove("c"); assertEquals(3, list.size()); - assertArrayEquals(new String[]{"a", "c", "d"}, list.toArray(new String[3])); + assertArrayEquals(new String[] { "a", "c", "d" }, list.toArray(new String[3])); list.remove("a"); assertEquals(2, list.size()); - assertArrayEquals(new String[]{"c", "d"}, list.toArray(new String[2])); + assertArrayEquals(new String[] { "c", "d" }, list.toArray(new String[2])); } @Test public void testReadOnlyIterators() throws Exception { - SortedList list = new SortedList<>(Lists.newArrayList("a", "b", "c", "d", "e"), new StringComparator()); + SortedList list = + new SortedList<>(Lists.newArrayList("a", "b", "c", "d", "e"), new StringComparator()); Iterator i = list.iterator(); i.next(); @@ -110,7 +111,8 @@ public void testReadOnlyIterators() throws Exception { @Test public void testIteratorIsolation() throws Exception { - SortedList list = new SortedList<>(Lists.newArrayList("a", "b", "c", "d", "e"), new StringComparator()); + SortedList list = + new SortedList<>(Lists.newArrayList("a", "b", "c", "d", "e"), new StringComparator()); // isolation of remove() Iterator iter = list.iterator(); @@ -162,7 +164,8 @@ public void testIteratorIsolation() throws Exception { @Test public void testRandomAccessIsolation() throws Exception { - SortedList list = new SortedList<>(Lists.newArrayList("a", "b", "c"), new StringComparator()); + SortedList list = + new SortedList<>(Lists.newArrayList("a", "b", "c"), new StringComparator()); List innerList = list.get(); assertEquals("a", innerList.get(0)); assertEquals("b", innerList.get(1)); @@ -170,4 +173,3 @@ public void testRandomAccessIsolation() throws Exception { assertEquals("c", innerList.get(2)); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestStealJobQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestStealJobQueue.java index 4d69f8e99c98..b22187111db6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestStealJobQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestStealJobQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestStealJobQueue { @ClassRule @@ -51,7 +51,6 @@ public void setup() { } - @Test public void testTake() throws InterruptedException { stealJobQueue.offer(3); @@ -61,7 +60,7 @@ public void testTake() throws InterruptedException { assertEquals(3, stealJobQueue.take().intValue()); assertEquals(4, stealJobQueue.take().intValue()); assertEquals("always take from the main queue before trying to steal", 15, - stealJobQueue.take().intValue()); + stealJobQueue.take().intValue()); assertEquals(10, stealJobQueue.take().intValue()); assertTrue(stealFromQueue.isEmpty()); assertTrue(stealJobQueue.isEmpty()); @@ -85,10 +84,9 @@ public void run() { stealFromQueue.offer(3); consumer.join(1000); assertEquals(3, taken.get()); - consumer.interrupt(); //Ensure the consumer thread will stop. + consumer.interrupt(); // Ensure the consumer thread will stop. } - @Test public void testOfferInStealJobQueueShouldUnblock() throws InterruptedException { final AtomicInteger taken = new AtomicInteger(); @@ -107,10 +105,9 @@ public void run() { stealJobQueue.offer(3); consumer.join(1000); assertEquals(3, taken.get()); - consumer.interrupt(); //Ensure the consumer thread will stop. + consumer.interrupt(); // Ensure the consumer thread will stop. } - @Test public void testPoll() throws InterruptedException { stealJobQueue.offer(3); @@ -120,7 +117,7 @@ public void testPoll() throws InterruptedException { assertEquals(3, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue()); assertEquals(4, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue()); assertEquals("always take from the main queue before trying to steal", 15, - stealJobQueue.poll(1, TimeUnit.SECONDS).intValue()); + stealJobQueue.poll(1, TimeUnit.SECONDS).intValue()); assertEquals(10, stealJobQueue.poll(1, TimeUnit.SECONDS).intValue()); assertTrue(stealFromQueue.isEmpty()); assertTrue(stealJobQueue.isEmpty()); @@ -145,11 +142,10 @@ public void run() { stealFromQueue.put(3); consumer.join(1000); assertEquals(3, taken.get()); - consumer.interrupt(); //Ensure the consumer thread will stop. + consumer.interrupt(); // Ensure the consumer thread will stop. } - @Test public void testAddInStealJobQueueShouldUnblockPoll() throws InterruptedException { final AtomicInteger taken = new AtomicInteger(); @@ -168,10 +164,9 @@ public void run() { stealJobQueue.add(3); consumer.join(1000); assertEquals(3, taken.get()); - consumer.interrupt(); //Ensure the consumer thread will stop. + consumer.interrupt(); // Ensure the consumer thread will stop. 
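The assertion message above ("always take from the main queue before trying to steal") states the queue's contract: take() and poll() drain the priority-ordered main queue first and only fall back to the steal-from queue once the main queue is empty. A condensed sketch of that behaviour follows; the comparator-taking constructor is an assumption, since the @Before setup that builds these queues lies outside the visible hunks:

import java.util.concurrent.BlockingQueue;

import org.apache.hadoop.hbase.util.StealJobQueue;

public class StealJobQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    // Assumed constructor; the test's setup() builds the real queues.
    StealJobQueue<Integer> stealJobQueue = new StealJobQueue<>(Integer::compare);
    BlockingQueue<Integer> stealFromQueue = stealJobQueue.getStealFromQueue();

    stealJobQueue.offer(3);   // main queue
    stealJobQueue.offer(15);  // main queue
    stealFromQueue.offer(10); // steal-from queue

    System.out.println(stealJobQueue.take()); // 3  -- smallest entry in the main queue
    System.out.println(stealJobQueue.take()); // 15 -- the main queue is drained first
    System.out.println(stealJobQueue.take()); // 10 -- only then is work stolen from the other queue
  }
}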
} - @Test public void testInteractWithThreadPool() throws InterruptedException { StealJobQueue stealTasksQueue = @@ -187,17 +182,17 @@ protected void afterExecute(Runnable r, Throwable t) { }; - //This is necessary otherwise no worker will be running and stealing job + // This is necessary otherwise no worker will be running and stealing job stealPool.prestartAllCoreThreads(); - ThreadPoolExecutor stealFromPool = new ThreadPoolExecutor(3, 3, 1, TimeUnit.DAYS, - stealTasksQueue.getStealFromQueue()) { - @Override - protected void afterExecute(Runnable r, Throwable t) { - super.afterExecute(r, t); - stealFromCountDown.countDown(); - } - }; + ThreadPoolExecutor stealFromPool = + new ThreadPoolExecutor(3, 3, 1, TimeUnit.DAYS, stealTasksQueue.getStealFromQueue()) { + @Override + protected void afterExecute(Runnable r, Throwable t) { + super.afterExecute(r, t); + stealFromCountDown.countDown(); + } + }; for (int i = 0; i < 4; i++) { TestTask task = new TestTask(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java index abdc8529a9c6..ad57c43248c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java @@ -33,7 +33,6 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; - import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -54,11 +53,12 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestMajorCompactionRequest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -69,12 +69,14 @@ public class TestMajorCompactionRequest { protected Path rootRegionDir; protected Path regionStoreDir; - @Before public void setUp() throws Exception { + @Before + public void setUp() throws Exception { rootRegionDir = UTILITY.getDataTestDirOnTestFS("TestMajorCompactionRequest"); regionStoreDir = new Path(rootRegionDir, FAMILY); } - @Test public void testStoresNeedingCompaction() throws Exception { + @Test + public void testStoresNeedingCompaction() throws Exception { // store files older than timestamp List storeFiles = mockStoreFiles(regionStoreDir, 5, 10); MajorCompactionRequest request = makeMockRequest(storeFiles, false); @@ -89,7 +91,8 @@ public class TestMajorCompactionRequest { assertFalse(result.isPresent()); } - @Test public void testIfWeHaveNewReferenceFilesButOldStoreFiles() throws Exception { + @Test + public void testIfWeHaveNewReferenceFilesButOldStoreFiles() throws Exception { // this tests that reference files that are new, but have older timestamps for the files // they reference still will get compacted. 
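The test body that follows stubs a Mockito spy with doReturn(...).when(...) rather than when(...).thenReturn(...); doReturn avoids invoking the real method while the stubbing is recorded, which matters for spies wrapping live objects. A minimal generic sketch of the idiom (the Pager class is hypothetical, not an HBase type):

import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

import java.util.Arrays;
import java.util.List;

public class SpyStubSketch {
  // Hypothetical collaborator used only to illustrate the idiom.
  static class Pager {
    List<String> page(int n) {
      return Arrays.asList("real-" + n);
    }
  }

  public static void main(String[] args) {
    Pager pager = spy(new Pager());
    // Stubs the spy without calling the real page() first.
    doReturn(Arrays.asList("stubbed")).when(pager).page(anyInt());
    System.out.println(pager.page(7)); // [stubbed]
  }
}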
TableName table = TableName.valueOf("TestMajorCompactor"); @@ -103,12 +106,11 @@ public class TestMajorCompactionRequest { List storeFiles = mockStoreFiles(regionStoreDir, 4, 101); List paths = storeFiles.stream().map(StoreFileInfo::getPath).collect(Collectors.toList()); // the files that are referenced are older, thus we still compact. - HRegionFileSystem fileSystem = - mockFileSystem(region.getRegionInfo(), true, storeFiles, 50); - MajorCompactionRequest majorCompactionRequest = spy(new MajorCompactionRequest(connection, - region.getRegionInfo(), Sets.newHashSet(FAMILY))); + HRegionFileSystem fileSystem = mockFileSystem(region.getRegionInfo(), true, storeFiles, 50); + MajorCompactionRequest majorCompactionRequest = spy( + new MajorCompactionRequest(connection, region.getRegionInfo(), Sets.newHashSet(FAMILY))); doReturn(paths).when(majorCompactionRequest).getReferenceFilePaths(any(FileSystem.class), - any(Path.class)); + any(Path.class)); doReturn(fileSystem).when(majorCompactionRequest).getFileSystem(); Set result = majorCompactionRequest.getStoresRequiringCompaction(Sets.newHashSet("a"), 100); @@ -153,8 +155,8 @@ protected List mockStoreFiles(Path regionStoreDir, int howMany, l return infos; } - private MajorCompactionRequest makeMockRequest(List storeFiles, - boolean references) throws IOException { + private MajorCompactionRequest makeMockRequest(List storeFiles, boolean references) + throws IOException { Connection connection = mock(Connection.class); RegionInfo regionInfo = mock(RegionInfo.class); when(regionInfo.getEncodedName()).thenReturn("HBase"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionTTLRequest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionTTLRequest.java index c9dc6f5fc977..366267678b66 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionTTLRequest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionTTLRequest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.util.compaction; import static org.junit.Assert.assertFalse; @@ -28,7 +27,6 @@ import java.io.IOException; import java.util.List; import java.util.Optional; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; @@ -41,10 +39,11 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestMajorCompactionTTLRequest extends TestMajorCompactionRequest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactor.java index b5d72cc86c2e..a240230cf281 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactor.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.util.compaction; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -27,15 +30,13 @@ import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @Category({ MiscTests.class, MediumTests.class }) public class TestMajorCompactor { @@ -47,17 +48,20 @@ public class TestMajorCompactor { protected HBaseTestingUtil utility; protected Admin admin; - @Before public void setUp() throws Exception { + @Before + public void setUp() throws Exception { utility = new HBaseTestingUtil(); utility.getConfiguration().setInt("hbase.hfile.compaction.discharger.interval", 10); utility.startMiniCluster(); } - @After public void tearDown() throws Exception { + @After + public void tearDown() throws Exception { utility.shutdownMiniCluster(); } - @Test public void testCompactingATable() throws Exception { + @Test + public void testCompactingATable() throws Exception { TableName tableName = TableName.valueOf("TestMajorCompactor"); utility.createMultiRegionTable(tableName, FAMILY, 5); utility.waitTableAvailable(tableName); @@ -74,8 +78,7 @@ public class TestMajorCompactor { // we should have a table with more store files than we would before we major compacted. 
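The statements that follow construct and drive the compaction tool directly. Condensed, the call sequence visible in these hunks is sketched below; the meanings noted for the numeric arguments are assumptions inferred from the call site:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.compaction.MajorCompactor;

import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

public class MajorCompactorSketch {
  // conf and table are assumed to come from a running cluster, e.g. an HBaseTestingUtil mini cluster.
  static void compactEverything(Configuration conf, TableName table) throws Exception {
    MajorCompactor compactor = new MajorCompactor(conf, table,
      Sets.newHashSet("fam1"),              // column families to compact
      1,                                    // concurrency (assumed)
      EnvironmentEdgeManager.currentTime(), // timestamp cutoff (assumed)
      200);                                 // sleep between checks, ms (assumed)
    compactor.initializeWorkQueues();
    compactor.compactAllRegions();
    compactor.shutdown();
  }
}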
assertTrue(numberOfRegions < numHFiles); - MajorCompactor compactor = - new MajorCompactor(utility.getConfiguration(), tableName, + MajorCompactor compactor = new MajorCompactor(utility.getConfiguration(), tableName, Sets.newHashSet(Bytes.toString(FAMILY)), 1, EnvironmentEdgeManager.currentTime(), 200); compactor.initializeWorkQueues(); compactor.compactAllRegions(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactorTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactorTTL.java index c12b38e6faa7..73eb0e26d5d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactorTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactorTTL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; @@ -77,8 +76,8 @@ public void testCompactingATable() throws Exception { assertTrue(numberOfRegions < numHFiles); modifyTTL(tableName); - MajorCompactorTTL compactor = new MajorCompactorTTL(utility.getConfiguration(), - admin.getDescriptor(tableName), 1, 200); + MajorCompactorTTL compactor = + new MajorCompactorTTL(utility.getConfiguration(), admin.getDescriptor(tableName), 1, 200); compactor.initializeWorkQueues(); compactor.compactAllRegions(); compactor.shutdown(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java index d04c3514f361..fd040129d71b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,22 +25,22 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; - -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.HBaseFsck; import org.apache.hadoop.hbase.util.HbckErrorReporter.ERROR_CODE; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + public class HbckTestingUtil { private static ExecutorService exec = new ScheduledThreadPoolExecutor(10); - public static HBaseFsck doFsck( - Configuration conf, boolean fix) throws Exception { + + public static HBaseFsck doFsck(Configuration conf, boolean fix) throws Exception { return doFsck(conf, fix, null); } - public static HBaseFsck doFsck( - Configuration conf, boolean fix, TableName table) throws Exception { + public static HBaseFsck doFsck(Configuration conf, boolean fix, TableName table) + throws Exception { return doFsck(conf, fix, fix, fix, fix, fix, fix, fix, fix, fix, fix, fix, fix, fix, table); } @@ -84,7 +84,8 @@ public static HBaseFsck doFsck(Configuration conf, boolean fixAssignments, boole * @return hbckInstance */ public static HBaseFsck doHFileQuarantine(Configuration conf, TableName table) throws Exception { - String[] args = {"-sidelineCorruptHFiles", "-ignorePreCheckPermission", table.getNameAsString()}; + String[] args = + { "-sidelineCorruptHFiles", "-ignorePreCheckPermission", table.getNameAsString() }; HBaseFsck hbck = new HBaseFsck(conf, exec); hbck.exec(exec, args); return hbck; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java index 089a181cb8d6..823d75aaed2e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java @@ -1,34 +1,34 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util.test; import java.io.IOException; import java.util.Random; import java.util.Set; - -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.LoadTestKVGenerator; import org.apache.yetus.audience.InterfaceAudience; /** - * A generator of random data (keys/cfs/columns/values) for load testing. - * Contains LoadTestKVGenerator as a matter of convenience... + * A generator of random data (keys/cfs/columns/values) for load testing. Contains + * LoadTestKVGenerator as a matter of convenience... */ @InterfaceAudience.Private public abstract class LoadTestDataGenerator { @@ -51,31 +51,31 @@ public LoadTestDataGenerator() { /** * Initializes the object. * @param minValueSize minimum size of the value generated by - * {@link #generateValue(byte[], byte[], byte[])}. + * {@link #generateValue(byte[], byte[], byte[])}. * @param maxValueSize maximum size of the value generated by - * {@link #generateValue(byte[], byte[], byte[])}. + * {@link #generateValue(byte[], byte[], byte[])}. */ public LoadTestDataGenerator(int minValueSize, int maxValueSize) { this.kvGenerator = new LoadTestKVGenerator(minValueSize, maxValueSize); } public static byte[] generateData(final Random r, int length) { - byte [] b = new byte [length]; + byte[] b = new byte[length]; int i = 0; - for(i = 0; i < (length-8); i += 8) { + for (i = 0; i < (length - 8); i += 8) { b[i] = (byte) (65 + r.nextInt(26)); - b[i+1] = b[i]; - b[i+2] = b[i]; - b[i+3] = b[i]; - b[i+4] = b[i]; - b[i+5] = b[i]; - b[i+6] = b[i]; - b[i+7] = b[i]; + b[i + 1] = b[i]; + b[i + 2] = b[i]; + b[i + 3] = b[i]; + b[i + 4] = b[i]; + b[i + 5] = b[i]; + b[i + 6] = b[i]; + b[i + 7] = b[i]; } byte a = (byte) (65 + r.nextInt(26)); - for(; i < length; i++) { + for (; i < length; i++) { b[i] = a; } return b; @@ -83,17 +83,15 @@ public static byte[] generateData(final Random r, int length) { /** * initialize the LoadTestDataGenerator - * - * @param args - * init args + * @param args init args */ public void initialize(String[] args) { this.args = args; } /** - * Generates a deterministic, unique hashed row key from a number. That way, the user can - * keep track of numbers, without messing with byte array and ensuring key distribution. + * Generates a deterministic, unique hashed row key from a number. That way, the user can keep + * track of numbers, without messing with byte array and ensuring key distribution. * @param keyBase Base number for a key, such as a loop counter. */ public abstract byte[] getDeterministicUniqueKey(long keyBase); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGeneratorWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGeneratorWithACL.java index 27eeb594081f..dcc18898366f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGeneratorWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGeneratorWithACL.java @@ -1,30 +1,30 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util.test; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.util.MultiThreadedAction.DefaultDataGenerator; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class LoadTestDataGeneratorWithACL extends DefaultDataGenerator { @@ -42,11 +42,10 @@ public LoadTestDataGeneratorWithACL(int minValueSize, int maxValueSize, int minC public void initialize(String[] args) { super.initialize(args); if (args.length != 3) { - throw new IllegalArgumentException( - "LoadTestDataGeneratorWithACL can have " - + "1st arguement which would be super user, the 2nd argument " - + "would be the user list and the 3rd argument should be the factor representing " - + "the row keys for which only write ACLs will be added."); + throw new IllegalArgumentException("LoadTestDataGeneratorWithACL can have " + + "1st arguement which would be super user, the 2nd argument " + + "would be the user list and the 3rd argument should be the factor representing " + + "the row keys for which only write ACLs will be added."); } String temp = args[1]; // This will be comma separated list of expressions. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java index 879cd4e79dfe..0c732ad929a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/CompressedWALTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,25 +42,26 @@ public class CompressedWALTestBase { static final byte[] VALUE; static { // 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597 - VALUE = new byte[1+1+2+3+5+8+13+21+34+55+89+144+233+377+610+987+1597]; + VALUE = new byte[1 + 1 + 2 + 3 + 5 + 8 + 13 + 21 + 34 + 55 + 89 + 144 + 233 + 377 + 610 + 987 + + 1597]; int off = 0; - Arrays.fill(VALUE, off, (off+=1), (byte)'A'); - Arrays.fill(VALUE, off, (off+=1), (byte)'B'); - Arrays.fill(VALUE, off, (off+=2), (byte)'C'); - Arrays.fill(VALUE, off, (off+=3), (byte)'D'); - Arrays.fill(VALUE, off, (off+=5), (byte)'E'); - Arrays.fill(VALUE, off, (off+=8), (byte)'F'); - Arrays.fill(VALUE, off, (off+=13), (byte)'G'); - Arrays.fill(VALUE, off, (off+=21), (byte)'H'); - Arrays.fill(VALUE, off, (off+=34), (byte)'I'); - Arrays.fill(VALUE, off, (off+=55), (byte)'J'); - Arrays.fill(VALUE, off, (off+=89), (byte)'K'); - Arrays.fill(VALUE, off, (off+=144), (byte)'L'); - Arrays.fill(VALUE, off, (off+=233), (byte)'M'); - Arrays.fill(VALUE, off, (off+=377), (byte)'N'); - Arrays.fill(VALUE, off, (off+=610), (byte)'O'); - Arrays.fill(VALUE, off, (off+=987), (byte)'P'); - Arrays.fill(VALUE, off, (off+=1597), (byte)'Q'); + Arrays.fill(VALUE, off, (off += 1), (byte) 'A'); + Arrays.fill(VALUE, off, (off += 1), (byte) 'B'); + Arrays.fill(VALUE, off, (off += 2), (byte) 'C'); + Arrays.fill(VALUE, off, (off += 3), (byte) 'D'); + Arrays.fill(VALUE, off, (off += 5), (byte) 'E'); + Arrays.fill(VALUE, off, (off += 8), (byte) 'F'); + Arrays.fill(VALUE, off, (off += 13), (byte) 'G'); + Arrays.fill(VALUE, off, (off += 21), (byte) 'H'); + Arrays.fill(VALUE, off, (off += 34), (byte) 'I'); + Arrays.fill(VALUE, off, (off += 55), (byte) 'J'); + Arrays.fill(VALUE, off, (off += 89), (byte) 'K'); + Arrays.fill(VALUE, off, (off += 144), (byte) 'L'); + Arrays.fill(VALUE, off, (off += 233), (byte) 'M'); + Arrays.fill(VALUE, off, (off += 377), (byte) 'N'); + Arrays.fill(VALUE, off, (off += 610), (byte) 'O'); + Arrays.fill(VALUE, off, (off += 987), (byte) 'P'); + Arrays.fill(VALUE, off, (off += 1597), (byte) 'Q'); } public void doTest(TableName tableName) throws Exception { @@ -72,7 +73,7 @@ public void doTest(TableName tableName) throws Exception { final byte[] family = Bytes.toBytes("family"); final byte[] value = VALUE; final WALFactory wals = - new WALFactory(TEST_UTIL.getConfiguration(), tableName.getNameAsString()); + new WALFactory(TEST_UTIL.getConfiguration(), tableName.getNameAsString()); // Write the WAL final WAL wal = wals.getWAL(regionInfo); @@ -83,7 +84,8 @@ public void doTest(TableName tableName) throws Exception { WALEdit kvs = new WALEdit(); kvs.add(new KeyValue(row, family, Bytes.toBytes(i), value)); wal.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, - System.currentTimeMillis(), mvcc, scopes), kvs); + System.currentTimeMillis(), mvcc, scopes), + kvs); } wal.sync(); final Path walPath = AbstractFSWALProvider.getCurrentFileName(wal); @@ -97,7 +99,7 @@ public void doTest(TableName tableName) throws Exception { count++; List cells = entry.getEdit().getCells(); assertTrue("Should be one KV per WALEdit", cells.size() == 1); - for (Cell cell: cells) { + for (Cell cell : cells) { assertTrue("Incorrect row", Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0, row.length)); assertTrue("Incorrect family", Bytes.equals(cell.getFamilyArray(), cell.getFamilyOffset(), diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/FaultyFSLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/FaultyFSLog.java index 3afafa16982e..2edb91de3a3e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/FaultyFSLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/FaultyFSLog.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.wal; import java.io.IOException; @@ -37,6 +35,7 @@ public class FaultyFSLog extends FSHLog { public enum FailureType { NONE, APPEND, SYNC } + FailureType ft = FailureType.NONE; public FaultyFSLog(FileSystem fs, Path rootDir, String logName, Configuration conf) @@ -65,4 +64,3 @@ protected long append(RegionInfo info, WALKeyImpl key, WALEdit edits, boolean in return super.append(info, key, edits, inMemstore); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/FileSystemProxy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/FileSystemProxy.java index fb729f55beff..048c27ea93f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/FileSystemProxy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/FileSystemProxy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; - import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -30,10 +29,9 @@ import org.apache.hadoop.util.Progressable; /** - * Create a non-abstract "proxy" for FileSystem because FileSystem is an - * abstract class and not an interface. Only interfaces can be used with the - * Java Proxy class to override functionality via an InvocationHandler. - * + * Create a non-abstract "proxy" for FileSystem because FileSystem is an abstract class and not an + * interface. Only interfaces can be used with the Java Proxy class to override functionality via an + * InvocationHandler. 
*/ public class FileSystemProxy extends FileSystem { private final FileSystem real; @@ -58,13 +56,14 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException { } @Override - public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, - short replication, long blockSize, Progressable progress) throws IOException { + public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, + int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { return real.create(f, permission, overwrite, bufferSize, replication, blockSize, progress); } @Override - public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { + public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) + throws IOException { return real.append(f, bufferSize, progress); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java index 88ccbc7e2ee5..b6acd6cf4bff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,14 +27,12 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RegionInfo; -// imports for things that haven't moved from regionserver.wal yet. import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter; @@ -76,12 +73,9 @@ public class IOTestProvider implements WALProvider { private static final Logger LOG = LoggerFactory.getLogger(IOTestProvider.class); private static final String ALLOWED_OPERATIONS = "hbase.wal.iotestprovider.operations"; + private enum AllowedOperations { - all, - append, - sync, - fileroll, - none + all, append, sync, fileroll, none } private WALFactory factory; @@ -94,11 +88,12 @@ private enum AllowedOperations { protected AtomicBoolean initialized = new AtomicBoolean(false); private List listeners = new ArrayList<>(); + /** * @param factory factory that made us, identity used for FS layout. may not be null * @param conf may not be null * @param providerId differentiate between providers from one facotry, used for FS layout. may be - * null + * null */ @Override public void init(WALFactory factory, Configuration conf, String providerId, Abortable abortable) @@ -164,38 +159,32 @@ private static class IOTestWAL extends FSHLog { private final boolean initialized; /** - * Create an edit log at the given dir location. - * - * You should never have to load an existing log. If there is a log at - * startup, it should have already been processed and deleted by the time the - * WAL object is started up. - * + * Create an edit log at the given dir location. You should never have to load an + * existing log. 
If there is a log at startup, it should have already been processed and deleted + * by the time the WAL object is started up. * @param fs filesystem handle * @param rootDir path to where logs and oldlogs * @param logDir dir where wals are stored * @param archiveDir dir where wals are archived * @param conf configuration to use - * @param listeners Listeners on WAL events. Listeners passed here will - * be registered before we do anything else; e.g. the - * Constructor {@link #rollWriter()}. + * @param listeners Listeners on WAL events. Listeners passed here will be registered before we + * do anything else; e.g. the Constructor {@link #rollWriter()}. * @param failIfWALExists If true IOException will be thrown if files related to this wal - * already exist. - * @param prefix should always be hostname and port in distributed env and - * it will be URL encoded before being used. - * If prefix is null, "wal" will be used + * already exist. + * @param prefix should always be hostname and port in distributed env and it will be URL + * encoded before being used. If prefix is null, "wal" will be used * @param suffix will be url encoded. null is treated as empty. non-empty must start with - * {@link AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} + * {@link AbstractFSWALProvider#WAL_FILE_NAME_DELIMITER} * @throws IOException */ public IOTestWAL(final FileSystem fs, final Path rootDir, final String logDir, - final String archiveDir, final Configuration conf, - final List listeners, + final String archiveDir, final Configuration conf, final List listeners, final boolean failIfWALExists, final String prefix, final String suffix) throws IOException { super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); Collection operations = conf.getStringCollection(ALLOWED_OPERATIONS); - doFileRolls = operations.isEmpty() || operations.contains(AllowedOperations.all.name()) || - operations.contains(AllowedOperations.fileroll.name()); + doFileRolls = operations.isEmpty() || operations.contains(AllowedOperations.all.name()) + || operations.contains(AllowedOperations.fileroll.name()); initialized = true; LOG.info("Initialized with file rolling " + (doFileRolls ? 
"enabled" : "disabled")); } @@ -214,10 +203,10 @@ protected Writer createWriterInstance(final Path path) throws IOException { final ProtobufLogWriter writer = new IOTestWriter(); try { writer.init(fs, path, conf, false, this.blocksize, - StreamSlowMonitor.create(conf, path.getName())); + StreamSlowMonitor.create(conf, path.getName())); } catch (CommonFSUtils.StreamLacksCapabilityException exception) { - throw new IOException("Can't create writer instance because underlying FileSystem " + - "doesn't support needed stream capabilities.", exception); + throw new IOException("Can't create writer instance because underlying FileSystem " + + "doesn't support needed stream capabilities.", exception); } if (!initialized) { LOG.info("storing initial writer instance in case file rolling isn't allowed."); @@ -242,8 +231,8 @@ private static class IOTestWriter extends ProtobufLogWriter { @Override public void init(FileSystem fs, Path path, Configuration conf, boolean overwritable, - long blocksize, StreamSlowMonitor monitor) throws IOException, - CommonFSUtils.StreamLacksCapabilityException { + long blocksize, StreamSlowMonitor monitor) + throws IOException, CommonFSUtils.StreamLacksCapabilityException { Collection operations = conf.getStringCollection(ALLOWED_OPERATIONS); if (operations.isEmpty() || operations.contains(AllowedOperations.all.name())) { doAppends = doSyncs = true; @@ -253,8 +242,8 @@ public void init(FileSystem fs, Path path, Configuration conf, boolean overwrita doAppends = operations.contains(AllowedOperations.append.name()); doSyncs = operations.contains(AllowedOperations.sync.name()); } - LOG.info("IOTestWriter initialized with appends " + (doAppends ? "enabled" : "disabled") + - " and syncs " + (doSyncs ? "enabled" : "disabled")); + LOG.info("IOTestWriter initialized with appends " + (doAppends ? "enabled" : "disabled") + + " and syncs " + (doSyncs ? "enabled" : "disabled")); super.init(fs, path, conf, overwritable, blocksize, monitor); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestAsyncFSWALCorruptionDueToDanglingByteBuffer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestAsyncFSWALCorruptionDueToDanglingByteBuffer.java index 46aa87107b4b..953109e19497 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestAsyncFSWALCorruptionDueToDanglingByteBuffer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestAsyncFSWALCorruptionDueToDanglingByteBuffer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,20 +43,20 @@ */ @Category({ RegionServerTests.class, MediumTests.class }) public class TestAsyncFSWALCorruptionDueToDanglingByteBuffer - extends WALCorruptionDueToDanglingByteBufferTestBase { + extends WALCorruptionDueToDanglingByteBufferTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncFSWALCorruptionDueToDanglingByteBuffer.class); + HBaseClassTestRule.forClass(TestAsyncFSWALCorruptionDueToDanglingByteBuffer.class); public static final class PauseWAL extends AsyncFSWAL { public PauseWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, - Configuration conf, List listeners, boolean failIfWALExists, - String prefix, String suffix, EventLoopGroup eventLoopGroup, - Class channelClass) throws FailedLogCloseException, IOException { + Configuration conf, List listeners, boolean failIfWALExists, + String prefix, String suffix, EventLoopGroup eventLoopGroup, + Class channelClass) throws FailedLogCloseException, IOException { super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix, - eventLoopGroup, channelClass); + eventLoopGroup, channelClass); } @Override @@ -80,16 +80,16 @@ public static final class PauseWALProvider extends AbstractFSWALProvider> eventLoopGroupAndChannelClass = - NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf); + NettyAsyncFSWALConfigHelper.getEventLoopConfig(conf); eventLoopGroup = eventLoopGroupAndChannelClass.getFirst(); channelClass = eventLoopGroupAndChannelClass.getSecond(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java index 01a2f1ec2ac6..90a7f14543c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -137,9 +137,9 @@ public void testConcurrentWrites() throws Exception { @Test public void testMoreRegionsThanBound() throws Exception { final String parallelism = Integer.toString(DEFAULT_NUM_REGION_GROUPS * 2); - int errCode = WALPerformanceEvaluation.innerMain(new Configuration(CONF), - new String[] { "-threads", parallelism, "-verify", "-noclosefs", "-iterations", "3000", - "-regions", parallelism }); + int errCode = + WALPerformanceEvaluation.innerMain(new Configuration(CONF), new String[] { "-threads", + parallelism, "-verify", "-noclosefs", "-iterations", "3000", "-regions", parallelism }); assertEquals(0, errCode); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWAL.java index d59a8605ef48..6bbbf214ee20 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,8 +57,7 @@ public static Iterable data() { @Before public void setUp() throws Exception { TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, walProvider); - TEST_UTIL.getConfiguration() - .setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); + TEST_UTIL.getConfiguration().setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); TEST_UTIL.startMiniDFSCluster(3); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWALValueCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWALValueCompression.java index 8e282e89eb38..44062cf72a47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWALValueCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestCompressedWALValueCompression.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.wal; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -65,12 +64,10 @@ public TestCompressedWALValueCompression(Compression.Algorithm algo) { @Before public void setUp() throws Exception { - TEST_UTIL.getConfiguration() - .setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); - TEST_UTIL.getConfiguration() - .setBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, true); - TEST_UTIL.getConfiguration() - .set(CompressionContext.WAL_VALUE_COMPRESSION_TYPE, compression.getName()); + TEST_UTIL.getConfiguration().setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); + TEST_UTIL.getConfiguration().setBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, true); + TEST_UTIL.getConfiguration().set(CompressionContext.WAL_VALUE_COMPRESSION_TYPE, + compression.getName()); TEST_UTIL.startMiniDFSCluster(3); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestDisabledWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestDisabledWAL.java index 5e91f9a339b8..52dd9810f57a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestDisabledWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestDisabledWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import static org.junit.Assert.fail; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; @@ -42,7 +41,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogCorruptionDueToDanglingByteBuffer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogCorruptionDueToDanglingByteBuffer.java index 73f7ad4ae132..754b81c0ea12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogCorruptionDueToDanglingByteBuffer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogCorruptionDueToDanglingByteBuffer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,17 +38,17 @@ */ @Category({ RegionServerTests.class, MediumTests.class }) public class TestFSHLogCorruptionDueToDanglingByteBuffer - extends WALCorruptionDueToDanglingByteBufferTestBase { + extends WALCorruptionDueToDanglingByteBufferTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFSHLogCorruptionDueToDanglingByteBuffer.class); + HBaseClassTestRule.forClass(TestFSHLogCorruptionDueToDanglingByteBuffer.class); public static final class PauseWAL extends FSHLog { public PauseWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir, - Configuration conf, List listeners, boolean failIfWALExists, - String prefix, String suffix) throws IOException { + Configuration conf, List listeners, boolean failIfWALExists, + String prefix, String suffix) throws IOException { super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); } @@ -69,9 +69,9 @@ public static final class PauseWALProvider extends AbstractFSWALProvider listeners, boolean failIfWALExists, - String prefix, String suffix) throws IOException { + Configuration conf, List listeners, boolean failIfWALExists, + String prefix, String suffix) throws IOException { super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix); } @@ -88,9 +87,9 @@ public static final class PauseWALProvider extends AbstractFSWALProvider scopes) throws IOException { final byte[] row = Bytes.toBytes("row"); @@ -253,14 +249,13 @@ public void testLogCleaning() throws Exception { } /** - * Tests wal archiving by adding data, doing flushing/rolling and checking we archive old logs - * and also don't archive "live logs" (that is, a log with un-flushed entries). + * Tests wal archiving by adding data, doing flushing/rolling and checking we archive old logs and + * also don't archive "live logs" (that is, a log with un-flushed entries). *

          - * This is what it does: - * It creates two regions, and does a series of inserts along with log rolling. - * Whenever a WAL is rolled, HLogBase checks previous wals for archiving. A wal is eligible for - * archiving if for all the regions which have entries in that wal file, have flushed - past - * their maximum sequence id in that wal file. + * This is what it does: It creates two regions, and does a series of inserts along with log + * rolling. Whenever a WAL is rolled, HLogBase checks previous wals for archiving. A wal is + * eligible for archiving if for all the regions which have entries in that wal file, have flushed + * - past their maximum sequence id in that wal file. *

          * @throws IOException */ @@ -344,9 +339,9 @@ public void testWALArchiving() throws IOException { public void testConcurrentWrites() throws Exception { // Run the WPE tool with three threads writing 3000 edits each concurrently. // When done, verify that all edits were written. - int errCode = WALPerformanceEvaluation. - innerMain(new Configuration(TEST_UTIL.getConfiguration()), - new String [] {"-threads", "3", "-verify", "-noclosefs", "-iterations", "3000"}); + int errCode = + WALPerformanceEvaluation.innerMain(new Configuration(TEST_UTIL.getConfiguration()), + new String[] { "-threads", "3", "-verify", "-noclosefs", "-iterations", "3000" }); assertEquals(0, errCode); } @@ -364,8 +359,8 @@ public void setMembershipDedups() throws IOException { seen.add(wals.getWAL(null))); for (int i = 0; i < 1000; i++) { assertFalse( - "default wal provider is only supposed to return a single wal, which should " + - "compare as .equals itself.", + "default wal provider is only supposed to return a single wal, which should " + + "compare as .equals itself.", seen.add(wals.getWAL(RegionInfoBuilder .newBuilder(TableName.valueOf("Table-" + ThreadLocalRandom.current().nextInt())) .build()))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java index 26ff11836f05..340ce5101859 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class TestRaceBetweenGetWALAndGetWALs { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRaceBetweenGetWALAndGetWALs.class); + HBaseClassTestRule.forClass(TestRaceBetweenGetWALAndGetWALs.class); private static Future> GET_WALS_FUTURE; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestReadWriteSeqIdFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestReadWriteSeqIdFiles.java index b987f7c00810..ce061a9f7b2a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestReadWriteSeqIdFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestReadWriteSeqIdFiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public class TestReadWriteSeqIdFiles { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReadWriteSeqIdFiles.class); + HBaseClassTestRule.forClass(TestReadWriteSeqIdFiles.class); private static final Logger LOG = LoggerFactory.getLogger(TestReadWriteSeqIdFiles.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java index fc45a140ba2b..1815e40808b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -131,7 +131,8 @@ public void testSecureWAL() throws Exception { WALEdit kvs = new WALEdit(); kvs.add(new KeyValue(row, family, Bytes.toBytes(i), value)); wal.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), + kvs); } wal.sync(); final Path walPath = AbstractFSWALProvider.getCurrentFileName(wal); @@ -140,7 +141,7 @@ public void testSecureWAL() throws Exception { // Insure edits are not plaintext long length = fs.getFileStatus(walPath).getLen(); FSDataInputStream in = fs.open(walPath); - byte[] fileData = new byte[(int)length]; + byte[] fileData = new byte[(int) length]; IOUtils.readFully(in, fileData); in.close(); assertFalse("Cells appear to be plaintext", Bytes.contains(fileData, value)); @@ -153,7 +154,7 @@ public void testSecureWAL() throws Exception { count++; List cells = entry.getEdit().getCells(); assertTrue("Should be one KV per WALEdit", cells.size() == 1); - for (Cell cell: cells) { + for (Cell cell : cells) { assertTrue("Incorrect row", Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), row, 0, row.length)); assertTrue("Incorrect family", Bytes.equals(cell.getFamilyArray(), cell.getFamilyOffset(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java index d9493feace4c..ba5714ab080b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ public class TestSyncReplicationWALProvider { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSyncReplicationWALProvider.class); + HBaseClassTestRule.forClass(TestSyncReplicationWALProvider.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -116,12 +116,12 @@ private void testReadWrite(DualAsyncFSWAL wal) throws Exception { Path localFile = wal.getCurrentFileName(); Path remoteFile = new Path(REMOTE_WAL_DIR + "/" + PEER_ID, localFile.getName()); try (ProtobufLogReader reader = - (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) { + (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) { ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row, timestamp); } try (ProtobufLogReader reader = - (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) { + (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) { ProtobufLogTestHelper.doRead(reader, false, REGION, TABLE, columnCount, recordCount, row, timestamp); } @@ -147,12 +147,12 @@ public String explainFailure() throws Exception { } }); try (ProtobufLogReader reader = - (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) { + (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), localFile)) { ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row, timestamp); } try (ProtobufLogReader reader = - (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) { + (ProtobufLogReader) FACTORY.createReader(UTIL.getTestFileSystem(), remoteFile)) { ProtobufLogTestHelper.doRead(reader, true, REGION, TABLE, columnCount, recordCount, row, timestamp); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java index 62e29c5e0dad..12679cf6791e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ import java.util.TreeMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -90,7 +89,7 @@ /** * WAL tests that can be reused across providers. */ -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestWALFactory { @ClassRule @@ -127,8 +126,8 @@ public void tearDown() throws Exception { try { wals.close(); } catch (IOException exception) { - LOG.warn("Encountered exception while closing wal factory. If you have other errors, this" + - " may be the cause. Message: " + exception); + LOG.warn("Encountered exception while closing wal factory. If you have other errors, this" + + " may be the cause. 
Message: " + exception); LOG.debug("Exception details for failure to close wal factory.", exception); } FileStatus[] entries = fs.listStatus(new Path("/")); @@ -149,16 +148,13 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000); // faster failover with cluster.shutdown();fs.close() idiom - TEST_UTIL.getConfiguration() - .setInt("hbase.ipc.client.connect.max.retries", 1); - TEST_UTIL.getConfiguration().setInt( - "dfs.client.block.recovery.retries", 1); - TEST_UTIL.getConfiguration().setInt( - "hbase.ipc.client.connection.maxidletime", 500); + TEST_UTIL.getConfiguration().setInt("hbase.ipc.client.connect.max.retries", 1); + TEST_UTIL.getConfiguration().setInt("dfs.client.block.recovery.retries", 1); + TEST_UTIL.getConfiguration().setInt("hbase.ipc.client.connection.maxidletime", 500); TEST_UTIL.getConfiguration().setInt("hbase.lease.recovery.timeout", 10000); TEST_UTIL.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000); TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - SampleRegionWALCoprocessor.class.getName()); + SampleRegionWALCoprocessor.class.getName()); TEST_UTIL.startMiniDFSCluster(3); conf = TEST_UTIL.getConfiguration(); @@ -179,14 +175,13 @@ public void canCloseSingleton() throws IOException { } /** - * Just write multiple logs then split. Before fix for HADOOP-2283, this - * would fail. + * Just write multiple logs then split. Before fix for HADOOP-2283, this would fail. * @throws IOException */ @Test public void testSplit() throws IOException { final TableName tableName = TableName.valueOf(currentTest.getMethodName()); - final byte [] rowName = tableName.getName(); + final byte[] rowName = tableName.getName(); final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1); final int howmany = 3; RegionInfo[] infos = new RegionInfo[3]; @@ -204,22 +199,20 @@ public void testSplit() throws IOException { NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(Bytes.toBytes("column"), 0); - // Add edits for three regions. for (int ii = 0; ii < howmany; ii++) { for (int i = 0; i < howmany; i++) { - final WAL log = - wals.getWAL(infos[i]); + final WAL log = wals.getWAL(infos[i]); for (int j = 0; j < howmany; j++) { WALEdit edit = new WALEdit(); - byte [] family = Bytes.toBytes("column"); - byte [] qualifier = Bytes.toBytes(Integer.toString(j)); - byte [] column = Bytes.toBytes("column:" + Integer.toString(j)); - edit.add(new KeyValue(rowName, family, qualifier, - EnvironmentEdgeManager.currentTime(), column)); + byte[] family = Bytes.toBytes("column"); + byte[] qualifier = Bytes.toBytes(Integer.toString(j)); + byte[] column = Bytes.toBytes("column:" + Integer.toString(j)); + edit.add( + new KeyValue(rowName, family, qualifier, EnvironmentEdgeManager.currentTime(), column)); LOG.info("Region " + i + ": " + edit); - WALKeyImpl walKey = new WALKeyImpl(infos[i].getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes); + WALKeyImpl walKey = new WALKeyImpl(infos[i].getEncodedNameAsBytes(), tableName, + EnvironmentEdgeManager.currentTime(), mvcc, scopes); log.appendData(infos[i], walKey, edit); walKey.getWriteEntry(); } @@ -228,11 +221,11 @@ public void testSplit() throws IOException { } } wals.shutdown(); - // The below calculation of logDir relies on insider information... WALSplitter should be connected better + // The below calculation of logDir relies on insider information... 
WALSplitter should be + // connected better // with the WAL system.... not requiring explicit path. The oldLogDir is just made up not used. - Path logDir = - new Path(new Path(hbaseWALDir, HConstants.HREGION_LOGDIR_NAME), - this.currentServername.toString()); + Path logDir = new Path(new Path(hbaseWALDir, HConstants.HREGION_LOGDIR_NAME), + this.currentServername.toString()); Path oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME); List splits = WALSplitter.split(hbaseWALDir, logDir, oldLogDir, fs, conf, wals); verifySplits(splits, howmany); @@ -252,19 +245,18 @@ public void Broken_testSync() throws Exception { out.write(tableName.getName()); Method syncMethod = null; try { - syncMethod = out.getClass().getMethod("hflush", new Class []{}); + syncMethod = out.getClass().getMethod("hflush", new Class[] {}); } catch (NoSuchMethodException e) { try { - syncMethod = out.getClass().getMethod("sync", new Class []{}); + syncMethod = out.getClass().getMethod("sync", new Class[] {}); } catch (NoSuchMethodException ex) { - fail("This version of Hadoop supports neither Syncable.sync() " + - "nor Syncable.hflush()."); + fail("This version of Hadoop supports neither Syncable.sync() " + "nor Syncable.hflush()."); } } - syncMethod.invoke(out, new Object[]{}); + syncMethod.invoke(out, new Object[] {}); FSDataInputStream in = fs.open(p); assertTrue(in.available() > 0); - byte [] buffer = new byte [1024]; + byte[] buffer = new byte[1024]; int read = in.read(buffer); assertEquals(tableName.getName().length, read); out.close(); @@ -283,9 +275,10 @@ public void Broken_testSync() throws Exception { WALEdit kvs = new WALEdit(); kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); wal.appendData(info, new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), + kvs); } - // Now call sync and try reading. Opening a Reader before you sync just + // Now call sync and try reading. Opening a Reader before you sync just // gives you EOFE. wal.sync(); // Open a Reader. @@ -293,7 +286,8 @@ public void Broken_testSync() throws Exception { reader = wals.createReader(fs, walPath); int count = 0; WAL.Entry entry = new WAL.Entry(); - while ((entry = reader.next(entry)) != null) count++; + while ((entry = reader.next(entry)) != null) + count++; assertEquals(total, count); reader.close(); // Add test that checks to see that an open of a Reader works on a file @@ -302,42 +296,48 @@ public void Broken_testSync() throws Exception { WALEdit kvs = new WALEdit(); kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); wal.appendData(info, new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), + kvs); } wal.sync(); reader = wals.createReader(fs, walPath); count = 0; - while((entry = reader.next(entry)) != null) count++; + while ((entry = reader.next(entry)) != null) + count++; assertTrue(count >= total); reader.close(); // If I sync, should see double the edits. wal.sync(); reader = wals.createReader(fs, walPath); count = 0; - while((entry = reader.next(entry)) != null) count++; + while ((entry = reader.next(entry)) != null) + count++; assertEquals(total * 2, count); reader.close(); // Now do a test that ensures stuff works when we go over block boundary, // especially that we return good length on file. 
- final byte [] value = new byte[1025 * 1024]; // Make a 1M value. + final byte[] value = new byte[1025 * 1024]; // Make a 1M value. for (int i = 0; i < total; i++) { WALEdit kvs = new WALEdit(); kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value)); wal.appendData(info, new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), + kvs); } - // Now I should have written out lots of blocks. Sync then read. + // Now I should have written out lots of blocks. Sync then read. wal.sync(); reader = wals.createReader(fs, walPath); count = 0; - while((entry = reader.next(entry)) != null) count++; + while ((entry = reader.next(entry)) != null) + count++; assertEquals(total * 3, count); reader.close(); // shutdown and ensure that Reader gets right length also. wal.shutdown(); reader = wals.createReader(fs, walPath); count = 0; - while((entry = reader.next(entry)) != null) count++; + while ((entry = reader.next(entry)) != null) + count++; assertEquals(total * 3, count); reader.close(); } finally { @@ -345,8 +345,7 @@ public void Broken_testSync() throws Exception { } } - private void verifySplits(final List splits, final int howmany) - throws IOException { + private void verifySplits(final List splits, final int howmany) throws IOException { assertEquals(howmany * howmany, splits.size()); for (int i = 0; i < splits.size(); i++) { LOG.info("Verifying=" + splits.get(i)); @@ -356,7 +355,7 @@ private void verifySplits(final List splits, final int howmany) String previousRegion = null; long seqno = -1; WAL.Entry entry = new WAL.Entry(); - while((entry = reader.next(entry)) != null) { + while ((entry = reader.next(entry)) != null) { WALKey key = entry.getKey(); String region = Bytes.toString(key.getEncodedRegionName()); // Assert that all edits are for same region. @@ -377,18 +376,14 @@ private void verifySplits(final List splits, final int howmany) } /* - * We pass different values to recoverFileLease() so that different code paths are covered - * - * For this test to pass, requires: - * 1. HDFS-200 (append support) - * 2. HDFS-988 (SafeMode should freeze file operations - * [FSNamesystem.nextGenerationStampForBlock]) - * 3. HDFS-142 (on restart, maintain pendingCreates) + * We pass different values to recoverFileLease() so that different code paths are covered For + * this test to pass, requires: 1. HDFS-200 (append support) 2. HDFS-988 (SafeMode should freeze + * file operations [FSNamesystem.nextGenerationStampForBlock]) 3. 
HDFS-142 (on restart, maintain + * pendingCreates) */ @Test public void testAppendClose() throws Exception { - TableName tableName = - TableName.valueOf(currentTest.getMethodName()); + TableName tableName = TableName.valueOf(currentTest.getMethodName()); RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build(); WAL wal = wals.getWAL(regionInfo); @@ -401,15 +396,15 @@ public void testAppendClose() throws Exception { WALEdit kvs = new WALEdit(); kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); wal.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), + kvs); } // Now call sync to send the data to HDFS datanodes wal.sync(); - int namenodePort = cluster.getNameNodePort(); + int namenodePort = cluster.getNameNodePort(); final Path walPath = AbstractFSWALProvider.getCurrentFileName(wal); - - // Stop the cluster. (ensure restart since we're sharing MiniDFSCluster) + // Stop the cluster. (ensure restart since we're sharing MiniDFSCluster) try { DistributedFileSystem dfs = cluster.getFileSystem(); dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); @@ -425,7 +420,7 @@ public void testAppendClose() throws Exception { LOG.info("STOPPED first instance of the cluster"); } finally { // Restart the cluster - while (cluster.isClusterUp()){ + while (cluster.isClusterUp()) { LOG.error("Waiting for cluster to go down"); Thread.sleep(1000); } @@ -447,8 +442,8 @@ public void testAppendClose() throws Exception { // set the lease period to be 1 second so that the // namenode triggers lease recovery upon append request - Method setLeasePeriod = cluster.getClass() - .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE}); + Method setLeasePeriod = cluster.getClass().getDeclaredMethod("setLeasePeriod", + new Class[] { Long.TYPE, Long.TYPE }); setLeasePeriod.setAccessible(true); setLeasePeriod.invoke(cluster, 1000L, 1000L); try { @@ -478,13 +473,12 @@ public void run() { t.start(); // Timeout after 60 sec. 
Without correct patches, would be an infinite loop t.join(60 * 1000); - if(t.isAlive()) { + if (t.isAlive()) { t.interrupt(); throw new Exception("Timed out waiting for WAL.recoverLog()"); } - if (t.exception != null) - throw t.exception; + if (t.exception != null) throw t.exception; // Make sure you can read all the content WAL.Reader reader = wals.createReader(fs, walPath); @@ -492,14 +486,13 @@ public void run() { WAL.Entry entry = new WAL.Entry(); while (reader.next(entry) != null) { count++; - assertTrue("Should be one KeyValue per WALEdit", - entry.getEdit().getCells().size() == 1); + assertTrue("Should be one KeyValue per WALEdit", entry.getEdit().getCells().size() == 1); } assertEquals(total, count); reader.close(); // Reset the lease period - setLeasePeriod.invoke(cluster, new Object[]{ 60000L, 3600000L }); + setLeasePeriod.invoke(cluster, new Object[] { 60000L, 3600000L }); } /** @@ -525,16 +518,16 @@ public void testEditAdd() throws IOException { long timestamp = EnvironmentEdgeManager.currentTime(); WALEdit cols = new WALEdit(); for (int i = 0; i < colCount; i++) { - cols.add(new KeyValue(row, Bytes.toBytes("column"), - Bytes.toBytes(Integer.toString(i)), - timestamp, new byte[] { (byte)(i + '0') })); + cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), + timestamp, new byte[] { (byte) (i + '0') })); } RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(row) .setEndKey(Bytes.toBytes(Bytes.toString(row) + "1")).build(); final WAL log = wals.getWAL(info); final long txid = log.appendData(info, new WALKeyImpl(info.getEncodedNameAsBytes(), - htd.getTableName(), EnvironmentEdgeManager.currentTime(), mvcc, scopes), cols); + htd.getTableName(), EnvironmentEdgeManager.currentTime(), mvcc, scopes), + cols); log.sync(txid); log.startCacheFlush(info.getEncodedNameAsBytes(), htd.getColumnFamilyNames()); log.completeCacheFlush(info.getEncodedNameAsBytes(), HConstants.NO_SEQNUM); @@ -554,7 +547,7 @@ public void testEditAdd() throws IOException { Cell cell = val.getCells().get(0); assertTrue(Bytes.equals(row, 0, row.length, cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); - assertEquals((byte)(i + '0'), CellUtil.cloneValue(cell)[0]); + assertEquals((byte) (i + '0'), CellUtil.cloneValue(cell)[0]); System.out.println(key + " " + val); } } finally { @@ -583,14 +576,14 @@ public void testAppend() throws IOException { long timestamp = EnvironmentEdgeManager.currentTime(); WALEdit cols = new WALEdit(); for (int i = 0; i < colCount; i++) { - cols.add(new KeyValue(row, Bytes.toBytes("column"), - Bytes.toBytes(Integer.toString(i)), - timestamp, new byte[] { (byte)(i + '0') })); + cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), + timestamp, new byte[] { (byte) (i + '0') })); } RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); final WAL log = wals.getWAL(hri); final long txid = log.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), - htd.getTableName(), EnvironmentEdgeManager.currentTime(), mvcc, scopes), cols); + htd.getTableName(), EnvironmentEdgeManager.currentTime(), mvcc, scopes), + cols); log.sync(txid); log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getColumnFamilyNames()); log.completeCacheFlush(hri.getEncodedNameAsBytes(), HConstants.NO_SEQNUM); @@ -602,8 +595,8 @@ public void testAppend() throws IOException { assertEquals(colCount, entry.getEdit().size()); int idx = 0; for (Cell val : entry.getEdit().getCells()) { - 
assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(), - entry.getKey().getEncodedRegionName())); + assertTrue( + Bytes.equals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName())); assertTrue(htd.getTableName().equals(entry.getKey().getTableName())); assertTrue(Bytes.equals(row, 0, row.length, val.getRowArray(), val.getRowOffset(), val.getRowLength())); @@ -626,7 +619,7 @@ public void testAppend() throws IOException { public void testVisitors() throws Exception { final int COL_COUNT = 10; final TableName tableName = TableName.valueOf(currentTest.getMethodName()); - final byte [] row = Bytes.toBytes("row"); + final byte[] row = Bytes.toBytes("row"); final DumbWALActionsListener visitor = new DumbWALActionsListener(); final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1); long timestamp = EnvironmentEdgeManager.currentTime(); @@ -638,21 +631,21 @@ public void testVisitors() throws Exception { log.registerWALActionsListener(visitor); for (int i = 0; i < COL_COUNT; i++) { WALEdit cols = new WALEdit(); - cols.add(new KeyValue(row, Bytes.toBytes("column"), - Bytes.toBytes(Integer.toString(i)), - timestamp, new byte[]{(byte) (i + '0')})); + cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), + timestamp, new byte[] { (byte) (i + '0') })); log.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes), cols); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), + cols); } log.sync(); assertEquals(COL_COUNT, visitor.increments); log.unregisterWALActionsListener(visitor); WALEdit cols = new WALEdit(); - cols.add(new KeyValue(row, Bytes.toBytes("column"), - Bytes.toBytes(Integer.toString(11)), - timestamp, new byte[]{(byte) (11 + '0')})); + cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(11)), + timestamp, new byte[] { (byte) (11 + '0') })); log.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes), cols); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), + cols); log.sync(); assertEquals(COL_COUNT, visitor.increments); } @@ -684,8 +677,8 @@ public void testWALProviders() throws IOException { // with not only system tables WALFactory walFactory = new WALFactory(conf, this.currentServername.toString()); assertEquals(SyncReplicationWALProvider.class, walFactory.getWALProvider().getClass()); - WALProvider wrappedWALProvider = ((SyncReplicationWALProvider) walFactory.getWALProvider()) - .getWrappedProvider(); + WALProvider wrappedWALProvider = + ((SyncReplicationWALProvider) walFactory.getWALProvider()).getWrappedProvider(); assertEquals(wrappedWALProvider.getClass(), walFactory.getMetaProvider().getClass()); // if providers are not set and do not enable SyncReplicationWALProvider @@ -698,8 +691,8 @@ public void testOnlySetWALProvider() throws IOException { Configuration conf = new Configuration(); conf.set(WAL_PROVIDER, WALFactory.Providers.multiwal.name()); WALFactory walFactory = new WALFactory(conf, this.currentServername.toString()); - WALProvider wrappedWALProvider = ((SyncReplicationWALProvider) walFactory.getWALProvider()) - .getWrappedProvider(); + WALProvider wrappedWALProvider = + ((SyncReplicationWALProvider) walFactory.getWALProvider()).getWrappedProvider(); assertEquals(SyncReplicationWALProvider.class, walFactory.getWALProvider().getClass()); // class of WALProvider and metaWALProvider are the same when metaWALProvider is not set @@ 
-712,8 +705,8 @@ public void testOnlySetMetaWALProvider() throws IOException { Configuration conf = new Configuration(); conf.set(META_WAL_PROVIDER, WALFactory.Providers.asyncfs.name()); WALFactory walFactory = new WALFactory(conf, this.currentServername.toString()); - WALProvider wrappedWALProvider = ((SyncReplicationWALProvider) walFactory.getWALProvider()) - .getWrappedProvider(); + WALProvider wrappedWALProvider = + ((SyncReplicationWALProvider) walFactory.getWALProvider()).getWrappedProvider(); assertEquals(SyncReplicationWALProvider.class, walFactory.getWALProvider().getClass()); assertEquals(WALFactory.Providers.defaultProvider.clazz, wrappedWALProvider.getClass()); @@ -725,21 +718,21 @@ public void testDefaultProvider() throws IOException { final Configuration conf = new Configuration(); // AsyncFSWal is the default, we should be able to request any WAL. final WALFactory normalWalFactory = new WALFactory(conf, this.currentServername.toString()); - Class fshLogProvider = normalWalFactory.getProviderClass( - WALFactory.WAL_PROVIDER, Providers.filesystem.name()); + Class fshLogProvider = + normalWalFactory.getProviderClass(WALFactory.WAL_PROVIDER, Providers.filesystem.name()); assertEquals(Providers.filesystem.clazz, fshLogProvider); // Imagine a world where MultiWAL is the default - final WALFactory customizedWalFactory = new WALFactory( - conf, this.currentServername.toString()) { - @Override - Providers getDefaultProvider() { - return Providers.multiwal; - } - }; + final WALFactory customizedWalFactory = + new WALFactory(conf, this.currentServername.toString()) { + @Override + Providers getDefaultProvider() { + return Providers.multiwal; + } + }; // If we don't specify a WALProvider, we should get the default implementation. - Class multiwalProviderClass = customizedWalFactory.getProviderClass( - WALFactory.WAL_PROVIDER, Providers.multiwal.name()); + Class multiwalProviderClass = + customizedWalFactory.getProviderClass(WALFactory.WAL_PROVIDER, Providers.multiwal.name()); assertEquals(Providers.multiwal.clazz, multiwalProviderClass); } @@ -748,8 +741,8 @@ public void testCustomProvider() throws IOException { final Configuration config = new Configuration(); config.set(WALFactory.WAL_PROVIDER, IOTestProvider.class.getName()); final WALFactory walFactory = new WALFactory(config, this.currentServername.toString()); - Class walProvider = walFactory.getProviderClass( - WALFactory.WAL_PROVIDER, Providers.filesystem.name()); + Class walProvider = + walFactory.getProviderClass(WALFactory.WAL_PROVIDER, Providers.filesystem.name()); assertEquals(IOTestProvider.class, walProvider); WALProvider metaWALProvider = walFactory.getMetaProvider(); assertEquals(IOTestProvider.class, metaWALProvider.getClass()); @@ -760,8 +753,8 @@ public void testCustomMetaProvider() throws IOException { final Configuration config = new Configuration(); config.set(WALFactory.META_WAL_PROVIDER, IOTestProvider.class.getName()); final WALFactory walFactory = new WALFactory(config, this.currentServername.toString()); - Class walProvider = walFactory.getProviderClass( - WALFactory.WAL_PROVIDER, Providers.filesystem.name()); + Class walProvider = + walFactory.getProviderClass(WALFactory.WAL_PROVIDER, Providers.filesystem.name()); assertEquals(Providers.filesystem.clazz, walProvider); WALProvider metaWALProvider = walFactory.getMetaProvider(); assertEquals(IOTestProvider.class, metaWALProvider.getClass()); @@ -771,7 +764,8 @@ public void testCustomMetaProvider() throws IOException { public void testReaderClosedOnBadCodec() 
throws IOException { // Create our own Configuration and WALFactory to avoid breaking other test methods Configuration confWithCodec = new Configuration(conf); - confWithCodec.setClass(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, BrokenWALCellCodec.class, Codec.class); + confWithCodec.setClass(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, BrokenWALCellCodec.class, + Codec.class); WALFactory customFactory = new WALFactory(confWithCodec, this.currentServername.toString()); // Hack a Proxy over the FileSystem so that we can track the InputStreams opened by @@ -808,11 +802,12 @@ public FSDataInputStream open(Path p, int blockSize) throws IOException { try { // Write one column in one edit. WALEdit cols = new WALEdit(); - cols.add(new KeyValue(row, Bytes.toBytes("column"), - Bytes.toBytes("0"), EnvironmentEdgeManager.currentTime(), new byte[] { 0 })); + cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes("0"), + EnvironmentEdgeManager.currentTime(), new byte[] { 0 })); final WAL log = customFactory.getWAL(hri); final long txid = log.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), - htd.getTableName(), EnvironmentEdgeManager.currentTime(), mvcc, scopes), cols); + htd.getTableName(), EnvironmentEdgeManager.currentTime(), mvcc, scopes), + cols); // Sync the edit to the WAL log.sync(txid); log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getColumnFamilyNames()); @@ -834,9 +829,8 @@ public FSDataInputStream open(Path p, int blockSize) throws IOException { // We should have exactly one reader assertEquals(1, openedReaders.size()); // And that reader should be closed. - long unclosedReaders = openedReaders.stream() - .filter((r) -> !r.isClosed.get()) - .collect(Collectors.counting()); + long unclosedReaders = + openedReaders.stream().filter((r) -> !r.isClosed.get()).collect(Collectors.counting()); assertEquals("Should not find any open readers", 0, (int) unclosedReaders); } finally { if (reader != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java index b1042e257f0a..df950664c396 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestWALFiltering { @ClassRule @@ -58,8 +58,7 @@ public class TestWALFiltering { private static final int NUM_RS = 4; - private static final TableName TABLE_NAME = - TableName.valueOf("TestWALFiltering"); + private static final TableName TABLE_NAME = TableName.valueOf("TestWALFiltering"); private static final byte[] CF1 = Bytes.toBytes("MyCF1"); private static final byte[] CF2 = Bytes.toBytes("MyCF2"); private static final byte[][] FAMILIES = { CF1, CF2 }; @@ -78,8 +77,8 @@ public void tearDown() throws Exception { } private void fillTable() throws IOException, InterruptedException { - Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES, 3, - Bytes.toBytes("row0"), Bytes.toBytes("row99"), NUM_RS); + Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES, 3, Bytes.toBytes("row0"), + Bytes.toBytes("row99"), NUM_RS); Random rand = new Random(19387129L); for (int iStoreFile = 0; iStoreFile < 4; ++iStoreFile) { for (int iRow = 0; iRow < 100; ++iRow) { @@ -91,9 +90,9 @@ private void fillTable() throws IOException, InterruptedException { final long ts = Math.abs(rand.nextInt()); final byte[] qual = Bytes.toBytes("col" + iCol); if (rand.nextBoolean()) { - final byte[] value = Bytes.toBytes("value_for_row_" + iRow + - "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" + - ts + "_random_" + rand.nextLong()); + final byte[] value = + Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" + + iCol + "_ts_" + ts + "_random_" + rand.nextLong()); put.addColumn(cf, qual, ts, value); } else if (rand.nextDouble() < 0.8) { del.addColumn(cf, qual, ts); @@ -109,9 +108,8 @@ private void fillTable() throws IOException, InterruptedException { } @Test - public void testFlushedSequenceIdsSentToHMaster() - throws IOException, InterruptedException, - org.apache.hbase.thirdparty.com.google.protobuf.ServiceException, ServiceException { + public void testFlushedSequenceIdsSentToHMaster() throws IOException, InterruptedException, + org.apache.hbase.thirdparty.com.google.protobuf.ServiceException, ServiceException { SortedMap allFlushedSequenceIds = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (int i = 0; i < NUM_RS; ++i) { flushAllRegions(i); @@ -122,11 +120,10 @@ public void testFlushedSequenceIdsSentToHMaster() for (byte[] regionName : getRegionsByServer(i)) { if (allFlushedSequenceIds.containsKey(regionName)) { GetLastFlushedSequenceIdRequest req = - RequestConverter.buildGetLastFlushedSequenceIdRequest(regionName); + RequestConverter.buildGetLastFlushedSequenceIdRequest(regionName); - assertEquals((long)allFlushedSequenceIds.get(regionName), - master.getMasterRpcServices().getLastFlushedSequenceId( - null, req).getLastFlushedSequenceId()); + assertEquals((long) allFlushedSequenceIds.get(regionName), master.getMasterRpcServices() + .getLastFlushedSequenceId(null, req).getLastFlushedSequenceId()); } } } @@ -145,13 +142,11 @@ private HRegionServer getRegionServer(int rsId) { return TEST_UTIL.getMiniHBaseCluster().getRegionServer(rsId); } - private void flushAllRegions(int rsId) - throws ServiceException, - 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException, IOException { + private void flushAllRegions(int rsId) throws ServiceException, + org.apache.hbase.thirdparty.com.google.protobuf.ServiceException, IOException { HRegionServer hrs = getRegionServer(rsId); for (byte[] regionName : getRegionsByServer(rsId)) { - FlushRegionRequest request = - RequestConverter.buildFlushRegionRequest(regionName); + FlushRegionRequest request = RequestConverter.buildFlushRegionRequest(regionName); hrs.getRSRpcServices().flushRegion(null, request); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java index 68bf5d043449..0ed1ca5f35f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ /** * Simple testing of a few WAL methods. */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestWALMethods { @ClassRule @@ -56,8 +56,7 @@ public class TestWALMethods { HBaseClassTestRule.forClass(TestWALMethods.class); private static final byte[] TEST_REGION = Bytes.toBytes("test_region"); - private static final TableName TEST_TABLE = - TableName.valueOf("test_table"); + private static final TableName TEST_TABLE = TableName.valueOf("test_table"); private final HBaseTestingUtil util = new HBaseTestingUtil(); @@ -79,11 +78,12 @@ public void testServerNameFromTestWAL() throws Exception { } /** - * Assert that getSplitEditFilesSorted returns files in expected order and - * that it skips moved-aside files. + * Assert that getSplitEditFilesSorted returns files in expected order and that it skips + * moved-aside files. 
* @throws IOException */ - @Test public void testGetSplitEditFilesSorted() throws IOException { + @Test + public void testGetSplitEditFilesSorted() throws IOException { FileSystem fs = FileSystem.get(util.getConfiguration()); Path regiondir = util.getDataTestDir("regiondir"); fs.delete(regiondir, true); @@ -93,11 +93,9 @@ public void testServerNameFromTestWAL() throws Exception { createFile(fs, recoverededits, first); createFile(fs, recoverededits, WALSplitUtil.formatRecoveredEditsFileName(0)); createFile(fs, recoverededits, WALSplitUtil.formatRecoveredEditsFileName(1)); - createFile(fs, recoverededits, WALSplitUtil - .formatRecoveredEditsFileName(11)); + createFile(fs, recoverededits, WALSplitUtil.formatRecoveredEditsFileName(11)); createFile(fs, recoverededits, WALSplitUtil.formatRecoveredEditsFileName(2)); - createFile(fs, recoverededits, WALSplitUtil - .formatRecoveredEditsFileName(50)); + createFile(fs, recoverededits, WALSplitUtil.formatRecoveredEditsFileName(50)); String last = WALSplitUtil.formatRecoveredEditsFileName(Long.MAX_VALUE); createFile(fs, recoverededits, last); createFile(fs, recoverededits, @@ -111,31 +109,22 @@ public void testServerNameFromTestWAL() throws Exception { assertEquals(7, files.size()); assertEquals(files.pollFirst().getName(), first); assertEquals(files.pollLast().getName(), last); - assertEquals(files.pollFirst().getName(), - WALSplitUtil - .formatRecoveredEditsFileName(0)); - assertEquals(files.pollFirst().getName(), - WALSplitUtil - .formatRecoveredEditsFileName(1)); - assertEquals(files.pollFirst().getName(), - WALSplitUtil - .formatRecoveredEditsFileName(2)); - assertEquals(files.pollFirst().getName(), - WALSplitUtil - .formatRecoveredEditsFileName(11)); + assertEquals(files.pollFirst().getName(), WALSplitUtil.formatRecoveredEditsFileName(0)); + assertEquals(files.pollFirst().getName(), WALSplitUtil.formatRecoveredEditsFileName(1)); + assertEquals(files.pollFirst().getName(), WALSplitUtil.formatRecoveredEditsFileName(2)); + assertEquals(files.pollFirst().getName(), WALSplitUtil.formatRecoveredEditsFileName(11)); } - private void createFile(final FileSystem fs, final Path testdir, - final String name) - throws IOException { + private void createFile(final FileSystem fs, final Path testdir, final String name) + throws IOException { FSDataOutputStream fdos = fs.create(new Path(testdir, name), true); fdos.close(); } @Test public void testRegionEntryBuffer() throws Exception { - EntryBuffers.RegionEntryBuffer reb = new EntryBuffers.RegionEntryBuffer( - TEST_TABLE, TEST_REGION); + EntryBuffers.RegionEntryBuffer reb = + new EntryBuffers.RegionEntryBuffer(TEST_TABLE, TEST_REGION); assertEquals(0, reb.heapSize()); reb.appendEntry(createTestLogEntry(1)); @@ -144,7 +133,7 @@ public void testRegionEntryBuffer() throws Exception { @Test public void testEntrySink() throws Exception { - EntryBuffers sink = new EntryBuffers(new PipelineController(), 1*1024*1024); + EntryBuffers sink = new EntryBuffers(new PipelineController(), 1 * 1024 * 1024); for (int i = 0; i < 1000; i++) { WAL.Entry entry = createTestLogEntry(i); sink.appendEntry(entry); @@ -189,11 +178,10 @@ private WAL.Entry createTestLogEntry(int i) { WALEdit edit = new WALEdit(); edit.add(KeyValueTestUtil.create("row", "fam", "qual", 1234, "val")); - WALKeyImpl key = new WALKeyImpl(TEST_REGION, TEST_TABLE, seq, now, - HConstants.DEFAULT_CLUSTER_ID); + WALKeyImpl key = + new WALKeyImpl(TEST_REGION, TEST_TABLE, seq, now, HConstants.DEFAULT_CLUSTER_ID); WAL.Entry entry = new WAL.Entry(key, edit); return entry; 
} } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java index 5fa16bc79249..1fb0dd65c402 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java index bc06147d7cca..1e9888df4064 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALProvider.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.wal; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Comparator; import org.apache.hadoop.fs.Path; @@ -28,29 +29,29 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestWALProvider { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALProvider.class); + HBaseClassTestRule.forClass(TestWALProvider.class); /** * Test start time comparator. */ @Test public void testWALStartTimeComparator() throws IOException { - Path metaPath1 = new Path("hdfs://localhost:59875/user/stack/test-data/" + - "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + - "localhost%2C59908%2C1600304600425.meta.1600304604319.meta"); - Path metaPath2 = new Path("hdfs://localhost:59875/user/stack/test-data/" + - "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + - "localhost%2C59908%2C1600304600425.meta.1600304604320.meta"); - Path path3 = new Path("hdfs://localhost:59875/user/stack/test-data/" + - "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + - "localhost%2C59908%2C1600304600425.1600304604321"); - Path metaPath4 = new Path("hdfs://localhost:59875/user/stack/test-data/" + - "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + - "localhost%2C59908%2C1600304600425.meta.1600304604321.meta"); + Path metaPath1 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.meta.1600304604319.meta"); + Path metaPath2 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.meta.1600304604320.meta"); + Path path3 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.1600304604321"); + Path metaPath4 = new Path("hdfs://localhost:59875/user/stack/test-data/" + + "f4cb8ffa-6ff7-59a6-f167-6cc00f24899a/WALs/localhost,59908,1600304600425/" + + "localhost%2C59908%2C1600304600425.meta.1600304604321.meta"); Comparator c = new AbstractFSWALProvider.WALStartTimeComparator(); assertTrue(c.compare(metaPath1, 
metaPath1) == 0); assertTrue(c.compare(metaPath2, metaPath2) == 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java index 924b21a02a0b..6037ba7a9a10 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; + import java.io.FileNotFoundException; import java.io.IOException; import java.nio.ByteBuffer; @@ -64,7 +65,7 @@ /** * Test that verifies WAL written by SecureProtobufLogWriter is not readable by ProtobufLogReader */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestWALReaderOnSecureWAL { @ClassRule @@ -119,7 +120,8 @@ private Path writeWAL(final WALFactory wals, final String tblName, boolean offhe kvs.add(kv); } wal.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, - EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), + kvs); } wal.sync(); final Path walPath = AbstractFSWALProvider.getCurrentFileName(wal); @@ -144,8 +146,7 @@ public void testWALReaderOnSecureWALWithOffheapKeyValues() throws Exception { private void testSecureWALInternal(boolean offheap) throws IOException, FileNotFoundException { Configuration conf = TEST_UTIL.getConfiguration(); - conf.setClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class, - WAL.Reader.class); + conf.setClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class, WAL.Reader.class); conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class, WALProvider.Writer.class); conf.setClass("hbase.regionserver.hlog.async.writer.impl", SecureAsyncProtobufLogWriter.class, @@ -158,7 +159,7 @@ private void testSecureWALInternal(boolean offheap) throws IOException, FileNotF // Insure edits are not plaintext long length = fs.getFileStatus(walPath).getLen(); FSDataInputStream in = fs.open(walPath); - byte[] fileData = new byte[(int)length]; + byte[] fileData = new byte[(int) length]; IOUtils.readFully(in, fileData); in.close(); assertFalse("Cells appear to be plaintext", Bytes.contains(fileData, value)); @@ -188,15 +189,15 @@ public void testSecureWALReaderOnWAL() throws Exception { WALProvider.Writer.class); conf.setBoolean(WAL_ENCRYPTION, false); FileSystem fs = TEST_UTIL.getTestFileSystem(); - final WALFactory wals = new WALFactory(conf, ServerName - .valueOf(currentTest.getMethodName(), 16010, EnvironmentEdgeManager.currentTime()) - .toString()); + final WALFactory wals = new WALFactory(conf, + ServerName.valueOf(currentTest.getMethodName(), 16010, EnvironmentEdgeManager.currentTime()) + .toString()); Path walPath = writeWAL(wals, currentTest.getMethodName(), false); // Ensure edits are plaintext long length = fs.getFileStatus(walPath).getLen(); FSDataInputStream in = fs.open(walPath); - byte[] fileData = new byte[(int)length]; + byte[] fileData = new byte[(int) length]; IOUtils.readFully(in, fileData); in.close(); assertTrue("Cells should be plaintext", Bytes.contains(fileData, value)); @@ -215,7 +216,7 @@ public void testSecureWALReaderOnWAL() throws Exception { WALSplitter s = new WALSplitter(wals, conf, rootdir, fs, rootdir, fs, null, null, null); 
s.splitWAL(listStatus[0], null); Path file = new Path(ZKSplitLog.getSplitLogDir(rootdir, listStatus[0].getPath().getName()), - "corrupt"); + "corrupt"); assertTrue(!fs.exists(file)); } catch (IOException ioe) { assertTrue("WAL should have been processed", false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALRootDir.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALRootDir.java index dc7df65266a1..36451fd00cef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALRootDir.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALRootDir.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -61,8 +61,8 @@ public class TestWALRootDir { private static FileSystem fs; private static FileSystem walFs; private static final TableName tableName = TableName.valueOf("TestWALWALDir"); - private static final byte [] rowName = Bytes.toBytes("row"); - private static final byte [] family = Bytes.toBytes("column"); + private static final byte[] rowName = Bytes.toBytes("row"); + private static final byte[] family = Bytes.toBytes("column"); private static Path walRootDir; private static Path rootDir; private static WALFactory wals; @@ -95,29 +95,28 @@ public void testWALRootDir() throws Exception { WAL log = wals.getWAL(regionInfo); assertEquals(1, getWALFiles(walFs, walRootDir).size()); - byte [] value = Bytes.toBytes("value"); + byte[] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), - EnvironmentEdgeManager.currentTime(), value)); - long txid = log.appendData(regionInfo, getWalKey(EnvironmentEdgeManager.currentTime(), - regionInfo, 0), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(), + value)); + long txid = log.appendData(regionInfo, + getWalKey(EnvironmentEdgeManager.currentTime(), regionInfo, 0), edit); log.sync(txid); - assertEquals("Expect 1 log have been created", 1, - getWALFiles(walFs, walRootDir).size()); + assertEquals("Expect 1 log have been created", 1, getWALFiles(walFs, walRootDir).size()); log.rollWriter(); - //Create 1 more WAL - assertEquals(2, getWALFiles(walFs, new Path(walRootDir, - HConstants.HREGION_LOGDIR_NAME)).size()); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), - EnvironmentEdgeManager.currentTime(), value)); - txid = log.appendData(regionInfo, getWalKey(EnvironmentEdgeManager.currentTime(), - regionInfo, 1), edit); + // Create 1 more WAL + assertEquals(2, + getWALFiles(walFs, new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME)).size()); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(), + value)); + txid = log.appendData(regionInfo, + getWalKey(EnvironmentEdgeManager.currentTime(), regionInfo, 1), edit); log.sync(txid); log.rollWriter(); log.shutdown(); - assertEquals("Expect 3 logs in WALs dir", 3, getWALFiles(walFs, - new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME)).size()); + assertEquals("Expect 3 logs in WALs dir", 3, + getWALFiles(walFs, new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME)).size()); } private WALKeyImpl getWalKey(final long time, RegionInfo hri, final long startPoint) { @@ -125,8 +124,7 @@ private WALKeyImpl getWalKey(final long time, RegionInfo hri, final long startPo new MultiVersionConcurrencyControl(startPoint)); } - 
private List getWALFiles(FileSystem fs, Path dir) - throws IOException { + private List getWALFiles(FileSystem fs, Path dir) throws IOException { List result = new ArrayList(); LOG.debug("Scanning " + dir.toString() + " for WAL files"); @@ -146,10 +144,9 @@ private List getWALFiles(FileSystem fs, Path dir) return result; } - private static void cleanup() throws Exception{ + private static void cleanup() throws Exception { walFs.delete(walRootDir, true); fs.delete(rootDir, true); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java index 3659981c30a3..0cb93d8708de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.FileNotFoundException; import java.io.IOException; import java.lang.reflect.Method; @@ -91,17 +92,19 @@ import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Joiner; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; /** * Testing {@link WAL} splitting code. */ -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestWALSplit { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -125,8 +128,7 @@ public class TestWALSplit { private static final int ENTRIES = 10; // entries per writer per region private static final String FILENAME_BEING_SPLIT = "testfile"; - private static final TableName TABLE_NAME = - TableName.valueOf("t1"); + private static final TableName TABLE_NAME = TableName.valueOf("t1"); private static final byte[] FAMILY = Bytes.toBytes("f1"); private static final byte[] QUALIFIER = Bytes.toBytes("q1"); private static final byte[] VALUE = Bytes.toBytes("v1"); @@ -134,25 +136,21 @@ public class TestWALSplit { private static List REGIONS = new ArrayList<>(); private static String ROBBER; private static String ZOMBIE; - private static String [] GROUP = new String [] {"supergroup"}; + private static String[] GROUP = new String[] { "supergroup" }; static enum Corruptions { - INSERT_GARBAGE_ON_FIRST_LINE, - INSERT_GARBAGE_IN_THE_MIDDLE, - APPEND_GARBAGE, - TRUNCATE, + INSERT_GARBAGE_ON_FIRST_LINE, INSERT_GARBAGE_IN_THE_MIDDLE, APPEND_GARBAGE, TRUNCATE, TRUNCATE_TRAILER } @BeforeClass public static void setUpBeforeClass() throws Exception { conf = TEST_UTIL.getConfiguration(); - conf.setClass("hbase.regionserver.hlog.writer.impl", - InstrumentedLogWriter.class, Writer.class); - // This is how you turn off shortcircuit read currently. TODO: Fix. Should read config. + conf.setClass("hbase.regionserver.hlog.writer.impl", InstrumentedLogWriter.class, Writer.class); + // This is how you turn off shortcircuit read currently. TODO: Fix. Should read config. System.setProperty("hbase.tests.use.shortcircuit.reads", "false"); // Create fake maping user to group and set it to the conf. 
- Map u2g_map = new HashMap<>(2); + Map u2g_map = new HashMap<>(2); ROBBER = User.getCurrent().getName() + "-robber"; ZOMBIE = User.getCurrent().getName() + "-zombie"; u2g_map.put(ROBBER, GROUP); @@ -186,20 +184,19 @@ public void setUp() throws Exception { Collections.addAll(REGIONS, "bbb", "ccc"); InstrumentedLogWriter.activateFailure = false; wals = new WALFactory(conf, name.getMethodName()); - WALDIR = new Path(HBASELOGDIR, - AbstractFSWALProvider.getWALDirectoryName(ServerName.valueOf(name.getMethodName(), - 16010, EnvironmentEdgeManager.currentTime()).toString())); - //fs.mkdirs(WALDIR); + WALDIR = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(ServerName + .valueOf(name.getMethodName(), 16010, EnvironmentEdgeManager.currentTime()).toString())); + // fs.mkdirs(WALDIR); } @After public void tearDown() throws Exception { try { wals.close(); - } catch(IOException exception) { + } catch (IOException exception) { // Some tests will move WALs out from under us. In those cases, we'll get an error on close. - LOG.info("Ignoring an error while closing down our WALFactory. Fine for some tests, but if" + - " you see a failure look here."); + LOG.info("Ignoring an error while closing down our WALFactory. Fine for some tests, but if" + + " you see a failure look here."); LOG.debug("exception details", exception); } finally { wals = null; @@ -224,7 +221,8 @@ public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedEx long startCount = counter.get(); zombie.start(); // Wait till writer starts going. - while (startCount == counter.get()) Threads.sleep(1); + while (startCount == counter.get()) + Threads.sleep(1); // Give it a second to write a few appends. Threads.sleep(1000); final Configuration conf2 = HBaseConfiguration.create(conf); @@ -232,8 +230,8 @@ public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedEx int count = robber.runAs(new PrivilegedExceptionAction() { @Override public Integer run() throws Exception { - StringBuilder ls = new StringBuilder("Contents of WALDIR (").append(WALDIR) - .append("):\n"); + StringBuilder ls = + new StringBuilder("Contents of WALDIR (").append(WALDIR).append("):\n"); for (FileStatus status : fs.listStatus(WALDIR)) { ls.append("\t").append(status.toString()).append("\n"); } @@ -244,16 +242,17 @@ public Integer run() throws Exception { Path[] logfiles = getLogForRegion(TABLE_NAME, region); assertEquals("wrong number of split files for region", numWriters, logfiles.length); int count = 0; - for (Path logfile: logfiles) { + for (Path logfile : logfiles) { count += countWAL(logfile); } return count; } }); LOG.info("zombie=" + counter.get() + ", robber=" + count); - assertTrue("The log file could have at most 1 extra log entry, but can't have less. " + - "Zombie could write " + counter.get() + " and logfile had only " + count, - counter.get() == count || counter.get() + 1 == count); + assertTrue( + "The log file could have at most 1 extra log entry, but can't have less. " + + "Zombie could write " + counter.get() + " and logfile had only " + count, + counter.get() == count || counter.get() + 1 == count); } finally { stop.set(true); zombie.interrupt(); @@ -262,10 +261,10 @@ public Integer run() throws Exception { } /** - * This thread will keep writing to a 'wal' file even after the split process has started. - * It simulates a region server that was considered dead but woke up and wrote some more to the - * last log entry. 
Does its writing as an alternate user in another filesystem instance to - * simulate better it being a regionserver. + * This thread will keep writing to a 'wal' file even after the split process has started. It + * simulates a region server that was considered dead but woke up and wrote some more to the last + * log entry. Does its writing as an alternate user in another filesystem instance to simulate + * better it being a regionserver. */ class ZombieLastLogWriterRegionServer extends Thread { final AtomicLong editsCount; @@ -278,8 +277,7 @@ class ZombieLastLogWriterRegionServer extends Thread { final User user; public ZombieLastLogWriterRegionServer(AtomicLong counter, AtomicBoolean stop, - final String region, final int writers) - throws IOException, InterruptedException { + final String region, final int writers) throws IOException, InterruptedException { super("ZombieLastLogWriterRegionServer"); setDaemon(true); this.stop = stop; @@ -304,7 +302,7 @@ private void doWriting() throws IOException, InterruptedException { this.user.runAs(new PrivilegedExceptionAction() { @Override public Object run() throws Exception { - // Index of the WAL we want to keep open. generateWALs will leave open the WAL whose + // Index of the WAL we want to keep open. generateWALs will leave open the WAL whose // index we supply here. int walToKeepOpen = numOfWriters - 1; // The below method writes numOfWriters files each with ENTRIES entries for a total of @@ -332,11 +330,11 @@ public Object run() throws Exception { } private void loop(final Writer writer) { - byte [] regionBytes = Bytes.toBytes(this.region); + byte[] regionBytes = Bytes.toBytes(this.region); while (!stop.get()) { try { long seq = appendEntry(writer, TABLE_NAME, regionBytes, - Bytes.toBytes("r" + editsCount.get()), regionBytes, QUALIFIER, VALUE, 0); + Bytes.toBytes("r" + editsCount.get()), regionBytes, QUALIFIER, VALUE, 0); long count = editsCount.incrementAndGet(); LOG.info(getName() + " sync count=" + count + ", seq=" + seq); try { @@ -347,8 +345,8 @@ private void loop(final Writer writer) { } catch (IOException ex) { LOG.error(getName() + " ex " + ex.toString()); if (ex instanceof RemoteException) { - LOG.error("Juliet: got RemoteException " + ex.getMessage() + - " while writing " + (editsCount.get() + 1)); + LOG.error("Juliet: got RemoteException " + ex.getMessage() + " while writing " + + (editsCount.get() + 1)); } else { LOG.error(getName() + " failed to write....at " + editsCount.get()); fail("Failed to write " + editsCount.get()); @@ -375,15 +373,14 @@ public void testRecoveredEditsPathForMeta() throws IOException { } /** - * Test old recovered edits file doesn't break WALSplitter. - * This is useful in upgrading old instances. + * Test old recovered edits file doesn't break WALSplitter. This is useful in upgrading old + * instances. 
*/ @Test public void testOldRecoveredEditsFileSidelined() throws IOException { Path p = createRecoveredEditsPathForRegion(); Path tdir = CommonFSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME); - Path regiondir = new Path(tdir, - RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); + Path regiondir = new Path(tdir, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); fs.mkdirs(regiondir); Path parent = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir); assertEquals(HConstants.RECOVERED_EDITS_DIR, parent.getName()); @@ -399,9 +396,8 @@ private Path createRecoveredEditsPathForRegion() throws IOException { Entry entry = new Entry( new WALKeyImpl(encoded, TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), new WALEdit()); - Path p = WALSplitUtil - .getRegionSplitEditsPath(TableName.META_TABLE_NAME, encoded, 1, FILENAME_BEING_SPLIT, - TMPDIRNAME, conf); + Path p = WALSplitUtil.getRegionSplitEditsPath(TableName.META_TABLE_NAME, encoded, 1, + FILENAME_BEING_SPLIT, TMPDIRNAME, conf); return p; } @@ -421,7 +417,7 @@ private void useDifferentDFSClient() throws IOException { } @Test - public void testSplitPreservesEdits() throws IOException{ + public void testSplitPreservesEdits() throws IOException { final String REGION = "region__1"; REGIONS.clear(); REGIONS.add(REGION); @@ -437,7 +433,7 @@ public void testSplitPreservesEdits() throws IOException{ } @Test - public void testSplitRemovesRegionEventsEdits() throws IOException{ + public void testSplitRemovesRegionEventsEdits() throws IOException { final String REGION = "region__1"; REGIONS.clear(); REGIONS.add(REGION); @@ -454,19 +450,18 @@ public void testSplitRemovesRegionEventsEdits() throws IOException{ assertEquals(10, countWAL(splitLog[0])); } - @Test - public void testSplitLeavesCompactionEventsEdits() throws IOException{ + public void testSplitLeavesCompactionEventsEdits() throws IOException { RegionInfo hri = RegionInfoBuilder.newBuilder(TABLE_NAME).build(); REGIONS.clear(); REGIONS.add(hri.getEncodedName()); Path regionDir = - new Path(CommonFSUtils.getTableDir(HBASEDIR, TABLE_NAME), hri.getEncodedName()); + new Path(CommonFSUtils.getTableDir(HBASEDIR, TABLE_NAME), hri.getEncodedName()); LOG.info("Creating region directory: " + regionDir); assertTrue(fs.mkdirs(regionDir)); Writer writer = generateWALs(1, 10, 0, 10); - String[] compactInputs = new String[]{"file1", "file2", "file3"}; + String[] compactInputs = new String[] { "file1", "file2", "file3" }; String compactOutput = "file4"; appendCompactionEvent(writer, hri, compactInputs, compactOutput); writer.close(); @@ -490,8 +485,7 @@ public void testSplitLeavesCompactionEventsEdits() throws IOException{ * @param expectedEntries -1 to not assert * @return the count across all regions */ - private int splitAndCount(final int expectedFiles, final int expectedEntries) - throws IOException { + private int splitAndCount(final int expectedFiles, final int expectedEntries) throws IOException { useDifferentDFSClient(); WALSplitter.split(HBASELOGDIR, WALDIR, OLDLOGDIR, fs, conf, wals); int result = 0; @@ -499,7 +493,7 @@ private int splitAndCount(final int expectedFiles, final int expectedEntries) Path[] logfiles = getLogForRegion(TABLE_NAME, region); assertEquals(expectedFiles, logfiles.length); int count = 0; - for (Path logfile: logfiles) { + for (Path logfile : logfiles) { count += countWAL(logfile); } if (-1 != expectedEntries) { @@ -541,8 +535,7 @@ public void testOpenZeroLengthReportedFileButWithDataGetsSplit() throws IOExcept public void 
testTralingGarbageCorruptionFileSkipErrorsPasses() throws IOException { conf.setBoolean(WALSplitter.SPLIT_SKIP_ERRORS_KEY, true); generateWALs(Integer.MAX_VALUE); - corruptWAL(new Path(WALDIR, WAL_FILE_PREFIX + "5"), - Corruptions.APPEND_GARBAGE, true); + corruptWAL(new Path(WALDIR, WAL_FILE_PREFIX + "5"), Corruptions.APPEND_GARBAGE, true); splitAndCount(NUM_WRITERS, NUM_WRITERS * ENTRIES); } @@ -550,17 +543,17 @@ public void testTralingGarbageCorruptionFileSkipErrorsPasses() throws IOExceptio public void testFirstLineCorruptionLogFileSkipErrorsPasses() throws IOException { conf.setBoolean(WALSplitter.SPLIT_SKIP_ERRORS_KEY, true); generateWALs(Integer.MAX_VALUE); - corruptWAL(new Path(WALDIR, WAL_FILE_PREFIX + "5"), - Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true); - splitAndCount(NUM_WRITERS - 1, (NUM_WRITERS - 1) * ENTRIES); //1 corrupt + corruptWAL(new Path(WALDIR, WAL_FILE_PREFIX + "5"), Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, + true); + splitAndCount(NUM_WRITERS - 1, (NUM_WRITERS - 1) * ENTRIES); // 1 corrupt } @Test public void testMiddleGarbageCorruptionSkipErrorsReadsHalfOfFile() throws IOException { conf.setBoolean(WALSplitter.SPLIT_SKIP_ERRORS_KEY, true); generateWALs(Integer.MAX_VALUE); - corruptWAL(new Path(WALDIR, WAL_FILE_PREFIX + "5"), - Corruptions.INSERT_GARBAGE_IN_THE_MIDDLE, false); + corruptWAL(new Path(WALDIR, WAL_FILE_PREFIX + "5"), Corruptions.INSERT_GARBAGE_IN_THE_MIDDLE, + false); // the entries in the original logs are alternating regions // considering the sequence file header, the middle corruption should // affect at least half of the entries @@ -568,7 +561,7 @@ public void testMiddleGarbageCorruptionSkipErrorsReadsHalfOfFile() throws IOExce int firstHalfEntries = (int) Math.ceil(ENTRIES / 2) - 1; int allRegionsCount = splitAndCount(NUM_WRITERS, -1); assertTrue("The file up to the corrupted area hasn't been parsed", - REGIONS.size() * (goodEntries + firstHalfEntries) <= allRegionsCount); + REGIONS.size() * (goodEntries + firstHalfEntries) <= allRegionsCount); } @Test @@ -587,7 +580,7 @@ public void testCorruptedFileGetsArchivedIfSkipErrors() throws IOException { } LOG.debug(archived.toString()); assertEquals(failureType.name() + ": expected to find all of our wals corrupt.", archivedLogs, - walDirContents); + walDirContents); } } @@ -597,8 +590,7 @@ public void testCorruptedFileGetsArchivedIfSkipErrors() throws IOException { */ private Set splitCorruptWALs(final FaultyProtobufLogReader.FailureType failureType) throws IOException { - Class backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl", - Reader.class); + Class backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl", Reader.class); InstrumentedLogWriter.activateFailure = false; try { @@ -632,29 +624,26 @@ private Set splitCorruptWALs(final FaultyProtobufLogReader.FailureType f WALSplitter.split(HBASELOGDIR, WALDIR, OLDLOGDIR, fs, conf, wals); return walDirContents; } finally { - conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass, - Reader.class); + conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass, Reader.class); } } - @Test (expected = IOException.class) - public void testTrailingGarbageCorruptionLogFileSkipErrorsFalseThrows() - throws IOException { + @Test(expected = IOException.class) + public void testTrailingGarbageCorruptionLogFileSkipErrorsFalseThrows() throws IOException { conf.setBoolean(WALSplitter.SPLIT_SKIP_ERRORS_KEY, false); splitCorruptWALs(FaultyProtobufLogReader.FailureType.BEGINNING); } @Test - public void 
testCorruptedLogFilesSkipErrorsFalseDoesNotTouchLogs() - throws IOException { + public void testCorruptedLogFilesSkipErrorsFalseDoesNotTouchLogs() throws IOException { conf.setBoolean(WALSplitter.SPLIT_SKIP_ERRORS_KEY, false); try { splitCorruptWALs(FaultyProtobufLogReader.FailureType.BEGINNING); } catch (IOException e) { LOG.debug("split with 'skip errors' set to 'false' correctly threw"); } - assertEquals("if skip.errors is false all files should remain in place", - NUM_WRITERS, fs.listStatus(WALDIR).length); + assertEquals("if skip.errors is false all files should remain in place", NUM_WRITERS, + fs.listStatus(WALDIR).length); } private void ignoreCorruption(final Corruptions corruption, final int entryCount, @@ -679,12 +668,14 @@ private void ignoreCorruption(final Corruptions corruption, final int entryCount Reader in = wals.createReader(fs, splitLog[0]); @SuppressWarnings("unused") Entry entry; - while ((entry = in.next()) != null) ++actualCount; + while ((entry = in.next()) != null) + ++actualCount; assertEquals(expectedCount, actualCount); in.close(); // should not have stored the EOF files as corrupt - FileStatus[] archivedLogs = fs.exists(CORRUPTDIR)? fs.listStatus(CORRUPTDIR): new FileStatus[0]; + FileStatus[] archivedLogs = + fs.exists(CORRUPTDIR) ? fs.listStatus(CORRUPTDIR) : new FileStatus[0]; assertEquals(0, archivedLogs.length); } @@ -692,7 +683,7 @@ private void ignoreCorruption(final Corruptions corruption, final int entryCount @Test public void testEOFisIgnored() throws IOException { int entryCount = 10; - ignoreCorruption(Corruptions.TRUNCATE, entryCount, entryCount-1); + ignoreCorruption(Corruptions.TRUNCATE, entryCount, entryCount - 1); } @Test @@ -718,17 +709,15 @@ public void testSplit() throws IOException { } @Test - public void testLogDirectoryShouldBeDeletedAfterSuccessfulSplit() - throws IOException { + public void testLogDirectoryShouldBeDeletedAfterSuccessfulSplit() throws IOException { generateWALs(-1); useDifferentDFSClient(); WALSplitter.split(HBASELOGDIR, WALDIR, OLDLOGDIR, fs, conf, wals); - FileStatus [] statuses = null; + FileStatus[] statuses = null; try { statuses = fs.listStatus(WALDIR); if (statuses != null) { - fail("Files left in log dir: " + - Joiner.on(",").join(FileUtil.stat2Paths(statuses))); + fail("Files left in log dir: " + Joiner.on(",").join(FileUtil.stat2Paths(statuses))); } } catch (FileNotFoundException e) { // hadoop 0.21 throws FNFE whereas hadoop 0.20 returns null @@ -737,7 +726,7 @@ public void testLogDirectoryShouldBeDeletedAfterSuccessfulSplit() @Test(expected = IOException.class) public void testSplitWillFailIfWritingToRegionFails() throws Exception { - //leave 5th log open so we could append the "trap" + // leave 5th log open so we could append the "trap" Writer writer = generateWALs(4); useDifferentDFSClient(); @@ -746,16 +735,16 @@ public void testSplitWillFailIfWritingToRegionFails() throws Exception { fs.mkdirs(regiondir); InstrumentedLogWriter.activateFailure = false; - appendEntry(writer, TABLE_NAME, Bytes.toBytes(region), - Bytes.toBytes("r" + 999), FAMILY, QUALIFIER, VALUE, 0); + appendEntry(writer, TABLE_NAME, Bytes.toBytes(region), Bytes.toBytes("r" + 999), FAMILY, + QUALIFIER, VALUE, 0); writer.close(); try { InstrumentedLogWriter.activateFailure = true; WALSplitter.split(HBASELOGDIR, WALDIR, OLDLOGDIR, fs, conf, wals); } catch (IOException e) { - assertTrue(e.getMessage(). 
- contains("This exception is instrumented and should only be thrown for testing")); + assertTrue(e.getMessage() + .contains("This exception is instrumented and should only be thrown for testing")); throw e; } finally { InstrumentedLogWriter.activateFailure = false; @@ -784,8 +773,7 @@ public void testIOEOnOutputThread() throws Exception { generateWALs(-1); useDifferentDFSClient(); FileStatus[] logfiles = fs.listStatus(WALDIR); - assertTrue("There should be some log file", - logfiles != null && logfiles.length > 0); + assertTrue("There should be some log file", logfiles != null && logfiles.length > 0); // wals with no entries (like the one we don't use in the factory) // won't cause a failure since nothing will ever be written. // pick the largest one since it's most likely to have entries. @@ -800,22 +788,23 @@ public void testIOEOnOutputThread() throws Exception { assertTrue("There should be some log greater than size 0.", 0 < largestSize); // Set up a splitter that will throw an IOE on the output side WALSplitter logSplitter = - new WALSplitter(wals, conf, HBASEDIR, fs, HBASEDIR, fs, null, null, null) { - @Override - protected Writer createWriter(Path logfile) throws IOException { - Writer mockWriter = Mockito.mock(Writer.class); - Mockito.doThrow(new IOException("Injected")).when(mockWriter) - .append(Mockito. any()); - return mockWriter; - } - }; - // Set up a background thread dumper. Needs a thread to depend on and then we need to run + new WALSplitter(wals, conf, HBASEDIR, fs, HBASEDIR, fs, null, null, null) { + @Override + protected Writer createWriter(Path logfile) throws IOException { + Writer mockWriter = Mockito.mock(Writer.class); + Mockito.doThrow(new IOException("Injected")).when(mockWriter) + .append(Mockito. any()); + return mockWriter; + } + }; + // Set up a background thread dumper. Needs a thread to depend on and then we need to run // the thread dumping in a background thread so it does not hold up the test. final AtomicBoolean stop = new AtomicBoolean(false); final Thread someOldThread = new Thread("Some-old-thread") { @Override public void run() { - while(!stop.get()) Threads.sleep(10); + while (!stop.get()) + Threads.sleep(10); } }; someOldThread.setDaemon(true); @@ -867,8 +856,8 @@ public void testMovedWALDuringRecovery() throws Exception { FileSystem spiedFs = Mockito.spy(fs); // The "File does not exist" part is very important, // that's how it comes out of HDFS - Mockito.doThrow(new LeaseExpiredException("Injected: File does not exist")). - when(spiedFs).append(Mockito.any()); + Mockito.doThrow(new LeaseExpiredException("Injected: File does not exist")).when(spiedFs) + .append(Mockito. any()); retryOverHdfsProblem(spiedFs); } @@ -885,9 +874,8 @@ public void testRetryOpenDuringRecovery() throws Exception { // last block is under recovery, HDFS may have problem to obtain // the block length, in which case, retry may help. 
Mockito.doAnswer(new Answer() { - private final String[] errors = new String[] { - "Cannot obtain block length", "Could not obtain the last block", - "Blocklist for " + OLDLOGDIR + " has changed"}; + private final String[] errors = new String[] { "Cannot obtain block length", + "Could not obtain the last block", "Blocklist for " + OLDLOGDIR + " has changed" }; private int count = 0; @Override @@ -895,9 +883,9 @@ public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable { if (count < 3) { throw new IOException(errors[count++]); } - return (FSDataInputStream)invocation.callRealMethod(); + return (FSDataInputStream) invocation.callRealMethod(); } - }).when(spiedFs).open(Mockito.any(), Mockito.anyInt()); + }).when(spiedFs).open(Mockito. any(), Mockito.anyInt()); retryOverHdfsProblem(spiedFs); } @@ -909,8 +897,7 @@ public void testTerminationAskedByReporter() throws IOException, CorruptedLogFil final AtomicInteger count = new AtomicInteger(); - CancelableProgressable localReporter - = new CancelableProgressable() { + CancelableProgressable localReporter = new CancelableProgressable() { @Override public boolean progress() { count.getAndIncrement(); @@ -923,9 +910,9 @@ public boolean progress() { @Override public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable { Thread.sleep(1500); // Sleep a while and wait report status invoked - return (FSDataInputStream)invocation.callRealMethod(); + return (FSDataInputStream) invocation.callRealMethod(); } - }).when(spiedFs).open(Mockito.any(), Mockito.anyInt()); + }).when(spiedFs).open(Mockito. any(), Mockito.anyInt()); try { conf.setInt("hbase.splitlog.report.period", 1000); @@ -942,17 +929,16 @@ public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable { } /** - * Test log split process with fake data and lots of edits to trigger threading - * issues. + * Test log split process with fake data and lots of edits to trigger threading issues. */ @Test public void testThreading() throws Exception { - doTestThreading(20000, 128*1024*1024, 0); + doTestThreading(20000, 128 * 1024 * 1024, 0); } /** - * Test blocking behavior of the log split process if writers are writing slower - * than the reader is reading. + * Test blocking behavior of the log split process if writers are writing slower than the reader + * is reading. */ @Test public void testThreadingSlowWriterSmallBuffer() throws Exception { @@ -960,19 +946,15 @@ public void testThreadingSlowWriterSmallBuffer() throws Exception { } /** - * Sets up a log splitter with a mock reader and writer. The mock reader generates - * a specified number of edits spread across 5 regions. The mock writer optionally - * sleeps for each edit it is fed. - * * - * After the split is complete, verifies that the statistics show the correct number - * of edits output into each region. - * + * Sets up a log splitter with a mock reader and writer. The mock reader generates a specified + * number of edits spread across 5 regions. The mock writer optionally sleeps for each edit it is + * fed. * After the split is complete, verifies that the statistics show the correct number of + * edits output into each region. 
* @param numFakeEdits number of fake edits to push through pipeline * @param bufferSize size of in-memory buffer * @param writerSlowness writer threads will sleep this many ms per edit */ - private void doTestThreading(final int numFakeEdits, - final int bufferSize, + private void doTestThreading(final int numFakeEdits, final int bufferSize, final int writerSlowness) throws Exception { Configuration localConf = new Configuration(conf); @@ -1012,19 +994,19 @@ public Void answer(InvocationOnMock invocation) { Cell cell = cells.get(0); // Check that the edits come in the right order. - assertEquals(expectedIndex, Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength())); + assertEquals(expectedIndex, + Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); expectedIndex++; return null; } - }).when(mockWriter).append(Mockito.any()); + }).when(mockWriter).append(Mockito. any()); return mockWriter; } /* Produce a mock reader that generates fake entries */ @Override protected Reader getReader(FileStatus file, boolean skipErrors, - CancelableProgressable reporter) throws IOException, CorruptedLogFileException { + CancelableProgressable reporter) throws IOException, CorruptedLogFileException { Reader mockReader = Mockito.mock(Reader.class); Mockito.doAnswer(new Answer() { int index = 0; @@ -1035,11 +1017,10 @@ public Entry answer(InvocationOnMock invocation) throws Throwable { // Generate r0 through r4 in round robin fashion int regionIdx = index % regions.size(); - byte region[] = new byte[] {(byte)'r', (byte) (0x30 + regionIdx)}; + byte region[] = new byte[] { (byte) 'r', (byte) (0x30 + regionIdx) }; - Entry ret = createTestEntry(TABLE_NAME, region, - Bytes.toBytes(index / regions.size()), - FAMILY, QUALIFIER, VALUE, index); + Entry ret = createTestEntry(TABLE_NAME, region, Bytes.toBytes(index / regions.size()), + FAMILY, QUALIFIER, VALUE, index); index++; return ret; } @@ -1101,20 +1082,18 @@ public void testSplitLogFileMultipleRegions() throws IOException { } @Test - public void testSplitLogFileFirstLineCorruptionLog() - throws IOException { + public void testSplitLogFileFirstLineCorruptionLog() throws IOException { conf.setBoolean(WALSplitter.SPLIT_SKIP_ERRORS_KEY, true); generateWALs(1, 10, -1); FileStatus logfile = fs.listStatus(WALDIR)[0]; - corruptWAL(logfile.getPath(), - Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true); + corruptWAL(logfile.getPath(), Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true); useDifferentDFSClient(); WALSplitter.split(HBASELOGDIR, WALDIR, OLDLOGDIR, fs, conf, wals); final Path corruptDir = - new Path(CommonFSUtils.getWALRootDir(conf), HConstants.CORRUPT_DIR_NAME); + new Path(CommonFSUtils.getWALRootDir(conf), HConstants.CORRUPT_DIR_NAME); assertEquals(1, fs.listStatus(corruptDir).length); } @@ -1133,32 +1112,30 @@ public void testConcurrentSplitLogAndReplayRecoverEdit() throws IOException { wals.getWAL(null); FileStatus[] logfiles = fs.listStatus(WALDIR); - assertTrue("There should be some log file", - logfiles != null && logfiles.length > 0); + assertTrue("There should be some log file", logfiles != null && logfiles.length > 0); WALSplitter logSplitter = new WALSplitter(wals, conf, HBASEDIR, fs, HBASEDIR, fs, null, null, null) { - @Override - protected Writer createWriter(Path logfile) - throws IOException { - Writer writer = wals.createRecoveredEditsWriter(this.walFS, logfile); - // After creating writer, simulate region's - // replayRecoveredEditsIfAny() which gets SplitEditFiles of this - // region and delete them, 
excluding files with '.temp' suffix. - NavigableSet files = WALSplitUtil.getSplitEditFilesSorted(fs, regiondir); - if (files != null && !files.isEmpty()) { - for (Path file : files) { - if (!this.walFS.delete(file, false)) { - LOG.error("Failed delete of " + file); - } else { - LOG.debug("Deleted recovered.edits file=" + file); + @Override + protected Writer createWriter(Path logfile) throws IOException { + Writer writer = wals.createRecoveredEditsWriter(this.walFS, logfile); + // After creating writer, simulate region's + // replayRecoveredEditsIfAny() which gets SplitEditFiles of this + // region and delete them, excluding files with '.temp' suffix. + NavigableSet files = WALSplitUtil.getSplitEditFilesSorted(fs, regiondir); + if (files != null && !files.isEmpty()) { + for (Path file : files) { + if (!this.walFS.delete(file, false)) { + LOG.error("Failed delete of " + file); + } else { + LOG.debug("Deleted recovered.edits file=" + file); + } + } } + return writer; } - } - return writer; - } - }; - try{ + }; + try { logSplitter.splitWAL(logfiles[0], null); } catch (IOException e) { LOG.info(e.toString(), e); @@ -1205,10 +1182,11 @@ private void makeRegionDirs(List regions) throws IOException { * @param leaveOpen index to leave un-closed. -1 to close all. * @return the writer that's still open, or null if all were closed. */ - private Writer generateWALs(int writers, int entries, int leaveOpen, int regionEvents) throws IOException { + private Writer generateWALs(int writers, int entries, int leaveOpen, int regionEvents) + throws IOException { makeRegionDirs(REGIONS); fs.mkdirs(WALDIR); - Writer [] ws = new Writer[writers]; + Writer[] ws = new Writer[writers]; int seq = 0; int numRegionEventsAdded = 0; for (int i = 0; i < writers; i++) { @@ -1218,10 +1196,10 @@ private Writer generateWALs(int writers, int entries, int leaveOpen, int regionE for (String region : REGIONS) { String row_key = region + prefix++ + i + j; appendEntry(ws[i], TABLE_NAME, Bytes.toBytes(region), Bytes.toBytes(row_key), FAMILY, - QUALIFIER, VALUE, seq++); + QUALIFIER, VALUE, seq++); if (numRegionEventsAdded < regionEvents) { - numRegionEventsAdded ++; + numRegionEventsAdded++; appendRegionEvent(ws[i], region); } } @@ -1237,14 +1215,11 @@ private Writer generateWALs(int writers, int entries, int leaveOpen, int regionE return ws[leaveOpen]; } - - - private Path[] getLogForRegion(TableName table, String region) - throws IOException { + private Path[] getLogForRegion(TableName table, String region) throws IOException { Path tdir = CommonFSUtils.getWALTableDir(conf, table); @SuppressWarnings("deprecation") - Path editsdir = WALSplitUtil.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, - Bytes.toString(Bytes.toBytes(region)))); + Path editsdir = WALSplitUtil.getRegionDirRecoveredEditsDir( + HRegion.getRegionDir(tdir, Bytes.toString(Bytes.toBytes(region)))); FileStatus[] files = fs.listStatus(editsdir, new PathFilter() { @Override public boolean accept(Path p) { @@ -1300,8 +1275,8 @@ private void corruptWAL(Path path, Corruptions corruption, boolean close) throws case TRUNCATE: fs.delete(path, false); out = fs.create(path); - out.write(corrupted_bytes, 0, fileSize - - (32 + ProtobufLogReader.PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT)); + out.write(corrupted_bytes, 0, + fileSize - (32 + ProtobufLogReader.PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT)); closeOrFlush(close, out); break; @@ -1314,24 +1289,23 @@ private void corruptWAL(Path path, Corruptions corruption, boolean close) throws } } - private void 
closeOrFlush(boolean close, FSDataOutputStream out) - throws IOException { + private void closeOrFlush(boolean close, FSDataOutputStream out) throws IOException { if (close) { out.close(); } else { Method syncMethod = null; try { - syncMethod = out.getClass().getMethod("hflush", new Class []{}); + syncMethod = out.getClass().getMethod("hflush", new Class[] {}); } catch (NoSuchMethodException e) { try { - syncMethod = out.getClass().getMethod("sync", new Class []{}); + syncMethod = out.getClass().getMethod("sync", new Class[] {}); } catch (NoSuchMethodException ex) { - throw new IOException("This version of Hadoop supports " + - "neither Syncable.sync() nor Syncable.hflush()."); + throw new IOException("This version of Hadoop supports " + + "neither Syncable.sync() nor Syncable.hflush()."); } } try { - syncMethod.invoke(out, new Object[]{}); + syncMethod.invoke(out, new Object[] {}); } catch (Exception e) { throw new IOException(e); } @@ -1357,8 +1331,7 @@ private static void appendCompactionEvent(Writer w, RegionInfo hri, String[] inp .setRegionName(ByteString.copyFrom(hri.getRegionName())) .setFamilyName(ByteString.copyFrom(FAMILY)) .setStoreHomeDir(hri.getEncodedName() + "/" + Bytes.toString(FAMILY)) - .addAllCompactionInput(Arrays.asList(inputs)) - .addCompactionOutput(output); + .addAllCompactionInput(Arrays.asList(inputs)).addCompactionOutput(output); WALEdit edit = WALEdit.createCompaction(hri, desc.build()); WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), TABLE_NAME, 1, @@ -1369,24 +1342,19 @@ private static void appendCompactionEvent(Writer w, RegionInfo hri, String[] inp private static void appendRegionEvent(Writer w, String region) throws IOException { WALProtos.RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor( - WALProtos.RegionEventDescriptor.EventType.REGION_OPEN, - TABLE_NAME.toBytes(), - Bytes.toBytes(region), - Bytes.toBytes(String.valueOf(region.hashCode())), - 1, - ServerName.parseServerName("ServerName:9099"), ImmutableMap.>of()); + WALProtos.RegionEventDescriptor.EventType.REGION_OPEN, TABLE_NAME.toBytes(), + Bytes.toBytes(region), Bytes.toBytes(String.valueOf(region.hashCode())), 1, + ServerName.parseServerName("ServerName:9099"), ImmutableMap.> of()); final long time = EnvironmentEdgeManager.currentTime(); - final WALKeyImpl walKey = new WALKeyImpl(Bytes.toBytes(region), TABLE_NAME, 1, time, - HConstants.DEFAULT_CLUSTER_ID); + final WALKeyImpl walKey = + new WALKeyImpl(Bytes.toBytes(region), TABLE_NAME, 1, time, HConstants.DEFAULT_CLUSTER_ID); WALEdit we = WALEdit.createRegionEventWALEdit(Bytes.toBytes(region), regionOpenDesc); w.append(new Entry(walKey, we)); w.sync(false); } - public static long appendEntry(Writer writer, TableName table, byte[] region, - byte[] row, byte[] family, byte[] qualifier, - byte[] value, long seq) - throws IOException { + public static long appendEntry(Writer writer, TableName table, byte[] region, byte[] row, + byte[] family, byte[] qualifier, byte[] value, long seq) throws IOException { LOG.info(Thread.currentThread().getName() + " append"); writer.append(createTestEntry(table, region, row, family, qualifier, value, seq)); LOG.info(Thread.currentThread().getName() + " sync"); @@ -1394,18 +1362,15 @@ public static long appendEntry(Writer writer, TableName table, byte[] region, return seq; } - private static Entry createTestEntry( - TableName table, byte[] region, - byte[] row, byte[] family, byte[] qualifier, - byte[] value, long seq) { + private static Entry createTestEntry(TableName table, byte[] 
region, byte[] row, byte[] family, + byte[] qualifier, byte[] value, long seq) { long time = System.nanoTime(); seq++; final KeyValue cell = new KeyValue(row, family, qualifier, time, KeyValue.Type.Put, value); WALEdit edit = new WALEdit(); edit.add(cell); - return new Entry(new WALKeyImpl(region, table, seq, time, - HConstants.DEFAULT_CLUSTER_ID), edit); + return new Entry(new WALKeyImpl(region, table, seq, time, HConstants.DEFAULT_CLUSTER_ID), edit); } private void injectEmptyFile(String suffix, boolean closeFile) throws IOException { @@ -1424,8 +1389,8 @@ private boolean logsAreEqual(Path p1, Path p2) throws IOException { Entry entry2; while ((entry1 = in1.next()) != null) { entry2 = in2.next(); - if ((entry1.getKey().compareTo(entry2.getKey()) != 0) || - (!entry1.getEdit().toString().equals(entry2.getEdit().toString()))) { + if ((entry1.getKey().compareTo(entry2.getKey()) != 0) + || (!entry1.getEdit().toString().equals(entry2.getEdit().toString()))) { return false; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitBoundedLogWriterCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitBoundedLogWriterCreation.java index 3ca584af392b..0287af29075e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitBoundedLogWriterCreation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitBoundedLogWriterCreation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,7 @@ import org.junit.experimental.categories.Category; @Category(LargeTests.class) -public class TestWALSplitBoundedLogWriterCreation extends TestWALSplit{ +public class TestWALSplitBoundedLogWriterCreation extends TestWALSplit { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -48,4 +48,3 @@ public void testThreadingSlowWriterSmallBuffer() throws Exception { super.testThreadingSlowWriterSmallBuffer(); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitCompressed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitCompressed.java index 4926ec6c9c85..63a6a0ae8882 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitCompressed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitCompressed.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestWALSplitCompressed extends TestWALSplit { @ClassRule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java index 8df034cee74e..965a71a29b5f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java @@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.when; + import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; @@ -131,10 +132,9 @@ public void setUp() throws Exception { this.fs = UTIL.getDFSCluster().getFileSystem(); this.rootDir = CommonFSUtils.getRootDir(this.conf); this.oldLogDir = new Path(this.rootDir, HConstants.HREGION_OLDLOGDIR_NAME); - String serverName = - ServerName.valueOf(TEST_NAME.getMethodName() + "-manual", 16010, - EnvironmentEdgeManager.currentTime()) - .toString(); + String serverName = ServerName + .valueOf(TEST_NAME.getMethodName() + "-manual", 16010, EnvironmentEdgeManager.currentTime()) + .toString(); this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName); this.logDir = new Path(this.rootDir, logName); if (UTIL.getDFSCluster().getFileSystem().exists(this.rootDir)) { @@ -209,10 +209,9 @@ public void testDifferentRootDirAndWALRootDir() throws Exception { FileSystem walFs = CommonFSUtils.getWALFileSystem(this.conf); this.oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); - String serverName = - ServerName.valueOf(TEST_NAME.getMethodName() + "-manual", 16010, - EnvironmentEdgeManager.currentTime()) - .toString(); + String serverName = ServerName + .valueOf(TEST_NAME.getMethodName() + "-manual", 16010, EnvironmentEdgeManager.currentTime()) + .toString(); this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName); this.logDir = new Path(walRootDir, logName); this.wals = new WALFactory(conf, TEST_NAME.getMethodName()); @@ -372,8 +371,8 @@ public void testRecoverSequenceId() throws Exception { } /** - * Test writing edits into an HRegion, closing it, splitting logs, opening - * Region again. Verify seqids. + * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify + * seqids. */ @Test public void testWrittenViaHRegion() @@ -382,7 +381,7 @@ public void testWrittenViaHRegion() TableDescriptor td = pair.getFirst(); RegionInfo ri = pair.getSecond(); - // Write countPerFamily edits into the three families. Do a flush on one + // Write countPerFamily edits into the three families. Do a flush on one // of the families during the load of edits so its seqid is not same as // others to test we do right thing when different seqids. WAL wal = createWAL(this.conf, rootDir, logName); @@ -419,7 +418,7 @@ public void testWrittenViaHRegion() final Result result1b = region2.get(g); assertEquals(result.size(), result1b.size()); - // Next test. Add more edits, then 'crash' this region by stealing its wal + // Next test. 
Add more edits, then 'crash' this region by stealing its wal // out from under it and assert that replay of the log adds the edits back // correctly when region is opened again. for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) { @@ -445,7 +444,7 @@ public Object run() throws Exception { // Assert that count of cells is same as before crash. assertEquals(result2.size(), result3.size()); - // I can't close wal1. Its been appropriated when we split. + // I can't close wal1. Its been appropriated when we split. region3.close(); wal3.close(); return null; @@ -454,14 +453,12 @@ public Object run() throws Exception { } /** - * Test that we recover correctly when there is a failure in between the - * flushes. i.e. Some stores got flushed but others did not. - * Unfortunately, there is no easy hook to flush at a store level. The way - * we get around this is by flushing at the region level, and then deleting - * the recently flushed store file for one of the Stores. This would put us - * back in the situation where all but that store got flushed and the region - * died. - * We restart Region again, and verify that the edits were replayed. + * Test that we recover correctly when there is a failure in between the flushes. i.e. Some stores + * got flushed but others did not. Unfortunately, there is no easy hook to flush at a store level. + * The way we get around this is by flushing at the region level, and then deleting the recently + * flushed store file for one of the Stores. This would put us back in the situation where all but + * that store got flushed and the region died. We restart Region again, and verify that the edits + * were replayed. */ @Test public void testAfterPartialFlush() @@ -470,7 +467,7 @@ public void testAfterPartialFlush() TableDescriptor td = pair.getFirst(); RegionInfo ri = pair.getSecond(); - // Write countPerFamily edits into the three families. Do a flush on one + // Write countPerFamily edits into the three families. Do a flush on one // of the families during the load of edits so its seqid is not same as // others to test we do right thing when different seqids. WAL wal = createWAL(this.conf, rootDir, logName); @@ -514,9 +511,8 @@ public void testAfterPartialFlush() } /** - * Test that we could recover the data correctly after aborting flush. In the - * test, first we abort flush after writing some data, then writing more data - * and flush again, at last verify the data. + * Test that we could recover the data correctly after aborting flush. In the test, first we abort + * flush after writing some data, then writing more data and flush again, at last verify the data. 
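// A hedged, illustrative sketch of the abort-then-recover flow the two tests above describe:
// write rows, let the flush fail, reopen the region from the split WAL, and verify nothing was
// lost. Fixture names (rootDir, ri, td, wal, conf, rsServices, families, logName, createWAL) are
// assumed from the surrounding test class and are not part of this patch.
HRegion region = HRegion.openHRegion(rootDir, ri, td, wal, conf, rsServices, null);
int writtenRowCount = 10;
for (int i = 0; i < writtenRowCount; i++) {
  Put put = new Put(Bytes.toBytes(td.getTableName() + Integer.toString(i)));
  put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"),
    Bytes.toBytes("val"));
  region.put(put);
}
try {
  region.flush(true); // the injected store flusher is expected to abort this flush
} catch (IOException expected) {
  // the edits now live only in the WAL and must survive replay
}
region.close(true);
// Reopen against a fresh WAL after log splitting and confirm every row written is back.
WAL wal2 = createWAL(conf, rootDir, logName);
HRegion region2 = HRegion.openHRegion(rootDir, ri, td, wal2, conf, rsServices, null);
for (int i = 0; i < writtenRowCount; i++) {
  Get get = new Get(Bytes.toBytes(td.getTableName() + Integer.toString(i)));
  assertFalse(region2.get(get).isEmpty());
}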
*/ @Test public void testAfterAbortingFlush() throws IOException { @@ -534,14 +530,14 @@ public void testAfterAbortingFlush() throws IOException { when(rsServices.getConfiguration()).thenReturn(conf); Configuration customConf = new Configuration(this.conf); customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY, - AbstractTestWALReplay.CustomStoreFlusher.class.getName()); + AbstractTestWALReplay.CustomStoreFlusher.class.getName()); HRegion region = HRegion.openHRegion(this.rootDir, ri, td, wal, customConf, rsServices, null); int writtenRowCount = 10; List families = Arrays.asList(td.getColumnFamilies()); for (int i = 0; i < writtenRowCount; i++) { Put put = new Put(Bytes.toBytes(td.getTableName() + Integer.toString(i))); put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), - Bytes.toBytes("val")); + Bytes.toBytes("val")); region.put(put); } @@ -566,7 +562,7 @@ public void testAfterAbortingFlush() throws IOException { for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) { Put put = new Put(Bytes.toBytes(td.getTableName() + Integer.toString(i))); put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), - Bytes.toBytes("val")); + Bytes.toBytes("val")); region.put(put); } writtenRowCount += moreRow; @@ -576,7 +572,7 @@ public void testAfterAbortingFlush() throws IOException { region.flush(true); } catch (IOException t) { LOG.info( - "Expected exception when flushing region because server is stopped," + t.getMessage()); + "Expected exception when flushing region because server is stopped," + t.getMessage()); } region.close(true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitValueCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitValueCompression.java index 32ed85f6bba8..7394723ebe9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitValueCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitValueCompression.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,7 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestWALSplitValueCompression extends TestWALSplit { @ClassRule @@ -35,10 +35,8 @@ public class TestWALSplitValueCompression extends TestWALSplit { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration() - .setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); - TEST_UTIL.getConfiguration() - .setBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, true); + TEST_UTIL.getConfiguration().setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true); + TEST_UTIL.getConfiguration().setBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, true); TestWALSplit.setUpBeforeClass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitWithDeletedTableData.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitWithDeletedTableData.java index 123538bb11b2..3016bdaa3a7f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitWithDeletedTableData.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitWithDeletedTableData.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -49,12 +48,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestWALSplitWithDeletedTableData { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule - .forClass(TestWALSplitWithDeletedTableData.class); + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWALSplitWithDeletedTableData.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @BeforeClass @@ -74,8 +73,8 @@ public void testWALSplitWithDeletedTableData() throws Exception { final byte[] VALUE = Bytes.toBytes("v1"); final TableName t1 = TableName.valueOf("t1"); final TableName t2 = TableName.valueOf("t2"); - final byte[][] splitRows = { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"), - Bytes.toBytes("d") }; + final byte[][] splitRows = + { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d") }; TableDescriptorBuilder htdBuilder1 = TableDescriptorBuilder.newBuilder(t1); htdBuilder1.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CFNAME).build()); Table tab1 = TEST_UTIL.createTable(htdBuilder1.build(), splitRows); @@ -83,8 +82,8 @@ public void testWALSplitWithDeletedTableData() throws Exception { htdBuilder2.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CFNAME).build()); Table tab2 = TEST_UTIL.createTable(htdBuilder2.build(), splitRows); List puts = new ArrayList(4); - byte[][] rks = { Bytes.toBytes("ac"), Bytes.toBytes("ba"), Bytes.toBytes("ca"), - Bytes.toBytes("dd") }; + byte[][] rks = + { Bytes.toBytes("ac"), Bytes.toBytes("ba"), Bytes.toBytes("ca"), Bytes.toBytes("dd") }; for (byte[] rk : rks) { puts.add(new 
Put(rk).addColumn(CFNAME, QNAME, VALUE)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWrongMetaWALFileName.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWrongMetaWALFileName.java index 45f46bb7b322..2e35f7322d4d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWrongMetaWALFileName.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWrongMetaWALFileName.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestWrongMetaWALFileName { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWrongMetaWALFileName.class); + HBaseClassTestRule.forClass(TestWrongMetaWALFileName.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -65,7 +65,7 @@ public void test() throws Exception { UTIL.waitTableAvailable(tableName); HRegionServer rs = UTIL.getMiniHBaseCluster().getRegionServer(0); Path walDir = new Path(rs.getWALRootDir(), - AbstractFSWALProvider.getWALDirectoryName(rs.getServerName().toString())); + AbstractFSWALProvider.getWALDirectoryName(rs.getServerName().toString())); // we should have meta wal files. assertTrue( rs.getWALFileSystem().listStatus(walDir, AbstractFSWALProvider::isMetaFile).length > 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALCorruptionDueToDanglingByteBufferTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALCorruptionDueToDanglingByteBufferTestBase.java index 57d6e8637493..1f3e0568d735 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALCorruptionDueToDanglingByteBufferTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALCorruptionDueToDanglingByteBufferTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public abstract class WALCorruptionDueToDanglingByteBufferTestBase { private static final Logger LOG = - LoggerFactory.getLogger(TestAsyncFSWALCorruptionDueToDanglingByteBuffer.class); + LoggerFactory.getLogger(TestAsyncFSWALCorruptionDueToDanglingByteBuffer.class); protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -66,7 +66,7 @@ public void test() throws Exception { LOG.info("Put 100 rows with " + Durability.ASYNC_WAL + "..."); for (int i = 0; i < 100; i++) { table.batch(Arrays.asList(new Put(getBytes("row", i)) - .addColumn(CF, CQ, getBytes("value", i)).setDurability(Durability.ASYNC_WAL)), + .addColumn(CF, CQ, getBytes("value", i)).setDurability(Durability.ASYNC_WAL)), new Object[1]); } ARRIVE.await(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase.java index a8110f71c22e..eafae7d502c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.CountDownLatch; - import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; @@ -37,8 +36,8 @@ public abstract class WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase { - private static final Logger LOG = LoggerFactory - .getLogger(WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase.class); + private static final Logger LOG = + LoggerFactory.getLogger(WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase.class); protected static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); @@ -46,8 +45,8 @@ public abstract class WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase { protected static CountDownLatch RESUME; - protected static TableName TABLE_NAME = TableName - .valueOf("WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase"); + protected static TableName TABLE_NAME = + TableName.valueOf("WALCorruptionWithMultiPutDueToDanglingByteBufferTestBase"); protected static byte[] CF = Bytes.toBytes("cf"); @@ -68,7 +67,7 @@ public void test() throws Exception { List puts = new ArrayList<>(batchSize); for (int i = 1; i <= 100; i++) { Put p = new Put(getBytes("row", i)).addColumn(CF, CQ, getBytes("value", i)) - .setDurability(Durability.ASYNC_WAL); + .setDurability(Durability.ASYNC_WAL); puts.add(p); if (i % batchSize == 0) { table.put(puts); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java index f52cfb4b84c6..927aeb95f64f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the 
License. */ - package org.apache.hadoop.hbase.wal; import static com.codahale.metrics.MetricRegistry.name; @@ -76,27 +75,25 @@ // imports for things that haven't moved from regionserver.wal yet. /** - * This class runs performance benchmarks for {@link WAL}. - * See usage for this tool by running: + * This class runs performance benchmarks for {@link WAL}. See usage for this tool by running: * $ hbase org.apache.hadoop.hbase.wal.WALPerformanceEvaluation -h */ @InterfaceAudience.Private public final class WALPerformanceEvaluation extends Configured implements Tool { - private static final Logger LOG = - LoggerFactory.getLogger(WALPerformanceEvaluation.class); + private static final Logger LOG = LoggerFactory.getLogger(WALPerformanceEvaluation.class); private final MetricRegistry metrics = new MetricRegistry(); private final Meter syncMeter = - metrics.meter(name(WALPerformanceEvaluation.class, "syncMeter", "syncs")); - - private final Histogram syncHistogram = metrics.histogram( - name(WALPerformanceEvaluation.class, "syncHistogram", "nanos-between-syncs")); - private final Histogram syncCountHistogram = metrics.histogram( - name(WALPerformanceEvaluation.class, "syncCountHistogram", "countPerSync")); - private final Meter appendMeter = metrics.meter( - name(WALPerformanceEvaluation.class, "appendMeter", "bytes")); + metrics.meter(name(WALPerformanceEvaluation.class, "syncMeter", "syncs")); + + private final Histogram syncHistogram = metrics + .histogram(name(WALPerformanceEvaluation.class, "syncHistogram", "nanos-between-syncs")); + private final Histogram syncCountHistogram = + metrics.histogram(name(WALPerformanceEvaluation.class, "syncCountHistogram", "countPerSync")); + private final Meter appendMeter = + metrics.meter(name(WALPerformanceEvaluation.class, "appendMeter", "bytes")); private final Histogram latencyHistogram = - metrics.histogram(name(WALPerformanceEvaluation.class, "latencyHistogram", "nanos")); + metrics.histogram(name(WALPerformanceEvaluation.class, "latencyHistogram", "nanos")); private final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); @@ -116,9 +113,9 @@ public void setConf(Configuration conf) { } /** - * Perform WAL.append() of Put object, for the number of iterations requested. - * Keys and Vaues are generated randomly, the number of column families, - * qualifiers and key/value size is tunable by the user. + * Perform WAL.append() of Put object, for the number of iterations requested. Keys and Vaues are + * generated randomly, the number of column families, qualifiers and key/value size is tunable by + * the user. 
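// Illustrative sketch of the randomized Put the benchmark builds on each iteration (mirrors the
// setupPut helper further down in this patch). keySize, valueSize, numFamilies and numQualifiers
// stand in for the user-tunable settings mentioned above and are assumptions, not literal fields.
Random rand = new Random();
byte[] key = new byte[keySize];
byte[] value = new byte[valueSize];
rand.nextBytes(key);
Put put = new Put(key);
for (int cf = 0; cf < numFamilies; ++cf) {
  for (int q = 0; q < numQualifiers; ++q) {
    rand.nextBytes(value);
    put.addColumn(Bytes.toBytes("family_" + cf), Bytes.toBytes("qualifier_" + q), value);
  }
}
// region.put(put) then routes the edit through region.getWAL(), which is what the meters and
// histograms declared above measure.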
*/ class WALPutBenchmark implements Runnable { private final long numIterations; @@ -128,15 +125,15 @@ class WALPutBenchmark implements Runnable { private final int syncInterval; private final NavigableMap scopes; - WALPutBenchmark(final HRegion region, final TableDescriptor htd, - final long numIterations, final boolean noSync, final int syncInterval) { + WALPutBenchmark(final HRegion region, final TableDescriptor htd, final long numIterations, + final boolean noSync, final int syncInterval) { this.numIterations = numIterations; this.noSync = noSync; this.syncInterval = syncInterval; this.numFamilies = htd.getColumnFamilyCount(); this.region = region; scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for(byte[] fam : htd.getColumnFamilyNames()) { + for (byte[] fam : htd.getColumnFamilyNames()) { scopes.put(fam, 0); } } @@ -147,7 +144,7 @@ public void run() { byte[] value = new byte[valueSize]; WAL wal = region.getWAL(); Span threadSpan = TraceUtil.getGlobalTracer() - .spanBuilder("WALPerfEval." + Thread.currentThread().getName()).startSpan(); + .spanBuilder("WALPerfEval." + Thread.currentThread().getName()).startSpan(); try (Scope threadScope = threadSpan.makeCurrent()) { int lastSync = 0; for (int i = 0; i < numIterations; ++i) { @@ -282,12 +279,12 @@ public int run(String[] args) throws Exception { rootRegionDir = TEST_UTIL.getDataTestDirOnTestFS("WALPerformanceEvaluation"); } // Run WAL Performance Evaluation - // First set the fs from configs. In case we are on hadoop1 + // First set the fs from configs. In case we are on hadoop1 CommonFSUtils.setFsDefault(getConf(), CommonFSUtils.getRootDir(getConf())); FileSystem fs = FileSystem.get(getConf()); LOG.info("FileSystem={}, rootDir={}", fs, rootRegionDir); Span span = TraceUtil.getGlobalTracer().spanBuilder("WALPerfEval").startSpan(); - try (Scope scope = span.makeCurrent()){ + try (Scope scope = span.makeCurrent()) { rootRegionDir = rootRegionDir.makeQualified(fs.getUri(), fs.getWorkingDirectory()); cleanRegionRootDir(fs, rootRegionDir); CommonFSUtils.setRootDir(getConf(), rootRegionDir); @@ -299,21 +296,21 @@ public int run(String[] args) throws Exception { Threads.setDaemonThreadRunning(roller, "WALPerfEval.logRoller"); try { - for(int i = 0; i < numRegions; i++) { + for (int i = 0; i < numRegions; i++) { // Initialize Table Descriptor // a table per desired region means we can avoid carving up the key space final TableDescriptor htd = createHTableDescriptor(i, numFamilies); regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller); - benchmarks[i] = - new WALPutBenchmark(regions[i], htd, numIterations, noSync, syncInterval); + benchmarks[i] = new WALPutBenchmark(regions[i], htd, numIterations, noSync, syncInterval); } - ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics). 
- outputTo(System.out).convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build(); + ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).outputTo(System.out) + .convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build(); reporter.start(30, TimeUnit.SECONDS); long putTime = runBenchmark(benchmarks, numThreads); - logBenchmarkResult("Summary: threads=" + numThreads + ", iterations=" + numIterations + - ", syncInterval=" + syncInterval, numIterations * numThreads, putTime); + logBenchmarkResult("Summary: threads=" + numThreads + ", iterations=" + numIterations + + ", syncInterval=" + syncInterval, + numIterations * numThreads, putTime); for (int i = 0; i < numRegions; i++) { if (regions[i] != null) { @@ -324,11 +321,11 @@ public int run(String[] args) throws Exception { if (verify) { LOG.info("verifying written log entries."); Path dir = new Path(CommonFSUtils.getRootDir(getConf()), - AbstractFSWALProvider.getWALDirectoryName("wals")); + AbstractFSWALProvider.getWALDirectoryName("wals")); long editCount = 0; - FileStatus [] fsss = fs.listStatus(dir); + FileStatus[] fsss = fs.listStatus(dir); if (fsss.length == 0) throw new IllegalStateException("No WAL found"); - for (FileStatus fss: fsss) { + for (FileStatus fss : fsss) { Path p = fss.getPath(); if (!fs.exists(p)) throw new IllegalStateException(p.toString()); editCount += verify(wals, p, verbose); @@ -375,8 +372,7 @@ private static TableDescriptor createHTableDescriptor(final int regionNum, } /** - * Verify the content of the WAL file. - * Verify that the file has expected number of edits. + * Verify the content of the WAL file. Verify that the file has expected number of edits. * @param wals may not be null * @param wal * @return Count of edits. @@ -435,24 +431,23 @@ private void printUsageAndExit() { System.err.println(" -nocleanup Do NOT remove test data when done."); System.err.println(" -noclosefs Do NOT close the filesystem when done."); System.err.println(" -nosync Append without syncing"); - System.err.println(" -syncInterval Append N edits and then sync. " + - "Default=0, i.e. sync every edit."); + System.err.println( + " -syncInterval Append N edits and then sync. " + "Default=0, i.e. sync every edit."); System.err.println(" -verify Verify edits written in sequence"); - System.err.println(" -verbose Output extra info; " + - "e.g. all edit seq ids when verifying"); + System.err + .println(" -verbose Output extra info; " + "e.g. all edit seq ids when verifying"); System.err.println(" -roll Roll the way every N appends"); System.err.println(" -encryption Encrypt the WAL with algorithm A, e.g. AES"); - System.err.println(" -traceFreq Rate of trace sampling. Default: 1.0, " + - "only respected when tracing is enabled, ie -Dhbase.trace.spanreceiver.classes=..."); + System.err.println(" -traceFreq Rate of trace sampling. Default: 1.0, " + + "only respected when tracing is enabled, ie -Dhbase.trace.spanreceiver.classes=..."); System.err.println(""); System.err.println("Examples:"); System.err.println(""); - System.err.println(" To run 100 threads on hdfs with log rolling every 10k edits and " + - "verification afterward do:"); - System.err.println(" $ hbase org.apache.hadoop.hbase.wal." 
+ - "WALPerformanceEvaluation \\"); - System.err.println(" -conf ./core-site.xml -path hdfs://example.org:7000/tmp " + - "-threads 100 -roll 10000 -verify"); + System.err.println(" To run 100 threads on hdfs with log rolling every 10k edits and " + + "verification afterward do:"); + System.err.println(" $ hbase org.apache.hadoop.hbase.wal." + "WALPerformanceEvaluation \\"); + System.err.println(" -conf ./core-site.xml -path hdfs://example.org:7000/tmp " + + "-threads 100 -roll 10000 -verify"); System.exit(1); } @@ -522,8 +517,8 @@ private Put setupPut(Random rand, byte[] key, byte[] value, final int numFamilie for (int cf = 0; cf < numFamilies; ++cf) { for (int q = 0; q < numQualifiers; ++q) { rand.nextBytes(value); - put.addColumn(Bytes.toBytes(FAMILY_PREFIX + cf), - Bytes.toBytes(QUALIFIER_PREFIX + q), value); + put.addColumn(Bytes.toBytes(FAMILY_PREFIX + cf), Bytes.toBytes(QUALIFIER_PREFIX + q), + value); } } return put; @@ -533,26 +528,28 @@ private long runBenchmark(Runnable[] runnable, final int numThreads) throws Inte Thread[] threads = new Thread[numThreads]; long startTime = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < numThreads; ++i) { - threads[i] = new Thread(runnable[i%runnable.length], "t" + i + ",r" + (i%runnable.length)); + threads[i] = + new Thread(runnable[i % runnable.length], "t" + i + ",r" + (i % runnable.length)); threads[i].start(); } - for (Thread t : threads) t.join(); + for (Thread t : threads) + t.join(); long endTime = EnvironmentEdgeManager.currentTime(); - return(endTime - startTime); + return (endTime - startTime); } /** - * The guts of the {@link #main} method. - * Call this method to avoid the {@link #main(String[])} System.exit. + * The guts of the {@link #main} method. Call this method to avoid the {@link #main(String[])} + * System.exit. * @param args * @return errCode * @throws Exception */ - static int innerMain(final Configuration c, final String [] args) throws Exception { + static int innerMain(final Configuration c, final String[] args) throws Exception { return ToolRunner.run(c, new WALPerformanceEvaluation(), args); } public static void main(String[] args) throws Exception { - System.exit(innerMain(HBaseConfiguration.create(), args)); + System.exit(innerMain(HBaseConfiguration.create(), args)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java index 75e0ec55f7ca..6a0779c2dd95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -58,8 +58,7 @@ public class TestZooKeeperACL { HBaseClassTestRule.forClass(TestZooKeeperACL.class); private final static Logger LOG = LoggerFactory.getLogger(TestZooKeeperACL.class); - private final static HBaseTestingUtil TEST_UTIL = - new HBaseTestingUtil(); + private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static ZKWatcher zkw; private static boolean secureZKAvailable; @@ -67,23 +66,16 @@ public class TestZooKeeperACL { @BeforeClass public static void setUpBeforeClass() throws Exception { File saslConfFile = File.createTempFile("tmp", "jaas.conf"); - try (OutputStreamWriter fwriter = new OutputStreamWriter( - new FileOutputStream(saslConfFile), StandardCharsets.UTF_8)) { - fwriter.write( - "Server {\n" + - "org.apache.zookeeper.server.auth.DigestLoginModule required\n" + - "user_hbase=\"secret\";\n" + - "};\n" + - "Client {\n" + - "org.apache.zookeeper.server.auth.DigestLoginModule required\n" + - "username=\"hbase\"\n" + - "password=\"secret\";\n" + - "};" + "\n"); + try (OutputStreamWriter fwriter = + new OutputStreamWriter(new FileOutputStream(saslConfFile), StandardCharsets.UTF_8)) { + fwriter.write("Server {\n" + "org.apache.zookeeper.server.auth.DigestLoginModule required\n" + + "user_hbase=\"secret\";\n" + "};\n" + "Client {\n" + + "org.apache.zookeeper.server.auth.DigestLoginModule required\n" + "username=\"hbase\"\n" + + "password=\"secret\";\n" + "};" + "\n"); } - System.setProperty("java.security.auth.login.config", - saslConfFile.getAbsolutePath()); + System.setProperty("java.security.auth.login.config", saslConfFile.getAbsolutePath()); System.setProperty("zookeeper.authProvider.1", - "org.apache.zookeeper.server.auth.SASLAuthenticationProvider"); + "org.apache.zookeeper.server.auth.SASLAuthenticationProvider"); TEST_UTIL.getConfiguration().setInt("hbase.zookeeper.property.maxClientCnxns", 1000); @@ -96,8 +88,7 @@ public static void setUpBeforeClass() throws Exception { secureZKAvailable = false; return; } - zkw = new ZKWatcher( - new Configuration(TEST_UTIL.getConfiguration()), + zkw = new ZKWatcher(new Configuration(TEST_UTIL.getConfiguration()), TestZooKeeper.class.getName(), null); } @@ -118,16 +109,13 @@ public void setUp() throws Exception { } /** - * Create a node and check its ACL. When authentication is enabled on - * ZooKeeper, all nodes (except /hbase/root-region-server, /hbase/master - * and /hbase/hbaseid) should be created so that only the hbase server user - * (master or region server user) that created them can access them, and - * this user should have all permissions on this node. For - * /hbase/root-region-server, /hbase/master, and /hbase/hbaseid the - * permissions should be as above, but should also be world-readable. First - * we check the general case of /hbase nodes in the following test, and - * then check the subset of world-readable nodes in the three tests after - * that. + * Create a node and check its ACL. When authentication is enabled on ZooKeeper, all nodes (except + * /hbase/root-region-server, /hbase/master and /hbase/hbaseid) should be created so that only the + * hbase server user (master or region server user) that created them can access them, and this + * user should have all permissions on this node. For /hbase/root-region-server, /hbase/master, + * and /hbase/hbaseid the permissions should be as above, but should also be world-readable. 
First + * we check the general case of /hbase nodes in the following test, and then check the subset of + * world-readable nodes in the three tests after that. */ @Test public void testHBaseRootZNodeACL() throws Exception { @@ -135,8 +123,7 @@ public void testHBaseRootZNodeACL() throws Exception { return; } - List acls = zkw.getRecoverableZooKeeper().getZooKeeper() - .getACL("/hbase", new Stat()); + List acls = zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/hbase", new Stat()); assertEquals(1, acls.size()); assertEquals("sasl", acls.get(0).getId().getScheme()); assertEquals("hbase", acls.get(0).getId().getId()); @@ -144,9 +131,9 @@ public void testHBaseRootZNodeACL() throws Exception { } /** - * When authentication is enabled on ZooKeeper, /hbase/root-region-server - * should be created with 2 ACLs: one specifies that the hbase user has - * full access to the node; the other, that it is world-readable. + * When authentication is enabled on ZooKeeper, /hbase/root-region-server should be created with 2 + * ACLs: one specifies that the hbase user has full access to the node; the other, that it is + * world-readable. */ @Test public void testHBaseRootRegionServerZNodeACL() throws Exception { @@ -160,13 +147,12 @@ public void testHBaseRootRegionServerZNodeACL() throws Exception { boolean foundWorldReadableAcl = false; boolean foundHBaseOwnerAcl = false; - for(int i = 0; i < 2; i++) { + for (int i = 0; i < 2; i++) { if (acls.get(i).getId().getScheme().equals("world") == true) { assertEquals("anyone", acls.get(0).getId().getId()); assertEquals(ZooDefs.Perms.READ, acls.get(0).getPerms()); foundWorldReadableAcl = true; - } - else { + } else { if (acls.get(i).getId().getScheme().equals("sasl") == true) { assertEquals("hbase", acls.get(1).getId().getId()); assertEquals("sasl", acls.get(1).getId().getScheme()); @@ -181,9 +167,9 @@ public void testHBaseRootRegionServerZNodeACL() throws Exception { } /** - * When authentication is enabled on ZooKeeper, /hbase/master should be - * created with 2 ACLs: one specifies that the hbase user has full access - * to the node; the other, that it is world-readable. + * When authentication is enabled on ZooKeeper, /hbase/master should be created with 2 ACLs: one + * specifies that the hbase user has full access to the node; the other, that it is + * world-readable. */ @Test public void testHBaseMasterServerZNodeACL() throws Exception { @@ -191,13 +177,13 @@ public void testHBaseMasterServerZNodeACL() throws Exception { return; } - List acls = zkw.getRecoverableZooKeeper().getZooKeeper() - .getACL("/hbase/master", new Stat()); + List acls = + zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/hbase/master", new Stat()); assertEquals(2, acls.size()); boolean foundWorldReadableAcl = false; boolean foundHBaseOwnerAcl = false; - for(int i = 0; i < 2; i++) { + for (int i = 0; i < 2; i++) { if (acls.get(i).getId().getScheme().equals("world") == true) { assertEquals("anyone", acls.get(0).getId().getId()); assertEquals(ZooDefs.Perms.READ, acls.get(0).getPerms()); @@ -217,9 +203,9 @@ public void testHBaseMasterServerZNodeACL() throws Exception { } /** - * When authentication is enabled on ZooKeeper, /hbase/hbaseid should be - * created with 2 ACLs: one specifies that the hbase user has full access - * to the node; the other, that it is world-readable. + * When authentication is enabled on ZooKeeper, /hbase/hbaseid should be created with 2 ACLs: one + * specifies that the hbase user has full access to the node; the other, that it is + * world-readable. 
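// A compact sketch of the ACL shape these world-readable-znode tests assert: exactly two ACLs,
// full access for the sasl "hbase" identity plus a world:anyone READ entry. zkw is the ZKWatcher
// created in setUpBeforeClass(); the znode path is just an example.
List<ACL> acls =
  zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/hbase/hbaseid", new Stat());
assertEquals(2, acls.size());
for (ACL acl : acls) {
  if ("world".equals(acl.getId().getScheme())) {
    assertEquals("anyone", acl.getId().getId());
    assertEquals(ZooDefs.Perms.READ, acl.getPerms());
  } else {
    assertEquals("sasl", acl.getId().getScheme());
    assertEquals("hbase", acl.getId().getId());
    assertEquals(ZooDefs.Perms.ALL, acl.getPerms());
  }
}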
*/ @Test public void testHBaseIDZNodeACL() throws Exception { @@ -227,13 +213,13 @@ public void testHBaseIDZNodeACL() throws Exception { return; } - List acls = zkw.getRecoverableZooKeeper().getZooKeeper() - .getACL("/hbase/hbaseid", new Stat()); + List acls = + zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/hbase/hbaseid", new Stat()); assertEquals(2, acls.size()); boolean foundWorldReadableAcl = false; boolean foundHBaseOwnerAcl = false; - for(int i = 0; i < 2; i++) { + for (int i = 0; i < 2; i++) { if (acls.get(i).getId().getScheme().equals("world") == true) { assertEquals("anyone", acls.get(0).getId().getId()); assertEquals(ZooDefs.Perms.READ, acls.get(0).getPerms()); @@ -253,8 +239,8 @@ public void testHBaseIDZNodeACL() throws Exception { } /** - * Finally, we check the ACLs of a node outside of the /hbase hierarchy and - * verify that its ACL is simply 'hbase:Perms.ALL'. + * Finally, we check the ACLs of a node outside of the /hbase hierarchy and verify that its ACL is + * simply 'hbase:Perms.ALL'. */ @Test public void testOutsideHBaseNodeACL() throws Exception { @@ -263,8 +249,8 @@ public void testOutsideHBaseNodeACL() throws Exception { } ZKUtil.createWithParents(zkw, "/testACLNode"); - List acls = zkw.getRecoverableZooKeeper().getZooKeeper() - .getACL("/testACLNode", new Stat()); + List acls = + zkw.getRecoverableZooKeeper().getZooKeeper().getACL("/testACLNode", new Stat()); assertEquals(1, acls.size()); assertEquals("sasl", acls.get(0).getId().getScheme()); assertEquals("hbase", acls.get(0).getId().getId()); @@ -281,16 +267,15 @@ public void testIsZooKeeperSecure() throws Exception { assertEquals(testJaasConfig, secureZKAvailable); // Define Jaas configuration without ZooKeeper Jaas config File saslConfFile = File.createTempFile("tmp", "fakeJaas.conf"); - try (OutputStreamWriter fwriter = new OutputStreamWriter( - new FileOutputStream(saslConfFile), StandardCharsets.UTF_8)) { + try (OutputStreamWriter fwriter = + new OutputStreamWriter(new FileOutputStream(saslConfFile), StandardCharsets.UTF_8)) { fwriter.write(""); } - System.setProperty("java.security.auth.login.config", - saslConfFile.getAbsolutePath()); + System.setProperty("java.security.auth.login.config", saslConfFile.getAbsolutePath()); - testJaasConfig = ZKAuthentication.isSecureZooKeeper( - new Configuration(TEST_UTIL.getConfiguration())); + testJaasConfig = + ZKAuthentication.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration())); assertFalse(testJaasConfig); saslConfFile.delete(); } @@ -350,4 +335,3 @@ public void testAdminDrainAllowedOnSecureZK() throws Exception { } } - diff --git a/hbase-shaded/hbase-shaded-check-invariants/pom.xml b/hbase-shaded/hbase-shaded-check-invariants/pom.xml index e3b67e7b95c3..a581000eab16 100644 --- a/hbase-shaded/hbase-shaded-check-invariants/pom.xml +++ b/hbase-shaded/hbase-shaded-check-invariants/pom.xml @@ -10,28 +10,24 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 - hbase org.apache.hbase + hbase 3.0.0-alpha-3-SNAPSHOT ../.. hbase-shaded-check-invariants pom + Apache HBase Shaded Packaging Invariants - - Enforces our invariants for our shaded artifacts. e.g. shaded clients have + Enforces our invariants for our shaded artifacts. e.g. shaded clients have a specific set of transitive dependencies and shaded clients only contain classes that are in particular packages. Does the enforcement through - the maven-enforcer-plugin and integration test. 
- - Apache HBase Shaded Packaging Invariants + the maven-enforcer-plugin and integration test. - - + make-sure-validation-files-are-in-sync - validate exec + validate diff false @@ -204,10 +200,10 @@ --> check-jar-contents - integration-test exec + integration-test ${shell-executable} ${project.build.testOutputDirectory} diff --git a/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml b/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml index 81064b90ceea..0416c3686aa2 100644 --- a/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml +++ b/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml @@ -1,7 +1,6 @@ - - 4.0.0 - hbase-shaded org.apache.hbase + hbase-shaded 3.0.0-alpha-3-SNAPSHOT .. hbase-shaded-client-byo-hadoop Apache HBase - Shaded - Client + + + org.apache.hbase + hbase-client + + @@ -51,12 +56,6 @@ - - - org.apache.hbase - hbase-client - - 4.0.0 - hbase-shaded org.apache.hbase + hbase-shaded 3.0.0-alpha-3-SNAPSHOT .. hbase-shaded-client Apache HBase - Shaded - Client (with Hadoop bundled) + + + org.apache.hbase + hbase-client + + @@ -59,7 +64,8 @@ need not be included --> javax.annotation:javax.annotation-api javax.activation:javax.activation-api - jakarta.activation:jakarta.activation-api + jakarta.activation:jakarta.activation-api + jakarta.ws.rs:jakarta.ws.rs-api jakarta.annotation:jakarta.annotation-api jakarta.validation:jakarta.validation-api @@ -88,10 +94,4 @@ - - - org.apache.hbase - hbase-client - - diff --git a/hbase-shaded/hbase-shaded-mapreduce/pom.xml b/hbase-shaded/hbase-shaded-mapreduce/pom.xml index 15c57494306f..f81c0da44740 100644 --- a/hbase-shaded/hbase-shaded-mapreduce/pom.xml +++ b/hbase-shaded/hbase-shaded-mapreduce/pom.xml @@ -1,7 +1,6 @@ - - 4.0.0 - hbase-shaded org.apache.hbase + hbase-shaded 3.0.0-alpha-3-SNAPSHOT .. hbase-shaded-mapreduce Apache HBase - Shaded - MapReduce - - - - org.apache.maven.plugins - maven-site-plugin - - true - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-jar-plugin - - - - - org/apache/hadoop/hbase/mapreduce/Driver - - - - - - org.apache.maven.plugins - maven-shade-plugin - - - + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + + org/apache/hadoop/hbase/mapreduce/Driver + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + - + 4.0.0 - hbase org.apache.hbase + hbase 3.0.0-alpha-3-SNAPSHOT ../.. hbase-shaded-with-hadoop-check-invariants pom + Apache HBase Shaded Packaging Invariants (with Hadoop bundled) - - Enforces our invariants for our shaded artifacts. e.g. shaded clients have + Enforces our invariants for our shaded artifacts. e.g. shaded clients have a specific set of transitive dependencies and shaded clients only contain classes that are in particular packages. Does the enforcement through - the maven-enforcer-plugin and integration test. - - Apache HBase Shaded Packaging Invariants (with Hadoop bundled) + the maven-enforcer-plugin and integration test. 
- - + make-sure-validation-files-are-in-sync - validate exec + validate diff false @@ -195,10 +191,10 @@ --> check-jar-contents-for-stuff-with-hadoop - integration-test exec + integration-test ${shell-executable} ${project.build.testOutputDirectory} diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml index 20411905acf2..d718dfb4eab1 100644 --- a/hbase-shaded/pom.xml +++ b/hbase-shaded/pom.xml @@ -1,8 +1,6 @@ - - - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-shaded + pom Apache HBase - Shaded Module of HBase with most deps shaded. - pom - - - true - - true - true - org.apache.hadoop.hbase.shaded - hbase-shaded-client-byo-hadoop hbase-shaded-client @@ -49,6 +39,14 @@ hbase-shaded-check-invariants hbase-shaded-with-hadoop-check-invariants + + + true + + true + true + org.apache.hadoop.hbase.shaded + org.apache.hbase @@ -73,48 +71,6 @@ - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - aggregate-licenses - - process - - - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - @@ -131,10 +87,10 @@ aggregate-into-a-jar-with-relocated-third-parties - package shade + package false false @@ -147,7 +103,8 @@ need not be included --> javax.annotation:javax.annotation-api javax.activation:javax.activation-api - jakarta.activation:jakarta.activation-api + jakarta.activation:jakarta.activation-api + jakarta.ws.rs:jakarta.ws.rs-api jakarta.annotation:jakarta.annotation-api jakarta.validation:jakarta.validation-api @@ -461,24 +418,20 @@ - + LICENSE.txt ASL2.0 - + overview.html - + false ${project.name} - - + @@ -492,7 +445,7 @@ - + org.eclipse.jetty.orbit:javax.servlet.jsp.jstl META-INF/ECLIPSEF.SF @@ -566,5 +519,47 @@ + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + aggregate-licenses + + process + + + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index ce48923ece63..6dda9dc91d6e 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration hbase-shell Apache HBase - Shell Shell for HBase - - - - src/main/ruby - - - - - src/test/resources - - **/** - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - - org.jruby.JarBootstrapMain - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-surefire-plugin - ${surefire.version} - - - ${surefire.firstPartGroups} - - - - - @@ -162,6 +110,58 @@ test + + + + src/main/ruby + + + + + src/test/resources + + **/** + + + + + + + + maven-surefire-plugin + ${surefire.version} + + + ${surefire.firstPartGroups} + + + + + + + org.apache.maven.plugins + 
maven-jar-plugin + + + + org.jruby.JarBootstrapMain + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + net.revelc.code + warbucks-maven-plugin + + + @@ -183,7 +183,9 @@ hadoop-3.0 - !hadoop.profile + + !hadoop.profile + @@ -216,10 +218,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 3.0.0-alpha-3-SNAPSHOT ../hbase-build-configuration @@ -32,8 +30,8 @@ Apache HBase - Testing Util HBase Testing Utilities. - - + + org.apache.hbase hbase-logging diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java index a98bd6e00f58..0b536bbff765 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,28 +29,27 @@ import org.slf4j.LoggerFactory; /** - * This class defines methods that can help with managing HBase clusters - * from unit tests and system tests. There are 3 types of cluster deployments: + * This class defines methods that can help with managing HBase clusters from unit tests and system + * tests. There are 3 types of cluster deployments: *
            - *
 * <ul>
- * <li>MiniHBaseCluster: each server is run in the same JVM in separate threads,
- * used by unit tests</li>
+ * <li>MiniHBaseCluster: each server is run in the same JVM in separate threads, used by unit
+ * tests</li>
 * <li>DistributedHBaseCluster: the cluster is pre-deployed, system and integration tests can
 * interact with the cluster.</li>
- * <li>ProcessBasedLocalHBaseCluster: each server is deployed locally but in separate
- * JVMs.</li>
+ * <li>ProcessBasedLocalHBaseCluster: each server is deployed locally but in separate JVMs.</li>
 * </ul>
 * <p>

          - * HBaseCluster unifies the way tests interact with the cluster, so that the same test can - * be run against a mini-cluster during unit test execution, or a distributed cluster having - * tens/hundreds of nodes during execution of integration tests. - * + * HBaseCluster unifies the way tests interact with the cluster, so that the same test can be run + * against a mini-cluster during unit test execution, or a distributed cluster having tens/hundreds + * of nodes during execution of integration tests. *

          * HBaseCluster exposes client-side public interfaces to tests, so that tests does not assume - * running in a particular mode. Not all the tests are suitable to be run on an actual cluster, - * and some tests will still need to mock stuff and introspect internal state. For those use - * cases from unit tests, or if more control is needed, you can use the subclasses directly. - * In that sense, this class does not abstract away every interface that - * MiniHBaseCluster or DistributedHBaseCluster provide. + * running in a particular mode. Not all the tests are suitable to be run on an actual cluster, and + * some tests will still need to mock stuff and introspect internal state. For those use cases from + * unit tests, or if more control is needed, you can use the subclasses directly. In that sense, + * this class does not abstract away every interface that MiniHBaseCluster or + * DistributedHBaseCluster provide. * @deprecated since 3.0.0, will be removed in 4.0.0. Use * {@link org.apache.hadoop.hbase.testing.TestingHBaseCluster} instead. */ @@ -89,24 +88,23 @@ public Configuration getConf() { public abstract ClusterMetrics getClusterMetrics() throws IOException; /** - * Returns a ClusterStatus for this HBase cluster as observed at the - * starting of the HBaseCluster + * Returns a ClusterStatus for this HBase cluster as observed at the starting of the HBaseCluster */ public ClusterMetrics getInitialClusterMetrics() throws IOException { return initialClusterStatus; } /** - * Starts a new region server on the given hostname or if this is a mini/local cluster, - * starts a region server locally. + * Starts a new region server on the given hostname or if this is a mini/local cluster, starts a + * region server locally. * @param hostname the hostname to start the regionserver on * @throws IOException if something goes wrong */ public abstract void startRegionServer(String hostname, int port) throws IOException; /** - * Kills the region server process if this is a distributed cluster, otherwise - * this causes the region server to exit doing basic clean up only. + * Kills the region server process if this is a distributed cluster, otherwise this causes the + * region server to exit doing basic clean up only. * @throws IOException if something goes wrong */ public abstract void killRegionServer(ServerName serverName) throws IOException; @@ -114,9 +112,9 @@ public ClusterMetrics getInitialClusterMetrics() throws IOException { /** * Keeping track of killed servers and being able to check if a particular server was killed makes * it possible to do fault tolerance testing for dead servers in a deterministic way. A concrete - * example of such case is - killing servers and waiting for all regions of a particular table - * to be assigned. We can check for server column in META table and that its value is not one - * of the killed servers. + * example of such case is - killing servers and waiting for all regions of a particular table to + * be assigned. We can check for server column in META table and that its value is not one of the + * killed servers. 
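// A hedged sketch of the deterministic fault-tolerance pattern described above: kill a region
// server, wait for it to stop, then verify no region of the table is still assigned to it.
// cluster, admin and tableName are assumed fixtures; the HBaseCluster methods are the ones
// declared in this class.
ServerName victim = cluster.getClusterMetrics().getServersName().get(0);
cluster.killRegionServer(victim);
cluster.waitForRegionServerToStop(victim, 60000);
assertTrue(cluster.isKilledRS(victim));
for (RegionInfo region : admin.getRegions(tableName)) {
  assertNotEquals(victim, cluster.getServerHoldingRegion(tableName, region.getRegionName()));
}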
*/ public abstract boolean isKilledRS(ServerName serverName); @@ -141,8 +139,8 @@ public void waitForRegionServerToStart(String hostname, int port, long timeout) } Threads.sleep(100); } - throw new IOException("did timeout " + timeout + "ms waiting for region server to start: " - + hostname); + throw new IOException( + "did timeout " + timeout + "ms waiting for region server to start: " + hostname); } /** @@ -167,23 +165,23 @@ public abstract void waitForRegionServerToStop(ServerName serverName, long timeo public abstract void resumeRegionServer(ServerName serverName) throws IOException; /** - * Starts a new zookeeper node on the given hostname or if this is a mini/local cluster, - * silently logs warning message. + * Starts a new zookeeper node on the given hostname or if this is a mini/local cluster, silently + * logs warning message. * @param hostname the hostname to start the regionserver on * @throws IOException if something goes wrong */ public abstract void startZkNode(String hostname, int port) throws IOException; /** - * Kills the zookeeper node process if this is a distributed cluster, otherwise, - * this causes master to exit doing basic clean up only. + * Kills the zookeeper node process if this is a distributed cluster, otherwise, this causes + * master to exit doing basic clean up only. * @throws IOException if something goes wrong */ public abstract void killZkNode(ServerName serverName) throws IOException; /** - * Stops the region zookeeper if this is a distributed cluster, otherwise - * silently logs warning message. + * Stops the region zookeeper if this is a distributed cluster, otherwise silently logs warning + * message. * @throws IOException if something goes wrong */ public abstract void stopZkNode(ServerName serverName) throws IOException; @@ -192,33 +190,30 @@ public abstract void waitForRegionServerToStop(ServerName serverName, long timeo * Wait for the specified zookeeper node to join the cluster * @throws IOException if something goes wrong or timeout occurs */ - public abstract void waitForZkNodeToStart(ServerName serverName, long timeout) - throws IOException; + public abstract void waitForZkNodeToStart(ServerName serverName, long timeout) throws IOException; /** * Wait for the specified zookeeper node to stop the thread / process. * @throws IOException if something goes wrong or timeout occurs */ - public abstract void waitForZkNodeToStop(ServerName serverName, long timeout) - throws IOException; + public abstract void waitForZkNodeToStop(ServerName serverName, long timeout) throws IOException; /** - * Starts a new datanode on the given hostname or if this is a mini/local cluster, - * silently logs warning message. + * Starts a new datanode on the given hostname or if this is a mini/local cluster, silently logs + * warning message. * @throws IOException if something goes wrong */ public abstract void startDataNode(ServerName serverName) throws IOException; /** - * Kills the datanode process if this is a distributed cluster, otherwise, - * this causes master to exit doing basic clean up only. + * Kills the datanode process if this is a distributed cluster, otherwise, this causes master to + * exit doing basic clean up only. * @throws IOException if something goes wrong */ public abstract void killDataNode(ServerName serverName) throws IOException; /** - * Stops the datanode if this is a distributed cluster, otherwise - * silently logs warning message. + * Stops the datanode if this is a distributed cluster, otherwise silently logs warning message. 
* @throws IOException if something goes wrong */ public abstract void stopDataNode(ServerName serverName) throws IOException; @@ -228,14 +223,14 @@ public abstract void waitForZkNodeToStop(ServerName serverName, long timeout) * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForDataNodeToStart(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** * Wait for the specified datanode to stop the thread / process. * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForDataNodeToStop(ServerName serverName, long timeout) - throws IOException; + throws IOException; /** * Starts a new namenode on the given hostname or if this is a mini/local cluster, silently logs @@ -272,16 +267,16 @@ public abstract void waitForNameNodeToStop(ServerName serverName, long timeout) throws IOException; /** - * Starts a new master on the given hostname or if this is a mini/local cluster, - * starts a master locally. + * Starts a new master on the given hostname or if this is a mini/local cluster, starts a master + * locally. * @param hostname the hostname to start the master on * @throws IOException if something goes wrong */ public abstract void startMaster(String hostname, int port) throws IOException; /** - * Kills the master process if this is a distributed cluster, otherwise, - * this causes master to exit doing basic clean up only. + * Kills the master process if this is a distributed cluster, otherwise, this causes master to + * exit doing basic clean up only. * @throws IOException if something goes wrong */ public abstract void killMaster(ServerName serverName) throws IOException; @@ -296,31 +291,23 @@ public abstract void waitForNameNodeToStop(ServerName serverName, long timeout) * Wait for the specified master to stop the thread / process. * @throws IOException if something goes wrong or timeout occurs */ - public abstract void waitForMasterToStop(ServerName serverName, long timeout) - throws IOException; + public abstract void waitForMasterToStop(ServerName serverName, long timeout) throws IOException; /** - * Blocks until there is an active master and that master has completed - * initialization. - * - * @return true if an active master becomes available. false if there are no - * masters left. + * Blocks until there is an active master and that master has completed initialization. + * @return true if an active master becomes available. false if there are no masters left. * @throws IOException if something goes wrong or timeout occurs */ - public boolean waitForActiveAndReadyMaster() - throws IOException { + public boolean waitForActiveAndReadyMaster() throws IOException { return waitForActiveAndReadyMaster(Long.MAX_VALUE); } /** - * Blocks until there is an active master and that master has completed - * initialization. + * Blocks until there is an active master and that master has completed initialization. * @param timeout the timeout limit in ms - * @return true if an active master becomes available. false if there are no - * masters left. + * @return true if an active master becomes available. false if there are no masters left. */ - public abstract boolean waitForActiveAndReadyMaster(long timeout) - throws IOException; + public abstract boolean waitForActiveAndReadyMaster(long timeout) throws IOException; /** * Wait for HBase Cluster to shut down. 
@@ -333,10 +320,9 @@ public abstract boolean waitForActiveAndReadyMaster(long timeout) public abstract void shutdown() throws IOException; /** - * Restores the cluster to it's initial state if this is a real cluster, - * otherwise does nothing. - * This is a best effort restore. If the servers are not reachable, or insufficient - * permissions, etc. restoration might be partial. + * Restores the cluster to it's initial state if this is a real cluster, otherwise does nothing. + * This is a best effort restore. If the servers are not reachable, or insufficient permissions, + * etc. restoration might be partial. * @return whether restoration is complete */ public boolean restoreInitialStatus() throws IOException { @@ -344,10 +330,9 @@ public boolean restoreInitialStatus() throws IOException { } /** - * Restores the cluster to given state if this is a real cluster, - * otherwise does nothing. - * This is a best effort restore. If the servers are not reachable, or insufficient - * permissions, etc. restoration might be partial. + * Restores the cluster to given state if this is a real cluster, otherwise does nothing. This is + * a best effort restore. If the servers are not reachable, or insufficient permissions, etc. + * restoration might be partial. * @return whether restoration is complete */ public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOException { @@ -372,16 +357,16 @@ public abstract ServerName getServerHoldingRegion(final TableName tn, byte[] reg throws IOException; /** - * @return whether we are interacting with a distributed cluster as opposed to an - * in-process mini/local cluster. + * @return whether we are interacting with a distributed cluster as opposed to an in-process + * mini/local cluster. */ public boolean isDistributedCluster() { return false; } /** - * Closes all the resources held open for this cluster. Note that this call does not shutdown - * the cluster. + * Closes all the resources held open for this cluster. Note that this call does not shutdown the + * cluster. * @see #shutdown() */ @Override @@ -389,7 +374,6 @@ public boolean isDistributedCluster() { /** * Wait for the namenode. - * * @throws InterruptedException */ public void waitForNamenodeAvailable() throws InterruptedException { diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java index 1a5864959df2..f5c2441378e5 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java @@ -49,27 +49,20 @@ public class HBaseCommonTestingUtility { /** * Compression algorithms to use in parameterized JUnit 4 tests */ - public static final List COMPRESSION_ALGORITHMS_PARAMETERIZED = - Arrays.asList(new Object[][] { - { Compression.Algorithm.NONE }, - { Compression.Algorithm.GZ } - }); + public static final List COMPRESSION_ALGORITHMS_PARAMETERIZED = Arrays + .asList(new Object[][] { { Compression.Algorithm.NONE }, { Compression.Algorithm.GZ } }); /** * This is for unit tests parameterized with a two booleans. 
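// Hypothetical JUnit 4 usage of the parameter lists above: each Object[] from
// COMPRESSION_ALGORITHMS_PARAMETERIZED (or BOOLEAN_PARAMETERIZED) becomes one run of the test.
// The class name and test body are illustrative only.
@RunWith(Parameterized.class)
public class ExampleCompressionParamTest {
  @Parameterized.Parameters(name = "{index}: algo={0}")
  public static List<Object[]> params() {
    return HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS_PARAMETERIZED;
  }

  @Parameterized.Parameter
  public Compression.Algorithm algo;

  @Test
  public void testWithAlgorithm() {
    assertNotNull(algo); // exercise the code under test with 'algo' here
  }
}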
    */
   public static final List BOOLEAN_PARAMETERIZED =
-    Arrays.asList(new Object[][] {
-      {false},
-      {true}
-    });
+    Arrays.asList(new Object[][] { { false }, { true } });
 
   /**
    * Compression algorithms to use in testing
    */
-  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
-    Compression.Algorithm.NONE, Compression.Algorithm.GZ
-  };
+  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS =
+    { Compression.Algorithm.NONE, Compression.Algorithm.GZ };
 
   protected final Configuration conf;
@@ -83,7 +76,6 @@ public HBaseCommonTestingUtility(Configuration conf) {
 
   /**
    * Returns this classes's instance of {@link Configuration}.
-   *
    * @return Instance of Configuration.
    */
   public Configuration getConfiguration() {
@@ -93,8 +85,7 @@ public Configuration getConfiguration() {
   /**
    * System property key to get base test directory value
    */
-  public static final String BASE_TEST_DIRECTORY_KEY =
-    "test.build.data.basedirectory";
+  public static final String BASE_TEST_DIRECTORY_KEY = "test.build.data.basedirectory";
 
   /**
    * Default base directory for test output.
@@ -108,7 +99,7 @@ public Configuration getConfiguration() {
 
   /**
    * @return Where to write test data on local filesystem, specific to the test. Useful for tests
-   * that do not use a cluster. Creates it if it does not exist already.
+   *         that do not use a cluster. Creates it if it does not exist already.
    */
   public Path getDataTestDir() {
     if (this.dataTestDir == null) {
@@ -119,8 +110,8 @@ public Path getDataTestDir() {
   /**
    * @param name the name of a subdirectory or file in the test data directory
-   * @return Path to a subdirectory or file named {code subdirName} under
-   * {@link #getDataTestDir()}. Does *NOT* create the directory or file if it does not exist.
+   * @return Path to a subdirectory or file named {code subdirName} under {@link #getDataTestDir()}.
+   *         Does *NOT* create the directory or file if it does not exist.
    */
   public Path getDataTestDir(final String name) {
     return new Path(getDataTestDir(), name);
@@ -128,13 +119,11 @@ public Path getDataTestDir(final String name) {
 
   /**
    * Sets up a directory for a test to use.
-   *
    * @return New directory path, if created.
    */
   protected Path setupDataTestDir() {
     if (this.dataTestDir != null) {
-      LOG.warn("Data test dir already setup in " +
-        dataTestDir.getAbsolutePath());
+      LOG.warn("Data test dir already setup in " + dataTestDir.getAbsolutePath());
       return null;
     }
     Path testPath = getRandomDir();
@@ -160,8 +149,7 @@ public Path getRandomDir() {
   }
 
   public static UUID getRandomUUID() {
-    return new UUID(ThreadLocalRandom.current().nextLong(),
-      ThreadLocalRandom.current().nextLong());
+    return new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong());
   }
 
   protected void createSubDir(String propertyName, Path parent, String subDirName) {
@@ -208,14 +196,12 @@ public boolean cleanupTestDir(final String subdir) {
 
   /**
    * @return Where to write test data on local filesystem; usually
-   * {@link #DEFAULT_BASE_TEST_DIRECTORY}
-   * Should not be used by the unit tests, hence its's private.
-   * Unit test will use a subdirectory of this directory.
+   *         {@link #DEFAULT_BASE_TEST_DIRECTORY} Should not be used by the unit tests, hence its's
+   *         private. Unit test will use a subdirectory of this directory.
   * @see #setupDataTestDir()
   */
  private Path getBaseTestDir() {
-    String PathName = System.getProperty(
-      BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);
+    String PathName = System.getProperty(BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);
     return new Path(PathName);
   }
 
@@ -250,8 +236,7 @@ boolean deleteDir(final File dir) {
   /**
    * Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
    */
-  public long waitFor(long timeout, Predicate predicate)
-    throws E {
+  public long waitFor(long timeout, Predicate predicate) throws E {
     return Waiter.waitFor(this.conf, timeout, predicate);
   }
 
@@ -266,8 +251,8 @@ public long waitFor(long timeout, long interval, Predicate
   /**
    * Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
    */
-  public long waitFor(long timeout, long interval,
-    boolean failIfTimeout, Predicate predicate) throws E {
+  public long waitFor(long timeout, long interval, boolean failIfTimeout,
+    Predicate predicate) throws E {
     return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
   }
 
@@ -327,12 +312,12 @@ public int randomFreePort() {
   }
 
   /**
-   * Returns a random port. These ports cannot be registered with IANA and are
-   * intended for dynamic allocation (see http://bit.ly/dynports).
+   * Returns a random port. These ports cannot be registered with IANA and are intended for
+   * dynamic allocation (see http://bit.ly/dynports).
    */
   private int randomPort() {
     return MIN_RANDOM_PORT
-        + ThreadLocalRandom.current().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
+      + ThreadLocalRandom.current().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
   }
 
   interface AvailablePortChecker {
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 345c2d6d4053..5fe10812a9bd 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -150,20 +150,17 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 /**
- * Facility for testing HBase. Replacement for
- * old HBaseTestCase and HBaseClusterTestCase functionality.
- * Create an instance and keep it around testing HBase. This class is
- * meant to be your one-stop shop for anything you might need testing. Manages
- * one cluster at a time only. Managed cluster can be an in-process
- * {@link MiniHBaseCluster}, or a deployed cluster of type {@code DistributedHBaseCluster}.
- * Not all methods work with the real cluster.
- * Depends on log4j being on classpath and
- * hbase-site.xml for logging and test-run configuration. It does not set
- * logging levels.
- * In the configuration properties, default values for master-info-port and
- * region-server-port are overridden such that a random port will be assigned (thus
- * avoiding port contention if another local HBase instance is already running).
- * <p>To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
+ * Facility for testing HBase. Replacement for old HBaseTestCase and HBaseClusterTestCase
+ * functionality. Create an instance and keep it around testing HBase. This class is meant to be
+ * your one-stop shop for anything you might need testing. Manages one cluster at a time only.
+ * Managed cluster can be an in-process {@link MiniHBaseCluster}, or a deployed cluster of type
+ * {@code DistributedHBaseCluster}. Not all methods work with the real cluster. Depends on log4j
+ * being on classpath and hbase-site.xml for logging and test-run configuration. It does not set
+ * logging levels. In the configuration properties, default values for master-info-port and
+ * region-server-port are overridden such that a random port will be assigned (thus avoiding port
+ * contention if another local HBase instance is already running).
+ * <p>
+ * To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
  * setting it to true.
  * @deprecated since 3.0.0, will be removed in 4.0.0. Use
  *             {@link org.apache.hadoop.hbase.testing.TestingHBaseCluster} instead.
@@ -184,12 +181,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
 
   /**
-   * The default number of regions per regionserver when creating a pre-split
-   * table.
+   * The default number of regions per regionserver when creating a pre-split table.
    */
   public static final int DEFAULT_REGIONS_PER_SERVER = 3;
 
-
   public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
   public static final boolean PRESPLIT_TEST_TABLE = true;
 
@@ -203,8 +198,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   private String hadoopLogDir;
 
-  /** Directory on test filesystem where we put the data for this instance of
-   * HBaseTestingUtility*/
+  /**
+   * Directory on test filesystem where we put the data for this instance of HBaseTestingUtility
+   */
   private Path dataTestDirOnTestFS = null;
 
   private final AtomicReference asyncConnection = new AtomicReference<>();
@@ -217,7 +213,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   /**
    * Checks to see if a specific port is available.
-   *
    * @param port the port number to check for availability
    * @return true if the port is available, or false if not
    */
@@ -250,13 +245,11 @@ public static boolean available(int port) {
   }
 
   /**
-   * Create all combinations of Bloom filters and compression algorithms for
-   * testing.
+   * Create all combinations of Bloom filters and compression algorithms for testing.
    */
   private static List bloomAndCompressionCombinations() {
     List configurations = new ArrayList<>();
-    for (Compression.Algorithm comprAlgo :
-      HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
+    for (Compression.Algorithm comprAlgo : HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
       for (BloomType bloomType : BloomType.values()) {
         configurations.add(new Object[] { comprAlgo, bloomType });
       }
@@ -292,25 +285,25 @@ public static List memStoreTSTagsAndOffheapCombination() {
   public static final Collection BLOOM_AND_COMPRESSION_COMBINATIONS =
     bloomAndCompressionCombinations();
 
-
   /**
-   * <p>Create an HBaseTestingUtility using a default configuration.
-   *
-   * <p>Initially, all tmp files are written to a local test data directory.
-   * Once {@link #startMiniDFSCluster} is called, either directly or via
-   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
+   * <p>
+   * Create an HBaseTestingUtility using a default configuration.
+   * <p>
+   * Initially, all tmp files are written to a local test data directory. Once
+   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp
+   * data will be written to the DFS directory instead.
    */
   public HBaseTestingUtility() {
     this(HBaseConfiguration.create());
   }
 
   /**
-   * <p>Create an HBaseTestingUtility using a given configuration.
-   *
-   * <p>Initially, all tmp files are written to a local test data directory.
-   * Once {@link #startMiniDFSCluster} is called, either directly or via
-   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
-   *
+   * <p>
+   * Create an HBaseTestingUtility using a given configuration.
+   * <p>
+   * Initially, all tmp files are written to a local test data directory. Once
+   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp
+   * data will be written to the DFS directory instead.
    * @param conf The configuration to use for further operations
    */
   public HBaseTestingUtility(Configuration conf) {
@@ -329,21 +322,21 @@ public HBaseTestingUtility(Configuration conf) {
     // Every cluster is a local cluster until we start DFS
     // Note that conf could be null, but this.conf will not be
     String dataTestDir = getDataTestDir().toString();
-    this.conf.set("fs.defaultFS","file:///");
+    this.conf.set("fs.defaultFS", "file:///");
     this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
     LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
-    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE,false);
+    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
     // If the value for random ports isn't set set it to true, thus making
     // tests opt-out for random port assignment
     this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
-        this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
+      this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
   }
 
   /**
    * Close both the region {@code r} and it's underlying WAL. For use in tests.
    */
   public static void closeRegionAndWAL(final Region r) throws IOException {
-    closeRegionAndWAL((HRegion)r);
+    closeRegionAndWAL((HRegion) r);
   }
 
   /**
@@ -357,13 +350,11 @@ public static void closeRegionAndWAL(final HRegion r) throws IOException {
   }
 
   /**
-   * Returns this classes's instance of {@link Configuration}. Be careful how
-   * you use the returned Configuration since {@link Connection} instances
-   * can be shared. The Map of Connections is keyed by the Configuration. If
-   * say, a Connection was being used against a cluster that had been shutdown,
-   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
-   * be wholesome. Rather than use the return direct, its usually best to
-   * make a copy and use that. Do
+   * Returns this classes's instance of {@link Configuration}. Be careful how you use the returned
+   * Configuration since {@link Connection} instances can be shared. The Map of Connections is keyed
+   * by the Configuration. If say, a Connection was being used against a cluster that had been
+   * shutdown, see {@link #shutdownMiniCluster()}, then the Connection will no longer be wholesome.
+   * Rather than use the return direct, its usually best to make a copy and use that. Do
    * Configuration c = new Configuration(INSTANCE.getConfiguration());
    * @return Instance of Configuration.
    */
@@ -377,19 +368,14 @@ public void setHBaseCluster(HBaseCluster hbaseCluster) {
   }
 
   /**
-   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
-   * Give it a random name so can have many concurrent tests running if
-   * we need to. It needs to amend the {@link #TEST_DIRECTORY_KEY}
-   * System property, as it's what minidfscluster bases
-   * it data dir on. Moding a System property is not the way to do concurrent
-   * instances -- another instance could grab the temporary
-   * value unintentionally -- but not anything can do about it at moment;
-   * single instance only is how the minidfscluster works.
-   *
-   * We also create the underlying directory names for
-   * hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values
-   * in the conf, and as a system property for hadoop.tmp.dir (We do not create them!).
-   *
+   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. Give it a random name so can
+   * have many concurrent tests running if we need to. It needs to amend the
+   * {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases it data dir on.
+   * Moding a System property is not the way to do concurrent instances -- another instance could
+   * grab the temporary value unintentionally -- but not anything can do about it at moment; single
+   * instance only is how the minidfscluster works. We also create the underlying directory names
+   * for hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the
+   * conf, and as a system property for hadoop.tmp.dir (We do not create them!).
    * @return The calculated data test build directory, if newly-created.
    */
   @Override
@@ -399,40 +385,31 @@ protected Path setupDataTestDir() {
       return null;
     }
 
-    createSubDirAndSystemProperty(
-      "hadoop.log.dir",
-      testPath, "hadoop-log-dir");
+    createSubDirAndSystemProperty("hadoop.log.dir", testPath, "hadoop-log-dir");
 
     // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
-    //  we want our own value to ensure uniqueness on the same machine
-    createSubDirAndSystemProperty(
-      "hadoop.tmp.dir",
-      testPath, "hadoop-tmp-dir");
+    // we want our own value to ensure uniqueness on the same machine
+    createSubDirAndSystemProperty("hadoop.tmp.dir", testPath, "hadoop-tmp-dir");
 
     // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
-    createSubDir(
-      "mapreduce.cluster.local.dir",
-      testPath, "mapred-local-dir");
+    createSubDir("mapreduce.cluster.local.dir", testPath, "mapred-local-dir");
     return testPath;
   }
 
-  private void createSubDirAndSystemProperty(
-    String propertyName, Path parent, String subDirName){
+  private void createSubDirAndSystemProperty(String propertyName, Path parent, String subDirName) {
     String sysValue = System.getProperty(propertyName);
 
     if (sysValue != null) {
       // There is already a value set. So we do nothing but hope
-      //  that there will be no conflicts
-      LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
-        sysValue + " so I do NOT create it in " + parent);
+      // that there will be no conflicts
+      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue
+        + " so I do NOT create it in " + parent);
       String confValue = conf.get(propertyName);
-      if (confValue != null && !confValue.endsWith(sysValue)){
-        LOG.warn(
-          propertyName + " property value differs in configuration and system: "+
-          "Configuration="+confValue+" while System="+sysValue+
-          " Erasing configuration value by system value."
-        );
+      if (confValue != null && !confValue.endsWith(sysValue)) {
+        LOG.warn(propertyName + " property value differs in configuration and system: "
+          + "Configuration=" + confValue + " while System=" + sysValue
+          + " Erasing configuration value by system value.");
       }
       conf.set(propertyName, sysValue);
     } else {
@@ -443,8 +420,8 @@ private void createSubDirAndSystemProperty(
   }
 
   /**
-   * @return Where to write test data on the test filesystem; Returns working directory
-   * for the test filesystem by default
+   * @return Where to write test data on the test filesystem; Returns working directory for the test
+   *         filesystem by default
    * @see #setupDataTestDirOnTestFS()
    * @see #getTestFileSystem()
    */
@@ -454,9 +431,9 @@ private Path getBaseTestDirOnTestFS() throws IOException {
   }
 
   /**
-   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
-   * to write temporary test data. Call this method after setting up the mini dfs cluster
-   * if the test relies on it.
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
+   * temporary test data. Call this method after setting up the mini dfs cluster if the test relies
+   * on it.
    * @return a unique path in the test filesystem
    */
   public Path getDataTestDirOnTestFS() throws IOException {
@@ -468,9 +445,9 @@ public Path getDataTestDirOnTestFS() throws IOException {
   }
 
   /**
-   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
-   * to write temporary test data. Call this method after setting up the mini dfs cluster
-   * if the test relies on it.
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
+   * temporary test data. Call this method after setting up the mini dfs cluster if the test relies
+   * on it.
    * @return a unique path in the test filesystem
    * @param subdirName name of the subdir to create under the base test dir
    */
@@ -479,13 +456,12 @@ public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
   }
 
   /**
-   * Sets up a path in test filesystem to be used by tests.
-   * Creates a new directory if not already setup.
+   * Sets up a path in test filesystem to be used by tests. Creates a new directory if not already
+   * setup.
    */
   private void setupDataTestDirOnTestFS() throws IOException {
     if (dataTestDirOnTestFS != null) {
-      LOG.warn("Data test on test fs dir already setup in "
-        + dataTestDirOnTestFS.toString());
+      LOG.warn("Data test on test fs dir already setup in " + dataTestDirOnTestFS.toString());
       return;
     }
     dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
@@ -495,10 +471,10 @@ private void setupDataTestDirOnTestFS() throws IOException {
    * Sets up a new path in test filesystem to be used by tests.
    */
   private Path getNewDataTestDirOnTestFS() throws IOException {
-    //The file system can be either local, mini dfs, or if the configuration
-    //is supplied externally, it can be an external cluster FS. If it is a local
-    //file system, the tests should use getBaseTestDir, otherwise, we can use
-    //the working directory, and create a unique sub dir there
+    // The file system can be either local, mini dfs, or if the configuration
+    // is supplied externally, it can be an external cluster FS. If it is a local
+    // file system, the tests should use getBaseTestDir, otherwise, we can use
+    // the working directory, and create a unique sub dir there
     FileSystem fs = getTestFileSystem();
     Path newDataTestDir;
     String randomStr = getRandomUUID().toString();
@@ -521,8 +497,7 @@ private Path getNewDataTestDirOnTestFS() throws IOException {
    */
   public boolean cleanupDataTestDirOnTestFS() throws IOException {
     boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
-    if (ret)
-      dataTestDirOnTestFS = null;
+    if (ret) dataTestDirOnTestFS = null;
     return ret;
   }
 
@@ -548,19 +523,16 @@ public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
   }
 
   /**
-   * Start a minidfscluster.
-   * This is useful if you want to run datanode on distinct hosts for things
-   * like HDFS block location verification.
-   * If you start MiniDFSCluster without host names, all instances of the
-   * datanodes will have the same host name.
+   * Start a minidfscluster. This is useful if you want to run datanode on distinct hosts for things
+   * like HDFS block location verification. If you start MiniDFSCluster without host names, all
+   * instances of the datanodes will have the same host name.
    * @param hosts hostnames DNs to run on.
    * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
-  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
-    throws Exception {
-    if ( hosts != null && hosts.length != 0) {
+  public MiniDFSCluster startMiniDFSCluster(final String hosts[]) throws Exception {
+    if (hosts != null && hosts.length != 0) {
       return startMiniDFSCluster(hosts.length, hosts);
     } else {
       return startMiniDFSCluster(1, null);
@@ -568,21 +540,19 @@ public MiniDFSCluster startMiniDFSCluster(final String hosts[])
   }
 
   /**
-   * Start a minidfscluster.
-   * Can only create one.
+   * Start a minidfscluster. Can only create one.
    * @param servers How many DNs to start.
   * @param hosts hostnames DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
-  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
-    throws Exception {
+  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[]) throws Exception {
     return startMiniDFSCluster(servers, null, hosts);
   }
 
   private void setFs() throws IOException {
-    if(this.dfsCluster == null){
+    if (this.dfsCluster == null) {
       LOG.info("Skipping setting fs because dfsCluster is null");
       return;
     }
@@ -593,7 +563,7 @@ private void setFs() throws IOException {
     conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
   }
 
-  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
+  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
     throws Exception {
     createDirsAndSetProperties();
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
@@ -603,8 +573,8 @@
     Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
       "ERROR");
-    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
-      true, null, racks, hosts, null);
+    this.dfsCluster =
+      new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
 
     // Set this just-started cluster as our filesystem.
     setFs();
@@ -612,7 +582,7 @@
     // Wait for the cluster to be totally up
     this.dfsCluster.waitClusterUp();
 
-    //reset the test directory for test file system
+    // reset the test directory for test file system
     dataTestDirOnTestFS = null;
     String dataTestDir = getDataTestDir().toString();
     conf.set(HConstants.HBASE_DIR, dataTestDir);
@@ -627,8 +597,8 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE
     Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
     Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
       "ERROR");
-    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
-      null, null, null);
+    dfsCluster =
+      new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
     return dfsCluster;
   }
 
@@ -636,6 +606,7 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE
    * This is used before starting HDFS and map-reduce mini-clusters Run something like the below to
    * check for the likes of '/tmp' references -- i.e. references outside of the test data dir -- in
    * the conf.
+   *
    * <pre>
              * Configuration conf = TEST_UTIL.getConfiguration();
              * for (Iterator<Map.Entry<String, String>> i = conf.iterator(); i.hasNext();) {
          @@ -684,35 +655,35 @@ private void createDirsAndSetProperties() throws IOException {
             }
           
             /**
          -   *  Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating
          -   *  new column families. Default to false.
          +   * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating new column families.
          +   * Default to false.
              */
          -  public boolean isNewVersionBehaviorEnabled(){
          +  public boolean isNewVersionBehaviorEnabled() {
               final String propName = "hbase.tests.new.version.behavior";
               String v = System.getProperty(propName);
          -    if (v != null){
          +    if (v != null) {
                 return Boolean.parseBoolean(v);
               }
               return false;
             }
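
A brief usage sketch (my own illustration, not part of this patch): a JUnit 4 test can consult this flag when defining a column family. TEST_UTIL is a hypothetical HBaseTestingUtility field, setNewVersionBehavior is assumed from ColumnFamilyDescriptorBuilder, and the property name comes straight from the method above (e.g. -Dhbase.tests.new.version.behavior=true).

  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class NewVersionBehaviorSketch {
    private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

    // Build a family descriptor that honours -Dhbase.tests.new.version.behavior=true.
    static ColumnFamilyDescriptor familyForTest() {
      ColumnFamilyDescriptorBuilder builder =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"));
      if (TEST_UTIL.isNewVersionBehaviorEnabled()) {
        builder.setNewVersionBehavior(true);
      }
      return builder.build();
    }
  }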
           
             /**
          -   *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
          -   *  This allows to specify this parameter on the command line.
          -   *   If not set, default is true.
+   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property. This
+   * allows specifying this parameter on the command line. If not set, the default is true.
              */
          -  public boolean isReadShortCircuitOn(){
          +  public boolean isReadShortCircuitOn() {
               final String propName = "hbase.tests.use.shortcircuit.reads";
               String readOnProp = System.getProperty(propName);
          -    if (readOnProp != null){
          -      return  Boolean.parseBoolean(readOnProp);
          +    if (readOnProp != null) {
          +      return Boolean.parseBoolean(readOnProp);
               } else {
                 return conf.getBoolean(propName, false);
               }
             }
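
As a hedged illustration of the precedence described above (not taken from this change), a test run can force the value from the command line or programmatically; the system property always wins over the conf key of the same name.

  import org.apache.hadoop.hbase.HBaseTestingUtility;

  public class ShortCircuitReadSketch {
    public static void main(String[] args) {
      // Equivalent to passing -Dhbase.tests.use.shortcircuit.reads=true to the JVM.
      System.setProperty("hbase.tests.use.shortcircuit.reads", "true");
      HBaseTestingUtility util = new HBaseTestingUtility();
      // Prints true: the property overrides the conf value, whose conf-side default is false.
      System.out.println("short-circuit reads on: " + util.isReadShortCircuitOn());
    }
  }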
           
          -  /** Enable the short circuit read, unless configured differently.
          -   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
          +  /**
          +   * Enable the short circuit read, unless configured differently. Set both HBase and HDFS settings,
          +   * including skipping the hdfs checksum checks.
              */
             private void enableShortCircuit() {
               if (isReadShortCircuitOn()) {
          @@ -743,8 +714,7 @@ private String createDirAndSetProperty(final String relPath, String property) {
             }
           
             /**
          -   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
          -   * or does nothing.
          +   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing.
              * @throws IOException
              */
             public void shutdownMiniDFSCluster() throws IOException {
          @@ -764,236 +734,225 @@ public void shutdownMiniDFSCluster() throws IOException {
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .createWALDir(createWALDir).build();
          +    StartMiniClusterOption option =
          +        StartMiniClusterOption.builder().createWALDir(createWALDir).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
              * @param createRootDir Whether to create a new root or data directory path.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
          -  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir)
          -  throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir).build();
          +  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir) throws Exception {
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(numSlaves)
          +        .numDataNodes(numSlaves).createRootDir(createRootDir).build();
               return startMiniCluster(option);
             }
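
All of these deprecated overloads point at the same replacement. A minimal migration sketch for the call above (the builder method names are the ones used elsewhere in this patch):

  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.StartMiniClusterOption;

  public class StartMiniClusterMigrationSketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      // Old, deprecated form: util.startMiniCluster(3, true);
      StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(3).numDataNodes(3).createRootDir(true).build();
      util.startMiniCluster(option);
      try {
        // ... run test logic against the mini cluster ...
      } finally {
        util.shutdownMiniCluster();
      }
    }
  }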
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
              * @param createRootDir Whether to create a new root or data directory path.
              * @param createWALDir Whether to create a new WAL directory.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
                 boolean createWALDir) throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir)
          -        .createWALDir(createWALDir).build();
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(numSlaves)
          +        .numDataNodes(numSlaves).createRootDir(createRootDir).createWALDir(createWALDir).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
              * @param createRootDir Whether to create a new root or data directory path.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *  {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
          -    throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
          -        .numDataNodes(numSlaves).build();
          +      throws Exception {
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .numRegionServers(numSlaves).createRootDir(createRootDir).numDataNodes(numSlaves).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).numRegionServers(numSlaves).numDataNodes(numSlaves).build();
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
              * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
          -   *                      HDFS data node number.
          +   *          HDFS data node number.
              * @param createRootDir Whether to create a new root or data directory path.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
                 boolean createRootDir) throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
          -        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .numRegionServers(numSlaves).createRootDir(createRootDir).numDataNodes(numSlaves)
          +        .dataNodeHosts(dataNodeHosts).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
              * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
          -   *                      HDFS data node number.
          +   *          HDFS data node number.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
                 throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).numRegionServers(numSlaves)
          -        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .numRegionServers(numSlaves).numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numRegionServers Number of region servers.
              * @param numDataNodes Number of datanodes.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
                 throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).numRegionServers(numRegionServers).numDataNodes(numDataNodes)
          -        .build();
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .numRegionServers(numRegionServers).numDataNodes(numDataNodes).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
              * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
          -   *                      HDFS data node number.
          +   *          HDFS data node number.
              * @param masterClass The class to use as HMaster, or null for default.
              * @param rsClass The class to use as HRegionServer, or null for default.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
                 Class masterClass,
          -      Class rsClass)
          -      throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).masterClass(masterClass)
          -        .numRegionServers(numSlaves).rsClass(rsClass)
          -        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts)
          -        .build();
          +      Class rsClass) throws Exception {
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .masterClass(masterClass).numRegionServers(numSlaves).rsClass(rsClass)
          +        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numRegionServers Number of region servers.
              * @param numDataNodes Number of datanodes.
              * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
          -   *                      HDFS data node number.
          +   *          HDFS data node number.
              * @param masterClass The class to use as HMaster, or null for default.
              * @param rsClass The class to use as HRegionServer, or null for default.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
                 String[] dataNodeHosts, Class masterClass,
          -      Class rsClass)
          -    throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).masterClass(masterClass)
          -        .numRegionServers(numRegionServers).rsClass(rsClass)
          -        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
          -        .build();
          +      Class rsClass) throws Exception {
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
          +        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs, and zookeeper.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
          +   * defined in {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numRegionServers Number of region servers.
              * @param numDataNodes Number of datanodes.
              * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
          -   *                      HDFS data node number.
          +   *          HDFS data node number.
              * @param masterClass The class to use as HMaster, or null for default.
              * @param rsClass The class to use as HRegionServer, or null for default.
              * @param createRootDir Whether to create a new root or data directory path.
          @@ -1001,7 +960,7 @@ public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, i
              * @return The mini HBase cluster created.
              * @see #shutdownMiniCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
              * @see #startMiniCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
          @@ -1010,31 +969,29 @@ public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, i
                 String[] dataNodeHosts, Class masterClass,
                 Class rsClass, boolean createRootDir,
                 boolean createWALDir) throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).masterClass(masterClass)
          -        .numRegionServers(numRegionServers).rsClass(rsClass)
          -        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
          -        .createRootDir(createRootDir).createWALDir(createWALDir)
          -        .build();
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
          +        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts).createRootDir(createRootDir)
          +        .createWALDir(createWALDir).build();
               return startMiniCluster(option);
             }
           
             /**
          -   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number. All
          +   * other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
              * @param numSlaves slave node number, for both HBase region server and HDFS data node.
              * @see #startMiniCluster(StartMiniClusterOption option)
              * @see #shutdownMiniDFSCluster()
              */
             public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(numSlaves)
          +        .numDataNodes(numSlaves).build();
               return startMiniCluster(option);
             }
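
For orientation, a typical JUnit 4 lifecycle around this entry point looks roughly like the sketch below; createTable is assumed from the wider utility API rather than shown in this hunk, and the class and table names are illustrative.

  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;
  import org.junit.AfterClass;
  import org.junit.BeforeClass;
  import org.junit.Test;

  public class MiniClusterLifecycleSketch {
    private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

    @BeforeClass
    public static void setUp() throws Exception {
      // One slave: one region server and one HDFS data node; zookeeper is started too.
      TEST_UTIL.startMiniCluster(1);
    }

    @AfterClass
    public static void tearDown() throws Exception {
      TEST_UTIL.shutdownMiniCluster();
    }

    @Test
    public void testSomethingAgainstTheCluster() throws Exception {
      Table table = TEST_UTIL.createTable(TableName.valueOf("sketch"), Bytes.toBytes("cf"));
      // ... exercise the table ...
      table.close();
    }
  }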
           
             /**
          -   * Start up a minicluster of hbase, dfs and zookeeper all using default options.
          -   * Option default value can be found in {@link StartMiniClusterOption.Builder}.
          +   * Start up a minicluster of hbase, dfs and zookeeper all using default options. Option default
          +   * value can be found in {@link StartMiniClusterOption.Builder}.
              * @see #startMiniCluster(StartMiniClusterOption option)
              * @see #shutdownMiniDFSCluster()
              */
          @@ -1043,9 +1000,9 @@ public MiniHBaseCluster startMiniCluster() throws Exception {
             }
           
             /**
          -   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
          -   * It modifies Configuration.  It homes the cluster data directory under a random
          -   * subdirectory in a directory under System property test.build.data, to be cleaned up on exit.
          +   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed. It modifies
          +   * Configuration. It homes the cluster data directory under a random subdirectory in a directory
          +   * under System property test.build.data, to be cleaned up on exit.
              * @see #shutdownMiniDFSCluster()
              */
             public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
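
A fuller option sketch for this recommended entry point, limited to builder properties that appear elsewhere in this patch; the particular numbers are only illustrative.

  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.StartMiniClusterOption;

  public class StartMiniClusterOptionSketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(2)        // active master plus one backup
        .numRegionServers(3)
        .numDataNodes(3)
        .createRootDir(true)  // fresh root/data directory for this run
        .createWALDir(true)   // separate WAL directory
        .build();
      util.startMiniCluster(option);
      try {
        // ... test against util.getConnection() / util.getAdmin() ...
      } finally {
        util.shutdownMiniCluster();
      }
    }
  }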
          @@ -1086,7 +1043,7 @@ public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws E
              * @see #shutdownMiniHBaseCluster()
              */
             public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
          -    throws IOException, InterruptedException {
          +      throws IOException, InterruptedException {
               // Now do the mini hbase cluster. Set the hbase.rootdir in config.
               createRootDir(option.isCreateRootDir());
               if (option.isCreateWALDir()) {
          @@ -1110,13 +1067,13 @@ public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
           
               Configuration c = new Configuration(this.conf);
               this.hbaseCluster = new MiniHBaseCluster(c, option.getNumMasters(),
          -      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
          -      option.getMasterClass(), option.getRsClass());
          +        option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
          +        option.getMasterClass(), option.getRsClass());
               // Populate the master address configuration from mini cluster configuration.
               conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
               // Don't leave here till we've done a successful scan of the hbase:meta
               try (Table t = getConnection().getTable(TableName.META_TABLE_NAME);
          -      ResultScanner s = t.getScanner(new Scan())) {
          +        ResultScanner s = t.getScanner(new Scan())) {
                 for (;;) {
                   if (s.next() == null) {
                     break;
          @@ -1124,7 +1081,6 @@ public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
                 }
               }
           
          -
               getAdmin(); // create immediately the hbaseAdmin
               LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());
           
          @@ -1132,8 +1088,8 @@ public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
             }
           
             /**
          -   * Starts up mini hbase cluster using default options.
          -   * Default options can be found in {@link StartMiniClusterOption.Builder}.
          +   * Starts up mini hbase cluster using default options. Default options can be found in
          +   * {@link StartMiniClusterOption.Builder}.
              * @see #startMiniHBaseCluster(StartMiniClusterOption)
              * @see #shutdownMiniHBaseCluster()
              */
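
A sketch of the "keep dfs and zookeeper up, bounce only hbase" pattern this method enables; startMiniZKCluster is assumed from the HBaseZKTestingUtility parent class and is not shown in this hunk.

  import org.apache.hadoop.hbase.HBaseTestingUtility;

  public class RestartHBaseOnlySketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      util.startMiniDFSCluster(1);  // HDFS stays up for the whole run
      util.startMiniZKCluster();    // so does zookeeper
      util.startMiniHBaseCluster(); // default options

      // ... exercise the cluster, then restart just the HBase layer ...
      util.shutdownMiniHBaseCluster();
      util.startMiniHBaseCluster();

      util.shutdownMiniCluster();   // tears everything down at the end
    }
  }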
          @@ -1142,52 +1098,52 @@ public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedE
             }
           
             /**
          -   * Starts up mini hbase cluster.
          -   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
          +   * {@link #startMiniCluster()}. All other options will use default values, defined in
          +   * {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numRegionServers Number of region servers.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniHBaseCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
              * @see #startMiniHBaseCluster(StartMiniClusterOption)
              * @see HBASE-21071
              */
             @Deprecated
             public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
                 throws IOException, InterruptedException {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).numRegionServers(numRegionServers).build();
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .numRegionServers(numRegionServers).build();
               return startMiniHBaseCluster(option);
             }
           
             /**
          -   * Starts up mini hbase cluster.
          -   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
          +   * {@link #startMiniCluster()}. All other options will use default values, defined in
          +   * {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numRegionServers Number of region servers.
              * @param rsPorts Ports that RegionServer should use.
              * @return The mini HBase cluster created.
              * @see #shutdownMiniHBaseCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
              * @see #startMiniHBaseCluster(StartMiniClusterOption)
               * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
              */
             @Deprecated
             public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
                  List<Integer> rsPorts) throws IOException, InterruptedException {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).numRegionServers(numRegionServers).rsPorts(rsPorts).build();
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .numRegionServers(numRegionServers).rsPorts(rsPorts).build();
               return startMiniHBaseCluster(option);
             }
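           The deprecation notes above all point callers at the option-based overload; a minimal
           sketch of that style, assuming an already-configured HBaseTestingUtility named util,
           might look like this (the meaning of port 0 is an assumption, not taken from the patch):

               // Hypothetical migration sketch: express the old int/list arguments through the builder.
               StartMiniClusterOption option = StartMiniClusterOption.builder()
                   .numMasters(1)                      // one active master
                   .numRegionServers(3)                // three region servers
                   .rsPorts(Arrays.asList(0, 0, 0))    // assumed here: 0 lets each RS pick a free port
                   .build();
               MiniHBaseCluster cluster = util.startMiniHBaseCluster(option);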
           
             /**
          -   * Starts up mini hbase cluster.
          -   * Usually you won't want this.  You'll usually want {@link #startMiniCluster()}.
          -   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
          +   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
          +   * {@link #startMiniCluster()}. All other options will use default values, defined in
          +   * {@link StartMiniClusterOption.Builder}.
              * @param numMasters Master node number.
              * @param numRegionServers Number of region servers.
              * @param rsPorts Ports that RegionServer should use.
          @@ -1198,25 +1154,24 @@ public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServe
              * @return The mini HBase cluster created.
              * @see #shutdownMiniHBaseCluster()
              * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
          -   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
          +   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
              * @see #startMiniHBaseCluster(StartMiniClusterOption)
               * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
              */
             @Deprecated
             public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
                  List<Integer> rsPorts, Class<? extends HMaster> masterClass,
           -      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
          -      boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
          -    StartMiniClusterOption option = StartMiniClusterOption.builder()
          -        .numMasters(numMasters).masterClass(masterClass)
          -        .numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
          -        .createRootDir(createRootDir).createWALDir(createWALDir).build();
           +      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
          +      boolean createWALDir) throws IOException, InterruptedException {
          +    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
          +        .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
          +        .rsPorts(rsPorts).createRootDir(createRootDir).createWALDir(createWALDir).build();
               return startMiniHBaseCluster(option);
             }
           
             /**
          -   * Starts the hbase cluster up again after shutting it down previously in a
          -   * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
          +   * Starts the hbase cluster up again after shutting it down previously in a test. Use this if you
          +   * want to keep dfs/zk up and just stop/start hbase.
              * @param servers number of region servers
              */
             public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
           @@ -1234,10 +1189,9 @@ public void restartHBaseCluster(int servers, List<Integer> ports)
             public void restartHBaseCluster(StartMiniClusterOption option)
                 throws IOException, InterruptedException {
               closeConnection();
          -    this.hbaseCluster =
          -        new MiniHBaseCluster(this.conf, option.getNumMasters(), option.getNumAlwaysStandByMasters(),
          -            option.getNumRegionServers(), option.getRsPorts(), option.getMasterClass(),
          -            option.getRsClass());
          +    this.hbaseCluster = new MiniHBaseCluster(this.conf, option.getNumMasters(),
          +        option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
          +        option.getMasterClass(), option.getRsClass());
               // Don't leave here till we've done a successful scan of the hbase:meta
               Connection conn = ConnectionFactory.createConnection(this.conf);
               Table t = conn.getTable(TableName.META_TABLE_NAME);
          @@ -1252,16 +1206,16 @@ public void restartHBaseCluster(StartMiniClusterOption option)
             }
           
             /**
          -   * @return Current mini hbase cluster. Only has something in it after a call
          -   * to {@link #startMiniCluster()}.
          +   * @return Current mini hbase cluster. Only has something in it after a call to
          +   *         {@link #startMiniCluster()}.
              * @see #startMiniCluster()
              */
             public MiniHBaseCluster getMiniHBaseCluster() {
               if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
          -      return (MiniHBaseCluster)this.hbaseCluster;
          +      return (MiniHBaseCluster) this.hbaseCluster;
               }
          -    throw new RuntimeException(hbaseCluster + " not an instance of " +
          -                               MiniHBaseCluster.class.getName());
          +    throw new RuntimeException(
          +        hbaseCluster + " not an instance of " + MiniHBaseCluster.class.getName());
             }
           
             /**
          @@ -1322,10 +1276,9 @@ private void cleanup() throws IOException {
             }
           
             /**
          -   * Returns the path to the default root dir the minicluster uses. If create
          -   * is true, a new root directory path is fetched irrespective of whether it has been fetched
          -   * before or not. If false, previous path is used.
          -   * Note: this does not cause the root dir to be created.
          +   * Returns the path to the default root dir the minicluster uses. If create is true,
          +   * a new root directory path is fetched irrespective of whether it has been fetched before or not.
          +   * If false, previous path is used. Note: this does not cause the root dir to be created.
              * @return Fully qualified path for the default hbase root dir
              * @throws IOException
              */
          @@ -1338,9 +1291,8 @@ public Path getDefaultRootDirPath(boolean create) throws IOException {
             }
           
             /**
          -   * Same as {{@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)}
          -   * except that create flag is false.
          -   * Note: this does not cause the root dir to be created.
           +   * Same as {@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)} except that
          +   * create flag is false. Note: this does not cause the root dir to be created.
              * @return Fully qualified path for the default hbase root dir
              * @throws IOException
              */
          @@ -1349,14 +1301,12 @@ public Path getDefaultRootDirPath() throws IOException {
             }
           
             /**
          -   * Creates an hbase rootdir in user home directory.  Also creates hbase
          -   * version file.  Normally you won't make use of this method.  Root hbasedir
          -   * is created for you as part of mini cluster startup.  You'd only use this
          -   * method if you were doing manual operation.
          -   * @param create This flag decides whether to get a new
          -   * root or data directory path or not, if it has been fetched already.
          -   * Note : Directory will be made irrespective of whether path has been fetched or not.
          -   * If directory already exists, it will be overwritten
          +   * Creates an hbase rootdir in user home directory. Also creates hbase version file. Normally you
          +   * won't make use of this method. Root hbasedir is created for you as part of mini cluster
          +   * startup. You'd only use this method if you were doing manual operation.
          +   * @param create This flag decides whether to get a new root or data directory path or not, if it
          +   *          has been fetched already. Note : Directory will be made irrespective of whether path
          +   *          has been fetched or not. If directory already exists, it will be overwritten
              * @return Fully qualified path to hbase root dir
              * @throws IOException
              */
          @@ -1370,8 +1320,8 @@ public Path createRootDir(boolean create) throws IOException {
             }
           
             /**
          -   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
          -   * except that create flag is false.
          +   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)} except that
          +   * create flag is false.
              * @return Fully qualified path to hbase root dir
              * @throws IOException
              */
          @@ -1380,14 +1330,12 @@ public Path createRootDir() throws IOException {
             }
           
             /**
          -   * Creates a hbase walDir in the user's home directory.
          -   * Normally you won't make use of this method. Root hbaseWALDir
          -   * is created for you as part of mini cluster startup. You'd only use this
          -   * method if you were doing manual operation.
          -   *
          +   * Creates a hbase walDir in the user's home directory. Normally you won't make use of this
          +   * method. Root hbaseWALDir is created for you as part of mini cluster startup. You'd only use
          +   * this method if you were doing manual operation.
              * @return Fully qualified path to hbase root dir
              * @throws IOException
          -  */
          +   */
             public Path createWALRootDir() throws IOException {
               FileSystem fs = FileSystem.get(this.conf);
               Path walDir = getNewDataTestDirOnTestFS();
          @@ -1399,7 +1347,7 @@ public Path createWALRootDir() throws IOException {
             private void setHBaseFsTmpDir() throws IOException {
               String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
               if (hbaseFsTmpDirInString == null) {
          -      this.conf.set("hbase.fs.tmp.dir",  getDataTestDirOnTestFS("hbase-staging").toString());
          +      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
                 LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
               } else {
                 LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
          @@ -1441,9 +1389,8 @@ public void compact(TableName tableName, boolean major) throws IOException {
              * @return A Table instance for the created table.
              * @throws IOException
              */
          -  public Table createTable(TableName tableName, String family)
          -  throws IOException{
          -    return createTable(tableName, new String[]{family});
          +  public Table createTable(TableName tableName, String family) throws IOException {
          +    return createTable(tableName, new String[] { family });
             }
           
             /**
          @@ -1453,8 +1400,7 @@ public Table createTable(TableName tableName, String family)
              * @return A Table instance for the created table.
              * @throws IOException
              */
          -  public Table createTable(TableName tableName, String[] families)
          -  throws IOException {
          +  public Table createTable(TableName tableName, String[] families) throws IOException {
                List<byte[]> fams = new ArrayList<>(families.length);
               for (String family : families) {
                 fams.add(Bytes.toBytes(family));
          @@ -1469,9 +1415,8 @@ public Table createTable(TableName tableName, String[] families)
              * @return A Table instance for the created table.
              * @throws IOException
              */
          -  public Table createTable(TableName tableName, byte[] family)
          -  throws IOException{
          -    return createTable(tableName, new byte[][]{family});
          +  public Table createTable(TableName tableName, byte[] family) throws IOException {
          +    return createTable(tableName, new byte[][] { family });
             }
           
             /**
          @@ -1499,8 +1444,7 @@ public Table createMultiRegionTable(TableName tableName, byte[] family, int numR
              * @return A Table instance for the created table.
              * @throws IOException
              */
          -  public Table createTable(TableName tableName, byte[][] families)
          -  throws IOException {
          +  public Table createTable(TableName tableName, byte[][] families) throws IOException {
               return createTable(tableName, families, (byte[][]) null);
             }
           
          @@ -1524,7 +1468,7 @@ public Table createMultiRegionTable(TableName tableName, byte[][] families) thro
              * @throws IOException
              */
             public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families)
          -    throws IOException {
          +      throws IOException {
               return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE, replicaCount);
             }
           
          @@ -1557,7 +1501,7 @@ public Table createTable(TableName tableName, byte[][] families, byte[][] splitK
             }
           
             public Table createTable(TableName tableName, byte[][] families, int numVersions, byte[] startKey,
          -    byte[] endKey, int numRegions) throws IOException {
          +      byte[] endKey, int numRegions) throws IOException {
               TableDescriptor desc = createTableDescriptor(tableName, families, numVersions);
           
               getAdmin().createTable(desc, startKey, endKey, numRegions);
          @@ -1573,7 +1517,7 @@ public Table createTable(TableName tableName, byte[][] families, int numVersions
              * @return A Table instance for the created table.
              */
             public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
          -    throws IOException {
          +      throws IOException {
               return createTable(htd, families, null, c);
             }
           
          @@ -1611,10 +1555,9 @@ public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitK
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
               for (byte[] family : families) {
                 ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
          -        .setBloomFilterType(type)
          -        .setBlocksize(blockSize);
          +          .setBloomFilterType(type).setBlocksize(blockSize);
                 if (isNewVersionBehaviorEnabled()) {
          -          cfdb.setNewVersionBehavior(true);
          +        cfdb.setNewVersionBehavior(true);
                 }
                 builder.setColumnFamily(cfdb.build());
               }
          @@ -1637,13 +1580,12 @@ public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitK
              * @return A Table instance for the created table.
              * @throws IOException
              */
          -  public Table createTable(TableDescriptor htd, byte[][] splitRows)
          -      throws IOException {
          +  public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
               if (isNewVersionBehaviorEnabled()) {
                 for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
          -         builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
          -           .setNewVersionBehavior(true).build());
          +        builder.setColumnFamily(
          +          ColumnFamilyDescriptorBuilder.newBuilder(family).setNewVersionBehavior(true).build());
                 }
               }
               if (splitRows != null) {
          @@ -1667,9 +1609,9 @@ public Table createTable(TableDescriptor htd, byte[][] splitRows)
              * @return A Table instance for the created table.
              */
             public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
          -    int replicaCount, final Configuration c) throws IOException {
          +      int replicaCount, final Configuration c) throws IOException {
               TableDescriptor htd =
          -      TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(replicaCount).build();
          +        TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(replicaCount).build();
               return createTable(htd, families, splitKeys, c);
             }
           
          @@ -1698,8 +1640,8 @@ public Table createTable(TableName tableName, byte[][] families, int numVersions
                 byte[][] splitKeys) throws IOException {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
               for (byte[] family : families) {
          -      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
          -        .setMaxVersions(numVersions);
          +      ColumnFamilyDescriptorBuilder cfBuilder =
          +          ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions);
                 if (isNewVersionBehaviorEnabled()) {
                   cfBuilder.setNewVersionBehavior(true);
                 }
          @@ -1729,12 +1671,12 @@ public Table createMultiRegionTable(TableName tableName, byte[][] families, int
              * Create a table.
              * @return A Table instance for the created table.
              */
          -  public Table createTable(TableName tableName, byte[][] families,
          -    int numVersions, int blockSize) throws IOException {
          +  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize)
          +      throws IOException {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
               for (byte[] family : families) {
                 ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
          -        .setMaxVersions(numVersions).setBlocksize(blockSize);
          +          .setMaxVersions(numVersions).setBlocksize(blockSize);
                 if (isNewVersionBehaviorEnabled()) {
                   cfBuilder.setNewVersionBehavior(true);
                 }
          @@ -1747,12 +1689,12 @@ public Table createTable(TableName tableName, byte[][] families,
               return getConnection().getTable(tableName);
             }
           
          -  public Table createTable(TableName tableName, byte[][] families,
          -      int numVersions, int blockSize, String cpName) throws IOException {
          +  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize,
          +      String cpName) throws IOException {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
               for (byte[] family : families) {
                 ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
          -        .setMaxVersions(numVersions).setBlocksize(blockSize);
          +          .setMaxVersions(numVersions).setBlocksize(blockSize);
                 if (isNewVersionBehaviorEnabled()) {
                   cfBuilder.setNewVersionBehavior(true);
                 }
          @@ -1773,12 +1715,12 @@ public Table createTable(TableName tableName, byte[][] families,
              * @return A Table instance for the created table.
              */
             public Table createTable(TableName tableName, byte[][] families, int[] numVersions)
          -    throws IOException {
          +      throws IOException {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
               int i = 0;
               for (byte[] family : families) {
                 ColumnFamilyDescriptorBuilder cfBuilder =
          -        ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions[i]);
          +          ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions[i]);
                 if (isNewVersionBehaviorEnabled()) {
                   cfBuilder.setNewVersionBehavior(true);
                 }
          @@ -1797,7 +1739,7 @@ public Table createTable(TableName tableName, byte[][] families, int[] numVersio
              * @return A Table instance for the created table.
              */
             public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
          -    throws IOException {
          +      throws IOException {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
               ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family);
               if (isNewVersionBehaviorEnabled()) {
          @@ -1822,7 +1764,7 @@ public Table createMultiRegionTable(TableName tableName, byte[] family) throws I
             /**
              * Modify a table, synchronous.
              * @deprecated since 3.0.0 and will be removed in 4.0.0. Just use
          -   *   {@link Admin#modifyTable(TableDescriptor)} directly as it is synchronous now.
          +   *             {@link Admin#modifyTable(TableDescriptor)} directly as it is synchronous now.
              * @see Admin#modifyTable(TableDescriptor)
               * @see <a href="https://issues.apache.org/jira/browse/HBASE-22002">HBASE-22002</a>
              */
          @@ -1836,9 +1778,9 @@ public static void modifyTableSync(Admin admin, TableDescriptor desc)
              * Set the number of Region replicas.
              */
             public static void setReplicas(Admin admin, TableName table, int replicaCount)
          -    throws IOException, InterruptedException {
          +      throws IOException, InterruptedException {
               TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
          -      .setRegionReplication(replicaCount).build();
          +        .setRegionReplication(replicaCount).build();
               admin.modifyTable(desc);
             }
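           The deprecation note above says Admin#modifyTable is synchronous now, so test code no
           longer needs a modifyTableSync wrapper; a minimal sketch of the direct call, assuming an
           existing tableName and a running HBaseTestingUtility named util:

               Admin admin = util.getAdmin();
               TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName))
                   .setRegionReplication(2)   // example change: two replicas per region
                   .build();
               admin.modifyTable(desc);       // returns only after the modification completes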
           
          @@ -1870,15 +1812,15 @@ public void deleteTableIfAny(TableName tableName) throws IOException {
             // ==========================================================================
             // Canned table and table descriptor creation
           
          -  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
          -  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
          -  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
          -  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
          +  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
          +  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
          +  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
          +  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
             private static final int MAXVERSIONS = 3;
           
             public static final char FIRST_CHAR = 'a';
             public static final char LAST_CHAR = 'z';
          -  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
          +  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
             public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
           
             public TableDescriptorBuilder createModifyableTableDescriptor(final String name) {
          @@ -1888,12 +1830,12 @@ public TableDescriptorBuilder createModifyableTableDescriptor(final String name)
             }
           
             public TableDescriptor createTableDescriptor(final TableName name, final int minVersions,
          -    final int versions, final int ttl, KeepDeletedCells keepDeleted) {
          +      final int versions, final int ttl, KeepDeletedCells keepDeleted) {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
               for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
                 ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(cfName)
          -        .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted)
          -        .setBlockCacheEnabled(false).setTimeToLive(ttl);
          +          .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted)
          +          .setBlockCacheEnabled(false).setTimeToLive(ttl);
                 if (isNewVersionBehaviorEnabled()) {
                   cfBuilder.setNewVersionBehavior(true);
                 }
          @@ -1903,12 +1845,12 @@ public TableDescriptor createTableDescriptor(final TableName name, final int min
             }
           
             public TableDescriptorBuilder createModifyableTableDescriptor(final TableName name,
          -    final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
          +      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
               for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
                 ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(cfName)
          -        .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted)
          -        .setBlockCacheEnabled(false).setTimeToLive(ttl);
          +          .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted)
          +          .setBlockCacheEnabled(false).setTimeToLive(ttl);
                 if (isNewVersionBehaviorEnabled()) {
                   cfBuilder.setNewVersionBehavior(true);
                 }
          @@ -1932,11 +1874,11 @@ public TableDescriptor createTableDescriptor(final TableName tableName, byte[] f
             }
           
             public TableDescriptor createTableDescriptor(final TableName tableName, byte[][] families,
          -    int maxVersions) {
          +      int maxVersions) {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
               for (byte[] family : families) {
                 ColumnFamilyDescriptorBuilder cfBuilder =
          -        ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(maxVersions);
          +          ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(maxVersions);
                 if (isNewVersionBehaviorEnabled()) {
                   cfBuilder.setNewVersionBehavior(true);
                 }
          @@ -1953,9 +1895,9 @@ public TableDescriptor createTableDescriptor(final TableName tableName, byte[][]
              * @return a region that writes to local dir for testing
              */
             public HRegion createLocalHRegion(TableDescriptor desc, byte[] startKey, byte[] endKey)
          -    throws IOException {
          +      throws IOException {
               RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName()).setStartKey(startKey)
          -      .setEndKey(endKey).build();
          +        .setEndKey(endKey).build();
               return createLocalHRegion(hri, desc);
             }
           
          @@ -1987,20 +1929,20 @@ public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDesc
              * @param stopKey
              * @param isReadOnly
              * @param families
          -   * @return A region on which you must call
          -   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
          +   * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
          +   *         when done.
              * @throws IOException
              */
             public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
                 Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
                 throws IOException {
               return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly,
          -        durability, wal, null, families);
          +      durability, wal, null, families);
             }
           
             public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
          -    byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal,
          -    boolean[] compactedMemStore, byte[]... families) throws IOException {
          +      byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal,
          +      boolean[] compactedMemStore, byte[]... families) throws IOException {
               TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
               builder.setReadOnly(isReadOnly);
               int i = 0;
          @@ -2019,7 +1961,7 @@ public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] s
               }
               builder.setDurability(durability);
               RegionInfo info =
          -      RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build();
          +        RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build();
               return createLocalHRegion(info, conf, builder.build(), wal);
             }
           
          @@ -2027,8 +1969,8 @@ public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] s
             // ==========================================================================
           
             /**
          -   * Provide an existing table name to truncate.
          -   * Scans the table and issues a delete for each row read.
          +   * Provide an existing table name to truncate. Scans the table and issues a delete for each row
          +   * read.
              * @param tableName existing table
              * @return HTable to that new table
              * @throws IOException
          @@ -2037,7 +1979,7 @@ public Table deleteTableData(TableName tableName) throws IOException {
               Table table = getConnection().getTable(tableName);
               Scan scan = new Scan();
               ResultScanner resScan = table.getScanner(scan);
          -    for(Result res : resScan) {
          +    for (Result res : resScan) {
                 Delete del = new Delete(res.getRow());
                 table.delete(del);
               }
          @@ -2047,14 +1989,14 @@ public Table deleteTableData(TableName tableName) throws IOException {
             }
           
             /**
          -   * Truncate a table using the admin command.
          -   * Effectively disables, deletes, and recreates the table.
          +   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
          +   * table.
              * @param tableName table which must exist.
              * @param preserveRegions keep the existing split points
              * @return HTable for the new table
              */
          -  public Table truncateTable(final TableName tableName, final boolean preserveRegions) throws
          -      IOException {
          +  public Table truncateTable(final TableName tableName, final boolean preserveRegions)
          +      throws IOException {
               Admin admin = getAdmin();
               if (!admin.isTableDisabled(tableName)) {
                 admin.disableTable(tableName);
          @@ -2064,11 +2006,9 @@ public Table truncateTable(final TableName tableName, final boolean preserveRegi
             }
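           The two cleanup paths documented above behave quite differently; a minimal sketch,
           assuming a table named "t1" and a running HBaseTestingUtility named util:

               TableName name = TableName.valueOf("t1");
               // Row-by-row deletes: the table, its schema and its regions stay exactly as they are.
               util.deleteTableData(name);
               // Admin truncate: disable + delete + recreate; 'true' keeps the existing split points.
               util.truncateTable(name, true);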
           
             /**
          -   * Truncate a table using the admin command.
          -   * Effectively disables, deletes, and recreates the table.
          -   * For previous behavior of issuing row deletes, see
          -   * deleteTableData.
          -   * Expressly does not preserve regions of existing table.
          +   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
          +   * table. For previous behavior of issuing row deletes, see deleteTableData. Expressly does not
          +   * preserve regions of existing table.
              * @param tableName table which must exist.
              * @return HTable for the new table
              */
          @@ -2084,7 +2024,7 @@ public Table truncateTable(final TableName tableName) throws IOException {
              * @throws IOException
              */
             public int loadTable(final Table t, final byte[] f) throws IOException {
          -    return loadTable(t, new byte[][] {f});
          +    return loadTable(t, new byte[][] { f });
             }
           
             /**
          @@ -2095,7 +2035,7 @@ public int loadTable(final Table t, final byte[] f) throws IOException {
              * @throws IOException
              */
             public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
          -    return loadTable(t, new byte[][] {f}, null, writeToWAL);
          +    return loadTable(t, new byte[][] { f }, null, writeToWAL);
             }
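           The loadTable overloads above write the fixed 'aaa'..'zzz' row space, which is what the
           SeenRowTracker further down is meant to validate; a minimal sketch, assuming a table with
           family f already created through util and the tracker's (startRow, stopRow) constructor:

               int loaded = util.loadTable(table, f);   // returns the number of rows written
               SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));
               Scan scan = new Scan().withStartRow(Bytes.toBytes("aaa")).withStopRow(Bytes.toBytes("zzz"));
               try (ResultScanner scanner = table.getScanner(scan)) {
                 for (Result r : scanner) {
                   tracker.addRow(r.getRow());
                 }
               }
               tracker.validate();   // throws if any row in [aaa, zzz) was missed or seen twice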
           
             /**
          @@ -2129,8 +2069,8 @@ public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOExc
              * @return Count of rows loaded.
              * @throws IOException
              */
          -  public int loadTable(final Table t, final byte[][] f, byte[] value,
          -      boolean writeToWAL) throws IOException {
          +  public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL)
          +      throws IOException {
                List<Put> puts = new ArrayList<>();
               for (byte[] row : HBaseTestingUtility.ROWS) {
                 Put put = new Put(row);
          @@ -2145,12 +2085,13 @@ public int loadTable(final Table t, final byte[][] f, byte[] value,
               return puts.size();
             }
           
          -  /** A tracker for tracking and validating table rows
          -   * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
          +  /**
          +   * A tracker for tracking and validating table rows generated with
          +   * {@link HBaseTestingUtility#loadTable(Table, byte[])}
              */
             public static class SeenRowTracker {
               int dim = 'z' - 'a' + 1;
          -    int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
          +    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
               byte[] startRow;
               byte[] stopRow;
           
          @@ -2173,8 +2114,9 @@ public void addRow(byte[] row) {
                 seenRows[i(row[0])][i(row[1])][i(row[2])]++;
               }
           
          -    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
          -     * all other rows none
          +    /**
          +     * Validate that all the rows between startRow and stopRow are seen exactly once, and all other
          +     * rows none
                */
               public void validate() {
                 for (byte b1 = 'a'; b1 <= 'z'; b1++) {
          @@ -2182,14 +2124,14 @@ public void validate() {
                     for (byte b3 = 'a'; b3 <= 'z'; b3++) {
                       int count = seenRows[i(b1)][i(b2)][i(b3)];
                       int expectedCount = 0;
          -            if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
          -                && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
          +            if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
          +                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) {
                         expectedCount = 1;
                       }
                       if (count != expectedCount) {
          -              String row = new String(new byte[] {b1,b2,b3}, StandardCharsets.UTF_8);
          -              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " +
          -                  "instead of " + expectedCount);
          +              String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8);
          +              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " "
          +                  + "instead of " + expectedCount);
                       }
                     }
                   }
          @@ -2202,7 +2144,7 @@ public int loadRegion(final HRegion r, final byte[] f) throws IOException {
             }
           
             public int loadRegion(final Region r, final byte[] f) throws IOException {
          -    return loadRegion((HRegion)r, f);
          +    return loadRegion((HRegion) r, f);
             }
           
             /**
          @@ -2213,8 +2155,7 @@ public int loadRegion(final Region r, final byte[] f) throws IOException {
              * @return Count of rows loaded.
              * @throws IOException
              */
          -  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
          -  throws IOException {
          +  public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
               byte[] k = new byte[3];
               int rowCount = 0;
               for (byte b1 = 'a'; b1 <= 'z'; b1++) {
          @@ -2266,14 +2207,13 @@ public void loadRandomRows(final Table t, final byte[] f, int rowSize, int total
               for (int i = 0; i < totalRows; i++) {
                 Bytes.random(row);
                 Put put = new Put(row);
          -      put.addColumn(f, new byte[]{0}, new byte[]{0});
          +      put.addColumn(f, new byte[] { 0 }, new byte[] { 0 });
                 t.put(put);
               }
             }
           
             public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
          -      int replicaId)
          -      throws IOException {
          +      int replicaId) throws IOException {
               for (int i = startRow; i < endRow; i++) {
                 String failMsg = "Failed verification of row :" + i;
                 byte[] data = Bytes.toBytes(String.valueOf(i));
          @@ -2295,7 +2235,7 @@ public void verifyNumericRows(Table table, final byte[] f, int startRow, int end
           
             public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
                 throws IOException {
          -    verifyNumericRows((HRegion)region, f, startRow, endRow);
          +    verifyNumericRows((HRegion) region, f, startRow, endRow);
             }
           
             public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
          @@ -2305,7 +2245,7 @@ public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int
           
             public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
                 final boolean present) throws IOException {
          -    verifyNumericRows((HRegion)region, f, startRow, endRow, present);
          +    verifyNumericRows((HRegion) region, f, startRow, endRow, present);
             }
           
             public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
          @@ -2318,7 +2258,7 @@ public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int
                 boolean hasResult = result != null && !result.isEmpty();
                 if (present != hasResult) {
                   throw new AssertionError(
          -          failMsg + result + " expected:<" + present + "> but was:<" + hasResult + ">");
          +            failMsg + result + " expected:<" + present + "> but was:<" + hasResult + ">");
                 }
                 if (!present) continue;
           
          @@ -2365,7 +2305,7 @@ public static int countRows(final Table table, final Scan scan) throws IOExcepti
           
             public int countRows(final Table table, final byte[]... families) throws IOException {
               Scan scan = new Scan();
          -    for (byte[] family: families) {
          +    for (byte[] family : families) {
                 scan.addFamily(family);
               }
               return countRows(table, scan);
          @@ -2439,34 +2379,25 @@ public String checksumRows(final Table table) throws Exception {
               }
             }
           
          -  public static final byte[][] KEYS = {
          -    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
          -    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
          -    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
          -    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
          -    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
          -    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
          -    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
          -    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
          -    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
          -  };
          -
          -  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
          -      Bytes.toBytes("bbb"),
          -      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
          -      Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
          -      Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
          -      Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
          -      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
          -      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
          -      Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
          -      Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
          -  };
          -
          -  /**
          -   * Create rows in hbase:meta for regions of the specified table with the specified
          -   * start keys.  The first startKey should be a 0 length byte array if you
          -   * want to form a proper range of regions.
          +  public static final byte[][] KEYS = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
          +      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
          +      Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
          +      Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
          +      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
          +      Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
          +      Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy") };
          +
          +  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = { Bytes.toBytes("bbb"),
          +      Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
          +      Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
          +      Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
          +      Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
          +      Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
          +      Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz") };
          +
          +  /**
          +   * Create rows in hbase:meta for regions of the specified table with the specified start keys. The
          +   * first startKey should be a 0 length byte array if you want to form a proper range of regions.
              * @param conf
              * @param htd
              * @param startKeys
          @@ -2474,20 +2405,17 @@ public String checksumRows(final Table table) throws Exception {
              * @throws IOException
              */
              public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
          -      final TableDescriptor htd, byte [][] startKeys)
          -  throws IOException {
          +      final TableDescriptor htd, byte[][] startKeys) throws IOException {
               Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
               Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
                List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
          -    MetaTableAccessor
          -        .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
          +    MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(),
          +      TableState.State.ENABLED);
               // add custom ones
               for (int i = 0; i < startKeys.length; i++) {
                 int j = (i + 1) % startKeys.length;
          -      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
          -          .setStartKey(startKeys[i])
          -          .setEndKey(startKeys[j])
          -          .build();
          +      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKeys[i])
          +          .setEndKey(startKeys[j]).build();
                 MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1);
                 newRegions.add(hri);
               }
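           As the javadoc above notes, the first start key must be the empty byte array for the
           regions to cover the whole key space; the KEYS constant above is already shaped that way,
           so a minimal sketch (assuming a running utility util) is simply:

               TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
                   .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build();
               // KEYS starts with HConstants.EMPTY_BYTE_ARRAY, so the created regions form a proper range.
               List<RegionInfo> regions =
                   util.createMultiRegionsInMeta(util.getConfiguration(), htd, HBaseTestingUtility.KEYS);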
          @@ -2508,7 +2436,6 @@ public static WAL createWal(final Configuration conf, final Path rootDir, final
               return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
             }
           
          -
             /**
              * Create a region with it's own WAL. Be sure to call
              * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
          @@ -2530,6 +2457,7 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD
               region.initialize();
               return region;
             }
          +
             /**
              * Create a region with it's own WAL. Be sure to call
              * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
          @@ -2548,17 +2476,15 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD
              * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
              */
             public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
          -      final Configuration conf, final TableDescriptor htd, boolean initialize)
          -      throws IOException {
          -    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0,
          -      0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
          +      final Configuration conf, final TableDescriptor htd, boolean initialize) throws IOException {
          +    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
          +      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
               WAL wal = createWal(conf, rootDir, info);
               return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
             }
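           A minimal sketch of the pairing the javadoc keeps insisting on, assuming info, rootDir,
           conf and htd are already prepared by the test:

               HRegion region = HBaseTestingUtility.createRegionAndWAL(info, rootDir, conf, htd);
               try {
                 // exercise the region here
               } finally {
                 HBaseTestingUtility.closeRegionAndWAL(region);   // releases both the region and its WAL
               }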
           
             /**
              * Returns all rows from the hbase:meta table.
          -   *
              * @throws IOException When reading the rows fails.
              */
              public List<byte[]> getMetaTableRows() throws IOException {
           @@ -2567,8 +2493,7 @@ public List<byte[]> getMetaTableRows() throws IOException {
                List<byte[]> rows = new ArrayList<>();
               ResultScanner s = t.getScanner(new Scan());
               for (Result result : s) {
          -      LOG.info("getMetaTableRows: row -> " +
          -        Bytes.toStringBinary(result.getRow()));
          +      LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()));
                 rows.add(result.getRow());
               }
               s.close();
          @@ -2578,7 +2503,6 @@ public List getMetaTableRows() throws IOException {
           
             /**
              * Returns all rows from the hbase:meta table for a given user table
          -   *
              * @throws IOException When reading the rows fails.
              */
              public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
           @@ -2595,8 +2519,7 @@ public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
                 }
           
                 if (info.getTable().equals(tableName)) {
          -        LOG.info("getMetaTableRows: row -> " +
          -            Bytes.toStringBinary(result.getRow()) + info);
          +        LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()) + info);
                   rows.add(result.getRow());
                 }
               }
          @@ -2607,7 +2530,6 @@ public List getMetaTableRows(TableName tableName) throws IOException {
           
             /**
              * Returns all regions of the specified table
          -   *
              * @param tableName the table name
              * @return all regions of the specified table
              * @throws IOException when getting the regions fails.
           @@ -2624,8 +2546,7 @@ private List<RegionInfo> getRegions(TableName tableName) throws IOException {
              * @return another region server
              */
             public HRegionServer getOtherRegionServer(HRegionServer rs) {
          -    for (JVMClusterUtil.RegionServerThread rst :
          -      getMiniHBaseCluster().getRegionServerThreads()) {
          +    for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
                 if (!(rst.getRegionServer() == rs)) {
                   return rst.getRegionServer();
                 }
          @@ -2634,8 +2555,8 @@ public HRegionServer getOtherRegionServer(HRegionServer rs) {
             }
           
             /**
          -   * Tool to get the reference to the region server object that holds the
          -   * region of the specified user table.
          +   * Tool to get the reference to the region server object that holds the region of the specified
          +   * user table.
              * @param tableName user table to lookup in hbase:meta
              * @return region server that holds it, null if the row doesn't exist
              * @throws IOException
          @@ -2647,50 +2568,44 @@ public HRegionServer getRSForFirstRegionInTable(TableName tableName)
               if (regions == null || regions.isEmpty()) {
                 return null;
               }
          -    LOG.debug("Found " + regions.size() + " regions for table " +
          -        tableName);
          +    LOG.debug("Found " + regions.size() + " regions for table " + tableName);
           
          -    byte[] firstRegionName = regions.stream()
          -        .filter(r -> !r.isOffline())
          -        .map(RegionInfo::getRegionName)
          -        .findFirst()
          -        .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
          +    byte[] firstRegionName =
          +        regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst()
          +            .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
           
               LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
               long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
                 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
               int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
                 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
          -    RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
          -    while(retrier.shouldRetry()) {
          +    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
          +    while (retrier.shouldRetry()) {
                 int index = getMiniHBaseCluster().getServerWith(firstRegionName);
                 if (index != -1) {
                   return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
                 }
          -      // Came back -1.  Region may not be online yet.  Sleep a while.
          +      // Came back -1. Region may not be online yet. Sleep a while.
                 retrier.sleepUntilNextRetry();
               }
               return null;
             }
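           Because of the retry loop above, callers can ask for the hosting server right after a
           table is created and still get an answer once the region comes online; a minimal sketch,
           assuming a running utility util:

               HRegionServer rs = util.getRSForFirstRegionInTable(TableName.valueOf("t1"));
               if (rs == null) {
                 // either the table has no online regions or the configured retries ran out
                 throw new IOException("no region server found hosting t1");
               }
               LOG.info("first region of t1 is on {}", rs.getServerName());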
           
             /**
          -   * Starts a MiniMRCluster with a default number of
          -   * TaskTracker's.
          -   *
          +   * Starts a MiniMRCluster with a default number of TaskTracker's.
              * @throws IOException When starting the cluster fails.
              */
             public MiniMRCluster startMiniMapReduceCluster() throws IOException {
               // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
          -    conf.setIfUnset(
          -        "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
          -        "99.0");
          +    conf.setIfUnset("yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
          +      "99.0");
               startMiniMapReduceCluster(2);
               return mrCluster;
             }
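           A minimal usage sketch for the map-reduce mini cluster, assuming a running utility util
           and that the HBase mini cluster is already up:

               util.startMiniMapReduceCluster();        // starts two TaskTrackers by default
               try {
                 // submit the MapReduce job against util.getConfiguration() here
               } finally {
                 util.shutdownMiniMapReduceCluster();
               }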
           
             /**
          -   * Tasktracker has a bug where changing the hadoop.log.dir system property
          -   * will not change its internal static LOG_DIR variable.
          +   * Tasktracker has a bug where changing the hadoop.log.dir system property will not change its
          +   * internal static LOG_DIR variable.
              */
             private void forceChangeTaskLogDir() {
               Field logDirField;
          @@ -2717,7 +2632,7 @@ private void forceChangeTaskLogDir() {
             /**
              * Starts a MiniMRCluster. Call {@link #setFileSystemURI(String)} to use a different
              * filesystem.
          -   * @param servers  The number of TaskTracker's to start.
          +   * @param servers The number of TaskTracker's to start.
              * @throws IOException When starting the cluster fails.
              */
             private void startMiniMapReduceCluster(final int servers) throws IOException {
          @@ -2743,20 +2658,25 @@ private void startMiniMapReduceCluster(final int servers) throws IOException {
           
               // Allow the user to override FS URI for this map-reduce cluster to use.
               mrCluster = new MiniMRCluster(servers,
          -      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
          -      null, null, new JobConf(this.conf));
          +        FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1, null, null,
          +        new JobConf(this.conf));
               JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
               if (jobConf == null) {
                 jobConf = mrCluster.createJobConf();
               }
           
          -    jobConf.set("mapreduce.cluster.local.dir",
          -      conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not
          +    jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir")); // Hadoop
          +                                                                                         // MiniMR
          +                                                                                         // overwrites
          +                                                                                         // this
          +                                                                                         // while it
          +                                                                                         // should
          +                                                                                         // not
               LOG.info("Mini mapreduce cluster started");
           
               // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
          -    // Our HBase MR jobs need several of these settings in order to properly run.  So we copy the
          -    // necessary config properties here.  YARN-129 required adding a few properties.
          +    // Our HBase MR jobs need several of these settings in order to properly run. So we copy the
          +    // necessary config properties here. YARN-129 required adding a few properties.
               conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
               // this for mrv2 support; mr1 ignores this
               conf.set("mapreduce.framework.name", "yarn");
          @@ -2769,18 +2689,15 @@ private void startMiniMapReduceCluster(final int servers) throws IOException {
               if (historyAddress != null) {
                 conf.set("mapreduce.jobhistory.address", historyAddress);
               }
          -    String schedulerAddress =
          -      jobConf.get("yarn.resourcemanager.scheduler.address");
          +    String schedulerAddress = jobConf.get("yarn.resourcemanager.scheduler.address");
               if (schedulerAddress != null) {
                 conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
               }
          -    String mrJobHistoryWebappAddress =
          -      jobConf.get("mapreduce.jobhistory.webapp.address");
          +    String mrJobHistoryWebappAddress = jobConf.get("mapreduce.jobhistory.webapp.address");
               if (mrJobHistoryWebappAddress != null) {
                 conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
               }
          -    String yarnRMWebappAddress =
          -      jobConf.get("yarn.resourcemanager.webapp.address");
          +    String yarnRMWebappAddress = jobConf.get("yarn.resourcemanager.webapp.address");
               if (yarnRMWebappAddress != null) {
                 conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
               }
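
The block above copies a handful of YARN/MR2 endpoints from the mini cluster's JobConf back into the shared Configuration, skipping keys the JobConf does not define. The same copy-if-present pattern in isolation, as a sketch that uses only the plain Hadoop Configuration API (the helper and class names are illustrative):

import org.apache.hadoop.conf.Configuration;

public class ConfPropagationSketch {
  /** Copies each key from {@code from} to {@code to} when the source defines it. */
  static void copyIfSet(Configuration from, Configuration to, String... keys) {
    for (String key : keys) {
      String value = from.get(key);
      if (value != null) {
        to.set(key, value);
      }
    }
  }

  public static void main(String[] args) {
    Configuration jobConf = new Configuration(false);
    jobConf.set("yarn.resourcemanager.scheduler.address", "localhost:8030");
    Configuration conf = new Configuration(false);
    copyIfSet(jobConf, conf, "mapreduce.jobhistory.address",
      "yarn.resourcemanager.scheduler.address", "mapreduce.jobhistory.webapp.address",
      "yarn.resourcemanager.webapp.address");
    System.out.println(conf.get("yarn.resourcemanager.scheduler.address")); // localhost:8030
  }
}
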
          @@ -2804,15 +2721,15 @@ public void shutdownMiniMapReduceCluster() {
              * Create a stubbed out RegionServerService, mainly for getting FS.
              */
             public RegionServerServices createMockRegionServerService() throws IOException {
          -    return createMockRegionServerService((ServerName)null);
          +    return createMockRegionServerService((ServerName) null);
             }
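
As a usage sketch (not part of the patch), a test that only needs a filesystem-backed RegionServerServices stub might do something like the following; createMockRegionServerService, getTestFileSystem and startMiniCluster are existing HBaseTestingUtility methods, while the test class itself is illustrative.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;

public class MockRegionServerServicesExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(); // the stub needs a live ZooKeeper watcher
    try {
      RegionServerServices rss = util.createMockRegionServerService();
      FileSystem fs = util.getTestFileSystem(); // the same FS the stub was wired with
      System.out.println("Stubbed RegionServerServices ready, fs=" + fs.getUri());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
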
           
             /**
          -   * Create a stubbed out RegionServerService, mainly for getting FS.
          -   * This version is used by TestTokenAuthentication
          +   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
          +   * TestTokenAuthentication
              */
          -  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws
          -      IOException {
          +  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
          +      throws IOException {
               final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
               rss.setFileSystem(getTestFileSystem());
               rss.setRpcServer(rpc);
          @@ -2820,8 +2737,8 @@ public RegionServerServices createMockRegionServerService(RpcServerInterface rpc
             }
           
             /**
          -   * Create a stubbed out RegionServerService, mainly for getting FS.
          -   * This version is used by TestOpenRegionHandler
          +   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
          +   * TestOpenRegionHandler
              */
             public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
               final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
          @@ -2872,31 +2789,25 @@ private void decrementMinRegionServerCount() {
             }
           
             private void decrementMinRegionServerCount(Configuration conf) {
          -    int currentCount = conf.getInt(
          -        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
          +    int currentCount = conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
               if (currentCount != -1) {
          -      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
          -          Math.max(currentCount - 1, 1));
          +      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, Math.max(currentCount - 1, 1));
               }
             }
           
             public void expireSession(ZKWatcher nodeZK) throws Exception {
          -   expireSession(nodeZK, false);
          +    expireSession(nodeZK, false);
             }
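
For the two expireSession variants here, a hedged example of how a test typically drives them: expire the ZooKeeper session behind a watcher and let the cluster recover. The methods called are existing HBaseTestingUtility/HBaseZKTestingUtility API; the surrounding test class is illustrative.

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class ZkSessionExpiryExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      // Expire the utility's own ZK session; pass true instead of false to also
      // verify that a Table can still be created afterwards (see the javadoc below).
      util.expireSession(util.getZooKeeperWatcher(), false);
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
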
           
             /**
              * Expire a ZooKeeper session as recommended in ZooKeeper documentation
          -   * http://hbase.apache.org/book.html#trouble.zookeeper
          -   * There are issues when doing this:
          -   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
          -   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
          -   *
          +   * http://hbase.apache.org/book.html#trouble.zookeeper There are issues when doing this: [1]
          +   * http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html [2]
          +   * https://issues.apache.org/jira/browse/ZOOKEEPER-1105
              * @param nodeZK - the ZK watcher to expire
          -   * @param checkStatus - true to check if we can create a Table with the
          -   *                    current configuration.
          +   * @param checkStatus - true to check if we can create a Table with the current configuration.
              */
          -  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
          -    throws Exception {
          +  public void expireSession(ZKWatcher nodeZK, boolean checkStatus) throws Exception {
               Configuration c = new Configuration(this.conf);
               String quorumServers = ZKConfig.getZKQuorumServersString(c);
               ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
          @@ -2904,30 +2815,29 @@ public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
               long sessionID = zk.getSessionId();
           
               // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
          -    //  so we create a first watcher to be sure that the
          -    //  event was sent. We expect that if our watcher receives the event
          -    //  other watchers on the same machine will get is as well.
          +    // so we create a first watcher to be sure that the
          +    // event was sent. We expect that if our watcher receives the event
          +    // other watchers on the same machine will get is as well.
               // When we ask to close the connection, ZK does not close it before
          -    //  we receive all the events, so don't have to capture the event, just
          -    //  closing the connection should be enough.
          -    ZooKeeper monitor = new ZooKeeper(quorumServers,
          -      1000, new org.apache.zookeeper.Watcher(){
          +    // we receive all the events, so don't have to capture the event, just
          +    // closing the connection should be enough.
          +    ZooKeeper monitor = new ZooKeeper(quorumServers, 1000, new org.apache.zookeeper.Watcher() {
                 @Override
                 public void process(WatchedEvent watchedEvent) {
          -        LOG.info("Monitor ZKW received event="+watchedEvent);
          +        LOG.info("Monitor ZKW received event=" + watchedEvent);
                 }
          -    } , sessionID, password);
          +    }, sessionID, password);
           
               // Making it expire
          -    ZooKeeper newZK = new ZooKeeper(quorumServers,
          -        1000, EmptyWatcher.instance, sessionID, password);
          +    ZooKeeper newZK =
          +        new ZooKeeper(quorumServers, 1000, EmptyWatcher.instance, sessionID, password);
           
          -    //ensure that we have connection to the server before closing down, otherwise
          -    //the close session event will be eaten out before we start CONNECTING state
          +    // ensure that we have connection to the server before closing down, otherwise
          +    // the close session event will be eaten out before we start CONNECTING state
               long start = EnvironmentEdgeManager.currentTime();
               while (newZK.getState() != States.CONNECTED
          -         && EnvironmentEdgeManager.currentTime() - start < 1000) {
          -       Thread.sleep(1);
          +        && EnvironmentEdgeManager.currentTime() - start < 1000) {
          +      Thread.sleep(1);
               }
               newZK.close();
               LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
          @@ -2942,7 +2852,6 @@ public void process(WatchedEvent watchedEvent) {
           
             /**
              * Get the Mini HBase cluster.
          -   *
              * @return hbase cluster
              * @see #getHBaseClusterInterface()
              */
          @@ -2952,26 +2861,26 @@ public MiniHBaseCluster getHBaseCluster() {
           
             /**
              * Returns the HBaseCluster instance.
           -   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
           -   * tests referring this should not assume that the cluster is a mini cluster or a
           -   * distributed one. If the test only works on a mini cluster, then specific
           -   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
           -   * need to type-cast.
           +   * <p>
           +   * Returned object can be any of the subclasses of HBaseCluster, and the tests referring this
           +   * should not assume that the cluster is a mini cluster or a distributed one. If the test only
           +   * works on a mini cluster, then specific method {@link #getMiniHBaseCluster()} can be used
           +   * instead w/o the need to type-cast.
               */
              public HBaseCluster getHBaseClusterInterface() {
           -    //implementation note: we should rename this method as #getHBaseCluster(),
           -    //but this would require refactoring 90+ calls.
           +    // implementation note: we should rename this method as #getHBaseCluster(),
           +    // but this would require refactoring 90+ calls.
                return hbaseCluster;
              }
            
              /**
               * Resets the connections so that the next time getConnection() is called, a new connection is
               * created. This is needed in cases where the entire cluster / all the masters are shutdown and
           -   * the connection is not valid anymore.
           -   * TODO: There should be a more coherent way of doing this. Unfortunately the way tests are
           -   * written, not all start() stop() calls go through this class. Most tests directly operate on
           -   * the underlying mini/local hbase cluster. That makes it difficult for this wrapper class to
           -   * maintain the connection state automatically. Cleaning this is a much bigger refactor.
           +   * the connection is not valid anymore. TODO: There should be a more coherent way of doing this.
           +   * Unfortunately the way tests are written, not all start() stop() calls go through this class.
           +   * Most tests directly operate on the underlying mini/local hbase cluster. That makes it difficult
           +   * for this wrapper class to maintain the connection state automatically. Cleaning this is a much
           +   * bigger refactor.
               */
              public void invalidateConnection() throws IOException {
                closeConnection();
           @@ -2979,14 +2888,13 @@ public void invalidateConnection() throws IOException {
                final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
                final String masterConfAfter = getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY);
                LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
           -        masterConfigBefore, masterConfAfter);
           +      masterConfigBefore, masterConfAfter);
                conf.set(HConstants.MASTER_ADDRS_KEY,
           -        getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
           +      getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
              }
            
              /**
           -   * Get a shared Connection to the cluster.
           -   * this method is thread safe.
           +   * Get a shared Connection to the cluster. this method is thread safe.
               * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
               */
              public Connection getConnection() throws IOException {
           @@ -2994,8 +2902,7 @@ public Connection getConnection() throws IOException {
              }
            
              /**
           -   * Get a assigned Connection to the cluster.
           -   * this method is thread safe.
           +   * Get a assigned Connection to the cluster. this method is thread safe.
               * @param user assigned user
               * @return A Connection with assigned user.
               */
           @@ -3004,9 +2911,9 @@ public Connection getConnection(User user) throws IOException {
              }
            
              /**
           -   * Get a shared AsyncClusterConnection to the cluster.
           -   * this method is thread safe.
           -   * @return An AsyncClusterConnection that can be shared. Don't close. Will be closed on shutdown of cluster.
           +   * Get a shared AsyncClusterConnection to the cluster. this method is thread safe.
           +   * @return An AsyncClusterConnection that can be shared. Don't close. Will be closed on shutdown
           +   *         of cluster.
*/ public AsyncClusterConnection getAsyncConnection() throws IOException { try { @@ -3015,7 +2922,7 @@ public AsyncClusterConnection getAsyncConnection() throws IOException { try { User user = UserProvider.instantiate(conf).getCurrent(); connection = getAsyncConnection(user); - } catch(IOException ioe) { + } catch (IOException ioe) { throw new UncheckedIOException("Failed to create connection", ioe); } } @@ -3027,8 +2934,7 @@ public AsyncClusterConnection getAsyncConnection() throws IOException { } /** - * Get a assigned AsyncClusterConnection to the cluster. - * this method is thread safe. + * Get a assigned AsyncClusterConnection to the cluster. this method is thread safe. * @param user assigned user * @return An AsyncClusterConnection with assigned user. */ @@ -3048,11 +2954,11 @@ public void closeConnection() throws IOException { } /** - * Returns an Admin instance which is shared between HBaseTestingUtility instance users. - * Closing it has no effect, it will be closed automatically when the cluster shutdowns + * Returns an Admin instance which is shared between HBaseTestingUtility instance users. Closing + * it has no effect, it will be closed automatically when the cluster shutdowns */ public Admin getAdmin() throws IOException { - if (hbaseAdmin == null){ + if (hbaseAdmin == null) { this.hbaseAdmin = getConnection().getAdmin(); } return hbaseAdmin; @@ -3069,8 +2975,7 @@ public Hbck getHbck() throws IOException { /** * Unassign the named region. - * - * @param regionName The region to unassign. + * @param regionName The region to unassign. */ public void unassignRegion(String regionName) throws IOException { unassignRegion(Bytes.toBytes(regionName)); @@ -3078,8 +2983,7 @@ public void unassignRegion(String regionName) throws IOException { /** * Unassign the named region. - * - * @param regionName The region to unassign. + * @param regionName The region to unassign. */ public void unassignRegion(byte[] regionName) throws IOException { getAdmin().unassign(regionName, true); @@ -3087,9 +2991,8 @@ public void unassignRegion(byte[] regionName) throws IOException { /** * Closes the region containing the given row. - * - * @param row The row to find the containing region. - * @param table The table to find the region. + * @param row The row to find the containing region. + * @param table The table to find the region. */ public void unassignRegionByRow(String row, RegionLocator table) throws IOException { unassignRegionByRow(Bytes.toBytes(row), table); @@ -3097,9 +3000,8 @@ public void unassignRegionByRow(String row, RegionLocator table) throws IOExcept /** * Closes the region containing the given row. - * - * @param row The row to find the containing region. - * @param table The table to find the region. + * @param row The row to find the containing region. + * @param table The table to find the region. * @throws IOException */ public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException { @@ -3127,7 +3029,7 @@ public HRegion getSplittableRegion(TableName tableName, int maxAttempts) { } regCount = regions.size(); // There are chances that before we get the region for the table from an RS the region may - // be going for CLOSE. This may be because online schema change is enabled + // be going for CLOSE. 
This may be because online schema change is enabled if (regCount > 0) { idx = ThreadLocalRandom.current().nextInt(regCount); // if we have just tried this region, there is no need to try again @@ -3156,8 +3058,8 @@ public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, /** * Set the MiniDFSCluster * @param cluster cluster to use - * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before - * it is set. + * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before it + * is set. * @throws IllegalStateException if the passed cluster is up when it is required to be down * @throws IOException if the FileSystem could not be set from the passed dfs cluster */ @@ -3175,14 +3077,13 @@ public FileSystem getTestFileSystem() throws IOException { } /** - * Wait until all regions in a table have been assigned. Waits default timeout before giving up + * Wait until all regions in a table have been assigned. Waits default timeout before giving up * (30 seconds). * @param table Table to wait on. * @throws InterruptedException * @throws IOException */ - public void waitTableAvailable(TableName table) - throws InterruptedException, IOException { + public void waitTableAvailable(TableName table) throws InterruptedException, IOException { waitTableAvailable(table.getName(), 30000); } @@ -3203,24 +3104,24 @@ public void waitTableAvailable(byte[] table, long timeoutMillis) public String explainTableAvailability(TableName tableName) throws IOException { StringBuilder msg = - new StringBuilder(explainTableState(tableName, TableState.State.ENABLED)).append(", "); + new StringBuilder(explainTableState(tableName, TableState.State.ENABLED)).append(", "); if (getHBaseCluster().getMaster().isAlive()) { Map assignments = getHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getRegionAssignments(); + .getRegionStates().getRegionAssignments(); final List> metaLocations = - MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName); + MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName); for (Pair metaLocation : metaLocations) { RegionInfo hri = metaLocation.getFirst(); ServerName sn = metaLocation.getSecond(); if (!assignments.containsKey(hri)) { msg.append(", region ").append(hri) - .append(" not assigned, but found in meta, it expected to be on ").append(sn); + .append(" not assigned, but found in meta, it expected to be on ").append(sn); } else if (sn == null) { msg.append(", region ").append(hri).append(" assigned, but has no server in meta"); } else if (!sn.equals(assignments.get(hri))) { msg.append(", region ").append(hri) - .append(" assigned, but has different servers in meta and AM ( ").append(sn) - .append(" <> ").append(assignments.get(hri)); + .append(" assigned, but has different servers in meta and AM ( ").append(sn) + .append(" <> ").append(assignments.get(hri)); } } } @@ -3231,8 +3132,8 @@ public String explainTableState(final TableName table, TableState.State state) throws IOException { TableState tableState = MetaTableAccessor.getTableState(getConnection(), table); if (tableState == null) { - return "TableState in META: No table state in META for table " + table + - " last state in meta (including deleted is " + findLastTableState(table) + ")"; + return "TableState in META: No table state in META for table " + table + + " last state in meta (including deleted is " + findLastTableState(table) + ")"; } else if (!tableState.inStates(state)) { return 
"TableState in META: Not " + state + " state, but " + tableState; } else { @@ -3255,27 +3156,26 @@ public boolean visit(Result r) throws IOException { return true; } }; - MetaTableAccessor.scanMeta(getConnection(), null, null, - ClientMetaTableAccessor.QueryType.TABLE, Integer.MAX_VALUE, visitor); + MetaTableAccessor.scanMeta(getConnection(), null, null, ClientMetaTableAccessor.QueryType.TABLE, + Integer.MAX_VALUE, visitor); return lastTableState.get(); } /** - * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the - * regions have been all assigned. Will timeout after default period (30 seconds) - * Tolerates nonexistent table. + * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions + * have been all assigned. Will timeout after default period (30 seconds) Tolerates nonexistent + * table. * @param table the table to wait on. * @throws InterruptedException if interrupted while waiting * @throws IOException if an IO problem is encountered */ - public void waitTableEnabled(TableName table) - throws InterruptedException, IOException { + public void waitTableEnabled(TableName table) throws InterruptedException, IOException { waitTableEnabled(table, 30000); } /** - * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the - * regions have been all assigned. + * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions + * have been all assigned. * @see #waitTableEnabled(TableName, long) * @param table Table to wait on. * @param timeoutMillis Time to wait on it being marked enabled. @@ -3283,56 +3183,52 @@ public void waitTableEnabled(TableName table) * @throws IOException */ public void waitTableEnabled(byte[] table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitTableEnabled(TableName.valueOf(table), timeoutMillis); } - public void waitTableEnabled(TableName table, long timeoutMillis) - throws IOException { + public void waitTableEnabled(TableName table, long timeoutMillis) throws IOException { waitFor(timeoutMillis, predicateTableEnabled(table)); } /** - * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' - * Will timeout after default period (30 seconds) + * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' Will timeout + * after default period (30 seconds) * @param table Table to wait on. * @throws InterruptedException * @throws IOException */ - public void waitTableDisabled(byte[] table) - throws InterruptedException, IOException { + public void waitTableDisabled(byte[] table) throws InterruptedException, IOException { waitTableDisabled(table, 30000); } public void waitTableDisabled(TableName table, long millisTimeout) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitFor(millisTimeout, predicateTableDisabled(table)); } /** - * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' + * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' * @param table Table to wait on. * @param timeoutMillis Time to wait on it being marked disabled. 
* @throws InterruptedException * @throws IOException */ public void waitTableDisabled(byte[] table, long timeoutMillis) - throws InterruptedException, IOException { + throws InterruptedException, IOException { waitTableDisabled(TableName.valueOf(table), timeoutMillis); } /** - * Make sure that at least the specified number of region servers - * are running + * Make sure that at least the specified number of region servers are running * @param num minimum number of region servers that should be running * @return true if we started some servers * @throws IOException */ - public boolean ensureSomeRegionServersAvailable(final int num) - throws IOException { + public boolean ensureSomeRegionServersAvailable(final int num) throws IOException { boolean startedServer = false; MiniHBaseCluster hbaseCluster = getMiniHBaseCluster(); - for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i getAllOnlineRegions(MiniHBaseCluster cluster) NavigableSet online = new TreeSet<>(); for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { try { - for (RegionInfo region : - ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) { + for (RegionInfo region : ProtobufUtil + .getOnlineRegions(rst.getRegionServer().getRSRpcServices())) { online.add(region.getRegionNameAsString()); } } catch (RegionServerStoppedException e) { @@ -3414,14 +3301,16 @@ public static NavigableSet getAllOnlineRegions(MiniHBaseCluster cluster) } /** - * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and - * makes tests linger. Here is the exception you'll see: + * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and makes tests + * linger. Here is the exception you'll see: + * *
               * <pre>
              * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
              * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
              * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
              * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
               * </pre>
          + * * @param stream A DFSClient.DFSOutputStream. * @param max * @throws NoSuchFieldException @@ -3429,16 +3318,15 @@ public static NavigableSet getAllOnlineRegions(MiniHBaseCluster cluster) * @throws IllegalAccessException * @throws IllegalArgumentException */ - public static void setMaxRecoveryErrorCount(final OutputStream stream, - final int max) { + public static void setMaxRecoveryErrorCount(final OutputStream stream, final int max) { try { - Class [] clazzes = DFSClient.class.getDeclaredClasses(); - for (Class clazz: clazzes) { + Class[] clazzes = DFSClient.class.getDeclaredClasses(); + for (Class clazz : clazzes) { String className = clazz.getSimpleName(); if (className.equals("DFSOutputStream")) { if (clazz.isInstance(stream)) { Field maxRecoveryErrorCountField = - stream.getClass().getDeclaredField("maxRecoveryErrorCount"); + stream.getClass().getDeclaredField("maxRecoveryErrorCount"); maxRecoveryErrorCountField.setAccessible(true); maxRecoveryErrorCountField.setInt(stream, max); break; @@ -3464,7 +3352,6 @@ public boolean assignRegion(final RegionInfo regionInfo) /** * Move region to destination server and wait till region is completely moved and online - * * @param destRegion region to move * @param destServer destination server of the region * @throws InterruptedException @@ -3476,8 +3363,8 @@ public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer) // TODO: Here we start the move. The move can take a while. getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer); while (true) { - ServerName serverName = master.getAssignmentManager().getRegionStates() - .getRegionServerOfRegion(destRegion); + ServerName serverName = + master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(destRegion); if (serverName != null && serverName.equals(destServer)) { assertRegionOnServer(destRegion, serverName, 2000); break; @@ -3487,11 +3374,9 @@ public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer) } /** - * Wait until all regions for a table in hbase:meta have a non-empty - * info:server, up to a configuable timeout value (default is 60 seconds) - * This means all regions have been deployed, - * master has been informed and updated hbase:meta with the regions deployed - * server. + * Wait until all regions for a table in hbase:meta have a non-empty info:server, up to a + * configuable timeout value (default is 60 seconds) This means all regions have been deployed, + * master has been informed and updated hbase:meta with the regions deployed server. * @param tableName the table name * @throws IOException */ @@ -3509,10 +3394,9 @@ public void waitUntilAllSystemRegionsAssigned() throws IOException { } /** - * Wait until all regions for a table in hbase:meta have a non-empty - * info:server, or until timeout. This means all regions have been deployed, - * master has been informed and updated hbase:meta with the regions deployed - * server. + * Wait until all regions for a table in hbase:meta have a non-empty info:server, or until + * timeout. This means all regions have been deployed, master has been informed and updated + * hbase:meta with the regions deployed server. 
* @param tableName the table name * @param timeout timeout, in milliseconds * @throws IOException @@ -3521,8 +3405,8 @@ public void waitUntilAllRegionsAssigned(final TableName tableName, final long ti throws IOException { if (!TableName.isMetaTableName(tableName)) { try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { - LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " + - timeout + "ms"); + LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " + + timeout + "ms"); waitFor(timeout, 200, true, new ExplainingPredicate() { @Override public String explainFailure() throws IOException { @@ -3551,10 +3435,10 @@ public boolean evaluate() throws IOException { byte[] startCode = r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); ServerName serverName = - ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," + - Bytes.toLong(startCode)); - if (!getHBaseClusterInterface().isDistributedCluster() && - getHBaseCluster().isKilledRS(serverName)) { + ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," + + Bytes.toLong(startCode)); + if (!getHBaseClusterInterface().isDistributedCluster() + && getHBaseCluster().isKilledRS(serverName)) { return false; } } @@ -3565,7 +3449,8 @@ public boolean evaluate() throws IOException { } } if (!tableFound) { - LOG.warn("Didn't find the entries for table " + tableName + " in meta, already deleted?"); + LOG.warn( + "Didn't find the entries for table " + tableName + " in meta, already deleted?"); } return tableFound; } @@ -3596,17 +3481,16 @@ public boolean evaluate() throws IOException { } /** - * Do a small get/scan against one store. This is required because store - * has no actual methods of querying itself, and relies on StoreScanner. + * Do a small get/scan against one store. This is required because store has no actual methods of + * querying itself, and relies on StoreScanner. */ - public static List getFromStoreFile(HStore store, - Get get) throws IOException { + public static List getFromStoreFile(HStore store, Get get) throws IOException { Scan scan = new Scan(get); InternalScanner scanner = (InternalScanner) store.getScanner(scan, - scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), - // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set - // readpoint 0. - 0); + scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()), + // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set + // readpoint 0. + 0); List result = new ArrayList<>(); scanner.next(result); @@ -3623,40 +3507,36 @@ public static List getFromStoreFile(HStore store, /** * Create region split keys between startkey and endKey - * * @param startKey * @param endKey * @param numRegions the number of regions to be created. it has to be greater than 3. 
* @return resulting split keys */ - public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){ + public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) { if (numRegions <= 3) { throw new AssertionError(); } - byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3); - byte [][] result = new byte[tmpSplitKeys.length+1][]; + byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3); + byte[][] result = new byte[tmpSplitKeys.length + 1][]; System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length); result[0] = HConstants.EMPTY_BYTE_ARRAY; return result; } /** - * Do a small get/scan against one store. This is required because store - * has no actual methods of querying itself, and relies on StoreScanner. + * Do a small get/scan against one store. This is required because store has no actual methods of + * querying itself, and relies on StoreScanner. */ - public static List getFromStoreFile(HStore store, - byte [] row, - NavigableSet columns - ) throws IOException { + public static List getFromStoreFile(HStore store, byte[] row, NavigableSet columns) + throws IOException { Get get = new Get(row); Map> s = get.getFamilyMap(); s.put(store.getColumnFamilyDescriptor().getName(), columns); - return getFromStoreFile(store,get); + return getFromStoreFile(store, get); } - public static void assertKVListsEqual(String additionalMsg, - final List expected, + public static void assertKVListsEqual(String additionalMsg, final List expected, final List actual) { final int eLen = expected.size(); final int aLen = actual.size(); @@ -3664,8 +3544,8 @@ public static void assertKVListsEqual(String additionalMsg, int i; for (i = 0; i < minLen - && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0; - ++i) {} + && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0; ++i) { + } if (additionalMsg == null) { additionalMsg = ""; @@ -3675,10 +3555,9 @@ public static void assertKVListsEqual(String additionalMsg, } if (eLen != aLen || i != minLen) { - throw new AssertionError( - "Expected and actual KV arrays differ at position " + i + ": " + - safeGetAsStr(expected, i) + " (length " + eLen +") vs. " + - safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg); + throw new AssertionError("Expected and actual KV arrays differ at position " + i + ": " + + safeGetAsStr(expected, i) + " (length " + eLen + ") vs. 
" + safeGetAsStr(actual, i) + + " (length " + aLen + ")" + additionalMsg); } } @@ -3691,26 +3570,19 @@ public static String safeGetAsStr(List lst, int i) { } public String getClusterKey() { - return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" - + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":" - + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + + ":" + + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); } /** Creates a random table with the given parameters */ - public Table createRandomTable(TableName tableName, - final Collection families, - final int maxVersions, - final int numColsPerRow, - final int numFlushes, - final int numRegions, - final int numRowsPerFlush) - throws IOException, InterruptedException { + public Table createRandomTable(TableName tableName, final Collection families, + final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions, + final int numRowsPerFlush) throws IOException, InterruptedException { - LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + - " regions, " + numFlushes + " storefiles per region, " + - numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions + - "\n"); + LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " + + numFlushes + " storefiles per region, " + numRowsPerFlush + + " rows per flush, maxVersions=" + maxVersions + "\n"); final int numCF = families.size(); final byte[][] cfBytes = new byte[numCF][]; @@ -3727,11 +3599,9 @@ public Table createRandomTable(TableName tableName, final int splitStartKey = actualStartKey + keysPerRegion; final int splitEndKey = actualEndKey - keysPerRegion; final String keyFormat = "%08x"; - final Table table = createTable(tableName, cfBytes, - maxVersions, - Bytes.toBytes(String.format(keyFormat, splitStartKey)), - Bytes.toBytes(String.format(keyFormat, splitEndKey)), - numRegions); + final Table table = createTable(tableName, cfBytes, maxVersions, + Bytes.toBytes(String.format(keyFormat, splitStartKey)), + Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions); if (hbaseCluster != null) { getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); @@ -3742,8 +3612,8 @@ public Table createRandomTable(TableName tableName, final Random rand = ThreadLocalRandom.current(); for (int iFlush = 0; iFlush < numFlushes; ++iFlush) { for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) { - final byte[] row = Bytes.toBytes(String.format(keyFormat, - actualStartKey + rand.nextInt(actualEndKey - actualStartKey))); + final byte[] row = Bytes.toBytes( + String.format(keyFormat, actualStartKey + rand.nextInt(actualEndKey - actualStartKey))); Put put = new Put(row); Delete del = new Delete(row); @@ -3752,9 +3622,9 @@ public Table createRandomTable(TableName tableName, final long ts = rand.nextInt(); final byte[] qual = Bytes.toBytes("col" + iCol); if (rand.nextBoolean()) { - final byte[] value = Bytes.toBytes("value_for_row_" + iRow + - "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" + - ts + "_random_" + rand.nextLong()); + final byte[] value = + Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" + + iCol + "_ts_" + ts + "_random_" + rand.nextLong()); put.addColumn(cf, qual, ts, value); } else if (rand.nextDouble() < 0.8) { del.addColumn(cf, qual, ts); @@ -3785,12 +3655,12 @@ public Table 
createRandomTable(TableName tableName, public static int randomFreePort() { return HBaseCommonTestingUtility.randomFreePort(); } + public static String randomMultiCastAddress() { return "226.1.1." + ThreadLocalRandom.current().nextInt(254); } - public static void waitForHostPort(String host, int port) - throws IOException { + public static void waitForHostPort(String host, int port) throws IOException { final int maxTimeMs = 10000; final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS; IOException savedException = null; @@ -3816,30 +3686,30 @@ public static void waitForHostPort(String host, int port) } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableName tableName, byte[] columnFamily, Algorithm compression, - DataBlockEncoding dataBlockEncoding) throws IOException { - return createPreSplitLoadTestTable(conf, tableName, - columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, - Durability.USE_DEFAULT); + public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, + byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding) + throws IOException { + return createPreSplitLoadTestTable(conf, tableName, columnFamily, compression, + dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, Durability.USE_DEFAULT); } + /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, - byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding, - int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { + byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding, + int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); builder.setDurability(durability); builder.setRegionReplication(regionReplication); ColumnFamilyDescriptorBuilder cfBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(columnFamily); + ColumnFamilyDescriptorBuilder.newBuilder(columnFamily); cfBuilder.setDataBlockEncoding(dataBlockEncoding); cfBuilder.setCompressionType(compression); return createPreSplitLoadTestTable(conf, builder.build(), cfBuilder.build(), @@ -3847,20 +3717,20 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableName tabl } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. 
* @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName, - byte[][] columnFamilies, Algorithm compression, DataBlockEncoding dataBlockEncoding, - int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { + byte[][] columnFamilies, Algorithm compression, DataBlockEncoding dataBlockEncoding, + int numRegionsPerServer, int regionReplication, Durability durability) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); builder.setDurability(durability); builder.setRegionReplication(regionReplication); ColumnFamilyDescriptor[] hcds = new ColumnFamilyDescriptor[columnFamilies.length]; for (int i = 0; i < columnFamilies.length; i++) { ColumnFamilyDescriptorBuilder cfBuilder = - ColumnFamilyDescriptorBuilder.newBuilder(columnFamilies[i]); + ColumnFamilyDescriptorBuilder.newBuilder(columnFamilies[i]); cfBuilder.setDataBlockEncoding(dataBlockEncoding); cfBuilder.setCompressionType(compression); hcds[i] = cfBuilder.build(); @@ -3869,46 +3739,45 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableName tabl } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException { + public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, + ColumnFamilyDescriptor hcd) throws IOException { return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER); } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableDescriptor desc, ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException { - return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] {hcd}, - numRegionsPerServer); + public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, + ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException { + return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] { hcd }, + numRegionsPerServer); } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. 
* @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableDescriptor desc, ColumnFamilyDescriptor[] hcds, - int numRegionsPerServer) throws IOException { - return createPreSplitLoadTestTable(conf, desc, hcds, - new RegionSplitter.HexStringSplit(), numRegionsPerServer); + public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc, + ColumnFamilyDescriptor[] hcds, int numRegionsPerServer) throws IOException { + return createPreSplitLoadTestTable(conf, desc, hcds, new RegionSplitter.HexStringSplit(), + numRegionsPerServer); } /** - * Creates a pre-split table for load testing. If the table already exists, - * logs a warning and continues. + * Creates a pre-split table for load testing. If the table already exists, logs a warning and + * continues. * @return the number of regions the table was split into */ - public static int createPreSplitLoadTestTable(Configuration conf, - TableDescriptor td, ColumnFamilyDescriptor[] cds, - SplitAlgorithm splitter, int numRegionsPerServer) throws IOException { + public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor td, + ColumnFamilyDescriptor[] cds, SplitAlgorithm splitter, int numRegionsPerServer) + throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td); for (ColumnFamilyDescriptor cd : cds) { if (!td.hasColumnFamily(cd.getName())) { @@ -3923,27 +3792,25 @@ public static int createPreSplitLoadTestTable(Configuration conf, try { // create a table a pre-splits regions. // The number of splits is set as: - // region servers * regions per region server). + // region servers * regions per region server). int numberOfServers = admin.getRegionServers().size(); if (numberOfServers == 0) { throw new IllegalStateException("No live regionservers"); } totalNumberOfRegions = numberOfServers * numRegionsPerServer; - LOG.info("Number of live regionservers: " + numberOfServers + ", " + - "pre-splitting table into " + totalNumberOfRegions + " regions " + - "(regions per server: " + numRegionsPerServer + ")"); + LOG.info("Number of live regionservers: " + numberOfServers + ", " + + "pre-splitting table into " + totalNumberOfRegions + " regions " + + "(regions per server: " + numRegionsPerServer + ")"); - byte[][] splits = splitter.split( - totalNumberOfRegions); + byte[][] splits = splitter.split(totalNumberOfRegions); admin.createTable(td, splits); } catch (MasterNotRunningException e) { LOG.error("Master not running", e); throw new IOException(e); } catch (TableExistsException e) { - LOG.warn("Table " + td.getTableName() + - " already exists, continuing"); + LOG.warn("Table " + td.getTableName() + " already exists, continuing"); } finally { admin.close(); unmanagedConnection.close(); @@ -3962,7 +3829,7 @@ public static int getMetaRSPort(Connection connection) throws IOException { * yet, after the assignment znode is deleted and the new assignment is recorded in master. 
*/ public void assertRegionOnServer(final RegionInfo hri, final ServerName server, - final long timeout) throws IOException, InterruptedException { + final long timeout) throws IOException, InterruptedException { long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout; while (true) { List regions = getAdmin().getRegions(server); @@ -3972,29 +3839,27 @@ public void assertRegionOnServer(final RegionInfo hri, final ServerName server, Thread.sleep(10); } throw new AssertionError( - "Could not find region " + hri.getRegionNameAsString() + " on server " + server); + "Could not find region " + hri.getRegionNameAsString() + " on server " + server); } /** - * Check to make sure the region is open on the specified - * region server, but not on any other one. + * Check to make sure the region is open on the specified region server, but not on any other one. */ - public void assertRegionOnlyOnServer( - final RegionInfo hri, final ServerName server, + public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName server, final long timeout) throws IOException, InterruptedException { long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout; while (true) { List regions = getAdmin().getRegions(server); if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) { List rsThreads = - getHBaseCluster().getLiveRegionServerThreads(); - for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) { + getHBaseCluster().getLiveRegionServerThreads(); + for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) { HRegionServer rs = rsThread.getRegionServer(); if (server.equals(rs.getServerName())) { continue; } Collection hrs = rs.getOnlineRegionsLocalContext(); - for (HRegion r: hrs) { + for (HRegion r : hrs) { if (r.getRegionInfo().getRegionId() == hri.getRegionId()) { throw new AssertionError("Region should not be double assigned"); } @@ -4007,7 +3872,7 @@ public void assertRegionOnlyOnServer( Thread.sleep(10); } throw new AssertionError( - "Could not find region " + hri.getRegionNameAsString() + " on server " + server); + "Could not find region " + hri.getRegionNameAsString() + " on server " + server); } public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException { @@ -4036,8 +3901,8 @@ public ExplainingPredicate predicateNoRegionsInTransition() { return new ExplainingPredicate() { @Override public String explainFailure() throws IOException { - final RegionStates regionStates = getMiniHBaseCluster().getMaster() - .getAssignmentManager().getRegionStates(); + final RegionStates regionStates = + getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); return "found in transition: " + regionStates.getRegionsInTransition().toString(); } @@ -4170,8 +4035,8 @@ public String explainFailure() { } /** - * Create a set of column descriptors with the combination of compression, - * encoding, bloom codecs available. + * Create a set of column descriptors with the combination of compression, encoding, bloom codecs + * available. * @return the list of column descriptors */ public static List generateColumnDescriptors() { @@ -4179,20 +4044,20 @@ public static List generateColumnDescriptors() { } /** - * Create a set of column descriptors with the combination of compression, - * encoding, bloom codecs available. + * Create a set of column descriptors with the combination of compression, encoding, bloom codecs + * available. 
    * @param prefix family names prefix
    * @return the list of column descriptors
    */
   public static List generateColumnDescriptors(final String prefix) {
     List columnFamilyDescriptors = new ArrayList<>();
     long familyId = 0;
-    for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
-      for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
-        for (BloomType bloomType: BloomType.values()) {
+    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
+      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
+        for (BloomType bloomType : BloomType.values()) {
           String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
           ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
-              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
+            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
           columnFamilyDescriptorBuilder.setCompressionType(compressionType);
           columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
           columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
@@ -4248,10 +4113,9 @@ private boolean isTargetTable(final byte[] inRow, Cell c) {
   }
   /**
-   * Sets up {@link MiniKdc} for testing security.
-   * Uses {@link HBaseKerberosUtils} to set the given keytab file as
-   * {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}.
-   * FYI, there is also the easier-to-use kerby KDC server and utility for using it,
+   * Sets up {@link MiniKdc} for testing security. Uses {@link HBaseKerberosUtils} to set the given
+   * keytab file as {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}. FYI, there is also the easier-to-use
+   * kerby KDC server and utility for using it,
    * {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred;
    * less baggage. It came in in HBASE-5291.
    */
@@ -4271,7 +4135,7 @@ public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
       kdc = new MiniKdc(conf, dir);
       kdc.start();
     } catch (BindException e) {
-      FileUtils.deleteDirectory(dir); // clean directory
+      FileUtils.deleteDirectory(dir); // clean directory
       numTries++;
       if (numTries == 3) {
         LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
@@ -4288,14 +4152,13 @@ public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
   public int getNumHFiles(final TableName tableName, final byte[] family) {
     int numHFiles = 0;
     for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
-      numHFiles+= getNumHFilesForRS(regionServerThread.getRegionServer(), tableName,
-        family);
+      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, family);
     }
     return numHFiles;
   }
   public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
-      final byte[] family) {
+    final byte[] family) {
     int numHFiles = 0;
     for (Region region : rs.getRegions(tableName)) {
       numHFiles += region.getStore(family).getStorefilesCount();
@@ -4323,7 +4186,7 @@ public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescr
     Collection rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
     assertEquals("", ltdFamilies.size(), rtdFamilies.size());
     for (Iterator it = ltdFamilies.iterator(),
-        it2 = rtdFamilies.iterator(); it.hasNext();) {
+      it2 = rtdFamilies.iterator(); it.hasNext();) {
       assertEquals("", 0, ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
     }
   }
@@ -4333,7 +4196,7 @@ public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescr
    * invocations.
    */
   public static void await(final long sleepMillis, final BooleanSupplier condition)
-      throws InterruptedException {
+    throws InterruptedException {
     try {
       while (!condition.getAsBoolean()) {
         Thread.sleep(sleepMillis);
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseZKTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseZKTestingUtility.java
index 7d81278000ad..8cb9022e6fe5 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseZKTestingUtility.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseZKTestingUtility.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index 7b6c697e4ddc..cf1b6b72a9b6 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -47,10 +46,9 @@
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 /**
- * This class creates a single process HBase cluster.
- * each server. The master uses the 'default' FileSystem. The RegionServers,
- * if we are running on DistributedFilesystem, create a FileSystem instance
- * each and will close down their instance on the way out.
+ * This class creates a single process HBase cluster. each server. The master uses the 'default'
+ * FileSystem. The RegionServers, if we are running on DistributedFilesystem, create a FileSystem
+ * instance each and will close down their instance on the way out.
  * @deprecated since 3.0.0, will be removed in 4.0.0. Use
  *             {@link org.apache.hadoop.hbase.testing.TestingHBaseCluster} instead.
  */
@@ -68,7 +66,7 @@ public class MiniHBaseCluster extends HBaseCluster {
    * @throws IOException
    */
   public MiniHBaseCluster(Configuration conf, int numRegionServers)
-      throws IOException, InterruptedException {
+    throws IOException, InterruptedException {
     this(conf, 1, numRegionServers);
   }
@@ -91,23 +89,23 @@ public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers
    * @param numRegionServers initial number of region servers to start.
    */
   public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers,
-      Class masterClass,
-      Class regionserverClass)
+    Class masterClass,
+    Class regionserverClass)
     throws IOException, InterruptedException {
     this(conf, numMasters, 0, numRegionServers, null, masterClass, regionserverClass);
   }
   /**
    * @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster
-   *                restart where for sure the regionservers come up on same address+port (but
-   *                just with different startcode); by default mini hbase clusters choose new
-   *                arbitrary ports on each cluster start.
+   *          restart where for sure the regionservers come up on same address+port (but just with
+   *          different startcode); by default mini hbase clusters choose new arbitrary ports on
+   *          each cluster start.
    * @throws IOException
    * @throws InterruptedException
    */
   public MiniHBaseCluster(Configuration conf, int numMasters, int numAlwaysStandByMasters,
-      int numRegionServers, List rsPorts, Class masterClass,
-      Class regionserverClass)
+    int numRegionServers, List rsPorts, Class masterClass,
+    Class regionserverClass)
     throws IOException, InterruptedException {
     super(conf);
@@ -115,7 +113,7 @@ public MiniHBaseCluster(Configuration conf, int numMasters, int numAlwaysStandBy
     CompatibilityFactory.getInstance(MetricsAssertHelper.class).init();
     init(numMasters, numAlwaysStandByMasters, numRegionServers, rsPorts, masterClass,
-        regionserverClass);
+      regionserverClass);
     this.initialClusterStatus = getClusterMetrics();
   }
@@ -124,18 +122,17 @@ public Configuration getConfiguration() {
   }
   /**
-   * Subclass so can get at protected methods (none at moment). Also, creates
-   * a FileSystem instance per instantiation. Adds a shutdown own FileSystem
-   * on the way out. Shuts down own Filesystem only, not All filesystems as
-   * the FileSystem system exit hook does.
+   * Subclass so can get at protected methods (none at moment). Also, creates a FileSystem instance
+   * per instantiation. Adds a shutdown own FileSystem on the way out. Shuts down own Filesystem
+   * only, not All filesystems as the FileSystem system exit hook does.
    */
   public static class MiniHBaseClusterRegionServer extends HRegionServer {
     private Thread shutdownThread = null;
     private User user = null;
     /**
-     * List of RegionServers killed so far. ServerName also comprises startCode of a server,
-     * so any restarted instances of the same server will have different ServerName and will not
-     * coincide with past dead ones. So there's no need to cleanup this list.
+     * List of RegionServers killed so far. ServerName also comprises startCode of a server, so any
+     * restarted instances of the same server will have different ServerName and will not coincide
+     * with past dead ones. So there's no need to cleanup this list.
      */
     static Set killedServers = new HashSet<>();
@@ -154,8 +151,8 @@ public MiniHBaseClusterRegionServer(Configuration conf)
      */
     @Override
-    protected void handleReportForDutyResponse(
-        final RegionServerStartupResponse c) throws IOException {
+    protected void handleReportForDutyResponse(final RegionServerStartupResponse c)
+      throws IOException {
       super.handleReportForDutyResponse(c);
       // Run this thread to shutdown our filesystem on way out.
       this.shutdownThread = new SingleFileSystemShutdownThread(getFileSystem());
@@ -209,15 +206,17 @@ private void abortRegionServer(String reason, Throwable cause) {
   }
   /**
-   * Alternate shutdown hook.
-   * Just shuts down the passed fs, not all as default filesystem hook does.
+   * Alternate shutdown hook. Just shuts down the passed fs, not all as default filesystem hook
+   * does.
    */
   static class SingleFileSystemShutdownThread extends Thread {
     private final FileSystem fs;
+
     SingleFileSystemShutdownThread(final FileSystem fs) {
       super("Shutdown of " + fs);
       this.fs = fs;
     }
+
     @Override
     public void run() {
       try {
@@ -234,12 +233,12 @@ public void run() {
   private void init(final int nMasterNodes, final int numAlwaysStandByMasters,
     final int nRegionNodes, List rsPorts, Class masterClass, Class regionserverClass)
-      throws IOException, InterruptedException {
+    throws IOException, InterruptedException {
     try {
-      if (masterClass == null){
-        masterClass = HMaster.class;
+      if (masterClass == null) {
+        masterClass = HMaster.class;
       }
-      if (regionserverClass == null){
+      if (regionserverClass == null) {
         regionserverClass = MiniHBaseCluster.MiniHBaseClusterRegionServer.class;
       }
@@ -253,8 +252,7 @@ private void init(final int nMasterNodes, final int numAlwaysStandByMasters,
         if (rsPorts != null) {
           rsConf.setInt(HConstants.REGIONSERVER_PORT, rsPorts.get(i));
         }
-        User user = HBaseTestingUtility.getDifferentUser(rsConf,
-            ".hfs."+index++);
+        User user = HBaseTestingUtility.getDifferentUser(rsConf, ".hfs." + index++);
         hbaseCluster.addRegionServer(rsConf, i, user);
       }
@@ -309,7 +307,7 @@ public void resumeRegionServer(ServerName serverName) throws IOException {
   @Override
   public void waitForRegionServerToStop(ServerName serverName, long timeout) throws IOException {
-    //ignore timeout for now
+    // ignore timeout for now
     waitOnRegionServer(getRegionServerIndex(serverName));
   }
@@ -405,30 +403,27 @@ public void stopMaster(ServerName serverName) throws IOException {
   @Override
   public void waitForMasterToStop(ServerName serverName, long timeout) throws IOException {
-    //ignore timeout for now
+    // ignore timeout for now
     waitOnMaster(getMasterIndex(serverName));
   }
   /**
    * Starts a region server thread running
-   *
    * @throws IOException
    * @return New RegionServerThread
    */
-  public JVMClusterUtil.RegionServerThread startRegionServer()
-      throws IOException {
+  public JVMClusterUtil.RegionServerThread startRegionServer() throws IOException {
     final Configuration newConf = HBaseConfiguration.create(conf);
     return startRegionServer(newConf);
   }
   private JVMClusterUtil.RegionServerThread startRegionServer(Configuration configuration)
     throws IOException {
-    User rsUser =
-        HBaseTestingUtility.getDifferentUser(configuration, ".hfs."+index++);
-    JVMClusterUtil.RegionServerThread t = null;
+    User rsUser = HBaseTestingUtility.getDifferentUser(configuration, ".hfs." + index++);
+    JVMClusterUtil.RegionServerThread t = null;
     try {
-      t = hbaseCluster.addRegionServer(
-          configuration, hbaseCluster.getRegionServers().size(), rsUser);
+      t = hbaseCluster.addRegionServer(configuration, hbaseCluster.getRegionServers().size(),
+        rsUser);
       t.start();
       t.waitForServerOnline();
     } catch (InterruptedException ie) {
@@ -438,16 +433,15 @@ private JVMClusterUtil.RegionServerThread startRegionServer(Configuration config
   }
   /**
-   * Starts a region server thread and waits until its processed by master. Throws an exception
-   * when it can't start a region server or when the region server is not processed by master
-   * within the timeout.
-   *
+   * Starts a region server thread and waits until its processed by master. Throws an exception when
+   * it can't start a region server or when the region server is not processed by master within the
+   * timeout.
    * @return New RegionServerThread
    */
   public JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout)
     throws IOException {
-    JVMClusterUtil.RegionServerThread t = startRegionServer();
+    JVMClusterUtil.RegionServerThread t = startRegionServer();
     ServerName rsServerName = t.getRegionServer().getServerName();
     long start = EnvironmentEdgeManager.currentTime();
@@ -467,7 +461,7 @@ public JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout)
   /**
    * Cause a region server to exit doing basic clean up only on its way out.
-   * @param serverNumber  Used as index into a list.
+   * @param serverNumber Used as index into a list.
    */
   public String abortRegionServer(int serverNumber) {
     HRegionServer server = getRegionServer(serverNumber);
@@ -478,8 +472,7 @@ public String abortRegionServer(int serverNumber) {
   /**
    * Shut down the specified region server cleanly
-   *
-   * @param serverNumber  Used as index into a list.
+   * @param serverNumber Used as index into a list.
    * @return the region server that was stopped
    */
   public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber) {
@@ -488,18 +481,15 @@ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber) {
   /**
    * Shut down the specified region server cleanly
-   *
-   * @param serverNumber  Used as index into a list.
-   * @param shutdownFS True is we are to shutdown the filesystem as part of this
-   * regionserver's shutdown. Usually we do but you do not want to do this if
-   * you are running multiple regionservers in a test and you shut down one
-   * before end of the test.
+   * @param serverNumber Used as index into a list.
+   * @param shutdownFS True is we are to shutdown the filesystem as part of this regionserver's
+   *          shutdown. Usually we do but you do not want to do this if you are running multiple
+   *          regionservers in a test and you shut down one before end of the test.
    * @return the region server that was stopped
    */
   public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber,
     final boolean shutdownFS) {
-    JVMClusterUtil.RegionServerThread server =
-      hbaseCluster.getRegionServers().get(serverNumber);
+    JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber);
     LOG.info("Stopping " + server.toString());
     server.getRegionServer().stop("Stopping rs " + serverNumber);
     return server;
   }
@@ -510,8 +500,7 @@ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber,
    * @param serverNumber Used as index into a list.
    */
   public JVMClusterUtil.RegionServerThread suspendRegionServer(int serverNumber) {
-    JVMClusterUtil.RegionServerThread server =
-      hbaseCluster.getRegionServers().get(serverNumber);
+    JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber);
     LOG.info("Suspending {}", server.toString());
     server.suspend();
     return server;
   }
@@ -522,16 +511,14 @@ public JVMClusterUtil.RegionServerThread suspendRegionServer(int serverNumber) {
    * @param serverNumber Used as index into a list.
    */
   public JVMClusterUtil.RegionServerThread resumeRegionServer(int serverNumber) {
-    JVMClusterUtil.RegionServerThread server =
-      hbaseCluster.getRegionServers().get(serverNumber);
+    JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber);
     LOG.info("Resuming {}", server.toString());
     server.resume();
     return server;
   }
   /**
-   * Wait for the specified region server to stop. Removes this thread from list
-   * of running threads.
+   * Wait for the specified region server to stop. Removes this thread from list of running threads.
    * @param serverNumber
    * @return Name of region server that just went down.
    */
@@ -539,19 +526,16 @@ public String waitOnRegionServer(final int serverNumber) {
     return this.hbaseCluster.waitOnRegionServer(serverNumber);
   }
-
   /**
    * Starts a master thread running
-   *
    * @return New RegionServerThread
    */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(
-      value = "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD",
-      justification = "Testing only, not a big deal")
+    value = "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD",
+    justification = "Testing only, not a big deal")
   public JVMClusterUtil.MasterThread startMaster() throws IOException {
     Configuration c = HBaseConfiguration.create(conf);
-    User user =
-      HBaseTestingUtility.getDifferentUser(c, ".hfs."+index++);
+    User user = HBaseTestingUtility.getDifferentUser(c, ".hfs." + index++);
     JVMClusterUtil.MasterThread t = null;
     try {
@@ -561,7 +545,7 @@ public JVMClusterUtil.MasterThread startMaster() throws IOException {
       throw new IOException("Interrupted adding master to cluster", ie);
     }
     conf.set(HConstants.MASTER_ADDRS_KEY,
-        hbaseCluster.getConfiguration().get(HConstants.MASTER_ADDRS_KEY));
+      hbaseCluster.getConfiguration().get(HConstants.MASTER_ADDRS_KEY));
     return t;
   }
@@ -578,7 +562,7 @@ public HMaster getMaster() {
    * @return the active MasterThread, null if none is active.
    */
   public MasterThread getMasterThread() {
-    for (MasterThread mt: hbaseCluster.getLiveMasters()) {
+    for (MasterThread mt : hbaseCluster.getLiveMasters()) {
       if (mt.getMaster().isActiveMaster()) {
         return mt;
       }
@@ -596,7 +580,7 @@ public HMaster getMaster(final int serverNumber) {
   /**
    * Cause a master to exit without shutting down entire cluster.
-   * @param serverNumber  Used as index into a list.
+   * @param serverNumber Used as index into a list.
   */
   public String abortMaster(int serverNumber) {
     HMaster server = getMaster(serverNumber);
@@ -607,8 +591,7 @@ public String abortMaster(int serverNumber) {
   /**
    * Shut down the specified master cleanly
-   *
-   * @param serverNumber  Used as index into a list.
+   * @param serverNumber Used as index into a list.
    * @return the region server that was stopped
    */
   public JVMClusterUtil.MasterThread stopMaster(int serverNumber) {
@@ -617,26 +600,21 @@ public JVMClusterUtil.MasterThread stopMaster(int serverNumber) {
   /**
    * Shut down the specified master cleanly
-   *
-   * @param serverNumber  Used as index into a list.
-   * @param shutdownFS True is we are to shutdown the filesystem as part of this
-   * master's shutdown. Usually we do but you do not want to do this if
-   * you are running multiple master in a test and you shut down one
-   * before end of the test.
+   * @param serverNumber Used as index into a list.
+   * @param shutdownFS True is we are to shutdown the filesystem as part of this master's shutdown.
+   *          Usually we do but you do not want to do this if you are running multiple master in a
+   *          test and you shut down one before end of the test.
    * @return the master that was stopped
    */
-  public JVMClusterUtil.MasterThread stopMaster(int serverNumber,
-      final boolean shutdownFS) {
-    JVMClusterUtil.MasterThread server =
-      hbaseCluster.getMasters().get(serverNumber);
+  public JVMClusterUtil.MasterThread stopMaster(int serverNumber, final boolean shutdownFS) {
+    JVMClusterUtil.MasterThread server = hbaseCluster.getMasters().get(serverNumber);
     LOG.info("Stopping " + server.toString());
     server.getMaster().stop("Stopping master " + serverNumber);
     return server;
   }
   /**
-   * Wait for the specified master to stop. Removes this thread from list
-   * of running threads.
+   * Wait for the specified master to stop. Removes this thread from list of running threads.
    * @param serverNumber
    * @return Name of master that just went down.
    */
@@ -645,11 +623,8 @@ public String waitOnMaster(final int serverNumber) {
   }
   /**
-   * Blocks until there is an active master and that master has completed
-   * initialization.
-   *
-   * @return true if an active master becomes available. false if there are no
-   * masters left.
+   * Blocks until there is an active master and that master has completed initialization.
+   * @return true if an active master becomes available. false if there are no masters left.
    * @throws InterruptedException
    */
   @Override
@@ -786,8 +761,8 @@ public int getNumLiveRegionServers() {
   }
   /**
-   * @return List of region server threads. Does not return the master even though it is also
-   * a region server.
+   * @return List of region server threads. Does not return the master even though it is also a
+   *         region server.
   */
   public List getRegionServerThreads() {
     return this.hbaseCluster.getRegionServers();
   }
@@ -810,10 +785,8 @@ public HRegionServer getRegionServer(int serverNumber) {
   }
   public HRegionServer getRegionServer(ServerName serverName) {
-    return hbaseCluster.getRegionServers().stream()
-      .map(t -> t.getRegionServer())
-      .filter(r -> r.getServerName().equals(serverName))
-      .findFirst().orElse(null);
+    return hbaseCluster.getRegionServers().stream().map(t -> t.getRegionServer())
+      .filter(r -> r.getServerName().equals(serverName)).findFirst().orElse(null);
   }
   public List getRegions(byte[] tableName) {
@@ -826,7 +799,7 @@ public List getRegions(TableName tableName) {
       HRegionServer hrs = rst.getRegionServer();
       for (Region region : hrs.getOnlineRegionsLocalContext()) {
         if (region.getTableDescriptor().getTableName().equals(tableName)) {
-          ret.add((HRegion)region);
+          ret.add((HRegion) region);
         }
       }
     }
@@ -834,8 +807,8 @@ public List getRegions(TableName tableName) {
   }
   /**
-   * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()}
-   * of HRS carrying regionName. Returns -1 if none found.
+   * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()} of HRS carrying
+   *         regionName. Returns -1 if none found.
   */
   public int getServerWithMeta() {
     return getServerWith(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
@@ -844,12 +817,12 @@ public int getServerWithMeta() {
   /**
    * Get the location of the specified region
    * @param regionName Name of the region in bytes
-   * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()}
-   * of HRS carrying hbase:meta. Returns -1 if none found.
+   * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()} of HRS carrying
+   *         hbase:meta. Returns -1 if none found.
   */
   public int getServerWith(byte[] regionName) {
     int index = 0;
-    for (JVMClusterUtil.RegionServerThread rst: getRegionServerThreads()) {
+    for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
       HRegionServer hrs = rst.getRegionServer();
       if (!hrs.isStopped()) {
         Region region = hrs.getOnlineRegion(regionName);
@@ -864,7 +837,7 @@ public int getServerWith(byte[] regionName) {
   @Override
   public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName)
-      throws IOException {
+    throws IOException {
     int index = getServerWith(regionName);
     if (index < 0) {
       return null;
     }
@@ -873,9 +846,9 @@ public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName)
   }
   /**
-   * Counts the total numbers of regions being served by the currently online
-   * region servers by asking each how many regions they have. Does not look
-   * at hbase:meta at all. Count includes catalog tables.
+   * Counts the total numbers of regions being served by the currently online region servers by
+   * asking each how many regions they have. Does not look at hbase:meta at all. Count includes
+   * catalog tables.
    * @return number of regions being served by all region servers
   */
   public long countServedRegions() {
@@ -887,8 +860,8 @@ public long countServedRegions() {
   }
   /**
-   * Do a simulated kill all masters and regionservers. Useful when it is
-   * impossible to bring the mini-cluster back for clean shutdown.
+   * Do a simulated kill all masters and regionservers. Useful when it is impossible to bring the
+   * mini-cluster back for clean shutdown.
   */
   public void killAll() {
     // Do backups first.
@@ -920,18 +893,17 @@ public List findRegionsForTable(TableName tableName) {
       HRegionServer hrs = rst.getRegionServer();
       for (Region region : hrs.getRegions(tableName)) {
         if (region.getTableDescriptor().getTableName().equals(tableName)) {
-          ret.add((HRegion)region);
+          ret.add((HRegion) region);
         }
       }
     }
     return ret;
   }
-
   protected int getRegionServerIndex(ServerName serverName) {
-    //we have a small number of region servers, this should be fine for now.
+    // we have a small number of region servers, this should be fine for now.
     List servers = getRegionServerThreads();
-    for (int i=0; i < servers.size(); i++) {
+    for (int i = 0; i < servers.size(); i++) {
      if (servers.get(i).getRegionServer().getServerName().equals(serverName)) {
        return i;
      }
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/StartMiniClusterOption.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/StartMiniClusterOption.java
index 2d839958c744..e7a2f5dd4dbc 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/StartMiniClusterOption.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/StartMiniClusterOption.java
@@ -1,5 +1,4 @@
-/**
- *
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -27,10 +26,9 @@
 /**
  * Options for starting up a mini cluster (including an hbase, dfs and zookeeper clusters) in test.
  * The options include HDFS options to build mini dfs cluster, Zookeeper options to build mini zk
- * cluster, and mostly HBase options to build mini hbase cluster.
- *
- * To create an object, use a {@link Builder}.
- * Example usage:
+ * cluster, and mostly HBase options to build mini hbase cluster. To create an object, use a
+ * {@link Builder}. Example usage:
+ *
            *    StartMiniClusterOption option = StartMiniClusterOption.builder().
            *        .numMasters(3).rsClass(MyRegionServer.class).createWALDir(true).build();
          @@ -44,8 +42,8 @@
           @Deprecated
           public final class StartMiniClusterOption {
             /**
          -   * Number of masters to start up.  We'll start this many hbase masters.  If numMasters > 1, you
          -   * can find the active/primary master with {@link MiniHBaseCluster#getMaster()}.
          +   * Number of masters to start up. We'll start this many hbase masters. If numMasters > 1, you can
          +   * find the active/primary master with {@link MiniHBaseCluster#getMaster()}.
              */
             private final int numMasters;
           
          @@ -62,9 +60,8 @@ public final class StartMiniClusterOption {
             private final Class masterClass;
           
             /**
          -   * Number of region servers to start up.
          -   * If this value is > 1, then make sure config "hbase.regionserver.info.port" is -1
          -   * (i.e. no ui per regionserver) otherwise bind errors.
          +   * Number of region servers to start up. If this value is > 1, then make sure config
          +   * "hbase.regionserver.info.port" is -1 (i.e. no ui per regionserver) otherwise bind errors.
              */
             private final int numRegionServers;
             /**
          @@ -95,13 +92,13 @@ public final class StartMiniClusterOption {
             private final int numZkServers;
           
             /**
          -   * Whether to create a new root or data directory path.  If true, the newly created data directory
          -   * will be configured as HBase rootdir.  This will overwrite existing root directory config.
          +   * Whether to create a new root or data directory path. If true, the newly created data directory
          +   * will be configured as HBase rootdir. This will overwrite existing root directory config.
              */
             private final boolean createRootDir;
           
             /**
          -   * Whether to create a new WAL directory.  If true, the newly created directory will be configured
          +   * Whether to create a new WAL directory. If true, the newly created directory will be configured
              * as HBase wal.dir which is separate from HBase rootdir.
              */
             private final boolean createWALDir;
          @@ -174,9 +171,9 @@ public boolean isCreateWALDir() {
             public String toString() {
               return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", masterClass=" + masterClass
                   + ", numRegionServers=" + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts)
          -        + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes
          -        + ", dataNodeHosts=" + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers
          -        + ", createRootDir=" + createRootDir + ", createWALDir=" + createWALDir + '}';
          +        + ", rsClass=" + rsClass + ", numDataNodes=" + numDataNodes + ", dataNodeHosts="
          +        + Arrays.toString(dataNodeHosts) + ", numZkServers=" + numZkServers + ", createRootDir="
          +        + createRootDir + ", createWALDir=" + createWALDir + '}';
             }
           
             /**
          @@ -187,10 +184,9 @@ public static Builder builder() {
             }
           
             /**
          -   * Builder pattern for creating an {@link StartMiniClusterOption}.
          -   *
          -   * The default values of its fields should be considered public and constant. Changing the default
          -   * values may cause other tests fail.
          +   * Builder pattern for creating an {@link StartMiniClusterOption}. The default values of its
          +   * fields should be considered public and constant. Changing the default values may cause other
          +   * tests fail.
              */
             public static final class Builder {
               private int numMasters = 1;
          @@ -212,7 +208,7 @@ public StartMiniClusterOption build() {
                 if (dataNodeHosts != null && dataNodeHosts.length != 0) {
                   numDataNodes = dataNodeHosts.length;
                 }
          -      return new StartMiniClusterOption(numMasters,numAlwaysStandByMasters, masterClass,
          +      return new StartMiniClusterOption(numMasters, numAlwaysStandByMasters, masterClass,
                     numRegionServers, rsPorts, rsClass, numDataNodes, dataNodeHosts, numZkServers,
                     createRootDir, createWALDir);
               }
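For reference, the option object assembled by this builder is normally handed to the mini-cluster bootstrap. A minimal usage sketch (the HBaseTestingUtility instance UTIL and the chosen counts are assumptions for illustration, not part of this change):

    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .createWALDir(true)
        .build();
    // UTIL is an existing HBaseTestingUtility; startMiniCluster(option) applies the option.
    UTIL.startMiniCluster(option);
    // ... run the test ...
    UTIL.shutdownMiniCluster();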
          diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseCluster.java
          index d59c6db2e6a3..1136e660453b 100644
          --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseCluster.java
          +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseCluster.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseClusterImpl.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseClusterImpl.java
          index 8cea789ab684..806e66c6f9eb 100644
          --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseClusterImpl.java
          +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseClusterImpl.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -47,7 +47,7 @@ class TestingHBaseClusterImpl implements TestingHBaseCluster {
             private final StartTestingClusterOption option;
           
             private final ExecutorService executor = Executors.newCachedThreadPool(new ThreadFactoryBuilder()
          -    .setNameFormat(getClass().getSuperclass() + "-%d").setDaemon(true).build());
          +      .setNameFormat(getClass().getSuperclass() + "-%d").setDaemon(true).build());
           
             private boolean miniClusterRunning = false;
           
          @@ -111,8 +111,8 @@ public CompletableFuture stopRegionServer(ServerName serverName) throws Ex
               CompletableFuture future = new CompletableFuture<>();
               int index = getRegionServerIndex(serverName);
               if (index == -1) {
          -      future
          -        .completeExceptionally(new IllegalArgumentException("Unknown region server " + serverName));
          +      future.completeExceptionally(
          +        new IllegalArgumentException("Unknown region server " + serverName));
               }
               join(util.getMiniHBaseCluster().stopRegionServer(index), future);
               return future;
          @@ -188,13 +188,13 @@ public Optional getActiveMasterAddress() {
             @Override
             public List getBackupMasterAddresses() {
               return util.getMiniHBaseCluster().getMasterThreads().stream().map(MasterThread::getMaster)
          -      .filter(m -> !m.isActiveMaster()).map(HMaster::getServerName).collect(Collectors.toList());
          +        .filter(m -> !m.isActiveMaster()).map(HMaster::getServerName).collect(Collectors.toList());
             }
           
             @Override
             public List getRegionServerAddresses() {
               return util.getMiniHBaseCluster().getRegionServerThreads().stream()
          -      .map(t -> t.getRegionServer().getServerName()).collect(Collectors.toList());
          +        .map(t -> t.getRegionServer().getServerName()).collect(Collectors.toList());
             }
           
             @Override
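For reference, the stop/start methods of this implementation hand back futures rather than blocking. A hedged sketch of driving them from a test (assumes an already started TestingHBaseCluster named cluster; exception handling elided):

    // Pick one of the live region servers reported by the cluster.
    ServerName rs = cluster.getRegionServerAddresses().get(0);
    // The returned CompletableFuture completes once the region server has actually stopped.
    cluster.stopRegionServer(rs).join();
    // The active master, if any, is exposed the same way.
    Optional<ServerName> active = cluster.getActiveMasterAddress();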
          diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseClusterOption.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseClusterOption.java
          index 87d6e2a07de8..e655bc26f327 100644
          --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseClusterOption.java
          +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/testing/TestingHBaseClusterOption.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -102,8 +102,8 @@ public final class TestingHBaseClusterOption {
              * Private constructor. Use {@link Builder#build()}.
              */
             private TestingHBaseClusterOption(Configuration conf, int numMasters, int numAlwaysStandByMasters,
          -    int numRegionServers, List rsPorts, int numDataNodes, String[] dataNodeHosts,
          -    int numZkServers, boolean createRootDir, boolean createWALDir) {
          +      int numRegionServers, List rsPorts, int numDataNodes, String[] dataNodeHosts,
          +      int numZkServers, boolean createRootDir, boolean createWALDir) {
               this.conf = conf;
               this.numMasters = numMasters;
               this.numAlwaysStandByMasters = numAlwaysStandByMasters;
          @@ -158,10 +158,11 @@ public boolean isCreateWALDir() {
           
             @Override
             public String toString() {
          -    return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", numRegionServers=" +
          -      numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts) + ", numDataNodes=" +
          -      numDataNodes + ", dataNodeHosts=" + Arrays.toString(dataNodeHosts) + ", numZkServers=" +
          -      numZkServers + ", createRootDir=" + createRootDir + ", createWALDir=" + createWALDir + '}';
          +    return "StartMiniClusterOption{" + "numMasters=" + numMasters + ", numRegionServers="
          +        + numRegionServers + ", rsPorts=" + StringUtils.join(rsPorts) + ", numDataNodes="
          +        + numDataNodes + ", dataNodeHosts=" + Arrays.toString(dataNodeHosts) + ", numZkServers="
          +        + numZkServers + ", createRootDir=" + createRootDir + ", createWALDir=" + createWALDir
          +        + '}';
             }
           
             /**
          @@ -169,9 +170,9 @@ public String toString() {
              */
             StartTestingClusterOption convert() {
               return StartTestingClusterOption.builder().numMasters(numMasters)
          -      .numAlwaysStandByMasters(numAlwaysStandByMasters).numRegionServers(numRegionServers)
          -      .rsPorts(rsPorts).numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
          -      .numZkServers(numZkServers).createRootDir(createRootDir).createWALDir(createWALDir).build();
          +        .numAlwaysStandByMasters(numAlwaysStandByMasters).numRegionServers(numRegionServers)
          +        .rsPorts(rsPorts).numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
          +        .numZkServers(numZkServers).createRootDir(createRootDir).createWALDir(createWALDir).build();
             }
           
             /**
          @@ -206,8 +207,8 @@ public TestingHBaseClusterOption build() {
                   numDataNodes = dataNodeHosts.length;
                 }
                 return new TestingHBaseClusterOption(conf, numMasters, numAlwaysStandByMasters,
          -        numRegionServers, rsPorts, numDataNodes, dataNodeHosts, numZkServers, createRootDir,
          -        createWALDir);
          +          numRegionServers, rsPorts, numDataNodes, dataNodeHosts, numZkServers, createRootDir,
          +          createWALDir);
               }
           
               public Builder conf(Configuration conf) {
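For reference, this option class is the user-facing counterpart of StartTestingClusterOption; convert() above is how it is translated internally. A minimal sketch of building one and creating a cluster from it (the field values are illustrative only):

    TestingHBaseClusterOption option = TestingHBaseClusterOption.builder()
        .numMasters(2)
        .numRegionServers(3)
        .numDataNodes(3)
        .build();
    TestingHBaseCluster cluster = TestingHBaseCluster.create(option);
    cluster.start();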
          diff --git a/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtilitySpinup.java b/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtilitySpinup.java
          index ead1c72a7c4e..f3cc8c7a5926 100644
          --- a/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtilitySpinup.java
          +++ b/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtilitySpinup.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -36,7 +36,7 @@ public class TestHBaseTestingUtilitySpinup {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestHBaseTestingUtilitySpinup.class);
          +      HBaseClassTestRule.forClass(TestHBaseTestingUtilitySpinup.class);
           
             private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
           
          diff --git a/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/testing/TestTestingHBaseCluster.java b/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/testing/TestTestingHBaseCluster.java
          index 4ff7450e8f39..a64472f30a64 100644
          --- a/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/testing/TestTestingHBaseCluster.java
          +++ b/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/testing/TestTestingHBaseCluster.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -49,7 +49,7 @@ public class TestTestingHBaseCluster {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestTestingHBaseCluster.class);
          +      HBaseClassTestRule.forClass(TestTestingHBaseCluster.class);
           
             private static TestingHBaseCluster CLUSTER;
           
          @@ -60,7 +60,7 @@ public class TestTestingHBaseCluster {
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
               CLUSTER = TestingHBaseCluster.create(TestingHBaseClusterOption.builder().numMasters(2)
          -      .numRegionServers(3).numDataNodes(3).build());
          +        .numRegionServers(3).numDataNodes(3).build());
             }
           
             @AfterClass
          diff --git a/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/testing/TestTestingHBaseClusterImplForCPs.java b/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/testing/TestTestingHBaseClusterImplForCPs.java
          index 322bd37e09b4..ad7f603b3e7a 100644
          --- a/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/testing/TestTestingHBaseClusterImplForCPs.java
          +++ b/hbase-testing-util/src/test/java/org/apache/hadoop/hbase/testing/TestTestingHBaseClusterImplForCPs.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -52,7 +52,7 @@ public class TestTestingHBaseClusterImplForCPs {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestTestingHBaseClusterImplForCPs.class);
          +      HBaseClassTestRule.forClass(TestTestingHBaseClusterImplForCPs.class);
           
             private static TestingHBaseCluster CLUSTER;
           
          @@ -67,12 +67,12 @@ public class TestTestingHBaseClusterImplForCPs {
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
               CLUSTER = TestingHBaseCluster.create(TestingHBaseClusterOption.builder().numMasters(2)
          -      .numRegionServers(3).numDataNodes(3).build());
          +        .numRegionServers(3).numDataNodes(3).build());
               CLUSTER.start();
               CONN = ConnectionFactory.createConnection(CLUSTER.getConf());
               ADMIN = CONN.getAdmin();
               ADMIN.createTable(TableDescriptorBuilder.newBuilder(NAME)
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build());
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build());
               ADMIN.balancerSwitch(false, true);
             }
           
          @@ -100,8 +100,9 @@ public void testGetRegion() throws IOException {
               assertEquals(1, regions.size());
               assertSame(region, regions.get(0));
           
          -    assertFalse(CLUSTER
          -      .getRegion(RegionInfoBuilder.newBuilder(TableName.valueOf("whatever")).build()).isPresent());
          +    assertFalse(
          +      CLUSTER.getRegion(RegionInfoBuilder.newBuilder(TableName.valueOf("whatever")).build())
          +          .isPresent());
               assertFalse(CLUSTER.getOnlineRegionsInterface(ServerName.valueOf("whatever,1,1")).isPresent());
             }
           }
          diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
          index 3cc6b9b09fb9..b92b220b6cf8 100644
          --- a/hbase-thrift/pom.xml
          +++ b/hbase-thrift/pom.xml
          @@ -1,7 +1,5 @@
          -
          -
          +
          +
             
             4.0.0
             
          -    hbase-build-configuration
               org.apache.hbase
          +    hbase-build-configuration
               3.0.0-alpha-3-SNAPSHOT
               ../hbase-build-configuration
             
          @@ -32,126 +30,6 @@
             Apache HBase - Thrift
             HBase Thrift Server
           
          -  
          -    
          -    
          -      
          -      
          -        ${project.build.directory}
          -        
          -          hbase-webapps/**
          -        
          -      
          -      
          -        src/main/resources/
          -        
          -          **/*.thrift
          -        
          -      
          -    
          -    
          -      
          -        src/test/resources/META-INF/
          -        META-INF/
          -        
          -          LICENSE
          -          NOTICE
          -        
          -        true
          -      
          -      
          -        src/test/resources
          -        
          -          **/**
          -        
          -      
          -    
          -
          -    
          -      
          -        
          -        maven-assembly-plugin
          -        
          -          true
          -        
          -      
          -      
          -      
          -        maven-antrun-plugin
          -        
          -          
          -          
          -            generate
          -            generate-sources
          -            
          -              
          -                
          -                
          -                
          -                
          -                
          -                  
          -                    
          -                    
          -                    
          -                  
          -                
          -                
          -                
          -                
          -                
          -              
          -            
          -            
          -              run
          -            
          -          
          -        
          -      
          -      
          -        org.codehaus.mojo
          -        build-helper-maven-plugin
          -        
          -          
          -          
          -            jspcSource-packageInfo-source
          -            generate-sources
          -            
          -              add-source
          -            
          -            
          -              
          -                ${project.build.directory}/generated-sources/java
          -              
          -            
          -          
          -        
          -      
          -      
          -      
          -        org.apache.maven.plugins
          -        maven-source-plugin
          -      
          -      
          -        org.apache.maven.plugins
          -        maven-checkstyle-plugin
          -        
          -          true
          -        
          -      
          -      
          -        net.revelc.code
          -        warbucks-maven-plugin
          -      
          -    
          -  
          -
             
               
               
          @@ -274,6 +152,121 @@
               
             
           
          +  
          +    
          +    
          +      
          +      
          +        ${project.build.directory}
          +        
          +          hbase-webapps/**
          +        
          +      
          +      
          +        src/main/resources/
          +        
          +          **/*.thrift
          +        
          +      
          +    
          +    
          +      
          +        META-INF/
          +        true
          +        src/test/resources/META-INF/
          +        
          +          LICENSE
          +          NOTICE
          +        
          +      
          +      
          +        src/test/resources
          +        
          +          **/**
          +        
          +      
          +    
          +
          +    
          +      
          +        
          +        maven-assembly-plugin
          +        
          +          true
          +        
          +      
          +      
          +      
          +        maven-antrun-plugin
          +        
          +          
          +          
          +            generate
          +            
          +              run
          +            
          +            generate-sources
          +            
          +              
          +                
          +                
          +                
          +                
          +                
          +                  
          +                    
          +                    
          +                    
          +                  
          +                
          +                
          +                
          +                
          +                
          +              
          +            
          +          
          +        
          +      
          +      
          +        org.codehaus.mojo
          +        build-helper-maven-plugin
          +        
          +          
          +          
          +            jspcSource-packageInfo-source
          +            
          +              add-source
          +            
          +            generate-sources
          +            
          +              
          +                ${project.build.directory}/generated-sources/java
          +              
          +            
          +          
          +        
          +      
          +      
          +      
          +        org.apache.maven.plugins
          +        maven-source-plugin
          +      
          +      
          +        org.apache.maven.plugins
          +        maven-checkstyle-plugin
          +        
          +          true
          +        
          +      
          +      
          +        net.revelc.code
          +        warbucks-maven-plugin
          +      
          +    
          +  
          +
             
               
               
          @@ -286,10 +279,10 @@
                       
                         
                           license-javadocs
          -                prepare-package
                           
                             copy-resources
                           
          +                prepare-package
                           
                             ${project.build.directory}/apidocs
                             
          @@ -349,16 +342,14 @@
                                 thrift.version
                                 "The Thrift version must be specified."
                                 0\.14\.1
          -                      
          --
          +                      -
           -
           [FATAL] ==========================================================================================
           [FATAL] HBase Thrift requires the thrift generator version 0.14.1.
           [FATAL] Setting it to something else needs to be reviewed for wire and behavior compatibility.
           [FATAL] ==========================================================================================
           -
          --
          -                      
          +-
                               
                             
                             true
          @@ -373,10 +364,10 @@
                       
                         
                           check-thrift-version
          -                generate-sources
                           
                             exec
                           
          +                generate-sources
                           
                             sh
                             ${basedir}
          @@ -391,10 +382,10 @@
                         
                         
                           thrift
          -                generate-sources
                           
                             exec
                           
          +                generate-sources
                           
                             ${thrift.path}
                             ${basedir}
          @@ -410,10 +401,10 @@
                         
                         
                           thrift2
          -                generate-sources
                           
                             exec
                           
          +                generate-sources
                           
                             ${thrift.path}
                             ${basedir}
          @@ -436,7 +427,9 @@
               
                 hadoop-3.0
                 
          -        !hadoop.profile
          +        
          +          !hadoop.profile
          +        
                 
                 
                   
          @@ -469,17 +462,16 @@
                       
                         
                           create-mrapp-generated-classpath
          -                generate-test-resources
                           
                             build-classpath
                           
          +                generate-test-resources
                           
                             
          -                  ${project.build.directory}/test-classes/mrapp-generated-classpath
          -                  
          +                  ${project.build.directory}/test-classes/mrapp-generated-classpath
                           
                         
                       
          @@ -515,7 +507,7 @@
                                   
                                 
                                 
          -                        
          +                        
                                 
                               
                               
          @@ -528,7 +520,7 @@
                                   
                                 
                                 
          -                        
          +                        
                                 
                               
                             
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java
          index 82c9b5d607d1..145e25fdbb2d 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import java.util.ArrayList;
          @@ -24,14 +23,12 @@
           import java.util.List;
           import java.util.concurrent.BlockingQueue;
           import java.util.concurrent.TimeUnit;
          -
           import org.apache.yetus.audience.InterfaceAudience;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
           /**
          - * A BlockingQueue reports waiting time in queue and queue length to
          - * ThriftMetrics.
          + * A BlockingQueue reports waiting time in queue and queue length to ThriftMetrics.
            */
           @InterfaceAudience.Private
           public class CallQueue implements BlockingQueue {
          @@ -40,8 +37,7 @@ public class CallQueue implements BlockingQueue {
             private final BlockingQueue underlyingQueue;
             private final ThriftMetrics metrics;
           
          -  public CallQueue(BlockingQueue underlyingQueue,
          -                   ThriftMetrics metrics) {
          +  public CallQueue(BlockingQueue underlyingQueue, ThriftMetrics metrics) {
               this.underlyingQueue = underlyingQueue;
               this.metrics = metrics;
             }
          @@ -71,7 +67,7 @@ public long timeInQueue() {
               @Override
               public boolean equals(Object other) {
                 if (other instanceof Call) {
          -        Call otherCall = (Call)(other);
          +        Call otherCall = (Call) (other);
                   return this.underlyingRunnable.equals(otherCall.underlyingRunnable);
                 } else if (other instanceof Runnable) {
                   return this.underlyingRunnable.equals(other);
          @@ -127,11 +123,9 @@ public int drainTo(Collection destination) {
             }
           
             @Override
          -  public int drainTo(Collection destination,
          -                     int maxElements) {
          +  public int drainTo(Collection destination, int maxElements) {
               if (destination == this) {
          -      throw new IllegalArgumentException(
          -          "A BlockingQueue cannot drain to itself.");
          +      throw new IllegalArgumentException("A BlockingQueue cannot drain to itself.");
               }
               List drained = new ArrayList<>();
               underlyingQueue.drainTo(drained, maxElements);
          @@ -144,17 +138,16 @@ public int drainTo(Collection destination,
               return sz;
             }
           
          -
             @Override
             public boolean offer(Runnable element) {
               return underlyingQueue.offer(new Call(element));
             }
           
             @Override
          -  public boolean offer(Runnable element, long timeout, TimeUnit unit)
          -      throws InterruptedException {
          +  public boolean offer(Runnable element, long timeout, TimeUnit unit) throws InterruptedException {
               return underlyingQueue.offer(new Call(element), timeout, unit);
             }
          +
             @Override
             public void put(Runnable element) throws InterruptedException {
               underlyingQueue.put(new Call(element));
          @@ -203,6 +196,7 @@ public boolean isEmpty() {
             public Iterator iterator() {
               return new Iterator() {
                 final Iterator underlyingIterator = underlyingQueue.iterator();
          +
                 @Override
                 public Runnable next() {
                   return underlyingIterator.next();
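For reference, CallQueue is a thin BlockingQueue wrapper whose purpose is the metrics hook: each offered Runnable is wrapped in a Call that records how long it waited in the queue. A hedged sketch of wiring it into a worker pool (the ThriftMetrics instance and the thread count are assumptions for illustration):

    // 'metrics' is assumed to be an already constructed ThriftMetrics instance.
    BlockingQueue<Runnable> callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics);
    ExecutorService workers =
        new ThreadPoolExecutor(16, 16, 60, TimeUnit.SECONDS, callQueue);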
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
          index 930dfe45800d..58f9c1a8f29a 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
          @@ -24,12 +24,12 @@
            */
           @InterfaceAudience.Private
           public final class Constants {
          -  private Constants(){}
          +  private Constants() {
          +  }
           
             public static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k
           
          -  public static final String SERVER_TYPE_CONF_KEY =
          -      "hbase.regionserver.thrift.server.type";
          +  public static final String SERVER_TYPE_CONF_KEY = "hbase.regionserver.thrift.server.type";
           
             public static final String COMPACT_CONF_KEY = "hbase.regionserver.thrift.compact";
             public static final boolean COMPACT_CONF_DEFAULT = false;
          @@ -55,8 +55,8 @@ private Constants(){}
             public static final String THRIFT_SSL_KEYSTORE_STORE_KEY = "hbase.thrift.ssl.keystore.store";
             public static final String THRIFT_SSL_KEYSTORE_PASSWORD_KEY =
                 "hbase.thrift.ssl.keystore.password";
          -  public static final String THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY
          -      = "hbase.thrift.ssl.keystore.keypassword";
          +  public static final String THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY =
          +      "hbase.thrift.ssl.keystore.keypassword";
             public static final String THRIFT_SSL_EXCLUDE_CIPHER_SUITES_KEY =
                 "hbase.thrift.ssl.exclude.cipher.suites";
             public static final String THRIFT_SSL_INCLUDE_CIPHER_SUITES_KEY =
          @@ -65,15 +65,12 @@ private Constants(){}
                 "hbase.thrift.ssl.exclude.protocols";
             public static final String THRIFT_SSL_INCLUDE_PROTOCOLS_KEY =
                 "hbase.thrift.ssl.include.protocols";
          -  public static final String THRIFT_SSL_KEYSTORE_TYPE_KEY =
          -    "hbase.thrift.ssl.keystore.type";
          -  public static final String THRIFT_SSL_KEYSTORE_TYPE_DEFAULT =
          -    "jks";
          -
          +  public static final String THRIFT_SSL_KEYSTORE_TYPE_KEY = "hbase.thrift.ssl.keystore.type";
          +  public static final String THRIFT_SSL_KEYSTORE_TYPE_DEFAULT = "jks";
           
             public static final String THRIFT_SUPPORT_PROXYUSER_KEY = "hbase.thrift.support.proxyuser";
           
          -  //kerberos related configs
          +  // kerberos related configs
             public static final String THRIFT_DNS_INTERFACE_KEY = "hbase.thrift.dns.interface";
             public static final String THRIFT_DNS_NAMESERVER_KEY = "hbase.thrift.dns.nameserver";
             public static final String THRIFT_KERBEROS_PRINCIPAL_KEY = "hbase.thrift.kerberos.principal";
          @@ -82,23 +79,18 @@ private Constants(){}
             public static final String THRIFT_SPNEGO_KEYTAB_FILE_KEY = "hbase.thrift.spnego.keytab.file";
           
             /**
          -   * Amount of time in milliseconds before a server thread will timeout
          -   * waiting for client to send data on a connected socket. Currently,
          -   * applies only to TBoundedThreadPoolServer
          +   * Amount of time in milliseconds before a server thread will timeout waiting for client to send
          +   * data on a connected socket. Currently, applies only to TBoundedThreadPoolServer
              */
             public static final String THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY =
                 "hbase.thrift.server.socket.read.timeout";
             public static final int THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT = 60000;
           
          -
             /**
          -   * Thrift quality of protection configuration key. Valid values can be:
          -   * auth-conf: authentication, integrity and confidentiality checking
          -   * auth-int: authentication and integrity checking
          -   * auth: authentication only
          -   *
          -   * This is used to authenticate the callers and support impersonation.
          -   * The thrift server and the HBase cluster must run in secure mode.
+   * Thrift quality of protection configuration key. Valid values are: auth-conf (authentication,
+   * integrity and confidentiality checking), auth-int (authentication and integrity checking) and
+   * auth (authentication only). This is used to authenticate the callers and support
+   * impersonation. The thrift server and the HBase cluster must run in secure mode.
              */
             public static final String THRIFT_QOP_KEY = "hbase.thrift.security.qop";
           
          @@ -142,8 +134,8 @@ private Constants(){}
             public static final String PORT_OPTION = "port";
             public static final String INFOPORT_OPTION = "infoport";
           
          -  //for thrift2 server
          -  public static final String READONLY_OPTION ="readonly";
          +  // for thrift2 server
          +  public static final String READONLY_OPTION = "readonly";
           
             public static final String THRIFT_READONLY_ENABLED = "hbase.thrift.readonly";
             public static final boolean THRIFT_READONLY_ENABLED_DEFAULT = false;
          @@ -159,5 +151,4 @@ private Constants(){}
             public static final String HBASE_THRIFT_CLIENT_BUIDLER_CLASS =
                 "hbase.thrift.client.builder.class";
           
          -
           }
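Reviewer note: the hunk above only reshuffles whitespace around plain configuration keys. For orientation, a minimal sketch of how a deployment might set a few of these keys on an HBase Configuration before starting the Thrift server; the chosen values are illustrative only, not recommendations.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class ThriftServerConfigExample {
      public static Configuration buildConf() {
        // Keys mirror the constants defined above; the values here are purely illustrative.
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.regionserver.thrift.compact", true);    // COMPACT_CONF_KEY
        conf.set("hbase.thrift.ssl.keystore.type", "jks");             // THRIFT_SSL_KEYSTORE_TYPE_KEY
        conf.setInt("hbase.thrift.server.socket.read.timeout", 60000); // THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY
        conf.set("hbase.thrift.security.qop", "auth-conf");            // THRIFT_QOP_KEY
        return conf;
      }
    }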
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HBaseServiceHandler.java
          index ccb071cc5930..207cef90316c 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HBaseServiceHandler.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HBaseServiceHandler.java
          @@ -1,5 +1,4 @@
          -/**
          - *
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -20,7 +19,6 @@
           
           import java.io.IOException;
           import java.nio.ByteBuffer;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.HBaseInterfaceAudience;
           import org.apache.hadoop.hbase.client.Admin;
          @@ -31,8 +29,7 @@
           import org.apache.yetus.audience.InterfaceAudience;
           
           /**
          - * abstract class for HBase handler
          - * providing a Connection cache and get table/admin method
+ * Abstract class for HBase handlers, providing a Connection cache and table/admin accessors.
            */
           @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
           public abstract class HBaseServiceHandler {
          @@ -43,13 +40,12 @@ public abstract class HBaseServiceHandler {
           
             protected final ConnectionCache connectionCache;
           
          -  public HBaseServiceHandler(final Configuration c,
          -      final UserProvider userProvider) throws IOException {
          +  public HBaseServiceHandler(final Configuration c, final UserProvider userProvider)
          +      throws IOException {
               this.conf = c;
               int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
               int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
          -    connectionCache = new ConnectionCache(
          -        conf, userProvider, cleanInterval, maxIdleTime);
          +    connectionCache = new ConnectionCache(conf, userProvider, cleanInterval, maxIdleTime);
             }
           
             protected ThriftMetrics metrics = null;
          @@ -71,9 +67,7 @@ protected Admin getAdmin() throws IOException {
           
             /**
              * Creates and returns a Table instance from a given table name.
          -   *
          -   * @param tableName
          -   *          name of table
          +   * @param tableName name of table
              * @return Table object
              * @throws IOException if getting the table fails
              */
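Reviewer note: HBaseServiceHandler hands out Table and Admin instances backed by a per-user ConnectionCache. A rough, self-contained sketch of the same accessor pattern using only the public client API; the class below is invented for illustration and skips the per-user caching and idle-connection cleanup the real handler performs.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    // Hypothetical handler skeleton: one shared Connection, tables/admins created per call.
    public abstract class SimpleServiceHandler {
      private final Connection connection;

      protected SimpleServiceHandler(Configuration conf) throws IOException {
        this.connection = ConnectionFactory.createConnection(conf);
      }

      protected Table getTable(String tableName) throws IOException {
        return connection.getTable(TableName.valueOf(tableName));
      }

      protected Admin getAdmin() throws IOException {
        return connection.getAdmin();
      }
    }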
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java
          index ad1384c60531..640f45b76787 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java
          @@ -1,26 +1,23 @@
           /*
            * Licensed to the Apache Software Foundation (ASF) under one
          - * or more contributor license agreements. See the NOTICE file
          + * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          - * regarding copyright ownership. The ASF licenses this file
          + * regarding copyright ownership.  The ASF licenses this file
            * to you under the Apache License, Version 2.0 (the
            * "License"); you may not use this file except in compliance
          - * with the License. You may obtain a copy of the License at
          + * with the License.  You may obtain a copy of the License at
            *
          - *   http://www.apache.org/licenses/LICENSE-2.0
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * Unless required by applicable law or agreed to in writing,
          - * software distributed under the License is distributed on an
          - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
          - * KIND, either express or implied. See the License for the
          - * specific language governing permissions and limitations
          - * under the License.
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import java.util.Locale;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.thrift.server.TThreadedSelectorServer;
           import org.apache.thrift.transport.TNonblockingServerTransport;
          @@ -38,20 +35,17 @@ public class HThreadedSelectorServerArgs extends TThreadedSelectorServer.Args {
             /**
              * Number of selector threads for reading and writing socket
              */
          -  public static final String SELECTOR_THREADS_CONF_KEY =
          -      "hbase.thrift.selector.threads";
          +  public static final String SELECTOR_THREADS_CONF_KEY = "hbase.thrift.selector.threads";
           
             /**
    * Number of threads for processing the thrift calls
              */
          -  public static final String WORKER_THREADS_CONF_KEY =
          -      "hbase.thrift.worker.threads";
          +  public static final String WORKER_THREADS_CONF_KEY = "hbase.thrift.worker.threads";
           
             /**
              * Time to wait for server to stop gracefully
              */
          -  public static final String STOP_TIMEOUT_CONF_KEY =
          -      "hbase.thrift.stop.timeout.seconds";
          +  public static final String STOP_TIMEOUT_CONF_KEY = "hbase.thrift.stop.timeout.seconds";
           
             /**
              * Maximum number of accepted elements per selector
          @@ -62,37 +56,28 @@ public class HThreadedSelectorServerArgs extends TThreadedSelectorServer.Args {
             /**
              * The strategy for handling new accepted connections.
              */
          -  public static final String ACCEPT_POLICY_CONF_KEY =
          -      "hbase.thrift.accept.policy";
          +  public static final String ACCEPT_POLICY_CONF_KEY = "hbase.thrift.accept.policy";
           
          -  public HThreadedSelectorServerArgs(
          -      TNonblockingServerTransport transport, Configuration conf) {
          +  public HThreadedSelectorServerArgs(TNonblockingServerTransport transport, Configuration conf) {
               super(transport);
               readConf(conf);
             }
           
             private void readConf(Configuration conf) {
          -    int selectorThreads = conf.getInt(
          -        SELECTOR_THREADS_CONF_KEY, getSelectorThreads());
          -    int workerThreads = conf.getInt(
          -        WORKER_THREADS_CONF_KEY, getWorkerThreads());
          -    int stopTimeoutVal = conf.getInt(
          -        STOP_TIMEOUT_CONF_KEY, getStopTimeoutVal());
          -    int acceptQueueSizePerThread = conf.getInt(
          -        ACCEPT_QUEUE_SIZE_PER_THREAD_CONF_KEY, getAcceptQueueSizePerThread());
          -    AcceptPolicy acceptPolicy = AcceptPolicy.valueOf(conf.get(
          -        ACCEPT_POLICY_CONF_KEY, getAcceptPolicy().toString()).toUpperCase(Locale.ROOT));
          +    int selectorThreads = conf.getInt(SELECTOR_THREADS_CONF_KEY, getSelectorThreads());
          +    int workerThreads = conf.getInt(WORKER_THREADS_CONF_KEY, getWorkerThreads());
          +    int stopTimeoutVal = conf.getInt(STOP_TIMEOUT_CONF_KEY, getStopTimeoutVal());
          +    int acceptQueueSizePerThread =
          +        conf.getInt(ACCEPT_QUEUE_SIZE_PER_THREAD_CONF_KEY, getAcceptQueueSizePerThread());
          +    AcceptPolicy acceptPolicy = AcceptPolicy.valueOf(
          +      conf.get(ACCEPT_POLICY_CONF_KEY, getAcceptPolicy().toString()).toUpperCase(Locale.ROOT));
           
          -    super.selectorThreads(selectorThreads)
          -         .workerThreads(workerThreads)
          -         .stopTimeoutVal(stopTimeoutVal)
          -         .acceptQueueSizePerThread(acceptQueueSizePerThread)
          -         .acceptPolicy(acceptPolicy);
          +    super.selectorThreads(selectorThreads).workerThreads(workerThreads)
          +        .stopTimeoutVal(stopTimeoutVal).acceptQueueSizePerThread(acceptQueueSizePerThread)
          +        .acceptPolicy(acceptPolicy);
           
          -    LOG.info("Read configuration selectorThreads:" + selectorThreads +
          -             " workerThreads:" + workerThreads +
          -             " stopTimeoutVal:" + stopTimeoutVal + "sec" +
          -             " acceptQueueSizePerThread:" + acceptQueueSizePerThread +
          -             " acceptPolicy:" + acceptPolicy);
          +    LOG.info("Read configuration selectorThreads:" + selectorThreads + " workerThreads:"
          +        + workerThreads + " stopTimeoutVal:" + stopTimeoutVal + "sec" + " acceptQueueSizePerThread:"
          +        + acceptQueueSizePerThread + " acceptPolicy:" + acceptPolicy);
             }
           }
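Reviewer note: readConf reads every knob with the current value as the fallback and normalises the accept-policy string before Enum.valueOf. A small sketch of that read-with-default plus Locale.ROOT-uppercasing pattern; the key names and the stand-in enum below are illustrative, not the real Thrift types.

    import java.util.Locale;
    import org.apache.hadoop.conf.Configuration;

    public final class SelectorArgsExample {
      enum AcceptPolicy { FAIR_ACCEPT, FAST_ACCEPT }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("hbase.thrift.selector.threads", 4);
        conf.set("hbase.thrift.accept.policy", "fast_accept");

        // Read-with-fallback: the second argument is used when the key is unset.
        int selectorThreads = conf.getInt("hbase.thrift.selector.threads", 2);
        int workerThreads = conf.getInt("hbase.thrift.worker.threads", 64);

        // Enum constants are upper case, so normalise the configured string with Locale.ROOT.
        AcceptPolicy policy = AcceptPolicy.valueOf(
          conf.get("hbase.thrift.accept.policy", AcceptPolicy.FAIR_ACCEPT.toString())
            .toUpperCase(Locale.ROOT));

        System.out.println(selectorThreads + " " + workerThreads + " " + policy);
      }
    }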
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java
          index 1402f8697e8a..4c7b29c1d1d0 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -15,22 +15,20 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import java.lang.reflect.InvocationHandler;
           import java.lang.reflect.InvocationTargetException;
           import java.lang.reflect.Method;
           import java.lang.reflect.Proxy;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.thrift.generated.Hbase;
           import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
           import org.apache.yetus.audience.InterfaceAudience;
           
           /**
          - * Converts a Hbase.Iface using InvocationHandler so that it reports process
          - * time of each call to ThriftMetrics.
          + * Converts a Hbase.Iface using InvocationHandler so that it reports process time of each call to
          + * ThriftMetrics.
            */
           @InterfaceAudience.Private
           public final class HbaseHandlerMetricsProxy implements InvocationHandler {
          @@ -38,34 +36,27 @@ public final class HbaseHandlerMetricsProxy implements InvocationHandler {
             private final Object handler;
             private final ThriftMetrics metrics;
           
          -  public static Hbase.Iface newInstance(Hbase.Iface handler,
          -                                        ThriftMetrics metrics,
          -                                        Configuration conf) {
          -    return (Hbase.Iface) Proxy.newProxyInstance(
          -        handler.getClass().getClassLoader(),
          -        new Class[]{Hbase.Iface.class},
          -        new HbaseHandlerMetricsProxy(handler, metrics, conf));
          +  public static Hbase.Iface newInstance(Hbase.Iface handler, ThriftMetrics metrics,
          +      Configuration conf) {
          +    return (Hbase.Iface) Proxy.newProxyInstance(handler.getClass().getClassLoader(),
          +      new Class[] { Hbase.Iface.class }, new HbaseHandlerMetricsProxy(handler, metrics, conf));
             }
           
             // for thrift 2
          -  public static THBaseService.Iface newInstance(THBaseService.Iface handler,
          -      ThriftMetrics metrics,
          +  public static THBaseService.Iface newInstance(THBaseService.Iface handler, ThriftMetrics metrics,
                 Configuration conf) {
          -    return (THBaseService.Iface) Proxy.newProxyInstance(
          -        handler.getClass().getClassLoader(),
          -        new Class[]{THBaseService.Iface.class},
          -        new HbaseHandlerMetricsProxy(handler, metrics, conf));
          +    return (THBaseService.Iface) Proxy.newProxyInstance(handler.getClass().getClassLoader(),
          +      new Class[] { THBaseService.Iface.class },
          +      new HbaseHandlerMetricsProxy(handler, metrics, conf));
             }
           
          -  private HbaseHandlerMetricsProxy(
          -      Object handler, ThriftMetrics metrics, Configuration conf) {
          +  private HbaseHandlerMetricsProxy(Object handler, ThriftMetrics metrics, Configuration conf) {
               this.handler = handler;
               this.metrics = metrics;
             }
           
             @Override
          -  public Object invoke(Object proxy, Method m, Object[] args)
          -      throws Throwable {
          +  public Object invoke(Object proxy, Method m, Object[] args) throws Throwable {
               Object result;
               long start = now();
               try {
          @@ -75,15 +66,14 @@ public Object invoke(Object proxy, Method m, Object[] args)
                 throw e.getTargetException();
               } catch (Exception e) {
                 metrics.exception(e);
          -      throw new RuntimeException(
          -          "unexpected invocation exception: " + e.getMessage());
          +      throw new RuntimeException("unexpected invocation exception: " + e.getMessage());
               } finally {
                 long processTime = now() - start;
                 metrics.incMethodTime(m.getName(), processTime);
               }
               return result;
             }
          -  
          +
             private static long now() {
               return System.nanoTime();
             }
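Reviewer note: the proxy above times every Iface call through java.lang.reflect.Proxy. A generic sketch of the same dynamic-proxy timing pattern; the wrapper class and the metrics callback here are hypothetical, not HBase classes.

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;
    import java.util.function.BiConsumer;

    public final class TimingProxy implements InvocationHandler {
      private final Object target;
      private final BiConsumer<String, Long> recorder; // e.g. (method name, nanos) -> metrics sink

      private TimingProxy(Object target, BiConsumer<String, Long> recorder) {
        this.target = target;
        this.recorder = recorder;
      }

      @SuppressWarnings("unchecked")
      public static <T> T wrap(Class<T> iface, T target, BiConsumer<String, Long> recorder) {
        return (T) Proxy.newProxyInstance(iface.getClassLoader(), new Class<?>[] { iface },
          new TimingProxy(target, recorder));
      }

      @Override
      public Object invoke(Object proxy, Method m, Object[] args) throws Throwable {
        long start = System.nanoTime();
        try {
          return m.invoke(target, args);
        } catch (InvocationTargetException e) {
          throw e.getTargetException(); // rethrow the wrapped handler's real exception
        } finally {
          recorder.accept(m.getName(), System.nanoTime() - start);
        }
      }
    }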
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
          index b75b4334f75c..924907fa8bcf 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
          @@ -7,14 +7,13 @@
            * "License"); you may not use this file except in compliance
            * with the License.  You may obtain a copy of the License at
            *
          - *   http://www.apache.org/licenses/LICENSE-2.0
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * Unless required by applicable law or agreed to in writing,
          - * software distributed under the License is distributed on an
          - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
          - * KIND, either express or implied.  See the License for the
          - * specific language governing permissions and limitations
          - * under the License.
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift;
           
          @@ -23,18 +22,21 @@
           @InterfaceAudience.Private
           public class HttpAuthenticationException extends Exception {
             private static final long serialVersionUID = 0;
          +
             /**
              * @param cause original exception
              */
             public HttpAuthenticationException(Throwable cause) {
               super(cause);
             }
          +
             /**
              * @param msg exception message
              */
             public HttpAuthenticationException(String msg) {
               super(msg);
             }
          +
             /**
              * @param msg exception message
              * @param cause original exception
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ImplType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ImplType.java
          index 7108115173bb..a4b2f81a5cfd 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ImplType.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ImplType.java
          @@ -22,7 +22,6 @@
           import java.util.ArrayList;
           import java.util.Arrays;
           import java.util.List;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.thrift.server.THsHaServer;
           import org.apache.thrift.server.TNonblockingServer;
          @@ -47,14 +46,13 @@ public enum ImplType {
             private static final Logger LOG = LoggerFactory.getLogger(ImplType.class);
             public static final ImplType DEFAULT = THREAD_POOL;
           
          -
             final String option;
             final boolean isAlwaysFramed;
             final Class serverClass;
             final boolean canSpecifyBindIP;
           
          -  private ImplType(String option, boolean isAlwaysFramed,
          -      Class serverClass, boolean canSpecifyBindIP) {
          +  private ImplType(String option, boolean isAlwaysFramed, Class serverClass,
          +      boolean canSpecifyBindIP) {
               this.option = option;
               this.isAlwaysFramed = isAlwaysFramed;
               this.serverClass = serverClass;
          @@ -78,8 +76,7 @@ public boolean isAlwaysFramed() {
             }
           
             public String getDescription() {
          -    StringBuilder sb = new StringBuilder("Use the " +
          -        serverClass.getSimpleName());
          +    StringBuilder sb = new StringBuilder("Use the " + serverClass.getSimpleName());
               if (isAlwaysFramed) {
                 sb.append(" This implies the framed transport.");
               }
          @@ -120,8 +117,8 @@ static void setServerImpl(CommandLine cmd, Configuration conf) {
                 LOG.info("Using default thrift server type");
                 chosenType = DEFAULT;
               } else if (numChosen > 1) {
          -      throw new AssertionError("Exactly one option out of " +
          -          Arrays.toString(values()) + " has to be specified");
          +      throw new AssertionError(
          +          "Exactly one option out of " + Arrays.toString(values()) + " has to be specified");
               }
               LOG.info("Using thrift server type " + chosenType.option);
               conf.set(SERVER_TYPE_CONF_KEY, chosenType.option);
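Reviewer note: setServerImpl insists that at most one server-type flag is supplied and otherwise falls back to the default implementation. A standalone sketch of that exactly-one-option selection, with an invented enum standing in for ImplType.

    import java.util.Arrays;

    public final class ServerTypeExample {
      enum ServerType { NONBLOCKING, HSHA, THREADED_SELECTOR, THREAD_POOL }

      static ServerType choose(boolean... chosenFlags) {
        // chosenFlags[i] is true when the i-th command line option was supplied.
        ServerType[] values = ServerType.values();
        ServerType chosen = null;
        int numChosen = 0;
        for (int i = 0; i < chosenFlags.length; i++) {
          if (chosenFlags[i]) {
            chosen = values[i];
            numChosen++;
          }
        }
        if (numChosen < 1) {
          return ServerType.THREAD_POOL; // default when nothing was asked for
        } else if (numChosen > 1) {
          throw new AssertionError(
            "Exactly one option out of " + Arrays.toString(values) + " has to be specified");
        }
        return chosen;
      }

      public static void main(String[] args) {
        System.out.println(choose(false, true, false, false)); // prints HSHA
      }
    }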
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
          index 6fe916855ec7..36c89e81817a 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
          @@ -34,25 +34,24 @@
           import org.apache.hadoop.hbase.util.Bytes;
           import org.apache.hadoop.hbase.util.Threads;
           import org.apache.hadoop.metrics2.util.MBeans;
          -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
           import org.apache.yetus.audience.InterfaceAudience;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
          +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
          +
           /**
  * This class will coalesce increments from a thrift server if
          - * hbase.regionserver.thrift.coalesceIncrement is set to true. Turning this
          - * config to true will cause the thrift server to queue increments into an
          - * instance of this class. The thread pool associated with this class will drain
          - * the coalesced increments as the thread is able. This can cause data loss if the
          - * thrift server dies or is shut down before everything in the queue is drained.
          - *
          + * hbase.regionserver.thrift.coalesceIncrement is set to true. Turning this config to true will
          + * cause the thrift server to queue increments into an instance of this class. The thread pool
          + * associated with this class will drain the coalesced increments as the thread is able. This can
          + * cause data loss if the thrift server dies or is shut down before everything in the queue is
          + * drained.
            */
           @InterfaceAudience.Private
           public class IncrementCoalescer implements IncrementCoalescerMBean {
             /**
              * Used to identify a cell that will be incremented.
          -   *
              */
             static class FullyQualifiedRow {
               private byte[] table;
          @@ -152,8 +151,8 @@ public IncrementCoalescer(ThriftHBaseServiceHandler hand) {
               this.handler = hand;
               LinkedBlockingQueue queue = new LinkedBlockingQueue<>();
               pool = new ThreadPoolExecutor(CORE_POOL_SIZE, CORE_POOL_SIZE, 50, TimeUnit.MILLISECONDS, queue,
          -      new ThreadFactoryBuilder().setNameFormat("IncrementCoalescer-pool-%d").setDaemon(true)
          -        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
          +        new ThreadFactoryBuilder().setNameFormat("IncrementCoalescer-pool-%d").setDaemon(true)
          +            .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
               MBeans.register("thrift", "Thrift", this);
             }
           
          @@ -190,11 +189,11 @@ private boolean internalQueueTincrement(TIncrement inc) {
             }
           
             @SuppressWarnings("FutureReturnValueIgnored")
          -  private boolean internalQueueIncrement(byte[] tableName, byte[] rowKey, byte[] fam,
          -      byte[] qual, long ammount) {
          +  private boolean internalQueueIncrement(byte[] tableName, byte[] rowKey, byte[] fam, byte[] qual,
          +      long ammount) {
               int countersMapSize = countersMap.size();
           
          -    //Make sure that the number of threads is scaled.
          +    // Make sure that the number of threads is scaled.
               dynamicallySetCoreSize(countersMapSize);
           
               totalIncrements.increment();
          @@ -254,17 +253,16 @@ private Callable createIncCallable() {
                     if (failures > 2) {
                       throw new IOException("Auto-Fail rest of ICVs");
                     }
          -          table.incrementColumnValue(row.getRowKey(), row.getFamily(), row.getQualifier(),
          -            counter);
          +          table.incrementColumnValue(row.getRowKey(), row.getFamily(), row.getQualifier(), counter);
                   } catch (IOException e) {
                     // log failure of increment
                     failures++;
                     LOG.error("FAILED_ICV: " + Bytes.toString(row.getTable()) + ", "
          -              + Bytes.toStringBinary(row.getRowKey()) + ", "
          -              + Bytes.toStringBinary(row.getFamily()) + ", "
          -              + Bytes.toStringBinary(row.getQualifier()) + ", " + counter, e);
          -        } finally{
          -          if(table != null){
          +              + Bytes.toStringBinary(row.getRowKey()) + ", " + Bytes.toStringBinary(row.getFamily())
          +              + ", " + Bytes.toStringBinary(row.getQualifier()) + ", " + counter,
          +            e);
          +        } finally {
          +          if (table != null) {
                       table.close();
                     }
                   }
          @@ -274,8 +272,8 @@ private Callable createIncCallable() {
             }
           
             /**
          -   * This method samples the incoming requests and, if selected, will check if
          -   * the corePoolSize should be changed.
          +   * This method samples the incoming requests and, if selected, will check if the corePoolSize
          +   * should be changed.
              * @param countersMapSize the size of the counters map
              */
             private void dynamicallySetCoreSize(int countersMapSize) {
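Reviewer note: the coalescer merges repeated increments for the same table/row/column into one in-memory counter that the worker pool applies later, at the cost of possible data loss on shutdown. A much-simplified sketch of that queue-and-merge idea; names are invented, and the dynamic pool sizing and error handling above are omitted.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public final class SimpleCoalescer {
      private final ConcurrentMap<String, Long> counters = new ConcurrentHashMap<>();
      private final ExecutorService pool = Executors.newSingleThreadExecutor();

      /** Fold an increment into the in-memory counter; the table update happens later on the pool. */
      public void queueIncrement(String table, String row, String column, long amount) {
        String key = table + "/" + row + "/" + column;
        counters.merge(key, amount, Long::sum);
        pool.submit(this::drainOnce);
      }

      /** Apply and clear whatever has accumulated so far (deliberately simplified and racy). */
      private void drainOnce() {
        for (String key : counters.keySet()) {
          Long pending = counters.remove(key);
          if (pending != null) {
            // The real handler would call Table.incrementColumnValue(row, family, qualifier, pending).
            System.out.println("apply +" + pending + " to " + key);
          }
        }
      }
    }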
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
          index 06cf193fe0e6..e7e528c364e8 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import org.apache.yetus.audience.InterfaceAudience;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
          index cc3876d65e77..6281678f511a 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import java.util.concurrent.ExecutorService;
          @@ -24,7 +23,6 @@
           import java.util.concurrent.SynchronousQueue;
           import java.util.concurrent.ThreadPoolExecutor;
           import java.util.concurrent.TimeUnit;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
           import org.apache.hadoop.hbase.util.Threads;
          @@ -40,6 +38,7 @@
           import org.apache.yetus.audience.InterfaceAudience;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
          +
           import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
           
           /**
          @@ -48,47 +47,41 @@
           @InterfaceAudience.Private
           public class TBoundedThreadPoolServer extends TServer {
           
          -  private static final String QUEUE_FULL_MSG =
          -      "Queue is full, closing connection";
          +  private static final String QUEUE_FULL_MSG = "Queue is full, closing connection";
           
             /**
          -   * The "core size" of the thread pool. New threads are created on every
          -   * connection until this many threads are created.
          +   * The "core size" of the thread pool. New threads are created on every connection until this many
          +   * threads are created.
              */
          -  public static final String MIN_WORKER_THREADS_CONF_KEY =
          -      "hbase.thrift.minWorkerThreads";
          +  public static final String MIN_WORKER_THREADS_CONF_KEY = "hbase.thrift.minWorkerThreads";
           
             /**
          -   * This default core pool size should be enough for many test scenarios. We
          -   * want to override this with a much larger number (e.g. at least 200) for a
          -   * large-scale production setup.
          +   * This default core pool size should be enough for many test scenarios. We want to override this
          +   * with a much larger number (e.g. at least 200) for a large-scale production setup.
              */
             public static final int DEFAULT_MIN_WORKER_THREADS = 16;
           
             /**
          -   * The maximum size of the thread pool. When the pending request queue
          -   * overflows, new threads are created until their number reaches this number.
          -   * After that, the server starts dropping connections.
          +   * The maximum size of the thread pool. When the pending request queue overflows, new threads are
          +   * created until their number reaches this number. After that, the server starts dropping
          +   * connections.
              */
          -  public static final String MAX_WORKER_THREADS_CONF_KEY =
          -      "hbase.thrift.maxWorkerThreads";
          +  public static final String MAX_WORKER_THREADS_CONF_KEY = "hbase.thrift.maxWorkerThreads";
           
             public static final int DEFAULT_MAX_WORKER_THREADS = 1000;
           
             /**
          -   * The maximum number of pending connections waiting in the queue. If there
          -   * are no idle threads in the pool, the server queues requests. Only when
          -   * the queue overflows, new threads are added, up to
          -   * hbase.thrift.maxQueuedRequests threads.
          +   * The maximum number of pending connections waiting in the queue. If there are no idle threads in
          +   * the pool, the server queues requests. Only when the queue overflows, new threads are added, up
          +   * to hbase.thrift.maxQueuedRequests threads.
              */
          -  public static final String MAX_QUEUED_REQUESTS_CONF_KEY =
          -      "hbase.thrift.maxQueuedRequests";
          +  public static final String MAX_QUEUED_REQUESTS_CONF_KEY = "hbase.thrift.maxQueuedRequests";
           
             public static final int DEFAULT_MAX_QUEUED_REQUESTS = 1000;
           
             /**
          -   * Default amount of time in seconds to keep a thread alive. Worker threads
          -   * are stopped after being idle for this long.
          +   * Default amount of time in seconds to keep a thread alive. Worker threads are stopped after
          +   * being idle for this long.
              */
             public static final String THREAD_KEEP_ALIVE_TIME_SEC_CONF_KEY =
                 "hbase.thrift.threadKeepAliveTimeSec";
          @@ -96,13 +89,13 @@ public class TBoundedThreadPoolServer extends TServer {
             private static final int DEFAULT_THREAD_KEEP_ALIVE_TIME_SEC = 60;
           
             /**
          -   * Time to wait after interrupting all worker threads. This is after a clean
          -   * shutdown has been attempted.
          +   * Time to wait after interrupting all worker threads. This is after a clean shutdown has been
          +   * attempted.
              */
             public static final int TIME_TO_WAIT_AFTER_SHUTDOWN_MS = 5000;
           
          -  private static final Logger LOG = LoggerFactory.getLogger(
          -      TBoundedThreadPoolServer.class.getName());
          +  private static final Logger LOG =
          +      LoggerFactory.getLogger(TBoundedThreadPoolServer.class.getName());
           
             private final CallQueue callQueue;
           
          @@ -112,20 +105,16 @@ public static class Args extends TThreadPoolServer.Args {
           
               public Args(TServerTransport transport, Configuration conf) {
                 super(transport);
          -      minWorkerThreads = conf.getInt(MIN_WORKER_THREADS_CONF_KEY,
          -          DEFAULT_MIN_WORKER_THREADS);
          -      maxWorkerThreads = conf.getInt(MAX_WORKER_THREADS_CONF_KEY,
          -          DEFAULT_MAX_WORKER_THREADS);
          -      maxQueuedRequests = conf.getInt(MAX_QUEUED_REQUESTS_CONF_KEY,
          -          DEFAULT_MAX_QUEUED_REQUESTS);
          -      threadKeepAliveTimeSec = conf.getInt(THREAD_KEEP_ALIVE_TIME_SEC_CONF_KEY,
          -          DEFAULT_THREAD_KEEP_ALIVE_TIME_SEC);
          +      minWorkerThreads = conf.getInt(MIN_WORKER_THREADS_CONF_KEY, DEFAULT_MIN_WORKER_THREADS);
          +      maxWorkerThreads = conf.getInt(MAX_WORKER_THREADS_CONF_KEY, DEFAULT_MAX_WORKER_THREADS);
          +      maxQueuedRequests = conf.getInt(MAX_QUEUED_REQUESTS_CONF_KEY, DEFAULT_MAX_QUEUED_REQUESTS);
          +      threadKeepAliveTimeSec =
          +          conf.getInt(THREAD_KEEP_ALIVE_TIME_SEC_CONF_KEY, DEFAULT_THREAD_KEEP_ALIVE_TIME_SEC);
               }
           
               @Override
               public String toString() {
          -      return "min worker threads=" + minWorkerThreads
          -          + ", max worker threads=" + maxWorkerThreads 
          +      return "min worker threads=" + minWorkerThreads + ", max worker threads=" + maxWorkerThreads
                     + ", max queued requests=" + maxQueuedRequests;
               }
             }
          @@ -144,8 +133,7 @@ public TBoundedThreadPoolServer(Args options, ThriftMetrics metrics) {
               int minWorkerThreads = options.minWorkerThreads;
               int maxWorkerThreads = options.maxWorkerThreads;
               if (options.maxQueuedRequests > 0) {
          -      this.callQueue = new CallQueue(
          -          new LinkedBlockingQueue<>(options.maxQueuedRequests), metrics);
          +      this.callQueue = new CallQueue(new LinkedBlockingQueue<>(options.maxQueuedRequests), metrics);
                 minWorkerThreads = maxWorkerThreads;
               } else {
                 this.callQueue = new CallQueue(new SynchronousQueue<>(), metrics);
          @@ -154,10 +142,8 @@ public TBoundedThreadPoolServer(Args options, ThriftMetrics metrics) {
               ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
               tfb.setDaemon(true);
               tfb.setNameFormat("thrift-worker-%d");
          -    executorService =
          -        new THBaseThreadPoolExecutor(minWorkerThreads,
          -            maxWorkerThreads, options.threadKeepAliveTimeSec,
          -            TimeUnit.SECONDS, this.callQueue, tfb.build(), metrics);
          +    executorService = new THBaseThreadPoolExecutor(minWorkerThreads, maxWorkerThreads,
          +        options.threadKeepAliveTimeSec, TimeUnit.SECONDS, this.callQueue, tfb.build(), metrics);
               executorService.allowCoreThreadTimeOut(true);
               serverOptions = options;
             }
          @@ -171,13 +157,12 @@ public void serve() {
                 return;
               }
           
          -    Runtime.getRuntime().addShutdownHook(
          -        new Thread(getClass().getSimpleName() + "-shutdown-hook") {
          -          @Override
          -          public void run() {
          -            TBoundedThreadPoolServer.this.stop();
          -          }
          -        });
          +    Runtime.getRuntime().addShutdownHook(new Thread(getClass().getSimpleName() + "-shutdown-hook") {
          +      @Override
          +      public void run() {
          +        TBoundedThreadPoolServer.this.stop();
          +      }
          +    });
           
               stopped = false;
               while (!stopped && !Thread.interrupted()) {
          @@ -200,8 +185,8 @@ public void run() {
                 } catch (RejectedExecutionException rex) {
                   if (client.getClass() == TSocket.class) {
                     // We expect the client to be TSocket.
          -          LOG.warn(QUEUE_FULL_MSG + " from " +
          -              ((TSocket) client).getSocket().getRemoteSocketAddress());
          +          LOG.warn(
          +            QUEUE_FULL_MSG + " from " + ((TSocket) client).getSocket().getRemoteSocketAddress());
                   } else {
                     LOG.warn(QUEUE_FULL_MSG, rex);
                   }
          @@ -213,20 +198,18 @@ public void run() {
             }
           
             /**
          -   * Loop until {@link ExecutorService#awaitTermination} finally does return
          -   * without an interrupted exception. If we don't do this, then we'll shut
          -   * down prematurely. We want to let the executor service clear its task
          -   * queue, closing client sockets appropriately.
          +   * Loop until {@link ExecutorService#awaitTermination} finally does return without an interrupted
          +   * exception. If we don't do this, then we'll shut down prematurely. We want to let the executor
          +   * service clear its task queue, closing client sockets appropriately.
              */
             private void shutdownServer() {
               executorService.shutdown();
           
          -    long msLeftToWait =
          -        serverOptions.stopTimeoutUnit.toMillis(serverOptions.stopTimeoutVal);
          +    long msLeftToWait = serverOptions.stopTimeoutUnit.toMillis(serverOptions.stopTimeoutVal);
               long timeMillis = EnvironmentEdgeManager.currentTime();
           
          -    LOG.info("Waiting for up to " + msLeftToWait + " ms to finish processing" +
          -        " pending requests");
          +    LOG.info(
          +      "Waiting for up to " + msLeftToWait + " ms to finish processing" + " pending requests");
               boolean interrupted = false;
               while (msLeftToWait >= 0) {
                 try {
          @@ -240,8 +223,8 @@ private void shutdownServer() {
                 }
               }
           
          -    LOG.info("Interrupting all worker threads and waiting for "
          -        + TIME_TO_WAIT_AFTER_SHUTDOWN_MS + " ms longer");
          +    LOG.info("Interrupting all worker threads and waiting for " + TIME_TO_WAIT_AFTER_SHUTDOWN_MS
          +        + " ms longer");
           
               // This will interrupt all the threads, even those running a task.
               executorService.shutdownNow();
          @@ -266,7 +249,6 @@ private final class ClientConnnection implements Runnable {
           
               /**
                * Default constructor.
          -     *
                * @param client Transport to process
                */
               private ClientConnnection(TTransport client) {
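Reviewer note: the constructor above chooses a bounded LinkedBlockingQueue when maxQueuedRequests is positive and a SynchronousQueue otherwise, which is what makes execute() reject connections once the pool is saturated. A minimal demonstration of how the two queue choices change ThreadPoolExecutor behaviour; pool sizes and the task are arbitrary.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public final class QueueChoiceExample {
      public static void main(String[] args) {
        // Bounded queue: tasks wait in the queue; once it is full, execute() throws.
        ThreadPoolExecutor bounded = new ThreadPoolExecutor(2, 2, 60, TimeUnit.SECONDS,
          new LinkedBlockingQueue<>(4));

        // SynchronousQueue: no buffering; a task is rejected unless a thread can take it now
        // or a new thread (up to the maximum) can be created for it.
        ThreadPoolExecutor direct = new ThreadPoolExecutor(2, 4, 60, TimeUnit.SECONDS,
          new SynchronousQueue<>());

        Runnable slow = () -> {
          try {
            Thread.sleep(1000);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        };

        for (int i = 0; i < 10; i++) {
          try {
            bounded.execute(slow);
          } catch (RejectedExecutionException e) {
            System.out.println("bounded pool rejected task " + i); // analogous to QUEUE_FULL_MSG
          }
        }

        bounded.shutdown();
        direct.shutdown();
      }
    }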
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
          index c86f47616e12..8a2de47a67e3 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import java.util.concurrent.BlockingQueue;
          @@ -25,8 +24,8 @@
           import org.apache.yetus.audience.InterfaceAudience;
           
           /**
          - * A ThreadPoolExecutor customized for working with HBase thrift to update metrics before and
          - * after the execution of a task.
          + * A ThreadPoolExecutor customized for working with HBase thrift to update metrics before and after
          + * the execution of a task.
            */
           
           @InterfaceAudience.Private
          @@ -40,8 +39,8 @@ public THBaseThreadPoolExecutor(int corePoolSize, int maxPoolSize, long keepAliv
             }
           
             public THBaseThreadPoolExecutor(int corePoolSize, int maxPoolSize, long keepAliveTime,
          -      TimeUnit unit, BlockingQueue workQueue,
          -      ThreadFactory threadFactory,ThriftMetrics metrics) {
          +      TimeUnit unit, BlockingQueue workQueue, ThreadFactory threadFactory,
          +      ThriftMetrics metrics) {
               super(corePoolSize, maxPoolSize, keepAliveTime, unit, workQueue);
               if (threadFactory != null) {
                 setThreadFactory(threadFactory);
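Reviewer note: THBaseThreadPoolExecutor exists to update ThriftMetrics around each task. A generic sketch of the beforeExecute/afterExecute hook pair it relies on; the gauge below is a made-up stand-in for the real metrics object.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class InstrumentedThreadPoolExecutor extends ThreadPoolExecutor {
      private final AtomicInteger running = new AtomicInteger();

      public InstrumentedThreadPoolExecutor(int corePoolSize, int maxPoolSize, long keepAliveTime,
          TimeUnit unit) {
        super(corePoolSize, maxPoolSize, keepAliveTime, unit, new LinkedBlockingQueue<>());
      }

      @Override
      protected void beforeExecute(Thread t, Runnable r) {
        super.beforeExecute(t, r);
        running.incrementAndGet(); // e.g. bump an "active workers" gauge before the task runs
      }

      @Override
      protected void afterExecute(Runnable r, Throwable t) {
        running.decrementAndGet(); // e.g. drop the gauge and record the task time afterwards
        super.afterExecute(r, t);
      }
    }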
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
          index b91ad0983d4e..530e89f54259 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java
          @@ -1,5 +1,4 @@
          -/**
          - *
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -16,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD;
          @@ -87,18 +85,18 @@
           import org.apache.hadoop.hbase.thrift.generated.TScan;
           import org.apache.hadoop.hbase.thrift.generated.TThriftServerType;
           import org.apache.hadoop.hbase.util.Bytes;
          -import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
          -import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
           import org.apache.thrift.TException;
           import org.apache.yetus.audience.InterfaceAudience;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
           import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
          +import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
          +import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
           
           /**
          - * The HBaseServiceHandler is a glue object that connects Thrift RPC calls to the
          - * HBase client API primarily defined in the Admin and Table objects.
          + * The HBaseServiceHandler is a glue object that connects Thrift RPC calls to the HBase client API
          + * primarily defined in the Admin and Table objects.
            */
           @InterfaceAudience.Private
           @SuppressWarnings("deprecation")
          @@ -124,25 +122,20 @@ byte[][] getAllColumns(Table table) throws IOException {
               return columns;
             }
           
          -
             /**
          -   * Assigns a unique ID to the scanner and adds the mapping to an internal
          -   * hash-map.
          -   *
          +   * Assigns a unique ID to the scanner and adds the mapping to an internal hash-map.
              * @param scanner the {@link ResultScanner} to add
              * @return integer scanner id
              */
             protected synchronized int addScanner(ResultScanner scanner, boolean sortColumns) {
               int id = nextScannerId++;
          -    ResultScannerWrapper resultScannerWrapper =
          -        new ResultScannerWrapper(scanner, sortColumns);
          +    ResultScannerWrapper resultScannerWrapper = new ResultScannerWrapper(scanner, sortColumns);
               scannerMap.put(id, resultScannerWrapper);
               return id;
             }
           
             /**
              * Returns the scanner associated with the specified ID.
          -   *
              * @param id the ID of the scanner to get
              * @return a Scanner, or null if ID was invalid.
              */
          @@ -151,31 +144,29 @@ private synchronized ResultScannerWrapper getScanner(int id) {
             }
           
             /**
          -   * Removes the scanner associated with the specified ID from the internal
          -   * id->scanner hash-map.
          -   *
          +   * Removes the scanner associated with the specified ID from the internal id->scanner hash-map.
              * @param id the ID of the scanner to remove
              */
             private synchronized void removeScanner(int id) {
               scannerMap.invalidate(id);
             }
           
          -  protected ThriftHBaseServiceHandler(final Configuration c,
          -      final UserProvider userProvider) throws IOException {
          +  protected ThriftHBaseServiceHandler(final Configuration c, final UserProvider userProvider)
          +      throws IOException {
               super(c, userProvider);
          -    long cacheTimeout = c.getLong(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD) * 2;
          +    long cacheTimeout =
          +        c.getLong(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD)
          +            * 2;
           
          -    scannerMap = CacheBuilder.newBuilder()
          -      .expireAfterAccess(cacheTimeout, TimeUnit.MILLISECONDS)
          -      .build();
          +    scannerMap =
          +        CacheBuilder.newBuilder().expireAfterAccess(cacheTimeout, TimeUnit.MILLISECONDS).build();
           
               this.coalescer = new IncrementCoalescer(this);
             }
           
          -
             @Override
             public void enableTable(ByteBuffer tableName) throws IOError {
          -    try{
          +    try {
                 getAdmin().enableTable(getTableName(tableName));
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
          @@ -184,8 +175,8 @@ public void enableTable(ByteBuffer tableName) throws IOError {
             }
           
             @Override
          -  public void disableTable(ByteBuffer tableName) throws IOError{
          -    try{
          +  public void disableTable(ByteBuffer tableName) throws IOError {
          +    try {
                 getAdmin().disableTable(getTableName(tableName));
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
          @@ -207,7 +198,7 @@ public boolean isTableEnabled(ByteBuffer tableName) throws IOError {
             public Map getTableNamesWithIsTableEnabled() throws IOError {
               try {
                 HashMap tables = new HashMap<>();
          -      for (ByteBuffer tableName: this.getTableNames()) {
          +      for (ByteBuffer tableName : this.getTableNames()) {
                   tables.put(tableName, this.isTableEnabled(tableName));
                 }
                 return tables;
          @@ -278,8 +269,7 @@ public List getTableRegions(ByteBuffer tableName) throws IOError {
                   RegionInfo info = regionLocation.getRegion();
                   ServerName serverName = regionLocation.getServerName();
                   TRegionInfo region = new TRegionInfo();
          -        region.serverName = ByteBuffer.wrap(
          -            Bytes.toBytes(serverName.getHostname()));
          +        region.serverName = ByteBuffer.wrap(Bytes.toBytes(serverName.getHostname()));
                   region.port = serverName.getPort();
                   region.startKey = ByteBuffer.wrap(info.getStartKey());
                   region.endKey = ByteBuffer.wrap(info.getEndKey());
          @@ -292,18 +282,16 @@ public List getTableRegions(ByteBuffer tableName) throws IOError {
               } catch (TableNotFoundException e) {
                 // Return empty list for non-existing table
                 return Collections.emptyList();
          -    } catch (IOException e){
          +    } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
               }
             }
           
             @Override
          -  public List get(
          -      ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
          -      Map attributes)
          -      throws IOError {
          -    byte [][] famAndQf = CellUtil.parseColumn(getBytes(column));
          +  public List get(ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
          +      Map attributes) throws IOError {
          +    byte[][] famAndQf = CellUtil.parseColumn(getBytes(column));
               if (famAndQf.length == 1) {
                 return get(tableName, row, famAndQf[0], null, attributes);
               }
          @@ -314,16 +302,13 @@ public List get(
             }
           
             /**
          -   * Note: this internal interface is slightly different from public APIs in regard to handling
          -   * of the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather,
          -   * we respect qual == null as a request for the entire column family. The caller (
          +   * Note: this internal interface is slightly different from public APIs in regard to handling of
          +   * the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather, we
          +   * respect qual == null as a request for the entire column family. The caller (
              * {@link #get(ByteBuffer, ByteBuffer, ByteBuffer, Map)}) interface IS consistent in that the
    * column is parsed like normal.
              */
          -  protected List get(ByteBuffer tableName,
          -      ByteBuffer row,
          -      byte[] family,
          -      byte[] qualifier,
          +  protected List get(ByteBuffer tableName, ByteBuffer row, byte[] family, byte[] qualifier,
                 Map attributes) throws IOError {
               Table table = null;
               try {
          @@ -348,8 +333,8 @@ protected List get(ByteBuffer tableName,
             @Override
             public List getVer(ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
                 int numVersions, Map attributes) throws IOError {
          -    byte [][] famAndQf = CellUtil.parseColumn(getBytes(column));
          -    if(famAndQf.length == 1) {
          +    byte[][] famAndQf = CellUtil.parseColumn(getBytes(column));
          +    if (famAndQf.length == 1) {
                 return getVer(tableName, row, famAndQf[0], null, numVersions, attributes);
               }
               if (famAndQf.length == 2) {
          @@ -360,15 +345,14 @@ public List getVer(ByteBuffer tableName, ByteBuffer row, ByteBuffer colum
             }
           
             /**
          -   * Note: this public interface is slightly different from public Java APIs in regard to
          -   * handling of the qualifier. Here we differ from the public Java API in that null != byte[0].
          -   * Rather, we respect qual == null as a request for the entire column family. If you want to
          -   * access the entire column family, use
          -   * {@link #getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map)} with a {@code column} value
          -   * that lacks a {@code ':'}.
          +   * Note: this public interface is slightly different from public Java APIs in regard to handling
          +   * of the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather, we
          +   * respect qual == null as a request for the entire column family. If you want to access the
          +   * entire column family, use {@link #getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map)} with a
          +   * {@code column} value that lacks a {@code ':'}.
              */
          -  public List getVer(ByteBuffer tableName, ByteBuffer row, byte[] family,
          -      byte[] qualifier, int numVersions, Map attributes) throws IOError {
          +  public List getVer(ByteBuffer tableName, ByteBuffer row, byte[] family, byte[] qualifier,
          +      int numVersions, Map attributes) throws IOError {
           
               Table table = null;
               try {
          @@ -386,7 +370,7 @@ public List getVer(ByteBuffer tableName, ByteBuffer row, byte[] family,
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
          @@ -394,23 +378,22 @@ public List getVer(ByteBuffer tableName, ByteBuffer row, byte[] family,
             @Override
             public List getVerTs(ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
                 long timestamp, int numVersions, Map attributes) throws IOError {
          -    byte [][] famAndQf = CellUtil.parseColumn(getBytes(column));
          +    byte[][] famAndQf = CellUtil.parseColumn(getBytes(column));
               if (famAndQf.length == 1) {
                 return getVerTs(tableName, row, famAndQf[0], null, timestamp, numVersions, attributes);
               }
               if (famAndQf.length == 2) {
          -      return getVerTs(tableName, row, famAndQf[0], famAndQf[1], timestamp, numVersions,
          -          attributes);
          +      return getVerTs(tableName, row, famAndQf[0], famAndQf[1], timestamp, numVersions, attributes);
               }
               throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
             }
           
             /**
          -   * Note: this internal interface is slightly different from public APIs in regard to handling
          -   * of the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather,
          -   * we respect qual == null as a request for the entire column family. The caller (
          -   * {@link #getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map)}) interface IS
          -   * consistent in that the column is parse like normal.
          +   * Note: this internal interface is slightly different from public APIs in regard to handling of
          +   * the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather, we
          +   * respect qual == null as a request for the entire column family. The caller (
          +   * {@link #getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map)}) interface IS consistent
+   * in that the column is parsed as normal.
              */
             protected List getVerTs(ByteBuffer tableName, ByteBuffer row, byte[] family,
                 byte[] qualifier, long timestamp, int numVersions, Map attributes)
          @@ -433,7 +416,7 @@ protected List getVerTs(ByteBuffer tableName, ByteBuffer row, byte[] fami
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
          @@ -441,32 +424,25 @@ protected List getVerTs(ByteBuffer tableName, ByteBuffer row, byte[] fami
             @Override
             public List getRow(ByteBuffer tableName, ByteBuffer row,
                 Map attributes) throws IOError {
          -    return getRowWithColumnsTs(tableName, row, null,
          -        HConstants.LATEST_TIMESTAMP,
          -        attributes);
          +    return getRowWithColumnsTs(tableName, row, null, HConstants.LATEST_TIMESTAMP, attributes);
             }
           
             @Override
          -  public List getRowWithColumns(ByteBuffer tableName,
          -      ByteBuffer row,
          -      List columns,
          -      Map attributes) throws IOError {
          -    return getRowWithColumnsTs(tableName, row, columns,
          -        HConstants.LATEST_TIMESTAMP,
          -        attributes);
          +  public List getRowWithColumns(ByteBuffer tableName, ByteBuffer row,
          +      List columns, Map attributes) throws IOError {
          +    return getRowWithColumnsTs(tableName, row, columns, HConstants.LATEST_TIMESTAMP, attributes);
             }
           
             @Override
          -  public List getRowTs(ByteBuffer tableName, ByteBuffer row,
          -      long timestamp, Map attributes) throws IOError {
          -    return getRowWithColumnsTs(tableName, row, null,
          -        timestamp, attributes);
          +  public List getRowTs(ByteBuffer tableName, ByteBuffer row, long timestamp,
          +      Map attributes) throws IOError {
          +    return getRowWithColumnsTs(tableName, row, null, timestamp, attributes);
             }
           
             @Override
          -  public List getRowWithColumnsTs(
          -      ByteBuffer tableName, ByteBuffer row, List columns,
          -      long timestamp, Map attributes) throws IOError {
          +  public List getRowWithColumnsTs(ByteBuffer tableName, ByteBuffer row,
          +      List columns, long timestamp, Map attributes)
          +      throws IOError {
           
               Table table = null;
               try {
          @@ -480,8 +456,8 @@ public List getRowWithColumnsTs(
                 }
                 Get get = new Get(getBytes(row));
                 addAttributes(get, attributes);
          -      for(ByteBuffer column : columns) {
          -        byte [][] famAndQf = CellUtil.parseColumn(getBytes(column));
          +      for (ByteBuffer column : columns) {
          +        byte[][] famAndQf = CellUtil.parseColumn(getBytes(column));
                   if (famAndQf.length == 1) {
                     get.addFamily(famAndQf[0]);
                   } else {
          @@ -494,47 +470,35 @@ public List getRowWithColumnsTs(
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
          -  public List getRows(ByteBuffer tableName,
          -      List rows,
          -      Map attributes)
          -      throws IOError {
          -    return getRowsWithColumnsTs(tableName, rows, null,
          -        HConstants.LATEST_TIMESTAMP,
          -        attributes);
          +  public List getRows(ByteBuffer tableName, List rows,
          +      Map attributes) throws IOError {
          +    return getRowsWithColumnsTs(tableName, rows, null, HConstants.LATEST_TIMESTAMP, attributes);
             }
           
             @Override
          -  public List getRowsWithColumns(ByteBuffer tableName,
          -      List rows,
          -      List columns,
          -      Map attributes) throws IOError {
          -    return getRowsWithColumnsTs(tableName, rows, columns,
          -        HConstants.LATEST_TIMESTAMP,
          -        attributes);
          +  public List getRowsWithColumns(ByteBuffer tableName, List rows,
          +      List columns, Map attributes) throws IOError {
          +    return getRowsWithColumnsTs(tableName, rows, columns, HConstants.LATEST_TIMESTAMP, attributes);
             }
           
             @Override
          -  public List getRowsTs(ByteBuffer tableName,
          -      List rows,
          -      long timestamp,
          +  public List getRowsTs(ByteBuffer tableName, List rows, long timestamp,
                 Map attributes) throws IOError {
          -    return getRowsWithColumnsTs(tableName, rows, null,
          -        timestamp, attributes);
          +    return getRowsWithColumnsTs(tableName, rows, null, timestamp, attributes);
             }
           
             @Override
          -  public List getRowsWithColumnsTs(ByteBuffer tableName,
          -      List rows,
          -      List columns, long timestamp,
          -      Map attributes) throws IOError {
          +  public List getRowsWithColumnsTs(ByteBuffer tableName, List rows,
          +      List columns, long timestamp, Map attributes)
          +      throws IOError {
           
          -    Table table= null;
          +    Table table = null;
               try {
                 List gets = new ArrayList<>(rows.size());
                 table = getTable(tableName);
          @@ -546,8 +510,8 @@ public List getRowsWithColumnsTs(ByteBuffer tableName,
                   addAttributes(get, attributes);
                   if (columns != null) {
           
          -          for(ByteBuffer column : columns) {
          -            byte [][] famAndQf = CellUtil.parseColumn(getBytes(column));
          +          for (ByteBuffer column : columns) {
          +            byte[][] famAndQf = CellUtil.parseColumn(getBytes(column));
                       if (famAndQf.length == 1) {
                         get.addFamily(famAndQf[0]);
                       } else {
          @@ -563,31 +527,26 @@ public List getRowsWithColumnsTs(ByteBuffer tableName,
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
          -  public void deleteAll(
          -      ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
          -      Map attributes)
          -      throws IOError {
          -    deleteAllTs(tableName, row, column, HConstants.LATEST_TIMESTAMP,
          -        attributes);
          +  public void deleteAll(ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
          +      Map attributes) throws IOError {
          +    deleteAllTs(tableName, row, column, HConstants.LATEST_TIMESTAMP, attributes);
             }
           
             @Override
          -  public void deleteAllTs(ByteBuffer tableName,
          -      ByteBuffer row,
          -      ByteBuffer column,
          -      long timestamp, Map attributes) throws IOError {
          +  public void deleteAllTs(ByteBuffer tableName, ByteBuffer row, ByteBuffer column, long timestamp,
          +      Map attributes) throws IOError {
               Table table = null;
               try {
                 table = getTable(tableName);
          -      Delete delete  = new Delete(getBytes(row));
          +      Delete delete = new Delete(getBytes(row));
                 addAttributes(delete, attributes);
          -      byte [][] famAndQf = CellUtil.parseColumn(getBytes(column));
          +      byte[][] famAndQf = CellUtil.parseColumn(getBytes(column));
                 if (famAndQf.length == 1) {
                   delete.addFamily(famAndQf[0], timestamp);
                 } else {
          @@ -604,20 +563,18 @@ public void deleteAllTs(ByteBuffer tableName,
             }
           
             @Override
          -  public void deleteAllRow(
          -      ByteBuffer tableName, ByteBuffer row,
          +  public void deleteAllRow(ByteBuffer tableName, ByteBuffer row,
                 Map attributes) throws IOError {
               deleteAllRowTs(tableName, row, HConstants.LATEST_TIMESTAMP, attributes);
             }
           
             @Override
          -  public void deleteAllRowTs(
          -      ByteBuffer tableName, ByteBuffer row, long timestamp,
          +  public void deleteAllRowTs(ByteBuffer tableName, ByteBuffer row, long timestamp,
                 Map attributes) throws IOError {
               Table table = null;
               try {
                 table = getTable(tableName);
          -      Delete delete  = new Delete(getBytes(row), timestamp);
          +      Delete delete = new Delete(getBytes(row), timestamp);
                 addAttributes(delete, attributes);
                 table.delete(delete);
               } catch (IOException e) {
          @@ -630,7 +587,7 @@ public void deleteAllRowTs(
           
             @Override
             public void createTable(ByteBuffer in_tableName, List columnFamilies)
          -    throws IOError, IllegalArgument, AlreadyExists {
          +      throws IOError, IllegalArgument, AlreadyExists {
               TableName tableName = getTableName(in_tableName);
               try {
                 if (getAdmin().tableExists(tableName)) {
          @@ -672,17 +629,14 @@ public void deleteTable(ByteBuffer in_tableName) throws IOError {
             }
           
             @Override
          -  public void mutateRow(ByteBuffer tableName, ByteBuffer row,
          -      List mutations, Map attributes)
          -      throws IOError, IllegalArgument {
          +  public void mutateRow(ByteBuffer tableName, ByteBuffer row, List mutations,
          +      Map attributes) throws IOError, IllegalArgument {
               mutateRowTs(tableName, row, mutations, HConstants.LATEST_TIMESTAMP, attributes);
             }
           
             @Override
          -  public void mutateRowTs(ByteBuffer tableName, ByteBuffer row,
          -      List mutations, long timestamp,
          -      Map attributes)
          -      throws IOError, IllegalArgument {
          +  public void mutateRowTs(ByteBuffer tableName, ByteBuffer row, List mutations,
          +      long timestamp, Map attributes) throws IOError, IllegalArgument {
               Table table = null;
               try {
                 table = getTable(tableName);
          @@ -707,18 +661,13 @@ public void mutateRowTs(ByteBuffer tableName, ByteBuffer row,
                     }
                     delete.setDurability(m.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
                   } else {
          -          if(famAndQf.length == 1) {
          +          if (famAndQf.length == 1) {
                       LOG.warn("No column qualifier specified. Delete is the only mutation supported "
                           + "over the whole column family.");
                     } else {
          -            put.add(builder.clear()
          -                .setRow(put.getRow())
          -                .setFamily(famAndQf[0])
          -                .setQualifier(famAndQf[1])
          -                .setTimestamp(put.getTimestamp())
          -                .setType(Cell.Type.Put)
          -                .setValue(m.value != null ? getBytes(m.value)
          -                    : HConstants.EMPTY_BYTE_ARRAY)
          +            put.add(builder.clear().setRow(put.getRow()).setFamily(famAndQf[0])
          +                .setQualifier(famAndQf[1]).setTimestamp(put.getTimestamp()).setType(Cell.Type.Put)
          +                .setValue(m.value != null ? getBytes(m.value) : HConstants.EMPTY_BYTE_ARRAY)
                           .build());
                     }
                     put.setDurability(m.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
          @@ -736,23 +685,20 @@ public void mutateRowTs(ByteBuffer tableName, ByteBuffer row,
               } catch (IllegalArgumentException e) {
                 LOG.warn(e.getMessage(), e);
                 throw new IllegalArgument(Throwables.getStackTraceAsString(e));
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
             public void mutateRows(ByteBuffer tableName, List rowBatches,
          -      Map attributes)
          -      throws IOError, IllegalArgument, TException {
          +      Map attributes) throws IOError, IllegalArgument, TException {
               mutateRowsTs(tableName, rowBatches, HConstants.LATEST_TIMESTAMP, attributes);
             }
           
             @Override
          -  public void mutateRowsTs(
          -      ByteBuffer tableName, List rowBatches, long timestamp,
          -      Map attributes)
          -      throws IOError, IllegalArgument, TException {
          +  public void mutateRowsTs(ByteBuffer tableName, List rowBatches, long timestamp,
          +      Map attributes) throws IOError, IllegalArgument, TException {
               List puts = new ArrayList<>();
               List deletes = new ArrayList<>();
               CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
          @@ -772,8 +718,7 @@ public void mutateRowsTs(
                     } else {
                       delete.addColumns(famAndQf[0], famAndQf[1], timestamp);
                     }
          -          delete.setDurability(m.writeToWAL ? Durability.SYNC_WAL
          -              : Durability.SKIP_WAL);
          +          delete.setDurability(m.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
                   } else {
                     if (famAndQf.length == 1) {
                       LOG.warn("No column qualifier specified. Delete is the only mutation supported "
          @@ -781,14 +726,9 @@ public void mutateRowsTs(
                     }
                     if (famAndQf.length == 2) {
                       try {
          -              put.add(builder.clear()
          -                  .setRow(put.getRow())
          -                  .setFamily(famAndQf[0])
          -                  .setQualifier(famAndQf[1])
          -                  .setTimestamp(put.getTimestamp())
          -                  .setType(Cell.Type.Put)
          -                  .setValue(m.value != null ? getBytes(m.value)
          -                      : HConstants.EMPTY_BYTE_ARRAY)
          +              put.add(builder.clear().setRow(put.getRow()).setFamily(famAndQf[0])
          +                  .setQualifier(famAndQf[1]).setTimestamp(put.getTimestamp()).setType(Cell.Type.Put)
          +                  .setValue(m.value != null ? getBytes(m.value) : HConstants.EMPTY_BYTE_ARRAY)
                             .build());
                       } catch (IOException e) {
                         throw new IllegalArgumentException(e);
          @@ -822,30 +762,27 @@ public void mutateRowsTs(
               } catch (IllegalArgumentException e) {
                 LOG.warn(e.getMessage(), e);
                 throw new IllegalArgument(Throwables.getStackTraceAsString(e));
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
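
The put.add(builder.clear()...) chains reflowed in mutateRowTs and mutateRowsTs above use the CellBuilder API; a self-contained sketch of the same pattern follows, with row, family, qualifier and value literals chosen purely for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilder;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CellBuilderSketch {
  static Put examplePut() throws IOException {
    Put put = new Put(Bytes.toBytes("r1"));
    CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
    // clear() resets the builder so a single instance can be reused across mutations,
    // which is how the handler builds one cell per column in a row batch.
    Cell cell = builder.clear().setRow(put.getRow()).setFamily(Bytes.toBytes("cf"))
        .setQualifier(Bytes.toBytes("q")).setTimestamp(put.getTimestamp()).setType(Cell.Type.Put)
        .setValue(Bytes.toBytes("v")).build();
    return put.add(cell);
  }
}
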
           
             @Override
          -  public long atomicIncrement(
          -      ByteBuffer tableName, ByteBuffer row, ByteBuffer column, long amount)
          +  public long atomicIncrement(ByteBuffer tableName, ByteBuffer row, ByteBuffer column, long amount)
                 throws IOError, IllegalArgument, TException {
          -    byte [][] famAndQf = CellUtil.parseColumn(getBytes(column));
          -    if(famAndQf.length == 1) {
          +    byte[][] famAndQf = CellUtil.parseColumn(getBytes(column));
          +    if (famAndQf.length == 1) {
                 return atomicIncrement(tableName, row, famAndQf[0], HConstants.EMPTY_BYTE_ARRAY, amount);
               }
               return atomicIncrement(tableName, row, famAndQf[0], famAndQf[1], amount);
             }
           
          -  protected long atomicIncrement(ByteBuffer tableName, ByteBuffer row,
          -      byte [] family, byte [] qualifier, long amount)
          -      throws IOError, IllegalArgument, TException {
          +  protected long atomicIncrement(ByteBuffer tableName, ByteBuffer row, byte[] family,
          +      byte[] qualifier, long amount) throws IOError, IllegalArgument, TException {
               Table table = null;
               try {
                 table = getTable(tableName);
          -      return table.incrementColumnValue(
          -          getBytes(row), family, qualifier, amount);
          +      return table.incrementColumnValue(getBytes(row), family, qualifier, amount);
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          @@ -867,8 +804,7 @@ public void scannerClose(int id) throws IOError, IllegalArgument {
             }
           
             @Override
          -  public List scannerGetList(int id,int nbRows)
          -      throws IllegalArgument, IOError {
          +  public List scannerGetList(int id, int nbRows) throws IllegalArgument, IOError {
               LOG.debug("scannerGetList: id={}", id);
               ResultScannerWrapper resultScannerWrapper = getScanner(id);
               if (null == resultScannerWrapper) {
          @@ -877,7 +813,7 @@ public List scannerGetList(int id,int nbRows)
                 throw new IllegalArgument("scanner ID is invalid");
               }
           
          -    Result [] results;
          +    Result[] results;
               try {
                 results = resultScannerWrapper.getScanner().next(nbRows);
                 if (null == results) {
          @@ -896,13 +832,12 @@ public List scannerGetList(int id,int nbRows)
           
             @Override
             public List scannerGet(int id) throws IllegalArgument, IOError {
          -    return scannerGetList(id,1);
          +    return scannerGetList(id, 1);
             }
           
             @Override
             public int scannerOpenWithScan(ByteBuffer tableName, TScan tScan,
          -      Map attributes)
          -      throws IOError {
          +      Map attributes) throws IOError {
           
               Table table = null;
               try {
          @@ -925,9 +860,9 @@ public int scannerOpenWithScan(ByteBuffer tableName, TScan tScan,
                   scan.setBatch(tScan.getBatchSize());
                 }
                 if (tScan.isSetColumns() && !tScan.getColumns().isEmpty()) {
          -        for(ByteBuffer column : tScan.getColumns()) {
          -          byte [][] famQf = CellUtil.parseColumn(getBytes(column));
          -          if(famQf.length == 1) {
          +        for (ByteBuffer column : tScan.getColumns()) {
          +          byte[][] famQf = CellUtil.parseColumn(getBytes(column));
          +          if (famQf.length == 1) {
                       scan.addFamily(famQf[0]);
                     } else {
                       scan.addColumn(famQf[0], famQf[1]);
          @@ -936,8 +871,7 @@ public int scannerOpenWithScan(ByteBuffer tableName, TScan tScan,
                 }
                 if (tScan.isSetFilterString()) {
                   ParseFilter parseFilter = new ParseFilter();
          -        scan.setFilter(
          -            parseFilter.parseFilterString(tScan.getFilterString()));
          +        scan.setFilter(parseFilter.parseFilterString(tScan.getFilterString()));
                 }
                 if (tScan.isSetReversed()) {
                   scan.setReversed(tScan.isReversed());
          @@ -949,14 +883,13 @@ public int scannerOpenWithScan(ByteBuffer tableName, TScan tScan,
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
          -  public int scannerOpen(ByteBuffer tableName, ByteBuffer startRow,
          -      List columns,
          +  public int scannerOpen(ByteBuffer tableName, ByteBuffer startRow, List columns,
                 Map attributes) throws IOError {
           
               Table table = null;
          @@ -964,10 +897,10 @@ public int scannerOpen(ByteBuffer tableName, ByteBuffer startRow,
                 table = getTable(tableName);
                 Scan scan = new Scan().withStartRow(getBytes(startRow));
                 addAttributes(scan, attributes);
          -      if(columns != null && !columns.isEmpty()) {
          -        for(ByteBuffer column : columns) {
          -          byte [][] famQf = CellUtil.parseColumn(getBytes(column));
          -          if(famQf.length == 1) {
          +      if (columns != null && !columns.isEmpty()) {
          +        for (ByteBuffer column : columns) {
          +          byte[][] famQf = CellUtil.parseColumn(getBytes(column));
          +          if (famQf.length == 1) {
                       scan.addFamily(famQf[0]);
                     } else {
                       scan.addColumn(famQf[0], famQf[1]);
          @@ -978,26 +911,24 @@ public int scannerOpen(ByteBuffer tableName, ByteBuffer startRow,
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
          -  public int scannerOpenWithStop(ByteBuffer tableName, ByteBuffer startRow,
          -      ByteBuffer stopRow, List columns,
          -      Map attributes)
          -      throws IOError, TException {
          +  public int scannerOpenWithStop(ByteBuffer tableName, ByteBuffer startRow, ByteBuffer stopRow,
          +      List columns, Map attributes) throws IOError, TException {
           
               Table table = null;
               try {
                 table = getTable(tableName);
                 Scan scan = new Scan().withStartRow(getBytes(startRow)).withStopRow(getBytes(stopRow));
                 addAttributes(scan, attributes);
          -      if(columns != null && !columns.isEmpty()) {
          -        for(ByteBuffer column : columns) {
          -          byte [][] famQf = CellUtil.parseColumn(getBytes(column));
          -          if(famQf.length == 1) {
          +      if (columns != null && !columns.isEmpty()) {
          +        for (ByteBuffer column : columns) {
          +          byte[][] famQf = CellUtil.parseColumn(getBytes(column));
          +          if (famQf.length == 1) {
                       scan.addFamily(famQf[0]);
                     } else {
                       scan.addColumn(famQf[0], famQf[1]);
          @@ -1008,30 +939,26 @@ public int scannerOpenWithStop(ByteBuffer tableName, ByteBuffer startRow,
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
          -  public int scannerOpenWithPrefix(ByteBuffer tableName,
          -      ByteBuffer startAndPrefix,
          -      List columns,
          -      Map attributes)
          -      throws IOError, TException {
          +  public int scannerOpenWithPrefix(ByteBuffer tableName, ByteBuffer startAndPrefix,
          +      List columns, Map attributes) throws IOError, TException {
           
               Table table = null;
               try {
                 table = getTable(tableName);
                 Scan scan = new Scan().withStartRow(getBytes(startAndPrefix));
                 addAttributes(scan, attributes);
          -      Filter f = new WhileMatchFilter(
          -          new PrefixFilter(getBytes(startAndPrefix)));
          +      Filter f = new WhileMatchFilter(new PrefixFilter(getBytes(startAndPrefix)));
                 scan.setFilter(f);
                 if (columns != null && !columns.isEmpty()) {
          -        for(ByteBuffer column : columns) {
          -          byte [][] famQf = CellUtil.parseColumn(getBytes(column));
          -          if(famQf.length == 1) {
          +        for (ByteBuffer column : columns) {
          +          byte[][] famQf = CellUtil.parseColumn(getBytes(column));
          +          if (famQf.length == 1) {
                       scan.addFamily(famQf[0]);
                     } else {
                       scan.addColumn(famQf[0], famQf[1]);
          @@ -1042,15 +969,14 @@ public int scannerOpenWithPrefix(ByteBuffer tableName,
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
          -  public int scannerOpenTs(ByteBuffer tableName, ByteBuffer startRow,
          -      List columns, long timestamp,
          -      Map attributes) throws IOError, TException {
          +  public int scannerOpenTs(ByteBuffer tableName, ByteBuffer startRow, List columns,
          +      long timestamp, Map attributes) throws IOError, TException {
           
               Table table = null;
               try {
          @@ -1060,8 +986,8 @@ public int scannerOpenTs(ByteBuffer tableName, ByteBuffer startRow,
                 scan.setTimeRange(0, timestamp);
                 if (columns != null && !columns.isEmpty()) {
                   for (ByteBuffer column : columns) {
          -          byte [][] famQf = CellUtil.parseColumn(getBytes(column));
          -          if(famQf.length == 1) {
          +          byte[][] famQf = CellUtil.parseColumn(getBytes(column));
          +          if (famQf.length == 1) {
                       scan.addFamily(famQf[0]);
                     } else {
                       scan.addColumn(famQf[0], famQf[1]);
          @@ -1072,15 +998,14 @@ public int scannerOpenTs(ByteBuffer tableName, ByteBuffer startRow,
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
          -  public int scannerOpenWithStopTs(ByteBuffer tableName, ByteBuffer startRow,
          -      ByteBuffer stopRow, List columns, long timestamp,
          -      Map attributes)
          +  public int scannerOpenWithStopTs(ByteBuffer tableName, ByteBuffer startRow, ByteBuffer stopRow,
          +      List columns, long timestamp, Map attributes)
                 throws IOError, TException {
           
               Table table = null;
          @@ -1091,8 +1016,8 @@ public int scannerOpenWithStopTs(ByteBuffer tableName, ByteBuffer startRow,
                 scan.setTimeRange(0, timestamp);
                 if (columns != null && !columns.isEmpty()) {
                   for (ByteBuffer column : columns) {
          -          byte [][] famQf = CellUtil.parseColumn(getBytes(column));
          -          if(famQf.length == 1) {
          +          byte[][] famQf = CellUtil.parseColumn(getBytes(column));
          +          if (famQf.length == 1) {
                       scan.addFamily(famQf[0]);
                     } else {
                       scan.addColumn(famQf[0], famQf[1]);
          @@ -1104,14 +1029,14 @@ public int scannerOpenWithStopTs(ByteBuffer tableName, ByteBuffer startRow,
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
          -  public Map getColumnDescriptors(
          -      ByteBuffer tableName) throws IOError, TException {
          +  public Map getColumnDescriptors(ByteBuffer tableName)
          +      throws IOError, TException {
           
               Table table = null;
               try {
          @@ -1134,11 +1059,11 @@ public Map getColumnDescriptors(
             }
           
             private void closeTable(Table table) throws IOError {
          -    try{
          -      if(table != null){
          +    try {
          +      if (table != null) {
                   table.close();
                 }
          -    } catch (IOException e){
          +    } catch (IOException e) {
                 LOG.error(e.getMessage(), e);
                 throw getIOError(e);
               }
          @@ -1148,19 +1073,18 @@ private void closeTable(Table table) throws IOError {
             public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError {
               try {
                 byte[] row = getBytes(searchRow);
          -      Result startRowResult = getReverseScanResult(TableName.META_TABLE_NAME.getName(), row,
          -          HConstants.CATALOG_FAMILY);
          +      Result startRowResult =
          +          getReverseScanResult(TableName.META_TABLE_NAME.getName(), row, HConstants.CATALOG_FAMILY);
           
                 if (startRowResult == null) {
          -        throw new IOException("Cannot find row in "+ TableName.META_TABLE_NAME+", row="
          +        throw new IOException("Cannot find row in " + TableName.META_TABLE_NAME + ", row="
                       + Bytes.toStringBinary(row));
                 }
           
                 // find region start and end keys
                 RegionInfo regionInfo = CatalogFamilyFormat.getRegionInfo(startRowResult);
                 if (regionInfo == null) {
          -        throw new IOException("RegionInfo REGIONINFO was null or " +
          -            " empty in Meta for row="
+        throw new IOException("RegionInfo REGIONINFO was null or empty in Meta for row="
                       + Bytes.toStringBinary(row));
                 }
                 TRegionInfo region = new TRegionInfo();
          @@ -1189,8 +1113,7 @@ private Result getReverseScanResult(byte[] tableName, byte[] row, byte[] family)
               scan.setReversed(true);
               scan.addFamily(family);
               scan.withStartRow(row);
          -    try (Table table = getTable(tableName);
          -         ResultScanner scanner = table.getScanner(scan)) {
          +    try (Table table = getTable(tableName); ResultScanner scanner = table.getScanner(scan)) {
                 return scanner.next();
               }
             }
          @@ -1215,7 +1138,7 @@ public void increment(TIncrement tincrement) throws IOError, TException {
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
          @@ -1246,29 +1169,25 @@ public List append(TAppend tappend) throws IOError, TException {
               } catch (IOException e) {
                 LOG.warn(e.getMessage(), e);
                 throw getIOError(e);
          -    } finally{
          +    } finally {
                 closeTable(table);
               }
             }
           
             @Override
             public boolean checkAndPut(ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
          -      ByteBuffer value, Mutation mput, Map attributes) throws IOError,
          -      IllegalArgument, TException {
          +      ByteBuffer value, Mutation mput, Map attributes)
          +      throws IOError, IllegalArgument, TException {
               Put put;
               try {
                 put = new Put(getBytes(row), HConstants.LATEST_TIMESTAMP);
                 addAttributes(put, attributes);
           
                 byte[][] famAndQf = CellUtil.parseColumn(getBytes(mput.column));
          -      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
          -          .setRow(put.getRow())
          -          .setFamily(famAndQf[0])
          -          .setQualifier(famAndQf[1])
          -          .setTimestamp(put.getTimestamp())
          +      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
          +          .setFamily(famAndQf[0]).setQualifier(famAndQf[1]).setTimestamp(put.getTimestamp())
                     .setType(Cell.Type.Put)
          -          .setValue(mput.value != null ? getBytes(mput.value)
          -              : HConstants.EMPTY_BYTE_ARRAY)
          +          .setValue(mput.value != null ? getBytes(mput.value) : HConstants.EMPTY_BYTE_ARRAY)
                     .build());
                 put.setDurability(mput.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
               } catch (IOException | IllegalArgumentException e) {
          @@ -1313,12 +1232,12 @@ public boolean grant(TAccessControlEntity info) throws IOError, TException {
               Permission.Action[] actions = ThriftUtilities.permissionActionsFromString(info.actions);
               try {
                 if (info.scope == TPermissionScope.NAMESPACE) {
          -        AccessControlClient.grant(connectionCache.getAdmin().getConnection(),
          -          info.getNsName(), info.getUsername(), actions);
          +        AccessControlClient.grant(connectionCache.getAdmin().getConnection(), info.getNsName(),
          +          info.getUsername(), actions);
                 } else if (info.scope == TPermissionScope.TABLE) {
                   TableName tableName = TableName.valueOf(info.getTableName());
          -        AccessControlClient.grant(connectionCache.getAdmin().getConnection(),
          -          tableName, info.getUsername(), null, null, actions);
          +        AccessControlClient.grant(connectionCache.getAdmin().getConnection(), tableName,
          +          info.getUsername(), null, null, actions);
                 }
               } catch (Throwable t) {
                 if (t instanceof IOException) {
          @@ -1335,12 +1254,12 @@ public boolean revoke(TAccessControlEntity info) throws IOError, TException {
               Permission.Action[] actions = ThriftUtilities.permissionActionsFromString(info.actions);
               try {
                 if (info.scope == TPermissionScope.NAMESPACE) {
          -        AccessControlClient.revoke(connectionCache.getAdmin().getConnection(),
          -          info.getNsName(), info.getUsername(), actions);
          +        AccessControlClient.revoke(connectionCache.getAdmin().getConnection(), info.getNsName(),
          +          info.getUsername(), actions);
                 } else if (info.scope == TPermissionScope.TABLE) {
                   TableName tableName = TableName.valueOf(info.getTableName());
          -        AccessControlClient.revoke(connectionCache.getAdmin().getConnection(),
          -          tableName, info.getUsername(), null, null, actions);
          +        AccessControlClient.revoke(connectionCache.getAdmin().getConnection(), tableName,
          +          info.getUsername(), null, null, actions);
                 }
               } catch (Throwable t) {
                 if (t instanceof IOException) {
          @@ -1369,7 +1288,7 @@ private static void addAttributes(OperationWithAttributes op,
               }
               for (Map.Entry entry : attributes.entrySet()) {
                 String name = Bytes.toStringBinary(getBytes(entry.getKey()));
          -      byte[] value =  getBytes(entry.getValue());
          +      byte[] value = getBytes(entry.getValue());
                 op.setAttribute(name, value);
               }
             }
          @@ -1378,8 +1297,8 @@ protected static class ResultScannerWrapper {
           
               private final ResultScanner scanner;
               private final boolean sortColumns;
          -    public ResultScannerWrapper(ResultScanner resultScanner,
          -        boolean sortResultColumns) {
          +
          +    public ResultScannerWrapper(ResultScanner resultScanner, boolean sortResultColumns) {
                 scanner = resultScanner;
                 sortColumns = sortResultColumns;
               }
          @@ -1395,6 +1314,7 @@ public boolean isColumnSorted() {
           
             public static class IOErrorWithCause extends IOError {
               private final Throwable cause;
          +
               public IOErrorWithCause(Throwable cause) {
                 this.cause = cause;
               }
          @@ -1406,8 +1326,7 @@ public synchronized Throwable getCause() {
           
               @Override
               public boolean equals(Object other) {
          -      if (super.equals(other) &&
          -          other instanceof IOErrorWithCause) {
          +      if (super.equals(other) && other instanceof IOErrorWithCause) {
                   Throwable otherCause = ((IOErrorWithCause) other).getCause();
                   if (this.getCause() != null) {
                     return otherCause != null && this.getCause().equals(otherCause);
          @@ -1426,5 +1345,4 @@ public int hashCode() {
               }
             }
           
          -
           }
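
As a side note on scannerOpenWithPrefix above: the WhileMatchFilter-around-PrefixFilter pattern it sets up can be reproduced directly with the HBase Java client. A small sketch, with the Table and prefix supplied by the caller:

import java.io.IOException;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;

public class PrefixScanSketch {
  static ResultScanner prefixScan(Table table, byte[] prefix) throws IOException {
    // Start at the prefix and stop as soon as a row no longer matches it,
    // mirroring what the thrift handler configures for scannerOpenWithPrefix.
    Scan scan = new Scan().withStartRow(prefix);
    scan.setFilter(new WhileMatchFilter(new PrefixFilter(prefix)));
    return table.getScanner(scan);
  }
}
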
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.java
          index c37401d37635..38c0cb514996 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.java
          @@ -15,17 +15,16 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
          +import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.getDoasFromHeader;
          +
           import java.io.IOException;
           import java.security.PrivilegedExceptionAction;
           import java.util.Base64;
          -
           import javax.servlet.ServletException;
           import javax.servlet.http.HttpServletRequest;
           import javax.servlet.http.HttpServletResponse;
          -
           import org.apache.hadoop.hbase.security.SecurityUtil;
           import org.apache.hadoop.security.UserGroupInformation;
           import org.apache.hadoop.security.authorize.AuthorizationException;
          @@ -43,7 +42,6 @@
           import org.ietf.jgss.Oid;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
          -import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.getDoasFromHeader;
           
           /**
            * Thrift Http Servlet is used for performing Kerberos authentication if security is enabled and
          @@ -63,8 +61,8 @@ public class ThriftHttpServlet extends TServlet {
             public static final String NEGOTIATE = "Negotiate";
           
             public ThriftHttpServlet(TProcessor processor, TProtocolFactory protocolFactory,
          -      UserGroupInformation serviceUGI, UserGroupInformation httpUGI,
          -      HBaseServiceHandler handler, boolean securityEnabled, boolean doAsEnabled) {
          +      UserGroupInformation serviceUGI, UserGroupInformation httpUGI, HBaseServiceHandler handler,
          +      boolean securityEnabled, boolean doAsEnabled) {
               super(processor, protocolFactory);
               this.serviceUGI = serviceUGI;
               this.httpUGI = httpUGI;
          @@ -79,9 +77,9 @@ protected void doPost(HttpServletRequest request, HttpServletResponse response)
               String effectiveUser = request.getRemoteUser();
               if (securityEnabled) {
                 /*
          -      Check that the AUTHORIZATION header has any content. If it does not then return a 401
          -      requesting AUTHORIZATION header to be sent. This is typical where the first request doesn't
          -      send the AUTHORIZATION header initially.
+       * Check that the AUTHORIZATION header has any content. If it does not, return a 401
+       * requesting the AUTHORIZATION header to be sent. This is typical when the first request
+       * does not send the AUTHORIZATION header initially.
                  */
                 String authHeader = request.getHeader(HttpHeaders.AUTHORIZATION);
                 if (authHeader == null || authHeader.isEmpty()) {
          @@ -98,18 +96,18 @@ protected void doPost(HttpServletRequest request, HttpServletResponse response)
                   effectiveUser = identity.principal;
                   // It is standard for client applications expect this header.
                   // Please see http://tools.ietf.org/html/rfc4559 for more details.
          -        response.addHeader(HttpHeaders.WWW_AUTHENTICATE,  NEGOTIATE + " " + identity.outToken);
          +        response.addHeader(HttpHeaders.WWW_AUTHENTICATE, NEGOTIATE + " " + identity.outToken);
                 } catch (HttpAuthenticationException e) {
                   LOG.error("Kerberos Authentication failed", e);
                   // Send a 401 to the client
                   response.addHeader(HttpHeaders.WWW_AUTHENTICATE, NEGOTIATE);
                   response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
          -            "Authentication Error: " + e.getMessage());
          +          "Authentication Error: " + e.getMessage());
                   return;
                 }
               }
           
          -    if(effectiveUser == null) {
          +    if (effectiveUser == null) {
                 effectiveUser = serviceUGI.getShortUserName();
               }
           
          @@ -122,8 +120,8 @@ protected void doPost(HttpServletRequest request, HttpServletResponse response)
                 UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(effectiveUser);
                 // create and attempt to authorize a proxy user (the client is attempting
                 // to do proxy user)
          -      UserGroupInformation ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery,
          -          remoteUser);
          +      UserGroupInformation ugi =
          +          UserGroupInformation.createProxyUser(doAsUserFromQuery, remoteUser);
                 // validate the proxy user authorization
                 try {
                   ProxyUsers.authorize(ugi, request.getRemoteAddr());
          @@ -137,12 +135,11 @@ protected void doPost(HttpServletRequest request, HttpServletResponse response)
             }
           
             /**
          -   * Do the GSS-API kerberos authentication.
          -   * We already have a logged in subject in the form of httpUGI,
          -   * which GSS-API will extract information from.
+   * Do the GSS-API Kerberos authentication. We already have a logged-in subject in the form of
+   * httpUGI, which GSS-API will extract information from.
              */
             private RemoteUserIdentity doKerberosAuth(HttpServletRequest request)
          -    throws HttpAuthenticationException {
          +      throws HttpAuthenticationException {
               HttpKerberosServerAction action = new HttpKerberosServerAction(request, httpUGI);
               try {
                 String principal = httpUGI.doAs(action);
          @@ -154,8 +151,8 @@ private RemoteUserIdentity doKerberosAuth(HttpServletRequest request)
             }
           
             /**
          -   * Basic "struct" class to hold the final base64-encoded, authenticated GSSAPI token
          -   * for the user with the given principal talking to the Thrift server.
          +   * Basic "struct" class to hold the final base64-encoded, authenticated GSSAPI token for the user
          +   * with the given principal talking to the Thrift server.
              */
             private static class RemoteUserIdentity {
               final String outToken;
          @@ -171,6 +168,7 @@ private static class HttpKerberosServerAction implements PrivilegedExceptionActi
               final HttpServletRequest request;
               final UserGroupInformation httpUGI;
               String outToken = null;
          +
               HttpKerberosServerAction(HttpServletRequest request, UserGroupInformation httpUGI) {
                 this.request = request;
                 this.httpUGI = httpUGI;
          @@ -192,24 +190,22 @@ public String run() throws HttpAuthenticationException {
                   // GSS name for server
                   GSSName serverName = manager.createName(serverPrincipal, krb5PrincipalOid);
                   // GSS credentials for server
          -        GSSCredential serverCreds = manager.createCredential(serverName,
          -            GSSCredential.DEFAULT_LIFETIME,
          -            new Oid[]{kerberosMechOid, spnegoMechOid},
          -            GSSCredential.ACCEPT_ONLY);
          +        GSSCredential serverCreds =
          +            manager.createCredential(serverName, GSSCredential.DEFAULT_LIFETIME,
          +              new Oid[] { kerberosMechOid, spnegoMechOid }, GSSCredential.ACCEPT_ONLY);
                   // Create a GSS context
                   gssContext = manager.createContext(serverCreds);
                   // Get service ticket from the authorization header
                   String serviceTicketBase64 = getAuthHeader(request);
                   byte[] inToken = Base64.getDecoder().decode(serviceTicketBase64);
                   byte[] res = gssContext.acceptSecContext(inToken, 0, inToken.length);
          -        if(res != null) {
          +        if (res != null) {
                     outToken = Base64.getEncoder().encodeToString(res).replace("\n", "");
                   }
                   // Authenticate or deny based on its context completion
                   if (!gssContext.isEstablished()) {
          -          throw new HttpAuthenticationException("Kerberos authentication failed: " +
          -              "unable to establish context with the service ticket " +
          -              "provided by the client.");
          +          throw new HttpAuthenticationException("Kerberos authentication failed: "
          +              + "unable to establish context with the service ticket " + "provided by the client.");
                   }
                   return SecurityUtil.getUserFromPrincipal(gssContext.getSrcName().toString());
                 } catch (GSSException e) {
          @@ -227,24 +223,22 @@ public String run() throws HttpAuthenticationException {
           
               /**
                * Returns the base64 encoded auth header payload
          -     *
                * @throws HttpAuthenticationException if a remote or network exception occurs
                */
          -    private String getAuthHeader(HttpServletRequest request)
          -        throws HttpAuthenticationException {
          +    private String getAuthHeader(HttpServletRequest request) throws HttpAuthenticationException {
                 String authHeader = request.getHeader(HttpHeaders.AUTHORIZATION);
                 // Each http request must have an Authorization header
                 if (authHeader == null || authHeader.isEmpty()) {
          -        throw new HttpAuthenticationException("Authorization header received " +
          -            "from the client is empty.");
          +        throw new HttpAuthenticationException(
          +            "Authorization header received " + "from the client is empty.");
                 }
                 String authHeaderBase64String;
                 int beginIndex = (NEGOTIATE + " ").length();
                 authHeaderBase64String = authHeader.substring(beginIndex);
                 // Authorization header must have a payload
                 if (authHeaderBase64String.isEmpty()) {
          -        throw new HttpAuthenticationException("Authorization header received " +
          -            "from the client does not contain any data.");
          +        throw new HttpAuthenticationException(
          +            "Authorization header received " + "from the client does not contain any data.");
                 }
                 return authHeaderBase64String;
               }
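
A small sketch of the client-facing contract the servlet above implements: the Authorization header carries "Negotiate <base64 token>", and getAuthHeader strips the scheme before the token is base64-decoded and handed to GSS-API. The token bytes below are made up purely for illustration.

import java.util.Base64;

public class NegotiateHeaderSketch {
  private static final String NEGOTIATE = "Negotiate";

  // Mirrors getAuthHeader(): drop the "Negotiate " scheme prefix, then base64-decode the token.
  static byte[] tokenFromHeader(String authHeader) {
    if (authHeader == null || authHeader.isEmpty()) {
      throw new IllegalArgumentException("Authorization header is empty");
    }
    return Base64.getDecoder().decode(authHeader.substring((NEGOTIATE + " ").length()));
  }

  public static void main(String[] args) {
    // Illustrative header only; a real client sends an SPNEGO service ticket here.
    String header =
        NEGOTIATE + " " + Base64.getEncoder().encodeToString("fake-spnego-token".getBytes());
    System.out.println("decoded token length: " + tokenFromHeader(header).length);
  }
}
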
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
          index 207db00afa2f..d2940c0a51ac 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
          @@ -1,22 +1,20 @@
           /*
          - * Copyright The Apache Software Foundation
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * Licensed to the Apache Software Foundation (ASF) under one or more
          - * contributor license agreements. See the NOTICE file distributed with this
          - * work for additional information regarding copyright ownership. The ASF
          - * licenses this file to you under the Apache License, Version 2.0 (the
          - * "License"); you may not use this file except in compliance with the License.
          - * You may obtain a copy of the License at
          - *
          - * http://www.apache.org/licenses/LICENSE-2.0
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
            * Unless required by applicable law or agreed to in writing, software
          - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
          - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
          - * License for the specific language governing permissions and limitations
          - * under the License.
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import org.apache.hadoop.conf.Configuration;
          @@ -42,17 +40,16 @@
           import org.slf4j.LoggerFactory;
           
           /**
          - * This class is for maintaining the various statistics of thrift server
          - * and publishing them through the metrics interfaces.
+ * This class is for maintaining the various statistics of the Thrift server and publishing them
+ * through the metrics interfaces.
            */
           @InterfaceAudience.Private
          -public class ThriftMetrics  {
          +public class ThriftMetrics {
           
             private static final Logger LOG = LoggerFactory.getLogger(ThriftMetrics.class);
           
             public enum ThriftServerType {
          -    ONE,
          -    TWO
          +    ONE, TWO
             }
           
             public MetricsThriftServerSource getSource() {
          @@ -65,8 +62,7 @@ public void setSource(MetricsThriftServerSource source) {
           
             protected MetricsThriftServerSource source;
             protected final long slowResponseTime;
          -  public static final String SLOW_RESPONSE_NANO_SEC =
          -    "hbase.thrift.slow.response.nano.second";
          +  public static final String SLOW_RESPONSE_NANO_SEC = "hbase.thrift.slow.response.nano.second";
             public static final long DEFAULT_SLOW_RESPONSE_NANO_SEC = 10 * 1000 * 1000;
             private final ThriftServerType thriftServerType;
           
          @@ -75,10 +71,10 @@ public ThriftMetrics(Configuration conf, ThriftServerType t) {
               thriftServerType = t;
               if (t == ThriftServerType.ONE) {
                 source = CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)
          -              .createThriftOneSource();
          +          .createThriftOneSource();
               } else if (t == ThriftServerType.TWO) {
                 source = CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)
          -              .createThriftTwoSource();
          +          .createThriftTwoSource();
               }
           
             }
          @@ -117,8 +113,8 @@ public void decActiveWorkerCount() {
             }
           
             /**
          -   * Increment the count for a specific exception type.  This is called for each exception type
          -   * that is returned to the thrift handler.
          +   * Increment the count for a specific exception type. This is called for each exception type that
          +   * is returned to the thrift handler.
              * @param rawThrowable type of exception
              */
             public void exception(Throwable rawThrowable) {
          @@ -126,12 +122,9 @@ public void exception(Throwable rawThrowable) {
           
               Throwable throwable = unwrap(rawThrowable);
               /**
          -     * Keep some metrics for commonly seen exceptions
          -     *
          -     * Try and  put the most common types first.
          -     * Place child types before the parent type that they extend.
          -     *
          -     * If this gets much larger we might have to go to a hashmap
+     * Keep some metrics for commonly seen exceptions. Try to put the most common types first and
+     * place child types before the parent type that they extend. If this gets much larger we might
+     * have to go to a hashmap.
                */
               if (throwable != null) {
                 if (throwable instanceof OutOfOrderScannerNextException) {
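
On the ordering called out in the comment above: with an instanceof chain, a subclass must be tested before its parent, or the parent branch absorbs it. A tiny illustration, with exception types and counters chosen arbitrarily as stand-ins for the real metrics source:

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

public class ExceptionCountSketch {
  private final AtomicLong fileNotFound = new AtomicLong();
  private final AtomicLong otherIo = new AtomicLong();

  void exception(Throwable t) {
    // Child type first; otherwise the IOException branch would also match FileNotFoundException.
    if (t instanceof FileNotFoundException) {
      fileNotFound.incrementAndGet();
    } else if (t instanceof IOException) {
      otherIo.incrementAndGet();
    }
  }
}
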
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
          index f02234953209..68cc8e00b652 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import static org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_DEAFULT;
          @@ -157,17 +156,14 @@
           import org.apache.hbase.thirdparty.org.eclipse.jetty.util.thread.QueuedThreadPool;
           
           /**
          - * ThriftServer- this class starts up a Thrift server which implements the
          - * Hbase API specified in the Hbase.thrift IDL file. The server runs in an
          - * independent process.
+ * ThriftServer - this class starts up a Thrift server which implements the Hbase API specified in
          + * the Hbase.thrift IDL file. The server runs in an independent process.
            */
           @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
          -public class ThriftServer  extends Configured implements Tool {
          +public class ThriftServer extends Configured implements Tool {
           
             private static final Logger LOG = LoggerFactory.getLogger(ThriftServer.class);
           
          -
          -
             protected Configuration conf;
           
             protected InfoServer infoServer;
          @@ -184,7 +180,6 @@ public class ThriftServer  extends Configured implements Tool {
             protected String host;
             protected int listenPort;
           
          -
             protected boolean securityEnabled;
             protected boolean doAsEnabled;
           
          @@ -193,7 +188,6 @@ public class ThriftServer  extends Configured implements Tool {
             protected volatile TServer tserver;
             protected volatile Server httpServer;
           
          -
             //
             // Main program and support routines
             //
          @@ -209,11 +203,11 @@ protected ThriftMetrics createThriftMetrics(Configuration conf) {
             protected void setupParamters() throws IOException {
               // login the server principal (if using secure Hadoop)
               UserProvider userProvider = UserProvider.instantiate(conf);
          -    securityEnabled = userProvider.isHadoopSecurityEnabled()
          -        && userProvider.isHBaseSecurityEnabled();
          +    securityEnabled =
          +        userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled();
               if (securityEnabled) {
          -      host = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
          -          conf.get(THRIFT_DNS_INTERFACE_KEY, "default"),
          +      host = Strings.domainNamePointerToHostName(
          +        DNS.getDefaultHost(conf.get(THRIFT_DNS_INTERFACE_KEY, "default"),
                     conf.get(THRIFT_DNS_NAMESERVER_KEY, "default")));
                 userProvider.login(THRIFT_KEYTAB_FILE_KEY, THRIFT_KERBEROS_PRINCIPAL_KEY, host);
           
          @@ -222,8 +216,8 @@ protected void setupParamters() throws IOException {
                 String spnegoKeytab = getSpnegoKeytab(conf);
                 UserGroupInformation.setConfiguration(conf);
                 // login the SPNEGO principal using UGI to avoid polluting the login user
          -      this.httpUGI = UserGroupInformation.loginUserFromKeytabAndReturnUGI(spnegoPrincipal,
          -        spnegoKeytab);
          +      this.httpUGI =
          +          UserGroupInformation.loginUserFromKeytabAndReturnUGI(spnegoPrincipal, spnegoKeytab);
               }
               this.serviceUGI = userProvider.getCurrent().getUGI();
               if (httpUGI == null) {
          @@ -248,14 +242,13 @@ protected void setupParamters() throws IOException {
                 this.qop = SaslUtil.getQop(strQop);
               }
               if (qop != null) {
          -      if (qop != SaslUtil.QualityOfProtection.AUTHENTICATION &&
          -          qop != SaslUtil.QualityOfProtection.INTEGRITY &&
          -          qop != SaslUtil.QualityOfProtection.PRIVACY) {
          +      if (qop != SaslUtil.QualityOfProtection.AUTHENTICATION
          +          && qop != SaslUtil.QualityOfProtection.INTEGRITY
          +          && qop != SaslUtil.QualityOfProtection.PRIVACY) {
                   throw new IOException(String.format("Invalid %s: It must be one of %s, %s, or %s.",
          -            THRIFT_QOP_KEY,
          -            SaslUtil.QualityOfProtection.AUTHENTICATION.name(),
          -            SaslUtil.QualityOfProtection.INTEGRITY.name(),
          -            SaslUtil.QualityOfProtection.PRIVACY.name()));
          +          THRIFT_QOP_KEY, SaslUtil.QualityOfProtection.AUTHENTICATION.name(),
          +          SaslUtil.QualityOfProtection.INTEGRITY.name(),
          +          SaslUtil.QualityOfProtection.PRIVACY.name()));
                 }
                 checkHttpSecurity(qop, conf);
                 if (!securityEnabled) {
          @@ -299,12 +292,12 @@ private String getSpnegoKeytab(Configuration conf) {
           
             protected void startInfoServer() throws IOException {
               // Put up info server.
          -    int port = conf.getInt(THRIFT_INFO_SERVER_PORT , THRIFT_INFO_SERVER_PORT_DEFAULT);
          +    int port = conf.getInt(THRIFT_INFO_SERVER_PORT, THRIFT_INFO_SERVER_PORT_DEFAULT);
           
               if (port >= 0) {
                 conf.setLong("startcode", EnvironmentEdgeManager.currentTime());
          -      String a = conf
          -          .get(THRIFT_INFO_SERVER_BINDING_ADDRESS, THRIFT_INFO_SERVER_BINDING_ADDRESS_DEFAULT);
          +      String a =
          +          conf.get(THRIFT_INFO_SERVER_BINDING_ADDRESS, THRIFT_INFO_SERVER_BINDING_ADDRESS_DEFAULT);
                 infoServer = new InfoServer("thrift", a, port, false, conf);
                 infoServer.setAttribute("hbase.conf", conf);
                 infoServer.setAttribute("hbase.thrift.server.type", metrics.getThriftServerType().name());
          @@ -313,11 +306,10 @@ protected void startInfoServer() throws IOException {
             }
           
             protected void checkHttpSecurity(SaslUtil.QualityOfProtection qop, Configuration conf) {
          -    if (qop == SaslUtil.QualityOfProtection.PRIVACY &&
          -        conf.getBoolean(USE_HTTP_CONF_KEY, false) &&
          -        !conf.getBoolean(THRIFT_SSL_ENABLED_KEY, false)) {
          -      throw new IllegalArgumentException("Thrift HTTP Server's QoP is privacy, but " +
          -          THRIFT_SSL_ENABLED_KEY + " is false");
          +    if (qop == SaslUtil.QualityOfProtection.PRIVACY && conf.getBoolean(USE_HTTP_CONF_KEY, false)
          +        && !conf.getBoolean(THRIFT_SSL_ENABLED_KEY, false)) {
          +      throw new IllegalArgumentException(
          +          "Thrift HTTP Server's QoP is privacy, but " + THRIFT_SSL_ENABLED_KEY + " is false");
               }
             }
           
          @@ -349,15 +341,13 @@ public Server getHttpServer() {
               return httpServer;
             }
           
          -  protected void printUsageAndExit(Options options, int exitCode)
          -      throws ExitCodeException {
          +  protected void printUsageAndExit(Options options, int exitCode) throws ExitCodeException {
               HelpFormatter formatter = new HelpFormatter();
               formatter.printHelp("Thrift", null, options,
          -        "To start the Thrift server run 'hbase-daemon.sh start thrift' or " +
          -        "'hbase thrift'\n" +
          -        "To shutdown the thrift server run 'hbase-daemon.sh stop " +
          -        "thrift' or send a kill signal to the thrift server pid",
          -        true);
          +      "To start the Thrift server run 'hbase-daemon.sh start thrift' or " + "'hbase thrift'\n"
          +          + "To shutdown the thrift server run 'hbase-daemon.sh stop "
          +          + "thrift' or send a kill signal to the thrift server pid",
          +      true);
               throw new ExitCodeException(exitCode, "");
             }
           
          @@ -373,7 +363,6 @@ protected TServlet createTServlet(TProtocolFactory protocolFactory) {
           
             /**
              * Setup an HTTP Server using Jetty to serve calls from THttpClient
          -   *
              * @throws IOException IOException
              */
             protected void setupHTTPServer() throws IOException {
          @@ -385,23 +374,20 @@ protected void setupHTTPServer() throws IOException {
               // Jetty set the default max thread number to 250, if we don't set it.
               //
               // Our default min thread number 2 is the same as that used by Jetty.
          -    int minThreads = conf.getInt(HTTP_MIN_THREADS_KEY,
          -        conf.getInt(TBoundedThreadPoolServer.MIN_WORKER_THREADS_CONF_KEY,
          -            HTTP_MIN_THREADS_KEY_DEFAULT));
          -    int maxThreads = conf.getInt(HTTP_MAX_THREADS_KEY,
          -        conf.getInt(TBoundedThreadPoolServer.MAX_WORKER_THREADS_CONF_KEY,
          -            HTTP_MAX_THREADS_KEY_DEFAULT));
          +    int minThreads = conf.getInt(HTTP_MIN_THREADS_KEY, conf.getInt(
          +      TBoundedThreadPoolServer.MIN_WORKER_THREADS_CONF_KEY, HTTP_MIN_THREADS_KEY_DEFAULT));
          +    int maxThreads = conf.getInt(HTTP_MAX_THREADS_KEY, conf.getInt(
          +      TBoundedThreadPoolServer.MAX_WORKER_THREADS_CONF_KEY, HTTP_MAX_THREADS_KEY_DEFAULT));
               QueuedThreadPool threadPool = new QueuedThreadPool(maxThreads);
               threadPool.setMinThreads(minThreads);
               httpServer = new Server(threadPool);
           
               // Context handler
          -    ServletContextHandler ctxHandler = new ServletContextHandler(httpServer, "/",
          -        ServletContextHandler.SESSIONS);
          +    ServletContextHandler ctxHandler =
          +        new ServletContextHandler(httpServer, "/", ServletContextHandler.SESSIONS);
               ctxHandler.addServlet(new ServletHolder(thriftHttpServlet), "/*");
               HttpServerUtil.constrainHttpMethods(ctxHandler,
          -        conf.getBoolean(THRIFT_HTTP_ALLOW_OPTIONS_METHOD,
          -            THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT));
          +      conf.getBoolean(THRIFT_HTTP_ALLOW_OPTIONS_METHOD, THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT));
           
               // set up Jetty and run the embedded server
               HttpConfiguration httpConfig = new HttpConfiguration();
          @@ -414,41 +400,40 @@ protected void setupHTTPServer() throws IOException {
               httpConfig.setSendDateHeader(false);
           
               ServerConnector serverConnector;
          -    if(conf.getBoolean(THRIFT_SSL_ENABLED_KEY, false)) {
          +    if (conf.getBoolean(THRIFT_SSL_ENABLED_KEY, false)) {
                 HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
                 httpsConfig.addCustomizer(new SecureRequestCustomizer());
           
                 SslContextFactory sslCtxFactory = new SslContextFactory();
                 String keystore = conf.get(THRIFT_SSL_KEYSTORE_STORE_KEY);
          -      String password = HBaseConfiguration.getPassword(conf,
          -          THRIFT_SSL_KEYSTORE_PASSWORD_KEY, null);
          -      String keyPassword = HBaseConfiguration.getPassword(conf,
          -          THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY, password);
          +      String password =
          +          HBaseConfiguration.getPassword(conf, THRIFT_SSL_KEYSTORE_PASSWORD_KEY, null);
          +      String keyPassword =
          +          HBaseConfiguration.getPassword(conf, THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY, password);
                 sslCtxFactory.setKeyStorePath(keystore);
                 sslCtxFactory.setKeyStorePassword(password);
                 sslCtxFactory.setKeyManagerPassword(keyPassword);
          -      sslCtxFactory.setKeyStoreType(conf.get(
          -        THRIFT_SSL_KEYSTORE_TYPE_KEY, THRIFT_SSL_KEYSTORE_TYPE_DEFAULT));
          +      sslCtxFactory.setKeyStoreType(
          +        conf.get(THRIFT_SSL_KEYSTORE_TYPE_KEY, THRIFT_SSL_KEYSTORE_TYPE_DEFAULT));
           
          -      String[] excludeCiphers = conf.getStrings(
          -          THRIFT_SSL_EXCLUDE_CIPHER_SUITES_KEY, ArrayUtils.EMPTY_STRING_ARRAY);
          +      String[] excludeCiphers =
          +          conf.getStrings(THRIFT_SSL_EXCLUDE_CIPHER_SUITES_KEY, ArrayUtils.EMPTY_STRING_ARRAY);
                 if (excludeCiphers.length != 0) {
                   sslCtxFactory.setExcludeCipherSuites(excludeCiphers);
                 }
          -      String[] includeCiphers = conf.getStrings(
          -          THRIFT_SSL_INCLUDE_CIPHER_SUITES_KEY, ArrayUtils.EMPTY_STRING_ARRAY);
          +      String[] includeCiphers =
          +          conf.getStrings(THRIFT_SSL_INCLUDE_CIPHER_SUITES_KEY, ArrayUtils.EMPTY_STRING_ARRAY);
                 if (includeCiphers.length != 0) {
                   sslCtxFactory.setIncludeCipherSuites(includeCiphers);
                 }
           
                 // Disable SSLv3 by default due to "Poodle" Vulnerability - CVE-2014-3566
          -      String[] excludeProtocols = conf.getStrings(
          -          THRIFT_SSL_EXCLUDE_PROTOCOLS_KEY, "SSLv3");
          +      String[] excludeProtocols = conf.getStrings(THRIFT_SSL_EXCLUDE_PROTOCOLS_KEY, "SSLv3");
                 if (excludeProtocols.length != 0) {
                   sslCtxFactory.setExcludeProtocols(excludeProtocols);
                 }
          -      String[] includeProtocols = conf.getStrings(
          -          THRIFT_SSL_INCLUDE_PROTOCOLS_KEY, ArrayUtils.EMPTY_STRING_ARRAY);
          +      String[] includeProtocols =
          +          conf.getStrings(THRIFT_SSL_INCLUDE_PROTOCOLS_KEY, ArrayUtils.EMPTY_STRING_ARRAY);
                 if (includeProtocols.length != 0) {
                   sslCtxFactory.setIncludeProtocols(includeProtocols);
                 }
          @@ -484,11 +469,11 @@ protected void setupServer() throws Exception {
               TTransportFactory transportFactory;
               if (conf.getBoolean(FRAMED_CONF_KEY, FRAMED_CONF_DEFAULT) || implType.isAlwaysFramed) {
                 if (qop != null) {
          -        throw new RuntimeException("Thrift server authentication"
          -            + " doesn't work with framed transport yet");
          +        throw new RuntimeException(
          +            "Thrift server authentication" + " doesn't work with framed transport yet");
                 }
                 transportFactory = new TFramedTransport.Factory(
          -        conf.getInt(MAX_FRAME_SIZE_CONF_KEY, MAX_FRAME_SIZE_CONF_DEFAULT) * 1024 * 1024);
          +          conf.getInt(MAX_FRAME_SIZE_CONF_KEY, MAX_FRAME_SIZE_CONF_DEFAULT) * 1024 * 1024);
                 LOG.debug("Using framed transport");
               } else if (qop == null) {
                 transportFactory = new TTransportFactory();
          @@ -502,39 +487,37 @@ protected void setupServer() throws Exception {
       Map<String, String> saslProperties = SaslUtil.initSaslProperties(qop.name());
                 TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory();
                 saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties,
          -          new SaslRpcServer.SaslGssCallbackHandler() {
          -            @Override
          -            public void handle(Callback[] callbacks)
          -                throws UnsupportedCallbackException {
          -              AuthorizeCallback ac = null;
          -              for (Callback callback : callbacks) {
          -                if (callback instanceof AuthorizeCallback) {
          -                  ac = (AuthorizeCallback) callback;
          -                } else {
          -                  throw new UnsupportedCallbackException(callback,
          -                      "Unrecognized SASL GSSAPI Callback");
          -                }
          +        new SaslRpcServer.SaslGssCallbackHandler() {
          +          @Override
          +          public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
          +            AuthorizeCallback ac = null;
          +            for (Callback callback : callbacks) {
          +              if (callback instanceof AuthorizeCallback) {
          +                ac = (AuthorizeCallback) callback;
          +              } else {
          +                throw new UnsupportedCallbackException(callback,
          +                    "Unrecognized SASL GSSAPI Callback");
                         }
          -              if (ac != null) {
          -                String authid = ac.getAuthenticationID();
          -                String authzid = ac.getAuthorizationID();
          -                if (!authid.equals(authzid)) {
          -                  ac.setAuthorized(false);
          -                } else {
          -                  ac.setAuthorized(true);
          -                  String userName = SecurityUtil.getUserFromPrincipal(authzid);
          -                  LOG.info("Effective user: {}", userName);
          -                  ac.setAuthorizedID(userName);
          -                }
          +            }
          +            if (ac != null) {
          +              String authid = ac.getAuthenticationID();
          +              String authzid = ac.getAuthorizationID();
          +              if (!authid.equals(authzid)) {
          +                ac.setAuthorized(false);
          +              } else {
          +                ac.setAuthorized(true);
          +                String userName = SecurityUtil.getUserFromPrincipal(authzid);
          +                LOG.info("Effective user: {}", userName);
          +                ac.setAuthorizedID(userName);
                         }
                       }
          -          });
          +          }
          +        });
                 transportFactory = saslFactory;
           
                 // Create a processor wrapper, to get the caller
                 processorToUse = (inProt, outProt) -> {
          -        TSaslServerTransport saslServerTransport =
          -            (TSaslServerTransport)inProt.getTransport();
          +        TSaslServerTransport saslServerTransport = (TSaslServerTransport) inProt.getTransport();
                   SaslServer saslServer = saslServerTransport.getSaslServer();
                   String principal = saslServer.getAuthorizationID();
                   hbaseServiceHandler.setEffectiveUser(principal);
          @@ -543,41 +526,41 @@ public void handle(Callback[] callbacks)
               }
           
               if (conf.get(BIND_CONF_KEY) != null && !implType.canSpecifyBindIP) {
          -      LOG.error("Server types {} don't support IP address binding at the moment. See " +
          -              "https://issues.apache.org/jira/browse/HBASE-2155 for details.",
          -          Joiner.on(", ").join(ImplType.serversThatCannotSpecifyBindIP()));
          +      LOG.error(
          +        "Server types {} don't support IP address binding at the moment. See "
          +            + "https://issues.apache.org/jira/browse/HBASE-2155 for details.",
          +        Joiner.on(", ").join(ImplType.serversThatCannotSpecifyBindIP()));
                 throw new RuntimeException("-" + BIND_CONF_KEY + " not supported with " + implType);
               }
           
               InetSocketAddress inetSocketAddress = new InetSocketAddress(getBindAddress(conf), listenPort);
          -    if (implType == ImplType.HS_HA || implType == ImplType.NONBLOCKING ||
          -        implType == ImplType.THREADED_SELECTOR) {
          +    if (implType == ImplType.HS_HA || implType == ImplType.NONBLOCKING
          +        || implType == ImplType.THREADED_SELECTOR) {
                 TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(inetSocketAddress);
                 if (implType == ImplType.NONBLOCKING) {
                   tserver = getTNonBlockingServer(serverTransport, protocolFactory, processorToUse,
          -            transportFactory, inetSocketAddress);
          +          transportFactory, inetSocketAddress);
                 } else if (implType == ImplType.HS_HA) {
                   tserver = getTHsHaServer(serverTransport, protocolFactory, processorToUse, transportFactory,
          -            inetSocketAddress);
          +          inetSocketAddress);
                 } else { // THREADED_SELECTOR
                   tserver = getTThreadedSelectorServer(serverTransport, protocolFactory, processorToUse,
          -            transportFactory, inetSocketAddress);
          +          transportFactory, inetSocketAddress);
                 }
                 LOG.info("starting HBase {} server on {}", implType.simpleClassName(),
          -          Integer.toString(listenPort));
          +        Integer.toString(listenPort));
               } else if (implType == ImplType.THREAD_POOL) {
                 this.tserver = getTThreadPoolServer(protocolFactory, processorToUse, transportFactory,
          -          inetSocketAddress);
          +        inetSocketAddress);
               } else {
          -      throw new AssertionError("Unsupported Thrift server implementation: " +
          -          implType.simpleClassName());
          +      throw new AssertionError(
          +          "Unsupported Thrift server implementation: " + implType.simpleClassName());
               }
           
               // A sanity check that we instantiated the right type of server.
               if (tserver.getClass() != implType.serverClass) {
          -      throw new AssertionError("Expected to create Thrift server class " +
          -          implType.serverClass.getName() + " but got " +
          -          tserver.getClass().getName());
          +      throw new AssertionError("Expected to create Thrift server class "
          +          + implType.serverClass.getName() + " but got " + tserver.getClass().getName());
               }
             }
           
          @@ -598,12 +581,11 @@ protected TServer getTHsHaServer(TNonblockingServerTransport serverTransport,
               LOG.info("starting HBase HsHA Thrift server on " + inetSocketAddress.toString());
               THsHaServer.Args serverArgs = new THsHaServer.Args(serverTransport);
               int queueSize = conf.getInt(TBoundedThreadPoolServer.MAX_QUEUED_REQUESTS_CONF_KEY,
          -        TBoundedThreadPoolServer.DEFAULT_MAX_QUEUED_REQUESTS);
          +      TBoundedThreadPoolServer.DEFAULT_MAX_QUEUED_REQUESTS);
               CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(queueSize), metrics);
               int workerThread = conf.getInt(TBoundedThreadPoolServer.MAX_WORKER_THREADS_CONF_KEY,
          -        serverArgs.getMaxWorkerThreads());
          -    ExecutorService executorService = createExecutor(
          -        callQueue, workerThread, workerThread);
          +      serverArgs.getMaxWorkerThreads());
          +    ExecutorService executorService = createExecutor(callQueue, workerThread, workerThread);
               serverArgs.executorService(executorService).processor(processor)
                   .transportFactory(transportFactory).protocolFactory(protocolFactory);
               return new THsHaServer(serverArgs);
          @@ -616,14 +598,13 @@ protected TServer getTThreadedSelectorServer(TNonblockingServerTransport serverT
               TThreadedSelectorServer.Args serverArgs =
                   new HThreadedSelectorServerArgs(serverTransport, conf);
               int queueSize = conf.getInt(TBoundedThreadPoolServer.MAX_QUEUED_REQUESTS_CONF_KEY,
          -        TBoundedThreadPoolServer.DEFAULT_MAX_QUEUED_REQUESTS);
          +      TBoundedThreadPoolServer.DEFAULT_MAX_QUEUED_REQUESTS);
               CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(queueSize), metrics);
               int workerThreads = conf.getInt(TBoundedThreadPoolServer.MAX_WORKER_THREADS_CONF_KEY,
          -        serverArgs.getWorkerThreads());
          +      serverArgs.getWorkerThreads());
               int selectorThreads = conf.getInt(THRIFT_SELECTOR_NUM, serverArgs.getSelectorThreads());
               serverArgs.selectorThreads(selectorThreads);
          -    ExecutorService executorService = createExecutor(
          -        callQueue, workerThreads, workerThreads);
          +    ExecutorService executorService = createExecutor(callQueue, workerThreads, workerThreads);
               serverArgs.executorService(executorService).processor(processor)
                   .transportFactory(transportFactory).protocolFactory(protocolFactory);
               return new TThreadedSelectorServer(serverArgs);
          @@ -635,11 +616,10 @@ protected TServer getTThreadPoolServer(TProtocolFactory protocolFactory, TProces
               // Thrift's implementation uses '0' as a placeholder for 'use the default.'
               int backlog = conf.getInt(BACKLOG_CONF_KEY, BACKLOG_CONF_DEAFULT);
               int readTimeout = conf.getInt(THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY,
          -        THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT);
          -    TServerTransport serverTransport = new TServerSocket(
          -        new TServerSocket.ServerSocketTransportArgs().
          -            bindAddr(inetSocketAddress).backlog(backlog).
          -            clientTimeout(readTimeout));
          +      THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT);
          +    TServerTransport serverTransport =
          +        new TServerSocket(new TServerSocket.ServerSocketTransportArgs().bindAddr(inetSocketAddress)
          +            .backlog(backlog).clientTimeout(readTimeout));
           
               TBoundedThreadPoolServer.Args serverArgs =
                   new TBoundedThreadPoolServer.Args(serverTransport, conf);
          @@ -662,8 +642,8 @@ protected TProtocolFactory getProtocolFactory() {
               return protocolFactory;
             }
           
-  protected ExecutorService createExecutor(BlockingQueue<Runnable> callQueue,
          -      int minWorkers, int maxWorkers) {
+  protected ExecutorService createExecutor(BlockingQueue<Runnable> callQueue, int minWorkers,
          +      int maxWorkers) {
               ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
               tfb.setDaemon(true);
               tfb.setNameFormat("thrift-worker-%d");
@@ -673,20 +653,18 @@ protected ExecutorService createExecutor(BlockingQueue<Runnable> callQueue,
               return threadPool;
             }
           
          -  protected InetAddress getBindAddress(Configuration conf)
          -      throws UnknownHostException {
          +  protected InetAddress getBindAddress(Configuration conf) throws UnknownHostException {
               String bindAddressStr = conf.get(BIND_CONF_KEY, DEFAULT_BIND_ADDR);
               return InetAddress.getByName(bindAddressStr);
             }
           
          -
             public static void registerFilters(Configuration conf) {
               String[] filters = conf.getStrings(THRIFT_FILTERS);
               Splitter splitter = Splitter.on(':');
          -    if(filters != null) {
          -      for(String filterClass: filters) {
          +    if (filters != null) {
          +      for (String filterClass : filters) {
         List<String> filterPart = splitter.splitToList(filterClass);
          -        if(filterPart.size() != 2) {
          +        if (filterPart.size() != 2) {
                     LOG.warn("Invalid filter specification " + filterClass + " - skipping");
                   } else {
                     ParseFilter.registerFilter(filterPart.get(0), filterPart.get(1));
          @@ -700,10 +678,10 @@ public static void registerFilters(Configuration conf) {
              * @param options options
              */
             protected void addOptions(Options options) {
          -    options.addOption("b", BIND_OPTION, true, "Address to bind " +
          -        "the Thrift server to. [default: " + DEFAULT_BIND_ADDR + "]");
          -    options.addOption("p", PORT_OPTION, true, "Port to bind to [default: " +
          -        DEFAULT_LISTEN_PORT + "]");
          +    options.addOption("b", BIND_OPTION, true,
          +      "Address to bind " + "the Thrift server to. [default: " + DEFAULT_BIND_ADDR + "]");
          +    options.addOption("p", PORT_OPTION, true,
          +      "Port to bind to [default: " + DEFAULT_LISTEN_PORT + "]");
               options.addOption("f", FRAMED_OPTION, false, "Use framed transport");
               options.addOption("c", COMPACT_OPTION, false, "Use the compact protocol");
               options.addOption("h", "help", false, "Print help information");
          @@ -711,25 +689,22 @@ protected void addOptions(Options options) {
               options.addOption(null, INFOPORT_OPTION, true, "Port for web UI");
           
               options.addOption("m", MIN_WORKERS_OPTION, true,
          -        "The minimum number of worker threads for " +
          -            ImplType.THREAD_POOL.simpleClassName());
          +      "The minimum number of worker threads for " + ImplType.THREAD_POOL.simpleClassName());
           
               options.addOption("w", MAX_WORKERS_OPTION, true,
          -        "The maximum number of worker threads for " +
          -            ImplType.THREAD_POOL.simpleClassName());
          +      "The maximum number of worker threads for " + ImplType.THREAD_POOL.simpleClassName());
           
               options.addOption("q", MAX_QUEUE_SIZE_OPTION, true,
          -        "The maximum number of queued requests in " +
          -            ImplType.THREAD_POOL.simpleClassName());
          +      "The maximum number of queued requests in " + ImplType.THREAD_POOL.simpleClassName());
           
               options.addOption("k", KEEP_ALIVE_SEC_OPTION, true,
          -        "The amount of time in secods to keep a thread alive when idle in " +
          -            ImplType.THREAD_POOL.simpleClassName());
          +      "The amount of time in secods to keep a thread alive when idle in "
          +          + ImplType.THREAD_POOL.simpleClassName());
           
               options.addOption("t", READ_TIMEOUT_OPTION, true,
          -        "Amount of time in milliseconds before a server thread will timeout " +
          -            "waiting for client to send data on a connected socket. Currently, " +
          -            "only applies to TBoundedThreadPoolServer");
          +      "Amount of time in milliseconds before a server thread will timeout "
          +          + "waiting for client to send data on a connected socket. Currently, "
          +          + "only applies to TBoundedThreadPoolServer");
           
               options.addOptionGroup(ImplType.createOptionGroup());
             }
          @@ -753,33 +728,29 @@ protected void parseCommandLine(CommandLine cmd, Options options) throws ExitCod
                   LOG.debug("Web UI port set to " + val);
                 }
               } catch (NumberFormatException e) {
          -      LOG.error("Could not parse the value provided for the " + INFOPORT_OPTION +
          -          " option", e);
          +      LOG.error("Could not parse the value provided for the " + INFOPORT_OPTION + " option", e);
                 printUsageAndExit(options, -1);
               }
               // Make optional changes to the configuration based on command-line options
          -    optionToConf(cmd, MIN_WORKERS_OPTION,
          -        conf, TBoundedThreadPoolServer.MIN_WORKER_THREADS_CONF_KEY);
          -    optionToConf(cmd, MAX_WORKERS_OPTION,
          -        conf, TBoundedThreadPoolServer.MAX_WORKER_THREADS_CONF_KEY);
          -    optionToConf(cmd, MAX_QUEUE_SIZE_OPTION,
          -        conf, TBoundedThreadPoolServer.MAX_QUEUED_REQUESTS_CONF_KEY);
          -    optionToConf(cmd, KEEP_ALIVE_SEC_OPTION,
          -        conf, TBoundedThreadPoolServer.THREAD_KEEP_ALIVE_TIME_SEC_CONF_KEY);
          +    optionToConf(cmd, MIN_WORKERS_OPTION, conf,
          +      TBoundedThreadPoolServer.MIN_WORKER_THREADS_CONF_KEY);
          +    optionToConf(cmd, MAX_WORKERS_OPTION, conf,
          +      TBoundedThreadPoolServer.MAX_WORKER_THREADS_CONF_KEY);
          +    optionToConf(cmd, MAX_QUEUE_SIZE_OPTION, conf,
          +      TBoundedThreadPoolServer.MAX_QUEUED_REQUESTS_CONF_KEY);
          +    optionToConf(cmd, KEEP_ALIVE_SEC_OPTION, conf,
          +      TBoundedThreadPoolServer.THREAD_KEEP_ALIVE_TIME_SEC_CONF_KEY);
               optionToConf(cmd, READ_TIMEOUT_OPTION, conf, THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY);
               optionToConf(cmd, SELECTOR_NUM_OPTION, conf, THRIFT_SELECTOR_NUM);
           
               // Set general thrift server options
          -    boolean compact = cmd.hasOption(COMPACT_OPTION) ||
          -        conf.getBoolean(COMPACT_CONF_KEY, false);
          +    boolean compact = cmd.hasOption(COMPACT_OPTION) || conf.getBoolean(COMPACT_CONF_KEY, false);
               conf.setBoolean(COMPACT_CONF_KEY, compact);
          -    boolean framed = cmd.hasOption(FRAMED_OPTION) ||
          -        conf.getBoolean(FRAMED_CONF_KEY, false);
          +    boolean framed = cmd.hasOption(FRAMED_OPTION) || conf.getBoolean(FRAMED_CONF_KEY, false);
               conf.setBoolean(FRAMED_CONF_KEY, framed);
           
               optionToConf(cmd, BIND_OPTION, conf, BIND_CONF_KEY);
           
          -
               ImplType.setServerImpl(cmd, conf);
             }
           
          @@ -829,8 +800,8 @@ public void stop() {
               }
             }
           
          -  protected static void optionToConf(CommandLine cmd, String option,
          -      Configuration conf, String destConfKey) {
          +  protected static void optionToConf(CommandLine cmd, String option, Configuration conf,
          +      String destConfKey) {
               if (cmd.hasOption(option)) {
                 String value = cmd.getOptionValue(option);
                 LOG.info("Set configuration key:" + destConfKey + " value:" + value);
          @@ -878,7 +849,7 @@ public Object run() {
               return 0;
             }
           
          -  public static void main(String [] args) throws Exception {
          +  public static void main(String[] args) throws Exception {
               LOG.info("***** STARTING service '" + ThriftServer.class.getSimpleName() + "' *****");
               VersionInfo.logVersion();
               final Configuration conf = HBaseConfiguration.create();
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
          index 648a5da83318..aed106b66b35 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -15,7 +15,6 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import static org.apache.hadoop.hbase.util.Bytes.getBytes;
          @@ -54,36 +53,33 @@ private ThriftUtilities() {
             }
           
             /**
          -   * This utility method creates a new Hbase HColumnDescriptor object based on a
          -   * Thrift ColumnDescriptor "struct".
          -   *
          +   * This utility method creates a new Hbase HColumnDescriptor object based on a Thrift
          +   * ColumnDescriptor "struct".
              * @param in Thrift ColumnDescriptor object
              * @return ModifyableColumnFamilyDescriptor
              * @throws IllegalArgument if the column name is empty
              */
          -  public static ColumnFamilyDescriptor colDescFromThrift(
          -      ColumnDescriptor in) throws IllegalArgument {
          +  public static ColumnFamilyDescriptor colDescFromThrift(ColumnDescriptor in)
          +      throws IllegalArgument {
               Compression.Algorithm comp =
          -      Compression.getCompressionAlgorithmByName(in.compression.toLowerCase(Locale.ROOT));
          -    BloomType bt =
          -      BloomType.valueOf(in.bloomFilterType);
          +        Compression.getCompressionAlgorithmByName(in.compression.toLowerCase(Locale.ROOT));
          +    BloomType bt = BloomType.valueOf(in.bloomFilterType);
           
               if (in.name == null || !in.name.hasRemaining()) {
                 throw new IllegalArgument("column name is empty");
               }
          -    byte [] parsedName = CellUtil.parseColumn(Bytes.getBytes(in.name))[0];
          +    byte[] parsedName = CellUtil.parseColumn(Bytes.getBytes(in.name))[0];
               return ColumnFamilyDescriptorBuilder.newBuilder(parsedName).setMaxVersions(in.maxVersions)
          -      .setCompressionType(comp).setInMemory(in.inMemory).setBlockCacheEnabled(in.blockCacheEnabled)
          -      .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE).setBloomFilterType(bt)
          -      .build();
          +        .setCompressionType(comp).setInMemory(in.inMemory)
          +        .setBlockCacheEnabled(in.blockCacheEnabled)
          +        .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE).setBloomFilterType(bt)
          +        .build();
             }
           
             /**
          -   * This utility method creates a new Thrift ColumnDescriptor "struct" based on
          -   * an Hbase HColumnDescriptor object.
          -   *
          -   * @param in
          -   *          Hbase HColumnDescriptor object
          +   * This utility method creates a new Thrift ColumnDescriptor "struct" based on an Hbase
          +   * HColumnDescriptor object.
          +   * @param in Hbase HColumnDescriptor object
              * @return Thrift ColumnDescriptor
              */
             public static ColumnDescriptor colDescFromHbase(ColumnFamilyDescriptor in) {
          @@ -99,11 +95,9 @@ public static ColumnDescriptor colDescFromHbase(ColumnFamilyDescriptor in) {
             }
           
             /**
          -   * This utility method creates a list of Thrift TCell "struct" based on
          -   * an Hbase Cell object. The empty list is returned if the input is null.
          -   *
          -   * @param in
          -   *          Hbase Cell object
          +   * This utility method creates a list of Thrift TCell "struct" based on an Hbase Cell object. The
          +   * empty list is returned if the input is null.
          +   * @param in Hbase Cell object
              * @return Thrift TCell array
              */
   public static List<TCell> cellFromHBase(Cell in) {
@@ -115,8 +109,8 @@ public static List<TCell> cellFromHBase(Cell in) {
             }
           
             /**
          -   * This utility method creates a list of Thrift TCell "struct" based on
          -   * an Hbase Cell array. The empty list is returned if the input is null.
          +   * This utility method creates a list of Thrift TCell "struct" based on an Hbase Cell array. The
          +   * empty list is returned if the input is null.
              * @param in Hbase Cell array
              * @return Thrift TCell array
              */
@@ -134,24 +128,19 @@ public static List<TCell> cellFromHBase(Cell[] in) {
             }
           
             /**
          -   * This utility method creates a list of Thrift TRowResult "struct" based on
          -   * an Hbase RowResult object. The empty list is returned if the input is
          -   * null.
          -   *
          -   * @param in
          -   *          Hbase RowResult object
          -   * @param sortColumns
          -   *          This boolean dictates if row data is returned in a sorted order
          -   *          sortColumns = True will set TRowResult's sortedColumns member
          -   *                        which is an ArrayList of TColumn struct
          -   *          sortColumns = False will set TRowResult's columns member which is
          -   *                        a map of columnName and TCell struct
          +   * This utility method creates a list of Thrift TRowResult "struct" based on an Hbase RowResult
          +   * object. The empty list is returned if the input is null.
          +   * @param in Hbase RowResult object
+   * @param sortColumns This boolean dictates if row data is returned in a sorted order.
+   *          sortColumns = True will set TRowResult's sortedColumns member, which is an ArrayList
+   *          of TColumn struct; sortColumns = False will set TRowResult's columns member, which is
+   *          a map of columnName and TCell struct
              * @return Thrift TRowResult array
              */
   public static List<TRowResult> rowResultFromHBase(Result[] in, boolean sortColumns) {
     List<TRowResult> results = new ArrayList<>(in.length);
               for (Result result_ : in) {
          -      if(result_ == null || result_.isEmpty()) {
          +      if (result_ == null || result_.isEmpty()) {
                   continue;
                 }
           
          @@ -162,17 +151,17 @@ public static List rowResultFromHBase(Result[] in, boolean sortColum
                   result.sortedColumns = new ArrayList<>();
                   for (Cell kv : result_.rawCells()) {
                     result.sortedColumns.add(new TColumn(
          -              ByteBuffer.wrap(CellUtil.makeColumn(CellUtil.cloneFamily(kv),
          -                  CellUtil.cloneQualifier(kv))),
          +              ByteBuffer
          +                  .wrap(CellUtil.makeColumn(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv))),
                         new TCell(ByteBuffer.wrap(CellUtil.cloneValue(kv)), kv.getTimestamp())));
                   }
                 } else {
                   result.columns = new TreeMap<>();
                   for (Cell kv : result_.rawCells()) {
                     result.columns.put(
          -              ByteBuffer.wrap(CellUtil.makeColumn(CellUtil.cloneFamily(kv),
          -                  CellUtil.cloneQualifier(kv))),
          -              new TCell(ByteBuffer.wrap(CellUtil.cloneValue(kv)), kv.getTimestamp()));
          +            ByteBuffer
          +                .wrap(CellUtil.makeColumn(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv))),
          +            new TCell(ByteBuffer.wrap(CellUtil.cloneValue(kv)), kv.getTimestamp()));
                   }
                 }
           
          @@ -183,12 +172,9 @@ public static List rowResultFromHBase(Result[] in, boolean sortColum
             }
           
             /**
          -   * This utility method creates a list of Thrift TRowResult "struct" based on
          -   * an array of Hbase RowResult objects. The empty list is returned if the input is
          -   * null.
          -   *
          -   * @param in
          -   *          Array of Hbase RowResult objects
          +   * This utility method creates a list of Thrift TRowResult "struct" based on an array of Hbase
          +   * RowResult objects. The empty list is returned if the input is null.
          +   * @param in Array of Hbase RowResult objects
              * @return Thrift TRowResult array
              */
   public static List<TRowResult> rowResultFromHBase(Result[] in) {
@@ -196,7 +182,7 @@ public static List<TRowResult> rowResultFromHBase(Result[] in) {
             }
           
   public static List<TRowResult> rowResultFromHBase(Result in) {
          -    Result [] result = { in };
          +    Result[] result = { in };
               return rowResultFromHBase(result);
             }
           
          @@ -245,12 +231,23 @@ public static Permission.Action[] permissionActionsFromString(String permission_
     Set<Permission.Action> actions = new HashSet<>();
               for (char c : permission_actions.toCharArray()) {
                 switch (c) {
          -        case 'R': actions.add(Permission.Action.READ);   break;
          -        case 'W': actions.add(Permission.Action.WRITE);  break;
          -        case 'C': actions.add(Permission.Action.CREATE); break;
          -        case 'X': actions.add(Permission.Action.EXEC);   break;
          -        case 'A': actions.add(Permission.Action.ADMIN);  break;
          -        default:                                         break;
          +        case 'R':
          +          actions.add(Permission.Action.READ);
          +          break;
          +        case 'W':
          +          actions.add(Permission.Action.WRITE);
          +          break;
          +        case 'C':
          +          actions.add(Permission.Action.CREATE);
          +          break;
          +        case 'X':
          +          actions.add(Permission.Action.EXEC);
          +          break;
          +        case 'A':
          +          actions.add(Permission.Action.ADMIN);
          +          break;
          +        default:
          +          break;
                 }
               }
               return actions.toArray(new Permission.Action[0]);
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
          index 612a3ce50083..39910a62f244 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
          @@ -1,32 +1,54 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * An AlreadyExists exceptions signals that a table with the specified
          - * name already exists
+ * An AlreadyExists exception signals that a table with the specified name already exists
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class AlreadyExists extends org.apache.thrift.TException implements org.apache.thrift.TBase<AlreadyExists, AlreadyExists._Fields>, java.io.Serializable, Cloneable, Comparable<AlreadyExists> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlreadyExists");
          -
          -  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new AlreadyExistsStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new AlreadyExistsTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class AlreadyExists extends org.apache.thrift.TException
+    implements org.apache.thrift.TBase<AlreadyExists, AlreadyExists._Fields>,
+    java.io.Serializable, Cloneable, Comparable<AlreadyExists> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("AlreadyExists");
          +
          +  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new AlreadyExistsStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new AlreadyExistsTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String message; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    MESSAGE((short)1, "message");
          +    MESSAGE((short) 1, "message");
           
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -39,7 +61,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // MESSAGE
                     return MESSAGE;
                   default:
          @@ -48,12 +70,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -85,19 +107,22 @@ public java.lang.String getFieldName() {
             // isset id assignments
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.MESSAGE,
          +      new org.apache.thrift.meta_data.FieldMetaData("message",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlreadyExists.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlreadyExists.class,
          +      metaDataMap);
             }
           
             public AlreadyExists() {
             }
           
          -  public AlreadyExists(
          -    java.lang.String message)
          -  {
          +  public AlreadyExists(java.lang.String message) {
               this();
               this.message = message;
             }
          @@ -145,15 +170,16 @@ public void setMessageIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case MESSAGE:
          -      if (value == null) {
          -        unsetMessage();
          -      } else {
          -        setMessage((java.lang.String)value);
          -      }
          -      break;
          +      case MESSAGE:
          +        if (value == null) {
          +          unsetMessage();
          +        } else {
          +          setMessage((java.lang.String) value);
          +        }
          +        break;
           
               }
             }
          @@ -161,46 +187,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case MESSAGE:
          -      return getMessage();
          +      case MESSAGE:
          +        return getMessage();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case MESSAGE:
          -      return isSetMessage();
          +      case MESSAGE:
          +        return isSetMessage();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof AlreadyExists)
          -      return this.equals((AlreadyExists)that);
          +    if (that instanceof AlreadyExists) return this.equals((AlreadyExists) that);
               return false;
             }
           
             public boolean equals(AlreadyExists that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_message = true && this.isSetMessage();
               boolean that_present_message = true && that.isSetMessage();
               if (this_present_message || that_present_message) {
          -      if (!(this_present_message && that_present_message))
          -        return false;
          -      if (!this.message.equals(that.message))
          -        return false;
          +      if (!(this_present_message && that_present_message)) return false;
          +      if (!this.message.equals(that.message)) return false;
               }
           
               return true;
          @@ -211,8 +235,7 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetMessage()) ? 131071 : 524287);
          -    if (isSetMessage())
          -      hashCode = hashCode * 8191 + message.hashCode();
          +    if (isSetMessage()) hashCode = hashCode * 8191 + message.hashCode();
           
               return hashCode;
             }
          @@ -247,7 +270,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -274,35 +298,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class AlreadyExistsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class AlreadyExistsStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public AlreadyExistsStandardScheme getScheme() {
                 return new AlreadyExistsStandardScheme();
               }
             }
           
           -  private static class AlreadyExistsStandardScheme extends org.apache.thrift.scheme.StandardScheme<AlreadyExists> {
          +  private static class AlreadyExistsStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<AlreadyExists> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, AlreadyExists struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, AlreadyExists struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -310,7 +339,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlreadyExists struc
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.message = iprot.readString();
                         struct.setMessageIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -325,7 +354,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlreadyExists struc
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, AlreadyExists struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, AlreadyExists struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -340,17 +370,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlreadyExists stru
           
             }
           
          -  private static class AlreadyExistsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class AlreadyExistsTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public AlreadyExistsTupleScheme getScheme() {
                 return new AlreadyExistsTupleScheme();
               }
             }
           
           -  private static class AlreadyExistsTupleScheme extends org.apache.thrift.scheme.TupleScheme<AlreadyExists> {
          +  private static class AlreadyExistsTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<AlreadyExists> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, AlreadyExists struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, AlreadyExists struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetMessage()) {
                   optionals.set(0);
          @@ -362,8 +396,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlreadyExists struc
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, AlreadyExists struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, AlreadyExists struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(1);
                 if (incoming.get(0)) {
                   struct.message = iprot.readString();
          @@ -372,8 +408,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlreadyExists struct
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
          index e1ec71d12549..079e79911694 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
          @@ -1,34 +1,57 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * A BatchMutation object is used to apply a number of Mutations to a single row.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class BatchMutation implements org.apache.thrift.TBase<BatchMutation, BatchMutation._Fields>, java.io.Serializable, Cloneable, Comparable<BatchMutation> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BatchMutation");
          -
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.LIST, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new BatchMutationStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new BatchMutationTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           +public class BatchMutation implements org.apache.thrift.TBase<BatchMutation, BatchMutation._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<BatchMutation> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("BatchMutation");
          +
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.LIST,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new BatchMutationStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new BatchMutationTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
              public @org.apache.thrift.annotation.Nullable java.util.List<Mutation> mutations; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    ROW((short)1, "row"),
          -    MUTATIONS((short)2, "mutations");
          +    ROW((short) 1, "row"), MUTATIONS((short) 2, "mutations");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -41,7 +64,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // ROW
                     return ROW;
                   case 2: // MUTATIONS
          @@ -52,12 +75,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -89,23 +112,27 @@ public java.lang.String getFieldName() {
             // isset id assignments
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Mutation.class))));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                Mutation.class))));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BatchMutation.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BatchMutation.class,
          +      metaDataMap);
             }
           
             public BatchMutation() {
             }
           
          -  public BatchMutation(
          -    java.nio.ByteBuffer row,
           -    java.util.List<Mutation> mutations)
          -  {
           +  public BatchMutation(java.nio.ByteBuffer row, java.util.List<Mutation> mutations) {
               this();
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
               this.mutations = mutations;
          @@ -119,7 +146,8 @@ public BatchMutation(BatchMutation other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetMutations()) {
           -      java.util.List<Mutation> __this__mutations = new java.util.ArrayList<Mutation>(other.mutations.size());
           +      java.util.List<Mutation> __this__mutations =
           +          new java.util.ArrayList<Mutation>(other.mutations.size());
                 for (Mutation other_element : other.mutations) {
                   __this__mutations.add(new Mutation(other_element));
                 }
          @@ -147,7 +175,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public BatchMutation setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
           @@ -192,7 +220,8 @@ public java.util.List<Mutation> getMutations() {
               return this.mutations;
             }
           
           -  public BatchMutation setMutations(@org.apache.thrift.annotation.Nullable java.util.List<Mutation> mutations) {
          +  public BatchMutation
           +      setMutations(@org.apache.thrift.annotation.Nullable java.util.List<Mutation> mutations) {
               this.mutations = mutations;
               return this;
             }
          @@ -212,27 +241,28 @@ public void setMutationsIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case MUTATIONS:
          -      if (value == null) {
          -        unsetMutations();
          -      } else {
           -        setMutations((java.util.List<Mutation>)value);
          -      }
          -      break;
          +      case MUTATIONS:
          +        if (value == null) {
          +          unsetMutations();
          +        } else {
           +          setMutations((java.util.List<Mutation>) value);
          +        }
          +        break;
           
               }
             }
          @@ -240,60 +270,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case MUTATIONS:
          -      return getMutations();
          +      case MUTATIONS:
          +        return getMutations();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case ROW:
          -      return isSetRow();
          -    case MUTATIONS:
          -      return isSetMutations();
          +      case ROW:
          +        return isSetRow();
          +      case MUTATIONS:
          +        return isSetMutations();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof BatchMutation)
          -      return this.equals((BatchMutation)that);
          +    if (that instanceof BatchMutation) return this.equals((BatchMutation) that);
               return false;
             }
           
             public boolean equals(BatchMutation that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_mutations = true && this.isSetMutations();
               boolean that_present_mutations = true && that.isSetMutations();
               if (this_present_mutations || that_present_mutations) {
          -      if (!(this_present_mutations && that_present_mutations))
          -        return false;
          -      if (!this.mutations.equals(that.mutations))
          -        return false;
          +      if (!(this_present_mutations && that_present_mutations)) return false;
          +      if (!this.mutations.equals(that.mutations)) return false;
               }
           
               return true;
          @@ -304,12 +330,10 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetMutations()) ? 131071 : 524287);
          -    if (isSetMutations())
          -      hashCode = hashCode * 8191 + mutations.hashCode();
          +    if (isSetMutations()) hashCode = hashCode * 8191 + mutations.hashCode();
           
               return hashCode;
             }
          @@ -354,7 +378,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -389,35 +414,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class BatchMutationStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class BatchMutationStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public BatchMutationStandardScheme getScheme() {
                 return new BatchMutationStandardScheme();
               }
             }
           
           -  private static class BatchMutationStandardScheme extends org.apache.thrift.scheme.StandardScheme<BatchMutation> {
          +  private static class BatchMutationStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<BatchMutation> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, BatchMutation struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, BatchMutation struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -425,7 +455,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, BatchMutation struc
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -434,9 +464,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, BatchMutation struc
                         {
                           org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
                            struct.mutations = new java.util.ArrayList<Mutation>(_list0.size);
          -                @org.apache.thrift.annotation.Nullable Mutation _elem1;
          -                for (int _i2 = 0; _i2 < _list0.size; ++_i2)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                Mutation _elem1;
          +                for (int _i2 = 0; _i2 < _list0.size; ++_i2) {
                             _elem1 = new Mutation();
                             _elem1.read(iprot);
                             struct.mutations.add(_elem1);
          @@ -444,7 +474,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, BatchMutation struc
                           iprot.readListEnd();
                         }
                         struct.setMutationsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -459,7 +489,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, BatchMutation struc
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, BatchMutation struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, BatchMutation struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -471,9 +502,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, BatchMutation stru
                 if (struct.mutations != null) {
                   oprot.writeFieldBegin(MUTATIONS_FIELD_DESC);
                   {
          -          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size()));
          -          for (Mutation _iter3 : struct.mutations)
          -          {
          +          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +              org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size()));
          +          for (Mutation _iter3 : struct.mutations) {
                       _iter3.write(oprot);
                     }
                     oprot.writeListEnd();
          @@ -486,17 +517,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, BatchMutation stru
           
             }
           
          -  private static class BatchMutationTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class BatchMutationTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public BatchMutationTupleScheme getScheme() {
                 return new BatchMutationTupleScheme();
               }
             }
           
           -  private static class BatchMutationTupleScheme extends org.apache.thrift.scheme.TupleScheme<BatchMutation> {
          +  private static class BatchMutationTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<BatchMutation> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, BatchMutation struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, BatchMutation struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetRow()) {
                   optionals.set(0);
          @@ -511,8 +546,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, BatchMutation struc
                 if (struct.isSetMutations()) {
                   {
                     oprot.writeI32(struct.mutations.size());
          -          for (Mutation _iter4 : struct.mutations)
          -          {
          +          for (Mutation _iter4 : struct.mutations) {
                       _iter4.write(oprot);
                     }
                   }
          @@ -520,8 +554,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, BatchMutation struc
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, BatchMutation struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, BatchMutation struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(2);
                 if (incoming.get(0)) {
                   struct.row = iprot.readBinary();
          @@ -529,11 +565,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, BatchMutation struct
                 }
                 if (incoming.get(1)) {
                   {
          -          org.apache.thrift.protocol.TList _list5 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list5 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                      struct.mutations = new java.util.ArrayList<Mutation>(_list5.size);
          -          @org.apache.thrift.annotation.Nullable Mutation _elem6;
          -          for (int _i7 = 0; _i7 < _list5.size; ++_i7)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          Mutation _elem6;
          +          for (int _i7 = 0; _i7 < _list5.size; ++_i7) {
                       _elem6 = new Mutation();
                       _elem6.read(iprot);
                       struct.mutations.add(_elem6);
          @@ -544,8 +581,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, BatchMutation struct
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java
          index 58cdc9db506c..3d815c5b63de 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java
          @@ -1,33 +1,67 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * An HColumnDescriptor contains information about a column family
          - * such as the number of versions, compression settings, etc. It is
          - * used as input when creating a table or adding a column.
          + * An HColumnDescriptor contains information about a column family such as the number of versions,
          + * compression settings, etc. It is used as input when creating a table or adding a column.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class ColumnDescriptor implements org.apache.thrift.TBase<ColumnDescriptor, ColumnDescriptor._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnDescriptor> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnDescriptor");
          -
          -  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField MAX_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxVersions", org.apache.thrift.protocol.TType.I32, (short)2);
          -  private static final org.apache.thrift.protocol.TField COMPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("compression", org.apache.thrift.protocol.TType.STRING, (short)3);
          -  private static final org.apache.thrift.protocol.TField IN_MEMORY_FIELD_DESC = new org.apache.thrift.protocol.TField("inMemory", org.apache.thrift.protocol.TType.BOOL, (short)4);
          -  private static final org.apache.thrift.protocol.TField BLOOM_FILTER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("bloomFilterType", org.apache.thrift.protocol.TType.STRING, (short)5);
          -  private static final org.apache.thrift.protocol.TField BLOOM_FILTER_VECTOR_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("bloomFilterVectorSize", org.apache.thrift.protocol.TType.I32, (short)6);
          -  private static final org.apache.thrift.protocol.TField BLOOM_FILTER_NB_HASHES_FIELD_DESC = new org.apache.thrift.protocol.TField("bloomFilterNbHashes", org.apache.thrift.protocol.TType.I32, (short)7);
          -  private static final org.apache.thrift.protocol.TField BLOCK_CACHE_ENABLED_FIELD_DESC = new org.apache.thrift.protocol.TField("blockCacheEnabled", org.apache.thrift.protocol.TType.BOOL, (short)8);
          -  private static final org.apache.thrift.protocol.TField TIME_TO_LIVE_FIELD_DESC = new org.apache.thrift.protocol.TField("timeToLive", org.apache.thrift.protocol.TType.I32, (short)9);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new ColumnDescriptorStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new ColumnDescriptorTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class ColumnDescriptor
           +    implements org.apache.thrift.TBase<ColumnDescriptor, ColumnDescriptor._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<ColumnDescriptor> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("ColumnDescriptor");
          +
          +  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField MAX_VERSIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("maxVersions", org.apache.thrift.protocol.TType.I32,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField COMPRESSION_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("compression", org.apache.thrift.protocol.TType.STRING,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField IN_MEMORY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("inMemory", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField BLOOM_FILTER_TYPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("bloomFilterType",
          +          org.apache.thrift.protocol.TType.STRING, (short) 5);
          +  private static final org.apache.thrift.protocol.TField BLOOM_FILTER_VECTOR_SIZE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("bloomFilterVectorSize",
          +          org.apache.thrift.protocol.TType.I32, (short) 6);
          +  private static final org.apache.thrift.protocol.TField BLOOM_FILTER_NB_HASHES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("bloomFilterNbHashes",
          +          org.apache.thrift.protocol.TType.I32, (short) 7);
          +  private static final org.apache.thrift.protocol.TField BLOCK_CACHE_ENABLED_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("blockCacheEnabled",
          +          org.apache.thrift.protocol.TType.BOOL, (short) 8);
          +  private static final org.apache.thrift.protocol.TField TIME_TO_LIVE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timeToLive", org.apache.thrift.protocol.TType.I32,
          +          (short) 9);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new ColumnDescriptorStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new ColumnDescriptorTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer name; // required
             public int maxVersions; // required
           @@ -39,19 +73,20 @@ public class ColumnDescriptor implements org.apache.thrift.TBase<ColumnDescriptor, ColumnDescriptor._Fields>
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    NAME((short) 1, "name"), MAX_VERSIONS((short) 2, "maxVersions"),
          +    COMPRESSION((short) 3, "compression"), IN_MEMORY((short) 4, "inMemory"),
          +    BLOOM_FILTER_TYPE((short) 5, "bloomFilterType"),
          +    BLOOM_FILTER_VECTOR_SIZE((short) 6, "bloomFilterVectorSize"),
          +    BLOOM_FILTER_NB_HASHES((short) 7, "bloomFilterNbHashes"),
          +    BLOCK_CACHE_ENABLED((short) 8, "blockCacheEnabled"), TIME_TO_LIVE((short) 9, "timeToLive");
          +
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -64,7 +99,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // NAME
                     return NAME;
                   case 2: // MAX_VERSIONS
          @@ -89,12 +124,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -133,27 +168,44 @@ public java.lang.String getFieldName() {
             private byte __isset_bitfield = 0;
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("name",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.COMPRESSION, new org.apache.thrift.meta_data.FieldMetaData("compression", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.IN_MEMORY, new org.apache.thrift.meta_data.FieldMetaData("inMemory", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    tmpMap.put(_Fields.COMPRESSION,
          +      new org.apache.thrift.meta_data.FieldMetaData("compression",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.IN_MEMORY, new org.apache.thrift.meta_data.FieldMetaData("inMemory",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.BLOOM_FILTER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("bloomFilterType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.BLOOM_FILTER_VECTOR_SIZE, new org.apache.thrift.meta_data.FieldMetaData("bloomFilterVectorSize", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    tmpMap.put(_Fields.BLOOM_FILTER_TYPE,
          +      new org.apache.thrift.meta_data.FieldMetaData("bloomFilterType",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.BLOOM_FILTER_VECTOR_SIZE, new org.apache.thrift.meta_data.FieldMetaData(
          +        "bloomFilterVectorSize", org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.BLOOM_FILTER_NB_HASHES, new org.apache.thrift.meta_data.FieldMetaData("bloomFilterNbHashes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    tmpMap.put(_Fields.BLOOM_FILTER_NB_HASHES, new org.apache.thrift.meta_data.FieldMetaData(
          +        "bloomFilterNbHashes", org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.BLOCK_CACHE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData("blockCacheEnabled", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    tmpMap.put(_Fields.BLOCK_CACHE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData(
          +        "blockCacheEnabled", org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.TIME_TO_LIVE, new org.apache.thrift.meta_data.FieldMetaData("timeToLive", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    tmpMap.put(_Fields.TIME_TO_LIVE, new org.apache.thrift.meta_data.FieldMetaData("timeToLive",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnDescriptor.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnDescriptor.class,
          +      metaDataMap);
             }
           
             public ColumnDescriptor() {
          @@ -175,17 +227,9 @@ public ColumnDescriptor() {
           
             }
           
          -  public ColumnDescriptor(
          -    java.nio.ByteBuffer name,
          -    int maxVersions,
          -    java.lang.String compression,
          -    boolean inMemory,
          -    java.lang.String bloomFilterType,
          -    int bloomFilterVectorSize,
          -    int bloomFilterNbHashes,
          -    boolean blockCacheEnabled,
          -    int timeToLive)
          -  {
          +  public ColumnDescriptor(java.nio.ByteBuffer name, int maxVersions, java.lang.String compression,
          +      boolean inMemory, java.lang.String bloomFilterType, int bloomFilterVectorSize,
          +      int bloomFilterNbHashes, boolean blockCacheEnabled, int timeToLive) {
               this();
               this.name = org.apache.thrift.TBaseHelper.copyBinary(name);
               this.maxVersions = maxVersions;
          @@ -261,7 +305,7 @@ public java.nio.ByteBuffer bufferForName() {
             }
           
             public ColumnDescriptor setName(byte[] name) {
          -    this.name = name == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(name.clone());
          +    this.name = name == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(name.clone());
               return this;
             }
           
          @@ -296,7 +340,8 @@ public ColumnDescriptor setMaxVersions(int maxVersions) {
             }
           
             public void unsetMaxVersions() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID);
             }
           
             /** Returns true if field maxVersions is set (has been assigned a value) and false otherwise */
          @@ -305,7 +350,8 @@ public boolean isSetMaxVersions() {
             }
           
             public void setMaxVersionsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID, value);
             }
           
             @org.apache.thrift.annotation.Nullable
          @@ -313,7 +359,8 @@ public java.lang.String getCompression() {
               return this.compression;
             }
           
          -  public ColumnDescriptor setCompression(@org.apache.thrift.annotation.Nullable java.lang.String compression) {
          +  public ColumnDescriptor
          +      setCompression(@org.apache.thrift.annotation.Nullable java.lang.String compression) {
               this.compression = compression;
               return this;
             }
          @@ -344,7 +391,8 @@ public ColumnDescriptor setInMemory(boolean inMemory) {
             }
           
             public void unsetInMemory() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __INMEMORY_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __INMEMORY_ISSET_ID);
             }
           
             /** Returns true if field inMemory is set (has been assigned a value) and false otherwise */
          @@ -353,7 +401,8 @@ public boolean isSetInMemory() {
             }
           
             public void setInMemoryIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __INMEMORY_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __INMEMORY_ISSET_ID, value);
             }
           
             @org.apache.thrift.annotation.Nullable
          @@ -361,7 +410,8 @@ public java.lang.String getBloomFilterType() {
               return this.bloomFilterType;
             }
           
          -  public ColumnDescriptor setBloomFilterType(@org.apache.thrift.annotation.Nullable java.lang.String bloomFilterType) {
          +  public ColumnDescriptor
          +      setBloomFilterType(@org.apache.thrift.annotation.Nullable java.lang.String bloomFilterType) {
               this.bloomFilterType = bloomFilterType;
               return this;
             }
          @@ -370,7 +420,9 @@ public void unsetBloomFilterType() {
               this.bloomFilterType = null;
             }
           
          -  /** Returns true if field bloomFilterType is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field bloomFilterType is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetBloomFilterType() {
               return this.bloomFilterType != null;
             }
          @@ -392,16 +444,22 @@ public ColumnDescriptor setBloomFilterVectorSize(int bloomFilterVectorSize) {
             }
           
             public void unsetBloomFilterVectorSize() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLOOMFILTERVECTORSIZE_ISSET_ID);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield,
          +      __BLOOMFILTERVECTORSIZE_ISSET_ID);
             }
           
          -  /** Returns true if field bloomFilterVectorSize is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field bloomFilterVectorSize is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSetBloomFilterVectorSize() {
          -    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __BLOOMFILTERVECTORSIZE_ISSET_ID);
          +    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield,
          +      __BLOOMFILTERVECTORSIZE_ISSET_ID);
             }
           
             public void setBloomFilterVectorSizeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BLOOMFILTERVECTORSIZE_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __BLOOMFILTERVECTORSIZE_ISSET_ID, value);
             }
           
             public int getBloomFilterNbHashes() {
          @@ -415,16 +473,22 @@ public ColumnDescriptor setBloomFilterNbHashes(int bloomFilterNbHashes) {
             }
           
             public void unsetBloomFilterNbHashes() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLOOMFILTERNBHASHES_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLOOMFILTERNBHASHES_ISSET_ID);
             }
           
          -  /** Returns true if field bloomFilterNbHashes is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field bloomFilterNbHashes is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSetBloomFilterNbHashes() {
          -    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __BLOOMFILTERNBHASHES_ISSET_ID);
          +    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield,
          +      __BLOOMFILTERNBHASHES_ISSET_ID);
             }
           
             public void setBloomFilterNbHashesIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BLOOMFILTERNBHASHES_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __BLOOMFILTERNBHASHES_ISSET_ID, value);
             }
           
             public boolean isBlockCacheEnabled() {
          @@ -438,16 +502,20 @@ public ColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
             }
           
             public void unsetBlockCacheEnabled() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLOCKCACHEENABLED_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLOCKCACHEENABLED_ISSET_ID);
             }
           
          -  /** Returns true if field blockCacheEnabled is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field blockCacheEnabled is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetBlockCacheEnabled() {
               return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __BLOCKCACHEENABLED_ISSET_ID);
             }
           
             public void setBlockCacheEnabledIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BLOCKCACHEENABLED_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __BLOCKCACHEENABLED_ISSET_ID, value);
             }
           
             public int getTimeToLive() {
          @@ -461,7 +529,8 @@ public ColumnDescriptor setTimeToLive(int timeToLive) {
             }
           
             public void unsetTimeToLive() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMETOLIVE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMETOLIVE_ISSET_ID);
             }
           
             /** Returns true if field timeToLive is set (has been assigned a value) and false otherwise */
          @@ -470,86 +539,88 @@ public boolean isSetTimeToLive() {
             }
           
             public void setTimeToLiveIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMETOLIVE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMETOLIVE_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case NAME:
          -      if (value == null) {
          -        unsetName();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setName((byte[])value);
          +      case NAME:
          +        if (value == null) {
          +          unsetName();
                   } else {
          -          setName((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setName((byte[]) value);
          +          } else {
          +            setName((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case MAX_VERSIONS:
          -      if (value == null) {
          -        unsetMaxVersions();
          -      } else {
          -        setMaxVersions((java.lang.Integer)value);
          -      }
          -      break;
          +      case MAX_VERSIONS:
          +        if (value == null) {
          +          unsetMaxVersions();
          +        } else {
          +          setMaxVersions((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case COMPRESSION:
          -      if (value == null) {
          -        unsetCompression();
          -      } else {
          -        setCompression((java.lang.String)value);
          -      }
          -      break;
          +      case COMPRESSION:
          +        if (value == null) {
          +          unsetCompression();
          +        } else {
          +          setCompression((java.lang.String) value);
          +        }
          +        break;
           
          -    case IN_MEMORY:
          -      if (value == null) {
          -        unsetInMemory();
          -      } else {
          -        setInMemory((java.lang.Boolean)value);
          -      }
          -      break;
          +      case IN_MEMORY:
          +        if (value == null) {
          +          unsetInMemory();
          +        } else {
          +          setInMemory((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case BLOOM_FILTER_TYPE:
          -      if (value == null) {
          -        unsetBloomFilterType();
          -      } else {
          -        setBloomFilterType((java.lang.String)value);
          -      }
          -      break;
          +      case BLOOM_FILTER_TYPE:
          +        if (value == null) {
          +          unsetBloomFilterType();
          +        } else {
          +          setBloomFilterType((java.lang.String) value);
          +        }
          +        break;
           
          -    case BLOOM_FILTER_VECTOR_SIZE:
          -      if (value == null) {
          -        unsetBloomFilterVectorSize();
          -      } else {
          -        setBloomFilterVectorSize((java.lang.Integer)value);
          -      }
          -      break;
          +      case BLOOM_FILTER_VECTOR_SIZE:
          +        if (value == null) {
          +          unsetBloomFilterVectorSize();
          +        } else {
          +          setBloomFilterVectorSize((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case BLOOM_FILTER_NB_HASHES:
          -      if (value == null) {
          -        unsetBloomFilterNbHashes();
          -      } else {
          -        setBloomFilterNbHashes((java.lang.Integer)value);
          -      }
          -      break;
          +      case BLOOM_FILTER_NB_HASHES:
          +        if (value == null) {
          +          unsetBloomFilterNbHashes();
          +        } else {
          +          setBloomFilterNbHashes((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case BLOCK_CACHE_ENABLED:
          -      if (value == null) {
          -        unsetBlockCacheEnabled();
          -      } else {
          -        setBlockCacheEnabled((java.lang.Boolean)value);
          -      }
          -      break;
          +      case BLOCK_CACHE_ENABLED:
          +        if (value == null) {
          +          unsetBlockCacheEnabled();
          +        } else {
          +          setBlockCacheEnabled((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case TIME_TO_LIVE:
          -      if (value == null) {
          -        unsetTimeToLive();
          -      } else {
          -        setTimeToLive((java.lang.Integer)value);
          -      }
          -      break;
          +      case TIME_TO_LIVE:
          +        if (value == null) {
          +          unsetTimeToLive();
          +        } else {
          +          setTimeToLive((java.lang.Integer) value);
          +        }
          +        break;
           
               }
             }
          @@ -557,158 +628,140 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case NAME:
          -      return getName();
          +      case NAME:
          +        return getName();
           
          -    case MAX_VERSIONS:
          -      return getMaxVersions();
          +      case MAX_VERSIONS:
          +        return getMaxVersions();
           
          -    case COMPRESSION:
          -      return getCompression();
          +      case COMPRESSION:
          +        return getCompression();
           
          -    case IN_MEMORY:
          -      return isInMemory();
          +      case IN_MEMORY:
          +        return isInMemory();
           
          -    case BLOOM_FILTER_TYPE:
          -      return getBloomFilterType();
          +      case BLOOM_FILTER_TYPE:
          +        return getBloomFilterType();
           
          -    case BLOOM_FILTER_VECTOR_SIZE:
          -      return getBloomFilterVectorSize();
          +      case BLOOM_FILTER_VECTOR_SIZE:
          +        return getBloomFilterVectorSize();
           
          -    case BLOOM_FILTER_NB_HASHES:
          -      return getBloomFilterNbHashes();
          +      case BLOOM_FILTER_NB_HASHES:
          +        return getBloomFilterNbHashes();
           
          -    case BLOCK_CACHE_ENABLED:
          -      return isBlockCacheEnabled();
          +      case BLOCK_CACHE_ENABLED:
          +        return isBlockCacheEnabled();
           
          -    case TIME_TO_LIVE:
          -      return getTimeToLive();
          +      case TIME_TO_LIVE:
          +        return getTimeToLive();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case NAME:
          -      return isSetName();
          -    case MAX_VERSIONS:
          -      return isSetMaxVersions();
          -    case COMPRESSION:
          -      return isSetCompression();
          -    case IN_MEMORY:
          -      return isSetInMemory();
          -    case BLOOM_FILTER_TYPE:
          -      return isSetBloomFilterType();
          -    case BLOOM_FILTER_VECTOR_SIZE:
          -      return isSetBloomFilterVectorSize();
          -    case BLOOM_FILTER_NB_HASHES:
          -      return isSetBloomFilterNbHashes();
          -    case BLOCK_CACHE_ENABLED:
          -      return isSetBlockCacheEnabled();
          -    case TIME_TO_LIVE:
          -      return isSetTimeToLive();
          +      case NAME:
          +        return isSetName();
          +      case MAX_VERSIONS:
          +        return isSetMaxVersions();
          +      case COMPRESSION:
          +        return isSetCompression();
          +      case IN_MEMORY:
          +        return isSetInMemory();
          +      case BLOOM_FILTER_TYPE:
          +        return isSetBloomFilterType();
          +      case BLOOM_FILTER_VECTOR_SIZE:
          +        return isSetBloomFilterVectorSize();
          +      case BLOOM_FILTER_NB_HASHES:
          +        return isSetBloomFilterNbHashes();
          +      case BLOCK_CACHE_ENABLED:
          +        return isSetBlockCacheEnabled();
          +      case TIME_TO_LIVE:
          +        return isSetTimeToLive();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof ColumnDescriptor)
          -      return this.equals((ColumnDescriptor)that);
          +    if (that instanceof ColumnDescriptor) return this.equals((ColumnDescriptor) that);
               return false;
             }
           
             public boolean equals(ColumnDescriptor that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_name = true && this.isSetName();
               boolean that_present_name = true && that.isSetName();
               if (this_present_name || that_present_name) {
          -      if (!(this_present_name && that_present_name))
          -        return false;
          -      if (!this.name.equals(that.name))
          -        return false;
          +      if (!(this_present_name && that_present_name)) return false;
          +      if (!this.name.equals(that.name)) return false;
               }
           
               boolean this_present_maxVersions = true;
               boolean that_present_maxVersions = true;
               if (this_present_maxVersions || that_present_maxVersions) {
          -      if (!(this_present_maxVersions && that_present_maxVersions))
          -        return false;
          -      if (this.maxVersions != that.maxVersions)
          -        return false;
          +      if (!(this_present_maxVersions && that_present_maxVersions)) return false;
          +      if (this.maxVersions != that.maxVersions) return false;
               }
           
               boolean this_present_compression = true && this.isSetCompression();
               boolean that_present_compression = true && that.isSetCompression();
               if (this_present_compression || that_present_compression) {
          -      if (!(this_present_compression && that_present_compression))
          -        return false;
          -      if (!this.compression.equals(that.compression))
          -        return false;
          +      if (!(this_present_compression && that_present_compression)) return false;
          +      if (!this.compression.equals(that.compression)) return false;
               }
           
               boolean this_present_inMemory = true;
               boolean that_present_inMemory = true;
               if (this_present_inMemory || that_present_inMemory) {
          -      if (!(this_present_inMemory && that_present_inMemory))
          -        return false;
          -      if (this.inMemory != that.inMemory)
          -        return false;
          +      if (!(this_present_inMemory && that_present_inMemory)) return false;
          +      if (this.inMemory != that.inMemory) return false;
               }
           
               boolean this_present_bloomFilterType = true && this.isSetBloomFilterType();
               boolean that_present_bloomFilterType = true && that.isSetBloomFilterType();
               if (this_present_bloomFilterType || that_present_bloomFilterType) {
          -      if (!(this_present_bloomFilterType && that_present_bloomFilterType))
          -        return false;
          -      if (!this.bloomFilterType.equals(that.bloomFilterType))
          -        return false;
          +      if (!(this_present_bloomFilterType && that_present_bloomFilterType)) return false;
          +      if (!this.bloomFilterType.equals(that.bloomFilterType)) return false;
               }
           
               boolean this_present_bloomFilterVectorSize = true;
               boolean that_present_bloomFilterVectorSize = true;
               if (this_present_bloomFilterVectorSize || that_present_bloomFilterVectorSize) {
          -      if (!(this_present_bloomFilterVectorSize && that_present_bloomFilterVectorSize))
          -        return false;
          -      if (this.bloomFilterVectorSize != that.bloomFilterVectorSize)
          -        return false;
          +      if (!(this_present_bloomFilterVectorSize && that_present_bloomFilterVectorSize)) return false;
          +      if (this.bloomFilterVectorSize != that.bloomFilterVectorSize) return false;
               }
           
               boolean this_present_bloomFilterNbHashes = true;
               boolean that_present_bloomFilterNbHashes = true;
               if (this_present_bloomFilterNbHashes || that_present_bloomFilterNbHashes) {
          -      if (!(this_present_bloomFilterNbHashes && that_present_bloomFilterNbHashes))
          -        return false;
          -      if (this.bloomFilterNbHashes != that.bloomFilterNbHashes)
          -        return false;
          +      if (!(this_present_bloomFilterNbHashes && that_present_bloomFilterNbHashes)) return false;
          +      if (this.bloomFilterNbHashes != that.bloomFilterNbHashes) return false;
               }
           
               boolean this_present_blockCacheEnabled = true;
               boolean that_present_blockCacheEnabled = true;
               if (this_present_blockCacheEnabled || that_present_blockCacheEnabled) {
          -      if (!(this_present_blockCacheEnabled && that_present_blockCacheEnabled))
          -        return false;
          -      if (this.blockCacheEnabled != that.blockCacheEnabled)
          -        return false;
          +      if (!(this_present_blockCacheEnabled && that_present_blockCacheEnabled)) return false;
          +      if (this.blockCacheEnabled != that.blockCacheEnabled) return false;
               }
           
               boolean this_present_timeToLive = true;
               boolean that_present_timeToLive = true;
               if (this_present_timeToLive || that_present_timeToLive) {
          -      if (!(this_present_timeToLive && that_present_timeToLive))
          -        return false;
          -      if (this.timeToLive != that.timeToLive)
          -        return false;
          +      if (!(this_present_timeToLive && that_present_timeToLive)) return false;
          +      if (this.timeToLive != that.timeToLive) return false;
               }
           
               return true;
          @@ -719,20 +772,17 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
          -    if (isSetName())
          -      hashCode = hashCode * 8191 + name.hashCode();
          +    if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
           
               hashCode = hashCode * 8191 + maxVersions;
           
               hashCode = hashCode * 8191 + ((isSetCompression()) ? 131071 : 524287);
          -    if (isSetCompression())
          -      hashCode = hashCode * 8191 + compression.hashCode();
          +    if (isSetCompression()) hashCode = hashCode * 8191 + compression.hashCode();
           
               hashCode = hashCode * 8191 + ((inMemory) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetBloomFilterType()) ? 131071 : 524287);
          -    if (isSetBloomFilterType())
          -      hashCode = hashCode * 8191 + bloomFilterType.hashCode();
          +    if (isSetBloomFilterType()) hashCode = hashCode * 8191 + bloomFilterType.hashCode();
           
               hashCode = hashCode * 8191 + bloomFilterVectorSize;
           
          @@ -793,42 +843,50 @@ public int compareTo(ColumnDescriptor other) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetBloomFilterType(), other.isSetBloomFilterType());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetBloomFilterType(), other.isSetBloomFilterType());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetBloomFilterType()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bloomFilterType, other.bloomFilterType);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.bloomFilterType, other.bloomFilterType);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetBloomFilterVectorSize(), other.isSetBloomFilterVectorSize());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetBloomFilterVectorSize(), other.isSetBloomFilterVectorSize());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetBloomFilterVectorSize()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bloomFilterVectorSize, other.bloomFilterVectorSize);
          +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bloomFilterVectorSize,
          +        other.bloomFilterVectorSize);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetBloomFilterNbHashes(), other.isSetBloomFilterNbHashes());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetBloomFilterNbHashes(), other.isSetBloomFilterNbHashes());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetBloomFilterNbHashes()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bloomFilterNbHashes, other.bloomFilterNbHashes);
          +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bloomFilterNbHashes,
          +        other.bloomFilterNbHashes);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetBlockCacheEnabled(), other.isSetBlockCacheEnabled());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetBlockCacheEnabled(), other.isSetBlockCacheEnabled());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetBlockCacheEnabled()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.blockCacheEnabled, other.blockCacheEnabled);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.blockCacheEnabled, other.blockCacheEnabled);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -855,7 +913,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -922,37 +981,43 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class ColumnDescriptorStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class ColumnDescriptorStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public ColumnDescriptorStandardScheme getScheme() {
                 return new ColumnDescriptorStandardScheme();
               }
             }
           
-  private static class ColumnDescriptorStandardScheme extends org.apache.thrift.scheme.StandardScheme<ColumnDescriptor> {
+  private static class ColumnDescriptorStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<ColumnDescriptor> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -960,7 +1025,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.name = iprot.readBinary();
                         struct.setNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -968,7 +1033,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.maxVersions = iprot.readI32();
                         struct.setMaxVersionsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -976,7 +1041,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.compression = iprot.readString();
                         struct.setCompressionIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -984,7 +1049,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.inMemory = iprot.readBool();
                         struct.setInMemoryIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -992,7 +1057,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.bloomFilterType = iprot.readString();
                         struct.setBloomFilterTypeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1000,7 +1065,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.bloomFilterVectorSize = iprot.readI32();
                         struct.setBloomFilterVectorSizeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1008,7 +1073,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.bloomFilterNbHashes = iprot.readI32();
                         struct.setBloomFilterNbHashesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1016,7 +1081,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.blockCacheEnabled = iprot.readBool();
                         struct.setBlockCacheEnabledIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1024,7 +1089,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.timeToLive = iprot.readI32();
                         struct.setTimeToLiveIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1039,7 +1104,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnDescriptor st
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnDescriptor struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnDescriptor struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -1082,17 +1148,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnDescriptor s
           
             }
           
          -  private static class ColumnDescriptorTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class ColumnDescriptorTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public ColumnDescriptorTupleScheme getScheme() {
                 return new ColumnDescriptorTupleScheme();
               }
             }
           
-  private static class ColumnDescriptorTupleScheme extends org.apache.thrift.scheme.TupleScheme<ColumnDescriptor> {
+  private static class ColumnDescriptorTupleScheme
+      extends org.apache.thrift.scheme.TupleScheme<ColumnDescriptor> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, ColumnDescriptor struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, ColumnDescriptor struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetName()) {
                   optionals.set(0);
          @@ -1152,8 +1222,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ColumnDescriptor st
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, ColumnDescriptor struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, ColumnDescriptor struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(9);
                 if (incoming.get(0)) {
                   struct.name = iprot.readBinary();
          @@ -1194,8 +1266,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnDescriptor str
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
-    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
+      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
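The ColumnDescriptor hunks above only re-wrap the generated fluent setters and the __isset bitfield helpers; behaviour is unchanged. As a quick sanity check, the sketch below shows how these accessors are typically driven from thrift1 client code. It is illustrative only: the table name, family name, and the Hbase.Iface handle are hypothetical, and setting up a Thrift transport/protocol to obtain that handle is outside this patch.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.Collections;

    import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
    import org.apache.hadoop.hbase.thrift.generated.Hbase;

    public class ColumnDescriptorUsageSketch {
      // Builds a single column family and creates a table through the thrift1 Iface.
      // Table and family names are made up for illustration.
      static void createExampleTable(Hbase.Iface client) throws Exception {
        ColumnDescriptor family = new ColumnDescriptor()
            .setName("cf:".getBytes(StandardCharsets.UTF_8)) // family names must end in a colon (:)
            .setMaxVersions(3)          // flips __MAXVERSIONS_ISSET_ID in __isset_bitfield
            .setInMemory(false)
            .setBlockCacheEnabled(true)
            .setTimeToLive(86400);
        // isSet* reads the same bitfield that the reformatted clearBit/setBit helpers maintain.
        assert family.isSetMaxVersions() && family.isSetTimeToLive();
        client.createTable(ByteBuffer.wrap("example_table".getBytes(StandardCharsets.UTF_8)),
          Collections.singletonList(family));
      }
    }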
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
          index 43bc7fb60118..dc38be1e56b6 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
          @@ -1,630 +1,556 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-10-05")
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-10-05")
           public class Hbase {
           
             public interface Iface {
           
               /**
                * Brings a table on-line (enables it)
          -     * 
                * @param tableName name of the table
                */
          -    public void enableTable(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException;
          +    public void enableTable(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Disables a table (takes it off-line) If it is being served, the master
          -     * will tell the servers to stop serving it.
          -     * 
+     * Disables a table (takes it off-line). If it is being served, the master will tell the
+     * servers to stop serving it.
                * @param tableName name of the table
                */
          -    public void disableTable(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException;
          +    public void disableTable(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
                * @return true if table is on-line
          -     * 
                * @param tableName name of the table to check
                */
          -    public boolean isTableEnabled(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException;
          +    public boolean isTableEnabled(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException;
           
          -    public void compact(java.nio.ByteBuffer tableNameOrRegionName) throws IOError, org.apache.thrift.TException;
          +    public void compact(java.nio.ByteBuffer tableNameOrRegionName)
          +        throws IOError, org.apache.thrift.TException;
           
          -    public void majorCompact(java.nio.ByteBuffer tableNameOrRegionName) throws IOError, org.apache.thrift.TException;
          +    public void majorCompact(java.nio.ByteBuffer tableNameOrRegionName)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
                * List all the userspace tables.
          -     * 
                * @return returns a list of names
                */
-    public java.util.List<java.nio.ByteBuffer> getTableNames() throws IOError, org.apache.thrift.TException;
+    public java.util.List<java.nio.ByteBuffer> getTableNames()
          +        throws IOError, org.apache.thrift.TException;
           
               /**
                * List all the userspace tables and their enabled or disabled flags.
          -     * 
                * @return list of tables with is enabled flags
                */
-    public java.util.Map<java.nio.ByteBuffer,java.lang.Boolean> getTableNamesWithIsTableEnabled() throws IOError, org.apache.thrift.TException;
+    public java.util.Map<java.nio.ByteBuffer, java.lang.Boolean> getTableNamesWithIsTableEnabled()
          +        throws IOError, org.apache.thrift.TException;
           
               /**
     * List all the column families associated with a table.
          -     * 
                * @return list of column family descriptors
          -     * 
                * @param tableName table name
                */
-    public java.util.Map<java.nio.ByteBuffer,ColumnDescriptor> getColumnDescriptors(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException;
+    public java.util.Map<java.nio.ByteBuffer, ColumnDescriptor> getColumnDescriptors(
          +        java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException;
           
               /**
                * List the regions associated with a table.
          -     * 
                * @return list of region descriptors
          -     * 
                * @param tableName table name
                */
-    public java.util.List<TRegionInfo> getTableRegions(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TRegionInfo> getTableRegions(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Create a table with the specified column families.  The name
          -     * field for each ColumnDescriptor must be set and must end in a
          -     * colon (:). All other fields are optional and will get default
          +     * Create a table with the specified column families. The name field for each ColumnDescriptor
          +     * must be set and must end in a colon (:). All other fields are optional and will get default
                * values if not explicitly specified.
          -     * 
                * @throws IllegalArgument if an input parameter is invalid
          -     * 
                * @throws AlreadyExists if the table name already exists
          -     * 
                * @param tableName name of table to create
          -     * 
                * @param columnFamilies list of column family descriptors
                */
-    public void createTable(java.nio.ByteBuffer tableName, java.util.List<ColumnDescriptor> columnFamilies) throws IOError, IllegalArgument, AlreadyExists, org.apache.thrift.TException;
+    public void createTable(java.nio.ByteBuffer tableName,
+        java.util.List<ColumnDescriptor> columnFamilies)
          +        throws IOError, IllegalArgument, AlreadyExists, org.apache.thrift.TException;
           
               /**
                * Deletes a table
          -     * 
          -     * @throws IOError if table doesn't exist on server or there was some other
          -     * problem
          -     * 
          +     * @throws IOError if table doesn't exist on server or there was some other problem
                * @param tableName name of table to delete
                */
          -    public void deleteTable(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException;
          +    public void deleteTable(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get a single TCell for the specified table, row, and column at the
          -     * latest timestamp. Returns an empty list if no such value exists.
          -     * 
          +     * Get a single TCell for the specified table, row, and column at the latest timestamp. Returns
          +     * an empty list if no such value exists.
                * @return value for specified row/column
          -     * 
                * @param tableName name of table
          -     * 
                * @param row row key
          -     * 
                * @param column column name
          -     * 
                * @param attributes Get attributes
                */
-    public java.util.List<TCell> get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TCell> get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
+        java.nio.ByteBuffer column,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get the specified number of versions for the specified table,
          -     * row, and column.
          -     * 
          +     * Get the specified number of versions for the specified table, row, and column.
                * @return list of cells for specified row/column
          -     * 
                * @param tableName name of table
          -     * 
                * @param row row key
          -     * 
                * @param column column name
          -     * 
                * @param numVersions number of versions to retrieve
          -     * 
                * @param attributes Get attributes
                */
-    public java.util.List<TCell> getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, int numVersions, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TCell> getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
+        java.nio.ByteBuffer column, int numVersions,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get the specified number of versions for the specified table,
          -     * row, and column.  Only versions less than or equal to the specified
          -     * timestamp will be returned.
          -     * 
          +     * Get the specified number of versions for the specified table, row, and column. Only versions
          +     * less than or equal to the specified timestamp will be returned.
                * @return list of cells for specified row/column
          -     * 
                * @param tableName name of table
          -     * 
                * @param row row key
          -     * 
                * @param column column name
          -     * 
                * @param timestamp timestamp
          -     * 
                * @param numVersions number of versions to retrieve
          -     * 
                * @param attributes Get attributes
                */
-    public java.util.List<TCell> getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, int numVersions, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TCell> getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
+        java.nio.ByteBuffer column, long timestamp, int numVersions,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get all the data for the specified table and row at the latest
          -     * timestamp. Returns an empty list if the row does not exist.
          -     * 
          +     * Get all the data for the specified table and row at the latest timestamp. Returns an empty
          +     * list if the row does not exist.
                * @return TRowResult containing the row and map of columns to TCells
          -     * 
                * @param tableName name of table
          -     * 
                * @param row row key
          -     * 
                * @param attributes Get attributes
                */
-    public java.util.List<TRowResult> getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TRowResult> getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get the specified columns for the specified table and row at the latest
          -     * timestamp. Returns an empty list if the row does not exist.
          -     * 
          +     * Get the specified columns for the specified table and row at the latest timestamp. Returns an
          +     * empty list if the row does not exist.
                * @return TRowResult containing the row and map of columns to TCells
          -     * 
                * @param tableName name of table
          -     * 
                * @param row row key
          -     * 
                * @param columns List of columns to return, null for all columns
          -     * 
                * @param attributes Get attributes
                */
-    public java.util.List<TRowResult> getRowWithColumns(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TRowResult> getRowWithColumns(java.nio.ByteBuffer tableName,
+        java.nio.ByteBuffer row, java.util.List<java.nio.ByteBuffer> columns,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get all the data for the specified table and row at the specified
          -     * timestamp. Returns an empty list if the row does not exist.
          -     * 
          +     * Get all the data for the specified table and row at the specified timestamp. Returns an empty
          +     * list if the row does not exist.
                * @return TRowResult containing the row and map of columns to TCells
          -     * 
                * @param tableName name of the table
          -     * 
                * @param row row key
          -     * 
                * @param timestamp timestamp
          -     * 
                * @param attributes Get attributes
                */
-    public java.util.List<TRowResult> getRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TRowResult> getRowTs(java.nio.ByteBuffer tableName,
+        java.nio.ByteBuffer row, long timestamp,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get the specified columns for the specified table and row at the specified
          -     * timestamp. Returns an empty list if the row does not exist.
          -     * 
          +     * Get the specified columns for the specified table and row at the specified timestamp. Returns
          +     * an empty list if the row does not exist.
                * @return TRowResult containing the row and map of columns to TCells
          -     * 
                * @param tableName name of table
          -     * 
                * @param row row key
          -     * 
                * @param columns List of columns to return, null for all columns
          -     * 
                * @param timestamp
                * @param attributes Get attributes
                */
-    public java.util.List<TRowResult> getRowWithColumnsTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TRowResult> getRowWithColumnsTs(java.nio.ByteBuffer tableName,
+        java.nio.ByteBuffer row, java.util.List<java.nio.ByteBuffer> columns, long timestamp,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get all the data for the specified table and rows at the latest
          -     * timestamp. Returns an empty list if no rows exist.
          -     * 
          +     * Get all the data for the specified table and rows at the latest timestamp. Returns an empty
          +     * list if no rows exist.
                * @return TRowResult containing the rows and map of columns to TCells
          -     * 
                * @param tableName name of table
          -     * 
                * @param rows row keys
          -     * 
                * @param attributes Get attributes
                */
-    public java.util.List<TRowResult> getRows(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TRowResult> getRows(java.nio.ByteBuffer tableName,
+        java.util.List<java.nio.ByteBuffer> rows,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get the specified columns for the specified table and rows at the latest
          -     * timestamp. Returns an empty list if no rows exist.
          -     * 
          +     * Get the specified columns for the specified table and rows at the latest timestamp. Returns
          +     * an empty list if no rows exist.
                * @return TRowResult containing the rows and map of columns to TCells
          -     * 
                * @param tableName name of table
          -     * 
                * @param rows row keys
          -     * 
                * @param columns List of columns to return, null for all columns
          -     * 
                * @param attributes Get attributes
                */
-    public java.util.List<TRowResult> getRowsWithColumns(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException;
+    public java.util.List<TRowResult> getRowsWithColumns(java.nio.ByteBuffer tableName,
+        java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get all the data for the specified table and rows at the specified
          -     * timestamp. Returns an empty list if no rows exist.
          -     * 
          +     * Get all the data for the specified table and rows at the specified timestamp. Returns an
          +     * empty list if no rows exist.
                * @return TRowResult containing the rows and map of columns to TCells
          -     * 
                * @param tableName name of the table
          -     * 
                * @param rows row keys
          -     * 
                * @param timestamp timestamp
          -     * 
                * @param attributes Get attributes
                */
          -    public java.util.List getRowsTs(java.nio.ByteBuffer tableName, java.util.List rows, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public java.util.List getRowsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rows, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get the specified columns for the specified table and rows at the specified
          -     * timestamp. Returns an empty list if no rows exist.
          -     * 
          +     * Get the specified columns for the specified table and rows at the specified timestamp.
          +     * Returns an empty list if no rows exist.
                * @return TRowResult containing the rows and map of columns to TCells
          -     * 
                * @param tableName name of table
          -     * 
                * @param rows row keys
          -     * 
                * @param columns List of columns to return, null for all columns
          -     * 
                * @param timestamp
                * @param attributes Get attributes
                */
          -    public java.util.List getRowsWithColumnsTs(java.nio.ByteBuffer tableName, java.util.List rows, java.util.List columns, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public java.util.List getRowsWithColumnsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rows, java.util.List columns,
          +        long timestamp, java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Apply a series of mutations (updates/deletes) to a row in a
          -     * single transaction.  If an exception is thrown, then the
          -     * transaction is aborted.  Default current timestamp is used, and
          +     * Apply a series of mutations (updates/deletes) to a row in a single transaction. If an
          +     * exception is thrown, then the transaction is aborted. Default current timestamp is used, and
                * all entries will have an identical timestamp.
          -     * 
                * @param tableName name of table
          -     * 
                * @param row row key
          -     * 
                * @param mutations list of mutation commands
          -     * 
                * @param attributes Mutation attributes
                */
          -    public void mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List mutations, java.util.Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException;
          +    public void mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List mutations,
          +        java.util.Map attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException;
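
    For reviewers who want to see the mutation path end to end, a hedged, self-contained sketch of
    driving mutateRow over the plain blocking transport. The host, port, table, column and value are
    invented for illustration; the TSocket/TBinaryProtocol wiring is standard libthrift, and
    Hbase/Mutation are the generated classes this file declares.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hbase.thrift.generated.Hbase;
    import org.apache.hadoop.hbase.thrift.generated.Mutation;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class MutateRowSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical thrift1 gateway on localhost:9090.
        TTransport transport = new TSocket("localhost", 9090);
        transport.open();
        TProtocol protocol = new TBinaryProtocol(transport);
        Hbase.Client client = new Hbase.Client(protocol);

        // A single put expressed as a Mutation; the default (current) timestamp is used.
        Mutation put = new Mutation();
        put.setColumn(ByteBuffer.wrap("cf:qual".getBytes(StandardCharsets.UTF_8)));
        put.setValue(ByteBuffer.wrap("value-1".getBytes(StandardCharsets.UTF_8)));

        List<Mutation> mutations = Collections.singletonList(put);
        Map<ByteBuffer, ByteBuffer> attributes = Collections.emptyMap();
        client.mutateRow(ByteBuffer.wrap("t1".getBytes(StandardCharsets.UTF_8)),
            ByteBuffer.wrap("row-1".getBytes(StandardCharsets.UTF_8)), mutations, attributes);

        transport.close();
      }
    }
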
           
               /**
          -     * Apply a series of mutations (updates/deletes) to a row in a
          -     * single transaction.  If an exception is thrown, then the
          -     * transaction is aborted.  The specified timestamp is used, and
          +     * Apply a series of mutations (updates/deletes) to a row in a single transaction. If an
          +     * exception is thrown, then the transaction is aborted. The specified timestamp is used, and
                * all entries will have an identical timestamp.
          -     * 
                * @param tableName name of table
          -     * 
                * @param row row key
          -     * 
                * @param mutations list of mutation commands
          -     * 
                * @param timestamp timestamp
          -     * 
                * @param attributes Mutation attributes
                */
          -    public void mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List mutations, long timestamp, java.util.Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException;
          +    public void mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List mutations, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException;
           
               /**
          -     * Apply a series of batches (each a series of mutations on a single row)
          -     * in a single transaction.  If an exception is thrown, then the
          -     * transaction is aborted.  Default current timestamp is used, and
          -     * all entries will have an identical timestamp.
          -     * 
          +     * Apply a series of batches (each a series of mutations on a single row) in a single
          +     * transaction. If an exception is thrown, then the transaction is aborted. Default current
          +     * timestamp is used, and all entries will have an identical timestamp.
                * @param tableName name of table
          -     * 
                * @param rowBatches list of row batches
          -     * 
                * @param attributes Mutation attributes
                */
          -    public void mutateRows(java.nio.ByteBuffer tableName, java.util.List rowBatches, java.util.Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException;
          +    public void mutateRows(java.nio.ByteBuffer tableName, java.util.List rowBatches,
          +        java.util.Map attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException;
           
               /**
          -     * Apply a series of batches (each a series of mutations on a single row)
          -     * in a single transaction.  If an exception is thrown, then the
          -     * transaction is aborted.  The specified timestamp is used, and
          -     * all entries will have an identical timestamp.
          -     * 
          +     * Apply a series of batches (each a series of mutations on a single row) in a single
          +     * transaction. If an exception is thrown, then the transaction is aborted. The specified
          +     * timestamp is used, and all entries will have an identical timestamp.
                * @param tableName name of table
          -     * 
                * @param rowBatches list of row batches
          -     * 
                * @param timestamp timestamp
          -     * 
                * @param attributes Mutation attributes
                */
          -    public void mutateRowsTs(java.nio.ByteBuffer tableName, java.util.List rowBatches, long timestamp, java.util.Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException;
          +    public void mutateRowsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rowBatches, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException;
           
               /**
          -     * Atomically increment the column value specified.  Returns the next value post increment.
          -     * 
           +     * Atomically increment the column value specified. Returns the next value post-increment.
                * @param tableName name of table
          -     * 
                * @param row row to increment
          -     * 
                * @param column name of column
          -     * 
                * @param value amount to increment by
                */
          -    public long atomicIncrement(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long value) throws IOError, IllegalArgument, org.apache.thrift.TException;
          +    public long atomicIncrement(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long value)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException;
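
    Assuming a client built as in the mutateRow sketch above, a counter bump through
    atomicIncrement is a single call; the table, row and column names below are again hypothetical.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hbase.thrift.generated.Hbase;
    import org.apache.thrift.TException;

    final class AtomicIncrementSketch {
      private static ByteBuffer bytes(String s) {
        return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
      }

      // Bumps a hypothetical counter column by delta and returns the post-increment value.
      static long bump(Hbase.Iface client, long delta) throws TException {
        return client.atomicIncrement(bytes("t1"), bytes("row-1"), bytes("cf:counter"), delta);
      }
    }
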
           
               /**
                * Delete all cells that match the passed row and column.
          -     * 
                * @param tableName name of table
          -     * 
                * @param row Row to update
          -     * 
                * @param column name of column whose value is to be deleted
          -     * 
                * @param attributes Delete attributes
                */
          -    public void deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public void deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Delete all cells that match the passed row and column and whose
          -     * timestamp is equal-to or older than the passed timestamp.
          -     * 
          +     * Delete all cells that match the passed row and column and whose timestamp is equal-to or
          +     * older than the passed timestamp.
                * @param tableName name of table
          -     * 
                * @param row Row to update
          -     * 
                * @param column name of column whose value is to be deleted
          -     * 
                * @param timestamp timestamp
          -     * 
                * @param attributes Delete attributes
                */
          -    public void deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public void deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
                * Completely delete the row's cells.
          -     * 
                * @param tableName name of table
          -     * 
                * @param row key of the row to be completely deleted.
          -     * 
                * @param attributes Delete attributes
                */
          -    public void deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public void deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Increment a cell by the ammount.
          -     * Increments can be applied async if hbase.regionserver.thrift.coalesceIncrement is set to true.
          -     * False is the default.  Turn to true if you need the extra performance and can accept some
          -     * data loss if a thrift server dies with increments still in the queue.
          -     * 
           +     * Increment a cell by the amount. Increments can be applied async if
           +     * hbase.regionserver.thrift.coalesceIncrement is set to true. False is the default. Set it to
           +     * true if you need the extra performance and can accept some data loss if a thrift server dies
           +     * with increments still in the queue.
                * @param increment The single increment to apply
                */
               public void increment(TIncrement increment) throws IOError, org.apache.thrift.TException;
           
          -    public void incrementRows(java.util.List increments) throws IOError, org.apache.thrift.TException;
          +    public void incrementRows(java.util.List increments)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Completely delete the row's cells marked with a timestamp
          -     * equal-to or older than the passed timestamp.
          -     * 
          +     * Completely delete the row's cells marked with a timestamp equal-to or older than the passed
          +     * timestamp.
                * @param tableName name of table
          -     * 
                * @param row key of the row to be completely deleted.
          -     * 
                * @param timestamp timestamp
          -     * 
                * @param attributes Delete attributes
                */
          -    public void deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public void deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        long timestamp, java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get a scanner on the current table, using the Scan instance
          -     * for the scan parameters.
          -     * 
          +     * Get a scanner on the current table, using the Scan instance for the scan parameters.
                * @param tableName name of table
          -     * 
                * @param scan Scan instance
          -     * 
                * @param attributes Scan attributes
                */
          -    public int scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public int scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
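
    A short sketch of feeding scannerOpenWithScan a TScan, assuming the TScan struct exposes the
    usual generated setters (setStartRow, setColumns, setCaching); the names and the caching value
    are illustrative only.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.Collections;
    import java.util.Map;
    import org.apache.hadoop.hbase.thrift.generated.Hbase;
    import org.apache.hadoop.hbase.thrift.generated.TScan;
    import org.apache.thrift.TException;

    final class ScanWithTScanSketch {
      private static ByteBuffer bytes(String s) {
        return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
      }

      // Opens a scanner driven by a TScan carrying the start row, column list and caching hint.
      static int open(Hbase.Iface client) throws TException {
        TScan scan = new TScan();
        scan.setStartRow(bytes("row-000"));
        scan.setColumns(Collections.singletonList(bytes("cf")));
        scan.setCaching(100);
        Map<ByteBuffer, ByteBuffer> attributes = Collections.emptyMap();
        return client.scannerOpenWithScan(bytes("t1"), scan, attributes);
      }
    }
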
           
               /**
          -     * Get a scanner on the current table starting at the specified row and
          -     * ending at the last row in the table.  Return the specified columns.
          -     * 
          +     * Get a scanner on the current table starting at the specified row and ending at the last row
          +     * in the table. Return the specified columns.
                * @return scanner id to be used with other scanner procedures
          -     * 
                * @param tableName name of table
          -     * 
          -     * @param startRow Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          -     * 
          -     * @param columns columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          -     * 
          +     * @param startRow Starting row in table to scan. Send "" (empty string) to start at the first
          +     *          row.
          +     * @param columns columns to scan. If column name is a column family, all columns of the
          +     *          specified column family are returned. It's also possible to pass a regex in the
          +     *          column qualifier.
                * @param attributes Scan attributes
                */
          -    public int scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List columns, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public int scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.util.List columns,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get a scanner on the current table starting and stopping at the
          -     * specified rows.  ending at the last row in the table.  Return the
          -     * specified columns.
          -     * 
           +     * Get a scanner on the current table starting and stopping at the specified rows. Return the
           +     * specified columns.
                * @return scanner id to be used with other scanner procedures
          -     * 
                * @param tableName name of table
          -     * 
          -     * @param startRow Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          -     * 
          -     * @param stopRow row to stop scanning on. This row is *not* included in the
          -     * scanner's results
          -     * 
          -     * @param columns columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          -     * 
          +     * @param startRow Starting row in table to scan. Send "" (empty string) to start at the first
          +     *          row.
          +     * @param stopRow row to stop scanning on. This row is *not* included in the scanner's results
          +     * @param columns columns to scan. If column name is a column family, all columns of the
          +     *          specified column family are returned. It's also possible to pass a regex in the
          +     *          column qualifier.
                * @param attributes Scan attributes
                */
          -    public int scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List columns, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public int scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.nio.ByteBuffer stopRow, java.util.List columns,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Open a scanner for a given prefix.  That is all rows will have the specified
          -     * prefix. No other rows will be returned.
          -     * 
           +     * Open a scanner for a given prefix. That is, all rows will have the specified prefix. No
           +     * other rows will be returned.
                * @return scanner id to use with other scanner calls
          -     * 
                * @param tableName name of table
          -     * 
                * @param startAndPrefix the prefix (and thus start row) of the keys you want
          -     * 
                * @param columns the columns you want returned
          -     * 
                * @param attributes Scan attributes
                */
          -    public int scannerOpenWithPrefix(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startAndPrefix, java.util.List columns, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public int scannerOpenWithPrefix(java.nio.ByteBuffer tableName,
          +        java.nio.ByteBuffer startAndPrefix, java.util.List columns,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get a scanner on the current table starting at the specified row and
          -     * ending at the last row in the table.  Return the specified columns.
          -     * Only values with the specified timestamp are returned.
          -     * 
          +     * Get a scanner on the current table starting at the specified row and ending at the last row
          +     * in the table. Return the specified columns. Only values with the specified timestamp are
          +     * returned.
                * @return scanner id to be used with other scanner procedures
          -     * 
                * @param tableName name of table
          -     * 
          -     * @param startRow Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          -     * 
          -     * @param columns columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          -     * 
          +     * @param startRow Starting row in table to scan. Send "" (empty string) to start at the first
          +     *          row.
          +     * @param columns columns to scan. If column name is a column family, all columns of the
          +     *          specified column family are returned. It's also possible to pass a regex in the
          +     *          column qualifier.
                * @param timestamp timestamp
          -     * 
                * @param attributes Scan attributes
                */
          -    public int scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List columns, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public int scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.util.List columns, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Get a scanner on the current table starting and stopping at the
          -     * specified rows.  ending at the last row in the table.  Return the
          -     * specified columns.  Only values with the specified timestamp are
          -     * returned.
          -     * 
           +     * Get a scanner on the current table starting and stopping at the specified rows. Return the
           +     * specified columns. Only values with the specified timestamp are returned.
                * @return scanner id to be used with other scanner procedures
          -     * 
                * @param tableName name of table
          -     * 
          -     * @param startRow Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          -     * 
          -     * @param stopRow row to stop scanning on. This row is *not* included in the
          -     * scanner's results
          -     * 
          -     * @param columns columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          -     * 
          +     * @param startRow Starting row in table to scan. Send "" (empty string) to start at the first
          +     *          row.
          +     * @param stopRow row to stop scanning on. This row is *not* included in the scanner's results
          +     * @param columns columns to scan. If column name is a column family, all columns of the
          +     *          specified column family are returned. It's also possible to pass a regex in the
          +     *          column qualifier.
                * @param timestamp timestamp
          -     * 
                * @param attributes Scan attributes
                */
          -    public int scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List columns, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException;
          +    public int scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.nio.ByteBuffer stopRow, java.util.List columns, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Returns the scanner's current row value and advances to the next
          -     * row in the table.  When there are no more rows in the table, or a key
          -     * greater-than-or-equal-to the scanner's specified stopRow is reached,
          -     * an empty list is returned.
          -     * 
          +     * Returns the scanner's current row value and advances to the next row in the table. When there
          +     * are no more rows in the table, or a key greater-than-or-equal-to the scanner's specified
          +     * stopRow is reached, an empty list is returned.
                * @return a TRowResult containing the current row and a map of the columns to TCells.
          -     * 
                * @throws IllegalArgument if ScannerID is invalid
          -     * 
                * @throws NotFound when the scanner reaches the end
          -     * 
                * @param id id of a scanner returned by scannerOpen
                */
          -    public java.util.List scannerGet(int id) throws IOError, IllegalArgument, org.apache.thrift.TException;
          +    public java.util.List scannerGet(int id)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException;
           
               /**
          -     * Returns, starting at the scanner's current row value nbRows worth of
          -     * rows and advances to the next row in the table.  When there are no more
          -     * rows in the table, or a key greater-than-or-equal-to the scanner's
          -     * specified stopRow is reached,  an empty list is returned.
          -     * 
           +     * Returns, starting at the scanner's current row value, nbRows worth of rows and advances to
           +     * the next row in the table. When there are no more rows in the table, or a key
           +     * greater-than-or-equal-to the scanner's specified stopRow is reached, an empty list is
           +     * returned.
                * @return a TRowResult containing the current row and a map of the columns to TCells.
          -     * 
                * @throws IllegalArgument if ScannerID is invalid
          -     * 
                * @throws NotFound when the scanner reaches the end
          -     * 
                * @param id id of a scanner returned by scannerOpen
          -     * 
                * @param nbRows number of results to return
                */
          -    public java.util.List scannerGetList(int id, int nbRows) throws IOError, IllegalArgument, org.apache.thrift.TException;
          +    public java.util.List scannerGetList(int id, int nbRows)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException;
           
               /**
                * Closes the server-state associated with an open scanner.
          -     * 
                * @throws IllegalArgument if ScannerID is invalid
          -     * 
                * @param id id of a scanner returned by scannerOpen
                */
               public void scannerClose(int id) throws IOError, IllegalArgument, org.apache.thrift.TException;
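
    scannerOpenWithStop, scannerGetList and scannerClose together form the usual
    open/iterate/close loop. A minimal sketch against a hypothetical table, with the scanner closed
    in a finally block so the server-side state is released even if iteration fails:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hbase.thrift.generated.Hbase;
    import org.apache.hadoop.hbase.thrift.generated.TRowResult;
    import org.apache.thrift.TException;

    final class ScanSketch {
      private static ByteBuffer bytes(String s) {
        return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
      }

      // Scans [row-000, row-100) of a hypothetical table and prints the row keys.
      static void scan(Hbase.Iface client) throws TException {
        List<ByteBuffer> columns = Collections.singletonList(bytes("cf"));
        Map<ByteBuffer, ByteBuffer> attributes = Collections.emptyMap();
        int scannerId = client.scannerOpenWithStop(bytes("t1"), bytes("row-000"),
            bytes("row-100"), columns, attributes);
        try {
          List<TRowResult> batch;
          // An empty batch signals that the scanner is exhausted.
          while (!(batch = client.scannerGetList(scannerId, 100)).isEmpty()) {
            for (TRowResult row : batch) {
              System.out.println(new String(row.getRow(), StandardCharsets.UTF_8));
            }
          }
        } finally {
          client.scannerClose(scannerId);
        }
      }
    }
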
           
               /**
          -     * Get the regininfo for the specified row. It scans
          -     * the metatable to find region's start and end keys.
          -     * 
           +     * Get the region info for the specified row. It scans the meta table to find the region's
           +     * start and end keys.
                * @return value for specified row/column
          -     * 
                * @param row row key
                */
          -    public TRegionInfo getRegionInfo(java.nio.ByteBuffer row) throws IOError, org.apache.thrift.TException;
          +    public TRegionInfo getRegionInfo(java.nio.ByteBuffer row)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
                * Appends values to one or more columns within a single row.
          -     * 
                * @return values of columns after the append operation.
          -     * 
                * @param append The single append operation to apply
                */
          -    public java.util.List append(TAppend append) throws IOError, org.apache.thrift.TException;
          +    public java.util.List append(TAppend append)
          +        throws IOError, org.apache.thrift.TException;
           
               /**
          -     * Atomically checks if a row/family/qualifier value matches the expected
          -     * value. If it does, it adds the corresponding mutation operation for put.
          -     * 
          +     * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
          +     * adds the corresponding mutation operation for put.
                * @return true if the new put was executed, false otherwise
          -     * 
                * @param tableName name of table
          -     * 
                * @param row row key
          -     * 
                * @param column column name
          -     * 
          -     * @param value the expected value for the column parameter, if not
          -     * provided the check is for the non-existence of the
          -     * column in question
          -     * 
          +     * @param value the expected value for the column parameter, if not provided the check is for
          +     *          the non-existence of the column in question
                * @param mput mutation for the put
          -     * 
                * @param attributes Mutation attributes
                */
          -    public boolean checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput, java.util.Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException;
          +    public boolean checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput,
          +        java.util.Map attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException;
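
    checkAndPut is the compare-and-swap of this interface: the put is applied only if the current
    cell value equals the expected value. A hedged sketch, with table, row, column and values made
    up for illustration:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.Collections;
    import java.util.Map;
    import org.apache.hadoop.hbase.thrift.generated.Hbase;
    import org.apache.hadoop.hbase.thrift.generated.Mutation;
    import org.apache.thrift.TException;

    final class CheckAndPutSketch {
      private static ByteBuffer bytes(String s) {
        return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
      }

      // Writes "shipped" into cf:state only if the column still holds "pending".
      static boolean markShipped(Hbase.Iface client) throws TException {
        Mutation put = new Mutation();
        put.setColumn(bytes("cf:state"));
        put.setValue(bytes("shipped"));
        Map<ByteBuffer, ByteBuffer> attributes = Collections.emptyMap();
        return client.checkAndPut(bytes("t1"), bytes("order-42"), bytes("cf:state"),
            bytes("pending"), put, attributes);
      }
    }
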
           
               /**
                * Get the type of this thrift server.
          -     * 
                * @return the type of this thrift server
                */
               public TThriftServerType getThriftServerType() throws org.apache.thrift.TException;
          @@ -636,14 +562,12 @@ public interface Iface {
           
               /**
                * Grant permissions in namespace or table level.
          -     * 
                * @param info
                */
               public boolean grant(TAccessControlEntity info) throws IOError, org.apache.thrift.TException;
           
               /**
                * Revoke permissions in namespace or table level.
          -     * 
                * @param info
                */
               public boolean revoke(TAccessControlEntity info) throws IOError, org.apache.thrift.TException;
          @@ -652,141 +576,288 @@ public interface Iface {
           
             public interface AsyncIface {
           
          -    public void enableTable(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void disableTable(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void isTableEnabled(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void compact(java.nio.ByteBuffer tableNameOrRegionName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void majorCompact(java.nio.ByteBuffer tableNameOrRegionName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getTableNames(org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getTableNamesWithIsTableEnabled(org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getColumnDescriptors(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getTableRegions(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void createTable(java.nio.ByteBuffer tableName, java.util.List columnFamilies, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void deleteTable(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, int numVersions, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, int numVersions, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRowWithColumns(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List columns, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRowWithColumnsTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List columns, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRows(java.nio.ByteBuffer tableName, java.util.List rows, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRowsWithColumns(java.nio.ByteBuffer tableName, java.util.List rows, java.util.List columns, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRowsTs(java.nio.ByteBuffer tableName, java.util.List rows, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRowsWithColumnsTs(java.nio.ByteBuffer tableName, java.util.List rows, java.util.List columns, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List mutations, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List mutations, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void mutateRows(java.nio.ByteBuffer tableName, java.util.List rowBatches, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void mutateRowsTs(java.nio.ByteBuffer tableName, java.util.List rowBatches, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void atomicIncrement(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long value, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void increment(TIncrement increment, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void incrementRows(java.util.List increments, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List columns, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List columns, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void scannerOpenWithPrefix(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startAndPrefix, java.util.List columns, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List columns, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List columns, long timestamp, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void scannerGet(int id, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void scannerGetList(int id, int nbRows, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void scannerClose(int id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRegionInfo(java.nio.ByteBuffer row, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void append(TAppend append, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput, java.util.Map attributes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getThriftServerType(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getClusterId(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void grant(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void revoke(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void enableTable(java.nio.ByteBuffer tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void disableTable(java.nio.ByteBuffer tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void isTableEnabled(java.nio.ByteBuffer tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void compact(java.nio.ByteBuffer tableNameOrRegionName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void majorCompact(java.nio.ByteBuffer tableNameOrRegionName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getTableNames(
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getTableNamesWithIsTableEnabled(
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getColumnDescriptors(java.nio.ByteBuffer tableName,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getTableRegions(java.nio.ByteBuffer tableName,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void createTable(java.nio.ByteBuffer tableName,
          +        java.util.List columnFamilies,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void deleteTable(java.nio.ByteBuffer tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, int numVersions,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long timestamp, int numVersions,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRowWithColumns(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List columns,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRowWithColumnsTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List columns, long timestamp,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRows(java.nio.ByteBuffer tableName, java.util.List rows,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRowsWithColumns(java.nio.ByteBuffer tableName,
          +        java.util.List rows, java.util.List columns,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRowsTs(java.nio.ByteBuffer tableName, java.util.List rows,
          +        long timestamp, java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRowsWithColumnsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rows, java.util.List columns,
          +        long timestamp, java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List mutations,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List mutations, long timestamp,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void mutateRows(java.nio.ByteBuffer tableName, java.util.List rowBatches,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void mutateRowsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rowBatches, long timestamp,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void atomicIncrement(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long value,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long timestamp,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void increment(TIncrement increment,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void incrementRows(java.util.List increments,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        long timestamp, java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.util.List columns,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.nio.ByteBuffer stopRow, java.util.List columns,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void scannerOpenWithPrefix(java.nio.ByteBuffer tableName,
          +        java.nio.ByteBuffer startAndPrefix, java.util.List columns,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.util.List columns, long timestamp,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.nio.ByteBuffer stopRow, java.util.List columns, long timestamp,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void scannerGet(int id,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void scannerGetList(int id, int nbRows,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void scannerClose(int id,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRegionInfo(java.nio.ByteBuffer row,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void append(TAppend append,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput,
          +        java.util.Map attributes,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getThriftServerType(
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
           +    public void getClusterId(
           +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
           +        throws org.apache.thrift.TException;
          +
          +    public void grant(TAccessControlEntity info,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void revoke(TAccessControlEntity info,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
             }
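
    Every AsyncIface method above reports its outcome through libthrift's AsyncMethodCallback
    rather than a return value. A minimal callback for one of the Void-typed calls might look like
    the following (the log messages are illustrative); actually driving it requires the generated
    AsyncClient over a non-blocking transport, which is beyond this sketch.

    import org.apache.thrift.async.AsyncMethodCallback;

    // A callback suitable for AsyncIface.enableTable and the other Void-typed calls.
    final class EnableTableCallback implements AsyncMethodCallback<Void> {
      @Override
      public void onComplete(Void response) {
        System.out.println("enableTable finished");
      }

      @Override
      public void onError(Exception exception) {
        System.err.println("enableTable failed: " + exception);
      }
    }
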
           
             public static class Client extends org.apache.thrift.TServiceClient implements Iface {
               public static class Factory implements org.apache.thrift.TServiceClientFactory {
          -      public Factory() {}
          +      public Factory() {
          +      }
          +
                 public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
                   return new Client(prot);
                 }
          -      public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
          +
          +      public Client getClient(org.apache.thrift.protocol.TProtocol iprot,
          +          org.apache.thrift.protocol.TProtocol oprot) {
                   return new Client(iprot, oprot);
                 }
               }
           
          -    public Client(org.apache.thrift.protocol.TProtocol prot)
          -    {
          +    public Client(org.apache.thrift.protocol.TProtocol prot) {
                 super(prot, prot);
               }
           
          -    public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
          +    public Client(org.apache.thrift.protocol.TProtocol iprot,
          +        org.apache.thrift.protocol.TProtocol oprot) {
                 super(iprot, oprot);
               }
           
          -    public void enableTable(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException
          -    {
          +    public void enableTable(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException {
                 send_enableTable(tableName);
                 recv_enableTable();
               }
           
          -    public void send_enableTable(java.nio.ByteBuffer tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_enableTable(java.nio.ByteBuffer tableName)
          +        throws org.apache.thrift.TException {
                 enableTable_args args = new enableTable_args();
                 args.setTableName(tableName);
                 sendBase("enableTable", args);
               }
           
          -    public void recv_enableTable() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_enableTable() throws IOError, org.apache.thrift.TException {
                 enableTable_result result = new enableTable_result();
                 receiveBase(result, "enableTable");
                 if (result.io != null) {
          @@ -795,21 +866,20 @@ public void recv_enableTable() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void disableTable(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException
          -    {
          +    public void disableTable(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException {
                 send_disableTable(tableName);
                 recv_disableTable();
               }
           
          -    public void send_disableTable(java.nio.ByteBuffer tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_disableTable(java.nio.ByteBuffer tableName)
          +        throws org.apache.thrift.TException {
                 disableTable_args args = new disableTable_args();
                 args.setTableName(tableName);
                 sendBase("disableTable", args);
               }
           
          -    public void recv_disableTable() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_disableTable() throws IOError, org.apache.thrift.TException {
                 disableTable_result result = new disableTable_result();
                 receiveBase(result, "disableTable");
                 if (result.io != null) {
          @@ -818,21 +888,20 @@ public void recv_disableTable() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public boolean isTableEnabled(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException
          -    {
          +    public boolean isTableEnabled(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException {
                 send_isTableEnabled(tableName);
                 return recv_isTableEnabled();
               }
           
          -    public void send_isTableEnabled(java.nio.ByteBuffer tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_isTableEnabled(java.nio.ByteBuffer tableName)
          +        throws org.apache.thrift.TException {
                 isTableEnabled_args args = new isTableEnabled_args();
                 args.setTableName(tableName);
                 sendBase("isTableEnabled", args);
               }
           
          -    public boolean recv_isTableEnabled() throws IOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_isTableEnabled() throws IOError, org.apache.thrift.TException {
                 isTableEnabled_result result = new isTableEnabled_result();
                 receiveBase(result, "isTableEnabled");
                 if (result.isSetSuccess()) {
          @@ -841,24 +910,25 @@ public boolean recv_isTableEnabled() throws IOError, org.apache.thrift.TExceptio
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "isTableEnabled failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "isTableEnabled failed: unknown result");
               }
           
          -    public void compact(java.nio.ByteBuffer tableNameOrRegionName) throws IOError, org.apache.thrift.TException
          -    {
          +    public void compact(java.nio.ByteBuffer tableNameOrRegionName)
          +        throws IOError, org.apache.thrift.TException {
                 send_compact(tableNameOrRegionName);
                 recv_compact();
               }
           
          -    public void send_compact(java.nio.ByteBuffer tableNameOrRegionName) throws org.apache.thrift.TException
          -    {
          +    public void send_compact(java.nio.ByteBuffer tableNameOrRegionName)
          +        throws org.apache.thrift.TException {
                 compact_args args = new compact_args();
                 args.setTableNameOrRegionName(tableNameOrRegionName);
                 sendBase("compact", args);
               }
           
          -    public void recv_compact() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_compact() throws IOError, org.apache.thrift.TException {
                 compact_result result = new compact_result();
                 receiveBase(result, "compact");
                 if (result.io != null) {
          @@ -867,21 +937,20 @@ public void recv_compact() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void majorCompact(java.nio.ByteBuffer tableNameOrRegionName) throws IOError, org.apache.thrift.TException
          -    {
          +    public void majorCompact(java.nio.ByteBuffer tableNameOrRegionName)
          +        throws IOError, org.apache.thrift.TException {
                 send_majorCompact(tableNameOrRegionName);
                 recv_majorCompact();
               }
           
          -    public void send_majorCompact(java.nio.ByteBuffer tableNameOrRegionName) throws org.apache.thrift.TException
          -    {
          +    public void send_majorCompact(java.nio.ByteBuffer tableNameOrRegionName)
          +        throws org.apache.thrift.TException {
                 majorCompact_args args = new majorCompact_args();
                 args.setTableNameOrRegionName(tableNameOrRegionName);
                 sendBase("majorCompact", args);
               }
           
          -    public void recv_majorCompact() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_majorCompact() throws IOError, org.apache.thrift.TException {
                 majorCompact_result result = new majorCompact_result();
                 receiveBase(result, "majorCompact");
                 if (result.io != null) {
          @@ -890,20 +959,19 @@ public void recv_majorCompact() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public java.util.List getTableNames() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getTableNames()
          +        throws IOError, org.apache.thrift.TException {
                 send_getTableNames();
                 return recv_getTableNames();
               }
           
          -    public void send_getTableNames() throws org.apache.thrift.TException
          -    {
          +    public void send_getTableNames() throws org.apache.thrift.TException {
                 getTableNames_args args = new getTableNames_args();
                 sendBase("getTableNames", args);
               }
           
          -    public java.util.List recv_getTableNames() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getTableNames()
          +        throws IOError, org.apache.thrift.TException {
                 getTableNames_result result = new getTableNames_result();
                 receiveBase(result, "getTableNames");
                 if (result.isSetSuccess()) {
          @@ -912,23 +980,24 @@ public java.util.List recv_getTableNames() throws IOError,
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableNames failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getTableNames failed: unknown result");
               }
           
          -    public java.util.Map getTableNamesWithIsTableEnabled() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.Map getTableNamesWithIsTableEnabled()
          +        throws IOError, org.apache.thrift.TException {
                 send_getTableNamesWithIsTableEnabled();
                 return recv_getTableNamesWithIsTableEnabled();
               }
           
          -    public void send_getTableNamesWithIsTableEnabled() throws org.apache.thrift.TException
          -    {
          +    public void send_getTableNamesWithIsTableEnabled() throws org.apache.thrift.TException {
                 getTableNamesWithIsTableEnabled_args args = new getTableNamesWithIsTableEnabled_args();
                 sendBase("getTableNamesWithIsTableEnabled", args);
               }
           
          -    public java.util.Map recv_getTableNamesWithIsTableEnabled() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.Map
          +        recv_getTableNamesWithIsTableEnabled() throws IOError, org.apache.thrift.TException {
                 getTableNamesWithIsTableEnabled_result result = new getTableNamesWithIsTableEnabled_result();
                 receiveBase(result, "getTableNamesWithIsTableEnabled");
                 if (result.isSetSuccess()) {
          @@ -937,24 +1006,26 @@ public java.util.Map recv_getTableNamesWi
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableNamesWithIsTableEnabled failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getTableNamesWithIsTableEnabled failed: unknown result");
               }
           
          -    public java.util.Map getColumnDescriptors(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.Map getColumnDescriptors(
          +        java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException {
                 send_getColumnDescriptors(tableName);
                 return recv_getColumnDescriptors();
               }
           
          -    public void send_getColumnDescriptors(java.nio.ByteBuffer tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_getColumnDescriptors(java.nio.ByteBuffer tableName)
          +        throws org.apache.thrift.TException {
                 getColumnDescriptors_args args = new getColumnDescriptors_args();
                 args.setTableName(tableName);
                 sendBase("getColumnDescriptors", args);
               }
           
          -    public java.util.Map recv_getColumnDescriptors() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.Map recv_getColumnDescriptors()
          +        throws IOError, org.apache.thrift.TException {
                 getColumnDescriptors_result result = new getColumnDescriptors_result();
                 receiveBase(result, "getColumnDescriptors");
                 if (result.isSetSuccess()) {
          @@ -963,24 +1034,26 @@ public java.util.Map recv_getColumnDescrip
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getColumnDescriptors failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getColumnDescriptors failed: unknown result");
               }
           
          -    public java.util.List getTableRegions(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getTableRegions(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException {
                 send_getTableRegions(tableName);
                 return recv_getTableRegions();
               }
           
          -    public void send_getTableRegions(java.nio.ByteBuffer tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_getTableRegions(java.nio.ByteBuffer tableName)
          +        throws org.apache.thrift.TException {
                 getTableRegions_args args = new getTableRegions_args();
                 args.setTableName(tableName);
                 sendBase("getTableRegions", args);
               }
           
          -    public java.util.List recv_getTableRegions() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getTableRegions()
          +        throws IOError, org.apache.thrift.TException {
                 getTableRegions_result result = new getTableRegions_result();
                 receiveBase(result, "getTableRegions");
                 if (result.isSetSuccess()) {
          @@ -989,25 +1062,28 @@ public java.util.List recv_getTableRegions() throws IOError, org.ap
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableRegions failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getTableRegions failed: unknown result");
               }
           
          -    public void createTable(java.nio.ByteBuffer tableName, java.util.List columnFamilies) throws IOError, IllegalArgument, AlreadyExists, org.apache.thrift.TException
          -    {
          +    public void createTable(java.nio.ByteBuffer tableName,
          +        java.util.List columnFamilies)
          +        throws IOError, IllegalArgument, AlreadyExists, org.apache.thrift.TException {
                 send_createTable(tableName, columnFamilies);
                 recv_createTable();
               }
           
          -    public void send_createTable(java.nio.ByteBuffer tableName, java.util.List columnFamilies) throws org.apache.thrift.TException
          -    {
          +    public void send_createTable(java.nio.ByteBuffer tableName,
          +        java.util.List columnFamilies) throws org.apache.thrift.TException {
                 createTable_args args = new createTable_args();
                 args.setTableName(tableName);
                 args.setColumnFamilies(columnFamilies);
                 sendBase("createTable", args);
               }
           
          -    public void recv_createTable() throws IOError, IllegalArgument, AlreadyExists, org.apache.thrift.TException
          -    {
          +    public void recv_createTable()
          +        throws IOError, IllegalArgument, AlreadyExists, org.apache.thrift.TException {
                 createTable_result result = new createTable_result();
                 receiveBase(result, "createTable");
                 if (result.io != null) {
          @@ -1022,21 +1098,20 @@ public void recv_createTable() throws IOError, IllegalArgument, AlreadyExists, o
                 return;
               }
           
          -    public void deleteTable(java.nio.ByteBuffer tableName) throws IOError, org.apache.thrift.TException
          -    {
          +    public void deleteTable(java.nio.ByteBuffer tableName)
          +        throws IOError, org.apache.thrift.TException {
                 send_deleteTable(tableName);
                 recv_deleteTable();
               }
           
          -    public void send_deleteTable(java.nio.ByteBuffer tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteTable(java.nio.ByteBuffer tableName)
          +        throws org.apache.thrift.TException {
                 deleteTable_args args = new deleteTable_args();
                 args.setTableName(tableName);
                 sendBase("deleteTable", args);
               }
           
          -    public void recv_deleteTable() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_deleteTable() throws IOError, org.apache.thrift.TException {
                 deleteTable_result result = new deleteTable_result();
                 receiveBase(result, "deleteTable");
                 if (result.io != null) {
          @@ -1045,14 +1120,18 @@ public void recv_deleteTable() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public java.util.List get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_get(tableName, row, column, attributes);
                 return recv_get();
               }
           
          -    public void send_get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 get_args args = new get_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1061,8 +1140,7 @@ public void send_get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, jav
                 sendBase("get", args);
               }
           
          -    public java.util.List recv_get() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_get() throws IOError, org.apache.thrift.TException {
                 get_result result = new get_result();
                 receiveBase(result, "get");
                 if (result.isSetSuccess()) {
          @@ -1071,17 +1149,22 @@ public java.util.List recv_get() throws IOError, org.apache.thrift.TExcep
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "get failed: unknown result");
               }
           
          -    public java.util.List getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, int numVersions, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, int numVersions,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getVer(tableName, row, column, numVersions, attributes);
                 return recv_getVer();
               }
           
          -    public void send_getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, int numVersions, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, int numVersions,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getVer_args args = new getVer_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1091,8 +1174,7 @@ public void send_getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
                 sendBase("getVer", args);
               }
           
          -    public java.util.List recv_getVer() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getVer() throws IOError, org.apache.thrift.TException {
                 getVer_result result = new getVer_result();
                 receiveBase(result, "getVer");
                 if (result.isSetSuccess()) {
          @@ -1101,17 +1183,22 @@ public java.util.List recv_getVer() throws IOError, org.apache.thrift.TEx
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getVer failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "getVer failed: unknown result");
               }
           
          -    public java.util.List getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, int numVersions, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long timestamp, int numVersions,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getVerTs(tableName, row, column, timestamp, numVersions, attributes);
                 return recv_getVerTs();
               }
           
          -    public void send_getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, int numVersions, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long timestamp, int numVersions,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getVerTs_args args = new getVerTs_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1122,8 +1209,7 @@ public void send_getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row
                 sendBase("getVerTs", args);
               }
           
          -    public java.util.List recv_getVerTs() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getVerTs() throws IOError, org.apache.thrift.TException {
                 getVerTs_result result = new getVerTs_result();
                 receiveBase(result, "getVerTs");
                 if (result.isSetSuccess()) {
          @@ -1132,17 +1218,21 @@ public java.util.List recv_getVerTs() throws IOError, org.apache.thrift.T
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getVerTs failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getVerTs failed: unknown result");
               }
           
          -    public java.util.List getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getRow(tableName, row, attributes);
                 return recv_getRow();
               }
           
          -    public void send_getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getRow_args args = new getRow_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1150,8 +1240,7 @@ public void send_getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
                 sendBase("getRow", args);
               }
           
          -    public java.util.List recv_getRow() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getRow() throws IOError, org.apache.thrift.TException {
                 getRow_result result = new getRow_result();
                 receiveBase(result, "getRow");
                 if (result.isSetSuccess()) {
          @@ -1160,17 +1249,22 @@ public java.util.List recv_getRow() throws IOError, org.apache.thrif
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRow failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "getRow failed: unknown result");
               }
           
          -    public java.util.List getRowWithColumns(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List columns, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getRowWithColumns(java.nio.ByteBuffer tableName,
          +        java.nio.ByteBuffer row, java.util.List columns,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getRowWithColumns(tableName, row, columns, attributes);
                 return recv_getRowWithColumns();
               }
           
          -    public void send_getRowWithColumns(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List columns, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getRowWithColumns(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List columns,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getRowWithColumns_args args = new getRowWithColumns_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1179,8 +1273,8 @@ public void send_getRowWithColumns(java.nio.ByteBuffer tableName, java.nio.ByteB
                 sendBase("getRowWithColumns", args);
               }
           
          -    public java.util.List recv_getRowWithColumns() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getRowWithColumns()
          +        throws IOError, org.apache.thrift.TException {
                 getRowWithColumns_result result = new getRowWithColumns_result();
                 receiveBase(result, "getRowWithColumns");
                 if (result.isSetSuccess()) {
          @@ -1189,17 +1283,22 @@ public java.util.List recv_getRowWithColumns() throws IOError, org.a
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRowWithColumns failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getRowWithColumns failed: unknown result");
               }
           
          -    public java.util.List getRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getRowTs(java.nio.ByteBuffer tableName,
          +        java.nio.ByteBuffer row, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getRowTs(tableName, row, timestamp, attributes);
                 return recv_getRowTs();
               }
           
          -    public void send_getRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        long timestamp, java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getRowTs_args args = new getRowTs_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1208,8 +1307,7 @@ public void send_getRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row
                 sendBase("getRowTs", args);
               }
           
          -    public java.util.List recv_getRowTs() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getRowTs() throws IOError, org.apache.thrift.TException {
                 getRowTs_result result = new getRowTs_result();
                 receiveBase(result, "getRowTs");
                 if (result.isSetSuccess()) {
          @@ -1218,17 +1316,23 @@ public java.util.List recv_getRowTs() throws IOError, org.apache.thr
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRowTs failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getRowTs failed: unknown result");
               }
           
          -    public java.util.List getRowWithColumnsTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List columns, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getRowWithColumnsTs(java.nio.ByteBuffer tableName,
          +        java.nio.ByteBuffer row, java.util.List columns, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getRowWithColumnsTs(tableName, row, columns, timestamp, attributes);
                 return recv_getRowWithColumnsTs();
               }
           
          -    public void send_getRowWithColumnsTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List columns, long timestamp, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getRowWithColumnsTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List columns, long timestamp,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getRowWithColumnsTs_args args = new getRowWithColumnsTs_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1238,8 +1342,8 @@ public void send_getRowWithColumnsTs(java.nio.ByteBuffer tableName, java.nio.Byt
                 sendBase("getRowWithColumnsTs", args);
               }
           
          -    public java.util.List recv_getRowWithColumnsTs() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getRowWithColumnsTs()
          +        throws IOError, org.apache.thrift.TException {
                 getRowWithColumnsTs_result result = new getRowWithColumnsTs_result();
                 receiveBase(result, "getRowWithColumnsTs");
                 if (result.isSetSuccess()) {
          @@ -1248,17 +1352,23 @@ public java.util.List recv_getRowWithColumnsTs() throws IOError, org
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRowWithColumnsTs failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getRowWithColumnsTs failed: unknown result");
               }
           
          -    public java.util.List getRows(java.nio.ByteBuffer tableName, java.util.List rows, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getRows(java.nio.ByteBuffer tableName,
          +        java.util.List rows,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getRows(tableName, rows, attributes);
                 return recv_getRows();
               }
           
          -    public void send_getRows(java.nio.ByteBuffer tableName, java.util.List rows, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getRows(java.nio.ByteBuffer tableName,
          +        java.util.List rows,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getRows_args args = new getRows_args();
                 args.setTableName(tableName);
                 args.setRows(rows);
@@ -1266,8 +1376,7 @@ public void send_getRows(java.nio.ByteBuffer tableName, java.util.List
       sendBase("getRows", args);
     }
 
-    public java.util.List recv_getRows() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getRows() throws IOError, org.apache.thrift.TException {
                 getRows_result result = new getRows_result();
                 receiveBase(result, "getRows");
                 if (result.isSetSuccess()) {
          @@ -1276,17 +1385,22 @@ public java.util.List recv_getRows() throws IOError, org.apache.thri
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRows failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "getRows failed: unknown result");
               }
           
          -    public java.util.List getRowsWithColumns(java.nio.ByteBuffer tableName, java.util.List rows, java.util.List columns, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getRowsWithColumns(java.nio.ByteBuffer tableName,
          +        java.util.List rows, java.util.List columns,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getRowsWithColumns(tableName, rows, columns, attributes);
                 return recv_getRowsWithColumns();
               }
           
          -    public void send_getRowsWithColumns(java.nio.ByteBuffer tableName, java.util.List rows, java.util.List columns, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getRowsWithColumns(java.nio.ByteBuffer tableName,
          +        java.util.List rows, java.util.List columns,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getRowsWithColumns_args args = new getRowsWithColumns_args();
                 args.setTableName(tableName);
                 args.setRows(rows);
          @@ -1295,8 +1409,8 @@ public void send_getRowsWithColumns(java.nio.ByteBuffer tableName, java.util.Lis
                 sendBase("getRowsWithColumns", args);
               }
           
          -    public java.util.List recv_getRowsWithColumns() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getRowsWithColumns()
          +        throws IOError, org.apache.thrift.TException {
                 getRowsWithColumns_result result = new getRowsWithColumns_result();
                 receiveBase(result, "getRowsWithColumns");
                 if (result.isSetSuccess()) {
          @@ -1305,17 +1419,23 @@ public java.util.List recv_getRowsWithColumns() throws IOError, org.
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRowsWithColumns failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getRowsWithColumns failed: unknown result");
               }
           
          -    public java.util.List getRowsTs(java.nio.ByteBuffer tableName, java.util.List rows, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getRowsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rows, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getRowsTs(tableName, rows, timestamp, attributes);
                 return recv_getRowsTs();
               }
           
          -    public void send_getRowsTs(java.nio.ByteBuffer tableName, java.util.List rows, long timestamp, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getRowsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rows, long timestamp,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getRowsTs_args args = new getRowsTs_args();
                 args.setTableName(tableName);
                 args.setRows(rows);
@@ -1324,8 +1444,8 @@ public void send_getRowsTs(java.nio.ByteBuffer tableName, java.util.List
       sendBase("getRowsTs", args);
     }
 
-    public java.util.List recv_getRowsTs() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getRowsTs()
          +        throws IOError, org.apache.thrift.TException {
                 getRowsTs_result result = new getRowsTs_result();
                 receiveBase(result, "getRowsTs");
                 if (result.isSetSuccess()) {
          @@ -1334,17 +1454,23 @@ public java.util.List recv_getRowsTs() throws IOError, org.apache.th
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRowsTs failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getRowsTs failed: unknown result");
               }
           
          -    public java.util.List getRowsWithColumnsTs(java.nio.ByteBuffer tableName, java.util.List rows, java.util.List columns, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getRowsWithColumnsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rows, java.util.List columns,
          +        long timestamp, java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_getRowsWithColumnsTs(tableName, rows, columns, timestamp, attributes);
                 return recv_getRowsWithColumnsTs();
               }
           
          -    public void send_getRowsWithColumnsTs(java.nio.ByteBuffer tableName, java.util.List rows, java.util.List columns, long timestamp, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_getRowsWithColumnsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rows, java.util.List columns,
          +        long timestamp, java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 getRowsWithColumnsTs_args args = new getRowsWithColumnsTs_args();
                 args.setTableName(tableName);
                 args.setRows(rows);
          @@ -1354,8 +1480,8 @@ public void send_getRowsWithColumnsTs(java.nio.ByteBuffer tableName, java.util.L
                 sendBase("getRowsWithColumnsTs", args);
               }
           
          -    public java.util.List recv_getRowsWithColumnsTs() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getRowsWithColumnsTs()
          +        throws IOError, org.apache.thrift.TException {
                 getRowsWithColumnsTs_result result = new getRowsWithColumnsTs_result();
                 receiveBase(result, "getRowsWithColumnsTs");
                 if (result.isSetSuccess()) {
          @@ -1364,17 +1490,23 @@ public java.util.List recv_getRowsWithColumnsTs() throws IOError, or
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRowsWithColumnsTs failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getRowsWithColumnsTs failed: unknown result");
               }
           
          -    public void mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List mutations, java.util.Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public void mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List mutations,
          +        java.util.Map attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 send_mutateRow(tableName, row, mutations, attributes);
                 recv_mutateRow();
               }
           
          -    public void send_mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List mutations, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List mutations,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 mutateRow_args args = new mutateRow_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1383,8 +1515,7 @@ public void send_mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer ro
                 sendBase("mutateRow", args);
               }
           
          -    public void recv_mutateRow() throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public void recv_mutateRow() throws IOError, IllegalArgument, org.apache.thrift.TException {
                 mutateRow_result result = new mutateRow_result();
                 receiveBase(result, "mutateRow");
                 if (result.io != null) {
          @@ -1396,14 +1527,18 @@ public void recv_mutateRow() throws IOError, IllegalArgument, org.apache.thrift.
                 return;
               }
           
          -    public void mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List mutations, long timestamp, java.util.Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public void mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List mutations, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 send_mutateRowTs(tableName, row, mutations, timestamp, attributes);
                 recv_mutateRowTs();
               }
           
          -    public void send_mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List mutations, long timestamp, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List mutations, long timestamp,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 mutateRowTs_args args = new mutateRowTs_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1413,8 +1548,7 @@ public void send_mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer
                 sendBase("mutateRowTs", args);
               }
           
          -    public void recv_mutateRowTs() throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public void recv_mutateRowTs() throws IOError, IllegalArgument, org.apache.thrift.TException {
                 mutateRowTs_result result = new mutateRowTs_result();
                 receiveBase(result, "mutateRowTs");
                 if (result.io != null) {
          @@ -1426,14 +1560,17 @@ public void recv_mutateRowTs() throws IOError, IllegalArgument, org.apache.thrif
                 return;
               }
           
          -    public void mutateRows(java.nio.ByteBuffer tableName, java.util.List rowBatches, java.util.Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public void mutateRows(java.nio.ByteBuffer tableName, java.util.List rowBatches,
          +        java.util.Map attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 send_mutateRows(tableName, rowBatches, attributes);
                 recv_mutateRows();
               }
           
          -    public void send_mutateRows(java.nio.ByteBuffer tableName, java.util.List rowBatches, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_mutateRows(java.nio.ByteBuffer tableName,
          +        java.util.List rowBatches,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 mutateRows_args args = new mutateRows_args();
                 args.setTableName(tableName);
                 args.setRowBatches(rowBatches);
          @@ -1441,8 +1578,7 @@ public void send_mutateRows(java.nio.ByteBuffer tableName, java.util.List rowBatches, long timestamp, java.util.Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public void mutateRowsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rowBatches, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 send_mutateRowsTs(tableName, rowBatches, timestamp, attributes);
                 recv_mutateRowsTs();
               }
           
          -    public void send_mutateRowsTs(java.nio.ByteBuffer tableName, java.util.List rowBatches, long timestamp, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_mutateRowsTs(java.nio.ByteBuffer tableName,
          +        java.util.List rowBatches, long timestamp,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 mutateRowsTs_args args = new mutateRowsTs_args();
                 args.setTableName(tableName);
                 args.setRowBatches(rowBatches);
          @@ -1470,8 +1610,7 @@ public void send_mutateRowsTs(java.nio.ByteBuffer tableName, java.util.List attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public void deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_deleteAll(tableName, row, column, attributes);
                 recv_deleteAll();
               }
           
          -    public void send_deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 deleteAll_args args = new deleteAll_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1531,8 +1677,7 @@ public void send_deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer ro
                 sendBase("deleteAll", args);
               }
           
          -    public void recv_deleteAll() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_deleteAll() throws IOError, org.apache.thrift.TException {
                 deleteAll_result result = new deleteAll_result();
                 receiveBase(result, "deleteAll");
                 if (result.io != null) {
          @@ -1541,14 +1686,18 @@ public void recv_deleteAll() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public void deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long timestamp,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_deleteAllTs(tableName, row, column, timestamp, attributes);
                 recv_deleteAllTs();
               }
           
          -    public void send_deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long timestamp,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 deleteAllTs_args args = new deleteAllTs_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1558,8 +1707,7 @@ public void send_deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer
                 sendBase("deleteAllTs", args);
               }
           
          -    public void recv_deleteAllTs() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_deleteAllTs() throws IOError, org.apache.thrift.TException {
                 deleteAllTs_result result = new deleteAllTs_result();
                 receiveBase(result, "deleteAllTs");
                 if (result.io != null) {
          @@ -1568,14 +1716,16 @@ public void recv_deleteAllTs() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public void deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.Map attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_deleteAllRow(tableName, row, attributes);
                 recv_deleteAllRow();
               }
           
          -    public void send_deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.Map attributes)
          +        throws org.apache.thrift.TException {
                 deleteAllRow_args args = new deleteAllRow_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1583,8 +1733,7 @@ public void send_deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer
                 sendBase("deleteAllRow", args);
               }
           
          -    public void recv_deleteAllRow() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_deleteAllRow() throws IOError, org.apache.thrift.TException {
                 deleteAllRow_result result = new deleteAllRow_result();
                 receiveBase(result, "deleteAllRow");
                 if (result.io != null) {
          @@ -1593,21 +1742,18 @@ public void recv_deleteAllRow() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void increment(TIncrement increment) throws IOError, org.apache.thrift.TException
          -    {
          +    public void increment(TIncrement increment) throws IOError, org.apache.thrift.TException {
                 send_increment(increment);
                 recv_increment();
               }
           
          -    public void send_increment(TIncrement increment) throws org.apache.thrift.TException
          -    {
          +    public void send_increment(TIncrement increment) throws org.apache.thrift.TException {
                 increment_args args = new increment_args();
                 args.setIncrement(increment);
                 sendBase("increment", args);
               }
           
          -    public void recv_increment() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_increment() throws IOError, org.apache.thrift.TException {
                 increment_result result = new increment_result();
                 receiveBase(result, "increment");
                 if (result.io != null) {
          @@ -1616,21 +1762,20 @@ public void recv_increment() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void incrementRows(java.util.List<TIncrement> increments) throws IOError, org.apache.thrift.TException
          -    {
          +    public void incrementRows(java.util.List<TIncrement> increments)
          +        throws IOError, org.apache.thrift.TException {
                 send_incrementRows(increments);
                 recv_incrementRows();
               }
           
          -    public void send_incrementRows(java.util.List<TIncrement> increments) throws org.apache.thrift.TException
          -    {
          +    public void send_incrementRows(java.util.List<TIncrement> increments)
          +        throws org.apache.thrift.TException {
                 incrementRows_args args = new incrementRows_args();
                 args.setIncrements(increments);
                 sendBase("incrementRows", args);
               }
           
          -    public void recv_incrementRows() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_incrementRows() throws IOError, org.apache.thrift.TException {
                 incrementRows_result result = new incrementRows_result();
                 receiveBase(result, "incrementRows");
                 if (result.io != null) {
          @@ -1639,14 +1784,16 @@ public void recv_incrementRows() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public void deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_deleteAllRowTs(tableName, row, timestamp, attributes);
                 recv_deleteAllRowTs();
               }
           
          -    public void send_deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws org.apache.thrift.TException {
                 deleteAllRowTs_args args = new deleteAllRowTs_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1655,8 +1802,7 @@ public void send_deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuff
                 sendBase("deleteAllRowTs", args);
               }
           
          -    public void recv_deleteAllRowTs() throws IOError, org.apache.thrift.TException
          -    {
          +    public void recv_deleteAllRowTs() throws IOError, org.apache.thrift.TException {
                 deleteAllRowTs_result result = new deleteAllRowTs_result();
                 receiveBase(result, "deleteAllRowTs");
                 if (result.io != null) {
          @@ -1665,14 +1811,16 @@ public void recv_deleteAllRowTs() throws IOError, org.apache.thrift.TException
                 return;
               }
           
          -    public int scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public int scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_scannerOpenWithScan(tableName, scan, attributes);
                 return recv_scannerOpenWithScan();
               }
           
          -    public void send_scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws org.apache.thrift.TException {
                 scannerOpenWithScan_args args = new scannerOpenWithScan_args();
                 args.setTableName(tableName);
                 args.setScan(scan);
          @@ -1680,8 +1828,7 @@ public void send_scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan,
                 sendBase("scannerOpenWithScan", args);
               }
           
          -    public int recv_scannerOpenWithScan() throws IOError, org.apache.thrift.TException
          -    {
          +    public int recv_scannerOpenWithScan() throws IOError, org.apache.thrift.TException {
                 scannerOpenWithScan_result result = new scannerOpenWithScan_result();
                 receiveBase(result, "scannerOpenWithScan");
                 if (result.isSetSuccess()) {
          @@ -1690,17 +1837,23 @@ public int recv_scannerOpenWithScan() throws IOError, org.apache.thrift.TExcepti
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "scannerOpenWithScan failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "scannerOpenWithScan failed: unknown result");
               }
           
          -    public int scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public int scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.util.List<java.nio.ByteBuffer> columns,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_scannerOpen(tableName, startRow, columns, attributes);
                 return recv_scannerOpen();
               }
           
          -    public void send_scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.util.List<java.nio.ByteBuffer> columns,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws org.apache.thrift.TException {
                 scannerOpen_args args = new scannerOpen_args();
                 args.setTableName(tableName);
                 args.setStartRow(startRow);
          @@ -1709,8 +1862,7 @@ public void send_scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer
                 sendBase("scannerOpen", args);
               }
           
          -    public int recv_scannerOpen() throws IOError, org.apache.thrift.TException
          -    {
          +    public int recv_scannerOpen() throws IOError, org.apache.thrift.TException {
                 scannerOpen_result result = new scannerOpen_result();
                 receiveBase(result, "scannerOpen");
                 if (result.isSetSuccess()) {
          @@ -1719,17 +1871,24 @@ public int recv_scannerOpen() throws IOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "scannerOpen failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "scannerOpen failed: unknown result");
               }
           
          -    public int scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public int scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_scannerOpenWithStop(tableName, startRow, stopRow, columns, attributes);
                 return recv_scannerOpenWithStop();
               }
           
          -    public void send_scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_scannerOpenWithStop(java.nio.ByteBuffer tableName,
          +        java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow,
          +        java.util.List<java.nio.ByteBuffer> columns,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws org.apache.thrift.TException {
                 scannerOpenWithStop_args args = new scannerOpenWithStop_args();
                 args.setTableName(tableName);
                 args.setStartRow(startRow);
          @@ -1739,8 +1898,7 @@ public void send_scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.Byt
                 sendBase("scannerOpenWithStop", args);
               }
           
          -    public int recv_scannerOpenWithStop() throws IOError, org.apache.thrift.TException
          -    {
          +    public int recv_scannerOpenWithStop() throws IOError, org.apache.thrift.TException {
                 scannerOpenWithStop_result result = new scannerOpenWithStop_result();
                 receiveBase(result, "scannerOpenWithStop");
                 if (result.isSetSuccess()) {
          @@ -1749,17 +1907,23 @@ public int recv_scannerOpenWithStop() throws IOError, org.apache.thrift.TExcepti
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "scannerOpenWithStop failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "scannerOpenWithStop failed: unknown result");
               }
           
          -    public int scannerOpenWithPrefix(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startAndPrefix, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public int scannerOpenWithPrefix(java.nio.ByteBuffer tableName,
          +        java.nio.ByteBuffer startAndPrefix, java.util.List<java.nio.ByteBuffer> columns,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_scannerOpenWithPrefix(tableName, startAndPrefix, columns, attributes);
                 return recv_scannerOpenWithPrefix();
               }
           
          -    public void send_scannerOpenWithPrefix(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startAndPrefix, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_scannerOpenWithPrefix(java.nio.ByteBuffer tableName,
          +        java.nio.ByteBuffer startAndPrefix, java.util.List<java.nio.ByteBuffer> columns,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws org.apache.thrift.TException {
                 scannerOpenWithPrefix_args args = new scannerOpenWithPrefix_args();
                 args.setTableName(tableName);
                 args.setStartAndPrefix(startAndPrefix);
          @@ -1768,8 +1932,7 @@ public void send_scannerOpenWithPrefix(java.nio.ByteBuffer tableName, java.nio.B
                 sendBase("scannerOpenWithPrefix", args);
               }
           
          -    public int recv_scannerOpenWithPrefix() throws IOError, org.apache.thrift.TException
          -    {
          +    public int recv_scannerOpenWithPrefix() throws IOError, org.apache.thrift.TException {
                 scannerOpenWithPrefix_result result = new scannerOpenWithPrefix_result();
                 receiveBase(result, "scannerOpenWithPrefix");
                 if (result.isSetSuccess()) {
          @@ -1778,17 +1941,23 @@ public int recv_scannerOpenWithPrefix() throws IOError, org.apache.thrift.TExcep
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "scannerOpenWithPrefix failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "scannerOpenWithPrefix failed: unknown result");
               }
           
          -    public int scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public int scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.util.List<java.nio.ByteBuffer> columns, long timestamp,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_scannerOpenTs(tableName, startRow, columns, timestamp, attributes);
                 return recv_scannerOpenTs();
               }
           
          -    public void send_scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.util.List<java.nio.ByteBuffer> columns, long timestamp,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws org.apache.thrift.TException {
                 scannerOpenTs_args args = new scannerOpenTs_args();
                 args.setTableName(tableName);
                 args.setStartRow(startRow);
          @@ -1798,8 +1967,7 @@ public void send_scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffe
                 sendBase("scannerOpenTs", args);
               }
           
          -    public int recv_scannerOpenTs() throws IOError, org.apache.thrift.TException
          -    {
          +    public int recv_scannerOpenTs() throws IOError, org.apache.thrift.TException {
                 scannerOpenTs_result result = new scannerOpenTs_result();
                 receiveBase(result, "scannerOpenTs");
                 if (result.isSetSuccess()) {
          @@ -1808,17 +1976,24 @@ public int recv_scannerOpenTs() throws IOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "scannerOpenTs failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "scannerOpenTs failed: unknown result");
               }
           
          -    public int scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, org.apache.thrift.TException
          -    {
          +    public int scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, org.apache.thrift.TException {
                 send_scannerOpenWithStopTs(tableName, startRow, stopRow, columns, timestamp, attributes);
                 return recv_scannerOpenWithStopTs();
               }
           
          -    public void send_scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_scannerOpenWithStopTs(java.nio.ByteBuffer tableName,
          +        java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow,
          +        java.util.List<java.nio.ByteBuffer> columns, long timestamp,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws org.apache.thrift.TException {
                 scannerOpenWithStopTs_args args = new scannerOpenWithStopTs_args();
                 args.setTableName(tableName);
                 args.setStartRow(startRow);
          @@ -1829,8 +2004,7 @@ public void send_scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.B
                 sendBase("scannerOpenWithStopTs", args);
               }
           
          -    public int recv_scannerOpenWithStopTs() throws IOError, org.apache.thrift.TException
          -    {
          +    public int recv_scannerOpenWithStopTs() throws IOError, org.apache.thrift.TException {
                 scannerOpenWithStopTs_result result = new scannerOpenWithStopTs_result();
                 receiveBase(result, "scannerOpenWithStopTs");
                 if (result.isSetSuccess()) {
          @@ -1839,24 +2013,25 @@ public int recv_scannerOpenWithStopTs() throws IOError, org.apache.thrift.TExcep
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "scannerOpenWithStopTs failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "scannerOpenWithStopTs failed: unknown result");
               }
           
          -    public java.util.List<TRowResult> scannerGet(int id) throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public java.util.List<TRowResult> scannerGet(int id)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 send_scannerGet(id);
                 return recv_scannerGet();
               }
           
          -    public void send_scannerGet(int id) throws org.apache.thrift.TException
          -    {
          +    public void send_scannerGet(int id) throws org.apache.thrift.TException {
                 scannerGet_args args = new scannerGet_args();
                 args.setId(id);
                 sendBase("scannerGet", args);
               }
           
          -    public java.util.List<TRowResult> recv_scannerGet() throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public java.util.List<TRowResult> recv_scannerGet()
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 scannerGet_result result = new scannerGet_result();
                 receiveBase(result, "scannerGet");
                 if (result.isSetSuccess()) {
          @@ -1868,25 +2043,26 @@ public java.util.List recv_scannerGet() throws IOError, IllegalArgum
                 if (result.ia != null) {
                   throw result.ia;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "scannerGet failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "scannerGet failed: unknown result");
               }
           
          -    public java.util.List<TRowResult> scannerGetList(int id, int nbRows) throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public java.util.List<TRowResult> scannerGetList(int id, int nbRows)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 send_scannerGetList(id, nbRows);
                 return recv_scannerGetList();
               }
           
          -    public void send_scannerGetList(int id, int nbRows) throws org.apache.thrift.TException
          -    {
          +    public void send_scannerGetList(int id, int nbRows) throws org.apache.thrift.TException {
                 scannerGetList_args args = new scannerGetList_args();
                 args.setId(id);
                 args.setNbRows(nbRows);
                 sendBase("scannerGetList", args);
               }
           
          -    public java.util.List<TRowResult> recv_scannerGetList() throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public java.util.List<TRowResult> recv_scannerGetList()
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 scannerGetList_result result = new scannerGetList_result();
                 receiveBase(result, "scannerGetList");
                 if (result.isSetSuccess()) {
          @@ -1898,24 +2074,23 @@ public java.util.List recv_scannerGetList() throws IOError, IllegalA
                 if (result.ia != null) {
                   throw result.ia;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "scannerGetList failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "scannerGetList failed: unknown result");
               }
           
          -    public void scannerClose(int id) throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public void scannerClose(int id) throws IOError, IllegalArgument, org.apache.thrift.TException {
                 send_scannerClose(id);
                 recv_scannerClose();
               }
           
          -    public void send_scannerClose(int id) throws org.apache.thrift.TException
          -    {
          +    public void send_scannerClose(int id) throws org.apache.thrift.TException {
                 scannerClose_args args = new scannerClose_args();
                 args.setId(id);
                 sendBase("scannerClose", args);
               }
           
          -    public void recv_scannerClose() throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public void recv_scannerClose() throws IOError, IllegalArgument, org.apache.thrift.TException {
                 scannerClose_result result = new scannerClose_result();
                 receiveBase(result, "scannerClose");
                 if (result.io != null) {
          @@ -1927,21 +2102,19 @@ public void recv_scannerClose() throws IOError, IllegalArgument, org.apache.thri
                 return;
               }
           
          -    public TRegionInfo getRegionInfo(java.nio.ByteBuffer row) throws IOError, org.apache.thrift.TException
          -    {
          +    public TRegionInfo getRegionInfo(java.nio.ByteBuffer row)
          +        throws IOError, org.apache.thrift.TException {
                 send_getRegionInfo(row);
                 return recv_getRegionInfo();
               }
           
          -    public void send_getRegionInfo(java.nio.ByteBuffer row) throws org.apache.thrift.TException
          -    {
          +    public void send_getRegionInfo(java.nio.ByteBuffer row) throws org.apache.thrift.TException {
                 getRegionInfo_args args = new getRegionInfo_args();
                 args.setRow(row);
                 sendBase("getRegionInfo", args);
               }
           
          -    public TRegionInfo recv_getRegionInfo() throws IOError, org.apache.thrift.TException
          -    {
          +    public TRegionInfo recv_getRegionInfo() throws IOError, org.apache.thrift.TException {
                 getRegionInfo_result result = new getRegionInfo_result();
                 receiveBase(result, "getRegionInfo");
                 if (result.isSetSuccess()) {
          @@ -1950,24 +2123,24 @@ public TRegionInfo recv_getRegionInfo() throws IOError, org.apache.thrift.TExcep
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRegionInfo failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getRegionInfo failed: unknown result");
               }
           
          -    public java.util.List<TCell> append(TAppend append) throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List<TCell> append(TAppend append)
          +        throws IOError, org.apache.thrift.TException {
                 send_append(append);
                 return recv_append();
               }
           
          -    public void send_append(TAppend append) throws org.apache.thrift.TException
          -    {
          +    public void send_append(TAppend append) throws org.apache.thrift.TException {
                 append_args args = new append_args();
                 args.setAppend(append);
                 sendBase("append", args);
               }
           
          -    public java.util.List<TCell> recv_append() throws IOError, org.apache.thrift.TException
          -    {
          +    public java.util.List<TCell> recv_append() throws IOError, org.apache.thrift.TException {
                 append_result result = new append_result();
                 receiveBase(result, "append");
                 if (result.isSetSuccess()) {
          @@ -1976,17 +2149,22 @@ public java.util.List recv_append() throws IOError, org.apache.thrift.TEx
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "append failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "append failed: unknown result");
               }
           
          -    public boolean checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public boolean checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 send_checkAndPut(tableName, row, column, value, mput, attributes);
                 return recv_checkAndPut();
               }
           
          -    public void send_checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) throws org.apache.thrift.TException
          -    {
          +    public void send_checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes)
          +        throws org.apache.thrift.TException {
                 checkAndPut_args args = new checkAndPut_args();
                 args.setTableName(tableName);
                 args.setRow(row);
          @@ -1997,8 +2175,8 @@ public void send_checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer
                 sendBase("checkAndPut", args);
               }
           
          -    public boolean recv_checkAndPut() throws IOError, IllegalArgument, org.apache.thrift.TException
          -    {
          +    public boolean recv_checkAndPut()
          +        throws IOError, IllegalArgument, org.apache.thrift.TException {
                 checkAndPut_result result = new checkAndPut_result();
                 receiveBase(result, "checkAndPut");
                 if (result.isSetSuccess()) {
          @@ -2010,68 +2188,65 @@ public boolean recv_checkAndPut() throws IOError, IllegalArgument, org.apache.th
                 if (result.ia != null) {
                   throw result.ia;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "checkAndPut failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "checkAndPut failed: unknown result");
               }
           
          -    public TThriftServerType getThriftServerType() throws org.apache.thrift.TException
          -    {
          +    public TThriftServerType getThriftServerType() throws org.apache.thrift.TException {
                 send_getThriftServerType();
                 return recv_getThriftServerType();
               }
           
          -    public void send_getThriftServerType() throws org.apache.thrift.TException
          -    {
          +    public void send_getThriftServerType() throws org.apache.thrift.TException {
                 getThriftServerType_args args = new getThriftServerType_args();
                 sendBase("getThriftServerType", args);
               }
           
          -    public TThriftServerType recv_getThriftServerType() throws org.apache.thrift.TException
          -    {
          +    public TThriftServerType recv_getThriftServerType() throws org.apache.thrift.TException {
                 getThriftServerType_result result = new getThriftServerType_result();
                 receiveBase(result, "getThriftServerType");
                 if (result.isSetSuccess()) {
                   return result.success;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getThriftServerType failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getThriftServerType failed: unknown result");
               }
           
          -    public java.lang.String getClusterId() throws org.apache.thrift.TException
          -    {
          +    public java.lang.String getClusterId() throws org.apache.thrift.TException {
                 send_getClusterId();
                 return recv_getClusterId();
               }
           
          -    public void send_getClusterId() throws org.apache.thrift.TException
          -    {
          +    public void send_getClusterId() throws org.apache.thrift.TException {
                 getClusterId_args args = new getClusterId_args();
                 sendBase("getClusterId", args);
               }
           
          -    public java.lang.String recv_getClusterId() throws org.apache.thrift.TException
          -    {
          +    public java.lang.String recv_getClusterId() throws org.apache.thrift.TException {
                 getClusterId_result result = new getClusterId_result();
                 receiveBase(result, "getClusterId");
                 if (result.isSetSuccess()) {
                   return result.success;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getClusterId failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getClusterId failed: unknown result");
               }
           
          -    public boolean grant(TAccessControlEntity info) throws IOError, org.apache.thrift.TException
          -    {
          +    public boolean grant(TAccessControlEntity info) throws IOError, org.apache.thrift.TException {
                 send_grant(info);
                 return recv_grant();
               }
           
          -    public void send_grant(TAccessControlEntity info) throws org.apache.thrift.TException
          -    {
          +    public void send_grant(TAccessControlEntity info) throws org.apache.thrift.TException {
                 grant_args args = new grant_args();
                 args.setInfo(info);
                 sendBase("grant", args);
               }
           
          -    public boolean recv_grant() throws IOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_grant() throws IOError, org.apache.thrift.TException {
                 grant_result result = new grant_result();
                 receiveBase(result, "grant");
                 if (result.isSetSuccess()) {
          @@ -2080,24 +2255,22 @@ public boolean recv_grant() throws IOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "grant failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "grant failed: unknown result");
               }
           
          -    public boolean revoke(TAccessControlEntity info) throws IOError, org.apache.thrift.TException
          -    {
          +    public boolean revoke(TAccessControlEntity info) throws IOError, org.apache.thrift.TException {
                 send_revoke(info);
                 return recv_revoke();
               }
           
          -    public void send_revoke(TAccessControlEntity info) throws org.apache.thrift.TException
          -    {
          +    public void send_revoke(TAccessControlEntity info) throws org.apache.thrift.TException {
                 revoke_args args = new revoke_args();
                 args.setInfo(info);
                 sendBase("revoke", args);
               }
           
          -    public boolean recv_revoke() throws IOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_revoke() throws IOError, org.apache.thrift.TException {
                 revoke_result result = new revoke_result();
                 receiveBase(result, "revoke");
                 if (result.isSetSuccess()) {
          @@ -2106,43 +2279,64 @@ public boolean recv_revoke() throws IOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "revoke failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "revoke failed: unknown result");
               }
           
             }
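The synchronous Client wrapped above keeps Thrift's generated send_*/recv_* structure: each public method serializes its *_args struct through sendBase, then reads the matching *_result and either returns the success field or rethrows the declared exceptions. A minimal usage sketch of that pattern, assuming a Thrift1 server reachable on localhost:9090 and a table named "example" (host, port and table name are illustrative assumptions, not part of this change):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ThriftOneClientSketch {
  public static void main(String[] args) throws Exception {
    // Host and port are assumptions for illustration only.
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    TProtocol protocol = new TBinaryProtocol(transport);
    Hbase.Client client = new Hbase.Client(protocol);

    // Hypothetical table name; each call below is a send_*/recv_* pair under the hood.
    ByteBuffer table = ByteBuffer.wrap("example".getBytes(StandardCharsets.UTF_8));
    System.out.println("enabled: " + client.isTableEnabled(table));

    transport.close();
  }
}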
          -  public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
          -    public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
          +
          +  public static class AsyncClient extends org.apache.thrift.async.TAsyncClient
          +      implements AsyncIface {
          +    public static class Factory
          +        implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
                 private org.apache.thrift.async.TAsyncClientManager clientManager;
                 private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
          -      public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
          +
          +      public Factory(org.apache.thrift.async.TAsyncClientManager clientManager,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
                   this.clientManager = clientManager;
                   this.protocolFactory = protocolFactory;
                 }
          -      public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
          +
          +      public AsyncClient
          +          getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
                   return new AsyncClient(protocolFactory, clientManager, transport);
                 }
               }
           
          -    public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
          +    public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +        org.apache.thrift.async.TAsyncClientManager clientManager,
          +        org.apache.thrift.transport.TNonblockingTransport transport) {
                 super(protocolFactory, clientManager, transport);
               }
           
          -    public void enableTable(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          +    public void enableTable(java.nio.ByteBuffer tableName,
          +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      enableTable_call method_call = new enableTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      enableTable_call method_call =
          +          new enableTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
               public static class enableTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private java.nio.ByteBuffer tableName;
          -      public enableTable_call(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public enableTable_call(java.nio.ByteBuffer tableName,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("enableTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("enableTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   enableTable_args args = new enableTable_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -2153,28 +2347,41 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
          -    public void disableTable(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          +    public void disableTable(java.nio.ByteBuffer tableName,
          +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      disableTable_call method_call = new disableTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      disableTable_call method_call =
          +          new disableTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
               public static class disableTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private java.nio.ByteBuffer tableName;
          -      public disableTable_call(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public disableTable_call(java.nio.ByteBuffer tableName,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("disableTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("disableTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   disableTable_args args = new disableTable_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -2185,28 +2392,42 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
          -    public void isTableEnabled(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
          +    public void isTableEnabled(java.nio.ByteBuffer tableName,
          +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      isTableEnabled_call method_call = new isTableEnabled_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      isTableEnabled_call method_call =
          +          new isTableEnabled_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class isTableEnabled_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
          +    public static class isTableEnabled_call
          +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                 private java.nio.ByteBuffer tableName;
          -      public isTableEnabled_call(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public isTableEnabled_call(java.nio.ByteBuffer tableName,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableEnabled", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableEnabled",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   isTableEnabled_args args = new isTableEnabled_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -2217,28 +2438,41 @@ public java.lang.Boolean getResult() throws IOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_isTableEnabled();
                 }
               }
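Each *_call class above follows the same shape: the public async method builds the call object and hands it to the TAsyncClientManager, write_args frames the request, and getResult replays the buffered response through a blocking Client to decode it before the callback fires. A sketch of driving one of these calls end to end, using the same hypothetical host, port and table name as in the synchronous example:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.async.TAsyncClientManager;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TNonblockingSocket;

public class ThriftOneAsyncClientSketch {
  public static void main(String[] args) throws Exception {
    // The manager runs the selector thread that completes pending *_call objects.
    TAsyncClientManager manager = new TAsyncClientManager();
    TNonblockingSocket transport = new TNonblockingSocket("localhost", 9090);
    Hbase.AsyncClient client =
        new Hbase.AsyncClient(new TBinaryProtocol.Factory(), manager, transport);

    // Hypothetical table name; the callback receives the value produced by getResult().
    ByteBuffer table = ByteBuffer.wrap("example".getBytes(StandardCharsets.UTF_8));
    client.isTableEnabled(table, new AsyncMethodCallback<Boolean>() {
      @Override
      public void onComplete(Boolean enabled) {
        System.out.println("enabled: " + enabled);
      }

      @Override
      public void onError(Exception e) {
        e.printStackTrace();
      }
    });
    // A real caller would wait for the callback before shutting down the manager and transport.
  }
}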
           
          -    public void compact(java.nio.ByteBuffer tableNameOrRegionName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          +    public void compact(java.nio.ByteBuffer tableNameOrRegionName,
          +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      compact_call method_call = new compact_call(tableNameOrRegionName, resultHandler, this, ___protocolFactory, ___transport);
          +      compact_call method_call = new compact_call(tableNameOrRegionName, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
               public static class compact_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private java.nio.ByteBuffer tableNameOrRegionName;
          -      public compact_call(java.nio.ByteBuffer tableNameOrRegionName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public compact_call(java.nio.ByteBuffer tableNameOrRegionName,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableNameOrRegionName = tableNameOrRegionName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("compact", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("compact",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   compact_args args = new compact_args();
                   args.setTableNameOrRegionName(tableNameOrRegionName);
                   args.write(prot);
          @@ -2249,28 +2483,41 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
          -    public void majorCompact(java.nio.ByteBuffer tableNameOrRegionName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          +    public void majorCompact(java.nio.ByteBuffer tableNameOrRegionName,
          +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      majorCompact_call method_call = new majorCompact_call(tableNameOrRegionName, resultHandler, this, ___protocolFactory, ___transport);
          +      majorCompact_call method_call = new majorCompact_call(tableNameOrRegionName, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
               public static class majorCompact_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private java.nio.ByteBuffer tableNameOrRegionName;
          -      public majorCompact_call(java.nio.ByteBuffer tableNameOrRegionName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public majorCompact_call(java.nio.ByteBuffer tableNameOrRegionName,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableNameOrRegionName = tableNameOrRegionName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("majorCompact", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("majorCompact",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   majorCompact_args args = new majorCompact_args();
                   args.setTableNameOrRegionName(tableNameOrRegionName);
                   args.write(prot);
          @@ -2281,118 +2528,175 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void getTableNames(org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>> resultHandler) throws org.apache.thrift.TException {
           +    public void getTableNames(
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getTableNames_call method_call = new getTableNames_call(resultHandler, this, ___protocolFactory, ___transport);
          +      getTableNames_call method_call =
          +          new getTableNames_call(resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getTableNames_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<java.nio.ByteBuffer>> {
           -      public getTableNames_call(org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +    public static class getTableNames_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<java.nio.ByteBuffer>> {
           +      public getTableNames_call(
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableNames", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableNames",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getTableNames_args args = new getTableNames_args();
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<java.nio.ByteBuffer> getResult() throws IOError, org.apache.thrift.TException {
           +      public java.util.List<java.nio.ByteBuffer> getResult()
          +          throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getTableNames();
                 }
               }
           
           -    public void getTableNamesWithIsTableEnabled(org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>> resultHandler) throws org.apache.thrift.TException {
           +    public void getTableNamesWithIsTableEnabled(
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer, java.lang.Boolean>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getTableNamesWithIsTableEnabled_call method_call = new getTableNamesWithIsTableEnabled_call(resultHandler, this, ___protocolFactory, ___transport);
          +      getTableNamesWithIsTableEnabled_call method_call = new getTableNamesWithIsTableEnabled_call(
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getTableNamesWithIsTableEnabled_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>> {
           -      public getTableNamesWithIsTableEnabled_call(org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +    public static class getTableNamesWithIsTableEnabled_call extends
           +        org.apache.thrift.async.TAsyncMethodCall<java.util.Map<java.nio.ByteBuffer, java.lang.Boolean>> {
           +      public getTableNamesWithIsTableEnabled_call(
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer, java.lang.Boolean>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableNamesWithIsTableEnabled", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage(
          +            "getTableNamesWithIsTableEnabled", org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getTableNamesWithIsTableEnabled_args args = new getTableNamesWithIsTableEnabled_args();
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.Map<java.nio.ByteBuffer,java.lang.Boolean> getResult() throws IOError, org.apache.thrift.TException {
           +      public java.util.Map<java.nio.ByteBuffer, java.lang.Boolean> getResult()
          +          throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getTableNamesWithIsTableEnabled();
                 }
               }
           
           -    public void getColumnDescriptors(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>> resultHandler) throws org.apache.thrift.TException {
           +    public void getColumnDescriptors(java.nio.ByteBuffer tableName,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer, ColumnDescriptor>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getColumnDescriptors_call method_call = new getColumnDescriptors_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      getColumnDescriptors_call method_call = new getColumnDescriptors_call(tableName,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getColumnDescriptors_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>> {
           +    public static class getColumnDescriptors_call extends
           +        org.apache.thrift.async.TAsyncMethodCall<java.util.Map<java.nio.ByteBuffer, ColumnDescriptor>> {
                  private java.nio.ByteBuffer tableName;
           -      public getColumnDescriptors_call(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getColumnDescriptors_call(java.nio.ByteBuffer tableName,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer, ColumnDescriptor>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getColumnDescriptors", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getColumnDescriptors",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getColumnDescriptors_args args = new getColumnDescriptors_args();
                   args.setTableName(tableName);
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.Map<java.nio.ByteBuffer,ColumnDescriptor> getResult() throws IOError, org.apache.thrift.TException {
           +      public java.util.Map<java.nio.ByteBuffer, ColumnDescriptor> getResult()
          +          throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getColumnDescriptors();
                 }
               }
           
           -    public void getTableRegions(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>> resultHandler) throws org.apache.thrift.TException {
           +    public void getTableRegions(java.nio.ByteBuffer tableName,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getTableRegions_call method_call = new getTableRegions_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      getTableRegions_call method_call = new getTableRegions_call(tableName, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getTableRegions_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRegionInfo>> {
           +    public static class getTableRegions_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRegionInfo>> {
                  private java.nio.ByteBuffer tableName;
           -      public getTableRegions_call(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getTableRegions_call(java.nio.ByteBuffer tableName,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableRegions", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableRegions",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getTableRegions_args args = new getTableRegions_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -2403,15 +2707,21 @@ public java.util.List getResult() throws IOError, org.apache.thrift
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getTableRegions();
                 }
               }
           
           -    public void createTable(java.nio.ByteBuffer tableName, java.util.List<ColumnDescriptor> columnFamilies, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void createTable(java.nio.ByteBuffer tableName,
           +        java.util.List<ColumnDescriptor> columnFamilies,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      createTable_call method_call = new createTable_call(tableName, columnFamilies, resultHandler, this, ___protocolFactory, ___transport);
          +      createTable_call method_call = new createTable_call(tableName, columnFamilies, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           @@ -2419,14 +2729,23 @@ public void createTable(java.nio.ByteBuffer tableName, java.util.List
                public static class createTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private java.nio.ByteBuffer tableName;
                  private java.util.List<ColumnDescriptor> columnFamilies;
           -      public createTable_call(java.nio.ByteBuffer tableName, java.util.List<ColumnDescriptor> columnFamilies, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public createTable_call(java.nio.ByteBuffer tableName,
           +          java.util.List<ColumnDescriptor> columnFamilies,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.columnFamilies = columnFamilies;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("createTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("createTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   createTable_args args = new createTable_args();
                   args.setTableName(tableName);
                   args.setColumnFamilies(columnFamilies);
          @@ -2434,32 +2753,46 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
                   prot.writeMessageEnd();
                 }
           
          -      public Void getResult() throws IOError, IllegalArgument, AlreadyExists, org.apache.thrift.TException {
          +      public Void getResult()
          +          throws IOError, IllegalArgument, AlreadyExists, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void deleteTable(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void deleteTable(java.nio.ByteBuffer tableName,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteTable_call method_call = new deleteTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteTable_call method_call =
          +          new deleteTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
                public static class deleteTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private java.nio.ByteBuffer tableName;
           -      public deleteTable_call(java.nio.ByteBuffer tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public deleteTable_call(java.nio.ByteBuffer tableName,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteTable_args args = new deleteTable_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -2470,25 +2803,41 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler) throws org.apache.thrift.TException {
           +    public void get(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.nio.ByteBuffer column,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      get_call method_call = new get_call(tableName, row, column, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      get_call method_call = new get_call(tableName, row, column, attributes, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class get_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TCell>> {
           +    public static class get_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TCell>> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer column;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public get_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public get_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.nio.ByteBuffer column,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -2496,8 +2845,10 @@ public get_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   get_args args = new get_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -2511,26 +2862,42 @@ public java.util.List getResult() throws IOError, org.apache.thrift.TExce
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_get();
                 }
               }
           
           -    public void getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, int numVersions, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler) throws org.apache.thrift.TException {
           +    public void getVer(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.nio.ByteBuffer column, int numVersions,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getVer_call method_call = new getVer_call(tableName, row, column, numVersions, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getVer_call method_call = new getVer_call(tableName, row, column, numVersions, attributes,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getVer_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TCell>> {
           +    public static class getVer_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TCell>> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer column;
                 private int numVersions;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getVer_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, int numVersions, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getVer_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.nio.ByteBuffer column, int numVersions,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -2539,8 +2906,10 @@ public getVer_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getVer", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getVer",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getVer_args args = new getVer_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -2555,27 +2924,43 @@ public java.util.List getResult() throws IOError, org.apache.thrift.TExce
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getVer();
                 }
               }
           
           -    public void getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, int numVersions, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler) throws org.apache.thrift.TException {
           +    public void getVerTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.nio.ByteBuffer column, long timestamp, int numVersions,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getVerTs_call method_call = new getVerTs_call(tableName, row, column, timestamp, numVersions, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getVerTs_call method_call = new getVerTs_call(tableName, row, column, timestamp, numVersions,
          +          attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getVerTs_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TCell>> {
           +    public static class getVerTs_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TCell>> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer column;
                 private long timestamp;
                 private int numVersions;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getVerTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, int numVersions, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getVerTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.nio.ByteBuffer column, long timestamp, int numVersions,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -2585,8 +2970,10 @@ public getVerTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, jav
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getVerTs", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getVerTs",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getVerTs_args args = new getVerTs_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -2602,32 +2989,48 @@ public java.util.List getResult() throws IOError, org.apache.thrift.TExce
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getVerTs();
                 }
               }
           
           -    public void getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRow_call method_call = new getRow_call(tableName, row, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getRow_call method_call = new getRow_call(tableName, row, attributes, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getRow_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class getRow_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getRow_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getRow_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRow", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRow",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getRow_args args = new getRow_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -2640,25 +3043,41 @@ public java.util.List getResult() throws IOError, org.apache.thrift.
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRow();
                 }
               }
           
           -    public void getRowWithColumns(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getRowWithColumns(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.util.List<java.nio.ByteBuffer> columns,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRowWithColumns_call method_call = new getRowWithColumns_call(tableName, row, columns, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getRowWithColumns_call method_call = new getRowWithColumns_call(tableName, row, columns,
          +          attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getRowWithColumns_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class getRowWithColumns_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                  private java.util.List<java.nio.ByteBuffer> columns;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getRowWithColumns_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getRowWithColumns_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.util.List<java.nio.ByteBuffer> columns,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -2666,8 +3085,10 @@ public getRowWithColumns_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRowWithColumns", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRowWithColumns",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getRowWithColumns_args args = new getRowWithColumns_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -2681,25 +3102,39 @@ public java.util.List getResult() throws IOError, org.apache.thrift.
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRowWithColumns();
                 }
               }
           
           -    public void getRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRowTs_call method_call = new getRowTs_call(tableName, row, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getRowTs_call method_call = new getRowTs_call(tableName, row, timestamp, attributes,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getRowTs_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class getRowTs_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                 private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getRowTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getRowTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -2707,8 +3142,10 @@ public getRowTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, lon
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRowTs", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRowTs",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getRowTs_args args = new getRowTs_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -2722,26 +3159,42 @@ public java.util.List getResult() throws IOError, org.apache.thrift.
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRowTs();
                 }
               }
           
           -    public void getRowWithColumnsTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getRowWithColumnsTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.util.List<java.nio.ByteBuffer> columns, long timestamp,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRowWithColumnsTs_call method_call = new getRowWithColumnsTs_call(tableName, row, columns, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getRowWithColumnsTs_call method_call = new getRowWithColumnsTs_call(tableName, row, columns,
          +          timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getRowWithColumnsTs_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class getRowWithColumnsTs_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                  private java.util.List<java.nio.ByteBuffer> columns;
                  private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getRowWithColumnsTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getRowWithColumnsTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.util.List<java.nio.ByteBuffer> columns, long timestamp,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -2750,8 +3203,10 @@ public getRowWithColumnsTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuff
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRowWithColumnsTs", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRowWithColumnsTs",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getRowWithColumnsTs_args args = new getRowWithColumnsTs_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -2766,32 +3221,48 @@ public java.util.List getResult() throws IOError, org.apache.thrift.
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRowWithColumnsTs();
                 }
               }
           
           -    public void getRows(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getRows(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRows_call method_call = new getRows_call(tableName, rows, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getRows_call method_call = new getRows_call(tableName, rows, attributes, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getRows_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class getRows_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                 private java.nio.ByteBuffer tableName;
                  private java.util.List<java.nio.ByteBuffer> rows;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getRows_call(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getRows_call(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.rows = rows;
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRows", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRows",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getRows_args args = new getRows_args();
                   args.setTableName(tableName);
                   args.setRows(rows);
          @@ -2804,25 +3275,41 @@ public java.util.List getResult() throws IOError, org.apache.thrift.
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRows();
                 }
               }
           
           -    public void getRowsWithColumns(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getRowsWithColumns(java.nio.ByteBuffer tableName,
           +        java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRowsWithColumns_call method_call = new getRowsWithColumns_call(tableName, rows, columns, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getRowsWithColumns_call method_call = new getRowsWithColumns_call(tableName, rows, columns,
          +          attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getRowsWithColumns_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class getRowsWithColumns_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                 private java.nio.ByteBuffer tableName;
                  private java.util.List<java.nio.ByteBuffer> rows;
                  private java.util.List<java.nio.ByteBuffer> columns;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getRowsWithColumns_call(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getRowsWithColumns_call(java.nio.ByteBuffer tableName,
           +          java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.rows = rows;
          @@ -2830,8 +3317,10 @@ public getRowsWithColumns_call(java.nio.ByteBuffer tableName, java.util.List getResult() throws IOError, org.apache.thrift.
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRowsWithColumns();
                 }
               }
           
           -    public void getRowsTs(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getRowsTs(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows,
           +        long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRowsTs_call method_call = new getRowsTs_call(tableName, rows, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getRowsTs_call method_call = new getRowsTs_call(tableName, rows, timestamp, attributes,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getRowsTs_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class getRowsTs_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                 private java.nio.ByteBuffer tableName;
                  private java.util.List<java.nio.ByteBuffer> rows;
                  private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getRowsTs_call(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getRowsTs_call(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows,
           +          long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.rows = rows;
          @@ -2871,8 +3374,10 @@ public getRowsTs_call(java.nio.ByteBuffer tableName, java.util.List getResult() throws IOError, org.apache.thrift.
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRowsTs();
                 }
               }
           
           -    public void getRowsWithColumnsTs(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getRowsWithColumnsTs(java.nio.ByteBuffer tableName,
           +        java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns,
           +        long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRowsWithColumnsTs_call method_call = new getRowsWithColumnsTs_call(tableName, rows, columns, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      getRowsWithColumnsTs_call method_call = new getRowsWithColumnsTs_call(tableName, rows,
          +          columns, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getRowsWithColumnsTs_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class getRowsWithColumnsTs_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                 private java.nio.ByteBuffer tableName;
                  private java.util.List<java.nio.ByteBuffer> rows;
                  private java.util.List<java.nio.ByteBuffer> columns;
                  private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public getRowsWithColumnsTs_call(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public getRowsWithColumnsTs_call(java.nio.ByteBuffer tableName,
           +          java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns,
           +          long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.rows = rows;
          @@ -2914,8 +3435,10 @@ public getRowsWithColumnsTs_call(java.nio.ByteBuffer tableName, java.util.List getResult() throws IOError, org.apache.thrift.
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRowsWithColumnsTs();
                 }
               }
           
           -    public void mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<Mutation> mutations, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void mutateRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.util.List<Mutation> mutations,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      mutateRow_call method_call = new mutateRow_call(tableName, row, mutations, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      mutateRow_call method_call = new mutateRow_call(tableName, row, mutations, attributes,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -2947,8 +3477,16 @@ public static class mutateRow_call extends org.apache.thrift.async.TAsyncMethodC
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                  private java.util.List<Mutation> mutations;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public mutateRow_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<Mutation> mutations, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public mutateRow_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.util.List<Mutation> mutations,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -2956,8 +3494,10 @@ public mutateRow_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, ja
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mutateRow", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mutateRow",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   mutateRow_args args = new mutateRow_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -2971,15 +3511,22 @@ public Void getResult() throws IOError, IllegalArgument, org.apache.thrift.TExce
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<Mutation> mutations, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void mutateRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.util.List<Mutation> mutations, long timestamp,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      mutateRowTs_call method_call = new mutateRowTs_call(tableName, row, mutations, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      mutateRowTs_call method_call = new mutateRowTs_call(tableName, row, mutations, timestamp,
          +          attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -2989,8 +3536,16 @@ public static class mutateRowTs_call extends org.apache.thrift.async.TAsyncMetho
                 private java.nio.ByteBuffer row;
                  private java.util.List<Mutation> mutations;
                  private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public mutateRowTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.List<Mutation> mutations, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public mutateRowTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.util.List<Mutation> mutations, long timestamp,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -2999,8 +3554,10 @@ public mutateRowTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mutateRowTs", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mutateRowTs",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   mutateRowTs_args args = new mutateRowTs_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -3015,15 +3572,21 @@ public Void getResult() throws IOError, IllegalArgument, org.apache.thrift.TExce
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void mutateRows(java.nio.ByteBuffer tableName, java.util.List<BatchMutation> rowBatches, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void mutateRows(java.nio.ByteBuffer tableName, java.util.List<BatchMutation> rowBatches,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      mutateRows_call method_call = new mutateRows_call(tableName, rowBatches, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      mutateRows_call method_call = new mutateRows_call(tableName, rowBatches, attributes,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -3031,16 +3594,26 @@ public void mutateRows(java.nio.ByteBuffer tableName, java.util.List {
                 private java.nio.ByteBuffer tableName;
                  private java.util.List<BatchMutation> rowBatches;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public mutateRows_call(java.nio.ByteBuffer tableName, java.util.List<BatchMutation> rowBatches, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public mutateRows_call(java.nio.ByteBuffer tableName,
           +          java.util.List<BatchMutation> rowBatches,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.rowBatches = rowBatches;
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mutateRows", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mutateRows",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   mutateRows_args args = new mutateRows_args();
                   args.setTableName(tableName);
                   args.setRowBatches(rowBatches);
          @@ -3053,15 +3626,22 @@ public Void getResult() throws IOError, IllegalArgument, org.apache.thrift.TExce
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void mutateRowsTs(java.nio.ByteBuffer tableName, java.util.List<BatchMutation> rowBatches, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void mutateRowsTs(java.nio.ByteBuffer tableName,
           +        java.util.List<BatchMutation> rowBatches, long timestamp,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      mutateRowsTs_call method_call = new mutateRowsTs_call(tableName, rowBatches, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      mutateRowsTs_call method_call = new mutateRowsTs_call(tableName, rowBatches, timestamp,
          +          attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -3070,8 +3650,16 @@ public static class mutateRowsTs_call extends org.apache.thrift.async.TAsyncMeth
                 private java.nio.ByteBuffer tableName;
                  private java.util.List<BatchMutation> rowBatches;
                  private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public mutateRowsTs_call(java.nio.ByteBuffer tableName, java.util.List<BatchMutation> rowBatches, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public mutateRowsTs_call(java.nio.ByteBuffer tableName,
           +          java.util.List<BatchMutation> rowBatches, long timestamp,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.rowBatches = rowBatches;
          @@ -3079,8 +3667,10 @@ public mutateRowsTs_call(java.nio.ByteBuffer tableName, java.util.List resultHandler) throws org.apache.thrift.TException {
          +    public void atomicIncrement(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long value,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Long> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      atomicIncrement_call method_call = new atomicIncrement_call(tableName, row, column, value, resultHandler, this, ___protocolFactory, ___transport);
          +      atomicIncrement_call method_call = new atomicIncrement_call(tableName, row, column, value,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class atomicIncrement_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Long> {
           +    public static class atomicIncrement_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Long> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer column;
                 private long value;
           -      public atomicIncrement_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long value, org.apache.thrift.async.AsyncMethodCallback<java.lang.Long> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public atomicIncrement_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.nio.ByteBuffer column, long value,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Long> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -3120,8 +3724,10 @@ public atomicIncrement_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer r
                   this.value = value;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("atomicIncrement", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("atomicIncrement",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   atomicIncrement_args args = new atomicIncrement_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -3131,19 +3737,27 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
                   prot.writeMessageEnd();
                 }
           
          -      public java.lang.Long getResult() throws IOError, IllegalArgument, org.apache.thrift.TException {
          +      public java.lang.Long getResult()
          +          throws IOError, IllegalArgument, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_atomicIncrement();
                 }
               }
           
           -    public void deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void deleteAll(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.nio.ByteBuffer column,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteAll_call method_call = new deleteAll_call(tableName, row, column, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteAll_call method_call = new deleteAll_call(tableName, row, column, attributes,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -3152,8 +3766,16 @@ public static class deleteAll_call extends org.apache.thrift.async.TAsyncMethodC
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer column;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public deleteAll_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public deleteAll_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.nio.ByteBuffer column,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -3161,8 +3783,10 @@ public deleteAll_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, ja
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteAll", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteAll",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteAll_args args = new deleteAll_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -3176,15 +3800,22 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void deleteAllTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.nio.ByteBuffer column, long timestamp,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteAllTs_call method_call = new deleteAllTs_call(tableName, row, column, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteAllTs_call method_call = new deleteAllTs_call(tableName, row, column, timestamp,
          +          attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -3194,8 +3825,16 @@ public static class deleteAllTs_call extends org.apache.thrift.async.TAsyncMetho
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer column;
                 private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public deleteAllTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public deleteAllTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.nio.ByteBuffer column, long timestamp,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -3204,8 +3843,10 @@ public deleteAllTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteAllTs", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteAllTs",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteAllTs_args args = new deleteAllTs_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -3220,15 +3861,21 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteAllRow_call method_call = new deleteAllRow_call(tableName, row, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteAllRow_call method_call = new deleteAllRow_call(tableName, row, attributes,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -3236,16 +3883,25 @@ public void deleteAllRow(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
                public static class deleteAllRow_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public deleteAllRow_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public deleteAllRow_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteAllRow", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteAllRow",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteAllRow_args args = new deleteAllRow_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -3258,28 +3914,41 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void increment(TIncrement increment, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void increment(TIncrement increment,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      increment_call method_call = new increment_call(increment, resultHandler, this, ___protocolFactory, ___transport);
          +      increment_call method_call =
          +          new increment_call(increment, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
                public static class increment_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private TIncrement increment;
           -      public increment_call(TIncrement increment, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public increment_call(TIncrement increment,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.increment = increment;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("increment", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("increment",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   increment_args args = new increment_args();
                   args.setIncrement(increment);
                   args.write(prot);
          @@ -3290,28 +3959,41 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void incrementRows(java.util.List<TIncrement> increments, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void incrementRows(java.util.List<TIncrement> increments,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      incrementRows_call method_call = new incrementRows_call(increments, resultHandler, this, ___protocolFactory, ___transport);
          +      incrementRows_call method_call =
          +          new incrementRows_call(increments, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
                public static class incrementRows_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private java.util.List<TIncrement> increments;
           -      public incrementRows_call(java.util.List<TIncrement> increments, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public incrementRows_call(java.util.List<TIncrement> increments,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.increments = increments;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("incrementRows", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("incrementRows",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   incrementRows_args args = new incrementRows_args();
                   args.setIncrements(increments);
                   args.write(prot);
          @@ -3322,15 +4004,21 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void deleteAllRowTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteAllRowTs_call method_call = new deleteAllRowTs_call(tableName, row, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteAllRowTs_call method_call = new deleteAllRowTs_call(tableName, row, timestamp,
          +          attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -3339,8 +4027,15 @@ public static class deleteAllRowTs_call extends org.apache.thrift.async.TAsyncMe
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                 private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public deleteAllRowTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public deleteAllRowTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -3348,8 +4043,10 @@ public deleteAllRowTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer ro
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteAllRowTs", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteAllRowTs",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteAllRowTs_args args = new deleteAllRowTs_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -3363,32 +4060,48 @@ public Void getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
           +    public void scannerOpenWithScan(java.nio.ByteBuffer tableName, TScan scan,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      scannerOpenWithScan_call method_call = new scannerOpenWithScan_call(tableName, scan, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      scannerOpenWithScan_call method_call = new scannerOpenWithScan_call(tableName, scan,
          +          attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class scannerOpenWithScan_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
           +    public static class scannerOpenWithScan_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
                 private java.nio.ByteBuffer tableName;
                 private TScan scan;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public scannerOpenWithScan_call(java.nio.ByteBuffer tableName, TScan scan, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public scannerOpenWithScan_call(java.nio.ByteBuffer tableName, TScan scan,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.scan = scan;
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenWithScan", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenWithScan",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   scannerOpenWithScan_args args = new scannerOpenWithScan_args();
                   args.setTableName(tableName);
                   args.setScan(scan);
          @@ -3401,25 +4114,41 @@ public java.lang.Integer getResult() throws IOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_scannerOpenWithScan();
                 }
               }
           
           -    public void scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
           +    public void scannerOpen(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +        java.util.List<java.nio.ByteBuffer> columns,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      scannerOpen_call method_call = new scannerOpen_call(tableName, startRow, columns, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      scannerOpen_call method_call = new scannerOpen_call(tableName, startRow, columns, attributes,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class scannerOpen_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
           +    public static class scannerOpen_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer startRow;
                  private java.util.List<java.nio.ByteBuffer> columns;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public scannerOpen_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public scannerOpen_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +          java.util.List<java.nio.ByteBuffer> columns,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.startRow = startRow;
          @@ -3427,8 +4156,10 @@ public scannerOpen_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer start
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpen", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpen",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   scannerOpen_args args = new scannerOpen_args();
                   args.setTableName(tableName);
                   args.setStartRow(startRow);
          @@ -3442,26 +4173,42 @@ public java.lang.Integer getResult() throws IOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_scannerOpen();
                 }
               }
           
           -    public void scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
           +    public void scannerOpenWithStop(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +        java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      scannerOpenWithStop_call method_call = new scannerOpenWithStop_call(tableName, startRow, stopRow, columns, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      scannerOpenWithStop_call method_call = new scannerOpenWithStop_call(tableName, startRow,
          +          stopRow, columns, attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class scannerOpenWithStop_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
           +    public static class scannerOpenWithStop_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer startRow;
                 private java.nio.ByteBuffer stopRow;
                  private java.util.List<java.nio.ByteBuffer> columns;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public scannerOpenWithStop_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public scannerOpenWithStop_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +          java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.startRow = startRow;
          @@ -3470,8 +4217,10 @@ public scannerOpenWithStop_call(java.nio.ByteBuffer tableName, java.nio.ByteBuff
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenWithStop", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenWithStop",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   scannerOpenWithStop_args args = new scannerOpenWithStop_args();
                   args.setTableName(tableName);
                   args.setStartRow(startRow);
          @@ -3486,25 +4235,42 @@ public java.lang.Integer getResult() throws IOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_scannerOpenWithStop();
                 }
               }
           
           -    public void scannerOpenWithPrefix(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startAndPrefix, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
           +    public void scannerOpenWithPrefix(java.nio.ByteBuffer tableName,
           +        java.nio.ByteBuffer startAndPrefix, java.util.List<java.nio.ByteBuffer> columns,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      scannerOpenWithPrefix_call method_call = new scannerOpenWithPrefix_call(tableName, startAndPrefix, columns, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      scannerOpenWithPrefix_call method_call =
          +          new scannerOpenWithPrefix_call(tableName, startAndPrefix, columns, attributes,
          +              resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class scannerOpenWithPrefix_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
           +    public static class scannerOpenWithPrefix_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer startAndPrefix;
                  private java.util.List<java.nio.ByteBuffer> columns;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public scannerOpenWithPrefix_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startAndPrefix, java.util.List<java.nio.ByteBuffer> columns, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public scannerOpenWithPrefix_call(java.nio.ByteBuffer tableName,
           +          java.nio.ByteBuffer startAndPrefix, java.util.List<java.nio.ByteBuffer> columns,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.startAndPrefix = startAndPrefix;
          @@ -3512,8 +4278,10 @@ public scannerOpenWithPrefix_call(java.nio.ByteBuffer tableName, java.nio.ByteBu
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenWithPrefix", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenWithPrefix",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   scannerOpenWithPrefix_args args = new scannerOpenWithPrefix_args();
                   args.setTableName(tableName);
                   args.setStartAndPrefix(startAndPrefix);
          @@ -3527,26 +4295,42 @@ public java.lang.Integer getResult() throws IOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_scannerOpenWithPrefix();
                 }
               }
           
           -    public void scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
           +    public void scannerOpenTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +        java.util.List<java.nio.ByteBuffer> columns, long timestamp,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      scannerOpenTs_call method_call = new scannerOpenTs_call(tableName, startRow, columns, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      scannerOpenTs_call method_call = new scannerOpenTs_call(tableName, startRow, columns,
          +          timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class scannerOpenTs_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
           +    public static class scannerOpenTs_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer startRow;
                  private java.util.List<java.nio.ByteBuffer> columns;
                 private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public scannerOpenTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public scannerOpenTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +          java.util.List<java.nio.ByteBuffer> columns, long timestamp,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.startRow = startRow;
          @@ -3555,8 +4339,10 @@ public scannerOpenTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer sta
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenTs", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenTs",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   scannerOpenTs_args args = new scannerOpenTs_args();
                   args.setTableName(tableName);
                   args.setStartRow(startRow);
          @@ -3571,27 +4357,44 @@ public java.lang.Integer getResult() throws IOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_scannerOpenTs();
                 }
               }
           
           -    public void scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
           +    public void scannerOpenWithStopTs(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +        java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      scannerOpenWithStopTs_call method_call = new scannerOpenWithStopTs_call(tableName, startRow, stopRow, columns, timestamp, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      scannerOpenWithStopTs_call method_call =
          +          new scannerOpenWithStopTs_call(tableName, startRow, stopRow, columns, timestamp,
          +              attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class scannerOpenWithStopTs_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
           +    public static class scannerOpenWithStopTs_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Integer> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer startRow;
                 private java.nio.ByteBuffer stopRow;
                  private java.util.List<java.nio.ByteBuffer> columns;
                 private long timestamp;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public scannerOpenWithStopTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow, java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public scannerOpenWithStopTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +          java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.startRow = startRow;
          @@ -3601,8 +4404,10 @@ public scannerOpenWithStopTs_call(java.nio.ByteBuffer tableName, java.nio.ByteBu
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenWithStopTs", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerOpenWithStopTs",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   scannerOpenWithStopTs_args args = new scannerOpenWithStopTs_args();
                   args.setTableName(tableName);
                   args.setStartRow(startRow);
          @@ -3618,62 +4423,91 @@ public java.lang.Integer getResult() throws IOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_scannerOpenWithStopTs();
                 }
               }
           
           -    public void scannerGet(int id, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void scannerGet(int id,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      scannerGet_call method_call = new scannerGet_call(id, resultHandler, this, ___protocolFactory, ___transport);
          +      scannerGet_call method_call =
          +          new scannerGet_call(id, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class scannerGet_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class scannerGet_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                  private int id;
           -      public scannerGet_call(int id, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public scannerGet_call(int id,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.id = id;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerGet", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerGet",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   scannerGet_args args = new scannerGet_args();
                   args.setId(id);
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<TRowResult> getResult() throws IOError, IllegalArgument, org.apache.thrift.TException {
           +      public java.util.List<TRowResult> getResult()
          +          throws IOError, IllegalArgument, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_scannerGet();
                 }
               }
           
           -    public void scannerGetList(int id, int nbRows, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void scannerGetList(int id, int nbRows,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      scannerGetList_call method_call = new scannerGetList_call(id, nbRows, resultHandler, this, ___protocolFactory, ___transport);
          +      scannerGetList_call method_call = new scannerGetList_call(id, nbRows, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class scannerGetList_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
           +    public static class scannerGetList_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TRowResult>> {
                  private int id;
                  private int nbRows;
           -      public scannerGetList_call(int id, int nbRows, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public scannerGetList_call(int id, int nbRows,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.id = id;
                   this.nbRows = nbRows;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerGetList", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerGetList",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   scannerGetList_args args = new scannerGetList_args();
                   args.setId(id);
                   args.setNbRows(nbRows);
          @@ -3681,32 +4515,46 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<TRowResult> getResult() throws IOError, IllegalArgument, org.apache.thrift.TException {
           +      public java.util.List<TRowResult> getResult()
          +          throws IOError, IllegalArgument, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_scannerGetList();
                 }
               }
           
           -    public void scannerClose(int id, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void scannerClose(int id,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      scannerClose_call method_call = new scannerClose_call(id, resultHandler, this, ___protocolFactory, ___transport);
          +      scannerClose_call method_call =
          +          new scannerClose_call(id, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
                public static class scannerClose_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private int id;
           -      public scannerClose_call(int id, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public scannerClose_call(int id,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.id = id;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerClose", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("scannerClose",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   scannerClose_args args = new scannerClose_args();
                   args.setId(id);
                   args.write(prot);
          @@ -3717,28 +4565,42 @@ public Void getResult() throws IOError, IllegalArgument, org.apache.thrift.TExce
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void getRegionInfo(java.nio.ByteBuffer row, org.apache.thrift.async.AsyncMethodCallback<TRegionInfo> resultHandler) throws org.apache.thrift.TException {
           +    public void getRegionInfo(java.nio.ByteBuffer row,
           +        org.apache.thrift.async.AsyncMethodCallback<TRegionInfo> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRegionInfo_call method_call = new getRegionInfo_call(row, resultHandler, this, ___protocolFactory, ___transport);
          +      getRegionInfo_call method_call =
          +          new getRegionInfo_call(row, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getRegionInfo_call extends org.apache.thrift.async.TAsyncMethodCall<TRegionInfo> {
           +    public static class getRegionInfo_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<TRegionInfo> {
                 private java.nio.ByteBuffer row;
           -      public getRegionInfo_call(java.nio.ByteBuffer row, org.apache.thrift.async.AsyncMethodCallback<TRegionInfo> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getRegionInfo_call(java.nio.ByteBuffer row,
           +          org.apache.thrift.async.AsyncMethodCallback<TRegionInfo> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.row = row;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRegionInfo", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRegionInfo",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getRegionInfo_args args = new getRegionInfo_args();
                   args.setRow(row);
                   args.write(prot);
          @@ -3749,28 +4611,42 @@ public TRegionInfo getResult() throws IOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRegionInfo();
                 }
               }
           
           -    public void append(TAppend append, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler) throws org.apache.thrift.TException {
           +    public void append(TAppend append,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      append_call method_call = new append_call(append, resultHandler, this, ___protocolFactory, ___transport);
          +      append_call method_call =
          +          new append_call(append, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class append_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TCell>> {
           +    public static class append_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TCell>> {
                  private TAppend append;
           -      public append_call(TAppend append, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public append_call(TAppend append,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.append = append;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("append", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("append",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   append_args args = new append_args();
                   args.setAppend(append);
                   args.write(prot);
          @@ -3781,27 +4657,43 @@ public java.util.List getResult() throws IOError, org.apache.thrift.TExce
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_append();
                 }
               }
           
           -    public void checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           +    public void checkAndPut(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      checkAndPut_call method_call = new checkAndPut_call(tableName, row, column, value, mput, attributes, resultHandler, this, ___protocolFactory, ___transport);
          +      checkAndPut_call method_call = new checkAndPut_call(tableName, row, column, value, mput,
          +          attributes, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class checkAndPut_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
           +    public static class checkAndPut_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                 private java.nio.ByteBuffer tableName;
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer column;
                 private java.nio.ByteBuffer value;
                 private Mutation mput;
           -      private java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes;
           -      public checkAndPut_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput, java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +      private java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes;
           +
           +      public checkAndPut_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +          java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput,
           +          java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.row = row;
          @@ -3811,8 +4703,10 @@ public checkAndPut_call(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
                   this.attributes = attributes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("checkAndPut", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("checkAndPut",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   checkAndPut_args args = new checkAndPut_args();
                   args.setTableName(tableName);
                   args.setRow(row);
          @@ -3824,30 +4718,44 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
                   prot.writeMessageEnd();
                 }
           
          -      public java.lang.Boolean getResult() throws IOError, IllegalArgument, org.apache.thrift.TException {
          +      public java.lang.Boolean getResult()
          +          throws IOError, IllegalArgument, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_checkAndPut();
                 }
               }
           
-    public void getThriftServerType(org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler) throws org.apache.thrift.TException {
+    public void getThriftServerType(
+        org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getThriftServerType_call method_call = new getThriftServerType_call(resultHandler, this, ___protocolFactory, ___transport);
          +      getThriftServerType_call method_call =
          +          new getThriftServerType_call(resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
-    public static class getThriftServerType_call extends org.apache.thrift.async.TAsyncMethodCall<TThriftServerType> {
-      public getThriftServerType_call(org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+    public static class getThriftServerType_call
+        extends org.apache.thrift.async.TAsyncMethodCall<TThriftServerType> {
+      public getThriftServerType_call(
+          org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getThriftServerType", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getThriftServerType",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getThriftServerType_args args = new getThriftServerType_args();
                   args.write(prot);
                   prot.writeMessageEnd();
          @@ -3857,26 +4765,39 @@ public TThriftServerType getResult() throws org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getThriftServerType();
                 }
               }
           
-    public void getClusterId(org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler) throws org.apache.thrift.TException {
+    public void
+        getClusterId(org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler)
+            throws org.apache.thrift.TException {
                 checkReady();
          -      getClusterId_call method_call = new getClusterId_call(resultHandler, this, ___protocolFactory, ___transport);
          +      getClusterId_call method_call =
          +          new getClusterId_call(resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
-    public static class getClusterId_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.String> {
-      public getClusterId_call(org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+    public static class getClusterId_call
+        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.String> {
+      public getClusterId_call(
+          org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getClusterId", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getClusterId",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getClusterId_args args = new getClusterId_args();
                   args.write(prot);
                   prot.writeMessageEnd();
          @@ -3886,28 +4807,42 @@ public java.lang.String getResult() throws org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getClusterId();
                 }
               }
           
-    public void grant(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
+    public void grant(TAccessControlEntity info,
+        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      grant_call method_call = new grant_call(info, resultHandler, this, ___protocolFactory, ___transport);
          +      grant_call method_call =
          +          new grant_call(info, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
-    public static class grant_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
+    public static class grant_call
+        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                 private TAccessControlEntity info;
-      public grant_call(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+
+      public grant_call(TAccessControlEntity info,
+          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.info = info;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("grant", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("grant",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   grant_args args = new grant_args();
                   args.setInfo(info);
                   args.write(prot);
          @@ -3918,28 +4853,42 @@ public java.lang.Boolean getResult() throws IOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_grant();
                 }
               }
           
-    public void revoke(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
+    public void revoke(TAccessControlEntity info,
+        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      revoke_call method_call = new revoke_call(info, resultHandler, this, ___protocolFactory, ___transport);
          +      revoke_call method_call =
          +          new revoke_call(info, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
-    public static class revoke_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
+    public static class revoke_call
+        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                 private TAccessControlEntity info;
-      public revoke_call(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+
+      public revoke_call(TAccessControlEntity info,
+          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.info = info;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("revoke", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("revoke",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   revoke_args args = new revoke_args();
                   args.setInfo(info);
                   args.write(prot);
          @@ -3950,25 +4899,35 @@ public java.lang.Boolean getResult() throws IOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_revoke();
                 }
               }
           
             }
           
-  public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I> implements org.apache.thrift.TProcessor {
          -    private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(Processor.class.getName());
+  public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I>
          +      implements org.apache.thrift.TProcessor {
          +    private static final org.slf4j.Logger _LOGGER =
          +        org.slf4j.LoggerFactory.getLogger(Processor.class.getName());
          +
               public Processor(I iface) {
-      super(iface, getProcessMap(new java.util.HashMap<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
+      super(iface, getProcessMap(
+        new java.util.HashMap<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
               }
           
-    protected Processor(I iface, java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
+    protected Processor(I iface,
+        java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
                 super(iface, getProcessMap(processMap));
               }
           
-    private static <I extends Iface> java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
+    private static <I extends Iface>
+        java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>
+        getProcessMap(
+            java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
                 processMap.put("enableTable", new enableTable());
                 processMap.put("disableTable", new disableTable());
                 processMap.put("isTableEnabled", new isTableEnabled());
@@ -4021,7 +4980,8 @@ protected Processor(I iface, java.util.Map
-    public static class enableTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, enableTable_args> {
+    public static class enableTable<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, enableTable_args> {
                 public enableTable() {
                   super("enableTable");
                 }
          @@ -4039,7 +4999,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public enableTable_result getResult(I iface, enableTable_args args) throws org.apache.thrift.TException {
          +      public enableTable_result getResult(I iface, enableTable_args args)
          +          throws org.apache.thrift.TException {
                   enableTable_result result = new enableTable_result();
                   try {
                     iface.enableTable(args.tableName);
          @@ -4050,7 +5011,8 @@ public enableTable_result getResult(I iface, enableTable_args args) throws org.a
                 }
               }
           
-    public static class disableTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, disableTable_args> {
+    public static class disableTable<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, disableTable_args> {
                 public disableTable() {
                   super("disableTable");
                 }
          @@ -4068,7 +5030,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public disableTable_result getResult(I iface, disableTable_args args) throws org.apache.thrift.TException {
          +      public disableTable_result getResult(I iface, disableTable_args args)
          +          throws org.apache.thrift.TException {
                   disableTable_result result = new disableTable_result();
                   try {
                     iface.disableTable(args.tableName);
          @@ -4079,7 +5042,8 @@ public disableTable_result getResult(I iface, disableTable_args args) throws org
                 }
               }
           
-    public static class isTableEnabled<I extends Iface> extends org.apache.thrift.ProcessFunction<I, isTableEnabled_args> {
+    public static class isTableEnabled<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, isTableEnabled_args> {
                 public isTableEnabled() {
                   super("isTableEnabled");
                 }
          @@ -4097,7 +5061,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public isTableEnabled_result getResult(I iface, isTableEnabled_args args) throws org.apache.thrift.TException {
          +      public isTableEnabled_result getResult(I iface, isTableEnabled_args args)
          +          throws org.apache.thrift.TException {
                   isTableEnabled_result result = new isTableEnabled_result();
                   try {
                     result.success = iface.isTableEnabled(args.tableName);
          @@ -4109,7 +5074,8 @@ public isTableEnabled_result getResult(I iface, isTableEnabled_args args) throws
                 }
               }
           
-    public static class compact<I extends Iface> extends org.apache.thrift.ProcessFunction<I, compact_args> {
+    public static class compact<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, compact_args> {
                 public compact() {
                   super("compact");
                 }
          @@ -4127,7 +5093,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public compact_result getResult(I iface, compact_args args) throws org.apache.thrift.TException {
          +      public compact_result getResult(I iface, compact_args args)
          +          throws org.apache.thrift.TException {
                   compact_result result = new compact_result();
                   try {
                     iface.compact(args.tableNameOrRegionName);
          @@ -4138,7 +5105,8 @@ public compact_result getResult(I iface, compact_args args) throws org.apache.th
                 }
               }
           
-    public static class majorCompact<I extends Iface> extends org.apache.thrift.ProcessFunction<I, majorCompact_args> {
+    public static class majorCompact<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, majorCompact_args> {
                 public majorCompact() {
                   super("majorCompact");
                 }
          @@ -4156,7 +5124,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public majorCompact_result getResult(I iface, majorCompact_args args) throws org.apache.thrift.TException {
          +      public majorCompact_result getResult(I iface, majorCompact_args args)
          +          throws org.apache.thrift.TException {
                   majorCompact_result result = new majorCompact_result();
                   try {
                     iface.majorCompact(args.tableNameOrRegionName);
          @@ -4167,7 +5136,8 @@ public majorCompact_result getResult(I iface, majorCompact_args args) throws org
                 }
               }
           
-    public static class getTableNames<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getTableNames_args> {
+    public static class getTableNames<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getTableNames_args> {
                 public getTableNames() {
                   super("getTableNames");
                 }
          @@ -4185,7 +5155,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getTableNames_result getResult(I iface, getTableNames_args args) throws org.apache.thrift.TException {
          +      public getTableNames_result getResult(I iface, getTableNames_args args)
          +          throws org.apache.thrift.TException {
                   getTableNames_result result = new getTableNames_result();
                   try {
                     result.success = iface.getTableNames();
          @@ -4196,7 +5167,8 @@ public getTableNames_result getResult(I iface, getTableNames_args args) throws o
                 }
               }
           
-    public static class getTableNamesWithIsTableEnabled<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getTableNamesWithIsTableEnabled_args> {
+    public static class getTableNamesWithIsTableEnabled<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getTableNamesWithIsTableEnabled_args> {
                 public getTableNamesWithIsTableEnabled() {
                   super("getTableNamesWithIsTableEnabled");
                 }
          @@ -4214,8 +5186,10 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getTableNamesWithIsTableEnabled_result getResult(I iface, getTableNamesWithIsTableEnabled_args args) throws org.apache.thrift.TException {
          -        getTableNamesWithIsTableEnabled_result result = new getTableNamesWithIsTableEnabled_result();
          +      public getTableNamesWithIsTableEnabled_result getResult(I iface,
          +          getTableNamesWithIsTableEnabled_args args) throws org.apache.thrift.TException {
          +        getTableNamesWithIsTableEnabled_result result =
          +            new getTableNamesWithIsTableEnabled_result();
                   try {
                     result.success = iface.getTableNamesWithIsTableEnabled();
                   } catch (IOError io) {
          @@ -4225,7 +5199,8 @@ public getTableNamesWithIsTableEnabled_result getResult(I iface, getTableNamesWi
                 }
               }
           
-    public static class getColumnDescriptors<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getColumnDescriptors_args> {
+    public static class getColumnDescriptors<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getColumnDescriptors_args> {
                 public getColumnDescriptors() {
                   super("getColumnDescriptors");
                 }
          @@ -4243,7 +5218,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getColumnDescriptors_result getResult(I iface, getColumnDescriptors_args args) throws org.apache.thrift.TException {
          +      public getColumnDescriptors_result getResult(I iface, getColumnDescriptors_args args)
          +          throws org.apache.thrift.TException {
                   getColumnDescriptors_result result = new getColumnDescriptors_result();
                   try {
                     result.success = iface.getColumnDescriptors(args.tableName);
          @@ -4254,7 +5230,8 @@ public getColumnDescriptors_result getResult(I iface, getColumnDescriptors_args
                 }
               }
           
-    public static class getTableRegions<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getTableRegions_args> {
+    public static class getTableRegions<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getTableRegions_args> {
                 public getTableRegions() {
                   super("getTableRegions");
                 }
          @@ -4272,7 +5249,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getTableRegions_result getResult(I iface, getTableRegions_args args) throws org.apache.thrift.TException {
          +      public getTableRegions_result getResult(I iface, getTableRegions_args args)
          +          throws org.apache.thrift.TException {
                   getTableRegions_result result = new getTableRegions_result();
                   try {
                     result.success = iface.getTableRegions(args.tableName);
          @@ -4283,7 +5261,8 @@ public getTableRegions_result getResult(I iface, getTableRegions_args args) thro
                 }
               }
           
-    public static class createTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, createTable_args> {
+    public static class createTable<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, createTable_args> {
                 public createTable() {
                   super("createTable");
                 }
          @@ -4301,7 +5280,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public createTable_result getResult(I iface, createTable_args args) throws org.apache.thrift.TException {
          +      public createTable_result getResult(I iface, createTable_args args)
          +          throws org.apache.thrift.TException {
                   createTable_result result = new createTable_result();
                   try {
                     iface.createTable(args.tableName, args.columnFamilies);
          @@ -4316,7 +5296,8 @@ public createTable_result getResult(I iface, createTable_args args) throws org.a
                 }
               }
           
-    public static class deleteTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteTable_args> {
+    public static class deleteTable<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, deleteTable_args> {
                 public deleteTable() {
                   super("deleteTable");
                 }
          @@ -4334,7 +5315,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteTable_result getResult(I iface, deleteTable_args args) throws org.apache.thrift.TException {
          +      public deleteTable_result getResult(I iface, deleteTable_args args)
          +          throws org.apache.thrift.TException {
                   deleteTable_result result = new deleteTable_result();
                   try {
                     iface.deleteTable(args.tableName);
          @@ -4345,7 +5327,8 @@ public deleteTable_result getResult(I iface, deleteTable_args args) throws org.a
                 }
               }
           
-    public static class get<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_args> {
+    public static class get<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, get_args> {
                 public get() {
                   super("get");
                 }
          @@ -4374,7 +5357,8 @@ public get_result getResult(I iface, get_args args) throws org.apache.thrift.TEx
                 }
               }
           
-    public static class getVer<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getVer_args> {
+    public static class getVer<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getVer_args> {
                 public getVer() {
                   super("getVer");
                 }
          @@ -4392,10 +5376,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getVer_result getResult(I iface, getVer_args args) throws org.apache.thrift.TException {
          +      public getVer_result getResult(I iface, getVer_args args)
          +          throws org.apache.thrift.TException {
                   getVer_result result = new getVer_result();
                   try {
          -          result.success = iface.getVer(args.tableName, args.row, args.column, args.numVersions, args.attributes);
          +          result.success = iface.getVer(args.tableName, args.row, args.column, args.numVersions,
          +            args.attributes);
                   } catch (IOError io) {
                     result.io = io;
                   }
          @@ -4403,7 +5389,8 @@ public getVer_result getResult(I iface, getVer_args args) throws org.apache.thri
                 }
               }
           
-    public static class getVerTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getVerTs_args> {
+    public static class getVerTs<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getVerTs_args> {
                 public getVerTs() {
                   super("getVerTs");
                 }
          @@ -4421,10 +5408,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getVerTs_result getResult(I iface, getVerTs_args args) throws org.apache.thrift.TException {
          +      public getVerTs_result getResult(I iface, getVerTs_args args)
          +          throws org.apache.thrift.TException {
                   getVerTs_result result = new getVerTs_result();
                   try {
          -          result.success = iface.getVerTs(args.tableName, args.row, args.column, args.timestamp, args.numVersions, args.attributes);
          +          result.success = iface.getVerTs(args.tableName, args.row, args.column, args.timestamp,
          +            args.numVersions, args.attributes);
                   } catch (IOError io) {
                     result.io = io;
                   }
          @@ -4432,7 +5421,8 @@ public getVerTs_result getResult(I iface, getVerTs_args args) throws org.apache.
                 }
               }
           
-    public static class getRow<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRow_args> {
+    public static class getRow<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getRow_args> {
                 public getRow() {
                   super("getRow");
                 }
          @@ -4450,7 +5440,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRow_result getResult(I iface, getRow_args args) throws org.apache.thrift.TException {
          +      public getRow_result getResult(I iface, getRow_args args)
          +          throws org.apache.thrift.TException {
                   getRow_result result = new getRow_result();
                   try {
                     result.success = iface.getRow(args.tableName, args.row, args.attributes);
          @@ -4461,7 +5452,8 @@ public getRow_result getResult(I iface, getRow_args args) throws org.apache.thri
                 }
               }
           
-    public static class getRowWithColumns<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRowWithColumns_args> {
+    public static class getRowWithColumns<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getRowWithColumns_args> {
                 public getRowWithColumns() {
                   super("getRowWithColumns");
                 }
          @@ -4479,10 +5471,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRowWithColumns_result getResult(I iface, getRowWithColumns_args args) throws org.apache.thrift.TException {
          +      public getRowWithColumns_result getResult(I iface, getRowWithColumns_args args)
          +          throws org.apache.thrift.TException {
                   getRowWithColumns_result result = new getRowWithColumns_result();
                   try {
          -          result.success = iface.getRowWithColumns(args.tableName, args.row, args.columns, args.attributes);
          +          result.success =
          +              iface.getRowWithColumns(args.tableName, args.row, args.columns, args.attributes);
                   } catch (IOError io) {
                     result.io = io;
                   }
          @@ -4490,7 +5484,8 @@ public getRowWithColumns_result getResult(I iface, getRowWithColumns_args args)
                 }
               }
           
-    public static class getRowTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRowTs_args> {
+    public static class getRowTs<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getRowTs_args> {
                 public getRowTs() {
                   super("getRowTs");
                 }
          @@ -4508,10 +5503,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRowTs_result getResult(I iface, getRowTs_args args) throws org.apache.thrift.TException {
          +      public getRowTs_result getResult(I iface, getRowTs_args args)
          +          throws org.apache.thrift.TException {
                   getRowTs_result result = new getRowTs_result();
                   try {
          -          result.success = iface.getRowTs(args.tableName, args.row, args.timestamp, args.attributes);
          +          result.success =
          +              iface.getRowTs(args.tableName, args.row, args.timestamp, args.attributes);
                   } catch (IOError io) {
                     result.io = io;
                   }
          @@ -4519,7 +5516,8 @@ public getRowTs_result getResult(I iface, getRowTs_args args) throws org.apache.
                 }
               }
           
-    public static class getRowWithColumnsTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRowWithColumnsTs_args> {
+    public static class getRowWithColumnsTs<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getRowWithColumnsTs_args> {
                 public getRowWithColumnsTs() {
                   super("getRowWithColumnsTs");
                 }
          @@ -4537,10 +5535,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRowWithColumnsTs_result getResult(I iface, getRowWithColumnsTs_args args) throws org.apache.thrift.TException {
          +      public getRowWithColumnsTs_result getResult(I iface, getRowWithColumnsTs_args args)
          +          throws org.apache.thrift.TException {
                   getRowWithColumnsTs_result result = new getRowWithColumnsTs_result();
                   try {
          -          result.success = iface.getRowWithColumnsTs(args.tableName, args.row, args.columns, args.timestamp, args.attributes);
          +          result.success = iface.getRowWithColumnsTs(args.tableName, args.row, args.columns,
          +            args.timestamp, args.attributes);
                   } catch (IOError io) {
                     result.io = io;
                   }
          @@ -4548,7 +5548,8 @@ public getRowWithColumnsTs_result getResult(I iface, getRowWithColumnsTs_args ar
                 }
               }
           
-    public static class getRows<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRows_args> {
+    public static class getRows<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getRows_args> {
                 public getRows() {
                   super("getRows");
                 }
          @@ -4566,7 +5567,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRows_result getResult(I iface, getRows_args args) throws org.apache.thrift.TException {
          +      public getRows_result getResult(I iface, getRows_args args)
          +          throws org.apache.thrift.TException {
                   getRows_result result = new getRows_result();
                   try {
                     result.success = iface.getRows(args.tableName, args.rows, args.attributes);
          @@ -4577,7 +5579,8 @@ public getRows_result getResult(I iface, getRows_args args) throws org.apache.th
                 }
               }
           
-    public static class getRowsWithColumns<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRowsWithColumns_args> {
+    public static class getRowsWithColumns<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getRowsWithColumns_args> {
                 public getRowsWithColumns() {
                   super("getRowsWithColumns");
                 }
          @@ -4595,10 +5598,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRowsWithColumns_result getResult(I iface, getRowsWithColumns_args args) throws org.apache.thrift.TException {
          +      public getRowsWithColumns_result getResult(I iface, getRowsWithColumns_args args)
          +          throws org.apache.thrift.TException {
                   getRowsWithColumns_result result = new getRowsWithColumns_result();
                   try {
          -          result.success = iface.getRowsWithColumns(args.tableName, args.rows, args.columns, args.attributes);
          +          result.success =
          +              iface.getRowsWithColumns(args.tableName, args.rows, args.columns, args.attributes);
                   } catch (IOError io) {
                     result.io = io;
                   }
          @@ -4606,7 +5611,8 @@ public getRowsWithColumns_result getResult(I iface, getRowsWithColumns_args args
                 }
               }
           
-    public static class getRowsTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRowsTs_args> {
+    public static class getRowsTs<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getRowsTs_args> {
                 public getRowsTs() {
                   super("getRowsTs");
                 }
          @@ -4624,10 +5630,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRowsTs_result getResult(I iface, getRowsTs_args args) throws org.apache.thrift.TException {
          +      public getRowsTs_result getResult(I iface, getRowsTs_args args)
          +          throws org.apache.thrift.TException {
                   getRowsTs_result result = new getRowsTs_result();
                   try {
          -          result.success = iface.getRowsTs(args.tableName, args.rows, args.timestamp, args.attributes);
          +          result.success =
          +              iface.getRowsTs(args.tableName, args.rows, args.timestamp, args.attributes);
                   } catch (IOError io) {
                     result.io = io;
                   }
          @@ -4635,7 +5643,8 @@ public getRowsTs_result getResult(I iface, getRowsTs_args args) throws org.apach
                 }
               }
           
-    public static class getRowsWithColumnsTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRowsWithColumnsTs_args> {
+    public static class getRowsWithColumnsTs<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, getRowsWithColumnsTs_args> {
                 public getRowsWithColumnsTs() {
                   super("getRowsWithColumnsTs");
                 }
          @@ -4653,10 +5662,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRowsWithColumnsTs_result getResult(I iface, getRowsWithColumnsTs_args args) throws org.apache.thrift.TException {
          +      public getRowsWithColumnsTs_result getResult(I iface, getRowsWithColumnsTs_args args)
          +          throws org.apache.thrift.TException {
                   getRowsWithColumnsTs_result result = new getRowsWithColumnsTs_result();
                   try {
          -          result.success = iface.getRowsWithColumnsTs(args.tableName, args.rows, args.columns, args.timestamp, args.attributes);
          +          result.success = iface.getRowsWithColumnsTs(args.tableName, args.rows, args.columns,
          +            args.timestamp, args.attributes);
                   } catch (IOError io) {
                     result.io = io;
                   }
          @@ -4664,7 +5675,8 @@ public getRowsWithColumnsTs_result getResult(I iface, getRowsWithColumnsTs_args
                 }
               }
           
-    public static class mutateRow<I extends Iface> extends org.apache.thrift.ProcessFunction<I, mutateRow_args> {
+    public static class mutateRow<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, mutateRow_args> {
                 public mutateRow() {
                   super("mutateRow");
                 }
          @@ -4682,7 +5694,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public mutateRow_result getResult(I iface, mutateRow_args args) throws org.apache.thrift.TException {
          +      public mutateRow_result getResult(I iface, mutateRow_args args)
          +          throws org.apache.thrift.TException {
                   mutateRow_result result = new mutateRow_result();
                   try {
                     iface.mutateRow(args.tableName, args.row, args.mutations, args.attributes);
          @@ -4695,7 +5708,8 @@ public mutateRow_result getResult(I iface, mutateRow_args args) throws org.apach
                 }
               }
           
-    public static class mutateRowTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, mutateRowTs_args> {
+    public static class mutateRowTs<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, mutateRowTs_args> {
                 public mutateRowTs() {
                   super("mutateRowTs");
                 }
          @@ -4713,10 +5727,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public mutateRowTs_result getResult(I iface, mutateRowTs_args args) throws org.apache.thrift.TException {
          +      public mutateRowTs_result getResult(I iface, mutateRowTs_args args)
          +          throws org.apache.thrift.TException {
                   mutateRowTs_result result = new mutateRowTs_result();
                   try {
          -          iface.mutateRowTs(args.tableName, args.row, args.mutations, args.timestamp, args.attributes);
          +          iface.mutateRowTs(args.tableName, args.row, args.mutations, args.timestamp,
          +            args.attributes);
                   } catch (IOError io) {
                     result.io = io;
                   } catch (IllegalArgument ia) {
          @@ -4726,7 +5742,8 @@ public mutateRowTs_result getResult(I iface, mutateRowTs_args args) throws org.a
                 }
               }
           
-    public static class mutateRows<I extends Iface> extends org.apache.thrift.ProcessFunction<I, mutateRows_args> {
+    public static class mutateRows<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, mutateRows_args> {
                 public mutateRows() {
                   super("mutateRows");
                 }
          @@ -4744,7 +5761,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public mutateRows_result getResult(I iface, mutateRows_args args) throws org.apache.thrift.TException {
          +      public mutateRows_result getResult(I iface, mutateRows_args args)
          +          throws org.apache.thrift.TException {
                   mutateRows_result result = new mutateRows_result();
                   try {
                     iface.mutateRows(args.tableName, args.rowBatches, args.attributes);
          @@ -4757,7 +5775,8 @@ public mutateRows_result getResult(I iface, mutateRows_args args) throws org.apa
                 }
               }
           
-    public static class mutateRowsTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, mutateRowsTs_args> {
+    public static class mutateRowsTs<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, mutateRowsTs_args> {
                 public mutateRowsTs() {
                   super("mutateRowsTs");
                 }
          @@ -4775,7 +5794,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public mutateRowsTs_result getResult(I iface, mutateRowsTs_args args) throws org.apache.thrift.TException {
          +      public mutateRowsTs_result getResult(I iface, mutateRowsTs_args args)
          +          throws org.apache.thrift.TException {
                   mutateRowsTs_result result = new mutateRowsTs_result();
                   try {
                     iface.mutateRowsTs(args.tableName, args.rowBatches, args.timestamp, args.attributes);
          @@ -4788,7 +5808,8 @@ public mutateRowsTs_result getResult(I iface, mutateRowsTs_args args) throws org
                 }
               }
           
-    public static class atomicIncrement<I extends Iface> extends org.apache.thrift.ProcessFunction<I, atomicIncrement_args> {
+    public static class atomicIncrement<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, atomicIncrement_args> {
                 public atomicIncrement() {
                   super("atomicIncrement");
                 }
          @@ -4806,7 +5827,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public atomicIncrement_result getResult(I iface, atomicIncrement_args args) throws org.apache.thrift.TException {
          +      public atomicIncrement_result getResult(I iface, atomicIncrement_args args)
          +          throws org.apache.thrift.TException {
                   atomicIncrement_result result = new atomicIncrement_result();
                   try {
                     result.success = iface.atomicIncrement(args.tableName, args.row, args.column, args.value);
          @@ -4820,7 +5842,8 @@ public atomicIncrement_result getResult(I iface, atomicIncrement_args args) thro
                 }
               }
           
-    public static class deleteAll<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteAll_args> {
+    public static class deleteAll<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, deleteAll_args> {
                 public deleteAll() {
                   super("deleteAll");
                 }
          @@ -4838,7 +5861,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteAll_result getResult(I iface, deleteAll_args args) throws org.apache.thrift.TException {
          +      public deleteAll_result getResult(I iface, deleteAll_args args)
          +          throws org.apache.thrift.TException {
                   deleteAll_result result = new deleteAll_result();
                   try {
                     iface.deleteAll(args.tableName, args.row, args.column, args.attributes);
          @@ -4849,7 +5873,8 @@ public deleteAll_result getResult(I iface, deleteAll_args args) throws org.apach
                 }
               }
           
-    public static class deleteAllTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteAllTs_args> {
+    public static class deleteAllTs<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, deleteAllTs_args> {
                 public deleteAllTs() {
                   super("deleteAllTs");
                 }
          @@ -4867,7 +5892,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteAllTs_result getResult(I iface, deleteAllTs_args args) throws org.apache.thrift.TException {
          +      public deleteAllTs_result getResult(I iface, deleteAllTs_args args)
          +          throws org.apache.thrift.TException {
                   deleteAllTs_result result = new deleteAllTs_result();
                   try {
                     iface.deleteAllTs(args.tableName, args.row, args.column, args.timestamp, args.attributes);
          @@ -4878,7 +5904,8 @@ public deleteAllTs_result getResult(I iface, deleteAllTs_args args) throws org.a
                 }
               }
           
-    public static class deleteAllRow<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteAllRow_args> {
+    public static class deleteAllRow<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, deleteAllRow_args> {
                 public deleteAllRow() {
                   super("deleteAllRow");
                 }
          @@ -4896,7 +5923,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteAllRow_result getResult(I iface, deleteAllRow_args args) throws org.apache.thrift.TException {
          +      public deleteAllRow_result getResult(I iface, deleteAllRow_args args)
          +          throws org.apache.thrift.TException {
                   deleteAllRow_result result = new deleteAllRow_result();
                   try {
                     iface.deleteAllRow(args.tableName, args.row, args.attributes);
          @@ -4907,7 +5935,8 @@ public deleteAllRow_result getResult(I iface, deleteAllRow_args args) throws org
                 }
               }
           
-    public static class increment<I extends Iface> extends org.apache.thrift.ProcessFunction<I, increment_args> {
+    public static class increment<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, increment_args> {
                 public increment() {
                   super("increment");
                 }
          @@ -4925,7 +5954,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public increment_result getResult(I iface, increment_args args) throws org.apache.thrift.TException {
          +      public increment_result getResult(I iface, increment_args args)
          +          throws org.apache.thrift.TException {
                   increment_result result = new increment_result();
                   try {
                     iface.increment(args.increment);
          @@ -4936,7 +5966,8 @@ public increment_result getResult(I iface, increment_args args) throws org.apach
                 }
               }
           
-    public static class incrementRows<I extends Iface> extends org.apache.thrift.ProcessFunction<I, incrementRows_args> {
+    public static class incrementRows<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, incrementRows_args> {
                 public incrementRows() {
                   super("incrementRows");
                 }
          @@ -4954,7 +5985,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public incrementRows_result getResult(I iface, incrementRows_args args) throws org.apache.thrift.TException {
          +      public incrementRows_result getResult(I iface, incrementRows_args args)
          +          throws org.apache.thrift.TException {
                   incrementRows_result result = new incrementRows_result();
                   try {
                     iface.incrementRows(args.increments);
          @@ -4965,7 +5997,8 @@ public incrementRows_result getResult(I iface, incrementRows_args args) throws o
                 }
               }
           
-    public static class deleteAllRowTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteAllRowTs_args> {
+    public static class deleteAllRowTs<I extends Iface>
+        extends org.apache.thrift.ProcessFunction<I, deleteAllRowTs_args> {
                 public deleteAllRowTs() {
                   super("deleteAllRowTs");
                 }
          @@ -4983,7 +6016,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteAllRowTs_result getResult(I iface, deleteAllRowTs_args args) throws org.apache.thrift.TException {
          +      public deleteAllRowTs_result getResult(I iface, deleteAllRowTs_args args)
          +          throws org.apache.thrift.TException {
                   deleteAllRowTs_result result = new deleteAllRowTs_result();
                   try {
                     iface.deleteAllRowTs(args.tableName, args.row, args.timestamp, args.attributes);
          @@ -4994,7 +6028,8 @@ public deleteAllRowTs_result getResult(I iface, deleteAllRowTs_args args) throws
                 }
               }
           
          -    public static class scannerOpenWithScan<I extends Iface> extends org.apache.thrift.ProcessFunction<I, scannerOpenWithScan_args> {
          +    public static class scannerOpenWithScan<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, scannerOpenWithScan_args> {
                 public scannerOpenWithScan() {
                   super("scannerOpenWithScan");
                 }
          @@ -5012,7 +6047,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public scannerOpenWithScan_result getResult(I iface, scannerOpenWithScan_args args) throws org.apache.thrift.TException {
          +      public scannerOpenWithScan_result getResult(I iface, scannerOpenWithScan_args args)
          +          throws org.apache.thrift.TException {
                   scannerOpenWithScan_result result = new scannerOpenWithScan_result();
                   try {
                     result.success = iface.scannerOpenWithScan(args.tableName, args.scan, args.attributes);
          @@ -5024,7 +6060,8 @@ public scannerOpenWithScan_result getResult(I iface, scannerOpenWithScan_args ar
                 }
               }
           
          -    public static class scannerOpen<I extends Iface> extends org.apache.thrift.ProcessFunction<I, scannerOpen_args> {
          +    public static class scannerOpen<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, scannerOpen_args> {
                 public scannerOpen() {
                   super("scannerOpen");
                 }
          @@ -5042,10 +6079,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public scannerOpen_result getResult(I iface, scannerOpen_args args) throws org.apache.thrift.TException {
          +      public scannerOpen_result getResult(I iface, scannerOpen_args args)
          +          throws org.apache.thrift.TException {
                   scannerOpen_result result = new scannerOpen_result();
                   try {
          -          result.success = iface.scannerOpen(args.tableName, args.startRow, args.columns, args.attributes);
          +          result.success =
          +              iface.scannerOpen(args.tableName, args.startRow, args.columns, args.attributes);
                     result.setSuccessIsSet(true);
                   } catch (IOError io) {
                     result.io = io;
          @@ -5054,7 +6093,8 @@ public scannerOpen_result getResult(I iface, scannerOpen_args args) throws org.a
                 }
               }
           
          -    public static class scannerOpenWithStop<I extends Iface> extends org.apache.thrift.ProcessFunction<I, scannerOpenWithStop_args> {
          +    public static class scannerOpenWithStop<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, scannerOpenWithStop_args> {
                 public scannerOpenWithStop() {
                   super("scannerOpenWithStop");
                 }
          @@ -5072,10 +6112,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public scannerOpenWithStop_result getResult(I iface, scannerOpenWithStop_args args) throws org.apache.thrift.TException {
          +      public scannerOpenWithStop_result getResult(I iface, scannerOpenWithStop_args args)
          +          throws org.apache.thrift.TException {
                   scannerOpenWithStop_result result = new scannerOpenWithStop_result();
                   try {
          -          result.success = iface.scannerOpenWithStop(args.tableName, args.startRow, args.stopRow, args.columns, args.attributes);
          +          result.success = iface.scannerOpenWithStop(args.tableName, args.startRow, args.stopRow,
          +            args.columns, args.attributes);
                     result.setSuccessIsSet(true);
                   } catch (IOError io) {
                     result.io = io;
          @@ -5084,7 +6126,8 @@ public scannerOpenWithStop_result getResult(I iface, scannerOpenWithStop_args ar
                 }
               }
           
          -    public static class scannerOpenWithPrefix<I extends Iface> extends org.apache.thrift.ProcessFunction<I, scannerOpenWithPrefix_args> {
          +    public static class scannerOpenWithPrefix<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, scannerOpenWithPrefix_args> {
                 public scannerOpenWithPrefix() {
                   super("scannerOpenWithPrefix");
                 }
          @@ -5102,10 +6145,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public scannerOpenWithPrefix_result getResult(I iface, scannerOpenWithPrefix_args args) throws org.apache.thrift.TException {
          +      public scannerOpenWithPrefix_result getResult(I iface, scannerOpenWithPrefix_args args)
          +          throws org.apache.thrift.TException {
                   scannerOpenWithPrefix_result result = new scannerOpenWithPrefix_result();
                   try {
          -          result.success = iface.scannerOpenWithPrefix(args.tableName, args.startAndPrefix, args.columns, args.attributes);
          +          result.success = iface.scannerOpenWithPrefix(args.tableName, args.startAndPrefix,
          +            args.columns, args.attributes);
                     result.setSuccessIsSet(true);
                   } catch (IOError io) {
                     result.io = io;
          @@ -5114,7 +6159,8 @@ public scannerOpenWithPrefix_result getResult(I iface, scannerOpenWithPrefix_arg
                 }
               }
           
          -    public static class scannerOpenTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, scannerOpenTs_args> {
          +    public static class scannerOpenTs<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, scannerOpenTs_args> {
                 public scannerOpenTs() {
                   super("scannerOpenTs");
                 }
          @@ -5132,10 +6178,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public scannerOpenTs_result getResult(I iface, scannerOpenTs_args args) throws org.apache.thrift.TException {
          +      public scannerOpenTs_result getResult(I iface, scannerOpenTs_args args)
          +          throws org.apache.thrift.TException {
                   scannerOpenTs_result result = new scannerOpenTs_result();
                   try {
          -          result.success = iface.scannerOpenTs(args.tableName, args.startRow, args.columns, args.timestamp, args.attributes);
          +          result.success = iface.scannerOpenTs(args.tableName, args.startRow, args.columns,
          +            args.timestamp, args.attributes);
                     result.setSuccessIsSet(true);
                   } catch (IOError io) {
                     result.io = io;
          @@ -5144,7 +6192,8 @@ public scannerOpenTs_result getResult(I iface, scannerOpenTs_args args) throws o
                 }
               }
           
          -    public static class scannerOpenWithStopTs<I extends Iface> extends org.apache.thrift.ProcessFunction<I, scannerOpenWithStopTs_args> {
          +    public static class scannerOpenWithStopTs<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, scannerOpenWithStopTs_args> {
                 public scannerOpenWithStopTs() {
                   super("scannerOpenWithStopTs");
                 }
          @@ -5162,10 +6211,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public scannerOpenWithStopTs_result getResult(I iface, scannerOpenWithStopTs_args args) throws org.apache.thrift.TException {
          +      public scannerOpenWithStopTs_result getResult(I iface, scannerOpenWithStopTs_args args)
          +          throws org.apache.thrift.TException {
                   scannerOpenWithStopTs_result result = new scannerOpenWithStopTs_result();
                   try {
          -          result.success = iface.scannerOpenWithStopTs(args.tableName, args.startRow, args.stopRow, args.columns, args.timestamp, args.attributes);
          +          result.success = iface.scannerOpenWithStopTs(args.tableName, args.startRow, args.stopRow,
          +            args.columns, args.timestamp, args.attributes);
                     result.setSuccessIsSet(true);
                   } catch (IOError io) {
                     result.io = io;
          @@ -5174,7 +6225,8 @@ public scannerOpenWithStopTs_result getResult(I iface, scannerOpenWithStopTs_arg
                 }
               }
           
          -    public static class scannerGet<I extends Iface> extends org.apache.thrift.ProcessFunction<I, scannerGet_args> {
          +    public static class scannerGet<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, scannerGet_args> {
                 public scannerGet() {
                   super("scannerGet");
                 }
          @@ -5192,7 +6244,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public scannerGet_result getResult(I iface, scannerGet_args args) throws org.apache.thrift.TException {
          +      public scannerGet_result getResult(I iface, scannerGet_args args)
          +          throws org.apache.thrift.TException {
                   scannerGet_result result = new scannerGet_result();
                   try {
                     result.success = iface.scannerGet(args.id);
          @@ -5205,7 +6258,8 @@ public scannerGet_result getResult(I iface, scannerGet_args args) throws org.apa
                 }
               }
           
          -    public static class scannerGetList<I extends Iface> extends org.apache.thrift.ProcessFunction<I, scannerGetList_args> {
          +    public static class scannerGetList<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, scannerGetList_args> {
                 public scannerGetList() {
                   super("scannerGetList");
                 }
          @@ -5223,7 +6277,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public scannerGetList_result getResult(I iface, scannerGetList_args args) throws org.apache.thrift.TException {
          +      public scannerGetList_result getResult(I iface, scannerGetList_args args)
          +          throws org.apache.thrift.TException {
                   scannerGetList_result result = new scannerGetList_result();
                   try {
                     result.success = iface.scannerGetList(args.id, args.nbRows);
          @@ -5236,7 +6291,8 @@ public scannerGetList_result getResult(I iface, scannerGetList_args args) throws
                 }
               }
           
          -    public static class scannerClose<I extends Iface> extends org.apache.thrift.ProcessFunction<I, scannerClose_args> {
          +    public static class scannerClose<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, scannerClose_args> {
                 public scannerClose() {
                   super("scannerClose");
                 }
          @@ -5254,7 +6310,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public scannerClose_result getResult(I iface, scannerClose_args args) throws org.apache.thrift.TException {
          +      public scannerClose_result getResult(I iface, scannerClose_args args)
          +          throws org.apache.thrift.TException {
                   scannerClose_result result = new scannerClose_result();
                   try {
                     iface.scannerClose(args.id);
          @@ -5267,7 +6324,8 @@ public scannerClose_result getResult(I iface, scannerClose_args args) throws org
                 }
               }
           
          -    public static class getRegionInfo<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRegionInfo_args> {
          +    public static class getRegionInfo<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getRegionInfo_args> {
                 public getRegionInfo() {
                   super("getRegionInfo");
                 }
          @@ -5285,7 +6343,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRegionInfo_result getResult(I iface, getRegionInfo_args args) throws org.apache.thrift.TException {
          +      public getRegionInfo_result getResult(I iface, getRegionInfo_args args)
          +          throws org.apache.thrift.TException {
                   getRegionInfo_result result = new getRegionInfo_result();
                   try {
                     result.success = iface.getRegionInfo(args.row);
          @@ -5296,7 +6355,8 @@ public getRegionInfo_result getResult(I iface, getRegionInfo_args args) throws o
                 }
               }
           
          -    public static class append<I extends Iface> extends org.apache.thrift.ProcessFunction<I, append_args> {
          +    public static class append<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, append_args> {
                 public append() {
                   super("append");
                 }
          @@ -5314,7 +6374,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public append_result getResult(I iface, append_args args) throws org.apache.thrift.TException {
          +      public append_result getResult(I iface, append_args args)
          +          throws org.apache.thrift.TException {
                   append_result result = new append_result();
                   try {
                     result.success = iface.append(args.append);
          @@ -5325,7 +6386,8 @@ public append_result getResult(I iface, append_args args) throws org.apache.thri
                 }
               }
           
          -    public static class checkAndPut<I extends Iface> extends org.apache.thrift.ProcessFunction<I, checkAndPut_args> {
          +    public static class checkAndPut<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, checkAndPut_args> {
                 public checkAndPut() {
                   super("checkAndPut");
                 }
          @@ -5343,10 +6405,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public checkAndPut_result getResult(I iface, checkAndPut_args args) throws org.apache.thrift.TException {
          +      public checkAndPut_result getResult(I iface, checkAndPut_args args)
          +          throws org.apache.thrift.TException {
                   checkAndPut_result result = new checkAndPut_result();
                   try {
          -          result.success = iface.checkAndPut(args.tableName, args.row, args.column, args.value, args.mput, args.attributes);
          +          result.success = iface.checkAndPut(args.tableName, args.row, args.column, args.value,
          +            args.mput, args.attributes);
                     result.setSuccessIsSet(true);
                   } catch (IOError io) {
                     result.io = io;
          @@ -5357,7 +6421,8 @@ public checkAndPut_result getResult(I iface, checkAndPut_args args) throws org.a
                 }
               }
           
          -    public static class getThriftServerType<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getThriftServerType_args> {
          +    public static class getThriftServerType<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getThriftServerType_args> {
                 public getThriftServerType() {
                   super("getThriftServerType");
                 }
          @@ -5375,14 +6440,16 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getThriftServerType_result getResult(I iface, getThriftServerType_args args) throws org.apache.thrift.TException {
          +      public getThriftServerType_result getResult(I iface, getThriftServerType_args args)
          +          throws org.apache.thrift.TException {
                   getThriftServerType_result result = new getThriftServerType_result();
                   result.success = iface.getThriftServerType();
                   return result;
                 }
               }
           
          -    public static class getClusterId<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getClusterId_args> {
          +    public static class getClusterId<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getClusterId_args> {
                 public getClusterId() {
                   super("getClusterId");
                 }
          @@ -5400,14 +6467,16 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getClusterId_result getResult(I iface, getClusterId_args args) throws org.apache.thrift.TException {
          +      public getClusterId_result getResult(I iface, getClusterId_args args)
          +          throws org.apache.thrift.TException {
                   getClusterId_result result = new getClusterId_result();
                   result.success = iface.getClusterId();
                   return result;
                 }
               }
           
          -    public static class grant<I extends Iface> extends org.apache.thrift.ProcessFunction<I, grant_args> {
          +    public static class grant<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, grant_args> {
                 public grant() {
                   super("grant");
                 }
          @@ -5437,7 +6506,8 @@ public grant_result getResult(I iface, grant_args args) throws org.apache.thrift
                 }
               }
           
          -    public static class revoke<I extends Iface> extends org.apache.thrift.ProcessFunction<I, revoke_args> {
          +    public static class revoke<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, revoke_args> {
                 public revoke() {
                   super("revoke");
                 }
          @@ -5455,7 +6525,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public revoke_result getResult(I iface, revoke_args args) throws org.apache.thrift.TException {
          +      public revoke_result getResult(I iface, revoke_args args)
          +          throws org.apache.thrift.TException {
                   revoke_result result = new revoke_result();
                   try {
                     result.success = iface.revoke(args.info);
          @@ -5469,17 +6540,25 @@ public revoke_result getResult(I iface, revoke_args args) throws org.apache.thri
           
             }
           
          -  public static class AsyncProcessor<I extends AsyncIface> extends org.apache.thrift.TBaseAsyncProcessor<I> {
          -    private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(AsyncProcessor.class.getName());
          +  public static class AsyncProcessor<I extends AsyncIface>
          +      extends org.apache.thrift.TBaseAsyncProcessor<I> {
          +    private static final org.slf4j.Logger _LOGGER =
          +        org.slf4j.LoggerFactory.getLogger(AsyncProcessor.class.getName());
          +
               public AsyncProcessor(I iface) {
          -      super(iface, getProcessMap(new java.util.HashMap<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
          +      super(iface, getProcessMap(
          +        new java.util.HashMap<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
               }
           
          -    protected AsyncProcessor(I iface, java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
          +    protected AsyncProcessor(I iface,
          +        java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
                 super(iface, getProcessMap(processMap));
               }
           
          -    private static <I extends AsyncIface> java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> getProcessMap(java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
          +    private static <I extends AsyncIface>
          +        java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>
          +        getProcessMap(
          +            java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
                 processMap.put("enableTable", new enableTable());
                 processMap.put("disableTable", new disableTable());
                 processMap.put("isTableEnabled", new isTableEnabled());
          @@ -5532,7 +6611,8 @@ protected AsyncProcessor(I iface, java.util.Map<java.lang.String, org.apache.th
          -    public static class enableTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, enableTable_args, Void> {
          +    public static class enableTable<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, enableTable_args, Void> {
                 public enableTable() {
                   super("enableTable");
                 }
          @@ -5541,13 +6621,15 @@ public enableTable_args getEmptyArgsInstance() {
                   return new enableTable_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       enableTable_result result = new enableTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5556,6 +6638,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5571,14 +6654,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5591,12 +6675,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, enableTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.enableTable(args.tableName,resultHandler);
          +      public void start(I iface, enableTable_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.enableTable(args.tableName, resultHandler);
                 }
               }
           
          -    public static class disableTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, disableTable_args, Void> {
          +    public static class disableTable<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, disableTable_args, Void> {
                 public disableTable() {
                   super("disableTable");
                 }
          @@ -5605,13 +6692,15 @@ public disableTable_args getEmptyArgsInstance() {
                   return new disableTable_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       disableTable_result result = new disableTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5620,6 +6709,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5635,14 +6725,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5655,12 +6746,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, disableTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.disableTable(args.tableName,resultHandler);
          +      public void start(I iface, disableTable_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.disableTable(args.tableName, resultHandler);
                 }
               }
           
          -    public static class isTableEnabled<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, isTableEnabled_args, java.lang.Boolean> {
          +    public static class isTableEnabled<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, isTableEnabled_args, java.lang.Boolean> {
                 public isTableEnabled() {
                   super("isTableEnabled");
                 }
          @@ -5669,15 +6763,17 @@ public isTableEnabled_args getEmptyArgsInstance() {
                   return new isTableEnabled_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       isTableEnabled_result result = new isTableEnabled_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5686,6 +6782,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5701,14 +6798,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5721,12 +6819,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, isTableEnabled_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
          -        iface.isTableEnabled(args.tableName,resultHandler);
          +      public void start(I iface, isTableEnabled_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.isTableEnabled(args.tableName, resultHandler);
                 }
               }
           
          -    public static class compact<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, compact_args, Void> {
          +    public static class compact<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, compact_args, Void> {
                 public compact() {
                   super("compact");
                 }
          @@ -5735,13 +6836,15 @@ public compact_args getEmptyArgsInstance() {
                   return new compact_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       compact_result result = new compact_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5750,6 +6853,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5765,14 +6869,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5785,12 +6890,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, compact_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.compact(args.tableNameOrRegionName,resultHandler);
          +      public void start(I iface, compact_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.compact(args.tableNameOrRegionName, resultHandler);
                 }
               }
           
          -    public static class majorCompact<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, majorCompact_args, Void> {
          +    public static class majorCompact<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, majorCompact_args, Void> {
                 public majorCompact() {
                   super("majorCompact");
                 }
          @@ -5799,13 +6907,15 @@ public majorCompact_args getEmptyArgsInstance() {
                   return new majorCompact_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       majorCompact_result result = new majorCompact_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5814,6 +6924,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5829,14 +6940,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5849,12 +6961,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, majorCompact_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.majorCompact(args.tableNameOrRegionName,resultHandler);
          +      public void start(I iface, majorCompact_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.majorCompact(args.tableNameOrRegionName, resultHandler);
                 }
               }
           
          -    public static class getTableNames<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getTableNames_args, java.util.List<java.nio.ByteBuffer>> {
          +    public static class getTableNames<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, getTableNames_args, java.util.List<java.nio.ByteBuffer>> {
                 public getTableNames() {
                   super("getTableNames");
                 }
          @@ -5863,14 +6978,17 @@ public getTableNames_args getEmptyArgsInstance() {
                   return new getTableNames_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>>() {
                     public void onComplete(java.util.List<java.nio.ByteBuffer> o) {
                       getTableNames_result result = new getTableNames_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5879,6 +6997,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5894,14 +7013,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5914,12 +7034,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getTableNames_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>> resultHandler) throws org.apache.thrift.TException {
          +      public void start(I iface, getTableNames_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.nio.ByteBuffer>> resultHandler)
          +          throws org.apache.thrift.TException {
                   iface.getTableNames(resultHandler);
                 }
               }
           
          -    public static class getTableNamesWithIsTableEnabled<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getTableNamesWithIsTableEnabled_args, java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>> {
          +    public static class getTableNamesWithIsTableEnabled<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, getTableNamesWithIsTableEnabled_args, java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>> {
                 public getTableNamesWithIsTableEnabled() {
                   super("getTableNamesWithIsTableEnabled");
                 }
          @@ -5928,14 +7051,19 @@ public getTableNamesWithIsTableEnabled_args getEmptyArgsInstance() {
                   return new getTableNamesWithIsTableEnabled_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>>() { 
          -          public void onComplete(java.util.Map<java.nio.ByteBuffer,java.lang.Boolean> o) {
          -            getTableNamesWithIsTableEnabled_result result = new getTableNamesWithIsTableEnabled_result();
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>>() {
          +          public void onComplete(java.util.Map<java.nio.ByteBuffer,java.lang.Boolean> o) {
          +            getTableNamesWithIsTableEnabled_result result =
          +                new getTableNamesWithIsTableEnabled_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5944,10 +7072,12 @@ public void onComplete(java.util.Map o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          -            getTableNamesWithIsTableEnabled_result result = new getTableNamesWithIsTableEnabled_result();
          +            getTableNamesWithIsTableEnabled_result result =
          +                new getTableNamesWithIsTableEnabled_result();
                       if (e instanceof IOError) {
                         result.io = (IOError) e;
                         result.setIoIsSet(true);
          @@ -5959,14 +7089,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5979,12 +7110,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getTableNamesWithIsTableEnabled_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>> resultHandler) throws org.apache.thrift.TException {
          +      public void start(I iface, getTableNamesWithIsTableEnabled_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>> resultHandler)
          +          throws org.apache.thrift.TException {
                   iface.getTableNamesWithIsTableEnabled(resultHandler);
                 }
               }
           
          -    public static class getColumnDescriptors<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getColumnDescriptors_args, java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>> {
          +    public static class getColumnDescriptors<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, getColumnDescriptors_args, java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>> {
                 public getColumnDescriptors() {
                   super("getColumnDescriptors");
                 }
          @@ -5993,14 +7127,18 @@ public getColumnDescriptors_args getEmptyArgsInstance() {
                   return new getColumnDescriptors_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>>() { 
          -          public void onComplete(java.util.Map<java.nio.ByteBuffer,ColumnDescriptor> o) {
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>>() {
          +          public void onComplete(java.util.Map<java.nio.ByteBuffer,ColumnDescriptor> o) {
                       getColumnDescriptors_result result = new getColumnDescriptors_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6009,6 +7147,7 @@ public void onComplete(java.util.Map o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6024,14 +7163,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6044,12 +7184,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getColumnDescriptors_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>> resultHandler) throws org.apache.thrift.TException {
          -        iface.getColumnDescriptors(args.tableName,resultHandler);
          +      public void start(I iface, getColumnDescriptors_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getColumnDescriptors(args.tableName, resultHandler);
                 }
               }
           
          -    public static class getTableRegions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getTableRegions_args, java.util.List<TRegionInfo>> {
          +    public static class getTableRegions<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, getTableRegions_args, java.util.List<TRegionInfo>> {
                 public getTableRegions() {
                   super("getTableRegions");
                 }
          @@ -6058,14 +7201,17 @@ public getTableRegions_args getEmptyArgsInstance() {
                   return new getTableRegions_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>>() {
                     public void onComplete(java.util.List<TRegionInfo> o) {
                       getTableRegions_result result = new getTableRegions_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6074,6 +7220,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6089,14 +7236,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6109,12 +7257,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getTableRegions_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>> resultHandler) throws org.apache.thrift.TException {
-        iface.getTableRegions(args.tableName,resultHandler);
+      public void start(I iface, getTableRegions_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRegionInfo>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getTableRegions(args.tableName, resultHandler);
                 }
               }
           
-    public static class createTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, createTable_args, Void> {
+    public static class createTable<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, createTable_args, Void> {
                 public createTable() {
                   super("createTable");
                 }
          @@ -6123,13 +7274,15 @@ public createTable_args getEmptyArgsInstance() {
                   return new createTable_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       createTable_result result = new createTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6138,6 +7291,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6161,14 +7315,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6181,12 +7336,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, createTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.createTable(args.tableName, args.columnFamilies,resultHandler);
+      public void start(I iface, createTable_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.createTable(args.tableName, args.columnFamilies, resultHandler);
                 }
               }
           
-    public static class deleteTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteTable_args, Void> {
+    public static class deleteTable<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, deleteTable_args, Void> {
                 public deleteTable() {
                   super("deleteTable");
                 }
          @@ -6195,13 +7353,15 @@ public deleteTable_args getEmptyArgsInstance() {
                   return new deleteTable_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       deleteTable_result result = new deleteTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6210,6 +7370,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6225,14 +7386,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6245,12 +7407,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, deleteTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.deleteTable(args.tableName,resultHandler);
+      public void start(I iface, deleteTable_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteTable(args.tableName, resultHandler);
                 }
               }
           
-    public static class get<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_args, java.util.List<TCell>> {
+    public static class get<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, get_args, java.util.List<TCell>> {
                 public get() {
                   super("get");
                 }
          @@ -6259,14 +7424,16 @@ public get_args getEmptyArgsInstance() {
                   return new get_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>>() {
           public void onComplete(java.util.List<TCell> o) {
                       get_result result = new get_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6275,6 +7442,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6290,14 +7458,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6310,12 +7479,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, get_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler) throws org.apache.thrift.TException {
-        iface.get(args.tableName, args.row, args.column, args.attributes,resultHandler);
+      public void start(I iface, get_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.get(args.tableName, args.row, args.column, args.attributes, resultHandler);
                 }
               }
           
-    public static class getVer<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getVer_args, java.util.List<TCell>> {
+    public static class getVer<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, getVer_args, java.util.List<TCell>> {
                 public getVer() {
                   super("getVer");
                 }
          @@ -6324,14 +7496,16 @@ public getVer_args getEmptyArgsInstance() {
                   return new getVer_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>>() {
           public void onComplete(java.util.List<TCell> o) {
                       getVer_result result = new getVer_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6340,6 +7514,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6355,14 +7530,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6375,12 +7551,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getVer_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler) throws org.apache.thrift.TException {
-        iface.getVer(args.tableName, args.row, args.column, args.numVersions, args.attributes,resultHandler);
+      public void start(I iface, getVer_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getVer(args.tableName, args.row, args.column, args.numVersions, args.attributes,
          +          resultHandler);
                 }
               }
           
-    public static class getVerTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getVerTs_args, java.util.List<TCell>> {
+    public static class getVerTs<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, getVerTs_args, java.util.List<TCell>> {
                 public getVerTs() {
                   super("getVerTs");
                 }
          @@ -6389,14 +7569,16 @@ public getVerTs_args getEmptyArgsInstance() {
                   return new getVerTs_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>>() {
           public void onComplete(java.util.List<TCell> o) {
                       getVerTs_result result = new getVerTs_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6405,6 +7587,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6420,14 +7603,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6440,12 +7624,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getVerTs_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler) throws org.apache.thrift.TException {
-        iface.getVerTs(args.tableName, args.row, args.column, args.timestamp, args.numVersions, args.attributes,resultHandler);
+      public void start(I iface, getVerTs_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getVerTs(args.tableName, args.row, args.column, args.timestamp, args.numVersions,
          +          args.attributes, resultHandler);
                 }
               }
           
-    public static class getRow<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRow_args, java.util.List<TRowResult>> {
+    public static class getRow<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, getRow_args, java.util.List<TRowResult>> {
                 public getRow() {
                   super("getRow");
                 }
          @@ -6454,14 +7642,17 @@ public getRow_args getEmptyArgsInstance() {
                   return new getRow_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
+          getResultHandler(
+              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+              final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
           public void onComplete(java.util.List<TRowResult> o) {
                       getRow_result result = new getRow_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6470,6 +7661,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6485,14 +7677,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6505,12 +7698,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getRow_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
-        iface.getRow(args.tableName, args.row, args.attributes,resultHandler);
+      public void start(I iface, getRow_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRow(args.tableName, args.row, args.attributes, resultHandler);
                 }
               }
           
-    public static class getRowWithColumns<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRowWithColumns_args, java.util.List<TRowResult>> {
+    public static class getRowWithColumns<I extends AsyncIface> extends
+        org.apache.thrift.AsyncProcessFunction<I, getRowWithColumns_args, java.util.List<TRowResult>> {
                 public getRowWithColumns() {
                   super("getRowWithColumns");
                 }
          @@ -6519,14 +7715,17 @@ public getRowWithColumns_args getEmptyArgsInstance() {
                   return new getRowWithColumns_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
+          getResultHandler(
+              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+              final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
           public void onComplete(java.util.List<TRowResult> o) {
                       getRowWithColumns_result result = new getRowWithColumns_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6535,6 +7734,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6550,14 +7750,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6570,12 +7771,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getRowWithColumns_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
-        iface.getRowWithColumns(args.tableName, args.row, args.columns, args.attributes,resultHandler);
+      public void start(I iface, getRowWithColumns_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRowWithColumns(args.tableName, args.row, args.columns, args.attributes,
          +          resultHandler);
                 }
               }
           
-    public static class getRowTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRowTs_args, java.util.List<TRowResult>> {
+    public static class getRowTs<I extends AsyncIface> extends
+        org.apache.thrift.AsyncProcessFunction<I, getRowTs_args, java.util.List<TRowResult>> {
                 public getRowTs() {
                   super("getRowTs");
                 }
          @@ -6584,14 +7789,17 @@ public getRowTs_args getEmptyArgsInstance() {
                   return new getRowTs_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
+          getResultHandler(
+              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+              final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
           public void onComplete(java.util.List<TRowResult> o) {
                       getRowTs_result result = new getRowTs_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6600,6 +7808,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6615,14 +7824,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6635,12 +7845,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getRowTs_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
-        iface.getRowTs(args.tableName, args.row, args.timestamp, args.attributes,resultHandler);
+      public void start(I iface, getRowTs_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRowTs(args.tableName, args.row, args.timestamp, args.attributes, resultHandler);
                 }
               }
           
-    public static class getRowWithColumnsTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRowWithColumnsTs_args, java.util.List<TRowResult>> {
+    public static class getRowWithColumnsTs<I extends AsyncIface> extends
+        org.apache.thrift.AsyncProcessFunction<I, getRowWithColumnsTs_args, java.util.List<TRowResult>> {
                 public getRowWithColumnsTs() {
                   super("getRowWithColumnsTs");
                 }
          @@ -6649,14 +7862,17 @@ public getRowWithColumnsTs_args getEmptyArgsInstance() {
                   return new getRowWithColumnsTs_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
+          getResultHandler(
+              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+              final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
           public void onComplete(java.util.List<TRowResult> o) {
                       getRowWithColumnsTs_result result = new getRowWithColumnsTs_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6665,6 +7881,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6680,14 +7897,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6700,12 +7918,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getRowWithColumnsTs_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
-        iface.getRowWithColumnsTs(args.tableName, args.row, args.columns, args.timestamp, args.attributes,resultHandler);
+      public void start(I iface, getRowWithColumnsTs_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRowWithColumnsTs(args.tableName, args.row, args.columns, args.timestamp,
          +          args.attributes, resultHandler);
                 }
               }
           
-    public static class getRows<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRows_args, java.util.List<TRowResult>> {
+    public static class getRows<I extends AsyncIface> extends
+        org.apache.thrift.AsyncProcessFunction<I, getRows_args, java.util.List<TRowResult>> {
                 public getRows() {
                   super("getRows");
                 }
          @@ -6714,14 +7936,17 @@ public getRows_args getEmptyArgsInstance() {
                   return new getRows_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
+          getResultHandler(
+              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+              final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
           public void onComplete(java.util.List<TRowResult> o) {
                       getRows_result result = new getRows_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6730,6 +7955,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6745,14 +7971,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6765,12 +7992,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getRows_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
-        iface.getRows(args.tableName, args.rows, args.attributes,resultHandler);
+      public void start(I iface, getRows_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRows(args.tableName, args.rows, args.attributes, resultHandler);
                 }
               }
           
-    public static class getRowsWithColumns<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRowsWithColumns_args, java.util.List<TRowResult>> {
+    public static class getRowsWithColumns<I extends AsyncIface> extends
+        org.apache.thrift.AsyncProcessFunction<I, getRowsWithColumns_args, java.util.List<TRowResult>> {
                 public getRowsWithColumns() {
                   super("getRowsWithColumns");
                 }
          @@ -6779,14 +8009,17 @@ public getRowsWithColumns_args getEmptyArgsInstance() {
                   return new getRowsWithColumns_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
+          getResultHandler(
+              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+              final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
           public void onComplete(java.util.List<TRowResult> o) {
                       getRowsWithColumns_result result = new getRowsWithColumns_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6795,6 +8028,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6810,14 +8044,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6830,12 +8065,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getRowsWithColumns_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
-        iface.getRowsWithColumns(args.tableName, args.rows, args.columns, args.attributes,resultHandler);
+      public void start(I iface, getRowsWithColumns_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRowsWithColumns(args.tableName, args.rows, args.columns, args.attributes,
          +          resultHandler);
                 }
               }
           
-    public static class getRowsTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRowsTs_args, java.util.List<TRowResult>> {
+    public static class getRowsTs<I extends AsyncIface> extends
+        org.apache.thrift.AsyncProcessFunction<I, getRowsTs_args, java.util.List<TRowResult>> {
                 public getRowsTs() {
                   super("getRowsTs");
                 }
          @@ -6844,14 +8083,17 @@ public getRowsTs_args getEmptyArgsInstance() {
                   return new getRowsTs_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
+          getResultHandler(
+              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+              final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
           public void onComplete(java.util.List<TRowResult> o) {
                       getRowsTs_result result = new getRowsTs_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6860,6 +8102,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6875,14 +8118,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6895,12 +8139,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, getRowsTs_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
-        iface.getRowsTs(args.tableName, args.rows, args.timestamp, args.attributes,resultHandler);
+      public void start(I iface, getRowsTs_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRowsTs(args.tableName, args.rows, args.timestamp, args.attributes, resultHandler);
                 }
               }
           
-    public static class getRowsWithColumnsTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRowsWithColumnsTs_args, java.util.List<TRowResult>> {
+    public static class getRowsWithColumnsTs<I extends AsyncIface> extends
+        org.apache.thrift.AsyncProcessFunction<I, getRowsWithColumnsTs_args, java.util.List<TRowResult>> {
                 public getRowsWithColumnsTs() {
                   super("getRowsWithColumnsTs");
                 }
          @@ -6909,14 +8156,17 @@ public getRowsWithColumnsTs_args getEmptyArgsInstance() {
                   return new getRowsWithColumnsTs_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
+          getResultHandler(
+              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+              final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
           public void onComplete(java.util.List<TRowResult> o) {
                       getRowsWithColumnsTs_result result = new getRowsWithColumnsTs_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6925,6 +8175,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6940,14 +8191,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6960,12 +8212,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getRowsWithColumnsTs_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
          -        iface.getRowsWithColumnsTs(args.tableName, args.rows, args.columns, args.timestamp, args.attributes,resultHandler);
          +      public void start(I iface, getRowsWithColumnsTs_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRowsWithColumnsTs(args.tableName, args.rows, args.columns, args.timestamp,
          +          args.attributes, resultHandler);
                 }
               }
           
          -    public static class mutateRow<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, mutateRow_args, Void> {
          +    public static class mutateRow<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, mutateRow_args, Void> {
                 public mutateRow() {
                   super("mutateRow");
                 }
          @@ -6974,13 +8230,15 @@ public mutateRow_args getEmptyArgsInstance() {
                   return new mutateRow_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       mutateRow_result result = new mutateRow_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6989,6 +8247,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7008,14 +8267,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7028,12 +8288,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, mutateRow_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.mutateRow(args.tableName, args.row, args.mutations, args.attributes,resultHandler);
          +      public void start(I iface, mutateRow_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.mutateRow(args.tableName, args.row, args.mutations, args.attributes, resultHandler);
                 }
               }
           
          -    public static class mutateRowTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, mutateRowTs_args, Void> {
          +    public static class mutateRowTs<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, mutateRowTs_args, Void> {
                 public mutateRowTs() {
                   super("mutateRowTs");
                 }
          @@ -7042,13 +8305,15 @@ public mutateRowTs_args getEmptyArgsInstance() {
                   return new mutateRowTs_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       mutateRowTs_result result = new mutateRowTs_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7057,6 +8322,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7076,14 +8342,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7096,12 +8363,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, mutateRowTs_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.mutateRowTs(args.tableName, args.row, args.mutations, args.timestamp, args.attributes,resultHandler);
          +      public void start(I iface, mutateRowTs_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.mutateRowTs(args.tableName, args.row, args.mutations, args.timestamp, args.attributes,
          +          resultHandler);
                 }
               }
           
          -    public static class mutateRows<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, mutateRows_args, Void> {
          +    public static class mutateRows<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, mutateRows_args, Void> {
                 public mutateRows() {
                   super("mutateRows");
                 }
          @@ -7110,13 +8381,15 @@ public mutateRows_args getEmptyArgsInstance() {
                   return new mutateRows_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       mutateRows_result result = new mutateRows_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7125,6 +8398,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7144,14 +8418,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7164,12 +8439,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, mutateRows_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.mutateRows(args.tableName, args.rowBatches, args.attributes,resultHandler);
          +      public void start(I iface, mutateRows_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.mutateRows(args.tableName, args.rowBatches, args.attributes, resultHandler);
                 }
               }
           
          -    public static class mutateRowsTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, mutateRowsTs_args, Void> {
          +    public static class mutateRowsTs<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, mutateRowsTs_args, Void> {
                 public mutateRowsTs() {
                   super("mutateRowsTs");
                 }
          @@ -7178,13 +8456,15 @@ public mutateRowsTs_args getEmptyArgsInstance() {
                   return new mutateRowsTs_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       mutateRowsTs_result result = new mutateRowsTs_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7193,6 +8473,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7212,14 +8493,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7232,12 +8514,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, mutateRowsTs_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.mutateRowsTs(args.tableName, args.rowBatches, args.timestamp, args.attributes,resultHandler);
          +      public void start(I iface, mutateRowsTs_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.mutateRowsTs(args.tableName, args.rowBatches, args.timestamp, args.attributes,
          +          resultHandler);
                 }
               }
           
          -    public static class atomicIncrement<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, atomicIncrement_args, java.lang.Long> {
          +    public static class atomicIncrement<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, atomicIncrement_args, java.lang.Long> {
                 public atomicIncrement() {
                   super("atomicIncrement");
                 }
          @@ -7246,15 +8532,17 @@ public atomicIncrement_args getEmptyArgsInstance() {
                   return new atomicIncrement_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Long> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Long> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Long>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Long>() {
                     public void onComplete(java.lang.Long o) {
                       atomicIncrement_result result = new atomicIncrement_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7263,6 +8551,7 @@ public void onComplete(java.lang.Long o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7282,14 +8571,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7302,12 +8592,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, atomicIncrement_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Long> resultHandler) throws org.apache.thrift.TException {
          -        iface.atomicIncrement(args.tableName, args.row, args.column, args.value,resultHandler);
          +      public void start(I iface, atomicIncrement_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Long> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.atomicIncrement(args.tableName, args.row, args.column, args.value, resultHandler);
                 }
               }
           
          -    public static class deleteAll<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteAll_args, Void> {
          +    public static class deleteAll<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, deleteAll_args, Void> {
                 public deleteAll() {
                   super("deleteAll");
                 }
          @@ -7316,13 +8609,15 @@ public deleteAll_args getEmptyArgsInstance() {
                   return new deleteAll_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       deleteAll_result result = new deleteAll_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7331,6 +8626,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7346,14 +8642,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7366,12 +8663,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, deleteAll_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.deleteAll(args.tableName, args.row, args.column, args.attributes,resultHandler);
          +      public void start(I iface, deleteAll_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteAll(args.tableName, args.row, args.column, args.attributes, resultHandler);
                 }
               }
           
          -    public static class deleteAllTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteAllTs_args, Void> {
          +    public static class deleteAllTs<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, deleteAllTs_args, Void> {
                 public deleteAllTs() {
                   super("deleteAllTs");
                 }
          @@ -7380,13 +8680,15 @@ public deleteAllTs_args getEmptyArgsInstance() {
                   return new deleteAllTs_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       deleteAllTs_result result = new deleteAllTs_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7395,6 +8697,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7410,14 +8713,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7430,12 +8734,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, deleteAllTs_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.deleteAllTs(args.tableName, args.row, args.column, args.timestamp, args.attributes,resultHandler);
          +      public void start(I iface, deleteAllTs_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteAllTs(args.tableName, args.row, args.column, args.timestamp, args.attributes,
          +          resultHandler);
                 }
               }
           
          -    public static class deleteAllRow<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteAllRow_args, Void> {
          +    public static class deleteAllRow<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, deleteAllRow_args, Void> {
                 public deleteAllRow() {
                   super("deleteAllRow");
                 }
          @@ -7444,13 +8752,15 @@ public deleteAllRow_args getEmptyArgsInstance() {
                   return new deleteAllRow_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       deleteAllRow_result result = new deleteAllRow_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7459,6 +8769,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7474,14 +8785,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7494,12 +8806,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, deleteAllRow_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.deleteAllRow(args.tableName, args.row, args.attributes,resultHandler);
          +      public void start(I iface, deleteAllRow_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteAllRow(args.tableName, args.row, args.attributes, resultHandler);
                 }
               }
           
          -    public static class increment<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, increment_args, Void> {
          +    public static class increment<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, increment_args, Void> {
                 public increment() {
                   super("increment");
                 }
          @@ -7508,13 +8823,15 @@ public increment_args getEmptyArgsInstance() {
                   return new increment_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       increment_result result = new increment_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7523,6 +8840,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7538,14 +8856,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7558,12 +8877,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, increment_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.increment(args.increment,resultHandler);
          +      public void start(I iface, increment_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.increment(args.increment, resultHandler);
                 }
               }
           
          -    public static class incrementRows<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, incrementRows_args, Void> {
          +    public static class incrementRows<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, incrementRows_args, Void> {
                 public incrementRows() {
                   super("incrementRows");
                 }
          @@ -7572,13 +8894,15 @@ public incrementRows_args getEmptyArgsInstance() {
                   return new incrementRows_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       incrementRows_result result = new incrementRows_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7587,6 +8911,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7602,14 +8927,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7622,12 +8948,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, incrementRows_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.incrementRows(args.increments,resultHandler);
          +      public void start(I iface, incrementRows_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.incrementRows(args.increments, resultHandler);
                 }
               }
           
          -    public static class deleteAllRowTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteAllRowTs_args, Void> {
          +    public static class deleteAllRowTs<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, deleteAllRowTs_args, Void> {
                 public deleteAllRowTs() {
                   super("deleteAllRowTs");
                 }
          @@ -7636,13 +8965,15 @@ public deleteAllRowTs_args getEmptyArgsInstance() {
                   return new deleteAllRowTs_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       deleteAllRowTs_result result = new deleteAllRowTs_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7651,6 +8982,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7666,14 +8998,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7686,12 +9019,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, deleteAllRowTs_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.deleteAllRowTs(args.tableName, args.row, args.timestamp, args.attributes,resultHandler);
          +      public void start(I iface, deleteAllRowTs_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteAllRowTs(args.tableName, args.row, args.timestamp, args.attributes,
          +          resultHandler);
                 }
               }
           
          -    public static class scannerOpenWithScan<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, scannerOpenWithScan_args, java.lang.Integer> {
          +    public static class scannerOpenWithScan<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, scannerOpenWithScan_args, java.lang.Integer> {
                 public scannerOpenWithScan() {
                   super("scannerOpenWithScan");
                 }
          @@ -7700,15 +9037,17 @@ public scannerOpenWithScan_args getEmptyArgsInstance() {
                   return new scannerOpenWithScan_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() {
                     public void onComplete(java.lang.Integer o) {
                       scannerOpenWithScan_result result = new scannerOpenWithScan_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7717,6 +9056,7 @@ public void onComplete(java.lang.Integer o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7732,14 +9072,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7752,12 +9093,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, scannerOpenWithScan_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
          -        iface.scannerOpenWithScan(args.tableName, args.scan, args.attributes,resultHandler);
          +      public void start(I iface, scannerOpenWithScan_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.scannerOpenWithScan(args.tableName, args.scan, args.attributes, resultHandler);
                 }
               }
           
          -    public static class scannerOpen<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, scannerOpen_args, java.lang.Integer> {
          +    public static class scannerOpen<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, scannerOpen_args, java.lang.Integer> {
                 public scannerOpen() {
                   super("scannerOpen");
                 }
          @@ -7766,15 +9110,17 @@ public scannerOpen_args getEmptyArgsInstance() {
                   return new scannerOpen_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() {
                     public void onComplete(java.lang.Integer o) {
                       scannerOpen_result result = new scannerOpen_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7783,6 +9129,7 @@ public void onComplete(java.lang.Integer o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7798,14 +9145,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7818,12 +9166,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, scannerOpen_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
          -        iface.scannerOpen(args.tableName, args.startRow, args.columns, args.attributes,resultHandler);
          +      public void start(I iface, scannerOpen_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.scannerOpen(args.tableName, args.startRow, args.columns, args.attributes,
          +          resultHandler);
                 }
               }
           
          -    public static class scannerOpenWithStop<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, scannerOpenWithStop_args, java.lang.Integer> {
          +    public static class scannerOpenWithStop<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, scannerOpenWithStop_args, java.lang.Integer> {
                 public scannerOpenWithStop() {
                   super("scannerOpenWithStop");
                 }
          @@ -7832,15 +9184,17 @@ public scannerOpenWithStop_args getEmptyArgsInstance() {
                   return new scannerOpenWithStop_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() {
                     public void onComplete(java.lang.Integer o) {
                       scannerOpenWithStop_result result = new scannerOpenWithStop_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7849,6 +9203,7 @@ public void onComplete(java.lang.Integer o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7864,14 +9219,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7884,12 +9240,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, scannerOpenWithStop_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
          -        iface.scannerOpenWithStop(args.tableName, args.startRow, args.stopRow, args.columns, args.attributes,resultHandler);
          +      public void start(I iface, scannerOpenWithStop_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.scannerOpenWithStop(args.tableName, args.startRow, args.stopRow, args.columns,
          +          args.attributes, resultHandler);
                 }
               }
           
          -    public static class scannerOpenWithPrefix<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, scannerOpenWithPrefix_args, java.lang.Integer> {
          +    public static class scannerOpenWithPrefix<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, scannerOpenWithPrefix_args, java.lang.Integer> {
                 public scannerOpenWithPrefix() {
                   super("scannerOpenWithPrefix");
                 }
          @@ -7898,15 +9258,17 @@ public scannerOpenWithPrefix_args getEmptyArgsInstance() {
                   return new scannerOpenWithPrefix_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() {
                     public void onComplete(java.lang.Integer o) {
                       scannerOpenWithPrefix_result result = new scannerOpenWithPrefix_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7915,6 +9277,7 @@ public void onComplete(java.lang.Integer o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7930,14 +9293,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7950,12 +9314,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, scannerOpenWithPrefix_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
          -        iface.scannerOpenWithPrefix(args.tableName, args.startAndPrefix, args.columns, args.attributes,resultHandler);
          +      public void start(I iface, scannerOpenWithPrefix_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.scannerOpenWithPrefix(args.tableName, args.startAndPrefix, args.columns,
          +          args.attributes, resultHandler);
                 }
               }
           
          -    public static class scannerOpenTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, scannerOpenTs_args, java.lang.Integer> {
          +    public static class scannerOpenTs<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, scannerOpenTs_args, java.lang.Integer> {
                 public scannerOpenTs() {
                   super("scannerOpenTs");
                 }
          @@ -7964,15 +9332,17 @@ public scannerOpenTs_args getEmptyArgsInstance() {
                   return new scannerOpenTs_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() {
                     public void onComplete(java.lang.Integer o) {
                       scannerOpenTs_result result = new scannerOpenTs_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7981,6 +9351,7 @@ public void onComplete(java.lang.Integer o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7996,14 +9367,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8016,12 +9388,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, scannerOpenTs_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
          -        iface.scannerOpenTs(args.tableName, args.startRow, args.columns, args.timestamp, args.attributes,resultHandler);
          +      public void start(I iface, scannerOpenTs_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.scannerOpenTs(args.tableName, args.startRow, args.columns, args.timestamp,
          +          args.attributes, resultHandler);
                 }
               }
           
          -    public static class scannerOpenWithStopTs<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, scannerOpenWithStopTs_args, java.lang.Integer> {
          +    public static class scannerOpenWithStopTs<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, scannerOpenWithStopTs_args, java.lang.Integer> {
                 public scannerOpenWithStopTs() {
                   super("scannerOpenWithStopTs");
                 }
          @@ -8030,15 +9406,17 @@ public scannerOpenWithStopTs_args getEmptyArgsInstance() {
                   return new scannerOpenWithStopTs_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() {
                     public void onComplete(java.lang.Integer o) {
                       scannerOpenWithStopTs_result result = new scannerOpenWithStopTs_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8047,6 +9425,7 @@ public void onComplete(java.lang.Integer o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8062,14 +9441,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8082,12 +9462,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, scannerOpenWithStopTs_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
          -        iface.scannerOpenWithStopTs(args.tableName, args.startRow, args.stopRow, args.columns, args.timestamp, args.attributes,resultHandler);
          +      public void start(I iface, scannerOpenWithStopTs_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.scannerOpenWithStopTs(args.tableName, args.startRow, args.stopRow, args.columns,
          +          args.timestamp, args.attributes, resultHandler);
                 }
               }
           
          -    public static class scannerGet<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, scannerGet_args, java.util.List<TRowResult>> {
          +    public static class scannerGet<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, scannerGet_args, java.util.List<TRowResult>> {
                 public scannerGet() {
                   super("scannerGet");
                 }
          @@ -8096,14 +9480,17 @@ public scannerGet_args getEmptyArgsInstance() {
                   return new scannerGet_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
                     public void onComplete(java.util.List<TRowResult> o) {
                       scannerGet_result result = new scannerGet_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8112,6 +9499,7 @@ public void onComplete(java.util.List<TRowResult> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8131,14 +9519,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8151,12 +9540,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, scannerGet_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
          -        iface.scannerGet(args.id,resultHandler);
          +      public void start(I iface, scannerGet_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.scannerGet(args.id, resultHandler);
                 }
               }
           
          -    public static class scannerGetList<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, scannerGetList_args, java.util.List<TRowResult>> {
          +    public static class scannerGetList<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, scannerGetList_args, java.util.List<TRowResult>> {
                 public scannerGetList() {
                   super("scannerGetList");
                 }
          @@ -8165,14 +9557,17 @@ public scannerGetList_args getEmptyArgsInstance() {
                   return new scannerGetList_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>>() {
                     public void onComplete(java.util.List<TRowResult> o) {
                       scannerGetList_result result = new scannerGetList_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8181,6 +9576,7 @@ public void onComplete(java.util.List<TRowResult> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8200,14 +9596,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8220,12 +9617,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, scannerGetList_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler) throws org.apache.thrift.TException {
          -        iface.scannerGetList(args.id, args.nbRows,resultHandler);
          +      public void start(I iface, scannerGetList_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TRowResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.scannerGetList(args.id, args.nbRows, resultHandler);
                 }
               }
           
          -    public static class scannerClose<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, scannerClose_args, Void> {
          +    public static class scannerClose<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, scannerClose_args, Void> {
                 public scannerClose() {
                   super("scannerClose");
                 }
          @@ -8234,13 +9634,15 @@ public scannerClose_args getEmptyArgsInstance() {
                   return new scannerClose_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       scannerClose_result result = new scannerClose_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8249,6 +9651,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8268,14 +9671,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8288,12 +9692,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, scannerClose_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          -        iface.scannerClose(args.id,resultHandler);
          +      public void start(I iface, scannerClose_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.scannerClose(args.id, resultHandler);
                 }
               }
           
          -    public static class getRegionInfo<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRegionInfo_args, TRegionInfo> {
          +    public static class getRegionInfo<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, getRegionInfo_args, TRegionInfo> {
                 public getRegionInfo() {
                   super("getRegionInfo");
                 }
          @@ -8302,14 +9709,16 @@ public getRegionInfo_args getEmptyArgsInstance() {
                   return new getRegionInfo_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<TRegionInfo> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<TRegionInfo> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<TRegionInfo>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<TRegionInfo>() {
                     public void onComplete(TRegionInfo o) {
                       getRegionInfo_result result = new getRegionInfo_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8318,6 +9727,7 @@ public void onComplete(TRegionInfo o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8333,14 +9743,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8353,12 +9764,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getRegionInfo_args args, org.apache.thrift.async.AsyncMethodCallback<TRegionInfo> resultHandler) throws org.apache.thrift.TException {
          -        iface.getRegionInfo(args.row,resultHandler);
          +      public void start(I iface, getRegionInfo_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<TRegionInfo> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRegionInfo(args.row, resultHandler);
                 }
               }
           
          -    public static class append<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_args, java.util.List<TCell>> {
          +    public static class append<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, append_args, java.util.List<TCell>> {
                 public append() {
                   super("append");
                 }
          @@ -8367,14 +9781,16 @@ public append_args getEmptyArgsInstance() {
                   return new append_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>>() {
                     public void onComplete(java.util.List<TCell> o) {
                       append_result result = new append_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8383,6 +9799,7 @@ public void onComplete(java.util.List<TCell> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8398,14 +9815,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8418,12 +9836,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, append_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler) throws org.apache.thrift.TException {
          -        iface.append(args.append,resultHandler);
          +      public void start(I iface, append_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TCell>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.append(args.append, resultHandler);
                 }
               }
           
          -    public static class checkAndPut<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, checkAndPut_args, java.lang.Boolean> {
          +    public static class checkAndPut<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, checkAndPut_args, java.lang.Boolean> {
                 public checkAndPut() {
                   super("checkAndPut");
                 }
          @@ -8432,15 +9853,17 @@ public checkAndPut_args getEmptyArgsInstance() {
                   return new checkAndPut_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       checkAndPut_result result = new checkAndPut_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8449,6 +9872,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8468,14 +9892,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8488,12 +9913,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, checkAndPut_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
          -        iface.checkAndPut(args.tableName, args.row, args.column, args.value, args.mput, args.attributes,resultHandler);
          +      public void start(I iface, checkAndPut_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.checkAndPut(args.tableName, args.row, args.column, args.value, args.mput,
          +          args.attributes, resultHandler);
                 }
               }
           
          -    public static class getThriftServerType<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getThriftServerType_args, TThriftServerType> {
          +    public static class getThriftServerType<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, getThriftServerType_args, TThriftServerType> {
                 public getThriftServerType() {
                   super("getThriftServerType");
                 }
          @@ -8502,14 +9931,16 @@ public getThriftServerType_args getEmptyArgsInstance() {
                   return new getThriftServerType_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<TThriftServerType>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<TThriftServerType>() {
                     public void onComplete(TThriftServerType o) {
                       getThriftServerType_result result = new getThriftServerType_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8518,6 +9949,7 @@ public void onComplete(TThriftServerType o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8529,14 +9961,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8549,12 +9982,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getThriftServerType_args args, org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler) throws org.apache.thrift.TException {
          +      public void start(I iface, getThriftServerType_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler)
          +          throws org.apache.thrift.TException {
                   iface.getThriftServerType(resultHandler);
                 }
               }
           
          -    public static class getClusterId<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getClusterId_args, java.lang.String> {
          +    public static class getClusterId<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, getClusterId_args, java.lang.String> {
                 public getClusterId() {
                   super("getClusterId");
                 }
          @@ -8563,14 +9999,16 @@ public getClusterId_args getEmptyArgsInstance() {
                   return new getClusterId_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.String> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.String> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.String>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.String>() {
                     public void onComplete(java.lang.String o) {
                       getClusterId_result result = new getClusterId_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8579,6 +10017,7 @@ public void onComplete(java.lang.String o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8590,14 +10029,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8610,12 +10050,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getClusterId_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler) throws org.apache.thrift.TException {
          +      public void start(I iface, getClusterId_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler)
          +          throws org.apache.thrift.TException {
                   iface.getClusterId(resultHandler);
                 }
               }
           
          -    public static class grant<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, grant_args, java.lang.Boolean> {
          +    public static class grant<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, grant_args, java.lang.Boolean> {
                 public grant() {
                   super("grant");
                 }
          @@ -8624,15 +10067,17 @@ public grant_args getEmptyArgsInstance() {
                   return new grant_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       grant_result result = new grant_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8641,6 +10086,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8656,14 +10102,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8676,12 +10123,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, grant_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
          -        iface.grant(args.info,resultHandler);
          +      public void start(I iface, grant_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.grant(args.info, resultHandler);
                 }
               }
           
          -    public static class revoke<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, revoke_args, java.lang.Boolean> {
          +    public static class revoke<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, revoke_args, java.lang.Boolean> {
                 public revoke() {
                   super("revoke");
                 }
          @@ -8690,15 +10140,17 @@ public revoke_args getEmptyArgsInstance() {
                   return new revoke_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       revoke_result result = new revoke_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8707,6 +10159,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8722,14 +10175,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8742,34 +10196,47 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, revoke_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
          -        iface.revoke(args.info,resultHandler);
          +      public void start(I iface, revoke_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.revoke(args.info, resultHandler);
                 }
               }
           
             }
           
           -  public static class enableTable_args implements org.apache.thrift.TBase<enableTable_args, enableTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<enableTable_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("enableTable_args");
          +  public static class enableTable_args
           +      implements org.apache.thrift.TBase<enableTable_args, enableTable_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<enableTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("enableTable_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new enableTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new enableTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new enableTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new enableTable_argsTupleSchemeFactory();
           
               /**
                * name of the table
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of the table
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -8782,7 +10249,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -8791,12 +10258,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -8828,19 +10295,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Bytes")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Bytes")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(enableTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(enableTable_args.class,
          +        metaDataMap);
               }
           
               public enableTable_args() {
               }
           
          -    public enableTable_args(
          -      java.nio.ByteBuffer tableName)
          -    {
          +    public enableTable_args(java.nio.ByteBuffer tableName) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
               }
          @@ -8879,11 +10349,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of the table
                */
               public enableTable_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public enableTable_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public enableTable_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -8903,19 +10375,20 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -8923,46 +10396,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof enableTable_args)
          -        return this.equals((enableTable_args)that);
          +      if (that instanceof enableTable_args) return this.equals((enableTable_args) that);
                 return false;
               }
           
               public boolean equals(enableTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -8973,8 +10444,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -9005,11 +10475,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -9036,35 +10508,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class enableTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class enableTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public enableTable_argsStandardScheme getScheme() {
                   return new enableTable_argsStandardScheme();
                 }
               }
           
           -    private static class enableTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<enableTable_args> {
          +    private static class enableTable_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<enableTable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -9072,7 +10549,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -9083,11 +10560,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -9102,17 +10581,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_args s
           
               }
           
          -    private static class enableTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class enableTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public enableTable_argsTupleScheme getScheme() {
                   return new enableTable_argsTupleScheme();
                 }
               }
           
           -    private static class enableTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<enableTable_args> {
          +    private static class enableTable_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<enableTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -9124,8 +10607,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -9134,26 +10619,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_args str
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class enableTable_result implements org.apache.thrift.TBase<enableTable_result, enableTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<enableTable_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("enableTable_result");
          +  public static class enableTable_result
           +      implements org.apache.thrift.TBase<enableTable_result, enableTable_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<enableTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("enableTable_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new enableTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new enableTable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new enableTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new enableTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -9166,7 +10665,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -9175,12 +10674,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -9212,19 +10711,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(enableTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(enableTable_result.class,
          +        metaDataMap);
               }
           
               public enableTable_result() {
               }
           
          -    public enableTable_result(
          -      IOError io)
          -    {
          +    public enableTable_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -9272,15 +10774,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -9288,46 +10791,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof enableTable_result)
          -        return this.equals((enableTable_result)that);
          +      if (that instanceof enableTable_result) return this.equals((enableTable_result) that);
                 return false;
               }
           
               public boolean equals(enableTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -9338,8 +10839,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -9370,13 +10870,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -9401,35 +10903,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class enableTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class enableTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public enableTable_resultStandardScheme getScheme() {
                   return new enableTable_resultStandardScheme();
                 }
               }
           
           -    private static class enableTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<enableTable_result> {
          +    private static class enableTable_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<enableTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -9438,7 +10945,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -9449,11 +10956,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -9468,17 +10977,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_result
           
               }
           
          -    private static class enableTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class enableTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public enableTable_resultTupleScheme getScheme() {
                   return new enableTable_resultTupleScheme();
                 }
               }
           
           -    private static class enableTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<enableTable_result> {
          +    private static class enableTable_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<enableTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -9490,8 +11003,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -9501,32 +11016,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_result s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class disableTable_args implements org.apache.thrift.TBase<disableTable_args, disableTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<disableTable_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("disableTable_args");
          +  public static class disableTable_args
           +      implements org.apache.thrift.TBase<disableTable_args, disableTable_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<disableTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("disableTable_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new disableTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new disableTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new disableTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new disableTable_argsTupleSchemeFactory();
           
               /**
                * name of the table
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of the table
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -9539,7 +11068,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -9548,12 +11077,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -9585,19 +11114,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Bytes")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Bytes")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_args.class,
          +        metaDataMap);
               }
           
               public disableTable_args() {
               }
           
          -    public disableTable_args(
          -      java.nio.ByteBuffer tableName)
          -    {
          +    public disableTable_args(java.nio.ByteBuffer tableName) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
               }
          @@ -9636,11 +11168,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of the table
                */
               public disableTable_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public disableTable_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public disableTable_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -9660,19 +11194,20 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -9680,46 +11215,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof disableTable_args)
          -        return this.equals((disableTable_args)that);
          +      if (that instanceof disableTable_args) return this.equals((disableTable_args) that);
                 return false;
               }
           
               public boolean equals(disableTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -9730,8 +11263,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -9762,11 +11294,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -9793,35 +11327,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class disableTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class disableTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public disableTable_argsStandardScheme getScheme() {
                   return new disableTable_argsStandardScheme();
                 }
               }
           
           -    private static class disableTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<disableTable_args> {
          +    private static class disableTable_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<disableTable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -9829,7 +11368,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -9840,11 +11379,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -9859,17 +11400,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_args
           
               }
           
          -    private static class disableTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class disableTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public disableTable_argsTupleScheme getScheme() {
                   return new disableTable_argsTupleScheme();
                 }
               }
           
          -    private static class disableTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<disableTable_args> {
          +    private static class disableTable_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<disableTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -9881,8 +11426,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_args s
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -9891,26 +11438,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_args st
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class disableTable_result implements org.apache.thrift.TBase<disableTable_result, disableTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<disableTable_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("disableTable_result");
          +  public static class disableTable_result
          +      implements org.apache.thrift.TBase<disableTable_result, disableTable_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<disableTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("disableTable_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new disableTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new disableTable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new disableTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new disableTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -9923,7 +11484,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -9932,12 +11493,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -9969,19 +11530,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_result.class,
          +        metaDataMap);
               }
           
               public disableTable_result() {
               }
           
          -    public disableTable_result(
          -      IOError io)
          -    {
          +    public disableTable_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -10029,15 +11593,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -10045,46 +11610,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof disableTable_result)
          -        return this.equals((disableTable_result)that);
          +      if (that instanceof disableTable_result) return this.equals((disableTable_result) that);
                 return false;
               }
           
               public boolean equals(disableTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -10095,8 +11658,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -10127,13 +11689,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -10158,35 +11722,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class disableTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class disableTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public disableTable_resultStandardScheme getScheme() {
                   return new disableTable_resultStandardScheme();
                 }
               }
           
          -    private static class disableTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<disableTable_result> {
          +    private static class disableTable_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<disableTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -10195,7 +11764,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -10206,11 +11775,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -10225,17 +11796,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_resul
           
               }
           
          -    private static class disableTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class disableTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public disableTable_resultTupleScheme getScheme() {
                   return new disableTable_resultTupleScheme();
                 }
               }
           
          -    private static class disableTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<disableTable_result> {
          +    private static class disableTable_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<disableTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -10247,8 +11822,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -10258,32 +11835,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_result
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class isTableEnabled_args implements org.apache.thrift.TBase<isTableEnabled_args, isTableEnabled_args._Fields>, java.io.Serializable, Cloneable, Comparable<isTableEnabled_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableEnabled_args");
          +  public static class isTableEnabled_args
          +      implements org.apache.thrift.TBase<isTableEnabled_args, isTableEnabled_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<isTableEnabled_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableEnabled_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableEnabled_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableEnabled_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableEnabled_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableEnabled_argsTupleSchemeFactory();
           
               /**
                * name of the table to check
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of the table to check
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -10296,7 +11887,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -10305,12 +11896,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -10342,19 +11933,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Bytes")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Bytes")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_args.class,
          +        metaDataMap);
               }
           
               public isTableEnabled_args() {
               }
           
          -    public isTableEnabled_args(
          -      java.nio.ByteBuffer tableName)
          -    {
          +    public isTableEnabled_args(java.nio.ByteBuffer tableName) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
               }
          @@ -10393,11 +11987,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of the table to check
                */
               public isTableEnabled_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public isTableEnabled_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public isTableEnabled_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -10417,19 +12013,20 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -10437,46 +12034,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof isTableEnabled_args)
          -        return this.equals((isTableEnabled_args)that);
          +      if (that instanceof isTableEnabled_args) return this.equals((isTableEnabled_args) that);
                 return false;
               }
           
               public boolean equals(isTableEnabled_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -10487,8 +12082,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -10519,11 +12113,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -10550,35 +12146,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableEnabled_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableEnabled_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableEnabled_argsStandardScheme getScheme() {
                   return new isTableEnabled_argsStandardScheme();
                 }
               }
           
          -    private static class isTableEnabled_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableEnabled_args> {
          +    private static class isTableEnabled_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<isTableEnabled_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -10586,7 +12187,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -10597,11 +12198,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -10616,17 +12219,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_arg
           
               }
           
          -    private static class isTableEnabled_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableEnabled_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableEnabled_argsTupleScheme getScheme() {
                   return new isTableEnabled_argsTupleScheme();
                 }
               }
           
          -    private static class isTableEnabled_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableEnabled_args> {
          +    private static class isTableEnabled_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<isTableEnabled_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -10638,8 +12245,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -10648,29 +12257,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class isTableEnabled_result implements org.apache.thrift.TBase<isTableEnabled_result, isTableEnabled_result._Fields>, java.io.Serializable, Cloneable, Comparable<isTableEnabled_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableEnabled_result");
          +  public static class isTableEnabled_result
          +      implements org.apache.thrift.TBase<isTableEnabled_result, isTableEnabled_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<isTableEnabled_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableEnabled_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableEnabled_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableEnabled_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableEnabled_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableEnabled_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -10683,7 +12307,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -10694,12 +12318,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -10733,22 +12357,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_result.class,
          +        metaDataMap);
               }
           
               public isTableEnabled_result() {
               }
           
          -    public isTableEnabled_result(
          -      boolean success,
          -      IOError io)
          -    {
          +    public isTableEnabled_result(boolean success, IOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -10788,7 +12417,8 @@ public isTableEnabled_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -10797,7 +12427,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -10825,23 +12456,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -10849,60 +12481,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof isTableEnabled_result)
          -        return this.equals((isTableEnabled_result)that);
          +      if (that instanceof isTableEnabled_result) return this.equals((isTableEnabled_result) that);
                 return false;
               }
           
               public boolean equals(isTableEnabled_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -10915,8 +12543,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -10957,13 +12584,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -10992,37 +12621,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableEnabled_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableEnabled_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableEnabled_resultStandardScheme getScheme() {
                   return new isTableEnabled_resultStandardScheme();
                 }
               }
           
          -    private static class isTableEnabled_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableEnabled_result> {
          +    private static class isTableEnabled_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<isTableEnabled_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -11030,7 +12665,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_resu
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -11039,7 +12674,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_resu
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -11050,11 +12685,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_resu
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -11074,17 +12711,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_res
           
               }
           
          -    private static class isTableEnabled_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableEnabled_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableEnabled_resultTupleScheme getScheme() {
                   return new isTableEnabled_resultTupleScheme();
                 }
               }
           
          -    private static class isTableEnabled_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableEnabled_result> {
          +    private static class isTableEnabled_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<isTableEnabled_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -11102,8 +12743,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_resu
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -11117,26 +12760,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_resul
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class compact_args implements org.apache.thrift.TBase<compact_args, compact_args._Fields>, java.io.Serializable, Cloneable, Comparable<compact_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_args");
          +  public static class compact_args
          +      implements org.apache.thrift.TBase<compact_args, compact_args._Fields>, java.io.Serializable,
          +      Cloneable, Comparable<compact_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("compact_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_OR_REGION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableNameOrRegionName", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_OR_REGION_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableNameOrRegionName",
          +            org.apache.thrift.protocol.TType.STRING, (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new compact_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new compact_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new compact_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new compact_argsTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableNameOrRegionName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      TABLE_NAME_OR_REGION_NAME((short)1, "tableNameOrRegionName");
          +      TABLE_NAME_OR_REGION_NAME((short) 1, "tableNameOrRegionName");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -11149,7 +12806,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME_OR_REGION_NAME
                       return TABLE_NAME_OR_REGION_NAME;
                     default:
          @@ -11158,12 +12815,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -11195,19 +12852,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME_OR_REGION_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableNameOrRegionName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Bytes")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME_OR_REGION_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableNameOrRegionName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Bytes")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_args.class,
          +        metaDataMap);
               }
           
               public compact_args() {
               }
           
          -    public compact_args(
          -      java.nio.ByteBuffer tableNameOrRegionName)
          -    {
          +    public compact_args(java.nio.ByteBuffer tableNameOrRegionName) {
                 this();
                 this.tableNameOrRegionName = org.apache.thrift.TBaseHelper.copyBinary(tableNameOrRegionName);
               }
          @@ -11217,7 +12877,8 @@ public compact_args(
                */
               public compact_args(compact_args other) {
                 if (other.isSetTableNameOrRegionName()) {
          -        this.tableNameOrRegionName = org.apache.thrift.TBaseHelper.copyBinary(other.tableNameOrRegionName);
          +        this.tableNameOrRegionName =
          +            org.apache.thrift.TBaseHelper.copyBinary(other.tableNameOrRegionName);
                 }
               }
           
          @@ -11240,11 +12901,13 @@ public java.nio.ByteBuffer bufferForTableNameOrRegionName() {
               }
           
               public compact_args setTableNameOrRegionName(byte[] tableNameOrRegionName) {
          -      this.tableNameOrRegionName = tableNameOrRegionName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableNameOrRegionName.clone());
          +      this.tableNameOrRegionName = tableNameOrRegionName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableNameOrRegionName.clone());
                 return this;
               }
           
          -    public compact_args setTableNameOrRegionName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableNameOrRegionName) {
          +    public compact_args setTableNameOrRegionName(
          +        @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableNameOrRegionName) {
                 this.tableNameOrRegionName = org.apache.thrift.TBaseHelper.copyBinary(tableNameOrRegionName);
                 return this;
               }
          @@ -11253,7 +12916,10 @@ public void unsetTableNameOrRegionName() {
                 this.tableNameOrRegionName = null;
               }
           
          -    /** Returns true if field tableNameOrRegionName is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field tableNameOrRegionName is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSetTableNameOrRegionName() {
                 return this.tableNameOrRegionName != null;
               }
          @@ -11264,19 +12930,20 @@ public void setTableNameOrRegionNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME_OR_REGION_NAME:
          -        if (value == null) {
          -          unsetTableNameOrRegionName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableNameOrRegionName((byte[])value);
          +        case TABLE_NAME_OR_REGION_NAME:
          +          if (value == null) {
          +            unsetTableNameOrRegionName();
                     } else {
          -            setTableNameOrRegionName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableNameOrRegionName((byte[]) value);
          +            } else {
          +              setTableNameOrRegionName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -11284,46 +12951,45 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME_OR_REGION_NAME:
          -        return getTableNameOrRegionName();
          +        case TABLE_NAME_OR_REGION_NAME:
          +          return getTableNameOrRegionName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME_OR_REGION_NAME:
          -        return isSetTableNameOrRegionName();
          +        case TABLE_NAME_OR_REGION_NAME:
          +          return isSetTableNameOrRegionName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof compact_args)
          -        return this.equals((compact_args)that);
          +      if (that instanceof compact_args) return this.equals((compact_args) that);
                 return false;
               }
           
               public boolean equals(compact_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableNameOrRegionName = true && this.isSetTableNameOrRegionName();
                 boolean that_present_tableNameOrRegionName = true && that.isSetTableNameOrRegionName();
                 if (this_present_tableNameOrRegionName || that_present_tableNameOrRegionName) {
                   if (!(this_present_tableNameOrRegionName && that_present_tableNameOrRegionName))
                     return false;
          -        if (!this.tableNameOrRegionName.equals(that.tableNameOrRegionName))
          -          return false;
          +        if (!this.tableNameOrRegionName.equals(that.tableNameOrRegionName)) return false;
                 }
           
                 return true;
          @@ -11348,12 +13014,14 @@ public int compareTo(compact_args other) {
           
                 int lastComparison = 0;
           
          -      lastComparison = java.lang.Boolean.compare(isSetTableNameOrRegionName(), other.isSetTableNameOrRegionName());
          +      lastComparison = java.lang.Boolean.compare(isSetTableNameOrRegionName(),
          +        other.isSetTableNameOrRegionName());
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
                 if (isSetTableNameOrRegionName()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableNameOrRegionName, other.tableNameOrRegionName);
          +        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableNameOrRegionName,
          +          other.tableNameOrRegionName);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -11366,11 +13034,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -11397,35 +13067,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class compact_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class compact_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public compact_argsStandardScheme getScheme() {
                   return new compact_argsStandardScheme();
                 }
               }
           
          -    private static class compact_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<compact_args> {
          +    private static class compact_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<compact_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -11433,7 +13108,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableNameOrRegionName = iprot.readBinary();
                           struct.setTableNameOrRegionNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -11444,11 +13119,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -11463,17 +13140,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struc
           
               }
           
          -    private static class compact_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class compact_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public compact_argsTupleScheme getScheme() {
                   return new compact_argsTupleScheme();
                 }
               }
           
          -    private static class compact_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<compact_args> {
          +    private static class compact_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<compact_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, compact_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableNameOrRegionName()) {
                     optionals.set(0);
          @@ -11485,8 +13166,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, compact_args struct
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.tableNameOrRegionName = iprot.readBinary();
          @@ -11495,26 +13178,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct)
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class compact_result implements org.apache.thrift.TBase<compact_result, compact_result._Fields>, java.io.Serializable, Cloneable, Comparable<compact_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_result");
          +  public static class compact_result
          +      implements org.apache.thrift.TBase<compact_result, compact_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<compact_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("compact_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new compact_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new compact_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new compact_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new compact_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -11527,7 +13224,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -11536,12 +13233,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -11573,19 +13270,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_result.class,
          +        metaDataMap);
               }
           
               public compact_result() {
               }
           
          -    public compact_result(
          -      IOError io)
          -    {
          +    public compact_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -11633,15 +13333,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -11649,46 +13350,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof compact_result)
          -        return this.equals((compact_result)that);
          +      if (that instanceof compact_result) return this.equals((compact_result) that);
                 return false;
               }
           
               public boolean equals(compact_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -11699,8 +13398,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -11731,13 +13429,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -11762,35 +13462,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class compact_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class compact_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public compact_resultStandardScheme getScheme() {
                   return new compact_resultStandardScheme();
                 }
               }
           
          -    private static class compact_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<compact_result> {
          +    private static class compact_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<compact_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -11799,7 +13504,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result stru
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -11810,11 +13515,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result stru
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, compact_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, compact_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -11829,17 +13536,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, compact_result str
           
               }
           
          -    private static class compact_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class compact_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public compact_resultTupleScheme getScheme() {
                   return new compact_resultTupleScheme();
                 }
               }
           
          -    private static class compact_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<compact_result> {
          +    private static class compact_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<compact_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, compact_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -11851,8 +13562,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, compact_result stru
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -11862,26 +13575,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struc
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class majorCompact_args implements org.apache.thrift.TBase<majorCompact_args, majorCompact_args._Fields>, java.io.Serializable, Cloneable, Comparable<majorCompact_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("majorCompact_args");
          +  public static class majorCompact_args
          +      implements org.apache.thrift.TBase<majorCompact_args, majorCompact_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<majorCompact_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("majorCompact_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_OR_REGION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableNameOrRegionName", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_OR_REGION_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableNameOrRegionName",
          +            org.apache.thrift.protocol.TType.STRING, (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new majorCompact_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new majorCompact_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new majorCompact_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new majorCompact_argsTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableNameOrRegionName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      TABLE_NAME_OR_REGION_NAME((short)1, "tableNameOrRegionName");
          +      TABLE_NAME_OR_REGION_NAME((short) 1, "tableNameOrRegionName");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -11894,7 +13621,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME_OR_REGION_NAME
                       return TABLE_NAME_OR_REGION_NAME;
                     default:
          @@ -11903,12 +13630,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -11940,19 +13667,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME_OR_REGION_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableNameOrRegionName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Bytes")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME_OR_REGION_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableNameOrRegionName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Bytes")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(majorCompact_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(majorCompact_args.class,
          +        metaDataMap);
               }
           
               public majorCompact_args() {
               }
           
          -    public majorCompact_args(
          -      java.nio.ByteBuffer tableNameOrRegionName)
          -    {
          +    public majorCompact_args(java.nio.ByteBuffer tableNameOrRegionName) {
                 this();
                 this.tableNameOrRegionName = org.apache.thrift.TBaseHelper.copyBinary(tableNameOrRegionName);
               }
          @@ -11962,7 +13692,8 @@ public majorCompact_args(
                */
               public majorCompact_args(majorCompact_args other) {
                 if (other.isSetTableNameOrRegionName()) {
          -        this.tableNameOrRegionName = org.apache.thrift.TBaseHelper.copyBinary(other.tableNameOrRegionName);
          +        this.tableNameOrRegionName =
          +            org.apache.thrift.TBaseHelper.copyBinary(other.tableNameOrRegionName);
                 }
               }
           
          @@ -11985,11 +13716,13 @@ public java.nio.ByteBuffer bufferForTableNameOrRegionName() {
               }
           
               public majorCompact_args setTableNameOrRegionName(byte[] tableNameOrRegionName) {
          -      this.tableNameOrRegionName = tableNameOrRegionName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableNameOrRegionName.clone());
          +      this.tableNameOrRegionName = tableNameOrRegionName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableNameOrRegionName.clone());
                 return this;
               }
           
          -    public majorCompact_args setTableNameOrRegionName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableNameOrRegionName) {
          +    public majorCompact_args setTableNameOrRegionName(
          +        @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableNameOrRegionName) {
                 this.tableNameOrRegionName = org.apache.thrift.TBaseHelper.copyBinary(tableNameOrRegionName);
                 return this;
               }
          @@ -11998,7 +13731,10 @@ public void unsetTableNameOrRegionName() {
                 this.tableNameOrRegionName = null;
               }
           
          -    /** Returns true if field tableNameOrRegionName is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field tableNameOrRegionName is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSetTableNameOrRegionName() {
                 return this.tableNameOrRegionName != null;
               }
          @@ -12009,19 +13745,20 @@ public void setTableNameOrRegionNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME_OR_REGION_NAME:
          -        if (value == null) {
          -          unsetTableNameOrRegionName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableNameOrRegionName((byte[])value);
          +        case TABLE_NAME_OR_REGION_NAME:
          +          if (value == null) {
          +            unsetTableNameOrRegionName();
                     } else {
          -            setTableNameOrRegionName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableNameOrRegionName((byte[]) value);
          +            } else {
          +              setTableNameOrRegionName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -12029,46 +13766,45 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME_OR_REGION_NAME:
          -        return getTableNameOrRegionName();
          +        case TABLE_NAME_OR_REGION_NAME:
          +          return getTableNameOrRegionName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME_OR_REGION_NAME:
          -        return isSetTableNameOrRegionName();
          +        case TABLE_NAME_OR_REGION_NAME:
          +          return isSetTableNameOrRegionName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof majorCompact_args)
          -        return this.equals((majorCompact_args)that);
          +      if (that instanceof majorCompact_args) return this.equals((majorCompact_args) that);
                 return false;
               }
           
               public boolean equals(majorCompact_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableNameOrRegionName = true && this.isSetTableNameOrRegionName();
                 boolean that_present_tableNameOrRegionName = true && that.isSetTableNameOrRegionName();
                 if (this_present_tableNameOrRegionName || that_present_tableNameOrRegionName) {
                   if (!(this_present_tableNameOrRegionName && that_present_tableNameOrRegionName))
                     return false;
          -        if (!this.tableNameOrRegionName.equals(that.tableNameOrRegionName))
          -          return false;
          +        if (!this.tableNameOrRegionName.equals(that.tableNameOrRegionName)) return false;
                 }
           
                 return true;
          @@ -12093,12 +13829,14 @@ public int compareTo(majorCompact_args other) {
           
                 int lastComparison = 0;
           
          -      lastComparison = java.lang.Boolean.compare(isSetTableNameOrRegionName(), other.isSetTableNameOrRegionName());
          +      lastComparison = java.lang.Boolean.compare(isSetTableNameOrRegionName(),
          +        other.isSetTableNameOrRegionName());
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
                 if (isSetTableNameOrRegionName()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableNameOrRegionName, other.tableNameOrRegionName);
          +        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableNameOrRegionName,
          +          other.tableNameOrRegionName);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -12111,11 +13849,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -12142,35 +13882,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class majorCompact_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class majorCompact_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public majorCompact_argsStandardScheme getScheme() {
                   return new majorCompact_argsStandardScheme();
                 }
               }
           
-    private static class majorCompact_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<majorCompact_args> {
          +    private static class majorCompact_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<majorCompact_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, majorCompact_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, majorCompact_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -12178,7 +13923,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, majorCompact_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableNameOrRegionName = iprot.readBinary();
                           struct.setTableNameOrRegionNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -12189,11 +13934,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, majorCompact_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, majorCompact_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, majorCompact_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -12208,17 +13955,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, majorCompact_args
           
               }
           
          -    private static class majorCompact_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class majorCompact_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public majorCompact_argsTupleScheme getScheme() {
                   return new majorCompact_argsTupleScheme();
                 }
               }
           
-    private static class majorCompact_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<majorCompact_args> {
          +    private static class majorCompact_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<majorCompact_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, majorCompact_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, majorCompact_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableNameOrRegionName()) {
                     optionals.set(0);
          @@ -12230,8 +13981,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, majorCompact_args s
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, majorCompact_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, majorCompact_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.tableNameOrRegionName = iprot.readBinary();
          @@ -12240,26 +13993,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, majorCompact_args st
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class majorCompact_result implements org.apache.thrift.TBase<majorCompact_result, majorCompact_result._Fields>, java.io.Serializable, Cloneable, Comparable<majorCompact_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("majorCompact_result");
          +  public static class majorCompact_result
+      implements org.apache.thrift.TBase<majorCompact_result, majorCompact_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<majorCompact_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("majorCompact_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new majorCompact_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new majorCompact_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new majorCompact_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new majorCompact_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -12272,7 +14039,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -12281,12 +14048,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -12318,19 +14085,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(majorCompact_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(majorCompact_result.class,
          +        metaDataMap);
               }
           
               public majorCompact_result() {
               }
           
          -    public majorCompact_result(
          -      IOError io)
          -    {
          +    public majorCompact_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -12378,15 +14148,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -12394,46 +14165,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof majorCompact_result)
          -        return this.equals((majorCompact_result)that);
          +      if (that instanceof majorCompact_result) return this.equals((majorCompact_result) that);
                 return false;
               }
           
               public boolean equals(majorCompact_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -12444,8 +14213,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -12476,13 +14244,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -12507,35 +14277,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class majorCompact_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class majorCompact_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public majorCompact_resultStandardScheme getScheme() {
                   return new majorCompact_resultStandardScheme();
                 }
               }
           
-    private static class majorCompact_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<majorCompact_result> {
          +    private static class majorCompact_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<majorCompact_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, majorCompact_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, majorCompact_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -12544,7 +14319,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, majorCompact_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -12555,11 +14330,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, majorCompact_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, majorCompact_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, majorCompact_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -12574,17 +14351,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, majorCompact_resul
           
               }
           
          -    private static class majorCompact_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class majorCompact_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public majorCompact_resultTupleScheme getScheme() {
                   return new majorCompact_resultTupleScheme();
                 }
               }
           
-    private static class majorCompact_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<majorCompact_result> {
          +    private static class majorCompact_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<majorCompact_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, majorCompact_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, majorCompact_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -12596,8 +14377,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, majorCompact_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, majorCompact_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, majorCompact_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -12607,24 +14390,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, majorCompact_result
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class getTableNames_args implements org.apache.thrift.TBase<getTableNames_args, getTableNames_args._Fields>, java.io.Serializable, Cloneable, Comparable<getTableNames_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNames_args");
          -
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNames_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNames_argsTupleSchemeFactory();
          +  public static class getTableNames_args
+      implements org.apache.thrift.TBase<getTableNames_args, getTableNames_args._Fields>,
+      java.io.Serializable, Cloneable, Comparable<getTableNames_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableNames_args");
           
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableNames_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableNames_argsTupleSchemeFactory();
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -;
          +      ;
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -12637,19 +14430,19 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     default:
                       return null;
                   }
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -12677,11 +14470,14 @@ public java.lang.String getFieldName() {
                   return _fieldName;
                 }
               }
          +
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNames_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNames_args.class,
          +        metaDataMap);
               }
           
               public getTableNames_args() {
          @@ -12701,7 +14497,8 @@ public getTableNames_args deepCopy() {
               public void clear() {
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
                 }
               }
          @@ -12713,7 +14510,10 @@ public java.lang.Object getFieldValue(_Fields field) {
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
          @@ -12726,16 +14526,13 @@ public boolean isSet(_Fields field) {
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getTableNames_args)
          -        return this.equals((getTableNames_args)that);
          +      if (that instanceof getTableNames_args) return this.equals((getTableNames_args) that);
                 return false;
               }
           
               public boolean equals(getTableNames_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 return true;
               }
          @@ -12763,11 +14560,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -12787,35 +14586,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableNames_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNames_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNames_argsStandardScheme getScheme() {
                   return new getTableNames_argsStandardScheme();
                 }
               }
           
-    private static class getTableNames_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableNames_args> {
          +    private static class getTableNames_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<getTableNames_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -12826,11 +14630,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -12840,48 +14646,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_args
           
               }
           
          -    private static class getTableNames_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNames_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNames_argsTupleScheme getScheme() {
                   return new getTableNames_argsTupleScheme();
                 }
               }
           
-    private static class getTableNames_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableNames_args> {
          +    private static class getTableNames_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<getTableNames_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNames_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNames_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class getTableNames_result implements org.apache.thrift.TBase<getTableNames_result, getTableNames_result._Fields>, java.io.Serializable, Cloneable, Comparable<getTableNames_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNames_result");
          +  public static class getTableNames_result
+      implements org.apache.thrift.TBase<getTableNames_result, getTableNames_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<getTableNames_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableNames_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNames_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNames_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableNames_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableNames_resultTupleSchemeFactory();
           
     public @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -12894,7 +14721,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -12905,12 +14732,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -12942,23 +14769,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNames_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNames_result.class,
          +        metaDataMap);
               }
           
               public getTableNames_result() {
               }
           
          -    public getTableNames_result(
-      java.util.List<java.nio.ByteBuffer> success,
          -      IOError io)
          -    {
+    public getTableNames_result(java.util.List<java.nio.ByteBuffer> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -12969,7 +14801,8 @@ public getTableNames_result(
                */
               public getTableNames_result(getTableNames_result other) {
                 if (other.isSetSuccess()) {
-        java.util.List<java.nio.ByteBuffer> __this__success = new java.util.ArrayList<java.nio.ByteBuffer>(other.success.size());
+        java.util.List<java.nio.ByteBuffer> __this__success =
+            new java.util.ArrayList<java.nio.ByteBuffer>(other.success.size());
                   for (java.nio.ByteBuffer other_element : other.success) {
                     __this__success.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
@@ -13011,7 +14844,8 @@ public java.util.List<java.nio.ByteBuffer> getSuccess() {
                 return this.success;
               }
           
-    public getTableNames_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> success) {
          +    public getTableNames_result setSuccess(
+        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> success) {
                 this.success = success;
                 return this;
               }
          @@ -13056,23 +14890,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
-          setSuccess((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
+            setSuccess((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -13080,60 +14915,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getTableNames_result)
          -        return this.equals((getTableNames_result)that);
          +      if (that instanceof getTableNames_result) return this.equals((getTableNames_result) that);
                 return false;
               }
           
               public boolean equals(getTableNames_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -13144,12 +14975,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -13190,13 +15019,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -13229,35 +15060,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableNames_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNames_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNames_resultStandardScheme getScheme() {
                   return new getTableNames_resultStandardScheme();
                 }
               }
           
          -    private static class getTableNames_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableNames_result> {
          +    private static class getTableNames_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getTableNames_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -13266,16 +15102,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_resul
                           {
                             org.apache.thrift.protocol.TList _list50 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<java.nio.ByteBuffer>(_list50.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem51;
          -                  for (int _i52 = 0; _i52 < _list50.size; ++_i52)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem51;
          +                  for (int _i52 = 0; _i52 < _list50.size; ++_i52) {
                               _elem51 = iprot.readBinary();
                               struct.success.add(_elem51);
                             }
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -13284,7 +15120,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_resul
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -13295,20 +15131,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNames_resul
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
          -            for (java.nio.ByteBuffer _iter53 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.success.size()));
          +            for (java.nio.ByteBuffer _iter53 : struct.success) {
                         oprot.writeBinary(_iter53);
                       }
                       oprot.writeListEnd();
          @@ -13326,17 +15164,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNames_resu
           
               }
           
          -    private static class getTableNames_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNames_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNames_resultTupleScheme getScheme() {
                   return new getTableNames_resultTupleScheme();
                 }
               }
           
          -    private static class getTableNames_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableNames_result> {
          +    private static class getTableNames_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getTableNames_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -13348,8 +15190,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_resul
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (java.nio.ByteBuffer _iter54 : struct.success)
          -            {
          +            for (java.nio.ByteBuffer _iter54 : struct.success) {
                         oprot.writeBinary(_iter54);
                       }
                     }
          @@ -13360,16 +15201,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNames_resul
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNames_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNames_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list55 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list55 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                       struct.success = new java.util.ArrayList<java.nio.ByteBuffer>(_list55.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem56;
          -            for (int _i57 = 0; _i57 < _list55.size; ++_i57)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem56;
          +            for (int _i57 = 0; _i57 < _list55.size; ++_i57) {
                         _elem56 = iprot.readBinary();
                         struct.success.add(_elem56);
                       }
          @@ -13384,24 +15228,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableNames_result
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getTableNamesWithIsTableEnabled_args implements org.apache.thrift.TBase<getTableNamesWithIsTableEnabled_args, getTableNamesWithIsTableEnabled_args._Fields>, java.io.Serializable, Cloneable, Comparable<getTableNamesWithIsTableEnabled_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNamesWithIsTableEnabled_args");
          -
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNamesWithIsTableEnabled_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNamesWithIsTableEnabled_argsTupleSchemeFactory();
          +  public static class getTableNamesWithIsTableEnabled_args implements
          +      org.apache.thrift.TBase<getTableNamesWithIsTableEnabled_args, getTableNamesWithIsTableEnabled_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getTableNamesWithIsTableEnabled_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableNamesWithIsTableEnabled_args");
           
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableNamesWithIsTableEnabled_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableNamesWithIsTableEnabled_argsTupleSchemeFactory();
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -;
          +      ;
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -13414,19 +15268,19 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     default:
                       return null;
                   }
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -13454,11 +15308,14 @@ public java.lang.String getFieldName() {
                   return _fieldName;
                 }
               }
          +
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNamesWithIsTableEnabled_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableNamesWithIsTableEnabled_args.class, metaDataMap);
               }
           
               public getTableNamesWithIsTableEnabled_args() {
          @@ -13478,7 +15335,8 @@ public getTableNamesWithIsTableEnabled_args deepCopy() {
               public void clear() {
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
                 }
               }
          @@ -13490,7 +15348,10 @@ public java.lang.Object getFieldValue(_Fields field) {
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
          @@ -13504,15 +15365,13 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableNamesWithIsTableEnabled_args)
          -        return this.equals((getTableNamesWithIsTableEnabled_args)that);
          +        return this.equals((getTableNamesWithIsTableEnabled_args) that);
                 return false;
               }
           
               public boolean equals(getTableNamesWithIsTableEnabled_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 return true;
               }
          @@ -13540,17 +15399,20 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
               @Override
               public java.lang.String toString() {
          -      java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableNamesWithIsTableEnabled_args(");
          +      java.lang.StringBuilder sb =
          +          new java.lang.StringBuilder("getTableNamesWithIsTableEnabled_args(");
                 boolean first = true;
           
                 sb.append(")");
          @@ -13564,35 +15426,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableNamesWithIsTableEnabled_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesWithIsTableEnabled_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesWithIsTableEnabled_argsStandardScheme getScheme() {
                   return new getTableNamesWithIsTableEnabled_argsStandardScheme();
                 }
               }
           
          -    private static class getTableNamesWithIsTableEnabled_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableNamesWithIsTableEnabled_args> {
          +    private static class getTableNamesWithIsTableEnabled_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getTableNamesWithIsTableEnabled_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -13603,11 +15470,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesWithIs
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -13617,48 +15486,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesWithI
           
               }
           
          -    private static class getTableNamesWithIsTableEnabled_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesWithIsTableEnabled_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesWithIsTableEnabled_argsTupleScheme getScheme() {
                   return new getTableNamesWithIsTableEnabled_argsTupleScheme();
                 }
               }
           
          -    private static class getTableNamesWithIsTableEnabled_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableNamesWithIsTableEnabled_args> {
          +    private static class getTableNamesWithIsTableEnabled_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getTableNamesWithIsTableEnabled_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesWithIsTableEnabled_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getTableNamesWithIsTableEnabled_result implements org.apache.thrift.TBase<getTableNamesWithIsTableEnabled_result, getTableNamesWithIsTableEnabled_result._Fields>, java.io.Serializable, Cloneable, Comparable<getTableNamesWithIsTableEnabled_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNamesWithIsTableEnabled_result");
          -
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNamesWithIsTableEnabled_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNamesWithIsTableEnabled_resultTupleSchemeFactory();
          -
          -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.lang.Boolean> success; // required
          +  public static class getTableNamesWithIsTableEnabled_result implements
          +      org.apache.thrift.TBase<getTableNamesWithIsTableEnabled_result, getTableNamesWithIsTableEnabled_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getTableNamesWithIsTableEnabled_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableNamesWithIsTableEnabled_result");
          +
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableNamesWithIsTableEnabled_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableNamesWithIsTableEnabled_resultTupleSchemeFactory();
          +
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.lang.Boolean> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -13671,7 +15561,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -13682,12 +15572,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -13719,24 +15609,31 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.BOOL))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNamesWithIsTableEnabled_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableNamesWithIsTableEnabled_result.class, metaDataMap);
               }
           
               public getTableNamesWithIsTableEnabled_result() {
               }
           
               public getTableNamesWithIsTableEnabled_result(
          -      java.util.Map<java.nio.ByteBuffer,java.lang.Boolean> success,
          -      IOError io)
          -    {
          +        java.util.Map<java.nio.ByteBuffer, java.lang.Boolean> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -13747,13 +15644,16 @@ public getTableNamesWithIsTableEnabled_result(
                */
               public getTableNamesWithIsTableEnabled_result(getTableNamesWithIsTableEnabled_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.Map<java.nio.ByteBuffer,java.lang.Boolean> __this__success = new java.util.HashMap<java.nio.ByteBuffer,java.lang.Boolean>(other.success.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.lang.Boolean> other_element : other.success.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.lang.Boolean> __this__success =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.lang.Boolean>(other.success.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.lang.Boolean> other_element : other.success
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.lang.Boolean other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__success_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__success_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
                     java.lang.Boolean __this__success_copy_value = other_element_value;
           
          @@ -13782,17 +15682,18 @@ public int getSuccessSize() {
           
               public void putToSuccess(java.nio.ByteBuffer key, boolean val) {
                 if (this.success == null) {
          -        this.success = new java.util.HashMap<java.nio.ByteBuffer,java.lang.Boolean>();
          +        this.success = new java.util.HashMap<java.nio.ByteBuffer, java.lang.Boolean>();
                 }
                 this.success.put(key, val);
               }
           
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.lang.Boolean> getSuccess() {
          +    public java.util.Map<java.nio.ByteBuffer, java.lang.Boolean> getSuccess() {
                 return this.success;
               }
           
          -    public getTableNamesWithIsTableEnabled_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.lang.Boolean> success) {
          +    public getTableNamesWithIsTableEnabled_result setSuccess(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.lang.Boolean> success) {
                 this.success = success;
                 return this;
               }
          @@ -13817,7 +15718,8 @@ public IOError getIo() {
                 return this.io;
               }
           
          -    public getTableNamesWithIsTableEnabled_result setIo(@org.apache.thrift.annotation.Nullable IOError io) {
          +    public getTableNamesWithIsTableEnabled_result
          +        setIo(@org.apache.thrift.annotation.Nullable IOError io) {
                 this.io = io;
                 return this;
               }
          @@ -13837,23 +15739,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.Map<java.nio.ByteBuffer,java.lang.Boolean>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.Map<java.nio.ByteBuffer, java.lang.Boolean>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -13861,27 +15764,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -13889,32 +15795,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableNamesWithIsTableEnabled_result)
          -        return this.equals((getTableNamesWithIsTableEnabled_result)that);
          +        return this.equals((getTableNamesWithIsTableEnabled_result) that);
                 return false;
               }
           
               public boolean equals(getTableNamesWithIsTableEnabled_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -13925,12 +15825,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -13971,17 +15869,20 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          -      java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableNamesWithIsTableEnabled_result(");
          +      java.lang.StringBuilder sb =
          +          new java.lang.StringBuilder("getTableNamesWithIsTableEnabled_result(");
                 boolean first = true;
           
                 sb.append("success:");
          @@ -14010,35 +15911,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableNamesWithIsTableEnabled_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesWithIsTableEnabled_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesWithIsTableEnabled_resultStandardScheme getScheme() {
                   return new getTableNamesWithIsTableEnabled_resultStandardScheme();
                 }
               }
           
          -    private static class getTableNamesWithIsTableEnabled_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableNamesWithIsTableEnabled_result> {
          +    private static class getTableNamesWithIsTableEnabled_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getTableNamesWithIsTableEnabled_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -14046,11 +15952,12 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesWithIs
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map58 = iprot.readMapBegin();
          -                  struct.success = new java.util.HashMap<java.nio.ByteBuffer,java.lang.Boolean>(2*_map58.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key59;
          +                  struct.success = new java.util.HashMap<java.nio.ByteBuffer, java.lang.Boolean>(
          +                      2 * _map58.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key59;
                             boolean _val60;
          -                  for (int _i61 = 0; _i61 < _map58.size; ++_i61)
          -                  {
          +                  for (int _i61 = 0; _i61 < _map58.size; ++_i61) {
                               _key59 = iprot.readBinary();
                               _val60 = iprot.readBool();
                               struct.success.put(_key59, _val60);
          @@ -14058,7 +15965,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesWithIs
                             iprot.readMapEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14067,7 +15974,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesWithIs
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14078,20 +15985,24 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesWithIs
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.BOOL, struct.success.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.lang.Boolean> _iter62 : struct.success.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.BOOL, struct.success.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.lang.Boolean> _iter62 : struct.success
          +                .entrySet()) {
                         oprot.writeBinary(_iter62.getKey());
                         oprot.writeBool(_iter62.getValue());
                       }
          @@ -14110,17 +16021,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesWithI
           
               }
           
          -    private static class getTableNamesWithIsTableEnabled_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesWithIsTableEnabled_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesWithIsTableEnabled_resultTupleScheme getScheme() {
                   return new getTableNamesWithIsTableEnabled_resultTupleScheme();
                 }
               }
           
          -    private static class getTableNamesWithIsTableEnabled_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableNamesWithIsTableEnabled_result> {
          +    private static class getTableNamesWithIsTableEnabled_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getTableNamesWithIsTableEnabled_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -14132,8 +16047,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIs
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.lang.Boolean> _iter63 : struct.success.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.lang.Boolean> _iter63 : struct.success
           +                .entrySet()) {
                         oprot.writeBinary(_iter63.getKey());
                         oprot.writeBool(_iter63.getValue());
                       }
          @@ -14145,17 +16060,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIs
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesWithIsTableEnabled_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TMap _map64 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.BOOL); 
           -            struct.success = new java.util.HashMap<java.nio.ByteBuffer,java.lang.Boolean>(2*_map64.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key65;
          +            org.apache.thrift.protocol.TMap _map64 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.BOOL);
           +            struct.success =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.lang.Boolean>(2 * _map64.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key65;
                       boolean _val66;
          -            for (int _i67 = 0; _i67 < _map64.size; ++_i67)
          -            {
          +            for (int _i67 = 0; _i67 < _map64.size; ++_i67) {
                         _key65 = iprot.readBinary();
                         _val66 = iprot.readBool();
                         struct.success.put(_key65, _val66);
          @@ -14171,32 +16090,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesWithIsT
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getColumnDescriptors_args implements org.apache.thrift.TBase<getColumnDescriptors_args, getColumnDescriptors_args._Fields>, java.io.Serializable, Cloneable, Comparable<getColumnDescriptors_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getColumnDescriptors_args");
           +  public static class getColumnDescriptors_args implements
           +      org.apache.thrift.TBase<getColumnDescriptors_args, getColumnDescriptors_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getColumnDescriptors_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getColumnDescriptors_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getColumnDescriptors_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getColumnDescriptors_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getColumnDescriptors_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getColumnDescriptors_argsTupleSchemeFactory();
           
               /**
                * table name
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * table name
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -14209,7 +16142,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -14218,12 +16151,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -14255,19 +16188,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getColumnDescriptors_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getColumnDescriptors_args.class, metaDataMap);
               }
           
               public getColumnDescriptors_args() {
               }
           
          -    public getColumnDescriptors_args(
          -      java.nio.ByteBuffer tableName)
          -    {
          +    public getColumnDescriptors_args(java.nio.ByteBuffer tableName) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
               }
          @@ -14306,11 +16242,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * table name
                */
               public getColumnDescriptors_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getColumnDescriptors_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getColumnDescriptors_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -14330,19 +16268,20 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -14350,22 +16289,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -14373,23 +16315,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getColumnDescriptors_args)
          -        return this.equals((getColumnDescriptors_args)that);
          +        return this.equals((getColumnDescriptors_args) that);
                 return false;
               }
           
               public boolean equals(getColumnDescriptors_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -14400,8 +16338,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -14432,11 +16369,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -14463,35 +16402,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getColumnDescriptors_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getColumnDescriptors_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getColumnDescriptors_argsStandardScheme getScheme() {
                   return new getColumnDescriptors_argsStandardScheme();
                 }
               }
           
           -    private static class getColumnDescriptors_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getColumnDescriptors_args> {
           +    private static class getColumnDescriptors_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getColumnDescriptors_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptors_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptors_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -14499,7 +16443,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptor
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14510,11 +16454,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptor
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getColumnDescriptors_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getColumnDescriptors_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -14529,17 +16475,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getColumnDescripto
           
               }
           
          -    private static class getColumnDescriptors_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getColumnDescriptors_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getColumnDescriptors_argsTupleScheme getScheme() {
                   return new getColumnDescriptors_argsTupleScheme();
                 }
               }
           
           -    private static class getColumnDescriptors_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getColumnDescriptors_args> {
           +    private static class getColumnDescriptors_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getColumnDescriptors_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptors_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptors_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -14551,8 +16501,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptor
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptors_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptors_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -14561,29 +16513,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptors
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getColumnDescriptors_result implements org.apache.thrift.TBase<getColumnDescriptors_result, getColumnDescriptors_result._Fields>, java.io.Serializable, Cloneable, Comparable<getColumnDescriptors_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getColumnDescriptors_result");
          -
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getColumnDescriptors_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getColumnDescriptors_resultTupleSchemeFactory();
          -
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,ColumnDescriptor> success; // required
           +  public static class getColumnDescriptors_result implements
           +      org.apache.thrift.TBase<getColumnDescriptors_result, getColumnDescriptors_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getColumnDescriptors_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getColumnDescriptors_result");
          +
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.MAP,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getColumnDescriptors_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getColumnDescriptors_resultTupleSchemeFactory();
          +
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, ColumnDescriptor> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -14596,7 +16563,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -14607,12 +16574,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -14644,24 +16611,31 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnDescriptor.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, ColumnDescriptor.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getColumnDescriptors_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getColumnDescriptors_result.class, metaDataMap);
               }
           
               public getColumnDescriptors_result() {
               }
           
          -    public getColumnDescriptors_result(
           -      java.util.Map<java.nio.ByteBuffer,ColumnDescriptor> success,
          -      IOError io)
          -    {
           +    public getColumnDescriptors_result(java.util.Map<java.nio.ByteBuffer, ColumnDescriptor> success,
           +        IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -14672,13 +16646,16 @@ public getColumnDescriptors_result(
                */
               public getColumnDescriptors_result(getColumnDescriptors_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.Map<java.nio.ByteBuffer,ColumnDescriptor> __this__success = new java.util.HashMap<java.nio.ByteBuffer,ColumnDescriptor>(other.success.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, ColumnDescriptor> other_element : other.success.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, ColumnDescriptor> __this__success =
           +            new java.util.HashMap<java.nio.ByteBuffer, ColumnDescriptor>(other.success.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, ColumnDescriptor> other_element : other.success
           +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     ColumnDescriptor other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__success_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__success_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
                     ColumnDescriptor __this__success_copy_value = new ColumnDescriptor(other_element_value);
           
          @@ -14707,17 +16684,18 @@ public int getSuccessSize() {
           
               public void putToSuccess(java.nio.ByteBuffer key, ColumnDescriptor val) {
                 if (this.success == null) {
           -        this.success = new java.util.HashMap<java.nio.ByteBuffer,ColumnDescriptor>();
           +        this.success = new java.util.HashMap<java.nio.ByteBuffer, ColumnDescriptor>();
                 }
                 this.success.put(key, val);
               }
           
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,ColumnDescriptor> getSuccess() {
           +    public java.util.Map<java.nio.ByteBuffer, ColumnDescriptor> getSuccess() {
                 return this.success;
               }
           
           -    public getColumnDescriptors_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,ColumnDescriptor> success) {
           +    public getColumnDescriptors_result setSuccess(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, ColumnDescriptor> success) {
                 this.success = success;
                 return this;
               }
          @@ -14762,23 +16740,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.Map<java.nio.ByteBuffer,ColumnDescriptor>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.Map<java.nio.ByteBuffer, ColumnDescriptor>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -14786,27 +16765,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -14814,32 +16796,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getColumnDescriptors_result)
          -        return this.equals((getColumnDescriptors_result)that);
          +        return this.equals((getColumnDescriptors_result) that);
                 return false;
               }
           
               public boolean equals(getColumnDescriptors_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -14850,12 +16826,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -14896,13 +16870,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -14935,35 +16911,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getColumnDescriptors_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getColumnDescriptors_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getColumnDescriptors_resultStandardScheme getScheme() {
                   return new getColumnDescriptors_resultStandardScheme();
                 }
               }
           
           -    private static class getColumnDescriptors_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getColumnDescriptors_result> {
           +    private static class getColumnDescriptors_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getColumnDescriptors_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptors_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getColumnDescriptors_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -14971,11 +16952,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptor
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map68 = iprot.readMapBegin();
           -                  struct.success = new java.util.HashMap<java.nio.ByteBuffer,ColumnDescriptor>(2*_map68.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key69;
          -                  @org.apache.thrift.annotation.Nullable ColumnDescriptor _val70;
          -                  for (int _i71 = 0; _i71 < _map68.size; ++_i71)
          -                  {
           +                  struct.success =
           +                      new java.util.HashMap<java.nio.ByteBuffer, ColumnDescriptor>(2 * _map68.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key69;
          +                  @org.apache.thrift.annotation.Nullable
          +                  ColumnDescriptor _val70;
          +                  for (int _i71 = 0; _i71 < _map68.size; ++_i71) {
                               _key69 = iprot.readBinary();
                               _val70 = new ColumnDescriptor();
                               _val70.read(iprot);
          @@ -14984,7 +16967,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptor
                             iprot.readMapEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14993,7 +16976,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptor
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -15004,20 +16987,24 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getColumnDescriptor
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getColumnDescriptors_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getColumnDescriptors_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, ColumnDescriptor> _iter72 : struct.success.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, ColumnDescriptor> _iter72 : struct.success
           +                .entrySet()) {
                         oprot.writeBinary(_iter72.getKey());
                         _iter72.getValue().write(oprot);
                       }
          @@ -15036,17 +17023,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getColumnDescripto
           
               }
           
          -    private static class getColumnDescriptors_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getColumnDescriptors_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getColumnDescriptors_resultTupleScheme getScheme() {
                   return new getColumnDescriptors_resultTupleScheme();
                 }
               }
           
           -    private static class getColumnDescriptors_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getColumnDescriptors_result> {
           +    private static class getColumnDescriptors_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getColumnDescriptors_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptors_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getColumnDescriptors_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -15058,8 +17049,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptor
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, ColumnDescriptor> _iter73 : struct.success.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, ColumnDescriptor> _iter73 : struct.success
           +                .entrySet()) {
                         oprot.writeBinary(_iter73.getKey());
                         _iter73.getValue().write(oprot);
                       }
          @@ -15071,17 +17062,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptor
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptors_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getColumnDescriptors_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TMap _map74 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT); 
           -            struct.success = new java.util.HashMap<java.nio.ByteBuffer,ColumnDescriptor>(2*_map74.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key75;
          -            @org.apache.thrift.annotation.Nullable ColumnDescriptor _val76;
          -            for (int _i77 = 0; _i77 < _map74.size; ++_i77)
          -            {
          +            org.apache.thrift.protocol.TMap _map74 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT);
           +            struct.success =
           +                new java.util.HashMap<java.nio.ByteBuffer, ColumnDescriptor>(2 * _map74.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key75;
          +            @org.apache.thrift.annotation.Nullable
          +            ColumnDescriptor _val76;
          +            for (int _i77 = 0; _i77 < _map74.size; ++_i77) {
                         _key75 = iprot.readBinary();
                         _val76 = new ColumnDescriptor();
                         _val76.read(iprot);
          @@ -15098,32 +17094,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getColumnDescriptors
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getTableRegions_args implements org.apache.thrift.TBase<getTableRegions_args, getTableRegions_args._Fields>, java.io.Serializable, Cloneable, Comparable<getTableRegions_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableRegions_args");
           +  public static class getTableRegions_args
           +      implements org.apache.thrift.TBase<getTableRegions_args, getTableRegions_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getTableRegions_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableRegions_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableRegions_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableRegions_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableRegions_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableRegions_argsTupleSchemeFactory();
           
               /**
                * table name
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * table name
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -15136,7 +17146,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -15145,12 +17155,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -15182,19 +17192,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableRegions_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableRegions_args.class,
          +        metaDataMap);
               }
           
               public getTableRegions_args() {
               }
           
          -    public getTableRegions_args(
          -      java.nio.ByteBuffer tableName)
          -    {
          +    public getTableRegions_args(java.nio.ByteBuffer tableName) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
               }
          @@ -15233,11 +17246,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * table name
                */
               public getTableRegions_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getTableRegions_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getTableRegions_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -15257,19 +17272,20 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -15277,46 +17293,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getTableRegions_args)
          -        return this.equals((getTableRegions_args)that);
          +      if (that instanceof getTableRegions_args) return this.equals((getTableRegions_args) that);
                 return false;
               }
           
               public boolean equals(getTableRegions_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -15327,8 +17341,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -15359,11 +17372,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -15390,35 +17405,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableRegions_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableRegions_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableRegions_argsStandardScheme getScheme() {
                   return new getTableRegions_argsStandardScheme();
                 }
               }
           
          -    private static class getTableRegions_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableRegions_args> {
          +    private static class getTableRegions_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getTableRegions_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -15426,7 +17446,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_arg
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -15437,11 +17457,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_arg
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableRegions_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableRegions_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -15456,17 +17478,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableRegions_ar
           
               }
           
          -    private static class getTableRegions_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableRegions_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableRegions_argsTupleScheme getScheme() {
                   return new getTableRegions_argsTupleScheme();
                 }
               }
           
          -    private static class getTableRegions_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableRegions_args> {
          +    private static class getTableRegions_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getTableRegions_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableRegions_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getTableRegions_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -15478,8 +17504,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableRegions_arg
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableRegions_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getTableRegions_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -15488,29 +17516,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableRegions_args
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getTableRegions_result implements org.apache.thrift.TBase<getTableRegions_result, getTableRegions_result._Fields>, java.io.Serializable, Cloneable, Comparable<getTableRegions_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableRegions_result");
          +  public static class getTableRegions_result
          +      implements org.apache.thrift.TBase<getTableRegions_result, getTableRegions_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getTableRegions_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableRegions_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableRegions_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableRegions_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableRegions_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableRegions_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TRegionInfo> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -15523,7 +17566,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -15534,12 +17577,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -15571,23 +17614,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRegionInfo.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRegionInfo.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableRegions_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableRegions_result.class,
          +        metaDataMap);
               }
           
               public getTableRegions_result() {
               }
           
          -    public getTableRegions_result(
          -      java.util.List<TRegionInfo> success,
          -      IOError io)
          -    {
          +    public getTableRegions_result(java.util.List<TRegionInfo> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -15598,7 +17646,8 @@ public getTableRegions_result(
                */
               public getTableRegions_result(getTableRegions_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TRegionInfo> __this__success = new java.util.ArrayList<TRegionInfo>(other.success.size());
          +        java.util.List<TRegionInfo> __this__success =
          +            new java.util.ArrayList<TRegionInfo>(other.success.size());
                   for (TRegionInfo other_element : other.success) {
                     __this__success.add(new TRegionInfo(other_element));
                   }
          @@ -15640,7 +17689,8 @@ public java.util.List<TRegionInfo> getSuccess() {
                 return this.success;
               }
           
          -    public getTableRegions_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRegionInfo> success) {
          +    public getTableRegions_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRegionInfo> success) {
                 this.success = success;
                 return this;
               }
          @@ -15685,23 +17735,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TRegionInfo>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TRegionInfo>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -15709,60 +17760,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getTableRegions_result)
          -        return this.equals((getTableRegions_result)that);
          +      if (that instanceof getTableRegions_result) return this.equals((getTableRegions_result) that);
                 return false;
               }
           
               public boolean equals(getTableRegions_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -15773,12 +17820,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -15819,13 +17864,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -15858,35 +17905,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableRegions_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableRegions_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableRegions_resultStandardScheme getScheme() {
                   return new getTableRegions_resultStandardScheme();
                 }
               }
           
          -    private static class getTableRegions_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableRegions_result> {
          +    private static class getTableRegions_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getTableRegions_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -15895,9 +17947,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_res
                           {
                             org.apache.thrift.protocol.TList _list78 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TRegionInfo>(_list78.size);
          -                  @org.apache.thrift.annotation.Nullable TRegionInfo _elem79;
          -                  for (int _i80 = 0; _i80 < _list78.size; ++_i80)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRegionInfo _elem79;
          +                  for (int _i80 = 0; _i80 < _list78.size; ++_i80) {
                               _elem79 = new TRegionInfo();
                               _elem79.read(iprot);
                               struct.success.add(_elem79);
          @@ -15905,7 +17957,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_res
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -15914,7 +17966,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_res
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -15925,20 +17977,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableRegions_res
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableRegions_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableRegions_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRegionInfo _iter81 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRegionInfo _iter81 : struct.success) {
                         _iter81.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -15956,17 +18010,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableRegions_re
           
               }
           
          -    private static class getTableRegions_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableRegions_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableRegions_resultTupleScheme getScheme() {
                   return new getTableRegions_resultTupleScheme();
                 }
               }
           
          -    private static class getTableRegions_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableRegions_result> {
          +    private static class getTableRegions_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getTableRegions_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableRegions_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getTableRegions_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -15978,8 +18036,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableRegions_res
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRegionInfo _iter82 : struct.success)
          -            {
          +            for (TRegionInfo _iter82 : struct.success) {
                         _iter82.write(oprot);
                       }
                     }
          @@ -15990,16 +18047,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableRegions_res
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableRegions_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getTableRegions_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list83 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list83 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TRegionInfo>(_list83.size);
          -            @org.apache.thrift.annotation.Nullable TRegionInfo _elem84;
          -            for (int _i85 = 0; _i85 < _list83.size; ++_i85)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRegionInfo _elem84;
          +            for (int _i85 = 0; _i85 < _list83.size; ++_i85) {
                         _elem84 = new TRegionInfo();
                         _elem84.read(iprot);
                         struct.success.add(_elem84);
          @@ -16015,19 +18075,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableRegions_resu
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class createTable_args implements org.apache.thrift.TBase<createTable_args, createTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<createTable_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createTable_args");
          +  public static class createTable_args
          +      implements org.apache.thrift.TBase<createTable_args, createTable_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<createTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("createTable_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FAMILIES_FIELD_DESC = new org.apache.thrift.protocol.TField("columnFamilies", org.apache.thrift.protocol.TType.LIST, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FAMILIES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columnFamilies",
          +            org.apache.thrift.protocol.TType.LIST, (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new createTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new createTable_argsTupleSchemeFactory();
           
               /**
                * name of table to create
          @@ -16038,18 +18110,22 @@ public static class createTable_args implements org.apache.thrift.TBase<createT
                */
               public @org.apache.thrift.annotation.Nullable java.util.List<ColumnDescriptor> columnFamilies; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table to create
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * list of column family descriptors
                  */
          -      COLUMN_FAMILIES((short)2, "columnFamilies");
          +      COLUMN_FAMILIES((short) 2, "columnFamilies");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -16062,7 +18138,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // COLUMN_FAMILIES
          @@ -16073,12 +18149,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -16110,23 +18186,29 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMN_FAMILIES, new org.apache.thrift.meta_data.FieldMetaData("columnFamilies", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnDescriptor.class))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMN_FAMILIES,
          +        new org.apache.thrift.meta_data.FieldMetaData("columnFamilies",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, ColumnDescriptor.class))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createTable_args.class,
          +        metaDataMap);
               }
           
               public createTable_args() {
               }
           
          -    public createTable_args(
          -      java.nio.ByteBuffer tableName,
          -      java.util.List<ColumnDescriptor> columnFamilies)
          -    {
          +    public createTable_args(java.nio.ByteBuffer tableName,
          +        java.util.List<ColumnDescriptor> columnFamilies) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.columnFamilies = columnFamilies;
          @@ -16140,7 +18222,8 @@ public createTable_args(createTable_args other) {
                   this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName);
                 }
                 if (other.isSetColumnFamilies()) {
          -        java.util.List<ColumnDescriptor> __this__columnFamilies = new java.util.ArrayList<ColumnDescriptor>(other.columnFamilies.size());
          +        java.util.List<ColumnDescriptor> __this__columnFamilies =
          +            new java.util.ArrayList<ColumnDescriptor>(other.columnFamilies.size());
                   for (ColumnDescriptor other_element : other.columnFamilies) {
                     __this__columnFamilies.add(new ColumnDescriptor(other_element));
                   }
          @@ -16174,11 +18257,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table to create
                */
               public createTable_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public createTable_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public createTable_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -16225,7 +18310,8 @@ public java.util.List<ColumnDescriptor> getColumnFamilies() {
               /**
                * list of column family descriptors
                */
          -    public createTable_args setColumnFamilies(@org.apache.thrift.annotation.Nullable java.util.List<ColumnDescriptor> columnFamilies) {
          +    public createTable_args setColumnFamilies(
          +        @org.apache.thrift.annotation.Nullable java.util.List<ColumnDescriptor> columnFamilies) {
                 this.columnFamilies = columnFamilies;
                 return this;
               }
          @@ -16234,7 +18320,9 @@ public void unsetColumnFamilies() {
                 this.columnFamilies = null;
               }
           
          -    /** Returns true if field columnFamilies is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field columnFamilies is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetColumnFamilies() {
                 return this.columnFamilies != null;
               }
          @@ -16245,27 +18333,28 @@ public void setColumnFamiliesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMN_FAMILIES:
          -        if (value == null) {
          -          unsetColumnFamilies();
          -        } else {
          -          setColumnFamilies((java.util.List<ColumnDescriptor>)value);
          -        }
          -        break;
          +        case COLUMN_FAMILIES:
          +          if (value == null) {
          +            unsetColumnFamilies();
          +          } else {
          +            setColumnFamilies((java.util.List<ColumnDescriptor>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -16273,60 +18362,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case COLUMN_FAMILIES:
          -        return getColumnFamilies();
          +        case COLUMN_FAMILIES:
          +          return getColumnFamilies();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case COLUMN_FAMILIES:
          -        return isSetColumnFamilies();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case COLUMN_FAMILIES:
          +          return isSetColumnFamilies();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof createTable_args)
          -        return this.equals((createTable_args)that);
          +      if (that instanceof createTable_args) return this.equals((createTable_args) that);
                 return false;
               }
           
               public boolean equals(createTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_columnFamilies = true && this.isSetColumnFamilies();
                 boolean that_present_columnFamilies = true && that.isSetColumnFamilies();
                 if (this_present_columnFamilies || that_present_columnFamilies) {
          -        if (!(this_present_columnFamilies && that_present_columnFamilies))
          -          return false;
          -        if (!this.columnFamilies.equals(that.columnFamilies))
          -          return false;
          +        if (!(this_present_columnFamilies && that_present_columnFamilies)) return false;
          +        if (!this.columnFamilies.equals(that.columnFamilies)) return false;
                 }
           
                 return true;
          @@ -16337,12 +18422,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumnFamilies()) ? 131071 : 524287);
          -      if (isSetColumnFamilies())
          -        hashCode = hashCode * 8191 + columnFamilies.hashCode();
          +      if (isSetColumnFamilies()) hashCode = hashCode * 8191 + columnFamilies.hashCode();
           
                 return hashCode;
               }
          @@ -16365,12 +18448,14 @@ public int compareTo(createTable_args other) {
                     return lastComparison;
                   }
                 }
          -      lastComparison = java.lang.Boolean.compare(isSetColumnFamilies(), other.isSetColumnFamilies());
          +      lastComparison =
          +          java.lang.Boolean.compare(isSetColumnFamilies(), other.isSetColumnFamilies());
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
                 if (isSetColumnFamilies()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnFamilies, other.columnFamilies);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.columnFamilies, other.columnFamilies);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -16383,11 +18468,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -16422,35 +18509,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class createTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createTable_argsStandardScheme getScheme() {
                   return new createTable_argsStandardScheme();
                 }
               }
           
          -    private static class createTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<createTable_args> {
          +    private static class createTable_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<createTable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -16458,7 +18550,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -16467,9 +18559,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args st
                           {
                             org.apache.thrift.protocol.TList _list86 = iprot.readListBegin();
                             struct.columnFamilies = new java.util.ArrayList<ColumnDescriptor>(_list86.size);
          -                  @org.apache.thrift.annotation.Nullable ColumnDescriptor _elem87;
          -                  for (int _i88 = 0; _i88 < _list86.size; ++_i88)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  ColumnDescriptor _elem87;
          +                  for (int _i88 = 0; _i88 < _list86.size; ++_i88) {
                               _elem87 = new ColumnDescriptor();
                               _elem87.read(iprot);
                               struct.columnFamilies.add(_elem87);
          @@ -16477,7 +18569,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args st
                             iprot.readListEnd();
                           }
                           struct.setColumnFamiliesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -16488,11 +18580,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -16504,9 +18598,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_args s
                   if (struct.columnFamilies != null) {
                     oprot.writeFieldBegin(COLUMN_FAMILIES_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columnFamilies.size()));
          -            for (ColumnDescriptor _iter89 : struct.columnFamilies)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.columnFamilies.size()));
          +            for (ColumnDescriptor _iter89 : struct.columnFamilies) {
                         _iter89.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -16519,17 +18613,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_args s
           
               }
           
          -    private static class createTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createTable_argsTupleScheme getScheme() {
                   return new createTable_argsTupleScheme();
                 }
               }
           
          -    private static class createTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<createTable_args> {
          +    private static class createTable_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<createTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, createTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, createTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -16544,8 +18642,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, createTable_args st
                   if (struct.isSetColumnFamilies()) {
                     {
                       oprot.writeI32(struct.columnFamilies.size());
          -            for (ColumnDescriptor _iter90 : struct.columnFamilies)
          -            {
          +            for (ColumnDescriptor _iter90 : struct.columnFamilies) {
                         _iter90.write(oprot);
                       }
                     }
          @@ -16553,8 +18650,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, createTable_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, createTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, createTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -16562,11 +18661,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, createTable_args str
                   }
                   if (incoming.get(1)) {
                     {
          -            org.apache.thrift.protocol.TList _list91 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list91 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.columnFamilies = new java.util.ArrayList<ColumnDescriptor>(_list91.size);
          -            @org.apache.thrift.annotation.Nullable ColumnDescriptor _elem92;
          -            for (int _i93 = 0; _i93 < _list91.size; ++_i93)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            ColumnDescriptor _elem92;
          +            for (int _i93 = 0; _i93 < _list91.size; ++_i93) {
                         _elem92 = new ColumnDescriptor();
                         _elem92.read(iprot);
                         struct.columnFamilies.add(_elem92);
          @@ -16577,32 +18677,48 @@ public void read(org.apache.thrift.protocol.TProtocol prot, createTable_args str
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class createTable_result implements org.apache.thrift.TBase<createTable_result, createTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<createTable_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createTable_result");
          -
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -    private static final org.apache.thrift.protocol.TField EXIST_FIELD_DESC = new org.apache.thrift.protocol.TField("exist", org.apache.thrift.protocol.TType.STRUCT, (short)3);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createTable_resultTupleSchemeFactory();
          +  public static class createTable_result
          +      implements org.apache.thrift.TBase<createTable_result, createTable_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<createTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("createTable_result");
          +
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField EXIST_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("exist", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 3);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new createTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new createTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
               public @org.apache.thrift.annotation.Nullable AlreadyExists exist; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io"),
          -      IA((short)2, "ia"),
          -      EXIST((short)3, "exist");
          +      IO((short) 1, "io"), IA((short) 2, "ia"), EXIST((short) 3, "exist");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -16615,7 +18731,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     case 2: // IA
          @@ -16628,12 +18744,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -16665,25 +18781,32 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          -      tmpMap.put(_Fields.EXIST, new org.apache.thrift.meta_data.FieldMetaData("exist", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AlreadyExists.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
          +      tmpMap.put(_Fields.EXIST,
          +        new org.apache.thrift.meta_data.FieldMetaData("exist",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                AlreadyExists.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createTable_result.class,
          +        metaDataMap);
               }
           
               public createTable_result() {
               }
           
          -    public createTable_result(
          -      IOError io,
          -      IllegalArgument ia,
          -      AlreadyExists exist)
          -    {
          +    public createTable_result(IOError io, IllegalArgument ia, AlreadyExists exist) {
                 this();
                 this.io = io;
                 this.ia = ia;
          @@ -16791,31 +18914,32 @@ public void setExistIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
          -      case EXIST:
          -        if (value == null) {
          -          unsetExist();
          -        } else {
          -          setExist((AlreadyExists)value);
          -        }
          -        break;
          +        case EXIST:
          +          if (value == null) {
          +            unsetExist();
          +          } else {
          +            setExist((AlreadyExists) value);
          +          }
          +          break;
           
                 }
               }
          @@ -16823,74 +18947,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
          -      case EXIST:
          -        return getExist();
          +        case EXIST:
          +          return getExist();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          -      case EXIST:
          -        return isSetExist();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
          +        case EXIST:
          +          return isSetExist();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof createTable_result)
          -        return this.equals((createTable_result)that);
          +      if (that instanceof createTable_result) return this.equals((createTable_result) that);
                 return false;
               }
           
               public boolean equals(createTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 boolean this_present_exist = true && this.isSetExist();
                 boolean that_present_exist = true && that.isSetExist();
                 if (this_present_exist || that_present_exist) {
          -        if (!(this_present_exist && that_present_exist))
          -          return false;
          -        if (!this.exist.equals(that.exist))
          -          return false;
          +        if (!(this_present_exist && that_present_exist)) return false;
          +        if (!this.exist.equals(that.exist)) return false;
                 }
           
                 return true;
          @@ -16901,16 +19019,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetExist()) ? 131071 : 524287);
          -      if (isSetExist())
          -        hashCode = hashCode * 8191 + exist.hashCode();
          +      if (isSetExist()) hashCode = hashCode * 8191 + exist.hashCode();
           
                 return hashCode;
               }
          @@ -16961,13 +19076,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -17008,35 +19125,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class createTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createTable_resultStandardScheme getScheme() {
                   return new createTable_resultStandardScheme();
                 }
               }
           
          -    private static class createTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<createTable_result> {
          +    private static class createTable_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<createTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -17045,7 +19167,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -17054,7 +19176,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -17063,7 +19185,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result
                           struct.exist = new AlreadyExists();
                           struct.exist.read(iprot);
                           struct.setExistIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -17074,11 +19196,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -17103,17 +19227,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_result
           
               }
           
          -    private static class createTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createTable_resultTupleScheme getScheme() {
                   return new createTable_resultTupleScheme();
                 }
               }
           
          -    private static class createTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<createTable_result> {
          +    private static class createTable_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<createTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, createTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, createTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -17137,8 +19265,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, createTable_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, createTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, createTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -17158,32 +19288,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, createTable_result s
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class deleteTable_args implements org.apache.thrift.TBase<deleteTable_args, deleteTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteTable_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteTable_args");
          +  public static class deleteTable_args
          +      implements org.apache.thrift.TBase<deleteTable_args, deleteTable_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<deleteTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteTable_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteTable_argsTupleSchemeFactory();
           
               /**
                * name of table to delete
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table to delete
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -17196,7 +19340,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -17205,12 +19349,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -17242,19 +19386,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteTable_args.class,
          +        metaDataMap);
               }
           
               public deleteTable_args() {
               }
           
          -    public deleteTable_args(
          -      java.nio.ByteBuffer tableName)
          -    {
          +    public deleteTable_args(java.nio.ByteBuffer tableName) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
               }
          @@ -17293,11 +19440,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table to delete
                */
               public deleteTable_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public deleteTable_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public deleteTable_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -17317,19 +19466,20 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -17337,46 +19487,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteTable_args)
          -        return this.equals((deleteTable_args)that);
          +      if (that instanceof deleteTable_args) return this.equals((deleteTable_args) that);
                 return false;
               }
           
               public boolean equals(deleteTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -17387,8 +19535,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -17419,11 +19566,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -17450,35 +19599,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteTable_argsStandardScheme getScheme() {
                   return new deleteTable_argsStandardScheme();
                 }
               }
           
          -    private static class deleteTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteTable_args> {
          +    private static class deleteTable_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<deleteTable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -17486,7 +19640,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -17497,11 +19651,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -17516,17 +19672,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_args s
           
               }
           
          -    private static class deleteTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteTable_argsTupleScheme getScheme() {
                   return new deleteTable_argsTupleScheme();
                 }
               }
           
-    private static class deleteTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteTable_args> {
+    private static class deleteTable_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<deleteTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -17538,8 +19698,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -17548,26 +19710,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_args str
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class deleteTable_result implements org.apache.thrift.TBase<deleteTable_result, deleteTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteTable_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteTable_result");
          +  public static class deleteTable_result
+      implements org.apache.thrift.TBase<deleteTable_result, deleteTable_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<deleteTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteTable_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteTable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -17580,7 +19756,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -17589,12 +19765,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -17626,19 +19802,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteTable_result.class,
          +        metaDataMap);
               }
           
               public deleteTable_result() {
               }
           
          -    public deleteTable_result(
          -      IOError io)
          -    {
          +    public deleteTable_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -17686,15 +19865,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -17702,46 +19882,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteTable_result)
          -        return this.equals((deleteTable_result)that);
          +      if (that instanceof deleteTable_result) return this.equals((deleteTable_result) that);
                 return false;
               }
           
               public boolean equals(deleteTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -17752,8 +19930,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -17784,13 +19961,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -17815,35 +19994,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteTable_resultStandardScheme getScheme() {
                   return new deleteTable_resultStandardScheme();
                 }
               }
           
-    private static class deleteTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteTable_result> {
+    private static class deleteTable_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<deleteTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -17852,7 +20036,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -17863,11 +20047,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -17882,17 +20068,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_result
           
               }
           
          -    private static class deleteTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteTable_resultTupleScheme getScheme() {
                   return new deleteTable_resultTupleScheme();
                 }
               }
           
-    private static class deleteTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteTable_result> {
+    private static class deleteTable_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<deleteTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -17904,8 +20094,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -17915,21 +20107,36 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_result s
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class get_args implements org.apache.thrift.TBase<get_args, get_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new get_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new get_argsTupleSchemeFactory();
+  public static class get_args implements org.apache.thrift.TBase<get_args, get_args._Fields>,
+      java.io.Serializable, Cloneable, Comparable<get_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("get_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new get_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new get_argsTupleSchemeFactory();
           
               /**
                * name of table
@@ -17946,28 +20153,32 @@ public static class get_args implements org.apache.thrift.TBase<get_args, get_a
-    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
+    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * column name
                  */
          -      COLUMN((short)3, "column"),
          +      COLUMN((short) 3, "column"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -17980,7 +20191,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -17995,12 +20206,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -18032,17 +20243,31 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
                 org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_args.class, metaDataMap);
               }
          @@ -18050,12 +20275,9 @@ public java.lang.String getFieldName() {
               public get_args() {
               }
           
          -    public get_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer column,
-      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public get_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -18077,15 +20299,20 @@ public get_args(get_args other) {
                   this.column = org.apache.thrift.TBaseHelper.copyBinary(other.column);
                 }
                 if (other.isSetAttributes()) {
-        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
-        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
+            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
+        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -18121,11 +20348,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public get_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public get_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public get_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -18161,7 +20390,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public get_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -18201,7 +20430,8 @@ public java.nio.ByteBuffer bufferForColumn() {
                * column name
                */
               public get_args setColumn(byte[] column) {
          -      this.column = column == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(column.clone());
          +      this.column =
          +          column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
                 return this;
               }
           
          @@ -18231,7 +20461,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
-        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
+        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -18240,14 +20470,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
-    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
+    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
-    public get_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
+    public get_args setAttributes(
+        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -18267,51 +20498,52 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setColumn((byte[])value);
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
                     } else {
          -            setColumn((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setColumn((byte[]) value);
          +            } else {
          +              setColumn((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
-          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
+            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -18319,88 +20551,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case COLUMN:
          -        return isSetColumn();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case COLUMN:
          +          return isSetColumn();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof get_args)
          -        return this.equals((get_args)that);
          +      if (that instanceof get_args) return this.equals((get_args) that);
                 return false;
               }
           
               public boolean equals(get_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -18411,20 +20635,16 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -18485,11 +20705,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -18540,35 +20762,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class get_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class get_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public get_argsStandardScheme getScheme() {
                   return new get_argsStandardScheme();
                 }
               }
           
-    private static class get_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<get_args> {
+    private static class get_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<get_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -18576,7 +20803,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -18584,7 +20811,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -18592,7 +20819,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.column = iprot.readBinary();
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -18600,11 +20827,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map94 = iprot.readMapBegin();
-                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map94.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key95;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val96;
          -                  for (int _i97 = 0; _i97 < _map94.size; ++_i97)
          -                  {
          +                  struct.attributes =
+                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map94.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key95;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val96;
          +                  for (int _i97 = 0; _i97 < _map94.size; ++_i97) {
                               _key95 = iprot.readBinary();
                               _val96 = iprot.readBinary();
                               struct.attributes.put(_key95, _val96);
          @@ -18612,7 +20842,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -18623,11 +20853,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, get_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, get_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -18649,9 +20881,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_args struct) t
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
-            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter98 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter98 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter98.getKey());
                         oprot.writeBinary(_iter98.getValue());
                       }
          @@ -18665,17 +20899,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_args struct) t
           
               }
           
          -    private static class get_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class get_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public get_argsTupleScheme getScheme() {
                   return new get_argsTupleScheme();
                 }
               }
           
          -    private static class get_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<get_args> {
          +    private static class get_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<get_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, get_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, get_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -18702,8 +20940,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_args struct) th
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter99 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter99 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter99.getKey());
                         oprot.writeBinary(_iter99.getValue());
                       }
          @@ -18712,8 +20950,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_args struct) th
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, get_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, get_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -18729,12 +20969,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_args struct) thr
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map100 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map100.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key101;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val102;
          -            for (int _i103 = 0; _i103 < _map100.size; ++_i103)
          -            {
          +            org.apache.thrift.protocol.TMap _map100 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map100.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key101;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val102;
          +            for (int _i103 = 0; _i103 < _map100.size; ++_i103) {
                         _key101 = iprot.readBinary();
                         _val102 = iprot.readBinary();
                         struct.attributes.put(_key101, _val102);
          @@ -18745,29 +20988,43 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_args struct) thr
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class get_result implements org.apache.thrift.TBase<get_result, get_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_result");
          +  public static class get_result implements org.apache.thrift.TBase<get_result, get_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<get_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("get_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new get_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new get_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new get_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new get_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TCell> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -18780,7 +21037,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -18791,12 +21048,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -18828,12 +21085,19 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
                 org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_result.class, metaDataMap);
               }
          @@ -18841,10 +21105,7 @@ public java.lang.String getFieldName() {
               public get_result() {
               }
           
          -    public get_result(
          -      java.util.List<TCell> success,
          -      IOError io)
          -    {
          +    public get_result(java.util.List<TCell> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -18855,7 +21116,8 @@ public get_result(
                */
               public get_result(get_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TCell> __this__success = new java.util.ArrayList<TCell>(other.success.size());
          +        java.util.List<TCell> __this__success =
          +            new java.util.ArrayList<TCell>(other.success.size());
                   for (TCell other_element : other.success) {
                     __this__success.add(new TCell(other_element));
                   }
          @@ -18897,7 +21159,8 @@ public java.util.List<TCell> getSuccess() {
                 return this.success;
               }
           
          -    public get_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TCell> success) {
          +    public get_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TCell> success) {
                 this.success = success;
                 return this;
               }
          @@ -18942,23 +21205,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TCell>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TCell>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -18966,60 +21230,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof get_result)
          -        return this.equals((get_result)that);
          +      if (that instanceof get_result) return this.equals((get_result) that);
                 return false;
               }
           
               public boolean equals(get_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -19030,12 +21290,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -19076,13 +21334,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -19115,35 +21375,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class get_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class get_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public get_resultStandardScheme getScheme() {
                   return new get_resultStandardScheme();
                 }
               }
           
          -    private static class get_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<get_result> {
          +    private static class get_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<get_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -19152,9 +21417,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct)
                           {
                             org.apache.thrift.protocol.TList _list104 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TCell>(_list104.size);
          -                  @org.apache.thrift.annotation.Nullable TCell _elem105;
          -                  for (int _i106 = 0; _i106 < _list104.size; ++_i106)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TCell _elem105;
          +                  for (int _i106 = 0; _i106 < _list104.size; ++_i106) {
                               _elem105 = new TCell();
                               _elem105.read(iprot);
                               struct.success.add(_elem105);
          @@ -19162,7 +21427,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct)
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19171,7 +21436,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct)
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19182,20 +21447,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, get_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, get_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TCell _iter107 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TCell _iter107 : struct.success) {
                         _iter107.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -19213,17 +21480,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_result struct)
           
               }
           
          -    private static class get_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class get_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public get_resultTupleScheme getScheme() {
                   return new get_resultTupleScheme();
                 }
               }
           
          -    private static class get_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<get_result> {
          +    private static class get_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<get_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, get_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, get_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -19235,8 +21506,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_result struct)
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TCell _iter108 : struct.success)
          -            {
          +            for (TCell _iter108 : struct.success) {
                         _iter108.write(oprot);
                       }
                     }
          @@ -19247,16 +21517,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_result struct)
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, get_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, get_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list109 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list109 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TCell>(_list109.size);
          -            @org.apache.thrift.annotation.Nullable TCell _elem110;
          -            for (int _i111 = 0; _i111 < _list109.size; ++_i111)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TCell _elem110;
          +            for (int _i111 = 0; _i111 < _list109.size; ++_i111) {
                         _elem110 = new TCell();
                         _elem110.read(iprot);
                         struct.success.add(_elem110);
          @@ -19272,22 +21545,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_result struct) t
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getVer_args implements org.apache.thrift.TBase<getVer_args, getVer_args._Fields>, java.io.Serializable, Cloneable, Comparable<getVer_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getVer_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField NUM_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("numVersions", org.apache.thrift.protocol.TType.I32, (short)4);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)5);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getVer_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getVer_argsTupleSchemeFactory();
          +  public static class getVer_args
          +      implements org.apache.thrift.TBase<getVer_args, getVer_args._Fields>, java.io.Serializable,
          +      Cloneable, Comparable<getVer_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getVer_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField NUM_VERSIONS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("numVersions", org.apache.thrift.protocol.TType.I32,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 5);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getVer_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getVer_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -19308,32 +21599,36 @@ public static class getVer_args implements org.apache.thrift.TBase<getVer_args
          -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * column name
                  */
          -      COLUMN((short)3, "column"),
          +      COLUMN((short) 3, "column"),
                 /**
                  * number of versions to retrieve
                  */
          -      NUM_VERSIONS((short)4, "numVersions"),
          +      NUM_VERSIONS((short) 4, "numVersions"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)5, "attributes");
          +      ATTRIBUTES((short) 5, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -19346,7 +21641,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -19363,12 +21658,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -19402,33 +21697,47 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.NUM_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("numVersions", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.NUM_VERSIONS,
          +        new org.apache.thrift.meta_data.FieldMetaData("numVersions",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I32)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVer_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVer_args.class,
          +        metaDataMap);
               }
           
               public getVer_args() {
               }
           
          -    public getVer_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer column,
          -      int numVersions,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public getVer_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, int numVersions,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -19454,15 +21763,20 @@ public getVer_args(getVer_args other) {
                 }
                 this.numVersions = other.numVersions;
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -19500,11 +21814,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public getVer_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getVer_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getVer_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -19540,7 +21856,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public getVer_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -19580,11 +21896,13 @@ public java.nio.ByteBuffer bufferForColumn() {
                * column name
                */
               public getVer_args setColumn(byte[] column) {
          -      this.column = column == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(column.clone());
          +      this.column =
          +          column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
                 return this;
               }
           
          -    public getVer_args setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
          +    public getVer_args
          +        setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
                 this.column = org.apache.thrift.TBaseHelper.copyBinary(column);
                 return this;
               }
          @@ -19621,7 +21939,8 @@ public getVer_args setNumVersions(int numVersions) {
               }
           
               public void unsetNumVersions() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMVERSIONS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMVERSIONS_ISSET_ID);
               }
           
               /** Returns true if field numVersions is set (has been assigned a value) and false otherwise */
          @@ -19630,7 +21949,8 @@ public boolean isSetNumVersions() {
               }
           
               public void setNumVersionsIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMVERSIONS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMVERSIONS_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -19639,7 +21959,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -19648,14 +21968,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
          -    public getVer_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public getVer_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -19675,59 +21996,60 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setColumn((byte[])value);
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
                     } else {
          -            setColumn((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setColumn((byte[]) value);
          +            } else {
          +              setColumn((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case NUM_VERSIONS:
          -        if (value == null) {
          -          unsetNumVersions();
          -        } else {
          -          setNumVersions((java.lang.Integer)value);
          -        }
          -        break;
          +        case NUM_VERSIONS:
          +          if (value == null) {
          +            unsetNumVersions();
          +          } else {
          +            setNumVersions((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -19735,102 +22057,92 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
          -      case NUM_VERSIONS:
          -        return getNumVersions();
          +        case NUM_VERSIONS:
          +          return getNumVersions();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case COLUMN:
          -        return isSetColumn();
          -      case NUM_VERSIONS:
          -        return isSetNumVersions();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case COLUMN:
          +          return isSetColumn();
          +        case NUM_VERSIONS:
          +          return isSetNumVersions();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getVer_args)
          -        return this.equals((getVer_args)that);
          +      if (that instanceof getVer_args) return this.equals((getVer_args) that);
                 return false;
               }
           
               public boolean equals(getVer_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 boolean this_present_numVersions = true;
                 boolean that_present_numVersions = true;
                 if (this_present_numVersions || that_present_numVersions) {
          -        if (!(this_present_numVersions && that_present_numVersions))
          -          return false;
          -        if (this.numVersions != that.numVersions)
          -          return false;
          +        if (!(this_present_numVersions && that_present_numVersions)) return false;
          +        if (this.numVersions != that.numVersions) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -19841,22 +22153,18 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 hashCode = hashCode * 8191 + numVersions;
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -19904,7 +22212,8 @@ public int compareTo(getVer_args other) {
                   return lastComparison;
                 }
                 if (isSetNumVersions()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numVersions, other.numVersions);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.numVersions, other.numVersions);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -19927,11 +22236,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -19986,37 +22297,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getVer_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getVer_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getVer_argsStandardScheme getScheme() {
                   return new getVer_argsStandardScheme();
                 }
               }
           
           -    private static class getVer_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getVer_args> {
           +    private static class getVer_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getVer_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -20024,7 +22341,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20032,7 +22349,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20040,7 +22357,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.column = iprot.readBinary();
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20048,7 +22365,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.numVersions = iprot.readI32();
                           struct.setNumVersionsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20056,11 +22373,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map112 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map112.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key113;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val114;
          -                  for (int _i115 = 0; _i115 < _map112.size; ++_i115)
          -                  {
          +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map112.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key113;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val114;
          +                  for (int _i115 = 0; _i115 < _map112.size; ++_i115) {
                               _key113 = iprot.readBinary();
                               _val114 = iprot.readBinary();
                               struct.attributes.put(_key113, _val114);
          @@ -20068,7 +22388,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct)
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20079,11 +22399,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_args struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getVer_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getVer_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -20108,9 +22430,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVer_args struct
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter116 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter116 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter116.getKey());
                         oprot.writeBinary(_iter116.getValue());
                       }
          @@ -20124,17 +22448,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVer_args struct
           
               }
           
          -    private static class getVer_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getVer_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getVer_argsTupleScheme getScheme() {
                   return new getVer_argsTupleScheme();
                 }
               }
           
           -    private static class getVer_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getVer_args> {
           +    private static class getVer_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getVer_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getVer_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getVer_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -20167,8 +22495,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVer_args struct)
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter117 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter117 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter117.getKey());
                         oprot.writeBinary(_iter117.getValue());
                       }
          @@ -20177,8 +22505,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVer_args struct)
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getVer_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getVer_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(5);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -20198,12 +22528,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVer_args struct)
                   }
                   if (incoming.get(4)) {
                     {
          -            org.apache.thrift.protocol.TMap _map118 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map118.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key119;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val120;
          -            for (int _i121 = 0; _i121 < _map118.size; ++_i121)
          -            {
          +            org.apache.thrift.protocol.TMap _map118 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map118.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key119;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val120;
          +            for (int _i121 = 0; _i121 < _map118.size; ++_i121) {
                         _key119 = iprot.readBinary();
                         _val120 = iprot.readBinary();
                         struct.attributes.put(_key119, _val120);
          @@ -20214,29 +22547,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVer_args struct)
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getVer_result implements org.apache.thrift.TBase<getVer_result, getVer_result._Fields>, java.io.Serializable, Cloneable, Comparable<getVer_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getVer_result");
          +  public static class getVer_result
           +      implements org.apache.thrift.TBase<getVer_result, getVer_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getVer_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getVer_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getVer_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getVer_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getVer_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getVer_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TCell> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -20249,7 +22597,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -20260,12 +22608,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -20297,23 +22645,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVer_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVer_result.class,
          +        metaDataMap);
               }
           
               public getVer_result() {
               }
           
          -    public getVer_result(
           -      java.util.List<TCell> success,
          -      IOError io)
          -    {
           +    public getVer_result(java.util.List<TCell> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -20324,7 +22677,8 @@ public getVer_result(
                */
               public getVer_result(getVer_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TCell> __this__success = new java.util.ArrayList<TCell>(other.success.size());
           +        java.util.List<TCell> __this__success =
           +            new java.util.ArrayList<TCell>(other.success.size());
                   for (TCell other_element : other.success) {
                     __this__success.add(new TCell(other_element));
                   }
           @@ -20366,7 +22720,8 @@ public java.util.List<TCell> getSuccess() {
                 return this.success;
               }
           
           -    public getVer_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TCell> success) {
           +    public getVer_result
           +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TCell> success) {
                 this.success = success;
                 return this;
               }
          @@ -20411,23 +22766,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TCell>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TCell>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -20435,60 +22791,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getVer_result)
          -        return this.equals((getVer_result)that);
          +      if (that instanceof getVer_result) return this.equals((getVer_result) that);
                 return false;
               }
           
               public boolean equals(getVer_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -20499,12 +22851,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -20545,13 +22895,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -20584,35 +22936,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getVer_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getVer_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getVer_resultStandardScheme getScheme() {
                   return new getVer_resultStandardScheme();
                 }
               }
           
           -    private static class getVer_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getVer_result> {
           +    private static class getVer_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getVer_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -20621,9 +22978,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_result struc
                           {
                             org.apache.thrift.protocol.TList _list122 = iprot.readListBegin();
                              struct.success = new java.util.ArrayList<TCell>(_list122.size);
          -                  @org.apache.thrift.annotation.Nullable TCell _elem123;
          -                  for (int _i124 = 0; _i124 < _list122.size; ++_i124)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TCell _elem123;
          +                  for (int _i124 = 0; _i124 < _list122.size; ++_i124) {
                               _elem123 = new TCell();
                               _elem123.read(iprot);
                               struct.success.add(_elem123);
          @@ -20631,7 +22988,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_result struc
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20640,7 +22997,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_result struc
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20651,20 +23008,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVer_result struc
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getVer_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getVer_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TCell _iter125 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TCell _iter125 : struct.success) {
                         _iter125.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -20682,17 +23041,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVer_result stru
           
               }
           
          -    private static class getVer_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getVer_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getVer_resultTupleScheme getScheme() {
                   return new getVer_resultTupleScheme();
                 }
               }
           
           -    private static class getVer_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getVer_result> {
           +    private static class getVer_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getVer_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getVer_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getVer_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -20704,8 +23067,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVer_result struc
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TCell _iter126 : struct.success)
          -            {
          +            for (TCell _iter126 : struct.success) {
                         _iter126.write(oprot);
                       }
                     }
          @@ -20716,16 +23078,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVer_result struc
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getVer_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getVer_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list127 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list127 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TCell>(_list127.size);
          -            @org.apache.thrift.annotation.Nullable TCell _elem128;
          -            for (int _i129 = 0; _i129 < _list127.size; ++_i129)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TCell _elem128;
          +            for (int _i129 = 0; _i129 < _list127.size; ++_i129) {
                         _elem128 = new TCell();
                         _elem128.read(iprot);
                         struct.success.add(_elem128);
          @@ -20741,23 +23106,43 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVer_result struct
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getVerTs_args implements org.apache.thrift.TBase<getVerTs_args, getVerTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<getVerTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getVerTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)4);
          -    private static final org.apache.thrift.protocol.TField NUM_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("numVersions", org.apache.thrift.protocol.TType.I32, (short)5);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)6);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getVerTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getVerTs_argsTupleSchemeFactory();
          +  public static class getVerTs_args
           +      implements org.apache.thrift.TBase<getVerTs_args, getVerTs_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getVerTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getVerTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField NUM_VERSIONS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("numVersions", org.apache.thrift.protocol.TType.I32,
          +            (short) 5);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 6);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getVerTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getVerTs_argsTupleSchemeFactory();
           
               /**
                * name of table
           @@ -20782,36 +23167,40 @@ public static class getVerTs_args implements org.apache.thrift.TBase<getVerTs_a
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * column name
                  */
          -      COLUMN((short)3, "column"),
          +      COLUMN((short) 3, "column"),
                 /**
                  * timestamp
                  */
          -      TIMESTAMP((short)4, "timestamp"),
          +      TIMESTAMP((short) 4, "timestamp"),
                 /**
                  * number of versions to retrieve
                  */
          -      NUM_VERSIONS((short)5, "numVersions"),
          +      NUM_VERSIONS((short) 5, "numVersions"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)6, "attributes");
          +      ATTRIBUTES((short) 6, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -20824,7 +23213,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -20843,12 +23232,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -20883,36 +23272,52 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.NUM_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("numVersions", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.NUM_VERSIONS,
          +        new org.apache.thrift.meta_data.FieldMetaData("numVersions",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I32)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVerTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVerTs_args.class,
          +        metaDataMap);
               }
           
               public getVerTs_args() {
               }
           
          -    public getVerTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer column,
          -      long timestamp,
          -      int numVersions,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public getVerTs_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long timestamp, int numVersions,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -20941,15 +23346,20 @@ public getVerTs_args(getVerTs_args other) {
                 this.timestamp = other.timestamp;
                 this.numVersions = other.numVersions;
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -20989,11 +23399,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public getVerTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getVerTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getVerTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -21029,7 +23441,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public getVerTs_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -21069,11 +23481,13 @@ public java.nio.ByteBuffer bufferForColumn() {
                * column name
                */
               public getVerTs_args setColumn(byte[] column) {
          -      this.column = column == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(column.clone());
          +      this.column =
          +          column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
                 return this;
               }
           
          -    public getVerTs_args setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
          +    public getVerTs_args
          +        setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
                 this.column = org.apache.thrift.TBaseHelper.copyBinary(column);
                 return this;
               }
          @@ -21110,7 +23524,8 @@ public getVerTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -21119,7 +23534,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               /**
          @@ -21139,7 +23555,8 @@ public getVerTs_args setNumVersions(int numVersions) {
               }
           
               public void unsetNumVersions() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMVERSIONS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMVERSIONS_ISSET_ID);
               }
           
               /** Returns true if field numVersions is set (has been assigned a value) and false otherwise */
          @@ -21148,7 +23565,8 @@ public boolean isSetNumVersions() {
               }
           
               public void setNumVersionsIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMVERSIONS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMVERSIONS_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -21157,7 +23575,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -21166,14 +23584,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
          -    public getVerTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public getVerTs_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -21193,67 +23612,68 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setColumn((byte[])value);
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
                     } else {
          -            setColumn((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setColumn((byte[]) value);
          +            } else {
          +              setColumn((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case NUM_VERSIONS:
          -        if (value == null) {
          -          unsetNumVersions();
          -        } else {
          -          setNumVersions((java.lang.Integer)value);
          -        }
          -        break;
          +        case NUM_VERSIONS:
          +          if (value == null) {
          +            unsetNumVersions();
          +          } else {
          +            setNumVersions((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -21261,116 +23681,104 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case NUM_VERSIONS:
          -        return getNumVersions();
          +        case NUM_VERSIONS:
          +          return getNumVersions();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case COLUMN:
          -        return isSetColumn();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case NUM_VERSIONS:
          -        return isSetNumVersions();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case COLUMN:
          +          return isSetColumn();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case NUM_VERSIONS:
          +          return isSetNumVersions();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getVerTs_args)
          -        return this.equals((getVerTs_args)that);
          +      if (that instanceof getVerTs_args) return this.equals((getVerTs_args) that);
                 return false;
               }
           
               public boolean equals(getVerTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_numVersions = true;
                 boolean that_present_numVersions = true;
                 if (this_present_numVersions || that_present_numVersions) {
          -        if (!(this_present_numVersions && that_present_numVersions))
          -          return false;
          -        if (this.numVersions != that.numVersions)
          -          return false;
          +        if (!(this_present_numVersions && that_present_numVersions)) return false;
          +        if (this.numVersions != that.numVersions) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -21381,24 +23789,20 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + numVersions;
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -21456,7 +23860,8 @@ public int compareTo(getVerTs_args other) {
                   return lastComparison;
                 }
                 if (isSetNumVersions()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numVersions, other.numVersions);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.numVersions, other.numVersions);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -21479,11 +23884,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -21542,37 +23949,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getVerTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getVerTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getVerTs_argsStandardScheme getScheme() {
                   return new getVerTs_argsStandardScheme();
                 }
               }
           
          -    private static class getVerTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getVerTs_args> {
          +    private static class getVerTs_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getVerTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -21580,7 +23993,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21588,7 +24001,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21596,7 +24009,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.column = iprot.readBinary();
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21604,7 +24017,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21612,7 +24025,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.numVersions = iprot.readI32();
                           struct.setNumVersionsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21620,11 +24033,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map130 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map130.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key131;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val132;
          -                  for (int _i133 = 0; _i133 < _map130.size; ++_i133)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map130.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key131;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val132;
          +                  for (int _i133 = 0; _i133 < _map130.size; ++_i133) {
                               _key131 = iprot.readBinary();
                               _val132 = iprot.readBinary();
                               struct.attributes.put(_key131, _val132);
          @@ -21632,7 +24048,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struc
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21643,11 +24059,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_args struc
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getVerTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getVerTs_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -21675,9 +24093,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVerTs_args stru
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter134 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter134 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter134.getKey());
                         oprot.writeBinary(_iter134.getValue());
                       }
          @@ -21691,17 +24111,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVerTs_args stru
           
               }
           
          -    private static class getVerTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getVerTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getVerTs_argsTupleScheme getScheme() {
                   return new getVerTs_argsTupleScheme();
                 }
               }
           
          -    private static class getVerTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getVerTs_args> {
          +    private static class getVerTs_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getVerTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -21740,8 +24164,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struc
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter135 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter135 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter135.getKey());
                         oprot.writeBinary(_iter135.getValue());
                       }
          @@ -21750,8 +24174,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struc
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(6);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -21775,12 +24201,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struct
                   }
                   if (incoming.get(5)) {
                     {
          -            org.apache.thrift.protocol.TMap _map136 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map136.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key137;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val138;
          -            for (int _i139 = 0; _i139 < _map136.size; ++_i139)
          -            {
          +            org.apache.thrift.protocol.TMap _map136 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map136.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key137;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val138;
          +            for (int _i139 = 0; _i139 < _map136.size; ++_i139) {
                         _key137 = iprot.readBinary();
                         _val138 = iprot.readBinary();
                         struct.attributes.put(_key137, _val138);
          @@ -21791,29 +24220,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVerTs_args struct
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getVerTs_result implements org.apache.thrift.TBase<getVerTs_result, getVerTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<getVerTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getVerTs_result");
          +  public static class getVerTs_result
          +      implements org.apache.thrift.TBase<getVerTs_result, getVerTs_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getVerTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getVerTs_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getVerTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getVerTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getVerTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getVerTs_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TCell> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -21826,7 +24270,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -21837,12 +24281,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -21874,23 +24318,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVerTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getVerTs_result.class,
          +        metaDataMap);
               }
           
               public getVerTs_result() {
               }
           
          -    public getVerTs_result(
          -      java.util.List<TCell> success,
          -      IOError io)
          -    {
          +    public getVerTs_result(java.util.List<TCell> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -21901,7 +24350,8 @@ public getVerTs_result(
                */
               public getVerTs_result(getVerTs_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TCell> __this__success = new java.util.ArrayList<TCell>(other.success.size());
          +        java.util.List<TCell> __this__success =
          +            new java.util.ArrayList<TCell>(other.success.size());
                   for (TCell other_element : other.success) {
                     __this__success.add(new TCell(other_element));
                   }
               public java.util.List<TCell> getSuccess() {
                 return this.success;
               }
           
          -    public getVerTs_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TCell> success) {
          +    public getVerTs_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TCell> success) {
                 this.success = success;
                 return this;
               }
          @@ -21988,23 +24439,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TCell>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TCell>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -22012,60 +24464,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getVerTs_result)
          -        return this.equals((getVerTs_result)that);
          +      if (that instanceof getVerTs_result) return this.equals((getVerTs_result) that);
                 return false;
               }
           
               public boolean equals(getVerTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -22076,12 +24524,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -22122,13 +24568,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -22161,35 +24609,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getVerTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getVerTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getVerTs_resultStandardScheme getScheme() {
                   return new getVerTs_resultStandardScheme();
                 }
               }
           
          -    private static class getVerTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getVerTs_result> {
          +    private static class getVerTs_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getVerTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -22198,9 +24651,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_result str
                           {
                             org.apache.thrift.protocol.TList _list140 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TCell>(_list140.size);
          -                  @org.apache.thrift.annotation.Nullable TCell _elem141;
          -                  for (int _i142 = 0; _i142 < _list140.size; ++_i142)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TCell _elem141;
          +                  for (int _i142 = 0; _i142 < _list140.size; ++_i142) {
                               _elem141 = new TCell();
                               _elem141.read(iprot);
                               struct.success.add(_elem141);
          @@ -22208,7 +24661,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_result str
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22217,7 +24670,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_result str
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22228,20 +24681,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getVerTs_result str
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getVerTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getVerTs_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TCell _iter143 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TCell _iter143 : struct.success) {
                         _iter143.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -22259,17 +24714,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getVerTs_result st
           
               }
           
          -    private static class getVerTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getVerTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getVerTs_resultTupleScheme getScheme() {
                   return new getVerTs_resultTupleScheme();
                 }
               }
           
          -    private static class getVerTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getVerTs_result> {
          +    private static class getVerTs_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getVerTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -22281,8 +24740,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_result str
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TCell _iter144 : struct.success)
          -            {
          +            for (TCell _iter144 : struct.success) {
                         _iter144.write(oprot);
                       }
                     }
          @@ -22293,16 +24751,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getVerTs_result str
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getVerTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getVerTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list145 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list145 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TCell>(_list145.size);
          -            @org.apache.thrift.annotation.Nullable TCell _elem146;
          -            for (int _i147 = 0; _i147 < _list145.size; ++_i147)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TCell _elem146;
          +            for (int _i147 = 0; _i147 < _list145.size; ++_i147) {
                         _elem146 = new TCell();
                         _elem146.read(iprot);
                         struct.success.add(_elem146);
          @@ -22318,20 +24779,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getVerTs_result stru
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRow_args implements org.apache.thrift.TBase<getRow_args, getRow_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRow_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRow_args");
          +  public static class getRow_args
          +      implements org.apache.thrift.TBase<getRow_args, getRow_args._Fields>, java.io.Serializable,
          +      Cloneable, Comparable<getRow_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRow_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)3);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 3);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRow_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRow_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRow_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRow_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -22344,24 +24819,28 @@ public static class getRow_args implements org.apache.thrift.TBase<getRow_args
          -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)3, "attributes");
          +      ATTRIBUTES((short) 3, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -22374,7 +24853,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -22387,12 +24866,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -22424,27 +24903,36 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRow_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRow_args.class,
          +        metaDataMap);
               }
           
               public getRow_args() {
               }
           
          -    public getRow_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public getRow_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -22462,15 +24950,20 @@ public getRow_args(getRow_args other) {
                   this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
                 }
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -22505,11 +24998,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public getRow_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getRow_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getRow_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -22545,7 +25040,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public getRow_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -22575,7 +25070,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -22584,14 +25079,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
          -    public getRow_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public getRow_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -22611,39 +25107,40 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -22651,74 +25148,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRow_args)
          -        return this.equals((getRow_args)that);
          +      if (that instanceof getRow_args) return this.equals((getRow_args) that);
                 return false;
               }
           
               public boolean equals(getRow_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -22729,16 +25220,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -22789,11 +25277,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -22836,35 +25326,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRow_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRow_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRow_argsStandardScheme getScheme() {
                   return new getRow_argsStandardScheme();
                 }
               }
           
          -    private static class getRow_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRow_args> {
          +    private static class getRow_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRow_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -22872,7 +25367,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22880,7 +25375,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22888,11 +25383,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map148 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map148.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key149;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val150;
          -                  for (int _i151 = 0; _i151 < _map148.size; ++_i151)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map148.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key149;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val150;
          +                  for (int _i151 = 0; _i151 < _map148.size; ++_i151) {
                               _key149 = iprot.readBinary();
                               _val150 = iprot.readBinary();
                               struct.attributes.put(_key149, _val150);
          @@ -22900,7 +25398,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_args struct)
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22911,11 +25409,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_args struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRow_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRow_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -22932,9 +25432,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRow_args struct
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter152 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter152 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter152.getKey());
                         oprot.writeBinary(_iter152.getValue());
                       }
          @@ -22948,17 +25450,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRow_args struct
           
               }
           
          -    private static class getRow_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRow_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRow_argsTupleScheme getScheme() {
                   return new getRow_argsTupleScheme();
                 }
               }
           
          -    private static class getRow_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRow_args> {
          +    private static class getRow_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRow_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRow_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRow_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -22979,8 +25485,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRow_args struct)
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter153 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter153 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter153.getKey());
                         oprot.writeBinary(_iter153.getValue());
                       }
          @@ -22989,8 +25495,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRow_args struct)
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRow_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRow_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -23002,12 +25510,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRow_args struct)
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TMap _map154 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map154.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key155;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val156;
          -            for (int _i157 = 0; _i157 < _map154.size; ++_i157)
          -            {
          +            org.apache.thrift.protocol.TMap _map154 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map154.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key155;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val156;
          +            for (int _i157 = 0; _i157 < _map154.size; ++_i157) {
                         _key155 = iprot.readBinary();
                         _val156 = iprot.readBinary();
                         struct.attributes.put(_key155, _val156);
          @@ -23018,29 +25529,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRow_args struct)
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRow_result implements org.apache.thrift.TBase<getRow_result, getRow_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRow_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRow_result");
          +  public static class getRow_result
          +      implements org.apache.thrift.TBase<getRow_result, getRow_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRow_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRow_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRow_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRow_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRow_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRow_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -23053,7 +25579,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -23064,12 +25590,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -23101,23 +25627,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRow_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRow_result.class,
          +        metaDataMap);
               }
           
               public getRow_result() {
               }
           
          -    public getRow_result(
          -      java.util.List<TRowResult> success,
          -      IOError io)
          -    {
          +    public getRow_result(java.util.List<TRowResult> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -23128,7 +25659,8 @@ public getRow_result(
                */
               public getRow_result(getRow_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
          +        java.util.List<TRowResult> __this__success =
          +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
          @@ -23170,7 +25702,8 @@ public java.util.List<TRowResult> getSuccess() {
                 return this.success;
               }
           
          -    public getRow_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
          +    public getRow_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -23215,23 +25748,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -23239,60 +25773,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRow_result)
          -        return this.equals((getRow_result)that);
          +      if (that instanceof getRow_result) return this.equals((getRow_result) that);
                 return false;
               }
           
               public boolean equals(getRow_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -23303,12 +25833,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -23349,13 +25877,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -23388,35 +25918,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRow_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRow_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRow_resultStandardScheme getScheme() {
                   return new getRow_resultStandardScheme();
                 }
               }
           
           -    private static class getRow_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRow_result> {
           +    private static class getRow_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getRow_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -23425,9 +25960,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_result struc
                           {
                             org.apache.thrift.protocol.TList _list158 = iprot.readListBegin();
                              struct.success = new java.util.ArrayList<TRowResult>(_list158.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem159;
          -                  for (int _i160 = 0; _i160 < _list158.size; ++_i160)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem159;
          +                  for (int _i160 = 0; _i160 < _list158.size; ++_i160) {
                               _elem159 = new TRowResult();
                               _elem159.read(iprot);
                               struct.success.add(_elem159);
          @@ -23435,7 +25970,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_result struc
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -23444,7 +25979,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_result struc
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -23455,20 +25990,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRow_result struc
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRow_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRow_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter161 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter161 : struct.success) {
                         _iter161.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -23486,17 +26023,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRow_result stru
           
               }
           
          -    private static class getRow_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRow_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRow_resultTupleScheme getScheme() {
                   return new getRow_resultTupleScheme();
                 }
               }
           
           -    private static class getRow_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRow_result> {
           +    private static class getRow_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getRow_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRow_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRow_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -23508,8 +26049,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRow_result struc
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter162 : struct.success)
          -            {
          +            for (TRowResult _iter162 : struct.success) {
                         _iter162.write(oprot);
                       }
                     }
          @@ -23520,16 +26060,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRow_result struc
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRow_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRow_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list163 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list163 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TRowResult>(_list163.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem164;
          -            for (int _i165 = 0; _i165 < _list163.size; ++_i165)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem164;
          +            for (int _i165 = 0; _i165 < _list163.size; ++_i165) {
                         _elem164 = new TRowResult();
                         _elem164.read(iprot);
                         struct.success.add(_elem164);
          @@ -23545,21 +26088,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRow_result struct
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getRowWithColumns_args implements org.apache.thrift.TBase<getRowWithColumns_args, getRowWithColumns_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRowWithColumns_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowWithColumns_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowWithColumns_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowWithColumns_argsTupleSchemeFactory();
           +  public static class getRowWithColumns_args
           +      implements org.apache.thrift.TBase<getRowWithColumns_args, getRowWithColumns_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getRowWithColumns_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowWithColumns_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowWithColumns_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowWithColumns_argsTupleSchemeFactory();
           
               /**
                * name of table
           @@ -23576,28 +26135,32 @@ public static class getRowWithColumns_args implements org.apache.thrift.TBase<g
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * List of columns to return, null for all columns
                  */
          -      COLUMNS((short)3, "columns"),
          +      COLUMNS((short) 3, "columns"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -23610,7 +26173,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -23625,12 +26188,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -23662,31 +26225,43 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMNS,
          +        new org.apache.thrift.meta_data.FieldMetaData("columns",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowWithColumns_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowWithColumns_args.class,
          +        metaDataMap);
               }
           
               public getRowWithColumns_args() {
               }
           
          -    public getRowWithColumns_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
           -      java.util.List<java.nio.ByteBuffer> columns,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public getRowWithColumns_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.util.List<java.nio.ByteBuffer> columns,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -23705,22 +26280,28 @@ public getRowWithColumns_args(getRowWithColumns_args other) {
                   this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
                 }
                 if (other.isSetColumns()) {
           -        java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
           +        java.util.List<java.nio.ByteBuffer> __this__columns =
           +            new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                   for (java.nio.ByteBuffer other_element : other.columns) {
                     __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
                   this.columns = __this__columns;
                 }
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
           +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -23756,11 +26337,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public getRowWithColumns_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getRowWithColumns_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getRowWithColumns_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -23796,11 +26379,12 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public getRowWithColumns_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          -    public getRowWithColumns_args setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
          +    public getRowWithColumns_args
          +        setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
                 return this;
               }
           @@ -23847,7 +26431,8 @@ public java.util.List<java.nio.ByteBuffer> getColumns() {
               /**
                * List of columns to return, null for all columns
                */
           -    public getRowWithColumns_args setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
           +    public getRowWithColumns_args setColumns(
           +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
                 this.columns = columns;
                 return this;
               }
          @@ -23873,7 +26458,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -23882,14 +26467,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
           -    public getRowWithColumns_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +    public getRowWithColumns_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -23909,47 +26495,48 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMNS:
          -        if (value == null) {
          -          unsetColumns();
          -        } else {
           -          setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case COLUMNS:
          +          if (value == null) {
          +            unsetColumns();
          +          } else {
           +            setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -23957,88 +26544,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case COLUMNS:
          -        return getColumns();
          +        case COLUMNS:
          +          return getColumns();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case COLUMNS:
          -        return isSetColumns();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case COLUMNS:
          +          return isSetColumns();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRowWithColumns_args)
          -        return this.equals((getRowWithColumns_args)that);
          +      if (that instanceof getRowWithColumns_args) return this.equals((getRowWithColumns_args) that);
                 return false;
               }
           
               public boolean equals(getRowWithColumns_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_columns = true && this.isSetColumns();
                 boolean that_present_columns = true && that.isSetColumns();
                 if (this_present_columns || that_present_columns) {
          -        if (!(this_present_columns && that_present_columns))
          -          return false;
          -        if (!this.columns.equals(that.columns))
          -          return false;
          +        if (!(this_present_columns && that_present_columns)) return false;
          +        if (!this.columns.equals(that.columns)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -24049,20 +26628,16 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -      if (isSetColumns())
          -        hashCode = hashCode * 8191 + columns.hashCode();
          +      if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -24123,11 +26698,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -24178,35 +26755,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowWithColumns_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowWithColumns_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowWithColumns_argsStandardScheme getScheme() {
                   return new getRowWithColumns_argsStandardScheme();
                 }
               }
           
           -    private static class getRowWithColumns_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowWithColumns_args> {
           +    private static class getRowWithColumns_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getRowWithColumns_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -24214,7 +26796,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_a
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -24222,7 +26804,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_a
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -24231,16 +26813,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_a
                           {
                             org.apache.thrift.protocol.TList _list166 = iprot.readListBegin();
                              struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list166.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem167;
          -                  for (int _i168 = 0; _i168 < _list166.size; ++_i168)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem167;
          +                  for (int _i168 = 0; _i168 < _list166.size; ++_i168) {
                               _elem167 = iprot.readBinary();
                               struct.columns.add(_elem167);
                             }
                             iprot.readListEnd();
                           }
                           struct.setColumnsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -24248,11 +26830,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_a
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map169 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map169.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key170;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val171;
          -                  for (int _i172 = 0; _i172 < _map169.size; ++_i172)
          -                  {
           +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                          2 * _map169.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key170;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val171;
          +                  for (int _i172 = 0; _i172 < _map169.size; ++_i172) {
                               _key170 = iprot.readBinary();
                               _val171 = iprot.readBinary();
                               struct.attributes.put(_key170, _val171);
          @@ -24260,7 +26845,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_a
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -24271,11 +26856,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_a
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -24292,9 +26879,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_
                   if (struct.columns != null) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter173 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter173 : struct.columns) {
                         oprot.writeBinary(_iter173);
                       }
                       oprot.writeListEnd();
          @@ -24304,9 +26891,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> _iter174 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter174 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter174.getKey());
                         oprot.writeBinary(_iter174.getValue());
                       }
          @@ -24320,17 +26909,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_
           
               }
           
          -    private static class getRowWithColumns_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowWithColumns_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowWithColumns_argsTupleScheme getScheme() {
                   return new getRowWithColumns_argsTupleScheme();
                 }
               }
           
           -    private static class getRowWithColumns_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowWithColumns_args> {
           +    private static class getRowWithColumns_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getRowWithColumns_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -24354,8 +26947,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_a
                   if (struct.isSetColumns()) {
                     {
                       oprot.writeI32(struct.columns.size());
          -            for (java.nio.ByteBuffer _iter175 : struct.columns)
          -            {
          +            for (java.nio.ByteBuffer _iter175 : struct.columns) {
                         oprot.writeBinary(_iter175);
                       }
                     }
          @@ -24363,8 +26955,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_a
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> _iter176 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter176 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter176.getKey());
                         oprot.writeBinary(_iter176.getValue());
                       }
          @@ -24373,8 +26965,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_a
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -24386,11 +26980,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_ar
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TList _list177 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list177 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                        struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list177.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem178;
          -            for (int _i179 = 0; _i179 < _list177.size; ++_i179)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem178;
          +            for (int _i179 = 0; _i179 < _list177.size; ++_i179) {
                         _elem178 = iprot.readBinary();
                         struct.columns.add(_elem178);
                       }
          @@ -24399,12 +26994,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_ar
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map180 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map180.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key181;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val182;
          -            for (int _i183 = 0; _i183 < _map180.size; ++_i183)
          -            {
          +            org.apache.thrift.protocol.TMap _map180 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
           +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map180.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key181;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val182;
          +            for (int _i183 = 0; _i183 < _map180.size; ++_i183) {
                         _key181 = iprot.readBinary();
                         _val182 = iprot.readBinary();
                         struct.attributes.put(_key181, _val182);
          @@ -24415,29 +27013,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_ar
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowWithColumns_result implements org.apache.thrift.TBase<getRowWithColumns_result, getRowWithColumns_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRowWithColumns_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowWithColumns_result");
          +  public static class getRowWithColumns_result implements
          +      org.apache.thrift.TBase<getRowWithColumns_result, getRowWithColumns_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowWithColumns_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowWithColumns_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowWithColumns_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowWithColumns_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowWithColumns_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowWithColumns_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -24450,7 +27063,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -24461,12 +27074,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -24498,23 +27111,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowWithColumns_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowWithColumns_result.class,
          +        metaDataMap);
               }
           
               public getRowWithColumns_result() {
               }
           
          -    public getRowWithColumns_result(
          -      java.util.List<TRowResult> success,
          -      IOError io)
          -    {
          +    public getRowWithColumns_result(java.util.List<TRowResult> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -24525,7 +27143,8 @@ public getRowWithColumns_result(
                */
               public getRowWithColumns_result(getRowWithColumns_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
          +        java.util.List<TRowResult> __this__success =
          +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
          @@ -24567,7 +27186,8 @@ public java.util.List<TRowResult> getSuccess() {
                 return this.success;
               }
           
          -    public getRowWithColumns_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
          +    public getRowWithColumns_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -24612,23 +27232,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -24636,27 +27257,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -24664,32 +27288,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getRowWithColumns_result)
          -        return this.equals((getRowWithColumns_result)that);
          +        return this.equals((getRowWithColumns_result) that);
                 return false;
               }
           
               public boolean equals(getRowWithColumns_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -24700,12 +27318,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -24746,13 +27362,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -24785,35 +27403,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowWithColumns_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowWithColumns_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowWithColumns_resultStandardScheme getScheme() {
                   return new getRowWithColumns_resultStandardScheme();
                 }
               }
           
          -    private static class getRowWithColumns_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowWithColumns_result> {
          +    private static class getRowWithColumns_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRowWithColumns_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -24822,9 +27445,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_r
                           {
                             org.apache.thrift.protocol.TList _list184 = iprot.readListBegin();
                         struct.success = new java.util.ArrayList<TRowResult>(_list184.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem185;
          -                  for (int _i186 = 0; _i186 < _list184.size; ++_i186)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem185;
          +                  for (int _i186 = 0; _i186 < _list184.size; ++_i186) {
                               _elem185 = new TRowResult();
                               _elem185.read(iprot);
                               struct.success.add(_elem185);
          @@ -24832,7 +27455,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_r
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -24841,7 +27464,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_r
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -24852,20 +27475,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumns_r
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter187 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter187 : struct.success) {
                         _iter187.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -24883,17 +27508,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumns_
           
               }
           
          -    private static class getRowWithColumns_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowWithColumns_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowWithColumns_resultTupleScheme getScheme() {
                   return new getRowWithColumns_resultTupleScheme();
                 }
               }
           
          -    private static class getRowWithColumns_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowWithColumns_result> {
          +    private static class getRowWithColumns_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRowWithColumns_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -24905,8 +27534,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_r
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter188 : struct.success)
          -            {
          +            for (TRowResult _iter188 : struct.success) {
                         _iter188.write(oprot);
                       }
                     }
          @@ -24917,16 +27545,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_r
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list189 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list189 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TRowResult>(_list189.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem190;
          -            for (int _i191 = 0; _i191 < _list189.size; ++_i191)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem190;
          +            for (int _i191 = 0; _i191 < _list189.size; ++_i191) {
                         _elem190 = new TRowResult();
                         _elem190.read(iprot);
                         struct.success.add(_elem190);
          @@ -24942,21 +27573,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumns_re
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowTs_args implements org.apache.thrift.TBase<getRowTs_args, getRowTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRowTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowTs_argsTupleSchemeFactory();
          +  public static class getRowTs_args
          +      implements org.apache.thrift.TBase<getRowTs_args, getRowTs_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowTs_argsTupleSchemeFactory();
           
               /**
                * name of the table
          @@ -24973,28 +27620,32 @@ public static class getRowTs_args implements org.apache.thrift.TBase
          -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of the table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * timestamp
                  */
          -      TIMESTAMP((short)3, "timestamp"),
          +      TIMESTAMP((short) 3, "timestamp"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -25007,7 +27658,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -25022,12 +27673,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -25061,30 +27712,41 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowTs_args.class,
          +        metaDataMap);
               }
           
               public getRowTs_args() {
               }
           
          -    public getRowTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      long timestamp,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public getRowTs_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row, long timestamp,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -25106,15 +27768,20 @@ public getRowTs_args(getRowTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -25151,11 +27818,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of the table
                */
               public getRowTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getRowTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getRowTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -25191,7 +27860,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public getRowTs_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -25232,7 +27901,8 @@ public getRowTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -25241,7 +27911,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -25250,7 +27921,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -25259,14 +27930,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
          -    public getRowTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public getRowTs_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -25286,47 +27958,48 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -25334,88 +28007,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRowTs_args)
          -        return this.equals((getRowTs_args)that);
          +      if (that instanceof getRowTs_args) return this.equals((getRowTs_args) that);
                 return false;
               }
           
               public boolean equals(getRowTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -25426,18 +28091,15 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -25498,11 +28160,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -25549,37 +28213,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowTs_argsStandardScheme getScheme() {
                   return new getRowTs_argsStandardScheme();
                 }
               }
           
          -    private static class getRowTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme {
          +    private static class getRowTs_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -25587,7 +28257,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25595,7 +28265,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25603,7 +28273,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25611,11 +28281,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_args struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map192 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map192.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key193;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val194;
          -                  for (int _i195 = 0; _i195 < _map192.size; ++_i195)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map192.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key193;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val194;
          +                  for (int _i195 = 0; _i195 < _map192.size; ++_i195) {
                               _key193 = iprot.readBinary();
                               _val194 = iprot.readBinary();
                               struct.attributes.put(_key193, _val194);
          @@ -25623,7 +28296,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_args struc
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25634,11 +28307,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_args struc
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowTs_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -25658,9 +28333,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowTs_args stru
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry _iter196 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry _iter196 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter196.getKey());
                         oprot.writeBinary(_iter196.getValue());
                       }
          @@ -25674,17 +28351,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowTs_args stru
           
               }
           
          -    private static class getRowTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowTs_argsTupleScheme getScheme() {
                   return new getRowTs_argsTupleScheme();
                 }
               }
           
          -    private static class getRowTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowTs_args> {
          +    private static class getRowTs_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRowTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -25711,8 +28392,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struc
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry _iter197 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry _iter197 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter197.getKey());
                         oprot.writeBinary(_iter197.getValue());
                       }
          @@ -25721,8 +28402,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struc
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -25738,12 +28421,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struct
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map198 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map198.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key199;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val200;
          -            for (int _i201 = 0; _i201 < _map198.size; ++_i201)
          -            {
          +            org.apache.thrift.protocol.TMap _map198 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map198.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key199;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val200;
          +            for (int _i201 = 0; _i201 < _map198.size; ++_i201) {
                         _key199 = iprot.readBinary();
                         _val200 = iprot.readBinary();
                         struct.attributes.put(_key199, _val200);
          @@ -25754,29 +28440,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowTs_args struct
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowTs_result implements org.apache.thrift.TBase<getRowTs_result, getRowTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRowTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowTs_result");
          +  public static class getRowTs_result
          +      implements org.apache.thrift.TBase<getRowTs_result, getRowTs_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowTs_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowTs_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -25789,7 +28490,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -25800,12 +28501,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -25837,23 +28538,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowTs_result.class,
          +        metaDataMap);
               }
           
               public getRowTs_result() {
               }
           
          -    public getRowTs_result(
          -      java.util.List<TRowResult> success,
          -      IOError io)
          -    {
          +    public getRowTs_result(java.util.List<TRowResult> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -25864,7 +28570,8 @@ public getRowTs_result(
                */
               public getRowTs_result(getRowTs_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
          +        java.util.List<TRowResult> __this__success =
          +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
          @@ -25906,7 +28613,8 @@ public java.util.List getSuccess() {
                 return this.success;
               }
           
          -    public getRowTs_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
          +    public getRowTs_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -25951,23 +28659,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -25975,60 +28684,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRowTs_result)
          -        return this.equals((getRowTs_result)that);
          +      if (that instanceof getRowTs_result) return this.equals((getRowTs_result) that);
                 return false;
               }
           
               public boolean equals(getRowTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -26039,12 +28744,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -26085,13 +28788,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -26124,35 +28829,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowTs_resultStandardScheme getScheme() {
                   return new getRowTs_resultStandardScheme();
                 }
               }
           
          -    private static class getRowTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowTs_result> {
          +    private static class getRowTs_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRowTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -26161,9 +28871,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_result str
                           {
                             org.apache.thrift.protocol.TList _list202 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TRowResult>(_list202.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem203;
          -                  for (int _i204 = 0; _i204 < _list202.size; ++_i204)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem203;
          +                  for (int _i204 = 0; _i204 < _list202.size; ++_i204) {
                               _elem203 = new TRowResult();
                               _elem203.read(iprot);
                               struct.success.add(_elem203);
          @@ -26171,7 +28881,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_result str
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -26180,7 +28890,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_result str
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -26191,20 +28901,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowTs_result str
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowTs_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter205 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter205 : struct.success) {
                         _iter205.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -26222,17 +28934,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowTs_result st
           
               }
           
          -    private static class getRowTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowTs_resultTupleScheme getScheme() {
                   return new getRowTs_resultTupleScheme();
                 }
               }
           
          -    private static class getRowTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowTs_result> {
          +    private static class getRowTs_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRowTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -26244,8 +28960,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_result str
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter206 : struct.success)
          -            {
          +            for (TRowResult _iter206 : struct.success) {
                         _iter206.write(oprot);
                       }
                     }
          @@ -26256,16 +28971,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowTs_result str
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list207 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list207 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TRowResult>(_list207.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem208;
          -            for (int _i209 = 0; _i209 < _list207.size; ++_i209)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem208;
          +            for (int _i209 = 0; _i209 < _list207.size; ++_i209) {
                         _elem208 = new TRowResult();
                         _elem208.read(iprot);
                         struct.success.add(_elem208);
          @@ -26281,22 +28999,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowTs_result stru
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowWithColumnsTs_args implements org.apache.thrift.TBase<getRowWithColumnsTs_args, getRowWithColumnsTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRowWithColumnsTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowWithColumnsTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)4);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)5);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowWithColumnsTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowWithColumnsTs_argsTupleSchemeFactory();
          +  public static class getRowWithColumnsTs_args implements
          +      org.apache.thrift.TBase<getRowWithColumnsTs_args, getRowWithColumnsTs_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowWithColumnsTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowWithColumnsTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 5);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowWithColumnsTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowWithColumnsTs_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -26314,29 +29050,32 @@ public static class getRowWithColumnsTs_args implements org.apache.thrift.TBase<
               /**
                * Get attributes
                */
          -    public @org.apache.thrift.annotation.Nullable java.util.Map attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * List of columns to return, null for all columns
                  */
          -      COLUMNS((short)3, "columns"),
          -      TIMESTAMP((short)4, "timestamp"),
          +      COLUMNS((short) 3, "columns"), TIMESTAMP((short) 4, "timestamp"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)5, "attributes");
          +      ATTRIBUTES((short) 5, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -26349,7 +29088,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -26366,12 +29105,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -26405,34 +29144,48 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMNS,
          +        new org.apache.thrift.meta_data.FieldMetaData("columns",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowWithColumnsTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowWithColumnsTs_args.class,
          +        metaDataMap);
               }
           
               public getRowWithColumnsTs_args() {
               }
           
          -    public getRowWithColumnsTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.util.List<java.nio.ByteBuffer> columns,
          -      long timestamp,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public getRowWithColumnsTs_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List<java.nio.ByteBuffer> columns, long timestamp,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -26454,7 +29207,8 @@ public getRowWithColumnsTs_args(getRowWithColumnsTs_args other) {
                   this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
                 }
                 if (other.isSetColumns()) {
          -        java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
          +        java.util.List<java.nio.ByteBuffer> __this__columns =
          +            new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                   for (java.nio.ByteBuffer other_element : other.columns) {
                     __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
          @@ -26462,15 +29216,20 @@ public getRowWithColumnsTs_args(getRowWithColumnsTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -26508,11 +29267,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public getRowWithColumnsTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getRowWithColumnsTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getRowWithColumnsTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -26548,11 +29309,12 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public getRowWithColumnsTs_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          -    public getRowWithColumnsTs_args setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
          +    public getRowWithColumnsTs_args
          +        setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
                 return this;
               }
          @@ -26599,7 +29361,8 @@ public java.util.List getColumns() {
               /**
                * List of columns to return, null for all columns
                */
          -    public getRowWithColumnsTs_args setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
          +    public getRowWithColumnsTs_args setColumns(
          +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
                 this.columns = columns;
                 return this;
               }
          @@ -26630,7 +29393,8 @@ public getRowWithColumnsTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -26639,7 +29403,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -26648,7 +29413,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -26657,14 +29422,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
          -    public getRowWithColumnsTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map attributes) {
          +    public getRowWithColumnsTs_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -26684,55 +29450,56 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMNS:
          -        if (value == null) {
          -          unsetColumns();
          -        } else {
          -          setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case COLUMNS:
          +          if (value == null) {
          +            unsetColumns();
          +          } else {
          +            setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -26740,42 +29507,45 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case COLUMNS:
          -        return getColumns();
          +        case COLUMNS:
          +          return getColumns();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case COLUMNS:
          -        return isSetColumns();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case COLUMNS:
          +          return isSetColumns();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -26783,59 +29553,47 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getRowWithColumnsTs_args)
          -        return this.equals((getRowWithColumnsTs_args)that);
          +        return this.equals((getRowWithColumnsTs_args) that);
                 return false;
               }
           
               public boolean equals(getRowWithColumnsTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_columns = true && this.isSetColumns();
                 boolean that_present_columns = true && that.isSetColumns();
                 if (this_present_columns || that_present_columns) {
          -        if (!(this_present_columns && that_present_columns))
          -          return false;
          -        if (!this.columns.equals(that.columns))
          -          return false;
          +        if (!(this_present_columns && that_present_columns)) return false;
          +        if (!this.columns.equals(that.columns)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -26846,22 +29604,18 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -      if (isSetColumns())
          -        hashCode = hashCode * 8191 + columns.hashCode();
          +      if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -26932,11 +29686,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -26991,37 +29747,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowWithColumnsTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowWithColumnsTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowWithColumnsTs_argsStandardScheme getScheme() {
                   return new getRowWithColumnsTs_argsStandardScheme();
                 }
               }
           
          -    private static class getRowWithColumnsTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowWithColumnsTs_args> {
          +    private static class getRowWithColumnsTs_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRowWithColumnsTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -27029,7 +29791,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27037,7 +29799,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27046,16 +29808,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                           {
                             org.apache.thrift.protocol.TList _list210 = iprot.readListBegin();
                             struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list210.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem211;
          -                  for (int _i212 = 0; _i212 < _list210.size; ++_i212)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem211;
          +                  for (int _i212 = 0; _i212 < _list210.size; ++_i212) {
                               _elem211 = iprot.readBinary();
                               struct.columns.add(_elem211);
                             }
                             iprot.readListEnd();
                           }
                           struct.setColumnsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27063,7 +29825,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27071,11 +29833,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map213 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map213.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key214;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val215;
          -                  for (int _i216 = 0; _i216 < _map213.size; ++_i216)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map213.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key214;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val215;
          +                  for (int _i216 = 0; _i216 < _map213.size; ++_i216) {
                               _key214 = iprot.readBinary();
                               _val215 = iprot.readBinary();
                               struct.attributes.put(_key214, _val215);
          @@ -27083,7 +29848,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27094,11 +29859,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsTs_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -27115,9 +29882,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsT
                   if (struct.columns != null) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter217 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter217 : struct.columns) {
                         oprot.writeBinary(_iter217);
                       }
                       oprot.writeListEnd();
          @@ -27130,9 +29897,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsT
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter218 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter218 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter218.getKey());
                         oprot.writeBinary(_iter218.getValue());
                       }
          @@ -27146,17 +29915,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsT
           
               }
           
          -    private static class getRowWithColumnsTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowWithColumnsTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowWithColumnsTs_argsTupleScheme getScheme() {
                   return new getRowWithColumnsTs_argsTupleScheme();
                 }
               }
           
          -    private static class getRowWithColumnsTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowWithColumnsTs_args> {
          +    private static class getRowWithColumnsTs_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRowWithColumnsTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -27183,8 +29956,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs
                   if (struct.isSetColumns()) {
                     {
                       oprot.writeI32(struct.columns.size());
          -            for (java.nio.ByteBuffer _iter219 : struct.columns)
          -            {
          +            for (java.nio.ByteBuffer _iter219 : struct.columns) {
                         oprot.writeBinary(_iter219);
                       }
                     }
          @@ -27195,8 +29967,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter220 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter220 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter220.getKey());
                         oprot.writeBinary(_iter220.getValue());
                       }
          @@ -27205,8 +29977,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(5);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -27218,11 +29992,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TList _list221 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list221 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                       struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list221.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem222;
          -            for (int _i223 = 0; _i223 < _list221.size; ++_i223)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem222;
          +            for (int _i223 = 0; _i223 < _list221.size; ++_i223) {
                         _elem222 = iprot.readBinary();
                         struct.columns.add(_elem222);
                       }
          @@ -27235,12 +30010,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_
                   }
                   if (incoming.get(4)) {
                     {
          -            org.apache.thrift.protocol.TMap _map224 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map224.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key225;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val226;
          -            for (int _i227 = 0; _i227 < _map224.size; ++_i227)
          -            {
          +            org.apache.thrift.protocol.TMap _map224 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map224.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key225;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val226;
          +            for (int _i227 = 0; _i227 < _map224.size; ++_i227) {
                         _key225 = iprot.readBinary();
                         _val226 = iprot.readBinary();
                         struct.attributes.put(_key225, _val226);
          @@ -27251,29 +30029,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowWithColumnsTs_result implements org.apache.thrift.TBase<getRowWithColumnsTs_result, getRowWithColumnsTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRowWithColumnsTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowWithColumnsTs_result");
          +  public static class getRowWithColumnsTs_result implements
          +      org.apache.thrift.TBase<getRowWithColumnsTs_result, getRowWithColumnsTs_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowWithColumnsTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowWithColumnsTs_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowWithColumnsTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowWithColumnsTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowWithColumnsTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowWithColumnsTs_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -27286,7 +30079,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -27297,12 +30090,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -27334,23 +30127,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowWithColumnsTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getRowWithColumnsTs_result.class, metaDataMap);
               }
           
               public getRowWithColumnsTs_result() {
               }
           
          -    public getRowWithColumnsTs_result(
          -      java.util.List<TRowResult> success,
          -      IOError io)
          -    {
          +    public getRowWithColumnsTs_result(java.util.List<TRowResult> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -27361,7 +30159,8 @@ public getRowWithColumnsTs_result(
                */
               public getRowWithColumnsTs_result(getRowWithColumnsTs_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
          +        java.util.List<TRowResult> __this__success =
          +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
          @@ -27403,7 +30202,8 @@ public java.util.List<TRowResult> getSuccess() {
                 return this.success;
               }
           
          -    public getRowWithColumnsTs_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
          +    public getRowWithColumnsTs_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -27448,23 +30248,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -            setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -27472,27 +30273,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -27500,32 +30304,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getRowWithColumnsTs_result)
          -        return this.equals((getRowWithColumnsTs_result)that);
          +        return this.equals((getRowWithColumnsTs_result) that);
                 return false;
               }
           
               public boolean equals(getRowWithColumnsTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -27536,12 +30334,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -27582,13 +30378,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -27621,35 +30419,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowWithColumnsTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowWithColumnsTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowWithColumnsTs_resultStandardScheme getScheme() {
                   return new getRowWithColumnsTs_resultStandardScheme();
                 }
               }
           
          -    private static class getRowWithColumnsTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowWithColumnsTs_result> {
          +    private static class getRowWithColumnsTs_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRowWithColumnsTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getRowWithColumnsTs_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -27658,9 +30461,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                           {
                             org.apache.thrift.protocol.TList _list228 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TRowResult>(_list228.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem229;
          -                  for (int _i230 = 0; _i230 < _list228.size; ++_i230)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem229;
          +                  for (int _i230 = 0; _i230 < _list228.size; ++_i230) {
                               _elem229 = new TRowResult();
                               _elem229.read(iprot);
                               struct.success.add(_elem229);
          @@ -27668,7 +30471,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27677,7 +30480,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27688,20 +30491,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowWithColumnsTs
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getRowWithColumnsTs_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter231 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter231 : struct.success) {
                         _iter231.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -27719,17 +30524,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowWithColumnsT
           
               }
           
          -    private static class getRowWithColumnsTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowWithColumnsTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowWithColumnsTs_resultTupleScheme getScheme() {
                   return new getRowWithColumnsTs_resultTupleScheme();
                 }
               }
           
          -    private static class getRowWithColumnsTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowWithColumnsTs_result> {
          +    private static class getRowWithColumnsTs_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRowWithColumnsTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getRowWithColumnsTs_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -27741,8 +30550,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter232 : struct.success)
          -            {
          +            for (TRowResult _iter232 : struct.success) {
                         _iter232.write(oprot);
                       }
                     }
          @@ -27753,16 +30561,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list233 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list233 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TRowResult>(_list233.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem234;
          -            for (int _i235 = 0; _i235 < _list233.size; ++_i235)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem234;
          +            for (int _i235 = 0; _i235 < _list233.size; ++_i235) {
                         _elem234 = new TRowResult();
                         _elem234.read(iprot);
                         struct.success.add(_elem234);
          @@ -27778,20 +30589,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowWithColumnsTs_
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRows_args implements org.apache.thrift.TBase<getRows_args, getRows_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRows_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRows_args");
          +  public static class getRows_args
          +      implements org.apache.thrift.TBase<getRows_args, getRows_args._Fields>, java.io.Serializable,
          +      Cloneable, Comparable<getRows_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRows_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST, (short)2);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)3);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 3);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRows_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRows_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRows_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRows_argsTupleSchemeFactory();
           
               /**
                * name of table
           @@ -27804,24 +30629,28 @@ public static class getRows_args implements org.apache.thrift.TBase
                /**
                 * Get attributes
                 */
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row keys
                  */
          -      ROWS((short)2, "rows"),
          +      ROWS((short) 2, "rows"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)3, "attributes");
          +      ATTRIBUTES((short) 3, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -27834,7 +30663,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROWS
          @@ -27847,12 +30676,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -27884,28 +30713,37 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROWS, new org.apache.thrift.meta_data.FieldMetaData("rows", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROWS,
          +        new org.apache.thrift.meta_data.FieldMetaData("rows",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRows_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRows_args.class,
          +        metaDataMap);
               }
           
               public getRows_args() {
               }
           
          -    public getRows_args(
          -      java.nio.ByteBuffer tableName,
           -      java.util.List<java.nio.ByteBuffer> rows,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
           +    public getRows_args(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.rows = rows;
          @@ -27920,22 +30758,28 @@ public getRows_args(getRows_args other) {
                   this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName);
                 }
                 if (other.isSetRows()) {
           -        java.util.List<java.nio.ByteBuffer> __this__rows = new java.util.ArrayList<java.nio.ByteBuffer>(other.rows.size());
           +        java.util.List<java.nio.ByteBuffer> __this__rows =
           +            new java.util.ArrayList<java.nio.ByteBuffer>(other.rows.size());
                   for (java.nio.ByteBuffer other_element : other.rows) {
                     __this__rows.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
                   this.rows = __this__rows;
                 }
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -27970,11 +30814,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public getRows_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getRows_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getRows_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
           @@ -28021,7 +30867,8 @@ public java.util.List<java.nio.ByteBuffer> getRows() {
               /**
                * row keys
                */
           -    public getRows_args setRows(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> rows) {
          +    public getRows_args
           +        setRows(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> rows) {
                 this.rows = rows;
                 return this;
               }
          @@ -28047,7 +30894,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -28056,14 +30903,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
           -    public getRows_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public getRows_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -28083,35 +30931,36 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROWS:
          -        if (value == null) {
          -          unsetRows();
          -        } else {
           -          setRows((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ROWS:
          +          if (value == null) {
          +            unsetRows();
          +          } else {
           +            setRows((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -28119,74 +30968,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROWS:
          -        return getRows();
          +        case ROWS:
          +          return getRows();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROWS:
          -        return isSetRows();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROWS:
          +          return isSetRows();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRows_args)
          -        return this.equals((getRows_args)that);
          +      if (that instanceof getRows_args) return this.equals((getRows_args) that);
                 return false;
               }
           
               public boolean equals(getRows_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_rows = true && this.isSetRows();
                 boolean that_present_rows = true && that.isSetRows();
                 if (this_present_rows || that_present_rows) {
          -        if (!(this_present_rows && that_present_rows))
          -          return false;
          -        if (!this.rows.equals(that.rows))
          -          return false;
          +        if (!(this_present_rows && that_present_rows)) return false;
          +        if (!this.rows.equals(that.rows)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -28197,16 +31040,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRows()) ? 131071 : 524287);
          -      if (isSetRows())
          -        hashCode = hashCode * 8191 + rows.hashCode();
          +      if (isSetRows()) hashCode = hashCode * 8191 + rows.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -28257,11 +31097,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -28304,35 +31146,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRows_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRows_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRows_argsStandardScheme getScheme() {
                   return new getRows_argsStandardScheme();
                 }
               }
           
           -    private static class getRows_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRows_args> {
          +    private static class getRows_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getRows_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -28340,7 +31187,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_args struct
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -28349,16 +31196,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_args struct
                           {
                             org.apache.thrift.protocol.TList _list236 = iprot.readListBegin();
                           struct.rows = new java.util.ArrayList<java.nio.ByteBuffer>(_list236.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem237;
          -                  for (int _i238 = 0; _i238 < _list236.size; ++_i238)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem237;
          +                  for (int _i238 = 0; _i238 < _list236.size; ++_i238) {
                               _elem237 = iprot.readBinary();
                               struct.rows.add(_elem237);
                             }
                             iprot.readListEnd();
                           }
                           struct.setRowsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -28366,11 +31213,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_args struct
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map239 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map239.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key240;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val241;
          -                  for (int _i242 = 0; _i242 < _map239.size; ++_i242)
          -                  {
          +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map239.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key240;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val241;
          +                  for (int _i242 = 0; _i242 < _map239.size; ++_i242) {
                               _key240 = iprot.readBinary();
                               _val241 = iprot.readBinary();
                               struct.attributes.put(_key240, _val241);
          @@ -28378,7 +31228,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_args struct
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -28389,11 +31239,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_args struct
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -28405,9 +31257,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_args struc
                   if (struct.rows != null) {
                     oprot.writeFieldBegin(ROWS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.rows.size()));
          -            for (java.nio.ByteBuffer _iter243 : struct.rows)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.rows.size()));
          +            for (java.nio.ByteBuffer _iter243 : struct.rows) {
                         oprot.writeBinary(_iter243);
                       }
                       oprot.writeListEnd();
          @@ -28417,9 +31269,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_args struc
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter244 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter244 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter244.getKey());
                         oprot.writeBinary(_iter244.getValue());
                       }
          @@ -28433,17 +31287,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_args struc
           
               }
           
          -    private static class getRows_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRows_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRows_argsTupleScheme getScheme() {
                   return new getRows_argsTupleScheme();
                 }
               }
           
           -    private static class getRows_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRows_args> {
          +    private static class getRows_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getRows_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRows_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRows_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -28461,8 +31319,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRows_args struct
                   if (struct.isSetRows()) {
                     {
                       oprot.writeI32(struct.rows.size());
          -            for (java.nio.ByteBuffer _iter245 : struct.rows)
          -            {
          +            for (java.nio.ByteBuffer _iter245 : struct.rows) {
                         oprot.writeBinary(_iter245);
                       }
                     }
          @@ -28470,8 +31327,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRows_args struct
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter246 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter246 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter246.getKey());
                         oprot.writeBinary(_iter246.getValue());
                       }
          @@ -28480,8 +31337,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRows_args struct
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRows_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRows_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -28489,11 +31348,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRows_args struct)
                   }
                   if (incoming.get(1)) {
                     {
          -            org.apache.thrift.protocol.TList _list247 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list247 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                        struct.rows = new java.util.ArrayList<java.nio.ByteBuffer>(_list247.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem248;
          -            for (int _i249 = 0; _i249 < _list247.size; ++_i249)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem248;
          +            for (int _i249 = 0; _i249 < _list247.size; ++_i249) {
                         _elem248 = iprot.readBinary();
                         struct.rows.add(_elem248);
                       }
          @@ -28502,12 +31362,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRows_args struct)
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TMap _map250 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map250.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key251;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val252;
          -            for (int _i253 = 0; _i253 < _map250.size; ++_i253)
          -            {
          +            org.apache.thrift.protocol.TMap _map250 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map250.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key251;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val252;
          +            for (int _i253 = 0; _i253 < _map250.size; ++_i253) {
                         _key251 = iprot.readBinary();
                         _val252 = iprot.readBinary();
                         struct.attributes.put(_key251, _val252);
          @@ -28518,29 +31381,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRows_args struct)
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getRows_result implements org.apache.thrift.TBase<getRows_result, getRows_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRows_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRows_result");
          +  public static class getRows_result
           +      implements org.apache.thrift.TBase<getRows_result, getRows_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getRows_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRows_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRows_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRows_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRows_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRows_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -28553,7 +31431,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -28564,12 +31442,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -28601,23 +31479,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRows_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRows_result.class,
          +        metaDataMap);
               }
           
               public getRows_result() {
               }
           
          -    public getRows_result(
           -      java.util.List<TRowResult> success,
          -      IOError io)
          -    {
           +    public getRows_result(java.util.List<TRowResult> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -28628,7 +31511,8 @@ public getRows_result(
                */
               public getRows_result(getRows_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
           +        java.util.List<TRowResult> __this__success =
           +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
           @@ -28670,7 +31554,8 @@ public java.util.List<TRowResult> getSuccess() {
                 return this.success;
               }
           
           -    public getRows_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
          +    public getRows_result
           +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -28715,23 +31600,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -28739,60 +31625,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRows_result)
          -        return this.equals((getRows_result)that);
          +      if (that instanceof getRows_result) return this.equals((getRows_result) that);
                 return false;
               }
           
               public boolean equals(getRows_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -28803,12 +31685,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -28849,13 +31729,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -28888,35 +31770,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRows_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRows_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRows_resultStandardScheme getScheme() {
                   return new getRows_resultStandardScheme();
                 }
               }
           
           -    private static class getRows_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRows_result> {
          +    private static class getRows_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getRows_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -28925,9 +31812,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_result stru
                           {
                             org.apache.thrift.protocol.TList _list254 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TRowResult>(_list254.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem255;
          -                  for (int _i256 = 0; _i256 < _list254.size; ++_i256)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem255;
          +                  for (int _i256 = 0; _i256 < _list254.size; ++_i256) {
                               _elem255 = new TRowResult();
                               _elem255.read(iprot);
                               struct.success.add(_elem255);
          @@ -28935,7 +31822,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_result stru
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -28944,7 +31831,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_result stru
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -28955,20 +31842,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRows_result stru
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter257 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter257 : struct.success) {
                         _iter257.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -28986,17 +31875,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRows_result str
           
               }
           
          -    private static class getRows_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRows_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRows_resultTupleScheme getScheme() {
                   return new getRows_resultTupleScheme();
                 }
               }
           
          -    private static class getRows_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRows_result> {
          +    private static class getRows_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRows_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRows_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRows_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -29008,8 +31901,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRows_result stru
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter258 : struct.success)
          -            {
          +            for (TRowResult _iter258 : struct.success) {
                         _iter258.write(oprot);
                       }
                     }
          @@ -29020,16 +31912,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRows_result stru
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRows_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRows_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list259 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list259 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TRowResult>(_list259.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem260;
          -            for (int _i261 = 0; _i261 < _list259.size; ++_i261)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem260;
          +            for (int _i261 = 0; _i261 < _list259.size; ++_i261) {
                         _elem260 = new TRowResult();
                         _elem260.read(iprot);
                         struct.success.add(_elem260);
          @@ -29045,21 +31940,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRows_result struc
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowsWithColumns_args implements org.apache.thrift.TBase<getRowsWithColumns_args, getRowsWithColumns_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRowsWithColumns_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowsWithColumns_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowsWithColumns_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowsWithColumns_argsTupleSchemeFactory();
          +  public static class getRowsWithColumns_args
          +      implements org.apache.thrift.TBase<getRowsWithColumns_args, getRowsWithColumns_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowsWithColumns_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowsWithColumns_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowsWithColumns_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowsWithColumns_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -29076,28 +31987,32 @@ public static class getRowsWithColumns_args implements org.apache.thrift.TBase<
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row keys
                  */
          -      ROWS((short)2, "rows"),
          +      ROWS((short) 2, "rows"),
                 /**
                  * List of columns to return, null for all columns
                  */
          -      COLUMNS((short)3, "columns"),
          +      COLUMNS((short) 3, "columns"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -29110,7 +32025,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROWS
          @@ -29125,12 +32040,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -29162,32 +32077,44 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROWS, new org.apache.thrift.meta_data.FieldMetaData("rows", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROWS,
          +        new org.apache.thrift.meta_data.FieldMetaData("rows",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.COLUMNS,
          +        new org.apache.thrift.meta_data.FieldMetaData("columns",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowsWithColumns_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowsWithColumns_args.class,
          +        metaDataMap);
               }
           
               public getRowsWithColumns_args() {
               }
           
          -    public getRowsWithColumns_args(
          -      java.nio.ByteBuffer tableName,
          -      java.util.List<java.nio.ByteBuffer> rows,
          -      java.util.List<java.nio.ByteBuffer> columns,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public getRowsWithColumns_args(java.nio.ByteBuffer tableName,
          +        java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.rows = rows;
          @@ -29203,29 +32130,36 @@ public getRowsWithColumns_args(getRowsWithColumns_args other) {
                   this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName);
                 }
                 if (other.isSetRows()) {
          -        java.util.List<java.nio.ByteBuffer> __this__rows = new java.util.ArrayList<java.nio.ByteBuffer>(other.rows.size());
          +        java.util.List<java.nio.ByteBuffer> __this__rows =
          +            new java.util.ArrayList<java.nio.ByteBuffer>(other.rows.size());
                   for (java.nio.ByteBuffer other_element : other.rows) {
                     __this__rows.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
                   this.rows = __this__rows;
                 }
                 if (other.isSetColumns()) {
          -        java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
          +        java.util.List<java.nio.ByteBuffer> __this__columns =
          +            new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                   for (java.nio.ByteBuffer other_element : other.columns) {
                     __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
                   this.columns = __this__columns;
                 }
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -29261,11 +32195,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public getRowsWithColumns_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getRowsWithColumns_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getRowsWithColumns_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -29312,7 +32248,8 @@ public java.util.List getRows() {
               /**
                * row keys
                */
          -    public getRowsWithColumns_args setRows(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> rows) {
          +    public getRowsWithColumns_args
          +        setRows(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> rows) {
                 this.rows = rows;
                 return this;
               }
          @@ -29359,7 +32296,8 @@ public java.util.List getColumns() {
               /**
                * List of columns to return, null for all columns
                */
          -    public getRowsWithColumns_args setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
          +    public getRowsWithColumns_args setColumns(
          +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
                 this.columns = columns;
                 return this;
               }
          @@ -29385,7 +32323,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -29394,14 +32332,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
          -    public getRowsWithColumns_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public getRowsWithColumns_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -29421,43 +32360,44 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROWS:
          -        if (value == null) {
          -          unsetRows();
          -        } else {
          -          setRows((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ROWS:
          +          if (value == null) {
          +            unsetRows();
          +          } else {
          +            setRows((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case COLUMNS:
          -        if (value == null) {
          -          unsetColumns();
          -        } else {
          -          setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case COLUMNS:
          +          if (value == null) {
          +            unsetColumns();
          +          } else {
          +            setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -29465,37 +32405,40 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROWS:
          -        return getRows();
          +        case ROWS:
          +          return getRows();
           
          -      case COLUMNS:
          -        return getColumns();
          +        case COLUMNS:
          +          return getColumns();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROWS:
          -        return isSetRows();
          -      case COLUMNS:
          -        return isSetColumns();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROWS:
          +          return isSetRows();
          +        case COLUMNS:
          +          return isSetColumns();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -29503,50 +32446,40 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getRowsWithColumns_args)
          -        return this.equals((getRowsWithColumns_args)that);
          +        return this.equals((getRowsWithColumns_args) that);
                 return false;
               }
           
               public boolean equals(getRowsWithColumns_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_rows = true && this.isSetRows();
                 boolean that_present_rows = true && that.isSetRows();
                 if (this_present_rows || that_present_rows) {
          -        if (!(this_present_rows && that_present_rows))
          -          return false;
          -        if (!this.rows.equals(that.rows))
          -          return false;
          +        if (!(this_present_rows && that_present_rows)) return false;
          +        if (!this.rows.equals(that.rows)) return false;
                 }
           
                 boolean this_present_columns = true && this.isSetColumns();
                 boolean that_present_columns = true && that.isSetColumns();
                 if (this_present_columns || that_present_columns) {
          -        if (!(this_present_columns && that_present_columns))
          -          return false;
          -        if (!this.columns.equals(that.columns))
          -          return false;
          +        if (!(this_present_columns && that_present_columns)) return false;
          +        if (!this.columns.equals(that.columns)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -29557,20 +32490,16 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRows()) ? 131071 : 524287);
          -      if (isSetRows())
          -        hashCode = hashCode * 8191 + rows.hashCode();
          +      if (isSetRows()) hashCode = hashCode * 8191 + rows.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -      if (isSetColumns())
          -        hashCode = hashCode * 8191 + columns.hashCode();
          +      if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -29631,11 +32560,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -29686,35 +32617,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowsWithColumns_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsWithColumns_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsWithColumns_argsStandardScheme getScheme() {
                   return new getRowsWithColumns_argsStandardScheme();
                 }
               }
           
          -    private static class getRowsWithColumns_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowsWithColumns_args> {
          +    private static class getRowsWithColumns_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRowsWithColumns_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -29722,7 +32658,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29731,16 +32667,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                           {
                             org.apache.thrift.protocol.TList _list262 = iprot.readListBegin();
                             struct.rows = new java.util.ArrayList<java.nio.ByteBuffer>(_list262.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem263;
          -                  for (int _i264 = 0; _i264 < _list262.size; ++_i264)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem263;
          +                  for (int _i264 = 0; _i264 < _list262.size; ++_i264) {
                               _elem263 = iprot.readBinary();
                               struct.rows.add(_elem263);
                             }
                             iprot.readListEnd();
                           }
                           struct.setRowsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29749,16 +32685,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                           {
                             org.apache.thrift.protocol.TList _list265 = iprot.readListBegin();
                             struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list265.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem266;
          -                  for (int _i267 = 0; _i267 < _list265.size; ++_i267)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem266;
          +                  for (int _i267 = 0; _i267 < _list265.size; ++_i267) {
                               _elem266 = iprot.readBinary();
                               struct.columns.add(_elem266);
                             }
                             iprot.readListEnd();
                           }
                           struct.setColumnsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29766,11 +32702,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map268 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map268.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key269;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val270;
          -                  for (int _i271 = 0; _i271 < _map268.size; ++_i271)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map268.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key269;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val270;
          +                  for (int _i271 = 0; _i271 < _map268.size; ++_i271) {
                               _key269 = iprot.readBinary();
                               _val270 = iprot.readBinary();
                               struct.attributes.put(_key269, _val270);
          @@ -29778,7 +32717,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29789,11 +32728,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -29805,9 +32746,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
                   if (struct.rows != null) {
                     oprot.writeFieldBegin(ROWS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.rows.size()));
          -            for (java.nio.ByteBuffer _iter272 : struct.rows)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.rows.size()));
          +            for (java.nio.ByteBuffer _iter272 : struct.rows) {
                         oprot.writeBinary(_iter272);
                       }
                       oprot.writeListEnd();
          @@ -29817,9 +32758,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
                   if (struct.columns != null) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter273 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter273 : struct.columns) {
                         oprot.writeBinary(_iter273);
                       }
                       oprot.writeListEnd();
          @@ -29829,9 +32770,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter274 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter274 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter274.getKey());
                         oprot.writeBinary(_iter274.getValue());
                       }
          @@ -29845,17 +32788,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
           
               }
           
          -    private static class getRowsWithColumns_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsWithColumns_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsWithColumns_argsTupleScheme getScheme() {
                   return new getRowsWithColumns_argsTupleScheme();
                 }
               }
           
          -    private static class getRowsWithColumns_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowsWithColumns_args> {
          +    private static class getRowsWithColumns_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRowsWithColumns_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -29876,8 +32823,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_
                   if (struct.isSetRows()) {
                     {
                       oprot.writeI32(struct.rows.size());
          -            for (java.nio.ByteBuffer _iter275 : struct.rows)
          -            {
          +            for (java.nio.ByteBuffer _iter275 : struct.rows) {
                         oprot.writeBinary(_iter275);
                       }
                     }
          @@ -29885,8 +32831,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_
                   if (struct.isSetColumns()) {
                     {
                       oprot.writeI32(struct.columns.size());
          -            for (java.nio.ByteBuffer _iter276 : struct.columns)
          -            {
          +            for (java.nio.ByteBuffer _iter276 : struct.columns) {
                         oprot.writeBinary(_iter276);
                       }
                     }
          @@ -29894,8 +32839,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter277 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter277 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter277.getKey());
                         oprot.writeBinary(_iter277.getValue());
                       }
          @@ -29904,8 +32849,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -29913,11 +32860,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_a
                   }
                   if (incoming.get(1)) {
                     {
          -            org.apache.thrift.protocol.TList _list278 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list278 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                       struct.rows = new java.util.ArrayList<java.nio.ByteBuffer>(_list278.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem279;
          -            for (int _i280 = 0; _i280 < _list278.size; ++_i280)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem279;
          +            for (int _i280 = 0; _i280 < _list278.size; ++_i280) {
                         _elem279 = iprot.readBinary();
                         struct.rows.add(_elem279);
                       }
          @@ -29926,11 +32874,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_a
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TList _list281 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list281 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                       struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list281.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem282;
          -            for (int _i283 = 0; _i283 < _list281.size; ++_i283)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem282;
          +            for (int _i283 = 0; _i283 < _list281.size; ++_i283) {
                         _elem282 = iprot.readBinary();
                         struct.columns.add(_elem282);
                       }
          @@ -29939,12 +32888,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_a
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map284 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map284.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key285;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val286;
          -            for (int _i287 = 0; _i287 < _map284.size; ++_i287)
          -            {
          +            org.apache.thrift.protocol.TMap _map284 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map284.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key285;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val286;
          +            for (int _i287 = 0; _i287 < _map284.size; ++_i287) {
                         _key285 = iprot.readBinary();
                         _val286 = iprot.readBinary();
                         struct.attributes.put(_key285, _val286);
          @@ -29955,29 +32907,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_a
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowsWithColumns_result implements org.apache.thrift.TBase<getRowsWithColumns_result, getRowsWithColumns_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRowsWithColumns_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowsWithColumns_result");
          +  public static class getRowsWithColumns_result implements
          +      org.apache.thrift.TBase<getRowsWithColumns_result, getRowsWithColumns_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowsWithColumns_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowsWithColumns_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowsWithColumns_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowsWithColumns_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowsWithColumns_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowsWithColumns_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -29990,7 +32957,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -30001,12 +32968,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -30038,23 +33005,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowsWithColumns_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getRowsWithColumns_result.class, metaDataMap);
               }
           
               public getRowsWithColumns_result() {
               }
           
          -    public getRowsWithColumns_result(
          -      java.util.List<TRowResult> success,
          -      IOError io)
          -    {
          +    public getRowsWithColumns_result(java.util.List<TRowResult> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -30065,7 +33037,8 @@ public getRowsWithColumns_result(
                */
               public getRowsWithColumns_result(getRowsWithColumns_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
          +        java.util.List<TRowResult> __this__success =
          +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
          @@ -30107,7 +33080,8 @@ public java.util.List<TRowResult> getSuccess() {
                 return this.success;
               }
           
          -    public getRowsWithColumns_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
          +    public getRowsWithColumns_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -30152,23 +33126,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -30176,27 +33151,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -30204,32 +33182,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getRowsWithColumns_result)
          -        return this.equals((getRowsWithColumns_result)that);
          +        return this.equals((getRowsWithColumns_result) that);
                 return false;
               }
           
               public boolean equals(getRowsWithColumns_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -30240,12 +33212,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -30286,13 +33256,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -30325,35 +33297,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowsWithColumns_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsWithColumns_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsWithColumns_resultStandardScheme getScheme() {
                   return new getRowsWithColumns_resultStandardScheme();
                 }
               }
           
          -    private static class getRowsWithColumns_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowsWithColumns_result> {
          +    private static class getRowsWithColumns_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRowsWithColumns_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -30362,9 +33339,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                           {
                             org.apache.thrift.protocol.TList _list288 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TRowResult>(_list288.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem289;
          -                  for (int _i290 = 0; _i290 < _list288.size; ++_i290)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem289;
          +                  for (int _i290 = 0; _i290 < _list288.size; ++_i290) {
                               _elem289 = new TRowResult();
                               _elem289.read(iprot);
                               struct.success.add(_elem289);
          @@ -30372,7 +33349,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -30381,7 +33358,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -30392,20 +33369,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumns_
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getRowsWithColumns_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter291 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter291 : struct.success) {
                         _iter291.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -30423,17 +33402,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
           
               }
           
          -    private static class getRowsWithColumns_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsWithColumns_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsWithColumns_resultTupleScheme getScheme() {
                   return new getRowsWithColumns_resultTupleScheme();
                 }
               }
           
          -    private static class getRowsWithColumns_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowsWithColumns_result> {
          +    private static class getRowsWithColumns_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRowsWithColumns_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -30445,8 +33428,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter292 : struct.success)
          -            {
          +            for (TRowResult _iter292 : struct.success) {
                         _iter292.write(oprot);
                       }
                     }
          @@ -30457,16 +33439,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list293 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list293 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TRowResult>(_list293.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem294;
          -            for (int _i295 = 0; _i295 < _list293.size; ++_i295)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem294;
          +            for (int _i295 = 0; _i295 < _list293.size; ++_i295) {
                         _elem294 = new TRowResult();
                         _elem294.read(iprot);
                         struct.success.add(_elem294);
          @@ -30482,21 +33467,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumns_r
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowsTs_args implements org.apache.thrift.TBase<getRowsTs_args, getRowsTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRowsTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowsTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST, (short)2);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowsTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowsTs_argsTupleSchemeFactory();
          +  public static class getRowsTs_args
          +      implements org.apache.thrift.TBase<getRowsTs_args, getRowsTs_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowsTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowsTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowsTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowsTs_argsTupleSchemeFactory();
           
               /**
                * name of the table
          @@ -30513,28 +33514,32 @@ public static class getRowsTs_args implements org.apache.thrift.TBase attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of the table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row keys
                  */
          -      ROWS((short)2, "rows"),
          +      ROWS((short) 2, "rows"),
                 /**
                  * timestamp
                  */
          -      TIMESTAMP((short)3, "timestamp"),
          +      TIMESTAMP((short) 3, "timestamp"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -30547,7 +33552,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROWS
          @@ -30562,12 +33567,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -30601,31 +33606,42 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROWS, new org.apache.thrift.meta_data.FieldMetaData("rows", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROWS,
          +        new org.apache.thrift.meta_data.FieldMetaData("rows",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowsTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowsTs_args.class,
          +        metaDataMap);
               }
           
               public getRowsTs_args() {
               }
           
          -    public getRowsTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.util.List<java.nio.ByteBuffer> rows,
          -      long timestamp,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public getRowsTs_args(java.nio.ByteBuffer tableName, java.util.List<java.nio.ByteBuffer> rows,
          +        long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.rows = rows;
          @@ -30643,7 +33659,8 @@ public getRowsTs_args(getRowsTs_args other) {
                   this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName);
                 }
                 if (other.isSetRows()) {
          -        java.util.List<java.nio.ByteBuffer> __this__rows = new java.util.ArrayList<java.nio.ByteBuffer>(other.rows.size());
          +        java.util.List<java.nio.ByteBuffer> __this__rows =
          +            new java.util.ArrayList<java.nio.ByteBuffer>(other.rows.size());
                   for (java.nio.ByteBuffer other_element : other.rows) {
                     __this__rows.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
          @@ -30651,15 +33668,20 @@ public getRowsTs_args(getRowsTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -30696,11 +33718,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of the table
                */
               public getRowsTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getRowsTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getRowsTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -30747,7 +33771,8 @@ public java.util.List<java.nio.ByteBuffer> getRows() {
               /**
                * row keys
                */
          -    public getRowsTs_args setRows(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> rows) {
          +    public getRowsTs_args
          +        setRows(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> rows) {
                 this.rows = rows;
                 return this;
               }
          @@ -30784,7 +33809,8 @@ public getRowsTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -30793,7 +33819,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -30802,7 +33829,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -30811,14 +33838,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
          -    public getRowsTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public getRowsTs_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -30838,43 +33866,44 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROWS:
          -        if (value == null) {
          -          unsetRows();
          -        } else {
          -          setRows((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ROWS:
          +          if (value == null) {
          +            unsetRows();
          +          } else {
          +            setRows((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -30882,88 +33911,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROWS:
          -        return getRows();
          +        case ROWS:
          +          return getRows();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROWS:
          -        return isSetRows();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROWS:
          +          return isSetRows();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRowsTs_args)
          -        return this.equals((getRowsTs_args)that);
          +      if (that instanceof getRowsTs_args) return this.equals((getRowsTs_args) that);
                 return false;
               }
           
               public boolean equals(getRowsTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_rows = true && this.isSetRows();
                 boolean that_present_rows = true && that.isSetRows();
                 if (this_present_rows || that_present_rows) {
          -        if (!(this_present_rows && that_present_rows))
          -          return false;
          -        if (!this.rows.equals(that.rows))
          -          return false;
          +        if (!(this_present_rows && that_present_rows)) return false;
          +        if (!this.rows.equals(that.rows)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -30974,18 +33995,15 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRows()) ? 131071 : 524287);
          -      if (isSetRows())
          -        hashCode = hashCode * 8191 + rows.hashCode();
          +      if (isSetRows()) hashCode = hashCode * 8191 + rows.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -31046,11 +34064,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -31097,37 +34117,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowsTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsTs_argsStandardScheme getScheme() {
                   return new getRowsTs_argsStandardScheme();
                 }
               }
           
          -    private static class getRowsTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowsTs_args> {
          +    private static class getRowsTs_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRowsTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -31135,7 +34161,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -31144,16 +34170,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args stru
                           {
                             org.apache.thrift.protocol.TList _list296 = iprot.readListBegin();
                             struct.rows = new java.util.ArrayList<java.nio.ByteBuffer>(_list296.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem297;
          -                  for (int _i298 = 0; _i298 < _list296.size; ++_i298)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem297;
          +                  for (int _i298 = 0; _i298 < _list296.size; ++_i298) {
                               _elem297 = iprot.readBinary();
                               struct.rows.add(_elem297);
                             }
                             iprot.readListEnd();
                           }
                           struct.setRowsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -31161,7 +34187,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -31169,11 +34195,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map299 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map299.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key300;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val301;
          -                  for (int _i302 = 0; _i302 < _map299.size; ++_i302)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map299.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key300;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val301;
          +                  for (int _i302 = 0; _i302 < _map299.size; ++_i302) {
                               _key300 = iprot.readBinary();
                               _val301 = iprot.readBinary();
                               struct.attributes.put(_key300, _val301);
          @@ -31181,7 +34210,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args stru
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -31192,11 +34221,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_args stru
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -31208,9 +34239,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_args str
                   if (struct.rows != null) {
                     oprot.writeFieldBegin(ROWS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.rows.size()));
          -            for (java.nio.ByteBuffer _iter303 : struct.rows)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.rows.size()));
          +            for (java.nio.ByteBuffer _iter303 : struct.rows) {
                         oprot.writeBinary(_iter303);
                       }
                       oprot.writeListEnd();
          @@ -31223,9 +34254,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_args str
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter304 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter304 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter304.getKey());
                         oprot.writeBinary(_iter304.getValue());
                       }
          @@ -31239,17 +34272,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_args str
           
               }
           
          -    private static class getRowsTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsTs_argsTupleScheme getScheme() {
                   return new getRowsTs_argsTupleScheme();
                 }
               }
           
          -    private static class getRowsTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowsTs_args> {
          +    private static class getRowsTs_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRowsTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -31270,8 +34307,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args stru
                   if (struct.isSetRows()) {
                     {
                       oprot.writeI32(struct.rows.size());
          -            for (java.nio.ByteBuffer _iter305 : struct.rows)
          -            {
          +            for (java.nio.ByteBuffer _iter305 : struct.rows) {
                         oprot.writeBinary(_iter305);
                       }
                     }
          @@ -31282,8 +34318,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args stru
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter306 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter306 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter306.getKey());
                         oprot.writeBinary(_iter306.getValue());
                       }
          @@ -31292,8 +34328,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args stru
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -31301,11 +34339,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args struc
                   }
                   if (incoming.get(1)) {
                     {
          -            org.apache.thrift.protocol.TList _list307 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list307 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                       struct.rows = new java.util.ArrayList<java.nio.ByteBuffer>(_list307.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem308;
          -            for (int _i309 = 0; _i309 < _list307.size; ++_i309)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem308;
          +            for (int _i309 = 0; _i309 < _list307.size; ++_i309) {
                         _elem308 = iprot.readBinary();
                         struct.rows.add(_elem308);
                       }
          @@ -31318,12 +34357,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args struc
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map310 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map310.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key311;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val312;
          -            for (int _i313 = 0; _i313 < _map310.size; ++_i313)
          -            {
          +            org.apache.thrift.protocol.TMap _map310 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map310.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key311;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val312;
          +            for (int _i313 = 0; _i313 < _map310.size; ++_i313) {
                         _key311 = iprot.readBinary();
                         _val312 = iprot.readBinary();
                         struct.attributes.put(_key311, _val312);
          @@ -31334,29 +34376,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_args struc
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowsTs_result implements org.apache.thrift.TBase<getRowsTs_result, getRowsTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRowsTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowsTs_result");
          +  public static class getRowsTs_result
          +      implements org.apache.thrift.TBase<getRowsTs_result, getRowsTs_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowsTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowsTs_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowsTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowsTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowsTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowsTs_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -31369,7 +34426,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -31380,12 +34437,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -31417,23 +34474,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowsTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowsTs_result.class,
          +        metaDataMap);
               }
           
               public getRowsTs_result() {
               }
           
          -    public getRowsTs_result(
          -      java.util.List<TRowResult> success,
          -      IOError io)
          -    {
          +    public getRowsTs_result(java.util.List<TRowResult> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -31444,7 +34506,8 @@ public getRowsTs_result(
                */
               public getRowsTs_result(getRowsTs_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
          +        java.util.List<TRowResult> __this__success =
          +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
          @@ -31486,7 +34549,8 @@ public java.util.List<TRowResult> getSuccess() {
                 return this.success;
               }
           
          -    public getRowsTs_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
          +    public getRowsTs_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -31531,23 +34595,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -31555,60 +34620,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRowsTs_result)
          -        return this.equals((getRowsTs_result)that);
          +      if (that instanceof getRowsTs_result) return this.equals((getRowsTs_result) that);
                 return false;
               }
           
               public boolean equals(getRowsTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -31619,12 +34680,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -31665,13 +34724,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -31704,35 +34765,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowsTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsTs_resultStandardScheme getScheme() {
                   return new getRowsTs_resultStandardScheme();
                 }
               }
           
          -    private static class getRowsTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowsTs_result> {
          +    private static class getRowsTs_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRowsTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -31741,9 +34807,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_result st
                           {
                             org.apache.thrift.protocol.TList _list314 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TRowResult>(_list314.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem315;
          -                  for (int _i316 = 0; _i316 < _list314.size; ++_i316)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem315;
          +                  for (int _i316 = 0; _i316 < _list314.size; ++_i316) {
                               _elem315 = new TRowResult();
                               _elem315.read(iprot);
                               struct.success.add(_elem315);
          @@ -31751,7 +34817,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_result st
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -31760,7 +34826,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_result st
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -31771,20 +34837,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsTs_result st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter317 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter317 : struct.success) {
                         _iter317.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -31802,17 +34870,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsTs_result s
           
               }
           
          -    private static class getRowsTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsTs_resultTupleScheme getScheme() {
                   return new getRowsTs_resultTupleScheme();
                 }
               }
           
          -    private static class getRowsTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowsTs_result> {
          +    private static class getRowsTs_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRowsTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -31824,8 +34896,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_result st
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter318 : struct.success)
          -            {
          +            for (TRowResult _iter318 : struct.success) {
                         _iter318.write(oprot);
                       }
                     }
          @@ -31836,16 +34907,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsTs_result st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list319 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list319 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TRowResult>(_list319.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem320;
          -            for (int _i321 = 0; _i321 < _list319.size; ++_i321)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem320;
          +            for (int _i321 = 0; _i321 < _list319.size; ++_i321) {
                         _elem320 = new TRowResult();
                         _elem320.read(iprot);
                         struct.success.add(_elem320);
          @@ -31861,22 +34935,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsTs_result str
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRowsWithColumnsTs_args implements org.apache.thrift.TBase<getRowsWithColumnsTs_args, getRowsWithColumnsTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRowsWithColumnsTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowsWithColumnsTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)4);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)5);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowsWithColumnsTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowsWithColumnsTs_argsTupleSchemeFactory();
          +  public static class getRowsWithColumnsTs_args implements
          +      org.apache.thrift.TBase<getRowsWithColumnsTs_args, getRowsWithColumnsTs_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRowsWithColumnsTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowsWithColumnsTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 5);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowsWithColumnsTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowsWithColumnsTs_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -31894,29 +34986,32 @@ public static class getRowsWithColumnsTs_args implements org.apache.thrift.TBase
               /**
                * Get attributes
                */
          -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row keys
                  */
          -      ROWS((short)2, "rows"),
          +      ROWS((short) 2, "rows"),
                 /**
                  * List of columns to return, null for all columns
                  */
          -      COLUMNS((short)3, "columns"),
          -      TIMESTAMP((short)4, "timestamp"),
          +      COLUMNS((short) 3, "columns"), TIMESTAMP((short) 4, "timestamp"),
                 /**
                  * Get attributes
                  */
          -      ATTRIBUTES((short)5, "attributes");
          +      ATTRIBUTES((short) 5, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -31929,7 +35024,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROWS
          @@ -31946,12 +35041,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -31985,35 +35080,49 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROWS, new org.apache.thrift.meta_data.FieldMetaData("rows", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROWS,
          +        new org.apache.thrift.meta_data.FieldMetaData("rows",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.COLUMNS,
          +        new org.apache.thrift.meta_data.FieldMetaData("columns",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowsWithColumnsTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getRowsWithColumnsTs_args.class, metaDataMap);
               }
           
               public getRowsWithColumnsTs_args() {
               }
           
          -    public getRowsWithColumnsTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.util.List<java.nio.ByteBuffer> rows,
          -      java.util.List<java.nio.ByteBuffer> columns,
          -      long timestamp,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public getRowsWithColumnsTs_args(java.nio.ByteBuffer tableName,
          +        java.util.List<java.nio.ByteBuffer> rows, java.util.List<java.nio.ByteBuffer> columns,
          +        long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.rows = rows;
          @@ -32032,14 +35141,16 @@ public getRowsWithColumnsTs_args(getRowsWithColumnsTs_args other) {
                   this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName);
                 }
                 if (other.isSetRows()) {
          -        java.util.List<java.nio.ByteBuffer> __this__rows = new java.util.ArrayList<java.nio.ByteBuffer>(other.rows.size());
          +        java.util.List<java.nio.ByteBuffer> __this__rows =
          +            new java.util.ArrayList<java.nio.ByteBuffer>(other.rows.size());
                   for (java.nio.ByteBuffer other_element : other.rows) {
                     __this__rows.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
                   this.rows = __this__rows;
                 }
                 if (other.isSetColumns()) {
          -        java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
          +        java.util.List<java.nio.ByteBuffer> __this__columns =
          +            new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                   for (java.nio.ByteBuffer other_element : other.columns) {
                     __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
          @@ -32047,15 +35158,20 @@ public getRowsWithColumnsTs_args(getRowsWithColumnsTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -32093,11 +35209,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public getRowsWithColumnsTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public getRowsWithColumnsTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public getRowsWithColumnsTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -32144,7 +35262,8 @@ public java.util.List<java.nio.ByteBuffer> getRows() {
               /**
                * row keys
                */
          -    public getRowsWithColumnsTs_args setRows(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> rows) {
          +    public getRowsWithColumnsTs_args
          +        setRows(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> rows) {
                 this.rows = rows;
                 return this;
               }
          @@ -32191,7 +35310,8 @@ public java.util.List<java.nio.ByteBuffer> getColumns() {
               /**
                * List of columns to return, null for all columns
                */
          -    public getRowsWithColumnsTs_args setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
          +    public getRowsWithColumnsTs_args setColumns(
          +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
                 this.columns = columns;
                 return this;
               }
          @@ -32222,7 +35342,8 @@ public getRowsWithColumnsTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -32231,7 +35352,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -32240,7 +35362,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -32249,14 +35371,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Get attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Get attributes
                */
           -    public getRowsWithColumnsTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public getRowsWithColumnsTs_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -32276,51 +35399,52 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROWS:
          -        if (value == null) {
          -          unsetRows();
          -        } else {
           -          setRows((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ROWS:
          +          if (value == null) {
          +            unsetRows();
          +          } else {
           +            setRows((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case COLUMNS:
          -        if (value == null) {
          -          unsetColumns();
          -        } else {
           -          setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case COLUMNS:
          +          if (value == null) {
          +            unsetColumns();
          +          } else {
           +            setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -32328,42 +35452,45 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROWS:
          -        return getRows();
          +        case ROWS:
          +          return getRows();
           
          -      case COLUMNS:
          -        return getColumns();
          +        case COLUMNS:
          +          return getColumns();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROWS:
          -        return isSetRows();
          -      case COLUMNS:
          -        return isSetColumns();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROWS:
          +          return isSetRows();
          +        case COLUMNS:
          +          return isSetColumns();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -32371,59 +35498,47 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getRowsWithColumnsTs_args)
          -        return this.equals((getRowsWithColumnsTs_args)that);
          +        return this.equals((getRowsWithColumnsTs_args) that);
                 return false;
               }
           
               public boolean equals(getRowsWithColumnsTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_rows = true && this.isSetRows();
                 boolean that_present_rows = true && that.isSetRows();
                 if (this_present_rows || that_present_rows) {
          -        if (!(this_present_rows && that_present_rows))
          -          return false;
          -        if (!this.rows.equals(that.rows))
          -          return false;
          +        if (!(this_present_rows && that_present_rows)) return false;
          +        if (!this.rows.equals(that.rows)) return false;
                 }
           
                 boolean this_present_columns = true && this.isSetColumns();
                 boolean that_present_columns = true && that.isSetColumns();
                 if (this_present_columns || that_present_columns) {
          -        if (!(this_present_columns && that_present_columns))
          -          return false;
          -        if (!this.columns.equals(that.columns))
          -          return false;
          +        if (!(this_present_columns && that_present_columns)) return false;
          +        if (!this.columns.equals(that.columns)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -32434,22 +35549,18 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRows()) ? 131071 : 524287);
          -      if (isSetRows())
          -        hashCode = hashCode * 8191 + rows.hashCode();
          +      if (isSetRows()) hashCode = hashCode * 8191 + rows.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -      if (isSetColumns())
          -        hashCode = hashCode * 8191 + columns.hashCode();
          +      if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -32520,11 +35631,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -32579,37 +35692,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowsWithColumnsTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsWithColumnsTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsWithColumnsTs_argsStandardScheme getScheme() {
                   return new getRowsWithColumnsTs_argsStandardScheme();
                 }
               }
           
           -    private static class getRowsWithColumnsTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowsWithColumnsTs_args> {
          +    private static class getRowsWithColumnsTs_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getRowsWithColumnsTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -32617,7 +35736,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -32626,16 +35745,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                           {
                             org.apache.thrift.protocol.TList _list322 = iprot.readListBegin();
                          struct.rows = new java.util.ArrayList<java.nio.ByteBuffer>(_list322.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem323;
          -                  for (int _i324 = 0; _i324 < _list322.size; ++_i324)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem323;
          +                  for (int _i324 = 0; _i324 < _list322.size; ++_i324) {
                               _elem323 = iprot.readBinary();
                               struct.rows.add(_elem323);
                             }
                             iprot.readListEnd();
                           }
                           struct.setRowsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -32644,16 +35763,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                           {
                             org.apache.thrift.protocol.TList _list325 = iprot.readListBegin();
                          struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list325.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem326;
          -                  for (int _i327 = 0; _i327 < _list325.size; ++_i327)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem326;
          +                  for (int _i327 = 0; _i327 < _list325.size; ++_i327) {
                               _elem326 = iprot.readBinary();
                               struct.columns.add(_elem326);
                             }
                             iprot.readListEnd();
                           }
                           struct.setColumnsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -32661,7 +35780,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -32669,11 +35788,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map328 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map328.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key329;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val330;
          -                  for (int _i331 = 0; _i331 < _map328.size; ++_i331)
          -                  {
          +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map328.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key329;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val330;
          +                  for (int _i331 = 0; _i331 < _map328.size; ++_i331) {
                               _key329 = iprot.readBinary();
                               _val330 = iprot.readBinary();
                               struct.attributes.put(_key329, _val330);
          @@ -32681,7 +35803,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -32692,11 +35814,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumnsTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getRowsWithColumnsTs_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -32708,9 +35832,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
                   if (struct.rows != null) {
                     oprot.writeFieldBegin(ROWS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.rows.size()));
          -            for (java.nio.ByteBuffer _iter332 : struct.rows)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.rows.size()));
          +            for (java.nio.ByteBuffer _iter332 : struct.rows) {
                         oprot.writeBinary(_iter332);
                       }
                       oprot.writeListEnd();
          @@ -32720,9 +35844,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
                   if (struct.columns != null) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter333 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter333 : struct.columns) {
                         oprot.writeBinary(_iter333);
                       }
                       oprot.writeListEnd();
          @@ -32735,9 +35859,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter334 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter334 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter334.getKey());
                         oprot.writeBinary(_iter334.getValue());
                       }
          @@ -32751,17 +35877,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
           
               }
           
          -    private static class getRowsWithColumnsTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsWithColumnsTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsWithColumnsTs_argsTupleScheme getScheme() {
                   return new getRowsWithColumnsTs_argsTupleScheme();
                 }
               }
           
           -    private static class getRowsWithColumnsTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowsWithColumnsTs_args> {
          +    private static class getRowsWithColumnsTs_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getRowsWithColumnsTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -32785,8 +35915,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsT
                   if (struct.isSetRows()) {
                     {
                       oprot.writeI32(struct.rows.size());
          -            for (java.nio.ByteBuffer _iter335 : struct.rows)
          -            {
          +            for (java.nio.ByteBuffer _iter335 : struct.rows) {
                         oprot.writeBinary(_iter335);
                       }
                     }
          @@ -32794,8 +35923,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsT
                   if (struct.isSetColumns()) {
                     {
                       oprot.writeI32(struct.columns.size());
          -            for (java.nio.ByteBuffer _iter336 : struct.columns)
          -            {
          +            for (java.nio.ByteBuffer _iter336 : struct.columns) {
                         oprot.writeBinary(_iter336);
                       }
                     }
          @@ -32806,8 +35934,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsT
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter337 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter337 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter337.getKey());
                         oprot.writeBinary(_iter337.getValue());
                       }
          @@ -32816,8 +35944,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsT
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(5);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -32825,11 +35955,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs
                   }
                   if (incoming.get(1)) {
                     {
          -            org.apache.thrift.protocol.TList _list338 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list338 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                        struct.rows = new java.util.ArrayList<java.nio.ByteBuffer>(_list338.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem339;
          -            for (int _i340 = 0; _i340 < _list338.size; ++_i340)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem339;
          +            for (int _i340 = 0; _i340 < _list338.size; ++_i340) {
                         _elem339 = iprot.readBinary();
                         struct.rows.add(_elem339);
                       }
          @@ -32838,11 +35969,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TList _list341 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list341 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                        struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list341.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem342;
          -            for (int _i343 = 0; _i343 < _list341.size; ++_i343)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem342;
          +            for (int _i343 = 0; _i343 < _list341.size; ++_i343) {
                         _elem342 = iprot.readBinary();
                         struct.columns.add(_elem342);
                       }
          @@ -32855,12 +35987,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs
                   }
                   if (incoming.get(4)) {
                     {
          -            org.apache.thrift.protocol.TMap _map344 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map344.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key345;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val346;
          -            for (int _i347 = 0; _i347 < _map344.size; ++_i347)
          -            {
          +            org.apache.thrift.protocol.TMap _map344 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map344.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key345;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val346;
          +            for (int _i347 = 0; _i347 < _map344.size; ++_i347) {
                         _key345 = iprot.readBinary();
                         _val346 = iprot.readBinary();
                         struct.attributes.put(_key345, _val346);
          @@ -32871,29 +36006,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getRowsWithColumnsTs_result implements org.apache.thrift.TBase<getRowsWithColumnsTs_result, getRowsWithColumnsTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRowsWithColumnsTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowsWithColumnsTs_result");
          +  public static class getRowsWithColumnsTs_result implements
           +      org.apache.thrift.TBase<getRowsWithColumnsTs_result, getRowsWithColumnsTs_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getRowsWithColumnsTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRowsWithColumnsTs_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRowsWithColumnsTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRowsWithColumnsTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRowsWithColumnsTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRowsWithColumnsTs_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -32906,7 +36056,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -32917,12 +36067,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -32954,23 +36104,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowsWithColumnsTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getRowsWithColumnsTs_result.class, metaDataMap);
               }
           
               public getRowsWithColumnsTs_result() {
               }
           
          -    public getRowsWithColumnsTs_result(
           -      java.util.List<TRowResult> success,
          -      IOError io)
          -    {
           +    public getRowsWithColumnsTs_result(java.util.List<TRowResult> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -32981,7 +36136,8 @@ public getRowsWithColumnsTs_result(
                */
               public getRowsWithColumnsTs_result(getRowsWithColumnsTs_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
           +        java.util.List<TRowResult> __this__success =
           +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
          @@ -33023,7 +36179,8 @@ public java.util.List getSuccess() {
                 return this.success;
               }
           
           -    public getRowsWithColumnsTs_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
          +    public getRowsWithColumnsTs_result
           +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -33068,23 +36225,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -33092,27 +36250,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -33120,32 +36281,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getRowsWithColumnsTs_result)
          -        return this.equals((getRowsWithColumnsTs_result)that);
          +        return this.equals((getRowsWithColumnsTs_result) that);
                 return false;
               }
           
               public boolean equals(getRowsWithColumnsTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -33156,12 +36311,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -33202,13 +36355,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -33241,35 +36396,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRowsWithColumnsTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsWithColumnsTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsWithColumnsTs_resultStandardScheme getScheme() {
                   return new getRowsWithColumnsTs_resultStandardScheme();
                 }
               }
           
           -    private static class getRowsWithColumnsTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRowsWithColumnsTs_result> {
          +    private static class getRowsWithColumnsTs_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getRowsWithColumnsTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getRowsWithColumnsTs_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -33278,9 +36438,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                           {
                             org.apache.thrift.protocol.TList _list348 = iprot.readListBegin();
                          struct.success = new java.util.ArrayList<TRowResult>(_list348.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem349;
          -                  for (int _i350 = 0; _i350 < _list348.size; ++_i350)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem349;
          +                  for (int _i350 = 0; _i350 < _list348.size; ++_i350) {
                               _elem349 = new TRowResult();
                               _elem349.read(iprot);
                               struct.success.add(_elem349);
          @@ -33288,7 +36448,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -33297,7 +36457,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -33308,20 +36468,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRowsWithColumnsT
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumnsTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getRowsWithColumnsTs_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter351 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter351 : struct.success) {
                         _iter351.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -33339,17 +36501,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRowsWithColumns
           
               }
           
          -    private static class getRowsWithColumnsTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRowsWithColumnsTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRowsWithColumnsTs_resultTupleScheme getScheme() {
                   return new getRowsWithColumnsTs_resultTupleScheme();
                 }
               }
           
           -    private static class getRowsWithColumnsTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRowsWithColumnsTs_result> {
           +    private static class getRowsWithColumnsTs_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getRowsWithColumnsTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getRowsWithColumnsTs_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -33361,8 +36527,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsT
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter352 : struct.success)
          -            {
          +            for (TRowResult _iter352 : struct.success) {
                         _iter352.write(oprot);
                       }
                     }
          @@ -33373,16 +36538,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsT
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getRowsWithColumnsTs_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list353 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list353 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TRowResult>(_list353.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem354;
          -            for (int _i355 = 0; _i355 < _list353.size; ++_i355)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem354;
          +            for (int _i355 = 0; _i355 < _list353.size; ++_i355) {
                         _elem354 = new TRowResult();
                         _elem354.read(iprot);
                         struct.success.add(_elem354);
          @@ -33398,21 +36566,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRowsWithColumnsTs
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class mutateRow_args implements org.apache.thrift.TBase<mutateRow_args, mutateRow_args._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRow_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRow_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.LIST, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRow_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRow_argsTupleSchemeFactory();
          +  public static class mutateRow_args
           +      implements org.apache.thrift.TBase<mutateRow_args, mutateRow_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<mutateRow_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRow_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.LIST,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRow_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRow_argsTupleSchemeFactory();
           
               /**
                * name of table
           @@ -33429,28 +36613,32 @@ public static class mutateRow_args implements org.apache.thrift.TBase<mutateRow
                /**
                 * Mutation attributes
                 */
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * list of mutation commands
                  */
          -      MUTATIONS((short)3, "mutations"),
          +      MUTATIONS((short) 3, "mutations"),
                 /**
                  * Mutation attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -33463,7 +36651,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -33478,12 +36666,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -33515,31 +36703,43 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Mutation.class))));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.MUTATIONS,
          +        new org.apache.thrift.meta_data.FieldMetaData("mutations",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, Mutation.class))));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRow_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRow_args.class,
          +        metaDataMap);
               }
           
               public mutateRow_args() {
               }
           
          -    public mutateRow_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
           -      java.util.List<Mutation> mutations,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public mutateRow_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.util.List<Mutation> mutations,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -33558,22 +36758,28 @@ public mutateRow_args(mutateRow_args other) {
                   this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
                 }
                 if (other.isSetMutations()) {
           -        java.util.List<Mutation> __this__mutations = new java.util.ArrayList<Mutation>(other.mutations.size());
           +        java.util.List<Mutation> __this__mutations =
           +            new java.util.ArrayList<Mutation>(other.mutations.size());
                   for (Mutation other_element : other.mutations) {
                     __this__mutations.add(new Mutation(other_element));
                   }
                   this.mutations = __this__mutations;
                 }
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
           +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -33609,11 +36815,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public mutateRow_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public mutateRow_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public mutateRow_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -33649,7 +36857,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public mutateRow_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
           @@ -33700,7 +36908,8 @@ public java.util.List<Mutation> getMutations() {
               /**
                * list of mutation commands
                */
           -    public mutateRow_args setMutations(@org.apache.thrift.annotation.Nullable java.util.List<Mutation> mutations) {
           +    public mutateRow_args
           +        setMutations(@org.apache.thrift.annotation.Nullable java.util.List<Mutation> mutations) {
                 this.mutations = mutations;
                 return this;
               }
          @@ -33726,7 +36935,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -33735,14 +36944,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Mutation attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Mutation attributes
                */
           -    public mutateRow_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +    public mutateRow_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -33762,47 +36972,48 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case MUTATIONS:
          -        if (value == null) {
          -          unsetMutations();
          -        } else {
           -          setMutations((java.util.List<Mutation>)value);
          -        }
          -        break;
          +        case MUTATIONS:
          +          if (value == null) {
          +            unsetMutations();
          +          } else {
           +            setMutations((java.util.List<Mutation>) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -33810,88 +37021,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case MUTATIONS:
          -        return getMutations();
          +        case MUTATIONS:
          +          return getMutations();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case MUTATIONS:
          -        return isSetMutations();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case MUTATIONS:
          +          return isSetMutations();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRow_args)
          -        return this.equals((mutateRow_args)that);
          +      if (that instanceof mutateRow_args) return this.equals((mutateRow_args) that);
                 return false;
               }
           
               public boolean equals(mutateRow_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_mutations = true && this.isSetMutations();
                 boolean that_present_mutations = true && that.isSetMutations();
                 if (this_present_mutations || that_present_mutations) {
          -        if (!(this_present_mutations && that_present_mutations))
          -          return false;
          -        if (!this.mutations.equals(that.mutations))
          -          return false;
          +        if (!(this_present_mutations && that_present_mutations)) return false;
          +        if (!this.mutations.equals(that.mutations)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -33902,20 +37105,16 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetMutations()) ? 131071 : 524287);
          -      if (isSetMutations())
          -        hashCode = hashCode * 8191 + mutations.hashCode();
          +      if (isSetMutations()) hashCode = hashCode * 8191 + mutations.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -33976,11 +37175,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -34031,35 +37232,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRow_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRow_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRow_argsStandardScheme getScheme() {
                   return new mutateRow_argsStandardScheme();
                 }
               }
           
           -    private static class mutateRow_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<mutateRow_args> {
           +    private static class mutateRow_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<mutateRow_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -34067,7 +37273,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -34075,7 +37281,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -34084,9 +37290,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                           {
                             org.apache.thrift.protocol.TList _list356 = iprot.readListBegin();
                              struct.mutations = new java.util.ArrayList<Mutation>(_list356.size);
          -                  @org.apache.thrift.annotation.Nullable Mutation _elem357;
          -                  for (int _i358 = 0; _i358 < _list356.size; ++_i358)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  Mutation _elem357;
          +                  for (int _i358 = 0; _i358 < _list356.size; ++_i358) {
                               _elem357 = new Mutation();
                               _elem357.read(iprot);
                               struct.mutations.add(_elem357);
          @@ -34094,7 +37300,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                             iprot.readListEnd();
                           }
                           struct.setMutationsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -34102,11 +37308,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map359 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map359.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key360;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val361;
          -                  for (int _i362 = 0; _i362 < _map359.size; ++_i362)
          -                  {
          +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map359.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key360;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val361;
          +                  for (int _i362 = 0; _i362 < _map359.size; ++_i362) {
                               _key360 = iprot.readBinary();
                               _val361 = iprot.readBinary();
                               struct.attributes.put(_key360, _val361);
          @@ -34114,7 +37323,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -34125,11 +37334,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -34146,9 +37357,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args str
                   if (struct.mutations != null) {
                     oprot.writeFieldBegin(MUTATIONS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size()));
          -            for (Mutation _iter363 : struct.mutations)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size()));
          +            for (Mutation _iter363 : struct.mutations) {
                         _iter363.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -34158,9 +37369,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args str
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter364 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter364 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter364.getKey());
                         oprot.writeBinary(_iter364.getValue());
                       }
          @@ -34174,17 +37387,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args str
           
               }
           
          -    private static class mutateRow_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRow_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRow_argsTupleScheme getScheme() {
                   return new mutateRow_argsTupleScheme();
                 }
               }
           
           -    private static class mutateRow_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRow_args> {
           +    private static class mutateRow_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<mutateRow_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -34208,8 +37425,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_args stru
                   if (struct.isSetMutations()) {
                     {
                       oprot.writeI32(struct.mutations.size());
          -            for (Mutation _iter365 : struct.mutations)
          -            {
          +            for (Mutation _iter365 : struct.mutations) {
                         _iter365.write(oprot);
                       }
                     }
          @@ -34217,8 +37433,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_args stru
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter366 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter366 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter366.getKey());
                         oprot.writeBinary(_iter366.getValue());
                       }
          @@ -34227,8 +37443,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_args stru
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -34240,11 +37458,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struc
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TList _list367 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list367 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.mutations = new java.util.ArrayList<Mutation>(_list367.size);
          -            @org.apache.thrift.annotation.Nullable Mutation _elem368;
          -            for (int _i369 = 0; _i369 < _list367.size; ++_i369)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            Mutation _elem368;
          +            for (int _i369 = 0; _i369 < _list367.size; ++_i369) {
                         _elem368 = new Mutation();
                         _elem368.read(iprot);
                         struct.mutations.add(_elem368);
          @@ -34254,12 +37473,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struc
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map370 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map370.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key371;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val372;
          -            for (int _i373 = 0; _i373 < _map370.size; ++_i373)
          -            {
          +            org.apache.thrift.protocol.TMap _map370 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map370.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key371;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val372;
          +            for (int _i373 = 0; _i373 < _map370.size; ++_i373) {
                         _key371 = iprot.readBinary();
                         _val372 = iprot.readBinary();
                         struct.attributes.put(_key371, _val372);
          @@ -34270,29 +37492,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struc
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class mutateRow_result implements org.apache.thrift.TBase<mutateRow_result, mutateRow_result._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRow_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRow_result");
          +  public static class mutateRow_result
           +      implements org.apache.thrift.TBase<mutateRow_result, mutateRow_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<mutateRow_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRow_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRow_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRow_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRow_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRow_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io"),
          -      IA((short)2, "ia");
          +      IO((short) 1, "io"), IA((short) 2, "ia");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -34305,7 +37542,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     case 2: // IA
          @@ -34316,12 +37553,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -34353,22 +37590,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRow_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRow_result.class,
          +        metaDataMap);
               }
           
               public mutateRow_result() {
               }
           
          -    public mutateRow_result(
          -      IOError io,
          -      IllegalArgument ia)
          -    {
          +    public mutateRow_result(IOError io, IllegalArgument ia) {
                 this();
                 this.io = io;
                 this.ia = ia;
          @@ -34446,23 +37688,24 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -34470,60 +37713,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRow_result)
          -        return this.equals((mutateRow_result)that);
          +      if (that instanceof mutateRow_result) return this.equals((mutateRow_result) that);
                 return false;
               }
           
               public boolean equals(mutateRow_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -34534,12 +37773,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -34580,13 +37817,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -34619,35 +37858,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRow_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRow_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRow_resultStandardScheme getScheme() {
                   return new mutateRow_resultStandardScheme();
                 }
               }
           
          -    private static class mutateRow_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<mutateRow_result> {
          +    private static class mutateRow_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<mutateRow_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -34656,7 +37900,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_result st
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -34665,7 +37909,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_result st
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -34676,11 +37920,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_result st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -34700,17 +37946,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_result s
           
               }
           
          -    private static class mutateRow_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRow_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRow_resultTupleScheme getScheme() {
                   return new mutateRow_resultTupleScheme();
                 }
               }
           
          -    private static class mutateRow_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRow_result> {
          +    private static class mutateRow_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<mutateRow_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -34728,8 +37978,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_result st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -34744,22 +37996,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_result str
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class mutateRowTs_args implements org.apache.thrift.TBase<mutateRowTs_args, mutateRowTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRowTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRowTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.LIST, (short)3);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)4);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)5);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRowTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRowTs_argsTupleSchemeFactory();
          +  public static class mutateRowTs_args
          +      implements org.apache.thrift.TBase<mutateRowTs_args, mutateRowTs_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<mutateRowTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRowTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.LIST,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 5);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRowTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRowTs_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -34780,32 +38050,36 @@ public static class mutateRowTs_args implements org.apache.thrift.TBase attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * list of mutation commands
                  */
          -      MUTATIONS((short)3, "mutations"),
          +      MUTATIONS((short) 3, "mutations"),
                 /**
                  * timestamp
                  */
          -      TIMESTAMP((short)4, "timestamp"),
          +      TIMESTAMP((short) 4, "timestamp"),
                 /**
                  * Mutation attributes
                  */
          -      ATTRIBUTES((short)5, "attributes");
          +      ATTRIBUTES((short) 5, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -34818,7 +38092,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -34835,12 +38109,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -34874,34 +38148,48 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Mutation.class))));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.MUTATIONS,
          +        new org.apache.thrift.meta_data.FieldMetaData("mutations",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, Mutation.class))));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRowTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRowTs_args.class,
          +        metaDataMap);
               }
           
               public mutateRowTs_args() {
               }
           
          -    public mutateRowTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.util.List<Mutation> mutations,
          -      long timestamp,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public mutateRowTs_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.util.List<Mutation> mutations, long timestamp,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -34923,7 +38211,8 @@ public mutateRowTs_args(mutateRowTs_args other) {
                   this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
                 }
                 if (other.isSetMutations()) {
          -        java.util.List<Mutation> __this__mutations = new java.util.ArrayList<Mutation>(other.mutations.size());
          +        java.util.List<Mutation> __this__mutations =
          +            new java.util.ArrayList<Mutation>(other.mutations.size());
                   for (Mutation other_element : other.mutations) {
                     __this__mutations.add(new Mutation(other_element));
                   }
          @@ -34931,15 +38220,20 @@ public mutateRowTs_args(mutateRowTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -34977,11 +38271,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public mutateRowTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public mutateRowTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public mutateRowTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -35017,7 +38313,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public mutateRowTs_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -35068,7 +38364,8 @@ public java.util.List<Mutation> getMutations() {
               /**
                * list of mutation commands
                */
          -    public mutateRowTs_args setMutations(@org.apache.thrift.annotation.Nullable java.util.List<Mutation> mutations) {
          +    public mutateRowTs_args
          +        setMutations(@org.apache.thrift.annotation.Nullable java.util.List<Mutation> mutations) {
                 this.mutations = mutations;
                 return this;
               }
          @@ -35105,7 +38402,8 @@ public mutateRowTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -35114,7 +38412,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -35123,7 +38422,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -35132,14 +38431,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Mutation attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Mutation attributes
                */
          -    public mutateRowTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public mutateRowTs_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -35159,55 +38459,56 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case MUTATIONS:
          -        if (value == null) {
          -          unsetMutations();
          -        } else {
          -          setMutations((java.util.List<Mutation>)value);
          -        }
          -        break;
          +        case MUTATIONS:
          +          if (value == null) {
          +            unsetMutations();
          +          } else {
          +            setMutations((java.util.List<Mutation>) value);
          +          }
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -35215,102 +38516,92 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case MUTATIONS:
          -        return getMutations();
          +        case MUTATIONS:
          +          return getMutations();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case MUTATIONS:
          -        return isSetMutations();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case MUTATIONS:
          +          return isSetMutations();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRowTs_args)
          -        return this.equals((mutateRowTs_args)that);
          +      if (that instanceof mutateRowTs_args) return this.equals((mutateRowTs_args) that);
                 return false;
               }
           
               public boolean equals(mutateRowTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_mutations = true && this.isSetMutations();
                 boolean that_present_mutations = true && that.isSetMutations();
                 if (this_present_mutations || that_present_mutations) {
          -        if (!(this_present_mutations && that_present_mutations))
          -          return false;
          -        if (!this.mutations.equals(that.mutations))
          -          return false;
          +        if (!(this_present_mutations && that_present_mutations)) return false;
          +        if (!this.mutations.equals(that.mutations)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -35321,22 +38612,18 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetMutations()) ? 131071 : 524287);
          -      if (isSetMutations())
          -        hashCode = hashCode * 8191 + mutations.hashCode();
          +      if (isSetMutations()) hashCode = hashCode * 8191 + mutations.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -35407,11 +38694,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -35466,37 +38755,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRowTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRowTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRowTs_argsStandardScheme getScheme() {
                   return new mutateRowTs_argsStandardScheme();
                 }
               }
           
          -    private static class mutateRowTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<mutateRowTs_args> {
          +    private static class mutateRowTs_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<mutateRowTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -35504,7 +38799,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -35512,7 +38807,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -35521,9 +38816,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st
                           {
                             org.apache.thrift.protocol.TList _list374 = iprot.readListBegin();
                             struct.mutations = new java.util.ArrayList<Mutation>(_list374.size);
          -                  @org.apache.thrift.annotation.Nullable Mutation _elem375;
          -                  for (int _i376 = 0; _i376 < _list374.size; ++_i376)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  Mutation _elem375;
          +                  for (int _i376 = 0; _i376 < _list374.size; ++_i376) {
                               _elem375 = new Mutation();
                               _elem375.read(iprot);
                               struct.mutations.add(_elem375);
          @@ -35531,7 +38826,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st
                             iprot.readListEnd();
                           }
                           struct.setMutationsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -35539,7 +38834,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -35547,11 +38842,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map377 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map377.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key378;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val379;
          -                  for (int _i380 = 0; _i380 < _map377.size; ++_i380)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map377.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key378;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val379;
          +                  for (int _i380 = 0; _i380 < _map377.size; ++_i380) {
                               _key378 = iprot.readBinary();
                               _val379 = iprot.readBinary();
                               struct.attributes.put(_key378, _val379);
          @@ -35559,7 +38857,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -35570,11 +38868,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -35591,9 +38891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_args s
                   if (struct.mutations != null) {
                     oprot.writeFieldBegin(MUTATIONS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size()));
          -            for (Mutation _iter381 : struct.mutations)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size()));
          +            for (Mutation _iter381 : struct.mutations) {
                         _iter381.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -35606,9 +38906,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_args s
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter382 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter382 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter382.getKey());
                         oprot.writeBinary(_iter382.getValue());
                       }
          @@ -35622,17 +38924,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_args s
           
               }
           
          -    private static class mutateRowTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRowTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRowTs_argsTupleScheme getScheme() {
                   return new mutateRowTs_argsTupleScheme();
                 }
               }
           
          -    private static class mutateRowTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRowTs_args> {
          +    private static class mutateRowTs_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<mutateRowTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -35659,8 +38965,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args st
                   if (struct.isSetMutations()) {
                     {
                       oprot.writeI32(struct.mutations.size());
          -            for (Mutation _iter383 : struct.mutations)
          -            {
          +            for (Mutation _iter383 : struct.mutations) {
                         _iter383.write(oprot);
                       }
                     }
          @@ -35671,8 +38976,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args st
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter384 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter384 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter384.getKey());
                         oprot.writeBinary(_iter384.getValue());
                       }
          @@ -35681,8 +38986,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(5);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -35694,11 +39001,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args str
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TList _list385 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list385 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.mutations = new java.util.ArrayList<Mutation>(_list385.size);
          -            @org.apache.thrift.annotation.Nullable Mutation _elem386;
          -            for (int _i387 = 0; _i387 < _list385.size; ++_i387)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            Mutation _elem386;
          +            for (int _i387 = 0; _i387 < _list385.size; ++_i387) {
                         _elem386 = new Mutation();
                         _elem386.read(iprot);
                         struct.mutations.add(_elem386);
          @@ -35712,12 +39020,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args str
                   }
                   if (incoming.get(4)) {
                     {
          -            org.apache.thrift.protocol.TMap _map388 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map388.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key389;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val390;
          -            for (int _i391 = 0; _i391 < _map388.size; ++_i391)
          -            {
          +            org.apache.thrift.protocol.TMap _map388 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map388.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key389;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val390;
          +            for (int _i391 = 0; _i391 < _map388.size; ++_i391) {
                         _key389 = iprot.readBinary();
                         _val390 = iprot.readBinary();
                         struct.attributes.put(_key389, _val390);
          @@ -35728,29 +39039,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_args str
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class mutateRowTs_result implements org.apache.thrift.TBase<mutateRowTs_result, mutateRowTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRowTs_result>   {
           -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRowTs_result");
           +  public static class mutateRowTs_result
           +      implements org.apache.thrift.TBase<mutateRowTs_result, mutateRowTs_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<mutateRowTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRowTs_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRowTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRowTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRowTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRowTs_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io"),
          -      IA((short)2, "ia");
          +      IO((short) 1, "io"), IA((short) 2, "ia");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -35763,7 +39089,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     case 2: // IA
          @@ -35774,12 +39100,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -35811,22 +39137,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRowTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRowTs_result.class,
          +        metaDataMap);
               }
           
               public mutateRowTs_result() {
               }
           
          -    public mutateRowTs_result(
          -      IOError io,
          -      IllegalArgument ia)
          -    {
          +    public mutateRowTs_result(IOError io, IllegalArgument ia) {
                 this();
                 this.io = io;
                 this.ia = ia;
          @@ -35904,23 +39235,24 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -35928,60 +39260,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRowTs_result)
          -        return this.equals((mutateRowTs_result)that);
          +      if (that instanceof mutateRowTs_result) return this.equals((mutateRowTs_result) that);
                 return false;
               }
           
               public boolean equals(mutateRowTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -35992,12 +39320,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -36038,13 +39364,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -36077,35 +39405,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRowTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRowTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRowTs_resultStandardScheme getScheme() {
                   return new mutateRowTs_resultStandardScheme();
                 }
               }
           
           -    private static class mutateRowTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<mutateRowTs_result> {
           +    private static class mutateRowTs_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<mutateRowTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -36114,7 +39447,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -36123,7 +39456,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_result
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -36134,11 +39467,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowTs_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -36158,17 +39493,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowTs_result
           
               }
           
          -    private static class mutateRowTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRowTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRowTs_resultTupleScheme getScheme() {
                   return new mutateRowTs_resultTupleScheme();
                 }
               }
           
           -    private static class mutateRowTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRowTs_result> {
           +    private static class mutateRowTs_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<mutateRowTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -36186,8 +39525,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -36202,20 +39543,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowTs_result s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class mutateRows_args implements org.apache.thrift.TBase<mutateRows_args, mutateRows_args._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRows_args>   {
           -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRows_args");
           +  public static class mutateRows_args
           +      implements org.apache.thrift.TBase<mutateRows_args, mutateRows_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<mutateRows_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRows_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_BATCHES_FIELD_DESC = new org.apache.thrift.protocol.TField("rowBatches", org.apache.thrift.protocol.TType.LIST, (short)2);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)3);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_BATCHES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("rowBatches", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 3);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRows_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRows_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRows_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRows_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -36228,24 +39583,28 @@ public static class mutateRows_args implements org.apache.thrift.TBase attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * list of row batches
                  */
          -      ROW_BATCHES((short)2, "rowBatches"),
          +      ROW_BATCHES((short) 2, "rowBatches"),
                 /**
                  * Mutation attributes
                  */
          -      ATTRIBUTES((short)3, "attributes");
          +      ATTRIBUTES((short) 3, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -36258,7 +39617,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW_BATCHES
          @@ -36271,12 +39630,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -36308,28 +39667,37 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW_BATCHES, new org.apache.thrift.meta_data.FieldMetaData("rowBatches", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, BatchMutation.class))));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW_BATCHES,
          +        new org.apache.thrift.meta_data.FieldMetaData("rowBatches",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, BatchMutation.class))));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRows_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRows_args.class,
          +        metaDataMap);
               }
           
               public mutateRows_args() {
               }
           
           -    public mutateRows_args(
           -      java.nio.ByteBuffer tableName,
           -      java.util.List<BatchMutation> rowBatches,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
           -    {
           +    public mutateRows_args(java.nio.ByteBuffer tableName, java.util.List<BatchMutation> rowBatches,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.rowBatches = rowBatches;
          @@ -36344,22 +39712,28 @@ public mutateRows_args(mutateRows_args other) {
                   this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName);
                 }
                 if (other.isSetRowBatches()) {
           -        java.util.List<BatchMutation> __this__rowBatches = new java.util.ArrayList<BatchMutation>(other.rowBatches.size());
           +        java.util.List<BatchMutation> __this__rowBatches =
           +            new java.util.ArrayList<BatchMutation>(other.rowBatches.size());
                   for (BatchMutation other_element : other.rowBatches) {
                     __this__rowBatches.add(new BatchMutation(other_element));
                   }
                   this.rowBatches = __this__rowBatches;
                 }
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
           +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -36394,11 +39768,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public mutateRows_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public mutateRows_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public mutateRows_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
           @@ -36445,7 +39821,8 @@ public java.util.List<BatchMutation> getRowBatches() {
               /**
                * list of row batches
                */
           -    public mutateRows_args setRowBatches(@org.apache.thrift.annotation.Nullable java.util.List<BatchMutation> rowBatches) {
           +    public mutateRows_args setRowBatches(
           +        @org.apache.thrift.annotation.Nullable java.util.List<BatchMutation> rowBatches) {
                 this.rowBatches = rowBatches;
                 return this;
               }
          @@ -36471,7 +39848,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -36480,14 +39857,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Mutation attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Mutation attributes
                */
           -    public mutateRows_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +    public mutateRows_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -36507,35 +39885,36 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW_BATCHES:
          -        if (value == null) {
          -          unsetRowBatches();
          -        } else {
           -          setRowBatches((java.util.List<BatchMutation>)value);
          -        }
          -        break;
          +        case ROW_BATCHES:
          +          if (value == null) {
          +            unsetRowBatches();
          +          } else {
           +            setRowBatches((java.util.List<BatchMutation>) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -36543,74 +39922,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW_BATCHES:
          -        return getRowBatches();
          +        case ROW_BATCHES:
          +          return getRowBatches();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW_BATCHES:
          -        return isSetRowBatches();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW_BATCHES:
          +          return isSetRowBatches();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRows_args)
          -        return this.equals((mutateRows_args)that);
          +      if (that instanceof mutateRows_args) return this.equals((mutateRows_args) that);
                 return false;
               }
           
               public boolean equals(mutateRows_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_rowBatches = true && this.isSetRowBatches();
                 boolean that_present_rowBatches = true && that.isSetRowBatches();
                 if (this_present_rowBatches || that_present_rowBatches) {
          -        if (!(this_present_rowBatches && that_present_rowBatches))
          -          return false;
          -        if (!this.rowBatches.equals(that.rowBatches))
          -          return false;
          +        if (!(this_present_rowBatches && that_present_rowBatches)) return false;
          +        if (!this.rowBatches.equals(that.rowBatches)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -36621,16 +39994,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRowBatches()) ? 131071 : 524287);
          -      if (isSetRowBatches())
          -        hashCode = hashCode * 8191 + rowBatches.hashCode();
          +      if (isSetRowBatches()) hashCode = hashCode * 8191 + rowBatches.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -36681,11 +40051,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -36728,35 +40100,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRows_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRows_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRows_argsStandardScheme getScheme() {
                   return new mutateRows_argsStandardScheme();
                 }
               }
           
           -    private static class mutateRows_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<mutateRows_args> {
           +    private static class mutateRows_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<mutateRows_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -36764,7 +40141,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args str
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -36773,9 +40150,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args str
                           {
                             org.apache.thrift.protocol.TList _list392 = iprot.readListBegin();
                              struct.rowBatches = new java.util.ArrayList<BatchMutation>(_list392.size);
          -                  @org.apache.thrift.annotation.Nullable BatchMutation _elem393;
          -                  for (int _i394 = 0; _i394 < _list392.size; ++_i394)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  BatchMutation _elem393;
          +                  for (int _i394 = 0; _i394 < _list392.size; ++_i394) {
                               _elem393 = new BatchMutation();
                               _elem393.read(iprot);
                               struct.rowBatches.add(_elem393);
          @@ -36783,7 +40160,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args str
                             iprot.readListEnd();
                           }
                           struct.setRowBatchesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -36791,11 +40168,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args str
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map395 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map395.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key396;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val397;
          -                  for (int _i398 = 0; _i398 < _map395.size; ++_i398)
          -                  {
          +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map395.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key396;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val397;
          +                  for (int _i398 = 0; _i398 < _map395.size; ++_i398) {
                               _key396 = iprot.readBinary();
                               _val397 = iprot.readBinary();
                               struct.attributes.put(_key396, _val397);
          @@ -36803,7 +40183,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args str
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -36814,11 +40194,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_args str
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -36830,9 +40212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_args st
                   if (struct.rowBatches != null) {
                     oprot.writeFieldBegin(ROW_BATCHES_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.rowBatches.size()));
          -            for (BatchMutation _iter399 : struct.rowBatches)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.rowBatches.size()));
          +            for (BatchMutation _iter399 : struct.rowBatches) {
                         _iter399.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -36842,9 +40224,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_args st
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
-            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter400 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
+            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter400 : struct.attributes
+                .entrySet()) {
                         oprot.writeBinary(_iter400.getKey());
                         oprot.writeBinary(_iter400.getValue());
                       }
          @@ -36858,17 +40242,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_args st
           
               }
           
          -    private static class mutateRows_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRows_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRows_argsTupleScheme getScheme() {
                   return new mutateRows_argsTupleScheme();
                 }
               }
           
-    private static class mutateRows_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRows_args> {
+    private static class mutateRows_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<mutateRows_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRows_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRows_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -36886,8 +40274,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRows_args str
                   if (struct.isSetRowBatches()) {
                     {
                       oprot.writeI32(struct.rowBatches.size());
          -            for (BatchMutation _iter401 : struct.rowBatches)
          -            {
          +            for (BatchMutation _iter401 : struct.rowBatches) {
                         _iter401.write(oprot);
                       }
                     }
          @@ -36895,8 +40282,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRows_args str
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
-            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter402 : struct.attributes.entrySet())
          -            {
+            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter402 : struct.attributes
+                .entrySet()) {
                         oprot.writeBinary(_iter402.getKey());
                         oprot.writeBinary(_iter402.getValue());
                       }
          @@ -36905,8 +40292,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRows_args str
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRows_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRows_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -36914,11 +40303,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRows_args stru
                   }
                   if (incoming.get(1)) {
                     {
          -            org.apache.thrift.protocol.TList _list403 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list403 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
             struct.rowBatches = new java.util.ArrayList<BatchMutation>(_list403.size);
          -            @org.apache.thrift.annotation.Nullable BatchMutation _elem404;
          -            for (int _i405 = 0; _i405 < _list403.size; ++_i405)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            BatchMutation _elem404;
          +            for (int _i405 = 0; _i405 < _list403.size; ++_i405) {
                         _elem404 = new BatchMutation();
                         _elem404.read(iprot);
                         struct.rowBatches.add(_elem404);
          @@ -36928,12 +40318,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRows_args stru
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TMap _map406 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
-            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map406.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key407;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val408;
          -            for (int _i409 = 0; _i409 < _map406.size; ++_i409)
          -            {
          +            org.apache.thrift.protocol.TMap _map406 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
+            struct.attributes =
+                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map406.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key407;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val408;
          +            for (int _i409 = 0; _i409 < _map406.size; ++_i409) {
                         _key407 = iprot.readBinary();
                         _val408 = iprot.readBinary();
                         struct.attributes.put(_key407, _val408);
          @@ -36944,29 +40337,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRows_args stru
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
+        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class mutateRows_result implements org.apache.thrift.TBase<mutateRows_result, mutateRows_result._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRows_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRows_result");
+  public static class mutateRows_result
+      implements org.apache.thrift.TBase<mutateRows_result, mutateRows_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<mutateRows_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRows_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRows_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRows_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRows_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRows_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io"),
          -      IA((short)2, "ia");
          +      IO((short) 1, "io"), IA((short) 2, "ia");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -36979,7 +40387,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     case 2: // IA
          @@ -36990,12 +40398,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -37027,22 +40435,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRows_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRows_result.class,
          +        metaDataMap);
               }
           
               public mutateRows_result() {
               }
           
          -    public mutateRows_result(
          -      IOError io,
          -      IllegalArgument ia)
          -    {
          +    public mutateRows_result(IOError io, IllegalArgument ia) {
                 this();
                 this.io = io;
                 this.ia = ia;
          @@ -37120,23 +40533,24 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -37144,60 +40558,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRows_result)
          -        return this.equals((mutateRows_result)that);
          +      if (that instanceof mutateRows_result) return this.equals((mutateRows_result) that);
                 return false;
               }
           
               public boolean equals(mutateRows_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -37208,12 +40618,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -37254,13 +40662,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -37293,35 +40703,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRows_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRows_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRows_resultStandardScheme getScheme() {
                   return new mutateRows_resultStandardScheme();
                 }
               }
           
-    private static class mutateRows_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<mutateRows_result> {
+    private static class mutateRows_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<mutateRows_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -37330,7 +40745,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_result s
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -37339,7 +40754,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_result s
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -37350,11 +40765,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRows_result s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -37374,17 +40791,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRows_result
           
               }
           
          -    private static class mutateRows_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRows_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRows_resultTupleScheme getScheme() {
                   return new mutateRows_resultTupleScheme();
                 }
               }
           
-    private static class mutateRows_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRows_result> {
+    private static class mutateRows_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<mutateRows_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRows_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRows_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -37402,8 +40823,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRows_result s
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRows_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRows_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -37418,21 +40841,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRows_result st
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
+        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class mutateRowsTs_args implements org.apache.thrift.TBase<mutateRowsTs_args, mutateRowsTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRowsTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRowsTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_BATCHES_FIELD_DESC = new org.apache.thrift.protocol.TField("rowBatches", org.apache.thrift.protocol.TType.LIST, (short)2);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRowsTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRowsTs_argsTupleSchemeFactory();
+  public static class mutateRowsTs_args
+      implements org.apache.thrift.TBase<mutateRowsTs_args, mutateRowsTs_args._Fields>,
+      java.io.Serializable, Cloneable, Comparable<mutateRowsTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRowsTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_BATCHES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("rowBatches", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRowsTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRowsTs_argsTupleSchemeFactory();
           
               /**
                * name of table
@@ -37449,28 +40888,32 @@ public static class mutateRowsTs_args implements org.apache.thrift.TBase
-    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
+    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * list of row batches
                  */
          -      ROW_BATCHES((short)2, "rowBatches"),
          +      ROW_BATCHES((short) 2, "rowBatches"),
                 /**
                  * timestamp
                  */
          -      TIMESTAMP((short)3, "timestamp"),
          +      TIMESTAMP((short) 3, "timestamp"),
                 /**
                  * Mutation attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -37483,7 +40926,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW_BATCHES
          @@ -37498,12 +40941,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -37537,31 +40980,43 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW_BATCHES, new org.apache.thrift.meta_data.FieldMetaData("rowBatches", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, BatchMutation.class))));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW_BATCHES,
          +        new org.apache.thrift.meta_data.FieldMetaData("rowBatches",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, BatchMutation.class))));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRowsTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRowsTs_args.class,
          +        metaDataMap);
               }
           
               public mutateRowsTs_args() {
               }
           
          -    public mutateRowsTs_args(
          -      java.nio.ByteBuffer tableName,
-      java.util.List<BatchMutation> rowBatches,
          -      long timestamp,
-      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
+    public mutateRowsTs_args(java.nio.ByteBuffer tableName,
+        java.util.List<BatchMutation> rowBatches, long timestamp,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.rowBatches = rowBatches;
          @@ -37579,7 +41034,8 @@ public mutateRowsTs_args(mutateRowsTs_args other) {
                   this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName);
                 }
                 if (other.isSetRowBatches()) {
-        java.util.List<BatchMutation> __this__rowBatches = new java.util.ArrayList<BatchMutation>(other.rowBatches.size());
+        java.util.List<BatchMutation> __this__rowBatches =
+            new java.util.ArrayList<BatchMutation>(other.rowBatches.size());
                   for (BatchMutation other_element : other.rowBatches) {
                     __this__rowBatches.add(new BatchMutation(other_element));
                   }
          @@ -37587,15 +41043,20 @@ public mutateRowsTs_args(mutateRowsTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
-        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
-        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
+            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
+                other.attributes.size());
+        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
+            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -37632,11 +41093,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public mutateRowsTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public mutateRowsTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public mutateRowsTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
@@ -37683,7 +41146,8 @@ public java.util.List<BatchMutation> getRowBatches() {
               /**
                * list of row batches
                */
-    public mutateRowsTs_args setRowBatches(@org.apache.thrift.annotation.Nullable java.util.List<BatchMutation> rowBatches) {
+    public mutateRowsTs_args setRowBatches(
+        @org.apache.thrift.annotation.Nullable java.util.List<BatchMutation> rowBatches) {
                 this.rowBatches = rowBatches;
                 return this;
               }
          @@ -37720,7 +41184,8 @@ public mutateRowsTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -37729,7 +41194,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -37738,7 +41204,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
-        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
+        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -37747,14 +41213,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Mutation attributes
                */
               @org.apache.thrift.annotation.Nullable
-    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
+    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Mutation attributes
                */
-    public mutateRowsTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
+    public mutateRowsTs_args setAttributes(
+        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -37774,43 +41241,44 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW_BATCHES:
          -        if (value == null) {
          -          unsetRowBatches();
          -        } else {
-          setRowBatches((java.util.List<BatchMutation>)value);
          -        }
          -        break;
          +        case ROW_BATCHES:
          +          if (value == null) {
          +            unsetRowBatches();
          +          } else {
+            setRowBatches((java.util.List<BatchMutation>) value);
          +          }
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
-          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
+            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -37818,88 +41286,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW_BATCHES:
          -        return getRowBatches();
          +        case ROW_BATCHES:
          +          return getRowBatches();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW_BATCHES:
          -        return isSetRowBatches();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW_BATCHES:
          +          return isSetRowBatches();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRowsTs_args)
          -        return this.equals((mutateRowsTs_args)that);
          +      if (that instanceof mutateRowsTs_args) return this.equals((mutateRowsTs_args) that);
                 return false;
               }
           
               public boolean equals(mutateRowsTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_rowBatches = true && this.isSetRowBatches();
                 boolean that_present_rowBatches = true && that.isSetRowBatches();
                 if (this_present_rowBatches || that_present_rowBatches) {
          -        if (!(this_present_rowBatches && that_present_rowBatches))
          -          return false;
          -        if (!this.rowBatches.equals(that.rowBatches))
          -          return false;
          +        if (!(this_present_rowBatches && that_present_rowBatches)) return false;
          +        if (!this.rowBatches.equals(that.rowBatches)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -37910,18 +41370,15 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRowBatches()) ? 131071 : 524287);
          -      if (isSetRowBatches())
          -        hashCode = hashCode * 8191 + rowBatches.hashCode();
          +      if (isSetRowBatches()) hashCode = hashCode * 8191 + rowBatches.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -37982,11 +41439,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -38033,37 +41492,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRowsTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRowsTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRowsTs_argsStandardScheme getScheme() {
                   return new mutateRowsTs_argsStandardScheme();
                 }
               }
           
          -    private static class mutateRowsTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<mutateRowsTs_args> {
          +    private static class mutateRowsTs_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<mutateRowsTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -38071,7 +41536,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -38080,9 +41545,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args s
                           {
                             org.apache.thrift.protocol.TList _list410 = iprot.readListBegin();
                             struct.rowBatches = new java.util.ArrayList<BatchMutation>(_list410.size);
          -                  @org.apache.thrift.annotation.Nullable BatchMutation _elem411;
          -                  for (int _i412 = 0; _i412 < _list410.size; ++_i412)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  BatchMutation _elem411;
          +                  for (int _i412 = 0; _i412 < _list410.size; ++_i412) {
                               _elem411 = new BatchMutation();
                               _elem411.read(iprot);
                               struct.rowBatches.add(_elem411);
          @@ -38090,7 +41555,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args s
                             iprot.readListEnd();
                           }
                           struct.setRowBatchesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -38098,7 +41563,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -38106,11 +41571,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map413 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map413.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key414;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val415;
          -                  for (int _i416 = 0; _i416 < _map413.size; ++_i416)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map413.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key414;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val415;
          +                  for (int _i416 = 0; _i416 < _map413.size; ++_i416) {
                               _key414 = iprot.readBinary();
                               _val415 = iprot.readBinary();
                               struct.attributes.put(_key414, _val415);
          @@ -38118,7 +41586,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args s
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -38129,11 +41597,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -38145,9 +41615,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_args
                   if (struct.rowBatches != null) {
                     oprot.writeFieldBegin(ROW_BATCHES_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.rowBatches.size()));
          -            for (BatchMutation _iter417 : struct.rowBatches)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.rowBatches.size()));
          +            for (BatchMutation _iter417 : struct.rowBatches) {
                         _iter417.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -38160,9 +41630,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_args
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter418 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter418 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter418.getKey());
                         oprot.writeBinary(_iter418.getValue());
                       }
          @@ -38176,17 +41648,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_args
           
               }
           
          -    private static class mutateRowsTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRowsTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRowsTs_argsTupleScheme getScheme() {
                   return new mutateRowsTs_argsTupleScheme();
                 }
               }
           
          -    private static class mutateRowsTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRowsTs_args> {
          +    private static class mutateRowsTs_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<mutateRowsTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -38207,8 +41683,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args s
                   if (struct.isSetRowBatches()) {
                     {
                       oprot.writeI32(struct.rowBatches.size());
          -            for (BatchMutation _iter419 : struct.rowBatches)
          -            {
          +            for (BatchMutation _iter419 : struct.rowBatches) {
                         _iter419.write(oprot);
                       }
                     }
          @@ -38219,8 +41694,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args s
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter420 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter420 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter420.getKey());
                         oprot.writeBinary(_iter420.getValue());
                       }
          @@ -38229,8 +41704,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args s
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -38238,11 +41715,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args st
                   }
                   if (incoming.get(1)) {
                     {
          -            org.apache.thrift.protocol.TList _list421 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list421 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.rowBatches = new java.util.ArrayList<BatchMutation>(_list421.size);
          -            @org.apache.thrift.annotation.Nullable BatchMutation _elem422;
          -            for (int _i423 = 0; _i423 < _list421.size; ++_i423)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            BatchMutation _elem422;
          +            for (int _i423 = 0; _i423 < _list421.size; ++_i423) {
                         _elem422 = new BatchMutation();
                         _elem422.read(iprot);
                         struct.rowBatches.add(_elem422);
          @@ -38256,12 +41734,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args st
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map424 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map424.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key425;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val426;
          -            for (int _i427 = 0; _i427 < _map424.size; ++_i427)
          -            {
          +            org.apache.thrift.protocol.TMap _map424 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map424.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key425;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val426;
          +            for (int _i427 = 0; _i427 < _map424.size; ++_i427) {
                         _key425 = iprot.readBinary();
                         _val426 = iprot.readBinary();
                         struct.attributes.put(_key425, _val426);
          @@ -38272,29 +41753,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_args st
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class mutateRowsTs_result implements org.apache.thrift.TBase<mutateRowsTs_result, mutateRowsTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRowsTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRowsTs_result");
          +  public static class mutateRowsTs_result
          +      implements org.apache.thrift.TBase<mutateRowsTs_result, mutateRowsTs_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<mutateRowsTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRowsTs_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRowsTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRowsTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRowsTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRowsTs_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io"),
          -      IA((short)2, "ia");
          +      IO((short) 1, "io"), IA((short) 2, "ia");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -38307,7 +41803,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     case 2: // IA
          @@ -38318,12 +41814,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -38355,22 +41851,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRowsTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRowsTs_result.class,
          +        metaDataMap);
               }
           
               public mutateRowsTs_result() {
               }
           
          -    public mutateRowsTs_result(
          -      IOError io,
          -      IllegalArgument ia)
          -    {
          +    public mutateRowsTs_result(IOError io, IllegalArgument ia) {
                 this();
                 this.io = io;
                 this.ia = ia;
          @@ -38448,23 +41949,24 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -38472,60 +41974,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRowsTs_result)
          -        return this.equals((mutateRowsTs_result)that);
          +      if (that instanceof mutateRowsTs_result) return this.equals((mutateRowsTs_result) that);
                 return false;
               }
           
               public boolean equals(mutateRowsTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -38536,12 +42034,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -38582,13 +42078,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -38621,35 +42119,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRowsTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRowsTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRowsTs_resultStandardScheme getScheme() {
                   return new mutateRowsTs_resultStandardScheme();
                 }
               }
           
          -    private static class mutateRowsTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<mutateRowsTs_result> {
          +    private static class mutateRowsTs_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<mutateRowsTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -38658,7 +42161,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -38667,7 +42170,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_result
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -38678,11 +42181,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRowsTs_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -38702,17 +42207,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRowsTs_resul
           
               }
           
          -    private static class mutateRowsTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRowsTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRowsTs_resultTupleScheme getScheme() {
                   return new mutateRowsTs_resultTupleScheme();
                 }
               }
           
          -    private static class mutateRowsTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRowsTs_result> {
          +    private static class mutateRowsTs_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<mutateRowsTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -38730,8 +42239,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -38746,21 +42257,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRowsTs_result
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class atomicIncrement_args implements org.apache.thrift.TBase<atomicIncrement_args, atomicIncrement_args._Fields>, java.io.Serializable, Cloneable, Comparable<atomicIncrement_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("atomicIncrement_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.I64, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new atomicIncrement_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new atomicIncrement_argsTupleSchemeFactory();
          +  public static class atomicIncrement_args
          +      implements org.apache.thrift.TBase<atomicIncrement_args, atomicIncrement_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<atomicIncrement_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("atomicIncrement_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.I64,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new atomicIncrement_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new atomicIncrement_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -38779,26 +42306,30 @@ public static class atomicIncrement_args implements org.apache.thrift.TBase
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -38811,7 +42342,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -38826,12 +42357,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -38865,28 +42396,38 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.VALUE,
          +        new org.apache.thrift.meta_data.FieldMetaData("value",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(atomicIncrement_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(atomicIncrement_args.class,
          +        metaDataMap);
               }
           
               public atomicIncrement_args() {
               }
           
          -    public atomicIncrement_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer column,
          -      long value)
          -    {
          +    public atomicIncrement_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long value) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -38941,11 +42482,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public atomicIncrement_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public atomicIncrement_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public atomicIncrement_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -38981,11 +42524,12 @@ public java.nio.ByteBuffer bufferForRow() {
                * row to increment
                */
               public atomicIncrement_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          -    public atomicIncrement_args setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
          +    public atomicIncrement_args
          +        setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
                 return this;
               }
          @@ -39021,11 +42565,13 @@ public java.nio.ByteBuffer bufferForColumn() {
                * name of column
                */
               public atomicIncrement_args setColumn(byte[] column) {
          -      this.column = column == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(column.clone());
          +      this.column =
          +          column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
                 return this;
               }
           
          -    public atomicIncrement_args setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
          +    public atomicIncrement_args
          +        setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
                 this.column = org.apache.thrift.TBaseHelper.copyBinary(column);
                 return this;
               }
          @@ -39062,7 +42608,8 @@ public atomicIncrement_args setValue(long value) {
               }
           
               public void unsetValue() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID);
               }
           
               /** Returns true if field value is set (has been assigned a value) and false otherwise */
          @@ -39071,54 +42618,56 @@ public boolean isSetValue() {
               }
           
               public void setValueIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setColumn((byte[])value);
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
                     } else {
          -            setColumn((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setColumn((byte[]) value);
          +            } else {
          +              setColumn((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case VALUE:
          -        if (value == null) {
          -          unsetValue();
          -        } else {
          -          setValue((java.lang.Long)value);
          -        }
          -        break;
          +        case VALUE:
          +          if (value == null) {
          +            unsetValue();
          +          } else {
          +            setValue((java.lang.Long) value);
          +          }
          +          break;
           
                 }
               }
          @@ -39126,88 +42675,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
          -      case VALUE:
          -        return getValue();
          +        case VALUE:
          +          return getValue();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case COLUMN:
          -        return isSetColumn();
          -      case VALUE:
          -        return isSetValue();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case COLUMN:
          +          return isSetColumn();
          +        case VALUE:
          +          return isSetValue();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof atomicIncrement_args)
          -        return this.equals((atomicIncrement_args)that);
          +      if (that instanceof atomicIncrement_args) return this.equals((atomicIncrement_args) that);
                 return false;
               }
           
               public boolean equals(atomicIncrement_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 boolean this_present_value = true;
                 boolean that_present_value = true;
                 if (this_present_value || that_present_value) {
          -        if (!(this_present_value && that_present_value))
          -          return false;
          -        if (this.value != that.value)
          -          return false;
          +        if (!(this_present_value && that_present_value)) return false;
          +        if (this.value != that.value) return false;
                 }
           
                 return true;
          @@ -39218,16 +42759,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(value);
           
          @@ -39290,11 +42828,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -39341,37 +42881,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class atomicIncrement_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class atomicIncrement_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public atomicIncrement_argsStandardScheme getScheme() {
                   return new atomicIncrement_argsStandardScheme();
                 }
               }
           
           -    private static class atomicIncrement_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<atomicIncrement_args> {
           +    private static class atomicIncrement_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<atomicIncrement_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -39379,7 +42925,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_arg
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -39387,7 +42933,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_arg
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -39395,7 +42941,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_arg
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.column = iprot.readBinary();
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -39403,7 +42949,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_arg
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.value = iprot.readI64();
                           struct.setValueIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -39414,11 +42960,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_arg
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, atomicIncrement_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, atomicIncrement_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -39446,17 +42994,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, atomicIncrement_ar
           
               }
           
          -    private static class atomicIncrement_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class atomicIncrement_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public atomicIncrement_argsTupleScheme getScheme() {
                   return new atomicIncrement_argsTupleScheme();
                 }
               }
           
           -    private static class atomicIncrement_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<atomicIncrement_args> {
           +    private static class atomicIncrement_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<atomicIncrement_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -39486,8 +43038,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_arg
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -39508,32 +43062,48 @@ public void read(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_args
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class atomicIncrement_result implements org.apache.thrift.TBase<atomicIncrement_result, atomicIncrement_result._Fields>, java.io.Serializable, Cloneable, Comparable<atomicIncrement_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("atomicIncrement_result");
          -
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I64, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new atomicIncrement_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new atomicIncrement_resultTupleSchemeFactory();
          +  public static class atomicIncrement_result
           +      implements org.apache.thrift.TBase<atomicIncrement_result, atomicIncrement_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<atomicIncrement_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("atomicIncrement_result");
          +
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I64,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new atomicIncrement_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new atomicIncrement_resultTupleSchemeFactory();
           
               public long success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io"),
          -      IA((short)2, "ia");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io"), IA((short) 2, "ia");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -39546,7 +43116,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -39559,12 +43129,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -39598,25 +43168,32 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(atomicIncrement_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(atomicIncrement_result.class,
          +        metaDataMap);
               }
           
               public atomicIncrement_result() {
               }
           
          -    public atomicIncrement_result(
          -      long success,
          -      IOError io,
          -      IllegalArgument ia)
          -    {
          +    public atomicIncrement_result(long success, IOError io, IllegalArgument ia) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -39661,7 +43238,8 @@ public atomicIncrement_result setSuccess(long success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -39670,7 +43248,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -39723,31 +43302,32 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Long)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Long) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -39755,74 +43335,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof atomicIncrement_result)
          -        return this.equals((atomicIncrement_result)that);
          +      if (that instanceof atomicIncrement_result) return this.equals((atomicIncrement_result) that);
                 return false;
               }
           
               public boolean equals(atomicIncrement_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -39835,12 +43409,10 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(success);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -39891,13 +43463,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -39934,37 +43508,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class atomicIncrement_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class atomicIncrement_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public atomicIncrement_resultStandardScheme getScheme() {
                   return new atomicIncrement_resultStandardScheme();
                 }
               }
           
           -    private static class atomicIncrement_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<atomicIncrement_result> {
           +    private static class atomicIncrement_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<atomicIncrement_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -39972,7 +43552,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_res
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.success = iprot.readI64();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -39981,7 +43561,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_res
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -39990,7 +43570,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_res
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -40001,11 +43581,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, atomicIncrement_res
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, atomicIncrement_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, atomicIncrement_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -40030,17 +43612,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, atomicIncrement_re
           
               }
           
          -    private static class atomicIncrement_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class atomicIncrement_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public atomicIncrement_resultTupleScheme getScheme() {
                   return new atomicIncrement_resultTupleScheme();
                 }
               }
           
           -    private static class atomicIncrement_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<atomicIncrement_result> {
           +    private static class atomicIncrement_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<atomicIncrement_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -40064,8 +43650,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_res
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     struct.success = iprot.readI64();
          @@ -40084,21 +43672,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, atomicIncrement_resu
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class deleteAll_args implements org.apache.thrift.TBase<deleteAll_args, deleteAll_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAll_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteAll_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteAll_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteAll_argsTupleSchemeFactory();
          +  public static class deleteAll_args
           +      implements org.apache.thrift.TBase<deleteAll_args, deleteAll_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<deleteAll_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteAll_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteAll_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteAll_argsTupleSchemeFactory();
           
               /**
                * name of table
           @@ -40115,28 +43719,32 @@ public static class deleteAll_args implements org.apache.thrift.TBase<deleteAll_args, deleteAll_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAll_args> {
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * Row to update
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * name of column whose value is to be deleted
                  */
          -      COLUMN((short)3, "column"),
          +      COLUMN((short) 3, "column"),
                 /**
                  * Delete attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -40149,7 +43757,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -40164,12 +43772,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -40201,30 +43809,42 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAll_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAll_args.class,
          +        metaDataMap);
               }
           
               public deleteAll_args() {
               }
           
          -    public deleteAll_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer column,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public deleteAll_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -40246,15 +43866,20 @@ public deleteAll_args(deleteAll_args other) {
                   this.column = org.apache.thrift.TBaseHelper.copyBinary(other.column);
                 }
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
           +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -40290,11 +43915,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public deleteAll_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public deleteAll_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public deleteAll_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -40330,7 +43957,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * Row to update
                */
               public deleteAll_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -40370,11 +43997,13 @@ public java.nio.ByteBuffer bufferForColumn() {
                * name of column whose value is to be deleted
                */
               public deleteAll_args setColumn(byte[] column) {
          -      this.column = column == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(column.clone());
          +      this.column =
          +          column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
                 return this;
               }
           
          -    public deleteAll_args setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
          +    public deleteAll_args
          +        setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
                 this.column = org.apache.thrift.TBaseHelper.copyBinary(column);
                 return this;
               }
          @@ -40400,7 +44029,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -40409,14 +44038,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Delete attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Delete attributes
                */
           -    public deleteAll_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +    public deleteAll_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -40436,51 +44066,52 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setColumn((byte[])value);
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
                     } else {
          -            setColumn((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setColumn((byte[]) value);
          +            } else {
          +              setColumn((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -40488,88 +44119,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case COLUMN:
          -        return isSetColumn();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case COLUMN:
          +          return isSetColumn();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteAll_args)
          -        return this.equals((deleteAll_args)that);
          +      if (that instanceof deleteAll_args) return this.equals((deleteAll_args) that);
                 return false;
               }
           
               public boolean equals(deleteAll_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -40580,20 +44203,16 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -40654,11 +44273,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -40709,35 +44330,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteAll_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAll_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAll_argsStandardScheme getScheme() {
                   return new deleteAll_argsStandardScheme();
                 }
               }
           
           -    private static class deleteAll_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteAll_args> {
           +    private static class deleteAll_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<deleteAll_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -40745,7 +44371,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -40753,7 +44379,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -40761,7 +44387,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.column = iprot.readBinary();
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -40769,11 +44395,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map428 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map428.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key429;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val430;
          -                  for (int _i431 = 0; _i431 < _map428.size; ++_i431)
          -                  {
          +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map428.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key429;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val430;
          +                  for (int _i431 = 0; _i431 < _map428.size; ++_i431) {
                               _key429 = iprot.readBinary();
                               _val430 = iprot.readBinary();
                               struct.attributes.put(_key429, _val430);
          @@ -40781,7 +44410,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_args stru
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -40792,11 +44421,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_args stru
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAll_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAll_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -40818,9 +44449,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAll_args str
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter432 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter432 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter432.getKey());
                         oprot.writeBinary(_iter432.getValue());
                       }
          @@ -40834,17 +44467,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAll_args str
           
               }
           
          -    private static class deleteAll_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAll_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAll_argsTupleScheme getScheme() {
                   return new deleteAll_argsTupleScheme();
                 }
               }
           
           -    private static class deleteAll_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteAll_args> {
           +    private static class deleteAll_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<deleteAll_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAll_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAll_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -40871,8 +44508,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAll_args stru
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter433 : struct.attributes.entrySet())
           -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter433 : struct.attributes
           +                .entrySet()) {
                         oprot.writeBinary(_iter433.getKey());
                         oprot.writeBinary(_iter433.getValue());
                       }
          @@ -40881,8 +44518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAll_args stru
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAll_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAll_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -40898,12 +44537,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAll_args struc
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map434 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map434.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key435;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val436;
          -            for (int _i437 = 0; _i437 < _map434.size; ++_i437)
          -            {
          +            org.apache.thrift.protocol.TMap _map434 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map434.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key435;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val436;
          +            for (int _i437 = 0; _i437 < _map434.size; ++_i437) {
                         _key435 = iprot.readBinary();
                         _val436 = iprot.readBinary();
                         struct.attributes.put(_key435, _val436);
          @@ -40914,26 +44556,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAll_args struc
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class deleteAll_result implements org.apache.thrift.TBase<deleteAll_result, deleteAll_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAll_result>   {
           -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteAll_result");
           +  public static class deleteAll_result
           +      implements org.apache.thrift.TBase<deleteAll_result, deleteAll_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<deleteAll_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteAll_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteAll_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteAll_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteAll_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteAll_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -40946,7 +44602,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -40955,12 +44611,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -40992,19 +44648,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAll_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAll_result.class,
          +        metaDataMap);
               }
           
               public deleteAll_result() {
               }
           
          -    public deleteAll_result(
          -      IOError io)
          -    {
          +    public deleteAll_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -41052,15 +44711,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -41068,46 +44728,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteAll_result)
          -        return this.equals((deleteAll_result)that);
          +      if (that instanceof deleteAll_result) return this.equals((deleteAll_result) that);
                 return false;
               }
           
               public boolean equals(deleteAll_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -41118,8 +44776,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -41150,13 +44807,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -41181,35 +44840,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteAll_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAll_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAll_resultStandardScheme getScheme() {
                   return new deleteAll_resultStandardScheme();
                 }
               }
           
           -    private static class deleteAll_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteAll_result> {
           +    private static class deleteAll_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<deleteAll_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -41218,7 +44882,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_result st
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -41229,11 +44893,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAll_result st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAll_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAll_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -41248,17 +44914,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAll_result s
           
               }
           
          -    private static class deleteAll_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAll_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAll_resultTupleScheme getScheme() {
                   return new deleteAll_resultTupleScheme();
                 }
               }
           
           -    private static class deleteAll_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteAll_result> {
           +    private static class deleteAll_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<deleteAll_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAll_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAll_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -41270,8 +44940,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAll_result st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAll_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAll_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -41281,22 +44953,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAll_result str
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class deleteAllTs_args implements org.apache.thrift.TBase<deleteAllTs_args, deleteAllTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAllTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteAllTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)4);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)5);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteAllTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteAllTs_argsTupleSchemeFactory();
          +  public static class deleteAllTs_args
           +      implements org.apache.thrift.TBase<deleteAllTs_args, deleteAllTs_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<deleteAllTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteAllTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 5);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteAllTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteAllTs_argsTupleSchemeFactory();
           
               /**
                * name of table
           @@ -41317,32 +45007,36 @@ public static class deleteAllTs_args implements org.apache.thrift.TBase
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * Row to update
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * name of column whose value is to be deleted
                  */
          -      COLUMN((short)3, "column"),
          +      COLUMN((short) 3, "column"),
                 /**
                  * timestamp
                  */
          -      TIMESTAMP((short)4, "timestamp"),
          +      TIMESTAMP((short) 4, "timestamp"),
                 /**
                  * Delete attributes
                  */
          -      ATTRIBUTES((short)5, "attributes");
          +      ATTRIBUTES((short) 5, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -41355,7 +45049,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -41372,12 +45066,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -41411,33 +45105,47 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllTs_args.class,
          +        metaDataMap);
               }
           
               public deleteAllTs_args() {
               }
           
          -    public deleteAllTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer column,
          -      long timestamp,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public deleteAllTs_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, long timestamp,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -41463,15 +45171,20 @@ public deleteAllTs_args(deleteAllTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -41509,11 +45222,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public deleteAllTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public deleteAllTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public deleteAllTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -41549,7 +45264,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * Row to update
                */
               public deleteAllTs_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -41589,11 +45304,13 @@ public java.nio.ByteBuffer bufferForColumn() {
                * name of column whose value is to be deleted
                */
               public deleteAllTs_args setColumn(byte[] column) {
          -      this.column = column == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(column.clone());
          +      this.column =
          +          column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
                 return this;
               }
           
          -    public deleteAllTs_args setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
          +    public deleteAllTs_args
          +        setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
                 this.column = org.apache.thrift.TBaseHelper.copyBinary(column);
                 return this;
               }
          @@ -41630,7 +45347,8 @@ public deleteAllTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -41639,7 +45357,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -41648,7 +45367,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -41657,14 +45376,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Delete attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Delete attributes
                */
          -    public deleteAllTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public deleteAllTs_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -41684,59 +45404,60 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setColumn((byte[])value);
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
                     } else {
          -            setColumn((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setColumn((byte[]) value);
          +            } else {
          +              setColumn((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -41744,102 +45465,92 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case COLUMN:
          -        return isSetColumn();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case COLUMN:
          +          return isSetColumn();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteAllTs_args)
          -        return this.equals((deleteAllTs_args)that);
          +      if (that instanceof deleteAllTs_args) return this.equals((deleteAllTs_args) that);
                 return false;
               }
           
               public boolean equals(deleteAllTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -41850,22 +45561,18 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -41936,11 +45643,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -41995,37 +45704,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteAllTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllTs_argsStandardScheme getScheme() {
                   return new deleteAllTs_argsStandardScheme();
                 }
               }
           
          -    private static class deleteAllTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteAllTs_args> {
          +    private static class deleteAllTs_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<deleteAllTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -42033,7 +45748,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -42041,7 +45756,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -42049,7 +45764,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.column = iprot.readBinary();
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -42057,7 +45772,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -42065,11 +45780,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map438 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map438.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key439;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val440;
          -                  for (int _i441 = 0; _i441 < _map438.size; ++_i441)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map438.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key439;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val440;
          +                  for (int _i441 = 0; _i441 < _map438.size; ++_i441) {
                               _key439 = iprot.readBinary();
                               _val440 = iprot.readBinary();
                               struct.attributes.put(_key439, _val440);
          @@ -42077,7 +45795,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args st
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -42088,11 +45806,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllTs_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -42117,9 +45837,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllTs_args s
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter442 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter442 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter442.getKey());
                         oprot.writeBinary(_iter442.getValue());
                       }
          @@ -42133,17 +45855,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllTs_args s
           
               }
           
          -    private static class deleteAllTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllTs_argsTupleScheme getScheme() {
                   return new deleteAllTs_argsTupleScheme();
                 }
               }
           
          -    private static class deleteAllTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteAllTs_args> {
          +    private static class deleteAllTs_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<deleteAllTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -42176,8 +45902,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args st
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter443 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter443 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter443.getKey());
                         oprot.writeBinary(_iter443.getValue());
                       }
          @@ -42186,8 +45912,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(5);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -42207,12 +45935,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args str
                   }
                   if (incoming.get(4)) {
                     {
          -            org.apache.thrift.protocol.TMap _map444 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map444.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key445;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val446;
          -            for (int _i447 = 0; _i447 < _map444.size; ++_i447)
          -            {
          +            org.apache.thrift.protocol.TMap _map444 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map444.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key445;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val446;
          +            for (int _i447 = 0; _i447 < _map444.size; ++_i447) {
                         _key445 = iprot.readBinary();
                         _val446 = iprot.readBinary();
                         struct.attributes.put(_key445, _val446);
          @@ -42223,26 +45954,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_args str
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class deleteAllTs_result implements org.apache.thrift.TBase<deleteAllTs_result, deleteAllTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAllTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteAllTs_result");
          +  public static class deleteAllTs_result
          +      implements org.apache.thrift.TBase<deleteAllTs_result, deleteAllTs_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<deleteAllTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteAllTs_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteAllTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteAllTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteAllTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteAllTs_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -42255,7 +46000,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -42264,12 +46009,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -42301,19 +46046,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllTs_result.class,
          +        metaDataMap);
               }
           
               public deleteAllTs_result() {
               }
           
          -    public deleteAllTs_result(
          -      IOError io)
          -    {
          +    public deleteAllTs_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -42361,15 +46109,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -42377,46 +46126,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteAllTs_result)
          -        return this.equals((deleteAllTs_result)that);
          +      if (that instanceof deleteAllTs_result) return this.equals((deleteAllTs_result) that);
                 return false;
               }
           
               public boolean equals(deleteAllTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -42427,8 +46174,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -42459,13 +46205,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -42490,35 +46238,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteAllTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllTs_resultStandardScheme getScheme() {
                   return new deleteAllTs_resultStandardScheme();
                 }
               }
           
          -    private static class deleteAllTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteAllTs_result> {
          +    private static class deleteAllTs_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<deleteAllTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -42527,7 +46280,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -42538,11 +46291,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllTs_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllTs_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -42557,17 +46312,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllTs_result
           
               }
           
          -    private static class deleteAllTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllTs_resultTupleScheme getScheme() {
                   return new deleteAllTs_resultTupleScheme();
                 }
               }
           
          -    private static class deleteAllTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteAllTs_result> {
          +    private static class deleteAllTs_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<deleteAllTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -42579,8 +46338,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -42590,20 +46351,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllTs_result s
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class deleteAllRow_args implements org.apache.thrift.TBase<deleteAllRow_args, deleteAllRow_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAllRow_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteAllRow_args");
          +  public static class deleteAllRow_args
          +      implements org.apache.thrift.TBase<deleteAllRow_args, deleteAllRow_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<deleteAllRow_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteAllRow_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)3);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 3);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteAllRow_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteAllRow_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteAllRow_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteAllRow_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -42616,24 +46391,28 @@ public static class deleteAllRow_args implements org.apache.thrift.TBase<deleteAllRow_args, deleteAllRow_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAllRow_args>
          -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * key of the row to be completely deleted.
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * Delete attributes
                  */
          -      ATTRIBUTES((short)3, "attributes");
          +      ATTRIBUTES((short) 3, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -42646,7 +46425,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -42659,12 +46438,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -42696,27 +46475,36 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllRow_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllRow_args.class,
          +        metaDataMap);
               }
           
               public deleteAllRow_args() {
               }
           
          -    public deleteAllRow_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public deleteAllRow_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -42734,15 +46522,20 @@ public deleteAllRow_args(deleteAllRow_args other) {
                   this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
                 }
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
           +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -42777,11 +46570,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public deleteAllRow_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public deleteAllRow_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public deleteAllRow_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -42817,11 +46612,12 @@ public java.nio.ByteBuffer bufferForRow() {
                * key of the row to be completely deleted.
                */
               public deleteAllRow_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          -    public deleteAllRow_args setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
          +    public deleteAllRow_args
          +        setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
                 return this;
               }
          @@ -42847,7 +46643,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -42856,14 +46652,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Delete attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Delete attributes
                */
           -    public deleteAllRow_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +    public deleteAllRow_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -42883,39 +46680,40 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -42923,74 +46721,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteAllRow_args)
          -        return this.equals((deleteAllRow_args)that);
          +      if (that instanceof deleteAllRow_args) return this.equals((deleteAllRow_args) that);
                 return false;
               }
           
               public boolean equals(deleteAllRow_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -43001,16 +46793,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -43061,11 +46850,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -43108,35 +46899,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteAllRow_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllRow_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllRow_argsStandardScheme getScheme() {
                   return new deleteAllRow_argsStandardScheme();
                 }
               }
           
           -    private static class deleteAllRow_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteAllRow_args> {
           +    private static class deleteAllRow_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<deleteAllRow_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -43144,7 +46940,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -43152,7 +46948,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -43160,11 +46956,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map448 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map448.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key449;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val450;
          -                  for (int _i451 = 0; _i451 < _map448.size; ++_i451)
          -                  {
           +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                          2 * _map448.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key449;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val450;
          +                  for (int _i451 = 0; _i451 < _map448.size; ++_i451) {
                               _key449 = iprot.readBinary();
                               _val450 = iprot.readBinary();
                               struct.attributes.put(_key449, _val450);
          @@ -43172,7 +46971,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_args s
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -43183,11 +46982,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRow_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRow_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -43204,9 +47005,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRow_args
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter452 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter452 : struct.attributes
           +                .entrySet()) {
                         oprot.writeBinary(_iter452.getKey());
                         oprot.writeBinary(_iter452.getValue());
                       }
          @@ -43220,17 +47023,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRow_args
           
               }
           
          -    private static class deleteAllRow_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllRow_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllRow_argsTupleScheme getScheme() {
                   return new deleteAllRow_argsTupleScheme();
                 }
               }
           
           -    private static class deleteAllRow_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteAllRow_args> {
           +    private static class deleteAllRow_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<deleteAllRow_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -43251,8 +47058,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args s
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter453 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter453 : struct.attributes
           +                .entrySet()) {
                         oprot.writeBinary(_iter453.getKey());
                         oprot.writeBinary(_iter453.getValue());
                       }
          @@ -43261,8 +47068,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args s
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -43274,12 +47083,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args st
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TMap _map454 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map454.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key455;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val456;
          -            for (int _i457 = 0; _i457 < _map454.size; ++_i457)
          -            {
          +            org.apache.thrift.protocol.TMap _map454 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
           +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map454.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key455;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val456;
          +            for (int _i457 = 0; _i457 < _map454.size; ++_i457) {
                         _key455 = iprot.readBinary();
                         _val456 = iprot.readBinary();
                         struct.attributes.put(_key455, _val456);
          @@ -43290,26 +47102,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_args st
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class deleteAllRow_result implements org.apache.thrift.TBase<deleteAllRow_result, deleteAllRow_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAllRow_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteAllRow_result");
          +  public static class deleteAllRow_result
           +      implements org.apache.thrift.TBase<deleteAllRow_result, deleteAllRow_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<deleteAllRow_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteAllRow_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteAllRow_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteAllRow_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteAllRow_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteAllRow_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -43322,7 +47148,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -43331,12 +47157,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -43368,19 +47194,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllRow_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllRow_result.class,
          +        metaDataMap);
               }
           
               public deleteAllRow_result() {
               }
           
          -    public deleteAllRow_result(
          -      IOError io)
          -    {
          +    public deleteAllRow_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -43428,15 +47257,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -43444,46 +47274,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteAllRow_result)
          -        return this.equals((deleteAllRow_result)that);
          +      if (that instanceof deleteAllRow_result) return this.equals((deleteAllRow_result) that);
                 return false;
               }
           
               public boolean equals(deleteAllRow_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -43494,8 +47322,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -43526,13 +47353,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -43557,35 +47386,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteAllRow_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllRow_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllRow_resultStandardScheme getScheme() {
                   return new deleteAllRow_resultStandardScheme();
                 }
               }
           
           -    private static class deleteAllRow_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteAllRow_result> {
           +    private static class deleteAllRow_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<deleteAllRow_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -43594,7 +47428,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -43605,11 +47439,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRow_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRow_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRow_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -43624,17 +47460,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRow_resul
           
               }
           
          -    private static class deleteAllRow_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllRow_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllRow_resultTupleScheme getScheme() {
                   return new deleteAllRow_resultTupleScheme();
                 }
               }
           
           -    private static class deleteAllRow_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteAllRow_result> {
           +    private static class deleteAllRow_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<deleteAllRow_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -43646,8 +47486,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -43657,32 +47499,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRow_result
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class increment_args implements org.apache.thrift.TBase<increment_args, increment_args._Fields>, java.io.Serializable, Cloneable, Comparable<increment_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("increment_args");
          +  public static class increment_args
           +      implements org.apache.thrift.TBase<increment_args, increment_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<increment_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("increment_args");
           
          -    private static final org.apache.thrift.protocol.TField INCREMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("increment", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField INCREMENT_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("increment", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new increment_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new increment_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new increment_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new increment_argsTupleSchemeFactory();
           
               /**
                * The single increment to apply
                */
               public @org.apache.thrift.annotation.Nullable TIncrement increment; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * The single increment to apply
                  */
          -      INCREMENT((short)1, "increment");
          +      INCREMENT((short) 1, "increment");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -43695,7 +47551,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // INCREMENT
                       return INCREMENT;
                     default:
          @@ -43704,12 +47560,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -43741,19 +47597,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.INCREMENT, new org.apache.thrift.meta_data.FieldMetaData("increment", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIncrement.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.INCREMENT,
          +        new org.apache.thrift.meta_data.FieldMetaData("increment",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIncrement.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(increment_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(increment_args.class,
          +        metaDataMap);
               }
           
               public increment_args() {
               }
           
          -    public increment_args(
          -      TIncrement increment)
          -    {
          +    public increment_args(TIncrement increment) {
                 this();
                 this.increment = increment;
               }
          @@ -43787,7 +47646,8 @@ public TIncrement getIncrement() {
               /**
                * The single increment to apply
                */
          -    public increment_args setIncrement(@org.apache.thrift.annotation.Nullable TIncrement increment) {
          +    public increment_args
          +        setIncrement(@org.apache.thrift.annotation.Nullable TIncrement increment) {
                 this.increment = increment;
                 return this;
               }
          @@ -43807,15 +47667,16 @@ public void setIncrementIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case INCREMENT:
          -        if (value == null) {
          -          unsetIncrement();
          -        } else {
          -          setIncrement((TIncrement)value);
          -        }
          -        break;
          +        case INCREMENT:
          +          if (value == null) {
          +            unsetIncrement();
          +          } else {
          +            setIncrement((TIncrement) value);
          +          }
          +          break;
           
                 }
               }
          @@ -43823,46 +47684,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case INCREMENT:
          -        return getIncrement();
          +        case INCREMENT:
          +          return getIncrement();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case INCREMENT:
          -        return isSetIncrement();
          +        case INCREMENT:
          +          return isSetIncrement();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof increment_args)
          -        return this.equals((increment_args)that);
          +      if (that instanceof increment_args) return this.equals((increment_args) that);
                 return false;
               }
           
               public boolean equals(increment_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_increment = true && this.isSetIncrement();
                 boolean that_present_increment = true && that.isSetIncrement();
                 if (this_present_increment || that_present_increment) {
          -        if (!(this_present_increment && that_present_increment))
          -          return false;
          -        if (!this.increment.equals(that.increment))
          -          return false;
          +        if (!(this_present_increment && that_present_increment)) return false;
          +        if (!this.increment.equals(that.increment)) return false;
                 }
           
                 return true;
          @@ -43873,8 +47732,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIncrement()) ? 131071 : 524287);
          -      if (isSetIncrement())
          -        hashCode = hashCode * 8191 + increment.hashCode();
          +      if (isSetIncrement()) hashCode = hashCode * 8191 + increment.hashCode();
           
                 return hashCode;
               }
          @@ -43905,11 +47763,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -43939,35 +47799,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class increment_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class increment_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public increment_argsStandardScheme getScheme() {
                   return new increment_argsStandardScheme();
                 }
               }
           
          -    private static class increment_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<increment_args> {
          +    private static class increment_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<increment_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, increment_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, increment_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -43976,7 +47841,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_args stru
                           struct.increment = new TIncrement();
                           struct.increment.read(iprot);
                           struct.setIncrementIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -43987,11 +47852,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_args stru
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, increment_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, increment_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -44006,17 +47873,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, increment_args str
           
               }
           
          -    private static class increment_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class increment_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public increment_argsTupleScheme getScheme() {
                   return new increment_argsTupleScheme();
                 }
               }
           
          -    private static class increment_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<increment_args> {
          +    private static class increment_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<increment_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, increment_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, increment_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIncrement()) {
                     optionals.set(0);
          @@ -44028,8 +47899,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, increment_args stru
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, increment_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, increment_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.increment = new TIncrement();
          @@ -44039,26 +47912,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, increment_args struc
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class increment_result implements org.apache.thrift.TBase<increment_result, increment_result._Fields>, java.io.Serializable, Cloneable, Comparable<increment_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("increment_result");
          +  public static class increment_result
          +      implements org.apache.thrift.TBase<increment_result, increment_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<increment_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("increment_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new increment_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new increment_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new increment_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new increment_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -44071,7 +47958,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -44080,12 +47967,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -44117,19 +48004,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(increment_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(increment_result.class,
          +        metaDataMap);
               }
           
               public increment_result() {
               }
           
          -    public increment_result(
          -      IOError io)
          -    {
          +    public increment_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -44177,15 +48067,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -44193,46 +48084,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof increment_result)
          -        return this.equals((increment_result)that);
          +      if (that instanceof increment_result) return this.equals((increment_result) that);
                 return false;
               }
           
               public boolean equals(increment_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -44243,8 +48132,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -44275,13 +48163,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -44306,35 +48196,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class increment_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class increment_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public increment_resultStandardScheme getScheme() {
                   return new increment_resultStandardScheme();
                 }
               }
           
          -    private static class increment_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<increment_result> {
          +    private static class increment_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<increment_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, increment_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, increment_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -44343,7 +48238,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_result st
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -44354,11 +48249,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_result st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, increment_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, increment_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -44373,17 +48270,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, increment_result s
           
               }
           
          -    private static class increment_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class increment_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public increment_resultTupleScheme getScheme() {
                   return new increment_resultTupleScheme();
                 }
               }
           
          -    private static class increment_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<increment_result> {
          +    private static class increment_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<increment_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, increment_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, increment_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -44395,8 +48296,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, increment_result st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, increment_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, increment_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -44406,32 +48309,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, increment_result str
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class incrementRows_args implements org.apache.thrift.TBase<incrementRows_args, incrementRows_args._Fields>, java.io.Serializable, Cloneable, Comparable<incrementRows_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("incrementRows_args");
          +  public static class incrementRows_args
          +      implements org.apache.thrift.TBase<incrementRows_args, incrementRows_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<incrementRows_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("incrementRows_args");
           
          -    private static final org.apache.thrift.protocol.TField INCREMENTS_FIELD_DESC = new org.apache.thrift.protocol.TField("increments", org.apache.thrift.protocol.TType.LIST, (short)1);
          +    private static final org.apache.thrift.protocol.TField INCREMENTS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("increments", org.apache.thrift.protocol.TType.LIST,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new incrementRows_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new incrementRows_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new incrementRows_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new incrementRows_argsTupleSchemeFactory();
           
               /**
                * The list of increments
                */
               public @org.apache.thrift.annotation.Nullable java.util.List<TIncrement> increments; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * The list of increments
                  */
          -      INCREMENTS((short)1, "increments");
          +      INCREMENTS((short) 1, "increments");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -44444,7 +48361,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // INCREMENTS
                       return INCREMENTS;
                     default:
          @@ -44453,12 +48370,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -44490,20 +48407,23 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.INCREMENTS, new org.apache.thrift.meta_data.FieldMetaData("increments", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIncrement.class))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.INCREMENTS,
          +        new org.apache.thrift.meta_data.FieldMetaData("increments",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TIncrement.class))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(incrementRows_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(incrementRows_args.class,
          +        metaDataMap);
               }
           
               public incrementRows_args() {
               }
           
          -    public incrementRows_args(
          -      java.util.List<TIncrement> increments)
          -    {
          +    public incrementRows_args(java.util.List<TIncrement> increments) {
                 this();
                 this.increments = increments;
               }
          @@ -44513,7 +48433,8 @@ public incrementRows_args(
                */
               public incrementRows_args(incrementRows_args other) {
                 if (other.isSetIncrements()) {
          -        java.util.List<TIncrement> __this__increments = new java.util.ArrayList<TIncrement>(other.increments.size());
          +        java.util.List<TIncrement> __this__increments =
          +            new java.util.ArrayList<TIncrement>(other.increments.size());
                   for (TIncrement other_element : other.increments) {
                     __this__increments.add(new TIncrement(other_element));
                   }
          @@ -44557,7 +48478,8 @@ public java.util.List<TIncrement> getIncrements() {
               /**
                * The list of increments
                */
          -    public incrementRows_args setIncrements(@org.apache.thrift.annotation.Nullable java.util.List<TIncrement> increments) {
          +    public incrementRows_args setIncrements(
          +        @org.apache.thrift.annotation.Nullable java.util.List<TIncrement> increments) {
                 this.increments = increments;
                 return this;
               }
          @@ -44577,15 +48499,16 @@ public void setIncrementsIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case INCREMENTS:
          -        if (value == null) {
          -          unsetIncrements();
          -        } else {
          -          setIncrements((java.util.List<TIncrement>)value);
          -        }
          -        break;
          +        case INCREMENTS:
          +          if (value == null) {
          +            unsetIncrements();
          +          } else {
          +            setIncrements((java.util.List<TIncrement>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -44593,46 +48516,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case INCREMENTS:
          -        return getIncrements();
          +        case INCREMENTS:
          +          return getIncrements();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case INCREMENTS:
          -        return isSetIncrements();
          +        case INCREMENTS:
          +          return isSetIncrements();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof incrementRows_args)
          -        return this.equals((incrementRows_args)that);
          +      if (that instanceof incrementRows_args) return this.equals((incrementRows_args) that);
                 return false;
               }
           
               public boolean equals(incrementRows_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_increments = true && this.isSetIncrements();
                 boolean that_present_increments = true && that.isSetIncrements();
                 if (this_present_increments || that_present_increments) {
          -        if (!(this_present_increments && that_present_increments))
          -          return false;
          -        if (!this.increments.equals(that.increments))
          -          return false;
          +        if (!(this_present_increments && that_present_increments)) return false;
          +        if (!this.increments.equals(that.increments)) return false;
                 }
           
                 return true;
          @@ -44643,8 +48564,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIncrements()) ? 131071 : 524287);
          -      if (isSetIncrements())
          -        hashCode = hashCode * 8191 + increments.hashCode();
          +      if (isSetIncrements()) hashCode = hashCode * 8191 + increments.hashCode();
           
                 return hashCode;
               }
          @@ -44675,11 +48595,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -44706,35 +48628,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class incrementRows_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class incrementRows_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public incrementRows_argsStandardScheme getScheme() {
                   return new incrementRows_argsStandardScheme();
                 }
               }
           
          -    private static class incrementRows_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<incrementRows_args> {
          +    private static class incrementRows_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<incrementRows_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -44743,9 +48670,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_args
                           {
                             org.apache.thrift.protocol.TList _list458 = iprot.readListBegin();
                         struct.increments = new java.util.ArrayList<TIncrement>(_list458.size);
          -                  @org.apache.thrift.annotation.Nullable TIncrement _elem459;
          -                  for (int _i460 = 0; _i460 < _list458.size; ++_i460)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TIncrement _elem459;
          +                  for (int _i460 = 0; _i460 < _list458.size; ++_i460) {
                               _elem459 = new TIncrement();
                               _elem459.read(iprot);
                               struct.increments.add(_elem459);
          @@ -44753,7 +48680,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_args
                             iprot.readListEnd();
                           }
                           struct.setIncrementsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -44764,20 +48691,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, incrementRows_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, incrementRows_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.increments != null) {
                     oprot.writeFieldBegin(INCREMENTS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.increments.size()));
          -            for (TIncrement _iter461 : struct.increments)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.increments.size()));
          +            for (TIncrement _iter461 : struct.increments) {
                         _iter461.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -44790,17 +48719,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, incrementRows_args
           
               }
           
          -    private static class incrementRows_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class incrementRows_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public incrementRows_argsTupleScheme getScheme() {
                   return new incrementRows_argsTupleScheme();
                 }
               }
           
          -    private static class incrementRows_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<incrementRows_args> {
          +    private static class incrementRows_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<incrementRows_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, incrementRows_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, incrementRows_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIncrements()) {
                     optionals.set(0);
          @@ -44809,8 +48742,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, incrementRows_args
                   if (struct.isSetIncrements()) {
                     {
                       oprot.writeI32(struct.increments.size());
          -            for (TIncrement _iter462 : struct.increments)
          -            {
          +            for (TIncrement _iter462 : struct.increments) {
                         _iter462.write(oprot);
                       }
                     }
          @@ -44818,16 +48750,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, incrementRows_args
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, incrementRows_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, incrementRows_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list463 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list463 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.increments = new java.util.ArrayList<TIncrement>(_list463.size);
          -            @org.apache.thrift.annotation.Nullable TIncrement _elem464;
          -            for (int _i465 = 0; _i465 < _list463.size; ++_i465)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TIncrement _elem464;
          +            for (int _i465 = 0; _i465 < _list463.size; ++_i465) {
                         _elem464 = new TIncrement();
                         _elem464.read(iprot);
                         struct.increments.add(_elem464);
          @@ -44838,26 +48773,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, incrementRows_args s
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class incrementRows_result implements org.apache.thrift.TBase<incrementRows_result, incrementRows_result._Fields>, java.io.Serializable, Cloneable, Comparable<incrementRows_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("incrementRows_result");
          +  public static class incrementRows_result
          +      implements org.apache.thrift.TBase<incrementRows_result, incrementRows_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<incrementRows_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("incrementRows_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new incrementRows_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new incrementRows_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new incrementRows_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new incrementRows_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -44870,7 +48819,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -44879,12 +48828,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -44916,19 +48865,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(incrementRows_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(incrementRows_result.class,
          +        metaDataMap);
               }
           
               public incrementRows_result() {
               }
           
          -    public incrementRows_result(
          -      IOError io)
          -    {
          +    public incrementRows_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -44976,15 +48928,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -44992,46 +48945,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof incrementRows_result)
          -        return this.equals((incrementRows_result)that);
          +      if (that instanceof incrementRows_result) return this.equals((incrementRows_result) that);
                 return false;
               }
           
               public boolean equals(incrementRows_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -45042,8 +48993,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -45074,13 +49024,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -45105,35 +49057,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class incrementRows_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class incrementRows_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public incrementRows_resultStandardScheme getScheme() {
                   return new incrementRows_resultStandardScheme();
                 }
               }
           
          -    private static class incrementRows_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<incrementRows_result> {
          +    private static class incrementRows_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<incrementRows_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -45142,7 +49099,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_resul
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -45153,11 +49110,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, incrementRows_resul
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, incrementRows_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, incrementRows_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -45172,17 +49131,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, incrementRows_resu
           
               }
           
          -    private static class incrementRows_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class incrementRows_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public incrementRows_resultTupleScheme getScheme() {
                   return new incrementRows_resultTupleScheme();
                 }
               }
           
          -    private static class incrementRows_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<incrementRows_result> {
          +    private static class incrementRows_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<incrementRows_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, incrementRows_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, incrementRows_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -45194,8 +49157,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, incrementRows_resul
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, incrementRows_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, incrementRows_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -45205,21 +49170,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, incrementRows_result
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class deleteAllRowTs_args implements org.apache.thrift.TBase<deleteAllRowTs_args, deleteAllRowTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAllRowTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteAllRowTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteAllRowTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteAllRowTs_argsTupleSchemeFactory();
          +  public static class deleteAllRowTs_args
          +      implements org.apache.thrift.TBase<deleteAllRowTs_args, deleteAllRowTs_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<deleteAllRowTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteAllRowTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteAllRowTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteAllRowTs_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -45236,28 +49217,32 @@ public static class deleteAllRowTs_args implements org.apache.thrift.TBase
          -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * key of the row to be completely deleted.
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * timestamp
                  */
          -      TIMESTAMP((short)3, "timestamp"),
          +      TIMESTAMP((short) 3, "timestamp"),
                 /**
                  * Delete attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -45270,7 +49255,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -45285,12 +49270,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -45324,30 +49309,41 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllRowTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllRowTs_args.class,
          +        metaDataMap);
               }
           
               public deleteAllRowTs_args() {
               }
           
          -    public deleteAllRowTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      long timestamp,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public deleteAllRowTs_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        long timestamp, java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -45369,15 +49365,20 @@ public deleteAllRowTs_args(deleteAllRowTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -45414,11 +49415,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public deleteAllRowTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public deleteAllRowTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public deleteAllRowTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -45454,11 +49457,12 @@ public java.nio.ByteBuffer bufferForRow() {
                * key of the row to be completely deleted.
                */
               public deleteAllRowTs_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          -    public deleteAllRowTs_args setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
          +    public deleteAllRowTs_args
          +        setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
                 return this;
               }
          @@ -45495,7 +49499,8 @@ public deleteAllRowTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -45504,7 +49509,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -45513,7 +49519,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
          -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -45522,14 +49528,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Delete attributes
                */
               @org.apache.thrift.annotation.Nullable
          -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Delete attributes
                */
          -    public deleteAllRowTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public deleteAllRowTs_args setAttributes(
          +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -45549,47 +49556,48 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
          -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
          +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -45597,88 +49605,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteAllRowTs_args)
          -        return this.equals((deleteAllRowTs_args)that);
          +      if (that instanceof deleteAllRowTs_args) return this.equals((deleteAllRowTs_args) that);
                 return false;
               }
           
               public boolean equals(deleteAllRowTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -45689,18 +49689,15 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -45761,11 +49758,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -45812,37 +49811,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteAllRowTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllRowTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllRowTs_argsStandardScheme getScheme() {
                   return new deleteAllRowTs_argsStandardScheme();
                 }
               }
           
          -    private static class deleteAllRowTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteAllRowTs_args> {
          +    private static class deleteAllRowTs_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<deleteAllRowTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -45850,7 +49855,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -45858,7 +49863,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -45866,7 +49871,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -45874,11 +49879,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map466 = iprot.readMapBegin();
          -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map466.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key467;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val468;
          -                  for (int _i469 = 0; _i469 < _map466.size; ++_i469)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map466.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key467;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val468;
          +                  for (int _i469 = 0; _i469 < _map466.size; ++_i469) {
                               _key467 = iprot.readBinary();
                               _val468 = iprot.readBinary();
                               struct.attributes.put(_key467, _val468);
          @@ -45886,7 +49894,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_args
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -45897,11 +49905,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRowTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRowTs_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -45921,9 +49931,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRowTs_arg
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter470 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter470 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter470.getKey());
                         oprot.writeBinary(_iter470.getValue());
                       }
          @@ -45937,17 +49949,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRowTs_arg
           
               }
           
          -    private static class deleteAllRowTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllRowTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllRowTs_argsTupleScheme getScheme() {
                   return new deleteAllRowTs_argsTupleScheme();
                 }
               }
           
          -    private static class deleteAllRowTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteAllRowTs_args> {
          +    private static class deleteAllRowTs_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<deleteAllRowTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -45974,8 +49990,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter471 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter471 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter471.getKey());
                         oprot.writeBinary(_iter471.getValue());
                       }
          @@ -45984,8 +50000,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -46001,12 +50019,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map472 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map472.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key473;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val474;
          -            for (int _i475 = 0; _i475 < _map472.size; ++_i475)
          -            {
          +            org.apache.thrift.protocol.TMap _map472 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map472.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key473;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val474;
          +            for (int _i475 = 0; _i475 < _map472.size; ++_i475) {
                         _key473 = iprot.readBinary();
                         _val474 = iprot.readBinary();
                         struct.attributes.put(_key473, _val474);
          @@ -46017,26 +50038,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_args
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class deleteAllRowTs_result implements org.apache.thrift.TBase<deleteAllRowTs_result, deleteAllRowTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteAllRowTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteAllRowTs_result");
          +  public static class deleteAllRowTs_result
           +      implements org.apache.thrift.TBase<deleteAllRowTs_result, deleteAllRowTs_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<deleteAllRowTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteAllRowTs_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteAllRowTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteAllRowTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteAllRowTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteAllRowTs_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -46049,7 +50084,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -46058,12 +50093,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -46095,19 +50130,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllRowTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteAllRowTs_result.class,
          +        metaDataMap);
               }
           
               public deleteAllRowTs_result() {
               }
           
          -    public deleteAllRowTs_result(
          -      IOError io)
          -    {
          +    public deleteAllRowTs_result(IOError io) {
                 this();
                 this.io = io;
               }
          @@ -46155,15 +50193,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -46171,46 +50210,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteAllRowTs_result)
          -        return this.equals((deleteAllRowTs_result)that);
          +      if (that instanceof deleteAllRowTs_result) return this.equals((deleteAllRowTs_result) that);
                 return false;
               }
           
               public boolean equals(deleteAllRowTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -46221,8 +50258,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -46253,13 +50289,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -46284,35 +50322,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteAllRowTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllRowTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllRowTs_resultStandardScheme getScheme() {
                   return new deleteAllRowTs_resultStandardScheme();
                 }
               }
           
           -    private static class deleteAllRowTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteAllRowTs_result> {
           +    private static class deleteAllRowTs_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<deleteAllRowTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -46321,7 +50364,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_resu
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -46332,11 +50375,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteAllRowTs_resu
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRowTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRowTs_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -46351,17 +50396,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteAllRowTs_res
           
               }
           
          -    private static class deleteAllRowTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteAllRowTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteAllRowTs_resultTupleScheme getScheme() {
                   return new deleteAllRowTs_resultTupleScheme();
                 }
               }
           
           -    private static class deleteAllRowTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteAllRowTs_result> {
           +    private static class deleteAllRowTs_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<deleteAllRowTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -46373,8 +50422,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_resu
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -46384,20 +50435,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteAllRowTs_resul
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerOpenWithScan_args implements org.apache.thrift.TBase<scannerOpenWithScan_args, scannerOpenWithScan_args._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenWithScan_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenWithScan_args");
          +  public static class scannerOpenWithScan_args implements
           +      org.apache.thrift.TBase<scannerOpenWithScan_args, scannerOpenWithScan_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerOpenWithScan_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenWithScan_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField SCAN_FIELD_DESC = new org.apache.thrift.protocol.TField("scan", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)3);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField SCAN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("scan", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 3);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenWithScan_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenWithScan_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenWithScan_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenWithScan_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -46410,24 +50475,28 @@ public static class scannerOpenWithScan_args implements org.apache.thrift.TBase<
               /**
                * Scan attributes
                */
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * Scan instance
                  */
          -      SCAN((short)2, "scan"),
          +      SCAN((short) 2, "scan"),
                 /**
                  * Scan attributes
                  */
          -      ATTRIBUTES((short)3, "attributes");
          +      ATTRIBUTES((short) 3, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -46440,7 +50509,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // SCAN
          @@ -46453,12 +50522,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -46490,27 +50559,36 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.SCAN, new org.apache.thrift.meta_data.FieldMetaData("scan", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TScan.class)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.SCAN,
          +        new org.apache.thrift.meta_data.FieldMetaData("scan",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TScan.class)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithScan_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithScan_args.class,
          +        metaDataMap);
               }
           
               public scannerOpenWithScan_args() {
               }
           
          -    public scannerOpenWithScan_args(
          -      java.nio.ByteBuffer tableName,
          -      TScan scan,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public scannerOpenWithScan_args(java.nio.ByteBuffer tableName, TScan scan,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.scan = scan;
          @@ -46528,15 +50606,20 @@ public scannerOpenWithScan_args(scannerOpenWithScan_args other) {
                   this.scan = new TScan(other.scan);
                 }
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
           +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -46571,11 +50654,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public scannerOpenWithScan_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public scannerOpenWithScan_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public scannerOpenWithScan_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -46632,7 +50717,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -46641,14 +50726,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Scan attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Scan attributes
                */
           -    public scannerOpenWithScan_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public scannerOpenWithScan_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -46668,35 +50754,36 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case SCAN:
          -        if (value == null) {
          -          unsetScan();
          -        } else {
          -          setScan((TScan)value);
          -        }
          -        break;
          +        case SCAN:
          +          if (value == null) {
          +            unsetScan();
          +          } else {
          +            setScan((TScan) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -46704,32 +50791,35 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case SCAN:
          -        return getScan();
          +        case SCAN:
          +          return getScan();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case SCAN:
          -        return isSetScan();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case SCAN:
          +          return isSetScan();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -46737,41 +50827,33 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof scannerOpenWithScan_args)
          -        return this.equals((scannerOpenWithScan_args)that);
          +        return this.equals((scannerOpenWithScan_args) that);
                 return false;
               }
           
               public boolean equals(scannerOpenWithScan_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_scan = true && this.isSetScan();
                 boolean that_present_scan = true && that.isSetScan();
                 if (this_present_scan || that_present_scan) {
          -        if (!(this_present_scan && that_present_scan))
          -          return false;
          -        if (!this.scan.equals(that.scan))
          -          return false;
          +        if (!(this_present_scan && that_present_scan)) return false;
          +        if (!this.scan.equals(that.scan)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -46782,16 +50864,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetScan()) ? 131071 : 524287);
          -      if (isSetScan())
          -        hashCode = hashCode * 8191 + scan.hashCode();
          +      if (isSetScan()) hashCode = hashCode * 8191 + scan.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -46842,11 +50921,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -46892,35 +50973,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenWithScan_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithScan_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithScan_argsStandardScheme getScheme() {
                   return new scannerOpenWithScan_argsStandardScheme();
                 }
               }
           
           -    private static class scannerOpenWithScan_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithScan_args> {
           +    private static class scannerOpenWithScan_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithScan_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -46928,7 +51014,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -46937,7 +51023,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan
                           struct.scan = new TScan();
                           struct.scan.read(iprot);
                           struct.setScanIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -46945,11 +51031,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map476 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map476.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key477;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val478;
          -                  for (int _i479 = 0; _i479 < _map476.size; ++_i479)
          -                  {
          +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map476.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key477;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val478;
          +                  for (int _i479 = 0; _i479 < _map476.size; ++_i479) {
                               _key477 = iprot.readBinary();
                               _val478 = iprot.readBinary();
                               struct.attributes.put(_key477, _val478);
          @@ -46957,7 +51046,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -46968,11 +51057,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithScan_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithScan_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -46989,9 +51080,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSca
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter480 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter480 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter480.getKey());
                         oprot.writeBinary(_iter480.getValue());
                       }
          @@ -47005,17 +51098,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSca
           
               }
           
          -    private static class scannerOpenWithScan_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithScan_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithScan_argsTupleScheme getScheme() {
                   return new scannerOpenWithScan_argsTupleScheme();
                 }
               }
           
           -    private static class scannerOpenWithScan_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithScan_args> {
           +    private static class scannerOpenWithScan_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithScan_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -47036,8 +51133,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter481 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter481 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter481.getKey());
                         oprot.writeBinary(_iter481.getValue());
                       }
          @@ -47046,8 +51143,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -47060,12 +51159,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TMap _map482 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map482.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key483;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val484;
          -            for (int _i485 = 0; _i485 < _map482.size; ++_i485)
          -            {
          +            org.apache.thrift.protocol.TMap _map482 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map482.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key483;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val484;
          +            for (int _i485 = 0; _i485 < _map482.size; ++_i485) {
                         _key483 = iprot.readBinary();
                         _val484 = iprot.readBinary();
                         struct.attributes.put(_key483, _val484);
          @@ -47076,29 +51178,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerOpenWithScan_result implements org.apache.thrift.TBase<scannerOpenWithScan_result, scannerOpenWithScan_result._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenWithScan_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenWithScan_result");
          +  public static class scannerOpenWithScan_result implements
           +      org.apache.thrift.TBase<scannerOpenWithScan_result, scannerOpenWithScan_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerOpenWithScan_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenWithScan_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenWithScan_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenWithScan_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenWithScan_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenWithScan_resultTupleSchemeFactory();
           
               public int success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -47111,7 +51228,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -47122,12 +51239,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -47161,22 +51278,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32          , "ScannerID")));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32,
          +                "ScannerID")));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithScan_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(scannerOpenWithScan_result.class, metaDataMap);
               }
           
               public scannerOpenWithScan_result() {
               }
           
          -    public scannerOpenWithScan_result(
          -      int success,
          -      IOError io)
          -    {
          +    public scannerOpenWithScan_result(int success, IOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -47216,7 +51338,8 @@ public scannerOpenWithScan_result setSuccess(int success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -47225,7 +51348,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -47253,23 +51377,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Integer)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -47277,27 +51402,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -47305,32 +51433,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof scannerOpenWithScan_result)
          -        return this.equals((scannerOpenWithScan_result)that);
          +        return this.equals((scannerOpenWithScan_result) that);
                 return false;
               }
           
               public boolean equals(scannerOpenWithScan_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -47343,8 +51465,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + success;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -47385,13 +51506,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -47420,37 +51543,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenWithScan_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithScan_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithScan_resultStandardScheme getScheme() {
                   return new scannerOpenWithScan_resultStandardScheme();
                 }
               }
           
           -    private static class scannerOpenWithScan_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithScan_result> {
          +    private static class scannerOpenWithScan_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithScan_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          scannerOpenWithScan_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -47458,7 +51587,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.success = iprot.readI32();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -47467,7 +51596,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -47478,11 +51607,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithScan
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithScan_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          scannerOpenWithScan_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -47502,17 +51633,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSca
           
               }
           
          -    private static class scannerOpenWithScan_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithScan_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithScan_resultTupleScheme getScheme() {
                   return new scannerOpenWithScan_resultTupleScheme();
                 }
               }
           
           -    private static class scannerOpenWithScan_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithScan_result> {
          +    private static class scannerOpenWithScan_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithScan_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          scannerOpenWithScan_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -47530,8 +51665,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readI32();
          @@ -47545,65 +51682,81 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithScan_
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerOpen_args implements org.apache.thrift.TBase<scannerOpen_args, scannerOpen_args._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpen_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpen_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpen_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpen_argsTupleSchemeFactory();
          +  public static class scannerOpen_args
           +      implements org.apache.thrift.TBase<scannerOpen_args, scannerOpen_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerOpen_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpen_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpen_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpen_argsTupleSchemeFactory();
           
               /**
                * name of table
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow; // required
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
                public @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns; // required
               /**
                * Scan attributes
                */
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,
           +        java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
          -       * Starting row in table to scan.
          -       * Send "" (empty string) to start at the first row.
          +       * Starting row in table to scan. Send "" (empty string) to start at the first row.
                  */
          -      START_ROW((short)2, "startRow"),
          +      START_ROW((short) 2, "startRow"),
                 /**
          -       * columns to scan. If column name is a column family, all
          -       * columns of the specified column family are returned. It's also possible
          -       * to pass a regex in the column qualifier.
          +       * columns to scan. If column name is a column family, all columns of the specified column
          +       * family are returned. It's also possible to pass a regex in the column qualifier.
                  */
          -      COLUMNS((short)3, "columns"),
          +      COLUMNS((short) 3, "columns"),
                 /**
                  * Scan attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -47616,7 +51769,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // START_ROW
          @@ -47631,12 +51784,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -47668,31 +51821,43 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.START_ROW, new org.apache.thrift.meta_data.FieldMetaData("startRow", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.START_ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("startRow",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMNS,
          +        new org.apache.thrift.meta_data.FieldMetaData("columns",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpen_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpen_args.class,
          +        metaDataMap);
               }
           
               public scannerOpen_args() {
               }
           
          -    public scannerOpen_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer startRow,
           -      java.util.List<java.nio.ByteBuffer> columns,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public scannerOpen_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +        java.util.List<java.nio.ByteBuffer> columns,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.startRow = org.apache.thrift.TBaseHelper.copyBinary(startRow);
          @@ -47711,22 +51876,28 @@ public scannerOpen_args(scannerOpen_args other) {
                   this.startRow = org.apache.thrift.TBaseHelper.copyBinary(other.startRow);
                 }
                 if (other.isSetColumns()) {
           -        java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
           +        java.util.List<java.nio.ByteBuffer> __this__columns =
           +            new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                   for (java.nio.ByteBuffer other_element : other.columns) {
                     __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
                   this.columns = __this__columns;
                 }
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer,
           +            java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -47762,11 +51933,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public scannerOpen_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public scannerOpen_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public scannerOpen_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -47787,8 +51960,7 @@ public void setTableNameIsSet(boolean value) {
               }
           
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public byte[] getStartRow() {
                 setStartRow(org.apache.thrift.TBaseHelper.rightSize(startRow));
          @@ -47800,15 +51972,16 @@ public java.nio.ByteBuffer bufferForStartRow() {
               }
           
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public scannerOpen_args setStartRow(byte[] startRow) {
          -      this.startRow = startRow == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(startRow.clone());
          +      this.startRow = startRow == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(startRow.clone());
                 return this;
               }
           
          -    public scannerOpen_args setStartRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow) {
          +    public scannerOpen_args
          +        setStartRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow) {
                 this.startRow = org.apache.thrift.TBaseHelper.copyBinary(startRow);
                 return this;
               }
          @@ -47845,9 +52018,8 @@ public void addToColumns(java.nio.ByteBuffer elem) {
               }
           
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
               @org.apache.thrift.annotation.Nullable
               public java.util.List getColumns() {
                public java.util.List<java.nio.ByteBuffer> getColumns() {
               }
           
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
           -    public scannerOpen_args setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
          +    public scannerOpen_args setColumns(
           +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
                 this.columns = columns;
                 return this;
               }
          @@ -47885,7 +52057,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -47894,14 +52066,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Scan attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Scan attributes
                */
           -    public scannerOpen_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public scannerOpen_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,
           +            java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -47921,47 +52094,48 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case START_ROW:
          -        if (value == null) {
          -          unsetStartRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setStartRow((byte[])value);
          +        case START_ROW:
          +          if (value == null) {
          +            unsetStartRow();
                     } else {
          -            setStartRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setStartRow((byte[]) value);
          +            } else {
          +              setStartRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMNS:
          -        if (value == null) {
          -          unsetColumns();
          -        } else {
           -          setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case COLUMNS:
          +          if (value == null) {
          +            unsetColumns();
          +          } else {
           +            setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -47969,88 +52143,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case START_ROW:
          -        return getStartRow();
          +        case START_ROW:
          +          return getStartRow();
           
          -      case COLUMNS:
          -        return getColumns();
          +        case COLUMNS:
          +          return getColumns();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case START_ROW:
          -        return isSetStartRow();
          -      case COLUMNS:
          -        return isSetColumns();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case START_ROW:
          +          return isSetStartRow();
          +        case COLUMNS:
          +          return isSetColumns();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerOpen_args)
          -        return this.equals((scannerOpen_args)that);
          +      if (that instanceof scannerOpen_args) return this.equals((scannerOpen_args) that);
                 return false;
               }
           
               public boolean equals(scannerOpen_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_startRow = true && this.isSetStartRow();
                 boolean that_present_startRow = true && that.isSetStartRow();
                 if (this_present_startRow || that_present_startRow) {
          -        if (!(this_present_startRow && that_present_startRow))
          -          return false;
          -        if (!this.startRow.equals(that.startRow))
          -          return false;
          +        if (!(this_present_startRow && that_present_startRow)) return false;
          +        if (!this.startRow.equals(that.startRow)) return false;
                 }
           
                 boolean this_present_columns = true && this.isSetColumns();
                 boolean that_present_columns = true && that.isSetColumns();
                 if (this_present_columns || that_present_columns) {
          -        if (!(this_present_columns && that_present_columns))
          -          return false;
          -        if (!this.columns.equals(that.columns))
          -          return false;
          +        if (!(this_present_columns && that_present_columns)) return false;
          +        if (!this.columns.equals(that.columns)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -48061,20 +52227,16 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetStartRow()) ? 131071 : 524287);
          -      if (isSetStartRow())
          -        hashCode = hashCode * 8191 + startRow.hashCode();
          +      if (isSetStartRow()) hashCode = hashCode * 8191 + startRow.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -      if (isSetColumns())
          -        hashCode = hashCode * 8191 + columns.hashCode();
          +      if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -48135,11 +52297,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -48190,35 +52354,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpen_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpen_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpen_argsStandardScheme getScheme() {
                   return new scannerOpen_argsStandardScheme();
                 }
               }
           
           -    private static class scannerOpen_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpen_args> {
          +    private static class scannerOpen_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerOpen_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -48226,7 +52395,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -48234,7 +52403,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.startRow = iprot.readBinary();
                           struct.setStartRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -48243,16 +52412,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args st
                           {
                             org.apache.thrift.protocol.TList _list486 = iprot.readListBegin();
                              struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list486.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem487;
          -                  for (int _i488 = 0; _i488 < _list486.size; ++_i488)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem487;
          +                  for (int _i488 = 0; _i488 < _list486.size; ++_i488) {
                               _elem487 = iprot.readBinary();
                               struct.columns.add(_elem487);
                             }
                             iprot.readListEnd();
                           }
                           struct.setColumnsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -48260,11 +52429,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map489 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map489.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key490;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val491;
          -                  for (int _i492 = 0; _i492 < _map489.size; ++_i492)
          -                  {
          +                  struct.attributes =
          +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map489.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key490;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val491;
          +                  for (int _i492 = 0; _i492 < _map489.size; ++_i492) {
                               _key490 = iprot.readBinary();
                               _val491 = iprot.readBinary();
                               struct.attributes.put(_key490, _val491);
          @@ -48272,7 +52444,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args st
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -48283,11 +52455,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -48304,9 +52478,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_args s
                   if (struct.columns != null) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter493 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter493 : struct.columns) {
                         oprot.writeBinary(_iter493);
                       }
                       oprot.writeListEnd();
          @@ -48316,9 +52490,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_args s
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter494 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter494 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter494.getKey());
                         oprot.writeBinary(_iter494.getValue());
                       }
          @@ -48332,17 +52508,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_args s
           
               }
           
          -    private static class scannerOpen_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpen_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpen_argsTupleScheme getScheme() {
                   return new scannerOpen_argsTupleScheme();
                 }
               }
           
          -    private static class scannerOpen_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpen_args> {
          +    private static class scannerOpen_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<scannerOpen_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -48366,8 +52546,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args st
                   if (struct.isSetColumns()) {
                     {
                       oprot.writeI32(struct.columns.size());
          -            for (java.nio.ByteBuffer _iter495 : struct.columns)
          -            {
          +            for (java.nio.ByteBuffer _iter495 : struct.columns) {
                         oprot.writeBinary(_iter495);
                       }
                     }
          @@ -48375,8 +52554,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args st
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter496 : struct.attributes.entrySet())
          -            {
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter496 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter496.getKey());
                         oprot.writeBinary(_iter496.getValue());
                       }
          @@ -48385,8 +52564,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -48398,11 +52579,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args str
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TList _list497 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list497 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                       struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list497.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem498;
          -            for (int _i499 = 0; _i499 < _list497.size; ++_i499)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem498;
          +            for (int _i499 = 0; _i499 < _list497.size; ++_i499) {
                         _elem498 = iprot.readBinary();
                         struct.columns.add(_elem498);
                       }
          @@ -48411,12 +52593,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args str
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map500 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map500.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key501;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val502;
          -            for (int _i503 = 0; _i503 < _map500.size; ++_i503)
          -            {
          +            org.apache.thrift.protocol.TMap _map500 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
          +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map500.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key501;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val502;
          +            for (int _i503 = 0; _i503 < _map500.size; ++_i503) {
                         _key501 = iprot.readBinary();
                         _val502 = iprot.readBinary();
                         struct.attributes.put(_key501, _val502);
          @@ -48427,29 +52612,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpen_args str
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class scannerOpen_result implements org.apache.thrift.TBase<scannerOpen_result, scannerOpen_result._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpen_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpen_result");
          +  public static class scannerOpen_result
          +      implements org.apache.thrift.TBase<scannerOpen_result, scannerOpen_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<scannerOpen_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpen_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpen_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpen_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpen_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpen_resultTupleSchemeFactory();
           
               public int success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -48462,7 +52662,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -48473,12 +52673,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -48512,22 +52712,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32          , "ScannerID")));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32,
          +                "ScannerID")));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpen_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpen_result.class,
          +        metaDataMap);
               }
           
               public scannerOpen_result() {
               }
           
          -    public scannerOpen_result(
          -      int success,
          -      IOError io)
          -    {
          +    public scannerOpen_result(int success, IOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -48567,7 +52772,8 @@ public scannerOpen_result setSuccess(int success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -48576,7 +52782,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -48604,23 +52811,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Integer)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -48628,60 +52836,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerOpen_result)
          -        return this.equals((scannerOpen_result)that);
          +      if (that instanceof scannerOpen_result) return this.equals((scannerOpen_result) that);
                 return false;
               }
           
               public boolean equals(scannerOpen_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -48694,8 +52898,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + success;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -48736,13 +52939,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -48771,37 +52976,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpen_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpen_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpen_resultStandardScheme getScheme() {
                   return new scannerOpen_resultStandardScheme();
                 }
               }
           
          -    private static class scannerOpen_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpen_result> {
          +    private static class scannerOpen_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<scannerOpen_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -48809,7 +53020,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_result
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.success = iprot.readI32();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -48818,7 +53029,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -48829,11 +53040,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpen_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -48853,17 +53066,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpen_result
           
               }
           
          -    private static class scannerOpen_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpen_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpen_resultTupleScheme getScheme() {
                   return new scannerOpen_resultTupleScheme();
                 }
               }
           
          -    private static class scannerOpen_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpen_result> {
          +    private static class scannerOpen_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<scannerOpen_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpen_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpen_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -48881,8 +53098,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpen_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpen_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpen_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readI32();
          @@ -48896,76 +53115,92 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpen_result s
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class scannerOpenWithStop_args implements org.apache.thrift.TBase<scannerOpenWithStop_args, scannerOpenWithStop_args._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenWithStop_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenWithStop_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField STOP_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("stopRow", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)4);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)5);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenWithStop_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenWithStop_argsTupleSchemeFactory();
          +  public static class scannerOpenWithStop_args implements
          +      org.apache.thrift.TBase<scannerOpenWithStop_args, scannerOpenWithStop_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<scannerOpenWithStop_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenWithStop_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField STOP_ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("stopRow", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 5);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenWithStop_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenWithStop_argsTupleSchemeFactory();
           
               /**
                * name of table
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow; // required
               /**
          -     * row to stop scanning on. This row is *not* included in the
          -     * scanner's results
          +     * row to stop scanning on. This row is *not* included in the scanner's results
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer stopRow; // required
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
               public @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns; // required
               /**
                * Scan attributes
                */
          -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
          +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
          -       * Starting row in table to scan.
          -       * Send "" (empty string) to start at the first row.
          +       * Starting row in table to scan. Send "" (empty string) to start at the first row.
                  */
          -      START_ROW((short)2, "startRow"),
          +      START_ROW((short) 2, "startRow"),
                 /**
          -       * row to stop scanning on. This row is *not* included in the
          -       * scanner's results
          +       * row to stop scanning on. This row is *not* included in the scanner's results
                  */
          -      STOP_ROW((short)3, "stopRow"),
          +      STOP_ROW((short) 3, "stopRow"),
                 /**
          -       * columns to scan. If column name is a column family, all
          -       * columns of the specified column family are returned. It's also possible
          -       * to pass a regex in the column qualifier.
          +       * columns to scan. If column name is a column family, all columns of the specified column
          +       * family are returned. It's also possible to pass a regex in the column qualifier.
                  */
          -      COLUMNS((short)4, "columns"),
          +      COLUMNS((short) 4, "columns"),
                 /**
                  * Scan attributes
                  */
          -      ATTRIBUTES((short)5, "attributes");
          +      ATTRIBUTES((short) 5, "attributes");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -48978,7 +53213,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // START_ROW
          @@ -48995,12 +53230,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -49032,34 +53267,48 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.START_ROW, new org.apache.thrift.meta_data.FieldMetaData("startRow", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.STOP_ROW, new org.apache.thrift.meta_data.FieldMetaData("stopRow", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.START_ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("startRow",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.STOP_ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("stopRow",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMNS,
          +        new org.apache.thrift.meta_data.FieldMetaData("columns",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithStop_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithStop_args.class,
          +        metaDataMap);
               }
           
               public scannerOpenWithStop_args() {
               }
           
          -    public scannerOpenWithStop_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer startRow,
          -      java.nio.ByteBuffer stopRow,
          -      java.util.List<java.nio.ByteBuffer> columns,
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public scannerOpenWithStop_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
          +        java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns,
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.startRow = org.apache.thrift.TBaseHelper.copyBinary(startRow);
          @@ -49082,22 +53331,28 @@ public scannerOpenWithStop_args(scannerOpenWithStop_args other) {
                   this.stopRow = org.apache.thrift.TBaseHelper.copyBinary(other.stopRow);
                 }
                 if (other.isSetColumns()) {
          -        java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
          +        java.util.List<java.nio.ByteBuffer> __this__columns =
          +            new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                   for (java.nio.ByteBuffer other_element : other.columns) {
                     __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
                   this.columns = __this__columns;
                 }
                 if (other.isSetAttributes()) {
          -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
          -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
          +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
          +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -49134,11 +53389,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public scannerOpenWithStop_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public scannerOpenWithStop_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public scannerOpenWithStop_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -49159,8 +53416,7 @@ public void setTableNameIsSet(boolean value) {
               }
           
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public byte[] getStartRow() {
                 setStartRow(org.apache.thrift.TBaseHelper.rightSize(startRow));
          @@ -49172,15 +53428,16 @@ public java.nio.ByteBuffer bufferForStartRow() {
               }
           
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public scannerOpenWithStop_args setStartRow(byte[] startRow) {
          -      this.startRow = startRow == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(startRow.clone());
          +      this.startRow = startRow == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(startRow.clone());
                 return this;
               }
           
          -    public scannerOpenWithStop_args setStartRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow) {
          +    public scannerOpenWithStop_args
          +        setStartRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow) {
                 this.startRow = org.apache.thrift.TBaseHelper.copyBinary(startRow);
                 return this;
               }
          @@ -49201,8 +53458,7 @@ public void setStartRowIsSet(boolean value) {
               }
           
               /**
          -     * row to stop scanning on. This row is *not* included in the
          -     * scanner's results
          +     * row to stop scanning on. This row is *not* included in the scanner's results
                */
               public byte[] getStopRow() {
                 setStopRow(org.apache.thrift.TBaseHelper.rightSize(stopRow));
          @@ -49214,15 +53470,16 @@ public java.nio.ByteBuffer bufferForStopRow() {
               }
           
               /**
          -     * row to stop scanning on. This row is *not* included in the
          -     * scanner's results
          +     * row to stop scanning on. This row is *not* included in the scanner's results
                */
               public scannerOpenWithStop_args setStopRow(byte[] stopRow) {
          -      this.stopRow = stopRow == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(stopRow.clone());
          +      this.stopRow =
          +          stopRow == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(stopRow.clone());
                 return this;
               }
           
          -    public scannerOpenWithStop_args setStopRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer stopRow) {
          +    public scannerOpenWithStop_args
          +        setStopRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer stopRow) {
                 this.stopRow = org.apache.thrift.TBaseHelper.copyBinary(stopRow);
                 return this;
               }
          @@ -49259,9 +53516,8 @@ public void addToColumns(java.nio.ByteBuffer elem) {
               }
           
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
               @org.apache.thrift.annotation.Nullable
               public java.util.List<java.nio.ByteBuffer> getColumns() {
          @@ -49269,11 +53525,11 @@ public java.util.List getColumns() {
               }
           
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
          -    public scannerOpenWithStop_args setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
          +    public scannerOpenWithStop_args setColumns(
          +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
                 this.columns = columns;
                 return this;
               }
          @@ -49299,7 +53555,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
-        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
+        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -49308,14 +53564,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Scan attributes
                */
               @org.apache.thrift.annotation.Nullable
-    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
+    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Scan attributes
                */
-    public scannerOpenWithStop_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
+    public scannerOpenWithStop_args setAttributes(
+        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -49335,59 +53592,60 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case START_ROW:
          -        if (value == null) {
          -          unsetStartRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setStartRow((byte[])value);
          +        case START_ROW:
          +          if (value == null) {
          +            unsetStartRow();
                     } else {
          -            setStartRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setStartRow((byte[]) value);
          +            } else {
          +              setStartRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case STOP_ROW:
          -        if (value == null) {
          -          unsetStopRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setStopRow((byte[])value);
          +        case STOP_ROW:
          +          if (value == null) {
          +            unsetStopRow();
                     } else {
          -            setStopRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setStopRow((byte[]) value);
          +            } else {
          +              setStopRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMNS:
          -        if (value == null) {
          -          unsetColumns();
          -        } else {
-          setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case COLUMNS:
          +          if (value == null) {
          +            unsetColumns();
          +          } else {
+            setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
-          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
+            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -49395,42 +53653,45 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case START_ROW:
          -        return getStartRow();
          +        case START_ROW:
          +          return getStartRow();
           
          -      case STOP_ROW:
          -        return getStopRow();
          +        case STOP_ROW:
          +          return getStopRow();
           
          -      case COLUMNS:
          -        return getColumns();
          +        case COLUMNS:
          +          return getColumns();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case START_ROW:
          -        return isSetStartRow();
          -      case STOP_ROW:
          -        return isSetStopRow();
          -      case COLUMNS:
          -        return isSetColumns();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case START_ROW:
          +          return isSetStartRow();
          +        case STOP_ROW:
          +          return isSetStopRow();
          +        case COLUMNS:
          +          return isSetColumns();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -49438,59 +53699,47 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof scannerOpenWithStop_args)
          -        return this.equals((scannerOpenWithStop_args)that);
          +        return this.equals((scannerOpenWithStop_args) that);
                 return false;
               }
           
               public boolean equals(scannerOpenWithStop_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_startRow = true && this.isSetStartRow();
                 boolean that_present_startRow = true && that.isSetStartRow();
                 if (this_present_startRow || that_present_startRow) {
          -        if (!(this_present_startRow && that_present_startRow))
          -          return false;
          -        if (!this.startRow.equals(that.startRow))
          -          return false;
          +        if (!(this_present_startRow && that_present_startRow)) return false;
          +        if (!this.startRow.equals(that.startRow)) return false;
                 }
           
                 boolean this_present_stopRow = true && this.isSetStopRow();
                 boolean that_present_stopRow = true && that.isSetStopRow();
                 if (this_present_stopRow || that_present_stopRow) {
          -        if (!(this_present_stopRow && that_present_stopRow))
          -          return false;
          -        if (!this.stopRow.equals(that.stopRow))
          -          return false;
          +        if (!(this_present_stopRow && that_present_stopRow)) return false;
          +        if (!this.stopRow.equals(that.stopRow)) return false;
                 }
           
                 boolean this_present_columns = true && this.isSetColumns();
                 boolean that_present_columns = true && that.isSetColumns();
                 if (this_present_columns || that_present_columns) {
          -        if (!(this_present_columns && that_present_columns))
          -          return false;
          -        if (!this.columns.equals(that.columns))
          -          return false;
          +        if (!(this_present_columns && that_present_columns)) return false;
          +        if (!this.columns.equals(that.columns)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -49501,24 +53750,19 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetStartRow()) ? 131071 : 524287);
          -      if (isSetStartRow())
          -        hashCode = hashCode * 8191 + startRow.hashCode();
          +      if (isSetStartRow()) hashCode = hashCode * 8191 + startRow.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetStopRow()) ? 131071 : 524287);
          -      if (isSetStopRow())
          -        hashCode = hashCode * 8191 + stopRow.hashCode();
          +      if (isSetStopRow()) hashCode = hashCode * 8191 + stopRow.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -      if (isSetColumns())
          -        hashCode = hashCode * 8191 + columns.hashCode();
          +      if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -49589,11 +53833,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -49652,35 +53898,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenWithStop_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithStop_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithStop_argsStandardScheme getScheme() {
                   return new scannerOpenWithStop_argsStandardScheme();
                 }
               }
           
-    private static class scannerOpenWithStop_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithStop_args> {
+    private static class scannerOpenWithStop_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithStop_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -49688,7 +53939,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -49696,7 +53947,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.startRow = iprot.readBinary();
                           struct.setStartRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -49704,7 +53955,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.stopRow = iprot.readBinary();
                           struct.setStopRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -49713,16 +53964,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                           {
                             org.apache.thrift.protocol.TList _list504 = iprot.readListBegin();
                   struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list504.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem505;
          -                  for (int _i506 = 0; _i506 < _list504.size; ++_i506)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem505;
          +                  for (int _i506 = 0; _i506 < _list504.size; ++_i506) {
                               _elem505 = iprot.readBinary();
                               struct.columns.add(_elem505);
                             }
                             iprot.readListEnd();
                           }
                           struct.setColumnsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -49730,11 +53981,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map507 = iprot.readMapBegin();
-                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map507.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key508;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val509;
          -                  for (int _i510 = 0; _i510 < _map507.size; ++_i510)
          -                  {
          +                  struct.attributes =
+                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map507.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key508;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val509;
          +                  for (int _i510 = 0; _i510 < _map507.size; ++_i510) {
                               _key508 = iprot.readBinary();
                               _val509 = iprot.readBinary();
                               struct.attributes.put(_key508, _val509);
          @@ -49742,7 +53996,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -49753,11 +54007,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithStop_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithStop_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -49779,9 +54035,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto
                   if (struct.columns != null) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter511 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter511 : struct.columns) {
                         oprot.writeBinary(_iter511);
                       }
                       oprot.writeListEnd();
          @@ -49791,9 +54047,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
-            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter512 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
+            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter512 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter512.getKey());
                         oprot.writeBinary(_iter512.getValue());
                       }
          @@ -49807,17 +54065,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto
           
               }
           
          -    private static class scannerOpenWithStop_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithStop_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithStop_argsTupleScheme getScheme() {
                   return new scannerOpenWithStop_argsTupleScheme();
                 }
               }
           
-    private static class scannerOpenWithStop_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithStop_args> {
+    private static class scannerOpenWithStop_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithStop_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -49847,8 +54109,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop
                   if (struct.isSetColumns()) {
                     {
                       oprot.writeI32(struct.columns.size());
          -            for (java.nio.ByteBuffer _iter513 : struct.columns)
          -            {
          +            for (java.nio.ByteBuffer _iter513 : struct.columns) {
                         oprot.writeBinary(_iter513);
                       }
                     }
          @@ -49856,8 +54117,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
-            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter514 : struct.attributes.entrySet())
          -            {
+            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter514 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter514.getKey());
                         oprot.writeBinary(_iter514.getValue());
                       }
          @@ -49866,8 +54127,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(5);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -49883,11 +54146,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TList _list515 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list515 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
             struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list515.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem516;
          -            for (int _i517 = 0; _i517 < _list515.size; ++_i517)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem516;
          +            for (int _i517 = 0; _i517 < _list515.size; ++_i517) {
                         _elem516 = iprot.readBinary();
                         struct.columns.add(_elem516);
                       }
          @@ -49896,12 +54160,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_
                   }
                   if (incoming.get(4)) {
                     {
          -            org.apache.thrift.protocol.TMap _map518 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
-            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map518.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key519;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val520;
          -            for (int _i521 = 0; _i521 < _map518.size; ++_i521)
          -            {
          +            org.apache.thrift.protocol.TMap _map518 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
+                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map518.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key519;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val520;
          +            for (int _i521 = 0; _i521 < _map518.size; ++_i521) {
                         _key519 = iprot.readBinary();
                         _val520 = iprot.readBinary();
                         struct.attributes.put(_key519, _val520);
          @@ -49912,29 +54179,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
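The hunks above only re-wrap the generated scannerOpenWithStop_args struct; its fluent, ByteBuffer-based setters are unchanged. For orientation, a minimal sketch of how the struct is typically populated follows. It is not part of this patch: the enclosing generated org.apache.hadoop.hbase.thrift.generated.Hbase class is assumed, and the table, row, and column literals are illustrative only.

    import java.nio.ByteBuffer;
    import java.util.Collections;
    import java.util.HashMap;

    import org.apache.hadoop.hbase.thrift.generated.Hbase;

    public class ScannerOpenWithStopSketch {
      // Builds the argument struct through the fluent setters reformatted above; the byte[]
      // overloads clone the input and wrap it in a ByteBuffer (see setStopRow(byte[]) above).
      public static Hbase.scannerOpenWithStop_args buildArgs() {
        return new Hbase.scannerOpenWithStop_args()
          .setTableName("t1".getBytes())
          .setStartRow("row-000".getBytes()) // first row included in the scan
          .setStopRow("row-100".getBytes())  // *not* included in the scanner's results
          .setColumns(Collections.singletonList(ByteBuffer.wrap("cf:qual".getBytes())))
          .setAttributes(new HashMap<ByteBuffer, ByteBuffer>()); // no extra scan attributes
      }
    }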
           
-  public static class scannerOpenWithStop_result implements org.apache.thrift.TBase<scannerOpenWithStop_result, scannerOpenWithStop_result._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenWithStop_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenWithStop_result");
          +  public static class scannerOpenWithStop_result implements
+      org.apache.thrift.TBase<scannerOpenWithStop_result, scannerOpenWithStop_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<scannerOpenWithStop_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenWithStop_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenWithStop_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenWithStop_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenWithStop_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenWithStop_resultTupleSchemeFactory();
           
               public int success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -49947,7 +54229,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -49958,12 +54240,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -49997,22 +54279,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32          , "ScannerID")));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32,
          +                "ScannerID")));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithStop_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(scannerOpenWithStop_result.class, metaDataMap);
               }
           
               public scannerOpenWithStop_result() {
               }
           
          -    public scannerOpenWithStop_result(
          -      int success,
          -      IOError io)
          -    {
          +    public scannerOpenWithStop_result(int success, IOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -50052,7 +54339,8 @@ public scannerOpenWithStop_result setSuccess(int success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -50061,7 +54349,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -50089,23 +54378,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Integer)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -50113,27 +54403,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -50141,32 +54434,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof scannerOpenWithStop_result)
          -        return this.equals((scannerOpenWithStop_result)that);
          +        return this.equals((scannerOpenWithStop_result) that);
                 return false;
               }
           
               public boolean equals(scannerOpenWithStop_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -50179,8 +54466,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + success;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -50221,13 +54507,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -50256,37 +54544,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenWithStop_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithStop_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithStop_resultStandardScheme getScheme() {
                   return new scannerOpenWithStop_resultStandardScheme();
                 }
               }
           
-    private static class scannerOpenWithStop_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithStop_result> {
+    private static class scannerOpenWithStop_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithStop_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          scannerOpenWithStop_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -50294,7 +54588,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.success = iprot.readI32();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -50303,7 +54597,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -50314,11 +54608,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithStop_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          scannerOpenWithStop_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -50338,17 +54634,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto
           
               }
           
          -    private static class scannerOpenWithStop_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithStop_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithStop_resultTupleScheme getScheme() {
                   return new scannerOpenWithStop_resultTupleScheme();
                 }
               }
           
-    private static class scannerOpenWithStop_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithStop_result> {
+    private static class scannerOpenWithStop_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithStop_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          scannerOpenWithStop_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -50366,8 +54666,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readI32();
          @@ -50381,21 +54683,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop_
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
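The result struct follows the same pattern: scheme() resolves the standard or tuple scheme from the protocol, and writeObject/readObject funnel through TCompactProtocol over a TIOStreamTransport. A minimal round-trip sketch, again not part of this patch and assuming the generated org.apache.hadoop.hbase.thrift.generated.Hbase class plus a libthrift dependency on the classpath:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;

    import org.apache.hadoop.hbase.thrift.generated.Hbase;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TIOStreamTransport;

    public class ScannerResultRoundTrip {
      public static void main(String[] args) throws TException {
        Hbase.scannerOpenWithStop_result original = new Hbase.scannerOpenWithStop_result();
        original.setSuccess(42); // the ScannerID handed back to the caller

        // write() resolves TCompactProtocol to the standard scheme and serializes field by field.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new TCompactProtocol(new TIOStreamTransport(bytes)));

        // read() rebuilds an equal struct from the same bytes.
        Hbase.scannerOpenWithStop_result copy = new Hbase.scannerOpenWithStop_result();
        copy.read(new TCompactProtocol(
          new TIOStreamTransport(new ByteArrayInputStream(bytes.toByteArray()))));
        System.out.println("round-tripped scanner id = " + copy.getSuccess()); // prints 42
      }
    }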
           
-  public static class scannerOpenWithPrefix_args implements org.apache.thrift.TBase<scannerOpenWithPrefix_args, scannerOpenWithPrefix_args._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenWithPrefix_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenWithPrefix_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField START_AND_PREFIX_FIELD_DESC = new org.apache.thrift.protocol.TField("startAndPrefix", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenWithPrefix_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenWithPrefix_argsTupleSchemeFactory();
+  public static class scannerOpenWithPrefix_args implements
+      org.apache.thrift.TBase<scannerOpenWithPrefix_args, scannerOpenWithPrefix_args._Fields>,
+      java.io.Serializable, Cloneable, Comparable<scannerOpenWithPrefix_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenWithPrefix_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField START_AND_PREFIX_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("startAndPrefix",
          +            org.apache.thrift.protocol.TType.STRING, (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 4);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenWithPrefix_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenWithPrefix_argsTupleSchemeFactory();
           
               /**
                * name of table
          @@ -50412,28 +54730,32 @@ public static class scannerOpenWithPrefix_args implements org.apache.thrift.TBas
               /**
                * Scan attributes
                */
-    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
+    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * the prefix (and thus start row) of the keys you want
                  */
          -      START_AND_PREFIX((short)2, "startAndPrefix"),
          +      START_AND_PREFIX((short) 2, "startAndPrefix"),
                 /**
                  * the columns you want returned
                  */
          -      COLUMNS((short)3, "columns"),
          +      COLUMNS((short) 3, "columns"),
                 /**
                  * Scan attributes
                  */
          -      ATTRIBUTES((short)4, "attributes");
          +      ATTRIBUTES((short) 4, "attributes");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -50446,7 +54768,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // START_AND_PREFIX
          @@ -50461,12 +54783,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -50498,31 +54820,43 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.START_AND_PREFIX, new org.apache.thrift.meta_data.FieldMetaData("startAndPrefix", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.START_AND_PREFIX,
          +        new org.apache.thrift.meta_data.FieldMetaData("startAndPrefix",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMNS,
          +        new org.apache.thrift.meta_data.FieldMetaData("columns",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithPrefix_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(scannerOpenWithPrefix_args.class, metaDataMap);
               }
           
               public scannerOpenWithPrefix_args() {
               }
           
          -    public scannerOpenWithPrefix_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer startAndPrefix,
-      java.util.List<java.nio.ByteBuffer> columns,
-      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public scannerOpenWithPrefix_args(java.nio.ByteBuffer tableName,
+        java.nio.ByteBuffer startAndPrefix, java.util.List<java.nio.ByteBuffer> columns,
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.startAndPrefix = org.apache.thrift.TBaseHelper.copyBinary(startAndPrefix);
          @@ -50541,22 +54875,28 @@ public scannerOpenWithPrefix_args(scannerOpenWithPrefix_args other) {
                   this.startAndPrefix = org.apache.thrift.TBaseHelper.copyBinary(other.startAndPrefix);
                 }
                 if (other.isSetColumns()) {
-        java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
+        java.util.List<java.nio.ByteBuffer> __this__columns =
+            new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                   for (java.nio.ByteBuffer other_element : other.columns) {
                     __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
                   this.columns = __this__columns;
                 }
                 if (other.isSetAttributes()) {
-        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
-        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
+        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
+            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
+                other.attributes.size());
+        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
+            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -50592,11 +54932,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public scannerOpenWithPrefix_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public scannerOpenWithPrefix_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public scannerOpenWithPrefix_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -50632,11 +54974,13 @@ public java.nio.ByteBuffer bufferForStartAndPrefix() {
                * the prefix (and thus start row) of the keys you want
                */
               public scannerOpenWithPrefix_args setStartAndPrefix(byte[] startAndPrefix) {
          -      this.startAndPrefix = startAndPrefix == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(startAndPrefix.clone());
          +      this.startAndPrefix = startAndPrefix == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(startAndPrefix.clone());
                 return this;
               }
           
          -    public scannerOpenWithPrefix_args setStartAndPrefix(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startAndPrefix) {
          +    public scannerOpenWithPrefix_args setStartAndPrefix(
          +        @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startAndPrefix) {
                 this.startAndPrefix = org.apache.thrift.TBaseHelper.copyBinary(startAndPrefix);
                 return this;
               }
          @@ -50645,7 +54989,9 @@ public void unsetStartAndPrefix() {
                 this.startAndPrefix = null;
               }
           
          -    /** Returns true if field startAndPrefix is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field startAndPrefix is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetStartAndPrefix() {
                 return this.startAndPrefix != null;
               }
          @@ -50683,7 +55029,8 @@ public java.util.List getColumns() {
               /**
                * the columns you want returned
                */
-    public scannerOpenWithPrefix_args setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
+    public scannerOpenWithPrefix_args setColumns(
+        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
                 this.columns = columns;
                 return this;
               }
          @@ -50709,7 +55056,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
-        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
+        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -50718,14 +55065,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Scan attributes
                */
               @org.apache.thrift.annotation.Nullable
-    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
+    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Scan attributes
                */
-    public scannerOpenWithPrefix_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
+    public scannerOpenWithPrefix_args setAttributes(
+        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -50745,47 +55093,48 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case START_AND_PREFIX:
          -        if (value == null) {
          -          unsetStartAndPrefix();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setStartAndPrefix((byte[])value);
          +        case START_AND_PREFIX:
          +          if (value == null) {
          +            unsetStartAndPrefix();
                     } else {
          -            setStartAndPrefix((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setStartAndPrefix((byte[]) value);
          +            } else {
          +              setStartAndPrefix((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMNS:
          -        if (value == null) {
          -          unsetColumns();
          -        } else {
-          setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case COLUMNS:
          +          if (value == null) {
          +            unsetColumns();
          +          } else {
+            setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
-          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
+            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -50793,37 +55142,40 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case START_AND_PREFIX:
          -        return getStartAndPrefix();
          +        case START_AND_PREFIX:
          +          return getStartAndPrefix();
           
          -      case COLUMNS:
          -        return getColumns();
          +        case COLUMNS:
          +          return getColumns();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case START_AND_PREFIX:
          -        return isSetStartAndPrefix();
          -      case COLUMNS:
          -        return isSetColumns();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case START_AND_PREFIX:
          +          return isSetStartAndPrefix();
          +        case COLUMNS:
          +          return isSetColumns();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -50831,50 +55183,40 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof scannerOpenWithPrefix_args)
          -        return this.equals((scannerOpenWithPrefix_args)that);
          +        return this.equals((scannerOpenWithPrefix_args) that);
                 return false;
               }
           
               public boolean equals(scannerOpenWithPrefix_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_startAndPrefix = true && this.isSetStartAndPrefix();
                 boolean that_present_startAndPrefix = true && that.isSetStartAndPrefix();
                 if (this_present_startAndPrefix || that_present_startAndPrefix) {
          -        if (!(this_present_startAndPrefix && that_present_startAndPrefix))
          -          return false;
          -        if (!this.startAndPrefix.equals(that.startAndPrefix))
          -          return false;
          +        if (!(this_present_startAndPrefix && that_present_startAndPrefix)) return false;
          +        if (!this.startAndPrefix.equals(that.startAndPrefix)) return false;
                 }
           
                 boolean this_present_columns = true && this.isSetColumns();
                 boolean that_present_columns = true && that.isSetColumns();
                 if (this_present_columns || that_present_columns) {
          -        if (!(this_present_columns && that_present_columns))
          -          return false;
          -        if (!this.columns.equals(that.columns))
          -          return false;
          +        if (!(this_present_columns && that_present_columns)) return false;
          +        if (!this.columns.equals(that.columns)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -50885,20 +55227,16 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetStartAndPrefix()) ? 131071 : 524287);
          -      if (isSetStartAndPrefix())
          -        hashCode = hashCode * 8191 + startAndPrefix.hashCode();
          +      if (isSetStartAndPrefix()) hashCode = hashCode * 8191 + startAndPrefix.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -      if (isSetColumns())
          -        hashCode = hashCode * 8191 + columns.hashCode();
          +      if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -50921,12 +55259,14 @@ public int compareTo(scannerOpenWithPrefix_args other) {
                     return lastComparison;
                   }
                 }
          -      lastComparison = java.lang.Boolean.compare(isSetStartAndPrefix(), other.isSetStartAndPrefix());
          +      lastComparison =
          +          java.lang.Boolean.compare(isSetStartAndPrefix(), other.isSetStartAndPrefix());
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
                 if (isSetStartAndPrefix()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.startAndPrefix, other.startAndPrefix);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.startAndPrefix, other.startAndPrefix);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -50959,11 +55299,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -51014,35 +55356,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenWithPrefix_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithPrefix_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithPrefix_argsStandardScheme getScheme() {
                   return new scannerOpenWithPrefix_argsStandardScheme();
                 }
               }
           
-    private static class scannerOpenWithPrefix_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithPrefix_args> {
+    private static class scannerOpenWithPrefix_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithPrefix_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPrefix_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          scannerOpenWithPrefix_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -51050,7 +55397,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -51058,7 +55405,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.startAndPrefix = iprot.readBinary();
                           struct.setStartAndPrefixIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -51067,16 +55414,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref
                           {
                             org.apache.thrift.protocol.TList _list522 = iprot.readListBegin();
                   struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list522.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem523;
          -                  for (int _i524 = 0; _i524 < _list522.size; ++_i524)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem523;
          +                  for (int _i524 = 0; _i524 < _list522.size; ++_i524) {
                               _elem523 = iprot.readBinary();
                               struct.columns.add(_elem523);
                             }
                             iprot.readListEnd();
                           }
                           struct.setColumnsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -51084,11 +55431,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map525 = iprot.readMapBegin();
-                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map525.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key526;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val527;
          -                  for (int _i528 = 0; _i528 < _map525.size; ++_i528)
          -                  {
          +                  struct.attributes =
+                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map525.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key526;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val527;
          +                  for (int _i528 = 0; _i528 < _map525.size; ++_i528) {
                               _key526 = iprot.readBinary();
                               _val527 = iprot.readBinary();
                               struct.attributes.put(_key526, _val527);
          @@ -51096,7 +55446,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -51107,11 +55457,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithPrefix_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          scannerOpenWithPrefix_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -51128,9 +55480,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithPre
                   if (struct.columns != null) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter529 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter529 : struct.columns) {
                         oprot.writeBinary(_iter529);
                       }
                       oprot.writeListEnd();
          @@ -51140,9 +55492,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithPre
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
-            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter530 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
+            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter530 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter530.getKey());
                         oprot.writeBinary(_iter530.getValue());
                       }
          @@ -51156,17 +55510,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithPre
           
               }
           
          -    private static class scannerOpenWithPrefix_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithPrefix_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithPrefix_argsTupleScheme getScheme() {
                   return new scannerOpenWithPrefix_argsTupleScheme();
                 }
               }
           
-    private static class scannerOpenWithPrefix_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithPrefix_args> {
+    private static class scannerOpenWithPrefix_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithPrefix_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefix_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          scannerOpenWithPrefix_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -51190,8 +55548,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPref
                   if (struct.isSetColumns()) {
                     {
                       oprot.writeI32(struct.columns.size());
          -            for (java.nio.ByteBuffer _iter531 : struct.columns)
          -            {
          +            for (java.nio.ByteBuffer _iter531 : struct.columns) {
                         oprot.writeBinary(_iter531);
                       }
                     }
          @@ -51199,8 +55556,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPref
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
-            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter532 : struct.attributes.entrySet())
          -            {
+            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter532 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter532.getKey());
                         oprot.writeBinary(_iter532.getValue());
                       }
          @@ -51209,8 +55566,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPref
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefix_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefix_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(4);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -51222,11 +55581,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefi
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TList _list533 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list533 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
             struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list533.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem534;
          -            for (int _i535 = 0; _i535 < _list533.size; ++_i535)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem534;
          +            for (int _i535 = 0; _i535 < _list533.size; ++_i535) {
                         _elem534 = iprot.readBinary();
                         struct.columns.add(_elem534);
                       }
          @@ -51235,12 +55595,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefi
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TMap _map536 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
-            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map536.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key537;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val538;
          -            for (int _i539 = 0; _i539 < _map536.size; ++_i539)
          -            {
          +            org.apache.thrift.protocol.TMap _map536 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
+                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map536.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key537;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val538;
          +            for (int _i539 = 0; _i539 < _map536.size; ++_i539) {
                         _key537 = iprot.readBinary();
                         _val538 = iprot.readBinary();
                         struct.attributes.put(_key537, _val538);
          @@ -51251,29 +55614,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefi
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class scannerOpenWithPrefix_result implements org.apache.thrift.TBase<scannerOpenWithPrefix_result, scannerOpenWithPrefix_result._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenWithPrefix_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenWithPrefix_result");
          +  public static class scannerOpenWithPrefix_result implements
+      org.apache.thrift.TBase<scannerOpenWithPrefix_result, scannerOpenWithPrefix_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<scannerOpenWithPrefix_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenWithPrefix_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenWithPrefix_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenWithPrefix_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenWithPrefix_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenWithPrefix_resultTupleSchemeFactory();
           
               public int success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -51286,7 +55664,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -51297,12 +55675,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -51336,22 +55714,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32          , "ScannerID")));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32,
          +                "ScannerID")));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithPrefix_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(scannerOpenWithPrefix_result.class, metaDataMap);
               }
           
               public scannerOpenWithPrefix_result() {
               }
           
          -    public scannerOpenWithPrefix_result(
          -      int success,
          -      IOError io)
          -    {
          +    public scannerOpenWithPrefix_result(int success, IOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -51391,7 +55774,8 @@ public scannerOpenWithPrefix_result setSuccess(int success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -51400,7 +55784,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -51428,23 +55813,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Integer)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -51452,27 +55838,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -51480,32 +55869,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof scannerOpenWithPrefix_result)
          -        return this.equals((scannerOpenWithPrefix_result)that);
          +        return this.equals((scannerOpenWithPrefix_result) that);
                 return false;
               }
           
               public boolean equals(scannerOpenWithPrefix_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -51518,8 +55901,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + success;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -51560,13 +55942,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -51595,37 +55979,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenWithPrefix_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithPrefix_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithPrefix_resultStandardScheme getScheme() {
                   return new scannerOpenWithPrefix_resultStandardScheme();
                 }
               }
           
           -    private static class scannerOpenWithPrefix_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithPrefix_result> {
           +    private static class scannerOpenWithPrefix_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithPrefix_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPrefix_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          scannerOpenWithPrefix_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -51633,7 +56023,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.success = iprot.readI32();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -51642,7 +56032,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -51653,11 +56043,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithPref
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithPrefix_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          scannerOpenWithPrefix_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -51677,17 +56069,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithPre
           
               }
           
          -    private static class scannerOpenWithPrefix_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithPrefix_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithPrefix_resultTupleScheme getScheme() {
                   return new scannerOpenWithPrefix_resultTupleScheme();
                 }
               }
           
           -    private static class scannerOpenWithPrefix_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithPrefix_result> {
           +    private static class scannerOpenWithPrefix_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithPrefix_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefix_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          scannerOpenWithPrefix_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -51705,8 +56101,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPref
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefix_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          scannerOpenWithPrefix_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readI32();
          @@ -51720,36 +56118,52 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithPrefi
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
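
           A note on the struct that follows: scannerOpenTs_args carries the arguments of the Thrift1
           scannerOpenTs call (open a scanner whose returned cell versions are bounded by a timestamp).
           Purely as an illustrative sketch, not part of this patch, a client invocation of the generated
           API could look like the code below; the host, port, table, column and timestamp values, and
           the class name ScannerOpenTsSketch, are assumptions for illustration only.

           import java.nio.ByteBuffer;
           import java.nio.charset.StandardCharsets;
           import java.util.Collections;
           import java.util.List;

           import org.apache.hadoop.hbase.thrift.generated.Hbase;
           import org.apache.hadoop.hbase.thrift.generated.TRowResult;
           import org.apache.thrift.protocol.TBinaryProtocol;
           import org.apache.thrift.transport.TSocket;
           import org.apache.thrift.transport.TTransport;

           public class ScannerOpenTsSketch {
             public static void main(String[] args) throws Exception {
               // Assumed endpoint: a ThriftServer exposing the Thrift1 Hbase service on localhost:9090
               // with the default binary, unframed protocol.
               TTransport transport = new TSocket("localhost", 9090);
               transport.open();
               Hbase.Client client = new Hbase.Client(new TBinaryProtocol(transport));
               // Open a scanner; the timestamp argument bounds which cell versions are returned.
               int scannerId = client.scannerOpenTs(
                 ByteBuffer.wrap("example-table".getBytes(StandardCharsets.UTF_8)), // tableName (assumed)
                 ByteBuffer.wrap(new byte[0]),                                      // startRow: "" starts at the first row
                 Collections.singletonList(
                   ByteBuffer.wrap("cf:qual".getBytes(StandardCharsets.UTF_8))),    // columns: family or family:qualifier
                 System.currentTimeMillis(),                                        // timestamp bound
                 Collections.emptyMap());                                           // scan attributes
               List<TRowResult> rows;
               while (!(rows = client.scannerGet(scannerId)).isEmpty()) {
                 System.out.println("row: " + new String(rows.get(0).getRow(), StandardCharsets.UTF_8));
               }
               client.scannerClose(scannerId);
               transport.close();
             }
           }

           The reformatting recorded below does not change any of this behaviour; it only rewraps the
           generated code to the project formatter's conventions.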
           
           -  public static class scannerOpenTs_args implements org.apache.thrift.TBase<scannerOpenTs_args, scannerOpenTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)4);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)5);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenTs_argsTupleSchemeFactory();
          +  public static class scannerOpenTs_args
           +      implements org.apache.thrift.TBase<scannerOpenTs_args, scannerOpenTs_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerOpenTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 5);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenTs_argsTupleSchemeFactory();
           
               /**
                * name of table
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow; // required
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
                public @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns; // required
               /**
           @@ -51759,35 +56173,37 @@ public static class scannerOpenTs_args implements org.apache.thrift.TBase
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
          -       * Starting row in table to scan.
          -       * Send "" (empty string) to start at the first row.
          +       * Starting row in table to scan. Send "" (empty string) to start at the first row.
                  */
          -      START_ROW((short)2, "startRow"),
          +      START_ROW((short) 2, "startRow"),
                 /**
          -       * columns to scan. If column name is a column family, all
          -       * columns of the specified column family are returned. It's also possible
          -       * to pass a regex in the column qualifier.
          +       * columns to scan. If column name is a column family, all columns of the specified column
          +       * family are returned. It's also possible to pass a regex in the column qualifier.
                  */
          -      COLUMNS((short)3, "columns"),
          +      COLUMNS((short) 3, "columns"),
                 /**
                  * timestamp
                  */
          -      TIMESTAMP((short)4, "timestamp"),
          +      TIMESTAMP((short) 4, "timestamp"),
                 /**
                  * Scan attributes
                  */
          -      ATTRIBUTES((short)5, "attributes");
          +      ATTRIBUTES((short) 5, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -51800,7 +56216,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // START_ROW
          @@ -51817,12 +56233,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -51856,34 +56272,48 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.START_ROW, new org.apache.thrift.meta_data.FieldMetaData("startRow", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.START_ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("startRow",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMNS,
          +        new org.apache.thrift.meta_data.FieldMetaData("columns",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenTs_args.class,
          +        metaDataMap);
               }
           
               public scannerOpenTs_args() {
               }
           
          -    public scannerOpenTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer startRow,
           -      java.util.List<java.nio.ByteBuffer> columns,
           -      long timestamp,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public scannerOpenTs_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +        java.util.List<java.nio.ByteBuffer> columns, long timestamp,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.startRow = org.apache.thrift.TBaseHelper.copyBinary(startRow);
          @@ -51905,7 +56335,8 @@ public scannerOpenTs_args(scannerOpenTs_args other) {
                   this.startRow = org.apache.thrift.TBaseHelper.copyBinary(other.startRow);
                 }
                 if (other.isSetColumns()) {
           -        java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
           +        java.util.List<java.nio.ByteBuffer> __this__columns =
           +            new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                   for (java.nio.ByteBuffer other_element : other.columns) {
                     __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
          @@ -51913,15 +56344,20 @@ public scannerOpenTs_args(scannerOpenTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
           +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -51959,11 +56395,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public scannerOpenTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public scannerOpenTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public scannerOpenTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -51984,8 +56422,7 @@ public void setTableNameIsSet(boolean value) {
               }
           
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public byte[] getStartRow() {
                 setStartRow(org.apache.thrift.TBaseHelper.rightSize(startRow));
          @@ -51997,15 +56434,16 @@ public java.nio.ByteBuffer bufferForStartRow() {
               }
           
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public scannerOpenTs_args setStartRow(byte[] startRow) {
          -      this.startRow = startRow == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(startRow.clone());
          +      this.startRow = startRow == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(startRow.clone());
                 return this;
               }
           
          -    public scannerOpenTs_args setStartRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow) {
          +    public scannerOpenTs_args
          +        setStartRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow) {
                 this.startRow = org.apache.thrift.TBaseHelper.copyBinary(startRow);
                 return this;
               }
          @@ -52042,9 +56480,8 @@ public void addToColumns(java.nio.ByteBuffer elem) {
               }
           
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
               @org.apache.thrift.annotation.Nullable
                public java.util.List<java.nio.ByteBuffer> getColumns() {
           @@ -52052,11 +56489,11 @@ public java.util.List<java.nio.ByteBuffer> getColumns() {
               }
           
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
           -    public scannerOpenTs_args setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
           +    public scannerOpenTs_args setColumns(
           +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
                 this.columns = columns;
                 return this;
               }
          @@ -52093,7 +56530,8 @@ public scannerOpenTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -52102,7 +56540,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -52111,7 +56550,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -52120,14 +56559,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Scan attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Scan attributes
                */
           -    public scannerOpenTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +    public scannerOpenTs_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -52147,55 +56587,56 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case START_ROW:
          -        if (value == null) {
          -          unsetStartRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setStartRow((byte[])value);
          +        case START_ROW:
          +          if (value == null) {
          +            unsetStartRow();
                     } else {
          -            setStartRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setStartRow((byte[]) value);
          +            } else {
          +              setStartRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMNS:
          -        if (value == null) {
          -          unsetColumns();
          -        } else {
           -          setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case COLUMNS:
          +          if (value == null) {
          +            unsetColumns();
          +          } else {
           +            setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -52203,102 +56644,92 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case START_ROW:
          -        return getStartRow();
          +        case START_ROW:
          +          return getStartRow();
           
          -      case COLUMNS:
          -        return getColumns();
          +        case COLUMNS:
          +          return getColumns();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case START_ROW:
          -        return isSetStartRow();
          -      case COLUMNS:
          -        return isSetColumns();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case START_ROW:
          +          return isSetStartRow();
          +        case COLUMNS:
          +          return isSetColumns();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerOpenTs_args)
          -        return this.equals((scannerOpenTs_args)that);
          +      if (that instanceof scannerOpenTs_args) return this.equals((scannerOpenTs_args) that);
                 return false;
               }
           
               public boolean equals(scannerOpenTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_startRow = true && this.isSetStartRow();
                 boolean that_present_startRow = true && that.isSetStartRow();
                 if (this_present_startRow || that_present_startRow) {
          -        if (!(this_present_startRow && that_present_startRow))
          -          return false;
          -        if (!this.startRow.equals(that.startRow))
          -          return false;
          +        if (!(this_present_startRow && that_present_startRow)) return false;
          +        if (!this.startRow.equals(that.startRow)) return false;
                 }
           
                 boolean this_present_columns = true && this.isSetColumns();
                 boolean that_present_columns = true && that.isSetColumns();
                 if (this_present_columns || that_present_columns) {
          -        if (!(this_present_columns && that_present_columns))
          -          return false;
          -        if (!this.columns.equals(that.columns))
          -          return false;
          +        if (!(this_present_columns && that_present_columns)) return false;
          +        if (!this.columns.equals(that.columns)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -52309,22 +56740,18 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetStartRow()) ? 131071 : 524287);
          -      if (isSetStartRow())
          -        hashCode = hashCode * 8191 + startRow.hashCode();
          +      if (isSetStartRow()) hashCode = hashCode * 8191 + startRow.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -      if (isSetColumns())
          -        hashCode = hashCode * 8191 + columns.hashCode();
          +      if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -52395,11 +56822,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -52454,37 +56883,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenTs_argsStandardScheme getScheme() {
                   return new scannerOpenTs_argsStandardScheme();
                 }
               }
           
           -    private static class scannerOpenTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenTs_args> {
           +    private static class scannerOpenTs_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerOpenTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -52492,7 +56927,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -52500,7 +56935,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.startRow = iprot.readBinary();
                           struct.setStartRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -52509,16 +56944,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args
                           {
                             org.apache.thrift.protocol.TList _list540 = iprot.readListBegin();
                              struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list540.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem541;
          -                  for (int _i542 = 0; _i542 < _list540.size; ++_i542)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem541;
          +                  for (int _i542 = 0; _i542 < _list540.size; ++_i542) {
                               _elem541 = iprot.readBinary();
                               struct.columns.add(_elem541);
                             }
                             iprot.readListEnd();
                           }
                           struct.setColumnsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -52526,7 +56961,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -52534,11 +56969,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map543 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map543.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key544;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val545;
          -                  for (int _i546 = 0; _i546 < _map543.size; ++_i546)
          -                  {
          +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map543.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key544;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val545;
          +                  for (int _i546 = 0; _i546 < _map543.size; ++_i546) {
                               _key544 = iprot.readBinary();
                               _val545 = iprot.readBinary();
                               struct.attributes.put(_key544, _val545);
          @@ -52546,7 +56984,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -52557,11 +56995,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -52578,9 +57018,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_args
                   if (struct.columns != null) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter547 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter547 : struct.columns) {
                         oprot.writeBinary(_iter547);
                       }
                       oprot.writeListEnd();
          @@ -52593,9 +57033,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_args
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> _iter548 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter548 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter548.getKey());
                         oprot.writeBinary(_iter548.getValue());
                       }
          @@ -52609,17 +57051,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_args
           
               }
           
          -    private static class scannerOpenTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenTs_argsTupleScheme getScheme() {
                   return new scannerOpenTs_argsTupleScheme();
                 }
               }
           
           -    private static class scannerOpenTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenTs_args> {
          +    private static class scannerOpenTs_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerOpenTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -52646,8 +57092,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args
                   if (struct.isSetColumns()) {
                     {
                       oprot.writeI32(struct.columns.size());
          -            for (java.nio.ByteBuffer _iter549 : struct.columns)
          -            {
          +            for (java.nio.ByteBuffer _iter549 : struct.columns) {
                         oprot.writeBinary(_iter549);
                       }
                     }
          @@ -52658,8 +57103,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> _iter550 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter550 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter550.getKey());
                         oprot.writeBinary(_iter550.getValue());
                       }
          @@ -52668,8 +57113,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(5);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -52681,11 +57128,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args s
                   }
                   if (incoming.get(2)) {
                     {
          -            org.apache.thrift.protocol.TList _list551 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list551 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                        struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list551.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem552;
          -            for (int _i553 = 0; _i553 < _list551.size; ++_i553)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem552;
          +            for (int _i553 = 0; _i553 < _list551.size; ++_i553) {
                         _elem552 = iprot.readBinary();
                         struct.columns.add(_elem552);
                       }
          @@ -52698,12 +57146,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args s
                   }
                   if (incoming.get(4)) {
                     {
          -            org.apache.thrift.protocol.TMap _map554 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map554.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key555;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val556;
          -            for (int _i557 = 0; _i557 < _map554.size; ++_i557)
          -            {
          +            org.apache.thrift.protocol.TMap _map554 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map554.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key555;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val556;
          +            for (int _i557 = 0; _i557 < _map554.size; ++_i557) {
                         _key555 = iprot.readBinary();
                         _val556 = iprot.readBinary();
                         struct.attributes.put(_key555, _val556);
          @@ -52714,29 +57165,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_args s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerOpenTs_result implements org.apache.thrift.TBase<scannerOpenTs_result, scannerOpenTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenTs_result");
          +  public static class scannerOpenTs_result
           +      implements org.apache.thrift.TBase<scannerOpenTs_result, scannerOpenTs_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerOpenTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenTs_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenTs_resultTupleSchemeFactory();
           
               public int success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -52749,7 +57215,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -52760,12 +57226,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -52799,22 +57265,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32          , "ScannerID")));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32,
          +                "ScannerID")));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenTs_result.class,
          +        metaDataMap);
               }
           
               public scannerOpenTs_result() {
               }
           
          -    public scannerOpenTs_result(
          -      int success,
          -      IOError io)
          -    {
          +    public scannerOpenTs_result(int success, IOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -52854,7 +57325,8 @@ public scannerOpenTs_result setSuccess(int success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -52863,7 +57335,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -52891,23 +57364,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Integer)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -52915,60 +57389,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerOpenTs_result)
          -        return this.equals((scannerOpenTs_result)that);
          +      if (that instanceof scannerOpenTs_result) return this.equals((scannerOpenTs_result) that);
                 return false;
               }
           
               public boolean equals(scannerOpenTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -52981,8 +57451,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + success;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -53023,13 +57492,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -53058,37 +57529,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenTs_resultStandardScheme getScheme() {
                   return new scannerOpenTs_resultStandardScheme();
                 }
               }
           
           -    private static class scannerOpenTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenTs_result> {
          +    private static class scannerOpenTs_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerOpenTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -53096,7 +57573,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_resul
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.success = iprot.readI32();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -53105,7 +57582,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_resul
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -53116,11 +57593,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenTs_resul
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -53140,17 +57619,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenTs_resu
           
               }
           
          -    private static class scannerOpenTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenTs_resultTupleScheme getScheme() {
                   return new scannerOpenTs_resultTupleScheme();
                 }
               }
           
           -    private static class scannerOpenTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenTs_result> {
          +    private static class scannerOpenTs_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerOpenTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -53168,8 +57651,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_resul
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readI32();
          @@ -53183,42 +57668,59 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenTs_result
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerOpenWithStopTs_args implements org.apache.thrift.TBase<scannerOpenWithStopTs_args, scannerOpenWithStopTs_args._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenWithStopTs_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenWithStopTs_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField STOP_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("stopRow", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)4);
          -    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)5);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)6);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenWithStopTs_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenWithStopTs_argsTupleSchemeFactory();
          +  public static class scannerOpenWithStopTs_args implements
           +      org.apache.thrift.TBase<scannerOpenWithStopTs_args, scannerOpenWithStopTs_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerOpenWithStopTs_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenWithStopTs_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField STOP_ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("stopRow", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +            (short) 5);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 6);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenWithStopTs_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenWithStopTs_argsTupleSchemeFactory();
           
               /**
                * name of table
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow; // required
               /**
          -     * row to stop scanning on. This row is *not* included in the
          -     * scanner's results
          +     * row to stop scanning on. This row is *not* included in the scanner's results
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer stopRow; // required
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
                public @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns; // required
               /**
          @@ -53228,40 +57730,41 @@ public static class scannerOpenWithStopTs_args implements org.apache.thrift.TBas
               /**
                * Scan attributes
                */
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
          -       * Starting row in table to scan.
          -       * Send "" (empty string) to start at the first row.
          +       * Starting row in table to scan. Send "" (empty string) to start at the first row.
                  */
          -      START_ROW((short)2, "startRow"),
          +      START_ROW((short) 2, "startRow"),
                 /**
          -       * row to stop scanning on. This row is *not* included in the
          -       * scanner's results
          +       * row to stop scanning on. This row is *not* included in the scanner's results
                  */
          -      STOP_ROW((short)3, "stopRow"),
          +      STOP_ROW((short) 3, "stopRow"),
                 /**
          -       * columns to scan. If column name is a column family, all
          -       * columns of the specified column family are returned. It's also possible
          -       * to pass a regex in the column qualifier.
          +       * columns to scan. If column name is a column family, all columns of the specified column
          +       * family are returned. It's also possible to pass a regex in the column qualifier.
                  */
          -      COLUMNS((short)4, "columns"),
          +      COLUMNS((short) 4, "columns"),
                 /**
                  * timestamp
                  */
          -      TIMESTAMP((short)5, "timestamp"),
          +      TIMESTAMP((short) 5, "timestamp"),
                 /**
                  * Scan attributes
                  */
          -      ATTRIBUTES((short)6, "attributes");
          +      ATTRIBUTES((short) 6, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -53274,7 +57777,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // START_ROW
          @@ -53293,12 +57796,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -53332,37 +57835,53 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.START_ROW, new org.apache.thrift.meta_data.FieldMetaData("startRow", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.STOP_ROW, new org.apache.thrift.meta_data.FieldMetaData("stopRow", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          -      tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.START_ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("startRow",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.STOP_ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("stopRow",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMNS,
          +        new org.apache.thrift.meta_data.FieldMetaData("columns",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
          +      tmpMap.put(_Fields.TIMESTAMP,
          +        new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I64)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithStopTs_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(scannerOpenWithStopTs_args.class, metaDataMap);
               }
           
               public scannerOpenWithStopTs_args() {
               }
           
          -    public scannerOpenWithStopTs_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer startRow,
          -      java.nio.ByteBuffer stopRow,
           -      java.util.List<java.nio.ByteBuffer> columns,
          -      long timestamp,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public scannerOpenWithStopTs_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer startRow,
           +        java.nio.ByteBuffer stopRow, java.util.List<java.nio.ByteBuffer> columns, long timestamp,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.startRow = org.apache.thrift.TBaseHelper.copyBinary(startRow);
          @@ -53388,7 +57907,8 @@ public scannerOpenWithStopTs_args(scannerOpenWithStopTs_args other) {
                   this.stopRow = org.apache.thrift.TBaseHelper.copyBinary(other.stopRow);
                 }
                 if (other.isSetColumns()) {
           -        java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
           +        java.util.List<java.nio.ByteBuffer> __this__columns =
           +            new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                   for (java.nio.ByteBuffer other_element : other.columns) {
                     __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                   }
          @@ -53396,15 +57916,20 @@ public scannerOpenWithStopTs_args(scannerOpenWithStopTs_args other) {
                 }
                 this.timestamp = other.timestamp;
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
          +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -53443,11 +57968,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public scannerOpenWithStopTs_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public scannerOpenWithStopTs_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public scannerOpenWithStopTs_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -53468,8 +57995,7 @@ public void setTableNameIsSet(boolean value) {
               }
           
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public byte[] getStartRow() {
                 setStartRow(org.apache.thrift.TBaseHelper.rightSize(startRow));
          @@ -53481,15 +58007,16 @@ public java.nio.ByteBuffer bufferForStartRow() {
               }
           
               /**
          -     * Starting row in table to scan.
          -     * Send "" (empty string) to start at the first row.
          +     * Starting row in table to scan. Send "" (empty string) to start at the first row.
                */
               public scannerOpenWithStopTs_args setStartRow(byte[] startRow) {
          -      this.startRow = startRow == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(startRow.clone());
          +      this.startRow = startRow == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(startRow.clone());
                 return this;
               }
           
          -    public scannerOpenWithStopTs_args setStartRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow) {
          +    public scannerOpenWithStopTs_args
          +        setStartRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow) {
                 this.startRow = org.apache.thrift.TBaseHelper.copyBinary(startRow);
                 return this;
               }
          @@ -53510,8 +58037,7 @@ public void setStartRowIsSet(boolean value) {
               }
           
               /**
          -     * row to stop scanning on. This row is *not* included in the
          -     * scanner's results
          +     * row to stop scanning on. This row is *not* included in the scanner's results
                */
               public byte[] getStopRow() {
                 setStopRow(org.apache.thrift.TBaseHelper.rightSize(stopRow));
          @@ -53523,15 +58049,16 @@ public java.nio.ByteBuffer bufferForStopRow() {
               }
           
               /**
          -     * row to stop scanning on. This row is *not* included in the
          -     * scanner's results
          +     * row to stop scanning on. This row is *not* included in the scanner's results
                */
               public scannerOpenWithStopTs_args setStopRow(byte[] stopRow) {
          -      this.stopRow = stopRow == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(stopRow.clone());
          +      this.stopRow =
          +          stopRow == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(stopRow.clone());
                 return this;
               }
           
          -    public scannerOpenWithStopTs_args setStopRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer stopRow) {
          +    public scannerOpenWithStopTs_args
          +        setStopRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer stopRow) {
                 this.stopRow = org.apache.thrift.TBaseHelper.copyBinary(stopRow);
                 return this;
               }
          @@ -53568,9 +58095,8 @@ public void addToColumns(java.nio.ByteBuffer elem) {
               }
           
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
               @org.apache.thrift.annotation.Nullable
                public java.util.List<java.nio.ByteBuffer> getColumns() {
          @@ -53578,11 +58104,11 @@ public java.util.List getColumns() {
               }
           
               /**
          -     * columns to scan. If column name is a column family, all
          -     * columns of the specified column family are returned. It's also possible
          -     * to pass a regex in the column qualifier.
          +     * columns to scan. If column name is a column family, all columns of the specified column
          +     * family are returned. It's also possible to pass a regex in the column qualifier.
                */
           -    public scannerOpenWithStopTs_args setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
          +    public scannerOpenWithStopTs_args setColumns(
           +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
                 this.columns = columns;
                 return this;
               }
          @@ -53619,7 +58145,8 @@ public scannerOpenWithStopTs_args setTimestamp(long timestamp) {
               }
           
               public void unsetTimestamp() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
               }
           
               /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -53628,7 +58155,8 @@ public boolean isSetTimestamp() {
               }
           
               public void setTimestampIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
               }
           
               public int getAttributesSize() {
          @@ -53637,7 +58165,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -53646,14 +58174,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Scan attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Scan attributes
                */
           -    public scannerOpenWithStopTs_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +    public scannerOpenWithStopTs_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -53673,67 +58202,68 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case START_ROW:
          -        if (value == null) {
          -          unsetStartRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setStartRow((byte[])value);
          +        case START_ROW:
          +          if (value == null) {
          +            unsetStartRow();
                     } else {
          -            setStartRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setStartRow((byte[]) value);
          +            } else {
          +              setStartRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case STOP_ROW:
          -        if (value == null) {
          -          unsetStopRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setStopRow((byte[])value);
          +        case STOP_ROW:
          +          if (value == null) {
          +            unsetStopRow();
                     } else {
          -            setStopRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setStopRow((byte[]) value);
          +            } else {
          +              setStopRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMNS:
          -        if (value == null) {
          -          unsetColumns();
          -        } else {
           -          setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case COLUMNS:
          +          if (value == null) {
          +            unsetColumns();
          +          } else {
           +            setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
          -      case TIMESTAMP:
          -        if (value == null) {
          -          unsetTimestamp();
          -        } else {
          -          setTimestamp((java.lang.Long)value);
          -        }
          -        break;
          +        case TIMESTAMP:
          +          if (value == null) {
          +            unsetTimestamp();
          +          } else {
          +            setTimestamp((java.lang.Long) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -53741,47 +58271,50 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case START_ROW:
          -        return getStartRow();
          +        case START_ROW:
          +          return getStartRow();
           
          -      case STOP_ROW:
          -        return getStopRow();
          +        case STOP_ROW:
          +          return getStopRow();
           
          -      case COLUMNS:
          -        return getColumns();
          +        case COLUMNS:
          +          return getColumns();
           
          -      case TIMESTAMP:
          -        return getTimestamp();
          +        case TIMESTAMP:
          +          return getTimestamp();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case START_ROW:
          -        return isSetStartRow();
          -      case STOP_ROW:
          -        return isSetStopRow();
          -      case COLUMNS:
          -        return isSetColumns();
          -      case TIMESTAMP:
          -        return isSetTimestamp();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case START_ROW:
          +          return isSetStartRow();
          +        case STOP_ROW:
          +          return isSetStopRow();
          +        case COLUMNS:
          +          return isSetColumns();
          +        case TIMESTAMP:
          +          return isSetTimestamp();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -53789,68 +58322,54 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof scannerOpenWithStopTs_args)
          -        return this.equals((scannerOpenWithStopTs_args)that);
          +        return this.equals((scannerOpenWithStopTs_args) that);
                 return false;
               }
           
               public boolean equals(scannerOpenWithStopTs_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_startRow = true && this.isSetStartRow();
                 boolean that_present_startRow = true && that.isSetStartRow();
                 if (this_present_startRow || that_present_startRow) {
          -        if (!(this_present_startRow && that_present_startRow))
          -          return false;
          -        if (!this.startRow.equals(that.startRow))
          -          return false;
          +        if (!(this_present_startRow && that_present_startRow)) return false;
          +        if (!this.startRow.equals(that.startRow)) return false;
                 }
           
                 boolean this_present_stopRow = true && this.isSetStopRow();
                 boolean that_present_stopRow = true && that.isSetStopRow();
                 if (this_present_stopRow || that_present_stopRow) {
          -        if (!(this_present_stopRow && that_present_stopRow))
          -          return false;
          -        if (!this.stopRow.equals(that.stopRow))
          -          return false;
          +        if (!(this_present_stopRow && that_present_stopRow)) return false;
          +        if (!this.stopRow.equals(that.stopRow)) return false;
                 }
           
                 boolean this_present_columns = true && this.isSetColumns();
                 boolean that_present_columns = true && that.isSetColumns();
                 if (this_present_columns || that_present_columns) {
          -        if (!(this_present_columns && that_present_columns))
          -          return false;
          -        if (!this.columns.equals(that.columns))
          -          return false;
          +        if (!(this_present_columns && that_present_columns)) return false;
          +        if (!this.columns.equals(that.columns)) return false;
                 }
           
                 boolean this_present_timestamp = true;
                 boolean that_present_timestamp = true;
                 if (this_present_timestamp || that_present_timestamp) {
          -        if (!(this_present_timestamp && that_present_timestamp))
          -          return false;
          -        if (this.timestamp != that.timestamp)
          -          return false;
          +        if (!(this_present_timestamp && that_present_timestamp)) return false;
          +        if (this.timestamp != that.timestamp) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -53861,26 +58380,21 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetStartRow()) ? 131071 : 524287);
          -      if (isSetStartRow())
          -        hashCode = hashCode * 8191 + startRow.hashCode();
          +      if (isSetStartRow()) hashCode = hashCode * 8191 + startRow.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetStopRow()) ? 131071 : 524287);
          -      if (isSetStopRow())
          -        hashCode = hashCode * 8191 + stopRow.hashCode();
          +      if (isSetStopRow()) hashCode = hashCode * 8191 + stopRow.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -      if (isSetColumns())
          -        hashCode = hashCode * 8191 + columns.hashCode();
          +      if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -53961,11 +58475,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -54028,37 +58544,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenWithStopTs_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithStopTs_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithStopTs_argsStandardScheme getScheme() {
                   return new scannerOpenWithStopTs_argsStandardScheme();
                 }
               }
           
           -    private static class scannerOpenWithStopTs_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithStopTs_args> {
          +    private static class scannerOpenWithStopTs_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithStopTs_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStopTs_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          scannerOpenWithStopTs_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -54066,7 +58588,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54074,7 +58596,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.startRow = iprot.readBinary();
                           struct.setStartRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54082,7 +58604,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.stopRow = iprot.readBinary();
                           struct.setStopRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54091,16 +58613,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                           {
                             org.apache.thrift.protocol.TList _list558 = iprot.readListBegin();
                              struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list558.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem559;
          -                  for (int _i560 = 0; _i560 < _list558.size; ++_i560)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem559;
          +                  for (int _i560 = 0; _i560 < _list558.size; ++_i560) {
                               _elem559 = iprot.readBinary();
                               struct.columns.add(_elem559);
                             }
                             iprot.readListEnd();
                           }
                           struct.setColumnsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54108,7 +58630,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                           struct.timestamp = iprot.readI64();
                           struct.setTimestampIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54116,11 +58638,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map561 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map561.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key562;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val563;
          -                  for (int _i564 = 0; _i564 < _map561.size; ++_i564)
          -                  {
          +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                          2 * _map561.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key562;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val563;
          +                  for (int _i564 = 0; _i564 < _map561.size; ++_i564) {
                               _key562 = iprot.readBinary();
                               _val563 = iprot.readBinary();
                               struct.attributes.put(_key562, _val563);
          @@ -54128,7 +58653,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54139,11 +58664,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithStopTs_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          scannerOpenWithStopTs_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -54165,9 +58692,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto
                   if (struct.columns != null) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter565 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter565 : struct.columns) {
                         oprot.writeBinary(_iter565);
                       }
                       oprot.writeListEnd();
          @@ -54180,9 +58707,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter566 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter566 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter566.getKey());
                         oprot.writeBinary(_iter566.getValue());
                       }
          @@ -54196,17 +58725,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto
           
               }
           
          -    private static class scannerOpenWithStopTs_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithStopTs_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithStopTs_argsTupleScheme getScheme() {
                   return new scannerOpenWithStopTs_argsTupleScheme();
                 }
               }
           
           -    private static class scannerOpenWithStopTs_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithStopTs_args> {
          +    private static class scannerOpenWithStopTs_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithStopTs_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          scannerOpenWithStopTs_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -54239,8 +58772,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop
                   if (struct.isSetColumns()) {
                     {
                       oprot.writeI32(struct.columns.size());
          -            for (java.nio.ByteBuffer _iter567 : struct.columns)
          -            {
          +            for (java.nio.ByteBuffer _iter567 : struct.columns) {
                         oprot.writeBinary(_iter567);
                       }
                     }
          @@ -54251,8 +58783,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter568 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter568 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter568.getKey());
                         oprot.writeBinary(_iter568.getValue());
                       }
          @@ -54261,8 +58793,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopTs_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopTs_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(6);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -54278,11 +58812,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopT
                   }
                   if (incoming.get(3)) {
                     {
          -            org.apache.thrift.protocol.TList _list569 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list569 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                        struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list569.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem570;
          -            for (int _i571 = 0; _i571 < _list569.size; ++_i571)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem570;
          +            for (int _i571 = 0; _i571 < _list569.size; ++_i571) {
                         _elem570 = iprot.readBinary();
                         struct.columns.add(_elem570);
                       }
          @@ -54295,12 +58830,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopT
                   }
                   if (incoming.get(5)) {
                     {
          -            org.apache.thrift.protocol.TMap _map572 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map572.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key573;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val574;
          -            for (int _i575 = 0; _i575 < _map572.size; ++_i575)
          -            {
          +            org.apache.thrift.protocol.TMap _map572 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map572.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key573;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val574;
          +            for (int _i575 = 0; _i575 < _map572.size; ++_i575) {
                         _key573 = iprot.readBinary();
                         _val574 = iprot.readBinary();
                         struct.attributes.put(_key573, _val574);
          @@ -54311,29 +58849,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopT
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerOpenWithStopTs_result implements org.apache.thrift.TBase<scannerOpenWithStopTs_result, scannerOpenWithStopTs_result._Fields>, java.io.Serializable, Cloneable, Comparable<scannerOpenWithStopTs_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerOpenWithStopTs_result");
          +  public static class scannerOpenWithStopTs_result implements
           +      org.apache.thrift.TBase<scannerOpenWithStopTs_result, scannerOpenWithStopTs_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerOpenWithStopTs_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerOpenWithStopTs_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerOpenWithStopTs_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerOpenWithStopTs_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerOpenWithStopTs_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerOpenWithStopTs_resultTupleSchemeFactory();
           
               public int success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -54346,7 +58899,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -54357,12 +58910,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -54396,22 +58949,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32          , "ScannerID")));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32,
          +                "ScannerID")));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerOpenWithStopTs_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(scannerOpenWithStopTs_result.class, metaDataMap);
               }
           
               public scannerOpenWithStopTs_result() {
               }
           
          -    public scannerOpenWithStopTs_result(
          -      int success,
          -      IOError io)
          -    {
          +    public scannerOpenWithStopTs_result(int success, IOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -54451,7 +59009,8 @@ public scannerOpenWithStopTs_result setSuccess(int success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -54460,7 +59019,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -54488,23 +59048,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Integer)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -54512,27 +59073,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -54540,32 +59104,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof scannerOpenWithStopTs_result)
          -        return this.equals((scannerOpenWithStopTs_result)that);
          +        return this.equals((scannerOpenWithStopTs_result) that);
                 return false;
               }
           
               public boolean equals(scannerOpenWithStopTs_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -54578,8 +59136,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + success;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -54620,13 +59177,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -54655,37 +59214,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerOpenWithStopTs_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithStopTs_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithStopTs_resultStandardScheme getScheme() {
                   return new scannerOpenWithStopTs_resultStandardScheme();
                 }
               }
           
           -    private static class scannerOpenWithStopTs_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithStopTs_result> {
          +    private static class scannerOpenWithStopTs_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerOpenWithStopTs_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStopTs_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          scannerOpenWithStopTs_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -54693,7 +59258,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.success = iprot.readI32();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54702,7 +59267,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54713,11 +59278,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerOpenWithStop
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithStopTs_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          scannerOpenWithStopTs_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -54737,17 +59304,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerOpenWithSto
           
               }
           
          -    private static class scannerOpenWithStopTs_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerOpenWithStopTs_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerOpenWithStopTs_resultTupleScheme getScheme() {
                   return new scannerOpenWithStopTs_resultTupleScheme();
                 }
               }
           
           -    private static class scannerOpenWithStopTs_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithStopTs_result> {
          +    private static class scannerOpenWithStopTs_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerOpenWithStopTs_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          scannerOpenWithStopTs_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -54765,8 +59336,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStop
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopTs_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          scannerOpenWithStopTs_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readI32();
          @@ -54780,32 +59353,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerOpenWithStopT
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerGet_args implements org.apache.thrift.TBase<scannerGet_args, scannerGet_args._Fields>, java.io.Serializable, Cloneable, Comparable<scannerGet_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerGet_args");
          +  public static class scannerGet_args
           +      implements org.apache.thrift.TBase<scannerGet_args, scannerGet_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerGet_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerGet_args");
           
          -    private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I32, (short)1);
          +    private static final org.apache.thrift.protocol.TField ID_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I32,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerGet_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerGet_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerGet_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerGet_argsTupleSchemeFactory();
           
               /**
                * id of a scanner returned by scannerOpen
                */
               public int id; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * id of a scanner returned by scannerOpen
                  */
          -      ID((short)1, "id");
          +      ID((short) 1, "id");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -54818,7 +59405,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // ID
                       return ID;
                     default:
          @@ -54827,12 +59414,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -54866,19 +59453,22 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32          , "ScannerID")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.ID,
          +        new org.apache.thrift.meta_data.FieldMetaData("id",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32,
          +                "ScannerID")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerGet_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerGet_args.class,
          +        metaDataMap);
               }
           
               public scannerGet_args() {
               }
           
          -    public scannerGet_args(
          -      int id)
          -    {
          +    public scannerGet_args(int id) {
                 this();
                 this.id = id;
                 setIdIsSet(true);
          @@ -54928,18 +59518,20 @@ public boolean isSetId() {
               }
           
               public void setIdIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case ID:
          -        if (value == null) {
          -          unsetId();
          -        } else {
          -          setId((java.lang.Integer)value);
          -        }
          -        break;
          +        case ID:
          +          if (value == null) {
          +            unsetId();
          +          } else {
          +            setId((java.lang.Integer) value);
          +          }
          +          break;
           
                 }
               }
          @@ -54947,46 +59539,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case ID:
          -        return getId();
          +        case ID:
          +          return getId();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case ID:
          -        return isSetId();
          +        case ID:
          +          return isSetId();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerGet_args)
          -        return this.equals((scannerGet_args)that);
          +      if (that instanceof scannerGet_args) return this.equals((scannerGet_args) that);
                 return false;
               }
           
               public boolean equals(scannerGet_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_id = true;
                 boolean that_present_id = true;
                 if (this_present_id || that_present_id) {
          -        if (!(this_present_id && that_present_id))
          -          return false;
          -        if (this.id != that.id)
          -          return false;
          +        if (!(this_present_id && that_present_id)) return false;
          +        if (this.id != that.id) return false;
                 }
           
                 return true;
          @@ -55027,11 +59617,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -55054,37 +59646,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerGet_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerGet_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerGet_argsStandardScheme getScheme() {
                   return new scannerGet_argsStandardScheme();
                 }
               }
           
           -    private static class scannerGet_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerGet_args> {
           +    private static class scannerGet_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerGet_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -55092,7 +59690,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_args str
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.id = iprot.readI32();
                           struct.setIdIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -55103,11 +59701,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_args str
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGet_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGet_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -55120,17 +59720,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGet_args st
           
               }
           
          -    private static class scannerGet_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerGet_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerGet_argsTupleScheme getScheme() {
                   return new scannerGet_argsTupleScheme();
                 }
               }
           
           -    private static class scannerGet_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerGet_args> {
           +    private static class scannerGet_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerGet_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerGet_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerGet_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetId()) {
                     optionals.set(0);
          @@ -55142,8 +59746,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerGet_args str
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerGet_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerGet_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.id = iprot.readI32();
          @@ -55152,32 +59758,48 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerGet_args stru
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerGet_result implements org.apache.thrift.TBase<scannerGet_result, scannerGet_result._Fields>, java.io.Serializable, Cloneable, Comparable<scannerGet_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerGet_result");
          -
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerGet_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerGet_resultTupleSchemeFactory();
          +  public static class scannerGet_result
           +      implements org.apache.thrift.TBase<scannerGet_result, scannerGet_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerGet_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerGet_result");
          +
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerGet_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerGet_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io"),
          -      IA((short)2, "ia");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io"), IA((short) 2, "ia");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -55190,7 +59812,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -55203,12 +59825,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -55240,26 +59862,33 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerGet_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerGet_result.class,
          +        metaDataMap);
               }
           
               public scannerGet_result() {
               }
           
          -    public scannerGet_result(
           -      java.util.List<TRowResult> success,
          -      IOError io,
          -      IllegalArgument ia)
          -    {
           +    public scannerGet_result(java.util.List<TRowResult> success, IOError io, IllegalArgument ia) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -55271,7 +59900,8 @@ public scannerGet_result(
                */
               public scannerGet_result(scannerGet_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
           +        java.util.List<TRowResult> __this__success =
           +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
           @@ -55317,7 +59947,8 @@ public java.util.List<TRowResult> getSuccess() {
                 return this.success;
               }
           
           -    public scannerGet_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
           +    public scannerGet_result
           +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -55387,31 +60018,32 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -55419,74 +60051,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerGet_result)
          -        return this.equals((scannerGet_result)that);
          +      if (that instanceof scannerGet_result) return this.equals((scannerGet_result) that);
                 return false;
               }
           
               public boolean equals(scannerGet_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -55497,16 +60123,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -55557,13 +60180,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -55604,35 +60229,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerGet_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerGet_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerGet_resultStandardScheme getScheme() {
                   return new scannerGet_resultStandardScheme();
                 }
               }
           
           -    private static class scannerGet_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerGet_result> {
           +    private static class scannerGet_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerGet_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -55641,9 +60271,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_result s
                           {
                             org.apache.thrift.protocol.TList _list576 = iprot.readListBegin();
                              struct.success = new java.util.ArrayList<TRowResult>(_list576.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem577;
          -                  for (int _i578 = 0; _i578 < _list576.size; ++_i578)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem577;
          +                  for (int _i578 = 0; _i578 < _list576.size; ++_i578) {
                               _elem577 = new TRowResult();
                               _elem577.read(iprot);
                               struct.success.add(_elem577);
          @@ -55651,7 +60281,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_result s
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -55660,7 +60290,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_result s
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -55669,7 +60299,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_result s
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -55680,20 +60310,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGet_result s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGet_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGet_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter579 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter579 : struct.success) {
                         _iter579.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -55716,17 +60348,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGet_result
           
               }
           
          -    private static class scannerGet_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerGet_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerGet_resultTupleScheme getScheme() {
                   return new scannerGet_resultTupleScheme();
                 }
               }
           
           -    private static class scannerGet_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerGet_result> {
           +    private static class scannerGet_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerGet_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerGet_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerGet_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -55741,8 +60377,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerGet_result s
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter580 : struct.success)
          -            {
          +            for (TRowResult _iter580 : struct.success) {
                         _iter580.write(oprot);
                       }
                     }
          @@ -55756,16 +60391,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerGet_result s
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerGet_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerGet_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list581 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list581 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TRowResult>(_list581.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem582;
          -            for (int _i583 = 0; _i583 < _list581.size; ++_i583)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem582;
          +            for (int _i583 = 0; _i583 < _list581.size; ++_i583) {
                         _elem582 = new TRowResult();
                         _elem582.read(iprot);
                         struct.success.add(_elem582);
          @@ -55786,19 +60424,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerGet_result st
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerGetList_args implements org.apache.thrift.TBase<scannerGetList_args, scannerGetList_args._Fields>, java.io.Serializable, Cloneable, Comparable<scannerGetList_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerGetList_args");
          +  public static class scannerGetList_args
           +      implements org.apache.thrift.TBase<scannerGetList_args, scannerGetList_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerGetList_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerGetList_args");
           
          -    private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I32, (short)1);
          -    private static final org.apache.thrift.protocol.TField NB_ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("nbRows", org.apache.thrift.protocol.TType.I32, (short)2);
          +    private static final org.apache.thrift.protocol.TField ID_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I32,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField NB_ROWS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("nbRows", org.apache.thrift.protocol.TType.I32,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerGetList_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerGetList_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerGetList_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerGetList_argsTupleSchemeFactory();
           
               /**
                * id of a scanner returned by scannerOpen
          @@ -55809,18 +60459,22 @@ public static class scannerGetList_args implements org.apache.thrift.TBase byName = new java.util.HashMap();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -55833,7 +60487,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // ID
                       return ID;
                     case 2: // NB_ROWS
          @@ -55844,12 +60498,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -55884,22 +60538,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32          , "ScannerID")));
          -      tmpMap.put(_Fields.NB_ROWS, new org.apache.thrift.meta_data.FieldMetaData("nbRows", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.ID,
          +        new org.apache.thrift.meta_data.FieldMetaData("id",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32,
          +                "ScannerID")));
          +      tmpMap.put(_Fields.NB_ROWS,
          +        new org.apache.thrift.meta_data.FieldMetaData("nbRows",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I32)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerGetList_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerGetList_args.class,
          +        metaDataMap);
               }
           
               public scannerGetList_args() {
               }
           
          -    public scannerGetList_args(
          -      int id,
          -      int nbRows)
          -    {
          +    public scannerGetList_args(int id, int nbRows) {
                 this();
                 this.id = id;
                 setIdIsSet(true);
          @@ -55954,7 +60613,8 @@ public boolean isSetId() {
               }
           
               public void setIdIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
               }
           
               /**
          @@ -55974,7 +60634,8 @@ public scannerGetList_args setNbRows(int nbRows) {
               }
           
               public void unsetNbRows() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NBROWS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NBROWS_ISSET_ID);
               }
           
               /** Returns true if field nbRows is set (has been assigned a value) and false otherwise */
          @@ -55983,26 +60644,28 @@ public boolean isSetNbRows() {
               }
           
               public void setNbRowsIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NBROWS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NBROWS_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case ID:
          -        if (value == null) {
          -          unsetId();
          -        } else {
          -          setId((java.lang.Integer)value);
          -        }
          -        break;
          +        case ID:
          +          if (value == null) {
          +            unsetId();
          +          } else {
          +            setId((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case NB_ROWS:
          -        if (value == null) {
          -          unsetNbRows();
          -        } else {
          -          setNbRows((java.lang.Integer)value);
          -        }
          -        break;
          +        case NB_ROWS:
          +          if (value == null) {
          +            unsetNbRows();
          +          } else {
          +            setNbRows((java.lang.Integer) value);
          +          }
          +          break;
           
                 }
               }
          @@ -56010,60 +60673,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case ID:
          -        return getId();
          +        case ID:
          +          return getId();
           
          -      case NB_ROWS:
          -        return getNbRows();
          +        case NB_ROWS:
          +          return getNbRows();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case ID:
          -        return isSetId();
          -      case NB_ROWS:
          -        return isSetNbRows();
          +        case ID:
          +          return isSetId();
          +        case NB_ROWS:
          +          return isSetNbRows();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerGetList_args)
          -        return this.equals((scannerGetList_args)that);
          +      if (that instanceof scannerGetList_args) return this.equals((scannerGetList_args) that);
                 return false;
               }
           
               public boolean equals(scannerGetList_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_id = true;
                 boolean that_present_id = true;
                 if (this_present_id || that_present_id) {
          -        if (!(this_present_id && that_present_id))
          -          return false;
          -        if (this.id != that.id)
          -          return false;
          +        if (!(this_present_id && that_present_id)) return false;
          +        if (this.id != that.id) return false;
                 }
           
                 boolean this_present_nbRows = true;
                 boolean that_present_nbRows = true;
                 if (this_present_nbRows || that_present_nbRows) {
          -        if (!(this_present_nbRows && that_present_nbRows))
          -          return false;
          -        if (this.nbRows != that.nbRows)
          -          return false;
          +        if (!(this_present_nbRows && that_present_nbRows)) return false;
          +        if (this.nbRows != that.nbRows) return false;
                 }
           
                 return true;
          @@ -56116,11 +60775,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -56147,37 +60808,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerGetList_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerGetList_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerGetList_argsStandardScheme getScheme() {
                   return new scannerGetList_argsStandardScheme();
                 }
               }
           
           -    private static class scannerGetList_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerGetList_args> {
           +    private static class scannerGetList_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerGetList_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -56185,7 +60852,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.id = iprot.readI32();
                           struct.setIdIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56193,7 +60860,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.nbRows = iprot.readI32();
                           struct.setNbRowsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56204,11 +60871,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGetList_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGetList_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -56224,17 +60893,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGetList_arg
           
               }
           
          -    private static class scannerGetList_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerGetList_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerGetList_argsTupleScheme getScheme() {
                   return new scannerGetList_argsTupleScheme();
                 }
               }
           
           -    private static class scannerGetList_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerGetList_args> {
           +    private static class scannerGetList_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerGetList_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerGetList_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerGetList_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetId()) {
                     optionals.set(0);
          @@ -56252,8 +60925,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerGetList_args
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerGetList_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerGetList_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.id = iprot.readI32();
          @@ -56266,32 +60941,48 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerGetList_args
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerGetList_result implements org.apache.thrift.TBase<scannerGetList_result, scannerGetList_result._Fields>, java.io.Serializable, Cloneable, Comparable<scannerGetList_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerGetList_result");
          -
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerGetList_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerGetList_resultTupleSchemeFactory();
          +  public static class scannerGetList_result
           +      implements org.apache.thrift.TBase<scannerGetList_result, scannerGetList_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerGetList_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerGetList_result");
          +
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerGetList_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerGetList_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io"),
          -      IA((short)2, "ia");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io"), IA((short) 2, "ia");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -56304,7 +60995,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -56317,12 +61008,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -56354,26 +61045,34 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TRowResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerGetList_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerGetList_result.class,
          +        metaDataMap);
               }
           
               public scannerGetList_result() {
               }
           
          -    public scannerGetList_result(
           -      java.util.List<TRowResult> success,
          -      IOError io,
          -      IllegalArgument ia)
          -    {
           +    public scannerGetList_result(java.util.List<TRowResult> success, IOError io,
          +        IllegalArgument ia) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -56385,7 +61084,8 @@ public scannerGetList_result(
                */
               public scannerGetList_result(scannerGetList_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TRowResult> __this__success = new java.util.ArrayList<TRowResult>(other.success.size());
           +        java.util.List<TRowResult> __this__success =
           +            new java.util.ArrayList<TRowResult>(other.success.size());
                   for (TRowResult other_element : other.success) {
                     __this__success.add(new TRowResult(other_element));
                   }
           @@ -56431,7 +61131,8 @@ public java.util.List<TRowResult> getSuccess() {
                 return this.success;
               }
           
           -    public scannerGetList_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
           +    public scannerGetList_result
           +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TRowResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -56501,31 +61202,32 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TRowResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TRowResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -56533,74 +61235,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerGetList_result)
          -        return this.equals((scannerGetList_result)that);
          +      if (that instanceof scannerGetList_result) return this.equals((scannerGetList_result) that);
                 return false;
               }
           
               public boolean equals(scannerGetList_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -56611,16 +61307,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -56671,13 +61364,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -56718,35 +61413,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerGetList_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerGetList_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerGetList_resultStandardScheme getScheme() {
                   return new scannerGetList_resultStandardScheme();
                 }
               }
           
           -    private static class scannerGetList_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerGetList_result> {
           +    private static class scannerGetList_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerGetList_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -56755,9 +61455,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_resu
                           {
                             org.apache.thrift.protocol.TList _list584 = iprot.readListBegin();
                          struct.success = new java.util.ArrayList<TRowResult>(_list584.size);
          -                  @org.apache.thrift.annotation.Nullable TRowResult _elem585;
          -                  for (int _i586 = 0; _i586 < _list584.size; ++_i586)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TRowResult _elem585;
          +                  for (int _i586 = 0; _i586 < _list584.size; ++_i586) {
                               _elem585 = new TRowResult();
                               _elem585.read(iprot);
                               struct.success.add(_elem585);
          @@ -56765,7 +61465,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_resu
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56774,7 +61474,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_resu
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56783,7 +61483,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_resu
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56794,20 +61494,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerGetList_resu
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGetList_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGetList_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TRowResult _iter587 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TRowResult _iter587 : struct.success) {
                         _iter587.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -56830,17 +61532,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerGetList_res
           
               }
           
          -    private static class scannerGetList_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerGetList_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerGetList_resultTupleScheme getScheme() {
                   return new scannerGetList_resultTupleScheme();
                 }
               }
           
           -    private static class scannerGetList_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerGetList_result> {
           +    private static class scannerGetList_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<scannerGetList_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerGetList_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerGetList_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -56855,8 +61561,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerGetList_resu
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TRowResult _iter588 : struct.success)
          -            {
          +            for (TRowResult _iter588 : struct.success) {
                         _iter588.write(oprot);
                       }
                     }
          @@ -56870,16 +61575,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerGetList_resu
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerGetList_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerGetList_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list589 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list589 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TRowResult>(_list589.size);
          -            @org.apache.thrift.annotation.Nullable TRowResult _elem590;
          -            for (int _i591 = 0; _i591 < _list589.size; ++_i591)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TRowResult _elem590;
          +            for (int _i591 = 0; _i591 < _list589.size; ++_i591) {
                         _elem590 = new TRowResult();
                         _elem590.read(iprot);
                         struct.success.add(_elem590);
          @@ -56900,32 +61608,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerGetList_resul
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class scannerClose_args implements org.apache.thrift.TBase<scannerClose_args, scannerClose_args._Fields>, java.io.Serializable, Cloneable, Comparable<scannerClose_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerClose_args");
          +  public static class scannerClose_args
           +      implements org.apache.thrift.TBase<scannerClose_args, scannerClose_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<scannerClose_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerClose_args");
           
          -    private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I32, (short)1);
          +    private static final org.apache.thrift.protocol.TField ID_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I32,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerClose_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerClose_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerClose_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerClose_argsTupleSchemeFactory();
           
               /**
                * id of a scanner returned by scannerOpen
                */
               public int id; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * id of a scanner returned by scannerOpen
                  */
          -      ID((short)1, "id");
          +      ID((short) 1, "id");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -56938,7 +61660,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // ID
                       return ID;
                     default:
          @@ -56947,12 +61669,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -56986,19 +61708,22 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32          , "ScannerID")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.ID,
          +        new org.apache.thrift.meta_data.FieldMetaData("id",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32,
          +                "ScannerID")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerClose_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerClose_args.class,
          +        metaDataMap);
               }
           
               public scannerClose_args() {
               }
           
          -    public scannerClose_args(
          -      int id)
          -    {
          +    public scannerClose_args(int id) {
                 this();
                 this.id = id;
                 setIdIsSet(true);
          @@ -57048,18 +61773,20 @@ public boolean isSetId() {
               }
           
               public void setIdIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case ID:
          -        if (value == null) {
          -          unsetId();
          -        } else {
          -          setId((java.lang.Integer)value);
          -        }
          -        break;
          +        case ID:
          +          if (value == null) {
          +            unsetId();
          +          } else {
          +            setId((java.lang.Integer) value);
          +          }
          +          break;
           
                 }
               }
          @@ -57067,46 +61794,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case ID:
          -        return getId();
          +        case ID:
          +          return getId();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case ID:
          -        return isSetId();
          +        case ID:
          +          return isSetId();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerClose_args)
          -        return this.equals((scannerClose_args)that);
          +      if (that instanceof scannerClose_args) return this.equals((scannerClose_args) that);
                 return false;
               }
           
               public boolean equals(scannerClose_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_id = true;
                 boolean that_present_id = true;
                 if (this_present_id || that_present_id) {
          -        if (!(this_present_id && that_present_id))
          -          return false;
          -        if (this.id != that.id)
          -          return false;
          +        if (!(this_present_id && that_present_id)) return false;
          +        if (this.id != that.id) return false;
                 }
           
                 return true;
          @@ -57147,11 +61872,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -57174,37 +61901,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerClose_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerClose_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerClose_argsStandardScheme getScheme() {
                   return new scannerClose_argsStandardScheme();
                 }
               }
           
           -    private static class scannerClose_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerClose_args> {
           +    private static class scannerClose_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<scannerClose_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerClose_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerClose_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -57212,7 +61945,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerClose_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.id = iprot.readI32();
                           struct.setIdIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -57223,11 +61956,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerClose_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerClose_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerClose_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -57240,17 +61975,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerClose_args
           
               }
           
          -    private static class scannerClose_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerClose_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerClose_argsTupleScheme getScheme() {
                   return new scannerClose_argsTupleScheme();
                 }
               }
           
          -    private static class scannerClose_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerClose_args> {
          +    private static class scannerClose_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<scannerClose_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerClose_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerClose_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetId()) {
                     optionals.set(0);
          @@ -57262,8 +62001,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerClose_args s
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerClose_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerClose_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.id = iprot.readI32();
          @@ -57272,29 +62013,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerClose_args st
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class scannerClose_result implements org.apache.thrift.TBase<scannerClose_result, scannerClose_result._Fields>, java.io.Serializable, Cloneable, Comparable<scannerClose_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("scannerClose_result");
          +  public static class scannerClose_result
          +      implements org.apache.thrift.TBase<scannerClose_result, scannerClose_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<scannerClose_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("scannerClose_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new scannerClose_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new scannerClose_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new scannerClose_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new scannerClose_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io"),
          -      IA((short)2, "ia");
          +      IO((short) 1, "io"), IA((short) 2, "ia");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -57307,7 +62063,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     case 2: // IA
          @@ -57318,12 +62074,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -57355,22 +62111,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerClose_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(scannerClose_result.class,
          +        metaDataMap);
               }
           
               public scannerClose_result() {
               }
           
          -    public scannerClose_result(
          -      IOError io,
          -      IllegalArgument ia)
          -    {
          +    public scannerClose_result(IOError io, IllegalArgument ia) {
                 this();
                 this.io = io;
                 this.ia = ia;
          @@ -57448,23 +62209,24 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -57472,60 +62234,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof scannerClose_result)
          -        return this.equals((scannerClose_result)that);
          +      if (that instanceof scannerClose_result) return this.equals((scannerClose_result) that);
                 return false;
               }
           
               public boolean equals(scannerClose_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -57536,12 +62294,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -57582,13 +62338,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -57621,35 +62379,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class scannerClose_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerClose_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerClose_resultStandardScheme getScheme() {
                   return new scannerClose_resultStandardScheme();
                 }
               }
           
          -    private static class scannerClose_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<scannerClose_result> {
          +    private static class scannerClose_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<scannerClose_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerClose_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, scannerClose_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -57658,7 +62421,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerClose_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -57667,7 +62430,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerClose_result
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -57678,11 +62441,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, scannerClose_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerClose_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, scannerClose_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -57702,17 +62467,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, scannerClose_resul
           
               }
           
          -    private static class scannerClose_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class scannerClose_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public scannerClose_resultTupleScheme getScheme() {
                   return new scannerClose_resultTupleScheme();
                 }
               }
           
          -    private static class scannerClose_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<scannerClose_result> {
          +    private static class scannerClose_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<scannerClose_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, scannerClose_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, scannerClose_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -57730,8 +62499,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, scannerClose_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, scannerClose_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, scannerClose_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.io = new IOError();
          @@ -57746,32 +62517,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, scannerClose_result
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRegionInfo_args implements org.apache.thrift.TBase<getRegionInfo_args, getRegionInfo_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRegionInfo_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRegionInfo_args");
          +  public static class getRegionInfo_args
          +      implements org.apache.thrift.TBase<getRegionInfo_args, getRegionInfo_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRegionInfo_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRegionInfo_args");
           
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRegionInfo_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRegionInfo_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRegionInfo_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRegionInfo_argsTupleSchemeFactory();
           
               /**
                * row key
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * row key
                  */
          -      ROW((short)1, "row");
          +      ROW((short) 1, "row");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -57784,7 +62569,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // ROW
                       return ROW;
                     default:
          @@ -57793,12 +62578,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -57830,19 +62615,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionInfo_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionInfo_args.class,
          +        metaDataMap);
               }
           
               public getRegionInfo_args() {
               }
           
          -    public getRegionInfo_args(
          -      java.nio.ByteBuffer row)
          -    {
          +    public getRegionInfo_args(java.nio.ByteBuffer row) {
                 this();
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
               }
          @@ -57881,11 +62669,12 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public getRegionInfo_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          -    public getRegionInfo_args setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
          +    public getRegionInfo_args
          +        setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
                 return this;
               }
          @@ -57905,19 +62694,20 @@ public void setRowIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -57925,46 +62715,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case ROW:
          -        return isSetRow();
          +        case ROW:
          +          return isSetRow();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRegionInfo_args)
          -        return this.equals((getRegionInfo_args)that);
          +      if (that instanceof getRegionInfo_args) return this.equals((getRegionInfo_args) that);
                 return false;
               }
           
               public boolean equals(getRegionInfo_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 return true;
          @@ -57975,8 +62763,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 return hashCode;
               }
          @@ -58007,11 +62794,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -58038,35 +62827,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRegionInfo_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRegionInfo_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRegionInfo_argsStandardScheme getScheme() {
                   return new getRegionInfo_argsStandardScheme();
                 }
               }
           
          -    private static class getRegionInfo_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRegionInfo_args> {
          +    private static class getRegionInfo_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getRegionInfo_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionInfo_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionInfo_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -58074,7 +62868,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionInfo_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -58085,11 +62879,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionInfo_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionInfo_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionInfo_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -58104,17 +62900,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionInfo_args
           
               }
           
          -    private static class getRegionInfo_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRegionInfo_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRegionInfo_argsTupleScheme getScheme() {
                   return new getRegionInfo_argsTupleScheme();
                 }
               }
           
          -    private static class getRegionInfo_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRegionInfo_args> {
          +    private static class getRegionInfo_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getRegionInfo_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetRow()) {
                     optionals.set(0);
          @@ -58126,8 +62926,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_args
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.row = iprot.readBinary();
          @@ -58136,29 +62938,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_args s
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getRegionInfo_result implements org.apache.thrift.TBase<getRegionInfo_result, getRegionInfo_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRegionInfo_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRegionInfo_result");
          +  public static class getRegionInfo_result
          +      implements org.apache.thrift.TBase<getRegionInfo_result, getRegionInfo_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getRegionInfo_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRegionInfo_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRegionInfo_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRegionInfo_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRegionInfo_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRegionInfo_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TRegionInfo success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -58171,7 +62988,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -58182,12 +62999,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -58219,22 +63036,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRegionInfo.class)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TRegionInfo.class)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionInfo_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionInfo_result.class,
          +        metaDataMap);
               }
           
               public getRegionInfo_result() {
               }
           
          -    public getRegionInfo_result(
          -      TRegionInfo success,
          -      IOError io)
          -    {
          +    public getRegionInfo_result(TRegionInfo success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -58267,7 +63089,8 @@ public TRegionInfo getSuccess() {
                 return this.success;
               }
           
          -    public getRegionInfo_result setSuccess(@org.apache.thrift.annotation.Nullable TRegionInfo success) {
          +    public getRegionInfo_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable TRegionInfo success) {
                 this.success = success;
                 return this;
               }
          @@ -58312,23 +63135,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((TRegionInfo)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((TRegionInfo) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -58336,60 +63160,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRegionInfo_result)
          -        return this.equals((getRegionInfo_result)that);
          +      if (that instanceof getRegionInfo_result) return this.equals((getRegionInfo_result) that);
                 return false;
               }
           
               public boolean equals(getRegionInfo_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -58400,12 +63220,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -58446,13 +63264,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -58488,35 +63308,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRegionInfo_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRegionInfo_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRegionInfo_resultStandardScheme getScheme() {
                   return new getRegionInfo_resultStandardScheme();
                 }
               }
           
           -    private static class getRegionInfo_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRegionInfo_result> {
          +    private static class getRegionInfo_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getRegionInfo_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionInfo_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionInfo_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -58525,7 +63350,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionInfo_resul
                           struct.success = new TRegionInfo();
                           struct.success.read(iprot);
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -58534,7 +63359,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionInfo_resul
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -58545,11 +63370,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionInfo_resul
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionInfo_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionInfo_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -58569,17 +63396,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionInfo_resu
           
               }
           
          -    private static class getRegionInfo_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRegionInfo_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRegionInfo_resultTupleScheme getScheme() {
                   return new getRegionInfo_resultTupleScheme();
                 }
               }
           
           -    private static class getRegionInfo_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRegionInfo_result> {
          +    private static class getRegionInfo_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getRegionInfo_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -58597,8 +63428,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_resul
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = new TRegionInfo();
          @@ -58613,32 +63446,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRegionInfo_result
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class append_args implements org.apache.thrift.TBase<append_args, append_args._Fields>, java.io.Serializable, Cloneable, Comparable<append_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("append_args");
          +  public static class append_args
           +      implements org.apache.thrift.TBase<append_args, append_args._Fields>, java.io.Serializable,
           +      Cloneable, Comparable<append_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("append_args");
           
          -    private static final org.apache.thrift.protocol.TField APPEND_FIELD_DESC = new org.apache.thrift.protocol.TField("append", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField APPEND_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("append", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new append_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new append_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new append_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new append_argsTupleSchemeFactory();
           
               /**
                * The single append operation to apply
                */
               public @org.apache.thrift.annotation.Nullable TAppend append; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * The single append operation to apply
                  */
          -      APPEND((short)1, "append");
          +      APPEND((short) 1, "append");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -58651,7 +63498,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // APPEND
                       return APPEND;
                     default:
          @@ -58660,12 +63507,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -58697,19 +63544,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.APPEND, new org.apache.thrift.meta_data.FieldMetaData("append", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAppend.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.APPEND,
          +        new org.apache.thrift.meta_data.FieldMetaData("append",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TAppend.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_args.class,
          +        metaDataMap);
               }
           
               public append_args() {
               }
           
          -    public append_args(
          -      TAppend append)
          -    {
          +    public append_args(TAppend append) {
                 this();
                 this.append = append;
               }
          @@ -58763,15 +63613,16 @@ public void setAppendIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case APPEND:
          -        if (value == null) {
          -          unsetAppend();
          -        } else {
          -          setAppend((TAppend)value);
          -        }
          -        break;
          +        case APPEND:
          +          if (value == null) {
          +            unsetAppend();
          +          } else {
          +            setAppend((TAppend) value);
          +          }
          +          break;
           
                 }
               }
          @@ -58779,46 +63630,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case APPEND:
          -        return getAppend();
          +        case APPEND:
          +          return getAppend();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case APPEND:
          -        return isSetAppend();
          +        case APPEND:
          +          return isSetAppend();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof append_args)
          -        return this.equals((append_args)that);
          +      if (that instanceof append_args) return this.equals((append_args) that);
                 return false;
               }
           
               public boolean equals(append_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_append = true && this.isSetAppend();
                 boolean that_present_append = true && that.isSetAppend();
                 if (this_present_append || that_present_append) {
          -        if (!(this_present_append && that_present_append))
          -          return false;
          -        if (!this.append.equals(that.append))
          -          return false;
          +        if (!(this_present_append && that_present_append)) return false;
          +        if (!this.append.equals(that.append)) return false;
                 }
           
                 return true;
          @@ -58829,8 +63678,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetAppend()) ? 131071 : 524287);
          -      if (isSetAppend())
          -        hashCode = hashCode * 8191 + append.hashCode();
          +      if (isSetAppend()) hashCode = hashCode * 8191 + append.hashCode();
           
                 return hashCode;
               }
          @@ -58861,11 +63709,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -58895,35 +63745,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class append_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class append_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public append_argsStandardScheme getScheme() {
                   return new append_argsStandardScheme();
                 }
               }
           
           -    private static class append_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<append_args> {
          +    private static class append_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<append_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -58932,7 +63787,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct)
                           struct.append = new TAppend();
                           struct.append.read(iprot);
                           struct.setAppendIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -58943,11 +63798,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, append_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, append_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -58962,17 +63819,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_args struct
           
               }
           
          -    private static class append_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class append_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public append_argsTupleScheme getScheme() {
                   return new append_argsTupleScheme();
                 }
               }
           
           -    private static class append_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<append_args> {
          +    private static class append_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, append_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, append_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetAppend()) {
                     optionals.set(0);
          @@ -58984,8 +63845,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_args struct)
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, append_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, append_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.append = new TAppend();
          @@ -58995,29 +63858,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_args struct)
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class append_result implements org.apache.thrift.TBase<append_result, append_result._Fields>, java.io.Serializable, Cloneable, Comparable<append_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("append_result");
          +  public static class append_result
           +      implements org.apache.thrift.TBase<append_result, append_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<append_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("append_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new append_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new append_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new append_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new append_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TCell> success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -59030,7 +63908,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -59041,12 +63919,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -59078,23 +63956,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_result.class,
          +        metaDataMap);
               }
           
               public append_result() {
               }
           
          -    public append_result(
           -      java.util.List<TCell> success,
          -      IOError io)
          -    {
           +    public append_result(java.util.List<TCell> success, IOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -59105,7 +63988,8 @@ public append_result(
                */
               public append_result(append_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TCell> __this__success = new java.util.ArrayList<TCell>(other.success.size());
           +        java.util.List<TCell> __this__success =
           +            new java.util.ArrayList<TCell>(other.success.size());
                   for (TCell other_element : other.success) {
                     __this__success.add(new TCell(other_element));
                   }
           @@ -59147,7 +64031,8 @@ public java.util.List<TCell> getSuccess() {
                 return this.success;
               }
           
           -    public append_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TCell> success) {
          +    public append_result
           +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TCell> success) {
                 this.success = success;
                 return this;
               }
          @@ -59192,23 +64077,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TCell>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TCell>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -59216,60 +64102,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof append_result)
          -        return this.equals((append_result)that);
          +      if (that instanceof append_result) return this.equals((append_result) that);
                 return false;
               }
           
               public boolean equals(append_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -59280,12 +64162,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -59326,13 +64206,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -59365,35 +64247,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class append_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class append_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public append_resultStandardScheme getScheme() {
                   return new append_resultStandardScheme();
                 }
               }
           
           -    private static class append_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<append_result> {
          +    private static class append_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<append_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -59402,9 +64289,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struc
                           {
                             org.apache.thrift.protocol.TList _list592 = iprot.readListBegin();
                              struct.success = new java.util.ArrayList<TCell>(_list592.size);
          -                  @org.apache.thrift.annotation.Nullable TCell _elem593;
          -                  for (int _i594 = 0; _i594 < _list592.size; ++_i594)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TCell _elem593;
          +                  for (int _i594 = 0; _i594 < _list592.size; ++_i594) {
                               _elem593 = new TCell();
                               _elem593.read(iprot);
                               struct.success.add(_elem593);
          @@ -59412,7 +64299,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struc
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -59421,7 +64308,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struc
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -59432,20 +64319,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struc
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, append_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, append_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TCell _iter595 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TCell _iter595 : struct.success) {
                         _iter595.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -59463,17 +64352,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_result stru
           
               }
           
          -    private static class append_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class append_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public append_resultTupleScheme getScheme() {
                   return new append_resultTupleScheme();
                 }
               }
           
           -    private static class append_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<append_result> {
          +    private static class append_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, append_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, append_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -59485,8 +64378,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_result struc
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TCell _iter596 : struct.success)
          -            {
          +            for (TCell _iter596 : struct.success) {
                         _iter596.write(oprot);
                       }
                     }
          @@ -59497,16 +64389,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_result struc
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, append_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, append_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list597 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list597 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TCell>(_list597.size);
          -            @org.apache.thrift.annotation.Nullable TCell _elem598;
          -            for (int _i599 = 0; _i599 < _list597.size; ++_i599)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TCell _elem598;
          +            for (int _i599 = 0; _i599 < _list597.size; ++_i599) {
                         _elem598 = new TCell();
                         _elem598.read(iprot);
                         struct.success.add(_elem598);
          @@ -59522,23 +64417,43 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_result struct
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class checkAndPut_args implements org.apache.thrift.TBase<checkAndPut_args, checkAndPut_args._Fields>, java.io.Serializable, Cloneable, Comparable<checkAndPut_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndPut_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)5);
          -    private static final org.apache.thrift.protocol.TField MPUT_FIELD_DESC = new org.apache.thrift.protocol.TField("mput", org.apache.thrift.protocol.TType.STRUCT, (short)6);
          -    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)7);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new checkAndPut_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new checkAndPut_argsTupleSchemeFactory();
           +  public static class checkAndPut_args
           +      implements org.apache.thrift.TBase<checkAndPut_args, checkAndPut_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<checkAndPut_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("checkAndPut_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING,
          +            (short) 5);
          +    private static final org.apache.thrift.protocol.TField MPUT_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("mput", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 6);
          +    private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +            (short) 7);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new checkAndPut_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new checkAndPut_argsTupleSchemeFactory();
           
               /**
                * name of table
           @@ -59553,9 +64468,8 @@ public static class checkAndPut_args implements org.apache.thrift.TBase<checkAn
           -    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // required
           +    public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of table
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * row key
                  */
          -      ROW((short)2, "row"),
          +      ROW((short) 2, "row"),
                 /**
                  * column name
                  */
          -      COLUMN((short)3, "column"),
          +      COLUMN((short) 3, "column"),
                 /**
          -       * the expected value for the column parameter, if not
          -       * provided the check is for the non-existence of the
          -       * column in question
          +       * the expected value for the column parameter, if not provided the check is for the
          +       * non-existence of the column in question
                  */
          -      VALUE((short)5, "value"),
          +      VALUE((short) 5, "value"),
                 /**
                  * mutation for the put
                  */
          -      MPUT((short)6, "mput"),
          +      MPUT((short) 6, "mput"),
                 /**
                  * Mutation attributes
                  */
          -      ATTRIBUTES((short)7, "attributes");
          +      ATTRIBUTES((short) 7, "attributes");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -59609,7 +64526,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // ROW
          @@ -59628,12 +64545,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -59665,36 +64582,52 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , "Text")));
          -      tmpMap.put(_Fields.MPUT, new org.apache.thrift.meta_data.FieldMetaData("mput", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Mutation.class)));
          -      tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"), 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , "Text"))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.VALUE,
          +        new org.apache.thrift.meta_data.FieldMetaData("value",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text")));
          +      tmpMap.put(_Fields.MPUT,
          +        new org.apache.thrift.meta_data.FieldMetaData("mput",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                Mutation.class)));
          +      tmpMap.put(_Fields.ATTRIBUTES,
          +        new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"),
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, "Text"))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndPut_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndPut_args.class,
          +        metaDataMap);
               }
           
               public checkAndPut_args() {
               }
           
          -    public checkAndPut_args(
          -      java.nio.ByteBuffer tableName,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer column,
          -      java.nio.ByteBuffer value,
          -      Mutation mput,
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes)
          -    {
          +    public checkAndPut_args(java.nio.ByteBuffer tableName, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer column, java.nio.ByteBuffer value, Mutation mput,
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this();
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -59724,15 +64657,20 @@ public checkAndPut_args(checkAndPut_args other) {
                   this.mput = new Mutation(other.mput);
                 }
                 if (other.isSetAttributes()) {
           -        java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes.size());
           -        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes.entrySet()) {
           +        java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +            new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                other.attributes.size());
           +        for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> other_element : other.attributes
           +            .entrySet()) {
           
                     java.nio.ByteBuffer other_element_key = other_element.getKey();
                     java.nio.ByteBuffer other_element_value = other_element.getValue();
           
          -          java.nio.ByteBuffer __this__attributes_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +          java.nio.ByteBuffer __this__attributes_copy_key =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
          -          java.nio.ByteBuffer __this__attributes_copy_value = org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
          +          java.nio.ByteBuffer __this__attributes_copy_value =
          +              org.apache.thrift.TBaseHelper.copyBinary(other_element_value);
           
                     __this__attributes.put(__this__attributes_copy_key, __this__attributes_copy_value);
                   }
          @@ -59770,11 +64708,13 @@ public java.nio.ByteBuffer bufferForTableName() {
                * name of table
                */
               public checkAndPut_args setTableName(byte[] tableName) {
          -      this.tableName = tableName == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(tableName.clone());
          +      this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(tableName.clone());
                 return this;
               }
           
          -    public checkAndPut_args setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +    public checkAndPut_args
          +        setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
                 this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
                 return this;
               }
          @@ -59810,7 +64750,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * row key
                */
               public checkAndPut_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -59850,11 +64790,13 @@ public java.nio.ByteBuffer bufferForColumn() {
                * column name
                */
               public checkAndPut_args setColumn(byte[] column) {
          -      this.column = column == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(column.clone());
          +      this.column =
          +          column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
                 return this;
               }
           
          -    public checkAndPut_args setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
          +    public checkAndPut_args
          +        setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
                 this.column = org.apache.thrift.TBaseHelper.copyBinary(column);
                 return this;
               }
          @@ -59875,9 +64817,8 @@ public void setColumnIsSet(boolean value) {
               }
           
               /**
          -     * the expected value for the column parameter, if not
          -     * provided the check is for the non-existence of the
          -     * column in question
          +     * the expected value for the column parameter, if not provided the check is for the
          +     * non-existence of the column in question
                */
               public byte[] getValue() {
                 setValue(org.apache.thrift.TBaseHelper.rightSize(value));
          @@ -59889,16 +64830,17 @@ public java.nio.ByteBuffer bufferForValue() {
               }
           
               /**
          -     * the expected value for the column parameter, if not
          -     * provided the check is for the non-existence of the
          -     * column in question
          +     * the expected value for the column parameter, if not provided the check is for the
          +     * non-existence of the column in question
                */
               public checkAndPut_args setValue(byte[] value) {
          -      this.value = value == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(value.clone());
          +      this.value =
          +          value == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(value.clone());
                 return this;
               }
           
          -    public checkAndPut_args setValue(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value) {
          +    public checkAndPut_args
          +        setValue(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value) {
                 this.value = org.apache.thrift.TBaseHelper.copyBinary(value);
                 return this;
               }
          @@ -59955,7 +64897,7 @@ public int getAttributesSize() {
           
               public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                 if (this.attributes == null) {
           -        this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +        this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
                 }
                 this.attributes.put(key, val);
               }
          @@ -59964,14 +64906,15 @@ public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
                * Mutation attributes
                */
               @org.apache.thrift.annotation.Nullable
           -    public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +    public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
                 return this.attributes;
               }
           
               /**
                * Mutation attributes
                */
           -    public checkAndPut_args setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +    public checkAndPut_args setAttributes(
           +        @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
                 this.attributes = attributes;
                 return this;
               }
          @@ -59991,71 +64934,72 @@ public void setAttributesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTableName((byte[])value);
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
                     } else {
          -            setTableName((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTableName((byte[]) value);
          +            } else {
          +              setTableName((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setColumn((byte[])value);
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
                     } else {
          -            setColumn((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setColumn((byte[]) value);
          +            } else {
          +              setColumn((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case VALUE:
          -        if (value == null) {
          -          unsetValue();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setValue((byte[])value);
          +        case VALUE:
          +          if (value == null) {
          +            unsetValue();
                     } else {
          -            setValue((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setValue((byte[]) value);
          +            } else {
          +              setValue((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case MPUT:
          -        if (value == null) {
          -          unsetMput();
          -        } else {
          -          setMput((Mutation)value);
          -        }
          -        break;
          +        case MPUT:
          +          if (value == null) {
          +            unsetMput();
          +          } else {
          +            setMput((Mutation) value);
          +          }
          +          break;
           
          -      case ATTRIBUTES:
          -        if (value == null) {
          -          unsetAttributes();
          -        } else {
           -          setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case ATTRIBUTES:
          +          if (value == null) {
          +            unsetAttributes();
          +          } else {
           +            setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -60063,116 +65007,104 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
          -      case VALUE:
          -        return getValue();
          +        case VALUE:
          +          return getValue();
           
          -      case MPUT:
          -        return getMput();
          +        case MPUT:
          +          return getMput();
           
          -      case ATTRIBUTES:
          -        return getAttributes();
          +        case ATTRIBUTES:
          +          return getAttributes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case ROW:
          -        return isSetRow();
          -      case COLUMN:
          -        return isSetColumn();
          -      case VALUE:
          -        return isSetValue();
          -      case MPUT:
          -        return isSetMput();
          -      case ATTRIBUTES:
          -        return isSetAttributes();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case ROW:
          +          return isSetRow();
          +        case COLUMN:
          +          return isSetColumn();
          +        case VALUE:
          +          return isSetValue();
          +        case MPUT:
          +          return isSetMput();
          +        case ATTRIBUTES:
          +          return isSetAttributes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof checkAndPut_args)
          -        return this.equals((checkAndPut_args)that);
          +      if (that instanceof checkAndPut_args) return this.equals((checkAndPut_args) that);
                 return false;
               }
           
               public boolean equals(checkAndPut_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 boolean this_present_value = true && this.isSetValue();
                 boolean that_present_value = true && that.isSetValue();
                 if (this_present_value || that_present_value) {
          -        if (!(this_present_value && that_present_value))
          -          return false;
          -        if (!this.value.equals(that.value))
          -          return false;
          +        if (!(this_present_value && that_present_value)) return false;
          +        if (!this.value.equals(that.value)) return false;
                 }
           
                 boolean this_present_mput = true && this.isSetMput();
                 boolean that_present_mput = true && that.isSetMput();
                 if (this_present_mput || that_present_mput) {
          -        if (!(this_present_mput && that_present_mput))
          -          return false;
          -        if (!this.mput.equals(that.mput))
          -          return false;
          +        if (!(this_present_mput && that_present_mput)) return false;
          +        if (!this.mput.equals(that.mput)) return false;
                 }
           
                 boolean this_present_attributes = true && this.isSetAttributes();
                 boolean that_present_attributes = true && that.isSetAttributes();
                 if (this_present_attributes || that_present_attributes) {
          -        if (!(this_present_attributes && that_present_attributes))
          -          return false;
          -        if (!this.attributes.equals(that.attributes))
          -          return false;
          +        if (!(this_present_attributes && that_present_attributes)) return false;
          +        if (!this.attributes.equals(that.attributes)) return false;
                 }
           
                 return true;
          @@ -60183,28 +65115,22 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetValue()) ? 131071 : 524287);
          -      if (isSetValue())
          -        hashCode = hashCode * 8191 + value.hashCode();
          +      if (isSetValue()) hashCode = hashCode * 8191 + value.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetMput()) ? 131071 : 524287);
          -      if (isSetMput())
          -        hashCode = hashCode * 8191 + mput.hashCode();
          +      if (isSetMput()) hashCode = hashCode * 8191 + mput.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -      if (isSetAttributes())
          -        hashCode = hashCode * 8191 + attributes.hashCode();
          +      if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
                 return hashCode;
               }
          @@ -60285,11 +65211,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -60359,35 +65287,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class checkAndPut_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndPut_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndPut_argsStandardScheme getScheme() {
                   return new checkAndPut_argsStandardScheme();
                 }
               }
           
           -    private static class checkAndPut_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<checkAndPut_args> {
           +    private static class checkAndPut_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<checkAndPut_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -60395,7 +65328,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.tableName = iprot.readBinary();
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -60403,7 +65336,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -60411,7 +65344,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.column = iprot.readBinary();
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -60419,7 +65352,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.value = iprot.readBinary();
                           struct.setValueIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -60428,7 +65361,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                           struct.mput = new Mutation();
                           struct.mput.read(iprot);
                           struct.setMputIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -60436,11 +65369,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                           {
                             org.apache.thrift.protocol.TMap _map600 = iprot.readMapBegin();
           -                  struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map600.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key601;
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val602;
          -                  for (int _i603 = 0; _i603 < _map600.size; ++_i603)
          -                  {
           +                  struct.attributes =
           +                      new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                          2 * _map600.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _key601;
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _val602;
          +                  for (int _i603 = 0; _i603 < _map600.size; ++_i603) {
                               _key601 = iprot.readBinary();
                               _val602 = iprot.readBinary();
                               struct.attributes.put(_key601, _val602);
          @@ -60448,7 +65384,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                             iprot.readMapEnd();
                           }
                           struct.setAttributesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -60459,11 +65395,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -60495,9 +65433,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_args s
                   if (struct.attributes != null) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter604 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter604 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter604.getKey());
                         oprot.writeBinary(_iter604.getValue());
                       }
          @@ -60511,17 +65451,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_args s
           
               }
           
          -    private static class checkAndPut_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndPut_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndPut_argsTupleScheme getScheme() {
                   return new checkAndPut_argsTupleScheme();
                 }
               }
           
           -    private static class checkAndPut_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<checkAndPut_args> {
           +    private static class checkAndPut_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<checkAndPut_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -60560,8 +65504,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args st
                   if (struct.isSetAttributes()) {
                     {
                       oprot.writeI32(struct.attributes.size());
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter605 : struct.attributes.entrySet())
          -            {
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter605 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter605.getKey());
                         oprot.writeBinary(_iter605.getValue());
                       }
          @@ -60570,8 +65514,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(6);
                   if (incoming.get(0)) {
                     struct.tableName = iprot.readBinary();
          @@ -60596,12 +65542,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args str
                   }
                   if (incoming.get(5)) {
                     {
          -            org.apache.thrift.protocol.TMap _map606 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -            struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map606.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key607;
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val608;
          -            for (int _i609 = 0; _i609 < _map606.size; ++_i609)
          -            {
          +            org.apache.thrift.protocol.TMap _map606 = iprot.readMapBegin(
          +              org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
           +            struct.attributes =
           +                new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map606.size);
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _key607;
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _val608;
          +            for (int _i609 = 0; _i609 < _map606.size; ++_i609) {
                         _key607 = iprot.readBinary();
                         _val608 = iprot.readBinary();
                         struct.attributes.put(_key607, _val608);
          @@ -60612,32 +65561,48 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args str
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class checkAndPut_result implements org.apache.thrift.TBase<checkAndPut_result, checkAndPut_result._Fields>, java.io.Serializable, Cloneable, Comparable<checkAndPut_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndPut_result");
          -
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new checkAndPut_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new checkAndPut_resultTupleSchemeFactory();
           +  public static class checkAndPut_result
           +      implements org.apache.thrift.TBase<checkAndPut_result, checkAndPut_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<checkAndPut_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("checkAndPut_result");
          +
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new checkAndPut_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new checkAndPut_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
               public @org.apache.thrift.annotation.Nullable IllegalArgument ia; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io"),
          -      IA((short)2, "ia");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io"), IA((short) 2, "ia");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -60650,7 +65615,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -60663,12 +65628,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -60702,25 +65667,32 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndPut_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndPut_result.class,
          +        metaDataMap);
               }
           
               public checkAndPut_result() {
               }
           
          -    public checkAndPut_result(
          -      boolean success,
          -      IOError io,
          -      IllegalArgument ia)
          -    {
          +    public checkAndPut_result(boolean success, IOError io, IllegalArgument ia) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -60765,7 +65737,8 @@ public checkAndPut_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -60774,7 +65747,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -60827,31 +65801,32 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((IllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((IllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -60859,74 +65834,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof checkAndPut_result)
          -        return this.equals((checkAndPut_result)that);
          +      if (that instanceof checkAndPut_result) return this.equals((checkAndPut_result) that);
                 return false;
               }
           
               public boolean equals(checkAndPut_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -60939,12 +65908,10 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -60995,13 +65962,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -61038,37 +66007,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class checkAndPut_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndPut_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndPut_resultStandardScheme getScheme() {
                   return new checkAndPut_resultStandardScheme();
                 }
               }
           
          -    private static class checkAndPut_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<checkAndPut_result> {
          +    private static class checkAndPut_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<checkAndPut_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -61076,7 +66051,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -61085,7 +66060,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -61094,7 +66069,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result
                           struct.ia = new IllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -61105,11 +66080,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -61134,17 +66111,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_result
           
               }
           
          -    private static class checkAndPut_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndPut_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndPut_resultTupleScheme getScheme() {
                   return new checkAndPut_resultTupleScheme();
                 }
               }
           
          -    private static class checkAndPut_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<checkAndPut_result> {
          +    private static class checkAndPut_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<checkAndPut_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -61168,8 +66149,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -61188,24 +66171,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result s
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getThriftServerType_args implements org.apache.thrift.TBase<getThriftServerType_args, getThriftServerType_args._Fields>, java.io.Serializable, Cloneable, Comparable<getThriftServerType_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftServerType_args");
          -
          +  public static class getThriftServerType_args implements
          +      org.apache.thrift.TBase<getThriftServerType_args, getThriftServerType_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getThriftServerType_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getThriftServerType_args");
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getThriftServerType_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getThriftServerType_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getThriftServerType_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getThriftServerType_argsTupleSchemeFactory();
           
          -
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -;
          +      ;
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -61218,19 +66211,19 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     default:
                       return null;
                   }
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -61258,11 +66251,14 @@ public java.lang.String getFieldName() {
                   return _fieldName;
                 }
               }
          +
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_args.class,
          +        metaDataMap);
               }
           
               public getThriftServerType_args() {
          @@ -61282,7 +66278,8 @@ public getThriftServerType_args deepCopy() {
               public void clear() {
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
                 }
               }
          @@ -61294,7 +66291,10 @@ public java.lang.Object getFieldValue(_Fields field) {
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
          @@ -61308,15 +66308,13 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getThriftServerType_args)
          -        return this.equals((getThriftServerType_args)that);
          +        return this.equals((getThriftServerType_args) that);
                 return false;
               }
           
               public boolean equals(getThriftServerType_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 return true;
               }
          @@ -61344,11 +66342,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -61368,35 +66368,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getThriftServerType_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getThriftServerType_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getThriftServerType_argsStandardScheme getScheme() {
                   return new getThriftServerType_argsStandardScheme();
                 }
               }
           
          -    private static class getThriftServerType_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getThriftServerType_args> {
          +    private static class getThriftServerType_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getThriftServerType_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -61407,11 +66412,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -61421,53 +66428,71 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerTyp
           
               }
           
          -    private static class getThriftServerType_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getThriftServerType_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getThriftServerType_argsTupleScheme getScheme() {
                   return new getThriftServerType_argsTupleScheme();
                 }
               }
           
          -    private static class getThriftServerType_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getThriftServerType_args> {
          +    private static class getThriftServerType_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getThriftServerType_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getThriftServerType_result implements org.apache.thrift.TBase<getThriftServerType_result, getThriftServerType_result._Fields>, java.io.Serializable, Cloneable, Comparable<getThriftServerType_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftServerType_result");
          +  public static class getThriftServerType_result implements
          +      org.apache.thrift.TBase<getThriftServerType_result, getThriftServerType_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getThriftServerType_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getThriftServerType_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32,
          +            (short) 0);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getThriftServerType_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getThriftServerType_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getThriftServerType_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getThriftServerType_resultTupleSchemeFactory();
           
               /**
          -     * 
                * @see TThriftServerType
                */
               public @org.apache.thrift.annotation.Nullable TThriftServerType success; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
          -       * 
                  * @see TThriftServerType
                  */
          -      SUCCESS((short)0, "success");
          +      SUCCESS((short) 0, "success");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -61480,7 +66505,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     default:
          @@ -61489,12 +66514,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -61526,19 +66551,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TThriftServerType.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +                TThriftServerType.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getThriftServerType_result.class, metaDataMap);
               }
           
               public getThriftServerType_result() {
               }
           
          -    public getThriftServerType_result(
          -      TThriftServerType success)
          -    {
          +    public getThriftServerType_result(TThriftServerType success) {
                 this();
                 this.success = success;
               }
          @@ -61562,7 +66590,6 @@ public void clear() {
               }
           
               /**
          -     * 
                * @see TThriftServerType
                */
               @org.apache.thrift.annotation.Nullable
          @@ -61571,10 +66598,10 @@ public TThriftServerType getSuccess() {
               }
           
               /**
          -     * 
                * @see TThriftServerType
                */
          -    public getThriftServerType_result setSuccess(@org.apache.thrift.annotation.Nullable TThriftServerType success) {
          +    public getThriftServerType_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable TThriftServerType success) {
                 this.success = success;
                 return this;
               }
          @@ -61594,15 +66621,16 @@ public void setSuccessIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((TThriftServerType)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((TThriftServerType) value);
          +          }
          +          break;
           
                 }
               }
          @@ -61610,22 +66638,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          +        case SUCCESS:
          +          return isSetSuccess();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -61633,23 +66664,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getThriftServerType_result)
          -        return this.equals((getThriftServerType_result)that);
          +        return this.equals((getThriftServerType_result) that);
                 return false;
               }
           
               public boolean equals(getThriftServerType_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 return true;
          @@ -61660,8 +66687,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.getValue();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.getValue();
           
                 return hashCode;
               }
          @@ -61692,13 +66718,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -61723,43 +66751,49 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getThriftServerType_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getThriftServerType_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getThriftServerType_resultStandardScheme getScheme() {
                   return new getThriftServerType_resultStandardScheme();
                 }
               }
           
          -    private static class getThriftServerType_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getThriftServerType_result> {
          +    private static class getThriftServerType_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getThriftServerType_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getThriftServerType_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
                       case 0: // SUCCESS
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -                struct.success = org.apache.hadoop.hbase.thrift.generated.TThriftServerType.findByValue(iprot.readI32());
          +                struct.success = org.apache.hadoop.hbase.thrift.generated.TThriftServerType
          +                    .findByValue(iprot.readI32());
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -61770,11 +66804,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getThriftServerType_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -61789,17 +66825,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerTyp
           
               }
           
          -    private static class getThriftServerType_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getThriftServerType_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getThriftServerType_resultTupleScheme getScheme() {
                   return new getThriftServerType_resultTupleScheme();
                 }
               }
           
          -    private static class getThriftServerType_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getThriftServerType_result> {
          +    private static class getThriftServerType_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getThriftServerType_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getThriftServerType_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -61811,34 +66851,47 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
          -          struct.success = org.apache.hadoop.hbase.thrift.generated.TThriftServerType.findByValue(iprot.readI32());
          +          struct.success = org.apache.hadoop.hbase.thrift.generated.TThriftServerType
          +              .findByValue(iprot.readI32());
                     struct.setSuccessIsSet(true);
                   }
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getClusterId_args implements org.apache.thrift.TBase<getClusterId_args, getClusterId_args._Fields>, java.io.Serializable, Cloneable, Comparable<getClusterId_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterId_args");
          -
          +  public static class getClusterId_args
          +      implements org.apache.thrift.TBase<getClusterId_args, getClusterId_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getClusterId_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getClusterId_args");
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getClusterId_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getClusterId_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getClusterId_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getClusterId_argsTupleSchemeFactory();
           
          -
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -;
          +      ;
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -61851,19 +66904,19 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     default:
                       return null;
                   }
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -61891,11 +66944,14 @@ public java.lang.String getFieldName() {
                   return _fieldName;
                 }
               }
          +
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_args.class,
          +        metaDataMap);
               }
           
               public getClusterId_args() {
          @@ -61915,7 +66971,8 @@ public getClusterId_args deepCopy() {
               public void clear() {
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
                 }
               }
          @@ -61927,7 +66984,10 @@ public java.lang.Object getFieldValue(_Fields field) {
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
          @@ -61940,16 +67000,13 @@ public boolean isSet(_Fields field) {
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getClusterId_args)
          -        return this.equals((getClusterId_args)that);
          +      if (that instanceof getClusterId_args) return this.equals((getClusterId_args) that);
                 return false;
               }
           
               public boolean equals(getClusterId_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 return true;
               }
          @@ -61977,11 +67034,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -62001,35 +67060,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getClusterId_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getClusterId_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getClusterId_argsStandardScheme getScheme() {
                   return new getClusterId_argsStandardScheme();
                 }
               }
           
          -    private static class getClusterId_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getClusterId_args> {
          +    private static class getClusterId_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getClusterId_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -62040,11 +67104,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -62054,45 +67120,65 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_args
           
               }
           
          -    private static class getClusterId_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getClusterId_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getClusterId_argsTupleScheme getScheme() {
                   return new getClusterId_argsTupleScheme();
                 }
               }
           
          -    private static class getClusterId_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getClusterId_args> {
          +    private static class getClusterId_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getClusterId_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getClusterId_result implements org.apache.thrift.TBase<getClusterId_result, getClusterId_result._Fields>, java.io.Serializable, Cloneable, Comparable<getClusterId_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterId_result");
          +  public static class getClusterId_result
          +      implements org.apache.thrift.TBase<getClusterId_result, getClusterId_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getClusterId_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getClusterId_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING,
          +            (short) 0);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getClusterId_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getClusterId_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getClusterId_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getClusterId_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.lang.String success; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success");
          +      SUCCESS((short) 0, "success");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -62105,7 +67191,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     default:
          @@ -62114,12 +67200,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -62151,19 +67237,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_result.class,
          +        metaDataMap);
               }
           
               public getClusterId_result() {
               }
           
          -    public getClusterId_result(
          -      java.lang.String success)
          -    {
          +    public getClusterId_result(java.lang.String success) {
                 this();
                 this.success = success;
               }
          @@ -62191,7 +67280,8 @@ public java.lang.String getSuccess() {
                 return this.success;
               }
           
          -    public getClusterId_result setSuccess(@org.apache.thrift.annotation.Nullable java.lang.String success) {
          +    public getClusterId_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.lang.String success) {
                 this.success = success;
                 return this;
               }
          @@ -62211,15 +67301,16 @@ public void setSuccessIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.String)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.String) value);
          +          }
          +          break;
           
                 }
               }
          @@ -62227,46 +67318,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          +        case SUCCESS:
          +          return isSetSuccess();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getClusterId_result)
          -        return this.equals((getClusterId_result)that);
          +      if (that instanceof getClusterId_result) return this.equals((getClusterId_result) that);
                 return false;
               }
           
               public boolean equals(getClusterId_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 return true;
          @@ -62277,8 +67366,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 return hashCode;
               }
          @@ -62309,13 +67397,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -62340,35 +67430,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getClusterId_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getClusterId_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getClusterId_resultStandardScheme getScheme() {
                   return new getClusterId_resultStandardScheme();
                 }
               }
           
          -    private static class getClusterId_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getClusterId_result> {
          +    private static class getClusterId_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getClusterId_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -62376,7 +67471,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.success = iprot.readString();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -62387,11 +67482,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -62406,17 +67503,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_resul
           
               }
           
          -    private static class getClusterId_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getClusterId_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getClusterId_resultTupleScheme getScheme() {
                   return new getClusterId_resultTupleScheme();
                 }
               }
           
          -    private static class getClusterId_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getClusterId_result> {
          +    private static class getClusterId_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getClusterId_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -62428,8 +67529,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.success = iprot.readString();
          @@ -62438,26 +67541,39 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_result
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class grant_args implements org.apache.thrift.TBase<grant_args, grant_args._Fields>, java.io.Serializable, Cloneable, Comparable<grant_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("grant_args");
          +  public static class grant_args implements org.apache.thrift.TBase<grant_args, grant_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<grant_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("grant_args");
           
          -    private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new grant_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new grant_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new grant_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new grant_argsTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TAccessControlEntity info; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      INFO((short)1, "info");
          +      INFO((short) 1, "info");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -62470,7 +67586,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // INFO
                       return INFO;
                     default:
          @@ -62479,12 +67595,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -62516,9 +67632,13 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.INFO, new org.apache.thrift.meta_data.FieldMetaData("info", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAccessControlEntity.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.INFO,
          +        new org.apache.thrift.meta_data.FieldMetaData("info",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TAccessControlEntity.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
                 org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(grant_args.class, metaDataMap);
               }
          @@ -62526,9 +67646,7 @@ public java.lang.String getFieldName() {
               public grant_args() {
               }
           
          -    public grant_args(
          -      TAccessControlEntity info)
          -    {
          +    public grant_args(TAccessControlEntity info) {
                 this();
                 this.info = info;
               }
          @@ -62576,15 +67694,16 @@ public void setInfoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case INFO:
          -        if (value == null) {
          -          unsetInfo();
          -        } else {
          -          setInfo((TAccessControlEntity)value);
          -        }
          -        break;
          +        case INFO:
          +          if (value == null) {
          +            unsetInfo();
          +          } else {
          +            setInfo((TAccessControlEntity) value);
          +          }
          +          break;
           
                 }
               }
          @@ -62592,46 +67711,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case INFO:
          -        return getInfo();
          +        case INFO:
          +          return getInfo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case INFO:
          -        return isSetInfo();
          +        case INFO:
          +          return isSetInfo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof grant_args)
          -        return this.equals((grant_args)that);
          +      if (that instanceof grant_args) return this.equals((grant_args) that);
                 return false;
               }
           
               public boolean equals(grant_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_info = true && this.isSetInfo();
                 boolean that_present_info = true && that.isSetInfo();
                 if (this_present_info || that_present_info) {
          -        if (!(this_present_info && that_present_info))
          -          return false;
          -        if (!this.info.equals(that.info))
          -          return false;
          +        if (!(this_present_info && that_present_info)) return false;
          +        if (!this.info.equals(that.info)) return false;
                 }
           
                 return true;
          @@ -62642,8 +67759,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetInfo()) ? 131071 : 524287);
          -      if (isSetInfo())
          -        hashCode = hashCode * 8191 + info.hashCode();
          +      if (isSetInfo()) hashCode = hashCode * 8191 + info.hashCode();
           
                 return hashCode;
               }
          @@ -62674,11 +67790,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -62701,7 +67819,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (info == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'info' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'info' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (info != null) {
          @@ -62711,35 +67830,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class grant_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class grant_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public grant_argsStandardScheme getScheme() {
                   return new grant_argsStandardScheme();
                 }
               }
           
          -    private static class grant_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<grant_args> {
          +    private static class grant_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<grant_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, grant_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, grant_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -62748,7 +67872,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_args struct)
                           struct.info = new TAccessControlEntity();
                           struct.info.read(iprot);
                           struct.setInfoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -62759,11 +67883,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_args struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, grant_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, grant_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -62778,52 +67904,73 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, grant_args struct)
           
               }
           
          -    private static class grant_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class grant_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public grant_argsTupleScheme getScheme() {
                   return new grant_argsTupleScheme();
                 }
               }
           
          -    private static class grant_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<grant_args> {
          +    private static class grant_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<grant_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, grant_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, grant_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.info.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, grant_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, grant_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.info = new TAccessControlEntity();
                   struct.info.read(iprot);
                   struct.setInfoIsSet(true);
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class grant_result implements org.apache.thrift.TBase<grant_result, grant_result._Fields>, java.io.Serializable, Cloneable, Comparable<grant_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("grant_result");
          +  public static class grant_result
          +      implements org.apache.thrift.TBase<grant_result, grant_result._Fields>, java.io.Serializable,
          +      Cloneable, Comparable<grant_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("grant_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new grant_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new grant_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new grant_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new grant_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -62836,7 +67983,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -62847,12 +67994,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -62886,22 +68033,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(grant_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(grant_result.class,
          +        metaDataMap);
               }
           
               public grant_result() {
               }
           
          -    public grant_result(
          -      boolean success,
          -      IOError io)
          -    {
          +    public grant_result(boolean success, IOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -62941,7 +68093,8 @@ public grant_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -62950,7 +68103,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -62978,23 +68132,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -63002,60 +68157,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof grant_result)
          -        return this.equals((grant_result)that);
          +      if (that instanceof grant_result) return this.equals((grant_result) that);
                 return false;
               }
           
               public boolean equals(grant_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -63068,8 +68219,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -63110,13 +68260,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -63145,37 +68297,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class grant_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class grant_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public grant_resultStandardScheme getScheme() {
                   return new grant_resultStandardScheme();
                 }
               }
           
           -    private static class grant_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<grant_result> {
           +    private static class grant_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<grant_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -63183,7 +68341,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -63192,7 +68350,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -63203,11 +68361,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, grant_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, grant_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -63227,17 +68387,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, grant_result struc
           
               }
           
          -    private static class grant_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class grant_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public grant_resultTupleScheme getScheme() {
                   return new grant_resultTupleScheme();
                 }
               }
           
           -    private static class grant_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<grant_result> {
           +    private static class grant_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<grant_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, grant_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, grant_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -63255,8 +68419,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, grant_result struct
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, grant_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, grant_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -63270,26 +68436,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, grant_result struct)
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class revoke_args implements org.apache.thrift.TBase<revoke_args, revoke_args._Fields>, java.io.Serializable, Cloneable, Comparable<revoke_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("revoke_args");
           +  public static class revoke_args
           +      implements org.apache.thrift.TBase<revoke_args, revoke_args._Fields>, java.io.Serializable,
           +      Cloneable, Comparable<revoke_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("revoke_args");
           
          -    private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new revoke_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new revoke_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new revoke_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new revoke_argsTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TAccessControlEntity info; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      INFO((short)1, "info");
          +      INFO((short) 1, "info");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -63302,7 +68482,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // INFO
                       return INFO;
                     default:
          @@ -63311,12 +68491,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -63348,19 +68528,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.INFO, new org.apache.thrift.meta_data.FieldMetaData("info", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAccessControlEntity.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.INFO,
          +        new org.apache.thrift.meta_data.FieldMetaData("info",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TAccessControlEntity.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(revoke_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(revoke_args.class,
          +        metaDataMap);
               }
           
               public revoke_args() {
               }
           
          -    public revoke_args(
          -      TAccessControlEntity info)
          -    {
          +    public revoke_args(TAccessControlEntity info) {
                 this();
                 this.info = info;
               }
          @@ -63408,15 +68591,16 @@ public void setInfoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case INFO:
          -        if (value == null) {
          -          unsetInfo();
          -        } else {
          -          setInfo((TAccessControlEntity)value);
          -        }
          -        break;
          +        case INFO:
          +          if (value == null) {
          +            unsetInfo();
          +          } else {
          +            setInfo((TAccessControlEntity) value);
          +          }
          +          break;
           
                 }
               }
          @@ -63424,46 +68608,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case INFO:
          -        return getInfo();
          +        case INFO:
          +          return getInfo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case INFO:
          -        return isSetInfo();
          +        case INFO:
          +          return isSetInfo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof revoke_args)
          -        return this.equals((revoke_args)that);
          +      if (that instanceof revoke_args) return this.equals((revoke_args) that);
                 return false;
               }
           
               public boolean equals(revoke_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_info = true && this.isSetInfo();
                 boolean that_present_info = true && that.isSetInfo();
                 if (this_present_info || that_present_info) {
          -        if (!(this_present_info && that_present_info))
          -          return false;
          -        if (!this.info.equals(that.info))
          -          return false;
          +        if (!(this_present_info && that_present_info)) return false;
          +        if (!this.info.equals(that.info)) return false;
                 }
           
                 return true;
          @@ -63474,8 +68656,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetInfo()) ? 131071 : 524287);
          -      if (isSetInfo())
          -        hashCode = hashCode * 8191 + info.hashCode();
          +      if (isSetInfo()) hashCode = hashCode * 8191 + info.hashCode();
           
                 return hashCode;
               }
          @@ -63506,11 +68687,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -63533,7 +68716,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (info == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'info' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'info' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (info != null) {
          @@ -63543,35 +68727,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class revoke_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class revoke_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public revoke_argsStandardScheme getScheme() {
                   return new revoke_argsStandardScheme();
                 }
               }
           
           -    private static class revoke_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<revoke_args> {
           +    private static class revoke_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<revoke_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -63580,7 +68769,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_args struct)
                           struct.info = new TAccessControlEntity();
                           struct.info.read(iprot);
                           struct.setInfoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -63591,11 +68780,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_args struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -63610,52 +68801,73 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_args struct
           
               }
           
          -    private static class revoke_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class revoke_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public revoke_argsTupleScheme getScheme() {
                   return new revoke_argsTupleScheme();
                 }
               }
           
           -    private static class revoke_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<revoke_args> {
           +    private static class revoke_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<revoke_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, revoke_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, revoke_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.info.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, revoke_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, revoke_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.info = new TAccessControlEntity();
                   struct.info.read(iprot);
                   struct.setInfoIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class revoke_result implements org.apache.thrift.TBase<revoke_result, revoke_result._Fields>, java.io.Serializable, Cloneable, Comparable<revoke_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("revoke_result");
           +  public static class revoke_result
           +      implements org.apache.thrift.TBase<revoke_result, revoke_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<revoke_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("revoke_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new revoke_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new revoke_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new revoke_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new revoke_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable IOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -63668,7 +68880,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -63679,12 +68891,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -63718,22 +68930,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, IOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                IOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(revoke_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(revoke_result.class,
          +        metaDataMap);
               }
           
               public revoke_result() {
               }
           
          -    public revoke_result(
          -      boolean success,
          -      IOError io)
          -    {
          +    public revoke_result(boolean success, IOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -63773,7 +68990,8 @@ public revoke_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -63782,7 +69000,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -63810,23 +69029,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((IOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((IOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -63834,60 +69054,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof revoke_result)
          -        return this.equals((revoke_result)that);
          +      if (that instanceof revoke_result) return this.equals((revoke_result) that);
                 return false;
               }
           
               public boolean equals(revoke_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -63900,8 +69116,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -63942,13 +69157,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -63977,37 +69194,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class revoke_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class revoke_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public revoke_resultStandardScheme getScheme() {
                   return new revoke_resultStandardScheme();
                 }
               }
           
           -    private static class revoke_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<revoke_result> {
           +    private static class revoke_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<revoke_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -64015,7 +69238,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -64024,7 +69247,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struc
                           struct.io = new IOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -64035,11 +69258,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struc
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -64059,17 +69284,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_result stru
           
               }
           
          -    private static class revoke_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class revoke_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public revoke_resultTupleScheme getScheme() {
                   return new revoke_resultTupleScheme();
                 }
               }
           
           -    private static class revoke_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<revoke_result> {
           +    private static class revoke_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<revoke_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, revoke_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, revoke_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -64087,8 +69316,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, revoke_result struc
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, revoke_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, revoke_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -64102,8 +69333,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, revoke_result struct
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java
          index f5f6b565c56c..d4fa0f9fd75d 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java
          @@ -1,36 +1,59 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * An IOError exception signals that an error occurred communicating
          - * to the Hbase master or an Hbase region server.  Also used to return
          - * more general Hbase error conditions.
          + * An IOError exception signals that an error occurred communicating to the Hbase master or an Hbase
          + * region server. Also used to return more general Hbase error conditions.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class IOError extends org.apache.thrift.TException implements org.apache.thrift.TBase<IOError, IOError._Fields>, java.io.Serializable, Cloneable, Comparable<IOError> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IOError");
          -
          -  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField CAN_RETRY_FIELD_DESC = new org.apache.thrift.protocol.TField("canRetry", org.apache.thrift.protocol.TType.BOOL, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new IOErrorStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new IOErrorTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           +public class IOError extends org.apache.thrift.TException
           +    implements org.apache.thrift.TBase<IOError, IOError._Fields>, java.io.Serializable, Cloneable,
           +    Comparable<IOError> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("IOError");
          +
          +  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField CAN_RETRY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("canRetry", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new IOErrorStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new IOErrorTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String message; // required
             public boolean canRetry; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    MESSAGE((short)1, "message"),
          -    CAN_RETRY((short)2, "canRetry");
          +    MESSAGE((short) 1, "message"), CAN_RETRY((short) 2, "canRetry");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -43,7 +66,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // MESSAGE
                     return MESSAGE;
                   case 2: // CAN_RETRY
          @@ -54,12 +77,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -93,10 +116,15 @@ public java.lang.String getFieldName() {
             private byte __isset_bitfield = 0;
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.CAN_RETRY, new org.apache.thrift.meta_data.FieldMetaData("canRetry", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.MESSAGE,
          +      new org.apache.thrift.meta_data.FieldMetaData("message",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.CAN_RETRY, new org.apache.thrift.meta_data.FieldMetaData("canRetry",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(IOError.class, metaDataMap);
          @@ -105,10 +133,7 @@ public java.lang.String getFieldName() {
             public IOError() {
             }
           
          -  public IOError(
          -    java.lang.String message,
          -    boolean canRetry)
          -  {
          +  public IOError(java.lang.String message, boolean canRetry) {
               this();
               this.message = message;
               this.canRetry = canRetry;
          @@ -173,7 +198,8 @@ public IOError setCanRetry(boolean canRetry) {
             }
           
             public void unsetCanRetry() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CANRETRY_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CANRETRY_ISSET_ID);
             }
           
             /** Returns true if field canRetry is set (has been assigned a value) and false otherwise */
          @@ -182,26 +208,28 @@ public boolean isSetCanRetry() {
             }
           
             public void setCanRetryIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CANRETRY_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CANRETRY_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case MESSAGE:
          -      if (value == null) {
          -        unsetMessage();
          -      } else {
          -        setMessage((java.lang.String)value);
          -      }
          -      break;
          +      case MESSAGE:
          +        if (value == null) {
          +          unsetMessage();
          +        } else {
          +          setMessage((java.lang.String) value);
          +        }
          +        break;
           
          -    case CAN_RETRY:
          -      if (value == null) {
          -        unsetCanRetry();
          -      } else {
          -        setCanRetry((java.lang.Boolean)value);
          -      }
          -      break;
          +      case CAN_RETRY:
          +        if (value == null) {
          +          unsetCanRetry();
          +        } else {
          +          setCanRetry((java.lang.Boolean) value);
          +        }
          +        break;
           
               }
             }
          @@ -209,60 +237,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case MESSAGE:
          -      return getMessage();
          +      case MESSAGE:
          +        return getMessage();
           
          -    case CAN_RETRY:
          -      return isCanRetry();
          +      case CAN_RETRY:
          +        return isCanRetry();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case MESSAGE:
          -      return isSetMessage();
          -    case CAN_RETRY:
          -      return isSetCanRetry();
          +      case MESSAGE:
          +        return isSetMessage();
          +      case CAN_RETRY:
          +        return isSetCanRetry();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof IOError)
          -      return this.equals((IOError)that);
          +    if (that instanceof IOError) return this.equals((IOError) that);
               return false;
             }
           
             public boolean equals(IOError that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_message = true && this.isSetMessage();
               boolean that_present_message = true && that.isSetMessage();
               if (this_present_message || that_present_message) {
          -      if (!(this_present_message && that_present_message))
          -        return false;
          -      if (!this.message.equals(that.message))
          -        return false;
          +      if (!(this_present_message && that_present_message)) return false;
          +      if (!this.message.equals(that.message)) return false;
               }
           
               boolean this_present_canRetry = true;
               boolean that_present_canRetry = true;
               if (this_present_canRetry || that_present_canRetry) {
          -      if (!(this_present_canRetry && that_present_canRetry))
          -        return false;
          -      if (this.canRetry != that.canRetry)
          -        return false;
          +      if (!(this_present_canRetry && that_present_canRetry)) return false;
          +      if (this.canRetry != that.canRetry) return false;
               }
           
               return true;
          @@ -273,8 +297,7 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetMessage()) ? 131071 : 524287);
          -    if (isSetMessage())
          -      hashCode = hashCode * 8191 + message.hashCode();
          +    if (isSetMessage()) hashCode = hashCode * 8191 + message.hashCode();
           
               hashCode = hashCode * 8191 + ((canRetry) ? 131071 : 524287);
           
          @@ -321,7 +344,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -352,37 +376,43 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class IOErrorStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class IOErrorStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public IOErrorStandardScheme getScheme() {
                 return new IOErrorStandardScheme();
               }
             }
           
          -  private static class IOErrorStandardScheme extends org.apache.thrift.scheme.StandardScheme<IOError> {
          +  private static class IOErrorStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<IOError> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, IOError struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, IOError struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -390,7 +420,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, IOError struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.message = iprot.readString();
                         struct.setMessageIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -398,7 +428,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, IOError struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.canRetry = iprot.readBool();
                         struct.setCanRetryIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -413,7 +443,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, IOError struct) thr
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, IOError struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, IOError struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -440,8 +471,10 @@ public IOErrorTupleScheme getScheme() {
             private static class IOErrorTupleScheme extends org.apache.thrift.scheme.TupleScheme<IOError> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, IOError struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, IOError struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetMessage()) {
                   optionals.set(0);
          @@ -459,8 +492,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, IOError struct) thr
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, IOError struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, IOError struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(2);
                 if (incoming.get(0)) {
                   struct.message = iprot.readString();
          @@ -473,8 +508,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, IOError struct) thro
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
          index a0e2e97827a3..11dc3b2e1a4e 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
          @@ -1,32 +1,55 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * An IllegalArgument exception indicates an illegal or invalid
          - * argument was passed into a procedure.
          + * An IllegalArgument exception indicates an illegal or invalid argument was passed into a
          + * procedure.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class IllegalArgument extends org.apache.thrift.TException implements org.apache.thrift.TBase<IllegalArgument, IllegalArgument._Fields>, java.io.Serializable, Cloneable, Comparable<IllegalArgument> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IllegalArgument");
          -
          -  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new IllegalArgumentStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new IllegalArgumentTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class IllegalArgument extends org.apache.thrift.TException
          +    implements org.apache.thrift.TBase<IllegalArgument, IllegalArgument._Fields>,
          +    java.io.Serializable, Cloneable, Comparable<IllegalArgument> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("IllegalArgument");
          +
          +  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new IllegalArgumentStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new IllegalArgumentTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String message; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    MESSAGE((short)1, "message");
          +    MESSAGE((short) 1, "message");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -39,7 +62,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // MESSAGE
                     return MESSAGE;
                   default:
          @@ -48,12 +71,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -85,19 +108,22 @@ public java.lang.String getFieldName() {
             // isset id assignments
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.MESSAGE,
          +      new org.apache.thrift.meta_data.FieldMetaData("message",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(IllegalArgument.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(IllegalArgument.class,
          +      metaDataMap);
             }
           
             public IllegalArgument() {
             }
           
          -  public IllegalArgument(
          -    java.lang.String message)
          -  {
          +  public IllegalArgument(java.lang.String message) {
               this();
               this.message = message;
             }
          @@ -125,7 +151,8 @@ public java.lang.String getMessage() {
               return this.message;
             }
           
          -  public IllegalArgument setMessage(@org.apache.thrift.annotation.Nullable java.lang.String message) {
          +  public IllegalArgument
          +      setMessage(@org.apache.thrift.annotation.Nullable java.lang.String message) {
               this.message = message;
               return this;
             }
          @@ -145,15 +172,16 @@ public void setMessageIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case MESSAGE:
          -      if (value == null) {
          -        unsetMessage();
          -      } else {
          -        setMessage((java.lang.String)value);
          -      }
          -      break;
          +      case MESSAGE:
          +        if (value == null) {
          +          unsetMessage();
          +        } else {
          +          setMessage((java.lang.String) value);
          +        }
          +        break;
           
               }
             }
          @@ -161,46 +189,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case MESSAGE:
          -      return getMessage();
          +      case MESSAGE:
          +        return getMessage();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case MESSAGE:
          -      return isSetMessage();
          +      case MESSAGE:
          +        return isSetMessage();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof IllegalArgument)
          -      return this.equals((IllegalArgument)that);
          +    if (that instanceof IllegalArgument) return this.equals((IllegalArgument) that);
               return false;
             }
           
             public boolean equals(IllegalArgument that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_message = true && this.isSetMessage();
               boolean that_present_message = true && that.isSetMessage();
               if (this_present_message || that_present_message) {
          -      if (!(this_present_message && that_present_message))
          -        return false;
          -      if (!this.message.equals(that.message))
          -        return false;
          +      if (!(this_present_message && that_present_message)) return false;
          +      if (!this.message.equals(that.message)) return false;
               }
           
               return true;
          @@ -211,8 +237,7 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetMessage()) ? 131071 : 524287);
          -    if (isSetMessage())
          -      hashCode = hashCode * 8191 + message.hashCode();
          +    if (isSetMessage()) hashCode = hashCode * 8191 + message.hashCode();
           
               return hashCode;
             }
          @@ -247,7 +272,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -274,35 +300,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class IllegalArgumentStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class IllegalArgumentStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public IllegalArgumentStandardScheme getScheme() {
                 return new IllegalArgumentStandardScheme();
               }
             }
           
          -  private static class IllegalArgumentStandardScheme extends org.apache.thrift.scheme.StandardScheme<IllegalArgument> {
          +  private static class IllegalArgumentStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<IllegalArgument> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, IllegalArgument struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, IllegalArgument struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -310,7 +341,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, IllegalArgument str
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.message = iprot.readString();
                         struct.setMessageIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -325,7 +356,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, IllegalArgument str
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, IllegalArgument struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, IllegalArgument struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -340,17 +372,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, IllegalArgument st
           
             }
           
          -  private static class IllegalArgumentTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class IllegalArgumentTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public IllegalArgumentTupleScheme getScheme() {
                 return new IllegalArgumentTupleScheme();
               }
             }
           
          -  private static class IllegalArgumentTupleScheme extends org.apache.thrift.scheme.TupleScheme<IllegalArgument> {
          +  private static class IllegalArgumentTupleScheme
          +      extends org.apache.thrift.scheme.TupleScheme<IllegalArgument> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, IllegalArgument struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, IllegalArgument struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetMessage()) {
                   optionals.set(0);
          @@ -362,8 +398,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, IllegalArgument str
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, IllegalArgument struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, IllegalArgument struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(1);
                 if (incoming.get(0)) {
                   struct.message = iprot.readString();
          @@ -372,8 +410,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, IllegalArgument stru
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
          index 7bf919fda33e..10fe70546a08 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
          @@ -1,40 +1,66 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * A Mutation object is used to either update or delete a column-value.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class Mutation implements org.apache.thrift.TBase<Mutation, Mutation._Fields>, java.io.Serializable, Cloneable, Comparable<Mutation> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Mutation");
          -
          -  private static final org.apache.thrift.protocol.TField IS_DELETE_FIELD_DESC = new org.apache.thrift.protocol.TField("isDelete", org.apache.thrift.protocol.TType.BOOL, (short)1);
          -  private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)3);
          -  private static final org.apache.thrift.protocol.TField WRITE_TO_WAL_FIELD_DESC = new org.apache.thrift.protocol.TField("writeToWAL", org.apache.thrift.protocol.TType.BOOL, (short)4);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new MutationStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new MutationTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class Mutation implements org.apache.thrift.TBase<Mutation, Mutation._Fields>,
          +    java.io.Serializable, Cloneable, Comparable<Mutation> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("Mutation");
          +
          +  private static final org.apache.thrift.protocol.TField IS_DELETE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("isDelete", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField WRITE_TO_WAL_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("writeToWAL", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 4);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new MutationStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new MutationTupleSchemeFactory();
           
             public boolean isDelete; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value; // required
             public boolean writeToWAL; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    IS_DELETE((short)1, "isDelete"),
          -    COLUMN((short)2, "column"),
          -    VALUE((short)3, "value"),
          -    WRITE_TO_WAL((short)4, "writeToWAL");
          +    IS_DELETE((short) 1, "isDelete"), COLUMN((short) 2, "column"), VALUE((short) 3, "value"),
          +    WRITE_TO_WAL((short) 4, "writeToWAL");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -47,7 +73,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // IS_DELETE
                     return IS_DELETE;
                   case 2: // COLUMN
          @@ -62,12 +88,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -102,14 +128,23 @@ public java.lang.String getFieldName() {
             private byte __isset_bitfield = 0;
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.IS_DELETE, new org.apache.thrift.meta_data.FieldMetaData("isDelete", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.IS_DELETE, new org.apache.thrift.meta_data.FieldMetaData("isDelete",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.WRITE_TO_WAL, new org.apache.thrift.meta_data.FieldMetaData("writeToWAL", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    tmpMap.put(_Fields.COLUMN,
          +      new org.apache.thrift.meta_data.FieldMetaData("column",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.VALUE,
          +      new org.apache.thrift.meta_data.FieldMetaData("value",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.WRITE_TO_WAL, new org.apache.thrift.meta_data.FieldMetaData("writeToWAL",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Mutation.class, metaDataMap);
          @@ -122,12 +157,8 @@ public Mutation() {
           
             }
           
          -  public Mutation(
          -    boolean isDelete,
          -    java.nio.ByteBuffer column,
          -    java.nio.ByteBuffer value,
          -    boolean writeToWAL)
          -  {
          +  public Mutation(boolean isDelete, java.nio.ByteBuffer column, java.nio.ByteBuffer value,
          +      boolean writeToWAL) {
               this();
               this.isDelete = isDelete;
               setIsDeleteIsSet(true);
          @@ -177,7 +208,8 @@ public Mutation setIsDelete(boolean isDelete) {
             }
           
             public void unsetIsDelete() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __ISDELETE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __ISDELETE_ISSET_ID);
             }
           
             /** Returns true if field isDelete is set (has been assigned a value) and false otherwise */
          @@ -186,7 +218,8 @@ public boolean isSetIsDelete() {
             }
           
             public void setIsDeleteIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ISDELETE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ISDELETE_ISSET_ID, value);
             }
           
             public byte[] getColumn() {
          @@ -199,7 +232,8 @@ public java.nio.ByteBuffer bufferForColumn() {
             }
           
             public Mutation setColumn(byte[] column) {
          -    this.column = column == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(column.clone());
          +    this.column =
          +        column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
               return this;
             }
           
          @@ -233,7 +267,8 @@ public java.nio.ByteBuffer bufferForValue() {
             }
           
             public Mutation setValue(byte[] value) {
          -    this.value = value == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(value.clone());
          +    this.value =
          +        value == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(value.clone());
               return this;
             }
           
          @@ -268,7 +303,8 @@ public Mutation setWriteToWAL(boolean writeToWAL) {
             }
           
             public void unsetWriteToWAL() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __WRITETOWAL_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __WRITETOWAL_ISSET_ID);
             }
           
             /** Returns true if field writeToWAL is set (has been assigned a value) and false otherwise */
          @@ -277,50 +313,52 @@ public boolean isSetWriteToWAL() {
             }
           
             public void setWriteToWALIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __WRITETOWAL_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __WRITETOWAL_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case IS_DELETE:
          -      if (value == null) {
          -        unsetIsDelete();
          -      } else {
          -        setIsDelete((java.lang.Boolean)value);
          -      }
          -      break;
          -
          -    case COLUMN:
          -      if (value == null) {
          -        unsetColumn();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setColumn((byte[])value);
          +      case IS_DELETE:
          +        if (value == null) {
          +          unsetIsDelete();
                   } else {
          -          setColumn((java.nio.ByteBuffer)value);
          +          setIsDelete((java.lang.Boolean) value);
                   }
          -      }
          -      break;
          -
          -    case VALUE:
          -      if (value == null) {
          -        unsetValue();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setValue((byte[])value);
          +        break;
          +
          +      case COLUMN:
          +        if (value == null) {
          +          unsetColumn();
                   } else {
          -          setValue((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setColumn((byte[]) value);
          +          } else {
          +            setColumn((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case WRITE_TO_WAL:
          -      if (value == null) {
          -        unsetWriteToWAL();
          -      } else {
          -        setWriteToWAL((java.lang.Boolean)value);
          -      }
          -      break;
          +      case VALUE:
          +        if (value == null) {
          +          unsetValue();
          +        } else {
          +          if (value instanceof byte[]) {
          +            setValue((byte[]) value);
          +          } else {
          +            setValue((java.nio.ByteBuffer) value);
          +          }
          +        }
          +        break;
          +
          +      case WRITE_TO_WAL:
          +        if (value == null) {
          +          unsetWriteToWAL();
          +        } else {
          +          setWriteToWAL((java.lang.Boolean) value);
          +        }
          +        break;
           
               }
             }
          @@ -328,88 +366,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case IS_DELETE:
          -      return isIsDelete();
          +      case IS_DELETE:
          +        return isIsDelete();
           
          -    case COLUMN:
          -      return getColumn();
          +      case COLUMN:
          +        return getColumn();
           
          -    case VALUE:
          -      return getValue();
          +      case VALUE:
          +        return getValue();
           
          -    case WRITE_TO_WAL:
          -      return isWriteToWAL();
          +      case WRITE_TO_WAL:
          +        return isWriteToWAL();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case IS_DELETE:
          -      return isSetIsDelete();
          -    case COLUMN:
          -      return isSetColumn();
          -    case VALUE:
          -      return isSetValue();
          -    case WRITE_TO_WAL:
          -      return isSetWriteToWAL();
          +      case IS_DELETE:
          +        return isSetIsDelete();
          +      case COLUMN:
          +        return isSetColumn();
          +      case VALUE:
          +        return isSetValue();
          +      case WRITE_TO_WAL:
          +        return isSetWriteToWAL();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof Mutation)
          -      return this.equals((Mutation)that);
          +    if (that instanceof Mutation) return this.equals((Mutation) that);
               return false;
             }
           
             public boolean equals(Mutation that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_isDelete = true;
               boolean that_present_isDelete = true;
               if (this_present_isDelete || that_present_isDelete) {
          -      if (!(this_present_isDelete && that_present_isDelete))
          -        return false;
          -      if (this.isDelete != that.isDelete)
          -        return false;
          +      if (!(this_present_isDelete && that_present_isDelete)) return false;
          +      if (this.isDelete != that.isDelete) return false;
               }
           
               boolean this_present_column = true && this.isSetColumn();
               boolean that_present_column = true && that.isSetColumn();
               if (this_present_column || that_present_column) {
          -      if (!(this_present_column && that_present_column))
          -        return false;
          -      if (!this.column.equals(that.column))
          -        return false;
          +      if (!(this_present_column && that_present_column)) return false;
          +      if (!this.column.equals(that.column)) return false;
               }
           
               boolean this_present_value = true && this.isSetValue();
               boolean that_present_value = true && that.isSetValue();
               if (this_present_value || that_present_value) {
          -      if (!(this_present_value && that_present_value))
          -        return false;
          -      if (!this.value.equals(that.value))
          -        return false;
          +      if (!(this_present_value && that_present_value)) return false;
          +      if (!this.value.equals(that.value)) return false;
               }
           
               boolean this_present_writeToWAL = true;
               boolean that_present_writeToWAL = true;
               if (this_present_writeToWAL || that_present_writeToWAL) {
          -      if (!(this_present_writeToWAL && that_present_writeToWAL))
          -        return false;
          -      if (this.writeToWAL != that.writeToWAL)
          -        return false;
          +      if (!(this_present_writeToWAL && that_present_writeToWAL)) return false;
          +      if (this.writeToWAL != that.writeToWAL) return false;
               }
           
               return true;
          @@ -422,12 +452,10 @@ public int hashCode() {
               hashCode = hashCode * 8191 + ((isDelete) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -    if (isSetColumn())
          -      hashCode = hashCode * 8191 + column.hashCode();
          +    if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetValue()) ? 131071 : 524287);
          -    if (isSetValue())
          -      hashCode = hashCode * 8191 + value.hashCode();
          +    if (isSetValue()) hashCode = hashCode * 8191 + value.hashCode();
           
               hashCode = hashCode * 8191 + ((writeToWAL) ? 131071 : 524287);
           
          @@ -494,7 +522,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -537,37 +566,43 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class MutationStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class MutationStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public MutationStandardScheme getScheme() {
                 return new MutationStandardScheme();
               }
             }
           
          -  private static class MutationStandardScheme extends org.apache.thrift.scheme.StandardScheme<Mutation> {
          +  private static class MutationStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<Mutation> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, Mutation struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, Mutation struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -575,7 +610,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Mutation struct) th
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.isDelete = iprot.readBool();
                         struct.setIsDeleteIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -583,7 +618,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Mutation struct) th
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.column = iprot.readBinary();
                         struct.setColumnIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -591,7 +626,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Mutation struct) th
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.value = iprot.readBinary();
                         struct.setValueIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -599,7 +634,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Mutation struct) th
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.writeToWAL = iprot.readBool();
                         struct.setWriteToWALIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -614,7 +649,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Mutation struct) th
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, Mutation struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, Mutation struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -640,7 +676,8 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Mutation struct) t
           
             }
           
          -  private static class MutationTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class MutationTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public MutationTupleScheme getScheme() {
                 return new MutationTupleScheme();
               }
          @@ -649,8 +686,10 @@ public MutationTupleScheme getScheme() {
             private static class MutationTupleScheme extends org.apache.thrift.scheme.TupleScheme<Mutation> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, Mutation struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, Mutation struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetIsDelete()) {
                   optionals.set(0);
          @@ -680,8 +719,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Mutation struct) th
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, Mutation struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, Mutation struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(4);
                 if (incoming.get(0)) {
                   struct.isDelete = iprot.readBool();
          @@ -702,8 +743,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Mutation struct) thr
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAccessControlEntity.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAccessControlEntity.java
          index 24fcb0586a24..e1e7e70d268c 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAccessControlEntity.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAccessControlEntity.java
          @@ -1,31 +1,57 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * TAccessControlEntity for permission control
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TAccessControlEntity implements org.apache.thrift.TBase<TAccessControlEntity, TAccessControlEntity._Fields>, java.io.Serializable, Cloneable, Comparable<TAccessControlEntity> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAccessControlEntity");
          -
          -  private static final org.apache.thrift.protocol.TField USERNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("username", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField SCOPE_FIELD_DESC = new org.apache.thrift.protocol.TField("scope", org.apache.thrift.protocol.TType.I32, (short)2);
          -  private static final org.apache.thrift.protocol.TField ACTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("actions", org.apache.thrift.protocol.TType.STRING, (short)4);
          -  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)5);
          -  private static final org.apache.thrift.protocol.TField NS_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("nsName", org.apache.thrift.protocol.TType.STRING, (short)6);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TAccessControlEntityStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TAccessControlEntityTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TAccessControlEntity
          +    implements org.apache.thrift.TBase<TAccessControlEntity, TAccessControlEntity._Fields>,
          +    java.io.Serializable, Cloneable, Comparable<TAccessControlEntity> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TAccessControlEntity");
          +
          +  private static final org.apache.thrift.protocol.TField USERNAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("username", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField SCOPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("scope", org.apache.thrift.protocol.TType.I32,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField ACTIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("actions", org.apache.thrift.protocol.TType.STRING,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField NS_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("nsName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 6);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TAccessControlEntityStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TAccessControlEntityTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String username; // required
             /**
          -   * 
              * @see TPermissionScope
              */
             public @org.apache.thrift.annotation.Nullable TPermissionScope scope; // required
          @@ -33,19 +59,20 @@ public class TAccessControlEntity implements org.apache.thrift.TBase
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -58,7 +85,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // USERNAME
                     return USERNAME;
                   case 2: // SCOPE
          @@ -75,12 +102,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -110,32 +137,46 @@ public java.lang.String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.TABLE_NAME,_Fields.NS_NAME};
          +  private static final _Fields optionals[] = { _Fields.TABLE_NAME, _Fields.NS_NAME };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.USERNAME, new org.apache.thrift.meta_data.FieldMetaData("username", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.SCOPE, new org.apache.thrift.meta_data.FieldMetaData("scope", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TPermissionScope.class)));
          -    tmpMap.put(_Fields.ACTIONS, new org.apache.thrift.meta_data.FieldMetaData("actions", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Bytes")));
          -    tmpMap.put(_Fields.NS_NAME, new org.apache.thrift.meta_data.FieldMetaData("nsName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.USERNAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("username",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.SCOPE,
          +      new org.apache.thrift.meta_data.FieldMetaData("scope",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TPermissionScope.class)));
          +    tmpMap.put(_Fields.ACTIONS,
          +      new org.apache.thrift.meta_data.FieldMetaData("actions",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.TABLE_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Bytes")));
          +    tmpMap.put(_Fields.NS_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("nsName",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TAccessControlEntity.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TAccessControlEntity.class,
          +      metaDataMap);
             }
           
             public TAccessControlEntity() {
             }
           
          -  public TAccessControlEntity(
          -    java.lang.String username,
          -    TPermissionScope scope,
          -    java.lang.String actions)
          -  {
          +  public TAccessControlEntity(java.lang.String username, TPermissionScope scope,
          +      java.lang.String actions) {
               this();
               this.username = username;
               this.scope = scope;
          @@ -181,7 +222,8 @@ public java.lang.String getUsername() {
               return this.username;
             }
           
          -  public TAccessControlEntity setUsername(@org.apache.thrift.annotation.Nullable java.lang.String username) {
          +  public TAccessControlEntity
          +      setUsername(@org.apache.thrift.annotation.Nullable java.lang.String username) {
               this.username = username;
               return this;
             }
          @@ -202,7 +244,6 @@ public void setUsernameIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TPermissionScope
              */
             @org.apache.thrift.annotation.Nullable
          @@ -211,10 +252,10 @@ public TPermissionScope getScope() {
             }
           
             /**
          -   * 
              * @see TPermissionScope
              */
          -  public TAccessControlEntity setScope(@org.apache.thrift.annotation.Nullable TPermissionScope scope) {
          +  public TAccessControlEntity
          +      setScope(@org.apache.thrift.annotation.Nullable TPermissionScope scope) {
               this.scope = scope;
               return this;
             }
          @@ -239,7 +280,8 @@ public java.lang.String getActions() {
               return this.actions;
             }
           
          -  public TAccessControlEntity setActions(@org.apache.thrift.annotation.Nullable java.lang.String actions) {
          +  public TAccessControlEntity
          +      setActions(@org.apache.thrift.annotation.Nullable java.lang.String actions) {
               this.actions = actions;
               return this;
             }
          @@ -269,11 +311,13 @@ public java.nio.ByteBuffer bufferForTableName() {
             }
           
             public TAccessControlEntity setTableName(byte[] tableName) {
          -    this.tableName = tableName == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(tableName.clone());
          +    this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(tableName.clone());
               return this;
             }
           
          -  public TAccessControlEntity setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +  public TAccessControlEntity
          +      setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
               this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
               return this;
             }
          @@ -298,7 +342,8 @@ public java.lang.String getNsName() {
               return this.nsName;
             }
           
          -  public TAccessControlEntity setNsName(@org.apache.thrift.annotation.Nullable java.lang.String nsName) {
          +  public TAccessControlEntity
          +      setNsName(@org.apache.thrift.annotation.Nullable java.lang.String nsName) {
               this.nsName = nsName;
               return this;
             }
          @@ -318,51 +363,52 @@ public void setNsNameIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case USERNAME:
          -      if (value == null) {
          -        unsetUsername();
          -      } else {
          -        setUsername((java.lang.String)value);
          -      }
          -      break;
          +      case USERNAME:
          +        if (value == null) {
          +          unsetUsername();
          +        } else {
          +          setUsername((java.lang.String) value);
          +        }
          +        break;
           
          -    case SCOPE:
          -      if (value == null) {
          -        unsetScope();
          -      } else {
          -        setScope((TPermissionScope)value);
          -      }
          -      break;
          +      case SCOPE:
          +        if (value == null) {
          +          unsetScope();
          +        } else {
          +          setScope((TPermissionScope) value);
          +        }
          +        break;
           
          -    case ACTIONS:
          -      if (value == null) {
          -        unsetActions();
          -      } else {
          -        setActions((java.lang.String)value);
          -      }
          -      break;
          +      case ACTIONS:
          +        if (value == null) {
          +          unsetActions();
          +        } else {
          +          setActions((java.lang.String) value);
          +        }
          +        break;
           
          -    case TABLE_NAME:
          -      if (value == null) {
          -        unsetTableName();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setTableName((byte[])value);
          +      case TABLE_NAME:
          +        if (value == null) {
          +          unsetTableName();
                   } else {
          -          setTableName((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setTableName((byte[]) value);
          +          } else {
          +            setTableName((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case NS_NAME:
          -      if (value == null) {
          -        unsetNsName();
          -      } else {
          -        setNsName((java.lang.String)value);
          -      }
          -      break;
          +      case NS_NAME:
          +        if (value == null) {
          +          unsetNsName();
          +        } else {
          +          setNsName((java.lang.String) value);
          +        }
          +        break;
           
               }
             }
          @@ -370,102 +416,92 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case USERNAME:
          -      return getUsername();
          +      case USERNAME:
          +        return getUsername();
           
          -    case SCOPE:
          -      return getScope();
          +      case SCOPE:
          +        return getScope();
           
          -    case ACTIONS:
          -      return getActions();
          +      case ACTIONS:
          +        return getActions();
           
          -    case TABLE_NAME:
          -      return getTableName();
          +      case TABLE_NAME:
          +        return getTableName();
           
          -    case NS_NAME:
          -      return getNsName();
          +      case NS_NAME:
          +        return getNsName();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case USERNAME:
          -      return isSetUsername();
          -    case SCOPE:
          -      return isSetScope();
          -    case ACTIONS:
          -      return isSetActions();
          -    case TABLE_NAME:
          -      return isSetTableName();
          -    case NS_NAME:
          -      return isSetNsName();
          +      case USERNAME:
          +        return isSetUsername();
          +      case SCOPE:
          +        return isSetScope();
          +      case ACTIONS:
          +        return isSetActions();
          +      case TABLE_NAME:
          +        return isSetTableName();
          +      case NS_NAME:
          +        return isSetNsName();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TAccessControlEntity)
          -      return this.equals((TAccessControlEntity)that);
          +    if (that instanceof TAccessControlEntity) return this.equals((TAccessControlEntity) that);
               return false;
             }
           
             public boolean equals(TAccessControlEntity that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_username = true && this.isSetUsername();
               boolean that_present_username = true && that.isSetUsername();
               if (this_present_username || that_present_username) {
          -      if (!(this_present_username && that_present_username))
          -        return false;
          -      if (!this.username.equals(that.username))
          -        return false;
          +      if (!(this_present_username && that_present_username)) return false;
          +      if (!this.username.equals(that.username)) return false;
               }
           
               boolean this_present_scope = true && this.isSetScope();
               boolean that_present_scope = true && that.isSetScope();
               if (this_present_scope || that_present_scope) {
          -      if (!(this_present_scope && that_present_scope))
          -        return false;
          -      if (!this.scope.equals(that.scope))
          -        return false;
          +      if (!(this_present_scope && that_present_scope)) return false;
          +      if (!this.scope.equals(that.scope)) return false;
               }
           
               boolean this_present_actions = true && this.isSetActions();
               boolean that_present_actions = true && that.isSetActions();
               if (this_present_actions || that_present_actions) {
          -      if (!(this_present_actions && that_present_actions))
          -        return false;
          -      if (!this.actions.equals(that.actions))
          -        return false;
          +      if (!(this_present_actions && that_present_actions)) return false;
          +      if (!this.actions.equals(that.actions)) return false;
               }
           
               boolean this_present_tableName = true && this.isSetTableName();
               boolean that_present_tableName = true && that.isSetTableName();
               if (this_present_tableName || that_present_tableName) {
          -      if (!(this_present_tableName && that_present_tableName))
          -        return false;
          -      if (!this.tableName.equals(that.tableName))
          -        return false;
          +      if (!(this_present_tableName && that_present_tableName)) return false;
          +      if (!this.tableName.equals(that.tableName)) return false;
               }
           
               boolean this_present_nsName = true && this.isSetNsName();
               boolean that_present_nsName = true && that.isSetNsName();
               if (this_present_nsName || that_present_nsName) {
          -      if (!(this_present_nsName && that_present_nsName))
          -        return false;
          -      if (!this.nsName.equals(that.nsName))
          -        return false;
          +      if (!(this_present_nsName && that_present_nsName)) return false;
          +      if (!this.nsName.equals(that.nsName)) return false;
               }
           
               return true;
          @@ -476,24 +512,19 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetUsername()) ? 131071 : 524287);
          -    if (isSetUsername())
          -      hashCode = hashCode * 8191 + username.hashCode();
          +    if (isSetUsername()) hashCode = hashCode * 8191 + username.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetScope()) ? 131071 : 524287);
          -    if (isSetScope())
          -      hashCode = hashCode * 8191 + scope.getValue();
          +    if (isSetScope()) hashCode = hashCode * 8191 + scope.getValue();
           
               hashCode = hashCode * 8191 + ((isSetActions()) ? 131071 : 524287);
          -    if (isSetActions())
          -      hashCode = hashCode * 8191 + actions.hashCode();
          +    if (isSetActions()) hashCode = hashCode * 8191 + actions.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -    if (isSetTableName())
          -      hashCode = hashCode * 8191 + tableName.hashCode();
          +    if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetNsName()) ? 131071 : 524287);
          -    if (isSetNsName())
          -      hashCode = hashCode * 8191 + nsName.hashCode();
          +    if (isSetNsName()) hashCode = hashCode * 8191 + nsName.hashCode();
           
               return hashCode;
             }
          @@ -568,7 +599,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -627,48 +659,56 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (username == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'username' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'username' was not present! Struct: " + toString());
               }
               if (scope == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'scope' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'scope' was not present! Struct: " + toString());
               }
               if (actions == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'actions' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'actions' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TAccessControlEntityStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TAccessControlEntityStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TAccessControlEntityStandardScheme getScheme() {
                 return new TAccessControlEntityStandardScheme();
               }
             }
           
          -  private static class TAccessControlEntityStandardScheme extends org.apache.thrift.scheme.StandardScheme<TAccessControlEntity> {
          +  private static class TAccessControlEntityStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<TAccessControlEntity> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntity struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntity struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -676,15 +716,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.username = iprot.readString();
                         struct.setUsernameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 2: // SCOPE
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.scope = org.apache.hadoop.hbase.thrift.generated.TPermissionScope.findByValue(iprot.readI32());
          +              struct.scope = org.apache.hadoop.hbase.thrift.generated.TPermissionScope
          +                  .findByValue(iprot.readI32());
                         struct.setScopeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -692,7 +733,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.actions = iprot.readString();
                         struct.setActionsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -700,7 +741,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.tableName = iprot.readBinary();
                         struct.setTableNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -708,7 +749,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.nsName = iprot.readString();
                         struct.setNsNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -723,7 +764,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TAccessControlEntity struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TAccessControlEntity struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -762,17 +804,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAccessControlEnti
           
             }
           
          -  private static class TAccessControlEntityTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TAccessControlEntityTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TAccessControlEntityTupleScheme getScheme() {
                 return new TAccessControlEntityTupleScheme();
               }
             }
           
          -  private static class TAccessControlEntityTupleScheme extends org.apache.thrift.scheme.TupleScheme<TAccessControlEntity> {
          +  private static class TAccessControlEntityTupleScheme
          +      extends org.apache.thrift.scheme.TupleScheme<TAccessControlEntity> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeString(struct.username);
                 oprot.writeI32(struct.scope.getValue());
                 oprot.writeString(struct.actions);
          @@ -793,11 +839,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntit
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.username = iprot.readString();
                 struct.setUsernameIsSet(true);
          -      struct.scope = org.apache.hadoop.hbase.thrift.generated.TPermissionScope.findByValue(iprot.readI32());
          +      struct.scope =
          +          org.apache.hadoop.hbase.thrift.generated.TPermissionScope.findByValue(iprot.readI32());
                 struct.setScopeIsSet(true);
                 struct.actions = iprot.readString();
                 struct.setActionsIsSet(true);
          @@ -813,8 +862,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java
          index 3d0333ea30f6..5bf4585ba99f 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java
          @@ -1,40 +1,66 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * An Append object is used to specify the parameters for performing the append operation.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TAppend implements org.apache.thrift.TBase<TAppend, TAppend._Fields>, java.io.Serializable, Cloneable, Comparable<TAppend> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAppend");
          -
          -  private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -  private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)4);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TAppendStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TAppendTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TAppend implements org.apache.thrift.TBase<TAppend, TAppend._Fields>,
          +    java.io.Serializable, Cloneable, Comparable<TAppend> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TAppend");
          +
          +  private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST,
          +          (short) 4);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TAppendStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TAppendTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
             public @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns; // required
             public @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> values; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    TABLE((short)1, "table"),
          -    ROW((short)2, "row"),
          -    COLUMNS((short)3, "columns"),
          -    VALUES((short)4, "values");
          +    TABLE((short) 1, "table"), ROW((short) 2, "row"), COLUMNS((short) 3, "columns"),
          +    VALUES((short) 4, "values");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -47,7 +73,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // TABLE
                     return TABLE;
                   case 2: // ROW
          @@ -62,12 +88,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -99,17 +125,30 @@ public java.lang.String getFieldName() {
             // isset id assignments
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , "Text"))));
          -    tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , "Text"))));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.TABLE,
          +      new org.apache.thrift.meta_data.FieldMetaData("table",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.COLUMNS,
          +      new org.apache.thrift.meta_data.FieldMetaData("columns",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, "Text"))));
          +    tmpMap.put(_Fields.VALUES,
          +      new org.apache.thrift.meta_data.FieldMetaData("values",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, "Text"))));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TAppend.class, metaDataMap);
             }
          @@ -117,12 +156,8 @@ public java.lang.String getFieldName() {
             public TAppend() {
             }
           
          -  public TAppend(
          -    java.nio.ByteBuffer table,
          -    java.nio.ByteBuffer row,
          -    java.util.List<java.nio.ByteBuffer> columns,
          -    java.util.List<java.nio.ByteBuffer> values)
          -  {
          +  public TAppend(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +      java.util.List<java.nio.ByteBuffer> columns, java.util.List<java.nio.ByteBuffer> values) {
               this();
               this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -141,14 +176,16 @@ public TAppend(TAppend other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetColumns()) {
          -      java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
          +      java.util.List<java.nio.ByteBuffer> __this__columns =
          +          new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                 for (java.nio.ByteBuffer other_element : other.columns) {
                   __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                 }
                 this.columns = __this__columns;
               }
               if (other.isSetValues()) {
          -      java.util.List<java.nio.ByteBuffer> __this__values = new java.util.ArrayList<java.nio.ByteBuffer>(other.values.size());
          +      java.util.List<java.nio.ByteBuffer> __this__values =
          +          new java.util.ArrayList<java.nio.ByteBuffer>(other.values.size());
                 for (java.nio.ByteBuffer other_element : other.values) {
                   __this__values.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                 }
          @@ -178,7 +215,8 @@ public java.nio.ByteBuffer bufferForTable() {
             }
           
             public TAppend setTable(byte[] table) {
          -    this.table = table == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(table.clone());
          +    this.table =
          +        table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
               return this;
             }
           
          @@ -212,7 +250,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TAppend setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
          @@ -257,7 +295,8 @@ public java.util.List<java.nio.ByteBuffer> getColumns() {
               return this.columns;
             }
           
          -  public TAppend setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
          +  public TAppend setColumns(
          +      @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
               this.columns = columns;
               return this;
             }
          @@ -298,7 +337,8 @@ public java.util.List<java.nio.ByteBuffer> getValues() {
               return this.values;
             }
           
          -  public TAppend setValues(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> values) {
          +  public TAppend
          +      setValues(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> values) {
               this.values = values;
               return this;
             }
          @@ -318,47 +358,48 @@ public void setValuesIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case TABLE:
          -      if (value == null) {
          -        unsetTable();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setTable((byte[])value);
          +      case TABLE:
          +        if (value == null) {
          +          unsetTable();
                   } else {
          -          setTable((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setTable((byte[]) value);
          +          } else {
          +            setTable((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          -
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +        break;
          +
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case COLUMNS:
          -      if (value == null) {
          -        unsetColumns();
          -      } else {
          -        setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case COLUMNS:
          +        if (value == null) {
          +          unsetColumns();
          +        } else {
          +          setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case VALUES:
          -      if (value == null) {
          -        unsetValues();
          -      } else {
          -        setValues((java.util.List<java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case VALUES:
          +        if (value == null) {
          +          unsetValues();
          +        } else {
          +          setValues((java.util.List<java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
               }
             }
          @@ -366,88 +407,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case TABLE:
          -      return getTable();
          +      case TABLE:
          +        return getTable();
           
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case COLUMNS:
          -      return getColumns();
          +      case COLUMNS:
          +        return getColumns();
           
          -    case VALUES:
          -      return getValues();
          +      case VALUES:
          +        return getValues();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case TABLE:
          -      return isSetTable();
          -    case ROW:
          -      return isSetRow();
          -    case COLUMNS:
          -      return isSetColumns();
          -    case VALUES:
          -      return isSetValues();
          +      case TABLE:
          +        return isSetTable();
          +      case ROW:
          +        return isSetRow();
          +      case COLUMNS:
          +        return isSetColumns();
          +      case VALUES:
          +        return isSetValues();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TAppend)
          -      return this.equals((TAppend)that);
          +    if (that instanceof TAppend) return this.equals((TAppend) that);
               return false;
             }
           
             public boolean equals(TAppend that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_table = true && this.isSetTable();
               boolean that_present_table = true && that.isSetTable();
               if (this_present_table || that_present_table) {
          -      if (!(this_present_table && that_present_table))
          -        return false;
          -      if (!this.table.equals(that.table))
          -        return false;
          +      if (!(this_present_table && that_present_table)) return false;
          +      if (!this.table.equals(that.table)) return false;
               }
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_columns = true && this.isSetColumns();
               boolean that_present_columns = true && that.isSetColumns();
               if (this_present_columns || that_present_columns) {
          -      if (!(this_present_columns && that_present_columns))
          -        return false;
          -      if (!this.columns.equals(that.columns))
          -        return false;
          +      if (!(this_present_columns && that_present_columns)) return false;
          +      if (!this.columns.equals(that.columns)) return false;
               }
           
               boolean this_present_values = true && this.isSetValues();
               boolean that_present_values = true && that.isSetValues();
               if (this_present_values || that_present_values) {
          -      if (!(this_present_values && that_present_values))
          -        return false;
          -      if (!this.values.equals(that.values))
          -        return false;
          +      if (!(this_present_values && that_present_values)) return false;
          +      if (!this.values.equals(that.values)) return false;
               }
           
               return true;
          @@ -458,20 +491,16 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -    if (isSetTable())
          -      hashCode = hashCode * 8191 + table.hashCode();
          +    if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -    if (isSetColumns())
          -      hashCode = hashCode * 8191 + columns.hashCode();
          +    if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetValues()) ? 131071 : 524287);
          -    if (isSetValues())
          -      hashCode = hashCode * 8191 + values.hashCode();
          +    if (isSetValues()) hashCode = hashCode * 8191 + values.hashCode();
           
               return hashCode;
             }
          @@ -536,7 +565,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -587,35 +617,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TAppendStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TAppendStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TAppendStandardScheme getScheme() {
                 return new TAppendStandardScheme();
               }
             }
           
          -  private static class TAppendStandardScheme extends org.apache.thrift.scheme.StandardScheme<TAppend> {
          +  private static class TAppendStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<TAppend> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -623,7 +658,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.table = iprot.readBinary();
                         struct.setTableIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -631,7 +666,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -640,16 +675,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                         {
                           org.apache.thrift.protocol.TList _list34 = iprot.readListBegin();
                           struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list34.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem35;
          -                for (int _i36 = 0; _i36 < _list34.size; ++_i36)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _elem35;
          +                for (int _i36 = 0; _i36 < _list34.size; ++_i36) {
                             _elem35 = iprot.readBinary();
                             struct.columns.add(_elem35);
                           }
                           iprot.readListEnd();
                         }
                         struct.setColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -658,16 +693,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                         {
                           org.apache.thrift.protocol.TList _list37 = iprot.readListBegin();
                           struct.values = new java.util.ArrayList<java.nio.ByteBuffer>(_list37.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem38;
          -                for (int _i39 = 0; _i39 < _list37.size; ++_i39)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _elem38;
          +                for (int _i39 = 0; _i39 < _list37.size; ++_i39) {
                             _elem38 = iprot.readBinary();
                             struct.values.add(_elem38);
                           }
                           iprot.readListEnd();
                         }
                         struct.setValuesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -682,7 +717,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TAppend struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TAppend struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -699,9 +735,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAppend struct) th
                 if (struct.columns != null) {
                   oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                   {
          -          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -          for (java.nio.ByteBuffer _iter40 : struct.columns)
          -          {
          +          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +              org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +          for (java.nio.ByteBuffer _iter40 : struct.columns) {
                       oprot.writeBinary(_iter40);
                     }
                     oprot.writeListEnd();
          @@ -711,9 +747,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAppend struct) th
                 if (struct.values != null) {
                   oprot.writeFieldBegin(VALUES_FIELD_DESC);
                   {
          -          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size()));
          -          for (java.nio.ByteBuffer _iter41 : struct.values)
          -          {
          +          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +              org.apache.thrift.protocol.TType.STRING, struct.values.size()));
          +          for (java.nio.ByteBuffer _iter41 : struct.values) {
                       oprot.writeBinary(_iter41);
                     }
                     oprot.writeListEnd();
          @@ -735,8 +771,10 @@ public TAppendTupleScheme getScheme() {
             private static class TAppendTupleScheme extends org.apache.thrift.scheme.TupleScheme<TAppend> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TAppend struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TAppend struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetTable()) {
                   optionals.set(0);
          @@ -760,8 +798,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thr
                 if (struct.isSetColumns()) {
                   {
                     oprot.writeI32(struct.columns.size());
          -          for (java.nio.ByteBuffer _iter42 : struct.columns)
          -          {
          +          for (java.nio.ByteBuffer _iter42 : struct.columns) {
                       oprot.writeBinary(_iter42);
                     }
                   }
          @@ -769,8 +806,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thr
                 if (struct.isSetValues()) {
                   {
                     oprot.writeI32(struct.values.size());
          -          for (java.nio.ByteBuffer _iter43 : struct.values)
          -          {
          +          for (java.nio.ByteBuffer _iter43 : struct.values) {
                       oprot.writeBinary(_iter43);
                     }
                   }
          @@ -778,8 +814,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thr
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(4);
                 if (incoming.get(0)) {
                   struct.table = iprot.readBinary();
          @@ -791,11 +829,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thro
                 }
                 if (incoming.get(2)) {
                   {
          -          org.apache.thrift.protocol.TList _list44 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +          org.apache.thrift.protocol.TList _list44 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                     struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list44.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem45;
          -          for (int _i46 = 0; _i46 < _list44.size; ++_i46)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _elem45;
          +          for (int _i46 = 0; _i46 < _list44.size; ++_i46) {
                       _elem45 = iprot.readBinary();
                       struct.columns.add(_elem45);
                     }
          @@ -804,11 +843,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thro
                 }
                 if (incoming.get(3)) {
                   {
          -          org.apache.thrift.protocol.TList _list47 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +          org.apache.thrift.protocol.TList _list47 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                     struct.values = new java.util.ArrayList<java.nio.ByteBuffer>(_list47.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem48;
          -          for (int _i49 = 0; _i49 < _list47.size; ++_i49)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _elem48;
          +          for (int _i49 = 0; _i49 < _list47.size; ++_i49) {
                       _elem48 = iprot.readBinary();
                       struct.values.add(_elem48);
                     }
          @@ -818,8 +858,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thro
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java
          index fe6ccf21d416..0705c2404ae0 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java
          @@ -1,37 +1,60 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * TCell - Used to transport a cell value (byte[]) and the timestamp it was
          - * stored with together as a result for get and getRow methods. This promotes
          - * the timestamp of a cell to a first-class value, making it easy to take
          - * note of temporal data. Cell is used all the way from HStore up to HTable.
          + * TCell - Used to transport a cell value (byte[]) and the timestamp it was stored with together as
          + * a result for get and getRow methods. This promotes the timestamp of a cell to a first-class
          + * value, making it easy to take note of temporal data. Cell is used all the way from HStore up to
          + * HTable.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TCell implements org.apache.thrift.TBase<TCell, TCell._Fields>, java.io.Serializable, Cloneable, Comparable<TCell> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCell");
          -
          -  private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TCellStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TCellTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TCell implements org.apache.thrift.TBase<TCell, TCell._Fields>, java.io.Serializable,
          +    Cloneable, Comparable<TCell> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TCell");
          +
          +  private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TCellStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TCellTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value; // required
             public long timestamp; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    VALUE((short)1, "value"),
          -    TIMESTAMP((short)2, "timestamp");
          +    VALUE((short) 1, "value"), TIMESTAMP((short) 2, "timestamp");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -44,7 +67,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // VALUE
                     return VALUE;
                   case 2: // TIMESTAMP
          @@ -55,12 +78,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -94,10 +117,15 @@ public java.lang.String getFieldName() {
             private byte __isset_bitfield = 0;
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Bytes")));
          -    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.VALUE,
          +      new org.apache.thrift.meta_data.FieldMetaData("value",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Bytes")));
          +    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCell.class, metaDataMap);
          @@ -106,10 +134,7 @@ public java.lang.String getFieldName() {
             public TCell() {
             }
           
          -  public TCell(
          -    java.nio.ByteBuffer value,
          -    long timestamp)
          -  {
          +  public TCell(java.nio.ByteBuffer value, long timestamp) {
               this();
               this.value = org.apache.thrift.TBaseHelper.copyBinary(value);
               this.timestamp = timestamp;
          @@ -148,7 +173,8 @@ public java.nio.ByteBuffer bufferForValue() {
             }
           
             public TCell setValue(byte[] value) {
          -    this.value = value == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(value.clone());
          +    this.value =
          +        value == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(value.clone());
               return this;
             }
           
          @@ -183,7 +209,8 @@ public TCell setTimestamp(long timestamp) {
             }
           
             public void unsetTimestamp() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
             }
           
             /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -192,30 +219,32 @@ public boolean isSetTimestamp() {
             }
           
             public void setTimestampIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case VALUE:
          -      if (value == null) {
          -        unsetValue();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setValue((byte[])value);
          +      case VALUE:
          +        if (value == null) {
          +          unsetValue();
                   } else {
          -          setValue((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setValue((byte[]) value);
          +          } else {
          +            setValue((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case TIMESTAMP:
          -      if (value == null) {
          -        unsetTimestamp();
          -      } else {
          -        setTimestamp((java.lang.Long)value);
          -      }
          -      break;
          +      case TIMESTAMP:
          +        if (value == null) {
          +          unsetTimestamp();
          +        } else {
          +          setTimestamp((java.lang.Long) value);
          +        }
          +        break;
           
               }
             }
          @@ -223,60 +252,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case VALUE:
          -      return getValue();
          +      case VALUE:
          +        return getValue();
           
          -    case TIMESTAMP:
          -      return getTimestamp();
          +      case TIMESTAMP:
          +        return getTimestamp();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case VALUE:
          -      return isSetValue();
          -    case TIMESTAMP:
          -      return isSetTimestamp();
          +      case VALUE:
          +        return isSetValue();
          +      case TIMESTAMP:
          +        return isSetTimestamp();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TCell)
          -      return this.equals((TCell)that);
          +    if (that instanceof TCell) return this.equals((TCell) that);
               return false;
             }
           
             public boolean equals(TCell that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_value = true && this.isSetValue();
               boolean that_present_value = true && that.isSetValue();
               if (this_present_value || that_present_value) {
          -      if (!(this_present_value && that_present_value))
          -        return false;
          -      if (!this.value.equals(that.value))
          -        return false;
          +      if (!(this_present_value && that_present_value)) return false;
          +      if (!this.value.equals(that.value)) return false;
               }
           
               boolean this_present_timestamp = true;
               boolean that_present_timestamp = true;
               if (this_present_timestamp || that_present_timestamp) {
          -      if (!(this_present_timestamp && that_present_timestamp))
          -        return false;
          -      if (this.timestamp != that.timestamp)
          -        return false;
          +      if (!(this_present_timestamp && that_present_timestamp)) return false;
          +      if (this.timestamp != that.timestamp) return false;
               }
           
               return true;
          @@ -287,8 +312,7 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetValue()) ? 131071 : 524287);
          -    if (isSetValue())
          -      hashCode = hashCode * 8191 + value.hashCode();
          +    if (isSetValue()) hashCode = hashCode * 8191 + value.hashCode();
           
               hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
          @@ -335,7 +359,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -366,23 +391,28 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TCellStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TCellStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TCellStandardScheme getScheme() {
                 return new TCellStandardScheme();
               }
          @@ -390,13 +420,13 @@ public TCellStandardScheme getScheme() {
           
             private static class TCellStandardScheme extends org.apache.thrift.scheme.StandardScheme<TCell> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TCell struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TCell struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -404,7 +434,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TCell struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.value = iprot.readBinary();
                         struct.setValueIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -412,7 +442,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TCell struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.timestamp = iprot.readI64();
                         struct.setTimestampIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -427,7 +457,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TCell struct) throw
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TCell struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TCell struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -454,8 +485,10 @@ public TCellTupleScheme getScheme() {
             private static class TCellTupleScheme extends org.apache.thrift.scheme.TupleScheme<TCell> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TCell struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TCell struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetValue()) {
                   optionals.set(0);
          @@ -473,8 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TCell struct) throw
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TCell struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TCell struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(2);
                 if (incoming.get(0)) {
                   struct.value = iprot.readBinary();
          @@ -487,8 +522,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TCell struct) throws
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java
          index 8f486104691b..9aeb120333eb 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java
          @@ -1,34 +1,57 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * Holds column name and the cell.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class TColumn implements org.apache.thrift.TBase<TColumn, TColumn._Fields>, java.io.Serializable, Cloneable, Comparable<TColumn> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn");
          -
          -  private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("columnName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField CELL_FIELD_DESC = new org.apache.thrift.protocol.TField("cell", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TColumnStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TColumnTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           +public class TColumn implements org.apache.thrift.TBase<TColumn, TColumn._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<TColumn> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TColumn");
          +
          +  private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columnName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField CELL_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cell", org.apache.thrift.protocol.TType.STRUCT,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TColumnStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TColumnTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer columnName; // required
             public @org.apache.thrift.annotation.Nullable TCell cell; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    COLUMN_NAME((short)1, "columnName"),
          -    CELL((short)2, "cell");
          +    COLUMN_NAME((short) 1, "columnName"), CELL((short) 2, "cell");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -41,7 +64,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // COLUMN_NAME
                     return COLUMN_NAME;
                   case 2: // CELL
          @@ -52,12 +75,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -89,11 +112,18 @@ public java.lang.String getFieldName() {
             // isset id assignments
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("columnName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.CELL, new org.apache.thrift.meta_data.FieldMetaData("cell", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCell.class)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.COLUMN_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("columnName",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.CELL,
          +      new org.apache.thrift.meta_data.FieldMetaData("cell",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TCell.class)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumn.class, metaDataMap);
             }
          @@ -101,10 +131,7 @@ public java.lang.String getFieldName() {
             public TColumn() {
             }
           
          -  public TColumn(
          -    java.nio.ByteBuffer columnName,
          -    TCell cell)
          -  {
          +  public TColumn(java.nio.ByteBuffer columnName, TCell cell) {
               this();
               this.columnName = org.apache.thrift.TBaseHelper.copyBinary(columnName);
               this.cell = cell;
          @@ -142,11 +169,13 @@ public java.nio.ByteBuffer bufferForColumnName() {
             }
           
             public TColumn setColumnName(byte[] columnName) {
          -    this.columnName = columnName == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(columnName.clone());
          +    this.columnName = columnName == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(columnName.clone());
               return this;
             }
           
          -  public TColumn setColumnName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer columnName) {
          +  public TColumn
          +      setColumnName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer columnName) {
               this.columnName = org.apache.thrift.TBaseHelper.copyBinary(columnName);
               return this;
             }
          @@ -191,27 +220,28 @@ public void setCellIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case COLUMN_NAME:
          -      if (value == null) {
          -        unsetColumnName();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setColumnName((byte[])value);
          +      case COLUMN_NAME:
          +        if (value == null) {
          +          unsetColumnName();
                   } else {
          -          setColumnName((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setColumnName((byte[]) value);
          +          } else {
          +            setColumnName((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case CELL:
          -      if (value == null) {
          -        unsetCell();
          -      } else {
          -        setCell((TCell)value);
          -      }
          -      break;
          +      case CELL:
          +        if (value == null) {
          +          unsetCell();
          +        } else {
          +          setCell((TCell) value);
          +        }
          +        break;
           
               }
             }
          @@ -219,60 +249,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case COLUMN_NAME:
          -      return getColumnName();
          +      case COLUMN_NAME:
          +        return getColumnName();
           
          -    case CELL:
          -      return getCell();
          +      case CELL:
          +        return getCell();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case COLUMN_NAME:
          -      return isSetColumnName();
          -    case CELL:
          -      return isSetCell();
          +      case COLUMN_NAME:
          +        return isSetColumnName();
          +      case CELL:
          +        return isSetCell();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TColumn)
          -      return this.equals((TColumn)that);
          +    if (that instanceof TColumn) return this.equals((TColumn) that);
               return false;
             }
           
             public boolean equals(TColumn that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_columnName = true && this.isSetColumnName();
               boolean that_present_columnName = true && that.isSetColumnName();
               if (this_present_columnName || that_present_columnName) {
          -      if (!(this_present_columnName && that_present_columnName))
          -        return false;
          -      if (!this.columnName.equals(that.columnName))
          -        return false;
          +      if (!(this_present_columnName && that_present_columnName)) return false;
          +      if (!this.columnName.equals(that.columnName)) return false;
               }
           
               boolean this_present_cell = true && this.isSetCell();
               boolean that_present_cell = true && that.isSetCell();
               if (this_present_cell || that_present_cell) {
          -      if (!(this_present_cell && that_present_cell))
          -        return false;
          -      if (!this.cell.equals(that.cell))
          -        return false;
          +      if (!(this_present_cell && that_present_cell)) return false;
          +      if (!this.cell.equals(that.cell)) return false;
               }
           
               return true;
          @@ -283,12 +309,10 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetColumnName()) ? 131071 : 524287);
          -    if (isSetColumnName())
          -      hashCode = hashCode * 8191 + columnName.hashCode();
          +    if (isSetColumnName()) hashCode = hashCode * 8191 + columnName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetCell()) ? 131071 : 524287);
          -    if (isSetCell())
          -      hashCode = hashCode * 8191 + cell.hashCode();
          +    if (isSetCell()) hashCode = hashCode * 8191 + cell.hashCode();
           
               return hashCode;
             }
          @@ -333,7 +357,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -371,35 +396,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TColumnStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TColumnStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TColumnStandardScheme getScheme() {
                 return new TColumnStandardScheme();
               }
             }
           
           -  private static class TColumnStandardScheme extends org.apache.thrift.scheme.StandardScheme<TColumn> {
           +  private static class TColumnStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<TColumn> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -407,7 +437,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.columnName = iprot.readBinary();
                         struct.setColumnNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -416,7 +446,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct) thr
                         struct.cell = new TCell();
                         struct.cell.read(iprot);
                         struct.setCellIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -431,7 +461,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct) thr
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumn struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumn struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -460,8 +491,10 @@ public TColumnTupleScheme getScheme() {
              private static class TColumnTupleScheme extends org.apache.thrift.scheme.TupleScheme<TColumn> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TColumn struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TColumn struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetColumnName()) {
                   optionals.set(0);
          @@ -479,8 +512,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TColumn struct) thr
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TColumn struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TColumn struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(2);
                 if (incoming.get(0)) {
                   struct.columnName = iprot.readBinary();
          @@ -494,8 +529,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TColumn struct) thro
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java
          index ee1fdd1d0573..7d6352454ab4 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java
          @@ -1,41 +1,66 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * For increments that are not incrementColumnValue
          - * equivalents.
          + * For increments that are not incrementColumnValue equivalents.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class TIncrement implements org.apache.thrift.TBase<TIncrement, TIncrement._Fields>, java.io.Serializable, Cloneable, Comparable<TIncrement> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIncrement");
          -
          -  private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)3);
          -  private static final org.apache.thrift.protocol.TField AMMOUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("ammount", org.apache.thrift.protocol.TType.I64, (short)4);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TIncrementStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TIncrementTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           +public class TIncrement implements org.apache.thrift.TBase<TIncrement, TIncrement._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<TIncrement> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TIncrement");
          +
          +  private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField AMMOUNT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("ammount", org.apache.thrift.protocol.TType.I64,
          +          (short) 4);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TIncrementStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TIncrementTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column; // required
             public long ammount; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    TABLE((short)1, "table"),
          -    ROW((short)2, "row"),
          -    COLUMN((short)3, "column"),
          -    AMMOUNT((short)4, "ammount");
          +    TABLE((short) 1, "table"), ROW((short) 2, "row"), COLUMN((short) 3, "column"),
          +    AMMOUNT((short) 4, "ammount");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -48,7 +73,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // TABLE
                     return TABLE;
                   case 2: // ROW
          @@ -63,12 +88,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -102,14 +127,25 @@ public java.lang.String getFieldName() {
             private byte __isset_bitfield = 0;
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.AMMOUNT, new org.apache.thrift.meta_data.FieldMetaData("ammount", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.TABLE,
          +      new org.apache.thrift.meta_data.FieldMetaData("table",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.COLUMN,
          +      new org.apache.thrift.meta_data.FieldMetaData("column",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.AMMOUNT, new org.apache.thrift.meta_data.FieldMetaData("ammount",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TIncrement.class, metaDataMap);
          @@ -118,12 +154,8 @@ public java.lang.String getFieldName() {
             public TIncrement() {
             }
           
          -  public TIncrement(
          -    java.nio.ByteBuffer table,
          -    java.nio.ByteBuffer row,
          -    java.nio.ByteBuffer column,
          -    long ammount)
          -  {
          +  public TIncrement(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer column,
          +      long ammount) {
               this();
               this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -172,7 +204,8 @@ public java.nio.ByteBuffer bufferForTable() {
             }
           
             public TIncrement setTable(byte[] table) {
          -    this.table = table == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(table.clone());
          +    this.table =
          +        table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
               return this;
             }
           
          @@ -206,7 +239,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TIncrement setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
          @@ -240,7 +273,8 @@ public java.nio.ByteBuffer bufferForColumn() {
             }
           
             public TIncrement setColumn(byte[] column) {
          -    this.column = column == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(column.clone());
          +    this.column =
          +        column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
               return this;
             }
           
          @@ -275,7 +309,8 @@ public TIncrement setAmmount(long ammount) {
             }
           
             public void unsetAmmount() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __AMMOUNT_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __AMMOUNT_ISSET_ID);
             }
           
             /** Returns true if field ammount is set (has been assigned a value) and false otherwise */
          @@ -284,54 +319,56 @@ public boolean isSetAmmount() {
             }
           
             public void setAmmountIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __AMMOUNT_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __AMMOUNT_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case TABLE:
          -      if (value == null) {
          -        unsetTable();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setTable((byte[])value);
          +      case TABLE:
          +        if (value == null) {
          +          unsetTable();
                   } else {
          -          setTable((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setTable((byte[]) value);
          +          } else {
          +            setTable((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          -
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +        break;
          +
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          -
          -    case COLUMN:
          -      if (value == null) {
          -        unsetColumn();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setColumn((byte[])value);
          +        break;
          +
          +      case COLUMN:
          +        if (value == null) {
          +          unsetColumn();
                   } else {
          -          setColumn((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setColumn((byte[]) value);
          +          } else {
          +            setColumn((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case AMMOUNT:
          -      if (value == null) {
          -        unsetAmmount();
          -      } else {
          -        setAmmount((java.lang.Long)value);
          -      }
          -      break;
          +      case AMMOUNT:
          +        if (value == null) {
          +          unsetAmmount();
          +        } else {
          +          setAmmount((java.lang.Long) value);
          +        }
          +        break;
           
               }
             }
          @@ -339,88 +376,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case TABLE:
          -      return getTable();
          +      case TABLE:
          +        return getTable();
           
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case COLUMN:
          -      return getColumn();
          +      case COLUMN:
          +        return getColumn();
           
          -    case AMMOUNT:
          -      return getAmmount();
          +      case AMMOUNT:
          +        return getAmmount();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case TABLE:
          -      return isSetTable();
          -    case ROW:
          -      return isSetRow();
          -    case COLUMN:
          -      return isSetColumn();
          -    case AMMOUNT:
          -      return isSetAmmount();
          +      case TABLE:
          +        return isSetTable();
          +      case ROW:
          +        return isSetRow();
          +      case COLUMN:
          +        return isSetColumn();
          +      case AMMOUNT:
          +        return isSetAmmount();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TIncrement)
          -      return this.equals((TIncrement)that);
          +    if (that instanceof TIncrement) return this.equals((TIncrement) that);
               return false;
             }
           
             public boolean equals(TIncrement that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_table = true && this.isSetTable();
               boolean that_present_table = true && that.isSetTable();
               if (this_present_table || that_present_table) {
          -      if (!(this_present_table && that_present_table))
          -        return false;
          -      if (!this.table.equals(that.table))
          -        return false;
          +      if (!(this_present_table && that_present_table)) return false;
          +      if (!this.table.equals(that.table)) return false;
               }
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_column = true && this.isSetColumn();
               boolean that_present_column = true && that.isSetColumn();
               if (this_present_column || that_present_column) {
          -      if (!(this_present_column && that_present_column))
          -        return false;
          -      if (!this.column.equals(that.column))
          -        return false;
          +      if (!(this_present_column && that_present_column)) return false;
          +      if (!this.column.equals(that.column)) return false;
               }
           
               boolean this_present_ammount = true;
               boolean that_present_ammount = true;
               if (this_present_ammount || that_present_ammount) {
          -      if (!(this_present_ammount && that_present_ammount))
          -        return false;
          -      if (this.ammount != that.ammount)
          -        return false;
          +      if (!(this_present_ammount && that_present_ammount)) return false;
          +      if (this.ammount != that.ammount) return false;
               }
           
               return true;
          @@ -431,16 +460,13 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -    if (isSetTable())
          -      hashCode = hashCode * 8191 + table.hashCode();
          +    if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -    if (isSetColumn())
          -      hashCode = hashCode * 8191 + column.hashCode();
          +    if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
               hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(ammount);
           
          @@ -507,7 +533,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -554,37 +581,43 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TIncrementStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TIncrementStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TIncrementStandardScheme getScheme() {
                 return new TIncrementStandardScheme();
               }
             }
           
           -  private static class TIncrementStandardScheme extends org.apache.thrift.scheme.StandardScheme<TIncrement> {
           +  private static class TIncrementStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<TIncrement> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -592,7 +625,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.table = iprot.readBinary();
                         struct.setTableIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -600,7 +633,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -608,7 +641,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.column = iprot.readBinary();
                         struct.setColumnIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -616,7 +649,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.ammount = iprot.readI64();
                         struct.setAmmountIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -631,7 +664,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TIncrement struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TIncrement struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -659,17 +693,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TIncrement struct)
           
             }
           
          -  private static class TIncrementTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TIncrementTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TIncrementTupleScheme getScheme() {
                 return new TIncrementTupleScheme();
               }
             }
           
           -  private static class TIncrementTupleScheme extends org.apache.thrift.scheme.TupleScheme<TIncrement> {
           +  private static class TIncrementTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<TIncrement> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TIncrement struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TIncrement struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetTable()) {
                   optionals.set(0);
          @@ -699,8 +737,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TIncrement struct)
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TIncrement struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TIncrement struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(4);
                 if (incoming.get(0)) {
                   struct.table = iprot.readBinary();
          @@ -721,8 +761,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TIncrement struct) t
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TPermissionScope.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TPermissionScope.java
          index dc31e774d4b5..68db431ff235 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TPermissionScope.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TPermissionScope.java
          @@ -1,16 +1,26 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TPermissionScope implements org.apache.thrift.TEnum {
          -  TABLE(0),
          -  NAMESPACE(1);
          +  TABLE(0), NAMESPACE(1);
           
             private final int value;
           
          @@ -30,7 +40,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TPermissionScope findByValue(int value) { 
          +  public static TPermissionScope findByValue(int value) {
               switch (value) {
                 case 0:
                   return TABLE;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java
          index fdfb11aa8c87..327f53955dba 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java
          @@ -1,29 +1,58 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * A TRegionInfo contains information about an HTable region.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TRegionInfo implements org.apache.thrift.TBase<TRegionInfo, TRegionInfo._Fields>, java.io.Serializable, Cloneable, Comparable<TRegionInfo> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRegionInfo");
          -
          -  private static final org.apache.thrift.protocol.TField START_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("startKey", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField END_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("endKey", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short)3);
          -  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)4);
          -  private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.BYTE, (short)5);
          -  private static final org.apache.thrift.protocol.TField SERVER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("serverName", org.apache.thrift.protocol.TType.STRING, (short)6);
          -  private static final org.apache.thrift.protocol.TField PORT_FIELD_DESC = new org.apache.thrift.protocol.TField("port", org.apache.thrift.protocol.TType.I32, (short)7);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TRegionInfoStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TRegionInfoTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TRegionInfo implements org.apache.thrift.TBase<TRegionInfo, TRegionInfo._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TRegionInfo> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TRegionInfo");
          +
          +  private static final org.apache.thrift.protocol.TField START_KEY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("startKey", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField END_KEY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("endKey", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField ID_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short) 3);
          +  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.BYTE,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField SERVER_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("serverName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField PORT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("port", org.apache.thrift.protocol.TType.I32,
          +          (short) 7);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TRegionInfoStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TRegionInfoTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startKey; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer endKey; // required
@@ -33,17 +62,17 @@ public class TRegionInfo implements org.apache.thrift.TBase<TRegionInfo, TRegio
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -56,7 +85,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // START_KEY
                     return START_KEY;
                   case 2: // END_KEY
          @@ -77,12 +106,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -118,20 +147,36 @@ public java.lang.String getFieldName() {
             private byte __isset_bitfield = 0;
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.START_KEY, new org.apache.thrift.meta_data.FieldMetaData("startKey", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.END_KEY, new org.apache.thrift.meta_data.FieldMetaData("endKey", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.START_KEY,
          +      new org.apache.thrift.meta_data.FieldMetaData("startKey",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.END_KEY,
          +      new org.apache.thrift.meta_data.FieldMetaData("endKey",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.VERSION, new org.apache.thrift.meta_data.FieldMetaData("version", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    tmpMap.put(_Fields.NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("name",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.VERSION, new org.apache.thrift.meta_data.FieldMetaData("version",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE)));
          -    tmpMap.put(_Fields.SERVER_NAME, new org.apache.thrift.meta_data.FieldMetaData("serverName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.PORT, new org.apache.thrift.meta_data.FieldMetaData("port", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          +    tmpMap.put(_Fields.SERVER_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("serverName",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.PORT, new org.apache.thrift.meta_data.FieldMetaData("port",
          +        org.apache.thrift.TFieldRequirementType.DEFAULT,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRegionInfo.class, metaDataMap);
          @@ -140,15 +185,8 @@ public java.lang.String getFieldName() {
             public TRegionInfo() {
             }
           
          -  public TRegionInfo(
          -    java.nio.ByteBuffer startKey,
          -    java.nio.ByteBuffer endKey,
          -    long id,
          -    java.nio.ByteBuffer name,
          -    byte version,
          -    java.nio.ByteBuffer serverName,
          -    int port)
          -  {
          +  public TRegionInfo(java.nio.ByteBuffer startKey, java.nio.ByteBuffer endKey, long id,
          +      java.nio.ByteBuffer name, byte version, java.nio.ByteBuffer serverName, int port) {
               this();
               this.startKey = org.apache.thrift.TBaseHelper.copyBinary(startKey);
               this.endKey = org.apache.thrift.TBaseHelper.copyBinary(endKey);
          @@ -212,11 +250,13 @@ public java.nio.ByteBuffer bufferForStartKey() {
             }
           
             public TRegionInfo setStartKey(byte[] startKey) {
          -    this.startKey = startKey == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(startKey.clone());
          +    this.startKey =
          +        startKey == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(startKey.clone());
               return this;
             }
           
          -  public TRegionInfo setStartKey(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startKey) {
          +  public TRegionInfo
          +      setStartKey(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startKey) {
               this.startKey = org.apache.thrift.TBaseHelper.copyBinary(startKey);
               return this;
             }
          @@ -246,7 +286,8 @@ public java.nio.ByteBuffer bufferForEndKey() {
             }
           
             public TRegionInfo setEndKey(byte[] endKey) {
          -    this.endKey = endKey == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(endKey.clone());
          +    this.endKey =
          +        endKey == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(endKey.clone());
               return this;
             }
           
          @@ -290,7 +331,8 @@ public boolean isSetId() {
             }
           
             public void setIdIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
             }
           
             public byte[] getName() {
          @@ -303,7 +345,7 @@ public java.nio.ByteBuffer bufferForName() {
             }
           
             public TRegionInfo setName(byte[] name) {
          -    this.name = name == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(name.clone());
          +    this.name = name == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(name.clone());
               return this;
             }
           
          @@ -338,7 +380,8 @@ public TRegionInfo setVersion(byte version) {
             }
           
             public void unsetVersion() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __VERSION_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __VERSION_ISSET_ID);
             }
           
             /** Returns true if field version is set (has been assigned a value) and false otherwise */
          @@ -347,7 +390,8 @@ public boolean isSetVersion() {
             }
           
             public void setVersionIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __VERSION_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __VERSION_ISSET_ID, value);
             }
           
             public byte[] getServerName() {
          @@ -360,11 +404,13 @@ public java.nio.ByteBuffer bufferForServerName() {
             }
           
             public TRegionInfo setServerName(byte[] serverName) {
          -    this.serverName = serverName == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(serverName.clone());
          +    this.serverName = serverName == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(serverName.clone());
               return this;
             }
           
          -  public TRegionInfo setServerName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer serverName) {
          +  public TRegionInfo
          +      setServerName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer serverName) {
               this.serverName = org.apache.thrift.TBaseHelper.copyBinary(serverName);
               return this;
             }
          @@ -404,82 +450,84 @@ public boolean isSetPort() {
             }
           
             public void setPortIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __PORT_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __PORT_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case START_KEY:
          -      if (value == null) {
          -        unsetStartKey();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setStartKey((byte[])value);
          +      case START_KEY:
          +        if (value == null) {
          +          unsetStartKey();
                   } else {
          -          setStartKey((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setStartKey((byte[]) value);
          +          } else {
          +            setStartKey((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          -
          -    case END_KEY:
          -      if (value == null) {
          -        unsetEndKey();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setEndKey((byte[])value);
          +        break;
          +
          +      case END_KEY:
          +        if (value == null) {
          +          unsetEndKey();
                   } else {
          -          setEndKey((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setEndKey((byte[]) value);
          +          } else {
          +            setEndKey((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case ID:
          -      if (value == null) {
          -        unsetId();
          -      } else {
          -        setId((java.lang.Long)value);
          -      }
          -      break;
          -
          -    case NAME:
          -      if (value == null) {
          -        unsetName();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setName((byte[])value);
          +      case ID:
          +        if (value == null) {
          +          unsetId();
                   } else {
          -          setName((java.nio.ByteBuffer)value);
          +          setId((java.lang.Long) value);
                   }
          -      }
          -      break;
          +        break;
           
          -    case VERSION:
          -      if (value == null) {
          -        unsetVersion();
          -      } else {
          -        setVersion((java.lang.Byte)value);
          -      }
          -      break;
          -
          -    case SERVER_NAME:
          -      if (value == null) {
          -        unsetServerName();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setServerName((byte[])value);
          +      case NAME:
          +        if (value == null) {
          +          unsetName();
                   } else {
          -          setServerName((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setName((byte[]) value);
          +          } else {
          +            setName((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case PORT:
          -      if (value == null) {
          -        unsetPort();
          -      } else {
          -        setPort((java.lang.Integer)value);
          -      }
          -      break;
          +      case VERSION:
          +        if (value == null) {
          +          unsetVersion();
          +        } else {
          +          setVersion((java.lang.Byte) value);
          +        }
          +        break;
          +
          +      case SERVER_NAME:
          +        if (value == null) {
          +          unsetServerName();
          +        } else {
          +          if (value instanceof byte[]) {
          +            setServerName((byte[]) value);
          +          } else {
          +            setServerName((java.nio.ByteBuffer) value);
          +          }
          +        }
          +        break;
          +
          +      case PORT:
          +        if (value == null) {
          +          unsetPort();
          +        } else {
          +          setPort((java.lang.Integer) value);
          +        }
          +        break;
           
               }
             }
          @@ -487,130 +535,116 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case START_KEY:
          -      return getStartKey();
          +      case START_KEY:
          +        return getStartKey();
           
          -    case END_KEY:
          -      return getEndKey();
          +      case END_KEY:
          +        return getEndKey();
           
          -    case ID:
          -      return getId();
          +      case ID:
          +        return getId();
           
          -    case NAME:
          -      return getName();
          +      case NAME:
          +        return getName();
           
          -    case VERSION:
          -      return getVersion();
          +      case VERSION:
          +        return getVersion();
           
          -    case SERVER_NAME:
          -      return getServerName();
          +      case SERVER_NAME:
          +        return getServerName();
           
          -    case PORT:
          -      return getPort();
          +      case PORT:
          +        return getPort();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case START_KEY:
          -      return isSetStartKey();
          -    case END_KEY:
          -      return isSetEndKey();
          -    case ID:
          -      return isSetId();
          -    case NAME:
          -      return isSetName();
          -    case VERSION:
          -      return isSetVersion();
          -    case SERVER_NAME:
          -      return isSetServerName();
          -    case PORT:
          -      return isSetPort();
          +      case START_KEY:
          +        return isSetStartKey();
          +      case END_KEY:
          +        return isSetEndKey();
          +      case ID:
          +        return isSetId();
          +      case NAME:
          +        return isSetName();
          +      case VERSION:
          +        return isSetVersion();
          +      case SERVER_NAME:
          +        return isSetServerName();
          +      case PORT:
          +        return isSetPort();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TRegionInfo)
          -      return this.equals((TRegionInfo)that);
          +    if (that instanceof TRegionInfo) return this.equals((TRegionInfo) that);
               return false;
             }
           
             public boolean equals(TRegionInfo that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_startKey = true && this.isSetStartKey();
               boolean that_present_startKey = true && that.isSetStartKey();
               if (this_present_startKey || that_present_startKey) {
          -      if (!(this_present_startKey && that_present_startKey))
          -        return false;
          -      if (!this.startKey.equals(that.startKey))
          -        return false;
          +      if (!(this_present_startKey && that_present_startKey)) return false;
          +      if (!this.startKey.equals(that.startKey)) return false;
               }
           
               boolean this_present_endKey = true && this.isSetEndKey();
               boolean that_present_endKey = true && that.isSetEndKey();
               if (this_present_endKey || that_present_endKey) {
          -      if (!(this_present_endKey && that_present_endKey))
          -        return false;
          -      if (!this.endKey.equals(that.endKey))
          -        return false;
          +      if (!(this_present_endKey && that_present_endKey)) return false;
          +      if (!this.endKey.equals(that.endKey)) return false;
               }
           
               boolean this_present_id = true;
               boolean that_present_id = true;
               if (this_present_id || that_present_id) {
          -      if (!(this_present_id && that_present_id))
          -        return false;
          -      if (this.id != that.id)
          -        return false;
          +      if (!(this_present_id && that_present_id)) return false;
          +      if (this.id != that.id) return false;
               }
           
               boolean this_present_name = true && this.isSetName();
               boolean that_present_name = true && that.isSetName();
               if (this_present_name || that_present_name) {
          -      if (!(this_present_name && that_present_name))
          -        return false;
          -      if (!this.name.equals(that.name))
          -        return false;
          +      if (!(this_present_name && that_present_name)) return false;
          +      if (!this.name.equals(that.name)) return false;
               }
           
               boolean this_present_version = true;
               boolean that_present_version = true;
               if (this_present_version || that_present_version) {
          -      if (!(this_present_version && that_present_version))
          -        return false;
          -      if (this.version != that.version)
          -        return false;
          +      if (!(this_present_version && that_present_version)) return false;
          +      if (this.version != that.version) return false;
               }
           
               boolean this_present_serverName = true && this.isSetServerName();
               boolean that_present_serverName = true && that.isSetServerName();
               if (this_present_serverName || that_present_serverName) {
          -      if (!(this_present_serverName && that_present_serverName))
          -        return false;
          -      if (!this.serverName.equals(that.serverName))
          -        return false;
          +      if (!(this_present_serverName && that_present_serverName)) return false;
          +      if (!this.serverName.equals(that.serverName)) return false;
               }
           
               boolean this_present_port = true;
               boolean that_present_port = true;
               if (this_present_port || that_present_port) {
          -      if (!(this_present_port && that_present_port))
          -        return false;
          -      if (this.port != that.port)
          -        return false;
          +      if (!(this_present_port && that_present_port)) return false;
          +      if (this.port != that.port) return false;
               }
           
               return true;
          @@ -621,24 +655,20 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetStartKey()) ? 131071 : 524287);
          -    if (isSetStartKey())
          -      hashCode = hashCode * 8191 + startKey.hashCode();
          +    if (isSetStartKey()) hashCode = hashCode * 8191 + startKey.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetEndKey()) ? 131071 : 524287);
          -    if (isSetEndKey())
          -      hashCode = hashCode * 8191 + endKey.hashCode();
          +    if (isSetEndKey()) hashCode = hashCode * 8191 + endKey.hashCode();
           
               hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(id);
           
               hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
          -    if (isSetName())
          -      hashCode = hashCode * 8191 + name.hashCode();
          +    if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
           
               hashCode = hashCode * 8191 + (int) (version);
           
               hashCode = hashCode * 8191 + ((isSetServerName()) ? 131071 : 524287);
          -    if (isSetServerName())
          -      hashCode = hashCode * 8191 + serverName.hashCode();
          +    if (isSetServerName()) hashCode = hashCode * 8191 + serverName.hashCode();
           
               hashCode = hashCode * 8191 + port;
           
          @@ -735,7 +765,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -798,37 +829,43 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TRegionInfoStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TRegionInfoStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TRegionInfoStandardScheme getScheme() {
                 return new TRegionInfoStandardScheme();
               }
             }
           
-  private static class TRegionInfoStandardScheme extends org.apache.thrift.scheme.StandardScheme<TRegionInfo> {
+  private static class TRegionInfoStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<TRegionInfo> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -836,7 +873,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.startKey = iprot.readBinary();
                         struct.setStartKeyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -844,7 +881,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.endKey = iprot.readBinary();
                         struct.setEndKeyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -852,7 +889,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.id = iprot.readI64();
                         struct.setIdIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -860,7 +897,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.name = iprot.readBinary();
                         struct.setNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -868,7 +905,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) {
                         struct.version = iprot.readByte();
                         struct.setVersionIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -876,7 +913,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.serverName = iprot.readBinary();
                         struct.setServerNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -884,7 +921,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.port = iprot.readI32();
                         struct.setPortIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -899,7 +936,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRegionInfo struct)
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TRegionInfo struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TRegionInfo struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -938,17 +976,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TRegionInfo struct
           
             }
           
          -  private static class TRegionInfoTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TRegionInfoTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TRegionInfoTupleScheme getScheme() {
                 return new TRegionInfoTupleScheme();
               }
             }
           
-  private static class TRegionInfoTupleScheme extends org.apache.thrift.scheme.TupleScheme<TRegionInfo> {
+  private static class TRegionInfoTupleScheme
+      extends org.apache.thrift.scheme.TupleScheme<TRegionInfo> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TRegionInfo struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TRegionInfo struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetStartKey()) {
                   optionals.set(0);
          @@ -996,8 +1038,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TRegionInfo struct)
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TRegionInfo struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TRegionInfo struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(7);
                 if (incoming.get(0)) {
                   struct.startKey = iprot.readBinary();
          @@ -1030,8 +1074,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TRegionInfo struct)
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
+      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java
          index d3959114e728..f1af2052b577 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java
          @@ -1,37 +1,62 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * Holds row name and then a map of columns to cells.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TRowResult implements org.apache.thrift.TBase<TRowResult, TRowResult._Fields>, java.io.Serializable, Cloneable, Comparable<TRowResult> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowResult");
          -
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.MAP, (short)2);
          -  private static final org.apache.thrift.protocol.TField SORTED_COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("sortedColumns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TRowResultStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TRowResultTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TRowResult implements org.apache.thrift.TBase<TRowResult, TRowResult._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TRowResult> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TRowResult");
          +
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.MAP,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField SORTED_COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("sortedColumns", org.apache.thrift.protocol.TType.LIST,
          +          (short) 3);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TRowResultStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TRowResultTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
          -  public @org.apache.thrift.annotation.Nullable java.util.Map columns; // optional
-  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,TCell> columns; // optional
   public @org.apache.thrift.annotation.Nullable java.util.List<TColumn> sortedColumns; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    ROW((short)1, "row"),
          -    COLUMNS((short)2, "columns"),
          -    SORTED_COLUMNS((short)3, "sortedColumns");
          +    ROW((short) 1, "row"), COLUMNS((short) 2, "columns"),
          +    SORTED_COLUMNS((short) 3, "sortedColumns");
           
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -44,7 +69,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // ROW
                     return ROW;
                   case 2: // COLUMNS
          @@ -57,12 +82,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -92,19 +117,28 @@ public java.lang.String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.COLUMNS,_Fields.SORTED_COLUMNS};
          +  private static final _Fields optionals[] = { _Fields.COLUMNS, _Fields.SORTED_COLUMNS };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , "Text"), 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCell.class))));
          -    tmpMap.put(_Fields.SORTED_COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("sortedColumns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumn.class))));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.DEFAULT,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, "Text"),
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TCell.class))));
          +    tmpMap.put(_Fields.SORTED_COLUMNS, new org.apache.thrift.meta_data.FieldMetaData(
          +        "sortedColumns", org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumn.class))));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRowResult.class, metaDataMap);
             }
          @@ -112,9 +146,7 @@ public java.lang.String getFieldName() {
             public TRowResult() {
             }
           
          -  public TRowResult(
          -    java.nio.ByteBuffer row)
          -  {
          +  public TRowResult(java.nio.ByteBuffer row) {
               this();
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
             }
          @@ -127,13 +159,16 @@ public TRowResult(TRowResult other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetColumns()) {
-      java.util.Map<java.nio.ByteBuffer,TCell> __this__columns = new java.util.HashMap<java.nio.ByteBuffer,TCell>(other.columns.size());
-      for (java.util.Map.Entry<java.nio.ByteBuffer,TCell> other_element : other.columns.entrySet()) {
+      java.util.Map<java.nio.ByteBuffer, TCell> __this__columns =
+          new java.util.HashMap<java.nio.ByteBuffer, TCell>(other.columns.size());
+      for (java.util.Map.Entry<java.nio.ByteBuffer, TCell> other_element : other.columns
+          .entrySet()) {
           
                   java.nio.ByteBuffer other_element_key = other_element.getKey();
                   TCell other_element_value = other_element.getValue();
           
          -        java.nio.ByteBuffer __this__columns_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +        java.nio.ByteBuffer __this__columns_copy_key =
          +            org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
                   TCell __this__columns_copy_value = new TCell(other_element_value);
           
          @@ -142,7 +177,8 @@ public TRowResult(TRowResult other) {
                 this.columns = __this__columns;
               }
               if (other.isSetSortedColumns()) {
          -      java.util.List<TColumn> __this__sortedColumns = new java.util.ArrayList<TColumn>(other.sortedColumns.size());
          +      java.util.List<TColumn> __this__sortedColumns =
          +          new java.util.ArrayList<TColumn>(other.sortedColumns.size());
                 for (TColumn other_element : other.sortedColumns) {
                   __this__sortedColumns.add(new TColumn(other_element));
                 }
          @@ -171,7 +207,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TRowResult setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
          @@ -201,17 +237,18 @@ public int getColumnsSize() {
           
             public void putToColumns(java.nio.ByteBuffer key, TCell val) {
               if (this.columns == null) {
          -      this.columns = new java.util.HashMap<java.nio.ByteBuffer,TCell>();
          +      this.columns = new java.util.HashMap<java.nio.ByteBuffer, TCell>();
               }
               this.columns.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
          -  public java.util.Map<java.nio.ByteBuffer,TCell> getColumns() {
          +  public java.util.Map<java.nio.ByteBuffer, TCell> getColumns() {
               return this.columns;
             }
           
          -  public TRowResult setColumns(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,TCell> columns) {
          +  public TRowResult setColumns(
          +      @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, TCell> columns) {
               this.columns = columns;
               return this;
             }
          @@ -252,7 +289,8 @@ public java.util.List<TColumn> getSortedColumns() {
               return this.sortedColumns;
             }
           
          -  public TRowResult setSortedColumns(@org.apache.thrift.annotation.Nullable java.util.List<TColumn> sortedColumns) {
          +  public TRowResult setSortedColumns(
          +      @org.apache.thrift.annotation.Nullable java.util.List<TColumn> sortedColumns) {
               this.sortedColumns = sortedColumns;
               return this;
             }
          @@ -272,35 +310,36 @@ public void setSortedColumnsIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case COLUMNS:
          -      if (value == null) {
          -        unsetColumns();
          -      } else {
          -        setColumns((java.util.Map<java.nio.ByteBuffer,TCell>)value);
          -      }
          -      break;
          +      case COLUMNS:
          +        if (value == null) {
          +          unsetColumns();
          +        } else {
          +          setColumns((java.util.Map<java.nio.ByteBuffer, TCell>) value);
          +        }
          +        break;
           
          -    case SORTED_COLUMNS:
          -      if (value == null) {
          -        unsetSortedColumns();
          -      } else {
          -        setSortedColumns((java.util.List<TColumn>)value);
          -      }
          -      break;
          +      case SORTED_COLUMNS:
          +        if (value == null) {
          +          unsetSortedColumns();
          +        } else {
          +            setSortedColumns((java.util.List<TColumn>) value);
          +        }
          +        break;
           
               }
             }
          @@ -308,74 +347,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case COLUMNS:
          -      return getColumns();
          +      case COLUMNS:
          +        return getColumns();
           
          -    case SORTED_COLUMNS:
          -      return getSortedColumns();
          +      case SORTED_COLUMNS:
          +        return getSortedColumns();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case ROW:
          -      return isSetRow();
          -    case COLUMNS:
          -      return isSetColumns();
          -    case SORTED_COLUMNS:
          -      return isSetSortedColumns();
          +      case ROW:
          +        return isSetRow();
          +      case COLUMNS:
          +        return isSetColumns();
          +      case SORTED_COLUMNS:
          +        return isSetSortedColumns();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TRowResult)
          -      return this.equals((TRowResult)that);
          +    if (that instanceof TRowResult) return this.equals((TRowResult) that);
               return false;
             }
           
             public boolean equals(TRowResult that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_columns = true && this.isSetColumns();
               boolean that_present_columns = true && that.isSetColumns();
               if (this_present_columns || that_present_columns) {
          -      if (!(this_present_columns && that_present_columns))
          -        return false;
          -      if (!this.columns.equals(that.columns))
          -        return false;
          +      if (!(this_present_columns && that_present_columns)) return false;
          +      if (!this.columns.equals(that.columns)) return false;
               }
           
               boolean this_present_sortedColumns = true && this.isSetSortedColumns();
               boolean that_present_sortedColumns = true && that.isSetSortedColumns();
               if (this_present_sortedColumns || that_present_sortedColumns) {
          -      if (!(this_present_sortedColumns && that_present_sortedColumns))
          -        return false;
          -      if (!this.sortedColumns.equals(that.sortedColumns))
          -        return false;
          +      if (!(this_present_sortedColumns && that_present_sortedColumns)) return false;
          +      if (!this.sortedColumns.equals(that.sortedColumns)) return false;
               }
           
               return true;
          @@ -386,16 +419,13 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -    if (isSetColumns())
          -      hashCode = hashCode * 8191 + columns.hashCode();
          +    if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetSortedColumns()) ? 131071 : 524287);
          -    if (isSetSortedColumns())
          -      hashCode = hashCode * 8191 + sortedColumns.hashCode();
          +    if (isSetSortedColumns()) hashCode = hashCode * 8191 + sortedColumns.hashCode();
           
               return hashCode;
             }
          @@ -433,7 +463,8 @@ public int compareTo(TRowResult other) {
                 return lastComparison;
               }
               if (isSetSortedColumns()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sortedColumns, other.sortedColumns);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.sortedColumns, other.sortedColumns);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -450,7 +481,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -497,35 +529,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TRowResultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TRowResultStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TRowResultStandardScheme getScheme() {
                 return new TRowResultStandardScheme();
               }
             }
           
          -  private static class TRowResultStandardScheme extends org.apache.thrift.scheme.StandardScheme<TRowResult> {
          +  private static class TRowResultStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<TRowResult> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TRowResult struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TRowResult struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -533,7 +570,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowResult struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -541,11 +578,12 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowResult struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map8 = iprot.readMapBegin();
          -                struct.columns = new java.util.HashMap<java.nio.ByteBuffer,TCell>(2*_map8.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key9;
          -                @org.apache.thrift.annotation.Nullable TCell _val10;
          -                for (int _i11 = 0; _i11 < _map8.size; ++_i11)
          -                {
          +                struct.columns = new java.util.HashMap<java.nio.ByteBuffer, TCell>(2 * _map8.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key9;
          +                @org.apache.thrift.annotation.Nullable
          +                TCell _val10;
          +                for (int _i11 = 0; _i11 < _map8.size; ++_i11) {
                             _key9 = iprot.readBinary();
                             _val10 = new TCell();
                             _val10.read(iprot);
          @@ -554,7 +592,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowResult struct)
                           iprot.readMapEnd();
                         }
                         struct.setColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -563,9 +601,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowResult struct)
                         {
                           org.apache.thrift.protocol.TList _list12 = iprot.readListBegin();
                           struct.sortedColumns = new java.util.ArrayList<TColumn>(_list12.size);
          -                @org.apache.thrift.annotation.Nullable TColumn _elem13;
          -                for (int _i14 = 0; _i14 < _list12.size; ++_i14)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TColumn _elem13;
          +                for (int _i14 = 0; _i14 < _list12.size; ++_i14) {
                             _elem13 = new TColumn();
                             _elem13.read(iprot);
                             struct.sortedColumns.add(_elem13);
          @@ -573,7 +611,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowResult struct)
                           iprot.readListEnd();
                         }
                         struct.setSortedColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -588,7 +626,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowResult struct)
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TRowResult struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TRowResult struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -601,9 +640,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TRowResult struct)
                   if (struct.isSetColumns()) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          -            for (java.util.Map.Entry<java.nio.ByteBuffer, TCell> _iter15 : struct.columns.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          +            for (java.util.Map.Entry<java.nio.ByteBuffer, TCell> _iter15 : struct.columns
          +                .entrySet()) {
                         oprot.writeBinary(_iter15.getKey());
                         _iter15.getValue().write(oprot);
                       }
          @@ -616,9 +657,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TRowResult struct)
                   if (struct.isSetSortedColumns()) {
                     oprot.writeFieldBegin(SORTED_COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.sortedColumns.size()));
          -            for (TColumn _iter16 : struct.sortedColumns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.sortedColumns.size()));
          +            for (TColumn _iter16 : struct.sortedColumns) {
                         _iter16.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -632,17 +673,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TRowResult struct)
           
             }
           
          -  private static class TRowResultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TRowResultTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TRowResultTupleScheme getScheme() {
                 return new TRowResultTupleScheme();
               }
             }
           
          -  private static class TRowResultTupleScheme extends org.apache.thrift.scheme.TupleScheme<TRowResult> {
          +  private static class TRowResultTupleScheme
          +      extends org.apache.thrift.scheme.TupleScheme<TRowResult> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TRowResult struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TRowResult struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetRow()) {
                   optionals.set(0);
          @@ -660,8 +705,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TRowResult struct)
                 if (struct.isSetColumns()) {
                   {
                     oprot.writeI32(struct.columns.size());
          -          for (java.util.Map.Entry<java.nio.ByteBuffer, TCell> _iter17 : struct.columns.entrySet())
          -          {
          +          for (java.util.Map.Entry<java.nio.ByteBuffer, TCell> _iter17 : struct.columns
          +              .entrySet()) {
                       oprot.writeBinary(_iter17.getKey());
                       _iter17.getValue().write(oprot);
                     }
          @@ -670,8 +715,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TRowResult struct)
                 if (struct.isSetSortedColumns()) {
                   {
                     oprot.writeI32(struct.sortedColumns.size());
          -          for (TColumn _iter18 : struct.sortedColumns)
          -          {
          +          for (TColumn _iter18 : struct.sortedColumns) {
                       _iter18.write(oprot);
                     }
                   }
          @@ -679,8 +723,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TRowResult struct)
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TRowResult struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TRowResult struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(3);
                 if (incoming.get(0)) {
                   struct.row = iprot.readBinary();
          @@ -688,12 +734,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TRowResult struct) t
                 }
                 if (incoming.get(1)) {
                   {
          -          org.apache.thrift.protocol.TMap _map19 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT); 
          -          struct.columns = new java.util.HashMap<java.nio.ByteBuffer,TCell>(2*_map19.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key20;
          -          @org.apache.thrift.annotation.Nullable TCell _val21;
          -          for (int _i22 = 0; _i22 < _map19.size; ++_i22)
          -          {
          +          org.apache.thrift.protocol.TMap _map19 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT);
          +          struct.columns = new java.util.HashMap<java.nio.ByteBuffer, TCell>(2 * _map19.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key20;
          +          @org.apache.thrift.annotation.Nullable
          +          TCell _val21;
          +          for (int _i22 = 0; _i22 < _map19.size; ++_i22) {
                       _key20 = iprot.readBinary();
                       _val21 = new TCell();
                       _val21.read(iprot);
          @@ -704,11 +752,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TRowResult struct) t
                 }
                 if (incoming.get(2)) {
                   {
          -          org.apache.thrift.protocol.TList _list23 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list23 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                     struct.sortedColumns = new java.util.ArrayList<TColumn>(_list23.size);
          -          @org.apache.thrift.annotation.Nullable TColumn _elem24;
          -          for (int _i25 = 0; _i25 < _list23.size; ++_i25)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TColumn _elem24;
          +          for (int _i25 = 0; _i25 < _list23.size; ++_i25) {
                       _elem24 = new TColumn();
                       _elem24.read(iprot);
                       struct.sortedColumns.add(_elem24);
          @@ -719,8 +768,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TRowResult struct) t
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java
          index ec486accf3ba..de6ed02c97e8 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java
          @@ -1,32 +1,68 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * A Scan object is used to specify scanner parameters when opening a scanner.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, java.io.Serializable, Cloneable, Comparable<TScan> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan");
          -
          -  private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField STOP_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("stopRow", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3);
          -  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)4);
          -  private static final org.apache.thrift.protocol.TField CACHING_FIELD_DESC = new org.apache.thrift.protocol.TField("caching", org.apache.thrift.protocol.TType.I32, (short)5);
          -  private static final org.apache.thrift.protocol.TField FILTER_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("filterString", org.apache.thrift.protocol.TType.STRING, (short)6);
          -  private static final org.apache.thrift.protocol.TField BATCH_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("batchSize", org.apache.thrift.protocol.TType.I32, (short)7);
          -  private static final org.apache.thrift.protocol.TField SORT_COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("sortColumns", org.apache.thrift.protocol.TType.BOOL, (short)8);
          -  private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC = new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL, (short)9);
          -  private static final org.apache.thrift.protocol.TField CACHE_BLOCKS_FIELD_DESC = new org.apache.thrift.protocol.TField("cacheBlocks", org.apache.thrift.protocol.TType.BOOL, (short)10);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TScanStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TScanTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, java.io.Serializable,
          +    Cloneable, Comparable<TScan> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TScan");
          +
          +  private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField STOP_ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("stopRow", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField CACHING_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("caching", org.apache.thrift.protocol.TType.I32,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField FILTER_STRING_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("filterString", org.apache.thrift.protocol.TType.STRING,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField BATCH_SIZE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("batchSize", org.apache.thrift.protocol.TType.I32,
          +          (short) 7);
          +  private static final org.apache.thrift.protocol.TField SORT_COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("sortColumns", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 8);
          +  private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 9);
          +  private static final org.apache.thrift.protocol.TField CACHE_BLOCKS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cacheBlocks", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 10);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TScanStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TScanTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow; // optional
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer stopRow; // optional
          @@ -39,20 +75,19 @@ public class TScan implements org.apache.thrift.TBase, jav
             public boolean reversed; // optional
             public boolean cacheBlocks; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    START_ROW((short)1, "startRow"),
          -    STOP_ROW((short)2, "stopRow"),
          -    TIMESTAMP((short)3, "timestamp"),
          -    COLUMNS((short)4, "columns"),
          -    CACHING((short)5, "caching"),
          -    FILTER_STRING((short)6, "filterString"),
          -    BATCH_SIZE((short)7, "batchSize"),
          -    SORT_COLUMNS((short)8, "sortColumns"),
          -    REVERSED((short)9, "reversed"),
          -    CACHE_BLOCKS((short)10, "cacheBlocks");
          -
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    START_ROW((short) 1, "startRow"), STOP_ROW((short) 2, "stopRow"),
          +    TIMESTAMP((short) 3, "timestamp"), COLUMNS((short) 4, "columns"), CACHING((short) 5, "caching"),
          +    FILTER_STRING((short) 6, "filterString"), BATCH_SIZE((short) 7, "batchSize"),
          +    SORT_COLUMNS((short) 8, "sortColumns"), REVERSED((short) 9, "reversed"),
          +    CACHE_BLOCKS((short) 10, "cacheBlocks");
          +
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -65,7 +100,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // START_ROW
                     return START_ROW;
                   case 2: // STOP_ROW
          @@ -92,12 +127,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -134,30 +169,51 @@ public java.lang.String getFieldName() {
             private static final int __REVERSED_ISSET_ID = 4;
             private static final int __CACHEBLOCKS_ISSET_ID = 5;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.TIMESTAMP,_Fields.COLUMNS,_Fields.CACHING,_Fields.FILTER_STRING,_Fields.BATCH_SIZE,_Fields.SORT_COLUMNS,_Fields.REVERSED,_Fields.CACHE_BLOCKS};
          +  private static final _Fields optionals[] = { _Fields.START_ROW, _Fields.STOP_ROW,
          +      _Fields.TIMESTAMP, _Fields.COLUMNS, _Fields.CACHING, _Fields.FILTER_STRING,
          +      _Fields.BATCH_SIZE, _Fields.SORT_COLUMNS, _Fields.REVERSED, _Fields.CACHE_BLOCKS };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.START_ROW, new org.apache.thrift.meta_data.FieldMetaData("startRow", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.STOP_ROW, new org.apache.thrift.meta_data.FieldMetaData("stopRow", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.START_ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("startRow",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.STOP_ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("stopRow",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , "Text"))));
          -    tmpMap.put(_Fields.CACHING, new org.apache.thrift.meta_data.FieldMetaData("caching", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.COLUMNS,
          +      new org.apache.thrift.meta_data.FieldMetaData("columns",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, "Text"))));
          +    tmpMap.put(_Fields.CACHING, new org.apache.thrift.meta_data.FieldMetaData("caching",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.FILTER_STRING, new org.apache.thrift.meta_data.FieldMetaData("filterString", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , "Text")));
          -    tmpMap.put(_Fields.BATCH_SIZE, new org.apache.thrift.meta_data.FieldMetaData("batchSize", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.FILTER_STRING,
          +      new org.apache.thrift.meta_data.FieldMetaData("filterString",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, "Text")));
          +    tmpMap.put(_Fields.BATCH_SIZE, new org.apache.thrift.meta_data.FieldMetaData("batchSize",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.SORT_COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("sortColumns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.SORT_COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("sortColumns",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.REVERSED, new org.apache.thrift.meta_data.FieldMetaData("reversed", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.REVERSED, new org.apache.thrift.meta_data.FieldMetaData("reversed",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.CACHE_BLOCKS, new org.apache.thrift.meta_data.FieldMetaData("cacheBlocks", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.CACHE_BLOCKS, new org.apache.thrift.meta_data.FieldMetaData("cacheBlocks",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TScan.class, metaDataMap);
          @@ -179,7 +235,8 @@ public TScan(TScan other) {
               }
               this.timestamp = other.timestamp;
               if (other.isSetColumns()) {
          -      java.util.List<java.nio.ByteBuffer> __this__columns = new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
          +      java.util.List<java.nio.ByteBuffer> __this__columns =
          +          new java.util.ArrayList<java.nio.ByteBuffer>(other.columns.size());
                 for (java.nio.ByteBuffer other_element : other.columns) {
                   __this__columns.add(org.apache.thrift.TBaseHelper.copyBinary(other_element));
                 }
          @@ -229,7 +286,8 @@ public java.nio.ByteBuffer bufferForStartRow() {
             }
           
             public TScan setStartRow(byte[] startRow) {
          -    this.startRow = startRow == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(startRow.clone());
          +    this.startRow =
          +        startRow == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(startRow.clone());
               return this;
             }
           
          @@ -263,7 +321,8 @@ public java.nio.ByteBuffer bufferForStopRow() {
             }
           
             public TScan setStopRow(byte[] stopRow) {
          -    this.stopRow = stopRow == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(stopRow.clone());
          +    this.stopRow =
          +        stopRow == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(stopRow.clone());
               return this;
             }
           
          @@ -298,7 +357,8 @@ public TScan setTimestamp(long timestamp) {
             }
           
             public void unsetTimestamp() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
             }
           
             /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -307,7 +367,8 @@ public boolean isSetTimestamp() {
             }
           
             public void setTimestampIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
             }
           
             public int getColumnsSize() {
          @@ -331,7 +392,8 @@ public java.util.List<java.nio.ByteBuffer> getColumns() {
               return this.columns;
             }
           
          -  public TScan setColumns(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
          +  public TScan setColumns(
          +      @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> columns) {
               this.columns = columns;
               return this;
             }
          @@ -362,7 +424,8 @@ public TScan setCaching(int caching) {
             }
           
             public void unsetCaching() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHING_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHING_ISSET_ID);
             }
           
             /** Returns true if field caching is set (has been assigned a value) and false otherwise */
          @@ -371,7 +434,8 @@ public boolean isSetCaching() {
             }
           
             public void setCachingIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHING_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHING_ISSET_ID, value);
             }
           
             public byte[] getFilterString() {
          @@ -384,11 +448,13 @@ public java.nio.ByteBuffer bufferForFilterString() {
             }
           
             public TScan setFilterString(byte[] filterString) {
          -    this.filterString = filterString == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(filterString.clone());
          +    this.filterString = filterString == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(filterString.clone());
               return this;
             }
           
          -  public TScan setFilterString(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterString) {
          +  public TScan
          +      setFilterString(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterString) {
               this.filterString = org.apache.thrift.TBaseHelper.copyBinary(filterString);
               return this;
             }
          @@ -419,7 +485,8 @@ public TScan setBatchSize(int batchSize) {
             }
           
             public void unsetBatchSize() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BATCHSIZE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BATCHSIZE_ISSET_ID);
             }
           
             /** Returns true if field batchSize is set (has been assigned a value) and false otherwise */
          @@ -428,7 +495,8 @@ public boolean isSetBatchSize() {
             }
           
             public void setBatchSizeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BATCHSIZE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BATCHSIZE_ISSET_ID, value);
             }
           
             public boolean isSortColumns() {
          @@ -442,7 +510,8 @@ public TScan setSortColumns(boolean sortColumns) {
             }
           
             public void unsetSortColumns() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SORTCOLUMNS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SORTCOLUMNS_ISSET_ID);
             }
           
             /** Returns true if field sortColumns is set (has been assigned a value) and false otherwise */
          @@ -451,7 +520,8 @@ public boolean isSetSortColumns() {
             }
           
             public void setSortColumnsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SORTCOLUMNS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SORTCOLUMNS_ISSET_ID, value);
             }
           
             public boolean isReversed() {
          @@ -465,7 +535,8 @@ public TScan setReversed(boolean reversed) {
             }
           
             public void unsetReversed() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __REVERSED_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __REVERSED_ISSET_ID);
             }
           
             /** Returns true if field reversed is set (has been assigned a value) and false otherwise */
          @@ -474,7 +545,8 @@ public boolean isSetReversed() {
             }
           
             public void setReversedIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __REVERSED_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __REVERSED_ISSET_ID, value);
             }
           
             public boolean isCacheBlocks() {
          @@ -488,7 +560,8 @@ public TScan setCacheBlocks(boolean cacheBlocks) {
             }
           
             public void unsetCacheBlocks() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID);
             }
           
             /** Returns true if field cacheBlocks is set (has been assigned a value) and false otherwise */
          @@ -497,102 +570,104 @@ public boolean isSetCacheBlocks() {
             }
           
             public void setCacheBlocksIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case START_ROW:
          -      if (value == null) {
          -        unsetStartRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setStartRow((byte[])value);
          +      case START_ROW:
          +        if (value == null) {
          +          unsetStartRow();
                   } else {
          -          setStartRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setStartRow((byte[]) value);
          +          } else {
          +            setStartRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case STOP_ROW:
          -      if (value == null) {
          -        unsetStopRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setStopRow((byte[])value);
          +      case STOP_ROW:
          +        if (value == null) {
          +          unsetStopRow();
                   } else {
          -          setStopRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setStopRow((byte[]) value);
          +          } else {
          +            setStopRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case TIMESTAMP:
          -      if (value == null) {
          -        unsetTimestamp();
          -      } else {
          -        setTimestamp((java.lang.Long)value);
          -      }
          -      break;
          +      case TIMESTAMP:
          +        if (value == null) {
          +          unsetTimestamp();
          +        } else {
          +          setTimestamp((java.lang.Long) value);
          +        }
          +        break;
           
          -    case COLUMNS:
          -      if (value == null) {
          -        unsetColumns();
          -      } else {
          -        setColumns((java.util.List<java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case COLUMNS:
          +        if (value == null) {
          +          unsetColumns();
          +        } else {
          +          setColumns((java.util.List<java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case CACHING:
          -      if (value == null) {
          -        unsetCaching();
          -      } else {
          -        setCaching((java.lang.Integer)value);
          -      }
          -      break;
          +      case CACHING:
          +        if (value == null) {
          +          unsetCaching();
          +        } else {
          +          setCaching((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case FILTER_STRING:
          -      if (value == null) {
          -        unsetFilterString();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setFilterString((byte[])value);
          +      case FILTER_STRING:
          +        if (value == null) {
          +          unsetFilterString();
                   } else {
          -          setFilterString((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setFilterString((byte[]) value);
          +          } else {
          +            setFilterString((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case BATCH_SIZE:
          -      if (value == null) {
          -        unsetBatchSize();
          -      } else {
          -        setBatchSize((java.lang.Integer)value);
          -      }
          -      break;
          +      case BATCH_SIZE:
          +        if (value == null) {
          +          unsetBatchSize();
          +        } else {
          +          setBatchSize((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case SORT_COLUMNS:
          -      if (value == null) {
          -        unsetSortColumns();
          -      } else {
          -        setSortColumns((java.lang.Boolean)value);
          -      }
          -      break;
          +      case SORT_COLUMNS:
          +        if (value == null) {
          +          unsetSortColumns();
          +        } else {
          +          setSortColumns((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case REVERSED:
          -      if (value == null) {
          -        unsetReversed();
          -      } else {
          -        setReversed((java.lang.Boolean)value);
          -      }
          -      break;
          +      case REVERSED:
          +        if (value == null) {
          +          unsetReversed();
          +        } else {
          +          setReversed((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case CACHE_BLOCKS:
          -      if (value == null) {
          -        unsetCacheBlocks();
          -      } else {
          -        setCacheBlocks((java.lang.Boolean)value);
          -      }
          -      break;
          +      case CACHE_BLOCKS:
          +        if (value == null) {
          +          unsetCacheBlocks();
          +        } else {
          +          setCacheBlocks((java.lang.Boolean) value);
          +        }
          +        break;
           
               }
             }
          @@ -600,172 +675,152 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case START_ROW:
          -      return getStartRow();
          +      case START_ROW:
          +        return getStartRow();
           
          -    case STOP_ROW:
          -      return getStopRow();
          +      case STOP_ROW:
          +        return getStopRow();
           
          -    case TIMESTAMP:
          -      return getTimestamp();
          +      case TIMESTAMP:
          +        return getTimestamp();
           
          -    case COLUMNS:
          -      return getColumns();
          +      case COLUMNS:
          +        return getColumns();
           
          -    case CACHING:
          -      return getCaching();
          +      case CACHING:
          +        return getCaching();
           
          -    case FILTER_STRING:
          -      return getFilterString();
          +      case FILTER_STRING:
          +        return getFilterString();
           
          -    case BATCH_SIZE:
          -      return getBatchSize();
          +      case BATCH_SIZE:
          +        return getBatchSize();
           
          -    case SORT_COLUMNS:
          -      return isSortColumns();
          +      case SORT_COLUMNS:
          +        return isSortColumns();
           
          -    case REVERSED:
          -      return isReversed();
          +      case REVERSED:
          +        return isReversed();
           
          -    case CACHE_BLOCKS:
          -      return isCacheBlocks();
          +      case CACHE_BLOCKS:
          +        return isCacheBlocks();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case START_ROW:
          -      return isSetStartRow();
          -    case STOP_ROW:
          -      return isSetStopRow();
          -    case TIMESTAMP:
          -      return isSetTimestamp();
          -    case COLUMNS:
          -      return isSetColumns();
          -    case CACHING:
          -      return isSetCaching();
          -    case FILTER_STRING:
          -      return isSetFilterString();
          -    case BATCH_SIZE:
          -      return isSetBatchSize();
          -    case SORT_COLUMNS:
          -      return isSetSortColumns();
          -    case REVERSED:
          -      return isSetReversed();
          -    case CACHE_BLOCKS:
          -      return isSetCacheBlocks();
          +      case START_ROW:
          +        return isSetStartRow();
          +      case STOP_ROW:
          +        return isSetStopRow();
          +      case TIMESTAMP:
          +        return isSetTimestamp();
          +      case COLUMNS:
          +        return isSetColumns();
          +      case CACHING:
          +        return isSetCaching();
          +      case FILTER_STRING:
          +        return isSetFilterString();
          +      case BATCH_SIZE:
          +        return isSetBatchSize();
          +      case SORT_COLUMNS:
          +        return isSetSortColumns();
          +      case REVERSED:
          +        return isSetReversed();
          +      case CACHE_BLOCKS:
          +        return isSetCacheBlocks();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TScan)
          -      return this.equals((TScan)that);
          +    if (that instanceof TScan) return this.equals((TScan) that);
               return false;
             }
           
             public boolean equals(TScan that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_startRow = true && this.isSetStartRow();
               boolean that_present_startRow = true && that.isSetStartRow();
               if (this_present_startRow || that_present_startRow) {
          -      if (!(this_present_startRow && that_present_startRow))
          -        return false;
          -      if (!this.startRow.equals(that.startRow))
          -        return false;
          +      if (!(this_present_startRow && that_present_startRow)) return false;
          +      if (!this.startRow.equals(that.startRow)) return false;
               }
           
               boolean this_present_stopRow = true && this.isSetStopRow();
               boolean that_present_stopRow = true && that.isSetStopRow();
               if (this_present_stopRow || that_present_stopRow) {
          -      if (!(this_present_stopRow && that_present_stopRow))
          -        return false;
          -      if (!this.stopRow.equals(that.stopRow))
          -        return false;
          +      if (!(this_present_stopRow && that_present_stopRow)) return false;
          +      if (!this.stopRow.equals(that.stopRow)) return false;
               }
           
               boolean this_present_timestamp = true && this.isSetTimestamp();
               boolean that_present_timestamp = true && that.isSetTimestamp();
               if (this_present_timestamp || that_present_timestamp) {
          -      if (!(this_present_timestamp && that_present_timestamp))
          -        return false;
          -      if (this.timestamp != that.timestamp)
          -        return false;
          +      if (!(this_present_timestamp && that_present_timestamp)) return false;
          +      if (this.timestamp != that.timestamp) return false;
               }
           
               boolean this_present_columns = true && this.isSetColumns();
               boolean that_present_columns = true && that.isSetColumns();
               if (this_present_columns || that_present_columns) {
          -      if (!(this_present_columns && that_present_columns))
          -        return false;
          -      if (!this.columns.equals(that.columns))
          -        return false;
          +      if (!(this_present_columns && that_present_columns)) return false;
          +      if (!this.columns.equals(that.columns)) return false;
               }
           
               boolean this_present_caching = true && this.isSetCaching();
               boolean that_present_caching = true && that.isSetCaching();
               if (this_present_caching || that_present_caching) {
          -      if (!(this_present_caching && that_present_caching))
          -        return false;
          -      if (this.caching != that.caching)
          -        return false;
          +      if (!(this_present_caching && that_present_caching)) return false;
          +      if (this.caching != that.caching) return false;
               }
           
               boolean this_present_filterString = true && this.isSetFilterString();
               boolean that_present_filterString = true && that.isSetFilterString();
               if (this_present_filterString || that_present_filterString) {
          -      if (!(this_present_filterString && that_present_filterString))
          -        return false;
          -      if (!this.filterString.equals(that.filterString))
          -        return false;
          +      if (!(this_present_filterString && that_present_filterString)) return false;
          +      if (!this.filterString.equals(that.filterString)) return false;
               }
           
               boolean this_present_batchSize = true && this.isSetBatchSize();
               boolean that_present_batchSize = true && that.isSetBatchSize();
               if (this_present_batchSize || that_present_batchSize) {
          -      if (!(this_present_batchSize && that_present_batchSize))
          -        return false;
          -      if (this.batchSize != that.batchSize)
          -        return false;
          +      if (!(this_present_batchSize && that_present_batchSize)) return false;
          +      if (this.batchSize != that.batchSize) return false;
               }
           
               boolean this_present_sortColumns = true && this.isSetSortColumns();
               boolean that_present_sortColumns = true && that.isSetSortColumns();
               if (this_present_sortColumns || that_present_sortColumns) {
          -      if (!(this_present_sortColumns && that_present_sortColumns))
          -        return false;
          -      if (this.sortColumns != that.sortColumns)
          -        return false;
          +      if (!(this_present_sortColumns && that_present_sortColumns)) return false;
          +      if (this.sortColumns != that.sortColumns) return false;
               }
           
               boolean this_present_reversed = true && this.isSetReversed();
               boolean that_present_reversed = true && that.isSetReversed();
               if (this_present_reversed || that_present_reversed) {
          -      if (!(this_present_reversed && that_present_reversed))
          -        return false;
          -      if (this.reversed != that.reversed)
          -        return false;
          +      if (!(this_present_reversed && that_present_reversed)) return false;
          +      if (this.reversed != that.reversed) return false;
               }
           
               boolean this_present_cacheBlocks = true && this.isSetCacheBlocks();
               boolean that_present_cacheBlocks = true && that.isSetCacheBlocks();
               if (this_present_cacheBlocks || that_present_cacheBlocks) {
          -      if (!(this_present_cacheBlocks && that_present_cacheBlocks))
          -        return false;
          -      if (this.cacheBlocks != that.cacheBlocks)
          -        return false;
          +      if (!(this_present_cacheBlocks && that_present_cacheBlocks)) return false;
          +      if (this.cacheBlocks != that.cacheBlocks) return false;
               }
           
               return true;
          @@ -776,44 +831,35 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetStartRow()) ? 131071 : 524287);
          -    if (isSetStartRow())
          -      hashCode = hashCode * 8191 + startRow.hashCode();
          +    if (isSetStartRow()) hashCode = hashCode * 8191 + startRow.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetStopRow()) ? 131071 : 524287);
          -    if (isSetStopRow())
          -      hashCode = hashCode * 8191 + stopRow.hashCode();
          +    if (isSetStopRow()) hashCode = hashCode * 8191 + stopRow.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetTimestamp()) ? 131071 : 524287);
               if (isSetTimestamp())
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
               hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -    if (isSetColumns())
          -      hashCode = hashCode * 8191 + columns.hashCode();
          +    if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetCaching()) ? 131071 : 524287);
          -    if (isSetCaching())
          -      hashCode = hashCode * 8191 + caching;
          +    if (isSetCaching()) hashCode = hashCode * 8191 + caching;
           
               hashCode = hashCode * 8191 + ((isSetFilterString()) ? 131071 : 524287);
          -    if (isSetFilterString())
          -      hashCode = hashCode * 8191 + filterString.hashCode();
          +    if (isSetFilterString()) hashCode = hashCode * 8191 + filterString.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetBatchSize()) ? 131071 : 524287);
          -    if (isSetBatchSize())
          -      hashCode = hashCode * 8191 + batchSize;
          +    if (isSetBatchSize()) hashCode = hashCode * 8191 + batchSize;
           
               hashCode = hashCode * 8191 + ((isSetSortColumns()) ? 131071 : 524287);
          -    if (isSetSortColumns())
          -      hashCode = hashCode * 8191 + ((sortColumns) ? 131071 : 524287);
          +    if (isSetSortColumns()) hashCode = hashCode * 8191 + ((sortColumns) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetReversed()) ? 131071 : 524287);
          -    if (isSetReversed())
          -      hashCode = hashCode * 8191 + ((reversed) ? 131071 : 524287);
          +    if (isSetReversed()) hashCode = hashCode * 8191 + ((reversed) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetCacheBlocks()) ? 131071 : 524287);
          -    if (isSetCacheBlocks())
          -      hashCode = hashCode * 8191 + ((cacheBlocks) ? 131071 : 524287);
          +    if (isSetCacheBlocks()) hashCode = hashCode * 8191 + ((cacheBlocks) ? 131071 : 524287);
           
               return hashCode;
             }
          @@ -881,7 +927,8 @@ public int compareTo(TScan other) {
                 return lastComparison;
               }
               if (isSetFilterString()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filterString, other.filterString);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.filterString, other.filterString);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -938,7 +985,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -1033,23 +1081,28 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TScanStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TScanStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TScanStandardScheme getScheme() {
                 return new TScanStandardScheme();
               }
          @@ -1057,13 +1110,13 @@ public TScanStandardScheme getScheme() {
           
             private static class TScanStandardScheme extends org.apache.thrift.scheme.StandardScheme<TScan> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -1071,7 +1124,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.startRow = iprot.readBinary();
                         struct.setStartRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1079,7 +1132,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.stopRow = iprot.readBinary();
                         struct.setStopRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1087,7 +1140,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.timestamp = iprot.readI64();
                         struct.setTimestampIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1096,16 +1149,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                         {
                           org.apache.thrift.protocol.TList _list26 = iprot.readListBegin();
                           struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list26.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem27;
          -                for (int _i28 = 0; _i28 < _list26.size; ++_i28)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _elem27;
          +                for (int _i28 = 0; _i28 < _list26.size; ++_i28) {
                             _elem27 = iprot.readBinary();
                             struct.columns.add(_elem27);
                           }
                           iprot.readListEnd();
                         }
                         struct.setColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1113,7 +1166,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.caching = iprot.readI32();
                         struct.setCachingIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1121,7 +1174,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.filterString = iprot.readBinary();
                         struct.setFilterStringIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1129,7 +1182,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.batchSize = iprot.readI32();
                         struct.setBatchSizeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1137,7 +1190,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.sortColumns = iprot.readBool();
                         struct.setSortColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1145,7 +1198,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.reversed = iprot.readBool();
                         struct.setReversedIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1153,7 +1206,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.cacheBlocks = iprot.readBool();
                         struct.setCacheBlocksIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1168,7 +1221,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TScan struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TScan struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -1195,9 +1249,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TScan struct) thro
                   if (struct.isSetColumns()) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          -            for (java.nio.ByteBuffer _iter29 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
          +            for (java.nio.ByteBuffer _iter29 : struct.columns) {
                         oprot.writeBinary(_iter29);
                       }
                       oprot.writeListEnd();
          @@ -1252,8 +1306,10 @@ public TScanTupleScheme getScheme() {
             private static class TScanTupleScheme extends org.apache.thrift.scheme.TupleScheme<TScan> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetStartRow()) {
                   optionals.set(0);
          @@ -1298,8 +1354,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct) throw
                 if (struct.isSetColumns()) {
                   {
                     oprot.writeI32(struct.columns.size());
          -          for (java.nio.ByteBuffer _iter30 : struct.columns)
          -          {
          +          for (java.nio.ByteBuffer _iter30 : struct.columns) {
                       oprot.writeBinary(_iter30);
                     }
                   }
          @@ -1325,8 +1380,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct) throw
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(10);
                 if (incoming.get(0)) {
                   struct.startRow = iprot.readBinary();
          @@ -1342,11 +1399,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws
                 }
                 if (incoming.get(3)) {
                   {
          -          org.apache.thrift.protocol.TList _list31 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +          org.apache.thrift.protocol.TList _list31 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                     struct.columns = new java.util.ArrayList<java.nio.ByteBuffer>(_list31.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem32;
          -          for (int _i33 = 0; _i33 < _list31.size; ++_i33)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _elem32;
          +          for (int _i33 = 0; _i33 < _list31.size; ++_i33) {
                       _elem32 = iprot.readBinary();
                       struct.columns.add(_elem32);
                     }
          @@ -1380,8 +1438,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TThriftServerType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TThriftServerType.java
          index 17bdd3e4e40a..9a1daab6b69a 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TThriftServerType.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TThriftServerType.java
          @@ -1,19 +1,29 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift.generated;
           
          -
           /**
            * Specify type of thrift server: thrift and thrift2
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TThriftServerType implements org.apache.thrift.TEnum {
          -  ONE(1),
          -  TWO(2);
          +  ONE(1), TWO(2);
           
             private final int value;
           
          @@ -33,7 +43,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TThriftServerType findByValue(int value) { 
          +  public static TThriftServerType findByValue(int value) {
               switch (value) {
                 case 1:
                   return ONE;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
          index 19fee578b8cb..1a9aaac65606 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java
          @@ -1,5 +1,4 @@
          -/**
          - *
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -64,10 +63,10 @@
           import org.apache.hadoop.hbase.ServerName;
           import org.apache.hadoop.hbase.TableName;
           import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
          -import org.apache.hadoop.hbase.client.RegionLocator;
          -import org.apache.hadoop.hbase.client.ResultScanner;
           import org.apache.hadoop.hbase.client.LogQueryFilter;
           import org.apache.hadoop.hbase.client.OnlineLogRecord;
          +import org.apache.hadoop.hbase.client.RegionLocator;
          +import org.apache.hadoop.hbase.client.ResultScanner;
           import org.apache.hadoop.hbase.client.Table;
           import org.apache.hadoop.hbase.client.TableDescriptor;
           import org.apache.hadoop.hbase.security.UserProvider;
          @@ -98,14 +97,15 @@
           import org.apache.hadoop.hbase.thrift2.generated.TTableName;
           import org.apache.hadoop.hbase.thrift2.generated.TThriftServerType;
           import org.apache.hadoop.hbase.util.Bytes;
          -import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
          -import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
          -import org.apache.hbase.thirdparty.com.google.common.cache.RemovalListener;
           import org.apache.thrift.TException;
           import org.apache.yetus.audience.InterfaceAudience;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
          +import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
          +import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
          +import org.apache.hbase.thirdparty.com.google.common.cache.RemovalListener;
          +
           /**
            * This class is a glue object that connects Thrift RPC calls to the HBase client API primarily
            * defined in the Table interface.
          @@ -121,8 +121,8 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements TH
             private final AtomicInteger nextScannerId = new AtomicInteger(0);
             private final Cache<Integer, ResultScanner> scannerMap;
           
          -  private static final IOException ioe
          -      = new DoNotRetryIOException("Thrift Server is in Read-only mode.");
          +  private static final IOException ioe =
          +      new DoNotRetryIOException("Thrift Server is in Read-only mode.");
             private boolean isReadOnly;
           
             private static class TIOErrorWithCause extends TIOError {
          @@ -140,8 +140,7 @@ public synchronized Throwable getCause() {
           
               @Override
               public boolean equals(Object other) {
          -      if (super.equals(other) &&
          -          other instanceof TIOErrorWithCause) {
          +      if (super.equals(other) && other instanceof TIOErrorWithCause) {
                   Throwable otherCause = ((TIOErrorWithCause) other).getCause();
                   if (this.getCause() != null) {
                     return otherCause != null && this.getCause().equals(otherCause);
          @@ -160,17 +159,17 @@ public int hashCode() {
               }
             }
           
          -  public ThriftHBaseServiceHandler(final Configuration conf,
          -      final UserProvider userProvider) throws IOException {
          +  public ThriftHBaseServiceHandler(final Configuration conf, final UserProvider userProvider)
          +      throws IOException {
               super(conf, userProvider);
               long cacheTimeout = conf.getLong(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
                 DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
               isReadOnly = conf.getBoolean(THRIFT_READONLY_ENABLED, THRIFT_READONLY_ENABLED_DEFAULT);
          -    scannerMap = CacheBuilder.newBuilder()
          -      .expireAfterAccess(cacheTimeout, TimeUnit.MILLISECONDS)
          -      .removalListener((RemovalListener<Integer, ResultScanner>) removalNotification ->
          -          removalNotification.getValue().close())
          -      .build();
          +    scannerMap = CacheBuilder.newBuilder().expireAfterAccess(cacheTimeout, TimeUnit.MILLISECONDS)
          +        .removalListener(
          +          (RemovalListener<Integer, ResultScanner>) removalNotification -> removalNotification
          +              .getValue().close())
          +        .build();
             }
           
             @Override
          @@ -305,8 +304,9 @@ public boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family,
               checkReadOnlyMode();
               Table htable = getTable(table);
               try {
          -      Table.CheckAndMutateBuilder builder = htable.checkAndMutate(byteBufferToByteArray(row),
          -          byteBufferToByteArray(family)).qualifier(byteBufferToByteArray(qualifier));
          +      Table.CheckAndMutateBuilder builder =
          +          htable.checkAndMutate(byteBufferToByteArray(row), byteBufferToByteArray(family))
          +              .qualifier(byteBufferToByteArray(qualifier));
                 if (value == null) {
                   return builder.ifNotExists().thenPut(putFromThrift(put));
                 } else {
          @@ -346,8 +346,8 @@ public void deleteSingle(ByteBuffer table, TDelete deleteSingle) throws TIOError
             }
           
             @Override
          -  public List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes) throws TIOError,
          -      TException {
          +  public List<TDelete> deleteMultiple(ByteBuffer table, List<TDelete> deletes)
          +      throws TIOError, TException {
               checkReadOnlyMode();
               Table htable = getTable(table);
               try {
          @@ -359,12 +359,11 @@ public List deleteMultiple(ByteBuffer table, List deletes) thr
               }
               return Collections.emptyList();
             }
          -  
          +
             @Override
             public boolean checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer family,
                 ByteBuffer qualifier, TCompareOperator compareOp, ByteBuffer value,
          -      TRowMutations rowMutations)
          -          throws TIOError, TException {
          +      TRowMutations rowMutations) throws TIOError, TException {
               checkReadOnlyMode();
               try (final Table htable = getTable(table)) {
                 return htable.checkAndMutate(byteBufferToByteArray(row), byteBufferToByteArray(family))
          @@ -439,8 +438,8 @@ public int openScanner(ByteBuffer table, TScan scan) throws TIOError, TException
             }
           
             @Override
          -  public List<TResult> getScannerRows(int scannerId, int numRows) throws TIOError,
          -      TIllegalArgument, TException {
          +  public List<TResult> getScannerRows(int scannerId, int numRows)
          +      throws TIOError, TIllegalArgument, TException {
               ResultScanner scanner = getScanner(scannerId);
               if (scanner == null) {
                 TIllegalArgument ex = new TIllegalArgument();
          @@ -573,8 +572,8 @@ public List getTableDescriptors(List tables)
                 throws TIOError, TException {
               try {
                 List<TableName> tableNames = ThriftUtilities.tableNamesFromThrift(tables);
          -      List<TableDescriptor> tableDescriptors = connectionCache.getAdmin()
          -          .listTableDescriptors(tableNames);
          +      List<TableDescriptor> tableDescriptors =
          +          connectionCache.getAdmin().listTableDescriptors(tableNames);
                 return tableDescriptorsFromHBase(tableDescriptors);
               } catch (IOException e) {
                 throw getTIOError(e);
          @@ -596,8 +595,8 @@ public List getTableDescriptorsByPattern(String regex, boolean
                 throws TIOError, TException {
               try {
                 Pattern pattern = (regex == null ? null : Pattern.compile(regex));
          -      List<TableDescriptor> tableDescriptors = connectionCache.getAdmin()
          -          .listTableDescriptors(pattern, includeSysTables);
          +      List<TableDescriptor> tableDescriptors =
          +          connectionCache.getAdmin().listTableDescriptors(pattern, includeSysTables);
                 return tableDescriptorsFromHBase(tableDescriptors);
               } catch (IOException e) {
                 throw getTIOError(e);
          @@ -608,8 +607,8 @@ public List getTableDescriptorsByPattern(String regex, boolean
             public List<TTableDescriptor> getTableDescriptorsByNamespace(String name)
                 throws TIOError, TException {
               try {
          -      List<TableDescriptor> descriptors = connectionCache.getAdmin()
          -          .listTableDescriptorsByNamespace(Bytes.toBytes(name));
          +      List<TableDescriptor> descriptors =
          +          connectionCache.getAdmin().listTableDescriptorsByNamespace(Bytes.toBytes(name));
                 return tableDescriptorsFromHBase(descriptors);
               } catch (IOException e) {
                 throw getTIOError(e);
          @@ -621,8 +620,7 @@ public List getTableNamesByPattern(String regex, boolean includeSysT
                 throws TIOError, TException {
               try {
                 Pattern pattern = (regex == null ? null : Pattern.compile(regex));
          -      TableName[] tableNames = connectionCache.getAdmin()
          -          .listTableNames(pattern, includeSysTables);
          +      TableName[] tableNames = connectionCache.getAdmin().listTableNames(pattern, includeSysTables);
                 return tableNamesFromHBase(tableNames);
               } catch (IOException e) {
                 throw getTIOError(e);
          @@ -821,7 +819,7 @@ public List listNamespaces() throws TIOError, TException {
               try {
                 String[] namespaces = connectionCache.getAdmin().listNamespaces();
                 List<String> result = new ArrayList<>(namespaces.length);
          -      for (String ns: namespaces) {
          +      for (String ns : namespaces) {
                   result.add(ns);
                 }
                 return result;
          @@ -845,10 +843,9 @@ public List getSlowLogResponses(Set tServerNames,
                 TLogQueryFilter tLogQueryFilter) throws TIOError, TException {
               try {
                 Set<ServerName> serverNames = ThriftUtilities.getServerNamesFromThrift(tServerNames);
          -      LogQueryFilter logQueryFilter =
          -        ThriftUtilities.getSlowLogQueryFromThrift(tLogQueryFilter);
          +      LogQueryFilter logQueryFilter = ThriftUtilities.getSlowLogQueryFromThrift(tLogQueryFilter);
                 List<OnlineLogRecord> onlineLogRecords =
          -        connectionCache.getAdmin().getSlowLogResponses(serverNames, logQueryFilter);
          +          connectionCache.getAdmin().getSlowLogResponses(serverNames, logQueryFilter);
                 return ThriftUtilities.getSlowLogRecordsFromHBase(onlineLogRecords);
               } catch (IOException e) {
                 throw getTIOError(e);
          @@ -871,12 +868,12 @@ public boolean grant(TAccessControlEntity info) throws TIOError, TException {
               Permission.Action[] actions = ThriftUtilities.permissionActionsFromString(info.actions);
               try {
                 if (info.scope == TPermissionScope.NAMESPACE) {
          -        AccessControlClient.grant(connectionCache.getAdmin().getConnection(),
          -          info.getNsName(), info.getUsername(), actions);
          +        AccessControlClient.grant(connectionCache.getAdmin().getConnection(), info.getNsName(),
          +          info.getUsername(), actions);
                 } else if (info.scope == TPermissionScope.TABLE) {
                   TableName tableName = TableName.valueOf(info.getTableName());
          -        AccessControlClient.grant(connectionCache.getAdmin().getConnection(),
          -          tableName, info.getUsername(), null, null, actions);
          +        AccessControlClient.grant(connectionCache.getAdmin().getConnection(), tableName,
          +          info.getUsername(), null, null, actions);
                 }
               } catch (Throwable t) {
                 if (t instanceof IOException) {
          @@ -893,12 +890,12 @@ public boolean revoke(TAccessControlEntity info) throws TIOError, TException {
               Permission.Action[] actions = ThriftUtilities.permissionActionsFromString(info.actions);
               try {
                 if (info.scope == TPermissionScope.NAMESPACE) {
          -        AccessControlClient.revoke(connectionCache.getAdmin().getConnection(),
          -          info.getNsName(), info.getUsername(), actions);
          +        AccessControlClient.revoke(connectionCache.getAdmin().getConnection(), info.getNsName(),
          +          info.getUsername(), actions);
                 } else if (info.scope == TPermissionScope.TABLE) {
                   TableName tableName = TableName.valueOf(info.getTableName());
          -        AccessControlClient.revoke(connectionCache.getAdmin().getConnection(),
          -          tableName, info.getUsername(), null, null, actions);
          +        AccessControlClient.revoke(connectionCache.getAdmin().getConnection(), tableName,
          +          info.getUsername(), null, null, actions);
                 }
               } catch (Throwable t) {
                 if (t instanceof IOException) {
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
          index e04a112bca87..d528545c4f59 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
          @@ -1,5 +1,4 @@
          -/**
          - *
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -23,7 +22,6 @@
           import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_READONLY_ENABLED_DEFAULT;
           
           import java.io.IOException;
          -
           import org.apache.hadoop.conf.Configuration;
           import org.apache.hadoop.hbase.HBaseConfiguration;
           import org.apache.hadoop.hbase.HBaseInterfaceAudience;
          @@ -54,21 +52,18 @@
           public class ThriftServer extends org.apache.hadoop.hbase.thrift.ThriftServer {
             private static final Logger log = LoggerFactory.getLogger(ThriftServer.class);
           
          -
             public ThriftServer(Configuration conf) {
               super(conf);
             }
           
             @Override
          -  protected void printUsageAndExit(Options options, int exitCode)
          -      throws Shell.ExitCodeException {
          +  protected void printUsageAndExit(Options options, int exitCode) throws Shell.ExitCodeException {
               HelpFormatter formatter = new HelpFormatter();
               formatter.printHelp("Thrift", null, options,
          -        "To start the Thrift server run 'hbase-daemon.sh start thrift2' or " +
          -            "'hbase thrift2'\n" +
          -            "To shutdown the thrift server run 'hbase-daemon.sh stop thrift2' or" +
          -            " send a kill signal to the thrift server pid",
          -        true);
          +      "To start the Thrift server run 'hbase-daemon.sh start thrift2' or " + "'hbase thrift2'\n"
          +          + "To shutdown the thrift server run 'hbase-daemon.sh stop thrift2' or"
          +          + " send a kill signal to the thrift server pid",
          +      true);
               throw new Shell.ExitCodeException(exitCode, "");
             }
           
          @@ -93,7 +88,7 @@ protected TProcessor createProcessor() {
             protected void addOptions(Options options) {
               super.addOptions(options);
               options.addOption("ro", READONLY_OPTION, false,
          -        "Respond only to read method requests [default: false]");
          +      "Respond only to read method requests [default: false]");
             }
           
             @Override
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
          index 967e63ec2717..6b4c69e5d7af 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
          @@ -1,5 +1,4 @@
          -/**
          - *
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -121,25 +120,19 @@
           @InterfaceAudience.Private
           public final class ThriftUtilities {
           
          -  private final static Cell[] EMPTY_CELL_ARRAY = new Cell[]{};
          +  private final static Cell[] EMPTY_CELL_ARRAY = new Cell[] {};
             private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY);
             private final static Result EMPTY_RESULT_STALE = Result.create(EMPTY_CELL_ARRAY, null, true);
           
          -
          -
             private ThriftUtilities() {
               throw new UnsupportedOperationException("Can't initialize class");
             }
           
             /**
          -   * Creates a {@link Get} (HBase) from a {@link TGet} (Thrift).
          -   *
          -   * This ignores any timestamps set on {@link TColumn} objects.
          -   *
          +   * Creates a {@link Get} (HBase) from a {@link TGet} (Thrift). This ignores any timestamps set on
          +   * {@link TColumn} objects.
              * @param in the TGet to convert
          -   *
              * @return Get object
          -   *
              * @throws IOException if an invalid time range or max version parameter is given
              */
             public static Get getFromThrift(TGet in) throws IOException {
          @@ -162,7 +155,7 @@ public static Get getFromThrift(TGet in) throws IOException {
               }
           
               if (in.isSetAttributes()) {
          -      addAttributes(out,in.getAttributes());
          +      addAttributes(out, in.getAttributes());
               }
           
               if (in.isSetAuthorizations()) {
          @@ -208,11 +201,8 @@ public static Get getFromThrift(TGet in) throws IOException {
           
             /**
              * Converts multiple {@link TGet}s (Thrift) into a list of {@link Get}s (HBase).
          -   *
              * @param in list of TGets to convert
          -   *
              * @return list of Get objects
          -   *
              * @throws IOException if an invalid time range or max version parameter is given
              * @see #getFromThrift(TGet)
              */
          @@ -226,9 +216,7 @@ public static List getsFromThrift(List in) throws IOException {
           
             /**
              * Creates a {@link TResult} (Thrift) from a {@link Result} (HBase).
          -   *
              * @param in the Result to convert
          -   *
              * @return converted result, returns an empty result if the input is null
              */
             public static TResult resultFromHBase(Result in) {
          @@ -261,11 +249,8 @@ public static TResult resultFromHBase(Result in) {
           
             /**
              * Converts multiple {@link Result}s (HBase) into a list of {@link TResult}s (Thrift).
          -   *
              * @param in array of Results to convert
          -   *
              * @return list of converted TResults
          -   *
              * @see #resultFromHBase(Result)
              */
             public static List<TResult> resultsFromHBase(Result[] in) {
          @@ -278,9 +263,7 @@ public static List resultsFromHBase(Result[] in) {
           
             /**
              * Creates a {@link Put} (HBase) from a {@link TPut} (Thrift)
          -   *
              * @param in the TPut to convert
          -   *
              * @return converted Put
              */
             public static Put putFromThrift(TPut in) {
          @@ -299,23 +282,15 @@ public static Put putFromThrift(TPut in) {
               for (TColumnValue columnValue : in.getColumnValues()) {
                 try {
                   if (columnValue.isSetTimestamp()) {
          -          out.add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
          -              .setRow(out.getRow())
          -              .setFamily(columnValue.getFamily())
          -              .setQualifier(columnValue.getQualifier())
          -              .setTimestamp(columnValue.getTimestamp())
          -              .setType(Cell.Type.Put)
          -              .setValue(columnValue.getValue())
          -              .build());
          +          out.add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(out.getRow())
          +              .setFamily(columnValue.getFamily()).setQualifier(columnValue.getQualifier())
          +              .setTimestamp(columnValue.getTimestamp()).setType(Cell.Type.Put)
          +              .setValue(columnValue.getValue()).build());
                   } else {
          -          out.add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
          -              .setRow(out.getRow())
          -              .setFamily(columnValue.getFamily())
          -              .setQualifier(columnValue.getQualifier())
          -              .setTimestamp(out.getTimestamp())
          -              .setType(Cell.Type.Put)
          -              .setValue(columnValue.getValue())
          -              .build());
          +          out.add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(out.getRow())
          +              .setFamily(columnValue.getFamily()).setQualifier(columnValue.getQualifier())
          +              .setTimestamp(out.getTimestamp()).setType(Cell.Type.Put)
          +              .setValue(columnValue.getValue()).build());
                   }
                 } catch (IOException e) {
                   throw new IllegalArgumentException((e));
          @@ -323,7 +298,7 @@ public static Put putFromThrift(TPut in) {
               }
           
               if (in.isSetAttributes()) {
          -      addAttributes(out,in.getAttributes());
          +      addAttributes(out, in.getAttributes());
               }
           
               if (in.getCellVisibility() != null) {
          @@ -335,11 +310,8 @@ public static Put putFromThrift(TPut in) {
           
             /**
              * Converts multiple {@link TPut}s (Thrift) into a list of {@link Put}s (HBase).
          -   *
              * @param in list of TPuts to convert
          -   *
              * @return list of converted Puts
          -   *
              * @see #putFromThrift(TPut)
              */
             public static List putsFromThrift(List in) {
          @@ -352,9 +324,7 @@ public static List putsFromThrift(List in) {
           
             /**
              * Creates a {@link Delete} (HBase) from a {@link TDelete} (Thrift).
          -   *
              * @param in the TDelete to convert
          -   *
              * @return converted Delete
              */
             public static Delete deleteFromThrift(TDelete in) {
          @@ -410,7 +380,7 @@ public static Delete deleteFromThrift(TDelete in) {
               }
           
               if (in.isSetAttributes()) {
          -      addAttributes(out,in.getAttributes());
          +      addAttributes(out, in.getAttributes());
               }
           
               if (in.isSetDurability()) {
          @@ -422,11 +392,8 @@ public static Delete deleteFromThrift(TDelete in) {
           
             /**
              * Converts multiple {@link TDelete}s (Thrift) into a list of {@link Delete}s (HBase).
          -   *
              * @param in list of TDeletes to convert
          -   *
              * @return list of converted Deletes
          -   *
              * @see #deleteFromThrift(TDelete)
              */
           
          @@ -440,12 +407,18 @@ public static List deletesFromThrift(List in) {
           
             public static TDeleteType deleteTypeFromHBase(Cell.Type type) {
               switch (type) {
          -      case Delete: return TDeleteType.DELETE_COLUMN;
          -      case DeleteColumn: return TDeleteType.DELETE_COLUMNS;
          -      case DeleteFamily: return TDeleteType.DELETE_FAMILY;
          -      case DeleteFamilyVersion: return TDeleteType.DELETE_FAMILY_VERSION;
          -      default: throw new IllegalArgumentException("Unknow delete type " + type);
          -    }  }
          +      case Delete:
          +        return TDeleteType.DELETE_COLUMN;
          +      case DeleteColumn:
          +        return TDeleteType.DELETE_COLUMNS;
          +      case DeleteFamily:
          +        return TDeleteType.DELETE_FAMILY;
          +      case DeleteFamilyVersion:
          +        return TDeleteType.DELETE_FAMILY_VERSION;
          +      default:
+        throw new IllegalArgumentException("Unknown delete type " + type);
          +    }
          +  }
           
             public static TDelete deleteFromHBase(Delete in) {
               TDelete out = new TDelete(ByteBuffer.wrap(in.getRow()));
          @@ -458,9 +431,9 @@ public static TDelete deleteFromHBase(Delete in) {
           
               for (Map.Entry attribute : in.getAttributesMap().entrySet()) {
                 out.putToAttributes(ByteBuffer.wrap(Bytes.toBytes(attribute.getKey())),
          -          ByteBuffer.wrap(attribute.getValue()));
          +        ByteBuffer.wrap(attribute.getValue()));
               }
          -    if (in.getDurability() != Durability.USE_DEFAULT)  {
          +    if (in.getDurability() != Durability.USE_DEFAULT) {
                 out.setDurability(durabilityFromHBase(in.getDurability()));
               }
               // Delete the whole row
          @@ -468,15 +441,14 @@ public static TDelete deleteFromHBase(Delete in) {
                 return out;
               }
               TDeleteType type = null;
          -    for (Map.Entry> familyEntry:
          -        in.getFamilyCellMap().entrySet()) {
          +    for (Map.Entry> familyEntry : in.getFamilyCellMap().entrySet()) {
                 byte[] family = familyEntry.getKey();
                 TColumn column = new TColumn(ByteBuffer.wrap(familyEntry.getKey()));
          -      for (Cell cell: familyEntry.getValue()) {
          +      for (Cell cell : familyEntry.getValue()) {
                   TDeleteType cellDeleteType = deleteTypeFromHBase(cell.getType());
                   if (type == null) {
                     type = cellDeleteType;
          -        } else if (type != cellDeleteType){
          +        } else if (type != cellDeleteType) {
                     throw new RuntimeException("Only the same delete type is supported, but two delete type "
                         + "is founded, one is " + type + " the other one is " + cellDeleteType);
                   }
          @@ -500,9 +472,7 @@ public static TDelete deleteFromHBase(Delete in) {
           
             /**
              * Creates a {@link RowMutations} (HBase) from a {@link TRowMutations} (Thrift)
          -   *
              * @param in the TRowMutations to convert
          -   *
              * @return converted RowMutations
              */
             public static RowMutations rowMutationsFromThrift(TRowMutations in) throws IOException {
          @@ -546,8 +516,7 @@ public static Scan scanFromThrift(TScan in) throws IOException {
               }
           
               TTimeRange timeRange = in.getTimeRange();
          -    if (timeRange != null &&
          -        timeRange.isSetMinStamp() && timeRange.isSetMaxStamp()) {
          +    if (timeRange != null && timeRange.isSetMinStamp() && timeRange.isSetMaxStamp()) {
                 out.setTimeRange(timeRange.getMinStamp(), timeRange.getMaxStamp());
               }
           
          @@ -561,7 +530,7 @@ public static Scan scanFromThrift(TScan in) throws IOException {
               }
           
               if (in.isSetAttributes()) {
          -      addAttributes(out,in.getAttributes());
          +      addAttributes(out, in.getAttributes());
               }
           
               if (in.isSetAuthorizations()) {
          @@ -581,7 +550,7 @@ public static Scan scanFromThrift(TScan in) throws IOException {
                 if (MapUtils.isNotEmpty(colFamTimeRangeMap)) {
                   for (Map.Entry entry : colFamTimeRangeMap.entrySet()) {
                     out.setColumnFamilyTimeRange(Bytes.toBytes(entry.getKey()),
          -              entry.getValue().getMinStamp(), entry.getValue().getMaxStamp());
          +            entry.getValue().getMinStamp(), entry.getValue().getMaxStamp());
                   }
                 }
               }
          @@ -615,7 +584,7 @@ public static byte[] filterFromHBase(Filter filter) throws IOException {
             }
           
             public static Filter filterFromThrift(byte[] filterBytes) throws IOException {
          -    FilterProtos.Filter filterPB  = FilterProtos.Filter.parseFrom(filterBytes);
          +    FilterProtos.Filter filterPB = FilterProtos.Filter.parseFrom(filterBytes);
               return ProtobufUtil.toFilter(filterPB);
             }
           
          @@ -647,7 +616,7 @@ public static TScan scanFromHBase(Scan in) throws IOException {
           
               for (Map.Entry attribute : in.getAttributesMap().entrySet()) {
                 out.putToAttributes(ByteBuffer.wrap(Bytes.toBytes(attribute.getKey())),
          -          ByteBuffer.wrap(attribute.getValue()));
          +        ByteBuffer.wrap(attribute.getValue()));
               }
           
               try {
          @@ -691,14 +660,14 @@ public static Increment incrementFromThrift(TIncrement in) throws IOException {
               }
           
               if (in.isSetAttributes()) {
          -      addAttributes(out,in.getAttributes());
          +      addAttributes(out, in.getAttributes());
               }
           
               if (in.isSetDurability()) {
                 out.setDurability(durabilityFromThrift(in.getDurability()));
               }
           
          -    if(in.getCellVisibility() != null) {
          +    if (in.getCellVisibility() != null) {
                 out.setCellVisibility(new CellVisibility(in.getCellVisibility().getExpression()));
               }
           
          @@ -723,7 +692,7 @@ public static Append appendFromThrift(TAppend append) throws IOException {
                 out.setDurability(durabilityFromThrift(append.getDurability()));
               }
           
          -    if(append.getCellVisibility() != null) {
          +    if (append.getCellVisibility() != null) {
                 out.setCellVisibility(new CellVisibility(append.getCellVisibility().getExpression()));
               }
           
          @@ -761,7 +730,7 @@ public static THRegionLocation regionLocationFromHBase(HRegionLocation hrl) {
           
             public static List regionLocationsFromHBase(List locations) {
               List tlocations = new ArrayList<>(locations.size());
          -    for (HRegionLocation hrl:locations) {
          +    for (HRegionLocation hrl : locations) {
                 tlocations.add(regionLocationFromHBase(hrl));
               }
               return tlocations;
          @@ -771,64 +740,89 @@ public static List regionLocationsFromHBase(List attributes) {
          +      Map attributes) {
               if (attributes == null || attributes.isEmpty()) {
                 return;
               }
               for (Map.Entry entry : attributes.entrySet()) {
                 String name = Bytes.toStringBinary(getBytes(entry.getKey()));
          -      byte[] value =  getBytes(entry.getValue());
          +      byte[] value = getBytes(entry.getValue());
                 op.setAttribute(name, value);
               }
             }
           
             private static Durability durabilityFromThrift(TDurability tDurability) {
               switch (tDurability.getValue()) {
          -      case 0: return Durability.USE_DEFAULT;
          -      case 1: return Durability.SKIP_WAL;
          -      case 2: return Durability.ASYNC_WAL;
          -      case 3: return Durability.SYNC_WAL;
          -      case 4: return Durability.FSYNC_WAL;
          -      default: return Durability.USE_DEFAULT;
          +      case 0:
          +        return Durability.USE_DEFAULT;
          +      case 1:
          +        return Durability.SKIP_WAL;
          +      case 2:
          +        return Durability.ASYNC_WAL;
          +      case 3:
          +        return Durability.SYNC_WAL;
          +      case 4:
          +        return Durability.FSYNC_WAL;
          +      default:
          +        return Durability.USE_DEFAULT;
               }
             }
           
             public static CompareOperator compareOpFromThrift(TCompareOperator tCompareOp) {
               switch (tCompareOp.getValue()) {
          -      case 0: return CompareOperator.LESS;
          -      case 1: return CompareOperator.LESS_OR_EQUAL;
          -      case 2: return CompareOperator.EQUAL;
          -      case 3: return CompareOperator.NOT_EQUAL;
          -      case 4: return CompareOperator.GREATER_OR_EQUAL;
          -      case 5: return CompareOperator.GREATER;
          -      case 6: return CompareOperator.NO_OP;
          -      default: return null;
          +      case 0:
          +        return CompareOperator.LESS;
          +      case 1:
          +        return CompareOperator.LESS_OR_EQUAL;
          +      case 2:
          +        return CompareOperator.EQUAL;
          +      case 3:
          +        return CompareOperator.NOT_EQUAL;
          +      case 4:
          +        return CompareOperator.GREATER_OR_EQUAL;
          +      case 5:
          +        return CompareOperator.GREATER;
          +      case 6:
          +        return CompareOperator.NO_OP;
          +      default:
          +        return null;
               }
             }
           
             private static ReadType readTypeFromThrift(TReadType tReadType) {
               switch (tReadType.getValue()) {
          -      case 1: return ReadType.DEFAULT;
          -      case 2: return ReadType.STREAM;
          -      case 3: return ReadType.PREAD;
          -      default: return null;
          +      case 1:
          +        return ReadType.DEFAULT;
          +      case 2:
          +        return ReadType.STREAM;
          +      case 3:
          +        return ReadType.PREAD;
          +      default:
          +        return null;
               }
             }
           
             private static TReadType readTypeFromHBase(ReadType readType) {
               switch (readType) {
          -      case DEFAULT: return TReadType.DEFAULT;
          -      case STREAM: return TReadType.STREAM;
          -      case PREAD: return TReadType.PREAD;
          -      default: return TReadType.DEFAULT;
          +      case DEFAULT:
          +        return TReadType.DEFAULT;
          +      case STREAM:
          +        return TReadType.STREAM;
          +      case PREAD:
          +        return TReadType.PREAD;
          +      default:
          +        return TReadType.DEFAULT;
               }
             }
           
             private static Consistency consistencyFromThrift(TConsistency tConsistency) {
               switch (tConsistency.getValue()) {
          -      case 1: return Consistency.STRONG;
          -      case 2: return Consistency.TIMELINE;
          -      default: return Consistency.STRONG;
          +      case 1:
          +        return Consistency.STRONG;
          +      case 2:
          +        return Consistency.TIMELINE;
          +      default:
          +        return Consistency.STRONG;
               }
             }
           
          @@ -890,51 +884,73 @@ public static byte[][] splitKeyFromThrift(List in) {
           
             public static BloomType bloomFilterFromThrift(TBloomFilterType in) {
               switch (in.getValue()) {
          -      case 0: return BloomType.NONE;
          -      case 1: return BloomType.ROW;
          -      case 2: return BloomType.ROWCOL;
          -      case 3: return BloomType.ROWPREFIX_FIXED_LENGTH;
          -      default: return BloomType.ROW;
          +      case 0:
          +        return BloomType.NONE;
          +      case 1:
          +        return BloomType.ROW;
          +      case 2:
          +        return BloomType.ROWCOL;
          +      case 3:
          +        return BloomType.ROWPREFIX_FIXED_LENGTH;
          +      default:
          +        return BloomType.ROW;
               }
             }
           
             public static Compression.Algorithm compressionAlgorithmFromThrift(TCompressionAlgorithm in) {
               switch (in.getValue()) {
          -      case 0: return Compression.Algorithm.LZO;
          -      case 1: return Compression.Algorithm.GZ;
          -      case 2: return Compression.Algorithm.NONE;
          -      case 3: return Compression.Algorithm.SNAPPY;
          -      case 4: return Compression.Algorithm.LZ4;
          -      case 5: return Compression.Algorithm.BZIP2;
          -      case 6: return Compression.Algorithm.ZSTD;
          -      default: return Compression.Algorithm.NONE;
          +      case 0:
          +        return Compression.Algorithm.LZO;
          +      case 1:
          +        return Compression.Algorithm.GZ;
          +      case 2:
          +        return Compression.Algorithm.NONE;
          +      case 3:
          +        return Compression.Algorithm.SNAPPY;
          +      case 4:
          +        return Compression.Algorithm.LZ4;
          +      case 5:
          +        return Compression.Algorithm.BZIP2;
          +      case 6:
          +        return Compression.Algorithm.ZSTD;
          +      default:
          +        return Compression.Algorithm.NONE;
               }
             }
           
             public static DataBlockEncoding dataBlockEncodingFromThrift(TDataBlockEncoding in) {
               switch (in.getValue()) {
          -      case 0: return DataBlockEncoding.NONE;
          -      case 2: return DataBlockEncoding.PREFIX;
          -      case 3: return DataBlockEncoding.DIFF;
          -      case 4: return DataBlockEncoding.FAST_DIFF;
          -      case 7: return DataBlockEncoding.ROW_INDEX_V1;
          -      default: return DataBlockEncoding.NONE;
          +      case 0:
          +        return DataBlockEncoding.NONE;
          +      case 2:
          +        return DataBlockEncoding.PREFIX;
          +      case 3:
          +        return DataBlockEncoding.DIFF;
          +      case 4:
          +        return DataBlockEncoding.FAST_DIFF;
          +      case 7:
          +        return DataBlockEncoding.ROW_INDEX_V1;
          +      default:
          +        return DataBlockEncoding.NONE;
               }
             }
           
             public static KeepDeletedCells keepDeletedCellsFromThrift(TKeepDeletedCells in) {
               switch (in.getValue()) {
          -      case 0: return KeepDeletedCells.FALSE;
          -      case 1: return KeepDeletedCells.TRUE;
          -      case 2: return KeepDeletedCells.TTL;
          -      default: return KeepDeletedCells.FALSE;
          +      case 0:
          +        return KeepDeletedCells.FALSE;
          +      case 1:
          +        return KeepDeletedCells.TRUE;
          +      case 2:
          +        return KeepDeletedCells.TTL;
          +      default:
          +        return KeepDeletedCells.FALSE;
               }
             }
           
          -  public static ColumnFamilyDescriptor columnFamilyDescriptorFromThrift(
          -      TColumnFamilyDescriptor in) {
          -    ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder
          -        .newBuilder(in.getName());
          +  public static ColumnFamilyDescriptor
          +      columnFamilyDescriptorFromThrift(TColumnFamilyDescriptor in) {
          +    ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(in.getName());
           
               if (in.isSetAttributes()) {
                 for (Map.Entry attribute : in.getAttributes().entrySet()) {
          @@ -998,7 +1014,6 @@ public static ColumnFamilyDescriptor columnFamilyDescriptorFromThrift(
                 builder.setInMemory(in.isInMemory());
               }
           
          -
               return builder.build();
             }
           
          @@ -1021,8 +1036,7 @@ public static TNamespaceDescriptor namespaceDescriptorFromHBase(NamespaceDescrip
               return out;
             }
           
          -  public static List namespaceDescriptorsFromHBase(
          -      NamespaceDescriptor[] in) {
          +  public static List namespaceDescriptorsFromHBase(NamespaceDescriptor[] in) {
               List out = new ArrayList<>(in.length);
               for (NamespaceDescriptor descriptor : in) {
                 out.add(namespaceDescriptorFromHBase(descriptor));
          @@ -1031,8 +1045,8 @@ public static List namespaceDescriptorsFromHBase(
             }
           
             public static TableDescriptor tableDescriptorFromThrift(TTableDescriptor in) {
          -    TableDescriptorBuilder builder = TableDescriptorBuilder
          -        .newBuilder(tableNameFromThrift(in.getTableName()));
          +    TableDescriptorBuilder builder =
          +        TableDescriptorBuilder.newBuilder(tableNameFromThrift(in.getTableName()));
               for (TColumnFamilyDescriptor column : in.getColumns()) {
                 builder.setColumnFamily(columnFamilyDescriptorFromThrift(column));
               }
          @@ -1057,12 +1071,18 @@ public static List tableDescriptorsFromThrift(List attributes = in.getValues();
               for (Map.Entry attribute : attributes.entrySet()) {
                 out.putToAttributes(ByteBuffer.wrap(attribute.getKey().get()),
          -          ByteBuffer.wrap(attribute.getValue().get()));
          +        ByteBuffer.wrap(attribute.getValue().get()));
               }
               for (ColumnFamilyDescriptor column : in.getColumnFamilies()) {
                 out.addToColumns(columnFamilyDescriptorFromHBase(column));
          @@ -1097,57 +1117,78 @@ public static List tableDescriptorsFromHBase(TableDescriptor[]
               return out;
             }
           
          -
             public static TBloomFilterType bloomFilterFromHBase(BloomType in) {
               switch (in) {
          -      case NONE: return TBloomFilterType.NONE;
          -      case ROW: return TBloomFilterType.ROW;
          -      case ROWCOL: return TBloomFilterType.ROWCOL;
          -      case ROWPREFIX_FIXED_LENGTH: return TBloomFilterType.ROWPREFIX_FIXED_LENGTH;
          -      default: return TBloomFilterType.ROW;
          +      case NONE:
          +        return TBloomFilterType.NONE;
          +      case ROW:
          +        return TBloomFilterType.ROW;
          +      case ROWCOL:
          +        return TBloomFilterType.ROWCOL;
          +      case ROWPREFIX_FIXED_LENGTH:
          +        return TBloomFilterType.ROWPREFIX_FIXED_LENGTH;
          +      default:
          +        return TBloomFilterType.ROW;
               }
             }
           
             public static TCompressionAlgorithm compressionAlgorithmFromHBase(Compression.Algorithm in) {
               switch (in) {
          -      case LZO: return TCompressionAlgorithm.LZO;
          -      case GZ: return TCompressionAlgorithm.GZ;
          -      case NONE: return TCompressionAlgorithm.NONE;
          -      case SNAPPY: return TCompressionAlgorithm.SNAPPY;
          -      case LZ4: return TCompressionAlgorithm.LZ4;
          -      case BZIP2: return TCompressionAlgorithm.BZIP2;
          -      case ZSTD: return TCompressionAlgorithm.ZSTD;
          -      default: return TCompressionAlgorithm.NONE;
          +      case LZO:
          +        return TCompressionAlgorithm.LZO;
          +      case GZ:
          +        return TCompressionAlgorithm.GZ;
          +      case NONE:
          +        return TCompressionAlgorithm.NONE;
          +      case SNAPPY:
          +        return TCompressionAlgorithm.SNAPPY;
          +      case LZ4:
          +        return TCompressionAlgorithm.LZ4;
          +      case BZIP2:
          +        return TCompressionAlgorithm.BZIP2;
          +      case ZSTD:
          +        return TCompressionAlgorithm.ZSTD;
          +      default:
          +        return TCompressionAlgorithm.NONE;
               }
             }
           
             public static TDataBlockEncoding dataBlockEncodingFromHBase(DataBlockEncoding in) {
               switch (in) {
          -      case NONE: return TDataBlockEncoding.NONE;
          -      case PREFIX: return TDataBlockEncoding.PREFIX;
          -      case DIFF: return TDataBlockEncoding.DIFF;
          -      case FAST_DIFF: return TDataBlockEncoding.FAST_DIFF;
          -      case ROW_INDEX_V1: return TDataBlockEncoding.ROW_INDEX_V1;
          -      default: return TDataBlockEncoding.NONE;
          +      case NONE:
          +        return TDataBlockEncoding.NONE;
          +      case PREFIX:
          +        return TDataBlockEncoding.PREFIX;
          +      case DIFF:
          +        return TDataBlockEncoding.DIFF;
          +      case FAST_DIFF:
          +        return TDataBlockEncoding.FAST_DIFF;
          +      case ROW_INDEX_V1:
          +        return TDataBlockEncoding.ROW_INDEX_V1;
          +      default:
          +        return TDataBlockEncoding.NONE;
               }
             }
           
             public static TKeepDeletedCells keepDeletedCellsFromHBase(KeepDeletedCells in) {
               switch (in) {
          -      case FALSE: return TKeepDeletedCells.FALSE;
          -      case TRUE: return TKeepDeletedCells.TRUE;
          -      case TTL: return TKeepDeletedCells.TTL;
          -      default: return TKeepDeletedCells.FALSE;
          +      case FALSE:
          +        return TKeepDeletedCells.FALSE;
          +      case TRUE:
          +        return TKeepDeletedCells.TRUE;
          +      case TTL:
          +        return TKeepDeletedCells.TTL;
          +      default:
          +        return TKeepDeletedCells.FALSE;
               }
             }
           
          -  public static TColumnFamilyDescriptor columnFamilyDescriptorFromHBase(
          -      ColumnFamilyDescriptor in) {
          +  public static TColumnFamilyDescriptor columnFamilyDescriptorFromHBase(ColumnFamilyDescriptor in) {
               TColumnFamilyDescriptor out = new TColumnFamilyDescriptor();
               out.setName(in.getName());
               for (Map.Entry attribute : in.getValues().entrySet()) {
                 out.putToAttributes(ByteBuffer.wrap(attribute.getKey().get()),
          -          ByteBuffer.wrap(attribute.getValue().get()));
          +        ByteBuffer.wrap(attribute.getValue().get()));
               }
               for (Map.Entry conf : in.getConfiguration().entrySet()) {
                 out.putToConfiguration(conf.getKey(), conf.getValue());
          @@ -1172,12 +1213,14 @@ public static TColumnFamilyDescriptor columnFamilyDescriptorFromHBase(
               return out;
             }
           
          -
             private static TConsistency consistencyFromHBase(Consistency consistency) {
               switch (consistency) {
          -      case STRONG: return TConsistency.STRONG;
          -      case TIMELINE: return TConsistency.TIMELINE;
          -      default: return TConsistency.STRONG;
          +      case STRONG:
          +        return TConsistency.STRONG;
          +      case TIMELINE:
          +        return TConsistency.TIMELINE;
          +      default:
          +        return TConsistency.STRONG;
               }
             }
           
          @@ -1192,7 +1235,7 @@ public static TGet getFromHBase(Get in) {
           
               for (Map.Entry attribute : in.getAttributesMap().entrySet()) {
                 out.putToAttributes(ByteBuffer.wrap(Bytes.toBytes(attribute.getKey())),
          -          ByteBuffer.wrap(attribute.getValue()));
          +        ByteBuffer.wrap(attribute.getValue()));
               }
               try {
                 Authorizations authorizations = in.getAuthorizations();
          @@ -1236,28 +1279,17 @@ public static TGet getFromHBase(Get in) {
             }
           
             public static Cell toCell(ExtendedCellBuilder cellBuilder, byte[] row, TColumnValue columnValue) {
          -    return cellBuilder.clear()
          -        .setRow(row)
          -        .setFamily(columnValue.getFamily())
          -        .setQualifier(columnValue.getQualifier())
          -        .setTimestamp(columnValue.getTimestamp())
          -        .setType(columnValue.getType())
          -        .setValue(columnValue.getValue())
          -        .setTags(columnValue.getTags())
          -        .build();
          +    return cellBuilder.clear().setRow(row).setFamily(columnValue.getFamily())
          +        .setQualifier(columnValue.getQualifier()).setTimestamp(columnValue.getTimestamp())
          +        .setType(columnValue.getType()).setValue(columnValue.getValue())
          +        .setTags(columnValue.getTags()).build();
             }
           
          -
          -
          -
          -
          -
          -
             public static Result resultFromThrift(TResult in) {
               if (in == null) {
                 return null;
               }
          -    if (!in.isSetColumnValues() || in.getColumnValues().isEmpty()){
          +    if (!in.isSetColumnValues() || in.getColumnValues().isEmpty()) {
                 return in.isStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT;
               }
               List cells = new ArrayList<>(in.getColumnValues().size());
          @@ -1277,14 +1309,12 @@ public static TPut putFromHBase(Put in) {
               if (in.getDurability() != Durability.USE_DEFAULT) {
                 out.setDurability(durabilityFromHBase(in.getDurability()));
               }
          -    for (Map.Entry> entry : in.getFamilyCellMap().entrySet()) {
          +    for (Map.Entry> entry : in.getFamilyCellMap().entrySet()) {
                 byte[] family = entry.getKey();
                 for (Cell cell : entry.getValue()) {
                   TColumnValue columnValue = new TColumnValue();
          -        columnValue.setFamily(family)
          -            .setQualifier(CellUtil.cloneQualifier(cell))
          -            .setType(cell.getType().getCode())
          -            .setTimestamp(cell.getTimestamp())
          +        columnValue.setFamily(family).setQualifier(CellUtil.cloneQualifier(cell))
          +            .setType(cell.getType().getCode()).setTimestamp(cell.getTimestamp())
                       .setValue(CellUtil.cloneValue(cell));
                   if (cell.getTagsLength() != 0) {
                     columnValue.setTags(PrivateCellUtil.cloneTags(cell));
          @@ -1294,7 +1324,7 @@ public static TPut putFromHBase(Put in) {
               }
               for (Map.Entry attribute : in.getAttributesMap().entrySet()) {
                 out.putToAttributes(ByteBuffer.wrap(Bytes.toBytes(attribute.getKey())),
          -          ByteBuffer.wrap(attribute.getValue()));
          +        ByteBuffer.wrap(attribute.getValue()));
               }
               try {
                 CellVisibility cellVisibility = in.getCellVisibility();
          @@ -1317,8 +1347,8 @@ public static List putsFromHBase(List in) {
               return out;
             }
           
          -  public static NamespaceDescriptor[] namespaceDescriptorsFromThrift(
          -      List in) {
          +  public static NamespaceDescriptor[]
          +      namespaceDescriptorsFromThrift(List in) {
               NamespaceDescriptor[] out = new NamespaceDescriptor[in.size()];
               int index = 0;
               for (TNamespaceDescriptor descriptor : in) {
          @@ -1342,14 +1372,12 @@ public static TAppend appendFromHBase(Append in) throws IOException {
               if (in.getDurability() != Durability.USE_DEFAULT) {
                 out.setDurability(durabilityFromHBase(in.getDurability()));
               }
          -    for (Map.Entry> entry : in.getFamilyCellMap().entrySet()) {
          +    for (Map.Entry> entry : in.getFamilyCellMap().entrySet()) {
                 byte[] family = entry.getKey();
                 for (Cell cell : entry.getValue()) {
                   TColumnValue columnValue = new TColumnValue();
          -        columnValue.setFamily(family)
          -            .setQualifier(CellUtil.cloneQualifier(cell))
          -            .setType(cell.getType().getCode())
          -            .setTimestamp(cell.getTimestamp())
          +        columnValue.setFamily(family).setQualifier(CellUtil.cloneQualifier(cell))
          +            .setType(cell.getType().getCode()).setTimestamp(cell.getTimestamp())
                       .setValue(CellUtil.cloneValue(cell));
                   if (cell.getTagsLength() != 0) {
                     columnValue.setTags(PrivateCellUtil.cloneTags(cell));
          @@ -1359,7 +1387,7 @@ public static TAppend appendFromHBase(Append in) throws IOException {
               }
               for (Map.Entry attribute : in.getAttributesMap().entrySet()) {
                 out.putToAttributes(ByteBuffer.wrap(Bytes.toBytes(attribute.getKey())),
          -          ByteBuffer.wrap(attribute.getValue()));
          +        ByteBuffer.wrap(attribute.getValue()));
               }
               try {
                 CellVisibility cellVisibility = in.getCellVisibility();
          @@ -1382,19 +1410,19 @@ public static TIncrement incrementFromHBase(Increment in) throws IOException {
               if (in.getDurability() != Durability.USE_DEFAULT) {
                 out.setDurability(durabilityFromHBase(in.getDurability()));
               }
          -    for (Map.Entry> entry : in.getFamilyCellMap().entrySet()) {
          +    for (Map.Entry> entry : in.getFamilyCellMap().entrySet()) {
                 byte[] family = entry.getKey();
                 for (Cell cell : entry.getValue()) {
                   TColumnIncrement columnValue = new TColumnIncrement();
                   columnValue.setFamily(family).setQualifier(CellUtil.cloneQualifier(cell));
                   columnValue.setAmount(
          -            Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
          +          Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
                   out.addToColumns(columnValue);
                 }
               }
               for (Map.Entry attribute : in.getAttributesMap().entrySet()) {
                 out.putToAttributes(ByteBuffer.wrap(Bytes.toBytes(attribute.getKey())),
          -          ByteBuffer.wrap(attribute.getValue()));
          +        ByteBuffer.wrap(attribute.getValue()));
               }
               try {
                 CellVisibility cellVisibility = in.getCellVisibility();
          @@ -1416,9 +1444,9 @@ public static TRowMutations rowMutationsFromHBase(RowMutations in) {
               for (Mutation mutation : in.getMutations()) {
                 TMutation tMutation = new TMutation();
                 if (mutation instanceof Put) {
          -        tMutation.setPut(ThriftUtilities.putFromHBase((Put)mutation));
          +        tMutation.setPut(ThriftUtilities.putFromHBase((Put) mutation));
                 } else if (mutation instanceof Delete) {
          -        tMutation.setDeleteSingle(ThriftUtilities.deleteFromHBase((Delete)mutation));
          +        tMutation.setDeleteSingle(ThriftUtilities.deleteFromHBase((Delete) mutation));
                 } else {
                   throw new IllegalArgumentException(
                       "Only Put and Delete is supported in mutateRow, but muation=" + mutation);
          @@ -1430,16 +1458,25 @@ public static TRowMutations rowMutationsFromHBase(RowMutations in) {
           
             public static TCompareOperator compareOpFromHBase(CompareOperator compareOp) {
               switch (compareOp) {
          -      case LESS: return TCompareOperator.LESS;
          -      case LESS_OR_EQUAL: return TCompareOperator.LESS_OR_EQUAL;
          -      case EQUAL: return TCompareOperator.EQUAL;
          -      case NOT_EQUAL: return TCompareOperator.NOT_EQUAL;
          -      case GREATER_OR_EQUAL: return TCompareOperator.GREATER_OR_EQUAL;
          -      case GREATER: return TCompareOperator.GREATER;
          -      case NO_OP: return TCompareOperator.NO_OP;
          -      default: return null;
          +      case LESS:
          +        return TCompareOperator.LESS;
          +      case LESS_OR_EQUAL:
          +        return TCompareOperator.LESS_OR_EQUAL;
          +      case EQUAL:
          +        return TCompareOperator.EQUAL;
          +      case NOT_EQUAL:
          +        return TCompareOperator.NOT_EQUAL;
          +      case GREATER_OR_EQUAL:
          +        return TCompareOperator.GREATER_OR_EQUAL;
          +      case GREATER:
          +        return TCompareOperator.GREATER;
          +      case NO_OP:
          +        return TCompareOperator.NO_OP;
          +      default:
          +        return null;
               }
             }
          +
             public static List splitKeyFromHBase(byte[][] in) {
               if (in == null || in.length == 0) {
                 return null;
          @@ -1485,15 +1522,11 @@ public static Set getServerNamesFromThrift(Set tServerN
               if (CollectionUtils.isEmpty(tServerNames)) {
                 return Collections.emptySet();
               }
          -    return tServerNames.stream().map(tServerName ->
          -      ServerName.valueOf(tServerName.getHostName(),
          -        tServerName.getPort(),
          -        tServerName.getStartCode()))
          -      .collect(Collectors.toSet());
          +    return tServerNames.stream().map(tServerName -> ServerName.valueOf(tServerName.getHostName(),
          +      tServerName.getPort(), tServerName.getStartCode())).collect(Collectors.toSet());
             }
           
          -  public static TLogQueryFilter getSlowLogQueryFromHBase(
          -      LogQueryFilter logQueryFilter) {
          +  public static TLogQueryFilter getSlowLogQueryFromHBase(LogQueryFilter logQueryFilter) {
               TLogQueryFilter tLogQueryFilter = new TLogQueryFilter();
               tLogQueryFilter.setRegionName(logQueryFilter.getRegionName());
               tLogQueryFilter.setClientAddress(logQueryFilter.getClientAddress());
          @@ -1543,8 +1576,7 @@ private static TFilterByOperator getTFilterByFromHBase(final LogQueryFilter logQ
               return tFilterByOperator;
             }
           
          -  public static LogQueryFilter getSlowLogQueryFromThrift(
          -      TLogQueryFilter tLogQueryFilter) {
          +  public static LogQueryFilter getSlowLogQueryFromThrift(TLogQueryFilter tLogQueryFilter) {
               LogQueryFilter logQueryFilter = new LogQueryFilter();
               logQueryFilter.setRegionName(tLogQueryFilter.getRegionName());
               logQueryFilter.setClientAddress(tLogQueryFilter.getClientAddress());
          @@ -1558,8 +1590,8 @@ public static LogQueryFilter getSlowLogQueryFromThrift(
               return logQueryFilter;
             }
           
          -  private static LogQueryFilter.Type getLogTypeFromThrift(
          -      final TLogQueryFilter tSlowLogQueryFilter) {
          +  private static LogQueryFilter.Type
          +      getLogTypeFromThrift(final TLogQueryFilter tSlowLogQueryFilter) {
               LogQueryFilter.Type type;
               switch (tSlowLogQueryFilter.getLogType()) {
                 case SLOW_LOG: {
          @@ -1577,8 +1609,8 @@ private static LogQueryFilter.Type getLogTypeFromThrift(
               return type;
             }
           
          -  private static LogQueryFilter.FilterByOperator getFilterByFromThrift(
          -      final TLogQueryFilter tLogQueryFilter) {
          +  private static LogQueryFilter.FilterByOperator
          +      getFilterByFromThrift(final TLogQueryFilter tLogQueryFilter) {
               LogQueryFilter.FilterByOperator filterByOperator;
               switch (tLogQueryFilter.getFilterByOperator()) {
                 case AND: {
          @@ -1596,67 +1628,74 @@ private static LogQueryFilter.FilterByOperator getFilterByFromThrift(
               return filterByOperator;
             }
           
          -  public static List getSlowLogRecordsFromHBase(
          -      List onlineLogRecords) {
          +  public static List
          +      getSlowLogRecordsFromHBase(List onlineLogRecords) {
               if (CollectionUtils.isEmpty(onlineLogRecords)) {
                 return Collections.emptyList();
               }
          -    return onlineLogRecords.stream()
          -      .map(slowLogRecord -> {
          -        TOnlineLogRecord tOnlineLogRecord = new TOnlineLogRecord();
          -        tOnlineLogRecord.setCallDetails(slowLogRecord.getCallDetails());
          -        tOnlineLogRecord.setClientAddress(slowLogRecord.getClientAddress());
          -        tOnlineLogRecord.setMethodName(slowLogRecord.getMethodName());
          -        tOnlineLogRecord.setMultiGetsCount(slowLogRecord.getMultiGetsCount());
          -        tOnlineLogRecord.setMultiMutationsCount(slowLogRecord.getMultiMutationsCount());
          -        tOnlineLogRecord.setMultiServiceCalls(slowLogRecord.getMultiServiceCalls());
          -        tOnlineLogRecord.setParam(slowLogRecord.getParam());
          -        tOnlineLogRecord.setProcessingTime(slowLogRecord.getProcessingTime());
          -        tOnlineLogRecord.setQueueTime(slowLogRecord.getQueueTime());
          -        tOnlineLogRecord.setRegionName(slowLogRecord.getRegionName());
          -        tOnlineLogRecord.setResponseSize(slowLogRecord.getResponseSize());
          -        tOnlineLogRecord.setServerClass(slowLogRecord.getServerClass());
          -        tOnlineLogRecord.setStartTime(slowLogRecord.getStartTime());
          -        tOnlineLogRecord.setUserName(slowLogRecord.getUserName());
          -        return tOnlineLogRecord;
          -      }).collect(Collectors.toList());
          -  }
          -
          -  public static List getSlowLogRecordsFromThrift(
          -      List tOnlineLogRecords) {
          +    return onlineLogRecords.stream().map(slowLogRecord -> {
          +      TOnlineLogRecord tOnlineLogRecord = new TOnlineLogRecord();
          +      tOnlineLogRecord.setCallDetails(slowLogRecord.getCallDetails());
          +      tOnlineLogRecord.setClientAddress(slowLogRecord.getClientAddress());
          +      tOnlineLogRecord.setMethodName(slowLogRecord.getMethodName());
          +      tOnlineLogRecord.setMultiGetsCount(slowLogRecord.getMultiGetsCount());
          +      tOnlineLogRecord.setMultiMutationsCount(slowLogRecord.getMultiMutationsCount());
          +      tOnlineLogRecord.setMultiServiceCalls(slowLogRecord.getMultiServiceCalls());
          +      tOnlineLogRecord.setParam(slowLogRecord.getParam());
          +      tOnlineLogRecord.setProcessingTime(slowLogRecord.getProcessingTime());
          +      tOnlineLogRecord.setQueueTime(slowLogRecord.getQueueTime());
          +      tOnlineLogRecord.setRegionName(slowLogRecord.getRegionName());
          +      tOnlineLogRecord.setResponseSize(slowLogRecord.getResponseSize());
          +      tOnlineLogRecord.setServerClass(slowLogRecord.getServerClass());
          +      tOnlineLogRecord.setStartTime(slowLogRecord.getStartTime());
          +      tOnlineLogRecord.setUserName(slowLogRecord.getUserName());
          +      return tOnlineLogRecord;
          +    }).collect(Collectors.toList());
          +  }
          +
          +  public static List
          +      getSlowLogRecordsFromThrift(List tOnlineLogRecords) {
               if (CollectionUtils.isEmpty(tOnlineLogRecords)) {
                 return Collections.emptyList();
               }
          -    return tOnlineLogRecords.stream()
          -      .map(tSlowLogRecord -> new OnlineLogRecord.OnlineLogRecordBuilder()
          -        .setCallDetails(tSlowLogRecord.getCallDetails())
          -        .setClientAddress(tSlowLogRecord.getClientAddress())
          -        .setMethodName(tSlowLogRecord.getMethodName())
          -        .setMultiGetsCount(tSlowLogRecord.getMultiGetsCount())
          -        .setMultiMutationsCount(tSlowLogRecord.getMultiMutationsCount())
          -        .setMultiServiceCalls(tSlowLogRecord.getMultiServiceCalls())
          -        .setParam(tSlowLogRecord.getParam())
          -        .setProcessingTime(tSlowLogRecord.getProcessingTime())
          -        .setQueueTime(tSlowLogRecord.getQueueTime())
          -        .setRegionName(tSlowLogRecord.getRegionName())
          -        .setResponseSize(tSlowLogRecord.getResponseSize())
          -        .setServerClass(tSlowLogRecord.getServerClass())
          -        .setStartTime(tSlowLogRecord.getStartTime())
          -        .setUserName(tSlowLogRecord.getUserName())
          -        .build())
          -      .collect(Collectors.toList());
          +    return tOnlineLogRecords.stream().map(
          +      tSlowLogRecord -> new OnlineLogRecord.OnlineLogRecordBuilder()
          +          .setCallDetails(tSlowLogRecord.getCallDetails())
          +          .setClientAddress(tSlowLogRecord.getClientAddress())
          +          .setMethodName(tSlowLogRecord.getMethodName())
          +          .setMultiGetsCount(tSlowLogRecord.getMultiGetsCount())
          +          .setMultiMutationsCount(tSlowLogRecord.getMultiMutationsCount())
          +          .setMultiServiceCalls(tSlowLogRecord.getMultiServiceCalls())
          +          .setParam(tSlowLogRecord.getParam()).setProcessingTime(tSlowLogRecord.getProcessingTime())
          +          .setQueueTime(tSlowLogRecord.getQueueTime()).setRegionName(tSlowLogRecord.getRegionName())
          +          .setResponseSize(tSlowLogRecord.getResponseSize())
          +          .setServerClass(tSlowLogRecord.getServerClass())
          +          .setStartTime(tSlowLogRecord.getStartTime()).setUserName(tSlowLogRecord.getUserName())
          +          .build())
          +        .collect(Collectors.toList());
             }
           
             public static Permission.Action[] permissionActionsFromString(String permission_actions) {
               Set actions = new HashSet<>();
               for (char c : permission_actions.toCharArray()) {
                 switch (c) {
          -        case 'R': actions.add(Permission.Action.READ);   break;
          -        case 'W': actions.add(Permission.Action.WRITE);  break;
          -        case 'C': actions.add(Permission.Action.CREATE); break;
          -        case 'X': actions.add(Permission.Action.EXEC);   break;
          -        case 'A': actions.add(Permission.Action.ADMIN);  break;
          -        default:                                         break;
          +        case 'R':
          +          actions.add(Permission.Action.READ);
          +          break;
          +        case 'W':
          +          actions.add(Permission.Action.WRITE);
          +          break;
          +        case 'C':
          +          actions.add(Permission.Action.CREATE);
          +          break;
          +        case 'X':
          +          actions.add(Permission.Action.EXEC);
          +          break;
          +        case 'A':
          +          actions.add(Permission.Action.ADMIN);
          +          break;
          +        default:
          +          break;
                 }
               }
               return actions.toArray(new Permission.Action[0]);
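
The ThriftUtilities helpers touched above convert between HBase client objects (Get, Put, Delete, Scan, descriptors) and their Thrift counterparts (TGet, TPut, TDelete, TScan, and so on). A minimal sketch of the round trip these helpers provide, assuming direct use of the internal ThriftUtilities class (which application code would not normally call) and an illustrative wrapper class name, might look like:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
import org.apache.hadoop.hbase.thrift2.generated.TPut;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative class name; not part of the patch.
public class ThriftPutRoundTrip {
  public static void main(String[] args) {
    // Build a plain HBase Put with a single cell.
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));

    // Convert to the Thrift representation and back using the helpers above.
    TPut tPut = ThriftUtilities.putFromHBase(put);
    Put roundTripped = ThriftUtilities.putFromThrift(tPut);

    // The row key survives the round trip.
    System.out.println(Bytes.toString(roundTripped.getRow()));
  }
}
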
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
          index 437bfca5a2d9..a59393a9f2bf 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
          @@ -27,7 +27,6 @@
           import java.util.regex.Pattern;
           import org.apache.commons.lang3.NotImplementedException;
           import org.apache.hadoop.conf.Configuration;
          -import org.apache.hadoop.hbase.client.BalanceRequest;
           import org.apache.hadoop.hbase.CacheEvictionStats;
           import org.apache.hadoop.hbase.ClusterMetrics;
           import org.apache.hadoop.hbase.HConstants;
          @@ -39,17 +38,18 @@
           import org.apache.hadoop.hbase.TableName;
           import org.apache.hadoop.hbase.TableNotFoundException;
           import org.apache.hadoop.hbase.client.Admin;
          +import org.apache.hadoop.hbase.client.BalanceRequest;
           import org.apache.hadoop.hbase.client.BalanceResponse;
           import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
           import org.apache.hadoop.hbase.client.CompactType;
           import org.apache.hadoop.hbase.client.CompactionState;
           import org.apache.hadoop.hbase.client.Connection;
          -import org.apache.hadoop.hbase.client.ServerType;
           import org.apache.hadoop.hbase.client.LogEntry;
           import org.apache.hadoop.hbase.client.LogQueryFilter;
           import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
           import org.apache.hadoop.hbase.client.OnlineLogRecord;
           import org.apache.hadoop.hbase.client.RegionInfo;
          +import org.apache.hadoop.hbase.client.ServerType;
           import org.apache.hadoop.hbase.client.SnapshotDescription;
           import org.apache.hadoop.hbase.client.SnapshotType;
           import org.apache.hadoop.hbase.client.TableDescriptor;
          @@ -92,7 +92,6 @@ public class ThriftAdmin implements Admin {
             private int syncWaitTimeout;
             private Configuration conf;
           
          -
             public ThriftAdmin(THBaseService.Client client, TTransport tTransport, Configuration conf) {
               this.client = client;
               this.transport = tTransport;
          @@ -166,8 +165,8 @@ public List listTableDescriptors(Pattern pattern, boolean inclu
                 throws IOException {
               try {
                 String regex = (pattern == null ? null : pattern.toString());
          -      List tTableDescriptors = client
          -          .getTableDescriptorsByPattern(regex, includeSysTables);
          +      List tTableDescriptors =
          +          client.getTableDescriptorsByPattern(regex, includeSysTables);
                 return ThriftUtilities.tableDescriptorsFromThrift(tTableDescriptors);
           
               } catch (TException e) {
          @@ -211,8 +210,8 @@ public TableDescriptor getDescriptor(TableName tableName)
             @Override
             public List listTableDescriptorsByNamespace(byte[] name) throws IOException {
               try {
          -      List tTableDescriptors = client
          -          .getTableDescriptorsByNamespace(Bytes.toString(name));
          +      List tTableDescriptors =
          +          client.getTableDescriptorsByNamespace(Bytes.toString(name));
                 return ThriftUtilities.tableDescriptorsFromThrift(tTableDescriptors);
               } catch (TException e) {
                 throw new IOException(e);
          @@ -237,17 +236,17 @@ public void createTable(TableDescriptor desc) throws IOException {
             @Override
             public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
                 throws IOException {
          -    if(numRegions < 3) {
          +    if (numRegions < 3) {
                 throw new IllegalArgumentException("Must create at least three regions");
          -    } else if(Bytes.compareTo(startKey, endKey) >= 0) {
          +    } else if (Bytes.compareTo(startKey, endKey) >= 0) {
                 throw new IllegalArgumentException("Start key must be smaller than end key");
               }
               if (numRegions == 3) {
          -      createTable(desc, new byte[][]{startKey, endKey});
          +      createTable(desc, new byte[][] { startKey, endKey });
                 return;
               }
          -    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
          -    if(splitKeys == null || splitKeys.length != numRegions - 1) {
          +    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
          +    if (splitKeys == null || splitKeys.length != numRegions - 1) {
                 throw new IllegalArgumentException("Unable to split key range into enough regions");
               }
               createTable(desc, splitKeys);
          @@ -338,8 +337,8 @@ public boolean isTableAvailable(TableName tableName) throws IOException {
             public void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
                 throws IOException {
               TTableName tTableName = ThriftUtilities.tableNameFromHBase(tableName);
          -    TColumnFamilyDescriptor tColumnFamilyDescriptor = ThriftUtilities
          -        .columnFamilyDescriptorFromHBase(columnFamily);
          +    TColumnFamilyDescriptor tColumnFamilyDescriptor =
          +        ThriftUtilities.columnFamilyDescriptorFromHBase(columnFamily);
               try {
                 client.addColumnFamily(tTableName, tColumnFamilyDescriptor);
               } catch (TException e) {
          @@ -361,8 +360,8 @@ public void deleteColumnFamily(TableName tableName, byte[] columnFamily) throws
             public void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
                 throws IOException {
               TTableName tTableName = ThriftUtilities.tableNameFromHBase(tableName);
          -    TColumnFamilyDescriptor tColumnFamilyDescriptor = ThriftUtilities
          -        .columnFamilyDescriptorFromHBase(columnFamily);
          +    TColumnFamilyDescriptor tColumnFamilyDescriptor =
          +        ThriftUtilities.columnFamilyDescriptorFromHBase(columnFamily);
               try {
                 client.modifyColumnFamily(tTableName, tColumnFamilyDescriptor);
               } catch (TException e) {
          @@ -372,8 +371,7 @@ public void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor colum
           
             @Override
             public void modifyTable(TableDescriptor td) throws IOException {
          -    TTableDescriptor tTableDescriptor = ThriftUtilities
          -        .tableDescriptorFromHBase(td);
          +    TTableDescriptor tTableDescriptor = ThriftUtilities.tableDescriptorFromHBase(td);
               try {
                 client.modifyTable(tTableDescriptor);
               } catch (TException e) {
          @@ -383,8 +381,8 @@ public void modifyTable(TableDescriptor td) throws IOException {
           
             @Override
             public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
          -    TNamespaceDescriptor tNamespaceDescriptor = ThriftUtilities
          -        .namespaceDescriptorFromHBase(descriptor);
          +    TNamespaceDescriptor tNamespaceDescriptor =
          +        ThriftUtilities.namespaceDescriptorFromHBase(descriptor);
               try {
                 client.modifyNamespace(tNamespaceDescriptor);
               } catch (TException e) {
          @@ -434,8 +432,8 @@ public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
           
             @Override
             public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
          -    TNamespaceDescriptor tNamespaceDescriptor = ThriftUtilities
          -        .namespaceDescriptorFromHBase(descriptor);
          +    TNamespaceDescriptor tNamespaceDescriptor =
          +        ThriftUtilities.namespaceDescriptorFromHBase(descriptor);
               try {
                 client.createNamespace(tNamespaceDescriptor);
               } catch (TException e) {
          @@ -862,6 +860,7 @@ public void restoreSnapshot(String snapshotName) {
               throw new NotImplementedException("restoreSnapshot not supported in ThriftAdmin");
           
             }
          +
             @Override
             public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot,
                 boolean restoreAcl) {
          @@ -870,7 +869,7 @@ public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot,
           
             @Override
             public Future cloneSnapshotAsync(String snapshotName, TableName tableName, boolean cloneAcl,
          -    String customSFT) throws IOException, TableExistsException, RestoreSnapshotException {
          +      String customSFT) throws IOException, TableExistsException, RestoreSnapshotException {
               throw new NotImplementedException("cloneSnapshotAsync not supported in ThriftAdmin");
             }
           
          @@ -895,6 +894,7 @@ public boolean isProcedureFinished(String signature, String instance, Map listSnapshots() {
               throw new NotImplementedException("listSnapshots not supported in ThriftAdmin");
             }
          +
             @Override
              public List<SnapshotDescription> listSnapshots(Pattern pattern) {
               throw new NotImplementedException("listSnapshots not supported in ThriftAdmin");
          @@ -1023,6 +1023,7 @@ public List listReplicationPeers() {
              public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) {
               throw new NotImplementedException("listReplicationPeers not supported in ThriftAdmin");
             }
          +
             @Override
              public Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
                 SyncReplicationState state) {
          @@ -1141,10 +1142,10 @@ public Map getSpaceQuotaTableSizes() throws IOException {
             }
           
             @Override
          -  public Map getRegionServerSpaceQuotaSnapshots(
          -      ServerName serverName) throws IOException {
          +  public Map
          +      getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException {
               throw new NotImplementedException(
          -      "getRegionServerSpaceQuotaSnapshots not supported in ThriftAdmin");
          +        "getRegionServerSpaceQuotaSnapshots not supported in ThriftAdmin");
             }
           
             @Override
          @@ -1168,8 +1169,8 @@ public void revoke(UserPermission userPermission) {
             }
           
             @Override
           -  public List<UserPermission> getUserPermissions(
          -      GetUserPermissionsRequest getUserPermissionsRequest) {
           +  public List<UserPermission>
          +      getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest) {
               throw new NotImplementedException("getUserPermissions not supported in ThriftAdmin");
             }
           
          @@ -1192,11 +1193,10 @@ public boolean isSnapshotCleanupEnabled() {
              public List<OnlineLogRecord> getSlowLogResponses(final Set<ServerName> serverNames,
                 final LogQueryFilter logQueryFilter) throws IOException {
                Set<TServerName> tServerNames = ThriftUtilities.getServerNamesFromHBase(serverNames);
          -    TLogQueryFilter tLogQueryFilter =
          -      ThriftUtilities.getSlowLogQueryFromHBase(logQueryFilter);
          +    TLogQueryFilter tLogQueryFilter = ThriftUtilities.getSlowLogQueryFromHBase(logQueryFilter);
               try {
                  List<TOnlineLogRecord> tOnlineLogRecords =
          -        client.getSlowLogResponses(tServerNames, tLogQueryFilter);
          +          client.getSlowLogResponses(tServerNames, tLogQueryFilter);
                 return ThriftUtilities.getSlowLogRecordsFromThrift(tOnlineLogRecords);
               } catch (TException e) {
                 throw new IOException(e);
          @@ -1204,8 +1204,7 @@ public List getSlowLogResponses(final Set serverNam
             }
           
             @Override
           -  public List<Boolean> clearSlowLogResponses(final Set<ServerName> serverNames)
          -      throws IOException {
           +  public List<Boolean> clearSlowLogResponses(final Set<ServerName> serverNames) throws IOException {
                Set<TServerName> tServerNames = ThriftUtilities.getServerNamesFromHBase(serverNames);
               try {
                 return client.clearSlowLogResponses(tServerNames);
          @@ -1276,7 +1275,7 @@ public List listTablesInRSGroup(String groupName) throws IOException
           
             @Override
              public Pair<List<String>, List<TableName>>
          -    getConfiguredNamespacesAndTablesInRSGroup(String groupName) throws IOException {
          +      getConfiguredNamespacesAndTablesInRSGroup(String groupName) throws IOException {
               throw new NotImplementedException("setRSGroup not supported in ThriftAdmin");
             }
           
          @@ -1293,22 +1292,21 @@ public void updateRSGroupConfig(String groupName, Map configurat
           
             @Override
              public List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType,
           -      ServerType serverType, int limit, Map<String, Object> filterParams)
          -      throws IOException {
           +      ServerType serverType, int limit, Map<String, Object> filterParams) throws IOException {
               throw new NotImplementedException("getLogEntries not supported in ThriftAdmin");
             }
           
             @Override
              public Future<Void> modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] family,
          -    String dstSFT) throws IOException {
          +      String dstSFT) throws IOException {
               throw new NotImplementedException(
          -      "modifyColumnFamilyStoreFileTrackerAsync not supported in ThriftAdmin");
          +        "modifyColumnFamilyStoreFileTrackerAsync not supported in ThriftAdmin");
             }
           
             @Override
              public Future<Void> modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT)
          -    throws IOException {
          +      throws IOException {
               throw new NotImplementedException(
          -      "modifyTableStoreFileTrackerAsync not supported in ThriftAdmin");
          +        "modifyTableStoreFileTrackerAsync not supported in ThriftAdmin");
             }
           }
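
A note on the ThriftAdmin hunks above: beyond the whitespace reflow, the class simply converts HBase descriptors to their Thrift counterparts through ThriftUtilities and delegates to the generated THBaseService client. The sketch below shows how those admin calls might be reached from application code. It is illustrative only: it assumes a running thrift2 server whose host/port are already present in the Configuration, the "hbase.client.connection.impl" property is assumed to select ThriftConnection, and ThriftAdminSketch, demo_ns, demo_table and cf are made-up names.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.thrift2.client.ThriftConnection;

public final class ThriftAdminSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: this property routes ConnectionFactory to the thrift2 client;
    // the thrift server host/port settings are expected to be configured elsewhere.
    conf.set("hbase.client.connection.impl", ThriftConnection.class.getName());
    try (Connection connection = ConnectionFactory.createConnection(conf);
      Admin admin = connection.getAdmin()) {
      // Both calls go through the ThriftUtilities conversions shown in the hunks above.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      admin.modifyColumnFamily(TableName.valueOf("demo_table"),
        ColumnFamilyDescriptorBuilder.of("cf"));
    }
  }
}
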
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftClientBuilder.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftClientBuilder.java
          index 3c11c98b32ab..c7d4d77cee55 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftClientBuilder.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftClientBuilder.java
          @@ -1,5 +1,4 @@
          -/**
          - *
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -19,7 +18,6 @@
           package org.apache.hadoop.hbase.thrift2.client;
           
           import java.io.IOException;
          -
           import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
           import org.apache.hadoop.hbase.util.Pair;
           import org.apache.thrift.transport.TTransport;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
          index 0e58afe4acbd..20091cd1e5c9 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
          @@ -1,5 +1,4 @@
          -/**
          - *
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -101,18 +100,17 @@ public ThriftConnection(Configuration conf, ExecutorService pool, final User use
               this.isFramed = conf.getBoolean(Constants.FRAMED_CONF_KEY, Constants.FRAMED_CONF_DEFAULT);
               this.isCompact = conf.getBoolean(Constants.COMPACT_CONF_KEY, Constants.COMPACT_CONF_DEFAULT);
               this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
          -        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
          +      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
               this.connectTimeout = conf.getInt(SOCKET_TIMEOUT_CONNECT, DEFAULT_SOCKET_TIMEOUT_CONNECT);
           
               String className = conf.get(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
          -        DefaultThriftClientBuilder.class.getName());
          +      DefaultThriftClientBuilder.class.getName());
               try {
                  Class<?> clazz = Class.forName(className);
           -      Constructor<?> constructor = clazz
          -          .getDeclaredConstructor(ThriftConnection.class);
           +      Constructor<?> constructor = clazz.getDeclaredConstructor(ThriftConnection.class);
                 constructor.setAccessible(true);
                 clientBuilder = (ThriftClientBuilder) constructor.newInstance(this);
          -    }catch (Exception e) {
          +    } catch (Exception e) {
                 throw new IOException(e);
               }
             }
          @@ -151,12 +149,10 @@ public int getConnectTimeout() {
             }
           
             /**
          -   * the default thrift client builder.
          -   * One can extend the ThriftClientBuilder to builder custom client, implement
          -   * features like authentication(hbase-examples/thrift/DemoClient)
          -   *
          +   * the default thrift client builder. One can extend the ThriftClientBuilder to builder custom
          +   * client, implement features like authentication(hbase-examples/thrift/DemoClient)
              */
          -  public static class DefaultThriftClientBuilder extends ThriftClientBuilder  {
          +  public static class DefaultThriftClientBuilder extends ThriftClientBuilder {
           
               @Override
                public Pair<THBaseService.Client, TTransport> getClient() throws IOException {
          @@ -190,13 +186,12 @@ public DefaultThriftClientBuilder(ThriftConnection connection) {
             }
           
             /**
          -   * the default thrift http client builder.
          -   * One can extend the ThriftClientBuilder to builder custom http client, implement
          -   * features like authentication or 'DoAs'(hbase-examples/thrift/HttpDoAsClient)
          -   *
          +   * the default thrift http client builder. One can extend the ThriftClientBuilder to builder
          +   * custom http client, implement features like authentication or
          +   * 'DoAs'(hbase-examples/thrift/HttpDoAsClient)
              */
             public static class HTTPThriftClientBuilder extends ThriftClientBuilder {
          -    Map customHeader = new HashMap<>();
          +    Map customHeader = new HashMap<>();
           
               public HTTPThriftClientBuilder(ThriftConnection connection) {
                 super(connection);
          @@ -209,7 +204,7 @@ public void addCostumHeader(String key, String value) {
               @Override
               public Pair getClient() throws IOException {
                 Preconditions.checkArgument(connection.getHost().startsWith("http"),
          -          "http client host must start with http or https");
          +        "http client host must start with http or https");
                 String url = connection.getHost() + ":" + connection.getPort();
                 try {
                   THttpClient httpClient = new THttpClient(url, connection.getHttpClient());
          @@ -242,10 +237,8 @@ public static class DelayRetryHandler extends DefaultHttpRequestRetryHandler {
               private long pause;
           
               public DelayRetryHandler(int retryCount, long pause) {
          -      super(retryCount, true, Arrays.asList(
          -          InterruptedIOException.class,
          -          UnknownHostException.class,
          -          SSLException.class));
          +      super(retryCount, true, Arrays.asList(InterruptedIOException.class,
          +        UnknownHostException.class, SSLException.class));
                 this.pause = pause;
               }
           
          @@ -257,7 +250,7 @@ public boolean retryRequest(IOException exception, int executionCount, HttpConte
                     long sleepTime = ConnectionUtils.getPauseTime(pause, executionCount - 1);
                     Thread.sleep(sleepTime);
                   } catch (InterruptedException ie) {
          -          //reset interrupt marker
          +          // reset interrupt marker
                     Thread.currentThread().interrupt();
                   }
                 }
          @@ -275,7 +268,7 @@ public synchronized HttpClient getHttpClient() {
                 return httpClient;
               }
               int retry = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
          -        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
          +      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
               long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, 5);
               HttpClientBuilder builder = HttpClientBuilder.create();
               RequestConfig.Builder requestBuilder = RequestConfig.custom();
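
The reflowed Javadoc above points out that ThriftClientBuilder can be extended to add authentication (the DemoClient / HttpDoAsClient examples). A minimal sketch of such an extension follows; the class name AuthHeaderThriftClientBuilder and the token value are hypothetical, and it assumes Constants here is the org.apache.hadoop.hbase.thrift.Constants class that ThriftConnection already uses. ThriftConnection picks the builder up reflectively through its (ThriftConnection) constructor, as the reformatted constructor lookup above shows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.thrift.Constants;
import org.apache.hadoop.hbase.thrift2.client.ThriftConnection;

/** Hypothetical builder that adds an Authorization header to every thrift HTTP request. */
public class AuthHeaderThriftClientBuilder extends ThriftConnection.HTTPThriftClientBuilder {

  public AuthHeaderThriftClientBuilder(ThriftConnection connection) {
    super(connection);
    // addCostumHeader is the method name as it exists on HTTPThriftClientBuilder above.
    addCostumHeader("Authorization", "Bearer dummy-token");
  }

  /** Point ThriftConnection at this builder; the config key is the one read in the diff above. */
  public static void register(Configuration conf) {
    conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
      AuthHeaderThriftClientBuilder.class.getName());
  }
}
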
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java
          index 487c2ca7d963..a0e78e5a3508 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java
          @@ -1,5 +1,4 @@
          -/**
          - *
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -93,10 +92,9 @@ public ThriftTable(TableName tableName, THBaseService.Client client, TTransport
               this.tTransport = tTransport;
               this.client = client;
               this.scannerCaching = conf.getInt(HBASE_THRIFT_CLIENT_SCANNER_CACHING,
          -        HBASE_THRIFT_CLIENT_SCANNER_CACHING_DEFAULT);
          +      HBASE_THRIFT_CLIENT_SCANNER_CACHING_DEFAULT);
               this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
          -        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
          -
          +      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
           
             }
           
          @@ -113,8 +111,8 @@ public Configuration getConfiguration() {
             @Override
             public TableDescriptor getDescriptor() throws IOException {
               try {
          -      TTableDescriptor tableDescriptor = client
          -          .getTableDescriptor(ThriftUtilities.tableNameFromHBase(tableName));
          +      TTableDescriptor tableDescriptor =
          +          client.getTableDescriptor(ThriftUtilities.tableNameFromHBase(tableName));
                 return ThriftUtilities.tableDescriptorFromThrift(tableDescriptor);
               } catch (TException e) {
                 throw new IOException(e);
          @@ -126,7 +124,7 @@ public boolean exists(Get get) throws IOException {
               TGet tGet = ThriftUtilities.getFromHBase(get);
               try {
                 return client.exists(tableNameInBytes, tGet);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
          @@ -134,24 +132,22 @@ public boolean exists(Get get) throws IOException {
             @Override
              public boolean[] exists(List<Get> gets) throws IOException {
                List<TGet> tGets = new ArrayList<>();
          -    for (Get get: gets) {
          +    for (Get get : gets) {
                 tGets.add(ThriftUtilities.getFromHBase(get));
               }
               try {
                  List<Boolean> results = client.existsAll(tableNameInBytes, tGets);
                 return Booleans.toArray(results);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
           
             @Override
           -  public void batch(List<? extends Row> actions, Object[] results)
          -      throws IOException {
           +  public void batch(List<? extends Row> actions, Object[] results) throws IOException {
               throw new IOException("Batch not supported in ThriftTable, use put(List puts), "
                   + "get(List gets) or delete(List deletes) respectively");
           
          -
             }
           
             @Override
          @@ -167,7 +163,7 @@ public Result get(Get get) throws IOException {
               try {
                 TResult tResult = client.get(tableNameInBytes, tGet);
                 return ThriftUtilities.resultFromThrift(tResult);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
          @@ -178,28 +174,26 @@ public Result[] get(List gets) throws IOException {
               try {
                  List<TResult> results = client.getMultiple(tableNameInBytes, tGets);
                 return ThriftUtilities.resultsFromThrift(results);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
           
             /**
          -   * A scanner to perform scan from thrift server
          -   * getScannerResults is used in this scanner
          +   * A scanner to perform scan from thrift server getScannerResults is used in this scanner
              */
             private class Scanner implements ResultScanner {
               protected TScan scan;
               protected Result lastResult = null;
                protected final Queue<Result> cache = new ArrayDeque<>();;
           
          -
               public Scanner(Scan scan) throws IOException {
                 if (scan.getBatch() > 0) {
                   throw new IOException("Batch is not supported in Scanner");
                 }
                 if (scan.getCaching() <= 0) {
                   scan.setCaching(scannerCaching);
          -      } else if (scan.getCaching() == 1 && scan.isReversed()){
          +      } else if (scan.getCaching() == 1 && scan.isReversed()) {
                   // for reverse scan, we need to pass the last row to the next scanner
                   // we need caching number bigger than 1
                   scan.setCaching(scan.getCaching() + 1);
          @@ -207,14 +201,13 @@ public Scanner(Scan scan) throws IOException {
                 this.scan = ThriftUtilities.scanFromHBase(scan);
               }
           
          -
               @Override
               public Result next() throws IOException {
                 if (cache.size() == 0) {
                   setupNextScanner();
                   try {
           -          List<TResult> tResults = client
          -              .getScannerResults(tableNameInBytes, scan, scan.getCaching());
           +          List<TResult> tResults =
          +              client.getScannerResults(tableNameInBytes, scan, scan.getCaching());
                     Result[] results = ThriftUtilities.resultsFromThrift(tResults);
                     boolean firstKey = true;
                     for (Result result : results) {
          @@ -241,7 +234,7 @@ public Result next() throws IOException {
                 if (cache.size() > 0) {
                   return cache.poll();
                 } else {
          -        //scan finished
          +        // scan finished
                   return null;
                 }
               }
          @@ -261,11 +254,11 @@ public ScanMetrics getScanMetrics() {
               }
           
               private void setupNextScanner() {
          -      //if lastResult is null null, it means it is not the fist scan
          -      if (lastResult!= null) {
          +      // if lastResult is null null, it means it is not the fist scan
          +      if (lastResult != null) {
                   byte[] lastRow = lastResult.getRow();
                   if (scan.isReversed()) {
          -          //for reverse scan, we can't find the closet row before this row
          +          // for reverse scan, we can't find the closet row before this row
                     scan.setStartRow(lastRow);
                   } else {
                     scan.setStartRow(createClosestRowAfter(lastRow));
          @@ -273,7 +266,6 @@ private void setupNextScanner() {
                 }
               }
           
          -
               /**
                * Create the closest row after the specified row
                */
          @@ -309,7 +301,7 @@ public void put(Put put) throws IOException {
               TPut tPut = ThriftUtilities.putFromHBase(put);
               try {
                 client.put(tableNameInBytes, tPut);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
          @@ -319,7 +311,7 @@ public void put(List puts) throws IOException {
                List<TPut> tPuts = ThriftUtilities.putsFromHBase(puts);
               try {
                 client.putMultiple(tableNameInBytes, tPuts);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
          @@ -329,7 +321,7 @@ public void delete(Delete delete) throws IOException {
               TDelete tDelete = ThriftUtilities.deleteFromHBase(delete);
               try {
                 client.deleteSingle(tableNameInBytes, tDelete);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
          @@ -339,7 +331,7 @@ public void delete(List deletes) throws IOException {
                List<TDelete> tDeletes = ThriftUtilities.deletesFromHBase(deletes);
               try {
                 client.deleteMultiple(tableNameInBytes, tDeletes);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
          @@ -359,8 +351,8 @@ private class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder {
           
               @Override
               public CheckAndMutateBuilder qualifier(byte[] qualifier) {
          -      this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" +
          -          " an empty byte array, or just do not call this method if you want a null qualifier");
          +      this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using"
          +          + " an empty byte array, or just do not call this method if you want a null qualifier");
                 return this;
               }
           
          @@ -384,8 +376,8 @@ public CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value)
               }
           
               private void preCheck() {
          -      Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by" +
          -          " calling ifNotExists/ifEquals/ifMatches before executing the request");
          +      Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by"
          +          + " calling ifNotExists/ifEquals/ifMatches before executing the request");
               }
           
               @Override
          @@ -449,7 +441,7 @@ public Result mutateRow(RowMutations rm) throws IOException {
               try {
                 client.mutateRow(tableNameInBytes, tRowMutations);
                 return Result.EMPTY_RESULT;
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
          @@ -460,7 +452,7 @@ public Result append(Append append) throws IOException {
               try {
                 TResult tResult = client.append(tableNameInBytes, tAppend);
                 return ThriftUtilities.resultFromThrift(tResult);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
          @@ -471,7 +463,7 @@ public Result increment(Increment increment) throws IOException {
               try {
                 TResult tResult = client.increment(tableNameInBytes, tIncrement);
                 return ThriftUtilities.resultFromThrift(tResult);
          -    }  catch (TException e) {
          +    } catch (TException e) {
                 throw new IOException(e);
               }
             }
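
The ThriftTable changes above are purely stylistic (catch spacing, comment spacing, line wrapping); functionally the class forwards Table operations to THBaseService, and its Scanner inner class pages results through getScannerResults. A usage sketch under the same assumptions as the admin sketch earlier (thrift2 server reachable, connection implementation configured); demo_table, cf, q and the row keys are illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class ThriftTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // assumed pre-configured for thrift2
    try (Connection connection = ConnectionFactory.createConnection(conf);
      Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      table.put(new Put(Bytes.toBytes("row1")).addColumn(Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("v1")));
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
      // Each scanner batch is fetched with getScannerResults, per the Scanner class above;
      // caching falls back to the thrift client scanner-caching setting when unset.
      try (ResultScanner scanner = table.getScanner(new Scan().setCaching(20))) {
        for (Result row : scanner) {
          System.out.println(Bytes.toString(row.getRow()));
        }
      }
    }
  }
}
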
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/NamespaceDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/NamespaceDescriptor.java
          index 3f9f51242478..7d03e24ccec6 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/NamespaceDescriptor.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/NamespaceDescriptor.java
          @@ -1,59 +1,79 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.9.3)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          +import java.nio.ByteBuffer;
          +import java.util.ArrayList;
          +import java.util.Arrays;
          +import java.util.BitSet;
          +import java.util.Collections;
          +import java.util.EnumMap;
          +import java.util.EnumSet;
          +import java.util.HashMap;
          +import java.util.HashSet;
          +import java.util.List;
          +import java.util.Map;
          +import java.util.Set;
          +import javax.annotation.Generated;
          +import org.apache.thrift.EncodingUtils;
          +import org.apache.thrift.TException;
          +import org.apache.thrift.async.AsyncMethodCallback;
          +import org.apache.thrift.protocol.TProtocolException;
          +import org.apache.thrift.protocol.TTupleProtocol;
           import org.apache.thrift.scheme.IScheme;
           import org.apache.thrift.scheme.SchemeFactory;
           import org.apache.thrift.scheme.StandardScheme;
          -
           import org.apache.thrift.scheme.TupleScheme;
          -import org.apache.thrift.protocol.TTupleProtocol;
          -import org.apache.thrift.protocol.TProtocolException;
          -import org.apache.thrift.EncodingUtils;
          -import org.apache.thrift.TException;
          -import org.apache.thrift.async.AsyncMethodCallback;
           import org.apache.thrift.server.AbstractNonblockingServer.*;
          -import java.util.List;
          -import java.util.ArrayList;
          -import java.util.Map;
          -import java.util.HashMap;
          -import java.util.EnumMap;
          -import java.util.Set;
          -import java.util.HashSet;
          -import java.util.EnumSet;
          -import java.util.Collections;
          -import java.util.BitSet;
          -import java.nio.ByteBuffer;
          -import java.util.Arrays;
          -import javax.annotation.Generated;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked" })
           @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2018-12-26")
          -public class NamespaceDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NamespaceDescriptor");
          -
          -  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC = new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP, (short)2);
          -
          -  private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>();
          +public class NamespaceDescriptor
          +    implements org.apache.thrift.TBase,
          +    java.io.Serializable, Cloneable, Comparable {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("NamespaceDescriptor");
          +
          +  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP,
          +          (short) 2);
          +
          +  private static final Map, SchemeFactory> schemes =
          +      new HashMap, SchemeFactory>();
             static {
               schemes.put(StandardScheme.class, new NamespaceDescriptorStandardSchemeFactory());
               schemes.put(TupleScheme.class, new NamespaceDescriptorTupleSchemeFactory());
             }
           
             public String name; // required
          -  public Map configuration; // optional
          +  public Map configuration; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    NAME((short)1, "name"),
          -    CONFIGURATION((short)2, "configuration");
          +    NAME((short) 1, "name"), CONFIGURATION((short) 2, "configuration");
           
               private static final Map byName = new HashMap();
           
          @@ -67,7 +87,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                * Find the _Fields constant that matches fieldId, or null if its not found.
                */
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // NAME
                     return NAME;
                   case 2: // CONFIGURATION
          @@ -78,12 +98,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -112,26 +132,33 @@ public String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.CONFIGURATION};
          +  private static final _Fields optionals[] = { _Fields.CONFIGURATION };
             public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.CONFIGURATION, new org.apache.thrift.meta_data.FieldMetaData("configuration", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
          +    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("name",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.CONFIGURATION,
          +      new org.apache.thrift.meta_data.FieldMetaData("configuration",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING))));
               metaDataMap = Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NamespaceDescriptor.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NamespaceDescriptor.class,
          +      metaDataMap);
             }
           
             public NamespaceDescriptor() {
             }
           
          -  public NamespaceDescriptor(
          -    String name)
          -  {
          +  public NamespaceDescriptor(String name) {
               this();
               this.name = name;
             }
          @@ -144,7 +171,7 @@ public NamespaceDescriptor(NamespaceDescriptor other) {
                 this.name = other.name;
               }
               if (other.isSetConfiguration()) {
          -      Map __this__configuration = new HashMap(other.configuration);
          +      Map __this__configuration = new HashMap(other.configuration);
                 this.configuration = __this__configuration;
               }
             }
          @@ -189,16 +216,16 @@ public int getConfigurationSize() {
           
             public void putToConfiguration(String key, String val) {
               if (this.configuration == null) {
          -      this.configuration = new HashMap();
          +      this.configuration = new HashMap();
               }
               this.configuration.put(key, val);
             }
           
          -  public Map getConfiguration() {
          +  public Map getConfiguration() {
               return this.configuration;
             }
           
          -  public NamespaceDescriptor setConfiguration(Map configuration) {
          +  public NamespaceDescriptor setConfiguration(Map configuration) {
               this.configuration = configuration;
               return this;
             }
          @@ -220,81 +247,77 @@ public void setConfigurationIsSet(boolean value) {
           
             public void setFieldValue(_Fields field, Object value) {
               switch (field) {
          -    case NAME:
          -      if (value == null) {
          -        unsetName();
          -      } else {
          -        setName((String)value);
          -      }
          -      break;
          +      case NAME:
          +        if (value == null) {
          +          unsetName();
          +        } else {
          +          setName((String) value);
          +        }
          +        break;
           
          -    case CONFIGURATION:
          -      if (value == null) {
          -        unsetConfiguration();
          -      } else {
          -        setConfiguration((Map)value);
          -      }
          -      break;
          +      case CONFIGURATION:
          +        if (value == null) {
          +          unsetConfiguration();
          +        } else {
          +          setConfiguration((Map) value);
          +        }
          +        break;
           
               }
             }
           
             public Object getFieldValue(_Fields field) {
               switch (field) {
          -    case NAME:
          -      return getName();
          +      case NAME:
          +        return getName();
           
          -    case CONFIGURATION:
          -      return getConfiguration();
          +      case CONFIGURATION:
          +        return getConfiguration();
           
               }
               throw new IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new IllegalArgumentException();
               }
           
               switch (field) {
          -    case NAME:
          -      return isSetName();
          -    case CONFIGURATION:
          -      return isSetConfiguration();
          +      case NAME:
          +        return isSetName();
          +      case CONFIGURATION:
          +        return isSetConfiguration();
               }
               throw new IllegalStateException();
             }
           
             @Override
             public boolean equals(Object that) {
          -    if (that == null)
          -      return false;
          -    if (that instanceof NamespaceDescriptor)
          -      return this.equals((NamespaceDescriptor)that);
          +    if (that == null) return false;
          +    if (that instanceof NamespaceDescriptor) return this.equals((NamespaceDescriptor) that);
               return false;
             }
           
             public boolean equals(NamespaceDescriptor that) {
          -    if (that == null)
          -      return false;
          +    if (that == null) return false;
           
               boolean this_present_name = true && this.isSetName();
               boolean that_present_name = true && that.isSetName();
               if (this_present_name || that_present_name) {
          -      if (!(this_present_name && that_present_name))
          -        return false;
          -      if (!this.name.equals(that.name))
          -        return false;
          +      if (!(this_present_name && that_present_name)) return false;
          +      if (!this.name.equals(that.name)) return false;
               }
           
               boolean this_present_configuration = true && this.isSetConfiguration();
               boolean that_present_configuration = true && that.isSetConfiguration();
               if (this_present_configuration || that_present_configuration) {
          -      if (!(this_present_configuration && that_present_configuration))
          -        return false;
          -      if (!this.configuration.equals(that.configuration))
          -        return false;
          +      if (!(this_present_configuration && that_present_configuration)) return false;
          +      if (!this.configuration.equals(that.configuration)) return false;
               }
           
               return true;
          @@ -306,13 +329,11 @@ public int hashCode() {
           
               boolean present_name = true && (isSetName());
               list.add(present_name);
          -    if (present_name)
          -      list.add(name);
          +    if (present_name) list.add(name);
           
               boolean present_configuration = true && (isSetConfiguration());
               list.add(present_configuration);
          -    if (present_configuration)
          -      list.add(configuration);
          +    if (present_configuration) list.add(configuration);
           
               return list.hashCode();
             }
          @@ -340,7 +361,8 @@ public int compareTo(NamespaceDescriptor other) {
                 return lastComparison;
               }
               if (isSetConfiguration()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.configuration, other.configuration);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.configuration, other.configuration);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -356,7 +378,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
             }
           
          @@ -389,22 +412,26 @@ public String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (name == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'name' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
          @@ -416,15 +443,16 @@ public NamespaceDescriptorStandardScheme getScheme() {
               }
             }
           
           -  private static class NamespaceDescriptorStandardScheme extends StandardScheme<NamespaceDescriptor> {
          +  private static class NamespaceDescriptorStandardScheme
           +      extends StandardScheme<NamespaceDescriptor> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, NamespaceDescriptor struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, NamespaceDescriptor struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -432,7 +460,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NamespaceDescriptor
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.name = iprot.readString();
                         struct.setNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -440,11 +468,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NamespaceDescriptor
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map180 = iprot.readMapBegin();
          -                struct.configuration = new HashMap(2*_map180.size);
          +                struct.configuration = new HashMap(2 * _map180.size);
                           String _key181;
                           String _val182;
          -                for (int _i183 = 0; _i183 < _map180.size; ++_i183)
          -                {
          +                for (int _i183 = 0; _i183 < _map180.size; ++_i183) {
                             _key181 = iprot.readString();
                             _val182 = iprot.readString();
                             struct.configuration.put(_key181, _val182);
          @@ -452,7 +479,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NamespaceDescriptor
                           iprot.readMapEnd();
                         }
                         struct.setConfigurationIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -467,7 +494,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NamespaceDescriptor
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, NamespaceDescriptor struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, NamespaceDescriptor struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -480,9 +508,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NamespaceDescripto
                   if (struct.isSetConfiguration()) {
                     oprot.writeFieldBegin(CONFIGURATION_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.configuration.size()));
          -            for (Map.Entry _iter184 : struct.configuration.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.configuration.size()));
          +            for (Map.Entry _iter184 : struct.configuration.entrySet()) {
                         oprot.writeString(_iter184.getKey());
                         oprot.writeString(_iter184.getValue());
                       }
          @@ -506,7 +535,8 @@ public NamespaceDescriptorTupleScheme getScheme() {
              private static class NamespaceDescriptorTupleScheme extends TupleScheme<NamespaceDescriptor> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, NamespaceDescriptor struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol prot, NamespaceDescriptor struct)
          +        throws org.apache.thrift.TException {
                 TTupleProtocol oprot = (TTupleProtocol) prot;
                 oprot.writeString(struct.name);
                 BitSet optionals = new BitSet();
          @@ -517,8 +547,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NamespaceDescriptor
                 if (struct.isSetConfiguration()) {
                   {
                     oprot.writeI32(struct.configuration.size());
          -          for (Map.Entry _iter185 : struct.configuration.entrySet())
          -          {
          +          for (Map.Entry _iter185 : struct.configuration.entrySet()) {
                       oprot.writeString(_iter185.getKey());
                       oprot.writeString(_iter185.getValue());
                     }
          @@ -527,19 +556,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NamespaceDescriptor
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, NamespaceDescriptor struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol prot, NamespaceDescriptor struct)
          +        throws org.apache.thrift.TException {
                 TTupleProtocol iprot = (TTupleProtocol) prot;
                 struct.name = iprot.readString();
                 struct.setNameIsSet(true);
                 BitSet incoming = iprot.readBitSet(1);
                 if (incoming.get(0)) {
                   {
          -          org.apache.thrift.protocol.TMap _map186 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
          -          struct.configuration = new HashMap(2*_map186.size);
          +          org.apache.thrift.protocol.TMap _map186 =
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, iprot.readI32());
          +          struct.configuration = new HashMap(2 * _map186.size);
                     String _key187;
                     String _val188;
          -          for (int _i189 = 0; _i189 < _map186.size; ++_i189)
          -          {
          +          for (int _i189 = 0; _i189 < _map186.size; ++_i189) {
                       _key187 = iprot.readString();
                       _val188 = iprot.readString();
                       struct.configuration.put(_key187, _val188);
          @@ -551,4 +582,3 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NamespaceDescriptor
             }
           
           }
          -
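
The generated NamespaceDescriptor struct above is what ThriftAdmin.createNamespace and modifyNamespace ultimately put on the wire after the ThriftUtilities.namespaceDescriptorFromHBase conversion shown earlier in this diff. A small sketch of populating and serializing the struct directly, using only members visible above; the namespace name and quota property key are illustrative.

import org.apache.hadoop.hbase.thrift2.generated.NamespaceDescriptor;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public final class NamespaceDescriptorSketch {
  public static void main(String[] args) throws TException {
    // 'name' is the only REQUIRED field; 'configuration' is OPTIONAL, per the metaDataMap above.
    NamespaceDescriptor ns = new NamespaceDescriptor("demo_ns");
    ns.putToConfiguration("hbase.namespace.quota.maxtables", "10");
    ns.validate(); // would throw a TProtocolException if the required name field were missing
    // The struct serializes through the standard thrift schemes, e.g. the compact protocol
    // also used by the writeObject/readObject hooks above.
    byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(ns);
    System.out.println("serialized " + bytes.length + " bytes");
  }
}
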
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAccessControlEntity.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAccessControlEntity.java
          index b68d7acd7a3d..50539a518a4d 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAccessControlEntity.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAccessControlEntity.java
          @@ -1,31 +1,57 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * TAccessControlEntity for permission control
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TAccessControlEntity implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAccessControlEntity");
          -
          -  private static final org.apache.thrift.protocol.TField USERNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("username", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField SCOPE_FIELD_DESC = new org.apache.thrift.protocol.TField("scope", org.apache.thrift.protocol.TType.I32, (short)2);
          -  private static final org.apache.thrift.protocol.TField ACTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("actions", org.apache.thrift.protocol.TType.STRING, (short)4);
          -  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)5);
          -  private static final org.apache.thrift.protocol.TField NS_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("nsName", org.apache.thrift.protocol.TType.STRING, (short)6);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TAccessControlEntityStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TAccessControlEntityTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TAccessControlEntity
          +    implements org.apache.thrift.TBase,
          +    java.io.Serializable, Cloneable, Comparable {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TAccessControlEntity");
          +
          +  private static final org.apache.thrift.protocol.TField USERNAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("username", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField SCOPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("scope", org.apache.thrift.protocol.TType.I32,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField ACTIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("actions", org.apache.thrift.protocol.TType.STRING,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField NS_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("nsName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 6);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TAccessControlEntityStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TAccessControlEntityTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String username; // required
             /**
          -   * 
              * @see TPermissionScope
              */
             public @org.apache.thrift.annotation.Nullable TPermissionScope scope; // required
          @@ -33,19 +59,20 @@ public class TAccessControlEntity implements org.apache.thrift.TBase<TAccessCont
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -58,7 +85,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // USERNAME
                     return USERNAME;
                   case 2: // SCOPE
          @@ -75,12 +102,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -110,32 +137,46 @@ public java.lang.String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.TABLE_NAME,_Fields.NS_NAME};
          +  private static final _Fields optionals[] = { _Fields.TABLE_NAME, _Fields.NS_NAME };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.USERNAME, new org.apache.thrift.meta_data.FieldMetaData("username", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.SCOPE, new org.apache.thrift.meta_data.FieldMetaData("scope", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TPermissionScope.class)));
          -    tmpMap.put(_Fields.ACTIONS, new org.apache.thrift.meta_data.FieldMetaData("actions", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.NS_NAME, new org.apache.thrift.meta_data.FieldMetaData("nsName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.USERNAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("username",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.SCOPE,
          +      new org.apache.thrift.meta_data.FieldMetaData("scope",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TPermissionScope.class)));
          +    tmpMap.put(_Fields.ACTIONS,
          +      new org.apache.thrift.meta_data.FieldMetaData("actions",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.TABLE_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.NS_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("nsName",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TAccessControlEntity.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TAccessControlEntity.class,
          +      metaDataMap);
             }
           
             public TAccessControlEntity() {
             }
           
          -  public TAccessControlEntity(
          -    java.lang.String username,
          -    TPermissionScope scope,
          -    java.lang.String actions)
          -  {
          +  public TAccessControlEntity(java.lang.String username, TPermissionScope scope,
          +      java.lang.String actions) {
               this();
               this.username = username;
               this.scope = scope;
          @@ -181,7 +222,8 @@ public java.lang.String getUsername() {
               return this.username;
             }
           
          -  public TAccessControlEntity setUsername(@org.apache.thrift.annotation.Nullable java.lang.String username) {
          +  public TAccessControlEntity
          +      setUsername(@org.apache.thrift.annotation.Nullable java.lang.String username) {
               this.username = username;
               return this;
             }
          @@ -202,7 +244,6 @@ public void setUsernameIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TPermissionScope
              */
             @org.apache.thrift.annotation.Nullable
          @@ -211,10 +252,10 @@ public TPermissionScope getScope() {
             }
           
             /**
          -   * 
              * @see TPermissionScope
              */
          -  public TAccessControlEntity setScope(@org.apache.thrift.annotation.Nullable TPermissionScope scope) {
          +  public TAccessControlEntity
          +      setScope(@org.apache.thrift.annotation.Nullable TPermissionScope scope) {
               this.scope = scope;
               return this;
             }
          @@ -239,7 +280,8 @@ public java.lang.String getActions() {
               return this.actions;
             }
           
          -  public TAccessControlEntity setActions(@org.apache.thrift.annotation.Nullable java.lang.String actions) {
          +  public TAccessControlEntity
          +      setActions(@org.apache.thrift.annotation.Nullable java.lang.String actions) {
               this.actions = actions;
               return this;
             }
          @@ -264,7 +306,8 @@ public java.lang.String getTableName() {
               return this.tableName;
             }
           
          -  public TAccessControlEntity setTableName(@org.apache.thrift.annotation.Nullable java.lang.String tableName) {
          +  public TAccessControlEntity
          +      setTableName(@org.apache.thrift.annotation.Nullable java.lang.String tableName) {
               this.tableName = tableName;
               return this;
             }
          @@ -289,7 +332,8 @@ public java.lang.String getNsName() {
               return this.nsName;
             }
           
          -  public TAccessControlEntity setNsName(@org.apache.thrift.annotation.Nullable java.lang.String nsName) {
          +  public TAccessControlEntity
          +      setNsName(@org.apache.thrift.annotation.Nullable java.lang.String nsName) {
               this.nsName = nsName;
               return this;
             }
          @@ -309,47 +353,48 @@ public void setNsNameIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case USERNAME:
          -      if (value == null) {
          -        unsetUsername();
          -      } else {
          -        setUsername((java.lang.String)value);
          -      }
          -      break;
          +      case USERNAME:
          +        if (value == null) {
          +          unsetUsername();
          +        } else {
          +          setUsername((java.lang.String) value);
          +        }
          +        break;
           
          -    case SCOPE:
          -      if (value == null) {
          -        unsetScope();
          -      } else {
          -        setScope((TPermissionScope)value);
          -      }
          -      break;
          +      case SCOPE:
          +        if (value == null) {
          +          unsetScope();
          +        } else {
          +          setScope((TPermissionScope) value);
          +        }
          +        break;
           
          -    case ACTIONS:
          -      if (value == null) {
          -        unsetActions();
          -      } else {
          -        setActions((java.lang.String)value);
          -      }
          -      break;
          +      case ACTIONS:
          +        if (value == null) {
          +          unsetActions();
          +        } else {
          +          setActions((java.lang.String) value);
          +        }
          +        break;
           
          -    case TABLE_NAME:
          -      if (value == null) {
          -        unsetTableName();
          -      } else {
          -        setTableName((java.lang.String)value);
          -      }
          -      break;
          +      case TABLE_NAME:
          +        if (value == null) {
          +          unsetTableName();
          +        } else {
          +          setTableName((java.lang.String) value);
          +        }
          +        break;
           
          -    case NS_NAME:
          -      if (value == null) {
          -        unsetNsName();
          -      } else {
          -        setNsName((java.lang.String)value);
          -      }
          -      break;
          +      case NS_NAME:
          +        if (value == null) {
          +          unsetNsName();
          +        } else {
          +          setNsName((java.lang.String) value);
          +        }
          +        break;
           
               }
             }
          @@ -357,102 +402,92 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case USERNAME:
          -      return getUsername();
          +      case USERNAME:
          +        return getUsername();
           
          -    case SCOPE:
          -      return getScope();
          +      case SCOPE:
          +        return getScope();
           
          -    case ACTIONS:
          -      return getActions();
          +      case ACTIONS:
          +        return getActions();
           
          -    case TABLE_NAME:
          -      return getTableName();
          +      case TABLE_NAME:
          +        return getTableName();
           
          -    case NS_NAME:
          -      return getNsName();
          +      case NS_NAME:
          +        return getNsName();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case USERNAME:
          -      return isSetUsername();
          -    case SCOPE:
          -      return isSetScope();
          -    case ACTIONS:
          -      return isSetActions();
          -    case TABLE_NAME:
          -      return isSetTableName();
          -    case NS_NAME:
          -      return isSetNsName();
          +      case USERNAME:
          +        return isSetUsername();
          +      case SCOPE:
          +        return isSetScope();
          +      case ACTIONS:
          +        return isSetActions();
          +      case TABLE_NAME:
          +        return isSetTableName();
          +      case NS_NAME:
          +        return isSetNsName();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TAccessControlEntity)
          -      return this.equals((TAccessControlEntity)that);
          +    if (that instanceof TAccessControlEntity) return this.equals((TAccessControlEntity) that);
               return false;
             }
           
             public boolean equals(TAccessControlEntity that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_username = true && this.isSetUsername();
               boolean that_present_username = true && that.isSetUsername();
               if (this_present_username || that_present_username) {
          -      if (!(this_present_username && that_present_username))
          -        return false;
          -      if (!this.username.equals(that.username))
          -        return false;
          +      if (!(this_present_username && that_present_username)) return false;
          +      if (!this.username.equals(that.username)) return false;
               }
           
               boolean this_present_scope = true && this.isSetScope();
               boolean that_present_scope = true && that.isSetScope();
               if (this_present_scope || that_present_scope) {
          -      if (!(this_present_scope && that_present_scope))
          -        return false;
          -      if (!this.scope.equals(that.scope))
          -        return false;
          +      if (!(this_present_scope && that_present_scope)) return false;
          +      if (!this.scope.equals(that.scope)) return false;
               }
           
               boolean this_present_actions = true && this.isSetActions();
               boolean that_present_actions = true && that.isSetActions();
               if (this_present_actions || that_present_actions) {
          -      if (!(this_present_actions && that_present_actions))
          -        return false;
          -      if (!this.actions.equals(that.actions))
          -        return false;
          +      if (!(this_present_actions && that_present_actions)) return false;
          +      if (!this.actions.equals(that.actions)) return false;
               }
           
               boolean this_present_tableName = true && this.isSetTableName();
               boolean that_present_tableName = true && that.isSetTableName();
               if (this_present_tableName || that_present_tableName) {
          -      if (!(this_present_tableName && that_present_tableName))
          -        return false;
          -      if (!this.tableName.equals(that.tableName))
          -        return false;
          +      if (!(this_present_tableName && that_present_tableName)) return false;
          +      if (!this.tableName.equals(that.tableName)) return false;
               }
           
               boolean this_present_nsName = true && this.isSetNsName();
               boolean that_present_nsName = true && that.isSetNsName();
               if (this_present_nsName || that_present_nsName) {
          -      if (!(this_present_nsName && that_present_nsName))
          -        return false;
          -      if (!this.nsName.equals(that.nsName))
          -        return false;
          +      if (!(this_present_nsName && that_present_nsName)) return false;
          +      if (!this.nsName.equals(that.nsName)) return false;
               }
           
               return true;
          @@ -463,24 +498,19 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetUsername()) ? 131071 : 524287);
          -    if (isSetUsername())
          -      hashCode = hashCode * 8191 + username.hashCode();
          +    if (isSetUsername()) hashCode = hashCode * 8191 + username.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetScope()) ? 131071 : 524287);
          -    if (isSetScope())
          -      hashCode = hashCode * 8191 + scope.getValue();
          +    if (isSetScope()) hashCode = hashCode * 8191 + scope.getValue();
           
               hashCode = hashCode * 8191 + ((isSetActions()) ? 131071 : 524287);
          -    if (isSetActions())
          -      hashCode = hashCode * 8191 + actions.hashCode();
          +    if (isSetActions()) hashCode = hashCode * 8191 + actions.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -    if (isSetTableName())
          -      hashCode = hashCode * 8191 + tableName.hashCode();
          +    if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetNsName()) ? 131071 : 524287);
          -    if (isSetNsName())
          -      hashCode = hashCode * 8191 + nsName.hashCode();
          +    if (isSetNsName()) hashCode = hashCode * 8191 + nsName.hashCode();
           
               return hashCode;
             }
          @@ -555,7 +585,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -614,48 +645,56 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (username == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'username' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'username' was not present! Struct: " + toString());
               }
               if (scope == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'scope' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'scope' was not present! Struct: " + toString());
               }
               if (actions == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'actions' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'actions' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TAccessControlEntityStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TAccessControlEntityStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TAccessControlEntityStandardScheme getScheme() {
                 return new TAccessControlEntityStandardScheme();
               }
             }
           
          -  private static class TAccessControlEntityStandardScheme extends org.apache.thrift.scheme.StandardScheme<TAccessControlEntity> {
          +  private static class TAccessControlEntityStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<TAccessControlEntity> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntity struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntity struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -663,15 +702,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.username = iprot.readString();
                         struct.setUsernameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 2: // SCOPE
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.scope = org.apache.hadoop.hbase.thrift2.generated.TPermissionScope.findByValue(iprot.readI32());
          +              struct.scope = org.apache.hadoop.hbase.thrift2.generated.TPermissionScope
          +                  .findByValue(iprot.readI32());
                         struct.setScopeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -679,7 +719,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.actions = iprot.readString();
                         struct.setActionsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -687,7 +727,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.tableName = iprot.readString();
                         struct.setTableNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -695,7 +735,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.nsName = iprot.readString();
                         struct.setNsNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -710,7 +750,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAccessControlEntit
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TAccessControlEntity struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TAccessControlEntity struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -749,17 +790,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAccessControlEnti
           
             }
           
          -  private static class TAccessControlEntityTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TAccessControlEntityTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TAccessControlEntityTupleScheme getScheme() {
                 return new TAccessControlEntityTupleScheme();
               }
             }
           
          -  private static class TAccessControlEntityTupleScheme extends org.apache.thrift.scheme.TupleScheme<TAccessControlEntity> {
          +  private static class TAccessControlEntityTupleScheme
          +      extends org.apache.thrift.scheme.TupleScheme<TAccessControlEntity> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeString(struct.username);
                 oprot.writeI32(struct.scope.getValue());
                 oprot.writeString(struct.actions);
          @@ -780,11 +825,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntit
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.username = iprot.readString();
                 struct.setUsernameIsSet(true);
          -      struct.scope = org.apache.hadoop.hbase.thrift2.generated.TPermissionScope.findByValue(iprot.readI32());
          +      struct.scope =
          +          org.apache.hadoop.hbase.thrift2.generated.TPermissionScope.findByValue(iprot.readI32());
                 struct.setScopeIsSet(true);
                 struct.actions = iprot.readString();
                 struct.setActionsIsSet(true);
          @@ -800,8 +848,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAccessControlEntity
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java
          index 33ccfd5cc8f8..6ccfb47d89fc 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java
          @@ -1,51 +1,78 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TAppend implements org.apache.thrift.TBase<TAppend, TAppend._Fields>, java.io.Serializable, Cloneable, Comparable<TAppend> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAppend");
          -
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)2);
          -  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)3);
          -  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32, (short)4);
          -  private static final org.apache.thrift.protocol.TField CELL_VISIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("cellVisibility", org.apache.thrift.protocol.TType.STRUCT, (short)5);
          -  private static final org.apache.thrift.protocol.TField RETURN_RESULTS_FIELD_DESC = new org.apache.thrift.protocol.TField("returnResults", org.apache.thrift.protocol.TType.BOOL, (short)6);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TAppendStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TAppendTupleSchemeFactory();
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TAppend implements org.apache.thrift.TBase<TAppend, TAppend._Fields>,
          +    java.io.Serializable, Cloneable, Comparable<TAppend> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TAppend");
          +
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField CELL_VISIBILITY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cellVisibility",
          +          org.apache.thrift.protocol.TType.STRUCT, (short) 5);
          +  private static final org.apache.thrift.protocol.TField RETURN_RESULTS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("returnResults", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 6);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TAppendStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TAppendTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
             public @org.apache.thrift.annotation.Nullable java.util.List<TColumnValue> columns; // required
          -  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // optional
          +  public @org.apache.thrift.annotation.Nullable java.util.Map attributes; // optional
             /**
          -   * 
              * @see TDurability
              */
             public @org.apache.thrift.annotation.Nullable TDurability durability; // optional
             public @org.apache.thrift.annotation.Nullable TCellVisibility cellVisibility; // optional
             public boolean returnResults; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    ROW((short)1, "row"),
          -    COLUMNS((short)2, "columns"),
          -    ATTRIBUTES((short)3, "attributes"),
          +    ROW((short) 1, "row"), COLUMNS((short) 2, "columns"), ATTRIBUTES((short) 3, "attributes"),
               /**
          -     * 
                * @see TDurability
                */
          -    DURABILITY((short)4, "durability"),
          -    CELL_VISIBILITY((short)5, "cellVisibility"),
          -    RETURN_RESULTS((short)6, "returnResults");
          +    DURABILITY((short) 4, "durability"), CELL_VISIBILITY((short) 5, "cellVisibility"),
          +    RETURN_RESULTS((short) 6, "returnResults");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -58,7 +85,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // ROW
                     return ROW;
                   case 2: // COLUMNS
          @@ -77,12 +104,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -114,24 +141,42 @@ public java.lang.String getFieldName() {
             // isset id assignments
             private static final int __RETURNRESULTS_ISSET_ID = 0;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.ATTRIBUTES,_Fields.DURABILITY,_Fields.CELL_VISIBILITY,_Fields.RETURN_RESULTS};
          +  private static final _Fields optionals[] =
          +      { _Fields.ATTRIBUTES, _Fields.DURABILITY, _Fields.CELL_VISIBILITY, _Fields.RETURN_RESULTS };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnValue.class))));
          -    tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
          -    tmpMap.put(_Fields.DURABILITY, new org.apache.thrift.meta_data.FieldMetaData("durability", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TDurability.class)));
          -    tmpMap.put(_Fields.CELL_VISIBILITY, new org.apache.thrift.meta_data.FieldMetaData("cellVisibility", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCellVisibility.class)));
          -    tmpMap.put(_Fields.RETURN_RESULTS, new org.apache.thrift.meta_data.FieldMetaData("returnResults", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumnValue.class))));
          +    tmpMap.put(_Fields.ATTRIBUTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true))));
          +    tmpMap.put(_Fields.DURABILITY,
          +      new org.apache.thrift.meta_data.FieldMetaData("durability",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TDurability.class)));
          +    tmpMap.put(_Fields.CELL_VISIBILITY,
          +      new org.apache.thrift.meta_data.FieldMetaData("cellVisibility",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TCellVisibility.class)));
          +    tmpMap.put(_Fields.RETURN_RESULTS, new org.apache.thrift.meta_data.FieldMetaData(
          +        "returnResults", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TAppend.class, metaDataMap);
          @@ -140,10 +185,7 @@ public java.lang.String getFieldName() {
             public TAppend() {
             }
           
          -  public TAppend(
          -    java.nio.ByteBuffer row,
          -    java.util.List<TColumnValue> columns)
          -  {
          +  public TAppend(java.nio.ByteBuffer row, java.util.List<TColumnValue> columns) {
               this();
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
               this.columns = columns;
          @@ -158,14 +200,16 @@ public TAppend(TAppend other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetColumns()) {
          -      java.util.List<TColumnValue> __this__columns = new java.util.ArrayList<TColumnValue>(other.columns.size());
          +      java.util.List<TColumnValue> __this__columns =
          +          new java.util.ArrayList<TColumnValue>(other.columns.size());
                 for (TColumnValue other_element : other.columns) {
                   __this__columns.add(new TColumnValue(other_element));
                 }
                 this.columns = __this__columns;
               }
               if (other.isSetAttributes()) {
          -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes);
          +      java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
          +          new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(other.attributes);
                 this.attributes = __this__attributes;
               }
               if (other.isSetDurability()) {
          @@ -202,7 +246,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TAppend setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
          @@ -247,7 +291,8 @@ public java.util.List getColumns() {
               return this.columns;
             }
           
          -  public TAppend setColumns(@org.apache.thrift.annotation.Nullable java.util.List<TColumnValue> columns) {
          +  public TAppend
          +      setColumns(@org.apache.thrift.annotation.Nullable java.util.List<TColumnValue> columns) {
               this.columns = columns;
               return this;
             }
          @@ -273,17 +318,18 @@ public int getAttributesSize() {
           
             public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
               if (this.attributes == null) {
          -      this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
          +      this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
               }
               this.attributes.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
          -  public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
          +  public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
               return this.attributes;
             }
           
          -  public TAppend setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
          +  public TAppend setAttributes(
          +      @org.apache.thrift.annotation.Nullable java.util.Map attributes) {
               this.attributes = attributes;
               return this;
             }
          @@ -304,7 +350,6 @@ public void setAttributesIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TDurability
              */
             @org.apache.thrift.annotation.Nullable
          @@ -313,7 +358,6 @@ public TDurability getDurability() {
             }
           
             /**
          -   * 
              * @see TDurability
              */
             public TAppend setDurability(@org.apache.thrift.annotation.Nullable TDurability durability) {
          @@ -341,7 +385,8 @@ public TCellVisibility getCellVisibility() {
               return this.cellVisibility;
             }
           
          -  public TAppend setCellVisibility(@org.apache.thrift.annotation.Nullable TCellVisibility cellVisibility) {
          +  public TAppend
          +      setCellVisibility(@org.apache.thrift.annotation.Nullable TCellVisibility cellVisibility) {
               this.cellVisibility = cellVisibility;
               return this;
             }
          @@ -372,7 +417,8 @@ public TAppend setReturnResults(boolean returnResults) {
             }
           
             public void unsetReturnResults() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __RETURNRESULTS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __RETURNRESULTS_ISSET_ID);
             }
           
             /** Returns true if field returnResults is set (has been assigned a value) and false otherwise */
          @@ -381,62 +427,64 @@ public boolean isSetReturnResults() {
             }
           
             public void setReturnResultsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __RETURNRESULTS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __RETURNRESULTS_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case COLUMNS:
          -      if (value == null) {
          -        unsetColumns();
          -      } else {
          -        setColumns((java.util.List<TColumnValue>)value);
          -      }
          -      break;
          +      case COLUMNS:
          +        if (value == null) {
          +          unsetColumns();
          +        } else {
          +          setColumns((java.util.List<TColumnValue>) value);
          +        }
          +        break;
           
          -    case ATTRIBUTES:
          -      if (value == null) {
          -        unsetAttributes();
          -      } else {
          -        setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case ATTRIBUTES:
          +        if (value == null) {
          +          unsetAttributes();
          +        } else {
          +          setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case DURABILITY:
          -      if (value == null) {
          -        unsetDurability();
          -      } else {
          -        setDurability((TDurability)value);
          -      }
          -      break;
          +      case DURABILITY:
          +        if (value == null) {
          +          unsetDurability();
          +        } else {
          +          setDurability((TDurability) value);
          +        }
          +        break;
           
          -    case CELL_VISIBILITY:
          -      if (value == null) {
          -        unsetCellVisibility();
          -      } else {
          -        setCellVisibility((TCellVisibility)value);
          -      }
          -      break;
          +      case CELL_VISIBILITY:
          +        if (value == null) {
          +          unsetCellVisibility();
          +        } else {
          +          setCellVisibility((TCellVisibility) value);
          +        }
          +        break;
           
          -    case RETURN_RESULTS:
          -      if (value == null) {
          -        unsetReturnResults();
          -      } else {
          -        setReturnResults((java.lang.Boolean)value);
          -      }
          -      break;
          +      case RETURN_RESULTS:
          +        if (value == null) {
          +          unsetReturnResults();
          +        } else {
          +          setReturnResults((java.lang.Boolean) value);
          +        }
          +        break;
           
               }
             }
          @@ -444,116 +492,104 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case COLUMNS:
          -      return getColumns();
          +      case COLUMNS:
          +        return getColumns();
           
          -    case ATTRIBUTES:
          -      return getAttributes();
          +      case ATTRIBUTES:
          +        return getAttributes();
           
          -    case DURABILITY:
          -      return getDurability();
          +      case DURABILITY:
          +        return getDurability();
           
          -    case CELL_VISIBILITY:
          -      return getCellVisibility();
          +      case CELL_VISIBILITY:
          +        return getCellVisibility();
           
          -    case RETURN_RESULTS:
          -      return isReturnResults();
          +      case RETURN_RESULTS:
          +        return isReturnResults();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case ROW:
          -      return isSetRow();
          -    case COLUMNS:
          -      return isSetColumns();
          -    case ATTRIBUTES:
          -      return isSetAttributes();
          -    case DURABILITY:
          -      return isSetDurability();
          -    case CELL_VISIBILITY:
          -      return isSetCellVisibility();
          -    case RETURN_RESULTS:
          -      return isSetReturnResults();
          +      case ROW:
          +        return isSetRow();
          +      case COLUMNS:
          +        return isSetColumns();
          +      case ATTRIBUTES:
          +        return isSetAttributes();
          +      case DURABILITY:
          +        return isSetDurability();
          +      case CELL_VISIBILITY:
          +        return isSetCellVisibility();
          +      case RETURN_RESULTS:
          +        return isSetReturnResults();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TAppend)
          -      return this.equals((TAppend)that);
          +    if (that instanceof TAppend) return this.equals((TAppend) that);
               return false;
             }
           
             public boolean equals(TAppend that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_columns = true && this.isSetColumns();
               boolean that_present_columns = true && that.isSetColumns();
               if (this_present_columns || that_present_columns) {
          -      if (!(this_present_columns && that_present_columns))
          -        return false;
          -      if (!this.columns.equals(that.columns))
          -        return false;
          +      if (!(this_present_columns && that_present_columns)) return false;
          +      if (!this.columns.equals(that.columns)) return false;
               }
           
               boolean this_present_attributes = true && this.isSetAttributes();
               boolean that_present_attributes = true && that.isSetAttributes();
               if (this_present_attributes || that_present_attributes) {
          -      if (!(this_present_attributes && that_present_attributes))
          -        return false;
          -      if (!this.attributes.equals(that.attributes))
          -        return false;
          +      if (!(this_present_attributes && that_present_attributes)) return false;
          +      if (!this.attributes.equals(that.attributes)) return false;
               }
           
               boolean this_present_durability = true && this.isSetDurability();
               boolean that_present_durability = true && that.isSetDurability();
               if (this_present_durability || that_present_durability) {
          -      if (!(this_present_durability && that_present_durability))
          -        return false;
          -      if (!this.durability.equals(that.durability))
          -        return false;
          +      if (!(this_present_durability && that_present_durability)) return false;
          +      if (!this.durability.equals(that.durability)) return false;
               }
           
               boolean this_present_cellVisibility = true && this.isSetCellVisibility();
               boolean that_present_cellVisibility = true && that.isSetCellVisibility();
               if (this_present_cellVisibility || that_present_cellVisibility) {
          -      if (!(this_present_cellVisibility && that_present_cellVisibility))
          -        return false;
          -      if (!this.cellVisibility.equals(that.cellVisibility))
          -        return false;
          +      if (!(this_present_cellVisibility && that_present_cellVisibility)) return false;
          +      if (!this.cellVisibility.equals(that.cellVisibility)) return false;
               }
           
               boolean this_present_returnResults = true && this.isSetReturnResults();
               boolean that_present_returnResults = true && that.isSetReturnResults();
               if (this_present_returnResults || that_present_returnResults) {
          -      if (!(this_present_returnResults && that_present_returnResults))
          -        return false;
          -      if (this.returnResults != that.returnResults)
          -        return false;
          +      if (!(this_present_returnResults && that_present_returnResults)) return false;
          +      if (this.returnResults != that.returnResults) return false;
               }
           
               return true;
          @@ -564,28 +600,22 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -    if (isSetColumns())
          -      hashCode = hashCode * 8191 + columns.hashCode();
          +    if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -    if (isSetAttributes())
          -      hashCode = hashCode * 8191 + attributes.hashCode();
          +    if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetDurability()) ? 131071 : 524287);
          -    if (isSetDurability())
          -      hashCode = hashCode * 8191 + durability.getValue();
          +    if (isSetDurability()) hashCode = hashCode * 8191 + durability.getValue();
           
               hashCode = hashCode * 8191 + ((isSetCellVisibility()) ? 131071 : 524287);
          -    if (isSetCellVisibility())
          -      hashCode = hashCode * 8191 + cellVisibility.hashCode();
          +    if (isSetCellVisibility()) hashCode = hashCode * 8191 + cellVisibility.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetReturnResults()) ? 131071 : 524287);
          -    if (isSetReturnResults())
          -      hashCode = hashCode * 8191 + ((returnResults) ? 131071 : 524287);
          +    if (isSetReturnResults()) hashCode = hashCode * 8191 + ((returnResults) ? 131071 : 524287);
           
               return hashCode;
             }
          @@ -643,7 +673,8 @@ public int compareTo(TAppend other) {
                 return lastComparison;
               }
               if (isSetCellVisibility()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cellVisibility, other.cellVisibility);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.cellVisibility, other.cellVisibility);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -653,7 +684,8 @@ public int compareTo(TAppend other) {
                 return lastComparison;
               }
               if (isSetReturnResults()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.returnResults, other.returnResults);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.returnResults, other.returnResults);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -670,7 +702,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -737,10 +770,12 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (row == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'row' was not present! Struct: " + toString());
               }
               if (columns == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'columns' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'columns' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
               if (cellVisibility != null) {
          @@ -750,37 +785,43 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TAppendStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TAppendStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TAppendStandardScheme getScheme() {
                 return new TAppendStandardScheme();
               }
             }
           
           -  private static class TAppendStandardScheme extends org.apache.thrift.scheme.StandardScheme<TAppend> {
          +  private static class TAppendStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<TAppend> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -788,7 +829,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -797,9 +838,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                         {
                           org.apache.thrift.protocol.TList _list88 = iprot.readListBegin();
                            struct.columns = new java.util.ArrayList<TColumnValue>(_list88.size);
          -                @org.apache.thrift.annotation.Nullable TColumnValue _elem89;
          -                for (int _i90 = 0; _i90 < _list88.size; ++_i90)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TColumnValue _elem89;
          +                for (int _i90 = 0; _i90 < _list88.size; ++_i90) {
                             _elem89 = new TColumnValue();
                             _elem89.read(iprot);
                             struct.columns.add(_elem89);
          @@ -807,7 +848,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                           iprot.readListEnd();
                         }
                         struct.setColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -815,11 +856,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map91 = iprot.readMapBegin();
           -                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map91.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key92;
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val93;
          -                for (int _i94 = 0; _i94 < _map91.size; ++_i94)
          -                {
           +                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                    2 * _map91.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key92;
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _val93;
          +                for (int _i94 = 0; _i94 < _map91.size; ++_i94) {
                             _key92 = iprot.readBinary();
                             _val93 = iprot.readBinary();
                             struct.attributes.put(_key92, _val93);
          @@ -827,15 +870,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                           iprot.readMapEnd();
                         }
                         struct.setAttributesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 4: // DURABILITY
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability
          +                  .findByValue(iprot.readI32());
                         struct.setDurabilityIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -844,7 +888,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                         struct.cellVisibility = new TCellVisibility();
                         struct.cellVisibility.read(iprot);
                         struct.setCellVisibilityIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -852,7 +896,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.returnResults = iprot.readBool();
                         struct.setReturnResultsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -867,7 +911,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAppend struct) thr
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TAppend struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TAppend struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -879,9 +924,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAppend struct) th
                 if (struct.columns != null) {
                   oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                   {
          -          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          -          for (TColumnValue _iter95 : struct.columns)
          -          {
          +          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +              org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          +          for (TColumnValue _iter95 : struct.columns) {
                       _iter95.write(oprot);
                     }
                     oprot.writeListEnd();
          @@ -892,9 +937,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAppend struct) th
                   if (struct.isSetAttributes()) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter96 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter96 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter96.getKey());
                         oprot.writeBinary(_iter96.getValue());
                       }
          @@ -937,13 +984,14 @@ public TAppendTupleScheme getScheme() {
              private static class TAppendTupleScheme extends org.apache.thrift.scheme.TupleScheme<TAppend> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TAppend struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TAppend struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.row);
                 {
                   oprot.writeI32(struct.columns.size());
          -        for (TColumnValue _iter97 : struct.columns)
          -        {
          +        for (TColumnValue _iter97 : struct.columns) {
                     _iter97.write(oprot);
                   }
                 }
          @@ -964,8 +1012,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thr
                 if (struct.isSetAttributes()) {
                   {
                     oprot.writeI32(struct.attributes.size());
           -          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter98 : struct.attributes.entrySet())
          -          {
           +          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter98 : struct.attributes
          +              .entrySet()) {
                       oprot.writeBinary(_iter98.getKey());
                       oprot.writeBinary(_iter98.getValue());
                     }
          @@ -983,16 +1031,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thr
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.row = iprot.readBinary();
                 struct.setRowIsSet(true);
                 {
          -        org.apache.thrift.protocol.TList _list99 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +        org.apache.thrift.protocol.TList _list99 =
          +            iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                    struct.columns = new java.util.ArrayList<TColumnValue>(_list99.size);
          -        @org.apache.thrift.annotation.Nullable TColumnValue _elem100;
          -        for (int _i101 = 0; _i101 < _list99.size; ++_i101)
          -        {
          +        @org.apache.thrift.annotation.Nullable
          +        TColumnValue _elem100;
          +        for (int _i101 = 0; _i101 < _list99.size; ++_i101) {
                     _elem100 = new TColumnValue();
                     _elem100.read(iprot);
                     struct.columns.add(_elem100);
          @@ -1002,12 +1053,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thro
                 java.util.BitSet incoming = iprot.readBitSet(4);
                 if (incoming.get(0)) {
                   {
          -          org.apache.thrift.protocol.TMap _map102 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -          struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map102.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key103;
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val104;
          -          for (int _i105 = 0; _i105 < _map102.size; ++_i105)
          -          {
          +          org.apache.thrift.protocol.TMap _map102 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +          struct.attributes =
           +              new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map102.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key103;
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _val104;
          +          for (int _i105 = 0; _i105 < _map102.size; ++_i105) {
                       _key103 = iprot.readBinary();
                       _val104 = iprot.readBinary();
                       struct.attributes.put(_key103, _val104);
          @@ -1016,7 +1070,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thro
                   struct.setAttributesIsSet(true);
                 }
                 if (incoming.get(1)) {
          -        struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +        struct.durability =
          +            org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
                   struct.setDurabilityIsSet(true);
                 }
                 if (incoming.get(2)) {
          @@ -1031,8 +1086,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAppend struct) thro
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java
          index 7962bfa8c26d..ae9ae19fb327 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java
          @@ -1,28 +1,51 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class TAuthorization implements org.apache.thrift.TBase<TAuthorization, TAuthorization._Fields>, java.io.Serializable, Cloneable, Comparable<TAuthorization> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAuthorization");
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TAuthorization
           +    implements org.apache.thrift.TBase<TAuthorization, TAuthorization._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<TAuthorization> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TAuthorization");
           
          -  private static final org.apache.thrift.protocol.TField LABELS_FIELD_DESC = new org.apache.thrift.protocol.TField("labels", org.apache.thrift.protocol.TType.LIST, (short)1);
          +  private static final org.apache.thrift.protocol.TField LABELS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("labels", org.apache.thrift.protocol.TType.LIST,
          +          (short) 1);
           
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TAuthorizationStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TAuthorizationTupleSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TAuthorizationStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TAuthorizationTupleSchemeFactory();
           
              public @org.apache.thrift.annotation.Nullable java.util.List<java.lang.String> labels; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    LABELS((short)1, "labels");
          +    LABELS((short) 1, "labels");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -35,7 +58,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // LABELS
                     return LABELS;
                   default:
          @@ -44,12 +67,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -79,15 +102,20 @@ public java.lang.String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.LABELS};
          +  private static final _Fields optionals[] = { _Fields.LABELS };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.LABELS, new org.apache.thrift.meta_data.FieldMetaData("labels", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.LABELS,
          +      new org.apache.thrift.meta_data.FieldMetaData("labels",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING))));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TAuthorization.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TAuthorization.class,
          +      metaDataMap);
             }
           
             public TAuthorization() {
          @@ -98,7 +126,8 @@ public TAuthorization() {
              */
             public TAuthorization(TAuthorization other) {
               if (other.isSetLabels()) {
           -      java.util.List<java.lang.String> __this__labels = new java.util.ArrayList<java.lang.String>(other.labels);
           +      java.util.List<java.lang.String> __this__labels =
           +          new java.util.ArrayList<java.lang.String>(other.labels);
                 this.labels = __this__labels;
               }
             }
           @@ -133,7 +162,8 @@ public java.util.List<java.lang.String> getLabels() {
               return this.labels;
             }
           
           -  public TAuthorization setLabels(@org.apache.thrift.annotation.Nullable java.util.List<java.lang.String> labels) {
          +  public TAuthorization
           +      setLabels(@org.apache.thrift.annotation.Nullable java.util.List<java.lang.String> labels) {
               this.labels = labels;
               return this;
             }
          @@ -153,15 +183,16 @@ public void setLabelsIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case LABELS:
          -      if (value == null) {
          -        unsetLabels();
          -      } else {
           -        setLabels((java.util.List<java.lang.String>)value);
          -      }
          -      break;
          +      case LABELS:
          +        if (value == null) {
          +          unsetLabels();
          +        } else {
           +          setLabels((java.util.List<java.lang.String>) value);
          +        }
          +        break;
           
               }
             }
          @@ -169,46 +200,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case LABELS:
          -      return getLabels();
          +      case LABELS:
          +        return getLabels();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case LABELS:
          -      return isSetLabels();
          +      case LABELS:
          +        return isSetLabels();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TAuthorization)
          -      return this.equals((TAuthorization)that);
          +    if (that instanceof TAuthorization) return this.equals((TAuthorization) that);
               return false;
             }
           
             public boolean equals(TAuthorization that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_labels = true && this.isSetLabels();
               boolean that_present_labels = true && that.isSetLabels();
               if (this_present_labels || that_present_labels) {
          -      if (!(this_present_labels && that_present_labels))
          -        return false;
          -      if (!this.labels.equals(that.labels))
          -        return false;
          +      if (!(this_present_labels && that_present_labels)) return false;
          +      if (!this.labels.equals(that.labels)) return false;
               }
           
               return true;
          @@ -219,8 +248,7 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetLabels()) ? 131071 : 524287);
          -    if (isSetLabels())
          -      hashCode = hashCode * 8191 + labels.hashCode();
          +    if (isSetLabels()) hashCode = hashCode * 8191 + labels.hashCode();
           
               return hashCode;
             }
          @@ -255,7 +283,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -284,35 +313,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TAuthorizationStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TAuthorizationStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TAuthorizationStandardScheme getScheme() {
                 return new TAuthorizationStandardScheme();
               }
             }
           
           -  private static class TAuthorizationStandardScheme extends org.apache.thrift.scheme.StandardScheme<TAuthorization> {
          +  private static class TAuthorizationStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<TAuthorization> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TAuthorization struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TAuthorization struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -321,16 +355,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAuthorization stru
                         {
                           org.apache.thrift.protocol.TList _list8 = iprot.readListBegin();
                            struct.labels = new java.util.ArrayList<java.lang.String>(_list8.size);
          -                @org.apache.thrift.annotation.Nullable java.lang.String _elem9;
          -                for (int _i10 = 0; _i10 < _list8.size; ++_i10)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                java.lang.String _elem9;
          +                for (int _i10 = 0; _i10 < _list8.size; ++_i10) {
                             _elem9 = iprot.readString();
                             struct.labels.add(_elem9);
                           }
                           iprot.readListEnd();
                         }
                         struct.setLabelsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -345,7 +379,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TAuthorization stru
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TAuthorization struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TAuthorization struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -353,9 +388,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAuthorization str
                   if (struct.isSetLabels()) {
                     oprot.writeFieldBegin(LABELS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.labels.size()));
          -            for (java.lang.String _iter11 : struct.labels)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.labels.size()));
          +            for (java.lang.String _iter11 : struct.labels) {
                         oprot.writeString(_iter11);
                       }
                       oprot.writeListEnd();
          @@ -369,17 +404,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TAuthorization str
           
             }
           
          -  private static class TAuthorizationTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TAuthorizationTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TAuthorizationTupleScheme getScheme() {
                 return new TAuthorizationTupleScheme();
               }
             }
           
           -  private static class TAuthorizationTupleScheme extends org.apache.thrift.scheme.TupleScheme<TAuthorization> {
          +  private static class TAuthorizationTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<TAuthorization> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TAuthorization struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TAuthorization struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetLabels()) {
                   optionals.set(0);
          @@ -388,8 +427,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAuthorization stru
                 if (struct.isSetLabels()) {
                   {
                     oprot.writeI32(struct.labels.size());
          -          for (java.lang.String _iter12 : struct.labels)
          -          {
          +          for (java.lang.String _iter12 : struct.labels) {
                       oprot.writeString(_iter12);
                     }
                   }
          @@ -397,16 +435,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TAuthorization stru
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TAuthorization struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TAuthorization struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(1);
                 if (incoming.get(0)) {
                   {
          -          org.apache.thrift.protocol.TList _list13 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +          org.apache.thrift.protocol.TList _list13 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                      struct.labels = new java.util.ArrayList<java.lang.String>(_list13.size);
          -          @org.apache.thrift.annotation.Nullable java.lang.String _elem14;
          -          for (int _i15 = 0; _i15 < _list13.size; ++_i15)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          java.lang.String _elem14;
          +          for (int _i15 = 0; _i15 < _list13.size; ++_i15) {
                       _elem14 = iprot.readString();
                       struct.labels.add(_elem14);
                     }
          @@ -416,8 +457,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TAuthorization struc
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
          index 35bcfd58f8a3..bb481644ac84 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
          @@ -1,17 +1,27 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.regionserver.BloomType
          + * Thrift wrapper around org.apache.hadoop.hbase.regionserver.BloomType
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TBloomFilterType implements org.apache.thrift.TEnum {
             /**
              * Bloomfilters disabled
          @@ -48,7 +58,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TBloomFilterType findByValue(int value) { 
          +  public static TBloomFilterType findByValue(int value) {
               switch (value) {
                 case 0:
                   return NONE;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
          index 7a29bd7596ad..388a101e3d88 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
          @@ -1,28 +1,51 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class TCellVisibility implements org.apache.thrift.TBase<TCellVisibility, TCellVisibility._Fields>, java.io.Serializable, Cloneable, Comparable<TCellVisibility> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCellVisibility");
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TCellVisibility
           +    implements org.apache.thrift.TBase<TCellVisibility, TCellVisibility._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<TCellVisibility> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TCellVisibility");
           
          -  private static final org.apache.thrift.protocol.TField EXPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("expression", org.apache.thrift.protocol.TType.STRING, (short)1);
          +  private static final org.apache.thrift.protocol.TField EXPRESSION_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("expression", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
           
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TCellVisibilityStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TCellVisibilityTupleSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TCellVisibilityStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TCellVisibilityTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String expression; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    EXPRESSION((short)1, "expression");
          +    EXPRESSION((short) 1, "expression");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -35,7 +58,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // EXPRESSION
                     return EXPRESSION;
                   default:
          @@ -44,12 +67,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -79,14 +102,19 @@ public java.lang.String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.EXPRESSION};
          +  private static final _Fields optionals[] = { _Fields.EXPRESSION };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.EXPRESSION, new org.apache.thrift.meta_data.FieldMetaData("expression", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.EXPRESSION,
          +      new org.apache.thrift.meta_data.FieldMetaData("expression",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCellVisibility.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCellVisibility.class,
          +      metaDataMap);
             }
           
             public TCellVisibility() {
          @@ -115,7 +143,8 @@ public java.lang.String getExpression() {
               return this.expression;
             }
           
          -  public TCellVisibility setExpression(@org.apache.thrift.annotation.Nullable java.lang.String expression) {
          +  public TCellVisibility
          +      setExpression(@org.apache.thrift.annotation.Nullable java.lang.String expression) {
               this.expression = expression;
               return this;
             }
          @@ -135,15 +164,16 @@ public void setExpressionIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case EXPRESSION:
          -      if (value == null) {
          -        unsetExpression();
          -      } else {
          -        setExpression((java.lang.String)value);
          -      }
          -      break;
          +      case EXPRESSION:
          +        if (value == null) {
          +          unsetExpression();
          +        } else {
          +          setExpression((java.lang.String) value);
          +        }
          +        break;
           
               }
             }
          @@ -151,46 +181,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case EXPRESSION:
          -      return getExpression();
          +      case EXPRESSION:
          +        return getExpression();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case EXPRESSION:
          -      return isSetExpression();
          +      case EXPRESSION:
          +        return isSetExpression();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TCellVisibility)
          -      return this.equals((TCellVisibility)that);
          +    if (that instanceof TCellVisibility) return this.equals((TCellVisibility) that);
               return false;
             }
           
             public boolean equals(TCellVisibility that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_expression = true && this.isSetExpression();
               boolean that_present_expression = true && that.isSetExpression();
               if (this_present_expression || that_present_expression) {
          -      if (!(this_present_expression && that_present_expression))
          -        return false;
          -      if (!this.expression.equals(that.expression))
          -        return false;
          +      if (!(this_present_expression && that_present_expression)) return false;
          +      if (!this.expression.equals(that.expression)) return false;
               }
           
               return true;
          @@ -201,8 +229,7 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetExpression()) ? 131071 : 524287);
          -    if (isSetExpression())
          -      hashCode = hashCode * 8191 + expression.hashCode();
          +    if (isSetExpression()) hashCode = hashCode * 8191 + expression.hashCode();
           
               return hashCode;
             }
          @@ -237,7 +264,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -266,35 +294,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TCellVisibilityStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TCellVisibilityStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TCellVisibilityStandardScheme getScheme() {
                 return new TCellVisibilityStandardScheme();
               }
             }
           
          -  private static class TCellVisibilityStandardScheme extends org.apache.thrift.scheme.StandardScheme<TCellVisibility> {
          +  private static class TCellVisibilityStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<TCellVisibility> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TCellVisibility struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TCellVisibility struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -302,7 +335,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TCellVisibility str
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.expression = iprot.readString();
                         struct.setExpressionIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -317,7 +350,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TCellVisibility str
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TCellVisibility struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TCellVisibility struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -334,17 +368,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TCellVisibility st
           
             }
           
          -  private static class TCellVisibilityTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TCellVisibilityTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TCellVisibilityTupleScheme getScheme() {
                 return new TCellVisibilityTupleScheme();
               }
             }
           
          -  private static class TCellVisibilityTupleScheme extends org.apache.thrift.scheme.TupleScheme<TCellVisibility> {
          +  private static class TCellVisibilityTupleScheme
          +      extends org.apache.thrift.scheme.TupleScheme<TCellVisibility> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TCellVisibility struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TCellVisibility struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetExpression()) {
                   optionals.set(0);
          @@ -356,8 +394,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TCellVisibility str
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TCellVisibility struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TCellVisibility struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(1);
                 if (incoming.get(0)) {
                   struct.expression = iprot.readString();
          @@ -366,8 +406,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TCellVisibility stru
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
          index 90f7cdec2204..a830285a017d 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
          @@ -1,39 +1,63 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Addresses a single cell or multiple cells
          - * in a HBase table by column family and optionally
          - * a column qualifier and timestamp
          + * Addresses a single cell or multiple cells in a HBase table by column family and optionally a
          + * column qualifier and timestamp
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TColumn implements org.apache.thrift.TBase<TColumn, TColumn._Fields>, java.io.Serializable, Cloneable, Comparable<TColumn> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn");
          -
          -  private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TColumnStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TColumnTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TColumn implements org.apache.thrift.TBase<TColumn, TColumn._Fields>,
          +    java.io.Serializable, Cloneable, Comparable<TColumn> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TColumn");
          +
          +  private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +          (short) 3);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TColumnStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TColumnTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier; // optional
             public long timestamp; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    FAMILY((short)1, "family"),
          -    QUALIFIER((short)2, "qualifier"),
          -    TIMESTAMP((short)3, "timestamp");
          +    FAMILY((short) 1, "family"), QUALIFIER((short) 2, "qualifier"),
          +    TIMESTAMP((short) 3, "timestamp");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -46,7 +70,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // FAMILY
                     return FAMILY;
                   case 2: // QUALIFIER
          @@ -59,12 +83,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -96,15 +120,23 @@ public java.lang.String getFieldName() {
             // isset id assignments
             private static final int __TIMESTAMP_ISSET_ID = 0;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.QUALIFIER,_Fields.TIMESTAMP};
          +  private static final _Fields optionals[] = { _Fields.QUALIFIER, _Fields.TIMESTAMP };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("qualifier", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.FAMILY,
          +      new org.apache.thrift.meta_data.FieldMetaData("family",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.QUALIFIER,
          +      new org.apache.thrift.meta_data.FieldMetaData("qualifier",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumn.class, metaDataMap);
          @@ -113,9 +145,7 @@ public java.lang.String getFieldName() {
             public TColumn() {
             }
           
          -  public TColumn(
          -    java.nio.ByteBuffer family)
          -  {
          +  public TColumn(java.nio.ByteBuffer family) {
               this();
               this.family = org.apache.thrift.TBaseHelper.copyBinary(family);
             }
          @@ -156,7 +186,8 @@ public java.nio.ByteBuffer bufferForFamily() {
             }
           
             public TColumn setFamily(byte[] family) {
          -    this.family = family == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(family.clone());
          +    this.family =
          +        family == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(family.clone());
               return this;
             }
           
          @@ -190,11 +221,13 @@ public java.nio.ByteBuffer bufferForQualifier() {
             }
           
             public TColumn setQualifier(byte[] qualifier) {
          -    this.qualifier = qualifier == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(qualifier.clone());
          +    this.qualifier = qualifier == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(qualifier.clone());
               return this;
             }
           
          -  public TColumn setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
          +  public TColumn
          +      setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
               this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
               return this;
             }
          @@ -225,7 +258,8 @@ public TColumn setTimestamp(long timestamp) {
             }
           
             public void unsetTimestamp() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
             }
           
             /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -234,42 +268,44 @@ public boolean isSetTimestamp() {
             }
           
             public void setTimestampIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case FAMILY:
          -      if (value == null) {
          -        unsetFamily();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setFamily((byte[])value);
          +      case FAMILY:
          +        if (value == null) {
          +          unsetFamily();
                   } else {
          -          setFamily((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setFamily((byte[]) value);
          +          } else {
          +            setFamily((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case QUALIFIER:
          -      if (value == null) {
          -        unsetQualifier();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setQualifier((byte[])value);
          +      case QUALIFIER:
          +        if (value == null) {
          +          unsetQualifier();
                   } else {
          -          setQualifier((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setQualifier((byte[]) value);
          +          } else {
          +            setQualifier((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case TIMESTAMP:
          -      if (value == null) {
          -        unsetTimestamp();
          -      } else {
          -        setTimestamp((java.lang.Long)value);
          -      }
          -      break;
          +      case TIMESTAMP:
          +        if (value == null) {
          +          unsetTimestamp();
          +        } else {
          +          setTimestamp((java.lang.Long) value);
          +        }
          +        break;
           
               }
             }
          @@ -277,74 +313,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case FAMILY:
          -      return getFamily();
          +      case FAMILY:
          +        return getFamily();
           
          -    case QUALIFIER:
          -      return getQualifier();
          +      case QUALIFIER:
          +        return getQualifier();
           
          -    case TIMESTAMP:
          -      return getTimestamp();
          +      case TIMESTAMP:
          +        return getTimestamp();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case FAMILY:
          -      return isSetFamily();
          -    case QUALIFIER:
          -      return isSetQualifier();
          -    case TIMESTAMP:
          -      return isSetTimestamp();
          +      case FAMILY:
          +        return isSetFamily();
          +      case QUALIFIER:
          +        return isSetQualifier();
          +      case TIMESTAMP:
          +        return isSetTimestamp();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TColumn)
          -      return this.equals((TColumn)that);
          +    if (that instanceof TColumn) return this.equals((TColumn) that);
               return false;
             }
           
             public boolean equals(TColumn that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_family = true && this.isSetFamily();
               boolean that_present_family = true && that.isSetFamily();
               if (this_present_family || that_present_family) {
          -      if (!(this_present_family && that_present_family))
          -        return false;
          -      if (!this.family.equals(that.family))
          -        return false;
          +      if (!(this_present_family && that_present_family)) return false;
          +      if (!this.family.equals(that.family)) return false;
               }
           
               boolean this_present_qualifier = true && this.isSetQualifier();
               boolean that_present_qualifier = true && that.isSetQualifier();
               if (this_present_qualifier || that_present_qualifier) {
          -      if (!(this_present_qualifier && that_present_qualifier))
          -        return false;
          -      if (!this.qualifier.equals(that.qualifier))
          -        return false;
          +      if (!(this_present_qualifier && that_present_qualifier)) return false;
          +      if (!this.qualifier.equals(that.qualifier)) return false;
               }
           
               boolean this_present_timestamp = true && this.isSetTimestamp();
               boolean that_present_timestamp = true && that.isSetTimestamp();
               if (this_present_timestamp || that_present_timestamp) {
          -      if (!(this_present_timestamp && that_present_timestamp))
          -        return false;
          -      if (this.timestamp != that.timestamp)
          -        return false;
          +      if (!(this_present_timestamp && that_present_timestamp)) return false;
          +      if (this.timestamp != that.timestamp) return false;
               }
           
               return true;
          @@ -355,12 +385,10 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetFamily()) ? 131071 : 524287);
          -    if (isSetFamily())
          -      hashCode = hashCode * 8191 + family.hashCode();
          +    if (isSetFamily()) hashCode = hashCode * 8191 + family.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetQualifier()) ? 131071 : 524287);
          -    if (isSetQualifier())
          -      hashCode = hashCode * 8191 + qualifier.hashCode();
          +    if (isSetQualifier()) hashCode = hashCode * 8191 + qualifier.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetTimestamp()) ? 131071 : 524287);
               if (isSetTimestamp())
          @@ -419,7 +447,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -458,44 +487,51 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (family == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'family' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'family' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TColumnStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TColumnStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TColumnStandardScheme getScheme() {
                 return new TColumnStandardScheme();
               }
             }
           
          -  private static class TColumnStandardScheme extends org.apache.thrift.scheme.StandardScheme<TColumn> {
          +  private static class TColumnStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<TColumn> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -503,7 +539,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.family = iprot.readBinary();
                         struct.setFamilyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -511,7 +547,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.qualifier = iprot.readBinary();
                         struct.setQualifierIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -519,7 +555,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.timestamp = iprot.readI64();
                         struct.setTimestampIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -534,7 +570,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumn struct) thr
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumn struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumn struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -570,8 +607,10 @@ public TColumnTupleScheme getScheme() {
             private static class TColumnTupleScheme extends org.apache.thrift.scheme.TupleScheme<TColumn> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TColumn struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TColumn struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.family);
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetQualifier()) {
          @@ -590,8 +629,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TColumn struct) thr
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TColumn struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TColumn struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.family = iprot.readBinary();
                 struct.setFamilyIsSet(true);
                 java.util.BitSet incoming = iprot.readBitSet(2);
          @@ -606,8 +647,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TColumn struct) thro
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
          index 0c48ba68effb..cfc6f9eff5dd 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
          @@ -1,66 +1,118 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.client.ColumnFamilyDescriptor
          + * Thrift wrapper around org.apache.hadoop.hbase.client.ColumnFamilyDescriptor
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TColumnFamilyDescriptor implements org.apache.thrift.TBase<TColumnFamilyDescriptor, TColumnFamilyDescriptor._Fields>, java.io.Serializable, Cloneable, Comparable<TColumnFamilyDescriptor> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnFamilyDescriptor");
          -
          -  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)2);
          -  private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC = new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP, (short)3);
          -  private static final org.apache.thrift.protocol.TField BLOCK_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("blockSize", org.apache.thrift.protocol.TType.I32, (short)4);
          -  private static final org.apache.thrift.protocol.TField BLOOMN_FILTER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("bloomnFilterType", org.apache.thrift.protocol.TType.I32, (short)5);
          -  private static final org.apache.thrift.protocol.TField COMPRESSION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("compressionType", org.apache.thrift.protocol.TType.I32, (short)6);
          -  private static final org.apache.thrift.protocol.TField DFS_REPLICATION_FIELD_DESC = new org.apache.thrift.protocol.TField("dfsReplication", org.apache.thrift.protocol.TType.I16, (short)7);
          -  private static final org.apache.thrift.protocol.TField DATA_BLOCK_ENCODING_FIELD_DESC = new org.apache.thrift.protocol.TField("dataBlockEncoding", org.apache.thrift.protocol.TType.I32, (short)8);
          -  private static final org.apache.thrift.protocol.TField KEEP_DELETED_CELLS_FIELD_DESC = new org.apache.thrift.protocol.TField("keepDeletedCells", org.apache.thrift.protocol.TType.I32, (short)9);
          -  private static final org.apache.thrift.protocol.TField MAX_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxVersions", org.apache.thrift.protocol.TType.I32, (short)10);
          -  private static final org.apache.thrift.protocol.TField MIN_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("minVersions", org.apache.thrift.protocol.TType.I32, (short)11);
          -  private static final org.apache.thrift.protocol.TField SCOPE_FIELD_DESC = new org.apache.thrift.protocol.TField("scope", org.apache.thrift.protocol.TType.I32, (short)12);
          -  private static final org.apache.thrift.protocol.TField TIME_TO_LIVE_FIELD_DESC = new org.apache.thrift.protocol.TField("timeToLive", org.apache.thrift.protocol.TType.I32, (short)13);
          -  private static final org.apache.thrift.protocol.TField BLOCK_CACHE_ENABLED_FIELD_DESC = new org.apache.thrift.protocol.TField("blockCacheEnabled", org.apache.thrift.protocol.TType.BOOL, (short)14);
          -  private static final org.apache.thrift.protocol.TField CACHE_BLOOMS_ON_WRITE_FIELD_DESC = new org.apache.thrift.protocol.TField("cacheBloomsOnWrite", org.apache.thrift.protocol.TType.BOOL, (short)15);
          -  private static final org.apache.thrift.protocol.TField CACHE_DATA_ON_WRITE_FIELD_DESC = new org.apache.thrift.protocol.TField("cacheDataOnWrite", org.apache.thrift.protocol.TType.BOOL, (short)16);
          -  private static final org.apache.thrift.protocol.TField CACHE_INDEXES_ON_WRITE_FIELD_DESC = new org.apache.thrift.protocol.TField("cacheIndexesOnWrite", org.apache.thrift.protocol.TType.BOOL, (short)17);
          -  private static final org.apache.thrift.protocol.TField COMPRESS_TAGS_FIELD_DESC = new org.apache.thrift.protocol.TField("compressTags", org.apache.thrift.protocol.TType.BOOL, (short)18);
          -  private static final org.apache.thrift.protocol.TField EVICT_BLOCKS_ON_CLOSE_FIELD_DESC = new org.apache.thrift.protocol.TField("evictBlocksOnClose", org.apache.thrift.protocol.TType.BOOL, (short)19);
          -  private static final org.apache.thrift.protocol.TField IN_MEMORY_FIELD_DESC = new org.apache.thrift.protocol.TField("inMemory", org.apache.thrift.protocol.TType.BOOL, (short)20);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TColumnFamilyDescriptorStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TColumnFamilyDescriptorTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TColumnFamilyDescriptor
          +    implements org.apache.thrift.TBase<TColumnFamilyDescriptor, TColumnFamilyDescriptor._Fields>,
          +    java.io.Serializable, Cloneable, Comparable<TColumnFamilyDescriptor> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TColumnFamilyDescriptor");
          +
          +  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField BLOCK_SIZE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("blockSize", org.apache.thrift.protocol.TType.I32,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField BLOOMN_FILTER_TYPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("bloomnFilterType",
          +          org.apache.thrift.protocol.TType.I32, (short) 5);
          +  private static final org.apache.thrift.protocol.TField COMPRESSION_TYPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("compressionType", org.apache.thrift.protocol.TType.I32,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField DFS_REPLICATION_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("dfsReplication", org.apache.thrift.protocol.TType.I16,
          +          (short) 7);
          +  private static final org.apache.thrift.protocol.TField DATA_BLOCK_ENCODING_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("dataBlockEncoding",
          +          org.apache.thrift.protocol.TType.I32, (short) 8);
          +  private static final org.apache.thrift.protocol.TField KEEP_DELETED_CELLS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("keepDeletedCells",
          +          org.apache.thrift.protocol.TType.I32, (short) 9);
          +  private static final org.apache.thrift.protocol.TField MAX_VERSIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("maxVersions", org.apache.thrift.protocol.TType.I32,
          +          (short) 10);
          +  private static final org.apache.thrift.protocol.TField MIN_VERSIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("minVersions", org.apache.thrift.protocol.TType.I32,
          +          (short) 11);
          +  private static final org.apache.thrift.protocol.TField SCOPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("scope", org.apache.thrift.protocol.TType.I32,
          +          (short) 12);
          +  private static final org.apache.thrift.protocol.TField TIME_TO_LIVE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timeToLive", org.apache.thrift.protocol.TType.I32,
          +          (short) 13);
          +  private static final org.apache.thrift.protocol.TField BLOCK_CACHE_ENABLED_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("blockCacheEnabled",
          +          org.apache.thrift.protocol.TType.BOOL, (short) 14);
          +  private static final org.apache.thrift.protocol.TField CACHE_BLOOMS_ON_WRITE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cacheBloomsOnWrite",
          +          org.apache.thrift.protocol.TType.BOOL, (short) 15);
          +  private static final org.apache.thrift.protocol.TField CACHE_DATA_ON_WRITE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cacheDataOnWrite",
          +          org.apache.thrift.protocol.TType.BOOL, (short) 16);
          +  private static final org.apache.thrift.protocol.TField CACHE_INDEXES_ON_WRITE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cacheIndexesOnWrite",
          +          org.apache.thrift.protocol.TType.BOOL, (short) 17);
          +  private static final org.apache.thrift.protocol.TField COMPRESS_TAGS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("compressTags", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 18);
          +  private static final org.apache.thrift.protocol.TField EVICT_BLOCKS_ON_CLOSE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("evictBlocksOnClose",
          +          org.apache.thrift.protocol.TType.BOOL, (short) 19);
          +  private static final org.apache.thrift.protocol.TField IN_MEMORY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("inMemory", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 20);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TColumnFamilyDescriptorStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TColumnFamilyDescriptorTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer name; // required
           -  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // optional
           -  public @org.apache.thrift.annotation.Nullable java.util.Map<java.lang.String,java.lang.String> configuration; // optional
           +  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // optional
           +  public @org.apache.thrift.annotation.Nullable java.util.Map<java.lang.String, java.lang.String> configuration; // optional
             public int blockSize; // optional
             /**
          -   * 
              * @see TBloomFilterType
              */
             public @org.apache.thrift.annotation.Nullable TBloomFilterType bloomnFilterType; // optional
             /**
          -   * 
              * @see TCompressionAlgorithm
              */
             public @org.apache.thrift.annotation.Nullable TCompressionAlgorithm compressionType; // optional
             public short dfsReplication; // optional
             /**
          -   * 
              * @see TDataBlockEncoding
              */
             public @org.apache.thrift.annotation.Nullable TDataBlockEncoding dataBlockEncoding; // optional
             /**
          -   * 
              * @see TKeepDeletedCells
              */
             public @org.apache.thrift.annotation.Nullable TKeepDeletedCells keepDeletedCells; // optional
           @@ -76,46 +128,39 @@ public class TColumnFamilyDescriptor implements org.apache.thrift.TBase<TColumnFamilyDescriptor, TColumnFamilyDescriptor._Fields>
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    KEEP_DELETED_CELLS((short) 9, "keepDeletedCells"), MAX_VERSIONS((short) 10, "maxVersions"),
          +    MIN_VERSIONS((short) 11, "minVersions"), SCOPE((short) 12, "scope"),
          +    TIME_TO_LIVE((short) 13, "timeToLive"), BLOCK_CACHE_ENABLED((short) 14, "blockCacheEnabled"),
          +    CACHE_BLOOMS_ON_WRITE((short) 15, "cacheBloomsOnWrite"),
          +    CACHE_DATA_ON_WRITE((short) 16, "cacheDataOnWrite"),
          +    CACHE_INDEXES_ON_WRITE((short) 17, "cacheIndexesOnWrite"),
          +    COMPRESS_TAGS((short) 18, "compressTags"),
          +    EVICT_BLOCKS_ON_CLOSE((short) 19, "evictBlocksOnClose"), IN_MEMORY((short) 20, "inMemory");
          +
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -128,7 +173,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // NAME
                     return NAME;
                   case 2: // ATTRIBUTES
          @@ -175,12 +220,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -224,64 +269,106 @@ public java.lang.String getFieldName() {
             private static final int __EVICTBLOCKSONCLOSE_ISSET_ID = 11;
             private static final int __INMEMORY_ISSET_ID = 12;
             private short __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.ATTRIBUTES,_Fields.CONFIGURATION,_Fields.BLOCK_SIZE,_Fields.BLOOMN_FILTER_TYPE,_Fields.COMPRESSION_TYPE,_Fields.DFS_REPLICATION,_Fields.DATA_BLOCK_ENCODING,_Fields.KEEP_DELETED_CELLS,_Fields.MAX_VERSIONS,_Fields.MIN_VERSIONS,_Fields.SCOPE,_Fields.TIME_TO_LIVE,_Fields.BLOCK_CACHE_ENABLED,_Fields.CACHE_BLOOMS_ON_WRITE,_Fields.CACHE_DATA_ON_WRITE,_Fields.CACHE_INDEXES_ON_WRITE,_Fields.COMPRESS_TAGS,_Fields.EVICT_BLOCKS_ON_CLOSE,_Fields.IN_MEMORY};
          +  private static final _Fields optionals[] =
          +      { _Fields.ATTRIBUTES, _Fields.CONFIGURATION, _Fields.BLOCK_SIZE, _Fields.BLOOMN_FILTER_TYPE,
          +          _Fields.COMPRESSION_TYPE, _Fields.DFS_REPLICATION, _Fields.DATA_BLOCK_ENCODING,
          +          _Fields.KEEP_DELETED_CELLS, _Fields.MAX_VERSIONS, _Fields.MIN_VERSIONS, _Fields.SCOPE,
          +          _Fields.TIME_TO_LIVE, _Fields.BLOCK_CACHE_ENABLED, _Fields.CACHE_BLOOMS_ON_WRITE,
          +          _Fields.CACHE_DATA_ON_WRITE, _Fields.CACHE_INDEXES_ON_WRITE, _Fields.COMPRESS_TAGS,
          +          _Fields.EVICT_BLOCKS_ON_CLOSE, _Fields.IN_MEMORY };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
          -    tmpMap.put(_Fields.CONFIGURATION, new org.apache.thrift.meta_data.FieldMetaData("configuration", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
          -    tmpMap.put(_Fields.BLOCK_SIZE, new org.apache.thrift.meta_data.FieldMetaData("blockSize", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("name",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.ATTRIBUTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true))));
          +    tmpMap.put(_Fields.CONFIGURATION,
          +      new org.apache.thrift.meta_data.FieldMetaData("configuration",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING))));
          +    tmpMap.put(_Fields.BLOCK_SIZE, new org.apache.thrift.meta_data.FieldMetaData("blockSize",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.BLOOMN_FILTER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("bloomnFilterType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TBloomFilterType.class)));
          -    tmpMap.put(_Fields.COMPRESSION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("compressionType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TCompressionAlgorithm.class)));
          -    tmpMap.put(_Fields.DFS_REPLICATION, new org.apache.thrift.meta_data.FieldMetaData("dfsReplication", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.BLOOMN_FILTER_TYPE,
          +      new org.apache.thrift.meta_data.FieldMetaData("bloomnFilterType",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TBloomFilterType.class)));
          +    tmpMap.put(_Fields.COMPRESSION_TYPE,
          +      new org.apache.thrift.meta_data.FieldMetaData("compressionType",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TCompressionAlgorithm.class)));
          +    tmpMap.put(_Fields.DFS_REPLICATION, new org.apache.thrift.meta_data.FieldMetaData(
          +        "dfsReplication", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16)));
          -    tmpMap.put(_Fields.DATA_BLOCK_ENCODING, new org.apache.thrift.meta_data.FieldMetaData("dataBlockEncoding", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TDataBlockEncoding.class)));
          -    tmpMap.put(_Fields.KEEP_DELETED_CELLS, new org.apache.thrift.meta_data.FieldMetaData("keepDeletedCells", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TKeepDeletedCells.class)));
          -    tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.DATA_BLOCK_ENCODING,
          +      new org.apache.thrift.meta_data.FieldMetaData("dataBlockEncoding",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TDataBlockEncoding.class)));
          +    tmpMap.put(_Fields.KEEP_DELETED_CELLS,
          +      new org.apache.thrift.meta_data.FieldMetaData("keepDeletedCells",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TKeepDeletedCells.class)));
          +    tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.MIN_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("minVersions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.MIN_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("minVersions",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.SCOPE, new org.apache.thrift.meta_data.FieldMetaData("scope", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.SCOPE, new org.apache.thrift.meta_data.FieldMetaData("scope",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.TIME_TO_LIVE, new org.apache.thrift.meta_data.FieldMetaData("timeToLive", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.TIME_TO_LIVE, new org.apache.thrift.meta_data.FieldMetaData("timeToLive",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.BLOCK_CACHE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData("blockCacheEnabled", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.BLOCK_CACHE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData(
          +        "blockCacheEnabled", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.CACHE_BLOOMS_ON_WRITE, new org.apache.thrift.meta_data.FieldMetaData("cacheBloomsOnWrite", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.CACHE_BLOOMS_ON_WRITE, new org.apache.thrift.meta_data.FieldMetaData(
          +        "cacheBloomsOnWrite", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.CACHE_DATA_ON_WRITE, new org.apache.thrift.meta_data.FieldMetaData("cacheDataOnWrite", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.CACHE_DATA_ON_WRITE, new org.apache.thrift.meta_data.FieldMetaData(
          +        "cacheDataOnWrite", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.CACHE_INDEXES_ON_WRITE, new org.apache.thrift.meta_data.FieldMetaData("cacheIndexesOnWrite", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.CACHE_INDEXES_ON_WRITE, new org.apache.thrift.meta_data.FieldMetaData(
          +        "cacheIndexesOnWrite", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.COMPRESS_TAGS, new org.apache.thrift.meta_data.FieldMetaData("compressTags", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.COMPRESS_TAGS, new org.apache.thrift.meta_data.FieldMetaData("compressTags",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.EVICT_BLOCKS_ON_CLOSE, new org.apache.thrift.meta_data.FieldMetaData("evictBlocksOnClose", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.EVICT_BLOCKS_ON_CLOSE, new org.apache.thrift.meta_data.FieldMetaData(
          +        "evictBlocksOnClose", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.IN_MEMORY, new org.apache.thrift.meta_data.FieldMetaData("inMemory", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.IN_MEMORY, new org.apache.thrift.meta_data.FieldMetaData("inMemory",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnFamilyDescriptor.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnFamilyDescriptor.class,
          +      metaDataMap);
             }
           
             public TColumnFamilyDescriptor() {
             }
           
          -  public TColumnFamilyDescriptor(
          -    java.nio.ByteBuffer name)
          -  {
          +  public TColumnFamilyDescriptor(java.nio.ByteBuffer name) {
               this();
               this.name = org.apache.thrift.TBaseHelper.copyBinary(name);
             }
          @@ -295,11 +382,13 @@ public TColumnFamilyDescriptor(TColumnFamilyDescriptor other) {
                 this.name = org.apache.thrift.TBaseHelper.copyBinary(other.name);
               }
               if (other.isSetAttributes()) {
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes);
           +      java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +          new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(other.attributes);
                 this.attributes = __this__attributes;
               }
               if (other.isSetConfiguration()) {
           -      java.util.Map<java.lang.String,java.lang.String> __this__configuration = new java.util.HashMap<java.lang.String,java.lang.String>(other.configuration);
           +      java.util.Map<java.lang.String, java.lang.String> __this__configuration =
           +          new java.util.HashMap<java.lang.String, java.lang.String>(other.configuration);
                 this.configuration = __this__configuration;
               }
               this.blockSize = other.blockSize;
          @@ -380,11 +469,12 @@ public java.nio.ByteBuffer bufferForName() {
             }
           
             public TColumnFamilyDescriptor setName(byte[] name) {
          -    this.name = name == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(name.clone());
          +    this.name = name == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(name.clone());
               return this;
             }
           
          -  public TColumnFamilyDescriptor setName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer name) {
          +  public TColumnFamilyDescriptor
          +      setName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer name) {
               this.name = org.apache.thrift.TBaseHelper.copyBinary(name);
               return this;
             }
          @@ -410,17 +500,18 @@ public int getAttributesSize() {
           
             public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
               if (this.attributes == null) {
           -      this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +      this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
               }
               this.attributes.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
           -  public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +  public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
               return this.attributes;
             }
           
           -  public TColumnFamilyDescriptor setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +  public TColumnFamilyDescriptor setAttributes(
           +      @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
               this.attributes = attributes;
               return this;
             }
          @@ -446,17 +537,18 @@ public int getConfigurationSize() {
           
             public void putToConfiguration(java.lang.String key, java.lang.String val) {
               if (this.configuration == null) {
           -      this.configuration = new java.util.HashMap<java.lang.String,java.lang.String>();
           +      this.configuration = new java.util.HashMap<java.lang.String, java.lang.String>();
               }
               this.configuration.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
           -  public java.util.Map<java.lang.String,java.lang.String> getConfiguration() {
           +  public java.util.Map<java.lang.String, java.lang.String> getConfiguration() {
               return this.configuration;
             }
           
           -  public TColumnFamilyDescriptor setConfiguration(@org.apache.thrift.annotation.Nullable java.util.Map<java.lang.String,java.lang.String> configuration) {
           +  public TColumnFamilyDescriptor setConfiguration(
           +      @org.apache.thrift.annotation.Nullable java.util.Map<java.lang.String, java.lang.String> configuration) {
               this.configuration = configuration;
               return this;
             }
          @@ -487,7 +579,8 @@ public TColumnFamilyDescriptor setBlockSize(int blockSize) {
             }
           
             public void unsetBlockSize() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLOCKSIZE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLOCKSIZE_ISSET_ID);
             }
           
             /** Returns true if field blockSize is set (has been assigned a value) and false otherwise */
          @@ -496,11 +589,11 @@ public boolean isSetBlockSize() {
             }
           
             public void setBlockSizeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BLOCKSIZE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BLOCKSIZE_ISSET_ID, value);
             }
           
             /**
          -   * 
              * @see TBloomFilterType
              */
             @org.apache.thrift.annotation.Nullable
          @@ -509,10 +602,10 @@ public TBloomFilterType getBloomnFilterType() {
             }
           
             /**
          -   * 
              * @see TBloomFilterType
              */
          -  public TColumnFamilyDescriptor setBloomnFilterType(@org.apache.thrift.annotation.Nullable TBloomFilterType bloomnFilterType) {
          +  public TColumnFamilyDescriptor setBloomnFilterType(
          +      @org.apache.thrift.annotation.Nullable TBloomFilterType bloomnFilterType) {
               this.bloomnFilterType = bloomnFilterType;
               return this;
             }
          @@ -521,7 +614,9 @@ public void unsetBloomnFilterType() {
               this.bloomnFilterType = null;
             }
           
          -  /** Returns true if field bloomnFilterType is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field bloomnFilterType is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetBloomnFilterType() {
               return this.bloomnFilterType != null;
             }
          @@ -533,7 +628,6 @@ public void setBloomnFilterTypeIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TCompressionAlgorithm
              */
             @org.apache.thrift.annotation.Nullable
          @@ -542,10 +636,10 @@ public TCompressionAlgorithm getCompressionType() {
             }
           
             /**
          -   * 
              * @see TCompressionAlgorithm
              */
          -  public TColumnFamilyDescriptor setCompressionType(@org.apache.thrift.annotation.Nullable TCompressionAlgorithm compressionType) {
          +  public TColumnFamilyDescriptor setCompressionType(
          +      @org.apache.thrift.annotation.Nullable TCompressionAlgorithm compressionType) {
               this.compressionType = compressionType;
               return this;
             }
          @@ -554,7 +648,9 @@ public void unsetCompressionType() {
               this.compressionType = null;
             }
           
          -  /** Returns true if field compressionType is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field compressionType is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetCompressionType() {
               return this.compressionType != null;
             }
          @@ -576,7 +672,8 @@ public TColumnFamilyDescriptor setDfsReplication(short dfsReplication) {
             }
           
             public void unsetDfsReplication() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __DFSREPLICATION_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __DFSREPLICATION_ISSET_ID);
             }
           
             /** Returns true if field dfsReplication is set (has been assigned a value) and false otherwise */
          @@ -585,11 +682,11 @@ public boolean isSetDfsReplication() {
             }
           
             public void setDfsReplicationIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __DFSREPLICATION_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __DFSREPLICATION_ISSET_ID, value);
             }
           
             /**
          -   * 
              * @see TDataBlockEncoding
              */
             @org.apache.thrift.annotation.Nullable
          @@ -598,10 +695,10 @@ public TDataBlockEncoding getDataBlockEncoding() {
             }
           
             /**
          -   * 
              * @see TDataBlockEncoding
              */
          -  public TColumnFamilyDescriptor setDataBlockEncoding(@org.apache.thrift.annotation.Nullable TDataBlockEncoding dataBlockEncoding) {
          +  public TColumnFamilyDescriptor setDataBlockEncoding(
          +      @org.apache.thrift.annotation.Nullable TDataBlockEncoding dataBlockEncoding) {
               this.dataBlockEncoding = dataBlockEncoding;
               return this;
             }
          @@ -610,7 +707,9 @@ public void unsetDataBlockEncoding() {
               this.dataBlockEncoding = null;
             }
           
          -  /** Returns true if field dataBlockEncoding is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field dataBlockEncoding is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetDataBlockEncoding() {
               return this.dataBlockEncoding != null;
             }
          @@ -622,7 +721,6 @@ public void setDataBlockEncodingIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TKeepDeletedCells
              */
             @org.apache.thrift.annotation.Nullable
          @@ -631,10 +729,10 @@ public TKeepDeletedCells getKeepDeletedCells() {
             }
           
             /**
          -   * 
              * @see TKeepDeletedCells
              */
          -  public TColumnFamilyDescriptor setKeepDeletedCells(@org.apache.thrift.annotation.Nullable TKeepDeletedCells keepDeletedCells) {
          +  public TColumnFamilyDescriptor setKeepDeletedCells(
          +      @org.apache.thrift.annotation.Nullable TKeepDeletedCells keepDeletedCells) {
               this.keepDeletedCells = keepDeletedCells;
               return this;
             }
          @@ -643,7 +741,9 @@ public void unsetKeepDeletedCells() {
               this.keepDeletedCells = null;
             }
           
          -  /** Returns true if field keepDeletedCells is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field keepDeletedCells is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetKeepDeletedCells() {
               return this.keepDeletedCells != null;
             }
          @@ -665,7 +765,8 @@ public TColumnFamilyDescriptor setMaxVersions(int maxVersions) {
             }
           
             public void unsetMaxVersions() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID);
             }
           
             /** Returns true if field maxVersions is set (has been assigned a value) and false otherwise */
          @@ -674,7 +775,8 @@ public boolean isSetMaxVersions() {
             }
           
             public void setMaxVersionsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID, value);
             }
           
             public int getMinVersions() {
          @@ -688,7 +790,8 @@ public TColumnFamilyDescriptor setMinVersions(int minVersions) {
             }
           
             public void unsetMinVersions() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MINVERSIONS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MINVERSIONS_ISSET_ID);
             }
           
             /** Returns true if field minVersions is set (has been assigned a value) and false otherwise */
          @@ -697,7 +800,8 @@ public boolean isSetMinVersions() {
             }
           
             public void setMinVersionsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MINVERSIONS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MINVERSIONS_ISSET_ID, value);
             }
           
             public int getScope() {
          @@ -720,7 +824,8 @@ public boolean isSetScope() {
             }
           
             public void setScopeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SCOPE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SCOPE_ISSET_ID, value);
             }
           
             public int getTimeToLive() {
          @@ -734,7 +839,8 @@ public TColumnFamilyDescriptor setTimeToLive(int timeToLive) {
             }
           
             public void unsetTimeToLive() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMETOLIVE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMETOLIVE_ISSET_ID);
             }
           
             /** Returns true if field timeToLive is set (has been assigned a value) and false otherwise */
          @@ -743,7 +849,8 @@ public boolean isSetTimeToLive() {
             }
           
             public void setTimeToLiveIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMETOLIVE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMETOLIVE_ISSET_ID, value);
             }
           
             public boolean isBlockCacheEnabled() {
          @@ -757,16 +864,20 @@ public TColumnFamilyDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
             }
           
             public void unsetBlockCacheEnabled() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLOCKCACHEENABLED_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLOCKCACHEENABLED_ISSET_ID);
             }
           
          -  /** Returns true if field blockCacheEnabled is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field blockCacheEnabled is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetBlockCacheEnabled() {
               return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __BLOCKCACHEENABLED_ISSET_ID);
             }
           
             public void setBlockCacheEnabledIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BLOCKCACHEENABLED_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __BLOCKCACHEENABLED_ISSET_ID, value);
             }
           
             public boolean isCacheBloomsOnWrite() {
          @@ -780,16 +891,20 @@ public TColumnFamilyDescriptor setCacheBloomsOnWrite(boolean cacheBloomsOnWrite)
             }
           
             public void unsetCacheBloomsOnWrite() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOOMSONWRITE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOOMSONWRITE_ISSET_ID);
             }
           
          -  /** Returns true if field cacheBloomsOnWrite is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field cacheBloomsOnWrite is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetCacheBloomsOnWrite() {
               return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __CACHEBLOOMSONWRITE_ISSET_ID);
             }
           
             public void setCacheBloomsOnWriteIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHEBLOOMSONWRITE_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __CACHEBLOOMSONWRITE_ISSET_ID, value);
             }
           
             public boolean isCacheDataOnWrite() {
          @@ -803,16 +918,20 @@ public TColumnFamilyDescriptor setCacheDataOnWrite(boolean cacheDataOnWrite) {
             }
           
             public void unsetCacheDataOnWrite() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEDATAONWRITE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEDATAONWRITE_ISSET_ID);
             }
           
          -  /** Returns true if field cacheDataOnWrite is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field cacheDataOnWrite is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetCacheDataOnWrite() {
               return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __CACHEDATAONWRITE_ISSET_ID);
             }
           
             public void setCacheDataOnWriteIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHEDATAONWRITE_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __CACHEDATAONWRITE_ISSET_ID, value);
             }
           
             public boolean isCacheIndexesOnWrite() {
          @@ -826,16 +945,22 @@ public TColumnFamilyDescriptor setCacheIndexesOnWrite(boolean cacheIndexesOnWrit
             }
           
             public void unsetCacheIndexesOnWrite() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEINDEXESONWRITE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEINDEXESONWRITE_ISSET_ID);
             }
           
          -  /** Returns true if field cacheIndexesOnWrite is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field cacheIndexesOnWrite is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSetCacheIndexesOnWrite() {
          -    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __CACHEINDEXESONWRITE_ISSET_ID);
          +    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield,
          +      __CACHEINDEXESONWRITE_ISSET_ID);
             }
           
             public void setCacheIndexesOnWriteIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHEINDEXESONWRITE_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __CACHEINDEXESONWRITE_ISSET_ID, value);
             }
           
             public boolean isCompressTags() {
          @@ -849,7 +974,8 @@ public TColumnFamilyDescriptor setCompressTags(boolean compressTags) {
             }
           
             public void unsetCompressTags() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __COMPRESSTAGS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __COMPRESSTAGS_ISSET_ID);
             }
           
             /** Returns true if field compressTags is set (has been assigned a value) and false otherwise */
          @@ -858,7 +984,8 @@ public boolean isSetCompressTags() {
             }
           
             public void setCompressTagsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __COMPRESSTAGS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __COMPRESSTAGS_ISSET_ID, value);
             }
           
             public boolean isEvictBlocksOnClose() {
          @@ -872,16 +999,20 @@ public TColumnFamilyDescriptor setEvictBlocksOnClose(boolean evictBlocksOnClose)
             }
           
             public void unsetEvictBlocksOnClose() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __EVICTBLOCKSONCLOSE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __EVICTBLOCKSONCLOSE_ISSET_ID);
             }
           
          -  /** Returns true if field evictBlocksOnClose is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field evictBlocksOnClose is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetEvictBlocksOnClose() {
               return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __EVICTBLOCKSONCLOSE_ISSET_ID);
             }
           
             public void setEvictBlocksOnCloseIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __EVICTBLOCKSONCLOSE_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __EVICTBLOCKSONCLOSE_ISSET_ID, value);
             }
           
             public boolean isInMemory() {
          @@ -895,7 +1026,8 @@ public TColumnFamilyDescriptor setInMemory(boolean inMemory) {
             }
           
             public void unsetInMemory() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __INMEMORY_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __INMEMORY_ISSET_ID);
             }
           
             /** Returns true if field inMemory is set (has been assigned a value) and false otherwise */
          @@ -904,174 +1036,176 @@ public boolean isSetInMemory() {
             }
           
             public void setInMemoryIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __INMEMORY_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __INMEMORY_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case NAME:
          -      if (value == null) {
          -        unsetName();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setName((byte[])value);
          +      case NAME:
          +        if (value == null) {
          +          unsetName();
                   } else {
          -          setName((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setName((byte[]) value);
          +          } else {
          +            setName((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case ATTRIBUTES:
          -      if (value == null) {
          -        unsetAttributes();
          -      } else {
           -        setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case ATTRIBUTES:
          +        if (value == null) {
          +          unsetAttributes();
          +        } else {
           +          setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case CONFIGURATION:
          -      if (value == null) {
          -        unsetConfiguration();
          -      } else {
           -        setConfiguration((java.util.Map<java.lang.String,java.lang.String>)value);
          -      }
          -      break;
          +      case CONFIGURATION:
          +        if (value == null) {
          +          unsetConfiguration();
          +        } else {
           +          setConfiguration((java.util.Map<java.lang.String, java.lang.String>) value);
          +        }
          +        break;
           
          -    case BLOCK_SIZE:
          -      if (value == null) {
          -        unsetBlockSize();
          -      } else {
          -        setBlockSize((java.lang.Integer)value);
          -      }
          -      break;
          +      case BLOCK_SIZE:
          +        if (value == null) {
          +          unsetBlockSize();
          +        } else {
          +          setBlockSize((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case BLOOMN_FILTER_TYPE:
          -      if (value == null) {
          -        unsetBloomnFilterType();
          -      } else {
          -        setBloomnFilterType((TBloomFilterType)value);
          -      }
          -      break;
          +      case BLOOMN_FILTER_TYPE:
          +        if (value == null) {
          +          unsetBloomnFilterType();
          +        } else {
          +          setBloomnFilterType((TBloomFilterType) value);
          +        }
          +        break;
           
          -    case COMPRESSION_TYPE:
          -      if (value == null) {
          -        unsetCompressionType();
          -      } else {
          -        setCompressionType((TCompressionAlgorithm)value);
          -      }
          -      break;
          +      case COMPRESSION_TYPE:
          +        if (value == null) {
          +          unsetCompressionType();
          +        } else {
          +          setCompressionType((TCompressionAlgorithm) value);
          +        }
          +        break;
           
          -    case DFS_REPLICATION:
          -      if (value == null) {
          -        unsetDfsReplication();
          -      } else {
          -        setDfsReplication((java.lang.Short)value);
          -      }
          -      break;
          +      case DFS_REPLICATION:
          +        if (value == null) {
          +          unsetDfsReplication();
          +        } else {
          +          setDfsReplication((java.lang.Short) value);
          +        }
          +        break;
           
          -    case DATA_BLOCK_ENCODING:
          -      if (value == null) {
          -        unsetDataBlockEncoding();
          -      } else {
          -        setDataBlockEncoding((TDataBlockEncoding)value);
          -      }
          -      break;
          +      case DATA_BLOCK_ENCODING:
          +        if (value == null) {
          +          unsetDataBlockEncoding();
          +        } else {
          +          setDataBlockEncoding((TDataBlockEncoding) value);
          +        }
          +        break;
           
          -    case KEEP_DELETED_CELLS:
          -      if (value == null) {
          -        unsetKeepDeletedCells();
          -      } else {
          -        setKeepDeletedCells((TKeepDeletedCells)value);
          -      }
          -      break;
          +      case KEEP_DELETED_CELLS:
          +        if (value == null) {
          +          unsetKeepDeletedCells();
          +        } else {
          +          setKeepDeletedCells((TKeepDeletedCells) value);
          +        }
          +        break;
           
          -    case MAX_VERSIONS:
          -      if (value == null) {
          -        unsetMaxVersions();
          -      } else {
          -        setMaxVersions((java.lang.Integer)value);
          -      }
          -      break;
          +      case MAX_VERSIONS:
          +        if (value == null) {
          +          unsetMaxVersions();
          +        } else {
          +          setMaxVersions((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case MIN_VERSIONS:
          -      if (value == null) {
          -        unsetMinVersions();
          -      } else {
          -        setMinVersions((java.lang.Integer)value);
          -      }
          -      break;
          +      case MIN_VERSIONS:
          +        if (value == null) {
          +          unsetMinVersions();
          +        } else {
          +          setMinVersions((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case SCOPE:
          -      if (value == null) {
          -        unsetScope();
          -      } else {
          -        setScope((java.lang.Integer)value);
          -      }
          -      break;
          +      case SCOPE:
          +        if (value == null) {
          +          unsetScope();
          +        } else {
          +          setScope((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case TIME_TO_LIVE:
          -      if (value == null) {
          -        unsetTimeToLive();
          -      } else {
          -        setTimeToLive((java.lang.Integer)value);
          -      }
          -      break;
          +      case TIME_TO_LIVE:
          +        if (value == null) {
          +          unsetTimeToLive();
          +        } else {
          +          setTimeToLive((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case BLOCK_CACHE_ENABLED:
          -      if (value == null) {
          -        unsetBlockCacheEnabled();
          -      } else {
          -        setBlockCacheEnabled((java.lang.Boolean)value);
          -      }
          -      break;
          +      case BLOCK_CACHE_ENABLED:
          +        if (value == null) {
          +          unsetBlockCacheEnabled();
          +        } else {
          +          setBlockCacheEnabled((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case CACHE_BLOOMS_ON_WRITE:
          -      if (value == null) {
          -        unsetCacheBloomsOnWrite();
          -      } else {
          -        setCacheBloomsOnWrite((java.lang.Boolean)value);
          -      }
          -      break;
          +      case CACHE_BLOOMS_ON_WRITE:
          +        if (value == null) {
          +          unsetCacheBloomsOnWrite();
          +        } else {
          +          setCacheBloomsOnWrite((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case CACHE_DATA_ON_WRITE:
          -      if (value == null) {
          -        unsetCacheDataOnWrite();
          -      } else {
          -        setCacheDataOnWrite((java.lang.Boolean)value);
          -      }
          -      break;
          +      case CACHE_DATA_ON_WRITE:
          +        if (value == null) {
          +          unsetCacheDataOnWrite();
          +        } else {
          +          setCacheDataOnWrite((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case CACHE_INDEXES_ON_WRITE:
          -      if (value == null) {
          -        unsetCacheIndexesOnWrite();
          -      } else {
          -        setCacheIndexesOnWrite((java.lang.Boolean)value);
          -      }
          -      break;
          +      case CACHE_INDEXES_ON_WRITE:
          +        if (value == null) {
          +          unsetCacheIndexesOnWrite();
          +        } else {
          +          setCacheIndexesOnWrite((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case COMPRESS_TAGS:
          -      if (value == null) {
          -        unsetCompressTags();
          -      } else {
          -        setCompressTags((java.lang.Boolean)value);
          -      }
          -      break;
          +      case COMPRESS_TAGS:
          +        if (value == null) {
          +          unsetCompressTags();
          +        } else {
          +          setCompressTags((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case EVICT_BLOCKS_ON_CLOSE:
          -      if (value == null) {
          -        unsetEvictBlocksOnClose();
          -      } else {
          -        setEvictBlocksOnClose((java.lang.Boolean)value);
          -      }
          -      break;
          +      case EVICT_BLOCKS_ON_CLOSE:
          +        if (value == null) {
          +          unsetEvictBlocksOnClose();
          +        } else {
          +          setEvictBlocksOnClose((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case IN_MEMORY:
          -      if (value == null) {
          -        unsetInMemory();
          -      } else {
          -        setInMemory((java.lang.Boolean)value);
          -      }
          -      break;
          +      case IN_MEMORY:
          +        if (value == null) {
          +          unsetInMemory();
          +        } else {
          +          setInMemory((java.lang.Boolean) value);
          +        }
          +        break;
           
               }
             }
          @@ -1079,312 +1213,272 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case NAME:
          -      return getName();
          +      case NAME:
          +        return getName();
           
          -    case ATTRIBUTES:
          -      return getAttributes();
          +      case ATTRIBUTES:
          +        return getAttributes();
           
          -    case CONFIGURATION:
          -      return getConfiguration();
          +      case CONFIGURATION:
          +        return getConfiguration();
           
          -    case BLOCK_SIZE:
          -      return getBlockSize();
          +      case BLOCK_SIZE:
          +        return getBlockSize();
           
          -    case BLOOMN_FILTER_TYPE:
          -      return getBloomnFilterType();
          +      case BLOOMN_FILTER_TYPE:
          +        return getBloomnFilterType();
           
          -    case COMPRESSION_TYPE:
          -      return getCompressionType();
          +      case COMPRESSION_TYPE:
          +        return getCompressionType();
           
          -    case DFS_REPLICATION:
          -      return getDfsReplication();
          +      case DFS_REPLICATION:
          +        return getDfsReplication();
           
          -    case DATA_BLOCK_ENCODING:
          -      return getDataBlockEncoding();
          +      case DATA_BLOCK_ENCODING:
          +        return getDataBlockEncoding();
           
          -    case KEEP_DELETED_CELLS:
          -      return getKeepDeletedCells();
          +      case KEEP_DELETED_CELLS:
          +        return getKeepDeletedCells();
           
          -    case MAX_VERSIONS:
          -      return getMaxVersions();
          +      case MAX_VERSIONS:
          +        return getMaxVersions();
           
          -    case MIN_VERSIONS:
          -      return getMinVersions();
          +      case MIN_VERSIONS:
          +        return getMinVersions();
           
          -    case SCOPE:
          -      return getScope();
          +      case SCOPE:
          +        return getScope();
           
          -    case TIME_TO_LIVE:
          -      return getTimeToLive();
          +      case TIME_TO_LIVE:
          +        return getTimeToLive();
           
          -    case BLOCK_CACHE_ENABLED:
          -      return isBlockCacheEnabled();
          +      case BLOCK_CACHE_ENABLED:
          +        return isBlockCacheEnabled();
           
          -    case CACHE_BLOOMS_ON_WRITE:
          -      return isCacheBloomsOnWrite();
          +      case CACHE_BLOOMS_ON_WRITE:
          +        return isCacheBloomsOnWrite();
           
          -    case CACHE_DATA_ON_WRITE:
          -      return isCacheDataOnWrite();
          +      case CACHE_DATA_ON_WRITE:
          +        return isCacheDataOnWrite();
           
          -    case CACHE_INDEXES_ON_WRITE:
          -      return isCacheIndexesOnWrite();
          +      case CACHE_INDEXES_ON_WRITE:
          +        return isCacheIndexesOnWrite();
           
          -    case COMPRESS_TAGS:
          -      return isCompressTags();
          +      case COMPRESS_TAGS:
          +        return isCompressTags();
           
          -    case EVICT_BLOCKS_ON_CLOSE:
          -      return isEvictBlocksOnClose();
          +      case EVICT_BLOCKS_ON_CLOSE:
          +        return isEvictBlocksOnClose();
           
          -    case IN_MEMORY:
          -      return isInMemory();
          +      case IN_MEMORY:
          +        return isInMemory();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case NAME:
          -      return isSetName();
          -    case ATTRIBUTES:
          -      return isSetAttributes();
          -    case CONFIGURATION:
          -      return isSetConfiguration();
          -    case BLOCK_SIZE:
          -      return isSetBlockSize();
          -    case BLOOMN_FILTER_TYPE:
          -      return isSetBloomnFilterType();
          -    case COMPRESSION_TYPE:
          -      return isSetCompressionType();
          -    case DFS_REPLICATION:
          -      return isSetDfsReplication();
          -    case DATA_BLOCK_ENCODING:
          -      return isSetDataBlockEncoding();
          -    case KEEP_DELETED_CELLS:
          -      return isSetKeepDeletedCells();
          -    case MAX_VERSIONS:
          -      return isSetMaxVersions();
          -    case MIN_VERSIONS:
          -      return isSetMinVersions();
          -    case SCOPE:
          -      return isSetScope();
          -    case TIME_TO_LIVE:
          -      return isSetTimeToLive();
          -    case BLOCK_CACHE_ENABLED:
          -      return isSetBlockCacheEnabled();
          -    case CACHE_BLOOMS_ON_WRITE:
          -      return isSetCacheBloomsOnWrite();
          -    case CACHE_DATA_ON_WRITE:
          -      return isSetCacheDataOnWrite();
          -    case CACHE_INDEXES_ON_WRITE:
          -      return isSetCacheIndexesOnWrite();
          -    case COMPRESS_TAGS:
          -      return isSetCompressTags();
          -    case EVICT_BLOCKS_ON_CLOSE:
          -      return isSetEvictBlocksOnClose();
          -    case IN_MEMORY:
          -      return isSetInMemory();
          +      case NAME:
          +        return isSetName();
          +      case ATTRIBUTES:
          +        return isSetAttributes();
          +      case CONFIGURATION:
          +        return isSetConfiguration();
          +      case BLOCK_SIZE:
          +        return isSetBlockSize();
          +      case BLOOMN_FILTER_TYPE:
          +        return isSetBloomnFilterType();
          +      case COMPRESSION_TYPE:
          +        return isSetCompressionType();
          +      case DFS_REPLICATION:
          +        return isSetDfsReplication();
          +      case DATA_BLOCK_ENCODING:
          +        return isSetDataBlockEncoding();
          +      case KEEP_DELETED_CELLS:
          +        return isSetKeepDeletedCells();
          +      case MAX_VERSIONS:
          +        return isSetMaxVersions();
          +      case MIN_VERSIONS:
          +        return isSetMinVersions();
          +      case SCOPE:
          +        return isSetScope();
          +      case TIME_TO_LIVE:
          +        return isSetTimeToLive();
          +      case BLOCK_CACHE_ENABLED:
          +        return isSetBlockCacheEnabled();
          +      case CACHE_BLOOMS_ON_WRITE:
          +        return isSetCacheBloomsOnWrite();
          +      case CACHE_DATA_ON_WRITE:
          +        return isSetCacheDataOnWrite();
          +      case CACHE_INDEXES_ON_WRITE:
          +        return isSetCacheIndexesOnWrite();
          +      case COMPRESS_TAGS:
          +        return isSetCompressTags();
          +      case EVICT_BLOCKS_ON_CLOSE:
          +        return isSetEvictBlocksOnClose();
          +      case IN_MEMORY:
          +        return isSetInMemory();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TColumnFamilyDescriptor)
          -      return this.equals((TColumnFamilyDescriptor)that);
          +    if (that instanceof TColumnFamilyDescriptor) return this.equals((TColumnFamilyDescriptor) that);
               return false;
             }
           
             public boolean equals(TColumnFamilyDescriptor that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_name = true && this.isSetName();
               boolean that_present_name = true && that.isSetName();
               if (this_present_name || that_present_name) {
          -      if (!(this_present_name && that_present_name))
          -        return false;
          -      if (!this.name.equals(that.name))
          -        return false;
          +      if (!(this_present_name && that_present_name)) return false;
          +      if (!this.name.equals(that.name)) return false;
               }
           
               boolean this_present_attributes = true && this.isSetAttributes();
               boolean that_present_attributes = true && that.isSetAttributes();
               if (this_present_attributes || that_present_attributes) {
          -      if (!(this_present_attributes && that_present_attributes))
          -        return false;
          -      if (!this.attributes.equals(that.attributes))
          -        return false;
          +      if (!(this_present_attributes && that_present_attributes)) return false;
          +      if (!this.attributes.equals(that.attributes)) return false;
               }
           
               boolean this_present_configuration = true && this.isSetConfiguration();
               boolean that_present_configuration = true && that.isSetConfiguration();
               if (this_present_configuration || that_present_configuration) {
          -      if (!(this_present_configuration && that_present_configuration))
          -        return false;
          -      if (!this.configuration.equals(that.configuration))
          -        return false;
          +      if (!(this_present_configuration && that_present_configuration)) return false;
          +      if (!this.configuration.equals(that.configuration)) return false;
               }
           
               boolean this_present_blockSize = true && this.isSetBlockSize();
               boolean that_present_blockSize = true && that.isSetBlockSize();
               if (this_present_blockSize || that_present_blockSize) {
          -      if (!(this_present_blockSize && that_present_blockSize))
          -        return false;
          -      if (this.blockSize != that.blockSize)
          -        return false;
          +      if (!(this_present_blockSize && that_present_blockSize)) return false;
          +      if (this.blockSize != that.blockSize) return false;
               }
           
               boolean this_present_bloomnFilterType = true && this.isSetBloomnFilterType();
               boolean that_present_bloomnFilterType = true && that.isSetBloomnFilterType();
               if (this_present_bloomnFilterType || that_present_bloomnFilterType) {
          -      if (!(this_present_bloomnFilterType && that_present_bloomnFilterType))
          -        return false;
          -      if (!this.bloomnFilterType.equals(that.bloomnFilterType))
          -        return false;
          +      if (!(this_present_bloomnFilterType && that_present_bloomnFilterType)) return false;
          +      if (!this.bloomnFilterType.equals(that.bloomnFilterType)) return false;
               }
           
               boolean this_present_compressionType = true && this.isSetCompressionType();
               boolean that_present_compressionType = true && that.isSetCompressionType();
               if (this_present_compressionType || that_present_compressionType) {
          -      if (!(this_present_compressionType && that_present_compressionType))
          -        return false;
          -      if (!this.compressionType.equals(that.compressionType))
          -        return false;
          +      if (!(this_present_compressionType && that_present_compressionType)) return false;
          +      if (!this.compressionType.equals(that.compressionType)) return false;
               }
           
               boolean this_present_dfsReplication = true && this.isSetDfsReplication();
               boolean that_present_dfsReplication = true && that.isSetDfsReplication();
               if (this_present_dfsReplication || that_present_dfsReplication) {
          -      if (!(this_present_dfsReplication && that_present_dfsReplication))
          -        return false;
          -      if (this.dfsReplication != that.dfsReplication)
          -        return false;
          +      if (!(this_present_dfsReplication && that_present_dfsReplication)) return false;
          +      if (this.dfsReplication != that.dfsReplication) return false;
               }
           
               boolean this_present_dataBlockEncoding = true && this.isSetDataBlockEncoding();
               boolean that_present_dataBlockEncoding = true && that.isSetDataBlockEncoding();
               if (this_present_dataBlockEncoding || that_present_dataBlockEncoding) {
          -      if (!(this_present_dataBlockEncoding && that_present_dataBlockEncoding))
          -        return false;
          -      if (!this.dataBlockEncoding.equals(that.dataBlockEncoding))
          -        return false;
          +      if (!(this_present_dataBlockEncoding && that_present_dataBlockEncoding)) return false;
          +      if (!this.dataBlockEncoding.equals(that.dataBlockEncoding)) return false;
               }
           
               boolean this_present_keepDeletedCells = true && this.isSetKeepDeletedCells();
               boolean that_present_keepDeletedCells = true && that.isSetKeepDeletedCells();
               if (this_present_keepDeletedCells || that_present_keepDeletedCells) {
          -      if (!(this_present_keepDeletedCells && that_present_keepDeletedCells))
          -        return false;
          -      if (!this.keepDeletedCells.equals(that.keepDeletedCells))
          -        return false;
          +      if (!(this_present_keepDeletedCells && that_present_keepDeletedCells)) return false;
          +      if (!this.keepDeletedCells.equals(that.keepDeletedCells)) return false;
               }
           
               boolean this_present_maxVersions = true && this.isSetMaxVersions();
               boolean that_present_maxVersions = true && that.isSetMaxVersions();
               if (this_present_maxVersions || that_present_maxVersions) {
          -      if (!(this_present_maxVersions && that_present_maxVersions))
          -        return false;
          -      if (this.maxVersions != that.maxVersions)
          -        return false;
          +      if (!(this_present_maxVersions && that_present_maxVersions)) return false;
          +      if (this.maxVersions != that.maxVersions) return false;
               }
           
               boolean this_present_minVersions = true && this.isSetMinVersions();
               boolean that_present_minVersions = true && that.isSetMinVersions();
               if (this_present_minVersions || that_present_minVersions) {
          -      if (!(this_present_minVersions && that_present_minVersions))
          -        return false;
          -      if (this.minVersions != that.minVersions)
          -        return false;
          +      if (!(this_present_minVersions && that_present_minVersions)) return false;
          +      if (this.minVersions != that.minVersions) return false;
               }
           
               boolean this_present_scope = true && this.isSetScope();
               boolean that_present_scope = true && that.isSetScope();
               if (this_present_scope || that_present_scope) {
          -      if (!(this_present_scope && that_present_scope))
          -        return false;
          -      if (this.scope != that.scope)
          -        return false;
          +      if (!(this_present_scope && that_present_scope)) return false;
          +      if (this.scope != that.scope) return false;
               }
           
               boolean this_present_timeToLive = true && this.isSetTimeToLive();
               boolean that_present_timeToLive = true && that.isSetTimeToLive();
               if (this_present_timeToLive || that_present_timeToLive) {
          -      if (!(this_present_timeToLive && that_present_timeToLive))
          -        return false;
          -      if (this.timeToLive != that.timeToLive)
          -        return false;
          +      if (!(this_present_timeToLive && that_present_timeToLive)) return false;
          +      if (this.timeToLive != that.timeToLive) return false;
               }
           
               boolean this_present_blockCacheEnabled = true && this.isSetBlockCacheEnabled();
               boolean that_present_blockCacheEnabled = true && that.isSetBlockCacheEnabled();
               if (this_present_blockCacheEnabled || that_present_blockCacheEnabled) {
          -      if (!(this_present_blockCacheEnabled && that_present_blockCacheEnabled))
          -        return false;
          -      if (this.blockCacheEnabled != that.blockCacheEnabled)
          -        return false;
          +      if (!(this_present_blockCacheEnabled && that_present_blockCacheEnabled)) return false;
          +      if (this.blockCacheEnabled != that.blockCacheEnabled) return false;
               }
           
               boolean this_present_cacheBloomsOnWrite = true && this.isSetCacheBloomsOnWrite();
               boolean that_present_cacheBloomsOnWrite = true && that.isSetCacheBloomsOnWrite();
               if (this_present_cacheBloomsOnWrite || that_present_cacheBloomsOnWrite) {
          -      if (!(this_present_cacheBloomsOnWrite && that_present_cacheBloomsOnWrite))
          -        return false;
          -      if (this.cacheBloomsOnWrite != that.cacheBloomsOnWrite)
          -        return false;
          +      if (!(this_present_cacheBloomsOnWrite && that_present_cacheBloomsOnWrite)) return false;
          +      if (this.cacheBloomsOnWrite != that.cacheBloomsOnWrite) return false;
               }
           
               boolean this_present_cacheDataOnWrite = true && this.isSetCacheDataOnWrite();
               boolean that_present_cacheDataOnWrite = true && that.isSetCacheDataOnWrite();
               if (this_present_cacheDataOnWrite || that_present_cacheDataOnWrite) {
          -      if (!(this_present_cacheDataOnWrite && that_present_cacheDataOnWrite))
          -        return false;
          -      if (this.cacheDataOnWrite != that.cacheDataOnWrite)
          -        return false;
          +      if (!(this_present_cacheDataOnWrite && that_present_cacheDataOnWrite)) return false;
          +      if (this.cacheDataOnWrite != that.cacheDataOnWrite) return false;
               }
           
               boolean this_present_cacheIndexesOnWrite = true && this.isSetCacheIndexesOnWrite();
               boolean that_present_cacheIndexesOnWrite = true && that.isSetCacheIndexesOnWrite();
               if (this_present_cacheIndexesOnWrite || that_present_cacheIndexesOnWrite) {
          -      if (!(this_present_cacheIndexesOnWrite && that_present_cacheIndexesOnWrite))
          -        return false;
          -      if (this.cacheIndexesOnWrite != that.cacheIndexesOnWrite)
          -        return false;
          +      if (!(this_present_cacheIndexesOnWrite && that_present_cacheIndexesOnWrite)) return false;
          +      if (this.cacheIndexesOnWrite != that.cacheIndexesOnWrite) return false;
               }
           
               boolean this_present_compressTags = true && this.isSetCompressTags();
               boolean that_present_compressTags = true && that.isSetCompressTags();
               if (this_present_compressTags || that_present_compressTags) {
          -      if (!(this_present_compressTags && that_present_compressTags))
          -        return false;
          -      if (this.compressTags != that.compressTags)
          -        return false;
          +      if (!(this_present_compressTags && that_present_compressTags)) return false;
          +      if (this.compressTags != that.compressTags) return false;
               }
           
               boolean this_present_evictBlocksOnClose = true && this.isSetEvictBlocksOnClose();
               boolean that_present_evictBlocksOnClose = true && that.isSetEvictBlocksOnClose();
               if (this_present_evictBlocksOnClose || that_present_evictBlocksOnClose) {
          -      if (!(this_present_evictBlocksOnClose && that_present_evictBlocksOnClose))
          -        return false;
          -      if (this.evictBlocksOnClose != that.evictBlocksOnClose)
          -        return false;
          +      if (!(this_present_evictBlocksOnClose && that_present_evictBlocksOnClose)) return false;
          +      if (this.evictBlocksOnClose != that.evictBlocksOnClose) return false;
               }
           
               boolean this_present_inMemory = true && this.isSetInMemory();
               boolean that_present_inMemory = true && that.isSetInMemory();
               if (this_present_inMemory || that_present_inMemory) {
          -      if (!(this_present_inMemory && that_present_inMemory))
          -        return false;
          -      if (this.inMemory != that.inMemory)
          -        return false;
          +      if (!(this_present_inMemory && that_present_inMemory)) return false;
          +      if (this.inMemory != that.inMemory) return false;
               }
           
               return true;
          @@ -1395,56 +1489,43 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
          -    if (isSetName())
          -      hashCode = hashCode * 8191 + name.hashCode();
          +    if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -    if (isSetAttributes())
          -      hashCode = hashCode * 8191 + attributes.hashCode();
          +    if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetConfiguration()) ? 131071 : 524287);
          -    if (isSetConfiguration())
          -      hashCode = hashCode * 8191 + configuration.hashCode();
          +    if (isSetConfiguration()) hashCode = hashCode * 8191 + configuration.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetBlockSize()) ? 131071 : 524287);
          -    if (isSetBlockSize())
          -      hashCode = hashCode * 8191 + blockSize;
          +    if (isSetBlockSize()) hashCode = hashCode * 8191 + blockSize;
           
               hashCode = hashCode * 8191 + ((isSetBloomnFilterType()) ? 131071 : 524287);
          -    if (isSetBloomnFilterType())
          -      hashCode = hashCode * 8191 + bloomnFilterType.getValue();
          +    if (isSetBloomnFilterType()) hashCode = hashCode * 8191 + bloomnFilterType.getValue();
           
               hashCode = hashCode * 8191 + ((isSetCompressionType()) ? 131071 : 524287);
          -    if (isSetCompressionType())
          -      hashCode = hashCode * 8191 + compressionType.getValue();
          +    if (isSetCompressionType()) hashCode = hashCode * 8191 + compressionType.getValue();
           
               hashCode = hashCode * 8191 + ((isSetDfsReplication()) ? 131071 : 524287);
          -    if (isSetDfsReplication())
          -      hashCode = hashCode * 8191 + dfsReplication;
          +    if (isSetDfsReplication()) hashCode = hashCode * 8191 + dfsReplication;
           
               hashCode = hashCode * 8191 + ((isSetDataBlockEncoding()) ? 131071 : 524287);
          -    if (isSetDataBlockEncoding())
          -      hashCode = hashCode * 8191 + dataBlockEncoding.getValue();
          +    if (isSetDataBlockEncoding()) hashCode = hashCode * 8191 + dataBlockEncoding.getValue();
           
               hashCode = hashCode * 8191 + ((isSetKeepDeletedCells()) ? 131071 : 524287);
          -    if (isSetKeepDeletedCells())
          -      hashCode = hashCode * 8191 + keepDeletedCells.getValue();
          +    if (isSetKeepDeletedCells()) hashCode = hashCode * 8191 + keepDeletedCells.getValue();
           
               hashCode = hashCode * 8191 + ((isSetMaxVersions()) ? 131071 : 524287);
          -    if (isSetMaxVersions())
          -      hashCode = hashCode * 8191 + maxVersions;
          +    if (isSetMaxVersions()) hashCode = hashCode * 8191 + maxVersions;
           
               hashCode = hashCode * 8191 + ((isSetMinVersions()) ? 131071 : 524287);
          -    if (isSetMinVersions())
          -      hashCode = hashCode * 8191 + minVersions;
          +    if (isSetMinVersions()) hashCode = hashCode * 8191 + minVersions;
           
               hashCode = hashCode * 8191 + ((isSetScope()) ? 131071 : 524287);
          -    if (isSetScope())
          -      hashCode = hashCode * 8191 + scope;
          +    if (isSetScope()) hashCode = hashCode * 8191 + scope;
           
               hashCode = hashCode * 8191 + ((isSetTimeToLive()) ? 131071 : 524287);
          -    if (isSetTimeToLive())
          -      hashCode = hashCode * 8191 + timeToLive;
          +    if (isSetTimeToLive()) hashCode = hashCode * 8191 + timeToLive;
           
               hashCode = hashCode * 8191 + ((isSetBlockCacheEnabled()) ? 131071 : 524287);
               if (isSetBlockCacheEnabled())
          @@ -1463,16 +1544,14 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((cacheIndexesOnWrite) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetCompressTags()) ? 131071 : 524287);
          -    if (isSetCompressTags())
          -      hashCode = hashCode * 8191 + ((compressTags) ? 131071 : 524287);
          +    if (isSetCompressTags()) hashCode = hashCode * 8191 + ((compressTags) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetEvictBlocksOnClose()) ? 131071 : 524287);
               if (isSetEvictBlocksOnClose())
                 hashCode = hashCode * 8191 + ((evictBlocksOnClose) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetInMemory()) ? 131071 : 524287);
          -    if (isSetInMemory())
          -      hashCode = hashCode * 8191 + ((inMemory) ? 131071 : 524287);
          +    if (isSetInMemory()) hashCode = hashCode * 8191 + ((inMemory) ? 131071 : 524287);
           
               return hashCode;
             }
          @@ -1510,7 +1589,8 @@ public int compareTo(TColumnFamilyDescriptor other) {
                 return lastComparison;
               }
               if (isSetConfiguration()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.configuration, other.configuration);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.configuration, other.configuration);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1525,22 +1605,26 @@ public int compareTo(TColumnFamilyDescriptor other) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetBloomnFilterType(), other.isSetBloomnFilterType());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetBloomnFilterType(), other.isSetBloomnFilterType());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetBloomnFilterType()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bloomnFilterType, other.bloomnFilterType);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.bloomnFilterType, other.bloomnFilterType);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetCompressionType(), other.isSetCompressionType());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetCompressionType(), other.isSetCompressionType());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetCompressionType()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compressionType, other.compressionType);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.compressionType, other.compressionType);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1550,27 +1634,32 @@ public int compareTo(TColumnFamilyDescriptor other) {
                 return lastComparison;
               }
               if (isSetDfsReplication()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dfsReplication, other.dfsReplication);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.dfsReplication, other.dfsReplication);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetDataBlockEncoding(), other.isSetDataBlockEncoding());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetDataBlockEncoding(), other.isSetDataBlockEncoding());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetDataBlockEncoding()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dataBlockEncoding, other.dataBlockEncoding);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.dataBlockEncoding, other.dataBlockEncoding);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetKeepDeletedCells(), other.isSetKeepDeletedCells());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetKeepDeletedCells(), other.isSetKeepDeletedCells());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetKeepDeletedCells()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keepDeletedCells, other.keepDeletedCells);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.keepDeletedCells, other.keepDeletedCells);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1615,42 +1704,50 @@ public int compareTo(TColumnFamilyDescriptor other) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetBlockCacheEnabled(), other.isSetBlockCacheEnabled());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetBlockCacheEnabled(), other.isSetBlockCacheEnabled());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetBlockCacheEnabled()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.blockCacheEnabled, other.blockCacheEnabled);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.blockCacheEnabled, other.blockCacheEnabled);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetCacheBloomsOnWrite(), other.isSetCacheBloomsOnWrite());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetCacheBloomsOnWrite(), other.isSetCacheBloomsOnWrite());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetCacheBloomsOnWrite()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cacheBloomsOnWrite, other.cacheBloomsOnWrite);
          +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cacheBloomsOnWrite,
          +        other.cacheBloomsOnWrite);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetCacheDataOnWrite(), other.isSetCacheDataOnWrite());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetCacheDataOnWrite(), other.isSetCacheDataOnWrite());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetCacheDataOnWrite()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cacheDataOnWrite, other.cacheDataOnWrite);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.cacheDataOnWrite, other.cacheDataOnWrite);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetCacheIndexesOnWrite(), other.isSetCacheIndexesOnWrite());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetCacheIndexesOnWrite(), other.isSetCacheIndexesOnWrite());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetCacheIndexesOnWrite()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cacheIndexesOnWrite, other.cacheIndexesOnWrite);
          +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cacheIndexesOnWrite,
          +        other.cacheIndexesOnWrite);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1660,17 +1757,20 @@ public int compareTo(TColumnFamilyDescriptor other) {
                 return lastComparison;
               }
               if (isSetCompressTags()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compressTags, other.compressTags);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.compressTags, other.compressTags);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetEvictBlocksOnClose(), other.isSetEvictBlocksOnClose());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetEvictBlocksOnClose(), other.isSetEvictBlocksOnClose());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetEvictBlocksOnClose()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.evictBlocksOnClose, other.evictBlocksOnClose);
          +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.evictBlocksOnClose,
          +        other.evictBlocksOnClose);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1697,7 +1797,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -1858,44 +1959,51 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (name == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'name' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TColumnFamilyDescriptorStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TColumnFamilyDescriptorStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TColumnFamilyDescriptorStandardScheme getScheme() {
                 return new TColumnFamilyDescriptorStandardScheme();
               }
             }
           
           -  private static class TColumnFamilyDescriptorStandardScheme extends org.apache.thrift.scheme.StandardScheme<TColumnFamilyDescriptor> {
           +  private static class TColumnFamilyDescriptorStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<TColumnFamilyDescriptor> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescriptor struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescriptor struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -1903,7 +2011,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.name = iprot.readBinary();
                         struct.setNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1911,11 +2019,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map142 = iprot.readMapBegin();
           -                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map142.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key143;
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val144;
          -                for (int _i145 = 0; _i145 < _map142.size; ++_i145)
          -                {
           +                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                    2 * _map142.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key143;
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _val144;
          +                for (int _i145 = 0; _i145 < _map142.size; ++_i145) {
                             _key143 = iprot.readBinary();
                             _val144 = iprot.readBinary();
                             struct.attributes.put(_key143, _val144);
          @@ -1923,7 +2033,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                           iprot.readMapEnd();
                         }
                         struct.setAttributesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1931,11 +2041,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map146 = iprot.readMapBegin();
           -                struct.configuration = new java.util.HashMap<java.lang.String,java.lang.String>(2*_map146.size);
          -                @org.apache.thrift.annotation.Nullable java.lang.String _key147;
          -                @org.apache.thrift.annotation.Nullable java.lang.String _val148;
          -                for (int _i149 = 0; _i149 < _map146.size; ++_i149)
          -                {
          +                struct.configuration =
           +                    new java.util.HashMap<java.lang.String, java.lang.String>(2 * _map146.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.lang.String _key147;
          +                @org.apache.thrift.annotation.Nullable
          +                java.lang.String _val148;
          +                for (int _i149 = 0; _i149 < _map146.size; ++_i149) {
                             _key147 = iprot.readString();
                             _val148 = iprot.readString();
                             struct.configuration.put(_key147, _val148);
          @@ -1943,7 +2055,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                           iprot.readMapEnd();
                         }
                         struct.setConfigurationIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1951,23 +2063,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.blockSize = iprot.readI32();
                         struct.setBlockSizeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 5: // BLOOMN_FILTER_TYPE
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.bloomnFilterType = org.apache.hadoop.hbase.thrift2.generated.TBloomFilterType.findByValue(iprot.readI32());
          +              struct.bloomnFilterType = org.apache.hadoop.hbase.thrift2.generated.TBloomFilterType
          +                  .findByValue(iprot.readI32());
                         struct.setBloomnFilterTypeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 6: // COMPRESSION_TYPE
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.compressionType = org.apache.hadoop.hbase.thrift2.generated.TCompressionAlgorithm.findByValue(iprot.readI32());
          +              struct.compressionType =
          +                  org.apache.hadoop.hbase.thrift2.generated.TCompressionAlgorithm
          +                      .findByValue(iprot.readI32());
                         struct.setCompressionTypeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1975,23 +2090,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.I16) {
                         struct.dfsReplication = iprot.readI16();
                         struct.setDfsReplicationIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 8: // DATA_BLOCK_ENCODING
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.dataBlockEncoding = org.apache.hadoop.hbase.thrift2.generated.TDataBlockEncoding.findByValue(iprot.readI32());
          +              struct.dataBlockEncoding =
          +                  org.apache.hadoop.hbase.thrift2.generated.TDataBlockEncoding
          +                      .findByValue(iprot.readI32());
                         struct.setDataBlockEncodingIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 9: // KEEP_DELETED_CELLS
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.keepDeletedCells = org.apache.hadoop.hbase.thrift2.generated.TKeepDeletedCells.findByValue(iprot.readI32());
          +              struct.keepDeletedCells = org.apache.hadoop.hbase.thrift2.generated.TKeepDeletedCells
          +                  .findByValue(iprot.readI32());
                         struct.setKeepDeletedCellsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1999,7 +2117,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.maxVersions = iprot.readI32();
                         struct.setMaxVersionsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2007,7 +2125,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.minVersions = iprot.readI32();
                         struct.setMinVersionsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2015,7 +2133,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.scope = iprot.readI32();
                         struct.setScopeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2023,7 +2141,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.timeToLive = iprot.readI32();
                         struct.setTimeToLiveIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2031,7 +2149,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.blockCacheEnabled = iprot.readBool();
                         struct.setBlockCacheEnabledIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2039,7 +2157,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.cacheBloomsOnWrite = iprot.readBool();
                         struct.setCacheBloomsOnWriteIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2047,7 +2165,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.cacheDataOnWrite = iprot.readBool();
                         struct.setCacheDataOnWriteIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2055,7 +2173,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.cacheIndexesOnWrite = iprot.readBool();
                         struct.setCacheIndexesOnWriteIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2063,7 +2181,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.compressTags = iprot.readBool();
                         struct.setCompressTagsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2071,7 +2189,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.evictBlocksOnClose = iprot.readBool();
                         struct.setEvictBlocksOnCloseIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2079,7 +2197,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.inMemory = iprot.readBool();
                         struct.setInMemoryIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2094,7 +2212,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnFamilyDescri
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnFamilyDescriptor struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnFamilyDescriptor struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -2107,9 +2226,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnFamilyDescr
                   if (struct.isSetAttributes()) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter150 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter150 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter150.getKey());
                         oprot.writeBinary(_iter150.getValue());
                       }
          @@ -2122,9 +2243,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnFamilyDescr
                   if (struct.isSetConfiguration()) {
                     oprot.writeFieldBegin(CONFIGURATION_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.configuration.size()));
           -            for (java.util.Map.Entry<java.lang.String, java.lang.String> _iter151 : struct.configuration.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.configuration.size()));
           +            for (java.util.Map.Entry<java.lang.String, java.lang.String> _iter151 : struct.configuration
          +                .entrySet()) {
                         oprot.writeString(_iter151.getKey());
                         oprot.writeString(_iter151.getValue());
                       }
          @@ -2232,17 +2355,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnFamilyDescr
           
             }
           
          -  private static class TColumnFamilyDescriptorTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TColumnFamilyDescriptorTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TColumnFamilyDescriptorTupleScheme getScheme() {
                 return new TColumnFamilyDescriptorTupleScheme();
               }
             }
           
           -  private static class TColumnFamilyDescriptorTupleScheme extends org.apache.thrift.scheme.TupleScheme<TColumnFamilyDescriptor> {
           +  private static class TColumnFamilyDescriptorTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<TColumnFamilyDescriptor> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescriptor struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescriptor struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.name);
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetAttributes()) {
          @@ -2306,8 +2433,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescri
                 if (struct.isSetAttributes()) {
                   {
                     oprot.writeI32(struct.attributes.size());
           -          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter152 : struct.attributes.entrySet())
          -          {
           +          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter152 : struct.attributes
          +              .entrySet()) {
                       oprot.writeBinary(_iter152.getKey());
                       oprot.writeBinary(_iter152.getValue());
                     }
          @@ -2316,8 +2443,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescri
                 if (struct.isSetConfiguration()) {
                   {
                     oprot.writeI32(struct.configuration.size());
-          for (java.util.Map.Entry<java.lang.String, java.lang.String> _iter153 : struct.configuration.entrySet())
-          {
+          for (java.util.Map.Entry<java.lang.String, java.lang.String> _iter153 : struct.configuration
+              .entrySet()) {
                       oprot.writeString(_iter153.getKey());
                       oprot.writeString(_iter153.getValue());
                     }
          @@ -2377,19 +2504,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescri
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescriptor struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescriptor struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.name = iprot.readBinary();
                 struct.setNameIsSet(true);
                 java.util.BitSet incoming = iprot.readBitSet(19);
                 if (incoming.get(0)) {
                   {
          -          org.apache.thrift.protocol.TMap _map154 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
-          struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map154.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key155;
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val156;
          -          for (int _i157 = 0; _i157 < _map154.size; ++_i157)
          -          {
          +          org.apache.thrift.protocol.TMap _map154 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
+          struct.attributes =
+              new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map154.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key155;
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _val156;
          +          for (int _i157 = 0; _i157 < _map154.size; ++_i157) {
                       _key155 = iprot.readBinary();
                       _val156 = iprot.readBinary();
                       struct.attributes.put(_key155, _val156);
          @@ -2399,12 +2531,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescrip
                 }
                 if (incoming.get(1)) {
                   {
          -          org.apache.thrift.protocol.TMap _map158 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
-          struct.configuration = new java.util.HashMap<java.lang.String,java.lang.String>(2*_map158.size);
          -          @org.apache.thrift.annotation.Nullable java.lang.String _key159;
          -          @org.apache.thrift.annotation.Nullable java.lang.String _val160;
          -          for (int _i161 = 0; _i161 < _map158.size; ++_i161)
          -          {
          +          org.apache.thrift.protocol.TMap _map158 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
+          struct.configuration =
+              new java.util.HashMap<java.lang.String, java.lang.String>(2 * _map158.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.lang.String _key159;
          +          @org.apache.thrift.annotation.Nullable
          +          java.lang.String _val160;
          +          for (int _i161 = 0; _i161 < _map158.size; ++_i161) {
                       _key159 = iprot.readString();
                       _val160 = iprot.readString();
                       struct.configuration.put(_key159, _val160);
          @@ -2417,11 +2552,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescrip
                   struct.setBlockSizeIsSet(true);
                 }
                 if (incoming.get(3)) {
          -        struct.bloomnFilterType = org.apache.hadoop.hbase.thrift2.generated.TBloomFilterType.findByValue(iprot.readI32());
          +        struct.bloomnFilterType =
          +            org.apache.hadoop.hbase.thrift2.generated.TBloomFilterType.findByValue(iprot.readI32());
                   struct.setBloomnFilterTypeIsSet(true);
                 }
                 if (incoming.get(4)) {
          -        struct.compressionType = org.apache.hadoop.hbase.thrift2.generated.TCompressionAlgorithm.findByValue(iprot.readI32());
          +        struct.compressionType = org.apache.hadoop.hbase.thrift2.generated.TCompressionAlgorithm
          +            .findByValue(iprot.readI32());
                   struct.setCompressionTypeIsSet(true);
                 }
                 if (incoming.get(5)) {
          @@ -2429,11 +2566,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescrip
                   struct.setDfsReplicationIsSet(true);
                 }
                 if (incoming.get(6)) {
          -        struct.dataBlockEncoding = org.apache.hadoop.hbase.thrift2.generated.TDataBlockEncoding.findByValue(iprot.readI32());
          +        struct.dataBlockEncoding = org.apache.hadoop.hbase.thrift2.generated.TDataBlockEncoding
          +            .findByValue(iprot.readI32());
                   struct.setDataBlockEncodingIsSet(true);
                 }
                 if (incoming.get(7)) {
          -        struct.keepDeletedCells = org.apache.hadoop.hbase.thrift2.generated.TKeepDeletedCells.findByValue(iprot.readI32());
          +        struct.keepDeletedCells = org.apache.hadoop.hbase.thrift2.generated.TKeepDeletedCells
          +            .findByValue(iprot.readI32());
                   struct.setKeepDeletedCellsIsSet(true);
                 }
                 if (incoming.get(8)) {
          @@ -2483,8 +2622,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TColumnFamilyDescrip
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
+      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
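Every generated struct in this patch ends with the same dual-scheme dispatch: a STANDARD_SCHEME_FACTORY and a TUPLE_SCHEME_FACTORY are held as private constants, and the private static scheme(proto) helper picks one by comparing the protocol's declared scheme class. The following is a minimal, self-contained sketch of that pattern only; DemoStruct, DemoScheme and DemoProtocol are hypothetical stand-ins, not org.apache.thrift or HBase types.

// Illustrative sketch of the STANDARD_SCHEME_FACTORY / TUPLE_SCHEME_FACTORY dispatch
// seen at the bottom of the generated structs above. All names here are made up.
import java.util.function.Supplier;

public class DemoStruct {

  // Two wire encodings, analogous to Thrift's StandardScheme and TupleScheme.
  interface DemoScheme {
    void write(DemoStruct s);
  }

  static final class StandardScheme implements DemoScheme {
    @Override
    public void write(DemoStruct s) {
      System.out.println("field-by-field standard encoding");
    }
  }

  static final class TupleScheme implements DemoScheme {
    @Override
    public void write(DemoStruct s) {
      System.out.println("bitset-prefixed compact tuple encoding");
    }
  }

  // Each generated struct keeps one factory per scheme as a private constant.
  private static final Supplier<DemoScheme> STANDARD_SCHEME_FACTORY = StandardScheme::new;
  private static final Supplier<DemoScheme> TUPLE_SCHEME_FACTORY = TupleScheme::new;

  // Stand-in for TProtocol: it advertises which scheme class it expects.
  static final class DemoProtocol {
    private final Class<? extends DemoScheme> scheme;

    DemoProtocol(Class<? extends DemoScheme> scheme) {
      this.scheme = scheme;
    }

    Class<? extends DemoScheme> getScheme() {
      return scheme;
    }
  }

  // Same shape as the generated private static scheme(proto) helper: choose the factory
  // by the protocol's scheme class, then build the scheme instance.
  private static DemoScheme scheme(DemoProtocol proto) {
    return (StandardScheme.class.equals(proto.getScheme())
        ? STANDARD_SCHEME_FACTORY
        : TUPLE_SCHEME_FACTORY).get();
  }

  public static void main(String[] args) {
    scheme(new DemoProtocol(StandardScheme.class)).write(new DemoStruct());
    scheme(new DemoProtocol(TupleScheme.class)).write(new DemoStruct());
  }
}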
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
          index 2fb514d3a127..42024cb10727 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
          @@ -1,37 +1,62 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * Represents a single cell and the amount to increment it by
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TColumnIncrement implements org.apache.thrift.TBase<TColumnIncrement, TColumnIncrement._Fields>, java.io.Serializable, Cloneable, Comparable<TColumnIncrement> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnIncrement");
          -
          -  private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField AMOUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("amount", org.apache.thrift.protocol.TType.I64, (short)3);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TColumnIncrementStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TColumnIncrementTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TColumnIncrement
+    implements org.apache.thrift.TBase<TColumnIncrement, TColumnIncrement._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TColumnIncrement> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TColumnIncrement");
          +
          +  private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField AMOUNT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("amount", org.apache.thrift.protocol.TType.I64,
          +          (short) 3);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TColumnIncrementStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TColumnIncrementTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier; // required
             public long amount; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    FAMILY((short)1, "family"),
          -    QUALIFIER((short)2, "qualifier"),
          -    AMOUNT((short)3, "amount");
          +    FAMILY((short) 1, "family"), QUALIFIER((short) 2, "qualifier"), AMOUNT((short) 3, "amount");
           
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -44,7 +69,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // FAMILY
                     return FAMILY;
                   case 2: // QUALIFIER
          @@ -57,12 +82,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -94,18 +119,27 @@ public java.lang.String getFieldName() {
             // isset id assignments
             private static final int __AMOUNT_ISSET_ID = 0;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.AMOUNT};
          +  private static final _Fields optionals[] = { _Fields.AMOUNT };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("qualifier", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.AMOUNT, new org.apache.thrift.meta_data.FieldMetaData("amount", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.FAMILY,
          +      new org.apache.thrift.meta_data.FieldMetaData("family",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.QUALIFIER,
          +      new org.apache.thrift.meta_data.FieldMetaData("qualifier",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.AMOUNT, new org.apache.thrift.meta_data.FieldMetaData("amount",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnIncrement.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnIncrement.class,
          +      metaDataMap);
             }
           
             public TColumnIncrement() {
          @@ -113,10 +147,7 @@ public TColumnIncrement() {
           
             }
           
          -  public TColumnIncrement(
          -    java.nio.ByteBuffer family,
          -    java.nio.ByteBuffer qualifier)
          -  {
          +  public TColumnIncrement(java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier) {
               this();
               this.family = org.apache.thrift.TBaseHelper.copyBinary(family);
               this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
          @@ -158,11 +189,13 @@ public java.nio.ByteBuffer bufferForFamily() {
             }
           
             public TColumnIncrement setFamily(byte[] family) {
          -    this.family = family == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(family.clone());
          +    this.family =
          +        family == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(family.clone());
               return this;
             }
           
          -  public TColumnIncrement setFamily(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family) {
          +  public TColumnIncrement
          +      setFamily(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family) {
               this.family = org.apache.thrift.TBaseHelper.copyBinary(family);
               return this;
             }
          @@ -192,11 +225,13 @@ public java.nio.ByteBuffer bufferForQualifier() {
             }
           
             public TColumnIncrement setQualifier(byte[] qualifier) {
          -    this.qualifier = qualifier == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(qualifier.clone());
          +    this.qualifier = qualifier == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(qualifier.clone());
               return this;
             }
           
          -  public TColumnIncrement setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
          +  public TColumnIncrement
          +      setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
               this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
               return this;
             }
          @@ -227,7 +262,8 @@ public TColumnIncrement setAmount(long amount) {
             }
           
             public void unsetAmount() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __AMOUNT_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __AMOUNT_ISSET_ID);
             }
           
             /** Returns true if field amount is set (has been assigned a value) and false otherwise */
          @@ -236,42 +272,44 @@ public boolean isSetAmount() {
             }
           
             public void setAmountIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __AMOUNT_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __AMOUNT_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case FAMILY:
          -      if (value == null) {
          -        unsetFamily();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setFamily((byte[])value);
          +      case FAMILY:
          +        if (value == null) {
          +          unsetFamily();
                   } else {
          -          setFamily((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setFamily((byte[]) value);
          +          } else {
          +            setFamily((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          -
          -    case QUALIFIER:
          -      if (value == null) {
          -        unsetQualifier();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setQualifier((byte[])value);
          +        break;
          +
          +      case QUALIFIER:
          +        if (value == null) {
          +          unsetQualifier();
                   } else {
          -          setQualifier((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setQualifier((byte[]) value);
          +          } else {
          +            setQualifier((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case AMOUNT:
          -      if (value == null) {
          -        unsetAmount();
          -      } else {
          -        setAmount((java.lang.Long)value);
          -      }
          -      break;
          +      case AMOUNT:
          +        if (value == null) {
          +          unsetAmount();
          +        } else {
          +          setAmount((java.lang.Long) value);
          +        }
          +        break;
           
               }
             }
          @@ -279,74 +317,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case FAMILY:
          -      return getFamily();
          +      case FAMILY:
          +        return getFamily();
           
          -    case QUALIFIER:
          -      return getQualifier();
          +      case QUALIFIER:
          +        return getQualifier();
           
          -    case AMOUNT:
          -      return getAmount();
          +      case AMOUNT:
          +        return getAmount();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case FAMILY:
          -      return isSetFamily();
          -    case QUALIFIER:
          -      return isSetQualifier();
          -    case AMOUNT:
          -      return isSetAmount();
          +      case FAMILY:
          +        return isSetFamily();
          +      case QUALIFIER:
          +        return isSetQualifier();
          +      case AMOUNT:
          +        return isSetAmount();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TColumnIncrement)
          -      return this.equals((TColumnIncrement)that);
          +    if (that instanceof TColumnIncrement) return this.equals((TColumnIncrement) that);
               return false;
             }
           
             public boolean equals(TColumnIncrement that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_family = true && this.isSetFamily();
               boolean that_present_family = true && that.isSetFamily();
               if (this_present_family || that_present_family) {
          -      if (!(this_present_family && that_present_family))
          -        return false;
          -      if (!this.family.equals(that.family))
          -        return false;
          +      if (!(this_present_family && that_present_family)) return false;
          +      if (!this.family.equals(that.family)) return false;
               }
           
               boolean this_present_qualifier = true && this.isSetQualifier();
               boolean that_present_qualifier = true && that.isSetQualifier();
               if (this_present_qualifier || that_present_qualifier) {
          -      if (!(this_present_qualifier && that_present_qualifier))
          -        return false;
          -      if (!this.qualifier.equals(that.qualifier))
          -        return false;
          +      if (!(this_present_qualifier && that_present_qualifier)) return false;
          +      if (!this.qualifier.equals(that.qualifier)) return false;
               }
           
               boolean this_present_amount = true && this.isSetAmount();
               boolean that_present_amount = true && that.isSetAmount();
               if (this_present_amount || that_present_amount) {
          -      if (!(this_present_amount && that_present_amount))
          -        return false;
          -      if (this.amount != that.amount)
          -        return false;
          +      if (!(this_present_amount && that_present_amount)) return false;
          +      if (this.amount != that.amount) return false;
               }
           
               return true;
          @@ -357,16 +389,13 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetFamily()) ? 131071 : 524287);
          -    if (isSetFamily())
          -      hashCode = hashCode * 8191 + family.hashCode();
          +    if (isSetFamily()) hashCode = hashCode * 8191 + family.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetQualifier()) ? 131071 : 524287);
          -    if (isSetQualifier())
          -      hashCode = hashCode * 8191 + qualifier.hashCode();
          +    if (isSetQualifier()) hashCode = hashCode * 8191 + qualifier.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetAmount()) ? 131071 : 524287);
          -    if (isSetAmount())
          -      hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(amount);
          +    if (isSetAmount()) hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(amount);
           
               return hashCode;
             }
          @@ -421,7 +450,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -458,47 +488,55 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (family == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'family' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'family' was not present! Struct: " + toString());
               }
               if (qualifier == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifier' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'qualifier' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TColumnIncrementStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TColumnIncrementStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TColumnIncrementStandardScheme getScheme() {
                 return new TColumnIncrementStandardScheme();
               }
             }
           
-  private static class TColumnIncrementStandardScheme extends org.apache.thrift.scheme.StandardScheme<TColumnIncrement> {
+  private static class TColumnIncrementStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<TColumnIncrement> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnIncrement struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnIncrement struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -506,7 +544,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnIncrement st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.family = iprot.readBinary();
                         struct.setFamilyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -514,7 +552,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnIncrement st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.qualifier = iprot.readBinary();
                         struct.setQualifierIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -522,7 +560,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnIncrement st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.amount = iprot.readI64();
                         struct.setAmountIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -537,7 +575,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnIncrement st
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnIncrement struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnIncrement struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -562,17 +601,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnIncrement s
           
             }
           
          -  private static class TColumnIncrementTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TColumnIncrementTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TColumnIncrementTupleScheme getScheme() {
                 return new TColumnIncrementTupleScheme();
               }
             }
           
-  private static class TColumnIncrementTupleScheme extends org.apache.thrift.scheme.TupleScheme<TColumnIncrement> {
+  private static class TColumnIncrementTupleScheme
+      extends org.apache.thrift.scheme.TupleScheme<TColumnIncrement> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TColumnIncrement struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TColumnIncrement struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.family);
                 oprot.writeBinary(struct.qualifier);
                 java.util.BitSet optionals = new java.util.BitSet();
          @@ -586,8 +629,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TColumnIncrement st
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TColumnIncrement struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TColumnIncrement struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.family = iprot.readBinary();
                 struct.setFamilyIsSet(true);
                 struct.qualifier = iprot.readBinary();
          @@ -600,8 +645,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TColumnIncrement str
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
+      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
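The reformatted TColumnIncrement above keeps its generated API intact: a no-arg constructor, chained setFamily/setQualifier/setAmount setters, and validate() for the required fields. A small usage sketch follows, assuming the hbase-thrift and libthrift jars are on the classpath; the family, qualifier, and amount values are made up for illustration.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement;

public class TColumnIncrementExample {
  public static void main(String[] args) throws Exception {
    // 'family' and 'qualifier' are required; 'amount' is optional (see optionals[] above).
    TColumnIncrement inc = new TColumnIncrement()
        .setFamily("cf".getBytes(StandardCharsets.UTF_8))
        .setQualifier("hits".getBytes(StandardCharsets.UTF_8))
        .setAmount(1L);

    // validate() throws TProtocolException if a required field (family/qualifier) is missing.
    inc.validate();

    System.out.println(inc);
  }
}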
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
          index a30487aac56d..c9ad2fefbfd6 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
          @@ -1,28 +1,56 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * Represents a single cell and its value.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TColumnValue implements org.apache.thrift.TBase<TColumnValue, TColumnValue._Fields>, java.io.Serializable, Cloneable, Comparable<TColumnValue> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnValue");
          -
          -  private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)3);
          -  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)4);
          -  private static final org.apache.thrift.protocol.TField TAGS_FIELD_DESC = new org.apache.thrift.protocol.TField("tags", org.apache.thrift.protocol.TType.STRING, (short)5);
          -  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.BYTE, (short)6);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TColumnValueStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TColumnValueTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TColumnValue implements org.apache.thrift.TBase<TColumnValue, TColumnValue._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TColumnValue> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TColumnValue");
          +
          +  private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField TAGS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("tags", org.apache.thrift.protocol.TType.STRING,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.BYTE,
          +          (short) 6);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TColumnValueStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TColumnValueTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier; // required
@@ -31,16 +59,16 @@ public class TColumnValue implements org.apache.thrift.TBase<TColumnValue, TColu
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -53,7 +81,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // FAMILY
                     return FAMILY;
                   case 2: // QUALIFIER
          @@ -72,12 +100,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -110,21 +138,36 @@ public java.lang.String getFieldName() {
             private static final int __TIMESTAMP_ISSET_ID = 0;
             private static final int __TYPE_ISSET_ID = 1;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.TIMESTAMP,_Fields.TAGS,_Fields.TYPE};
          +  private static final _Fields optionals[] = { _Fields.TIMESTAMP, _Fields.TAGS, _Fields.TYPE };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("qualifier", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.FAMILY,
          +      new org.apache.thrift.meta_data.FieldMetaData("family",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.QUALIFIER,
          +      new org.apache.thrift.meta_data.FieldMetaData("qualifier",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.VALUE,
          +      new org.apache.thrift.meta_data.FieldMetaData("value",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.TAGS, new org.apache.thrift.meta_data.FieldMetaData("tags", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.TAGS,
          +      new org.apache.thrift.meta_data.FieldMetaData("tags",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnValue.class, metaDataMap);
          @@ -133,11 +176,8 @@ public java.lang.String getFieldName() {
             public TColumnValue() {
             }
           
          -  public TColumnValue(
          -    java.nio.ByteBuffer family,
          -    java.nio.ByteBuffer qualifier,
          -    java.nio.ByteBuffer value)
          -  {
          +  public TColumnValue(java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier,
          +      java.nio.ByteBuffer value) {
               this();
               this.family = org.apache.thrift.TBaseHelper.copyBinary(family);
               this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
          @@ -191,7 +231,8 @@ public java.nio.ByteBuffer bufferForFamily() {
             }
           
             public TColumnValue setFamily(byte[] family) {
          -    this.family = family == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(family.clone());
          +    this.family =
          +        family == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(family.clone());
               return this;
             }
           
          @@ -225,11 +266,13 @@ public java.nio.ByteBuffer bufferForQualifier() {
             }
           
             public TColumnValue setQualifier(byte[] qualifier) {
          -    this.qualifier = qualifier == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(qualifier.clone());
          +    this.qualifier = qualifier == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(qualifier.clone());
               return this;
             }
           
          -  public TColumnValue setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
          +  public TColumnValue
          +      setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
               this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
               return this;
             }
          @@ -259,7 +302,8 @@ public java.nio.ByteBuffer bufferForValue() {
             }
           
             public TColumnValue setValue(byte[] value) {
          -    this.value = value == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(value.clone());
          +    this.value =
          +        value == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(value.clone());
               return this;
             }
           
          @@ -294,7 +338,8 @@ public TColumnValue setTimestamp(long timestamp) {
             }
           
             public void unsetTimestamp() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
             }
           
             /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -303,7 +348,8 @@ public boolean isSetTimestamp() {
             }
           
             public void setTimestampIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
             }
           
             public byte[] getTags() {
          @@ -316,7 +362,7 @@ public java.nio.ByteBuffer bufferForTags() {
             }
           
             public TColumnValue setTags(byte[] tags) {
          -    this.tags = tags == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(tags.clone());
          +    this.tags = tags == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(tags.clone());
               return this;
             }
           
          @@ -360,74 +406,76 @@ public boolean isSetType() {
             }
           
             public void setTypeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TYPE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TYPE_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case FAMILY:
          -      if (value == null) {
          -        unsetFamily();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setFamily((byte[])value);
          +      case FAMILY:
          +        if (value == null) {
          +          unsetFamily();
                   } else {
          -          setFamily((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setFamily((byte[]) value);
          +          } else {
          +            setFamily((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case QUALIFIER:
          -      if (value == null) {
          -        unsetQualifier();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setQualifier((byte[])value);
          +      case QUALIFIER:
          +        if (value == null) {
          +          unsetQualifier();
                   } else {
          -          setQualifier((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setQualifier((byte[]) value);
          +          } else {
          +            setQualifier((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case VALUE:
          -      if (value == null) {
          -        unsetValue();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setValue((byte[])value);
          +      case VALUE:
          +        if (value == null) {
          +          unsetValue();
                   } else {
          -          setValue((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setValue((byte[]) value);
          +          } else {
          +            setValue((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case TIMESTAMP:
          -      if (value == null) {
          -        unsetTimestamp();
          -      } else {
          -        setTimestamp((java.lang.Long)value);
          -      }
          -      break;
          +      case TIMESTAMP:
          +        if (value == null) {
          +          unsetTimestamp();
          +        } else {
          +          setTimestamp((java.lang.Long) value);
          +        }
          +        break;
           
          -    case TAGS:
          -      if (value == null) {
          -        unsetTags();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setTags((byte[])value);
          +      case TAGS:
          +        if (value == null) {
          +          unsetTags();
                   } else {
          -          setTags((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setTags((byte[]) value);
          +          } else {
          +            setTags((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case TYPE:
          -      if (value == null) {
          -        unsetType();
          -      } else {
          -        setType((java.lang.Byte)value);
          -      }
          -      break;
          +      case TYPE:
          +        if (value == null) {
          +          unsetType();
          +        } else {
          +          setType((java.lang.Byte) value);
          +        }
          +        break;
           
               }
             }
          @@ -435,116 +483,104 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case FAMILY:
          -      return getFamily();
          +      case FAMILY:
          +        return getFamily();
           
          -    case QUALIFIER:
          -      return getQualifier();
          +      case QUALIFIER:
          +        return getQualifier();
           
          -    case VALUE:
          -      return getValue();
          +      case VALUE:
          +        return getValue();
           
          -    case TIMESTAMP:
          -      return getTimestamp();
          +      case TIMESTAMP:
          +        return getTimestamp();
           
          -    case TAGS:
          -      return getTags();
          +      case TAGS:
          +        return getTags();
           
          -    case TYPE:
          -      return getType();
          +      case TYPE:
          +        return getType();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case FAMILY:
          -      return isSetFamily();
          -    case QUALIFIER:
          -      return isSetQualifier();
          -    case VALUE:
          -      return isSetValue();
          -    case TIMESTAMP:
          -      return isSetTimestamp();
          -    case TAGS:
          -      return isSetTags();
          -    case TYPE:
          -      return isSetType();
          +      case FAMILY:
          +        return isSetFamily();
          +      case QUALIFIER:
          +        return isSetQualifier();
          +      case VALUE:
          +        return isSetValue();
          +      case TIMESTAMP:
          +        return isSetTimestamp();
          +      case TAGS:
          +        return isSetTags();
          +      case TYPE:
          +        return isSetType();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TColumnValue)
          -      return this.equals((TColumnValue)that);
          +    if (that instanceof TColumnValue) return this.equals((TColumnValue) that);
               return false;
             }
           
             public boolean equals(TColumnValue that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_family = true && this.isSetFamily();
               boolean that_present_family = true && that.isSetFamily();
               if (this_present_family || that_present_family) {
          -      if (!(this_present_family && that_present_family))
          -        return false;
          -      if (!this.family.equals(that.family))
          -        return false;
          +      if (!(this_present_family && that_present_family)) return false;
          +      if (!this.family.equals(that.family)) return false;
               }
           
               boolean this_present_qualifier = true && this.isSetQualifier();
               boolean that_present_qualifier = true && that.isSetQualifier();
               if (this_present_qualifier || that_present_qualifier) {
          -      if (!(this_present_qualifier && that_present_qualifier))
          -        return false;
          -      if (!this.qualifier.equals(that.qualifier))
          -        return false;
          +      if (!(this_present_qualifier && that_present_qualifier)) return false;
          +      if (!this.qualifier.equals(that.qualifier)) return false;
               }
           
               boolean this_present_value = true && this.isSetValue();
               boolean that_present_value = true && that.isSetValue();
               if (this_present_value || that_present_value) {
          -      if (!(this_present_value && that_present_value))
          -        return false;
          -      if (!this.value.equals(that.value))
          -        return false;
          +      if (!(this_present_value && that_present_value)) return false;
          +      if (!this.value.equals(that.value)) return false;
               }
           
               boolean this_present_timestamp = true && this.isSetTimestamp();
               boolean that_present_timestamp = true && that.isSetTimestamp();
               if (this_present_timestamp || that_present_timestamp) {
          -      if (!(this_present_timestamp && that_present_timestamp))
          -        return false;
          -      if (this.timestamp != that.timestamp)
          -        return false;
          +      if (!(this_present_timestamp && that_present_timestamp)) return false;
          +      if (this.timestamp != that.timestamp) return false;
               }
           
               boolean this_present_tags = true && this.isSetTags();
               boolean that_present_tags = true && that.isSetTags();
               if (this_present_tags || that_present_tags) {
          -      if (!(this_present_tags && that_present_tags))
          -        return false;
          -      if (!this.tags.equals(that.tags))
          -        return false;
          +      if (!(this_present_tags && that_present_tags)) return false;
          +      if (!this.tags.equals(that.tags)) return false;
               }
           
               boolean this_present_type = true && this.isSetType();
               boolean that_present_type = true && that.isSetType();
               if (this_present_type || that_present_type) {
          -      if (!(this_present_type && that_present_type))
          -        return false;
          -      if (this.type != that.type)
          -        return false;
          +      if (!(this_present_type && that_present_type)) return false;
          +      if (this.type != that.type) return false;
               }
           
               return true;
          @@ -555,28 +591,23 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetFamily()) ? 131071 : 524287);
          -    if (isSetFamily())
          -      hashCode = hashCode * 8191 + family.hashCode();
          +    if (isSetFamily()) hashCode = hashCode * 8191 + family.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetQualifier()) ? 131071 : 524287);
          -    if (isSetQualifier())
          -      hashCode = hashCode * 8191 + qualifier.hashCode();
          +    if (isSetQualifier()) hashCode = hashCode * 8191 + qualifier.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetValue()) ? 131071 : 524287);
          -    if (isSetValue())
          -      hashCode = hashCode * 8191 + value.hashCode();
          +    if (isSetValue()) hashCode = hashCode * 8191 + value.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetTimestamp()) ? 131071 : 524287);
               if (isSetTimestamp())
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
               hashCode = hashCode * 8191 + ((isSetTags()) ? 131071 : 524287);
          -    if (isSetTags())
          -      hashCode = hashCode * 8191 + tags.hashCode();
          +    if (isSetTags()) hashCode = hashCode * 8191 + tags.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetType()) ? 131071 : 524287);
          -    if (isSetType())
          -      hashCode = hashCode * 8191 + (int) (type);
          +    if (isSetType()) hashCode = hashCode * 8191 + (int) (type);
           
               return hashCode;
             }
          @@ -661,7 +692,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -722,50 +754,59 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (family == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'family' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'family' was not present! Struct: " + toString());
               }
               if (qualifier == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifier' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'qualifier' was not present! Struct: " + toString());
               }
               if (value == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'value' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'value' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TColumnValueStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TColumnValueStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TColumnValueStandardScheme getScheme() {
                 return new TColumnValueStandardScheme();
               }
             }
           
-  private static class TColumnValueStandardScheme extends org.apache.thrift.scheme.StandardScheme<TColumnValue> {
+  private static class TColumnValueStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<TColumnValue> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -773,7 +814,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.family = iprot.readBinary();
                         struct.setFamilyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -781,7 +822,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.qualifier = iprot.readBinary();
                         struct.setQualifierIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -789,7 +830,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.value = iprot.readBinary();
                         struct.setValueIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -797,7 +838,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.timestamp = iprot.readI64();
                         struct.setTimestampIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -805,7 +846,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.tags = iprot.readBinary();
                         struct.setTagsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -813,7 +854,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) {
                         struct.type = iprot.readByte();
                         struct.setTypeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -828,7 +869,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnValue struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnValue struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -870,17 +912,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnValue struc
           
             }
           
          -  private static class TColumnValueTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TColumnValueTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TColumnValueTupleScheme getScheme() {
                 return new TColumnValueTupleScheme();
               }
             }
           
-  private static class TColumnValueTupleScheme extends org.apache.thrift.scheme.TupleScheme<TColumnValue> {
+  private static class TColumnValueTupleScheme
+      extends org.apache.thrift.scheme.TupleScheme<TColumnValue> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TColumnValue struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TColumnValue struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.family);
                 oprot.writeBinary(struct.qualifier);
                 oprot.writeBinary(struct.value);
          @@ -907,8 +953,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TColumnValue struct
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TColumnValue struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TColumnValue struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.family = iprot.readBinary();
                 struct.setFamilyIsSet(true);
                 struct.qualifier = iprot.readBinary();
          @@ -931,8 +979,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TColumnValue struct)
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
-    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
+      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
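
A quick orientation for readers of the TColumnValue hunks above: the reformatted setFieldValue switch only changes indentation, not behavior; it still dispatches a byte[] argument into the wrapping setter and passes a ByteBuffer through unchanged. The sketch below is illustrative only: the example class, its main method, and the no-arg constructor are assumptions, while setFieldValue, _Fields, and the setters are the ones visible in the hunks above.

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.thrift2.generated.TColumnValue;

    public class TColumnValueExample {                      // hypothetical example class
      public static void main(String[] args) {
        TColumnValue cv = new TColumnValue();               // no-arg constructor assumed
        // A byte[] argument takes the "instanceof byte[]" branch and is wrapped into a ByteBuffer.
        cv.setFieldValue(TColumnValue._Fields.FAMILY, "cf".getBytes());
        // A ByteBuffer argument takes the other branch and is stored as-is.
        cv.setFieldValue(TColumnValue._Fields.QUALIFIER, ByteBuffer.wrap("q".getBytes()));
        // A boxed Long is routed to setTimestamp(long).
        cv.setFieldValue(TColumnValue._Fields.TIMESTAMP, 1626652800000L);
        System.out.println(cv);
      }
    }
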
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOperator.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOperator.java
          index 6c749d587869..18990af1fcef 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOperator.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompareOperator.java
          @@ -1,25 +1,29 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.CompareOperator.
          + * Thrift wrapper around org.apache.hadoop.hbase.CompareOperator.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TCompareOperator implements org.apache.thrift.TEnum {
          -  LESS(0),
          -  LESS_OR_EQUAL(1),
          -  EQUAL(2),
          -  NOT_EQUAL(3),
          -  GREATER_OR_EQUAL(4),
          -  GREATER(5),
          -  NO_OP(6);
          +  LESS(0), LESS_OR_EQUAL(1), EQUAL(2), NOT_EQUAL(3), GREATER_OR_EQUAL(4), GREATER(5), NO_OP(6);
           
             private final int value;
           
          @@ -39,7 +43,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TCompareOperator findByValue(int value) { 
          +  public static TCompareOperator findByValue(int value) {
               switch (value) {
                 case 0:
                   return LESS;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
          index e4deb1078832..4ba05e456568 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
          @@ -1,25 +1,29 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.io.compress.Algorithm
          + * Thrift wrapper around org.apache.hadoop.hbase.io.compress.Algorithm
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TCompressionAlgorithm implements org.apache.thrift.TEnum {
          -  LZO(0),
          -  GZ(1),
          -  NONE(2),
          -  SNAPPY(3),
          -  LZ4(4),
          -  BZIP2(5),
          -  ZSTD(6);
          +  LZO(0), GZ(1), NONE(2), SNAPPY(3), LZ4(4), BZIP2(5), ZSTD(6);
           
             private final int value;
           
          @@ -39,7 +43,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TCompressionAlgorithm findByValue(int value) { 
          +  public static TCompressionAlgorithm findByValue(int value) {
               switch (value) {
                 case 0:
                   return LZO;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TConsistency.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TConsistency.java
          index 17b6d2bc0eba..13f401509eaf 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TConsistency.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TConsistency.java
          @@ -1,21 +1,30 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
           /**
          - * Specify Consistency:
          - *  - STRONG means reads only from primary region
          - *  - TIMELINE means reads might return values from secondary region replicas
          + * Specify Consistency: - STRONG means reads only from primary region - TIMELINE means reads might
          + * return values from secondary region replicas
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TConsistency implements org.apache.thrift.TEnum {
          -  STRONG(1),
          -  TIMELINE(2);
          +  STRONG(1), TIMELINE(2);
           
             private final int value;
           
          @@ -35,7 +44,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TConsistency findByValue(int value) { 
          +  public static TConsistency findByValue(int value) {
               switch (value) {
                 case 1:
                   return STRONG;
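
The reflowed TConsistency javadoc above keeps its meaning: STRONG reads only from the primary region, TIMELINE may return values from secondary region replicas. A minimal sketch of the two accessors this file declares, getValue() and findByValue(); the wrapper class and main method are assumptions for illustration:

    import org.apache.hadoop.hbase.thrift2.generated.TConsistency;

    public class TConsistencyExample {                      // hypothetical example class
      public static void main(String[] args) {
        // getValue() returns the thrift wire value declared above: STRONG(1), TIMELINE(2).
        int wire = TConsistency.TIMELINE.getValue();        // 2
        // findByValue() is the reverse lookup and returns null for unknown values.
        TConsistency resolved = TConsistency.findByValue(wire);
        System.out.println(wire + " -> " + resolved);       // prints "2 -> TIMELINE"
      }
    }
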
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java
          index c3c7429f024a..7bd5dac69f8f 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java
          @@ -1,26 +1,32 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.io.encoding.DataBlockEncoding
          + * Thrift wrapper around org.apache.hadoop.hbase.io.encoding.DataBlockEncoding
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TDataBlockEncoding implements org.apache.thrift.TEnum {
             /**
              * Disable data block encoding.
              */
          -  NONE(0),
          -  PREFIX(2),
          -  DIFF(3),
          -  FAST_DIFF(4),
          -  ROW_INDEX_V1(7);
          +  NONE(0), PREFIX(2), DIFF(3), FAST_DIFF(4), ROW_INDEX_V1(7);
           
             private final int value;
           
          @@ -40,7 +46,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TDataBlockEncoding findByValue(int value) { 
          +  public static TDataBlockEncoding findByValue(int value) {
               switch (value) {
                 case 0:
                   return NONE;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java
          index bb217d13f96d..2a6dd1966c6b 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java
          @@ -1,85 +1,99 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Used to perform Delete operations on a single row.
          - * 
          - * The scope can be further narrowed down by specifying a list of
          - * columns or column families as TColumns.
          - * 
          - * Specifying only a family in a TColumn will delete the whole family.
          - * If a timestamp is specified all versions with a timestamp less than
          - * or equal to this will be deleted. If no timestamp is specified the
          - * current time will be used.
          - * 
          - * Specifying a family and a column qualifier in a TColumn will delete only
          - * this qualifier. If a timestamp is specified only versions equal
          - * to this timestamp will be deleted. If no timestamp is specified the
          - * most recent version will be deleted.  To delete all previous versions,
          - * specify the DELETE_COLUMNS TDeleteType.
          - * 
          - * The top level timestamp is only used if a complete row should be deleted
          - * (i.e. no columns are passed) and if it is specified it works the same way
          - * as if you had added a TColumn for every column family and this timestamp
          - * (i.e. all versions older than or equal in all column families will be deleted)
          - * 
          - * You can specify how this Delete should be written to the write-ahead Log (WAL)
          - * by changing the durability. If you don't provide durability, it defaults to
          + * Used to perform Delete operations on a single row. The scope can be further narrowed down by
          + * specifying a list of columns or column families as TColumns. Specifying only a family in a
          + * TColumn will delete the whole family. If a timestamp is specified all versions with a timestamp
          + * less than or equal to this will be deleted. If no timestamp is specified the current time will be
          + * used. Specifying a family and a column qualifier in a TColumn will delete only this qualifier. If
          + * a timestamp is specified only versions equal to this timestamp will be deleted. If no timestamp
          + * is specified the most recent version will be deleted. To delete all previous versions, specify
          + * the DELETE_COLUMNS TDeleteType. The top level timestamp is only used if a complete row should be
          + * deleted (i.e. no columns are passed) and if it is specified it works the same way as if you had
          + * added a TColumn for every column family and this timestamp (i.e. all versions older than or equal
          + * in all column families will be deleted) You can specify how this Delete should be written to the
          + * write-ahead Log (WAL) by changing the durability. If you don't provide durability, it defaults to
            * column family's default setting for durability.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TDelete implements org.apache.thrift.TBase<TDelete, TDelete._Fields>, java.io.Serializable, Cloneable, Comparable<TDelete> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDelete");
          -
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)2);
          -  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3);
          -  private static final org.apache.thrift.protocol.TField DELETE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteType", org.apache.thrift.protocol.TType.I32, (short)4);
          -  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)6);
          -  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32, (short)7);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TDeleteStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TDeleteTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TDelete implements org.apache.thrift.TBase<TDelete, TDelete._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TDelete> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TDelete");
          +
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField DELETE_TYPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("deleteType", org.apache.thrift.protocol.TType.I32,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32,
          +          (short) 7);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TDeleteStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TDeleteTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
   public @org.apache.thrift.annotation.Nullable java.util.List<TColumn> columns; // optional
             public long timestamp; // optional
             /**
          -   * 
              * @see TDeleteType
              */
             public @org.apache.thrift.annotation.Nullable TDeleteType deleteType; // optional
-  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // optional
+  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // optional
             /**
          -   * 
              * @see TDurability
              */
             public @org.apache.thrift.annotation.Nullable TDurability durability; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    ROW((short)1, "row"),
          -    COLUMNS((short)2, "columns"),
          -    TIMESTAMP((short)3, "timestamp"),
          +    ROW((short) 1, "row"), COLUMNS((short) 2, "columns"), TIMESTAMP((short) 3, "timestamp"),
               /**
          -     * 
                * @see TDeleteType
                */
          -    DELETE_TYPE((short)4, "deleteType"),
          -    ATTRIBUTES((short)6, "attributes"),
          +    DELETE_TYPE((short) 4, "deleteType"), ATTRIBUTES((short) 6, "attributes"),
               /**
          -     * 
                * @see TDurability
                */
          -    DURABILITY((short)7, "durability");
          +    DURABILITY((short) 7, "durability");
           
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
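
The reflowed TDelete javadoc above compresses the original multi-paragraph description into one block without changing its semantics. A hedged sketch of the "delete one qualifier" case it describes follows; the TDelete constructor, setColumns, and setDeleteType appear in the hunks below, while the TColumn setters and the example wrapper class are assumptions for illustration.

    import java.nio.ByteBuffer;
    import java.util.Collections;
    import org.apache.hadoop.hbase.thrift2.generated.TColumn;
    import org.apache.hadoop.hbase.thrift2.generated.TDelete;
    import org.apache.hadoop.hbase.thrift2.generated.TDeleteType;

    public class TDeleteExample {                           // hypothetical example class
      public static void main(String[] args) {
        // Family plus qualifier narrows the delete to a single column, per the javadoc above.
        TColumn col = new TColumn();                        // TColumn setters assumed from the sibling struct
        col.setFamily("cf".getBytes());
        col.setQualifier("q1".getBytes());

        TDelete del = new TDelete(ByteBuffer.wrap("row-1".getBytes()));
        del.setColumns(Collections.singletonList(col));
        // DELETE_COLUMNS removes all previous versions rather than only the most recent one.
        del.setDeleteType(TDeleteType.DELETE_COLUMNS);
        System.out.println(del);
      }
    }
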
          @@ -92,7 +106,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // ROW
                     return ROW;
                   case 2: // COLUMNS
          @@ -111,12 +125,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -148,25 +162,43 @@ public java.lang.String getFieldName() {
             // isset id assignments
             private static final int __TIMESTAMP_ISSET_ID = 0;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.COLUMNS,_Fields.TIMESTAMP,_Fields.DELETE_TYPE,_Fields.ATTRIBUTES,_Fields.DURABILITY};
          +  private static final _Fields optionals[] = { _Fields.COLUMNS, _Fields.TIMESTAMP,
          +      _Fields.DELETE_TYPE, _Fields.ATTRIBUTES, _Fields.DURABILITY };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumn.class))));
          -    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumn.class))));
          +    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.DELETE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("deleteType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TDeleteType.class)));
          -    tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
          -    tmpMap.put(_Fields.DURABILITY, new org.apache.thrift.meta_data.FieldMetaData("durability", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TDurability.class)));
          +    tmpMap.put(_Fields.DELETE_TYPE,
          +      new org.apache.thrift.meta_data.FieldMetaData("deleteType",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TDeleteType.class)));
          +    tmpMap.put(_Fields.ATTRIBUTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true))));
          +    tmpMap.put(_Fields.DURABILITY,
          +      new org.apache.thrift.meta_data.FieldMetaData("durability",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TDurability.class)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TDelete.class, metaDataMap);
             }
          @@ -176,9 +208,7 @@ public TDelete() {
           
             }
           
          -  public TDelete(
          -    java.nio.ByteBuffer row)
          -  {
          +  public TDelete(java.nio.ByteBuffer row) {
               this();
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
             }
          @@ -192,7 +222,8 @@ public TDelete(TDelete other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetColumns()) {
-      java.util.List<TColumn> __this__columns = new java.util.ArrayList<TColumn>(other.columns.size());
+      java.util.List<TColumn> __this__columns =
+          new java.util.ArrayList<TColumn>(other.columns.size());
                 for (TColumn other_element : other.columns) {
                   __this__columns.add(new TColumn(other_element));
                 }
          @@ -203,7 +234,8 @@ public TDelete(TDelete other) {
                 this.deleteType = other.deleteType;
               }
               if (other.isSetAttributes()) {
-      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes);
+      java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
+          new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(other.attributes);
                 this.attributes = __this__attributes;
               }
               if (other.isSetDurability()) {
          @@ -237,7 +269,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TDelete setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
          @@ -282,7 +314,8 @@ public java.util.List getColumns() {
               return this.columns;
             }
           
-  public TDelete setColumns(@org.apache.thrift.annotation.Nullable java.util.List<TColumn> columns) {
+  public TDelete
+      setColumns(@org.apache.thrift.annotation.Nullable java.util.List<TColumn> columns) {
               this.columns = columns;
               return this;
             }
          @@ -313,7 +346,8 @@ public TDelete setTimestamp(long timestamp) {
             }
           
             public void unsetTimestamp() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
             }
           
             /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -322,11 +356,11 @@ public boolean isSetTimestamp() {
             }
           
             public void setTimestampIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
             }
           
             /**
          -   * 
              * @see TDeleteType
              */
             @org.apache.thrift.annotation.Nullable
          @@ -335,7 +369,6 @@ public TDeleteType getDeleteType() {
             }
           
             /**
          -   * 
              * @see TDeleteType
              */
             public TDelete setDeleteType(@org.apache.thrift.annotation.Nullable TDeleteType deleteType) {
          @@ -364,17 +397,18 @@ public int getAttributesSize() {
           
             public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
               if (this.attributes == null) {
-      this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
+      this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
               }
               this.attributes.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
-  public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
+  public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
               return this.attributes;
             }
           
-  public TDelete setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
+  public TDelete setAttributes(
+      @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
               this.attributes = attributes;
               return this;
             }
          @@ -395,7 +429,6 @@ public void setAttributesIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TDurability
              */
             @org.apache.thrift.annotation.Nullable
          @@ -404,7 +437,6 @@ public TDurability getDurability() {
             }
           
             /**
          -   * 
              * @see TDurability
              */
             public TDelete setDurability(@org.apache.thrift.annotation.Nullable TDurability durability) {
          @@ -427,59 +459,60 @@ public void setDurabilityIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case COLUMNS:
          -      if (value == null) {
          -        unsetColumns();
          -      } else {
           -        setColumns((java.util.List<TColumn>)value);
          -      }
          -      break;
          +      case COLUMNS:
          +        if (value == null) {
          +          unsetColumns();
          +        } else {
           +          setColumns((java.util.List<TColumn>) value);
          +        }
          +        break;
           
          -    case TIMESTAMP:
          -      if (value == null) {
          -        unsetTimestamp();
          -      } else {
          -        setTimestamp((java.lang.Long)value);
          -      }
          -      break;
          +      case TIMESTAMP:
          +        if (value == null) {
          +          unsetTimestamp();
          +        } else {
          +          setTimestamp((java.lang.Long) value);
          +        }
          +        break;
           
          -    case DELETE_TYPE:
          -      if (value == null) {
          -        unsetDeleteType();
          -      } else {
          -        setDeleteType((TDeleteType)value);
          -      }
          -      break;
          +      case DELETE_TYPE:
          +        if (value == null) {
          +          unsetDeleteType();
          +        } else {
          +          setDeleteType((TDeleteType) value);
          +        }
          +        break;
           
          -    case ATTRIBUTES:
          -      if (value == null) {
          -        unsetAttributes();
          -      } else {
           -        setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case ATTRIBUTES:
          +        if (value == null) {
          +          unsetAttributes();
          +        } else {
           +          setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case DURABILITY:
          -      if (value == null) {
          -        unsetDurability();
          -      } else {
          -        setDurability((TDurability)value);
          -      }
          -      break;
          +      case DURABILITY:
          +        if (value == null) {
          +          unsetDurability();
          +        } else {
          +          setDurability((TDurability) value);
          +        }
          +        break;
           
               }
             }
          @@ -487,116 +520,104 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case COLUMNS:
          -      return getColumns();
          +      case COLUMNS:
          +        return getColumns();
           
          -    case TIMESTAMP:
          -      return getTimestamp();
          +      case TIMESTAMP:
          +        return getTimestamp();
           
          -    case DELETE_TYPE:
          -      return getDeleteType();
          +      case DELETE_TYPE:
          +        return getDeleteType();
           
          -    case ATTRIBUTES:
          -      return getAttributes();
          +      case ATTRIBUTES:
          +        return getAttributes();
           
          -    case DURABILITY:
          -      return getDurability();
          +      case DURABILITY:
          +        return getDurability();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case ROW:
          -      return isSetRow();
          -    case COLUMNS:
          -      return isSetColumns();
          -    case TIMESTAMP:
          -      return isSetTimestamp();
          -    case DELETE_TYPE:
          -      return isSetDeleteType();
          -    case ATTRIBUTES:
          -      return isSetAttributes();
          -    case DURABILITY:
          -      return isSetDurability();
          +      case ROW:
          +        return isSetRow();
          +      case COLUMNS:
          +        return isSetColumns();
          +      case TIMESTAMP:
          +        return isSetTimestamp();
          +      case DELETE_TYPE:
          +        return isSetDeleteType();
          +      case ATTRIBUTES:
          +        return isSetAttributes();
          +      case DURABILITY:
          +        return isSetDurability();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TDelete)
          -      return this.equals((TDelete)that);
          +    if (that instanceof TDelete) return this.equals((TDelete) that);
               return false;
             }
           
             public boolean equals(TDelete that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_columns = true && this.isSetColumns();
               boolean that_present_columns = true && that.isSetColumns();
               if (this_present_columns || that_present_columns) {
          -      if (!(this_present_columns && that_present_columns))
          -        return false;
          -      if (!this.columns.equals(that.columns))
          -        return false;
          +      if (!(this_present_columns && that_present_columns)) return false;
          +      if (!this.columns.equals(that.columns)) return false;
               }
           
               boolean this_present_timestamp = true && this.isSetTimestamp();
               boolean that_present_timestamp = true && that.isSetTimestamp();
               if (this_present_timestamp || that_present_timestamp) {
          -      if (!(this_present_timestamp && that_present_timestamp))
          -        return false;
          -      if (this.timestamp != that.timestamp)
          -        return false;
          +      if (!(this_present_timestamp && that_present_timestamp)) return false;
          +      if (this.timestamp != that.timestamp) return false;
               }
           
               boolean this_present_deleteType = true && this.isSetDeleteType();
               boolean that_present_deleteType = true && that.isSetDeleteType();
               if (this_present_deleteType || that_present_deleteType) {
          -      if (!(this_present_deleteType && that_present_deleteType))
          -        return false;
          -      if (!this.deleteType.equals(that.deleteType))
          -        return false;
          +      if (!(this_present_deleteType && that_present_deleteType)) return false;
          +      if (!this.deleteType.equals(that.deleteType)) return false;
               }
           
               boolean this_present_attributes = true && this.isSetAttributes();
               boolean that_present_attributes = true && that.isSetAttributes();
               if (this_present_attributes || that_present_attributes) {
          -      if (!(this_present_attributes && that_present_attributes))
          -        return false;
          -      if (!this.attributes.equals(that.attributes))
          -        return false;
          +      if (!(this_present_attributes && that_present_attributes)) return false;
          +      if (!this.attributes.equals(that.attributes)) return false;
               }
           
               boolean this_present_durability = true && this.isSetDurability();
               boolean that_present_durability = true && that.isSetDurability();
               if (this_present_durability || that_present_durability) {
          -      if (!(this_present_durability && that_present_durability))
          -        return false;
          -      if (!this.durability.equals(that.durability))
          -        return false;
          +      if (!(this_present_durability && that_present_durability)) return false;
          +      if (!this.durability.equals(that.durability)) return false;
               }
           
               return true;
          @@ -607,28 +628,23 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -    if (isSetColumns())
          -      hashCode = hashCode * 8191 + columns.hashCode();
          +    if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetTimestamp()) ? 131071 : 524287);
               if (isSetTimestamp())
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
               hashCode = hashCode * 8191 + ((isSetDeleteType()) ? 131071 : 524287);
          -    if (isSetDeleteType())
          -      hashCode = hashCode * 8191 + deleteType.getValue();
          +    if (isSetDeleteType()) hashCode = hashCode * 8191 + deleteType.getValue();
           
               hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -    if (isSetAttributes())
          -      hashCode = hashCode * 8191 + attributes.hashCode();
          +    if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetDurability()) ? 131071 : 524287);
          -    if (isSetDurability())
          -      hashCode = hashCode * 8191 + durability.getValue();
          +    if (isSetDurability()) hashCode = hashCode * 8191 + durability.getValue();
           
               return hashCode;
             }
          @@ -713,7 +729,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -782,44 +799,51 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (row == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'row' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TDeleteStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TDeleteStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TDeleteStandardScheme getScheme() {
                 return new TDeleteStandardScheme();
               }
             }
           
           -  private static class TDeleteStandardScheme extends org.apache.thrift.scheme.StandardScheme<TDelete> {
           +  private static class TDeleteStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<TDelete> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TDelete struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TDelete struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -827,7 +851,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TDelete struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -836,9 +860,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TDelete struct) thr
                         {
                           org.apache.thrift.protocol.TList _list52 = iprot.readListBegin();
                            struct.columns = new java.util.ArrayList<TColumn>(_list52.size);
          -                @org.apache.thrift.annotation.Nullable TColumn _elem53;
          -                for (int _i54 = 0; _i54 < _list52.size; ++_i54)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TColumn _elem53;
          +                for (int _i54 = 0; _i54 < _list52.size; ++_i54) {
                             _elem53 = new TColumn();
                             _elem53.read(iprot);
                             struct.columns.add(_elem53);
          @@ -846,7 +870,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TDelete struct) thr
                           iprot.readListEnd();
                         }
                         struct.setColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -854,15 +878,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TDelete struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.timestamp = iprot.readI64();
                         struct.setTimestampIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 4: // DELETE_TYPE
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.deleteType = org.apache.hadoop.hbase.thrift2.generated.TDeleteType.findByValue(iprot.readI32());
          +              struct.deleteType = org.apache.hadoop.hbase.thrift2.generated.TDeleteType
          +                  .findByValue(iprot.readI32());
                         struct.setDeleteTypeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -870,11 +895,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TDelete struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map55 = iprot.readMapBegin();
           -                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map55.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key56;
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val57;
          -                for (int _i58 = 0; _i58 < _map55.size; ++_i58)
          -                {
           +                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                    2 * _map55.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key56;
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _val57;
          +                for (int _i58 = 0; _i58 < _map55.size; ++_i58) {
                             _key56 = iprot.readBinary();
                             _val57 = iprot.readBinary();
                             struct.attributes.put(_key56, _val57);
          @@ -882,15 +909,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TDelete struct) thr
                           iprot.readMapEnd();
                         }
                         struct.setAttributesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 7: // DURABILITY
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability
          +                  .findByValue(iprot.readI32());
                         struct.setDurabilityIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -905,7 +933,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TDelete struct) thr
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TDelete struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TDelete struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -918,9 +947,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TDelete struct) th
                   if (struct.isSetColumns()) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          -            for (TColumn _iter59 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          +            for (TColumn _iter59 : struct.columns) {
                         _iter59.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -944,9 +973,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TDelete struct) th
                   if (struct.isSetAttributes()) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry _iter60 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry _iter60 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter60.getKey());
                         oprot.writeBinary(_iter60.getValue());
                       }
          @@ -977,8 +1008,10 @@ public TDeleteTupleScheme getScheme() {
              private static class TDeleteTupleScheme extends org.apache.thrift.scheme.TupleScheme<TDelete> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TDelete struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TDelete struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.row);
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetColumns()) {
          @@ -1000,8 +1033,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TDelete struct) thr
                 if (struct.isSetColumns()) {
                   {
                     oprot.writeI32(struct.columns.size());
          -          for (TColumn _iter61 : struct.columns)
          -          {
          +          for (TColumn _iter61 : struct.columns) {
                       _iter61.write(oprot);
                     }
                   }
          @@ -1015,8 +1047,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TDelete struct) thr
                 if (struct.isSetAttributes()) {
                   {
                     oprot.writeI32(struct.attributes.size());
          -          for (java.util.Map.Entry _iter62 : struct.attributes.entrySet())
          -          {
          +          for (java.util.Map.Entry _iter62 : struct.attributes
          +              .entrySet()) {
                       oprot.writeBinary(_iter62.getKey());
                       oprot.writeBinary(_iter62.getValue());
                     }
          @@ -1028,18 +1060,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TDelete struct) thr
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TDelete struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TDelete struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.row = iprot.readBinary();
                 struct.setRowIsSet(true);
                 java.util.BitSet incoming = iprot.readBitSet(5);
                 if (incoming.get(0)) {
                   {
          -          org.apache.thrift.protocol.TList _list63 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list63 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                      struct.columns = new java.util.ArrayList<TColumn>(_list63.size);
          -          @org.apache.thrift.annotation.Nullable TColumn _elem64;
          -          for (int _i65 = 0; _i65 < _list63.size; ++_i65)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TColumn _elem64;
          +          for (int _i65 = 0; _i65 < _list63.size; ++_i65) {
                       _elem64 = new TColumn();
                       _elem64.read(iprot);
                       struct.columns.add(_elem64);
          @@ -1052,17 +1087,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TDelete struct) thro
                   struct.setTimestampIsSet(true);
                 }
                 if (incoming.get(2)) {
          -        struct.deleteType = org.apache.hadoop.hbase.thrift2.generated.TDeleteType.findByValue(iprot.readI32());
          +        struct.deleteType =
          +            org.apache.hadoop.hbase.thrift2.generated.TDeleteType.findByValue(iprot.readI32());
                   struct.setDeleteTypeIsSet(true);
                 }
                 if (incoming.get(3)) {
                   {
          -          org.apache.thrift.protocol.TMap _map66 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -          struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map66.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key67;
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val68;
          -          for (int _i69 = 0; _i69 < _map66.size; ++_i69)
          -          {
          +          org.apache.thrift.protocol.TMap _map66 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +          struct.attributes =
           +              new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map66.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key67;
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _val68;
          +          for (int _i69 = 0; _i69 < _map66.size; ++_i69) {
                       _key67 = iprot.readBinary();
                       _val68 = iprot.readBinary();
                       struct.attributes.put(_key67, _val68);
          @@ -1071,14 +1110,17 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TDelete struct) thro
                   struct.setAttributesIsSet(true);
                 }
                 if (incoming.get(4)) {
          -        struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +        struct.durability =
          +            org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
                   struct.setDurabilityIsSet(true);
                 }
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
           +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
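For orientation between the generated files, here is a minimal sketch of how client code might assemble one of these TDelete structs. It uses only accessors visible in the hunks above (setRow, setDeleteType, setDurability, setTimestamp, putToAttributes, validate); the class name and the row/attribute values are illustrative, not part of the patch.

  import java.nio.ByteBuffer;
  import java.nio.charset.StandardCharsets;
  import org.apache.hadoop.hbase.thrift2.generated.TDelete;
  import org.apache.hadoop.hbase.thrift2.generated.TDeleteType;
  import org.apache.hadoop.hbase.thrift2.generated.TDurability;

  public class TDeleteSketch {
    static TDelete newDelete(byte[] row) throws org.apache.thrift.TException {
      TDelete delete = new TDelete();
      delete.setRow(row);                               // 'row' is the only required field
      delete.setDeleteType(TDeleteType.DELETE_COLUMNS); // also remove previous versions
      delete.setDurability(TDurability.SYNC_WAL);       // write the WAL entry synchronously
      delete.setTimestamp(System.currentTimeMillis());
      delete.putToAttributes(
        ByteBuffer.wrap("trace-id".getBytes(StandardCharsets.UTF_8)),
        ByteBuffer.wrap("42".getBytes(StandardCharsets.UTF_8)));
      delete.validate(); // would throw a TProtocolException if 'row' had not been set
      return delete;
    }
  }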
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDeleteType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDeleteType.java
          index 3ccf01ea4300..58e3df1d2636 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDeleteType.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDeleteType.java
          @@ -1,23 +1,30 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
           /**
          - * Specify type of delete:
          - *  - DELETE_COLUMN means exactly one version will be removed,
          - *  - DELETE_COLUMNS means previous versions will also be removed.
          + * Specify type of delete: - DELETE_COLUMN means exactly one version will be removed, -
          + * DELETE_COLUMNS means previous versions will also be removed.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TDeleteType implements org.apache.thrift.TEnum {
          -  DELETE_COLUMN(0),
          -  DELETE_COLUMNS(1),
          -  DELETE_FAMILY(2),
          -  DELETE_FAMILY_VERSION(3);
          +  DELETE_COLUMN(0), DELETE_COLUMNS(1), DELETE_FAMILY(2), DELETE_FAMILY_VERSION(3);
           
             private final int value;
           
          @@ -37,7 +44,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TDeleteType findByValue(int value) { 
          +  public static TDeleteType findByValue(int value) {
               switch (value) {
                 case 0:
                   return DELETE_COLUMN;
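As a quick illustration of the enum-to-i32 mapping the generated readers above rely on (for example TDeleteType.findByValue(iprot.readI32())), a small sketch follows; the class name and the unknown value 99 are illustrative, while the constants and their values come from this file.

  import org.apache.hadoop.hbase.thrift2.generated.TDeleteType;

  public class TDeleteTypeSketch {
    public static void main(String[] args) {
      int wire = TDeleteType.DELETE_FAMILY.getValue();     // 2, the value sent on the wire
      TDeleteType decoded = TDeleteType.findByValue(wire); // DELETE_FAMILY
      TDeleteType unknown = TDeleteType.findByValue(99);   // null: no constant has this value
      System.out.println(decoded + " " + unknown);
    }
  }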
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDurability.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDurability.java
          index 638d440c01c9..b3f4ac86a206 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDurability.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDurability.java
          @@ -1,26 +1,32 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
           /**
          - * Specify Durability:
          - *  - SKIP_WAL means do not write the Mutation to the WAL.
          - *  - ASYNC_WAL means write the Mutation to the WAL asynchronously,
          - *  - SYNC_WAL means write the Mutation to the WAL synchronously,
          - *  - FSYNC_WAL means Write the Mutation to the WAL synchronously and force the entries to disk.
          + * Specify Durability: - SKIP_WAL means do not write the Mutation to the WAL. - ASYNC_WAL means
          + * write the Mutation to the WAL asynchronously, - SYNC_WAL means write the Mutation to the WAL
          + * synchronously, - FSYNC_WAL means Write the Mutation to the WAL synchronously and force the
          + * entries to disk.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TDurability implements org.apache.thrift.TEnum {
          -  USE_DEFAULT(0),
          -  SKIP_WAL(1),
          -  ASYNC_WAL(2),
          -  SYNC_WAL(3),
          -  FSYNC_WAL(4);
          +  USE_DEFAULT(0), SKIP_WAL(1), ASYNC_WAL(2), SYNC_WAL(3), FSYNC_WAL(4);
           
             private final int value;
           
          @@ -40,7 +46,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TDurability findByValue(int value) { 
          +  public static TDurability findByValue(int value) {
               switch (value) {
                 case 0:
                   return USE_DEFAULT;
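In the same spirit, a short sketch of choosing one of these durability levels on a mutation; setDurability is the TDelete accessor shown earlier in this patch, the trade-off in the comment restates the class comment above, and the helper name is illustrative.

  import org.apache.hadoop.hbase.thrift2.generated.TDelete;
  import org.apache.hadoop.hbase.thrift2.generated.TDurability;

  public class TDurabilitySketch {
    // SKIP_WAL does not write the mutation to the WAL at all, while FSYNC_WAL writes it
    // synchronously and forces the entry to disk; pick based on how much loss is tolerable.
    static TDelete withDurability(TDelete delete, boolean mustNotLose) {
      return delete.setDurability(mustNotLose ? TDurability.FSYNC_WAL : TDurability.SKIP_WAL);
    }
  }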
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TFilterByOperator.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TFilterByOperator.java
          index 61ee2f6de513..9297f0a3d025 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TFilterByOperator.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TFilterByOperator.java
          @@ -1,16 +1,26 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TFilterByOperator implements org.apache.thrift.TEnum {
          -  AND(0),
          -  OR(1);
          +  AND(0), OR(1);
           
             private final int value;
           
          @@ -30,7 +40,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TFilterByOperator findByValue(int value) { 
          +  public static TFilterByOperator findByValue(int value) {
               switch (value) {
                 case 0:
                   return AND;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
          index dfecde9ffb22..6bf07e91a576 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
          @@ -1,47 +1,87 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Used to perform Get operations on a single row.
          - * 
          - * The scope can be further narrowed down by specifying a list of
          - * columns or column families.
          - * 
          - * To get everything for a row, instantiate a Get object with just the row to get.
          - * To further define the scope of what to get you can add a timestamp or time range
          - * with an optional maximum number of versions to return.
          - * 
          - * If you specify a time range and a timestamp the range is ignored.
          - * Timestamps on TColumns are ignored.
          + * Used to perform Get operations on a single row. The scope can be further narrowed down by
          + * specifying a list of columns or column families. To get everything for a row, instantiate a Get
          + * object with just the row to get. To further define the scope of what to get you can add a
          + * timestamp or time range with an optional maximum number of versions to return. If you specify a
          + * time range and a timestamp the range is ignored. Timestamps on TColumns are ignored.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class TGet implements org.apache.thrift.TBase<TGet, TGet._Fields>, java.io.Serializable, Cloneable, Comparable<TGet> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGet");
          -
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)2);
          -  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3);
          -  private static final org.apache.thrift.protocol.TField TIME_RANGE_FIELD_DESC = new org.apache.thrift.protocol.TField("timeRange", org.apache.thrift.protocol.TType.STRUCT, (short)4);
          -  private static final org.apache.thrift.protocol.TField MAX_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxVersions", org.apache.thrift.protocol.TType.I32, (short)5);
          -  private static final org.apache.thrift.protocol.TField FILTER_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("filterString", org.apache.thrift.protocol.TType.STRING, (short)6);
          -  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)7);
          -  private static final org.apache.thrift.protocol.TField AUTHORIZATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("authorizations", org.apache.thrift.protocol.TType.STRUCT, (short)8);
          -  private static final org.apache.thrift.protocol.TField CONSISTENCY_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency", org.apache.thrift.protocol.TType.I32, (short)9);
          -  private static final org.apache.thrift.protocol.TField TARGET_REPLICA_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("targetReplicaId", org.apache.thrift.protocol.TType.I32, (short)10);
          -  private static final org.apache.thrift.protocol.TField CACHE_BLOCKS_FIELD_DESC = new org.apache.thrift.protocol.TField("cacheBlocks", org.apache.thrift.protocol.TType.BOOL, (short)11);
          -  private static final org.apache.thrift.protocol.TField STORE_LIMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("storeLimit", org.apache.thrift.protocol.TType.I32, (short)12);
          -  private static final org.apache.thrift.protocol.TField STORE_OFFSET_FIELD_DESC = new org.apache.thrift.protocol.TField("storeOffset", org.apache.thrift.protocol.TType.I32, (short)13);
          -  private static final org.apache.thrift.protocol.TField EXISTENCE_ONLY_FIELD_DESC = new org.apache.thrift.protocol.TField("existence_only", org.apache.thrift.protocol.TType.BOOL, (short)14);
          -  private static final org.apache.thrift.protocol.TField FILTER_BYTES_FIELD_DESC = new org.apache.thrift.protocol.TField("filterBytes", org.apache.thrift.protocol.TType.STRING, (short)15);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TGetStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TGetTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           +public class TGet implements org.apache.thrift.TBase<TGet, TGet._Fields>, java.io.Serializable,
           +    Cloneable, Comparable<TGet> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TGet");
          +
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField TIME_RANGE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timeRange", org.apache.thrift.protocol.TType.STRUCT,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField MAX_VERSIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("maxVersions", org.apache.thrift.protocol.TType.I32,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField FILTER_STRING_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("filterString", org.apache.thrift.protocol.TType.STRING,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +          (short) 7);
          +  private static final org.apache.thrift.protocol.TField AUTHORIZATIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("authorizations",
          +          org.apache.thrift.protocol.TType.STRUCT, (short) 8);
          +  private static final org.apache.thrift.protocol.TField CONSISTENCY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("consistency", org.apache.thrift.protocol.TType.I32,
          +          (short) 9);
          +  private static final org.apache.thrift.protocol.TField TARGET_REPLICA_ID_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("targetReplicaId", org.apache.thrift.protocol.TType.I32,
          +          (short) 10);
          +  private static final org.apache.thrift.protocol.TField CACHE_BLOCKS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cacheBlocks", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 11);
          +  private static final org.apache.thrift.protocol.TField STORE_LIMIT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("storeLimit", org.apache.thrift.protocol.TType.I32,
          +          (short) 12);
          +  private static final org.apache.thrift.protocol.TField STORE_OFFSET_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("storeOffset", org.apache.thrift.protocol.TType.I32,
          +          (short) 13);
          +  private static final org.apache.thrift.protocol.TField EXISTENCE_ONLY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("existence_only", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 14);
          +  private static final org.apache.thrift.protocol.TField FILTER_BYTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("filterBytes", org.apache.thrift.protocol.TType.STRING,
          +          (short) 15);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TGetStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TGetTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
              public @org.apache.thrift.annotation.Nullable java.util.List<TColumn> columns; // optional
          @@ -49,10 +89,9 @@ public class TGet implements org.apache.thrift.TBase, java.i
             public @org.apache.thrift.annotation.Nullable TTimeRange timeRange; // optional
             public int maxVersions; // optional
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterString; // optional
          -  public @org.apache.thrift.annotation.Nullable java.util.Map attributes; // optional
          +  public @org.apache.thrift.annotation.Nullable java.util.Map attributes; // optional
             public @org.apache.thrift.annotation.Nullable TAuthorization authorizations; // optional
             /**
          -   * 
              * @see TConsistency
              */
             public @org.apache.thrift.annotation.Nullable TConsistency consistency; // optional
          @@ -63,29 +102,25 @@ public class TGet implements org.apache.thrift.TBase, java.i
             public boolean existence_only; // optional
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterBytes; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    ROW((short)1, "row"),
          -    COLUMNS((short)2, "columns"),
          -    TIMESTAMP((short)3, "timestamp"),
          -    TIME_RANGE((short)4, "timeRange"),
          -    MAX_VERSIONS((short)5, "maxVersions"),
          -    FILTER_STRING((short)6, "filterString"),
          -    ATTRIBUTES((short)7, "attributes"),
          -    AUTHORIZATIONS((short)8, "authorizations"),
          +    ROW((short) 1, "row"), COLUMNS((short) 2, "columns"), TIMESTAMP((short) 3, "timestamp"),
          +    TIME_RANGE((short) 4, "timeRange"), MAX_VERSIONS((short) 5, "maxVersions"),
          +    FILTER_STRING((short) 6, "filterString"), ATTRIBUTES((short) 7, "attributes"),
          +    AUTHORIZATIONS((short) 8, "authorizations"),
               /**
          -     * 
                * @see TConsistency
                */
          -    CONSISTENCY((short)9, "consistency"),
          -    TARGET_REPLICA_ID((short)10, "targetReplicaId"),
          -    CACHE_BLOCKS((short)11, "cacheBlocks"),
          -    STORE_LIMIT((short)12, "storeLimit"),
          -    STORE_OFFSET((short)13, "storeOffset"),
          -    EXISTENCE_ONLY((short)14, "existence_only"),
          -    FILTER_BYTES((short)15, "filterBytes");
          +    CONSISTENCY((short) 9, "consistency"), TARGET_REPLICA_ID((short) 10, "targetReplicaId"),
          +    CACHE_BLOCKS((short) 11, "cacheBlocks"), STORE_LIMIT((short) 12, "storeLimit"),
          +    STORE_OFFSET((short) 13, "storeOffset"), EXISTENCE_ONLY((short) 14, "existence_only"),
          +    FILTER_BYTES((short) 15, "filterBytes");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -98,7 +133,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // ROW
                     return ROW;
                   case 2: // COLUMNS
          @@ -135,12 +170,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -178,43 +213,78 @@ public java.lang.String getFieldName() {
             private static final int __STOREOFFSET_ISSET_ID = 5;
             private static final int __EXISTENCE_ONLY_ISSET_ID = 6;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.COLUMNS,_Fields.TIMESTAMP,_Fields.TIME_RANGE,_Fields.MAX_VERSIONS,_Fields.FILTER_STRING,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.CONSISTENCY,_Fields.TARGET_REPLICA_ID,_Fields.CACHE_BLOCKS,_Fields.STORE_LIMIT,_Fields.STORE_OFFSET,_Fields.EXISTENCE_ONLY,_Fields.FILTER_BYTES};
          +  private static final _Fields optionals[] = { _Fields.COLUMNS, _Fields.TIMESTAMP,
          +      _Fields.TIME_RANGE, _Fields.MAX_VERSIONS, _Fields.FILTER_STRING, _Fields.ATTRIBUTES,
          +      _Fields.AUTHORIZATIONS, _Fields.CONSISTENCY, _Fields.TARGET_REPLICA_ID, _Fields.CACHE_BLOCKS,
          +      _Fields.STORE_LIMIT, _Fields.STORE_OFFSET, _Fields.EXISTENCE_ONLY, _Fields.FILTER_BYTES };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumn.class))));
          -    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumn.class))));
          +    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.TIME_RANGE, new org.apache.thrift.meta_data.FieldMetaData("timeRange", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTimeRange.class)));
          -    tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.TIME_RANGE,
          +      new org.apache.thrift.meta_data.FieldMetaData("timeRange",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TTimeRange.class)));
          +    tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.FILTER_STRING, new org.apache.thrift.meta_data.FieldMetaData("filterString", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
          -    tmpMap.put(_Fields.AUTHORIZATIONS, new org.apache.thrift.meta_data.FieldMetaData("authorizations", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAuthorization.class)));
          -    tmpMap.put(_Fields.CONSISTENCY, new org.apache.thrift.meta_data.FieldMetaData("consistency", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TConsistency.class)));
          -    tmpMap.put(_Fields.TARGET_REPLICA_ID, new org.apache.thrift.meta_data.FieldMetaData("targetReplicaId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.FILTER_STRING,
          +      new org.apache.thrift.meta_data.FieldMetaData("filterString",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.ATTRIBUTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true))));
          +    tmpMap.put(_Fields.AUTHORIZATIONS,
          +      new org.apache.thrift.meta_data.FieldMetaData("authorizations",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TAuthorization.class)));
          +    tmpMap.put(_Fields.CONSISTENCY,
          +      new org.apache.thrift.meta_data.FieldMetaData("consistency",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TConsistency.class)));
          +    tmpMap.put(_Fields.TARGET_REPLICA_ID, new org.apache.thrift.meta_data.FieldMetaData(
          +        "targetReplicaId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.CACHE_BLOCKS, new org.apache.thrift.meta_data.FieldMetaData("cacheBlocks", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.CACHE_BLOCKS, new org.apache.thrift.meta_data.FieldMetaData("cacheBlocks",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.STORE_LIMIT, new org.apache.thrift.meta_data.FieldMetaData("storeLimit", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.STORE_LIMIT, new org.apache.thrift.meta_data.FieldMetaData("storeLimit",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.STORE_OFFSET, new org.apache.thrift.meta_data.FieldMetaData("storeOffset", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.STORE_OFFSET, new org.apache.thrift.meta_data.FieldMetaData("storeOffset",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.EXISTENCE_ONLY, new org.apache.thrift.meta_data.FieldMetaData("existence_only", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.EXISTENCE_ONLY, new org.apache.thrift.meta_data.FieldMetaData(
          +        "existence_only", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.FILTER_BYTES, new org.apache.thrift.meta_data.FieldMetaData("filterBytes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          +    tmpMap.put(_Fields.FILTER_BYTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("filterBytes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGet.class, metaDataMap);
             }
          @@ -222,9 +292,7 @@ public java.lang.String getFieldName() {
             public TGet() {
             }
           
          -  public TGet(
          -    java.nio.ByteBuffer row)
          -  {
          +  public TGet(java.nio.ByteBuffer row) {
               this();
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
             }
          @@ -238,7 +306,8 @@ public TGet(TGet other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetColumns()) {
           -      java.util.List<TColumn> __this__columns = new java.util.ArrayList<TColumn>(other.columns.size());
           +      java.util.List<TColumn> __this__columns =
           +          new java.util.ArrayList<TColumn>(other.columns.size());
                 for (TColumn other_element : other.columns) {
                   __this__columns.add(new TColumn(other_element));
                 }
          @@ -253,7 +322,8 @@ public TGet(TGet other) {
                 this.filterString = org.apache.thrift.TBaseHelper.copyBinary(other.filterString);
               }
               if (other.isSetAttributes()) {
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes);
           +      java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +          new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(other.attributes);
                 this.attributes = __this__attributes;
               }
               if (other.isSetAuthorizations()) {
          @@ -312,7 +382,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TGet setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
          @@ -388,7 +458,8 @@ public TGet setTimestamp(long timestamp) {
             }
           
             public void unsetTimestamp() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
             }
           
             /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -397,7 +468,8 @@ public boolean isSetTimestamp() {
             }
           
             public void setTimestampIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
             }
           
             @org.apache.thrift.annotation.Nullable
          @@ -436,7 +508,8 @@ public TGet setMaxVersions(int maxVersions) {
             }
           
             public void unsetMaxVersions() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID);
             }
           
             /** Returns true if field maxVersions is set (has been assigned a value) and false otherwise */
          @@ -445,7 +518,8 @@ public boolean isSetMaxVersions() {
             }
           
             public void setMaxVersionsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID, value);
             }
           
             public byte[] getFilterString() {
          @@ -458,11 +532,13 @@ public java.nio.ByteBuffer bufferForFilterString() {
             }
           
             public TGet setFilterString(byte[] filterString) {
          -    this.filterString = filterString == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(filterString.clone());
          +    this.filterString = filterString == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(filterString.clone());
               return this;
             }
           
          -  public TGet setFilterString(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterString) {
          +  public TGet
          +      setFilterString(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterString) {
               this.filterString = org.apache.thrift.TBaseHelper.copyBinary(filterString);
               return this;
             }
          @@ -488,17 +564,18 @@ public int getAttributesSize() {
           
             public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
               if (this.attributes == null) {
           -      this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +      this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
               }
               this.attributes.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
           -  public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +  public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
               return this.attributes;
             }
           
           -  public TGet setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +  public TGet setAttributes(
           +      @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
               this.attributes = attributes;
               return this;
             }
          @@ -523,7 +600,8 @@ public TAuthorization getAuthorizations() {
               return this.authorizations;
             }
           
          -  public TGet setAuthorizations(@org.apache.thrift.annotation.Nullable TAuthorization authorizations) {
          +  public TGet
          +      setAuthorizations(@org.apache.thrift.annotation.Nullable TAuthorization authorizations) {
               this.authorizations = authorizations;
               return this;
             }
          @@ -544,7 +622,6 @@ public void setAuthorizationsIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TConsistency
              */
             @org.apache.thrift.annotation.Nullable
          @@ -553,7 +630,6 @@ public TConsistency getConsistency() {
             }
           
             /**
          -   * 
              * @see TConsistency
              */
             public TGet setConsistency(@org.apache.thrift.annotation.Nullable TConsistency consistency) {
          @@ -587,16 +663,20 @@ public TGet setTargetReplicaId(int targetReplicaId) {
             }
           
             public void unsetTargetReplicaId() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID);
             }
           
          -  /** Returns true if field targetReplicaId is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field targetReplicaId is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetTargetReplicaId() {
               return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID);
             }
           
             public void setTargetReplicaIdIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID, value);
             }
           
             public boolean isCacheBlocks() {
          @@ -610,7 +690,8 @@ public TGet setCacheBlocks(boolean cacheBlocks) {
             }
           
             public void unsetCacheBlocks() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID);
             }
           
             /** Returns true if field cacheBlocks is set (has been assigned a value) and false otherwise */
          @@ -619,7 +700,8 @@ public boolean isSetCacheBlocks() {
             }
           
             public void setCacheBlocksIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID, value);
             }
           
             public int getStoreLimit() {
          @@ -633,7 +715,8 @@ public TGet setStoreLimit(int storeLimit) {
             }
           
             public void unsetStoreLimit() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __STORELIMIT_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __STORELIMIT_ISSET_ID);
             }
           
             /** Returns true if field storeLimit is set (has been assigned a value) and false otherwise */
          @@ -642,7 +725,8 @@ public boolean isSetStoreLimit() {
             }
           
             public void setStoreLimitIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STORELIMIT_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STORELIMIT_ISSET_ID, value);
             }
           
             public int getStoreOffset() {
          @@ -656,7 +740,8 @@ public TGet setStoreOffset(int storeOffset) {
             }
           
             public void unsetStoreOffset() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __STOREOFFSET_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __STOREOFFSET_ISSET_ID);
             }
           
             /** Returns true if field storeOffset is set (has been assigned a value) and false otherwise */
          @@ -665,7 +750,8 @@ public boolean isSetStoreOffset() {
             }
           
             public void setStoreOffsetIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STOREOFFSET_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STOREOFFSET_ISSET_ID, value);
             }
           
             public boolean isExistence_only() {
          @@ -679,7 +765,8 @@ public TGet setExistence_only(boolean existence_only) {
             }
           
             public void unsetExistence_only() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __EXISTENCE_ONLY_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __EXISTENCE_ONLY_ISSET_ID);
             }
           
             /** Returns true if field existence_only is set (has been assigned a value) and false otherwise */
          @@ -688,7 +775,8 @@ public boolean isSetExistence_only() {
             }
           
             public void setExistence_onlyIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __EXISTENCE_ONLY_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __EXISTENCE_ONLY_ISSET_ID, value);
             }
           
             public byte[] getFilterBytes() {
          @@ -701,11 +789,13 @@ public java.nio.ByteBuffer bufferForFilterBytes() {
             }
           
             public TGet setFilterBytes(byte[] filterBytes) {
          -    this.filterBytes = filterBytes == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(filterBytes.clone());
          +    this.filterBytes = filterBytes == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(filterBytes.clone());
               return this;
             }
           
          -  public TGet setFilterBytes(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterBytes) {
          +  public TGet
          +      setFilterBytes(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterBytes) {
               this.filterBytes = org.apache.thrift.TBaseHelper.copyBinary(filterBytes);
               return this;
             }
          @@ -725,139 +815,140 @@ public void setFilterBytesIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case COLUMNS:
          -      if (value == null) {
          -        unsetColumns();
          -      } else {
           -        setColumns((java.util.List<TColumn>)value);
          -      }
          -      break;
          +      case COLUMNS:
          +        if (value == null) {
          +          unsetColumns();
          +        } else {
           +          setColumns((java.util.List<TColumn>) value);
          +        }
          +        break;
           
          -    case TIMESTAMP:
          -      if (value == null) {
          -        unsetTimestamp();
          -      } else {
          -        setTimestamp((java.lang.Long)value);
          -      }
          -      break;
          +      case TIMESTAMP:
          +        if (value == null) {
          +          unsetTimestamp();
          +        } else {
          +          setTimestamp((java.lang.Long) value);
          +        }
          +        break;
           
          -    case TIME_RANGE:
          -      if (value == null) {
          -        unsetTimeRange();
          -      } else {
          -        setTimeRange((TTimeRange)value);
          -      }
          -      break;
          +      case TIME_RANGE:
          +        if (value == null) {
          +          unsetTimeRange();
          +        } else {
          +          setTimeRange((TTimeRange) value);
          +        }
          +        break;
           
          -    case MAX_VERSIONS:
          -      if (value == null) {
          -        unsetMaxVersions();
          -      } else {
          -        setMaxVersions((java.lang.Integer)value);
          -      }
          -      break;
          +      case MAX_VERSIONS:
          +        if (value == null) {
          +          unsetMaxVersions();
          +        } else {
          +          setMaxVersions((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case FILTER_STRING:
          -      if (value == null) {
          -        unsetFilterString();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setFilterString((byte[])value);
          +      case FILTER_STRING:
          +        if (value == null) {
          +          unsetFilterString();
                   } else {
          -          setFilterString((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setFilterString((byte[]) value);
          +          } else {
          +            setFilterString((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case ATTRIBUTES:
          -      if (value == null) {
          -        unsetAttributes();
          -      } else {
           -        setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case ATTRIBUTES:
          +        if (value == null) {
          +          unsetAttributes();
          +        } else {
           +          setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case AUTHORIZATIONS:
          -      if (value == null) {
          -        unsetAuthorizations();
          -      } else {
          -        setAuthorizations((TAuthorization)value);
          -      }
          -      break;
          +      case AUTHORIZATIONS:
          +        if (value == null) {
          +          unsetAuthorizations();
          +        } else {
          +          setAuthorizations((TAuthorization) value);
          +        }
          +        break;
           
          -    case CONSISTENCY:
          -      if (value == null) {
          -        unsetConsistency();
          -      } else {
          -        setConsistency((TConsistency)value);
          -      }
          -      break;
          +      case CONSISTENCY:
          +        if (value == null) {
          +          unsetConsistency();
          +        } else {
          +          setConsistency((TConsistency) value);
          +        }
          +        break;
           
          -    case TARGET_REPLICA_ID:
          -      if (value == null) {
          -        unsetTargetReplicaId();
          -      } else {
          -        setTargetReplicaId((java.lang.Integer)value);
          -      }
          -      break;
          +      case TARGET_REPLICA_ID:
          +        if (value == null) {
          +          unsetTargetReplicaId();
          +        } else {
          +          setTargetReplicaId((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case CACHE_BLOCKS:
          -      if (value == null) {
          -        unsetCacheBlocks();
          -      } else {
          -        setCacheBlocks((java.lang.Boolean)value);
          -      }
          -      break;
          +      case CACHE_BLOCKS:
          +        if (value == null) {
          +          unsetCacheBlocks();
          +        } else {
          +          setCacheBlocks((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case STORE_LIMIT:
          -      if (value == null) {
          -        unsetStoreLimit();
          -      } else {
          -        setStoreLimit((java.lang.Integer)value);
          -      }
          -      break;
          +      case STORE_LIMIT:
          +        if (value == null) {
          +          unsetStoreLimit();
          +        } else {
          +          setStoreLimit((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case STORE_OFFSET:
          -      if (value == null) {
          -        unsetStoreOffset();
          -      } else {
          -        setStoreOffset((java.lang.Integer)value);
          -      }
          -      break;
          +      case STORE_OFFSET:
          +        if (value == null) {
          +          unsetStoreOffset();
          +        } else {
          +          setStoreOffset((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case EXISTENCE_ONLY:
          -      if (value == null) {
          -        unsetExistence_only();
          -      } else {
          -        setExistence_only((java.lang.Boolean)value);
          -      }
          -      break;
          +      case EXISTENCE_ONLY:
          +        if (value == null) {
          +          unsetExistence_only();
          +        } else {
          +          setExistence_only((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case FILTER_BYTES:
          -      if (value == null) {
          -        unsetFilterBytes();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setFilterBytes((byte[])value);
          +      case FILTER_BYTES:
          +        if (value == null) {
          +          unsetFilterBytes();
                   } else {
          -          setFilterBytes((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setFilterBytes((byte[]) value);
          +          } else {
          +            setFilterBytes((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
               }
             }
          @@ -865,242 +956,212 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case COLUMNS:
          -      return getColumns();
          +      case COLUMNS:
          +        return getColumns();
           
          -    case TIMESTAMP:
          -      return getTimestamp();
          +      case TIMESTAMP:
          +        return getTimestamp();
           
          -    case TIME_RANGE:
          -      return getTimeRange();
          +      case TIME_RANGE:
          +        return getTimeRange();
           
          -    case MAX_VERSIONS:
          -      return getMaxVersions();
          +      case MAX_VERSIONS:
          +        return getMaxVersions();
           
          -    case FILTER_STRING:
          -      return getFilterString();
          +      case FILTER_STRING:
          +        return getFilterString();
           
          -    case ATTRIBUTES:
          -      return getAttributes();
          +      case ATTRIBUTES:
          +        return getAttributes();
           
          -    case AUTHORIZATIONS:
          -      return getAuthorizations();
          +      case AUTHORIZATIONS:
          +        return getAuthorizations();
           
          -    case CONSISTENCY:
          -      return getConsistency();
          +      case CONSISTENCY:
          +        return getConsistency();
           
          -    case TARGET_REPLICA_ID:
          -      return getTargetReplicaId();
          +      case TARGET_REPLICA_ID:
          +        return getTargetReplicaId();
           
          -    case CACHE_BLOCKS:
          -      return isCacheBlocks();
          +      case CACHE_BLOCKS:
          +        return isCacheBlocks();
           
          -    case STORE_LIMIT:
          -      return getStoreLimit();
          +      case STORE_LIMIT:
          +        return getStoreLimit();
           
          -    case STORE_OFFSET:
          -      return getStoreOffset();
          +      case STORE_OFFSET:
          +        return getStoreOffset();
           
          -    case EXISTENCE_ONLY:
          -      return isExistence_only();
          +      case EXISTENCE_ONLY:
          +        return isExistence_only();
           
          -    case FILTER_BYTES:
          -      return getFilterBytes();
          +      case FILTER_BYTES:
          +        return getFilterBytes();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case ROW:
          -      return isSetRow();
          -    case COLUMNS:
          -      return isSetColumns();
          -    case TIMESTAMP:
          -      return isSetTimestamp();
          -    case TIME_RANGE:
          -      return isSetTimeRange();
          -    case MAX_VERSIONS:
          -      return isSetMaxVersions();
          -    case FILTER_STRING:
          -      return isSetFilterString();
          -    case ATTRIBUTES:
          -      return isSetAttributes();
          -    case AUTHORIZATIONS:
          -      return isSetAuthorizations();
          -    case CONSISTENCY:
          -      return isSetConsistency();
          -    case TARGET_REPLICA_ID:
          -      return isSetTargetReplicaId();
          -    case CACHE_BLOCKS:
          -      return isSetCacheBlocks();
          -    case STORE_LIMIT:
          -      return isSetStoreLimit();
          -    case STORE_OFFSET:
          -      return isSetStoreOffset();
          -    case EXISTENCE_ONLY:
          -      return isSetExistence_only();
          -    case FILTER_BYTES:
          -      return isSetFilterBytes();
          +      case ROW:
          +        return isSetRow();
          +      case COLUMNS:
          +        return isSetColumns();
          +      case TIMESTAMP:
          +        return isSetTimestamp();
          +      case TIME_RANGE:
          +        return isSetTimeRange();
          +      case MAX_VERSIONS:
          +        return isSetMaxVersions();
          +      case FILTER_STRING:
          +        return isSetFilterString();
          +      case ATTRIBUTES:
          +        return isSetAttributes();
          +      case AUTHORIZATIONS:
          +        return isSetAuthorizations();
          +      case CONSISTENCY:
          +        return isSetConsistency();
          +      case TARGET_REPLICA_ID:
          +        return isSetTargetReplicaId();
          +      case CACHE_BLOCKS:
          +        return isSetCacheBlocks();
          +      case STORE_LIMIT:
          +        return isSetStoreLimit();
          +      case STORE_OFFSET:
          +        return isSetStoreOffset();
          +      case EXISTENCE_ONLY:
          +        return isSetExistence_only();
          +      case FILTER_BYTES:
          +        return isSetFilterBytes();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TGet)
          -      return this.equals((TGet)that);
          +    if (that instanceof TGet) return this.equals((TGet) that);
               return false;
             }
           
             public boolean equals(TGet that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_columns = true && this.isSetColumns();
               boolean that_present_columns = true && that.isSetColumns();
               if (this_present_columns || that_present_columns) {
          -      if (!(this_present_columns && that_present_columns))
          -        return false;
          -      if (!this.columns.equals(that.columns))
          -        return false;
          +      if (!(this_present_columns && that_present_columns)) return false;
          +      if (!this.columns.equals(that.columns)) return false;
               }
           
               boolean this_present_timestamp = true && this.isSetTimestamp();
               boolean that_present_timestamp = true && that.isSetTimestamp();
               if (this_present_timestamp || that_present_timestamp) {
          -      if (!(this_present_timestamp && that_present_timestamp))
          -        return false;
          -      if (this.timestamp != that.timestamp)
          -        return false;
          +      if (!(this_present_timestamp && that_present_timestamp)) return false;
          +      if (this.timestamp != that.timestamp) return false;
               }
           
               boolean this_present_timeRange = true && this.isSetTimeRange();
               boolean that_present_timeRange = true && that.isSetTimeRange();
               if (this_present_timeRange || that_present_timeRange) {
          -      if (!(this_present_timeRange && that_present_timeRange))
          -        return false;
          -      if (!this.timeRange.equals(that.timeRange))
          -        return false;
          +      if (!(this_present_timeRange && that_present_timeRange)) return false;
          +      if (!this.timeRange.equals(that.timeRange)) return false;
               }
           
               boolean this_present_maxVersions = true && this.isSetMaxVersions();
               boolean that_present_maxVersions = true && that.isSetMaxVersions();
               if (this_present_maxVersions || that_present_maxVersions) {
          -      if (!(this_present_maxVersions && that_present_maxVersions))
          -        return false;
          -      if (this.maxVersions != that.maxVersions)
          -        return false;
          +      if (!(this_present_maxVersions && that_present_maxVersions)) return false;
          +      if (this.maxVersions != that.maxVersions) return false;
               }
           
               boolean this_present_filterString = true && this.isSetFilterString();
               boolean that_present_filterString = true && that.isSetFilterString();
               if (this_present_filterString || that_present_filterString) {
          -      if (!(this_present_filterString && that_present_filterString))
          -        return false;
          -      if (!this.filterString.equals(that.filterString))
          -        return false;
          +      if (!(this_present_filterString && that_present_filterString)) return false;
          +      if (!this.filterString.equals(that.filterString)) return false;
               }
           
               boolean this_present_attributes = true && this.isSetAttributes();
               boolean that_present_attributes = true && that.isSetAttributes();
               if (this_present_attributes || that_present_attributes) {
          -      if (!(this_present_attributes && that_present_attributes))
          -        return false;
          -      if (!this.attributes.equals(that.attributes))
          -        return false;
          +      if (!(this_present_attributes && that_present_attributes)) return false;
          +      if (!this.attributes.equals(that.attributes)) return false;
               }
           
               boolean this_present_authorizations = true && this.isSetAuthorizations();
               boolean that_present_authorizations = true && that.isSetAuthorizations();
               if (this_present_authorizations || that_present_authorizations) {
          -      if (!(this_present_authorizations && that_present_authorizations))
          -        return false;
          -      if (!this.authorizations.equals(that.authorizations))
          -        return false;
          +      if (!(this_present_authorizations && that_present_authorizations)) return false;
          +      if (!this.authorizations.equals(that.authorizations)) return false;
               }
           
               boolean this_present_consistency = true && this.isSetConsistency();
               boolean that_present_consistency = true && that.isSetConsistency();
               if (this_present_consistency || that_present_consistency) {
          -      if (!(this_present_consistency && that_present_consistency))
          -        return false;
          -      if (!this.consistency.equals(that.consistency))
          -        return false;
          +      if (!(this_present_consistency && that_present_consistency)) return false;
          +      if (!this.consistency.equals(that.consistency)) return false;
               }
           
               boolean this_present_targetReplicaId = true && this.isSetTargetReplicaId();
               boolean that_present_targetReplicaId = true && that.isSetTargetReplicaId();
               if (this_present_targetReplicaId || that_present_targetReplicaId) {
          -      if (!(this_present_targetReplicaId && that_present_targetReplicaId))
          -        return false;
          -      if (this.targetReplicaId != that.targetReplicaId)
          -        return false;
          +      if (!(this_present_targetReplicaId && that_present_targetReplicaId)) return false;
          +      if (this.targetReplicaId != that.targetReplicaId) return false;
               }
           
               boolean this_present_cacheBlocks = true && this.isSetCacheBlocks();
               boolean that_present_cacheBlocks = true && that.isSetCacheBlocks();
               if (this_present_cacheBlocks || that_present_cacheBlocks) {
          -      if (!(this_present_cacheBlocks && that_present_cacheBlocks))
          -        return false;
          -      if (this.cacheBlocks != that.cacheBlocks)
          -        return false;
          +      if (!(this_present_cacheBlocks && that_present_cacheBlocks)) return false;
          +      if (this.cacheBlocks != that.cacheBlocks) return false;
               }
           
               boolean this_present_storeLimit = true && this.isSetStoreLimit();
               boolean that_present_storeLimit = true && that.isSetStoreLimit();
               if (this_present_storeLimit || that_present_storeLimit) {
          -      if (!(this_present_storeLimit && that_present_storeLimit))
          -        return false;
          -      if (this.storeLimit != that.storeLimit)
          -        return false;
          +      if (!(this_present_storeLimit && that_present_storeLimit)) return false;
          +      if (this.storeLimit != that.storeLimit) return false;
               }
           
               boolean this_present_storeOffset = true && this.isSetStoreOffset();
               boolean that_present_storeOffset = true && that.isSetStoreOffset();
               if (this_present_storeOffset || that_present_storeOffset) {
          -      if (!(this_present_storeOffset && that_present_storeOffset))
          -        return false;
          -      if (this.storeOffset != that.storeOffset)
          -        return false;
          +      if (!(this_present_storeOffset && that_present_storeOffset)) return false;
          +      if (this.storeOffset != that.storeOffset) return false;
               }
           
               boolean this_present_existence_only = true && this.isSetExistence_only();
               boolean that_present_existence_only = true && that.isSetExistence_only();
               if (this_present_existence_only || that_present_existence_only) {
          -      if (!(this_present_existence_only && that_present_existence_only))
          -        return false;
          -      if (this.existence_only != that.existence_only)
          -        return false;
          +      if (!(this_present_existence_only && that_present_existence_only)) return false;
          +      if (this.existence_only != that.existence_only) return false;
               }
           
               boolean this_present_filterBytes = true && this.isSetFilterBytes();
               boolean that_present_filterBytes = true && that.isSetFilterBytes();
               if (this_present_filterBytes || that_present_filterBytes) {
          -      if (!(this_present_filterBytes && that_present_filterBytes))
          -        return false;
          -      if (!this.filterBytes.equals(that.filterBytes))
          -        return false;
          +      if (!(this_present_filterBytes && that_present_filterBytes)) return false;
          +      if (!this.filterBytes.equals(that.filterBytes)) return false;
               }
           
               return true;
          @@ -1111,64 +1172,50 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -    if (isSetColumns())
          -      hashCode = hashCode * 8191 + columns.hashCode();
          +    if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetTimestamp()) ? 131071 : 524287);
               if (isSetTimestamp())
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
               hashCode = hashCode * 8191 + ((isSetTimeRange()) ? 131071 : 524287);
          -    if (isSetTimeRange())
          -      hashCode = hashCode * 8191 + timeRange.hashCode();
          +    if (isSetTimeRange()) hashCode = hashCode * 8191 + timeRange.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetMaxVersions()) ? 131071 : 524287);
          -    if (isSetMaxVersions())
          -      hashCode = hashCode * 8191 + maxVersions;
          +    if (isSetMaxVersions()) hashCode = hashCode * 8191 + maxVersions;
           
               hashCode = hashCode * 8191 + ((isSetFilterString()) ? 131071 : 524287);
          -    if (isSetFilterString())
          -      hashCode = hashCode * 8191 + filterString.hashCode();
          +    if (isSetFilterString()) hashCode = hashCode * 8191 + filterString.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -    if (isSetAttributes())
          -      hashCode = hashCode * 8191 + attributes.hashCode();
          +    if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetAuthorizations()) ? 131071 : 524287);
          -    if (isSetAuthorizations())
          -      hashCode = hashCode * 8191 + authorizations.hashCode();
          +    if (isSetAuthorizations()) hashCode = hashCode * 8191 + authorizations.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetConsistency()) ? 131071 : 524287);
          -    if (isSetConsistency())
          -      hashCode = hashCode * 8191 + consistency.getValue();
          +    if (isSetConsistency()) hashCode = hashCode * 8191 + consistency.getValue();
           
               hashCode = hashCode * 8191 + ((isSetTargetReplicaId()) ? 131071 : 524287);
          -    if (isSetTargetReplicaId())
          -      hashCode = hashCode * 8191 + targetReplicaId;
          +    if (isSetTargetReplicaId()) hashCode = hashCode * 8191 + targetReplicaId;
           
               hashCode = hashCode * 8191 + ((isSetCacheBlocks()) ? 131071 : 524287);
          -    if (isSetCacheBlocks())
          -      hashCode = hashCode * 8191 + ((cacheBlocks) ? 131071 : 524287);
          +    if (isSetCacheBlocks()) hashCode = hashCode * 8191 + ((cacheBlocks) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetStoreLimit()) ? 131071 : 524287);
          -    if (isSetStoreLimit())
          -      hashCode = hashCode * 8191 + storeLimit;
          +    if (isSetStoreLimit()) hashCode = hashCode * 8191 + storeLimit;
           
               hashCode = hashCode * 8191 + ((isSetStoreOffset()) ? 131071 : 524287);
          -    if (isSetStoreOffset())
          -      hashCode = hashCode * 8191 + storeOffset;
          +    if (isSetStoreOffset()) hashCode = hashCode * 8191 + storeOffset;
           
               hashCode = hashCode * 8191 + ((isSetExistence_only()) ? 131071 : 524287);
          -    if (isSetExistence_only())
          -      hashCode = hashCode * 8191 + ((existence_only) ? 131071 : 524287);
          +    if (isSetExistence_only()) hashCode = hashCode * 8191 + ((existence_only) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetFilterBytes()) ? 131071 : 524287);
          -    if (isSetFilterBytes())
          -      hashCode = hashCode * 8191 + filterBytes.hashCode();
          +    if (isSetFilterBytes()) hashCode = hashCode * 8191 + filterBytes.hashCode();
           
               return hashCode;
             }
          @@ -1236,7 +1283,8 @@ public int compareTo(TGet other) {
                 return lastComparison;
               }
               if (isSetFilterString()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filterString, other.filterString);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.filterString, other.filterString);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1256,7 +1304,8 @@ public int compareTo(TGet other) {
                 return lastComparison;
               }
               if (isSetAuthorizations()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authorizations, other.authorizations);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.authorizations, other.authorizations);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1271,12 +1320,14 @@ public int compareTo(TGet other) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetTargetReplicaId(), other.isSetTargetReplicaId());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetTargetReplicaId(), other.isSetTargetReplicaId());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetTargetReplicaId()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.targetReplicaId, other.targetReplicaId);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.targetReplicaId, other.targetReplicaId);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1316,7 +1367,8 @@ public int compareTo(TGet other) {
                 return lastComparison;
               }
               if (isSetExistence_only()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.existence_only, other.existence_only);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.existence_only, other.existence_only);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1343,7 +1395,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -1478,7 +1531,8 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (row == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'row' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
               if (timeRange != null) {
          @@ -1491,17 +1545,21 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
          @@ -1515,13 +1573,13 @@ public TGetStandardScheme getScheme() {
           
             private static class TGetStandardScheme extends org.apache.thrift.scheme.StandardScheme {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -1529,7 +1587,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1538,9 +1596,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                         {
                           org.apache.thrift.protocol.TList _list16 = iprot.readListBegin();
                           struct.columns = new java.util.ArrayList(_list16.size);
          -                @org.apache.thrift.annotation.Nullable TColumn _elem17;
          -                for (int _i18 = 0; _i18 < _list16.size; ++_i18)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TColumn _elem17;
          +                for (int _i18 = 0; _i18 < _list16.size; ++_i18) {
                             _elem17 = new TColumn();
                             _elem17.read(iprot);
                             struct.columns.add(_elem17);
          @@ -1548,7 +1606,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                           iprot.readListEnd();
                         }
                         struct.setColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1556,7 +1614,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.timestamp = iprot.readI64();
                         struct.setTimestampIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1565,7 +1623,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                         struct.timeRange = new TTimeRange();
                         struct.timeRange.read(iprot);
                         struct.setTimeRangeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1573,7 +1631,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.maxVersions = iprot.readI32();
                         struct.setMaxVersionsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1581,7 +1639,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.filterString = iprot.readBinary();
                         struct.setFilterStringIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1589,11 +1647,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map19 = iprot.readMapBegin();
          -                struct.attributes = new java.util.HashMap(2*_map19.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key20;
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val21;
          -                for (int _i22 = 0; _i22 < _map19.size; ++_i22)
          -                {
          +                struct.attributes = new java.util.HashMap(
          +                    2 * _map19.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key20;
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _val21;
          +                for (int _i22 = 0; _i22 < _map19.size; ++_i22) {
                             _key20 = iprot.readBinary();
                             _val21 = iprot.readBinary();
                             struct.attributes.put(_key20, _val21);
          @@ -1601,7 +1661,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                           iprot.readMapEnd();
                         }
                         struct.setAttributesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1610,15 +1670,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                         struct.authorizations = new TAuthorization();
                         struct.authorizations.read(iprot);
                         struct.setAuthorizationsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 9: // CONSISTENCY
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.consistency = org.apache.hadoop.hbase.thrift2.generated.TConsistency.findByValue(iprot.readI32());
          +              struct.consistency = org.apache.hadoop.hbase.thrift2.generated.TConsistency
          +                  .findByValue(iprot.readI32());
                         struct.setConsistencyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1626,7 +1687,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.targetReplicaId = iprot.readI32();
                         struct.setTargetReplicaIdIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1634,7 +1695,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.cacheBlocks = iprot.readBool();
                         struct.setCacheBlocksIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1642,7 +1703,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.storeLimit = iprot.readI32();
                         struct.setStoreLimitIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1650,7 +1711,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.storeOffset = iprot.readI32();
                         struct.setStoreOffsetIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1658,7 +1719,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.existence_only = iprot.readBool();
                         struct.setExistence_onlyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1666,7 +1727,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.filterBytes = iprot.readBinary();
                         struct.setFilterBytesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1681,7 +1742,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGet struct) throws
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TGet struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TGet struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -1694,9 +1756,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TGet struct) throw
                   if (struct.isSetColumns()) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          -            for (TColumn _iter23 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          +            for (TColumn _iter23 : struct.columns) {
                         _iter23.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -1732,9 +1794,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TGet struct) throw
                   if (struct.isSetAttributes()) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          -            for (java.util.Map.Entry _iter24 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
          +            for (java.util.Map.Entry _iter24 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter24.getKey());
                         oprot.writeBinary(_iter24.getValue());
                       }
          @@ -1804,8 +1868,10 @@ public TGetTupleScheme getScheme() {
             private static class TGetTupleScheme extends org.apache.thrift.scheme.TupleScheme {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TGet struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TGet struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.row);
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetColumns()) {
          @@ -1854,8 +1920,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TGet struct) throws
                 if (struct.isSetColumns()) {
                   {
                     oprot.writeI32(struct.columns.size());
          -          for (TColumn _iter25 : struct.columns)
          -          {
          +          for (TColumn _iter25 : struct.columns) {
                       _iter25.write(oprot);
                     }
                   }
          @@ -1875,8 +1940,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TGet struct) throws
                 if (struct.isSetAttributes()) {
                   {
                     oprot.writeI32(struct.attributes.size());
          -          for (java.util.Map.Entry _iter26 : struct.attributes.entrySet())
          -          {
          +          for (java.util.Map.Entry _iter26 : struct.attributes
          +              .entrySet()) {
                       oprot.writeBinary(_iter26.getKey());
                       oprot.writeBinary(_iter26.getValue());
                     }
          @@ -1909,18 +1974,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TGet struct) throws
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TGet struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TGet struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.row = iprot.readBinary();
                 struct.setRowIsSet(true);
                 java.util.BitSet incoming = iprot.readBitSet(14);
                 if (incoming.get(0)) {
                   {
          -          org.apache.thrift.protocol.TList _list27 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list27 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                     struct.columns = new java.util.ArrayList(_list27.size);
          -          @org.apache.thrift.annotation.Nullable TColumn _elem28;
          -          for (int _i29 = 0; _i29 < _list27.size; ++_i29)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TColumn _elem28;
          +          for (int _i29 = 0; _i29 < _list27.size; ++_i29) {
                       _elem28 = new TColumn();
                       _elem28.read(iprot);
                       struct.columns.add(_elem28);
          @@ -1947,12 +2015,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TGet struct) throws
                 }
                 if (incoming.get(5)) {
                   {
          -          org.apache.thrift.protocol.TMap _map30 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
          -          struct.attributes = new java.util.HashMap(2*_map30.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key31;
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val32;
          -          for (int _i33 = 0; _i33 < _map30.size; ++_i33)
          -          {
          +          org.apache.thrift.protocol.TMap _map30 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +          struct.attributes =
          +              new java.util.HashMap(2 * _map30.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key31;
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _val32;
          +          for (int _i33 = 0; _i33 < _map30.size; ++_i33) {
                       _key31 = iprot.readBinary();
                       _val32 = iprot.readBinary();
                       struct.attributes.put(_key31, _val32);
          @@ -1966,7 +2037,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TGet struct) throws
                   struct.setAuthorizationsIsSet(true);
                 }
                 if (incoming.get(7)) {
          -        struct.consistency = org.apache.hadoop.hbase.thrift2.generated.TConsistency.findByValue(iprot.readI32());
          +        struct.consistency =
          +            org.apache.hadoop.hbase.thrift2.generated.TConsistency.findByValue(iprot.readI32());
                   struct.setConsistencyIsSet(true);
                 }
                 if (incoming.get(8)) {
          @@ -1996,8 +2068,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TGet struct) throws
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
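
The hunks above only rewrap TGet's generated serialization code; behaviour is unchanged. For context, a minimal sketch of round-tripping a TGet through TCompactProtocol, the same protocol the generated writeObject/readObject helpers above use; the row key is a placeholder and the hbase-thrift2 generated classes are assumed to be on the classpath:

```java
// Minimal sketch: serialize a TGet with TCompactProtocol and read it back.
// The row key is a placeholder, not taken from the patch.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.thrift2.generated.TGet;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TIOStreamTransport;

public class TGetRoundTrip {
  public static void main(String[] args) throws Exception {
    TGet original = new TGet(ByteBuffer.wrap("row-1".getBytes(StandardCharsets.UTF_8)));
    original.setMaxVersions(1);

    // write() delegates to the standard or tuple scheme via the scheme() helper shown above.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new TCompactProtocol(new TIOStreamTransport(bytes)));

    TGet copy = new TGet();
    copy.read(new TCompactProtocol(
        new TIOStreamTransport(new ByteArrayInputStream(bytes.toByteArray()))));
    System.out.println(original.equals(copy)); // expected: true
  }
}
```
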
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
          index 0fde1dbf4753..98ab5f325e8d 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
          @@ -1,283 +1,241 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public class THBaseService {
           
             public interface Iface {
           
               /**
                * Test for the existence of columns in the table, as specified in the TGet.
          -     * 
                * @return true if the specified TGet matches one or more keys, false if not
          -     * 
                * @param table the table to check on
          -     * 
                * @param tget the TGet to check for
                */
          -    public boolean exists(java.nio.ByteBuffer table, TGet tget) throws TIOError, org.apache.thrift.TException;
          +    public boolean exists(java.nio.ByteBuffer table, TGet tget)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Test for the existence of columns in the table, as specified by the TGets.
          -     * 
          -     * This will return an array of booleans. Each value will be true if the related Get matches
          -     * one or more keys, false if not.
          -     * 
          +     * Test for the existence of columns in the table, as specified by the TGets. This will return
          +     * an array of booleans. Each value will be true if the related Get matches one or more keys,
          +     * false if not.
                * @param table the table to check on
          -     * 
                * @param tgets a list of TGets to check for
                */
          -    public java.util.List existsAll(java.nio.ByteBuffer table, java.util.List tgets) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List existsAll(java.nio.ByteBuffer table,
          +        java.util.List tgets) throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Method for getting data from a row.
          -     * 
          -     * If the row cannot be found an empty Result is returned.
          +     * Method for getting data from a row. If the row cannot be found an empty Result is returned.
                * This can be checked by the empty field of the TResult
          -     * 
                * @return the result
          -     * 
                * @param table the table to get from
          -     * 
                * @param tget the TGet to fetch
                */
          -    public TResult get(java.nio.ByteBuffer table, TGet tget) throws TIOError, org.apache.thrift.TException;
          +    public TResult get(java.nio.ByteBuffer table, TGet tget)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Method for getting multiple rows.
          -     * 
          -     * If a row cannot be found there will be a null
          -     * value in the result list for that TGet at the
          -     * same position.
          -     * 
          -     * So the Results are in the same order as the TGets.
          -     * 
          +     * Method for getting multiple rows. If a row cannot be found there will be a null value in the
          +     * result list for that TGet at the same position. So the Results are in the same order as the
          +     * TGets.
                * @param table the table to get from
          -     * 
          -     * @param tgets a list of TGets to fetch, the Result list
          -     * will have the Results at corresponding positions
          -     * or null if there was an error
          +     * @param tgets a list of TGets to fetch, the Result list will have the Results at corresponding
          +     *          positions or null if there was an error
                */
          -    public java.util.List getMultiple(java.nio.ByteBuffer table, java.util.List tgets) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List getMultiple(java.nio.ByteBuffer table,
          +        java.util.List tgets) throws TIOError, org.apache.thrift.TException;
           
               /**
                * Commit a TPut to a table.
          -     * 
                * @param table the table to put data in
          -     * 
                * @param tput the TPut to put
                */
          -    public void put(java.nio.ByteBuffer table, TPut tput) throws TIOError, org.apache.thrift.TException;
          +    public void put(java.nio.ByteBuffer table, TPut tput)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Atomically checks if a row/family/qualifier value matches the expected
          -     * value. If it does, it adds the TPut.
          -     * 
          +     * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
          +     * adds the TPut.
                * @return true if the new put was executed, false otherwise
          -     * 
                * @param table to check in and put to
          -     * 
                * @param row row to check
          -     * 
                * @param family column family to check
          -     * 
                * @param qualifier column qualifier to check
          -     * 
          -     * @param value the expected value, if not provided the
          -     * check is for the non-existence of the
          -     * column in question
          -     * 
          +     * @param value the expected value, if not provided the check is for the non-existence of the
          +     *          column in question
                * @param tput the TPut to put if the check succeeds
                */
          -    public boolean checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TPut tput) throws TIOError, org.apache.thrift.TException;
          +    public boolean checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TPut tput) throws TIOError, org.apache.thrift.TException;
           
               /**
                * Commit a List of Puts to the table.
          -     * 
                * @param table the table to put data in
          -     * 
                * @param tputs a list of TPuts to commit
                */
          -    public void putMultiple(java.nio.ByteBuffer table, java.util.List tputs) throws TIOError, org.apache.thrift.TException;
          +    public void putMultiple(java.nio.ByteBuffer table, java.util.List tputs)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Deletes as specified by the TDelete.
          -     * 
          -     * Note: "delete" is a reserved keyword and cannot be used in Thrift
          -     * thus the inconsistent naming scheme from the other functions.
          -     * 
          +     * Deletes as specified by the TDelete. Note: "delete" is a reserved keyword and cannot be used
          +     * in Thrift thus the inconsistent naming scheme from the other functions.
                * @param table the table to delete from
          -     * 
                * @param tdelete the TDelete to delete
                */
          -    public void deleteSingle(java.nio.ByteBuffer table, TDelete tdelete) throws TIOError, org.apache.thrift.TException;
          +    public void deleteSingle(java.nio.ByteBuffer table, TDelete tdelete)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Bulk commit a List of TDeletes to the table.
          -     * 
          -     * Throws a TIOError if any of the deletes fail.
          -     * 
          +     * Bulk commit a List of TDeletes to the table. Throws a TIOError if any of the deletes fail.
                * Always returns an empty list for backwards compatibility.
          -     * 
                * @param table the table to delete from
          -     * 
                * @param tdeletes list of TDeletes to delete
                */
          -    public java.util.List deleteMultiple(java.nio.ByteBuffer table, java.util.List tdeletes) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List deleteMultiple(java.nio.ByteBuffer table,
          +        java.util.List tdeletes) throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Atomically checks if a row/family/qualifier value matches the expected
          -     * value. If it does, it adds the delete.
          -     * 
          +     * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
          +     * adds the delete.
                * @return true if the new delete was executed, false otherwise
          -     * 
                * @param table to check in and delete from
          -     * 
                * @param row row to check
          -     * 
                * @param family column family to check
          -     * 
                * @param qualifier column qualifier to check
          -     * 
          -     * @param value the expected value, if not provided the
          -     * check is for the non-existence of the
          -     * column in question
          -     * 
          +     * @param value the expected value, if not provided the check is for the non-existence of the
          +     *          column in question
                * @param tdelete the TDelete to execute if the check succeeds
                */
          -    public boolean checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TDelete tdelete) throws TIOError, org.apache.thrift.TException;
          +    public boolean checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TDelete tdelete) throws TIOError, org.apache.thrift.TException;
           
          -    public TResult increment(java.nio.ByteBuffer table, TIncrement tincrement) throws TIOError, org.apache.thrift.TException;
          +    public TResult increment(java.nio.ByteBuffer table, TIncrement tincrement)
          +        throws TIOError, org.apache.thrift.TException;
           
          -    public TResult append(java.nio.ByteBuffer table, TAppend tappend) throws TIOError, org.apache.thrift.TException;
          +    public TResult append(java.nio.ByteBuffer table, TAppend tappend)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Get a Scanner for the provided TScan object.
          -     * 
                * @return Scanner Id to be used with other scanner procedures
          -     * 
                * @param table the table to get the Scanner for
          -     * 
                * @param tscan the scan object to get a Scanner for
                */
          -    public int openScanner(java.nio.ByteBuffer table, TScan tscan) throws TIOError, org.apache.thrift.TException;
          +    public int openScanner(java.nio.ByteBuffer table, TScan tscan)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Grabs multiple rows from a Scanner.
          -     * 
                * @return Between zero and numRows TResults
          -     * 
          -     * @param scannerId the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
          -     * 
          +     * @param scannerId the Id of the Scanner to return rows from. This is an Id returned from the
          +     *          openScanner function.
                * @param numRows number of rows to return
                */
          -    public java.util.List getScannerRows(int scannerId, int numRows) throws TIOError, TIllegalArgument, org.apache.thrift.TException;
          +    public java.util.List getScannerRows(int scannerId, int numRows)
          +        throws TIOError, TIllegalArgument, org.apache.thrift.TException;
           
               /**
          -     * Closes the scanner. Should be called to free server side resources timely.
          -     * Typically close once the scanner is not needed anymore, i.e. after looping
          -     * over it to get all the required rows.
          -     * 
          +     * Closes the scanner. Should be called to free server side resources timely. Typically close
          +     * once the scanner is not needed anymore, i.e. after looping over it to get all the required
          +     * rows.
                * @param scannerId the Id of the Scanner to close *
                */
          -    public void closeScanner(int scannerId) throws TIOError, TIllegalArgument, org.apache.thrift.TException;
          +    public void closeScanner(int scannerId)
          +        throws TIOError, TIllegalArgument, org.apache.thrift.TException;
           
               /**
                * mutateRow performs multiple mutations atomically on a single row.
          -     * 
                * @param table table to apply the mutations
          -     * 
                * @param trowMutations mutations to apply
                */
          -    public void mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations) throws TIOError, org.apache.thrift.TException;
          +    public void mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Get results for the provided TScan object.
          -     * This helper function opens a scanner, get the results and close the scanner.
          -     * 
          +     * Get results for the provided TScan object. This helper function opens a scanner, get the
          +     * results and close the scanner.
                * @return between zero and numRows TResults
          -     * 
                * @param table the table to get the Scanner for
          -     * 
                * @param tscan the scan object to get a Scanner for
          -     * 
                * @param numRows number of rows to return
                */
          -    public java.util.List getScannerResults(java.nio.ByteBuffer table, TScan tscan, int numRows) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List getScannerResults(java.nio.ByteBuffer table, TScan tscan,
          +        int numRows) throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Given a table and a row get the location of the region that
          -     * would contain the given row key.
          -     * 
          -     * reload = true means the cache will be cleared and the location
          -     * will be fetched from meta.
          -     * 
          +     * Given a table and a row get the location of the region that would contain the given row key.
          +     * reload = true means the cache will be cleared and the location will be fetched from meta.
                * @param table
                * @param row
                * @param reload
                */
          -    public THRegionLocation getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row, boolean reload) throws TIOError, org.apache.thrift.TException;
          +    public THRegionLocation getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        boolean reload) throws TIOError, org.apache.thrift.TException;
           
               /**
                * Get all of the region locations for a given table.
          -     * 
          -     * 
                * @param table
                */
          -    public java.util.List getAllRegionLocations(java.nio.ByteBuffer table) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List getAllRegionLocations(java.nio.ByteBuffer table)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Atomically checks if a row/family/qualifier value matches the expected
          -     * value. If it does, it mutates the row.
          -     * 
          +     * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
          +     * mutates the row.
                * @return true if the row was mutated, false otherwise
          -     * 
                * @param table to check in and delete from
          -     * 
                * @param row row to check
          -     * 
                * @param family column family to check
          -     * 
                * @param qualifier column qualifier to check
          -     * 
                * @param compareOperator comparison to make on the value
          -     * 
          -     * @param value the expected value to be compared against, if not provided the
          -     * check is for the non-existence of the column in question
          -     * 
          +     * @param value the expected value to be compared against, if not provided the check is for the
          +     *          non-existence of the column in question
                * @param rowMutations row mutations to execute if the value matches
                */
          -    public boolean checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator, java.nio.ByteBuffer value, TRowMutations rowMutations) throws TIOError, org.apache.thrift.TException;
          +    public boolean checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator,
          +        java.nio.ByteBuffer value, TRowMutations rowMutations)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Get a table descriptor.
                * @return the TableDescriptor of the giving tablename
          -     * 
          -     * 
                * @param table the tablename of the table to get tableDescriptor
                */
          -    public TTableDescriptor getTableDescriptor(TTableName table) throws TIOError, org.apache.thrift.TException;
          +    public TTableDescriptor getTableDescriptor(TTableName table)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Get table descriptors of tables.
                * @return the TableDescriptor of the giving tablename
          -     * 
          -     * 
                * @param tables the tablename list of the tables to get tableDescriptor
                */
          -    public java.util.List getTableDescriptors(java.util.List tables) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List getTableDescriptors(java.util.List tables)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * 
                * @return true if table exists already, false if not
          -     * 
          -     * 
                * @param tableName the tablename of the tables to check
                */
               public boolean tableExists(TTableName tableName) throws TIOError, org.apache.thrift.TException;
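
The Javadoc reflow above does not change the Iface contract. A hedged sketch of how a caller might combine exists() and get(); the table and row names are invented for illustration, and the THBaseService.Iface instance is assumed to come from an existing client connection:

```java
// Hedged sketch of a caller using the Iface methods documented above; names are placeholders.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.thrift2.generated.TGet;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TResult;

public class GetIfPresent {
  static TResult getIfPresent(THBaseService.Iface client) throws Exception {
    ByteBuffer table = ByteBuffer.wrap("example_table".getBytes(StandardCharsets.UTF_8));
    TGet tget = new TGet(ByteBuffer.wrap("row-1".getBytes(StandardCharsets.UTF_8)));
    // exists() checks presence without shipping the row contents back.
    if (!client.exists(table, tget)) {
      return null;
    }
    // get() returns an empty TResult if the row disappeared in the meantime.
    return client.get(table, tget);
  }
}
```
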
          @@ -285,224 +243,184 @@ public interface Iface {
               /**
                * Get table descriptors of tables that match the given pattern
                * @return the tableDescriptors of the matching table
          -     * 
          -     * 
                * @param regex The regular expression to match against
          -     * 
                * @param includeSysTables set to false if match only against userspace tables
                */
          -    public java.util.List getTableDescriptorsByPattern(java.lang.String regex, boolean includeSysTables) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List getTableDescriptorsByPattern(java.lang.String regex,
          +        boolean includeSysTables) throws TIOError, org.apache.thrift.TException;
           
               /**
                * Get table descriptors of tables in the given namespace
                * @return the tableDescriptors in the namespce
          -     * 
          -     * 
                * @param name The namesapce's name
                */
          -    public java.util.List getTableDescriptorsByNamespace(java.lang.String name) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List getTableDescriptorsByNamespace(java.lang.String name)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Get table names of tables that match the given pattern
                * @return the table names of the matching table
          -     * 
          -     * 
                * @param regex The regular expression to match against
          -     * 
                * @param includeSysTables set to false if match only against userspace tables
                */
          -    public java.util.List getTableNamesByPattern(java.lang.String regex, boolean includeSysTables) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List getTableNamesByPattern(java.lang.String regex,
          +        boolean includeSysTables) throws TIOError, org.apache.thrift.TException;
           
               /**
                * Get table names of tables in the given namespace
                * @return the table names of the matching table
          -     * 
          -     * 
                * @param name The namesapce's name
                */
          -    public java.util.List getTableNamesByNamespace(java.lang.String name) throws TIOError, org.apache.thrift.TException;
          +    public java.util.List getTableNamesByNamespace(java.lang.String name)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Creates a new table with an initial set of empty regions defined by the specified split keys.
                * The total number of regions created will be the number of split keys plus one. Synchronous
                * operation.
          -     * 
          -     * 
                * @param desc table descriptor for table
          -     * 
                * @param splitKeys rray of split keys for the initial regions of the table
                */
          -    public void createTable(TTableDescriptor desc, java.util.List splitKeys) throws TIOError, org.apache.thrift.TException;
          +    public void createTable(TTableDescriptor desc, java.util.List splitKeys)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Deletes a table. Synchronous operation.
          -     * 
          -     * 
                * @param tableName the tablename to delete
                */
               public void deleteTable(TTableName tableName) throws TIOError, org.apache.thrift.TException;
           
               /**
                * Truncate a table. Synchronous operation.
          -     * 
          -     * 
                * @param tableName the tablename to truncate
          -     * 
          -     * @param preserveSplits whether to  preserve previous splits
          +     * @param preserveSplits whether to preserve previous splits
                */
          -    public void truncateTable(TTableName tableName, boolean preserveSplits) throws TIOError, org.apache.thrift.TException;
          +    public void truncateTable(TTableName tableName, boolean preserveSplits)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Enalbe a table
          -     * 
          -     * 
                * @param tableName the tablename to enable
                */
               public void enableTable(TTableName tableName) throws TIOError, org.apache.thrift.TException;
           
               /**
                * Disable a table
          -     * 
          -     * 
                * @param tableName the tablename to disable
                */
               public void disableTable(TTableName tableName) throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * 
                * @return true if table is enabled, false if not
          -     * 
          -     * 
                * @param tableName the tablename to check
                */
          -    public boolean isTableEnabled(TTableName tableName) throws TIOError, org.apache.thrift.TException;
          +    public boolean isTableEnabled(TTableName tableName)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * 
                * @return true if table is disabled, false if not
          -     * 
          -     * 
                * @param tableName the tablename to check
                */
          -    public boolean isTableDisabled(TTableName tableName) throws TIOError, org.apache.thrift.TException;
          +    public boolean isTableDisabled(TTableName tableName)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * 
                * @return true if table is available, false if not
          -     * 
          -     * 
                * @param tableName the tablename to check
                */
          -    public boolean isTableAvailable(TTableName tableName) throws TIOError, org.apache.thrift.TException;
          +    public boolean isTableAvailable(TTableName tableName)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     *  * Use this api to check if the table has been created with the specified number of splitkeys
          -     *  * which was used while creating the given table. Note : If this api is used after a table's
          -     *  * region gets splitted, the api may return false.
          -     *  *
          -     *  * @return true if table is available, false if not
          -     *  *
          -     *  * @deprecated Since 2.2.0. Because the same method in Table interface has been deprecated
          -     *  * since 2.0.0, we will remove it in 3.0.0 release.
          -     *  * Use {@link #isTableAvailable(TTableName tableName)} instead
          -     * *
          -     * 
          +     * * Use this api to check if the table has been created with the specified number of splitkeys
          +     * * which was used while creating the given table. Note : If this api is used after a table's *
          +     * region gets splitted, the api may return false. * * @return true if table is available, false
          +     * if not * * @deprecated Since 2.2.0. Because the same method in Table interface has been
          +     * deprecated * since 2.0.0, we will remove it in 3.0.0 release. * Use
          +     * {@link #isTableAvailable(TTableName tableName)} instead *
                * @param tableName the tablename to check
          -     * 
                * @param splitKeys keys to check if the table has been created with all split keys
                */
          -    public boolean isTableAvailableWithSplit(TTableName tableName, java.util.List splitKeys) throws TIOError, org.apache.thrift.TException;
          +    public boolean isTableAvailableWithSplit(TTableName tableName,
          +        java.util.List splitKeys)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Add a column family to an existing table. Synchronous operation.
          -     * 
          -     * 
                * @param tableName the tablename to add column family to
          -     * 
                * @param column column family descriptor of column family to be added
                */
          -    public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column) throws TIOError, org.apache.thrift.TException;
          +    public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Delete a column family from a table. Synchronous operation.
          -     * 
          -     * 
                * @param tableName the tablename to delete column family from
          -     * 
                * @param column name of column family to be deleted
                */
          -    public void deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column) throws TIOError, org.apache.thrift.TException;
          +    public void deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Modify an existing column family on a table. Synchronous operation.
          -     * 
          -     * 
                * @param tableName the tablename to modify column family
          -     * 
                * @param column column family descriptor of column family to be modified
                */
          -    public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column) throws TIOError, org.apache.thrift.TException;
          +    public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Modify an existing table
          -     * 
          -     * 
                * @param desc the descriptor of the table to modify
                */
               public void modifyTable(TTableDescriptor desc) throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Create a new namespace. Blocks until namespace has been successfully created or an exception is
          -     * thrown
          -     * 
          -     * 
          +     * Create a new namespace. Blocks until namespace has been successfully created or an exception
          +     * is thrown
                * @param namespaceDesc descriptor which describes the new namespace
                */
          -    public void createNamespace(TNamespaceDescriptor namespaceDesc) throws TIOError, org.apache.thrift.TException;
          +    public void createNamespace(TNamespaceDescriptor namespaceDesc)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Modify an existing namespace.  Blocks until namespace has been successfully modified or an
          +     * Modify an existing namespace. Blocks until namespace has been successfully modified or an
                * exception is thrown
          -     * 
          -     * 
                * @param namespaceDesc descriptor which describes the new namespace
                */
          -    public void modifyNamespace(TNamespaceDescriptor namespaceDesc) throws TIOError, org.apache.thrift.TException;
          +    public void modifyNamespace(TNamespaceDescriptor namespaceDesc)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
          -     * Blocks until namespace has been successfully deleted or an
          -     * exception is thrown.
          -     * 
          -     * 
          +     * Delete an existing namespace. Only empty namespaces (no tables) can be removed. Blocks until
          +     * namespace has been successfully deleted or an exception is thrown.
                * @param name namespace name
                */
          -    public void deleteNamespace(java.lang.String name) throws TIOError, org.apache.thrift.TException;
          +    public void deleteNamespace(java.lang.String name)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Get a namespace descriptor by name.
                * @retrun the descriptor
          -     * 
          -     * 
                * @param name name of namespace descriptor
                */
          -    public TNamespaceDescriptor getNamespaceDescriptor(java.lang.String name) throws TIOError, org.apache.thrift.TException;
          +    public TNamespaceDescriptor getNamespaceDescriptor(java.lang.String name)
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * @return all namespaces
          -     * 
                */
          -    public java.util.List listNamespaceDescriptors() throws TIOError, org.apache.thrift.TException;
          +    public java.util.List listNamespaceDescriptors()
          +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * @return all namespace names
          -     * 
                */
           -    public java.util.List<java.lang.String> listNamespaces() throws TIOError, org.apache.thrift.TException;
           +    public java.util.List<java.lang.String> listNamespaces()
           +        throws TIOError, org.apache.thrift.TException;
           
               /**
                * Get the type of this thrift server.
          -     * 
                * @return the type of this thrift server
                */
               public TThriftServerType getThriftServerType() throws org.apache.thrift.TException;
          @@ -513,40 +431,34 @@ public interface Iface {
               public java.lang.String getClusterId() throws org.apache.thrift.TException;
           
               /**
          -     * Retrieves online slow RPC logs from the provided list of
          -     * RegionServers
          -     * 
          +     * Retrieves online slow RPC logs from the provided list of RegionServers
                * @return online slowlog response list
                * @throws TIOError if a remote or network exception occurs
          -     * 
                 * @param serverNames Server names to get slowlog responses from
           -     * 
                 * @param logQueryFilter filter to be used if provided
                */
           -    public java.util.List<TOnlineLogRecord> getSlowLogResponses(java.util.Set<TServerName> serverNames, TLogQueryFilter logQueryFilter) throws TIOError, org.apache.thrift.TException;
           +    public java.util.List<TOnlineLogRecord>
           +        getSlowLogResponses(java.util.Set<TServerName> serverNames, TLogQueryFilter logQueryFilter)
           +            throws TIOError, org.apache.thrift.TException;
           
               /**
          -     * Clears online slow/large RPC logs from the provided list of
          -     * RegionServers
          -     * 
          -     * @return List of booleans representing if online slowlog response buffer is cleaned
          -     *   from each RegionServer
          +     * Clears online slow/large RPC logs from the provided list of RegionServers
          +     * @return List of booleans representing if online slowlog response buffer is cleaned from each
          +     *         RegionServer
                * @throws TIOError if a remote or network exception occurs
          -     * 
                 * @param serverNames Set of Server names to clean slowlog responses from
                */
           -    public java.util.List<java.lang.Boolean> clearSlowLogResponses(java.util.Set<TServerName> serverNames) throws TIOError, org.apache.thrift.TException;
           +    public java.util.List<java.lang.Boolean> clearSlowLogResponses(
           +        java.util.Set<TServerName> serverNames) throws TIOError, org.apache.thrift.TException;
           
               /**
                * Grant permissions in table or namespace level.
          -     * 
                * @param info
                */
               public boolean grant(TAccessControlEntity info) throws TIOError, org.apache.thrift.TException;
           
               /**
                * Revoke permissions in table or namespace level.
          -     * 
                * @param info
                */
               public boolean revoke(TAccessControlEntity info) throws TIOError, org.apache.thrift.TException;
          @@ -555,148 +467,262 @@ public interface Iface {
           
             public interface AsyncIface {
           
          -    public void exists(java.nio.ByteBuffer table, TGet tget, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void existsAll(java.nio.ByteBuffer table, java.util.List tgets, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void get(java.nio.ByteBuffer table, TGet tget, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getMultiple(java.nio.ByteBuffer table, java.util.List tgets, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void put(java.nio.ByteBuffer table, TPut tput, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TPut tput, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void putMultiple(java.nio.ByteBuffer table, java.util.List tputs, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void deleteSingle(java.nio.ByteBuffer table, TDelete tdelete, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void deleteMultiple(java.nio.ByteBuffer table, java.util.List tdeletes, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TDelete tdelete, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void increment(java.nio.ByteBuffer table, TIncrement tincrement, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void append(java.nio.ByteBuffer table, TAppend tappend, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void openScanner(java.nio.ByteBuffer table, TScan tscan, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getScannerRows(int scannerId, int numRows, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void closeScanner(int scannerId, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getScannerResults(java.nio.ByteBuffer table, TScan tscan, int numRows, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row, boolean reload, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getAllRegionLocations(java.nio.ByteBuffer table, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator, java.nio.ByteBuffer value, TRowMutations rowMutations, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getTableDescriptor(TTableName table, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getTableDescriptors(java.util.List tables, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void tableExists(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getTableDescriptorsByPattern(java.lang.String regex, boolean includeSysTables, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getTableDescriptorsByNamespace(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          -
          -    public void getTableNamesByPattern(java.lang.String regex, boolean includeSysTables, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          +    public void exists(java.nio.ByteBuffer table, TGet tget,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void existsAll(java.nio.ByteBuffer table, java.util.List tgets,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void get(java.nio.ByteBuffer table, TGet tget,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getMultiple(java.nio.ByteBuffer table, java.util.List tgets,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void put(java.nio.ByteBuffer table, TPut tput,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TPut tput, org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void getTableNamesByNamespace(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          +    public void putMultiple(java.nio.ByteBuffer table, java.util.List tputs,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void createTable(TTableDescriptor desc, java.util.List splitKeys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void deleteSingle(java.nio.ByteBuffer table, TDelete tdelete,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void deleteTable(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void deleteMultiple(java.nio.ByteBuffer table, java.util.List tdeletes,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void truncateTable(TTableName tableName, boolean preserveSplits, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TDelete tdelete,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void increment(java.nio.ByteBuffer table, TIncrement tincrement,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void append(java.nio.ByteBuffer table, TAppend tappend,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void openScanner(java.nio.ByteBuffer table, TScan tscan,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getScannerRows(int scannerId, int numRows,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void closeScanner(int scannerId,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getScannerResults(java.nio.ByteBuffer table, TScan tscan, int numRows,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        boolean reload, org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getAllRegionLocations(java.nio.ByteBuffer table,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator,
          +        java.nio.ByteBuffer value, TRowMutations rowMutations,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getTableDescriptor(TTableName table,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getTableDescriptors(java.util.List tables,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void tableExists(TTableName tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getTableDescriptorsByPattern(java.lang.String regex, boolean includeSysTables,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getTableDescriptorsByNamespace(java.lang.String name,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getTableNamesByPattern(java.lang.String regex, boolean includeSysTables,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void getTableNamesByNamespace(java.lang.String name,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void createTable(TTableDescriptor desc, java.util.List splitKeys,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void deleteTable(TTableName tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
          +
          +    public void truncateTable(TTableName tableName, boolean preserveSplits,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void enableTable(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void enableTable(TTableName tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void disableTable(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void disableTable(TTableName tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void isTableEnabled(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void isTableEnabled(TTableName tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void isTableDisabled(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void isTableDisabled(TTableName tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void isTableAvailable(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void isTableAvailable(TTableName tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void isTableAvailableWithSplit(TTableName tableName, java.util.List splitKeys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void isTableAvailableWithSplit(TTableName tableName,
          +        java.util.List splitKeys,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void modifyTable(TTableDescriptor desc, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void modifyTable(TTableDescriptor desc,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void createNamespace(TNamespaceDescriptor namespaceDesc, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void createNamespace(TNamespaceDescriptor namespaceDesc,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void modifyNamespace(TNamespaceDescriptor namespaceDesc, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void modifyNamespace(TNamespaceDescriptor namespaceDesc,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void deleteNamespace(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void deleteNamespace(java.lang.String name,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void getNamespaceDescriptor(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void getNamespaceDescriptor(java.lang.String name,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void listNamespaceDescriptors(org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          +    public void listNamespaceDescriptors(
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void listNamespaces(org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          +    public void listNamespaces(
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void getThriftServerType(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void getThriftServerType(
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void getClusterId(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void
          +        getClusterId(org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +            throws org.apache.thrift.TException;
           
          -    public void getSlowLogResponses(java.util.Set serverNames, TLogQueryFilter logQueryFilter, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          +    public void getSlowLogResponses(java.util.Set serverNames,
          +        TLogQueryFilter logQueryFilter,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void clearSlowLogResponses(java.util.Set serverNames, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException;
          +    public void clearSlowLogResponses(java.util.Set serverNames,
          +        org.apache.thrift.async.AsyncMethodCallback> resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void grant(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void grant(TAccessControlEntity info,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
          -    public void revoke(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
          +    public void revoke(TAccessControlEntity info,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException;
           
             }
           
             public static class Client extends org.apache.thrift.TServiceClient implements Iface {
               public static class Factory implements org.apache.thrift.TServiceClientFactory {
          -      public Factory() {}
          +      public Factory() {
          +      }
          +
                 public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
                   return new Client(prot);
                 }
          -      public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
          +
          +      public Client getClient(org.apache.thrift.protocol.TProtocol iprot,
          +          org.apache.thrift.protocol.TProtocol oprot) {
                   return new Client(iprot, oprot);
                 }
               }
           
          -    public Client(org.apache.thrift.protocol.TProtocol prot)
          -    {
          +    public Client(org.apache.thrift.protocol.TProtocol prot) {
                 super(prot, prot);
               }
           
          -    public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
          +    public Client(org.apache.thrift.protocol.TProtocol iprot,
          +        org.apache.thrift.protocol.TProtocol oprot) {
                 super(iprot, oprot);
               }
           
          -    public boolean exists(java.nio.ByteBuffer table, TGet tget) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean exists(java.nio.ByteBuffer table, TGet tget)
          +        throws TIOError, org.apache.thrift.TException {
                 send_exists(table, tget);
                 return recv_exists();
               }
           
          -    public void send_exists(java.nio.ByteBuffer table, TGet tget) throws org.apache.thrift.TException
          -    {
          +    public void send_exists(java.nio.ByteBuffer table, TGet tget)
          +        throws org.apache.thrift.TException {
                 exists_args args = new exists_args();
                 args.setTable(table);
                 args.setTget(tget);
                 sendBase("exists", args);
               }
           
          -    public boolean recv_exists() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_exists() throws TIOError, org.apache.thrift.TException {
                 exists_result result = new exists_result();
                 receiveBase(result, "exists");
                 if (result.isSetSuccess()) {
          @@ -705,25 +731,26 @@ public boolean recv_exists() throws TIOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "exists failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "exists failed: unknown result");
               }
           
          -    public java.util.List existsAll(java.nio.ByteBuffer table, java.util.List tgets) throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List existsAll(java.nio.ByteBuffer table,
          +        java.util.List tgets) throws TIOError, org.apache.thrift.TException {
                 send_existsAll(table, tgets);
                 return recv_existsAll();
               }
           
          -    public void send_existsAll(java.nio.ByteBuffer table, java.util.List tgets) throws org.apache.thrift.TException
          -    {
          +    public void send_existsAll(java.nio.ByteBuffer table, java.util.List tgets)
          +        throws org.apache.thrift.TException {
                 existsAll_args args = new existsAll_args();
                 args.setTable(table);
                 args.setTgets(tgets);
                 sendBase("existsAll", args);
               }
           
          -    public java.util.List recv_existsAll() throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_existsAll()
          +        throws TIOError, org.apache.thrift.TException {
                 existsAll_result result = new existsAll_result();
                 receiveBase(result, "existsAll");
                 if (result.isSetSuccess()) {
          @@ -732,25 +759,25 @@ public java.util.List recv_existsAll() throws TIOError, org.a
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "existsAll failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "existsAll failed: unknown result");
               }
           
          -    public TResult get(java.nio.ByteBuffer table, TGet tget) throws TIOError, org.apache.thrift.TException
          -    {
          +    public TResult get(java.nio.ByteBuffer table, TGet tget)
          +        throws TIOError, org.apache.thrift.TException {
                 send_get(table, tget);
                 return recv_get();
               }
           
          -    public void send_get(java.nio.ByteBuffer table, TGet tget) throws org.apache.thrift.TException
          -    {
          +    public void send_get(java.nio.ByteBuffer table, TGet tget) throws org.apache.thrift.TException {
                 get_args args = new get_args();
                 args.setTable(table);
                 args.setTget(tget);
                 sendBase("get", args);
               }
           
          -    public TResult recv_get() throws TIOError, org.apache.thrift.TException
          -    {
          +    public TResult recv_get() throws TIOError, org.apache.thrift.TException {
                 get_result result = new get_result();
                 receiveBase(result, "get");
                 if (result.isSetSuccess()) {
          @@ -759,25 +786,26 @@ public TResult recv_get() throws TIOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "get failed: unknown result");
               }
           
          -    public java.util.List getMultiple(java.nio.ByteBuffer table, java.util.List tgets) throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getMultiple(java.nio.ByteBuffer table,
          +        java.util.List tgets) throws TIOError, org.apache.thrift.TException {
                 send_getMultiple(table, tgets);
                 return recv_getMultiple();
               }
           
          -    public void send_getMultiple(java.nio.ByteBuffer table, java.util.List tgets) throws org.apache.thrift.TException
          -    {
          +    public void send_getMultiple(java.nio.ByteBuffer table, java.util.List tgets)
          +        throws org.apache.thrift.TException {
                 getMultiple_args args = new getMultiple_args();
                 args.setTable(table);
                 args.setTgets(tgets);
                 sendBase("getMultiple", args);
               }
           
          -    public java.util.List recv_getMultiple() throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getMultiple()
          +        throws TIOError, org.apache.thrift.TException {
                 getMultiple_result result = new getMultiple_result();
                 receiveBase(result, "getMultiple");
                 if (result.isSetSuccess()) {
          @@ -786,25 +814,25 @@ public java.util.List recv_getMultiple() throws TIOError, org.apache.th
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getMultiple failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getMultiple failed: unknown result");
               }
           
          -    public void put(java.nio.ByteBuffer table, TPut tput) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void put(java.nio.ByteBuffer table, TPut tput)
          +        throws TIOError, org.apache.thrift.TException {
                 send_put(table, tput);
                 recv_put();
               }
           
          -    public void send_put(java.nio.ByteBuffer table, TPut tput) throws org.apache.thrift.TException
          -    {
          +    public void send_put(java.nio.ByteBuffer table, TPut tput) throws org.apache.thrift.TException {
                 put_args args = new put_args();
                 args.setTable(table);
                 args.setTput(tput);
                 sendBase("put", args);
               }
           
          -    public void recv_put() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_put() throws TIOError, org.apache.thrift.TException {
                 put_result result = new put_result();
                 receiveBase(result, "put");
                 if (result.io != null) {
          @@ -813,14 +841,16 @@ public void recv_put() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public boolean checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TPut tput) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TPut tput) throws TIOError, org.apache.thrift.TException {
                 send_checkAndPut(table, row, family, qualifier, value, tput);
                 return recv_checkAndPut();
               }
           
          -    public void send_checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TPut tput) throws org.apache.thrift.TException
          -    {
          +    public void send_checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TPut tput) throws org.apache.thrift.TException {
                 checkAndPut_args args = new checkAndPut_args();
                 args.setTable(table);
                 args.setRow(row);
          @@ -831,8 +861,7 @@ public void send_checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
                 sendBase("checkAndPut", args);
               }
           
          -    public boolean recv_checkAndPut() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_checkAndPut() throws TIOError, org.apache.thrift.TException {
                 checkAndPut_result result = new checkAndPut_result();
                 receiveBase(result, "checkAndPut");
                 if (result.isSetSuccess()) {
          @@ -841,25 +870,26 @@ public boolean recv_checkAndPut() throws TIOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "checkAndPut failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "checkAndPut failed: unknown result");
               }
           
          -    public void putMultiple(java.nio.ByteBuffer table, java.util.List tputs) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void putMultiple(java.nio.ByteBuffer table, java.util.List tputs)
          +        throws TIOError, org.apache.thrift.TException {
                 send_putMultiple(table, tputs);
                 recv_putMultiple();
               }
           
          -    public void send_putMultiple(java.nio.ByteBuffer table, java.util.List tputs) throws org.apache.thrift.TException
          -    {
          +    public void send_putMultiple(java.nio.ByteBuffer table, java.util.List tputs)
          +        throws org.apache.thrift.TException {
                 putMultiple_args args = new putMultiple_args();
                 args.setTable(table);
                 args.setTputs(tputs);
                 sendBase("putMultiple", args);
               }
           
          -    public void recv_putMultiple() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_putMultiple() throws TIOError, org.apache.thrift.TException {
                 putMultiple_result result = new putMultiple_result();
                 receiveBase(result, "putMultiple");
                 if (result.io != null) {
          @@ -868,22 +898,21 @@ public void recv_putMultiple() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void deleteSingle(java.nio.ByteBuffer table, TDelete tdelete) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void deleteSingle(java.nio.ByteBuffer table, TDelete tdelete)
          +        throws TIOError, org.apache.thrift.TException {
                 send_deleteSingle(table, tdelete);
                 recv_deleteSingle();
               }
           
          -    public void send_deleteSingle(java.nio.ByteBuffer table, TDelete tdelete) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteSingle(java.nio.ByteBuffer table, TDelete tdelete)
          +        throws org.apache.thrift.TException {
                 deleteSingle_args args = new deleteSingle_args();
                 args.setTable(table);
                 args.setTdelete(tdelete);
                 sendBase("deleteSingle", args);
               }
           
          -    public void recv_deleteSingle() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_deleteSingle() throws TIOError, org.apache.thrift.TException {
                 deleteSingle_result result = new deleteSingle_result();
                 receiveBase(result, "deleteSingle");
                 if (result.io != null) {
          @@ -892,22 +921,22 @@ public void recv_deleteSingle() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public java.util.List deleteMultiple(java.nio.ByteBuffer table, java.util.List tdeletes) throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List deleteMultiple(java.nio.ByteBuffer table,
          +        java.util.List tdeletes) throws TIOError, org.apache.thrift.TException {
                 send_deleteMultiple(table, tdeletes);
                 return recv_deleteMultiple();
               }
           
          -    public void send_deleteMultiple(java.nio.ByteBuffer table, java.util.List tdeletes) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteMultiple(java.nio.ByteBuffer table, java.util.List tdeletes)
          +        throws org.apache.thrift.TException {
                 deleteMultiple_args args = new deleteMultiple_args();
                 args.setTable(table);
                 args.setTdeletes(tdeletes);
                 sendBase("deleteMultiple", args);
               }
           
          -    public java.util.List recv_deleteMultiple() throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_deleteMultiple()
          +        throws TIOError, org.apache.thrift.TException {
                 deleteMultiple_result result = new deleteMultiple_result();
                 receiveBase(result, "deleteMultiple");
                 if (result.isSetSuccess()) {
          @@ -916,17 +945,21 @@ public java.util.List recv_deleteMultiple() throws TIOError, org.apache
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "deleteMultiple failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "deleteMultiple failed: unknown result");
               }
           
          -    public boolean checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TDelete tdelete) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TDelete tdelete) throws TIOError, org.apache.thrift.TException {
                 send_checkAndDelete(table, row, family, qualifier, value, tdelete);
                 return recv_checkAndDelete();
               }
           
          -    public void send_checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TDelete tdelete) throws org.apache.thrift.TException
          -    {
          +    public void send_checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TDelete tdelete) throws org.apache.thrift.TException {
                 checkAndDelete_args args = new checkAndDelete_args();
                 args.setTable(table);
                 args.setRow(row);
          @@ -937,8 +970,7 @@ public void send_checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer r
                 sendBase("checkAndDelete", args);
               }
           
          -    public boolean recv_checkAndDelete() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_checkAndDelete() throws TIOError, org.apache.thrift.TException {
                 checkAndDelete_result result = new checkAndDelete_result();
                 receiveBase(result, "checkAndDelete");
                 if (result.isSetSuccess()) {
          @@ -947,25 +979,26 @@ public boolean recv_checkAndDelete() throws TIOError, org.apache.thrift.TExcepti
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "checkAndDelete failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "checkAndDelete failed: unknown result");
               }
           
          -    public TResult increment(java.nio.ByteBuffer table, TIncrement tincrement) throws TIOError, org.apache.thrift.TException
          -    {
          +    public TResult increment(java.nio.ByteBuffer table, TIncrement tincrement)
          +        throws TIOError, org.apache.thrift.TException {
                 send_increment(table, tincrement);
                 return recv_increment();
               }
           
          -    public void send_increment(java.nio.ByteBuffer table, TIncrement tincrement) throws org.apache.thrift.TException
          -    {
          +    public void send_increment(java.nio.ByteBuffer table, TIncrement tincrement)
          +        throws org.apache.thrift.TException {
                 increment_args args = new increment_args();
                 args.setTable(table);
                 args.setTincrement(tincrement);
                 sendBase("increment", args);
               }
           
          -    public TResult recv_increment() throws TIOError, org.apache.thrift.TException
          -    {
          +    public TResult recv_increment() throws TIOError, org.apache.thrift.TException {
                 increment_result result = new increment_result();
                 receiveBase(result, "increment");
                 if (result.isSetSuccess()) {
          @@ -974,25 +1007,26 @@ public TResult recv_increment() throws TIOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "increment failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "increment failed: unknown result");
               }
           
          -    public TResult append(java.nio.ByteBuffer table, TAppend tappend) throws TIOError, org.apache.thrift.TException
          -    {
          +    public TResult append(java.nio.ByteBuffer table, TAppend tappend)
          +        throws TIOError, org.apache.thrift.TException {
                 send_append(table, tappend);
                 return recv_append();
               }
           
          -    public void send_append(java.nio.ByteBuffer table, TAppend tappend) throws org.apache.thrift.TException
          -    {
          +    public void send_append(java.nio.ByteBuffer table, TAppend tappend)
          +        throws org.apache.thrift.TException {
                 append_args args = new append_args();
                 args.setTable(table);
                 args.setTappend(tappend);
                 sendBase("append", args);
               }
           
          -    public TResult recv_append() throws TIOError, org.apache.thrift.TException
          -    {
          +    public TResult recv_append() throws TIOError, org.apache.thrift.TException {
                 append_result result = new append_result();
                 receiveBase(result, "append");
                 if (result.isSetSuccess()) {
          @@ -1001,25 +1035,25 @@ public TResult recv_append() throws TIOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "append failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "append failed: unknown result");
               }
           
          -    public int openScanner(java.nio.ByteBuffer table, TScan tscan) throws TIOError, org.apache.thrift.TException
          -    {
          +    public int openScanner(java.nio.ByteBuffer table, TScan tscan)
          +        throws TIOError, org.apache.thrift.TException {
                 send_openScanner(table, tscan);
                 return recv_openScanner();
               }
           
          -    public void send_openScanner(java.nio.ByteBuffer table, TScan tscan) throws org.apache.thrift.TException
          -    {
          +    public void send_openScanner(java.nio.ByteBuffer table, TScan tscan)
          +        throws org.apache.thrift.TException {
                 openScanner_args args = new openScanner_args();
                 args.setTable(table);
                 args.setTscan(tscan);
                 sendBase("openScanner", args);
               }
           
          -    public int recv_openScanner() throws TIOError, org.apache.thrift.TException
          -    {
          +    public int recv_openScanner() throws TIOError, org.apache.thrift.TException {
                 openScanner_result result = new openScanner_result();
                 receiveBase(result, "openScanner");
                 if (result.isSetSuccess()) {
          @@ -1028,25 +1062,27 @@ public int recv_openScanner() throws TIOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "openScanner failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "openScanner failed: unknown result");
               }
           
          -    public java.util.List getScannerRows(int scannerId, int numRows) throws TIOError, TIllegalArgument, org.apache.thrift.TException
          -    {
          +    public java.util.List getScannerRows(int scannerId, int numRows)
          +        throws TIOError, TIllegalArgument, org.apache.thrift.TException {
                 send_getScannerRows(scannerId, numRows);
                 return recv_getScannerRows();
               }
           
          -    public void send_getScannerRows(int scannerId, int numRows) throws org.apache.thrift.TException
          -    {
          +    public void send_getScannerRows(int scannerId, int numRows)
          +        throws org.apache.thrift.TException {
                 getScannerRows_args args = new getScannerRows_args();
                 args.setScannerId(scannerId);
                 args.setNumRows(numRows);
                 sendBase("getScannerRows", args);
               }
           
          -    public java.util.List recv_getScannerRows() throws TIOError, TIllegalArgument, org.apache.thrift.TException
          -    {
          +    public java.util.List recv_getScannerRows()
          +        throws TIOError, TIllegalArgument, org.apache.thrift.TException {
                 getScannerRows_result result = new getScannerRows_result();
                 receiveBase(result, "getScannerRows");
                 if (result.isSetSuccess()) {
          @@ -1058,24 +1094,25 @@ public java.util.List recv_getScannerRows() throws TIOError, TIllegalAr
                 if (result.ia != null) {
                   throw result.ia;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getScannerRows failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getScannerRows failed: unknown result");
               }
           
          -    public void closeScanner(int scannerId) throws TIOError, TIllegalArgument, org.apache.thrift.TException
          -    {
          +    public void closeScanner(int scannerId)
          +        throws TIOError, TIllegalArgument, org.apache.thrift.TException {
                 send_closeScanner(scannerId);
                 recv_closeScanner();
               }
           
          -    public void send_closeScanner(int scannerId) throws org.apache.thrift.TException
          -    {
          +    public void send_closeScanner(int scannerId) throws org.apache.thrift.TException {
                 closeScanner_args args = new closeScanner_args();
                 args.setScannerId(scannerId);
                 sendBase("closeScanner", args);
               }
           
          -    public void recv_closeScanner() throws TIOError, TIllegalArgument, org.apache.thrift.TException
          -    {
          +    public void recv_closeScanner()
          +        throws TIOError, TIllegalArgument, org.apache.thrift.TException {
                 closeScanner_result result = new closeScanner_result();
                 receiveBase(result, "closeScanner");
                 if (result.io != null) {
          @@ -1087,22 +1124,21 @@ public void recv_closeScanner() throws TIOError, TIllegalArgument, org.apache.th
                 return;
               }
           
          -    public void mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations)
          +        throws TIOError, org.apache.thrift.TException {
                 send_mutateRow(table, trowMutations);
                 recv_mutateRow();
               }
           
          -    public void send_mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations) throws org.apache.thrift.TException
          -    {
          +    public void send_mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations)
          +        throws org.apache.thrift.TException {
                 mutateRow_args args = new mutateRow_args();
                 args.setTable(table);
                 args.setTrowMutations(trowMutations);
                 sendBase("mutateRow", args);
               }
           
          -    public void recv_mutateRow() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_mutateRow() throws TIOError, org.apache.thrift.TException {
                 mutateRow_result result = new mutateRow_result();
                 receiveBase(result, "mutateRow");
                 if (result.io != null) {
          @@ -1111,14 +1147,14 @@ public void recv_mutateRow() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public java.util.List getScannerResults(java.nio.ByteBuffer table, TScan tscan, int numRows) throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List getScannerResults(java.nio.ByteBuffer table, TScan tscan,
          +        int numRows) throws TIOError, org.apache.thrift.TException {
                 send_getScannerResults(table, tscan, numRows);
                 return recv_getScannerResults();
               }
           
          -    public void send_getScannerResults(java.nio.ByteBuffer table, TScan tscan, int numRows) throws org.apache.thrift.TException
          -    {
          +    public void send_getScannerResults(java.nio.ByteBuffer table, TScan tscan, int numRows)
          +        throws org.apache.thrift.TException {
                 getScannerResults_args args = new getScannerResults_args();
                 args.setTable(table);
                 args.setTscan(tscan);
          @@ -1126,8 +1162,8 @@ public void send_getScannerResults(java.nio.ByteBuffer table, TScan tscan, int n
                 sendBase("getScannerResults", args);
               }
           
-    public java.util.List<TResult> recv_getScannerResults() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TResult> recv_getScannerResults()
+        throws TIOError, org.apache.thrift.TException {
                 getScannerResults_result result = new getScannerResults_result();
                 receiveBase(result, "getScannerResults");
                 if (result.isSetSuccess()) {
          @@ -1136,17 +1172,19 @@ public java.util.List recv_getScannerResults() throws TIOError, org.apa
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getScannerResults failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getScannerResults failed: unknown result");
               }
           
          -    public THRegionLocation getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row, boolean reload) throws TIOError, org.apache.thrift.TException
          -    {
          +    public THRegionLocation getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        boolean reload) throws TIOError, org.apache.thrift.TException {
                 send_getRegionLocation(table, row, reload);
                 return recv_getRegionLocation();
               }
           
          -    public void send_getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row, boolean reload) throws org.apache.thrift.TException
          -    {
          +    public void send_getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        boolean reload) throws org.apache.thrift.TException {
                 getRegionLocation_args args = new getRegionLocation_args();
                 args.setTable(table);
                 args.setRow(row);
          @@ -1154,8 +1192,7 @@ public void send_getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffe
                 sendBase("getRegionLocation", args);
               }
           
          -    public THRegionLocation recv_getRegionLocation() throws TIOError, org.apache.thrift.TException
          -    {
          +    public THRegionLocation recv_getRegionLocation() throws TIOError, org.apache.thrift.TException {
                 getRegionLocation_result result = new getRegionLocation_result();
                 receiveBase(result, "getRegionLocation");
                 if (result.isSetSuccess()) {
          @@ -1164,24 +1201,26 @@ public THRegionLocation recv_getRegionLocation() throws TIOError, org.apache.thr
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRegionLocation failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getRegionLocation failed: unknown result");
               }
           
-    public java.util.List<THRegionLocation> getAllRegionLocations(java.nio.ByteBuffer table) throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<THRegionLocation> getAllRegionLocations(java.nio.ByteBuffer table)
+        throws TIOError, org.apache.thrift.TException {
                 send_getAllRegionLocations(table);
                 return recv_getAllRegionLocations();
               }
           
          -    public void send_getAllRegionLocations(java.nio.ByteBuffer table) throws org.apache.thrift.TException
          -    {
          +    public void send_getAllRegionLocations(java.nio.ByteBuffer table)
          +        throws org.apache.thrift.TException {
                 getAllRegionLocations_args args = new getAllRegionLocations_args();
                 args.setTable(table);
                 sendBase("getAllRegionLocations", args);
               }
           
-    public java.util.List<THRegionLocation> recv_getAllRegionLocations() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<THRegionLocation> recv_getAllRegionLocations()
+        throws TIOError, org.apache.thrift.TException {
                 getAllRegionLocations_result result = new getAllRegionLocations_result();
                 receiveBase(result, "getAllRegionLocations");
                 if (result.isSetSuccess()) {
          @@ -1190,17 +1229,22 @@ public java.util.List recv_getAllRegionLocations() throws TIOE
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getAllRegionLocations failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getAllRegionLocations failed: unknown result");
               }
           
          -    public boolean checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator, java.nio.ByteBuffer value, TRowMutations rowMutations) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator,
          +        java.nio.ByteBuffer value, TRowMutations rowMutations)
          +        throws TIOError, org.apache.thrift.TException {
                 send_checkAndMutate(table, row, family, qualifier, compareOperator, value, rowMutations);
                 return recv_checkAndMutate();
               }
           
          -    public void send_checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator, java.nio.ByteBuffer value, TRowMutations rowMutations) throws org.apache.thrift.TException
          -    {
          +    public void send_checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator,
          +        java.nio.ByteBuffer value, TRowMutations rowMutations) throws org.apache.thrift.TException {
                 checkAndMutate_args args = new checkAndMutate_args();
                 args.setTable(table);
                 args.setRow(row);
          @@ -1212,8 +1256,7 @@ public void send_checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer r
                 sendBase("checkAndMutate", args);
               }
           
          -    public boolean recv_checkAndMutate() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_checkAndMutate() throws TIOError, org.apache.thrift.TException {
                 checkAndMutate_result result = new checkAndMutate_result();
                 receiveBase(result, "checkAndMutate");
                 if (result.isSetSuccess()) {
          @@ -1222,24 +1265,25 @@ public boolean recv_checkAndMutate() throws TIOError, org.apache.thrift.TExcepti
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "checkAndMutate failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "checkAndMutate failed: unknown result");
               }
           
          -    public TTableDescriptor getTableDescriptor(TTableName table) throws TIOError, org.apache.thrift.TException
          -    {
          +    public TTableDescriptor getTableDescriptor(TTableName table)
          +        throws TIOError, org.apache.thrift.TException {
                 send_getTableDescriptor(table);
                 return recv_getTableDescriptor();
               }
           
          -    public void send_getTableDescriptor(TTableName table) throws org.apache.thrift.TException
          -    {
          +    public void send_getTableDescriptor(TTableName table) throws org.apache.thrift.TException {
                 getTableDescriptor_args args = new getTableDescriptor_args();
                 args.setTable(table);
                 sendBase("getTableDescriptor", args);
               }
           
          -    public TTableDescriptor recv_getTableDescriptor() throws TIOError, org.apache.thrift.TException
          -    {
          +    public TTableDescriptor recv_getTableDescriptor()
          +        throws TIOError, org.apache.thrift.TException {
                 getTableDescriptor_result result = new getTableDescriptor_result();
                 receiveBase(result, "getTableDescriptor");
                 if (result.isSetSuccess()) {
          @@ -1248,24 +1292,26 @@ public TTableDescriptor recv_getTableDescriptor() throws TIOError, org.apache.th
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableDescriptor failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getTableDescriptor failed: unknown result");
               }
           
-    public java.util.List<TTableDescriptor> getTableDescriptors(java.util.List<TTableName> tables) throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableDescriptor> getTableDescriptors(java.util.List<TTableName> tables)
+        throws TIOError, org.apache.thrift.TException {
                 send_getTableDescriptors(tables);
                 return recv_getTableDescriptors();
               }
           
-    public void send_getTableDescriptors(java.util.List<TTableName> tables) throws org.apache.thrift.TException
-    {
+    public void send_getTableDescriptors(java.util.List<TTableName> tables)
+        throws org.apache.thrift.TException {
                 getTableDescriptors_args args = new getTableDescriptors_args();
                 args.setTables(tables);
                 sendBase("getTableDescriptors", args);
               }
           
-    public java.util.List<TTableDescriptor> recv_getTableDescriptors() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableDescriptor> recv_getTableDescriptors()
+        throws TIOError, org.apache.thrift.TException {
                 getTableDescriptors_result result = new getTableDescriptors_result();
                 receiveBase(result, "getTableDescriptors");
                 if (result.isSetSuccess()) {
          @@ -1274,24 +1320,23 @@ public java.util.List recv_getTableDescriptors() throws TIOErr
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableDescriptors failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getTableDescriptors failed: unknown result");
               }
           
          -    public boolean tableExists(TTableName tableName) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean tableExists(TTableName tableName) throws TIOError, org.apache.thrift.TException {
                 send_tableExists(tableName);
                 return recv_tableExists();
               }
           
          -    public void send_tableExists(TTableName tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_tableExists(TTableName tableName) throws org.apache.thrift.TException {
                 tableExists_args args = new tableExists_args();
                 args.setTableName(tableName);
                 sendBase("tableExists", args);
               }
           
          -    public boolean recv_tableExists() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_tableExists() throws TIOError, org.apache.thrift.TException {
                 tableExists_result result = new tableExists_result();
                 receiveBase(result, "tableExists");
                 if (result.isSetSuccess()) {
          @@ -1300,25 +1345,27 @@ public boolean recv_tableExists() throws TIOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "tableExists failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "tableExists failed: unknown result");
               }
           
-    public java.util.List<TTableDescriptor> getTableDescriptorsByPattern(java.lang.String regex, boolean includeSysTables) throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableDescriptor> getTableDescriptorsByPattern(java.lang.String regex,
+        boolean includeSysTables) throws TIOError, org.apache.thrift.TException {
                 send_getTableDescriptorsByPattern(regex, includeSysTables);
                 return recv_getTableDescriptorsByPattern();
               }
           
          -    public void send_getTableDescriptorsByPattern(java.lang.String regex, boolean includeSysTables) throws org.apache.thrift.TException
          -    {
          +    public void send_getTableDescriptorsByPattern(java.lang.String regex, boolean includeSysTables)
          +        throws org.apache.thrift.TException {
                 getTableDescriptorsByPattern_args args = new getTableDescriptorsByPattern_args();
                 args.setRegex(regex);
                 args.setIncludeSysTables(includeSysTables);
                 sendBase("getTableDescriptorsByPattern", args);
               }
           
-    public java.util.List<TTableDescriptor> recv_getTableDescriptorsByPattern() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableDescriptor> recv_getTableDescriptorsByPattern()
+        throws TIOError, org.apache.thrift.TException {
                 getTableDescriptorsByPattern_result result = new getTableDescriptorsByPattern_result();
                 receiveBase(result, "getTableDescriptorsByPattern");
                 if (result.isSetSuccess()) {
          @@ -1327,24 +1374,26 @@ public java.util.List recv_getTableDescriptorsByPattern() thro
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableDescriptorsByPattern failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getTableDescriptorsByPattern failed: unknown result");
               }
           
-    public java.util.List<TTableDescriptor> getTableDescriptorsByNamespace(java.lang.String name) throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableDescriptor> getTableDescriptorsByNamespace(java.lang.String name)
+        throws TIOError, org.apache.thrift.TException {
                 send_getTableDescriptorsByNamespace(name);
                 return recv_getTableDescriptorsByNamespace();
               }
           
          -    public void send_getTableDescriptorsByNamespace(java.lang.String name) throws org.apache.thrift.TException
          -    {
          +    public void send_getTableDescriptorsByNamespace(java.lang.String name)
          +        throws org.apache.thrift.TException {
                 getTableDescriptorsByNamespace_args args = new getTableDescriptorsByNamespace_args();
                 args.setName(name);
                 sendBase("getTableDescriptorsByNamespace", args);
               }
           
-    public java.util.List<TTableDescriptor> recv_getTableDescriptorsByNamespace() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableDescriptor> recv_getTableDescriptorsByNamespace()
+        throws TIOError, org.apache.thrift.TException {
                 getTableDescriptorsByNamespace_result result = new getTableDescriptorsByNamespace_result();
                 receiveBase(result, "getTableDescriptorsByNamespace");
                 if (result.isSetSuccess()) {
          @@ -1353,25 +1402,27 @@ public java.util.List recv_getTableDescriptorsByNamespace() th
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableDescriptorsByNamespace failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getTableDescriptorsByNamespace failed: unknown result");
               }
           
-    public java.util.List<TTableName> getTableNamesByPattern(java.lang.String regex, boolean includeSysTables) throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableName> getTableNamesByPattern(java.lang.String regex,
+        boolean includeSysTables) throws TIOError, org.apache.thrift.TException {
                 send_getTableNamesByPattern(regex, includeSysTables);
                 return recv_getTableNamesByPattern();
               }
           
          -    public void send_getTableNamesByPattern(java.lang.String regex, boolean includeSysTables) throws org.apache.thrift.TException
          -    {
          +    public void send_getTableNamesByPattern(java.lang.String regex, boolean includeSysTables)
          +        throws org.apache.thrift.TException {
                 getTableNamesByPattern_args args = new getTableNamesByPattern_args();
                 args.setRegex(regex);
                 args.setIncludeSysTables(includeSysTables);
                 sendBase("getTableNamesByPattern", args);
               }
           
-    public java.util.List<TTableName> recv_getTableNamesByPattern() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableName> recv_getTableNamesByPattern()
+        throws TIOError, org.apache.thrift.TException {
                 getTableNamesByPattern_result result = new getTableNamesByPattern_result();
                 receiveBase(result, "getTableNamesByPattern");
                 if (result.isSetSuccess()) {
          @@ -1380,24 +1431,26 @@ public java.util.List recv_getTableNamesByPattern() throws TIOError,
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableNamesByPattern failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getTableNamesByPattern failed: unknown result");
               }
           
-    public java.util.List<TTableName> getTableNamesByNamespace(java.lang.String name) throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableName> getTableNamesByNamespace(java.lang.String name)
+        throws TIOError, org.apache.thrift.TException {
                 send_getTableNamesByNamespace(name);
                 return recv_getTableNamesByNamespace();
               }
           
          -    public void send_getTableNamesByNamespace(java.lang.String name) throws org.apache.thrift.TException
          -    {
          +    public void send_getTableNamesByNamespace(java.lang.String name)
          +        throws org.apache.thrift.TException {
                 getTableNamesByNamespace_args args = new getTableNamesByNamespace_args();
                 args.setName(name);
                 sendBase("getTableNamesByNamespace", args);
               }
           
-    public java.util.List<TTableName> recv_getTableNamesByNamespace() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TTableName> recv_getTableNamesByNamespace()
+        throws TIOError, org.apache.thrift.TException {
                 getTableNamesByNamespace_result result = new getTableNamesByNamespace_result();
                 receiveBase(result, "getTableNamesByNamespace");
                 if (result.isSetSuccess()) {
          @@ -1406,25 +1459,26 @@ public java.util.List recv_getTableNamesByNamespace() throws TIOErro
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTableNamesByNamespace failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getTableNamesByNamespace failed: unknown result");
               }
           
-    public void createTable(TTableDescriptor desc, java.util.List<java.nio.ByteBuffer> splitKeys) throws TIOError, org.apache.thrift.TException
-    {
+    public void createTable(TTableDescriptor desc, java.util.List<java.nio.ByteBuffer> splitKeys)
+        throws TIOError, org.apache.thrift.TException {
                 send_createTable(desc, splitKeys);
                 recv_createTable();
               }
           
-    public void send_createTable(TTableDescriptor desc, java.util.List<java.nio.ByteBuffer> splitKeys) throws org.apache.thrift.TException
-    {
+    public void send_createTable(TTableDescriptor desc,
+        java.util.List<java.nio.ByteBuffer> splitKeys) throws org.apache.thrift.TException {
                 createTable_args args = new createTable_args();
                 args.setDesc(desc);
                 args.setSplitKeys(splitKeys);
                 sendBase("createTable", args);
               }
           
          -    public void recv_createTable() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_createTable() throws TIOError, org.apache.thrift.TException {
                 createTable_result result = new createTable_result();
                 receiveBase(result, "createTable");
                 if (result.io != null) {
          @@ -1433,21 +1487,18 @@ public void recv_createTable() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void deleteTable(TTableName tableName) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void deleteTable(TTableName tableName) throws TIOError, org.apache.thrift.TException {
                 send_deleteTable(tableName);
                 recv_deleteTable();
               }
           
          -    public void send_deleteTable(TTableName tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteTable(TTableName tableName) throws org.apache.thrift.TException {
                 deleteTable_args args = new deleteTable_args();
                 args.setTableName(tableName);
                 sendBase("deleteTable", args);
               }
           
          -    public void recv_deleteTable() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_deleteTable() throws TIOError, org.apache.thrift.TException {
                 deleteTable_result result = new deleteTable_result();
                 receiveBase(result, "deleteTable");
                 if (result.io != null) {
          @@ -1456,22 +1507,21 @@ public void recv_deleteTable() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void truncateTable(TTableName tableName, boolean preserveSplits) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void truncateTable(TTableName tableName, boolean preserveSplits)
          +        throws TIOError, org.apache.thrift.TException {
                 send_truncateTable(tableName, preserveSplits);
                 recv_truncateTable();
               }
           
          -    public void send_truncateTable(TTableName tableName, boolean preserveSplits) throws org.apache.thrift.TException
          -    {
          +    public void send_truncateTable(TTableName tableName, boolean preserveSplits)
          +        throws org.apache.thrift.TException {
                 truncateTable_args args = new truncateTable_args();
                 args.setTableName(tableName);
                 args.setPreserveSplits(preserveSplits);
                 sendBase("truncateTable", args);
               }
           
          -    public void recv_truncateTable() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_truncateTable() throws TIOError, org.apache.thrift.TException {
                 truncateTable_result result = new truncateTable_result();
                 receiveBase(result, "truncateTable");
                 if (result.io != null) {
          @@ -1480,21 +1530,18 @@ public void recv_truncateTable() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void enableTable(TTableName tableName) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void enableTable(TTableName tableName) throws TIOError, org.apache.thrift.TException {
                 send_enableTable(tableName);
                 recv_enableTable();
               }
           
          -    public void send_enableTable(TTableName tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_enableTable(TTableName tableName) throws org.apache.thrift.TException {
                 enableTable_args args = new enableTable_args();
                 args.setTableName(tableName);
                 sendBase("enableTable", args);
               }
           
          -    public void recv_enableTable() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_enableTable() throws TIOError, org.apache.thrift.TException {
                 enableTable_result result = new enableTable_result();
                 receiveBase(result, "enableTable");
                 if (result.io != null) {
          @@ -1503,21 +1550,18 @@ public void recv_enableTable() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void disableTable(TTableName tableName) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void disableTable(TTableName tableName) throws TIOError, org.apache.thrift.TException {
                 send_disableTable(tableName);
                 recv_disableTable();
               }
           
          -    public void send_disableTable(TTableName tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_disableTable(TTableName tableName) throws org.apache.thrift.TException {
                 disableTable_args args = new disableTable_args();
                 args.setTableName(tableName);
                 sendBase("disableTable", args);
               }
           
          -    public void recv_disableTable() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_disableTable() throws TIOError, org.apache.thrift.TException {
                 disableTable_result result = new disableTable_result();
                 receiveBase(result, "disableTable");
                 if (result.io != null) {
          @@ -1526,21 +1570,19 @@ public void recv_disableTable() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public boolean isTableEnabled(TTableName tableName) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean isTableEnabled(TTableName tableName)
          +        throws TIOError, org.apache.thrift.TException {
                 send_isTableEnabled(tableName);
                 return recv_isTableEnabled();
               }
           
          -    public void send_isTableEnabled(TTableName tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_isTableEnabled(TTableName tableName) throws org.apache.thrift.TException {
                 isTableEnabled_args args = new isTableEnabled_args();
                 args.setTableName(tableName);
                 sendBase("isTableEnabled", args);
               }
           
          -    public boolean recv_isTableEnabled() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_isTableEnabled() throws TIOError, org.apache.thrift.TException {
                 isTableEnabled_result result = new isTableEnabled_result();
                 receiveBase(result, "isTableEnabled");
                 if (result.isSetSuccess()) {
          @@ -1549,24 +1591,24 @@ public boolean recv_isTableEnabled() throws TIOError, org.apache.thrift.TExcepti
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "isTableEnabled failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "isTableEnabled failed: unknown result");
               }
           
          -    public boolean isTableDisabled(TTableName tableName) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean isTableDisabled(TTableName tableName)
          +        throws TIOError, org.apache.thrift.TException {
                 send_isTableDisabled(tableName);
                 return recv_isTableDisabled();
               }
           
          -    public void send_isTableDisabled(TTableName tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_isTableDisabled(TTableName tableName) throws org.apache.thrift.TException {
                 isTableDisabled_args args = new isTableDisabled_args();
                 args.setTableName(tableName);
                 sendBase("isTableDisabled", args);
               }
           
          -    public boolean recv_isTableDisabled() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_isTableDisabled() throws TIOError, org.apache.thrift.TException {
                 isTableDisabled_result result = new isTableDisabled_result();
                 receiveBase(result, "isTableDisabled");
                 if (result.isSetSuccess()) {
          @@ -1575,24 +1617,24 @@ public boolean recv_isTableDisabled() throws TIOError, org.apache.thrift.TExcept
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "isTableDisabled failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "isTableDisabled failed: unknown result");
               }
           
          -    public boolean isTableAvailable(TTableName tableName) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean isTableAvailable(TTableName tableName)
          +        throws TIOError, org.apache.thrift.TException {
                 send_isTableAvailable(tableName);
                 return recv_isTableAvailable();
               }
           
          -    public void send_isTableAvailable(TTableName tableName) throws org.apache.thrift.TException
          -    {
          +    public void send_isTableAvailable(TTableName tableName) throws org.apache.thrift.TException {
                 isTableAvailable_args args = new isTableAvailable_args();
                 args.setTableName(tableName);
                 sendBase("isTableAvailable", args);
               }
           
          -    public boolean recv_isTableAvailable() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_isTableAvailable() throws TIOError, org.apache.thrift.TException {
                 isTableAvailable_result result = new isTableAvailable_result();
                 receiveBase(result, "isTableAvailable");
                 if (result.isSetSuccess()) {
          @@ -1601,25 +1643,27 @@ public boolean recv_isTableAvailable() throws TIOError, org.apache.thrift.TExcep
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "isTableAvailable failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "isTableAvailable failed: unknown result");
               }
           
-    public boolean isTableAvailableWithSplit(TTableName tableName, java.util.List<java.nio.ByteBuffer> splitKeys) throws TIOError, org.apache.thrift.TException
-    {
+    public boolean isTableAvailableWithSplit(TTableName tableName,
+        java.util.List<java.nio.ByteBuffer> splitKeys)
+        throws TIOError, org.apache.thrift.TException {
                 send_isTableAvailableWithSplit(tableName, splitKeys);
                 return recv_isTableAvailableWithSplit();
               }
           
-    public void send_isTableAvailableWithSplit(TTableName tableName, java.util.List<java.nio.ByteBuffer> splitKeys) throws org.apache.thrift.TException
-    {
+    public void send_isTableAvailableWithSplit(TTableName tableName,
+        java.util.List<java.nio.ByteBuffer> splitKeys) throws org.apache.thrift.TException {
                 isTableAvailableWithSplit_args args = new isTableAvailableWithSplit_args();
                 args.setTableName(tableName);
                 args.setSplitKeys(splitKeys);
                 sendBase("isTableAvailableWithSplit", args);
               }
           
          -    public boolean recv_isTableAvailableWithSplit() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_isTableAvailableWithSplit() throws TIOError, org.apache.thrift.TException {
                 isTableAvailableWithSplit_result result = new isTableAvailableWithSplit_result();
                 receiveBase(result, "isTableAvailableWithSplit");
                 if (result.isSetSuccess()) {
          @@ -1628,25 +1672,26 @@ public boolean recv_isTableAvailableWithSplit() throws TIOError, org.apache.thri
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "isTableAvailableWithSplit failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "isTableAvailableWithSplit failed: unknown result");
               }
           
          -    public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column)
          +        throws TIOError, org.apache.thrift.TException {
                 send_addColumnFamily(tableName, column);
                 recv_addColumnFamily();
               }
           
          -    public void send_addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column) throws org.apache.thrift.TException
          -    {
          +    public void send_addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column)
          +        throws org.apache.thrift.TException {
                 addColumnFamily_args args = new addColumnFamily_args();
                 args.setTableName(tableName);
                 args.setColumn(column);
                 sendBase("addColumnFamily", args);
               }
           
          -    public void recv_addColumnFamily() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_addColumnFamily() throws TIOError, org.apache.thrift.TException {
                 addColumnFamily_result result = new addColumnFamily_result();
                 receiveBase(result, "addColumnFamily");
                 if (result.io != null) {
          @@ -1655,22 +1700,21 @@ public void recv_addColumnFamily() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column)
          +        throws TIOError, org.apache.thrift.TException {
                 send_deleteColumnFamily(tableName, column);
                 recv_deleteColumnFamily();
               }
           
          -    public void send_deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column)
          +        throws org.apache.thrift.TException {
                 deleteColumnFamily_args args = new deleteColumnFamily_args();
                 args.setTableName(tableName);
                 args.setColumn(column);
                 sendBase("deleteColumnFamily", args);
               }
           
          -    public void recv_deleteColumnFamily() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_deleteColumnFamily() throws TIOError, org.apache.thrift.TException {
                 deleteColumnFamily_result result = new deleteColumnFamily_result();
                 receiveBase(result, "deleteColumnFamily");
                 if (result.io != null) {
          @@ -1679,22 +1723,21 @@ public void recv_deleteColumnFamily() throws TIOError, org.apache.thrift.TExcept
                 return;
               }
           
          -    public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column)
          +        throws TIOError, org.apache.thrift.TException {
                 send_modifyColumnFamily(tableName, column);
                 recv_modifyColumnFamily();
               }
           
          -    public void send_modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column) throws org.apache.thrift.TException
          -    {
          +    public void send_modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column)
          +        throws org.apache.thrift.TException {
                 modifyColumnFamily_args args = new modifyColumnFamily_args();
                 args.setTableName(tableName);
                 args.setColumn(column);
                 sendBase("modifyColumnFamily", args);
               }
           
          -    public void recv_modifyColumnFamily() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_modifyColumnFamily() throws TIOError, org.apache.thrift.TException {
                 modifyColumnFamily_result result = new modifyColumnFamily_result();
                 receiveBase(result, "modifyColumnFamily");
                 if (result.io != null) {
          @@ -1703,21 +1746,18 @@ public void recv_modifyColumnFamily() throws TIOError, org.apache.thrift.TExcept
                 return;
               }
           
          -    public void modifyTable(TTableDescriptor desc) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void modifyTable(TTableDescriptor desc) throws TIOError, org.apache.thrift.TException {
                 send_modifyTable(desc);
                 recv_modifyTable();
               }
           
          -    public void send_modifyTable(TTableDescriptor desc) throws org.apache.thrift.TException
          -    {
          +    public void send_modifyTable(TTableDescriptor desc) throws org.apache.thrift.TException {
                 modifyTable_args args = new modifyTable_args();
                 args.setDesc(desc);
                 sendBase("modifyTable", args);
               }
           
          -    public void recv_modifyTable() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_modifyTable() throws TIOError, org.apache.thrift.TException {
                 modifyTable_result result = new modifyTable_result();
                 receiveBase(result, "modifyTable");
                 if (result.io != null) {
          @@ -1726,21 +1766,20 @@ public void recv_modifyTable() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void createNamespace(TNamespaceDescriptor namespaceDesc) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void createNamespace(TNamespaceDescriptor namespaceDesc)
          +        throws TIOError, org.apache.thrift.TException {
                 send_createNamespace(namespaceDesc);
                 recv_createNamespace();
               }
           
          -    public void send_createNamespace(TNamespaceDescriptor namespaceDesc) throws org.apache.thrift.TException
          -    {
          +    public void send_createNamespace(TNamespaceDescriptor namespaceDesc)
          +        throws org.apache.thrift.TException {
                 createNamespace_args args = new createNamespace_args();
                 args.setNamespaceDesc(namespaceDesc);
                 sendBase("createNamespace", args);
               }
           
          -    public void recv_createNamespace() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_createNamespace() throws TIOError, org.apache.thrift.TException {
                 createNamespace_result result = new createNamespace_result();
                 receiveBase(result, "createNamespace");
                 if (result.io != null) {
          @@ -1749,21 +1788,20 @@ public void recv_createNamespace() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void modifyNamespace(TNamespaceDescriptor namespaceDesc) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void modifyNamespace(TNamespaceDescriptor namespaceDesc)
          +        throws TIOError, org.apache.thrift.TException {
                 send_modifyNamespace(namespaceDesc);
                 recv_modifyNamespace();
               }
           
          -    public void send_modifyNamespace(TNamespaceDescriptor namespaceDesc) throws org.apache.thrift.TException
          -    {
          +    public void send_modifyNamespace(TNamespaceDescriptor namespaceDesc)
          +        throws org.apache.thrift.TException {
                 modifyNamespace_args args = new modifyNamespace_args();
                 args.setNamespaceDesc(namespaceDesc);
                 sendBase("modifyNamespace", args);
               }
           
          -    public void recv_modifyNamespace() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_modifyNamespace() throws TIOError, org.apache.thrift.TException {
                 modifyNamespace_result result = new modifyNamespace_result();
                 receiveBase(result, "modifyNamespace");
                 if (result.io != null) {
          @@ -1772,21 +1810,19 @@ public void recv_modifyNamespace() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public void deleteNamespace(java.lang.String name) throws TIOError, org.apache.thrift.TException
          -    {
          +    public void deleteNamespace(java.lang.String name)
          +        throws TIOError, org.apache.thrift.TException {
                 send_deleteNamespace(name);
                 recv_deleteNamespace();
               }
           
          -    public void send_deleteNamespace(java.lang.String name) throws org.apache.thrift.TException
          -    {
          +    public void send_deleteNamespace(java.lang.String name) throws org.apache.thrift.TException {
                 deleteNamespace_args args = new deleteNamespace_args();
                 args.setName(name);
                 sendBase("deleteNamespace", args);
               }
           
          -    public void recv_deleteNamespace() throws TIOError, org.apache.thrift.TException
          -    {
          +    public void recv_deleteNamespace() throws TIOError, org.apache.thrift.TException {
                 deleteNamespace_result result = new deleteNamespace_result();
                 receiveBase(result, "deleteNamespace");
                 if (result.io != null) {
          @@ -1795,21 +1831,21 @@ public void recv_deleteNamespace() throws TIOError, org.apache.thrift.TException
                 return;
               }
           
          -    public TNamespaceDescriptor getNamespaceDescriptor(java.lang.String name) throws TIOError, org.apache.thrift.TException
          -    {
          +    public TNamespaceDescriptor getNamespaceDescriptor(java.lang.String name)
          +        throws TIOError, org.apache.thrift.TException {
                 send_getNamespaceDescriptor(name);
                 return recv_getNamespaceDescriptor();
               }
           
          -    public void send_getNamespaceDescriptor(java.lang.String name) throws org.apache.thrift.TException
          -    {
          +    public void send_getNamespaceDescriptor(java.lang.String name)
          +        throws org.apache.thrift.TException {
                 getNamespaceDescriptor_args args = new getNamespaceDescriptor_args();
                 args.setName(name);
                 sendBase("getNamespaceDescriptor", args);
               }
           
          -    public TNamespaceDescriptor recv_getNamespaceDescriptor() throws TIOError, org.apache.thrift.TException
          -    {
          +    public TNamespaceDescriptor recv_getNamespaceDescriptor()
          +        throws TIOError, org.apache.thrift.TException {
                 getNamespaceDescriptor_result result = new getNamespaceDescriptor_result();
                 receiveBase(result, "getNamespaceDescriptor");
                 if (result.isSetSuccess()) {
          @@ -1818,23 +1854,24 @@ public TNamespaceDescriptor recv_getNamespaceDescriptor() throws TIOError, org.a
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getNamespaceDescriptor failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getNamespaceDescriptor failed: unknown result");
               }
           
-    public java.util.List<TNamespaceDescriptor> listNamespaceDescriptors() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TNamespaceDescriptor> listNamespaceDescriptors()
+        throws TIOError, org.apache.thrift.TException {
                 send_listNamespaceDescriptors();
                 return recv_listNamespaceDescriptors();
               }
           
          -    public void send_listNamespaceDescriptors() throws org.apache.thrift.TException
          -    {
          +    public void send_listNamespaceDescriptors() throws org.apache.thrift.TException {
                 listNamespaceDescriptors_args args = new listNamespaceDescriptors_args();
                 sendBase("listNamespaceDescriptors", args);
               }
           
-    public java.util.List<TNamespaceDescriptor> recv_listNamespaceDescriptors() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TNamespaceDescriptor> recv_listNamespaceDescriptors()
+        throws TIOError, org.apache.thrift.TException {
                 listNamespaceDescriptors_result result = new listNamespaceDescriptors_result();
                 receiveBase(result, "listNamespaceDescriptors");
                 if (result.isSetSuccess()) {
          @@ -1843,23 +1880,24 @@ public java.util.List recv_listNamespaceDescriptors() thro
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "listNamespaceDescriptors failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "listNamespaceDescriptors failed: unknown result");
               }
           
-    public java.util.List<java.lang.String> listNamespaces() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<java.lang.String> listNamespaces()
+        throws TIOError, org.apache.thrift.TException {
                 send_listNamespaces();
                 return recv_listNamespaces();
               }
           
          -    public void send_listNamespaces() throws org.apache.thrift.TException
          -    {
          +    public void send_listNamespaces() throws org.apache.thrift.TException {
                 listNamespaces_args args = new listNamespaces_args();
                 sendBase("listNamespaces", args);
               }
           
-    public java.util.List<java.lang.String> recv_listNamespaces() throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<java.lang.String> recv_listNamespaces()
+        throws TIOError, org.apache.thrift.TException {
                 listNamespaces_result result = new listNamespaces_result();
                 receiveBase(result, "listNamespaces");
                 if (result.isSetSuccess()) {
          @@ -1868,69 +1906,70 @@ public java.util.List recv_listNamespaces() throws TIOError, o
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "listNamespaces failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "listNamespaces failed: unknown result");
               }
           
          -    public TThriftServerType getThriftServerType() throws org.apache.thrift.TException
          -    {
          +    public TThriftServerType getThriftServerType() throws org.apache.thrift.TException {
                 send_getThriftServerType();
                 return recv_getThriftServerType();
               }
           
          -    public void send_getThriftServerType() throws org.apache.thrift.TException
          -    {
          +    public void send_getThriftServerType() throws org.apache.thrift.TException {
                 getThriftServerType_args args = new getThriftServerType_args();
                 sendBase("getThriftServerType", args);
               }
           
          -    public TThriftServerType recv_getThriftServerType() throws org.apache.thrift.TException
          -    {
          +    public TThriftServerType recv_getThriftServerType() throws org.apache.thrift.TException {
                 getThriftServerType_result result = new getThriftServerType_result();
                 receiveBase(result, "getThriftServerType");
                 if (result.isSetSuccess()) {
                   return result.success;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getThriftServerType failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getThriftServerType failed: unknown result");
               }
           
          -    public java.lang.String getClusterId() throws org.apache.thrift.TException
          -    {
          +    public java.lang.String getClusterId() throws org.apache.thrift.TException {
                 send_getClusterId();
                 return recv_getClusterId();
               }
           
          -    public void send_getClusterId() throws org.apache.thrift.TException
          -    {
          +    public void send_getClusterId() throws org.apache.thrift.TException {
                 getClusterId_args args = new getClusterId_args();
                 sendBase("getClusterId", args);
               }
           
          -    public java.lang.String recv_getClusterId() throws org.apache.thrift.TException
          -    {
          +    public java.lang.String recv_getClusterId() throws org.apache.thrift.TException {
                 getClusterId_result result = new getClusterId_result();
                 receiveBase(result, "getClusterId");
                 if (result.isSetSuccess()) {
                   return result.success;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getClusterId failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getClusterId failed: unknown result");
               }
           
-    public java.util.List<TOnlineLogRecord> getSlowLogResponses(java.util.Set<TServerName> serverNames, TLogQueryFilter logQueryFilter) throws TIOError, org.apache.thrift.TException
-    {
+    public java.util.List<TOnlineLogRecord>
+        getSlowLogResponses(java.util.Set<TServerName> serverNames, TLogQueryFilter logQueryFilter)
+            throws TIOError, org.apache.thrift.TException {
                 send_getSlowLogResponses(serverNames, logQueryFilter);
                 return recv_getSlowLogResponses();
               }
           
-    public void send_getSlowLogResponses(java.util.Set<TServerName> serverNames, TLogQueryFilter logQueryFilter) throws org.apache.thrift.TException
-    {
+    public void send_getSlowLogResponses(java.util.Set<TServerName> serverNames,
+        TLogQueryFilter logQueryFilter) throws org.apache.thrift.TException {
                 getSlowLogResponses_args args = new getSlowLogResponses_args();
                 args.setServerNames(serverNames);
                 args.setLogQueryFilter(logQueryFilter);
                 sendBase("getSlowLogResponses", args);
               }
           
          -    public java.util.List<TOnlineLogRecord> recv_getSlowLogResponses() throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List<TOnlineLogRecord> recv_getSlowLogResponses()
          +        throws TIOError, org.apache.thrift.TException {
                 getSlowLogResponses_result result = new getSlowLogResponses_result();
                 receiveBase(result, "getSlowLogResponses");
                 if (result.isSetSuccess()) {
          @@ -1939,24 +1978,26 @@ public java.util.List recv_getSlowLogResponses() throws TIOErr
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getSlowLogResponses failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "getSlowLogResponses failed: unknown result");
               }
           
          -    public java.util.List<java.lang.Boolean> clearSlowLogResponses(java.util.Set<TServerName> serverNames) throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List<java.lang.Boolean> clearSlowLogResponses(
          +        java.util.Set<TServerName> serverNames) throws TIOError, org.apache.thrift.TException {
                 send_clearSlowLogResponses(serverNames);
                 return recv_clearSlowLogResponses();
               }
           
          -    public void send_clearSlowLogResponses(java.util.Set<TServerName> serverNames) throws org.apache.thrift.TException
          -    {
          +    public void send_clearSlowLogResponses(java.util.Set<TServerName> serverNames)
          +        throws org.apache.thrift.TException {
                 clearSlowLogResponses_args args = new clearSlowLogResponses_args();
                 args.setServerNames(serverNames);
                 sendBase("clearSlowLogResponses", args);
               }
           
          -    public java.util.List<java.lang.Boolean> recv_clearSlowLogResponses() throws TIOError, org.apache.thrift.TException
          -    {
          +    public java.util.List<java.lang.Boolean> recv_clearSlowLogResponses()
          +        throws TIOError, org.apache.thrift.TException {
                 clearSlowLogResponses_result result = new clearSlowLogResponses_result();
                 receiveBase(result, "clearSlowLogResponses");
                 if (result.isSetSuccess()) {
          @@ -1965,24 +2006,23 @@ public java.util.List recv_clearSlowLogResponses() throws TIO
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "clearSlowLogResponses failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT,
          +          "clearSlowLogResponses failed: unknown result");
               }
           
          -    public boolean grant(TAccessControlEntity info) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean grant(TAccessControlEntity info) throws TIOError, org.apache.thrift.TException {
                 send_grant(info);
                 return recv_grant();
               }
           
          -    public void send_grant(TAccessControlEntity info) throws org.apache.thrift.TException
          -    {
          +    public void send_grant(TAccessControlEntity info) throws org.apache.thrift.TException {
                 grant_args args = new grant_args();
                 args.setInfo(info);
                 sendBase("grant", args);
               }
           
          -    public boolean recv_grant() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_grant() throws TIOError, org.apache.thrift.TException {
                 grant_result result = new grant_result();
                 receiveBase(result, "grant");
                 if (result.isSetSuccess()) {
          @@ -1991,24 +2031,22 @@ public boolean recv_grant() throws TIOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "grant failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "grant failed: unknown result");
               }
           
          -    public boolean revoke(TAccessControlEntity info) throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean revoke(TAccessControlEntity info) throws TIOError, org.apache.thrift.TException {
                 send_revoke(info);
                 return recv_revoke();
               }
           
          -    public void send_revoke(TAccessControlEntity info) throws org.apache.thrift.TException
          -    {
          +    public void send_revoke(TAccessControlEntity info) throws org.apache.thrift.TException {
                 revoke_args args = new revoke_args();
                 args.setInfo(info);
                 sendBase("revoke", args);
               }
           
          -    public boolean recv_revoke() throws TIOError, org.apache.thrift.TException
          -    {
          +    public boolean recv_revoke() throws TIOError, org.apache.thrift.TException {
                 revoke_result result = new revoke_result();
                 receiveBase(result, "revoke");
                 if (result.isSetSuccess()) {
          @@ -2017,45 +2055,67 @@ public boolean recv_revoke() throws TIOError, org.apache.thrift.TException
                 if (result.io != null) {
                   throw result.io;
                 }
          -      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "revoke failed: unknown result");
          +      throw new org.apache.thrift.TApplicationException(
          +          org.apache.thrift.TApplicationException.MISSING_RESULT, "revoke failed: unknown result");
               }
           
             }
          -  public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
          -    public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
          +
          +  public static class AsyncClient extends org.apache.thrift.async.TAsyncClient
          +      implements AsyncIface {
          +    public static class Factory
          +        implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
                 private org.apache.thrift.async.TAsyncClientManager clientManager;
                 private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
          -      public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
          +
          +      public Factory(org.apache.thrift.async.TAsyncClientManager clientManager,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
                   this.clientManager = clientManager;
                   this.protocolFactory = protocolFactory;
                 }
          -      public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
          +
          +      public AsyncClient
          +          getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
                   return new AsyncClient(protocolFactory, clientManager, transport);
                 }
               }
           
          -    public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
          +    public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +        org.apache.thrift.async.TAsyncClientManager clientManager,
          +        org.apache.thrift.transport.TNonblockingTransport transport) {
                 super(protocolFactory, clientManager, transport);
               }
           
          -    public void exists(java.nio.ByteBuffer table, TGet tget, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
          +    public void exists(java.nio.ByteBuffer table, TGet tget,
          +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      exists_call method_call = new exists_call(table, tget, resultHandler, this, ___protocolFactory, ___transport);
          +      exists_call method_call =
          +          new exists_call(table, tget, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class exists_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
          +    public static class exists_call
          +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                 private java.nio.ByteBuffer table;
                 private TGet tget;
          -      public exists_call(java.nio.ByteBuffer table, TGet tget, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public exists_call(java.nio.ByteBuffer table, TGet tget,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tget = tget;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("exists", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("exists",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   exists_args args = new exists_args();
                   args.setTable(table);
                   args.setTget(tget);
          @@ -2067,30 +2127,44 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_exists();
                 }
               }
           
          -    public void existsAll(java.nio.ByteBuffer table, java.util.List<TGet> tgets, org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler) throws org.apache.thrift.TException {
          +    public void existsAll(java.nio.ByteBuffer table, java.util.List<TGet> tgets,
          +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      existsAll_call method_call = new existsAll_call(table, tgets, resultHandler, this, ___protocolFactory, ___transport);
          +      existsAll_call method_call =
          +          new existsAll_call(table, tgets, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class existsAll_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<java.lang.Boolean>> {
          +    public static class existsAll_call
          +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<java.lang.Boolean>> {
                 private java.nio.ByteBuffer table;
                 private java.util.List<TGet> tgets;
          -      public existsAll_call(java.nio.ByteBuffer table, java.util.List<TGet> tgets, org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public existsAll_call(java.nio.ByteBuffer table, java.util.List<TGet> tgets,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tgets = tgets;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("existsAll", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("existsAll",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   existsAll_args args = new existsAll_args();
                   args.setTable(table);
                   args.setTgets(tgets);
          @@ -2098,19 +2172,25 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
                   prot.writeMessageEnd();
                 }
           
          -      public java.util.List<java.lang.Boolean> getResult() throws TIOError, org.apache.thrift.TException {
          +      public java.util.List<java.lang.Boolean> getResult()
          +          throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_existsAll();
                 }
               }
           
          -    public void get(java.nio.ByteBuffer table, TGet tget, org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler) throws org.apache.thrift.TException {
          +    public void get(java.nio.ByteBuffer table, TGet tget,
          +        org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      get_call method_call = new get_call(table, tget, resultHandler, this, ___protocolFactory, ___transport);
          +      get_call method_call =
          +          new get_call(table, tget, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -2118,14 +2198,22 @@ public void get(java.nio.ByteBuffer table, TGet tget, org.apache.thrift.async.As
               public static class get_call extends org.apache.thrift.async.TAsyncMethodCall<TResult> {
                 private java.nio.ByteBuffer table;
                 private TGet tget;
          -      public get_call(java.nio.ByteBuffer table, TGet tget, org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public get_call(java.nio.ByteBuffer table, TGet tget,
          +          org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tget = tget;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   get_args args = new get_args();
                   args.setTable(table);
                   args.setTget(tget);
          @@ -2137,30 +2225,44 @@ public TResult getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_get();
                 }
               }
           
          -    public void getMultiple(java.nio.ByteBuffer table, java.util.List<TGet> tgets, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler) throws org.apache.thrift.TException {
          +    public void getMultiple(java.nio.ByteBuffer table, java.util.List<TGet> tgets,
          +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getMultiple_call method_call = new getMultiple_call(table, tgets, resultHandler, this, ___protocolFactory, ___transport);
          +      getMultiple_call method_call =
          +          new getMultiple_call(table, tgets, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class getMultiple_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TResult>> {
          +    public static class getMultiple_call
          +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TResult>> {
                 private java.nio.ByteBuffer table;
                 private java.util.List<TGet> tgets;
          -      public getMultiple_call(java.nio.ByteBuffer table, java.util.List<TGet> tgets, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public getMultiple_call(java.nio.ByteBuffer table, java.util.List<TGet> tgets,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tgets = tgets;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getMultiple", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getMultiple",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getMultiple_args args = new getMultiple_args();
                   args.setTable(table);
                   args.setTgets(tgets);
          @@ -2172,15 +2274,20 @@ public java.util.List getResult() throws TIOError, org.apache.thrift.TE
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getMultiple();
                 }
               }
           
          -    public void put(java.nio.ByteBuffer table, TPut tput, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          +    public void put(java.nio.ByteBuffer table, TPut tput,
          +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      put_call method_call = new put_call(table, tput, resultHandler, this, ___protocolFactory, ___transport);
          +      put_call method_call =
          +          new put_call(table, tput, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -2188,14 +2295,22 @@ public void put(java.nio.ByteBuffer table, TPut tput, org.apache.thrift.async.As
               public static class put_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private java.nio.ByteBuffer table;
                 private TPut tput;
          -      public put_call(java.nio.ByteBuffer table, TPut tput, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public put_call(java.nio.ByteBuffer table, TPut tput,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tput = tput;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("put", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("put",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   put_args args = new put_args();
                   args.setTable(table);
                   args.setTput(tput);
          @@ -2207,27 +2322,41 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
          -    public void checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TPut tput, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
          +    public void checkAndPut(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TPut tput, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      checkAndPut_call method_call = new checkAndPut_call(table, row, family, qualifier, value, tput, resultHandler, this, ___protocolFactory, ___transport);
          +      checkAndPut_call method_call = new checkAndPut_call(table, row, family, qualifier, value,
          +          tput, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class checkAndPut_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
          +    public static class checkAndPut_call
          +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                 private java.nio.ByteBuffer table;
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer family;
                 private java.nio.ByteBuffer qualifier;
                 private java.nio.ByteBuffer value;
                 private TPut tput;
          -      public checkAndPut_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TPut tput, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public checkAndPut_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +          java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +          TPut tput, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.row = row;
          @@ -2237,8 +2366,10 @@ public checkAndPut_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java
                   this.tput = tput;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("checkAndPut", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("checkAndPut",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   checkAndPut_args args = new checkAndPut_args();
                   args.setTable(table);
                   args.setRow(row);
          @@ -2254,15 +2385,20 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_checkAndPut();
                 }
               }
           
          -    public void putMultiple(java.nio.ByteBuffer table, java.util.List<TPut> tputs, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          +    public void putMultiple(java.nio.ByteBuffer table, java.util.List<TPut> tputs,
          +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      putMultiple_call method_call = new putMultiple_call(table, tputs, resultHandler, this, ___protocolFactory, ___transport);
          +      putMultiple_call method_call =
          +          new putMultiple_call(table, tputs, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -2270,14 +2406,22 @@ public void putMultiple(java.nio.ByteBuffer table, java.util.List tputs, o
               public static class putMultiple_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private java.nio.ByteBuffer table;
                 private java.util.List<TPut> tputs;
          -      public putMultiple_call(java.nio.ByteBuffer table, java.util.List<TPut> tputs, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public putMultiple_call(java.nio.ByteBuffer table, java.util.List<TPut> tputs,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tputs = tputs;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("putMultiple", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("putMultiple",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   putMultiple_args args = new putMultiple_args();
                   args.setTable(table);
                   args.setTputs(tputs);
          @@ -2289,15 +2433,20 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
          -    public void deleteSingle(java.nio.ByteBuffer table, TDelete tdelete, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
          +    public void deleteSingle(java.nio.ByteBuffer table, TDelete tdelete,
          +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteSingle_call method_call = new deleteSingle_call(table, tdelete, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteSingle_call method_call = new deleteSingle_call(table, tdelete, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -2305,14 +2454,22 @@ public void deleteSingle(java.nio.ByteBuffer table, TDelete tdelete, org.apache.
               public static class deleteSingle_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private java.nio.ByteBuffer table;
                 private TDelete tdelete;
          -      public deleteSingle_call(java.nio.ByteBuffer table, TDelete tdelete, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public deleteSingle_call(java.nio.ByteBuffer table, TDelete tdelete,
          +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tdelete = tdelete;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteSingle", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteSingle",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteSingle_args args = new deleteSingle_args();
                   args.setTable(table);
                   args.setTdelete(tdelete);
          @@ -2324,30 +2481,44 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
          -    public void deleteMultiple(java.nio.ByteBuffer table, java.util.List<TDelete> tdeletes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>> resultHandler) throws org.apache.thrift.TException {
          +    public void deleteMultiple(java.nio.ByteBuffer table, java.util.List<TDelete> tdeletes,
          +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteMultiple_call method_call = new deleteMultiple_call(table, tdeletes, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteMultiple_call method_call = new deleteMultiple_call(table, tdeletes, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class deleteMultiple_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TDelete>> {
          +    public static class deleteMultiple_call
          +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TDelete>> {
                 private java.nio.ByteBuffer table;
                 private java.util.List<TDelete> tdeletes;
          -      public deleteMultiple_call(java.nio.ByteBuffer table, java.util.List<TDelete> tdeletes, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public deleteMultiple_call(java.nio.ByteBuffer table, java.util.List<TDelete> tdeletes,
          +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tdeletes = tdeletes;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteMultiple", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteMultiple",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteMultiple_args args = new deleteMultiple_args();
                   args.setTable(table);
                   args.setTdeletes(tdeletes);
          @@ -2359,27 +2530,43 @@ public java.util.List getResult() throws TIOError, org.apache.thrift.TE
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_deleteMultiple();
                 }
               }
           
          -    public void checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TDelete tdelete, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
          +    public void checkAndDelete(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TDelete tdelete,
          +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      checkAndDelete_call method_call = new checkAndDelete_call(table, row, family, qualifier, value, tdelete, resultHandler, this, ___protocolFactory, ___transport);
          +      checkAndDelete_call method_call = new checkAndDelete_call(table, row, family, qualifier,
          +          value, tdelete, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class checkAndDelete_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
          +    public static class checkAndDelete_call
          +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                 private java.nio.ByteBuffer table;
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer family;
                 private java.nio.ByteBuffer qualifier;
                 private java.nio.ByteBuffer value;
                 private TDelete tdelete;
          -      public checkAndDelete_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value, TDelete tdelete, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public checkAndDelete_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +          java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +          TDelete tdelete,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.row = row;
          @@ -2389,8 +2576,10 @@ public checkAndDelete_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row, j
                   this.tdelete = tdelete;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("checkAndDelete", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("checkAndDelete",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   checkAndDelete_args args = new checkAndDelete_args();
                   args.setTable(table);
                   args.setRow(row);
          @@ -2406,15 +2595,20 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_checkAndDelete();
                 }
               }
           
          -    public void increment(java.nio.ByteBuffer table, TIncrement tincrement, org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler) throws org.apache.thrift.TException {
          +    public void increment(java.nio.ByteBuffer table, TIncrement tincrement,
          +        org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      increment_call method_call = new increment_call(table, tincrement, resultHandler, this, ___protocolFactory, ___transport);
          +      increment_call method_call = new increment_call(table, tincrement, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -2422,14 +2616,22 @@ public void increment(java.nio.ByteBuffer table, TIncrement tincrement, org.apac
               public static class increment_call extends org.apache.thrift.async.TAsyncMethodCall<TResult> {
                 private java.nio.ByteBuffer table;
                 private TIncrement tincrement;
          -      public increment_call(java.nio.ByteBuffer table, TIncrement tincrement, org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public increment_call(java.nio.ByteBuffer table, TIncrement tincrement,
          +          org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tincrement = tincrement;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("increment", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("increment",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   increment_args args = new increment_args();
                   args.setTable(table);
                   args.setTincrement(tincrement);
          @@ -2441,15 +2643,20 @@ public TResult getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_increment();
                 }
               }
           
          -    public void append(java.nio.ByteBuffer table, TAppend tappend, org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler) throws org.apache.thrift.TException {
          +    public void append(java.nio.ByteBuffer table, TAppend tappend,
          +        org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      append_call method_call = new append_call(table, tappend, resultHandler, this, ___protocolFactory, ___transport);
          +      append_call method_call =
          +          new append_call(table, tappend, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -2457,14 +2664,22 @@ public void append(java.nio.ByteBuffer table, TAppend tappend, org.apache.thrift
               public static class append_call extends org.apache.thrift.async.TAsyncMethodCall<TResult> {
                 private java.nio.ByteBuffer table;
                 private TAppend tappend;
          -      public append_call(java.nio.ByteBuffer table, TAppend tappend, org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public append_call(java.nio.ByteBuffer table, TAppend tappend,
          +          org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tappend = tappend;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("append", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("append",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   append_args args = new append_args();
                   args.setTable(table);
                   args.setTappend(tappend);
          @@ -2476,30 +2691,44 @@ public TResult getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_append();
                 }
               }
           
          -    public void openScanner(java.nio.ByteBuffer table, TScan tscan, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          +    public void openScanner(java.nio.ByteBuffer table, TScan tscan,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      openScanner_call method_call = new openScanner_call(table, tscan, resultHandler, this, ___protocolFactory, ___transport);
          +      openScanner_call method_call =
          +          new openScanner_call(table, tscan, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class openScanner_call extends org.apache.thrift.async.TAsyncMethodCall {
          +    public static class openScanner_call
          +        extends org.apache.thrift.async.TAsyncMethodCall {
                 private java.nio.ByteBuffer table;
                 private TScan tscan;
          -      public openScanner_call(java.nio.ByteBuffer table, TScan tscan, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public openScanner_call(java.nio.ByteBuffer table, TScan tscan,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tscan = tscan;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("openScanner", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("openScanner",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   openScanner_args args = new openScanner_args();
                   args.setTable(table);
                   args.setTscan(tscan);
          @@ -2511,30 +2740,44 @@ public java.lang.Integer getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_openScanner();
                 }
               }
           
           -    public void getScannerRows(int scannerId, int numRows, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getScannerRows(int scannerId, int numRows,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getScannerRows_call method_call = new getScannerRows_call(scannerId, numRows, resultHandler, this, ___protocolFactory, ___transport);
          +      getScannerRows_call method_call = new getScannerRows_call(scannerId, numRows, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getScannerRows_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TResult>> {
           +    public static class getScannerRows_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TResult>> {
                 private int scannerId;
                 private int numRows;
           -      public getScannerRows_call(int scannerId, int numRows, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getScannerRows_call(int scannerId, int numRows,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.scannerId = scannerId;
                   this.numRows = numRows;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getScannerRows", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getScannerRows",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getScannerRows_args args = new getScannerRows_args();
                   args.setScannerId(scannerId);
                   args.setNumRows(numRows);
          @@ -2542,32 +2785,46 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<TResult> getResult() throws TIOError, TIllegalArgument, org.apache.thrift.TException {
           +      public java.util.List<TResult> getResult()
          +          throws TIOError, TIllegalArgument, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getScannerRows();
                 }
               }
           
          -    public void closeScanner(int scannerId, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          +    public void closeScanner(int scannerId,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      closeScanner_call method_call = new closeScanner_call(scannerId, resultHandler, this, ___protocolFactory, ___transport);
          +      closeScanner_call method_call =
          +          new closeScanner_call(scannerId, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
               public static class closeScanner_call extends org.apache.thrift.async.TAsyncMethodCall {
                 private int scannerId;
          -      public closeScanner_call(int scannerId, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public closeScanner_call(int scannerId,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.scannerId = scannerId;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("closeScanner", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("closeScanner",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   closeScanner_args args = new closeScanner_args();
                   args.setScannerId(scannerId);
                   args.write(prot);
          @@ -2578,15 +2835,20 @@ public Void getResult() throws TIOError, TIllegalArgument, org.apache.thrift.TEx
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
          -    public void mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          +    public void mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      mutateRow_call method_call = new mutateRow_call(table, trowMutations, resultHandler, this, ___protocolFactory, ___transport);
          +      mutateRow_call method_call = new mutateRow_call(table, trowMutations, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -2594,14 +2856,22 @@ public void mutateRow(java.nio.ByteBuffer table, TRowMutations trowMutations, or
               public static class mutateRow_call extends org.apache.thrift.async.TAsyncMethodCall {
                 private java.nio.ByteBuffer table;
                 private TRowMutations trowMutations;
          -      public mutateRow_call(java.nio.ByteBuffer table, TRowMutations trowMutations, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public mutateRow_call(java.nio.ByteBuffer table, TRowMutations trowMutations,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.trowMutations = trowMutations;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mutateRow", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mutateRow",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   mutateRow_args args = new mutateRow_args();
                   args.setTable(table);
                   args.setTrowMutations(trowMutations);
          @@ -2613,32 +2883,46 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void getScannerResults(java.nio.ByteBuffer table, TScan tscan, int numRows, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler) throws org.apache.thrift.TException {
           +    public void getScannerResults(java.nio.ByteBuffer table, TScan tscan, int numRows,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getScannerResults_call method_call = new getScannerResults_call(table, tscan, numRows, resultHandler, this, ___protocolFactory, ___transport);
          +      getScannerResults_call method_call = new getScannerResults_call(table, tscan, numRows,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getScannerResults_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TResult>> {
           +    public static class getScannerResults_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TResult>> {
                 private java.nio.ByteBuffer table;
                 private TScan tscan;
                 private int numRows;
           -      public getScannerResults_call(java.nio.ByteBuffer table, TScan tscan, int numRows, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getScannerResults_call(java.nio.ByteBuffer table, TScan tscan, int numRows,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.tscan = tscan;
                   this.numRows = numRows;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getScannerResults", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getScannerResults",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getScannerResults_args args = new getScannerResults_args();
                   args.setTable(table);
                   args.setTscan(tscan);
          @@ -2651,32 +2935,47 @@ public java.util.List getResult() throws TIOError, org.apache.thrift.TE
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getScannerResults();
                 }
               }
           
          -    public void getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row, boolean reload, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          +    public void getRegionLocation(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        boolean reload, org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getRegionLocation_call method_call = new getRegionLocation_call(table, row, reload, resultHandler, this, ___protocolFactory, ___transport);
          +      getRegionLocation_call method_call = new getRegionLocation_call(table, row, reload,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class getRegionLocation_call extends org.apache.thrift.async.TAsyncMethodCall {
          +    public static class getRegionLocation_call
          +        extends org.apache.thrift.async.TAsyncMethodCall {
                 private java.nio.ByteBuffer table;
                 private java.nio.ByteBuffer row;
                 private boolean reload;
          -      public getRegionLocation_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row, boolean reload, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public getRegionLocation_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +          boolean reload,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.row = row;
                   this.reload = reload;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRegionLocation", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRegionLocation",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getRegionLocation_args args = new getRegionLocation_args();
                   args.setTable(table);
                   args.setRow(row);
          @@ -2689,52 +2988,76 @@ public THRegionLocation getResult() throws TIOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getRegionLocation();
                 }
               }
           
           -    public void getAllRegionLocations(java.nio.ByteBuffer table, org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>> resultHandler) throws org.apache.thrift.TException {
           +    public void getAllRegionLocations(java.nio.ByteBuffer table,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getAllRegionLocations_call method_call = new getAllRegionLocations_call(table, resultHandler, this, ___protocolFactory, ___transport);
          +      getAllRegionLocations_call method_call = new getAllRegionLocations_call(table, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getAllRegionLocations_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<THRegionLocation>> {
           +    public static class getAllRegionLocations_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<THRegionLocation>> {
                 private java.nio.ByteBuffer table;
           -      public getAllRegionLocations_call(java.nio.ByteBuffer table, org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getAllRegionLocations_call(java.nio.ByteBuffer table,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getAllRegionLocations", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getAllRegionLocations",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getAllRegionLocations_args args = new getAllRegionLocations_args();
                   args.setTable(table);
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<THRegionLocation> getResult() throws TIOError, org.apache.thrift.TException {
           +      public java.util.List<THRegionLocation> getResult()
          +          throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getAllRegionLocations();
                 }
               }
           
          -    public void checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator, java.nio.ByteBuffer value, TRowMutations rowMutations, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          +    public void checkAndMutate(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator,
          +        java.nio.ByteBuffer value, TRowMutations rowMutations,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      checkAndMutate_call method_call = new checkAndMutate_call(table, row, family, qualifier, compareOperator, value, rowMutations, resultHandler, this, ___protocolFactory, ___transport);
          +      checkAndMutate_call method_call =
          +          new checkAndMutate_call(table, row, family, qualifier, compareOperator, value,
          +              rowMutations, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class checkAndMutate_call extends org.apache.thrift.async.TAsyncMethodCall {
          +    public static class checkAndMutate_call
          +        extends org.apache.thrift.async.TAsyncMethodCall {
                 private java.nio.ByteBuffer table;
                 private java.nio.ByteBuffer row;
                 private java.nio.ByteBuffer family;
          @@ -2742,7 +3065,15 @@ public static class checkAndMutate_call extends org.apache.thrift.async.TAsyncMe
                 private TCompareOperator compareOperator;
                 private java.nio.ByteBuffer value;
                 private TRowMutations rowMutations;
          -      public checkAndMutate_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator, java.nio.ByteBuffer value, TRowMutations rowMutations, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public checkAndMutate_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +          java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier,
          +          TCompareOperator compareOperator, java.nio.ByteBuffer value, TRowMutations rowMutations,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                   this.row = row;
          @@ -2753,8 +3084,10 @@ public checkAndMutate_call(java.nio.ByteBuffer table, java.nio.ByteBuffer row, j
                   this.rowMutations = rowMutations;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("checkAndMutate", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("checkAndMutate",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   checkAndMutate_args args = new checkAndMutate_args();
                   args.setTable(table);
                   args.setRow(row);
          @@ -2771,28 +3104,42 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_checkAndMutate();
                 }
               }
           
          -    public void getTableDescriptor(TTableName table, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          +    public void getTableDescriptor(TTableName table,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getTableDescriptor_call method_call = new getTableDescriptor_call(table, resultHandler, this, ___protocolFactory, ___transport);
          +      getTableDescriptor_call method_call =
          +          new getTableDescriptor_call(table, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class getTableDescriptor_call extends org.apache.thrift.async.TAsyncMethodCall {
          +    public static class getTableDescriptor_call
          +        extends org.apache.thrift.async.TAsyncMethodCall {
                 private TTableName table;
          -      public getTableDescriptor_call(TTableName table, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public getTableDescriptor_call(TTableName table,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.table = table;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableDescriptor", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableDescriptor",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getTableDescriptor_args args = new getTableDescriptor_args();
                   args.setTable(table);
                   args.write(prot);
          @@ -2803,60 +3150,89 @@ public TTableDescriptor getResult() throws TIOError, org.apache.thrift.TExceptio
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getTableDescriptor();
                 }
               }
           
           -    public void getTableDescriptors(java.util.List<TTableName> tables, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler) throws org.apache.thrift.TException {
           +    public void getTableDescriptors(java.util.List<TTableName> tables,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getTableDescriptors_call method_call = new getTableDescriptors_call(tables, resultHandler, this, ___protocolFactory, ___transport);
          +      getTableDescriptors_call method_call = new getTableDescriptors_call(tables, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getTableDescriptors_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableDescriptor>> {
           +    public static class getTableDescriptors_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableDescriptor>> {
                  private java.util.List<TTableName> tables;
           -      public getTableDescriptors_call(java.util.List<TTableName> tables, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getTableDescriptors_call(java.util.List<TTableName> tables,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tables = tables;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableDescriptors", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableDescriptors",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getTableDescriptors_args args = new getTableDescriptors_args();
                   args.setTables(tables);
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<TTableDescriptor> getResult() throws TIOError, org.apache.thrift.TException {
           +      public java.util.List<TTableDescriptor> getResult()
          +          throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getTableDescriptors();
                 }
               }
           
          -    public void tableExists(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          +    public void tableExists(TTableName tableName,
          +        org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      tableExists_call method_call = new tableExists_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      tableExists_call method_call =
          +          new tableExists_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
          -    public static class tableExists_call extends org.apache.thrift.async.TAsyncMethodCall {
          +    public static class tableExists_call
          +        extends org.apache.thrift.async.TAsyncMethodCall {
                 private TTableName tableName;
          -      public tableExists_call(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
          +
          +      public tableExists_call(TTableName tableName,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("tableExists", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("tableExists",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   tableExists_args args = new tableExists_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -2867,30 +3243,44 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_tableExists();
                 }
               }
           
           -    public void getTableDescriptorsByPattern(java.lang.String regex, boolean includeSysTables, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler) throws org.apache.thrift.TException {
           +    public void getTableDescriptorsByPattern(java.lang.String regex, boolean includeSysTables,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getTableDescriptorsByPattern_call method_call = new getTableDescriptorsByPattern_call(regex, includeSysTables, resultHandler, this, ___protocolFactory, ___transport);
          +      getTableDescriptorsByPattern_call method_call = new getTableDescriptorsByPattern_call(regex,
          +          includeSysTables, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getTableDescriptorsByPattern_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableDescriptor>> {
           +    public static class getTableDescriptorsByPattern_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableDescriptor>> {
                 private java.lang.String regex;
                 private boolean includeSysTables;
           -      public getTableDescriptorsByPattern_call(java.lang.String regex, boolean includeSysTables, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getTableDescriptorsByPattern_call(java.lang.String regex, boolean includeSysTables,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.regex = regex;
                   this.includeSysTables = includeSysTables;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableDescriptorsByPattern", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage(
          +            "getTableDescriptorsByPattern", org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getTableDescriptorsByPattern_args args = new getTableDescriptorsByPattern_args();
                   args.setRegex(regex);
                   args.setIncludeSysTables(includeSysTables);
          @@ -2898,66 +3288,96 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<TTableDescriptor> getResult() throws TIOError, org.apache.thrift.TException {
           +      public java.util.List<TTableDescriptor> getResult()
          +          throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getTableDescriptorsByPattern();
                 }
               }
           
           -    public void getTableDescriptorsByNamespace(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler) throws org.apache.thrift.TException {
           +    public void getTableDescriptorsByNamespace(java.lang.String name,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getTableDescriptorsByNamespace_call method_call = new getTableDescriptorsByNamespace_call(name, resultHandler, this, ___protocolFactory, ___transport);
          +      getTableDescriptorsByNamespace_call method_call = new getTableDescriptorsByNamespace_call(
          +          name, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getTableDescriptorsByNamespace_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableDescriptor>> {
           +    public static class getTableDescriptorsByNamespace_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableDescriptor>> {
                 private java.lang.String name;
           -      public getTableDescriptorsByNamespace_call(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getTableDescriptorsByNamespace_call(java.lang.String name,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.name = name;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableDescriptorsByNamespace", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage(
          +            "getTableDescriptorsByNamespace", org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getTableDescriptorsByNamespace_args args = new getTableDescriptorsByNamespace_args();
                   args.setName(name);
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<TTableDescriptor> getResult() throws TIOError, org.apache.thrift.TException {
           +      public java.util.List<TTableDescriptor> getResult()
          +          throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getTableDescriptorsByNamespace();
                 }
               }
           
           -    public void getTableNamesByPattern(java.lang.String regex, boolean includeSysTables, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler) throws org.apache.thrift.TException {
           +    public void getTableNamesByPattern(java.lang.String regex, boolean includeSysTables,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getTableNamesByPattern_call method_call = new getTableNamesByPattern_call(regex, includeSysTables, resultHandler, this, ___protocolFactory, ___transport);
          +      getTableNamesByPattern_call method_call = new getTableNamesByPattern_call(regex,
          +          includeSysTables, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getTableNamesByPattern_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableName>> {
           +    public static class getTableNamesByPattern_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableName>> {
                 private java.lang.String regex;
                 private boolean includeSysTables;
           -      public getTableNamesByPattern_call(java.lang.String regex, boolean includeSysTables, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getTableNamesByPattern_call(java.lang.String regex, boolean includeSysTables,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.regex = regex;
                   this.includeSysTables = includeSysTables;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableNamesByPattern", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableNamesByPattern",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getTableNamesByPattern_args args = new getTableNamesByPattern_args();
                   args.setRegex(regex);
                   args.setIncludeSysTables(includeSysTables);
           @@ -2969,28 +3389,42 @@ public java.util.List<TTableName> getResult() throws TIOError, org.apache.thrift
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getTableNamesByPattern();
                 }
               }
           
           -    public void getTableNamesByNamespace(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler) throws org.apache.thrift.TException {
           +    public void getTableNamesByNamespace(java.lang.String name,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getTableNamesByNamespace_call method_call = new getTableNamesByNamespace_call(name, resultHandler, this, ___protocolFactory, ___transport);
          +      getTableNamesByNamespace_call method_call = new getTableNamesByNamespace_call(name,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getTableNamesByNamespace_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableName>> {
           +    public static class getTableNamesByNamespace_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TTableName>> {
                 private java.lang.String name;
           -      public getTableNamesByNamespace_call(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getTableNamesByNamespace_call(java.lang.String name,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.name = name;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableNamesByNamespace", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getTableNamesByNamespace",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getTableNamesByNamespace_args args = new getTableNamesByNamespace_args();
                   args.setName(name);
                   args.write(prot);
           @@ -3001,15 +3435,20 @@ public java.util.List<TTableName> getResult() throws TIOError, org.apache.thrift
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getTableNamesByNamespace();
                 }
               }
           
           -    public void createTable(TTableDescriptor desc, java.util.List<java.nio.ByteBuffer> splitKeys, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void createTable(TTableDescriptor desc, java.util.List<java.nio.ByteBuffer> splitKeys,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      createTable_call method_call = new createTable_call(desc, splitKeys, resultHandler, this, ___protocolFactory, ___transport);
          +      createTable_call method_call = new createTable_call(desc, splitKeys, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           @@ -3017,14 +3456,22 @@ public void createTable(TTableDescriptor desc, java.util.List<java.nio.ByteBuffe
                public static class createTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private TTableDescriptor desc;
                  private java.util.List<java.nio.ByteBuffer> splitKeys;
           -      public createTable_call(TTableDescriptor desc, java.util.List<java.nio.ByteBuffer> splitKeys, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public createTable_call(TTableDescriptor desc, java.util.List<java.nio.ByteBuffer> splitKeys,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.desc = desc;
                   this.splitKeys = splitKeys;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("createTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("createTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   createTable_args args = new createTable_args();
                   args.setDesc(desc);
                   args.setSplitKeys(splitKeys);
          @@ -3036,28 +3483,41 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void deleteTable(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void deleteTable(TTableName tableName,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteTable_call method_call = new deleteTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteTable_call method_call =
          +          new deleteTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
                public static class deleteTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private TTableName tableName;
           -      public deleteTable_call(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public deleteTable_call(TTableName tableName,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteTable_args args = new deleteTable_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -3068,15 +3528,20 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void truncateTable(TTableName tableName, boolean preserveSplits, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void truncateTable(TTableName tableName, boolean preserveSplits,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      truncateTable_call method_call = new truncateTable_call(tableName, preserveSplits, resultHandler, this, ___protocolFactory, ___transport);
          +      truncateTable_call method_call = new truncateTable_call(tableName, preserveSplits,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
          @@ -3084,14 +3549,22 @@ public void truncateTable(TTableName tableName, boolean preserveSplits, org.apac
                public static class truncateTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private TTableName tableName;
                  private boolean preserveSplits;
           -      public truncateTable_call(TTableName tableName, boolean preserveSplits, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public truncateTable_call(TTableName tableName, boolean preserveSplits,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.preserveSplits = preserveSplits;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("truncateTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("truncateTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   truncateTable_args args = new truncateTable_args();
                   args.setTableName(tableName);
                   args.setPreserveSplits(preserveSplits);
          @@ -3103,28 +3576,41 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void enableTable(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void enableTable(TTableName tableName,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      enableTable_call method_call = new enableTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      enableTable_call method_call =
          +          new enableTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
                public static class enableTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private TTableName tableName;
           -      public enableTable_call(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public enableTable_call(TTableName tableName,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("enableTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("enableTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   enableTable_args args = new enableTable_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -3135,28 +3621,41 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void disableTable(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void disableTable(TTableName tableName,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      disableTable_call method_call = new disableTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      disableTable_call method_call =
          +          new disableTable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
                public static class disableTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private TTableName tableName;
           -      public disableTable_call(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public disableTable_call(TTableName tableName,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("disableTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("disableTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   disableTable_args args = new disableTable_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -3167,28 +3666,42 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void isTableEnabled(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           +    public void isTableEnabled(TTableName tableName,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      isTableEnabled_call method_call = new isTableEnabled_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      isTableEnabled_call method_call =
          +          new isTableEnabled_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class isTableEnabled_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
           +    public static class isTableEnabled_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                  private TTableName tableName;
           -      public isTableEnabled_call(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public isTableEnabled_call(TTableName tableName,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableEnabled", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableEnabled",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   isTableEnabled_args args = new isTableEnabled_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -3199,28 +3712,42 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_isTableEnabled();
                 }
               }
           
           -    public void isTableDisabled(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           +    public void isTableDisabled(TTableName tableName,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      isTableDisabled_call method_call = new isTableDisabled_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      isTableDisabled_call method_call = new isTableDisabled_call(tableName, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class isTableDisabled_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
           +    public static class isTableDisabled_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                  private TTableName tableName;
           -      public isTableDisabled_call(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public isTableDisabled_call(TTableName tableName,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableDisabled", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableDisabled",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   isTableDisabled_args args = new isTableDisabled_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -3231,28 +3758,42 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_isTableDisabled();
                 }
               }
           
           -    public void isTableAvailable(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           +    public void isTableAvailable(TTableName tableName,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      isTableAvailable_call method_call = new isTableAvailable_call(tableName, resultHandler, this, ___protocolFactory, ___transport);
          +      isTableAvailable_call method_call = new isTableAvailable_call(tableName, resultHandler, this,
          +          ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class isTableAvailable_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
           +    public static class isTableAvailable_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                  private TTableName tableName;
           -      public isTableAvailable_call(TTableName tableName, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public isTableAvailable_call(TTableName tableName,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableAvailable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableAvailable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   isTableAvailable_args args = new isTableAvailable_args();
                   args.setTableName(tableName);
                   args.write(prot);
          @@ -3263,30 +3804,46 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_isTableAvailable();
                 }
               }
           
           -    public void isTableAvailableWithSplit(TTableName tableName, java.util.List<java.nio.ByteBuffer> splitKeys, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           +    public void isTableAvailableWithSplit(TTableName tableName,
           +        java.util.List<java.nio.ByteBuffer> splitKeys,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      isTableAvailableWithSplit_call method_call = new isTableAvailableWithSplit_call(tableName, splitKeys, resultHandler, this, ___protocolFactory, ___transport);
          +      isTableAvailableWithSplit_call method_call = new isTableAvailableWithSplit_call(tableName,
          +          splitKeys, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class isTableAvailableWithSplit_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
           +    public static class isTableAvailableWithSplit_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                  private TTableName tableName;
                  private java.util.List<java.nio.ByteBuffer> splitKeys;
           -      public isTableAvailableWithSplit_call(TTableName tableName, java.util.List<java.nio.ByteBuffer> splitKeys, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public isTableAvailableWithSplit_call(TTableName tableName,
           +          java.util.List<java.nio.ByteBuffer> splitKeys,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.splitKeys = splitKeys;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableAvailableWithSplit", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("isTableAvailableWithSplit",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   isTableAvailableWithSplit_args args = new isTableAvailableWithSplit_args();
                   args.setTableName(tableName);
                   args.setSplitKeys(splitKeys);
          @@ -3298,30 +3855,44 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_isTableAvailableWithSplit();
                 }
               }
           
           -    public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor column,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      addColumnFamily_call method_call = new addColumnFamily_call(tableName, column, resultHandler, this, ___protocolFactory, ___transport);
          +      addColumnFamily_call method_call = new addColumnFamily_call(tableName, column, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class addColumnFamily_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
           +    public static class addColumnFamily_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private TTableName tableName;
                  private TColumnFamilyDescriptor column;
           -      public addColumnFamily_call(TTableName tableName, TColumnFamilyDescriptor column, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public addColumnFamily_call(TTableName tableName, TColumnFamilyDescriptor column,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.column = column;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("addColumnFamily", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("addColumnFamily",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   addColumnFamily_args args = new addColumnFamily_args();
                   args.setTableName(tableName);
                   args.setColumn(column);
          @@ -3333,30 +3904,44 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void deleteColumnFamily(TTableName tableName, java.nio.ByteBuffer column,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteColumnFamily_call method_call = new deleteColumnFamily_call(tableName, column, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteColumnFamily_call method_call = new deleteColumnFamily_call(tableName, column,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class deleteColumnFamily_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
           +    public static class deleteColumnFamily_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private TTableName tableName;
                  private java.nio.ByteBuffer column;
           -      public deleteColumnFamily_call(TTableName tableName, java.nio.ByteBuffer column, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public deleteColumnFamily_call(TTableName tableName, java.nio.ByteBuffer column,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.column = column;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteColumnFamily", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteColumnFamily",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteColumnFamily_args args = new deleteColumnFamily_args();
                   args.setTableName(tableName);
                   args.setColumn(column);
          @@ -3368,30 +3953,44 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      modifyColumnFamily_call method_call = new modifyColumnFamily_call(tableName, column, resultHandler, this, ___protocolFactory, ___transport);
          +      modifyColumnFamily_call method_call = new modifyColumnFamily_call(tableName, column,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class modifyColumnFamily_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
           +    public static class modifyColumnFamily_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private TTableName tableName;
                  private TColumnFamilyDescriptor column;
           -      public modifyColumnFamily_call(TTableName tableName, TColumnFamilyDescriptor column, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public modifyColumnFamily_call(TTableName tableName, TColumnFamilyDescriptor column,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.tableName = tableName;
                   this.column = column;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("modifyColumnFamily", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("modifyColumnFamily",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   modifyColumnFamily_args args = new modifyColumnFamily_args();
                   args.setTableName(tableName);
                   args.setColumn(column);
          @@ -3403,28 +4002,41 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void modifyTable(TTableDescriptor desc, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void modifyTable(TTableDescriptor desc,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      modifyTable_call method_call = new modifyTable_call(desc, resultHandler, this, ___protocolFactory, ___transport);
          +      modifyTable_call method_call =
          +          new modifyTable_call(desc, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
                public static class modifyTable_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                  private TTableDescriptor desc;
           -      public modifyTable_call(TTableDescriptor desc, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public modifyTable_call(TTableDescriptor desc,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.desc = desc;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("modifyTable", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("modifyTable",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   modifyTable_args args = new modifyTable_args();
                   args.setDesc(desc);
                   args.write(prot);
          @@ -3435,28 +4047,42 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void createNamespace(TNamespaceDescriptor namespaceDesc, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void createNamespace(TNamespaceDescriptor namespaceDesc,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      createNamespace_call method_call = new createNamespace_call(namespaceDesc, resultHandler, this, ___protocolFactory, ___transport);
          +      createNamespace_call method_call = new createNamespace_call(namespaceDesc, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class createNamespace_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
           +    public static class createNamespace_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private TNamespaceDescriptor namespaceDesc;
           -      public createNamespace_call(TNamespaceDescriptor namespaceDesc, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public createNamespace_call(TNamespaceDescriptor namespaceDesc,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.namespaceDesc = namespaceDesc;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("createNamespace", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("createNamespace",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   createNamespace_args args = new createNamespace_args();
                   args.setNamespaceDesc(namespaceDesc);
                   args.write(prot);
          @@ -3467,28 +4093,42 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void modifyNamespace(TNamespaceDescriptor namespaceDesc, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void modifyNamespace(TNamespaceDescriptor namespaceDesc,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      modifyNamespace_call method_call = new modifyNamespace_call(namespaceDesc, resultHandler, this, ___protocolFactory, ___transport);
          +      modifyNamespace_call method_call = new modifyNamespace_call(namespaceDesc, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class modifyNamespace_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
           +    public static class modifyNamespace_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private TNamespaceDescriptor namespaceDesc;
           -      public modifyNamespace_call(TNamespaceDescriptor namespaceDesc, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public modifyNamespace_call(TNamespaceDescriptor namespaceDesc,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.namespaceDesc = namespaceDesc;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("modifyNamespace", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("modifyNamespace",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   modifyNamespace_args args = new modifyNamespace_args();
                   args.setNamespaceDesc(namespaceDesc);
                   args.write(prot);
          @@ -3499,28 +4139,42 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void deleteNamespace(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           +    public void deleteNamespace(java.lang.String name,
           +        org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      deleteNamespace_call method_call = new deleteNamespace_call(name, resultHandler, this, ___protocolFactory, ___transport);
          +      deleteNamespace_call method_call =
          +          new deleteNamespace_call(name, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class deleteNamespace_call extends org.apache.thrift.async.TAsyncMethodCall<Void> {
           +    public static class deleteNamespace_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<Void> {
                 private java.lang.String name;
           -      public deleteNamespace_call(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public deleteNamespace_call(java.lang.String name,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.name = name;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteNamespace", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deleteNamespace",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   deleteNamespace_args args = new deleteNamespace_args();
                   args.setName(name);
                   args.write(prot);
          @@ -3531,28 +4185,42 @@ public Void getResult() throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return null;
                 }
               }
           
           -    public void getNamespaceDescriptor(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback<TNamespaceDescriptor> resultHandler) throws org.apache.thrift.TException {
           +    public void getNamespaceDescriptor(java.lang.String name,
           +        org.apache.thrift.async.AsyncMethodCallback<TNamespaceDescriptor> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getNamespaceDescriptor_call method_call = new getNamespaceDescriptor_call(name, resultHandler, this, ___protocolFactory, ___transport);
          +      getNamespaceDescriptor_call method_call = new getNamespaceDescriptor_call(name, resultHandler,
          +          this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getNamespaceDescriptor_call extends org.apache.thrift.async.TAsyncMethodCall<TNamespaceDescriptor> {
           +    public static class getNamespaceDescriptor_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<TNamespaceDescriptor> {
                 private java.lang.String name;
           -      public getNamespaceDescriptor_call(java.lang.String name, org.apache.thrift.async.AsyncMethodCallback<TNamespaceDescriptor> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getNamespaceDescriptor_call(java.lang.String name,
           +          org.apache.thrift.async.AsyncMethodCallback<TNamespaceDescriptor> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.name = name;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getNamespaceDescriptor", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getNamespaceDescriptor",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getNamespaceDescriptor_args args = new getNamespaceDescriptor_args();
                   args.setName(name);
                   args.write(prot);
          @@ -3563,84 +4231,125 @@ public TNamespaceDescriptor getResult() throws TIOError, org.apache.thrift.TExce
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getNamespaceDescriptor();
                 }
               }
           
           -    public void listNamespaceDescriptors(org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>> resultHandler) throws org.apache.thrift.TException {
           +    public void listNamespaceDescriptors(
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      listNamespaceDescriptors_call method_call = new listNamespaceDescriptors_call(resultHandler, this, ___protocolFactory, ___transport);
          +      listNamespaceDescriptors_call method_call =
          +          new listNamespaceDescriptors_call(resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class listNamespaceDescriptors_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TNamespaceDescriptor>> {
           -      public listNamespaceDescriptors_call(org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +    public static class listNamespaceDescriptors_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TNamespaceDescriptor>> {
           +      public listNamespaceDescriptors_call(
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("listNamespaceDescriptors", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("listNamespaceDescriptors",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   listNamespaceDescriptors_args args = new listNamespaceDescriptors_args();
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<TNamespaceDescriptor> getResult() throws TIOError, org.apache.thrift.TException {
           +      public java.util.List<TNamespaceDescriptor> getResult()
          +          throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_listNamespaceDescriptors();
                 }
               }
           
           -    public void listNamespaces(org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>> resultHandler) throws org.apache.thrift.TException {
           +    public void listNamespaces(
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      listNamespaces_call method_call = new listNamespaces_call(resultHandler, this, ___protocolFactory, ___transport);
          +      listNamespaces_call method_call =
          +          new listNamespaces_call(resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class listNamespaces_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<java.lang.String>> {
           -      public listNamespaces_call(org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +    public static class listNamespaces_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<java.lang.String>> {
           +      public listNamespaces_call(
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("listNamespaces", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("listNamespaces",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   listNamespaces_args args = new listNamespaces_args();
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<java.lang.String> getResult() throws TIOError, org.apache.thrift.TException {
           +      public java.util.List<java.lang.String> getResult()
          +          throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_listNamespaces();
                 }
               }
           
           -    public void getThriftServerType(org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler) throws org.apache.thrift.TException {
           +    public void getThriftServerType(
           +        org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getThriftServerType_call method_call = new getThriftServerType_call(resultHandler, this, ___protocolFactory, ___transport);
          +      getThriftServerType_call method_call =
          +          new getThriftServerType_call(resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getThriftServerType_call extends org.apache.thrift.async.TAsyncMethodCall<TThriftServerType> {
           -      public getThriftServerType_call(org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +    public static class getThriftServerType_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<TThriftServerType> {
           +      public getThriftServerType_call(
           +          org.apache.thrift.async.AsyncMethodCallback<TThriftServerType> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getThriftServerType", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getThriftServerType",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getThriftServerType_args args = new getThriftServerType_args();
                   args.write(prot);
                   prot.writeMessageEnd();
          @@ -3650,26 +4359,39 @@ public TThriftServerType getResult() throws org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getThriftServerType();
                 }
               }
           
           -    public void getClusterId(org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler) throws org.apache.thrift.TException {
           +    public void
           +        getClusterId(org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler)
          +            throws org.apache.thrift.TException {
                 checkReady();
          -      getClusterId_call method_call = new getClusterId_call(resultHandler, this, ___protocolFactory, ___transport);
          +      getClusterId_call method_call =
          +          new getClusterId_call(resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getClusterId_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.String> {
           -      public getClusterId_call(org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +    public static class getClusterId_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.String> {
           +      public getClusterId_call(
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.String> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getClusterId", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getClusterId",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getClusterId_args args = new getClusterId_args();
                   args.write(prot);
                   prot.writeMessageEnd();
          @@ -3679,30 +4401,46 @@ public java.lang.String getResult() throws org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getClusterId();
                 }
               }
           
           -    public void getSlowLogResponses(java.util.Set<TServerName> serverNames, TLogQueryFilter logQueryFilter, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>> resultHandler) throws org.apache.thrift.TException {
           +    public void getSlowLogResponses(java.util.Set<TServerName> serverNames,
           +        TLogQueryFilter logQueryFilter,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      getSlowLogResponses_call method_call = new getSlowLogResponses_call(serverNames, logQueryFilter, resultHandler, this, ___protocolFactory, ___transport);
          +      getSlowLogResponses_call method_call = new getSlowLogResponses_call(serverNames,
          +          logQueryFilter, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class getSlowLogResponses_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TOnlineLogRecord>> {
           +    public static class getSlowLogResponses_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<TOnlineLogRecord>> {
                  private java.util.Set<TServerName> serverNames;
                  private TLogQueryFilter logQueryFilter;
           -      public getSlowLogResponses_call(java.util.Set<TServerName> serverNames, TLogQueryFilter logQueryFilter, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public getSlowLogResponses_call(java.util.Set<TServerName> serverNames,
           +          TLogQueryFilter logQueryFilter,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.serverNames = serverNames;
                   this.logQueryFilter = logQueryFilter;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getSlowLogResponses", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getSlowLogResponses",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   getSlowLogResponses_args args = new getSlowLogResponses_args();
                   args.setServerNames(serverNames);
                   args.setLogQueryFilter(logQueryFilter);
          @@ -3710,64 +4448,94 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<TOnlineLogRecord> getResult() throws TIOError, org.apache.thrift.TException {
           +      public java.util.List<TOnlineLogRecord> getResult()
          +          throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_getSlowLogResponses();
                 }
               }
           
           -    public void clearSlowLogResponses(java.util.Set<TServerName> serverNames, org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler) throws org.apache.thrift.TException {
           +    public void clearSlowLogResponses(java.util.Set<TServerName> serverNames,
           +        org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      clearSlowLogResponses_call method_call = new clearSlowLogResponses_call(serverNames, resultHandler, this, ___protocolFactory, ___transport);
          +      clearSlowLogResponses_call method_call = new clearSlowLogResponses_call(serverNames,
          +          resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class clearSlowLogResponses_call extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<java.lang.Boolean>> {
           +    public static class clearSlowLogResponses_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.util.List<java.lang.Boolean>> {
                  private java.util.Set<TServerName> serverNames;
           -      public clearSlowLogResponses_call(java.util.Set<TServerName> serverNames, org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public clearSlowLogResponses_call(java.util.Set<TServerName> serverNames,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.serverNames = serverNames;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("clearSlowLogResponses", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("clearSlowLogResponses",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   clearSlowLogResponses_args args = new clearSlowLogResponses_args();
                   args.setServerNames(serverNames);
                   args.write(prot);
                   prot.writeMessageEnd();
                 }
           
           -      public java.util.List<java.lang.Boolean> getResult() throws TIOError, org.apache.thrift.TException {
           +      public java.util.List<java.lang.Boolean> getResult()
          +          throws TIOError, org.apache.thrift.TException {
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_clearSlowLogResponses();
                 }
               }
           
           -    public void grant(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           +    public void grant(TAccessControlEntity info,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      grant_call method_call = new grant_call(info, resultHandler, this, ___protocolFactory, ___transport);
          +      grant_call method_call =
          +          new grant_call(info, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class grant_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
           +    public static class grant_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                  private TAccessControlEntity info;
           -      public grant_call(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public grant_call(TAccessControlEntity info,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.info = info;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("grant", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("grant",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   grant_args args = new grant_args();
                   args.setInfo(info);
                   args.write(prot);
          @@ -3778,28 +4546,42 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_grant();
                 }
               }
           
           -    public void revoke(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           +    public void revoke(TAccessControlEntity info,
           +        org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +        throws org.apache.thrift.TException {
                 checkReady();
          -      revoke_call method_call = new revoke_call(info, resultHandler, this, ___protocolFactory, ___transport);
          +      revoke_call method_call =
          +          new revoke_call(info, resultHandler, this, ___protocolFactory, ___transport);
                 this.___currentMethod = method_call;
                 ___manager.call(method_call);
               }
           
           -    public static class revoke_call extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
           +    public static class revoke_call
           +        extends org.apache.thrift.async.TAsyncMethodCall<java.lang.Boolean> {
                  private TAccessControlEntity info;
           -      public revoke_call(TAccessControlEntity info, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
           +
           +      public revoke_call(TAccessControlEntity info,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler,
          +          org.apache.thrift.async.TAsyncClient client,
          +          org.apache.thrift.protocol.TProtocolFactory protocolFactory,
          +          org.apache.thrift.transport.TNonblockingTransport transport)
          +          throws org.apache.thrift.TException {
                   super(client, protocolFactory, transport, resultHandler, false);
                   this.info = info;
                 }
           
          -      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
          -        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("revoke", org.apache.thrift.protocol.TMessageType.CALL, 0));
          +      public void write_args(org.apache.thrift.protocol.TProtocol prot)
          +          throws org.apache.thrift.TException {
          +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("revoke",
          +            org.apache.thrift.protocol.TMessageType.CALL, 0));
                   revoke_args args = new revoke_args();
                   args.setInfo(info);
                   args.write(prot);
          @@ -3810,25 +4592,35 @@ public java.lang.Boolean getResult() throws TIOError, org.apache.thrift.TExcepti
                   if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
                     throw new java.lang.IllegalStateException("Method call not finished!");
                   }
          -        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          -        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
          +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport =
          +            new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
          +        org.apache.thrift.protocol.TProtocol prot =
          +            client.getProtocolFactory().getProtocol(memoryTransport);
                   return (new Client(prot)).recv_revoke();
                 }
               }
           
             }
           
           -  public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I> implements org.apache.thrift.TProcessor {
           -    private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(Processor.class.getName());
           +  public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I>
          +      implements org.apache.thrift.TProcessor {
          +    private static final org.slf4j.Logger _LOGGER =
          +        org.slf4j.LoggerFactory.getLogger(Processor.class.getName());
          +
               public Processor(I iface) {
           -      super(iface, getProcessMap(new java.util.HashMap<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
           +      super(iface, getProcessMap(
           +        new java.util.HashMap<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
               }
           
           -    protected Processor(I iface, java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
           +    protected Processor(I iface,
           +        java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
                 super(iface, getProcessMap(processMap));
               }
           
           -    private static <I extends Iface> java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
           +    private static <I extends Iface>
           +        java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>
           +        getProcessMap(
           +            java.util.Map<java.lang.String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
                 processMap.put("exists", new exists());
                 processMap.put("existsAll", new existsAll());
                 processMap.put("get", new get());
           @@ -3884,7 +4676,8 @@ protected Processor(I iface, java.util.Map<java.lang.String, org.apache.thrift.Pro
                  return processMap;
                }
            
           -    public static class exists<I extends Iface> extends org.apache.thrift.ProcessFunction<I, exists_args> {
           +    public static class exists<I extends Iface>
           +        extends org.apache.thrift.ProcessFunction<I, exists_args> {
                 public exists() {
                   super("exists");
                 }
          @@ -3902,7 +4695,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public exists_result getResult(I iface, exists_args args) throws org.apache.thrift.TException {
          +      public exists_result getResult(I iface, exists_args args)
          +          throws org.apache.thrift.TException {
                   exists_result result = new exists_result();
                   try {
                     result.success = iface.exists(args.table, args.tget);
          @@ -3914,7 +4708,8 @@ public exists_result getResult(I iface, exists_args args) throws org.apache.thri
                 }
               }
           
           -    public static class existsAll<I extends Iface> extends org.apache.thrift.ProcessFunction<I, existsAll_args> {
           +    public static class existsAll<I extends Iface>
           +        extends org.apache.thrift.ProcessFunction<I, existsAll_args> {
                 public existsAll() {
                   super("existsAll");
                 }
          @@ -3932,7 +4727,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public existsAll_result getResult(I iface, existsAll_args args) throws org.apache.thrift.TException {
          +      public existsAll_result getResult(I iface, existsAll_args args)
          +          throws org.apache.thrift.TException {
                   existsAll_result result = new existsAll_result();
                   try {
                     result.success = iface.existsAll(args.table, args.tgets);
          @@ -3943,7 +4739,8 @@ public existsAll_result getResult(I iface, existsAll_args args) throws org.apach
                 }
               }
           
           -    public static class get<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_args> {
           +    public static class get<I extends Iface>
           +        extends org.apache.thrift.ProcessFunction<I, get_args> {
                 public get() {
                   super("get");
                 }
          @@ -3972,7 +4769,8 @@ public get_result getResult(I iface, get_args args) throws org.apache.thrift.TEx
                 }
               }
           
           -    public static class getMultiple<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getMultiple_args> {
           +    public static class getMultiple<I extends Iface>
           +        extends org.apache.thrift.ProcessFunction<I, getMultiple_args> {
                 public getMultiple() {
                   super("getMultiple");
                 }
          @@ -3990,7 +4788,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getMultiple_result getResult(I iface, getMultiple_args args) throws org.apache.thrift.TException {
          +      public getMultiple_result getResult(I iface, getMultiple_args args)
          +          throws org.apache.thrift.TException {
                   getMultiple_result result = new getMultiple_result();
                   try {
                     result.success = iface.getMultiple(args.table, args.tgets);
          @@ -4001,7 +4800,8 @@ public getMultiple_result getResult(I iface, getMultiple_args args) throws org.a
                 }
               }
           
           -    public static class put<I extends Iface> extends org.apache.thrift.ProcessFunction<I, put_args> {
           +    public static class put<I extends Iface>
           +        extends org.apache.thrift.ProcessFunction<I, put_args> {
                 public put() {
                   super("put");
                 }
          @@ -4030,7 +4830,8 @@ public put_result getResult(I iface, put_args args) throws org.apache.thrift.TEx
                 }
               }
           
           -    public static class checkAndPut<I extends Iface> extends org.apache.thrift.ProcessFunction<I, checkAndPut_args> {
           +    public static class checkAndPut<I extends Iface>
           +        extends org.apache.thrift.ProcessFunction<I, checkAndPut_args> {
                 public checkAndPut() {
                   super("checkAndPut");
                 }
          @@ -4048,10 +4849,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public checkAndPut_result getResult(I iface, checkAndPut_args args) throws org.apache.thrift.TException {
          +      public checkAndPut_result getResult(I iface, checkAndPut_args args)
          +          throws org.apache.thrift.TException {
                   checkAndPut_result result = new checkAndPut_result();
                   try {
          -          result.success = iface.checkAndPut(args.table, args.row, args.family, args.qualifier, args.value, args.tput);
          +          result.success = iface.checkAndPut(args.table, args.row, args.family, args.qualifier,
          +            args.value, args.tput);
                     result.setSuccessIsSet(true);
                   } catch (TIOError io) {
                     result.io = io;
          @@ -4060,7 +4863,8 @@ public checkAndPut_result getResult(I iface, checkAndPut_args args) throws org.a
                 }
               }
           
           -    public static class putMultiple<I extends Iface> extends org.apache.thrift.ProcessFunction<I, putMultiple_args> {
           +    public static class putMultiple<I extends Iface>
           +        extends org.apache.thrift.ProcessFunction<I, putMultiple_args> {
                 public putMultiple() {
                   super("putMultiple");
                 }
          @@ -4078,7 +4882,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public putMultiple_result getResult(I iface, putMultiple_args args) throws org.apache.thrift.TException {
          +      public putMultiple_result getResult(I iface, putMultiple_args args)
          +          throws org.apache.thrift.TException {
                   putMultiple_result result = new putMultiple_result();
                   try {
                     iface.putMultiple(args.table, args.tputs);
          @@ -4089,7 +4894,8 @@ public putMultiple_result getResult(I iface, putMultiple_args args) throws org.a
                 }
               }
           
           -    public static class deleteSingle<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteSingle_args> {
           +    public static class deleteSingle<I extends Iface>
           +        extends org.apache.thrift.ProcessFunction<I, deleteSingle_args> {
                 public deleteSingle() {
                   super("deleteSingle");
                 }
          @@ -4107,7 +4913,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteSingle_result getResult(I iface, deleteSingle_args args) throws org.apache.thrift.TException {
          +      public deleteSingle_result getResult(I iface, deleteSingle_args args)
          +          throws org.apache.thrift.TException {
                   deleteSingle_result result = new deleteSingle_result();
                   try {
                     iface.deleteSingle(args.table, args.tdelete);
          @@ -4118,7 +4925,8 @@ public deleteSingle_result getResult(I iface, deleteSingle_args args) throws org
                 }
               }
           
          -    public static class deleteMultiple<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteMultiple_args> {
          +    public static class deleteMultiple<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, deleteMultiple_args> {
                 public deleteMultiple() {
                   super("deleteMultiple");
                 }
          @@ -4136,7 +4944,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteMultiple_result getResult(I iface, deleteMultiple_args args) throws org.apache.thrift.TException {
          +      public deleteMultiple_result getResult(I iface, deleteMultiple_args args)
          +          throws org.apache.thrift.TException {
                   deleteMultiple_result result = new deleteMultiple_result();
                   try {
                     result.success = iface.deleteMultiple(args.table, args.tdeletes);
          @@ -4147,7 +4956,8 @@ public deleteMultiple_result getResult(I iface, deleteMultiple_args args) throws
                 }
               }
           
          -    public static class checkAndDelete<I extends Iface> extends org.apache.thrift.ProcessFunction<I, checkAndDelete_args> {
          +    public static class checkAndDelete<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, checkAndDelete_args> {
                 public checkAndDelete() {
                   super("checkAndDelete");
                 }
          @@ -4165,10 +4975,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public checkAndDelete_result getResult(I iface, checkAndDelete_args args) throws org.apache.thrift.TException {
          +      public checkAndDelete_result getResult(I iface, checkAndDelete_args args)
          +          throws org.apache.thrift.TException {
                   checkAndDelete_result result = new checkAndDelete_result();
                   try {
          -          result.success = iface.checkAndDelete(args.table, args.row, args.family, args.qualifier, args.value, args.tdelete);
          +          result.success = iface.checkAndDelete(args.table, args.row, args.family, args.qualifier,
          +            args.value, args.tdelete);
                     result.setSuccessIsSet(true);
                   } catch (TIOError io) {
                     result.io = io;
          @@ -4177,7 +4989,8 @@ public checkAndDelete_result getResult(I iface, checkAndDelete_args args) throws
                 }
               }
           
          -    public static class increment<I extends Iface> extends org.apache.thrift.ProcessFunction<I, increment_args> {
          +    public static class increment<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, increment_args> {
                 public increment() {
                   super("increment");
                 }
          @@ -4195,7 +5008,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public increment_result getResult(I iface, increment_args args) throws org.apache.thrift.TException {
          +      public increment_result getResult(I iface, increment_args args)
          +          throws org.apache.thrift.TException {
                   increment_result result = new increment_result();
                   try {
                     result.success = iface.increment(args.table, args.tincrement);
          @@ -4206,7 +5020,8 @@ public increment_result getResult(I iface, increment_args args) throws org.apach
                 }
               }
           
          -    public static class append<I extends Iface> extends org.apache.thrift.ProcessFunction<I, append_args> {
          +    public static class append<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, append_args> {
                 public append() {
                   super("append");
                 }
          @@ -4224,7 +5039,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public append_result getResult(I iface, append_args args) throws org.apache.thrift.TException {
          +      public append_result getResult(I iface, append_args args)
          +          throws org.apache.thrift.TException {
                   append_result result = new append_result();
                   try {
                     result.success = iface.append(args.table, args.tappend);
          @@ -4235,7 +5051,8 @@ public append_result getResult(I iface, append_args args) throws org.apache.thri
                 }
               }
           
          -    public static class openScanner<I extends Iface> extends org.apache.thrift.ProcessFunction<I, openScanner_args> {
          +    public static class openScanner<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, openScanner_args> {
                 public openScanner() {
                   super("openScanner");
                 }
          @@ -4253,7 +5070,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public openScanner_result getResult(I iface, openScanner_args args) throws org.apache.thrift.TException {
          +      public openScanner_result getResult(I iface, openScanner_args args)
          +          throws org.apache.thrift.TException {
                   openScanner_result result = new openScanner_result();
                   try {
                     result.success = iface.openScanner(args.table, args.tscan);
          @@ -4265,7 +5083,8 @@ public openScanner_result getResult(I iface, openScanner_args args) throws org.a
                 }
               }
           
          -    public static class getScannerRows<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getScannerRows_args> {
          +    public static class getScannerRows<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getScannerRows_args> {
                 public getScannerRows() {
                   super("getScannerRows");
                 }
          @@ -4283,7 +5102,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getScannerRows_result getResult(I iface, getScannerRows_args args) throws org.apache.thrift.TException {
          +      public getScannerRows_result getResult(I iface, getScannerRows_args args)
          +          throws org.apache.thrift.TException {
                   getScannerRows_result result = new getScannerRows_result();
                   try {
                     result.success = iface.getScannerRows(args.scannerId, args.numRows);
          @@ -4296,7 +5116,8 @@ public getScannerRows_result getResult(I iface, getScannerRows_args args) throws
                 }
               }
           
          -    public static class closeScanner<I extends Iface> extends org.apache.thrift.ProcessFunction<I, closeScanner_args> {
          +    public static class closeScanner<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, closeScanner_args> {
                 public closeScanner() {
                   super("closeScanner");
                 }
          @@ -4314,7 +5135,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public closeScanner_result getResult(I iface, closeScanner_args args) throws org.apache.thrift.TException {
          +      public closeScanner_result getResult(I iface, closeScanner_args args)
          +          throws org.apache.thrift.TException {
                   closeScanner_result result = new closeScanner_result();
                   try {
                     iface.closeScanner(args.scannerId);
          @@ -4327,7 +5149,8 @@ public closeScanner_result getResult(I iface, closeScanner_args args) throws org
                 }
               }
           
          -    public static class mutateRow<I extends Iface> extends org.apache.thrift.ProcessFunction<I, mutateRow_args> {
          +    public static class mutateRow<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, mutateRow_args> {
                 public mutateRow() {
                   super("mutateRow");
                 }
          @@ -4345,7 +5168,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public mutateRow_result getResult(I iface, mutateRow_args args) throws org.apache.thrift.TException {
          +      public mutateRow_result getResult(I iface, mutateRow_args args)
          +          throws org.apache.thrift.TException {
                   mutateRow_result result = new mutateRow_result();
                   try {
                     iface.mutateRow(args.table, args.trowMutations);
          @@ -4356,7 +5180,8 @@ public mutateRow_result getResult(I iface, mutateRow_args args) throws org.apach
                 }
               }
           
          -    public static class getScannerResults<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getScannerResults_args> {
          +    public static class getScannerResults<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getScannerResults_args> {
                 public getScannerResults() {
                   super("getScannerResults");
                 }
          @@ -4374,7 +5199,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getScannerResults_result getResult(I iface, getScannerResults_args args) throws org.apache.thrift.TException {
          +      public getScannerResults_result getResult(I iface, getScannerResults_args args)
          +          throws org.apache.thrift.TException {
                   getScannerResults_result result = new getScannerResults_result();
                   try {
                     result.success = iface.getScannerResults(args.table, args.tscan, args.numRows);
          @@ -4385,7 +5211,8 @@ public getScannerResults_result getResult(I iface, getScannerResults_args args)
                 }
               }
           
          -    public static class getRegionLocation<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getRegionLocation_args> {
          +    public static class getRegionLocation<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getRegionLocation_args> {
                 public getRegionLocation() {
                   super("getRegionLocation");
                 }
          @@ -4403,7 +5230,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getRegionLocation_result getResult(I iface, getRegionLocation_args args) throws org.apache.thrift.TException {
          +      public getRegionLocation_result getResult(I iface, getRegionLocation_args args)
          +          throws org.apache.thrift.TException {
                   getRegionLocation_result result = new getRegionLocation_result();
                   try {
                     result.success = iface.getRegionLocation(args.table, args.row, args.reload);
          @@ -4414,7 +5242,8 @@ public getRegionLocation_result getResult(I iface, getRegionLocation_args args)
                 }
               }
           
          -    public static class getAllRegionLocations<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getAllRegionLocations_args> {
          +    public static class getAllRegionLocations<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getAllRegionLocations_args> {
                 public getAllRegionLocations() {
                   super("getAllRegionLocations");
                 }
          @@ -4432,7 +5261,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getAllRegionLocations_result getResult(I iface, getAllRegionLocations_args args) throws org.apache.thrift.TException {
          +      public getAllRegionLocations_result getResult(I iface, getAllRegionLocations_args args)
          +          throws org.apache.thrift.TException {
                   getAllRegionLocations_result result = new getAllRegionLocations_result();
                   try {
                     result.success = iface.getAllRegionLocations(args.table);
          @@ -4443,7 +5273,8 @@ public getAllRegionLocations_result getResult(I iface, getAllRegionLocations_arg
                 }
               }
           
          -    public static class checkAndMutate<I extends Iface> extends org.apache.thrift.ProcessFunction<I, checkAndMutate_args> {
          +    public static class checkAndMutate<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, checkAndMutate_args> {
                 public checkAndMutate() {
                   super("checkAndMutate");
                 }
          @@ -4461,10 +5292,12 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public checkAndMutate_result getResult(I iface, checkAndMutate_args args) throws org.apache.thrift.TException {
          +      public checkAndMutate_result getResult(I iface, checkAndMutate_args args)
          +          throws org.apache.thrift.TException {
                   checkAndMutate_result result = new checkAndMutate_result();
                   try {
          -          result.success = iface.checkAndMutate(args.table, args.row, args.family, args.qualifier, args.compareOperator, args.value, args.rowMutations);
          +          result.success = iface.checkAndMutate(args.table, args.row, args.family, args.qualifier,
          +            args.compareOperator, args.value, args.rowMutations);
                     result.setSuccessIsSet(true);
                   } catch (TIOError io) {
                     result.io = io;
          @@ -4473,7 +5306,8 @@ public checkAndMutate_result getResult(I iface, checkAndMutate_args args) throws
                 }
               }
           
          -    public static class getTableDescriptor<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getTableDescriptor_args> {
          +    public static class getTableDescriptor<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getTableDescriptor_args> {
                 public getTableDescriptor() {
                   super("getTableDescriptor");
                 }
          @@ -4491,7 +5325,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getTableDescriptor_result getResult(I iface, getTableDescriptor_args args) throws org.apache.thrift.TException {
          +      public getTableDescriptor_result getResult(I iface, getTableDescriptor_args args)
          +          throws org.apache.thrift.TException {
                   getTableDescriptor_result result = new getTableDescriptor_result();
                   try {
                     result.success = iface.getTableDescriptor(args.table);
          @@ -4502,7 +5337,8 @@ public getTableDescriptor_result getResult(I iface, getTableDescriptor_args args
                 }
               }
           
          -    public static class getTableDescriptors<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getTableDescriptors_args> {
          +    public static class getTableDescriptors<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getTableDescriptors_args> {
                 public getTableDescriptors() {
                   super("getTableDescriptors");
                 }
          @@ -4520,7 +5356,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getTableDescriptors_result getResult(I iface, getTableDescriptors_args args) throws org.apache.thrift.TException {
          +      public getTableDescriptors_result getResult(I iface, getTableDescriptors_args args)
          +          throws org.apache.thrift.TException {
                   getTableDescriptors_result result = new getTableDescriptors_result();
                   try {
                     result.success = iface.getTableDescriptors(args.tables);
          @@ -4531,7 +5368,8 @@ public getTableDescriptors_result getResult(I iface, getTableDescriptors_args ar
                 }
               }
           
          -    public static class tableExists<I extends Iface> extends org.apache.thrift.ProcessFunction<I, tableExists_args> {
          +    public static class tableExists<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, tableExists_args> {
                 public tableExists() {
                   super("tableExists");
                 }
          @@ -4549,7 +5387,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public tableExists_result getResult(I iface, tableExists_args args) throws org.apache.thrift.TException {
          +      public tableExists_result getResult(I iface, tableExists_args args)
          +          throws org.apache.thrift.TException {
                   tableExists_result result = new tableExists_result();
                   try {
                     result.success = iface.tableExists(args.tableName);
          @@ -4561,7 +5400,8 @@ public tableExists_result getResult(I iface, tableExists_args args) throws org.a
                 }
               }
           
          -    public static class getTableDescriptorsByPattern<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getTableDescriptorsByPattern_args> {
          +    public static class getTableDescriptorsByPattern<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getTableDescriptorsByPattern_args> {
                 public getTableDescriptorsByPattern() {
                   super("getTableDescriptorsByPattern");
                 }
          @@ -4579,7 +5419,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getTableDescriptorsByPattern_result getResult(I iface, getTableDescriptorsByPattern_args args) throws org.apache.thrift.TException {
          +      public getTableDescriptorsByPattern_result getResult(I iface,
          +          getTableDescriptorsByPattern_args args) throws org.apache.thrift.TException {
                   getTableDescriptorsByPattern_result result = new getTableDescriptorsByPattern_result();
                   try {
                     result.success = iface.getTableDescriptorsByPattern(args.regex, args.includeSysTables);
          @@ -4590,7 +5431,8 @@ public getTableDescriptorsByPattern_result getResult(I iface, getTableDescriptor
                 }
               }
           
          -    public static class getTableDescriptorsByNamespace<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getTableDescriptorsByNamespace_args> {
          +    public static class getTableDescriptorsByNamespace<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getTableDescriptorsByNamespace_args> {
                 public getTableDescriptorsByNamespace() {
                   super("getTableDescriptorsByNamespace");
                 }
          @@ -4608,7 +5450,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getTableDescriptorsByNamespace_result getResult(I iface, getTableDescriptorsByNamespace_args args) throws org.apache.thrift.TException {
          +      public getTableDescriptorsByNamespace_result getResult(I iface,
          +          getTableDescriptorsByNamespace_args args) throws org.apache.thrift.TException {
                   getTableDescriptorsByNamespace_result result = new getTableDescriptorsByNamespace_result();
                   try {
                     result.success = iface.getTableDescriptorsByNamespace(args.name);
          @@ -4619,7 +5462,8 @@ public getTableDescriptorsByNamespace_result getResult(I iface, getTableDescript
                 }
               }
           
          -    public static class getTableNamesByPattern<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getTableNamesByPattern_args> {
          +    public static class getTableNamesByPattern<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getTableNamesByPattern_args> {
                 public getTableNamesByPattern() {
                   super("getTableNamesByPattern");
                 }
          @@ -4637,7 +5481,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getTableNamesByPattern_result getResult(I iface, getTableNamesByPattern_args args) throws org.apache.thrift.TException {
          +      public getTableNamesByPattern_result getResult(I iface, getTableNamesByPattern_args args)
          +          throws org.apache.thrift.TException {
                   getTableNamesByPattern_result result = new getTableNamesByPattern_result();
                   try {
                     result.success = iface.getTableNamesByPattern(args.regex, args.includeSysTables);
          @@ -4648,7 +5493,8 @@ public getTableNamesByPattern_result getResult(I iface, getTableNamesByPattern_a
                 }
               }
           
          -    public static class getTableNamesByNamespace<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getTableNamesByNamespace_args> {
          +    public static class getTableNamesByNamespace<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getTableNamesByNamespace_args> {
                 public getTableNamesByNamespace() {
                   super("getTableNamesByNamespace");
                 }
          @@ -4666,7 +5512,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getTableNamesByNamespace_result getResult(I iface, getTableNamesByNamespace_args args) throws org.apache.thrift.TException {
          +      public getTableNamesByNamespace_result getResult(I iface, getTableNamesByNamespace_args args)
          +          throws org.apache.thrift.TException {
                   getTableNamesByNamespace_result result = new getTableNamesByNamespace_result();
                   try {
                     result.success = iface.getTableNamesByNamespace(args.name);
          @@ -4677,7 +5524,8 @@ public getTableNamesByNamespace_result getResult(I iface, getTableNamesByNamespa
                 }
               }
           
          -    public static class createTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, createTable_args> {
          +    public static class createTable<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, createTable_args> {
                 public createTable() {
                   super("createTable");
                 }
          @@ -4695,7 +5543,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public createTable_result getResult(I iface, createTable_args args) throws org.apache.thrift.TException {
          +      public createTable_result getResult(I iface, createTable_args args)
          +          throws org.apache.thrift.TException {
                   createTable_result result = new createTable_result();
                   try {
                     iface.createTable(args.desc, args.splitKeys);
          @@ -4706,7 +5555,8 @@ public createTable_result getResult(I iface, createTable_args args) throws org.a
                 }
               }
           
          -    public static class deleteTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteTable_args> {
          +    public static class deleteTable<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, deleteTable_args> {
                 public deleteTable() {
                   super("deleteTable");
                 }
          @@ -4724,7 +5574,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteTable_result getResult(I iface, deleteTable_args args) throws org.apache.thrift.TException {
          +      public deleteTable_result getResult(I iface, deleteTable_args args)
          +          throws org.apache.thrift.TException {
                   deleteTable_result result = new deleteTable_result();
                   try {
                     iface.deleteTable(args.tableName);
          @@ -4735,7 +5586,8 @@ public deleteTable_result getResult(I iface, deleteTable_args args) throws org.a
                 }
               }
           
          -    public static class truncateTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, truncateTable_args> {
          +    public static class truncateTable<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, truncateTable_args> {
                 public truncateTable() {
                   super("truncateTable");
                 }
          @@ -4753,7 +5605,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public truncateTable_result getResult(I iface, truncateTable_args args) throws org.apache.thrift.TException {
          +      public truncateTable_result getResult(I iface, truncateTable_args args)
          +          throws org.apache.thrift.TException {
                   truncateTable_result result = new truncateTable_result();
                   try {
                     iface.truncateTable(args.tableName, args.preserveSplits);
          @@ -4764,7 +5617,8 @@ public truncateTable_result getResult(I iface, truncateTable_args args) throws o
                 }
               }
           
          -    public static class enableTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, enableTable_args> {
          +    public static class enableTable<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, enableTable_args> {
                 public enableTable() {
                   super("enableTable");
                 }
          @@ -4782,7 +5636,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public enableTable_result getResult(I iface, enableTable_args args) throws org.apache.thrift.TException {
          +      public enableTable_result getResult(I iface, enableTable_args args)
          +          throws org.apache.thrift.TException {
                   enableTable_result result = new enableTable_result();
                   try {
                     iface.enableTable(args.tableName);
          @@ -4793,7 +5648,8 @@ public enableTable_result getResult(I iface, enableTable_args args) throws org.a
                 }
               }
           
          -    public static class disableTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, disableTable_args> {
          +    public static class disableTable<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, disableTable_args> {
                 public disableTable() {
                   super("disableTable");
                 }
          @@ -4811,7 +5667,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public disableTable_result getResult(I iface, disableTable_args args) throws org.apache.thrift.TException {
          +      public disableTable_result getResult(I iface, disableTable_args args)
          +          throws org.apache.thrift.TException {
                   disableTable_result result = new disableTable_result();
                   try {
                     iface.disableTable(args.tableName);
          @@ -4822,7 +5679,8 @@ public disableTable_result getResult(I iface, disableTable_args args) throws org
                 }
               }
           
          -    public static class isTableEnabled<I extends Iface> extends org.apache.thrift.ProcessFunction<I, isTableEnabled_args> {
          +    public static class isTableEnabled<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, isTableEnabled_args> {
                 public isTableEnabled() {
                   super("isTableEnabled");
                 }
          @@ -4840,7 +5698,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public isTableEnabled_result getResult(I iface, isTableEnabled_args args) throws org.apache.thrift.TException {
          +      public isTableEnabled_result getResult(I iface, isTableEnabled_args args)
          +          throws org.apache.thrift.TException {
                   isTableEnabled_result result = new isTableEnabled_result();
                   try {
                     result.success = iface.isTableEnabled(args.tableName);
          @@ -4852,7 +5711,8 @@ public isTableEnabled_result getResult(I iface, isTableEnabled_args args) throws
                 }
               }
           
          -    public static class isTableDisabled<I extends Iface> extends org.apache.thrift.ProcessFunction<I, isTableDisabled_args> {
          +    public static class isTableDisabled<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, isTableDisabled_args> {
                 public isTableDisabled() {
                   super("isTableDisabled");
                 }
          @@ -4870,7 +5730,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public isTableDisabled_result getResult(I iface, isTableDisabled_args args) throws org.apache.thrift.TException {
          +      public isTableDisabled_result getResult(I iface, isTableDisabled_args args)
          +          throws org.apache.thrift.TException {
                   isTableDisabled_result result = new isTableDisabled_result();
                   try {
                     result.success = iface.isTableDisabled(args.tableName);
          @@ -4882,7 +5743,8 @@ public isTableDisabled_result getResult(I iface, isTableDisabled_args args) thro
                 }
               }
           
          -    public static class isTableAvailable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, isTableAvailable_args> {
          +    public static class isTableAvailable<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, isTableAvailable_args> {
                 public isTableAvailable() {
                   super("isTableAvailable");
                 }
          @@ -4900,7 +5762,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public isTableAvailable_result getResult(I iface, isTableAvailable_args args) throws org.apache.thrift.TException {
          +      public isTableAvailable_result getResult(I iface, isTableAvailable_args args)
          +          throws org.apache.thrift.TException {
                   isTableAvailable_result result = new isTableAvailable_result();
                   try {
                     result.success = iface.isTableAvailable(args.tableName);
          @@ -4912,7 +5775,8 @@ public isTableAvailable_result getResult(I iface, isTableAvailable_args args) th
                 }
               }
           
          -    public static class isTableAvailableWithSplit<I extends Iface> extends org.apache.thrift.ProcessFunction<I, isTableAvailableWithSplit_args> {
          +    public static class isTableAvailableWithSplit<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, isTableAvailableWithSplit_args> {
                 public isTableAvailableWithSplit() {
                   super("isTableAvailableWithSplit");
                 }
          @@ -4930,7 +5794,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public isTableAvailableWithSplit_result getResult(I iface, isTableAvailableWithSplit_args args) throws org.apache.thrift.TException {
          +      public isTableAvailableWithSplit_result getResult(I iface,
          +          isTableAvailableWithSplit_args args) throws org.apache.thrift.TException {
                   isTableAvailableWithSplit_result result = new isTableAvailableWithSplit_result();
                   try {
                     result.success = iface.isTableAvailableWithSplit(args.tableName, args.splitKeys);
          @@ -4942,7 +5807,8 @@ public isTableAvailableWithSplit_result getResult(I iface, isTableAvailableWithS
                 }
               }
           
          -    public static class addColumnFamily<I extends Iface> extends org.apache.thrift.ProcessFunction<I, addColumnFamily_args> {
          +    public static class addColumnFamily<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, addColumnFamily_args> {
                 public addColumnFamily() {
                   super("addColumnFamily");
                 }
          @@ -4960,7 +5826,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public addColumnFamily_result getResult(I iface, addColumnFamily_args args) throws org.apache.thrift.TException {
          +      public addColumnFamily_result getResult(I iface, addColumnFamily_args args)
          +          throws org.apache.thrift.TException {
                   addColumnFamily_result result = new addColumnFamily_result();
                   try {
                     iface.addColumnFamily(args.tableName, args.column);
          @@ -4971,7 +5838,8 @@ public addColumnFamily_result getResult(I iface, addColumnFamily_args args) thro
                 }
               }
           
          -    public static class deleteColumnFamily<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteColumnFamily_args> {
          +    public static class deleteColumnFamily<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, deleteColumnFamily_args> {
                 public deleteColumnFamily() {
                   super("deleteColumnFamily");
                 }
          @@ -4989,7 +5857,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteColumnFamily_result getResult(I iface, deleteColumnFamily_args args) throws org.apache.thrift.TException {
          +      public deleteColumnFamily_result getResult(I iface, deleteColumnFamily_args args)
          +          throws org.apache.thrift.TException {
                   deleteColumnFamily_result result = new deleteColumnFamily_result();
                   try {
                     iface.deleteColumnFamily(args.tableName, args.column);
          @@ -5000,7 +5869,8 @@ public deleteColumnFamily_result getResult(I iface, deleteColumnFamily_args args
                 }
               }
           
          -    public static class modifyColumnFamily<I extends Iface> extends org.apache.thrift.ProcessFunction<I, modifyColumnFamily_args> {
          +    public static class modifyColumnFamily<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, modifyColumnFamily_args> {
                 public modifyColumnFamily() {
                   super("modifyColumnFamily");
                 }
          @@ -5018,7 +5888,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public modifyColumnFamily_result getResult(I iface, modifyColumnFamily_args args) throws org.apache.thrift.TException {
          +      public modifyColumnFamily_result getResult(I iface, modifyColumnFamily_args args)
          +          throws org.apache.thrift.TException {
                   modifyColumnFamily_result result = new modifyColumnFamily_result();
                   try {
                     iface.modifyColumnFamily(args.tableName, args.column);
          @@ -5029,7 +5900,8 @@ public modifyColumnFamily_result getResult(I iface, modifyColumnFamily_args args
                 }
               }
           
          -    public static class modifyTable<I extends Iface> extends org.apache.thrift.ProcessFunction<I, modifyTable_args> {
          +    public static class modifyTable<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, modifyTable_args> {
                 public modifyTable() {
                   super("modifyTable");
                 }
          @@ -5047,7 +5919,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public modifyTable_result getResult(I iface, modifyTable_args args) throws org.apache.thrift.TException {
          +      public modifyTable_result getResult(I iface, modifyTable_args args)
          +          throws org.apache.thrift.TException {
                   modifyTable_result result = new modifyTable_result();
                   try {
                     iface.modifyTable(args.desc);
          @@ -5058,7 +5931,8 @@ public modifyTable_result getResult(I iface, modifyTable_args args) throws org.a
                 }
               }
           
          -    public static class createNamespace<I extends Iface> extends org.apache.thrift.ProcessFunction<I, createNamespace_args> {
          +    public static class createNamespace<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, createNamespace_args> {
                 public createNamespace() {
                   super("createNamespace");
                 }
          @@ -5076,7 +5950,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public createNamespace_result getResult(I iface, createNamespace_args args) throws org.apache.thrift.TException {
          +      public createNamespace_result getResult(I iface, createNamespace_args args)
          +          throws org.apache.thrift.TException {
                   createNamespace_result result = new createNamespace_result();
                   try {
                     iface.createNamespace(args.namespaceDesc);
          @@ -5087,7 +5962,8 @@ public createNamespace_result getResult(I iface, createNamespace_args args) thro
                 }
               }
           
          -    public static class modifyNamespace<I extends Iface> extends org.apache.thrift.ProcessFunction<I, modifyNamespace_args> {
          +    public static class modifyNamespace<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, modifyNamespace_args> {
                 public modifyNamespace() {
                   super("modifyNamespace");
                 }
          @@ -5105,7 +5981,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public modifyNamespace_result getResult(I iface, modifyNamespace_args args) throws org.apache.thrift.TException {
          +      public modifyNamespace_result getResult(I iface, modifyNamespace_args args)
          +          throws org.apache.thrift.TException {
                   modifyNamespace_result result = new modifyNamespace_result();
                   try {
                     iface.modifyNamespace(args.namespaceDesc);
          @@ -5116,7 +5993,8 @@ public modifyNamespace_result getResult(I iface, modifyNamespace_args args) thro
                 }
               }
           
          -    public static class deleteNamespace<I extends Iface> extends org.apache.thrift.ProcessFunction<I, deleteNamespace_args> {
          +    public static class deleteNamespace<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, deleteNamespace_args> {
                 public deleteNamespace() {
                   super("deleteNamespace");
                 }
          @@ -5134,7 +6012,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public deleteNamespace_result getResult(I iface, deleteNamespace_args args) throws org.apache.thrift.TException {
          +      public deleteNamespace_result getResult(I iface, deleteNamespace_args args)
          +          throws org.apache.thrift.TException {
                   deleteNamespace_result result = new deleteNamespace_result();
                   try {
                     iface.deleteNamespace(args.name);
          @@ -5145,7 +6024,8 @@ public deleteNamespace_result getResult(I iface, deleteNamespace_args args) thro
                 }
               }
           
          -    public static class getNamespaceDescriptor<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getNamespaceDescriptor_args> {
          +    public static class getNamespaceDescriptor<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getNamespaceDescriptor_args> {
                 public getNamespaceDescriptor() {
                   super("getNamespaceDescriptor");
                 }
          @@ -5163,7 +6043,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getNamespaceDescriptor_result getResult(I iface, getNamespaceDescriptor_args args) throws org.apache.thrift.TException {
          +      public getNamespaceDescriptor_result getResult(I iface, getNamespaceDescriptor_args args)
          +          throws org.apache.thrift.TException {
                   getNamespaceDescriptor_result result = new getNamespaceDescriptor_result();
                   try {
                     result.success = iface.getNamespaceDescriptor(args.name);
          @@ -5174,7 +6055,8 @@ public getNamespaceDescriptor_result getResult(I iface, getNamespaceDescriptor_a
                 }
               }
           
          -    public static class listNamespaceDescriptors<I extends Iface> extends org.apache.thrift.ProcessFunction<I, listNamespaceDescriptors_args> {
          +    public static class listNamespaceDescriptors<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, listNamespaceDescriptors_args> {
                 public listNamespaceDescriptors() {
                   super("listNamespaceDescriptors");
                 }
          @@ -5192,7 +6074,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public listNamespaceDescriptors_result getResult(I iface, listNamespaceDescriptors_args args) throws org.apache.thrift.TException {
          +      public listNamespaceDescriptors_result getResult(I iface, listNamespaceDescriptors_args args)
          +          throws org.apache.thrift.TException {
                   listNamespaceDescriptors_result result = new listNamespaceDescriptors_result();
                   try {
                     result.success = iface.listNamespaceDescriptors();
          @@ -5203,7 +6086,8 @@ public listNamespaceDescriptors_result getResult(I iface, listNamespaceDescripto
                 }
               }
           
          -    public static class listNamespaces<I extends Iface> extends org.apache.thrift.ProcessFunction<I, listNamespaces_args> {
          +    public static class listNamespaces<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, listNamespaces_args> {
                 public listNamespaces() {
                   super("listNamespaces");
                 }
          @@ -5221,7 +6105,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public listNamespaces_result getResult(I iface, listNamespaces_args args) throws org.apache.thrift.TException {
          +      public listNamespaces_result getResult(I iface, listNamespaces_args args)
          +          throws org.apache.thrift.TException {
                   listNamespaces_result result = new listNamespaces_result();
                   try {
                     result.success = iface.listNamespaces();
          @@ -5232,7 +6117,8 @@ public listNamespaces_result getResult(I iface, listNamespaces_args args) throws
                 }
               }
           
          -    public static class getThriftServerType<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getThriftServerType_args> {
          +    public static class getThriftServerType<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getThriftServerType_args> {
                 public getThriftServerType() {
                   super("getThriftServerType");
                 }
          @@ -5250,14 +6136,16 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getThriftServerType_result getResult(I iface, getThriftServerType_args args) throws org.apache.thrift.TException {
          +      public getThriftServerType_result getResult(I iface, getThriftServerType_args args)
          +          throws org.apache.thrift.TException {
                   getThriftServerType_result result = new getThriftServerType_result();
                   result.success = iface.getThriftServerType();
                   return result;
                 }
               }
           
          -    public static class getClusterId<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getClusterId_args> {
          +    public static class getClusterId<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getClusterId_args> {
                 public getClusterId() {
                   super("getClusterId");
                 }
          @@ -5275,14 +6163,16 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getClusterId_result getResult(I iface, getClusterId_args args) throws org.apache.thrift.TException {
          +      public getClusterId_result getResult(I iface, getClusterId_args args)
          +          throws org.apache.thrift.TException {
                   getClusterId_result result = new getClusterId_result();
                   result.success = iface.getClusterId();
                   return result;
                 }
               }
           
          -    public static class getSlowLogResponses<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getSlowLogResponses_args> {
          +    public static class getSlowLogResponses<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, getSlowLogResponses_args> {
                 public getSlowLogResponses() {
                   super("getSlowLogResponses");
                 }
          @@ -5300,7 +6190,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public getSlowLogResponses_result getResult(I iface, getSlowLogResponses_args args) throws org.apache.thrift.TException {
          +      public getSlowLogResponses_result getResult(I iface, getSlowLogResponses_args args)
          +          throws org.apache.thrift.TException {
                   getSlowLogResponses_result result = new getSlowLogResponses_result();
                   try {
                     result.success = iface.getSlowLogResponses(args.serverNames, args.logQueryFilter);
          @@ -5311,7 +6202,8 @@ public getSlowLogResponses_result getResult(I iface, getSlowLogResponses_args ar
                 }
               }
           
          -    public static class clearSlowLogResponses<I extends Iface> extends org.apache.thrift.ProcessFunction<I, clearSlowLogResponses_args> {
          +    public static class clearSlowLogResponses<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, clearSlowLogResponses_args> {
                 public clearSlowLogResponses() {
                   super("clearSlowLogResponses");
                 }
          @@ -5329,7 +6221,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public clearSlowLogResponses_result getResult(I iface, clearSlowLogResponses_args args) throws org.apache.thrift.TException {
          +      public clearSlowLogResponses_result getResult(I iface, clearSlowLogResponses_args args)
          +          throws org.apache.thrift.TException {
                   clearSlowLogResponses_result result = new clearSlowLogResponses_result();
                   try {
                     result.success = iface.clearSlowLogResponses(args.serverNames);
          @@ -5340,7 +6233,8 @@ public clearSlowLogResponses_result getResult(I iface, clearSlowLogResponses_arg
                 }
               }
           
          -    public static class grant<I extends Iface> extends org.apache.thrift.ProcessFunction<I, grant_args> {
          +    public static class grant<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, grant_args> {
                 public grant() {
                   super("grant");
                 }
          @@ -5370,7 +6264,8 @@ public grant_result getResult(I iface, grant_args args) throws org.apache.thrift
                 }
               }
           
          -    public static class revoke<I extends Iface> extends org.apache.thrift.ProcessFunction<I, revoke_args> {
          +    public static class revoke<I extends Iface>
          +        extends org.apache.thrift.ProcessFunction<I, revoke_args> {
                 public revoke() {
                   super("revoke");
                 }
          @@ -5388,7 +6283,8 @@ protected boolean rethrowUnhandledExceptions() {
                   return false;
                 }
           
          -      public revoke_result getResult(I iface, revoke_args args) throws org.apache.thrift.TException {
          +      public revoke_result getResult(I iface, revoke_args args)
          +          throws org.apache.thrift.TException {
                   revoke_result result = new revoke_result();
                   try {
                     result.success = iface.revoke(args.info);
          @@ -5402,17 +6298,25 @@ public revoke_result getResult(I iface, revoke_args args) throws org.apache.thri
           
             }
           
          -  public static class AsyncProcessor<I extends AsyncIface> extends org.apache.thrift.TBaseAsyncProcessor<I> {
          -    private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(AsyncProcessor.class.getName());
          +  public static class AsyncProcessor<I extends AsyncIface>
          +      extends org.apache.thrift.TBaseAsyncProcessor<I> {
          +    private static final org.slf4j.Logger _LOGGER =
          +        org.slf4j.LoggerFactory.getLogger(AsyncProcessor.class.getName());
          +
               public AsyncProcessor(I iface) {
          -      super(iface, getProcessMap(new java.util.HashMap<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
          +      super(iface, getProcessMap(
          +        new java.util.HashMap<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
               }
           
          -    protected AsyncProcessor(I iface, java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
          +    protected AsyncProcessor(I iface,
          +        java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
                 super(iface, getProcessMap(processMap));
               }
           
          -    private static <I extends AsyncIface> java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> getProcessMap(java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
          +    private static <I extends AsyncIface>
          +        java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>
          +        getProcessMap(
          +            java.util.Map<java.lang.String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) {
                 processMap.put("exists", new exists());
                 processMap.put("existsAll", new existsAll());
                 processMap.put("get", new get());
          @@ -5468,7 +6372,8 @@ protected AsyncProcessor(I iface, java.util.Map<java.lang.String, org.apache.thr
                 return processMap;
               }
           
          -    public static class exists<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, exists_args, java.lang.Boolean> {
          +    public static class exists<I extends AsyncIface>
          +        extends org.apache.thrift.AsyncProcessFunction<I, exists_args, java.lang.Boolean> {
                 public exists() {
                   super("exists");
                 }
          @@ -5477,15 +6382,17 @@ public exists_args getEmptyArgsInstance() {
                   return new exists_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       exists_result result = new exists_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5494,6 +6401,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5509,14 +6417,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5529,12 +6438,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, exists_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
          -        iface.exists(args.table, args.tget,resultHandler);
          +      public void start(I iface, exists_args args,
          +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.exists(args.table, args.tget, resultHandler);
          +          throws org.apache.thrift.TException {
          +        iface.exists(args.table, args.tget, resultHandler);
                 }
               }
           
          -    public static class existsAll<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, existsAll_args, java.util.List<java.lang.Boolean>> {
          +    public static class existsAll<I extends AsyncIface> extends
          +        org.apache.thrift.AsyncProcessFunction<I, existsAll_args, java.util.List<java.lang.Boolean>> {
                 public existsAll() {
                   super("existsAll");
                 }
          @@ -5543,14 +6455,17 @@ public existsAll_args getEmptyArgsInstance() {
                   return new existsAll_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>>() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>>() {
                     public void onComplete(java.util.List<java.lang.Boolean> o) {
                       existsAll_result result = new existsAll_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5559,6 +6474,7 @@ public void onComplete(java.util.List<java.lang.Boolean> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5574,14 +6490,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5594,12 +6511,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, existsAll_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler) throws org.apache.thrift.TException {
           -        iface.existsAll(args.table, args.tgets,resultHandler);
           +      public void start(I iface, existsAll_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.existsAll(args.table, args.tgets, resultHandler);
                 }
               }
           
           -    public static class get<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_args, TResult> {
           +    public static class get<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, get_args, TResult> {
                 public get() {
                   super("get");
                 }
          @@ -5608,14 +6528,16 @@ public get_args getEmptyArgsInstance() {
                   return new get_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<TResult> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<TResult> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<TResult>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<TResult>() {
                     public void onComplete(TResult o) {
                       get_result result = new get_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5624,6 +6546,7 @@ public void onComplete(TResult o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5639,14 +6562,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5659,12 +6583,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, get_args args, org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler) throws org.apache.thrift.TException {
           -        iface.get(args.table, args.tget,resultHandler);
           +      public void start(I iface, get_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.get(args.table, args.tget, resultHandler);
                 }
               }
           
           -    public static class getMultiple<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getMultiple_args, java.util.List<TResult>> {
           +    public static class getMultiple<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getMultiple_args, java.util.List<TResult>> {
                 public getMultiple() {
                   super("getMultiple");
                 }
          @@ -5673,14 +6600,16 @@ public getMultiple_args getEmptyArgsInstance() {
                   return new getMultiple_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>>() {
                      public void onComplete(java.util.List<TResult> o) {
                       getMultiple_result result = new getMultiple_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -5689,6 +6618,7 @@ public void onComplete(java.util.List<TResult> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5704,14 +6634,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5724,12 +6655,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getMultiple_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getMultiple(args.table, args.tgets,resultHandler);
           +      public void start(I iface, getMultiple_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getMultiple(args.table, args.tgets, resultHandler);
                 }
               }
           
           -    public static class put<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, put_args, Void> {
           +    public static class put<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, put_args, Void> {
                 public put() {
                   super("put");
                 }
          @@ -5738,13 +6672,15 @@ public put_args getEmptyArgsInstance() {
                   return new put_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       put_result result = new put_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5753,6 +6689,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5768,14 +6705,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5788,12 +6726,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, put_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           -        iface.put(args.table, args.tput,resultHandler);
           +      public void start(I iface, put_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.put(args.table, args.tput, resultHandler);
                 }
               }
           
           -    public static class checkAndPut<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, checkAndPut_args, java.lang.Boolean> {
           +    public static class checkAndPut<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, checkAndPut_args, java.lang.Boolean> {
                 public checkAndPut() {
                   super("checkAndPut");
                 }
          @@ -5802,15 +6743,17 @@ public checkAndPut_args getEmptyArgsInstance() {
                   return new checkAndPut_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       checkAndPut_result result = new checkAndPut_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5819,6 +6762,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5834,14 +6778,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5854,12 +6799,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, checkAndPut_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           -        iface.checkAndPut(args.table, args.row, args.family, args.qualifier, args.value, args.tput,resultHandler);
           +      public void start(I iface, checkAndPut_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.checkAndPut(args.table, args.row, args.family, args.qualifier, args.value, args.tput,
          +          resultHandler);
                 }
               }
           
           -    public static class putMultiple<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, putMultiple_args, Void> {
           +    public static class putMultiple<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, putMultiple_args, Void> {
                 public putMultiple() {
                   super("putMultiple");
                 }
          @@ -5868,13 +6817,15 @@ public putMultiple_args getEmptyArgsInstance() {
                   return new putMultiple_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       putMultiple_result result = new putMultiple_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5883,6 +6834,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5898,14 +6850,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5918,12 +6871,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, putMultiple_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           -        iface.putMultiple(args.table, args.tputs,resultHandler);
           +      public void start(I iface, putMultiple_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.putMultiple(args.table, args.tputs, resultHandler);
                 }
               }
           
           -    public static class deleteSingle<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteSingle_args, Void> {
           +    public static class deleteSingle<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, deleteSingle_args, Void> {
                 public deleteSingle() {
                   super("deleteSingle");
                 }
          @@ -5932,13 +6888,15 @@ public deleteSingle_args getEmptyArgsInstance() {
                   return new deleteSingle_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       deleteSingle_result result = new deleteSingle_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -5947,6 +6905,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -5962,14 +6921,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -5982,12 +6942,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, deleteSingle_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           -        iface.deleteSingle(args.table, args.tdelete,resultHandler);
           +      public void start(I iface, deleteSingle_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteSingle(args.table, args.tdelete, resultHandler);
                 }
               }
           
           -    public static class deleteMultiple<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteMultiple_args, java.util.List<TDelete>> {
           +    public static class deleteMultiple<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, deleteMultiple_args, java.util.List<TDelete>> {
                 public deleteMultiple() {
                   super("deleteMultiple");
                 }
          @@ -5996,14 +6959,16 @@ public deleteMultiple_args getEmptyArgsInstance() {
                   return new deleteMultiple_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>>() {
                      public void onComplete(java.util.List<TDelete> o) {
                       deleteMultiple_result result = new deleteMultiple_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -6012,6 +6977,7 @@ public void onComplete(java.util.List<TDelete> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6027,14 +6993,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6047,12 +7014,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, deleteMultiple_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>> resultHandler) throws org.apache.thrift.TException {
           -        iface.deleteMultiple(args.table, args.tdeletes,resultHandler);
           +      public void start(I iface, deleteMultiple_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TDelete>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteMultiple(args.table, args.tdeletes, resultHandler);
                 }
               }
           
           -    public static class checkAndDelete<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, checkAndDelete_args, java.lang.Boolean> {
           +    public static class checkAndDelete<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, checkAndDelete_args, java.lang.Boolean> {
                 public checkAndDelete() {
                   super("checkAndDelete");
                 }
          @@ -6061,15 +7031,17 @@ public checkAndDelete_args getEmptyArgsInstance() {
                   return new checkAndDelete_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       checkAndDelete_result result = new checkAndDelete_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6078,6 +7050,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6093,14 +7066,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6113,12 +7087,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, checkAndDelete_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           -        iface.checkAndDelete(args.table, args.row, args.family, args.qualifier, args.value, args.tdelete,resultHandler);
           +      public void start(I iface, checkAndDelete_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.checkAndDelete(args.table, args.row, args.family, args.qualifier, args.value,
          +          args.tdelete, resultHandler);
                 }
               }
           
           -    public static class increment<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, increment_args, TResult> {
           +    public static class increment<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, increment_args, TResult> {
                 public increment() {
                   super("increment");
                 }
          @@ -6127,14 +7105,16 @@ public increment_args getEmptyArgsInstance() {
                   return new increment_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<TResult> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<TResult> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<TResult>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<TResult>() {
                     public void onComplete(TResult o) {
                       increment_result result = new increment_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6143,6 +7123,7 @@ public void onComplete(TResult o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6158,14 +7139,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6178,12 +7160,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, increment_args args, org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler) throws org.apache.thrift.TException {
           -        iface.increment(args.table, args.tincrement,resultHandler);
           +      public void start(I iface, increment_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.increment(args.table, args.tincrement, resultHandler);
                 }
               }
           
           -    public static class append<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_args, TResult> {
           +    public static class append<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, append_args, TResult> {
                 public append() {
                   super("append");
                 }
          @@ -6192,14 +7177,16 @@ public append_args getEmptyArgsInstance() {
                   return new append_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<TResult> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<TResult> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<TResult>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<TResult>() {
                     public void onComplete(TResult o) {
                       append_result result = new append_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6208,6 +7195,7 @@ public void onComplete(TResult o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6223,14 +7211,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6243,12 +7232,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, append_args args, org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler) throws org.apache.thrift.TException {
           -        iface.append(args.table, args.tappend,resultHandler);
           +      public void start(I iface, append_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<TResult> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.append(args.table, args.tappend, resultHandler);
                 }
               }
           
           -    public static class openScanner<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, openScanner_args, java.lang.Integer> {
           +    public static class openScanner<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, openScanner_args, java.lang.Integer> {
                 public openScanner() {
                   super("openScanner");
                 }
          @@ -6257,15 +7249,17 @@ public openScanner_args getEmptyArgsInstance() {
                   return new openScanner_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer>() {
                     public void onComplete(java.lang.Integer o) {
                       openScanner_result result = new openScanner_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6274,6 +7268,7 @@ public void onComplete(java.lang.Integer o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6289,14 +7284,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6309,12 +7305,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, openScanner_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler) throws org.apache.thrift.TException {
           -        iface.openScanner(args.table, args.tscan,resultHandler);
           +      public void start(I iface, openScanner_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Integer> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.openScanner(args.table, args.tscan, resultHandler);
                 }
               }
           
           -    public static class getScannerRows<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getScannerRows_args, java.util.List<TResult>> {
           +    public static class getScannerRows<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getScannerRows_args, java.util.List<TResult>> {
                 public getScannerRows() {
                   super("getScannerRows");
                 }
          @@ -6323,14 +7322,16 @@ public getScannerRows_args getEmptyArgsInstance() {
                   return new getScannerRows_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>>() {
                      public void onComplete(java.util.List<TResult> o) {
                       getScannerRows_result result = new getScannerRows_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -6339,6 +7340,7 @@ public void onComplete(java.util.List<TResult> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6358,14 +7360,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6378,12 +7381,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getScannerRows_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getScannerRows(args.scannerId, args.numRows,resultHandler);
           +      public void start(I iface, getScannerRows_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getScannerRows(args.scannerId, args.numRows, resultHandler);
                 }
               }
           
           -    public static class closeScanner<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, closeScanner_args, Void> {
           +    public static class closeScanner<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, closeScanner_args, Void> {
                 public closeScanner() {
                   super("closeScanner");
                 }
          @@ -6392,13 +7398,15 @@ public closeScanner_args getEmptyArgsInstance() {
                   return new closeScanner_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       closeScanner_result result = new closeScanner_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6407,6 +7415,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6426,14 +7435,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6446,12 +7456,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, closeScanner_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           -        iface.closeScanner(args.scannerId,resultHandler);
           +      public void start(I iface, closeScanner_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.closeScanner(args.scannerId, resultHandler);
                 }
               }
           
           -    public static class mutateRow<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, mutateRow_args, Void> {
           +    public static class mutateRow<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, mutateRow_args, Void> {
                 public mutateRow() {
                   super("mutateRow");
                 }
          @@ -6460,13 +7473,15 @@ public mutateRow_args getEmptyArgsInstance() {
                   return new mutateRow_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       mutateRow_result result = new mutateRow_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6475,6 +7490,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6490,14 +7506,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6510,12 +7527,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, mutateRow_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           -        iface.mutateRow(args.table, args.trowMutations,resultHandler);
           +      public void start(I iface, mutateRow_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.mutateRow(args.table, args.trowMutations, resultHandler);
                 }
               }
           
           -    public static class getScannerResults<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getScannerResults_args, java.util.List<TResult>> {
           +    public static class getScannerResults<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getScannerResults_args, java.util.List<TResult>> {
                 public getScannerResults() {
                   super("getScannerResults");
                 }
          @@ -6524,14 +7544,16 @@ public getScannerResults_args getEmptyArgsInstance() {
                   return new getScannerResults_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>>() {
                      public void onComplete(java.util.List<TResult> o) {
                       getScannerResults_result result = new getScannerResults_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -6540,6 +7562,7 @@ public void onComplete(java.util.List<TResult> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6555,14 +7578,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6575,12 +7599,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getScannerResults_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getScannerResults(args.table, args.tscan, args.numRows,resultHandler);
           +      public void start(I iface, getScannerResults_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TResult>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getScannerResults(args.table, args.tscan, args.numRows, resultHandler);
                 }
               }
           
           -    public static class getRegionLocation<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getRegionLocation_args, THRegionLocation> {
           +    public static class getRegionLocation<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getRegionLocation_args, THRegionLocation> {
                 public getRegionLocation() {
                   super("getRegionLocation");
                 }
          @@ -6589,14 +7616,16 @@ public getRegionLocation_args getEmptyArgsInstance() {
                   return new getRegionLocation_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<THRegionLocation> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<THRegionLocation> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<THRegionLocation>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<THRegionLocation>() {
                     public void onComplete(THRegionLocation o) {
                       getRegionLocation_result result = new getRegionLocation_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6605,6 +7634,7 @@ public void onComplete(THRegionLocation o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6620,14 +7650,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6640,12 +7671,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getRegionLocation_args args, org.apache.thrift.async.AsyncMethodCallback<THRegionLocation> resultHandler) throws org.apache.thrift.TException {
           -        iface.getRegionLocation(args.table, args.row, args.reload,resultHandler);
           +      public void start(I iface, getRegionLocation_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<THRegionLocation> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getRegionLocation(args.table, args.row, args.reload, resultHandler);
                 }
               }
           
           -    public static class getAllRegionLocations<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getAllRegionLocations_args, java.util.List<THRegionLocation>> {
           +    public static class getAllRegionLocations<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getAllRegionLocations_args, java.util.List<THRegionLocation>> {
                 public getAllRegionLocations() {
                   super("getAllRegionLocations");
                 }
          @@ -6654,14 +7688,17 @@ public getAllRegionLocations_args getEmptyArgsInstance() {
                   return new getAllRegionLocations_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>>() {
                      public void onComplete(java.util.List<THRegionLocation> o) {
                       getAllRegionLocations_result result = new getAllRegionLocations_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -6670,6 +7707,7 @@ public void onComplete(java.util.List<THRegionLocation> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6685,14 +7723,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6705,12 +7744,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getAllRegionLocations_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getAllRegionLocations(args.table,resultHandler);
           +      public void start(I iface, getAllRegionLocations_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<THRegionLocation>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getAllRegionLocations(args.table, resultHandler);
                 }
               }
           
           -    public static class checkAndMutate<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, checkAndMutate_args, java.lang.Boolean> {
           +    public static class checkAndMutate<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, checkAndMutate_args, java.lang.Boolean> {
                 public checkAndMutate() {
                   super("checkAndMutate");
                 }
          @@ -6719,15 +7761,17 @@ public checkAndMutate_args getEmptyArgsInstance() {
                   return new checkAndMutate_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       checkAndMutate_result result = new checkAndMutate_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6736,6 +7780,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6751,14 +7796,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6771,12 +7817,16 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, checkAndMutate_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           -        iface.checkAndMutate(args.table, args.row, args.family, args.qualifier, args.compareOperator, args.value, args.rowMutations,resultHandler);
           +      public void start(I iface, checkAndMutate_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.checkAndMutate(args.table, args.row, args.family, args.qualifier,
          +          args.compareOperator, args.value, args.rowMutations, resultHandler);
                 }
               }
           
           -    public static class getTableDescriptor<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getTableDescriptor_args, TTableDescriptor> {
           +    public static class getTableDescriptor<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getTableDescriptor_args, TTableDescriptor> {
                 public getTableDescriptor() {
                   super("getTableDescriptor");
                 }
          @@ -6785,14 +7835,16 @@ public getTableDescriptor_args getEmptyArgsInstance() {
                   return new getTableDescriptor_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<TTableDescriptor> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<TTableDescriptor> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<TTableDescriptor>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<TTableDescriptor>() {
                     public void onComplete(TTableDescriptor o) {
                       getTableDescriptor_result result = new getTableDescriptor_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6801,6 +7853,7 @@ public void onComplete(TTableDescriptor o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6816,14 +7869,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6836,12 +7890,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getTableDescriptor_args args, org.apache.thrift.async.AsyncMethodCallback<TTableDescriptor> resultHandler) throws org.apache.thrift.TException {
           -        iface.getTableDescriptor(args.table,resultHandler);
           +      public void start(I iface, getTableDescriptor_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<TTableDescriptor> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getTableDescriptor(args.table, resultHandler);
                 }
               }
           
           -    public static class getTableDescriptors<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getTableDescriptors_args, java.util.List<TTableDescriptor>> {
           +    public static class getTableDescriptors<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getTableDescriptors_args, java.util.List<TTableDescriptor>> {
                 public getTableDescriptors() {
                   super("getTableDescriptors");
                 }
          @@ -6850,14 +7907,17 @@ public getTableDescriptors_args getEmptyArgsInstance() {
                   return new getTableDescriptors_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>>() {
                      public void onComplete(java.util.List<TTableDescriptor> o) {
                       getTableDescriptors_result result = new getTableDescriptors_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -6866,6 +7926,7 @@ public void onComplete(java.util.List<TTableDescriptor> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6881,14 +7942,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6901,12 +7963,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getTableDescriptors_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getTableDescriptors(args.tables,resultHandler);
           +      public void start(I iface, getTableDescriptors_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getTableDescriptors(args.tables, resultHandler);
                 }
               }
           
           -    public static class tableExists<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, tableExists_args, java.lang.Boolean> {
           +    public static class tableExists<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, tableExists_args, java.lang.Boolean> {
                 public tableExists() {
                   super("tableExists");
                 }
          @@ -6915,15 +7980,17 @@ public tableExists_args getEmptyArgsInstance() {
                   return new tableExists_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       tableExists_result result = new tableExists_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -6932,6 +7999,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -6947,14 +8015,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -6967,12 +8036,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, tableExists_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
           -        iface.tableExists(args.tableName,resultHandler);
           +      public void start(I iface, tableExists_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.tableExists(args.tableName, resultHandler);
                 }
               }
           
           -    public static class getTableDescriptorsByPattern<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getTableDescriptorsByPattern_args, java.util.List<TTableDescriptor>> {
           +    public static class getTableDescriptorsByPattern<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getTableDescriptorsByPattern_args, java.util.List<TTableDescriptor>> {
                 public getTableDescriptorsByPattern() {
                   super("getTableDescriptorsByPattern");
                 }
          @@ -6981,14 +8053,17 @@ public getTableDescriptorsByPattern_args getEmptyArgsInstance() {
                   return new getTableDescriptorsByPattern_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>>() {
                      public void onComplete(java.util.List<TTableDescriptor> o) {
                       getTableDescriptorsByPattern_result result = new getTableDescriptorsByPattern_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -6997,6 +8072,7 @@ public void onComplete(java.util.List<TTableDescriptor> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7012,14 +8088,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7032,12 +8109,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getTableDescriptorsByPattern_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getTableDescriptorsByPattern(args.regex, args.includeSysTables,resultHandler);
           +      public void start(I iface, getTableDescriptorsByPattern_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getTableDescriptorsByPattern(args.regex, args.includeSysTables, resultHandler);
                 }
               }
           
           -    public static class getTableDescriptorsByNamespace<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getTableDescriptorsByNamespace_args, java.util.List<TTableDescriptor>> {
           +    public static class getTableDescriptorsByNamespace<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getTableDescriptorsByNamespace_args, java.util.List<TTableDescriptor>> {
                 public getTableDescriptorsByNamespace() {
                   super("getTableDescriptorsByNamespace");
                 }
          @@ -7046,14 +8126,18 @@ public getTableDescriptorsByNamespace_args getEmptyArgsInstance() {
                   return new getTableDescriptorsByNamespace_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>>() {
                      public void onComplete(java.util.List<TTableDescriptor> o) {
          -            getTableDescriptorsByNamespace_result result = new getTableDescriptorsByNamespace_result();
          +            getTableDescriptorsByNamespace_result result =
          +                new getTableDescriptorsByNamespace_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -7062,10 +8146,12 @@ public void onComplete(java.util.List<TTableDescriptor> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          -            getTableDescriptorsByNamespace_result result = new getTableDescriptorsByNamespace_result();
          +            getTableDescriptorsByNamespace_result result =
          +                new getTableDescriptorsByNamespace_result();
                       if (e instanceof TIOError) {
                         result.io = (TIOError) e;
                         result.setIoIsSet(true);
          @@ -7077,14 +8163,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7097,12 +8184,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getTableDescriptorsByNamespace_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getTableDescriptorsByNamespace(args.name,resultHandler);
           +      public void start(I iface, getTableDescriptorsByNamespace_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableDescriptor>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getTableDescriptorsByNamespace(args.name, resultHandler);
                 }
               }
           
           -    public static class getTableNamesByPattern<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getTableNamesByPattern_args, java.util.List<TTableName>> {
           +    public static class getTableNamesByPattern<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getTableNamesByPattern_args, java.util.List<TTableName>> {
                 public getTableNamesByPattern() {
                   super("getTableNamesByPattern");
                 }
          @@ -7111,14 +8201,17 @@ public getTableNamesByPattern_args getEmptyArgsInstance() {
                   return new getTableNamesByPattern_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>>() {
                      public void onComplete(java.util.List<TTableName> o) {
                       getTableNamesByPattern_result result = new getTableNamesByPattern_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -7127,6 +8220,7 @@ public void onComplete(java.util.List<TTableName> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7142,14 +8236,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7162,12 +8257,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getTableNamesByPattern_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getTableNamesByPattern(args.regex, args.includeSysTables,resultHandler);
           +      public void start(I iface, getTableNamesByPattern_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getTableNamesByPattern(args.regex, args.includeSysTables, resultHandler);
                 }
               }
           
           -    public static class getTableNamesByNamespace<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getTableNamesByNamespace_args, java.util.List<TTableName>> {
           +    public static class getTableNamesByNamespace<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getTableNamesByNamespace_args, java.util.List<TTableName>> {
                 public getTableNamesByNamespace() {
                   super("getTableNamesByNamespace");
                 }
          @@ -7176,14 +8274,17 @@ public getTableNamesByNamespace_args getEmptyArgsInstance() {
                   return new getTableNamesByNamespace_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>>
          +          getResultHandler(
          +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>>() {
                      public void onComplete(java.util.List<TTableName> o) {
                       getTableNamesByNamespace_result result = new getTableNamesByNamespace_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
           @@ -7192,6 +8293,7 @@ public void onComplete(java.util.List<TTableName> o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7207,14 +8309,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7227,12 +8330,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getTableNamesByNamespace_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getTableNamesByNamespace(args.name,resultHandler);
           +      public void start(I iface, getTableNamesByNamespace_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TTableName>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getTableNamesByNamespace(args.name, resultHandler);
                 }
               }
           
           -    public static class createTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, createTable_args, Void> {
           +    public static class createTable<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, createTable_args, Void> {
                 public createTable() {
                   super("createTable");
                 }
          @@ -7241,13 +8347,15 @@ public createTable_args getEmptyArgsInstance() {
                   return new createTable_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       createTable_result result = new createTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7256,6 +8364,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7271,14 +8380,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7291,12 +8401,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, createTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
           -        iface.createTable(args.desc, args.splitKeys,resultHandler);
           +      public void start(I iface, createTable_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.createTable(args.desc, args.splitKeys, resultHandler);
                 }
               }
           
           -    public static class deleteTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteTable_args, Void> {
           +    public static class deleteTable<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, deleteTable_args, Void> {
                 public deleteTable() {
                   super("deleteTable");
                 }
          @@ -7305,13 +8418,15 @@ public deleteTable_args getEmptyArgsInstance() {
                   return new deleteTable_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       deleteTable_result result = new deleteTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7320,6 +8435,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7335,14 +8451,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7355,12 +8472,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, deleteTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.deleteTable(args.tableName,resultHandler);
+      public void start(I iface, deleteTable_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteTable(args.tableName, resultHandler);
                 }
               }
           
-    public static class truncateTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, truncateTable_args, Void> {
+    public static class truncateTable<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, truncateTable_args, Void> {
                 public truncateTable() {
                   super("truncateTable");
                 }
          @@ -7369,13 +8489,15 @@ public truncateTable_args getEmptyArgsInstance() {
                   return new truncateTable_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       truncateTable_result result = new truncateTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7384,6 +8506,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7399,14 +8522,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7419,12 +8543,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, truncateTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.truncateTable(args.tableName, args.preserveSplits,resultHandler);
+      public void start(I iface, truncateTable_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.truncateTable(args.tableName, args.preserveSplits, resultHandler);
                 }
               }
           
-    public static class enableTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, enableTable_args, Void> {
+    public static class enableTable<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, enableTable_args, Void> {
                 public enableTable() {
                   super("enableTable");
                 }
          @@ -7433,13 +8560,15 @@ public enableTable_args getEmptyArgsInstance() {
                   return new enableTable_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       enableTable_result result = new enableTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7448,6 +8577,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7463,14 +8593,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7483,12 +8614,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, enableTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.enableTable(args.tableName,resultHandler);
+      public void start(I iface, enableTable_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.enableTable(args.tableName, resultHandler);
                 }
               }
           
-    public static class disableTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, disableTable_args, Void> {
+    public static class disableTable<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, disableTable_args, Void> {
                 public disableTable() {
                   super("disableTable");
                 }
          @@ -7497,13 +8631,15 @@ public disableTable_args getEmptyArgsInstance() {
                   return new disableTable_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       disableTable_result result = new disableTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7512,6 +8648,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7527,14 +8664,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7547,12 +8685,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, disableTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.disableTable(args.tableName,resultHandler);
+      public void start(I iface, disableTable_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.disableTable(args.tableName, resultHandler);
                 }
               }
           
-    public static class isTableEnabled<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, isTableEnabled_args, java.lang.Boolean> {
+    public static class isTableEnabled<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, isTableEnabled_args, java.lang.Boolean> {
                 public isTableEnabled() {
                   super("isTableEnabled");
                 }
          @@ -7561,15 +8702,17 @@ public isTableEnabled_args getEmptyArgsInstance() {
                   return new isTableEnabled_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       isTableEnabled_result result = new isTableEnabled_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7578,6 +8721,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7593,14 +8737,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7613,12 +8758,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, isTableEnabled_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
-        iface.isTableEnabled(args.tableName,resultHandler);
+      public void start(I iface, isTableEnabled_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.isTableEnabled(args.tableName, resultHandler);
                 }
               }
           
-    public static class isTableDisabled<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, isTableDisabled_args, java.lang.Boolean> {
+    public static class isTableDisabled<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, isTableDisabled_args, java.lang.Boolean> {
                 public isTableDisabled() {
                   super("isTableDisabled");
                 }
          @@ -7627,15 +8775,17 @@ public isTableDisabled_args getEmptyArgsInstance() {
                   return new isTableDisabled_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       isTableDisabled_result result = new isTableDisabled_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7644,6 +8794,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7659,14 +8810,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7679,12 +8831,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, isTableDisabled_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
-        iface.isTableDisabled(args.tableName,resultHandler);
+      public void start(I iface, isTableDisabled_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.isTableDisabled(args.tableName, resultHandler);
                 }
               }
           
-    public static class isTableAvailable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, isTableAvailable_args, java.lang.Boolean> {
+    public static class isTableAvailable<I extends AsyncIface> extends
+        org.apache.thrift.AsyncProcessFunction<I, isTableAvailable_args, java.lang.Boolean> {
                 public isTableAvailable() {
                   super("isTableAvailable");
                 }
          @@ -7693,15 +8848,17 @@ public isTableAvailable_args getEmptyArgsInstance() {
                   return new isTableAvailable_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       isTableAvailable_result result = new isTableAvailable_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7710,6 +8867,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7725,14 +8883,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7745,12 +8904,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, isTableAvailable_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
-        iface.isTableAvailable(args.tableName,resultHandler);
+      public void start(I iface, isTableAvailable_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.isTableAvailable(args.tableName, resultHandler);
                 }
               }
           
-    public static class isTableAvailableWithSplit<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, isTableAvailableWithSplit_args, java.lang.Boolean> {
+    public static class isTableAvailableWithSplit<I extends AsyncIface> extends
+        org.apache.thrift.AsyncProcessFunction<I, isTableAvailableWithSplit_args, java.lang.Boolean> {
                 public isTableAvailableWithSplit() {
                   super("isTableAvailableWithSplit");
                 }
          @@ -7759,15 +8921,17 @@ public isTableAvailableWithSplit_args getEmptyArgsInstance() {
                   return new isTableAvailableWithSplit_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean>() {
                     public void onComplete(java.lang.Boolean o) {
                       isTableAvailableWithSplit_result result = new isTableAvailableWithSplit_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7776,6 +8940,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7791,14 +8956,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7811,12 +8977,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, isTableAvailableWithSplit_args args, org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler) throws org.apache.thrift.TException {
-        iface.isTableAvailableWithSplit(args.tableName, args.splitKeys,resultHandler);
+      public void start(I iface, isTableAvailableWithSplit_args args,
+          org.apache.thrift.async.AsyncMethodCallback<java.lang.Boolean> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.isTableAvailableWithSplit(args.tableName, args.splitKeys, resultHandler);
                 }
               }
           
-    public static class addColumnFamily<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, addColumnFamily_args, Void> {
+    public static class addColumnFamily<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, addColumnFamily_args, Void> {
                 public addColumnFamily() {
                   super("addColumnFamily");
                 }
          @@ -7825,13 +8994,15 @@ public addColumnFamily_args getEmptyArgsInstance() {
                   return new addColumnFamily_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       addColumnFamily_result result = new addColumnFamily_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7840,6 +9011,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7855,14 +9027,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7875,12 +9048,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, addColumnFamily_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.addColumnFamily(args.tableName, args.column,resultHandler);
+      public void start(I iface, addColumnFamily_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.addColumnFamily(args.tableName, args.column, resultHandler);
                 }
               }
           
-    public static class deleteColumnFamily<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteColumnFamily_args, Void> {
+    public static class deleteColumnFamily<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, deleteColumnFamily_args, Void> {
                 public deleteColumnFamily() {
                   super("deleteColumnFamily");
                 }
          @@ -7889,13 +9065,15 @@ public deleteColumnFamily_args getEmptyArgsInstance() {
                   return new deleteColumnFamily_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       deleteColumnFamily_result result = new deleteColumnFamily_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7904,6 +9082,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7919,14 +9098,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -7939,12 +9119,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, deleteColumnFamily_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.deleteColumnFamily(args.tableName, args.column,resultHandler);
+      public void start(I iface, deleteColumnFamily_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteColumnFamily(args.tableName, args.column, resultHandler);
                 }
               }
           
-    public static class modifyColumnFamily<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, modifyColumnFamily_args, Void> {
+    public static class modifyColumnFamily<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, modifyColumnFamily_args, Void> {
                 public modifyColumnFamily() {
                   super("modifyColumnFamily");
                 }
          @@ -7953,13 +9136,15 @@ public modifyColumnFamily_args getEmptyArgsInstance() {
                   return new modifyColumnFamily_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       modifyColumnFamily_result result = new modifyColumnFamily_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -7968,6 +9153,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -7983,14 +9169,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8003,12 +9190,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, modifyColumnFamily_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.modifyColumnFamily(args.tableName, args.column,resultHandler);
+      public void start(I iface, modifyColumnFamily_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.modifyColumnFamily(args.tableName, args.column, resultHandler);
                 }
               }
           
-    public static class modifyTable<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, modifyTable_args, Void> {
+    public static class modifyTable<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, modifyTable_args, Void> {
                 public modifyTable() {
                   super("modifyTable");
                 }
          @@ -8017,13 +9207,15 @@ public modifyTable_args getEmptyArgsInstance() {
                   return new modifyTable_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       modifyTable_result result = new modifyTable_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8032,6 +9224,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8047,14 +9240,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8067,12 +9261,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, modifyTable_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.modifyTable(args.desc,resultHandler);
+      public void start(I iface, modifyTable_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.modifyTable(args.desc, resultHandler);
                 }
               }
           
-    public static class createNamespace<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, createNamespace_args, Void> {
+    public static class createNamespace<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, createNamespace_args, Void> {
                 public createNamespace() {
                   super("createNamespace");
                 }
          @@ -8081,13 +9278,15 @@ public createNamespace_args getEmptyArgsInstance() {
                   return new createNamespace_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       createNamespace_result result = new createNamespace_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8096,6 +9295,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8111,14 +9311,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8131,12 +9332,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
-      public void start(I iface, createNamespace_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws org.apache.thrift.TException {
-        iface.createNamespace(args.namespaceDesc,resultHandler);
+      public void start(I iface, createNamespace_args args,
+          org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.createNamespace(args.namespaceDesc, resultHandler);
                 }
               }
           
-    public static class modifyNamespace<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, modifyNamespace_args, Void> {
+    public static class modifyNamespace<I extends AsyncIface>
+        extends org.apache.thrift.AsyncProcessFunction<I, modifyNamespace_args, Void> {
                 public modifyNamespace() {
                   super("modifyNamespace");
                 }
          @@ -8145,13 +9349,15 @@ public modifyNamespace_args getEmptyArgsInstance() {
                   return new modifyNamespace_args();
                 }
           
-      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
+      public org.apache.thrift.async.AsyncMethodCallback<Void> getResultHandler(
+          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
+          final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new org.apache.thrift.async.AsyncMethodCallback<Void>() { 
+        return new org.apache.thrift.async.AsyncMethodCallback<Void>() {
                     public void onComplete(Void o) {
                       modifyNamespace_result result = new modifyNamespace_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8160,6 +9366,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8175,14 +9382,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8195,12 +9403,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, modifyNamespace_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          -        iface.modifyNamespace(args.namespaceDesc,resultHandler);
          +      public void start(I iface, modifyNamespace_args args,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.modifyNamespace(args.namespaceDesc, resultHandler);
                 }
               }
           
           -    public static class deleteNamespace<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, deleteNamespace_args, Void> {
           +    public static class deleteNamespace<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, deleteNamespace_args, Void> {
                 public deleteNamespace() {
                   super("deleteNamespace");
                 }
          @@ -8209,13 +9420,15 @@ public deleteNamespace_args getEmptyArgsInstance() {
                   return new deleteNamespace_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback() {
                     public void onComplete(Void o) {
                       deleteNamespace_result result = new deleteNamespace_result();
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8224,6 +9437,7 @@ public void onComplete(Void o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8239,14 +9453,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8259,12 +9474,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, deleteNamespace_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          -        iface.deleteNamespace(args.name,resultHandler);
          +      public void start(I iface, deleteNamespace_args args,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.deleteNamespace(args.name, resultHandler);
                 }
               }
           
           -    public static class getNamespaceDescriptor<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getNamespaceDescriptor_args, TNamespaceDescriptor> {
           +    public static class getNamespaceDescriptor<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getNamespaceDescriptor_args, TNamespaceDescriptor> {
                 public getNamespaceDescriptor() {
                   super("getNamespaceDescriptor");
                 }
          @@ -8273,14 +9491,16 @@ public getNamespaceDescriptor_args getEmptyArgsInstance() {
                   return new getNamespaceDescriptor_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback() {
                     public void onComplete(TNamespaceDescriptor o) {
                       getNamespaceDescriptor_result result = new getNamespaceDescriptor_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8289,6 +9509,7 @@ public void onComplete(TNamespaceDescriptor o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8304,14 +9525,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8324,12 +9546,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getNamespaceDescriptor_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          -        iface.getNamespaceDescriptor(args.name,resultHandler);
          +      public void start(I iface, getNamespaceDescriptor_args args,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getNamespaceDescriptor(args.name, resultHandler);
                 }
               }
           
           -    public static class listNamespaceDescriptors<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, listNamespaceDescriptors_args, java.util.List<TNamespaceDescriptor>> {
           +    public static class listNamespaceDescriptors<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, listNamespaceDescriptors_args, java.util.List<TNamespaceDescriptor>> {
                 public listNamespaceDescriptors() {
                   super("listNamespaceDescriptors");
                 }
          @@ -8338,14 +9563,17 @@ public listNamespaceDescriptors_args getEmptyArgsInstance() {
                   return new listNamespaceDescriptors_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>>
           +          getResultHandler(
           +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
           +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>>() {
                      public void onComplete(java.util.List<TNamespaceDescriptor> o) {
                       listNamespaceDescriptors_result result = new listNamespaceDescriptors_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8354,6 +9582,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8369,14 +9598,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8389,12 +9619,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, listNamespaceDescriptors_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>> resultHandler) throws org.apache.thrift.TException {
           +      public void start(I iface, listNamespaceDescriptors_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TNamespaceDescriptor>> resultHandler)
          +          throws org.apache.thrift.TException {
                   iface.listNamespaceDescriptors(resultHandler);
                 }
               }
           
           -    public static class listNamespaces<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, listNamespaces_args, java.util.List<java.lang.String>> {
           +    public static class listNamespaces<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, listNamespaces_args, java.util.List<java.lang.String>> {
                 public listNamespaces() {
                   super("listNamespaces");
                 }
          @@ -8403,14 +9636,17 @@ public listNamespaces_args getEmptyArgsInstance() {
                   return new listNamespaces_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>>
           +          getResultHandler(
           +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
           +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>>() {
                      public void onComplete(java.util.List<java.lang.String> o) {
                       listNamespaces_result result = new listNamespaces_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8419,6 +9655,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8434,14 +9671,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8454,12 +9692,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, listNamespaces_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>> resultHandler) throws org.apache.thrift.TException {
           +      public void start(I iface, listNamespaces_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.String>> resultHandler)
          +          throws org.apache.thrift.TException {
                   iface.listNamespaces(resultHandler);
                 }
               }
           
           -    public static class getThriftServerType<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getThriftServerType_args, TThriftServerType> {
           +    public static class getThriftServerType<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getThriftServerType_args, TThriftServerType> {
                 public getThriftServerType() {
                   super("getThriftServerType");
                 }
          @@ -8468,14 +9709,16 @@ public getThriftServerType_args getEmptyArgsInstance() {
                   return new getThriftServerType_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback() {
                     public void onComplete(TThriftServerType o) {
                       getThriftServerType_result result = new getThriftServerType_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8484,6 +9727,7 @@ public void onComplete(TThriftServerType o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8495,14 +9739,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8515,12 +9760,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getThriftServerType_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          +      public void start(I iface, getThriftServerType_args args,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +          throws org.apache.thrift.TException {
                   iface.getThriftServerType(resultHandler);
                 }
               }
           
           -    public static class getClusterId<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getClusterId_args, java.lang.String> {
           +    public static class getClusterId<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, getClusterId_args, java.lang.String> {
                 public getClusterId() {
                   super("getClusterId");
                 }
          @@ -8529,14 +9777,16 @@ public getClusterId_args getEmptyArgsInstance() {
                   return new getClusterId_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback() {
                     public void onComplete(java.lang.String o) {
                       getClusterId_result result = new getClusterId_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8545,6 +9795,7 @@ public void onComplete(java.lang.String o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8556,14 +9807,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8576,12 +9828,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, getClusterId_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          +      public void start(I iface, getClusterId_args args,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +          throws org.apache.thrift.TException {
                   iface.getClusterId(resultHandler);
                 }
               }
           
           -    public static class getSlowLogResponses<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getSlowLogResponses_args, java.util.List<TOnlineLogRecord>> {
           +    public static class getSlowLogResponses<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, getSlowLogResponses_args, java.util.List<TOnlineLogRecord>> {
                 public getSlowLogResponses() {
                   super("getSlowLogResponses");
                 }
          @@ -8590,14 +9845,17 @@ public getSlowLogResponses_args getEmptyArgsInstance() {
                   return new getSlowLogResponses_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>>
           +          getResultHandler(
           +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
           +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>>() {
                      public void onComplete(java.util.List<TOnlineLogRecord> o) {
                       getSlowLogResponses_result result = new getSlowLogResponses_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8606,6 +9864,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8621,14 +9880,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8641,12 +9901,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, getSlowLogResponses_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>> resultHandler) throws org.apache.thrift.TException {
           -        iface.getSlowLogResponses(args.serverNames, args.logQueryFilter,resultHandler);
           +      public void start(I iface, getSlowLogResponses_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<TOnlineLogRecord>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.getSlowLogResponses(args.serverNames, args.logQueryFilter, resultHandler);
                 }
               }
           
           -    public static class clearSlowLogResponses<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, clearSlowLogResponses_args, java.util.List<java.lang.Boolean>> {
           +    public static class clearSlowLogResponses<I extends AsyncIface> extends
           +        org.apache.thrift.AsyncProcessFunction<I, clearSlowLogResponses_args, java.util.List<java.lang.Boolean>> {
                 public clearSlowLogResponses() {
                   super("clearSlowLogResponses");
                 }
          @@ -8655,14 +9918,17 @@ public clearSlowLogResponses_args getEmptyArgsInstance() {
                   return new clearSlowLogResponses_args();
                 }
           
           -      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
           +      public org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>>
           +          getResultHandler(
           +              final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
           +              final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
           -        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>>() { 
           +        return new org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>>() {
                      public void onComplete(java.util.List<java.lang.Boolean> o) {
                       clearSlowLogResponses_result result = new clearSlowLogResponses_result();
                       result.success = o;
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8671,6 +9937,7 @@ public void onComplete(java.util.List o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8686,14 +9953,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8706,12 +9974,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
           -      public void start(I iface, clearSlowLogResponses_args args, org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler) throws org.apache.thrift.TException {
           -        iface.clearSlowLogResponses(args.serverNames,resultHandler);
           +      public void start(I iface, clearSlowLogResponses_args args,
           +          org.apache.thrift.async.AsyncMethodCallback<java.util.List<java.lang.Boolean>> resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.clearSlowLogResponses(args.serverNames, resultHandler);
                 }
               }
           
           -    public static class grant<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, grant_args, java.lang.Boolean> {
           +    public static class grant<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, grant_args, java.lang.Boolean> {
                 public grant() {
                   super("grant");
                 }
          @@ -8720,15 +9991,17 @@ public grant_args getEmptyArgsInstance() {
                   return new grant_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback() {
                     public void onComplete(java.lang.Boolean o) {
                       grant_result result = new grant_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8737,6 +10010,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8752,14 +10026,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8772,12 +10047,15 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, grant_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          -        iface.grant(args.info,resultHandler);
          +      public void start(I iface, grant_args args,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.grant(args.info, resultHandler);
                 }
               }
           
           -    public static class revoke<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, revoke_args, java.lang.Boolean> {
           +    public static class revoke<I extends AsyncIface>
           +        extends org.apache.thrift.AsyncProcessFunction<I, revoke_args, java.lang.Boolean> {
                 public revoke() {
                   super("revoke");
                 }
          @@ -8786,15 +10064,17 @@ public revoke_args getEmptyArgsInstance() {
                   return new revoke_args();
                 }
           
          -      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) {
          +      public org.apache.thrift.async.AsyncMethodCallback getResultHandler(
          +          final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb,
          +          final int seqid) {
                   final org.apache.thrift.AsyncProcessFunction fcall = this;
          -        return new org.apache.thrift.async.AsyncMethodCallback() { 
          +        return new org.apache.thrift.async.AsyncMethodCallback() {
                     public void onComplete(java.lang.Boolean o) {
                       revoke_result result = new revoke_result();
                       result.success = o;
                       result.setSuccessIsSet(true);
                       try {
          -              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
          +              fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY, seqid);
                       } catch (org.apache.thrift.transport.TTransportException e) {
                         _LOGGER.error("TTransportException writing to internal frame buffer", e);
                         fb.close();
          @@ -8803,6 +10083,7 @@ public void onComplete(java.lang.Boolean o) {
                         onError(e);
                       }
                     }
          +
                     public void onError(java.lang.Exception e) {
                       byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
                       org.apache.thrift.TSerializable msg;
          @@ -8818,14 +10099,15 @@ public void onError(java.lang.Exception e) {
                       } else if (e instanceof org.apache.thrift.TApplicationException) {
                         _LOGGER.error("TApplicationException inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = (org.apache.thrift.TApplicationException)e;
          +              msg = (org.apache.thrift.TApplicationException) e;
                       } else {
                         _LOGGER.error("Exception inside handler", e);
                         msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
          -              msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
          +              msg = new org.apache.thrift.TApplicationException(
          +                  org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
                       }
                       try {
          -              fcall.sendResponse(fb,msg,msgType,seqid);
          +              fcall.sendResponse(fb, msg, msgType, seqid);
                       } catch (java.lang.Exception ex) {
                         _LOGGER.error("Exception writing to internal frame buffer", ex);
                         fb.close();
          @@ -8838,21 +10120,32 @@ protected boolean isOneway() {
                   return false;
                 }
           
          -      public void start(I iface, revoke_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
          -        iface.revoke(args.info,resultHandler);
          +      public void start(I iface, revoke_args args,
          +          org.apache.thrift.async.AsyncMethodCallback resultHandler)
          +          throws org.apache.thrift.TException {
          +        iface.revoke(args.info, resultHandler);
                 }
               }
           
             }
           
           -  public static class exists_args implements org.apache.thrift.TBase<exists_args, exists_args._Fields>, java.io.Serializable, Cloneable, Comparable<exists_args>   {
           -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("exists_args");
           +  public static class exists_args
           +      implements org.apache.thrift.TBase<exists_args, exists_args._Fields>, java.io.Serializable,
           +      Cloneable, Comparable<exists_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("exists_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TGET_FIELD_DESC = new org.apache.thrift.protocol.TField("tget", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TGET_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tget", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new exists_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new exists_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new exists_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new exists_argsTupleSchemeFactory();
           
               /**
                * the table to check on
          @@ -8863,18 +10156,22 @@ public static class exists_args implements org.apache.thrift.TBase byName = new java.util.HashMap();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -8887,7 +10184,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TGET
          @@ -8898,12 +10195,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -8935,22 +10232,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TGET, new org.apache.thrift.meta_data.FieldMetaData("tget", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGet.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TGET,
          +        new org.apache.thrift.meta_data.FieldMetaData("tget",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TGet.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(exists_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(exists_args.class,
          +        metaDataMap);
               }
           
               public exists_args() {
               }
           
          -    public exists_args(
          -      java.nio.ByteBuffer table,
          -      TGet tget)
          -    {
          +    public exists_args(java.nio.ByteBuffer table, TGet tget) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tget = tget;
          @@ -8994,7 +10296,8 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to check on
                */
               public exists_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          @@ -9049,27 +10352,28 @@ public void setTgetIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TGET:
          -        if (value == null) {
          -          unsetTget();
          -        } else {
          -          setTget((TGet)value);
          -        }
          -        break;
          +        case TGET:
          +          if (value == null) {
          +            unsetTget();
          +          } else {
          +            setTget((TGet) value);
          +          }
          +          break;
           
                 }
               }
          @@ -9077,60 +10381,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TGET:
          -        return getTget();
          +        case TGET:
          +          return getTget();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TGET:
          -        return isSetTget();
          +        case TABLE:
          +          return isSetTable();
          +        case TGET:
          +          return isSetTget();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof exists_args)
          -        return this.equals((exists_args)that);
          +      if (that instanceof exists_args) return this.equals((exists_args) that);
                 return false;
               }
           
               public boolean equals(exists_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tget = true && this.isSetTget();
                 boolean that_present_tget = true && that.isSetTget();
                 if (this_present_tget || that_present_tget) {
          -        if (!(this_present_tget && that_present_tget))
          -          return false;
          -        if (!this.tget.equals(that.tget))
          -          return false;
          +        if (!(this_present_tget && that_present_tget)) return false;
          +        if (!this.tget.equals(that.tget)) return false;
                 }
           
                 return true;
          @@ -9141,12 +10441,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTget()) ? 131071 : 524287);
          -      if (isSetTget())
          -        hashCode = hashCode * 8191 + tget.hashCode();
          +      if (isSetTget()) hashCode = hashCode * 8191 + tget.hashCode();
           
                 return hashCode;
               }
          @@ -9187,11 +10485,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -9222,10 +10522,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tget == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tget' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tget' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tget != null) {
          @@ -9235,35 +10537,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class exists_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class exists_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public exists_argsStandardScheme getScheme() {
                   return new exists_argsStandardScheme();
                 }
               }
           
          -    private static class exists_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<exists_args> {
          +    private static class exists_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<exists_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, exists_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, exists_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -9271,7 +10578,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exists_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -9280,7 +10587,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exists_args struct)
                           struct.tget = new TGet();
                           struct.tget.read(iprot);
                           struct.setTgetIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -9291,11 +10598,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exists_args struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, exists_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, exists_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -9315,24 +10624,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exists_args struct
           
               }
           
          -    private static class exists_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class exists_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public exists_argsTupleScheme getScheme() {
                   return new exists_argsTupleScheme();
                 }
               }
           
          -    private static class exists_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<exists_args> {
          +    private static class exists_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<exists_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, exists_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, exists_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   struct.tget.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, exists_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, exists_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.tget = new TGet();
          @@ -9341,29 +10656,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exists_args struct)
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class exists_result implements org.apache.thrift.TBase<exists_result, exists_result._Fields>, java.io.Serializable, Cloneable, Comparable<exists_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("exists_result");
          +  public static class exists_result
          +      implements org.apache.thrift.TBase<exists_result, exists_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<exists_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("exists_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new exists_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new exists_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new exists_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new exists_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -9376,7 +10706,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -9387,12 +10717,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -9426,22 +10756,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(exists_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(exists_result.class,
          +        metaDataMap);
               }
           
               public exists_result() {
               }
           
          -    public exists_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public exists_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -9481,7 +10816,8 @@ public exists_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -9490,7 +10826,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -9518,23 +10855,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -9542,60 +10880,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof exists_result)
          -        return this.equals((exists_result)that);
          +      if (that instanceof exists_result) return this.equals((exists_result) that);
                 return false;
               }
           
               public boolean equals(exists_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -9608,8 +10942,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -9650,13 +10983,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -9685,37 +11020,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class exists_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class exists_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public exists_resultStandardScheme getScheme() {
                   return new exists_resultStandardScheme();
                 }
               }
           
          -    private static class exists_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<exists_result> {
          +    private static class exists_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<exists_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, exists_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, exists_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -9723,7 +11064,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exists_result struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -9732,7 +11073,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exists_result struc
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -9743,11 +11084,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exists_result struc
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, exists_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, exists_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -9767,17 +11110,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exists_result stru
           
               }
           
          -    private static class exists_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class exists_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public exists_resultTupleScheme getScheme() {
                   return new exists_resultTupleScheme();
                 }
               }
           
          -    private static class exists_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<exists_result> {
          +    private static class exists_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<exists_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, exists_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, exists_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -9795,8 +11142,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exists_result struc
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, exists_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, exists_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -9810,19 +11159,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exists_result struct
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class existsAll_args implements org.apache.thrift.TBase<existsAll_args, existsAll_args._Fields>, java.io.Serializable, Cloneable, Comparable<existsAll_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("existsAll_args");
          +  public static class existsAll_args
          +      implements org.apache.thrift.TBase<existsAll_args, existsAll_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<existsAll_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("existsAll_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TGETS_FIELD_DESC = new org.apache.thrift.protocol.TField("tgets", org.apache.thrift.protocol.TType.LIST, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TGETS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tgets", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new existsAll_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new existsAll_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new existsAll_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new existsAll_argsTupleSchemeFactory();
           
               /**
                * the table to check on
          @@ -9833,18 +11194,22 @@ public static class existsAll_args implements org.apache.thrift.TBase<existsAll
                 */
               public @org.apache.thrift.annotation.Nullable java.util.List<TGet> tgets; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the table to check on
                  */
          -      TABLE((short)1, "table"),
          +      TABLE((short) 1, "table"),
                 /**
                  * a list of TGets to check for
                  */
          -      TGETS((short)2, "tgets");
          +      TGETS((short) 2, "tgets");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -9857,7 +11222,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TGETS
          @@ -9868,12 +11233,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -9905,23 +11270,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TGETS, new org.apache.thrift.meta_data.FieldMetaData("tgets", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGet.class))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TGETS,
          +        new org.apache.thrift.meta_data.FieldMetaData("tgets",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TGet.class))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(existsAll_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(existsAll_args.class,
          +        metaDataMap);
               }
           
               public existsAll_args() {
               }
           
          -    public existsAll_args(
          -      java.nio.ByteBuffer table,
          -      java.util.List<TGet> tgets)
          -    {
          +    public existsAll_args(java.nio.ByteBuffer table, java.util.List<TGet> tgets) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tgets = tgets;
          @@ -9969,11 +11339,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to check on
                */
               public existsAll_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public existsAll_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public existsAll_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -10020,7 +11392,8 @@ public java.util.List<TGet> getTgets() {
               /**
                * a list of TGets to check for
                */
          -    public existsAll_args setTgets(@org.apache.thrift.annotation.Nullable java.util.List<TGet> tgets) {
          +    public existsAll_args
          +        setTgets(@org.apache.thrift.annotation.Nullable java.util.List<TGet> tgets) {
                 this.tgets = tgets;
                 return this;
               }
          @@ -10040,27 +11413,28 @@ public void setTgetsIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TGETS:
          -        if (value == null) {
          -          unsetTgets();
          -        } else {
          -          setTgets((java.util.List<TGet>)value);
          -        }
          -        break;
          +        case TGETS:
          +          if (value == null) {
          +            unsetTgets();
          +          } else {
          +            setTgets((java.util.List<TGet>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -10068,60 +11442,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TGETS:
          -        return getTgets();
          +        case TGETS:
          +          return getTgets();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TGETS:
          -        return isSetTgets();
          +        case TABLE:
          +          return isSetTable();
          +        case TGETS:
          +          return isSetTgets();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof existsAll_args)
          -        return this.equals((existsAll_args)that);
          +      if (that instanceof existsAll_args) return this.equals((existsAll_args) that);
                 return false;
               }
           
               public boolean equals(existsAll_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tgets = true && this.isSetTgets();
                 boolean that_present_tgets = true && that.isSetTgets();
                 if (this_present_tgets || that_present_tgets) {
          -        if (!(this_present_tgets && that_present_tgets))
          -          return false;
          -        if (!this.tgets.equals(that.tgets))
          -          return false;
          +        if (!(this_present_tgets && that_present_tgets)) return false;
          +        if (!this.tgets.equals(that.tgets)) return false;
                 }
           
                 return true;
          @@ -10132,12 +11502,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTgets()) ? 131071 : 524287);
          -      if (isSetTgets())
          -        hashCode = hashCode * 8191 + tgets.hashCode();
          +      if (isSetTgets()) hashCode = hashCode * 8191 + tgets.hashCode();
           
                 return hashCode;
               }
          @@ -10178,11 +11546,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -10213,45 +11583,52 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tgets == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tgets' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tgets' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class existsAll_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class existsAll_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public existsAll_argsStandardScheme getScheme() {
                   return new existsAll_argsStandardScheme();
                 }
               }
           
          -    private static class existsAll_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<existsAll_args> {
          +    private static class existsAll_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<existsAll_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -10259,7 +11636,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -10268,9 +11645,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_args stru
                           {
                             org.apache.thrift.protocol.TList _list190 = iprot.readListBegin();
                           struct.tgets = new java.util.ArrayList<TGet>(_list190.size);
          -                  @org.apache.thrift.annotation.Nullable TGet _elem191;
          -                  for (int _i192 = 0; _i192 < _list190.size; ++_i192)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TGet _elem191;
          +                  for (int _i192 = 0; _i192 < _list190.size; ++_i192) {
                               _elem191 = new TGet();
                               _elem191.read(iprot);
                               struct.tgets.add(_elem191);
          @@ -10278,7 +11655,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_args stru
                             iprot.readListEnd();
                           }
                           struct.setTgetsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -10289,11 +11666,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_args stru
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, existsAll_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, existsAll_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -10305,9 +11684,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, existsAll_args str
                   if (struct.tgets != null) {
                     oprot.writeFieldBegin(TGETS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tgets.size()));
          -            for (TGet _iter193 : struct.tgets)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.tgets.size()));
          +            for (TGet _iter193 : struct.tgets) {
                         _iter193.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -10320,38 +11699,44 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, existsAll_args str
           
               }
           
          -    private static class existsAll_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class existsAll_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public existsAll_argsTupleScheme getScheme() {
                   return new existsAll_argsTupleScheme();
                 }
               }
           
          -    private static class existsAll_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<existsAll_args> {
          +    private static class existsAll_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<existsAll_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, existsAll_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, existsAll_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   {
                     oprot.writeI32(struct.tgets.size());
          -          for (TGet _iter194 : struct.tgets)
          -          {
          +          for (TGet _iter194 : struct.tgets) {
                       _iter194.write(oprot);
                     }
                   }
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, existsAll_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, existsAll_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   {
          -          org.apache.thrift.protocol.TList _list195 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list195 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                      struct.tgets = new java.util.ArrayList<TGet>(_list195.size);
          -          @org.apache.thrift.annotation.Nullable TGet _elem196;
          -          for (int _i197 = 0; _i197 < _list195.size; ++_i197)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TGet _elem196;
          +          for (int _i197 = 0; _i197 < _list195.size; ++_i197) {
                       _elem196 = new TGet();
                       _elem196.read(iprot);
                       struct.tgets.add(_elem196);
          @@ -10361,29 +11746,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, existsAll_args struc
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class existsAll_result implements org.apache.thrift.TBase<existsAll_result, existsAll_result._Fields>, java.io.Serializable, Cloneable, Comparable<existsAll_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("existsAll_result");
          +  public static class existsAll_result
           +      implements org.apache.thrift.TBase<existsAll_result, existsAll_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<existsAll_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("existsAll_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new existsAll_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new existsAll_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new existsAll_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new existsAll_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<java.lang.Boolean> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -10396,7 +11796,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -10407,12 +11807,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -10444,23 +11844,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.BOOL))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(existsAll_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(existsAll_result.class,
          +        metaDataMap);
               }
           
               public existsAll_result() {
               }
           
          -    public existsAll_result(
           -      java.util.List<java.lang.Boolean> success,
          -      TIOError io)
          -    {
           +    public existsAll_result(java.util.List<java.lang.Boolean> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -10471,7 +11876,8 @@ public existsAll_result(
                */
               public existsAll_result(existsAll_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<java.lang.Boolean> __this__success = new java.util.ArrayList<java.lang.Boolean>(other.success);
           +        java.util.List<java.lang.Boolean> __this__success =
           +            new java.util.ArrayList<java.lang.Boolean>(other.success);
                   this.success = __this__success;
                 }
                 if (other.isSetIo()) {
           @@ -10510,7 +11916,8 @@ public java.util.List<java.lang.Boolean> getSuccess() {
                 return this.success;
               }
           
           -    public existsAll_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<java.lang.Boolean> success) {
           +    public existsAll_result setSuccess(
           +        @org.apache.thrift.annotation.Nullable java.util.List<java.lang.Boolean> success) {
                 this.success = success;
                 return this;
               }
          @@ -10555,23 +11962,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<java.lang.Boolean>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<java.lang.Boolean>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -10579,60 +11987,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof existsAll_result)
          -        return this.equals((existsAll_result)that);
          +      if (that instanceof existsAll_result) return this.equals((existsAll_result) that);
                 return false;
               }
           
               public boolean equals(existsAll_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -10643,12 +12047,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -10689,13 +12091,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -10728,35 +12132,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class existsAll_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class existsAll_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public existsAll_resultStandardScheme getScheme() {
                   return new existsAll_resultStandardScheme();
                 }
               }
           
           -    private static class existsAll_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<existsAll_result> {
          +    private static class existsAll_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<existsAll_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -10766,15 +12175,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_result st
                             org.apache.thrift.protocol.TList _list198 = iprot.readListBegin();
                          struct.success = new java.util.ArrayList<java.lang.Boolean>(_list198.size);
                             boolean _elem199;
          -                  for (int _i200 = 0; _i200 < _list198.size; ++_i200)
          -                  {
          +                  for (int _i200 = 0; _i200 < _list198.size; ++_i200) {
                               _elem199 = iprot.readBool();
                               struct.success.add(_elem199);
                             }
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -10783,7 +12191,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_result st
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -10794,20 +12202,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, existsAll_result st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, existsAll_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, existsAll_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BOOL, struct.success.size()));
          -            for (boolean _iter201 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.BOOL, struct.success.size()));
          +            for (boolean _iter201 : struct.success) {
                         oprot.writeBool(_iter201);
                       }
                       oprot.writeListEnd();
          @@ -10825,17 +12235,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, existsAll_result s
           
               }
           
          -    private static class existsAll_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class existsAll_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public existsAll_resultTupleScheme getScheme() {
                   return new existsAll_resultTupleScheme();
                 }
               }
           
           -    private static class existsAll_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<existsAll_result> {
          +    private static class existsAll_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<existsAll_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, existsAll_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, existsAll_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -10847,8 +12261,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, existsAll_result st
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (boolean _iter202 : struct.success)
          -            {
          +            for (boolean _iter202 : struct.success) {
                         oprot.writeBool(_iter202);
                       }
                     }
          @@ -10859,16 +12272,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, existsAll_result st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, existsAll_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, existsAll_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list203 = iprot.readListBegin(org.apache.thrift.protocol.TType.BOOL);
          +            org.apache.thrift.protocol.TList _list203 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.BOOL);
                    struct.success = new java.util.ArrayList<java.lang.Boolean>(_list203.size);
                       boolean _elem204;
          -            for (int _i205 = 0; _i205 < _list203.size; ++_i205)
          -            {
          +            for (int _i205 = 0; _i205 < _list203.size; ++_i205) {
                         _elem204 = iprot.readBool();
                         struct.success.add(_elem204);
                       }
          @@ -10883,19 +12298,30 @@ public void read(org.apache.thrift.protocol.TProtocol prot, existsAll_result str
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class get_args implements org.apache.thrift.TBase<get_args, get_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_args");
           +  public static class get_args implements org.apache.thrift.TBase<get_args, get_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<get_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("get_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TGET_FIELD_DESC = new org.apache.thrift.protocol.TField("tget", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TGET_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tget", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new get_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new get_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new get_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new get_argsTupleSchemeFactory();
           
               /**
                * the table to get from
           @@ -10906,18 +12332,22 @@ public static class get_args implements org.apache.thrift.TBase<get_args, get_arg
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -10930,7 +12360,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TGET
          @@ -10941,12 +12371,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -10978,11 +12408,18 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TGET, new org.apache.thrift.meta_data.FieldMetaData("tget", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGet.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TGET,
          +        new org.apache.thrift.meta_data.FieldMetaData("tget",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TGet.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
                 org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_args.class, metaDataMap);
               }
          @@ -10990,10 +12427,7 @@ public java.lang.String getFieldName() {
               public get_args() {
               }
           
          -    public get_args(
          -      java.nio.ByteBuffer table,
          -      TGet tget)
          -    {
          +    public get_args(java.nio.ByteBuffer table, TGet tget) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tget = tget;
          @@ -11037,7 +12471,8 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to get from
                */
               public get_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          @@ -11092,27 +12527,28 @@ public void setTgetIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TGET:
          -        if (value == null) {
          -          unsetTget();
          -        } else {
          -          setTget((TGet)value);
          -        }
          -        break;
          +        case TGET:
          +          if (value == null) {
          +            unsetTget();
          +          } else {
          +            setTget((TGet) value);
          +          }
          +          break;
           
                 }
               }
          @@ -11120,60 +12556,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TGET:
          -        return getTget();
          +        case TGET:
          +          return getTget();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TGET:
          -        return isSetTget();
          +        case TABLE:
          +          return isSetTable();
          +        case TGET:
          +          return isSetTget();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof get_args)
          -        return this.equals((get_args)that);
          +      if (that instanceof get_args) return this.equals((get_args) that);
                 return false;
               }
           
               public boolean equals(get_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tget = true && this.isSetTget();
                 boolean that_present_tget = true && that.isSetTget();
                 if (this_present_tget || that_present_tget) {
          -        if (!(this_present_tget && that_present_tget))
          -          return false;
          -        if (!this.tget.equals(that.tget))
          -          return false;
          +        if (!(this_present_tget && that_present_tget)) return false;
          +        if (!this.tget.equals(that.tget)) return false;
                 }
           
                 return true;
          @@ -11184,12 +12616,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTget()) ? 131071 : 524287);
          -      if (isSetTget())
          -        hashCode = hashCode * 8191 + tget.hashCode();
          +      if (isSetTget()) hashCode = hashCode * 8191 + tget.hashCode();
           
                 return hashCode;
               }
          @@ -11230,11 +12660,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -11265,10 +12697,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tget == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tget' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tget' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tget != null) {
          @@ -11278,35 +12712,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class get_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class get_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public get_argsStandardScheme getScheme() {
                   return new get_argsStandardScheme();
                 }
               }
           
           -    private static class get_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<get_args> {
          +    private static class get_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<get_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -11314,7 +12753,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -11323,7 +12762,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th
                           struct.tget = new TGet();
                           struct.tget.read(iprot);
                           struct.setTgetIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -11334,11 +12773,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_args struct) th
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, get_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, get_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -11358,24 +12799,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_args struct) t
           
               }
           
          -    private static class get_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class get_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public get_argsTupleScheme getScheme() {
                   return new get_argsTupleScheme();
                 }
               }
           
           -    private static class get_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<get_args> {
          +    private static class get_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<get_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, get_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, get_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   struct.tget.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, get_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, get_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.tget = new TGet();
          @@ -11384,29 +12831,43 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_args struct) thr
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class get_result implements org.apache.thrift.TBase<get_result, get_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_result");
           +  public static class get_result implements org.apache.thrift.TBase<get_result, get_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<get_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("get_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new get_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new get_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new get_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new get_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TResult success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -11419,7 +12880,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -11430,12 +12891,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -11467,11 +12928,18 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TResult.class)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TResult.class)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
                 org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_result.class, metaDataMap);
               }
          @@ -11479,10 +12947,7 @@ public java.lang.String getFieldName() {
               public get_result() {
               }
           
          -    public get_result(
          -      TResult success,
          -      TIOError io)
          -    {
          +    public get_result(TResult success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -11560,23 +13025,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((TResult)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((TResult) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -11584,60 +13050,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof get_result)
          -        return this.equals((get_result)that);
          +      if (that instanceof get_result) return this.equals((get_result) that);
                 return false;
               }
           
               public boolean equals(get_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -11648,12 +13110,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -11694,13 +13154,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -11736,35 +13198,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class get_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class get_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public get_resultStandardScheme getScheme() {
                   return new get_resultStandardScheme();
                 }
               }
           
          -    private static class get_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<get_result> {
          +    private static class get_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<get_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -11773,7 +13240,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct)
                           struct.success = new TResult();
                           struct.success.read(iprot);
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -11782,7 +13249,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct)
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -11793,11 +13260,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_result struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, get_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, get_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -11817,17 +13286,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_result struct)
           
               }
           
          -    private static class get_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class get_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public get_resultTupleScheme getScheme() {
                   return new get_resultTupleScheme();
                 }
               }
           
          -    private static class get_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<get_result> {
          +    private static class get_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<get_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, get_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, get_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -11845,8 +13318,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_result struct)
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, get_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, get_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = new TResult();
          @@ -11861,45 +13336,59 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_result struct) t
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getMultiple_args implements org.apache.thrift.TBase<getMultiple_args, getMultiple_args._Fields>, java.io.Serializable, Cloneable, Comparable<getMultiple_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMultiple_args");
          +  public static class getMultiple_args
          +      implements org.apache.thrift.TBase<getMultiple_args, getMultiple_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getMultiple_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getMultiple_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TGETS_FIELD_DESC = new org.apache.thrift.protocol.TField("tgets", org.apache.thrift.protocol.TType.LIST, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TGETS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tgets", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getMultiple_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getMultiple_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getMultiple_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getMultiple_argsTupleSchemeFactory();
           
               /**
                * the table to get from
                */
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table; // required
               /**
          -     * a list of TGets to fetch, the Result list
          -     * will have the Results at corresponding positions
          -     * or null if there was an error
          +     * a list of TGets to fetch, the Result list will have the Results at corresponding positions or
          +     * null if there was an error
                */
               public @org.apache.thrift.annotation.Nullable java.util.List<TGet> tgets; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the table to get from
                  */
          -      TABLE((short)1, "table"),
          +      TABLE((short) 1, "table"),
                 /**
          -       * a list of TGets to fetch, the Result list
          -       * will have the Results at corresponding positions
          +       * a list of TGets to fetch, the Result list will have the Results at corresponding positions
                  * or null if there was an error
                  */
          -      TGETS((short)2, "tgets");
          +      TGETS((short) 2, "tgets");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -11912,7 +13401,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TGETS
          @@ -11923,12 +13412,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -11960,23 +13449,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TGETS, new org.apache.thrift.meta_data.FieldMetaData("tgets", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGet.class))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TGETS,
          +        new org.apache.thrift.meta_data.FieldMetaData("tgets",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TGet.class))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMultiple_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMultiple_args.class,
          +        metaDataMap);
               }
           
               public getMultiple_args() {
               }
           
          -    public getMultiple_args(
          -      java.nio.ByteBuffer table,
          -      java.util.List<TGet> tgets)
          -    {
          +    public getMultiple_args(java.nio.ByteBuffer table, java.util.List<TGet> tgets) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tgets = tgets;
          @@ -12024,11 +13518,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to get from
                */
               public getMultiple_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public getMultiple_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public getMultiple_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -12065,9 +13561,8 @@ public void addToTgets(TGet elem) {
               }
           
               /**
          -     * a list of TGets to fetch, the Result list
          -     * will have the Results at corresponding positions
          -     * or null if there was an error
          +     * a list of TGets to fetch, the Result list will have the Results at corresponding positions or
          +     * null if there was an error
                */
               @org.apache.thrift.annotation.Nullable
               public java.util.List<TGet> getTgets() {
          @@ -12075,11 +13570,11 @@ public java.util.List<TGet> getTgets() {
               }
           
               /**
          -     * a list of TGets to fetch, the Result list
          -     * will have the Results at corresponding positions
          -     * or null if there was an error
          +     * a list of TGets to fetch, the Result list will have the Results at corresponding positions or
          +     * null if there was an error
                */
          -    public getMultiple_args setTgets(@org.apache.thrift.annotation.Nullable java.util.List<TGet> tgets) {
          +    public getMultiple_args
          +        setTgets(@org.apache.thrift.annotation.Nullable java.util.List<TGet> tgets) {
                 this.tgets = tgets;
                 return this;
               }
          @@ -12099,27 +13594,28 @@ public void setTgetsIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TGETS:
          -        if (value == null) {
          -          unsetTgets();
          -        } else {
          -          setTgets((java.util.List<TGet>)value);
          -        }
          -        break;
          +        case TGETS:
          +          if (value == null) {
          +            unsetTgets();
          +          } else {
          +            setTgets((java.util.List<TGet>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -12127,60 +13623,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TGETS:
          -        return getTgets();
          +        case TGETS:
          +          return getTgets();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TGETS:
          -        return isSetTgets();
          +        case TABLE:
          +          return isSetTable();
          +        case TGETS:
          +          return isSetTgets();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getMultiple_args)
          -        return this.equals((getMultiple_args)that);
          +      if (that instanceof getMultiple_args) return this.equals((getMultiple_args) that);
                 return false;
               }
           
               public boolean equals(getMultiple_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tgets = true && this.isSetTgets();
                 boolean that_present_tgets = true && that.isSetTgets();
                 if (this_present_tgets || that_present_tgets) {
          -        if (!(this_present_tgets && that_present_tgets))
          -          return false;
          -        if (!this.tgets.equals(that.tgets))
          -          return false;
          +        if (!(this_present_tgets && that_present_tgets)) return false;
          +        if (!this.tgets.equals(that.tgets)) return false;
                 }
           
                 return true;
          @@ -12191,12 +13683,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTgets()) ? 131071 : 524287);
          -      if (isSetTgets())
          -        hashCode = hashCode * 8191 + tgets.hashCode();
          +      if (isSetTgets()) hashCode = hashCode * 8191 + tgets.hashCode();
           
                 return hashCode;
               }
          @@ -12237,11 +13727,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -12272,45 +13764,52 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tgets == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tgets' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tgets' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getMultiple_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getMultiple_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getMultiple_argsStandardScheme getScheme() {
                   return new getMultiple_argsStandardScheme();
                 }
               }
           
          -    private static class getMultiple_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getMultiple_args> {
          +    private static class getMultiple_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getMultiple_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -12318,7 +13817,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -12327,9 +13826,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_args st
                           {
                             org.apache.thrift.protocol.TList _list206 = iprot.readListBegin();
                             struct.tgets = new java.util.ArrayList<TGet>(_list206.size);
          -                  @org.apache.thrift.annotation.Nullable TGet _elem207;
          -                  for (int _i208 = 0; _i208 < _list206.size; ++_i208)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TGet _elem207;
          +                  for (int _i208 = 0; _i208 < _list206.size; ++_i208) {
                               _elem207 = new TGet();
                               _elem207.read(iprot);
                               struct.tgets.add(_elem207);
          @@ -12337,7 +13836,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_args st
                             iprot.readListEnd();
                           }
                           struct.setTgetsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -12348,11 +13847,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getMultiple_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getMultiple_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -12364,9 +13865,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getMultiple_args s
                   if (struct.tgets != null) {
                     oprot.writeFieldBegin(TGETS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tgets.size()));
          -            for (TGet _iter209 : struct.tgets)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.tgets.size()));
          +            for (TGet _iter209 : struct.tgets) {
                         _iter209.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -12379,38 +13880,44 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getMultiple_args s
           
               }
           
          -    private static class getMultiple_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getMultiple_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getMultiple_argsTupleScheme getScheme() {
                   return new getMultiple_argsTupleScheme();
                 }
               }
           
          -    private static class getMultiple_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getMultiple_args> {
          +    private static class getMultiple_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getMultiple_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getMultiple_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getMultiple_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   {
                     oprot.writeI32(struct.tgets.size());
          -          for (TGet _iter210 : struct.tgets)
          -          {
          +          for (TGet _iter210 : struct.tgets) {
                       _iter210.write(oprot);
                     }
                   }
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getMultiple_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getMultiple_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   {
          -          org.apache.thrift.protocol.TList _list211 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list211 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                     struct.tgets = new java.util.ArrayList<TGet>(_list211.size);
          -          @org.apache.thrift.annotation.Nullable TGet _elem212;
          -          for (int _i213 = 0; _i213 < _list211.size; ++_i213)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TGet _elem212;
          +          for (int _i213 = 0; _i213 < _list211.size; ++_i213) {
                       _elem212 = new TGet();
                       _elem212.read(iprot);
                       struct.tgets.add(_elem212);
          @@ -12420,29 +13927,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getMultiple_args str
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getMultiple_result implements org.apache.thrift.TBase<getMultiple_result, getMultiple_result._Fields>, java.io.Serializable, Cloneable, Comparable<getMultiple_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMultiple_result");
          +  public static class getMultiple_result
          +      implements org.apache.thrift.TBase<getMultiple_result, getMultiple_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getMultiple_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getMultiple_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getMultiple_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getMultiple_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getMultiple_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getMultiple_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TResult> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -12455,7 +13977,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -12466,12 +13988,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -12503,23 +14025,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMultiple_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMultiple_result.class,
          +        metaDataMap);
               }
           
               public getMultiple_result() {
               }
           
          -    public getMultiple_result(
          -      java.util.List<TResult> success,
          -      TIOError io)
          -    {
          +    public getMultiple_result(java.util.List<TResult> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -12530,7 +14057,8 @@ public getMultiple_result(
                */
               public getMultiple_result(getMultiple_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TResult> __this__success = new java.util.ArrayList<TResult>(other.success.size());
          +        java.util.List<TResult> __this__success =
          +            new java.util.ArrayList<TResult>(other.success.size());
                   for (TResult other_element : other.success) {
                     __this__success.add(new TResult(other_element));
                   }
          @@ -12572,7 +14100,8 @@ public java.util.List<TResult> getSuccess() {
                 return this.success;
               }
           
          -    public getMultiple_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TResult> success) {
          +    public getMultiple_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -12617,23 +14146,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -12641,60 +14171,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getMultiple_result)
          -        return this.equals((getMultiple_result)that);
          +      if (that instanceof getMultiple_result) return this.equals((getMultiple_result) that);
                 return false;
               }
           
               public boolean equals(getMultiple_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -12705,12 +14231,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -12751,13 +14275,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -12790,35 +14316,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getMultiple_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getMultiple_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getMultiple_resultStandardScheme getScheme() {
                   return new getMultiple_resultStandardScheme();
                 }
               }
           
          -    private static class getMultiple_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getMultiple_result> {
          +    private static class getMultiple_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getMultiple_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -12827,9 +14358,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_result
                           {
                             org.apache.thrift.protocol.TList _list214 = iprot.readListBegin();
                                        struct.success = new java.util.ArrayList<TResult>(_list214.size);
          -                  @org.apache.thrift.annotation.Nullable TResult _elem215;
          -                  for (int _i216 = 0; _i216 < _list214.size; ++_i216)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TResult _elem215;
          +                  for (int _i216 = 0; _i216 < _list214.size; ++_i216) {
                               _elem215 = new TResult();
                               _elem215.read(iprot);
                               struct.success.add(_elem215);
          @@ -12837,7 +14368,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_result
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -12846,7 +14377,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -12857,20 +14388,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMultiple_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getMultiple_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getMultiple_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TResult _iter217 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TResult _iter217 : struct.success) {
                         _iter217.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -12888,17 +14421,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getMultiple_result
           
               }
           
          -    private static class getMultiple_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getMultiple_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getMultiple_resultTupleScheme getScheme() {
                   return new getMultiple_resultTupleScheme();
                 }
               }
           
          -    private static class getMultiple_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getMultiple_result> {
          +    private static class getMultiple_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getMultiple_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getMultiple_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getMultiple_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -12910,8 +14447,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getMultiple_result
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TResult _iter218 : struct.success)
          -            {
          +            for (TResult _iter218 : struct.success) {
                         _iter218.write(oprot);
                       }
                     }
          @@ -12922,16 +14458,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getMultiple_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getMultiple_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getMultiple_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list219 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list219 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TResult>(_list219.size);
          -            @org.apache.thrift.annotation.Nullable TResult _elem220;
          -            for (int _i221 = 0; _i221 < _list219.size; ++_i221)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TResult _elem220;
          +            for (int _i221 = 0; _i221 < _list219.size; ++_i221) {
                         _elem220 = new TResult();
                         _elem220.read(iprot);
                         struct.success.add(_elem220);
          @@ -12947,19 +14486,30 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getMultiple_result s
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class put_args implements org.apache.thrift.TBase<put_args, put_args._Fields>, java.io.Serializable, Cloneable, Comparable<put_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_args");
          +  public static class put_args implements org.apache.thrift.TBase<put_args, put_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<put_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("put_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TPUT_FIELD_DESC = new org.apache.thrift.protocol.TField("tput", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TPUT_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tput", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new put_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new put_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new put_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new put_argsTupleSchemeFactory();
           
               /**
                * the table to put data in
          @@ -12970,18 +14520,22 @@ public static class put_args implements org.apache.thrift.TBase
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -12994,7 +14548,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TPUT
          @@ -13005,12 +14559,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -13042,11 +14596,18 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TPUT, new org.apache.thrift.meta_data.FieldMetaData("tput", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPut.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TPUT,
          +        new org.apache.thrift.meta_data.FieldMetaData("tput",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TPut.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
                 org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_args.class, metaDataMap);
               }
          @@ -13054,10 +14615,7 @@ public java.lang.String getFieldName() {
               public put_args() {
               }
           
          -    public put_args(
          -      java.nio.ByteBuffer table,
          -      TPut tput)
          -    {
          +    public put_args(java.nio.ByteBuffer table, TPut tput) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tput = tput;
          @@ -13101,7 +14659,8 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to put data in
                */
               public put_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          @@ -13156,27 +14715,28 @@ public void setTputIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TPUT:
          -        if (value == null) {
          -          unsetTput();
          -        } else {
          -          setTput((TPut)value);
          -        }
          -        break;
          +        case TPUT:
          +          if (value == null) {
          +            unsetTput();
          +          } else {
          +            setTput((TPut) value);
          +          }
          +          break;
           
                 }
               }
          @@ -13184,60 +14744,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TPUT:
          -        return getTput();
          +        case TPUT:
          +          return getTput();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TPUT:
          -        return isSetTput();
          +        case TABLE:
          +          return isSetTable();
          +        case TPUT:
          +          return isSetTput();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof put_args)
          -        return this.equals((put_args)that);
          +      if (that instanceof put_args) return this.equals((put_args) that);
                 return false;
               }
           
               public boolean equals(put_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tput = true && this.isSetTput();
                 boolean that_present_tput = true && that.isSetTput();
                 if (this_present_tput || that_present_tput) {
          -        if (!(this_present_tput && that_present_tput))
          -          return false;
          -        if (!this.tput.equals(that.tput))
          -          return false;
          +        if (!(this_present_tput && that_present_tput)) return false;
          +        if (!this.tput.equals(that.tput)) return false;
                 }
           
                 return true;
          @@ -13248,12 +14804,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTput()) ? 131071 : 524287);
          -      if (isSetTput())
          -        hashCode = hashCode * 8191 + tput.hashCode();
          +      if (isSetTput()) hashCode = hashCode * 8191 + tput.hashCode();
           
                 return hashCode;
               }
          @@ -13294,11 +14848,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -13329,10 +14885,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tput == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tput' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tput' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tput != null) {
          @@ -13342,35 +14900,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class put_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class put_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public put_argsStandardScheme getScheme() {
                   return new put_argsStandardScheme();
                 }
               }
           
          -    private static class put_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<put_args> {
          +    private static class put_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<put_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, put_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, put_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -13378,7 +14941,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_args struct) th
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -13387,7 +14950,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_args struct) th
                           struct.tput = new TPut();
                           struct.tput.read(iprot);
                           struct.setTputIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -13398,11 +14961,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_args struct) th
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, put_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, put_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -13422,24 +14987,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, put_args struct) t
           
               }
           
          -    private static class put_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class put_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public put_argsTupleScheme getScheme() {
                   return new put_argsTupleScheme();
                 }
               }
           
          -    private static class put_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<put_args> {
          +    private static class put_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<put_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, put_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, put_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   struct.tput.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, put_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, put_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.tput = new TPut();
          @@ -13448,26 +15019,39 @@ public void read(org.apache.thrift.protocol.TProtocol prot, put_args struct) thr
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class put_result implements org.apache.thrift.TBase<put_result, put_result._Fields>, java.io.Serializable, Cloneable, Comparable<put_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_result");
          +  public static class put_result implements org.apache.thrift.TBase<put_result, put_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<put_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("put_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new put_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new put_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new put_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new put_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -13480,7 +15064,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -13489,12 +15073,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -13526,9 +15110,13 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
                 org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_result.class, metaDataMap);
               }
          @@ -13536,9 +15124,7 @@ public java.lang.String getFieldName() {
               public put_result() {
               }
           
          -    public put_result(
          -      TIOError io)
          -    {
          +    public put_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -13586,15 +15172,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -13602,46 +15189,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof put_result)
          -        return this.equals((put_result)that);
          +      if (that instanceof put_result) return this.equals((put_result) that);
                 return false;
               }
           
               public boolean equals(put_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -13652,8 +15237,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -13684,13 +15268,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -13715,35 +15301,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class put_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class put_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public put_resultStandardScheme getScheme() {
                   return new put_resultStandardScheme();
                 }
               }
           
          -    private static class put_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<put_result> {
          +    private static class put_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<put_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, put_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, put_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -13752,7 +15343,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_result struct)
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -13763,11 +15354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_result struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, put_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, put_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -13782,17 +15375,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, put_result struct)
           
               }
           
          -    private static class put_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class put_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public put_resultTupleScheme getScheme() {
                   return new put_resultTupleScheme();
                 }
               }
           
          -    private static class put_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<put_result> {
          +    private static class put_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<put_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, put_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, put_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -13804,8 +15401,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, put_result struct)
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, put_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, put_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -13815,23 +15414,43 @@ public void read(org.apache.thrift.protocol.TProtocol prot, put_result struct) t
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class checkAndPut_args implements org.apache.thrift.TBase<checkAndPut_args, checkAndPut_args._Fields>, java.io.Serializable, Cloneable, Comparable<checkAndPut_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndPut_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING, (short)4);
          -    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)5);
          -    private static final org.apache.thrift.protocol.TField TPUT_FIELD_DESC = new org.apache.thrift.protocol.TField("tput", org.apache.thrift.protocol.TType.STRUCT, (short)6);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new checkAndPut_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new checkAndPut_argsTupleSchemeFactory();
          +  public static class checkAndPut_args
          +      implements org.apache.thrift.TBase<checkAndPut_args, checkAndPut_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<checkAndPut_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("checkAndPut_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING,
          +            (short) 5);
          +    private static final org.apache.thrift.protocol.TField TPUT_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tput", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 6);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new checkAndPut_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new checkAndPut_argsTupleSchemeFactory();
           
               /**
                * to check in and put to
@@ -13850,9 +15469,8 @@ public static class checkAndPut_args implements org.apache.thrift.TBase<checkAn
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -13902,7 +15523,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // ROW
          @@ -13921,12 +15542,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -13958,34 +15579,49 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("qualifier", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TPUT, new org.apache.thrift.meta_data.FieldMetaData("tput", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPut.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.FAMILY,
          +        new org.apache.thrift.meta_data.FieldMetaData("family",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.QUALIFIER,
          +        new org.apache.thrift.meta_data.FieldMetaData("qualifier",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.VALUE,
          +        new org.apache.thrift.meta_data.FieldMetaData("value",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TPUT,
          +        new org.apache.thrift.meta_data.FieldMetaData("tput",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TPut.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndPut_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndPut_args.class,
          +        metaDataMap);
               }
           
               public checkAndPut_args() {
               }
           
          -    public checkAndPut_args(
          -      java.nio.ByteBuffer table,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer family,
          -      java.nio.ByteBuffer qualifier,
          -      java.nio.ByteBuffer value,
          -      TPut tput)
          -    {
          +    public checkAndPut_args(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TPut tput) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -14049,11 +15685,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * to check in and put to
                */
               public checkAndPut_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public checkAndPut_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public checkAndPut_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -14089,7 +15727,7 @@ public java.nio.ByteBuffer bufferForRow() {
                * row to check
                */
               public checkAndPut_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          @@ -14129,11 +15767,13 @@ public java.nio.ByteBuffer bufferForFamily() {
                * column family to check
                */
               public checkAndPut_args setFamily(byte[] family) {
          -      this.family = family == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(family.clone());
          +      this.family =
          +          family == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(family.clone());
                 return this;
               }
           
          -    public checkAndPut_args setFamily(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family) {
          +    public checkAndPut_args
          +        setFamily(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family) {
                 this.family = org.apache.thrift.TBaseHelper.copyBinary(family);
                 return this;
               }
          @@ -14169,11 +15809,13 @@ public java.nio.ByteBuffer bufferForQualifier() {
                * column qualifier to check
                */
               public checkAndPut_args setQualifier(byte[] qualifier) {
          -      this.qualifier = qualifier == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(qualifier.clone());
          +      this.qualifier = qualifier == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(qualifier.clone());
                 return this;
               }
           
          -    public checkAndPut_args setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
          +    public checkAndPut_args
          +        setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
                 this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
                 return this;
               }
          @@ -14194,9 +15836,8 @@ public void setQualifierIsSet(boolean value) {
               }
           
               /**
          -     * the expected value, if not provided the
          -     * check is for the non-existence of the
          -     * column in question
          +     * the expected value, if not provided the check is for the non-existence of the column in
          +     * question
                */
               public byte[] getValue() {
                 setValue(org.apache.thrift.TBaseHelper.rightSize(value));
          @@ -14208,16 +15849,17 @@ public java.nio.ByteBuffer bufferForValue() {
               }
           
               /**
          -     * the expected value, if not provided the
          -     * check is for the non-existence of the
          -     * column in question
          +     * the expected value, if not provided the check is for the non-existence of the column in
          +     * question
                */
               public checkAndPut_args setValue(byte[] value) {
          -      this.value = value == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(value.clone());
          +      this.value =
          +          value == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(value.clone());
                 return this;
               }
           
          -    public checkAndPut_args setValue(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value) {
          +    public checkAndPut_args
          +        setValue(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value) {
                 this.value = org.apache.thrift.TBaseHelper.copyBinary(value);
                 return this;
               }
          @@ -14268,75 +15910,76 @@ public void setTputIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case FAMILY:
          -        if (value == null) {
          -          unsetFamily();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setFamily((byte[])value);
          +        case FAMILY:
          +          if (value == null) {
          +            unsetFamily();
                     } else {
          -            setFamily((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setFamily((byte[]) value);
          +            } else {
          +              setFamily((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case QUALIFIER:
          -        if (value == null) {
          -          unsetQualifier();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setQualifier((byte[])value);
          +        case QUALIFIER:
          +          if (value == null) {
          +            unsetQualifier();
                     } else {
          -            setQualifier((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setQualifier((byte[]) value);
          +            } else {
          +              setQualifier((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case VALUE:
          -        if (value == null) {
          -          unsetValue();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setValue((byte[])value);
          +        case VALUE:
          +          if (value == null) {
          +            unsetValue();
                     } else {
          -            setValue((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setValue((byte[]) value);
          +            } else {
          +              setValue((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TPUT:
          -        if (value == null) {
          -          unsetTput();
          -        } else {
          -          setTput((TPut)value);
          -        }
          -        break;
          +        case TPUT:
          +          if (value == null) {
          +            unsetTput();
          +          } else {
          +            setTput((TPut) value);
          +          }
          +          break;
           
                 }
               }
          @@ -14344,116 +15987,104 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case FAMILY:
          -        return getFamily();
          +        case FAMILY:
          +          return getFamily();
           
          -      case QUALIFIER:
          -        return getQualifier();
          +        case QUALIFIER:
          +          return getQualifier();
           
          -      case VALUE:
          -        return getValue();
          +        case VALUE:
          +          return getValue();
           
          -      case TPUT:
          -        return getTput();
          +        case TPUT:
          +          return getTput();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case ROW:
          -        return isSetRow();
          -      case FAMILY:
          -        return isSetFamily();
          -      case QUALIFIER:
          -        return isSetQualifier();
          -      case VALUE:
          -        return isSetValue();
          -      case TPUT:
          -        return isSetTput();
          +        case TABLE:
          +          return isSetTable();
          +        case ROW:
          +          return isSetRow();
          +        case FAMILY:
          +          return isSetFamily();
          +        case QUALIFIER:
          +          return isSetQualifier();
          +        case VALUE:
          +          return isSetValue();
          +        case TPUT:
          +          return isSetTput();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof checkAndPut_args)
          -        return this.equals((checkAndPut_args)that);
          +      if (that instanceof checkAndPut_args) return this.equals((checkAndPut_args) that);
                 return false;
               }
           
               public boolean equals(checkAndPut_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_family = true && this.isSetFamily();
                 boolean that_present_family = true && that.isSetFamily();
                 if (this_present_family || that_present_family) {
          -        if (!(this_present_family && that_present_family))
          -          return false;
          -        if (!this.family.equals(that.family))
          -          return false;
          +        if (!(this_present_family && that_present_family)) return false;
          +        if (!this.family.equals(that.family)) return false;
                 }
           
                 boolean this_present_qualifier = true && this.isSetQualifier();
                 boolean that_present_qualifier = true && that.isSetQualifier();
                 if (this_present_qualifier || that_present_qualifier) {
          -        if (!(this_present_qualifier && that_present_qualifier))
          -          return false;
          -        if (!this.qualifier.equals(that.qualifier))
          -          return false;
          +        if (!(this_present_qualifier && that_present_qualifier)) return false;
          +        if (!this.qualifier.equals(that.qualifier)) return false;
                 }
           
                 boolean this_present_value = true && this.isSetValue();
                 boolean that_present_value = true && that.isSetValue();
                 if (this_present_value || that_present_value) {
          -        if (!(this_present_value && that_present_value))
          -          return false;
          -        if (!this.value.equals(that.value))
          -          return false;
          +        if (!(this_present_value && that_present_value)) return false;
          +        if (!this.value.equals(that.value)) return false;
                 }
           
                 boolean this_present_tput = true && this.isSetTput();
                 boolean that_present_tput = true && that.isSetTput();
                 if (this_present_tput || that_present_tput) {
          -        if (!(this_present_tput && that_present_tput))
          -          return false;
          -        if (!this.tput.equals(that.tput))
          -          return false;
          +        if (!(this_present_tput && that_present_tput)) return false;
          +        if (!this.tput.equals(that.tput)) return false;
                 }
           
                 return true;
          @@ -14464,28 +16095,22 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetFamily()) ? 131071 : 524287);
          -      if (isSetFamily())
          -        hashCode = hashCode * 8191 + family.hashCode();
          +      if (isSetFamily()) hashCode = hashCode * 8191 + family.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetQualifier()) ? 131071 : 524287);
          -      if (isSetQualifier())
          -        hashCode = hashCode * 8191 + qualifier.hashCode();
          +      if (isSetQualifier()) hashCode = hashCode * 8191 + qualifier.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetValue()) ? 131071 : 524287);
          -      if (isSetValue())
          -        hashCode = hashCode * 8191 + value.hashCode();
          +      if (isSetValue()) hashCode = hashCode * 8191 + value.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTput()) ? 131071 : 524287);
          -      if (isSetTput())
          -        hashCode = hashCode * 8191 + tput.hashCode();
          +      if (isSetTput()) hashCode = hashCode * 8191 + tput.hashCode();
           
                 return hashCode;
               }
          @@ -14566,11 +16191,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -14633,19 +16260,24 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (row == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'row' was not present! Struct: " + toString());
                 }
                 if (family == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'family' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'family' was not present! Struct: " + toString());
                 }
                 if (qualifier == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifier' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'qualifier' was not present! Struct: " + toString());
                 }
                 if (tput == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tput' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tput' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tput != null) {
          @@ -14655,35 +16287,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class checkAndPut_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndPut_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndPut_argsStandardScheme getScheme() {
                   return new checkAndPut_argsStandardScheme();
                 }
               }
           
-    private static class checkAndPut_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<checkAndPut_args> {
+    private static class checkAndPut_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<checkAndPut_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -14691,7 +16328,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14699,7 +16336,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14707,7 +16344,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.family = iprot.readBinary();
                           struct.setFamilyIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14715,7 +16352,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.qualifier = iprot.readBinary();
                           struct.setQualifierIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14723,7 +16360,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.value = iprot.readBinary();
                           struct.setValueIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14732,7 +16369,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                           struct.tput = new TPut();
                           struct.tput.read(iprot);
                           struct.setTputIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -14743,11 +16380,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -14787,17 +16426,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_args s
           
               }
           
          -    private static class checkAndPut_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndPut_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndPut_argsTupleScheme getScheme() {
                   return new checkAndPut_argsTupleScheme();
                 }
               }
           
-    private static class checkAndPut_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<checkAndPut_args> {
+    private static class checkAndPut_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<checkAndPut_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   oprot.writeBinary(struct.row);
                   oprot.writeBinary(struct.family);
          @@ -14814,8 +16457,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.row = iprot.readBinary();
          @@ -14835,29 +16480,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_args str
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
-      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
+        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class checkAndPut_result implements org.apache.thrift.TBase<checkAndPut_result, checkAndPut_result._Fields>, java.io.Serializable, Cloneable, Comparable<checkAndPut_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndPut_result");
          +  public static class checkAndPut_result
+      implements org.apache.thrift.TBase<checkAndPut_result, checkAndPut_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<checkAndPut_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("checkAndPut_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new checkAndPut_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new checkAndPut_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new checkAndPut_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new checkAndPut_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -14870,7 +16530,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -14881,12 +16541,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -14920,22 +16580,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndPut_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndPut_result.class,
          +        metaDataMap);
               }
           
               public checkAndPut_result() {
               }
           
          -    public checkAndPut_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public checkAndPut_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -14975,7 +16640,8 @@ public checkAndPut_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -14984,7 +16650,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -15012,23 +16679,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -15036,60 +16704,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof checkAndPut_result)
          -        return this.equals((checkAndPut_result)that);
          +      if (that instanceof checkAndPut_result) return this.equals((checkAndPut_result) that);
                 return false;
               }
           
               public boolean equals(checkAndPut_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -15102,8 +16766,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -15144,13 +16807,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -15179,37 +16844,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class checkAndPut_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndPut_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndPut_resultStandardScheme getScheme() {
                   return new checkAndPut_resultStandardScheme();
                 }
               }
           
-    private static class checkAndPut_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<checkAndPut_result> {
+    private static class checkAndPut_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<checkAndPut_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -15217,7 +16888,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -15226,7 +16897,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -15237,11 +16908,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndPut_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -15261,17 +16934,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndPut_result
           
               }
           
          -    private static class checkAndPut_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndPut_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndPut_resultTupleScheme getScheme() {
                   return new checkAndPut_resultTupleScheme();
                 }
               }
           
           -    private static class checkAndPut_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<checkAndPut_result> {
           +    private static class checkAndPut_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<checkAndPut_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -15289,8 +16966,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -15304,19 +16983,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndPut_result s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class putMultiple_args implements org.apache.thrift.TBase<putMultiple_args, putMultiple_args._Fields>, java.io.Serializable, Cloneable, Comparable<putMultiple_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("putMultiple_args");
          +  public static class putMultiple_args
           +      implements org.apache.thrift.TBase<putMultiple_args, putMultiple_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<putMultiple_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("putMultiple_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TPUTS_FIELD_DESC = new org.apache.thrift.protocol.TField("tputs", org.apache.thrift.protocol.TType.LIST, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TPUTS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tputs", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new putMultiple_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new putMultiple_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new putMultiple_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new putMultiple_argsTupleSchemeFactory();
           
               /**
                * the table to put data in
           @@ -15327,18 +17018,22 @@ public static class putMultiple_args implements org.apache.thrift.TBase<putMultiple_args, putMultiple_args._Fields>, java.io.Serializable, Cloneable, Comparable<putMultiple_args> {
                public @org.apache.thrift.annotation.Nullable java.util.List<TPut> tputs; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the table to put data in
                  */
          -      TABLE((short)1, "table"),
          +      TABLE((short) 1, "table"),
                 /**
                  * a list of TPuts to commit
                  */
          -      TPUTS((short)2, "tputs");
          +      TPUTS((short) 2, "tputs");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -15351,7 +17046,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TPUTS
          @@ -15362,12 +17057,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -15399,23 +17094,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TPUTS, new org.apache.thrift.meta_data.FieldMetaData("tputs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPut.class))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TPUTS,
          +        new org.apache.thrift.meta_data.FieldMetaData("tputs",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TPut.class))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(putMultiple_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(putMultiple_args.class,
          +        metaDataMap);
               }
           
               public putMultiple_args() {
               }
           
          -    public putMultiple_args(
          -      java.nio.ByteBuffer table,
           -      java.util.List<TPut> tputs)
           -    {
           +    public putMultiple_args(java.nio.ByteBuffer table, java.util.List<TPut> tputs) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tputs = tputs;
          @@ -15463,11 +17163,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to put data in
                */
               public putMultiple_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public putMultiple_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public putMultiple_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
           @@ -15514,7 +17216,8 @@ public java.util.List<TPut> getTputs() {
               /**
                * a list of TPuts to commit
                */
           -    public putMultiple_args setTputs(@org.apache.thrift.annotation.Nullable java.util.List<TPut> tputs) {
           +    public putMultiple_args
           +        setTputs(@org.apache.thrift.annotation.Nullable java.util.List<TPut> tputs) {
                 this.tputs = tputs;
                 return this;
               }
          @@ -15534,27 +17237,28 @@ public void setTputsIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TPUTS:
          -        if (value == null) {
          -          unsetTputs();
          -        } else {
           -          setTputs((java.util.List<TPut>)value);
          -        }
          -        break;
          +        case TPUTS:
          +          if (value == null) {
          +            unsetTputs();
          +          } else {
           +            setTputs((java.util.List<TPut>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -15562,60 +17266,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TPUTS:
          -        return getTputs();
          +        case TPUTS:
          +          return getTputs();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TPUTS:
          -        return isSetTputs();
          +        case TABLE:
          +          return isSetTable();
          +        case TPUTS:
          +          return isSetTputs();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof putMultiple_args)
          -        return this.equals((putMultiple_args)that);
          +      if (that instanceof putMultiple_args) return this.equals((putMultiple_args) that);
                 return false;
               }
           
               public boolean equals(putMultiple_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tputs = true && this.isSetTputs();
                 boolean that_present_tputs = true && that.isSetTputs();
                 if (this_present_tputs || that_present_tputs) {
          -        if (!(this_present_tputs && that_present_tputs))
          -          return false;
          -        if (!this.tputs.equals(that.tputs))
          -          return false;
          +        if (!(this_present_tputs && that_present_tputs)) return false;
          +        if (!this.tputs.equals(that.tputs)) return false;
                 }
           
                 return true;
          @@ -15626,12 +17326,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTputs()) ? 131071 : 524287);
          -      if (isSetTputs())
          -        hashCode = hashCode * 8191 + tputs.hashCode();
          +      if (isSetTputs()) hashCode = hashCode * 8191 + tputs.hashCode();
           
                 return hashCode;
               }
          @@ -15672,11 +17370,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -15707,45 +17407,52 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tputs == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tputs' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tputs' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class putMultiple_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class putMultiple_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public putMultiple_argsStandardScheme getScheme() {
                   return new putMultiple_argsStandardScheme();
                 }
               }
           
           -    private static class putMultiple_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<putMultiple_args> {
           +    private static class putMultiple_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<putMultiple_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -15753,7 +17460,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -15762,9 +17469,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_args st
                           {
                             org.apache.thrift.protocol.TList _list222 = iprot.readListBegin();
                            struct.tputs = new java.util.ArrayList<TPut>(_list222.size);
          -                  @org.apache.thrift.annotation.Nullable TPut _elem223;
          -                  for (int _i224 = 0; _i224 < _list222.size; ++_i224)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TPut _elem223;
          +                  for (int _i224 = 0; _i224 < _list222.size; ++_i224) {
                               _elem223 = new TPut();
                               _elem223.read(iprot);
                               struct.tputs.add(_elem223);
          @@ -15772,7 +17479,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_args st
                             iprot.readListEnd();
                           }
                           struct.setTputsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -15783,11 +17490,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, putMultiple_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, putMultiple_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -15799,9 +17508,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, putMultiple_args s
                   if (struct.tputs != null) {
                     oprot.writeFieldBegin(TPUTS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tputs.size()));
          -            for (TPut _iter225 : struct.tputs)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.tputs.size()));
          +            for (TPut _iter225 : struct.tputs) {
                         _iter225.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -15814,38 +17523,44 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, putMultiple_args s
           
               }
           
          -    private static class putMultiple_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class putMultiple_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public putMultiple_argsTupleScheme getScheme() {
                   return new putMultiple_argsTupleScheme();
                 }
               }
           
           -    private static class putMultiple_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<putMultiple_args> {
           +    private static class putMultiple_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<putMultiple_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, putMultiple_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, putMultiple_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   {
                     oprot.writeI32(struct.tputs.size());
          -          for (TPut _iter226 : struct.tputs)
          -          {
          +          for (TPut _iter226 : struct.tputs) {
                       _iter226.write(oprot);
                     }
                   }
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, putMultiple_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, putMultiple_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   {
          -          org.apache.thrift.protocol.TList _list227 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list227 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                      struct.tputs = new java.util.ArrayList<TPut>(_list227.size);
          -          @org.apache.thrift.annotation.Nullable TPut _elem228;
          -          for (int _i229 = 0; _i229 < _list227.size; ++_i229)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TPut _elem228;
          +          for (int _i229 = 0; _i229 < _list227.size; ++_i229) {
                       _elem228 = new TPut();
                       _elem228.read(iprot);
                       struct.tputs.add(_elem228);
          @@ -15855,26 +17570,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, putMultiple_args str
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class putMultiple_result implements org.apache.thrift.TBase<putMultiple_result, putMultiple_result._Fields>, java.io.Serializable, Cloneable, Comparable<putMultiple_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("putMultiple_result");
          +  public static class putMultiple_result
           +      implements org.apache.thrift.TBase<putMultiple_result, putMultiple_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<putMultiple_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("putMultiple_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new putMultiple_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new putMultiple_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new putMultiple_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new putMultiple_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -15887,7 +17616,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -15896,12 +17625,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -15933,19 +17662,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(putMultiple_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(putMultiple_result.class,
          +        metaDataMap);
               }
           
               public putMultiple_result() {
               }
           
          -    public putMultiple_result(
          -      TIOError io)
          -    {
          +    public putMultiple_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -15993,15 +17725,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -16009,46 +17742,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof putMultiple_result)
          -        return this.equals((putMultiple_result)that);
          +      if (that instanceof putMultiple_result) return this.equals((putMultiple_result) that);
                 return false;
               }
           
               public boolean equals(putMultiple_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -16059,8 +17790,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -16091,13 +17821,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -16122,35 +17854,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class putMultiple_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class putMultiple_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public putMultiple_resultStandardScheme getScheme() {
                   return new putMultiple_resultStandardScheme();
                 }
               }
           
           -    private static class putMultiple_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<putMultiple_result> {
           +    private static class putMultiple_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<putMultiple_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -16159,7 +17896,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -16170,11 +17907,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, putMultiple_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, putMultiple_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, putMultiple_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -16189,17 +17928,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, putMultiple_result
           
               }
           
          -    private static class putMultiple_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class putMultiple_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public putMultiple_resultTupleScheme getScheme() {
                   return new putMultiple_resultTupleScheme();
                 }
               }
           
           -    private static class putMultiple_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<putMultiple_result> {
           +    private static class putMultiple_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<putMultiple_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, putMultiple_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, putMultiple_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -16211,8 +17954,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, putMultiple_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, putMultiple_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, putMultiple_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -16222,19 +17967,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, putMultiple_result s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class deleteSingle_args implements org.apache.thrift.TBase<deleteSingle_args, deleteSingle_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteSingle_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteSingle_args");
          +  public static class deleteSingle_args
           +      implements org.apache.thrift.TBase<deleteSingle_args, deleteSingle_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<deleteSingle_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteSingle_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TDELETE_FIELD_DESC = new org.apache.thrift.protocol.TField("tdelete", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TDELETE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tdelete", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteSingle_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteSingle_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteSingle_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteSingle_argsTupleSchemeFactory();
           
               /**
                * the table to delete from
           @@ -16245,18 +18002,22 @@ public static class deleteSingle_args implements org.apache.thrift.TBase<deleteSingle_args, deleteSingle_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteSingle_args> {
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -16269,7 +18030,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TDELETE
          @@ -16280,12 +18041,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -16317,22 +18078,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TDELETE, new org.apache.thrift.meta_data.FieldMetaData("tdelete", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TDelete.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TDELETE,
          +        new org.apache.thrift.meta_data.FieldMetaData("tdelete",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TDelete.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteSingle_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteSingle_args.class,
          +        metaDataMap);
               }
           
               public deleteSingle_args() {
               }
           
          -    public deleteSingle_args(
          -      java.nio.ByteBuffer table,
          -      TDelete tdelete)
          -    {
          +    public deleteSingle_args(java.nio.ByteBuffer table, TDelete tdelete) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tdelete = tdelete;
          @@ -16376,11 +18142,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to delete from
                */
               public deleteSingle_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public deleteSingle_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public deleteSingle_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -16431,27 +18199,28 @@ public void setTdeleteIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TDELETE:
          -        if (value == null) {
          -          unsetTdelete();
          -        } else {
          -          setTdelete((TDelete)value);
          -        }
          -        break;
          +        case TDELETE:
          +          if (value == null) {
          +            unsetTdelete();
          +          } else {
          +            setTdelete((TDelete) value);
          +          }
          +          break;
           
                 }
               }
          @@ -16459,60 +18228,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TDELETE:
          -        return getTdelete();
          +        case TDELETE:
          +          return getTdelete();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TDELETE:
          -        return isSetTdelete();
          +        case TABLE:
          +          return isSetTable();
          +        case TDELETE:
          +          return isSetTdelete();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteSingle_args)
          -        return this.equals((deleteSingle_args)that);
          +      if (that instanceof deleteSingle_args) return this.equals((deleteSingle_args) that);
                 return false;
               }
           
               public boolean equals(deleteSingle_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tdelete = true && this.isSetTdelete();
                 boolean that_present_tdelete = true && that.isSetTdelete();
                 if (this_present_tdelete || that_present_tdelete) {
          -        if (!(this_present_tdelete && that_present_tdelete))
          -          return false;
          -        if (!this.tdelete.equals(that.tdelete))
          -          return false;
          +        if (!(this_present_tdelete && that_present_tdelete)) return false;
          +        if (!this.tdelete.equals(that.tdelete)) return false;
                 }
           
                 return true;
          @@ -16523,12 +18288,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTdelete()) ? 131071 : 524287);
          -      if (isSetTdelete())
          -        hashCode = hashCode * 8191 + tdelete.hashCode();
          +      if (isSetTdelete()) hashCode = hashCode * 8191 + tdelete.hashCode();
           
                 return hashCode;
               }
          @@ -16569,11 +18332,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -16604,10 +18369,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tdelete == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tdelete' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tdelete' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tdelete != null) {
          @@ -16617,35 +18384,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteSingle_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteSingle_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteSingle_argsStandardScheme getScheme() {
                   return new deleteSingle_argsStandardScheme();
                 }
               }
           
-    private static class deleteSingle_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteSingle_args> {
+    private static class deleteSingle_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<deleteSingle_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteSingle_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteSingle_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -16653,7 +18425,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteSingle_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -16662,7 +18434,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteSingle_args s
                           struct.tdelete = new TDelete();
                           struct.tdelete.read(iprot);
                           struct.setTdeleteIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -16673,11 +18445,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteSingle_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteSingle_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteSingle_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -16697,24 +18471,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteSingle_args
           
               }
           
          -    private static class deleteSingle_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteSingle_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteSingle_argsTupleScheme getScheme() {
                   return new deleteSingle_argsTupleScheme();
                 }
               }
           
-    private static class deleteSingle_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteSingle_args> {
+    private static class deleteSingle_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<deleteSingle_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteSingle_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteSingle_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   struct.tdelete.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteSingle_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteSingle_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.tdelete = new TDelete();
          @@ -16723,26 +18503,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteSingle_args st
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class deleteSingle_result implements org.apache.thrift.TBase<deleteSingle_result, deleteSingle_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteSingle_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteSingle_result");
          +  public static class deleteSingle_result
+      implements org.apache.thrift.TBase<deleteSingle_result, deleteSingle_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<deleteSingle_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteSingle_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteSingle_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteSingle_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteSingle_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteSingle_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -16755,7 +18549,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -16764,12 +18558,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -16801,19 +18595,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteSingle_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteSingle_result.class,
          +        metaDataMap);
               }
           
               public deleteSingle_result() {
               }
           
          -    public deleteSingle_result(
          -      TIOError io)
          -    {
          +    public deleteSingle_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -16861,15 +18658,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -16877,46 +18675,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteSingle_result)
          -        return this.equals((deleteSingle_result)that);
          +      if (that instanceof deleteSingle_result) return this.equals((deleteSingle_result) that);
                 return false;
               }
           
               public boolean equals(deleteSingle_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -16927,8 +18723,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -16959,13 +18754,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -16990,35 +18787,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteSingle_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteSingle_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteSingle_resultStandardScheme getScheme() {
                   return new deleteSingle_resultStandardScheme();
                 }
               }
           
-    private static class deleteSingle_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteSingle_result> {
+    private static class deleteSingle_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<deleteSingle_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteSingle_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteSingle_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -17027,7 +18829,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteSingle_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -17038,11 +18840,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteSingle_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteSingle_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteSingle_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -17057,17 +18861,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteSingle_resul
           
               }
           
          -    private static class deleteSingle_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteSingle_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteSingle_resultTupleScheme getScheme() {
                   return new deleteSingle_resultTupleScheme();
                 }
               }
           
-    private static class deleteSingle_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteSingle_result> {
+    private static class deleteSingle_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<deleteSingle_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteSingle_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteSingle_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -17079,8 +18887,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteSingle_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteSingle_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteSingle_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -17090,19 +18900,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteSingle_result
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class deleteMultiple_args implements org.apache.thrift.TBase<deleteMultiple_args, deleteMultiple_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteMultiple_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteMultiple_args");
          +  public static class deleteMultiple_args
+      implements org.apache.thrift.TBase<deleteMultiple_args, deleteMultiple_args._Fields>,
+      java.io.Serializable, Cloneable, Comparable<deleteMultiple_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteMultiple_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TDELETES_FIELD_DESC = new org.apache.thrift.protocol.TField("tdeletes", org.apache.thrift.protocol.TType.LIST, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TDELETES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tdeletes", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteMultiple_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteMultiple_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteMultiple_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteMultiple_argsTupleSchemeFactory();
           
               /**
                * the table to delete from
@@ -17113,18 +18935,22 @@ public static class deleteMultiple_args implements org.apache.thrift.TBase<delet
      */
     public @org.apache.thrift.annotation.Nullable java.util.List<TDelete> tdeletes; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the table to delete from
                  */
          -      TABLE((short)1, "table"),
          +      TABLE((short) 1, "table"),
                 /**
                  * list of TDeletes to delete
                  */
          -      TDELETES((short)2, "tdeletes");
          +      TDELETES((short) 2, "tdeletes");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -17137,7 +18963,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TDELETES
          @@ -17148,12 +18974,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -17185,23 +19011,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TDELETES, new org.apache.thrift.meta_data.FieldMetaData("tdeletes", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TDelete.class))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TDELETES,
          +        new org.apache.thrift.meta_data.FieldMetaData("tdeletes",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TDelete.class))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteMultiple_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteMultiple_args.class,
          +        metaDataMap);
               }
           
               public deleteMultiple_args() {
               }
           
          -    public deleteMultiple_args(
          -      java.nio.ByteBuffer table,
-      java.util.List<TDelete> tdeletes)
-    {
+    public deleteMultiple_args(java.nio.ByteBuffer table, java.util.List<TDelete> tdeletes) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tdeletes = tdeletes;
          @@ -17215,7 +19046,8 @@ public deleteMultiple_args(deleteMultiple_args other) {
                   this.table = org.apache.thrift.TBaseHelper.copyBinary(other.table);
                 }
                 if (other.isSetTdeletes()) {
-        java.util.List<TDelete> __this__tdeletes = new java.util.ArrayList<TDelete>(other.tdeletes.size());
+        java.util.List<TDelete> __this__tdeletes =
+            new java.util.ArrayList<TDelete>(other.tdeletes.size());
                   for (TDelete other_element : other.tdeletes) {
                     __this__tdeletes.add(new TDelete(other_element));
                   }
          @@ -17249,11 +19081,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to delete from
                */
               public deleteMultiple_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public deleteMultiple_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public deleteMultiple_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
@@ -17300,7 +19134,8 @@ public java.util.List<TDelete> getTdeletes() {
               /**
                * list of TDeletes to delete
                */
-    public deleteMultiple_args setTdeletes(@org.apache.thrift.annotation.Nullable java.util.List<TDelete> tdeletes) {
+    public deleteMultiple_args
+        setTdeletes(@org.apache.thrift.annotation.Nullable java.util.List<TDelete> tdeletes) {
                 this.tdeletes = tdeletes;
                 return this;
               }
          @@ -17320,27 +19155,28 @@ public void setTdeletesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TDELETES:
          -        if (value == null) {
          -          unsetTdeletes();
          -        } else {
-          setTdeletes((java.util.List<TDelete>)value);
          -        }
          -        break;
          +        case TDELETES:
          +          if (value == null) {
          +            unsetTdeletes();
          +          } else {
+            setTdeletes((java.util.List<TDelete>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -17348,60 +19184,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TDELETES:
          -        return getTdeletes();
          +        case TDELETES:
          +          return getTdeletes();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TDELETES:
          -        return isSetTdeletes();
          +        case TABLE:
          +          return isSetTable();
          +        case TDELETES:
          +          return isSetTdeletes();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteMultiple_args)
          -        return this.equals((deleteMultiple_args)that);
          +      if (that instanceof deleteMultiple_args) return this.equals((deleteMultiple_args) that);
                 return false;
               }
           
               public boolean equals(deleteMultiple_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tdeletes = true && this.isSetTdeletes();
                 boolean that_present_tdeletes = true && that.isSetTdeletes();
                 if (this_present_tdeletes || that_present_tdeletes) {
          -        if (!(this_present_tdeletes && that_present_tdeletes))
          -          return false;
          -        if (!this.tdeletes.equals(that.tdeletes))
          -          return false;
          +        if (!(this_present_tdeletes && that_present_tdeletes)) return false;
          +        if (!this.tdeletes.equals(that.tdeletes)) return false;
                 }
           
                 return true;
          @@ -17412,12 +19244,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTdeletes()) ? 131071 : 524287);
          -      if (isSetTdeletes())
          -        hashCode = hashCode * 8191 + tdeletes.hashCode();
          +      if (isSetTdeletes()) hashCode = hashCode * 8191 + tdeletes.hashCode();
           
                 return hashCode;
               }
          @@ -17458,11 +19288,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -17493,45 +19325,52 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tdeletes == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tdeletes' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tdeletes' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteMultiple_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteMultiple_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteMultiple_argsStandardScheme getScheme() {
                   return new deleteMultiple_argsStandardScheme();
                 }
               }
           
-    private static class deleteMultiple_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteMultiple_args> {
+    private static class deleteMultiple_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<deleteMultiple_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -17539,7 +19378,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -17548,9 +19387,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_args
                           {
                             org.apache.thrift.protocol.TList _list230 = iprot.readListBegin();
                             struct.tdeletes = new java.util.ArrayList<TDelete>(_list230.size);
          -                  @org.apache.thrift.annotation.Nullable TDelete _elem231;
          -                  for (int _i232 = 0; _i232 < _list230.size; ++_i232)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TDelete _elem231;
          +                  for (int _i232 = 0; _i232 < _list230.size; ++_i232) {
                               _elem231 = new TDelete();
                               _elem231.read(iprot);
                               struct.tdeletes.add(_elem231);
          @@ -17558,7 +19397,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_args
                             iprot.readListEnd();
                           }
                           struct.setTdeletesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -17569,11 +19408,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteMultiple_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteMultiple_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -17585,9 +19426,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteMultiple_arg
                   if (struct.tdeletes != null) {
                     oprot.writeFieldBegin(TDELETES_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tdeletes.size()));
          -            for (TDelete _iter233 : struct.tdeletes)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.tdeletes.size()));
          +            for (TDelete _iter233 : struct.tdeletes) {
                         _iter233.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -17600,38 +19441,44 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteMultiple_arg
           
               }
           
          -    private static class deleteMultiple_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteMultiple_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteMultiple_argsTupleScheme getScheme() {
                   return new deleteMultiple_argsTupleScheme();
                 }
               }
           
          -    private static class deleteMultiple_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteMultiple_args> {
          +    private static class deleteMultiple_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<deleteMultiple_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   {
                     oprot.writeI32(struct.tdeletes.size());
          -          for (TDelete _iter234 : struct.tdeletes)
          -          {
          +          for (TDelete _iter234 : struct.tdeletes) {
                       _iter234.write(oprot);
                     }
                   }
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   {
          -          org.apache.thrift.protocol.TList _list235 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list235 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                     struct.tdeletes = new java.util.ArrayList(_list235.size);
          -          @org.apache.thrift.annotation.Nullable TDelete _elem236;
          -          for (int _i237 = 0; _i237 < _list235.size; ++_i237)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TDelete _elem236;
          +          for (int _i237 = 0; _i237 < _list235.size; ++_i237) {
                       _elem236 = new TDelete();
                       _elem236.read(iprot);
                       struct.tdeletes.add(_elem236);
          @@ -17641,29 +19488,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_args
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class deleteMultiple_result implements org.apache.thrift.TBase<deleteMultiple_result, deleteMultiple_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteMultiple_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteMultiple_result");
          +  public static class deleteMultiple_result
          +      implements org.apache.thrift.TBase<deleteMultiple_result, deleteMultiple_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<deleteMultiple_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteMultiple_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteMultiple_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteMultiple_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteMultiple_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteMultiple_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TDelete> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -17676,7 +19538,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -17687,12 +19549,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -17724,23 +19586,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TDelete.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TDelete.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteMultiple_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteMultiple_result.class,
          +        metaDataMap);
               }
           
               public deleteMultiple_result() {
               }
           
          -    public deleteMultiple_result(
          -      java.util.List<TDelete> success,
          -      TIOError io)
          -    {
          +    public deleteMultiple_result(java.util.List<TDelete> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -17751,7 +19618,8 @@ public deleteMultiple_result(
                */
               public deleteMultiple_result(deleteMultiple_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TDelete> __this__success = new java.util.ArrayList<TDelete>(other.success.size());
          +        java.util.List<TDelete> __this__success =
          +            new java.util.ArrayList<TDelete>(other.success.size());
                   for (TDelete other_element : other.success) {
                     __this__success.add(new TDelete(other_element));
                   }
          @@ -17793,7 +19661,8 @@ public java.util.List<TDelete> getSuccess() {
                 return this.success;
               }
           
          -    public deleteMultiple_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TDelete> success) {
          +    public deleteMultiple_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TDelete> success) {
                 this.success = success;
                 return this;
               }
          @@ -17838,23 +19707,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TDelete>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TDelete>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -17862,60 +19732,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteMultiple_result)
          -        return this.equals((deleteMultiple_result)that);
          +      if (that instanceof deleteMultiple_result) return this.equals((deleteMultiple_result) that);
                 return false;
               }
           
               public boolean equals(deleteMultiple_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -17926,12 +19792,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -17972,13 +19836,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -18011,35 +19877,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteMultiple_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteMultiple_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteMultiple_resultStandardScheme getScheme() {
                   return new deleteMultiple_resultStandardScheme();
                 }
               }
           
          -    private static class deleteMultiple_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteMultiple_result> {
          +    private static class deleteMultiple_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<deleteMultiple_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -18048,9 +19919,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_resu
                           {
                             org.apache.thrift.protocol.TList _list238 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TDelete>(_list238.size);
          -                  @org.apache.thrift.annotation.Nullable TDelete _elem239;
          -                  for (int _i240 = 0; _i240 < _list238.size; ++_i240)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TDelete _elem239;
          +                  for (int _i240 = 0; _i240 < _list238.size; ++_i240) {
                               _elem239 = new TDelete();
                               _elem239.read(iprot);
                               struct.success.add(_elem239);
          @@ -18058,7 +19929,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_resu
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -18067,7 +19938,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_resu
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -18078,20 +19949,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteMultiple_resu
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteMultiple_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteMultiple_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TDelete _iter241 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TDelete _iter241 : struct.success) {
                         _iter241.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -18109,17 +19982,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteMultiple_res
           
               }
           
          -    private static class deleteMultiple_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteMultiple_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteMultiple_resultTupleScheme getScheme() {
                   return new deleteMultiple_resultTupleScheme();
                 }
               }
           
          -    private static class deleteMultiple_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteMultiple_result> {
          +    private static class deleteMultiple_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<deleteMultiple_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -18131,8 +20008,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_resu
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TDelete _iter242 : struct.success)
          -            {
          +            for (TDelete _iter242 : struct.success) {
                         _iter242.write(oprot);
                       }
                     }
          @@ -18143,16 +20019,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_resu
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list243 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list243 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TDelete>(_list243.size);
          -            @org.apache.thrift.annotation.Nullable TDelete _elem244;
          -            for (int _i245 = 0; _i245 < _list243.size; ++_i245)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TDelete _elem244;
          +            for (int _i245 = 0; _i245 < _list243.size; ++_i245) {
                         _elem244 = new TDelete();
                         _elem244.read(iprot);
                         struct.success.add(_elem244);
          @@ -18168,23 +20047,43 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteMultiple_resul
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class checkAndDelete_args implements org.apache.thrift.TBase<checkAndDelete_args, checkAndDelete_args._Fields>, java.io.Serializable, Cloneable, Comparable<checkAndDelete_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndDelete_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING, (short)4);
          -    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)5);
          -    private static final org.apache.thrift.protocol.TField TDELETE_FIELD_DESC = new org.apache.thrift.protocol.TField("tdelete", org.apache.thrift.protocol.TType.STRUCT, (short)6);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new checkAndDelete_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new checkAndDelete_argsTupleSchemeFactory();
          +  public static class checkAndDelete_args
          +      implements org.apache.thrift.TBase<checkAndDelete_args, checkAndDelete_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<checkAndDelete_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("checkAndDelete_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING,
          +            (short) 5);
          +    private static final org.apache.thrift.protocol.TField TDELETE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tdelete", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 6);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new checkAndDelete_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new checkAndDelete_argsTupleSchemeFactory();
           
               /**
                * to check in and delete from
          @@ -18203,9 +20102,8 @@ public static class checkAndDelete_args implements org.apache.thrift.TBase
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -18255,7 +20156,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // ROW
          @@ -18274,12 +20175,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -18311,34 +20212,49 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("qualifier", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TDELETE, new org.apache.thrift.meta_data.FieldMetaData("tdelete", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TDelete.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.FAMILY,
          +        new org.apache.thrift.meta_data.FieldMetaData("family",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.QUALIFIER,
          +        new org.apache.thrift.meta_data.FieldMetaData("qualifier",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.VALUE,
          +        new org.apache.thrift.meta_data.FieldMetaData("value",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TDELETE,
          +        new org.apache.thrift.meta_data.FieldMetaData("tdelete",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TDelete.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndDelete_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndDelete_args.class,
          +        metaDataMap);
               }
           
               public checkAndDelete_args() {
               }
           
          -    public checkAndDelete_args(
          -      java.nio.ByteBuffer table,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer family,
          -      java.nio.ByteBuffer qualifier,
          -      java.nio.ByteBuffer value,
          -      TDelete tdelete)
          -    {
          +    public checkAndDelete_args(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, java.nio.ByteBuffer value,
          +        TDelete tdelete) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -18402,11 +20318,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * to check in and delete from
                */
               public checkAndDelete_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public checkAndDelete_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public checkAndDelete_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -18442,11 +20360,12 @@ public java.nio.ByteBuffer bufferForRow() {
                * row to check
                */
               public checkAndDelete_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          -    public checkAndDelete_args setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
          +    public checkAndDelete_args
          +        setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
                 return this;
               }
          @@ -18482,11 +20401,13 @@ public java.nio.ByteBuffer bufferForFamily() {
                * column family to check
                */
               public checkAndDelete_args setFamily(byte[] family) {
          -      this.family = family == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(family.clone());
          +      this.family =
          +          family == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(family.clone());
                 return this;
               }
           
          -    public checkAndDelete_args setFamily(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family) {
          +    public checkAndDelete_args
          +        setFamily(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family) {
                 this.family = org.apache.thrift.TBaseHelper.copyBinary(family);
                 return this;
               }
          @@ -18522,11 +20443,13 @@ public java.nio.ByteBuffer bufferForQualifier() {
                * column qualifier to check
                */
               public checkAndDelete_args setQualifier(byte[] qualifier) {
          -      this.qualifier = qualifier == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(qualifier.clone());
          +      this.qualifier = qualifier == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(qualifier.clone());
                 return this;
               }
           
          -    public checkAndDelete_args setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
          +    public checkAndDelete_args
          +        setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
                 this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
                 return this;
               }
          @@ -18547,9 +20470,8 @@ public void setQualifierIsSet(boolean value) {
               }
           
               /**
          -     * the expected value, if not provided the
          -     * check is for the non-existence of the
          -     * column in question
          +     * the expected value, if not provided the check is for the non-existence of the column in
          +     * question
                */
               public byte[] getValue() {
                 setValue(org.apache.thrift.TBaseHelper.rightSize(value));
          @@ -18561,16 +20483,17 @@ public java.nio.ByteBuffer bufferForValue() {
               }
           
               /**
          -     * the expected value, if not provided the
          -     * check is for the non-existence of the
          -     * column in question
          +     * the expected value, if not provided the check is for the non-existence of the column in
          +     * question
                */
               public checkAndDelete_args setValue(byte[] value) {
          -      this.value = value == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(value.clone());
          +      this.value =
          +          value == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(value.clone());
                 return this;
               }
           
          -    public checkAndDelete_args setValue(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value) {
          +    public checkAndDelete_args
          +        setValue(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value) {
                 this.value = org.apache.thrift.TBaseHelper.copyBinary(value);
                 return this;
               }
          @@ -18621,75 +20544,76 @@ public void setTdeleteIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case FAMILY:
          -        if (value == null) {
          -          unsetFamily();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setFamily((byte[])value);
          +        case FAMILY:
          +          if (value == null) {
          +            unsetFamily();
                     } else {
          -            setFamily((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setFamily((byte[]) value);
          +            } else {
          +              setFamily((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case QUALIFIER:
          -        if (value == null) {
          -          unsetQualifier();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setQualifier((byte[])value);
          +        case QUALIFIER:
          +          if (value == null) {
          +            unsetQualifier();
                     } else {
          -            setQualifier((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setQualifier((byte[]) value);
          +            } else {
          +              setQualifier((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case VALUE:
          -        if (value == null) {
          -          unsetValue();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setValue((byte[])value);
          +        case VALUE:
          +          if (value == null) {
          +            unsetValue();
                     } else {
          -            setValue((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setValue((byte[]) value);
          +            } else {
          +              setValue((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TDELETE:
          -        if (value == null) {
          -          unsetTdelete();
          -        } else {
          -          setTdelete((TDelete)value);
          -        }
          -        break;
          +        case TDELETE:
          +          if (value == null) {
          +            unsetTdelete();
          +          } else {
          +            setTdelete((TDelete) value);
          +          }
          +          break;
           
                 }
               }
          @@ -18697,116 +20621,104 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case FAMILY:
          -        return getFamily();
          +        case FAMILY:
          +          return getFamily();
           
          -      case QUALIFIER:
          -        return getQualifier();
          +        case QUALIFIER:
          +          return getQualifier();
           
          -      case VALUE:
          -        return getValue();
          +        case VALUE:
          +          return getValue();
           
          -      case TDELETE:
          -        return getTdelete();
          +        case TDELETE:
          +          return getTdelete();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case ROW:
          -        return isSetRow();
          -      case FAMILY:
          -        return isSetFamily();
          -      case QUALIFIER:
          -        return isSetQualifier();
          -      case VALUE:
          -        return isSetValue();
          -      case TDELETE:
          -        return isSetTdelete();
          +        case TABLE:
          +          return isSetTable();
          +        case ROW:
          +          return isSetRow();
          +        case FAMILY:
          +          return isSetFamily();
          +        case QUALIFIER:
          +          return isSetQualifier();
          +        case VALUE:
          +          return isSetValue();
          +        case TDELETE:
          +          return isSetTdelete();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof checkAndDelete_args)
          -        return this.equals((checkAndDelete_args)that);
          +      if (that instanceof checkAndDelete_args) return this.equals((checkAndDelete_args) that);
                 return false;
               }
           
               public boolean equals(checkAndDelete_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_family = true && this.isSetFamily();
                 boolean that_present_family = true && that.isSetFamily();
                 if (this_present_family || that_present_family) {
          -        if (!(this_present_family && that_present_family))
          -          return false;
          -        if (!this.family.equals(that.family))
          -          return false;
          +        if (!(this_present_family && that_present_family)) return false;
          +        if (!this.family.equals(that.family)) return false;
                 }
           
                 boolean this_present_qualifier = true && this.isSetQualifier();
                 boolean that_present_qualifier = true && that.isSetQualifier();
                 if (this_present_qualifier || that_present_qualifier) {
          -        if (!(this_present_qualifier && that_present_qualifier))
          -          return false;
          -        if (!this.qualifier.equals(that.qualifier))
          -          return false;
          +        if (!(this_present_qualifier && that_present_qualifier)) return false;
          +        if (!this.qualifier.equals(that.qualifier)) return false;
                 }
           
                 boolean this_present_value = true && this.isSetValue();
                 boolean that_present_value = true && that.isSetValue();
                 if (this_present_value || that_present_value) {
          -        if (!(this_present_value && that_present_value))
          -          return false;
          -        if (!this.value.equals(that.value))
          -          return false;
          +        if (!(this_present_value && that_present_value)) return false;
          +        if (!this.value.equals(that.value)) return false;
                 }
           
                 boolean this_present_tdelete = true && this.isSetTdelete();
                 boolean that_present_tdelete = true && that.isSetTdelete();
                 if (this_present_tdelete || that_present_tdelete) {
          -        if (!(this_present_tdelete && that_present_tdelete))
          -          return false;
          -        if (!this.tdelete.equals(that.tdelete))
          -          return false;
          +        if (!(this_present_tdelete && that_present_tdelete)) return false;
          +        if (!this.tdelete.equals(that.tdelete)) return false;
                 }
           
                 return true;
          @@ -18817,28 +20729,22 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetFamily()) ? 131071 : 524287);
          -      if (isSetFamily())
          -        hashCode = hashCode * 8191 + family.hashCode();
          +      if (isSetFamily()) hashCode = hashCode * 8191 + family.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetQualifier()) ? 131071 : 524287);
          -      if (isSetQualifier())
          -        hashCode = hashCode * 8191 + qualifier.hashCode();
          +      if (isSetQualifier()) hashCode = hashCode * 8191 + qualifier.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetValue()) ? 131071 : 524287);
          -      if (isSetValue())
          -        hashCode = hashCode * 8191 + value.hashCode();
          +      if (isSetValue()) hashCode = hashCode * 8191 + value.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTdelete()) ? 131071 : 524287);
          -      if (isSetTdelete())
          -        hashCode = hashCode * 8191 + tdelete.hashCode();
          +      if (isSetTdelete()) hashCode = hashCode * 8191 + tdelete.hashCode();
           
                 return hashCode;
               }
          @@ -18919,11 +20825,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -18986,19 +20894,24 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (row == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'row' was not present! Struct: " + toString());
                 }
                 if (family == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'family' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'family' was not present! Struct: " + toString());
                 }
                 if (qualifier == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifier' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'qualifier' was not present! Struct: " + toString());
                 }
                 if (tdelete == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tdelete' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tdelete' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tdelete != null) {
          @@ -19008,35 +20921,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class checkAndDelete_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndDelete_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndDelete_argsStandardScheme getScheme() {
                   return new checkAndDelete_argsStandardScheme();
                 }
               }
           
           -    private static class checkAndDelete_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<checkAndDelete_args> {
           +    private static class checkAndDelete_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<checkAndDelete_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -19044,7 +20962,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19052,7 +20970,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19060,7 +20978,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.family = iprot.readBinary();
                           struct.setFamilyIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19068,7 +20986,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.qualifier = iprot.readBinary();
                           struct.setQualifierIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19076,7 +20994,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.value = iprot.readBinary();
                           struct.setValueIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19085,7 +21003,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_args
                           struct.tdelete = new TDelete();
                           struct.tdelete.read(iprot);
                           struct.setTdeleteIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19096,11 +21014,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndDelete_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndDelete_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -19140,17 +21060,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndDelete_arg
           
               }
           
          -    private static class checkAndDelete_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndDelete_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndDelete_argsTupleScheme getScheme() {
                   return new checkAndDelete_argsTupleScheme();
                 }
               }
           
           -    private static class checkAndDelete_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<checkAndDelete_args> {
           +    private static class checkAndDelete_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<checkAndDelete_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   oprot.writeBinary(struct.row);
                   oprot.writeBinary(struct.family);
          @@ -19167,8 +21091,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_args
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.row = iprot.readBinary();
          @@ -19188,29 +21114,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_args
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class checkAndDelete_result implements org.apache.thrift.TBase<checkAndDelete_result, checkAndDelete_result._Fields>, java.io.Serializable, Cloneable, Comparable<checkAndDelete_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndDelete_result");
          +  public static class checkAndDelete_result
           +      implements org.apache.thrift.TBase<checkAndDelete_result, checkAndDelete_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<checkAndDelete_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("checkAndDelete_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new checkAndDelete_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new checkAndDelete_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new checkAndDelete_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new checkAndDelete_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -19223,7 +21164,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -19234,12 +21175,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -19273,22 +21214,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndDelete_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndDelete_result.class,
          +        metaDataMap);
               }
           
               public checkAndDelete_result() {
               }
           
          -    public checkAndDelete_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public checkAndDelete_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -19328,7 +21274,8 @@ public checkAndDelete_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -19337,7 +21284,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -19365,23 +21313,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -19389,60 +21338,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof checkAndDelete_result)
          -        return this.equals((checkAndDelete_result)that);
          +      if (that instanceof checkAndDelete_result) return this.equals((checkAndDelete_result) that);
                 return false;
               }
           
               public boolean equals(checkAndDelete_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -19455,8 +21400,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -19497,13 +21441,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -19532,37 +21478,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class checkAndDelete_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndDelete_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndDelete_resultStandardScheme getScheme() {
                   return new checkAndDelete_resultStandardScheme();
                 }
               }
           
           -    private static class checkAndDelete_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<checkAndDelete_result> {
           +    private static class checkAndDelete_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<checkAndDelete_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -19570,7 +21522,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_resu
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19579,7 +21531,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_resu
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -19590,11 +21542,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndDelete_resu
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndDelete_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndDelete_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -19614,17 +21568,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndDelete_res
           
               }
           
          -    private static class checkAndDelete_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndDelete_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndDelete_resultTupleScheme getScheme() {
                   return new checkAndDelete_resultTupleScheme();
                 }
               }
           
           -    private static class checkAndDelete_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<checkAndDelete_result> {
           +    private static class checkAndDelete_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<checkAndDelete_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -19642,8 +21600,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_resu
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -19657,19 +21617,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndDelete_resul
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class increment_args implements org.apache.thrift.TBase<increment_args, increment_args._Fields>, java.io.Serializable, Cloneable, Comparable<increment_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("increment_args");
          +  public static class increment_args
           +      implements org.apache.thrift.TBase<increment_args, increment_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<increment_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("increment_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TINCREMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("tincrement", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TINCREMENT_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tincrement", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new increment_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new increment_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new increment_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new increment_argsTupleSchemeFactory();
           
               /**
                * the table to increment the value on
           @@ -19680,18 +21652,22 @@ public static class increment_args implements org.apache.thrift.TBase
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -19704,7 +21680,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TINCREMENT
          @@ -19715,12 +21691,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -19752,22 +21728,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TINCREMENT, new org.apache.thrift.meta_data.FieldMetaData("tincrement", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIncrement.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TINCREMENT,
          +        new org.apache.thrift.meta_data.FieldMetaData("tincrement",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIncrement.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(increment_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(increment_args.class,
          +        metaDataMap);
               }
           
               public increment_args() {
               }
           
          -    public increment_args(
          -      java.nio.ByteBuffer table,
          -      TIncrement tincrement)
          -    {
          +    public increment_args(java.nio.ByteBuffer table, TIncrement tincrement) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tincrement = tincrement;
          @@ -19811,11 +21792,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to increment the value on
                */
               public increment_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public increment_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public increment_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -19846,7 +21829,8 @@ public TIncrement getTincrement() {
               /**
                * the TIncrement to increment
                */
          -    public increment_args setTincrement(@org.apache.thrift.annotation.Nullable TIncrement tincrement) {
          +    public increment_args
          +        setTincrement(@org.apache.thrift.annotation.Nullable TIncrement tincrement) {
                 this.tincrement = tincrement;
                 return this;
               }
          @@ -19866,27 +21850,28 @@ public void setTincrementIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TINCREMENT:
          -        if (value == null) {
          -          unsetTincrement();
          -        } else {
          -          setTincrement((TIncrement)value);
          -        }
          -        break;
          +        case TINCREMENT:
          +          if (value == null) {
          +            unsetTincrement();
          +          } else {
          +            setTincrement((TIncrement) value);
          +          }
          +          break;
           
                 }
               }
          @@ -19894,60 +21879,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TINCREMENT:
          -        return getTincrement();
          +        case TINCREMENT:
          +          return getTincrement();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TINCREMENT:
          -        return isSetTincrement();
          +        case TABLE:
          +          return isSetTable();
          +        case TINCREMENT:
          +          return isSetTincrement();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof increment_args)
          -        return this.equals((increment_args)that);
          +      if (that instanceof increment_args) return this.equals((increment_args) that);
                 return false;
               }
           
               public boolean equals(increment_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tincrement = true && this.isSetTincrement();
                 boolean that_present_tincrement = true && that.isSetTincrement();
                 if (this_present_tincrement || that_present_tincrement) {
          -        if (!(this_present_tincrement && that_present_tincrement))
          -          return false;
          -        if (!this.tincrement.equals(that.tincrement))
          -          return false;
          +        if (!(this_present_tincrement && that_present_tincrement)) return false;
          +        if (!this.tincrement.equals(that.tincrement)) return false;
                 }
           
                 return true;
          @@ -19958,12 +21939,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTincrement()) ? 131071 : 524287);
          -      if (isSetTincrement())
          -        hashCode = hashCode * 8191 + tincrement.hashCode();
          +      if (isSetTincrement()) hashCode = hashCode * 8191 + tincrement.hashCode();
           
                 return hashCode;
               }
          @@ -20004,11 +21983,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -20039,10 +22020,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tincrement == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tincrement' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tincrement' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tincrement != null) {
          @@ -20052,35 +22035,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class increment_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class increment_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public increment_argsStandardScheme getScheme() {
                   return new increment_argsStandardScheme();
                 }
               }
           
-    private static class increment_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<increment_args> {
+    private static class increment_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<increment_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, increment_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, increment_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -20088,7 +22076,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20097,7 +22085,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_args stru
                           struct.tincrement = new TIncrement();
                           struct.tincrement.read(iprot);
                           struct.setTincrementIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20108,11 +22096,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_args stru
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, increment_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, increment_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -20132,24 +22122,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, increment_args str
           
               }
           
          -    private static class increment_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class increment_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public increment_argsTupleScheme getScheme() {
                   return new increment_argsTupleScheme();
                 }
               }
           
-    private static class increment_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<increment_args> {
+    private static class increment_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<increment_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, increment_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, increment_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   struct.tincrement.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, increment_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, increment_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.tincrement = new TIncrement();
          @@ -20158,29 +22154,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, increment_args struc
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class increment_result implements org.apache.thrift.TBase<increment_result, increment_result._Fields>, java.io.Serializable, Cloneable, Comparable<increment_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("increment_result");
          +  public static class increment_result
+      implements org.apache.thrift.TBase<increment_result, increment_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<increment_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("increment_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new increment_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new increment_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new increment_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new increment_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TResult success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -20193,7 +22204,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -20204,12 +22215,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -20241,22 +22252,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TResult.class)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TResult.class)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(increment_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(increment_result.class,
          +        metaDataMap);
               }
           
               public increment_result() {
               }
           
          -    public increment_result(
          -      TResult success,
          -      TIOError io)
          -    {
          +    public increment_result(TResult success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -20334,23 +22350,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((TResult)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((TResult) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -20358,60 +22375,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof increment_result)
          -        return this.equals((increment_result)that);
          +      if (that instanceof increment_result) return this.equals((increment_result) that);
                 return false;
               }
           
               public boolean equals(increment_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -20422,12 +22435,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -20468,13 +22479,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -20510,35 +22523,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class increment_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class increment_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public increment_resultStandardScheme getScheme() {
                   return new increment_resultStandardScheme();
                 }
               }
           
-    private static class increment_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<increment_result> {
+    private static class increment_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<increment_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, increment_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, increment_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -20547,7 +22565,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_result st
                           struct.success = new TResult();
                           struct.success.read(iprot);
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20556,7 +22574,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_result st
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -20567,11 +22585,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, increment_result st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, increment_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, increment_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -20591,17 +22611,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, increment_result s
           
               }
           
          -    private static class increment_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class increment_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public increment_resultTupleScheme getScheme() {
                   return new increment_resultTupleScheme();
                 }
               }
           
-    private static class increment_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<increment_result> {
+    private static class increment_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<increment_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, increment_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, increment_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -20619,8 +22643,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, increment_result st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, increment_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, increment_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = new TResult();
          @@ -20635,19 +22661,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, increment_result str
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class append_args implements org.apache.thrift.TBase<append_args, append_args._Fields>, java.io.Serializable, Cloneable, Comparable<append_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("append_args");
          +  public static class append_args
+      implements org.apache.thrift.TBase<append_args, append_args._Fields>, java.io.Serializable,
+      Cloneable, Comparable<append_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("append_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TAPPEND_FIELD_DESC = new org.apache.thrift.protocol.TField("tappend", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TAPPEND_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tappend", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new append_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new append_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new append_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new append_argsTupleSchemeFactory();
           
               /**
                * the table to append the value on
@@ -20658,18 +22696,22 @@ public static class append_args implements org.apache.thrift.TBase
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -20682,7 +22724,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TAPPEND
          @@ -20693,12 +22735,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -20730,22 +22772,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TAPPEND, new org.apache.thrift.meta_data.FieldMetaData("tappend", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAppend.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TAPPEND,
          +        new org.apache.thrift.meta_data.FieldMetaData("tappend",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TAppend.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_args.class,
          +        metaDataMap);
               }
           
               public append_args() {
               }
           
          -    public append_args(
          -      java.nio.ByteBuffer table,
          -      TAppend tappend)
          -    {
          +    public append_args(java.nio.ByteBuffer table, TAppend tappend) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tappend = tappend;
          @@ -20789,7 +22836,8 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to append the value on
                */
               public append_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          @@ -20844,27 +22892,28 @@ public void setTappendIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TAPPEND:
          -        if (value == null) {
          -          unsetTappend();
          -        } else {
          -          setTappend((TAppend)value);
          -        }
          -        break;
          +        case TAPPEND:
          +          if (value == null) {
          +            unsetTappend();
          +          } else {
          +            setTappend((TAppend) value);
          +          }
          +          break;
           
                 }
               }
          @@ -20872,60 +22921,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TAPPEND:
          -        return getTappend();
          +        case TAPPEND:
          +          return getTappend();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TAPPEND:
          -        return isSetTappend();
          +        case TABLE:
          +          return isSetTable();
          +        case TAPPEND:
          +          return isSetTappend();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof append_args)
          -        return this.equals((append_args)that);
          +      if (that instanceof append_args) return this.equals((append_args) that);
                 return false;
               }
           
               public boolean equals(append_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tappend = true && this.isSetTappend();
                 boolean that_present_tappend = true && that.isSetTappend();
                 if (this_present_tappend || that_present_tappend) {
          -        if (!(this_present_tappend && that_present_tappend))
          -          return false;
          -        if (!this.tappend.equals(that.tappend))
          -          return false;
          +        if (!(this_present_tappend && that_present_tappend)) return false;
          +        if (!this.tappend.equals(that.tappend)) return false;
                 }
           
                 return true;
          @@ -20936,12 +22981,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTappend()) ? 131071 : 524287);
          -      if (isSetTappend())
          -        hashCode = hashCode * 8191 + tappend.hashCode();
          +      if (isSetTappend()) hashCode = hashCode * 8191 + tappend.hashCode();
           
                 return hashCode;
               }
          @@ -20982,11 +23025,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -21017,10 +23062,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tappend == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tappend' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tappend' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tappend != null) {
          @@ -21030,35 +23077,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class append_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class append_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public append_argsStandardScheme getScheme() {
                   return new append_argsStandardScheme();
                 }
               }
           
-    private static class append_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<append_args> {
+    private static class append_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<append_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -21066,7 +23118,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct)
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21075,7 +23127,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct)
                           struct.tappend = new TAppend();
                           struct.tappend.read(iprot);
                           struct.setTappendIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21086,11 +23138,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_args struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, append_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, append_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -21110,24 +23164,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_args struct
           
               }
           
          -    private static class append_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class append_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public append_argsTupleScheme getScheme() {
                   return new append_argsTupleScheme();
                 }
               }
           
-    private static class append_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<append_args> {
+    private static class append_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<append_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, append_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, append_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   struct.tappend.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, append_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, append_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.tappend = new TAppend();
          @@ -21136,29 +23196,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_args struct)
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class append_result implements org.apache.thrift.TBase<append_result, append_result._Fields>, java.io.Serializable, Cloneable, Comparable<append_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("append_result");
          +  public static class append_result
+      implements org.apache.thrift.TBase<append_result, append_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<append_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("append_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new append_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new append_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new append_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new append_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TResult success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -21171,7 +23246,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -21182,12 +23257,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -21219,22 +23294,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TResult.class)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TResult.class)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(append_result.class,
          +        metaDataMap);
               }
           
               public append_result() {
               }
           
          -    public append_result(
          -      TResult success,
          -      TIOError io)
          -    {
          +    public append_result(TResult success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -21312,23 +23392,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((TResult)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((TResult) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -21336,60 +23417,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof append_result)
          -        return this.equals((append_result)that);
          +      if (that instanceof append_result) return this.equals((append_result) that);
                 return false;
               }
           
               public boolean equals(append_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -21400,12 +23477,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -21446,13 +23521,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -21488,35 +23565,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class append_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class append_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public append_resultStandardScheme getScheme() {
                   return new append_resultStandardScheme();
                 }
               }
           
          -    private static class append_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<append_result> {
          +    private static class append_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<append_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -21525,7 +23607,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struc
                           struct.success = new TResult();
                           struct.success.read(iprot);
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21534,7 +23616,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struc
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -21545,11 +23627,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_result struc
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, append_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, append_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -21569,17 +23653,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_result stru
           
               }
           
          -    private static class append_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class append_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public append_resultTupleScheme getScheme() {
                   return new append_resultTupleScheme();
                 }
               }
           
          -    private static class append_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<append_result> {
          +    private static class append_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<append_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, append_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, append_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -21597,8 +23685,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_result struc
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, append_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, append_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = new TResult();
          @@ -21613,19 +23703,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_result struct
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class openScanner_args implements org.apache.thrift.TBase<openScanner_args, openScanner_args._Fields>, java.io.Serializable, Cloneable, Comparable<openScanner_args> {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("openScanner_args");
          +  public static class openScanner_args
          +      implements org.apache.thrift.TBase<openScanner_args, openScanner_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<openScanner_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("openScanner_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TSCAN_FIELD_DESC = new org.apache.thrift.protocol.TField("tscan", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TSCAN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tscan", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new openScanner_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new openScanner_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new openScanner_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new openScanner_argsTupleSchemeFactory();
           
               /**
                * the table to get the Scanner for
          @@ -21636,18 +23738,22 @@ public static class openScanner_args implements org.apache.thrift.TBase<openScanner_args, openScanner_args._Fields>
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -21660,7 +23766,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TSCAN
          @@ -21671,12 +23777,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -21708,22 +23814,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TSCAN, new org.apache.thrift.meta_data.FieldMetaData("tscan", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TScan.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TSCAN,
          +        new org.apache.thrift.meta_data.FieldMetaData("tscan",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TScan.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(openScanner_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(openScanner_args.class,
          +        metaDataMap);
               }
           
               public openScanner_args() {
               }
           
          -    public openScanner_args(
          -      java.nio.ByteBuffer table,
          -      TScan tscan)
          -    {
          +    public openScanner_args(java.nio.ByteBuffer table, TScan tscan) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tscan = tscan;
          @@ -21767,11 +23878,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to get the Scanner for
                */
               public openScanner_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public openScanner_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public openScanner_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -21822,27 +23935,28 @@ public void setTscanIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TSCAN:
          -        if (value == null) {
          -          unsetTscan();
          -        } else {
          -          setTscan((TScan)value);
          -        }
          -        break;
          +        case TSCAN:
          +          if (value == null) {
          +            unsetTscan();
          +          } else {
          +            setTscan((TScan) value);
          +          }
          +          break;
           
                 }
               }
          @@ -21850,60 +23964,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TSCAN:
          -        return getTscan();
          +        case TSCAN:
          +          return getTscan();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TSCAN:
          -        return isSetTscan();
          +        case TABLE:
          +          return isSetTable();
          +        case TSCAN:
          +          return isSetTscan();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof openScanner_args)
          -        return this.equals((openScanner_args)that);
          +      if (that instanceof openScanner_args) return this.equals((openScanner_args) that);
                 return false;
               }
           
               public boolean equals(openScanner_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tscan = true && this.isSetTscan();
                 boolean that_present_tscan = true && that.isSetTscan();
                 if (this_present_tscan || that_present_tscan) {
          -        if (!(this_present_tscan && that_present_tscan))
          -          return false;
          -        if (!this.tscan.equals(that.tscan))
          -          return false;
          +        if (!(this_present_tscan && that_present_tscan)) return false;
          +        if (!this.tscan.equals(that.tscan)) return false;
                 }
           
                 return true;
          @@ -21914,12 +24024,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTscan()) ? 131071 : 524287);
          -      if (isSetTscan())
          -        hashCode = hashCode * 8191 + tscan.hashCode();
          +      if (isSetTscan()) hashCode = hashCode * 8191 + tscan.hashCode();
           
                 return hashCode;
               }
          @@ -21960,11 +24068,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -21995,10 +24105,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tscan == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tscan' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tscan' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tscan != null) {
          @@ -22008,35 +24120,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class openScanner_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class openScanner_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public openScanner_argsStandardScheme getScheme() {
                   return new openScanner_argsStandardScheme();
                 }
               }
           
          -    private static class openScanner_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<openScanner_args> {
          +    private static class openScanner_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<openScanner_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -22044,7 +24161,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_args st
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22053,7 +24170,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_args st
                           struct.tscan = new TScan();
                           struct.tscan.read(iprot);
                           struct.setTscanIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22064,11 +24181,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, openScanner_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, openScanner_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -22088,24 +24207,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, openScanner_args s
           
               }
           
          -    private static class openScanner_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class openScanner_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public openScanner_argsTupleScheme getScheme() {
                   return new openScanner_argsTupleScheme();
                 }
               }
           
          -    private static class openScanner_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<openScanner_args> {
          +    private static class openScanner_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<openScanner_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, openScanner_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, openScanner_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   struct.tscan.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, openScanner_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, openScanner_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.tscan = new TScan();
          @@ -22114,29 +24239,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, openScanner_args str
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class openScanner_result implements org.apache.thrift.TBase<openScanner_result, openScanner_result._Fields>, java.io.Serializable, Cloneable, Comparable<openScanner_result> {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("openScanner_result");
          +  public static class openScanner_result
          +      implements org.apache.thrift.TBase<openScanner_result, openScanner_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<openScanner_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("openScanner_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new openScanner_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new openScanner_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new openScanner_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new openScanner_resultTupleSchemeFactory();
           
               public int success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -22149,7 +24289,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -22160,12 +24300,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -22199,22 +24339,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I32)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(openScanner_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(openScanner_result.class,
          +        metaDataMap);
               }
           
               public openScanner_result() {
               }
           
          -    public openScanner_result(
          -      int success,
          -      TIOError io)
          -    {
          +    public openScanner_result(int success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -22254,7 +24399,8 @@ public openScanner_result setSuccess(int success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -22263,7 +24409,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -22291,23 +24438,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Integer)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -22315,60 +24463,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof openScanner_result)
          -        return this.equals((openScanner_result)that);
          +      if (that instanceof openScanner_result) return this.equals((openScanner_result) that);
                 return false;
               }
           
               public boolean equals(openScanner_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -22381,8 +24525,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + success;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -22423,13 +24566,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -22458,37 +24603,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class openScanner_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class openScanner_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public openScanner_resultStandardScheme getScheme() {
                   return new openScanner_resultStandardScheme();
                 }
               }
           
           -    private static class openScanner_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<openScanner_result> {
           +    private static class openScanner_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<openScanner_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -22496,7 +24647,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_result
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.success = iprot.readI32();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22505,7 +24656,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22516,11 +24667,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, openScanner_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, openScanner_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, openScanner_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -22540,17 +24693,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, openScanner_result
           
               }
           
          -    private static class openScanner_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class openScanner_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public openScanner_resultTupleScheme getScheme() {
                   return new openScanner_resultTupleScheme();
                 }
               }
           
           -    private static class openScanner_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<openScanner_result> {
           +    private static class openScanner_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<openScanner_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, openScanner_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, openScanner_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -22568,8 +24725,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, openScanner_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, openScanner_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, openScanner_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readI32();
          @@ -22583,22 +24742,35 @@ public void read(org.apache.thrift.protocol.TProtocol prot, openScanner_result s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getScannerRows_args implements org.apache.thrift.TBase<getScannerRows_args, getScannerRows_args._Fields>, java.io.Serializable, Cloneable, Comparable<getScannerRows_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getScannerRows_args");
          +  public static class getScannerRows_args
           +      implements org.apache.thrift.TBase<getScannerRows_args, getScannerRows_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getScannerRows_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getScannerRows_args");
           
          -    private static final org.apache.thrift.protocol.TField SCANNER_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("scannerId", org.apache.thrift.protocol.TType.I32, (short)1);
          -    private static final org.apache.thrift.protocol.TField NUM_ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("numRows", org.apache.thrift.protocol.TType.I32, (short)2);
          +    private static final org.apache.thrift.protocol.TField SCANNER_ID_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("scannerId", org.apache.thrift.protocol.TType.I32,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField NUM_ROWS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("numRows", org.apache.thrift.protocol.TType.I32,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getScannerRows_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getScannerRows_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getScannerRows_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getScannerRows_argsTupleSchemeFactory();
           
               /**
          -     * the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
          +     * the Id of the Scanner to return rows from. This is an Id returned from the openScanner
          +     * function.
                */
               public int scannerId; // required
               /**
           @@ -22606,18 +24778,23 @@ public static class getScannerRows_args implements org.apache.thrift.TBase
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -22630,7 +24807,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // SCANNER_ID
                       return SCANNER_ID;
                     case 2: // NUM_ROWS
          @@ -22641,12 +24818,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -22681,13 +24858,21 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SCANNER_ID, new org.apache.thrift.meta_data.FieldMetaData("scannerId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -      tmpMap.put(_Fields.NUM_ROWS, new org.apache.thrift.meta_data.FieldMetaData("numRows", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SCANNER_ID,
          +        new org.apache.thrift.meta_data.FieldMetaData("scannerId",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I32)));
          +      tmpMap.put(_Fields.NUM_ROWS,
          +        new org.apache.thrift.meta_data.FieldMetaData("numRows",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I32)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getScannerRows_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getScannerRows_args.class,
          +        metaDataMap);
               }
           
               public getScannerRows_args() {
          @@ -22695,10 +24880,7 @@ public getScannerRows_args() {
           
               }
           
          -    public getScannerRows_args(
          -      int scannerId,
          -      int numRows)
          -    {
          +    public getScannerRows_args(int scannerId, int numRows) {
                 this();
                 this.scannerId = scannerId;
                 setScannerIdIsSet(true);
          @@ -22728,14 +24910,16 @@ public void clear() {
               }
           
               /**
          -     * the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
          +     * the Id of the Scanner to return rows from. This is an Id returned from the openScanner
          +     * function.
                */
               public int getScannerId() {
                 return this.scannerId;
               }
           
               /**
          -     * the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
          +     * the Id of the Scanner to return rows from. This is an Id returned from the openScanner
          +     * function.
                */
               public getScannerRows_args setScannerId(int scannerId) {
                 this.scannerId = scannerId;
          @@ -22744,7 +24928,8 @@ public getScannerRows_args setScannerId(int scannerId) {
               }
           
               public void unsetScannerId() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SCANNERID_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SCANNERID_ISSET_ID);
               }
           
               /** Returns true if field scannerId is set (has been assigned a value) and false otherwise */
          @@ -22753,7 +24938,8 @@ public boolean isSetScannerId() {
               }
           
               public void setScannerIdIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SCANNERID_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SCANNERID_ISSET_ID, value);
               }
           
               /**
          @@ -22773,7 +24959,8 @@ public getScannerRows_args setNumRows(int numRows) {
               }
           
               public void unsetNumRows() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMROWS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMROWS_ISSET_ID);
               }
           
               /** Returns true if field numRows is set (has been assigned a value) and false otherwise */
          @@ -22782,26 +24969,28 @@ public boolean isSetNumRows() {
               }
           
               public void setNumRowsIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMROWS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMROWS_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SCANNER_ID:
          -        if (value == null) {
          -          unsetScannerId();
          -        } else {
          -          setScannerId((java.lang.Integer)value);
          -        }
          -        break;
          +        case SCANNER_ID:
          +          if (value == null) {
          +            unsetScannerId();
          +          } else {
          +            setScannerId((java.lang.Integer) value);
          +          }
          +          break;
           
          -      case NUM_ROWS:
          -        if (value == null) {
          -          unsetNumRows();
          -        } else {
          -          setNumRows((java.lang.Integer)value);
          -        }
          -        break;
          +        case NUM_ROWS:
          +          if (value == null) {
          +            unsetNumRows();
          +          } else {
          +            setNumRows((java.lang.Integer) value);
          +          }
          +          break;
           
                 }
               }
          @@ -22809,60 +24998,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SCANNER_ID:
          -        return getScannerId();
          +        case SCANNER_ID:
          +          return getScannerId();
           
          -      case NUM_ROWS:
          -        return getNumRows();
          +        case NUM_ROWS:
          +          return getNumRows();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SCANNER_ID:
          -        return isSetScannerId();
          -      case NUM_ROWS:
          -        return isSetNumRows();
          +        case SCANNER_ID:
          +          return isSetScannerId();
          +        case NUM_ROWS:
          +          return isSetNumRows();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getScannerRows_args)
          -        return this.equals((getScannerRows_args)that);
          +      if (that instanceof getScannerRows_args) return this.equals((getScannerRows_args) that);
                 return false;
               }
           
               public boolean equals(getScannerRows_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_scannerId = true;
                 boolean that_present_scannerId = true;
                 if (this_present_scannerId || that_present_scannerId) {
          -        if (!(this_present_scannerId && that_present_scannerId))
          -          return false;
          -        if (this.scannerId != that.scannerId)
          -          return false;
          +        if (!(this_present_scannerId && that_present_scannerId)) return false;
          +        if (this.scannerId != that.scannerId) return false;
                 }
           
                 boolean this_present_numRows = true;
                 boolean that_present_numRows = true;
                 if (this_present_numRows || that_present_numRows) {
          -        if (!(this_present_numRows && that_present_numRows))
          -          return false;
          -        if (this.numRows != that.numRows)
          -          return false;
          +        if (!(this_present_numRows && that_present_numRows)) return false;
          +        if (this.numRows != that.numRows) return false;
                 }
           
                 return true;
          @@ -22915,11 +25100,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -22941,43 +25128,50 @@ public java.lang.String toString() {
           
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
          -      // alas, we cannot check 'scannerId' because it's a primitive and you chose the non-beans generator.
          +      // alas, we cannot check 'scannerId' because it's a primitive and you chose the non-beans
          +      // generator.
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getScannerRows_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getScannerRows_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getScannerRows_argsStandardScheme getScheme() {
                   return new getScannerRows_argsStandardScheme();
                 }
               }
           
           -    private static class getScannerRows_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getScannerRows_args> {
           +    private static class getScannerRows_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getScannerRows_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -22985,7 +25179,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.scannerId = iprot.readI32();
                           struct.setScannerIdIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -22993,7 +25187,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.numRows = iprot.readI32();
                           struct.setNumRowsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -23004,14 +25198,17 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   if (!struct.isSetScannerId()) {
          -          throw new org.apache.thrift.protocol.TProtocolException("Required field 'scannerId' was not found in serialized data! Struct: " + toString());
          +          throw new org.apache.thrift.protocol.TProtocolException(
          +              "Required field 'scannerId' was not found in serialized data! Struct: " + toString());
                   }
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerRows_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerRows_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -23027,17 +25224,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerRows_arg
           
               }
           
          -    private static class getScannerRows_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getScannerRows_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getScannerRows_argsTupleScheme getScheme() {
                   return new getScannerRows_argsTupleScheme();
                 }
               }
           
           -    private static class getScannerRows_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getScannerRows_args> {
           +    private static class getScannerRows_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getScannerRows_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getScannerRows_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getScannerRows_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeI32(struct.scannerId);
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetNumRows()) {
          @@ -23050,8 +25251,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getScannerRows_args
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getScannerRows_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getScannerRows_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.scannerId = iprot.readI32();
                   struct.setScannerIdIsSet(true);
                   java.util.BitSet incoming = iprot.readBitSet(1);
          @@ -23062,20 +25265,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getScannerRows_args
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getScannerRows_result implements org.apache.thrift.TBase<getScannerRows_result, getScannerRows_result._Fields>, java.io.Serializable, Cloneable, Comparable<getScannerRows_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getScannerRows_result");
          -
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getScannerRows_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getScannerRows_resultTupleSchemeFactory();
          +  public static class getScannerRows_result
           +      implements org.apache.thrift.TBase<getScannerRows_result, getScannerRows_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getScannerRows_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getScannerRows_result");
          +
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getScannerRows_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getScannerRows_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TResult> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           @@ -23084,16 +25301,19 @@ public static class getScannerRows_result implements org.apache.thrift.TBase
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -23106,7 +25326,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -23119,12 +25339,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -23156,26 +25376,34 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getScannerRows_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getScannerRows_result.class,
          +        metaDataMap);
               }
           
               public getScannerRows_result() {
               }
           
          -    public getScannerRows_result(
           -      java.util.List<TResult> success,
          -      TIOError io,
          -      TIllegalArgument ia)
          -    {
           +    public getScannerRows_result(java.util.List<TResult> success, TIOError io,
          +        TIllegalArgument ia) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -23187,7 +25415,8 @@ public getScannerRows_result(
                */
               public getScannerRows_result(getScannerRows_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TResult> __this__success = new java.util.ArrayList<TResult>(other.success.size());
           +        java.util.List<TResult> __this__success =
           +            new java.util.ArrayList<TResult>(other.success.size());
                   for (TResult other_element : other.success) {
                     __this__success.add(new TResult(other_element));
                   }
           @@ -23233,7 +25462,8 @@ public java.util.List<TResult> getSuccess() {
                 return this.success;
               }
           
           -    public getScannerRows_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TResult> success) {
           +    public getScannerRows_result
           +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -23309,31 +25539,32 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((TIllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((TIllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -23341,74 +25572,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getScannerRows_result)
          -        return this.equals((getScannerRows_result)that);
          +      if (that instanceof getScannerRows_result) return this.equals((getScannerRows_result) that);
                 return false;
               }
           
               public boolean equals(getScannerRows_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -23419,16 +25644,13 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -23479,13 +25701,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -23526,35 +25750,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getScannerRows_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getScannerRows_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getScannerRows_resultStandardScheme getScheme() {
                   return new getScannerRows_resultStandardScheme();
                 }
               }
           
           -    private static class getScannerRows_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getScannerRows_result> {
           +    private static class getScannerRows_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getScannerRows_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -23563,9 +25792,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_resu
                           {
                             org.apache.thrift.protocol.TList _list246 = iprot.readListBegin();
                          struct.success = new java.util.ArrayList<TResult>(_list246.size);
          -                  @org.apache.thrift.annotation.Nullable TResult _elem247;
          -                  for (int _i248 = 0; _i248 < _list246.size; ++_i248)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TResult _elem247;
          +                  for (int _i248 = 0; _i248 < _list246.size; ++_i248) {
                               _elem247 = new TResult();
                               _elem247.read(iprot);
                               struct.success.add(_elem247);
          @@ -23573,7 +25802,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_resu
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -23582,7 +25811,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_resu
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -23591,7 +25820,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_resu
                           struct.ia = new TIllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -23602,20 +25831,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerRows_resu
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerRows_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerRows_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TResult _iter249 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TResult _iter249 : struct.success) {
                         _iter249.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -23638,17 +25869,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerRows_res
           
               }
           
          -    private static class getScannerRows_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getScannerRows_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getScannerRows_resultTupleScheme getScheme() {
                   return new getScannerRows_resultTupleScheme();
                 }
               }
           
          -    private static class getScannerRows_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getScannerRows_result> {
          +    private static class getScannerRows_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getScannerRows_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getScannerRows_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getScannerRows_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -23663,8 +25898,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getScannerRows_resu
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TResult _iter250 : struct.success)
          -            {
          +            for (TResult _iter250 : struct.success) {
                         _iter250.write(oprot);
                       }
                     }
          @@ -23678,16 +25912,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getScannerRows_resu
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getScannerRows_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getScannerRows_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(3);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list251 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list251 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TResult>(_list251.size);
          -            @org.apache.thrift.annotation.Nullable TResult _elem252;
          -            for (int _i253 = 0; _i253 < _list251.size; ++_i253)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TResult _elem252;
          +            for (int _i253 = 0; _i253 < _list251.size; ++_i253) {
                         _elem252 = new TResult();
                         _elem252.read(iprot);
                         struct.success.add(_elem252);
          @@ -23708,32 +25945,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getScannerRows_resul
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class closeScanner_args implements org.apache.thrift.TBase<closeScanner_args, closeScanner_args._Fields>, java.io.Serializable, Cloneable, Comparable<closeScanner_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("closeScanner_args");
          +  public static class closeScanner_args
          +      implements org.apache.thrift.TBase<closeScanner_args, closeScanner_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<closeScanner_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("closeScanner_args");
           
          -    private static final org.apache.thrift.protocol.TField SCANNER_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("scannerId", org.apache.thrift.protocol.TType.I32, (short)1);
          +    private static final org.apache.thrift.protocol.TField SCANNER_ID_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("scannerId", org.apache.thrift.protocol.TType.I32,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new closeScanner_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new closeScanner_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new closeScanner_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new closeScanner_argsTupleSchemeFactory();
           
               /**
                * the Id of the Scanner to close *
                */
               public int scannerId; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the Id of the Scanner to close *
                  */
          -      SCANNER_ID((short)1, "scannerId");
          +      SCANNER_ID((short) 1, "scannerId");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -23746,7 +25997,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // SCANNER_ID
                       return SCANNER_ID;
                     default:
          @@ -23755,12 +26006,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -23794,19 +26045,22 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SCANNER_ID, new org.apache.thrift.meta_data.FieldMetaData("scannerId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SCANNER_ID,
          +        new org.apache.thrift.meta_data.FieldMetaData("scannerId",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I32)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(closeScanner_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(closeScanner_args.class,
          +        metaDataMap);
               }
           
               public closeScanner_args() {
               }
           
          -    public closeScanner_args(
          -      int scannerId)
          -    {
          +    public closeScanner_args(int scannerId) {
                 this();
                 this.scannerId = scannerId;
                 setScannerIdIsSet(true);
          @@ -23847,7 +26101,8 @@ public closeScanner_args setScannerId(int scannerId) {
               }
           
               public void unsetScannerId() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SCANNERID_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SCANNERID_ISSET_ID);
               }
           
               /** Returns true if field scannerId is set (has been assigned a value) and false otherwise */
          @@ -23856,18 +26111,20 @@ public boolean isSetScannerId() {
               }
           
               public void setScannerIdIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SCANNERID_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SCANNERID_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SCANNER_ID:
          -        if (value == null) {
          -          unsetScannerId();
          -        } else {
          -          setScannerId((java.lang.Integer)value);
          -        }
          -        break;
          +        case SCANNER_ID:
          +          if (value == null) {
          +            unsetScannerId();
          +          } else {
          +            setScannerId((java.lang.Integer) value);
          +          }
          +          break;
           
                 }
               }
          @@ -23875,46 +26132,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SCANNER_ID:
          -        return getScannerId();
          +        case SCANNER_ID:
          +          return getScannerId();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SCANNER_ID:
          -        return isSetScannerId();
          +        case SCANNER_ID:
          +          return isSetScannerId();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof closeScanner_args)
          -        return this.equals((closeScanner_args)that);
          +      if (that instanceof closeScanner_args) return this.equals((closeScanner_args) that);
                 return false;
               }
           
               public boolean equals(closeScanner_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_scannerId = true;
                 boolean that_present_scannerId = true;
                 if (this_present_scannerId || that_present_scannerId) {
          -        if (!(this_present_scannerId && that_present_scannerId))
          -          return false;
          -        if (this.scannerId != that.scannerId)
          -          return false;
          +        if (!(this_present_scannerId && that_present_scannerId)) return false;
          +        if (this.scannerId != that.scannerId) return false;
                 }
           
                 return true;
          @@ -23955,11 +26210,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -23977,43 +26234,50 @@ public java.lang.String toString() {
           
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
          -      // alas, we cannot check 'scannerId' because it's a primitive and you chose the non-beans generator.
          +      // alas, we cannot check 'scannerId' because it's a primitive and you chose the non-beans
          +      // generator.
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class closeScanner_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class closeScanner_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public closeScanner_argsStandardScheme getScheme() {
                   return new closeScanner_argsStandardScheme();
                 }
               }
           
          -    private static class closeScanner_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<closeScanner_args> {
          +    private static class closeScanner_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<closeScanner_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, closeScanner_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, closeScanner_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -24021,7 +26285,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, closeScanner_args s
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.scannerId = iprot.readI32();
                           struct.setScannerIdIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -24032,14 +26296,17 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, closeScanner_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   if (!struct.isSetScannerId()) {
          -          throw new org.apache.thrift.protocol.TProtocolException("Required field 'scannerId' was not found in serialized data! Struct: " + toString());
          +          throw new org.apache.thrift.protocol.TProtocolException(
          +              "Required field 'scannerId' was not found in serialized data! Struct: " + toString());
                   }
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, closeScanner_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, closeScanner_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -24052,41 +26319,59 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, closeScanner_args
           
               }
           
          -    private static class closeScanner_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class closeScanner_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public closeScanner_argsTupleScheme getScheme() {
                   return new closeScanner_argsTupleScheme();
                 }
               }
           
          -    private static class closeScanner_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<closeScanner_args> {
          +    private static class closeScanner_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<closeScanner_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, closeScanner_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, closeScanner_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeI32(struct.scannerId);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, closeScanner_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, closeScanner_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.scannerId = iprot.readI32();
                   struct.setScannerIdIsSet(true);
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class closeScanner_result implements org.apache.thrift.TBase<closeScanner_result, closeScanner_result._Fields>, java.io.Serializable, Cloneable, Comparable<closeScanner_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("closeScanner_result");
          +  public static class closeScanner_result
          +      implements org.apache.thrift.TBase<closeScanner_result, closeScanner_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<closeScanner_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("closeScanner_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC = new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField IA_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("ia", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new closeScanner_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new closeScanner_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new closeScanner_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new closeScanner_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
               /**
          @@ -24094,15 +26379,19 @@ public static class closeScanner_result implements org.apache.thrift.TBase<close
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -24115,7 +26404,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     case 2: // IA
          @@ -24126,12 +26415,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -24163,22 +26452,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          -      tmpMap.put(_Fields.IA, new org.apache.thrift.meta_data.FieldMetaData("ia", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIllegalArgument.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
          +      tmpMap.put(_Fields.IA,
          +        new org.apache.thrift.meta_data.FieldMetaData("ia",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIllegalArgument.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(closeScanner_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(closeScanner_result.class,
          +        metaDataMap);
               }
           
               public closeScanner_result() {
               }
           
          -    public closeScanner_result(
          -      TIOError io,
          -      TIllegalArgument ia)
          -    {
          +    public closeScanner_result(TIOError io, TIllegalArgument ia) {
                 this();
                 this.io = io;
                 this.ia = ia;
          @@ -24262,23 +26556,24 @@ public void setIaIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
          -      case IA:
          -        if (value == null) {
          -          unsetIa();
          -        } else {
          -          setIa((TIllegalArgument)value);
          -        }
          -        break;
          +        case IA:
          +          if (value == null) {
          +            unsetIa();
          +          } else {
          +            setIa((TIllegalArgument) value);
          +          }
          +          break;
           
                 }
               }
          @@ -24286,60 +26581,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
          -      case IA:
          -        return getIa();
          +        case IA:
          +          return getIa();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          -      case IA:
          -        return isSetIa();
          +        case IO:
          +          return isSetIo();
          +        case IA:
          +          return isSetIa();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof closeScanner_result)
          -        return this.equals((closeScanner_result)that);
          +      if (that instanceof closeScanner_result) return this.equals((closeScanner_result) that);
                 return false;
               }
           
               public boolean equals(closeScanner_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 boolean this_present_ia = true && this.isSetIa();
                 boolean that_present_ia = true && that.isSetIa();
                 if (this_present_ia || that_present_ia) {
          -        if (!(this_present_ia && that_present_ia))
          -          return false;
          -        if (!this.ia.equals(that.ia))
          -          return false;
          +        if (!(this_present_ia && that_present_ia)) return false;
          +        if (!this.ia.equals(that.ia)) return false;
                 }
           
                 return true;
          @@ -24350,12 +26641,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIa()) ? 131071 : 524287);
          -      if (isSetIa())
          -        hashCode = hashCode * 8191 + ia.hashCode();
          +      if (isSetIa()) hashCode = hashCode * 8191 + ia.hashCode();
           
                 return hashCode;
               }
          @@ -24396,13 +26685,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -24435,35 +26726,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class closeScanner_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class closeScanner_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public closeScanner_resultStandardScheme getScheme() {
                   return new closeScanner_resultStandardScheme();
                 }
               }
           
          -    private static class closeScanner_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<closeScanner_result> {
          +    private static class closeScanner_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<closeScanner_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, closeScanner_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, closeScanner_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -24472,7 +26768,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, closeScanner_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -24481,7 +26777,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, closeScanner_result
                           struct.ia = new TIllegalArgument();
                           struct.ia.read(iprot);
                           struct.setIaIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -24492,11 +26788,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, closeScanner_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, closeScanner_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, closeScanner_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -24516,17 +26814,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, closeScanner_resul
           
               }
           
          -    private static class closeScanner_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class closeScanner_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public closeScanner_resultTupleScheme getScheme() {
                   return new closeScanner_resultTupleScheme();
                 }
               }
           
          -    private static class closeScanner_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<closeScanner_result> {
          +    private static class closeScanner_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<closeScanner_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, closeScanner_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, closeScanner_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -24544,8 +26846,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, closeScanner_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, closeScanner_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, closeScanner_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -24560,19 +26864,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, closeScanner_result
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class mutateRow_args implements org.apache.thrift.TBase<mutateRow_args, mutateRow_args._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRow_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRow_args");
          +  public static class mutateRow_args
          +      implements org.apache.thrift.TBase<mutateRow_args, mutateRow_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<mutateRow_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRow_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TROW_MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("trowMutations", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TROW_MUTATIONS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("trowMutations",
          +            org.apache.thrift.protocol.TType.STRUCT, (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRow_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRow_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRow_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRow_argsTupleSchemeFactory();
           
               /**
                * table to apply the mutations
          @@ -24583,18 +26899,22 @@ public static class mutateRow_args implements org.apache.thrift.TBase<mutateRow
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -24607,7 +26927,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TROW_MUTATIONS
          @@ -24618,12 +26938,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -24655,22 +26975,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TROW_MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("trowMutations", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowMutations.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TROW_MUTATIONS,
          +        new org.apache.thrift.meta_data.FieldMetaData("trowMutations",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TRowMutations.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRow_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRow_args.class,
          +        metaDataMap);
               }
           
               public mutateRow_args() {
               }
           
          -    public mutateRow_args(
          -      java.nio.ByteBuffer table,
          -      TRowMutations trowMutations)
          -    {
          +    public mutateRow_args(java.nio.ByteBuffer table, TRowMutations trowMutations) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.trowMutations = trowMutations;
          @@ -24714,11 +27039,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * table to apply the mutations
                */
               public mutateRow_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public mutateRow_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public mutateRow_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -24749,7 +27076,8 @@ public TRowMutations getTrowMutations() {
               /**
                * mutations to apply
                */
          -    public mutateRow_args setTrowMutations(@org.apache.thrift.annotation.Nullable TRowMutations trowMutations) {
          +    public mutateRow_args
          +        setTrowMutations(@org.apache.thrift.annotation.Nullable TRowMutations trowMutations) {
                 this.trowMutations = trowMutations;
                 return this;
               }
          @@ -24758,7 +27086,9 @@ public void unsetTrowMutations() {
                 this.trowMutations = null;
               }
           
          -    /** Returns true if field trowMutations is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field trowMutations is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetTrowMutations() {
                 return this.trowMutations != null;
               }
          @@ -24769,27 +27099,28 @@ public void setTrowMutationsIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TROW_MUTATIONS:
          -        if (value == null) {
          -          unsetTrowMutations();
          -        } else {
          -          setTrowMutations((TRowMutations)value);
          -        }
          -        break;
          +        case TROW_MUTATIONS:
          +          if (value == null) {
          +            unsetTrowMutations();
          +          } else {
          +            setTrowMutations((TRowMutations) value);
          +          }
          +          break;
           
                 }
               }
          @@ -24797,60 +27128,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TROW_MUTATIONS:
          -        return getTrowMutations();
          +        case TROW_MUTATIONS:
          +          return getTrowMutations();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TROW_MUTATIONS:
          -        return isSetTrowMutations();
          +        case TABLE:
          +          return isSetTable();
          +        case TROW_MUTATIONS:
          +          return isSetTrowMutations();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRow_args)
          -        return this.equals((mutateRow_args)that);
          +      if (that instanceof mutateRow_args) return this.equals((mutateRow_args) that);
                 return false;
               }
           
               public boolean equals(mutateRow_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_trowMutations = true && this.isSetTrowMutations();
                 boolean that_present_trowMutations = true && that.isSetTrowMutations();
                 if (this_present_trowMutations || that_present_trowMutations) {
          -        if (!(this_present_trowMutations && that_present_trowMutations))
          -          return false;
          -        if (!this.trowMutations.equals(that.trowMutations))
          -          return false;
          +        if (!(this_present_trowMutations && that_present_trowMutations)) return false;
          +        if (!this.trowMutations.equals(that.trowMutations)) return false;
                 }
           
                 return true;
          @@ -24861,12 +27188,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTrowMutations()) ? 131071 : 524287);
          -      if (isSetTrowMutations())
          -        hashCode = hashCode * 8191 + trowMutations.hashCode();
          +      if (isSetTrowMutations()) hashCode = hashCode * 8191 + trowMutations.hashCode();
           
                 return hashCode;
               }
          @@ -24894,7 +27219,8 @@ public int compareTo(mutateRow_args other) {
                   return lastComparison;
                 }
                 if (isSetTrowMutations()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.trowMutations, other.trowMutations);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.trowMutations, other.trowMutations);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -24907,11 +27233,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -24942,10 +27270,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (trowMutations == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'trowMutations' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'trowMutations' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (trowMutations != null) {
          @@ -24955,35 +27285,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRow_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRow_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRow_argsStandardScheme getScheme() {
                   return new mutateRow_argsStandardScheme();
                 }
               }
           
          -    private static class mutateRow_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme {
          +    private static class mutateRow_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -24991,7 +27326,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25000,7 +27335,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                           struct.trowMutations = new TRowMutations();
                           struct.trowMutations.read(iprot);
                           struct.setTrowMutationsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25011,11 +27346,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_args stru
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -25035,24 +27372,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_args str
           
               }
           
          -    private static class mutateRow_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRow_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRow_argsTupleScheme getScheme() {
                   return new mutateRow_argsTupleScheme();
                 }
               }
           
          -    private static class mutateRow_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRow_args> {
          +    private static class mutateRow_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<mutateRow_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   struct.trowMutations.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.trowMutations = new TRowMutations();
          @@ -25061,26 +27404,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_args struc
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class mutateRow_result implements org.apache.thrift.TBase<mutateRow_result, mutateRow_result._Fields>, java.io.Serializable, Cloneable, Comparable<mutateRow_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("mutateRow_result");
          +  public static class mutateRow_result
          +      implements org.apache.thrift.TBase<mutateRow_result, mutateRow_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<mutateRow_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("mutateRow_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new mutateRow_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new mutateRow_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new mutateRow_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new mutateRow_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -25093,7 +27450,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -25102,12 +27459,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -25139,19 +27496,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRow_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(mutateRow_result.class,
          +        metaDataMap);
               }
           
               public mutateRow_result() {
               }
           
          -    public mutateRow_result(
          -      TIOError io)
          -    {
          +    public mutateRow_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -25199,15 +27559,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -25215,46 +27576,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof mutateRow_result)
          -        return this.equals((mutateRow_result)that);
          +      if (that instanceof mutateRow_result) return this.equals((mutateRow_result) that);
                 return false;
               }
           
               public boolean equals(mutateRow_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -25265,8 +27624,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -25297,13 +27655,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -25328,35 +27688,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class mutateRow_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRow_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRow_resultStandardScheme getScheme() {
                   return new mutateRow_resultStandardScheme();
                 }
               }
           
          -    private static class mutateRow_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme {
          +    private static class mutateRow_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -25365,7 +27730,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_result st
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25376,11 +27741,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, mutateRow_result st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -25395,17 +27762,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, mutateRow_result s
           
               }
           
          -    private static class mutateRow_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class mutateRow_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public mutateRow_resultTupleScheme getScheme() {
                   return new mutateRow_resultTupleScheme();
                 }
               }
           
          -    private static class mutateRow_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<mutateRow_result> {
          +    private static class mutateRow_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<mutateRow_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -25417,8 +27788,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, mutateRow_result st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -25428,20 +27801,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, mutateRow_result str
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getScannerResults_args implements org.apache.thrift.TBase<getScannerResults_args, getScannerResults_args._Fields>, java.io.Serializable, Cloneable, Comparable<getScannerResults_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getScannerResults_args");
          +  public static class getScannerResults_args
          +      implements org.apache.thrift.TBase<getScannerResults_args, getScannerResults_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getScannerResults_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getScannerResults_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField TSCAN_FIELD_DESC = new org.apache.thrift.protocol.TField("tscan", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -    private static final org.apache.thrift.protocol.TField NUM_ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("numRows", org.apache.thrift.protocol.TType.I32, (short)3);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField TSCAN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tscan", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField NUM_ROWS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("numRows", org.apache.thrift.protocol.TType.I32,
          +            (short) 3);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getScannerResults_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getScannerResults_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getScannerResults_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getScannerResults_argsTupleSchemeFactory();
           
               /**
                * the table to get the Scanner for
          @@ -25456,22 +27843,26 @@ public static class getScannerResults_args implements org.apache.thrift.TBase
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -25484,7 +27875,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // TSCAN
          @@ -25497,12 +27888,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -25536,15 +27927,26 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.TSCAN, new org.apache.thrift.meta_data.FieldMetaData("tscan", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TScan.class)));
          -      tmpMap.put(_Fields.NUM_ROWS, new org.apache.thrift.meta_data.FieldMetaData("numRows", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.TSCAN,
          +        new org.apache.thrift.meta_data.FieldMetaData("tscan",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TScan.class)));
          +      tmpMap.put(_Fields.NUM_ROWS,
          +        new org.apache.thrift.meta_data.FieldMetaData("numRows",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.I32)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getScannerResults_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getScannerResults_args.class,
          +        metaDataMap);
               }
           
               public getScannerResults_args() {
          @@ -25552,11 +27954,7 @@ public getScannerResults_args() {
           
               }
           
          -    public getScannerResults_args(
          -      java.nio.ByteBuffer table,
          -      TScan tscan,
          -      int numRows)
          -    {
          +    public getScannerResults_args(java.nio.ByteBuffer table, TScan tscan, int numRows) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.tscan = tscan;
          @@ -25606,11 +28004,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * the table to get the Scanner for
                */
               public getScannerResults_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public getScannerResults_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public getScannerResults_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -25678,7 +28078,8 @@ public getScannerResults_args setNumRows(int numRows) {
               }
           
               public void unsetNumRows() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMROWS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __NUMROWS_ISSET_ID);
               }
           
               /** Returns true if field numRows is set (has been assigned a value) and false otherwise */
          @@ -25687,38 +28088,40 @@ public boolean isSetNumRows() {
               }
           
               public void setNumRowsIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMROWS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NUMROWS_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case TSCAN:
          -        if (value == null) {
          -          unsetTscan();
          -        } else {
          -          setTscan((TScan)value);
          -        }
          -        break;
          +        case TSCAN:
          +          if (value == null) {
          +            unsetTscan();
          +          } else {
          +            setTscan((TScan) value);
          +          }
          +          break;
           
          -      case NUM_ROWS:
          -        if (value == null) {
          -          unsetNumRows();
          -        } else {
          -          setNumRows((java.lang.Integer)value);
          -        }
          -        break;
          +        case NUM_ROWS:
          +          if (value == null) {
          +            unsetNumRows();
          +          } else {
          +            setNumRows((java.lang.Integer) value);
          +          }
          +          break;
           
                 }
               }
          @@ -25726,74 +28129,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case TSCAN:
          -        return getTscan();
          +        case TSCAN:
          +          return getTscan();
           
          -      case NUM_ROWS:
          -        return getNumRows();
          +        case NUM_ROWS:
          +          return getNumRows();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case TSCAN:
          -        return isSetTscan();
          -      case NUM_ROWS:
          -        return isSetNumRows();
          +        case TABLE:
          +          return isSetTable();
          +        case TSCAN:
          +          return isSetTscan();
          +        case NUM_ROWS:
          +          return isSetNumRows();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getScannerResults_args)
          -        return this.equals((getScannerResults_args)that);
          +      if (that instanceof getScannerResults_args) return this.equals((getScannerResults_args) that);
                 return false;
               }
           
               public boolean equals(getScannerResults_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_tscan = true && this.isSetTscan();
                 boolean that_present_tscan = true && that.isSetTscan();
                 if (this_present_tscan || that_present_tscan) {
          -        if (!(this_present_tscan && that_present_tscan))
          -          return false;
          -        if (!this.tscan.equals(that.tscan))
          -          return false;
          +        if (!(this_present_tscan && that_present_tscan)) return false;
          +        if (!this.tscan.equals(that.tscan)) return false;
                 }
           
                 boolean this_present_numRows = true;
                 boolean that_present_numRows = true;
                 if (this_present_numRows || that_present_numRows) {
          -        if (!(this_present_numRows && that_present_numRows))
          -          return false;
          -        if (this.numRows != that.numRows)
          -          return false;
          +        if (!(this_present_numRows && that_present_numRows)) return false;
          +        if (this.numRows != that.numRows) return false;
                 }
           
                 return true;
          @@ -25804,12 +28201,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetTscan()) ? 131071 : 524287);
          -      if (isSetTscan())
          -        hashCode = hashCode * 8191 + tscan.hashCode();
          +      if (isSetTscan()) hashCode = hashCode * 8191 + tscan.hashCode();
           
                 hashCode = hashCode * 8191 + numRows;
           
          @@ -25862,11 +28257,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -25901,10 +28298,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (tscan == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tscan' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tscan' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tscan != null) {
          @@ -25914,37 +28313,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getScannerResults_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getScannerResults_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getScannerResults_argsStandardScheme getScheme() {
                   return new getScannerResults_argsStandardScheme();
                 }
               }
           
          -    private static class getScannerResults_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme {
          +    private static class getScannerResults_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -25952,7 +28357,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_a
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25961,7 +28366,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_a
                           struct.tscan = new TScan();
                           struct.tscan.read(iprot);
                           struct.setTscanIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25969,7 +28374,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_a
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                           struct.numRows = iprot.readI32();
                           struct.setNumRowsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -25980,11 +28385,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_a
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerResults_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerResults_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -26007,17 +28414,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerResults_
           
               }
           
          -    private static class getScannerResults_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getScannerResults_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getScannerResults_argsTupleScheme getScheme() {
                   return new getScannerResults_argsTupleScheme();
                 }
               }
           
           -    private static class getScannerResults_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getScannerResults_args> {
          +    private static class getScannerResults_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getScannerResults_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getScannerResults_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getScannerResults_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   struct.tscan.write(oprot);
                   java.util.BitSet optionals = new java.util.BitSet();
          @@ -26031,8 +28442,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getScannerResults_a
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getScannerResults_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getScannerResults_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.tscan = new TScan();
          @@ -26046,29 +28459,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getScannerResults_ar
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getScannerResults_result implements org.apache.thrift.TBase<getScannerResults_result, getScannerResults_result._Fields>, java.io.Serializable, Cloneable, Comparable<getScannerResults_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getScannerResults_result");
          +  public static class getScannerResults_result implements
           +      org.apache.thrift.TBase<getScannerResults_result, getScannerResults_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getScannerResults_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getScannerResults_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getScannerResults_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getScannerResults_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getScannerResults_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getScannerResults_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TResult> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -26081,7 +28509,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -26092,12 +28520,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -26129,23 +28557,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TResult.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TResult.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getScannerResults_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getScannerResults_result.class,
          +        metaDataMap);
               }
           
               public getScannerResults_result() {
               }
           
          -    public getScannerResults_result(
           -      java.util.List<TResult> success,
          -      TIOError io)
          -    {
           +    public getScannerResults_result(java.util.List<TResult> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -26156,7 +28589,8 @@ public getScannerResults_result(
                */
               public getScannerResults_result(getScannerResults_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TResult> __this__success = new java.util.ArrayList<TResult>(other.success.size());
           +        java.util.List<TResult> __this__success =
           +            new java.util.ArrayList<TResult>(other.success.size());
                   for (TResult other_element : other.success) {
                     __this__success.add(new TResult(other_element));
                   }
          @@ -26198,7 +28632,8 @@ public java.util.List getSuccess() {
                 return this.success;
               }
           
           -    public getScannerResults_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TResult> success) {
          +    public getScannerResults_result
           +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TResult> success) {
                 this.success = success;
                 return this;
               }
          @@ -26243,23 +28678,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TResult>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TResult>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -26267,27 +28703,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -26295,32 +28734,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getScannerResults_result)
          -        return this.equals((getScannerResults_result)that);
          +        return this.equals((getScannerResults_result) that);
                 return false;
               }
           
               public boolean equals(getScannerResults_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -26331,12 +28764,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -26377,13 +28808,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -26416,35 +28849,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getScannerResults_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getScannerResults_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getScannerResults_resultStandardScheme getScheme() {
                   return new getScannerResults_resultStandardScheme();
                 }
               }
           
           -    private static class getScannerResults_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getScannerResults_result> {
          +    private static class getScannerResults_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getScannerResults_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -26453,9 +28891,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_r
                           {
                             org.apache.thrift.protocol.TList _list254 = iprot.readListBegin();
                              struct.success = new java.util.ArrayList<TResult>(_list254.size);
          -                  @org.apache.thrift.annotation.Nullable TResult _elem255;
          -                  for (int _i256 = 0; _i256 < _list254.size; ++_i256)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TResult _elem255;
          +                  for (int _i256 = 0; _i256 < _list254.size; ++_i256) {
                               _elem255 = new TResult();
                               _elem255.read(iprot);
                               struct.success.add(_elem255);
          @@ -26463,7 +28901,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_r
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -26472,7 +28910,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_r
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -26483,20 +28921,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getScannerResults_r
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerResults_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerResults_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TResult _iter257 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TResult _iter257 : struct.success) {
                         _iter257.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -26514,17 +28954,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getScannerResults_
           
               }
           
          -    private static class getScannerResults_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getScannerResults_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getScannerResults_resultTupleScheme getScheme() {
                   return new getScannerResults_resultTupleScheme();
                 }
               }
           
           -    private static class getScannerResults_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getScannerResults_result> {
          +    private static class getScannerResults_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getScannerResults_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getScannerResults_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getScannerResults_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -26536,8 +28980,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getScannerResults_r
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TResult _iter258 : struct.success)
          -            {
          +            for (TResult _iter258 : struct.success) {
                         _iter258.write(oprot);
                       }
                     }
          @@ -26548,16 +28991,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getScannerResults_r
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getScannerResults_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getScannerResults_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list259 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list259 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TResult>(_list259.size);
          -            @org.apache.thrift.annotation.Nullable TResult _elem260;
          -            for (int _i261 = 0; _i261 < _list259.size; ++_i261)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TResult _elem260;
          +            for (int _i261 = 0; _i261 < _list259.size; ++_i261) {
                         _elem260 = new TResult();
                         _elem260.read(iprot);
                         struct.success.add(_elem260);
          @@ -26573,32 +29019,48 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getScannerResults_re
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getRegionLocation_args implements org.apache.thrift.TBase<getRegionLocation_args, getRegionLocation_args._Fields>, java.io.Serializable, Cloneable, Comparable<getRegionLocation_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRegionLocation_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField RELOAD_FIELD_DESC = new org.apache.thrift.protocol.TField("reload", org.apache.thrift.protocol.TType.BOOL, (short)3);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRegionLocation_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRegionLocation_argsTupleSchemeFactory();
          +  public static class getRegionLocation_args
           +      implements org.apache.thrift.TBase<getRegionLocation_args, getRegionLocation_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getRegionLocation_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRegionLocation_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField RELOAD_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("reload", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 3);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRegionLocation_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRegionLocation_argsTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table; // required
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
               public boolean reload; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      TABLE((short)1, "table"),
          -      ROW((short)2, "row"),
          -      RELOAD((short)3, "reload");
          +      TABLE((short) 1, "table"), ROW((short) 2, "row"), RELOAD((short) 3, "reload");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -26611,7 +29073,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // ROW
          @@ -26624,12 +29086,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -26663,25 +29125,33 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.RELOAD, new org.apache.thrift.meta_data.FieldMetaData("reload", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.RELOAD,
          +        new org.apache.thrift.meta_data.FieldMetaData("reload",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionLocation_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionLocation_args.class,
          +        metaDataMap);
               }
           
               public getRegionLocation_args() {
               }
           
          -    public getRegionLocation_args(
          -      java.nio.ByteBuffer table,
          -      java.nio.ByteBuffer row,
          -      boolean reload)
          -    {
          +    public getRegionLocation_args(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        boolean reload) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -26725,11 +29195,13 @@ public java.nio.ByteBuffer bufferForTable() {
               }
           
               public getRegionLocation_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public getRegionLocation_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public getRegionLocation_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -26759,11 +29231,12 @@ public java.nio.ByteBuffer bufferForRow() {
               }
           
               public getRegionLocation_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          -    public getRegionLocation_args setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
          +    public getRegionLocation_args
          +        setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
                 return this;
               }
          @@ -26794,7 +29267,8 @@ public getRegionLocation_args setReload(boolean reload) {
               }
           
               public void unsetReload() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __RELOAD_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __RELOAD_ISSET_ID);
               }
           
               /** Returns true if field reload is set (has been assigned a value) and false otherwise */
          @@ -26803,42 +29277,44 @@ public boolean isSetReload() {
               }
           
               public void setReloadIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __RELOAD_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __RELOAD_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case RELOAD:
          -        if (value == null) {
          -          unsetReload();
          -        } else {
          -          setReload((java.lang.Boolean)value);
          -        }
          -        break;
          +        case RELOAD:
          +          if (value == null) {
          +            unsetReload();
          +          } else {
          +            setReload((java.lang.Boolean) value);
          +          }
          +          break;
           
                 }
               }
          @@ -26846,74 +29322,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case RELOAD:
          -        return isReload();
          +        case RELOAD:
          +          return isReload();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case ROW:
          -        return isSetRow();
          -      case RELOAD:
          -        return isSetReload();
          +        case TABLE:
          +          return isSetTable();
          +        case ROW:
          +          return isSetRow();
          +        case RELOAD:
          +          return isSetReload();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getRegionLocation_args)
          -        return this.equals((getRegionLocation_args)that);
          +      if (that instanceof getRegionLocation_args) return this.equals((getRegionLocation_args) that);
                 return false;
               }
           
               public boolean equals(getRegionLocation_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_reload = true;
                 boolean that_present_reload = true;
                 if (this_present_reload || that_present_reload) {
          -        if (!(this_present_reload && that_present_reload))
          -          return false;
          -        if (this.reload != that.reload)
          -          return false;
          +        if (!(this_present_reload && that_present_reload)) return false;
          +        if (this.reload != that.reload) return false;
                 }
           
                 return true;
          @@ -26924,12 +29394,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((reload) ? 131071 : 524287);
           
          @@ -26982,11 +29450,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -27021,47 +29491,55 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (row == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'row' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRegionLocation_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRegionLocation_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRegionLocation_argsStandardScheme getScheme() {
                   return new getRegionLocation_argsStandardScheme();
                 }
               }
           
           -    private static class getRegionLocation_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRegionLocation_args> {
          +    private static class getRegionLocation_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getRegionLocation_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -27069,7 +29547,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_a
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27077,7 +29555,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_a
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27085,7 +29563,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_a
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.reload = iprot.readBool();
                           struct.setReloadIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27096,11 +29574,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_a
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionLocation_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionLocation_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -27123,17 +29603,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionLocation_
           
               }
           
          -    private static class getRegionLocation_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRegionLocation_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRegionLocation_argsTupleScheme getScheme() {
                   return new getRegionLocation_argsTupleScheme();
                 }
               }
           
           -    private static class getRegionLocation_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRegionLocation_args> {
           +    private static class getRegionLocation_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getRegionLocation_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   oprot.writeBinary(struct.row);
                   java.util.BitSet optionals = new java.util.BitSet();
          @@ -27147,8 +29631,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_a
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.row = iprot.readBinary();
          @@ -27161,29 +29647,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_ar
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getRegionLocation_result implements org.apache.thrift.TBase<getRegionLocation_result, getRegionLocation_result._Fields>, java.io.Serializable, Cloneable, Comparable<getRegionLocation_result>   {
           -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRegionLocation_result");
           +  public static class getRegionLocation_result implements
           +      org.apache.thrift.TBase<getRegionLocation_result, getRegionLocation_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getRegionLocation_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getRegionLocation_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getRegionLocation_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getRegionLocation_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getRegionLocation_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getRegionLocation_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable THRegionLocation success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -27196,7 +29697,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -27207,12 +29708,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -27244,22 +29745,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, THRegionLocation.class)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                THRegionLocation.class)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionLocation_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionLocation_result.class,
          +        metaDataMap);
               }
           
               public getRegionLocation_result() {
               }
           
          -    public getRegionLocation_result(
          -      THRegionLocation success,
          -      TIOError io)
          -    {
          +    public getRegionLocation_result(THRegionLocation success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -27292,7 +29798,8 @@ public THRegionLocation getSuccess() {
                 return this.success;
               }
           
          -    public getRegionLocation_result setSuccess(@org.apache.thrift.annotation.Nullable THRegionLocation success) {
          +    public getRegionLocation_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable THRegionLocation success) {
                 this.success = success;
                 return this;
               }
          @@ -27337,23 +29844,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((THRegionLocation)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((THRegionLocation) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -27361,27 +29869,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -27389,32 +29900,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getRegionLocation_result)
          -        return this.equals((getRegionLocation_result)that);
          +        return this.equals((getRegionLocation_result) that);
                 return false;
               }
           
               public boolean equals(getRegionLocation_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -27425,12 +29930,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -27471,13 +29974,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -27513,35 +30018,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getRegionLocation_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRegionLocation_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRegionLocation_resultStandardScheme getScheme() {
                   return new getRegionLocation_resultStandardScheme();
                 }
               }
           
           -    private static class getRegionLocation_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getRegionLocation_result> {
           +    private static class getRegionLocation_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getRegionLocation_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -27550,7 +30060,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_r
                           struct.success = new THRegionLocation();
                           struct.success.read(iprot);
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27559,7 +30069,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_r
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27570,11 +30080,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getRegionLocation_r
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionLocation_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionLocation_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -27594,17 +30106,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getRegionLocation_
           
               }
           
          -    private static class getRegionLocation_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getRegionLocation_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getRegionLocation_resultTupleScheme getScheme() {
                   return new getRegionLocation_resultTupleScheme();
                 }
               }
           
           -    private static class getRegionLocation_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getRegionLocation_result> {
           +    private static class getRegionLocation_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getRegionLocation_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -27622,8 +30138,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_r
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = new THRegionLocation();
          @@ -27638,26 +30156,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getRegionLocation_re
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getAllRegionLocations_args implements org.apache.thrift.TBase<getAllRegionLocations_args, getAllRegionLocations_args._Fields>, java.io.Serializable, Cloneable, Comparable<getAllRegionLocations_args>   {
           -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getAllRegionLocations_args");
           +  public static class getAllRegionLocations_args implements
           +      org.apache.thrift.TBase<getAllRegionLocations_args, getAllRegionLocations_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getAllRegionLocations_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getAllRegionLocations_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getAllRegionLocations_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getAllRegionLocations_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getAllRegionLocations_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getAllRegionLocations_argsTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      TABLE((short)1, "table");
          +      TABLE((short) 1, "table");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -27670,7 +30202,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     default:
          @@ -27679,12 +30211,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -27716,19 +30248,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getAllRegionLocations_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getAllRegionLocations_args.class, metaDataMap);
               }
           
               public getAllRegionLocations_args() {
               }
           
          -    public getAllRegionLocations_args(
          -      java.nio.ByteBuffer table)
          -    {
          +    public getAllRegionLocations_args(java.nio.ByteBuffer table) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
               }
          @@ -27761,11 +30296,13 @@ public java.nio.ByteBuffer bufferForTable() {
               }
           
               public getAllRegionLocations_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public getAllRegionLocations_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public getAllRegionLocations_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -27785,19 +30322,20 @@ public void setTableIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -27805,22 +30343,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          +        case TABLE:
          +          return isSetTable();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -27828,23 +30369,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getAllRegionLocations_args)
          -        return this.equals((getAllRegionLocations_args)that);
          +        return this.equals((getAllRegionLocations_args) that);
                 return false;
               }
           
               public boolean equals(getAllRegionLocations_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 return true;
          @@ -27855,8 +30392,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 return hashCode;
               }
          @@ -27887,11 +30423,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -27914,42 +30452,48 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getAllRegionLocations_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getAllRegionLocations_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getAllRegionLocations_argsStandardScheme getScheme() {
                   return new getAllRegionLocations_argsStandardScheme();
                 }
               }
           
           -    private static class getAllRegionLocations_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getAllRegionLocations_args> {
           +    private static class getAllRegionLocations_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getAllRegionLocations_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getAllRegionLocations_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getAllRegionLocations_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -27957,7 +30501,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getAllRegionLocatio
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -27968,11 +30512,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getAllRegionLocatio
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getAllRegionLocations_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getAllRegionLocations_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -27987,51 +30533,72 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getAllRegionLocati
           
               }
           
          -    private static class getAllRegionLocations_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getAllRegionLocations_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getAllRegionLocations_argsTupleScheme getScheme() {
                   return new getAllRegionLocations_argsTupleScheme();
                 }
               }
           
           -    private static class getAllRegionLocations_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getAllRegionLocations_args> {
           +    private static class getAllRegionLocations_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getAllRegionLocations_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getAllRegionLocations_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getAllRegionLocations_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getAllRegionLocations_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getAllRegionLocations_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getAllRegionLocations_result implements org.apache.thrift.TBase<getAllRegionLocations_result, getAllRegionLocations_result._Fields>, java.io.Serializable, Cloneable, Comparable<getAllRegionLocations_result>   {
           -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getAllRegionLocations_result");
           +  public static class getAllRegionLocations_result implements
           +      org.apache.thrift.TBase<getAllRegionLocations_result, getAllRegionLocations_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getAllRegionLocations_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getAllRegionLocations_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getAllRegionLocations_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getAllRegionLocations_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getAllRegionLocations_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getAllRegionLocations_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<THRegionLocation> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map byName = new java.util.HashMap();
          +      private static final java.util.Map byName =
          +          new java.util.HashMap();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -28044,7 +30611,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -28055,12 +30622,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -28092,23 +30659,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, THRegionLocation.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, THRegionLocation.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getAllRegionLocations_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getAllRegionLocations_result.class, metaDataMap);
               }
           
               public getAllRegionLocations_result() {
               }
           
           -    public getAllRegionLocations_result(
           -      java.util.List<THRegionLocation> success,
           -      TIOError io)
           -    {
           +    public getAllRegionLocations_result(java.util.List<THRegionLocation> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -28119,7 +30691,8 @@ public getAllRegionLocations_result(
                */
               public getAllRegionLocations_result(getAllRegionLocations_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<THRegionLocation> __this__success = new java.util.ArrayList<THRegionLocation>(other.success.size());
           +        java.util.List<THRegionLocation> __this__success =
           +            new java.util.ArrayList<THRegionLocation>(other.success.size());
                   for (THRegionLocation other_element : other.success) {
                     __this__success.add(new THRegionLocation(other_element));
                   }
           @@ -28161,7 +30734,8 @@ public java.util.List<THRegionLocation> getSuccess() {
                 return this.success;
               }
           
           -    public getAllRegionLocations_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<THRegionLocation> success) {
           +    public getAllRegionLocations_result setSuccess(
           +        @org.apache.thrift.annotation.Nullable java.util.List<THRegionLocation> success) {
                 this.success = success;
                 return this;
               }
          @@ -28206,23 +30780,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<THRegionLocation>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<THRegionLocation>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -28230,27 +30805,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -28258,32 +30836,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getAllRegionLocations_result)
          -        return this.equals((getAllRegionLocations_result)that);
          +        return this.equals((getAllRegionLocations_result) that);
                 return false;
               }
           
               public boolean equals(getAllRegionLocations_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -28294,12 +30866,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -28340,13 +30910,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -28379,35 +30951,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getAllRegionLocations_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getAllRegionLocations_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getAllRegionLocations_resultStandardScheme getScheme() {
                   return new getAllRegionLocations_resultStandardScheme();
                 }
               }
           
           -    private static class getAllRegionLocations_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getAllRegionLocations_result> {
           +    private static class getAllRegionLocations_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getAllRegionLocations_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getAllRegionLocations_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getAllRegionLocations_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -28416,9 +30993,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getAllRegionLocatio
                           {
                             org.apache.thrift.protocol.TList _list262 = iprot.readListBegin();
                          struct.success = new java.util.ArrayList<THRegionLocation>(_list262.size);
          -                  @org.apache.thrift.annotation.Nullable THRegionLocation _elem263;
          -                  for (int _i264 = 0; _i264 < _list262.size; ++_i264)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  THRegionLocation _elem263;
          +                  for (int _i264 = 0; _i264 < _list262.size; ++_i264) {
                               _elem263 = new THRegionLocation();
                               _elem263.read(iprot);
                               struct.success.add(_elem263);
          @@ -28426,7 +31003,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getAllRegionLocatio
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -28435,7 +31012,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getAllRegionLocatio
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -28446,20 +31023,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getAllRegionLocatio
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getAllRegionLocations_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getAllRegionLocations_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (THRegionLocation _iter265 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (THRegionLocation _iter265 : struct.success) {
                         _iter265.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -28477,17 +31056,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getAllRegionLocati
           
               }
           
          -    private static class getAllRegionLocations_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getAllRegionLocations_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getAllRegionLocations_resultTupleScheme getScheme() {
                   return new getAllRegionLocations_resultTupleScheme();
                 }
               }
           
           -    private static class getAllRegionLocations_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getAllRegionLocations_result> {
           +    private static class getAllRegionLocations_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getAllRegionLocations_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getAllRegionLocations_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getAllRegionLocations_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -28499,8 +31082,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getAllRegionLocatio
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (THRegionLocation _iter266 : struct.success)
          -            {
          +            for (THRegionLocation _iter266 : struct.success) {
                         _iter266.write(oprot);
                       }
                     }
          @@ -28511,16 +31093,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getAllRegionLocatio
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getAllRegionLocations_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getAllRegionLocations_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list267 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list267 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                      struct.success = new java.util.ArrayList<THRegionLocation>(_list267.size);
          -            @org.apache.thrift.annotation.Nullable THRegionLocation _elem268;
          -            for (int _i269 = 0; _i269 < _list267.size; ++_i269)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            THRegionLocation _elem268;
          +            for (int _i269 = 0; _i269 < _list267.size; ++_i269) {
                         _elem268 = new THRegionLocation();
                         _elem268.read(iprot);
                         struct.success.add(_elem268);
          @@ -28536,24 +31121,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getAllRegionLocation
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class checkAndMutate_args implements org.apache.thrift.TBase<checkAndMutate_args, checkAndMutate_args._Fields>, java.io.Serializable, Cloneable, Comparable<checkAndMutate_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndMutate_args");
          -
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2);
          -    private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)3);
          -    private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING, (short)4);
          -    private static final org.apache.thrift.protocol.TField COMPARE_OPERATOR_FIELD_DESC = new org.apache.thrift.protocol.TField("compareOperator", org.apache.thrift.protocol.TType.I32, (short)5);
          -    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)6);
          -    private static final org.apache.thrift.protocol.TField ROW_MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("rowMutations", org.apache.thrift.protocol.TType.STRUCT, (short)7);
          -
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new checkAndMutate_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new checkAndMutate_argsTupleSchemeFactory();
           +  public static class checkAndMutate_args
           +      implements org.apache.thrift.TBase<checkAndMutate_args, checkAndMutate_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<checkAndMutate_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("checkAndMutate_args");
          +
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
          +    private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING,
          +            (short) 3);
          +    private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING,
          +            (short) 4);
          +    private static final org.apache.thrift.protocol.TField COMPARE_OPERATOR_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("compareOperator",
          +            org.apache.thrift.protocol.TType.I32, (short) 5);
          +    private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING,
          +            (short) 6);
          +    private static final org.apache.thrift.protocol.TField ROW_MUTATIONS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("rowMutations",
          +            org.apache.thrift.protocol.TType.STRUCT, (short) 7);
          +
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new checkAndMutate_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new checkAndMutate_argsTupleSchemeFactory();
           
               /**
                * to check in and delete from
           @@ -28573,13 +31180,12 @@ public static class checkAndMutate_args implements org.apache.thrift.TBase
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -28634,7 +31243,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     case 2: // ROW
          @@ -28655,12 +31264,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -28692,37 +31301,54 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("qualifier", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.COMPARE_OPERATOR, new org.apache.thrift.meta_data.FieldMetaData("compareOperator", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TCompareOperator.class)));
          -      tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          -      tmpMap.put(_Fields.ROW_MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("rowMutations", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowMutations.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.ROW,
          +        new org.apache.thrift.meta_data.FieldMetaData("row",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.FAMILY,
          +        new org.apache.thrift.meta_data.FieldMetaData("family",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.QUALIFIER,
          +        new org.apache.thrift.meta_data.FieldMetaData("qualifier",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.COMPARE_OPERATOR,
          +        new org.apache.thrift.meta_data.FieldMetaData("compareOperator",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +                TCompareOperator.class)));
          +      tmpMap.put(_Fields.VALUE,
          +        new org.apache.thrift.meta_data.FieldMetaData("value",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
          +      tmpMap.put(_Fields.ROW_MUTATIONS,
          +        new org.apache.thrift.meta_data.FieldMetaData("rowMutations",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TRowMutations.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndMutate_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndMutate_args.class,
          +        metaDataMap);
               }
           
               public checkAndMutate_args() {
               }
           
          -    public checkAndMutate_args(
          -      java.nio.ByteBuffer table,
          -      java.nio.ByteBuffer row,
          -      java.nio.ByteBuffer family,
          -      java.nio.ByteBuffer qualifier,
          -      TCompareOperator compareOperator,
          -      java.nio.ByteBuffer value,
          -      TRowMutations rowMutations)
          -    {
          +    public checkAndMutate_args(java.nio.ByteBuffer table, java.nio.ByteBuffer row,
          +        java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, TCompareOperator compareOperator,
          +        java.nio.ByteBuffer value, TRowMutations rowMutations) {
                 this();
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
          @@ -28791,11 +31417,13 @@ public java.nio.ByteBuffer bufferForTable() {
                * to check in and delete from
                */
               public checkAndMutate_args setTable(byte[] table) {
          -      this.table = table == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(table.clone());
          +      this.table =
          +          table == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(table.clone());
                 return this;
               }
           
          -    public checkAndMutate_args setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
          +    public checkAndMutate_args
          +        setTable(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer table) {
                 this.table = org.apache.thrift.TBaseHelper.copyBinary(table);
                 return this;
               }
          @@ -28831,11 +31459,12 @@ public java.nio.ByteBuffer bufferForRow() {
                * row to check
                */
               public checkAndMutate_args setRow(byte[] row) {
          -      this.row = row == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(row.clone());
          +      this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
                 return this;
               }
           
          -    public checkAndMutate_args setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
          +    public checkAndMutate_args
          +        setRow(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
                 return this;
               }
          @@ -28871,11 +31500,13 @@ public java.nio.ByteBuffer bufferForFamily() {
                * column family to check
                */
               public checkAndMutate_args setFamily(byte[] family) {
          -      this.family = family == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(family.clone());
          +      this.family =
          +          family == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(family.clone());
                 return this;
               }
           
          -    public checkAndMutate_args setFamily(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family) {
          +    public checkAndMutate_args
          +        setFamily(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer family) {
                 this.family = org.apache.thrift.TBaseHelper.copyBinary(family);
                 return this;
               }
          @@ -28911,11 +31542,13 @@ public java.nio.ByteBuffer bufferForQualifier() {
                * column qualifier to check
                */
               public checkAndMutate_args setQualifier(byte[] qualifier) {
          -      this.qualifier = qualifier == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(qualifier.clone());
          +      this.qualifier = qualifier == null ? (java.nio.ByteBuffer) null
          +          : java.nio.ByteBuffer.wrap(qualifier.clone());
                 return this;
               }
           
          -    public checkAndMutate_args setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
          +    public checkAndMutate_args
          +        setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
                 this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
                 return this;
               }
          @@ -28937,7 +31570,6 @@ public void setQualifierIsSet(boolean value) {
           
               /**
                * comparison to make on the value
          -     * 
                * @see TCompareOperator
                */
               @org.apache.thrift.annotation.Nullable
          @@ -28947,10 +31579,10 @@ public TCompareOperator getCompareOperator() {
           
               /**
                * comparison to make on the value
          -     * 
                * @see TCompareOperator
                */
          -    public checkAndMutate_args setCompareOperator(@org.apache.thrift.annotation.Nullable TCompareOperator compareOperator) {
          +    public checkAndMutate_args setCompareOperator(
          +        @org.apache.thrift.annotation.Nullable TCompareOperator compareOperator) {
                 this.compareOperator = compareOperator;
                 return this;
               }
          @@ -28959,7 +31591,9 @@ public void unsetCompareOperator() {
                 this.compareOperator = null;
               }
           
          -    /** Returns true if field compareOperator is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field compareOperator is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetCompareOperator() {
                 return this.compareOperator != null;
               }
          @@ -28971,8 +31605,8 @@ public void setCompareOperatorIsSet(boolean value) {
               }
           
               /**
          -     * the expected value to be compared against, if not provided the
          -     * check is for the non-existence of the column in question
          +     * the expected value to be compared against, if not provided the check is for the non-existence
          +     * of the column in question
                */
               public byte[] getValue() {
                 setValue(org.apache.thrift.TBaseHelper.rightSize(value));
          @@ -28984,15 +31618,17 @@ public java.nio.ByteBuffer bufferForValue() {
               }
           
               /**
          -     * the expected value to be compared against, if not provided the
          -     * check is for the non-existence of the column in question
          +     * the expected value to be compared against, if not provided the check is for the non-existence
          +     * of the column in question
                */
               public checkAndMutate_args setValue(byte[] value) {
          -      this.value = value == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(value.clone());
          +      this.value =
          +          value == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(value.clone());
                 return this;
               }
           
          -    public checkAndMutate_args setValue(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value) {
          +    public checkAndMutate_args
          +        setValue(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer value) {
                 this.value = org.apache.thrift.TBaseHelper.copyBinary(value);
                 return this;
               }
          @@ -29023,7 +31659,8 @@ public TRowMutations getRowMutations() {
               /**
                * row mutations to execute if the value matches
                */
          -    public checkAndMutate_args setRowMutations(@org.apache.thrift.annotation.Nullable TRowMutations rowMutations) {
          +    public checkAndMutate_args
          +        setRowMutations(@org.apache.thrift.annotation.Nullable TRowMutations rowMutations) {
                 this.rowMutations = rowMutations;
                 return this;
               }
          @@ -29043,83 +31680,84 @@ public void setRowMutationsIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setTable((byte[])value);
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
                     } else {
          -            setTable((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setTable((byte[]) value);
          +            } else {
          +              setTable((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW:
          -        if (value == null) {
          -          unsetRow();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setRow((byte[])value);
          +        case ROW:
          +          if (value == null) {
          +            unsetRow();
                     } else {
          -            setRow((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setRow((byte[]) value);
          +            } else {
          +              setRow((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case FAMILY:
          -        if (value == null) {
          -          unsetFamily();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setFamily((byte[])value);
          +        case FAMILY:
          +          if (value == null) {
          +            unsetFamily();
                     } else {
          -            setFamily((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setFamily((byte[]) value);
          +            } else {
          +              setFamily((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case QUALIFIER:
          -        if (value == null) {
          -          unsetQualifier();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setQualifier((byte[])value);
          +        case QUALIFIER:
          +          if (value == null) {
          +            unsetQualifier();
                     } else {
          -            setQualifier((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setQualifier((byte[]) value);
          +            } else {
          +              setQualifier((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case COMPARE_OPERATOR:
          -        if (value == null) {
          -          unsetCompareOperator();
          -        } else {
          -          setCompareOperator((TCompareOperator)value);
          -        }
          -        break;
          +        case COMPARE_OPERATOR:
          +          if (value == null) {
          +            unsetCompareOperator();
          +          } else {
          +            setCompareOperator((TCompareOperator) value);
          +          }
          +          break;
           
          -      case VALUE:
          -        if (value == null) {
          -          unsetValue();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setValue((byte[])value);
          +        case VALUE:
          +          if (value == null) {
          +            unsetValue();
                     } else {
          -            setValue((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setValue((byte[]) value);
          +            } else {
          +              setValue((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
          -      case ROW_MUTATIONS:
          -        if (value == null) {
          -          unsetRowMutations();
          -        } else {
          -          setRowMutations((TRowMutations)value);
          -        }
          -        break;
          +        case ROW_MUTATIONS:
          +          if (value == null) {
          +            unsetRowMutations();
          +          } else {
          +            setRowMutations((TRowMutations) value);
          +          }
          +          break;
           
                 }
               }
          @@ -29127,130 +31765,116 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
          -      case ROW:
          -        return getRow();
          +        case ROW:
          +          return getRow();
           
          -      case FAMILY:
          -        return getFamily();
          +        case FAMILY:
          +          return getFamily();
           
          -      case QUALIFIER:
          -        return getQualifier();
          +        case QUALIFIER:
          +          return getQualifier();
           
          -      case COMPARE_OPERATOR:
          -        return getCompareOperator();
          +        case COMPARE_OPERATOR:
          +          return getCompareOperator();
           
          -      case VALUE:
          -        return getValue();
          +        case VALUE:
          +          return getValue();
           
          -      case ROW_MUTATIONS:
          -        return getRowMutations();
          +        case ROW_MUTATIONS:
          +          return getRowMutations();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          -      case ROW:
          -        return isSetRow();
          -      case FAMILY:
          -        return isSetFamily();
          -      case QUALIFIER:
          -        return isSetQualifier();
          -      case COMPARE_OPERATOR:
          -        return isSetCompareOperator();
          -      case VALUE:
          -        return isSetValue();
          -      case ROW_MUTATIONS:
          -        return isSetRowMutations();
          +        case TABLE:
          +          return isSetTable();
          +        case ROW:
          +          return isSetRow();
          +        case FAMILY:
          +          return isSetFamily();
          +        case QUALIFIER:
          +          return isSetQualifier();
          +        case COMPARE_OPERATOR:
          +          return isSetCompareOperator();
          +        case VALUE:
          +          return isSetValue();
          +        case ROW_MUTATIONS:
          +          return isSetRowMutations();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof checkAndMutate_args)
          -        return this.equals((checkAndMutate_args)that);
          +      if (that instanceof checkAndMutate_args) return this.equals((checkAndMutate_args) that);
                 return false;
               }
           
               public boolean equals(checkAndMutate_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 boolean this_present_row = true && this.isSetRow();
                 boolean that_present_row = true && that.isSetRow();
                 if (this_present_row || that_present_row) {
          -        if (!(this_present_row && that_present_row))
          -          return false;
          -        if (!this.row.equals(that.row))
          -          return false;
          +        if (!(this_present_row && that_present_row)) return false;
          +        if (!this.row.equals(that.row)) return false;
                 }
           
                 boolean this_present_family = true && this.isSetFamily();
                 boolean that_present_family = true && that.isSetFamily();
                 if (this_present_family || that_present_family) {
          -        if (!(this_present_family && that_present_family))
          -          return false;
          -        if (!this.family.equals(that.family))
          -          return false;
          +        if (!(this_present_family && that_present_family)) return false;
          +        if (!this.family.equals(that.family)) return false;
                 }
           
                 boolean this_present_qualifier = true && this.isSetQualifier();
                 boolean that_present_qualifier = true && that.isSetQualifier();
                 if (this_present_qualifier || that_present_qualifier) {
          -        if (!(this_present_qualifier && that_present_qualifier))
          -          return false;
          -        if (!this.qualifier.equals(that.qualifier))
          -          return false;
          +        if (!(this_present_qualifier && that_present_qualifier)) return false;
          +        if (!this.qualifier.equals(that.qualifier)) return false;
                 }
           
                 boolean this_present_compareOperator = true && this.isSetCompareOperator();
                 boolean that_present_compareOperator = true && that.isSetCompareOperator();
                 if (this_present_compareOperator || that_present_compareOperator) {
          -        if (!(this_present_compareOperator && that_present_compareOperator))
          -          return false;
          -        if (!this.compareOperator.equals(that.compareOperator))
          -          return false;
          +        if (!(this_present_compareOperator && that_present_compareOperator)) return false;
          +        if (!this.compareOperator.equals(that.compareOperator)) return false;
                 }
           
                 boolean this_present_value = true && this.isSetValue();
                 boolean that_present_value = true && that.isSetValue();
                 if (this_present_value || that_present_value) {
          -        if (!(this_present_value && that_present_value))
          -          return false;
          -        if (!this.value.equals(that.value))
          -          return false;
          +        if (!(this_present_value && that_present_value)) return false;
          +        if (!this.value.equals(that.value)) return false;
                 }
           
                 boolean this_present_rowMutations = true && this.isSetRowMutations();
                 boolean that_present_rowMutations = true && that.isSetRowMutations();
                 if (this_present_rowMutations || that_present_rowMutations) {
          -        if (!(this_present_rowMutations && that_present_rowMutations))
          -          return false;
          -        if (!this.rowMutations.equals(that.rowMutations))
          -          return false;
          +        if (!(this_present_rowMutations && that_present_rowMutations)) return false;
          +        if (!this.rowMutations.equals(that.rowMutations)) return false;
                 }
           
                 return true;
          @@ -29261,32 +31885,25 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -      if (isSetRow())
          -        hashCode = hashCode * 8191 + row.hashCode();
          +      if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetFamily()) ? 131071 : 524287);
          -      if (isSetFamily())
          -        hashCode = hashCode * 8191 + family.hashCode();
          +      if (isSetFamily()) hashCode = hashCode * 8191 + family.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetQualifier()) ? 131071 : 524287);
          -      if (isSetQualifier())
          -        hashCode = hashCode * 8191 + qualifier.hashCode();
          +      if (isSetQualifier()) hashCode = hashCode * 8191 + qualifier.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetCompareOperator()) ? 131071 : 524287);
          -      if (isSetCompareOperator())
          -        hashCode = hashCode * 8191 + compareOperator.getValue();
          +      if (isSetCompareOperator()) hashCode = hashCode * 8191 + compareOperator.getValue();
           
                 hashCode = hashCode * 8191 + ((isSetValue()) ? 131071 : 524287);
          -      if (isSetValue())
          -        hashCode = hashCode * 8191 + value.hashCode();
          +      if (isSetValue()) hashCode = hashCode * 8191 + value.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetRowMutations()) ? 131071 : 524287);
          -      if (isSetRowMutations())
          -        hashCode = hashCode * 8191 + rowMutations.hashCode();
          +      if (isSetRowMutations()) hashCode = hashCode * 8191 + rowMutations.hashCode();
           
                 return hashCode;
               }
          @@ -29339,12 +31956,14 @@ public int compareTo(checkAndMutate_args other) {
                     return lastComparison;
                   }
                 }
          -      lastComparison = java.lang.Boolean.compare(isSetCompareOperator(), other.isSetCompareOperator());
          +      lastComparison =
          +          java.lang.Boolean.compare(isSetCompareOperator(), other.isSetCompareOperator());
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
                 if (isSetCompareOperator()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compareOperator, other.compareOperator);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.compareOperator, other.compareOperator);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -29364,7 +31983,8 @@ public int compareTo(checkAndMutate_args other) {
                   return lastComparison;
                 }
                 if (isSetRowMutations()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rowMutations, other.rowMutations);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.rowMutations, other.rowMutations);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -29377,11 +31997,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -29452,22 +32074,28 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 if (row == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'row' was not present! Struct: " + toString());
                 }
                 if (family == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'family' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'family' was not present! Struct: " + toString());
                 }
                 if (qualifier == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifier' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'qualifier' was not present! Struct: " + toString());
                 }
                 if (compareOperator == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'compareOperator' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'compareOperator' was not present! Struct: " + toString());
                 }
                 if (rowMutations == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'rowMutations' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'rowMutations' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (rowMutations != null) {
          @@ -29477,35 +32105,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class checkAndMutate_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndMutate_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndMutate_argsStandardScheme getScheme() {
                   return new checkAndMutate_argsStandardScheme();
                 }
               }
           
          -    private static class checkAndMutate_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<checkAndMutate_args> {
          +    private static class checkAndMutate_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<checkAndMutate_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -29513,7 +32146,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.table = iprot.readBinary();
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29521,7 +32154,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.row = iprot.readBinary();
                           struct.setRowIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29529,7 +32162,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.family = iprot.readBinary();
                           struct.setFamilyIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29537,15 +32170,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.qualifier = iprot.readBinary();
                           struct.setQualifierIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
                       case 5: // COMPARE_OPERATOR
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -                struct.compareOperator = org.apache.hadoop.hbase.thrift2.generated.TCompareOperator.findByValue(iprot.readI32());
          +                struct.compareOperator = org.apache.hadoop.hbase.thrift2.generated.TCompareOperator
          +                    .findByValue(iprot.readI32());
                           struct.setCompareOperatorIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29553,7 +32187,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.value = iprot.readBinary();
                           struct.setValueIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29562,7 +32196,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args
                           struct.rowMutations = new TRowMutations();
                           struct.rowMutations.read(iprot);
                           struct.setRowMutationsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -29573,11 +32207,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndMutate_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndMutate_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -29622,17 +32258,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndMutate_arg
           
               }
           
          -    private static class checkAndMutate_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndMutate_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndMutate_argsTupleScheme getScheme() {
                   return new checkAndMutate_argsTupleScheme();
                 }
               }
           
          -    private static class checkAndMutate_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<checkAndMutate_args> {
          +    private static class checkAndMutate_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<checkAndMutate_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBinary(struct.table);
                   oprot.writeBinary(struct.row);
                   oprot.writeBinary(struct.family);
          @@ -29650,8 +32290,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_args
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = iprot.readBinary();
                   struct.setTableIsSet(true);
                   struct.row = iprot.readBinary();
          @@ -29660,7 +32302,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_args
                   struct.setFamilyIsSet(true);
                   struct.qualifier = iprot.readBinary();
                   struct.setQualifierIsSet(true);
          -        struct.compareOperator = org.apache.hadoop.hbase.thrift2.generated.TCompareOperator.findByValue(iprot.readI32());
          +        struct.compareOperator =
          +            org.apache.hadoop.hbase.thrift2.generated.TCompareOperator.findByValue(iprot.readI32());
                   struct.setCompareOperatorIsSet(true);
                   struct.rowMutations = new TRowMutations();
                   struct.rowMutations.read(iprot);
          @@ -29673,29 +32316,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_args
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class checkAndMutate_result implements org.apache.thrift.TBase<checkAndMutate_result, checkAndMutate_result._Fields>, java.io.Serializable, Cloneable, Comparable<checkAndMutate_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("checkAndMutate_result");
          +  public static class checkAndMutate_result
          +      implements org.apache.thrift.TBase<checkAndMutate_result, checkAndMutate_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<checkAndMutate_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("checkAndMutate_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new checkAndMutate_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new checkAndMutate_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new checkAndMutate_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new checkAndMutate_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -29708,7 +32366,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -29719,12 +32377,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -29758,22 +32416,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndMutate_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(checkAndMutate_result.class,
          +        metaDataMap);
               }
           
               public checkAndMutate_result() {
               }
           
          -    public checkAndMutate_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public checkAndMutate_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -29813,7 +32476,8 @@ public checkAndMutate_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -29822,7 +32486,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -29850,23 +32515,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -29874,60 +32540,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof checkAndMutate_result)
          -        return this.equals((checkAndMutate_result)that);
          +      if (that instanceof checkAndMutate_result) return this.equals((checkAndMutate_result) that);
                 return false;
               }
           
               public boolean equals(checkAndMutate_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -29940,8 +32602,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -29982,13 +32643,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -30017,37 +32680,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class checkAndMutate_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndMutate_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndMutate_resultStandardScheme getScheme() {
                   return new checkAndMutate_resultStandardScheme();
                 }
               }
           
          -    private static class checkAndMutate_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<checkAndMutate_result> {
          +    private static class checkAndMutate_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<checkAndMutate_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -30055,7 +32724,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_resu
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -30064,7 +32733,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_resu
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -30075,11 +32744,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, checkAndMutate_resu
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndMutate_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndMutate_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -30099,17 +32770,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, checkAndMutate_res
           
               }
           
          -    private static class checkAndMutate_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class checkAndMutate_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public checkAndMutate_resultTupleScheme getScheme() {
                   return new checkAndMutate_resultTupleScheme();
                 }
               }
           
          -    private static class checkAndMutate_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<checkAndMutate_result> {
          +    private static class checkAndMutate_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<checkAndMutate_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -30127,8 +32802,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_resu
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -30142,32 +32819,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, checkAndMutate_resul
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getTableDescriptor_args implements org.apache.thrift.TBase<getTableDescriptor_args, getTableDescriptor_args._Fields>, java.io.Serializable, Cloneable, Comparable<getTableDescriptor_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableDescriptor_args");
          +  public static class getTableDescriptor_args
          +      implements org.apache.thrift.TBase<getTableDescriptor_args, getTableDescriptor_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getTableDescriptor_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableDescriptor_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableDescriptor_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableDescriptor_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableDescriptor_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableDescriptor_argsTupleSchemeFactory();
           
               /**
                * the tablename of the table to get tableDescriptor
                */
               public @org.apache.thrift.annotation.Nullable TTableName table; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename of the table to get tableDescriptor
                  */
          -      TABLE((short)1, "table");
          +      TABLE((short) 1, "table");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -30180,7 +32871,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE
                       return TABLE;
                     default:
          @@ -30189,12 +32880,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -30226,19 +32917,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE,
          +        new org.apache.thrift.meta_data.FieldMetaData("table",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptor_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptor_args.class,
          +        metaDataMap);
               }
           
               public getTableDescriptor_args() {
               }
           
          -    public getTableDescriptor_args(
          -      TTableName table)
          -    {
          +    public getTableDescriptor_args(TTableName table) {
                 this();
                 this.table = table;
               }
          @@ -30272,7 +32966,8 @@ public TTableName getTable() {
               /**
                * the tablename of the table to get tableDescriptor
                */
          -    public getTableDescriptor_args setTable(@org.apache.thrift.annotation.Nullable TTableName table) {
          +    public getTableDescriptor_args
          +        setTable(@org.apache.thrift.annotation.Nullable TTableName table) {
                 this.table = table;
                 return this;
               }
          @@ -30292,15 +32987,16 @@ public void setTableIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE:
          -        if (value == null) {
          -          unsetTable();
          -        } else {
          -          setTable((TTableName)value);
          -        }
          -        break;
          +        case TABLE:
          +          if (value == null) {
          +            unsetTable();
          +          } else {
          +            setTable((TTableName) value);
          +          }
          +          break;
           
                 }
               }
          @@ -30308,22 +33004,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE:
          -        return getTable();
          +        case TABLE:
          +          return getTable();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE:
          -        return isSetTable();
          +        case TABLE:
          +          return isSetTable();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -30331,23 +33030,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableDescriptor_args)
          -        return this.equals((getTableDescriptor_args)that);
          +        return this.equals((getTableDescriptor_args) that);
                 return false;
               }
           
               public boolean equals(getTableDescriptor_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_table = true && this.isSetTable();
                 boolean that_present_table = true && that.isSetTable();
                 if (this_present_table || that_present_table) {
          -        if (!(this_present_table && that_present_table))
          -          return false;
          -        if (!this.table.equals(that.table))
          -          return false;
          +        if (!(this_present_table && that_present_table)) return false;
          +        if (!this.table.equals(that.table)) return false;
                 }
           
                 return true;
          @@ -30358,8 +33053,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTable()) ? 131071 : 524287);
          -      if (isSetTable())
          -        hashCode = hashCode * 8191 + table.hashCode();
          +      if (isSetTable()) hashCode = hashCode * 8191 + table.hashCode();
           
                 return hashCode;
               }
          @@ -30390,11 +33084,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -30417,7 +33113,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (table == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'table' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (table != null) {
          @@ -30427,35 +33124,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableDescriptor_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptor_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptor_argsStandardScheme getScheme() {
                   return new getTableDescriptor_argsStandardScheme();
                 }
               }
           
          -    private static class getTableDescriptor_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableDescriptor_args> {
          +    private static class getTableDescriptor_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getTableDescriptor_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptor_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptor_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -30464,7 +33166,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptor_
                           struct.table = new TTableName();
                           struct.table.read(iprot);
                           struct.setTableIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -30475,11 +33177,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptor_
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -30494,52 +33198,73 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor
           
               }
           
          -    private static class getTableDescriptor_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptor_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptor_argsTupleScheme getScheme() {
                   return new getTableDescriptor_argsTupleScheme();
                 }
               }
           
          -    private static class getTableDescriptor_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableDescriptor_args> {
          +    private static class getTableDescriptor_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getTableDescriptor_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.table = new TTableName();
                   struct.table.read(iprot);
                   struct.setTableIsSet(true);
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getTableDescriptor_result implements org.apache.thrift.TBase<getTableDescriptor_result, getTableDescriptor_result._Fields>, java.io.Serializable, Cloneable, Comparable<getTableDescriptor_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableDescriptor_result");
          +  public static class getTableDescriptor_result implements
          +      org.apache.thrift.TBase<getTableDescriptor_result, getTableDescriptor_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getTableDescriptor_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableDescriptor_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableDescriptor_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableDescriptor_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableDescriptor_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableDescriptor_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TTableDescriptor success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -30552,7 +33277,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -30563,12 +33288,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -30600,22 +33325,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableDescriptor.class)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptor_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableDescriptor_result.class, metaDataMap);
               }
           
               public getTableDescriptor_result() {
               }
           
          -    public getTableDescriptor_result(
          -      TTableDescriptor success,
          -      TIOError io)
          -    {
          +    public getTableDescriptor_result(TTableDescriptor success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -30648,7 +33378,8 @@ public TTableDescriptor getSuccess() {
                 return this.success;
               }
           
          -    public getTableDescriptor_result setSuccess(@org.apache.thrift.annotation.Nullable TTableDescriptor success) {
          +    public getTableDescriptor_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable TTableDescriptor success) {
                 this.success = success;
                 return this;
               }
          @@ -30693,23 +33424,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((TTableDescriptor)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((TTableDescriptor) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -30717,27 +33449,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -30745,32 +33480,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableDescriptor_result)
          -        return this.equals((getTableDescriptor_result)that);
          +        return this.equals((getTableDescriptor_result) that);
                 return false;
               }
           
               public boolean equals(getTableDescriptor_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -30781,12 +33510,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -30827,13 +33554,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -30869,35 +33598,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableDescriptor_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptor_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptor_resultStandardScheme getScheme() {
                   return new getTableDescriptor_resultStandardScheme();
                 }
               }
           
          -    private static class getTableDescriptor_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableDescriptor_result> {
          +    private static class getTableDescriptor_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getTableDescriptor_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptor_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptor_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -30906,7 +33640,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptor_
                           struct.success = new TTableDescriptor();
                           struct.success.read(iprot);
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -30915,7 +33649,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptor_
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -30926,11 +33660,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptor_
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableDescriptor_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -30950,17 +33686,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor
           
               }
           
          -    private static class getTableDescriptor_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptor_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptor_resultTupleScheme getScheme() {
                   return new getTableDescriptor_resultTupleScheme();
                 }
               }
           
          -    private static class getTableDescriptor_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableDescriptor_result> {
          +    private static class getTableDescriptor_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getTableDescriptor_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -30978,8 +33718,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = new TTableDescriptor();
          @@ -30994,32 +33736,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptor_r
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getTableDescriptors_args implements org.apache.thrift.TBase<getTableDescriptors_args, getTableDescriptors_args._Fields>, java.io.Serializable, Cloneable, Comparable<getTableDescriptors_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableDescriptors_args");
          +  public static class getTableDescriptors_args implements
          +      org.apache.thrift.TBase<getTableDescriptors_args, getTableDescriptors_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getTableDescriptors_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableDescriptors_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLES_FIELD_DESC = new org.apache.thrift.protocol.TField("tables", org.apache.thrift.protocol.TType.LIST, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tables", org.apache.thrift.protocol.TType.LIST,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableDescriptors_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableDescriptors_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableDescriptors_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableDescriptors_argsTupleSchemeFactory();
           
               /**
                * the tablename list of the tables to get tableDescriptor
                */
               public @org.apache.thrift.annotation.Nullable java.util.List<TTableName> tables; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename list of the tables to get tableDescriptor
                  */
          -      TABLES((short)1, "tables");
          +      TABLES((short) 1, "tables");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -31032,7 +33788,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLES
                       return TABLES;
                     default:
          @@ -31041,12 +33797,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -31078,20 +33834,23 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLES, new org.apache.thrift.meta_data.FieldMetaData("tables", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLES,
          +        new org.apache.thrift.meta_data.FieldMetaData("tables",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TTableName.class))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptors_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptors_args.class,
          +        metaDataMap);
               }
           
               public getTableDescriptors_args() {
               }
           
          -    public getTableDescriptors_args(
          -      java.util.List<TTableName> tables)
          -    {
          +    public getTableDescriptors_args(java.util.List<TTableName> tables) {
                 this();
                 this.tables = tables;
               }
          @@ -31101,7 +33860,8 @@ public getTableDescriptors_args(
                */
               public getTableDescriptors_args(getTableDescriptors_args other) {
                 if (other.isSetTables()) {
          -        java.util.List<TTableName> __this__tables = new java.util.ArrayList<TTableName>(other.tables.size());
          +        java.util.List<TTableName> __this__tables =
          +            new java.util.ArrayList<TTableName>(other.tables.size());
                   for (TTableName other_element : other.tables) {
                     __this__tables.add(new TTableName(other_element));
                   }
          @@ -31145,7 +33905,8 @@ public java.util.List<TTableName> getTables() {
               /**
                * the tablename list of the tables to get tableDescriptor
                */
          -    public getTableDescriptors_args setTables(@org.apache.thrift.annotation.Nullable java.util.List<TTableName> tables) {
          +    public getTableDescriptors_args
          +        setTables(@org.apache.thrift.annotation.Nullable java.util.List<TTableName> tables) {
                 this.tables = tables;
                 return this;
               }
          @@ -31165,15 +33926,16 @@ public void setTablesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLES:
          -        if (value == null) {
          -          unsetTables();
          -        } else {
          -          setTables((java.util.List<TTableName>)value);
          -        }
          -        break;
          +        case TABLES:
          +          if (value == null) {
          +            unsetTables();
          +          } else {
          +            setTables((java.util.List<TTableName>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -31181,22 +33943,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLES:
          -        return getTables();
          +        case TABLES:
          +          return getTables();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLES:
          -        return isSetTables();
          +        case TABLES:
          +          return isSetTables();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -31204,23 +33969,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableDescriptors_args)
          -        return this.equals((getTableDescriptors_args)that);
          +        return this.equals((getTableDescriptors_args) that);
                 return false;
               }
           
               public boolean equals(getTableDescriptors_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tables = true && this.isSetTables();
                 boolean that_present_tables = true && that.isSetTables();
                 if (this_present_tables || that_present_tables) {
          -        if (!(this_present_tables && that_present_tables))
          -          return false;
          -        if (!this.tables.equals(that.tables))
          -          return false;
          +        if (!(this_present_tables && that_present_tables)) return false;
          +        if (!this.tables.equals(that.tables)) return false;
                 }
           
                 return true;
          @@ -31231,8 +33992,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTables()) ? 131071 : 524287);
          -      if (isSetTables())
          -        hashCode = hashCode * 8191 + tables.hashCode();
          +      if (isSetTables()) hashCode = hashCode * 8191 + tables.hashCode();
           
                 return hashCode;
               }
          @@ -31263,11 +34023,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -31290,42 +34052,48 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tables == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tables' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tables' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableDescriptors_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptors_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptors_argsStandardScheme getScheme() {
                   return new getTableDescriptors_argsStandardScheme();
                 }
               }
           
          -    private static class getTableDescriptors_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableDescriptors_args> {
          +    private static class getTableDescriptors_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getTableDescriptors_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -31334,9 +34102,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                           {
                             org.apache.thrift.protocol.TList _list270 = iprot.readListBegin();
                             struct.tables = new java.util.ArrayList<TTableName>(_list270.size);
          -                  @org.apache.thrift.annotation.Nullable TTableName _elem271;
          -                  for (int _i272 = 0; _i272 < _list270.size; ++_i272)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TTableName _elem271;
          +                  for (int _i272 = 0; _i272 < _list270.size; ++_i272) {
                               _elem271 = new TTableName();
                               _elem271.read(iprot);
                               struct.tables.add(_elem271);
          @@ -31344,7 +34112,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                             iprot.readListEnd();
                           }
                           struct.setTablesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -31355,20 +34123,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptors_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptors_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.tables != null) {
                     oprot.writeFieldBegin(TABLES_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size()));
          -            for (TTableName _iter273 : struct.tables)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.tables.size()));
          +            for (TTableName _iter273 : struct.tables) {
                         _iter273.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -31381,35 +34151,41 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor
           
               }
           
          -    private static class getTableDescriptors_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptors_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptors_argsTupleScheme getScheme() {
                   return new getTableDescriptors_argsTupleScheme();
                 }
               }
           
          -    private static class getTableDescriptors_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableDescriptors_args> {
          +    private static class getTableDescriptors_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getTableDescriptors_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   {
                     oprot.writeI32(struct.tables.size());
          -          for (TTableName _iter274 : struct.tables)
          -          {
          +          for (TTableName _iter274 : struct.tables) {
                       _iter274.write(oprot);
                     }
                   }
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   {
          -          org.apache.thrift.protocol.TList _list275 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list275 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                     struct.tables = new java.util.ArrayList<TTableName>(_list275.size);
          -          @org.apache.thrift.annotation.Nullable TTableName _elem276;
          -          for (int _i277 = 0; _i277 < _list275.size; ++_i277)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TTableName _elem276;
          +          for (int _i277 = 0; _i277 < _list275.size; ++_i277) {
                       _elem276 = new TTableName();
                       _elem276.read(iprot);
                       struct.tables.add(_elem276);
          @@ -31419,29 +34195,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors_
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getTableDescriptors_result implements org.apache.thrift.TBase<getTableDescriptors_result, getTableDescriptors_result._Fields>, java.io.Serializable, Cloneable, Comparable<getTableDescriptors_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableDescriptors_result");
          +  public static class getTableDescriptors_result implements
          +      org.apache.thrift.TBase<getTableDescriptors_result, getTableDescriptors_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getTableDescriptors_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableDescriptors_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableDescriptors_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableDescriptors_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableDescriptors_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableDescriptors_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TTableDescriptor> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -31454,7 +34245,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -31465,12 +34256,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -31502,23 +34293,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptors_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableDescriptors_result.class, metaDataMap);
               }
           
               public getTableDescriptors_result() {
               }
           
          -    public getTableDescriptors_result(
          -      java.util.List<TTableDescriptor> success,
          -      TIOError io)
          -    {
          +    public getTableDescriptors_result(java.util.List<TTableDescriptor> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -31529,7 +34325,8 @@ public getTableDescriptors_result(
                */
               public getTableDescriptors_result(getTableDescriptors_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TTableDescriptor> __this__success = new java.util.ArrayList<TTableDescriptor>(other.success.size());
          +        java.util.List<TTableDescriptor> __this__success =
          +            new java.util.ArrayList<TTableDescriptor>(other.success.size());
                   for (TTableDescriptor other_element : other.success) {
                     __this__success.add(new TTableDescriptor(other_element));
                   }
          @@ -31571,7 +34368,8 @@ public java.util.List<TTableDescriptor> getSuccess() {
                 return this.success;
               }
           
          -    public getTableDescriptors_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TTableDescriptor> success) {
          +    public getTableDescriptors_result setSuccess(
          +        @org.apache.thrift.annotation.Nullable java.util.List<TTableDescriptor> success) {
                 this.success = success;
                 return this;
               }
          @@ -31616,23 +34414,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TTableDescriptor>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TTableDescriptor>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -31640,27 +34439,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -31668,32 +34470,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableDescriptors_result)
          -        return this.equals((getTableDescriptors_result)that);
          +        return this.equals((getTableDescriptors_result) that);
                 return false;
               }
           
               public boolean equals(getTableDescriptors_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -31704,12 +34500,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -31750,13 +34544,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -31789,35 +34585,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableDescriptors_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptors_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptors_resultStandardScheme getScheme() {
                   return new getTableDescriptors_resultStandardScheme();
                 }
               }
           
           -    private static class getTableDescriptors_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableDescriptors_result> {
          +    private static class getTableDescriptors_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getTableDescriptors_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableDescriptors_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -31826,9 +34627,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                           {
                             org.apache.thrift.protocol.TList _list278 = iprot.readListBegin();
                              struct.success = new java.util.ArrayList<TTableDescriptor>(_list278.size);
          -                  @org.apache.thrift.annotation.Nullable TTableDescriptor _elem279;
          -                  for (int _i280 = 0; _i280 < _list278.size; ++_i280)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TTableDescriptor _elem279;
          +                  for (int _i280 = 0; _i280 < _list278.size; ++_i280) {
                               _elem279 = new TTableDescriptor();
                               _elem279.read(iprot);
                               struct.success.add(_elem279);
          @@ -31836,7 +34637,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -31845,7 +34646,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -31856,20 +34657,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptors_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableDescriptors_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TTableDescriptor _iter281 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TTableDescriptor _iter281 : struct.success) {
                         _iter281.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -31887,17 +34690,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor
           
               }
           
          -    private static class getTableDescriptors_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptors_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptors_resultTupleScheme getScheme() {
                   return new getTableDescriptors_resultTupleScheme();
                 }
               }
           
           -    private static class getTableDescriptors_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableDescriptors_result> {
          +    private static class getTableDescriptors_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getTableDescriptors_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableDescriptors_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -31909,8 +34716,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TTableDescriptor _iter282 : struct.success)
          -            {
          +            for (TTableDescriptor _iter282 : struct.success) {
                         _iter282.write(oprot);
                       }
                     }
          @@ -31921,16 +34727,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list283 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list283 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TTableDescriptor>(_list283.size);
          -            @org.apache.thrift.annotation.Nullable TTableDescriptor _elem284;
          -            for (int _i285 = 0; _i285 < _list283.size; ++_i285)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TTableDescriptor _elem284;
          +            for (int _i285 = 0; _i285 < _list283.size; ++_i285) {
                         _elem284 = new TTableDescriptor();
                         _elem284.read(iprot);
                         struct.success.add(_elem284);
          @@ -31946,32 +34755,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors_
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class tableExists_args implements org.apache.thrift.TBase<tableExists_args, tableExists_args._Fields>, java.io.Serializable, Cloneable, Comparable<tableExists_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("tableExists_args");
          +  public static class tableExists_args
           +      implements org.apache.thrift.TBase<tableExists_args, tableExists_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<tableExists_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("tableExists_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new tableExists_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new tableExists_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new tableExists_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new tableExists_argsTupleSchemeFactory();
           
               /**
                * the tablename of the tables to check
                */
               public @org.apache.thrift.annotation.Nullable TTableName tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename of the tables to check
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -31984,7 +34807,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -31993,12 +34816,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -32030,19 +34853,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(tableExists_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(tableExists_args.class,
          +        metaDataMap);
               }
           
               public tableExists_args() {
               }
           
          -    public tableExists_args(
          -      TTableName tableName)
          -    {
          +    public tableExists_args(TTableName tableName) {
                 this();
                 this.tableName = tableName;
               }
          @@ -32076,7 +34902,8 @@ public TTableName getTableName() {
               /**
                * the tablename of the tables to check
                */
          -    public tableExists_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public tableExists_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -32096,15 +34923,16 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
                 }
               }
          @@ -32112,46 +34940,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof tableExists_args)
          -        return this.equals((tableExists_args)that);
          +      if (that instanceof tableExists_args) return this.equals((tableExists_args) that);
                 return false;
               }
           
               public boolean equals(tableExists_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -32162,8 +34988,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -32194,11 +35019,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -32228,35 +35055,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class tableExists_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class tableExists_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public tableExists_argsStandardScheme getScheme() {
                   return new tableExists_argsStandardScheme();
                 }
               }
           
           -    private static class tableExists_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<tableExists_args> {
          +    private static class tableExists_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<tableExists_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, tableExists_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, tableExists_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -32265,7 +35097,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, tableExists_args st
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -32276,11 +35108,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, tableExists_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, tableExists_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, tableExists_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -32295,17 +35129,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, tableExists_args s
           
               }
           
          -    private static class tableExists_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class tableExists_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public tableExists_argsTupleScheme getScheme() {
                   return new tableExists_argsTupleScheme();
                 }
               }
           
           -    private static class tableExists_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<tableExists_args> {
          +    private static class tableExists_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<tableExists_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, tableExists_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, tableExists_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetTableName()) {
                     optionals.set(0);
          @@ -32317,8 +35155,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, tableExists_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, tableExists_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, tableExists_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.tableName = new TTableName();
          @@ -32328,29 +35168,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, tableExists_args str
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class tableExists_result implements org.apache.thrift.TBase<tableExists_result, tableExists_result._Fields>, java.io.Serializable, Cloneable, Comparable<tableExists_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("tableExists_result");
          +  public static class tableExists_result
           +      implements org.apache.thrift.TBase<tableExists_result, tableExists_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<tableExists_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("tableExists_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new tableExists_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new tableExists_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new tableExists_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new tableExists_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -32363,7 +35218,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -32374,12 +35229,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -32413,22 +35268,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(tableExists_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(tableExists_result.class,
          +        metaDataMap);
               }
           
               public tableExists_result() {
               }
           
          -    public tableExists_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public tableExists_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -32468,7 +35328,8 @@ public tableExists_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -32477,7 +35338,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -32505,23 +35367,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -32529,60 +35392,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof tableExists_result)
          -        return this.equals((tableExists_result)that);
          +      if (that instanceof tableExists_result) return this.equals((tableExists_result) that);
                 return false;
               }
           
               public boolean equals(tableExists_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -32595,8 +35454,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -32637,13 +35495,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -32672,37 +35532,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class tableExists_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class tableExists_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public tableExists_resultStandardScheme getScheme() {
                   return new tableExists_resultStandardScheme();
                 }
               }
           
           -    private static class tableExists_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<tableExists_result> {
          +    private static class tableExists_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<tableExists_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, tableExists_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, tableExists_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -32710,7 +35576,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, tableExists_result
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -32719,7 +35585,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, tableExists_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -32730,11 +35596,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, tableExists_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, tableExists_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, tableExists_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -32754,17 +35622,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, tableExists_result
           
               }
           
          -    private static class tableExists_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class tableExists_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public tableExists_resultTupleScheme getScheme() {
                   return new tableExists_resultTupleScheme();
                 }
               }
           
           -    private static class tableExists_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<tableExists_result> {
          +    private static class tableExists_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<tableExists_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, tableExists_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, tableExists_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -32782,8 +35654,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, tableExists_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, tableExists_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, tableExists_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -32797,19 +35671,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, tableExists_result s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getTableDescriptorsByPattern_args implements org.apache.thrift.TBase<getTableDescriptorsByPattern_args, getTableDescriptorsByPattern_args._Fields>, java.io.Serializable, Cloneable, Comparable<getTableDescriptorsByPattern_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableDescriptorsByPattern_args");
          +  public static class getTableDescriptorsByPattern_args implements
           +      org.apache.thrift.TBase<getTableDescriptorsByPattern_args, getTableDescriptorsByPattern_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getTableDescriptorsByPattern_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableDescriptorsByPattern_args");
           
          -    private static final org.apache.thrift.protocol.TField REGEX_FIELD_DESC = new org.apache.thrift.protocol.TField("regex", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField INCLUDE_SYS_TABLES_FIELD_DESC = new org.apache.thrift.protocol.TField("includeSysTables", org.apache.thrift.protocol.TType.BOOL, (short)2);
          +    private static final org.apache.thrift.protocol.TField REGEX_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("regex", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField INCLUDE_SYS_TABLES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("includeSysTables",
          +            org.apache.thrift.protocol.TType.BOOL, (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableDescriptorsByPattern_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableDescriptorsByPattern_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableDescriptorsByPattern_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableDescriptorsByPattern_argsTupleSchemeFactory();
           
               /**
                * The regular expression to match against
          @@ -32820,18 +35706,22 @@ public static class getTableDescriptorsByPattern_args implements org.apache.thri
                */
               public boolean includeSysTables; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * The regular expression to match against
                  */
          -      REGEX((short)1, "regex"),
          +      REGEX((short) 1, "regex"),
                 /**
                  * set to false if match only against userspace tables
                  */
          -      INCLUDE_SYS_TABLES((short)2, "includeSysTables");
          +      INCLUDE_SYS_TABLES((short) 2, "includeSysTables");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -32844,7 +35734,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // REGEX
                       return REGEX;
                     case 2: // INCLUDE_SYS_TABLES
          @@ -32855,12 +35745,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -32894,22 +35784,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.REGEX, new org.apache.thrift.meta_data.FieldMetaData("regex", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -      tmpMap.put(_Fields.INCLUDE_SYS_TABLES, new org.apache.thrift.meta_data.FieldMetaData("includeSysTables", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.REGEX,
          +        new org.apache.thrift.meta_data.FieldMetaData("regex",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING)));
          +      tmpMap.put(_Fields.INCLUDE_SYS_TABLES,
          +        new org.apache.thrift.meta_data.FieldMetaData("includeSysTables",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptorsByPattern_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableDescriptorsByPattern_args.class, metaDataMap);
               }
           
               public getTableDescriptorsByPattern_args() {
               }
           
          -    public getTableDescriptorsByPattern_args(
          -      java.lang.String regex,
          -      boolean includeSysTables)
          -    {
          +    public getTableDescriptorsByPattern_args(java.lang.String regex, boolean includeSysTables) {
                 this();
                 this.regex = regex;
                 this.includeSysTables = includeSysTables;
          @@ -32949,7 +35844,8 @@ public java.lang.String getRegex() {
               /**
                * The regular expression to match against
                */
          -    public getTableDescriptorsByPattern_args setRegex(@org.apache.thrift.annotation.Nullable java.lang.String regex) {
          +    public getTableDescriptorsByPattern_args
          +        setRegex(@org.apache.thrift.annotation.Nullable java.lang.String regex) {
                 this.regex = regex;
                 return this;
               }
          @@ -32986,35 +35882,40 @@ public getTableDescriptorsByPattern_args setIncludeSysTables(boolean includeSysT
               }
           
               public void unsetIncludeSysTables() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __INCLUDESYSTABLES_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __INCLUDESYSTABLES_ISSET_ID);
               }
           
          -    /** Returns true if field includeSysTables is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field includeSysTables is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetIncludeSysTables() {
                 return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __INCLUDESYSTABLES_ISSET_ID);
               }
           
               public void setIncludeSysTablesIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __INCLUDESYSTABLES_ISSET_ID, value);
          +      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +        __INCLUDESYSTABLES_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case REGEX:
          -        if (value == null) {
          -          unsetRegex();
          -        } else {
          -          setRegex((java.lang.String)value);
          -        }
          -        break;
          +        case REGEX:
          +          if (value == null) {
          +            unsetRegex();
          +          } else {
          +            setRegex((java.lang.String) value);
          +          }
          +          break;
           
          -      case INCLUDE_SYS_TABLES:
          -        if (value == null) {
          -          unsetIncludeSysTables();
          -        } else {
          -          setIncludeSysTables((java.lang.Boolean)value);
          -        }
          -        break;
          +        case INCLUDE_SYS_TABLES:
          +          if (value == null) {
          +            unsetIncludeSysTables();
          +          } else {
          +            setIncludeSysTables((java.lang.Boolean) value);
          +          }
          +          break;
           
                 }
               }
          @@ -33022,27 +35923,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case REGEX:
          -        return getRegex();
          +        case REGEX:
          +          return getRegex();
           
          -      case INCLUDE_SYS_TABLES:
          -        return isIncludeSysTables();
          +        case INCLUDE_SYS_TABLES:
          +          return isIncludeSysTables();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case REGEX:
          -        return isSetRegex();
          -      case INCLUDE_SYS_TABLES:
          -        return isSetIncludeSysTables();
          +        case REGEX:
          +          return isSetRegex();
          +        case INCLUDE_SYS_TABLES:
          +          return isSetIncludeSysTables();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -33050,32 +35954,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableDescriptorsByPattern_args)
          -        return this.equals((getTableDescriptorsByPattern_args)that);
          +        return this.equals((getTableDescriptorsByPattern_args) that);
                 return false;
               }
           
               public boolean equals(getTableDescriptorsByPattern_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_regex = true && this.isSetRegex();
                 boolean that_present_regex = true && that.isSetRegex();
                 if (this_present_regex || that_present_regex) {
          -        if (!(this_present_regex && that_present_regex))
          -          return false;
          -        if (!this.regex.equals(that.regex))
          -          return false;
          +        if (!(this_present_regex && that_present_regex)) return false;
          +        if (!this.regex.equals(that.regex)) return false;
                 }
           
                 boolean this_present_includeSysTables = true;
                 boolean that_present_includeSysTables = true;
                 if (this_present_includeSysTables || that_present_includeSysTables) {
          -        if (!(this_present_includeSysTables && that_present_includeSysTables))
          -          return false;
          -        if (this.includeSysTables != that.includeSysTables)
          -          return false;
          +        if (!(this_present_includeSysTables && that_present_includeSysTables)) return false;
          +        if (this.includeSysTables != that.includeSysTables) return false;
                 }
           
                 return true;
          @@ -33086,8 +35984,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetRegex()) ? 131071 : 524287);
          -      if (isSetRegex())
          -        hashCode = hashCode * 8191 + regex.hashCode();
          +      if (isSetRegex()) hashCode = hashCode * 8191 + regex.hashCode();
           
                 hashCode = hashCode * 8191 + ((includeSysTables) ? 131071 : 524287);
           
          @@ -33112,12 +36009,14 @@ public int compareTo(getTableDescriptorsByPattern_args other) {
                     return lastComparison;
                   }
                 }
          -      lastComparison = java.lang.Boolean.compare(isSetIncludeSysTables(), other.isSetIncludeSysTables());
          +      lastComparison =
          +          java.lang.Boolean.compare(isSetIncludeSysTables(), other.isSetIncludeSysTables());
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
                 if (isSetIncludeSysTables()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.includeSysTables, other.includeSysTables);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.includeSysTables, other.includeSysTables);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -33130,17 +36029,20 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
               @Override
               public java.lang.String toString() {
          -      java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableDescriptorsByPattern_args(");
          +      java.lang.StringBuilder sb =
          +          new java.lang.StringBuilder("getTableDescriptorsByPattern_args(");
                 boolean first = true;
           
                 sb.append("regex:");
          @@ -33160,43 +36062,50 @@ public java.lang.String toString() {
           
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
          -      // alas, we cannot check 'includeSysTables' because it's a primitive and you chose the non-beans generator.
          +      // alas, we cannot check 'includeSysTables' because it's a primitive and you chose the
          +      // non-beans generator.
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableDescriptorsByPattern_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptorsByPattern_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptorsByPattern_argsStandardScheme getScheme() {
                   return new getTableDescriptorsByPattern_argsStandardScheme();
                 }
               }
           
           -    private static class getTableDescriptorsByPattern_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableDescriptorsByPattern_args> {
           +    private static class getTableDescriptorsByPattern_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getTableDescriptorsByPattern_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptorsByPattern_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableDescriptorsByPattern_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -33204,7 +36113,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.regex = iprot.readString();
                           struct.setRegexIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -33212,7 +36121,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.includeSysTables = iprot.readBool();
                           struct.setIncludeSysTablesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -33223,14 +36132,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   if (!struct.isSetIncludeSysTables()) {
          -          throw new org.apache.thrift.protocol.TProtocolException("Required field 'includeSysTables' was not found in serialized data! Struct: " + toString());
          +          throw new org.apache.thrift.protocol.TProtocolException(
          +              "Required field 'includeSysTables' was not found in serialized data! Struct: "
          +                  + toString());
                   }
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptorsByPattern_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableDescriptorsByPattern_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -33248,17 +36161,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor
           
               }
           
          -    private static class getTableDescriptorsByPattern_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptorsByPattern_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptorsByPattern_argsTupleScheme getScheme() {
                   return new getTableDescriptorsByPattern_argsTupleScheme();
                 }
               }
           
           -    private static class getTableDescriptorsByPattern_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableDescriptorsByPattern_args> {
           +    private static class getTableDescriptorsByPattern_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getTableDescriptorsByPattern_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsByPattern_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableDescriptorsByPattern_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBool(struct.includeSysTables);
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetRegex()) {
          @@ -33271,8 +36188,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsByPattern_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableDescriptorsByPattern_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.includeSysTables = iprot.readBool();
                   struct.setIncludeSysTablesIsSet(true);
                   java.util.BitSet incoming = iprot.readBitSet(1);
          @@ -33283,29 +36202,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsB
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getTableDescriptorsByPattern_result implements org.apache.thrift.TBase<getTableDescriptorsByPattern_result, getTableDescriptorsByPattern_result._Fields>, java.io.Serializable, Cloneable, Comparable<getTableDescriptorsByPattern_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableDescriptorsByPattern_result");
           +  public static class getTableDescriptorsByPattern_result implements
           +      org.apache.thrift.TBase<getTableDescriptorsByPattern_result, getTableDescriptorsByPattern_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getTableDescriptorsByPattern_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableDescriptorsByPattern_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableDescriptorsByPattern_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableDescriptorsByPattern_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableDescriptorsByPattern_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableDescriptorsByPattern_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TTableDescriptor> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -33318,7 +36252,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -33329,12 +36263,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -33366,23 +36300,29 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptorsByPattern_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableDescriptorsByPattern_result.class, metaDataMap);
               }
           
               public getTableDescriptorsByPattern_result() {
               }
           
          -    public getTableDescriptorsByPattern_result(
           -      java.util.List<TTableDescriptor> success,
          -      TIOError io)
          -    {
           +    public getTableDescriptorsByPattern_result(java.util.List<TTableDescriptor> success,
          +        TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -33393,7 +36333,8 @@ public getTableDescriptorsByPattern_result(
                */
               public getTableDescriptorsByPattern_result(getTableDescriptorsByPattern_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TTableDescriptor> __this__success = new java.util.ArrayList<TTableDescriptor>(other.success.size());
           +        java.util.List<TTableDescriptor> __this__success =
           +            new java.util.ArrayList<TTableDescriptor>(other.success.size());
                   for (TTableDescriptor other_element : other.success) {
                     __this__success.add(new TTableDescriptor(other_element));
                   }
          @@ -33435,7 +36376,8 @@ public java.util.List getSuccess() {
                 return this.success;
               }
           
           -    public getTableDescriptorsByPattern_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TTableDescriptor> success) {
           +    public getTableDescriptorsByPattern_result setSuccess(
           +        @org.apache.thrift.annotation.Nullable java.util.List<TTableDescriptor> success) {
                 this.success = success;
                 return this;
               }
          @@ -33460,7 +36402,8 @@ public TIOError getIo() {
                 return this.io;
               }
           
          -    public getTableDescriptorsByPattern_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
          +    public getTableDescriptorsByPattern_result
          +        setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
                 this.io = io;
                 return this;
               }
          @@ -33480,23 +36423,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TTableDescriptor>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TTableDescriptor>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -33504,27 +36448,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -33532,32 +36479,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableDescriptorsByPattern_result)
          -        return this.equals((getTableDescriptorsByPattern_result)that);
          +        return this.equals((getTableDescriptorsByPattern_result) that);
                 return false;
               }
           
               public boolean equals(getTableDescriptorsByPattern_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -33568,12 +36509,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -33614,17 +36553,20 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          -      java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableDescriptorsByPattern_result(");
          +      java.lang.StringBuilder sb =
          +          new java.lang.StringBuilder("getTableDescriptorsByPattern_result(");
                 boolean first = true;
           
                 sb.append("success:");
          @@ -33653,35 +36595,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableDescriptorsByPattern_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptorsByPattern_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptorsByPattern_resultStandardScheme getScheme() {
                   return new getTableDescriptorsByPattern_resultStandardScheme();
                 }
               }
           
           -    private static class getTableDescriptorsByPattern_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableDescriptorsByPattern_result> {
           +    private static class getTableDescriptorsByPattern_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getTableDescriptorsByPattern_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptorsByPattern_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableDescriptorsByPattern_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -33690,9 +36637,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                           {
                             org.apache.thrift.protocol.TList _list286 = iprot.readListBegin();
                              struct.success = new java.util.ArrayList<TTableDescriptor>(_list286.size);
          -                  @org.apache.thrift.annotation.Nullable TTableDescriptor _elem287;
          -                  for (int _i288 = 0; _i288 < _list286.size; ++_i288)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TTableDescriptor _elem287;
          +                  for (int _i288 = 0; _i288 < _list286.size; ++_i288) {
                               _elem287 = new TTableDescriptor();
                               _elem287.read(iprot);
                               struct.success.add(_elem287);
          @@ -33700,7 +36647,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -33709,7 +36656,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -33720,20 +36667,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptorsByPattern_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableDescriptorsByPattern_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TTableDescriptor _iter289 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TTableDescriptor _iter289 : struct.success) {
                         _iter289.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -33751,17 +36700,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor
           
               }
           
          -    private static class getTableDescriptorsByPattern_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptorsByPattern_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptorsByPattern_resultTupleScheme getScheme() {
                   return new getTableDescriptorsByPattern_resultTupleScheme();
                 }
               }
           
           -    private static class getTableDescriptorsByPattern_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableDescriptorsByPattern_result> {
           +    private static class getTableDescriptorsByPattern_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getTableDescriptorsByPattern_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsByPattern_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableDescriptorsByPattern_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -33773,8 +36726,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TTableDescriptor _iter290 : struct.success)
          -            {
          +            for (TTableDescriptor _iter290 : struct.success) {
                         _iter290.write(oprot);
                       }
                     }
          @@ -33785,16 +36737,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsByPattern_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableDescriptorsByPattern_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list291 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list291 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TTableDescriptor>(_list291.size);
          -            @org.apache.thrift.annotation.Nullable TTableDescriptor _elem292;
          -            for (int _i293 = 0; _i293 < _list291.size; ++_i293)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TTableDescriptor _elem292;
          +            for (int _i293 = 0; _i293 < _list291.size; ++_i293) {
                         _elem292 = new TTableDescriptor();
                         _elem292.read(iprot);
                         struct.success.add(_elem292);
          @@ -33810,32 +36765,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsB
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getTableDescriptorsByNamespace_args implements org.apache.thrift.TBase<getTableDescriptorsByNamespace_args, getTableDescriptorsByNamespace_args._Fields>, java.io.Serializable, Cloneable, Comparable<getTableDescriptorsByNamespace_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableDescriptorsByNamespace_args");
           +  public static class getTableDescriptorsByNamespace_args implements
           +      org.apache.thrift.TBase<getTableDescriptorsByNamespace_args, getTableDescriptorsByNamespace_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getTableDescriptorsByNamespace_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableDescriptorsByNamespace_args");
           
          -    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableDescriptorsByNamespace_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableDescriptorsByNamespace_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableDescriptorsByNamespace_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableDescriptorsByNamespace_argsTupleSchemeFactory();
           
               /**
                * The namesapce's name
                */
               public @org.apache.thrift.annotation.Nullable java.lang.String name; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * The namesapce's name
                  */
          -      NAME((short)1, "name");
          +      NAME((short) 1, "name");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -33848,7 +36817,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // NAME
                       return NAME;
                     default:
          @@ -33857,12 +36826,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -33894,19 +36863,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("name",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptorsByNamespace_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableDescriptorsByNamespace_args.class, metaDataMap);
               }
           
               public getTableDescriptorsByNamespace_args() {
               }
           
          -    public getTableDescriptorsByNamespace_args(
          -      java.lang.String name)
          -    {
          +    public getTableDescriptorsByNamespace_args(java.lang.String name) {
                 this();
                 this.name = name;
               }
          @@ -33940,7 +36912,8 @@ public java.lang.String getName() {
               /**
                 * The namespace's name
                */
          -    public getTableDescriptorsByNamespace_args setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
          +    public getTableDescriptorsByNamespace_args
          +        setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
                 this.name = name;
                 return this;
               }
          @@ -33960,15 +36933,16 @@ public void setNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case NAME:
          -        if (value == null) {
          -          unsetName();
          -        } else {
          -          setName((java.lang.String)value);
          -        }
          -        break;
          +        case NAME:
          +          if (value == null) {
          +            unsetName();
          +          } else {
          +            setName((java.lang.String) value);
          +          }
          +          break;
           
                 }
               }
          @@ -33976,22 +36950,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case NAME:
          -        return getName();
          +        case NAME:
          +          return getName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case NAME:
          -        return isSetName();
          +        case NAME:
          +          return isSetName();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -33999,23 +36976,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableDescriptorsByNamespace_args)
          -        return this.equals((getTableDescriptorsByNamespace_args)that);
          +        return this.equals((getTableDescriptorsByNamespace_args) that);
                 return false;
               }
           
               public boolean equals(getTableDescriptorsByNamespace_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_name = true && this.isSetName();
                 boolean that_present_name = true && that.isSetName();
                 if (this_present_name || that_present_name) {
          -        if (!(this_present_name && that_present_name))
          -          return false;
          -        if (!this.name.equals(that.name))
          -          return false;
          +        if (!(this_present_name && that_present_name)) return false;
          +        if (!this.name.equals(that.name)) return false;
                 }
           
                 return true;
          @@ -34026,8 +36999,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
          -      if (isSetName())
          -        hashCode = hashCode * 8191 + name.hashCode();
          +      if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
           
                 return hashCode;
               }
          @@ -34058,17 +37030,20 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
               @Override
               public java.lang.String toString() {
          -      java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableDescriptorsByNamespace_args(");
          +      java.lang.StringBuilder sb =
          +          new java.lang.StringBuilder("getTableDescriptorsByNamespace_args(");
                 boolean first = true;
           
                 sb.append("name:");
          @@ -34085,42 +37060,48 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (name == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'name' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableDescriptorsByNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptorsByNamespace_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptorsByNamespace_argsStandardScheme getScheme() {
                   return new getTableDescriptorsByNamespace_argsStandardScheme();
                 }
               }
           
           -    private static class getTableDescriptorsByNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableDescriptorsByNamespace_args> {
           +    private static class getTableDescriptorsByNamespace_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getTableDescriptorsByNamespace_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptorsByNamespace_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableDescriptorsByNamespace_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -34128,7 +37109,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.name = iprot.readString();
                           struct.setNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -34139,11 +37120,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptorsByNamespace_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableDescriptorsByNamespace_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -34158,51 +37141,72 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor
           
               }
           
          -    private static class getTableDescriptorsByNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptorsByNamespace_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptorsByNamespace_argsTupleScheme getScheme() {
                   return new getTableDescriptorsByNamespace_argsTupleScheme();
                 }
               }
           
           -    private static class getTableDescriptorsByNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableDescriptorsByNamespace_args> {
           +    private static class getTableDescriptorsByNamespace_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getTableDescriptorsByNamespace_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsByNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableDescriptorsByNamespace_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeString(struct.name);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsByNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableDescriptorsByNamespace_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.name = iprot.readString();
                   struct.setNameIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getTableDescriptorsByNamespace_result implements org.apache.thrift.TBase<getTableDescriptorsByNamespace_result, getTableDescriptorsByNamespace_result._Fields>, java.io.Serializable, Cloneable, Comparable<getTableDescriptorsByNamespace_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableDescriptorsByNamespace_result");
          +  public static class getTableDescriptorsByNamespace_result implements
           +      org.apache.thrift.TBase<getTableDescriptorsByNamespace_result, getTableDescriptorsByNamespace_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getTableDescriptorsByNamespace_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableDescriptorsByNamespace_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableDescriptorsByNamespace_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableDescriptorsByNamespace_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableDescriptorsByNamespace_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableDescriptorsByNamespace_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TTableDescriptor> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -34215,7 +37219,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -34226,12 +37230,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -34263,23 +37267,29 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableDescriptorsByNamespace_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableDescriptorsByNamespace_result.class, metaDataMap);
               }
           
               public getTableDescriptorsByNamespace_result() {
               }
           
          -    public getTableDescriptorsByNamespace_result(
           -      java.util.List<TTableDescriptor> success,
          -      TIOError io)
          -    {
           +    public getTableDescriptorsByNamespace_result(java.util.List<TTableDescriptor> success,
          +        TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -34290,7 +37300,8 @@ public getTableDescriptorsByNamespace_result(
                */
               public getTableDescriptorsByNamespace_result(getTableDescriptorsByNamespace_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TTableDescriptor> __this__success = new java.util.ArrayList<TTableDescriptor>(other.success.size());
           +        java.util.List<TTableDescriptor> __this__success =
           +            new java.util.ArrayList<TTableDescriptor>(other.success.size());
                   for (TTableDescriptor other_element : other.success) {
                     __this__success.add(new TTableDescriptor(other_element));
                   }
           @@ -34332,7 +37343,8 @@ public java.util.List<TTableDescriptor> getSuccess() {
                 return this.success;
               }
           
           -    public getTableDescriptorsByNamespace_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TTableDescriptor> success) {
          +    public getTableDescriptorsByNamespace_result setSuccess(
           +        @org.apache.thrift.annotation.Nullable java.util.List<TTableDescriptor> success) {
                 this.success = success;
                 return this;
               }
          @@ -34357,7 +37369,8 @@ public TIOError getIo() {
                 return this.io;
               }
           
          -    public getTableDescriptorsByNamespace_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
          +    public getTableDescriptorsByNamespace_result
          +        setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
                 this.io = io;
                 return this;
               }
          @@ -34377,23 +37390,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -        setSuccess((java.util.List<TTableDescriptor>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TTableDescriptor>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -34401,27 +37415,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -34429,32 +37446,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableDescriptorsByNamespace_result)
          -        return this.equals((getTableDescriptorsByNamespace_result)that);
          +        return this.equals((getTableDescriptorsByNamespace_result) that);
                 return false;
               }
           
               public boolean equals(getTableDescriptorsByNamespace_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -34465,12 +37476,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -34511,17 +37520,20 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          -      java.lang.StringBuilder sb = new java.lang.StringBuilder("getTableDescriptorsByNamespace_result(");
          +      java.lang.StringBuilder sb =
          +          new java.lang.StringBuilder("getTableDescriptorsByNamespace_result(");
                 boolean first = true;
           
                 sb.append("success:");
          @@ -34550,35 +37562,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableDescriptorsByNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptorsByNamespace_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptorsByNamespace_resultStandardScheme getScheme() {
                   return new getTableDescriptorsByNamespace_resultStandardScheme();
                 }
               }
           
           -    private static class getTableDescriptorsByNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableDescriptorsByNamespace_result> {
           +    private static class getTableDescriptorsByNamespace_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getTableDescriptorsByNamespace_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptorsByNamespace_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableDescriptorsByNamespace_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -34587,9 +37604,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                           {
                             org.apache.thrift.protocol.TList _list294 = iprot.readListBegin();
                          struct.success = new java.util.ArrayList<TTableDescriptor>(_list294.size);
          -                  @org.apache.thrift.annotation.Nullable TTableDescriptor _elem295;
          -                  for (int _i296 = 0; _i296 < _list294.size; ++_i296)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TTableDescriptor _elem295;
          +                  for (int _i296 = 0; _i296 < _list294.size; ++_i296) {
                               _elem295 = new TTableDescriptor();
                               _elem295.read(iprot);
                               struct.success.add(_elem295);
          @@ -34597,7 +37614,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -34606,7 +37623,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -34617,20 +37634,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableDescriptors
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptorsByNamespace_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableDescriptorsByNamespace_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TTableDescriptor _iter297 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TTableDescriptor _iter297 : struct.success) {
                         _iter297.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -34648,17 +37667,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableDescriptor
           
               }
           
          -    private static class getTableDescriptorsByNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableDescriptorsByNamespace_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableDescriptorsByNamespace_resultTupleScheme getScheme() {
                   return new getTableDescriptorsByNamespace_resultTupleScheme();
                 }
               }
           
           -    private static class getTableDescriptorsByNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableDescriptorsByNamespace_result> {
           +    private static class getTableDescriptorsByNamespace_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getTableDescriptorsByNamespace_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsByNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableDescriptorsByNamespace_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -34670,8 +37693,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TTableDescriptor _iter298 : struct.success)
          -            {
          +            for (TTableDescriptor _iter298 : struct.success) {
                         _iter298.write(oprot);
                       }
                     }
          @@ -34682,16 +37704,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableDescriptors
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsByNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableDescriptorsByNamespace_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list299 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list299 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                        struct.success = new java.util.ArrayList<TTableDescriptor>(_list299.size);
          -            @org.apache.thrift.annotation.Nullable TTableDescriptor _elem300;
          -            for (int _i301 = 0; _i301 < _list299.size; ++_i301)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TTableDescriptor _elem300;
          +            for (int _i301 = 0; _i301 < _list299.size; ++_i301) {
                         _elem300 = new TTableDescriptor();
                         _elem300.read(iprot);
                         struct.success.add(_elem300);
          @@ -34707,19 +37732,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableDescriptorsB
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getTableNamesByPattern_args implements org.apache.thrift.TBase<getTableNamesByPattern_args, getTableNamesByPattern_args._Fields>, java.io.Serializable, Cloneable, Comparable<getTableNamesByPattern_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNamesByPattern_args");
          +  public static class getTableNamesByPattern_args implements
           +      org.apache.thrift.TBase<getTableNamesByPattern_args, getTableNamesByPattern_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getTableNamesByPattern_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableNamesByPattern_args");
           
          -    private static final org.apache.thrift.protocol.TField REGEX_FIELD_DESC = new org.apache.thrift.protocol.TField("regex", org.apache.thrift.protocol.TType.STRING, (short)1);
          -    private static final org.apache.thrift.protocol.TField INCLUDE_SYS_TABLES_FIELD_DESC = new org.apache.thrift.protocol.TField("includeSysTables", org.apache.thrift.protocol.TType.BOOL, (short)2);
          +    private static final org.apache.thrift.protocol.TField REGEX_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("regex", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField INCLUDE_SYS_TABLES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("includeSysTables",
          +            org.apache.thrift.protocol.TType.BOOL, (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNamesByPattern_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNamesByPattern_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableNamesByPattern_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableNamesByPattern_argsTupleSchemeFactory();
           
               /**
                * The regular expression to match against
          @@ -34730,18 +37767,22 @@ public static class getTableNamesByPattern_args implements org.apache.thrift.TBa
                */
               public boolean includeSysTables; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * The regular expression to match against
                  */
          -      REGEX((short)1, "regex"),
          +      REGEX((short) 1, "regex"),
                 /**
                  * set to false if match only against userspace tables
                  */
          -      INCLUDE_SYS_TABLES((short)2, "includeSysTables");
          +      INCLUDE_SYS_TABLES((short) 2, "includeSysTables");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -34754,7 +37795,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // REGEX
                       return REGEX;
                     case 2: // INCLUDE_SYS_TABLES
          @@ -34765,12 +37806,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -34804,22 +37845,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.REGEX, new org.apache.thrift.meta_data.FieldMetaData("regex", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -      tmpMap.put(_Fields.INCLUDE_SYS_TABLES, new org.apache.thrift.meta_data.FieldMetaData("includeSysTables", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.REGEX,
          +        new org.apache.thrift.meta_data.FieldMetaData("regex",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING)));
          +      tmpMap.put(_Fields.INCLUDE_SYS_TABLES,
          +        new org.apache.thrift.meta_data.FieldMetaData("includeSysTables",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNamesByPattern_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableNamesByPattern_args.class, metaDataMap);
               }
           
               public getTableNamesByPattern_args() {
               }
           
          -    public getTableNamesByPattern_args(
          -      java.lang.String regex,
          -      boolean includeSysTables)
          -    {
          +    public getTableNamesByPattern_args(java.lang.String regex, boolean includeSysTables) {
                 this();
                 this.regex = regex;
                 this.includeSysTables = includeSysTables;
          @@ -34859,7 +37905,8 @@ public java.lang.String getRegex() {
               /**
                * The regular expression to match against
                */
          -    public getTableNamesByPattern_args setRegex(@org.apache.thrift.annotation.Nullable java.lang.String regex) {
          +    public getTableNamesByPattern_args
          +        setRegex(@org.apache.thrift.annotation.Nullable java.lang.String regex) {
                 this.regex = regex;
                 return this;
               }
          @@ -34896,35 +37943,40 @@ public getTableNamesByPattern_args setIncludeSysTables(boolean includeSysTables)
               }
           
               public void unsetIncludeSysTables() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __INCLUDESYSTABLES_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __INCLUDESYSTABLES_ISSET_ID);
               }
           
          -    /** Returns true if field includeSysTables is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field includeSysTables is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetIncludeSysTables() {
                 return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __INCLUDESYSTABLES_ISSET_ID);
               }
           
               public void setIncludeSysTablesIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __INCLUDESYSTABLES_ISSET_ID, value);
          +      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +        __INCLUDESYSTABLES_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case REGEX:
          -        if (value == null) {
          -          unsetRegex();
          -        } else {
          -          setRegex((java.lang.String)value);
          -        }
          -        break;
          +        case REGEX:
          +          if (value == null) {
          +            unsetRegex();
          +          } else {
          +            setRegex((java.lang.String) value);
          +          }
          +          break;
           
          -      case INCLUDE_SYS_TABLES:
          -        if (value == null) {
          -          unsetIncludeSysTables();
          -        } else {
          -          setIncludeSysTables((java.lang.Boolean)value);
          -        }
          -        break;
          +        case INCLUDE_SYS_TABLES:
          +          if (value == null) {
          +            unsetIncludeSysTables();
          +          } else {
          +            setIncludeSysTables((java.lang.Boolean) value);
          +          }
          +          break;
           
                 }
               }
          @@ -34932,27 +37984,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case REGEX:
          -        return getRegex();
          +        case REGEX:
          +          return getRegex();
           
          -      case INCLUDE_SYS_TABLES:
          -        return isIncludeSysTables();
          +        case INCLUDE_SYS_TABLES:
          +          return isIncludeSysTables();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case REGEX:
          -        return isSetRegex();
          -      case INCLUDE_SYS_TABLES:
          -        return isSetIncludeSysTables();
          +        case REGEX:
          +          return isSetRegex();
          +        case INCLUDE_SYS_TABLES:
          +          return isSetIncludeSysTables();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -34960,32 +38015,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableNamesByPattern_args)
          -        return this.equals((getTableNamesByPattern_args)that);
          +        return this.equals((getTableNamesByPattern_args) that);
                 return false;
               }
           
               public boolean equals(getTableNamesByPattern_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_regex = true && this.isSetRegex();
                 boolean that_present_regex = true && that.isSetRegex();
                 if (this_present_regex || that_present_regex) {
          -        if (!(this_present_regex && that_present_regex))
          -          return false;
          -        if (!this.regex.equals(that.regex))
          -          return false;
          +        if (!(this_present_regex && that_present_regex)) return false;
          +        if (!this.regex.equals(that.regex)) return false;
                 }
           
                 boolean this_present_includeSysTables = true;
                 boolean that_present_includeSysTables = true;
                 if (this_present_includeSysTables || that_present_includeSysTables) {
          -        if (!(this_present_includeSysTables && that_present_includeSysTables))
          -          return false;
          -        if (this.includeSysTables != that.includeSysTables)
          -          return false;
          +        if (!(this_present_includeSysTables && that_present_includeSysTables)) return false;
          +        if (this.includeSysTables != that.includeSysTables) return false;
                 }
           
                 return true;
          @@ -34996,8 +38045,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetRegex()) ? 131071 : 524287);
          -      if (isSetRegex())
          -        hashCode = hashCode * 8191 + regex.hashCode();
          +      if (isSetRegex()) hashCode = hashCode * 8191 + regex.hashCode();
           
                 hashCode = hashCode * 8191 + ((includeSysTables) ? 131071 : 524287);
           
          @@ -35022,12 +38070,14 @@ public int compareTo(getTableNamesByPattern_args other) {
                     return lastComparison;
                   }
                 }
          -      lastComparison = java.lang.Boolean.compare(isSetIncludeSysTables(), other.isSetIncludeSysTables());
          +      lastComparison =
          +          java.lang.Boolean.compare(isSetIncludeSysTables(), other.isSetIncludeSysTables());
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
                 if (isSetIncludeSysTables()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.includeSysTables, other.includeSysTables);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.includeSysTables, other.includeSysTables);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -35040,11 +38090,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -35070,43 +38122,50 @@ public java.lang.String toString() {
           
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
          -      // alas, we cannot check 'includeSysTables' because it's a primitive and you chose the non-beans generator.
          +      // alas, we cannot check 'includeSysTables' because it's a primitive and you chose the
          +      // non-beans generator.
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableNamesByPattern_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesByPattern_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesByPattern_argsStandardScheme getScheme() {
                   return new getTableNamesByPattern_argsStandardScheme();
                 }
               }
           
-    private static class getTableNamesByPattern_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableNamesByPattern_args> {
+    private static class getTableNamesByPattern_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<getTableNamesByPattern_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByPattern_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableNamesByPattern_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -35114,7 +38173,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByPatt
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.regex = iprot.readString();
                           struct.setRegexIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -35122,7 +38181,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByPatt
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.includeSysTables = iprot.readBool();
                           struct.setIncludeSysTablesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -35133,14 +38192,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByPatt
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   if (!struct.isSetIncludeSysTables()) {
          -          throw new org.apache.thrift.protocol.TProtocolException("Required field 'includeSysTables' was not found in serialized data! Struct: " + toString());
          +          throw new org.apache.thrift.protocol.TProtocolException(
          +              "Required field 'includeSysTables' was not found in serialized data! Struct: "
          +                  + toString());
                   }
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesByPattern_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableNamesByPattern_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -35158,17 +38221,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesByPat
           
               }
           
          -    private static class getTableNamesByPattern_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesByPattern_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesByPattern_argsTupleScheme getScheme() {
                   return new getTableNamesByPattern_argsTupleScheme();
                 }
               }
           
-    private static class getTableNamesByPattern_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableNamesByPattern_args> {
+    private static class getTableNamesByPattern_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<getTableNamesByPattern_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesByPattern_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesByPattern_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeBool(struct.includeSysTables);
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetRegex()) {
          @@ -35181,8 +38248,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesByPatt
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesByPattern_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesByPattern_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.includeSysTables = iprot.readBool();
                   struct.setIncludeSysTablesIsSet(true);
                   java.util.BitSet incoming = iprot.readBitSet(1);
          @@ -35193,29 +38262,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesByPatte
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
-      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
+        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class getTableNamesByPattern_result implements org.apache.thrift.TBase<getTableNamesByPattern_result, getTableNamesByPattern_result._Fields>, java.io.Serializable, Cloneable, Comparable<getTableNamesByPattern_result>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNamesByPattern_result");
+  public static class getTableNamesByPattern_result implements
+      org.apache.thrift.TBase<getTableNamesByPattern_result, getTableNamesByPattern_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<getTableNamesByPattern_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableNamesByPattern_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNamesByPattern_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNamesByPattern_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableNamesByPattern_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableNamesByPattern_resultTupleSchemeFactory();
           
     public @org.apache.thrift.annotation.Nullable java.util.List<TTableName> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -35228,7 +38312,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -35239,12 +38323,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -35276,23 +38360,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TTableName.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNamesByPattern_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableNamesByPattern_result.class, metaDataMap);
               }
           
               public getTableNamesByPattern_result() {
               }
           
          -    public getTableNamesByPattern_result(
-      java.util.List<TTableName> success,
          -      TIOError io)
          -    {
+    public getTableNamesByPattern_result(java.util.List<TTableName> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -35303,7 +38392,8 @@ public getTableNamesByPattern_result(
                */
               public getTableNamesByPattern_result(getTableNamesByPattern_result other) {
                 if (other.isSetSuccess()) {
-        java.util.List<TTableName> __this__success = new java.util.ArrayList<TTableName>(other.success.size());
+        java.util.List<TTableName> __this__success =
+            new java.util.ArrayList<TTableName>(other.success.size());
                   for (TTableName other_element : other.success) {
                     __this__success.add(new TTableName(other_element));
                   }
@@ -35345,7 +38435,8 @@ public java.util.List<TTableName> getSuccess() {
                 return this.success;
               }
           
-    public getTableNamesByPattern_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TTableName> success) {
+    public getTableNamesByPattern_result
+        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TTableName> success) {
                 this.success = success;
                 return this;
               }
          @@ -35390,23 +38481,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
-          setSuccess((java.util.List<TTableName>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
+            setSuccess((java.util.List<TTableName>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -35414,27 +38506,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -35442,32 +38537,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableNamesByPattern_result)
          -        return this.equals((getTableNamesByPattern_result)that);
          +        return this.equals((getTableNamesByPattern_result) that);
                 return false;
               }
           
               public boolean equals(getTableNamesByPattern_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -35478,12 +38567,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -35524,13 +38611,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -35563,35 +38652,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableNamesByPattern_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesByPattern_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesByPattern_resultStandardScheme getScheme() {
                   return new getTableNamesByPattern_resultStandardScheme();
                 }
               }
           
-    private static class getTableNamesByPattern_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableNamesByPattern_result> {
+    private static class getTableNamesByPattern_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<getTableNamesByPattern_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByPattern_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableNamesByPattern_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -35600,9 +38694,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByPatt
                           {
                             org.apache.thrift.protocol.TList _list302 = iprot.readListBegin();
                   struct.success = new java.util.ArrayList<TTableName>(_list302.size);
          -                  @org.apache.thrift.annotation.Nullable TTableName _elem303;
          -                  for (int _i304 = 0; _i304 < _list302.size; ++_i304)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TTableName _elem303;
          +                  for (int _i304 = 0; _i304 < _list302.size; ++_i304) {
                               _elem303 = new TTableName();
                               _elem303.read(iprot);
                               struct.success.add(_elem303);
          @@ -35610,7 +38704,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByPatt
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -35619,7 +38713,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByPatt
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -35630,20 +38724,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByPatt
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesByPattern_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableNamesByPattern_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TTableName _iter305 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TTableName _iter305 : struct.success) {
                         _iter305.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -35661,17 +38757,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesByPat
           
               }
           
          -    private static class getTableNamesByPattern_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesByPattern_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesByPattern_resultTupleScheme getScheme() {
                   return new getTableNamesByPattern_resultTupleScheme();
                 }
               }
           
-    private static class getTableNamesByPattern_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableNamesByPattern_result> {
+    private static class getTableNamesByPattern_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<getTableNamesByPattern_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesByPattern_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesByPattern_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -35683,8 +38783,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesByPatt
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TTableName _iter306 : struct.success)
          -            {
          +            for (TTableName _iter306 : struct.success) {
                         _iter306.write(oprot);
                       }
                     }
          @@ -35695,16 +38794,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesByPatt
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesByPattern_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesByPattern_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list307 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list307 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
             struct.success = new java.util.ArrayList<TTableName>(_list307.size);
          -            @org.apache.thrift.annotation.Nullable TTableName _elem308;
          -            for (int _i309 = 0; _i309 < _list307.size; ++_i309)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TTableName _elem308;
          +            for (int _i309 = 0; _i309 < _list307.size; ++_i309) {
                         _elem308 = new TTableName();
                         _elem308.read(iprot);
                         struct.success.add(_elem308);
          @@ -35720,32 +38822,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesByPatte
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
-      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
+        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class getTableNamesByNamespace_args implements org.apache.thrift.TBase<getTableNamesByNamespace_args, getTableNamesByNamespace_args._Fields>, java.io.Serializable, Cloneable, Comparable<getTableNamesByNamespace_args>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNamesByNamespace_args");
+  public static class getTableNamesByNamespace_args implements
+      org.apache.thrift.TBase<getTableNamesByNamespace_args, getTableNamesByNamespace_args._Fields>,
+      java.io.Serializable, Cloneable, Comparable<getTableNamesByNamespace_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableNamesByNamespace_args");
           
          -    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNamesByNamespace_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNamesByNamespace_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableNamesByNamespace_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableNamesByNamespace_argsTupleSchemeFactory();
           
               /**
                * The namesapce's name
                */
               public @org.apache.thrift.annotation.Nullable java.lang.String name; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * The namesapce's name
                  */
          -      NAME((short)1, "name");
          +      NAME((short) 1, "name");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -35758,7 +38874,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // NAME
                       return NAME;
                     default:
          @@ -35767,12 +38883,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -35804,19 +38920,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("name",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNamesByNamespace_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableNamesByNamespace_args.class, metaDataMap);
               }
           
               public getTableNamesByNamespace_args() {
               }
           
          -    public getTableNamesByNamespace_args(
          -      java.lang.String name)
          -    {
          +    public getTableNamesByNamespace_args(java.lang.String name) {
                 this();
                 this.name = name;
               }
          @@ -35850,7 +38969,8 @@ public java.lang.String getName() {
               /**
                * The namesapce's name
                */
          -    public getTableNamesByNamespace_args setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
          +    public getTableNamesByNamespace_args
          +        setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
                 this.name = name;
                 return this;
               }
          @@ -35870,15 +38990,16 @@ public void setNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case NAME:
          -        if (value == null) {
          -          unsetName();
          -        } else {
          -          setName((java.lang.String)value);
          -        }
          -        break;
          +        case NAME:
          +          if (value == null) {
          +            unsetName();
          +          } else {
          +            setName((java.lang.String) value);
          +          }
          +          break;
           
                 }
               }
          @@ -35886,22 +39007,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case NAME:
          -        return getName();
          +        case NAME:
          +          return getName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case NAME:
          -        return isSetName();
          +        case NAME:
          +          return isSetName();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -35909,23 +39033,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableNamesByNamespace_args)
          -        return this.equals((getTableNamesByNamespace_args)that);
          +        return this.equals((getTableNamesByNamespace_args) that);
                 return false;
               }
           
               public boolean equals(getTableNamesByNamespace_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_name = true && this.isSetName();
                 boolean that_present_name = true && that.isSetName();
                 if (this_present_name || that_present_name) {
          -        if (!(this_present_name && that_present_name))
          -          return false;
          -        if (!this.name.equals(that.name))
          -          return false;
          +        if (!(this_present_name && that_present_name)) return false;
          +        if (!this.name.equals(that.name)) return false;
                 }
           
                 return true;
          @@ -35936,8 +39056,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
          -      if (isSetName())
          -        hashCode = hashCode * 8191 + name.hashCode();
          +      if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
           
                 return hashCode;
               }
          @@ -35968,11 +39087,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -35995,42 +39116,48 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (name == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'name' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableNamesByNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesByNamespace_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesByNamespace_argsStandardScheme getScheme() {
                   return new getTableNamesByNamespace_argsStandardScheme();
                 }
               }
           
-    private static class getTableNamesByNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableNamesByNamespace_args> {
+    private static class getTableNamesByNamespace_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<getTableNamesByNamespace_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByNamespace_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableNamesByNamespace_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -36038,7 +39165,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByName
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.name = iprot.readString();
                           struct.setNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -36049,11 +39176,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByName
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesByNamespace_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableNamesByNamespace_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -36068,51 +39197,72 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesByNam
           
               }
           
          -    private static class getTableNamesByNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesByNamespace_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesByNamespace_argsTupleScheme getScheme() {
                   return new getTableNamesByNamespace_argsTupleScheme();
                 }
               }
           
           -    private static class getTableNamesByNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableNamesByNamespace_args> {
           +    private static class getTableNamesByNamespace_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getTableNamesByNamespace_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesByNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesByNamespace_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeString(struct.name);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesByNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesByNamespace_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.name = iprot.readString();
                   struct.setNameIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getTableNamesByNamespace_result implements org.apache.thrift.TBase<getTableNamesByNamespace_result, getTableNamesByNamespace_result._Fields>, java.io.Serializable, Cloneable, Comparable<getTableNamesByNamespace_result>   {
           -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getTableNamesByNamespace_result");
           +  public static class getTableNamesByNamespace_result implements
           +      org.apache.thrift.TBase<getTableNamesByNamespace_result, getTableNamesByNamespace_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getTableNamesByNamespace_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getTableNamesByNamespace_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getTableNamesByNamespace_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getTableNamesByNamespace_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getTableNamesByNamespace_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getTableNamesByNamespace_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<TTableName> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -36125,7 +39275,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -36136,12 +39286,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -36173,23 +39323,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TTableName.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getTableNamesByNamespace_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getTableNamesByNamespace_result.class, metaDataMap);
               }
           
               public getTableNamesByNamespace_result() {
               }
           
           -    public getTableNamesByNamespace_result(
           -      java.util.List<TTableName> success,
           -      TIOError io)
           -    {
           +    public getTableNamesByNamespace_result(java.util.List<TTableName> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -36200,7 +39355,8 @@ public getTableNamesByNamespace_result(
                */
               public getTableNamesByNamespace_result(getTableNamesByNamespace_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<TTableName> __this__success = new java.util.ArrayList<TTableName>(other.success.size());
           +        java.util.List<TTableName> __this__success =
           +            new java.util.ArrayList<TTableName>(other.success.size());
                   for (TTableName other_element : other.success) {
                     __this__success.add(new TTableName(other_element));
                   }
          @@ -36242,7 +39398,8 @@ public java.util.List getSuccess() {
                 return this.success;
               }
           
           -    public getTableNamesByNamespace_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TTableName> success) {
           +    public getTableNamesByNamespace_result
           +        setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TTableName> success) {
                 this.success = success;
                 return this;
               }
          @@ -36267,7 +39424,8 @@ public TIOError getIo() {
                 return this.io;
               }
           
          -    public getTableNamesByNamespace_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
          +    public getTableNamesByNamespace_result
          +        setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
                 this.io = io;
                 return this;
               }
          @@ -36287,23 +39445,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<TTableName>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<TTableName>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -36311,27 +39470,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -36339,32 +39501,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getTableNamesByNamespace_result)
          -        return this.equals((getTableNamesByNamespace_result)that);
          +        return this.equals((getTableNamesByNamespace_result) that);
                 return false;
               }
           
               public boolean equals(getTableNamesByNamespace_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -36375,12 +39531,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -36421,13 +39575,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -36460,35 +39616,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getTableNamesByNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesByNamespace_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesByNamespace_resultStandardScheme getScheme() {
                   return new getTableNamesByNamespace_resultStandardScheme();
                 }
               }
           
           -    private static class getTableNamesByNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getTableNamesByNamespace_result> {
           +    private static class getTableNamesByNamespace_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getTableNamesByNamespace_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByNamespace_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getTableNamesByNamespace_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -36497,9 +39658,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByName
                           {
                             org.apache.thrift.protocol.TList _list310 = iprot.readListBegin();
                          struct.success = new java.util.ArrayList<TTableName>(_list310.size);
          -                  @org.apache.thrift.annotation.Nullable TTableName _elem311;
          -                  for (int _i312 = 0; _i312 < _list310.size; ++_i312)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TTableName _elem311;
          +                  for (int _i312 = 0; _i312 < _list310.size; ++_i312) {
                               _elem311 = new TTableName();
                               _elem311.read(iprot);
                               struct.success.add(_elem311);
          @@ -36507,7 +39668,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByName
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -36516,7 +39677,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByName
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -36527,20 +39688,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getTableNamesByName
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesByNamespace_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getTableNamesByNamespace_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TTableName _iter313 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TTableName _iter313 : struct.success) {
                         _iter313.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -36558,17 +39721,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getTableNamesByNam
           
               }
           
          -    private static class getTableNamesByNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getTableNamesByNamespace_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getTableNamesByNamespace_resultTupleScheme getScheme() {
                   return new getTableNamesByNamespace_resultTupleScheme();
                 }
               }
           
           -    private static class getTableNamesByNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getTableNamesByNamespace_result> {
           +    private static class getTableNamesByNamespace_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getTableNamesByNamespace_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesByNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesByNamespace_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -36580,8 +39747,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesByName
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TTableName _iter314 : struct.success)
          -            {
          +            for (TTableName _iter314 : struct.success) {
                         _iter314.write(oprot);
                       }
                     }
          @@ -36592,16 +39758,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getTableNamesByName
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesByNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getTableNamesByNamespace_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list315 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list315 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                    struct.success = new java.util.ArrayList<TTableName>(_list315.size);
          -            @org.apache.thrift.annotation.Nullable TTableName _elem316;
          -            for (int _i317 = 0; _i317 < _list315.size; ++_i317)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TTableName _elem316;
          +            for (int _i317 = 0; _i317 < _list315.size; ++_i317) {
                         _elem316 = new TTableName();
                         _elem316.read(iprot);
                         struct.success.add(_elem316);
          @@ -36617,19 +39786,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getTableNamesByNames
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class createTable_args implements org.apache.thrift.TBase<createTable_args, createTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<createTable_args>   {
           -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createTable_args");
           +  public static class createTable_args
           +      implements org.apache.thrift.TBase<createTable_args, createTable_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<createTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("createTable_args");
           
          -    private static final org.apache.thrift.protocol.TField DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("desc", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField SPLIT_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("splitKeys", org.apache.thrift.protocol.TType.LIST, (short)2);
          +    private static final org.apache.thrift.protocol.TField DESC_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("desc", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField SPLIT_KEYS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("splitKeys", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new createTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new createTable_argsTupleSchemeFactory();
           
               /**
                * table descriptor for table
          @@ -36640,18 +39821,22 @@ public static class createTable_args implements org.apache.thrift.TBase splitKeys; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * table descriptor for table
                  */
          -      DESC((short)1, "desc"),
          +      DESC((short) 1, "desc"),
                 /**
               * array of split keys for the initial regions of the table
                  */
          -      SPLIT_KEYS((short)2, "splitKeys");
          +      SPLIT_KEYS((short) 2, "splitKeys");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -36664,7 +39849,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // DESC
                       return DESC;
                     case 2: // SPLIT_KEYS
          @@ -36675,12 +39860,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -36712,23 +39897,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.DESC, new org.apache.thrift.meta_data.FieldMetaData("desc", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class)));
          -      tmpMap.put(_Fields.SPLIT_KEYS, new org.apache.thrift.meta_data.FieldMetaData("splitKeys", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , true))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.DESC,
          +        new org.apache.thrift.meta_data.FieldMetaData("desc",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableDescriptor.class)));
          +      tmpMap.put(_Fields.SPLIT_KEYS,
          +        new org.apache.thrift.meta_data.FieldMetaData("splitKeys",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, true))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createTable_args.class,
          +        metaDataMap);
               }
           
               public createTable_args() {
               }
           
           -    public createTable_args(
           -      TTableDescriptor desc,
           -      java.util.List<java.nio.ByteBuffer> splitKeys)
           -    {
           +    public createTable_args(TTableDescriptor desc, java.util.List<java.nio.ByteBuffer> splitKeys) {
                 this();
                 this.desc = desc;
                 this.splitKeys = splitKeys;
          @@ -36742,7 +39932,8 @@ public createTable_args(createTable_args other) {
                   this.desc = new TTableDescriptor(other.desc);
                 }
                 if (other.isSetSplitKeys()) {
           -        java.util.List<java.nio.ByteBuffer> __this__splitKeys = new java.util.ArrayList<java.nio.ByteBuffer>(other.splitKeys);
           +        java.util.List<java.nio.ByteBuffer> __this__splitKeys =
           +            new java.util.ArrayList<java.nio.ByteBuffer>(other.splitKeys);
                   this.splitKeys = __this__splitKeys;
                 }
               }
          @@ -36815,7 +40006,8 @@ public java.util.List getSplitKeys() {
               /**
             * array of split keys for the initial regions of the table
                */
           -    public createTable_args setSplitKeys(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> splitKeys) {
           +    public createTable_args setSplitKeys(
           +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> splitKeys) {
                 this.splitKeys = splitKeys;
                 return this;
               }
          @@ -36835,23 +40027,24 @@ public void setSplitKeysIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case DESC:
          -        if (value == null) {
          -          unsetDesc();
          -        } else {
          -          setDesc((TTableDescriptor)value);
          -        }
          -        break;
          +        case DESC:
          +          if (value == null) {
          +            unsetDesc();
          +          } else {
          +            setDesc((TTableDescriptor) value);
          +          }
          +          break;
           
          -      case SPLIT_KEYS:
          -        if (value == null) {
          -          unsetSplitKeys();
          -        } else {
           -          setSplitKeys((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case SPLIT_KEYS:
          +          if (value == null) {
          +            unsetSplitKeys();
          +          } else {
           +            setSplitKeys((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -36859,60 +40052,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case DESC:
          -        return getDesc();
          +        case DESC:
          +          return getDesc();
           
          -      case SPLIT_KEYS:
          -        return getSplitKeys();
          +        case SPLIT_KEYS:
          +          return getSplitKeys();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case DESC:
          -        return isSetDesc();
          -      case SPLIT_KEYS:
          -        return isSetSplitKeys();
          +        case DESC:
          +          return isSetDesc();
          +        case SPLIT_KEYS:
          +          return isSetSplitKeys();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof createTable_args)
          -        return this.equals((createTable_args)that);
          +      if (that instanceof createTable_args) return this.equals((createTable_args) that);
                 return false;
               }
           
               public boolean equals(createTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_desc = true && this.isSetDesc();
                 boolean that_present_desc = true && that.isSetDesc();
                 if (this_present_desc || that_present_desc) {
          -        if (!(this_present_desc && that_present_desc))
          -          return false;
          -        if (!this.desc.equals(that.desc))
          -          return false;
          +        if (!(this_present_desc && that_present_desc)) return false;
          +        if (!this.desc.equals(that.desc)) return false;
                 }
           
                 boolean this_present_splitKeys = true && this.isSetSplitKeys();
                 boolean that_present_splitKeys = true && that.isSetSplitKeys();
                 if (this_present_splitKeys || that_present_splitKeys) {
          -        if (!(this_present_splitKeys && that_present_splitKeys))
          -          return false;
          -        if (!this.splitKeys.equals(that.splitKeys))
          -          return false;
          +        if (!(this_present_splitKeys && that_present_splitKeys)) return false;
          +        if (!this.splitKeys.equals(that.splitKeys)) return false;
                 }
           
                 return true;
          @@ -36923,12 +40112,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetDesc()) ? 131071 : 524287);
          -      if (isSetDesc())
          -        hashCode = hashCode * 8191 + desc.hashCode();
          +      if (isSetDesc()) hashCode = hashCode * 8191 + desc.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetSplitKeys()) ? 131071 : 524287);
          -      if (isSetSplitKeys())
          -        hashCode = hashCode * 8191 + splitKeys.hashCode();
          +      if (isSetSplitKeys()) hashCode = hashCode * 8191 + splitKeys.hashCode();
           
                 return hashCode;
               }
          @@ -36969,11 +40156,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -37004,7 +40193,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (desc == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'desc' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'desc' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (desc != null) {
          @@ -37014,35 +40204,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class createTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createTable_argsStandardScheme getScheme() {
                   return new createTable_argsStandardScheme();
                 }
               }
           
           -    private static class createTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<createTable_args> {
           +    private static class createTable_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<createTable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -37051,7 +40246,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args st
                           struct.desc = new TTableDescriptor();
                           struct.desc.read(iprot);
                           struct.setDescIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -37060,16 +40255,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args st
                           {
                             org.apache.thrift.protocol.TList _list318 = iprot.readListBegin();
                          struct.splitKeys = new java.util.ArrayList<java.nio.ByteBuffer>(_list318.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem319;
          -                  for (int _i320 = 0; _i320 < _list318.size; ++_i320)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem319;
          +                  for (int _i320 = 0; _i320 < _list318.size; ++_i320) {
                               _elem319 = iprot.readBinary();
                               struct.splitKeys.add(_elem319);
                             }
                             iprot.readListEnd();
                           }
                           struct.setSplitKeysIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -37080,11 +40275,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -37096,9 +40293,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_args s
                   if (struct.splitKeys != null) {
                     oprot.writeFieldBegin(SPLIT_KEYS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.splitKeys.size()));
          -            for (java.nio.ByteBuffer _iter321 : struct.splitKeys)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.splitKeys.size()));
          +            for (java.nio.ByteBuffer _iter321 : struct.splitKeys) {
                         oprot.writeBinary(_iter321);
                       }
                       oprot.writeListEnd();
          @@ -37111,17 +40308,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_args s
           
               }
           
          -    private static class createTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createTable_argsTupleScheme getScheme() {
                   return new createTable_argsTupleScheme();
                 }
               }
           
           -    private static class createTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<createTable_args> {
           +    private static class createTable_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<createTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, createTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, createTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.desc.write(oprot);
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSplitKeys()) {
          @@ -37131,8 +40332,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, createTable_args st
                   if (struct.isSetSplitKeys()) {
                     {
                       oprot.writeI32(struct.splitKeys.size());
          -            for (java.nio.ByteBuffer _iter322 : struct.splitKeys)
          -            {
          +            for (java.nio.ByteBuffer _iter322 : struct.splitKeys) {
                         oprot.writeBinary(_iter322);
                       }
                     }
          @@ -37140,19 +40340,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, createTable_args st
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, createTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, createTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.desc = new TTableDescriptor();
                   struct.desc.read(iprot);
                   struct.setDescIsSet(true);
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list323 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list323 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                    struct.splitKeys = new java.util.ArrayList<java.nio.ByteBuffer>(_list323.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem324;
          -            for (int _i325 = 0; _i325 < _list323.size; ++_i325)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem324;
          +            for (int _i325 = 0; _i325 < _list323.size; ++_i325) {
                         _elem324 = iprot.readBinary();
                         struct.splitKeys.add(_elem324);
                       }
          @@ -37162,26 +40365,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, createTable_args str
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class createTable_result implements org.apache.thrift.TBase<createTable_result, createTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<createTable_result> {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createTable_result");
          +  public static class createTable_result
           +      implements org.apache.thrift.TBase<createTable_result, createTable_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<createTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("createTable_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createTable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new createTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new createTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -37194,7 +40411,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -37203,12 +40420,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -37240,19 +40457,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createTable_result.class,
          +        metaDataMap);
               }
           
               public createTable_result() {
               }
           
          -    public createTable_result(
          -      TIOError io)
          -    {
          +    public createTable_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -37300,15 +40520,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -37316,46 +40537,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof createTable_result)
          -        return this.equals((createTable_result)that);
          +      if (that instanceof createTable_result) return this.equals((createTable_result) that);
                 return false;
               }
           
               public boolean equals(createTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -37366,8 +40585,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -37398,13 +40616,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -37429,35 +40649,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class createTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createTable_resultStandardScheme getScheme() {
                   return new createTable_resultStandardScheme();
                 }
               }
           
           -    private static class createTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<createTable_result> {
          +    private static class createTable_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<createTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -37466,7 +40691,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -37477,11 +40702,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createTable_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -37496,17 +40723,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createTable_result
           
               }
           
          -    private static class createTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createTable_resultTupleScheme getScheme() {
                   return new createTable_resultTupleScheme();
                 }
               }
           
           -    private static class createTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<createTable_result> {
          +    private static class createTable_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<createTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, createTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, createTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -37518,8 +40749,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, createTable_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, createTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, createTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -37529,32 +40762,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, createTable_result s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class deleteTable_args implements org.apache.thrift.TBase<deleteTable_args, deleteTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteTable_args> {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteTable_args");
          +  public static class deleteTable_args
           +      implements org.apache.thrift.TBase<deleteTable_args, deleteTable_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<deleteTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteTable_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteTable_argsTupleSchemeFactory();
           
               /**
                * the tablename to delete
                */
               public @org.apache.thrift.annotation.Nullable TTableName tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename to delete
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -37567,7 +40814,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -37576,12 +40823,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -37613,19 +40860,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteTable_args.class,
          +        metaDataMap);
               }
           
               public deleteTable_args() {
               }
           
          -    public deleteTable_args(
          -      TTableName tableName)
          -    {
          +    public deleteTable_args(TTableName tableName) {
                 this();
                 this.tableName = tableName;
               }
          @@ -37659,7 +40909,8 @@ public TTableName getTableName() {
               /**
                * the tablename to delete
                */
          -    public deleteTable_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public deleteTable_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -37679,15 +40930,16 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
                 }
               }
          @@ -37695,46 +40947,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteTable_args)
          -        return this.equals((deleteTable_args)that);
          +      if (that instanceof deleteTable_args) return this.equals((deleteTable_args) that);
                 return false;
               }
           
               public boolean equals(deleteTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -37745,8 +40995,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -37777,11 +41026,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -37804,7 +41055,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -37814,35 +41066,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteTable_argsStandardScheme getScheme() {
                   return new deleteTable_argsStandardScheme();
                 }
               }
           
           -    private static class deleteTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteTable_args> {
          +    private static class deleteTable_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<deleteTable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -37851,7 +41108,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_args st
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -37862,11 +41119,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -37881,49 +41140,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_args s
           
               }
           
          -    private static class deleteTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteTable_argsTupleScheme getScheme() {
                   return new deleteTable_argsTupleScheme();
                 }
               }
           
           -    private static class deleteTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteTable_args> {
          +    private static class deleteTable_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<deleteTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class deleteTable_result implements org.apache.thrift.TBase<deleteTable_result, deleteTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteTable_result> {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteTable_result");
          +  public static class deleteTable_result
           +      implements org.apache.thrift.TBase<deleteTable_result, deleteTable_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<deleteTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteTable_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteTable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -37936,7 +41215,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -37945,12 +41224,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -37982,19 +41261,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteTable_result.class,
          +        metaDataMap);
               }
           
               public deleteTable_result() {
               }
           
          -    public deleteTable_result(
          -      TIOError io)
          -    {
          +    public deleteTable_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -38042,15 +41324,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -38058,46 +41341,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteTable_result)
          -        return this.equals((deleteTable_result)that);
          +      if (that instanceof deleteTable_result) return this.equals((deleteTable_result) that);
                 return false;
               }
           
               public boolean equals(deleteTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -38108,8 +41389,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -38140,13 +41420,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -38171,35 +41453,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteTable_resultStandardScheme getScheme() {
                   return new deleteTable_resultStandardScheme();
                 }
               }
           
           -    private static class deleteTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteTable_result> {
          +    private static class deleteTable_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<deleteTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -38208,7 +41495,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -38219,11 +41506,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteTable_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -38238,17 +41527,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteTable_result
           
               }
           
          -    private static class deleteTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteTable_resultTupleScheme getScheme() {
                   return new deleteTable_resultTupleScheme();
                 }
               }
           
           -    private static class deleteTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteTable_result> {
          +    private static class deleteTable_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<deleteTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -38260,8 +41553,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteTable_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -38271,41 +41566,57 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteTable_result s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class truncateTable_args implements org.apache.thrift.TBase<truncateTable_args, truncateTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<truncateTable_args> {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncateTable_args");
          +  public static class truncateTable_args
           +      implements org.apache.thrift.TBase<truncateTable_args, truncateTable_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<truncateTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("truncateTable_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField PRESERVE_SPLITS_FIELD_DESC = new org.apache.thrift.protocol.TField("preserveSplits", org.apache.thrift.protocol.TType.BOOL, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField PRESERVE_SPLITS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("preserveSplits",
          +            org.apache.thrift.protocol.TType.BOOL, (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new truncateTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new truncateTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new truncateTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new truncateTable_argsTupleSchemeFactory();
           
               /**
                * the tablename to truncate
                */
               public @org.apache.thrift.annotation.Nullable TTableName tableName; // required
               /**
          -     * whether to  preserve previous splits
          +     * whether to preserve previous splits
                */
               public boolean preserveSplits; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename to truncate
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
          -       * whether to  preserve previous splits
          +       * whether to preserve previous splits
                  */
          -      PRESERVE_SPLITS((short)2, "preserveSplits");
          +      PRESERVE_SPLITS((short) 2, "preserveSplits");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -38318,7 +41629,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // PRESERVE_SPLITS
          @@ -38329,12 +41640,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -38368,22 +41679,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          -      tmpMap.put(_Fields.PRESERVE_SPLITS, new org.apache.thrift.meta_data.FieldMetaData("preserveSplits", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
          +      tmpMap.put(_Fields.PRESERVE_SPLITS,
          +        new org.apache.thrift.meta_data.FieldMetaData("preserveSplits",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncateTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncateTable_args.class,
          +        metaDataMap);
               }
           
               public truncateTable_args() {
               }
           
          -    public truncateTable_args(
          -      TTableName tableName,
          -      boolean preserveSplits)
          -    {
          +    public truncateTable_args(TTableName tableName, boolean preserveSplits) {
                 this();
                 this.tableName = tableName;
                 this.preserveSplits = preserveSplits;
          @@ -38423,7 +41739,8 @@ public TTableName getTableName() {
               /**
                * the tablename to truncate
                */
          -    public truncateTable_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public truncateTable_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -38444,14 +41761,14 @@ public void setTableNameIsSet(boolean value) {
               }
           
               /**
          -     * whether to  preserve previous splits
          +     * whether to preserve previous splits
                */
               public boolean isPreserveSplits() {
                 return this.preserveSplits;
               }
           
               /**
          -     * whether to  preserve previous splits
          +     * whether to preserve previous splits
                */
               public truncateTable_args setPreserveSplits(boolean preserveSplits) {
                 this.preserveSplits = preserveSplits;
          @@ -38460,35 +41777,40 @@ public truncateTable_args setPreserveSplits(boolean preserveSplits) {
               }
           
               public void unsetPreserveSplits() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __PRESERVESPLITS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __PRESERVESPLITS_ISSET_ID);
               }
           
          -    /** Returns true if field preserveSplits is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field preserveSplits is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetPreserveSplits() {
                 return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __PRESERVESPLITS_ISSET_ID);
               }
           
               public void setPreserveSplitsIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __PRESERVESPLITS_ISSET_ID, value);
          +      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +        __PRESERVESPLITS_ISSET_ID, value);
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
          -      case PRESERVE_SPLITS:
          -        if (value == null) {
          -          unsetPreserveSplits();
          -        } else {
          -          setPreserveSplits((java.lang.Boolean)value);
          -        }
          -        break;
          +        case PRESERVE_SPLITS:
          +          if (value == null) {
          +            unsetPreserveSplits();
          +          } else {
          +            setPreserveSplits((java.lang.Boolean) value);
          +          }
          +          break;
           
                 }
               }
          @@ -38496,60 +41818,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case PRESERVE_SPLITS:
          -        return isPreserveSplits();
          +        case PRESERVE_SPLITS:
          +          return isPreserveSplits();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case PRESERVE_SPLITS:
          -        return isSetPreserveSplits();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case PRESERVE_SPLITS:
          +          return isSetPreserveSplits();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof truncateTable_args)
          -        return this.equals((truncateTable_args)that);
          +      if (that instanceof truncateTable_args) return this.equals((truncateTable_args) that);
                 return false;
               }
           
               public boolean equals(truncateTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_preserveSplits = true;
                 boolean that_present_preserveSplits = true;
                 if (this_present_preserveSplits || that_present_preserveSplits) {
          -        if (!(this_present_preserveSplits && that_present_preserveSplits))
          -          return false;
          -        if (this.preserveSplits != that.preserveSplits)
          -          return false;
          +        if (!(this_present_preserveSplits && that_present_preserveSplits)) return false;
          +        if (this.preserveSplits != that.preserveSplits) return false;
                 }
           
                 return true;
          @@ -38560,8 +41878,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((preserveSplits) ? 131071 : 524287);
           
          @@ -38586,12 +41903,14 @@ public int compareTo(truncateTable_args other) {
                     return lastComparison;
                   }
                 }
          -      lastComparison = java.lang.Boolean.compare(isSetPreserveSplits(), other.isSetPreserveSplits());
          +      lastComparison =
          +          java.lang.Boolean.compare(isSetPreserveSplits(), other.isSetPreserveSplits());
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
                 if (isSetPreserveSplits()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.preserveSplits, other.preserveSplits);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.preserveSplits, other.preserveSplits);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -38604,11 +41923,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -38635,9 +41956,11 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
          -      // alas, we cannot check 'preserveSplits' because it's a primitive and you chose the non-beans generator.
          +      // alas, we cannot check 'preserveSplits' because it's a primitive and you chose the non-beans
          +      // generator.
                 // check for sub-struct validity
                 if (tableName != null) {
                   tableName.validate();
          @@ -38646,37 +41969,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class truncateTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class truncateTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public truncateTable_argsStandardScheme getScheme() {
                   return new truncateTable_argsStandardScheme();
                 }
               }
           
          -    private static class truncateTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<truncateTable_args> {
          +    private static class truncateTable_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<truncateTable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, truncateTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, truncateTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -38685,7 +42014,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncateTable_args
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -38693,7 +42022,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncateTable_args
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.preserveSplits = iprot.readBool();
                           struct.setPreserveSplitsIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -38704,14 +42033,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncateTable_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   if (!struct.isSetPreserveSplits()) {
          -          throw new org.apache.thrift.protocol.TProtocolException("Required field 'preserveSplits' was not found in serialized data! Struct: " + toString());
          +          throw new org.apache.thrift.protocol.TProtocolException(
          +              "Required field 'preserveSplits' was not found in serialized data! Struct: "
          +                  + toString());
                   }
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, truncateTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, truncateTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -38729,24 +42062,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncateTable_args
           
               }
           
          -    private static class truncateTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class truncateTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public truncateTable_argsTupleScheme getScheme() {
                   return new truncateTable_argsTupleScheme();
                 }
               }
           
          -    private static class truncateTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<truncateTable_args> {
          +    private static class truncateTable_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<truncateTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, truncateTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, truncateTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                   oprot.writeBool(struct.preserveSplits);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, truncateTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, truncateTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
          @@ -38755,26 +42094,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncateTable_args s
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class truncateTable_result implements org.apache.thrift.TBase<truncateTable_result, truncateTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<truncateTable_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncateTable_result");
          +  public static class truncateTable_result
          +      implements org.apache.thrift.TBase<truncateTable_result, truncateTable_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<truncateTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("truncateTable_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new truncateTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new truncateTable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new truncateTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new truncateTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -38787,7 +42140,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -38796,12 +42149,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -38833,19 +42186,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncateTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncateTable_result.class,
          +        metaDataMap);
               }
           
               public truncateTable_result() {
               }
           
          -    public truncateTable_result(
          -      TIOError io)
          -    {
          +    public truncateTable_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -38893,15 +42249,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -38909,46 +42266,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof truncateTable_result)
          -        return this.equals((truncateTable_result)that);
          +      if (that instanceof truncateTable_result) return this.equals((truncateTable_result) that);
                 return false;
               }
           
               public boolean equals(truncateTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -38959,8 +42314,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -38991,13 +42345,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -39022,35 +42378,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class truncateTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class truncateTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public truncateTable_resultStandardScheme getScheme() {
                   return new truncateTable_resultStandardScheme();
                 }
               }
           
          -    private static class truncateTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<truncateTable_result> {
          +    private static class truncateTable_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<truncateTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, truncateTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, truncateTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -39059,7 +42420,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncateTable_resul
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -39070,11 +42431,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncateTable_resul
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, truncateTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, truncateTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -39089,17 +42452,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncateTable_resu
           
               }
           
          -    private static class truncateTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class truncateTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public truncateTable_resultTupleScheme getScheme() {
                   return new truncateTable_resultTupleScheme();
                 }
               }
           
          -    private static class truncateTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<truncateTable_result> {
          +    private static class truncateTable_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<truncateTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, truncateTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, truncateTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -39111,8 +42478,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncateTable_resul
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, truncateTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, truncateTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -39122,32 +42491,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncateTable_result
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class enableTable_args implements org.apache.thrift.TBase<enableTable_args, enableTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<enableTable_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("enableTable_args");
          +  public static class enableTable_args
          +      implements org.apache.thrift.TBase<enableTable_args, enableTable_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<enableTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("enableTable_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new enableTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new enableTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new enableTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new enableTable_argsTupleSchemeFactory();
           
               /**
                * the tablename to enable
                */
               public @org.apache.thrift.annotation.Nullable TTableName tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename to enable
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -39160,7 +42543,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -39169,12 +42552,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -39206,19 +42589,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(enableTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(enableTable_args.class,
          +        metaDataMap);
               }
           
               public enableTable_args() {
               }
           
          -    public enableTable_args(
          -      TTableName tableName)
          -    {
          +    public enableTable_args(TTableName tableName) {
                 this();
                 this.tableName = tableName;
               }
          @@ -39252,7 +42638,8 @@ public TTableName getTableName() {
               /**
                * the tablename to enable
                */
          -    public enableTable_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public enableTable_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -39272,15 +42659,16 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
                 }
               }
          @@ -39288,46 +42676,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof enableTable_args)
          -        return this.equals((enableTable_args)that);
          +      if (that instanceof enableTable_args) return this.equals((enableTable_args) that);
                 return false;
               }
           
               public boolean equals(enableTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -39338,8 +42724,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -39370,11 +42755,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -39397,7 +42784,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -39407,35 +42795,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class enableTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class enableTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public enableTable_argsStandardScheme getScheme() {
                   return new enableTable_argsStandardScheme();
                 }
               }
           
          -    private static class enableTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<enableTable_args> {
          +    private static class enableTable_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<enableTable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -39444,7 +42837,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_args st
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -39455,11 +42848,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -39474,49 +42869,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_args s
           
               }
           
          -    private static class enableTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class enableTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public enableTable_argsTupleScheme getScheme() {
                   return new enableTable_argsTupleScheme();
                 }
               }
           
          -    private static class enableTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<enableTable_args> {
          +    private static class enableTable_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<enableTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class enableTable_result implements org.apache.thrift.TBase<enableTable_result, enableTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<enableTable_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("enableTable_result");
          +  public static class enableTable_result
          +      implements org.apache.thrift.TBase<enableTable_result, enableTable_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<enableTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("enableTable_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new enableTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new enableTable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new enableTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new enableTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -39529,7 +42944,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -39538,12 +42953,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -39575,19 +42990,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(enableTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(enableTable_result.class,
          +        metaDataMap);
               }
           
               public enableTable_result() {
               }
           
          -    public enableTable_result(
          -      TIOError io)
          -    {
          +    public enableTable_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -39635,15 +43053,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -39651,46 +43070,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof enableTable_result)
          -        return this.equals((enableTable_result)that);
          +      if (that instanceof enableTable_result) return this.equals((enableTable_result) that);
                 return false;
               }
           
               public boolean equals(enableTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -39701,8 +43118,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -39733,13 +43149,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -39764,35 +43182,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class enableTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class enableTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public enableTable_resultStandardScheme getScheme() {
                   return new enableTable_resultStandardScheme();
                 }
               }
           
          -    private static class enableTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<enableTable_result> {
          +    private static class enableTable_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<enableTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -39801,7 +43224,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -39812,11 +43235,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, enableTable_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -39831,17 +43256,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, enableTable_result
           
               }
           
          -    private static class enableTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class enableTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public enableTable_resultTupleScheme getScheme() {
                   return new enableTable_resultTupleScheme();
                 }
               }
           
          -    private static class enableTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<enableTable_result> {
          +    private static class enableTable_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<enableTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -39853,8 +43282,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, enableTable_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -39864,32 +43295,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, enableTable_result s
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class disableTable_args implements org.apache.thrift.TBase<disableTable_args, disableTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<disableTable_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("disableTable_args");
          +  public static class disableTable_args
          +      implements org.apache.thrift.TBase<disableTable_args, disableTable_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<disableTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("disableTable_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new disableTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new disableTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new disableTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new disableTable_argsTupleSchemeFactory();
           
               /**
                * the tablename to disable
                */
               public @org.apache.thrift.annotation.Nullable TTableName tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename to disable
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -39902,7 +43347,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -39911,12 +43356,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -39948,19 +43393,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_args.class,
          +        metaDataMap);
               }
           
               public disableTable_args() {
               }
           
          -    public disableTable_args(
          -      TTableName tableName)
          -    {
          +    public disableTable_args(TTableName tableName) {
                 this();
                 this.tableName = tableName;
               }
          @@ -39994,7 +43442,8 @@ public TTableName getTableName() {
               /**
                * the tablename to disable
                */
          -    public disableTable_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public disableTable_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -40014,15 +43463,16 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
                 }
               }
          @@ -40030,46 +43480,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof disableTable_args)
          -        return this.equals((disableTable_args)that);
          +      if (that instanceof disableTable_args) return this.equals((disableTable_args) that);
                 return false;
               }
           
               public boolean equals(disableTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -40080,8 +43528,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -40112,11 +43559,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -40139,7 +43588,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -40149,35 +43599,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class disableTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class disableTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public disableTable_argsStandardScheme getScheme() {
                   return new disableTable_argsStandardScheme();
                 }
               }
           
          -    private static class disableTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme {
          +    private static class disableTable_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -40186,7 +43641,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args s
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -40197,11 +43652,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -40216,49 +43673,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_args
           
               }
           
          -    private static class disableTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class disableTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public disableTable_argsTupleScheme getScheme() {
                   return new disableTable_argsTupleScheme();
                 }
               }
           
          -    private static class disableTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<disableTable_args> {
          +    private static class disableTable_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<disableTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class disableTable_result implements org.apache.thrift.TBase<disableTable_result, disableTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<disableTable_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("disableTable_result");
          +  public static class disableTable_result
          +      implements org.apache.thrift.TBase<disableTable_result, disableTable_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<disableTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("disableTable_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new disableTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new disableTable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new disableTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new disableTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -40271,7 +43748,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -40280,12 +43757,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -40317,19 +43794,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(disableTable_result.class,
          +        metaDataMap);
               }
           
               public disableTable_result() {
               }
           
          -    public disableTable_result(
          -      TIOError io)
          -    {
          +    public disableTable_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -40377,15 +43857,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -40393,46 +43874,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof disableTable_result)
          -        return this.equals((disableTable_result)that);
          +      if (that instanceof disableTable_result) return this.equals((disableTable_result) that);
                 return false;
               }
           
               public boolean equals(disableTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -40443,8 +43922,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -40475,13 +43953,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -40506,35 +43986,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class disableTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class disableTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public disableTable_resultStandardScheme getScheme() {
                   return new disableTable_resultStandardScheme();
                 }
               }
           
           -    private static class disableTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<disableTable_result> {
           +    private static class disableTable_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<disableTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -40543,7 +44028,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -40554,11 +44039,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, disableTable_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -40573,17 +44060,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, disableTable_resul
           
               }
           
          -    private static class disableTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class disableTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public disableTable_resultTupleScheme getScheme() {
                   return new disableTable_resultTupleScheme();
                 }
               }
           
           -    private static class disableTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<disableTable_result> {
           +    private static class disableTable_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<disableTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -40595,8 +44086,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, disableTable_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -40606,32 +44099,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, disableTable_result
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class isTableEnabled_args implements org.apache.thrift.TBase<isTableEnabled_args, isTableEnabled_args._Fields>, java.io.Serializable, Cloneable, Comparable<isTableEnabled_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableEnabled_args");
          +  public static class isTableEnabled_args
           +      implements org.apache.thrift.TBase<isTableEnabled_args, isTableEnabled_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<isTableEnabled_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableEnabled_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableEnabled_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableEnabled_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableEnabled_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableEnabled_argsTupleSchemeFactory();
           
               /**
                * the tablename to check
                */
               public @org.apache.thrift.annotation.Nullable TTableName tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename to check
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -40644,7 +44151,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -40653,12 +44160,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -40690,19 +44197,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_args.class,
          +        metaDataMap);
               }
           
               public isTableEnabled_args() {
               }
           
          -    public isTableEnabled_args(
          -      TTableName tableName)
          -    {
          +    public isTableEnabled_args(TTableName tableName) {
                 this();
                 this.tableName = tableName;
               }
          @@ -40736,7 +44246,8 @@ public TTableName getTableName() {
               /**
                * the tablename to check
                */
          -    public isTableEnabled_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public isTableEnabled_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -40756,15 +44267,16 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
                 }
               }
          @@ -40772,46 +44284,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof isTableEnabled_args)
          -        return this.equals((isTableEnabled_args)that);
          +      if (that instanceof isTableEnabled_args) return this.equals((isTableEnabled_args) that);
                 return false;
               }
           
               public boolean equals(isTableEnabled_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -40822,8 +44332,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -40854,11 +44363,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -40881,7 +44392,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -40891,35 +44403,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableEnabled_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableEnabled_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableEnabled_argsStandardScheme getScheme() {
                   return new isTableEnabled_argsStandardScheme();
                 }
               }
           
           -    private static class isTableEnabled_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableEnabled_args> {
           +    private static class isTableEnabled_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<isTableEnabled_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -40928,7 +44445,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_args
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -40939,11 +44456,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -40958,52 +44477,73 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_arg
           
               }
           
          -    private static class isTableEnabled_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableEnabled_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableEnabled_argsTupleScheme getScheme() {
                   return new isTableEnabled_argsTupleScheme();
                 }
               }
           
           -    private static class isTableEnabled_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableEnabled_args> {
           +    private static class isTableEnabled_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<isTableEnabled_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class isTableEnabled_result implements org.apache.thrift.TBase<isTableEnabled_result, isTableEnabled_result._Fields>, java.io.Serializable, Cloneable, Comparable<isTableEnabled_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableEnabled_result");
          +  public static class isTableEnabled_result
           +      implements org.apache.thrift.TBase<isTableEnabled_result, isTableEnabled_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<isTableEnabled_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableEnabled_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableEnabled_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableEnabled_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableEnabled_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableEnabled_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -41016,7 +44556,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -41027,12 +44567,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -41066,22 +44606,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableEnabled_result.class,
          +        metaDataMap);
               }
           
               public isTableEnabled_result() {
               }
           
          -    public isTableEnabled_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public isTableEnabled_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -41121,7 +44666,8 @@ public isTableEnabled_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -41130,7 +44676,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -41158,23 +44705,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -41182,60 +44730,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof isTableEnabled_result)
          -        return this.equals((isTableEnabled_result)that);
          +      if (that instanceof isTableEnabled_result) return this.equals((isTableEnabled_result) that);
                 return false;
               }
           
               public boolean equals(isTableEnabled_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -41248,8 +44792,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -41290,13 +44833,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -41325,37 +44870,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableEnabled_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableEnabled_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableEnabled_resultStandardScheme getScheme() {
                   return new isTableEnabled_resultStandardScheme();
                 }
               }
           
           -    private static class isTableEnabled_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableEnabled_result> {
           +    private static class isTableEnabled_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<isTableEnabled_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -41363,7 +44914,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_resu
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -41372,7 +44923,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_resu
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -41383,11 +44934,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableEnabled_resu
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -41407,17 +44960,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableEnabled_res
           
               }
           
          -    private static class isTableEnabled_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableEnabled_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableEnabled_resultTupleScheme getScheme() {
                   return new isTableEnabled_resultTupleScheme();
                 }
               }
           
           -    private static class isTableEnabled_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableEnabled_result> {
           +    private static class isTableEnabled_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<isTableEnabled_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -41435,8 +44992,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_resu
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -41450,32 +45009,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isTableEnabled_resul
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class isTableDisabled_args implements org.apache.thrift.TBase<isTableDisabled_args, isTableDisabled_args._Fields>, java.io.Serializable, Cloneable, Comparable<isTableDisabled_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableDisabled_args");
          +  public static class isTableDisabled_args
           +      implements org.apache.thrift.TBase<isTableDisabled_args, isTableDisabled_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<isTableDisabled_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableDisabled_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableDisabled_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableDisabled_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableDisabled_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableDisabled_argsTupleSchemeFactory();
           
               /**
                * the tablename to check
                */
               public @org.apache.thrift.annotation.Nullable TTableName tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename to check
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -41488,7 +45061,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -41497,12 +45070,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -41534,19 +45107,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableDisabled_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableDisabled_args.class,
          +        metaDataMap);
               }
           
               public isTableDisabled_args() {
               }
           
          -    public isTableDisabled_args(
          -      TTableName tableName)
          -    {
          +    public isTableDisabled_args(TTableName tableName) {
                 this();
                 this.tableName = tableName;
               }
          @@ -41580,7 +45156,8 @@ public TTableName getTableName() {
               /**
                * the tablename to check
                */
          -    public isTableDisabled_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public isTableDisabled_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -41600,15 +45177,16 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
                 }
               }
          @@ -41616,46 +45194,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof isTableDisabled_args)
          -        return this.equals((isTableDisabled_args)that);
          +      if (that instanceof isTableDisabled_args) return this.equals((isTableDisabled_args) that);
                 return false;
               }
           
               public boolean equals(isTableDisabled_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -41666,8 +45242,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -41698,11 +45273,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -41725,7 +45302,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -41735,35 +45313,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableDisabled_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableDisabled_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableDisabled_argsStandardScheme getScheme() {
                   return new isTableDisabled_argsStandardScheme();
                 }
               }
           
           -    private static class isTableDisabled_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableDisabled_args> {
          +    private static class isTableDisabled_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<isTableDisabled_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableDisabled_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableDisabled_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -41772,7 +45355,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableDisabled_arg
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -41783,11 +45366,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableDisabled_arg
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableDisabled_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableDisabled_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -41802,52 +45387,73 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableDisabled_ar
           
               }
           
          -    private static class isTableDisabled_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableDisabled_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableDisabled_argsTupleScheme getScheme() {
                   return new isTableDisabled_argsTupleScheme();
                 }
               }
           
           -    private static class isTableDisabled_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableDisabled_args> {
          +    private static class isTableDisabled_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<isTableDisabled_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class isTableDisabled_result implements org.apache.thrift.TBase<isTableDisabled_result, isTableDisabled_result._Fields>, java.io.Serializable, Cloneable, Comparable<isTableDisabled_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableDisabled_result");
          +  public static class isTableDisabled_result
           +      implements org.apache.thrift.TBase<isTableDisabled_result, isTableDisabled_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<isTableDisabled_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableDisabled_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableDisabled_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableDisabled_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableDisabled_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableDisabled_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -41860,7 +45466,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -41871,12 +45477,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -41910,22 +45516,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableDisabled_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableDisabled_result.class,
          +        metaDataMap);
               }
           
               public isTableDisabled_result() {
               }
           
          -    public isTableDisabled_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public isTableDisabled_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -41965,7 +45576,8 @@ public isTableDisabled_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -41974,7 +45586,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -42002,23 +45615,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -42026,60 +45640,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof isTableDisabled_result)
          -        return this.equals((isTableDisabled_result)that);
          +      if (that instanceof isTableDisabled_result) return this.equals((isTableDisabled_result) that);
                 return false;
               }
           
               public boolean equals(isTableDisabled_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -42092,8 +45702,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -42134,13 +45743,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -42169,37 +45780,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableDisabled_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableDisabled_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableDisabled_resultStandardScheme getScheme() {
                   return new isTableDisabled_resultStandardScheme();
                 }
               }
           
           -    private static class isTableDisabled_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableDisabled_result> {
          +    private static class isTableDisabled_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<isTableDisabled_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableDisabled_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableDisabled_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -42207,7 +45824,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableDisabled_res
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -42216,7 +45833,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableDisabled_res
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -42227,11 +45844,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableDisabled_res
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableDisabled_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableDisabled_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -42251,17 +45870,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableDisabled_re
           
               }
           
          -    private static class isTableDisabled_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableDisabled_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableDisabled_resultTupleScheme getScheme() {
                   return new isTableDisabled_resultTupleScheme();
                 }
               }
           
           -    private static class isTableDisabled_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableDisabled_result> {
          +    private static class isTableDisabled_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<isTableDisabled_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -42279,8 +45902,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_res
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -42294,32 +45919,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isTableDisabled_resu
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class isTableAvailable_args implements org.apache.thrift.TBase<isTableAvailable_args, isTableAvailable_args._Fields>, java.io.Serializable, Cloneable, Comparable<isTableAvailable_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableAvailable_args");
          +  public static class isTableAvailable_args
           +      implements org.apache.thrift.TBase<isTableAvailable_args, isTableAvailable_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<isTableAvailable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableAvailable_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableAvailable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableAvailable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableAvailable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableAvailable_argsTupleSchemeFactory();
           
               /**
                * the tablename to check
                */
               public @org.apache.thrift.annotation.Nullable TTableName tableName; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename to check
                  */
          -      TABLE_NAME((short)1, "tableName");
          +      TABLE_NAME((short) 1, "tableName");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -42332,7 +45971,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     default:
          @@ -42341,12 +45980,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -42378,19 +46017,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableAvailable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableAvailable_args.class,
          +        metaDataMap);
               }
           
               public isTableAvailable_args() {
               }
           
          -    public isTableAvailable_args(
          -      TTableName tableName)
          -    {
          +    public isTableAvailable_args(TTableName tableName) {
                 this();
                 this.tableName = tableName;
               }
          @@ -42424,7 +46066,8 @@ public TTableName getTableName() {
               /**
                * the tablename to check
                */
          -    public isTableAvailable_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public isTableAvailable_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -42444,15 +46087,16 @@ public void setTableNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
                 }
               }
          @@ -42460,46 +46104,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          +        case TABLE_NAME:
          +          return isSetTableName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof isTableAvailable_args)
          -        return this.equals((isTableAvailable_args)that);
          +      if (that instanceof isTableAvailable_args) return this.equals((isTableAvailable_args) that);
                 return false;
               }
           
               public boolean equals(isTableAvailable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 return true;
          @@ -42510,8 +46152,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 return hashCode;
               }
          @@ -42542,11 +46183,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -42569,7 +46212,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -42579,35 +46223,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableAvailable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableAvailable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableAvailable_argsStandardScheme getScheme() {
                   return new isTableAvailable_argsStandardScheme();
                 }
               }
           
           -    private static class isTableAvailable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableAvailable_args> {
          +    private static class isTableAvailable_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<isTableAvailable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -42616,7 +46265,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailable_ar
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -42627,11 +46276,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailable_ar
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -42646,52 +46297,73 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailable_a
           
               }
           
          -    private static class isTableAvailable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableAvailable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableAvailable_argsTupleScheme getScheme() {
                   return new isTableAvailable_argsTupleScheme();
                 }
               }
           
           -    private static class isTableAvailable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableAvailable_args> {
          +    private static class isTableAvailable_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<isTableAvailable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class isTableAvailable_result implements org.apache.thrift.TBase<isTableAvailable_result, isTableAvailable_result._Fields>, java.io.Serializable, Cloneable, Comparable<isTableAvailable_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableAvailable_result");
          +  public static class isTableAvailable_result
           +      implements org.apache.thrift.TBase<isTableAvailable_result, isTableAvailable_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<isTableAvailable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableAvailable_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableAvailable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableAvailable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableAvailable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableAvailable_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -42704,7 +46376,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -42715,12 +46387,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -42754,22 +46426,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableAvailable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableAvailable_result.class,
          +        metaDataMap);
               }
           
               public isTableAvailable_result() {
               }
           
          -    public isTableAvailable_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public isTableAvailable_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -42809,7 +46486,8 @@ public isTableAvailable_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -42818,7 +46496,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -42846,23 +46525,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -42870,27 +46550,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -42898,32 +46581,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof isTableAvailable_result)
          -        return this.equals((isTableAvailable_result)that);
          +        return this.equals((isTableAvailable_result) that);
                 return false;
               }
           
               public boolean equals(isTableAvailable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -42936,8 +46613,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -42978,13 +46654,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -43013,37 +46691,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableAvailable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableAvailable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableAvailable_resultStandardScheme getScheme() {
                   return new isTableAvailable_resultStandardScheme();
                 }
               }
           
           -    private static class isTableAvailable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableAvailable_result> {
          +    private static class isTableAvailable_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<isTableAvailable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -43051,7 +46735,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailable_re
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -43060,7 +46744,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailable_re
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -43071,11 +46755,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailable_re
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -43095,17 +46781,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailable_r
           
               }
           
          -    private static class isTableAvailable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableAvailable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableAvailable_resultTupleScheme getScheme() {
                   return new isTableAvailable_resultTupleScheme();
                 }
               }
           
           -    private static class isTableAvailable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableAvailable_result> {
          +    private static class isTableAvailable_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<isTableAvailable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -43123,8 +46813,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_re
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -43138,19 +46830,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailable_res
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class isTableAvailableWithSplit_args implements org.apache.thrift.TBase<isTableAvailableWithSplit_args, isTableAvailableWithSplit_args._Fields>, java.io.Serializable, Cloneable, Comparable<isTableAvailableWithSplit_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableAvailableWithSplit_args");
          +  public static class isTableAvailableWithSplit_args implements
           +      org.apache.thrift.TBase<isTableAvailableWithSplit_args, isTableAvailableWithSplit_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<isTableAvailableWithSplit_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableAvailableWithSplit_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField SPLIT_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("splitKeys", org.apache.thrift.protocol.TType.LIST, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField SPLIT_KEYS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("splitKeys", org.apache.thrift.protocol.TType.LIST,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableAvailableWithSplit_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableAvailableWithSplit_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableAvailableWithSplit_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableAvailableWithSplit_argsTupleSchemeFactory();
           
               /**
                * the tablename to check
          @@ -43161,18 +46865,22 @@ public static class isTableAvailableWithSplit_args implements org.apache.thrift.
                */
            public @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> splitKeys; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the tablename to check
                  */
          -      TABLE_NAME((short)1, "tableName"),
          +      TABLE_NAME((short) 1, "tableName"),
                 /**
                  * keys to check if the table has been created with all split keys
                  */
          -      SPLIT_KEYS((short)2, "splitKeys");
          +      SPLIT_KEYS((short) 2, "splitKeys");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -43185,7 +46893,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // SPLIT_KEYS
          @@ -43196,12 +46904,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -43233,23 +46941,29 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          -      tmpMap.put(_Fields.SPLIT_KEYS, new org.apache.thrift.meta_data.FieldMetaData("splitKeys", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING              , true))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
          +      tmpMap.put(_Fields.SPLIT_KEYS,
          +        new org.apache.thrift.meta_data.FieldMetaData("splitKeys",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING, true))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableAvailableWithSplit_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(isTableAvailableWithSplit_args.class, metaDataMap);
               }
           
               public isTableAvailableWithSplit_args() {
               }
           
          -    public isTableAvailableWithSplit_args(
          -      TTableName tableName,
           -      java.util.List<java.nio.ByteBuffer> splitKeys)
          -    {
          +    public isTableAvailableWithSplit_args(TTableName tableName,
           +        java.util.List<java.nio.ByteBuffer> splitKeys) {
                 this();
                 this.tableName = tableName;
                 this.splitKeys = splitKeys;
          @@ -43263,7 +46977,8 @@ public isTableAvailableWithSplit_args(isTableAvailableWithSplit_args other) {
                   this.tableName = new TTableName(other.tableName);
                 }
                 if (other.isSetSplitKeys()) {
           -        java.util.List<java.nio.ByteBuffer> __this__splitKeys = new java.util.ArrayList<java.nio.ByteBuffer>(other.splitKeys);
           +        java.util.List<java.nio.ByteBuffer> __this__splitKeys =
           +            new java.util.ArrayList<java.nio.ByteBuffer>(other.splitKeys);
                   this.splitKeys = __this__splitKeys;
                 }
               }
          @@ -43289,7 +47004,8 @@ public TTableName getTableName() {
               /**
                * the tablename to check
                */
          -    public isTableAvailableWithSplit_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public isTableAvailableWithSplit_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -43336,7 +47052,8 @@ public java.util.List getSplitKeys() {
               /**
                * keys to check if the table has been created with all split keys
                */
           -    public isTableAvailableWithSplit_args setSplitKeys(@org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> splitKeys) {
          +    public isTableAvailableWithSplit_args setSplitKeys(
           +        @org.apache.thrift.annotation.Nullable java.util.List<java.nio.ByteBuffer> splitKeys) {
                 this.splitKeys = splitKeys;
                 return this;
               }
          @@ -43356,23 +47073,24 @@ public void setSplitKeysIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
          -      case SPLIT_KEYS:
          -        if (value == null) {
          -          unsetSplitKeys();
          -        } else {
           -          setSplitKeys((java.util.List<java.nio.ByteBuffer>)value);
          -        }
          -        break;
          +        case SPLIT_KEYS:
          +          if (value == null) {
          +            unsetSplitKeys();
          +          } else {
           +            setSplitKeys((java.util.List<java.nio.ByteBuffer>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -43380,27 +47098,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case SPLIT_KEYS:
          -        return getSplitKeys();
          +        case SPLIT_KEYS:
          +          return getSplitKeys();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case SPLIT_KEYS:
          -        return isSetSplitKeys();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case SPLIT_KEYS:
          +          return isSetSplitKeys();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -43408,32 +47129,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof isTableAvailableWithSplit_args)
          -        return this.equals((isTableAvailableWithSplit_args)that);
          +        return this.equals((isTableAvailableWithSplit_args) that);
                 return false;
               }
           
               public boolean equals(isTableAvailableWithSplit_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_splitKeys = true && this.isSetSplitKeys();
                 boolean that_present_splitKeys = true && that.isSetSplitKeys();
                 if (this_present_splitKeys || that_present_splitKeys) {
          -        if (!(this_present_splitKeys && that_present_splitKeys))
          -          return false;
          -        if (!this.splitKeys.equals(that.splitKeys))
          -          return false;
          +        if (!(this_present_splitKeys && that_present_splitKeys)) return false;
          +        if (!this.splitKeys.equals(that.splitKeys)) return false;
                 }
           
                 return true;
          @@ -43444,12 +47159,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetSplitKeys()) ? 131071 : 524287);
          -      if (isSetSplitKeys())
          -        hashCode = hashCode * 8191 + splitKeys.hashCode();
          +      if (isSetSplitKeys()) hashCode = hashCode * 8191 + splitKeys.hashCode();
           
                 return hashCode;
               }
          @@ -43490,11 +47203,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -43525,7 +47240,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -43535,35 +47251,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableAvailableWithSplit_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableAvailableWithSplit_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableAvailableWithSplit_argsStandardScheme getScheme() {
                   return new isTableAvailableWithSplit_argsStandardScheme();
                 }
               }
           
           -    private static class isTableAvailableWithSplit_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableAvailableWithSplit_args> {
          +    private static class isTableAvailableWithSplit_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<isTableAvailableWithSplit_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -43572,7 +47293,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -43581,16 +47302,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit
                           {
                             org.apache.thrift.protocol.TList _list326 = iprot.readListBegin();
                              struct.splitKeys = new java.util.ArrayList<java.nio.ByteBuffer>(_list326.size);
          -                  @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem327;
          -                  for (int _i328 = 0; _i328 < _list326.size; ++_i328)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.nio.ByteBuffer _elem327;
          +                  for (int _i328 = 0; _i328 < _list326.size; ++_i328) {
                               _elem327 = iprot.readBinary();
                               struct.splitKeys.add(_elem327);
                             }
                             iprot.readListEnd();
                           }
                           struct.setSplitKeysIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -43601,11 +47322,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -43617,9 +47340,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWi
                   if (struct.splitKeys != null) {
                     oprot.writeFieldBegin(SPLIT_KEYS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.splitKeys.size()));
          -            for (java.nio.ByteBuffer _iter329 : struct.splitKeys)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.splitKeys.size()));
          +            for (java.nio.ByteBuffer _iter329 : struct.splitKeys) {
                         oprot.writeBinary(_iter329);
                       }
                       oprot.writeListEnd();
          @@ -43632,17 +47355,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWi
           
               }
           
          -    private static class isTableAvailableWithSplit_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableAvailableWithSplit_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableAvailableWithSplit_argsTupleScheme getScheme() {
                   return new isTableAvailableWithSplit_argsTupleScheme();
                 }
               }
           
           -    private static class isTableAvailableWithSplit_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableAvailableWithSplit_args> {
          +    private static class isTableAvailableWithSplit_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<isTableAvailableWithSplit_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSplitKeys()) {
          @@ -43652,8 +47379,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWit
                   if (struct.isSetSplitKeys()) {
                     {
                       oprot.writeI32(struct.splitKeys.size());
          -            for (java.nio.ByteBuffer _iter330 : struct.splitKeys)
          -            {
          +            for (java.nio.ByteBuffer _iter330 : struct.splitKeys) {
                         oprot.writeBinary(_iter330);
                       }
                     }
          @@ -43661,19 +47387,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWit
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          isTableAvailableWithSplit_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list331 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list331 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                        struct.splitKeys = new java.util.ArrayList<java.nio.ByteBuffer>(_list331.size);
          -            @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _elem332;
          -            for (int _i333 = 0; _i333 < _list331.size; ++_i333)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.nio.ByteBuffer _elem332;
          +            for (int _i333 = 0; _i333 < _list331.size; ++_i333) {
                         _elem332 = iprot.readBinary();
                         struct.splitKeys.add(_elem332);
                       }
          @@ -43683,29 +47412,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWith
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class isTableAvailableWithSplit_result implements org.apache.thrift.TBase<isTableAvailableWithSplit_result, isTableAvailableWithSplit_result._Fields>, java.io.Serializable, Cloneable, Comparable<isTableAvailableWithSplit_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("isTableAvailableWithSplit_result");
          +  public static class isTableAvailableWithSplit_result implements
           +      org.apache.thrift.TBase<isTableAvailableWithSplit_result, isTableAvailableWithSplit_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<isTableAvailableWithSplit_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("isTableAvailableWithSplit_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new isTableAvailableWithSplit_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new isTableAvailableWithSplit_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new isTableAvailableWithSplit_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new isTableAvailableWithSplit_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -43718,7 +47462,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -43729,12 +47473,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -43768,22 +47512,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(isTableAvailableWithSplit_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(isTableAvailableWithSplit_result.class, metaDataMap);
               }
           
               public isTableAvailableWithSplit_result() {
               }
           
          -    public isTableAvailableWithSplit_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public isTableAvailableWithSplit_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -43823,7 +47572,8 @@ public isTableAvailableWithSplit_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -43832,7 +47582,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -43840,7 +47591,8 @@ public TIOError getIo() {
                 return this.io;
               }
           
          -    public isTableAvailableWithSplit_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
          +    public isTableAvailableWithSplit_result
          +        setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
                 this.io = io;
                 return this;
               }
          @@ -43860,23 +47612,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -43884,27 +47637,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -43912,32 +47668,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof isTableAvailableWithSplit_result)
          -        return this.equals((isTableAvailableWithSplit_result)that);
          +        return this.equals((isTableAvailableWithSplit_result) that);
                 return false;
               }
           
               public boolean equals(isTableAvailableWithSplit_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -43950,8 +47700,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -43992,13 +47741,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -44027,37 +47778,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class isTableAvailableWithSplit_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableAvailableWithSplit_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableAvailableWithSplit_resultStandardScheme getScheme() {
                   return new isTableAvailableWithSplit_resultStandardScheme();
                 }
               }
           
           -    private static class isTableAvailableWithSplit_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<isTableAvailableWithSplit_result> {
           +    private static class isTableAvailableWithSplit_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<isTableAvailableWithSplit_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -44065,7 +47822,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -44074,7 +47831,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -44085,11 +47842,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isTableAvailableWit
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -44109,17 +47868,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isTableAvailableWi
           
               }
           
          -    private static class isTableAvailableWithSplit_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class isTableAvailableWithSplit_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public isTableAvailableWithSplit_resultTupleScheme getScheme() {
                   return new isTableAvailableWithSplit_resultTupleScheme();
                 }
               }
           
           -    private static class isTableAvailableWithSplit_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<isTableAvailableWithSplit_result> {
           +    private static class isTableAvailableWithSplit_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<isTableAvailableWithSplit_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -44137,8 +47900,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWit
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          isTableAvailableWithSplit_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -44152,19 +47917,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isTableAvailableWith
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class addColumnFamily_args implements org.apache.thrift.TBase<addColumnFamily_args, addColumnFamily_args._Fields>, java.io.Serializable, Cloneable, Comparable<addColumnFamily_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("addColumnFamily_args");
          +  public static class addColumnFamily_args
           +      implements org.apache.thrift.TBase<addColumnFamily_args, addColumnFamily_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<addColumnFamily_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("addColumnFamily_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new addColumnFamily_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new addColumnFamily_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new addColumnFamily_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new addColumnFamily_argsTupleSchemeFactory();
           
               /**
                * the tablename to add column family to
           @@ -44175,18 +47952,22 @@ public static class addColumnFamily_args implements org.apache.thrift.TBase<addColumnFamily_args, addColumnFamily_args._Fields>, java.io.Serializable, Cloneable, Comparable<addColumnFamily_args>   {
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -44199,7 +47980,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // COLUMN
          @@ -44210,12 +47991,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -44247,22 +48028,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnFamilyDescriptor.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumnFamilyDescriptor.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(addColumnFamily_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(addColumnFamily_args.class,
          +        metaDataMap);
               }
           
               public addColumnFamily_args() {
               }
           
          -    public addColumnFamily_args(
          -      TTableName tableName,
          -      TColumnFamilyDescriptor column)
          -    {
          +    public addColumnFamily_args(TTableName tableName, TColumnFamilyDescriptor column) {
                 this();
                 this.tableName = tableName;
                 this.column = column;
          @@ -44301,7 +48087,8 @@ public TTableName getTableName() {
               /**
                * the tablename to add column family to
                */
          -    public addColumnFamily_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public addColumnFamily_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -44332,7 +48119,8 @@ public TColumnFamilyDescriptor getColumn() {
               /**
                * column family descriptor of column family to be added
                */
          -    public addColumnFamily_args setColumn(@org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column) {
          +    public addColumnFamily_args
          +        setColumn(@org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column) {
                 this.column = column;
                 return this;
               }
          @@ -44352,23 +48140,24 @@ public void setColumnIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          setColumn((TColumnFamilyDescriptor)value);
          -        }
          -        break;
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
          +          } else {
          +            setColumn((TColumnFamilyDescriptor) value);
          +          }
          +          break;
           
                 }
               }
          @@ -44376,60 +48165,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case COLUMN:
          -        return isSetColumn();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case COLUMN:
          +          return isSetColumn();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof addColumnFamily_args)
          -        return this.equals((addColumnFamily_args)that);
          +      if (that instanceof addColumnFamily_args) return this.equals((addColumnFamily_args) that);
                 return false;
               }
           
               public boolean equals(addColumnFamily_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 return true;
          @@ -44440,12 +48225,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 return hashCode;
               }
          @@ -44486,11 +48269,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -44521,10 +48306,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 if (column == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'column' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'column' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -44537,35 +48324,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class addColumnFamily_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class addColumnFamily_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public addColumnFamily_argsStandardScheme getScheme() {
                   return new addColumnFamily_argsStandardScheme();
                 }
               }
           
           -    private static class addColumnFamily_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<addColumnFamily_args> {
           +    private static class addColumnFamily_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<addColumnFamily_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -44574,7 +48366,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_arg
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -44583,7 +48375,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_arg
                           struct.column = new TColumnFamilyDescriptor();
                           struct.column.read(iprot);
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -44594,11 +48386,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_arg
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -44618,24 +48412,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_ar
           
               }
           
          -    private static class addColumnFamily_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class addColumnFamily_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public addColumnFamily_argsTupleScheme getScheme() {
                   return new addColumnFamily_argsTupleScheme();
                 }
               }
           
           -    private static class addColumnFamily_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<addColumnFamily_args> {
           +    private static class addColumnFamily_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<addColumnFamily_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                   struct.column.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
          @@ -44645,26 +48445,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_args
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class addColumnFamily_result implements org.apache.thrift.TBase<addColumnFamily_result, addColumnFamily_result._Fields>, java.io.Serializable, Cloneable, Comparable<addColumnFamily_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("addColumnFamily_result");
          +  public static class addColumnFamily_result
           +      implements org.apache.thrift.TBase<addColumnFamily_result, addColumnFamily_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<addColumnFamily_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("addColumnFamily_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new addColumnFamily_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new addColumnFamily_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new addColumnFamily_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new addColumnFamily_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -44677,7 +48491,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -44686,12 +48500,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -44723,19 +48537,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(addColumnFamily_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(addColumnFamily_result.class,
          +        metaDataMap);
               }
           
               public addColumnFamily_result() {
               }
           
          -    public addColumnFamily_result(
          -      TIOError io)
          -    {
          +    public addColumnFamily_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -44783,15 +48600,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -44799,46 +48617,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof addColumnFamily_result)
          -        return this.equals((addColumnFamily_result)that);
          +      if (that instanceof addColumnFamily_result) return this.equals((addColumnFamily_result) that);
                 return false;
               }
           
               public boolean equals(addColumnFamily_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -44849,8 +48665,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -44881,13 +48696,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -44912,35 +48729,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class addColumnFamily_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class addColumnFamily_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public addColumnFamily_resultStandardScheme getScheme() {
                   return new addColumnFamily_resultStandardScheme();
                 }
               }
           
           -    private static class addColumnFamily_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<addColumnFamily_result> {
           +    private static class addColumnFamily_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<addColumnFamily_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -44949,7 +48771,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_res
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -44960,11 +48782,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, addColumnFamily_res
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -44979,17 +48803,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, addColumnFamily_re
           
               }
           
          -    private static class addColumnFamily_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class addColumnFamily_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public addColumnFamily_resultTupleScheme getScheme() {
                   return new addColumnFamily_resultTupleScheme();
                 }
               }
           
           -    private static class addColumnFamily_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<addColumnFamily_result> {
           +    private static class addColumnFamily_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<addColumnFamily_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -45001,8 +48829,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_res
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -45012,19 +48842,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, addColumnFamily_resu
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class deleteColumnFamily_args implements org.apache.thrift.TBase<deleteColumnFamily_args, deleteColumnFamily_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteColumnFamily_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteColumnFamily_args");
          +  public static class deleteColumnFamily_args
+      implements org.apache.thrift.TBase<deleteColumnFamily_args, deleteColumnFamily_args._Fields>,
+      java.io.Serializable, Cloneable, Comparable<deleteColumnFamily_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteColumnFamily_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRING,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteColumnFamily_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteColumnFamily_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteColumnFamily_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteColumnFamily_argsTupleSchemeFactory();
           
               /**
                * the tablename to delete column family from
@@ -45035,18 +48877,22 @@ public static class deleteColumnFamily_args implements org.apache.thrift.TBase
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -45059,7 +48905,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // COLUMN
          @@ -45070,12 +48916,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -45107,22 +48953,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteColumnFamily_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteColumnFamily_args.class,
          +        metaDataMap);
               }
           
               public deleteColumnFamily_args() {
               }
           
          -    public deleteColumnFamily_args(
          -      TTableName tableName,
          -      java.nio.ByteBuffer column)
          -    {
          +    public deleteColumnFamily_args(TTableName tableName, java.nio.ByteBuffer column) {
                 this();
                 this.tableName = tableName;
                 this.column = org.apache.thrift.TBaseHelper.copyBinary(column);
          @@ -45161,7 +49012,8 @@ public TTableName getTableName() {
               /**
                * the tablename to delete column family from
                */
          -    public deleteColumnFamily_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public deleteColumnFamily_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -45197,11 +49049,13 @@ public java.nio.ByteBuffer bufferForColumn() {
                * name of column family to be deleted
                */
               public deleteColumnFamily_args setColumn(byte[] column) {
          -      this.column = column == null ? (java.nio.ByteBuffer)null     : java.nio.ByteBuffer.wrap(column.clone());
          +      this.column =
          +          column == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(column.clone());
                 return this;
               }
           
          -    public deleteColumnFamily_args setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
          +    public deleteColumnFamily_args
          +        setColumn(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer column) {
                 this.column = org.apache.thrift.TBaseHelper.copyBinary(column);
                 return this;
               }
          @@ -45221,27 +49075,28 @@ public void setColumnIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          if (value instanceof byte[]) {
          -            setColumn((byte[])value);
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
                     } else {
          -            setColumn((java.nio.ByteBuffer)value);
          +            if (value instanceof byte[]) {
          +              setColumn((byte[]) value);
          +            } else {
          +              setColumn((java.nio.ByteBuffer) value);
          +            }
                     }
          -        }
          -        break;
          +          break;
           
                 }
               }
          @@ -45249,27 +49104,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case COLUMN:
          -        return isSetColumn();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case COLUMN:
          +          return isSetColumn();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -45277,32 +49135,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof deleteColumnFamily_args)
          -        return this.equals((deleteColumnFamily_args)that);
          +        return this.equals((deleteColumnFamily_args) that);
                 return false;
               }
           
               public boolean equals(deleteColumnFamily_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 return true;
          @@ -45313,12 +49165,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 return hashCode;
               }
          @@ -45359,11 +49209,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -45394,10 +49246,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 if (column == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'column' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'column' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -45407,35 +49261,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteColumnFamily_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteColumnFamily_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteColumnFamily_argsStandardScheme getScheme() {
                   return new deleteColumnFamily_argsStandardScheme();
                 }
               }
           
-    private static class deleteColumnFamily_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteColumnFamily_args> {
          +    private static class deleteColumnFamily_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<deleteColumnFamily_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -45444,7 +49303,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -45452,7 +49311,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.column = iprot.readBinary();
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -45463,11 +49322,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -45487,24 +49348,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily
           
               }
           
          -    private static class deleteColumnFamily_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteColumnFamily_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteColumnFamily_argsTupleScheme getScheme() {
                   return new deleteColumnFamily_argsTupleScheme();
                 }
               }
           
-    private static class deleteColumnFamily_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteColumnFamily_args> {
          +    private static class deleteColumnFamily_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<deleteColumnFamily_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                   oprot.writeBinary(struct.column);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
          @@ -45513,26 +49380,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_a
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class deleteColumnFamily_result implements org.apache.thrift.TBase<deleteColumnFamily_result, deleteColumnFamily_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteColumnFamily_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteColumnFamily_result");
          +  public static class deleteColumnFamily_result implements
+      org.apache.thrift.TBase<deleteColumnFamily_result, deleteColumnFamily_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<deleteColumnFamily_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteColumnFamily_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteColumnFamily_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteColumnFamily_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteColumnFamily_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteColumnFamily_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -45545,7 +49426,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -45554,12 +49435,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -45591,19 +49472,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteColumnFamily_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(deleteColumnFamily_result.class, metaDataMap);
               }
           
               public deleteColumnFamily_result() {
               }
           
          -    public deleteColumnFamily_result(
          -      TIOError io)
          -    {
          +    public deleteColumnFamily_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -45651,15 +49535,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -45667,22 +49552,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -45690,23 +49578,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof deleteColumnFamily_result)
          -        return this.equals((deleteColumnFamily_result)that);
          +        return this.equals((deleteColumnFamily_result) that);
                 return false;
               }
           
               public boolean equals(deleteColumnFamily_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -45717,8 +49601,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -45749,13 +49632,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -45780,35 +49665,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteColumnFamily_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteColumnFamily_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteColumnFamily_resultStandardScheme getScheme() {
                   return new deleteColumnFamily_resultStandardScheme();
                 }
               }
           
-    private static class deleteColumnFamily_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteColumnFamily_result> {
          +    private static class deleteColumnFamily_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<deleteColumnFamily_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -45817,7 +49707,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -45828,11 +49718,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteColumnFamily_
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          deleteColumnFamily_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -45847,17 +49739,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteColumnFamily
           
               }
           
          -    private static class deleteColumnFamily_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteColumnFamily_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteColumnFamily_resultTupleScheme getScheme() {
                   return new deleteColumnFamily_resultTupleScheme();
                 }
               }
           
-    private static class deleteColumnFamily_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteColumnFamily_result> {
          +    private static class deleteColumnFamily_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<deleteColumnFamily_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -45869,8 +49765,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -45880,19 +49778,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteColumnFamily_r
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class modifyColumnFamily_args implements org.apache.thrift.TBase<modifyColumnFamily_args, modifyColumnFamily_args._Fields>, java.io.Serializable, Cloneable, Comparable<modifyColumnFamily_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyColumnFamily_args");
          +  public static class modifyColumnFamily_args
+      implements org.apache.thrift.TBase<modifyColumnFamily_args, modifyColumnFamily_args._Fields>,
+      java.io.Serializable, Cloneable, Comparable<modifyColumnFamily_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("modifyColumnFamily_args");
           
          -    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC = new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField COLUMN_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("column", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyColumnFamily_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyColumnFamily_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new modifyColumnFamily_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new modifyColumnFamily_argsTupleSchemeFactory();
           
               /**
                * the tablename to modify column family
@@ -45903,18 +49813,22 @@ public static class modifyColumnFamily_args implements org.apache.thrift.TBase
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -45927,7 +49841,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // TABLE_NAME
                       return TABLE_NAME;
                     case 2: // COLUMN
          @@ -45938,12 +49852,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -45975,22 +49889,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          -      tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnFamilyDescriptor.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.TABLE_NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableName.class)));
          +      tmpMap.put(_Fields.COLUMN,
          +        new org.apache.thrift.meta_data.FieldMetaData("column",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumnFamilyDescriptor.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyColumnFamily_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyColumnFamily_args.class,
          +        metaDataMap);
               }
           
               public modifyColumnFamily_args() {
               }
           
          -    public modifyColumnFamily_args(
          -      TTableName tableName,
          -      TColumnFamilyDescriptor column)
          -    {
          +    public modifyColumnFamily_args(TTableName tableName, TColumnFamilyDescriptor column) {
                 this();
                 this.tableName = tableName;
                 this.column = column;
          @@ -46029,7 +49948,8 @@ public TTableName getTableName() {
               /**
                * the tablename to modify column family
                */
          -    public modifyColumnFamily_args setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +    public modifyColumnFamily_args
          +        setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
                 this.tableName = tableName;
                 return this;
               }
          @@ -46060,7 +49980,8 @@ public TColumnFamilyDescriptor getColumn() {
               /**
                * column family descriptor of column family to be modified
                */
          -    public modifyColumnFamily_args setColumn(@org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column) {
          +    public modifyColumnFamily_args
          +        setColumn(@org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor column) {
                 this.column = column;
                 return this;
               }
          @@ -46080,23 +50001,24 @@ public void setColumnIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case TABLE_NAME:
          -        if (value == null) {
          -          unsetTableName();
          -        } else {
          -          setTableName((TTableName)value);
          -        }
          -        break;
          +        case TABLE_NAME:
          +          if (value == null) {
          +            unsetTableName();
          +          } else {
          +            setTableName((TTableName) value);
          +          }
          +          break;
           
          -      case COLUMN:
          -        if (value == null) {
          -          unsetColumn();
          -        } else {
          -          setColumn((TColumnFamilyDescriptor)value);
          -        }
          -        break;
          +        case COLUMN:
          +          if (value == null) {
          +            unsetColumn();
          +          } else {
          +            setColumn((TColumnFamilyDescriptor) value);
          +          }
          +          break;
           
                 }
               }
          @@ -46104,27 +50026,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case TABLE_NAME:
          -        return getTableName();
          +        case TABLE_NAME:
          +          return getTableName();
           
          -      case COLUMN:
          -        return getColumn();
          +        case COLUMN:
          +          return getColumn();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case TABLE_NAME:
          -        return isSetTableName();
          -      case COLUMN:
          -        return isSetColumn();
          +        case TABLE_NAME:
          +          return isSetTableName();
          +        case COLUMN:
          +          return isSetColumn();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -46132,32 +50057,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof modifyColumnFamily_args)
          -        return this.equals((modifyColumnFamily_args)that);
          +        return this.equals((modifyColumnFamily_args) that);
                 return false;
               }
           
               public boolean equals(modifyColumnFamily_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_tableName = true && this.isSetTableName();
                 boolean that_present_tableName = true && that.isSetTableName();
                 if (this_present_tableName || that_present_tableName) {
          -        if (!(this_present_tableName && that_present_tableName))
          -          return false;
          -        if (!this.tableName.equals(that.tableName))
          -          return false;
          +        if (!(this_present_tableName && that_present_tableName)) return false;
          +        if (!this.tableName.equals(that.tableName)) return false;
                 }
           
                 boolean this_present_column = true && this.isSetColumn();
                 boolean that_present_column = true && that.isSetColumn();
                 if (this_present_column || that_present_column) {
          -        if (!(this_present_column && that_present_column))
          -          return false;
          -        if (!this.column.equals(that.column))
          -          return false;
          +        if (!(this_present_column && that_present_column)) return false;
          +        if (!this.column.equals(that.column)) return false;
                 }
           
                 return true;
          @@ -46168,12 +50087,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -      if (isSetTableName())
          -        hashCode = hashCode * 8191 + tableName.hashCode();
          +      if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetColumn()) ? 131071 : 524287);
          -      if (isSetColumn())
          -        hashCode = hashCode * 8191 + column.hashCode();
          +      if (isSetColumn()) hashCode = hashCode * 8191 + column.hashCode();
           
                 return hashCode;
               }
          @@ -46214,11 +50131,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -46249,10 +50168,12 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (tableName == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'tableName' was not present! Struct: " + toString());
                 }
                 if (column == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'column' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'column' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (tableName != null) {
          @@ -46265,35 +50186,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class modifyColumnFamily_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyColumnFamily_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyColumnFamily_argsStandardScheme getScheme() {
                   return new modifyColumnFamily_argsStandardScheme();
                 }
               }
           
          -    private static class modifyColumnFamily_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<modifyColumnFamily_args> {
          +    private static class modifyColumnFamily_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<modifyColumnFamily_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -46302,7 +50228,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_
                           struct.tableName = new TTableName();
                           struct.tableName.read(iprot);
                           struct.setTableNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -46311,7 +50237,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_
                           struct.column = new TColumnFamilyDescriptor();
                           struct.column.read(iprot);
                           struct.setColumnIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -46322,11 +50248,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -46346,24 +50274,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily
           
               }
           
          -    private static class modifyColumnFamily_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyColumnFamily_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyColumnFamily_argsTupleScheme getScheme() {
                   return new modifyColumnFamily_argsTupleScheme();
                 }
               }
           
          -    private static class modifyColumnFamily_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<modifyColumnFamily_args> {
          +    private static class modifyColumnFamily_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<modifyColumnFamily_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName.write(oprot);
                   struct.column.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.tableName = new TTableName();
                   struct.tableName.read(iprot);
                   struct.setTableNameIsSet(true);
          @@ -46373,26 +50307,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_a
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class modifyColumnFamily_result implements org.apache.thrift.TBase<modifyColumnFamily_result, modifyColumnFamily_result._Fields>, java.io.Serializable, Cloneable, Comparable<modifyColumnFamily_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyColumnFamily_result");
          +  public static class modifyColumnFamily_result implements
          +      org.apache.thrift.TBase<modifyColumnFamily_result, modifyColumnFamily_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<modifyColumnFamily_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("modifyColumnFamily_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyColumnFamily_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyColumnFamily_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new modifyColumnFamily_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new modifyColumnFamily_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -46405,7 +50353,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -46414,12 +50362,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -46451,19 +50399,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyColumnFamily_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(modifyColumnFamily_result.class, metaDataMap);
               }
           
               public modifyColumnFamily_result() {
               }
           
          -    public modifyColumnFamily_result(
          -      TIOError io)
          -    {
          +    public modifyColumnFamily_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -46511,15 +50462,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -46527,22 +50479,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -46550,23 +50505,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof modifyColumnFamily_result)
          -        return this.equals((modifyColumnFamily_result)that);
          +        return this.equals((modifyColumnFamily_result) that);
                 return false;
               }
           
               public boolean equals(modifyColumnFamily_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -46577,8 +50528,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -46609,13 +50559,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -46640,35 +50592,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class modifyColumnFamily_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyColumnFamily_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyColumnFamily_resultStandardScheme getScheme() {
                   return new modifyColumnFamily_resultStandardScheme();
                 }
               }
           
          -    private static class modifyColumnFamily_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<modifyColumnFamily_result> {
          +    private static class modifyColumnFamily_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<modifyColumnFamily_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -46677,7 +50634,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -46688,11 +50645,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyColumnFamily_
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          modifyColumnFamily_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -46707,17 +50666,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyColumnFamily
           
               }
           
          -    private static class modifyColumnFamily_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyColumnFamily_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyColumnFamily_resultTupleScheme getScheme() {
                   return new modifyColumnFamily_resultTupleScheme();
                 }
               }
           
          -    private static class modifyColumnFamily_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<modifyColumnFamily_result> {
          +    private static class modifyColumnFamily_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<modifyColumnFamily_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -46729,8 +50692,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -46740,32 +50705,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, modifyColumnFamily_r
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class modifyTable_args implements org.apache.thrift.TBase<modifyTable_args, modifyTable_args._Fields>, java.io.Serializable, Cloneable, Comparable<modifyTable_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyTable_args");
          +  public static class modifyTable_args
          +      implements org.apache.thrift.TBase<modifyTable_args, modifyTable_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<modifyTable_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("modifyTable_args");
           
          -    private static final org.apache.thrift.protocol.TField DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("desc", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField DESC_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("desc", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyTable_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyTable_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new modifyTable_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new modifyTable_argsTupleSchemeFactory();
           
               /**
                * the descriptor of the table to modify
                */
               public @org.apache.thrift.annotation.Nullable TTableDescriptor desc; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * the descriptor of the table to modify
                  */
          -      DESC((short)1, "desc");
          +      DESC((short) 1, "desc");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -46778,7 +50757,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // DESC
                       return DESC;
                     default:
          @@ -46787,12 +50766,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -46824,19 +50803,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.DESC, new org.apache.thrift.meta_data.FieldMetaData("desc", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableDescriptor.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.DESC,
          +        new org.apache.thrift.meta_data.FieldMetaData("desc",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTableDescriptor.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyTable_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyTable_args.class,
          +        metaDataMap);
               }
           
               public modifyTable_args() {
               }
           
          -    public modifyTable_args(
          -      TTableDescriptor desc)
          -    {
          +    public modifyTable_args(TTableDescriptor desc) {
                 this();
                 this.desc = desc;
               }
          @@ -46890,15 +50872,16 @@ public void setDescIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case DESC:
          -        if (value == null) {
          -          unsetDesc();
          -        } else {
          -          setDesc((TTableDescriptor)value);
          -        }
          -        break;
          +        case DESC:
          +          if (value == null) {
          +            unsetDesc();
          +          } else {
          +            setDesc((TTableDescriptor) value);
          +          }
          +          break;
           
                 }
               }
          @@ -46906,46 +50889,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case DESC:
          -        return getDesc();
          +        case DESC:
          +          return getDesc();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case DESC:
          -        return isSetDesc();
          +        case DESC:
          +          return isSetDesc();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof modifyTable_args)
          -        return this.equals((modifyTable_args)that);
          +      if (that instanceof modifyTable_args) return this.equals((modifyTable_args) that);
                 return false;
               }
           
               public boolean equals(modifyTable_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_desc = true && this.isSetDesc();
                 boolean that_present_desc = true && that.isSetDesc();
                 if (this_present_desc || that_present_desc) {
          -        if (!(this_present_desc && that_present_desc))
          -          return false;
          -        if (!this.desc.equals(that.desc))
          -          return false;
          +        if (!(this_present_desc && that_present_desc)) return false;
          +        if (!this.desc.equals(that.desc)) return false;
                 }
           
                 return true;
          @@ -46956,8 +50937,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetDesc()) ? 131071 : 524287);
          -      if (isSetDesc())
          -        hashCode = hashCode * 8191 + desc.hashCode();
          +      if (isSetDesc()) hashCode = hashCode * 8191 + desc.hashCode();
           
                 return hashCode;
               }
          @@ -46988,11 +50968,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -47015,7 +50997,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (desc == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'desc' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'desc' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (desc != null) {
          @@ -47025,35 +51008,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class modifyTable_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyTable_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyTable_argsStandardScheme getScheme() {
                   return new modifyTable_argsStandardScheme();
                 }
               }
           
          -    private static class modifyTable_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<modifyTable_args> {
          +    private static class modifyTable_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<modifyTable_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -47062,7 +51050,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_args st
                           struct.desc = new TTableDescriptor();
                           struct.desc.read(iprot);
                           struct.setDescIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -47073,11 +51061,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_args st
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -47092,49 +51082,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_args s
           
               }
           
          -    private static class modifyTable_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyTable_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyTable_argsTupleScheme getScheme() {
                   return new modifyTable_argsTupleScheme();
                 }
               }
           
          -    private static class modifyTable_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<modifyTable_args> {
          +    private static class modifyTable_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<modifyTable_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.desc.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, modifyTable_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, modifyTable_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.desc = new TTableDescriptor();
                   struct.desc.read(iprot);
                   struct.setDescIsSet(true);
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class modifyTable_result implements org.apache.thrift.TBase<modifyTable_result, modifyTable_result._Fields>, java.io.Serializable, Cloneable, Comparable<modifyTable_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyTable_result");
          +  public static class modifyTable_result
          +      implements org.apache.thrift.TBase<modifyTable_result, modifyTable_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<modifyTable_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("modifyTable_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyTable_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyTable_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new modifyTable_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new modifyTable_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -47147,7 +51157,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -47156,12 +51166,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -47193,19 +51203,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyTable_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyTable_result.class,
          +        metaDataMap);
               }
           
               public modifyTable_result() {
               }
           
          -    public modifyTable_result(
          -      TIOError io)
          -    {
          +    public modifyTable_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -47253,15 +51266,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -47269,46 +51283,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof modifyTable_result)
          -        return this.equals((modifyTable_result)that);
          +      if (that instanceof modifyTable_result) return this.equals((modifyTable_result) that);
                 return false;
               }
           
               public boolean equals(modifyTable_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -47319,8 +51331,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -47351,13 +51362,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -47382,35 +51395,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class modifyTable_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyTable_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyTable_resultStandardScheme getScheme() {
                   return new modifyTable_resultStandardScheme();
                 }
               }
           
           -    private static class modifyTable_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<modifyTable_result> {
           +    private static class modifyTable_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<modifyTable_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -47419,7 +51437,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_result
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -47430,11 +51448,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyTable_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -47449,17 +51469,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyTable_result
           
               }
           
          -    private static class modifyTable_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyTable_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyTable_resultTupleScheme getScheme() {
                   return new modifyTable_resultTupleScheme();
                 }
               }
           
           -    private static class modifyTable_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<modifyTable_result> {
           +    private static class modifyTable_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<modifyTable_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -47471,8 +51495,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, modifyTable_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, modifyTable_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, modifyTable_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -47482,32 +51508,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, modifyTable_result s
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class createNamespace_args implements org.apache.thrift.TBase<createNamespace_args, createNamespace_args._Fields>, java.io.Serializable, Cloneable, Comparable<createNamespace_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createNamespace_args");
          +  public static class createNamespace_args
           +      implements org.apache.thrift.TBase<createNamespace_args, createNamespace_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<createNamespace_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("createNamespace_args");
           
          -    private static final org.apache.thrift.protocol.TField NAMESPACE_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("namespaceDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField NAMESPACE_DESC_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("namespaceDesc",
          +            org.apache.thrift.protocol.TType.STRUCT, (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createNamespace_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createNamespace_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new createNamespace_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new createNamespace_argsTupleSchemeFactory();
           
               /**
                * descriptor which describes the new namespace
                */
               public @org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * descriptor which describes the new namespace
                  */
          -      NAMESPACE_DESC((short)1, "namespaceDesc");
          +      NAMESPACE_DESC((short) 1, "namespaceDesc");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -47520,7 +51560,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // NAMESPACE_DESC
                       return NAMESPACE_DESC;
                     default:
          @@ -47529,12 +51569,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -47566,19 +51606,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.NAMESPACE_DESC, new org.apache.thrift.meta_data.FieldMetaData("namespaceDesc", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.NAMESPACE_DESC,
          +        new org.apache.thrift.meta_data.FieldMetaData("namespaceDesc",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TNamespaceDescriptor.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createNamespace_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createNamespace_args.class,
          +        metaDataMap);
               }
           
               public createNamespace_args() {
               }
           
          -    public createNamespace_args(
          -      TNamespaceDescriptor namespaceDesc)
          -    {
          +    public createNamespace_args(TNamespaceDescriptor namespaceDesc) {
                 this();
                 this.namespaceDesc = namespaceDesc;
               }
          @@ -47612,7 +51655,8 @@ public TNamespaceDescriptor getNamespaceDesc() {
               /**
                * descriptor which describes the new namespace
                */
          -    public createNamespace_args setNamespaceDesc(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc) {
          +    public createNamespace_args setNamespaceDesc(
          +        @org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc) {
                 this.namespaceDesc = namespaceDesc;
                 return this;
               }
          @@ -47621,7 +51665,9 @@ public void unsetNamespaceDesc() {
                 this.namespaceDesc = null;
               }
           
          -    /** Returns true if field namespaceDesc is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field namespaceDesc is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetNamespaceDesc() {
                 return this.namespaceDesc != null;
               }
          @@ -47632,15 +51678,16 @@ public void setNamespaceDescIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case NAMESPACE_DESC:
          -        if (value == null) {
          -          unsetNamespaceDesc();
          -        } else {
          -          setNamespaceDesc((TNamespaceDescriptor)value);
          -        }
          -        break;
          +        case NAMESPACE_DESC:
          +          if (value == null) {
          +            unsetNamespaceDesc();
          +          } else {
          +            setNamespaceDesc((TNamespaceDescriptor) value);
          +          }
          +          break;
           
                 }
               }
          @@ -47648,46 +51695,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case NAMESPACE_DESC:
          -        return getNamespaceDesc();
          +        case NAMESPACE_DESC:
          +          return getNamespaceDesc();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case NAMESPACE_DESC:
          -        return isSetNamespaceDesc();
          +        case NAMESPACE_DESC:
          +          return isSetNamespaceDesc();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof createNamespace_args)
          -        return this.equals((createNamespace_args)that);
          +      if (that instanceof createNamespace_args) return this.equals((createNamespace_args) that);
                 return false;
               }
           
               public boolean equals(createNamespace_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_namespaceDesc = true && this.isSetNamespaceDesc();
                 boolean that_present_namespaceDesc = true && that.isSetNamespaceDesc();
                 if (this_present_namespaceDesc || that_present_namespaceDesc) {
          -        if (!(this_present_namespaceDesc && that_present_namespaceDesc))
          -          return false;
          -        if (!this.namespaceDesc.equals(that.namespaceDesc))
          -          return false;
          +        if (!(this_present_namespaceDesc && that_present_namespaceDesc)) return false;
          +        if (!this.namespaceDesc.equals(that.namespaceDesc)) return false;
                 }
           
                 return true;
          @@ -47698,8 +51743,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetNamespaceDesc()) ? 131071 : 524287);
          -      if (isSetNamespaceDesc())
          -        hashCode = hashCode * 8191 + namespaceDesc.hashCode();
          +      if (isSetNamespaceDesc()) hashCode = hashCode * 8191 + namespaceDesc.hashCode();
           
                 return hashCode;
               }
          @@ -47717,7 +51761,8 @@ public int compareTo(createNamespace_args other) {
                   return lastComparison;
                 }
                 if (isSetNamespaceDesc()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.namespaceDesc, other.namespaceDesc);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.namespaceDesc, other.namespaceDesc);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -47730,11 +51775,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -47757,7 +51804,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (namespaceDesc == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'namespaceDesc' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'namespaceDesc' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (namespaceDesc != null) {
          @@ -47767,35 +51815,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class createNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createNamespace_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createNamespace_argsStandardScheme getScheme() {
                   return new createNamespace_argsStandardScheme();
                 }
               }
           
           -    private static class createNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<createNamespace_args> {
           +    private static class createNamespace_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<createNamespace_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -47804,7 +51857,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_arg
                           struct.namespaceDesc = new TNamespaceDescriptor();
                           struct.namespaceDesc.read(iprot);
                           struct.setNamespaceDescIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -47815,11 +51868,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_arg
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -47834,49 +51889,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_ar
           
               }
           
          -    private static class createNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createNamespace_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createNamespace_argsTupleScheme getScheme() {
                   return new createNamespace_argsTupleScheme();
                 }
               }
           
           -    private static class createNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<createNamespace_args> {
           +    private static class createNamespace_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<createNamespace_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.namespaceDesc.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, createNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, createNamespace_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.namespaceDesc = new TNamespaceDescriptor();
                   struct.namespaceDesc.read(iprot);
                   struct.setNamespaceDescIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class createNamespace_result implements org.apache.thrift.TBase<createNamespace_result, createNamespace_result._Fields>, java.io.Serializable, Cloneable, Comparable<createNamespace_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("createNamespace_result");
          +  public static class createNamespace_result
           +      implements org.apache.thrift.TBase<createNamespace_result, createNamespace_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<createNamespace_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("createNamespace_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new createNamespace_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new createNamespace_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new createNamespace_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new createNamespace_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -47889,7 +51964,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -47898,12 +51973,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -47935,19 +52010,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createNamespace_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(createNamespace_result.class,
          +        metaDataMap);
               }
           
               public createNamespace_result() {
               }
           
          -    public createNamespace_result(
          -      TIOError io)
          -    {
          +    public createNamespace_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -47995,15 +52073,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -48011,46 +52090,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof createNamespace_result)
          -        return this.equals((createNamespace_result)that);
          +      if (that instanceof createNamespace_result) return this.equals((createNamespace_result) that);
                 return false;
               }
           
               public boolean equals(createNamespace_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -48061,8 +52138,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -48093,13 +52169,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -48124,35 +52202,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class createNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createNamespace_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createNamespace_resultStandardScheme getScheme() {
                   return new createNamespace_resultStandardScheme();
                 }
               }
           
           -    private static class createNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<createNamespace_result> {
           +    private static class createNamespace_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<createNamespace_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -48161,7 +52244,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_res
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -48172,11 +52255,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, createNamespace_res
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -48191,17 +52276,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, createNamespace_re
           
               }
           
          -    private static class createNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class createNamespace_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public createNamespace_resultTupleScheme getScheme() {
                   return new createNamespace_resultTupleScheme();
                 }
               }
           
           -    private static class createNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<createNamespace_result> {
           +    private static class createNamespace_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<createNamespace_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -48213,8 +52302,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, createNamespace_res
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, createNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, createNamespace_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -48224,32 +52315,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, createNamespace_resu
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class modifyNamespace_args implements org.apache.thrift.TBase<modifyNamespace_args, modifyNamespace_args._Fields>, java.io.Serializable, Cloneable, Comparable<modifyNamespace_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyNamespace_args");
          +  public static class modifyNamespace_args
           +      implements org.apache.thrift.TBase<modifyNamespace_args, modifyNamespace_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<modifyNamespace_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("modifyNamespace_args");
           
          -    private static final org.apache.thrift.protocol.TField NAMESPACE_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("namespaceDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField NAMESPACE_DESC_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("namespaceDesc",
          +            org.apache.thrift.protocol.TType.STRUCT, (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyNamespace_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyNamespace_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new modifyNamespace_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new modifyNamespace_argsTupleSchemeFactory();
           
               /**
                * descriptor which describes the new namespace
                */
               public @org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * descriptor which describes the new namespace
                  */
          -      NAMESPACE_DESC((short)1, "namespaceDesc");
          +      NAMESPACE_DESC((short) 1, "namespaceDesc");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -48262,7 +52367,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // NAMESPACE_DESC
                       return NAMESPACE_DESC;
                     default:
          @@ -48271,12 +52376,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -48308,19 +52413,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.NAMESPACE_DESC, new org.apache.thrift.meta_data.FieldMetaData("namespaceDesc", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.NAMESPACE_DESC,
          +        new org.apache.thrift.meta_data.FieldMetaData("namespaceDesc",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TNamespaceDescriptor.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyNamespace_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyNamespace_args.class,
          +        metaDataMap);
               }
           
               public modifyNamespace_args() {
               }
           
          -    public modifyNamespace_args(
          -      TNamespaceDescriptor namespaceDesc)
          -    {
          +    public modifyNamespace_args(TNamespaceDescriptor namespaceDesc) {
                 this();
                 this.namespaceDesc = namespaceDesc;
               }
          @@ -48354,7 +52462,8 @@ public TNamespaceDescriptor getNamespaceDesc() {
               /**
                * descriptor which describes the new namespace
                */
          -    public modifyNamespace_args setNamespaceDesc(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc) {
          +    public modifyNamespace_args setNamespaceDesc(
          +        @org.apache.thrift.annotation.Nullable TNamespaceDescriptor namespaceDesc) {
                 this.namespaceDesc = namespaceDesc;
                 return this;
               }
          @@ -48363,7 +52472,9 @@ public void unsetNamespaceDesc() {
                 this.namespaceDesc = null;
               }
           
          -    /** Returns true if field namespaceDesc is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field namespaceDesc is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetNamespaceDesc() {
                 return this.namespaceDesc != null;
               }
          @@ -48374,15 +52485,16 @@ public void setNamespaceDescIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case NAMESPACE_DESC:
          -        if (value == null) {
          -          unsetNamespaceDesc();
          -        } else {
          -          setNamespaceDesc((TNamespaceDescriptor)value);
          -        }
          -        break;
          +        case NAMESPACE_DESC:
          +          if (value == null) {
          +            unsetNamespaceDesc();
          +          } else {
          +            setNamespaceDesc((TNamespaceDescriptor) value);
          +          }
          +          break;
           
                 }
               }
          @@ -48390,46 +52502,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case NAMESPACE_DESC:
          -        return getNamespaceDesc();
          +        case NAMESPACE_DESC:
          +          return getNamespaceDesc();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case NAMESPACE_DESC:
          -        return isSetNamespaceDesc();
          +        case NAMESPACE_DESC:
          +          return isSetNamespaceDesc();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof modifyNamespace_args)
          -        return this.equals((modifyNamespace_args)that);
          +      if (that instanceof modifyNamespace_args) return this.equals((modifyNamespace_args) that);
                 return false;
               }
           
               public boolean equals(modifyNamespace_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_namespaceDesc = true && this.isSetNamespaceDesc();
                 boolean that_present_namespaceDesc = true && that.isSetNamespaceDesc();
                 if (this_present_namespaceDesc || that_present_namespaceDesc) {
          -        if (!(this_present_namespaceDesc && that_present_namespaceDesc))
          -          return false;
          -        if (!this.namespaceDesc.equals(that.namespaceDesc))
          -          return false;
          +        if (!(this_present_namespaceDesc && that_present_namespaceDesc)) return false;
          +        if (!this.namespaceDesc.equals(that.namespaceDesc)) return false;
                 }
           
                 return true;
          @@ -48440,8 +52550,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetNamespaceDesc()) ? 131071 : 524287);
          -      if (isSetNamespaceDesc())
          -        hashCode = hashCode * 8191 + namespaceDesc.hashCode();
          +      if (isSetNamespaceDesc()) hashCode = hashCode * 8191 + namespaceDesc.hashCode();
           
                 return hashCode;
               }
          @@ -48459,7 +52568,8 @@ public int compareTo(modifyNamespace_args other) {
                   return lastComparison;
                 }
                 if (isSetNamespaceDesc()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.namespaceDesc, other.namespaceDesc);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.namespaceDesc, other.namespaceDesc);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -48472,11 +52582,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -48499,7 +52611,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (namespaceDesc == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'namespaceDesc' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'namespaceDesc' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (namespaceDesc != null) {
          @@ -48509,35 +52622,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class modifyNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyNamespace_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyNamespace_argsStandardScheme getScheme() {
                   return new modifyNamespace_argsStandardScheme();
                 }
               }
           
          -    private static class modifyNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<modifyNamespace_args> {
          +    private static class modifyNamespace_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<modifyNamespace_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -48546,7 +52664,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_arg
                           struct.namespaceDesc = new TNamespaceDescriptor();
                           struct.namespaceDesc.read(iprot);
                           struct.setNamespaceDescIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -48557,11 +52675,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_arg
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -48576,49 +52696,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_ar
           
               }
           
          -    private static class modifyNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyNamespace_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyNamespace_argsTupleScheme getScheme() {
                   return new modifyNamespace_argsTupleScheme();
                 }
               }
           
          -    private static class modifyNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<modifyNamespace_args> {
          +    private static class modifyNamespace_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<modifyNamespace_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.namespaceDesc.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.namespaceDesc = new TNamespaceDescriptor();
                   struct.namespaceDesc.read(iprot);
                   struct.setNamespaceDescIsSet(true);
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class modifyNamespace_result implements org.apache.thrift.TBase<modifyNamespace_result, modifyNamespace_result._Fields>, java.io.Serializable, Cloneable, Comparable<modifyNamespace_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("modifyNamespace_result");
          +  public static class modifyNamespace_result
          +      implements org.apache.thrift.TBase<modifyNamespace_result, modifyNamespace_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<modifyNamespace_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("modifyNamespace_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new modifyNamespace_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new modifyNamespace_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new modifyNamespace_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new modifyNamespace_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -48631,7 +52771,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -48640,12 +52780,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -48677,19 +52817,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyNamespace_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(modifyNamespace_result.class,
          +        metaDataMap);
               }
           
               public modifyNamespace_result() {
               }
           
          -    public modifyNamespace_result(
          -      TIOError io)
          -    {
          +    public modifyNamespace_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -48737,15 +52880,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -48753,46 +52897,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof modifyNamespace_result)
          -        return this.equals((modifyNamespace_result)that);
          +      if (that instanceof modifyNamespace_result) return this.equals((modifyNamespace_result) that);
                 return false;
               }
           
               public boolean equals(modifyNamespace_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -48803,8 +52945,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -48835,13 +52976,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -48866,35 +53009,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class modifyNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyNamespace_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyNamespace_resultStandardScheme getScheme() {
                   return new modifyNamespace_resultStandardScheme();
                 }
               }
           
          -    private static class modifyNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<modifyNamespace_result> {
          +    private static class modifyNamespace_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<modifyNamespace_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -48903,7 +53051,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_res
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -48914,11 +53062,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, modifyNamespace_res
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -48933,17 +53083,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, modifyNamespace_re
           
               }
           
          -    private static class modifyNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class modifyNamespace_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public modifyNamespace_resultTupleScheme getScheme() {
                   return new modifyNamespace_resultTupleScheme();
                 }
               }
           
          -    private static class modifyNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<modifyNamespace_result> {
          +    private static class modifyNamespace_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<modifyNamespace_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -48955,8 +53109,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_res
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -48966,32 +53122,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, modifyNamespace_resu
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class deleteNamespace_args implements org.apache.thrift.TBase<deleteNamespace_args, deleteNamespace_args._Fields>, java.io.Serializable, Cloneable, Comparable<deleteNamespace_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteNamespace_args");
          +  public static class deleteNamespace_args
          +      implements org.apache.thrift.TBase<deleteNamespace_args, deleteNamespace_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<deleteNamespace_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteNamespace_args");
           
          -    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteNamespace_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteNamespace_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteNamespace_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteNamespace_argsTupleSchemeFactory();
           
               /**
                * namespace name
                */
               public @org.apache.thrift.annotation.Nullable java.lang.String name; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * namespace name
                  */
          -      NAME((short)1, "name");
          +      NAME((short) 1, "name");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -49004,7 +53174,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // NAME
                       return NAME;
                     default:
          @@ -49013,12 +53183,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -49050,19 +53220,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("name",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteNamespace_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteNamespace_args.class,
          +        metaDataMap);
               }
           
               public deleteNamespace_args() {
               }
           
          -    public deleteNamespace_args(
          -      java.lang.String name)
          -    {
          +    public deleteNamespace_args(java.lang.String name) {
                 this();
                 this.name = name;
               }
          @@ -49096,7 +53269,8 @@ public java.lang.String getName() {
               /**
                * namespace name
                */
          -    public deleteNamespace_args setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
          +    public deleteNamespace_args
          +        setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
                 this.name = name;
                 return this;
               }
          @@ -49116,15 +53290,16 @@ public void setNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case NAME:
          -        if (value == null) {
          -          unsetName();
          -        } else {
          -          setName((java.lang.String)value);
          -        }
          -        break;
          +        case NAME:
          +          if (value == null) {
          +            unsetName();
          +          } else {
          +            setName((java.lang.String) value);
          +          }
          +          break;
           
                 }
               }
          @@ -49132,46 +53307,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case NAME:
          -        return getName();
          +        case NAME:
          +          return getName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case NAME:
          -        return isSetName();
          +        case NAME:
          +          return isSetName();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteNamespace_args)
          -        return this.equals((deleteNamespace_args)that);
          +      if (that instanceof deleteNamespace_args) return this.equals((deleteNamespace_args) that);
                 return false;
               }
           
               public boolean equals(deleteNamespace_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_name = true && this.isSetName();
                 boolean that_present_name = true && that.isSetName();
                 if (this_present_name || that_present_name) {
          -        if (!(this_present_name && that_present_name))
          -          return false;
          -        if (!this.name.equals(that.name))
          -          return false;
          +        if (!(this_present_name && that_present_name)) return false;
          +        if (!this.name.equals(that.name)) return false;
                 }
           
                 return true;
          @@ -49182,8 +53355,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
          -      if (isSetName())
          -        hashCode = hashCode * 8191 + name.hashCode();
          +      if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
           
                 return hashCode;
               }
          @@ -49214,11 +53386,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -49241,42 +53415,48 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (name == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'name' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteNamespace_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteNamespace_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteNamespace_argsStandardScheme getScheme() {
                   return new deleteNamespace_argsStandardScheme();
                 }
               }
           
          -    private static class deleteNamespace_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteNamespace_args> {
          +    private static class deleteNamespace_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<deleteNamespace_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -49284,7 +53464,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_arg
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.name = iprot.readString();
                           struct.setNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -49295,11 +53475,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_arg
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -49314,48 +53496,68 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_ar
           
               }
           
          -    private static class deleteNamespace_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteNamespace_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteNamespace_argsTupleScheme getScheme() {
                   return new deleteNamespace_argsTupleScheme();
                 }
               }
           
          -    private static class deleteNamespace_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteNamespace_args> {
          +    private static class deleteNamespace_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<deleteNamespace_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeString(struct.name);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.name = iprot.readString();
                   struct.setNameIsSet(true);
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class deleteNamespace_result implements org.apache.thrift.TBase<deleteNamespace_result, deleteNamespace_result._Fields>, java.io.Serializable, Cloneable, Comparable<deleteNamespace_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("deleteNamespace_result");
          +  public static class deleteNamespace_result
          +      implements org.apache.thrift.TBase<deleteNamespace_result, deleteNamespace_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<deleteNamespace_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("deleteNamespace_result");
           
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new deleteNamespace_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new deleteNamespace_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new deleteNamespace_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new deleteNamespace_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      IO((short)1, "io");
          +      IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -49368,7 +53570,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // IO
                       return IO;
                     default:
          @@ -49377,12 +53579,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -49414,19 +53616,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteNamespace_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(deleteNamespace_result.class,
          +        metaDataMap);
               }
           
               public deleteNamespace_result() {
               }
           
          -    public deleteNamespace_result(
          -      TIOError io)
          -    {
          +    public deleteNamespace_result(TIOError io) {
                 this();
                 this.io = io;
               }
          @@ -49474,15 +53679,16 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -49490,46 +53696,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case IO:
          -        return isSetIo();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof deleteNamespace_result)
          -        return this.equals((deleteNamespace_result)that);
          +      if (that instanceof deleteNamespace_result) return this.equals((deleteNamespace_result) that);
                 return false;
               }
           
               public boolean equals(deleteNamespace_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -49540,8 +53744,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -49572,13 +53775,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -49603,35 +53808,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class deleteNamespace_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteNamespace_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteNamespace_resultStandardScheme getScheme() {
                   return new deleteNamespace_resultStandardScheme();
                 }
               }
           
           -    private static class deleteNamespace_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<deleteNamespace_result> {
          +    private static class deleteNamespace_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<deleteNamespace_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -49640,7 +53850,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_res
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -49651,11 +53861,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, deleteNamespace_res
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -49670,17 +53882,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, deleteNamespace_re
           
               }
           
          -    private static class deleteNamespace_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class deleteNamespace_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public deleteNamespace_resultTupleScheme getScheme() {
                   return new deleteNamespace_resultTupleScheme();
                 }
               }
           
           -    private static class deleteNamespace_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<deleteNamespace_result> {
          +    private static class deleteNamespace_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<deleteNamespace_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetIo()) {
                     optionals.set(0);
          @@ -49692,8 +53908,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_res
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.io = new TIOError();
          @@ -49703,32 +53921,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, deleteNamespace_resu
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getNamespaceDescriptor_args implements org.apache.thrift.TBase<getNamespaceDescriptor_args, getNamespaceDescriptor_args._Fields>, java.io.Serializable, Cloneable, Comparable<getNamespaceDescriptor_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNamespaceDescriptor_args");
          +  public static class getNamespaceDescriptor_args implements
           +      org.apache.thrift.TBase<getNamespaceDescriptor_args, getNamespaceDescriptor_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getNamespaceDescriptor_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getNamespaceDescriptor_args");
           
          -    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
          +    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getNamespaceDescriptor_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getNamespaceDescriptor_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getNamespaceDescriptor_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getNamespaceDescriptor_argsTupleSchemeFactory();
           
               /**
                * name of namespace descriptor
                */
               public @org.apache.thrift.annotation.Nullable java.lang.String name; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * name of namespace descriptor
                  */
          -      NAME((short)1, "name");
          +      NAME((short) 1, "name");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -49741,7 +53973,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // NAME
                       return NAME;
                     default:
          @@ -49750,12 +53982,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -49787,19 +54019,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.NAME,
          +        new org.apache.thrift.meta_data.FieldMetaData("name",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNamespaceDescriptor_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getNamespaceDescriptor_args.class, metaDataMap);
               }
           
               public getNamespaceDescriptor_args() {
               }
           
          -    public getNamespaceDescriptor_args(
          -      java.lang.String name)
          -    {
          +    public getNamespaceDescriptor_args(java.lang.String name) {
                 this();
                 this.name = name;
               }
          @@ -49833,7 +54068,8 @@ public java.lang.String getName() {
               /**
                * name of namespace descriptor
                */
          -    public getNamespaceDescriptor_args setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
          +    public getNamespaceDescriptor_args
          +        setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
                 this.name = name;
                 return this;
               }
          @@ -49853,15 +54089,16 @@ public void setNameIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case NAME:
          -        if (value == null) {
          -          unsetName();
          -        } else {
          -          setName((java.lang.String)value);
          -        }
          -        break;
          +        case NAME:
          +          if (value == null) {
          +            unsetName();
          +          } else {
          +            setName((java.lang.String) value);
          +          }
          +          break;
           
                 }
               }
          @@ -49869,22 +54106,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case NAME:
          -        return getName();
          +        case NAME:
          +          return getName();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case NAME:
          -        return isSetName();
          +        case NAME:
          +          return isSetName();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -49892,23 +54132,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getNamespaceDescriptor_args)
          -        return this.equals((getNamespaceDescriptor_args)that);
          +        return this.equals((getNamespaceDescriptor_args) that);
                 return false;
               }
           
               public boolean equals(getNamespaceDescriptor_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_name = true && this.isSetName();
                 boolean that_present_name = true && that.isSetName();
                 if (this_present_name || that_present_name) {
          -        if (!(this_present_name && that_present_name))
          -          return false;
          -        if (!this.name.equals(that.name))
          -          return false;
          +        if (!(this_present_name && that_present_name)) return false;
          +        if (!this.name.equals(that.name)) return false;
                 }
           
                 return true;
          @@ -49919,8 +54155,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
          -      if (isSetName())
          -        hashCode = hashCode * 8191 + name.hashCode();
          +      if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
           
                 return hashCode;
               }
          @@ -49951,11 +54186,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -49978,42 +54215,48 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (name == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'name' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
               }
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getNamespaceDescriptor_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getNamespaceDescriptor_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getNamespaceDescriptor_argsStandardScheme getScheme() {
                   return new getNamespaceDescriptor_argsStandardScheme();
                 }
               }
           
           -    private static class getNamespaceDescriptor_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getNamespaceDescriptor_args> {
          +    private static class getNamespaceDescriptor_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getNamespaceDescriptor_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getNamespaceDescriptor_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -50021,7 +54264,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescrip
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.name = iprot.readString();
                           struct.setNameIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -50032,11 +54275,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescrip
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getNamespaceDescriptor_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -50051,51 +54296,72 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getNamespaceDescri
           
               }
           
          -    private static class getNamespaceDescriptor_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getNamespaceDescriptor_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getNamespaceDescriptor_argsTupleScheme getScheme() {
                   return new getNamespaceDescriptor_argsTupleScheme();
                 }
               }
           
           -    private static class getNamespaceDescriptor_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getNamespaceDescriptor_args> {
          +    private static class getNamespaceDescriptor_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getNamespaceDescriptor_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getNamespaceDescriptor_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   oprot.writeString(struct.name);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getNamespaceDescriptor_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.name = iprot.readString();
                   struct.setNameIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getNamespaceDescriptor_result implements org.apache.thrift.TBase<getNamespaceDescriptor_result, getNamespaceDescriptor_result._Fields>, java.io.Serializable, Cloneable, Comparable<getNamespaceDescriptor_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNamespaceDescriptor_result");
          +  public static class getNamespaceDescriptor_result implements
           +      org.apache.thrift.TBase<getNamespaceDescriptor_result, getNamespaceDescriptor_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getNamespaceDescriptor_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getNamespaceDescriptor_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getNamespaceDescriptor_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getNamespaceDescriptor_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getNamespaceDescriptor_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getNamespaceDescriptor_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TNamespaceDescriptor success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -50108,7 +54374,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -50119,12 +54385,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -50156,22 +54422,27 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TNamespaceDescriptor.class)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNamespaceDescriptor_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getNamespaceDescriptor_result.class, metaDataMap);
               }
           
               public getNamespaceDescriptor_result() {
               }
           
          -    public getNamespaceDescriptor_result(
          -      TNamespaceDescriptor success,
          -      TIOError io)
          -    {
          +    public getNamespaceDescriptor_result(TNamespaceDescriptor success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -50204,7 +54475,8 @@ public TNamespaceDescriptor getSuccess() {
                 return this.success;
               }
           
          -    public getNamespaceDescriptor_result setSuccess(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor success) {
          +    public getNamespaceDescriptor_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable TNamespaceDescriptor success) {
                 this.success = success;
                 return this;
               }
          @@ -50249,23 +54521,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((TNamespaceDescriptor)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((TNamespaceDescriptor) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -50273,27 +54546,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -50301,32 +54577,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getNamespaceDescriptor_result)
          -        return this.equals((getNamespaceDescriptor_result)that);
          +        return this.equals((getNamespaceDescriptor_result) that);
                 return false;
               }
           
               public boolean equals(getNamespaceDescriptor_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -50337,12 +54607,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -50383,13 +54651,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -50425,35 +54695,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getNamespaceDescriptor_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getNamespaceDescriptor_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getNamespaceDescriptor_resultStandardScheme getScheme() {
                   return new getNamespaceDescriptor_resultStandardScheme();
                 }
               }
           
           -    private static class getNamespaceDescriptor_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getNamespaceDescriptor_result> {
          +    private static class getNamespaceDescriptor_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getNamespaceDescriptor_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getNamespaceDescriptor_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -50462,7 +54737,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescrip
                           struct.success = new TNamespaceDescriptor();
                           struct.success.read(iprot);
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -50471,7 +54746,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescrip
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -50482,11 +54757,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getNamespaceDescrip
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getNamespaceDescriptor_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -50506,17 +54783,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getNamespaceDescri
           
               }
           
          -    private static class getNamespaceDescriptor_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getNamespaceDescriptor_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getNamespaceDescriptor_resultTupleScheme getScheme() {
                   return new getNamespaceDescriptor_resultTupleScheme();
                 }
               }
           
           -    private static class getNamespaceDescriptor_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getNamespaceDescriptor_result> {
          +    private static class getNamespaceDescriptor_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getNamespaceDescriptor_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getNamespaceDescriptor_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -50534,8 +54815,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescrip
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescriptor_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          getNamespaceDescriptor_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = new TNamespaceDescriptor();
          @@ -50550,24 +54833,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getNamespaceDescript
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class listNamespaceDescriptors_args implements org.apache.thrift.TBase<listNamespaceDescriptors_args, listNamespaceDescriptors_args._Fields>, java.io.Serializable, Cloneable, Comparable<listNamespaceDescriptors_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaceDescriptors_args");
          -
          +  public static class listNamespaceDescriptors_args implements
           +      org.apache.thrift.TBase<listNamespaceDescriptors_args, listNamespaceDescriptors_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<listNamespaceDescriptors_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("listNamespaceDescriptors_args");
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaceDescriptors_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaceDescriptors_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new listNamespaceDescriptors_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new listNamespaceDescriptors_argsTupleSchemeFactory();
           
          -
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -;
          +      ;
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -50580,19 +54873,19 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     default:
                       return null;
                   }
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -50620,11 +54913,14 @@ public java.lang.String getFieldName() {
                   return _fieldName;
                 }
               }
          +
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaceDescriptors_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(listNamespaceDescriptors_args.class, metaDataMap);
               }
           
               public listNamespaceDescriptors_args() {
          @@ -50644,7 +54940,8 @@ public listNamespaceDescriptors_args deepCopy() {
               public void clear() {
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
                 }
               }
          @@ -50656,7 +54953,10 @@ public java.lang.Object getFieldValue(_Fields field) {
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
          @@ -50670,15 +54970,13 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof listNamespaceDescriptors_args)
          -        return this.equals((listNamespaceDescriptors_args)that);
          +        return this.equals((listNamespaceDescriptors_args) that);
                 return false;
               }
           
               public boolean equals(listNamespaceDescriptors_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 return true;
               }
          @@ -50706,11 +55004,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -50730,35 +55030,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class listNamespaceDescriptors_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class listNamespaceDescriptors_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public listNamespaceDescriptors_argsStandardScheme getScheme() {
                   return new listNamespaceDescriptors_argsStandardScheme();
                 }
               }
           
-    private static class listNamespaceDescriptors_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<listNamespaceDescriptors_args> {
+    private static class listNamespaceDescriptors_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<listNamespaceDescriptors_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          listNamespaceDescriptors_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -50769,11 +55074,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescri
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          listNamespaceDescriptors_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -50783,48 +55090,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescr
           
               }
           
          -    private static class listNamespaceDescriptors_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class listNamespaceDescriptors_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public listNamespaceDescriptors_argsTupleScheme getScheme() {
                   return new listNamespaceDescriptors_argsTupleScheme();
                 }
               }
           
-    private static class listNamespaceDescriptors_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<listNamespaceDescriptors_args> {
+    private static class listNamespaceDescriptors_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<listNamespaceDescriptors_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          listNamespaceDescriptors_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          listNamespaceDescriptors_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class listNamespaceDescriptors_result implements org.apache.thrift.TBase<listNamespaceDescriptors_result, listNamespaceDescriptors_result._Fields>, java.io.Serializable, Cloneable, Comparable<listNamespaceDescriptors_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaceDescriptors_result");
+  public static class listNamespaceDescriptors_result implements
+      org.apache.thrift.TBase<listNamespaceDescriptors_result, listNamespaceDescriptors_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<listNamespaceDescriptors_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("listNamespaceDescriptors_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaceDescriptors_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaceDescriptors_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new listNamespaceDescriptors_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new listNamespaceDescriptors_resultTupleSchemeFactory();
           
     public @org.apache.thrift.annotation.Nullable java.util.List<TNamespaceDescriptor> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -50837,7 +55165,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -50848,12 +55176,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -50885,23 +55213,29 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TNamespaceDescriptor.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaceDescriptors_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(listNamespaceDescriptors_result.class, metaDataMap);
               }
           
               public listNamespaceDescriptors_result() {
               }
           
          -    public listNamespaceDescriptors_result(
-      java.util.List<TNamespaceDescriptor> success,
          -      TIOError io)
          -    {
+    public listNamespaceDescriptors_result(java.util.List<TNamespaceDescriptor> success,
          +        TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -50912,7 +55246,8 @@ public listNamespaceDescriptors_result(
                */
               public listNamespaceDescriptors_result(listNamespaceDescriptors_result other) {
                 if (other.isSetSuccess()) {
-        java.util.List<TNamespaceDescriptor> __this__success = new java.util.ArrayList<TNamespaceDescriptor>(other.success.size());
+        java.util.List<TNamespaceDescriptor> __this__success =
+            new java.util.ArrayList<TNamespaceDescriptor>(other.success.size());
                   for (TNamespaceDescriptor other_element : other.success) {
                     __this__success.add(new TNamespaceDescriptor(other_element));
                   }
          @@ -50954,7 +55289,8 @@ public java.util.List getSuccess() {
                 return this.success;
               }
           
-    public listNamespaceDescriptors_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TNamespaceDescriptor> success) {
+    public listNamespaceDescriptors_result setSuccess(
+        @org.apache.thrift.annotation.Nullable java.util.List<TNamespaceDescriptor> success) {
                 this.success = success;
                 return this;
               }
          @@ -50979,7 +55315,8 @@ public TIOError getIo() {
                 return this.io;
               }
           
          -    public listNamespaceDescriptors_result setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
          +    public listNamespaceDescriptors_result
          +        setIo(@org.apache.thrift.annotation.Nullable TIOError io) {
                 this.io = io;
                 return this;
               }
          @@ -50999,23 +55336,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
-          setSuccess((java.util.List<TNamespaceDescriptor>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
+            setSuccess((java.util.List<TNamespaceDescriptor>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -51023,27 +55361,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -51051,32 +55392,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof listNamespaceDescriptors_result)
          -        return this.equals((listNamespaceDescriptors_result)that);
          +        return this.equals((listNamespaceDescriptors_result) that);
                 return false;
               }
           
               public boolean equals(listNamespaceDescriptors_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -51087,12 +55422,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -51133,13 +55466,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -51172,35 +55507,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class listNamespaceDescriptors_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class listNamespaceDescriptors_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public listNamespaceDescriptors_resultStandardScheme getScheme() {
                   return new listNamespaceDescriptors_resultStandardScheme();
                 }
               }
           
-    private static class listNamespaceDescriptors_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<listNamespaceDescriptors_result> {
+    private static class listNamespaceDescriptors_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<listNamespaceDescriptors_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          listNamespaceDescriptors_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -51209,9 +55549,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescri
                           {
                             org.apache.thrift.protocol.TList _list334 = iprot.readListBegin();
                   struct.success = new java.util.ArrayList<TNamespaceDescriptor>(_list334.size);
          -                  @org.apache.thrift.annotation.Nullable TNamespaceDescriptor _elem335;
          -                  for (int _i336 = 0; _i336 < _list334.size; ++_i336)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TNamespaceDescriptor _elem335;
          +                  for (int _i336 = 0; _i336 < _list334.size; ++_i336) {
                               _elem335 = new TNamespaceDescriptor();
                               _elem335.read(iprot);
                               struct.success.add(_elem335);
          @@ -51219,7 +55559,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescri
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -51228,7 +55568,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescri
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -51239,20 +55579,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaceDescri
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          listNamespaceDescriptors_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TNamespaceDescriptor _iter337 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TNamespaceDescriptor _iter337 : struct.success) {
                         _iter337.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -51270,17 +55612,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaceDescr
           
               }
           
          -    private static class listNamespaceDescriptors_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class listNamespaceDescriptors_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public listNamespaceDescriptors_resultTupleScheme getScheme() {
                   return new listNamespaceDescriptors_resultTupleScheme();
                 }
               }
           
-    private static class listNamespaceDescriptors_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<listNamespaceDescriptors_result> {
+    private static class listNamespaceDescriptors_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<listNamespaceDescriptors_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          listNamespaceDescriptors_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -51292,8 +55638,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescri
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TNamespaceDescriptor _iter338 : struct.success)
          -            {
          +            for (TNamespaceDescriptor _iter338 : struct.success) {
                         _iter338.write(oprot);
                       }
                     }
          @@ -51304,16 +55649,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescri
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescriptors_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          listNamespaceDescriptors_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list339 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list339 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
             struct.success = new java.util.ArrayList<TNamespaceDescriptor>(_list339.size);
          -            @org.apache.thrift.annotation.Nullable TNamespaceDescriptor _elem340;
          -            for (int _i341 = 0; _i341 < _list339.size; ++_i341)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TNamespaceDescriptor _elem340;
          +            for (int _i341 = 0; _i341 < _list339.size; ++_i341) {
                         _elem340 = new TNamespaceDescriptor();
                         _elem340.read(iprot);
                         struct.success.add(_elem340);
          @@ -51329,24 +55677,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaceDescrip
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class listNamespaces_args implements org.apache.thrift.TBase<listNamespaces_args, listNamespaces_args._Fields>, java.io.Serializable, Cloneable, Comparable<listNamespaces_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaces_args");
          +  public static class listNamespaces_args
+      implements org.apache.thrift.TBase<listNamespaces_args, listNamespaces_args._Fields>,
+      java.io.Serializable, Cloneable, Comparable<listNamespaces_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("listNamespaces_args");
           
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new listNamespaces_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new listNamespaces_argsTupleSchemeFactory();
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaces_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaces_argsTupleSchemeFactory();
          -
          -
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -;
          +      ;
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -51359,19 +55717,19 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     default:
                       return null;
                   }
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -51399,11 +55757,14 @@ public java.lang.String getFieldName() {
                   return _fieldName;
                 }
               }
          +
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaces_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaces_args.class,
          +        metaDataMap);
               }
           
               public listNamespaces_args() {
          @@ -51423,7 +55784,8 @@ public listNamespaces_args deepCopy() {
               public void clear() {
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
                 }
               }
          @@ -51435,7 +55797,10 @@ public java.lang.Object getFieldValue(_Fields field) {
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
          @@ -51448,16 +55813,13 @@ public boolean isSet(_Fields field) {
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof listNamespaces_args)
          -        return this.equals((listNamespaces_args)that);
          +      if (that instanceof listNamespaces_args) return this.equals((listNamespaces_args) that);
                 return false;
               }
           
               public boolean equals(listNamespaces_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 return true;
               }
          @@ -51485,11 +55847,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -51509,35 +55873,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class listNamespaces_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class listNamespaces_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public listNamespaces_argsStandardScheme getScheme() {
                   return new listNamespaces_argsStandardScheme();
                 }
               }
           
-    private static class listNamespaces_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<listNamespaces_args> {
+    private static class listNamespaces_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<listNamespaces_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -51548,11 +55917,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_args
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -51562,48 +55933,69 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_arg
           
               }
           
          -    private static class listNamespaces_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class listNamespaces_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public listNamespaces_argsTupleScheme getScheme() {
                   return new listNamespaces_argsTupleScheme();
                 }
               }
           
-    private static class listNamespaces_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<listNamespaces_args> {
+    private static class listNamespaces_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<listNamespaces_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaces_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaces_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class listNamespaces_result implements org.apache.thrift.TBase<listNamespaces_result, listNamespaces_result._Fields>, java.io.Serializable, Cloneable, Comparable<listNamespaces_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("listNamespaces_result");
          +  public static class listNamespaces_result
+      implements org.apache.thrift.TBase<listNamespaces_result, listNamespaces_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<listNamespaces_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("listNamespaces_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new listNamespaces_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new listNamespaces_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new listNamespaces_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new listNamespaces_resultTupleSchemeFactory();
           
     public @org.apache.thrift.annotation.Nullable java.util.List<java.lang.String> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -51616,7 +56008,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -51627,12 +56019,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -51664,23 +56056,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.STRING))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaces_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(listNamespaces_result.class,
          +        metaDataMap);
               }
           
               public listNamespaces_result() {
               }
           
          -    public listNamespaces_result(
           -      java.util.List<java.lang.String> success,
          -      TIOError io)
          -    {
           +    public listNamespaces_result(java.util.List<java.lang.String> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -51691,7 +56088,8 @@ public listNamespaces_result(
                */
               public listNamespaces_result(listNamespaces_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<java.lang.String> __this__success = new java.util.ArrayList<java.lang.String>(other.success);
           +        java.util.List<java.lang.String> __this__success =
           +            new java.util.ArrayList<java.lang.String>(other.success);
                   this.success = __this__success;
                 }
                 if (other.isSetIo()) {
           @@ -51730,7 +56128,8 @@ public java.util.List<java.lang.String> getSuccess() {
                 return this.success;
               }
           
           -    public listNamespaces_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<java.lang.String> success) {
           +    public listNamespaces_result setSuccess(
           +        @org.apache.thrift.annotation.Nullable java.util.List<java.lang.String> success) {
                 this.success = success;
                 return this;
               }
          @@ -51775,23 +56174,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<java.lang.String>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<java.lang.String>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -51799,60 +56199,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof listNamespaces_result)
          -        return this.equals((listNamespaces_result)that);
          +      if (that instanceof listNamespaces_result) return this.equals((listNamespaces_result) that);
                 return false;
               }
           
               public boolean equals(listNamespaces_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -51863,12 +56259,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -51909,13 +56303,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -51948,35 +56344,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class listNamespaces_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class listNamespaces_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public listNamespaces_resultStandardScheme getScheme() {
                   return new listNamespaces_resultStandardScheme();
                 }
               }
           
           -    private static class listNamespaces_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<listNamespaces_result> {
           +    private static class listNamespaces_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<listNamespaces_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -51985,16 +56386,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_resu
                           {
                             org.apache.thrift.protocol.TList _list342 = iprot.readListBegin();
                              struct.success = new java.util.ArrayList<java.lang.String>(_list342.size);
          -                  @org.apache.thrift.annotation.Nullable java.lang.String _elem343;
          -                  for (int _i344 = 0; _i344 < _list342.size; ++_i344)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  java.lang.String _elem343;
          +                  for (int _i344 = 0; _i344 < _list342.size; ++_i344) {
                               _elem343 = iprot.readString();
                               struct.success.add(_elem343);
                             }
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -52003,7 +56404,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_resu
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -52014,20 +56415,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, listNamespaces_resu
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
          -            for (java.lang.String _iter345 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRING, struct.success.size()));
          +            for (java.lang.String _iter345 : struct.success) {
                         oprot.writeString(_iter345);
                       }
                       oprot.writeListEnd();
          @@ -52045,17 +56448,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, listNamespaces_res
           
               }
           
          -    private static class listNamespaces_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class listNamespaces_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public listNamespaces_resultTupleScheme getScheme() {
                   return new listNamespaces_resultTupleScheme();
                 }
               }
           
           -    private static class listNamespaces_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<listNamespaces_result> {
           +    private static class listNamespaces_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<listNamespaces_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -52067,8 +56474,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_resu
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (java.lang.String _iter346 : struct.success)
          -            {
          +            for (java.lang.String _iter346 : struct.success) {
                         oprot.writeString(_iter346);
                       }
                     }
          @@ -52079,16 +56485,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, listNamespaces_resu
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaces_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaces_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list347 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
          +            org.apache.thrift.protocol.TList _list347 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRING);
                        struct.success = new java.util.ArrayList<java.lang.String>(_list347.size);
          -            @org.apache.thrift.annotation.Nullable java.lang.String _elem348;
          -            for (int _i349 = 0; _i349 < _list347.size; ++_i349)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            java.lang.String _elem348;
          +            for (int _i349 = 0; _i349 < _list347.size; ++_i349) {
                         _elem348 = iprot.readString();
                         struct.success.add(_elem348);
                       }
          @@ -52103,24 +56512,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, listNamespaces_resul
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getThriftServerType_args implements org.apache.thrift.TBase<getThriftServerType_args, getThriftServerType_args._Fields>, java.io.Serializable, Cloneable, Comparable<getThriftServerType_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftServerType_args");
          -
           +  public static class getThriftServerType_args implements
           +      org.apache.thrift.TBase<getThriftServerType_args, getThriftServerType_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getThriftServerType_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getThriftServerType_args");
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getThriftServerType_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getThriftServerType_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getThriftServerType_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getThriftServerType_argsTupleSchemeFactory();
           
          -
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -;
          +      ;
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -52133,19 +56552,19 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     default:
                       return null;
                   }
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -52173,11 +56592,14 @@ public java.lang.String getFieldName() {
                   return _fieldName;
                 }
               }
          +
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_args.class,
          +        metaDataMap);
               }
           
               public getThriftServerType_args() {
          @@ -52197,7 +56619,8 @@ public getThriftServerType_args deepCopy() {
               public void clear() {
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
                 }
               }
          @@ -52209,7 +56632,10 @@ public java.lang.Object getFieldValue(_Fields field) {
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
          @@ -52223,15 +56649,13 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getThriftServerType_args)
          -        return this.equals((getThriftServerType_args)that);
          +        return this.equals((getThriftServerType_args) that);
                 return false;
               }
           
               public boolean equals(getThriftServerType_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 return true;
               }
          @@ -52259,11 +56683,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -52283,35 +56709,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getThriftServerType_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getThriftServerType_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getThriftServerType_argsStandardScheme getScheme() {
                   return new getThriftServerType_argsStandardScheme();
                 }
               }
           
           -    private static class getThriftServerType_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getThriftServerType_args> {
           +    private static class getThriftServerType_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getThriftServerType_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -52322,11 +56753,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -52336,53 +56769,71 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerTyp
           
               }
           
          -    private static class getThriftServerType_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getThriftServerType_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getThriftServerType_argsTupleScheme getScheme() {
                   return new getThriftServerType_argsTupleScheme();
                 }
               }
           
           -    private static class getThriftServerType_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getThriftServerType_args> {
           +    private static class getThriftServerType_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getThriftServerType_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getThriftServerType_result implements org.apache.thrift.TBase<getThriftServerType_result, getThriftServerType_result._Fields>, java.io.Serializable, Cloneable, Comparable<getThriftServerType_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftServerType_result");
           +  public static class getThriftServerType_result implements
           +      org.apache.thrift.TBase<getThriftServerType_result, getThriftServerType_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getThriftServerType_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getThriftServerType_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32, (short)0);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.I32,
          +            (short) 0);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getThriftServerType_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getThriftServerType_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getThriftServerType_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getThriftServerType_resultTupleSchemeFactory();
           
               /**
          -     * 
                * @see TThriftServerType
                */
               public @org.apache.thrift.annotation.Nullable TThriftServerType success; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
          -       * 
                  * @see TThriftServerType
                  */
          -      SUCCESS((short)0, "success");
          +      SUCCESS((short) 0, "success");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -52395,7 +56846,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     default:
          @@ -52404,12 +56855,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -52441,19 +56892,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TThriftServerType.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +                TThriftServerType.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftServerType_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getThriftServerType_result.class, metaDataMap);
               }
           
               public getThriftServerType_result() {
               }
           
          -    public getThriftServerType_result(
          -      TThriftServerType success)
          -    {
          +    public getThriftServerType_result(TThriftServerType success) {
                 this();
                 this.success = success;
               }
          @@ -52477,7 +56931,6 @@ public void clear() {
               }
           
               /**
          -     * 
                * @see TThriftServerType
                */
               @org.apache.thrift.annotation.Nullable
          @@ -52486,10 +56939,10 @@ public TThriftServerType getSuccess() {
               }
           
               /**
          -     * 
                * @see TThriftServerType
                */
          -    public getThriftServerType_result setSuccess(@org.apache.thrift.annotation.Nullable TThriftServerType success) {
          +    public getThriftServerType_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable TThriftServerType success) {
                 this.success = success;
                 return this;
               }
          @@ -52509,15 +56962,16 @@ public void setSuccessIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((TThriftServerType)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((TThriftServerType) value);
          +          }
          +          break;
           
                 }
               }
          @@ -52525,22 +56979,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          +        case SUCCESS:
          +          return isSetSuccess();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -52548,23 +57005,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getThriftServerType_result)
          -        return this.equals((getThriftServerType_result)that);
          +        return this.equals((getThriftServerType_result) that);
                 return false;
               }
           
               public boolean equals(getThriftServerType_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 return true;
          @@ -52575,8 +57028,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.getValue();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.getValue();
           
                 return hashCode;
               }
          @@ -52607,13 +57059,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -52638,43 +57092,49 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getThriftServerType_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getThriftServerType_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getThriftServerType_resultStandardScheme getScheme() {
                   return new getThriftServerType_resultStandardScheme();
                 }
               }
           
           -    private static class getThriftServerType_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getThriftServerType_result> {
           +    private static class getThriftServerType_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<getThriftServerType_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getThriftServerType_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
                       case 0: // SUCCESS
                         if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -                struct.success = org.apache.hadoop.hbase.thrift2.generated.TThriftServerType.findByValue(iprot.readI32());
          +                struct.success = org.apache.hadoop.hbase.thrift2.generated.TThriftServerType
          +                    .findByValue(iprot.readI32());
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -52685,11 +57145,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftServerType
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerType_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getThriftServerType_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -52704,17 +57166,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftServerTyp
           
               }
           
          -    private static class getThriftServerType_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getThriftServerType_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getThriftServerType_resultTupleScheme getScheme() {
                   return new getThriftServerType_resultTupleScheme();
                 }
               }
           
           -    private static class getThriftServerType_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getThriftServerType_result> {
           +    private static class getThriftServerType_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<getThriftServerType_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getThriftServerType_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -52726,34 +57192,47 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getThriftServerType
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getThriftServerType_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
          -          struct.success = org.apache.hadoop.hbase.thrift2.generated.TThriftServerType.findByValue(iprot.readI32());
          +          struct.success = org.apache.hadoop.hbase.thrift2.generated.TThriftServerType
          +              .findByValue(iprot.readI32());
                     struct.setSuccessIsSet(true);
                   }
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
           +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class getClusterId_args implements org.apache.thrift.TBase<getClusterId_args, getClusterId_args._Fields>, java.io.Serializable, Cloneable, Comparable<getClusterId_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterId_args");
          -
          +  public static class getClusterId_args
           +      implements org.apache.thrift.TBase<getClusterId_args, getClusterId_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<getClusterId_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getClusterId_args");
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getClusterId_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getClusterId_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getClusterId_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getClusterId_argsTupleSchemeFactory();
           
          -
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -;
          +      ;
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -52766,19 +57245,19 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     default:
                       return null;
                   }
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -52806,11 +57285,14 @@ public java.lang.String getFieldName() {
                   return _fieldName;
                 }
               }
          +
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_args.class,
          +        metaDataMap);
               }
           
               public getClusterId_args() {
          @@ -52830,7 +57312,8 @@ public getClusterId_args deepCopy() {
               public void clear() {
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
                 }
               }
          @@ -52842,7 +57325,10 @@ public java.lang.Object getFieldValue(_Fields field) {
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
          @@ -52855,16 +57341,13 @@ public boolean isSet(_Fields field) {
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getClusterId_args)
          -        return this.equals((getClusterId_args)that);
          +      if (that instanceof getClusterId_args) return this.equals((getClusterId_args) that);
                 return false;
               }
           
               public boolean equals(getClusterId_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 return true;
               }
          @@ -52892,11 +57375,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -52916,35 +57401,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getClusterId_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getClusterId_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getClusterId_argsStandardScheme getScheme() {
                   return new getClusterId_argsStandardScheme();
                 }
               }
           
          -    private static class getClusterId_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<getClusterId_args> {
          +    private static class getClusterId_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getClusterId_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -52955,11 +57445,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_args s
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -52969,45 +57461,65 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_args
           
               }
           
          -    private static class getClusterId_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getClusterId_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getClusterId_argsTupleScheme getScheme() {
                   return new getClusterId_argsTupleScheme();
                 }
               }
           
          -    private static class getClusterId_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getClusterId_args> {
          +    private static class getClusterId_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getClusterId_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getClusterId_result implements org.apache.thrift.TBase<getClusterId_result, getClusterId_result._Fields>, java.io.Serializable, Cloneable, Comparable<getClusterId_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterId_result");
          +  public static class getClusterId_result
          +      implements org.apache.thrift.TBase<getClusterId_result, getClusterId_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getClusterId_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getClusterId_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING,
          +            (short) 0);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getClusterId_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getClusterId_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getClusterId_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getClusterId_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.lang.String success; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success");
          +      SUCCESS((short) 0, "success");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -53020,7 +57532,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     default:
          @@ -53029,12 +57541,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -53066,19 +57578,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterId_result.class,
          +        metaDataMap);
               }
           
               public getClusterId_result() {
               }
           
          -    public getClusterId_result(
          -      java.lang.String success)
          -    {
          +    public getClusterId_result(java.lang.String success) {
                 this();
                 this.success = success;
               }
          @@ -53106,7 +57621,8 @@ public java.lang.String getSuccess() {
                 return this.success;
               }
           
          -    public getClusterId_result setSuccess(@org.apache.thrift.annotation.Nullable java.lang.String success) {
          +    public getClusterId_result
          +        setSuccess(@org.apache.thrift.annotation.Nullable java.lang.String success) {
                 this.success = success;
                 return this;
               }
          @@ -53126,15 +57642,16 @@ public void setSuccessIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.String)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.String) value);
          +          }
          +          break;
           
                 }
               }
          @@ -53142,46 +57659,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          +        case SUCCESS:
          +          return isSetSuccess();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof getClusterId_result)
          -        return this.equals((getClusterId_result)that);
          +      if (that instanceof getClusterId_result) return this.equals((getClusterId_result) that);
                 return false;
               }
           
               public boolean equals(getClusterId_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 return true;
          @@ -53192,8 +57707,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 return hashCode;
               }
          @@ -53224,13 +57738,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -53255,35 +57771,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getClusterId_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getClusterId_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getClusterId_resultStandardScheme getScheme() {
                   return new getClusterId_resultStandardScheme();
                 }
               }
           
          -    private static class getClusterId_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getClusterId_result> {
          +    private static class getClusterId_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getClusterId_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -53291,7 +57812,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result
                         if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                           struct.success = iprot.readString();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -53302,11 +57823,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterId_result
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -53321,17 +57844,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterId_resul
           
               }
           
          -    private static class getClusterId_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getClusterId_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getClusterId_resultTupleScheme getScheme() {
                   return new getClusterId_resultTupleScheme();
                 }
               }
           
          -    private static class getClusterId_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getClusterId_result> {
          +    private static class getClusterId_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getClusterId_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -53343,8 +57870,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getClusterId_result
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     struct.success = iprot.readString();
          @@ -53353,19 +57882,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getClusterId_result
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getSlowLogResponses_args implements org.apache.thrift.TBase<getSlowLogResponses_args, getSlowLogResponses_args._Fields>, java.io.Serializable, Cloneable, Comparable<getSlowLogResponses_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getSlowLogResponses_args");
          +  public static class getSlowLogResponses_args implements
          +      org.apache.thrift.TBase<getSlowLogResponses_args, getSlowLogResponses_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getSlowLogResponses_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getSlowLogResponses_args");
           
          -    private static final org.apache.thrift.protocol.TField SERVER_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("serverNames", org.apache.thrift.protocol.TType.SET, (short)1);
          -    private static final org.apache.thrift.protocol.TField LOG_QUERY_FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("logQueryFilter", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          +    private static final org.apache.thrift.protocol.TField SERVER_NAMES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("serverNames", org.apache.thrift.protocol.TType.SET,
          +            (short) 1);
          +    private static final org.apache.thrift.protocol.TField LOG_QUERY_FILTER_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("logQueryFilter",
          +            org.apache.thrift.protocol.TType.STRUCT, (short) 2);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getSlowLogResponses_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getSlowLogResponses_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getSlowLogResponses_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getSlowLogResponses_argsTupleSchemeFactory();
           
               /**
                * @param serverNames Server names to get slowlog responses from
          @@ -53376,18 +57917,22 @@ public static class getSlowLogResponses_args implements org.apache.thrift.TBase<
                */
               public @org.apache.thrift.annotation.Nullable TLogQueryFilter logQueryFilter; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * @param serverNames Server names to get slowlog responses from
                  */
          -      SERVER_NAMES((short)1, "serverNames"),
          +      SERVER_NAMES((short) 1, "serverNames"),
                 /**
                  * @param logQueryFilter filter to be used if provided
                  */
          -      LOG_QUERY_FILTER((short)2, "logQueryFilter");
          +      LOG_QUERY_FILTER((short) 2, "logQueryFilter");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -53400,7 +57945,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // SERVER_NAMES
                       return SERVER_NAMES;
                     case 2: // LOG_QUERY_FILTER
          @@ -53411,12 +57956,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -53448,23 +57993,29 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SERVER_NAMES, new org.apache.thrift.meta_data.FieldMetaData("serverNames", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TServerName.class))));
          -      tmpMap.put(_Fields.LOG_QUERY_FILTER, new org.apache.thrift.meta_data.FieldMetaData("logQueryFilter", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TLogQueryFilter.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SERVER_NAMES,
          +        new org.apache.thrift.meta_data.FieldMetaData("serverNames",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TServerName.class))));
          +      tmpMap.put(_Fields.LOG_QUERY_FILTER,
          +        new org.apache.thrift.meta_data.FieldMetaData("logQueryFilter",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TLogQueryFilter.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getSlowLogResponses_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getSlowLogResponses_args.class,
          +        metaDataMap);
               }
           
               public getSlowLogResponses_args() {
               }
           
          -    public getSlowLogResponses_args(
          -      java.util.Set<TServerName> serverNames,
          -      TLogQueryFilter logQueryFilter)
          -    {
          +    public getSlowLogResponses_args(java.util.Set<TServerName> serverNames,
          +        TLogQueryFilter logQueryFilter) {
                 this();
                 this.serverNames = serverNames;
                 this.logQueryFilter = logQueryFilter;
          @@ -53475,7 +58026,8 @@ public getSlowLogResponses_args(
                */
               public getSlowLogResponses_args(getSlowLogResponses_args other) {
                 if (other.isSetServerNames()) {
          -        java.util.Set<TServerName> __this__serverNames = new java.util.HashSet<TServerName>(other.serverNames.size());
          +        java.util.Set<TServerName> __this__serverNames =
          +            new java.util.HashSet<TServerName>(other.serverNames.size());
                   for (TServerName other_element : other.serverNames) {
                     __this__serverNames.add(new TServerName(other_element));
                   }
          @@ -53523,7 +58075,8 @@ public java.util.Set<TServerName> getServerNames() {
               /**
                * @param serverNames Server names to get slowlog responses from
                */
          -    public getSlowLogResponses_args setServerNames(@org.apache.thrift.annotation.Nullable java.util.Set<TServerName> serverNames) {
          +    public getSlowLogResponses_args setServerNames(
          +        @org.apache.thrift.annotation.Nullable java.util.Set<TServerName> serverNames) {
                 this.serverNames = serverNames;
                 return this;
               }
          @@ -53554,7 +58107,8 @@ public TLogQueryFilter getLogQueryFilter() {
               /**
                * @param logQueryFilter filter to be used if provided
                */
          -    public getSlowLogResponses_args setLogQueryFilter(@org.apache.thrift.annotation.Nullable TLogQueryFilter logQueryFilter) {
          +    public getSlowLogResponses_args
          +        setLogQueryFilter(@org.apache.thrift.annotation.Nullable TLogQueryFilter logQueryFilter) {
                 this.logQueryFilter = logQueryFilter;
                 return this;
               }
          @@ -53563,7 +58117,9 @@ public void unsetLogQueryFilter() {
                 this.logQueryFilter = null;
               }
           
          -    /** Returns true if field logQueryFilter is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field logQueryFilter is set (has been assigned a value) and false otherwise
          +     */
               public boolean isSetLogQueryFilter() {
                 return this.logQueryFilter != null;
               }
          @@ -53574,23 +58130,24 @@ public void setLogQueryFilterIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SERVER_NAMES:
          -        if (value == null) {
          -          unsetServerNames();
          -        } else {
          -          setServerNames((java.util.Set<TServerName>)value);
          -        }
          -        break;
          +        case SERVER_NAMES:
          +          if (value == null) {
          +            unsetServerNames();
          +          } else {
          +            setServerNames((java.util.Set<TServerName>) value);
          +          }
          +          break;
           
          -      case LOG_QUERY_FILTER:
          -        if (value == null) {
          -          unsetLogQueryFilter();
          -        } else {
          -          setLogQueryFilter((TLogQueryFilter)value);
          -        }
          -        break;
          +        case LOG_QUERY_FILTER:
          +          if (value == null) {
          +            unsetLogQueryFilter();
          +          } else {
          +            setLogQueryFilter((TLogQueryFilter) value);
          +          }
          +          break;
           
                 }
               }
          @@ -53598,27 +58155,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SERVER_NAMES:
          -        return getServerNames();
          +        case SERVER_NAMES:
          +          return getServerNames();
           
          -      case LOG_QUERY_FILTER:
          -        return getLogQueryFilter();
          +        case LOG_QUERY_FILTER:
          +          return getLogQueryFilter();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SERVER_NAMES:
          -        return isSetServerNames();
          -      case LOG_QUERY_FILTER:
          -        return isSetLogQueryFilter();
          +        case SERVER_NAMES:
          +          return isSetServerNames();
          +        case LOG_QUERY_FILTER:
          +          return isSetLogQueryFilter();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -53626,32 +58186,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getSlowLogResponses_args)
          -        return this.equals((getSlowLogResponses_args)that);
          +        return this.equals((getSlowLogResponses_args) that);
                 return false;
               }
           
               public boolean equals(getSlowLogResponses_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_serverNames = true && this.isSetServerNames();
                 boolean that_present_serverNames = true && that.isSetServerNames();
                 if (this_present_serverNames || that_present_serverNames) {
          -        if (!(this_present_serverNames && that_present_serverNames))
          -          return false;
          -        if (!this.serverNames.equals(that.serverNames))
          -          return false;
          +        if (!(this_present_serverNames && that_present_serverNames)) return false;
          +        if (!this.serverNames.equals(that.serverNames)) return false;
                 }
           
                 boolean this_present_logQueryFilter = true && this.isSetLogQueryFilter();
                 boolean that_present_logQueryFilter = true && that.isSetLogQueryFilter();
                 if (this_present_logQueryFilter || that_present_logQueryFilter) {
          -        if (!(this_present_logQueryFilter && that_present_logQueryFilter))
          -          return false;
          -        if (!this.logQueryFilter.equals(that.logQueryFilter))
          -          return false;
          +        if (!(this_present_logQueryFilter && that_present_logQueryFilter)) return false;
          +        if (!this.logQueryFilter.equals(that.logQueryFilter)) return false;
                 }
           
                 return true;
          @@ -53662,12 +58216,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetServerNames()) ? 131071 : 524287);
          -      if (isSetServerNames())
          -        hashCode = hashCode * 8191 + serverNames.hashCode();
          +      if (isSetServerNames()) hashCode = hashCode * 8191 + serverNames.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetLogQueryFilter()) ? 131071 : 524287);
          -      if (isSetLogQueryFilter())
          -        hashCode = hashCode * 8191 + logQueryFilter.hashCode();
          +      if (isSetLogQueryFilter()) hashCode = hashCode * 8191 + logQueryFilter.hashCode();
           
                 return hashCode;
               }
          @@ -53685,17 +58237,20 @@ public int compareTo(getSlowLogResponses_args other) {
                   return lastComparison;
                 }
                 if (isSetServerNames()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serverNames, other.serverNames);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.serverNames, other.serverNames);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
                 }
          -      lastComparison = java.lang.Boolean.compare(isSetLogQueryFilter(), other.isSetLogQueryFilter());
          +      lastComparison =
          +          java.lang.Boolean.compare(isSetLogQueryFilter(), other.isSetLogQueryFilter());
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
                 if (isSetLogQueryFilter()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.logQueryFilter, other.logQueryFilter);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.logQueryFilter, other.logQueryFilter);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -53708,11 +58263,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -53750,35 +58307,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getSlowLogResponses_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getSlowLogResponses_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getSlowLogResponses_argsStandardScheme getScheme() {
                   return new getSlowLogResponses_argsStandardScheme();
                 }
               }
           
          -    private static class getSlowLogResponses_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme {
          +    private static class getSlowLogResponses_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -53786,10 +58348,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses
                         if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
                           {
                             org.apache.thrift.protocol.TSet _set350 = iprot.readSetBegin();
          -                  struct.serverNames = new java.util.HashSet<TServerName>(2*_set350.size);
          -                  @org.apache.thrift.annotation.Nullable TServerName _elem351;
          -                  for (int _i352 = 0; _i352 < _set350.size; ++_i352)
          -                  {
          +                  struct.serverNames = new java.util.HashSet<TServerName>(2 * _set350.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  TServerName _elem351;
          +                  for (int _i352 = 0; _i352 < _set350.size; ++_i352) {
                               _elem351 = new TServerName();
                               _elem351.read(iprot);
                               struct.serverNames.add(_elem351);
          @@ -53797,7 +58359,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses
                             iprot.readSetEnd();
                           }
                           struct.setServerNamesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -53806,7 +58368,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses
                           struct.logQueryFilter = new TLogQueryFilter();
                           struct.logQueryFilter.read(iprot);
                           struct.setLogQueryFilterIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -53817,20 +58379,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getSlowLogResponses_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, getSlowLogResponses_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.serverNames != null) {
                     oprot.writeFieldBegin(SERVER_NAMES_FIELD_DESC);
                     {
          -            oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRUCT, struct.serverNames.size()));
          -            for (TServerName _iter353 : struct.serverNames)
          -            {
          +            oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.serverNames.size()));
          +            for (TServerName _iter353 : struct.serverNames) {
                         _iter353.write(oprot);
                       }
                       oprot.writeSetEnd();
          @@ -53848,17 +58412,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getSlowLogResponse
           
               }
           
          -    private static class getSlowLogResponses_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getSlowLogResponses_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getSlowLogResponses_argsTupleScheme getScheme() {
                   return new getSlowLogResponses_argsTupleScheme();
                 }
               }
           
          -    private static class getSlowLogResponses_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<getSlowLogResponses_args> {
          +    private static class getSlowLogResponses_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getSlowLogResponses_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetServerNames()) {
                     optionals.set(0);
          @@ -53870,8 +58438,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses
                   if (struct.isSetServerNames()) {
                     {
                       oprot.writeI32(struct.serverNames.size());
          -            for (TServerName _iter354 : struct.serverNames)
          -            {
          +            for (TServerName _iter354 : struct.serverNames) {
                         _iter354.write(oprot);
                       }
                     }
          @@ -53882,16 +58449,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TSet _set355 = iprot.readSetBegin(org.apache.thrift.protocol.TType.STRUCT);
          -            struct.serverNames = new java.util.HashSet<TServerName>(2*_set355.size);
          -            @org.apache.thrift.annotation.Nullable TServerName _elem356;
          -            for (int _i357 = 0; _i357 < _set355.size; ++_i357)
          -            {
          +            org.apache.thrift.protocol.TSet _set355 =
          +                iprot.readSetBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            struct.serverNames = new java.util.HashSet<TServerName>(2 * _set355.size);
          +            @org.apache.thrift.annotation.Nullable
          +            TServerName _elem356;
          +            for (int _i357 = 0; _i357 < _set355.size; ++_i357) {
                         _elem356 = new TServerName();
                         _elem356.read(iprot);
                         struct.serverNames.add(_elem356);
          @@ -53907,29 +58477,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class getSlowLogResponses_result implements org.apache.thrift.TBase<getSlowLogResponses_result, getSlowLogResponses_result._Fields>, java.io.Serializable, Cloneable, Comparable<getSlowLogResponses_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getSlowLogResponses_result");
          +  public static class getSlowLogResponses_result implements
          +      org.apache.thrift.TBase<getSlowLogResponses_result, getSlowLogResponses_result._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<getSlowLogResponses_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("getSlowLogResponses_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getSlowLogResponses_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getSlowLogResponses_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new getSlowLogResponses_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new getSlowLogResponses_resultTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable java.util.List<TOnlineLogRecord> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -53942,7 +58527,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -53953,12 +58538,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -53990,23 +58575,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOnlineLogRecord.class))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TOnlineLogRecord.class))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getSlowLogResponses_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(getSlowLogResponses_result.class, metaDataMap);
               }
           
               public getSlowLogResponses_result() {
               }
           
          -    public getSlowLogResponses_result(
          -      java.util.List<TOnlineLogRecord> success,
          -      TIOError io)
          -    {
          +    public getSlowLogResponses_result(java.util.List<TOnlineLogRecord> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -54017,7 +58607,8 @@ public getSlowLogResponses_result(
                */
               public getSlowLogResponses_result(getSlowLogResponses_result other) {
                 if (other.isSetSuccess()) {
          -        java.util.List<TOnlineLogRecord> __this__success = new java.util.ArrayList<TOnlineLogRecord>(other.success.size());
          +        java.util.List<TOnlineLogRecord> __this__success =
          +            new java.util.ArrayList<TOnlineLogRecord>(other.success.size());
                   for (TOnlineLogRecord other_element : other.success) {
                     __this__success.add(new TOnlineLogRecord(other_element));
                   }
          @@ -54059,7 +58650,8 @@ public java.util.List<TOnlineLogRecord> getSuccess() {
                 return this.success;
               }
           
          -    public getSlowLogResponses_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<TOnlineLogRecord> success) {
          +    public getSlowLogResponses_result setSuccess(
          +        @org.apache.thrift.annotation.Nullable java.util.List<TOnlineLogRecord> success) {
                 this.success = success;
                 return this;
               }
          @@ -54104,23 +58696,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.util.List<TOnlineLogRecord>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.util.List<TOnlineLogRecord>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -54128,27 +58721,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -54156,32 +58752,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof getSlowLogResponses_result)
          -        return this.equals((getSlowLogResponses_result)that);
          +        return this.equals((getSlowLogResponses_result) that);
                 return false;
               }
           
               public boolean equals(getSlowLogResponses_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -54192,12 +58782,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -54238,13 +58826,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -54277,35 +58867,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class getSlowLogResponses_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getSlowLogResponses_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getSlowLogResponses_resultStandardScheme getScheme() {
                   return new getSlowLogResponses_resultStandardScheme();
                 }
               }
           
          -    private static class getSlowLogResponses_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<getSlowLogResponses_result> {
          +    private static class getSlowLogResponses_resultStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<getSlowLogResponses_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          getSlowLogResponses_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -54314,9 +58909,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses
                           {
                             org.apache.thrift.protocol.TList _list358 = iprot.readListBegin();
                             struct.success = new java.util.ArrayList<TOnlineLogRecord>(_list358.size);
          -                  @org.apache.thrift.annotation.Nullable TOnlineLogRecord _elem359;
          -                  for (int _i360 = 0; _i360 < _list358.size; ++_i360)
          -                  {
          +                  @org.apache.thrift.annotation.Nullable
          +                  TOnlineLogRecord _elem359;
          +                  for (int _i360 = 0; _i360 < _list358.size; ++_i360) {
                               _elem359 = new TOnlineLogRecord();
                               _elem359.read(iprot);
                               struct.success.add(_elem359);
          @@ -54324,7 +58919,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54333,7 +58928,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54344,20 +58939,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getSlowLogResponses
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, getSlowLogResponses_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          getSlowLogResponses_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          -            for (TOnlineLogRecord _iter361 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
          +            for (TOnlineLogRecord _iter361 : struct.success) {
                         _iter361.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -54375,17 +58972,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getSlowLogResponse
           
               }
           
          -    private static class getSlowLogResponses_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class getSlowLogResponses_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public getSlowLogResponses_resultTupleScheme getScheme() {
                   return new getSlowLogResponses_resultTupleScheme();
                 }
               }
           
          -    private static class getSlowLogResponses_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<getSlowLogResponses_result> {
          +    private static class getSlowLogResponses_resultTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<getSlowLogResponses_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          getSlowLogResponses_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -54397,8 +58998,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (TOnlineLogRecord _iter362 : struct.success)
          -            {
          +            for (TOnlineLogRecord _iter362 : struct.success) {
                         _iter362.write(oprot);
                       }
                     }
          @@ -54409,16 +59009,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list363 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            org.apache.thrift.protocol.TList _list363 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                       struct.success = new java.util.ArrayList<TOnlineLogRecord>(_list363.size);
          -            @org.apache.thrift.annotation.Nullable TOnlineLogRecord _elem364;
          -            for (int _i365 = 0; _i365 < _list363.size; ++_i365)
          -            {
          +            @org.apache.thrift.annotation.Nullable
          +            TOnlineLogRecord _elem364;
          +            for (int _i365 = 0; _i365 < _list363.size; ++_i365) {
                         _elem364 = new TOnlineLogRecord();
                         _elem364.read(iprot);
                         struct.success.add(_elem364);
          @@ -54434,32 +59037,46 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getSlowLogResponses_
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          -  public static class clearSlowLogResponses_args implements org.apache.thrift.TBase<clearSlowLogResponses_args, clearSlowLogResponses_args._Fields>, java.io.Serializable, Cloneable, Comparable<clearSlowLogResponses_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clearSlowLogResponses_args");
          +  public static class clearSlowLogResponses_args implements
          +      org.apache.thrift.TBase<clearSlowLogResponses_args, clearSlowLogResponses_args._Fields>,
          +      java.io.Serializable, Cloneable, Comparable<clearSlowLogResponses_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("clearSlowLogResponses_args");
           
          -    private static final org.apache.thrift.protocol.TField SERVER_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("serverNames", org.apache.thrift.protocol.TType.SET, (short)1);
          +    private static final org.apache.thrift.protocol.TField SERVER_NAMES_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("serverNames", org.apache.thrift.protocol.TType.SET,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new clearSlowLogResponses_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new clearSlowLogResponses_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new clearSlowLogResponses_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new clearSlowLogResponses_argsTupleSchemeFactory();
           
               /**
                * @param serverNames Set of Server names to clean slowlog responses from
                */
               public @org.apache.thrift.annotation.Nullable java.util.Set<TServerName> serverNames; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                 /**
                  * @param serverNames Set of Server names to clean slowlog responses from
                  */
          -      SERVER_NAMES((short)1, "serverNames");
          +      SERVER_NAMES((short) 1, "serverNames");
           
          -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +      private static final java.util.Map<java.lang.String, _Fields> byName =
          +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -54472,7 +59089,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // SERVER_NAMES
                       return SERVER_NAMES;
                     default:
          @@ -54481,12 +59098,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -54518,20 +59135,23 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SERVER_NAMES, new org.apache.thrift.meta_data.FieldMetaData("serverNames", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, 
          -              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TServerName.class))));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SERVER_NAMES,
          +        new org.apache.thrift.meta_data.FieldMetaData("serverNames",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET,
          +                new org.apache.thrift.meta_data.StructMetaData(
          +                    org.apache.thrift.protocol.TType.STRUCT, TServerName.class))));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clearSlowLogResponses_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(clearSlowLogResponses_args.class, metaDataMap);
               }
           
               public clearSlowLogResponses_args() {
               }
           
          -    public clearSlowLogResponses_args(
          -      java.util.Set<TServerName> serverNames)
          -    {
          +    public clearSlowLogResponses_args(java.util.Set<TServerName> serverNames) {
                 this();
                 this.serverNames = serverNames;
               }
          @@ -54541,7 +59161,8 @@ public clearSlowLogResponses_args(
                */
               public clearSlowLogResponses_args(clearSlowLogResponses_args other) {
                 if (other.isSetServerNames()) {
          -        java.util.Set<TServerName> __this__serverNames = new java.util.HashSet<TServerName>(other.serverNames.size());
          +        java.util.Set<TServerName> __this__serverNames =
          +            new java.util.HashSet<TServerName>(other.serverNames.size());
                   for (TServerName other_element : other.serverNames) {
                     __this__serverNames.add(new TServerName(other_element));
                   }
          @@ -54585,7 +59206,8 @@ public java.util.Set<TServerName> getServerNames() {
               /**
                * @param serverNames Set of Server names to clean slowlog responses from
                */
          -    public clearSlowLogResponses_args setServerNames(@org.apache.thrift.annotation.Nullable java.util.Set<TServerName> serverNames) {
          +    public clearSlowLogResponses_args setServerNames(
          +        @org.apache.thrift.annotation.Nullable java.util.Set<TServerName> serverNames) {
                 this.serverNames = serverNames;
                 return this;
               }
          @@ -54605,15 +59227,16 @@ public void setServerNamesIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SERVER_NAMES:
          -        if (value == null) {
          -          unsetServerNames();
          -        } else {
          -          setServerNames((java.util.Set<TServerName>)value);
          -        }
          -        break;
          +        case SERVER_NAMES:
          +          if (value == null) {
          +            unsetServerNames();
          +          } else {
          +            setServerNames((java.util.Set<TServerName>) value);
          +          }
          +          break;
           
                 }
               }
          @@ -54621,22 +59244,25 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SERVER_NAMES:
          -        return getServerNames();
          +        case SERVER_NAMES:
          +          return getServerNames();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SERVER_NAMES:
          -        return isSetServerNames();
          +        case SERVER_NAMES:
          +          return isSetServerNames();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -54644,23 +59270,19 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof clearSlowLogResponses_args)
          -        return this.equals((clearSlowLogResponses_args)that);
          +        return this.equals((clearSlowLogResponses_args) that);
                 return false;
               }
           
               public boolean equals(clearSlowLogResponses_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_serverNames = true && this.isSetServerNames();
                 boolean that_present_serverNames = true && that.isSetServerNames();
                 if (this_present_serverNames || that_present_serverNames) {
          -        if (!(this_present_serverNames && that_present_serverNames))
          -          return false;
          -        if (!this.serverNames.equals(that.serverNames))
          -          return false;
          +        if (!(this_present_serverNames && that_present_serverNames)) return false;
          +        if (!this.serverNames.equals(that.serverNames)) return false;
                 }
           
                 return true;
          @@ -54671,8 +59293,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetServerNames()) ? 131071 : 524287);
          -      if (isSetServerNames())
          -        hashCode = hashCode * 8191 + serverNames.hashCode();
          +      if (isSetServerNames()) hashCode = hashCode * 8191 + serverNames.hashCode();
           
                 return hashCode;
               }
          @@ -54690,7 +59311,8 @@ public int compareTo(clearSlowLogResponses_args other) {
                   return lastComparison;
                 }
                 if (isSetServerNames()) {
          -        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serverNames, other.serverNames);
          +        lastComparison =
          +            org.apache.thrift.TBaseHelper.compareTo(this.serverNames, other.serverNames);
                   if (lastComparison != 0) {
                     return lastComparison;
                   }
          @@ -54703,11 +59325,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -54734,35 +59358,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class clearSlowLogResponses_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class clearSlowLogResponses_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public clearSlowLogResponses_argsStandardScheme getScheme() {
                   return new clearSlowLogResponses_argsStandardScheme();
                 }
               }
           
          -    private static class clearSlowLogResponses_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<clearSlowLogResponses_args> {
          +    private static class clearSlowLogResponses_argsStandardScheme
          +        extends org.apache.thrift.scheme.StandardScheme<clearSlowLogResponses_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogResponses_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          clearSlowLogResponses_args struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -54770,10 +59399,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogRespons
                         if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
                           {
                             org.apache.thrift.protocol.TSet _set366 = iprot.readSetBegin();
          -                  struct.serverNames = new java.util.HashSet<TServerName>(2*_set366.size);
          -                  @org.apache.thrift.annotation.Nullable TServerName _elem367;
          -                  for (int _i368 = 0; _i368 < _set366.size; ++_i368)
          -                  {
          +                  struct.serverNames = new java.util.HashSet<TServerName>(2 * _set366.size);
          +                  @org.apache.thrift.annotation.Nullable
          +                  TServerName _elem367;
          +                  for (int _i368 = 0; _i368 < _set366.size; ++_i368) {
                               _elem367 = new TServerName();
                               _elem367.read(iprot);
                               struct.serverNames.add(_elem367);
          @@ -54781,7 +59410,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogRespons
                             iprot.readSetEnd();
                           }
                           struct.setServerNamesIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -54792,20 +59421,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogRespons
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, clearSlowLogResponses_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          clearSlowLogResponses_args struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.serverNames != null) {
                     oprot.writeFieldBegin(SERVER_NAMES_FIELD_DESC);
                     {
          -            oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRUCT, struct.serverNames.size()));
          -            for (TServerName _iter369 : struct.serverNames)
          -            {
          +            oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.serverNames.size()));
          +            for (TServerName _iter369 : struct.serverNames) {
                         _iter369.write(oprot);
                       }
                       oprot.writeSetEnd();
          @@ -54818,17 +59449,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clearSlowLogRespon
           
               }
           
          -    private static class clearSlowLogResponses_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class clearSlowLogResponses_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public clearSlowLogResponses_argsTupleScheme getScheme() {
                   return new clearSlowLogResponses_argsTupleScheme();
                 }
               }
           
          -    private static class clearSlowLogResponses_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<clearSlowLogResponses_args> {
          +    private static class clearSlowLogResponses_argsTupleScheme
          +        extends org.apache.thrift.scheme.TupleScheme<clearSlowLogResponses_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, clearSlowLogResponses_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          clearSlowLogResponses_args struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetServerNames()) {
                     optionals.set(0);
          @@ -54837,8 +59472,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clearSlowLogRespons
                   if (struct.isSetServerNames()) {
                     {
                       oprot.writeI32(struct.serverNames.size());
          -            for (TServerName _iter370 : struct.serverNames)
          -            {
          +            for (TServerName _iter370 : struct.serverNames) {
                         _iter370.write(oprot);
                       }
                     }
          @@ -54846,16 +59480,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clearSlowLogRespons
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, clearSlowLogResponses_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, clearSlowLogResponses_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(1);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TSet _set371 = iprot.readSetBegin(org.apache.thrift.protocol.TType.STRUCT);
          -            struct.serverNames = new java.util.HashSet<TServerName>(2*_set371.size);
          -            @org.apache.thrift.annotation.Nullable TServerName _elem372;
          -            for (int _i373 = 0; _i373 < _set371.size; ++_i373)
          -            {
          +            org.apache.thrift.protocol.TSet _set371 =
          +                iprot.readSetBegin(org.apache.thrift.protocol.TType.STRUCT);
          +            struct.serverNames = new java.util.HashSet<TServerName>(2 * _set371.size);
          +            @org.apache.thrift.annotation.Nullable
          +            TServerName _elem372;
          +            for (int _i373 = 0; _i373 < _set371.size; ++_i373) {
                         _elem372 = new TServerName();
                         _elem372.read(iprot);
                         struct.serverNames.add(_elem372);
          @@ -54866,29 +59503,44 @@ public void read(org.apache.thrift.protocol.TProtocol prot, clearSlowLogResponse
                 }
               }
           
          -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class clearSlowLogResponses_result implements org.apache.thrift.TBase<clearSlowLogResponses_result, clearSlowLogResponses_result._Fields>, java.io.Serializable, Cloneable, Comparable<clearSlowLogResponses_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clearSlowLogResponses_result");
          +  public static class clearSlowLogResponses_result implements
           +      org.apache.thrift.TBase<clearSlowLogResponses_result, clearSlowLogResponses_result._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<clearSlowLogResponses_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("clearSlowLogResponses_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new clearSlowLogResponses_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new clearSlowLogResponses_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new clearSlowLogResponses_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new clearSlowLogResponses_resultTupleSchemeFactory();
           
                public @org.apache.thrift.annotation.Nullable java.util.List<java.lang.Boolean> success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -54901,7 +59553,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -54912,12 +59564,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -54949,23 +59601,28 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +                new org.apache.thrift.meta_data.FieldValueMetaData(
          +                    org.apache.thrift.protocol.TType.BOOL))));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clearSlowLogResponses_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData
          +          .addStructMetaDataMap(clearSlowLogResponses_result.class, metaDataMap);
               }
           
               public clearSlowLogResponses_result() {
               }
           
          -    public clearSlowLogResponses_result(
           -      java.util.List<java.lang.Boolean> success,
          -      TIOError io)
          -    {
           +    public clearSlowLogResponses_result(java.util.List<java.lang.Boolean> success, TIOError io) {
                 this();
                 this.success = success;
                 this.io = io;
          @@ -54976,7 +59633,8 @@ public clearSlowLogResponses_result(
                */
               public clearSlowLogResponses_result(clearSlowLogResponses_result other) {
                 if (other.isSetSuccess()) {
           -        java.util.List<java.lang.Boolean> __this__success = new java.util.ArrayList<java.lang.Boolean>(other.success);
           +        java.util.List<java.lang.Boolean> __this__success =
           +            new java.util.ArrayList<java.lang.Boolean>(other.success);
                   this.success = __this__success;
                 }
                 if (other.isSetIo()) {
          @@ -55015,7 +59673,8 @@ public java.util.List getSuccess() {
                 return this.success;
               }
           
           -    public clearSlowLogResponses_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List<java.lang.Boolean> success) {
          +    public clearSlowLogResponses_result setSuccess(
           +        @org.apache.thrift.annotation.Nullable java.util.List<java.lang.Boolean> success) {
                 this.success = success;
                 return this;
               }
          @@ -55060,23 +59719,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
           -          setSuccess((java.util.List<java.lang.Boolean>)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
           +            setSuccess((java.util.List<java.lang.Boolean>) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -55084,27 +59744,30 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return getSuccess();
          +        case SUCCESS:
          +          return getSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
          @@ -55112,32 +59775,26 @@ public boolean isSet(_Fields field) {
               @Override
               public boolean equals(java.lang.Object that) {
                 if (that instanceof clearSlowLogResponses_result)
          -        return this.equals((clearSlowLogResponses_result)that);
          +        return this.equals((clearSlowLogResponses_result) that);
                 return false;
               }
           
               public boolean equals(clearSlowLogResponses_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true && this.isSetSuccess();
                 boolean that_present_success = true && that.isSetSuccess();
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (!this.success.equals(that.success))
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (!this.success.equals(that.success)) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -55148,12 +59805,10 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287);
          -      if (isSetSuccess())
          -        hashCode = hashCode * 8191 + success.hashCode();
          +      if (isSetSuccess()) hashCode = hashCode * 8191 + success.hashCode();
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -55194,13 +59849,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -55233,35 +59890,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class clearSlowLogResponses_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class clearSlowLogResponses_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public clearSlowLogResponses_resultStandardScheme getScheme() {
                   return new clearSlowLogResponses_resultStandardScheme();
                 }
               }
           
           -    private static class clearSlowLogResponses_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<clearSlowLogResponses_result> {
          +    private static class clearSlowLogResponses_resultStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<clearSlowLogResponses_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogResponses_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot,
          +          clearSlowLogResponses_result struct) throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -55271,15 +59933,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogRespons
                             org.apache.thrift.protocol.TList _list374 = iprot.readListBegin();
                              struct.success = new java.util.ArrayList<java.lang.Boolean>(_list374.size);
                             boolean _elem375;
          -                  for (int _i376 = 0; _i376 < _list374.size; ++_i376)
          -                  {
          +                  for (int _i376 = 0; _i376 < _list374.size; ++_i376) {
                               _elem375 = iprot.readBool();
                               struct.success.add(_elem375);
                             }
                             iprot.readListEnd();
                           }
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -55288,7 +59949,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogRespons
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -55299,20 +59960,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clearSlowLogRespons
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, clearSlowLogResponses_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot,
          +          clearSlowLogResponses_result struct) throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
                   if (struct.success != null) {
                     oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BOOL, struct.success.size()));
          -            for (boolean _iter377 : struct.success)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.BOOL, struct.success.size()));
          +            for (boolean _iter377 : struct.success) {
                         oprot.writeBool(_iter377);
                       }
                       oprot.writeListEnd();
          @@ -55330,17 +59993,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clearSlowLogRespon
           
               }
           
          -    private static class clearSlowLogResponses_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class clearSlowLogResponses_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public clearSlowLogResponses_resultTupleScheme getScheme() {
                   return new clearSlowLogResponses_resultTupleScheme();
                 }
               }
           
           -    private static class clearSlowLogResponses_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<clearSlowLogResponses_result> {
          +    private static class clearSlowLogResponses_resultTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<clearSlowLogResponses_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, clearSlowLogResponses_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot,
          +          clearSlowLogResponses_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -55352,8 +60019,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clearSlowLogRespons
                   if (struct.isSetSuccess()) {
                     {
                       oprot.writeI32(struct.success.size());
          -            for (boolean _iter378 : struct.success)
          -            {
          +            for (boolean _iter378 : struct.success) {
                         oprot.writeBool(_iter378);
                       }
                     }
          @@ -55364,16 +60030,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clearSlowLogRespons
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, clearSlowLogResponses_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot,
          +          clearSlowLogResponses_result struct) throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     {
          -            org.apache.thrift.protocol.TList _list379 = iprot.readListBegin(org.apache.thrift.protocol.TType.BOOL);
          +            org.apache.thrift.protocol.TList _list379 =
          +                iprot.readListBegin(org.apache.thrift.protocol.TType.BOOL);
                        struct.success = new java.util.ArrayList<java.lang.Boolean>(_list379.size);
                       boolean _elem380;
          -            for (int _i381 = 0; _i381 < _list379.size; ++_i381)
          -            {
          +            for (int _i381 = 0; _i381 < _list379.size; ++_i381) {
                         _elem380 = iprot.readBool();
                         struct.success.add(_elem380);
                       }
          @@ -55388,26 +60056,39 @@ public void read(org.apache.thrift.protocol.TProtocol prot, clearSlowLogResponse
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class grant_args implements org.apache.thrift.TBase<grant_args, grant_args._Fields>, java.io.Serializable, Cloneable, Comparable<grant_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("grant_args");
           +  public static class grant_args implements org.apache.thrift.TBase<grant_args, grant_args._Fields>,
           +      java.io.Serializable, Cloneable, Comparable<grant_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("grant_args");
           
          -    private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new grant_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new grant_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new grant_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new grant_argsTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TAccessControlEntity info; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      INFO((short)1, "info");
          +      INFO((short) 1, "info");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -55420,7 +60101,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // INFO
                       return INFO;
                     default:
          @@ -55429,12 +60110,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -55466,9 +60147,13 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.INFO, new org.apache.thrift.meta_data.FieldMetaData("info", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAccessControlEntity.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.INFO,
          +        new org.apache.thrift.meta_data.FieldMetaData("info",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TAccessControlEntity.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
                 org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(grant_args.class, metaDataMap);
               }
          @@ -55476,9 +60161,7 @@ public java.lang.String getFieldName() {
               public grant_args() {
               }
           
          -    public grant_args(
          -      TAccessControlEntity info)
          -    {
          +    public grant_args(TAccessControlEntity info) {
                 this();
                 this.info = info;
               }
          @@ -55526,15 +60209,16 @@ public void setInfoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case INFO:
          -        if (value == null) {
          -          unsetInfo();
          -        } else {
          -          setInfo((TAccessControlEntity)value);
          -        }
          -        break;
          +        case INFO:
          +          if (value == null) {
          +            unsetInfo();
          +          } else {
          +            setInfo((TAccessControlEntity) value);
          +          }
          +          break;
           
                 }
               }
          @@ -55542,46 +60226,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case INFO:
          -        return getInfo();
          +        case INFO:
          +          return getInfo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case INFO:
          -        return isSetInfo();
          +        case INFO:
          +          return isSetInfo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof grant_args)
          -        return this.equals((grant_args)that);
          +      if (that instanceof grant_args) return this.equals((grant_args) that);
                 return false;
               }
           
               public boolean equals(grant_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_info = true && this.isSetInfo();
                 boolean that_present_info = true && that.isSetInfo();
                 if (this_present_info || that_present_info) {
          -        if (!(this_present_info && that_present_info))
          -          return false;
          -        if (!this.info.equals(that.info))
          -          return false;
          +        if (!(this_present_info && that_present_info)) return false;
          +        if (!this.info.equals(that.info)) return false;
                 }
           
                 return true;
          @@ -55592,8 +60274,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetInfo()) ? 131071 : 524287);
          -      if (isSetInfo())
          -        hashCode = hashCode * 8191 + info.hashCode();
          +      if (isSetInfo()) hashCode = hashCode * 8191 + info.hashCode();
           
                 return hashCode;
               }
          @@ -55624,11 +60305,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -55651,7 +60334,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (info == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'info' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'info' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (info != null) {
          @@ -55661,35 +60345,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class grant_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class grant_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public grant_argsStandardScheme getScheme() {
                   return new grant_argsStandardScheme();
                 }
               }
           
           -    private static class grant_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<grant_args> {
          +    private static class grant_argsStandardScheme
           +        extends org.apache.thrift.scheme.StandardScheme<grant_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, grant_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, grant_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -55698,7 +60387,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_args struct)
                           struct.info = new TAccessControlEntity();
                           struct.info.read(iprot);
                           struct.setInfoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -55709,11 +60398,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_args struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, grant_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, grant_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -55728,52 +60419,73 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, grant_args struct)
           
               }
           
          -    private static class grant_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class grant_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public grant_argsTupleScheme getScheme() {
                   return new grant_argsTupleScheme();
                 }
               }
           
           -    private static class grant_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<grant_args> {
          +    private static class grant_argsTupleScheme
           +        extends org.apache.thrift.scheme.TupleScheme<grant_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, grant_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, grant_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.info.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, grant_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, grant_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.info = new TAccessControlEntity();
                   struct.info.read(iprot);
                   struct.setInfoIsSet(true);
                 }
               }
           
           -    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
           -  public static class grant_result implements org.apache.thrift.TBase<grant_result, grant_result._Fields>, java.io.Serializable, Cloneable, Comparable<grant_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("grant_result");
          +  public static class grant_result
           +      implements org.apache.thrift.TBase<grant_result, grant_result._Fields>, java.io.Serializable,
           +      Cloneable, Comparable<grant_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("grant_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new grant_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new grant_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new grant_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new grant_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
           -      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +      private static final java.util.Map<java.lang.String, _Fields> byName =
           +          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -55786,7 +60498,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -55797,12 +60509,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -55836,22 +60548,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(grant_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(grant_result.class,
          +        metaDataMap);
               }
           
               public grant_result() {
               }
           
          -    public grant_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public grant_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -55891,7 +60608,8 @@ public grant_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -55900,7 +60618,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -55928,23 +60647,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -55952,60 +60672,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof grant_result)
          -        return this.equals((grant_result)that);
          +      if (that instanceof grant_result) return this.equals((grant_result) that);
                 return false;
               }
           
               public boolean equals(grant_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -56018,8 +60734,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -56060,13 +60775,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -56095,37 +60812,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class grant_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class grant_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public grant_resultStandardScheme getScheme() {
                   return new grant_resultStandardScheme();
                 }
               }
           
-    private static class grant_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<grant_result> {
          +    private static class grant_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<grant_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -56133,7 +60856,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56142,7 +60865,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56153,11 +60876,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, grant_result struct
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, grant_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, grant_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -56177,17 +60902,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, grant_result struc
           
               }
           
          -    private static class grant_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class grant_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public grant_resultTupleScheme getScheme() {
                   return new grant_resultTupleScheme();
                 }
               }
           
-    private static class grant_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<grant_result> {
          +    private static class grant_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<grant_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, grant_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, grant_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -56205,8 +60934,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, grant_result struct
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, grant_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, grant_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -56220,26 +60951,40 @@ public void read(org.apache.thrift.protocol.TProtocol prot, grant_result struct)
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class revoke_args implements org.apache.thrift.TBase<revoke_args, revoke_args._Fields>, java.io.Serializable, Cloneable, Comparable<revoke_args>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("revoke_args");
          +  public static class revoke_args
+      implements org.apache.thrift.TBase<revoke_args, revoke_args._Fields>, java.io.Serializable,
+      Cloneable, Comparable<revoke_args> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("revoke_args");
           
          -    private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new revoke_argsStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new revoke_argsTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new revoke_argsStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new revoke_argsTupleSchemeFactory();
           
               public @org.apache.thrift.annotation.Nullable TAccessControlEntity info; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      INFO((short)1, "info");
          +      INFO((short) 1, "info");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -56252,7 +60997,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 1: // INFO
                       return INFO;
                     default:
          @@ -56261,12 +61006,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -56298,19 +61043,22 @@ public java.lang.String getFieldName() {
               // isset id assignments
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.INFO, new org.apache.thrift.meta_data.FieldMetaData("info", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAccessControlEntity.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.INFO,
          +        new org.apache.thrift.meta_data.FieldMetaData("info",
          +            org.apache.thrift.TFieldRequirementType.REQUIRED,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TAccessControlEntity.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(revoke_args.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(revoke_args.class,
          +        metaDataMap);
               }
           
               public revoke_args() {
               }
           
          -    public revoke_args(
          -      TAccessControlEntity info)
          -    {
          +    public revoke_args(TAccessControlEntity info) {
                 this();
                 this.info = info;
               }
          @@ -56358,15 +61106,16 @@ public void setInfoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case INFO:
          -        if (value == null) {
          -          unsetInfo();
          -        } else {
          -          setInfo((TAccessControlEntity)value);
          -        }
          -        break;
          +        case INFO:
          +          if (value == null) {
          +            unsetInfo();
          +          } else {
          +            setInfo((TAccessControlEntity) value);
          +          }
          +          break;
           
                 }
               }
          @@ -56374,46 +61123,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case INFO:
          -        return getInfo();
          +        case INFO:
          +          return getInfo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case INFO:
          -        return isSetInfo();
          +        case INFO:
          +          return isSetInfo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof revoke_args)
          -        return this.equals((revoke_args)that);
          +      if (that instanceof revoke_args) return this.equals((revoke_args) that);
                 return false;
               }
           
               public boolean equals(revoke_args that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_info = true && this.isSetInfo();
                 boolean that_present_info = true && that.isSetInfo();
                 if (this_present_info || that_present_info) {
          -        if (!(this_present_info && that_present_info))
          -          return false;
          -        if (!this.info.equals(that.info))
          -          return false;
          +        if (!(this_present_info && that_present_info)) return false;
          +        if (!this.info.equals(that.info)) return false;
                 }
           
                 return true;
          @@ -56424,8 +61171,7 @@ public int hashCode() {
                 int hashCode = 1;
           
                 hashCode = hashCode * 8191 + ((isSetInfo()) ? 131071 : 524287);
          -      if (isSetInfo())
          -        hashCode = hashCode * 8191 + info.hashCode();
          +      if (isSetInfo()) hashCode = hashCode * 8191 + info.hashCode();
           
                 return hashCode;
               }
          @@ -56456,11 +61202,13 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
               }
           
          @@ -56483,7 +61231,8 @@ public java.lang.String toString() {
               public void validate() throws org.apache.thrift.TException {
                 // check for required fields
                 if (info == null) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'info' was not present! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'info' was not present! Struct: " + toString());
                 }
                 // check for sub-struct validity
                 if (info != null) {
          @@ -56493,35 +61242,40 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class revoke_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class revoke_argsStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public revoke_argsStandardScheme getScheme() {
                   return new revoke_argsStandardScheme();
                 }
               }
           
-    private static class revoke_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme<revoke_args> {
          +    private static class revoke_argsStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<revoke_args> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_args struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_args struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -56530,7 +61284,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_args struct)
                           struct.info = new TAccessControlEntity();
                           struct.info.read(iprot);
                           struct.setInfoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56541,11 +61295,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_args struct)
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_args struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_args struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -56560,52 +61316,73 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_args struct
           
               }
           
          -    private static class revoke_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class revoke_argsTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public revoke_argsTupleScheme getScheme() {
                   return new revoke_argsTupleScheme();
                 }
               }
           
-    private static class revoke_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme<revoke_args> {
          +    private static class revoke_argsTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<revoke_args> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, revoke_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, revoke_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.info.write(oprot);
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, revoke_args struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, revoke_args struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   struct.info = new TAccessControlEntity();
                   struct.info.read(iprot);
                   struct.setInfoIsSet(true);
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
-  public static class revoke_result implements org.apache.thrift.TBase<revoke_result, revoke_result._Fields>, java.io.Serializable, Cloneable, Comparable<revoke_result>   {
          -    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("revoke_result");
          +  public static class revoke_result
+      implements org.apache.thrift.TBase<revoke_result, revoke_result._Fields>,
+      java.io.Serializable, Cloneable, Comparable<revoke_result> {
          +    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +        new org.apache.thrift.protocol.TStruct("revoke_result");
           
          -    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0);
          -    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          +    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL,
          +            (short) 0);
          +    private static final org.apache.thrift.protocol.TField IO_FIELD_DESC =
          +        new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT,
          +            (short) 1);
           
          -    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new revoke_resultStandardSchemeFactory();
          -    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new revoke_resultTupleSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +        new revoke_resultStandardSchemeFactory();
          +    private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +        new revoke_resultTupleSchemeFactory();
           
               public boolean success; // required
               public @org.apache.thrift.annotation.Nullable TIOError io; // required
           
          -    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +    /**
          +     * The set of fields this struct contains, along with convenience methods for finding and
          +     * manipulating them.
          +     */
               public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -      SUCCESS((short)0, "success"),
          -      IO((short)1, "io");
          +      SUCCESS((short) 0, "success"), IO((short) 1, "io");
           
-      private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+      private static final java.util.Map<java.lang.String, _Fields> byName =
+          new java.util.HashMap<java.lang.String, _Fields>();
           
                 static {
                   for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -56618,7 +61395,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                  */
                 @org.apache.thrift.annotation.Nullable
                 public static _Fields findByThriftId(int fieldId) {
          -        switch(fieldId) {
          +        switch (fieldId) {
                     case 0: // SUCCESS
                       return SUCCESS;
                     case 1: // IO
          @@ -56629,12 +61406,12 @@ public static _Fields findByThriftId(int fieldId) {
                 }
           
                 /**
          -       * Find the _Fields constant that matches fieldId, throwing an exception
          -       * if it is not found.
          +       * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                  */
                 public static _Fields findByThriftIdOrThrow(int fieldId) {
                   _Fields fields = findByThriftId(fieldId);
          -        if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +        if (fields == null)
          +          throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                   return fields;
                 }
           
          @@ -56668,22 +61445,27 @@ public java.lang.String getFieldName() {
               private byte __isset_bitfield = 0;
               public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
               static {
          -      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -      tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, 
          -          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TIOError.class)));
          +      java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +          new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +      tmpMap.put(_Fields.SUCCESS,
          +        new org.apache.thrift.meta_data.FieldMetaData("success",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.BOOL)));
          +      tmpMap.put(_Fields.IO,
          +        new org.apache.thrift.meta_data.FieldMetaData("io",
          +            org.apache.thrift.TFieldRequirementType.DEFAULT,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TIOError.class)));
                 metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(revoke_result.class, metaDataMap);
          +      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(revoke_result.class,
          +        metaDataMap);
               }
           
               public revoke_result() {
               }
           
          -    public revoke_result(
          -      boolean success,
          -      TIOError io)
          -    {
          +    public revoke_result(boolean success, TIOError io) {
                 this();
                 this.success = success;
                 setSuccessIsSet(true);
          @@ -56723,7 +61505,8 @@ public revoke_result setSuccess(boolean success) {
               }
           
               public void unsetSuccess() {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
               }
           
               /** Returns true if field success is set (has been assigned a value) and false otherwise */
          @@ -56732,7 +61515,8 @@ public boolean isSetSuccess() {
               }
           
               public void setSuccessIsSet(boolean value) {
          -      __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
          +      __isset_bitfield =
          +          org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
               }
           
               @org.apache.thrift.annotation.Nullable
          @@ -56760,23 +61544,24 @@ public void setIoIsSet(boolean value) {
                 }
               }
           
          -    public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +    public void setFieldValue(_Fields field,
          +        @org.apache.thrift.annotation.Nullable java.lang.Object value) {
                 switch (field) {
          -      case SUCCESS:
          -        if (value == null) {
          -          unsetSuccess();
          -        } else {
          -          setSuccess((java.lang.Boolean)value);
          -        }
          -        break;
          +        case SUCCESS:
          +          if (value == null) {
          +            unsetSuccess();
          +          } else {
          +            setSuccess((java.lang.Boolean) value);
          +          }
          +          break;
           
          -      case IO:
          -        if (value == null) {
          -          unsetIo();
          -        } else {
          -          setIo((TIOError)value);
          -        }
          -        break;
          +        case IO:
          +          if (value == null) {
          +            unsetIo();
          +          } else {
          +            setIo((TIOError) value);
          +          }
          +          break;
           
                 }
               }
          @@ -56784,60 +61569,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
               @org.apache.thrift.annotation.Nullable
               public java.lang.Object getFieldValue(_Fields field) {
                 switch (field) {
          -      case SUCCESS:
          -        return isSuccess();
          +        case SUCCESS:
          +          return isSuccess();
           
          -      case IO:
          -        return getIo();
          +        case IO:
          +          return getIo();
           
                 }
                 throw new java.lang.IllegalStateException();
               }
           
          -    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +    /**
          +     * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +     * otherwise
          +     */
               public boolean isSet(_Fields field) {
                 if (field == null) {
                   throw new java.lang.IllegalArgumentException();
                 }
           
                 switch (field) {
          -      case SUCCESS:
          -        return isSetSuccess();
          -      case IO:
          -        return isSetIo();
          +        case SUCCESS:
          +          return isSetSuccess();
          +        case IO:
          +          return isSetIo();
                 }
                 throw new java.lang.IllegalStateException();
               }
           
               @Override
               public boolean equals(java.lang.Object that) {
          -      if (that instanceof revoke_result)
          -        return this.equals((revoke_result)that);
          +      if (that instanceof revoke_result) return this.equals((revoke_result) that);
                 return false;
               }
           
               public boolean equals(revoke_result that) {
          -      if (that == null)
          -        return false;
          -      if (this == that)
          -        return true;
          +      if (that == null) return false;
          +      if (this == that) return true;
           
                 boolean this_present_success = true;
                 boolean that_present_success = true;
                 if (this_present_success || that_present_success) {
          -        if (!(this_present_success && that_present_success))
          -          return false;
          -        if (this.success != that.success)
          -          return false;
          +        if (!(this_present_success && that_present_success)) return false;
          +        if (this.success != that.success) return false;
                 }
           
                 boolean this_present_io = true && this.isSetIo();
                 boolean that_present_io = true && that.isSetIo();
                 if (this_present_io || that_present_io) {
          -        if (!(this_present_io && that_present_io))
          -          return false;
          -        if (!this.io.equals(that.io))
          -          return false;
          +        if (!(this_present_io && that_present_io)) return false;
          +        if (!this.io.equals(that.io)) return false;
                 }
           
                 return true;
          @@ -56850,8 +61631,7 @@ public int hashCode() {
                 hashCode = hashCode * 8191 + ((success) ? 131071 : 524287);
           
                 hashCode = hashCode * 8191 + ((isSetIo()) ? 131071 : 524287);
          -      if (isSetIo())
          -        hashCode = hashCode * 8191 + io.hashCode();
          +      if (isSetIo()) hashCode = hashCode * 8191 + io.hashCode();
           
                 return hashCode;
               }
          @@ -56892,13 +61672,15 @@ public _Fields fieldForId(int fieldId) {
                 return _Fields.findByThriftId(fieldId);
               }
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot)
          +        throws org.apache.thrift.TException {
                 scheme(iprot).read(iprot, this);
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot)
          +        throws org.apache.thrift.TException {
                 scheme(oprot).write(oprot, this);
          -      }
          +    }
           
               @Override
               public java.lang.String toString() {
          @@ -56927,37 +61709,43 @@ public void validate() throws org.apache.thrift.TException {
           
               private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
                 try {
          -        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +        write(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(out)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +    private void readObject(java.io.ObjectInputStream in)
          +        throws java.io.IOException, java.lang.ClassNotFoundException {
                 try {
          -        // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +        // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +        // doesn't call the default constructor.
                   __isset_bitfield = 0;
          -        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +        read(new org.apache.thrift.protocol.TCompactProtocol(
          +            new org.apache.thrift.transport.TIOStreamTransport(in)));
                 } catch (org.apache.thrift.TException te) {
                   throw new java.io.IOException(te);
                 }
               }
           
          -    private static class revoke_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class revoke_resultStandardSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public revoke_resultStandardScheme getScheme() {
                   return new revoke_resultStandardScheme();
                 }
               }
           
-    private static class revoke_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme<revoke_result> {
          +    private static class revoke_resultStandardScheme
+        extends org.apache.thrift.scheme.StandardScheme<revoke_result> {
           
          -      public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struct) throws org.apache.thrift.TException {
          +      public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struct)
          +          throws org.apache.thrift.TException {
                   org.apache.thrift.protocol.TField schemeField;
                   iprot.readStructBegin();
          -        while (true)
          -        {
          +        while (true) {
                     schemeField = iprot.readFieldBegin();
          -          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                       break;
                     }
                     switch (schemeField.id) {
          @@ -56965,7 +61753,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struc
                         if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                           struct.success = iprot.readBool();
                           struct.setSuccessIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56974,7 +61762,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struc
                           struct.io = new TIOError();
                           struct.io.read(iprot);
                           struct.setIoIsSet(true);
          -              } else { 
          +              } else {
                           org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                         }
                         break;
          @@ -56985,11 +61773,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, revoke_result struc
                   }
                   iprot.readStructEnd();
           
          -        // check for required fields of primitive type, which can't be checked in the validate method
          +        // check for required fields of primitive type, which can't be checked in the validate
          +        // method
                   struct.validate();
                 }
           
          -      public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_result struct) throws org.apache.thrift.TException {
          +      public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_result struct)
          +          throws org.apache.thrift.TException {
                   struct.validate();
           
                   oprot.writeStructBegin(STRUCT_DESC);
          @@ -57009,17 +61799,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, revoke_result stru
           
               }
           
          -    private static class revoke_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +    private static class revoke_resultTupleSchemeFactory
          +        implements org.apache.thrift.scheme.SchemeFactory {
                 public revoke_resultTupleScheme getScheme() {
                   return new revoke_resultTupleScheme();
                 }
               }
           
-    private static class revoke_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme<revoke_result> {
          +    private static class revoke_resultTupleScheme
+        extends org.apache.thrift.scheme.TupleScheme<revoke_result> {
           
                 @Override
          -      public void write(org.apache.thrift.protocol.TProtocol prot, revoke_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void write(org.apache.thrift.protocol.TProtocol prot, revoke_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol oprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet optionals = new java.util.BitSet();
                   if (struct.isSetSuccess()) {
                     optionals.set(0);
          @@ -57037,8 +61831,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, revoke_result struc
                 }
           
                 @Override
          -      public void read(org.apache.thrift.protocol.TProtocol prot, revoke_result struct) throws org.apache.thrift.TException {
          -        org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +      public void read(org.apache.thrift.protocol.TProtocol prot, revoke_result struct)
          +          throws org.apache.thrift.TException {
          +        org.apache.thrift.protocol.TTupleProtocol iprot =
          +            (org.apache.thrift.protocol.TTupleProtocol) prot;
                   java.util.BitSet incoming = iprot.readBitSet(2);
                   if (incoming.get(0)) {
                     struct.success = iprot.readBool();
          @@ -57052,8 +61848,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, revoke_result struct
                 }
               }
           
-    private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+    private static <S extends org.apache.thrift.scheme.IScheme> S
          +        scheme(org.apache.thrift.protocol.TProtocol proto) {
          +      return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +          ? STANDARD_SCHEME_FACTORY
          +          : TUPLE_SCHEME_FACTORY).getScheme();
               }
             }
           
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
          index b4ad2b9612ce..f85ae916fcaf 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
          @@ -1,26 +1,56 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class THRegionInfo implements org.apache.thrift.TBase<THRegionInfo, THRegionInfo._Fields>, java.io.Serializable, Cloneable, Comparable<THRegionInfo> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THRegionInfo");
          -
          -  private static final org.apache.thrift.protocol.TField REGION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("regionId", org.apache.thrift.protocol.TType.I64, (short)1);
          -  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField START_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("startKey", org.apache.thrift.protocol.TType.STRING, (short)3);
          -  private static final org.apache.thrift.protocol.TField END_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("endKey", org.apache.thrift.protocol.TType.STRING, (short)4);
          -  private static final org.apache.thrift.protocol.TField OFFLINE_FIELD_DESC = new org.apache.thrift.protocol.TField("offline", org.apache.thrift.protocol.TType.BOOL, (short)5);
          -  private static final org.apache.thrift.protocol.TField SPLIT_FIELD_DESC = new org.apache.thrift.protocol.TField("split", org.apache.thrift.protocol.TType.BOOL, (short)6);
          -  private static final org.apache.thrift.protocol.TField REPLICA_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("replicaId", org.apache.thrift.protocol.TType.I32, (short)7);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new THRegionInfoStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new THRegionInfoTupleSchemeFactory();
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class THRegionInfo implements org.apache.thrift.TBase<THRegionInfo, THRegionInfo._Fields>,
+    java.io.Serializable, Cloneable, Comparable<THRegionInfo> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("THRegionInfo");
          +
          +  private static final org.apache.thrift.protocol.TField REGION_ID_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("regionId", org.apache.thrift.protocol.TType.I64,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField START_KEY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("startKey", org.apache.thrift.protocol.TType.STRING,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField END_KEY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("endKey", org.apache.thrift.protocol.TType.STRING,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField OFFLINE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("offline", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField SPLIT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("split", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField REPLICA_ID_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("replicaId", org.apache.thrift.protocol.TType.I32,
          +          (short) 7);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new THRegionInfoStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new THRegionInfoTupleSchemeFactory();
           
             public long regionId; // required
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName; // required
           @@ -30,17 +60,17 @@ public class THRegionInfo implements org.apache.thrift.TBase<THRegionInfo, THRe
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -53,7 +83,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // REGION_ID
                     return REGION_ID;
                   case 2: // TABLE_NAME
          @@ -74,12 +104,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -114,23 +144,38 @@ public java.lang.String getFieldName() {
             private static final int __SPLIT_ISSET_ID = 2;
             private static final int __REPLICAID_ISSET_ID = 3;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.START_KEY,_Fields.END_KEY,_Fields.OFFLINE,_Fields.SPLIT,_Fields.REPLICA_ID};
          +  private static final _Fields optionals[] =
          +      { _Fields.START_KEY, _Fields.END_KEY, _Fields.OFFLINE, _Fields.SPLIT, _Fields.REPLICA_ID };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.REGION_ID, new org.apache.thrift.meta_data.FieldMetaData("regionId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.REGION_ID, new org.apache.thrift.meta_data.FieldMetaData("regionId",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.START_KEY, new org.apache.thrift.meta_data.FieldMetaData("startKey", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.END_KEY, new org.apache.thrift.meta_data.FieldMetaData("endKey", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.OFFLINE, new org.apache.thrift.meta_data.FieldMetaData("offline", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.TABLE_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.START_KEY,
          +      new org.apache.thrift.meta_data.FieldMetaData("startKey",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.END_KEY,
          +      new org.apache.thrift.meta_data.FieldMetaData("endKey",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.OFFLINE, new org.apache.thrift.meta_data.FieldMetaData("offline",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.SPLIT, new org.apache.thrift.meta_data.FieldMetaData("split", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.SPLIT, new org.apache.thrift.meta_data.FieldMetaData("split",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.REPLICA_ID, new org.apache.thrift.meta_data.FieldMetaData("replicaId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.REPLICA_ID, new org.apache.thrift.meta_data.FieldMetaData("replicaId",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(THRegionInfo.class, metaDataMap);
          @@ -139,10 +184,7 @@ public java.lang.String getFieldName() {
             public THRegionInfo() {
             }
           
          -  public THRegionInfo(
          -    long regionId,
          -    java.nio.ByteBuffer tableName)
          -  {
          +  public THRegionInfo(long regionId, java.nio.ByteBuffer tableName) {
               this();
               this.regionId = regionId;
               setRegionIdIsSet(true);
          @@ -199,7 +241,8 @@ public THRegionInfo setRegionId(long regionId) {
             }
           
             public void unsetRegionId() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __REGIONID_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __REGIONID_ISSET_ID);
             }
           
             /** Returns true if field regionId is set (has been assigned a value) and false otherwise */
          @@ -208,7 +251,8 @@ public boolean isSetRegionId() {
             }
           
             public void setRegionIdIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __REGIONID_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __REGIONID_ISSET_ID, value);
             }
           
             public byte[] getTableName() {
          @@ -221,11 +265,13 @@ public java.nio.ByteBuffer bufferForTableName() {
             }
           
             public THRegionInfo setTableName(byte[] tableName) {
          -    this.tableName = tableName == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(tableName.clone());
          +    this.tableName = tableName == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(tableName.clone());
               return this;
             }
           
          -  public THRegionInfo setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
          +  public THRegionInfo
          +      setTableName(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer tableName) {
               this.tableName = org.apache.thrift.TBaseHelper.copyBinary(tableName);
               return this;
             }
          @@ -255,11 +301,13 @@ public java.nio.ByteBuffer bufferForStartKey() {
             }
           
             public THRegionInfo setStartKey(byte[] startKey) {
          -    this.startKey = startKey == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(startKey.clone());
          +    this.startKey =
          +        startKey == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(startKey.clone());
               return this;
             }
           
          -  public THRegionInfo setStartKey(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startKey) {
          +  public THRegionInfo
          +      setStartKey(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startKey) {
               this.startKey = org.apache.thrift.TBaseHelper.copyBinary(startKey);
               return this;
             }
          @@ -289,7 +337,8 @@ public java.nio.ByteBuffer bufferForEndKey() {
             }
           
             public THRegionInfo setEndKey(byte[] endKey) {
          -    this.endKey = endKey == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(endKey.clone());
          +    this.endKey =
          +        endKey == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(endKey.clone());
               return this;
             }
           
          @@ -324,7 +373,8 @@ public THRegionInfo setOffline(boolean offline) {
             }
           
             public void unsetOffline() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __OFFLINE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __OFFLINE_ISSET_ID);
             }
           
             /** Returns true if field offline is set (has been assigned a value) and false otherwise */
          @@ -333,7 +383,8 @@ public boolean isSetOffline() {
             }
           
             public void setOfflineIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __OFFLINE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __OFFLINE_ISSET_ID, value);
             }
           
             public boolean isSplit() {
          @@ -356,7 +407,8 @@ public boolean isSetSplit() {
             }
           
             public void setSplitIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SPLIT_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SPLIT_ISSET_ID, value);
             }
           
             public int getReplicaId() {
          @@ -370,7 +422,8 @@ public THRegionInfo setReplicaId(int replicaId) {
             }
           
             public void unsetReplicaId() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __REPLICAID_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __REPLICAID_ISSET_ID);
             }
           
             /** Returns true if field replicaId is set (has been assigned a value) and false otherwise */
          @@ -379,78 +432,80 @@ public boolean isSetReplicaId() {
             }
           
             public void setReplicaIdIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __REPLICAID_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __REPLICAID_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case REGION_ID:
          -      if (value == null) {
          -        unsetRegionId();
          -      } else {
          -        setRegionId((java.lang.Long)value);
          -      }
          -      break;
          +      case REGION_ID:
          +        if (value == null) {
          +          unsetRegionId();
          +        } else {
          +          setRegionId((java.lang.Long) value);
          +        }
          +        break;
           
          -    case TABLE_NAME:
          -      if (value == null) {
          -        unsetTableName();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setTableName((byte[])value);
          +      case TABLE_NAME:
          +        if (value == null) {
          +          unsetTableName();
                   } else {
          -          setTableName((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setTableName((byte[]) value);
          +          } else {
          +            setTableName((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case START_KEY:
          -      if (value == null) {
          -        unsetStartKey();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setStartKey((byte[])value);
          +      case START_KEY:
          +        if (value == null) {
          +          unsetStartKey();
                   } else {
          -          setStartKey((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setStartKey((byte[]) value);
          +          } else {
          +            setStartKey((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case END_KEY:
          -      if (value == null) {
          -        unsetEndKey();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setEndKey((byte[])value);
          +      case END_KEY:
          +        if (value == null) {
          +          unsetEndKey();
                   } else {
          -          setEndKey((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setEndKey((byte[]) value);
          +          } else {
          +            setEndKey((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case OFFLINE:
          -      if (value == null) {
          -        unsetOffline();
          -      } else {
          -        setOffline((java.lang.Boolean)value);
          -      }
          -      break;
          +      case OFFLINE:
          +        if (value == null) {
          +          unsetOffline();
          +        } else {
          +          setOffline((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case SPLIT:
          -      if (value == null) {
          -        unsetSplit();
          -      } else {
          -        setSplit((java.lang.Boolean)value);
          -      }
          -      break;
          +      case SPLIT:
          +        if (value == null) {
          +          unsetSplit();
          +        } else {
          +          setSplit((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case REPLICA_ID:
          -      if (value == null) {
          -        unsetReplicaId();
          -      } else {
          -        setReplicaId((java.lang.Integer)value);
          -      }
          -      break;
          +      case REPLICA_ID:
          +        if (value == null) {
          +          unsetReplicaId();
          +        } else {
          +          setReplicaId((java.lang.Integer) value);
          +        }
          +        break;
           
               }
             }
          @@ -458,130 +513,116 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case REGION_ID:
          -      return getRegionId();
          +      case REGION_ID:
          +        return getRegionId();
           
          -    case TABLE_NAME:
          -      return getTableName();
          +      case TABLE_NAME:
          +        return getTableName();
           
          -    case START_KEY:
          -      return getStartKey();
          +      case START_KEY:
          +        return getStartKey();
           
          -    case END_KEY:
          -      return getEndKey();
          +      case END_KEY:
          +        return getEndKey();
           
          -    case OFFLINE:
          -      return isOffline();
          +      case OFFLINE:
          +        return isOffline();
           
          -    case SPLIT:
          -      return isSplit();
          +      case SPLIT:
          +        return isSplit();
           
          -    case REPLICA_ID:
          -      return getReplicaId();
          +      case REPLICA_ID:
          +        return getReplicaId();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case REGION_ID:
          -      return isSetRegionId();
          -    case TABLE_NAME:
          -      return isSetTableName();
          -    case START_KEY:
          -      return isSetStartKey();
          -    case END_KEY:
          -      return isSetEndKey();
          -    case OFFLINE:
          -      return isSetOffline();
          -    case SPLIT:
          -      return isSetSplit();
          -    case REPLICA_ID:
          -      return isSetReplicaId();
          +      case REGION_ID:
          +        return isSetRegionId();
          +      case TABLE_NAME:
          +        return isSetTableName();
          +      case START_KEY:
          +        return isSetStartKey();
          +      case END_KEY:
          +        return isSetEndKey();
          +      case OFFLINE:
          +        return isSetOffline();
          +      case SPLIT:
          +        return isSetSplit();
          +      case REPLICA_ID:
          +        return isSetReplicaId();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof THRegionInfo)
          -      return this.equals((THRegionInfo)that);
          +    if (that instanceof THRegionInfo) return this.equals((THRegionInfo) that);
               return false;
             }
           
             public boolean equals(THRegionInfo that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_regionId = true;
               boolean that_present_regionId = true;
               if (this_present_regionId || that_present_regionId) {
          -      if (!(this_present_regionId && that_present_regionId))
          -        return false;
          -      if (this.regionId != that.regionId)
          -        return false;
          +      if (!(this_present_regionId && that_present_regionId)) return false;
          +      if (this.regionId != that.regionId) return false;
               }
           
               boolean this_present_tableName = true && this.isSetTableName();
               boolean that_present_tableName = true && that.isSetTableName();
               if (this_present_tableName || that_present_tableName) {
          -      if (!(this_present_tableName && that_present_tableName))
          -        return false;
          -      if (!this.tableName.equals(that.tableName))
          -        return false;
          +      if (!(this_present_tableName && that_present_tableName)) return false;
          +      if (!this.tableName.equals(that.tableName)) return false;
               }
           
               boolean this_present_startKey = true && this.isSetStartKey();
               boolean that_present_startKey = true && that.isSetStartKey();
               if (this_present_startKey || that_present_startKey) {
          -      if (!(this_present_startKey && that_present_startKey))
          -        return false;
          -      if (!this.startKey.equals(that.startKey))
          -        return false;
          +      if (!(this_present_startKey && that_present_startKey)) return false;
          +      if (!this.startKey.equals(that.startKey)) return false;
               }
           
               boolean this_present_endKey = true && this.isSetEndKey();
               boolean that_present_endKey = true && that.isSetEndKey();
               if (this_present_endKey || that_present_endKey) {
          -      if (!(this_present_endKey && that_present_endKey))
          -        return false;
          -      if (!this.endKey.equals(that.endKey))
          -        return false;
          +      if (!(this_present_endKey && that_present_endKey)) return false;
          +      if (!this.endKey.equals(that.endKey)) return false;
               }
           
               boolean this_present_offline = true && this.isSetOffline();
               boolean that_present_offline = true && that.isSetOffline();
               if (this_present_offline || that_present_offline) {
          -      if (!(this_present_offline && that_present_offline))
          -        return false;
          -      if (this.offline != that.offline)
          -        return false;
          +      if (!(this_present_offline && that_present_offline)) return false;
          +      if (this.offline != that.offline) return false;
               }
           
               boolean this_present_split = true && this.isSetSplit();
               boolean that_present_split = true && that.isSetSplit();
               if (this_present_split || that_present_split) {
          -      if (!(this_present_split && that_present_split))
          -        return false;
          -      if (this.split != that.split)
          -        return false;
          +      if (!(this_present_split && that_present_split)) return false;
          +      if (this.split != that.split) return false;
               }
           
               boolean this_present_replicaId = true && this.isSetReplicaId();
               boolean that_present_replicaId = true && that.isSetReplicaId();
               if (this_present_replicaId || that_present_replicaId) {
          -      if (!(this_present_replicaId && that_present_replicaId))
          -        return false;
          -      if (this.replicaId != that.replicaId)
          -        return false;
          +      if (!(this_present_replicaId && that_present_replicaId)) return false;
          +      if (this.replicaId != that.replicaId) return false;
               }
           
               return true;
          @@ -594,28 +635,22 @@ public int hashCode() {
               hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(regionId);
           
               hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -    if (isSetTableName())
          -      hashCode = hashCode * 8191 + tableName.hashCode();
          +    if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetStartKey()) ? 131071 : 524287);
          -    if (isSetStartKey())
          -      hashCode = hashCode * 8191 + startKey.hashCode();
          +    if (isSetStartKey()) hashCode = hashCode * 8191 + startKey.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetEndKey()) ? 131071 : 524287);
          -    if (isSetEndKey())
          -      hashCode = hashCode * 8191 + endKey.hashCode();
          +    if (isSetEndKey()) hashCode = hashCode * 8191 + endKey.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetOffline()) ? 131071 : 524287);
          -    if (isSetOffline())
          -      hashCode = hashCode * 8191 + ((offline) ? 131071 : 524287);
          +    if (isSetOffline()) hashCode = hashCode * 8191 + ((offline) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetSplit()) ? 131071 : 524287);
          -    if (isSetSplit())
          -      hashCode = hashCode * 8191 + ((split) ? 131071 : 524287);
          +    if (isSetSplit()) hashCode = hashCode * 8191 + ((split) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetReplicaId()) ? 131071 : 524287);
          -    if (isSetReplicaId())
          -      hashCode = hashCode * 8191 + replicaId;
          +    if (isSetReplicaId()) hashCode = hashCode * 8191 + replicaId;
           
               return hashCode;
             }
          @@ -710,7 +745,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -774,46 +810,54 @@ public java.lang.String toString() {
           
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
          -    // alas, we cannot check 'regionId' because it's a primitive and you chose the non-beans generator.
          +    // alas, we cannot check 'regionId' because it's a primitive and you chose the non-beans
          +    // generator.
               if (tableName == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'tableName' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class THRegionInfoStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class THRegionInfoStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public THRegionInfoStandardScheme getScheme() {
                 return new THRegionInfoStandardScheme();
               }
             }
           
           -  private static class THRegionInfoStandardScheme extends org.apache.thrift.scheme.StandardScheme<THRegionInfo> {
           +  private static class THRegionInfoStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<THRegionInfo> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -821,7 +865,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.regionId = iprot.readI64();
                         struct.setRegionIdIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -829,7 +873,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.tableName = iprot.readBinary();
                         struct.setTableNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -837,7 +881,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.startKey = iprot.readBinary();
                         struct.setStartKeyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -845,7 +889,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.endKey = iprot.readBinary();
                         struct.setEndKeyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -853,7 +897,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.offline = iprot.readBool();
                         struct.setOfflineIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -861,7 +905,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.split = iprot.readBool();
                         struct.setSplitIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -869,7 +913,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.replicaId = iprot.readI32();
                         struct.setReplicaIdIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -882,12 +926,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionInfo struct
           
                 // check for required fields of primitive type, which can't be checked in the validate method
                 if (!struct.isSetRegionId()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'regionId' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'regionId' was not found in serialized data! Struct: " + toString());
                 }
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, THRegionInfo struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, THRegionInfo struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -934,17 +980,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, THRegionInfo struc
           
             }
           
          -  private static class THRegionInfoTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class THRegionInfoTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public THRegionInfoTupleScheme getScheme() {
                 return new THRegionInfoTupleScheme();
               }
             }
           
           -  private static class THRegionInfoTupleScheme extends org.apache.thrift.scheme.TupleScheme<THRegionInfo> {
           +  private static class THRegionInfoTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<THRegionInfo> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, THRegionInfo struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, THRegionInfo struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeI64(struct.regionId);
                 oprot.writeBinary(struct.tableName);
                 java.util.BitSet optionals = new java.util.BitSet();
          @@ -982,8 +1032,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, THRegionInfo struct
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, THRegionInfo struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, THRegionInfo struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.regionId = iprot.readI64();
                 struct.setRegionIdIsSet(true);
                 struct.tableName = iprot.readBinary();
          @@ -1012,8 +1064,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, THRegionInfo struct)
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
           -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
           +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
          index 8c9f2ba14d22..ee36d36d0d9d 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
          @@ -1,31 +1,55 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class THRegionLocation implements org.apache.thrift.TBase<THRegionLocation, THRegionLocation._Fields>, java.io.Serializable, Cloneable, Comparable<THRegionLocation> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THRegionLocation");
          -
          -  private static final org.apache.thrift.protocol.TField SERVER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("serverName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -  private static final org.apache.thrift.protocol.TField REGION_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("regionInfo", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new THRegionLocationStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new THRegionLocationTupleSchemeFactory();
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class THRegionLocation
           +    implements org.apache.thrift.TBase<THRegionLocation, THRegionLocation._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<THRegionLocation> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("THRegionLocation");
          +
          +  private static final org.apache.thrift.protocol.TField SERVER_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("serverName", org.apache.thrift.protocol.TType.STRUCT,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField REGION_INFO_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("regionInfo", org.apache.thrift.protocol.TType.STRUCT,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new THRegionLocationStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new THRegionLocationTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable TServerName serverName; // required
             public @org.apache.thrift.annotation.Nullable THRegionInfo regionInfo; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    SERVER_NAME((short)1, "serverName"),
          -    REGION_INFO((short)2, "regionInfo");
          +    SERVER_NAME((short) 1, "serverName"), REGION_INFO((short) 2, "regionInfo");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -38,7 +62,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // SERVER_NAME
                     return SERVER_NAME;
                   case 2: // REGION_INFO
          @@ -49,12 +73,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -86,22 +110,27 @@ public java.lang.String getFieldName() {
             // isset id assignments
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.SERVER_NAME, new org.apache.thrift.meta_data.FieldMetaData("serverName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TServerName.class)));
          -    tmpMap.put(_Fields.REGION_INFO, new org.apache.thrift.meta_data.FieldMetaData("regionInfo", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, THRegionInfo.class)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.SERVER_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("serverName",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TServerName.class)));
          +    tmpMap.put(_Fields.REGION_INFO,
          +      new org.apache.thrift.meta_data.FieldMetaData("regionInfo",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              THRegionInfo.class)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(THRegionLocation.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(THRegionLocation.class,
          +      metaDataMap);
             }
           
             public THRegionLocation() {
             }
           
          -  public THRegionLocation(
          -    TServerName serverName,
          -    THRegionInfo regionInfo)
          -  {
          +  public THRegionLocation(TServerName serverName, THRegionInfo regionInfo) {
               this();
               this.serverName = serverName;
               this.regionInfo = regionInfo;
          @@ -134,7 +163,8 @@ public TServerName getServerName() {
               return this.serverName;
             }
           
          -  public THRegionLocation setServerName(@org.apache.thrift.annotation.Nullable TServerName serverName) {
          +  public THRegionLocation
          +      setServerName(@org.apache.thrift.annotation.Nullable TServerName serverName) {
               this.serverName = serverName;
               return this;
             }
          @@ -159,7 +189,8 @@ public THRegionInfo getRegionInfo() {
               return this.regionInfo;
             }
           
          -  public THRegionLocation setRegionInfo(@org.apache.thrift.annotation.Nullable THRegionInfo regionInfo) {
          +  public THRegionLocation
          +      setRegionInfo(@org.apache.thrift.annotation.Nullable THRegionInfo regionInfo) {
               this.regionInfo = regionInfo;
               return this;
             }
          @@ -179,23 +210,24 @@ public void setRegionInfoIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case SERVER_NAME:
          -      if (value == null) {
          -        unsetServerName();
          -      } else {
          -        setServerName((TServerName)value);
          -      }
          -      break;
          +      case SERVER_NAME:
          +        if (value == null) {
          +          unsetServerName();
          +        } else {
          +          setServerName((TServerName) value);
          +        }
          +        break;
           
          -    case REGION_INFO:
          -      if (value == null) {
          -        unsetRegionInfo();
          -      } else {
          -        setRegionInfo((THRegionInfo)value);
          -      }
          -      break;
          +      case REGION_INFO:
          +        if (value == null) {
          +          unsetRegionInfo();
          +        } else {
          +          setRegionInfo((THRegionInfo) value);
          +        }
          +        break;
           
               }
             }
          @@ -203,60 +235,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case SERVER_NAME:
          -      return getServerName();
          +      case SERVER_NAME:
          +        return getServerName();
           
          -    case REGION_INFO:
          -      return getRegionInfo();
          +      case REGION_INFO:
          +        return getRegionInfo();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case SERVER_NAME:
          -      return isSetServerName();
          -    case REGION_INFO:
          -      return isSetRegionInfo();
          +      case SERVER_NAME:
          +        return isSetServerName();
          +      case REGION_INFO:
          +        return isSetRegionInfo();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof THRegionLocation)
          -      return this.equals((THRegionLocation)that);
          +    if (that instanceof THRegionLocation) return this.equals((THRegionLocation) that);
               return false;
             }
           
             public boolean equals(THRegionLocation that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_serverName = true && this.isSetServerName();
               boolean that_present_serverName = true && that.isSetServerName();
               if (this_present_serverName || that_present_serverName) {
          -      if (!(this_present_serverName && that_present_serverName))
          -        return false;
          -      if (!this.serverName.equals(that.serverName))
          -        return false;
          +      if (!(this_present_serverName && that_present_serverName)) return false;
          +      if (!this.serverName.equals(that.serverName)) return false;
               }
           
               boolean this_present_regionInfo = true && this.isSetRegionInfo();
               boolean that_present_regionInfo = true && that.isSetRegionInfo();
               if (this_present_regionInfo || that_present_regionInfo) {
          -      if (!(this_present_regionInfo && that_present_regionInfo))
          -        return false;
          -      if (!this.regionInfo.equals(that.regionInfo))
          -        return false;
          +      if (!(this_present_regionInfo && that_present_regionInfo)) return false;
          +      if (!this.regionInfo.equals(that.regionInfo)) return false;
               }
           
               return true;
          @@ -267,12 +295,10 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetServerName()) ? 131071 : 524287);
          -    if (isSetServerName())
          -      hashCode = hashCode * 8191 + serverName.hashCode();
          +    if (isSetServerName()) hashCode = hashCode * 8191 + serverName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetRegionInfo()) ? 131071 : 524287);
          -    if (isSetRegionInfo())
          -      hashCode = hashCode * 8191 + regionInfo.hashCode();
          +    if (isSetRegionInfo()) hashCode = hashCode * 8191 + regionInfo.hashCode();
           
               return hashCode;
             }
          @@ -317,7 +343,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -348,10 +375,12 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (serverName == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'serverName' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'serverName' was not present! Struct: " + toString());
               }
               if (regionInfo == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'regionInfo' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'regionInfo' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
               if (serverName != null) {
          @@ -364,35 +393,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class THRegionLocationStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class THRegionLocationStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public THRegionLocationStandardScheme getScheme() {
                 return new THRegionLocationStandardScheme();
               }
             }
           
          -  private static class THRegionLocationStandardScheme extends org.apache.thrift.scheme.StandardScheme<THRegionLocation> {
          +  private static class THRegionLocationStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<THRegionLocation> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionLocation struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionLocation struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -401,7 +435,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionLocation st
                         struct.serverName = new TServerName();
                         struct.serverName.read(iprot);
                         struct.setServerNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -410,7 +444,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionLocation st
                         struct.regionInfo = new THRegionInfo();
                         struct.regionInfo.read(iprot);
                         struct.setRegionInfoIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -425,7 +459,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, THRegionLocation st
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, THRegionLocation struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, THRegionLocation struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -445,24 +480,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, THRegionLocation s
           
             }
           
          -  private static class THRegionLocationTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class THRegionLocationTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public THRegionLocationTupleScheme getScheme() {
                 return new THRegionLocationTupleScheme();
               }
             }
           
          -  private static class THRegionLocationTupleScheme extends org.apache.thrift.scheme.TupleScheme<THRegionLocation> {
          +  private static class THRegionLocationTupleScheme
          +      extends org.apache.thrift.scheme.TupleScheme<THRegionLocation> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, THRegionLocation struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, THRegionLocation struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.serverName.write(oprot);
                 struct.regionInfo.write(oprot);
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, THRegionLocation struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, THRegionLocation struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.serverName = new TServerName();
                 struct.serverName.read(iprot);
                 struct.setServerNameIsSet(true);
          @@ -472,8 +513,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, THRegionLocation str
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
          index 86f8077cda47..143dfda2e18e 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
          @@ -1,36 +1,59 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * A TIOError exception signals that an error occurred communicating
          - * to the HBase master or a HBase region server. Also used to return
          - * more general HBase error conditions.
          + * A TIOError exception signals that an error occurred communicating to the HBase master or a HBase
          + * region server. Also used to return more general HBase error conditions.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TIOError extends org.apache.thrift.TException implements org.apache.thrift.TBase<TIOError, TIOError._Fields>, java.io.Serializable, Cloneable, Comparable<TIOError> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIOError");
          -
          -  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField CAN_RETRY_FIELD_DESC = new org.apache.thrift.protocol.TField("canRetry", org.apache.thrift.protocol.TType.BOOL, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TIOErrorStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TIOErrorTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TIOError extends org.apache.thrift.TException
          +    implements org.apache.thrift.TBase<TIOError, TIOError._Fields>, java.io.Serializable,
          +    Cloneable, Comparable<TIOError> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TIOError");
          +
          +  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField CAN_RETRY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("canRetry", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TIOErrorStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TIOErrorTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String message; // optional
             public boolean canRetry; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    MESSAGE((short)1, "message"),
          -    CAN_RETRY((short)2, "canRetry");
          +    MESSAGE((short) 1, "message"), CAN_RETRY((short) 2, "canRetry");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -43,7 +66,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // MESSAGE
                     return MESSAGE;
                   case 2: // CAN_RETRY
          @@ -54,12 +77,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -91,13 +114,18 @@ public java.lang.String getFieldName() {
             // isset id assignments
             private static final int __CANRETRY_ISSET_ID = 0;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.MESSAGE,_Fields.CAN_RETRY};
          +  private static final _Fields optionals[] = { _Fields.MESSAGE, _Fields.CAN_RETRY };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.CAN_RETRY, new org.apache.thrift.meta_data.FieldMetaData("canRetry", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.MESSAGE,
          +      new org.apache.thrift.meta_data.FieldMetaData("message",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.CAN_RETRY, new org.apache.thrift.meta_data.FieldMetaData("canRetry",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TIOError.class, metaDataMap);
          @@ -164,7 +192,8 @@ public TIOError setCanRetry(boolean canRetry) {
             }
           
             public void unsetCanRetry() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CANRETRY_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CANRETRY_ISSET_ID);
             }
           
             /** Returns true if field canRetry is set (has been assigned a value) and false otherwise */
          @@ -173,26 +202,28 @@ public boolean isSetCanRetry() {
             }
           
             public void setCanRetryIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CANRETRY_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CANRETRY_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case MESSAGE:
          -      if (value == null) {
          -        unsetMessage();
          -      } else {
          -        setMessage((java.lang.String)value);
          -      }
          -      break;
          +      case MESSAGE:
          +        if (value == null) {
          +          unsetMessage();
          +        } else {
          +          setMessage((java.lang.String) value);
          +        }
          +        break;
           
          -    case CAN_RETRY:
          -      if (value == null) {
          -        unsetCanRetry();
          -      } else {
          -        setCanRetry((java.lang.Boolean)value);
          -      }
          -      break;
          +      case CAN_RETRY:
          +        if (value == null) {
          +          unsetCanRetry();
          +        } else {
          +          setCanRetry((java.lang.Boolean) value);
          +        }
          +        break;
           
               }
             }
          @@ -200,60 +231,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case MESSAGE:
          -      return getMessage();
          +      case MESSAGE:
          +        return getMessage();
           
          -    case CAN_RETRY:
          -      return isCanRetry();
          +      case CAN_RETRY:
          +        return isCanRetry();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case MESSAGE:
          -      return isSetMessage();
          -    case CAN_RETRY:
          -      return isSetCanRetry();
          +      case MESSAGE:
          +        return isSetMessage();
          +      case CAN_RETRY:
          +        return isSetCanRetry();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TIOError)
          -      return this.equals((TIOError)that);
          +    if (that instanceof TIOError) return this.equals((TIOError) that);
               return false;
             }
           
             public boolean equals(TIOError that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_message = true && this.isSetMessage();
               boolean that_present_message = true && that.isSetMessage();
               if (this_present_message || that_present_message) {
          -      if (!(this_present_message && that_present_message))
          -        return false;
          -      if (!this.message.equals(that.message))
          -        return false;
          +      if (!(this_present_message && that_present_message)) return false;
          +      if (!this.message.equals(that.message)) return false;
               }
           
               boolean this_present_canRetry = true && this.isSetCanRetry();
               boolean that_present_canRetry = true && that.isSetCanRetry();
               if (this_present_canRetry || that_present_canRetry) {
          -      if (!(this_present_canRetry && that_present_canRetry))
          -        return false;
          -      if (this.canRetry != that.canRetry)
          -        return false;
          +      if (!(this_present_canRetry && that_present_canRetry)) return false;
          +      if (this.canRetry != that.canRetry) return false;
               }
           
               return true;
          @@ -264,12 +291,10 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetMessage()) ? 131071 : 524287);
          -    if (isSetMessage())
          -      hashCode = hashCode * 8191 + message.hashCode();
          +    if (isSetMessage()) hashCode = hashCode * 8191 + message.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetCanRetry()) ? 131071 : 524287);
          -    if (isSetCanRetry())
          -      hashCode = hashCode * 8191 + ((canRetry) ? 131071 : 524287);
          +    if (isSetCanRetry()) hashCode = hashCode * 8191 + ((canRetry) ? 131071 : 524287);
           
               return hashCode;
             }
          @@ -314,7 +339,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -349,37 +375,43 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TIOErrorStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TIOErrorStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TIOErrorStandardScheme getScheme() {
                 return new TIOErrorStandardScheme();
               }
             }
           
          -  private static class TIOErrorStandardScheme extends org.apache.thrift.scheme.StandardScheme<TIOError> {
          +  private static class TIOErrorStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<TIOError> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TIOError struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TIOError struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -387,7 +419,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIOError struct) th
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.message = iprot.readString();
                         struct.setMessageIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -395,7 +427,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIOError struct) th
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.canRetry = iprot.readBool();
                         struct.setCanRetryIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -410,7 +442,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIOError struct) th
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TIOError struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TIOError struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -432,7 +465,8 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TIOError struct) t
           
             }
           
          -  private static class TIOErrorTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TIOErrorTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TIOErrorTupleScheme getScheme() {
                 return new TIOErrorTupleScheme();
               }
          @@ -441,8 +475,10 @@ public TIOErrorTupleScheme getScheme() {
             private static class TIOErrorTupleScheme extends org.apache.thrift.scheme.TupleScheme<TIOError> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TIOError struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TIOError struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetMessage()) {
                   optionals.set(0);
          @@ -460,8 +496,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TIOError struct) th
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TIOError struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TIOError struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(2);
                 if (incoming.get(0)) {
                   struct.message = iprot.readString();
          @@ -474,8 +512,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TIOError struct) thr
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
          index 9b634c54f10c..bbbb96edf979 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
          @@ -1,32 +1,55 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * A TIllegalArgument exception indicates an illegal or invalid
          - * argument was passed into a procedure.
          + * A TIllegalArgument exception indicates an illegal or invalid argument was passed into a
          + * procedure.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TIllegalArgument extends org.apache.thrift.TException implements org.apache.thrift.TBase<TIllegalArgument, TIllegalArgument._Fields>, java.io.Serializable, Cloneable, Comparable<TIllegalArgument> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIllegalArgument");
          -
          -  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TIllegalArgumentStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TIllegalArgumentTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TIllegalArgument extends org.apache.thrift.TException
          +    implements org.apache.thrift.TBase<TIllegalArgument, TIllegalArgument._Fields>,
          +    java.io.Serializable, Cloneable, Comparable<TIllegalArgument> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TIllegalArgument");
          +
          +  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TIllegalArgumentStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TIllegalArgumentTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String message; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    MESSAGE((short)1, "message");
          +    MESSAGE((short) 1, "message");
           
          -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    private static final java.util.Map<java.lang.String, _Fields> byName =
          +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -39,7 +62,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // MESSAGE
                     return MESSAGE;
                   default:
          @@ -48,12 +71,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -83,14 +106,19 @@ public java.lang.String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.MESSAGE};
          +  private static final _Fields optionals[] = { _Fields.MESSAGE };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.MESSAGE,
          +      new org.apache.thrift.meta_data.FieldMetaData("message",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TIllegalArgument.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TIllegalArgument.class,
          +      metaDataMap);
             }
           
             public TIllegalArgument() {
          @@ -119,7 +147,8 @@ public java.lang.String getMessage() {
               return this.message;
             }
           
          -  public TIllegalArgument setMessage(@org.apache.thrift.annotation.Nullable java.lang.String message) {
          +  public TIllegalArgument
          +      setMessage(@org.apache.thrift.annotation.Nullable java.lang.String message) {
               this.message = message;
               return this;
             }
          @@ -139,15 +168,16 @@ public void setMessageIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case MESSAGE:
          -      if (value == null) {
          -        unsetMessage();
          -      } else {
          -        setMessage((java.lang.String)value);
          -      }
          -      break;
          +      case MESSAGE:
          +        if (value == null) {
          +          unsetMessage();
          +        } else {
          +          setMessage((java.lang.String) value);
          +        }
          +        break;
           
               }
             }
          @@ -155,46 +185,44 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case MESSAGE:
          -      return getMessage();
          +      case MESSAGE:
          +        return getMessage();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case MESSAGE:
          -      return isSetMessage();
          +      case MESSAGE:
          +        return isSetMessage();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TIllegalArgument)
          -      return this.equals((TIllegalArgument)that);
          +    if (that instanceof TIllegalArgument) return this.equals((TIllegalArgument) that);
               return false;
             }
           
             public boolean equals(TIllegalArgument that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_message = true && this.isSetMessage();
               boolean that_present_message = true && that.isSetMessage();
               if (this_present_message || that_present_message) {
          -      if (!(this_present_message && that_present_message))
          -        return false;
          -      if (!this.message.equals(that.message))
          -        return false;
          +      if (!(this_present_message && that_present_message)) return false;
          +      if (!this.message.equals(that.message)) return false;
               }
           
               return true;
          @@ -205,8 +233,7 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetMessage()) ? 131071 : 524287);
          -    if (isSetMessage())
          -      hashCode = hashCode * 8191 + message.hashCode();
          +    if (isSetMessage()) hashCode = hashCode * 8191 + message.hashCode();
           
               return hashCode;
             }
          @@ -241,7 +268,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -270,35 +298,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TIllegalArgumentStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TIllegalArgumentStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TIllegalArgumentStandardScheme getScheme() {
                 return new TIllegalArgumentStandardScheme();
               }
             }
           
          -  private static class TIllegalArgumentStandardScheme extends org.apache.thrift.scheme.StandardScheme<TIllegalArgument> {
          +  private static class TIllegalArgumentStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme<TIllegalArgument> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TIllegalArgument struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TIllegalArgument struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -306,7 +339,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIllegalArgument st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.message = iprot.readString();
                         struct.setMessageIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -321,7 +354,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIllegalArgument st
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TIllegalArgument struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TIllegalArgument struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -338,17 +372,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TIllegalArgument s
           
             }
           
          -  private static class TIllegalArgumentTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TIllegalArgumentTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TIllegalArgumentTupleScheme getScheme() {
                 return new TIllegalArgumentTupleScheme();
               }
             }
           
          -  private static class TIllegalArgumentTupleScheme extends org.apache.thrift.scheme.TupleScheme<TIllegalArgument> {
          +  private static class TIllegalArgumentTupleScheme
          +      extends org.apache.thrift.scheme.TupleScheme<TIllegalArgument> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TIllegalArgument struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TIllegalArgument struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetMessage()) {
                   optionals.set(0);
          @@ -360,8 +398,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TIllegalArgument st
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TIllegalArgument struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TIllegalArgument struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(1);
                 if (incoming.get(0)) {
                   struct.message = iprot.readString();
          @@ -370,8 +410,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TIllegalArgument str
               }
             }
           
          -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
          +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java
          index 746f2199eedd..88028aed1693 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java
          @@ -1,58 +1,83 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Used to perform Increment operations for a single row.
          - * 
          - * You can specify how this Increment should be written to the write-ahead Log (WAL)
          - * by changing the durability. If you don't provide durability, it defaults to
          - * column family's default setting for durability.
          + * Used to perform Increment operations for a single row. You can specify how this Increment should
          + * be written to the write-ahead Log (WAL) by changing the durability. If you don't provide
          + * durability, it defaults to column family's default setting for durability.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          -public class TIncrement implements org.apache.thrift.TBase<TIncrement, TIncrement._Fields>, java.io.Serializable, Cloneable, Comparable<TIncrement> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIncrement");
          -
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)2);
          -  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)4);
          -  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32, (short)5);
          -  private static final org.apache.thrift.protocol.TField CELL_VISIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("cellVisibility", org.apache.thrift.protocol.TType.STRUCT, (short)6);
          -  private static final org.apache.thrift.protocol.TField RETURN_RESULTS_FIELD_DESC = new org.apache.thrift.protocol.TField("returnResults", org.apache.thrift.protocol.TType.BOOL, (short)7);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TIncrementStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TIncrementTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TIncrement implements org.apache.thrift.TBase<TIncrement, TIncrement._Fields>,
          +    java.io.Serializable, Cloneable, Comparable<TIncrement> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TIncrement");
          +
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField CELL_VISIBILITY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cellVisibility",
          +          org.apache.thrift.protocol.TType.STRUCT, (short) 6);
          +  private static final org.apache.thrift.protocol.TField RETURN_RESULTS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("returnResults", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 7);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TIncrementStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TIncrementTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
   public @org.apache.thrift.annotation.Nullable java.util.List<TColumnIncrement> columns; // required
-  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // optional
+  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,
+      java.nio.ByteBuffer> attributes; // optional
             /**
          -   * 
              * @see TDurability
              */
             public @org.apache.thrift.annotation.Nullable TDurability durability; // optional
             public @org.apache.thrift.annotation.Nullable TCellVisibility cellVisibility; // optional
             public boolean returnResults; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    ROW((short)1, "row"),
          -    COLUMNS((short)2, "columns"),
          -    ATTRIBUTES((short)4, "attributes"),
          +    ROW((short) 1, "row"), COLUMNS((short) 2, "columns"), ATTRIBUTES((short) 4, "attributes"),
               /**
          -     * 
                * @see TDurability
                */
          -    DURABILITY((short)5, "durability"),
          -    CELL_VISIBILITY((short)6, "cellVisibility"),
          -    RETURN_RESULTS((short)7, "returnResults");
          +    DURABILITY((short) 5, "durability"), CELL_VISIBILITY((short) 6, "cellVisibility"),
          +    RETURN_RESULTS((short) 7, "returnResults");
           
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -65,7 +90,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // ROW
                     return ROW;
                   case 2: // COLUMNS
          @@ -84,12 +109,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -121,24 +146,42 @@ public java.lang.String getFieldName() {
             // isset id assignments
             private static final int __RETURNRESULTS_ISSET_ID = 0;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.ATTRIBUTES,_Fields.DURABILITY,_Fields.CELL_VISIBILITY,_Fields.RETURN_RESULTS};
          +  private static final _Fields optionals[] =
          +      { _Fields.ATTRIBUTES, _Fields.DURABILITY, _Fields.CELL_VISIBILITY, _Fields.RETURN_RESULTS };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnIncrement.class))));
          -    tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
          -    tmpMap.put(_Fields.DURABILITY, new org.apache.thrift.meta_data.FieldMetaData("durability", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TDurability.class)));
          -    tmpMap.put(_Fields.CELL_VISIBILITY, new org.apache.thrift.meta_data.FieldMetaData("cellVisibility", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCellVisibility.class)));
          -    tmpMap.put(_Fields.RETURN_RESULTS, new org.apache.thrift.meta_data.FieldMetaData("returnResults", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumnIncrement.class))));
          +    tmpMap.put(_Fields.ATTRIBUTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true))));
          +    tmpMap.put(_Fields.DURABILITY,
          +      new org.apache.thrift.meta_data.FieldMetaData("durability",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TDurability.class)));
          +    tmpMap.put(_Fields.CELL_VISIBILITY,
          +      new org.apache.thrift.meta_data.FieldMetaData("cellVisibility",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TCellVisibility.class)));
          +    tmpMap.put(_Fields.RETURN_RESULTS, new org.apache.thrift.meta_data.FieldMetaData(
          +        "returnResults", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TIncrement.class, metaDataMap);
          @@ -147,10 +190,7 @@ public java.lang.String getFieldName() {
             public TIncrement() {
             }
           
          -  public TIncrement(
          -    java.nio.ByteBuffer row,
-    java.util.List<TColumnIncrement> columns)
-  {
+  public TIncrement(java.nio.ByteBuffer row, java.util.List<TColumnIncrement> columns) {
               this();
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
               this.columns = columns;
          @@ -165,14 +205,16 @@ public TIncrement(TIncrement other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetColumns()) {
-      java.util.List<TColumnIncrement> __this__columns = new java.util.ArrayList<TColumnIncrement>(other.columns.size());
+      java.util.List<TColumnIncrement> __this__columns =
+          new java.util.ArrayList<TColumnIncrement>(other.columns.size());
                 for (TColumnIncrement other_element : other.columns) {
                   __this__columns.add(new TColumnIncrement(other_element));
                 }
                 this.columns = __this__columns;
               }
               if (other.isSetAttributes()) {
-      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes);
+      java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
+          new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(other.attributes);
                 this.attributes = __this__attributes;
               }
               if (other.isSetDurability()) {
          @@ -209,7 +251,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TIncrement setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
@@ -254,7 +296,8 @@ public java.util.List<TColumnIncrement> getColumns() {
               return this.columns;
             }
           
-  public TIncrement setColumns(@org.apache.thrift.annotation.Nullable java.util.List<TColumnIncrement> columns) {
+  public TIncrement
+      setColumns(@org.apache.thrift.annotation.Nullable java.util.List<TColumnIncrement> columns) {
               this.columns = columns;
               return this;
             }
          @@ -280,17 +323,18 @@ public int getAttributesSize() {
           
             public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
               if (this.attributes == null) {
-      this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
+      this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
               }
               this.attributes.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
-  public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
+  public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
               return this.attributes;
             }
           
-  public TIncrement setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
+  public TIncrement setAttributes(
+      @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,
+          java.nio.ByteBuffer> attributes) {
               this.attributes = attributes;
               return this;
             }
          @@ -311,7 +355,6 @@ public void setAttributesIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TDurability
              */
             @org.apache.thrift.annotation.Nullable
          @@ -320,7 +363,6 @@ public TDurability getDurability() {
             }
           
             /**
          -   * 
              * @see TDurability
              */
             public TIncrement setDurability(@org.apache.thrift.annotation.Nullable TDurability durability) {
          @@ -348,7 +390,8 @@ public TCellVisibility getCellVisibility() {
               return this.cellVisibility;
             }
           
          -  public TIncrement setCellVisibility(@org.apache.thrift.annotation.Nullable TCellVisibility cellVisibility) {
          +  public TIncrement
          +      setCellVisibility(@org.apache.thrift.annotation.Nullable TCellVisibility cellVisibility) {
               this.cellVisibility = cellVisibility;
               return this;
             }
          @@ -379,7 +422,8 @@ public TIncrement setReturnResults(boolean returnResults) {
             }
           
             public void unsetReturnResults() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __RETURNRESULTS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __RETURNRESULTS_ISSET_ID);
             }
           
             /** Returns true if field returnResults is set (has been assigned a value) and false otherwise */
          @@ -388,62 +432,64 @@ public boolean isSetReturnResults() {
             }
           
             public void setReturnResultsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __RETURNRESULTS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __RETURNRESULTS_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case COLUMNS:
          -      if (value == null) {
          -        unsetColumns();
          -      } else {
-        setColumns((java.util.List<TColumnIncrement>)value);
          -      }
          -      break;
          +      case COLUMNS:
          +        if (value == null) {
          +          unsetColumns();
          +        } else {
+          setColumns((java.util.List<TColumnIncrement>) value);
          +        }
          +        break;
           
          -    case ATTRIBUTES:
          -      if (value == null) {
          -        unsetAttributes();
          -      } else {
-        setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case ATTRIBUTES:
          +        if (value == null) {
          +          unsetAttributes();
          +        } else {
+          setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case DURABILITY:
          -      if (value == null) {
          -        unsetDurability();
          -      } else {
          -        setDurability((TDurability)value);
          -      }
          -      break;
          +      case DURABILITY:
          +        if (value == null) {
          +          unsetDurability();
          +        } else {
          +          setDurability((TDurability) value);
          +        }
          +        break;
           
          -    case CELL_VISIBILITY:
          -      if (value == null) {
          -        unsetCellVisibility();
          -      } else {
          -        setCellVisibility((TCellVisibility)value);
          -      }
          -      break;
          +      case CELL_VISIBILITY:
          +        if (value == null) {
          +          unsetCellVisibility();
          +        } else {
          +          setCellVisibility((TCellVisibility) value);
          +        }
          +        break;
           
          -    case RETURN_RESULTS:
          -      if (value == null) {
          -        unsetReturnResults();
          -      } else {
          -        setReturnResults((java.lang.Boolean)value);
          -      }
          -      break;
          +      case RETURN_RESULTS:
          +        if (value == null) {
          +          unsetReturnResults();
          +        } else {
          +          setReturnResults((java.lang.Boolean) value);
          +        }
          +        break;
           
               }
             }
          @@ -451,116 +497,104 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case COLUMNS:
          -      return getColumns();
          +      case COLUMNS:
          +        return getColumns();
           
          -    case ATTRIBUTES:
          -      return getAttributes();
          +      case ATTRIBUTES:
          +        return getAttributes();
           
          -    case DURABILITY:
          -      return getDurability();
          +      case DURABILITY:
          +        return getDurability();
           
          -    case CELL_VISIBILITY:
          -      return getCellVisibility();
          +      case CELL_VISIBILITY:
          +        return getCellVisibility();
           
          -    case RETURN_RESULTS:
          -      return isReturnResults();
          +      case RETURN_RESULTS:
          +        return isReturnResults();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case ROW:
          -      return isSetRow();
          -    case COLUMNS:
          -      return isSetColumns();
          -    case ATTRIBUTES:
          -      return isSetAttributes();
          -    case DURABILITY:
          -      return isSetDurability();
          -    case CELL_VISIBILITY:
          -      return isSetCellVisibility();
          -    case RETURN_RESULTS:
          -      return isSetReturnResults();
          +      case ROW:
          +        return isSetRow();
          +      case COLUMNS:
          +        return isSetColumns();
          +      case ATTRIBUTES:
          +        return isSetAttributes();
          +      case DURABILITY:
          +        return isSetDurability();
          +      case CELL_VISIBILITY:
          +        return isSetCellVisibility();
          +      case RETURN_RESULTS:
          +        return isSetReturnResults();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TIncrement)
          -      return this.equals((TIncrement)that);
          +    if (that instanceof TIncrement) return this.equals((TIncrement) that);
               return false;
             }
           
             public boolean equals(TIncrement that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_columns = true && this.isSetColumns();
               boolean that_present_columns = true && that.isSetColumns();
               if (this_present_columns || that_present_columns) {
          -      if (!(this_present_columns && that_present_columns))
          -        return false;
          -      if (!this.columns.equals(that.columns))
          -        return false;
          +      if (!(this_present_columns && that_present_columns)) return false;
          +      if (!this.columns.equals(that.columns)) return false;
               }
           
               boolean this_present_attributes = true && this.isSetAttributes();
               boolean that_present_attributes = true && that.isSetAttributes();
               if (this_present_attributes || that_present_attributes) {
          -      if (!(this_present_attributes && that_present_attributes))
          -        return false;
          -      if (!this.attributes.equals(that.attributes))
          -        return false;
          +      if (!(this_present_attributes && that_present_attributes)) return false;
          +      if (!this.attributes.equals(that.attributes)) return false;
               }
           
               boolean this_present_durability = true && this.isSetDurability();
               boolean that_present_durability = true && that.isSetDurability();
               if (this_present_durability || that_present_durability) {
          -      if (!(this_present_durability && that_present_durability))
          -        return false;
          -      if (!this.durability.equals(that.durability))
          -        return false;
          +      if (!(this_present_durability && that_present_durability)) return false;
          +      if (!this.durability.equals(that.durability)) return false;
               }
           
               boolean this_present_cellVisibility = true && this.isSetCellVisibility();
               boolean that_present_cellVisibility = true && that.isSetCellVisibility();
               if (this_present_cellVisibility || that_present_cellVisibility) {
          -      if (!(this_present_cellVisibility && that_present_cellVisibility))
          -        return false;
          -      if (!this.cellVisibility.equals(that.cellVisibility))
          -        return false;
          +      if (!(this_present_cellVisibility && that_present_cellVisibility)) return false;
          +      if (!this.cellVisibility.equals(that.cellVisibility)) return false;
               }
           
               boolean this_present_returnResults = true && this.isSetReturnResults();
               boolean that_present_returnResults = true && that.isSetReturnResults();
               if (this_present_returnResults || that_present_returnResults) {
          -      if (!(this_present_returnResults && that_present_returnResults))
          -        return false;
          -      if (this.returnResults != that.returnResults)
          -        return false;
          +      if (!(this_present_returnResults && that_present_returnResults)) return false;
          +      if (this.returnResults != that.returnResults) return false;
               }
           
               return true;
          @@ -571,28 +605,22 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -    if (isSetColumns())
          -      hashCode = hashCode * 8191 + columns.hashCode();
          +    if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -    if (isSetAttributes())
          -      hashCode = hashCode * 8191 + attributes.hashCode();
          +    if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetDurability()) ? 131071 : 524287);
          -    if (isSetDurability())
          -      hashCode = hashCode * 8191 + durability.getValue();
          +    if (isSetDurability()) hashCode = hashCode * 8191 + durability.getValue();
           
               hashCode = hashCode * 8191 + ((isSetCellVisibility()) ? 131071 : 524287);
          -    if (isSetCellVisibility())
          -      hashCode = hashCode * 8191 + cellVisibility.hashCode();
          +    if (isSetCellVisibility()) hashCode = hashCode * 8191 + cellVisibility.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetReturnResults()) ? 131071 : 524287);
          -    if (isSetReturnResults())
          -      hashCode = hashCode * 8191 + ((returnResults) ? 131071 : 524287);
          +    if (isSetReturnResults()) hashCode = hashCode * 8191 + ((returnResults) ? 131071 : 524287);
           
               return hashCode;
             }
          @@ -650,7 +678,8 @@ public int compareTo(TIncrement other) {
                 return lastComparison;
               }
               if (isSetCellVisibility()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cellVisibility, other.cellVisibility);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.cellVisibility, other.cellVisibility);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -660,7 +689,8 @@ public int compareTo(TIncrement other) {
                 return lastComparison;
               }
               if (isSetReturnResults()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.returnResults, other.returnResults);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.returnResults, other.returnResults);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -677,7 +707,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -744,10 +775,12 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (row == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'row' was not present! Struct: " + toString());
               }
               if (columns == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'columns' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'columns' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
               if (cellVisibility != null) {
          @@ -757,37 +790,43 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TIncrementStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TIncrementStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TIncrementStandardScheme getScheme() {
                 return new TIncrementStandardScheme();
               }
             }
           
-  private static class TIncrementStandardScheme extends org.apache.thrift.scheme.StandardScheme<TIncrement> {
+  private static class TIncrementStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<TIncrement> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -795,7 +834,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -804,9 +843,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                         {
                           org.apache.thrift.protocol.TList _list70 = iprot.readListBegin();
                 struct.columns = new java.util.ArrayList<TColumnIncrement>(_list70.size);
          -                @org.apache.thrift.annotation.Nullable TColumnIncrement _elem71;
          -                for (int _i72 = 0; _i72 < _list70.size; ++_i72)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TColumnIncrement _elem71;
          +                for (int _i72 = 0; _i72 < _list70.size; ++_i72) {
                             _elem71 = new TColumnIncrement();
                             _elem71.read(iprot);
                             struct.columns.add(_elem71);
          @@ -814,7 +853,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                           iprot.readListEnd();
                         }
                         struct.setColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -822,11 +861,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map73 = iprot.readMapBegin();
-                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map73.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key74;
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val75;
          -                for (int _i76 = 0; _i76 < _map73.size; ++_i76)
          -                {
+                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
+                    2 * _map73.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key74;
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _val75;
          +                for (int _i76 = 0; _i76 < _map73.size; ++_i76) {
                             _key74 = iprot.readBinary();
                             _val75 = iprot.readBinary();
                             struct.attributes.put(_key74, _val75);
          @@ -834,15 +875,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                           iprot.readMapEnd();
                         }
                         struct.setAttributesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 5: // DURABILITY
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability
          +                  .findByValue(iprot.readI32());
                         struct.setDurabilityIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -851,7 +893,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                         struct.cellVisibility = new TCellVisibility();
                         struct.cellVisibility.read(iprot);
                         struct.setCellVisibilityIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -859,7 +901,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.returnResults = iprot.readBool();
                         struct.setReturnResultsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -874,7 +916,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TIncrement struct)
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TIncrement struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TIncrement struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -886,9 +929,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TIncrement struct)
                 if (struct.columns != null) {
                   oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                   {
          -          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          -          for (TColumnIncrement _iter77 : struct.columns)
          -          {
          +          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +              org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          +          for (TColumnIncrement _iter77 : struct.columns) {
                       _iter77.write(oprot);
                     }
                     oprot.writeListEnd();
          @@ -899,9 +942,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TIncrement struct)
                   if (struct.isSetAttributes()) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
-            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter78 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
+            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter78 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter78.getKey());
                         oprot.writeBinary(_iter78.getValue());
                       }
          @@ -935,22 +980,25 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TIncrement struct)
           
             }
           
          -  private static class TIncrementTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TIncrementTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TIncrementTupleScheme getScheme() {
                 return new TIncrementTupleScheme();
               }
             }
           
-  private static class TIncrementTupleScheme extends org.apache.thrift.scheme.TupleScheme<TIncrement> {
+  private static class TIncrementTupleScheme
+      extends org.apache.thrift.scheme.TupleScheme<TIncrement> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TIncrement struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TIncrement struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.row);
                 {
                   oprot.writeI32(struct.columns.size());
          -        for (TColumnIncrement _iter79 : struct.columns)
          -        {
          +        for (TColumnIncrement _iter79 : struct.columns) {
                     _iter79.write(oprot);
                   }
                 }
          @@ -971,8 +1019,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TIncrement struct)
                 if (struct.isSetAttributes()) {
                   {
                     oprot.writeI32(struct.attributes.size());
-          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter80 : struct.attributes.entrySet())
          -          {
+          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter80 : struct.attributes
          +              .entrySet()) {
                       oprot.writeBinary(_iter80.getKey());
                       oprot.writeBinary(_iter80.getValue());
                     }
          @@ -990,16 +1038,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TIncrement struct)
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TIncrement struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TIncrement struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.row = iprot.readBinary();
                 struct.setRowIsSet(true);
                 {
          -        org.apache.thrift.protocol.TList _list81 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +        org.apache.thrift.protocol.TList _list81 =
          +            iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
         struct.columns = new java.util.ArrayList<TColumnIncrement>(_list81.size);
          -        @org.apache.thrift.annotation.Nullable TColumnIncrement _elem82;
          -        for (int _i83 = 0; _i83 < _list81.size; ++_i83)
          -        {
          +        @org.apache.thrift.annotation.Nullable
          +        TColumnIncrement _elem82;
          +        for (int _i83 = 0; _i83 < _list81.size; ++_i83) {
                     _elem82 = new TColumnIncrement();
                     _elem82.read(iprot);
                     struct.columns.add(_elem82);
          @@ -1009,12 +1060,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TIncrement struct) t
                 java.util.BitSet incoming = iprot.readBitSet(4);
                 if (incoming.get(0)) {
                   {
          -          org.apache.thrift.protocol.TMap _map84 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
-          struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map84.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key85;
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val86;
          -          for (int _i87 = 0; _i87 < _map84.size; ++_i87)
          -          {
          +          org.apache.thrift.protocol.TMap _map84 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +          struct.attributes =
+              new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map84.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key85;
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _val86;
          +          for (int _i87 = 0; _i87 < _map84.size; ++_i87) {
                       _key85 = iprot.readBinary();
                       _val86 = iprot.readBinary();
                       struct.attributes.put(_key85, _val86);
          @@ -1023,7 +1077,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TIncrement struct) t
                   struct.setAttributesIsSet(true);
                 }
                 if (incoming.get(1)) {
          -        struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +        struct.durability =
          +            org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
                   struct.setDurabilityIsSet(true);
                 }
                 if (incoming.get(2)) {
          @@ -1038,8 +1093,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TIncrement struct) t
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
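TIncrement, reformatted above, is the thrift2 wire struct behind the client-side Increment operation: a caller fills in the row, one TColumnIncrement per counter column, and optionally durability and returnResults before handing the struct to the Thrift service. A minimal usage sketch follows; it assumes TColumnIncrement exposes setFamily/setQualifier/setAmount and that TDurability defines SYNC_WAL, neither of which appears in the hunks above.

    // Illustrative sketch only; assumes TColumnIncrement setters and TDurability.SYNC_WAL,
    // which are declared elsewhere in org.apache.hadoop.hbase.thrift2.generated.
    java.nio.ByteBuffer row =
        java.nio.ByteBuffer.wrap("row1".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    TColumnIncrement col = new TColumnIncrement();
    col.setFamily(java.nio.ByteBuffer.wrap("cf".getBytes(java.nio.charset.StandardCharsets.UTF_8)));
    col.setQualifier(java.nio.ByteBuffer.wrap("hits".getBytes(java.nio.charset.StandardCharsets.UTF_8)));
    col.setAmount(1L);
    TIncrement increment = new TIncrement(row, java.util.Collections.singletonList(col));
    increment.setDurability(TDurability.SYNC_WAL); // optional; unset falls back to the column family default
    increment.setReturnResults(true); // optional; ask the server to return the incremented cells
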
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TKeepDeletedCells.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TKeepDeletedCells.java
          index dd723fd73242..18c93ca7ab1c 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TKeepDeletedCells.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TKeepDeletedCells.java
          @@ -1,34 +1,41 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.KeepDeletedCells
          + * Thrift wrapper around org.apache.hadoop.hbase.KeepDeletedCells
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TKeepDeletedCells implements org.apache.thrift.TEnum {
             /**
              * Deleted Cells are not retained.
              */
             FALSE(0),
             /**
          -   * Deleted Cells are retained until they are removed by other means
          -   * such TTL or VERSIONS.
          -   * If no TTL is specified or no new versions of delete cells are
          -   * written, they are retained forever.
          +   * Deleted Cells are retained until they are removed by other means such TTL or VERSIONS. If no
          +   * TTL is specified or no new versions of delete cells are written, they are retained forever.
              */
             TRUE(1),
             /**
          -   * Deleted Cells are retained until the delete marker expires due to TTL.
          -   * This is useful when TTL is combined with MIN_VERSIONS and one
          -   * wants to keep a minimum number of versions around but at the same
          -   * time remove deleted cells after the TTL.
          +   * Deleted Cells are retained until the delete marker expires due to TTL. This is useful when TTL
          +   * is combined with MIN_VERSIONS and one wants to keep a minimum number of versions around but at
          +   * the same time remove deleted cells after the TTL.
              */
             TTL(2);
           
          @@ -50,7 +57,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TKeepDeletedCells findByValue(int value) { 
          +  public static TKeepDeletedCells findByValue(int value) {
               switch (value) {
                 case 0:
                   return FALSE;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogQueryFilter.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogQueryFilter.java
          index 49f7d5a4cd70..7d0b4af22147 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogQueryFilter.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogQueryFilter.java
          @@ -1,30 +1,60 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.client.LogQueryFilter
          + * Thrift wrapper around org.apache.hadoop.hbase.client.LogQueryFilter
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TLogQueryFilter implements org.apache.thrift.TBase<TLogQueryFilter, TLogQueryFilter._Fields>, java.io.Serializable, Cloneable, Comparable<TLogQueryFilter> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TLogQueryFilter");
          -
          -  private static final org.apache.thrift.protocol.TField REGION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("regionName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField CLIENT_ADDRESS_FIELD_DESC = new org.apache.thrift.protocol.TField("clientAddress", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3);
          -  private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("userName", org.apache.thrift.protocol.TType.STRING, (short)4);
          -  private static final org.apache.thrift.protocol.TField LIMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("limit", org.apache.thrift.protocol.TType.I32, (short)5);
          -  private static final org.apache.thrift.protocol.TField LOG_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("logType", org.apache.thrift.protocol.TType.I32, (short)6);
          -  private static final org.apache.thrift.protocol.TField FILTER_BY_OPERATOR_FIELD_DESC = new org.apache.thrift.protocol.TField("filterByOperator", org.apache.thrift.protocol.TType.I32, (short)7);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TLogQueryFilterStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TLogQueryFilterTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TLogQueryFilter
+    implements org.apache.thrift.TBase<TLogQueryFilter, TLogQueryFilter._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TLogQueryFilter> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TLogQueryFilter");
          +
          +  private static final org.apache.thrift.protocol.TField REGION_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("regionName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField CLIENT_ADDRESS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("clientAddress",
          +          org.apache.thrift.protocol.TType.STRING, (short) 2);
          +  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("userName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField LIMIT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("limit", org.apache.thrift.protocol.TType.I32,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField LOG_TYPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("logType", org.apache.thrift.protocol.TType.I32,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField FILTER_BY_OPERATOR_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("filterByOperator",
          +          org.apache.thrift.protocol.TType.I32, (short) 7);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TLogQueryFilterStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TLogQueryFilterTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String regionName; // optional
             public @org.apache.thrift.annotation.Nullable java.lang.String clientAddress; // optional
           @@ -32,35 +62,32 @@ public class TLogQueryFilter implements org.apache.thrift.TBase<TLogQueryFilter,
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -73,7 +100,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // REGION_NAME
                     return REGION_NAME;
                   case 2: // CLIENT_ADDRESS
          @@ -94,12 +121,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -131,26 +158,49 @@ public java.lang.String getFieldName() {
             // isset id assignments
             private static final int __LIMIT_ISSET_ID = 0;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.REGION_NAME,_Fields.CLIENT_ADDRESS,_Fields.TABLE_NAME,_Fields.USER_NAME,_Fields.LIMIT,_Fields.LOG_TYPE,_Fields.FILTER_BY_OPERATOR};
          +  private static final _Fields optionals[] =
          +      { _Fields.REGION_NAME, _Fields.CLIENT_ADDRESS, _Fields.TABLE_NAME, _Fields.USER_NAME,
          +          _Fields.LIMIT, _Fields.LOG_TYPE, _Fields.FILTER_BY_OPERATOR };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.REGION_NAME, new org.apache.thrift.meta_data.FieldMetaData("regionName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.CLIENT_ADDRESS, new org.apache.thrift.meta_data.FieldMetaData("clientAddress", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.USER_NAME, new org.apache.thrift.meta_data.FieldMetaData("userName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.LIMIT, new org.apache.thrift.meta_data.FieldMetaData("limit", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.REGION_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("regionName",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.CLIENT_ADDRESS,
          +      new org.apache.thrift.meta_data.FieldMetaData("clientAddress",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.TABLE_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.USER_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("userName",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.LIMIT, new org.apache.thrift.meta_data.FieldMetaData("limit",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.LOG_TYPE, new org.apache.thrift.meta_data.FieldMetaData("logType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TLogType.class)));
          -    tmpMap.put(_Fields.FILTER_BY_OPERATOR, new org.apache.thrift.meta_data.FieldMetaData("filterByOperator", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TFilterByOperator.class)));
          +    tmpMap.put(_Fields.LOG_TYPE,
          +      new org.apache.thrift.meta_data.FieldMetaData("logType",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TLogType.class)));
          +    tmpMap.put(_Fields.FILTER_BY_OPERATOR,
          +      new org.apache.thrift.meta_data.FieldMetaData("filterByOperator",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TFilterByOperator.class)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TLogQueryFilter.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TLogQueryFilter.class,
          +      metaDataMap);
             }
           
             public TLogQueryFilter() {
          @@ -211,7 +261,8 @@ public java.lang.String getRegionName() {
               return this.regionName;
             }
           
          -  public TLogQueryFilter setRegionName(@org.apache.thrift.annotation.Nullable java.lang.String regionName) {
          +  public TLogQueryFilter
          +      setRegionName(@org.apache.thrift.annotation.Nullable java.lang.String regionName) {
               this.regionName = regionName;
               return this;
             }
          @@ -236,7 +287,8 @@ public java.lang.String getClientAddress() {
               return this.clientAddress;
             }
           
          -  public TLogQueryFilter setClientAddress(@org.apache.thrift.annotation.Nullable java.lang.String clientAddress) {
          +  public TLogQueryFilter
          +      setClientAddress(@org.apache.thrift.annotation.Nullable java.lang.String clientAddress) {
               this.clientAddress = clientAddress;
               return this;
             }
          @@ -261,7 +313,8 @@ public java.lang.String getTableName() {
               return this.tableName;
             }
           
          -  public TLogQueryFilter setTableName(@org.apache.thrift.annotation.Nullable java.lang.String tableName) {
          +  public TLogQueryFilter
          +      setTableName(@org.apache.thrift.annotation.Nullable java.lang.String tableName) {
               this.tableName = tableName;
               return this;
             }
          @@ -286,7 +339,8 @@ public java.lang.String getUserName() {
               return this.userName;
             }
           
          -  public TLogQueryFilter setUserName(@org.apache.thrift.annotation.Nullable java.lang.String userName) {
          +  public TLogQueryFilter
          +      setUserName(@org.apache.thrift.annotation.Nullable java.lang.String userName) {
               this.userName = userName;
               return this;
             }
          @@ -326,11 +380,11 @@ public boolean isSetLimit() {
             }
           
             public void setLimitIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __LIMIT_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __LIMIT_ISSET_ID, value);
             }
           
             /**
          -   * 
              * @see TLogType
              */
             @org.apache.thrift.annotation.Nullable
          @@ -339,7 +393,6 @@ public TLogType getLogType() {
             }
           
             /**
          -   * 
              * @see TLogType
              */
             public TLogQueryFilter setLogType(@org.apache.thrift.annotation.Nullable TLogType logType) {
          @@ -363,7 +416,6 @@ public void setLogTypeIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TFilterByOperator
              */
             @org.apache.thrift.annotation.Nullable
          @@ -372,10 +424,10 @@ public TFilterByOperator getFilterByOperator() {
             }
           
             /**
          -   * 
              * @see TFilterByOperator
              */
          -  public TLogQueryFilter setFilterByOperator(@org.apache.thrift.annotation.Nullable TFilterByOperator filterByOperator) {
          +  public TLogQueryFilter setFilterByOperator(
          +      @org.apache.thrift.annotation.Nullable TFilterByOperator filterByOperator) {
               this.filterByOperator = filterByOperator;
               return this;
             }
          @@ -384,7 +436,9 @@ public void unsetFilterByOperator() {
               this.filterByOperator = null;
             }
           
          -  /** Returns true if field filterByOperator is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field filterByOperator is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetFilterByOperator() {
               return this.filterByOperator != null;
             }
          @@ -395,63 +449,64 @@ public void setFilterByOperatorIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case REGION_NAME:
          -      if (value == null) {
          -        unsetRegionName();
          -      } else {
          -        setRegionName((java.lang.String)value);
          -      }
          -      break;
          +      case REGION_NAME:
          +        if (value == null) {
          +          unsetRegionName();
          +        } else {
          +          setRegionName((java.lang.String) value);
          +        }
          +        break;
           
          -    case CLIENT_ADDRESS:
          -      if (value == null) {
          -        unsetClientAddress();
          -      } else {
          -        setClientAddress((java.lang.String)value);
          -      }
          -      break;
          +      case CLIENT_ADDRESS:
          +        if (value == null) {
          +          unsetClientAddress();
          +        } else {
          +          setClientAddress((java.lang.String) value);
          +        }
          +        break;
           
          -    case TABLE_NAME:
          -      if (value == null) {
          -        unsetTableName();
          -      } else {
          -        setTableName((java.lang.String)value);
          -      }
          -      break;
          +      case TABLE_NAME:
          +        if (value == null) {
          +          unsetTableName();
          +        } else {
          +          setTableName((java.lang.String) value);
          +        }
          +        break;
           
          -    case USER_NAME:
          -      if (value == null) {
          -        unsetUserName();
          -      } else {
          -        setUserName((java.lang.String)value);
          -      }
          -      break;
          +      case USER_NAME:
          +        if (value == null) {
          +          unsetUserName();
          +        } else {
          +          setUserName((java.lang.String) value);
          +        }
          +        break;
           
          -    case LIMIT:
          -      if (value == null) {
          -        unsetLimit();
          -      } else {
          -        setLimit((java.lang.Integer)value);
          -      }
          -      break;
          +      case LIMIT:
          +        if (value == null) {
          +          unsetLimit();
          +        } else {
          +          setLimit((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case LOG_TYPE:
          -      if (value == null) {
          -        unsetLogType();
          -      } else {
          -        setLogType((TLogType)value);
          -      }
          -      break;
          +      case LOG_TYPE:
          +        if (value == null) {
          +          unsetLogType();
          +        } else {
          +          setLogType((TLogType) value);
          +        }
          +        break;
           
          -    case FILTER_BY_OPERATOR:
          -      if (value == null) {
          -        unsetFilterByOperator();
          -      } else {
          -        setFilterByOperator((TFilterByOperator)value);
          -      }
          -      break;
          +      case FILTER_BY_OPERATOR:
          +        if (value == null) {
          +          unsetFilterByOperator();
          +        } else {
          +          setFilterByOperator((TFilterByOperator) value);
          +        }
          +        break;
           
               }
             }
          @@ -459,130 +514,116 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case REGION_NAME:
          -      return getRegionName();
          +      case REGION_NAME:
          +        return getRegionName();
           
          -    case CLIENT_ADDRESS:
          -      return getClientAddress();
          +      case CLIENT_ADDRESS:
          +        return getClientAddress();
           
          -    case TABLE_NAME:
          -      return getTableName();
          +      case TABLE_NAME:
          +        return getTableName();
           
          -    case USER_NAME:
          -      return getUserName();
          +      case USER_NAME:
          +        return getUserName();
           
          -    case LIMIT:
          -      return getLimit();
          +      case LIMIT:
          +        return getLimit();
           
          -    case LOG_TYPE:
          -      return getLogType();
          +      case LOG_TYPE:
          +        return getLogType();
           
          -    case FILTER_BY_OPERATOR:
          -      return getFilterByOperator();
          +      case FILTER_BY_OPERATOR:
          +        return getFilterByOperator();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case REGION_NAME:
          -      return isSetRegionName();
          -    case CLIENT_ADDRESS:
          -      return isSetClientAddress();
          -    case TABLE_NAME:
          -      return isSetTableName();
          -    case USER_NAME:
          -      return isSetUserName();
          -    case LIMIT:
          -      return isSetLimit();
          -    case LOG_TYPE:
          -      return isSetLogType();
          -    case FILTER_BY_OPERATOR:
          -      return isSetFilterByOperator();
          +      case REGION_NAME:
          +        return isSetRegionName();
          +      case CLIENT_ADDRESS:
          +        return isSetClientAddress();
          +      case TABLE_NAME:
          +        return isSetTableName();
          +      case USER_NAME:
          +        return isSetUserName();
          +      case LIMIT:
          +        return isSetLimit();
          +      case LOG_TYPE:
          +        return isSetLogType();
          +      case FILTER_BY_OPERATOR:
          +        return isSetFilterByOperator();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TLogQueryFilter)
          -      return this.equals((TLogQueryFilter)that);
          +    if (that instanceof TLogQueryFilter) return this.equals((TLogQueryFilter) that);
               return false;
             }
           
             public boolean equals(TLogQueryFilter that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_regionName = true && this.isSetRegionName();
               boolean that_present_regionName = true && that.isSetRegionName();
               if (this_present_regionName || that_present_regionName) {
          -      if (!(this_present_regionName && that_present_regionName))
          -        return false;
          -      if (!this.regionName.equals(that.regionName))
          -        return false;
          +      if (!(this_present_regionName && that_present_regionName)) return false;
          +      if (!this.regionName.equals(that.regionName)) return false;
               }
           
               boolean this_present_clientAddress = true && this.isSetClientAddress();
               boolean that_present_clientAddress = true && that.isSetClientAddress();
               if (this_present_clientAddress || that_present_clientAddress) {
          -      if (!(this_present_clientAddress && that_present_clientAddress))
          -        return false;
          -      if (!this.clientAddress.equals(that.clientAddress))
          -        return false;
          +      if (!(this_present_clientAddress && that_present_clientAddress)) return false;
          +      if (!this.clientAddress.equals(that.clientAddress)) return false;
               }
           
               boolean this_present_tableName = true && this.isSetTableName();
               boolean that_present_tableName = true && that.isSetTableName();
               if (this_present_tableName || that_present_tableName) {
          -      if (!(this_present_tableName && that_present_tableName))
          -        return false;
          -      if (!this.tableName.equals(that.tableName))
          -        return false;
          +      if (!(this_present_tableName && that_present_tableName)) return false;
          +      if (!this.tableName.equals(that.tableName)) return false;
               }
           
               boolean this_present_userName = true && this.isSetUserName();
               boolean that_present_userName = true && that.isSetUserName();
               if (this_present_userName || that_present_userName) {
          -      if (!(this_present_userName && that_present_userName))
          -        return false;
          -      if (!this.userName.equals(that.userName))
          -        return false;
          +      if (!(this_present_userName && that_present_userName)) return false;
          +      if (!this.userName.equals(that.userName)) return false;
               }
           
               boolean this_present_limit = true && this.isSetLimit();
               boolean that_present_limit = true && that.isSetLimit();
               if (this_present_limit || that_present_limit) {
          -      if (!(this_present_limit && that_present_limit))
          -        return false;
          -      if (this.limit != that.limit)
          -        return false;
          +      if (!(this_present_limit && that_present_limit)) return false;
          +      if (this.limit != that.limit) return false;
               }
           
               boolean this_present_logType = true && this.isSetLogType();
               boolean that_present_logType = true && that.isSetLogType();
               if (this_present_logType || that_present_logType) {
          -      if (!(this_present_logType && that_present_logType))
          -        return false;
          -      if (!this.logType.equals(that.logType))
          -        return false;
          +      if (!(this_present_logType && that_present_logType)) return false;
          +      if (!this.logType.equals(that.logType)) return false;
               }
           
               boolean this_present_filterByOperator = true && this.isSetFilterByOperator();
               boolean that_present_filterByOperator = true && that.isSetFilterByOperator();
               if (this_present_filterByOperator || that_present_filterByOperator) {
          -      if (!(this_present_filterByOperator && that_present_filterByOperator))
          -        return false;
          -      if (!this.filterByOperator.equals(that.filterByOperator))
          -        return false;
          +      if (!(this_present_filterByOperator && that_present_filterByOperator)) return false;
          +      if (!this.filterByOperator.equals(that.filterByOperator)) return false;
               }
           
               return true;
          @@ -593,32 +634,25 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRegionName()) ? 131071 : 524287);
          -    if (isSetRegionName())
          -      hashCode = hashCode * 8191 + regionName.hashCode();
          +    if (isSetRegionName()) hashCode = hashCode * 8191 + regionName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetClientAddress()) ? 131071 : 524287);
          -    if (isSetClientAddress())
          -      hashCode = hashCode * 8191 + clientAddress.hashCode();
          +    if (isSetClientAddress()) hashCode = hashCode * 8191 + clientAddress.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -    if (isSetTableName())
          -      hashCode = hashCode * 8191 + tableName.hashCode();
          +    if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetUserName()) ? 131071 : 524287);
          -    if (isSetUserName())
          -      hashCode = hashCode * 8191 + userName.hashCode();
          +    if (isSetUserName()) hashCode = hashCode * 8191 + userName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetLimit()) ? 131071 : 524287);
          -    if (isSetLimit())
          -      hashCode = hashCode * 8191 + limit;
          +    if (isSetLimit()) hashCode = hashCode * 8191 + limit;
           
               hashCode = hashCode * 8191 + ((isSetLogType()) ? 131071 : 524287);
          -    if (isSetLogType())
          -      hashCode = hashCode * 8191 + logType.getValue();
          +    if (isSetLogType()) hashCode = hashCode * 8191 + logType.getValue();
           
               hashCode = hashCode * 8191 + ((isSetFilterByOperator()) ? 131071 : 524287);
          -    if (isSetFilterByOperator())
          -      hashCode = hashCode * 8191 + filterByOperator.getValue();
          +    if (isSetFilterByOperator()) hashCode = hashCode * 8191 + filterByOperator.getValue();
           
               return hashCode;
             }
          @@ -646,7 +680,8 @@ public int compareTo(TLogQueryFilter other) {
                 return lastComparison;
               }
               if (isSetClientAddress()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.clientAddress, other.clientAddress);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.clientAddress, other.clientAddress);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -691,12 +726,14 @@ public int compareTo(TLogQueryFilter other) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetFilterByOperator(), other.isSetFilterByOperator());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetFilterByOperator(), other.isSetFilterByOperator());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetFilterByOperator()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filterByOperator, other.filterByOperator);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.filterByOperator, other.filterByOperator);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -713,7 +750,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -798,37 +836,43 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TLogQueryFilterStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TLogQueryFilterStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TLogQueryFilterStandardScheme getScheme() {
                 return new TLogQueryFilterStandardScheme();
               }
             }
           
           -  private static class TLogQueryFilterStandardScheme extends org.apache.thrift.scheme.StandardScheme<TLogQueryFilter> {
           +  private static class TLogQueryFilterStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<TLogQueryFilter> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TLogQueryFilter struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TLogQueryFilter struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -836,7 +880,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TLogQueryFilter str
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.regionName = iprot.readString();
                         struct.setRegionNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -844,7 +888,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TLogQueryFilter str
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.clientAddress = iprot.readString();
                         struct.setClientAddressIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -852,7 +896,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TLogQueryFilter str
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.tableName = iprot.readString();
                         struct.setTableNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -860,7 +904,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TLogQueryFilter str
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.userName = iprot.readString();
                         struct.setUserNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -868,23 +912,25 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TLogQueryFilter str
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.limit = iprot.readI32();
                         struct.setLimitIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 6: // LOG_TYPE
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.logType = org.apache.hadoop.hbase.thrift2.generated.TLogType.findByValue(iprot.readI32());
          +              struct.logType =
          +                  org.apache.hadoop.hbase.thrift2.generated.TLogType.findByValue(iprot.readI32());
                         struct.setLogTypeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 7: // FILTER_BY_OPERATOR
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.filterByOperator = org.apache.hadoop.hbase.thrift2.generated.TFilterByOperator.findByValue(iprot.readI32());
          +              struct.filterByOperator = org.apache.hadoop.hbase.thrift2.generated.TFilterByOperator
          +                  .findByValue(iprot.readI32());
                         struct.setFilterByOperatorIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -899,7 +945,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TLogQueryFilter str
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TLogQueryFilter struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TLogQueryFilter struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -956,17 +1003,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TLogQueryFilter st
           
             }
           
          -  private static class TLogQueryFilterTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TLogQueryFilterTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TLogQueryFilterTupleScheme getScheme() {
                 return new TLogQueryFilterTupleScheme();
               }
             }
           
           -  private static class TLogQueryFilterTupleScheme extends org.apache.thrift.scheme.TupleScheme<TLogQueryFilter> {
           +  private static class TLogQueryFilterTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<TLogQueryFilter> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TLogQueryFilter struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TLogQueryFilter struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetRegionName()) {
                   optionals.set(0);
          @@ -1014,8 +1065,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TLogQueryFilter str
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TLogQueryFilter struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TLogQueryFilter struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(7);
                 if (incoming.get(0)) {
                   struct.regionName = iprot.readString();
          @@ -1038,18 +1091,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TLogQueryFilter stru
                   struct.setLimitIsSet(true);
                 }
                 if (incoming.get(5)) {
          -        struct.logType = org.apache.hadoop.hbase.thrift2.generated.TLogType.findByValue(iprot.readI32());
          +        struct.logType =
          +            org.apache.hadoop.hbase.thrift2.generated.TLogType.findByValue(iprot.readI32());
                   struct.setLogTypeIsSet(true);
                 }
                 if (incoming.get(6)) {
          -        struct.filterByOperator = org.apache.hadoop.hbase.thrift2.generated.TFilterByOperator.findByValue(iprot.readI32());
          +        struct.filterByOperator = org.apache.hadoop.hbase.thrift2.generated.TFilterByOperator
          +            .findByValue(iprot.readI32());
                   struct.setFilterByOperatorIsSet(true);
                 }
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogType.java
          index a353374d1004..eb7829ddf4bf 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogType.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TLogType.java
          @@ -1,16 +1,26 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TLogType implements org.apache.thrift.TEnum {
          -  SLOW_LOG(1),
          -  LARGE_LOG(2);
          +  SLOW_LOG(1), LARGE_LOG(2);
           
             private final int value;
           
          @@ -30,7 +40,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TLogType findByValue(int value) { 
          +  public static TLogType findByValue(int value) {
               switch (value) {
                 case 1:
                   return SLOW_LOG;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TMutation.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TMutation.java
          index 6039ac39c02c..d6c3017d1260 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TMutation.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TMutation.java
          @@ -1,27 +1,47 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * Atomic mutation for the specified row. It can be either Put or Delete.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
            public class TMutation extends org.apache.thrift.TUnion<TMutation, TMutation._Fields> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TMutation");
          -  private static final org.apache.thrift.protocol.TField PUT_FIELD_DESC = new org.apache.thrift.protocol.TField("put", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -  private static final org.apache.thrift.protocol.TField DELETE_SINGLE_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteSingle", org.apache.thrift.protocol.TType.STRUCT, (short)2);
          -
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TMutation");
          +  private static final org.apache.thrift.protocol.TField PUT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("put", org.apache.thrift.protocol.TType.STRUCT,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField DELETE_SINGLE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("deleteSingle", org.apache.thrift.protocol.TType.STRUCT,
          +          (short) 2);
          +
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    PUT((short)1, "put"),
          -    DELETE_SINGLE((short)2, "deleteSingle");
          +    PUT((short) 1, "put"), DELETE_SINGLE((short) 2, "deleteSingle");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -34,7 +54,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // PUT
                     return PUT;
                   case 2: // DELETE_SINGLE
          @@ -45,12 +65,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -81,11 +101,18 @@ public java.lang.String getFieldName() {
           
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.PUT, new org.apache.thrift.meta_data.FieldMetaData("put", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPut.class)));
          -    tmpMap.put(_Fields.DELETE_SINGLE, new org.apache.thrift.meta_data.FieldMetaData("deleteSingle", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TDelete.class)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.PUT,
          +      new org.apache.thrift.meta_data.FieldMetaData("put",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TPut.class)));
          +    tmpMap.put(_Fields.DELETE_SINGLE,
          +      new org.apache.thrift.meta_data.FieldMetaData("deleteSingle",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TDelete.class)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TMutation.class, metaDataMap);
             }
          @@ -101,6 +128,7 @@ public TMutation(_Fields setField, java.lang.Object value) {
             public TMutation(TMutation other) {
               super(other);
             }
          +
             public TMutation deepCopy() {
               return new TMutation(this);
             }
          @@ -117,27 +145,32 @@ public static TMutation deleteSingle(TDelete value) {
               return x;
             }
           
          -
             @Override
          -  protected void checkType(_Fields setField, java.lang.Object value) throws java.lang.ClassCastException {
          +  protected void checkType(_Fields setField, java.lang.Object value)
          +      throws java.lang.ClassCastException {
               switch (setField) {
                 case PUT:
                   if (value instanceof TPut) {
                     break;
                   }
          -        throw new java.lang.ClassCastException("Was expecting value of type TPut for field 'put', but got " + value.getClass().getSimpleName());
          +        throw new java.lang.ClassCastException(
          +            "Was expecting value of type TPut for field 'put', but got "
          +                + value.getClass().getSimpleName());
                 case DELETE_SINGLE:
                   if (value instanceof TDelete) {
                     break;
                   }
          -        throw new java.lang.ClassCastException("Was expecting value of type TDelete for field 'deleteSingle', but got " + value.getClass().getSimpleName());
          +        throw new java.lang.ClassCastException(
          +            "Was expecting value of type TDelete for field 'deleteSingle', but got "
          +                + value.getClass().getSimpleName());
                 default:
                   throw new java.lang.IllegalArgumentException("Unknown field id " + setField);
               }
             }
           
             @Override
          -  protected java.lang.Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException {
          +  protected java.lang.Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot,
          +      org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException {
               _Fields setField = _Fields.findByThriftId(field.id);
               if (setField != null) {
                 switch (setField) {
          @@ -162,7 +195,8 @@ protected java.lang.Object standardSchemeReadValue(org.apache.thrift.protocol.TP
                       return null;
                     }
                   default:
          -          throw new java.lang.IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
          +          throw new java.lang.IllegalStateException(
          +              "setField wasn't null, but didn't match any of the case statements!");
                 }
               } else {
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
          @@ -171,23 +205,26 @@ protected java.lang.Object standardSchemeReadValue(org.apache.thrift.protocol.TP
             }
           
             @Override
          -  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               switch (setField_) {
                 case PUT:
          -        TPut put = (TPut)value_;
          +        TPut put = (TPut) value_;
                   put.write(oprot);
                   return;
                 case DELETE_SINGLE:
          -        TDelete deleteSingle = (TDelete)value_;
          +        TDelete deleteSingle = (TDelete) value_;
                   deleteSingle.write(oprot);
                   return;
                 default:
          -        throw new java.lang.IllegalStateException("Cannot write union with unknown field " + setField_);
          +        throw new java.lang.IllegalStateException(
          +            "Cannot write union with unknown field " + setField_);
               }
             }
           
             @Override
          -  protected java.lang.Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException {
          +  protected java.lang.Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot,
          +      short fieldID) throws org.apache.thrift.TException {
               _Fields setField = _Fields.findByThriftId(fieldID);
               if (setField != null) {
                 switch (setField) {
          @@ -202,26 +239,30 @@ protected java.lang.Object tupleSchemeReadValue(org.apache.thrift.protocol.TProt
                     deleteSingle.read(iprot);
                     return deleteSingle;
                   default:
          -          throw new java.lang.IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
          +          throw new java.lang.IllegalStateException(
          +              "setField wasn't null, but didn't match any of the case statements!");
                 }
               } else {
          -      throw new org.apache.thrift.protocol.TProtocolException("Couldn't find a field with field id " + fieldID);
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Couldn't find a field with field id " + fieldID);
               }
             }
           
             @Override
          -  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               switch (setField_) {
                 case PUT:
          -        TPut put = (TPut)value_;
          +        TPut put = (TPut) value_;
                   put.write(oprot);
                   return;
                 case DELETE_SINGLE:
          -        TDelete deleteSingle = (TDelete)value_;
          +        TDelete deleteSingle = (TDelete) value_;
                   deleteSingle.write(oprot);
                   return;
                 default:
          -        throw new java.lang.IllegalStateException("Cannot write union with unknown field " + setField_);
          +        throw new java.lang.IllegalStateException(
          +            "Cannot write union with unknown field " + setField_);
               }
             }
           
          @@ -252,65 +293,67 @@ public _Fields fieldForId(int fieldId) {
               return _Fields.findByThriftId(fieldId);
             }
           
          -
             public TPut getPut() {
               if (getSetField() == _Fields.PUT) {
          -      return (TPut)getFieldValue();
          +      return (TPut) getFieldValue();
               } else {
          -      throw new java.lang.RuntimeException("Cannot get field 'put' because union is currently set to " + getFieldDesc(getSetField()).name);
          +      throw new java.lang.RuntimeException(
          +          "Cannot get field 'put' because union is currently set to "
          +              + getFieldDesc(getSetField()).name);
               }
             }
           
             public void setPut(TPut value) {
               setField_ = _Fields.PUT;
          -    value_ = java.util.Objects.requireNonNull(value,"_Fields.PUT");
          +    value_ = java.util.Objects.requireNonNull(value, "_Fields.PUT");
             }
           
             public TDelete getDeleteSingle() {
               if (getSetField() == _Fields.DELETE_SINGLE) {
          -      return (TDelete)getFieldValue();
          +      return (TDelete) getFieldValue();
               } else {
          -      throw new java.lang.RuntimeException("Cannot get field 'deleteSingle' because union is currently set to " + getFieldDesc(getSetField()).name);
          +      throw new java.lang.RuntimeException(
          +          "Cannot get field 'deleteSingle' because union is currently set to "
          +              + getFieldDesc(getSetField()).name);
               }
             }
           
             public void setDeleteSingle(TDelete value) {
               setField_ = _Fields.DELETE_SINGLE;
          -    value_ = java.util.Objects.requireNonNull(value,"_Fields.DELETE_SINGLE");
          +    value_ = java.util.Objects.requireNonNull(value, "_Fields.DELETE_SINGLE");
             }
           
             public boolean isSetPut() {
               return setField_ == _Fields.PUT;
             }
           
          -
             public boolean isSetDeleteSingle() {
               return setField_ == _Fields.DELETE_SINGLE;
             }
           
          -
             public boolean equals(java.lang.Object other) {
               if (other instanceof TMutation) {
          -      return equals((TMutation)other);
          +      return equals((TMutation) other);
               } else {
                 return false;
               }
             }
           
             public boolean equals(TMutation other) {
          -    return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue());
          +    return other != null && getSetField() == other.getSetField()
          +        && getFieldValue().equals(other.getFieldValue());
             }
           
             @Override
             public int compareTo(TMutation other) {
          -    int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField());
          +    int lastComparison =
          +        org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField());
               if (lastComparison == 0) {
                 return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue());
               }
               return lastComparison;
             }
           
          -
             @Override
             public int hashCode() {
     java.util.List<java.lang.Object> list = new java.util.ArrayList<java.lang.Object>();
          @@ -320,29 +363,31 @@ public int hashCode() {
                 list.add(setField.getThriftFieldId());
                 java.lang.Object value = getFieldValue();
                 if (value instanceof org.apache.thrift.TEnum) {
          -        list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue());
          +        list.add(((org.apache.thrift.TEnum) getFieldValue()).getValue());
                 } else {
                   list.add(value);
                 }
               }
               return list.hashCode();
             }
          +
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -
           }
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TNamespaceDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TNamespaceDescriptor.java
          index ad79d1f7117f..346ba0ea4bef 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TNamespaceDescriptor.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TNamespaceDescriptor.java
          @@ -1,35 +1,58 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.NamespaceDescriptor
          + * Thrift wrapper around org.apache.hadoop.hbase.NamespaceDescriptor
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TNamespaceDescriptor implements org.apache.thrift.TBase<TNamespaceDescriptor, TNamespaceDescriptor._Fields>, java.io.Serializable, Cloneable, Comparable<TNamespaceDescriptor> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TNamespaceDescriptor");
          -
          -  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC = new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TNamespaceDescriptorStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TNamespaceDescriptorTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TNamespaceDescriptor
+    implements org.apache.thrift.TBase<TNamespaceDescriptor, TNamespaceDescriptor._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TNamespaceDescriptor> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TNamespaceDescriptor");
          +
          +  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TNamespaceDescriptorStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TNamespaceDescriptorTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String name; // required
-  public @org.apache.thrift.annotation.Nullable java.util.Map<java.lang.String,java.lang.String> configuration; // optional
+  public @org.apache.thrift.annotation.Nullable java.util.Map<java.lang.String, java.lang.String> configuration; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    NAME((short)1, "name"),
          -    CONFIGURATION((short)2, "configuration");
          +    NAME((short) 1, "name"), CONFIGURATION((short) 2, "configuration");
           
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -42,7 +65,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // NAME
                     return NAME;
                   case 2: // CONFIGURATION
          @@ -53,12 +76,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -88,26 +111,33 @@ public java.lang.String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.CONFIGURATION};
          +  private static final _Fields optionals[] = { _Fields.CONFIGURATION };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.CONFIGURATION, new org.apache.thrift.meta_data.FieldMetaData("configuration", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("name",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.CONFIGURATION,
          +      new org.apache.thrift.meta_data.FieldMetaData("configuration",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING))));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TNamespaceDescriptor.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TNamespaceDescriptor.class,
          +      metaDataMap);
             }
           
             public TNamespaceDescriptor() {
             }
           
          -  public TNamespaceDescriptor(
          -    java.lang.String name)
          -  {
          +  public TNamespaceDescriptor(java.lang.String name) {
               this();
               this.name = name;
             }
          @@ -120,7 +150,8 @@ public TNamespaceDescriptor(TNamespaceDescriptor other) {
                 this.name = other.name;
               }
               if (other.isSetConfiguration()) {
-      java.util.Map<java.lang.String,java.lang.String> __this__configuration = new java.util.HashMap<java.lang.String,java.lang.String>(other.configuration);
+      java.util.Map<java.lang.String, java.lang.String> __this__configuration =
+          new java.util.HashMap<java.lang.String, java.lang.String>(other.configuration);
                 this.configuration = __this__configuration;
               }
             }
          @@ -140,7 +171,8 @@ public java.lang.String getName() {
               return this.name;
             }
           
          -  public TNamespaceDescriptor setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
          +  public TNamespaceDescriptor
          +      setName(@org.apache.thrift.annotation.Nullable java.lang.String name) {
               this.name = name;
               return this;
             }
          @@ -166,17 +198,18 @@ public int getConfigurationSize() {
           
             public void putToConfiguration(java.lang.String key, java.lang.String val) {
               if (this.configuration == null) {
-      this.configuration = new java.util.HashMap<java.lang.String,java.lang.String>();
+      this.configuration = new java.util.HashMap<java.lang.String, java.lang.String>();
               }
               this.configuration.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
-  public java.util.Map<java.lang.String,java.lang.String> getConfiguration() {
+  public java.util.Map<java.lang.String, java.lang.String> getConfiguration() {
               return this.configuration;
             }
           
-  public TNamespaceDescriptor setConfiguration(@org.apache.thrift.annotation.Nullable java.util.Map<java.lang.String,java.lang.String> configuration) {
+  public TNamespaceDescriptor setConfiguration(
+      @org.apache.thrift.annotation.Nullable java.util.Map<java.lang.String, java.lang.String> configuration) {
               this.configuration = configuration;
               return this;
             }
          @@ -196,23 +229,24 @@ public void setConfigurationIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case NAME:
          -      if (value == null) {
          -        unsetName();
          -      } else {
          -        setName((java.lang.String)value);
          -      }
          -      break;
          +      case NAME:
          +        if (value == null) {
          +          unsetName();
          +        } else {
          +          setName((java.lang.String) value);
          +        }
          +        break;
           
          -    case CONFIGURATION:
          -      if (value == null) {
          -        unsetConfiguration();
          -      } else {
-        setConfiguration((java.util.Map<java.lang.String,java.lang.String>)value);
          -      }
          -      break;
          +      case CONFIGURATION:
          +        if (value == null) {
          +          unsetConfiguration();
          +        } else {
+          setConfiguration((java.util.Map<java.lang.String, java.lang.String>) value);
          +        }
          +        break;
           
               }
             }
          @@ -220,60 +254,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case NAME:
          -      return getName();
          +      case NAME:
          +        return getName();
           
          -    case CONFIGURATION:
          -      return getConfiguration();
          +      case CONFIGURATION:
          +        return getConfiguration();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case NAME:
          -      return isSetName();
          -    case CONFIGURATION:
          -      return isSetConfiguration();
          +      case NAME:
          +        return isSetName();
          +      case CONFIGURATION:
          +        return isSetConfiguration();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TNamespaceDescriptor)
          -      return this.equals((TNamespaceDescriptor)that);
          +    if (that instanceof TNamespaceDescriptor) return this.equals((TNamespaceDescriptor) that);
               return false;
             }
           
             public boolean equals(TNamespaceDescriptor that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_name = true && this.isSetName();
               boolean that_present_name = true && that.isSetName();
               if (this_present_name || that_present_name) {
          -      if (!(this_present_name && that_present_name))
          -        return false;
          -      if (!this.name.equals(that.name))
          -        return false;
          +      if (!(this_present_name && that_present_name)) return false;
          +      if (!this.name.equals(that.name)) return false;
               }
           
               boolean this_present_configuration = true && this.isSetConfiguration();
               boolean that_present_configuration = true && that.isSetConfiguration();
               if (this_present_configuration || that_present_configuration) {
          -      if (!(this_present_configuration && that_present_configuration))
          -        return false;
          -      if (!this.configuration.equals(that.configuration))
          -        return false;
          +      if (!(this_present_configuration && that_present_configuration)) return false;
          +      if (!this.configuration.equals(that.configuration)) return false;
               }
           
               return true;
          @@ -284,12 +314,10 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287);
          -    if (isSetName())
          -      hashCode = hashCode * 8191 + name.hashCode();
          +    if (isSetName()) hashCode = hashCode * 8191 + name.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetConfiguration()) ? 131071 : 524287);
          -    if (isSetConfiguration())
          -      hashCode = hashCode * 8191 + configuration.hashCode();
          +    if (isSetConfiguration()) hashCode = hashCode * 8191 + configuration.hashCode();
           
               return hashCode;
             }
          @@ -317,7 +345,8 @@ public int compareTo(TNamespaceDescriptor other) {
                 return lastComparison;
               }
               if (isSetConfiguration()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.configuration, other.configuration);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.configuration, other.configuration);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -334,7 +363,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -367,42 +397,48 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (name == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'name' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TNamespaceDescriptorStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TNamespaceDescriptorStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TNamespaceDescriptorStandardScheme getScheme() {
                 return new TNamespaceDescriptorStandardScheme();
               }
             }
           
-  private static class TNamespaceDescriptorStandardScheme extends org.apache.thrift.scheme.StandardScheme<TNamespaceDescriptor> {
+  private static class TNamespaceDescriptorStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<TNamespaceDescriptor> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TNamespaceDescriptor struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TNamespaceDescriptor struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -410,7 +446,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TNamespaceDescripto
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.name = iprot.readString();
                         struct.setNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -418,11 +454,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TNamespaceDescripto
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map180 = iprot.readMapBegin();
-                struct.configuration = new java.util.HashMap<java.lang.String,java.lang.String>(2*_map180.size);
          -                @org.apache.thrift.annotation.Nullable java.lang.String _key181;
          -                @org.apache.thrift.annotation.Nullable java.lang.String _val182;
          -                for (int _i183 = 0; _i183 < _map180.size; ++_i183)
          -                {
+                struct.configuration =
+                    new java.util.HashMap<java.lang.String, java.lang.String>(2 * _map180.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.lang.String _key181;
          +                @org.apache.thrift.annotation.Nullable
          +                java.lang.String _val182;
          +                for (int _i183 = 0; _i183 < _map180.size; ++_i183) {
                             _key181 = iprot.readString();
                             _val182 = iprot.readString();
                             struct.configuration.put(_key181, _val182);
          @@ -430,7 +468,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TNamespaceDescripto
                           iprot.readMapEnd();
                         }
                         struct.setConfigurationIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -445,7 +483,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TNamespaceDescripto
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TNamespaceDescriptor struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TNamespaceDescriptor struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -458,9 +497,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TNamespaceDescript
                   if (struct.isSetConfiguration()) {
                     oprot.writeFieldBegin(CONFIGURATION_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.configuration.size()));
-            for (java.util.Map.Entry<java.lang.String,java.lang.String> _iter184 : struct.configuration.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.configuration.size()));
+            for (java.util.Map.Entry<java.lang.String, java.lang.String> _iter184 : struct.configuration
+                .entrySet()) {
                         oprot.writeString(_iter184.getKey());
                         oprot.writeString(_iter184.getValue());
                       }
          @@ -475,17 +516,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TNamespaceDescript
           
             }
           
          -  private static class TNamespaceDescriptorTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TNamespaceDescriptorTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TNamespaceDescriptorTupleScheme getScheme() {
                 return new TNamespaceDescriptorTupleScheme();
               }
             }
           
-  private static class TNamespaceDescriptorTupleScheme extends org.apache.thrift.scheme.TupleScheme<TNamespaceDescriptor> {
+  private static class TNamespaceDescriptorTupleScheme
+      extends org.apache.thrift.scheme.TupleScheme<TNamespaceDescriptor> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TNamespaceDescriptor struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TNamespaceDescriptor struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeString(struct.name);
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetConfiguration()) {
          @@ -495,8 +540,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TNamespaceDescripto
                 if (struct.isSetConfiguration()) {
                   {
                     oprot.writeI32(struct.configuration.size());
-          for (java.util.Map.Entry<java.lang.String,java.lang.String> _iter185 : struct.configuration.entrySet())
          -          {
+          for (java.util.Map.Entry<java.lang.String, java.lang.String> _iter185 : struct.configuration
+              .entrySet()) {
                       oprot.writeString(_iter185.getKey());
                       oprot.writeString(_iter185.getValue());
                     }
          @@ -505,19 +550,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TNamespaceDescripto
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TNamespaceDescriptor struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TNamespaceDescriptor struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.name = iprot.readString();
                 struct.setNameIsSet(true);
                 java.util.BitSet incoming = iprot.readBitSet(1);
                 if (incoming.get(0)) {
                   {
          -          org.apache.thrift.protocol.TMap _map186 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
-          struct.configuration = new java.util.HashMap<java.lang.String,java.lang.String>(2*_map186.size);
          -          @org.apache.thrift.annotation.Nullable java.lang.String _key187;
          -          @org.apache.thrift.annotation.Nullable java.lang.String _val188;
          -          for (int _i189 = 0; _i189 < _map186.size; ++_i189)
          -          {
          +          org.apache.thrift.protocol.TMap _map186 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
+          struct.configuration =
+              new java.util.HashMap<java.lang.String, java.lang.String>(2 * _map186.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.lang.String _key187;
          +          @org.apache.thrift.annotation.Nullable
          +          java.lang.String _val188;
          +          for (int _i189 = 0; _i189 < _map186.size; ++_i189) {
                       _key187 = iprot.readString();
                       _val188 = iprot.readString();
                       struct.configuration.put(_key187, _val188);
          @@ -528,8 +578,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TNamespaceDescriptor
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
+      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java
          index 4dcfb7561174..b66fbbc98bd5 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TOnlineLogRecord.java
          @@ -1,37 +1,81 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.client.OnlineLogRecordrd
          + * Thrift wrapper around org.apache.hadoop.hbase.client.OnlineLogRecordrd
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TOnlineLogRecord implements org.apache.thrift.TBase<TOnlineLogRecord, TOnlineLogRecord._Fields>, java.io.Serializable, Cloneable, Comparable<TOnlineLogRecord> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOnlineLogRecord");
          -
          -  private static final org.apache.thrift.protocol.TField START_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("startTime", org.apache.thrift.protocol.TType.I64, (short)1);
          -  private static final org.apache.thrift.protocol.TField PROCESSING_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("processingTime", org.apache.thrift.protocol.TType.I32, (short)2);
          -  private static final org.apache.thrift.protocol.TField QUEUE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("queueTime", org.apache.thrift.protocol.TType.I32, (short)3);
          -  private static final org.apache.thrift.protocol.TField RESPONSE_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("responseSize", org.apache.thrift.protocol.TType.I64, (short)4);
          -  private static final org.apache.thrift.protocol.TField CLIENT_ADDRESS_FIELD_DESC = new org.apache.thrift.protocol.TField("clientAddress", org.apache.thrift.protocol.TType.STRING, (short)5);
          -  private static final org.apache.thrift.protocol.TField SERVER_CLASS_FIELD_DESC = new org.apache.thrift.protocol.TField("serverClass", org.apache.thrift.protocol.TType.STRING, (short)6);
          -  private static final org.apache.thrift.protocol.TField METHOD_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("methodName", org.apache.thrift.protocol.TType.STRING, (short)7);
          -  private static final org.apache.thrift.protocol.TField CALL_DETAILS_FIELD_DESC = new org.apache.thrift.protocol.TField("callDetails", org.apache.thrift.protocol.TType.STRING, (short)8);
          -  private static final org.apache.thrift.protocol.TField PARAM_FIELD_DESC = new org.apache.thrift.protocol.TField("param", org.apache.thrift.protocol.TType.STRING, (short)9);
          -  private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("userName", org.apache.thrift.protocol.TType.STRING, (short)10);
          -  private static final org.apache.thrift.protocol.TField MULTI_GETS_COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("multiGetsCount", org.apache.thrift.protocol.TType.I32, (short)11);
          -  private static final org.apache.thrift.protocol.TField MULTI_MUTATIONS_COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("multiMutationsCount", org.apache.thrift.protocol.TType.I32, (short)12);
          -  private static final org.apache.thrift.protocol.TField MULTI_SERVICE_CALLS_FIELD_DESC = new org.apache.thrift.protocol.TField("multiServiceCalls", org.apache.thrift.protocol.TType.I32, (short)13);
          -  private static final org.apache.thrift.protocol.TField REGION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("regionName", org.apache.thrift.protocol.TType.STRING, (short)14);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TOnlineLogRecordStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TOnlineLogRecordTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TOnlineLogRecord
+    implements org.apache.thrift.TBase<TOnlineLogRecord, TOnlineLogRecord._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TOnlineLogRecord> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TOnlineLogRecord");
          +
          +  private static final org.apache.thrift.protocol.TField START_TIME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("startTime", org.apache.thrift.protocol.TType.I64,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField PROCESSING_TIME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("processingTime", org.apache.thrift.protocol.TType.I32,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField QUEUE_TIME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("queueTime", org.apache.thrift.protocol.TType.I32,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField RESPONSE_SIZE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("responseSize", org.apache.thrift.protocol.TType.I64,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField CLIENT_ADDRESS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("clientAddress",
          +          org.apache.thrift.protocol.TType.STRING, (short) 5);
          +  private static final org.apache.thrift.protocol.TField SERVER_CLASS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("serverClass", org.apache.thrift.protocol.TType.STRING,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField METHOD_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("methodName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 7);
          +  private static final org.apache.thrift.protocol.TField CALL_DETAILS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("callDetails", org.apache.thrift.protocol.TType.STRING,
          +          (short) 8);
          +  private static final org.apache.thrift.protocol.TField PARAM_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("param", org.apache.thrift.protocol.TType.STRING,
          +          (short) 9);
          +  private static final org.apache.thrift.protocol.TField USER_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("userName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 10);
          +  private static final org.apache.thrift.protocol.TField MULTI_GETS_COUNT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("multiGetsCount", org.apache.thrift.protocol.TType.I32,
          +          (short) 11);
          +  private static final org.apache.thrift.protocol.TField MULTI_MUTATIONS_COUNT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("multiMutationsCount",
          +          org.apache.thrift.protocol.TType.I32, (short) 12);
          +  private static final org.apache.thrift.protocol.TField MULTI_SERVICE_CALLS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("multiServiceCalls",
          +          org.apache.thrift.protocol.TType.I32, (short) 13);
          +  private static final org.apache.thrift.protocol.TField REGION_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("regionName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 14);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TOnlineLogRecordStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TOnlineLogRecordTupleSchemeFactory();
           
             public long startTime; // required
             public int processingTime; // required
@@ -48,24 +92,22 @@ public class TOnlineLogRecord implements org.apache.thrift.TBase<TOnlineLogRecor
   public int multiServiceCalls; // required
   public java.lang.String regionName; // optional
 
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  /**
+   * The set of fields this struct contains, along with convenience methods for finding and
+   * manipulating them.
+   */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    START_TIME((short)1, "startTime"),
-    PROCESSING_TIME((short)2, "processingTime"),
-    QUEUE_TIME((short)3, "queueTime"),
-    RESPONSE_SIZE((short)4, "responseSize"),
-    CLIENT_ADDRESS((short)5, "clientAddress"),
-    SERVER_CLASS((short)6, "serverClass"),
-    METHOD_NAME((short)7, "methodName"),
-    CALL_DETAILS((short)8, "callDetails"),
-    PARAM((short)9, "param"),
-    USER_NAME((short)10, "userName"),
-    MULTI_GETS_COUNT((short)11, "multiGetsCount"),
-    MULTI_MUTATIONS_COUNT((short)12, "multiMutationsCount"),
-    MULTI_SERVICE_CALLS((short)13, "multiServiceCalls"),
-    REGION_NAME((short)14, "regionName");
-
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
          +    START_TIME((short) 1, "startTime"), PROCESSING_TIME((short) 2, "processingTime"),
          +    QUEUE_TIME((short) 3, "queueTime"), RESPONSE_SIZE((short) 4, "responseSize"),
          +    CLIENT_ADDRESS((short) 5, "clientAddress"), SERVER_CLASS((short) 6, "serverClass"),
          +    METHOD_NAME((short) 7, "methodName"), CALL_DETAILS((short) 8, "callDetails"),
          +    PARAM((short) 9, "param"), USER_NAME((short) 10, "userName"),
          +    MULTI_GETS_COUNT((short) 11, "multiGetsCount"),
          +    MULTI_MUTATIONS_COUNT((short) 12, "multiMutationsCount"),
          +    MULTI_SERVICE_CALLS((short) 13, "multiServiceCalls"), REGION_NAME((short) 14, "regionName");
          +
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -78,7 +120,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // START_TIME
                     return START_TIME;
                   case 2: // PROCESSING_TIME
          @@ -113,12 +155,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -156,60 +198,79 @@ public java.lang.String getFieldName() {
             private static final int __MULTIMUTATIONSCOUNT_ISSET_ID = 5;
             private static final int __MULTISERVICECALLS_ISSET_ID = 6;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.REGION_NAME};
          +  private static final _Fields optionals[] = { _Fields.REGION_NAME };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.START_TIME, new org.apache.thrift.meta_data.FieldMetaData("startTime", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.START_TIME, new org.apache.thrift.meta_data.FieldMetaData("startTime",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.PROCESSING_TIME, new org.apache.thrift.meta_data.FieldMetaData("processingTime", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    tmpMap.put(_Fields.PROCESSING_TIME, new org.apache.thrift.meta_data.FieldMetaData(
          +        "processingTime", org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.QUEUE_TIME, new org.apache.thrift.meta_data.FieldMetaData("queueTime", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    tmpMap.put(_Fields.QUEUE_TIME, new org.apache.thrift.meta_data.FieldMetaData("queueTime",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.RESPONSE_SIZE, new org.apache.thrift.meta_data.FieldMetaData("responseSize", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    tmpMap.put(_Fields.RESPONSE_SIZE, new org.apache.thrift.meta_data.FieldMetaData("responseSize",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.CLIENT_ADDRESS, new org.apache.thrift.meta_data.FieldMetaData("clientAddress", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.SERVER_CLASS, new org.apache.thrift.meta_data.FieldMetaData("serverClass", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.METHOD_NAME, new org.apache.thrift.meta_data.FieldMetaData("methodName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.CALL_DETAILS, new org.apache.thrift.meta_data.FieldMetaData("callDetails", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.PARAM, new org.apache.thrift.meta_data.FieldMetaData("param", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.USER_NAME, new org.apache.thrift.meta_data.FieldMetaData("userName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.MULTI_GETS_COUNT, new org.apache.thrift.meta_data.FieldMetaData("multiGetsCount", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    tmpMap.put(_Fields.CLIENT_ADDRESS,
          +      new org.apache.thrift.meta_data.FieldMetaData("clientAddress",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.SERVER_CLASS,
          +      new org.apache.thrift.meta_data.FieldMetaData("serverClass",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.METHOD_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("methodName",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.CALL_DETAILS,
          +      new org.apache.thrift.meta_data.FieldMetaData("callDetails",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.PARAM,
          +      new org.apache.thrift.meta_data.FieldMetaData("param",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.USER_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("userName",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.MULTI_GETS_COUNT, new org.apache.thrift.meta_data.FieldMetaData(
          +        "multiGetsCount", org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.MULTI_MUTATIONS_COUNT, new org.apache.thrift.meta_data.FieldMetaData("multiMutationsCount", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    tmpMap.put(_Fields.MULTI_MUTATIONS_COUNT, new org.apache.thrift.meta_data.FieldMetaData(
          +        "multiMutationsCount", org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.MULTI_SERVICE_CALLS, new org.apache.thrift.meta_data.FieldMetaData("multiServiceCalls", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    tmpMap.put(_Fields.MULTI_SERVICE_CALLS, new org.apache.thrift.meta_data.FieldMetaData(
          +        "multiServiceCalls", org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.REGION_NAME, new org.apache.thrift.meta_data.FieldMetaData("regionName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.REGION_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("regionName",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TOnlineLogRecord.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TOnlineLogRecord.class,
          +      metaDataMap);
             }
           
             public TOnlineLogRecord() {
             }
           
          -  public TOnlineLogRecord(
          -    long startTime,
          -    int processingTime,
          -    int queueTime,
          -    long responseSize,
          -    java.lang.String clientAddress,
          -    java.lang.String serverClass,
          -    java.lang.String methodName,
          -    java.lang.String callDetails,
          -    java.lang.String param,
          -    java.lang.String userName,
          -    int multiGetsCount,
          -    int multiMutationsCount,
          -    int multiServiceCalls)
          -  {
          +  public TOnlineLogRecord(long startTime, int processingTime, int queueTime, long responseSize,
          +      java.lang.String clientAddress, java.lang.String serverClass, java.lang.String methodName,
          +      java.lang.String callDetails, java.lang.String param, java.lang.String userName,
          +      int multiGetsCount, int multiMutationsCount, int multiServiceCalls) {
               this();
               this.startTime = startTime;
               setStartTimeIsSet(true);
          @@ -308,7 +369,8 @@ public TOnlineLogRecord setStartTime(long startTime) {
             }
           
             public void unsetStartTime() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __STARTTIME_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __STARTTIME_ISSET_ID);
             }
           
             /** Returns true if field startTime is set (has been assigned a value) and false otherwise */
          @@ -317,7 +379,8 @@ public boolean isSetStartTime() {
             }
           
             public void setStartTimeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STARTTIME_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STARTTIME_ISSET_ID, value);
             }
           
             public int getProcessingTime() {
          @@ -331,7 +394,8 @@ public TOnlineLogRecord setProcessingTime(int processingTime) {
             }
           
             public void unsetProcessingTime() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __PROCESSINGTIME_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __PROCESSINGTIME_ISSET_ID);
             }
           
             /** Returns true if field processingTime is set (has been assigned a value) and false otherwise */
          @@ -340,7 +404,8 @@ public boolean isSetProcessingTime() {
             }
           
             public void setProcessingTimeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __PROCESSINGTIME_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __PROCESSINGTIME_ISSET_ID, value);
             }
           
             public int getQueueTime() {
          @@ -354,7 +419,8 @@ public TOnlineLogRecord setQueueTime(int queueTime) {
             }
           
             public void unsetQueueTime() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __QUEUETIME_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __QUEUETIME_ISSET_ID);
             }
           
             /** Returns true if field queueTime is set (has been assigned a value) and false otherwise */
          @@ -363,7 +429,8 @@ public boolean isSetQueueTime() {
             }
           
             public void setQueueTimeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __QUEUETIME_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __QUEUETIME_ISSET_ID, value);
             }
           
             public long getResponseSize() {
          @@ -377,7 +444,8 @@ public TOnlineLogRecord setResponseSize(long responseSize) {
             }
           
             public void unsetResponseSize() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __RESPONSESIZE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __RESPONSESIZE_ISSET_ID);
             }
           
             /** Returns true if field responseSize is set (has been assigned a value) and false otherwise */
          @@ -386,7 +454,8 @@ public boolean isSetResponseSize() {
             }
           
             public void setResponseSizeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __RESPONSESIZE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __RESPONSESIZE_ISSET_ID, value);
             }
           
             @org.apache.thrift.annotation.Nullable
          @@ -394,7 +463,8 @@ public java.lang.String getClientAddress() {
               return this.clientAddress;
             }
           
          -  public TOnlineLogRecord setClientAddress(@org.apache.thrift.annotation.Nullable java.lang.String clientAddress) {
          +  public TOnlineLogRecord
          +      setClientAddress(@org.apache.thrift.annotation.Nullable java.lang.String clientAddress) {
               this.clientAddress = clientAddress;
               return this;
             }
          @@ -419,7 +489,8 @@ public java.lang.String getServerClass() {
               return this.serverClass;
             }
           
          -  public TOnlineLogRecord setServerClass(@org.apache.thrift.annotation.Nullable java.lang.String serverClass) {
          +  public TOnlineLogRecord
          +      setServerClass(@org.apache.thrift.annotation.Nullable java.lang.String serverClass) {
               this.serverClass = serverClass;
               return this;
             }
          @@ -444,7 +515,8 @@ public java.lang.String getMethodName() {
               return this.methodName;
             }
           
          -  public TOnlineLogRecord setMethodName(@org.apache.thrift.annotation.Nullable java.lang.String methodName) {
          +  public TOnlineLogRecord
          +      setMethodName(@org.apache.thrift.annotation.Nullable java.lang.String methodName) {
               this.methodName = methodName;
               return this;
             }
          @@ -469,7 +541,8 @@ public java.lang.String getCallDetails() {
               return this.callDetails;
             }
           
          -  public TOnlineLogRecord setCallDetails(@org.apache.thrift.annotation.Nullable java.lang.String callDetails) {
          +  public TOnlineLogRecord
          +      setCallDetails(@org.apache.thrift.annotation.Nullable java.lang.String callDetails) {
               this.callDetails = callDetails;
               return this;
             }
          @@ -519,7 +592,8 @@ public java.lang.String getUserName() {
               return this.userName;
             }
           
          -  public TOnlineLogRecord setUserName(@org.apache.thrift.annotation.Nullable java.lang.String userName) {
          +  public TOnlineLogRecord
          +      setUserName(@org.apache.thrift.annotation.Nullable java.lang.String userName) {
               this.userName = userName;
               return this;
             }
          @@ -550,7 +624,8 @@ public TOnlineLogRecord setMultiGetsCount(int multiGetsCount) {
             }
           
             public void unsetMultiGetsCount() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MULTIGETSCOUNT_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MULTIGETSCOUNT_ISSET_ID);
             }
           
             /** Returns true if field multiGetsCount is set (has been assigned a value) and false otherwise */
          @@ -559,7 +634,8 @@ public boolean isSetMultiGetsCount() {
             }
           
             public void setMultiGetsCountIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MULTIGETSCOUNT_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MULTIGETSCOUNT_ISSET_ID, value);
             }
           
             public int getMultiMutationsCount() {
          @@ -573,16 +649,22 @@ public TOnlineLogRecord setMultiMutationsCount(int multiMutationsCount) {
             }
           
             public void unsetMultiMutationsCount() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MULTIMUTATIONSCOUNT_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MULTIMUTATIONSCOUNT_ISSET_ID);
             }
           
          -  /** Returns true if field multiMutationsCount is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field multiMutationsCount is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSetMultiMutationsCount() {
          -    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __MULTIMUTATIONSCOUNT_ISSET_ID);
          +    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield,
          +      __MULTIMUTATIONSCOUNT_ISSET_ID);
             }
           
             public void setMultiMutationsCountIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MULTIMUTATIONSCOUNT_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __MULTIMUTATIONSCOUNT_ISSET_ID, value);
             }
           
             public int getMultiServiceCalls() {
          @@ -596,16 +678,20 @@ public TOnlineLogRecord setMultiServiceCalls(int multiServiceCalls) {
             }
           
             public void unsetMultiServiceCalls() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MULTISERVICECALLS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MULTISERVICECALLS_ISSET_ID);
             }
           
          -  /** Returns true if field multiServiceCalls is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field multiServiceCalls is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetMultiServiceCalls() {
               return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __MULTISERVICECALLS_ISSET_ID);
             }
           
             public void setMultiServiceCallsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MULTISERVICECALLS_ISSET_ID, value);
          +    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield,
          +      __MULTISERVICECALLS_ISSET_ID, value);
             }
           
             @org.apache.thrift.annotation.Nullable
          @@ -613,7 +699,8 @@ public java.lang.String getRegionName() {
               return this.regionName;
             }
           
          -  public TOnlineLogRecord setRegionName(@org.apache.thrift.annotation.Nullable java.lang.String regionName) {
          +  public TOnlineLogRecord
          +      setRegionName(@org.apache.thrift.annotation.Nullable java.lang.String regionName) {
               this.regionName = regionName;
               return this;
             }
          @@ -633,119 +720,120 @@ public void setRegionNameIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case START_TIME:
          -      if (value == null) {
          -        unsetStartTime();
          -      } else {
          -        setStartTime((java.lang.Long)value);
          -      }
          -      break;
          +      case START_TIME:
          +        if (value == null) {
          +          unsetStartTime();
          +        } else {
          +          setStartTime((java.lang.Long) value);
          +        }
          +        break;
           
          -    case PROCESSING_TIME:
          -      if (value == null) {
          -        unsetProcessingTime();
          -      } else {
          -        setProcessingTime((java.lang.Integer)value);
          -      }
          -      break;
          +      case PROCESSING_TIME:
          +        if (value == null) {
          +          unsetProcessingTime();
          +        } else {
          +          setProcessingTime((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case QUEUE_TIME:
          -      if (value == null) {
          -        unsetQueueTime();
          -      } else {
          -        setQueueTime((java.lang.Integer)value);
          -      }
          -      break;
          +      case QUEUE_TIME:
          +        if (value == null) {
          +          unsetQueueTime();
          +        } else {
          +          setQueueTime((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case RESPONSE_SIZE:
          -      if (value == null) {
          -        unsetResponseSize();
          -      } else {
          -        setResponseSize((java.lang.Long)value);
          -      }
          -      break;
          +      case RESPONSE_SIZE:
          +        if (value == null) {
          +          unsetResponseSize();
          +        } else {
          +          setResponseSize((java.lang.Long) value);
          +        }
          +        break;
           
          -    case CLIENT_ADDRESS:
          -      if (value == null) {
          -        unsetClientAddress();
          -      } else {
          -        setClientAddress((java.lang.String)value);
          -      }
          -      break;
          +      case CLIENT_ADDRESS:
          +        if (value == null) {
          +          unsetClientAddress();
          +        } else {
          +          setClientAddress((java.lang.String) value);
          +        }
          +        break;
           
          -    case SERVER_CLASS:
          -      if (value == null) {
          -        unsetServerClass();
          -      } else {
          -        setServerClass((java.lang.String)value);
          -      }
          -      break;
          +      case SERVER_CLASS:
          +        if (value == null) {
          +          unsetServerClass();
          +        } else {
          +          setServerClass((java.lang.String) value);
          +        }
          +        break;
           
          -    case METHOD_NAME:
          -      if (value == null) {
          -        unsetMethodName();
          -      } else {
          -        setMethodName((java.lang.String)value);
          -      }
          -      break;
          +      case METHOD_NAME:
          +        if (value == null) {
          +          unsetMethodName();
          +        } else {
          +          setMethodName((java.lang.String) value);
          +        }
          +        break;
           
          -    case CALL_DETAILS:
          -      if (value == null) {
          -        unsetCallDetails();
          -      } else {
          -        setCallDetails((java.lang.String)value);
          -      }
          -      break;
          +      case CALL_DETAILS:
          +        if (value == null) {
          +          unsetCallDetails();
          +        } else {
          +          setCallDetails((java.lang.String) value);
          +        }
          +        break;
           
          -    case PARAM:
          -      if (value == null) {
          -        unsetParam();
          -      } else {
          -        setParam((java.lang.String)value);
          -      }
          -      break;
          +      case PARAM:
          +        if (value == null) {
          +          unsetParam();
          +        } else {
          +          setParam((java.lang.String) value);
          +        }
          +        break;
           
          -    case USER_NAME:
          -      if (value == null) {
          -        unsetUserName();
          -      } else {
          -        setUserName((java.lang.String)value);
          -      }
          -      break;
          +      case USER_NAME:
          +        if (value == null) {
          +          unsetUserName();
          +        } else {
          +          setUserName((java.lang.String) value);
          +        }
          +        break;
           
          -    case MULTI_GETS_COUNT:
          -      if (value == null) {
          -        unsetMultiGetsCount();
          -      } else {
          -        setMultiGetsCount((java.lang.Integer)value);
          -      }
          -      break;
          +      case MULTI_GETS_COUNT:
          +        if (value == null) {
          +          unsetMultiGetsCount();
          +        } else {
          +          setMultiGetsCount((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case MULTI_MUTATIONS_COUNT:
          -      if (value == null) {
          -        unsetMultiMutationsCount();
          -      } else {
          -        setMultiMutationsCount((java.lang.Integer)value);
          -      }
          -      break;
          +      case MULTI_MUTATIONS_COUNT:
          +        if (value == null) {
          +          unsetMultiMutationsCount();
          +        } else {
          +          setMultiMutationsCount((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case MULTI_SERVICE_CALLS:
          -      if (value == null) {
          -        unsetMultiServiceCalls();
          -      } else {
          -        setMultiServiceCalls((java.lang.Integer)value);
          -      }
          -      break;
          +      case MULTI_SERVICE_CALLS:
          +        if (value == null) {
          +          unsetMultiServiceCalls();
          +        } else {
          +          setMultiServiceCalls((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case REGION_NAME:
          -      if (value == null) {
          -        unsetRegionName();
          -      } else {
          -        setRegionName((java.lang.String)value);
          -      }
          -      break;
          +      case REGION_NAME:
          +        if (value == null) {
          +          unsetRegionName();
          +        } else {
          +          setRegionName((java.lang.String) value);
          +        }
          +        break;
           
               }
             }
          @@ -753,228 +841,200 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case START_TIME:
          -      return getStartTime();
          +      case START_TIME:
          +        return getStartTime();
           
          -    case PROCESSING_TIME:
          -      return getProcessingTime();
          +      case PROCESSING_TIME:
          +        return getProcessingTime();
           
          -    case QUEUE_TIME:
          -      return getQueueTime();
          +      case QUEUE_TIME:
          +        return getQueueTime();
           
          -    case RESPONSE_SIZE:
          -      return getResponseSize();
          +      case RESPONSE_SIZE:
          +        return getResponseSize();
           
          -    case CLIENT_ADDRESS:
          -      return getClientAddress();
          +      case CLIENT_ADDRESS:
          +        return getClientAddress();
           
          -    case SERVER_CLASS:
          -      return getServerClass();
          +      case SERVER_CLASS:
          +        return getServerClass();
           
          -    case METHOD_NAME:
          -      return getMethodName();
          +      case METHOD_NAME:
          +        return getMethodName();
           
          -    case CALL_DETAILS:
          -      return getCallDetails();
          +      case CALL_DETAILS:
          +        return getCallDetails();
           
          -    case PARAM:
          -      return getParam();
          +      case PARAM:
          +        return getParam();
           
          -    case USER_NAME:
          -      return getUserName();
          +      case USER_NAME:
          +        return getUserName();
           
          -    case MULTI_GETS_COUNT:
          -      return getMultiGetsCount();
          +      case MULTI_GETS_COUNT:
          +        return getMultiGetsCount();
           
          -    case MULTI_MUTATIONS_COUNT:
          -      return getMultiMutationsCount();
          +      case MULTI_MUTATIONS_COUNT:
          +        return getMultiMutationsCount();
           
          -    case MULTI_SERVICE_CALLS:
          -      return getMultiServiceCalls();
          +      case MULTI_SERVICE_CALLS:
          +        return getMultiServiceCalls();
           
          -    case REGION_NAME:
          -      return getRegionName();
          +      case REGION_NAME:
          +        return getRegionName();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case START_TIME:
          -      return isSetStartTime();
          -    case PROCESSING_TIME:
          -      return isSetProcessingTime();
          -    case QUEUE_TIME:
          -      return isSetQueueTime();
          -    case RESPONSE_SIZE:
          -      return isSetResponseSize();
          -    case CLIENT_ADDRESS:
          -      return isSetClientAddress();
          -    case SERVER_CLASS:
          -      return isSetServerClass();
          -    case METHOD_NAME:
          -      return isSetMethodName();
          -    case CALL_DETAILS:
          -      return isSetCallDetails();
          -    case PARAM:
          -      return isSetParam();
          -    case USER_NAME:
          -      return isSetUserName();
          -    case MULTI_GETS_COUNT:
          -      return isSetMultiGetsCount();
          -    case MULTI_MUTATIONS_COUNT:
          -      return isSetMultiMutationsCount();
          -    case MULTI_SERVICE_CALLS:
          -      return isSetMultiServiceCalls();
          -    case REGION_NAME:
          -      return isSetRegionName();
          +      case START_TIME:
          +        return isSetStartTime();
          +      case PROCESSING_TIME:
          +        return isSetProcessingTime();
          +      case QUEUE_TIME:
          +        return isSetQueueTime();
          +      case RESPONSE_SIZE:
          +        return isSetResponseSize();
          +      case CLIENT_ADDRESS:
          +        return isSetClientAddress();
          +      case SERVER_CLASS:
          +        return isSetServerClass();
          +      case METHOD_NAME:
          +        return isSetMethodName();
          +      case CALL_DETAILS:
          +        return isSetCallDetails();
          +      case PARAM:
          +        return isSetParam();
          +      case USER_NAME:
          +        return isSetUserName();
          +      case MULTI_GETS_COUNT:
          +        return isSetMultiGetsCount();
          +      case MULTI_MUTATIONS_COUNT:
          +        return isSetMultiMutationsCount();
          +      case MULTI_SERVICE_CALLS:
          +        return isSetMultiServiceCalls();
          +      case REGION_NAME:
          +        return isSetRegionName();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TOnlineLogRecord)
          -      return this.equals((TOnlineLogRecord)that);
          +    if (that instanceof TOnlineLogRecord) return this.equals((TOnlineLogRecord) that);
               return false;
             }
           
             public boolean equals(TOnlineLogRecord that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_startTime = true;
               boolean that_present_startTime = true;
               if (this_present_startTime || that_present_startTime) {
          -      if (!(this_present_startTime && that_present_startTime))
          -        return false;
          -      if (this.startTime != that.startTime)
          -        return false;
          +      if (!(this_present_startTime && that_present_startTime)) return false;
          +      if (this.startTime != that.startTime) return false;
               }
           
               boolean this_present_processingTime = true;
               boolean that_present_processingTime = true;
               if (this_present_processingTime || that_present_processingTime) {
          -      if (!(this_present_processingTime && that_present_processingTime))
          -        return false;
          -      if (this.processingTime != that.processingTime)
          -        return false;
          +      if (!(this_present_processingTime && that_present_processingTime)) return false;
          +      if (this.processingTime != that.processingTime) return false;
               }
           
               boolean this_present_queueTime = true;
               boolean that_present_queueTime = true;
               if (this_present_queueTime || that_present_queueTime) {
          -      if (!(this_present_queueTime && that_present_queueTime))
          -        return false;
          -      if (this.queueTime != that.queueTime)
          -        return false;
          +      if (!(this_present_queueTime && that_present_queueTime)) return false;
          +      if (this.queueTime != that.queueTime) return false;
               }
           
               boolean this_present_responseSize = true;
               boolean that_present_responseSize = true;
               if (this_present_responseSize || that_present_responseSize) {
          -      if (!(this_present_responseSize && that_present_responseSize))
          -        return false;
          -      if (this.responseSize != that.responseSize)
          -        return false;
          +      if (!(this_present_responseSize && that_present_responseSize)) return false;
          +      if (this.responseSize != that.responseSize) return false;
               }
           
               boolean this_present_clientAddress = true && this.isSetClientAddress();
               boolean that_present_clientAddress = true && that.isSetClientAddress();
               if (this_present_clientAddress || that_present_clientAddress) {
          -      if (!(this_present_clientAddress && that_present_clientAddress))
          -        return false;
          -      if (!this.clientAddress.equals(that.clientAddress))
          -        return false;
          +      if (!(this_present_clientAddress && that_present_clientAddress)) return false;
          +      if (!this.clientAddress.equals(that.clientAddress)) return false;
               }
           
               boolean this_present_serverClass = true && this.isSetServerClass();
               boolean that_present_serverClass = true && that.isSetServerClass();
               if (this_present_serverClass || that_present_serverClass) {
          -      if (!(this_present_serverClass && that_present_serverClass))
          -        return false;
          -      if (!this.serverClass.equals(that.serverClass))
          -        return false;
          +      if (!(this_present_serverClass && that_present_serverClass)) return false;
          +      if (!this.serverClass.equals(that.serverClass)) return false;
               }
           
               boolean this_present_methodName = true && this.isSetMethodName();
               boolean that_present_methodName = true && that.isSetMethodName();
               if (this_present_methodName || that_present_methodName) {
          -      if (!(this_present_methodName && that_present_methodName))
          -        return false;
          -      if (!this.methodName.equals(that.methodName))
          -        return false;
          +      if (!(this_present_methodName && that_present_methodName)) return false;
          +      if (!this.methodName.equals(that.methodName)) return false;
               }
           
               boolean this_present_callDetails = true && this.isSetCallDetails();
               boolean that_present_callDetails = true && that.isSetCallDetails();
               if (this_present_callDetails || that_present_callDetails) {
          -      if (!(this_present_callDetails && that_present_callDetails))
          -        return false;
          -      if (!this.callDetails.equals(that.callDetails))
          -        return false;
          +      if (!(this_present_callDetails && that_present_callDetails)) return false;
          +      if (!this.callDetails.equals(that.callDetails)) return false;
               }
           
               boolean this_present_param = true && this.isSetParam();
               boolean that_present_param = true && that.isSetParam();
               if (this_present_param || that_present_param) {
          -      if (!(this_present_param && that_present_param))
          -        return false;
          -      if (!this.param.equals(that.param))
          -        return false;
          +      if (!(this_present_param && that_present_param)) return false;
          +      if (!this.param.equals(that.param)) return false;
               }
           
               boolean this_present_userName = true && this.isSetUserName();
               boolean that_present_userName = true && that.isSetUserName();
               if (this_present_userName || that_present_userName) {
          -      if (!(this_present_userName && that_present_userName))
          -        return false;
          -      if (!this.userName.equals(that.userName))
          -        return false;
          +      if (!(this_present_userName && that_present_userName)) return false;
          +      if (!this.userName.equals(that.userName)) return false;
               }
           
               boolean this_present_multiGetsCount = true;
               boolean that_present_multiGetsCount = true;
               if (this_present_multiGetsCount || that_present_multiGetsCount) {
          -      if (!(this_present_multiGetsCount && that_present_multiGetsCount))
          -        return false;
          -      if (this.multiGetsCount != that.multiGetsCount)
          -        return false;
          +      if (!(this_present_multiGetsCount && that_present_multiGetsCount)) return false;
          +      if (this.multiGetsCount != that.multiGetsCount) return false;
               }
           
               boolean this_present_multiMutationsCount = true;
               boolean that_present_multiMutationsCount = true;
               if (this_present_multiMutationsCount || that_present_multiMutationsCount) {
          -      if (!(this_present_multiMutationsCount && that_present_multiMutationsCount))
          -        return false;
          -      if (this.multiMutationsCount != that.multiMutationsCount)
          -        return false;
          +      if (!(this_present_multiMutationsCount && that_present_multiMutationsCount)) return false;
          +      if (this.multiMutationsCount != that.multiMutationsCount) return false;
               }
           
               boolean this_present_multiServiceCalls = true;
               boolean that_present_multiServiceCalls = true;
               if (this_present_multiServiceCalls || that_present_multiServiceCalls) {
          -      if (!(this_present_multiServiceCalls && that_present_multiServiceCalls))
          -        return false;
          -      if (this.multiServiceCalls != that.multiServiceCalls)
          -        return false;
          +      if (!(this_present_multiServiceCalls && that_present_multiServiceCalls)) return false;
          +      if (this.multiServiceCalls != that.multiServiceCalls) return false;
               }
           
               boolean this_present_regionName = true && this.isSetRegionName();
               boolean that_present_regionName = true && that.isSetRegionName();
               if (this_present_regionName || that_present_regionName) {
          -      if (!(this_present_regionName && that_present_regionName))
          -        return false;
          -      if (!this.regionName.equals(that.regionName))
          -        return false;
          +      if (!(this_present_regionName && that_present_regionName)) return false;
          +      if (!this.regionName.equals(that.regionName)) return false;
               }
           
               return true;
          @@ -993,28 +1053,22 @@ public int hashCode() {
               hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(responseSize);
           
               hashCode = hashCode * 8191 + ((isSetClientAddress()) ? 131071 : 524287);
          -    if (isSetClientAddress())
          -      hashCode = hashCode * 8191 + clientAddress.hashCode();
          +    if (isSetClientAddress()) hashCode = hashCode * 8191 + clientAddress.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetServerClass()) ? 131071 : 524287);
          -    if (isSetServerClass())
          -      hashCode = hashCode * 8191 + serverClass.hashCode();
          +    if (isSetServerClass()) hashCode = hashCode * 8191 + serverClass.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetMethodName()) ? 131071 : 524287);
          -    if (isSetMethodName())
          -      hashCode = hashCode * 8191 + methodName.hashCode();
          +    if (isSetMethodName()) hashCode = hashCode * 8191 + methodName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetCallDetails()) ? 131071 : 524287);
          -    if (isSetCallDetails())
          -      hashCode = hashCode * 8191 + callDetails.hashCode();
          +    if (isSetCallDetails()) hashCode = hashCode * 8191 + callDetails.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetParam()) ? 131071 : 524287);
          -    if (isSetParam())
          -      hashCode = hashCode * 8191 + param.hashCode();
          +    if (isSetParam()) hashCode = hashCode * 8191 + param.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetUserName()) ? 131071 : 524287);
          -    if (isSetUserName())
          -      hashCode = hashCode * 8191 + userName.hashCode();
          +    if (isSetUserName()) hashCode = hashCode * 8191 + userName.hashCode();
           
               hashCode = hashCode * 8191 + multiGetsCount;
           
          @@ -1023,8 +1077,7 @@ public int hashCode() {
               hashCode = hashCode * 8191 + multiServiceCalls;
           
               hashCode = hashCode * 8191 + ((isSetRegionName()) ? 131071 : 524287);
          -    if (isSetRegionName())
          -      hashCode = hashCode * 8191 + regionName.hashCode();
          +    if (isSetRegionName()) hashCode = hashCode * 8191 + regionName.hashCode();
           
               return hashCode;
             }
          @@ -1052,7 +1105,8 @@ public int compareTo(TOnlineLogRecord other) {
                 return lastComparison;
               }
               if (isSetProcessingTime()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.processingTime, other.processingTime);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.processingTime, other.processingTime);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1072,7 +1126,8 @@ public int compareTo(TOnlineLogRecord other) {
                 return lastComparison;
               }
               if (isSetResponseSize()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.responseSize, other.responseSize);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.responseSize, other.responseSize);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1082,7 +1137,8 @@ public int compareTo(TOnlineLogRecord other) {
                 return lastComparison;
               }
               if (isSetClientAddress()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.clientAddress, other.clientAddress);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.clientAddress, other.clientAddress);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1142,27 +1198,32 @@ public int compareTo(TOnlineLogRecord other) {
                 return lastComparison;
               }
               if (isSetMultiGetsCount()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.multiGetsCount, other.multiGetsCount);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.multiGetsCount, other.multiGetsCount);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetMultiMutationsCount(), other.isSetMultiMutationsCount());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetMultiMutationsCount(), other.isSetMultiMutationsCount());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetMultiMutationsCount()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.multiMutationsCount, other.multiMutationsCount);
          +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.multiMutationsCount,
          +        other.multiMutationsCount);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetMultiServiceCalls(), other.isSetMultiServiceCalls());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetMultiServiceCalls(), other.isSetMultiServiceCalls());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetMultiServiceCalls()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.multiServiceCalls, other.multiServiceCalls);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.multiServiceCalls, other.multiServiceCalls);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1189,7 +1250,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -1289,67 +1351,86 @@ public java.lang.String toString() {
           
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
          -    // alas, we cannot check 'startTime' because it's a primitive and you chose the non-beans generator.
          -    // alas, we cannot check 'processingTime' because it's a primitive and you chose the non-beans generator.
          -    // alas, we cannot check 'queueTime' because it's a primitive and you chose the non-beans generator.
          -    // alas, we cannot check 'responseSize' because it's a primitive and you chose the non-beans generator.
          +    // alas, we cannot check 'startTime' because it's a primitive and you chose the non-beans
          +    // generator.
          +    // alas, we cannot check 'processingTime' because it's a primitive and you chose the non-beans
          +    // generator.
          +    // alas, we cannot check 'queueTime' because it's a primitive and you chose the non-beans
          +    // generator.
          +    // alas, we cannot check 'responseSize' because it's a primitive and you chose the non-beans
          +    // generator.
               if (clientAddress == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'clientAddress' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'clientAddress' was not present! Struct: " + toString());
               }
               if (serverClass == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'serverClass' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'serverClass' was not present! Struct: " + toString());
               }
               if (methodName == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'methodName' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'methodName' was not present! Struct: " + toString());
               }
               if (callDetails == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'callDetails' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'callDetails' was not present! Struct: " + toString());
               }
               if (param == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'param' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'param' was not present! Struct: " + toString());
               }
               if (userName == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'userName' was not present! Struct: " + toString());
          -    }
          -    // alas, we cannot check 'multiGetsCount' because it's a primitive and you chose the non-beans generator.
          -    // alas, we cannot check 'multiMutationsCount' because it's a primitive and you chose the non-beans generator.
          -    // alas, we cannot check 'multiServiceCalls' because it's a primitive and you chose the non-beans generator.
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'userName' was not present! Struct: " + toString());
          +    }
          +    // alas, we cannot check 'multiGetsCount' because it's a primitive and you chose the non-beans
          +    // generator.
          +    // alas, we cannot check 'multiMutationsCount' because it's a primitive and you chose the
          +    // non-beans generator.
          +    // alas, we cannot check 'multiServiceCalls' because it's a primitive and you chose the
          +    // non-beans generator.
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TOnlineLogRecordStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TOnlineLogRecordStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TOnlineLogRecordStandardScheme getScheme() {
                 return new TOnlineLogRecordStandardScheme();
               }
             }
           
          -  private static class TOnlineLogRecordStandardScheme extends org.apache.thrift.scheme.StandardScheme {
          +  private static class TOnlineLogRecordStandardScheme
          +      extends org.apache.thrift.scheme.StandardScheme {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -1357,7 +1438,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.startTime = iprot.readI64();
                         struct.setStartTimeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1365,7 +1446,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.processingTime = iprot.readI32();
                         struct.setProcessingTimeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1373,7 +1454,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.queueTime = iprot.readI32();
                         struct.setQueueTimeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1381,7 +1462,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.responseSize = iprot.readI64();
                         struct.setResponseSizeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1389,7 +1470,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.clientAddress = iprot.readString();
                         struct.setClientAddressIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1397,7 +1478,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.serverClass = iprot.readString();
                         struct.setServerClassIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1405,7 +1486,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.methodName = iprot.readString();
                         struct.setMethodNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1413,7 +1494,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.callDetails = iprot.readString();
                         struct.setCallDetailsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1421,7 +1502,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.param = iprot.readString();
                         struct.setParamIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1429,7 +1510,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.userName = iprot.readString();
                         struct.setUserNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1437,7 +1518,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.multiGetsCount = iprot.readI32();
                         struct.setMultiGetsCountIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1445,7 +1526,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.multiMutationsCount = iprot.readI32();
                         struct.setMultiMutationsCountIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1453,7 +1534,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.multiServiceCalls = iprot.readI32();
                         struct.setMultiServiceCallsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1461,7 +1542,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.regionName = iprot.readString();
                         struct.setRegionNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1474,30 +1555,43 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TOnlineLogRecord st
           
                 // check for required fields of primitive type, which can't be checked in the validate method
                 if (!struct.isSetStartTime()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'startTime' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'startTime' was not found in serialized data! Struct: " + toString());
                 }
                 if (!struct.isSetProcessingTime()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'processingTime' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'processingTime' was not found in serialized data! Struct: "
          +                + toString());
                 }
                 if (!struct.isSetQueueTime()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'queueTime' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'queueTime' was not found in serialized data! Struct: " + toString());
                 }
                 if (!struct.isSetResponseSize()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'responseSize' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'responseSize' was not found in serialized data! Struct: "
          +                + toString());
                 }
                 if (!struct.isSetMultiGetsCount()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'multiGetsCount' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'multiGetsCount' was not found in serialized data! Struct: "
          +                + toString());
                 }
                 if (!struct.isSetMultiMutationsCount()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'multiMutationsCount' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'multiMutationsCount' was not found in serialized data! Struct: "
          +                + toString());
                 }
                 if (!struct.isSetMultiServiceCalls()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'multiServiceCalls' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'multiServiceCalls' was not found in serialized data! Struct: "
          +                + toString());
                 }
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TOnlineLogRecord struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TOnlineLogRecord struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -1565,17 +1659,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TOnlineLogRecord s
           
             }
           
          -  private static class TOnlineLogRecordTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TOnlineLogRecordTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TOnlineLogRecordTupleScheme getScheme() {
                 return new TOnlineLogRecordTupleScheme();
               }
             }
           
           -  private static class TOnlineLogRecordTupleScheme extends org.apache.thrift.scheme.TupleScheme<TOnlineLogRecord> {
           +  private static class TOnlineLogRecordTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<TOnlineLogRecord> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TOnlineLogRecord struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TOnlineLogRecord struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeI64(struct.startTime);
                 oprot.writeI32(struct.processingTime);
                 oprot.writeI32(struct.queueTime);
          @@ -1600,8 +1698,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TOnlineLogRecord st
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TOnlineLogRecord struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TOnlineLogRecord struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.startTime = iprot.readI64();
                 struct.setStartTimeIsSet(true);
                 struct.processingTime = iprot.readI32();
          @@ -1636,8 +1736,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TOnlineLogRecord str
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPermissionScope.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPermissionScope.java
          index 7ca83ced9d51..1558aec092cc 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPermissionScope.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPermissionScope.java
          @@ -1,16 +1,26 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TPermissionScope implements org.apache.thrift.TEnum {
          -  TABLE(0),
          -  NAMESPACE(1);
          +  TABLE(0), NAMESPACE(1);
           
             private final int value;
           
          @@ -30,7 +40,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TPermissionScope findByValue(int value) { 
          +  public static TPermissionScope findByValue(int value) {
               switch (value) {
                 case 0:
                   return TABLE;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java
          index b458182fdc4c..ef8f4361c812 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java
          @@ -1,63 +1,85 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Used to perform Put operations for a single row.
          - * 
          - * Add column values to this object and they'll be added.
          - * You can provide a default timestamp if the column values
          - * don't have one. If you don't provide a default timestamp
          - * the current time is inserted.
          - * 
          - * You can specify how this Put should be written to the write-ahead Log (WAL)
          - * by changing the durability. If you don't provide durability, it defaults to
          - * column family's default setting for durability.
          + * Used to perform Put operations for a single row. Add column values to this object and they'll be
          + * added. You can provide a default timestamp if the column values don't have one. If you don't
          + * provide a default timestamp the current time is inserted. You can specify how this Put should be
          + * written to the write-ahead Log (WAL) by changing the durability. If you don't provide durability,
          + * it defaults to column family's default setting for durability.
            */
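// Illustrative sketch, not part of the patch: the Javadoc above describes how the generated
// TPut API is meant to be used from client code. This minimal example assumes the TColumnValue
// three-argument constructor (family, qualifier, value) and the TDurability.SYNC_WAL constant
// from the thrift2 IDL; neither appears in this hunk.
import java.nio.ByteBuffer;
import java.util.Collections;
import org.apache.hadoop.hbase.thrift2.generated.TColumnValue;
import org.apache.hadoop.hbase.thrift2.generated.TDurability;
import org.apache.hadoop.hbase.thrift2.generated.TPut;

public class TPutUsageSketch {
  public static TPut examplePut() {
    // One cell: the required family/qualifier/value are passed as binary (ByteBuffer) fields.
    TColumnValue cv = new TColumnValue(
        ByteBuffer.wrap("cf".getBytes()),
        ByteBuffer.wrap("q".getBytes()),
        ByteBuffer.wrap("value".getBytes()));
    // Required TPut fields go through the two-argument constructor; optional fields such as the
    // default timestamp and WAL durability are chained afterwards, as the Javadoc describes.
    return new TPut(ByteBuffer.wrap("row1".getBytes()), Collections.singletonList(cv))
        .setTimestamp(System.currentTimeMillis())
        .setDurability(TDurability.SYNC_WAL);
  }
}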
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class TPut implements org.apache.thrift.TBase<TPut, TPut._Fields>, java.io.Serializable, Cloneable, Comparable<TPut> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPut");
          -
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField COLUMN_VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("columnValues", org.apache.thrift.protocol.TType.LIST, (short)2);
          -  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)3);
          -  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)5);
          -  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32, (short)6);
          -  private static final org.apache.thrift.protocol.TField CELL_VISIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("cellVisibility", org.apache.thrift.protocol.TType.STRUCT, (short)7);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TPutStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TPutTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           +public class TPut implements org.apache.thrift.TBase<TPut, TPut._Fields>, java.io.Serializable,
           +    Cloneable, Comparable<TPut> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TPut");
          +
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField COLUMN_VALUES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columnValues", org.apache.thrift.protocol.TType.LIST,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField CELL_VISIBILITY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cellVisibility",
          +          org.apache.thrift.protocol.TType.STRUCT, (short) 7);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TPutStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TPutTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
              public @org.apache.thrift.annotation.Nullable java.util.List<TColumnValue> columnValues; // required
             public long timestamp; // optional
           -  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // optional
           +  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // optional
             /**
          -   * 
              * @see TDurability
              */
             public @org.apache.thrift.annotation.Nullable TDurability durability; // optional
             public @org.apache.thrift.annotation.Nullable TCellVisibility cellVisibility; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    ROW((short)1, "row"),
          -    COLUMN_VALUES((short)2, "columnValues"),
          -    TIMESTAMP((short)3, "timestamp"),
          -    ATTRIBUTES((short)5, "attributes"),
          +    ROW((short) 1, "row"), COLUMN_VALUES((short) 2, "columnValues"),
          +    TIMESTAMP((short) 3, "timestamp"), ATTRIBUTES((short) 5, "attributes"),
               /**
          -     * 
                * @see TDurability
                */
          -    DURABILITY((short)6, "durability"),
          -    CELL_VISIBILITY((short)7, "cellVisibility");
          +    DURABILITY((short) 6, "durability"), CELL_VISIBILITY((short) 7, "cellVisibility");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -70,7 +92,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // ROW
                     return ROW;
                   case 2: // COLUMN_VALUES
          @@ -89,12 +111,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -126,25 +148,43 @@ public java.lang.String getFieldName() {
             // isset id assignments
             private static final int __TIMESTAMP_ISSET_ID = 0;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.TIMESTAMP,_Fields.ATTRIBUTES,_Fields.DURABILITY,_Fields.CELL_VISIBILITY};
          +  private static final _Fields optionals[] =
          +      { _Fields.TIMESTAMP, _Fields.ATTRIBUTES, _Fields.DURABILITY, _Fields.CELL_VISIBILITY };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.COLUMN_VALUES, new org.apache.thrift.meta_data.FieldMetaData("columnValues", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnValue.class))));
          -    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.COLUMN_VALUES, new org.apache.thrift.meta_data.FieldMetaData("columnValues",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumnValue.class))));
          +    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
          -    tmpMap.put(_Fields.DURABILITY, new org.apache.thrift.meta_data.FieldMetaData("durability", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TDurability.class)));
          -    tmpMap.put(_Fields.CELL_VISIBILITY, new org.apache.thrift.meta_data.FieldMetaData("cellVisibility", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCellVisibility.class)));
          +    tmpMap.put(_Fields.ATTRIBUTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true))));
          +    tmpMap.put(_Fields.DURABILITY,
          +      new org.apache.thrift.meta_data.FieldMetaData("durability",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TDurability.class)));
          +    tmpMap.put(_Fields.CELL_VISIBILITY,
          +      new org.apache.thrift.meta_data.FieldMetaData("cellVisibility",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TCellVisibility.class)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPut.class, metaDataMap);
             }
          @@ -152,10 +192,7 @@ public java.lang.String getFieldName() {
             public TPut() {
             }
           
           -  public TPut(
           -    java.nio.ByteBuffer row,
           -    java.util.List<TColumnValue> columnValues)
           -  {
           +  public TPut(java.nio.ByteBuffer row, java.util.List<TColumnValue> columnValues) {
               this();
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
               this.columnValues = columnValues;
          @@ -170,7 +207,8 @@ public TPut(TPut other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetColumnValues()) {
           -      java.util.List<TColumnValue> __this__columnValues = new java.util.ArrayList<TColumnValue>(other.columnValues.size());
           +      java.util.List<TColumnValue> __this__columnValues =
           +          new java.util.ArrayList<TColumnValue>(other.columnValues.size());
                 for (TColumnValue other_element : other.columnValues) {
                   __this__columnValues.add(new TColumnValue(other_element));
                 }
          @@ -178,7 +216,8 @@ public TPut(TPut other) {
               }
               this.timestamp = other.timestamp;
               if (other.isSetAttributes()) {
           -      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes);
           +      java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
           +          new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(other.attributes);
                 this.attributes = __this__attributes;
               }
               if (other.isSetDurability()) {
          @@ -214,7 +253,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TPut setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
           @@ -259,7 +298,8 @@ public java.util.List<TColumnValue> getColumnValues() {
               return this.columnValues;
             }
           
           -  public TPut setColumnValues(@org.apache.thrift.annotation.Nullable java.util.List<TColumnValue> columnValues) {
           +  public TPut setColumnValues(
           +      @org.apache.thrift.annotation.Nullable java.util.List<TColumnValue> columnValues) {
               this.columnValues = columnValues;
               return this;
             }
          @@ -290,7 +330,8 @@ public TPut setTimestamp(long timestamp) {
             }
           
             public void unsetTimestamp() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
             }
           
             /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
          @@ -299,7 +340,8 @@ public boolean isSetTimestamp() {
             }
           
             public void setTimestampIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
             }
           
             public int getAttributesSize() {
          @@ -308,17 +350,18 @@ public int getAttributesSize() {
           
             public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
               if (this.attributes == null) {
           -      this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
           +      this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
               }
               this.attributes.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
           -  public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
           +  public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
               return this.attributes;
             }
           
           -  public TPut setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
           +  public TPut setAttributes(
           +      @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
               this.attributes = attributes;
               return this;
             }
          @@ -339,7 +382,6 @@ public void setAttributesIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TDurability
              */
             @org.apache.thrift.annotation.Nullable
          @@ -348,7 +390,6 @@ public TDurability getDurability() {
             }
           
             /**
          -   * 
              * @see TDurability
              */
             public TPut setDurability(@org.apache.thrift.annotation.Nullable TDurability durability) {
          @@ -376,7 +417,8 @@ public TCellVisibility getCellVisibility() {
               return this.cellVisibility;
             }
           
          -  public TPut setCellVisibility(@org.apache.thrift.annotation.Nullable TCellVisibility cellVisibility) {
          +  public TPut
          +      setCellVisibility(@org.apache.thrift.annotation.Nullable TCellVisibility cellVisibility) {
               this.cellVisibility = cellVisibility;
               return this;
             }
          @@ -396,59 +438,60 @@ public void setCellVisibilityIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case COLUMN_VALUES:
          -      if (value == null) {
          -        unsetColumnValues();
          -      } else {
           -        setColumnValues((java.util.List<TColumnValue>)value);
          -      }
          -      break;
          +      case COLUMN_VALUES:
          +        if (value == null) {
          +          unsetColumnValues();
          +        } else {
           +          setColumnValues((java.util.List<TColumnValue>) value);
          +        }
          +        break;
           
          -    case TIMESTAMP:
          -      if (value == null) {
          -        unsetTimestamp();
          -      } else {
          -        setTimestamp((java.lang.Long)value);
          -      }
          -      break;
          +      case TIMESTAMP:
          +        if (value == null) {
          +          unsetTimestamp();
          +        } else {
          +          setTimestamp((java.lang.Long) value);
          +        }
          +        break;
           
          -    case ATTRIBUTES:
          -      if (value == null) {
          -        unsetAttributes();
          -      } else {
           -        setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case ATTRIBUTES:
          +        if (value == null) {
          +          unsetAttributes();
          +        } else {
           +          setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case DURABILITY:
          -      if (value == null) {
          -        unsetDurability();
          -      } else {
          -        setDurability((TDurability)value);
          -      }
          -      break;
          +      case DURABILITY:
          +        if (value == null) {
          +          unsetDurability();
          +        } else {
          +          setDurability((TDurability) value);
          +        }
          +        break;
           
          -    case CELL_VISIBILITY:
          -      if (value == null) {
          -        unsetCellVisibility();
          -      } else {
          -        setCellVisibility((TCellVisibility)value);
          -      }
          -      break;
          +      case CELL_VISIBILITY:
          +        if (value == null) {
          +          unsetCellVisibility();
          +        } else {
          +          setCellVisibility((TCellVisibility) value);
          +        }
          +        break;
           
               }
             }
          @@ -456,116 +499,104 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case COLUMN_VALUES:
          -      return getColumnValues();
          +      case COLUMN_VALUES:
          +        return getColumnValues();
           
          -    case TIMESTAMP:
          -      return getTimestamp();
          +      case TIMESTAMP:
          +        return getTimestamp();
           
          -    case ATTRIBUTES:
          -      return getAttributes();
          +      case ATTRIBUTES:
          +        return getAttributes();
           
          -    case DURABILITY:
          -      return getDurability();
          +      case DURABILITY:
          +        return getDurability();
           
          -    case CELL_VISIBILITY:
          -      return getCellVisibility();
          +      case CELL_VISIBILITY:
          +        return getCellVisibility();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case ROW:
          -      return isSetRow();
          -    case COLUMN_VALUES:
          -      return isSetColumnValues();
          -    case TIMESTAMP:
          -      return isSetTimestamp();
          -    case ATTRIBUTES:
          -      return isSetAttributes();
          -    case DURABILITY:
          -      return isSetDurability();
          -    case CELL_VISIBILITY:
          -      return isSetCellVisibility();
          +      case ROW:
          +        return isSetRow();
          +      case COLUMN_VALUES:
          +        return isSetColumnValues();
          +      case TIMESTAMP:
          +        return isSetTimestamp();
          +      case ATTRIBUTES:
          +        return isSetAttributes();
          +      case DURABILITY:
          +        return isSetDurability();
          +      case CELL_VISIBILITY:
          +        return isSetCellVisibility();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TPut)
          -      return this.equals((TPut)that);
          +    if (that instanceof TPut) return this.equals((TPut) that);
               return false;
             }
           
             public boolean equals(TPut that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_columnValues = true && this.isSetColumnValues();
               boolean that_present_columnValues = true && that.isSetColumnValues();
               if (this_present_columnValues || that_present_columnValues) {
          -      if (!(this_present_columnValues && that_present_columnValues))
          -        return false;
          -      if (!this.columnValues.equals(that.columnValues))
          -        return false;
          +      if (!(this_present_columnValues && that_present_columnValues)) return false;
          +      if (!this.columnValues.equals(that.columnValues)) return false;
               }
           
               boolean this_present_timestamp = true && this.isSetTimestamp();
               boolean that_present_timestamp = true && that.isSetTimestamp();
               if (this_present_timestamp || that_present_timestamp) {
          -      if (!(this_present_timestamp && that_present_timestamp))
          -        return false;
          -      if (this.timestamp != that.timestamp)
          -        return false;
          +      if (!(this_present_timestamp && that_present_timestamp)) return false;
          +      if (this.timestamp != that.timestamp) return false;
               }
           
               boolean this_present_attributes = true && this.isSetAttributes();
               boolean that_present_attributes = true && that.isSetAttributes();
               if (this_present_attributes || that_present_attributes) {
          -      if (!(this_present_attributes && that_present_attributes))
          -        return false;
          -      if (!this.attributes.equals(that.attributes))
          -        return false;
          +      if (!(this_present_attributes && that_present_attributes)) return false;
          +      if (!this.attributes.equals(that.attributes)) return false;
               }
           
               boolean this_present_durability = true && this.isSetDurability();
               boolean that_present_durability = true && that.isSetDurability();
               if (this_present_durability || that_present_durability) {
          -      if (!(this_present_durability && that_present_durability))
          -        return false;
          -      if (!this.durability.equals(that.durability))
          -        return false;
          +      if (!(this_present_durability && that_present_durability)) return false;
          +      if (!this.durability.equals(that.durability)) return false;
               }
           
               boolean this_present_cellVisibility = true && this.isSetCellVisibility();
               boolean that_present_cellVisibility = true && that.isSetCellVisibility();
               if (this_present_cellVisibility || that_present_cellVisibility) {
          -      if (!(this_present_cellVisibility && that_present_cellVisibility))
          -        return false;
          -      if (!this.cellVisibility.equals(that.cellVisibility))
          -        return false;
          +      if (!(this_present_cellVisibility && that_present_cellVisibility)) return false;
          +      if (!this.cellVisibility.equals(that.cellVisibility)) return false;
               }
           
               return true;
          @@ -576,28 +607,23 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumnValues()) ? 131071 : 524287);
          -    if (isSetColumnValues())
          -      hashCode = hashCode * 8191 + columnValues.hashCode();
          +    if (isSetColumnValues()) hashCode = hashCode * 8191 + columnValues.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetTimestamp()) ? 131071 : 524287);
               if (isSetTimestamp())
                 hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(timestamp);
           
               hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -    if (isSetAttributes())
          -      hashCode = hashCode * 8191 + attributes.hashCode();
          +    if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetDurability()) ? 131071 : 524287);
          -    if (isSetDurability())
          -      hashCode = hashCode * 8191 + durability.getValue();
          +    if (isSetDurability()) hashCode = hashCode * 8191 + durability.getValue();
           
               hashCode = hashCode * 8191 + ((isSetCellVisibility()) ? 131071 : 524287);
          -    if (isSetCellVisibility())
          -      hashCode = hashCode * 8191 + cellVisibility.hashCode();
          +    if (isSetCellVisibility()) hashCode = hashCode * 8191 + cellVisibility.hashCode();
           
               return hashCode;
             }
          @@ -625,7 +651,8 @@ public int compareTo(TPut other) {
                 return lastComparison;
               }
               if (isSetColumnValues()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnValues, other.columnValues);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.columnValues, other.columnValues);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -665,7 +692,8 @@ public int compareTo(TPut other) {
                 return lastComparison;
               }
               if (isSetCellVisibility()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cellVisibility, other.cellVisibility);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.cellVisibility, other.cellVisibility);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -682,7 +710,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -749,10 +778,12 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (row == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'row' was not present! Struct: " + toString());
               }
               if (columnValues == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'columnValues' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'columnValues' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
               if (cellVisibility != null) {
          @@ -762,17 +793,21 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
          @@ -786,13 +821,13 @@ public TPutStandardScheme getScheme() {
           
              private static class TPutStandardScheme extends org.apache.thrift.scheme.StandardScheme<TPut> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -800,7 +835,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -809,9 +844,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct) throws
                         {
                           org.apache.thrift.protocol.TList _list34 = iprot.readListBegin();
                          struct.columnValues = new java.util.ArrayList<TColumnValue>(_list34.size);
          -                @org.apache.thrift.annotation.Nullable TColumnValue _elem35;
          -                for (int _i36 = 0; _i36 < _list34.size; ++_i36)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TColumnValue _elem35;
          +                for (int _i36 = 0; _i36 < _list34.size; ++_i36) {
                             _elem35 = new TColumnValue();
                             _elem35.read(iprot);
                             struct.columnValues.add(_elem35);
          @@ -819,7 +854,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct) throws
                           iprot.readListEnd();
                         }
                         struct.setColumnValuesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -827,7 +862,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.timestamp = iprot.readI64();
                         struct.setTimestampIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -835,11 +870,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct) throws
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map37 = iprot.readMapBegin();
           -                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map37.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key38;
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val39;
          -                for (int _i40 = 0; _i40 < _map37.size; ++_i40)
          -                {
           +                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
           +                    2 * _map37.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key38;
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _val39;
          +                for (int _i40 = 0; _i40 < _map37.size; ++_i40) {
                             _key38 = iprot.readBinary();
                             _val39 = iprot.readBinary();
                             struct.attributes.put(_key38, _val39);
          @@ -847,15 +884,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct) throws
                           iprot.readMapEnd();
                         }
                         struct.setAttributesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 6: // DURABILITY
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability
          +                  .findByValue(iprot.readI32());
                         struct.setDurabilityIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -864,7 +902,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct) throws
                         struct.cellVisibility = new TCellVisibility();
                         struct.cellVisibility.read(iprot);
                         struct.setCellVisibilityIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -879,7 +917,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TPut struct) throws
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TPut struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TPut struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -891,9 +930,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TPut struct) throw
                 if (struct.columnValues != null) {
                   oprot.writeFieldBegin(COLUMN_VALUES_FIELD_DESC);
                   {
          -          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columnValues.size()));
          -          for (TColumnValue _iter41 : struct.columnValues)
          -          {
          +          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +              org.apache.thrift.protocol.TType.STRUCT, struct.columnValues.size()));
          +          for (TColumnValue _iter41 : struct.columnValues) {
                       _iter41.write(oprot);
                     }
                     oprot.writeListEnd();
          @@ -909,9 +948,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TPut struct) throw
                   if (struct.isSetAttributes()) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
-            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter42 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
+            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter42 : struct.attributes
+                .entrySet()) {
                         oprot.writeBinary(_iter42.getKey());
                         oprot.writeBinary(_iter42.getValue());
                       }
          @@ -949,13 +990,14 @@ public TPutTupleScheme getScheme() {
   private static class TPutTupleScheme extends org.apache.thrift.scheme.TupleScheme<TPut> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TPut struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TPut struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.row);
                 {
                   oprot.writeI32(struct.columnValues.size());
          -        for (TColumnValue _iter43 : struct.columnValues)
          -        {
          +        for (TColumnValue _iter43 : struct.columnValues) {
                     _iter43.write(oprot);
                   }
                 }
          @@ -979,8 +1021,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TPut struct) throws
                 if (struct.isSetAttributes()) {
                   {
                     oprot.writeI32(struct.attributes.size());
-          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter44 : struct.attributes.entrySet())
          -          {
+          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter44 : struct.attributes
+              .entrySet()) {
                       oprot.writeBinary(_iter44.getKey());
                       oprot.writeBinary(_iter44.getValue());
                     }
          @@ -995,16 +1037,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TPut struct) throws
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TPut struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TPut struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.row = iprot.readBinary();
                 struct.setRowIsSet(true);
                 {
          -        org.apache.thrift.protocol.TList _list45 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +        org.apache.thrift.protocol.TList _list45 =
          +            iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
         struct.columnValues = new java.util.ArrayList<TColumnValue>(_list45.size);
          -        @org.apache.thrift.annotation.Nullable TColumnValue _elem46;
          -        for (int _i47 = 0; _i47 < _list45.size; ++_i47)
          -        {
          +        @org.apache.thrift.annotation.Nullable
          +        TColumnValue _elem46;
          +        for (int _i47 = 0; _i47 < _list45.size; ++_i47) {
                     _elem46 = new TColumnValue();
                     _elem46.read(iprot);
                     struct.columnValues.add(_elem46);
          @@ -1018,12 +1063,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TPut struct) throws
                 }
                 if (incoming.get(1)) {
                   {
          -          org.apache.thrift.protocol.TMap _map48 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
-          struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map48.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key49;
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val50;
          -          for (int _i51 = 0; _i51 < _map48.size; ++_i51)
          -          {
          +          org.apache.thrift.protocol.TMap _map48 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
+          struct.attributes =
+              new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map48.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key49;
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _val50;
          +          for (int _i51 = 0; _i51 < _map48.size; ++_i51) {
                       _key49 = iprot.readBinary();
                       _val50 = iprot.readBinary();
                       struct.attributes.put(_key49, _val50);
          @@ -1032,7 +1080,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TPut struct) throws
                   struct.setAttributesIsSet(true);
                 }
                 if (incoming.get(2)) {
          -        struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +        struct.durability =
          +            org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
                   struct.setDurabilityIsSet(true);
                 }
                 if (incoming.get(3)) {
          @@ -1043,8 +1092,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TPut struct) throws
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java
          index 8af01cd1ed4c..54517a54ef75 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TReadType.java
          @@ -1,17 +1,26 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TReadType implements org.apache.thrift.TEnum {
          -  DEFAULT(1),
          -  STREAM(2),
          -  PREAD(3);
          +  DEFAULT(1), STREAM(2), PREAD(3);
           
             private final int value;
           
          @@ -31,7 +40,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TReadType findByValue(int value) { 
          +  public static TReadType findByValue(int value) {
               switch (value) {
                 case 1:
                   return DEFAULT;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java
          index 757856e3e649..3a6fb55ed384 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java
          @@ -1,40 +1,66 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * if no Result is found, row and columnValues will not be set.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TResult implements org.apache.thrift.TBase<TResult, TResult._Fields>, java.io.Serializable, Cloneable, Comparable<TResult> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TResult");
          -
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField COLUMN_VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("columnValues", org.apache.thrift.protocol.TType.LIST, (short)2);
          -  private static final org.apache.thrift.protocol.TField STALE_FIELD_DESC = new org.apache.thrift.protocol.TField("stale", org.apache.thrift.protocol.TType.BOOL, (short)3);
          -  private static final org.apache.thrift.protocol.TField PARTIAL_FIELD_DESC = new org.apache.thrift.protocol.TField("partial", org.apache.thrift.protocol.TType.BOOL, (short)4);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TResultStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TResultTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TResult implements org.apache.thrift.TBase<TResult, TResult._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TResult> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TResult");
          +
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField COLUMN_VALUES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columnValues", org.apache.thrift.protocol.TType.LIST,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField STALE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("stale", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField PARTIAL_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("partial", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 4);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TResultStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TResultTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // optional
   public @org.apache.thrift.annotation.Nullable java.util.List<TColumnValue> columnValues; // required
             public boolean stale; // optional
             public boolean partial; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    ROW((short)1, "row"),
          -    COLUMN_VALUES((short)2, "columnValues"),
          -    STALE((short)3, "stale"),
          -    PARTIAL((short)4, "partial");
          +    ROW((short) 1, "row"), COLUMN_VALUES((short) 2, "columnValues"), STALE((short) 3, "stale"),
          +    PARTIAL((short) 4, "partial");
           
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -47,7 +73,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // ROW
                     return ROW;
                   case 2: // COLUMN_VALUES
          @@ -62,12 +88,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -100,18 +126,26 @@ public java.lang.String getFieldName() {
             private static final int __STALE_ISSET_ID = 0;
             private static final int __PARTIAL_ISSET_ID = 1;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.ROW,_Fields.STALE,_Fields.PARTIAL};
          +  private static final _Fields optionals[] = { _Fields.ROW, _Fields.STALE, _Fields.PARTIAL };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.COLUMN_VALUES, new org.apache.thrift.meta_data.FieldMetaData("columnValues", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnValue.class))));
          -    tmpMap.put(_Fields.STALE, new org.apache.thrift.meta_data.FieldMetaData("stale", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.COLUMN_VALUES, new org.apache.thrift.meta_data.FieldMetaData("columnValues",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumnValue.class))));
          +    tmpMap.put(_Fields.STALE, new org.apache.thrift.meta_data.FieldMetaData("stale",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.PARTIAL, new org.apache.thrift.meta_data.FieldMetaData("partial", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.PARTIAL, new org.apache.thrift.meta_data.FieldMetaData("partial",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TResult.class, metaDataMap);
          @@ -124,9 +158,7 @@ public TResult() {
           
             }
           
          -  public TResult(
-    java.util.List<TColumnValue> columnValues)
          -  {
+  public TResult(java.util.List<TColumnValue> columnValues) {
               this();
               this.columnValues = columnValues;
             }
          @@ -140,7 +172,8 @@ public TResult(TResult other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetColumnValues()) {
-      java.util.List<TColumnValue> __this__columnValues = new java.util.ArrayList<TColumnValue>(other.columnValues.size());
+      java.util.List<TColumnValue> __this__columnValues =
+          new java.util.ArrayList<TColumnValue>(other.columnValues.size());
                 for (TColumnValue other_element : other.columnValues) {
                   __this__columnValues.add(new TColumnValue(other_element));
                 }
          @@ -174,7 +207,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TResult setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
@@ -219,7 +252,8 @@ public java.util.List<TColumnValue> getColumnValues() {
               return this.columnValues;
             }
           
-  public TResult setColumnValues(@org.apache.thrift.annotation.Nullable java.util.List<TColumnValue> columnValues) {
          +  public TResult setColumnValues(
+      @org.apache.thrift.annotation.Nullable java.util.List<TColumnValue> columnValues) {
               this.columnValues = columnValues;
               return this;
             }
          @@ -259,7 +293,8 @@ public boolean isSetStale() {
             }
           
             public void setStaleIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STALE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STALE_ISSET_ID, value);
             }
           
             public boolean isPartial() {
          @@ -273,7 +308,8 @@ public TResult setPartial(boolean partial) {
             }
           
             public void unsetPartial() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __PARTIAL_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __PARTIAL_ISSET_ID);
             }
           
             /** Returns true if field partial is set (has been assigned a value) and false otherwise */
          @@ -282,46 +318,48 @@ public boolean isSetPartial() {
             }
           
             public void setPartialIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __PARTIAL_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __PARTIAL_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case COLUMN_VALUES:
          -      if (value == null) {
          -        unsetColumnValues();
          -      } else {
-        setColumnValues((java.util.List<TColumnValue>)value);
          -      }
          -      break;
          +      case COLUMN_VALUES:
          +        if (value == null) {
          +          unsetColumnValues();
          +        } else {
+          setColumnValues((java.util.List<TColumnValue>) value);
          +        }
          +        break;
           
          -    case STALE:
          -      if (value == null) {
          -        unsetStale();
          -      } else {
          -        setStale((java.lang.Boolean)value);
          -      }
          -      break;
          +      case STALE:
          +        if (value == null) {
          +          unsetStale();
          +        } else {
          +          setStale((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case PARTIAL:
          -      if (value == null) {
          -        unsetPartial();
          -      } else {
          -        setPartial((java.lang.Boolean)value);
          -      }
          -      break;
          +      case PARTIAL:
          +        if (value == null) {
          +          unsetPartial();
          +        } else {
          +          setPartial((java.lang.Boolean) value);
          +        }
          +        break;
           
               }
             }
          @@ -329,88 +367,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case COLUMN_VALUES:
          -      return getColumnValues();
          +      case COLUMN_VALUES:
          +        return getColumnValues();
           
          -    case STALE:
          -      return isStale();
          +      case STALE:
          +        return isStale();
           
          -    case PARTIAL:
          -      return isPartial();
          +      case PARTIAL:
          +        return isPartial();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case ROW:
          -      return isSetRow();
          -    case COLUMN_VALUES:
          -      return isSetColumnValues();
          -    case STALE:
          -      return isSetStale();
          -    case PARTIAL:
          -      return isSetPartial();
          +      case ROW:
          +        return isSetRow();
          +      case COLUMN_VALUES:
          +        return isSetColumnValues();
          +      case STALE:
          +        return isSetStale();
          +      case PARTIAL:
          +        return isSetPartial();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TResult)
          -      return this.equals((TResult)that);
          +    if (that instanceof TResult) return this.equals((TResult) that);
               return false;
             }
           
             public boolean equals(TResult that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_columnValues = true && this.isSetColumnValues();
               boolean that_present_columnValues = true && that.isSetColumnValues();
               if (this_present_columnValues || that_present_columnValues) {
          -      if (!(this_present_columnValues && that_present_columnValues))
          -        return false;
          -      if (!this.columnValues.equals(that.columnValues))
          -        return false;
          +      if (!(this_present_columnValues && that_present_columnValues)) return false;
          +      if (!this.columnValues.equals(that.columnValues)) return false;
               }
           
               boolean this_present_stale = true && this.isSetStale();
               boolean that_present_stale = true && that.isSetStale();
               if (this_present_stale || that_present_stale) {
          -      if (!(this_present_stale && that_present_stale))
          -        return false;
          -      if (this.stale != that.stale)
          -        return false;
          +      if (!(this_present_stale && that_present_stale)) return false;
          +      if (this.stale != that.stale) return false;
               }
           
               boolean this_present_partial = true && this.isSetPartial();
               boolean that_present_partial = true && that.isSetPartial();
               if (this_present_partial || that_present_partial) {
          -      if (!(this_present_partial && that_present_partial))
          -        return false;
          -      if (this.partial != that.partial)
          -        return false;
          +      if (!(this_present_partial && that_present_partial)) return false;
          +      if (this.partial != that.partial) return false;
               }
           
               return true;
          @@ -421,20 +451,16 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumnValues()) ? 131071 : 524287);
          -    if (isSetColumnValues())
          -      hashCode = hashCode * 8191 + columnValues.hashCode();
          +    if (isSetColumnValues()) hashCode = hashCode * 8191 + columnValues.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetStale()) ? 131071 : 524287);
          -    if (isSetStale())
          -      hashCode = hashCode * 8191 + ((stale) ? 131071 : 524287);
          +    if (isSetStale()) hashCode = hashCode * 8191 + ((stale) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetPartial()) ? 131071 : 524287);
          -    if (isSetPartial())
          -      hashCode = hashCode * 8191 + ((partial) ? 131071 : 524287);
          +    if (isSetPartial()) hashCode = hashCode * 8191 + ((partial) ? 131071 : 524287);
           
               return hashCode;
             }
          @@ -462,7 +488,8 @@ public int compareTo(TResult other) {
                 return lastComparison;
               }
               if (isSetColumnValues()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnValues, other.columnValues);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.columnValues, other.columnValues);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -499,7 +526,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -544,44 +572,51 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (columnValues == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'columnValues' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'columnValues' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TResultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TResultStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TResultStandardScheme getScheme() {
                 return new TResultStandardScheme();
               }
             }
           
-  private static class TResultStandardScheme extends org.apache.thrift.scheme.StandardScheme<TResult> {
          +  private static class TResultStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<TResult> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TResult struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TResult struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -589,7 +624,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TResult struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -598,9 +633,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TResult struct) thr
                         {
                           org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
                 struct.columnValues = new java.util.ArrayList<TColumnValue>(_list0.size);
          -                @org.apache.thrift.annotation.Nullable TColumnValue _elem1;
          -                for (int _i2 = 0; _i2 < _list0.size; ++_i2)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TColumnValue _elem1;
          +                for (int _i2 = 0; _i2 < _list0.size; ++_i2) {
                             _elem1 = new TColumnValue();
                             _elem1.read(iprot);
                             struct.columnValues.add(_elem1);
          @@ -608,7 +643,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TResult struct) thr
                           iprot.readListEnd();
                         }
                         struct.setColumnValuesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -616,7 +651,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TResult struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.stale = iprot.readBool();
                         struct.setStaleIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -624,7 +659,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TResult struct) thr
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.partial = iprot.readBool();
                         struct.setPartialIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -639,7 +674,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TResult struct) thr
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TResult struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TResult struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -653,9 +689,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TResult struct) th
                 if (struct.columnValues != null) {
                   oprot.writeFieldBegin(COLUMN_VALUES_FIELD_DESC);
                   {
          -          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columnValues.size()));
          -          for (TColumnValue _iter3 : struct.columnValues)
          -          {
          +          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +              org.apache.thrift.protocol.TType.STRUCT, struct.columnValues.size()));
          +          for (TColumnValue _iter3 : struct.columnValues) {
                       _iter3.write(oprot);
                     }
                     oprot.writeListEnd();
          @@ -687,12 +723,13 @@ public TResultTupleScheme getScheme() {
   private static class TResultTupleScheme extends org.apache.thrift.scheme.TupleScheme<TResult> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TResult struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TResult struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 {
                   oprot.writeI32(struct.columnValues.size());
          -        for (TColumnValue _iter4 : struct.columnValues)
          -        {
          +        for (TColumnValue _iter4 : struct.columnValues) {
                     _iter4.write(oprot);
                   }
                 }
          @@ -719,14 +756,17 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TResult struct) thr
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TResult struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TResult struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 {
          -        org.apache.thrift.protocol.TList _list5 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +        org.apache.thrift.protocol.TList _list5 =
          +            iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
         struct.columnValues = new java.util.ArrayList<TColumnValue>(_list5.size);
          -        @org.apache.thrift.annotation.Nullable TColumnValue _elem6;
          -        for (int _i7 = 0; _i7 < _list5.size; ++_i7)
          -        {
          +        @org.apache.thrift.annotation.Nullable
          +        TColumnValue _elem6;
          +        for (int _i7 = 0; _i7 < _list5.size; ++_i7) {
                     _elem6 = new TColumnValue();
                     _elem6.read(iprot);
                     struct.columnValues.add(_elem6);
          @@ -749,8 +789,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TResult struct) thro
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java
          index 6accf9d569ae..8dd5c536fc67 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java
          @@ -1,34 +1,57 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
            * A TRowMutations object is used to apply a number of Mutations to a single row.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TRowMutations implements org.apache.thrift.TBase<TRowMutations, TRowMutations._Fields>, java.io.Serializable, Cloneable, Comparable<TRowMutations> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowMutations");
          -
          -  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.LIST, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TRowMutationsStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TRowMutationsTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TRowMutations implements org.apache.thrift.TBase<TRowMutations, TRowMutations._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TRowMutations> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TRowMutations");
          +
          +  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.LIST,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TRowMutationsStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TRowMutationsTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer row; // required
   public @org.apache.thrift.annotation.Nullable java.util.List<TMutation> mutations; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    ROW((short)1, "row"),
          -    MUTATIONS((short)2, "mutations");
          +    ROW((short) 1, "row"), MUTATIONS((short) 2, "mutations");
           
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -41,7 +64,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // ROW
                     return ROW;
                   case 2: // MUTATIONS
          @@ -52,12 +75,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -89,23 +112,27 @@ public java.lang.String getFieldName() {
             // isset id assignments
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TMutation.class))));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("row",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TMutation.class))));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRowMutations.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRowMutations.class,
          +      metaDataMap);
             }
           
             public TRowMutations() {
             }
           
          -  public TRowMutations(
          -    java.nio.ByteBuffer row,
-    java.util.List<TMutation> mutations)
          -  {
+  public TRowMutations(java.nio.ByteBuffer row, java.util.List<TMutation> mutations) {
               this();
               this.row = org.apache.thrift.TBaseHelper.copyBinary(row);
               this.mutations = mutations;
          @@ -119,7 +146,8 @@ public TRowMutations(TRowMutations other) {
                 this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
               }
               if (other.isSetMutations()) {
-      java.util.List<TMutation> __this__mutations = new java.util.ArrayList<TMutation>(other.mutations.size());
+      java.util.List<TMutation> __this__mutations =
+          new java.util.ArrayList<TMutation>(other.mutations.size());
                 for (TMutation other_element : other.mutations) {
                   __this__mutations.add(new TMutation(other_element));
                 }
          @@ -147,7 +175,7 @@ public java.nio.ByteBuffer bufferForRow() {
             }
           
             public TRowMutations setRow(byte[] row) {
          -    this.row = row == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(row.clone());
          +    this.row = row == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(row.clone());
               return this;
             }
           
@@ -192,7 +220,8 @@ public java.util.List<TMutation> getMutations() {
               return this.mutations;
             }
           
-  public TRowMutations setMutations(@org.apache.thrift.annotation.Nullable java.util.List<TMutation> mutations) {
+  public TRowMutations
+      setMutations(@org.apache.thrift.annotation.Nullable java.util.List<TMutation> mutations) {
               this.mutations = mutations;
               return this;
             }
          @@ -212,27 +241,28 @@ public void setMutationsIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case ROW:
          -      if (value == null) {
          -        unsetRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setRow((byte[])value);
          +      case ROW:
          +        if (value == null) {
          +          unsetRow();
                   } else {
          -          setRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setRow((byte[]) value);
          +          } else {
          +            setRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case MUTATIONS:
          -      if (value == null) {
          -        unsetMutations();
          -      } else {
-        setMutations((java.util.List<TMutation>)value);
          -      }
          -      break;
          +      case MUTATIONS:
          +        if (value == null) {
          +          unsetMutations();
          +        } else {
+          setMutations((java.util.List<TMutation>) value);
          +        }
          +        break;
           
               }
             }
          @@ -240,60 +270,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case ROW:
          -      return getRow();
          +      case ROW:
          +        return getRow();
           
          -    case MUTATIONS:
          -      return getMutations();
          +      case MUTATIONS:
          +        return getMutations();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case ROW:
          -      return isSetRow();
          -    case MUTATIONS:
          -      return isSetMutations();
          +      case ROW:
          +        return isSetRow();
          +      case MUTATIONS:
          +        return isSetMutations();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TRowMutations)
          -      return this.equals((TRowMutations)that);
          +    if (that instanceof TRowMutations) return this.equals((TRowMutations) that);
               return false;
             }
           
             public boolean equals(TRowMutations that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_row = true && this.isSetRow();
               boolean that_present_row = true && that.isSetRow();
               if (this_present_row || that_present_row) {
          -      if (!(this_present_row && that_present_row))
          -        return false;
          -      if (!this.row.equals(that.row))
          -        return false;
          +      if (!(this_present_row && that_present_row)) return false;
          +      if (!this.row.equals(that.row)) return false;
               }
           
               boolean this_present_mutations = true && this.isSetMutations();
               boolean that_present_mutations = true && that.isSetMutations();
               if (this_present_mutations || that_present_mutations) {
          -      if (!(this_present_mutations && that_present_mutations))
          -        return false;
          -      if (!this.mutations.equals(that.mutations))
          -        return false;
          +      if (!(this_present_mutations && that_present_mutations)) return false;
          +      if (!this.mutations.equals(that.mutations)) return false;
               }
           
               return true;
          @@ -304,12 +330,10 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetRow()) ? 131071 : 524287);
          -    if (isSetRow())
          -      hashCode = hashCode * 8191 + row.hashCode();
          +    if (isSetRow()) hashCode = hashCode * 8191 + row.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetMutations()) ? 131071 : 524287);
          -    if (isSetMutations())
          -      hashCode = hashCode * 8191 + mutations.hashCode();
          +    if (isSetMutations()) hashCode = hashCode * 8191 + mutations.hashCode();
           
               return hashCode;
             }
          @@ -354,7 +378,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -385,45 +410,52 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (row == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'row' was not present! Struct: " + toString());
               }
               if (mutations == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'mutations' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'mutations' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TRowMutationsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TRowMutationsStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TRowMutationsStandardScheme getScheme() {
                 return new TRowMutationsStandardScheme();
               }
             }
           
-  private static class TRowMutationsStandardScheme extends org.apache.thrift.scheme.StandardScheme<TRowMutations> {
+  private static class TRowMutationsStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<TRowMutations> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TRowMutations struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TRowMutations struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -431,7 +463,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowMutations struc
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.row = iprot.readBinary();
                         struct.setRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -440,9 +472,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowMutations struc
                         {
                           org.apache.thrift.protocol.TList _list134 = iprot.readListBegin();
                 struct.mutations = new java.util.ArrayList<TMutation>(_list134.size);
          -                @org.apache.thrift.annotation.Nullable TMutation _elem135;
          -                for (int _i136 = 0; _i136 < _list134.size; ++_i136)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TMutation _elem135;
          +                for (int _i136 = 0; _i136 < _list134.size; ++_i136) {
                             _elem135 = new TMutation();
                             _elem135.read(iprot);
                             struct.mutations.add(_elem135);
          @@ -450,7 +482,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowMutations struc
                           iprot.readListEnd();
                         }
                         struct.setMutationsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -465,7 +497,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowMutations struc
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TRowMutations struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TRowMutations struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -477,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TRowMutations stru
                 if (struct.mutations != null) {
                   oprot.writeFieldBegin(MUTATIONS_FIELD_DESC);
                   {
          -          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size()));
          -          for (TMutation _iter137 : struct.mutations)
          -          {
          +          oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +              org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size()));
          +          for (TMutation _iter137 : struct.mutations) {
                       _iter137.write(oprot);
                     }
                     oprot.writeListEnd();
          @@ -492,38 +525,44 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TRowMutations stru
           
             }
           
          -  private static class TRowMutationsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TRowMutationsTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TRowMutationsTupleScheme getScheme() {
                 return new TRowMutationsTupleScheme();
               }
             }
           
-  private static class TRowMutationsTupleScheme extends org.apache.thrift.scheme.TupleScheme<TRowMutations> {
+  private static class TRowMutationsTupleScheme
+      extends org.apache.thrift.scheme.TupleScheme<TRowMutations> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TRowMutations struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TRowMutations struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.row);
                 {
                   oprot.writeI32(struct.mutations.size());
          -        for (TMutation _iter138 : struct.mutations)
          -        {
          +        for (TMutation _iter138 : struct.mutations) {
                     _iter138.write(oprot);
                   }
                 }
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TRowMutations struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TRowMutations struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.row = iprot.readBinary();
                 struct.setRowIsSet(true);
                 {
          -        org.apache.thrift.protocol.TList _list139 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +        org.apache.thrift.protocol.TList _list139 =
          +            iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
         struct.mutations = new java.util.ArrayList<TMutation>(_list139.size);
          -        @org.apache.thrift.annotation.Nullable TMutation _elem140;
          -        for (int _i141 = 0; _i141 < _list139.size; ++_i141)
          -        {
          +        @org.apache.thrift.annotation.Nullable
          +        TMutation _elem140;
          +        for (int _i141 = 0; _i141 < _list139.size; ++_i141) {
                     _elem140 = new TMutation();
                     _elem140.read(iprot);
                     struct.mutations.add(_elem140);
          @@ -533,8 +572,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TRowMutations struct
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
+      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java
          index 6cfae7ed49e7..130261143252 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java
          @@ -1,41 +1,93 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Any timestamps in the columns are ignored but the colFamTimeRangeMap included, use timeRange to select by timestamp.
          - * Max versions defaults to 1.
          + * Any timestamps in the columns are ignored but the colFamTimeRangeMap included, use timeRange to
          + * select by timestamp. Max versions defaults to 1.
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, java.io.Serializable, Cloneable, Comparable<TScan> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan");
          -
          -  private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField STOP_ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("stopRow", org.apache.thrift.protocol.TType.STRING, (short)2);
          -  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3);
          -  private static final org.apache.thrift.protocol.TField CACHING_FIELD_DESC = new org.apache.thrift.protocol.TField("caching", org.apache.thrift.protocol.TType.I32, (short)4);
          -  private static final org.apache.thrift.protocol.TField MAX_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxVersions", org.apache.thrift.protocol.TType.I32, (short)5);
          -  private static final org.apache.thrift.protocol.TField TIME_RANGE_FIELD_DESC = new org.apache.thrift.protocol.TField("timeRange", org.apache.thrift.protocol.TType.STRUCT, (short)6);
          -  private static final org.apache.thrift.protocol.TField FILTER_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("filterString", org.apache.thrift.protocol.TType.STRING, (short)7);
          -  private static final org.apache.thrift.protocol.TField BATCH_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("batchSize", org.apache.thrift.protocol.TType.I32, (short)8);
          -  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)9);
          -  private static final org.apache.thrift.protocol.TField AUTHORIZATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("authorizations", org.apache.thrift.protocol.TType.STRUCT, (short)10);
          -  private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC = new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL, (short)11);
          -  private static final org.apache.thrift.protocol.TField CACHE_BLOCKS_FIELD_DESC = new org.apache.thrift.protocol.TField("cacheBlocks", org.apache.thrift.protocol.TType.BOOL, (short)12);
          -  private static final org.apache.thrift.protocol.TField COL_FAM_TIME_RANGE_MAP_FIELD_DESC = new org.apache.thrift.protocol.TField("colFamTimeRangeMap", org.apache.thrift.protocol.TType.MAP, (short)13);
          -  private static final org.apache.thrift.protocol.TField READ_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("readType", org.apache.thrift.protocol.TType.I32, (short)14);
          -  private static final org.apache.thrift.protocol.TField LIMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("limit", org.apache.thrift.protocol.TType.I32, (short)15);
          -  private static final org.apache.thrift.protocol.TField CONSISTENCY_FIELD_DESC = new org.apache.thrift.protocol.TField("consistency", org.apache.thrift.protocol.TType.I32, (short)16);
          -  private static final org.apache.thrift.protocol.TField TARGET_REPLICA_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("targetReplicaId", org.apache.thrift.protocol.TType.I32, (short)17);
          -  private static final org.apache.thrift.protocol.TField FILTER_BYTES_FIELD_DESC = new org.apache.thrift.protocol.TField("filterBytes", org.apache.thrift.protocol.TType.STRING, (short)18);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TScanStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TScanTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, java.io.Serializable,
+    Cloneable, Comparable<TScan> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TScan");
          +
          +  private static final org.apache.thrift.protocol.TField START_ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("startRow", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField STOP_ROW_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("stopRow", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField CACHING_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("caching", org.apache.thrift.protocol.TType.I32,
          +          (short) 4);
          +  private static final org.apache.thrift.protocol.TField MAX_VERSIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("maxVersions", org.apache.thrift.protocol.TType.I32,
          +          (short) 5);
          +  private static final org.apache.thrift.protocol.TField TIME_RANGE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("timeRange", org.apache.thrift.protocol.TType.STRUCT,
          +          (short) 6);
          +  private static final org.apache.thrift.protocol.TField FILTER_STRING_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("filterString", org.apache.thrift.protocol.TType.STRING,
          +          (short) 7);
          +  private static final org.apache.thrift.protocol.TField BATCH_SIZE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("batchSize", org.apache.thrift.protocol.TType.I32,
          +          (short) 8);
          +  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +          (short) 9);
          +  private static final org.apache.thrift.protocol.TField AUTHORIZATIONS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("authorizations",
          +          org.apache.thrift.protocol.TType.STRUCT, (short) 10);
          +  private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 11);
          +  private static final org.apache.thrift.protocol.TField CACHE_BLOCKS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("cacheBlocks", org.apache.thrift.protocol.TType.BOOL,
          +          (short) 12);
          +  private static final org.apache.thrift.protocol.TField COL_FAM_TIME_RANGE_MAP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("colFamTimeRangeMap",
          +          org.apache.thrift.protocol.TType.MAP, (short) 13);
          +  private static final org.apache.thrift.protocol.TField READ_TYPE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("readType", org.apache.thrift.protocol.TType.I32,
          +          (short) 14);
          +  private static final org.apache.thrift.protocol.TField LIMIT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("limit", org.apache.thrift.protocol.TType.I32,
          +          (short) 15);
          +  private static final org.apache.thrift.protocol.TField CONSISTENCY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("consistency", org.apache.thrift.protocol.TType.I32,
          +          (short) 16);
          +  private static final org.apache.thrift.protocol.TField TARGET_REPLICA_ID_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("targetReplicaId", org.apache.thrift.protocol.TType.I32,
          +          (short) 17);
          +  private static final org.apache.thrift.protocol.TField FILTER_BYTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("filterBytes", org.apache.thrift.protocol.TType.STRING,
          +          (short) 18);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TScanStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TScanTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer startRow; // optional
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer stopRow; // optional
@@ -45,55 +97,47 @@ public class TScan implements org.apache.thrift.TBase<TScan, TScan._Fields>, jav
             public @org.apache.thrift.annotation.Nullable TTimeRange timeRange; // optional
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterString; // optional
             public int batchSize; // optional
-  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // optional
+  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // optional
             public @org.apache.thrift.annotation.Nullable TAuthorization authorizations; // optional
             public boolean reversed; // optional
             public boolean cacheBlocks; // optional
-  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,TTimeRange> colFamTimeRangeMap; // optional
+  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, TTimeRange> colFamTimeRangeMap; // optional
             /**
          -   * 
              * @see TReadType
              */
             public @org.apache.thrift.annotation.Nullable TReadType readType; // optional
             public int limit; // optional
             /**
          -   * 
              * @see TConsistency
              */
             public @org.apache.thrift.annotation.Nullable TConsistency consistency; // optional
             public int targetReplicaId; // optional
             public @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterBytes; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    START_ROW((short)1, "startRow"),
          -    STOP_ROW((short)2, "stopRow"),
          -    COLUMNS((short)3, "columns"),
          -    CACHING((short)4, "caching"),
          -    MAX_VERSIONS((short)5, "maxVersions"),
          -    TIME_RANGE((short)6, "timeRange"),
          -    FILTER_STRING((short)7, "filterString"),
          -    BATCH_SIZE((short)8, "batchSize"),
          -    ATTRIBUTES((short)9, "attributes"),
          -    AUTHORIZATIONS((short)10, "authorizations"),
          -    REVERSED((short)11, "reversed"),
          -    CACHE_BLOCKS((short)12, "cacheBlocks"),
          -    COL_FAM_TIME_RANGE_MAP((short)13, "colFamTimeRangeMap"),
          +    START_ROW((short) 1, "startRow"), STOP_ROW((short) 2, "stopRow"), COLUMNS((short) 3, "columns"),
          +    CACHING((short) 4, "caching"), MAX_VERSIONS((short) 5, "maxVersions"),
          +    TIME_RANGE((short) 6, "timeRange"), FILTER_STRING((short) 7, "filterString"),
          +    BATCH_SIZE((short) 8, "batchSize"), ATTRIBUTES((short) 9, "attributes"),
          +    AUTHORIZATIONS((short) 10, "authorizations"), REVERSED((short) 11, "reversed"),
          +    CACHE_BLOCKS((short) 12, "cacheBlocks"),
          +    COL_FAM_TIME_RANGE_MAP((short) 13, "colFamTimeRangeMap"),
               /**
          -     * 
                * @see TReadType
                */
          -    READ_TYPE((short)14, "readType"),
          -    LIMIT((short)15, "limit"),
          +    READ_TYPE((short) 14, "readType"), LIMIT((short) 15, "limit"),
               /**
          -     * 
                * @see TConsistency
                */
          -    CONSISTENCY((short)16, "consistency"),
          -    TARGET_REPLICA_ID((short)17, "targetReplicaId"),
          -    FILTER_BYTES((short)18, "filterBytes");
          +    CONSISTENCY((short) 16, "consistency"), TARGET_REPLICA_ID((short) 17, "targetReplicaId"),
          +    FILTER_BYTES((short) 18, "filterBytes");
           
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -106,7 +150,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // START_ROW
                     return START_ROW;
                   case 2: // STOP_ROW
          @@ -149,12 +193,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -192,51 +236,96 @@ public java.lang.String getFieldName() {
             private static final int __LIMIT_ISSET_ID = 5;
             private static final int __TARGETREPLICAID_ISSET_ID = 6;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.COLUMNS,_Fields.CACHING,_Fields.MAX_VERSIONS,_Fields.TIME_RANGE,_Fields.FILTER_STRING,_Fields.BATCH_SIZE,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.REVERSED,_Fields.CACHE_BLOCKS,_Fields.COL_FAM_TIME_RANGE_MAP,_Fields.READ_TYPE,_Fields.LIMIT,_Fields.CONSISTENCY,_Fields.TARGET_REPLICA_ID,_Fields.FILTER_BYTES};
          +  private static final _Fields optionals[] = { _Fields.START_ROW, _Fields.STOP_ROW, _Fields.COLUMNS,
          +      _Fields.CACHING, _Fields.MAX_VERSIONS, _Fields.TIME_RANGE, _Fields.FILTER_STRING,
          +      _Fields.BATCH_SIZE, _Fields.ATTRIBUTES, _Fields.AUTHORIZATIONS, _Fields.REVERSED,
          +      _Fields.CACHE_BLOCKS, _Fields.COL_FAM_TIME_RANGE_MAP, _Fields.READ_TYPE, _Fields.LIMIT,
          +      _Fields.CONSISTENCY, _Fields.TARGET_REPLICA_ID, _Fields.FILTER_BYTES };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.START_ROW, new org.apache.thrift.meta_data.FieldMetaData("startRow", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.STOP_ROW, new org.apache.thrift.meta_data.FieldMetaData("stopRow", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumn.class))));
          -    tmpMap.put(_Fields.CACHING, new org.apache.thrift.meta_data.FieldMetaData("caching", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.START_ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("startRow",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.STOP_ROW,
          +      new org.apache.thrift.meta_data.FieldMetaData("stopRow",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumn.class))));
          +    tmpMap.put(_Fields.CACHING, new org.apache.thrift.meta_data.FieldMetaData("caching",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.TIME_RANGE, new org.apache.thrift.meta_data.FieldMetaData("timeRange", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTimeRange.class)));
          -    tmpMap.put(_Fields.FILTER_STRING, new org.apache.thrift.meta_data.FieldMetaData("filterString", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.BATCH_SIZE, new org.apache.thrift.meta_data.FieldMetaData("batchSize", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.TIME_RANGE,
          +      new org.apache.thrift.meta_data.FieldMetaData("timeRange",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TTimeRange.class)));
          +    tmpMap.put(_Fields.FILTER_STRING,
          +      new org.apache.thrift.meta_data.FieldMetaData("filterString",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.BATCH_SIZE, new org.apache.thrift.meta_data.FieldMetaData("batchSize",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
          -    tmpMap.put(_Fields.AUTHORIZATIONS, new org.apache.thrift.meta_data.FieldMetaData("authorizations", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAuthorization.class)));
          -    tmpMap.put(_Fields.REVERSED, new org.apache.thrift.meta_data.FieldMetaData("reversed", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.ATTRIBUTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true))));
          +    tmpMap.put(_Fields.AUTHORIZATIONS,
          +      new org.apache.thrift.meta_data.FieldMetaData("authorizations",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TAuthorization.class)));
          +    tmpMap.put(_Fields.REVERSED, new org.apache.thrift.meta_data.FieldMetaData("reversed",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.CACHE_BLOCKS, new org.apache.thrift.meta_data.FieldMetaData("cacheBlocks", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.CACHE_BLOCKS, new org.apache.thrift.meta_data.FieldMetaData("cacheBlocks",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
          -    tmpMap.put(_Fields.COL_FAM_TIME_RANGE_MAP, new org.apache.thrift.meta_data.FieldMetaData("colFamTimeRangeMap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true), 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTimeRange.class))));
          -    tmpMap.put(_Fields.READ_TYPE, new org.apache.thrift.meta_data.FieldMetaData("readType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TReadType.class)));
          -    tmpMap.put(_Fields.LIMIT, new org.apache.thrift.meta_data.FieldMetaData("limit", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.COL_FAM_TIME_RANGE_MAP, new org.apache.thrift.meta_data.FieldMetaData(
          +        "colFamTimeRangeMap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +            new org.apache.thrift.meta_data.FieldValueMetaData(
          +                org.apache.thrift.protocol.TType.STRING, true),
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TTimeRange.class))));
          +    tmpMap.put(_Fields.READ_TYPE,
          +      new org.apache.thrift.meta_data.FieldMetaData("readType",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TReadType.class)));
          +    tmpMap.put(_Fields.LIMIT, new org.apache.thrift.meta_data.FieldMetaData("limit",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.CONSISTENCY, new org.apache.thrift.meta_data.FieldMetaData("consistency", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TConsistency.class)));
          -    tmpMap.put(_Fields.TARGET_REPLICA_ID, new org.apache.thrift.meta_data.FieldMetaData("targetReplicaId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.CONSISTENCY,
          +      new org.apache.thrift.meta_data.FieldMetaData("consistency",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TConsistency.class)));
          +    tmpMap.put(_Fields.TARGET_REPLICA_ID, new org.apache.thrift.meta_data.FieldMetaData(
          +        "targetReplicaId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.FILTER_BYTES, new org.apache.thrift.meta_data.FieldMetaData("filterBytes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          +    tmpMap.put(_Fields.FILTER_BYTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("filterBytes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TScan.class, metaDataMap);
             }
          @@ -258,7 +347,8 @@ public TScan(TScan other) {
                 this.stopRow = org.apache.thrift.TBaseHelper.copyBinary(other.stopRow);
               }
               if (other.isSetColumns()) {
-      java.util.List<TColumn> __this__columns = new java.util.ArrayList<TColumn>(other.columns.size());
+      java.util.List<TColumn> __this__columns =
+          new java.util.ArrayList<TColumn>(other.columns.size());
                 for (TColumn other_element : other.columns) {
                   __this__columns.add(new TColumn(other_element));
                 }
          @@ -274,7 +364,8 @@ public TScan(TScan other) {
               }
               this.batchSize = other.batchSize;
               if (other.isSetAttributes()) {
-      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes);
+      java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
+          new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(other.attributes);
                 this.attributes = __this__attributes;
               }
               if (other.isSetAuthorizations()) {
          @@ -283,17 +374,21 @@ public TScan(TScan other) {
               this.reversed = other.reversed;
               this.cacheBlocks = other.cacheBlocks;
               if (other.isSetColFamTimeRangeMap()) {
-      java.util.Map<java.nio.ByteBuffer,TTimeRange> __this__colFamTimeRangeMap = new java.util.HashMap<java.nio.ByteBuffer,TTimeRange>(other.colFamTimeRangeMap.size());
-      for (java.util.Map.Entry<java.nio.ByteBuffer, TTimeRange> other_element : other.colFamTimeRangeMap.entrySet()) {
+      java.util.Map<java.nio.ByteBuffer, TTimeRange> __this__colFamTimeRangeMap =
+          new java.util.HashMap<java.nio.ByteBuffer, TTimeRange>(other.colFamTimeRangeMap.size());
+      for (java.util.Map.Entry<java.nio.ByteBuffer, TTimeRange> other_element : other.colFamTimeRangeMap
          +          .entrySet()) {
           
                   java.nio.ByteBuffer other_element_key = other_element.getKey();
                   TTimeRange other_element_value = other_element.getValue();
           
          -        java.nio.ByteBuffer __this__colFamTimeRangeMap_copy_key = org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
          +        java.nio.ByteBuffer __this__colFamTimeRangeMap_copy_key =
          +            org.apache.thrift.TBaseHelper.copyBinary(other_element_key);
           
                   TTimeRange __this__colFamTimeRangeMap_copy_value = new TTimeRange(other_element_value);
           
          -        __this__colFamTimeRangeMap.put(__this__colFamTimeRangeMap_copy_key, __this__colFamTimeRangeMap_copy_value);
          +        __this__colFamTimeRangeMap.put(__this__colFamTimeRangeMap_copy_key,
          +          __this__colFamTimeRangeMap_copy_value);
                 }
                 this.colFamTimeRangeMap = __this__colFamTimeRangeMap;
               }
          @@ -353,7 +448,8 @@ public java.nio.ByteBuffer bufferForStartRow() {
             }
           
             public TScan setStartRow(byte[] startRow) {
          -    this.startRow = startRow == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(startRow.clone());
          +    this.startRow =
          +        startRow == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(startRow.clone());
               return this;
             }
           
          @@ -387,7 +483,8 @@ public java.nio.ByteBuffer bufferForStopRow() {
             }
           
             public TScan setStopRow(byte[] stopRow) {
          -    this.stopRow = stopRow == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(stopRow.clone());
          +    this.stopRow =
          +        stopRow == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(stopRow.clone());
               return this;
             }
           
          @@ -463,7 +560,8 @@ public TScan setCaching(int caching) {
             }
           
             public void unsetCaching() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHING_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHING_ISSET_ID);
             }
           
             /** Returns true if field caching is set (has been assigned a value) and false otherwise */
          @@ -472,7 +570,8 @@ public boolean isSetCaching() {
             }
           
             public void setCachingIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHING_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHING_ISSET_ID, value);
             }
           
             public int getMaxVersions() {
          @@ -486,7 +585,8 @@ public TScan setMaxVersions(int maxVersions) {
             }
           
             public void unsetMaxVersions() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID);
             }
           
             /** Returns true if field maxVersions is set (has been assigned a value) and false otherwise */
          @@ -495,7 +595,8 @@ public boolean isSetMaxVersions() {
             }
           
             public void setMaxVersionsIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXVERSIONS_ISSET_ID, value);
             }
           
             @org.apache.thrift.annotation.Nullable
          @@ -533,11 +634,13 @@ public java.nio.ByteBuffer bufferForFilterString() {
             }
           
             public TScan setFilterString(byte[] filterString) {
          -    this.filterString = filterString == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(filterString.clone());
          +    this.filterString = filterString == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(filterString.clone());
               return this;
             }
           
          -  public TScan setFilterString(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterString) {
          +  public TScan
          +      setFilterString(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterString) {
               this.filterString = org.apache.thrift.TBaseHelper.copyBinary(filterString);
               return this;
             }
          @@ -568,7 +671,8 @@ public TScan setBatchSize(int batchSize) {
             }
           
             public void unsetBatchSize() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BATCHSIZE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __BATCHSIZE_ISSET_ID);
             }
           
             /** Returns true if field batchSize is set (has been assigned a value) and false otherwise */
          @@ -577,7 +681,8 @@ public boolean isSetBatchSize() {
             }
           
             public void setBatchSizeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BATCHSIZE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __BATCHSIZE_ISSET_ID, value);
             }
           
             public int getAttributesSize() {
          @@ -586,17 +691,18 @@ public int getAttributesSize() {
           
             public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
               if (this.attributes == null) {
-      this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
+      this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
               }
               this.attributes.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
-  public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
+  public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
               return this.attributes;
             }
           
-  public TScan setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
+  public TScan setAttributes(
+      @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
               this.attributes = attributes;
               return this;
             }
          @@ -621,7 +727,8 @@ public TAuthorization getAuthorizations() {
               return this.authorizations;
             }
           
          -  public TScan setAuthorizations(@org.apache.thrift.annotation.Nullable TAuthorization authorizations) {
          +  public TScan
          +      setAuthorizations(@org.apache.thrift.annotation.Nullable TAuthorization authorizations) {
               this.authorizations = authorizations;
               return this;
             }
          @@ -652,7 +759,8 @@ public TScan setReversed(boolean reversed) {
             }
           
             public void unsetReversed() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __REVERSED_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __REVERSED_ISSET_ID);
             }
           
             /** Returns true if field reversed is set (has been assigned a value) and false otherwise */
          @@ -661,7 +769,8 @@ public boolean isSetReversed() {
             }
           
             public void setReversedIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __REVERSED_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __REVERSED_ISSET_ID, value);
             }
           
             public boolean isCacheBlocks() {
          @@ -675,7 +784,8 @@ public TScan setCacheBlocks(boolean cacheBlocks) {
             }
           
             public void unsetCacheBlocks() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID);
             }
           
             /** Returns true if field cacheBlocks is set (has been assigned a value) and false otherwise */
          @@ -684,7 +794,8 @@ public boolean isSetCacheBlocks() {
             }
           
             public void setCacheBlocksIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID, value);
             }
           
             public int getColFamTimeRangeMapSize() {
          @@ -693,17 +804,18 @@ public int getColFamTimeRangeMapSize() {
           
             public void putToColFamTimeRangeMap(java.nio.ByteBuffer key, TTimeRange val) {
               if (this.colFamTimeRangeMap == null) {
           -      this.colFamTimeRangeMap = new java.util.HashMap<java.nio.ByteBuffer,TTimeRange>();
           +      this.colFamTimeRangeMap = new java.util.HashMap<java.nio.ByteBuffer, TTimeRange>();
               }
               this.colFamTimeRangeMap.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
           -  public java.util.Map<java.nio.ByteBuffer,TTimeRange> getColFamTimeRangeMap() {
           +  public java.util.Map<java.nio.ByteBuffer, TTimeRange> getColFamTimeRangeMap() {
               return this.colFamTimeRangeMap;
             }
           
           -  public TScan setColFamTimeRangeMap(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,TTimeRange> colFamTimeRangeMap) {
           +  public TScan setColFamTimeRangeMap(
           +      @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, TTimeRange> colFamTimeRangeMap) {
               this.colFamTimeRangeMap = colFamTimeRangeMap;
               return this;
             }
          @@ -712,7 +824,9 @@ public void unsetColFamTimeRangeMap() {
               this.colFamTimeRangeMap = null;
             }
           
          -  /** Returns true if field colFamTimeRangeMap is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field colFamTimeRangeMap is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetColFamTimeRangeMap() {
               return this.colFamTimeRangeMap != null;
             }
          @@ -724,7 +838,6 @@ public void setColFamTimeRangeMapIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TReadType
              */
             @org.apache.thrift.annotation.Nullable
          @@ -733,7 +846,6 @@ public TReadType getReadType() {
             }
           
             /**
          -   * 
              * @see TReadType
              */
             public TScan setReadType(@org.apache.thrift.annotation.Nullable TReadType readType) {
          @@ -776,11 +888,11 @@ public boolean isSetLimit() {
             }
           
             public void setLimitIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __LIMIT_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __LIMIT_ISSET_ID, value);
             }
           
             /**
          -   * 
              * @see TConsistency
              */
             @org.apache.thrift.annotation.Nullable
          @@ -789,7 +901,6 @@ public TConsistency getConsistency() {
             }
           
             /**
          -   * 
              * @see TConsistency
              */
             public TScan setConsistency(@org.apache.thrift.annotation.Nullable TConsistency consistency) {
          @@ -823,16 +934,20 @@ public TScan setTargetReplicaId(int targetReplicaId) {
             }
           
             public void unsetTargetReplicaId() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID);
             }
           
          -  /** Returns true if field targetReplicaId is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field targetReplicaId is set (has been assigned a value) and false otherwise
          +   */
             public boolean isSetTargetReplicaId() {
               return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID);
             }
           
             public void setTargetReplicaIdIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TARGETREPLICAID_ISSET_ID, value);
             }
           
             public byte[] getFilterBytes() {
          @@ -845,11 +960,13 @@ public java.nio.ByteBuffer bufferForFilterBytes() {
             }
           
             public TScan setFilterBytes(byte[] filterBytes) {
          -    this.filterBytes = filterBytes == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(filterBytes.clone());
          +    this.filterBytes = filterBytes == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(filterBytes.clone());
               return this;
             }
           
          -  public TScan setFilterBytes(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterBytes) {
          +  public TScan
          +      setFilterBytes(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer filterBytes) {
               this.filterBytes = org.apache.thrift.TBaseHelper.copyBinary(filterBytes);
               return this;
             }
          @@ -869,167 +986,168 @@ public void setFilterBytesIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case START_ROW:
          -      if (value == null) {
          -        unsetStartRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setStartRow((byte[])value);
          +      case START_ROW:
          +        if (value == null) {
          +          unsetStartRow();
                   } else {
          -          setStartRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setStartRow((byte[]) value);
          +          } else {
          +            setStartRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case STOP_ROW:
          -      if (value == null) {
          -        unsetStopRow();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setStopRow((byte[])value);
          +      case STOP_ROW:
          +        if (value == null) {
          +          unsetStopRow();
                   } else {
          -          setStopRow((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setStopRow((byte[]) value);
          +          } else {
          +            setStopRow((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case COLUMNS:
          -      if (value == null) {
          -        unsetColumns();
          -      } else {
           -        setColumns((java.util.List<TColumn>)value);
          -      }
          -      break;
          +      case COLUMNS:
          +        if (value == null) {
          +          unsetColumns();
          +        } else {
           +          setColumns((java.util.List<TColumn>) value);
          +        }
          +        break;
           
          -    case CACHING:
          -      if (value == null) {
          -        unsetCaching();
          -      } else {
          -        setCaching((java.lang.Integer)value);
          -      }
          -      break;
          +      case CACHING:
          +        if (value == null) {
          +          unsetCaching();
          +        } else {
          +          setCaching((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case MAX_VERSIONS:
          -      if (value == null) {
          -        unsetMaxVersions();
          -      } else {
          -        setMaxVersions((java.lang.Integer)value);
          -      }
          -      break;
          +      case MAX_VERSIONS:
          +        if (value == null) {
          +          unsetMaxVersions();
          +        } else {
          +          setMaxVersions((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case TIME_RANGE:
          -      if (value == null) {
          -        unsetTimeRange();
          -      } else {
          -        setTimeRange((TTimeRange)value);
          -      }
          -      break;
          +      case TIME_RANGE:
          +        if (value == null) {
          +          unsetTimeRange();
          +        } else {
          +          setTimeRange((TTimeRange) value);
          +        }
          +        break;
           
          -    case FILTER_STRING:
          -      if (value == null) {
          -        unsetFilterString();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setFilterString((byte[])value);
          +      case FILTER_STRING:
          +        if (value == null) {
          +          unsetFilterString();
                   } else {
          -          setFilterString((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setFilterString((byte[]) value);
          +          } else {
          +            setFilterString((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case BATCH_SIZE:
          -      if (value == null) {
          -        unsetBatchSize();
          -      } else {
          -        setBatchSize((java.lang.Integer)value);
          -      }
          -      break;
          +      case BATCH_SIZE:
          +        if (value == null) {
          +          unsetBatchSize();
          +        } else {
          +          setBatchSize((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case ATTRIBUTES:
          -      if (value == null) {
          -        unsetAttributes();
          -      } else {
           -        setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case ATTRIBUTES:
          +        if (value == null) {
          +          unsetAttributes();
          +        } else {
           +          setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case AUTHORIZATIONS:
          -      if (value == null) {
          -        unsetAuthorizations();
          -      } else {
          -        setAuthorizations((TAuthorization)value);
          -      }
          -      break;
          +      case AUTHORIZATIONS:
          +        if (value == null) {
          +          unsetAuthorizations();
          +        } else {
          +          setAuthorizations((TAuthorization) value);
          +        }
          +        break;
           
          -    case REVERSED:
          -      if (value == null) {
          -        unsetReversed();
          -      } else {
          -        setReversed((java.lang.Boolean)value);
          -      }
          -      break;
          +      case REVERSED:
          +        if (value == null) {
          +          unsetReversed();
          +        } else {
          +          setReversed((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case CACHE_BLOCKS:
          -      if (value == null) {
          -        unsetCacheBlocks();
          -      } else {
          -        setCacheBlocks((java.lang.Boolean)value);
          -      }
          -      break;
          +      case CACHE_BLOCKS:
          +        if (value == null) {
          +          unsetCacheBlocks();
          +        } else {
          +          setCacheBlocks((java.lang.Boolean) value);
          +        }
          +        break;
           
          -    case COL_FAM_TIME_RANGE_MAP:
          -      if (value == null) {
          -        unsetColFamTimeRangeMap();
          -      } else {
           -        setColFamTimeRangeMap((java.util.Map<java.nio.ByteBuffer,TTimeRange>)value);
          -      }
          -      break;
          +      case COL_FAM_TIME_RANGE_MAP:
          +        if (value == null) {
          +          unsetColFamTimeRangeMap();
          +        } else {
           +          setColFamTimeRangeMap((java.util.Map<java.nio.ByteBuffer, TTimeRange>) value);
          +        }
          +        break;
           
          -    case READ_TYPE:
          -      if (value == null) {
          -        unsetReadType();
          -      } else {
          -        setReadType((TReadType)value);
          -      }
          -      break;
          +      case READ_TYPE:
          +        if (value == null) {
          +          unsetReadType();
          +        } else {
          +          setReadType((TReadType) value);
          +        }
          +        break;
           
          -    case LIMIT:
          -      if (value == null) {
          -        unsetLimit();
          -      } else {
          -        setLimit((java.lang.Integer)value);
          -      }
          -      break;
          +      case LIMIT:
          +        if (value == null) {
          +          unsetLimit();
          +        } else {
          +          setLimit((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case CONSISTENCY:
          -      if (value == null) {
          -        unsetConsistency();
          -      } else {
          -        setConsistency((TConsistency)value);
          -      }
          -      break;
          +      case CONSISTENCY:
          +        if (value == null) {
          +          unsetConsistency();
          +        } else {
          +          setConsistency((TConsistency) value);
          +        }
          +        break;
           
          -    case TARGET_REPLICA_ID:
          -      if (value == null) {
          -        unsetTargetReplicaId();
          -      } else {
          -        setTargetReplicaId((java.lang.Integer)value);
          -      }
          -      break;
          +      case TARGET_REPLICA_ID:
          +        if (value == null) {
          +          unsetTargetReplicaId();
          +        } else {
          +          setTargetReplicaId((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case FILTER_BYTES:
          -      if (value == null) {
          -        unsetFilterBytes();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setFilterBytes((byte[])value);
          +      case FILTER_BYTES:
          +        if (value == null) {
          +          unsetFilterBytes();
                   } else {
          -          setFilterBytes((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setFilterBytes((byte[]) value);
          +          } else {
          +            setFilterBytes((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
               }
             }
          @@ -1037,284 +1155,248 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case START_ROW:
          -      return getStartRow();
          +      case START_ROW:
          +        return getStartRow();
           
          -    case STOP_ROW:
          -      return getStopRow();
          +      case STOP_ROW:
          +        return getStopRow();
           
          -    case COLUMNS:
          -      return getColumns();
          +      case COLUMNS:
          +        return getColumns();
           
          -    case CACHING:
          -      return getCaching();
          +      case CACHING:
          +        return getCaching();
           
          -    case MAX_VERSIONS:
          -      return getMaxVersions();
          +      case MAX_VERSIONS:
          +        return getMaxVersions();
           
          -    case TIME_RANGE:
          -      return getTimeRange();
          +      case TIME_RANGE:
          +        return getTimeRange();
           
          -    case FILTER_STRING:
          -      return getFilterString();
          +      case FILTER_STRING:
          +        return getFilterString();
           
          -    case BATCH_SIZE:
          -      return getBatchSize();
          +      case BATCH_SIZE:
          +        return getBatchSize();
           
          -    case ATTRIBUTES:
          -      return getAttributes();
          +      case ATTRIBUTES:
          +        return getAttributes();
           
          -    case AUTHORIZATIONS:
          -      return getAuthorizations();
          +      case AUTHORIZATIONS:
          +        return getAuthorizations();
           
          -    case REVERSED:
          -      return isReversed();
          +      case REVERSED:
          +        return isReversed();
           
          -    case CACHE_BLOCKS:
          -      return isCacheBlocks();
          +      case CACHE_BLOCKS:
          +        return isCacheBlocks();
           
          -    case COL_FAM_TIME_RANGE_MAP:
          -      return getColFamTimeRangeMap();
          +      case COL_FAM_TIME_RANGE_MAP:
          +        return getColFamTimeRangeMap();
           
          -    case READ_TYPE:
          -      return getReadType();
          +      case READ_TYPE:
          +        return getReadType();
           
          -    case LIMIT:
          -      return getLimit();
          +      case LIMIT:
          +        return getLimit();
           
          -    case CONSISTENCY:
          -      return getConsistency();
          +      case CONSISTENCY:
          +        return getConsistency();
           
          -    case TARGET_REPLICA_ID:
          -      return getTargetReplicaId();
          +      case TARGET_REPLICA_ID:
          +        return getTargetReplicaId();
           
          -    case FILTER_BYTES:
          -      return getFilterBytes();
          +      case FILTER_BYTES:
          +        return getFilterBytes();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case START_ROW:
          -      return isSetStartRow();
          -    case STOP_ROW:
          -      return isSetStopRow();
          -    case COLUMNS:
          -      return isSetColumns();
          -    case CACHING:
          -      return isSetCaching();
          -    case MAX_VERSIONS:
          -      return isSetMaxVersions();
          -    case TIME_RANGE:
          -      return isSetTimeRange();
          -    case FILTER_STRING:
          -      return isSetFilterString();
          -    case BATCH_SIZE:
          -      return isSetBatchSize();
          -    case ATTRIBUTES:
          -      return isSetAttributes();
          -    case AUTHORIZATIONS:
          -      return isSetAuthorizations();
          -    case REVERSED:
          -      return isSetReversed();
          -    case CACHE_BLOCKS:
          -      return isSetCacheBlocks();
          -    case COL_FAM_TIME_RANGE_MAP:
          -      return isSetColFamTimeRangeMap();
          -    case READ_TYPE:
          -      return isSetReadType();
          -    case LIMIT:
          -      return isSetLimit();
          -    case CONSISTENCY:
          -      return isSetConsistency();
          -    case TARGET_REPLICA_ID:
          -      return isSetTargetReplicaId();
          -    case FILTER_BYTES:
          -      return isSetFilterBytes();
          +      case START_ROW:
          +        return isSetStartRow();
          +      case STOP_ROW:
          +        return isSetStopRow();
          +      case COLUMNS:
          +        return isSetColumns();
          +      case CACHING:
          +        return isSetCaching();
          +      case MAX_VERSIONS:
          +        return isSetMaxVersions();
          +      case TIME_RANGE:
          +        return isSetTimeRange();
          +      case FILTER_STRING:
          +        return isSetFilterString();
          +      case BATCH_SIZE:
          +        return isSetBatchSize();
          +      case ATTRIBUTES:
          +        return isSetAttributes();
          +      case AUTHORIZATIONS:
          +        return isSetAuthorizations();
          +      case REVERSED:
          +        return isSetReversed();
          +      case CACHE_BLOCKS:
          +        return isSetCacheBlocks();
          +      case COL_FAM_TIME_RANGE_MAP:
          +        return isSetColFamTimeRangeMap();
          +      case READ_TYPE:
          +        return isSetReadType();
          +      case LIMIT:
          +        return isSetLimit();
          +      case CONSISTENCY:
          +        return isSetConsistency();
          +      case TARGET_REPLICA_ID:
          +        return isSetTargetReplicaId();
          +      case FILTER_BYTES:
          +        return isSetFilterBytes();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TScan)
          -      return this.equals((TScan)that);
          +    if (that instanceof TScan) return this.equals((TScan) that);
               return false;
             }
           
             public boolean equals(TScan that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_startRow = true && this.isSetStartRow();
               boolean that_present_startRow = true && that.isSetStartRow();
               if (this_present_startRow || that_present_startRow) {
          -      if (!(this_present_startRow && that_present_startRow))
          -        return false;
          -      if (!this.startRow.equals(that.startRow))
          -        return false;
          +      if (!(this_present_startRow && that_present_startRow)) return false;
          +      if (!this.startRow.equals(that.startRow)) return false;
               }
           
               boolean this_present_stopRow = true && this.isSetStopRow();
               boolean that_present_stopRow = true && that.isSetStopRow();
               if (this_present_stopRow || that_present_stopRow) {
          -      if (!(this_present_stopRow && that_present_stopRow))
          -        return false;
          -      if (!this.stopRow.equals(that.stopRow))
          -        return false;
          +      if (!(this_present_stopRow && that_present_stopRow)) return false;
          +      if (!this.stopRow.equals(that.stopRow)) return false;
               }
           
               boolean this_present_columns = true && this.isSetColumns();
               boolean that_present_columns = true && that.isSetColumns();
               if (this_present_columns || that_present_columns) {
          -      if (!(this_present_columns && that_present_columns))
          -        return false;
          -      if (!this.columns.equals(that.columns))
          -        return false;
          +      if (!(this_present_columns && that_present_columns)) return false;
          +      if (!this.columns.equals(that.columns)) return false;
               }
           
               boolean this_present_caching = true && this.isSetCaching();
               boolean that_present_caching = true && that.isSetCaching();
               if (this_present_caching || that_present_caching) {
          -      if (!(this_present_caching && that_present_caching))
          -        return false;
          -      if (this.caching != that.caching)
          -        return false;
          +      if (!(this_present_caching && that_present_caching)) return false;
          +      if (this.caching != that.caching) return false;
               }
           
               boolean this_present_maxVersions = true && this.isSetMaxVersions();
               boolean that_present_maxVersions = true && that.isSetMaxVersions();
               if (this_present_maxVersions || that_present_maxVersions) {
          -      if (!(this_present_maxVersions && that_present_maxVersions))
          -        return false;
          -      if (this.maxVersions != that.maxVersions)
          -        return false;
          +      if (!(this_present_maxVersions && that_present_maxVersions)) return false;
          +      if (this.maxVersions != that.maxVersions) return false;
               }
           
               boolean this_present_timeRange = true && this.isSetTimeRange();
               boolean that_present_timeRange = true && that.isSetTimeRange();
               if (this_present_timeRange || that_present_timeRange) {
          -      if (!(this_present_timeRange && that_present_timeRange))
          -        return false;
          -      if (!this.timeRange.equals(that.timeRange))
          -        return false;
          +      if (!(this_present_timeRange && that_present_timeRange)) return false;
          +      if (!this.timeRange.equals(that.timeRange)) return false;
               }
           
               boolean this_present_filterString = true && this.isSetFilterString();
               boolean that_present_filterString = true && that.isSetFilterString();
               if (this_present_filterString || that_present_filterString) {
          -      if (!(this_present_filterString && that_present_filterString))
          -        return false;
          -      if (!this.filterString.equals(that.filterString))
          -        return false;
          +      if (!(this_present_filterString && that_present_filterString)) return false;
          +      if (!this.filterString.equals(that.filterString)) return false;
               }
           
               boolean this_present_batchSize = true && this.isSetBatchSize();
               boolean that_present_batchSize = true && that.isSetBatchSize();
               if (this_present_batchSize || that_present_batchSize) {
          -      if (!(this_present_batchSize && that_present_batchSize))
          -        return false;
          -      if (this.batchSize != that.batchSize)
          -        return false;
          +      if (!(this_present_batchSize && that_present_batchSize)) return false;
          +      if (this.batchSize != that.batchSize) return false;
               }
           
               boolean this_present_attributes = true && this.isSetAttributes();
               boolean that_present_attributes = true && that.isSetAttributes();
               if (this_present_attributes || that_present_attributes) {
          -      if (!(this_present_attributes && that_present_attributes))
          -        return false;
          -      if (!this.attributes.equals(that.attributes))
          -        return false;
          +      if (!(this_present_attributes && that_present_attributes)) return false;
          +      if (!this.attributes.equals(that.attributes)) return false;
               }
           
               boolean this_present_authorizations = true && this.isSetAuthorizations();
               boolean that_present_authorizations = true && that.isSetAuthorizations();
               if (this_present_authorizations || that_present_authorizations) {
          -      if (!(this_present_authorizations && that_present_authorizations))
          -        return false;
          -      if (!this.authorizations.equals(that.authorizations))
          -        return false;
          +      if (!(this_present_authorizations && that_present_authorizations)) return false;
          +      if (!this.authorizations.equals(that.authorizations)) return false;
               }
           
               boolean this_present_reversed = true && this.isSetReversed();
               boolean that_present_reversed = true && that.isSetReversed();
               if (this_present_reversed || that_present_reversed) {
          -      if (!(this_present_reversed && that_present_reversed))
          -        return false;
          -      if (this.reversed != that.reversed)
          -        return false;
          +      if (!(this_present_reversed && that_present_reversed)) return false;
          +      if (this.reversed != that.reversed) return false;
               }
           
               boolean this_present_cacheBlocks = true && this.isSetCacheBlocks();
               boolean that_present_cacheBlocks = true && that.isSetCacheBlocks();
               if (this_present_cacheBlocks || that_present_cacheBlocks) {
          -      if (!(this_present_cacheBlocks && that_present_cacheBlocks))
          -        return false;
          -      if (this.cacheBlocks != that.cacheBlocks)
          -        return false;
          +      if (!(this_present_cacheBlocks && that_present_cacheBlocks)) return false;
          +      if (this.cacheBlocks != that.cacheBlocks) return false;
               }
           
               boolean this_present_colFamTimeRangeMap = true && this.isSetColFamTimeRangeMap();
               boolean that_present_colFamTimeRangeMap = true && that.isSetColFamTimeRangeMap();
               if (this_present_colFamTimeRangeMap || that_present_colFamTimeRangeMap) {
          -      if (!(this_present_colFamTimeRangeMap && that_present_colFamTimeRangeMap))
          -        return false;
          -      if (!this.colFamTimeRangeMap.equals(that.colFamTimeRangeMap))
          -        return false;
          +      if (!(this_present_colFamTimeRangeMap && that_present_colFamTimeRangeMap)) return false;
          +      if (!this.colFamTimeRangeMap.equals(that.colFamTimeRangeMap)) return false;
               }
           
               boolean this_present_readType = true && this.isSetReadType();
               boolean that_present_readType = true && that.isSetReadType();
               if (this_present_readType || that_present_readType) {
          -      if (!(this_present_readType && that_present_readType))
          -        return false;
          -      if (!this.readType.equals(that.readType))
          -        return false;
          +      if (!(this_present_readType && that_present_readType)) return false;
          +      if (!this.readType.equals(that.readType)) return false;
               }
           
               boolean this_present_limit = true && this.isSetLimit();
               boolean that_present_limit = true && that.isSetLimit();
               if (this_present_limit || that_present_limit) {
          -      if (!(this_present_limit && that_present_limit))
          -        return false;
          -      if (this.limit != that.limit)
          -        return false;
          +      if (!(this_present_limit && that_present_limit)) return false;
          +      if (this.limit != that.limit) return false;
               }
           
               boolean this_present_consistency = true && this.isSetConsistency();
               boolean that_present_consistency = true && that.isSetConsistency();
               if (this_present_consistency || that_present_consistency) {
          -      if (!(this_present_consistency && that_present_consistency))
          -        return false;
          -      if (!this.consistency.equals(that.consistency))
          -        return false;
          +      if (!(this_present_consistency && that_present_consistency)) return false;
          +      if (!this.consistency.equals(that.consistency)) return false;
               }
           
               boolean this_present_targetReplicaId = true && this.isSetTargetReplicaId();
               boolean that_present_targetReplicaId = true && that.isSetTargetReplicaId();
               if (this_present_targetReplicaId || that_present_targetReplicaId) {
          -      if (!(this_present_targetReplicaId && that_present_targetReplicaId))
          -        return false;
          -      if (this.targetReplicaId != that.targetReplicaId)
          -        return false;
          +      if (!(this_present_targetReplicaId && that_present_targetReplicaId)) return false;
          +      if (this.targetReplicaId != that.targetReplicaId) return false;
               }
           
               boolean this_present_filterBytes = true && this.isSetFilterBytes();
               boolean that_present_filterBytes = true && that.isSetFilterBytes();
               if (this_present_filterBytes || that_present_filterBytes) {
          -      if (!(this_present_filterBytes && that_present_filterBytes))
          -        return false;
          -      if (!this.filterBytes.equals(that.filterBytes))
          -        return false;
          +      if (!(this_present_filterBytes && that_present_filterBytes)) return false;
          +      if (!this.filterBytes.equals(that.filterBytes)) return false;
               }
           
               return true;
          @@ -1325,76 +1407,58 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetStartRow()) ? 131071 : 524287);
          -    if (isSetStartRow())
          -      hashCode = hashCode * 8191 + startRow.hashCode();
          +    if (isSetStartRow()) hashCode = hashCode * 8191 + startRow.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetStopRow()) ? 131071 : 524287);
          -    if (isSetStopRow())
          -      hashCode = hashCode * 8191 + stopRow.hashCode();
          +    if (isSetStopRow()) hashCode = hashCode * 8191 + stopRow.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -    if (isSetColumns())
          -      hashCode = hashCode * 8191 + columns.hashCode();
          +    if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetCaching()) ? 131071 : 524287);
          -    if (isSetCaching())
          -      hashCode = hashCode * 8191 + caching;
          +    if (isSetCaching()) hashCode = hashCode * 8191 + caching;
           
               hashCode = hashCode * 8191 + ((isSetMaxVersions()) ? 131071 : 524287);
          -    if (isSetMaxVersions())
          -      hashCode = hashCode * 8191 + maxVersions;
          +    if (isSetMaxVersions()) hashCode = hashCode * 8191 + maxVersions;
           
               hashCode = hashCode * 8191 + ((isSetTimeRange()) ? 131071 : 524287);
          -    if (isSetTimeRange())
          -      hashCode = hashCode * 8191 + timeRange.hashCode();
          +    if (isSetTimeRange()) hashCode = hashCode * 8191 + timeRange.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetFilterString()) ? 131071 : 524287);
          -    if (isSetFilterString())
          -      hashCode = hashCode * 8191 + filterString.hashCode();
          +    if (isSetFilterString()) hashCode = hashCode * 8191 + filterString.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetBatchSize()) ? 131071 : 524287);
          -    if (isSetBatchSize())
          -      hashCode = hashCode * 8191 + batchSize;
          +    if (isSetBatchSize()) hashCode = hashCode * 8191 + batchSize;
           
               hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -    if (isSetAttributes())
          -      hashCode = hashCode * 8191 + attributes.hashCode();
          +    if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetAuthorizations()) ? 131071 : 524287);
          -    if (isSetAuthorizations())
          -      hashCode = hashCode * 8191 + authorizations.hashCode();
          +    if (isSetAuthorizations()) hashCode = hashCode * 8191 + authorizations.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetReversed()) ? 131071 : 524287);
          -    if (isSetReversed())
          -      hashCode = hashCode * 8191 + ((reversed) ? 131071 : 524287);
          +    if (isSetReversed()) hashCode = hashCode * 8191 + ((reversed) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetCacheBlocks()) ? 131071 : 524287);
          -    if (isSetCacheBlocks())
          -      hashCode = hashCode * 8191 + ((cacheBlocks) ? 131071 : 524287);
          +    if (isSetCacheBlocks()) hashCode = hashCode * 8191 + ((cacheBlocks) ? 131071 : 524287);
           
               hashCode = hashCode * 8191 + ((isSetColFamTimeRangeMap()) ? 131071 : 524287);
          -    if (isSetColFamTimeRangeMap())
          -      hashCode = hashCode * 8191 + colFamTimeRangeMap.hashCode();
          +    if (isSetColFamTimeRangeMap()) hashCode = hashCode * 8191 + colFamTimeRangeMap.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetReadType()) ? 131071 : 524287);
          -    if (isSetReadType())
          -      hashCode = hashCode * 8191 + readType.getValue();
          +    if (isSetReadType()) hashCode = hashCode * 8191 + readType.getValue();
           
               hashCode = hashCode * 8191 + ((isSetLimit()) ? 131071 : 524287);
          -    if (isSetLimit())
          -      hashCode = hashCode * 8191 + limit;
          +    if (isSetLimit()) hashCode = hashCode * 8191 + limit;
           
               hashCode = hashCode * 8191 + ((isSetConsistency()) ? 131071 : 524287);
          -    if (isSetConsistency())
          -      hashCode = hashCode * 8191 + consistency.getValue();
          +    if (isSetConsistency()) hashCode = hashCode * 8191 + consistency.getValue();
           
               hashCode = hashCode * 8191 + ((isSetTargetReplicaId()) ? 131071 : 524287);
          -    if (isSetTargetReplicaId())
          -      hashCode = hashCode * 8191 + targetReplicaId;
          +    if (isSetTargetReplicaId()) hashCode = hashCode * 8191 + targetReplicaId;
           
               hashCode = hashCode * 8191 + ((isSetFilterBytes()) ? 131071 : 524287);
          -    if (isSetFilterBytes())
          -      hashCode = hashCode * 8191 + filterBytes.hashCode();
          +    if (isSetFilterBytes()) hashCode = hashCode * 8191 + filterBytes.hashCode();
           
               return hashCode;
             }
          @@ -1472,7 +1536,8 @@ public int compareTo(TScan other) {
                 return lastComparison;
               }
               if (isSetFilterString()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filterString, other.filterString);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.filterString, other.filterString);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1502,7 +1567,8 @@ public int compareTo(TScan other) {
                 return lastComparison;
               }
               if (isSetAuthorizations()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authorizations, other.authorizations);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.authorizations, other.authorizations);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1527,12 +1593,14 @@ public int compareTo(TScan other) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetColFamTimeRangeMap(), other.isSetColFamTimeRangeMap());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetColFamTimeRangeMap(), other.isSetColFamTimeRangeMap());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetColFamTimeRangeMap()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colFamTimeRangeMap, other.colFamTimeRangeMap);
          +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colFamTimeRangeMap,
          +        other.colFamTimeRangeMap);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1567,12 +1635,14 @@ public int compareTo(TScan other) {
                   return lastComparison;
                 }
               }
          -    lastComparison = java.lang.Boolean.compare(isSetTargetReplicaId(), other.isSetTargetReplicaId());
          +    lastComparison =
          +        java.lang.Boolean.compare(isSetTargetReplicaId(), other.isSetTargetReplicaId());
               if (lastComparison != 0) {
                 return lastComparison;
               }
               if (isSetTargetReplicaId()) {
          -      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.targetReplicaId, other.targetReplicaId);
          +      lastComparison =
          +          org.apache.thrift.TBaseHelper.compareTo(this.targetReplicaId, other.targetReplicaId);
                 if (lastComparison != 0) {
                   return lastComparison;
                 }
          @@ -1599,7 +1669,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -1776,23 +1847,28 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TScanStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TScanStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TScanStandardScheme getScheme() {
                 return new TScanStandardScheme();
               }
          @@ -1800,13 +1876,13 @@ public TScanStandardScheme getScheme() {
           
              private static class TScanStandardScheme extends org.apache.thrift.scheme.StandardScheme<TScan> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -1814,7 +1890,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.startRow = iprot.readBinary();
                         struct.setStartRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1822,7 +1898,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.stopRow = iprot.readBinary();
                         struct.setStopRowIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1831,9 +1907,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                         {
                           org.apache.thrift.protocol.TList _list106 = iprot.readListBegin();
                            struct.columns = new java.util.ArrayList<TColumn>(_list106.size);
          -                @org.apache.thrift.annotation.Nullable TColumn _elem107;
          -                for (int _i108 = 0; _i108 < _list106.size; ++_i108)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TColumn _elem107;
          +                for (int _i108 = 0; _i108 < _list106.size; ++_i108) {
                             _elem107 = new TColumn();
                             _elem107.read(iprot);
                             struct.columns.add(_elem107);
          @@ -1841,7 +1917,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                           iprot.readListEnd();
                         }
                         struct.setColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1849,7 +1925,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.caching = iprot.readI32();
                         struct.setCachingIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1857,7 +1933,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.maxVersions = iprot.readI32();
                         struct.setMaxVersionsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1866,7 +1942,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                         struct.timeRange = new TTimeRange();
                         struct.timeRange.read(iprot);
                         struct.setTimeRangeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1874,7 +1950,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.filterString = iprot.readBinary();
                         struct.setFilterStringIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1882,7 +1958,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.batchSize = iprot.readI32();
                         struct.setBatchSizeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1890,11 +1966,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map109 = iprot.readMapBegin();
           -                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map109.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key110;
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val111;
          -                for (int _i112 = 0; _i112 < _map109.size; ++_i112)
          -                {
           +                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
          +                    2 * _map109.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key110;
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _val111;
          +                for (int _i112 = 0; _i112 < _map109.size; ++_i112) {
                             _key110 = iprot.readBinary();
                             _val111 = iprot.readBinary();
                             struct.attributes.put(_key110, _val111);
          @@ -1902,7 +1980,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                           iprot.readMapEnd();
                         }
                         struct.setAttributesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1911,7 +1989,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                         struct.authorizations = new TAuthorization();
                         struct.authorizations.read(iprot);
                         struct.setAuthorizationsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1919,7 +1997,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.reversed = iprot.readBool();
                         struct.setReversedIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1927,7 +2005,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
                         struct.cacheBlocks = iprot.readBool();
                         struct.setCacheBlocksIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1935,11 +2013,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map113 = iprot.readMapBegin();
           -                struct.colFamTimeRangeMap = new java.util.HashMap<java.nio.ByteBuffer,TTimeRange>(2*_map113.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key114;
          -                @org.apache.thrift.annotation.Nullable TTimeRange _val115;
          -                for (int _i116 = 0; _i116 < _map113.size; ++_i116)
          -                {
          +                struct.colFamTimeRangeMap =
           +                    new java.util.HashMap<java.nio.ByteBuffer, TTimeRange>(2 * _map113.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key114;
          +                @org.apache.thrift.annotation.Nullable
          +                TTimeRange _val115;
          +                for (int _i116 = 0; _i116 < _map113.size; ++_i116) {
                             _key114 = iprot.readBinary();
                             _val115 = new TTimeRange();
                             _val115.read(iprot);
          @@ -1948,15 +2028,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                           iprot.readMapEnd();
                         }
                         struct.setColFamTimeRangeMapIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 14: // READ_TYPE
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.readType = org.apache.hadoop.hbase.thrift2.generated.TReadType.findByValue(iprot.readI32());
          +              struct.readType =
          +                  org.apache.hadoop.hbase.thrift2.generated.TReadType.findByValue(iprot.readI32());
                         struct.setReadTypeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1964,15 +2045,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.limit = iprot.readI32();
                         struct.setLimitIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 16: // CONSISTENCY
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.consistency = org.apache.hadoop.hbase.thrift2.generated.TConsistency.findByValue(iprot.readI32());
          +              struct.consistency = org.apache.hadoop.hbase.thrift2.generated.TConsistency
          +                  .findByValue(iprot.readI32());
                         struct.setConsistencyIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1980,7 +2062,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.targetReplicaId = iprot.readI32();
                         struct.setTargetReplicaIdIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -1988,7 +2070,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.filterBytes = iprot.readBinary();
                         struct.setFilterBytesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -2003,7 +2085,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TScan struct) throw
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TScan struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TScan struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -2025,9 +2108,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TScan struct) thro
                   if (struct.isSetColumns()) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          -            for (TColumn _iter117 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          +            for (TColumn _iter117 : struct.columns) {
                         _iter117.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -2068,9 +2151,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TScan struct) thro
                   if (struct.isSetAttributes()) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter118 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter118 : struct.attributes
          +                .entrySet()) {
                         oprot.writeBinary(_iter118.getKey());
                         oprot.writeBinary(_iter118.getValue());
                       }
          @@ -2100,9 +2185,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TScan struct) thro
                   if (struct.isSetColFamTimeRangeMap()) {
                     oprot.writeFieldBegin(COL_FAM_TIME_RANGE_MAP_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.colFamTimeRangeMap.size()));
           -            for (java.util.Map.Entry<java.nio.ByteBuffer, TTimeRange> _iter119 : struct.colFamTimeRangeMap.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRUCT, struct.colFamTimeRangeMap.size()));
           +            for (java.util.Map.Entry<java.nio.ByteBuffer, TTimeRange> _iter119 : struct.colFamTimeRangeMap
          +                .entrySet()) {
                         oprot.writeBinary(_iter119.getKey());
                         _iter119.getValue().write(oprot);
                       }
          @@ -2157,8 +2244,10 @@ public TScanTupleScheme getScheme() {
              private static class TScanTupleScheme extends org.apache.thrift.scheme.TupleScheme<TScan> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetStartRow()) {
                   optionals.set(0);
          @@ -2224,8 +2313,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct) throw
                 if (struct.isSetColumns()) {
                   {
                     oprot.writeI32(struct.columns.size());
          -          for (TColumn _iter120 : struct.columns)
          -          {
          +          for (TColumn _iter120 : struct.columns) {
                       _iter120.write(oprot);
                     }
                   }
          @@ -2248,8 +2336,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct) throw
                 if (struct.isSetAttributes()) {
                   {
                     oprot.writeI32(struct.attributes.size());
           -          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter121 : struct.attributes.entrySet())
          -          {
           +          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter121 : struct.attributes
          +              .entrySet()) {
                       oprot.writeBinary(_iter121.getKey());
                       oprot.writeBinary(_iter121.getValue());
                     }
          @@ -2267,8 +2355,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct) throw
                 if (struct.isSetColFamTimeRangeMap()) {
                   {
                     oprot.writeI32(struct.colFamTimeRangeMap.size());
           -          for (java.util.Map.Entry<java.nio.ByteBuffer, TTimeRange> _iter122 : struct.colFamTimeRangeMap.entrySet())
          -          {
           +          for (java.util.Map.Entry<java.nio.ByteBuffer, TTimeRange> _iter122 : struct.colFamTimeRangeMap
          +              .entrySet()) {
                       oprot.writeBinary(_iter122.getKey());
                       _iter122.getValue().write(oprot);
                     }
          @@ -2292,8 +2380,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TScan struct) throw
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 java.util.BitSet incoming = iprot.readBitSet(18);
                 if (incoming.get(0)) {
                   struct.startRow = iprot.readBinary();
          @@ -2305,11 +2395,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws
                 }
                 if (incoming.get(2)) {
                   {
          -          org.apache.thrift.protocol.TList _list123 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list123 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
                      struct.columns = new java.util.ArrayList<TColumn>(_list123.size);
          -          @org.apache.thrift.annotation.Nullable TColumn _elem124;
          -          for (int _i125 = 0; _i125 < _list123.size; ++_i125)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TColumn _elem124;
          +          for (int _i125 = 0; _i125 < _list123.size; ++_i125) {
                       _elem124 = new TColumn();
                       _elem124.read(iprot);
                       struct.columns.add(_elem124);
          @@ -2340,12 +2431,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws
                 }
                 if (incoming.get(8)) {
                   {
          -          org.apache.thrift.protocol.TMap _map126 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
           -          struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map126.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key127;
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val128;
          -          for (int _i129 = 0; _i129 < _map126.size; ++_i129)
          -          {
          +          org.apache.thrift.protocol.TMap _map126 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
          +          struct.attributes =
           +              new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map126.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key127;
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _val128;
          +          for (int _i129 = 0; _i129 < _map126.size; ++_i129) {
                       _key127 = iprot.readBinary();
                       _val128 = iprot.readBinary();
                       struct.attributes.put(_key127, _val128);
          @@ -2368,12 +2462,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws
                 }
                 if (incoming.get(12)) {
                   {
          -          org.apache.thrift.protocol.TMap _map130 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT); 
           -          struct.colFamTimeRangeMap = new java.util.HashMap<java.nio.ByteBuffer,TTimeRange>(2*_map130.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key131;
          -          @org.apache.thrift.annotation.Nullable TTimeRange _val132;
          -          for (int _i133 = 0; _i133 < _map130.size; ++_i133)
          -          {
          +          org.apache.thrift.protocol.TMap _map130 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT);
          +          struct.colFamTimeRangeMap =
           +              new java.util.HashMap<java.nio.ByteBuffer, TTimeRange>(2 * _map130.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key131;
          +          @org.apache.thrift.annotation.Nullable
          +          TTimeRange _val132;
          +          for (int _i133 = 0; _i133 < _map130.size; ++_i133) {
                       _key131 = iprot.readBinary();
                       _val132 = new TTimeRange();
                       _val132.read(iprot);
          @@ -2383,7 +2480,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws
                   struct.setColFamTimeRangeMapIsSet(true);
                 }
                 if (incoming.get(13)) {
          -        struct.readType = org.apache.hadoop.hbase.thrift2.generated.TReadType.findByValue(iprot.readI32());
          +        struct.readType =
          +            org.apache.hadoop.hbase.thrift2.generated.TReadType.findByValue(iprot.readI32());
                   struct.setReadTypeIsSet(true);
                 }
                 if (incoming.get(14)) {
          @@ -2391,7 +2489,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws
                   struct.setLimitIsSet(true);
                 }
                 if (incoming.get(15)) {
          -        struct.consistency = org.apache.hadoop.hbase.thrift2.generated.TConsistency.findByValue(iprot.readI32());
          +        struct.consistency =
          +            org.apache.hadoop.hbase.thrift2.generated.TConsistency.findByValue(iprot.readI32());
                   struct.setConsistencyIsSet(true);
                 }
                 if (incoming.get(16)) {
          @@ -2405,8 +2504,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java
          index ec426e3296d3..46d64134718a 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java
          @@ -1,34 +1,58 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class TServerName implements org.apache.thrift.TBase<TServerName, TServerName._Fields>, java.io.Serializable, Cloneable, Comparable<TServerName> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TServerName");
          -
          -  private static final org.apache.thrift.protocol.TField HOST_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("hostName", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField PORT_FIELD_DESC = new org.apache.thrift.protocol.TField("port", org.apache.thrift.protocol.TType.I32, (short)2);
          -  private static final org.apache.thrift.protocol.TField START_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("startCode", org.apache.thrift.protocol.TType.I64, (short)3);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TServerNameStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TServerNameTupleSchemeFactory();
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           +public class TServerName implements org.apache.thrift.TBase<TServerName, TServerName._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<TServerName> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TServerName");
          +
          +  private static final org.apache.thrift.protocol.TField HOST_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("hostName", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField PORT_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("port", org.apache.thrift.protocol.TType.I32,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField START_CODE_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("startCode", org.apache.thrift.protocol.TType.I64,
          +          (short) 3);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TServerNameStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TServerNameTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable java.lang.String hostName; // required
             public int port; // optional
             public long startCode; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    HOST_NAME((short)1, "hostName"),
          -    PORT((short)2, "port"),
          -    START_CODE((short)3, "startCode");
          +    HOST_NAME((short) 1, "hostName"), PORT((short) 2, "port"), START_CODE((short) 3, "startCode");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -41,7 +65,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // HOST_NAME
                     return HOST_NAME;
                   case 2: // PORT
          @@ -54,12 +78,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -92,15 +116,21 @@ public java.lang.String getFieldName() {
             private static final int __PORT_ISSET_ID = 0;
             private static final int __STARTCODE_ISSET_ID = 1;
             private byte __isset_bitfield = 0;
          -  private static final _Fields optionals[] = {_Fields.PORT,_Fields.START_CODE};
          +  private static final _Fields optionals[] = { _Fields.PORT, _Fields.START_CODE };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.HOST_NAME, new org.apache.thrift.meta_data.FieldMetaData("hostName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
          -    tmpMap.put(_Fields.PORT, new org.apache.thrift.meta_data.FieldMetaData("port", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.HOST_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("hostName",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING)));
          +    tmpMap.put(_Fields.PORT, new org.apache.thrift.meta_data.FieldMetaData("port",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
          -    tmpMap.put(_Fields.START_CODE, new org.apache.thrift.meta_data.FieldMetaData("startCode", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          +    tmpMap.put(_Fields.START_CODE, new org.apache.thrift.meta_data.FieldMetaData("startCode",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TServerName.class, metaDataMap);
          @@ -109,9 +139,7 @@ public java.lang.String getFieldName() {
             public TServerName() {
             }
           
          -  public TServerName(
          -    java.lang.String hostName)
          -  {
          +  public TServerName(java.lang.String hostName) {
               this();
               this.hostName = hostName;
             }
          @@ -186,7 +214,8 @@ public boolean isSetPort() {
             }
           
             public void setPortIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __PORT_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __PORT_ISSET_ID, value);
             }
           
             public long getStartCode() {
          @@ -200,7 +229,8 @@ public TServerName setStartCode(long startCode) {
             }
           
             public void unsetStartCode() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __STARTCODE_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __STARTCODE_ISSET_ID);
             }
           
             /** Returns true if field startCode is set (has been assigned a value) and false otherwise */
          @@ -209,34 +239,36 @@ public boolean isSetStartCode() {
             }
           
             public void setStartCodeIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STARTCODE_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __STARTCODE_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case HOST_NAME:
          -      if (value == null) {
          -        unsetHostName();
          -      } else {
          -        setHostName((java.lang.String)value);
          -      }
          -      break;
          +      case HOST_NAME:
          +        if (value == null) {
          +          unsetHostName();
          +        } else {
          +          setHostName((java.lang.String) value);
          +        }
          +        break;
           
          -    case PORT:
          -      if (value == null) {
          -        unsetPort();
          -      } else {
          -        setPort((java.lang.Integer)value);
          -      }
          -      break;
          +      case PORT:
          +        if (value == null) {
          +          unsetPort();
          +        } else {
          +          setPort((java.lang.Integer) value);
          +        }
          +        break;
           
          -    case START_CODE:
          -      if (value == null) {
          -        unsetStartCode();
          -      } else {
          -        setStartCode((java.lang.Long)value);
          -      }
          -      break;
          +      case START_CODE:
          +        if (value == null) {
          +          unsetStartCode();
          +        } else {
          +          setStartCode((java.lang.Long) value);
          +        }
          +        break;
           
               }
             }
          @@ -244,74 +276,68 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case HOST_NAME:
          -      return getHostName();
          +      case HOST_NAME:
          +        return getHostName();
           
          -    case PORT:
          -      return getPort();
          +      case PORT:
          +        return getPort();
           
          -    case START_CODE:
          -      return getStartCode();
          +      case START_CODE:
          +        return getStartCode();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case HOST_NAME:
          -      return isSetHostName();
          -    case PORT:
          -      return isSetPort();
          -    case START_CODE:
          -      return isSetStartCode();
          +      case HOST_NAME:
          +        return isSetHostName();
          +      case PORT:
          +        return isSetPort();
          +      case START_CODE:
          +        return isSetStartCode();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TServerName)
          -      return this.equals((TServerName)that);
          +    if (that instanceof TServerName) return this.equals((TServerName) that);
               return false;
             }
           
             public boolean equals(TServerName that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_hostName = true && this.isSetHostName();
               boolean that_present_hostName = true && that.isSetHostName();
               if (this_present_hostName || that_present_hostName) {
          -      if (!(this_present_hostName && that_present_hostName))
          -        return false;
          -      if (!this.hostName.equals(that.hostName))
          -        return false;
          +      if (!(this_present_hostName && that_present_hostName)) return false;
          +      if (!this.hostName.equals(that.hostName)) return false;
               }
           
               boolean this_present_port = true && this.isSetPort();
               boolean that_present_port = true && that.isSetPort();
               if (this_present_port || that_present_port) {
          -      if (!(this_present_port && that_present_port))
          -        return false;
          -      if (this.port != that.port)
          -        return false;
          +      if (!(this_present_port && that_present_port)) return false;
          +      if (this.port != that.port) return false;
               }
           
               boolean this_present_startCode = true && this.isSetStartCode();
               boolean that_present_startCode = true && that.isSetStartCode();
               if (this_present_startCode || that_present_startCode) {
          -      if (!(this_present_startCode && that_present_startCode))
          -        return false;
          -      if (this.startCode != that.startCode)
          -        return false;
          +      if (!(this_present_startCode && that_present_startCode)) return false;
          +      if (this.startCode != that.startCode) return false;
               }
           
               return true;
          @@ -322,12 +348,10 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetHostName()) ? 131071 : 524287);
          -    if (isSetHostName())
          -      hashCode = hashCode * 8191 + hostName.hashCode();
          +    if (isSetHostName()) hashCode = hashCode * 8191 + hostName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetPort()) ? 131071 : 524287);
          -    if (isSetPort())
          -      hashCode = hashCode * 8191 + port;
          +    if (isSetPort()) hashCode = hashCode * 8191 + port;
           
               hashCode = hashCode * 8191 + ((isSetStartCode()) ? 131071 : 524287);
               if (isSetStartCode())
          @@ -386,7 +410,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -421,44 +446,51 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (hostName == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'hostName' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'hostName' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TServerNameStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TServerNameStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TServerNameStandardScheme getScheme() {
                 return new TServerNameStandardScheme();
               }
             }
           
           -  private static class TServerNameStandardScheme extends org.apache.thrift.scheme.StandardScheme<TServerName> {
           +  private static class TServerNameStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<TServerName> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TServerName struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TServerName struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -466,7 +498,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TServerName struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.hostName = iprot.readString();
                         struct.setHostNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -474,7 +506,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TServerName struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                         struct.port = iprot.readI32();
                         struct.setPortIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -482,7 +514,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TServerName struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.startCode = iprot.readI64();
                         struct.setStartCodeIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -497,7 +529,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TServerName struct)
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TServerName struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TServerName struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -522,17 +555,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TServerName struct
           
             }
           
          -  private static class TServerNameTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TServerNameTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TServerNameTupleScheme getScheme() {
                 return new TServerNameTupleScheme();
               }
             }
           
           -  private static class TServerNameTupleScheme extends org.apache.thrift.scheme.TupleScheme<TServerName> {
           +  private static class TServerNameTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<TServerName> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TServerName struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TServerName struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeString(struct.hostName);
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetPort()) {
          @@ -551,8 +588,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TServerName struct)
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TServerName struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TServerName struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.hostName = iprot.readString();
                 struct.setHostNameIsSet(true);
                 java.util.BitSet incoming = iprot.readBitSet(2);
          @@ -567,8 +606,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TServerName struct)
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
          index 7a3079a1a4c3..f3461fe12322 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
          @@ -1,49 +1,74 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.client.TableDescriptor
          + * Thrift wrapper around org.apache.hadoop.hbase.client.TableDescriptor
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class TTableDescriptor implements org.apache.thrift.TBase<TTableDescriptor, TTableDescriptor._Fields>, java.io.Serializable, Cloneable, Comparable<TTableDescriptor> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTableDescriptor");
          -
          -  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT, (short)1);
          -  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)2);
          -  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)3);
          -  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32, (short)4);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TTableDescriptorStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TTableDescriptorTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
          +public class TTableDescriptor
           +    implements org.apache.thrift.TBase<TTableDescriptor, TTableDescriptor._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<TTableDescriptor> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TTableDescriptor");
          +
          +  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRUCT,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST,
          +          (short) 2);
          +  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP,
          +          (short) 3);
          +  private static final org.apache.thrift.protocol.TField DURABILITY_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("durability", org.apache.thrift.protocol.TType.I32,
          +          (short) 4);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TTableDescriptorStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TTableDescriptorTupleSchemeFactory();
           
             public @org.apache.thrift.annotation.Nullable TTableName tableName; // required
              public @org.apache.thrift.annotation.Nullable java.util.List<TColumnFamilyDescriptor> columns; // optional
           -  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes; // optional
           +  public @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes; // optional
             /**
          -   * 
              * @see TDurability
              */
             public @org.apache.thrift.annotation.Nullable TDurability durability; // optional
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    TABLE_NAME((short)1, "tableName"),
          -    COLUMNS((short)2, "columns"),
          -    ATTRIBUTES((short)3, "attributes"),
          +    TABLE_NAME((short) 1, "tableName"), COLUMNS((short) 2, "columns"),
          +    ATTRIBUTES((short) 3, "attributes"),
               /**
          -     * 
                * @see TDurability
                */
          -    DURABILITY((short)4, "durability");
          +    DURABILITY((short) 4, "durability");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -56,7 +81,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // TABLE_NAME
                     return TABLE_NAME;
                   case 2: // COLUMNS
          @@ -71,12 +96,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -106,31 +131,44 @@ public java.lang.String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.COLUMNS,_Fields.ATTRIBUTES,_Fields.DURABILITY};
          +  private static final _Fields optionals[] =
          +      { _Fields.COLUMNS, _Fields.ATTRIBUTES, _Fields.DURABILITY };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableName.class)));
          -    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
          -            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnFamilyDescriptor.class))));
          -    tmpMap.put(_Fields.ATTRIBUTES, new org.apache.thrift.meta_data.FieldMetaData("attributes", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true), 
          -            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
          -    tmpMap.put(_Fields.DURABILITY, new org.apache.thrift.meta_data.FieldMetaData("durability", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TDurability.class)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.TABLE_NAME,
          +      new org.apache.thrift.meta_data.FieldMetaData("tableName",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +              TTableName.class)));
          +    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns",
          +        org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
          +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT,
          +                TColumnFamilyDescriptor.class))));
          +    tmpMap.put(_Fields.ATTRIBUTES,
          +      new org.apache.thrift.meta_data.FieldMetaData("attributes",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true),
          +              new org.apache.thrift.meta_data.FieldValueMetaData(
          +                  org.apache.thrift.protocol.TType.STRING, true))));
          +    tmpMap.put(_Fields.DURABILITY,
          +      new org.apache.thrift.meta_data.FieldMetaData("durability",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM,
          +              TDurability.class)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
          -    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTableDescriptor.class, metaDataMap);
          +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTableDescriptor.class,
          +      metaDataMap);
             }
           
             public TTableDescriptor() {
             }
           
          -  public TTableDescriptor(
          -    TTableName tableName)
          -  {
          +  public TTableDescriptor(TTableName tableName) {
               this();
               this.tableName = tableName;
             }
          @@ -143,14 +181,16 @@ public TTableDescriptor(TTableDescriptor other) {
                 this.tableName = new TTableName(other.tableName);
               }
               if (other.isSetColumns()) {
-      java.util.List<TColumnFamilyDescriptor> __this__columns = new java.util.ArrayList<TColumnFamilyDescriptor>(other.columns.size());
+      java.util.List<TColumnFamilyDescriptor> __this__columns =
+          new java.util.ArrayList<TColumnFamilyDescriptor>(other.columns.size());
                 for (TColumnFamilyDescriptor other_element : other.columns) {
                   __this__columns.add(new TColumnFamilyDescriptor(other_element));
                 }
                 this.columns = __this__columns;
               }
               if (other.isSetAttributes()) {
-      java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> __this__attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(other.attributes);
+      java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> __this__attributes =
+          new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(other.attributes);
                 this.attributes = __this__attributes;
               }
               if (other.isSetDurability()) {
          @@ -175,7 +215,8 @@ public TTableName getTableName() {
               return this.tableName;
             }
           
          -  public TTableDescriptor setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
          +  public TTableDescriptor
          +      setTableName(@org.apache.thrift.annotation.Nullable TTableName tableName) {
               this.tableName = tableName;
               return this;
             }
@@ -216,7 +257,8 @@ public java.util.List<TColumnFamilyDescriptor> getColumns() {
               return this.columns;
             }
           
-  public TTableDescriptor setColumns(@org.apache.thrift.annotation.Nullable java.util.List<TColumnFamilyDescriptor> columns) {
+  public TTableDescriptor setColumns(
+      @org.apache.thrift.annotation.Nullable java.util.List<TColumnFamilyDescriptor> columns) {
               this.columns = columns;
               return this;
             }
          @@ -242,17 +284,18 @@ public int getAttributesSize() {
           
             public void putToAttributes(java.nio.ByteBuffer key, java.nio.ByteBuffer val) {
               if (this.attributes == null) {
-      this.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>();
+      this.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>();
               }
               this.attributes.put(key, val);
             }
           
             @org.apache.thrift.annotation.Nullable
-  public java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> getAttributes() {
+  public java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> getAttributes() {
               return this.attributes;
             }
           
-  public TTableDescriptor setAttributes(@org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer> attributes) {
+  public TTableDescriptor setAttributes(
+      @org.apache.thrift.annotation.Nullable java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer> attributes) {
               this.attributes = attributes;
               return this;
             }
          @@ -273,7 +316,6 @@ public void setAttributesIsSet(boolean value) {
             }
           
             /**
          -   * 
              * @see TDurability
              */
             @org.apache.thrift.annotation.Nullable
          @@ -282,10 +324,10 @@ public TDurability getDurability() {
             }
           
             /**
          -   * 
              * @see TDurability
              */
          -  public TTableDescriptor setDurability(@org.apache.thrift.annotation.Nullable TDurability durability) {
          +  public TTableDescriptor
          +      setDurability(@org.apache.thrift.annotation.Nullable TDurability durability) {
               this.durability = durability;
               return this;
             }
          @@ -305,39 +347,40 @@ public void setDurabilityIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case TABLE_NAME:
          -      if (value == null) {
          -        unsetTableName();
          -      } else {
          -        setTableName((TTableName)value);
          -      }
          -      break;
          +      case TABLE_NAME:
          +        if (value == null) {
          +          unsetTableName();
          +        } else {
          +          setTableName((TTableName) value);
          +        }
          +        break;
           
          -    case COLUMNS:
          -      if (value == null) {
          -        unsetColumns();
          -      } else {
-        setColumns((java.util.List<TColumnFamilyDescriptor>)value);
          -      }
          -      break;
          +      case COLUMNS:
          +        if (value == null) {
          +          unsetColumns();
          +        } else {
+          setColumns((java.util.List<TColumnFamilyDescriptor>) value);
          +        }
          +        break;
           
          -    case ATTRIBUTES:
          -      if (value == null) {
          -        unsetAttributes();
          -      } else {
-        setAttributes((java.util.Map<java.nio.ByteBuffer,java.nio.ByteBuffer>)value);
          -      }
          -      break;
          +      case ATTRIBUTES:
          +        if (value == null) {
          +          unsetAttributes();
          +        } else {
+          setAttributes((java.util.Map<java.nio.ByteBuffer, java.nio.ByteBuffer>) value);
          +        }
          +        break;
           
          -    case DURABILITY:
          -      if (value == null) {
          -        unsetDurability();
          -      } else {
          -        setDurability((TDurability)value);
          -      }
          -      break;
          +      case DURABILITY:
          +        if (value == null) {
          +          unsetDurability();
          +        } else {
          +          setDurability((TDurability) value);
          +        }
          +        break;
           
               }
             }
          @@ -345,88 +388,80 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case TABLE_NAME:
          -      return getTableName();
          +      case TABLE_NAME:
          +        return getTableName();
           
          -    case COLUMNS:
          -      return getColumns();
          +      case COLUMNS:
          +        return getColumns();
           
          -    case ATTRIBUTES:
          -      return getAttributes();
          +      case ATTRIBUTES:
          +        return getAttributes();
           
          -    case DURABILITY:
          -      return getDurability();
          +      case DURABILITY:
          +        return getDurability();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case TABLE_NAME:
          -      return isSetTableName();
          -    case COLUMNS:
          -      return isSetColumns();
          -    case ATTRIBUTES:
          -      return isSetAttributes();
          -    case DURABILITY:
          -      return isSetDurability();
          +      case TABLE_NAME:
          +        return isSetTableName();
          +      case COLUMNS:
          +        return isSetColumns();
          +      case ATTRIBUTES:
          +        return isSetAttributes();
          +      case DURABILITY:
          +        return isSetDurability();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TTableDescriptor)
          -      return this.equals((TTableDescriptor)that);
          +    if (that instanceof TTableDescriptor) return this.equals((TTableDescriptor) that);
               return false;
             }
           
             public boolean equals(TTableDescriptor that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_tableName = true && this.isSetTableName();
               boolean that_present_tableName = true && that.isSetTableName();
               if (this_present_tableName || that_present_tableName) {
          -      if (!(this_present_tableName && that_present_tableName))
          -        return false;
          -      if (!this.tableName.equals(that.tableName))
          -        return false;
          +      if (!(this_present_tableName && that_present_tableName)) return false;
          +      if (!this.tableName.equals(that.tableName)) return false;
               }
           
               boolean this_present_columns = true && this.isSetColumns();
               boolean that_present_columns = true && that.isSetColumns();
               if (this_present_columns || that_present_columns) {
          -      if (!(this_present_columns && that_present_columns))
          -        return false;
          -      if (!this.columns.equals(that.columns))
          -        return false;
          +      if (!(this_present_columns && that_present_columns)) return false;
          +      if (!this.columns.equals(that.columns)) return false;
               }
           
               boolean this_present_attributes = true && this.isSetAttributes();
               boolean that_present_attributes = true && that.isSetAttributes();
               if (this_present_attributes || that_present_attributes) {
          -      if (!(this_present_attributes && that_present_attributes))
          -        return false;
          -      if (!this.attributes.equals(that.attributes))
          -        return false;
          +      if (!(this_present_attributes && that_present_attributes)) return false;
          +      if (!this.attributes.equals(that.attributes)) return false;
               }
           
               boolean this_present_durability = true && this.isSetDurability();
               boolean that_present_durability = true && that.isSetDurability();
               if (this_present_durability || that_present_durability) {
          -      if (!(this_present_durability && that_present_durability))
          -        return false;
          -      if (!this.durability.equals(that.durability))
          -        return false;
          +      if (!(this_present_durability && that_present_durability)) return false;
          +      if (!this.durability.equals(that.durability)) return false;
               }
           
               return true;
          @@ -437,20 +472,16 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetTableName()) ? 131071 : 524287);
          -    if (isSetTableName())
          -      hashCode = hashCode * 8191 + tableName.hashCode();
          +    if (isSetTableName()) hashCode = hashCode * 8191 + tableName.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetColumns()) ? 131071 : 524287);
          -    if (isSetColumns())
          -      hashCode = hashCode * 8191 + columns.hashCode();
          +    if (isSetColumns()) hashCode = hashCode * 8191 + columns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetAttributes()) ? 131071 : 524287);
          -    if (isSetAttributes())
          -      hashCode = hashCode * 8191 + attributes.hashCode();
          +    if (isSetAttributes()) hashCode = hashCode * 8191 + attributes.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetDurability()) ? 131071 : 524287);
          -    if (isSetDurability())
          -      hashCode = hashCode * 8191 + durability.getValue();
          +    if (isSetDurability()) hashCode = hashCode * 8191 + durability.getValue();
           
               return hashCode;
             }
          @@ -515,7 +546,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -568,7 +600,8 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (tableName == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'tableName' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
               if (tableName != null) {
          @@ -578,35 +611,40 @@ public void validate() throws org.apache.thrift.TException {
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TTableDescriptorStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TTableDescriptorStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TTableDescriptorStandardScheme getScheme() {
                 return new TTableDescriptorStandardScheme();
               }
             }
           
-  private static class TTableDescriptorStandardScheme extends org.apache.thrift.scheme.StandardScheme<TTableDescriptor> {
+  private static class TTableDescriptorStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<TTableDescriptor> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TTableDescriptor struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TTableDescriptor struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -615,7 +653,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableDescriptor st
                         struct.tableName = new TTableName();
                         struct.tableName.read(iprot);
                         struct.setTableNameIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -624,9 +662,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableDescriptor st
                         {
                           org.apache.thrift.protocol.TList _list162 = iprot.readListBegin();
                 struct.columns = new java.util.ArrayList<TColumnFamilyDescriptor>(_list162.size);
          -                @org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor _elem163;
          -                for (int _i164 = 0; _i164 < _list162.size; ++_i164)
          -                {
          +                @org.apache.thrift.annotation.Nullable
          +                TColumnFamilyDescriptor _elem163;
          +                for (int _i164 = 0; _i164 < _list162.size; ++_i164) {
                             _elem163 = new TColumnFamilyDescriptor();
                             _elem163.read(iprot);
                             struct.columns.add(_elem163);
          @@ -634,7 +672,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableDescriptor st
                           iprot.readListEnd();
                         }
                         struct.setColumnsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -642,11 +680,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableDescriptor st
                       if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                         {
                           org.apache.thrift.protocol.TMap _map165 = iprot.readMapBegin();
-                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map165.size);
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key166;
          -                @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val167;
          -                for (int _i168 = 0; _i168 < _map165.size; ++_i168)
          -                {
+                struct.attributes = new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(
+                    2 * _map165.size);
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _key166;
          +                @org.apache.thrift.annotation.Nullable
          +                java.nio.ByteBuffer _val167;
          +                for (int _i168 = 0; _i168 < _map165.size; ++_i168) {
                             _key166 = iprot.readBinary();
                             _val167 = iprot.readBinary();
                             struct.attributes.put(_key166, _val167);
          @@ -654,15 +694,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableDescriptor st
                           iprot.readMapEnd();
                         }
                         struct.setAttributesIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
                     case 4: // DURABILITY
                       if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
          -              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +              struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability
          +                  .findByValue(iprot.readI32());
                         struct.setDurabilityIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -677,7 +718,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableDescriptor st
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TTableDescriptor struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TTableDescriptor struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -690,9 +732,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TTableDescriptor s
                   if (struct.isSetColumns()) {
                     oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
                     {
          -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          -            for (TColumnFamilyDescriptor _iter169 : struct.columns)
          -            {
          +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(
          +                org.apache.thrift.protocol.TType.STRUCT, struct.columns.size()));
          +            for (TColumnFamilyDescriptor _iter169 : struct.columns) {
                         _iter169.write(oprot);
                       }
                       oprot.writeListEnd();
          @@ -704,9 +746,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TTableDescriptor s
                   if (struct.isSetAttributes()) {
                     oprot.writeFieldBegin(ATTRIBUTES_FIELD_DESC);
                     {
          -            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
-            for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> _iter170 : struct.attributes.entrySet())
          -            {
          +            oprot.writeMapBegin(
          +              new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING,
          +                  org.apache.thrift.protocol.TType.STRING, struct.attributes.size()));
+            for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter170 : struct.attributes
+                .entrySet()) {
                         oprot.writeBinary(_iter170.getKey());
                         oprot.writeBinary(_iter170.getValue());
                       }
          @@ -728,17 +772,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TTableDescriptor s
           
             }
           
          -  private static class TTableDescriptorTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TTableDescriptorTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TTableDescriptorTupleScheme getScheme() {
                 return new TTableDescriptorTupleScheme();
               }
             }
           
-  private static class TTableDescriptorTupleScheme extends org.apache.thrift.scheme.TupleScheme<TTableDescriptor> {
+  private static class TTableDescriptorTupleScheme
+      extends org.apache.thrift.scheme.TupleScheme<TTableDescriptor> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TTableDescriptor struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TTableDescriptor struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.tableName.write(oprot);
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetColumns()) {
          @@ -754,8 +802,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TTableDescriptor st
                 if (struct.isSetColumns()) {
                   {
                     oprot.writeI32(struct.columns.size());
          -          for (TColumnFamilyDescriptor _iter171 : struct.columns)
          -          {
          +          for (TColumnFamilyDescriptor _iter171 : struct.columns) {
                       _iter171.write(oprot);
                     }
                   }
          @@ -763,8 +810,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TTableDescriptor st
                 if (struct.isSetAttributes()) {
                   {
                     oprot.writeI32(struct.attributes.size());
-          for (java.util.Map.Entry<java.nio.ByteBuffer,java.nio.ByteBuffer> _iter172 : struct.attributes.entrySet())
          -          {
+          for (java.util.Map.Entry<java.nio.ByteBuffer, java.nio.ByteBuffer> _iter172 : struct.attributes
+              .entrySet()) {
                       oprot.writeBinary(_iter172.getKey());
                       oprot.writeBinary(_iter172.getValue());
                     }
          @@ -776,19 +823,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TTableDescriptor st
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TTableDescriptor struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TTableDescriptor struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.tableName = new TTableName();
                 struct.tableName.read(iprot);
                 struct.setTableNameIsSet(true);
                 java.util.BitSet incoming = iprot.readBitSet(3);
                 if (incoming.get(0)) {
                   {
          -          org.apache.thrift.protocol.TList _list173 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
          +          org.apache.thrift.protocol.TList _list173 =
          +              iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT);
           struct.columns = new java.util.ArrayList<TColumnFamilyDescriptor>(_list173.size);
          -          @org.apache.thrift.annotation.Nullable TColumnFamilyDescriptor _elem174;
          -          for (int _i175 = 0; _i175 < _list173.size; ++_i175)
          -          {
          +          @org.apache.thrift.annotation.Nullable
          +          TColumnFamilyDescriptor _elem174;
          +          for (int _i175 = 0; _i175 < _list173.size; ++_i175) {
                       _elem174 = new TColumnFamilyDescriptor();
                       _elem174.read(iprot);
                       struct.columns.add(_elem174);
          @@ -798,12 +848,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TTableDescriptor str
                 }
                 if (incoming.get(1)) {
                   {
          -          org.apache.thrift.protocol.TMap _map176 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); 
-          struct.attributes = new java.util.HashMap<java.nio.ByteBuffer,java.nio.ByteBuffer>(2*_map176.size);
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _key177;
          -          @org.apache.thrift.annotation.Nullable java.nio.ByteBuffer _val178;
          -          for (int _i179 = 0; _i179 < _map176.size; ++_i179)
          -          {
          +          org.apache.thrift.protocol.TMap _map176 = iprot.readMapBegin(
          +            org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING);
+          struct.attributes =
+              new java.util.HashMap<java.nio.ByteBuffer, java.nio.ByteBuffer>(2 * _map176.size);
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _key177;
          +          @org.apache.thrift.annotation.Nullable
          +          java.nio.ByteBuffer _val178;
          +          for (int _i179 = 0; _i179 < _map176.size; ++_i179) {
                       _key177 = iprot.readBinary();
                       _val178 = iprot.readBinary();
                       struct.attributes.put(_key177, _val178);
          @@ -812,14 +865,17 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TTableDescriptor str
                   struct.setAttributesIsSet(true);
                 }
                 if (incoming.get(2)) {
          -        struct.durability = org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
          +        struct.durability =
          +            org.apache.hadoop.hbase.thrift2.generated.TDurability.findByValue(iprot.readI32());
                   struct.setDurabilityIsSet(true);
                 }
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
+  private static <S extends org.apache.thrift.scheme.IScheme> S
+      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
          index 80ae046e2ad3..1906e9fe4294 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
          @@ -1,25 +1,44 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
           /**
          - * Thrift wrapper around
          - * org.apache.hadoop.hbase.TableName
          + * Thrift wrapper around org.apache.hadoop.hbase.TableName
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
-public class TTableName implements org.apache.thrift.TBase<TTableName, TTableName._Fields>, java.io.Serializable, Cloneable, Comparable<TTableName> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTableName");
          -
          -  private static final org.apache.thrift.protocol.TField NS_FIELD_DESC = new org.apache.thrift.protocol.TField("ns", org.apache.thrift.protocol.TType.STRING, (short)1);
          -  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TTableNameStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TTableNameTupleSchemeFactory();
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
+public class TTableName implements org.apache.thrift.TBase<TTableName, TTableName._Fields>,
+    java.io.Serializable, Cloneable, Comparable<TTableName> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TTableName");
          +
          +  private static final org.apache.thrift.protocol.TField NS_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("ns", org.apache.thrift.protocol.TType.STRING,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TTableNameStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TTableNameTupleSchemeFactory();
           
             /**
              * namespace name
@@ -30,18 +49,22 @@ public class TTableName implements org.apache.thrift.TBase<TTableName, TTableNam
-    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
+    private static final java.util.Map<java.lang.String, _Fields> byName =
+        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -54,7 +77,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // NS
                     return NS;
                   case 2: // QUALIFIER
          @@ -65,12 +88,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -100,14 +123,21 @@ public java.lang.String getFieldName() {
             }
           
             // isset id assignments
          -  private static final _Fields optionals[] = {_Fields.NS};
          +  private static final _Fields optionals[] = { _Fields.NS };
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.NS, new org.apache.thrift.meta_data.FieldMetaData("ns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          -    tmpMap.put(_Fields.QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("qualifier", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          -        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.NS,
          +      new org.apache.thrift.meta_data.FieldMetaData("ns",
          +          org.apache.thrift.TFieldRequirementType.OPTIONAL,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
          +    tmpMap.put(_Fields.QUALIFIER,
          +      new org.apache.thrift.meta_data.FieldMetaData("qualifier",
          +          org.apache.thrift.TFieldRequirementType.REQUIRED,
          +          new org.apache.thrift.meta_data.FieldValueMetaData(
          +              org.apache.thrift.protocol.TType.STRING, true)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTableName.class, metaDataMap);
             }
          @@ -115,9 +145,7 @@ public java.lang.String getFieldName() {
             public TTableName() {
             }
           
          -  public TTableName(
          -    java.nio.ByteBuffer qualifier)
          -  {
          +  public TTableName(java.nio.ByteBuffer qualifier) {
               this();
               this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
             }
          @@ -160,7 +188,7 @@ public java.nio.ByteBuffer bufferForNs() {
              * namespace name
              */
             public TTableName setNs(byte[] ns) {
          -    this.ns = ns == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(ns.clone());
          +    this.ns = ns == null ? (java.nio.ByteBuffer) null : java.nio.ByteBuffer.wrap(ns.clone());
               return this;
             }
           
          @@ -200,11 +228,13 @@ public java.nio.ByteBuffer bufferForQualifier() {
              * tablename
              */
             public TTableName setQualifier(byte[] qualifier) {
          -    this.qualifier = qualifier == null ? (java.nio.ByteBuffer)null   : java.nio.ByteBuffer.wrap(qualifier.clone());
          +    this.qualifier = qualifier == null ? (java.nio.ByteBuffer) null
          +        : java.nio.ByteBuffer.wrap(qualifier.clone());
               return this;
             }
           
          -  public TTableName setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
          +  public TTableName
          +      setQualifier(@org.apache.thrift.annotation.Nullable java.nio.ByteBuffer qualifier) {
               this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(qualifier);
               return this;
             }
          @@ -224,31 +254,32 @@ public void setQualifierIsSet(boolean value) {
               }
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case NS:
          -      if (value == null) {
          -        unsetNs();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setNs((byte[])value);
          +      case NS:
          +        if (value == null) {
          +          unsetNs();
                   } else {
          -          setNs((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setNs((byte[]) value);
          +          } else {
          +            setNs((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
          -    case QUALIFIER:
          -      if (value == null) {
          -        unsetQualifier();
          -      } else {
          -        if (value instanceof byte[]) {
          -          setQualifier((byte[])value);
          +      case QUALIFIER:
          +        if (value == null) {
          +          unsetQualifier();
                   } else {
          -          setQualifier((java.nio.ByteBuffer)value);
          +          if (value instanceof byte[]) {
          +            setQualifier((byte[]) value);
          +          } else {
          +            setQualifier((java.nio.ByteBuffer) value);
          +          }
                   }
          -      }
          -      break;
          +        break;
           
               }
             }
          @@ -256,60 +287,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case NS:
          -      return getNs();
          +      case NS:
          +        return getNs();
           
          -    case QUALIFIER:
          -      return getQualifier();
          +      case QUALIFIER:
          +        return getQualifier();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case NS:
          -      return isSetNs();
          -    case QUALIFIER:
          -      return isSetQualifier();
          +      case NS:
          +        return isSetNs();
          +      case QUALIFIER:
          +        return isSetQualifier();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TTableName)
          -      return this.equals((TTableName)that);
          +    if (that instanceof TTableName) return this.equals((TTableName) that);
               return false;
             }
           
             public boolean equals(TTableName that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_ns = true && this.isSetNs();
               boolean that_present_ns = true && that.isSetNs();
               if (this_present_ns || that_present_ns) {
          -      if (!(this_present_ns && that_present_ns))
          -        return false;
          -      if (!this.ns.equals(that.ns))
          -        return false;
          +      if (!(this_present_ns && that_present_ns)) return false;
          +      if (!this.ns.equals(that.ns)) return false;
               }
           
               boolean this_present_qualifier = true && this.isSetQualifier();
               boolean that_present_qualifier = true && that.isSetQualifier();
               if (this_present_qualifier || that_present_qualifier) {
          -      if (!(this_present_qualifier && that_present_qualifier))
          -        return false;
          -      if (!this.qualifier.equals(that.qualifier))
          -        return false;
          +      if (!(this_present_qualifier && that_present_qualifier)) return false;
          +      if (!this.qualifier.equals(that.qualifier)) return false;
               }
           
               return true;
          @@ -320,12 +347,10 @@ public int hashCode() {
               int hashCode = 1;
           
               hashCode = hashCode * 8191 + ((isSetNs()) ? 131071 : 524287);
          -    if (isSetNs())
          -      hashCode = hashCode * 8191 + ns.hashCode();
          +    if (isSetNs()) hashCode = hashCode * 8191 + ns.hashCode();
           
               hashCode = hashCode * 8191 + ((isSetQualifier()) ? 131071 : 524287);
          -    if (isSetQualifier())
          -      hashCode = hashCode * 8191 + qualifier.hashCode();
          +    if (isSetQualifier()) hashCode = hashCode * 8191 + qualifier.hashCode();
           
               return hashCode;
             }
          @@ -370,7 +395,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -403,42 +429,48 @@ public java.lang.String toString() {
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
               if (qualifier == null) {
          -      throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifier' was not present! Struct: " + toString());
          +      throw new org.apache.thrift.protocol.TProtocolException(
          +          "Required field 'qualifier' was not present! Struct: " + toString());
               }
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TTableNameStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TTableNameStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TTableNameStandardScheme getScheme() {
                 return new TTableNameStandardScheme();
               }
             }
           
-  private static class TTableNameStandardScheme extends org.apache.thrift.scheme.StandardScheme<TTableName> {
+  private static class TTableNameStandardScheme
+      extends org.apache.thrift.scheme.StandardScheme<TTableName> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TTableName struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TTableName struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -446,7 +478,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableName struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.ns = iprot.readBinary();
                         struct.setNsIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -454,7 +486,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableName struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                         struct.qualifier = iprot.readBinary();
                         struct.setQualifierIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -469,7 +501,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableName struct)
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TTableName struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TTableName struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -491,17 +524,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TTableName struct)
           
             }
           
          -  private static class TTableNameTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TTableNameTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TTableNameTupleScheme getScheme() {
                 return new TTableNameTupleScheme();
               }
             }
           
-  private static class TTableNameTupleScheme extends org.apache.thrift.scheme.TupleScheme<TTableName> {
+  private static class TTableNameTupleScheme
+      extends org.apache.thrift.scheme.TupleScheme<TTableName> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TTableName struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TTableName struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeBinary(struct.qualifier);
                 java.util.BitSet optionals = new java.util.BitSet();
                 if (struct.isSetNs()) {
          @@ -514,8 +551,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TTableName struct)
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TTableName struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TTableName struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.qualifier = iprot.readBinary();
                 struct.setQualifierIsSet(true);
                 java.util.BitSet incoming = iprot.readBitSet(1);
          @@ -526,8 +565,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TTableName struct) t
               }
             }
           
-  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TThriftServerType.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TThriftServerType.java
          index 722b0f582396..a67f357a7c4f 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TThriftServerType.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TThriftServerType.java
          @@ -1,19 +1,29 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
          + *
          + *     http://www.apache.org/licenses/LICENSE-2.0
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -
           /**
            * Specify type of thrift server: thrift and thrift2
            */
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           public enum TThriftServerType implements org.apache.thrift.TEnum {
          -  ONE(1),
          -  TWO(2);
          +  ONE(1), TWO(2);
           
             private final int value;
           
          @@ -33,7 +43,7 @@ public int getValue() {
              * @return null if the value is not found.
              */
             @org.apache.thrift.annotation.Nullable
          -  public static TThriftServerType findByValue(int value) { 
          +  public static TThriftServerType findByValue(int value) {
               switch (value) {
                 case 1:
                   return ONE;
          diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
          index fefa7cbf3e93..d2aa99fce791 100644
          --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
          +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
          @@ -1,31 +1,54 @@
          -/**
          - * Autogenerated by Thrift Compiler (0.14.1)
          +/*
          + * Licensed to the Apache Software Foundation (ASF) under one
          + * or more contributor license agreements.  See the NOTICE file
          + * distributed with this work for additional information
          + * regarding copyright ownership.  The ASF licenses this file
          + * to you under the Apache License, Version 2.0 (the
          + * "License"); you may not use this file except in compliance
          + * with the License.  You may obtain a copy of the License at
            *
          - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
          - *  @generated
          + *     http://www.apache.org/licenses/LICENSE-2.0
          + *
          + * Unless required by applicable law or agreed to in writing, software
          + * distributed under the License is distributed on an "AS IS" BASIS,
          + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          + * See the License for the specific language governing permissions and
          + * limitations under the License.
            */
           package org.apache.hadoop.hbase.thrift2.generated;
           
          -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"})
          -@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)", date = "2021-07-19")
           -public class TTimeRange implements org.apache.thrift.TBase<TTimeRange, TTimeRange._Fields>, java.io.Serializable, Cloneable, Comparable<TTimeRange> {
          -  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTimeRange");
          -
          -  private static final org.apache.thrift.protocol.TField MIN_STAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("minStamp", org.apache.thrift.protocol.TType.I64, (short)1);
          -  private static final org.apache.thrift.protocol.TField MAX_STAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("maxStamp", org.apache.thrift.protocol.TType.I64, (short)2);
          -
          -  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TTimeRangeStandardSchemeFactory();
          -  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TTimeRangeTupleSchemeFactory();
          +@SuppressWarnings({ "cast", "rawtypes", "serial", "unchecked", "unused" })
          +@javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.14.1)",
          +    date = "2021-07-19")
           +public class TTimeRange implements org.apache.thrift.TBase<TTimeRange, TTimeRange._Fields>,
           +    java.io.Serializable, Cloneable, Comparable<TTimeRange> {
          +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC =
          +      new org.apache.thrift.protocol.TStruct("TTimeRange");
          +
          +  private static final org.apache.thrift.protocol.TField MIN_STAMP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("minStamp", org.apache.thrift.protocol.TType.I64,
          +          (short) 1);
          +  private static final org.apache.thrift.protocol.TField MAX_STAMP_FIELD_DESC =
          +      new org.apache.thrift.protocol.TField("maxStamp", org.apache.thrift.protocol.TType.I64,
          +          (short) 2);
          +
          +  private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY =
          +      new TTimeRangeStandardSchemeFactory();
          +  private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY =
          +      new TTimeRangeTupleSchemeFactory();
           
             public long minStamp; // required
             public long maxStamp; // required
           
          -  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
          +  /**
          +   * The set of fields this struct contains, along with convenience methods for finding and
          +   * manipulating them.
          +   */
             public enum _Fields implements org.apache.thrift.TFieldIdEnum {
          -    MIN_STAMP((short)1, "minStamp"),
          -    MAX_STAMP((short)2, "maxStamp");
          +    MIN_STAMP((short) 1, "minStamp"), MAX_STAMP((short) 2, "maxStamp");
           
           -    private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
           +    private static final java.util.Map<java.lang.String, _Fields> byName =
           +        new java.util.HashMap<java.lang.String, _Fields>();
           
               static {
                 for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) {
          @@ -38,7 +61,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
                */
               @org.apache.thrift.annotation.Nullable
               public static _Fields findByThriftId(int fieldId) {
          -      switch(fieldId) {
          +      switch (fieldId) {
                   case 1: // MIN_STAMP
                     return MIN_STAMP;
                   case 2: // MAX_STAMP
          @@ -49,12 +72,12 @@ public static _Fields findByThriftId(int fieldId) {
               }
           
               /**
          -     * Find the _Fields constant that matches fieldId, throwing an exception
          -     * if it is not found.
          +     * Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
                */
               public static _Fields findByThriftIdOrThrow(int fieldId) {
                 _Fields fields = findByThriftId(fieldId);
          -      if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
          +      if (fields == null)
          +        throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!");
                 return fields;
               }
           
          @@ -89,10 +112,13 @@ public java.lang.String getFieldName() {
             private byte __isset_bitfield = 0;
             public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
             static {
          -    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          -    tmpMap.put(_Fields.MIN_STAMP, new org.apache.thrift.meta_data.FieldMetaData("minStamp", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap =
          +        new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
          +    tmpMap.put(_Fields.MIN_STAMP, new org.apache.thrift.meta_data.FieldMetaData("minStamp",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
          -    tmpMap.put(_Fields.MAX_STAMP, new org.apache.thrift.meta_data.FieldMetaData("maxStamp", org.apache.thrift.TFieldRequirementType.REQUIRED, 
          +    tmpMap.put(_Fields.MAX_STAMP, new org.apache.thrift.meta_data.FieldMetaData("maxStamp",
          +        org.apache.thrift.TFieldRequirementType.REQUIRED,
                   new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
               metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
               org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTimeRange.class, metaDataMap);
          @@ -101,10 +127,7 @@ public java.lang.String getFieldName() {
             public TTimeRange() {
             }
           
          -  public TTimeRange(
          -    long minStamp,
          -    long maxStamp)
          -  {
          +  public TTimeRange(long minStamp, long maxStamp) {
               this();
               this.minStamp = minStamp;
               setMinStampIsSet(true);
          @@ -144,7 +167,8 @@ public TTimeRange setMinStamp(long minStamp) {
             }
           
             public void unsetMinStamp() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MINSTAMP_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MINSTAMP_ISSET_ID);
             }
           
             /** Returns true if field minStamp is set (has been assigned a value) and false otherwise */
          @@ -153,7 +177,8 @@ public boolean isSetMinStamp() {
             }
           
             public void setMinStampIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MINSTAMP_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MINSTAMP_ISSET_ID, value);
             }
           
             public long getMaxStamp() {
          @@ -167,7 +192,8 @@ public TTimeRange setMaxStamp(long maxStamp) {
             }
           
             public void unsetMaxStamp() {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXSTAMP_ISSET_ID);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __MAXSTAMP_ISSET_ID);
             }
           
             /** Returns true if field maxStamp is set (has been assigned a value) and false otherwise */
          @@ -176,26 +202,28 @@ public boolean isSetMaxStamp() {
             }
           
             public void setMaxStampIsSet(boolean value) {
          -    __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXSTAMP_ISSET_ID, value);
          +    __isset_bitfield =
          +        org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __MAXSTAMP_ISSET_ID, value);
             }
           
          -  public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) {
          +  public void setFieldValue(_Fields field,
          +      @org.apache.thrift.annotation.Nullable java.lang.Object value) {
               switch (field) {
          -    case MIN_STAMP:
          -      if (value == null) {
          -        unsetMinStamp();
          -      } else {
          -        setMinStamp((java.lang.Long)value);
          -      }
          -      break;
          +      case MIN_STAMP:
          +        if (value == null) {
          +          unsetMinStamp();
          +        } else {
          +          setMinStamp((java.lang.Long) value);
          +        }
          +        break;
           
          -    case MAX_STAMP:
          -      if (value == null) {
          -        unsetMaxStamp();
          -      } else {
          -        setMaxStamp((java.lang.Long)value);
          -      }
          -      break;
          +      case MAX_STAMP:
          +        if (value == null) {
          +          unsetMaxStamp();
          +        } else {
          +          setMaxStamp((java.lang.Long) value);
          +        }
          +        break;
           
               }
             }
          @@ -203,60 +231,56 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable
             @org.apache.thrift.annotation.Nullable
             public java.lang.Object getFieldValue(_Fields field) {
               switch (field) {
          -    case MIN_STAMP:
          -      return getMinStamp();
          +      case MIN_STAMP:
          +        return getMinStamp();
           
          -    case MAX_STAMP:
          -      return getMaxStamp();
          +      case MAX_STAMP:
          +        return getMaxStamp();
           
               }
               throw new java.lang.IllegalStateException();
             }
           
          -  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
          +  /**
          +   * Returns true if field corresponding to fieldID is set (has been assigned a value) and false
          +   * otherwise
          +   */
             public boolean isSet(_Fields field) {
               if (field == null) {
                 throw new java.lang.IllegalArgumentException();
               }
           
               switch (field) {
          -    case MIN_STAMP:
          -      return isSetMinStamp();
          -    case MAX_STAMP:
          -      return isSetMaxStamp();
          +      case MIN_STAMP:
          +        return isSetMinStamp();
          +      case MAX_STAMP:
          +        return isSetMaxStamp();
               }
               throw new java.lang.IllegalStateException();
             }
           
             @Override
             public boolean equals(java.lang.Object that) {
          -    if (that instanceof TTimeRange)
          -      return this.equals((TTimeRange)that);
          +    if (that instanceof TTimeRange) return this.equals((TTimeRange) that);
               return false;
             }
           
             public boolean equals(TTimeRange that) {
          -    if (that == null)
          -      return false;
          -    if (this == that)
          -      return true;
          +    if (that == null) return false;
          +    if (this == that) return true;
           
               boolean this_present_minStamp = true;
               boolean that_present_minStamp = true;
               if (this_present_minStamp || that_present_minStamp) {
          -      if (!(this_present_minStamp && that_present_minStamp))
          -        return false;
          -      if (this.minStamp != that.minStamp)
          -        return false;
          +      if (!(this_present_minStamp && that_present_minStamp)) return false;
          +      if (this.minStamp != that.minStamp) return false;
               }
           
               boolean this_present_maxStamp = true;
               boolean that_present_maxStamp = true;
               if (this_present_maxStamp || that_present_maxStamp) {
          -      if (!(this_present_maxStamp && that_present_maxStamp))
          -        return false;
          -      if (this.maxStamp != that.maxStamp)
          -        return false;
          +      if (!(this_present_maxStamp && that_present_maxStamp)) return false;
          +      if (this.maxStamp != that.maxStamp) return false;
               }
           
               return true;
          @@ -313,7 +337,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
               scheme(iprot).read(iprot, this);
             }
           
          -  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
          +  public void write(org.apache.thrift.protocol.TProtocol oprot)
          +      throws org.apache.thrift.TException {
               scheme(oprot).write(oprot, this);
             }
           
          @@ -335,44 +360,52 @@ public java.lang.String toString() {
           
             public void validate() throws org.apache.thrift.TException {
               // check for required fields
          -    // alas, we cannot check 'minStamp' because it's a primitive and you chose the non-beans generator.
          -    // alas, we cannot check 'maxStamp' because it's a primitive and you chose the non-beans generator.
          +    // alas, we cannot check 'minStamp' because it's a primitive and you chose the non-beans
          +    // generator.
          +    // alas, we cannot check 'maxStamp' because it's a primitive and you chose the non-beans
          +    // generator.
               // check for sub-struct validity
             }
           
             private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
               try {
          -      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
          +      write(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(out)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException {
          +  private void readObject(java.io.ObjectInputStream in)
          +      throws java.io.IOException, java.lang.ClassNotFoundException {
               try {
          -      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
          +      // it doesn't seem like you should have to do this, but java serialization is wacky, and
          +      // doesn't call the default constructor.
                 __isset_bitfield = 0;
          -      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
          +      read(new org.apache.thrift.protocol.TCompactProtocol(
          +          new org.apache.thrift.transport.TIOStreamTransport(in)));
               } catch (org.apache.thrift.TException te) {
                 throw new java.io.IOException(te);
               }
             }
           
          -  private static class TTimeRangeStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TTimeRangeStandardSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TTimeRangeStandardScheme getScheme() {
                 return new TTimeRangeStandardScheme();
               }
             }
           
           -  private static class TTimeRangeStandardScheme extends org.apache.thrift.scheme.StandardScheme<TTimeRange> {
           +  private static class TTimeRangeStandardScheme
           +      extends org.apache.thrift.scheme.StandardScheme<TTimeRange> {
           
          -    public void read(org.apache.thrift.protocol.TProtocol iprot, TTimeRange struct) throws org.apache.thrift.TException {
          +    public void read(org.apache.thrift.protocol.TProtocol iprot, TTimeRange struct)
          +        throws org.apache.thrift.TException {
                 org.apache.thrift.protocol.TField schemeField;
                 iprot.readStructBegin();
          -      while (true)
          -      {
          +      while (true) {
                   schemeField = iprot.readFieldBegin();
          -        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
          +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
                     break;
                   }
                   switch (schemeField.id) {
          @@ -380,7 +413,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTimeRange struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.minStamp = iprot.readI64();
                         struct.setMinStampIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -388,7 +421,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTimeRange struct)
                       if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
                         struct.maxStamp = iprot.readI64();
                         struct.setMaxStampIsSet(true);
          -            } else { 
          +            } else {
                         org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                       }
                       break;
          @@ -401,15 +434,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTimeRange struct)
           
                 // check for required fields of primitive type, which can't be checked in the validate method
                 if (!struct.isSetMinStamp()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'minStamp' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'minStamp' was not found in serialized data! Struct: " + toString());
                 }
                 if (!struct.isSetMaxStamp()) {
          -        throw new org.apache.thrift.protocol.TProtocolException("Required field 'maxStamp' was not found in serialized data! Struct: " + toString());
          +        throw new org.apache.thrift.protocol.TProtocolException(
          +            "Required field 'maxStamp' was not found in serialized data! Struct: " + toString());
                 }
                 struct.validate();
               }
           
          -    public void write(org.apache.thrift.protocol.TProtocol oprot, TTimeRange struct) throws org.apache.thrift.TException {
          +    public void write(org.apache.thrift.protocol.TProtocol oprot, TTimeRange struct)
          +        throws org.apache.thrift.TException {
                 struct.validate();
           
                 oprot.writeStructBegin(STRUCT_DESC);
          @@ -425,24 +461,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TTimeRange struct)
           
             }
           
          -  private static class TTimeRangeTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory {
          +  private static class TTimeRangeTupleSchemeFactory
          +      implements org.apache.thrift.scheme.SchemeFactory {
               public TTimeRangeTupleScheme getScheme() {
                 return new TTimeRangeTupleScheme();
               }
             }
           
           -  private static class TTimeRangeTupleScheme extends org.apache.thrift.scheme.TupleScheme<TTimeRange> {
           +  private static class TTimeRangeTupleScheme
           +      extends org.apache.thrift.scheme.TupleScheme<TTimeRange> {
           
               @Override
          -    public void write(org.apache.thrift.protocol.TProtocol prot, TTimeRange struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void write(org.apache.thrift.protocol.TProtocol prot, TTimeRange struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol oprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 oprot.writeI64(struct.minStamp);
                 oprot.writeI64(struct.maxStamp);
               }
           
               @Override
          -    public void read(org.apache.thrift.protocol.TProtocol prot, TTimeRange struct) throws org.apache.thrift.TException {
          -      org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot;
          +    public void read(org.apache.thrift.protocol.TProtocol prot, TTimeRange struct)
          +        throws org.apache.thrift.TException {
          +      org.apache.thrift.protocol.TTupleProtocol iprot =
          +          (org.apache.thrift.protocol.TTupleProtocol) prot;
                 struct.minStamp = iprot.readI64();
                 struct.setMinStampIsSet(true);
                 struct.maxStamp = iprot.readI64();
          @@ -450,8 +492,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TTimeRange struct) t
               }
             }
           
           -  private static <S extends org.apache.thrift.scheme.IScheme> S scheme(org.apache.thrift.protocol.TProtocol proto) {
          -    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme();
           +  private static <S extends org.apache.thrift.scheme.IScheme> S
          +      scheme(org.apache.thrift.protocol.TProtocol proto) {
          +    return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme())
          +        ? STANDARD_SCHEME_FACTORY
          +        : TUPLE_SCHEME_FACTORY).getScheme();
             }
           }
          -
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ErrorThrowingGetObserver.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ErrorThrowingGetObserver.java
          index 683884d0efdf..8daf2ad66e92 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ErrorThrowingGetObserver.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ErrorThrowingGetObserver.java
          @@ -15,13 +15,11 @@
            * See the License for the specific language governing permissions and
            * limitations under the License.
            */
          -
           package org.apache.hadoop.hbase.thrift;
           
           import java.io.IOException;
           import java.util.List;
           import java.util.Optional;
          -
           import org.apache.hadoop.hbase.CallDroppedException;
           import org.apache.hadoop.hbase.CallQueueTooBigException;
           import org.apache.hadoop.hbase.Cell;
           @@ -58,8 +56,8 @@ public Optional<RegionObserver> getRegionObserver() {
             public static final String SHOULD_ERROR_ATTRIBUTE = "error";
           
             @Override
          -  public void preGetOp(ObserverContext e,
          -                       Get get, List results) throws IOException {
          +  public void preGetOp(ObserverContext e, Get get, List results)
          +      throws IOException {
               byte[] errorType = get.getAttribute(SHOULD_ERROR_ATTRIBUTE);
               if (errorType != null) {
                 ErrorType type = ErrorType.valueOf(Bytes.toString(errorType));
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/HBaseThriftTestingUtility.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/HBaseThriftTestingUtility.java
          index c34cf7e5c6de..2fcf0e8b9243 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/HBaseThriftTestingUtility.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/HBaseThriftTestingUtility.java
          @@ -1,5 +1,4 @@
          -/**
          - *
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -19,6 +18,7 @@
           package org.apache.hadoop.hbase.thrift;
           
           import static org.apache.hadoop.hbase.thrift.Constants.INFOPORT_OPTION;
          +
           import java.io.IOException;
           import java.util.ArrayList;
           import java.util.List;
          @@ -27,6 +27,7 @@
           import org.apache.hadoop.hbase.thrift.ThriftMetrics.ThriftServerType;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
          +
           import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
           
           public class HBaseThriftTestingUtility {
          @@ -116,7 +117,7 @@ private void waitForThriftServer() throws Exception {
               }
             }
           
          -  public void stopThriftServer() throws Exception{
          +  public void stopThriftServer() throws Exception {
               LOG.debug("Stopping Thrift Server");
               thriftServer.stop();
               thriftServerThread.join();
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestBindExceptionHandling.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestBindExceptionHandling.java
          index e28929675baf..04467324b94b 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestBindExceptionHandling.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestBindExceptionHandling.java
          @@ -18,6 +18,8 @@
           package org.apache.hadoop.hbase.thrift;
           
           import static org.junit.Assert.assertNotNull;
          +
          +import java.io.IOException;
           import org.apache.hadoop.hbase.HBaseClassTestRule;
           import org.apache.hadoop.hbase.HBaseTestingUtil;
           import org.apache.hadoop.hbase.testclassification.ClientTests;
          @@ -25,13 +27,12 @@
           import org.junit.ClassRule;
           import org.junit.Test;
           import org.junit.experimental.categories.Category;
          -import java.io.IOException;
           
          -@Category({ ClientTests.class, MediumTests.class})
          +@Category({ ClientTests.class, MediumTests.class })
           public class TestBindExceptionHandling {
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestBindExceptionHandling.class);
          +      HBaseClassTestRule.forClass(TestBindExceptionHandling.class);
           
             private static final HBaseTestingUtil HTU = new HBaseTestingUtil();
           
          @@ -40,8 +41,8 @@ public class TestBindExceptionHandling {
              */
             @Test
             public void testProtocolPortClash() throws Exception {
          -    try (ThriftServerRunner tsr = TestThriftServerCmdLine.
          -        createBoundServer(() -> new ThriftServer(HTU.getConfiguration()), true, false)) {
          +    try (ThriftServerRunner tsr = TestThriftServerCmdLine
          +        .createBoundServer(() -> new ThriftServer(HTU.getConfiguration()), true, false)) {
                 assertNotNull(tsr.getThriftServer());
               }
             }
          @@ -51,8 +52,8 @@ public void testProtocolPortClash() throws Exception {
              */
             @Test
             public void testInfoPortClash() throws Exception {
          -    try (ThriftServerRunner tsr = TestThriftServerCmdLine.
          -        createBoundServer(() -> new ThriftServer(HTU.getConfiguration()), false, true)) {
          +    try (ThriftServerRunner tsr = TestThriftServerCmdLine
          +        .createBoundServer(() -> new ThriftServer(HTU.getConfiguration()), false, true)) {
                 assertNotNull(tsr.getThriftServer());
               }
             }
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
          index da84bffe16d1..2f1ccc94524b 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -37,10 +37,9 @@
           import org.slf4j.LoggerFactory;
           
           /**
          - * Unit testing for CallQueue, a part of the
          - * org.apache.hadoop.hbase.thrift package.
          + * Unit testing for CallQueue, a part of the org.apache.hadoop.hbase.thrift package.
            */
          -@Category({ClientTests.class, SmallTests.class})
          +@Category({ ClientTests.class, SmallTests.class })
           @RunWith(Parameterized.class)
           public class TestCallQueue {
           
          @@ -60,9 +59,9 @@ public class TestCallQueue {
             @Parameters
              public static Collection<Object[]> getParameters() {
                Collection<Object[]> parameters = new ArrayList<>();
          -    for (int elementsAdded : new int[] {100, 200, 300}) {
          -      for (int elementsRemoved : new int[] {0, 20, 100}) {
          -        parameters.add(new Object[]{ elementsAdded, elementsRemoved });
          +    for (int elementsAdded : new int[] { 100, 200, 300 }) {
          +      for (int elementsRemoved : new int[] { 0, 20, 100 }) {
          +        parameters.add(new Object[] { elementsAdded, elementsRemoved });
                 }
               }
               return parameters;
           @@ -71,8 +70,7 @@ public static Collection<Object[]> getParameters() {
             public TestCallQueue(int elementsAdded, int elementsRemoved) {
               this.elementsAdded = elementsAdded;
               this.elementsRemoved = elementsRemoved;
          -    LOG.debug("elementsAdded:" + elementsAdded +
          -              " elementsRemoved:" + elementsRemoved);
          +    LOG.debug("elementsAdded:" + elementsAdded + " elementsRemoved:" + elementsRemoved);
           
             }
           
          @@ -109,7 +107,6 @@ private static ThriftMetrics createMetrics() throws Exception {
               return m;
             }
           
          -
             private static void verifyMetrics(ThriftMetrics metrics, String name, int expectValue)
                 throws Exception {
               metricsHelper.assertCounter(name, expectValue, metrics.getSource());
          @@ -124,4 +121,3 @@ public void run() {
             }
           
           }
          -
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
          index b426cd15b2c0..337a545f86b3 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
          @@ -54,17 +54,16 @@
           import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
           
           /**
          - * Start the HBase Thrift HTTP server on a random port through the command-line
          - * interface and talk to it from client side.
          + * Start the HBase Thrift HTTP server on a random port through the command-line interface and talk
          + * to it from client side.
            */
          -@Category({ClientTests.class, LargeTests.class})
          +@Category({ ClientTests.class, LargeTests.class })
           public class TestThriftHttpServer {
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
                 HBaseClassTestRule.forClass(TestThriftHttpServer.class);
           
          -  private static final Logger LOG =
          -      LoggerFactory.getLogger(TestThriftHttpServer.class);
          +  private static final Logger LOG = LoggerFactory.getLogger(TestThriftHttpServer.class);
           
             protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
           
          @@ -73,8 +72,8 @@ public static void setUpBeforeClass() throws Exception {
               TEST_UTIL.getConfiguration().setBoolean(Constants.USE_HTTP_CONF_KEY, true);
               TEST_UTIL.getConfiguration().setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, false);
               TEST_UTIL.startMiniCluster();
          -    //ensure that server time increments every time we do an operation, otherwise
          -    //successive puts having the same timestamp will override each other
          +    // ensure that server time increments every time we do an operation, otherwise
          +    // successive puts having the same timestamp will override each other
               EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
             }
           
          @@ -93,8 +92,8 @@ public void testExceptionThrownWhenMisConfigured() throws IOException {
               ThriftServerRunner tsr = null;
               try {
                 thrown.expect(IllegalArgumentException.class);
          -      thrown.expectMessage("Thrift HTTP Server's QoP is privacy, " +
          -          "but hbase.thrift.ssl.enabled is false");
          +      thrown.expectMessage(
          +        "Thrift HTTP Server's QoP is privacy, " + "but hbase.thrift.ssl.enabled is false");
                 tsr = TestThriftServerCmdLine.createBoundServer(() -> new ThriftServer(conf));
                 fail("Thrift HTTP Server starts up even with wrong security configurations.");
               } catch (Exception e) {
          @@ -135,9 +134,9 @@ public void testRunThriftServer() throws Exception {
           
             void runThriftServer(int customHeaderSize) throws Exception {
               // Add retries in case we see stuff like connection reset
          -    Exception clientSideException =  null;
          +    Exception clientSideException = null;
               for (int i = 0; i < 10; i++) {
          -      clientSideException =  null;
          +      clientSideException = null;
                 ThriftServerRunner tsr = createBoundServer(getThriftServerSupplier());
                 String url = "http://" + HConstants.LOCALHOST + ":" + tsr.getThriftServer().listenPort;
                 try {
          @@ -169,8 +168,8 @@ private void checkHttpMethods(String url) throws Exception {
               HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
               conn.setRequestMethod("TRACE");
               conn.connect();
          -    Assert.assertEquals(conn.getResponseMessage(),
          -      HttpURLConnection.HTTP_FORBIDDEN, conn.getResponseCode());
          +    Assert.assertEquals(conn.getResponseMessage(), HttpURLConnection.HTTP_FORBIDDEN,
          +      conn.getResponseCode());
             }
           
             protected static volatile boolean tableCreated = false;
          @@ -191,7 +190,7 @@ protected void talkToThriftServer(String url, int customHeaderSize) throws Excep
                 TProtocol prot;
                 prot = new TBinaryProtocol(httpClient);
                 Hbase.Client client = new Hbase.Client(prot);
          -      if (!tableCreated){
          +      if (!tableCreated) {
                   TestThriftServer.createTestTables(client);
                   tableCreated = true;
                 }
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
          index 3063b068a34e..d4fc10892792 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
          @@ -87,7 +87,7 @@
            * Unit testing for ThriftServerRunner.HBaseServiceHandler, a part of the
            * org.apache.hadoop.hbase.thrift package.
            */
          -@Category({ClientTests.class, LargeTests.class})
          +@Category({ ClientTests.class, LargeTests.class })
           public class TestThriftServer {
           
             @ClassRule
          @@ -96,13 +96,14 @@ public class TestThriftServer {
           
             private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
             private static final Logger LOG = LoggerFactory.getLogger(TestThriftServer.class);
          -  private static final MetricsAssertHelper metricsHelper = CompatibilityFactory
          -      .getInstance(MetricsAssertHelper.class);
          +  private static final MetricsAssertHelper metricsHelper =
          +      CompatibilityFactory.getInstance(MetricsAssertHelper.class);
             protected static final int MAXVERSIONS = 3;
           
             private static ByteBuffer asByteBuffer(String i) {
               return ByteBuffer.wrap(Bytes.toBytes(i));
             }
          +
             private static ByteBuffer asByteBuffer(long l) {
               return ByteBuffer.wrap(Bytes.toBytes(l));
             }
          @@ -138,10 +139,9 @@ public static void afterClass() throws Exception {
             }
           
             /**
          -   * Runs all of the tests under a single JUnit test method.  We
          -   * consolidate all testing to one method because HBaseClusterTestCase
          -   * is prone to OutOfMemoryExceptions when there are three or more
          -   * JUnit test methods.
          +   * Runs all of the tests under a single JUnit test method. We consolidate all testing to one
          +   * method because HBaseClusterTestCase is prone to OutOfMemoryExceptions when there are three or
          +   * more JUnit test methods.
              */
             @Test
             public void testAll() throws Exception {
          @@ -160,13 +160,11 @@ public void testAll() throws Exception {
             }
           
             /**
          -   * Tests for creating, enabling, disabling, and deleting tables.  Also
          -   * tests that creating a table with an invalid column name yields an
          -   * IllegalArgument exception.
          +   * Tests for creating, enabling, disabling, and deleting tables. Also tests that creating a table
          +   * with an invalid column name yields an IllegalArgument exception.
              */
             public void doTestTableCreateDrop() throws Exception {
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
               doTestTableCreateDrop(handler);
             }
          @@ -179,8 +177,7 @@ public static void doTestTableCreateDrop(Hbase.Iface handler) throws Exception {
             public static final class MySlowHBaseHandler extends ThriftHBaseServiceHandler
                 implements Hbase.Iface {
           
          -    protected MySlowHBaseHandler(Configuration c)
          -        throws IOException {
          +    protected MySlowHBaseHandler(Configuration c) throws IOException {
                 super(c, UserProvider.instantiate(c));
               }
           
           @@ -193,8 +190,8 @@ public List<ByteBuffer> getTableNames() throws IOError {
           
             /**
              * TODO: These counts are supposed to be zero but sometimes they are not, they are equal to the
          -   * passed in maybe.  Investigate why.  My guess is they are set by the test that runs just
          -   * previous to this one.  Sometimes they are cleared.  Sometimes not.
          +   * passed in maybe. Investigate why. My guess is they are set by the test that runs just previous
          +   * to this one. Sometimes they are cleared. Sometimes not.
              */
             private int getCurrentCount(final String name, final int maybe, final ThriftMetrics metrics) {
               int currentCount = 0;
          @@ -232,9 +229,9 @@ public void doTestThriftMetrics() throws Exception {
               // 3 to 6 seconds (to account for potential slowness), measured in nanoseconds
               try {
                 metricsHelper.assertGaugeGt("getTableNames_avg_time", 3L * 1000 * 1000 * 1000,
          -              metrics.getSource());
          -      metricsHelper.assertGaugeLt("getTableNames_avg_time",6L * 1000 * 1000 * 1000,
          -              metrics.getSource());
          +        metrics.getSource());
          +      metricsHelper.assertGaugeLt("getTableNames_avg_time", 6L * 1000 * 1000 * 1000,
          +        metrics.getSource());
               } catch (AssertionError e) {
                 LOG.info("Fix me!  Why does this happen?  A concurrent cluster running?", e);
               }
          @@ -243,19 +240,18 @@ public void doTestThriftMetrics() throws Exception {
             private static Hbase.Iface getHandlerForMetricsTest(ThriftMetrics metrics, Configuration conf)
                 throws Exception {
               Hbase.Iface handler = new MySlowHBaseHandler(conf);
          -    return HbaseHandlerMetricsProxy.newInstance((ThriftHBaseServiceHandler)handler, metrics, conf);
          +    return HbaseHandlerMetricsProxy.newInstance((ThriftHBaseServiceHandler) handler, metrics, conf);
             }
           
             private static ThriftMetrics getMetrics(Configuration conf) throws Exception {
               return new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.ONE);
             }
           
          -
             public static void createTestTables(Hbase.Iface handler) throws Exception {
               // Create/enable/disable/delete tables, ensure methods act correctly
                List<ByteBuffer> bbs = handler.getTableNames();
          -    assertEquals(bbs.stream().map(b -> Bytes.toString(b.array())).
          -      collect(Collectors.joining(",")), 0, bbs.size());
          +    assertEquals(bbs.stream().map(b -> Bytes.toString(b.array())).collect(Collectors.joining(",")),
          +      0, bbs.size());
               handler.createTable(tableAname, getColumnDescriptors());
               assertEquals(1, handler.getTableNames().size());
               assertEquals(2, handler.getColumnDescriptors(tableAname).size());
          @@ -275,18 +271,17 @@ public static void dropTestTables(Hbase.Iface handler) throws Exception {
               assertEquals(1, handler.getTableNames().size());
               handler.disableTable(tableAname);
               assertFalse(handler.isTableEnabled(tableAname));
          -    /* TODO Reenable.
          -    assertFalse(handler.isTableEnabled(tableAname));
          -    handler.enableTable(tableAname);
          -    assertTrue(handler.isTableEnabled(tableAname));
          -    handler.disableTable(tableAname);*/
          +    /*
          +     * TODO Reenable. assertFalse(handler.isTableEnabled(tableAname));
          +     * handler.enableTable(tableAname); assertTrue(handler.isTableEnabled(tableAname));
          +     * handler.disableTable(tableAname);
          +     */
               handler.deleteTable(tableAname);
               assertEquals(0, handler.getTableNames().size());
             }
           
             public void doTestIncrements() throws Exception {
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
               createTestTables(handler);
               doTestIncrements(handler);
          @@ -328,13 +323,11 @@ public static void doTestIncrements(ThriftHBaseServiceHandler handler) throws Ex
             }
           
             /**
          -   * Tests adding a series of Mutations and BatchMutations, including a
          -   * delete mutation.  Also tests data retrieval, and getting back multiple
          -   * versions.
          +   * Tests adding a series of Mutations and BatchMutations, including a delete mutation. Also tests
          +   * data retrieval, and getting back multiple versions.
              */
             public void doTestTableMutations() throws Exception {
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
               doTestTableMutations(handler);
             }
          @@ -344,17 +337,15 @@ public static void doTestTableMutations(Hbase.Iface handler) throws Exception {
               handler.createTable(tableAname, getColumnDescriptors());
           
               // Apply a few Mutations to rowA
          -    //     mutations.add(new Mutation(false, columnAname, valueAname));
          -    //     mutations.add(new Mutation(false, columnBname, valueBname));
          +    // mutations.add(new Mutation(false, columnAname, valueAname));
          +    // mutations.add(new Mutation(false, columnBname, valueBname));
               handler.mutateRow(tableAname, rowAname, getMutations(), null);
           
               // Assert that the changes were made
          -    assertEquals(valueAname,
          -      handler.get(tableAname, rowAname, columnAname, null).get(0).value);
          +    assertEquals(valueAname, handler.get(tableAname, rowAname, columnAname, null).get(0).value);
               TRowResult rowResult1 = handler.getRow(tableAname, rowAname, null).get(0);
               assertEquals(rowAname, rowResult1.row);
          -    assertEquals(valueBname,
          -      rowResult1.columns.get(columnBname).value);
          +    assertEquals(valueBname, rowResult1.columns.get(columnBname).value);
           
               // Apply a few BatchMutations for rowA and rowB
               // rowAmutations.add(new Mutation(true, columnAname, null));
          @@ -404,14 +395,12 @@ public static void doTestTableMutations(Hbase.Iface handler) throws Exception {
             }
           
             /**
          -   * Similar to testTableMutations(), except Mutations are applied with
          -   * specific timestamps and data retrieval uses these timestamps to
          -   * extract specific versions of data.
          +   * Similar to testTableMutations(), except Mutations are applied with specific timestamps and data
          +   * retrieval uses these timestamps to extract specific versions of data.
              */
             public void doTestTableTimestampsAndColumns() throws Exception {
               // Setup
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
               handler.createTable(tableAname, getColumnDescriptors());
           
          @@ -433,15 +422,15 @@ public void doTestTableTimestampsAndColumns() throws Exception {
               time2 += 2;
           
               // Assert that the timestamp-related methods retrieve the correct data
          -    assertEquals(2, handler.getVerTs(tableAname, rowAname, columnBname, time2,
          -      MAXVERSIONS, null).size());
          -    assertEquals(1, handler.getVerTs(tableAname, rowAname, columnBname, time1,
          -      MAXVERSIONS, null).size());
          +    assertEquals(2,
          +      handler.getVerTs(tableAname, rowAname, columnBname, time2, MAXVERSIONS, null).size());
          +    assertEquals(1,
          +      handler.getVerTs(tableAname, rowAname, columnBname, time1, MAXVERSIONS, null).size());
           
               TRowResult rowResult1 = handler.getRowTs(tableAname, rowAname, time1, null).get(0);
               TRowResult rowResult2 = handler.getRowTs(tableAname, rowAname, time2, null).get(0);
               // columnA was completely deleted
          -    //assertTrue(Bytes.equals(rowResult1.columns.get(columnAname).value, valueAname));
          +    // assertTrue(Bytes.equals(rowResult1.columns.get(columnAname).value, valueAname));
               assertEquals(rowResult1.columns.get(columnBname).value, valueBname);
               assertEquals(rowResult2.columns.get(columnBname).value, valueCname);
           
          @@ -483,13 +472,12 @@ public void doTestTableTimestampsAndColumns() throws Exception {
             }
           
             /**
          -   * Tests the four different scanner-opening methods (with and without
          -   * a stoprow, with and without a timestamp).
          +   * Tests the four different scanner-opening methods (with and without a stoprow, with and without
          +   * a timestamp).
              */
             public void doTestTableScanners() throws Exception {
               // Setup
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
               handler.createTable(tableAname, getColumnDescriptors());
           
          @@ -511,7 +499,7 @@ public void doTestTableScanners() throws Exception {
               int scanner1 = handler.scannerOpen(tableAname, rowAname, getColumnList(true, true), null);
               TRowResult rowResult1a = handler.scannerGet(scanner1).get(0);
               assertEquals(rowResult1a.row, rowAname);
          -    // This used to be '1'.  I don't know why when we are asking for two columns
          +    // This used to be '1'. I don't know why when we are asking for two columns
               // and when the mutations above would seem to add two columns to the row.
               // -- St.Ack 05/12/2009
               assertEquals(1, rowResult1a.columns.size());
          @@ -525,23 +513,23 @@ public void doTestTableScanners() throws Exception {
               closeScanner(scanner1, handler);
           
               // Test a scanner on all rows and all columns, with timestamp
          -    int scanner2 = handler.scannerOpenTs(tableAname, rowAname, getColumnList(true, true), time1,
          -            null);
          +    int scanner2 =
          +        handler.scannerOpenTs(tableAname, rowAname, getColumnList(true, true), time1, null);
               TRowResult rowResult2a = handler.scannerGet(scanner2).get(0);
               assertEquals(1, rowResult2a.columns.size());
               // column A deleted, does not exist.
          -    //assertTrue(Bytes.equals(rowResult2a.columns.get(columnAname).value, valueAname));
          +    // assertTrue(Bytes.equals(rowResult2a.columns.get(columnAname).value, valueAname));
               assertEquals(rowResult2a.columns.get(columnBname).value, valueBname);
               closeScanner(scanner2, handler);
           
               // Test a scanner on the first row and first column only, no timestamp
               int scanner3 = handler.scannerOpenWithStop(tableAname, rowAname, rowBname,
          -        getColumnList(true, false), null);
          +      getColumnList(true, false), null);
               closeScanner(scanner3, handler);
           
               // Test a scanner on the first row and second column only, with timestamp
               int scanner4 = handler.scannerOpenWithStopTs(tableAname, rowAname, rowBname,
          -        getColumnList(false, true), time1, null);
          +      getColumnList(false, true), time1, null);
               TRowResult rowResult4a = handler.scannerGet(scanner4).get(0);
               assertEquals(1, rowResult4a.columns.size());
               assertEquals(rowResult4a.columns.get(columnBname).value, valueBname);
          @@ -551,7 +539,7 @@ public void doTestTableScanners() throws Exception {
               scanNoSortColumns.setStartRow(rowAname);
               scanNoSortColumns.setStopRow(rowBname);
           
          -    int scanner5 = handler.scannerOpenWithScan(tableAname , scanNoSortColumns, null);
          +    int scanner5 = handler.scannerOpenWithScan(tableAname, scanNoSortColumns, null);
               TRowResult rowResult5 = handler.scannerGet(scanner5).get(0);
               assertEquals(1, rowResult5.columns.size());
               assertEquals(rowResult5.columns.get(columnBname).value, valueCname);
          @@ -561,7 +549,7 @@ public void doTestTableScanners() throws Exception {
               scanSortColumns.setStopRow(rowBname);
               scanSortColumns = scanSortColumns.setSortColumns(true);
           
          -    int scanner6 = handler.scannerOpenWithScan(tableAname ,scanSortColumns, null);
          +    int scanner6 = handler.scannerOpenWithScan(tableAname, scanSortColumns, null);
               TRowResult rowResult6 = handler.scannerGet(scanner6).get(0);
               assertEquals(1, rowResult6.sortedColumns.size());
               assertEquals(rowResult6.sortedColumns.get(0).getCell().value, valueCname);
          @@ -591,7 +579,7 @@ public void doTestTableScanners() throws Exception {
               reversedScan.setStartRow(rowBname);
               reversedScan.setStopRow(rowAname);
           
          -    int scanner8 = handler.scannerOpenWithScan(tableAname , reversedScan, null);
          +    int scanner8 = handler.scannerOpenWithScan(tableAname, reversedScan, null);
                List<TRowResult> results = handler.scannerGet(scanner8);
               handler.scannerClose(scanner8);
               assertEquals(1, results.size());
          @@ -603,31 +591,28 @@ public void doTestTableScanners() throws Exception {
             }
           
             /**
          -   * For HBASE-2556
          -   * Tests for GetTableRegions
          +   * For HBASE-2556 Tests for GetTableRegions
              */
             public void doTestGetTableRegions() throws Exception {
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
               doTestGetTableRegions(handler);
             }
           
          -  public static void doTestGetTableRegions(Hbase.Iface handler)
          -      throws Exception {
          +  public static void doTestGetTableRegions(Hbase.Iface handler) throws Exception {
               assertEquals(0, handler.getTableNames().size());
               handler.createTable(tableAname, getColumnDescriptors());
               assertEquals(1, handler.getTableNames().size());
                List<TRegionInfo> regions = handler.getTableRegions(tableAname);
               int regionCount = regions.size();
          -    assertEquals("empty table should have only 1 region, " +
          -            "but found " + regionCount, 1, regionCount);
          +    assertEquals("empty table should have only 1 region, " + "but found " + regionCount, 1,
          +      regionCount);
               LOG.info("Region found:" + regions.get(0));
               handler.disableTable(tableAname);
               handler.deleteTable(tableAname);
               regionCount = handler.getTableRegions(tableAname).size();
          -    assertEquals("non-existing table should have 0 region, " +
          -            "but found " + regionCount, 0, regionCount);
          +    assertEquals("non-existing table should have 0 region, " + "but found " + regionCount, 0,
          +      regionCount);
             }
           
             public void doTestFilterRegistration() throws Exception {
          @@ -643,8 +628,7 @@ public void doTestFilterRegistration() throws Exception {
             }
           
             public void doTestGetRegionInfo() throws Exception {
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
               doTestGetRegionInfo(handler);
             }
          @@ -654,12 +638,11 @@ public static void doTestGetRegionInfo(Hbase.Iface handler) throws Exception {
               handler.createTable(tableAname, getColumnDescriptors());
               try {
                 handler.mutateRow(tableAname, rowAname, getMutations(), null);
          -      byte[] searchRow = RegionInfo.createRegionName(
          -          TableName.valueOf(tableAname.array()), rowAname.array(),
          -          HConstants.NINES, false);
          +      byte[] searchRow = RegionInfo.createRegionName(TableName.valueOf(tableAname.array()),
          +        rowAname.array(), HConstants.NINES, false);
                 TRegionInfo regionInfo = handler.getRegionInfo(ByteBuffer.wrap(searchRow));
          -      assertTrue(Bytes.toStringBinary(regionInfo.getName()).startsWith(
          -            Bytes.toStringBinary(tableAname)));
          +      assertTrue(
          +        Bytes.toStringBinary(regionInfo.getName()).startsWith(Bytes.toStringBinary(tableAname)));
               } finally {
                 handler.disableTable(tableAname);
                 handler.deleteTable(tableAname);
          @@ -670,8 +653,7 @@ public static void doTestGetRegionInfo(Hbase.Iface handler) throws Exception {
              * Appends the value to a cell and checks that the cell value is updated properly.
              */
             public static void doTestAppend() throws Exception {
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
               handler.createTable(tableAname, getColumnDescriptors());
               try {
          @@ -702,8 +684,7 @@ public static void doTestAppend() throws Exception {
              * the checkAndPut succeeds.
              */
             public static void doTestCheckAndPut() throws Exception {
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
               handler.createTable(tableAname, getColumnDescriptors());
               try {
          @@ -727,9 +708,8 @@ public static void doTestCheckAndPut() throws Exception {
             }
           
             @Test
          -  public void testGetTableNamesWithStatus() throws Exception{
          -    ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +  public void testGetTableNamesWithStatus() throws Exception {
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
                   UserProvider.instantiate(UTIL.getConfiguration()));
           
               createTestTables(handler);
          @@ -748,10 +728,9 @@ public void testGetTableNamesWithStatus() throws Exception{
           
             private static int countTablesByStatus(Boolean isEnabled, Hbase.Iface handler) throws Exception {
               AtomicInteger counter = new AtomicInteger(0);
          -    handler.getTableNamesWithIsTableEnabled().forEach(
          -      (table, tableStatus) -> {
          -        if (tableStatus.equals(isEnabled)) counter.getAndIncrement();
          -      });
          +    handler.getTableNamesWithIsTableEnabled().forEach((table, tableStatus) -> {
          +      if (tableStatus.equals(isEnabled)) counter.getAndIncrement();
          +    });
               return counter.get();
             }
           
          @@ -764,28 +743,28 @@ public void testMetricsWithException() throws Exception {
               final TableName tableName = TableName.valueOf(name.getMethodName());
               try {
                 ColumnFamilyDescriptor columnFamilyDescriptor =
          -        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build();
          +          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build();
                 TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
          -        .setCoprocessor(ErrorThrowingGetObserver.class.getName())
          -        .setColumnFamily(columnFamilyDescriptor).build();
          +          .setCoprocessor(ErrorThrowingGetObserver.class.getName())
          +          .setColumnFamily(columnFamilyDescriptor).build();
           
                 Table table = UTIL.createTable(tableDescriptor, null);
                 long now = EnvironmentEdgeManager.currentTime();
          -      table.put(new Put(Bytes.toBytes(rowkey))
          -        .addColumn(Bytes.toBytes(family), Bytes.toBytes(col), now, Bytes.toBytes("val1")));
          +      table.put(new Put(Bytes.toBytes(rowkey)).addColumn(Bytes.toBytes(family), Bytes.toBytes(col),
          +        now, Bytes.toBytes("val1")));
           
                 Configuration conf = UTIL.getConfiguration();
                 ThriftMetrics metrics = getMetrics(conf);
          -      ThriftHBaseServiceHandler hbaseHandler =
          -        new ThriftHBaseServiceHandler(UTIL.getConfiguration(), UserProvider.instantiate(UTIL.getConfiguration()));
          +      ThriftHBaseServiceHandler hbaseHandler = new ThriftHBaseServiceHandler(
          +          UTIL.getConfiguration(), UserProvider.instantiate(UTIL.getConfiguration()));
                 Hbase.Iface handler = HbaseHandlerMetricsProxy.newInstance(hbaseHandler, metrics, conf);
           
                 ByteBuffer tTableName = asByteBuffer(tableName.getNameAsString());
           
                 // check metrics increment with a successful get
          -      long preGetCounter = metricsHelper.checkCounterExists("getRow_num_ops", metrics.getSource()) ?
          -        metricsHelper.getCounter("getRow_num_ops", metrics.getSource()) :
          -        0;
          +      long preGetCounter = metricsHelper.checkCounterExists("getRow_num_ops", metrics.getSource())
          +          ? metricsHelper.getCounter("getRow_num_ops", metrics.getSource())
          +          : 0;
                  List<TRowResult> tRowResult = handler.getRow(tTableName, asByteBuffer(rowkey), null);
                 assertEquals(1, tRowResult.size());
                 TRowResult tResult = tRowResult.get(0);
          @@ -808,17 +787,16 @@ public void testMetricsWithException() throws Exception {
               }
             }
           
          -  private void testExceptionType(Hbase.Iface handler, ThriftMetrics metrics,
          -                                 ByteBuffer tTableName, String rowkey,
          -                                 ErrorThrowingGetObserver.ErrorType errorType) throws Exception {
          +  private void testExceptionType(Hbase.Iface handler, ThriftMetrics metrics, ByteBuffer tTableName,
          +      String rowkey, ErrorThrowingGetObserver.ErrorType errorType) throws Exception {
               long preGetCounter = metricsHelper.getCounter("getRow_num_ops", metrics.getSource());
               String exceptionKey = errorType.getMetricName();
          -    long preExceptionCounter = metricsHelper.checkCounterExists(exceptionKey, metrics.getSource()) ?
          -        metricsHelper.getCounter(exceptionKey, metrics.getSource()) :
          -        0;
          +    long preExceptionCounter = metricsHelper.checkCounterExists(exceptionKey, metrics.getSource())
          +        ? metricsHelper.getCounter(exceptionKey, metrics.getSource())
          +        : 0;
                Map<ByteBuffer, ByteBuffer> attributes = new HashMap<>();
               attributes.put(asByteBuffer(ErrorThrowingGetObserver.SHOULD_ERROR_ATTRIBUTE),
          -        asByteBuffer(errorType.name()));
          +      asByteBuffer(errorType.name()));
               try {
                  List<TRowResult> tRowResult = handler.getRow(tTableName, asByteBuffer(rowkey), attributes);
                 fail("Get with error attribute should have thrown an exception");
          @@ -830,8 +808,8 @@ private void testExceptionType(Hbase.Iface handler, ThriftMetrics metrics,
             }
           
             /**
          -   * @return a List of ColumnDescriptors for use in creating a table.  Has one
          -   *         default ColumnDescriptor and one ColumnDescriptor with fewer versions
          +   * @return a List of ColumnDescriptors for use in creating a table. Has one default
          +   *         ColumnDescriptor and one ColumnDescriptor with fewer versions
              */
              private static List<ColumnDescriptor> getColumnDescriptors() {
                ArrayList<ColumnDescriptor> cDescriptors = new ArrayList<>(2);
          @@ -842,15 +820,14 @@ private static List getColumnDescriptors() {
               cDescriptors.add(cDescA);
           
               // A slightly customized ColumnDescriptor (only 2 versions)
          -    ColumnDescriptor cDescB = new ColumnDescriptor(columnBname, 2, "NONE",
          -        false, "NONE", 0, 0, false, -1);
          +    ColumnDescriptor cDescB =
          +        new ColumnDescriptor(columnBname, 2, "NONE", false, "NONE", 0, 0, false, -1);
               cDescriptors.add(cDescB);
           
               return cDescriptors;
             }
           
             /**
          -   *
              * @param includeA whether or not to include columnA
              * @param includeB whether or not to include columnB
              * @return a List of column names for use in retrieving a scanner
          @@ -869,8 +846,7 @@ private List getColumnList(boolean includeA, boolean includeB) {
             }
           
             /**
          -   * @return a List of Mutations for a row, with columnA having valueA
          -   *         and columnB having valueB
          +   * @return a List of Mutations for a row, with columnA having valueA and columnB having valueB
              */
              private static List<Mutation> getMutations() {
                List<Mutation> mutations = new ArrayList<>(2);
          @@ -880,16 +856,13 @@ private static List getMutations() {
             }
           
             /**
          -   * @return a List of BatchMutations with the following effects:
          -   *         (rowA, columnA): delete
          -   *         (rowA, columnB): place valueC
          -   *         (rowB, columnA): place valueC
          -   *         (rowB, columnB): place valueD
          +   * @return a List of BatchMutations with the following effects: (rowA, columnA): delete (rowA,
          +   *         columnB): place valueC (rowB, columnA): place valueC (rowB, columnB): place valueD
              */
              private static List<BatchMutation> getBatchMutations() {
                List<BatchMutation> batchMutations = new ArrayList<>(3);
           
          -    // Mutations to rowA.  You can't mix delete and put anymore.
          +    // Mutations to rowA. You can't mix delete and put anymore.
                List<Mutation> rowAmutations = new ArrayList<>(1);
               rowAmutations.add(new Mutation(true, columnAname, null, true));
               batchMutations.add(new BatchMutation(rowAname, rowAmutations));
          @@ -908,23 +881,19 @@ private static List getBatchMutations() {
             }
           
             /**
          -   * Asserts that the passed scanner is exhausted, and then closes
          -   * the scanner.
          -   *
          +   * Asserts that the passed scanner is exhausted, and then closes the scanner.
              * @param scannerId the scanner to close
              * @param handler the HBaseServiceHandler interfacing to HBase
              */
          -  private void closeScanner(
          -      int scannerId, ThriftHBaseServiceHandler handler) throws Exception {
          +  private void closeScanner(int scannerId, ThriftHBaseServiceHandler handler) throws Exception {
               handler.scannerGet(scannerId);
               handler.scannerClose(scannerId);
             }
           
             @Test
             public void testGetThriftServerType() throws Exception {
          -    ThriftHBaseServiceHandler handler =
          -        new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          -            UserProvider.instantiate(UTIL.getConfiguration()));
          +    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
          +        UserProvider.instantiate(UTIL.getConfiguration()));
               assertEquals(TThriftServerType.ONE, handler.getThriftServerType());
             }
           
          @@ -938,8 +907,8 @@ public void testGetThriftServerOneType() throws Exception {
           
               LOG.info("Starting HBase Thrift Server Two");
               THRIFT_TEST_UTIL.startThriftServer(UTIL.getConfiguration(), ThriftServerType.TWO);
          -    try (TTransport transport = new TSocket(InetAddress.getLocalHost().getHostName(),
          -        THRIFT_TEST_UTIL.getServerPort())){
          +    try (TTransport transport =
          +        new TSocket(InetAddress.getLocalHost().getHostName(), THRIFT_TEST_UTIL.getServerPort())) {
                 TProtocol protocol = new TBinaryProtocol(transport);
                 // This is our thrift client.
                 Hbase.Client client = new Hbase.Client(protocol);
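
           [Editor's aside, not part of the patch] The countTablesByStatus helper reformatted above only
           collapses a multi-line lambda into the formatter's brace-on-same-line form; behaviour is
           unchanged. A minimal, self-contained sketch of the same forEach/AtomicInteger counting pattern,
           with a plain in-memory map standing in for getTableNamesWithIsTableEnabled() and all names
           purely illustrative:

           import java.util.Map;
           import java.util.concurrent.atomic.AtomicInteger;

           public class CountByStatusSketch {
             // Count entries whose enabled flag matches the requested status.
             static int countByStatus(Map<String, Boolean> tableStates, boolean isEnabled) {
               AtomicInteger counter = new AtomicInteger(0);
               tableStates.forEach((table, enabled) -> {
                 if (enabled.equals(isEnabled)) {
                   counter.getAndIncrement();
                 }
               });
               return counter.get();
             }

             public static void main(String[] args) {
               Map<String, Boolean> states = Map.of("t1", true, "t2", false, "t3", true);
               System.out.println(countByStatus(states, true)); // prints 2
             }
           }
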
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.java
          index 43b630ed950f..44f7f3cb0aab 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.java
          @@ -24,6 +24,7 @@
           import static org.apache.hadoop.hbase.thrift.Constants.PORT_OPTION;
           import static org.junit.Assert.assertEquals;
           import static org.junit.Assert.assertTrue;
          +
           import java.io.IOException;
           import java.net.BindException;
           import java.net.InetAddress;
          @@ -59,13 +60,14 @@
           import org.junit.runners.Parameterized.Parameters;
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
          +
           import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
           
           /**
          - * Start the HBase Thrift server on a random port through the command-line
          - * interface and talk to it from client side.
          + * Start the HBase Thrift server on a random port through the command-line interface and talk to it
          + * from client side.
            */
          -@Category({ClientTests.class, LargeTests.class})
          +@Category({ ClientTests.class, LargeTests.class })
           @RunWith(Parameterized.class)
           public class TestThriftServerCmdLine {
           
          @@ -73,30 +75,26 @@ public class TestThriftServerCmdLine {
             public static final HBaseClassTestRule CLASS_RULE =
                 HBaseClassTestRule.forClass(TestThriftServerCmdLine.class);
           
          -  private static final Logger LOG =
          -      LoggerFactory.getLogger(TestThriftServerCmdLine.class);
          +  private static final Logger LOG = LoggerFactory.getLogger(TestThriftServerCmdLine.class);
           
             protected final ImplType implType;
             protected boolean specifyFramed;
             protected boolean specifyBindIP;
             protected boolean specifyCompact;
           
          -  protected static final HBaseTestingUtil TEST_UTIL =
          -      new HBaseTestingUtil();
          +  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
           
             @Parameters
              public static Collection<Object[]> getParameters() {
                Collection<Object[]> parameters = new ArrayList<>();
               for (ImplType implType : ImplType.values()) {
          -      for (boolean specifyFramed : new boolean[] {false, true}) {
          -        for (boolean specifyBindIP : new boolean[] {false, true}) {
          +      for (boolean specifyFramed : new boolean[] { false, true }) {
          +        for (boolean specifyBindIP : new boolean[] { false, true }) {
                     if (specifyBindIP && !implType.canSpecifyBindIP) {
                       continue;
                     }
          -          for (boolean specifyCompact : new boolean[] {false, true}) {
          -            parameters.add(new Object[] {
          -              implType, specifyFramed, specifyBindIP, specifyCompact
          -            });
          +          for (boolean specifyCompact : new boolean[] { false, true }) {
          +            parameters.add(new Object[] { implType, specifyFramed, specifyBindIP, specifyCompact });
                     }
                   }
                 }
          @@ -104,8 +102,8 @@ public static Collection getParameters() {
               return parameters;
             }
           
          -  public TestThriftServerCmdLine(ImplType implType, boolean specifyFramed,
          -      boolean specifyBindIP, boolean specifyCompact) {
          +  public TestThriftServerCmdLine(ImplType implType, boolean specifyFramed, boolean specifyBindIP,
          +      boolean specifyCompact) {
               this.implType = implType;
               this.specifyFramed = specifyFramed;
               this.specifyBindIP = specifyBindIP;
          @@ -114,18 +112,16 @@ public TestThriftServerCmdLine(ImplType implType, boolean specifyFramed,
             }
           
             private String getParametersString() {
          -    return "implType=" + implType + ", " +
          -        "specifyFramed=" + specifyFramed + ", " +
          -        "specifyBindIP=" + specifyBindIP + ", " +
          -        "specifyCompact=" + specifyCompact;
          +    return "implType=" + implType + ", " + "specifyFramed=" + specifyFramed + ", "
          +        + "specifyBindIP=" + specifyBindIP + ", " + "specifyCompact=" + specifyCompact;
             }
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
               TEST_UTIL.getConfiguration().setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, false);
               TEST_UTIL.startMiniCluster();
          -    //ensure that server time increments every time we do an operation, otherwise
          -    //successive puts having the same timestamp will override each other
          +    // ensure that server time increments every time we do an operation, otherwise
          +    // successive puts having the same timestamp will override each other
               EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
             }
           
          @@ -159,8 +155,8 @@ static ThriftServerRunner createBoundServer(Supplier thriftServerS
           
              static ThriftServerRunner createBoundServer(Supplier<ThriftServer> thriftServerSupplier,
                 boolean protocolPortClash, boolean infoPortClash) throws Exception {
          -    return createBoundServer(thriftServerSupplier, null, false, false,
          -      false, protocolPortClash, infoPortClash);
          +    return createBoundServer(thriftServerSupplier, null, false, false, false, protocolPortClash,
          +      infoPortClash);
             }
           
              static ThriftServerRunner createBoundServer(Supplier<ThriftServer> thriftServerSupplier,
          @@ -172,7 +168,8 @@ static ThriftServerRunner createBoundServer(Supplier thriftServerS
           
             /**
              * @param protocolPortClash This param is just so we can manufacture a port clash so we can test
          -   *   the code does the right thing when this happens during actual test runs. Ugly but works.
          +   *          the code does the right thing when this happens during actual test runs. Ugly but
          +   *          works.
              * @see TestBindExceptionHandling#testProtocolPortClash()
              */
              static ThriftServerRunner createBoundServer(Supplier<ThriftServer> thriftServerSupplier,
          @@ -230,8 +227,8 @@ static ThriftServerRunner createBoundServer(Supplier thriftServerS
           
                 tsr = startCmdLineThread(thriftServerSupplier, args.toArray(new String[args.size()]));
                 // wait up to 10s for the server to start
          -      for (int ii = 0; ii < 100 && (tsr.getThriftServer().tserver == null &&
          -          tsr.getRunException() == null); ii++) {
          +      for (int ii = 0; ii < 100
          +          && (tsr.getThriftServer().tserver == null && tsr.getRunException() == null); ii++) {
                   Threads.sleep(100);
                 }
                 if (isBindException(tsr.getRunException())) {
          @@ -258,7 +255,7 @@ static ThriftServerRunner createBoundServer(Supplier thriftServerS
               }
               if (tsr.getThriftServer().tserver != null) {
                  Class<? extends TServer> expectedClass =
          -        implType != null ? implType.serverClass : TBoundedThreadPoolServer.class;
          +          implType != null ? implType.serverClass : TBoundedThreadPoolServer.class;
                 assertEquals(expectedClass, tsr.getThriftServer().tserver.getClass());
               }
               return tsr;
          @@ -271,8 +268,8 @@ private static boolean isBindException(Exception cmdLineException) {
               if (cmdLineException instanceof BindException) {
                 return true;
               }
          -    if (cmdLineException.getCause() != null &&
          -        cmdLineException.getCause() instanceof BindException) {
          +    if (cmdLineException.getCause() != null
          +        && cmdLineException.getCause() instanceof BindException) {
                 return true;
               }
               return false;
          @@ -281,9 +278,9 @@ private static boolean isBindException(Exception cmdLineException) {
             @Test
             public void testRunThriftServer() throws Exception {
               // Add retries in case we see stuff like connection reset
          -    Exception clientSideException =  null;
          +    Exception clientSideException = null;
               for (int i = 0; i < 10; i++) {
          -      clientSideException =  null;
          +      clientSideException = null;
                 ThriftServerRunner thriftServerRunner = createBoundServer(getThriftServerSupplier(),
                   this.implType, this.specifyFramed, this.specifyCompact, this.specifyBindIP);
                 try {
          @@ -330,7 +327,7 @@ protected void talkToThriftServer(int port) throws Exception {
                 }
           
                 Hbase.Client client = new Hbase.Client(prot);
          -      if (!tableCreated){
          +      if (!tableCreated) {
                   TestThriftServer.createTestTables(client);
                   tableCreated = true;
                 }
          @@ -341,4 +338,3 @@ protected void talkToThriftServer(int port) throws Exception {
               }
             }
           }
          -
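
           [Editor's aside, not part of the patch] The getParameters() hunk above keeps the same nested
           loops over ImplType and the boolean switches; only brace spacing changes. A self-contained
           sketch of that JUnit @Parameters matrix, with a placeholder enum instead of ImplType:

           import java.util.ArrayList;
           import java.util.Collection;

           public class ParameterMatrixSketch {
             enum ServerKind { SIMPLE, THREADED } // stand-in for ImplType

             // Produce every combination of server kind and the two boolean switches,
             // in the Object[] shape a JUnit @Parameters method returns.
             static Collection<Object[]> parameters() {
               Collection<Object[]> parameters = new ArrayList<>();
               for (ServerKind kind : ServerKind.values()) {
                 for (boolean framed : new boolean[] { false, true }) {
                   for (boolean compact : new boolean[] { false, true }) {
                     parameters.add(new Object[] { kind, framed, compact });
                   }
                 }
               }
               return parameters;
             }

             public static void main(String[] args) {
               System.out.println(parameters().size()); // 2 kinds * 2 * 2 = 8 combinations
             }
           }
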
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpFallbackServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpFallbackServer.java
          index 20470f9d39aa..8f24248ece8d 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpFallbackServer.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpFallbackServer.java
          @@ -21,6 +21,7 @@
           import static org.junit.Assert.assertFalse;
           import static org.junit.Assert.assertNotNull;
           import static org.junit.Assert.assertTrue;
          +
           import java.io.File;
           import java.nio.file.Paths;
           import java.security.Principal;
          @@ -68,21 +69,19 @@
           import org.slf4j.LoggerFactory;
           
           /**
          - * Start the HBase Thrift HTTP server on a random port through the command-line
          - * interface and talk to it from client side with SPNEGO security enabled.
          - *
          - * Supplemental test to TestThriftSpnegoHttpServer which falls back to the original
          - * Kerberos principal and keytab configuration properties, not the separate
          - * SPNEGO-specific properties.
          + * Start the HBase Thrift HTTP server on a random port through the command-line interface and talk
          + * to it from client side with SPNEGO security enabled. Supplemental test to
          + * TestThriftSpnegoHttpServer which falls back to the original Kerberos principal and keytab
          + * configuration properties, not the separate SPNEGO-specific properties.
            */
          -@Category({ClientTests.class, LargeTests.class})
          +@Category({ ClientTests.class, LargeTests.class })
           public class TestThriftSpnegoHttpFallbackServer extends TestThriftHttpServer {
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestThriftSpnegoHttpFallbackServer.class);
          +      HBaseClassTestRule.forClass(TestThriftSpnegoHttpFallbackServer.class);
           
             private static final Logger LOG =
          -    LoggerFactory.getLogger(TestThriftSpnegoHttpFallbackServer.class);
          +      LoggerFactory.getLogger(TestThriftSpnegoHttpFallbackServer.class);
           
             private static SimpleKdcServer kdc;
             private static File serverKeytab;
          @@ -103,8 +102,7 @@ private static void addSecurityConfigurations(Configuration conf) {
               conf.set(Constants.THRIFT_KERBEROS_PRINCIPAL_KEY, serverPrincipal);
               conf.set(Constants.THRIFT_KEYTAB_FILE_KEY, serverKeytab.getAbsolutePath());
           
          -    HBaseKerberosUtils.setSecuredConfiguration(conf, spnegoServerPrincipal,
          -      spnegoServerPrincipal);
          +    HBaseKerberosUtils.setSecuredConfiguration(conf, spnegoServerPrincipal, spnegoServerPrincipal);
               conf.set("hadoop.proxyuser.HTTP.hosts", "*");
               conf.set("hadoop.proxyuser.HTTP.groups", "*");
               conf.set(Constants.THRIFT_KERBEROS_PRINCIPAL_KEY, spnegoServerPrincipal);
          @@ -112,9 +110,8 @@ private static void addSecurityConfigurations(Configuration conf) {
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          -    kdc = SimpleKdcServerUtil.
          -      getRunningSimpleKdcServer(new File(TEST_UTIL.getDataTestDir().toString()),
          -        HBaseTestingUtil::randomFreePort);
          +    kdc = SimpleKdcServerUtil.getRunningSimpleKdcServer(
          +      new File(TEST_UTIL.getDataTestDir().toString()), HBaseTestingUtil::randomFreePort);
           
               File keytabDir = Paths.get(TEST_UTIL.getRandomDir().toString()).toAbsolutePath().toFile();
               assertTrue(keytabDir.mkdirs());
          @@ -153,10 +150,8 @@ public static void tearDownAfterClass() throws Exception {
             @Override
             protected void talkToThriftServer(String url, int customHeaderSize) throws Exception {
               // Close httpClient and THttpClient automatically on any failures
          -    try (
          -      CloseableHttpClient httpClient = createHttpClient();
          -      THttpClient tHttpClient = new THttpClient(url, httpClient)
          -    ) {
          +    try (CloseableHttpClient httpClient = createHttpClient();
          +        THttpClient tHttpClient = new THttpClient(url, httpClient)) {
                 tHttpClient.open();
                 if (customHeaderSize > 0) {
                   StringBuilder sb = new StringBuilder();
          @@ -178,15 +173,13 @@ private CloseableHttpClient createHttpClient() throws Exception {
               final Subject clientSubject = JaasKrbUtil.loginUsingKeytab(clientPrincipal, clientKeytab);
                final Set<Principal> clientPrincipals = clientSubject.getPrincipals();
               // Make sure the subject has a principal
          -    assertFalse("Found no client principals in the clientSubject.",
          -      clientPrincipals.isEmpty());
          +    assertFalse("Found no client principals in the clientSubject.", clientPrincipals.isEmpty());
           
               // Get a TGT for the subject (might have many, different encryption types). The first should
               // be the default encryption type.
                Set<KerberosTicket> privateCredentials =
          -      clientSubject.getPrivateCredentials(KerberosTicket.class);
          -    assertFalse("Found no private credentials in the clientSubject.",
          -      privateCredentials.isEmpty());
          +        clientSubject.getPrivateCredentials(KerberosTicket.class);
          +    assertFalse("Found no private credentials in the clientSubject.", privateCredentials.isEmpty());
               KerberosTicket tgt = privateCredentials.iterator().next();
               assertNotNull("No kerberos ticket found.", tgt);
           
          @@ -202,33 +195,32 @@ private CloseableHttpClient createHttpClient() throws Exception {
                 GSSCredential credential = gssManager.createCredential(gssClient,
                   GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY);
           
           -      Lookup<AuthSchemeProvider> authRegistry = RegistryBuilder.<AuthSchemeProvider>create()
          -        .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true))
          -        .build();
           +      Lookup<AuthSchemeProvider> authRegistry = RegistryBuilder.<AuthSchemeProvider> create()
          +          .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build();
           
                 BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
                 credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential));
           
          -      return HttpClients.custom()
          -        .setDefaultAuthSchemeRegistry(authRegistry)
          -        .setDefaultCredentialsProvider(credentialsProvider)
          -        .build();
          +      return HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry)
          +          .setDefaultCredentialsProvider(credentialsProvider).build();
               });
             }
           
           -  @Override protected Supplier<ThriftServer> getThriftServerSupplier() {
          +  @Override
           +  protected Supplier<ThriftServer> getThriftServerSupplier() {
               return () -> new ThriftServer(TEST_UTIL.getConfiguration());
             }
           
             /**
          -   * Block call through to this method. It is a messy test that fails because of bad config
          -   * and then succeeds only the first attempt adds a table which the second attempt doesn't
          -   * want to be in place to succeed. Let the super impl of this test be responsible for
          -   * verifying we fail if bad header size.
          +   * Block call through to this method. It is a messy test that fails because of bad config and then
          +   * succeeds only the first attempt adds a table which the second attempt doesn't want to be in
          +   * place to succeed. Let the super impl of this test be responsible for verifying we fail if bad
          +   * header size.
              */
             @org.junit.Ignore
             @Test
          -  @Override public void testRunThriftServerWithHeaderBufferLength() throws Exception {
          +  @Override
          +  public void testRunThriftServerWithHeaderBufferLength() throws Exception {
               super.testRunThriftServerWithHeaderBufferLength();
             }
           }
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpServer.java
          index 2ec903df6df4..ba8c561def70 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpServer.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpServer.java
          @@ -20,6 +20,7 @@
           import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SUPPORT_PROXYUSER_KEY;
           import static org.junit.Assert.assertFalse;
           import static org.junit.Assert.assertNotNull;
          +
           import java.io.File;
           import java.net.InetAddress;
           import java.nio.ByteBuffer;
          @@ -72,17 +73,16 @@
           import org.slf4j.LoggerFactory;
           
           /**
          - * Start the HBase Thrift HTTP server on a random port through the command-line
          - * interface and talk to it from client side with SPNEGO security enabled.
          + * Start the HBase Thrift HTTP server on a random port through the command-line interface and talk
          + * to it from client side with SPNEGO security enabled.
            */
          -@Category({ClientTests.class, LargeTests.class})
          +@Category({ ClientTests.class, LargeTests.class })
           public class TestThriftSpnegoHttpServer extends TestThriftHttpServer {
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          -    HBaseClassTestRule.forClass(TestThriftSpnegoHttpServer.class);
          +      HBaseClassTestRule.forClass(TestThriftSpnegoHttpServer.class);
           
          -  private static final Logger LOG =
          -    LoggerFactory.getLogger(TestThriftSpnegoHttpServer.class);
          +  private static final Logger LOG = LoggerFactory.getLogger(TestThriftSpnegoHttpServer.class);
           
             private static SimpleKdcServer kdc;
             private static File serverKeytab;
          @@ -113,9 +113,8 @@ private static void addSecurityConfigurations(Configuration conf) {
           
             @BeforeClass
             public static void setUpBeforeClass() throws Exception {
          -    kdc = SimpleKdcServerUtil.
          -      getRunningSimpleKdcServer(new File(TEST_UTIL.getDataTestDir().toString()),
          -        HBaseTestingUtil::randomFreePort);
          +    kdc = SimpleKdcServerUtil.getRunningSimpleKdcServer(
          +      new File(TEST_UTIL.getDataTestDir().toString()), HBaseTestingUtil::randomFreePort);
               File keytabDir = Paths.get(TEST_UTIL.getRandomDir().toString()).toAbsolutePath().toFile();
               Assert.assertTrue(keytabDir.mkdirs());
           
          @@ -139,7 +138,8 @@ public static void setUpBeforeClass() throws Exception {
               TestThriftHttpServer.setUpBeforeClass();
             }
           
           -  @Override protected Supplier<ThriftServer> getThriftServerSupplier() {
          +  @Override
           +  protected Supplier<ThriftServer> getThriftServerSupplier() {
               return () -> new ThriftServer(TEST_UTIL.getConfiguration());
             }
           
          @@ -158,24 +158,23 @@ public static void tearDownAfterClass() throws Exception {
             }
           
             /**
          -   * Block call through to this method. It is a messy test that fails because of bad config
          -   * and then succeeds only the first attempt adds a table which the second attempt doesn't
          -   * want to be in place to succeed. Let the super impl of this test be responsible for
          -   * verifying we fail if bad header size.
          +   * Block call through to this method. It is a messy test that fails because of bad config and then
          +   * succeeds only the first attempt adds a table which the second attempt doesn't want to be in
          +   * place to succeed. Let the super impl of this test be responsible for verifying we fail if bad
          +   * header size.
              */
             @org.junit.Ignore
             @Test
          -  @Override public void testRunThriftServerWithHeaderBufferLength() throws Exception {
          +  @Override
          +  public void testRunThriftServerWithHeaderBufferLength() throws Exception {
               super.testRunThriftServerWithHeaderBufferLength();
             }
           
             @Override
             protected void talkToThriftServer(String url, int customHeaderSize) throws Exception {
               // Close httpClient and THttpClient automatically on any failures
          -    try (
          -        CloseableHttpClient httpClient = createHttpClient();
          -        THttpClient tHttpClient = new THttpClient(url, httpClient)
          -    ) {
          +    try (CloseableHttpClient httpClient = createHttpClient();
          +        THttpClient tHttpClient = new THttpClient(url, httpClient)) {
                 tHttpClient.open();
                 if (customHeaderSize > 0) {
                   StringBuilder sb = new StringBuilder();
          @@ -188,10 +187,10 @@ protected void talkToThriftServer(String url, int customHeaderSize) throws Excep
                 TProtocol prot = new TBinaryProtocol(tHttpClient);
                 Hbase.Client client = new Hbase.Client(prot);
                  List<ByteBuffer> bbs = client.getTableNames();
          -      LOG.info("PRE-EXISTING {}", bbs.stream().
          -        map(b -> Bytes.toString(b.array())).collect(Collectors.joining(",")));
          +      LOG.info("PRE-EXISTING {}",
          +        bbs.stream().map(b -> Bytes.toString(b.array())).collect(Collectors.joining(",")));
                 if (!bbs.isEmpty()) {
          -        for (ByteBuffer bb: bbs) {
          +        for (ByteBuffer bb : bbs) {
                     client.disableTable(bb);
                     client.deleteTable(bb);
                   }
          @@ -206,15 +205,13 @@ private CloseableHttpClient createHttpClient() throws Exception {
               final Subject clientSubject = JaasKrbUtil.loginUsingKeytab(clientPrincipal, clientKeytab);
                final Set<Principal> clientPrincipals = clientSubject.getPrincipals();
               // Make sure the subject has a principal
          -    assertFalse("Found no client principals in the clientSubject.",
          -      clientPrincipals.isEmpty());
          +    assertFalse("Found no client principals in the clientSubject.", clientPrincipals.isEmpty());
           
               // Get a TGT for the subject (might have many, different encryption types). The first should
               // be the default encryption type.
                Set<KerberosTicket> privateCredentials =
                   clientSubject.getPrivateCredentials(KerberosTicket.class);
          -    assertFalse("Found no private credentials in the clientSubject.",
          -      privateCredentials.isEmpty());
          +    assertFalse("Found no private credentials in the clientSubject.", privateCredentials.isEmpty());
               KerberosTicket tgt = privateCredentials.iterator().next();
               assertNotNull("No kerberos ticket found.", tgt);
           
          @@ -228,19 +225,16 @@ private CloseableHttpClient createHttpClient() throws Exception {
                 Oid oid = new Oid("1.2.840.113554.1.2.2");
                 GSSName gssClient = gssManager.createName(clientPrincipalName, GSSName.NT_USER_NAME);
                 GSSCredential credential = gssManager.createCredential(gssClient,
          -          GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY);
          +        GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY);
           
           -      Lookup<AuthSchemeProvider> authRegistry = RegistryBuilder.<AuthSchemeProvider>create()
          -          .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true))
          -          .build();
           +      Lookup<AuthSchemeProvider> authRegistry = RegistryBuilder.<AuthSchemeProvider> create()
          +          .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build();
           
                 BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
                 credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential));
           
          -      return HttpClients.custom()
          -          .setDefaultAuthSchemeRegistry(authRegistry)
          -          .setDefaultCredentialsProvider(credentialsProvider)
          -          .build();
          +      return HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry)
          +          .setDefaultCredentialsProvider(credentialsProvider).build();
               });
             }
           }
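
           [Editor's aside, not part of the patch] Both SPNEGO test classes above reflow the same Apache
           HttpClient 4.x wiring; the call the formatter renders as RegistryBuilder.<AuthSchemeProvider>
           create() is simply an explicit type witness. A sketch of that auth-scheme registry in
           isolation, assuming HttpClient 4.5 on the classpath and omitting the Kerberos credential setup:

           import org.apache.http.auth.AuthSchemeProvider;
           import org.apache.http.client.config.AuthSchemes;
           import org.apache.http.config.Lookup;
           import org.apache.http.config.RegistryBuilder;
           import org.apache.http.impl.auth.SPNegoSchemeFactory;
           import org.apache.http.impl.client.CloseableHttpClient;
           import org.apache.http.impl.client.HttpClients;

           public class SpnegoRegistrySketch {
             // Register only the SPNEGO auth scheme and build a client around it, as the
             // reformatted createHttpClient() helpers above do before wiring in the
             // Kerberos credentials provider (omitted here).
             static CloseableHttpClient newSpnegoClient() {
               Lookup<AuthSchemeProvider> authRegistry = RegistryBuilder.<AuthSchemeProvider> create()
                   .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build();
               return HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build();
             }
           }
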
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
          index a006045e71b2..701daeb58526 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
          @@ -17,23 +17,23 @@
            */
           package org.apache.hadoop.hbase.thrift;
           
          -import org.slf4j.Logger;
          -import org.slf4j.LoggerFactory;
           import java.io.Closeable;
           import java.io.IOException;
          +import org.slf4j.Logger;
          +import org.slf4j.LoggerFactory;
           
           /**
            * Run ThriftServer with passed arguments. Access the exception thrown after we complete run -- if
          - * an exception thrown -- via {@link #getRunException()}}. Call close to shutdown this Runner
          - * and hosted {@link ThriftServer}.
          + * an exception thrown -- via {@link #getRunException()}}. Call close to shutdown this Runner and
          + * hosted {@link ThriftServer}.
            */
           class ThriftServerRunner extends Thread implements Closeable {
             private static final Logger LOG = LoggerFactory.getLogger(ThriftServerRunner.class);
             Exception exception = null;
             private final ThriftServer thriftServer;
          -  private final String [] args;
          +  private final String[] args;
           
          -  ThriftServerRunner(ThriftServer thriftServer, String [] args) {
          +  ThriftServerRunner(ThriftServer thriftServer, String[] args) {
               this.thriftServer = thriftServer;
               this.args = args;
               LOG.info("thriftServer={}, args={}", getThriftServer(), args);
          @@ -50,7 +50,8 @@ Exception getRunException() {
               return this.exception;
             }
           
          -  @Override public void run() {
          +  @Override
          +  public void run() {
               try {
                 this.thriftServer.run(this.args);
               } catch (Exception e) {
          @@ -59,7 +60,8 @@ Exception getRunException() {
               }
             }
           
          -  @Override public void close() throws IOException {
          +  @Override
          +  public void close() throws IOException {
               LOG.info("Stopping {}", this);
               this.thriftServer.stop();
             }
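
           [Editor's aside, not part of the patch] ThriftServerRunner above is a small Thread-plus-Closeable
           wrapper that records whatever the hosted server throws so a test can inspect it later. A generic
           sketch of that shape, with a Runnable standing in for ThriftServer:

           import java.io.Closeable;
           import java.io.IOException;

           public class BackgroundRunnerSketch extends Thread implements Closeable {
             private final Runnable task; // stand-in for the hosted ThriftServer
             private volatile Exception exception;

             BackgroundRunnerSketch(Runnable task) {
               this.task = task;
             }

             Exception getRunException() {
               return this.exception; // null unless run() failed
             }

             @Override
             public void run() {
               try {
                 task.run();
               } catch (Exception e) {
                 this.exception = e; // surfaced to the test via getRunException()
               }
             }

             @Override
             public void close() throws IOException {
               // A real runner stops the hosted server here; a bare Runnable has nothing to stop.
             }
           }
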
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThrift2HttpServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThrift2HttpServer.java
          index d5a05edb89f7..79d3d4e6c55e 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThrift2HttpServer.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThrift2HttpServer.java
          @@ -19,7 +19,6 @@
           
           import java.util.ArrayList;
           import java.util.function.Supplier;
          -
           import org.apache.hadoop.hbase.HBaseClassTestRule;
           import org.apache.hadoop.hbase.testclassification.ClientTests;
           import org.apache.hadoop.hbase.testclassification.MediumTests;
          @@ -37,7 +36,7 @@
           import org.junit.ClassRule;
           import org.junit.experimental.categories.Category;
           
          -@Category({ ClientTests.class, MediumTests.class})
          +@Category({ ClientTests.class, MediumTests.class })
           public class TestThrift2HttpServer extends TestThriftHttpServer {
             private static final String TABLENAME = "TestThrift2HttpServerTable";
           
          @@ -45,7 +44,8 @@ public class TestThrift2HttpServer extends TestThriftHttpServer {
             public static final HBaseClassTestRule CLASS_RULE =
                 HBaseClassTestRule.forClass(TestThrift2HttpServer.class);
           
           -  @Override protected Supplier<ThriftServer> getThriftServerSupplier() {
          +  @Override
           +  protected Supplier<ThriftServer> getThriftServerSupplier() {
               return () -> new org.apache.hadoop.hbase.thrift2.ThriftServer(TEST_UTIL.getConfiguration());
             }
           
          @@ -69,7 +69,7 @@ protected void talkToThriftServer(String url, int customHeaderSize) throws Excep
                 TTableName tTableName = new TTableName();
                 tTableName.setNs(Bytes.toBytes(""));
                 tTableName.setQualifier(Bytes.toBytes(TABLENAME));
          -      if (!tableCreated){
          +      if (!tableCreated) {
                   Assert.assertTrue(!client.tableExists(tTableName));
                   TTableDescriptor tTableDescriptor = new TTableDescriptor();
                   tTableDescriptor.setTableName(tTableName);
          @@ -85,5 +85,4 @@ protected void talkToThriftServer(String url, int customHeaderSize) throws Excep
               }
             }
           
          -
           }
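
           [Editor's aside, not part of the patch] The talkToThriftServer() body above builds a thrift2
           table name and descriptor before creating the test table. Pulled out on its own, and assuming
           the generated hbase-thrift classes on the classpath, that construction is roughly:

           import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
           import org.apache.hadoop.hbase.thrift2.generated.TTableName;
           import org.apache.hadoop.hbase.util.Bytes;

           public class Thrift2TableNameSketch {
             // Build a table name in the default (empty) namespace and wrap it in a
             // descriptor, mirroring the reformatted test body above.
             static TTableDescriptor describe(String tableName) {
               TTableName tTableName = new TTableName();
               tTableName.setNs(Bytes.toBytes(""));
               tTableName.setQualifier(Bytes.toBytes(tableName));
               TTableDescriptor tTableDescriptor = new TTableDescriptor();
               tTableDescriptor.setTableName(tTableName);
               return tTableDescriptor;
             }
           }
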
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThrift2ServerCmdLine.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThrift2ServerCmdLine.java
          index 5ab96f60ec6a..df2a118bd9fd 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThrift2ServerCmdLine.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThrift2ServerCmdLine.java
          @@ -20,7 +20,6 @@
           import java.net.InetAddress;
           import java.util.ArrayList;
           import java.util.function.Supplier;
          -
           import org.apache.hadoop.hbase.HBaseClassTestRule;
           import org.apache.hadoop.hbase.testclassification.ClientTests;
           import org.apache.hadoop.hbase.testclassification.MediumTests;
          @@ -42,7 +41,7 @@
           import org.junit.ClassRule;
           import org.junit.experimental.categories.Category;
           
          -@Category({ ClientTests.class, MediumTests.class})
          +@Category({ ClientTests.class, MediumTests.class })
           public class TestThrift2ServerCmdLine extends TestThriftServerCmdLine {
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          @@ -50,12 +49,13 @@ public class TestThrift2ServerCmdLine extends TestThriftServerCmdLine {
           
             private static final String TABLENAME = "TestThrift2ServerCmdLineTable";
           
          -  public TestThrift2ServerCmdLine(ImplType implType, boolean specifyFramed,
          -      boolean specifyBindIP, boolean specifyCompact) {
          +  public TestThrift2ServerCmdLine(ImplType implType, boolean specifyFramed, boolean specifyBindIP,
          +      boolean specifyCompact) {
               super(implType, specifyFramed, specifyBindIP, specifyCompact);
             }
           
           -  @Override protected Supplier<ThriftServer> getThriftServerSupplier() {
          +  @Override
           +  protected Supplier<ThriftServer> getThriftServerSupplier() {
               return () -> new org.apache.hadoop.hbase.thrift2.ThriftServer(TEST_UTIL.getConfiguration());
             }
           
          @@ -79,7 +79,7 @@ protected void talkToThriftServer(int port) throws Exception {
                 TTableName tTableName = new TTableName();
                 tTableName.setNs(Bytes.toBytes(""));
                 tTableName.setQualifier(Bytes.toBytes(TABLENAME));
          -      if (!tableCreated){
          +      if (!tableCreated) {
                   Assert.assertTrue(!client.tableExists(tTableName));
                   TTableDescriptor tTableDescriptor = new TTableDescriptor();
                   tTableDescriptor.setTableName(tTableName);
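
           [Editor's aside, not part of the patch] Like the thrift2 HTTP test above, TestThrift2ServerCmdLine
           only overrides getThriftServerSupplier() so the shared base test drives the thrift2 server
           instead of the thrift one. The supplier-override pattern on its own, with placeholder types:

           import java.util.function.Supplier;

           public class SupplierOverrideSketch {
             // Base test exposes a factory hook that subclasses replace with another implementation.
             static class BaseServerTest {
               protected Supplier<Runnable> getServerSupplier() {
                 return () -> () -> System.out.println("v1 server");
               }
             }

             static class V2ServerTest extends BaseServerTest {
               @Override
               protected Supplier<Runnable> getServerSupplier() {
                 return () -> () -> System.out.println("v2 server");
               }
             }

             public static void main(String[] args) {
               new V2ServerTest().getServerSupplier().get().run(); // prints "v2 server"
             }
           }
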
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
          index 2eb1e4c8fbb5..c94d8e84b26b 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -74,11 +74,10 @@
           import org.slf4j.Logger;
           import org.slf4j.LoggerFactory;
           
          -@Category({ RestTests.class, MediumTests.class})
          +@Category({ RestTests.class, MediumTests.class })
           
           public class TestThriftConnection {
          -  private static final Logger LOG =
          -      LoggerFactory.getLogger(TestThriftConnection.class);
          +  private static final Logger LOG = LoggerFactory.getLogger(TestThriftConnection.class);
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
          @@ -103,7 +102,6 @@ public class TestThriftConnection {
             private static final long TS_2 = EnvironmentEdgeManager.currentTime();
             private static final long TS_1 = TS_2 - ONE_HOUR;
           
          -
             protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
           
             protected static ThriftServer thriftServer;
          @@ -126,7 +124,7 @@ private static ThriftServer startThriftServer(int port, boolean useHttp) {
               }
               ThriftServer server = new ThriftServer(thriftServerConf);
               Thread thriftServerThread = new Thread(() -> {
          -      try{
          +      try {
                   server.run();
                 } catch (Exception t) {
                   LOG.error("Thrift Server failed", t);
          @@ -144,11 +142,10 @@ private static ThriftServer startThriftServer(int port, boolean useHttp) {
           
             private static Connection createConnection(int port, boolean useHttp) throws IOException {
               Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
          -    conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
          -        ThriftConnection.class.getName());
          +    conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ThriftConnection.class.getName());
               if (useHttp) {
                 conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
          -          ThriftConnection.HTTPThriftClientBuilder.class.getName());
          +        ThriftConnection.HTTPThriftClientBuilder.class.getName());
               }
               String host = HConstants.LOCALHOST;
               if (useHttp) {
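
The createConnection() helper above routes the ordinary client API through the thrift gateway purely via configuration. A minimal sketch of that wiring, limited to the keys visible in this hunk (the thrift server host and port settings elided here still have to be applied before the connection is created):

    // Hedged sketch: point ConnectionFactory at the thrift-backed Connection implementation.
    Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
    conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ThriftConnection.class.getName());
    if (useHttp) {
      // Switch to the HTTP client builder when the thrift server runs in HTTP mode.
      conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
        ThriftConnection.HTTPThriftClientBuilder.class.getName());
    }
    // ...thrift server host/port settings (outside this hunk) go here...
    Connection connection = ConnectionFactory.createConnection(conf);
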
          @@ -159,11 +156,10 @@ private static Connection createConnection(int port, boolean useHttp) throws IOE
               return ConnectionFactory.createConnection(conf);
             }
           
          -
             @BeforeClass
             public static void setUp() throws Exception {
               // Do not start info server
          -    TEST_UTIL.getConfiguration().setInt(THRIFT_INFO_SERVER_PORT , -1);
          +    TEST_UTIL.getConfiguration().setInt(THRIFT_INFO_SERVER_PORT, -1);
               TEST_UTIL.startMiniCluster();
               thriftPort = HBaseTestingUtil.randomFreePort();
               httpPort = HBaseTestingUtil.randomFreePort();
          @@ -199,7 +195,7 @@ public static void shutdown() throws Exception {
             @Test
             public void testGetClusterId() {
               String actualClusterId = TEST_UTIL.getMiniHBaseCluster().getMaster().getClusterId();
          -    for (Connection conn: new Connection[] {thriftConnection, thriftHttpConnection}) {
          +    for (Connection conn : new Connection[] { thriftConnection, thriftHttpConnection }) {
                 String thriftClusterId = conn.getClusterId();
                 assertEquals(actualClusterId, thriftClusterId);
               }
          @@ -209,7 +205,7 @@ public void testGetClusterId() {
             public void testThriftAdmin() throws Exception {
               testThriftAdmin(thriftConnection, "testThriftAdminNamespace", "testThriftAdminTable");
               testThriftAdmin(thriftHttpConnection, "testThriftHttpAdminNamespace",
          -        "testThriftHttpAdminTable");
          +      "testThriftHttpAdminTable");
             }
           
             @Test
          @@ -221,7 +217,7 @@ public void testGet() throws Exception {
           
             private void testGet(Connection connection, String tableName) throws IOException {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
                 Get get = new Get(ROW_1);
                 Result result = table.get(get);
                 byte[] value1 = result.getValue(FAMILYA, QUALIFIER_1);
          @@ -310,7 +306,7 @@ private void testGet(Connection connection, String tableName) throws IOException
                 get.readVersions(2);
                 result = table.get(get);
                 int count = 0;
          -      for (Cell kv: result.listCells()) {
          +      for (Cell kv : result.listCells()) {
                   if (CellUtil.matchingFamily(kv, FAMILYA) && TS_1 == kv.getTimestamp()) {
                     assertTrue(CellUtil.matchingValue(kv, VALUE_1)); // @TS_1
                     count++;
          @@ -326,14 +322,14 @@ private void testGet(Connection connection, String tableName) throws IOException
             }
           
             @Test
          -  public void testHBASE22011()throws Exception{
          +  public void testHBASE22011() throws Exception {
               testHBASE22011(thriftConnection, "testHBASE22011Table");
               testHBASE22011(thriftHttpConnection, "testHBASE22011HttpTable");
             }
           
             public void testHBASE22011(Connection connection, String tableName) throws IOException {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
                 Get get = new Get(ROW_2);
                 Result result = table.get(get);
                 assertEquals(2, result.listCells().size());
          @@ -353,7 +349,7 @@ public void testMultiGet() throws Exception {
           
             public void testMultiGet(Connection connection, String tableName) throws Exception {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
       ArrayList<Get> gets = new ArrayList<>(2);
                 gets.add(new Get(ROW_1));
                 gets.add(new Get(ROW_2));
          @@ -363,7 +359,7 @@ public void testMultiGet(Connection connection, String tableName) throws Excepti
                 assertEquals(1, results[0].size());
                 assertEquals(2, results[1].size());
           
          -      //Test Versions
          +      // Test Versions
                 gets = new ArrayList<>(2);
                 Get g = new Get(ROW_1);
                 g.readVersions(3);
          @@ -403,7 +399,7 @@ public void testPut() throws Exception {
           
             public void testPut(Connection connection, String tableName) throws IOException {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
                 Put put = new Put(ROW_3);
                 put.addColumn(FAMILYA, QUALIFIER_1, VALUE_1);
                 table.put(put);
          @@ -454,7 +450,7 @@ public void testDelete() throws Exception {
           
             public void testDelete(Connection connection, String tableName) throws IOException {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
                 Put put = new Put(ROW_3);
                 put.addColumn(FAMILYA, QUALIFIER_1, VALUE_1);
                 put.addColumn(FAMILYB, QUALIFIER_2, VALUE_2);
          @@ -544,7 +540,7 @@ public void testScanner() throws Exception {
           
             public void testScanner(Connection connection, String tableName) throws IOException {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
       List<Put> puts = new ArrayList<>(4);
                 Put put = new Put(ROW_1);
                 put.addColumn(FAMILYA, QUALIFIER_1, VALUE_1);
          @@ -592,7 +588,7 @@ public void testScanner(Connection connection, String tableName) throws IOExcept
           
                 scanner.close();
           
          -      scanner = table.getScanner(FAMILYA,QUALIFIER_1);
          +      scanner = table.getScanner(FAMILYA, QUALIFIER_1);
                 results = scanner.next(4);
                 assertNotNull(results);
                 assertEquals(4, results.length);
          @@ -611,10 +607,9 @@ public void testCheckAndDelete() throws Exception {
               testCheckAndDelete(thriftHttpConnection, "testCheckAndDeleteHttpTable");
             }
           
          -
             public void testCheckAndDelete(Connection connection, String tableName) throws IOException {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
                 Get get = new Get(ROW_1);
                 Result result = table.get(get);
                 byte[] value1 = result.getValue(FAMILYA, QUALIFIER_1);
          @@ -626,18 +621,18 @@ public void testCheckAndDelete(Connection connection, String tableName) throws I
                 assertEquals(1, table.exists(Collections.singletonList(get)).length);
                 Delete delete = new Delete(ROW_1);
           
          -      table.checkAndMutate(ROW_1, FAMILYA).qualifier(QUALIFIER_1)
          -          .ifEquals(VALUE_1).thenDelete(delete);
          +      table.checkAndMutate(ROW_1, FAMILYA).qualifier(QUALIFIER_1).ifEquals(VALUE_1)
          +          .thenDelete(delete);
                 assertFalse(table.exists(get));
           
                 Put put = new Put(ROW_1);
                 put.addColumn(FAMILYA, QUALIFIER_1, VALUE_1);
                 table.put(put);
           
          -      assertTrue(table.checkAndMutate(ROW_1, FAMILYA).qualifier(QUALIFIER_1)
          -          .ifEquals(VALUE_1).thenPut(put));
          -      assertFalse(table.checkAndMutate(ROW_1, FAMILYA).qualifier(QUALIFIER_1)
          -          .ifEquals(VALUE_2).thenPut(put));
          +      assertTrue(
          +        table.checkAndMutate(ROW_1, FAMILYA).qualifier(QUALIFIER_1).ifEquals(VALUE_1).thenPut(put));
          +      assertFalse(
          +        table.checkAndMutate(ROW_1, FAMILYA).qualifier(QUALIFIER_1).ifEquals(VALUE_2).thenPut(put));
               }
           
             }
          @@ -650,7 +645,7 @@ public void testIteratorScaner() throws Exception {
           
             public void testIteratorScanner(Connection connection, String tableName) throws IOException {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
       List<Put> puts = new ArrayList<>(4);
                 Put put = new Put(ROW_1);
                 put.addColumn(FAMILYA, QUALIFIER_1, VALUE_1);
          @@ -688,7 +683,7 @@ public void testReverseScan() throws Exception {
           
             public void testReverseScan(Connection connection, String tableName) throws IOException {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
       List<Put> puts = new ArrayList<>(4);
                 Put put = new Put(ROW_1);
                 put.addColumn(FAMILYA, QUALIFIER_1, VALUE_1);
          @@ -724,7 +719,6 @@ public void testReverseScan(Connection connection, String tableName) throws IOEx
           
             }
           
          -
             @Test
             public void testScanWithFilters() throws Exception {
               testScanWithFilters(thriftConnection, "testScanWithFiltersTable");
          @@ -733,11 +727,11 @@ public void testScanWithFilters() throws Exception {
           
             private void testScanWithFilters(Connection connection, String tableName) throws IOException {
               createTable(thriftAdmin, tableName);
          -    try (Table table = connection.getTable(TableName.valueOf(tableName))){
          +    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
                 FilterList filterList = new FilterList();
                 PrefixFilter prefixFilter = new PrefixFilter(Bytes.toBytes("testrow"));
          -      ColumnValueFilter columnValueFilter = new ColumnValueFilter(FAMILYA, QUALIFIER_1,
          -          CompareOperator.EQUAL, VALUE_1);
          +      ColumnValueFilter columnValueFilter =
          +          new ColumnValueFilter(FAMILYA, QUALIFIER_1, CompareOperator.EQUAL, VALUE_1);
                 filterList.addFilter(prefixFilter);
                 filterList.addFilter(columnValueFilter);
                 Scan scan = new Scan();
          @@ -755,18 +749,17 @@ private void testScanWithFilters(Connection connection, String tableName) throws
               }
             }
           
          -
             private TableDescriptor createTable(Admin admin, String tableName) throws IOException {
          -    TableDescriptorBuilder builder = TableDescriptorBuilder
          -        .newBuilder(TableName.valueOf(tableName));
          -    ColumnFamilyDescriptorBuilder familyABuilder = ColumnFamilyDescriptorBuilder
          -        .newBuilder(FAMILYA);
          +    TableDescriptorBuilder builder =
          +        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
          +    ColumnFamilyDescriptorBuilder familyABuilder =
          +        ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA);
               familyABuilder.setMaxVersions(3);
          -    ColumnFamilyDescriptorBuilder familyBBuilder = ColumnFamilyDescriptorBuilder
          -        .newBuilder(FAMILYB);
          +    ColumnFamilyDescriptorBuilder familyBBuilder =
          +        ColumnFamilyDescriptorBuilder.newBuilder(FAMILYB);
               familyBBuilder.setMaxVersions(3);
          -    ColumnFamilyDescriptorBuilder familyCBuilder = ColumnFamilyDescriptorBuilder
          -        .newBuilder(FAMILYC);
          +    ColumnFamilyDescriptorBuilder familyCBuilder =
          +        ColumnFamilyDescriptorBuilder.newBuilder(FAMILYC);
               familyCBuilder.setMaxVersions(3);
               builder.setColumnFamily(familyABuilder.build());
               builder.setColumnFamily(familyBBuilder.build());
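
The createTable() helper above builds each family with its own builder variable. A hedged sketch of the same descriptors written with chained calls (the style the handler test's families array already uses) would be:

    // Sketch only: equivalent table descriptor built with chained builder calls.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(3).build())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYB).setMaxVersions(3).build())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYC).setMaxVersions(3).build())
      .build();
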
          @@ -790,13 +783,13 @@ private TableDescriptor createTable(Admin admin, String tableName) throws IOExce
           
             private void testThriftAdmin(Connection connection, String namespace, String table)
                 throws Exception {
          -    try (Admin admin = connection.getAdmin()){
          -      //create name space
          +    try (Admin admin = connection.getAdmin()) {
          +      // create name space
                 NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create(namespace).build();
                 namespaceDescriptor.setConfiguration("key1", "value1");
                 namespaceDescriptor.setConfiguration("key2", "value2");
                 admin.createNamespace(namespaceDescriptor);
          -      //list namespace
          +      // list namespace
                 NamespaceDescriptor[] namespaceDescriptors = admin.listNamespaceDescriptors();
                 boolean found = false;
                 for (NamespaceDescriptor nd : namespaceDescriptors) {
          @@ -806,56 +799,56 @@ private void testThriftAdmin(Connection connection, String namespace, String tab
                   }
                 }
                 assertTrue(found);
          -      //modify namesapce
+      // modify namespace
                 namespaceDescriptor.setConfiguration("kye3", "value3");
                 admin.modifyNamespace(namespaceDescriptor);
          -      //get namespace
          +      // get namespace
                 NamespaceDescriptor namespaceDescriptorReturned = admin.getNamespaceDescriptor(namespace);
                 assertTrue(namespaceDescriptorReturned.getConfiguration().size() == 3);
          -      //create table
          +      // create table
                 TableDescriptor tableDescriptor = createTable(admin, table);
          -      //modify table
          +      // modify table
                 TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor);
                 builder.setDurability(Durability.ASYNC_WAL);
                 admin.modifyTable(builder.build());
          -      //modify column family
          +      // modify column family
                 ColumnFamilyDescriptor familyA = tableDescriptor.getColumnFamily(FAMILYA);
          -      ColumnFamilyDescriptorBuilder familyABuilder = ColumnFamilyDescriptorBuilder
          -          .newBuilder(familyA);
          +      ColumnFamilyDescriptorBuilder familyABuilder =
          +          ColumnFamilyDescriptorBuilder.newBuilder(familyA);
                 familyABuilder.setInMemory(true);
                 admin.modifyColumnFamily(tableDescriptor.getTableName(), familyABuilder.build());
          -      //add column family
          -      ColumnFamilyDescriptorBuilder familyDBuilder = ColumnFamilyDescriptorBuilder
          -          .newBuilder(FAMILYD);
          +      // add column family
          +      ColumnFamilyDescriptorBuilder familyDBuilder =
          +          ColumnFamilyDescriptorBuilder.newBuilder(FAMILYD);
                 familyDBuilder.setDataBlockEncoding(DataBlockEncoding.PREFIX);
                 admin.addColumnFamily(tableDescriptor.getTableName(), familyDBuilder.build());
          -      //get table descriptor
          +      // get table descriptor
                 TableDescriptor tableDescriptorReturned = admin.getDescriptor(tableDescriptor.getTableName());
                 assertTrue(tableDescriptorReturned.getColumnFamilies().length == 4);
          -      assertTrue(tableDescriptorReturned.getDurability() ==  Durability.ASYNC_WAL);
          -      ColumnFamilyDescriptor columnFamilyADescriptor1Returned = tableDescriptorReturned
          -          .getColumnFamily(FAMILYA);
          +      assertTrue(tableDescriptorReturned.getDurability() == Durability.ASYNC_WAL);
          +      ColumnFamilyDescriptor columnFamilyADescriptor1Returned =
          +          tableDescriptorReturned.getColumnFamily(FAMILYA);
                 assertTrue(columnFamilyADescriptor1Returned.isInMemory() == true);
          -      //delete column family
          +      // delete column family
                 admin.deleteColumnFamily(tableDescriptor.getTableName(), FAMILYA);
                 tableDescriptorReturned = admin.getDescriptor(tableDescriptor.getTableName());
                 assertTrue(tableDescriptorReturned.getColumnFamilies().length == 3);
          -      //disable table
          +      // disable table
                 admin.disableTable(tableDescriptor.getTableName());
                 assertTrue(admin.isTableDisabled(tableDescriptor.getTableName()));
          -      //enable table
          +      // enable table
                 admin.enableTable(tableDescriptor.getTableName());
                 assertTrue(admin.isTableEnabled(tableDescriptor.getTableName()));
                 assertTrue(admin.isTableAvailable(tableDescriptor.getTableName()));
          -      //truncate table
          +      // truncate table
                 admin.disableTable(tableDescriptor.getTableName());
                 admin.truncateTable(tableDescriptor.getTableName(), true);
                 assertTrue(admin.isTableAvailable(tableDescriptor.getTableName()));
          -      //delete table
          +      // delete table
                 admin.disableTable(tableDescriptor.getTableName());
                 admin.deleteTable(tableDescriptor.getTableName());
                 assertFalse(admin.tableExists(tableDescriptor.getTableName()));
          -      //delete namespace
          +      // delete namespace
                 admin.deleteNamespace(namespace);
                 namespaceDescriptors = admin.listNamespaceDescriptors();
                 // should have 2 namespace, default and hbase
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
          index 1453f371db7c..3c680ba35fdd 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
          @@ -148,7 +148,7 @@
            * Unit testing for ThriftServer.HBaseServiceHandler, a part of the org.apache.hadoop.hbase.thrift2
            * package.
            */
          -@Category({ClientTests.class, MediumTests.class})
          +@Category({ ClientTests.class, MediumTests.class })
           public class TestThriftHBaseServiceHandler {
           
             @ClassRule
          @@ -167,9 +167,8 @@ public class TestThriftHBaseServiceHandler {
             private static byte[] valueAname = Bytes.toBytes("valueA");
             private static byte[] valueBname = Bytes.toBytes("valueB");
             private static ColumnFamilyDescriptor[] families = new ColumnFamilyDescriptor[] {
          -    ColumnFamilyDescriptorBuilder.newBuilder(familyAname).setMaxVersions(3).build(),
          -    ColumnFamilyDescriptorBuilder.newBuilder(familyBname).setMaxVersions(2).build() };
          -
          +      ColumnFamilyDescriptorBuilder.newBuilder(familyAname).setMaxVersions(3).build(),
          +      ColumnFamilyDescriptorBuilder.newBuilder(familyBname).setMaxVersions(2).build() };
           
             private static final MetricsAssertHelper metricsHelper =
                 CompatibilityFactory.getInstance(MetricsAssertHelper.class);
          @@ -177,7 +176,6 @@ public class TestThriftHBaseServiceHandler {
             @Rule
             public TestName name = new TestName();
           
          -
   public void assertTColumnValuesEqual(List<TColumnValue> columnValuesA,
       List<TColumnValue> columnValuesB) {
               assertEquals(columnValuesA.size(), columnValuesB.size());
@@ -185,7 +183,7 @@ public void assertTColumnValuesEqual(List<TColumnValue> columnValuesA,
                 @Override
                 public int compare(TColumnValue o1, TColumnValue o2) {
                   return Bytes.compareTo(Bytes.add(o1.getFamily(), o1.getQualifier()),
          -            Bytes.add(o2.getFamily(), o2.getQualifier()));
          +          Bytes.add(o2.getFamily(), o2.getQualifier()));
                 }
               };
               Collections.sort(columnValuesA, comparator);
          @@ -223,8 +221,9 @@ public static void beforeClass() throws Exception {
               UTIL.getConfiguration().set("hbase.superuser", System.getProperty("user.name"));
           
               UTIL.startMiniCluster();
          -    TableDescriptor tableDescriptor = TableDescriptorBuilder
          -      .newBuilder(TableName.valueOf(tableAname)).setColumnFamilies(Arrays.asList(families)).build();
          +    TableDescriptor tableDescriptor =
          +        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableAname))
          +            .setColumnFamilies(Arrays.asList(families)).build();
               try (Admin admin = UTIL.getAdmin()) {
                 admin.createTable(tableDescriptor);
               }
          @@ -385,10 +384,10 @@ public void testDelete() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
     List<TColumnValue> columnValues = new ArrayList<>(2);
          -    TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -      wrap(valueAname));
          -    TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname),
          -      wrap(valueBname));
          +    TColumnValue columnValueA =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
          +    TColumnValue columnValueB =
          +        new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
               columnValues.add(columnValueA);
               columnValues.add(columnValueB);
               TPut put = new TPut(wrap(rowName), columnValues);
          @@ -422,8 +421,8 @@ public void testDeleteAllTimestamps() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
     List<TColumnValue> columnValues = new ArrayList<>(1);
          -    TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -      wrap(valueAname));
          +    TColumnValue columnValueA =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
               columnValueA.setTimestamp(EnvironmentEdgeManager.currentTime() - 10);
               columnValues.add(columnValueA);
               TPut put = new TPut(wrap(rowName), columnValues);
          @@ -465,8 +464,8 @@ public void testDeleteSingleTimestamp() throws Exception {
               long timestamp2 = EnvironmentEdgeManager.currentTime();
           
     List<TColumnValue> columnValues = new ArrayList<>(1);
          -    TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -      wrap(valueAname));
          +    TColumnValue columnValueA =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
               columnValueA.setTimestamp(timestamp1);
               columnValues.add(columnValueA);
               TPut put = new TPut(wrap(rowName), columnValues);
          @@ -593,8 +592,8 @@ public void testIncrement() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
     List<TColumnValue> columnValues = new ArrayList<>(1);
          -    columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -      wrap(Bytes.toBytes(1L))));
          +    columnValues
          +        .add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
               TPut put = new TPut(wrap(rowName), columnValues);
               put.setColumnValues(columnValues);
               handler.put(table, put);
          @@ -641,8 +640,8 @@ public void testAppend() throws Exception {
             }
           
             /**
          -   * check that checkAndPut fails if the cell does not exist, then put in the cell, then check
          -   * that the checkAndPut succeeds.
          +   * check that checkAndPut fails if the cell does not exist, then put in the cell, then check that
          +   * the checkAndPut succeeds.
              */
             @Test
             public void testCheckAndPut() throws Exception {
          @@ -651,21 +650,21 @@ public void testCheckAndPut() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
     List<TColumnValue> columnValuesA = new ArrayList<>(1);
          -    TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -      wrap(valueAname));
          +    TColumnValue columnValueA =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
               columnValuesA.add(columnValueA);
               TPut putA = new TPut(wrap(rowName), columnValuesA);
               putA.setColumnValues(columnValuesA);
           
     List<TColumnValue> columnValuesB = new ArrayList<>(1);
          -    TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname),
          -      wrap(valueBname));
          +    TColumnValue columnValueB =
          +        new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
               columnValuesB.add(columnValueB);
               TPut putB = new TPut(wrap(rowName), columnValuesB);
               putB.setColumnValues(columnValuesB);
           
          -    assertFalse(handler.checkAndPut(table, wrap(rowName), wrap(familyAname),
          -      wrap(qualifierAname), wrap(valueAname), putB));
          +    assertFalse(handler.checkAndPut(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname),
          +      wrap(valueAname), putB));
           
               TGet get = new TGet(wrap(rowName));
               TResult result = handler.get(table, get);
          @@ -673,8 +672,8 @@ public void testCheckAndPut() throws Exception {
           
               handler.put(table, putA);
           
          -    assertTrue(handler.checkAndPut(table, wrap(rowName), wrap(familyAname),
          -      wrap(qualifierAname), wrap(valueAname), putB));
          +    assertTrue(handler.checkAndPut(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname),
          +      wrap(valueAname), putB));
           
               result = handler.get(table, get);
               assertArrayEquals(rowName, result.getRow());
          @@ -686,8 +685,8 @@ public void testCheckAndPut() throws Exception {
             }
           
             /**
          -   * check that checkAndDelete fails if the cell does not exist, then put in the cell, then
          -   * check that the checkAndDelete succeeds.
          +   * check that checkAndDelete fails if the cell does not exist, then put in the cell, then check
          +   * that the checkAndDelete succeeds.
              */
             @Test
             public void testCheckAndDelete() throws Exception {
          @@ -696,15 +695,15 @@ public void testCheckAndDelete() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
     List<TColumnValue> columnValuesA = new ArrayList<>(1);
          -    TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -      wrap(valueAname));
          +    TColumnValue columnValueA =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
               columnValuesA.add(columnValueA);
               TPut putA = new TPut(wrap(rowName), columnValuesA);
               putA.setColumnValues(columnValuesA);
           
     List<TColumnValue> columnValuesB = new ArrayList<>(1);
          -    TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname),
          -      wrap(valueBname));
          +    TColumnValue columnValueB =
          +        new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
               columnValuesB.add(columnValueB);
               TPut putB = new TPut(wrap(rowName), columnValuesB);
               putB.setColumnValues(columnValuesB);
          @@ -715,7 +714,7 @@ public void testCheckAndDelete() throws Exception {
               TDelete delete = new TDelete(wrap(rowName));
           
               assertFalse(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
          -        wrap(qualifierAname), wrap(valueAname), delete));
          +      wrap(qualifierAname), wrap(valueAname), delete));
           
               TGet get = new TGet(wrap(rowName));
               TResult result = handler.get(table, get);
          @@ -724,8 +723,8 @@ public void testCheckAndDelete() throws Exception {
           
               handler.put(table, putA);
           
          -    assertTrue(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
          -      wrap(qualifierAname), wrap(valueAname), delete));
          +    assertTrue(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname),
          +      wrap(valueAname), delete));
           
               result = handler.get(table, get);
               assertFalse(result.isSetRow());
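
As the reworded Javadoc above spells out, the guarded delete is exercised twice: once while the checked cell is absent (the mutation must not apply) and once after the cell is written (it must apply). A condensed sketch of that flow, reusing the wrap() helper, the name constants, and the putA from this test:

    // Hedged sketch of the checkAndDelete round trip described in the Javadoc.
    TDelete delete = new TDelete(wrap(rowName));
    assertFalse(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
      wrap(qualifierAname), wrap(valueAname), delete));    // cell absent: guard fails
    handler.put(table, putA);                               // write the guarded cell
    assertTrue(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
      wrap(qualifierAname), wrap(valueAname), delete));     // guard matches: row deleted
    assertFalse(handler.get(table, new TGet(wrap(rowName))).isSetRow());
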
          @@ -738,8 +737,8 @@ public void testScan() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
               // insert data
          -    TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -      wrap(valueAname));
          +    TColumnValue columnValue =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     List<TColumnValue> columnValues = new ArrayList<>(1);
               columnValues.add(columnValue);
               for (int i = 0; i < 10; i++) {
          @@ -784,13 +783,14 @@ public void testScan() throws Exception {
              * Tests keeping a HBase scanner alive for long periods of time. Each call to getScannerRow()
              * should reset the ConnectionCache timeout for the scanner's connection.
              */
          -  @org.junit.Ignore @Test // Flakey. Diasabled by HBASE-24079. Renable with Fails with HBASE-24083.
          -  //  Caused by: java.util.concurrent.RejectedExecutionException:
          -  //  Task org.apache.hadoop.hbase.client.ResultBoundedCompletionService$QueueingFuture@e385431
          -  //  rejected from java.util.concurrent.ThreadPoolExecutor@   52b027d[Terminated, pool size = 0,
          -  //  active threads = 0, queued tasks = 0, completed tasks = 1]
          -  //  at org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandler.
          -  //  testLongLivedScan(TestThriftHBaseServiceHandler.java:804)
          +  @org.junit.Ignore
+  @Test // Flakey. Disabled by HBASE-24079. Re-enable with HBASE-24083. Fails with:
          +  // Caused by: java.util.concurrent.RejectedExecutionException:
          +  // Task org.apache.hadoop.hbase.client.ResultBoundedCompletionService$QueueingFuture@e385431
          +  // rejected from java.util.concurrent.ThreadPoolExecutor@ 52b027d[Terminated, pool size = 0,
          +  // active threads = 0, queued tasks = 0, completed tasks = 1]
          +  // at org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandler.
          +  // testLongLivedScan(TestThriftHBaseServiceHandler.java:804)
             public void testLongLivedScan() throws Exception {
               int numTrials = 6;
               int trialPause = 1000;
          @@ -799,13 +799,13 @@ public void testLongLivedScan() throws Exception {
               // Set the ConnectionCache timeout to trigger halfway through the trials
               conf.setInt(MAX_IDLETIME, (numTrials / 2) * trialPause);
               conf.setInt(CLEANUP_INTERVAL, cleanUpInterval);
          -    ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(conf,
          -        UserProvider.instantiate(conf));
          +    ThriftHBaseServiceHandler handler =
          +        new ThriftHBaseServiceHandler(conf, UserProvider.instantiate(conf));
           
               ByteBuffer table = wrap(tableAname);
               // insert data
          -    TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(valueAname));
          +    TColumnValue columnValue =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     List<TColumnValue> columnValues = new ArrayList<>(1);
               columnValues.add(columnValue);
               for (int i = 0; i < numTrials; i++) {
          @@ -842,8 +842,8 @@ public void testReverseScan() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
               // insert data
          -    TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -      wrap(valueAname));
          +    TColumnValue columnValue =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     List<TColumnValue> columnValues = new ArrayList<>(1);
               columnValues.add(columnValue);
               for (int i = 0; i < 10; i++) {
          @@ -891,8 +891,8 @@ public void testScanWithFilter() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
               // insert data
          -    TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -      wrap(valueAname));
          +    TColumnValue columnValue =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     List<TColumnValue> columnValues = new ArrayList<>(1);
               columnValues.add(columnValue);
               for (int i = 0; i < 10; i++) {
          @@ -943,10 +943,10 @@ public void testScanWithColumnFamilyTimeRange() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
               // insert data
          -    TColumnValue familyAColumnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(valueAname));
          -    TColumnValue familyBColumnValue = new TColumnValue(wrap(familyBname), wrap(qualifierBname),
          -        wrap(valueBname));
          +    TColumnValue familyAColumnValue =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
          +    TColumnValue familyBColumnValue =
          +        new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
               long minTimestamp = EnvironmentEdgeManager.currentTime();
               for (int i = 0; i < 10; i++) {
                 familyAColumnValue.setTimestamp(minTimestamp + i);
          @@ -954,14 +954,14 @@ public void testScanWithColumnFamilyTimeRange() throws Exception {
       List<TColumnValue> columnValues = new ArrayList<>(2);
                 columnValues.add(familyAColumnValue);
                 columnValues.add(familyBColumnValue);
          -      TPut put = new TPut(wrap(Bytes.toBytes("testScanWithColumnFamilyTimeRange" + i)),
          -          columnValues);
          +      TPut put =
          +          new TPut(wrap(Bytes.toBytes("testScanWithColumnFamilyTimeRange" + i)), columnValues);
                 handler.put(table, put);
               }
           
               // create scan instance with column family time range
               TScan scan = new TScan();
-    Map<ByteBuffer,TTimeRange> colFamTimeRangeMap = new HashMap<>(2);
+    Map<ByteBuffer, TTimeRange> colFamTimeRangeMap = new HashMap<>(2);
               colFamTimeRangeMap.put(wrap(familyAname), new TTimeRange(minTimestamp + 3, minTimestamp + 5));
               colFamTimeRangeMap.put(wrap(familyBname), new TTimeRange(minTimestamp + 6, minTimestamp + 9));
               scan.setColFamTimeRangeMap(colFamTimeRangeMap);
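
The map built above is the point of this test: a TScan can carry a per-column-family time range, so each family is filtered against its own timestamp window within a single scan. A minimal sketch, using the family names and minTimestamp from the test:

    // Hedged sketch: give FAMILYA and FAMILYB different timestamp windows in one scan.
    TScan scan = new TScan();
    Map<ByteBuffer, TTimeRange> ranges = new HashMap<>(2);
    ranges.put(wrap(familyAname), new TTimeRange(minTimestamp + 3, minTimestamp + 5));
    ranges.put(wrap(familyBname), new TTimeRange(minTimestamp + 6, minTimestamp + 9));
    scan.setColFamTimeRangeMap(ranges);
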
          @@ -1004,8 +1004,8 @@ public void testSmallScan() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
               // insert data
          -    TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -            wrap(valueAname));
          +    TColumnValue columnValue =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     List<TColumnValue> columnValues = new ArrayList<>();
               columnValues.add(columnValue);
               for (int i = 0; i < 10; i++) {
          @@ -1047,7 +1047,7 @@ public void testExpiredScanner() throws Exception {
               Configuration conf = UTIL.getConfiguration();
               conf.setLong(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 1000);
               ThriftHBaseServiceHandler handler =
          -      new ThriftHBaseServiceHandler(conf, UserProvider.instantiate(conf));
          +        new ThriftHBaseServiceHandler(conf, UserProvider.instantiate(conf));
           
               TScan scan = new TScan();
               ByteBuffer table = wrap(tableAname);
          @@ -1075,12 +1075,8 @@ public void testPutTTL() throws Exception {
     List<TColumnValue> columnValues = new ArrayList<>(1);
           
               // Add some dummy data
          -    columnValues.add(
          -        new TColumnValue(
          -            wrap(familyAname),
          -            wrap(qualifierAname),
          -            wrap(Bytes.toBytes(1L))));
          -
          +    columnValues
          +        .add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
           
               TPut put = new TPut(wrap(rowName), columnValues);
               put.setColumnValues(columnValues);
          @@ -1111,7 +1107,6 @@ public void testPutTTL() throws Exception {
               TGet getTwo = new TGet(wrap(rowName));
               TResult resultTwo = handler.get(table, getTwo);
           
          -
               // Nothing should be there since it's ttl'd out.
               assertNull(resultTwo.getRow());
               assertEquals(0, resultTwo.getColumnValuesSize());
          @@ -1119,9 +1114,8 @@ public void testPutTTL() throws Exception {
           
             /**
              * Padding numbers to make comparison of sort order easier in a for loop
          -   *
          -   * @param n  The number to pad.
          -   * @param pad  The length to pad up to.
          +   * @param n The number to pad.
          +   * @param pad The length to pad up to.
              * @return The padded number as a string.
              */
             private String pad(int n, byte pad) {
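
The body of pad() is not part of this hunk; a hedged sketch of a helper matching that Javadoc (the actual implementation may differ) is:

    // Sketch only: left-pad the decimal form of n with '0' up to `pad` characters so that
    // lexicographic row ordering matches numeric ordering in the scan assertions.
    private String pad(int n, byte pad) {
      StringBuilder res = new StringBuilder(Integer.toString(n));
      while (res.length() < pad) {
        res.insert(0, '0');
      }
      return res.toString();
    }
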
          @@ -1142,7 +1136,7 @@ public void testScanWithBatchSize() throws Exception {
               for (int i = 0; i < 100; i++) {
                 String colNum = pad(i, (byte) 3);
                 TColumnValue columnValue = new TColumnValue(wrap(familyAname),
          -        wrap(Bytes.toBytes("col" + colNum)), wrap(Bytes.toBytes("val" + colNum)));
          +          wrap(Bytes.toBytes("col" + colNum)), wrap(Bytes.toBytes("val" + colNum)));
                 columnValues.add(columnValue);
               }
               TPut put = new TPut(wrap(Bytes.toBytes("testScanWithBatchSize")), columnValues);
          @@ -1223,8 +1217,8 @@ public void testGetScannerResults() throws Exception {
               assertEquals(5, results.size());
               for (int i = 0; i < 5; i++) {
                 // check if the rows are returned and in order
          -      assertArrayEquals(Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2)), results.get(i)
          -          .getRow());
          +      assertArrayEquals(Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2)),
          +        results.get(i).getRow());
               }
           
               // get 10 rows and check the returned results
          @@ -1233,8 +1227,8 @@ public void testGetScannerResults() throws Exception {
               assertEquals(10, results.size());
               for (int i = 0; i < 10; i++) {
                 // check if the rows are returned and in order
          -      assertArrayEquals(Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2)), results.get(i)
          -          .getRow());
          +      assertArrayEquals(Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2)),
          +        results.get(i).getRow());
               }
           
               // get 20 rows and check the returned results
          @@ -1243,8 +1237,8 @@ public void testGetScannerResults() throws Exception {
               assertEquals(20, results.size());
               for (int i = 0; i < 20; i++) {
                 // check if the rows are returned and in order
          -      assertArrayEquals(Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2)), results.get(i)
          -          .getRow());
          +      assertArrayEquals(Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2)),
          +        results.get(i).getRow());
               }
           
               // reverse scan
          @@ -1258,7 +1252,7 @@ public void testGetScannerResults() throws Exception {
               for (int i = 0; i < 20; i++) {
                 // check if the rows are returned and in order
                 assertArrayEquals(Bytes.toBytes("testGetScannerResults" + pad(19 - i, (byte) 2)),
          -          results.get(i).getRow());
          +        results.get(i).getRow());
               }
             }
           
          @@ -1276,8 +1270,7 @@ public void testMetrics() throws Exception {
               Configuration conf = UTIL.getConfiguration();
               ThriftMetrics metrics = getMetrics(conf);
               ThriftHBaseServiceHandler hbaseHandler = createHandler();
          -    THBaseService.Iface handler =
          -        HbaseHandlerMetricsProxy.newInstance(hbaseHandler, metrics,  conf);
          +    THBaseService.Iface handler = HbaseHandlerMetricsProxy.newInstance(hbaseHandler, metrics, conf);
               byte[] rowName = Bytes.toBytes("testMetrics");
               ByteBuffer table = wrap(tableAname);
           
          @@ -1286,7 +1279,7 @@ public void testMetrics() throws Exception {
           
     List<TColumnValue> columnValues = new ArrayList<>(2);
               columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
          -    columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname),  wrap(valueBname)));
          +    columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
               TPut put = new TPut(wrap(rowName), columnValues);
               put.setColumnValues(columnValues);
           
          @@ -1299,7 +1292,7 @@ public void testMetrics() throws Exception {
           
             private static ThriftMetrics getMetrics(Configuration conf) throws Exception {
               ThriftMetrics m = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.TWO);
          -    m.getSource().init(); //Clear all the metrics
          +    m.getSource().init(); // Clear all the metrics
               return m;
             }
           
          @@ -1311,28 +1304,26 @@ public void testMetricsWithException() throws Exception {
               // create a table which will throw exceptions for requests
               TableName tableName = TableName.valueOf(name.getMethodName());
               TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
          -      .setCoprocessor(ErrorThrowingGetObserver.class.getName())
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
          +        .setCoprocessor(ErrorThrowingGetObserver.class.getName())
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
           
               Table table = UTIL.createTable(tableDesc, null);
               table.put(new Put(rowkey).addColumn(family, col, Bytes.toBytes("val1")));
           
               ThriftHBaseServiceHandler hbaseHandler = createHandler();
               ThriftMetrics metrics = getMetrics(UTIL.getConfiguration());
          -    THBaseService.Iface handler =
          -        HbaseHandlerMetricsProxy.newInstance(hbaseHandler, metrics, null);
          +    THBaseService.Iface handler = HbaseHandlerMetricsProxy.newInstance(hbaseHandler, metrics, null);
               ByteBuffer tTableName = wrap(tableName.getName());
           
               // check metrics increment with a successful get
          -    long preGetCounter = metricsHelper.checkCounterExists("get_num_ops", metrics.getSource()) ?
          -        metricsHelper.getCounter("get_num_ops", metrics.getSource()) :
          -        0;
          +    long preGetCounter = metricsHelper.checkCounterExists("get_num_ops", metrics.getSource())
          +        ? metricsHelper.getCounter("get_num_ops", metrics.getSource())
          +        : 0;
               TGet tGet = new TGet(wrap(rowkey));
               TResult tResult = handler.get(tTableName, tGet);
           
-    List<TColumnValue> expectedColumnValues = Lists.newArrayList(
          -        new TColumnValue(wrap(family), wrap(col), wrap(Bytes.toBytes("val1")))
          -    );
+    List<TColumnValue> expectedColumnValues =
          +        Lists.newArrayList(new TColumnValue(wrap(family), wrap(col), wrap(Bytes.toBytes("val1"))));
               assertArrayEquals(rowkey, tResult.getRow());
     List<TColumnValue> returnedColumnValues = tResult.getColumnValues();
               assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
          @@ -1349,13 +1340,13 @@ private void testExceptionType(THBaseService.Iface handler, ThriftMetrics metric
                 ByteBuffer tTableName, byte[] rowkey, ErrorThrowingGetObserver.ErrorType errorType) {
               long preGetCounter = metricsHelper.getCounter("get_num_ops", metrics.getSource());
               String exceptionKey = errorType.getMetricName();
          -    long preExceptionCounter = metricsHelper.checkCounterExists(exceptionKey, metrics.getSource()) ?
          -        metricsHelper.getCounter(exceptionKey, metrics.getSource()) :
          -        0;
          +    long preExceptionCounter = metricsHelper.checkCounterExists(exceptionKey, metrics.getSource())
          +        ? metricsHelper.getCounter(exceptionKey, metrics.getSource())
          +        : 0;
               TGet tGet = new TGet(wrap(rowkey));
     Map<ByteBuffer, ByteBuffer> attributes = new HashMap<>();
               attributes.put(wrap(Bytes.toBytes(ErrorThrowingGetObserver.SHOULD_ERROR_ATTRIBUTE)),
          -        wrap(Bytes.toBytes(errorType.name())));
          +      wrap(Bytes.toBytes(errorType.name())));
               tGet.setAttributes(attributes);
               try {
                 TResult tResult = handler.get(tTableName, tGet);
          @@ -1369,10 +1360,8 @@ private void testExceptionType(THBaseService.Iface handler, ThriftMetrics metric
             }
           
             /**
          -   * See HBASE-17611
          -   *
          -   * Latency metrics were capped at ~ 2 seconds due to the use of an int variable to capture the
          -   * duration.
          +   * See HBASE-17611 Latency metrics were capped at ~ 2 seconds due to the use of an int variable to
          +   * capture the duration.
              */
             @Test
             public void testMetricsPrecision() throws Exception {
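
The rewrapped Javadoc above is terse about why the cap sat near two seconds: a duration measured in nanoseconds overflows a 32-bit int at 2^31 ns, which is roughly 2.1 seconds, so longer calls wrapped into garbage values. A tiny illustration, assuming the nanosecond reading was what got narrowed to an int:

    // Hedged illustration of the overflow behind the "~2 seconds" cap.
    long elapsedNanos = 2_200_000_000L;        // about 2.2 s expressed in nanoseconds
    int narrowed = (int) elapsedNanos;         // exceeds Integer.MAX_VALUE (2_147_483_647 ns ~ 2.147 s)
    System.out.println(narrowed);              // prints a negative, meaningless duration
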
          @@ -1382,8 +1371,8 @@ public void testMetricsPrecision() throws Exception {
               // create a table which will throw exceptions for requests
               TableName tableName = TableName.valueOf("testMetricsPrecision");
               TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
          -      .setCoprocessor(DelayingRegionObserver.class.getName())
          -      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
          +        .setCoprocessor(DelayingRegionObserver.class.getName())
          +        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
           
               Table table = null;
               try {
          @@ -1401,9 +1390,8 @@ public void testMetricsPrecision() throws Exception {
                 TGet tGet = new TGet(wrap(rowkey));
                 TResult tResult = handler.get(tTableName, tGet);
           
-      List<TColumnValue> expectedColumnValues = Lists.newArrayList(
          -          new TColumnValue(wrap(family), wrap(col), wrap(Bytes.toBytes("val1")))
          -      );
+      List<TColumnValue> expectedColumnValues = Lists
          +          .newArrayList(new TColumnValue(wrap(family), wrap(col), wrap(Bytes.toBytes("val1"))));
                 assertArrayEquals(rowkey, tResult.getRow());
       List<TColumnValue> returnedColumnValues = tResult.getColumnValues();
                 assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
          @@ -1420,7 +1408,6 @@ public void testMetricsPrecision() throws Exception {
               }
             }
           
          -
             @Test
             public void testAttribute() throws Exception {
               byte[] rowName = Bytes.toBytes("testAttribute");
          @@ -1436,7 +1423,7 @@ public void testAttribute() throws Exception {
           
     List<TColumnValue> columnValues = new ArrayList<>(1);
               columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
          -    TPut tPut = new TPut(wrap(rowName) , columnValues);
          +    TPut tPut = new TPut(wrap(rowName), columnValues);
               tPut.setAttributes(attributes);
               Put put = putFromThrift(tPut);
               assertArrayEquals(put.getAttribute("attribute1"), attributeValue);
          @@ -1470,13 +1457,13 @@ public void testMutateRow() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
     List<TColumnValue> columnValuesA = new ArrayList<>(1);
          -    TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(valueAname));
          +    TColumnValue columnValueA =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
               columnValuesA.add(columnValueA);
               TPut putA = new TPut(wrap(rowName), columnValuesA);
               putA.setColumnValues(columnValuesA);
           
          -    handler.put(table,putA);
          +    handler.put(table, putA);
           
               TGet get = new TGet(wrap(rowName));
               TResult result = handler.get(table, get);
          @@ -1488,8 +1475,8 @@ public void testMutateRow() throws Exception {
               assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
           
     List<TColumnValue> columnValuesB = new ArrayList<>(1);
          -    TColumnValue columnValueB = new TColumnValue(wrap(familyAname), wrap(qualifierBname),
          -        wrap(valueBname));
          +    TColumnValue columnValueB =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierBname), wrap(valueBname));
               columnValuesB.add(columnValueB);
               TPut putB = new TPut(wrap(rowName), columnValuesB);
               putB.setColumnValues(columnValuesB);
          @@ -1508,8 +1495,8 @@ public void testMutateRow() throws Exception {
               TMutation mutationB = TMutation.deleteSingle(delete);
               mutations.add(mutationB);
           
          -    TRowMutations tRowMutations = new TRowMutations(wrap(rowName),mutations);
          -    handler.mutateRow(table,tRowMutations);
          +    TRowMutations tRowMutations = new TRowMutations(wrap(rowName), mutations);
          +    handler.mutateRow(table, tRowMutations);
           
               result = handler.get(table, get);
               assertArrayEquals(rowName, result.getRow());
          @@ -1521,9 +1508,9 @@ public void testMutateRow() throws Exception {
             }
           
             /**
          -   * Create TPut, TDelete , TIncrement objects, set durability then call ThriftUtility
          -   * functions to get Put , Delete and Increment respectively. Use getDurability to make sure
          -   * the returned objects have the appropriate durability setting.
          +   * Create TPut, TDelete , TIncrement objects, set durability then call ThriftUtility functions to
          +   * get Put , Delete and Increment respectively. Use getDurability to make sure the returned
          +   * objects have the appropriate durability setting.
              */
             @Test
             public void testDurability() throws Exception {
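
The Javadoc above describes a round trip through the thrift conversion utilities. One leg of it, sketched with the putFromThrift(...) helper already used elsewhere in this test and the thrift-generated setDurability(...) setter (both usages are assumptions about the exact test body):

    // Hedged sketch: set durability on the thrift object, convert it, and read it back.
    TPut tPut = new TPut(wrap(rowName), columnValues);
    tPut.setDurability(TDurability.SKIP_WAL);
    Put put = putFromThrift(tPut);
    assertEquals(Durability.SKIP_WAL, put.getDurability());
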
          @@ -1603,17 +1590,16 @@ public void testCheckAndMutate() throws Exception {
               TPut putB = new TPut(row, columnValuesB);
               putB.setColumnValues(columnValuesB);
           
          -    TRowMutations tRowMutations = new TRowMutations(row,
-        Arrays.<TMutation> asList(TMutation.put(putB)));
          +    TRowMutations tRowMutations =
+        new TRowMutations(row, Arrays.<TMutation> asList(TMutation.put(putB)));
           
               // Empty table when we begin
               TResult result = handler.get(table, new TGet(row));
               assertEquals(0, result.getColumnValuesSize());
           
               // checkAndMutate -- condition should fail because the value doesn't exist.
          -    assertFalse("Expected condition to not pass",
          -        handler.checkAndMutate(table, row, family, qualifier, TCompareOperator.EQUAL, value,
          -            tRowMutations));
          +    assertFalse("Expected condition to not pass", handler.checkAndMutate(table, row, family,
          +      qualifier, TCompareOperator.EQUAL, value, tRowMutations));
           
     List<TColumnValue> columnValuesA = new ArrayList<>(1);
               TColumnValue columnValueA = new TColumnValue(family, qualifier, value);
          @@ -1628,9 +1614,8 @@ public void testCheckAndMutate() throws Exception {
               assertTColumnValueEqual(columnValueA, result.getColumnValues().get(0));
           
               // checkAndMutate -- condition should pass since we added the value
          -    assertTrue("Expected condition to pass",
          -        handler.checkAndMutate(table, row, family, qualifier, TCompareOperator.EQUAL, value,
          -            tRowMutations));
          +    assertTrue("Expected condition to pass", handler.checkAndMutate(table, row, family, qualifier,
          +      TCompareOperator.EQUAL, value, tRowMutations));
           
               result = handler.get(table, new TGet(row));
               assertEquals(2, result.getColumnValuesSize());
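For reference, the conditional-mutation call reformatted above takes the shape sketched below; the helper name and its arguments are illustrative only.

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.thrift2.generated.TCompareOperator;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TRowMutations;

public class CheckAndMutateSketch {
  // Applies the mutations only if the named cell currently equals expectedValue.
  static boolean putIfEquals(THBaseService.Iface client, ByteBuffer table, ByteBuffer row,
      ByteBuffer family, ByteBuffer qualifier, ByteBuffer expectedValue, TRowMutations mutations)
      throws Exception {
    return client.checkAndMutate(table, row, family, qualifier, TCompareOperator.EQUAL,
      expectedValue, mutations);
  }
}
```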
          @@ -1677,23 +1662,23 @@ public void testDDLOpertions() throws Exception {
               tTableName.setNs(Bytes.toBytes(namespace));
               tTableName.setQualifier(Bytes.toBytes(table));
               ThriftHBaseServiceHandler handler = createHandler();
          -    //create name space
          +    // create name space
               TNamespaceDescriptor namespaceDescriptor = new TNamespaceDescriptor();
               namespaceDescriptor.setName(namespace);
               namespaceDescriptor.putToConfiguration("key1", "value1");
               namespaceDescriptor.putToConfiguration("key2", "value2");
               handler.createNamespace(namespaceDescriptor);
          -    //list namespace
          +    // list namespace
                List<TNamespaceDescriptor> namespaceDescriptors = handler.listNamespaceDescriptors();
               // should have 3 namespace, default hbase and testDDLOpertionsNamespace
               assertTrue(namespaceDescriptors.size() == 3);
          -    //modify namesapce
          +    // modify namesapce
               namespaceDescriptor.putToConfiguration("kye3", "value3");
               handler.modifyNamespace(namespaceDescriptor);
          -    //get namespace
          +    // get namespace
               TNamespaceDescriptor namespaceDescriptorReturned = handler.getNamespaceDescriptor(namespace);
               assertTrue(namespaceDescriptorReturned.getConfiguration().size() == 3);
          -    //create table
          +    // create table
               TTableDescriptor tableDescriptor = new TTableDescriptor();
               tableDescriptor.setTableName(tTableName);
               TColumnFamilyDescriptor columnFamilyDescriptor1 = new TColumnFamilyDescriptor();
          @@ -1703,44 +1688,44 @@ public void testDDLOpertions() throws Exception {
                List<ByteBuffer> splitKeys = new ArrayList<>();
               splitKeys.add(ByteBuffer.wrap(Bytes.toBytes(5)));
               handler.createTable(tableDescriptor, splitKeys);
          -    //modify table
          +    // modify table
               tableDescriptor.setDurability(TDurability.ASYNC_WAL);
               handler.modifyTable(tableDescriptor);
          -    //modify column family
          +    // modify column family
               columnFamilyDescriptor1.setInMemory(true);
               handler.modifyColumnFamily(tTableName, columnFamilyDescriptor1);
          -    //add column family
          +    // add column family
               TColumnFamilyDescriptor columnFamilyDescriptor2 = new TColumnFamilyDescriptor();
               columnFamilyDescriptor2.setName(familyBname);
               columnFamilyDescriptor2.setDataBlockEncoding(TDataBlockEncoding.PREFIX);
               handler.addColumnFamily(tTableName, columnFamilyDescriptor2);
          -    //get table descriptor
          +    // get table descriptor
               TTableDescriptor tableDescriptorReturned = handler.getTableDescriptor(tTableName);
               assertTrue(tableDescriptorReturned.getColumns().size() == 2);
          -    assertTrue(tableDescriptorReturned.getDurability() ==  TDurability.ASYNC_WAL);
          +    assertTrue(tableDescriptorReturned.getDurability() == TDurability.ASYNC_WAL);
               TColumnFamilyDescriptor columnFamilyDescriptor1Returned = tableDescriptorReturned.getColumns()
                   .stream().filter(desc -> Bytes.equals(desc.getName(), familyAname)).findFirst().get();
               assertTrue(columnFamilyDescriptor1Returned.isInMemory() == true);
          -    //delete column family
          +    // delete column family
               handler.deleteColumnFamily(tTableName, ByteBuffer.wrap(familyBname));
               tableDescriptorReturned = handler.getTableDescriptor(tTableName);
               assertTrue(tableDescriptorReturned.getColumns().size() == 1);
          -    //disable table
          +    // disable table
               handler.disableTable(tTableName);
               assertTrue(handler.isTableDisabled(tTableName));
          -    //enable table
          +    // enable table
               handler.enableTable(tTableName);
               assertTrue(handler.isTableEnabled(tTableName));
               assertTrue(handler.isTableAvailable(tTableName));
          -    //truncate table
          +    // truncate table
               handler.disableTable(tTableName);
               handler.truncateTable(tTableName, true);
               assertTrue(handler.isTableAvailable(tTableName));
          -    //delete table
          +    // delete table
               handler.disableTable(tTableName);
               handler.deleteTable(tTableName);
               assertFalse(handler.tableExists(tTableName));
          -    //delete namespace
          +    // delete namespace
               handler.deleteNamespace(namespace);
               namespaceDescriptors = handler.listNamespaceDescriptors();
               // should have 2 namespace, default and hbase
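The DDL test above walks through the namespace and table lifecycle calls. A condensed sketch of the creation half, with made-up namespace/table/family names, and assuming the generated TTableDescriptor exposes a setColumns setter to match the getColumns getter used above:

```java
import java.util.Collections;
import org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TNamespaceDescriptor;
import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
import org.apache.hadoop.hbase.thrift2.generated.TTableName;
import org.apache.hadoop.hbase.util.Bytes;

public class ThriftDdlSketch {
  static void createNamespaceAndTable(THBaseService.Iface client) throws Exception {
    TNamespaceDescriptor ns = new TNamespaceDescriptor();
    ns.setName("demo_ns");
    client.createNamespace(ns);

    TTableName tableName = new TTableName();
    tableName.setNs(Bytes.toBytes("demo_ns"));
    tableName.setQualifier(Bytes.toBytes("demo_table"));

    TColumnFamilyDescriptor family = new TColumnFamilyDescriptor();
    family.setName(Bytes.toBytes("cf"));

    TTableDescriptor table = new TTableDescriptor();
    table.setTableName(tableName);
    table.setColumns(Collections.singletonList(family));

    // No pre-split regions in this sketch.
    client.createTable(table, Collections.emptyList());
  }
}
```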
          @@ -1776,8 +1761,8 @@ public void testGetThriftServerOneType() throws Exception {
           
               LOG.info("Starting HBase Thrift server One");
               THRIFT_TEST_UTIL.startThriftServer(UTIL.getConfiguration(), ThriftServerType.ONE);
          -    try (TTransport transport = new TSocket(InetAddress.getLocalHost().getHostName(),
          -        THRIFT_TEST_UTIL.getServerPort())){
          +    try (TTransport transport =
          +        new TSocket(InetAddress.getLocalHost().getHostName(), THRIFT_TEST_UTIL.getServerPort())) {
                 TProtocol protocol = new TBinaryProtocol(transport);
                 // This is our thrift2 client.
                 THBaseService.Iface client = new THBaseService.Client(protocol);
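For readers unfamiliar with the thrift2 gateway, the client-side counterpart of the server started above looks roughly like this; the host, port and table name are placeholders:

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.thrift2.generated.TGet;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TResult;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class Thrift2ClientSketch {
  public static void main(String[] args) throws Exception {
    try (TTransport transport = new TSocket("localhost", 9090)) {
      transport.open();
      TProtocol protocol = new TBinaryProtocol(transport);
      THBaseService.Iface client = new THBaseService.Client(protocol);
      TResult result = client.get(ByteBuffer.wrap(Bytes.toBytes("demo_table")),
        new TGet(ByteBuffer.wrap(Bytes.toBytes("row1"))));
      System.out.println("columns returned: " + result.getColumnValuesSize());
    }
  }
}
```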
          @@ -1799,13 +1784,11 @@ public void testSlowLogResponses() throws Exception {
           
               THRIFT_TEST_UTIL.startThriftServer(configuration, ThriftServerType.ONE);
               ThriftHBaseServiceHandler thriftHBaseServiceHandler =
          -      new ThriftHBaseServiceHandler(configuration,
          -        UserProvider.instantiate(configuration));
          +        new ThriftHBaseServiceHandler(configuration, UserProvider.instantiate(configuration));
                Collection<ServerName> serverNames = UTIL.getAdmin().getRegionServers();
                Set<TServerName> tServerNames =
           -      ThriftUtilities.getServerNamesFromHBase(new HashSet<>(serverNames));
           -    List<Boolean> clearedResponses =
           -      thriftHBaseServiceHandler.clearSlowLogResponses(tServerNames);
           +        ThriftUtilities.getServerNamesFromHBase(new HashSet<>(serverNames));
           +    List<Boolean> clearedResponses = thriftHBaseServiceHandler.clearSlowLogResponses(tServerNames);
               clearedResponses.forEach(Assert::assertTrue);
               TLogQueryFilter tLogQueryFilter = new TLogQueryFilter();
               tLogQueryFilter.setLimit(15);
          @@ -1816,7 +1799,7 @@ public void testSlowLogResponses() throws Exception {
               logQueryFilter = ThriftUtilities.getSlowLogQueryFromThrift(tLogQueryFilter);
               Assert.assertEquals(logQueryFilter.getFilterByOperator(), LogQueryFilter.FilterByOperator.AND);
                List<TOnlineLogRecord> tLogRecords =
          -      thriftHBaseServiceHandler.getSlowLogResponses(tServerNames, tLogQueryFilter);
          +        thriftHBaseServiceHandler.getSlowLogResponses(tServerNames, tLogQueryFilter);
               assertEquals(tLogRecords.size(), 0);
             }
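A sketch of the slow-log query pattern used by this test; the record type name (TOnlineLogRecord) is recalled from the thrift2 IDL rather than visible in this hunk, so treat it as an assumption:

```java
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TLogQueryFilter;
import org.apache.hadoop.hbase.thrift2.generated.TOnlineLogRecord;
import org.apache.hadoop.hbase.thrift2.generated.TServerName;

public class SlowLogQuerySketch {
  // Asks the given region servers for their most recent slow-log records.
  static List<TOnlineLogRecord> fetchSlowLogs(THBaseService.Iface client,
      Set<TServerName> servers) throws Exception {
    TLogQueryFilter filter = new TLogQueryFilter();
    filter.setLimit(15);
    return client.getSlowLogResponses(servers, filter);
  }
}
```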
           
          @@ -1833,8 +1816,8 @@ public void testPerformTablePermissions() throws Throwable {
               ThriftHBaseServiceHandler handler = createHandler();
               handler.grant(tce);
           
           -    List<UserPermission> permissionList = AccessControlClient.getUserPermissions(UTIL.getConnection(),
           -        Bytes.toString(tableAname), fakeUser);
           +    List<UserPermission> permissionList = AccessControlClient
           +        .getUserPermissions(UTIL.getConnection(), Bytes.toString(tableAname), fakeUser);
               // we only grant one R permission
               assertEquals(permissionList.size(), 1);
           
          @@ -1851,7 +1834,6 @@ public void testPerformTablePermissions() throws Throwable {
               assertEquals(0, permissionList.size());
             }
           
          -
             @Test
             public void testPerformNamespacePermissions() throws Throwable {
               // initialize fake objects. We test the permission grant and revoke on default NS.
          @@ -1866,8 +1848,8 @@ public void testPerformNamespacePermissions() throws Throwable {
               ThriftHBaseServiceHandler handler = createHandler();
               handler.grant(tce);
           
           -    List<UserPermission> permissionList = AccessControlClient.getUserPermissions(UTIL.getConnection(),
           -      "@" + defaultNameSpace, fakeUser);
           +    List<UserPermission> permissionList = AccessControlClient
           +        .getUserPermissions(UTIL.getConnection(), "@" + defaultNameSpace, fakeUser);
           
               // we only grant one R permission
               assertEquals(permissionList.size(), 1);
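The permission checks above rely on AccessControlClient; a minimal stand-alone sketch of the namespace lookup (the "@" prefix selects a namespace rather than a table; the namespace and user here are placeholders):

```java
import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class PermissionLookupSketch {
  // Lists the permissions a user holds on the default namespace.
  static List<UserPermission> namespacePermissions(Connection connection, String user)
      throws Throwable {
    return AccessControlClient.getUserPermissions(connection, "@default", user);
  }
}
```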
          @@ -1897,13 +1879,12 @@ public Optional getRegionObserver() {
           
               @Override
               public void start(CoprocessorEnvironment e) throws IOException {
          -      this.delayMillis = e.getConfiguration()
          -          .getLong("delayingregionobserver.delay", 3000);
          +      this.delayMillis = e.getConfiguration().getLong("delayingregionobserver.delay", 3000);
               }
           
               @Override
                public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
           -                         List<Cell> results) throws IOException {
           +        List<Cell> results) throws IOException {
                 try {
                   long start = EnvironmentEdgeManager.currentTime();
                   TimeUnit.MILLISECONDS.sleep(delayMillis);
          @@ -1916,4 +1897,3 @@ public void preGetOp(ObserverContext e, Get get,
               }
             }
           }
          -
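The DelayingRegionObserver reformatted above follows the standard RegionCoprocessor/RegionObserver pattern; a self-contained sketch of that pattern, with the delay property name taken from the hunk above:

```java
import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class DelayingObserverSketch implements RegionCoprocessor, RegionObserver {
  private long delayMillis;

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void start(CoprocessorEnvironment e) throws IOException {
    // Delay is configurable; the property name is the one used by the test above.
    this.delayMillis = e.getConfiguration().getLong("delayingregionobserver.delay", 3000);
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
      List<Cell> results) throws IOException {
    try {
      Thread.sleep(delayMillis); // slow the read down so it registers as a slow call
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
  }
}
```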
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
          index eceaf482b7e5..018ec71ba191 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -77,15 +77,15 @@
           
           import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
           
          -@Category({ClientTests.class, MediumTests.class})
          +@Category({ ClientTests.class, MediumTests.class })
           public class TestThriftHBaseServiceHandlerWithLabels {
           
             @ClassRule
             public static final HBaseClassTestRule CLASS_RULE =
                 HBaseClassTestRule.forClass(TestThriftHBaseServiceHandlerWithLabels.class);
           
          -  private static final Logger LOG = LoggerFactory
          -    .getLogger(TestThriftHBaseServiceHandlerWithLabels.class);
          +  private static final Logger LOG =
          +      LoggerFactory.getLogger(TestThriftHBaseServiceHandlerWithLabels.class);
             private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
           
             // Static names for tables, columns, rows, and values
          @@ -97,8 +97,8 @@ public class TestThriftHBaseServiceHandlerWithLabels {
             private static byte[] valueAname = Bytes.toBytes("valueA");
             private static byte[] valueBname = Bytes.toBytes("valueB");
             private static ColumnFamilyDescriptor[] families = new ColumnFamilyDescriptor[] {
          -    ColumnFamilyDescriptorBuilder.newBuilder(familyAname).setMaxVersions(3).build(),
          -    ColumnFamilyDescriptorBuilder.newBuilder(familyBname).setMaxVersions(2).build() };
          +      ColumnFamilyDescriptorBuilder.newBuilder(familyAname).setMaxVersions(3).build(),
          +      ColumnFamilyDescriptorBuilder.newBuilder(familyBname).setMaxVersions(2).build() };
           
             private final static String TOPSECRET = "topsecret";
             private final static String PUBLIC = "public";
          @@ -116,7 +116,7 @@ public void assertTColumnValuesEqual(List columnValuesA,
                 @Override
                 public int compare(TColumnValue o1, TColumnValue o2) {
                   return Bytes.compareTo(Bytes.add(o1.getFamily(), o1.getQualifier()),
          -            Bytes.add(o2.getFamily(), o2.getQualifier()));
          +          Bytes.add(o2.getFamily(), o2.getQualifier()));
                 }
               };
               Collections.sort(columnValuesA, comparator);
          @@ -133,11 +133,10 @@ public int compare(TColumnValue o1, TColumnValue o2) {
           
             @BeforeClass
             public static void beforeClass() throws Exception {
          -    SUPERUSER = User.createUserForTesting(conf, "admin",
          -        new String[] { "supergroup" });
          +    SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
               conf = UTIL.getConfiguration();
          -    conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS,
          -        SimpleScanLabelGenerator.class, ScanLabelGenerator.class);
          +    conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class,
          +      ScanLabelGenerator.class);
               conf.set("hbase.superuser", SUPERUSER.getShortName());
               VisibilityTestUtil.enableVisiblityLabels(conf);
               UTIL.startMiniCluster(1);
          @@ -145,8 +144,9 @@ public static void beforeClass() throws Exception {
               UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000);
               createLabels();
               Admin admin = UTIL.getAdmin();
          -    TableDescriptor tableDescriptor = TableDescriptorBuilder
          -      .newBuilder(TableName.valueOf(tableAname)).setColumnFamilies(Arrays.asList(families)).build();
          +    TableDescriptor tableDescriptor =
          +        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableAname))
          +            .setColumnFamilies(Arrays.asList(families)).build();
               admin.createTable(tableDescriptor);
               admin.close();
               setAuths();
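The setup above builds the table with the descriptor builders; extracted into a stand-alone sketch for clarity (family names and version counts are illustrative):

```java
import java.util.Arrays;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TableDescriptorSketch {
  static TableDescriptor twoFamilyTable(String name) {
    ColumnFamilyDescriptor a =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a")).setMaxVersions(3).build();
    ColumnFamilyDescriptor b =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("b")).setMaxVersions(2).build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf(name))
        .setColumnFamilies(Arrays.asList(a, b)).build();
  }
}
```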
          @@ -155,17 +155,17 @@ public static void beforeClass() throws Exception {
             private static void createLabels() throws IOException, InterruptedException {
                PrivilegedExceptionAction<VisibilityLabelsResponse> action =
                    new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
          -      @Override
          -      public VisibilityLabelsResponse run() throws Exception {
          -        String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, PUBLIC, TOPSECRET };
          -        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          -          VisibilityClient.addLabels(conn, labels);
          -        } catch (Throwable t) {
          -          throw new IOException(t);
          -        }
          -        return null;
          -      }
          -    };
          +          @Override
          +          public VisibilityLabelsResponse run() throws Exception {
          +            String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, PUBLIC, TOPSECRET };
          +            try (Connection conn = ConnectionFactory.createConnection(conf)) {
          +              VisibilityClient.addLabels(conn, labels);
          +            } catch (Throwable t) {
          +              throw new IOException(t);
          +            }
          +            return null;
          +          }
          +        };
               SUPERUSER.runAs(action);
             }
           
          @@ -198,8 +198,8 @@ public void testScanWithVisibilityLabels() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
               // insert data
          -    TColumnValue columnValue = new TColumnValue(wrap(familyAname),
          -        wrap(qualifierAname), wrap(valueAname));
          +    TColumnValue columnValue =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
                List<TColumnValue> columnValues = new ArrayList<>(1);
               columnValues.add(columnValue);
               for (int i = 0; i < 10; i++) {
          @@ -207,8 +207,8 @@ public void testScanWithVisibilityLabels() throws Exception {
                 if (i == 5) {
                   put.setCellVisibility(new TCellVisibility().setExpression(PUBLIC));
                 } else {
          -        put.setCellVisibility(new TCellVisibility().setExpression("(" + SECRET
          -            + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET));
          +        put.setCellVisibility(new TCellVisibility()
          +            .setExpression("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET));
                 }
                 handler.put(table, put);
               }
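A compact sketch of the visibility-label flow used throughout this test class: write a cell with a visibility expression, then read it back under a specific authorization. The labels and names here are placeholders:

```java
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import org.apache.hadoop.hbase.thrift2.generated.TAuthorization;
import org.apache.hadoop.hbase.thrift2.generated.TCellVisibility;
import org.apache.hadoop.hbase.thrift2.generated.TColumnValue;
import org.apache.hadoop.hbase.thrift2.generated.TGet;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TPut;
import org.apache.hadoop.hbase.util.Bytes;

public class VisibilityLabelSketch {
  static void putAndReadWithLabels(THBaseService.Iface client, ByteBuffer table) throws Exception {
    TColumnValue cv = new TColumnValue(ByteBuffer.wrap(Bytes.toBytes("f")),
        ByteBuffer.wrap(Bytes.toBytes("q")), ByteBuffer.wrap(Bytes.toBytes("v")));
    TPut put = new TPut(ByteBuffer.wrap(Bytes.toBytes("row1")), Collections.singletonList(cv));
    // Only readers holding "secret" or "confidential", but not "topsecret", may see this cell.
    put.setCellVisibility(new TCellVisibility().setExpression("(secret|confidential)&!topsecret"));
    client.put(table, put);

    TGet get = new TGet(ByteBuffer.wrap(Bytes.toBytes("row1")));
    get.setAuthorizations(new TAuthorization().setLabels(Arrays.asList("secret")));
    client.get(table, get);
  }
}
```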
          @@ -241,8 +241,7 @@ public void testScanWithVisibilityLabels() throws Exception {
                 } else if (i == 5) {
                   continue;
                 } else {
          -        assertArrayEquals(Bytes.toBytes("testScan" + (i + 1)), results.get(i)
          -            .getRow());
          +        assertArrayEquals(Bytes.toBytes("testScan" + (i + 1)), results.get(i).getRow());
                 }
               }
           
          @@ -265,18 +264,18 @@ public void testGetScannerResultsWithAuthorizations() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
               // insert data
          -    TColumnValue columnValue = new TColumnValue(wrap(familyAname),
          -        wrap(qualifierAname), wrap(valueAname));
          +    TColumnValue columnValue =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
                List<TColumnValue> columnValues = new ArrayList<>(1);
               columnValues.add(columnValue);
               for (int i = 0; i < 20; i++) {
          -      TPut put = new TPut(
          -          wrap(Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2))), columnValues);
          +      TPut put =
          +          new TPut(wrap(Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2))), columnValues);
                 if (i == 3) {
                   put.setCellVisibility(new TCellVisibility().setExpression(PUBLIC));
                 } else {
          -        put.setCellVisibility(new TCellVisibility().setExpression("(" + SECRET
          -            + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET));
          +        put.setCellVisibility(new TCellVisibility()
          +            .setExpression("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET));
                 }
                 handler.put(table, put);
               }
          @@ -303,13 +302,13 @@ public void testGetScannerResultsWithAuthorizations() throws Exception {
               assertEquals(4, results.size());
               for (int i = 0; i < 4; i++) {
                 if (i < 3) {
          -        assertArrayEquals(
          -            Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2)), results.get(i).getRow());
          +        assertArrayEquals(Bytes.toBytes("testGetScannerResults" + pad(i, (byte) 2)),
          +          results.get(i).getRow());
                 } else if (i == 3) {
                   continue;
                 } else {
          -        assertArrayEquals(
          -            Bytes.toBytes("testGetScannerResults" + pad(i + 1, (byte) 2)), results.get(i).getRow());
          +        assertArrayEquals(Bytes.toBytes("testGetScannerResults" + pad(i + 1, (byte) 2)),
          +          results.get(i).getRow());
                 }
               }
             }
          @@ -321,15 +320,13 @@ public void testGetsWithLabels() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
                List<TColumnValue> columnValues = new ArrayList<>(2);
          -    columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(valueAname)));
          -    columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname),
          -        wrap(valueBname)));
          +    columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
          +    columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
               TPut put = new TPut(wrap(rowName), columnValues);
           
               put.setColumnValues(columnValues);
          -    put.setCellVisibility(new TCellVisibility().setExpression("(" + SECRET + "|"
          -        + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET));
          +    put.setCellVisibility(new TCellVisibility()
          +        .setExpression("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET));
               handler.put(table, put);
               TGet get = new TGet(wrap(rowName));
               TAuthorization tauth = new TAuthorization();
          @@ -351,16 +348,15 @@ public void testIncrementWithTags() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
                List<TColumnValue> columnValues = new ArrayList<>(1);
          -    columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(Bytes.toBytes(1L))));
          +    columnValues
          +        .add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
               TPut put = new TPut(wrap(rowName), columnValues);
               put.setColumnValues(columnValues);
               put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
               handler.put(table, put);
           
                List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
          -    incrementColumns.add(new TColumnIncrement(wrap(familyAname),
          -        wrap(qualifierAname)));
          +    incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
               TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
               increment.setCellVisibility(new TCellVisibility().setExpression(SECRET));
               handler.increment(table, increment);
          @@ -386,16 +382,15 @@ public void testIncrementWithTagsWithNotMatchLabels() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
                List<TColumnValue> columnValues = new ArrayList<>(1);
          -    columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(Bytes.toBytes(1L))));
          +    columnValues
          +        .add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
               TPut put = new TPut(wrap(rowName), columnValues);
               put.setColumnValues(columnValues);
               put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
               handler.put(table, put);
           
                List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
          -    incrementColumns.add(new TColumnIncrement(wrap(familyAname),
          -        wrap(qualifierAname)));
          +    incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
               TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
               increment.setCellVisibility(new TCellVisibility().setExpression(SECRET));
               handler.increment(table, increment);
          @@ -418,16 +413,15 @@ public void testAppend() throws Exception {
               byte[] v1 = Bytes.toBytes(1L);
               byte[] v2 = Bytes.toBytes(5L);
                List<TColumnValue> columnValues = new ArrayList<>(1);
          -    columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(Bytes.toBytes(1L))));
          +    columnValues
          +        .add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
               TPut put = new TPut(wrap(rowName), columnValues);
               put.setColumnValues(columnValues);
               put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
               handler.put(table, put);
           
                List<TColumnValue> appendColumns = new ArrayList<>(1);
          -    appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(v2)));
          +    appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2)));
               TAppend append = new TAppend(wrap(rowName), appendColumns);
               append.setCellVisibility(new TCellVisibility().setExpression(SECRET));
               handler.append(table, append);
          @@ -448,11 +442,8 @@ public void testAppend() throws Exception {
           
             /**
              * Padding numbers to make comparison of sort order easier in a for loop
          -   *
          -   * @param n
          -   *          The number to pad.
          -   * @param pad
          -   *          The length to pad up to.
          +   * @param n The number to pad.
          +   * @param pad The length to pad up to.
              * @return The padded number as a string.
              */
             private String pad(int n, byte pad) {
          diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithReadOnly.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithReadOnly.java
          index 7fa905d3557e..c63186450474 100644
          --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithReadOnly.java
          +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithReadOnly.java
          @@ -1,4 +1,4 @@
          -/**
          +/*
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -61,7 +61,7 @@
           import org.junit.Test;
           import org.junit.experimental.categories.Category;
           
          -@Category({ClientTests.class, MediumTests.class})
          +@Category({ ClientTests.class, MediumTests.class })
           public class TestThriftHBaseServiceHandlerWithReadOnly {
           
             @ClassRule
          @@ -79,8 +79,8 @@ public class TestThriftHBaseServiceHandlerWithReadOnly {
             private static byte[] valueAname = Bytes.toBytes("valueA");
             private static byte[] valueBname = Bytes.toBytes("valueB");
             private static ColumnFamilyDescriptor[] families = new ColumnFamilyDescriptor[] {
          -    ColumnFamilyDescriptorBuilder.newBuilder(familyAname).setMaxVersions(3).build(),
          -    ColumnFamilyDescriptorBuilder.newBuilder(familyBname).setMaxVersions(2).build() };
          +      ColumnFamilyDescriptorBuilder.newBuilder(familyAname).setMaxVersions(3).build(),
          +      ColumnFamilyDescriptorBuilder.newBuilder(familyBname).setMaxVersions(2).build() };
           
             @BeforeClass
             public static void beforeClass() throws Exception {
          @@ -88,8 +88,9 @@ public static void beforeClass() throws Exception {
               UTIL.getConfiguration().set("hbase.client.retries.number", "3");
               UTIL.startMiniCluster();
               Admin admin = UTIL.getAdmin();
          -    TableDescriptor tableDescriptor = TableDescriptorBuilder
          -      .newBuilder(TableName.valueOf(tableAname)).setColumnFamilies(Arrays.asList(families)).build();
          +    TableDescriptor tableDescriptor =
          +        TableDescriptorBuilder.newBuilder(TableName.valueOf(tableAname))
          +            .setColumnFamilies(Arrays.asList(families)).build();
               admin.createTable(tableDescriptor);
               admin.close();
             }
          @@ -221,23 +222,23 @@ public void testCheckAndPutWithReadOnly() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
                List<TColumnValue> columnValuesA = new ArrayList<>(1);
          -    TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(valueAname));
          +    TColumnValue columnValueA =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
               columnValuesA.add(columnValueA);
               TPut putA = new TPut(wrap(rowName), columnValuesA);
               putA.setColumnValues(columnValuesA);
           
                List<TColumnValue> columnValuesB = new ArrayList<>(1);
          -    TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname),
          -        wrap(valueBname));
          +    TColumnValue columnValueB =
          +        new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
               columnValuesB.add(columnValueB);
               TPut putB = new TPut(wrap(rowName), columnValuesB);
               putB.setColumnValues(columnValuesB);
           
               boolean exceptionCaught = false;
               try {
          -      handler.checkAndPut(table, wrap(rowName), wrap(familyAname),
          -          wrap(qualifierAname), wrap(valueAname), putB);
          +      handler.checkAndPut(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname),
          +        wrap(valueAname), putB);
               } catch (TIOError e) {
                 exceptionCaught = true;
                 assertTrue(e.getCause() instanceof DoNotRetryIOException);
          @@ -331,13 +332,13 @@ public void testCheckAndMutateWithReadOnly() throws Exception {
               TPut putB = new TPut(row, columnValuesB);
               putB.setColumnValues(columnValuesB);
           
           -    TRowMutations tRowMutations = new TRowMutations(row,
           -        Arrays.<TMutation> asList(TMutation.put(putB)));
           +    TRowMutations tRowMutations =
           +        new TRowMutations(row, Arrays.<TMutation> asList(TMutation.put(putB)));
           
               boolean exceptionCaught = false;
               try {
                 handler.checkAndMutate(table, row, family, qualifier, TCompareOperator.EQUAL, value,
          -          tRowMutations);
          +        tRowMutations);
               } catch (TIOError e) {
                 exceptionCaught = true;
                 assertTrue(e.getCause() instanceof DoNotRetryIOException);
          @@ -357,8 +358,8 @@ public void testCheckAndDeleteWithReadOnly() throws Exception {
           
               boolean exceptionCaught = false;
               try {
          -      handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
          -          wrap(qualifierAname), wrap(valueAname), delete);
          +      handler.checkAndDelete(table, wrap(rowName), wrap(familyAname), wrap(qualifierAname),
          +        wrap(valueAname), delete);
               } catch (TIOError e) {
                 exceptionCaught = true;
                 assertTrue(e.getCause() instanceof DoNotRetryIOException);
          @@ -420,8 +421,8 @@ public void testMutateRowWithReadOnly() throws Exception {
               ByteBuffer table = wrap(tableAname);
           
                List<TColumnValue> columnValuesA = new ArrayList<>(1);
          -    TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
          -        wrap(valueAname));
          +    TColumnValue columnValueA =
          +        new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
               columnValuesA.add(columnValueA);
               TPut putA = new TPut(wrap(rowName), columnValuesA);
               putA.setColumnValues(columnValuesA);
          @@ -433,11 +434,11 @@ public void testMutateRowWithReadOnly() throws Exception {
               mutations.add(mutationA);
               TMutation mutationB = TMutation.deleteSingle(delete);
               mutations.add(mutationB);
          -    TRowMutations tRowMutations = new TRowMutations(wrap(rowName),mutations);
          +    TRowMutations tRowMutations = new TRowMutations(wrap(rowName), mutations);
           
               boolean exceptionCaught = false;
               try {
          -      handler.mutateRow(table,tRowMutations);
          +      handler.mutateRow(table, tRowMutations);
               } catch (TIOError e) {
                 exceptionCaught = true;
                 assertTrue(e.getCause() instanceof DoNotRetryIOException);
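The read-only tests above all follow the same pattern: attempt a write through the handler and expect a TIOError wrapping DoNotRetryIOException. Sketched in isolation below; an in-process handler is assumed, since the wrapped cause is only observable in-process:

```java
import java.nio.ByteBuffer;
import java.util.Collections;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.thrift2.generated.TColumnValue;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TIOError;
import org.apache.hadoop.hbase.thrift2.generated.TPut;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadOnlyWriteAttemptSketch {
  // Returns true if the gateway rejected the write the way a read-only handler is expected to.
  static boolean writeRejected(THBaseService.Iface client, ByteBuffer table) throws Exception {
    TColumnValue cv = new TColumnValue(ByteBuffer.wrap(Bytes.toBytes("f")),
        ByteBuffer.wrap(Bytes.toBytes("q")), ByteBuffer.wrap(Bytes.toBytes("v")));
    TPut put = new TPut(ByteBuffer.wrap(Bytes.toBytes("row1")), Collections.singletonList(cv));
    try {
      client.put(table, put);
      return false;
    } catch (TIOError e) {
      return e.getCause() instanceof DoNotRetryIOException;
    }
  }
}
```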
          diff --git a/hbase-zookeeper/pom.xml b/hbase-zookeeper/pom.xml
          index 27e11832c315..f5b2ec660b2f 100644
          --- a/hbase-zookeeper/pom.xml
          +++ b/hbase-zookeeper/pom.xml
          @@ -1,6 +1,6 @@
          -
          +
           
          -
             4.0.0
             
          -    hbase-build-configuration
               org.apache.hbase
          +    hbase-build-configuration
               3.0.0-alpha-3-SNAPSHOT
               ../hbase-build-configuration
             
             hbase-zookeeper
             Apache HBase - Zookeeper
             Zookeeper Helpers for HBase
          -
          -  
          -    
          -    
          -      
          -      
          -        src/test/resources/META-INF/
          -        META-INF/
          -        
          -          NOTICE
          -        
          -        true
          -      
          -      
          -        src/test/resources
          -        
          -          **/**
          -        
          -      
          -    
          -    
          -      
          -      
          -        
          -        maven-assembly-plugin
          -        
          -          true
          -        
          -      
          -      
          -        org.apache.maven.plugins
          -        maven-checkstyle-plugin
          -        
          -          true
          -        
          -      
          -      
          -        net.revelc.code
          -        warbucks-maven-plugin
          -      
          -    
          -  
             
               
                 org.apache.hbase.thirdparty
          @@ -185,6 +140,51 @@
                 test
               
             
          +
          +  
          +    
          +    
          +      
          +      
          +        META-INF/
          +        true
          +        src/test/resources/META-INF/
          +        
          +          NOTICE
          +        
          +      
          +      
          +        src/test/resources
          +        
          +          **/**
          +        
          +      
          +    
          +    
          +      
          +      
          +        
          +        maven-assembly-plugin
          +        
          +          true
          +        
          +      
          +      
          +        org.apache.maven.plugins
          +        maven-checkstyle-plugin
          +        
          +          true
          +        
          +      
          +      
          +        net.revelc.code
          +        warbucks-maven-plugin
          +      
          +    
          +  
             
               
               
          @@ -197,10 +197,10 @@
                       
                         
                           license-javadocs
          -                prepare-package
                           
                             copy-resources
                           
          +                prepare-package
                           
                             ${project.build.directory}/apidocs
                             
          @@ -241,7 +241,9 @@
               
                 hadoop-3.0
                 
          -        !hadoop.profile
          +        
          +          !hadoop.profile
          +        
                 
                 
                   
          @@ -260,10 +262,10 @@
                       
                         
                           create-mrapp-generated-classpath
          -                generate-test-resources
                           
                             build-classpath
                           
          +                generate-test-resources
                           
                             
                       
          @@ -348,7 +324,7 @@
                                   
                                 
                                 
          -                        
          +                        
                                 
                               
                             
          @@ -357,6 +333,31 @@
                       
                     
                   
          +        
          +          
          +            org.apache.maven.plugins
          +            maven-eclipse-plugin
          +            
          +              
          +                org.jamon.project.jamonnature
          +              
          +              
          +                org.jamon.project.templateBuilder
          +                org.eclipse.jdt.core.javabuilder
          +                org.jamon.project.markerUpdater
          +              
          +              
          +                
          +                  .settings/org.jamon.prefs
          +                  # now
          +                    eclipse.preferences.version=1
          +                    templateSourceDir=src/main/jamon
          +                    templateOutputDir=target/generated-jamon
          +                
          +              
          +            
          +          
          +        
                 
               
             
          diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java
          index 4abcc482e0be..e37bb356c9cc 100644
          --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java
          +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java
          @@ -1,5 +1,4 @@
           /*
          - *
            * Licensed to the Apache Software Foundation (ASF) under one
            * or more contributor license agreements.  See the NOTICE file
            * distributed with this work for additional information
          @@ -28,11 +27,10 @@
           import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
           
           /**
          - * Tracker on cluster settings up in zookeeper.
          - * This is not related to {@link org.apache.hadoop.hbase.ClusterMetrics}. That class
          - * is a data structure that holds snapshot of current view on cluster. This class
          - * is about tracking cluster attributes up in zookeeper.
          - *
          + * Tracker on cluster settings up in zookeeper. This is not related to
          + * {@link org.apache.hadoop.hbase.ClusterMetrics}. That class is a data structure that holds
          + * snapshot of current view on cluster. This class is about tracking cluster attributes up in
          + * zookeeper.
            */
           @InterfaceAudience.Private
           public class ClusterStatusTracker extends ZKNodeTracker {
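As context for ClusterStatusTracker, a minimal usage sketch against a ZKWatcher; the identifier string and the inline Abortable are placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class ClusterStatusTrackerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Abortable abortable = new Abortable() {
      @Override
      public void abort(String why, Throwable e) {
        throw new RuntimeException(why, e);
      }

      @Override
      public boolean isAborted() {
        return false;
      }
    };
    try (ZKWatcher watcher = new ZKWatcher(conf, "cluster-status-sketch", abortable)) {
      ClusterStatusTracker tracker = new ClusterStatusTracker(watcher, abortable);
      tracker.start(); // begin watching the cluster state znode
      System.out.println("cluster up: " + tracker.isClusterUp());
    }
  }
}
```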
          @@ -40,11 +38,10 @@ public class ClusterStatusTracker extends ZKNodeTracker {
           
             /**
              * Creates a cluster status tracker.
          -   *
           -   *
           -   * <p>After construction, use {@link #start} to kick off tracking.
           -   *
           +   * <p>
          + * After construction, use {@link #start} to kick off tracking. * @param watcher reference to the {@link ZKWatcher} which also contains configuration and - * constants + * constants * @param abortable used to abort if a fatal error occurs */ public ClusterStatusTracker(ZKWatcher watcher, Abortable abortable) { @@ -53,8 +50,8 @@ public ClusterStatusTracker(ZKWatcher watcher, Abortable abortable) { /** * Checks if cluster is up. - * @return true if the cluster up ('shutdown' is its name up in zk) znode - * exists with data, false if not + * @return true if the cluster up ('shutdown' is its name up in zk) znode exists with data, false + * if not */ public boolean isClusterUp() { return super.getData(false) != null; @@ -64,12 +61,11 @@ public boolean isClusterUp() { * Sets the cluster as up. * @throws KeeperException unexpected zk exception */ - public void setClusterUp() - throws KeeperException { - byte [] upData = toByteArray(); + public void setClusterUp() throws KeeperException { + byte[] upData = toByteArray(); try { ZKUtil.createAndWatch(watcher, watcher.getZNodePaths().clusterStateZNode, upData); - } catch(KeeperException.NodeExistsException nee) { + } catch (KeeperException.NodeExistsException nee) { ZKUtil.setData(watcher, watcher.getZNodePaths().clusterStateZNode, upData); } } @@ -78,23 +74,20 @@ public void setClusterUp() * Sets the cluster as down by deleting the znode. * @throws KeeperException unexpected zk exception */ - public void setClusterDown() - throws KeeperException { + public void setClusterDown() throws KeeperException { try { ZKUtil.deleteNode(watcher, watcher.getZNodePaths().clusterStateZNode); - } catch(KeeperException.NoNodeException nne) { - LOG.warn("Attempted to set cluster as down but already down, cluster " + - "state node (" + watcher.getZNodePaths().clusterStateZNode + ") not found"); + } catch (KeeperException.NoNodeException nne) { + LOG.warn("Attempted to set cluster as down but already down, cluster " + "state node (" + + watcher.getZNodePaths().clusterStateZNode + ") not found"); } } /** - * @return Content of the clusterup znode as a serialized pb with the pb - * magic as prefix. + * @return Content of the clusterup znode as a serialized pb with the pb magic as prefix. */ - static byte [] toByteArray() { - ZooKeeperProtos.ClusterUp.Builder builder = - ZooKeeperProtos.ClusterUp.newBuilder(); + static byte[] toByteArray() { + ZooKeeperProtos.ClusterUp.Builder builder = ZooKeeperProtos.ClusterUp.newBuilder(); builder.setStartDate(new java.util.Date().toString()); return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java index 0654c23b8173..1bfa28cb1aff 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +18,6 @@ package org.apache.hadoop.hbase.zookeeper; import java.util.concurrent.CountDownLatch; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; @@ -42,12 +39,10 @@ public class DeletionListener extends ZKListener { /** * Create a new instance of the deletion watcher. * @param zkWatcher ZookeeperWatcher instance - * @param pathToWatch (Fully qualified) ZNode path that we are waiting to - * be deleted. + * @param pathToWatch (Fully qualified) ZNode path that we are waiting to be deleted. * @param deletedLatch Count down on this latch when deletion has occurred. */ - public DeletionListener(ZKWatcher zkWatcher, String pathToWatch, - CountDownLatch deletedLatch) { + public DeletionListener(ZKWatcher zkWatcher, String pathToWatch, CountDownLatch deletedLatch) { super(zkWatcher); this.pathToWatch = pathToWatch; this.deletedLatch = deletedLatch; @@ -56,16 +51,15 @@ public DeletionListener(ZKWatcher zkWatcher, String pathToWatch, /** * Check if an exception has occurred when re-setting the watch. - * @return True if we were unable to re-set a watch on a ZNode due to - * an exception. + * @return True if we were unable to re-set a watch on a ZNode due to an exception. */ public boolean hasException() { return exception != null; } /** - * Get the last exception which has occurred when re-setting the watch. - * Use hasException() to check whether or not an exception has occurred. + * Get the last exception which has occurred when re-setting the watch. Use hasException() to + * check whether or not an exception has occurred. * @return The last exception observed when re-setting the watch. */ public Throwable getException() { diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java index ce6a5feacc03..ed1e9d9ca31e 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java @@ -28,8 +28,11 @@ public final class EmptyWatcher implements Watcher { // Used in this package but also by tests so needs to be public public static final EmptyWatcher instance = new EmptyWatcher(); - private EmptyWatcher() {} + + private EmptyWatcher() { + } @Override - public void process(WatchedEvent event) {} + public void process(WatchedEvent event) { + } } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java index b968b6591d91..e1abdb251934 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,7 +28,6 @@ import java.util.List; import java.util.Map.Entry; import java.util.Properties; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -39,21 +37,20 @@ import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; +import org.apache.zookeeper.server.DatadirCleanupManager; import org.apache.zookeeper.server.ServerConfig; import org.apache.zookeeper.server.ZooKeeperServerMain; import org.apache.zookeeper.server.admin.AdminServer; import org.apache.zookeeper.server.quorum.QuorumPeerConfig; import org.apache.zookeeper.server.quorum.QuorumPeerMain; -import org.apache.zookeeper.server.DatadirCleanupManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * HBase's version of ZooKeeper's QuorumPeer. When HBase is set to manage - * ZooKeeper, this class is used to start up QuorumPeer instances. By doing - * things in here rather than directly calling to ZooKeeper, we have more - * control over the process. This class uses {@link ZKConfig} to get settings - * from the hbase-site.xml file. + * HBase's version of ZooKeeper's QuorumPeer. When HBase is set to manage ZooKeeper, this class is + * used to start up QuorumPeer instances. By doing things in here rather than directly calling to + * ZooKeeper, we have more control over the process. This class uses {@link ZKConfig} to get + * settings from the hbase-site.xml file. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Evolving @@ -77,8 +74,7 @@ public static void main(String[] args) { // login the zookeeper server principal (if using security) ZKAuthentication.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE, - HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, - zkConfig.getClientPortAddress().getHostName()); + HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, zkConfig.getClientPortAddress().getHostName()); runZKServer(zkConfig); } catch (Exception e) { @@ -88,19 +84,16 @@ public static void main(String[] args) { } private static void runZKServer(QuorumPeerConfig zkConfig) - throws IOException, AdminServer.AdminServerException { + throws IOException, AdminServer.AdminServerException { /** - * Start and schedule the purge task - * autopurge.purgeInterval is 0 by default,so in fact the DatadirCleanupManager task will not - * be started to clean the logs by default. Config is recommended only for standalone server. + * Start and schedule the purge task autopurge.purgeInterval is 0 by default,so in fact the + * DatadirCleanupManager task will not be started to clean the logs by default. Config is + * recommended only for standalone server. 
*/ - DatadirCleanupManager purgeMgr=new DatadirCleanupManager( - zkConfig.getDataDir(), - zkConfig.getDataLogDir(), - zkConfig.getSnapRetainCount(), - zkConfig.getPurgeInterval()); + DatadirCleanupManager purgeMgr = new DatadirCleanupManager(zkConfig.getDataDir(), + zkConfig.getDataLogDir(), zkConfig.getSnapRetainCount(), zkConfig.getPurgeInterval()); purgeMgr.start(); if (zkConfig.isDistributed()) { @@ -122,23 +115,20 @@ static void writeMyID(Properties properties) throws IOException { long myId = -1; Configuration conf = HBaseConfiguration.create(); - String myAddress = Strings.domainNamePointerToHostName(DNS.getDefaultHost( - conf.get("hbase.zookeeper.dns.interface","default"), - conf.get("hbase.zookeeper.dns.nameserver","default"))); + String myAddress = Strings.domainNamePointerToHostName( + DNS.getDefaultHost(conf.get("hbase.zookeeper.dns.interface", "default"), + conf.get("hbase.zookeeper.dns.nameserver", "default"))); List ips = new ArrayList<>(); // Add what could be the best (configured) match - ips.add(myAddress.contains(".") ? - myAddress : - StringUtils.simpleHostname(myAddress)); + ips.add(myAddress.contains(".") ? myAddress : StringUtils.simpleHostname(myAddress)); // For all nics get all hostnames and IPs Enumeration nics = NetworkInterface.getNetworkInterfaces(); - while(nics.hasMoreElements()) { - Enumeration rawAdrs = - ((NetworkInterface)nics.nextElement()).getInetAddresses(); - while(rawAdrs.hasMoreElements()) { + while (nics.hasMoreElements()) { + Enumeration rawAdrs = ((NetworkInterface) nics.nextElement()).getInetAddresses(); + while (rawAdrs.hasMoreElements()) { InetAddress inet = (InetAddress) rawAdrs.nextElement(); ips.add(StringUtils.simpleHostname(inet.getHostName())); ips.add(inet.getHostAddress()); @@ -162,11 +152,11 @@ static void writeMyID(Properties properties) throws IOException { // Set the max session timeout from the provided client-side timeout properties.setProperty("maxSessionTimeout", conf.get(HConstants.ZK_SESSION_TIMEOUT, - Integer.toString(HConstants.DEFAULT_ZK_SESSION_TIMEOUT))); + Integer.toString(HConstants.DEFAULT_ZK_SESSION_TIMEOUT))); if (myId == -1) { - throw new IOException("Could not find my address: " + myAddress + - " in list of ZooKeeper quorum servers"); + throw new IOException( + "Could not find my address: " + myAddress + " in list of ZooKeeper quorum servers"); } String dataDirStr = properties.get("dataDir").toString().trim(); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java index e63bfc56ac9c..c934f077dd87 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import java.util.concurrent.CountDownLatch; /** - * Placeholder of an instance which will be accessed by other threads - * but is not yet created. Thread safe. + * Placeholder of an instance which will be accessed by other threads but is not yet created. Thread + * safe. */ class InstancePending { // Based on a subtle part of the Java Language Specification, @@ -44,9 +43,8 @@ private static class InstanceHolder { } /** - * Returns the instance given by the method {@link #prepare}. 
- * This is an uninterruptible blocking method - * and the interruption flag will be set just before returning if any. + * Returns the instance given by the method {@link #prepare}. This is an uninterruptible blocking + * method and the interruption flag will be set just before returning if any. */ T get() { InstanceHolder instanceHolder; @@ -67,9 +65,8 @@ T get() { } /** - * Associates the given instance for the method {@link #get}. - * This method should be called once, and {@code instance} should be non-null. - * This method is expected to call as soon as possible + * Associates the given instance for the method {@link #get}. This method should be called once, + * and {@code instance} should be non-null. This method is expected to call as soon as possible * because the method {@code get} is uninterruptibly blocked until this method is called. */ void prepare(T instance) { diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java index f00dfa8b7a07..8403b6a97a12 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; - import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.util.Bytes; @@ -37,8 +36,7 @@ public class LoadBalancerTracker extends ZKNodeTracker { private static final Logger LOG = LoggerFactory.getLogger(LoadBalancerTracker.class); - public LoadBalancerTracker(ZKWatcher watcher, - Abortable abortable) { + public LoadBalancerTracker(ZKWatcher watcher, Abortable abortable) { super(watcher, watcher.getZNodePaths().balancerZNode, abortable); } @@ -46,7 +44,7 @@ public LoadBalancerTracker(ZKWatcher watcher, * Return true if the balance switch is on, false otherwise */ public boolean isBalancerOn() { - byte [] upData = super.getData(false); + byte[] upData = super.getData(false); try { // if data in ZK is null, use default of on. return upData == null || parseFrom(upData).getBalancerOn(); @@ -59,33 +57,32 @@ public boolean isBalancerOn() { /** * Set the balancer on/off. 
- * * @param balancerOn true if the balancher should be on, false otherwise * @throws KeeperException if a ZooKeeper operation fails */ public void setBalancerOn(boolean balancerOn) throws KeeperException { - byte [] upData = toByteArray(balancerOn); + byte[] upData = toByteArray(balancerOn); try { ZKUtil.setData(watcher, watcher.getZNodePaths().balancerZNode, upData); - } catch(KeeperException.NoNodeException nne) { + } catch (KeeperException.NoNodeException nne) { ZKUtil.createAndWatch(watcher, watcher.getZNodePaths().balancerZNode, upData); } super.nodeDataChanged(watcher.getZNodePaths().balancerZNode); } - private byte [] toByteArray(boolean isBalancerOn) { + private byte[] toByteArray(boolean isBalancerOn) { LoadBalancerProtos.LoadBalancerState.Builder builder = - LoadBalancerProtos.LoadBalancerState.newBuilder(); + LoadBalancerProtos.LoadBalancerState.newBuilder(); builder.setBalancerOn(isBalancerOn); return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); } - private LoadBalancerProtos.LoadBalancerState parseFrom(byte [] pbBytes) - throws DeserializationException { + private LoadBalancerProtos.LoadBalancerState parseFrom(byte[] pbBytes) + throws DeserializationException { ProtobufUtil.expectPBMagicPrefix(pbBytes); LoadBalancerProtos.LoadBalancerState.Builder builder = - LoadBalancerProtos.LoadBalancerState.newBuilder(); + LoadBalancerProtos.LoadBalancerState.newBuilder(); try { int magicLen = ProtobufUtil.lengthOfPBMagic(); ProtobufUtil.mergeFrom(builder, pbBytes, magicLen, pbBytes.length - magicLen); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java index 34b5cdf11009..61308dac35e8 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java @@ -40,20 +40,18 @@ /** * Manages the location of the current active Master for the RegionServer. *
<p>
          - * Listens for ZooKeeper events related to the master address. The node - * /master will contain the address of the current master. - * This listener is interested in - * NodeDeleted and NodeCreated events on - * /master. + * Listens for ZooKeeper events related to the master address. The node /master will + * contain the address of the current master. This listener is interested in + * NodeDeleted and NodeCreated events on /master. *
<p>
          * Utilizes {@link ZKNodeTracker} for zk interactions. *
<p>
          * You can get the current master via {@link #getMasterAddress()} or via - * {@link #getMasterAddress(ZKWatcher)} if you do not have a running - * instance of this Tracker in your context. + * {@link #getMasterAddress(ZKWatcher)} if you do not have a running instance of this Tracker in + * your context. *
<p>
          - * This class also includes utility for interacting with the master znode, for - * writing and reading the znode content. + * This class also includes utility for interacting with the master znode, for writing and reading + * the znode content. */ @InterfaceAudience.Private public class MasterAddressTracker extends ZKNodeTracker { @@ -61,13 +59,10 @@ public class MasterAddressTracker extends ZKNodeTracker { private volatile List backupMasters = Collections.emptyList(); /** - * Construct a master address listener with the specified - * zookeeper reference. + * Construct a master address listener with the specified zookeeper reference. *
<p>
          - * This constructor does not trigger any actions, you must call methods - * explicitly. Normally you will just want to execute {@link #start()} to - * begin tracking of the master address. - * + * This constructor does not trigger any actions, you must call methods explicitly. Normally you + * will just want to execute {@link #start()} to begin tracking of the master address. * @param watcher zk reference and watcher * @param abortable abortable in case of fatal error */ @@ -96,8 +91,7 @@ public void nodeChildrenChanged(String path) { } /** - * Get the address of the current master if one is available. Returns null - * if no current master. + * Get the address of the current master if one is available. Returns null if no current master. * @return Server name or null if timed out. */ public ServerName getMasterAddress() { @@ -105,8 +99,8 @@ public ServerName getMasterAddress() { } /** - * Get the info port of the current master of one is available. - * Return 0 if no current master or zookeeper is unavailable + * Get the info port of the current master of one is available. Return 0 if no current master or + * zookeeper is unavailable * @return info port or 0 if timed out */ public int getMasterInfoPort() { @@ -121,15 +115,16 @@ public int getMasterInfoPort() { return 0; } } + /** - * Get the info port of the backup master if it is available. - * Return 0 if no backup master or zookeeper is unavailable + * Get the info port of the backup master if it is available. Return 0 if no backup master or + * zookeeper is unavailable * @param sn server name of backup master * @return info port or 0 if timed out or exceptions */ public int getBackupMasterInfoPort(final ServerName sn) { - String backupZNode = ZNodePaths.joinZNode(watcher.getZNodePaths().backupMasterAddressesZNode, - sn.toString()); + String backupZNode = + ZNodePaths.joinZNode(watcher.getZNodePaths().backupMasterAddressesZNode, sn.toString()); try { byte[] data = ZKUtil.getData(watcher, backupZNode); final ZooKeeperProtos.Master backup = parse(data); @@ -144,10 +139,8 @@ public int getBackupMasterInfoPort(final ServerName sn) { } /** - * Get the address of the current master if one is available. Returns null - * if no current master. If refresh is set, try to load the data from ZK again, - * otherwise, cached data will be used. - * + * Get the address of the current master if one is available. Returns null if no current master. + * If refresh is set, try to load the data from ZK again, otherwise, cached data will be used. * @param refresh whether to refresh the data by calling ZK directly. * @return Server name or null if timed out. */ @@ -161,25 +154,23 @@ public ServerName getMasterAddress(final boolean refresh) { } /** - * Get master address. - * Use this instead of {@link #getMasterAddress()} if you do not have an + * Get master address. Use this instead of {@link #getMasterAddress()} if you do not have an * instance of this tracker in your context. * @param zkw ZKWatcher to use - * @return ServerName stored in the the master address znode or null if no - * znode present. + * @return ServerName stored in the the master address znode or null if no znode present. 
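As an illustrative aside (not part of the change set), the static helpers being reflowed here might be used along these lines when no tracker instance is running; the configuration object, the identifier string, and the console output are assumptions:

    // Sketch only: resolve the active master directly from ZooKeeper via the static helpers.
    static ServerName lookupActiveMaster(Configuration conf) throws Exception {
      try (ZKWatcher zkw = new ZKWatcher(conf, "master-address-example", null)) {
        ServerName master = MasterAddressTracker.getMasterAddress(zkw);
        int infoPort = MasterAddressTracker.getMasterInfoPort(zkw);
        System.out.println("Active master: " + master + " (info port " + infoPort + ")");
        return master;
      }
    }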
* @throws KeeperException if a ZooKeeper operation fails * @throws IOException if the address of the ZooKeeper master cannot be retrieved */ public static ServerName getMasterAddress(final ZKWatcher zkw) - throws KeeperException, IOException { - byte [] data; + throws KeeperException, IOException { + byte[] data; try { data = ZKUtil.getData(zkw, zkw.getZNodePaths().masterAddressZNode); } catch (InterruptedException e) { throw new InterruptedIOException(); } // TODO javadoc claims we return null in this case. :/ - if (data == null){ + if (data == null) { throw new IOException("Can't get master address from ZooKeeper; znode data == null"); } try { @@ -192,13 +183,11 @@ public static ServerName getMasterAddress(final ZKWatcher zkw) } /** - * Get master info port. - * Use this instead of {@link #getMasterInfoPort()} if you do not have an + * Get master info port. Use this instead of {@link #getMasterInfoPort()} if you do not have an * instance of this tracker in your context. * @param zkw ZKWatcher to use - * @return master info port in the the master address znode or null if no - * znode present. - * // TODO can't return null for 'int' return type. non-static verison returns 0 + * @return master info port in the the master address znode or null if no znode present. // TODO + * can't return null for 'int' return type. non-static verison returns 0 * @throws KeeperException if a ZooKeeper operation fails * @throws IOException if the address of the ZooKeeper master cannot be retrieved */ @@ -223,19 +212,16 @@ public static int getMasterInfoPort(final ZKWatcher zkw) throws KeeperException, } /** - * Get backup master info port. - * Use this instead of {@link #getBackupMasterInfoPort(ServerName)} if you do not have an - * instance of this tracker in your context. - * + * Get backup master info port. Use this instead of {@link #getBackupMasterInfoPort(ServerName)} + * if you do not have an instance of this tracker in your context. * @param zkw ZKWatcher to use - * @param sn ServerName of the backup master - * @return backup master info port in the the master address znode or 0 if no - * znode present. + * @param sn ServerName of the backup master + * @return backup master info port in the the master address znode or 0 if no znode present. * @throws KeeperException if a ZooKeeper operation fails - * @throws IOException if the address of the ZooKeeper master cannot be retrieved + * @throws IOException if the address of the ZooKeeper master cannot be retrieved */ public static int getBackupMasterInfoPort(ZKWatcher zkw, final ServerName sn) - throws KeeperException, IOException { + throws KeeperException, IOException { byte[] data; try { data = ZKUtil.getData(zkw, @@ -260,19 +246,17 @@ public static int getBackupMasterInfoPort(ZKWatcher zkw, final ServerName sn) } /** - * Set master address into the master znode or into the backup - * subdirectory of backup masters; switch off the passed in znode - * path. + * Set master address into the master znode or into the backup subdirectory of backup + * masters; switch off the passed in znode path. * @param zkw The ZKWatcher to use. - * @param znode Where to create the znode; could be at the top level or it - * could be under backup masters + * @param znode Where to create the znode; could be at the top level or it could be under backup + * masters * @param master ServerName of the current master must not be null. 
* @return true if node created, false if not; a watch is set in both cases * @throws KeeperException if a ZooKeeper operation fails */ - public static boolean setMasterAddress(final ZKWatcher zkw, - final String znode, final ServerName master, int infoPort) - throws KeeperException { + public static boolean setMasterAddress(final ZKWatcher zkw, final String znode, + final ServerName master, int infoPort) throws KeeperException { return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master, infoPort)); } @@ -286,8 +270,7 @@ public boolean hasMaster() { /** * @param sn must not be null - * @return Content of the master znode as a serialized pb with the pb - * magic as prefix. + * @return Content of the master znode as a serialized pb with the pb magic as prefix. */ static byte[] toByteArray(final ServerName sn, int infoPort) { ZooKeeperProtos.Master.Builder mbuilder = ZooKeeperProtos.Master.newBuilder(); @@ -324,7 +307,7 @@ public static ZooKeeperProtos.Master parse(byte[] data) throws DeserializationEx * @param content must not be null */ public static boolean deleteIfEquals(ZKWatcher zkw, final String content) { - if (content == null){ + if (content == null) { throw new IllegalArgumentException("Content must not be null"); } @@ -355,13 +338,13 @@ public List getBackupMasters() { * @return List of backup masters. * @throws InterruptedIOException if there is any issue fetching the required data from Zookeeper. */ - public static List getBackupMastersAndRenewWatch( - ZKWatcher zkw) throws InterruptedIOException { + public static List getBackupMastersAndRenewWatch(ZKWatcher zkw) + throws InterruptedIOException { // Build Set of backup masters from ZK nodes List backupMasterStrings = null; try { backupMasterStrings = ZKUtil.listChildrenAndWatchForNewChildren(zkw, - zkw.getZNodePaths().backupMasterAddressesZNode); + zkw.getZNodePaths().backupMasterAddressesZNode); } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to list backup servers"), e); } @@ -369,12 +352,12 @@ public static List getBackupMastersAndRenewWatch( List backupMasters = Collections.emptyList(); if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) { backupMasters = new ArrayList<>(backupMasterStrings.size()); - for (String s: backupMasterStrings) { + for (String s : backupMasterStrings) { try { - byte [] bytes; + byte[] bytes; try { - bytes = ZKUtil.getData(zkw, ZNodePaths.joinZNode( - zkw.getZNodePaths().backupMasterAddressesZNode, s)); + bytes = ZKUtil.getData(zkw, + ZNodePaths.joinZNode(zkw.getZNodePaths().backupMasterAddressesZNode, s)); } catch (InterruptedException e) { throw new InterruptedIOException(); } @@ -389,8 +372,7 @@ public static List getBackupMastersAndRenewWatch( backupMasters.add(sn); } } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to get information about " + - "backup servers"), e); + LOG.warn(zkw.prefix("Unable to get information about " + "backup servers"), e); } } backupMasters.sort(Comparator.comparing(ServerName::getServerName)); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java index fc9462ac1017..dc549306a67d 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) 
under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,16 +18,13 @@ package org.apache.hadoop.hbase.zookeeper; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; /** - * Tracks the master Maintenance Mode via ZK. - * - * Unused. Used to be set by hbck to prevent concurrent splits/merges, but those use PV2 now and - * HBCK2 uses it's own service, so no longer an issue. Left in, in case we need to use this for - * the incomplete parts of HBCK2... + * Tracks the master Maintenance Mode via ZK. Unused. Used to be set by hbck to prevent concurrent + * splits/merges, but those use PV2 now and HBCK2 uses it's own service, so no longer an issue. Left + * in, in case we need to use this for the incomplete parts of HBCK2... */ @InterfaceAudience.Private public class MasterMaintenanceModeTracker extends ZKListener { @@ -51,9 +47,8 @@ private void update(String path) { private void update() { try { - List children = - ZKUtil.listChildrenAndWatchForNewChildren(watcher, - watcher.getZNodePaths().masterMaintZNode); + List children = ZKUtil.listChildrenAndWatchForNewChildren(watcher, + watcher.getZNodePaths().masterMaintZNode); hasChildren = (children != null && children.size() > 0); } catch (KeeperException e) { // Ignore the ZK keeper exception diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java index 57f5f3ec4892..26abaeed6fe0 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java @@ -53,12 +53,12 @@ private MetaTableLocator() { } /** - * Gets the meta region location, if available. Does not block. + * Gets the meta region location, if available. Does not block. * @param zkw zookeeper connection to use * @return server name or null if we failed to get the data. */ @RestrictedApi(explanation = "Should only be called in tests or ZKUtil", link = "", - allowedOnPath = ".*/src/test/.*|.*/ZKDump\\.java") + allowedOnPath = ".*/src/test/.*|.*/ZKDump\\.java") public static ServerName getMetaRegionLocation(final ZKWatcher zkw) { try { RegionState state = getMetaRegionState(zkw); @@ -69,13 +69,13 @@ public static ServerName getMetaRegionLocation(final ZKWatcher zkw) { } /** - * Gets the meta region location, if available. Does not block. + * Gets the meta region location, if available. Does not block. 
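Purely as an illustrative aside (not part of the change set), the non-blocking lookup described here could be exercised from test code roughly as follows; the ZKWatcher is assumed to be already connected:

    // Sketch only: read the current hbase:meta location; null means it is not yet published.
    static ServerName readMetaLocation(ZKWatcher zkw) {
      ServerName meta = MetaTableLocator.getMetaRegionLocation(zkw);
      if (meta == null) {
        System.out.println("hbase:meta location not yet set in ZooKeeper");
      }
      return meta;
    }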
* @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation * @param replicaId the ID of the replica * @return server name */ @RestrictedApi(explanation = "Should only be called in self or ZKUtil", link = "", - allowedOnPath = ".*(MetaTableLocator|ZKDump)\\.java") + allowedOnPath = ".*(MetaTableLocator|ZKDump)\\.java") public static ServerName getMetaRegionLocation(final ZKWatcher zkw, int replicaId) { try { RegionState state = getMetaRegionState(zkw, replicaId); @@ -97,7 +97,7 @@ public static ServerName getMetaRegionLocation(final ZKWatcher zkw, int replicaI * @throws NotAllMetaRegionsOnlineException if a meta or root region is not online */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") public static ServerName waitMetaRegionLocation(ZKWatcher zkw, long timeout) throws InterruptedException, NotAllMetaRegionsOnlineException { return waitMetaRegionLocation(zkw, RegionInfo.DEFAULT_REPLICA_ID, timeout); @@ -119,8 +119,8 @@ private static ServerName waitMetaRegionLocation(ZKWatcher zkw, int replicaId, l throws InterruptedException, NotAllMetaRegionsOnlineException { try { if (ZKUtil.checkExists(zkw, zkw.getZNodePaths().baseZNode) == -1) { - String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. " + - "There could be a mismatch with the one configured in the master."; + String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. " + + "There could be a mismatch with the one configured in the master."; LOG.error(errorMsg); throw new IllegalArgumentException(errorMsg); } @@ -137,24 +137,23 @@ private static ServerName waitMetaRegionLocation(ZKWatcher zkw, int replicaId, l } /** - * Sets the location of hbase:meta in ZooKeeper to the - * specified server address. + * Sets the location of hbase:meta in ZooKeeper to the specified server address. * @param zookeeper zookeeper reference * @param serverName The server hosting hbase:meta * @param state The region transition state * @throws KeeperException unexpected zookeeper exception */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") - public static void setMetaLocation(ZKWatcher zookeeper, - ServerName serverName, RegionState.State state) throws KeeperException { + allowedOnPath = ".*/src/test/.*") + public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, + RegionState.State state) throws KeeperException { setMetaLocation(zookeeper, serverName, RegionInfo.DEFAULT_REPLICA_ID, state); } /** * Sets the location of hbase:meta in ZooKeeper to the specified server address. * @param zookeeper reference to the {@link ZKWatcher} which also contains configuration and - * operation + * operation * @param serverName the name of the server * @param replicaId the ID of the replica * @param state the state of the region @@ -170,23 +169,21 @@ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, i serverName, state); // Make the MetaRegionServer pb and then get its bytes and save this as // the znode content. 
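    // Illustrative consolidation (not part of the change set) of the step described in the
    // comment above: build the MetaRegionServer protobuf, prefix it with the PB magic, then
    // write the replica znode, creating it if it does not exist yet. Mirrors the hunk that
    // follows; variable names are those of the surrounding setMetaLocation method.
    MetaRegionServer pbrsr = MetaRegionServer.newBuilder()
      .setServer(ProtobufUtil.toServerName(serverName))
      .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
      .setState(state.convert())
      .build();
    byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
    try {
      ZKUtil.setData(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data);
    } catch (KeeperException.NoNodeException nne) {
      // First write for this replica: create the znode and set a watch instead.
      ZKUtil.createAndWatch(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data);
    }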
- MetaRegionServer pbrsr = MetaRegionServer.newBuilder() - .setServer(ProtobufUtil.toServerName(serverName)) - .setRpcVersion(HConstants.RPC_CURRENT_VERSION) - .setState(state.convert()).build(); + MetaRegionServer pbrsr = + MetaRegionServer.newBuilder().setServer(ProtobufUtil.toServerName(serverName)) + .setRpcVersion(HConstants.RPC_CURRENT_VERSION).setState(state.convert()).build(); byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); try { - ZKUtil.setData(zookeeper, - zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data); - } catch(KeeperException.NoNodeException nne) { + ZKUtil.setData(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data); + } catch (KeeperException.NoNodeException nne) { if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { LOG.debug("hbase:meta region location doesn't exist, create it"); } else { - LOG.debug("hbase:meta region location doesn't exist for replicaId=" + replicaId + - ", create it"); + LOG.debug( + "hbase:meta region location doesn't exist for replicaId=" + replicaId + ", create it"); } ZKUtil.createAndWatch(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), - data); + data); } } @@ -194,14 +191,13 @@ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, i * Load the meta region state from the meta server ZNode. */ @RestrictedApi(explanation = "Should only be called in self or tests", link = "", - allowedOnPath = ".*/src/test/.*|.*/MetaTableLocator\\.java") + allowedOnPath = ".*/src/test/.*|.*/MetaTableLocator\\.java") public static RegionState getMetaRegionState(ZKWatcher zkw) throws KeeperException { return getMetaRegionState(zkw, RegionInfo.DEFAULT_REPLICA_ID); } /** * Load the meta region state from the meta region server ZNode. - * * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation * @param replicaId the ID of the replica * @return regionstate @@ -227,23 +223,21 @@ public static RegionState getMetaRegionState(ZKWatcher zkw, int replicaId) * @throws KeeperException unexpected zookeeper exception */ @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") - public static void deleteMetaLocation(ZKWatcher zookeeper) - throws KeeperException { + allowedOnPath = ".*/src/test/.*") + public static void deleteMetaLocation(ZKWatcher zookeeper) throws KeeperException { deleteMetaLocation(zookeeper, RegionInfo.DEFAULT_REPLICA_ID); } - public static void deleteMetaLocation(ZKWatcher zookeeper, int replicaId) - throws KeeperException { + public static void deleteMetaLocation(ZKWatcher zookeeper, int replicaId) throws KeeperException { if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { LOG.info("Deleting hbase:meta region location in ZooKeeper"); } else { LOG.info("Deleting hbase:meta for {} region location in ZooKeeper", replicaId); } try { - // Just delete the node. Don't need any watches. + // Just delete the node. Don't need any watches. 
ZKUtil.deleteNode(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId)); - } catch(KeeperException.NoNodeException nne) { + } catch (KeeperException.NoNodeException nne) { // Has already been deleted } } @@ -257,7 +251,7 @@ public static void deleteMetaLocation(ZKWatcher zookeeper, int replicaId) * @throws InterruptedException if waiting for the socket operation fails */ private static ServerName blockUntilAvailable(final ZKWatcher zkw, int replicaId, - final long timeout) throws InterruptedException { + final long timeout) throws InterruptedException { if (timeout < 0) { throw new IllegalArgumentException(); } @@ -270,8 +264,8 @@ private static ServerName blockUntilAvailable(final ZKWatcher zkw, int replicaId ServerName sn = null; while (true) { sn = getMetaRegionLocation(zkw, replicaId); - if (sn != null || (EnvironmentEdgeManager.currentTime() - startTime) > timeout - - HConstants.SOCKET_RETRY_WAIT_MS) { + if (sn != null || (EnvironmentEdgeManager.currentTime() - startTime) > timeout + - HConstants.SOCKET_RETRY_WAIT_MS) { break; } Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java index 62b27de6bd80..a81fbcb6f41c 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.zookeeper; import static org.apache.zookeeper.client.FourLetterWordMain.send4LetterWord; + import java.io.File; import java.io.IOException; import java.io.InterruptedIOException; @@ -30,7 +31,6 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.net.Address; @@ -46,9 +46,8 @@ import org.slf4j.LoggerFactory; /** - * TODO: Most of the code in this class is ripped from ZooKeeper tests. Instead - * of redoing it, we should contribute updates to their code which let us more - * easily access testing helper objects. + * TODO: Most of the code in this class is ripped from ZooKeeper tests. Instead of redoing it, we + * should contribute updates to their code which let us more easily access testing helper objects. */ @InterfaceAudience.Public public class MiniZooKeeperCluster { @@ -88,13 +87,12 @@ public MiniZooKeeperCluster(Configuration configuration) { zooKeeperServers = new ArrayList<>(); clientPortList = new ArrayList<>(); standaloneServerFactoryList = new ArrayList<>(); - connectionTimeout = configuration - .getInt(HConstants.ZK_SESSION_TIMEOUT + ".localHBaseCluster", DEFAULT_CONNECTION_TIMEOUT); + connectionTimeout = configuration.getInt(HConstants.ZK_SESSION_TIMEOUT + ".localHBaseCluster", + DEFAULT_CONNECTION_TIMEOUT); } /** * Add a client port to the list. - * * @param clientPort the specified port */ public void addClientPort(int clientPort) { @@ -103,7 +101,6 @@ public void addClientPort(int clientPort) { /** * Get the list of client ports. - * * @return clientPortList the client port list */ @InterfaceAudience.Private @@ -113,7 +110,6 @@ public List getClientPortList() { /** * Check whether the client port in a specific position of the client port list is valid. 
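As an illustrative aside (not part of the change set), a test might drive MiniZooKeeperCluster roughly as shown below; the base directory is a placeholder and checked exceptions are left to the caller:

    // Sketch only: start a single-node mini ZooKeeper cluster, report where it is listening,
    // and shut it down afterwards.
    MiniZooKeeperCluster zkCluster = new MiniZooKeeperCluster(conf);
    int clientPort = zkCluster.startup(new File("/tmp/minizk-example"), 1);
    try {
      System.out.println("Mini ZK listening on " + zkCluster.getAddress() + " (port " + clientPort + ")");
    } finally {
      zkCluster.shutdown();
    }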
- * * @param index the specified position */ private boolean hasValidClientPortInList(int index) { @@ -129,7 +125,6 @@ public void setDefaultClientPort(int clientPort) { /** * Selects a ZK client port. - * * @param seedPort the seed port to start with; -1 means first time. * @return a valid and unused client port */ @@ -194,11 +189,11 @@ public int startup(File baseDir) throws IOException, InterruptedException { } /** - * @param baseDir the base directory to use + * @param baseDir the base directory to use * @param numZooKeeperServers the number of ZooKeeper servers * @return ClientPort server bound to, -1 if there was a binding problem and we couldn't pick - * another port. - * @throws IOException if an operation fails during the startup + * another port. + * @throws IOException if an operation fails during the startup * @throws InterruptedException if the startup fails */ public int startup(File baseDir, int numZooKeeperServers) @@ -234,10 +229,10 @@ public int startup(File baseDir, int numZooKeeperServers) ZooKeeperServer server = new ZooKeeperServer(dir, dir, tickTimeToUse); // Setting {min,max}SessionTimeout defaults to be the same as in Zookeeper - server.setMinSessionTimeout(configuration.getInt("hbase.zookeeper.property.minSessionTimeout", - -1)); - server.setMaxSessionTimeout(configuration.getInt("hbase.zookeeper.property.maxSessionTimeout", - -1)); + server.setMinSessionTimeout( + configuration.getInt("hbase.zookeeper.property.minSessionTimeout", -1)); + server.setMaxSessionTimeout( + configuration.getInt("hbase.zookeeper.property.maxSessionTimeout", -1)); NIOServerCnxnFactory standaloneServerFactory; while (true) { try { @@ -265,13 +260,12 @@ public int startup(File baseDir, int numZooKeeperServers) getServerConfigurationOnOneLine(server)); // Runs a 'stat' against the servers. if (!waitForServerUp(currentClientPort, connectionTimeout)) { - Threads.printThreadInfo(System.out, - "Why is zk standalone server not coming up?"); - throw new IOException("Waiting for startup of standalone server; " + - "server isRunning=" + server.isRunning()); + Threads.printThreadInfo(System.out, "Why is zk standalone server not coming up?"); + throw new IOException("Waiting for startup of standalone server; " + "server isRunning=" + + server.isRunning()); } - // We have selected a port as a client port. Update clientPortList if necessary. + // We have selected a port as a client port. Update clientPortList if necessary. 
if (clientPortList.size() <= i) { // it is not in the list, add the port clientPortList.add(currentClientPort); } else if (clientPortList.get(i) <= 0) { // the list has invalid port, update with valid port @@ -294,12 +288,14 @@ public int startup(File baseDir, int numZooKeeperServers) private String getServerConfigurationOnOneLine(ZooKeeperServer server) { StringWriter sw = new StringWriter(); try (PrintWriter pw = new PrintWriter(sw) { - @Override public void println(int x) { + @Override + public void println(int x) { super.print(x); super.print(", "); } - @Override public void println(String x) { + @Override + public void println(String x) { super.print(x); super.print(", "); } @@ -329,13 +325,13 @@ public void shutdown() throws IOException { int clientPort = clientPortList.get(i); standaloneServerFactory.shutdown(); if (!waitForServerDown(clientPort, connectionTimeout)) { - throw new IOException("Waiting for shutdown of standalone server at port=" + clientPort + - ", timeout=" + this.connectionTimeout); + throw new IOException("Waiting for shutdown of standalone server at port=" + clientPort + + ", timeout=" + this.connectionTimeout); } } standaloneServerFactoryList.clear(); - for (ZooKeeperServer zkServer: zooKeeperServers) { + for (ZooKeeperServer zkServer : zooKeeperServers) { // Explicitly close ZKDatabase since ZookeeperServer does not close them zkServer.getZKDatabase().close(); } @@ -351,8 +347,8 @@ public void shutdown() throws IOException { } /** - * @return clientPort return clientPort if there is another ZK backup can run - * when killing the current active; return -1, if there is no backups. + * @return clientPort return clientPort if there is another ZK backup can run when killing the + * current active; return -1, if there is no backups. * @throws IOException if waiting for the shutdown of a server fails */ public int killCurrentActiveZooKeeperServer() throws IOException, InterruptedException { @@ -362,7 +358,7 @@ public int killCurrentActiveZooKeeperServer() throws IOException, InterruptedExc // Shutdown the current active one NIOServerCnxnFactory standaloneServerFactory = - standaloneServerFactoryList.get(activeZKServerIndex); + standaloneServerFactoryList.get(activeZKServerIndex); int clientPort = clientPortList.get(activeZKServerIndex); standaloneServerFactory.shutdown(); @@ -390,18 +386,17 @@ public int killCurrentActiveZooKeeperServer() throws IOException, InterruptedExc /** * Kill one back up ZK servers. 
- * * @throws IOException if waiting for the shutdown of a server fails */ public void killOneBackupZooKeeperServer() throws IOException, InterruptedException { if (!started || activeZKServerIndex < 0 || standaloneServerFactoryList.size() <= 1) { - return ; + return; } - int backupZKServerIndex = activeZKServerIndex+1; + int backupZKServerIndex = activeZKServerIndex + 1; // Shutdown the current active one NIOServerCnxnFactory standaloneServerFactory = - standaloneServerFactoryList.get(backupZKServerIndex); + standaloneServerFactoryList.get(backupZKServerIndex); int clientPort = clientPortList.get(backupZKServerIndex); standaloneServerFactory.shutdown(); @@ -423,7 +418,7 @@ private static boolean waitForServerDown(int port, long timeout) throws IOExcept long start = EnvironmentEdgeManager.currentTime(); while (true) { try { - send4LetterWord(HOST, port, "stat", false, (int)timeout); + send4LetterWord(HOST, port, "stat", false, (int) timeout); } catch (IOException | X509Exception.SSLContextException e) { return true; } @@ -433,7 +428,7 @@ private static boolean waitForServerDown(int port, long timeout) throws IOExcept try { Thread.sleep(TIMEOUT); } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } } return false; @@ -445,7 +440,7 @@ private static boolean waitForServerUp(int port, long timeout) throws IOExceptio long start = EnvironmentEdgeManager.currentTime(); while (true) { try { - String result = send4LetterWord(HOST, port, "stat", false, (int)timeout); + String result = send4LetterWord(HOST, port, "stat", false, (int) timeout); if (result.startsWith("Zookeeper version:") && !result.contains("READ-ONLY")) { return true; } else { @@ -465,7 +460,7 @@ private static boolean waitForServerUp(int port, long timeout) throws IOExceptio try { Thread.sleep(TIMEOUT); } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } } return false; @@ -477,7 +472,7 @@ public int getClientPort() { } /** - * @return Address for this cluster instance. + * @return Address for this cluster instance. */ public Address getAddress() { return Address.fromParts(HOST, getClientPort()); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java index da7d1767e722..8a11e9ee7db9 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import org.apache.zookeeper.WatchedEvent; @@ -25,12 +24,11 @@ * Placeholder of a watcher which might be triggered before the instance is not yet created. *
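As an illustrative aside (not part of the change set), the construction ordering this javadoc goes on to prescribe might look like the following; the quorum string, session timeout, and the factory that builds the real watcher are assumptions:

    // Sketch only: pass the placeholder to the ZooKeeper constructor first, then install the
    // real watcher once the ZooKeeper instance exists.
    PendingWatcher placeholder = new PendingWatcher();
    ZooKeeper zk = new ZooKeeper("zk1:2181,zk2:2181,zk3:2181", 90000, placeholder);
    Watcher realWatcher = buildWatcherUsing(zk); // hypothetical factory using the ZooKeeper handle
    placeholder.prepare(realWatcher);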
<p>
          * {@code ZooKeeper} starts its event thread within its constructor (and that is an anti-pattern), - * and the watcher passed to the constructor might be called back by the event thread - * before you get the instance of {@code ZooKeeper} from the constructor. - * If your watcher calls methods of {@code ZooKeeper}, - * pass this placeholder to the constructor of the {@code ZooKeeper}, - * create your watcher using the instance of {@code ZooKeeper}, - * and then call the method {@code PendingWatcher.prepare}. + * and the watcher passed to the constructor might be called back by the event thread before you get + * the instance of {@code ZooKeeper} from the constructor. If your watcher calls methods of + * {@code ZooKeeper}, pass this placeholder to the constructor of the {@code ZooKeeper}, create your + * watcher using the instance of {@code ZooKeeper}, and then call the method + * {@code PendingWatcher.prepare}. */ class PendingWatcher implements Watcher { private final InstancePending pending = new InstancePending<>(); @@ -41,11 +39,10 @@ public void process(WatchedEvent event) { } /** - * Associates the substantial watcher of processing events. - * This method should be called once, and {@code watcher} should be non-null. - * This method is expected to call as soon as possible - * because the event processing, being invoked by the ZooKeeper event thread, - * is uninterruptibly blocked until this method is called. + * Associates the substantial watcher of processing events. This method should be called once, and + * {@code watcher} should be non-null. This method is expected to call as soon as possible because + * the event processing, being invoked by the ZooKeeper event thread, is uninterruptibly blocked + * until this method is called. */ void prepare(Watcher watcher) { pending.prepare(watcher); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index 66ef868fdfdc..b5fd874c5c7d 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,26 +48,21 @@ import org.slf4j.LoggerFactory; /** - * A zookeeper that can handle 'recoverable' errors. - * To handle recoverable errors, developers need to realize that there are two - * classes of requests: idempotent and non-idempotent requests. Read requests - * and unconditional sets and deletes are examples of idempotent requests, they - * can be reissued with the same results. - * (Although, the delete may throw a NoNodeException on reissue its effect on - * the ZooKeeper state is the same.) Non-idempotent requests need special - * handling, application and library writers need to keep in mind that they may - * need to encode information in the data or name of znodes to detect - * retries. A simple example is a create that uses a sequence flag. - * If a process issues a create("/x-", ..., SEQUENCE) and gets a connection - * loss exception, that process will reissue another - * create("/x-", ..., SEQUENCE) and get back x-111. 
When the process does a - * getChildren("/"), it sees x-1,x-30,x-109,x-110,x-111, now it could be - * that x-109 was the result of the previous create, so the process actually - * owns both x-109 and x-111. An easy way around this is to use "x-process id-" - * when doing the create. If the process is using an id of 352, before reissuing - * the create it will do a getChildren("/") and see "x-222-1", "x-542-30", - * "x-352-109", x-333-110". The process will know that the original create - * succeeded an the znode it created is "x-352-109". + * A zookeeper that can handle 'recoverable' errors. To handle recoverable errors, developers need + * to realize that there are two classes of requests: idempotent and non-idempotent requests. Read + * requests and unconditional sets and deletes are examples of idempotent requests, they can be + * reissued with the same results. (Although, the delete may throw a NoNodeException on reissue its + * effect on the ZooKeeper state is the same.) Non-idempotent requests need special handling, + * application and library writers need to keep in mind that they may need to encode information in + * the data or name of znodes to detect retries. A simple example is a create that uses a sequence + * flag. If a process issues a create("/x-", ..., SEQUENCE) and gets a connection loss exception, + * that process will reissue another create("/x-", ..., SEQUENCE) and get back x-111. When the + * process does a getChildren("/"), it sees x-1,x-30,x-109,x-110,x-111, now it could be that x-109 + * was the result of the previous create, so the process actually owns both x-109 and x-111. An easy + * way around this is to use "x-process id-" when doing the create. If the process is using an id of + * 352, before reissuing the create it will do a getChildren("/") and see "x-222-1", "x-542-30", + * "x-352-109", x-333-110". The process will know that the original create succeeded an the znode it + * created is "x-352-109". * @see "https://cwiki.apache.org/confluence/display/HADOOP2/ZooKeeper+ErrorHandling" */ @InterfaceAudience.Private @@ -89,7 +83,7 @@ public class RecoverableZooKeeper { * See {@link #connect(Configuration, String, Watcher, String)} */ public static RecoverableZooKeeper connect(Configuration conf, Watcher watcher) - throws IOException { + throws IOException { String ensemble = ZKConfig.getZKQuorumServersString(conf); return connect(conf, ensemble, watcher); } @@ -97,18 +91,15 @@ public static RecoverableZooKeeper connect(Configuration conf, Watcher watcher) /** * See {@link #connect(Configuration, String, Watcher, String)} */ - public static RecoverableZooKeeper connect(Configuration conf, String ensemble, - Watcher watcher) - throws IOException { + public static RecoverableZooKeeper connect(Configuration conf, String ensemble, Watcher watcher) + throws IOException { return connect(conf, ensemble, watcher, null); } /** - * Creates a new connection to ZooKeeper, pulling settings and ensemble config - * from the specified configuration object using methods from {@link ZKConfig}. - * - * Sets the connection status monitoring watcher to the specified watcher. - * + * Creates a new connection to ZooKeeper, pulling settings and ensemble config from the specified + * configuration object using methods from {@link ZKConfig}. Sets the connection status monitoring + * watcher to the specified watcher. 
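As an illustrative aside (not part of the change set), obtaining a retrying handle through the connect method described here might look like this; the watcher and the znode path are placeholders:

    // Sketch only: build a RecoverableZooKeeper from the cluster configuration and issue an
    // idempotent create, which is retried internally on recoverable errors.
    RecoverableZooKeeper rzk = RecoverableZooKeeper.connect(conf, watcher);
    rzk.create("/example-znode", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);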
* @param conf configuration to pull ensemble and other settings from * @param watcher watcher to monitor connection changes * @param ensemble ZooKeeper servers quorum string @@ -116,35 +107,31 @@ public static RecoverableZooKeeper connect(Configuration conf, String ensemble, * @return connection to zookeeper * @throws IOException if unable to connect to zk or config problem */ - public static RecoverableZooKeeper connect(Configuration conf, String ensemble, - Watcher watcher, final String identifier) - throws IOException { - if(ensemble == null) { + public static RecoverableZooKeeper connect(Configuration conf, String ensemble, Watcher watcher, + final String identifier) throws IOException { + if (ensemble == null) { throw new IOException("Unable to determine ZooKeeper ensemble"); } - int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, - HConstants.DEFAULT_ZK_SESSION_TIMEOUT); + int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); if (LOG.isTraceEnabled()) { LOG.trace("{} opening connection to ZooKeeper ensemble={}", identifier, ensemble); } int retry = conf.getInt("zookeeper.recovery.retry", 3); - int retryIntervalMillis = - conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); + int retryIntervalMillis = conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 60000); - int multiMaxSize = conf.getInt("zookeeper.multi.max.size", 1024*1024); - return new RecoverableZooKeeper(ensemble, timeout, watcher, - retry, retryIntervalMillis, maxSleepTime, identifier, multiMaxSize); + int multiMaxSize = conf.getInt("zookeeper.multi.max.size", 1024 * 1024); + return new RecoverableZooKeeper(ensemble, timeout, watcher, retry, retryIntervalMillis, + maxSleepTime, identifier, multiMaxSize); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", - justification="None. Its always been this way.") - public RecoverableZooKeeper(String quorumServers, int sessionTimeout, - Watcher watcher, int maxRetries, int retryIntervalMillis, int maxSleepTime, String identifier, - int maxMultiSize) - throws IOException { + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DE_MIGHT_IGNORE", + justification = "None. Its always been this way.") + public RecoverableZooKeeper(String quorumServers, int sessionTimeout, Watcher watcher, + int maxRetries, int retryIntervalMillis, int maxSleepTime, String identifier, + int maxMultiSize) throws IOException { // TODO: Add support for zk 'chroot'; we don't add it to the quorumServers String as we should. this.retryCounterFactory = - new RetryCounterFactory(maxRetries+1, retryIntervalMillis, maxSleepTime); + new RetryCounterFactory(maxRetries + 1, retryIntervalMillis, maxSleepTime); if (identifier == null || identifier.length() == 0) { // the identifier = processID@hostName @@ -168,11 +155,10 @@ public RecoverableZooKeeper(String quorumServers, int sessionTimeout, } /** - * Returns the maximum size (in bytes) that should be included in any single multi() call. - * - * NB: This is an approximation, so there may be variance in the msg actually sent over the - * wire. Please be sure to set this approximately, with respect to your ZK server configuration - * for jute.maxbuffer. + * Returns the maximum size (in bytes) that should be included in any single multi() call. NB: + * This is an approximation, so there may be variance in the msg actually sent over the wire. 
+ * Please be sure to set this approximately, with respect to your ZK server configuration for + * jute.maxbuffer. */ public int getMaxMultiSizeLimit() { return maxMultiSize; @@ -197,23 +183,21 @@ protected synchronized ZooKeeper checkZk() throws KeeperException { } public synchronized void reconnectAfterExpiration() - throws IOException, KeeperException, InterruptedException { + throws IOException, KeeperException, InterruptedException { if (zk != null) { - LOG.info("Closing dead ZooKeeper connection, session" + - " was: 0x"+Long.toHexString(zk.getSessionId())); + LOG.info("Closing dead ZooKeeper connection, session" + " was: 0x" + + Long.toHexString(zk.getSessionId())); zk.close(); // reset the ZooKeeper connection zk = null; } checkZk(); - LOG.info("Recreated a ZooKeeper, session" + - " is: 0x"+Long.toHexString(zk.getSessionId())); + LOG.info("Recreated a ZooKeeper, session" + " is: 0x" + Long.toHexString(zk.getSessionId())); } /** - * delete is an idempotent operation. Retry before throwing exception. - * This function will not throw NoNodeException if the path does not - * exist. + * delete is an idempotent operation. Retry before throwing exception. This function will not + * throw NoNodeException if the path does not exist. */ public void delete(String path, int version) throws InterruptedException, KeeperException { Span span = TraceUtil.getGlobalTracer().spanBuilder("RecoverableZookeeper.delete").startSpan(); @@ -228,8 +212,8 @@ public void delete(String path, int version) throws InterruptedException, Keeper switch (e.code()) { case NONODE: if (isRetry) { - LOG.debug("Node " + path + " already deleted. Assuming a " + - "previous attempt succeeded."); + LOG.debug( + "Node " + path + " already deleted. Assuming a " + "previous attempt succeeded."); return; } LOG.debug("Node {} already deleted, retry={}", path, isRetry); @@ -262,7 +246,7 @@ public Stat exists(String path, Watcher watcher) throws KeeperException, Interru } private Stat exists(String path, Watcher watcher, Boolean watch) - throws InterruptedException, KeeperException { + throws InterruptedException, KeeperException { Span span = TraceUtil.getGlobalTracer().spanBuilder("RecoverableZookeeper.exists").startSpan(); try (Scope scope = span.makeCurrent()) { RetryCounter retryCounter = retryCounterFactory.create(); @@ -302,8 +286,8 @@ public Stat exists(String path, boolean watch) throws KeeperException, Interrupt return exists(path, null, watch); } - private void retryOrThrow(RetryCounter retryCounter, KeeperException e, - String opName) throws KeeperException { + private void retryOrThrow(RetryCounter retryCounter, KeeperException e, String opName) + throws KeeperException { if (!retryCounter.shouldRetry()) { LOG.error("ZooKeeper {} failed after {} attempts", opName, retryCounter.getMaxAttempts()); throw e; @@ -316,14 +300,14 @@ private void retryOrThrow(RetryCounter retryCounter, KeeperException e, * @return List of children znodes */ public List getChildren(String path, Watcher watcher) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { return getChildren(path, watcher, null); } private List getChildren(String path, Watcher watcher, Boolean watch) - throws InterruptedException, KeeperException { + throws InterruptedException, KeeperException { Span span = - TraceUtil.getGlobalTracer().spanBuilder("RecoverableZookeeper.getChildren").startSpan(); + TraceUtil.getGlobalTracer().spanBuilder("RecoverableZookeeper.getChildren").startSpan(); try (Scope scope = span.makeCurrent()) { 
RetryCounter retryCounter = retryCounterFactory.create(); while (true) { @@ -359,7 +343,7 @@ private List getChildren(String path, Watcher watcher, Boolean watch) * @return List of children znodes */ public List getChildren(String path, boolean watch) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { return getChildren(path, null, watch); } @@ -368,12 +352,12 @@ public List getChildren(String path, boolean watch) * @return Data */ public byte[] getData(String path, Watcher watcher, Stat stat) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { return getData(path, watcher, null, stat); } private byte[] getData(String path, Watcher watcher, Boolean watch, Stat stat) - throws InterruptedException, KeeperException { + throws InterruptedException, KeeperException { Span span = TraceUtil.getGlobalTracer().spanBuilder("RecoverableZookeeper.getData").startSpan(); try (Scope scope = span.makeCurrent()) { RetryCounter retryCounter = retryCounterFactory.create(); @@ -410,18 +394,18 @@ private byte[] getData(String path, Watcher watcher, Boolean watch, Stat stat) * @return Data */ public byte[] getData(String path, boolean watch, Stat stat) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { return getData(path, null, watch, stat); } /** - * setData is NOT an idempotent operation. Retry may cause BadVersion Exception - * Adding an identifier field into the data to check whether - * badversion is caused by the result of previous correctly setData + * setData is NOT an idempotent operation. Retry may cause BadVersion Exception Adding an + * identifier field into the data to check whether badversion is caused by the result of previous + * correctly setData * @return Stat instance */ public Stat setData(String path, byte[] data, int version) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { Span span = TraceUtil.getGlobalTracer().spanBuilder("RecoverableZookeeper.setData").startSpan(); try (Scope scope = span.makeCurrent()) { RetryCounter retryCounter = retryCounterFactory.create(); @@ -440,19 +424,19 @@ public Stat setData(String path, byte[] data, int version) case BADVERSION: if (isRetry) { // try to verify whether the previous setData success or not - try{ + try { Stat stat = new Stat(); byte[] revData = checkZk().getData(path, false, stat); - if(Bytes.compareTo(revData, newData) == 0) { + if (Bytes.compareTo(revData, newData) == 0) { // the bad version is caused by previous successful setData return stat; } - } catch(KeeperException keeperException){ + } catch (KeeperException keeperException) { // the ZK is not reliable at this moment. just throwing exception throw keeperException; } } - // throw other exceptions and verified bad version exceptions + // throw other exceptions and verified bad version exceptions default: throw e; } @@ -500,7 +484,7 @@ public List getAcl(String path, Stat stat) throws KeeperException, Interrup * @return list of ACLs */ public Stat setAcl(String path, List acls, int version) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { Span span = TraceUtil.getGlobalTracer().spanBuilder("RecoverableZookeeper.setAcl").startSpan(); try (Scope scope = span.makeCurrent()) { RetryCounter retryCounter = retryCounterFactory.create(); @@ -527,21 +511,17 @@ public Stat setAcl(String path, List acls, int version) /** *
<p>
          - * NONSEQUENTIAL create is idempotent operation. - * Retry before throwing exceptions. - * But this function will not throw the NodeExist exception back to the - * application. + * NONSEQUENTIAL create is idempotent operation. Retry before throwing exceptions. But this + * function will not throw the NodeExist exception back to the application. *
<p>
          *
<p>
          - * But SEQUENTIAL is NOT idempotent operation. It is necessary to add - * identifier to the path to verify, whether the previous one is successful - * or not. + * But SEQUENTIAL is NOT idempotent operation. It is necessary to add identifier to the path to + * verify, whether the previous one is successful or not. *
<p>
          - * * @return Path */ public String create(String path, byte[] data, List acl, CreateMode createMode) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { Span span = TraceUtil.getGlobalTracer().spanBuilder("RecoverableZookeeper.create").startSpan(); try (Scope scope = span.makeCurrent()) { byte[] newData = ZKMetadata.appendMetaData(id, data); @@ -555,16 +535,15 @@ public String create(String path, byte[] data, List acl, CreateMode createM return createSequential(path, newData, acl, createMode); default: - throw new IllegalArgumentException("Unrecognized CreateMode: " + - createMode); + throw new IllegalArgumentException("Unrecognized CreateMode: " + createMode); } } finally { span.end(); } } - private String createNonSequential(String path, byte[] data, List acl, - CreateMode createMode) throws KeeperException, InterruptedException { + private String createNonSequential(String path, byte[] data, List acl, CreateMode createMode) + throws KeeperException, InterruptedException { RetryCounter retryCounter = retryCounterFactory.create(); boolean isRetry = false; // False for first attempt, true for all retries. while (true) { @@ -578,14 +557,12 @@ private String createNonSequential(String path, byte[] data, List acl, // we have successfully created the node at our previous attempt, // so we read the node and compare. byte[] currentData = checkZk().getData(path, false, null); - if (currentData != null && - Bytes.compareTo(currentData, data) == 0) { + if (currentData != null && Bytes.compareTo(currentData, data) == 0) { // We successfully created a non-sequential node return path; } - LOG.error("Node " + path + " already exists with " + - Bytes.toStringBinary(currentData) + ", could not write " + - Bytes.toStringBinary(data)); + LOG.error("Node " + path + " already exists with " + Bytes.toStringBinary(currentData) + + ", could not write " + Bytes.toStringBinary(data)); throw e; } LOG.trace("Node {} already exists", path); @@ -606,12 +583,11 @@ private String createNonSequential(String path, byte[] data, List acl, } } - private String createSequential(String path, byte[] data, - List acl, CreateMode createMode) - throws KeeperException, InterruptedException { + private String createSequential(String path, byte[] data, List acl, CreateMode createMode) + throws KeeperException, InterruptedException { RetryCounter retryCounter = retryCounterFactory.create(); boolean first = true; - String newPath = path+this.identifier; + String newPath = path + this.identifier; while (true) { try { if (!first) { @@ -638,28 +614,29 @@ private String createSequential(String path, byte[] data, retryCounter.sleepUntilNextRetry(); } } + /** - * Convert Iterable of {@link org.apache.zookeeper.Op} we got into the ZooKeeper.Op - * instances to actually pass to multi (need to do this in order to appendMetaData). + * Convert Iterable of {@link org.apache.zookeeper.Op} we got into the ZooKeeper.Op instances to + * actually pass to multi (need to do this in order to appendMetaData). 
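As an illustrative aside (not part of the change set), a caller might batch operations through multi() roughly as follows; the paths and payload are placeholders, and each Op is rewritten by the method below so that HBase metadata is appended before it reaches ZooKeeper:

    // Sketch only: group a create and a setData into one atomic multi() call.
    List<Op> ops = new ArrayList<>();
    ops.add(Op.create("/example/a", payload, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT));
    ops.add(Op.setData("/example/b", payload, -1));
    List<OpResult> results = rzk.multi(ops);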
*/ private Iterable prepareZKMulti(Iterable ops) throws UnsupportedOperationException { - if(ops == null) { + if (ops == null) { return null; } List preparedOps = new LinkedList<>(); for (Op op : ops) { if (op.getType() == ZooDefs.OpCode.create) { - CreateRequest create = (CreateRequest)op.toRequestRecord(); + CreateRequest create = (CreateRequest) op.toRequestRecord(); preparedOps.add(Op.create(create.getPath(), ZKMetadata.appendMetaData(id, create.getData()), create.getAcl(), create.getFlags())); } else if (op.getType() == ZooDefs.OpCode.delete) { // no need to appendMetaData for delete preparedOps.add(op); } else if (op.getType() == ZooDefs.OpCode.setData) { - SetDataRequest setData = (SetDataRequest)op.toRequestRecord(); + SetDataRequest setData = (SetDataRequest) op.toRequestRecord(); preparedOps.add(Op.setData(setData.getPath(), - ZKMetadata.appendMetaData(id, setData.getData()), setData.getVersion())); + ZKMetadata.appendMetaData(id, setData.getData()), setData.getVersion())); } else { throw new UnsupportedOperationException("Unexpected ZKOp type: " + op.getClass().getName()); } @@ -698,11 +675,11 @@ public List multi(Iterable ops) throws KeeperException, Interrupte } private String findPreviousSequentialNode(String path) - throws KeeperException, InterruptedException { + throws KeeperException, InterruptedException { int lastSlashIdx = path.lastIndexOf('/'); - assert(lastSlashIdx != -1); + assert (lastSlashIdx != -1); String parent = path.substring(0, lastSlashIdx); - String nodePrefix = path.substring(lastSlashIdx+1); + String nodePrefix = path.substring(lastSlashIdx + 1); List nodes = checkZk().getChildren(parent, false); List matching = filterByPrefix(nodes, nodePrefix); for (String node : matching) { @@ -738,20 +715,17 @@ public void sync(String path, AsyncCallback.VoidCallback cb, Object ctx) throws } /** - * Filters the given node list by the given prefixes. - * This method is all-inclusive--if any element in the node list starts - * with any of the given prefixes, then it is included in the result. - * + * Filters the given node list by the given prefixes. This method is all-inclusive--if any element + * in the node list starts with any of the given prefixes, then it is included in the result. * @param nodes the nodes to filter * @param prefixes the prefixes to include in the result * @return list of every element that starts with one of the prefixes */ - private static List filterByPrefix(List nodes, - String... prefixes) { + private static List filterByPrefix(List nodes, String... prefixes) { List lockChildren = new ArrayList<>(); - for (String child : nodes){ - for (String prefix : prefixes){ - if (child.startsWith(prefix)){ + for (String child : nodes) { + for (String prefix : prefixes) { + if (child.startsWith(prefix)) { lockChildren.add(child); break; } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java index d69e75094b6b..863f26f59fdf 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; @@ -45,13 +44,13 @@ public RegionNormalizerTracker(ZKWatcher watcher, Abortable abortable) { * Return true if region normalizer is on, false otherwise */ public boolean isNormalizerOn() { - byte [] upData = super.getData(false); + byte[] upData = super.getData(false); try { // if data in ZK is null, use default of on. return upData == null || parseFrom(upData).getNormalizerOn(); } catch (DeserializationException dex) { - LOG.error("ZK state for RegionNormalizer could not be parsed " - + Bytes.toStringBinary(upData)); + LOG.error( + "ZK state for RegionNormalizer could not be parsed " + Bytes.toStringBinary(upData)); // return false to be safe. return false; } @@ -63,27 +62,27 @@ public boolean isNormalizerOn() { * @throws KeeperException if a ZooKeeper operation fails */ public void setNormalizerOn(boolean normalizerOn) throws KeeperException { - byte [] upData = toByteArray(normalizerOn); + byte[] upData = toByteArray(normalizerOn); try { ZKUtil.setData(watcher, watcher.getZNodePaths().regionNormalizerZNode, upData); - } catch(KeeperException.NoNodeException nne) { + } catch (KeeperException.NoNodeException nne) { ZKUtil.createAndWatch(watcher, watcher.getZNodePaths().regionNormalizerZNode, upData); } super.nodeDataChanged(watcher.getZNodePaths().regionNormalizerZNode); } - private byte [] toByteArray(boolean isNormalizerOn) { + private byte[] toByteArray(boolean isNormalizerOn) { RegionNormalizerProtos.RegionNormalizerState.Builder builder = - RegionNormalizerProtos.RegionNormalizerState.newBuilder(); + RegionNormalizerProtos.RegionNormalizerState.newBuilder(); builder.setNormalizerOn(isNormalizerOn); return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); } - private RegionNormalizerProtos.RegionNormalizerState parseFrom(byte [] pbBytes) - throws DeserializationException { + private RegionNormalizerProtos.RegionNormalizerState parseFrom(byte[] pbBytes) + throws DeserializationException { ProtobufUtil.expectPBMagicPrefix(pbBytes); RegionNormalizerProtos.RegionNormalizerState.Builder builder = - RegionNormalizerProtos.RegionNormalizerState.newBuilder(); + RegionNormalizerProtos.RegionNormalizerState.newBuilder(); try { int magicLen = ProtobufUtil.lengthOfPBMagic(); ProtobufUtil.mergeFrom(builder, pbBytes, magicLen, pbBytes.length - magicLen); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/SnapshotCleanupTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/SnapshotCleanupTracker.java index 433c7ab34013..248957558fce 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/SnapshotCleanupTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/SnapshotCleanupTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; - import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.util.Bytes; @@ -37,11 +35,10 @@ public class SnapshotCleanupTracker extends ZKNodeTracker { /** * Constructs a new ZK node tracker. - * - *

<p>After construction, use {@link #start} to kick off tracking. - * + * <p>
          + * After construction, use {@link #start} to kick off tracking. * @param watcher reference to the {@link ZKWatcher} which also contains configuration and - * constants + * constants * @param abortable used to abort if a fatal error occurs */ public SnapshotCleanupTracker(ZKWatcher watcher, Abortable abortable) { @@ -50,19 +47,19 @@ public SnapshotCleanupTracker(ZKWatcher watcher, Abortable abortable) { /** * Returns the current state of the snapshot auto cleanup based on TTL - * - * @return true if the snapshot auto cleanup is enabled, - * false otherwise. + * @return true if the snapshot auto cleanup is enabled, false + * otherwise. */ public boolean isSnapshotCleanupEnabled() { byte[] snapshotCleanupZNodeData = super.getData(false); try { // if data in ZK is null, use default of on. - return snapshotCleanupZNodeData == null || - parseFrom(snapshotCleanupZNodeData).getSnapshotCleanupEnabled(); + return snapshotCleanupZNodeData == null + || parseFrom(snapshotCleanupZNodeData).getSnapshotCleanupEnabled(); } catch (DeserializationException dex) { - LOG.error("ZK state for Snapshot Cleanup could not be parsed " + - Bytes.toStringBinary(snapshotCleanupZNodeData), dex); + LOG.error("ZK state for Snapshot Cleanup could not be parsed " + + Bytes.toStringBinary(snapshotCleanupZNodeData), + dex); // return false to be safe. return false; } @@ -70,20 +67,18 @@ public boolean isSnapshotCleanupEnabled() { /** * Set snapshot auto clean on/off - * - * @param snapshotCleanupEnabled true if the snapshot auto cleanup should be on, - * false otherwise + * @param snapshotCleanupEnabled true if the snapshot auto cleanup should be on, false otherwise * @throws KeeperException if ZooKeeper operation fails */ public void setSnapshotCleanupEnabled(final boolean snapshotCleanupEnabled) throws KeeperException { - byte [] snapshotCleanupZNodeData = toByteArray(snapshotCleanupEnabled); + byte[] snapshotCleanupZNodeData = toByteArray(snapshotCleanupEnabled); try { ZKUtil.setData(watcher, watcher.getZNodePaths().snapshotCleanupZNode, - snapshotCleanupZNodeData); - } catch(KeeperException.NoNodeException nne) { + snapshotCleanupZNodeData); + } catch (KeeperException.NoNodeException nne) { ZKUtil.createAndWatch(watcher, watcher.getZNodePaths().snapshotCleanupZNode, - snapshotCleanupZNodeData); + snapshotCleanupZNodeData); } super.nodeDataChanged(watcher.getZNodePaths().snapshotCleanupZNode); } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAclReset.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAclReset.java index 81d3fc76432a..72453bd546ed 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAclReset.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAclReset.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.zookeeper; import java.util.List; @@ -32,24 +30,20 @@ import org.slf4j.LoggerFactory; /** - * You may add the jaas.conf option - * -Djava.security.auth.login.config=/PATH/jaas.conf - * - * You may also specify -D to set options - * "hbase.zookeeper.quorum" (it should be in hbase-site.xml) - * "zookeeper.znode.parent" (it should be in hbase-site.xml) - * - * Use -set-acls to set the ACLs, no option to erase ACLs + * You may add the jaas.conf option -Djava.security.auth.login.config=/PATH/jaas.conf You may also + * specify -D to set options "hbase.zookeeper.quorum" (it should be in hbase-site.xml) + * "zookeeper.znode.parent" (it should be in hbase-site.xml) Use -set-acls to set the ACLs, no + * option to erase ACLs */ @InterfaceAudience.Private public class ZKAclReset extends Configured implements Tool { private static final Logger LOG = LoggerFactory.getLogger(ZKAclReset.class); - private static void resetAcls(final ZKWatcher zkw, final String znode, - final boolean eraseAcls) throws Exception { + private static void resetAcls(final ZKWatcher zkw, final String znode, final boolean eraseAcls) + throws Exception { List children = ZKUtil.listChildrenNoWatch(zkw, znode); if (children != null) { - for (String child: children) { + for (String child : children) { resetAcls(zkw, ZNodePaths.joinZNode(znode, child), eraseAcls); } } @@ -64,8 +58,7 @@ private static void resetAcls(final ZKWatcher zkw, final String znode, } } - private static void resetAcls(final Configuration conf, boolean eraseAcls) - throws Exception { + private static void resetAcls(final Configuration conf, boolean eraseAcls) throws Exception { try (ZKWatcher zkw = new ZKWatcher(conf, "ZKAclReset", null)) { LOG.info((eraseAcls ? "Erase" : "Set") + " HBase ACLs for {} {}", zkw.getQuorum(), zkw.getZNodePaths().baseZNode); @@ -110,7 +103,7 @@ public int run(String[] args) throws Exception { } resetAcls(getConf(), eraseAcls); - return(0); + return (0); } public static void main(String[] args) throws Exception { diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAuthentication.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAuthentication.java index 7f04490fd4de..b930d9a1d203 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAuthentication.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAuthentication.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; @@ -39,58 +38,54 @@ public final class ZKAuthentication { private static final Logger LOG = LoggerFactory.getLogger(ZKAuthentication.class); - private ZKAuthentication() {} + private ZKAuthentication() { + } /** - * Log in the current zookeeper server process using the given configuration - * keys for the credential file and login principal. - * - *

<p><strong>This is only applicable when running on secure hbase</strong> - * On regular HBase (without security features), this will safely be ignored. + * Log in the current zookeeper server process using the given configuration keys for the + * credential file and login principal. + * <p> + * <strong>This is only applicable when running on secure hbase</strong> On regular HBase (without + * security features), this will safely be ignored. * </p>
          - * * @param conf The configuration data to use * @param keytabFileKey Property key used to configure the path to the credential file * @param userNameKey Property key used to configure the login principal * @param hostname Current hostname to use in any credentials * @throws IOException underlying exception from SecurityUtil.login() call */ - public static void loginServer(Configuration conf, String keytabFileKey, - String userNameKey, String hostname) throws IOException { - login(conf, keytabFileKey, userNameKey, hostname, - ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, - JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME); + public static void loginServer(Configuration conf, String keytabFileKey, String userNameKey, + String hostname) throws IOException { + login(conf, keytabFileKey, userNameKey, hostname, ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, + JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME); } /** - * Log in the current zookeeper client using the given configuration - * keys for the credential file and login principal. - * - *

<p><strong>This is only applicable when running on secure hbase</strong> - * On regular HBase (without security features), this will safely be ignored. + * Log in the current zookeeper client using the given configuration keys for the credential file + * and login principal. + * <p> + * <strong>This is only applicable when running on secure hbase</strong> On regular HBase (without + * security features), this will safely be ignored. * </p>
          - * * @param conf The configuration data to use * @param keytabFileKey Property key used to configure the path to the credential file * @param userNameKey Property key used to configure the login principal * @param hostname Current hostname to use in any credentials * @throws IOException underlying exception from SecurityUtil.login() call */ - public static void loginClient(Configuration conf, String keytabFileKey, - String userNameKey, String hostname) throws IOException { - login(conf, keytabFileKey, userNameKey, hostname, - ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, - JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME); + public static void loginClient(Configuration conf, String keytabFileKey, String userNameKey, + String hostname) throws IOException { + login(conf, keytabFileKey, userNameKey, hostname, ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, + JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME); } /** - * Log in the current process using the given configuration keys for the - * credential file and login principal. - * - *

<p><strong>This is only applicable when running on secure hbase</strong> - * On regular HBase (without security features), this will safely be ignored. + * Log in the current process using the given configuration keys for the credential file and login + * principal. + * <p> + * <strong>This is only applicable when running on secure hbase</strong> On regular HBase (without + * security features), this will safely be ignored. * </p>
          - * * @param conf The configuration data to use * @param keytabFileKey Property key used to configure the path to the credential file * @param userNameKey Property key used to configure the login principal @@ -99,10 +94,8 @@ public static void loginClient(Configuration conf, String keytabFileKey, * @param loginContextName jaas entry name * @throws IOException underlying exception from SecurityUtil.login() call */ - private static void login(Configuration conf, String keytabFileKey, - String userNameKey, String hostname, - String loginContextProperty, String loginContextName) - throws IOException { + private static void login(Configuration conf, String keytabFileKey, String userNameKey, + String hostname, String loginContextProperty, String loginContextName) throws IOException { if (!isSecureZooKeeper(conf)) { return; } @@ -126,16 +119,15 @@ private static void login(Configuration conf, String keytabFileKey, // Initialize the "jaas.conf" for keyTab/principal, // If keyTab is not specified use the Ticket Cache. // and set the zookeeper login context name. - JaasConfiguration jaasConf = new JaasConfiguration(loginContextName, - principalName, keytabFilename); + JaasConfiguration jaasConf = + new JaasConfiguration(loginContextName, principalName, keytabFilename); javax.security.auth.login.Configuration.setConfiguration(jaasConf); System.setProperty(loginContextProperty, loginContextName); } /** - * Returns {@code true} when secure authentication is enabled - * (whether {@code hbase.security.authentication} is set to - * "{@code kerberos}"). + * Returns {@code true} when secure authentication is enabled (whether + * {@code hbase.security.authentication} is set to "{@code kerberos}"). */ public static boolean isSecureZooKeeper(Configuration conf) { // Detection for embedded HBase client with jaas configuration @@ -147,13 +139,13 @@ public static boolean isSecureZooKeeper(Configuration conf) { && testConfig.getAppConfigurationEntry( JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME) == null && testConfig.getAppConfigurationEntry( - JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME) == null + JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME) == null && conf.get(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL) == null && conf.get(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL) == null) { return false; } - } catch(Exception e) { + } catch (Exception e) { // No Jaas configuration defined. 
return false; } @@ -169,9 +161,9 @@ private static class JaasConfiguration extends javax.security.auth.login.Configu private static final Logger LOG = LoggerFactory.getLogger(JaasConfiguration.class); public static final String SERVER_KEYTAB_KERBEROS_CONFIG_NAME = - "zookeeper-server-keytab-kerberos"; + "zookeeper-server-keytab-kerberos"; public static final String CLIENT_KEYTAB_KERBEROS_CONFIG_NAME = - "zookeeper-client-keytab-kerberos"; + "zookeeper-client-keytab-kerberos"; private static final Map BASIC_JAAS_OPTIONS = new HashMap<>(); @@ -192,11 +184,11 @@ private static class JaasConfiguration extends javax.security.auth.login.Configu } private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN = - new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(), - AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, KEYTAB_KERBEROS_OPTIONS); + new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(), + AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, KEYTAB_KERBEROS_OPTIONS); private static final AppConfigurationEntry[] KEYTAB_KERBEROS_CONF = - new AppConfigurationEntry[] { KEYTAB_KERBEROS_LOGIN }; + new AppConfigurationEntry[] { KEYTAB_KERBEROS_LOGIN }; private javax.security.auth.login.Configuration baseConfig; private final String loginContextName; @@ -209,7 +201,7 @@ public JaasConfiguration(String loginContextName, String principal, String keyta } private JaasConfiguration(String loginContextName, String principal, String keytabFile, - boolean useTicketCache) { + boolean useTicketCache) { try { this.baseConfig = javax.security.auth.login.Configuration.getConfiguration(); } catch (SecurityException e) { @@ -219,12 +211,12 @@ private JaasConfiguration(String loginContextName, String principal, String keyt this.useTicketCache = useTicketCache; this.keytabFile = keytabFile; this.principal = principal; - LOG.info( - "JaasConfiguration loginContextName={} principal={} useTicketCache={} keytabFile={}", + LOG.info("JaasConfiguration loginContextName={} principal={} useTicketCache={} keytabFile={}", loginContextName, principal, useTicketCache, keytabFile); } - @Override public AppConfigurationEntry[] getAppConfigurationEntry(String appName) { + @Override + public AppConfigurationEntry[] getAppConfigurationEntry(String appName) { if (loginContextName.equals(appName)) { if (!useTicketCache) { KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java index 8bc204414f5c..71d8f87bbc2b 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import java.util.UUID; - import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -28,10 +25,9 @@ import org.apache.zookeeper.KeeperException; /** - * Publishes and synchronizes a unique identifier specific to a given HBase - * cluster. 
The stored identifier is read from the file system by the active - * master on startup, and is subsequently available to all watchers (including - * clients). + * Publishes and synchronizes a unique identifier specific to a given HBase cluster. The stored + * identifier is read from the file system by the active master on startup, and is subsequently + * available to all watchers (including clients). */ @InterfaceAudience.Private public class ZKClusterId { @@ -54,16 +50,14 @@ public String getId() { id = readClusterIdZNode(watcher); } } catch (KeeperException ke) { - abortable.abort("Unexpected exception from ZooKeeper reading cluster ID", - ke); + abortable.abort("Unexpected exception from ZooKeeper reading cluster ID", ke); } return id; } - public static String readClusterIdZNode(ZKWatcher watcher) - throws KeeperException { + public static String readClusterIdZNode(ZKWatcher watcher) throws KeeperException { if (ZKUtil.checkExists(watcher, watcher.getZNodePaths().clusterIdZNode) != -1) { - byte [] data; + byte[] data; try { data = ZKUtil.getData(watcher, watcher.getZNodePaths().clusterIdZNode); } catch (InterruptedException e) { @@ -81,8 +75,7 @@ public static String readClusterIdZNode(ZKWatcher watcher) return null; } - public static void setClusterId(ZKWatcher watcher, ClusterId id) - throws KeeperException { + public static void setClusterId(ZKWatcher watcher, ClusterId id) throws KeeperException { ZKUtil.createSetData(watcher, watcher.getZNodePaths().clusterIdZNode, id.toByteArray()); } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java index 4d93d143a90e..150c8983d219 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.zookeeper; import java.io.BufferedReader; @@ -38,7 +37,9 @@ import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; @@ -50,11 +51,12 @@ public final class ZKDump { private static final Logger LOG = LoggerFactory.getLogger(ZKDump.class); - private ZKDump() {} + private ZKDump() { + } public static String dump(final ZKWatcher zkWatcher) { - final int zkDumpConnectionTimeOut = zkWatcher.getConfiguration() - .getInt("zookeeper.dump.connection.timeout", 1000); + final int zkDumpConnectionTimeOut = + zkWatcher.getConfiguration().getInt("zookeeper.dump.connection.timeout", 1000); StringBuilder sb = new StringBuilder(); try { sb.append("HBase is rooted at ").append(zkWatcher.getZNodePaths().baseZNode); @@ -76,13 +78,12 @@ public static String dump(final ZKWatcher zkWatcher) { sb.append("\n ").append(MetaTableLocator.getMetaRegionLocation(zkWatcher)); int numMetaReplicas = zkWatcher.getMetaReplicaNodes().size(); for (int i = 1; i < numMetaReplicas; i++) { - sb.append("\n") - .append(" replica").append(i).append(": ") - .append(MetaTableLocator.getMetaRegionLocation(zkWatcher, i)); + sb.append("\n").append(" replica").append(i).append(": ") + .append(MetaTableLocator.getMetaRegionLocation(zkWatcher, i)); } sb.append("\nRegion servers:"); final List rsChildrenNoWatchList = - ZKUtil.listChildrenNoWatch(zkWatcher, zkWatcher.getZNodePaths().rsZNode); + ZKUtil.listChildrenNoWatch(zkWatcher, zkWatcher.getZNodePaths().rsZNode); if (rsChildrenNoWatchList != null) { for (String child : rsChildrenNoWatchList) { sb.append("\n ").append(child); @@ -121,13 +122,12 @@ public static String dump(final ZKWatcher zkWatcher) { /** * Appends replication znodes to the passed StringBuilder. 
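The dump() helper in the hunk above gathers the master, region server, replication, and quorum details into one printable report. A minimal sketch of invoking it, assuming an hbase-site.xml on the classpath that names the ZooKeeper ensemble; the class and application name are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.zookeeper.ZKDump;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

    public class ZKDumpExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Connect a watcher to the ensemble from configuration; no Abortable is needed for a one-off dump.
        try (ZKWatcher zkw = new ZKWatcher(conf, "ZKDumpExample", null)) {
          // dump() walks the HBase znodes (masters, region servers, replication peers, quorum stats)
          // and returns a single human-readable report.
          System.out.println(ZKDump.dump(zkw));
        }
      }
    }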
- * * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation * @param sb the {@link StringBuilder} to append to * @throws KeeperException if a ZooKeeper operation fails */ private static void getReplicationZnodesDump(ZKWatcher zkw, StringBuilder sb) - throws KeeperException { + throws KeeperException { String replicationZnode = zkw.getZNodePaths().replicationZNode; if (ZKUtil.checkExists(zkw, replicationZnode) == -1) { @@ -152,11 +152,11 @@ private static void getReplicationZnodesDump(ZKWatcher zkw, StringBuilder sb) } } - private static void appendHFileRefsZNodes(ZKWatcher zkw, String hFileRefsZNode, - StringBuilder sb) throws KeeperException { + private static void appendHFileRefsZNodes(ZKWatcher zkw, String hFileRefsZNode, StringBuilder sb) + throws KeeperException { sb.append("\n").append(hFileRefsZNode).append(": "); final List hFileRefChildrenNoWatchList = - ZKUtil.listChildrenNoWatch(zkw, hFileRefsZNode); + ZKUtil.listChildrenNoWatch(zkw, hFileRefsZNode); if (hFileRefChildrenNoWatchList != null) { for (String peerIdZNode : hFileRefChildrenNoWatchList) { String zNodeToProcess = ZNodePaths.joinZNode(hFileRefsZNode, peerIdZNode); @@ -181,7 +181,7 @@ public static String getReplicationZnodesDump(ZKWatcher zkw) throws KeeperExcept } private static void appendRSZnodes(ZKWatcher zkw, String znode, StringBuilder sb) - throws KeeperException { + throws KeeperException { List stack = new LinkedList<>(); stack.add(znode); do { @@ -211,8 +211,8 @@ private static void appendRSZnodes(ZKWatcher zkw, String znode, StringBuilder sb } while (stack.size() > 0); } - private static void appendPeersZnodes(ZKWatcher zkw, String peersZnode, - StringBuilder sb) throws KeeperException { + private static void appendPeersZnodes(ZKWatcher zkw, String peersZnode, StringBuilder sb) + throws KeeperException { int pblen = ProtobufUtil.lengthOfPBMagic(); sb.append("\n").append(peersZnode).append(": "); for (String peerIdZnode : ZKUtil.listChildrenNoWatch(zkw, peersZnode)) { @@ -227,7 +227,7 @@ private static void appendPeersZnodes(ZKWatcher zkw, String peersZnode, // parse the data of the above peer znode. 
try { ReplicationProtos.ReplicationPeer.Builder builder = - ReplicationProtos.ReplicationPeer.newBuilder(); + ReplicationProtos.ReplicationPeer.newBuilder(); ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen); String clusterKey = builder.getClusterkey(); sb.append("\n").append(znodeToProcess).append(": ").append(clusterKey); @@ -240,9 +240,9 @@ private static void appendPeersZnodes(ZKWatcher zkw, String peersZnode, } private static void appendPeerState(ZKWatcher zkw, String znodeToProcess, StringBuilder sb) - throws KeeperException, InvalidProtocolBufferException { - String peerState = zkw.getConfiguration().get("zookeeper.znode.replication.peers.state", - "peer-state"); + throws KeeperException, InvalidProtocolBufferException { + String peerState = + zkw.getConfiguration().get("zookeeper.znode.replication.peers.state", "peer-state"); int pblen = ProtobufUtil.lengthOfPBMagic(); for (String child : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) { if (!child.equals(peerState)) { @@ -255,7 +255,7 @@ private static void appendPeerState(ZKWatcher zkw, String znodeToProcess, String try { peerStateData = ZKUtil.getData(zkw, peerStateZnode); ReplicationProtos.ReplicationState.Builder builder = - ReplicationProtos.ReplicationState.newBuilder(); + ReplicationProtos.ReplicationState.newBuilder(); ProtobufUtil.mergeFrom(builder, peerStateData, pblen, peerStateData.length - pblen); sb.append(builder.getState().name()); } catch (IOException ipbe) { @@ -269,22 +269,19 @@ private static void appendPeerState(ZKWatcher zkw, String znodeToProcess, String /** * Gets the statistics from the given server. - * - * @param server The server to get the statistics from. - * @param timeout The socket timeout to use. + * @param server The server to get the statistics from. + * @param timeout The socket timeout to use. * @return The array of response strings. * @throws IOException When the socket communication fails. */ - private static String[] getServerStats(String server, int timeout) - throws IOException { + private static String[] getServerStats(String server, int timeout) throws IOException { String[] sp = server.split(":"); if (sp.length == 0) { return null; } String host = sp[0]; - int port = sp.length > 1 ? Integer.parseInt(sp[1]) - : HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT; + int port = sp.length > 1 ? 
Integer.parseInt(sp[1]) : HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT; try (Socket socket = new Socket()) { InetSocketAddress sockAddr = new InetSocketAddress(host, port); @@ -293,10 +290,14 @@ private static String[] getServerStats(String server, int timeout) } socket.connect(sockAddr, timeout); socket.setSoTimeout(timeout); - try (PrintWriter out = new PrintWriter(new BufferedWriter( - new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)), true); - BufferedReader in = new BufferedReader( - new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) { + try ( + PrintWriter out = + new PrintWriter( + new BufferedWriter( + new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)), + true); + BufferedReader in = new BufferedReader( + new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) { out.println("stat"); out.flush(); ArrayList res = new ArrayList<>(); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java index ed2f97797473..8ab4026c2df7 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -28,14 +26,12 @@ import org.slf4j.LoggerFactory; /** - * Handles coordination of a single "leader" instance among many possible - * candidates. The first {@link ZKLeaderManager} to successfully create - * the given znode becomes the leader, allowing the instance to continue - * with whatever processing must be protected. Other {@link ZKLeaderManager} - * instances will wait to be notified of changes to the leader znode. - * If the current master instance fails, the ephemeral leader znode will - * be removed, and all waiting instances will be notified, with the race - * to claim the leader znode beginning all over again. + * Handles coordination of a single "leader" instance among many possible candidates. The first + * {@link ZKLeaderManager} to successfully create the given znode becomes the leader, allowing the + * instance to continue with whatever processing must be protected. Other {@link ZKLeaderManager} + * instances will wait to be notified of changes to the leader znode. If the current master instance + * fails, the ephemeral leader znode will be removed, and all waiting instances will be notified, + * with the race to claim the leader znode beginning all over again. 
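ZKLeaderManager is deprecated, but the class comment above still describes the intended election flow. A minimal sketch of that flow under stated assumptions: the leader znode path and candidate id are hypothetical, and the Stoppable here is a throwaway stand-in for a real server lifecycle object:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.Stoppable;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.zookeeper.ZKLeaderManager;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

    public class LeaderElectionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (ZKWatcher watcher = new ZKWatcher(conf, "LeaderElectionExample", null)) {
          Stoppable candidate = new Stoppable() {
            private volatile boolean stopped = false;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
          };
          ZKLeaderManager leader = new ZKLeaderManager(watcher,
            "/hbase/example-leader",          // hypothetical leader znode
            Bytes.toBytes("candidate-1"),     // identifier stored in the znode
            candidate);
          leader.start();
          leader.waitToBecomeLeader();        // blocks until this instance owns the znode
          try {
            // protected work runs here while the ephemeral leader znode is held
          } finally {
            leader.stepDownAsLeader();        // delete the znode so another candidate can claim it
          }
        }
      }
    }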
* @deprecated Not used */ @Deprecated @@ -49,8 +45,8 @@ public class ZKLeaderManager extends ZKListener { private final byte[] nodeId; private final Stoppable candidate; - public ZKLeaderManager(ZKWatcher watcher, String leaderZNode, - byte[] identifier, Stoppable candidate) { + public ZKLeaderManager(ZKWatcher watcher, String leaderZNode, byte[] identifier, + Stoppable candidate) { super(watcher); this.leaderZNode = leaderZNode; this.nodeId = identifier; @@ -66,7 +62,7 @@ public void start() { } } catch (KeeperException ke) { watcher.abort("Unhandled zk exception when starting", ke); - candidate.stop("Unhandled zk exception starting up: "+ke.getMessage()); + candidate.stop("Unhandled zk exception starting up: " + ke.getMessage()); } } @@ -86,7 +82,7 @@ public void nodeDeleted(String path) { private void handleLeaderChange() { try { - synchronized(lock) { + synchronized (lock) { if (ZKUtil.watchAndCheckExists(watcher, leaderZNode)) { LOG.info("Found new leader for znode: {}", leaderZNode); leaderExists.set(true); @@ -98,7 +94,7 @@ private void handleLeaderChange() { } } catch (KeeperException ke) { watcher.abort("ZooKeeper error checking for leader znode", ke); - candidate.stop("ZooKeeper error checking for leader: "+ke.getMessage()); + candidate.stop("ZooKeeper error checking for leader: " + ke.getMessage()); } } @@ -112,8 +108,7 @@ public void waitToBecomeLeader() { // claimed the leader znode leaderExists.set(true); if (LOG.isDebugEnabled()) { - LOG.debug("Claimed the leader znode as '"+ - Bytes.toStringBinary(nodeId)+"'"); + LOG.debug("Claimed the leader znode as '" + Bytes.toStringBinary(nodeId) + "'"); } return; } @@ -122,8 +117,8 @@ public void waitToBecomeLeader() { byte[] currentId = ZKUtil.getDataAndWatch(watcher, leaderZNode); if (currentId != null && Bytes.equals(currentId, nodeId)) { // claimed with our ID, but we didn't grab it, possibly restarted? 
- LOG.info("Found existing leader with our ID ("+ - Bytes.toStringBinary(nodeId)+"), removing"); + LOG.info( + "Found existing leader with our ID (" + Bytes.toStringBinary(nodeId) + "), removing"); ZKUtil.deleteNode(watcher, leaderZNode); leaderExists.set(false); } else { @@ -132,12 +127,12 @@ public void waitToBecomeLeader() { } } catch (KeeperException ke) { watcher.abort("Unexpected error from ZK, stopping candidate", ke); - candidate.stop("Unexpected error from ZK: "+ke.getMessage()); + candidate.stop("Unexpected error from ZK: " + ke.getMessage()); return; } // wait for next chance - synchronized(lock) { + synchronized (lock) { while (leaderExists.get() && !candidate.isStopped()) { try { lock.wait(); @@ -154,7 +149,7 @@ public void waitToBecomeLeader() { */ public void stepDownAsLeader() { try { - synchronized(lock) { + synchronized (lock) { if (!leaderExists.get()) { return; } @@ -169,12 +164,10 @@ public void stepDownAsLeader() { } } catch (KeeperException ke) { watcher.abort("Unhandled zookeeper exception removing leader node", ke); - candidate.stop("Unhandled zookeeper exception removing leader node: " - + ke.getMessage()); + candidate.stop("Unhandled zookeeper exception removing leader node: " + ke.getMessage()); } catch (InterruptedException e) { watcher.abort("Unhandled zookeeper exception removing leader node", e); - candidate.stop("Unhandled zookeeper exception removing leader node: " - + e.getMessage()); + candidate.stop("Unhandled zookeeper exception removing leader node: " + e.getMessage()); } } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKListener.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKListener.java index 595e71304261..f42781a8caad 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKListener.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKListener.java @@ -20,16 +20,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Base class for internal listeners of ZooKeeper events. - * - * The {@link ZKWatcher} for a process will execute the appropriate - * methods of implementations of this class. In order to receive events from + * Base class for internal listeners of ZooKeeper events. The {@link ZKWatcher} for a process will + * execute the appropriate methods of implementations of this class. In order to receive events from * the watcher, every listener must register itself via {@link ZKWatcher#registerListener}. - * - * Subclasses need only override those methods in which they are interested. - * - * Note that the watcher will be blocked when invoking methods in listeners so - * they must not be long-running. + * Subclasses need only override those methods in which they are interested. Note that the watcher + * will be blocked when invoking methods in listeners so they must not be long-running. */ @InterfaceAudience.Private public abstract class ZKListener { diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java index d20b0eb58861..701980a20b24 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -27,10 +25,9 @@ import org.apache.zookeeper.ZooKeeperMain; import org.apache.zookeeper.cli.CliException; - /** - * Tool for running ZookeeperMain from HBase by reading a ZooKeeper server - * from HBase XML configuration. + * Tool for running ZookeeperMain from HBase by reading a ZooKeeper server from HBase XML + * configuration. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ZKMainServer { @@ -41,12 +38,12 @@ public String parse(final Configuration c) { } /** - * ZooKeeper 3.4.6 broke being able to pass commands on command line. - * See ZOOKEEPER-1897. This class is a hack to restore this faclity. + * ZooKeeper 3.4.6 broke being able to pass commands on command line. See ZOOKEEPER-1897. This + * class is a hack to restore this faclity. */ private static class HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain extends ZooKeeperMain { public HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(String[] args) - throws IOException, InterruptedException { + throws IOException, InterruptedException { super(args); // Make sure we are connected before we proceed. Can take a while on some systems. If we // run the command without being connected, we get ConnectionLoss KeeperErrorConnection... @@ -56,7 +53,7 @@ public HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(String[] args) } /** - * Run the command-line args passed. Calls System.exit when done. + * Run the command-line args passed. Calls System.exit when done. * @throws IOException in case of a network failure * @throws InterruptedException if the ZooKeeper client closes * @throws CliException if the ZooKeeper exception happens in cli command @@ -96,7 +93,7 @@ private static boolean hasCommandLineArguments(final String[] args) { * @param args Command line arguments. First arg is path to zookeepers file. */ public static void main(String[] args) throws Exception { - String [] newArgs = args; + String[] newArgs = args; if (!hasServer(args)) { // Add the zk ensemble from configuration if none passed on command-line. Configuration conf = HBaseConfiguration.create(); @@ -113,7 +110,7 @@ public static void main(String[] args) throws Exception { // 3.4.6 breaks command-processing; TODO. if (hasCommandLineArguments(args)) { HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain zkm = - new HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(newArgs); + new HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(newArgs); zkm.runCmdLine(); } else { ZooKeeperMain.main(newArgs); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodeTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodeTracker.java index 63de74cf914d..5e296a025523 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodeTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodeTracker.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,12 +26,11 @@ /** * Tracks the availability and value of a single ZooKeeper node. - * - *

<p>Utilizes the {@link ZKListener} interface to get the necessary - * ZooKeeper events related to the node. - * - * <p>This is the base class used by trackers in both the Master and - * RegionServers. + * <p> + * Utilizes the {@link ZKListener} interface to get the necessary ZooKeeper events related to the + * node. + * <p>
          + * This is the base class used by trackers in both the Master and RegionServers. */ @InterfaceAudience.Private public abstract class ZKNodeTracker extends ZKListener { @@ -74,9 +72,9 @@ public ZKNodeTracker(ZKWatcher watcher, String node, Abortable abortable) { public synchronized void start() { this.watcher.registerListener(this); try { - if(ZKUtil.watchAndCheckExists(watcher, node)) { - byte [] data = ZKUtil.getDataAndWatch(watcher, node); - if(data != null) { + if (ZKUtil.watchAndCheckExists(watcher, node)) { + byte[] data = ZKUtil.getDataAndWatch(watcher, node); + if (data != null) { this.data = data; } else { // It existed but now does not, try again to ensure a watch is set @@ -103,26 +101,23 @@ public synchronized void stop() { /** * Gets the data of the node, blocking until the node is available. - * * @return data of the node * @throws InterruptedException if the waiting thread is interrupted */ - public synchronized byte [] blockUntilAvailable() - throws InterruptedException { + public synchronized byte[] blockUntilAvailable() throws InterruptedException { return blockUntilAvailable(0, false); } /** - * Gets the data of the node, blocking until the node is available or the - * specified timeout has elapsed. - * + * Gets the data of the node, blocking until the node is available or the specified timeout has + * elapsed. * @param timeout maximum time to wait for the node data to be available, n milliseconds. Pass 0 - * for no timeout. + * for no timeout. * @return data of the node * @throws InterruptedException if the waiting thread is interrupted */ - public synchronized byte [] blockUntilAvailable(long timeout, boolean refresh) - throws InterruptedException { + public synchronized byte[] blockUntilAvailable(long timeout, boolean refresh) + throws InterruptedException { if (timeout < 0) { throw new IllegalArgumentException(); } @@ -134,25 +129,26 @@ public synchronized void stop() { try { // This does not create a watch if the node does not exists this.data = ZKUtil.getDataAndWatch(watcher, node); - } catch(KeeperException e) { + } catch (KeeperException e) { // We use to abort here, but in some cases the abort is ignored ( - // (empty Abortable), so it's better to log... + // (empty Abortable), so it's better to log... LOG.warn("Unexpected exception handling blockUntilAvailable", e); abortable.abort("Unexpected exception handling blockUntilAvailable", e); } } - boolean nodeExistsChecked = (!refresh ||data!=null); + boolean nodeExistsChecked = (!refresh || data != null); while (!this.stopped && (notimeout || remaining > 0) && this.data == null) { if (!nodeExistsChecked) { try { nodeExistsChecked = (ZKUtil.checkExists(watcher, node) != -1); } catch (KeeperException e) { - LOG.warn("Got exception while trying to check existence in ZooKeeper" + - " of the node: " + node + ", retrying if timeout not reached", e); + LOG.warn("Got exception while trying to check existence in ZooKeeper" + " of the node: " + + node + ", retrying if timeout not reached", + e); } // It did not exists, and now it does. 
- if (nodeExistsChecked){ + if (nodeExistsChecked) { LOG.debug("Node {} now exists, resetting a watcher", node); try { // This does not create a watch if the node does not exists @@ -164,7 +160,7 @@ public synchronized void stop() { } } // We expect a notification; but we wait with a - // a timeout to lower the impact of a race condition if any + // a timeout to lower the impact of a race condition if any wait(100); remaining = timeout - (EnvironmentEdgeManager.currentTime() - startTime); } @@ -173,18 +169,17 @@ public synchronized void stop() { /** * Gets the data of the node. - * - *

<p>If the node is currently available, the most up-to-date known version of - * the data is returned. If the node is not currently available, null is - * returned. + * <p>
          + * If the node is currently available, the most up-to-date known version of the data is returned. + * If the node is not currently available, null is returned. * @param refresh whether to refresh the data by calling ZK directly. * @return data of the node, null if unavailable */ - public synchronized byte [] getData(boolean refresh) { + public synchronized byte[] getData(boolean refresh) { if (refresh) { try { this.data = ZKUtil.getDataAndWatch(watcher, node); - } catch(KeeperException e) { + } catch (KeeperException e) { abortable.abort("Unexpected exception handling getData", e); } } @@ -202,28 +197,28 @@ public synchronized void nodeCreated(String path) { } try { - byte [] data = ZKUtil.getDataAndWatch(watcher, node); + byte[] data = ZKUtil.getDataAndWatch(watcher, node); if (data != null) { this.data = data; notifyAll(); } else { nodeDeleted(path); } - } catch(KeeperException e) { + } catch (KeeperException e) { abortable.abort("Unexpected exception handling nodeCreated event", e); } } @Override public synchronized void nodeDeleted(String path) { - if(path.equals(node)) { + if (path.equals(node)) { try { - if(ZKUtil.watchAndCheckExists(watcher, node)) { + if (ZKUtil.watchAndCheckExists(watcher, node)) { nodeCreated(path); } else { this.data = null; } - } catch(KeeperException e) { + } catch (KeeperException e) { abortable.abort("Unexpected exception handling nodeDeleted event", e); } } @@ -231,16 +226,14 @@ public synchronized void nodeDeleted(String path) { @Override public synchronized void nodeDataChanged(String path) { - if(path.equals(node)) { + if (path.equals(node)) { nodeCreated(path); } } /** - * Checks if the baseznode set as per the property 'zookeeper.znode.parent' - * exists. - * @return true if baseznode exists. - * false if doesnot exists. + * Checks if the baseznode set as per the property 'zookeeper.znode.parent' exists. + * @return true if baseznode exists. false if doesnot exists. */ public boolean checkIfBaseNodeAvailable() { try { @@ -257,7 +250,6 @@ public boolean checkIfBaseNodeAvailable() { @Override public String toString() { - return "ZKNodeTracker{" + - "node='" + node + ", stopped=" + stopped + '}'; + return "ZKNodeTracker{" + "node='" + node + ", stopped=" + stopped + '}'; } } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java index 0db6205c693a..9e69f4f05164 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import java.util.LinkedList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -30,8 +27,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Tool for reading ZooKeeper servers from HBase XML configuration and producing - * a line-by-line list for use by bash scripts. 
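ZKNodeTracker above is abstract; concrete trackers such as RegionNormalizerTracker extend it and drive it through start(), blockUntilAvailable() and getData(). A minimal sketch of that pattern; the tracked znode path is hypothetical:

    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

    public class ExampleNodeTracker extends ZKNodeTracker {
      public ExampleNodeTracker(ZKWatcher watcher, Abortable abortable) {
        super(watcher, "/hbase/example-switch", abortable);   // hypothetical node to track
      }

      // Caller side: register the tracker as a listener, then block until the node has data.
      public static byte[] readOnce(ZKWatcher watcher, Abortable abortable)
        throws InterruptedException {
        ExampleNodeTracker tracker = new ExampleNodeTracker(watcher, abortable);
        tracker.start();                                          // sets the watch, primes the cache
        byte[] data = tracker.blockUntilAvailable(30_000, true);  // wait up to 30s, refreshing from ZK
        tracker.stop();                                           // unregister when no longer needed
        return data;
      }
    }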
+ * Tool for reading ZooKeeper servers from HBase XML configuration and producing a line-by-line list + * for use by bash scripts. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public final class ZKServerTool { @@ -60,7 +57,7 @@ public static ServerName[] readZKNodes(Configuration conf) { * @param args Command line arguments. */ public static void main(String[] args) { - for(ServerName server: readZKNodes(HBaseConfiguration.create())) { + for (ServerName server : readZKNodes(HBaseConfiguration.create())) { // bin/zookeeper.sh relies on the "ZK host" string for grepping which is case sensitive. System.out.println("ZK host: " + server.getHostname()); } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java index 63e2857b0e9c..997d3b43d975 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java @@ -1,5 +1,5 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one + * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file @@ -21,7 +21,6 @@ import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; @@ -32,8 +31,7 @@ /** * Common methods and attributes used by SplitLogManager and SplitLogWorker running distributed * splitting of WAL logs. - * @deprecated since 2.4.0 and 3.0.0 replaced by procedure-based WAL splitting; see - * SplitWALManager. + * @deprecated since 2.4.0 and 3.0.0 replaced by procedure-based WAL splitting; see SplitWALManager. */ @Deprecated @InterfaceAudience.Private @@ -44,8 +42,8 @@ private ZKSplitLog() { } /** - * Gets the full path node name for the log file being split. - * This method will url encode the filename. + * Gets the full path node name for the log file being split. This method will url encode the + * filename. * @param zkw zk reference * @param filename log file name (only the basename) */ @@ -88,7 +86,6 @@ public static boolean isRescanNode(String name) { /** * Checks if the given path represents a rescan node. - * * @param zkw reference to the {@link ZKWatcher} which also contains configuration and constants * @param path the absolute path, starts with '/' * @return whether the path represents a rescan node @@ -110,19 +107,17 @@ public static Path getSplitLogDir(Path rootdir, String tmpname) { return new Path(new Path(rootdir, HConstants.SPLIT_LOGDIR_NAME), tmpname); } - public static void markCorrupted(Path rootdir, String logFileName, - FileSystem fs) { + public static void markCorrupted(Path rootdir, String logFileName, FileSystem fs) { Path file = new Path(getSplitLogDir(rootdir, logFileName), "corrupt"); try { fs.createNewFile(file); } catch (IOException e) { - LOG.warn("Could not flag a log file as corrupted. Failed to create " + - file, e); + LOG.warn("Could not flag a log file as corrupted. 
Failed to create " + file, e); } } - public static boolean isCorrupted(Path rootdir, String logFileName, - FileSystem fs) throws IOException { + public static boolean isCorrupted(Path rootdir, String logFileName, FileSystem fs) + throws IOException { Path file = new Path(getSplitLogDir(rootdir, logFileName), "corrupt"); boolean isCorrupt; isCorrupt = fs.exists(file); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index acce316bae3b..ae8d44439160 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -48,17 +48,18 @@ import org.apache.zookeeper.proto.SetDataRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; /** * Internal HBase utility class for ZooKeeper. - * - *

<p>Contains only static methods and constants. - * - * <p>Methods all throw {@link KeeperException} if there is an unexpected - * zookeeper exception, so callers of these methods must handle appropriately. - * If ZK is required for the operation, the server will need to be aborted. + * <p> + * Contains only static methods and constants. + * <p>
          + * Methods all throw {@link KeeperException} if there is an unexpected zookeeper exception, so + * callers of these methods must handle appropriately. If ZK is required for the operation, the + * server will need to be aborted. */ @InterfaceAudience.Private public final class ZKUtil { @@ -86,7 +87,7 @@ public static String getParent(String node) { * @return name of the current node */ public static String getNodeName(String path) { - return path.substring(path.lastIndexOf("/")+1); + return path.substring(path.lastIndexOf("/") + 1); } // @@ -94,17 +95,15 @@ public static String getNodeName(String path) { // /** - * Watch the specified znode for delete/create/change events. The watcher is - * set whether or not the node exists. If the node already exists, the method - * returns true. If the node does not exist, the method returns false. - * + * Watch the specified znode for delete/create/change events. The watcher is set whether or not + * the node exists. If the node already exists, the method returns true. If the node does not + * exist, the method returns false. * @param zkw zk reference * @param znode path of node to watch * @return true if znode exists, false if does not exist or error * @throws KeeperException if unexpected zookeeper exception */ - public static boolean watchAndCheckExists(ZKWatcher zkw, String znode) - throws KeeperException { + public static boolean watchAndCheckExists(ZKWatcher zkw, String znode) throws KeeperException { try { Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw); boolean exists = s != null; @@ -126,17 +125,15 @@ public static boolean watchAndCheckExists(ZKWatcher zkw, String znode) } /** - * Watch the specified znode, but only if exists. Useful when watching - * for deletions. Uses .getData() (and handles NoNodeException) instead - * of .exists() to accomplish this, as .getData() will only set a watch if - * the znode exists. + * Watch the specified znode, but only if exists. Useful when watching for deletions. Uses + * .getData() (and handles NoNodeException) instead of .exists() to accomplish this, as .getData() + * will only set a watch if the znode exists. * @param zkw zk reference * @param znode path of node to watch * @return true if the watch is set, false if node does not exists * @throws KeeperException if unexpected zookeeper exception */ - public static boolean setWatchIfNodeExists(ZKWatcher zkw, String znode) - throws KeeperException { + public static boolean setWatchIfNodeExists(ZKWatcher zkw, String znode) throws KeeperException { try { zkw.getRecoverableZooKeeper().getData(znode, true, null); return true; @@ -150,15 +147,13 @@ public static boolean setWatchIfNodeExists(ZKWatcher zkw, String znode) } /** - * Check if the specified node exists. Sets no watches. - * + * Check if the specified node exists. Sets no watches. * @param zkw zk reference * @param znode path of node to watch * @return version of the node if it exists, -1 if does not exist * @throws KeeperException if unexpected zookeeper exception */ - public static int checkExists(ZKWatcher zkw, String znode) - throws KeeperException { + public static int checkExists(ZKWatcher zkw, String znode) throws KeeperException { try { Stat s = zkw.getRecoverableZooKeeper().exists(znode, null); return s != null ? s.getVersion() : -1; @@ -178,29 +173,24 @@ public static int checkExists(ZKWatcher zkw, String znode) // /** - * Lists the children znodes of the specified znode. 
Also sets a watch on - * the specified znode which will capture a NodeDeleted event on the specified - * znode as well as NodeChildrenChanged if any children of the specified znode - * are created or deleted. - * - * Returns null if the specified node does not exist. Otherwise returns a - * list of children of the specified node. If the node exists but it has no - * children, an empty list will be returned. - * + * Lists the children znodes of the specified znode. Also sets a watch on the specified znode + * which will capture a NodeDeleted event on the specified znode as well as NodeChildrenChanged if + * any children of the specified znode are created or deleted. Returns null if the specified node + * does not exist. Otherwise returns a list of children of the specified node. If the node exists + * but it has no children, an empty list will be returned. * @param zkw zk reference * @param znode path of node to list and watch children of - * @return list of children of the specified node, an empty list if the node - * exists but has no children, and null if the node does not exist + * @return list of children of the specified node, an empty list if the node exists but has no + * children, and null if the node does not exist * @throws KeeperException if unexpected zookeeper exception */ - public static List listChildrenAndWatchForNewChildren( - ZKWatcher zkw, String znode) - throws KeeperException { + public static List listChildrenAndWatchForNewChildren(ZKWatcher zkw, String znode) + throws KeeperException { try { return zkw.getRecoverableZooKeeper().getChildren(znode, zkw); - } catch(KeeperException.NoNodeException ke) { - LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + - "because node does not exist (not an error)")); + } catch (KeeperException.NoNodeException ke) { + LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + + "because node does not exist (not an error)")); } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); zkw.keeperException(e); @@ -213,16 +203,16 @@ public static List listChildrenAndWatchForNewChildren( } /** - * List all the children of the specified znode, setting a watch for children - * changes and also setting a watch on every individual child in order to get - * the NodeCreated and NodeDeleted events. + * List all the children of the specified znode, setting a watch for children changes and also + * setting a watch on every individual child in order to get the NodeCreated and NodeDeleted + * events. * @param zkw zookeeper reference * @param znode node to get children of and watch * @return list of znode names, null if the node doesn't exist * @throws KeeperException if a ZooKeeper operation fails */ - public static List listChildrenAndWatchThem(ZKWatcher zkw, - String znode) throws KeeperException { + public static List listChildrenAndWatchThem(ZKWatcher zkw, String znode) + throws KeeperException { List children = listChildrenAndWatchForNewChildren(zkw, znode); if (children == null) { return null; @@ -234,28 +224,24 @@ public static List listChildrenAndWatchThem(ZKWatcher zkw, } /** - * Lists the children of the specified znode without setting any watches. - * - * Sets no watches at all, this method is best effort. - * - * Returns an empty list if the node has no children. Returns null if the - * parent node itself does not exist. - * + * Lists the children of the specified znode without setting any watches. Sets no watches at all, + * this method is best effort. 
Returns an empty list if the node has no children. Returns null if + * the parent node itself does not exist. * @param zkw zookeeper reference * @param znode node to get children - * @return list of data of children of specified znode, empty if no children, - * null if parent does not exist + * @return list of data of children of specified znode, empty if no children, null if parent does + * not exist * @throws KeeperException if unexpected zookeeper exception */ public static List listChildrenNoWatch(ZKWatcher zkw, String znode) - throws KeeperException { + throws KeeperException { List children = null; try { // List the children without watching children = zkw.getRecoverableZooKeeper().getChildren(znode, null); - } catch(KeeperException.NoNodeException nne) { + } catch (KeeperException.NoNodeException nne) { return null; - } catch(InterruptedException ie) { + } catch (InterruptedException ie) { zkw.interruptedException(ie); } return children; @@ -268,49 +254,48 @@ public static List listChildrenNoWatch(ZKWatcher zkw, String znode) @Deprecated public static class NodeAndData { private String node; - private byte [] data; - public NodeAndData(String node, byte [] data) { + private byte[] data; + + public NodeAndData(String node, byte[] data) { this.node = node; this.data = data; } + public String getNode() { return node; } - public byte [] getData() { + + public byte[] getData() { return data; } + @Override public String toString() { return node; } + public boolean isEmpty() { return (data == null || data.length == 0); } } /** - * Checks if the specified znode has any children. Sets no watches. - * - * Returns true if the node exists and has children. Returns false if the - * node does not exist or if the node does not have any children. - * - * Used during master initialization to determine if the master is a - * failed-over-to master or the first master during initial cluster startup. - * If the directory for regionserver ephemeral nodes is empty then this is - * a cluster startup, if not then it is not cluster startup. - * + * Checks if the specified znode has any children. Sets no watches. Returns true if the node + * exists and has children. Returns false if the node does not exist or if the node does not have + * any children. Used during master initialization to determine if the master is a failed-over-to + * master or the first master during initial cluster startup. If the directory for regionserver + * ephemeral nodes is empty then this is a cluster startup, if not then it is not cluster startup. 
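The nodeHasChildren() javadoc above describes the failed-over-master check. A minimal sketch of that check against the region server ephemeral-node directory, assuming an already-connected ZKWatcher:

    import org.apache.hadoop.hbase.zookeeper.ZKUtil;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
    import org.apache.zookeeper.KeeperException;

    public final class StartupCheckExample {
      /** Returns true when region server znodes already exist, i.e. this master is failing over. */
      public static boolean isFailover(ZKWatcher zkw) throws KeeperException {
        // An empty rs directory means no region server has registered yet: fresh cluster startup.
        return ZKUtil.nodeHasChildren(zkw, zkw.getZNodePaths().rsZNode);
      }
    }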
* @param zkw zk reference * @param znode path of node to check for children of * @return true if node has children, false if not or node does not exist * @throws KeeperException if unexpected zookeeper exception */ - public static boolean nodeHasChildren(ZKWatcher zkw, String znode) - throws KeeperException { + public static boolean nodeHasChildren(ZKWatcher zkw, String znode) throws KeeperException { try { return !zkw.getRecoverableZooKeeper().getChildren(znode, null).isEmpty(); - } catch(KeeperException.NoNodeException ke) { - LOG.debug(zkw.prefix("Unable to list children of znode " + znode + - " because node does not exist (not an error)")); + } catch (KeeperException.NoNodeException ke) { + LOG.debug(zkw.prefix("Unable to list children of znode " + znode + + " because node does not exist (not an error)")); return false; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); @@ -324,27 +309,21 @@ public static boolean nodeHasChildren(ZKWatcher zkw, String znode) } /** - * Get the number of children of the specified node. - * - * If the node does not exist or has no children, returns 0. - * - * Sets no watches at all. - * + * Get the number of children of the specified node. If the node does not exist or has no + * children, returns 0. Sets no watches at all. * @param zkw zk reference * @param znode path of node to count children of - * @return number of children of specified node, 0 if none or parent does not - * exist + * @return number of children of specified node, 0 if none or parent does not exist * @throws KeeperException if unexpected zookeeper exception */ - public static int getNumberOfChildren(ZKWatcher zkw, String znode) - throws KeeperException { + public static int getNumberOfChildren(ZKWatcher zkw, String znode) throws KeeperException { try { Stat stat = zkw.getRecoverableZooKeeper().exists(znode, null); return stat == null ? 0 : stat.getNumChildren(); - } catch(KeeperException e) { + } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to get children of node " + znode)); zkw.keeperException(e); - } catch(InterruptedException e) { + } catch (InterruptedException e) { zkw.interruptedException(e); } return 0; @@ -356,18 +335,17 @@ public static int getNumberOfChildren(ZKWatcher zkw, String znode) /** * Get znode data. Does not set a watcher. - * * @return ZNode data, null if the node does not exist or if there is an error. */ - public static byte [] getData(ZKWatcher zkw, String znode) + public static byte[] getData(ZKWatcher zkw, String znode) throws KeeperException, InterruptedException { try { - byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, null); + byte[] data = zkw.getRecoverableZooKeeper().getData(znode, null, null); logRetrievedMsg(zkw, znode, data, false); return data; } catch (KeeperException.NoNodeException e) { - LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + - "because node does not exist (not an error)")); + LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + + "because node does not exist (not an error)")); return null; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); @@ -377,11 +355,9 @@ public static int getNumberOfChildren(ZKWatcher zkw, String znode) } /** - * Get the data at the specified znode and set a watch. - * - * Returns the data and sets a watch if the node exists. Returns null and no - * watch is set if the node does not exist or there is an exception. 
- * + * Get the data at the specified znode and set a watch. Returns the data and sets a watch if the + * node exists. Returns null and no watch is set if the node does not exist or there is an + * exception. * @param zkw zk reference * @param znode path of node * @return data of the specified znode, or null @@ -392,50 +368,46 @@ public static byte[] getDataAndWatch(ZKWatcher zkw, String znode) throws KeeperE } /** - * Get the data at the specified znode and set a watch. - * Returns the data and sets a watch if the node exists. Returns null and no - * watch is set if the node does not exist or there is an exception. - * - * @param zkw zk reference - * @param znode path of node + * Get the data at the specified znode and set a watch. Returns the data and sets a watch if the + * node exists. Returns null and no watch is set if the node does not exist or there is an + * exception. + * @param zkw zk reference + * @param znode path of node * @param throwOnInterrupt if false then just interrupt the thread, do not throw exception * @return data of the specified znode, or null * @throws KeeperException if unexpected zookeeper exception */ public static byte[] getDataAndWatch(ZKWatcher zkw, String znode, boolean throwOnInterrupt) - throws KeeperException { + throws KeeperException { return getDataInternal(zkw, znode, null, true, throwOnInterrupt); } /** - * Get the data at the specified znode and set a watch. - * - * Returns the data and sets a watch if the node exists. Returns null and no - * watch is set if the node does not exist or there is an exception. - * + * Get the data at the specified znode and set a watch. Returns the data and sets a watch if the + * node exists. Returns null and no watch is set if the node does not exist or there is an + * exception. * @param zkw zk reference * @param znode path of node * @param stat object to populate the version of the znode * @return data of the specified znode, or null * @throws KeeperException if unexpected zookeeper exception */ - public static byte[] getDataAndWatch(ZKWatcher zkw, String znode, - Stat stat) throws KeeperException { + public static byte[] getDataAndWatch(ZKWatcher zkw, String znode, Stat stat) + throws KeeperException { return getDataInternal(zkw, znode, stat, true, true); } private static byte[] getDataInternal(ZKWatcher zkw, String znode, Stat stat, boolean watcherSet, - boolean throwOnInterrupt) - throws KeeperException { + boolean throwOnInterrupt) throws KeeperException { try { - byte [] data = zkw.getRecoverableZooKeeper().getData(znode, zkw, stat); + byte[] data = zkw.getRecoverableZooKeeper().getData(znode, zkw, stat); logRetrievedMsg(zkw, znode, data, watcherSet); return data; } catch (KeeperException.NoNodeException e) { // This log can get pretty annoying when we cycle on 100ms waits. // Enable trace if you really want to see it. - LOG.trace(zkw.prefix("Unable to get data of znode " + znode + " " + - "because node does not exist (not an error)")); + LOG.trace(zkw.prefix("Unable to get data of znode " + znode + " " + + "because node does not exist (not an error)")); return null; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); @@ -453,30 +425,24 @@ private static byte[] getDataInternal(ZKWatcher zkw, String znode, Stat stat, bo } /** - * Get the data at the specified znode without setting a watch. - * - * Returns the data if the node exists. Returns null if the node does not - * exist. - * - * Sets the stats of the node in the passed Stat object. 
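// Usage sketch (editor's illustration, not part of this diff): a read-modify-write cycle
// using getDataNoWatch (described next) together with the version-checked setData that
// appears later in this file. The counter znode and Bytes encoding are hypothetical.
static boolean bumpCounter(ZKWatcher zkw, String counterZNode) throws KeeperException {
  Stat stat = new Stat();
  byte[] current = ZKUtil.getDataNoWatch(zkw, counterZNode, stat);
  if (current == null) {
    return false; // znode does not exist
  }
  byte[] next = Bytes.toBytes(Bytes.toLong(current) + 1);
  // Returns false instead of throwing when another writer won the race (version mismatch).
  return ZKUtil.setData(zkw, counterZNode, next, stat.getVersion());
}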
Pass a null stat if - * not interested. - * + * Get the data at the specified znode without setting a watch. Returns the data if the node + * exists. Returns null if the node does not exist. Sets the stats of the node in the passed Stat + * object. Pass a null stat if not interested. * @param zkw zk reference * @param znode path of node * @param stat node status to get if node exists * @return data of the specified znode, or null if node does not exist * @throws KeeperException if unexpected zookeeper exception */ - public static byte [] getDataNoWatch(ZKWatcher zkw, String znode, - Stat stat) - throws KeeperException { + public static byte[] getDataNoWatch(ZKWatcher zkw, String znode, Stat stat) + throws KeeperException { try { - byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, stat); + byte[] data = zkw.getRecoverableZooKeeper().getData(znode, null, stat); logRetrievedMsg(zkw, znode, data, false); return data; } catch (KeeperException.NoNodeException e) { - LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + - "because node does not exist (not necessarily an error)")); + LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + + "because node does not exist (not necessarily an error)")); return null; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); @@ -490,51 +456,42 @@ private static byte[] getDataInternal(ZKWatcher zkw, String znode, Stat stat, bo } /** - * Returns the date of child znodes of the specified znode. Also sets a watch on - * the specified znode which will capture a NodeDeleted event on the specified - * znode as well as NodeChildrenChanged if any children of the specified znode - * are created or deleted. - * - * Returns null if the specified node does not exist. Otherwise returns a - * list of children of the specified node. If the node exists but it has no - * children, an empty list will be returned. - * + * Returns the date of child znodes of the specified znode. Also sets a watch on the specified + * znode which will capture a NodeDeleted event on the specified znode as well as + * NodeChildrenChanged if any children of the specified znode are created or deleted. Returns null + * if the specified node does not exist. Otherwise returns a list of children of the specified + * node. If the node exists but it has no children, an empty list will be returned. * @param zkw zk reference * @param baseNode path of node to list and watch children of - * @return list of data of children of the specified node, an empty list if the node - * exists but has no children, and null if the node does not exist + * @return list of data of children of the specified node, an empty list if the node exists but + * has no children, and null if the node does not exist * @throws KeeperException if unexpected zookeeper exception * @deprecated Unused */ @Deprecated public static List getChildDataAndWatchForNewChildren(ZKWatcher zkw, String baseNode) - throws KeeperException { + throws KeeperException { return getChildDataAndWatchForNewChildren(zkw, baseNode, true); } /** - * Returns the date of child znodes of the specified znode. Also sets a watch on - * the specified znode which will capture a NodeDeleted event on the specified - * znode as well as NodeChildrenChanged if any children of the specified znode - * are created or deleted. - * - * Returns null if the specified node does not exist. Otherwise returns a - * list of children of the specified node. 
If the node exists but it has no - * children, an empty list will be returned. - * + * Returns the date of child znodes of the specified znode. Also sets a watch on the specified + * znode which will capture a NodeDeleted event on the specified znode as well as + * NodeChildrenChanged if any children of the specified znode are created or deleted. Returns null + * if the specified node does not exist. Otherwise returns a list of children of the specified + * node. If the node exists but it has no children, an empty list will be returned. * @param zkw zk reference * @param baseNode path of node to list and watch children of * @param throwOnInterrupt if true then just interrupt the thread, do not throw exception - * @return list of data of children of the specified node, an empty list if the node - * exists but has no children, and null if the node does not exist + * @return list of data of children of the specified node, an empty list if the node exists but + * has no children, and null if the node does not exist * @throws KeeperException if unexpected zookeeper exception * @deprecated Unused */ @Deprecated - public static List getChildDataAndWatchForNewChildren( - ZKWatcher zkw, String baseNode, boolean throwOnInterrupt) throws KeeperException { - List nodes = - ZKUtil.listChildrenAndWatchForNewChildren(zkw, baseNode); + public static List getChildDataAndWatchForNewChildren(ZKWatcher zkw, String baseNode, + boolean throwOnInterrupt) throws KeeperException { + List nodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw, baseNode); if (nodes != null) { List newNodes = new ArrayList<>(); for (String node : nodes) { @@ -552,13 +509,9 @@ public static List getChildDataAndWatchForNewChildren( } /** - * Update the data of an existing node with the expected version to have the - * specified data. - * - * Throws an exception if there is a version mismatch or some other problem. - * - * Sets no watches under any conditions. - * + * Update the data of an existing node with the expected version to have the specified data. + * Throws an exception if there is a version mismatch or some other problem. Sets no watches under + * any conditions. * @param zkw zk reference * @param znode the path to the ZNode * @param data the data to store in ZooKeeper @@ -572,7 +525,7 @@ public static void updateExistingNodeData(ZKWatcher zkw, String znode, byte[] da int expectedVersion) throws KeeperException { try { zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion); - } catch(InterruptedException ie) { + } catch (InterruptedException ie) { zkw.interruptedException(ie); } } @@ -582,18 +535,16 @@ public static void updateExistingNodeData(ZKWatcher zkw, String znode, byte[] da // /** - * Sets the data of the existing znode to be the specified data. Ensures that - * the current data has the specified expected version. - * - *
<p>
          If the node does not exist, a {@link NoNodeException} will be thrown. - * - *
<p>
          If there is a version mismatch, method returns null. - * - *
<p>
          No watches are set but setting data will trigger other watchers of this - * node. - * - *
<p>
          If there is another problem, a KeeperException will be thrown. - * + * Sets the data of the existing znode to be the specified data. Ensures that the current data has + * the specified expected version. + *
<p>
          + * If the node does not exist, a {@link NoNodeException} will be thrown. + *
<p>
          + * If there is a version mismatch, method returns null. + *
<p>
          + * No watches are set but setting data will trigger other watchers of this node. + *
<p>
          + * If there is another problem, a KeeperException will be thrown. * @param zkw zk reference * @param znode path of node * @param data data to set for node @@ -601,9 +552,8 @@ public static void updateExistingNodeData(ZKWatcher zkw, String znode, byte[] da * @return true if data set, false if version mismatch * @throws KeeperException if unexpected zookeeper exception */ - public static boolean setData(ZKWatcher zkw, String znode, - byte [] data, int expectedVersion) - throws KeeperException, KeeperException.NoNodeException { + public static boolean setData(ZKWatcher zkw, String znode, byte[] data, int expectedVersion) + throws KeeperException, KeeperException.NoNodeException { try { return zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion) != null; } catch (InterruptedException e) { @@ -613,16 +563,14 @@ public static boolean setData(ZKWatcher zkw, String znode, } /** - * Set data into node creating node if it doesn't yet exist. - * Does not set watch. - * + * Set data into node creating node if it doesn't yet exist. Does not set watch. * @param zkw zk reference * @param znode path of node * @param data data to set for node * @throws KeeperException if a ZooKeeper operation fails */ - public static void createSetData(final ZKWatcher zkw, final String znode, final byte [] data) - throws KeeperException { + public static void createSetData(final ZKWatcher zkw, final String znode, final byte[] data) + throws KeeperException { if (checkExists(zkw, znode) == -1) { ZKUtil.createWithParents(zkw, znode, data); } else { @@ -631,29 +579,27 @@ public static void createSetData(final ZKWatcher zkw, final String znode, final } /** - * Sets the data of the existing znode to be the specified data. The node - * must exist but no checks are done on the existing data or version. - * - *
<p>
          If the node does not exist, a {@link NoNodeException} will be thrown. - * - *
<p>
          No watches are set but setting data will trigger other watchers of this - * node. - * - *
<p>
          If there is another problem, a KeeperException will be thrown. - * + * Sets the data of the existing znode to be the specified data. The node must exist but no checks + * are done on the existing data or version. + *
<p>
          + * If the node does not exist, a {@link NoNodeException} will be thrown. + *
<p>
          + * No watches are set but setting data will trigger other watchers of this node. + *
<p>
          + * If there is another problem, a KeeperException will be thrown. * @param zkw zk reference * @param znode path of node * @param data data to set for node * @throws KeeperException if unexpected zookeeper exception */ - public static void setData(ZKWatcher zkw, String znode, byte [] data) - throws KeeperException, KeeperException.NoNodeException { - setData(zkw, (SetData)ZKUtilOp.setData(znode, data)); + public static void setData(ZKWatcher zkw, String znode, byte[] data) + throws KeeperException, KeeperException.NoNodeException { + setData(zkw, (SetData) ZKUtilOp.setData(znode, data)); } private static void setData(ZKWatcher zkw, SetData setData) - throws KeeperException, KeeperException.NoNodeException { - SetDataRequest sd = (SetDataRequest)toZooKeeperOp(zkw, setData).toRequestRecord(); + throws KeeperException, KeeperException.NoNodeException { + SetDataRequest sd = (SetDataRequest) toZooKeeperOp(zkw, setData).toRequestRecord(); setData(zkw, sd.getPath(), sd.getData(), sd.getVersion()); } @@ -662,36 +608,28 @@ private static void setData(ZKWatcher zkw, SetData setData) // /** - * - * Set the specified znode to be an ephemeral node carrying the specified - * data. - * - * If the node is created successfully, a watcher is also set on the node. - * - * If the node is not created successfully because it already exists, this - * method will also set a watcher on the node. - * - * If there is another problem, a KeeperException will be thrown. - * + * Set the specified znode to be an ephemeral node carrying the specified data. If the node is + * created successfully, a watcher is also set on the node. If the node is not created + * successfully because it already exists, this method will also set a watcher on the node. If + * there is another problem, a KeeperException will be thrown. * @param zkw zk reference * @param znode path of node * @param data data of node * @return true if node created, false if not, watch set in both cases * @throws KeeperException if unexpected zookeeper exception */ - public static boolean createEphemeralNodeAndWatch(ZKWatcher zkw, String znode, byte [] data) - throws KeeperException { + public static boolean createEphemeralNodeAndWatch(ZKWatcher zkw, String znode, byte[] data) + throws KeeperException { boolean ret = true; try { - zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), - CreateMode.EPHEMERAL); + zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), CreateMode.EPHEMERAL); } catch (KeeperException.NodeExistsException nee) { ret = false; } catch (InterruptedException e) { LOG.info("Interrupted", e); Thread.currentThread().interrupt(); } - if(!watchAndCheckExists(zkw, znode)) { + if (!watchAndCheckExists(zkw, znode)) { // It did exist but now it doesn't, try again return createEphemeralNodeAndWatch(zkw, znode, data); } @@ -699,32 +637,23 @@ public static boolean createEphemeralNodeAndWatch(ZKWatcher zkw, String znode, b } /** - * Creates the specified znode to be a persistent node carrying the specified - * data. - * - * Returns true if the node was successfully created, false if the node - * already existed. - * - * If the node is created successfully, a watcher is also set on the node. - * - * If the node is not created successfully because it already exists, this - * method will also set a watcher on the node but return false. - * - * If there is another problem, a KeeperException will be thrown. - * + * Creates the specified znode to be a persistent node carrying the specified data. 
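// Usage sketch (editor's illustration, not part of this diff): a server registering an
// ephemeral znode under a persistent parent. The "/hbase/rs" parent, server name and
// payload are hypothetical; createWithParents is described further down in this file.
static boolean registerEphemeral(ZKWatcher zkw, String serverName, byte[] payload)
    throws KeeperException {
  ZKUtil.createWithParents(zkw, "/hbase/rs"); // persistent parent, silently ok if it exists
  // Returns false (but still leaves a watch set) if the ephemeral znode already exists,
  // e.g. because a previous session has not expired yet.
  return ZKUtil.createEphemeralNodeAndWatch(zkw, "/hbase/rs/" + serverName, payload);
}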
Returns true + * if the node was successfully created, false if the node already existed. If the node is created + * successfully, a watcher is also set on the node. If the node is not created successfully + * because it already exists, this method will also set a watcher on the node but return false. If + * there is another problem, a KeeperException will be thrown. * @param zkw zk reference * @param znode path of node * @param data data of node * @return true if node created, false if not, watch set in both cases * @throws KeeperException if unexpected zookeeper exception */ - public static boolean createNodeIfNotExistsAndWatch( - ZKWatcher zkw, String znode, byte [] data) - throws KeeperException { + public static boolean createNodeIfNotExistsAndWatch(ZKWatcher zkw, String znode, byte[] data) + throws KeeperException { boolean ret = true; try { zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), - CreateMode.PERSISTENT); + CreateMode.PERSISTENT); } catch (KeeperException.NodeExistsException nee) { ret = false; } catch (InterruptedException e) { @@ -741,12 +670,8 @@ public static boolean createNodeIfNotExistsAndWatch( } /** - * Creates the specified znode with the specified data but does not watch it. - * - * Returns the znode of the newly created node - * - * If there is another problem, a KeeperException will be thrown. - * + * Creates the specified znode with the specified data but does not watch it. Returns the znode of + * the newly created node If there is another problem, a KeeperException will be thrown. * @param zkw zk reference * @param znode path of node * @param data data of node @@ -768,13 +693,12 @@ public static String createNodeIfNotExistsNoWatch(ZKWatcher zkw, String znode, b /** * Creates the specified node with the specified data and watches it. - * - *
<p>
          Throws an exception if the node already exists. - * - *
<p>
          The node created is persistent and open access. - * - *
<p>
          Returns the version number of the created node if successful. - * + *
<p>
          + * Throws an exception if the node already exists. + *
<p>
          + * The node created is persistent and open access. + *
<p>
          + * Returns the version number of the created node if successful. * @param zkw zk reference * @param znode path of node to create * @param data data of node to create @@ -782,17 +706,16 @@ public static String createNodeIfNotExistsNoWatch(ZKWatcher zkw, String znode, b * @throws KeeperException if unexpected zookeeper exception * @throws KeeperException.NodeExistsException if node already exists */ - public static int createAndWatch(ZKWatcher zkw, - String znode, byte [] data) - throws KeeperException, KeeperException.NodeExistsException { + public static int createAndWatch(ZKWatcher zkw, String znode, byte[] data) + throws KeeperException, KeeperException.NodeExistsException { try { zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), CreateMode.PERSISTENT); Stat stat = zkw.getRecoverableZooKeeper().exists(znode, zkw); - if (stat == null){ + if (stat == null) { // Likely a race condition. Someone deleted the znode. throw KeeperException.create(KeeperException.Code.SYSTEMERROR, - "ZK.exists returned null (i.e.: znode does not exist) for znode=" + znode); + "ZK.exists returned null (i.e.: znode does not exist) for znode=" + znode); } return stat.getVersion(); @@ -804,60 +727,50 @@ public static int createAndWatch(ZKWatcher zkw, /** * Async creates the specified node with the specified data. - * - *
<p>
          Throws an exception if the node already exists. - * - *
<p>
          The node created is persistent and open access. - * + *
<p>
          + * Throws an exception if the node already exists. + *
<p>
          + * The node created is persistent and open access. * @param zkw zk reference * @param znode path of node to create * @param data data of node to create * @param cb the callback to use for the creation * @param ctx the context to use for the creation */ - public static void asyncCreate(ZKWatcher zkw, - String znode, byte [] data, final AsyncCallback.StringCallback cb, - final Object ctx) { - zkw.getRecoverableZooKeeper().getZooKeeper().create(znode, data, - zkw.createACL(znode), CreateMode.PERSISTENT, cb, ctx); + public static void asyncCreate(ZKWatcher zkw, String znode, byte[] data, + final AsyncCallback.StringCallback cb, final Object ctx) { + zkw.getRecoverableZooKeeper().getZooKeeper().create(znode, data, zkw.createACL(znode), + CreateMode.PERSISTENT, cb, ctx); } /** - * Creates the specified node, iff the node does not exist. Does not set a - * watch and fails silently if the node already exists. - * - * The node created is persistent and open access. - * + * Creates the specified node, iff the node does not exist. Does not set a watch and fails + * silently if the node already exists. The node created is persistent and open access. * @param zkw zk reference * @param znode path of node * @throws KeeperException if unexpected zookeeper exception */ - public static void createAndFailSilent(ZKWatcher zkw, - String znode) throws KeeperException { + public static void createAndFailSilent(ZKWatcher zkw, String znode) throws KeeperException { createAndFailSilent(zkw, znode, new byte[0]); } /** - * Creates the specified node containing specified data, iff the node does not exist. Does - * not set a watch and fails silently if the node already exists. - * - * The node created is persistent and open access. - * + * Creates the specified node containing specified data, iff the node does not exist. Does not set + * a watch and fails silently if the node already exists. The node created is persistent and open + * access. * @param zkw zk reference * @param znode path of node * @param data a byte array data to store in the znode * @throws KeeperException if unexpected zookeeper exception */ - public static void createAndFailSilent(ZKWatcher zkw, - String znode, byte[] data) - throws KeeperException { - createAndFailSilent(zkw, - (CreateAndFailSilent)ZKUtilOp.createAndFailSilent(znode, data)); + public static void createAndFailSilent(ZKWatcher zkw, String znode, byte[] data) + throws KeeperException { + createAndFailSilent(zkw, (CreateAndFailSilent) ZKUtilOp.createAndFailSilent(znode, data)); } private static void createAndFailSilent(ZKWatcher zkw, CreateAndFailSilent cafs) - throws KeeperException { - CreateRequest create = (CreateRequest)toZooKeeperOp(zkw, cafs).toRequestRecord(); + throws KeeperException { + CreateRequest create = (CreateRequest) toZooKeeperOp(zkw, cafs).toRequestRecord(); String znode = create.getPath(); try { RecoverableZooKeeper zk = zkw.getRecoverableZooKeeper(); @@ -870,7 +783,7 @@ private static void createAndFailSilent(ZKWatcher zkw, CreateAndFailSilent cafs) try { if (null == zkw.getRecoverableZooKeeper().exists(znode, false)) { // If we failed to create the file and it does not already exist. - throw(nee); + throw (nee); } } catch (InterruptedException ie) { zkw.interruptedException(ie); @@ -881,48 +794,40 @@ private static void createAndFailSilent(ZKWatcher zkw, CreateAndFailSilent cafs) } /** - * Creates the specified node and all parent nodes required for it to exist. - * - * No watches are set and no errors are thrown if the node already exists. 
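// Usage sketch (editor's illustration, not part of this diff): bootstrapping a small
// subtree with the create helpers in this section. All paths and payloads are hypothetical.
static void bootstrapExampleZNodes(ZKWatcher zkw) throws KeeperException {
  ZKUtil.createWithParents(zkw, "/hbase/example/config");          // creates missing parents too
  ZKUtil.createAndFailSilent(zkw, "/hbase/example/config/flags");  // no-op if already present
  ZKUtil.createSetData(zkw, "/hbase/example/config/limit", Bytes.toBytes(100L)); // create or overwrite
}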
- * - * The nodes created are persistent and open access. - * + * Creates the specified node and all parent nodes required for it to exist. No watches are set + * and no errors are thrown if the node already exists. The nodes created are persistent and open + * access. * @param zkw zk reference * @param znode path of node * @throws KeeperException if unexpected zookeeper exception */ - public static void createWithParents(ZKWatcher zkw, String znode) - throws KeeperException { + public static void createWithParents(ZKWatcher zkw, String znode) throws KeeperException { createWithParents(zkw, znode, new byte[0]); } /** - * Creates the specified node and all parent nodes required for it to exist. The creation of + * Creates the specified node and all parent nodes required for it to exist. The creation of * parent znodes is not atomic with the leafe znode creation but the data is written atomically - * when the leaf node is created. - * - * No watches are set and no errors are thrown if the node already exists. - * - * The nodes created are persistent and open access. - * + * when the leaf node is created. No watches are set and no errors are thrown if the node already + * exists. The nodes created are persistent and open access. * @param zkw zk reference * @param znode path of node * @throws KeeperException if unexpected zookeeper exception */ public static void createWithParents(ZKWatcher zkw, String znode, byte[] data) - throws KeeperException { + throws KeeperException { try { - if(znode == null) { + if (znode == null) { return; } zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode), - CreateMode.PERSISTENT); - } catch(KeeperException.NodeExistsException nee) { + CreateMode.PERSISTENT); + } catch (KeeperException.NodeExistsException nee) { return; - } catch(KeeperException.NoNodeException nne) { + } catch (KeeperException.NoNodeException nne) { createWithParents(zkw, getParent(znode)); createWithParents(zkw, znode, data); - } catch(InterruptedException ie) { + } catch (InterruptedException ie) { zkw.interruptedException(ie); } } @@ -932,115 +837,91 @@ public static void createWithParents(ZKWatcher zkw, String znode, byte[] data) // /** - * Delete the specified node. Sets no watches. Throws all exceptions. + * Delete the specified node. Sets no watches. Throws all exceptions. */ - public static void deleteNode(ZKWatcher zkw, String node) - throws KeeperException { + public static void deleteNode(ZKWatcher zkw, String node) throws KeeperException { deleteNode(zkw, node, -1); } /** - * Delete the specified node with the specified version. Sets no watches. - * Throws all exceptions. + * Delete the specified node with the specified version. Sets no watches. Throws all exceptions. */ - public static boolean deleteNode(ZKWatcher zkw, String node, - int version) - throws KeeperException { + public static boolean deleteNode(ZKWatcher zkw, String node, int version) throws KeeperException { try { zkw.getRecoverableZooKeeper().delete(node, version); return true; - } catch(KeeperException.BadVersionException bve) { + } catch (KeeperException.BadVersionException bve) { return false; - } catch(InterruptedException ie) { + } catch (InterruptedException ie) { zkw.interruptedException(ie); return false; } } /** - * Deletes the specified node. Fails silent if the node does not exist. - * + * Deletes the specified node. Fails silent if the node does not exist. 
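// Usage sketch (editor's illustration, not part of this diff): the delete helpers side by
// side, applied to the hypothetical znodes from the sketch above. deleteNodeRecursively is
// documented just below.
static void cleanupExampleZNodes(ZKWatcher zkw) throws KeeperException {
  ZKUtil.deleteNode(zkw, "/hbase/example/config/limit");            // throws if the znode is missing
  ZKUtil.deleteNodeFailSilent(zkw, "/hbase/example/config/flags");  // ignores NoNodeException
  ZKUtil.deleteNodeRecursively(zkw, "/hbase/example");              // children first, then the node itself
}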
* @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation * @param node the node to delete * @throws KeeperException if a ZooKeeper operation fails */ - public static void deleteNodeFailSilent(ZKWatcher zkw, String node) - throws KeeperException { - deleteNodeFailSilent(zkw, - (DeleteNodeFailSilent)ZKUtilOp.deleteNodeFailSilent(node)); + public static void deleteNodeFailSilent(ZKWatcher zkw, String node) throws KeeperException { + deleteNodeFailSilent(zkw, (DeleteNodeFailSilent) ZKUtilOp.deleteNodeFailSilent(node)); } - private static void deleteNodeFailSilent(ZKWatcher zkw, - DeleteNodeFailSilent dnfs) throws KeeperException { - DeleteRequest delete = (DeleteRequest)toZooKeeperOp(zkw, dnfs).toRequestRecord(); + private static void deleteNodeFailSilent(ZKWatcher zkw, DeleteNodeFailSilent dnfs) + throws KeeperException { + DeleteRequest delete = (DeleteRequest) toZooKeeperOp(zkw, dnfs).toRequestRecord(); try { zkw.getRecoverableZooKeeper().delete(delete.getPath(), delete.getVersion()); - } catch(KeeperException.NoNodeException nne) { - } catch(InterruptedException ie) { + } catch (KeeperException.NoNodeException nne) { + } catch (InterruptedException ie) { zkw.interruptedException(ie); } } - /** * Delete the specified node and all of it's children. *
<p>
          * If the node does not exist, just returns. *
<p>
          - * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. + * Sets no watches. Throws all exceptions besides dealing with deletion of children. */ - public static void deleteNodeRecursively(ZKWatcher zkw, String node) - throws KeeperException { + public static void deleteNodeRecursively(ZKWatcher zkw, String node) throws KeeperException { deleteNodeRecursivelyMultiOrSequential(zkw, true, node); } /** - * Delete all the children of the specified node but not the node itself. - * - * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. - * + * Delete all the children of the specified node but not the node itself. Sets no watches. Throws + * all exceptions besides dealing with deletion of children. * @throws KeeperException if a ZooKeeper operation fails */ - public static void deleteChildrenRecursively(ZKWatcher zkw, String node) - throws KeeperException { + public static void deleteChildrenRecursively(ZKWatcher zkw, String node) throws KeeperException { deleteChildrenRecursivelyMultiOrSequential(zkw, true, node); } /** - * Delete all the children of the specified node but not the node itself. This - * will first traverse the znode tree for listing the children and then delete - * these znodes using multi-update api or sequential based on the specified - * configurations. + * Delete all the children of the specified node but not the node itself. This will first traverse + * the znode tree for listing the children and then delete these znodes using multi-update api or + * sequential based on the specified configurations. *
<p>
          - * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. + * Sets no watches. Throws all exceptions besides dealing with deletion of children. *
<p>
          * If the following is true: *
<ul>
            *
          <li>runSequentialOnMultiFailure is true * </ul>
          - * on calling multi, we get a ZooKeeper exception that can be handled by a - * sequential call(*), we retry the operations one-by-one (sequentially). - * - * @param zkw - * - zk reference - * @param runSequentialOnMultiFailure - * - if true when we get a ZooKeeper exception that could retry the - * operations one-by-one (sequentially) - * @param pathRoots - * - path of the parent node(s) - * @throws KeeperException.NotEmptyException - * if node has children while deleting - * @throws KeeperException - * if unexpected ZooKeeper exception - * @throws IllegalArgumentException - * if an invalid path is specified + * on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*), we + * retry the operations one-by-one (sequentially). + * @param zkw - zk reference + * @param runSequentialOnMultiFailure - if true when we get a ZooKeeper exception that could retry + * the operations one-by-one (sequentially) + * @param pathRoots - path of the parent node(s) + * @throws KeeperException.NotEmptyException if node has children while deleting + * @throws KeeperException if unexpected ZooKeeper exception + * @throws IllegalArgumentException if an invalid path is specified */ - public static void deleteChildrenRecursivelyMultiOrSequential( - ZKWatcher zkw, boolean runSequentialOnMultiFailure, - String... pathRoots) throws KeeperException { + public static void deleteChildrenRecursivelyMultiOrSequential(ZKWatcher zkw, + boolean runSequentialOnMultiFailure, String... pathRoots) throws KeeperException { if (pathRoots == null || pathRoots.length <= 0) { LOG.warn("Given path is not valid!"); return; @@ -1057,34 +938,25 @@ public static void deleteChildrenRecursivelyMultiOrSequential( } /** - * Delete the specified node and its children. This traverse the - * znode tree for listing the children and then delete - * these znodes including the parent using multi-update api or - * sequential based on the specified configurations. + * Delete the specified node and its children. This traverse the znode tree for listing the + * children and then delete these znodes including the parent using multi-update api or sequential + * based on the specified configurations. *
<p>
          - * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. + * Sets no watches. Throws all exceptions besides dealing with deletion of children. *
<p>
          * If the following is true: *
<ul>
            *
          <li>runSequentialOnMultiFailure is true * </ul>
          - * on calling multi, we get a ZooKeeper exception that can be handled by a - * sequential call(*), we retry the operations one-by-one (sequentially). - * - * @param zkw - * - zk reference - * @param runSequentialOnMultiFailure - * - if true when we get a ZooKeeper exception that could retry the - * operations one-by-one (sequentially) - * @param pathRoots - * - path of the parent node(s) - * @throws KeeperException.NotEmptyException - * if node has children while deleting - * @throws KeeperException - * if unexpected ZooKeeper exception - * @throws IllegalArgumentException - * if an invalid path is specified + * on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*), we + * retry the operations one-by-one (sequentially). + * @param zkw - zk reference + * @param runSequentialOnMultiFailure - if true when we get a ZooKeeper exception that could retry + * the operations one-by-one (sequentially) + * @param pathRoots - path of the parent node(s) + * @throws KeeperException.NotEmptyException if node has children while deleting + * @throws KeeperException if unexpected ZooKeeper exception + * @throws IllegalArgumentException if an invalid path is specified */ public static void deleteNodeRecursivelyMultiOrSequential(ZKWatcher zkw, boolean runSequentialOnMultiFailure, String... pathRoots) throws KeeperException { @@ -1114,14 +986,13 @@ public static void deleteNodeRecursivelyMultiOrSequential(ZKWatcher zkw, /** * Chunks the provided {@code ops} when their approximate size exceeds the the configured limit. - * Take caution that this can ONLY be used for operations where atomicity is not important, - * e.g. deletions. It must not be used when atomicity of the operations is critical. - * + * Take caution that this can ONLY be used for operations where atomicity is not important, e.g. + * deletions. It must not be used when atomicity of the operations is critical. * @param zkw reference to the {@link ZKWatcher} which contains configuration and constants - * @param runSequentialOnMultiFailure if true when we get a ZooKeeper exception that could - * retry the operations one-by-one (sequentially) - * @param ops list of ZKUtilOp {@link ZKUtilOp} to partition while submitting batched multi - * or sequential + * @param runSequentialOnMultiFailure if true when we get a ZooKeeper exception that could retry + * the operations one-by-one (sequentially) + * @param ops list of ZKUtilOp {@link ZKUtilOp} to partition while submitting batched multi or + * sequential * @throws KeeperException unexpected ZooKeeper Exception / Zookeeper unreachable */ private static void submitBatchedMultiOrSequential(ZKWatcher zkw, @@ -1174,20 +1045,15 @@ static int estimateSize(ZKUtilOp op) { } /** - * BFS Traversal of all the children under path, with the entries in the list, - * in the same order as that of the traversal. Lists all the children without - * setting any watches. - * - * @param zkw - * - zk reference - * @param znode - * - path of node + * BFS Traversal of all the children under path, with the entries in the list, in the same order + * as that of the traversal. Lists all the children without setting any watches. 
+ * @param zkw - zk reference + * @param znode - path of node * @return list of children znodes under the path - * @throws KeeperException - * if unexpected ZooKeeper exception + * @throws KeeperException if unexpected ZooKeeper exception */ - private static List listChildrenBFSNoWatch(ZKWatcher zkw, - final String znode) throws KeeperException { + private static List listChildrenBFSNoWatch(ZKWatcher zkw, final String znode) + throws KeeperException { Deque queue = new LinkedList<>(); List tree = new ArrayList<>(); queue.add(znode); @@ -1210,17 +1076,12 @@ private static List listChildrenBFSNoWatch(ZKWatcher zkw, } /** - * BFS Traversal of all the children under path, with the entries in the list, - * in the same order as that of the traversal. - * Lists all the children and set watches on to them. - * - * @param zkw - * - zk reference - * @param znode - * - path of node + * BFS Traversal of all the children under path, with the entries in the list, in the same order + * as that of the traversal. Lists all the children and set watches on to them. + * @param zkw - zk reference + * @param znode - path of node * @return list of children znodes under the path - * @throws KeeperException - * if unexpected ZooKeeper exception + * @throws KeeperException if unexpected ZooKeeper exception */ private static List listChildrenBFSAndWatchThem(ZKWatcher zkw, final String znode) throws KeeperException { @@ -1246,14 +1107,14 @@ private static List listChildrenBFSAndWatchThem(ZKWatcher zkw, final Str } /** - * Represents an action taken by ZKUtil, e.g. createAndFailSilent. - * These actions are higher-level than ZKOp actions, which represent - * individual actions in the ZooKeeper API, like create. + * Represents an action taken by ZKUtil, e.g. createAndFailSilent. These actions are higher-level + * than ZKOp actions, which represent individual actions in the ZooKeeper API, like create. 
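// Usage sketch (editor's illustration, not part of this diff): composing ZKUtilOp actions
// and submitting them through multiOrSequential (shown further down). Paths and payloads
// are hypothetical; passing runSequentialOnMultiFailure=true means the batch may be applied
// non-atomically if the multi call fails in a recoverable way.
static void applyExampleBatch(ZKWatcher zkw) throws KeeperException {
  List<ZKUtil.ZKUtilOp> ops = new ArrayList<>();
  ops.add(ZKUtil.ZKUtilOp.createAndFailSilent("/hbase/example/a", Bytes.toBytes("a")));
  ops.add(ZKUtil.ZKUtilOp.setData("/hbase/example/b", Bytes.toBytes("b")));
  ops.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent("/hbase/example/c"));
  ZKUtil.multiOrSequential(zkw, ops, true);
}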
*/ public abstract static class ZKUtilOp { private String path; - @Override public String toString() { + @Override + public String toString() { return this.getClass().getSimpleName() + ", path=" + this.path; } @@ -1297,13 +1158,13 @@ public String getPath() { } /** - * ZKUtilOp representing createAndFailSilent in ZooKeeper - * (attempt to create node, ignore error if already exists) + * ZKUtilOp representing createAndFailSilent in ZooKeeper (attempt to create node, ignore error + * if already exists) */ public static final class CreateAndFailSilent extends ZKUtilOp { - private byte [] data; + private byte[] data; - private CreateAndFailSilent(String path, byte [] data) { + private CreateAndFailSilent(String path, byte[] data) { super(path); this.data = data; } @@ -1333,8 +1194,8 @@ public int hashCode() { } /** - * ZKUtilOp representing deleteNodeFailSilent in ZooKeeper - * (attempt to delete node, ignore error if node doesn't exist) + * ZKUtilOp representing deleteNodeFailSilent in ZooKeeper (attempt to delete node, ignore error + * if node doesn't exist) */ public static final class DeleteNodeFailSilent extends ZKUtilOp { private DeleteNodeFailSilent(String path) { @@ -1412,23 +1273,23 @@ public int hashCode() { * Convert from ZKUtilOp to ZKOp */ private static Op toZooKeeperOp(ZKWatcher zkw, ZKUtilOp op) throws UnsupportedOperationException { - if(op == null) { + if (op == null) { return null; } if (op instanceof CreateAndFailSilent) { - CreateAndFailSilent cafs = (CreateAndFailSilent)op; + CreateAndFailSilent cafs = (CreateAndFailSilent) op; return Op.create(cafs.getPath(), cafs.getData(), zkw.createACL(cafs.getPath()), CreateMode.PERSISTENT); } else if (op instanceof DeleteNodeFailSilent) { - DeleteNodeFailSilent dnfs = (DeleteNodeFailSilent)op; + DeleteNodeFailSilent dnfs = (DeleteNodeFailSilent) op; return Op.delete(dnfs.getPath(), -1); } else if (op instanceof SetData) { SetData sd = (SetData) op; return Op.setData(sd.getPath(), sd.getData(), sd.getVersion()); } else { - throw new UnsupportedOperationException("Unexpected ZKUtilOp type: " - + op.getClass().getName()); + throw new UnsupportedOperationException( + "Unexpected ZKUtilOp type: " + op.getClass().getName()); } } @@ -1439,22 +1300,16 @@ private static Op toZooKeeperOp(ZKWatcher zkw, ZKUtilOp op) throws UnsupportedOp private static boolean useMultiWarn = true; /** - * Use ZooKeeper's multi-update functionality. - * - * If all of the following are true: - * - runSequentialOnMultiFailure is true - * - on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*) - * Then: - * - we retry the operations one-by-one (sequentially) - * - * Note *: an example is receiving a NodeExistsException from a "create" call. Without multi, - * a user could call "createAndFailSilent" to ensure that a node exists if they don't care who - * actually created the node (i.e. the NodeExistsException from ZooKeeper is caught). - * This will cause all operations in the multi to fail, however, because - * the NodeExistsException that zk.create throws will fail the multi transaction. - * In this case, if the previous conditions hold, the commands are run sequentially, which should - * result in the correct final state, but means that the operations will not run atomically. - * + * Use ZooKeeper's multi-update functionality. 
If all of the following are true: - + * runSequentialOnMultiFailure is true - on calling multi, we get a ZooKeeper exception that can + * be handled by a sequential call(*) Then: - we retry the operations one-by-one (sequentially) + * Note *: an example is receiving a NodeExistsException from a "create" call. Without multi, a + * user could call "createAndFailSilent" to ensure that a node exists if they don't care who + * actually created the node (i.e. the NodeExistsException from ZooKeeper is caught). This will + * cause all operations in the multi to fail, however, because the NodeExistsException that + * zk.create throws will fail the multi transaction. In this case, if the previous conditions + * hold, the commands are run sequentially, which should result in the correct final state, but + * means that the operations will not run atomically. * @throws KeeperException if a ZooKeeper operation fails */ public static void multiOrSequential(ZKWatcher zkw, List ops, @@ -1484,9 +1339,10 @@ public static void multiOrSequential(ZKWatcher zkw, List ops, // if we get an exception that could be solved by running sequentially // (and the client asked us to), then break out and run sequentially if (runSequentialOnMultiFailure) { - LOG.info("multi exception: {}; running operations sequentially " + - "(runSequentialOnMultiFailure=true); {}", ke.toString(), - ops.stream().map(o -> o.toString()).collect(Collectors.joining(","))); + LOG.info( + "multi exception: {}; running operations sequentially " + + "(runSequentialOnMultiFailure=true); {}", + ke.toString(), ops.stream().map(o -> o.toString()).collect(Collectors.joining(","))); processSequentially(zkw, ops); break; } @@ -1508,8 +1364,8 @@ private static void processSequentially(ZKWatcher zkw, List ops) } else if (op instanceof SetData) { setData(zkw, (SetData) op); } else { - throw new UnsupportedOperationException("Unexpected ZKUtilOp type: " - + op.getClass().getName()); + throw new UnsupportedOperationException( + "Unexpected ZKUtilOp type: " + op.getClass().getName()); } } } @@ -1518,24 +1374,23 @@ private static void processSequentially(ZKWatcher zkw, List ops) // ZooKeeper cluster information // - private static void logRetrievedMsg(final ZKWatcher zkw, - final String znode, final byte [] data, final boolean watcherSet) { + private static void logRetrievedMsg(final ZKWatcher zkw, final String znode, final byte[] data, + final boolean watcherSet) { if (!LOG.isTraceEnabled()) { return; } - LOG.trace(zkw.prefix("Retrieved " + ((data == null)? 0: data.length) + - " byte(s) of data from znode " + znode + - (watcherSet? " and set watcher; ": "; data=") + - (data == null? "null": data.length == 0? "empty": ( - zkw.getZNodePaths().isMetaZNodePath(znode)? - getServerNameOrEmptyString(data): - znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)? - getServerNameOrEmptyString(data): - StringUtils.abbreviate(Bytes.toStringBinary(data), 32))))); + LOG.trace(zkw.prefix("Retrieved " + ((data == null) ? 0 : data.length) + + " byte(s) of data from znode " + znode + (watcherSet ? " and set watcher; " : "; data=") + + (data == null ? "null" + : data.length == 0 ? "empty" + : (zkw.getZNodePaths().isMetaZNodePath(znode) ? getServerNameOrEmptyString(data) + : znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode) + ? 
getServerNameOrEmptyString(data) + : StringUtils.abbreviate(Bytes.toStringBinary(data), 32))))); } - private static String getServerNameOrEmptyString(final byte [] data) { + private static String getServerNameOrEmptyString(final byte[] data) { try { return ProtobufUtil.parseServerNameFrom(data).toString(); } catch (DeserializationException e) { @@ -1549,11 +1404,11 @@ private static String getServerNameOrEmptyString(final byte [] data) { */ public static void waitForBaseZNode(Configuration conf) throws IOException { LOG.info("Waiting until the base znode is available"); - String parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + String parentZNode = + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf), - conf.getInt(HConstants.ZK_SESSION_TIMEOUT, - HConstants.DEFAULT_ZK_SESSION_TIMEOUT), EmptyWatcher.instance); + conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT), + EmptyWatcher.instance); final int maxTimeMs = 10000; final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS; @@ -1586,8 +1441,8 @@ public static void waitForBaseZNode(Configuration conf) throws IOException { } /** - * Convert a {@link DeserializationException} to a more palatable {@link KeeperException}. - * Used when can't let a {@link DeserializationException} out w/o changing public API. + * Convert a {@link DeserializationException} to a more palatable {@link KeeperException}. Used + * when can't let a {@link DeserializationException} out w/o changing public API. * @param e Exception to convert * @return Converted exception */ @@ -1621,8 +1476,7 @@ public static void logZKTree(ZKWatcher zkw, String root) { * @see #logZKTree(ZKWatcher, String) * @throws KeeperException if an unexpected exception occurs */ - private static void logZKTree(ZKWatcher zkw, String root, String prefix) - throws KeeperException { + private static void logZKTree(ZKWatcher zkw, String root, String prefix) throws KeeperException { List children = ZKUtil.listChildrenNoWatch(zkw, root); if (children == null) { diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java index fb086c193804..7df70647f840 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,24 +48,25 @@ import org.apache.zookeeper.data.Stat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * Acts as the single ZooKeeper Watcher. One instance of this is instantiated - * for each Master, RegionServer, and client process. - * - *
<p>
          This is the only class that implements {@link Watcher}. Other internal - * classes which need to be notified of ZooKeeper events must register with - * the local instance of this watcher via {@link #registerListener}. - * - *
<p>
          This class also holds and manages the connection to ZooKeeper. Code to - * deal with connection related events and exceptions are handled here. + * Acts as the single ZooKeeper Watcher. One instance of this is instantiated for each Master, + * RegionServer, and client process. + *
<p>
          + * This is the only class that implements {@link Watcher}. Other internal classes which need to be + * notified of ZooKeeper events must register with the local instance of this watcher via + * {@link #registerListener}. + *
<p>
          + * This class also holds and manages the connection to ZooKeeper. Code to deal with connection + * related events and exceptions are handled here. */ @InterfaceAudience.Private public class ZKWatcher implements Watcher, Abortable, Closeable { private static final Logger LOG = LoggerFactory.getLogger(ZKWatcher.class); - // Identifier for this watcher (for logging only). It is made of the prefix + // Identifier for this watcher (for logging only). It is made of the prefix // passed on construction and the zookeeper sessionid. private final String prefix; private String identifier; @@ -98,7 +98,7 @@ public class ZKWatcher implements Watcher, Abortable, Closeable { // requests using a single while loop and hence there is no performance degradation. private final ExecutorService zkEventProcessor = Executors.newSingleThreadExecutor( new ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); private final Configuration conf; @@ -109,13 +109,13 @@ public class ZKWatcher implements Watcher, Abortable, Closeable { /** * Instantiate a ZooKeeper connection and watcher. - * @param identifier string that is passed to RecoverableZookeeper to be used as - * identifier for this instance. Use null for default. + * @param identifier string that is passed to RecoverableZookeeper to be used as identifier for + * this instance. Use null for default. * @throws IOException if the connection to ZooKeeper fails * @throws ZooKeeperConnectionException if the client can't connect to ZooKeeper */ - public ZKWatcher(Configuration conf, String identifier, - Abortable abortable) throws ZooKeeperConnectionException, IOException { + public ZKWatcher(Configuration conf, String identifier, Abortable abortable) + throws ZooKeeperConnectionException, IOException { this(conf, identifier, abortable, false); } @@ -130,9 +130,8 @@ public ZKWatcher(Configuration conf, String identifier, * @throws IOException if the connection to ZooKeeper fails * @throws ZooKeeperConnectionException if the client can't connect to ZooKeeper */ - public ZKWatcher(Configuration conf, String identifier, - Abortable abortable, boolean canCreateBaseZNode) - throws IOException, ZooKeeperConnectionException { + public ZKWatcher(Configuration conf, String identifier, Abortable abortable, + boolean canCreateBaseZNode) throws IOException, ZooKeeperConnectionException { this(conf, identifier, abortable, canCreateBaseZNode, false); } @@ -178,7 +177,7 @@ public ZKWatcher(Configuration conf, String identifier, Abortable abortable, this.znodePaths = new ZNodePaths(conf); PendingWatcher pendingWatcher = new PendingWatcher(); this.recoverableZooKeeper = - RecoverableZooKeeper.connect(conf, quorum, pendingWatcher, identifier); + RecoverableZooKeeper.connect(conf, quorum, pendingWatcher, identifier); pendingWatcher.prepare(this); if (canCreateBaseZNode) { try { @@ -194,7 +193,7 @@ public ZKWatcher(Configuration conf, String identifier, Abortable abortable, } } this.zkSyncTimeout = conf.getLong(HConstants.ZK_SYNC_BLOCKING_TIMEOUT_MS, - HConstants.ZK_SYNC_BLOCKING_TIMEOUT_DEFAULT_MS); + HConstants.ZK_SYNC_BLOCKING_TIMEOUT_DEFAULT_MS); } public List createACL(String node) { @@ -222,14 +221,15 @@ public List createACL(String node, boolean isSecureZooKeeper) { // TODO: Set node ACL for groups when ZK supports this feature groups.add(user); } else { - if(!user.equals(hbaseUser)) { + if 
(!user.equals(hbaseUser)) { acls.add(new ACL(Perms.ALL, new Id("sasl", user))); } } } if (!groups.isEmpty()) { - LOG.warn("Znode ACL setting for group {} is skipped, ZooKeeper doesn't support this " + - "feature presently.", groups); + LOG.warn("Znode ACL setting for group {} is skipped, ZooKeeper doesn't support this " + + "feature presently.", + groups); } } // Certain znodes are accessed directly by the client, @@ -263,10 +263,9 @@ private void createBaseZNodes() throws ZooKeeperConnectionException { } /** - * On master start, we check the znode ACLs under the root directory and set the ACLs properly - * if needed. If the cluster goes from an unsecure setup to a secure setup, this step is needed - * so that the existing znodes created with open permissions are now changed with restrictive - * perms. + * On master start, we check the znode ACLs under the root directory and set the ACLs properly if + * needed. If the cluster goes from an unsecure setup to a secure setup, this step is needed so + * that the existing znodes created with open permissions are now changed with restrictive perms. */ public void checkAndSetZNodeAcls() { if (!ZKAuthentication.isSecureZooKeeper(getConfiguration())) { @@ -283,18 +282,18 @@ public void checkAndSetZNodeAcls() { LOG.info("setting znode ACLs"); setZnodeAclsRecursive(znodePaths.baseZNode); } - } catch(KeeperException.NoNodeException nne) { + } catch (KeeperException.NoNodeException nne) { return; - } catch(InterruptedException ie) { + } catch (InterruptedException ie) { interruptedExceptionNoThrow(ie, false); - } catch (IOException|KeeperException e) { + } catch (IOException | KeeperException e) { LOG.warn("Received exception while checking and setting zookeeper ACLs", e); } } /** - * Set the znode perms recursively. This will do post-order recursion, so that baseZnode ACLs - * will be set last in case the master fails in between. + * Set the znode perms recursively. This will do post-order recursion, so that baseZnode ACLs will + * be set last in case the master fails in between. * @param znode the ZNode to set the permissions for */ private void setZnodeAclsRecursive(String znode) throws KeeperException, InterruptedException { @@ -438,8 +437,8 @@ public String toString() { /** * Adds this instance's identifier as a prefix to the passed str * @param str String to amend. - * @return A new string with this instance's identifier as prefix: e.g. - * if passed 'hello world', the returned string could be + * @return A new string with this instance's identifier as prefix: e.g. if passed 'hello world', + * the returned string could be */ public String prefix(final String str) { return this.toString() + " " + str; @@ -492,8 +491,8 @@ public void registerListener(ZKListener listener) { } /** - * Register the specified listener to receive ZooKeeper events and add it as - * the first in the list of current listeners. + * Register the specified listener to receive ZooKeeper events and add it as the first in the list + * of current listeners. 
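// Usage sketch (editor's illustration, not part of this diff): registering a listener with
// the watcher. The anonymous ZKListener subclass and the watched path are hypothetical;
// ZKListener is the companion base class in this package.
static void listenForConfigChanges(ZKWatcher zkw) {
  zkw.registerListener(new ZKListener(zkw) {
    @Override
    public void nodeDataChanged(String path) {
      if ("/hbase/example/config/limit".equals(path)) {
        // typically re-read the data and re-set the watch here
      }
    }
  });
}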
* @param listener the listener to register */ public void registerListenerFirst(ZKListener listener) { @@ -555,7 +554,7 @@ public ZNodePaths getZNodePaths() { } private void processEvent(WatchedEvent event) { - switch(event.getType()) { + switch (event.getType()) { // If event type is NONE, this is a connection status change case None: { connectionEvent(event); @@ -564,50 +563,47 @@ private void processEvent(WatchedEvent event) { // Otherwise pass along to the listeners case NodeCreated: { - for(ZKListener listener : listeners) { + for (ZKListener listener : listeners) { listener.nodeCreated(event.getPath()); } break; } case NodeDeleted: { - for(ZKListener listener : listeners) { + for (ZKListener listener : listeners) { listener.nodeDeleted(event.getPath()); } break; } case NodeDataChanged: { - for(ZKListener listener : listeners) { + for (ZKListener listener : listeners) { listener.nodeDataChanged(event.getPath()); } break; } case NodeChildrenChanged: { - for(ZKListener listener : listeners) { + for (ZKListener listener : listeners) { listener.nodeChildrenChanged(event.getPath()); } break; } default: - LOG.error("Invalid event of type {} received for path {}. Ignoring.", - event.getState(), event.getPath()); + LOG.error("Invalid event of type {} received for path {}. Ignoring.", event.getState(), + event.getPath()); } } /** * Method called from ZooKeeper for events and connection status. *
<p>
          - * Valid events are passed along to listeners. Connection status changes - * are dealt with locally. + * Valid events are passed along to listeners. Connection status changes are dealt with locally. */ @Override public void process(WatchedEvent event) { - LOG.debug(prefix("Received ZooKeeper Event, " + - "type=" + event.getType() + ", " + - "state=" + event.getState() + ", " + - "path=" + event.getPath())); + LOG.debug(prefix("Received ZooKeeper Event, " + "type=" + event.getType() + ", " + "state=" + + event.getState() + ", " + "path=" + event.getPath())); zkEventProcessor.submit(() -> processEvent(event)); } @@ -616,19 +612,19 @@ public void process(WatchedEvent event) { /** * Called when there is a connection-related event via the Watcher callback. *
<p>
          - * If Disconnected or Expired, this should shutdown the cluster. But, since - * we send a KeeperException.SessionExpiredException along with the abort - * call, it's possible for the Abortable to catch it and try to create a new - * session with ZooKeeper. This is what the client does in HCM. + * If Disconnected or Expired, this should shutdown the cluster. But, since we send a + * KeeperException.SessionExpiredException along with the abort call, it's possible for the + * Abortable to catch it and try to create a new session with ZooKeeper. This is what the client + * does in HCM. *
<p>
          * @param event the connection-related event */ private void connectionEvent(WatchedEvent event) { - switch(event.getState()) { + switch (event.getState()) { case SyncConnected: - this.identifier = this.prefix + "-0x" + - Long.toHexString(this.recoverableZooKeeper.getSessionId()); - // Update our identifier. Otherwise ignore. + this.identifier = + this.prefix + "-0x" + Long.toHexString(this.recoverableZooKeeper.getSessionId()); + // Update our identifier. Otherwise ignore. LOG.debug("{} connected", this.identifier); break; @@ -642,8 +638,7 @@ private void connectionEvent(WatchedEvent event) { break; case Expired: - String msg = prefix(this.identifier + " received expired from " + - "ZooKeeper, aborting"); + String msg = prefix(this.identifier + " received expired from " + "ZooKeeper, aborting"); // TODO: One thought is to add call to ZKListener so say, // ZKNodeTracker can zero out its data values. if (this.abortable != null) { @@ -665,14 +660,12 @@ private void connectionEvent(WatchedEvent event) { * Forces a synchronization of this ZooKeeper client connection within a timeout. Enforcing a * timeout lets the callers fail-fast rather than wait forever for the sync to finish. *
<p>
          - * Executing this method before running other methods will ensure that the - * subsequent operations are up-to-date and consistent as of the time that - * the sync is complete. + * Executing this method before running other methods will ensure that the subsequent operations + * are up-to-date and consistent as of the time that the sync is complete. *
<p>
          - * This is used for compareAndSwap type operations where we need to read the - * data of an existing node and delete or transition that node, utilizing the - * previously read version and data. We want to ensure that the version read - * is up-to-date from when we begin the operation. + * This is used for compareAndSwap type operations where we need to read the data of an existing + * node and delete or transition that node, utilizing the previously read version and data. We + * want to ensure that the version read is up-to-date from when we begin the operation. *
<p>
          */ public void syncOrTimeout(String path) throws KeeperException { @@ -682,7 +675,8 @@ public void syncOrTimeout(String path) throws KeeperException { try { if (!latch.await(zkSyncTimeout, TimeUnit.MILLISECONDS)) { LOG.warn("sync() operation to ZK timed out. Configured timeout: {}ms. This usually points " - + "to a ZK side issue. Check ZK server logs and metrics.", zkSyncTimeout); + + "to a ZK side issue. Check ZK server logs and metrics.", + zkSyncTimeout); throw new KeeperException.RequestTimeoutException(); } } catch (InterruptedException e) { @@ -740,7 +734,6 @@ public void interruptedExceptionNoThrow(InterruptedException ie, boolean throwLa /** * Close the connection to ZooKeeper. - * */ @Override public void close() { @@ -768,6 +761,6 @@ public void abort(String why, Throwable e) { @Override public boolean isAborted() { - return this.abortable == null? this.aborted: this.abortable.isAborted(); + return this.abortable == null ? this.aborted : this.abortable.isAborted(); } } diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/HBaseZKTestingUtil.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/HBaseZKTestingUtil.java index e1223cef00ea..0cd0bb9a51dd 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/HBaseZKTestingUtil.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/HBaseZKTestingUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -99,7 +99,7 @@ public MiniZooKeeperCluster startMiniZKCluster() throws Exception { * @return zk cluster started. */ public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum, int... clientPortList) - throws Exception { + throws Exception { setupClusterTestDir(); return startMiniZKCluster(clusterTestDir, zooKeeperServerNum, clientPortList); } @@ -109,7 +109,7 @@ public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum, int... cl * port mentioned is used as the default port for ZooKeeper. */ private MiniZooKeeperCluster startMiniZKCluster(File dir, int zooKeeperServerNum, - int[] clientPortList) throws Exception { + int[] clientPortList) throws Exception { if (this.zkCluster != null) { throw new IOException("Cluster already running at " + dir); } diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java index 97e1279af130..447e9a3f86a1 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,11 +50,10 @@ public class TestHQuorumPeer { private static int PORT_NO = 21818; private Path dataDir; - - @Before public void setup() throws IOException { + @Before + public void setup() throws IOException { // Set it to a non-standard port. 
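// A minimal illustrative sketch, not part of this patch: the compare-and-swap style usage that
// the ZKWatcher.syncOrTimeout() javadoc above describes. The znode path, class and method names
// below are hypothetical; only the plain ZooKeeper API calls are real. A sync (e.g.
// syncOrTimeout) would normally be issued first so the read reflects the latest committed state.
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

final class CompareAndSwapZNodeSketch {
  static boolean transition(ZooKeeper zk, String znode, byte[] newState)
    throws KeeperException, InterruptedException {
    Stat stat = new Stat();
    byte[] current = zk.getData(znode, false, stat); // read data together with its version
    if (current == null || current.length == 0) {
      return false; // nothing to transition
    }
    try {
      // Conditional write: fails with BadVersionException if another client changed the node
      // since the read above, instead of silently overwriting the concurrent update.
      zk.setData(znode, newState, stat.getVersion());
      return true;
    } catch (KeeperException.BadVersionException e) {
      return false; // lost the race; the caller may re-read and retry
    }
  }
}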
- TEST_UTIL.getConfiguration().setInt(HConstants.ZOOKEEPER_CLIENT_PORT, - PORT_NO); + TEST_UTIL.getConfiguration().setInt(HConstants.ZOOKEEPER_CLIENT_PORT, PORT_NO); this.dataDir = TEST_UTIL.getDataTestDir(this.getClass().getName()); FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); if (fs.exists(this.dataDir)) { @@ -67,13 +66,13 @@ public class TestHQuorumPeer { } } - @Test public void testMakeZKProps() { + @Test + public void testMakeZKProps() { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); conf.set(HConstants.ZOOKEEPER_DATA_DIR, this.dataDir.toString()); Properties properties = ZKConfig.makeZKProps(conf); - assertEquals(dataDir.toString(), (String)properties.get("dataDir")); - assertEquals(Integer.valueOf(PORT_NO), - Integer.valueOf(properties.getProperty("clientPort"))); + assertEquals(dataDir.toString(), (String) properties.get("dataDir")); + assertEquals(Integer.valueOf(PORT_NO), Integer.valueOf(properties.getProperty("clientPort"))); assertEquals("127.0.0.1:2888:3888", properties.get("server.0")); assertNull(properties.get("server.1")); @@ -81,8 +80,7 @@ public class TestHQuorumPeer { conf.set(HConstants.ZOOKEEPER_QUORUM, "a.foo.bar,b.foo.bar,c.foo.bar"); properties = ZKConfig.makeZKProps(conf); assertEquals(dataDir.toString(), properties.get("dataDir")); - assertEquals(Integer.valueOf(PORT_NO), - Integer.valueOf(properties.getProperty("clientPort"))); + assertEquals(Integer.valueOf(PORT_NO), Integer.valueOf(properties.getProperty("clientPort"))); assertEquals("a.foo.bar:2888:3888", properties.get("server.0")); assertEquals("b.foo.bar:2888:3888", properties.get("server.1")); assertEquals("c.foo.bar:2888:3888", properties.get("server.2")); @@ -90,7 +88,8 @@ public class TestHQuorumPeer { conf.set(HConstants.ZOOKEEPER_QUORUM, oldValue); } - @Test public void testShouldAssignDefaultZookeeperClientPort() { + @Test + public void testShouldAssignDefaultZookeeperClientPort() { Configuration config = HBaseConfiguration.create(); config.clear(); Properties p = ZKConfig.makeZKProps(config); diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java index caa0beb116bd..0d6ca65cfa26 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMasterAddressTracker.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMasterAddressTracker.java index deaa4833f879..86b70ee4f4f8 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMasterAddressTracker.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMasterAddressTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public class TestMasterAddressTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterAddressTracker.class); + HBaseClassTestRule.forClass(TestMasterAddressTracker.class); private static final Logger LOG = LoggerFactory.getLogger(TestMasterAddressTracker.class); @@ -78,7 +78,7 @@ public static void tearDownAfterClass() throws Exception { @Test public void testDeleteIfEquals() throws Exception { final ServerName sn = - ServerName.valueOf("localhost", 1234, EnvironmentEdgeManager.currentTime()); + ServerName.valueOf("localhost", 1234, EnvironmentEdgeManager.currentTime()); final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1772); try { assertFalse("shouldn't have deleted wrong master server.", @@ -95,7 +95,7 @@ public void testDeleteIfEquals() throws Exception { * @param infoPort if there is an active master, set its info port. */ private MasterAddressTracker setupMasterTracker(final ServerName sn, final int infoPort) - throws Exception { + throws Exception { zk = new ZKWatcher(TEST_UTIL.getConfiguration(), name.getMethodName(), null); ZKUtil.createAndFailSilent(zk, zk.getZNodePaths().baseZNode); ZKUtil.createAndFailSilent(zk, zk.getZNodePaths().backupMasterAddressesZNode); @@ -108,7 +108,7 @@ private MasterAddressTracker setupMasterTracker(final ServerName sn, final int i // Use a listener to capture when the node is actually created NodeCreationListener listener = - new NodeCreationListener(zk, zk.getZNodePaths().masterAddressZNode); + new NodeCreationListener(zk, zk.getZNodePaths().masterAddressZNode); zk.registerListener(listener); if (sn != null) { @@ -133,7 +133,7 @@ public void testMasterAddressTrackerFromZK() throws Exception { // Create the master node with a dummy address final int infoPort = 1235; final ServerName sn = - ServerName.valueOf("localhost", 1234, EnvironmentEdgeManager.currentTime()); + ServerName.valueOf("localhost", 1234, EnvironmentEdgeManager.currentTime()); final MasterAddressTracker addressTracker = setupMasterTracker(sn, infoPort); try { assertTrue(addressTracker.hasMaster()); @@ -154,7 +154,7 @@ public void testParsingNull() throws Exception { @Test public void testNoBackups() throws Exception { final ServerName sn = - ServerName.valueOf("localhost", 1234, EnvironmentEdgeManager.currentTime()); + ServerName.valueOf("localhost", 1234, EnvironmentEdgeManager.currentTime()); final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1772); try { assertEquals("Should receive 0 for backup not found.", 0, @@ -177,7 +177,7 @@ public void testNoMaster() throws Exception { @Test public void testBackupMasters() throws Exception { final ServerName sn = - ServerName.valueOf("localhost", 5678, EnvironmentEdgeManager.currentTime()); + ServerName.valueOf("localhost", 5678, EnvironmentEdgeManager.currentTime()); final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1111); assertTrue(addressTracker.hasMaster()); ServerName activeMaster = addressTracker.getMasterAddress(); @@ -187,10 +187,10 @@ public void testBackupMasters() throws Exception { assertEquals(0, backupMasters.size()); ServerName backupMaster1 = ServerName.valueOf("localhost", 2222, -1); ServerName backupMaster2 = ServerName.valueOf("localhost", 3333, -1); - String backupZNode1 = - ZNodePaths.joinZNode(zk.getZNodePaths().backupMasterAddressesZNode, backupMaster1.toString()); - String backupZNode2 = - 
ZNodePaths.joinZNode(zk.getZNodePaths().backupMasterAddressesZNode, backupMaster2.toString()); + String backupZNode1 = ZNodePaths.joinZNode(zk.getZNodePaths().backupMasterAddressesZNode, + backupMaster1.toString()); + String backupZNode2 = ZNodePaths.joinZNode(zk.getZNodePaths().backupMasterAddressesZNode, + backupMaster2.toString()); // Add backup masters MasterAddressTracker.setMasterAddress(zk, backupZNode1, backupMaster1, 2222); MasterAddressTracker.setMasterAddress(zk, backupZNode2, backupMaster2, 3333); diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java index 686bca31bf72..5cc8c67e8877 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestReadOnlyZKClient.java @@ -187,8 +187,8 @@ public void testNotCloseZkWhenPending() throws Exception { doAnswer(i -> { exchanger.exchange(i.getArgument(2)); return null; - }).when(mockedZK).getData(anyString(), anyBoolean(), - any(AsyncCallback.DataCallback.class), any()); + }).when(mockedZK).getData(anyString(), anyBoolean(), any(AsyncCallback.DataCallback.class), + any()); doAnswer(i -> null).when(mockedZK).close(); when(mockedZK.getState()).thenReturn(ZooKeeper.States.CONNECTED); RO_ZK.zookeeper = mockedZK; diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java index 700781c849fb..82a4f6c1a222 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -74,8 +74,7 @@ public static void tearDownAfterClass() throws Exception { public void testSetDataVersionMismatchInLoop() throws Exception { String znode = "/hbase/splitWAL/9af7cfc9b15910a0b3d714bf40a3248f"; Configuration conf = TEST_UTIL.getConfiguration(); - ZKWatcher zkw = new ZKWatcher(conf, "testSetDataVersionMismatchInLoop", - abortable, true); + ZKWatcher zkw = new ZKWatcher(conf, "testSetDataVersionMismatchInLoop", abortable, true); String ensemble = ZKConfig.getZKQuorumServersString(conf); RecoverableZooKeeper rzk = RecoverableZooKeeper.connect(conf, ensemble, zkw); rzk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); @@ -95,8 +94,7 @@ public void testSetDataVersionMismatchInLoop() throws Exception { static class ZookeeperStub extends ZooKeeper { private int throwExceptionInNumOperations; - ZookeeperStub(String connectString, int sessionTimeout, Watcher watcher) - throws IOException { + ZookeeperStub(String connectString, int sessionTimeout, Watcher watcher) throws IOException { super(connectString, sessionTimeout, watcher); } @@ -115,8 +113,8 @@ private void checkThrowKeeperException() throws KeeperException { } @Override - public Stat setData(String path, byte[] data, int version) throws KeeperException, - InterruptedException { + public Stat setData(String path, byte[] data, int version) + throws KeeperException, InterruptedException { Stat stat = super.setData(path, data, version); checkThrowKeeperException(); return stat; diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java index 08f12481d016..bdd4c3f79152 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,8 +51,7 @@ public class TestZKLeaderManager { private static final Logger LOG = LoggerFactory.getLogger(TestZKLeaderManager.class); - private static final String LEADER_ZNODE = - "/test/" + TestZKLeaderManager.class.getSimpleName(); + private static final String LEADER_ZNODE = "/test/" + TestZKLeaderManager.class.getSimpleName(); private static class MockAbortable implements Abortable { private boolean aborted; @@ -60,7 +59,7 @@ private static class MockAbortable implements Abortable { @Override public void abort(String why, Throwable e) { aborted = true; - LOG.error(HBaseMarkers.FATAL, "Aborting during test: "+why, e); + LOG.error(HBaseMarkers.FATAL, "Aborting during test: " + why, e); fail("Aborted during test: " + why); } @@ -82,8 +81,7 @@ private static class MockLeader extends Thread implements Stoppable { setName("TestZKLeaderManager-leader-" + index); this.index = index; this.watcher = watcher; - this.zkLeader = new ZKLeaderManager(watcher, LEADER_ZNODE, - Bytes.toBytes(index), this); + this.zkLeader = new ZKLeaderManager(watcher, LEADER_ZNODE, Bytes.toBytes(index), this); } public boolean isMaster() { @@ -108,7 +106,8 @@ public void run() { while (master.get() && !stopped) { try { Thread.sleep(10); - } catch (InterruptedException ignored) {} + } catch (InterruptedException ignored) { + } } } } @@ -146,7 +145,7 @@ public static void setupBeforeClass() throws Exception { int count = 5; CANDIDATES = new MockLeader[count]; for (int i = 0; i < count; i++) { - ZKWatcher watcher = newZK(conf, "server"+i, abortable); + ZKWatcher watcher = newZK(conf, "server" + i, abortable); CANDIDATES[i] = new MockLeader(watcher, i); CANDIDATES[i].start(); } @@ -162,15 +161,14 @@ public void testLeaderSelection() throws Exception { MockLeader currentLeader = getCurrentLeader(); // one leader should have been found assertNotNull("Leader should exist", currentLeader); - LOG.debug("Current leader index is "+currentLeader.getIndex()); + LOG.debug("Current leader index is " + currentLeader.getIndex()); byte[] znodeData = ZKUtil.getData(currentLeader.getWatcher(), LEADER_ZNODE); assertNotNull("Leader znode should contain leader index", znodeData); assertTrue("Leader znode should not be empty", znodeData.length > 0); int storedIndex = Bytes.toInt(znodeData); - LOG.debug("Stored leader index in ZK is "+storedIndex); - assertEquals("Leader znode should match leader index", - currentLeader.getIndex(), storedIndex); + LOG.debug("Stored leader index in ZK is " + storedIndex); + assertEquals("Leader znode should match leader index", currentLeader.getIndex(), storedIndex); // force a leader transition currentLeader.abdicate(); @@ -179,15 +177,14 @@ public void testLeaderSelection() throws Exception { currentLeader = getCurrentLeader(); // one leader should have been found assertNotNull("New leader should exist after abdication", currentLeader); - LOG.debug("New leader index is "+currentLeader.getIndex()); + LOG.debug("New leader index is " + currentLeader.getIndex()); znodeData = ZKUtil.getData(currentLeader.getWatcher(), LEADER_ZNODE); assertNotNull("Leader znode should contain leader index", znodeData); assertTrue("Leader znode should not be empty", znodeData.length > 0); storedIndex = Bytes.toInt(znodeData); - LOG.debug("Stored leader index in ZK is "+storedIndex); - assertEquals("Leader znode should match leader index", - currentLeader.getIndex(), storedIndex); + LOG.debug("Stored leader index in ZK is " + storedIndex); + 
assertEquals("Leader znode should match leader index", currentLeader.getIndex(), storedIndex); // force another transition by stopping the current currentLeader.stop("Stopping for test"); @@ -196,15 +193,14 @@ public void testLeaderSelection() throws Exception { currentLeader = getCurrentLeader(); // one leader should have been found assertNotNull("New leader should exist after stop", currentLeader); - LOG.debug("New leader index is "+currentLeader.getIndex()); + LOG.debug("New leader index is " + currentLeader.getIndex()); znodeData = ZKUtil.getData(currentLeader.getWatcher(), LEADER_ZNODE); assertNotNull("Leader znode should contain leader index", znodeData); assertTrue("Leader znode should not be empty", znodeData.length > 0); storedIndex = Bytes.toInt(znodeData); - LOG.debug("Stored leader index in ZK is "+storedIndex); - assertEquals("Leader znode should match leader index", - currentLeader.getIndex(), storedIndex); + LOG.debug("Stored leader index in ZK is " + storedIndex); + assertEquals("Leader znode should match leader index", currentLeader.getIndex(), storedIndex); // with a second stop we can guarantee that a previous leader has resumed leading currentLeader.stop("Stopping for test"); @@ -223,7 +219,8 @@ private MockLeader getCurrentLeader() { if (CANDIDATES[j].isMaster()) { // should only be one leader if (currentLeader != null) { - fail("Both candidate "+currentLeader.getIndex()+" and "+j+" claim to be leader!"); + fail( + "Both candidate " + currentLeader.getIndex() + " and " + j + " claim to be leader!"); } currentLeader = CANDIDATES[j]; } diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java index ad69c85cf73e..a99206e1f3a6 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.security.Permission; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -38,7 +39,7 @@ public class TestZKMainServer { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestZKMainServer.class); - // ZKMS calls System.exit. Catch the call and prevent exit using trick described up in + // ZKMS calls System.exit. Catch the call and prevent exit using trick described up in // http://stackoverflow.com/questions/309396/java-how-to-test-methods-that-call-system-exit protected static class ExitException extends SecurityException { private static final long serialVersionUID = 1L; @@ -83,8 +84,8 @@ public void testCommandLineWorks() throws Exception { ZKUtil.checkExists(zkw, znode); boolean exception = false; try { - ZKMainServer.main(new String [] {"-server", htu.getZkCluster().getAddress().toString(), - "delete", znode}); + ZKMainServer.main( + new String[] { "-server", htu.getZkCluster().getAddress().toString(), "delete", znode }); } catch (ExitException ee) { // ZKMS calls System.exit which should trigger this exception. 
exception = true; @@ -121,28 +122,28 @@ public void testHostPortParse() { assertEquals(ensemble, "example1.com:5678,example2.com:9012,example3.com:" + port); // multiple servers(IPv6) with its own port - c.set("hbase.zookeeper.quorum", "[2001:db8:1::242:ac11:2]:2181," + - "[2001:db8:1::242:ac11:3]:5678"); + c.set("hbase.zookeeper.quorum", + "[2001:db8:1::242:ac11:2]:2181," + "[2001:db8:1::242:ac11:3]:5678"); ensemble = parser.parse(c); - assertEquals("[2001:db8:1::242:ac11:2]:2181," + - "[2001:db8:1::242:ac11:3]:5678", ensemble); + assertEquals("[2001:db8:1::242:ac11:2]:2181," + "[2001:db8:1::242:ac11:3]:5678", ensemble); // some servers(IPv6) without its own port, which will be assigned the default client port - c.set("hbase.zookeeper.quorum", "[1001:db8:1::242:ac11:8], [2001:db8:1::242:df23:2]:9876," + - "[2001:db8:1::242:ac11:3]:5678"); + c.set("hbase.zookeeper.quorum", + "[1001:db8:1::242:ac11:8], [2001:db8:1::242:df23:2]:9876," + "[2001:db8:1::242:ac11:3]:5678"); ensemble = parser.parse(c); - assertEquals("[1001:db8:1::242:ac11:8]:1234, [2001:db8:1::242:df23:2]:9876," + - "[2001:db8:1::242:ac11:3]:5678", ensemble); + assertEquals("[1001:db8:1::242:ac11:8]:1234, [2001:db8:1::242:df23:2]:9876," + + "[2001:db8:1::242:ac11:3]:5678", + ensemble); - //a bad case + // a bad case try { // some servers(IPv6) with an invaild Ipv6 address in it - c.set("hbase.zookeeper.quorum", "[1001:db8:1::242:ac11:8], [2001:db8:1::242:df23:2]:9876," + - "[1001:db8:1::242:ac11:8:89:67]:5678"); + c.set("hbase.zookeeper.quorum", "[1001:db8:1::242:ac11:8], [2001:db8:1::242:df23:2]:9876," + + "[1001:db8:1::242:ac11:8:89:67]:5678"); parser.parse(c); Assert.fail("IPv6 address should be 8 groups."); } catch (IllegalArgumentException e) { - //expected + // expected } } } diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java index 83e11ae16aa5..9ac7d68cfe1e 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.Collections; import java.util.LinkedList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -77,8 +76,7 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniZKCluster(); Configuration conf = TEST_UTIL.getConfiguration(); Abortable abortable = new ZKMultiAbortable(); - zkw = new ZKWatcher(conf, - "TestZKMulti", abortable, true); + zkw = new ZKWatcher(conf, "TestZKMulti", abortable, true); } @AfterClass @@ -103,7 +101,7 @@ public void testSimpleMulti() throws Exception { // single setdata LinkedList singleSetData = new LinkedList<>(); - byte [] data = Bytes.toBytes("foobar"); + byte[] data = Bytes.toBytes("foobar"); singleSetData.add(ZKUtilOp.setData(path, data)); ZKUtil.multiOrSequential(zkw, singleSetData, false); assertTrue(Bytes.equals(ZKUtil.getData(zkw, path), data)); @@ -232,7 +230,7 @@ public void testMultiFailure() throws Exception { String pathV = ZNodePaths.joinZNode(zkw.getZNodePaths().baseZNode, "testMultiFailureV"); String pathW = ZNodePaths.joinZNode(zkw.getZNodePaths().baseZNode, "testMultiFailureW"); ops = new LinkedList<>(); - ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); // fail -- already exists + ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); // fail -- already exists ops.add(ZKUtilOp.setData(pathY, Bytes.toBytes(pathY))); // fail -- doesn't exist ops.add(ZKUtilOp.deleteNodeFailSilent(pathZ)); // fail -- doesn't exist ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathV))); // pass @@ -291,8 +289,8 @@ public void testRunSequentialOnMultiFailure() throws Exception { ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); // pass ops.add(ZKUtilOp.deleteNodeFailSilent(path2)); // pass ops.add(ZKUtilOp.deleteNodeFailSilent(path3)); // fail -- node doesn't exist - ops.add(ZKUtilOp.createAndFailSilent(path4, - Bytes.add(Bytes.toBytes(path4), Bytes.toBytes(path4)))); // pass + ops.add( + ZKUtilOp.createAndFailSilent(path4, Bytes.add(Bytes.toBytes(path4), Bytes.toBytes(path4)))); // pass ZKUtil.multiOrSequential(zkw, ops, true); assertTrue(Bytes.equals(ZKUtil.getData(zkw, path1), Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); @@ -302,8 +300,8 @@ public void testRunSequentialOnMultiFailure() throws Exception { } /** - * Verifies that for the given root node, it should delete all the child nodes - * recursively using multi-update api. + * Verifies that for the given root node, it should delete all the child nodes recursively using + * multi-update api. 
*/ @Test public void testdeleteChildrenRecursivelyMulti() throws Exception { @@ -312,10 +310,8 @@ public void testdeleteChildrenRecursivelyMulti() throws Exception { ZKUtil.deleteChildrenRecursivelyMultiOrSequential(zkw, true, parentZNode); - assertTrue("Wrongly deleted parent znode!", - ZKUtil.checkExists(zkw, parentZNode) > -1); - List children = zkw.getRecoverableZooKeeper().getChildren( - parentZNode, false); + assertTrue("Wrongly deleted parent znode!", ZKUtil.checkExists(zkw, parentZNode) > -1); + List children = zkw.getRecoverableZooKeeper().getChildren(parentZNode, false); assertEquals("Failed to delete child znodes!", 0, children.size()); } @@ -379,8 +375,8 @@ public void testBatchedDeletesOfWideZNodes() throws Exception { final int batchSize = 50; Configuration localConf = new Configuration(TEST_UTIL.getConfiguration()); localConf.setInt("zookeeper.multi.max.size", batchSize); - try (ZKWatcher customZkw = new ZKWatcher(localConf, - "TestZKMulti_Custom", new ZKMultiAbortable(), true)) { + try (ZKWatcher customZkw = + new ZKWatcher(localConf, "TestZKMulti_Custom", new ZKMultiAbortable(), true)) { // With a parent znode like this, we'll get batches of 2-3 elements final String parent1 = "/batchedDeletes1"; @@ -391,8 +387,8 @@ public void testBatchedDeletesOfWideZNodes() throws Exception { List ops = new ArrayList<>(); ops.add(Op.create(parent1, EMPTY_BYTES, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); for (int i = 0; i < batchSize * 2; i++) { - ops.add(Op.create( - parent1 + "/" + i, EMPTY_BYTES, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); + ops.add( + Op.create(parent1 + "/" + i, EMPTY_BYTES, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); } customZkw.getRecoverableZooKeeper().multi(ops); @@ -400,8 +396,8 @@ public void testBatchedDeletesOfWideZNodes() throws Exception { ops.clear(); ops.add(Op.create(parent2, EMPTY_BYTES, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); for (int i = 0; i < batchSize * 4; i++) { - ops.add(Op.create( - parent2 + "/" + i, EMPTY_BYTES, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); + ops.add( + Op.create(parent2 + "/" + i, EMPTY_BYTES, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); } customZkw.getRecoverableZooKeeper().multi(ops); @@ -418,46 +414,42 @@ public void testListPartitioning() { // Simple, single element case assertEquals(Collections.singletonList(Collections.singletonList(tenByteOp)), - ZKUtil.partitionOps(Collections.singletonList(tenByteOp), 15)); + ZKUtil.partitionOps(Collections.singletonList(tenByteOp), 15)); // Simple case where we exceed the limit, but must make the list assertEquals(Collections.singletonList(Collections.singletonList(tenByteOp)), - ZKUtil.partitionOps(Collections.singletonList(tenByteOp), 5)); + ZKUtil.partitionOps(Collections.singletonList(tenByteOp), 5)); // Each gets its own bucket assertEquals( - Arrays.asList(Collections.singletonList(tenByteOp), Collections.singletonList(tenByteOp), - Collections.singletonList(tenByteOp)), - ZKUtil.partitionOps(Arrays.asList(tenByteOp, tenByteOp, tenByteOp), 15)); + Arrays.asList(Collections.singletonList(tenByteOp), Collections.singletonList(tenByteOp), + Collections.singletonList(tenByteOp)), + ZKUtil.partitionOps(Arrays.asList(tenByteOp, tenByteOp, tenByteOp), 15)); // Test internal boundary assertEquals( - Arrays.asList(Arrays.asList(tenByteOp,tenByteOp), Collections.singletonList(tenByteOp)), - ZKUtil.partitionOps(Arrays.asList(tenByteOp, tenByteOp, tenByteOp), 20)); + Arrays.asList(Arrays.asList(tenByteOp, tenByteOp), Collections.singletonList(tenByteOp)), + 
ZKUtil.partitionOps(Arrays.asList(tenByteOp, tenByteOp, tenByteOp), 20)); // Plenty of space for one partition - assertEquals( - Collections.singletonList(Arrays.asList(tenByteOp, tenByteOp, tenByteOp)), - ZKUtil.partitionOps(Arrays.asList(tenByteOp, tenByteOp, tenByteOp), 50)); + assertEquals(Collections.singletonList(Arrays.asList(tenByteOp, tenByteOp, tenByteOp)), + ZKUtil.partitionOps(Arrays.asList(tenByteOp, tenByteOp, tenByteOp), 50)); } - private void createZNodeTree(String rootZNode) throws KeeperException, - InterruptedException { + private void createZNodeTree(String rootZNode) throws KeeperException, InterruptedException { List opList = new ArrayList<>(); - opList.add(Op.create(rootZNode, new byte[0], Ids.OPEN_ACL_UNSAFE, - CreateMode.PERSISTENT)); + opList.add(Op.create(rootZNode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); int level = 0; String parentZNode = rootZNode; while (level < 10) { // define parent node parentZNode = parentZNode + "/" + level; - opList.add(Op.create(parentZNode, new byte[0], Ids.OPEN_ACL_UNSAFE, - CreateMode.PERSISTENT)); + opList.add(Op.create(parentZNode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); int elements = 0; // add elements to the parent node while (elements < level) { - opList.add(Op.create(parentZNode + "/" + elements, new byte[0], - Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); + opList.add(Op.create(parentZNode + "/" + elements, new byte[0], Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT)); elements++; } level++; diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKNodeTracker.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKNodeTracker.java index 48c42f5b4c4a..97045bc03296 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKNodeTracker.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKNodeTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -93,15 +93,14 @@ public void testInterruptible() throws IOException, InterruptedException { @Test public void testNodeTracker() throws Exception { Abortable abortable = new StubAbortable(); - ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), - "testNodeTracker", abortable); + ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), "testNodeTracker", abortable); ZKUtil.createAndFailSilent(zk, zk.getZNodePaths().baseZNode); final String node = ZNodePaths.joinZNode(zk.getZNodePaths().baseZNode, Long.toString(ThreadLocalRandom.current().nextLong())); - final byte [] dataOne = Bytes.toBytes("dataOne"); - final byte [] dataTwo = Bytes.toBytes("dataTwo"); + final byte[] dataOne = Bytes.toBytes("dataOne"); + final byte[] dataTwo = Bytes.toBytes("dataTwo"); // Start a ZKNT with no node currently available TestTracker localTracker = new TestTracker(zk, node, abortable); @@ -130,9 +129,8 @@ public void testNodeTracker() throws Exception { // Create a completely separate zk connection for test triggers and avoid // any weird watcher interactions from the test - final ZooKeeper zkconn = ZooKeeperHelper. 
- getConnectedZooKeeper(ZKConfig.getZKQuorumServersString(TEST_UTIL.getConfiguration()), - 60000); + final ZooKeeper zkconn = ZooKeeperHelper.getConnectedZooKeeper( + ZKConfig.getZKQuorumServersString(TEST_UTIL.getConfiguration()), 60000); // Add the node with data one zkconn.create(node, dataOne, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); @@ -259,7 +257,7 @@ public static class TestingZKListener extends ZKListener { @Override public void nodeDeleted(String path) { - if(path.equals(node)) { + if (path.equals(node)) { LOG.debug("nodeDeleted(" + path + ")"); deletedLock.release(); } @@ -267,7 +265,7 @@ public void nodeDeleted(String path) { @Override public void nodeCreated(String path) { - if(path.equals(node)) { + if (path.equals(node)) { LOG.debug("nodeCreated(" + path + ")"); createdLock.release(); } @@ -275,7 +273,7 @@ public void nodeCreated(String path) { @Override public void nodeDataChanged(String path) { - if(path.equals(node)) { + if (path.equals(node)) { LOG.debug("nodeDataChanged(" + path + ")"); changedLock.release(); } @@ -296,7 +294,8 @@ void waitForDataChange() throws InterruptedException { public static class StubAbortable implements Abortable { @Override - public void abort(final String msg, final Throwable t) {} + public void abort(final String msg, final Throwable t) { + } @Override public boolean isAborted() { @@ -306,16 +305,15 @@ public boolean isAborted() { @Test public void testCleanZNode() throws Exception { - ZKWatcher zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), - "testNodeTracker", new TestZKNodeTracker.StubAbortable()); + ZKWatcher zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), "testNodeTracker", + new TestZKNodeTracker.StubAbortable()); final ServerName sn = ServerName.valueOf("127.0.0.1:52", 45L); - ZKUtil.createAndFailSilent(zkw, - TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT)); + ZKUtil.createAndFailSilent(zkw, TEST_UTIL.getConfiguration() + .get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT)); - final String nodeName = zkw.getZNodePaths().masterAddressZNode; + final String nodeName = zkw.getZNodePaths().masterAddressZNode; // Check that we manage the case when there is no data ZKUtil.createAndFailSilent(zkw, nodeName); @@ -328,7 +326,7 @@ public void testCleanZNode() throws Exception { assertNotNull(ZKUtil.getData(zkw, nodeName)); // Check that we delete when we're supposed to - ZKUtil.setData(zkw, nodeName,MasterAddressTracker.toByteArray(sn, 0)); + ZKUtil.setData(zkw, nodeName, MasterAddressTracker.toByteArray(sn, 0)); MasterAddressTracker.deleteIfEquals(zkw, sn.toString()); assertNull(ZKUtil.getData(zkw, nodeName)); diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java index 21b4b64f61b5..5b9d64c41db9 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,8 +54,7 @@ @Category({ ZKTests.class, MediumTests.class }) public class TestZKUtil { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZKUtil.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestZKUtil.class); private static final Logger LOG = LoggerFactory.getLogger(TestZKUtil.class); diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtilNoServer.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtilNoServer.java index d429a706ebe4..7be5cbff0163 100644 --- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtilNoServer.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtilNoServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/pom.xml b/pom.xml index 169755d63a51..007f6294df19 100644 --- a/pom.xml +++ b/pom.xml @@ -2675,8 +2675,6 @@ spotless-maven-plugin ${spotless.version} - - 620e5c6d0af57ddb67e18bb97725e1d87ffb5fe5
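// A minimal illustrative sketch, not part of this patch: how the listener dispatch implemented in
// ZKWatcher.processEvent() is typically consumed. A ZKListener subclass registers itself on a
// ZKWatcher and is called back for znode events. The identifier string and znode path are made up
// for the example; the ZKWatcher constructor and registerListener() signatures are the ones shown
// in the diff, while ZKListener is assumed to be the standard hbase-zookeeper listener base class.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKListener;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class ExampleNodeListener extends ZKListener {
  private final String trackedZNode;

  public ExampleNodeListener(ZKWatcher watcher, String trackedZNode) {
    super(watcher);
    this.trackedZNode = trackedZNode;
  }

  @Override
  public void nodeCreated(String path) {
    if (trackedZNode.equals(path)) {
      // The node appeared; re-read its data, refresh a cache, etc.
    }
  }

  @Override
  public void nodeDataChanged(String path) {
    if (trackedZNode.equals(path)) {
      // The node's data changed; callbacks run on the single zk-event-processor thread.
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Passing a null Abortable keeps the sketch short; real callers supply one.
    ZKWatcher watcher = new ZKWatcher(conf, "example-listener", null);
    watcher.registerListener(new ExampleNodeListener(watcher, "/hbase/example"));
  }
}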